summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorbt150084 <none@none>2008-05-12 16:13:04 -0700
committerbt150084 <none@none>2008-05-12 16:13:04 -0700
commit9da57d7b0ddd8d73b676ce12c040362132cdd538 (patch)
tree60dd36ffd77331393a0a88c03cbee5df999f55b1
parent0d4a0bb3f17c7804d5070ef0b598571de5bfe91f (diff)
downloadillumos-joyent-9da57d7b0ddd8d73b676ce12c040362132cdd538.tar.gz
PSARC/2007/611 Intel 10GbE PCIE NIC Driver
6574882 Solaris need to support Intel 82598 10GbE
-rw-r--r--usr/src/pkgdefs/Makefile1
-rw-r--r--usr/src/pkgdefs/SUNWixgbe/Makefile40
-rw-r--r--usr/src/pkgdefs/SUNWixgbe/pkginfo.tmpl48
-rw-r--r--usr/src/pkgdefs/SUNWixgbe/postinstall135
-rw-r--r--usr/src/pkgdefs/SUNWixgbe/postremove40
-rw-r--r--usr/src/pkgdefs/SUNWixgbe/prototype_com53
-rw-r--r--usr/src/pkgdefs/SUNWixgbe/prototype_i38650
-rw-r--r--usr/src/pkgdefs/SUNWixgbe/prototype_sparc49
-rw-r--r--usr/src/pkgdefs/common_files/i.minorperm_i3864
-rw-r--r--usr/src/pkgdefs/common_files/i.minorperm_sparc4
-rw-r--r--usr/src/tools/opensolaris/license-list1
-rw-r--r--usr/src/uts/common/Makefile.files10
-rw-r--r--usr/src/uts/common/Makefile.rules7
-rw-r--r--usr/src/uts/common/io/ixgbe/THIRDPARTYLICENSE26
-rw-r--r--usr/src/uts/common/io/ixgbe/THIRDPARTYLICENSE.descrip1
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe.conf92
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_82598.c743
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_api.c745
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_api.h105
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_buf.c891
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_common.c1891
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_common.h81
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_debug.c422
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_debug.h88
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_gld.c742
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_log.c96
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_main.c3984
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_ndd.c356
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_osdep.c38
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_osdep.h132
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_phy.c461
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_phy.h52
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_rx.c380
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_stat.c265
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_sw.h830
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_tx.c1320
-rw-r--r--usr/src/uts/common/io/ixgbe/ixgbe_type.h1494
-rw-r--r--usr/src/uts/intel/Makefile.intel.shared1
-rw-r--r--usr/src/uts/intel/ixgbe/Makefile90
-rw-r--r--usr/src/uts/intel/os/minor_perm2
-rw-r--r--usr/src/uts/sparc/Makefile.sparc.shared1
-rw-r--r--usr/src/uts/sparc/ixgbe/Makefile106
-rw-r--r--usr/src/uts/sparc/os/minor_perm2
43 files changed, 15879 insertions, 0 deletions
diff --git a/usr/src/pkgdefs/Makefile b/usr/src/pkgdefs/Makefile
index 47c8bfe856..ef66d7787a 100644
--- a/usr/src/pkgdefs/Makefile
+++ b/usr/src/pkgdefs/Makefile
@@ -239,6 +239,7 @@ COMMON_SUBDIRS= \
SUNWibsdpu \
SUNWibsdp \
SUNWigb \
+ SUNWixgbe \
SUNWintgige \
SUNWiotu \
SUNWioth \
diff --git a/usr/src/pkgdefs/SUNWixgbe/Makefile b/usr/src/pkgdefs/SUNWixgbe/Makefile
new file mode 100644
index 0000000000..9bf045b26f
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWixgbe/Makefile
@@ -0,0 +1,40 @@
+#
+# CDDL HEADER START
+#
+# Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+include ../Makefile.com
+
+DATAFILES += depend i.renamenew
+LICENSEFILES += ../../uts/common/io/ixgbe/THIRDPARTYLICENSE
+
+.KEEP_STATE:
+
+all: $(FILES) postinstall postremove
+install: all pkg
+
+include ../Makefile.targ
diff --git a/usr/src/pkgdefs/SUNWixgbe/pkginfo.tmpl b/usr/src/pkgdefs/SUNWixgbe/pkginfo.tmpl
new file mode 100644
index 0000000000..6dacd27896
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWixgbe/pkginfo.tmpl
@@ -0,0 +1,48 @@
+#
+# CDDL HEADER START
+#
+# Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+PKG=SUNWixgbe
+NAME=Intel 10GbE PCIE NIC Driver
+ARCH="ISA"
+VERSION="ONVERS,REV=0.0.0"
+SUNW_PRODNAME="SunOS"
+SUNW_PRODVERS="RELEASE/VERSION"
+SUNW_PKGVERS="1.0"
+SUNW_PKGTYPE="root"
+MAXINST="1000"
+CATEGORY=system
+VENDOR="Sun Microsystems, Inc."
+DESC="Intel 10GbE PCIE NIC Driver"
+CLASSES="none renamenew"
+HOTLINE="Please contact your local service provider"
+EMAIL=""
+BASEDIR=/
+SUNW_PKG_ALLZONES="true"
+SUNW_PKG_HOLLOW="true"
+SUNW_PKG_THISZONE="false"
diff --git a/usr/src/pkgdefs/SUNWixgbe/postinstall b/usr/src/pkgdefs/SUNWixgbe/postinstall
new file mode 100644
index 0000000000..cba4264930
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWixgbe/postinstall
@@ -0,0 +1,135 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+# Function: check_add_drv()
+#
+# This function will check if the module has an entry in etc/name_to_major
+# If not simply calls add_drv with the arguments given. If there is
+# such an entry in name_to_major file, it adds entries in driver_aliases
+# driver_classes and minor_perm if necessary.
+# The syntax of this function is the same as add_drv.
+
+check_add_drv()
+{
+ if [ "$BASEDIR" = "" ]
+ then
+ BASEDIR=/
+ fi
+ alias=""
+ class=""
+ ADD_ALIAS=0
+ ADD_CLASS=0
+ ADD_MINOR=0
+ OPTIND=1
+ IS_NET_DRIVER=0
+
+ cmd="add_drv"
+
+ NO_CMD=
+ while getopts i:b:m:c:N opt
+ do
+ case $opt in
+ N ) NO_CMD=1;;
+ i ) ADD_ALIAS=1
+ alias=$OPTARG
+ cmd=$cmd" -i '$alias'"
+ ;;
+ m ) ADD_MINOR=1
+ minor=$OPTARG
+ cmd=$cmd" -m '$minor'"
+ ;;
+ c) ADD_CLASS=1
+ class=$OPTARG
+ cmd=$cmd" -c $class"
+ ;;
+ b) BASEDIR=$OPTARG
+ cmd=$cmd" -b $BASEDIR"
+ ;;
+ \?) echo "check_add_drv can not handle this option"
+ return
+ ;;
+ esac
+ done
+ shift `/usr/bin/expr $OPTIND - 1`
+
+ drvname=$1
+
+ cmd=$cmd" "$drvname
+
+ drvname=`echo $drvname | /usr/bin/sed 's;.*/;;g'`
+
+ /usr/bin/grep "^$drvname[ ]" $BASEDIR/etc/name_to_major > /dev/null 2>&1
+
+ if [ "$NO_CMD" = "" -a $? -ne 0 ]
+ then
+ eval $cmd
+ else
+ # entry already in name_to_major, add alias, class, minorperm
+ # if necessary
+ if [ $ADD_ALIAS = 1 ]
+ then
+ for i in $alias
+ do
+ /usr/bin/egrep "^$drvname[ ]+$i" $BASEDIR/etc/driver_aliases>/dev/null 2>&1
+ if [ $? -ne 0 ]
+ then
+ echo "$drvname $i" >> $BASEDIR/etc/driver_aliases
+ fi
+ done
+ fi
+
+ if [ $ADD_CLASS = 1 ]
+ then
+ /usr/bin/egrep "^$drvname[ ]+$class( | |$)" $BASEDIR/etc/driver_classes > /dev/null 2>&1
+ if [ $? -ne 0 ]
+ then
+ echo "$drvname\t$class" >> $BASEDIR/etc/driver_classes
+ fi
+ fi
+
+ if [ $ADD_MINOR = 1 ]
+ then
+ /usr/bin/grep "^$drvname:" $BASEDIR/etc/minor_perm > /dev/null 2>&1
+ if [ $? -ne 0 ]
+ then
+ minorentry="$drvname:$minor"
+ echo $minorentry >> $BASEDIR/etc/minor_perm
+ fi
+ fi
+
+ fi
+
+
+}
+
+check_add_drv -i \
+ '"pciex8086,10c6"
+ "pciex8086,10c7"' \
+ -b "$BASEDIR" ixgbe
diff --git a/usr/src/pkgdefs/SUNWixgbe/postremove b/usr/src/pkgdefs/SUNWixgbe/postremove
new file mode 100644
index 0000000000..7f57f11632
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWixgbe/postremove
@@ -0,0 +1,40 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+BD=${BASEDIR:-/}
+if grep -w ixgbe $BD/etc/name_to_major > /dev/null 2>&1
+then
+ rem_drv -b ${BD} ixgbe
+ if [ $? -ne 0 ]
+ then
+ exit 1
+ fi
+fi
+exit 0
diff --git a/usr/src/pkgdefs/SUNWixgbe/prototype_com b/usr/src/pkgdefs/SUNWixgbe/prototype_com
new file mode 100644
index 0000000000..341650fd38
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWixgbe/prototype_com
@@ -0,0 +1,53 @@
+#
+# CDDL HEADER START
+#
+# Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+# packaging files
+i pkginfo
+i copyright
+i depend
+i postinstall
+i postremove
+i i.renamenew
+#
+# Intel 10GbE PCIE NIC Driver common files
+#
+d none kernel 0755 root sys
+d none kernel/drv 0755 root sys
+e renamenew kernel/drv/ixgbe.conf 0644 root sys
diff --git a/usr/src/pkgdefs/SUNWixgbe/prototype_i386 b/usr/src/pkgdefs/SUNWixgbe/prototype_i386
new file mode 100644
index 0000000000..4a5f31d036
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWixgbe/prototype_i386
@@ -0,0 +1,50 @@
+#
+# CDDL HEADER START
+#
+# Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+#
+# Include ISA independent files (prototype_com)
+#
+!include prototype_com
+#
+# Intel 10GbE PCIE NIC Driver i386 specific files
+#
+f none kernel/drv/ixgbe 0755 root sys
+d none kernel/drv/amd64 0755 root sys
+f none kernel/drv/amd64/ixgbe 0755 root sys
diff --git a/usr/src/pkgdefs/SUNWixgbe/prototype_sparc b/usr/src/pkgdefs/SUNWixgbe/prototype_sparc
new file mode 100644
index 0000000000..b7b079bd0e
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWixgbe/prototype_sparc
@@ -0,0 +1,49 @@
+#
+# CDDL HEADER START
+#
+# Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+#
+# Include ISA independent files (prototype_com)
+#
+!include prototype_com
+#
+# Intel 10GbE PCIE NIC Driver SPARC specific files
+#
+d none kernel/drv/sparcv9 0755 root sys
+f none kernel/drv/sparcv9/ixgbe 0755 root sys
diff --git a/usr/src/pkgdefs/common_files/i.minorperm_i386 b/usr/src/pkgdefs/common_files/i.minorperm_i386
index 21e7979017..47fb49c2b1 100644
--- a/usr/src/pkgdefs/common_files/i.minorperm_i386
+++ b/usr/src/pkgdefs/common_files/i.minorperm_i386
@@ -105,6 +105,7 @@ profile:profile 0600 root sys 0644 root sys /dev/dtrace/provider/profile
sdt:sdt 0600 root sys 0644 root sys /dev/dtrace/provider/sdt
systrace:systrace 0600 root sys 0644 root sys /dev/dtrace/provider/systrace
clone:bge 0600 root sys 0666 root sys /dev/bge
+clone:ixgbe 0600 root sys 0666 root sys /dev/ixgbe
clone:rge 0600 root sys 0666 root sys /dev/rge
clone:xge 0600 root sys 0666 root sys /dev/xge
clone:nge 0600 root sys 0666 root sys /dev/nge
@@ -124,6 +125,7 @@ clone:afe 0600 root sys 0666 root sys /dev/afe
clone:dmfe 0600 root sys 0666 root sys /dev/dmfe
clone:mxfe 0600 root sys 0666 root sys /dev/mxfe
bge:* 0600 root sys 0666 root sys /dev/bge*
+ixgbe:* 0600 root sys 0666 root sys /dev/ixgbe*
rge:* 0600 root sys 0666 root sys /dev/rge*
xge:* 0600 root sys 0666 root sys /dev/xge*
nge:* 0600 root sys 0666 root sys /dev/nge*
@@ -261,6 +263,7 @@ sctp6:*
vni:*
cpuid:self
clone:bge
+clone:ixgbe
clone:rge
clone:xge
clone:nge
@@ -281,6 +284,7 @@ clone:afe
clone:dmfe
clone:mxfe
bge:*
+ixgbe:*
rge:*
xge:*
nge:*
diff --git a/usr/src/pkgdefs/common_files/i.minorperm_sparc b/usr/src/pkgdefs/common_files/i.minorperm_sparc
index 5eec85cdc5..76a1fc3d92 100644
--- a/usr/src/pkgdefs/common_files/i.minorperm_sparc
+++ b/usr/src/pkgdefs/common_files/i.minorperm_sparc
@@ -95,6 +95,7 @@ clone:ge 0600 root sys 0666 root sys /dev/ge
clone:hme 0600 root sys 0666 root sys /dev/hme
clone:qfe 0600 root sys 0666 root sys /dev/qfe
clone:bge 0600 root sys 0666 root sys /dev/bge
+clone:ixgbe 0600 root sys 0666 root sys /dev/ixgbe
clone:rge 0600 root sys 0666 root sys /dev/rge
clone:xge 0600 root sys 0666 root sys /dev/xge
clone:nge 0600 root sys 0666 root sys /dev/nge
@@ -108,6 +109,7 @@ clone:afe 0600 root sys 0666 root sys /dev/afe
clone:mxfe 0600 root sys 0666 root sys /dev/mxfe
clone:rtls 0600 root sys 0666 root sys /dev/rtls
bge:* 0600 root sys 0666 root sys /dev/bge*
+ixgbe:* 0600 root sys 0666 root sys /dev/ixgbe*
rge:* 0600 root sys 0666 root sys /dev/rge*
xge:* 0600 root sys 0666 root sys /dev/xge*
nge:* 0600 root sys 0666 root sys /dev/nge*
@@ -274,6 +276,7 @@ mm:allkmem
ssm:*
bscv:*
clone:bge
+clone:ixgbe
clone:rge
clone:xge
clone:nge
@@ -287,6 +290,7 @@ clone:afe
clone:mxfe
clone:rtls
bge:*
+ixgbe:*
rge:*
xge:*
nge:*
diff --git a/usr/src/tools/opensolaris/license-list b/usr/src/tools/opensolaris/license-list
index 2629225cd4..54732457ee 100644
--- a/usr/src/tools/opensolaris/license-list
+++ b/usr/src/tools/opensolaris/license-list
@@ -132,6 +132,7 @@ usr/src/uts/common/io/ipw/THIRDPARTYLICENSE
usr/src/uts/common/io/ipw/fw-ipw2100/LICENSE
usr/src/uts/common/io/iwi/THIRDPARTYLICENSE
usr/src/uts/common/io/iwi/fw-ipw2200/LICENSE
+usr/src/uts/common/io/ixgbe/THIRDPARTYLICENSE
usr/src/uts/common/io/mega_sas/THIRDPARTYLICENSE
usr/src/uts/common/io/mxfe/THIRDPARTYLICENSE
usr/src/uts/common/io/pcan/THIRDPARTYLICENSE
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index 8b12e6276e..96ae41ad31 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -1576,6 +1576,16 @@ IGB_OBJS = igb_82575.o igb_api.o igb_mac.o igb_manage.o \
igb_ndd.o igb_rx.o igb_stat.o igb_tx.o
#
+# Intel 10GbE PCIE NIC driver module
+#
+IXGBE_OBJS = ixgbe_82598.o ixgbe_api.o ixgbe_common.o \
+ ixgbe_phy.o \
+ ixgbe_buf.o ixgbe_debug.o ixgbe_gld.o \
+ ixgbe_log.o ixgbe_main.o ixgbe_ndd.o \
+ ixgbe_osdep.o ixgbe_rx.o ixgbe_stat.o \
+ ixgbe_tx.o
+
+#
# NIU 10G/1G driver module
#
NXGE_OBJS = nxge_mac.o nxge_ipp.o nxge_rxdma.o \
diff --git a/usr/src/uts/common/Makefile.rules b/usr/src/uts/common/Makefile.rules
index 67c4e18b2c..e33cad5a99 100644
--- a/usr/src/uts/common/Makefile.rules
+++ b/usr/src/uts/common/Makefile.rules
@@ -915,6 +915,10 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/common/io/igb/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
+$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/ixgbe/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
$(OBJS_DIR)/%.o: $(UTSBASE)/common/ipp/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
@@ -1757,6 +1761,9 @@ $(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/e1000g/%.c
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/igb/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
+$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/ixgbe/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/ipp/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
diff --git a/usr/src/uts/common/io/ixgbe/THIRDPARTYLICENSE b/usr/src/uts/common/io/ixgbe/THIRDPARTYLICENSE
new file mode 100644
index 0000000000..a744046069
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/THIRDPARTYLICENSE
@@ -0,0 +1,26 @@
+DO NOT TRANSLATE OR LOCALIZE.
+
+The following software may be included in this product: <Common
+Driver Code for the Intel(R) 82598 10GbE Controller>; Use of any
+of this software is governed by the terms of the license below:
+
+-------------------Begin Text Block------------------
+Common Driver Code for the Intel(R) 82598 10GbE Controller
+
+Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+
+The contents of this software are subject to the terms of the
+Common Development and Distribution License (the "License").
+You may not use this software except in compliance with the
+License.
+
+You can obtain a copy of the license at:
+ http://www.opensolaris.org/os/licensing.
+See the License for the specific language governing permissions
+and limitations under the License.
+
+When using or redistributing this software, you may do so under the
+License only and provided that the software includes this
+Text Block. No other modification of this Text Block is permitted.
+--------------------End Text Block-------------------
+
diff --git a/usr/src/uts/common/io/ixgbe/THIRDPARTYLICENSE.descrip b/usr/src/uts/common/io/ixgbe/THIRDPARTYLICENSE.descrip
new file mode 100644
index 0000000000..67f7c66701
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/THIRDPARTYLICENSE.descrip
@@ -0,0 +1 @@
+IXGBE DRIVER
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe.conf b/usr/src/uts/common/io/ixgbe/ixgbe.conf
new file mode 100644
index 0000000000..0e46fe5a0d
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe.conf
@@ -0,0 +1,92 @@
+#
+# CDDL HEADER START
+#
+# Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at:
+# http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When using or redistributing this file, you may do so under the
+# License only. No other modification of this header is permitted.
+#
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms of the CDDL.
+#
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+#
+# Driver.conf file for Intel 10GbE PCIE NIC Driver (ixgbe)
+#
+# -------------------- Jumbo Frame --------------------
+# default_mtu
+# The size of the default MTU (payload without the ethernet header)
+# Allowed values: 1500 - 16366
+# Default value: 1500
+#
+# default_mtu = 1500;
+#
+# -------------------- Flow Control --------------------
+# flow_control
+# Ethernet flow control
+# Allowed values: 0 - Disable
+# 1 - Receive only
+# 2 - Transmit only
+# 3 - Receive and transmit
+# default value: 3
+#
+# flow_control = 3;
+#
+# -------------------- Transmit/Receive Queues --------------------
+# tx/rx queue.
+# tx_queue_number
+# The number of the transmit queues
+# Allowed values: 1 - 32
+# Default value: 1
+#
+# tx_ring_size
+# The number of the transmit descriptors per transmit queue
+# Allowed values: 64 - 4096
+# Default value: 512
+#
+# rx_queue_number
+# The number of the receive queues
+# Allowed values: 1 - 64
+# Default value: 1
+#
+# rx_ring_size
+# The number of the receive descriptors per receive queue
+# Allowed values: 64 - 4096
+# Default value: 512
+#
+# Note: The final values of tx_queue_number and rx_queue_number are decided
+# by the number of interrupt vectors obtained by the driver. They could be
+# less than the specified values because of limited interrupt vector number.
+#
+# -------- How to set parameters for a particular interface ---------
+# The example below shows how to locate the device path and set a parameter
+# for a particular ixgbe interface. (Using flow_control as an example)
+#
+# Use the following command to find out the device paths for ixgbe,
+# more /etc/path_to_inst | grep ixgbe
+#
+# For example, if you see,
+# "/pci@7b,0/pci10de,5d@e/pci8086,a15f@0" 0 "ixgbe"
+# "/pci@7b,0/pci10de,5d@e/pci8086,a15f@0,1" 1 "ixgbe"
+#
+# name = "pciex8086,10c6" parent = "/pci@7b,0/pci10de,5d@e" unit-address = "0"
+# flow_control = 1;
+# name = "pciex8086,10c6" parent = "/pci@7b,0/pci10de,5d@e" unit-address = "1"
+# flow_control = 3;
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_82598.c b/usr/src/uts/common/io/ixgbe/ixgbe_82598.c
new file mode 100644
index 0000000000..c22b7170d8
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_82598.c
@@ -0,0 +1,743 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+/* IntelVersion: 1.87 v2008-03-04 */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+
+s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
+s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up);
+s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
+s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index);
+
+/*
+ * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for 82598.
+ * Does not touch the hardware.
+ */
+s32
+ixgbe_init_ops_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ (void) ixgbe_init_phy_ops_generic(hw);
+ (void) ixgbe_init_ops_generic(hw);
+
+ /* MAC */
+ mac->ops.reset_hw = &ixgbe_reset_hw_82598;
+ mac->ops.get_media_type = &ixgbe_get_media_type_82598;
+
+ /* LEDs */
+ mac->ops.blink_led_start = &ixgbe_blink_led_start_82598;
+ mac->ops.blink_led_stop = &ixgbe_blink_led_stop_82598;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
+
+ /* Flow Control */
+ mac->ops.setup_fc = &ixgbe_setup_fc_82598;
+
+
+ /* Link */
+ mac->ops.check_link = &ixgbe_check_mac_link_82598;
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+ mac->ops.setup_link_speed =
+ &ixgbe_setup_copper_link_speed_82598;
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_82598;
+ } else {
+ mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+ mac->ops.setup_link_speed = &ixgbe_setup_mac_link_speed_82598;
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_link_capabilities_82598;
+ }
+
+ mac->mcft_size = 128;
+ mac->vft_size = 128;
+ mac->num_rar_entries = 16;
+ mac->max_tx_queues = 32;
+ mac->max_rx_queues = 64;
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_get_link_capabilities_82598 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ */
+s32
+ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_SUCCESS;
+ s32 autoc_reg;
+
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ if (hw->mac.link_settings_loaded) {
+ autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE;
+ autoc_reg &= ~IXGBE_AUTOC_LMS_MASK;
+ autoc_reg |= hw->mac.link_attach_type;
+ autoc_reg |= hw->mac.link_mode_select;
+ }
+
+ switch (autoc_reg & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = FALSE;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = FALSE;
+ break;
+
+ case IXGBE_AUTOC_LMS_1G_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = TRUE;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_AN:
+ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (autoc_reg & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc_reg & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = TRUE;
+ break;
+
+ default:
+ status = IXGBE_ERR_LINK_SETUP;
+ break;
+ }
+
+ return (status);
+}
+
+/*
+ * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ */
+s32
+ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_ERR_LINK_SETUP;
+ u16 speed_ability;
+
+ *speed = 0;
+ *autoneg = TRUE;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &speed_ability);
+
+ if (status == IXGBE_SUCCESS) {
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ }
+
+ return (status);
+}
+
+/*
+ * ixgbe_get_media_type_82598 - Determines media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ */
+enum ixgbe_media_type
+ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ /* Media type for I82598 is based on device ID */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+
+ return (media_type);
+}
+
+/*
+ * ixgbe_setup_fc_82598 - Configure flow control settings
+ * @hw: pointer to hardware structure
+ * @packetbuf_num: packet buffer number (0-7)
+ *
+ * Configures the flow control settings based on SW configuration. This
+ * function is used for 802.3x flow control configuration only.
+ */
+s32
+ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+ u32 frctl_reg;
+ u32 rmcs_reg;
+
+ if (packetbuf_num < 0 || packetbuf_num > 7) {
+ DEBUGOUT1("Invalid packet buffer number [%d], expected range is"
+ " 0-7\n", packetbuf_num);
+ ASSERT(0);
+ }
+
+ frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+
+ rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+
+ /*
+ * 10 gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.type == ixgbe_fc_default)
+ hw->fc.type = ixgbe_fc_full;
+
+ /*
+ * We want to save off the original Flow Control configuration just in
+ * case we get disconnected and then reconnected into a different hub
+ * or switch with different Flow Control capabilities.
+ */
+ hw->fc.original_type = hw->fc.type;
+
+ /*
+ * The possible values of the "flow_control" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames but not
+ * send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do not
+ * support receiving pause frames)
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.type) {
+ case ixgbe_fc_none:
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled,
+ * and Tx Flow control is disabled.
+ */
+ frctl_reg |= IXGBE_FCTRL_RFCE;
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
+ */
+ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+ break;
+ case ixgbe_fc_full:
+ /*
+ * Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ frctl_reg |= IXGBE_FCTRL_RFCE;
+ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+ break;
+ default:
+ /* We should never get here. The value should be 0-3. */
+ DEBUGOUT("Flow control param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+ /* Enable 802.3x based flow control settings. */
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
+
+ /*
+ * Check for invalid software configuration, zeros are completely
+ * invalid for all parameters used past this point, and if we enable
+ * flow control with zero water marks, we blast flow control packets.
+ */
+ if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
+ DEBUGOUT("Flow control structure initialized incorrectly\n");
+ return (IXGBE_ERR_INVALID_LINK_SETTINGS);
+ }
+
+ /*
+ * We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
+ */
+ if (hw->fc.type & ixgbe_fc_tx_pause) {
+ if (hw->fc.send_xon) {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
+ (hw->fc.low_water | IXGBE_FCRTL_XONE));
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
+ hw->fc.low_water);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
+ (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_setup_mac_link_82598 - Configures MAC link settings
+ * @hw: pointer to hardware structure
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ */
+s32
+ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
+{
+ u32 autoc_reg;
+ u32 links_reg;
+ u32 i;
+ s32 status = IXGBE_SUCCESS;
+
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ if (hw->mac.link_settings_loaded) {
+ autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE;
+ autoc_reg &= ~IXGBE_AUTOC_LMS_MASK;
+ autoc_reg |= hw->mac.link_attach_type;
+ autoc_reg |= hw->mac.link_mode_select;
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(50);
+ }
+
+ /* Restart link */
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (hw->phy.autoneg_wait_to_complete) {
+ if (hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN ||
+ hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ links_reg = 0; /* Just in case Autoneg time = 0 */
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msec_delay(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ DEBUGOUT("Autonegotiation did not complete.\n");
+ }
+ }
+ }
+
+ /*
+ * We want to save off the original Flow Control configuration just in
+ * case we get disconnected and then reconnected into a different hub
+ * or switch with different Flow Control capabilities.
+ */
+ hw->fc.original_type = hw->fc.type;
+ (void) ixgbe_setup_fc_82598(hw, 0);
+
+ /* Add delay to filter out noises during initial link setup */
+ msec_delay(50);
+
+ return (status);
+}
+
+/*
+ * ixgbe_check_mac_link_82598 - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: TRUE if link is up, FALSE otherwise
+ *
+ * Reads the links register to determine if link is up and the current speed
+ */
+s32
+ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ u32 links_reg;
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = TRUE;
+ else
+ *link_up = FALSE;
+
+ if (links_reg & IXGBE_LINKS_SPEED)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ return (IXGBE_SUCCESS);
+}
+
+
+/*
+ * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: TRUE if auto-negotiation enabled
+ * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ */
+s32
+ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ /* If speed is 10G, then check for CX4 or XAUI. */
+ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
+ (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) {
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
+ } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) {
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
+ } else if (autoneg) {
+ /* BX mode - Autonegotiate 1G */
+ if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD))
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN;
+ else /* KX/KX4 mode */
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN_1G_AN;
+ } else {
+ status = IXGBE_ERR_LINK_SETUP;
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ hw->phy.autoneg_wait_to_complete = autoneg_wait_to_complete;
+
+ hw->mac.link_settings_loaded = TRUE;
+ /*
+ * Setup and restart the link based on the new values in
+ * ixgbe_hw This will write the AUTOC register based on the new
+ * stored values
+ */
+ (void) ixgbe_setup_mac_link_82598(hw);
+ }
+
+ return (status);
+}
+
+
+/*
+ * ixgbe_setup_copper_link_82598 - Setup copper link settings
+ * @hw: pointer to hardware structure
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed. Restart
+ * phy and wait for autonegotiate to finish. Then synchronize the
+ * MAC and PHY.
+ */
+s32
+ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /* Restart autonegotiation on PHY */
+ status = hw->phy.ops.setup_link(hw);
+
+ /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
+ hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
+
+ /* Set up MAC */
+ (void) ixgbe_setup_mac_link_82598(hw);
+
+ return (status);
+}
+
+/*
+ * ixgbe_setup_copper_link_speed_82598 - Set the PHY autoneg advertised field
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: TRUE if autonegotiation enabled
+ * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
+ *
+ * Sets the link speed in the AUTOC register in the MAC and restarts link.
+ */
+s32
+ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+
+ /* Setup the PHY according to input speed */
+ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+
+ /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
+ hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
+ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
+
+ /* Set up MAC */
+ (void) ixgbe_setup_mac_link_82598(hw);
+
+ return (status);
+}
+
+/*
+ * ixgbe_reset_hw_82598 - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts, performing a PHY reset, and performing a link (MAC)
+ * reset.
+ */
+s32
+ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 ctrl;
+ u32 gheccr;
+ u32 i;
+ u32 autoc;
+ u8 analog_val;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ /*
+ * Power up the Atlas Tx lanes if they are currently powered down.
+ * Atlas Tx lanes are powered down for MAC loopback tests, but
+ * they are not automatically restored on reset.
+ */
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+ if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
+ /* Enable Tx Atlas so packets can be transmitted again */
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+ &analog_val);
+ analog_val &= ~ IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+ analog_val);
+ }
+
+ /* Reset PHY */
+ hw->phy.ops.reset(hw);
+
+ /*
+	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
+ * access and verify no pending requests before reset
+ */
+ if (ixgbe_disable_pcie_master(hw) != IXGBE_SUCCESS) {
+ status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+ }
+
+ /*
+ * Issue global reset to the MAC. This needs to be a SW reset.
+ * If link reset is used, it might reset the MAC when mng is using it
+ */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST))
+ break;
+ }
+ if (ctrl & IXGBE_CTRL_RST) {
+ status = IXGBE_ERR_RESET_FAILED;
+ DEBUGOUT("Reset polling failed to complete.\n");
+ }
+
+ msec_delay(50);
+
+ gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
+ gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+ IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
+
+ /*
+ * AUTOC register which stores link settings gets cleared
+ * and reloaded from EEPROM after reset. We need to restore
+ * our stored value from init in case SW changed the attach
+ * type or speed. If this is the first time and link settings
+ * have not been stored, store default settings from AUTOC.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ if (hw->mac.link_settings_loaded) {
+ autoc &= ~(IXGBE_AUTOC_LMS_ATTACH_TYPE);
+ autoc &= ~(IXGBE_AUTOC_LMS_MASK);
+ autoc |= hw->mac.link_attach_type;
+ autoc |= hw->mac.link_mode_select;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ } else {
+ hw->mac.link_attach_type =
+ (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
+ hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
+ hw->mac.link_settings_loaded = TRUE;
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ return (status);
+}
+
+/*
+ * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq set index
+ */
+s32
+ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 rar_high;
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_blink_led_start_82598 - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ */
+s32
+ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
+{
+ ixgbe_link_speed speed = 0;
+ bool link_up = 0;
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /*
+ * Link must be up to auto-blink the LEDs on the 82598EB MAC;
+ * force it if link is down.
+ */
+ hw->mac.ops.check_link(hw, &speed, &link_up);
+
+ if (!link_up) {
+ autoc_reg |= IXGBE_AUTOC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ msec_delay(10);
+ }
+
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ */
+s32
+ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
+{
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ autoc_reg &= ~IXGBE_AUTOC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg &= ~IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return (IXGBE_SUCCESS);
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_api.c b/usr/src/uts/common/io/ixgbe/ixgbe_api.c
new file mode 100644
index 0000000000..4ea692207a
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_api.c
@@ -0,0 +1,745 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+/* IntelVersion: 1.81 v2008-03-04 */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+
+extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+
+/*
+ * ixgbe_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers and assign the MAC type and PHY code.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The ixgbe_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ */
+s32
+ixgbe_init_shared_code(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /*
+ * Set the mac type
+ */
+ (void) ixgbe_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ status = ixgbe_init_ops_82598(hw);
+ break;
+ default:
+ status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+
+ return (status);
+}
+
+/*
+ * ixgbe_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ */
+s32
+ixgbe_set_mac_type(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_set_mac_type");
+
+ if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ hw->mac.type = ixgbe_mac_82598EB;
+ break;
+ default:
+ ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+ } else {
+ ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ return (ret_val);
+}
+
+/*
+ * ixgbe_init_hw - Initialize the hardware
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting and then starting the hardware
+ */
+s32
+ixgbe_init_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_reset_hw - Performs a hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts, performs a PHY reset, and performs a MAC reset
+ */
+s32
+ixgbe_reset_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_start_hw - Prepares hardware for Rx/Tx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type,
+ * clears all on chip counters, initializes receive address registers,
+ * multicast table, VLAN filter table, calls routine to setup link and
+ * flow control settings, and leaves transmit and receive units disabled
+ * and uninitialized.
+ */
+s32
+ixgbe_start_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_clear_hw_cntrs - Clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware
+ * Statistics counters are clear on read.
+ */
+s32
+ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_get_media_type - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ */
+enum ixgbe_media_type
+ixgbe_get_media_type(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
+ ixgbe_media_type_unknown);
+}
+
+/*
+ * ixgbe_get_mac_addr - Get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from the first Receive Address Register
+ * (RAR0). A reset of the adapter must have been performed prior to calling
+ * this function in order for the MAC address to have been loaded from the
+ * EEPROM into RAR0
+ */
+s32
+ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
+ (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_get_bus_info - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ */
+s32
+ixgbe_get_bus_info(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_get_num_of_tx_queues - Get Tx queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of transmit queues for the given adapter.
+ */
+u32
+ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
+{
+ return (hw->mac.max_tx_queues);
+}
+
+/*
+ * ixgbe_get_num_of_rx_queues - Get Rx queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of receive queues for the given adapter.
+ */
+u32
+ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
+{
+ return (hw->mac.max_rx_queues);
+}
+
+/*
+ * ixgbe_stop_adapter - Disable Rx/Tx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ */
+s32
+ixgbe_stop_adapter(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_read_pba_num - Reads part number from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number from the EEPROM
+ *
+ * Reads the part number from the EEPROM.
+ */
+s32
+ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num)
+{
+ return (ixgbe_read_pba_num_generic(hw, pba_num));
+}
+
+/*
+ * ixgbe_identify_phy - Get PHY type
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ */
+s32
+ixgbe_identify_phy(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ status = ixgbe_call_func(hw,
+ hw->phy.ops.identify,
+ (hw),
+ IXGBE_NOT_IMPLEMENTED);
+ }
+
+ return (status);
+}
+
+/*
+ * ixgbe_reset_phy - Perform a PHY reset
+ * @hw: pointer to hardware structure
+ */
+s32
+ixgbe_reset_phy(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) {
+ status = IXGBE_ERR_PHY;
+ }
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+ }
+ return (status);
+}
+
+/*
+ * ixgbe_read_phy_reg - Read PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register
+ */
+s32
+ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_write_phy_reg - Write PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register
+ */
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_setup_phy_link - Restart PHY autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restart autonegotiation and PHY and waits for completion.
+ */
+s32
+ixgbe_setup_phy_link(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_setup_phy_link_speed - Set auto advertise
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: TRUE if autonegotiation enabled
+ *
+ * Sets the auto advertised capabilities
+ */
+s32
+ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
+ autoneg, autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_setup_link - Configure link settings
+ * @hw: pointer to hardware structure
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ */
+s32
+ixgbe_setup_link(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_check_link - Get link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: where to store the current link speed
+ * @link_up: where to store the link up/down status
+ *
+ * Reads the links register to determine if link is up and the current speed
+ */
+s32
+ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
+ link_up), IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_setup_link_speed - Set link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: TRUE if autonegotiation enabled
+ * @autoneg_wait_to_complete: TRUE to wait for autonegotiation to complete
+ *
+ * Set the link speed and restarts the link.
+ */
+s32 ixgbe_setup_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_link_speed, (hw, speed,
+ autoneg, autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_get_link_capabilities - Returns link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: where to store the supported link speeds
+ * @autoneg: where to store whether autonegotiation is supported
+ *
+ * Determines the link capabilities of the current configuration.
+ */
+s32
+ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
+ speed, autoneg), IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_led_on - Turn on LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ *
+ * Turns on the software controllable LEDs.
+ */
+s32
+ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_led_off - Turn off LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ *
+ * Turns off the software controllable LEDs.
+ */
+s32
+ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_blink_led_start - Blink LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Blink LED based on index.
+ */
+s32
+ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_blink_led_stop - Stop blinking LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ *
+ * Stop blinking LED based on index.
+ */
+s32
+ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_init_eeprom_params - Initialize EEPROM parameters
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ */
+s32
+ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/*
+ * ixgbe_write_eeprom - Write word to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not
+ * called after this function, the EEPROM will most likely contain an
+ * invalid checksum.
+ */
+s32
+ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_read_eeprom - Read word from EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM
+ */
+s32
+ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum
+ */
+s32
+ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
+ (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ */
+s32
+ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_set_rar - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set"
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ */
+s32
+ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
+ enable_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_set_vmdq - Associate a VMDq index with a receive address
+ * @hw: pointer to hardware structure
+ * @rar: receive address register index to associate with VMDq index
+ * @vmdq: VMDq set or pool index
+ */
+s32
+ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_init_rx_addrs - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ */
+s32
+ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
+ * @hw: pointer to hardware structure
+ *
+ * Simple accessor; returns hw->mac.num_rar_entries directly and does not
+ * go through the ops vector.
+ */
+u32
+ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
+{
+ return (hw->mac.num_rar_entries);
+}
+
+/*
+ * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
+ * @hw: pointer to hardware structure
+ * @addr_list: the list of new secondary (unicast) addresses
+ * @addr_count: number of addresses
+ * @func: iterator function to walk the address list
+ *
+ * The given list replaces any existing list. Clears the secondary addrs from
+ * receive address registers. Uses unused receive address registers for the
+ * first secondary addresses, and falls back to promiscuous mode as needed.
+ */
+s32
+ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
+ addr_list, addr_count, func),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+ * @func: iterator function to walk the multicast address list
+ *
+ * The given list replaces any existing list. Clears the MC addrs from receive
+ * address registers and the multicast table. Uses unused receive address
+ * registers for the first multicast addresses, and hashes the rest into the
+ * multicast table.
+ */
+s32
+ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr func)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
+ mc_addr_list, mc_addr_count, func),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_enable_mc - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Enables multicast address in RAR and the use of the multicast hash table.
+ */
+s32
+ixgbe_enable_mc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_disable_mc - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Disables multicast address in RAR and the use of the multicast hash table.
+ */
+s32
+ixgbe_disable_mc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ */
+s32
+ixgbe_clear_vfta(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_set_vfta - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFTA
+ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ */
+s32
+ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
+ vlan_on), IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_setup_fc - Set flow control
+ * @hw: pointer to hardware structure
+ * @packetbuf_num: packet buffer number (0-7)
+ *
+ * Configures the flow control settings based on SW configuration.
+ */
+s32
+ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw, packetbuf_num),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_read_analog_reg8 - Reads 8 bit analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation from analog register specified.
+ */
+s32
+ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
+ val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/*
+ * ixgbe_write_analog_reg8 - Writes 8 bit analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ */
+s32
+ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
+ val), IXGBE_NOT_IMPLEMENTED);
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_api.h b/usr/src/uts/common/io/ixgbe/ixgbe_api.h
new file mode 100644
index 0000000000..dcd2cd78e7
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_api.h
@@ -0,0 +1,105 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+/* IntelVersion: 1.50 v2008-03-04 */
+
+#ifndef _IXGBE_API_H
+#define _IXGBE_API_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
+
+/* Hardware initialization and control */
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
+
+/* PHY and link management */
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data);
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data);
+
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up);
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_setup_link(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up);
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
+
+/* EEPROM access */
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
+
+/* Receive address and multicast/VLAN filtering */
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
+
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
+
+#endif /* _IXGBE_API_H */
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_buf.c b/usr/src/uts/common/io/ixgbe/ixgbe_buf.c
new file mode 100644
index 0000000000..aa64ad5006
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_buf.c
@@ -0,0 +1,891 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_sw.h"
+
+/* Forward declarations of the local allocation/free routines. */
+static int ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *);
+static void ixgbe_free_tbd_ring(ixgbe_tx_ring_t *);
+static int ixgbe_alloc_rbd_ring(ixgbe_rx_ring_t *);
+static void ixgbe_free_rbd_ring(ixgbe_rx_ring_t *);
+static int ixgbe_alloc_dma_buffer(ixgbe_t *, dma_buffer_t *, size_t);
+static void ixgbe_free_dma_buffer(dma_buffer_t *);
+static int ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *);
+static void ixgbe_free_tcb_lists(ixgbe_tx_ring_t *);
+static int ixgbe_alloc_rcb_lists(ixgbe_rx_ring_t *);
+static void ixgbe_free_rcb_lists(ixgbe_rx_ring_t *);
+
+/*
+ * DMA address alignment: 8KB (0x2000) on sparc, 4KB (0x1000) elsewhere.
+ */
+#ifdef __sparc
+#define IXGBE_DMA_ALIGNMENT 0x0000000000002000ull
+#else
+#define IXGBE_DMA_ALIGNMENT 0x0000000000001000ull
+#endif
+
+/*
+ * DMA attributes for tx/rx descriptors.  The scatter/gather list length
+ * of 1 keeps each descriptor ring in one physically contiguous chunk
+ * (the alloc routines ASSERT a single cookie).
+ */
+static ddi_dma_attr_t ixgbe_desc_dma_attr = {
+ DMA_ATTR_V0, /* version number */
+ 0x0000000000000000ull, /* low address */
+ 0xFFFFFFFFFFFFFFFFull, /* high address */
+ 0x00000000FFFFFFFFull, /* dma counter max */
+ IXGBE_DMA_ALIGNMENT, /* alignment */
+ 0x00000FFF, /* burst sizes */
+ 0x00000001, /* minimum transfer size */
+ 0x00000000FFFFFFFFull, /* maximum transfer size */
+ 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
+ 1, /* scatter/gather list length */
+ 0x00000001, /* granularity */
+ DDI_DMA_FLAGERR /* DMA flags */
+};
+
+/*
+ * DMA attributes for tx/rx buffers.  Identical to the descriptor
+ * attributes: single cookie, IXGBE_DMA_ALIGNMENT-aligned.
+ */
+static ddi_dma_attr_t ixgbe_buf_dma_attr = {
+ DMA_ATTR_V0, /* version number */
+ 0x0000000000000000ull, /* low address */
+ 0xFFFFFFFFFFFFFFFFull, /* high address */
+ 0x00000000FFFFFFFFull, /* dma counter max */
+ IXGBE_DMA_ALIGNMENT, /* alignment */
+ 0x00000FFF, /* burst sizes */
+ 0x00000001, /* minimum transfer size */
+ 0x00000000FFFFFFFFull, /* maximum transfer size */
+ 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
+ 1, /* scatter/gather list length */
+ 0x00000001, /* granularity */
+ DDI_DMA_FLAGERR /* DMA flags */
+};
+
+/*
+ * DMA attributes for transmit.  Used for the handles that are bound
+ * dynamically to mblks at transmit time; allows up to MAX_COOKIE
+ * scatter/gather entries and byte alignment.
+ */
+static ddi_dma_attr_t ixgbe_tx_dma_attr = {
+ DMA_ATTR_V0, /* version number */
+ 0x0000000000000000ull, /* low address */
+ 0xFFFFFFFFFFFFFFFFull, /* high address */
+ 0x00000000FFFFFFFFull, /* dma counter max */
+ 1, /* alignment */
+ 0x00000FFF, /* burst sizes */
+ 0x00000001, /* minimum transfer size */
+ 0x00000000FFFFFFFFull, /* maximum transfer size */
+ 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
+ MAX_COOKIE, /* scatter/gather list length */
+ 0x00000001, /* granularity */
+ DDI_DMA_FLAGERR /* DMA flags */
+};
+
+/*
+ * DMA access attributes for descriptors: the hardware consumes them
+ * little-endian, so request LE access with strict ordering.
+ */
+static ddi_device_acc_attr_t ixgbe_desc_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_FLAGERR_ACC
+};
+
+/*
+ * DMA access attributes for buffers: raw packet data, no byte swapping.
+ */
+static ddi_device_acc_attr_t ixgbe_buf_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+/*
+ * ixgbe_alloc_dma - Allocate DMA resources for all rx/tx rings.
+ *
+ * Returns IXGBE_SUCCESS, or IXGBE_FAILURE after releasing everything
+ * allocated so far via ixgbe_free_dma().
+ */
+int
+ixgbe_alloc_dma(ixgbe_t *ixgbe)
+{
+ ixgbe_rx_ring_t *rx_ring;
+ ixgbe_tx_ring_t *tx_ring;
+ int i;
+
+ for (i = 0; i < ixgbe->num_rx_rings; i++) {
+ /*
+ * Allocate receive descriptor ring and control block lists
+ */
+ rx_ring = &ixgbe->rx_rings[i];
+
+ if (ixgbe_alloc_rbd_ring(rx_ring) != IXGBE_SUCCESS)
+ goto alloc_dma_failure;
+
+ if (ixgbe_alloc_rcb_lists(rx_ring) != IXGBE_SUCCESS)
+ goto alloc_dma_failure;
+ }
+
+ for (i = 0; i < ixgbe->num_tx_rings; i++) {
+ /*
+ * Allocate transmit descriptor ring and control block lists
+ */
+ tx_ring = &ixgbe->tx_rings[i];
+
+ if (ixgbe_alloc_tbd_ring(tx_ring) != IXGBE_SUCCESS)
+ goto alloc_dma_failure;
+
+ if (ixgbe_alloc_tcb_lists(tx_ring) != IXGBE_SUCCESS)
+ goto alloc_dma_failure;
+ }
+
+ return (IXGBE_SUCCESS);
+
+alloc_dma_failure:
+ ixgbe_free_dma(ixgbe);
+
+ return (IXGBE_FAILURE);
+}
+
+/*
+ * ixgbe_free_dma - Free all the DMA resources of all rx/tx rings.
+ *
+ * Safe to call on a partially-allocated state (e.g. from the
+ * ixgbe_alloc_dma failure path): the per-ring free routines check for
+ * NULL handles before releasing anything.
+ */
+void
+ixgbe_free_dma(ixgbe_t *ixgbe)
+{
+ ixgbe_rx_ring_t *rx_ring;
+ ixgbe_tx_ring_t *tx_ring;
+ int i;
+
+ /*
+ * Free DMA resources of rx rings
+ */
+ for (i = 0; i < ixgbe->num_rx_rings; i++) {
+ rx_ring = &ixgbe->rx_rings[i];
+ ixgbe_free_rbd_ring(rx_ring);
+ ixgbe_free_rcb_lists(rx_ring);
+ }
+
+ /*
+ * Free DMA resources of tx rings
+ */
+ for (i = 0; i < ixgbe->num_tx_rings; i++) {
+ tx_ring = &ixgbe->tx_rings[i];
+ ixgbe_free_tbd_ring(tx_ring);
+ ixgbe_free_tcb_lists(tx_ring);
+ }
+}
+
+/*
+ * ixgbe_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
+ *
+ * Allocates a DMA handle, the descriptor memory, and binds them; on any
+ * failure the partially-created resources are torn down and
+ * IXGBE_FAILURE is returned.
+ */
+static int
+ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *tx_ring)
+{
+ int ret;
+ size_t size;
+ size_t len;
+ uint_t cookie_num;
+ dev_info_t *devinfo;
+ ddi_dma_cookie_t cookie;
+ ixgbe_t *ixgbe = tx_ring->ixgbe;
+
+ devinfo = ixgbe->dip;
+ size = sizeof (union ixgbe_adv_tx_desc) * tx_ring->ring_size;
+
+ /*
+ * If tx head write-back is enabled, an extra tbd is allocated
+ * to save the head write-back value
+ */
+ if (ixgbe->tx_head_wb_enable) {
+ size += sizeof (union ixgbe_adv_tx_desc);
+ }
+
+ /*
+ * Allocate a DMA handle for the transmit descriptor
+ * memory area.
+ */
+ ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
+ DDI_DMA_DONTWAIT, NULL,
+ &tx_ring->tbd_area.dma_handle);
+
+ if (ret != DDI_SUCCESS) {
+ ixgbe_error(ixgbe,
+ "Could not allocate tbd dma handle: %x", ret);
+ tx_ring->tbd_area.dma_handle = NULL;
+
+ return (IXGBE_FAILURE);
+ }
+
+ /*
+ * Allocate memory to DMA data to and from the transmit
+ * descriptors.
+ */
+ ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
+ size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
+ DDI_DMA_DONTWAIT, NULL,
+ (caddr_t *)&tx_ring->tbd_area.address,
+ &len, &tx_ring->tbd_area.acc_handle);
+
+ if (ret != DDI_SUCCESS) {
+ ixgbe_error(ixgbe,
+ "Could not allocate tbd dma memory: %x", ret);
+ tx_ring->tbd_area.acc_handle = NULL;
+ tx_ring->tbd_area.address = NULL;
+ if (tx_ring->tbd_area.dma_handle != NULL) {
+ ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
+ tx_ring->tbd_area.dma_handle = NULL;
+ }
+ return (IXGBE_FAILURE);
+ }
+
+ /*
+ * Initialize the entire transmit buffer descriptor area to zero
+ */
+ bzero(tx_ring->tbd_area.address, len);
+
+ /*
+ * Allocates DMA resources for the memory that was allocated by
+ * the ddi_dma_mem_alloc call. The DMA resources then get bound to the
+ * the memory address
+ */
+ ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
+ NULL, (caddr_t)tx_ring->tbd_area.address,
+ len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);
+
+ if (ret != DDI_DMA_MAPPED) {
+ ixgbe_error(ixgbe,
+ "Could not bind tbd dma resource: %x", ret);
+ tx_ring->tbd_area.dma_address = NULL;
+ if (tx_ring->tbd_area.acc_handle != NULL) {
+ ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
+ tx_ring->tbd_area.acc_handle = NULL;
+ tx_ring->tbd_area.address = NULL;
+ }
+ if (tx_ring->tbd_area.dma_handle != NULL) {
+ ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
+ tx_ring->tbd_area.dma_handle = NULL;
+ }
+ return (IXGBE_FAILURE);
+ }
+
+ /* sgllen of 1 in ixgbe_desc_dma_attr guarantees a single cookie */
+ ASSERT(cookie_num == 1);
+
+ tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
+ tx_ring->tbd_area.size = len;
+
+ tx_ring->tbd_ring = (union ixgbe_adv_tx_desc *)(uintptr_t)
+ tx_ring->tbd_area.address;
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_free_tbd_ring - Free the tx descriptors of one ring.
+ *
+ * Unbinds, frees the descriptor memory and the DMA handle, and clears
+ * the bookkeeping fields.  NULL-safe, so it may be called on a ring
+ * whose allocation never completed.
+ */
+static void
+ixgbe_free_tbd_ring(ixgbe_tx_ring_t *tx_ring)
+{
+ if (tx_ring->tbd_area.dma_handle != NULL) {
+ (void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
+ }
+ if (tx_ring->tbd_area.acc_handle != NULL) {
+ ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
+ tx_ring->tbd_area.acc_handle = NULL;
+ }
+ if (tx_ring->tbd_area.dma_handle != NULL) {
+ ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
+ tx_ring->tbd_area.dma_handle = NULL;
+ }
+ tx_ring->tbd_area.address = NULL;
+ tx_ring->tbd_area.dma_address = NULL;
+ tx_ring->tbd_area.size = 0;
+
+ tx_ring->tbd_ring = NULL;
+}
+
+/*
+ * ixgbe_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
+ *
+ * Mirrors ixgbe_alloc_tbd_ring for the receive side: allocate a DMA
+ * handle, the descriptor memory, and bind them, tearing down on failure.
+ */
+static int
+ixgbe_alloc_rbd_ring(ixgbe_rx_ring_t *rx_ring)
+{
+ int ret;
+ size_t size;
+ size_t len;
+ uint_t cookie_num;
+ dev_info_t *devinfo;
+ ddi_dma_cookie_t cookie;
+ ixgbe_t *ixgbe = rx_ring->ixgbe;
+
+ devinfo = ixgbe->dip;
+ size = sizeof (union ixgbe_adv_rx_desc) * rx_ring->ring_size;
+
+ /*
+ * Allocate a new DMA handle for the receive descriptor
+ * memory area.
+ */
+ ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
+ DDI_DMA_DONTWAIT, NULL,
+ &rx_ring->rbd_area.dma_handle);
+
+ if (ret != DDI_SUCCESS) {
+ ixgbe_error(ixgbe,
+ "Could not allocate rbd dma handle: %x", ret);
+ rx_ring->rbd_area.dma_handle = NULL;
+ return (IXGBE_FAILURE);
+ }
+
+ /*
+ * Allocate memory to DMA data to and from the receive
+ * descriptors.
+ */
+ ret = ddi_dma_mem_alloc(rx_ring->rbd_area.dma_handle,
+ size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
+ DDI_DMA_DONTWAIT, NULL,
+ (caddr_t *)&rx_ring->rbd_area.address,
+ &len, &rx_ring->rbd_area.acc_handle);
+
+ if (ret != DDI_SUCCESS) {
+ ixgbe_error(ixgbe,
+ "Could not allocate rbd dma memory: %x", ret);
+ rx_ring->rbd_area.acc_handle = NULL;
+ rx_ring->rbd_area.address = NULL;
+ if (rx_ring->rbd_area.dma_handle != NULL) {
+ ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
+ rx_ring->rbd_area.dma_handle = NULL;
+ }
+ return (IXGBE_FAILURE);
+ }
+
+ /*
+ * Initialize the entire receive buffer descriptor area to zero
+ */
+ bzero(rx_ring->rbd_area.address, len);
+
+ /*
+ * Allocates DMA resources for the memory that was allocated by
+ * the ddi_dma_mem_alloc call.
+ */
+ ret = ddi_dma_addr_bind_handle(rx_ring->rbd_area.dma_handle,
+ NULL, (caddr_t)rx_ring->rbd_area.address,
+ len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);
+
+ if (ret != DDI_DMA_MAPPED) {
+ ixgbe_error(ixgbe,
+ "Could not bind rbd dma resource: %x", ret);
+ rx_ring->rbd_area.dma_address = NULL;
+ if (rx_ring->rbd_area.acc_handle != NULL) {
+ ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
+ rx_ring->rbd_area.acc_handle = NULL;
+ rx_ring->rbd_area.address = NULL;
+ }
+ if (rx_ring->rbd_area.dma_handle != NULL) {
+ ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
+ rx_ring->rbd_area.dma_handle = NULL;
+ }
+ return (IXGBE_FAILURE);
+ }
+
+ /* sgllen of 1 in ixgbe_desc_dma_attr guarantees a single cookie */
+ ASSERT(cookie_num == 1);
+
+ rx_ring->rbd_area.dma_address = cookie.dmac_laddress;
+ rx_ring->rbd_area.size = len;
+
+ rx_ring->rbd_ring = (union ixgbe_adv_rx_desc *)(uintptr_t)
+ rx_ring->rbd_area.address;
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_free_rbd_ring - Free the rx descriptors of one ring.
+ *
+ * NULL-safe counterpart of ixgbe_alloc_rbd_ring: unbind, free memory
+ * and handle, then clear the bookkeeping fields.
+ */
+static void
+ixgbe_free_rbd_ring(ixgbe_rx_ring_t *rx_ring)
+{
+ if (rx_ring->rbd_area.dma_handle != NULL) {
+ (void) ddi_dma_unbind_handle(rx_ring->rbd_area.dma_handle);
+ }
+ if (rx_ring->rbd_area.acc_handle != NULL) {
+ ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
+ rx_ring->rbd_area.acc_handle = NULL;
+ }
+ if (rx_ring->rbd_area.dma_handle != NULL) {
+ ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
+ rx_ring->rbd_area.dma_handle = NULL;
+ }
+ rx_ring->rbd_area.address = NULL;
+ rx_ring->rbd_area.dma_address = NULL;
+ rx_ring->rbd_area.size = 0;
+
+ rx_ring->rbd_ring = NULL;
+}
+
+/*
+ * ixgbe_alloc_dma_buffer - Allocate DMA resources for a DMA buffer.
+ *
+ * Allocates a handle and at least 'size' bytes of streaming DMA memory,
+ * binds it, and fills in 'buf' (address, dma_address, size).  On any
+ * failure the partial allocations are released and IXGBE_FAILURE is
+ * returned.  Packet data uses DDI_DMA_STREAMING (vs. the consistent
+ * mapping used for descriptor rings).
+ */
+static int
+ixgbe_alloc_dma_buffer(ixgbe_t *ixgbe, dma_buffer_t *buf, size_t size)
+{
+ int ret;
+ dev_info_t *devinfo = ixgbe->dip;
+ ddi_dma_cookie_t cookie;
+ size_t len;
+ uint_t cookie_num;
+
+ ret = ddi_dma_alloc_handle(devinfo,
+ &ixgbe_buf_dma_attr, DDI_DMA_DONTWAIT,
+ NULL, &buf->dma_handle);
+
+ if (ret != DDI_SUCCESS) {
+ buf->dma_handle = NULL;
+ ixgbe_error(ixgbe,
+ "Could not allocate dma buffer handle: %x", ret);
+ return (IXGBE_FAILURE);
+ }
+
+ ret = ddi_dma_mem_alloc(buf->dma_handle,
+ size, &ixgbe_buf_acc_attr, DDI_DMA_STREAMING,
+ DDI_DMA_DONTWAIT, NULL, &buf->address,
+ &len, &buf->acc_handle);
+
+ if (ret != DDI_SUCCESS) {
+ buf->acc_handle = NULL;
+ buf->address = NULL;
+ if (buf->dma_handle != NULL) {
+ ddi_dma_free_handle(&buf->dma_handle);
+ buf->dma_handle = NULL;
+ }
+ ixgbe_error(ixgbe,
+ "Could not allocate dma buffer memory: %x", ret);
+ return (IXGBE_FAILURE);
+ }
+
+ ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
+ buf->address,
+ len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
+ DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);
+
+ if (ret != DDI_DMA_MAPPED) {
+ buf->dma_address = NULL;
+ if (buf->acc_handle != NULL) {
+ ddi_dma_mem_free(&buf->acc_handle);
+ buf->acc_handle = NULL;
+ buf->address = NULL;
+ }
+ if (buf->dma_handle != NULL) {
+ ddi_dma_free_handle(&buf->dma_handle);
+ buf->dma_handle = NULL;
+ }
+ ixgbe_error(ixgbe,
+ "Could not bind dma buffer handle: %x", ret);
+ return (IXGBE_FAILURE);
+ }
+
+ /* sgllen of 1 in ixgbe_buf_dma_attr guarantees a single cookie */
+ ASSERT(cookie_num == 1);
+
+ buf->dma_address = cookie.dmac_laddress;
+ buf->size = len;
+ buf->len = 0;
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_free_dma_buffer - Free one allocated area of dma memory and handle.
+ *
+ * A NULL dma_handle means the buffer was never allocated (or already
+ * freed), in which case nothing is done.
+ */
+static void
+ixgbe_free_dma_buffer(dma_buffer_t *buf)
+{
+ if (buf->dma_handle != NULL) {
+ (void) ddi_dma_unbind_handle(buf->dma_handle);
+ buf->dma_address = NULL;
+ } else {
+ /* nothing was bound/allocated; leave the buffer untouched */
+ return;
+ }
+
+ if (buf->acc_handle != NULL) {
+ ddi_dma_mem_free(&buf->acc_handle);
+ buf->acc_handle = NULL;
+ buf->address = NULL;
+ }
+
+ if (buf->dma_handle != NULL) {
+ ddi_dma_free_handle(&buf->dma_handle);
+ buf->dma_handle = NULL;
+ }
+
+ buf->size = 0;
+ buf->len = 0;
+}
+
+/*
+ * ixgbe_alloc_tcb_lists - Memory allocation for the transmit control blocks
+ * of one ring.
+ *
+ * Allocates the work list (ring_size entries), the free list and the
+ * tx control block array (free_list_size entries each), plus a per-tcb
+ * DMA handle and a pre-allocated bounce buffer.  On failure everything
+ * allocated so far is released via ixgbe_free_tcb_lists() and
+ * IXGBE_FAILURE is returned.
+ */
+static int
+ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *tx_ring)
+{
+ int i;
+ int ret;
+ tx_control_block_t *tcb;
+ dma_buffer_t *tx_buf;
+ ixgbe_t *ixgbe = tx_ring->ixgbe;
+ dev_info_t *devinfo = ixgbe->dip;
+
+ /*
+ * Allocate memory for the work list.
+ */
+ tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
+ tx_ring->ring_size, KM_NOSLEEP);
+
+ if (tx_ring->work_list == NULL) {
+ ixgbe_error(ixgbe,
+ "Could not allocate memory for tx work list");
+ return (IXGBE_FAILURE);
+ }
+
+ /*
+ * Allocate memory for the free list.
+ */
+ tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
+ tx_ring->free_list_size, KM_NOSLEEP);
+
+ if (tx_ring->free_list == NULL) {
+ kmem_free(tx_ring->work_list,
+ sizeof (tx_control_block_t *) * tx_ring->ring_size);
+ tx_ring->work_list = NULL;
+
+ ixgbe_error(ixgbe,
+ "Could not allocate memory for tx free list");
+ return (IXGBE_FAILURE);
+ }
+
+ /*
+ * Allocate memory for the tx control blocks of free list.
+ */
+ tx_ring->tcb_area =
+ kmem_zalloc(sizeof (tx_control_block_t) *
+ tx_ring->free_list_size, KM_NOSLEEP);
+
+ if (tx_ring->tcb_area == NULL) {
+ kmem_free(tx_ring->work_list,
+ sizeof (tx_control_block_t *) * tx_ring->ring_size);
+ tx_ring->work_list = NULL;
+
+ kmem_free(tx_ring->free_list,
+ sizeof (tx_control_block_t *) * tx_ring->free_list_size);
+ tx_ring->free_list = NULL;
+
+ ixgbe_error(ixgbe,
+ "Could not allocate memory for tx control blocks");
+ return (IXGBE_FAILURE);
+ }
+
+ /*
+ * Allocate dma memory for the tx control block of free list.
+ */
+ tcb = tx_ring->tcb_area;
+ for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
+ ASSERT(tcb != NULL);
+
+ tx_ring->free_list[i] = tcb;
+
+ /*
+ * Pre-allocate dma handles for transmit. These dma handles
+ * will be dynamically bound to the data buffers passed down
+ * from the upper layers at the time of transmitting.
+ */
+ ret = ddi_dma_alloc_handle(devinfo,
+ &ixgbe_tx_dma_attr,
+ DDI_DMA_DONTWAIT, NULL,
+ &tcb->tx_dma_handle);
+ if (ret != DDI_SUCCESS) {
+ tcb->tx_dma_handle = NULL;
+ ixgbe_error(ixgbe,
+ "Could not allocate tx dma handle: %x", ret);
+ goto alloc_tcb_lists_fail;
+ }
+
+ /*
+ * Pre-allocate transmit buffers for packets that the
+ * size is less than bcopy_thresh.
+ */
+ tx_buf = &tcb->tx_buf;
+
+ ret = ixgbe_alloc_dma_buffer(ixgbe,
+ tx_buf, ixgbe->tx_buf_size);
+
+ if (ret != IXGBE_SUCCESS) {
+ ASSERT(tcb->tx_dma_handle != NULL);
+ ddi_dma_free_handle(&tcb->tx_dma_handle);
+ tcb->tx_dma_handle = NULL;
+ ixgbe_error(ixgbe, "Allocate tx dma buffer failed");
+ goto alloc_tcb_lists_fail;
+ }
+ }
+
+ return (IXGBE_SUCCESS);
+
+alloc_tcb_lists_fail:
+ ixgbe_free_tcb_lists(tx_ring);
+
+ return (IXGBE_FAILURE);
+}
+
+/*
+ * ixgbe_free_tcb_lists - Release the memory allocated for
+ * the transmit control blocks of one ring.
+ *
+ * Walks the tcb array freeing the per-tcb DMA handle and bounce buffer,
+ * then frees the tcb array and both pointer lists.  NULL-safe, so it
+ * can be used from the ixgbe_alloc_tcb_lists failure path.
+ */
+static void
+ixgbe_free_tcb_lists(ixgbe_tx_ring_t *tx_ring)
+{
+ int i;
+ tx_control_block_t *tcb;
+
+ tcb = tx_ring->tcb_area;
+ if (tcb == NULL)
+ return;
+
+ for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
+ ASSERT(tcb != NULL);
+
+ /* Free the tx dma handle for dynamical binding */
+ if (tcb->tx_dma_handle != NULL) {
+ ddi_dma_free_handle(&tcb->tx_dma_handle);
+ tcb->tx_dma_handle = NULL;
+ } else {
+ /*
+ * If the dma handle is NULL, then we don't
+ * have to check the remaining.
+ */
+ break;
+ }
+
+ ixgbe_free_dma_buffer(&tcb->tx_buf);
+ }
+
+ if (tx_ring->tcb_area != NULL) {
+ kmem_free(tx_ring->tcb_area,
+ sizeof (tx_control_block_t) * tx_ring->free_list_size);
+ tx_ring->tcb_area = NULL;
+ }
+
+ if (tx_ring->work_list != NULL) {
+ kmem_free(tx_ring->work_list,
+ sizeof (tx_control_block_t *) * tx_ring->ring_size);
+ tx_ring->work_list = NULL;
+ }
+
+ if (tx_ring->free_list != NULL) {
+ kmem_free(tx_ring->free_list,
+ sizeof (tx_control_block_t *) * tx_ring->free_list_size);
+ tx_ring->free_list = NULL;
+ }
+}
+
+/*
+ * ixgbe_alloc_rcb_lists - Memory allocation for the receive control blocks
+ * of one ring.
+ *
+ * Allocates the work list and free list pointer arrays, one rcb for every
+ * entry of both lists, and a pre-mapped dma receive buffer for each rcb.
+ * Each buffer is offset by IPHDR_ALIGN_ROOM so the IP header of a received
+ * frame ends up aligned.  A desballoc() failure is tolerated: rcb->mp is
+ * simply left NULL (callers presumably check mp before use — confirm in
+ * the rx path).
+ *
+ * Returns IXGBE_SUCCESS, or IXGBE_FAILURE after releasing everything
+ * allocated so far via ixgbe_free_rcb_lists().
+ */
+static int
+ixgbe_alloc_rcb_lists(ixgbe_rx_ring_t *rx_ring)
+{
+	int i;
+	int ret;
+	rx_control_block_t *rcb;
+	ixgbe_t *ixgbe = rx_ring->ixgbe;
+	dma_buffer_t *rx_buf;
+	uint32_t rcb_count;
+
+	/*
+	 * Allocate memory for the work list.
+	 */
+	rx_ring->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
+	    rx_ring->ring_size, KM_NOSLEEP);
+
+	if (rx_ring->work_list == NULL) {
+		ixgbe_error(ixgbe,
+		    "Could not allocate memory for rx work list");
+		return (IXGBE_FAILURE);
+	}
+
+	/*
+	 * Allocate memory for the free list.
+	 */
+	rx_ring->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
+	    rx_ring->free_list_size, KM_NOSLEEP);
+
+	if (rx_ring->free_list == NULL) {
+		kmem_free(rx_ring->work_list,
+		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
+		rx_ring->work_list = NULL;
+
+		/* was "Cound not" — typo fixed */
+		ixgbe_error(ixgbe,
+		    "Could not allocate memory for rx free list");
+		return (IXGBE_FAILURE);
+	}
+
+	/*
+	 * Allocate memory for the rx control blocks for work list and
+	 * free list.
+	 */
+	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
+	rx_ring->rcb_area =
+	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
+	    KM_NOSLEEP);
+
+	if (rx_ring->rcb_area == NULL) {
+		kmem_free(rx_ring->work_list,
+		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
+		rx_ring->work_list = NULL;
+
+		kmem_free(rx_ring->free_list,
+		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
+		rx_ring->free_list = NULL;
+
+		/* was "Cound not" — typo fixed */
+		ixgbe_error(ixgbe,
+		    "Could not allocate memory for rx control blocks");
+		return (IXGBE_FAILURE);
+	}
+
+	/*
+	 * Allocate dma memory for the rx control blocks
+	 */
+	rcb = rx_ring->rcb_area;
+	for (i = 0; i < rcb_count; i++, rcb++) {
+		ASSERT(rcb != NULL);
+
+		/* The first ring_size rcbs go on the work list */
+		if (i < rx_ring->ring_size) {
+			/* Attach the rx control block to the work list */
+			rx_ring->work_list[i] = rcb;
+		} else {
+			/* Attach the rx control block to the free list */
+			rx_ring->free_list[i - rx_ring->ring_size] = rcb;
+		}
+
+		rx_buf = &rcb->rx_buf;
+		ret = ixgbe_alloc_dma_buffer(ixgbe,
+		    rx_buf, ixgbe->rx_buf_size);
+
+		if (ret != IXGBE_SUCCESS) {
+			ixgbe_error(ixgbe, "Allocate rx dma buffer failed");
+			goto alloc_rcb_lists_fail;
+		}
+
+		/*
+		 * Shift the usable window forward so the IP header of a
+		 * received frame is aligned; the full buffer is still
+		 * handed to desballoc() below.
+		 */
+		rx_buf->size -= IPHDR_ALIGN_ROOM;
+		rx_buf->address += IPHDR_ALIGN_ROOM;
+		rx_buf->dma_address += IPHDR_ALIGN_ROOM;
+
+		rcb->state = RCB_FREE;
+		rcb->rx_ring = (ixgbe_rx_ring_t *)rx_ring;
+		rcb->free_rtn.free_func = ixgbe_rx_recycle;
+		rcb->free_rtn.free_arg = (char *)rcb;
+
+		rcb->mp = desballoc((unsigned char *)
+		    rx_buf->address - IPHDR_ALIGN_ROOM,
+		    rx_buf->size + IPHDR_ALIGN_ROOM,
+		    0, &rcb->free_rtn);
+
+		if (rcb->mp != NULL) {
+			rcb->mp->b_rptr += IPHDR_ALIGN_ROOM;
+			rcb->mp->b_wptr += IPHDR_ALIGN_ROOM;
+		}
+	}
+
+	return (IXGBE_SUCCESS);
+
+alloc_rcb_lists_fail:
+	ixgbe_free_rcb_lists(rx_ring);
+
+	return (IXGBE_FAILURE);
+}
+
+/*
+ * ixgbe_free_rcb_lists - Free the receive control blocks of one ring.
+ *
+ * Releases, in order: each rcb's STREAMS mblk and dma buffer, the rcb
+ * area, then the work list and free list pointer arrays.  rcb_count is
+ * recomputed the same way the allocation path computed it, so the sizes
+ * passed to kmem_free() match the original kmem_zalloc() sizes.
+ */
+static void
+ixgbe_free_rcb_lists(ixgbe_rx_ring_t *rx_ring)
+{
+	int i;
+	rx_control_block_t *rcb;
+	uint32_t rcb_count;
+
+	/* Nothing was allocated if the rcb area itself is absent */
+	rcb = rx_ring->rcb_area;
+	if (rcb == NULL)
+		return;
+
+	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
+	for (i = 0; i < rcb_count; i++, rcb++) {
+		ASSERT(rcb != NULL);
+		/* Every buffer must have been returned before teardown */
+		ASSERT(rcb->state == RCB_FREE);
+
+		if (rcb->mp != NULL) {
+			freemsg(rcb->mp);
+			rcb->mp = NULL;
+		}
+
+		ixgbe_free_dma_buffer(&rcb->rx_buf);
+	}
+
+	if (rx_ring->rcb_area != NULL) {
+		kmem_free(rx_ring->rcb_area,
+		    sizeof (rx_control_block_t) * rcb_count);
+		rx_ring->rcb_area = NULL;
+	}
+
+	if (rx_ring->work_list != NULL) {
+		kmem_free(rx_ring->work_list,
+		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
+		rx_ring->work_list = NULL;
+	}
+
+	if (rx_ring->free_list != NULL) {
+		kmem_free(rx_ring->free_list,
+		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
+		rx_ring->free_list = NULL;
+	}
+}
+
+/*
+ * ixgbe_set_fma_flags - Set the attribute for fma support.
+ *
+ * When acc_flag is non-zero, register accesses are checked for errors
+ * (DDI_FLAGERR_ACC); when dma_flag is non-zero, all three dma attribute
+ * templates request error-flagging dma (DDI_DMA_FLAGERR).  Zero flags
+ * select the non-FMA defaults.
+ */
+void
+ixgbe_set_fma_flags(int acc_flag, int dma_flag)
+{
+	int dma_attr_flags;
+
+	ixgbe_desc_acc_attr.devacc_attr_access =
+	    acc_flag ? DDI_FLAGERR_ACC : DDI_DEFAULT_ACC;
+
+	/* One value, applied uniformly to tx, buffer and descriptor attrs */
+	dma_attr_flags = dma_flag ? DDI_DMA_FLAGERR : 0;
+	ixgbe_tx_dma_attr.dma_attr_flags = dma_attr_flags;
+	ixgbe_buf_dma_attr.dma_attr_flags = dma_attr_flags;
+	ixgbe_desc_dma_attr.dma_attr_flags = dma_attr_flags;
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_common.c b/usr/src/uts/common/io/ixgbe/ixgbe_common.c
new file mode 100644
index 0000000000..f472cbd290
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_common.c
@@ -0,0 +1,1891 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+/* IntelVersion: 1.159 v2008-03-04 */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_common.h"
+#include "ixgbe_api.h"
+
+static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
+static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
+static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+ u16 count);
+static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
+static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
+static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
+
+static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
+static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+
+/*
+ * ixgbe_init_ops_generic - Inits function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers with the generic (MAC-independent)
+ * implementations.  Entries set to NULL here have no generic version;
+ * presumably a MAC-specific init (e.g. for the 82598) fills them in —
+ * confirm before calling through any NULL op.
+ */
+s32
+ixgbe_init_ops_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	struct ixgbe_mac_info *mac = &hw->mac;
+
+	/* EEPROM */
+	eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
+	eeprom->ops.read = &ixgbe_read_eeprom_generic;
+	eeprom->ops.write = &ixgbe_write_eeprom_generic;
+	eeprom->ops.validate_checksum =
+	    &ixgbe_validate_eeprom_checksum_generic;
+	eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
+
+	/* MAC */
+	mac->ops.init_hw = &ixgbe_init_hw_generic;
+	mac->ops.reset_hw = NULL;
+	mac->ops.start_hw = &ixgbe_start_hw_generic;
+	mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
+	mac->ops.get_media_type = NULL;
+	mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
+	mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
+	mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
+	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_generic;
+	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_generic;
+
+	/* LEDs */
+	mac->ops.led_on = &ixgbe_led_on_generic;
+	mac->ops.led_off = &ixgbe_led_off_generic;
+	mac->ops.blink_led_start = NULL;
+	mac->ops.blink_led_stop = NULL;
+
+	/* RAR, Multicast, VLAN */
+	mac->ops.set_rar = &ixgbe_set_rar_generic;
+	mac->ops.set_vmdq = NULL;
+	mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
+	mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
+	mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
+	mac->ops.enable_mc = &ixgbe_enable_mc_generic;
+	mac->ops.disable_mc = &ixgbe_disable_mc_generic;
+	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
+	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+
+	/* Flow Control */
+	mac->ops.setup_fc = NULL;
+
+	/* Link */
+	mac->ops.get_link_capabilities = NULL;
+	mac->ops.setup_link = NULL;
+	mac->ops.setup_link_speed = NULL;
+	mac->ops.check_link = NULL;
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type, clears
+ * all on chip counters, initializes receive address registers, multicast
+ * table, VLAN filter table, calls routine to set up link and flow control
+ * settings, and leaves transmit and receive units disabled and uninitialized
+ *
+ * NOTE(review): this calls through get_media_type, identify and setup_link
+ * function pointers, several of which ixgbe_init_ops_generic() leaves NULL;
+ * a MAC-specific init must have populated them first — confirm at call site.
+ */
+s32
+ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+{
+	u32 ctrl_ext;
+
+	/* Set the media type */
+	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
+
+	/* Set bus info */
+	hw->mac.ops.get_bus_info(hw);
+
+	/* Identify the PHY */
+	hw->phy.ops.identify(hw);
+
+	/*
+	 * Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table
+	 */
+	hw->mac.ops.init_rx_addrs(hw);
+
+	/* Clear the VLAN filter table */
+	hw->mac.ops.clear_vfta(hw);
+
+	/* Set up link */
+	hw->mac.ops.setup_link(hw);
+
+	/* Clear statistics registers */
+	hw->mac.ops.clear_hw_cntrs(hw);
+
+	/* Set No Snoop Disable */
+	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = FALSE;
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_init_hw_generic - Generic hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware, filling the bus info
+ * structure and media type, clears all on chip counters, initializes receive
+ * address registers, multicast table, VLAN filter table, calls routine to set
+ * up link and flow control settings, and leaves transmit and receive units
+ * disabled and uninitialized
+ *
+ * NOTE(review): reset_hw is NULL after ixgbe_init_ops_generic(); a
+ * MAC-specific init must supply it before this is called — confirm.
+ */
+s32
+ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+{
+	/* Reset the hardware */
+	hw->mac.ops.reset_hw(hw);
+
+	/* Start the HW */
+	hw->mac.ops.start_hw(hw);
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware
+ * Statistics counters are clear on read.  All read values are deliberately
+ * discarded (the (void) casts) — the read itself is the side effect.
+ */
+s32
+ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+{
+	u16 i = 0;
+
+	(void) IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+	(void) IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+	(void) IXGBE_READ_REG(hw, IXGBE_ERRBC);
+	(void) IXGBE_READ_REG(hw, IXGBE_MSPDC);
+	/* Per-unit missed packet counters */
+	for (i = 0; i < 8; i++)
+		(void) IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
+	(void) IXGBE_READ_REG(hw, IXGBE_MLFC);
+	(void) IXGBE_READ_REG(hw, IXGBE_MRFC);
+	(void) IXGBE_READ_REG(hw, IXGBE_RLEC);
+	(void) IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+	(void) IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+	(void) IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+	(void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+
+	/* Per-priority XON/XOFF counters */
+	for (i = 0; i < 8; i++) {
+		(void) IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+		(void) IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+		(void) IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+		(void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+	}
+
+	(void) IXGBE_READ_REG(hw, IXGBE_PRC64);
+	(void) IXGBE_READ_REG(hw, IXGBE_PRC127);
+	(void) IXGBE_READ_REG(hw, IXGBE_PRC255);
+	(void) IXGBE_READ_REG(hw, IXGBE_PRC511);
+	(void) IXGBE_READ_REG(hw, IXGBE_PRC1023);
+	(void) IXGBE_READ_REG(hw, IXGBE_PRC1522);
+	(void) IXGBE_READ_REG(hw, IXGBE_GPRC);
+	(void) IXGBE_READ_REG(hw, IXGBE_BPRC);
+	(void) IXGBE_READ_REG(hw, IXGBE_MPRC);
+	(void) IXGBE_READ_REG(hw, IXGBE_GPTC);
+	(void) IXGBE_READ_REG(hw, IXGBE_GORCL);
+	(void) IXGBE_READ_REG(hw, IXGBE_GORCH);
+	(void) IXGBE_READ_REG(hw, IXGBE_GOTCL);
+	(void) IXGBE_READ_REG(hw, IXGBE_GOTCH);
+	for (i = 0; i < 8; i++)
+		(void) IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+	(void) IXGBE_READ_REG(hw, IXGBE_RUC);
+	(void) IXGBE_READ_REG(hw, IXGBE_RFC);
+	(void) IXGBE_READ_REG(hw, IXGBE_ROC);
+	(void) IXGBE_READ_REG(hw, IXGBE_RJC);
+	(void) IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+	(void) IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+	(void) IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+	(void) IXGBE_READ_REG(hw, IXGBE_TORL);
+	(void) IXGBE_READ_REG(hw, IXGBE_TORH);
+	(void) IXGBE_READ_REG(hw, IXGBE_TPR);
+	(void) IXGBE_READ_REG(hw, IXGBE_TPT);
+	(void) IXGBE_READ_REG(hw, IXGBE_PTC64);
+	(void) IXGBE_READ_REG(hw, IXGBE_PTC127);
+	(void) IXGBE_READ_REG(hw, IXGBE_PTC255);
+	(void) IXGBE_READ_REG(hw, IXGBE_PTC511);
+	(void) IXGBE_READ_REG(hw, IXGBE_PTC1023);
+	(void) IXGBE_READ_REG(hw, IXGBE_PTC1522);
+	(void) IXGBE_READ_REG(hw, IXGBE_MPTC);
+	(void) IXGBE_READ_REG(hw, IXGBE_BPTC);
+	/* Per-queue packet/byte counters */
+	for (i = 0; i < 16; i++) {
+		(void) IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+		(void) IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+		(void) IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+		(void) IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+	}
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_read_pba_num - Reads part number from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number from the EEPROM
+ *
+ * Reads the part number from the EEPROM.  The 32-bit part number is
+ * assembled from two 16-bit EEPROM words: PBANUM0 becomes the high half,
+ * PBANUM1 the low half.  Returns the EEPROM read error code on failure,
+ * in which case *pba_num may be partially written.
+ */
+s32
+ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
+{
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("ixgbe_read_pba_num_generic");
+
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		return (ret_val);
+	}
+	/* First word forms the upper 16 bits */
+	*pba_num = (u32)(data << 16);
+
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		return (ret_val);
+	}
+	/* Second word fills the lower 16 bits */
+	*pba_num |= data;
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_get_mac_addr_generic - Generic get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from first Receive Address Register (RAR0)
+ * A reset of the adapter must be performed prior to calling this function
+ * in order for the MAC address to have been loaded from the EEPROM into RAR0
+ */
+s32
+ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+	u32 rar_high;
+	u32 rar_low;
+	u16 i;
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
+	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
+
+	/* Bytes 0-3 come from RAL0, bytes 4-5 from RAH0 (LSB first) */
+	for (i = 0; i < 6; i++) {
+		if (i < 4)
+			mac_addr[i] = (u8)(rar_low >> (i * 8));
+		else
+			mac_addr[i] = (u8)(rar_high >> ((i - 4) * 8));
+	}
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_get_bus_info_generic - Generic set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ * by decoding the negotiated PCIe link status word from config space.
+ * Unrecognized width/speed encodings map to the *_unknown values.
+ */
+s32
+ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+{
+	u16 link_status;
+
+	hw->bus.type = ixgbe_bus_type_pci_express;
+
+	/* Get the negotiated link width and speed from PCI config space */
+	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+
+	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
+	case IXGBE_PCI_LINK_WIDTH_1:
+		hw->bus.width = ixgbe_bus_width_pcie_x1;
+		break;
+	case IXGBE_PCI_LINK_WIDTH_2:
+		hw->bus.width = ixgbe_bus_width_pcie_x2;
+		break;
+	case IXGBE_PCI_LINK_WIDTH_4:
+		hw->bus.width = ixgbe_bus_width_pcie_x4;
+		break;
+	case IXGBE_PCI_LINK_WIDTH_8:
+		hw->bus.width = ixgbe_bus_width_pcie_x8;
+		break;
+	default:
+		hw->bus.width = ixgbe_bus_width_unknown;
+		break;
+	}
+
+	switch (link_status & IXGBE_PCI_LINK_SPEED) {
+	case IXGBE_PCI_LINK_SPEED_2500:
+		hw->bus.speed = ixgbe_bus_speed_2500;
+		break;
+	case IXGBE_PCI_LINK_SPEED_5000:
+		hw->bus.speed = ixgbe_bus_speed_5000;
+		break;
+	default:
+		hw->bus.speed = ixgbe_bus_speed_unknown;
+		break;
+	}
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ *
+ * Always returns IXGBE_SUCCESS, even when PCI-E master disable polling
+ * fails (that failure is only logged).
+ */
+s32
+ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+{
+	u32 number_of_queues;
+	u32 reg_val;
+	u16 i;
+
+	/*
+	 * Set the adapter_stopped flag so other driver functions stop touching
+	 * the hardware
+	 */
+	hw->adapter_stopped = TRUE;
+
+	/* Disable the receive unit */
+	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	reg_val &= ~(IXGBE_RXCTRL_RXEN);
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
+	IXGBE_WRITE_FLUSH(hw);
+	msec_delay(2);
+
+	/* Clear interrupt mask to stop from interrupts being generated */
+	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+
+	/* Clear any pending interrupts */
+	(void) IXGBE_READ_REG(hw, IXGBE_EICR);
+
+	/* Disable the transmit unit. Each queue must be disabled. */
+	number_of_queues = hw->mac.max_tx_queues;
+	for (i = 0; i < number_of_queues; i++) {
+		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+		if (reg_val & IXGBE_TXDCTL_ENABLE) {
+			reg_val &= ~IXGBE_TXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
+		}
+	}
+
+	/*
+	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
+	 * access and verify no pending requests
+	 */
+	if (ixgbe_disable_pcie_master(hw) != IXGBE_SUCCESS) {
+		DEBUGOUT("PCI-E Master disable polling has failed.\n");
+	}
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_led_on_generic - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ *
+ * Read-modify-writes LEDCTL: clears the mode field for this LED, then
+ * sets it to the always-on mode.  Always returns IXGBE_SUCCESS.
+ */
+s32
+ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	/* To turn on the LED, set mode to ON. */
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_led_off_generic - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ *
+ * Read-modify-writes LEDCTL: clears the mode field for this LED, then
+ * sets it to the always-off mode.  Always returns IXGBE_SUCCESS.
+ */
+s32
+ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	/* To turn off the LED, set mode to OFF. */
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.  Idempotent: only
+ * runs once, while eeprom->type is still ixgbe_eeprom_uninitialized.
+ */
+s32
+ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	u32 eec;
+	u16 eeprom_size;
+
+	if (eeprom->type == ixgbe_eeprom_uninitialized) {
+		eeprom->type = ixgbe_eeprom_none;
+
+		/*
+		 * Check for EEPROM present first.
+		 * If not present leave as none
+		 */
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		if (eec & IXGBE_EEC_PRES) {
+			eeprom->type = ixgbe_eeprom_spi;
+
+			/*
+			 * SPI EEPROM is assumed here. This code would need to
+			 * change if a future EEPROM is not SPI.
+			 */
+			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+			    IXGBE_EEC_SIZE_SHIFT);
+			eeprom->word_size = 1 << (eeprom_size +
+			    IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		}
+
+		/*
+		 * NOTE(review): address_bits is derived even when no EEPROM
+		 * is present (outside the PRES block above) — confirm this
+		 * is intended.
+		 */
+		if (eec & IXGBE_EEC_ADDR_SIZE)
+			eeprom->address_bits = 16;
+		else
+			eeprom->address_bits = 8;
+		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
+		    "%d\n", eeprom->type, eeprom->word_size,
+		    eeprom->address_bits);
+	}
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ *
+ * Bit-bangs the SPI write sequence: acquire access, wait ready, WREN,
+ * then the write opcode + byte address + byte-swapped data.  Returns
+ * IXGBE_ERR_EEPROM for a bad offset or an EEPROM that never becomes
+ * ready, or the acquire failure code.
+ */
+s32
+ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+	s32 status;
+	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (offset >= hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		goto out;
+	}
+
+	/* Prepare the EEPROM for writing */
+	status = ixgbe_acquire_eeprom(hw);
+
+	if (status == IXGBE_SUCCESS) {
+		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
+			ixgbe_release_eeprom(hw);
+			status = IXGBE_ERR_EEPROM;
+		}
+	}
+
+	if (status == IXGBE_SUCCESS) {
+		ixgbe_standby_eeprom(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode ) */
+		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
+		    IXGBE_EEPROM_OPCODE_BITS);
+
+		ixgbe_standby_eeprom(hw);
+
+		/*
+		 * Some SPI eeproms use the 8th address bit embedded in the
+		 * opcode
+		 */
+		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
+			write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+		/* Send the Write command (8-bit opcode + addr) */
+		ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+		    IXGBE_EEPROM_OPCODE_BITS);
+		/* SPI addresses are in bytes; words are 2 bytes wide */
+		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
+		    hw->eeprom.address_bits);
+
+		/* Send the data, byte-swapped to SPI wire order */
+		data = (data >> 8) | (data << 8);
+		ixgbe_shift_out_eeprom_bits(hw, data, 16);
+		ixgbe_standby_eeprom(hw);
+
+		/* Allow the internal write cycle to complete */
+		msec_delay(10);
+
+		/* Done with writing - release the EEPROM */
+		ixgbe_release_eeprom(hw);
+	}
+
+out:
+	return (status);
+}
+
+/*
+ * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM through bit-bang method: acquire access,
+ * wait ready, then the read opcode + byte address, shifting in the
+ * byte-swapped result.  Returns IXGBE_ERR_EEPROM for a bad offset or a
+ * not-ready EEPROM, or the acquire failure code.
+ */
+s32
+ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+    u16 *data)
+{
+	s32 status;
+	u16 word_in;
+	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (offset >= hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		goto out;
+	}
+
+	/* Prepare the EEPROM for reading */
+	status = ixgbe_acquire_eeprom(hw);
+
+	if (status == IXGBE_SUCCESS) {
+		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
+			ixgbe_release_eeprom(hw);
+			status = IXGBE_ERR_EEPROM;
+		}
+	}
+
+	if (status == IXGBE_SUCCESS) {
+		ixgbe_standby_eeprom(hw);
+
+		/*
+		 * Some SPI eeproms use the 8th address bit embedded in the
+		 * opcode
+		 */
+		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
+			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+		/* Send the READ command (opcode + addr) */
+		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+		    IXGBE_EEPROM_OPCODE_BITS);
+		/* SPI addresses are in bytes; words are 2 bytes wide */
+		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
+		    hw->eeprom.address_bits);
+
+		/* Read the data, byte-swapping from SPI wire order */
+		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+		*data = (word_in >> 8) | (word_in << 8);
+
+		/* End this read operation */
+		ixgbe_release_eeprom(hw);
+	}
+
+out:
+	return (status);
+}
+
+/*
+ * ixgbe_read_eeprom_generic - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register: write the
+ * address with the START bit, poll for DONE, then extract the data field.
+ * Returns IXGBE_ERR_EEPROM for a bad offset or a poll timeout; *data is
+ * untouched on failure.
+ */
+s32
+ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+	u32 eerd;
+	s32 status;
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (offset >= hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		goto out;
+	}
+
+	/* Address goes in the upper field, START bit kicks off the read */
+	eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
+	    IXGBE_EEPROM_READ_REG_START;
+
+	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
+	status = ixgbe_poll_eeprom_eerd_done(hw);
+
+	if (status == IXGBE_SUCCESS)
+		*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
+		    IXGBE_EEPROM_READ_REG_DATA);
+	else
+		DEBUGOUT("Eeprom read timed out\n");
+
+out:
+	return (status);
+}
+
+/*
+ * ixgbe_poll_eeprom_eerd_done - Poll EERD status
+ * @hw: pointer to hardware structure
+ *
+ * Polls the DONE bit of the EERD register; returns IXGBE_SUCCESS as soon
+ * as the hardware reports the read complete, or IXGBE_ERR_EEPROM after
+ * IXGBE_EERD_ATTEMPTS polls spaced 5us apart.
+ */
+static s32
+ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw)
+{
+	u32 i;
+
+	for (i = 0; i < IXGBE_EERD_ATTEMPTS; i++) {
+		if (IXGBE_READ_REG(hw, IXGBE_EERD) &
+		    IXGBE_EEPROM_READ_REG_DONE)
+			return (IXGBE_SUCCESS);
+		usec_delay(5);
+	}
+
+	return (IXGBE_ERR_EEPROM);
+}
+
+/*
+ * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ *
+ * Prepares EEPROM for access using bit-bang method. This function should
+ * be called before issuing a command to the EEPROM.  Takes the SW/FW
+ * synchronization semaphore, requests the EEPROM grant, then clears CS
+ * and SK ready for clocking.  Returns IXGBE_ERR_SWFW_SYNC or
+ * IXGBE_ERR_EEPROM on failure (with everything released).
+ */
+static s32
+ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	u32 eec;
+	u32 i;
+
+	if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != IXGBE_SUCCESS)
+		status = IXGBE_ERR_SWFW_SYNC;
+
+	if (status == IXGBE_SUCCESS) {
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+		/* Request EEPROM Access */
+		eec |= IXGBE_EEC_REQ;
+		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+		/* Poll for the grant bit */
+		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
+			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+			if (eec & IXGBE_EEC_GNT)
+				break;
+			usec_delay(5);
+		}
+
+		/* Release if grant not acquired */
+		if (!(eec & IXGBE_EEC_GNT)) {
+			eec &= ~IXGBE_EEC_REQ;
+			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+			DEBUGOUT("Could not acquire EEPROM grant\n");
+
+			ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+			status = IXGBE_ERR_EEPROM;
+		}
+	}
+
+	/* Setup EEPROM for Read/Write */
+	if (status == IXGBE_SUCCESS) {
+		/* Clear CS and SK */
+		eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+		IXGBE_WRITE_FLUSH(hw);
+		usec_delay(1);
+	}
+	return (status);
+}
+
+/*
+ * ixgbe_get_eeprom_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so EEPROM access can occur for bit-bang
+ * method.  Two-stage acquisition: first the inter-driver SMBI bit, then
+ * the SW/FW SWESMBI bit.  Returns IXGBE_ERR_EEPROM if either stage times
+ * out (timeout scales with EEPROM word size); on a SWESMBI timeout both
+ * semaphores are released before returning.
+ */
+static s32
+ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_EEPROM;
+	u32 timeout;
+	u32 i;
+	u32 swsm;
+
+	/* Set timeout value based on size of EEPROM */
+	timeout = hw->eeprom.word_size + 1;
+
+	/* Get SMBI software semaphore between device drivers first */
+	for (i = 0; i < timeout; i++) {
+		/*
+		 * If the SMBI bit is 0 when we read it, then the bit will be
+		 * set and we have the semaphore
+		 */
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		if (!(swsm & IXGBE_SWSM_SMBI)) {
+			status = IXGBE_SUCCESS;
+			break;
+		}
+		msec_delay(1);
+	}
+
+	/* Now get the semaphore between SW/FW through the SWESMBI bit */
+	if (status == IXGBE_SUCCESS) {
+		for (i = 0; i < timeout; i++) {
+			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+			/* Set the SW EEPROM semaphore bit to request access */
+			swsm |= IXGBE_SWSM_SWESMBI;
+			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+			/*
+			 * If we set the bit successfully then we got the
+			 * semaphore.
+			 */
+			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+			if (swsm & IXGBE_SWSM_SWESMBI)
+				break;
+
+			usec_delay(50);
+		}
+
+		/*
+		 * Release semaphores and return error if SW EEPROM semaphore
+		 * was not granted because we don't have access to the EEPROM
+		 */
+		if (i >= timeout) {
+			DEBUGOUT("Driver can't access the Eeprom - Semaphore "
+			    "not granted.\n");
+			ixgbe_release_eeprom_semaphore(hw);
+			status = IXGBE_ERR_EEPROM;
+		}
+	}
+
+	return (status);
+}
+
+/*
+ * ixgbe_release_eeprom_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits (both SWESMBI and SMBI,
+ * i.e. both stages taken by ixgbe_get_eeprom_semaphore()).
+ */
+static void
+ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+	u32 swsm;
+
+	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
+	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
+	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * ixgbe_ready_eeprom - Polls for EEPROM ready
+ * @hw: pointer to hardware structure
+ *
+ * Repeatedly issues the SPI RDSR (read status register) opcode and checks
+ * the returned status byte; the EEPROM signals completion of its previous
+ * command by clearing the RDY bit.  Returns IXGBE_ERR_EEPROM if the bit
+ * never clears within IXGBE_EEPROM_MAX_RETRY_SPI worth of 5us retries.
+ */
+static s32
+ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	u16 i;
+	u8 spi_stat_reg;
+
+	/*
+	 * Read "Status Register" repeatedly until the LSB is cleared. The
+	 * EEPROM will signal that the command has been completed by clearing
+	 * bit 0 of the internal status register. If it's not cleared within
+	 * 5 milliseconds, then error out.
+	 */
+	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
+		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
+		    IXGBE_EEPROM_OPCODE_BITS);
+		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
+		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
+			break;
+
+		usec_delay(5);
+		ixgbe_standby_eeprom(hw);
+	}
+
+	/*
+	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
+	 * devices (and only 0-5mSec on 5V devices)
+	 */
+	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
+		DEBUGOUT("SPI EEPROM Status error\n");
+		status = IXGBE_ERR_EEPROM;
+	}
+
+	return (status);
+}
+
+/*
+ * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
+ * @hw: pointer to hardware structure
+ *
+ * Pulses chip select (CS high then low, 1us each) to terminate the
+ * current SPI command so a new one can be started.
+ */
+static void
+ixgbe_standby_eeprom(struct ixgbe_hw *hw)
+{
+	u32 eec;
+
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	/* Toggle CS to flush commands */
+	eec |= IXGBE_EEC_CS;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(1);
+	eec &= ~IXGBE_EEC_CS;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(1);
+}
+
+/*
+ * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
+ * @hw: pointer to hardware structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * Clocks the low "count" bits of "data" out on the DI line, MSB first,
+ * raising and lowering SK for each bit.  Leaves DI low on return.
+ */
+static void
+ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+    u16 count)
+{
+	u32 eec;
+	u32 mask;
+	u32 i;
+
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	/*
+	 * Mask is used to shift "count" bits of "data" out to the EEPROM
+	 * one bit at a time. Determine the starting bit based on count
+	 */
+	mask = 0x01 << (count - 1);
+
+	for (i = 0; i < count; i++) {
+		/*
+		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
+		 * "1", and then raising and then lowering the clock (the SK
+		 * bit controls the clock input to the EEPROM). A "0" is
+		 * shifted out to the EEPROM by setting "DI" to "0" and then
+		 * raising and then lowering the clock.
+		 */
+		if (data & mask)
+			eec |= IXGBE_EEC_DI;
+		else
+			eec &= ~IXGBE_EEC_DI;
+
+		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+		IXGBE_WRITE_FLUSH(hw);
+
+		usec_delay(1);
+
+		ixgbe_raise_eeprom_clk(hw, &eec);
+		ixgbe_lower_eeprom_clk(hw, &eec);
+
+		/*
+		 * Shift mask to signify next bit of data to shift in to the
+		 * EEPROM
+		 */
+		mask = mask >> 1;
+	}
+
+	/* We leave the "DI" bit set to "0" when we leave this routine. */
+	eec &= ~IXGBE_EEC_DI;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to hardware structure
+ * @count: number of bits to shift in
+ *
+ * Bit-bangs "count" bits in from the EEPROM's DO line through the EEC
+ * register, most-significant bit first, and returns them as a u16.
+ */
+static u16
+ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
+{
+	u32 eec;
+	u32 i;
+	u16 data = 0;
+
+	/*
+	 * In order to read a register from the EEPROM, we need to shift
+	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
+	 * the clock input to the EEPROM (setting the SK bit), and then reading
+	 * the value of the "DO" bit. During this "shifting in" process the
+	 * "DI" bit should always be clear.
+	 */
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
+
+	for (i = 0; i < count; i++) {
+		data = data << 1;
+		ixgbe_raise_eeprom_clk(hw, &eec);
+
+		/* Re-read EEC to sample the DO bit driven by the EEPROM */
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+		eec &= ~(IXGBE_EEC_DI);
+		if (eec & IXGBE_EEC_DO)
+			data |= 1;
+
+		ixgbe_lower_eeprom_clk(hw, &eec);
+	}
+
+	return (data);
+}
+
+/*
+ * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
+ * @hw: pointer to hardware structure
+ * @eec: EEC register's current value; updated in place so the caller's
+ *	 shadow copy tracks the SK bit
+ */
+static void
+ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+	/*
+	 * Raise the clock input to the EEPROM
+	 * (setting the SK bit), then delay
+	 */
+	*eec = *eec | IXGBE_EEC_SK;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(1);
+}
+
+/*
+ * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
+ * @hw: pointer to hardware structure
+ * @eec: EEC register's current value; updated in place so the caller's
+ *	 shadow copy tracks the SK bit
+ */
+static void
+ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+	/*
+	 * Lower the clock input to the EEPROM (clearing the SK bit), then
+	 * delay
+	 */
+	*eec = *eec & ~IXGBE_EEC_SK;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(1);
+}
+
+/*
+ * ixgbe_release_eeprom - Release EEPROM, release semaphores
+ * @hw: pointer to hardware structure
+ *
+ * Deselects the EEPROM (CS high, SK low), drops the hardware access
+ * request (EEC.REQ), and releases the EEPROM software/firmware semaphore
+ * taken by the acquire path.
+ */
+static void
+ixgbe_release_eeprom(struct ixgbe_hw *hw)
+{
+	u32 eec;
+
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	eec |= IXGBE_EEC_CS;	/* Pull CS high */
+	eec &= ~IXGBE_EEC_SK;	/* Lower SCK */
+
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+
+	usec_delay(1);
+
+	/* Stop requesting EEPROM access */
+	eec &= ~IXGBE_EEC_REQ;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+	ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+}
+
+/*
+ * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Sums the EEPROM words below the checksum word plus all words reachable
+ * from the section pointers (except the firmware pointer), and returns
+ * IXGBE_EEPROM_SUM minus that sum, so that the total including the
+ * checksum word comes out to IXGBE_EEPROM_SUM.
+ */
+static u16
+ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
+{
+	u16 i;
+	u16 j;
+	u16 checksum = 0;
+	u16 length = 0;
+	u16 pointer = 0;
+	u16 word = 0;
+
+	/* Include every word below the checksum word (the word at offset
+	 * IXGBE_EEPROM_CHECKSUM itself is excluded) */
+	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
+			DEBUGOUT("EEPROM read failed\n");
+			/*
+			 * NOTE(review): a read failure only stops this loop;
+			 * a partial checksum is still returned to the caller.
+			 */
+			break;
+		}
+		checksum += word;
+	}
+
+	/* Include all data from pointers except for the fw pointer */
+	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+		/* read errors here are ignored; stale 'pointer' is reused */
+		hw->eeprom.ops.read(hw, i, &pointer);
+
+		/* Make sure the pointer seems valid */
+		if (pointer != 0xFFFF && pointer != 0) {
+			hw->eeprom.ops.read(hw, pointer, &length);
+
+			if (length != 0xFFFF && length != 0) {
+				/* first word after the pointer is its length */
+				for (j = pointer+1; j <= pointer+length; j++) {
+					hw->eeprom.ops.read(hw, j, &word);
+					checksum += word;
+				}
+			}
+		}
+	}
+
+	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+	return (checksum);
+}
+
+/*
+ * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ *
+ * Returns IXGBE_SUCCESS when the stored and computed checksums match,
+ * IXGBE_ERR_EEPROM_CHECKSUM when they differ, or the error from the
+ * initial EEPROM read.
+ */
+s32
+ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+    u16 *checksum_val)
+{
+	s32 status;
+	u16 checksum;
+	u16 read_checksum = 0;
+
+	/*
+	 * Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+	if (status == IXGBE_SUCCESS) {
+		/* the probe value is discarded; compute the real checksum */
+		checksum = ixgbe_calc_eeprom_checksum(hw);
+
+		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+
+		/*
+		 * Verify read checksum from EEPROM is the same as
+		 * calculated checksum
+		 */
+		if (read_checksum != checksum)
+			status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+		/* If the user cares, return the calculated checksum */
+		if (checksum_val)
+			*checksum_val = checksum;
+	} else {
+		DEBUGOUT("EEPROM read failed\n");
+	}
+
+	return (status);
+}
+
+/*
+ * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ *
+ * Recomputes the checksum over the EEPROM contents and writes it to the
+ * checksum word.  Returns IXGBE_SUCCESS, the error from the probing read,
+ * or the error from the checksum write.
+ */
+s32
+ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u16 checksum;
+
+	/*
+	 * Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+	if (status == IXGBE_SUCCESS) {
+		checksum = ixgbe_calc_eeprom_checksum(hw);
+		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
+		    checksum);
+	} else {
+		DEBUGOUT("EEPROM read failed\n");
+	}
+
+	return (status);
+}
+
+/*
+ * ixgbe_validate_mac_addr - Validate MAC address
+ * @mac_addr: pointer to MAC address.
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address:
+ * neither multicast, nor broadcast, nor the all-zero address.
+ */
+s32
+ixgbe_validate_mac_addr(u8 *mac_addr)
+{
+	/* A multicast address is never a valid individual address */
+	if (IXGBE_IS_MULTICAST(mac_addr)) {
+		DEBUGOUT("MAC address is multicast\n");
+		return (IXGBE_ERR_INVALID_MAC_ADDR);
+	}
+
+	/* Neither is the broadcast address */
+	if (IXGBE_IS_BROADCAST(mac_addr)) {
+		DEBUGOUT("MAC address is broadcast\n");
+		return (IXGBE_ERR_INVALID_MAC_ADDR);
+	}
+
+	/* Reject the all-zero address */
+	if ((mac_addr[0] | mac_addr[1] | mac_addr[2] | mac_addr[3] |
+	    mac_addr[4] | mac_addr[5]) == 0) {
+		DEBUGOUT("MAC address is all zeros\n");
+		return (IXGBE_ERR_INVALID_MAC_ADDR);
+	}
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_set_rar_generic - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.  Always
+ * returns IXGBE_SUCCESS; an out-of-range index is logged and the
+ * hardware is left untouched.
+ */
+s32
+ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+    u32 enable_addr)
+{
+	u32 rar_low, rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (index < rar_entries) {
+		/*
+		 * Setup VMDq pool selection before this RAR gets enabled.
+		 * Done only for a validated index so an out-of-range RAR
+		 * cannot program a bogus pool mapping.
+		 */
+		hw->mac.ops.set_vmdq(hw, index, vmdq);
+
+		/*
+		 * HW expects these in little endian so we reverse the byte
+		 * order from network order (big endian) to little endian
+		 */
+		rar_low = ((u32)addr[0] |
+		    ((u32)addr[1] << 8) |
+		    ((u32)addr[2] << 16) |
+		    ((u32)addr[3] << 24));
+		/*
+		 * Some parts put the VMDq setting in the extra RAH bits,
+		 * so save everything except the lower 16 bits that hold part
+		 * of the address and the address valid bit.
+		 */
+		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+		rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+		rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+
+		if (enable_addr != 0)
+			rar_high |= IXGBE_RAH_AV;
+
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+	} else {
+		DEBUGOUT("Current RAR index is out of range.\n");
+	}
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_enable_rar - Enable Rx address register
+ * @hw: pointer to hardware structure
+ * @index: index into the RAR table
+ *
+ * Enables the select receive address register by setting its
+ * address-valid (AV) bit in RAH.
+ */
+static void
+ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
+{
+	u32 rah = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+
+	/* read-modify-write: only the AV bit changes */
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rah | IXGBE_RAH_AV);
+}
+
+/*
+ * ixgbe_disable_rar - Disable Rx address register
+ * @hw: pointer to hardware structure
+ * @index: index into the RAR table
+ *
+ * Disables the select receive address register by clearing its
+ * address-valid (AV) bit in RAH.
+ */
+static void
+ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
+{
+	u32 rah = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+
+	/* read-modify-write: only the AV bit changes */
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rah & (~IXGBE_RAH_AV));
+}
+
+/*
+ * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ *
+ * Also resets the software accounting in hw->addr_ctrl (overflow_promisc,
+ * rar_used_count, mc_addr_in_rar_count, mta_in_use).  Always returns
+ * IXGBE_SUCCESS.
+ */
+s32
+ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+{
+	u32 i;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	/*
+	 * If the current mac address is valid, assume it is a software override
+	 * to the permanent address.
+	 * Otherwise, use the permanent address from the eeprom.
+	 */
+	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
+	    IXGBE_ERR_INVALID_MAC_ADDR) {
+		/* Get the MAC address from the RAR0 for later reference */
+		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
+		    hw->mac.addr[0], hw->mac.addr[1],
+		    hw->mac.addr[2]);
+		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
+		    hw->mac.addr[4], hw->mac.addr[5]);
+	} else {
+		/* Setup the receive address. */
+		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
+		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
+		    hw->mac.addr[0], hw->mac.addr[1],
+		    hw->mac.addr[2]);
+		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
+		    hw->mac.addr[4], hw->mac.addr[5]);
+
+		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+	}
+	hw->addr_ctrl.overflow_promisc = 0;
+
+	/* RAR[0] now holds the station address */
+	hw->addr_ctrl.rar_used_count = 1;
+
+	/* Zero out the other receive addresses. */
+	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
+	for (i = 1; i < rar_entries; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+	}
+
+	/* Clear the MTA */
+	hw->addr_ctrl.mc_addr_in_rar_count = 0;
+	hw->addr_ctrl.mta_in_use = 0;
+	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+	DEBUGOUT(" Clearing MTA\n");
+	for (i = 0; i < hw->mac.mcft_size; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_add_uc_addr - Adds a secondary unicast address.
+ * @hw: pointer to hardware structure
+ * @addr: new address
+ * @vmdq: VMDq "set" or "pool" index for this address
+ *
+ * Adds it to unused receive address register or goes into promiscuous mode.
+ */
+void
+ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+	u32 rar_entries = hw->mac.num_rar_entries;
+	u32 rar;
+
+	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
+	    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+	/*
+	 * Place this address in the RAR if there is room,
+	 * else put the controller into promiscuous mode
+	 */
+	if (hw->addr_ctrl.rar_used_count < rar_entries) {
+		/*
+		 * Unicast entries fill from the low end; multicast entries
+		 * counted in mc_addr_in_rar_count occupy the high end.
+		 */
+		rar = hw->addr_ctrl.rar_used_count -
+		    hw->addr_ctrl.mc_addr_in_rar_count;
+		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
+		hw->addr_ctrl.rar_used_count++;
+	} else {
+		/* no room: record the overflow; caller enables promisc */
+		hw->addr_ctrl.overflow_promisc++;
+	}
+
+	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
+}
+
+/*
+ * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
+ * @hw: pointer to hardware structure
+ * @addr_list: the list of new addresses
+ * @addr_count: number of addresses
+ * @next: iterator function to walk the address list
+ *
+ * The given list replaces any existing list. Clears the secondary addrs from
+ * receive address registers. Uses unused receive address registers for the
+ * first secondary addresses, and falls back to promiscuous mode as needed.
+ *
+ * Drivers using secondary unicast addresses must set user_set_promisc when
+ * manually putting the device into promiscuous mode.
+ *
+ * Always returns IXGBE_SUCCESS.
+ */
+s32
+ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+    u32 addr_count, ixgbe_mc_addr_itr next)
+{
+	u8 *addr;
+	u32 i;
+	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
+	u32 uc_addr_in_use;
+	u32 fctrl;
+	u32 vmdq;
+
+	/*
+	 * Clear accounting of old secondary address list,
+	 * don't count RAR[0]
+	 */
+	uc_addr_in_use = hw->addr_ctrl.rar_used_count -
+	    hw->addr_ctrl.mc_addr_in_rar_count - 1;
+	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
+	hw->addr_ctrl.overflow_promisc = 0;
+
+	/* Zero out the other receive addresses */
+	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use);
+	for (i = 1; i <= uc_addr_in_use; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+	}
+
+	/* Add the new addresses; 'next' yields each address and its vmdq */
+	for (i = 0; i < addr_count; i++) {
+		DEBUGOUT(" Adding the secondary addresses:\n");
+		addr = next(hw, &addr_list, &vmdq);
+		ixgbe_add_uc_addr(hw, addr, vmdq);
+	}
+
+	if (hw->addr_ctrl.overflow_promisc) {
+		/* enable promisc if not already in overflow or set by user */
+		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+			DEBUGOUT(" Entering address overflow promisc mode\n");
+			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+			fctrl |= IXGBE_FCTRL_UPE;
+			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+		}
+	} else {
+		/* only disable if set by overflow, not by user */
+		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+			DEBUGOUT(" Leaving address overflow promisc mode\n");
+			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+			fctrl &= ~IXGBE_FCTRL_UPE;
+			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+		}
+	}
+
+	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits, from a multicast address, to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ * incoming rx multicast addresses, to determine the bit-vector to check in
+ * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ */
+static s32
+ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (hw->mac.mc_filter_type) {
+	case 0:	/* use bits [47:36] of the address */
+		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+		break;
+	case 1:	/* use bits [46:35] of the address */
+		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+		break;
+	case 2:	/* use bits [45:34] of the address */
+		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+		break;
+	case 3:	/* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+		break;
+	default:	/* Invalid mc_filter_type */
+		DEBUGOUT("MC filter type param set incorrectly\n");
+		ASSERT(0);
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return (vector);
+}
+
+/*
+ * ixgbe_set_mta - Set bit-vector in multicast table
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address to hash into the table
+ *
+ * Sets the bit-vector in the multicast table and bumps the
+ * addr_ctrl.mta_in_use count.
+ */
+void
+ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector;
+	u32 vector_bit;
+	u32 vector_reg;
+	u32 mta_reg;
+
+	hw->addr_ctrl.mta_in_use++;
+
+	vector = ixgbe_mta_vector(hw, mc_addr);
+	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
+
+	/*
+	 * The MTA is a register array of 128 32-bit registers. It is treated
+	 * like an array of 4096 bits. We want to set bit
+	 * BitArray[vector_value]. So we figure out what register the bit is
+	 * in, read it, OR in the new bit, then write back the new value. The
+	 * register is determined by the upper 7 bits of the vector value and
+	 * the bit within that register is determined by the lower 5 bits of
+	 * the value.
+	 */
+	vector_reg = (vector >> 5) & 0x7F;
+	vector_bit = vector & 0x1F;
+	mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+	mta_reg |= (1 << vector_bit);
+	IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+}
+
+/*
+ * ixgbe_add_mc_addr - Adds a multicast address.
+ * @hw: pointer to hardware structure
+ * @mc_addr: new multicast address
+ *
+ * Adds it to unused receive address register or to the multicast table.
+ */
+void
+ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 rar_entries = hw->mac.num_rar_entries;
+	u32 rar;
+
+	DEBUGOUT6(" MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
+	    mc_addr[0], mc_addr[1], mc_addr[2],
+	    mc_addr[3], mc_addr[4], mc_addr[5]);
+
+	/*
+	 * Place this multicast address in the RAR if there is room,
+	 * else put it in the MTA
+	 */
+	if (hw->addr_ctrl.rar_used_count < rar_entries) {
+		/* use RAR from the end up for multicast */
+		rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
+		hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
+		DEBUGOUT1("Added a multicast address to RAR[%d]\n", rar);
+		hw->addr_ctrl.rar_used_count++;
+		hw->addr_ctrl.mc_addr_in_rar_count++;
+	} else {
+		/* RARs exhausted: fall back to the hash table */
+		ixgbe_set_mta(hw, mc_addr);
+	}
+
+	DEBUGOUT("ixgbe_add_mc_addr Complete\n");
+}
+
+/*
+ * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+ * @next: iterator function to walk the multicast address list
+ *
+ * The given list replaces any existing list. Clears the MC addrs from receive
+ * address registers and the multicast table. Uses unused receive address
+ * registers for the first multicast addresses, and hashes the rest into the
+ * multicast table.
+ *
+ * Always returns IXGBE_SUCCESS.
+ */
+s32
+ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+    u32 mc_addr_count, ixgbe_mc_addr_itr next)
+{
+	u32 i;
+	u32 rar_entries = hw->mac.num_rar_entries;
+	u32 vmdq;
+
+	/*
+	 * Set the new number of MC addresses that we are being requested to
+	 * use.
+	 */
+	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+	hw->addr_ctrl.rar_used_count -= hw->addr_ctrl.mc_addr_in_rar_count;
+	hw->addr_ctrl.mc_addr_in_rar_count = 0;
+	hw->addr_ctrl.mta_in_use = 0;
+
+	/* Zero out the other receive addresses. */
+	DEBUGOUT2("Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
+	    rar_entries - 1);
+	for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+	}
+
+	/* Clear the MTA */
+	DEBUGOUT(" Clearing MTA\n");
+	for (i = 0; i < hw->mac.mcft_size; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+	/* Add the new addresses; the vmdq value from 'next' is unused here */
+	for (i = 0; i < mc_addr_count; i++) {
+		DEBUGOUT(" Adding the multicast addresses:\n");
+		ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq));
+	}
+
+	/* Enable mta only if the hash table is actually populated */
+	if (hw->addr_ctrl.mta_in_use > 0)
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+		    IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+
+	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_enable_mc_generic - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Enables multicast address in RAR and the use of the multicast hash table.
+ * Re-sets the AV bit on the high-end RARs holding multicast entries and,
+ * if the hash table is in use, turns MCSTCTRL.MFE back on.
+ * Always returns IXGBE_SUCCESS.
+ */
+s32
+ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+{
+	u32 i;
+	u32 rar_entries = hw->mac.num_rar_entries;
+	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+	/* multicast RAR entries occupy the top of the table */
+	if (a->mc_addr_in_rar_count > 0)
+		for (i = (rar_entries - a->mc_addr_in_rar_count);
+		    i < rar_entries; i++)
+			ixgbe_enable_rar(hw, i);
+
+	if (a->mta_in_use > 0)
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
+		    hw->mac.mc_filter_type);
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_disable_mc_generic - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Disables multicast address in RAR and the use of the multicast hash table.
+ * Clears the AV bit on the high-end RARs holding multicast entries and, if
+ * the hash table is in use, clears MCSTCTRL.MFE (leaving mc_filter_type).
+ * Always returns IXGBE_SUCCESS.
+ */
+s32
+ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+{
+	u32 i;
+	u32 rar_entries = hw->mac.num_rar_entries;
+	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+	/* multicast RAR entries occupy the top of the table */
+	if (a->mc_addr_in_rar_count > 0)
+		for (i = (rar_entries - a->mc_addr_in_rar_count);
+		    i < rar_entries; i++)
+			ixgbe_disable_rar(hw, i);
+
+	if (a->mta_in_use > 0)
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the
+ * filter (all four VFTAVIND byte arrays).  Always returns IXGBE_SUCCESS.
+ */
+s32
+ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+	u32 offset;
+	u32 vlanbyte;
+
+	for (offset = 0; offset < hw->mac.vft_size; offset++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+	/* clear the per-byte VMDq index arrays as well */
+	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+		for (offset = 0; offset < hw->mac.vft_size; offset++)
+			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+			    0);
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_set_vfta_generic - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFTA
+ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.  First programs the
+ * VMDq index as a nibble into the matching VFTAVIND byte array, then sets
+ * or clears the VLAN's bit in the VFTA.  Always returns IXGBE_SUCCESS.
+ */
+s32
+ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+    bool vlan_on)
+{
+	/* locals renamed to lower_snake per the file's naming convention */
+	u32 vfta_index;
+	u32 bit_offset;
+	u32 vfta_reg;
+	u32 vfta_byte;
+
+	/* Determine 32-bit word position in array */
+	vfta_index = (vlan >> 5) & 0x7F;	/* upper seven bits */
+
+	/* Determine the location of the (VMD) queue index */
+	vfta_byte = ((vlan >> 3) & 0x03);	/* bits (4:3) select byte array */
+	bit_offset = (vlan & 0x7) << 2;	/* lower 3 bits indicate nibble */
+
+	/* Set the nibble for VMD queue index */
+	vfta_reg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vfta_byte, vfta_index));
+	vfta_reg &= (~(0x0F << bit_offset));
+	vfta_reg |= (vind << bit_offset);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vfta_byte, vfta_index), vfta_reg);
+
+	/* Determine the location of the bit for this VLAN id */
+	bit_offset = vlan & 0x1F;	/* lower five bits */
+
+	vfta_reg = IXGBE_READ_REG(hw, IXGBE_VFTA(vfta_index));
+	if (vlan_on)
+		/* Turn on this VLAN id */
+		vfta_reg |= (1 << bit_offset);
+	else
+		/* Turn off this VLAN id */
+		vfta_reg &= ~(1 << bit_offset);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_index), vfta_reg);
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * @hw: pointer to hardware structure
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
+ * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
+ * is returned signifying master requests disabled.
+ *
+ * Polls STATUS.GIO every 100 usec, up to
+ * IXGBE_PCI_MASTER_DISABLE_TIMEOUT iterations.
+ */
+s32
+ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+{
+	u32 ctrl;
+	s32 i;
+	s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+	/* Request master disable via CTRL.GIO_DIS */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+	ctrl |= IXGBE_CTRL_GIO_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+
+	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
+			status = IXGBE_SUCCESS;
+			break;
+		}
+		usec_delay(100);
+	}
+
+	return (status);
+}
+
+
+/*
+ * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash).  Retries up to 200 times with
+ * a 5 msec delay between attempts; returns -IXGBE_ERR_SWFW_SYNC if the
+ * EEPROM semaphore cannot be taken or the resource stays busy.
+ */
+s32
+ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+{
+	u32 gssr;
+	u32 swmask = mask;
+	u32 fwmask = mask << 5;	/* FW bits sit 5 above the SW bits in GSSR */
+	s32 timeout = 200;
+
+	while (timeout) {
+		/* GSSR access itself is arbitrated by the EEPROM semaphore */
+		if (ixgbe_get_eeprom_semaphore(hw))
+			return (-IXGBE_ERR_SWFW_SYNC);
+
+		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+		if (!(gssr & (fwmask | swmask)))
+			break;
+
+		/*
+		 * Firmware currently using resource (fwmask) or other software
+		 * thread currently using resource (swmask)
+		 */
+		ixgbe_release_eeprom_semaphore(hw);
+		msec_delay(5);
+		timeout--;
+	}
+
+	if (!timeout) {
+		DEBUGOUT("Driver can't access resource, GSSR timeout.\n");
+		return (-IXGBE_ERR_SWFW_SYNC);
+	}
+
+	/* claim the resource while still holding the EEPROM semaphore */
+	gssr |= swmask;
+	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+	ixgbe_release_eeprom_semaphore(hw);
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_release_swfw_sync - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ */
+void
+ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+{
+	u32 gssr;
+	u32 swmask = mask;
+
+	/*
+	 * NOTE(review): a failure to take the EEPROM semaphore is ignored
+	 * here and GSSR is updated regardless — presumably deliberate so a
+	 * release can never get stuck; confirm against the acquire path.
+	 */
+	(void) ixgbe_get_eeprom_semaphore(hw);
+
+	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+	gssr &= ~swmask;
+	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+	ixgbe_release_eeprom_semaphore(hw);
+}
+
+/*
+ * ixgbe_read_analog_reg8_generic - Reads 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to Atlas analog register specified.  The access
+ * is started by writing the register address (bits 15:8) together with
+ * IXGBE_ATLASCTL_WRITE_CMD to ATLASCTL; after a 10 usec delay the low byte
+ * of ATLASCTL holds the result.  Always returns IXGBE_SUCCESS.
+ */
+s32
+ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+	u32 atlas_ctl;
+
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+	    IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(10);	/* allow the analog access to complete */
+	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+	*val = (u8)atlas_ctl;
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_write_analog_reg8_generic - Writes 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: atlas register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified: the
+ * register address goes in bits 15:8 of ATLASCTL and the data byte in
+ * bits 7:0.  Always returns IXGBE_SUCCESS.
+ */
+s32
+ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, (reg << 8) | val);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(10);	/* allow the analog write to complete */
+
+	return (IXGBE_SUCCESS);
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_common.h b/usr/src/uts/common/io/ixgbe/ixgbe_common.h
new file mode 100644
index 0000000000..b7cb80bbe2
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_common.h
@@ -0,0 +1,81 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+/* IntelVersion: 1.79 v2008-03-04 */
+
+#ifndef _IXGBE_COMMON_H
+#define _IXGBE_COMMON_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data);
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count,
+ ixgbe_mc_addr_itr func);
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+
+s32 ixgbe_validate_mac_addr(u8 *mac_addr);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+
+s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
+
+#endif /* _IXGBE_COMMON_H */
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_debug.c b/usr/src/uts/common/io/ixgbe/ixgbe_debug.c
new file mode 100644
index 0000000000..4fad54a08f
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_debug.c
@@ -0,0 +1,422 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_sw.h"
+#include "ixgbe_debug.h"
+
+#ifdef IXGBE_DEBUG
+extern ddi_device_acc_attr_t ixgbe_regs_acc_attr;
+
+/*
+ * ixgbe_dump_interrupt - dump interrupt-related registers and the
+ * driver's vector-to-ring mapping for debugging.
+ *
+ * 'adapter' is the ixgbe_t soft state; 'tag' labels the log output.
+ * Only registers with a non-zero value are printed for the larger
+ * register arrays (ivar, ral/rah, mta, vfta, mdef).
+ */
+void
+ixgbe_dump_interrupt(void *adapter, char *tag)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)adapter;
+	struct ixgbe_hw *hw = &ixgbe->hw;
+	ixgbe_ring_vector_t *vect;
+	uint32_t ivar, reg;
+	int i, j;
+
+	/*
+	 * interrupt control registers
+	 */
+	ixgbe_log(ixgbe, "interrupt: %s\n", tag);
+	ixgbe_log(ixgbe, "..eims: 0x%x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
+	ixgbe_log(ixgbe, "..eimc: 0x%x\n", IXGBE_READ_REG(hw, IXGBE_EIMC));
+	ixgbe_log(ixgbe, "..eiac: 0x%x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
+	ixgbe_log(ixgbe, "..eiam: 0x%x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
+	ixgbe_log(ixgbe, "..gpie: 0x%x\n", IXGBE_READ_REG(hw, IXGBE_GPIE));
+
+	/* ivar: interrupt vector allocation registers */
+	for (i = 0; i < IXGBE_IVAR_REG_NUM; i++) {
+		if ((ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(i))) != 0) {
+			ixgbe_log(ixgbe, "ivar[%d]: 0x%x\n", i, ivar);
+		}
+	}
+
+	/* each allocated vector */
+	for (i = 0; i < ixgbe->intr_cnt; i++) {
+		vect = &ixgbe->vect_map[i];
+		ixgbe_log(ixgbe,
+		    "vector %d rx rings %d tx rings %d eitr: 0x%x\n",
+		    i, vect->rxr_cnt, vect->txr_cnt,
+		    IXGBE_READ_REG(hw, IXGBE_EITR(i)));
+
+		/* for each rx ring bit set */
+		j = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
+		while (j >= 0) {
+			ixgbe_log(ixgbe,
+			    "rx %d ivar %d rxdctl: 0x%x srrctl: 0x%x\n",
+			    j, IXGBE_IVAR_RX_QUEUE(j),
+			    IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)),
+			    IXGBE_READ_REG(hw, IXGBE_SRRCTL(j)));
+			j = bt_getlowbit(vect->rx_map, (j + 1),
+			    (ixgbe->num_rx_rings - 1));
+		}
+
+		/* for each tx ring bit set */
+		j = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
+		while (j >= 0) {
+			ixgbe_log(ixgbe, "tx %d ivar %d txdctl: 0x%x\n",
+			    j, IXGBE_IVAR_TX_QUEUE(j),
+			    IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)));
+			j = bt_getlowbit(vect->tx_map, (j + 1),
+			    (ixgbe->num_tx_rings - 1));
+		}
+	}
+
+	/* reta: RSS redirection table */
+	for (i = 0; i < 32; i++) {
+		ixgbe_log(ixgbe, "reta(%d): 0x%x\n",
+		    i, IXGBE_READ_REG(hw, IXGBE_RETA(i)));
+	}
+
+	/* rssrk: RSS random key */
+	for (i = 0; i < 10; i++) {
+		ixgbe_log(ixgbe, "rssrk(%d): 0x%x\n",
+		    i, IXGBE_READ_REG(hw, IXGBE_RSSRK(i)));
+	}
+
+	/* check ral/rah */
+	ixgbe_log(ixgbe, "-- ral/rah --\n");
+	for (i = 0; i < 16; i++) {
+		if ((reg = IXGBE_READ_REG(hw, IXGBE_RAL(i))) != 0) {
+			ixgbe_log(ixgbe, "ral(%d): 0x%x rah(%d): 0x%x\n",
+			    i, reg, i, IXGBE_READ_REG(hw, IXGBE_RAH(i)));
+		}
+	}
+
+	/* check mta */
+	ixgbe_log(ixgbe, "-- mta --\n");
+	for (i = 0; i < 128; i++) {
+		if ((reg = IXGBE_READ_REG(hw, IXGBE_MTA(i))) != 0) {
+			ixgbe_log(ixgbe, "mta(%d): 0x%x\n", i, reg);
+		}
+	}
+
+	/* check vfta */
+	{
+		uint32_t off = IXGBE_VFTA(0);
+		ixgbe_log(ixgbe, "-- vfta --\n");
+		for (i = 0; i < 640; i++) {
+			if ((reg = IXGBE_READ_REG(hw, off)) != 0) {
+				ixgbe_log(ixgbe, "vfta(0x%x): 0x%x\n", off, reg);
+			}
+			off += 4;
+		}
+	}
+
+	/* check mdef */
+	ixgbe_log(ixgbe, "-- mdef --\n");
+	for (i = 0; i < 8; i++) {
+		if ((reg = IXGBE_READ_REG(hw, IXGBE_MDEF(i))) != 0) {
+			ixgbe_log(ixgbe, "mdef(%d): 0x%x\n", i, reg);
+		}
+	}
+}
+
+/*
+ * ixgbe_dump_addr - log a 6-byte ethernet address, prefixed by 'tag',
+ * in the usual xx:xx:xx:xx:xx:xx form.
+ */
+void
+ixgbe_dump_addr(void *adapter, char *tag, const uint8_t *addr)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)adapter;
+	char form[25];
+
+	/*
+	 * Use the bounded snprintf() rather than sprintf() so the
+	 * formatted address (17 chars + NUL) can never overrun the
+	 * local buffer.
+	 */
+	(void) snprintf(form, sizeof (form), "%02x:%02x:%02x:%02x:%02x:%02x",
+	    *addr, *(addr + 1), *(addr + 2),
+	    *(addr + 3), *(addr + 4), *(addr + 5));
+
+	ixgbe_log(ixgbe, "%s %s\n", tag, form);
+}
+
+/*
+ * ixgbe_pci_dump - log the adapter's PCI configuration header, the
+ * PM/MSI/MSI-X/PCIe capability structures, and the MSI-X table and
+ * pending-bit array mapped from the MSI-X BAR.
+ *
+ * 'arg' is the ixgbe_t soft state.  Debug-only (IXGBE_DEBUG).
+ *
+ * NOTE(review): the capability walk follows each capability's "next"
+ * pointer but labels the structures assuming the fixed order
+ * PM -> MSI -> MSI-X -> PCIe; on a device that orders its capability
+ * list differently the printed labels would be wrong -- confirm.
+ */
+void
+ixgbe_pci_dump(void *arg)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	ddi_acc_handle_t handle;
+	uint8_t cap_ptr;
+	uint8_t next_ptr;
+	uint32_t msix_bar;
+	uint32_t msix_ctrl;
+	uint32_t msix_tbl_sz;
+	uint32_t tbl_offset;
+	uint32_t tbl_bir;
+	uint32_t pba_offset;
+	uint32_t pba_bir;
+	off_t offset;
+	off_t mem_size;
+	uintptr_t base;
+	ddi_acc_handle_t acc_hdl;
+	int i;
+
+	handle = ixgbe->osdep.cfg_handle;
+
+	ixgbe_log(ixgbe, "Begin dump PCI config space");
+
+	/* Standard PCI configuration header */
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_VENID:\t0x%x\n",
+	    pci_config_get16(handle, PCI_CONF_VENID));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_DEVID:\t0x%x\n",
+	    pci_config_get16(handle, PCI_CONF_DEVID));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_COMMAND:\t0x%x\n",
+	    pci_config_get16(handle, PCI_CONF_COMM));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_STATUS:\t0x%x\n",
+	    pci_config_get16(handle, PCI_CONF_STAT));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_REVID:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_REVID));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_PROG_CLASS:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_PROGCLASS));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_SUB_CLASS:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_SUBCLASS));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_BAS_CLASS:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_BASCLASS));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_CACHE_LINESZ:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_CACHE_LINESZ));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_LATENCY_TIMER:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_LATENCY_TIMER));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_HEADER_TYPE:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_HEADER));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_BIST:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_BIST));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_BASE0:\t0x%x\n",
+	    pci_config_get32(handle, PCI_CONF_BASE0));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_BASE1:\t0x%x\n",
+	    pci_config_get32(handle, PCI_CONF_BASE1));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_BASE2:\t0x%x\n",
+	    pci_config_get32(handle, PCI_CONF_BASE2));
+
+	/* MSI-X BAR */
+	msix_bar = pci_config_get32(handle, PCI_CONF_BASE3);
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_BASE3:\t0x%x\n", msix_bar);
+
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_BASE4:\t0x%x\n",
+	    pci_config_get32(handle, PCI_CONF_BASE4));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_BASE5:\t0x%x\n",
+	    pci_config_get32(handle, PCI_CONF_BASE5));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_CIS:\t0x%x\n",
+	    pci_config_get32(handle, PCI_CONF_CIS));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_SUBVENID:\t0x%x\n",
+	    pci_config_get16(handle, PCI_CONF_SUBVENID));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_SUBSYSID:\t0x%x\n",
+	    pci_config_get16(handle, PCI_CONF_SUBSYSID));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_ROM:\t0x%x\n",
+	    pci_config_get32(handle, PCI_CONF_ROM));
+
+	cap_ptr = pci_config_get8(handle, PCI_CONF_CAP_PTR);
+
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_CAP_PTR:\t0x%x\n", cap_ptr);
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_ILINE:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_ILINE));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_IPIN:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_IPIN));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_MIN_G:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_MIN_G));
+	ixgbe_log(ixgbe,
+	    "PCI_CONF_MAX_L:\t0x%x\n",
+	    pci_config_get8(handle, PCI_CONF_MAX_L));
+
+	/* Power Management */
+	offset = cap_ptr;
+
+	ixgbe_log(ixgbe,
+	    "PCI_PM_CAP_ID:\t0x%x\n",
+	    pci_config_get8(handle, offset));
+
+	next_ptr = pci_config_get8(handle, offset + 1);
+
+	ixgbe_log(ixgbe,
+	    "PCI_PM_NEXT_PTR:\t0x%x\n", next_ptr);
+	ixgbe_log(ixgbe,
+	    "PCI_PM_CAP:\t0x%x\n",
+	    pci_config_get16(handle, offset + PCI_PMCAP));
+	ixgbe_log(ixgbe,
+	    "PCI_PM_CSR:\t0x%x\n",
+	    pci_config_get16(handle, offset + PCI_PMCSR));
+	ixgbe_log(ixgbe,
+	    "PCI_PM_CSR_BSE:\t0x%x\n",
+	    pci_config_get8(handle, offset + PCI_PMCSR_BSE));
+	ixgbe_log(ixgbe,
+	    "PCI_PM_DATA:\t0x%x\n",
+	    pci_config_get8(handle, offset + PCI_PMDATA));
+
+	/* MSI Configuration */
+	offset = next_ptr;
+
+	ixgbe_log(ixgbe,
+	    "PCI_MSI_CAP_ID:\t0x%x\n",
+	    pci_config_get8(handle, offset));
+
+	next_ptr = pci_config_get8(handle, offset + 1);
+
+	ixgbe_log(ixgbe,
+	    "PCI_MSI_NEXT_PTR:\t0x%x\n", next_ptr);
+	ixgbe_log(ixgbe,
+	    "PCI_MSI_CTRL:\t0x%x\n",
+	    pci_config_get16(handle, offset + PCI_MSI_CTRL));
+	ixgbe_log(ixgbe,
+	    "PCI_MSI_ADDR:\t0x%x\n",
+	    pci_config_get32(handle, offset + PCI_MSI_ADDR_OFFSET));
+	ixgbe_log(ixgbe,
+	    "PCI_MSI_ADDR_HI:\t0x%x\n",
+	    pci_config_get32(handle, offset + 0x8));
+	ixgbe_log(ixgbe,
+	    "PCI_MSI_DATA:\t0x%x\n",
+	    pci_config_get16(handle, offset + 0xC));
+
+	/* MSI-X Configuration */
+	offset = next_ptr;
+
+	ixgbe_log(ixgbe,
+	    "PCI_MSIX_CAP_ID:\t0x%x\n",
+	    pci_config_get8(handle, offset));
+
+	next_ptr = pci_config_get8(handle, offset + 1);
+	ixgbe_log(ixgbe,
+	    "PCI_MSIX_NEXT_PTR:\t0x%x\n", next_ptr);
+
+	/* Low 11 bits of the control word = table size - 1 */
+	msix_ctrl = pci_config_get16(handle, offset + PCI_MSIX_CTRL);
+	msix_tbl_sz = msix_ctrl & 0x7ff;
+	ixgbe_log(ixgbe,
+	    "PCI_MSIX_CTRL:\t0x%x\n", msix_ctrl);
+
+	/* Split table offset register into BIR and byte offset */
+	tbl_offset = pci_config_get32(handle, offset + PCI_MSIX_TBL_OFFSET);
+	tbl_bir = tbl_offset & PCI_MSIX_TBL_BIR_MASK;
+	tbl_offset = tbl_offset & ~PCI_MSIX_TBL_BIR_MASK;
+	ixgbe_log(ixgbe,
+	    "PCI_MSIX_TBL_OFFSET:\t0x%x\n", tbl_offset);
+	ixgbe_log(ixgbe,
+	    "PCI_MSIX_TBL_BIR:\t0x%x\n", tbl_bir);
+
+	/* Likewise for the pending-bit array */
+	pba_offset = pci_config_get32(handle, offset + PCI_MSIX_PBA_OFFSET);
+	pba_bir = pba_offset & PCI_MSIX_PBA_BIR_MASK;
+	pba_offset = pba_offset & ~PCI_MSIX_PBA_BIR_MASK;
+	ixgbe_log(ixgbe,
+	    "PCI_MSIX_PBA_OFFSET:\t0x%x\n", pba_offset);
+	ixgbe_log(ixgbe,
+	    "PCI_MSIX_PBA_BIR:\t0x%x\n", pba_bir);
+
+	/* PCI Express Configuration */
+	offset = next_ptr;
+
+	ixgbe_log(ixgbe,
+	    "PCIE_CAP_ID:\t0x%x\n",
+	    pci_config_get8(handle, offset + PCIE_CAP_ID));
+
+	next_ptr = pci_config_get8(handle, offset + PCIE_CAP_NEXT_PTR);
+
+	ixgbe_log(ixgbe,
+	    "PCIE_CAP_NEXT_PTR:\t0x%x\n", next_ptr);
+	ixgbe_log(ixgbe,
+	    "PCIE_PCIECAP:\t0x%x\n",
+	    pci_config_get16(handle, offset + PCIE_PCIECAP));
+	ixgbe_log(ixgbe,
+	    "PCIE_DEVCAP:\t0x%x\n",
+	    pci_config_get32(handle, offset + PCIE_DEVCAP));
+	ixgbe_log(ixgbe,
+	    "PCIE_DEVCTL:\t0x%x\n",
+	    pci_config_get16(handle, offset + PCIE_DEVCTL));
+	ixgbe_log(ixgbe,
+	    "PCIE_DEVSTS:\t0x%x\n",
+	    pci_config_get16(handle, offset + PCIE_DEVSTS));
+	ixgbe_log(ixgbe,
+	    "PCIE_LINKCAP:\t0x%x\n",
+	    pci_config_get32(handle, offset + PCIE_LINKCAP));
+	ixgbe_log(ixgbe,
+	    "PCIE_LINKCTL:\t0x%x\n",
+	    pci_config_get16(handle, offset + PCIE_LINKCTL));
+	ixgbe_log(ixgbe,
+	    "PCIE_LINKSTS:\t0x%x\n",
+	    pci_config_get16(handle, offset + PCIE_LINKSTS));
+
+	/* MSI-X Memory Space: map register set 4 and walk the table */
+	if (ddi_dev_regsize(ixgbe->dip, 4, &mem_size) != DDI_SUCCESS) {
+		ixgbe_log(ixgbe, "ddi_dev_regsize() failed");
+		return;
+	}
+
+	if ((ddi_regs_map_setup(ixgbe->dip, 4, (caddr_t *)&base, 0, mem_size,
+	    &ixgbe_regs_acc_attr, &acc_hdl)) != DDI_SUCCESS) {
+		ixgbe_log(ixgbe, "ddi_regs_map_setup() failed");
+		return;
+	}
+
+	/*
+	 * NOTE(review): mem_size is an off_t and base a uintptr_t; the
+	 * "%d"/"%x" specifiers below assume both fit in an int --
+	 * confirm on 64-bit kernels.
+	 */
+	ixgbe_log(ixgbe, "MSI-X Memory Space: (mem_size = %d, base = %x)",
+	    mem_size, base);
+
+	/* One 16-byte entry per vector; msix_tbl_sz is size - 1 */
+	for (i = 0; i <= msix_tbl_sz; i++) {
+		ixgbe_log(ixgbe, "MSI-X Table Entry(%d):", i);
+		ixgbe_log(ixgbe, "lo_addr:\t%x",
+		    ddi_get32(acc_hdl,
+		    (uint32_t *)(base + tbl_offset + (i * 16))));
+		ixgbe_log(ixgbe, "up_addr:\t%x",
+		    ddi_get32(acc_hdl,
+		    (uint32_t *)(base + tbl_offset + (i * 16) + 4)));
+		ixgbe_log(ixgbe, "msg_data:\t%x",
+		    ddi_get32(acc_hdl,
+		    (uint32_t *)(base + tbl_offset + (i * 16) + 8)));
+		ixgbe_log(ixgbe, "vct_ctrl:\t%x",
+		    ddi_get32(acc_hdl,
+		    (uint32_t *)(base + tbl_offset + (i * 16) + 12)));
+	}
+
+	ixgbe_log(ixgbe, "MSI-X Pending Bits:\t%x",
+	    ddi_get32(acc_hdl, (uint32_t *)(base + pba_offset)));
+
+	ddi_regs_map_free(&acc_hdl);
+}
+#endif
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_debug.h b/usr/src/uts/common/io/ixgbe/ixgbe_debug.h
new file mode 100644
index 0000000000..3c314654c3
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_debug.h
@@ -0,0 +1,88 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#ifndef _IXGBE_DEBUG_H
+#define _IXGBE_DEBUG_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Turn on driver debug support whenever the kernel is built DEBUG */
+#ifdef DEBUG
+#define IXGBE_DEBUG
+#endif
+
+#ifdef IXGBE_DEBUG
+
+/*
+ * Debug logging wrappers: compile to ixgbe_log() calls in debug
+ * builds and to nothing otherwise.  The numeric suffix is the number
+ * of format arguments the message takes.
+ */
+#define IXGBE_DEBUGLOG_0(adapter, fmt) \
+	ixgbe_log((adapter), (fmt))
+#define IXGBE_DEBUGLOG_1(adapter, fmt, d1) \
+	ixgbe_log((adapter), (fmt), (d1))
+#define IXGBE_DEBUGLOG_2(adapter, fmt, d1, d2) \
+	ixgbe_log((adapter), (fmt), (d1), (d2))
+#define IXGBE_DEBUGLOG_3(adapter, fmt, d1, d2, d3) \
+	ixgbe_log((adapter), (fmt), (d1), (d2), (d3))
+#define IXGBE_DEBUGLOG_6(adapter, fmt, d1, d2, d3, d4, d5, d6) \
+	ixgbe_log((adapter), (fmt), (d1), (d2), (d3), (d4), (d5), (d6))
+
+/* Debug-only statistic counters (compiled out in non-debug builds) */
+#define IXGBE_DEBUG_STAT_COND(val, cond) if (cond) (val)++;
+#define IXGBE_DEBUG_STAT(val) (val)++;
+
+#else
+
+#define IXGBE_DEBUGLOG_0(adapter, fmt)
+#define IXGBE_DEBUGLOG_1(adapter, fmt, d1)
+#define IXGBE_DEBUGLOG_2(adapter, fmt, d1, d2)
+#define IXGBE_DEBUGLOG_3(adapter, fmt, d1, d2, d3)
+#define IXGBE_DEBUGLOG_6(adapter, fmt, d1, d2, d3, d4, d5, d6)
+
+#define IXGBE_DEBUG_STAT_COND(val, cond)
+#define IXGBE_DEBUG_STAT(val)
+
+#endif /* IXGBE_DEBUG */
+
+/* Unconditional statistic counter (present in all builds) */
+#define IXGBE_STAT(val) (val)++;
+
+#ifdef IXGBE_DEBUG
+
+void ixgbe_pci_dump(void *);
+void ixgbe_dump_interrupt(void *, char *);
+void ixgbe_dump_addr(void *, char *, const uint8_t *);
+
+#endif /* IXGBE_DEBUG */
+
+extern void ixgbe_log(void *, const char *, ...);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _IXGBE_DEBUG_H */
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_gld.c b/usr/src/uts/common/io/ixgbe/ixgbe_gld.c
new file mode 100644
index 0000000000..a7c62c5ebe
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_gld.c
@@ -0,0 +1,742 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_sw.h"
+
+/*
+ * ixgbe_m_stat - GLDv3 mc_getstat entry point.
+ *
+ * Retrieve the value of one statistic into *val.  Hardware counter
+ * registers are accumulated into the driver's kstat copy and the
+ * running total is returned.  Returns ECANCELED while the device is
+ * suspended and ENOTSUP for statistics this driver does not maintain.
+ *
+ * NOTE(review): the "+=" accumulation implies the hardware counters
+ * are clear-on-read -- confirm against the 82598 datasheet.
+ */
+int
+ixgbe_m_stat(void *arg, uint_t stat, uint64_t *val)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	struct ixgbe_hw *hw = &ixgbe->hw;
+	ixgbe_stat_t *ixgbe_ks;
+	int i;
+
+	ixgbe_ks = (ixgbe_stat_t *)ixgbe->ixgbe_ks->ks_data;
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
+		mutex_exit(&ixgbe->gen_lock);
+		return (ECANCELED);
+	}
+
+	switch (stat) {
+	case MAC_STAT_IFSPEED:
+		/* link_speed scaled by 1e6 -- presumably Mbps to bps */
+		*val = ixgbe->link_speed * 1000000ull;
+		break;
+
+	case MAC_STAT_MULTIRCV:
+		ixgbe_ks->mprc.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_MPRC);
+		*val = ixgbe_ks->mprc.value.ui64;
+		break;
+
+	case MAC_STAT_BRDCSTRCV:
+		ixgbe_ks->bprc.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_BPRC);
+		*val = ixgbe_ks->bprc.value.ui64;
+		break;
+
+	case MAC_STAT_MULTIXMT:
+		ixgbe_ks->mptc.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_MPTC);
+		*val = ixgbe_ks->mptc.value.ui64;
+		break;
+
+	case MAC_STAT_BRDCSTXMT:
+		ixgbe_ks->bptc.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_BPTC);
+		*val = ixgbe_ks->bptc.value.ui64;
+		break;
+
+	case MAC_STAT_NORCVBUF:
+		/* Per-queue no-receive-buffer counts, summed over 8 queues */
+		for (i = 0; i < 8; i++) {
+			ixgbe_ks->rnbc.value.ui64 +=
+			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+		}
+		*val = ixgbe_ks->rnbc.value.ui64;
+		break;
+
+	case MAC_STAT_IERRORS:
+		/* Sum of CRC, illegal-byte, error-byte and length errors */
+		ixgbe_ks->crcerrs.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+		ixgbe_ks->illerrc.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+		ixgbe_ks->errbc.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_ERRBC);
+		ixgbe_ks->rlec.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_RLEC);
+		*val = ixgbe_ks->crcerrs.value.ui64 +
+		    ixgbe_ks->illerrc.value.ui64 +
+		    ixgbe_ks->errbc.value.ui64 +
+		    ixgbe_ks->rlec.value.ui64;
+		break;
+
+	case MAC_STAT_RBYTES:
+		/* Per-queue received byte counts, summed over 16 queues */
+		for (i = 0; i < 16; i++)
+			ixgbe_ks->tor.value.ui64 +=
+			    IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+		*val = ixgbe_ks->tor.value.ui64;
+		break;
+
+	case MAC_STAT_IPACKETS:
+		ixgbe_ks->tpr.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_TPR);
+		*val = ixgbe_ks->tpr.value.ui64;
+		break;
+
+	case MAC_STAT_OPACKETS:
+		ixgbe_ks->tpt.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_TPT);
+		*val = ixgbe_ks->tpt.value.ui64;
+		break;
+
+	/* RFC 1643 stats */
+	case ETHER_STAT_FCS_ERRORS:
+		ixgbe_ks->crcerrs.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+		*val = ixgbe_ks->crcerrs.value.ui64;
+		break;
+
+	case ETHER_STAT_TOOLONG_ERRORS:
+		ixgbe_ks->roc.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_ROC);
+		*val = ixgbe_ks->roc.value.ui64;
+		break;
+
+	case ETHER_STAT_MACRCV_ERRORS:
+		/* Same composition as MAC_STAT_IERRORS above */
+		ixgbe_ks->crcerrs.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+		ixgbe_ks->illerrc.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+		ixgbe_ks->errbc.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_ERRBC);
+		ixgbe_ks->rlec.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_RLEC);
+		*val = ixgbe_ks->crcerrs.value.ui64 +
+		    ixgbe_ks->illerrc.value.ui64 +
+		    ixgbe_ks->errbc.value.ui64 +
+		    ixgbe_ks->rlec.value.ui64;
+		break;
+
+	/* MII/GMII stats */
+	case ETHER_STAT_XCVR_ADDR:
+		/* The Internal PHY's MDI address for each MAC is 1 */
+		*val = 1;
+		break;
+
+	case ETHER_STAT_XCVR_ID:
+		*val = hw->phy.id;
+		break;
+
+	case ETHER_STAT_XCVR_INUSE:
+		/* Map the current link speed + media type to an XCVR code */
+		switch (ixgbe->link_speed) {
+		case IXGBE_LINK_SPEED_1GB_FULL:
+			*val =
+			    (hw->phy.media_type == ixgbe_media_type_copper) ?
+			    XCVR_1000T : XCVR_1000X;
+			break;
+		case IXGBE_LINK_SPEED_100_FULL:
+			*val = (hw->phy.media_type == ixgbe_media_type_copper) ?
+			    XCVR_100T2 : XCVR_100X;
+			break;
+		default:
+			*val = XCVR_NONE;
+			break;
+		}
+		break;
+
+	case ETHER_STAT_CAP_1000FDX:
+		*val = ixgbe->param_1000fdx_cap;
+		break;
+
+	case ETHER_STAT_CAP_100FDX:
+		*val = ixgbe->param_100fdx_cap;
+		break;
+
+	case ETHER_STAT_CAP_ASMPAUSE:
+		*val = ixgbe->param_asym_pause_cap;
+		break;
+
+	case ETHER_STAT_CAP_PAUSE:
+		*val = ixgbe->param_pause_cap;
+		break;
+
+	case ETHER_STAT_CAP_AUTONEG:
+		*val = ixgbe->param_autoneg_cap;
+		break;
+
+	case ETHER_STAT_ADV_CAP_1000FDX:
+		*val = ixgbe->param_adv_1000fdx_cap;
+		break;
+
+	case ETHER_STAT_ADV_CAP_100FDX:
+		*val = ixgbe->param_adv_100fdx_cap;
+		break;
+
+	case ETHER_STAT_ADV_CAP_ASMPAUSE:
+		*val = ixgbe->param_adv_asym_pause_cap;
+		break;
+
+	case ETHER_STAT_ADV_CAP_PAUSE:
+		*val = ixgbe->param_adv_pause_cap;
+		break;
+
+	case ETHER_STAT_ADV_CAP_AUTONEG:
+		*val = hw->mac.autoneg;
+		break;
+
+	case ETHER_STAT_LP_CAP_1000FDX:
+		*val = ixgbe->param_lp_1000fdx_cap;
+		break;
+
+	case ETHER_STAT_LP_CAP_100FDX:
+		*val = ixgbe->param_lp_100fdx_cap;
+		break;
+
+	case ETHER_STAT_LP_CAP_ASMPAUSE:
+		*val = ixgbe->param_lp_asym_pause_cap;
+		break;
+
+	case ETHER_STAT_LP_CAP_PAUSE:
+		*val = ixgbe->param_lp_pause_cap;
+		break;
+
+	case ETHER_STAT_LP_CAP_AUTONEG:
+		*val = ixgbe->param_lp_autoneg_cap;
+		break;
+
+	case ETHER_STAT_LINK_ASMPAUSE:
+		*val = ixgbe->param_asym_pause_cap;
+		break;
+
+	case ETHER_STAT_LINK_PAUSE:
+		*val = ixgbe->param_pause_cap;
+		break;
+
+	case ETHER_STAT_LINK_AUTONEG:
+		*val = hw->mac.autoneg;
+		break;
+	case ETHER_STAT_LINK_DUPLEX:
+		/* This device only operates full duplex */
+		*val = LINK_DUPLEX_FULL;
+		break;
+
+	case ETHER_STAT_TOOSHORT_ERRORS:
+		ixgbe_ks->ruc.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_RUC);
+		*val = ixgbe_ks->ruc.value.ui64;
+		break;
+
+	case ETHER_STAT_CAP_REMFAULT:
+		*val = ixgbe->param_rem_fault;
+		break;
+
+	case ETHER_STAT_ADV_REMFAULT:
+		*val = ixgbe->param_adv_rem_fault;
+		break;
+
+	case ETHER_STAT_LP_REMFAULT:
+		*val = ixgbe->param_lp_rem_fault;
+		break;
+
+	case ETHER_STAT_JABBER_ERRORS:
+		ixgbe_ks->rjc.value.ui64 +=
+		    IXGBE_READ_REG(hw, IXGBE_RJC);
+		*val = ixgbe_ks->rjc.value.ui64;
+		break;
+
+	default:
+		mutex_exit(&ixgbe->gen_lock);
+		return (ENOTSUP);
+	}
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	/* FMA: report (but don't fail on) a bad register access handle */
+	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK)
+		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_UNAFFECTED);
+
+	return (0);
+}
+
+/*
+ * ixgbe_m_start - GLDv3 mc_start entry point.
+ *
+ * Bring the device out of the reset/quiesced state that it
+ * was in when the interface was registered.
+ */
+int
+ixgbe_m_start(void *arg)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	int err = 0;
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED)
+		err = ECANCELED;
+	else if (ixgbe_start(ixgbe) != IXGBE_SUCCESS)
+		err = EIO;
+	else
+		ixgbe->ixgbe_state |= IXGBE_STARTED;
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	if (err != 0)
+		return (err);
+
+	/*
+	 * Enable and start the watchdog timer, outside the lock,
+	 * only after a successful start.
+	 */
+	ixgbe_enable_watchdog_timer(ixgbe);
+
+	return (0);
+}
+
+/*
+ * ixgbe_m_stop - GLDv3 mc_stop entry point.
+ *
+ * Stop the device and put it in a reset/quiesced state such
+ * that the interface can be unregistered.  A no-op while the
+ * device is suspended.
+ */
+void
+ixgbe_m_stop(void *arg)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	boolean_t suspended;
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	suspended = (ixgbe->ixgbe_state & IXGBE_SUSPENDED) != 0;
+	if (!suspended) {
+		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
+		ixgbe_stop(ixgbe);
+	}
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	if (suspended)
+		return;
+
+	/*
+	 * Disable and stop the watchdog timer
+	 */
+	ixgbe_disable_watchdog_timer(ixgbe);
+}
+
+/*
+ * ixgbe_m_promisc - GLDv3 mc_setpromisc entry point.
+ *
+ * Set the promiscuity of the device by toggling the unicast and
+ * multicast promiscuous bits (UPE/MPE) in the FCTRL register.
+ * Returns ECANCELED while the device is suspended.
+ */
+int
+ixgbe_m_promisc(void *arg, boolean_t on)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	uint32_t reg_val;
+	struct ixgbe_hw *hw = &ixgbe->hw;
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
+		mutex_exit(&ixgbe->gen_lock);
+		return (ECANCELED);
+	}
+
+	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+
+	if (on)
+		reg_val |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+	else
+		reg_val &= (~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE));
+
+	/* Use the same 'hw' handle as the read above for consistency */
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	return (0);
+}
+
+/*
+ * ixgbe_m_multicst - GLDv3 mc_multicst entry point.
+ *
+ * Add/remove the addresses to/from the set of multicast
+ * addresses for which the device will receive packets.
+ */
+int
+ixgbe_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	int rv;
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
+		mutex_exit(&ixgbe->gen_lock);
+		return (ECANCELED);
+	}
+
+	if (add)
+		rv = ixgbe_multicst_add(ixgbe, mcst_addr);
+	else
+		rv = ixgbe_multicst_remove(ixgbe, mcst_addr);
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	return (rv);
+}
+
+/*
+ * ixgbe_m_unicst - GLDv3 mc_unicst entry point.
+ *
+ * Set a new device unicast (primary MAC) address.  The address is
+ * stored in the hardware soft state and programmed into address
+ * slot 0, which holds the default address.
+ */
+int
+ixgbe_m_unicst(void *arg, const uint8_t *mac_addr)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	int err;
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
+		mutex_exit(&ixgbe->gen_lock);
+		return (ECANCELED);
+	}
+
+	/* Remember the new address, then program slot 0 with it */
+	bcopy(mac_addr, ixgbe->hw.mac.addr, ETHERADDRL);
+	err = ixgbe_unicst_set(ixgbe, mac_addr, 0);
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	return (err);
+}
+
+/*
+ * ixgbe_m_ioctl - GLDv3 mc_ioctl entry point.
+ *
+ * Pass on M_IOCTL messages passed to the DLD, and support
+ * private IOCTLs for debugging and ndd.  Loopback (LB_*) requests
+ * go to ixgbe_loopback_ioctl(); ndd (ND_GET/ND_SET) requests go to
+ * ixgbe_nd_ioctl().  The helper's ioc_reply code determines how the
+ * message is acknowledged back up the stream.
+ */
+void
+ixgbe_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	struct iocblk *iocp;
+	enum ioc_reply status;
+
+	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
+	iocp->ioc_error = 0;
+
+	switch (iocp->ioc_cmd) {
+	case LB_GET_INFO_SIZE:
+	case LB_GET_INFO:
+	case LB_GET_MODE:
+	case LB_SET_MODE:
+		status = ixgbe_loopback_ioctl(ixgbe, iocp, mp);
+		break;
+
+	case ND_GET:
+	case ND_SET:
+		status = ixgbe_nd_ioctl(ixgbe, q, mp, iocp);
+		break;
+
+	default:
+		status = IOC_INVAL;
+		break;
+	}
+
+	/*
+	 * Decide how to reply.  Note that unknown reply codes fall
+	 * through to the IOC_INVAL (NAK) case.
+	 */
+	switch (status) {
+	default:
+	case IOC_INVAL:
+		/*
+		 * Error, reply with a NAK and EINVAL or the specified error
+		 */
+		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
+		    EINVAL : iocp->ioc_error);
+		break;
+
+	case IOC_DONE:
+		/*
+		 * OK, reply already sent
+		 */
+		break;
+
+	case IOC_ACK:
+		/*
+		 * OK, reply with an ACK
+		 */
+		miocack(q, mp, 0, 0);
+		break;
+
+	case IOC_REPLY:
+		/*
+		 * OK, send prepared reply as ACK or NAK
+		 */
+		mp->b_datap->db_type = iocp->ioc_error == 0 ?
+		    M_IOCACK : M_IOCNAK;
+		qreply(q, mp);
+		break;
+	}
+}
+
+
+/*
+ * ixgbe_m_unicst_add - multiple-MAC-address "add" callback.
+ *
+ * Find an unused address slot, set the address to it, reserve
+ * this slot and enable the device to start filtering on the
+ * new address.  The chosen slot is returned in maddr->mma_slot.
+ */
+int
+ixgbe_m_unicst_add(void *arg, mac_multi_addr_t *maddr)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	mac_addr_slot_t free_slot;
+	int rv;
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
+		mutex_exit(&ixgbe->gen_lock);
+		return (ECANCELED);
+	}
+
+	if (mac_unicst_verify(ixgbe->mac_hdl,
+	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE) {
+		mutex_exit(&ixgbe->gen_lock);
+		return (EINVAL);
+	}
+
+	if (ixgbe->unicst_avail == 0) {
+		/* no slots available */
+		mutex_exit(&ixgbe->gen_lock);
+		return (ENOSPC);
+	}
+
+	/*
+	 * Slot 0 holds the primary/default address; the multiple MAC
+	 * addresses occupy slots 1 .. unicst_total - 1.  Scan upward
+	 * from slot 1 for the first slot not currently in use.
+	 */
+	free_slot = 1;
+	while (free_slot < ixgbe->unicst_total &&
+	    ixgbe->unicst_addr[free_slot].mac.set != 0)
+		free_slot++;
+
+	ASSERT((free_slot > 0) && (free_slot < ixgbe->unicst_total));
+
+	maddr->mma_slot = free_slot;
+
+	rv = ixgbe_unicst_set(ixgbe, maddr->mma_addr, free_slot);
+	if (rv == 0) {
+		ixgbe->unicst_addr[free_slot].mac.set = 1;
+		ixgbe->unicst_avail--;
+	}
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	return (rv);
+}
+
+/*
+ * ixgbe_m_unicst_remove - multiple-MAC-address "remove" callback.
+ *
+ * Removes a MAC address that was added before.  The slot is refilled
+ * with the default (slot 0) address and returned to the free pool.
+ * Returns EINVAL for an out-of-range or unused slot.
+ */
+int
+ixgbe_m_unicst_remove(void *arg, mac_addr_slot_t slot)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	int rv = EINVAL;
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
+		mutex_exit(&ixgbe->gen_lock);
+		return (ECANCELED);
+	}
+
+	if ((slot > 0) && (slot < ixgbe->unicst_total) &&
+	    (ixgbe->unicst_addr[slot].mac.set == 1)) {
+		/*
+		 * Copy the default address to the passed slot
+		 */
+		rv = ixgbe_unicst_set(ixgbe,
+		    ixgbe->unicst_addr[0].mac.addr, slot);
+		if (rv == 0) {
+			ixgbe->unicst_addr[slot].mac.set = 0;
+			ixgbe->unicst_avail++;
+		}
+	}
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	return (rv);
+}
+
+/*
+ * ixgbe_m_unicst_modify - multiple-MAC-address "modify" callback.
+ *
+ * Modifies the value of an address that has been added before.
+ * The new address length and the slot number that was returned
+ * in the call to add should be passed in. mma_flags should be
+ * set to 0.  Returns 0 on success, EINVAL for a bad address,
+ * out-of-range slot or unused slot.
+ */
+int
+ixgbe_m_unicst_modify(void *arg, mac_multi_addr_t *maddr)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	mac_addr_slot_t slot;
+	int rv = EINVAL;
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
+		mutex_exit(&ixgbe->gen_lock);
+		return (ECANCELED);
+	}
+
+	if (mac_unicst_verify(ixgbe->mac_hdl,
+	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE) {
+		mutex_exit(&ixgbe->gen_lock);
+		return (EINVAL);
+	}
+
+	slot = maddr->mma_slot;
+
+	if ((slot > 0) && (slot < ixgbe->unicst_total) &&
+	    (ixgbe->unicst_addr[slot].mac.set == 1))
+		rv = ixgbe_unicst_set(ixgbe, maddr->mma_addr, slot);
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	return (rv);
+}
+
+/*
+ * ixgbe_m_unicst_get - multiple-MAC-address "get" callback.
+ *
+ * Get the MAC address and all other information related to
+ * the address slot passed in mac_multi_addr_t.
+ * mma_flags should be set to 0 in the call.
+ * On return, mma_flags can take the following values:
+ * 1) MMAC_SLOT_UNUSED
+ * 2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR
+ * 3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR
+ * 4) MMAC_SLOT_USED
+ */
+int
+ixgbe_m_unicst_get(void *arg, mac_multi_addr_t *maddr)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+	mac_addr_slot_t slot;
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
+		mutex_exit(&ixgbe->gen_lock);
+		return (ECANCELED);
+	}
+
+	slot = maddr->mma_slot;
+
+	if ((slot <= 0) || (slot >= ixgbe->unicst_total)) {
+		mutex_exit(&ixgbe->gen_lock);
+		return (EINVAL);
+	}
+
+	if (ixgbe->unicst_addr[slot].mac.set == 1) {
+		maddr->mma_flags = MMAC_SLOT_USED;
+		bcopy(ixgbe->unicst_addr[slot].mac.addr,
+		    maddr->mma_addr, ETHERADDRL);
+	} else {
+		maddr->mma_flags = MMAC_SLOT_UNUSED;
+	}
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	return (0);
+}
+
+/*
+ * ixgbe_m_getcapab - GLDv3 mc_getcapab entry point.
+ *
+ * Obtain the MAC's capabilities and associated data from
+ * the driver.  Returns B_TRUE and fills in cap_data for a
+ * supported capability, B_FALSE otherwise.
+ */
+boolean_t
+ixgbe_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
+{
+	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+
+	switch (cap) {
+	case MAC_CAPAB_HCKSUM: {
+		uint32_t *tx_hcksum_flags = cap_data;
+
+		/*
+		 * We advertise our capabilities only if tx hcksum offload is
+		 * enabled. On receive, the stack will accept checksummed
+		 * packets anyway, even if we haven't said we can deliver
+		 * them.
+		 */
+		if (!ixgbe->tx_hcksum_enable)
+			return (B_FALSE);
+
+		*tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
+		return (B_TRUE);
+	}
+
+	case MAC_CAPAB_MULTIADDRESS: {
+		multiaddress_capab_t *mmacp = cap_data;
+
+		/*
+		 * The number of MAC addresses made available by
+		 * this capability is one less than the total as
+		 * the primary address in slot 0 is counted in
+		 * the total.
+		 */
+		mmacp->maddr_naddr = ixgbe->unicst_total - 1;
+		mmacp->maddr_naddrfree = ixgbe->unicst_avail;
+		/* No multiple factory addresses, set mma_flag to 0 */
+		mmacp->maddr_flag = 0;
+		mmacp->maddr_handle = ixgbe;
+		mmacp->maddr_add = ixgbe_m_unicst_add;
+		mmacp->maddr_remove = ixgbe_m_unicst_remove;
+		mmacp->maddr_modify = ixgbe_m_unicst_modify;
+		mmacp->maddr_get = ixgbe_m_unicst_get;
+		mmacp->maddr_reserve = NULL;
+		return (B_TRUE);
+	}
+
+	default:
+		return (B_FALSE);
+	}
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_log.c b/usr/src/uts/common/io/ixgbe/ixgbe_log.c
new file mode 100644
index 0000000000..312c082c16
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_log.c
@@ -0,0 +1,96 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_sw.h"
+
+#define LOG_BUF_LEN 128
+
+/*
+ * ixgbe_notice - Report a run-time event (CE_NOTE, to console & log)
+ *
+ * The formatted message is truncated at LOG_BUF_LEN bytes by
+ * vsnprintf().  arg may be NULL, in which case the message is
+ * tagged with the module name only (no instance number).
+ */
+void
+ixgbe_notice(void *arg, const char *fmt, ...)
+{
+	ixgbe_t *ixgbep = (ixgbe_t *)arg;
+	char buf[LOG_BUF_LEN];
+	va_list ap;
+
+	va_start(ap, fmt);
+	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
+	va_end(ap);
+
+	if (ixgbep != NULL)
+		cmn_err(CE_NOTE, "%s%d: %s", MODULE_NAME, ixgbep->instance,
+		    buf);
+	else
+		cmn_err(CE_NOTE, "%s: %s", MODULE_NAME, buf);
+}
+
+/*
+ * ixgbe_log - Log a run-time event (CE_NOTE, to log only)
+ *
+ * Identical to ixgbe_notice() except for the leading '!' in the
+ * cmn_err() format, which directs the message to the system log
+ * only (not the console).  Message is truncated at LOG_BUF_LEN.
+ */
+void
+ixgbe_log(void *arg, const char *fmt, ...)
+{
+	ixgbe_t *ixgbep = (ixgbe_t *)arg;
+	char buf[LOG_BUF_LEN];
+	va_list ap;
+
+	va_start(ap, fmt);
+	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
+	va_end(ap);
+
+	if (ixgbep != NULL)
+		cmn_err(CE_NOTE, "!%s%d: %s", MODULE_NAME, ixgbep->instance,
+		    buf);
+	else
+		cmn_err(CE_NOTE, "!%s: %s", MODULE_NAME, buf);
+}
+
+/*
+ * ixgbe_error - Log a run-time problem (CE_WARN, to log only)
+ *
+ * Same shape as ixgbe_log(), but at CE_WARN severity.  The '!'
+ * prefix keeps the warning out of the console.  Message is
+ * truncated at LOG_BUF_LEN bytes.
+ */
+void
+ixgbe_error(void *arg, const char *fmt, ...)
+{
+	ixgbe_t *ixgbep = (ixgbe_t *)arg;
+	char buf[LOG_BUF_LEN];
+	va_list ap;
+
+	va_start(ap, fmt);
+	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
+	va_end(ap);
+
+	if (ixgbep != NULL)
+		cmn_err(CE_WARN, "!%s%d: %s", MODULE_NAME, ixgbep->instance,
+		    buf);
+	else
+		cmn_err(CE_WARN, "!%s: %s", MODULE_NAME, buf);
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_main.c b/usr/src/uts/common/io/ixgbe/ixgbe_main.c
new file mode 100644
index 0000000000..5e41d2a465
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_main.c
@@ -0,0 +1,3984 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_sw.h"
+
+static char ident[] = "Intel 10Gb Ethernet 1.0.0";
+
+/*
+ * Local function prototypes
+ */
+static int ixgbe_register_mac(ixgbe_t *);
+static int ixgbe_identify_hardware(ixgbe_t *);
+static int ixgbe_regs_map(ixgbe_t *);
+static void ixgbe_init_properties(ixgbe_t *);
+static int ixgbe_init_driver_settings(ixgbe_t *);
+static void ixgbe_init_locks(ixgbe_t *);
+static void ixgbe_destroy_locks(ixgbe_t *);
+static int ixgbe_init(ixgbe_t *);
+static int ixgbe_chip_start(ixgbe_t *);
+static void ixgbe_chip_stop(ixgbe_t *);
+static int ixgbe_reset(ixgbe_t *);
+static void ixgbe_tx_clean(ixgbe_t *);
+static boolean_t ixgbe_tx_drain(ixgbe_t *);
+static boolean_t ixgbe_rx_drain(ixgbe_t *);
+static int ixgbe_alloc_rings(ixgbe_t *);
+static int ixgbe_init_rings(ixgbe_t *);
+static void ixgbe_free_rings(ixgbe_t *);
+static void ixgbe_fini_rings(ixgbe_t *);
+static void ixgbe_setup_rings(ixgbe_t *);
+static void ixgbe_setup_rx(ixgbe_t *);
+static void ixgbe_setup_tx(ixgbe_t *);
+static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
+static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
+static void ixgbe_setup_rss(ixgbe_t *);
+static void ixgbe_init_unicst(ixgbe_t *);
+static void ixgbe_setup_multicst(ixgbe_t *);
+static void ixgbe_get_hw_state(ixgbe_t *);
+static void ixgbe_get_conf(ixgbe_t *);
+static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
+static boolean_t ixgbe_driver_link_check(ixgbe_t *);
+static void ixgbe_local_timer(void *);
+static void ixgbe_arm_watchdog_timer(ixgbe_t *);
+static void ixgbe_start_watchdog_timer(ixgbe_t *);
+static void ixgbe_restart_watchdog_timer(ixgbe_t *);
+static void ixgbe_stop_watchdog_timer(ixgbe_t *);
+static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
+static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
+static boolean_t is_valid_mac_addr(uint8_t *);
+static boolean_t ixgbe_stall_check(ixgbe_t *);
+static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
+static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
+static boolean_t ixgbe_find_mac_address(ixgbe_t *);
+static int ixgbe_alloc_intrs(ixgbe_t *);
+static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
+static int ixgbe_add_intr_handlers(ixgbe_t *);
+static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
+static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
+static void ixgbe_set_ivar(ixgbe_t *, uint16_t, uint8_t);
+static int ixgbe_map_rings_to_vectors(ixgbe_t *);
+static void ixgbe_setup_adapter_vector(ixgbe_t *);
+static void ixgbe_rem_intr_handlers(ixgbe_t *);
+static void ixgbe_rem_intrs(ixgbe_t *);
+static int ixgbe_enable_intrs(ixgbe_t *);
+static int ixgbe_disable_intrs(ixgbe_t *);
+static uint_t ixgbe_intr_legacy(void *, void *);
+static uint_t ixgbe_intr_msi(void *, void *);
+static uint_t ixgbe_intr_rx(void *, void *);
+static uint_t ixgbe_intr_tx_other(void *, void *);
+static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
+static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
+static void ixgbe_intr_other_work(ixgbe_t *);
+static void ixgbe_get_driver_control(struct ixgbe_hw *);
+static void ixgbe_release_driver_control(struct ixgbe_hw *);
+
+static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
+static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
+static int ixgbe_resume(dev_info_t *);
+static int ixgbe_suspend(dev_info_t *);
+static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
+static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
+
+static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
+ const void *impl_data);
+static void ixgbe_fm_init(ixgbe_t *);
+static void ixgbe_fm_fini(ixgbe_t *);
+
+/*
+ * Character/block entry points.  All are stubbed out (nulldev/nodev):
+ * this driver is accessed through the GLDv3 MAC framework, not
+ * through its own cb entry points.
+ */
+static struct cb_ops ixgbe_cb_ops = {
+	nulldev,		/* cb_open */
+	nulldev,		/* cb_close */
+	nodev,			/* cb_strategy */
+	nodev,			/* cb_print */
+	nodev,			/* cb_dump */
+	nodev,			/* cb_read */
+	nodev,			/* cb_write */
+	nodev,			/* cb_ioctl */
+	nodev,			/* cb_devmap */
+	nodev,			/* cb_mmap */
+	nodev,			/* cb_segmap */
+	nochpoll,		/* cb_chpoll */
+	ddi_prop_op,		/* cb_prop_op */
+	NULL,			/* cb_stream */
+	D_MP | D_HOTPLUG,	/* cb_flag */
+	CB_REV,			/* cb_rev */
+	nodev,			/* cb_aread */
+	nodev			/* cb_awrite */
+};
+
+/* Device operations; attach/detach are the only real entry points */
+static struct dev_ops ixgbe_dev_ops = {
+	DEVO_REV,		/* devo_rev */
+	0,			/* devo_refcnt */
+	NULL,			/* devo_getinfo */
+	nulldev,		/* devo_identify */
+	nulldev,		/* devo_probe */
+	ixgbe_attach,		/* devo_attach */
+	ixgbe_detach,		/* devo_detach */
+	nodev,			/* devo_reset */
+	&ixgbe_cb_ops,		/* devo_cb_ops */
+	NULL,			/* devo_bus_ops */
+	ddi_power		/* devo_power */
+};
+
+static struct modldrv ixgbe_modldrv = {
+	&mod_driverops,		/* Type of module.  This one is a driver */
+	ident,			/* Description string */
+	&ixgbe_dev_ops		/* driver ops */
+};
+
+static struct modlinkage ixgbe_modlinkage = {
+	MODREV_1, &ixgbe_modldrv, NULL
+};
+
+/*
+ * Access attributes for register mapping.
+ * Little-endian, strictly-ordered access with FM access-error
+ * checking enabled (DDI_FLAGERR_ACC).
+ */
+ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
+	DDI_DEVICE_ATTR_V0,
+	DDI_STRUCTURE_LE_ACC,
+	DDI_STRICTORDER_ACC,
+	DDI_FLAGERR_ACC
+};
+
+/*
+ * Loopback property
+ */
+static lb_property_t lb_normal = {
+	normal, "normal", IXGBE_LB_NONE
+};
+
+static lb_property_t lb_mac = {
+	internal, "MAC", IXGBE_LB_INTERNAL_MAC
+};
+
+/* Optional mac(9E) callbacks we implement beyond the mandatory set */
+#define	IXGBE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
+
+static mac_callbacks_t ixgbe_m_callbacks = {
+	IXGBE_M_CALLBACK_FLAGS,
+	ixgbe_m_stat,
+	ixgbe_m_start,
+	ixgbe_m_stop,
+	ixgbe_m_promisc,
+	ixgbe_m_multicst,
+	ixgbe_m_unicst,
+	ixgbe_m_tx,
+	NULL,		/* presumably mc_resources — confirm field order */
+	ixgbe_m_ioctl,
+	ixgbe_m_getcapab
+};
+
+/*
+ * Module Initialization Functions.
+ */
+
+int
+_init(void)
+{
+	int status;
+
+	/* Register the mac(9E) entry points before installing the module */
+	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
+
+	status = mod_install(&ixgbe_modlinkage);
+
+	/*
+	 * NOTE(review): mod_install() returns 0/errno rather than a DDI
+	 * status; the comparison works because DDI_SUCCESS is also 0 —
+	 * confirm, as `status != 0` would be the clearer idiom.
+	 */
+	if (status != DDI_SUCCESS) {
+		mac_fini_ops(&ixgbe_dev_ops);
+	}
+
+	return (status);
+}
+
+int
+_fini(void)
+{
+	int status;
+
+	status = mod_remove(&ixgbe_modlinkage);
+
+	/* Only tear down the mac(9E) ops if the module really unloaded */
+	if (status == DDI_SUCCESS) {
+		mac_fini_ops(&ixgbe_dev_ops);
+	}
+
+	return (status);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+	int status;
+
+	/* Delegate entirely to the module framework */
+	status = mod_info(&ixgbe_modlinkage, modinfop);
+
+	return (status);
+}
+
+/*
+ * ixgbe_attach - Driver attach.
+ *
+ * This function is the device specific initialization entry
+ * point. This entry point is required and must be written.
+ * The DDI_ATTACH command must be provided in the attach entry
+ * point. When attach() is called with cmd set to DDI_ATTACH,
+ * all normal kernel services (such as kmem_alloc(9F)) are
+ * available for use by the driver.
+ *
+ * The attach() function will be called once for each instance
+ * of the device on the system with cmd set to DDI_ATTACH.
+ * Until attach() succeeds, the only driver entry points which
+ * may be called are open(9E) and getinfo(9E).
+ *
+ * Each completed step sets a bit in ixgbe->attach_progress so that
+ * ixgbe_unconfigure() can roll back exactly what was done on failure.
+ */
+static int
+ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
+{
+	ixgbe_t *ixgbe;
+	struct ixgbe_osdep *osdep;
+	struct ixgbe_hw *hw;
+	int instance;
+
+	/*
+	 * Check the command and perform corresponding operations
+	 */
+	switch (cmd) {
+	default:
+		return (DDI_FAILURE);
+
+	case DDI_RESUME:
+		return (ixgbe_resume(devinfo));
+
+	case DDI_ATTACH:
+		break;
+	}
+
+	/* Get the device instance */
+	instance = ddi_get_instance(devinfo);
+
+	/* Allocate memory for the instance data structure */
+	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
+
+	ixgbe->dip = devinfo;
+	ixgbe->instance = instance;
+
+	/* Wire the shared-code hw struct and the osdep back-pointers */
+	hw = &ixgbe->hw;
+	osdep = &ixgbe->osdep;
+	hw->back = osdep;
+	osdep->ixgbe = ixgbe;
+
+	/* Attach the instance pointer to the dev_info data structure */
+	ddi_set_driver_private(devinfo, ixgbe);
+
+	/*
+	 * Initialize for fma support
+	 * NOTE(review): "PROP_FM_CAPABLE" is a string literal here; it
+	 * looks like it may have been meant to be a macro expanding to
+	 * the real property name — confirm against ixgbe_sw.h.
+	 */
+	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, "PROP_FM_CAPABLE",
+	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
+	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
+	ixgbe_fm_init(ixgbe);
+	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
+
+	/*
+	 * Map PCI config space registers
+	 */
+	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to map PCI configurations");
+		goto attach_fail;
+	}
+	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
+
+	/*
+	 * Identify the chipset family
+	 */
+	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to identify hardware");
+		goto attach_fail;
+	}
+
+	/*
+	 * Map device registers
+	 */
+	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to map device registers");
+		goto attach_fail;
+	}
+	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
+
+	/*
+	 * Initialize driver parameters
+	 */
+	ixgbe_init_properties(ixgbe);
+	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
+
+	/*
+	 * Allocate interrupts
+	 */
+	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to allocate interrupts");
+		goto attach_fail;
+	}
+	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
+
+	/*
+	 * Allocate rx/tx rings based on the ring numbers.
+	 * The actual numbers of rx/tx rings are decided by the number of
+	 * allocated interrupt vectors, so we should allocate the rings after
+	 * interrupts are allocated.
+	 */
+	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
+		goto attach_fail;
+	}
+	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
+
+	/*
+	 * Map rings to interrupt vectors
+	 */
+	if (ixgbe_map_rings_to_vectors(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to map rings to vectors");
+		goto attach_fail;
+	}
+
+	/*
+	 * Add interrupt handlers
+	 */
+	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
+		goto attach_fail;
+	}
+	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
+
+	/*
+	 * Initialize driver parameters
+	 */
+	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to initialize driver settings");
+		goto attach_fail;
+	}
+
+	/*
+	 * Initialize mutexes for this device.
+	 * Do this before enabling the interrupt handler and
+	 * register the softint to avoid the condition where
+	 * interrupt handler can try using uninitialized mutex.
+	 */
+	ixgbe_init_locks(ixgbe);
+	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
+
+	/*
+	 * Initialize chipset hardware
+	 */
+	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to initialize adapter");
+		goto attach_fail;
+	}
+	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
+
+	/* Verify config-space accesses so far succeeded (FM) */
+	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
+		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
+		goto attach_fail;
+	}
+
+	/*
+	 * Initialize DMA and hardware settings for rx/tx rings
+	 */
+	if (ixgbe_init_rings(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to initialize rings");
+		goto attach_fail;
+	}
+	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT_RINGS;
+
+	/*
+	 * Initialize statistics
+	 */
+	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to initialize statistics");
+		goto attach_fail;
+	}
+	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
+
+	/*
+	 * Initialize NDD parameters
+	 */
+	if (ixgbe_nd_init(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to initialize ndd");
+		goto attach_fail;
+	}
+	ixgbe->attach_progress |= ATTACH_PROGRESS_NDD;
+
+	/*
+	 * Register the driver to the MAC
+	 */
+	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to register MAC");
+		goto attach_fail;
+	}
+	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
+
+	/*
+	 * Now that mutex locks are initialized, and the chip is also
+	 * initialized, enable interrupts.
+	 */
+	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
+		goto attach_fail;
+	}
+	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
+
+	ixgbe->ixgbe_state |= IXGBE_INITIALIZED;
+
+	return (DDI_SUCCESS);
+
+attach_fail:
+	/* Roll back whatever attach_progress records as completed */
+	ixgbe_unconfigure(devinfo, ixgbe);
+	return (DDI_FAILURE);
+}
+
+/*
+ * ixgbe_detach - Driver detach.
+ *
+ * The detach() function is the complement of the attach routine.
+ * If cmd is set to DDI_DETACH, detach() is used to remove the
+ * state associated with a given instance of a device node
+ * prior to the removal of that instance from the system.
+ *
+ * The detach() function will be called once for each instance
+ * of the device for which there has been a successful attach()
+ * once there are no longer any opens on the device.
+ *
+ * Interrupts routine are disabled, All memory allocated by this
+ * driver are freed.
+ */
+static int
+ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
+{
+	ixgbe_t *ixgbe;
+
+	/*
+	 * Check detach command
+	 */
+	switch (cmd) {
+	default:
+		return (DDI_FAILURE);
+
+	case DDI_SUSPEND:
+		return (ixgbe_suspend(devinfo));
+
+	case DDI_DETACH:
+		break;
+	}
+
+
+	/*
+	 * Get the pointer to the driver private data structure
+	 */
+	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
+	if (ixgbe == NULL)
+		return (DDI_FAILURE);
+
+	/*
+	 * Unregister MAC. If failed, we have to fail the detach
+	 */
+	if (mac_unregister(ixgbe->mac_hdl) != 0) {
+		ixgbe_error(ixgbe, "Failed to unregister MAC");
+		return (DDI_FAILURE);
+	}
+	/* Clear the bit so ixgbe_unconfigure() won't unregister twice */
+	ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;
+
+	/*
+	 * If the device is still running, it needs to be stopped first.
+	 * This check is necessary because under some specific circumstances,
+	 * the detach routine can be called without stopping the interface
+	 * first.
+	 */
+	mutex_enter(&ixgbe->gen_lock);
+	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
+		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
+		ixgbe_stop(ixgbe);
+		mutex_exit(&ixgbe->gen_lock);
+		/* Disable and stop the watchdog timer */
+		ixgbe_disable_watchdog_timer(ixgbe);
+	} else
+		mutex_exit(&ixgbe->gen_lock);
+
+	/*
+	 * Check if there are still rx buffers held by the upper layer.
+	 * If so, fail the detach.
+	 */
+	if (!ixgbe_rx_drain(ixgbe))
+		return (DDI_FAILURE);
+
+	/*
+	 * Do the remaining unconfigure routines
+	 */
+	ixgbe_unconfigure(devinfo, ixgbe);
+
+	return (DDI_SUCCESS);
+}
+
+/*
+ * ixgbe_unconfigure - Tear down everything recorded in attach_progress.
+ *
+ * Shared by the ixgbe_attach() failure path and ixgbe_detach(); each
+ * step is guarded by its progress bit, so partially-attached instances
+ * are unwound safely.  Frees the ixgbe_t itself at the end.
+ */
+static void
+ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
+{
+	/*
+	 * Disable interrupt
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
+		(void) ixgbe_disable_intrs(ixgbe);
+	}
+
+	/*
+	 * Unregister MAC
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
+		(void) mac_unregister(ixgbe->mac_hdl);
+	}
+
+	/*
+	 * Free ndd parameters
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_NDD) {
+		ixgbe_nd_cleanup(ixgbe);
+	}
+
+	/*
+	 * Free statistics
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
+		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
+	}
+
+	/*
+	 * Remove interrupt handlers
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
+		ixgbe_rem_intr_handlers(ixgbe);
+	}
+
+	/*
+	 * Remove interrupts
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
+		ixgbe_rem_intrs(ixgbe);
+	}
+
+	/*
+	 * Remove driver properties
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
+		(void) ddi_prop_remove_all(devinfo);
+	}
+
+	/*
+	 * Release the DMA resources of rx/tx rings
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT_RINGS) {
+		ixgbe_fini_rings(ixgbe);
+	}
+
+	/*
+	 * Stop the chipset
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
+		mutex_enter(&ixgbe->gen_lock);
+		ixgbe_chip_stop(ixgbe);
+		mutex_exit(&ixgbe->gen_lock);
+	}
+
+	/*
+	 * Free register handle
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
+		if (ixgbe->osdep.reg_handle != NULL)
+			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
+	}
+
+	/*
+	 * Free PCI config handle
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
+		if (ixgbe->osdep.cfg_handle != NULL)
+			pci_config_teardown(&ixgbe->osdep.cfg_handle);
+	}
+
+	/*
+	 * Free locks
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
+		ixgbe_destroy_locks(ixgbe);
+	}
+
+	/*
+	 * Free the rx/tx rings
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
+		ixgbe_free_rings(ixgbe);
+	}
+
+	/*
+	 * Unregister FMA capabilities
+	 */
+	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
+		ixgbe_fm_fini(ixgbe);
+	}
+
+	/*
+	 * Free the driver data structure
+	 */
+	kmem_free(ixgbe, sizeof (ixgbe_t));
+
+	ddi_set_driver_private(devinfo, NULL);
+}
+
+/*
+ * ixgbe_register_mac - Register the driver and its function pointers with
+ * the GLD interface.
+ *
+ * Returns IXGBE_SUCCESS on success, IXGBE_FAILURE if the mac_register_t
+ * cannot be allocated or mac_register(9F) fails.
+ */
+static int
+ixgbe_register_mac(ixgbe_t *ixgbe)
+{
+	struct ixgbe_hw *hw = &ixgbe->hw;
+	mac_register_t *mac;
+	int status;
+
+	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
+		return (IXGBE_FAILURE);
+
+	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
+	mac->m_driver = ixgbe;
+	mac->m_dip = ixgbe->dip;
+	mac->m_src_addr = hw->mac.addr;
+	mac->m_callbacks = &ixgbe_m_callbacks;
+	mac->m_min_sdu = 0;	/* no minimum SDU */
+	mac->m_max_sdu = ixgbe->default_mtu;
+	mac->m_margin = VLAN_TAGSZ;	/* leave room for a VLAN tag */
+
+	status = mac_register(mac, &ixgbe->mac_hdl);
+
+	/* mac_register() copies what it needs; always free the template */
+	mac_free(mac);
+
+	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
+}
+
+/*
+ * ixgbe_identify_hardware - Identify the type of the chipset.
+ *
+ * Reads the PCI IDs from config space into the shared-code hw struct.
+ * Always returns IXGBE_SUCCESS in this implementation.
+ */
+static int
+ixgbe_identify_hardware(ixgbe_t *ixgbe)
+{
+	struct ixgbe_hw *hw = &ixgbe->hw;
+	struct ixgbe_osdep *osdep = &ixgbe->osdep;
+
+	/*
+	 * Get the device id
+	 */
+	hw->vendor_id =
+	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
+	hw->device_id =
+	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
+	hw->revision_id =
+	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
+	hw->subsystem_device_id =
+	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
+	hw->subsystem_vendor_id =
+	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_regs_map - Map the device registers.
+ *
+ * Maps register set 1 (presumably the memory-mapped register BAR;
+ * set 0 is config space — confirm against the device's reg property)
+ * using ixgbe_regs_acc_attr, storing the base in hw->hw_addr.
+ */
+static int
+ixgbe_regs_map(ixgbe_t *ixgbe)
+{
+	dev_info_t *devinfo = ixgbe->dip;
+	struct ixgbe_hw *hw = &ixgbe->hw;
+	struct ixgbe_osdep *osdep = &ixgbe->osdep;
+	off_t mem_size;
+
+	/*
+	 * First get the size of device registers to be mapped.
+	 */
+	if (ddi_dev_regsize(devinfo, 1, &mem_size) != DDI_SUCCESS) {
+		return (IXGBE_FAILURE);
+	}
+
+	/*
+	 * Call ddi_regs_map_setup() to map registers
+	 */
+	if ((ddi_regs_map_setup(devinfo, 1,
+	    (caddr_t *)&hw->hw_addr, 0,
+	    mem_size, &ixgbe_regs_acc_attr,
+	    &osdep->reg_handle)) != DDI_SUCCESS) {
+		return (IXGBE_FAILURE);
+	}
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_init_properties - Initialize driver properties.
+ *
+ * Thin wrapper kept for symmetry with the other init steps; all the
+ * work is done by ixgbe_get_conf().
+ */
+static void
+ixgbe_init_properties(ixgbe_t *ixgbe)
+{
+	/*
+	 * Get conf file properties, including link settings
+	 * jumbo frames, ring number, descriptor number, etc.
+	 */
+	ixgbe_get_conf(ixgbe);
+}
+
+/*
+ * ixgbe_init_driver_settings - Initialize driver settings.
+ *
+ * The settings include hardware function pointers, bus information,
+ * rx/tx rings settings, link state, and any other parameters that
+ * need to be setup during driver initialization.
+ *
+ * Returns IXGBE_FAILURE only if the shared code cannot be initialized.
+ */
+static int
+ixgbe_init_driver_settings(ixgbe_t *ixgbe)
+{
+	struct ixgbe_hw *hw = &ixgbe->hw;
+	ixgbe_rx_ring_t *rx_ring;
+	ixgbe_tx_ring_t *tx_ring;
+	uint32_t rx_size;
+	uint32_t tx_size;
+	int i;
+
+	/*
+	 * Initialize chipset specific hardware function pointers
+	 */
+	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
+		return (IXGBE_FAILURE);
+	}
+
+	/*
+	 * Set rx buffer size
+	 *
+	 * The IP header alignment room is counted in the calculation.
+	 * The rx buffer size is in unit of 1K that is required by the
+	 * chipset hardware.
+	 */
+	/* Round up to the next 1KB multiple */
+	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
+	ixgbe->rx_buf_size = ((rx_size >> 10) +
+	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
+
+	/*
+	 * Set tx buffer size
+	 */
+	tx_size = ixgbe->max_frame_size;
+	ixgbe->tx_buf_size = ((tx_size >> 10) +
+	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
+
+	/*
+	 * Initialize rx/tx rings parameters
+	 */
+	for (i = 0; i < ixgbe->num_rx_rings; i++) {
+		rx_ring = &ixgbe->rx_rings[i];
+		rx_ring->index = i;
+		rx_ring->ixgbe = ixgbe;
+
+		rx_ring->ring_size = ixgbe->rx_ring_size;
+		rx_ring->free_list_size = ixgbe->rx_ring_size;
+		rx_ring->copy_thresh = ixgbe->rx_copy_thresh;
+		rx_ring->limit_per_intr = ixgbe->rx_limit_per_intr;
+	}
+
+	for (i = 0; i < ixgbe->num_tx_rings; i++) {
+		tx_ring = &ixgbe->tx_rings[i];
+		tx_ring->index = i;
+		tx_ring->ixgbe = ixgbe;
+		/* Pick recycle strategy based on head write-back setting */
+		if (ixgbe->tx_head_wb_enable)
+			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
+		else
+			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
+
+		tx_ring->ring_size = ixgbe->tx_ring_size;
+		/* Free list is 1.5x ring size */
+		tx_ring->free_list_size = ixgbe->tx_ring_size +
+		    (ixgbe->tx_ring_size >> 1);
+		tx_ring->copy_thresh = ixgbe->tx_copy_thresh;
+		tx_ring->recycle_thresh = ixgbe->tx_recycle_thresh;
+		tx_ring->overload_thresh = ixgbe->tx_overload_thresh;
+		tx_ring->resched_thresh = ixgbe->tx_resched_thresh;
+	}
+
+	/*
+	 * Initialize values of interrupt throttling rate
+	 */
+	/* Replicate vector 0's configured rate across all other vectors */
+	for (i = 1; i < IXGBE_MAX_RING_VECTOR; i++)
+		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
+
+	/*
+	 * The initial link state should be "unknown"
+	 */
+	ixgbe->link_state = LINK_STATE_UNKNOWN;
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_init_locks - Initialize locks.
+ *
+ * All mutexes are created at the driver's interrupt priority so they
+ * can be taken from interrupt context; ixgbe_destroy_locks() is the
+ * exact inverse of this routine.
+ */
+static void
+ixgbe_init_locks(ixgbe_t *ixgbe)
+{
+	ixgbe_rx_ring_t *rx_ring;
+	ixgbe_tx_ring_t *tx_ring;
+	int i;
+
+	for (i = 0; i < ixgbe->num_rx_rings; i++) {
+		rx_ring = &ixgbe->rx_rings[i];
+		mutex_init(&rx_ring->rx_lock, NULL,
+		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
+		mutex_init(&rx_ring->recycle_lock, NULL,
+		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
+	}
+
+	for (i = 0; i < ixgbe->num_tx_rings; i++) {
+		tx_ring = &ixgbe->tx_rings[i];
+		mutex_init(&tx_ring->tx_lock, NULL,
+		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
+		mutex_init(&tx_ring->recycle_lock, NULL,
+		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
+		mutex_init(&tx_ring->tcb_head_lock, NULL,
+		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
+		mutex_init(&tx_ring->tcb_tail_lock, NULL,
+		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
+	}
+
+	mutex_init(&ixgbe->gen_lock, NULL,
+	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
+
+	mutex_init(&ixgbe->watchdog_lock, NULL,
+	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
+}
+
+/*
+ * ixgbe_destroy_locks - Destroy locks.
+ *
+ * Inverse of ixgbe_init_locks(); destroys every mutex created there.
+ */
+static void
+ixgbe_destroy_locks(ixgbe_t *ixgbe)
+{
+	ixgbe_rx_ring_t *rx_ring;
+	ixgbe_tx_ring_t *tx_ring;
+	int i;
+
+	for (i = 0; i < ixgbe->num_rx_rings; i++) {
+		rx_ring = &ixgbe->rx_rings[i];
+		mutex_destroy(&rx_ring->rx_lock);
+		mutex_destroy(&rx_ring->recycle_lock);
+	}
+
+	for (i = 0; i < ixgbe->num_tx_rings; i++) {
+		tx_ring = &ixgbe->tx_rings[i];
+		mutex_destroy(&tx_ring->tx_lock);
+		mutex_destroy(&tx_ring->recycle_lock);
+		mutex_destroy(&tx_ring->tcb_head_lock);
+		mutex_destroy(&tx_ring->tcb_tail_lock);
+	}
+
+	mutex_destroy(&ixgbe->gen_lock);
+	mutex_destroy(&ixgbe->watchdog_lock);
+}
+
+/*
+ * ixgbe_resume - DDI_RESUME handler.
+ *
+ * Restarts the hardware and watchdog timer only if the instance was
+ * running (IXGBE_STARTED) when it was suspended, then clears the
+ * suspended flag.
+ */
+static int
+ixgbe_resume(dev_info_t *devinfo)
+{
+	ixgbe_t *ixgbe;
+
+	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
+	if (ixgbe == NULL)
+		return (DDI_FAILURE);
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
+		if (ixgbe_start(ixgbe) != IXGBE_SUCCESS) {
+			mutex_exit(&ixgbe->gen_lock);
+			return (DDI_FAILURE);
+		}
+
+		/*
+		 * Enable and start the watchdog timer
+		 */
+		ixgbe_enable_watchdog_timer(ixgbe);
+	}
+
+	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	return (DDI_SUCCESS);
+}
+
+/*
+ * ixgbe_suspend - DDI_SUSPEND handler.
+ *
+ * Marks the instance suspended and stops the hardware; the watchdog
+ * timer is stopped after gen_lock is dropped.  The IXGBE_STARTED
+ * flag is left intact so ixgbe_resume() knows to restart.
+ */
+static int
+ixgbe_suspend(dev_info_t *devinfo)
+{
+	ixgbe_t *ixgbe;
+
+	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
+	if (ixgbe == NULL)
+		return (DDI_FAILURE);
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
+
+	ixgbe_stop(ixgbe);
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	/*
+	 * Disable and stop the watchdog timer
+	 */
+	ixgbe_disable_watchdog_timer(ixgbe);
+
+	return (DDI_SUCCESS);
+}
+
+/*
+ * ixgbe_init - Initialize the device.
+ *
+ * Resets the hardware, validates the NVM checksum, programs default
+ * flow control, sets up the link and starts the chipset.  On any
+ * failure the PHY is reset and the FM service impact is reported.
+ */
+static int
+ixgbe_init(ixgbe_t *ixgbe)
+{
+	struct ixgbe_hw *hw = &ixgbe->hw;
+
+	mutex_enter(&ixgbe->gen_lock);
+
+	/*
+	 * Reset chipset to put the hardware in a known state
+	 * before we try to do anything with the eeprom.
+	 */
+	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
+		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
+		goto init_fail;
+	}
+
+	/*
+	 * Need to init eeprom before validating the checksum.
+	 * NOTE(review): the message below misspells "initialize"; it is a
+	 * runtime string and is deliberately left untouched here.
+	 */
+	if (ixgbe_init_eeprom_params(hw) < 0) {
+		ixgbe_error(ixgbe,
+		    "Unable to intitialize the eeprom interface.");
+		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
+		goto init_fail;
+	}
+
+	/*
+	 * NVM validation
+	 */
+	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
+		/*
+		 * Some PCI-E parts fail the first check due to
+		 * the link being in sleep state. Call it again,
+		 * if it fails a second time it's a real issue.
+		 */
+		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
+			ixgbe_error(ixgbe,
+			    "Invalid NVM checksum. Please contact "
+			    "the vendor to update the NVM.");
+			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
+			goto init_fail;
+		}
+	}
+
+	/*
+	 * Setup default flow control thresholds - enable/disable
+	 * & flow control type is controlled by ixgbe.conf
+	 */
+	hw->fc.high_water = DEFAULT_FCRTH;
+	hw->fc.low_water = DEFAULT_FCRTL;
+	hw->fc.pause_time = DEFAULT_FCPAUSE;
+	hw->fc.send_xon = B_TRUE;
+
+	/*
+	 * Don't wait for auto-negotiation to complete
+	 */
+	hw->phy.autoneg_wait_to_complete = B_FALSE;
+
+	/*
+	 * Initialize link settings
+	 */
+	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
+
+	/*
+	 * Initialize the chipset hardware
+	 */
+	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
+		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
+		goto init_fail;
+	}
+
+	/* FM check of both access handles before declaring success */
+	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
+		goto init_fail;
+	}
+	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
+		goto init_fail;
+	}
+
+	mutex_exit(&ixgbe->gen_lock);
+	return (IXGBE_SUCCESS);
+
+init_fail:
+	/*
+	 * Reset PHY
+	 */
+	(void) ixgbe_reset_phy(hw);
+
+	mutex_exit(&ixgbe->gen_lock);
+	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
+	return (IXGBE_FAILURE);
+}
+
+/*
+ * ixgbe_init_rings - Allocate DMA resources for all rx/tx rings and
+ * initialize relevant hardware settings.
+ *
+ * Lock order: gen_lock, then every rx_lock, then every tx_lock;
+ * released in exact reverse order.
+ */
+static int
+ixgbe_init_rings(ixgbe_t *ixgbe)
+{
+	int i;
+
+	/*
+	 * Allocate buffers for all the rx/tx rings
+	 */
+	if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS)
+		return (IXGBE_FAILURE);
+
+	/*
+	 * Setup the rx/tx rings
+	 */
+	mutex_enter(&ixgbe->gen_lock);
+
+	for (i = 0; i < ixgbe->num_rx_rings; i++)
+		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
+	for (i = 0; i < ixgbe->num_tx_rings; i++)
+		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
+
+	ixgbe_setup_rings(ixgbe);
+
+	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
+		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
+	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
+		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_fini_rings - Release DMA resources of all rx/tx rings.
+ *
+ * Counterpart of ixgbe_init_rings(); only the DMA/memory resources
+ * need releasing here.
+ */
+static void
+ixgbe_fini_rings(ixgbe_t *ixgbe)
+{
+	/*
+	 * Release the DMA/memory resources of rx/tx rings
+	 */
+	ixgbe_free_dma(ixgbe);
+}
+
+/*
+ * ixgbe_chip_start - Initialize and start the chipset hardware.
+ *
+ * Caller must hold gen_lock (asserted below).  Programs the MAC
+ * address, interrupt vectors, unicast/multicast tables and interrupt
+ * throttling, then takes firmware/driver ownership of the device.
+ */
+static int
+ixgbe_chip_start(ixgbe_t *ixgbe)
+{
+	struct ixgbe_hw *hw = &ixgbe->hw;
+	int i;
+
+	ASSERT(mutex_owned(&ixgbe->gen_lock));
+
+	/*
+	 * Get the mac address
+	 * This function should handle SPARC case correctly.
+	 */
+	if (!ixgbe_find_mac_address(ixgbe)) {
+		ixgbe_error(ixgbe, "Failed to get the mac address");
+		return (IXGBE_FAILURE);
+	}
+
+	/*
+	 * Validate the mac address
+	 */
+	(void) ixgbe_init_rx_addrs(hw);
+	if (!is_valid_mac_addr(hw->mac.addr)) {
+		ixgbe_error(ixgbe, "Invalid mac address");
+		return (IXGBE_FAILURE);
+	}
+
+	/*
+	 * Configure/Initialize hardware
+	 */
+	if (ixgbe_init_hw(hw) != IXGBE_SUCCESS) {
+		ixgbe_error(ixgbe, "Failed to initialize hardware");
+		return (IXGBE_FAILURE);
+	}
+
+	/*
+	 * Setup adapter interrupt vectors
+	 */
+	ixgbe_setup_adapter_vector(ixgbe);
+
+	/*
+	 * Initialize unicast addresses.
+	 */
+	ixgbe_init_unicst(ixgbe);
+
+	/*
+	 * Setup and initialize the mctable structures.
+	 */
+	ixgbe_setup_multicst(ixgbe);
+
+	/*
+	 * Set interrupt throttling rate
+	 */
+	for (i = 0; i < ixgbe->intr_cnt; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
+
+	/*
+	 * Save the state of the phy
+	 */
+	ixgbe_get_hw_state(ixgbe);
+
+	/*
+	 * Make sure driver has control
+	 */
+	ixgbe_get_driver_control(hw);
+
+	return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_chip_stop - Stop the chipset hardware
+ *
+ * Caller must hold gen_lock (asserted below).  Releases driver
+ * control to firmware, then resets both the MAC and the PHY.
+ */
+static void
+ixgbe_chip_stop(ixgbe_t *ixgbe)
+{
+	struct ixgbe_hw *hw = &ixgbe->hw;
+
+	ASSERT(mutex_owned(&ixgbe->gen_lock));
+
+	/*
+	 * Tell firmware driver is no longer in control
+	 */
+	ixgbe_release_driver_control(hw);
+
+	/*
+	 * Reset the chipset
+	 */
+	(void) ixgbe_reset_hw(hw);
+
+	/*
+	 * Reset PHY
+	 */
+	(void) ixgbe_reset_phy(hw);
+}
+
+/*
+ * ixgbe_reset - Reset the chipset and re-start the driver.
+ *
+ * It involves stopping and re-starting the chipset,
+ * and re-configuring the rx/tx rings.
+ *
+ * Locking: takes gen_lock, then all rx ring locks, then all tx ring
+ * locks; releases in reverse order. IXGBE_STARTED is cleared for the
+ * duration of the reset and restored only on success.
+ */
+static int
+ixgbe_reset(ixgbe_t *ixgbe)
+{
+ int i;
+
+ mutex_enter(&ixgbe->gen_lock);
+
+ ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
+ ixgbe->ixgbe_state &= ~IXGBE_STARTED;
+
+ /*
+ * Disable the adapter interrupts to stop any rx/tx activities
+ * before draining pending data and resetting hardware.
+ */
+ ixgbe_disable_adapter_interrupts(ixgbe);
+
+ /*
+ * Drain the pending transmit packets (best effort; result ignored)
+ */
+ (void) ixgbe_tx_drain(ixgbe);
+
+ for (i = 0; i < ixgbe->num_rx_rings; i++)
+ mutex_enter(&ixgbe->rx_rings[i].rx_lock);
+ for (i = 0; i < ixgbe->num_tx_rings; i++)
+ mutex_enter(&ixgbe->tx_rings[i].tx_lock);
+
+ /*
+ * Stop the chipset hardware
+ */
+ ixgbe_chip_stop(ixgbe);
+
+ /*
+ * Clean the pending tx data/resources
+ */
+ ixgbe_tx_clean(ixgbe);
+
+ /*
+ * Start the chipset hardware
+ */
+ if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
+ ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
+ goto reset_failure;
+ }
+
+ if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
+ goto reset_failure;
+ }
+
+ /*
+ * Setup the rx/tx rings
+ */
+ ixgbe_setup_rings(ixgbe);
+
+ /*
+ * Enable adapter interrupts
+ * The interrupts must be enabled after the driver state is START
+ */
+ ixgbe_enable_adapter_interrupts(ixgbe);
+
+ for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
+ mutex_exit(&ixgbe->tx_rings[i].tx_lock);
+ for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
+ mutex_exit(&ixgbe->rx_rings[i].rx_lock);
+
+ ixgbe->ixgbe_state |= IXGBE_STARTED;
+ mutex_exit(&ixgbe->gen_lock);
+
+ return (IXGBE_SUCCESS);
+
+reset_failure:
+ for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
+ mutex_exit(&ixgbe->tx_rings[i].tx_lock);
+ for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
+ mutex_exit(&ixgbe->rx_rings[i].rx_lock);
+
+ mutex_exit(&ixgbe->gen_lock);
+
+ ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
+
+ return (IXGBE_FAILURE);
+}
+
+/*
+ * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
+ *
+ * NOTE(review): the local `hw` below appears unused — the register
+ * writes reference &ixgbe->hw directly. Confirm and consider removing.
+ */
+static void
+ixgbe_tx_clean(ixgbe_t *ixgbe)
+{
+ ixgbe_tx_ring_t *tx_ring;
+ tx_control_block_t *tcb;
+ link_list_t pending_list;
+ uint32_t desc_num;
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ int i, j;
+
+ LINK_LIST_INIT(&pending_list);
+
+ for (i = 0; i < ixgbe->num_tx_rings; i++) {
+ tx_ring = &ixgbe->tx_rings[i];
+
+ mutex_enter(&tx_ring->recycle_lock);
+
+ /*
+ * Clean the pending tx data - the pending packets in the
+ * work_list that have no chances to be transmitted again.
+ *
+ * We must ensure the chipset is stopped or the link is down
+ * before cleaning the transmit packets.
+ */
+ desc_num = 0;
+ for (j = 0; j < tx_ring->ring_size; j++) {
+ tcb = tx_ring->work_list[j];
+ if (tcb != NULL) {
+ desc_num += tcb->desc_num;
+
+ tx_ring->work_list[j] = NULL;
+
+ /*
+ * NOTE(review): presumably ixgbe_free_tcb releases
+ * the tcb's held resources but not the tcb itself,
+ * since the tcb is queued afterwards — confirm.
+ */
+ ixgbe_free_tcb(tcb);
+
+ LIST_PUSH_TAIL(&pending_list, &tcb->link);
+ }
+ }
+
+ if (desc_num > 0) {
+ atomic_add_32(&tx_ring->tbd_free, desc_num);
+ ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
+
+ /*
+ * Reset the head and tail pointers of the tbd ring;
+ * Reset the writeback head if it's enabled.
+ */
+ tx_ring->tbd_head = 0;
+ tx_ring->tbd_tail = 0;
+ if (ixgbe->tx_head_wb_enable)
+ *tx_ring->tbd_head_wb = 0;
+
+ IXGBE_WRITE_REG(&ixgbe->hw,
+ IXGBE_TDH(tx_ring->index), 0);
+ IXGBE_WRITE_REG(&ixgbe->hw,
+ IXGBE_TDT(tx_ring->index), 0);
+ }
+
+ mutex_exit(&tx_ring->recycle_lock);
+
+ /*
+ * Add the tx control blocks in the pending list to
+ * the free list.
+ */
+ ixgbe_put_free_list(tx_ring, &pending_list);
+ }
+}
+
+/*
+ * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
+ * transmitted.
+ *
+ * Polls (up to TX_DRAIN_TIME milliseconds, 1ms per iteration) until
+ * every tx ring reports all descriptors free.
+ */
+static boolean_t
+ixgbe_tx_drain(ixgbe_t *ixgbe)
+{
+ ixgbe_tx_ring_t *tx_ring;
+ boolean_t done;
+ int i, j;
+
+ /*
+ * Wait for a specific time to allow pending tx packets
+ * to be transmitted.
+ *
+ * Check the counter tbd_free to see if transmission is done.
+ * No lock protection is needed here.
+ *
+ * Return B_TRUE if all pending packets have been transmitted;
+ * Otherwise return B_FALSE;
+ */
+ for (i = 0; i < TX_DRAIN_TIME; i++) {
+
+ done = B_TRUE;
+ for (j = 0; j < ixgbe->num_tx_rings; j++) {
+ tx_ring = &ixgbe->tx_rings[j];
+ done = done &&
+ (tx_ring->tbd_free == tx_ring->ring_size);
+ }
+
+ if (done)
+ break;
+
+ msec_delay(1);
+ }
+
+ return (done);
+}
+
+/*
+ * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
+ *
+ * Polls (up to RX_DRAIN_TIME milliseconds, 1ms per iteration) until
+ * every rx ring's free-buffer count matches its free list size.
+ */
+static boolean_t
+ixgbe_rx_drain(ixgbe_t *ixgbe)
+{
+ ixgbe_rx_ring_t *rx_ring;
+ boolean_t done;
+ int i, j;
+
+ /*
+ * Polling the rx free list to check if those rx buffers held by
+ * the upper layer are released.
+ *
+ * Check the counter rcb_free to see if all pending buffers are
+ * released. No lock protection is needed here.
+ *
+ * Return B_TRUE if all pending buffers have been released;
+ * Otherwise return B_FALSE;
+ */
+ for (i = 0; i < RX_DRAIN_TIME; i++) {
+
+ done = B_TRUE;
+ for (j = 0; j < ixgbe->num_rx_rings; j++) {
+ rx_ring = &ixgbe->rx_rings[j];
+ done = done &&
+ (rx_ring->rcb_free == rx_ring->free_list_size);
+ }
+
+ if (done)
+ break;
+
+ msec_delay(1);
+ }
+
+ return (done);
+}
+
+/*
+ * ixgbe_start - Start the driver/chipset.
+ *
+ * Caller must hold gen_lock. On failure all ring locks are released
+ * and an FMA service-lost impact is reported.
+ */
+int
+ixgbe_start(ixgbe_t *ixgbe)
+{
+ int i;
+
+ ASSERT(mutex_owned(&ixgbe->gen_lock));
+
+ for (i = 0; i < ixgbe->num_rx_rings; i++)
+ mutex_enter(&ixgbe->rx_rings[i].rx_lock);
+ for (i = 0; i < ixgbe->num_tx_rings; i++)
+ mutex_enter(&ixgbe->tx_rings[i].tx_lock);
+
+ /*
+ * Start the chipset hardware
+ */
+ if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
+ ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
+ goto start_failure;
+ }
+
+ if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
+ goto start_failure;
+ }
+
+ /*
+ * Setup the rx/tx rings
+ */
+ ixgbe_setup_rings(ixgbe);
+
+ /*
+ * Enable adapter interrupts
+ * The interrupts must be enabled after the driver state is START
+ */
+ ixgbe_enable_adapter_interrupts(ixgbe);
+
+ for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
+ mutex_exit(&ixgbe->tx_rings[i].tx_lock);
+ for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
+ mutex_exit(&ixgbe->rx_rings[i].rx_lock);
+
+ return (IXGBE_SUCCESS);
+
+start_failure:
+ for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
+ mutex_exit(&ixgbe->tx_rings[i].tx_lock);
+ for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
+ mutex_exit(&ixgbe->rx_rings[i].rx_lock);
+
+ ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
+
+ return (IXGBE_FAILURE);
+}
+
+/*
+ * ixgbe_stop - Stop the driver/chipset.
+ *
+ * Caller must hold gen_lock. Interrupts are disabled and tx is drained
+ * before the ring locks are taken and the hardware is stopped.
+ */
+void
+ixgbe_stop(ixgbe_t *ixgbe)
+{
+ int i;
+
+ ASSERT(mutex_owned(&ixgbe->gen_lock));
+
+ /*
+ * Disable the adapter interrupts
+ */
+ ixgbe_disable_adapter_interrupts(ixgbe);
+
+ /*
+ * Drain the pending tx packets (best effort; result ignored)
+ */
+ (void) ixgbe_tx_drain(ixgbe);
+
+ for (i = 0; i < ixgbe->num_rx_rings; i++)
+ mutex_enter(&ixgbe->rx_rings[i].rx_lock);
+ for (i = 0; i < ixgbe->num_tx_rings; i++)
+ mutex_enter(&ixgbe->tx_rings[i].tx_lock);
+
+ /*
+ * Stop the chipset hardware
+ */
+ ixgbe_chip_stop(ixgbe);
+
+ if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
+ ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
+ }
+
+ /*
+ * Clean the pending tx data/resources
+ */
+ ixgbe_tx_clean(ixgbe);
+
+ for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
+ mutex_exit(&ixgbe->tx_rings[i].tx_lock);
+ for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
+ mutex_exit(&ixgbe->rx_rings[i].rx_lock);
+}
+
+/*
+ * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
+ *
+ * Uses KM_NOSLEEP; on tx allocation failure the rx allocation is
+ * rolled back so the function never leaks on failure.
+ */
+static int
+ixgbe_alloc_rings(ixgbe_t *ixgbe)
+{
+ /*
+ * Allocate memory space for rx rings
+ */
+ ixgbe->rx_rings = kmem_zalloc(
+ sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
+ KM_NOSLEEP);
+
+ if (ixgbe->rx_rings == NULL) {
+ return (IXGBE_FAILURE);
+ }
+
+ /*
+ * Allocate memory space for tx rings
+ */
+ ixgbe->tx_rings = kmem_zalloc(
+ sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
+ KM_NOSLEEP);
+
+ if (ixgbe->tx_rings == NULL) {
+ kmem_free(ixgbe->rx_rings,
+ sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
+ ixgbe->rx_rings = NULL;
+ return (IXGBE_FAILURE);
+ }
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_free_rings - Free the memory space of rx/tx rings.
+ *
+ * Safe to call repeatedly: pointers are NULLed after freeing.
+ */
+static void
+ixgbe_free_rings(ixgbe_t *ixgbe)
+{
+ if (ixgbe->rx_rings != NULL) {
+ kmem_free(ixgbe->rx_rings,
+ sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
+ ixgbe->rx_rings = NULL;
+ }
+
+ if (ixgbe->tx_rings != NULL) {
+ kmem_free(ixgbe->tx_rings,
+ sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
+ ixgbe->tx_rings = NULL;
+ }
+}
+
+/*
+ * ixgbe_setup_rings - Setup rx/tx rings.
+ */
+static void
+ixgbe_setup_rings(ixgbe_t *ixgbe)
+{
+ /*
+ * Setup the rx/tx rings, including the following:
+ *
+ * 1. Setup the descriptor ring and the control block buffers;
+ * 2. Initialize necessary registers for receive/transmit;
+ * 3. Initialize software pointers/parameters for receive/transmit;
+ */
+ ixgbe_setup_rx(ixgbe);
+
+ ixgbe_setup_tx(ixgbe);
+}
+
+/*
+ * ixgbe_setup_rx_ring - Program one rx ring's descriptors and registers.
+ *
+ * Caller must hold both the ring's rx_lock and gen_lock. Populates the
+ * descriptor ring from the work list, then programs length, base
+ * address, head/tail, RXDCTL and SRRCTL for this ring.
+ */
+static void
+ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
+{
+ ixgbe_t *ixgbe = rx_ring->ixgbe;
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ rx_control_block_t *rcb;
+ union ixgbe_adv_rx_desc *rbd;
+ uint32_t size;
+ uint32_t buf_low;
+ uint32_t buf_high;
+ uint32_t reg_val;
+ int i;
+
+ ASSERT(mutex_owned(&rx_ring->rx_lock));
+ ASSERT(mutex_owned(&ixgbe->gen_lock));
+
+ for (i = 0; i < ixgbe->rx_ring_size; i++) {
+ rcb = rx_ring->work_list[i];
+ rbd = &rx_ring->rbd_ring[i];
+
+ rbd->read.pkt_addr = rcb->rx_buf.dma_address;
+ /* NOTE(review): NULL assigned to a DMA-address field; 0 intended */
+ rbd->read.hdr_addr = NULL;
+ }
+
+ /*
+ * Initialize the length register
+ */
+ size = rx_ring->ring_size * sizeof (union ixgbe_adv_rx_desc);
+ IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);
+
+ /*
+ * Initialize the base address registers
+ */
+ buf_low = (uint32_t)rx_ring->rbd_area.dma_address;
+ buf_high = (uint32_t)(rx_ring->rbd_area.dma_address >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);
+
+ /*
+ * Setup head & tail pointers
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_ring->ring_size - 1);
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);
+
+ rx_ring->rbd_next = 0;
+
+ /*
+ * Note: Considering the case that the chipset is being reset
+ * and there are still some buffers held by the upper layer,
+ * we should not reset the values of rcb_head, rcb_tail and
+ * rcb_free if the state is not IXGBE_UNKNOWN.
+ */
+ if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
+ rx_ring->rcb_head = 0;
+ rx_ring->rcb_tail = 0;
+ rx_ring->rcb_free = rx_ring->free_list_size;
+ }
+
+ /*
+ * Setup the Receive Descriptor Control Register (RXDCTL)
+ * PTHRESH=32 descriptors (half the internal cache)
+ * HTHRESH=0 descriptors (to minimize latency on fetch)
+ * WTHRESH defaults to 1 (writeback each descriptor)
+ */
+ reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
+ reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
+ reg_val |= 0x0020; /* pthresh */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);
+
+ /*
+ * Setup the Split and Replication Receive Control Register.
+ * Set the rx buffer size and the advanced descriptor type.
+ */
+ reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
+ IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
+}
+
+/*
+ * ixgbe_setup_rx - Global receive-side setup: filter control, receive
+ * enable, per-ring setup, max frame size, jumbo frames, rx checksum
+ * offload, and RSS when multiple rx rings are configured.
+ */
+static void
+ixgbe_setup_rx(ixgbe_t *ixgbe)
+{
+ ixgbe_rx_ring_t *rx_ring;
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ uint32_t reg_val;
+ int i;
+
+ /*
+ * Set filter control in FCTRL to accept broadcast packets and do
+ * not pass pause frames to host. Flow control settings are already
+ * in this register, so preserve them.
+ */
+ reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ reg_val |= IXGBE_FCTRL_BAM; /* broadcast accept mode */
+ reg_val |= IXGBE_FCTRL_DPF; /* discard pause frames */
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
+
+ /*
+ * Enable the receive unit. This must be done after filter
+ * control is set in FCTRL.
+ */
+ reg_val = (IXGBE_RXCTRL_RXEN /* Enable Receive Unit */
+ | IXGBE_RXCTRL_DMBYPS); /* descriptor monitor bypass */
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
+
+ /*
+ * ixgbe_setup_rx_ring must be called after configuring RXCTRL
+ */
+ for (i = 0; i < ixgbe->num_rx_rings; i++) {
+ rx_ring = &ixgbe->rx_rings[i];
+ ixgbe_setup_rx_ring(rx_ring);
+ }
+
+ /*
+ * The Max Frame Size in MHADD will be internally increased by four
+ * bytes if the packet has a VLAN field, so includes MTU, ethernet
+ * header and frame check sequence.
+ */
+ reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
+ + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
+
+ /*
+ * Setup Jumbo Frame enable bit
+ */
+ if (ixgbe->default_mtu > ETHERMTU) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ reg_val |= IXGBE_HLREG0_JUMBOEN;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
+ }
+
+ /*
+ * Hardware checksum settings
+ */
+ if (ixgbe->rx_hcksum_enable) {
+ reg_val = IXGBE_RXCSUM_IPPCSE; /* IP checksum */
+ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
+ }
+
+ /*
+ * Setup RSS for multiple receive queues
+ */
+ if (ixgbe->num_rx_rings > 1)
+ ixgbe_setup_rss(ixgbe);
+}
+
+/*
+ * ixgbe_setup_tx_ring - Program one tx ring's registers and reset its
+ * software state.
+ *
+ * Caller must hold both the ring's tx_lock and gen_lock. Programs
+ * length, base address, TXDCTL, head/tail and (optionally) the head
+ * write-back address placed in the extra descriptor past ring end.
+ */
+static void
+ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
+{
+ ixgbe_t *ixgbe = tx_ring->ixgbe;
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ uint32_t size;
+ uint32_t buf_low;
+ uint32_t buf_high;
+ uint32_t reg_val;
+
+ ASSERT(mutex_owned(&tx_ring->tx_lock));
+ ASSERT(mutex_owned(&ixgbe->gen_lock));
+
+ /*
+ * Initialize the length register
+ */
+ size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
+ IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
+
+ /*
+ * Initialize the base address registers
+ */
+ buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
+ buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
+
+ /*
+ * setup TXDCTL(tx_ring->index)
+ */
+ reg_val = IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
+
+ /*
+ * Setup head & tail pointers
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
+
+ /*
+ * Setup head write-back
+ */
+ if (ixgbe->tx_head_wb_enable) {
+ /*
+ * The memory of the head write-back is allocated using
+ * the extra tbd beyond the tail of the tbd ring.
+ */
+ tx_ring->tbd_head_wb = (uint32_t *)
+ ((uintptr_t)tx_ring->tbd_area.address + size);
+ *tx_ring->tbd_head_wb = 0;
+
+ buf_low = (uint32_t)
+ (tx_ring->tbd_area.dma_address + size);
+ buf_high = (uint32_t)
+ ((tx_ring->tbd_area.dma_address + size) >> 32);
+
+ /* Set the head write-back enable bit */
+ buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
+ IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
+
+ /*
+ * Turn off relaxed ordering for head write back or it will
+ * cause problems with the tx recycling
+ */
+ reg_val = IXGBE_READ_REG(hw,
+ IXGBE_DCA_TXCTRL(tx_ring->index));
+ reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw,
+ IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
+ } else {
+ tx_ring->tbd_head_wb = NULL;
+ }
+
+ tx_ring->tbd_head = 0;
+ tx_ring->tbd_tail = 0;
+ tx_ring->tbd_free = tx_ring->ring_size;
+
+ /*
+ * Note: Considering the case that the chipset is being reset,
+ * and there are still some tcb in the pending list,
+ * we should not reset the values of tcb_head, tcb_tail and
+ * tcb_free if the state is not IXGBE_UNKNOWN.
+ */
+ if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
+ tx_ring->tcb_head = 0;
+ tx_ring->tcb_tail = 0;
+ tx_ring->tcb_free = tx_ring->free_list_size;
+ }
+
+ /*
+ * Initialize hardware checksum offload settings
+ */
+ tx_ring->hcksum_context.hcksum_flags = 0;
+ tx_ring->hcksum_context.ip_hdr_len = 0;
+ tx_ring->hcksum_context.mac_hdr_len = 0;
+ tx_ring->hcksum_context.l4_proto = 0;
+}
+
+/*
+ * ixgbe_setup_tx - Setup every tx ring via ixgbe_setup_tx_ring.
+ */
+static void
+ixgbe_setup_tx(ixgbe_t *ixgbe)
+{
+ ixgbe_tx_ring_t *tx_ring;
+ int i;
+
+ for (i = 0; i < ixgbe->num_tx_rings; i++) {
+ tx_ring = &ixgbe->tx_rings[i];
+ ixgbe_setup_tx_ring(tx_ring);
+ }
+}
+
+/*
+ * ixgbe_setup_rss - Setup receive-side scaling feature.
+ *
+ * Fills the 128-entry redirection table, seeds the RSS hash key with
+ * random bytes, enables RSS hashing in MRQC, and disables Packet
+ * Checksum (mutually exclusive with RSS on this hardware).
+ */
+static void
+ixgbe_setup_rss(ixgbe_t *ixgbe)
+{
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ uint32_t i, j, mrqc, rxcsum;
+ uint32_t random;
+ uint32_t reta;
+
+ /*
+ * Fill out redirection table; four 8-bit entries are packed into
+ * each 32-bit RETA register, written once every fourth iteration.
+ */
+ j = 0;
+ reta = 0;
+ for (i = 0; i < 128; i++) {
+ reta = (reta << 8) | (j * 0x11);
+ if (j == 3)
+ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ j = ((j + 1) % 4);
+ }
+
+ /*
+ * Fill out hash function seeds with a random constant
+ */
+ for (i = 0; i < 10; i++) {
+ (void) random_get_pseudo_bytes((uint8_t *)&random,
+ sizeof (uint32_t));
+ IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
+ }
+
+ /*
+ * enable RSS & perform hash on these packet types
+ */
+ mrqc = IXGBE_MRQC_RSSEN |
+ IXGBE_MRQC_RSS_FIELD_IPV4 |
+ IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
+ IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
+ IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
+ IXGBE_MRQC_RSS_FIELD_IPV6_EX |
+ IXGBE_MRQC_RSS_FIELD_IPV6 |
+ IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
+ IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
+ IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ /*
+ * Disable Packet Checksum to enable RSS for multiple receive queues.
+ *
+ * It is an adapter hardware limitation that Packet Checksum is
+ * mutually exclusive with RSS.
+ */
+ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ rxcsum |= IXGBE_RXCSUM_PCSD;
+ rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+}
+
+/*
+ * ixgbe_init_unicst - Initialize the unicast addresses.
+ *
+ * Slot 0 always holds the default mac address; slots 1..total-1 are
+ * available for additional unicast addresses.
+ */
+static void
+ixgbe_init_unicst(ixgbe_t *ixgbe)
+{
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ int slot;
+ /*
+ * Here we should consider two situations:
+ *
+ * 1. Chipset is initialized the first time
+ * Initialize the multiple unicast addresses, and
+ * save the default mac address.
+ *
+ * 2. Chipset is reset
+ * Recover the multiple unicast addresses from the
+ * software data structure to the RAR registers.
+ */
+ if (!ixgbe->unicst_init) {
+ /*
+ * Initialize the multiple unicast addresses
+ */
+ ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
+
+ ixgbe->unicst_avail = ixgbe->unicst_total - 1;
+
+ bcopy(hw->mac.addr, ixgbe->unicst_addr[0].mac.addr,
+ ETHERADDRL);
+ ixgbe->unicst_addr[0].mac.set = 1;
+
+ for (slot = 1; slot < ixgbe->unicst_total; slot++)
+ ixgbe->unicst_addr[slot].mac.set = 0;
+
+ ixgbe->unicst_init = B_TRUE;
+ } else {
+ /*
+ * Recover the default mac address
+ */
+ bcopy(ixgbe->unicst_addr[0].mac.addr, hw->mac.addr,
+ ETHERADDRL);
+
+ /* Re-configure the RAR registers */
+ for (slot = 1; slot < ixgbe->unicst_total; slot++)
+ (void) ixgbe_set_rar(hw, slot,
+ ixgbe->unicst_addr[slot].mac.addr, NULL, NULL);
+ }
+}
+/*
+ * ixgbe_unicst_set - Set the unicast address to the specified slot.
+ *
+ * Caller must hold gen_lock. Returns 0 on success, EIO if the
+ * register access handle fails the FMA check afterwards.
+ */
+int
+ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
+ mac_addr_slot_t slot)
+{
+ struct ixgbe_hw *hw = &ixgbe->hw;
+
+ ASSERT(mutex_owned(&ixgbe->gen_lock));
+
+ /*
+ * Save the unicast address in the software data structure
+ */
+ bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
+
+ /*
+ * Set the unicast address to the RAR register
+ */
+ (void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, NULL);
+
+ if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
+ ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
+ return (EIO);
+ }
+
+ return (0);
+}
+
+/*
+ * ixgbe_multicst_add - Add a multicst address.
+ *
+ * Caller must hold gen_lock. Returns EINVAL if the address is not a
+ * multicast address (group bit clear), ENOENT if the table is full,
+ * EIO on an FMA access-handle failure, 0 otherwise.
+ */
+int
+ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
+{
+ ASSERT(mutex_owned(&ixgbe->gen_lock));
+
+ /* Multicast addresses must have the group (least significant) bit set */
+ if ((multiaddr[0] & 01) == 0) {
+ return (EINVAL);
+ }
+
+ if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
+ return (ENOENT);
+ }
+
+ bcopy(multiaddr,
+ &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
+ ixgbe->mcast_count++;
+
+ /*
+ * Update the multicast table in the hardware
+ */
+ ixgbe_setup_multicst(ixgbe);
+
+ if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
+ ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
+ return (EIO);
+ }
+
+ return (0);
+}
+
+/*
+ * ixgbe_multicst_remove - Remove a multicst address.
+ *
+ * Caller must hold gen_lock. Silently succeeds if the address is not
+ * in the table; the hardware table is re-written either way.
+ */
+int
+ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
+{
+ int i;
+
+ ASSERT(mutex_owned(&ixgbe->gen_lock));
+
+ for (i = 0; i < ixgbe->mcast_count; i++) {
+ if (bcmp(multiaddr, &ixgbe->mcast_table[i],
+ ETHERADDRL) == 0) {
+ /* Shift the remaining entries down over the hole */
+ for (i++; i < ixgbe->mcast_count; i++) {
+ ixgbe->mcast_table[i - 1] =
+ ixgbe->mcast_table[i];
+ }
+ ixgbe->mcast_count--;
+ break;
+ }
+ }
+
+ /*
+ * Update the multicast table in the hardware
+ */
+ ixgbe_setup_multicst(ixgbe);
+
+ if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
+ ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
+ return (EIO);
+ }
+
+ return (0);
+}
+
+/*
+ * ixgbe_setup_multicst - Setup multicast data structures.
+ *
+ * This routine initializes all of the multicast related structures
+ * and saves them in the hardware registers.
+ */
+static void
+ixgbe_setup_multicst(ixgbe_t *ixgbe)
+{
+ uint8_t *mc_addr_list;
+ uint32_t mc_addr_count;
+ struct ixgbe_hw *hw = &ixgbe->hw;
+
+ ASSERT(mutex_owned(&ixgbe->gen_lock));
+
+ ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
+
+ mc_addr_list = (uint8_t *)ixgbe->mcast_table;
+ mc_addr_count = ixgbe->mcast_count;
+
+ /*
+ * Update the multicast addresses to the MTA registers
+ */
+ (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
+ ixgbe_mc_table_itr);
+}
+
+/*
+ * ixgbe_get_conf - Get driver configurations set in driver.conf.
+ *
+ * This routine gets user-configured values out of the configuration
+ * file ixgbe.conf.
+ *
+ * For each configurable value, there is a minimum, a maximum, and a
+ * default.
+ * If user does not configure a value, use the default.
+ * If user configures below the minimum, use the minimum.
+ * If user configures above the maximum, use the maximum.
+ */
+static void
+ixgbe_get_conf(ixgbe_t *ixgbe)
+{
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ uint32_t flow_control;
+
+ /*
+ * ixgbe driver supports the following user configurations:
+ *
+ * Jumbo frame configuration:
+ * default_mtu
+ *
+ * Ethernet flow control configuration:
+ * flow_control
+ *
+ * Multiple rings configurations:
+ * tx_queue_number
+ * tx_ring_size
+ * rx_queue_number
+ * rx_ring_size
+ *
+ * Call ixgbe_get_prop() to get the value for a specific
+ * configuration parameter.
+ */
+
+ /*
+ * Jumbo frame configuration - max_frame_size controls host buffer
+ * allocation, so includes MTU, ethernet header, vlan tag and
+ * frame check sequence.
+ */
+ ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
+ MIN_MTU, MAX_MTU, DEFAULT_MTU);
+
+ ixgbe->max_frame_size = ixgbe->default_mtu +
+ sizeof (struct ether_vlan_header) + ETHERFCSL;
+
+ /*
+ * Ethernet flow control configuration; the sentinel value 3
+ * selects the hardware's default flow-control mode.
+ */
+ flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
+ ixgbe_fc_none, 3, ixgbe_fc_full);
+ if (flow_control == 3)
+ flow_control = ixgbe_fc_default;
+
+ hw->fc.type = flow_control;
+
+ /*
+ * Multiple rings configurations
+ */
+ ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
+ MIN_TX_QUEUE_NUM, MAX_TX_QUEUE_NUM, DEFAULT_TX_QUEUE_NUM);
+ ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
+ MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
+
+ ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
+ MIN_RX_QUEUE_NUM, MAX_RX_QUEUE_NUM, DEFAULT_RX_QUEUE_NUM);
+ ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
+ MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
+
+ /*
+ * Tunable used to force an interrupt type. The only use is
+ * for testing of the lesser interrupt types.
+ * 0 = don't force interrupt type
+ * 1 = force interrupt type MSIX
+ * 2 = force interrupt type MSI
+ * 3 = force interrupt type Legacy
+ */
+ ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
+ IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
+ ixgbe_log(ixgbe, "interrupt force: %d\n", ixgbe->intr_force);
+
+ /* Feature enables: 0/1 booleans with defaults shown last */
+ ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
+ 0, 1, 1);
+ ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
+ 0, 1, 1);
+ ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
+ 0, 1, 0);
+ ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
+ 0, 1, 1);
+
+ ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
+ MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
+ DEFAULT_TX_COPY_THRESHOLD);
+ ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
+ PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
+ MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
+ ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
+ PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
+ MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
+ ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
+ PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
+ MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
+
+ ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
+ MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
+ DEFAULT_RX_COPY_THRESHOLD);
+ ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
+ MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
+ DEFAULT_RX_LIMIT_PER_INTR);
+
+ ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
+ MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
+ DEFAULT_INTR_THROTTLING);
+}
+
+/*
+ * ixgbe_get_prop - Get a property value out of the configuration file
+ * ixgbe.conf.
+ *
+ * Caller provides the name of the property, a default value, a minimum
+ * value, and a maximum value.
+ *
+ * Return configured value of the property, with default, minimum and
+ * maximum properly applied.
+ */
+static int
+ixgbe_get_prop(ixgbe_t *ixgbe,
+ char *propname, /* name of the property */
+ int minval, /* minimum acceptable value */
+ int maxval, /* maximum acceptable value */
+ int defval) /* default value */
+{
+ int value;
+
+ /*
+ * Call ddi_prop_get_int() to read the conf settings
+ */
+ value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
+ DDI_PROP_DONTPASS, propname, defval);
+ /* Clamp the configured value into [minval, maxval] */
+ if (value > maxval)
+ value = maxval;
+
+ if (value < minval)
+ value = minval;
+
+ return (value);
+}
+
+/*
+ * ixgbe_driver_setup_link - Using the link properties to setup the link.
+ *
+ * If setup_hw is B_TRUE the settings are also programmed into the
+ * hardware; otherwise only the driver's link state is updated.
+ */
+int
+ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
+{
+ struct ixgbe_mac_info *mac;
+ struct ixgbe_phy_info *phy;
+ boolean_t invalid;
+
+ mac = &ixgbe->hw.mac;
+ phy = &ixgbe->hw.phy;
+ invalid = B_FALSE;
+
+ if (ixgbe->param_adv_autoneg_cap == 1) {
+ mac->autoneg = B_TRUE;
+ phy->autoneg_advertised = 0;
+
+ /*
+ * No half duplex support with 10Gb parts
+ */
+ if (ixgbe->param_adv_10000fdx_cap == 1)
+ phy->autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (ixgbe->param_adv_1000fdx_cap == 1)
+ phy->autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (ixgbe->param_adv_100fdx_cap == 1)
+ phy->autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ /* Autoneg requested but nothing advertised: invalid combo */
+ if (phy->autoneg_advertised == 0)
+ invalid = B_TRUE;
+ } else {
+ ixgbe->hw.mac.autoneg = B_FALSE;
+ }
+
+ if (invalid) {
+ ixgbe_notice(ixgbe, "Invalid link settings. Setup link to "
+ "autonegotiation with full link capabilities.");
+ ixgbe->hw.mac.autoneg = B_TRUE;
+ }
+
+ if (setup_hw) {
+ if (ixgbe_setup_link(&ixgbe->hw) != IXGBE_SUCCESS)
+ return (IXGBE_FAILURE);
+ }
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_driver_link_check - Link status processing.
+ *
+ * Caller must hold gen_lock. Returns B_TRUE iff the link state changed
+ * since the last check.
+ */
+static boolean_t
+ixgbe_driver_link_check(ixgbe_t *ixgbe)
+{
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
+ boolean_t link_up = B_FALSE;
+ boolean_t link_changed = B_FALSE;
+
+ (void) ixgbe_check_link(hw, &speed, &link_up);
+ if (link_up) {
+ /*
+ * The Link is up, check whether it was marked as down earlier
+ */
+ if (ixgbe->link_state != LINK_STATE_UP) {
+ /*
+ * NOTE(review): no default case — an unknown speed
+ * leaves link_speed at its previous value.
+ */
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ixgbe->link_speed = SPEED_10GB;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ixgbe->link_speed = SPEED_1GB;
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
+ ixgbe->link_speed = SPEED_100;
+ }
+ ixgbe->link_duplex = LINK_DUPLEX_FULL;
+ ixgbe->link_state = LINK_STATE_UP;
+ ixgbe->link_down_timeout = 0;
+ link_changed = B_TRUE;
+ }
+ } else {
+ if (ixgbe->link_state != LINK_STATE_DOWN) {
+ ixgbe->link_speed = 0;
+ ixgbe->link_duplex = 0;
+ ixgbe->link_state = LINK_STATE_DOWN;
+ link_changed = B_TRUE;
+ }
+
+ /*
+ * After MAX_LINK_DOWN_TIMEOUT ticks of continuous link-down,
+ * reclaim pending tx resources exactly once.
+ */
+ if (ixgbe->ixgbe_state & IXGBE_STARTED) {
+ if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) {
+ ixgbe->link_down_timeout++;
+ } else if (ixgbe->link_down_timeout ==
+ MAX_LINK_DOWN_TIMEOUT) {
+ ixgbe_tx_clean(ixgbe);
+ ixgbe->link_down_timeout++;
+ }
+ }
+ }
+
+ return (link_changed);
+}
+
+/*
+ * ixgbe_local_timer - Driver watchdog function.
+ *
+ * This function will handle the transmit stall check, link status check and
+ * other routines. Re-arms itself via ixgbe_restart_watchdog_timer.
+ */
+static void
+ixgbe_local_timer(void *arg)
+{
+ ixgbe_t *ixgbe = (ixgbe_t *)arg;
+
+ if (ixgbe_stall_check(ixgbe)) {
+ ixgbe->reset_count++;
+ if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
+ ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
+ }
+
+ ixgbe_restart_watchdog_timer(ixgbe);
+}
+
+/*
+ * ixgbe_stall_check - Check for transmit stall.
+ *
+ * This function checks if the adapter is stalled (in transmit).
+ *
+ * It is called each time the watchdog timeout is invoked.
+ * If the transmit descriptor reclaim continuously fails,
+ * the watchdog value will increment by 1. If the watchdog
+ * value exceeds the threshold, the ixgbe is assumed to
+ * have stalled and need to be reset.
+ */
+static boolean_t
+ixgbe_stall_check(ixgbe_t *ixgbe)
+{
+ ixgbe_tx_ring_t *tx_ring;
+ boolean_t result;
+ int i;
+
+ /* A down link cannot stall */
+ if (ixgbe->link_state != LINK_STATE_UP)
+ return (B_FALSE);
+
+ /*
+ * If any tx ring is stalled, we'll reset the chipset
+ */
+ result = B_FALSE;
+ for (i = 0; i < ixgbe->num_tx_rings; i++) {
+ tx_ring = &ixgbe->tx_rings[i];
+
+ if (tx_ring->recycle_fail > 0)
+ tx_ring->stall_watchdog++;
+ else
+ tx_ring->stall_watchdog = 0;
+
+ if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
+ result = B_TRUE;
+ break;
+ }
+ }
+
+ /* tx_ring still points at the ring that tripped the threshold */
+ if (result) {
+ tx_ring->stall_watchdog = 0;
+ tx_ring->recycle_fail = 0;
+ }
+
+ return (result);
+}
+
+
+/*
+ * is_valid_mac_addr - Check if the mac address is valid.
+ */
+static boolean_t
+is_valid_mac_addr(uint8_t *mac_addr)
+{
+ const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
+ const uint8_t addr_test2[6] =
+ { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
+ !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+/*
+ * ixgbe_find_mac_address - Determine the mac address to use.
+ *
+ * On SPARC, OBP properties may override the chip's factory address, in
+ * ascending precedence: "local-mac-address", the system address (when
+ * "local-mac-address?" is "false"), then "mac-address" (set when
+ * netbooted). On other platforms this is a no-op that returns B_TRUE.
+ */
+static boolean_t
+ixgbe_find_mac_address(ixgbe_t *ixgbe)
+{
+#ifdef __sparc
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ uchar_t *bytes;
+ struct ether_addr sysaddr;
+ uint_t nelts;
+ int err;
+ boolean_t found = B_FALSE;
+
+ /*
+ * The "vendor's factory-set address" may already have
+ * been extracted from the chip, but if the property
+ * "local-mac-address" is set we use that instead.
+ *
+ * We check whether it looks like an array of 6
+ * bytes (which it should, if OBP set it). If we can't
+ * make sense of it this way, we'll ignore it.
+ */
+ err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
+ DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
+ if (err == DDI_PROP_SUCCESS) {
+ if (nelts == ETHERADDRL) {
+ while (nelts--)
+ hw->mac.addr[nelts] = bytes[nelts];
+ found = B_TRUE;
+ }
+ ddi_prop_free(bytes);
+ }
+
+ /*
+ * Look up the OBP property "local-mac-address?". If the user has set
+ * 'local-mac-address? = false', use "the system address" instead.
+ */
+ if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
+ "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
+ if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
+ if (localetheraddr(NULL, &sysaddr) != 0) {
+ bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
+ found = B_TRUE;
+ }
+ }
+ ddi_prop_free(bytes);
+ }
+
+ /*
+ * Finally(!), if there's a valid "mac-address" property (created
+ * if we netbooted from this interface), we must use this instead
+ * of any of the above to ensure that the NFS/install server doesn't
+ * get confused by the address changing as Solaris takes over!
+ */
+ err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
+ DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
+ if (err == DDI_PROP_SUCCESS) {
+ if (nelts == ETHERADDRL) {
+ while (nelts--)
+ hw->mac.addr[nelts] = bytes[nelts];
+ found = B_TRUE;
+ }
+ ddi_prop_free(bytes);
+ }
+
+ if (found) {
+ bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
+ return (B_TRUE);
+ }
+#else
+ _NOTE(ARGUNUSED(ixgbe));
+#endif
+
+ return (B_TRUE);
+}
+
+#pragma inline(ixgbe_arm_watchdog_timer)
+/*
+ * ixgbe_arm_watchdog_timer - Schedule one tick of the watchdog.
+ *
+ * Caller is expected to hold watchdog_lock (all callers in this file do).
+ */
+static void
+ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
+{
+ /*
+ * Fire a watchdog timer: run ixgbe_local_timer once,
+ * approximately one second from now.
+ */
+ ixgbe->watchdog_tid =
+ timeout(ixgbe_local_timer,
+ (void *)ixgbe, 1 * drv_usectohz(1000000));
+
+}
+
+/*
+ * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
+ *
+ * Idempotent: if the watchdog is already enabled this is a no-op,
+ * so the timer is never armed twice.
+ */
+void
+ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
+{
+ mutex_enter(&ixgbe->watchdog_lock);
+
+ if (!ixgbe->watchdog_enable) {
+ ixgbe->watchdog_enable = B_TRUE;
+ ixgbe->watchdog_start = B_TRUE;
+ ixgbe_arm_watchdog_timer(ixgbe);
+ }
+
+ mutex_exit(&ixgbe->watchdog_lock);
+}
+
+/*
+ * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
+ *
+ * The pending timeout id is captured under the lock, then cancelled
+ * outside it; untimeout() may wait for an in-flight callback to finish.
+ */
+void
+ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
+{
+ timeout_id_t tid;
+
+ mutex_enter(&ixgbe->watchdog_lock);
+
+ ixgbe->watchdog_enable = B_FALSE;
+ ixgbe->watchdog_start = B_FALSE;
+ tid = ixgbe->watchdog_tid;
+ ixgbe->watchdog_tid = 0;
+
+ mutex_exit(&ixgbe->watchdog_lock);
+
+ if (tid != 0)
+ (void) untimeout(tid);
+}
+
+/*
+ * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
+ *
+ * Only (re)arms the timer if the watchdog has been enabled but is
+ * not currently running; a no-op otherwise.
+ */
+static void
+ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
+{
+ mutex_enter(&ixgbe->watchdog_lock);
+
+ if (ixgbe->watchdog_enable) {
+ if (!ixgbe->watchdog_start) {
+ ixgbe->watchdog_start = B_TRUE;
+ ixgbe_arm_watchdog_timer(ixgbe);
+ }
+ }
+
+ mutex_exit(&ixgbe->watchdog_lock);
+}
+
+/*
+ * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
+ *
+ * Called from the timer callback path to re-arm the next tick;
+ * only re-arms while watchdog_start is still set.
+ */
+static void
+ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
+{
+ mutex_enter(&ixgbe->watchdog_lock);
+
+ if (ixgbe->watchdog_start)
+ ixgbe_arm_watchdog_timer(ixgbe);
+
+ mutex_exit(&ixgbe->watchdog_lock);
+}
+
+/*
+ * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
+ *
+ * Unlike ixgbe_disable_watchdog_timer(), this leaves watchdog_enable
+ * set, so a later ixgbe_start_watchdog_timer() can re-arm it.
+ */
+static void
+ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
+{
+ timeout_id_t tid;
+
+ mutex_enter(&ixgbe->watchdog_lock);
+
+ ixgbe->watchdog_start = B_FALSE;
+ tid = ixgbe->watchdog_tid;
+ ixgbe->watchdog_tid = 0;
+
+ mutex_exit(&ixgbe->watchdog_lock);
+
+ if (tid != 0)
+ (void) untimeout(tid);
+}
+
+/*
+ * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
+ */
+static void
+ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
+{
+ struct ixgbe_hw *hw = &ixgbe->hw;
+
+ /*
+ * mask all interrupts off (EIMC: interrupt mask clear)
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
+
+ /*
+ * for MSI-X, also disable autoclear
+ */
+ if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
+ }
+
+ /* flush the posted writes so the masking takes effect now */
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
+ *
+ * Programs EIMS (mask set), EIAC (autoclear) and GPIE (general
+ * purpose interrupt enable) according to the interrupt type in use.
+ */
+static void
+ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
+{
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ uint32_t eims, eiac, gpie;
+
+ gpie = 0;
+ eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */
+ eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */
+
+ /*
+ * msi-x mode
+ */
+ if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
+ /* enable autoclear but not on bits 29:20 */
+ eiac = (eims & ~0x3ff00000);
+
+ /* general purpose interrupt enable */
+ gpie |= (IXGBE_GPIE_MSIX_MODE |
+ IXGBE_GPIE_PBA_SUPPORT |IXGBE_GPIE_OCD);
+ /*
+ * non-msi-x mode
+ */
+ } else {
+
+ /* disable autoclear, leave gpie at default */
+ eiac = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * ixgbe_loopback_ioctl - Loopback support.
+ *
+ * Handles the LB_* ioctls. Each GET case validates that the caller
+ * supplied exactly the expected payload size before touching b_rptr.
+ * Returns IOC_REPLY on success, IOC_INVAL on any validation failure.
+ */
+enum ioc_reply
+ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
+{
+ lb_info_sz_t *lbsp;
+ lb_property_t *lbpp;
+ uint32_t *lbmp;
+ uint32_t size;
+ uint32_t value;
+
+ /* all supported commands carry a payload in b_cont */
+ if (mp->b_cont == NULL)
+ return (IOC_INVAL);
+
+ switch (iocp->ioc_cmd) {
+ default:
+ return (IOC_INVAL);
+
+ case LB_GET_INFO_SIZE:
+ size = sizeof (lb_info_sz_t);
+ if (iocp->ioc_count != size)
+ return (IOC_INVAL);
+
+ /* total size of the property list: normal + mac loopback */
+ value = sizeof (lb_normal);
+ value += sizeof (lb_mac);
+
+ lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
+ *lbsp = value;
+ break;
+
+ case LB_GET_INFO:
+ value = sizeof (lb_normal);
+ value += sizeof (lb_mac);
+
+ size = value;
+ if (iocp->ioc_count != size)
+ return (IOC_INVAL);
+
+ value = 0;
+ lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
+
+ lbpp[value++] = lb_normal;
+ lbpp[value++] = lb_mac;
+ break;
+
+ case LB_GET_MODE:
+ size = sizeof (uint32_t);
+ if (iocp->ioc_count != size)
+ return (IOC_INVAL);
+
+ lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
+ *lbmp = ixgbe->loopback_mode;
+ break;
+
+ case LB_SET_MODE:
+ /* size 0: SET replies with no payload */
+ size = 0;
+ if (iocp->ioc_count != sizeof (uint32_t))
+ return (IOC_INVAL);
+
+ lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
+ if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
+ return (IOC_INVAL);
+ break;
+ }
+
+ iocp->ioc_count = size;
+ iocp->ioc_error = 0;
+
+ /* FMA: fail the ioctl if register access has gone bad */
+ if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
+ ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
+ return (IOC_INVAL);
+ }
+
+ return (IOC_REPLY);
+}
+
+/*
+ * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
+ *
+ * Returns B_TRUE on success (including the no-change case) and
+ * B_FALSE for an unrecognized mode. Leaving loopback (IXGBE_LB_NONE)
+ * is implemented as a full chip reset.
+ */
+static boolean_t
+ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
+{
+ struct ixgbe_hw *hw;
+
+ /* already in the requested mode: nothing to do */
+ if (mode == ixgbe->loopback_mode)
+ return (B_TRUE);
+
+ hw = &ixgbe->hw;
+
+ ixgbe->loopback_mode = mode;
+
+ if (mode == IXGBE_LB_NONE) {
+ /*
+ * Reset the chip; wait for autonegotiation to complete
+ * during the reset, then restore the non-waiting default.
+ */
+ hw->phy.autoneg_wait_to_complete = B_TRUE;
+ (void) ixgbe_reset(ixgbe);
+ hw->phy.autoneg_wait_to_complete = B_FALSE;
+ return (B_TRUE);
+ }
+
+ mutex_enter(&ixgbe->gen_lock);
+
+ switch (mode) {
+ default:
+ mutex_exit(&ixgbe->gen_lock);
+ return (B_FALSE);
+
+ case IXGBE_LB_INTERNAL_MAC:
+ ixgbe_set_internal_mac_loopback(ixgbe);
+ break;
+ }
+
+ mutex_exit(&ixgbe->gen_lock);
+
+ return (B_TRUE);
+}
+
+/*
+ * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
+ *
+ * Sets the loopback bit in HLREG0, clears the link mode select field
+ * in AUTOC, and on 82598 powers down the Atlas analog Tx lanes so
+ * looped-back frames never reach the wire.
+ */
+static void
+ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg;
+ uint8_t atlas;
+
+ hw = &ixgbe->hw;
+
+ /*
+ * Setup MAC loopback
+ */
+ reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
+ reg |= IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
+
+ reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
+ reg &= ~IXGBE_AUTOC_LMS_MASK;
+ IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
+
+ /*
+ * Disable Atlas Tx lanes to keep packets in loopback and not on wire
+ * (read-modify-write of each analog power-down register)
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
+ &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
+ (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
+ atlas);
+
+ (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
+ &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+ (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
+ atlas);
+
+ (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
+ &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+ (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
+ atlas);
+
+ (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
+ &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+ (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
+ atlas);
+ }
+}
+
+#pragma inline(ixgbe_intr_rx_work)
+/*
+ * ixgbe_intr_rx_work - RX processing of ISR.
+ *
+ * Pulls received packets off the ring under rx_lock, then hands
+ * the chain to GLDv3 outside the lock.
+ */
+static void
+ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
+{
+ mblk_t *mp;
+
+ mutex_enter(&rx_ring->rx_lock);
+
+ mp = ixgbe_rx(rx_ring);
+ mutex_exit(&rx_ring->rx_lock);
+
+ if (mp != NULL)
+ mac_rx(rx_ring->ixgbe->mac_hdl, NULL, mp);
+}
+
+#pragma inline(ixgbe_intr_tx_work)
+/*
+ * ixgbe_intr_tx_work - TX processing of ISR.
+ */
+static void
+ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
+{
+ /*
+ * Recycle the tx descriptors
+ */
+ tx_ring->tx_recycle(tx_ring);
+
+ /*
+ * Schedule the re-transmit: if transmit was previously blocked
+ * and enough descriptors have been freed, tell the MAC layer
+ * it can resume sending.
+ */
+ if (tx_ring->reschedule &&
+ (tx_ring->tbd_free >= tx_ring->resched_thresh)) {
+ tx_ring->reschedule = B_FALSE;
+ mac_tx_update(tx_ring->ixgbe->mac_hdl);
+ IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
+ }
+}
+
+#pragma inline(ixgbe_intr_other_work)
+/*
+ * ixgbe_intr_other_work - Other processing of ISR.
+ *
+ * Handles the non-ring causes (link status change). The watchdog
+ * timer is stopped for the duration and restarted afterwards; the
+ * MAC layer is notified of a link change outside gen_lock.
+ */
+static void
+ixgbe_intr_other_work(ixgbe_t *ixgbe)
+{
+ boolean_t link_changed;
+
+ ixgbe_stop_watchdog_timer(ixgbe);
+
+ mutex_enter(&ixgbe->gen_lock);
+
+ /*
+ * Take care of link status change
+ */
+ link_changed = ixgbe_driver_link_check(ixgbe);
+
+ /*
+ * Get new phy state
+ */
+ ixgbe_get_hw_state(ixgbe);
+
+ mutex_exit(&ixgbe->gen_lock);
+
+ if (link_changed)
+ mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
+
+ ixgbe_start_watchdog_timer(ixgbe);
+}
+
+/*
+ * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
+ *
+ * All causes are handled under gen_lock; the resulting upcalls to
+ * the MAC layer (mac_rx, mac_tx_update, mac_link_update) are deferred
+ * until after the lock is dropped.
+ */
+static uint_t
+ixgbe_intr_legacy(void *arg1, void *arg2)
+{
+ _NOTE(ARGUNUSED(arg2));
+ ixgbe_t *ixgbe = (ixgbe_t *)arg1;
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ ixgbe_tx_ring_t *tx_ring;
+ uint32_t eicr;
+ mblk_t *mp;
+ boolean_t tx_reschedule;
+ boolean_t link_changed;
+ uint_t result;
+
+
+ mutex_enter(&ixgbe->gen_lock);
+
+ /* a suspended device cannot be the interrupt source */
+ if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
+ mutex_exit(&ixgbe->gen_lock);
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ mp = NULL;
+ tx_reschedule = B_FALSE;
+ link_changed = B_FALSE;
+
+ /*
+ * Any bit set in eicr: claim this interrupt
+ */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+ if (eicr) {
+ /*
+ * For legacy interrupt, we have only one interrupt,
+ * so we have only one rx ring and one tx ring enabled.
+ */
+ ASSERT(ixgbe->num_rx_rings == 1);
+ ASSERT(ixgbe->num_tx_rings == 1);
+
+ /*
+ * For legacy interrupt, we can't differentiate
+ * between tx and rx, so always clean both
+ */
+ if (eicr & IXGBE_EICR_RTX_QUEUE) {
+
+ /*
+ * Clean the rx descriptors
+ */
+ mp = ixgbe_rx(&ixgbe->rx_rings[0]);
+
+ /*
+ * Recycle the tx descriptors
+ */
+ tx_ring = &ixgbe->tx_rings[0];
+ tx_ring->tx_recycle(tx_ring);
+
+ /*
+ * Schedule the re-transmit
+ */
+ tx_reschedule = (tx_ring->reschedule &&
+ (tx_ring->tbd_free >= tx_ring->resched_thresh));
+ }
+
+ if (eicr & IXGBE_EICR_LSC) {
+
+ /* take care of link status change */
+ link_changed = ixgbe_driver_link_check(ixgbe);
+
+ /* Get new phy state */
+ ixgbe_get_hw_state(ixgbe);
+ }
+
+ result = DDI_INTR_CLAIMED;
+ } else {
+ /*
+ * No interrupt cause bits set: don't claim this interrupt.
+ */
+ result = DDI_INTR_UNCLAIMED;
+ }
+
+ mutex_exit(&ixgbe->gen_lock);
+
+ /*
+ * Do the following work outside of the gen_lock
+ * (tx_ring was set above whenever tx_reschedule is true)
+ */
+ if (mp != NULL)
+ mac_rx(ixgbe->mac_hdl, NULL, mp);
+
+ if (tx_reschedule) {
+ tx_ring->reschedule = B_FALSE;
+ mac_tx_update(ixgbe->mac_hdl);
+ IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
+ }
+
+ if (link_changed)
+ mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
+
+ return (result);
+}
+
+/*
+ * ixgbe_intr_msi - Interrupt handler for MSI.
+ *
+ * Always claims the interrupt (MSI is not shared), dispatching to
+ * the rx/tx/other work routines based on the cause bits in EICR.
+ */
+static uint_t
+ixgbe_intr_msi(void *arg1, void *arg2)
+{
+ _NOTE(ARGUNUSED(arg2));
+ ixgbe_t *ixgbe = (ixgbe_t *)arg1;
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ uint32_t eicr;
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+
+ /*
+ * For MSI interrupt, we have only one vector,
+ * so we have only one rx ring and one tx ring enabled.
+ */
+ ASSERT(ixgbe->num_rx_rings == 1);
+ ASSERT(ixgbe->num_tx_rings == 1);
+
+ /*
+ * For MSI interrupt, we can't differentiate
+ * between tx and rx, so always clean both.
+ */
+ if (eicr & IXGBE_EICR_RTX_QUEUE) {
+ ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
+ ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
+ }
+
+ if (eicr & IXGBE_EICR_LSC) {
+ ixgbe_intr_other_work(ixgbe);
+ }
+
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * ixgbe_intr_rx - Interrupt handler for rx.
+ *
+ * arg1 is the per-vector ixgbe_ring_vector_t; its rx_map bitmap
+ * records which rx rings this MSI-X vector services.
+ */
+static uint_t
+ixgbe_intr_rx(void *arg1, void *arg2)
+{
+ _NOTE(ARGUNUSED(arg2));
+ ixgbe_ring_vector_t *vect = (ixgbe_ring_vector_t *)arg1;
+ ixgbe_t *ixgbe = vect->ixgbe;
+ int r_idx;
+
+ /*
+ * clean each rx ring that has its bit set in the map
+ * (bt_getlowbit returns -1 when no more bits are set)
+ */
+ r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
+
+ while (r_idx >= 0) {
+ ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
+ r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
+ (ixgbe->num_rx_rings - 1));
+ }
+
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * ixgbe_intr_tx_other - Interrupt handler for both tx and other.
+ *
+ * Always look for Tx cleanup work. Only look for other work if the right
+ * bits are set in the Interrupt Cause Register.
+ */
+static uint_t
+ixgbe_intr_tx_other(void *arg1, void *arg2)
+{
+ _NOTE(ARGUNUSED(arg2));
+ ixgbe_t *ixgbe = (ixgbe_t *)arg1;
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ uint32_t eicr;
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+
+ /*
+ * Always look for Tx cleanup work. We don't have separate
+ * transmit vectors, so we have only one tx ring enabled.
+ */
+ ASSERT(ixgbe->num_tx_rings == 1);
+ ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
+
+ /*
+ * Check for "other" causes (currently: link status change).
+ */
+ if (eicr & IXGBE_EICR_LSC) {
+ ixgbe_intr_other_work(ixgbe);
+ }
+
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * ixgbe_alloc_intrs - Allocate interrupts for the driver.
+ *
+ * Normal sequence is to try MSI-X; if not successful, try MSI;
+ * if not successful, try Legacy.
+ * ixgbe->intr_force can be used to force sequence to start with
+ * any of the 3 types.
+ * If MSI-X is not used, number of tx/rx rings is forced to 1.
+ *
+ * Returns IXGBE_SUCCESS if any type was allocated, else IXGBE_FAILURE.
+ */
+static int
+ixgbe_alloc_intrs(ixgbe_t *ixgbe)
+{
+ dev_info_t *devinfo;
+ int intr_types;
+ int rc;
+
+ devinfo = ixgbe->dip;
+
+ /*
+ * Get supported interrupt types
+ */
+ rc = ddi_intr_get_supported_types(devinfo, &intr_types);
+
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe,
+ "Get supported interrupt types failed: %d", rc);
+ return (IXGBE_FAILURE);
+ }
+ IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
+
+ ixgbe->intr_type = 0;
+
+ /*
+ * Install MSI-X interrupts
+ */
+ if ((intr_types & DDI_INTR_TYPE_MSIX) &&
+ (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
+ rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
+ if (rc == IXGBE_SUCCESS)
+ return (IXGBE_SUCCESS);
+
+ ixgbe_log(ixgbe,
+ "Allocate MSI-X failed, trying MSI interrupts...");
+ }
+
+ /*
+ * MSI-X not used, force rings to 1
+ */
+ ixgbe->num_rx_rings = 1;
+ ixgbe->num_tx_rings = 1;
+ ixgbe_log(ixgbe,
+ "MSI-X not used, force rx and tx queue number to 1");
+
+ /*
+ * Install MSI interrupts
+ */
+ if ((intr_types & DDI_INTR_TYPE_MSI) &&
+ (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
+ rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
+ if (rc == IXGBE_SUCCESS)
+ return (IXGBE_SUCCESS);
+
+ ixgbe_log(ixgbe,
+ "Allocate MSI failed, trying Legacy interrupts...");
+ }
+
+ /*
+ * Install legacy interrupts
+ */
+ if (intr_types & DDI_INTR_TYPE_FIXED) {
+ rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
+ if (rc == IXGBE_SUCCESS)
+ return (IXGBE_SUCCESS);
+
+ ixgbe_log(ixgbe,
+ "Allocate Legacy interrupts failed");
+ }
+
+ /*
+ * If none of the 3 types succeeded, return failure
+ */
+ return (IXGBE_FAILURE);
+}
+
+/*
+ * ixgbe_alloc_intr_handles - Allocate interrupt handles.
+ *
+ * For legacy and MSI, only 1 handle is needed. For MSI-X,
+ * if fewer than 2 handles are available, return failure.
+ * Upon success, this sets the number of Rx rings to a number that
+ * matches the handles available for Rx interrupts.
+ *
+ * On any failure after allocation has begun, all allocated handles
+ * are released via ixgbe_rem_intrs() before returning IXGBE_FAILURE.
+ */
+static int
+ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
+{
+ dev_info_t *devinfo;
+ int request, count, avail, actual;
+ int rx_rings, minimum;
+ int rc;
+
+ devinfo = ixgbe->dip;
+
+ /*
+ * Currently only 1 tx ring is supported. More tx rings
+ * will be supported with future enhancement.
+ */
+ if (ixgbe->num_tx_rings > 1) {
+ ixgbe->num_tx_rings = 1;
+ ixgbe_log(ixgbe,
+ "Use only 1 MSI-X vector for tx, "
+ "force tx queue number to 1");
+ }
+
+ switch (intr_type) {
+ case DDI_INTR_TYPE_FIXED:
+ request = 1; /* Request 1 legacy interrupt handle */
+ minimum = 1;
+ IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
+ break;
+
+ case DDI_INTR_TYPE_MSI:
+ request = 1; /* Request 1 MSI interrupt handle */
+ minimum = 1;
+ IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
+ break;
+
+ case DDI_INTR_TYPE_MSIX:
+ /*
+ * Best number of vectors for the adapter is
+ * # rx rings + # tx rings + 1 for other
+ * But currently we only support number of vectors of
+ * # rx rings + 1 for tx & other
+ */
+ request = ixgbe->num_rx_rings + 1;
+ minimum = 2;
+ IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
+ break;
+
+ default:
+ ixgbe_log(ixgbe,
+ "invalid call to ixgbe_alloc_intr_handles(): %d\n",
+ intr_type);
+ return (IXGBE_FAILURE);
+ }
+ IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
+ request, minimum);
+
+ /*
+ * Get number of supported interrupts
+ */
+ rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
+ if ((rc != DDI_SUCCESS) || (count < minimum)) {
+ ixgbe_log(ixgbe,
+ "Get interrupt number failed. Return: %d, count: %d",
+ rc, count);
+ return (IXGBE_FAILURE);
+ }
+ IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
+
+ /*
+ * Get number of available interrupts
+ */
+ rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
+ if ((rc != DDI_SUCCESS) || (avail < minimum)) {
+ ixgbe_log(ixgbe,
+ "Get interrupt available number failed. "
+ "Return: %d, available: %d", rc, avail);
+ return (IXGBE_FAILURE);
+ }
+ IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail);
+
+ /* trim the request to what is actually available */
+ if (avail < request) {
+ ixgbe_log(ixgbe, "Request %d handles, %d available",
+ request, avail);
+ request = avail;
+ }
+
+ actual = 0;
+ ixgbe->intr_cnt = 0;
+
+ /*
+ * Allocate an array of interrupt handles
+ * (intr_size is remembered so ixgbe_rem_intrs() can free it)
+ */
+ ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
+ ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
+
+ rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
+ request, &actual, DDI_INTR_ALLOC_NORMAL);
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe, "Allocate interrupts failed. "
+ "return: %d, request: %d, actual: %d",
+ rc, request, actual);
+ goto alloc_handle_fail;
+ }
+ IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
+
+ ixgbe->intr_cnt = actual;
+
+ /*
+ * Now we know the actual number of vectors. Here we assume that
+ * tx and other will share 1 vector and all remaining (must be at
+ * least 1 remaining) will be used for rx.
+ */
+ if (actual < minimum) {
+ ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
+ actual);
+ goto alloc_handle_fail;
+ }
+
+ /*
+ * For MSI-X, actual might force us to reduce number of rx rings
+ */
+ if (intr_type == DDI_INTR_TYPE_MSIX) {
+ rx_rings = actual - 1;
+ if (rx_rings < ixgbe->num_rx_rings) {
+ ixgbe_log(ixgbe,
+ "MSI-X vectors force Rx queue number to %d",
+ rx_rings);
+ ixgbe->num_rx_rings = rx_rings;
+ }
+ }
+
+ /*
+ * Get priority for first vector, assume remaining are all the same
+ */
+ rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe,
+ "Get interrupt priority failed: %d", rc);
+ goto alloc_handle_fail;
+ }
+
+ rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe,
+ "Get interrupt cap failed: %d", rc);
+ goto alloc_handle_fail;
+ }
+
+ ixgbe->intr_type = intr_type;
+
+ return (IXGBE_SUCCESS);
+
+alloc_handle_fail:
+ ixgbe_rem_intrs(ixgbe);
+
+ return (IXGBE_FAILURE);
+}
+
+/*
+ * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
+ *
+ * Before adding the interrupt handlers, the interrupt vectors have
+ * been allocated, and the rx/tx rings have also been allocated.
+ *
+ * Returns IXGBE_SUCCESS or IXGBE_FAILURE; on failure in the MSI-X
+ * path, handlers already added are removed before returning.
+ */
+static int
+ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
+{
+ ixgbe_rx_ring_t *rx_ring;
+ int vector;
+ int rc;
+ int i;
+
+ vector = 0;
+
+ switch (ixgbe->intr_type) {
+ case DDI_INTR_TYPE_MSIX:
+ /*
+ * Add interrupt handler for tx + other
+ * (vector 0 is always tx + other; see ixgbe_alloc_intr_handles)
+ */
+ rc = ddi_intr_add_handler(ixgbe->htable[vector],
+ (ddi_intr_handler_t *)ixgbe_intr_tx_other,
+ (void *)ixgbe, NULL);
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe,
+ "Add tx/other interrupt handler failed: %d", rc);
+ return (IXGBE_FAILURE);
+ }
+ vector++;
+
+ /*
+ * Add interrupt handler for each rx ring
+ */
+ for (i = 0; i < ixgbe->num_rx_rings; i++) {
+ rx_ring = &ixgbe->rx_rings[i];
+
+ /*
+ * install pointer to vect_map[vector]
+ */
+ rc = ddi_intr_add_handler(ixgbe->htable[vector],
+ (ddi_intr_handler_t *)ixgbe_intr_rx,
+ (void *)&ixgbe->vect_map[vector], NULL);
+
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe,
+ "Add rx interrupt handler failed. "
+ "return: %d, rx ring: %d", rc, i);
+ /* unwind the handlers added so far */
+ for (vector--; vector >= 0; vector--) {
+ (void) ddi_intr_remove_handler(
+ ixgbe->htable[vector]);
+ }
+ return (IXGBE_FAILURE);
+ }
+
+ rx_ring->intr_vector = vector;
+
+ vector++;
+ }
+ break;
+
+ case DDI_INTR_TYPE_MSI:
+ /*
+ * Add interrupt handlers for the only vector
+ */
+ rc = ddi_intr_add_handler(ixgbe->htable[vector],
+ (ddi_intr_handler_t *)ixgbe_intr_msi,
+ (void *)ixgbe, NULL);
+
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe,
+ "Add MSI interrupt handler failed: %d", rc);
+ return (IXGBE_FAILURE);
+ }
+
+ rx_ring = &ixgbe->rx_rings[0];
+ rx_ring->intr_vector = vector;
+
+ vector++;
+ break;
+
+ case DDI_INTR_TYPE_FIXED:
+ /*
+ * Add interrupt handlers for the only vector
+ */
+ rc = ddi_intr_add_handler(ixgbe->htable[vector],
+ (ddi_intr_handler_t *)ixgbe_intr_legacy,
+ (void *)ixgbe, NULL);
+
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe,
+ "Add legacy interrupt handler failed: %d", rc);
+ return (IXGBE_FAILURE);
+ }
+
+ rx_ring = &ixgbe->rx_rings[0];
+ rx_ring->intr_vector = vector;
+
+ vector++;
+ break;
+
+ default:
+ return (IXGBE_FAILURE);
+ }
+
+ ASSERT(vector == ixgbe->intr_cnt);
+
+ return (IXGBE_SUCCESS);
+}
+
+#pragma inline(ixgbe_map_rxring_to_vector)
+/*
+ * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
+ *
+ * Records the mapping in both directions: the vector's rx_map bitmap
+ * (consumed by ixgbe_intr_rx) and the ring's vect_bit.
+ */
+static void
+ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
+{
+ ixgbe->vect_map[v_idx].ixgbe = ixgbe;
+
+ /*
+ * Set bit in map
+ */
+ BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
+
+ /*
+ * Count bits set
+ */
+ ixgbe->vect_map[v_idx].rxr_cnt++;
+
+ /*
+ * Remember bit position
+ */
+ ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
+}
+
+#pragma inline(ixgbe_map_txring_to_vector)
+/*
+ * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
+ *
+ * Mirror of ixgbe_map_rxring_to_vector for the tx side.
+ */
+static void
+ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
+{
+ ixgbe->vect_map[v_idx].ixgbe = ixgbe;
+
+ /*
+ * Set bit in map
+ */
+ BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
+
+ /*
+ * Count bits set
+ */
+ ixgbe->vect_map[v_idx].txr_cnt++;
+
+ /*
+ * Remember bit position
+ */
+ ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
+}
+
+/*
+ * ixgbe_set_ivar - Set the given entry in the given interrupt vector
+ * allocation register (IVAR).
+ *
+ * Each 32-bit IVAR register packs four 8-bit entries; int_alloc_entry
+ * selects the register (bits 7:2) and the byte lane within it
+ * (bits 1:0). The valid bit is OR-ed into the vector before writing.
+ */
+static void
+ixgbe_set_ivar(ixgbe_t *ixgbe, uint16_t int_alloc_entry, uint8_t msix_vector)
+{
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ u32 ivar, index;
+
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ index = (int_alloc_entry >> 2) & 0x1F;
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+ /* clear then set the 8-bit field for this entry */
+ ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
+ ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
+}
+
+/*
+ * ixgbe_map_rings_to_vectors - Map descriptor rings to interrupt vectors.
+ *
+ * For msi-x, this currently implements only the scheme which is
+ * 1 vector for tx + other, 1 vector for each rx ring.
+ */
+static int
+ixgbe_map_rings_to_vectors(ixgbe_t *ixgbe)
+{
+ int i, vector = 0;
+ int vect_remain = ixgbe->intr_cnt;
+
+ /* initialize vector map */
+ bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
+
+ /*
+ * non-MSI-X case is very simple: all interrupts on vector 0
+ */
+ if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
+ ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
+ ixgbe_map_txring_to_vector(ixgbe, 0, 0);
+ return (IXGBE_SUCCESS);
+ }
+
+ /*
+ * Ring/vector mapping for MSI-X
+ */
+
+ /*
+ * Map vector 0 to tx
+ */
+ ixgbe_map_txring_to_vector(ixgbe, 0, vector++);
+ vect_remain--;
+
+ /*
+ * Map remaining vectors to rx rings, one ring per vector
+ */
+ for (i = 0; i < vect_remain; i++) {
+ ixgbe_map_rxring_to_vector(ixgbe, i, vector++);
+ }
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
+ *
+ * This relies on queue/vector mapping already set up in the
+ * vect_map[] structures
+ */
+static void
+ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
+{
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ ixgbe_ring_vector_t *vect; /* vector bitmap */
+ int r_idx; /* ring index */
+ int v_idx; /* vector index */
+
+ /*
+ * Clear any previous entries
+ */
+ for (v_idx = 0; v_idx < IXGBE_IVAR_REG_NUM; v_idx++)
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
+
+ /*
+ * "Other" is always on vector 0
+ */
+ ixgbe_set_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0);
+
+ /*
+ * For each interrupt vector, populate the IVAR table from the
+ * rx_map/tx_map bitmaps built by ixgbe_map_rings_to_vectors()
+ */
+ for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
+ vect = &ixgbe->vect_map[v_idx];
+
+ /*
+ * For each rx ring bit set
+ */
+ r_idx = bt_getlowbit(vect->rx_map, 0,
+ (ixgbe->num_rx_rings - 1));
+
+ while (r_idx >= 0) {
+ ixgbe_set_ivar(ixgbe, IXGBE_IVAR_RX_QUEUE(r_idx),
+ v_idx);
+ r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
+ (ixgbe->num_rx_rings - 1));
+ }
+
+ /*
+ * For each tx ring bit set
+ */
+ r_idx = bt_getlowbit(vect->tx_map, 0,
+ (ixgbe->num_tx_rings - 1));
+
+ while (r_idx >= 0) {
+ ixgbe_set_ivar(ixgbe, IXGBE_IVAR_TX_QUEUE(r_idx),
+ v_idx);
+ r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
+ (ixgbe->num_tx_rings - 1));
+ }
+ }
+}
+
+/*
+ * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
+ *
+ * Best-effort: a failed removal is logged (debug builds) and the
+ * loop continues with the remaining handles.
+ */
+static void
+ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
+{
+ int i;
+ int rc;
+
+ for (i = 0; i < ixgbe->intr_cnt; i++) {
+ rc = ddi_intr_remove_handler(ixgbe->htable[i]);
+ if (rc != DDI_SUCCESS) {
+ IXGBE_DEBUGLOG_1(ixgbe,
+ "Remove intr handler failed: %d", rc);
+ }
+ }
+}
+
+/*
+ * ixgbe_rem_intrs - Remove the allocated interrupts.
+ *
+ * Frees each handle (best-effort) and then releases the handle
+ * array itself; intr_size was recorded at allocation time.
+ */
+static void
+ixgbe_rem_intrs(ixgbe_t *ixgbe)
+{
+ int i;
+ int rc;
+
+ for (i = 0; i < ixgbe->intr_cnt; i++) {
+ rc = ddi_intr_free(ixgbe->htable[i]);
+ if (rc != DDI_SUCCESS) {
+ IXGBE_DEBUGLOG_1(ixgbe,
+ "Free intr failed: %d", rc);
+ }
+ }
+
+ kmem_free(ixgbe->htable, ixgbe->intr_size);
+ ixgbe->htable = NULL;
+}
+
+/*
+ * ixgbe_enable_intrs - Enable all the ddi interrupts.
+ *
+ * Uses block enable when the capability reports DDI_INTR_FLAG_BLOCK,
+ * otherwise enables each handle individually. Returns IXGBE_FAILURE
+ * on the first error.
+ */
+static int
+ixgbe_enable_intrs(ixgbe_t *ixgbe)
+{
+ int i;
+ int rc;
+
+ /*
+ * Enable interrupts
+ */
+ if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
+ /*
+ * Call ddi_intr_block_enable() for MSI
+ */
+ rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe,
+ "Enable block intr failed: %d", rc);
+ return (IXGBE_FAILURE);
+ }
+ } else {
+ /*
+ * Call ddi_intr_enable() for Legacy/MSI non block enable
+ */
+ for (i = 0; i < ixgbe->intr_cnt; i++) {
+ rc = ddi_intr_enable(ixgbe->htable[i]);
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe,
+ "Enable intr failed: %d", rc);
+ return (IXGBE_FAILURE);
+ }
+ }
+ }
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_disable_intrs - Disable all the interrupts.
+ *
+ * Counterpart of ixgbe_enable_intrs(); same block/individual split.
+ */
+static int
+ixgbe_disable_intrs(ixgbe_t *ixgbe)
+{
+ int i;
+ int rc;
+
+ /*
+ * Disable all interrupts
+ */
+ if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
+ rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe,
+ "Disable block intr failed: %d", rc);
+ return (IXGBE_FAILURE);
+ }
+ } else {
+ for (i = 0; i < ixgbe->intr_cnt; i++) {
+ rc = ddi_intr_disable(ixgbe->htable[i]);
+ if (rc != DDI_SUCCESS) {
+ ixgbe_log(ixgbe,
+ "Disable intr failed: %d", rc);
+ return (IXGBE_FAILURE);
+ }
+ }
+ }
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
+ *
+ * Reads the link partner and local autonegotiation capabilities from
+ * the PCS 1G registers (only meaningful when PCS_1G is enabled).
+ */
+static void
+ixgbe_get_hw_state(ixgbe_t *ixgbe)
+{
+ struct ixgbe_hw *hw = &ixgbe->hw;
+ uint32_t links;
+ uint32_t pcs1g_anlp = 0;
+ uint32_t pcs1g_ana = 0;
+
+ ASSERT(mutex_owned(&ixgbe->gen_lock));
+ ixgbe->param_lp_1000fdx_cap = 0;
+ ixgbe->param_lp_100fdx_cap = 0;
+
+ links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links & IXGBE_LINKS_PCS_1G_EN) {
+ pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+ pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
+ /*
+ * NOTE(review): both the 1000fdx and 100fdx link-partner
+ * caps are derived from the same LPFD bit (and both local
+ * caps below from the same FDC bit) — confirm intended.
+ */
+ ixgbe->param_lp_1000fdx_cap =
+ (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
+ ixgbe->param_lp_100fdx_cap =
+ (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
+ }
+
+ ixgbe->param_1000fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0;
+ ixgbe->param_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0;
+}
+
+/*
+ * ixgbe_get_driver_control - Notify that driver is in control of device.
+ *
+ * Sets the DRV_LOAD bit in CTRL_EXT; paired with
+ * ixgbe_release_driver_control().
+ */
+static void
+ixgbe_get_driver_control(struct ixgbe_hw *hw)
+{
+ uint32_t ctrl_ext;
+
+ /*
+ * Notify firmware that driver is in control of device
+ */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+}
+
+/*
+ * ixgbe_release_driver_control - Notify that driver is no longer in control
+ * of device.
+ *
+ * Clears the DRV_LOAD bit set by ixgbe_get_driver_control().
+ */
+static void
+ixgbe_release_driver_control(struct ixgbe_hw *hw)
+{
+ uint32_t ctrl_ext;
+
+ /*
+ * Notify firmware that driver is no longer in control of device
+ */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+}
+
+/*
+ * ixgbe_atomic_reserve - Atomic decrease operation.
+ *
+ * Atomically subtracts n from *count_p using a compare-and-swap loop.
+ * Returns the new value on success, or -1 (leaving *count_p unchanged)
+ * if fewer than n are available.
+ */
+int
+ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
+{
+ uint32_t oldval;
+ uint32_t newval;
+
+ /*
+ * ATOMICALLY: retry until the CAS observes the value we read
+ */
+ do {
+ oldval = *count_p;
+ if (oldval < n)
+ return (-1);
+ newval = oldval - n;
+ } while (atomic_cas_32(count_p, oldval, newval) != oldval);
+
+ return (newval);
+}
+
+/*
+ * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
+ *
+ * Iterator callback for the shared code: returns the current address
+ * and advances *upd_ptr to the next 6-byte entry.
+ */
+static uint8_t *
+ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
+{
+ _NOTE(ARGUNUSED(hw));
+ _NOTE(ARGUNUSED(vmdq));
+ uint8_t *addr = *upd_ptr;
+ uint8_t *new_ptr;
+
+ new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
+ *upd_ptr = new_ptr;
+ return (addr);
+}
+
+/*
+ * FMA support
+ */
+
+/*
+ * ixgbe_check_acc_handle - Get (and clear) the FMA status of a
+ * register access handle. Returns DDI_FM_OK when no error is pending.
+ */
+int
+ixgbe_check_acc_handle(ddi_acc_handle_t handle)
+{
+ ddi_fm_error_t de;
+
+ ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
+ ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
+ return (de.fme_status);
+}
+
+/*
+ * ixgbe_check_dma_handle - Get the FMA status of a DMA handle.
+ * Note: unlike the access-handle check above, the error is not cleared.
+ */
+int
+ixgbe_check_dma_handle(ddi_dma_handle_t handle)
+{
+ ddi_fm_error_t de;
+
+ ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
+ return (de.fme_status);
+}
+
+/*
+ * ixgbe_fm_error_cb - The IO fault service error handling callback function.
+ */
+static int
+ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
+{
+ _NOTE(ARGUNUSED(impl_data));
+ /*
+ * as the driver can always deal with an error in any dma or
+ * access handle, we can just return the fme_status value.
+ */
+ pci_ereport_post(dip, err, NULL);
+ return (err->fme_status);
+}
+
+/*
+ * ixgbe_fm_init - Register with the IO Fault Services framework
+ * according to the capabilities in ixgbe->fm_capabilities.
+ */
+static void
+ixgbe_fm_init(ixgbe_t *ixgbe)
+{
+ ddi_iblock_cookie_t iblk;
+ int fma_acc_flag, fma_dma_flag;
+
+ /*
+ * Only register with IO Fault Services if we have some capability
+ */
+ if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
+ ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
+ fma_acc_flag = 1;
+ } else {
+ ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
+ fma_acc_flag = 0;
+ }
+
+ if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
+ fma_dma_flag = 1;
+ } else {
+ fma_dma_flag = 0;
+ }
+
+ /* propagate the flags to the access/DMA attribute setup */
+ ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag);
+
+ if (ixgbe->fm_capabilities) {
+
+ /*
+ * Register capabilities with IO Fault Services
+ */
+ ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
+
+ /*
+ * Initialize pci ereport capabilities if ereport capable
+ */
+ if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
+ DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
+ pci_ereport_setup(ixgbe->dip);
+
+ /*
+ * Register error callback if error callback capable
+ */
+ if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
+ ddi_fm_handler_register(ixgbe->dip,
+ ixgbe_fm_error_cb, (void*) ixgbe);
+ }
+}
+
+/*
+ * ixgbe_fm_fini - Tear down everything ixgbe_fm_init() registered,
+ * in the reverse order of setup.
+ */
+static void
+ixgbe_fm_fini(ixgbe_t *ixgbe)
+{
+ /*
+ * Only unregister FMA capabilities if they are registered
+ */
+ if (ixgbe->fm_capabilities) {
+
+ /*
+ * Release any resources allocated by pci_ereport_setup()
+ */
+ if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
+ DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
+ pci_ereport_teardown(ixgbe->dip);
+
+ /*
+ * Un-register error callback if error callback capable
+ */
+ if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
+ ddi_fm_handler_unregister(ixgbe->dip);
+
+ /*
+ * Unregister from IO Fault Service
+ */
+ ddi_fm_fini(ixgbe->dip);
+ }
+}
+
+/*
+ * ixgbe_fm_ereport - Post a device ereport with the given detail class.
+ * Silently does nothing if the driver is not ereport-capable.
+ */
+void
+ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
+{
+ uint64_t ena;
+ char buf[FM_MAX_CLASS];
+
+ (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
+ ena = fm_ena_generate(0, FM_ENA_FMT1);
+ if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
+ ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
+ }
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_ndd.c b/usr/src/uts/common/io/ixgbe/ixgbe_ndd.c
new file mode 100644
index 0000000000..b60897f64b
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_ndd.c
@@ -0,0 +1,356 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_sw.h"
+
+/* Function prototypes */
+static int ixgbe_nd_get(queue_t *, mblk_t *, caddr_t, cred_t *);
+static int ixgbe_nd_set(queue_t *, mblk_t *, char *, caddr_t, cred_t *);
+static int ixgbe_nd_param_load(ixgbe_t *);
+static void ixgbe_nd_get_param_val(nd_param_t *);
+static void ixgbe_nd_set_param_val(nd_param_t *, uint32_t);
+
+/*
+ * Notes:
+ * The first character of the <name> field encodes the read/write
+ * status of the parameter:
+ * '-' => read-only
+ * '+' => read/write,
+ * '?' => read/write on copper, read-only on serdes
+ * '!' => invisible!
+ *
+ * For writable parameters, we check for a driver property with the
+ * same name; if found, and its value is in range, we initialise
+ * the parameter from the property, overriding the default in the
+ * table below.
+ *
+ * A NULL in the <name> field terminates the array.
+ *
+ * The <info> field is used here to provide the index of the
+ * parameter to be initialised; thus it doesn't matter whether
+ * this table is kept ordered or not.
+ *
+ * The <info> field in the per-instance copy, on the other hand,
+ * is used to count assignments so that we can tell when a magic
+ * parameter has been set via ndd (see ixgbe_nd_set()).
+ */
+static const nd_param_t nd_template[] = {
+/* ixgbe info min max init r/w+name */
+
+/* Our hardware capabilities */
+{ NULL, PARAM_AUTONEG_CAP, 0, 1, 1, "-autoneg_cap" },
+{ NULL, PARAM_PAUSE_CAP, 0, 1, 1, "-pause_cap" },
+{ NULL, PARAM_ASYM_PAUSE_CAP, 0, 1, 1, "-asym_pause_cap" },
+{ NULL, PARAM_10000FDX_CAP, 0, 1, 1, "-10000fdx_cap" },
+{ NULL, PARAM_1000FDX_CAP, 0, 1, 1, "-1000fdx_cap" },
+{ NULL, PARAM_100FDX_CAP, 0, 1, 1, "-100fdx_cap" },
+{ NULL, PARAM_REM_FAULT, 0, 1, 0, "-rem_fault" },
+
+/* Our advertised capabilities */
+{ NULL, PARAM_ADV_AUTONEG_CAP, 0, 1, 1, "?adv_autoneg_cap" },
+{ NULL, PARAM_ADV_PAUSE_CAP, 0, 1, 1, "-adv_pause_cap" },
+{ NULL, PARAM_ADV_ASYM_PAUSE_CAP, 0, 1, 1, "-adv_asym_pause_cap" },
+{ NULL, PARAM_ADV_10000FDX_CAP, 0, 1, 1, "?adv_10000fdx_cap" },
+{ NULL, PARAM_ADV_1000FDX_CAP, 0, 1, 1, "?adv_1000fdx_cap" },
+{ NULL, PARAM_ADV_100FDX_CAP, 0, 1, 1, "?adv_100fdx_cap" },
+{ NULL, PARAM_ADV_REM_FAULT, 0, 1, 0, "-adv_rem_fault" },
+
+/* Partner's advertised capabilities */
+{ NULL, PARAM_LP_AUTONEG_CAP, 0, 1, 0, "-lp_autoneg_cap" },
+{ NULL, PARAM_LP_PAUSE_CAP, 0, 1, 0, "-lp_pause_cap" },
+{ NULL, PARAM_LP_ASYM_PAUSE_CAP, 0, 1, 0, "-lp_asym_pause_cap" },
+{ NULL, PARAM_LP_1000FDX_CAP, 0, 1, 0, "-lp_1000fdx_cap" },
+{ NULL, PARAM_LP_100FDX_CAP, 0, 1, 0, "-lp_100fdx_cap" },
+{ NULL, PARAM_LP_REM_FAULT, 0, 1, 0, "-lp_rem_fault" },
+
+/* Current operating modes */
+{ NULL, PARAM_LINK_STATUS, 0, 1, 0, "-link_status" },
+{ NULL, PARAM_LINK_SPEED, 0, 1000, 0, "-link_speed" },
+{ NULL, PARAM_LINK_DUPLEX, 0, 2, 0, "-link_duplex" },
+
+/* Terminator */
+{ NULL, PARAM_COUNT, 0, 0, 0, NULL }
+};
+
+/*
+ * ixgbe_nd_get - Ndd get parameter values.
+ */
+static int
+ixgbe_nd_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
+{
+ nd_param_t *nd = (nd_param_t *)(uintptr_t)cp;
+ _NOTE(ARGUNUSED(q));
+ _NOTE(ARGUNUSED(credp));
+
+ ixgbe_nd_get_param_val(nd);
+ (void) mi_mpprintf(mp, "%d", nd->val);
+
+ return (0);
+}
+
+/*
+ * ixgbe_nd_set - Ndd set parameter values.
+ */
+static int
+ixgbe_nd_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
+{
+ nd_param_t *nd = (nd_param_t *)(uintptr_t)cp;
+ long new_value;
+ char *end;
+ _NOTE(ARGUNUSED(q));
+ _NOTE(ARGUNUSED(mp));
+ _NOTE(ARGUNUSED(credp));
+
+ new_value = mi_strtol(value, &end, 10);
+ if (end == value)
+ return (EINVAL);
+ if (new_value < nd->min || new_value > nd->max)
+ return (EINVAL);
+
+ ixgbe_nd_set_param_val(nd, new_value);
+
+ return (0);
+}
+
+/*
+ * ixgbe_nd_param_load - Ndd load parameter values.
+ */
+static int
+ixgbe_nd_param_load(ixgbe_t *ixgbe)
+{
+ const nd_param_t *tmpnd;
+ nd_param_t *nd;
+ caddr_t *ndd;
+ pfi_t setfn;
+ char *nm;
+ int value;
+
+ ndd = &ixgbe->nd_data;
+ ASSERT(*ndd == NULL);
+
+ for (tmpnd = nd_template; tmpnd->name != NULL; ++tmpnd) {
+ /*
+ * Copy the template from nd_template[] into the
+ * proper slot in the per-instance parameters,
+ * then register the parameter with nd_load()
+ */
+ nd = &ixgbe->nd_params[tmpnd->info];
+ *nd = *tmpnd;
+ nd->private = ixgbe;
+ ixgbe_nd_get_param_val(nd);
+
+ nm = &nd->name[0];
+ setfn = ixgbe_nd_set;
+
+ if (ixgbe->hw.phy.media_type != ixgbe_media_type_copper) {
+ switch (*nm) {
+ default:
+ break;
+
+ case '?':
+ setfn = NULL;
+ break;
+ }
+ }
+
+ switch (*nm) {
+ default:
+ case '!':
+ continue;
+
+ case '+':
+ case '?':
+ break;
+
+ case '-':
+ setfn = NULL;
+ break;
+ }
+
+ if (!nd_load(ndd, ++nm, ixgbe_nd_get, setfn, (caddr_t)nd))
+ goto nd_fail;
+
+ /*
+ * If the parameter is writable, and there's a property
+ * with the same name, and its value is in range, we use
+ * it to initialise the parameter. If it exists but is
+ * out of range, it's ignored.
+ */
+ if (setfn && IXGBE_PROP_EXISTS(ixgbe->dip, nm)) {
+ value = IXGBE_PROP_GET_INT(ixgbe->dip, nm);
+ if (value >= nd->min && value <= nd->max)
+ nd->val = value;
+ }
+ }
+
+ return (IXGBE_SUCCESS);
+
+nd_fail:
+ ixgbe_log(ixgbe,
+ "ixgbe_nd_param_load: failed at index %d [info %d]",
+ (int)(tmpnd - nd_template), tmpnd->info);
+ nd_free(ndd);
+ return (IXGBE_FAILURE);
+}
+
+/*
+ * ixgbe_nd_get_param_val - Get parameter values.
+ */
+static void
+ixgbe_nd_get_param_val(nd_param_t *nd)
+{
+ ixgbe_t *ixgbe = (ixgbe_t *)nd->private;
+
+ mutex_enter(&ixgbe->gen_lock);
+
+ switch (nd->info) {
+ case PARAM_LINK_STATUS:
+ nd->val = (ixgbe->link_state == LINK_STATE_UP) ? 1 : 0;
+ break;
+ case PARAM_LINK_SPEED:
+ nd->val = ixgbe->link_speed;
+ break;
+ case PARAM_LINK_DUPLEX:
+ nd->val = ixgbe->link_duplex;
+ break;
+ default:
+ break;
+ }
+
+ mutex_exit(&ixgbe->gen_lock);
+
+ if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK)
+ ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_UNAFFECTED);
+}
+
+/*
+ * ixgbe_nd_set_param_val - Set parameter values.
+ */
+static void
+ixgbe_nd_set_param_val(nd_param_t *nd, uint32_t value)
+{
+ ixgbe_t *ixgbe = (ixgbe_t *)nd->private;
+
+ mutex_enter(&ixgbe->gen_lock);
+
+ if (nd->val == value) {
+ mutex_exit(&ixgbe->gen_lock);
+ return;
+ }
+
+ switch (nd->info) {
+ case PARAM_ADV_AUTONEG_CAP:
+ case PARAM_ADV_10000FDX_CAP:
+ case PARAM_ADV_1000FDX_CAP:
+ case PARAM_ADV_100FDX_CAP:
+ nd->val = value;
+ (void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_exit(&ixgbe->gen_lock);
+
+ if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK)
+ ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
+}
+
+/*
+ * ixgbe_nd_init - Init for ndd support.
+ */
+int
+ixgbe_nd_init(ixgbe_t *ixgbe)
+{
+ /*
+ * Register all the per-instance properties, initialising
+ * them from the table above or from driver properties set
+ * in the .conf file
+ */
+ if (ixgbe_nd_param_load(ixgbe) != IXGBE_SUCCESS)
+ return (IXGBE_FAILURE);
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_nd_cleanup - Free the Named Dispatch Table by calling nd_free
+ */
+void
+ixgbe_nd_cleanup(ixgbe_t *ixgbe)
+{
+ nd_free(&ixgbe->nd_data);
+}
+
+/*
+ * ixgbe_nd_ioctl - Ndd ioctl.
+ */
+enum ioc_reply
+ixgbe_nd_ioctl(ixgbe_t *ixgbe, queue_t *q,
+ mblk_t *mp, struct iocblk *ioc)
+{
+ boolean_t ok;
+ int cmd;
+
+ cmd = ioc->ioc_cmd;
+ switch (cmd) {
+ default:
+ /* NOTREACHED */
+ ASSERT(FALSE);
+ return (IOC_INVAL);
+
+ case ND_GET:
+ /*
+ * If nd_getset() returns B_FALSE, the command was
+ * not valid (e.g. unknown name), so we just tell the
+ * top-level ioctl code to send a NAK (with code EINVAL).
+ *
+ * Otherwise, nd_getset() will have built the reply to
+ * be sent (but not actually sent it), so we tell the
+ * caller to send the prepared reply.
+ */
+ ok = nd_getset(q, ixgbe->nd_data, mp);
+ return (ok ? IOC_REPLY : IOC_INVAL);
+
+ case ND_SET:
+ /*
+ * All adv_* parameters are locked (read-only) while
+ * the device is in any sort of loopback mode ...
+ */
+ if (ixgbe->loopback_mode != IXGBE_LB_NONE) {
+ ioc->ioc_error = EBUSY;
+ return (IOC_INVAL);
+ }
+
+ ok = nd_getset(q, ixgbe->nd_data, mp);
+ return (ok ? IOC_REPLY : IOC_INVAL);
+ }
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_osdep.c b/usr/src/uts/common/io/ixgbe/ixgbe_osdep.c
new file mode 100644
index 0000000000..b51c1621d8
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_osdep.c
@@ -0,0 +1,38 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_osdep.h"
+#include "ixgbe_api.h"
+
+uint16_t
+ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
+{
+ return (pci_config_get16(OS_DEP(hw)->cfg_handle, reg));
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_osdep.h b/usr/src/uts/common/io/ixgbe/ixgbe_osdep.h
new file mode 100644
index 0000000000..95163ab642
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_osdep.h
@@ -0,0 +1,132 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#ifndef _IXGBE_OSDEP_H
+#define _IXGBE_OSDEP_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/debug.h>
+#include <sys/stropts.h>
+#include <sys/stream.h>
+#include <sys/strlog.h>
+#include <sys/kmem.h>
+#include <sys/stat.h>
+#include <sys/kstat.h>
+#include <sys/modctl.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/dditypes.h>
+#include <sys/sunddi.h>
+#include <sys/pci.h>
+#include <sys/atomic.h>
+#include <sys/note.h>
+#include "ixgbe_debug.h"
+
+/* function declarations */
+struct ixgbe_hw;
+uint16_t ixgbe_read_pci_cfg(struct ixgbe_hw *, uint32_t);
+
+
+#define usec_delay(x) drv_usecwait(x)
+#define msec_delay(x) drv_usecwait((x) * 1000)
+
+#ifdef IXGBE_DEBUG
+#define DEBUGOUT(S) IXGBE_DEBUGLOG_0(NULL, S)
+#define DEBUGOUT1(S, A) IXGBE_DEBUGLOG_1(NULL, S, A)
+#define DEBUGOUT2(S, A, B) IXGBE_DEBUGLOG_2(NULL, S, A, B)
+#define DEBUGOUT3(S, A, B, C) IXGBE_DEBUGLOG_3(NULL, S, A, B, C)
+#define DEBUGOUT6(S, A, B, C, D, E, F) \
+ IXGBE_DEBUGLOG_6(NULL, S, A, B, C, D, E, F)
+#define DEBUGFUNC(F) IXGBE_DEBUGLOG_1(NULL, "Entering %s", F)
+#else
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A)
+#define DEBUGOUT2(S, A, B)
+#define DEBUGOUT3(S, A, B, C)
+#define DEBUGOUT6(S, A, B, C, D, E, F)
+#define DEBUGFUNC(F)
+#endif
+
+#define OS_DEP(hw) ((struct ixgbe_osdep *)((hw)->back))
+
+#define FALSE 0
+#define TRUE 1
+
+#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg
+#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
+#define PCI_COMMAND_REGISTER 0x04
+#define PCI_EX_CONF_CAP 0xE0
+#define MAX_NUM_UNICAST_ADDRESSES 0x10
+#define MAX_NUM_MULTICAST_ADDRESSES 0x1000
+#define SPEED_10GB 10000
+#define SPEED_1GB 1000
+#define SPEED_100 100
+#define FULL_DUPLEX 2
+
+#define IXGBE_WRITE_FLUSH(a) (void) IXGBE_READ_REG(a, IXGBE_STATUS)
+
+#define IXGBE_WRITE_REG(a, reg, value) \
+ ddi_put32((OS_DEP(a))->reg_handle, \
+ (uint32_t *)((uintptr_t)(a)->hw_addr + (reg)), (value))
+
+#define IXGBE_READ_REG(a, reg) \
+ ddi_get32((OS_DEP(a))->reg_handle, \
+ (uint32_t *)((uintptr_t)(a)->hw_addr + (reg)))
+
+#define msec_delay_irq msec_delay
+
+#define UNREFERENCED_PARAMETER(x) _NOTE(ARGUNUSED(x))
+
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef boolean_t bool;
+
+struct ixgbe_osdep {
+ ddi_acc_handle_t reg_handle;
+ ddi_acc_handle_t cfg_handle;
+ struct ixgbe *ixgbe;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _IXGBE_OSDEP_H */
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_phy.c b/usr/src/uts/common/io/ixgbe/ixgbe_phy.c
new file mode 100644
index 0000000000..8189a3c0e4
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_phy.c
@@ -0,0 +1,461 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+/* IntelVersion: 1.37 v2008-03-04 */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+/*
+ * ixgbe_init_phy_ops_generic - Inits PHY function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ */
+s32
+ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ /* PHY */
+ phy->ops.identify = &ixgbe_identify_phy_generic;
+ phy->ops.reset = &ixgbe_reset_phy_generic;
+ phy->ops.read_reg = &ixgbe_read_phy_reg_generic;
+ phy->ops.write_reg = &ixgbe_write_phy_reg_generic;
+ phy->ops.setup_link = &ixgbe_setup_phy_link_generic;
+ phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
+
+ return (IXGBE_SUCCESS);
+}
+
+/*
+ * ixgbe_identify_phy_generic - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ */
+s32
+ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 phy_addr;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+ if (ixgbe_validate_phy_addr(hw, phy_addr)) {
+ hw->phy.addr = phy_addr;
+ (void) ixgbe_get_phy_id(hw);
+ hw->phy.type =
+ ixgbe_get_phy_type_from_id(hw->phy.id);
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ }
+ } else {
+ status = IXGBE_SUCCESS;
+ }
+
+ return (status);
+}
+
+/*
+ * ixgbe_validate_phy_addr - Determines phy address is valid
+ * @hw: pointer to hardware structure
+ *
+ */
+bool
+ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
+{
+ u16 phy_id = 0;
+ bool valid = FALSE;
+
+ hw->phy.addr = phy_addr;
+ (void) hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
+
+ if (phy_id != 0xFFFF && phy_id != 0x0)
+ valid = TRUE;
+
+ return (valid);
+}
+
+/*
+ * ixgbe_get_phy_id - Get the phy type
+ * @hw: pointer to hardware structure
+ *
+ */
+s32
+ixgbe_get_phy_id(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 phy_id_high = 0;
+ u16 phy_id_low = 0;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &phy_id_high);
+
+ if (status == IXGBE_SUCCESS) {
+ hw->phy.id = (u32)(phy_id_high << 16);
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &phy_id_low);
+ hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
+ hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
+ }
+
+ return (status);
+}
+
+/*
+ * ixgbe_get_phy_type_from_id - Get the phy type
+ * @hw: pointer to hardware structure
+ *
+ */
+enum ixgbe_phy_type
+ixgbe_get_phy_type_from_id(u32 phy_id)
+{
+ enum ixgbe_phy_type phy_type;
+
+ switch (phy_id) {
+ case QT2022_PHY_ID:
+ phy_type = ixgbe_phy_qt;
+ break;
+ default:
+ phy_type = ixgbe_phy_unknown;
+ break;
+ }
+
+ return (phy_type);
+}
+
+/*
+ * ixgbe_reset_phy_generic - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ */
+s32
+ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+{
+ /*
+ * Perform soft PHY reset to the PHY_XS.
+ * This will cause a soft reset to the PHY
+ */
+ return (hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ IXGBE_MDIO_PHY_XS_RESET));
+}
+
+/*
+ * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ */
+s32
+ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ u32 command;
+ u32 i;
+ u32 data;
+ s32 status = IXGBE_SUCCESS;
+ u16 gssr;
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ gssr = IXGBE_GSSR_PHY1_SM;
+ else
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (ixgbe_acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ if (status == IXGBE_SUCCESS) {
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
+ break;
+ }
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ DEBUGOUT("PHY address command did not complete.\n");
+ status = IXGBE_ERR_PHY;
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ /*
+ * Address cycle complete, setup and write the read
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ DEBUGOUT("PHY read command didn't complete\n");
+ status = IXGBE_ERR_PHY;
+ } else {
+ /*
+ * Read operation is complete. Get the data
+ * from MSRWD
+ */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)(data);
+ }
+ }
+
+ ixgbe_release_swfw_sync(hw, gssr);
+ }
+
+ return (status);
+}
+
+/*
+ * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ */
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 command;
+ u32 i;
+ s32 status = IXGBE_SUCCESS;
+ u16 gssr;
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ gssr = IXGBE_GSSR_PHY1_SM;
+ else
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (ixgbe_acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ if (status == IXGBE_SUCCESS) {
+ /*
+ * Put the data in the MDI single read and write data register
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ DEBUGOUT("PHY address cmd didn't complete\n");
+ status = IXGBE_ERR_PHY;
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ /*
+ * Address cycle complete, setup and write the write
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ DEBUGOUT("PHY address cmd didn't complete\n");
+ status = IXGBE_ERR_PHY;
+ }
+ }
+
+ ixgbe_release_swfw_sync(hw, gssr);
+ }
+
+ return (status);
+}
+
+/*
+ * ixgbe_setup_phy_link_generic - Set and restart autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restart autonegotiation and PHY and waits for completion.
+ */
+s32
+ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_NOT_IMPLEMENTED;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+
+ /*
+ * Set advertisement settings in PHY based on autoneg_advertised
+ * settings. If autoneg_advertised = 0, then advertise default values
+ * tnx devices cannot be "forced" to a autoneg 10G and fail. But can
+ * for a 1G.
+ */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+ if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
+ else
+ autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+ /* Restart PHY autonegotiation and wait for completion */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+ autoneg_reg |= IXGBE_MII_RESTART;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+ /* Wait for autonegotiation to finish */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ usec_delay(10);
+ /* Restart PHY autonegotiation and wait for completion */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
+ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ }
+
+ if (time_out == max_time_out)
+ status = IXGBE_ERR_LINK_SETUP;
+
+ return (status);
+}
+
+/*
+ * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: TRUE if autonegotiation enabled
+ */
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ UNREFERENCED_PARAMETER(autoneg);
+ UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+
+ /*
+ * Clear autoneg_advertised and set new values based on input link
+ * speed.
+ */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+ }
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+ }
+
+ /* Setup link based on the new speed settings */
+ hw->phy.ops.setup_link(hw);
+
+ return (IXGBE_SUCCESS);
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_phy.h b/usr/src/uts/common/io/ixgbe/ixgbe_phy.h
new file mode 100644
index 0000000000..cc9ed91e4b
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_phy.h
@@ -0,0 +1,52 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+/* IntelVersion: 1.17 v2008-03-04 */
+
+#ifndef _IXGBE_PHY_H
+#define _IXGBE_PHY_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete);
+
+#endif /* _IXGBE_PHY_H */
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_rx.c b/usr/src/uts/common/io/ixgbe/ixgbe_rx.c
new file mode 100644
index 0000000000..3f09a4215d
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_rx.c
@@ -0,0 +1,380 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_sw.h"
+
+/* function prototypes */
+static mblk_t *ixgbe_rx_bind(ixgbe_rx_ring_t *, uint32_t, uint32_t);
+static mblk_t *ixgbe_rx_copy(ixgbe_rx_ring_t *, uint32_t, uint32_t);
+static void ixgbe_rx_assoc_hcksum(mblk_t *, uint32_t);
+
+#ifndef IXGBE_DEBUG
+#pragma inline(ixgbe_rx_assoc_hcksum)
+#endif
+
+/*
+ * ixgbe_rx_recycle - The call-back function to reclaim an rx buffer.
+ *
+ * This function is called when an mp is freed by the user through a
+ * freeb call (only for mps constructed through a desballoc call).
+ * It returns the freed buffer to the ring's free list.
+ */
+void
+ixgbe_rx_recycle(caddr_t arg)
+{
+	ixgbe_rx_ring_t *rx_ring;
+	rx_control_block_t *recycle_rcb;
+	uint32_t free_index;
+
+	/* arg is the rx control block registered as desballoc's free_rtn arg */
+	recycle_rcb = (rx_control_block_t *)(uintptr_t)arg;
+	rx_ring = recycle_rcb->rx_ring;
+
+	/*
+	 * Already marked free: nothing to reclaim.  Presumably this covers
+	 * an mblk freed without the buffer having been loaned upstream
+	 * (state never moved to RCB_SENDUP) -- confirm against callers.
+	 */
+	if (recycle_rcb->state == RCB_FREE)
+		return;
+
+	recycle_rcb->state = RCB_FREE;
+
+	ASSERT(recycle_rcb->mp == NULL);
+
+	/*
+	 * Use the recycled data buffer to generate a new mblk.
+	 * The mapped region starts IPHDR_ALIGN_ROOM bytes before the
+	 * payload address so the IP header stays aligned; both pointers
+	 * are advanced past that gap.  A desballoc failure here is
+	 * tolerated: ixgbe_rx_bind() retries desballoc when mp is NULL.
+	 */
+	recycle_rcb->mp = desballoc((unsigned char *)
+	    (recycle_rcb->rx_buf.address - IPHDR_ALIGN_ROOM),
+	    (recycle_rcb->rx_buf.size + IPHDR_ALIGN_ROOM),
+	    0, &recycle_rcb->free_rtn);
+	if (recycle_rcb->mp != NULL) {
+		recycle_rcb->mp->b_rptr += IPHDR_ALIGN_ROOM;
+		recycle_rcb->mp->b_wptr += IPHDR_ALIGN_ROOM;
+	}
+
+	/*
+	 * Put the recycled rx control block into the free list.
+	 * recycle_lock protects the tail index against concurrent frees.
+	 */
+	mutex_enter(&rx_ring->recycle_lock);
+
+	free_index = rx_ring->rcb_tail;
+	ASSERT(rx_ring->free_list[free_index] == NULL);
+
+	rx_ring->free_list[free_index] = recycle_rcb;
+	rx_ring->rcb_tail = NEXT_INDEX(free_index, 1, rx_ring->free_list_size);
+
+	mutex_exit(&rx_ring->recycle_lock);
+
+	/*
+	 * The atomic operation on the number of the available rx control
+	 * blocks in the free list is used to make the recycling mutually
+	 * exclusive with the receiving (ixgbe_rx_bind reserves with
+	 * ixgbe_atomic_reserve on the same counter).  The increment must
+	 * come after the list insertion above.
+	 */
+	atomic_inc_32(&rx_ring->rcb_free);
+	ASSERT(rx_ring->rcb_free <= rx_ring->free_list_size);
+}
+
+/*
+ * ixgbe_rx_copy - Deliver a received frame by copying it.
+ *
+ * Allocates a fresh mblk, copies pkt_len bytes out of the ring's
+ * pre-bound DMA buffer at work_list[index] into it, and returns the
+ * new mblk (NULL if allocation fails).
+ */
+static mblk_t *
+ixgbe_rx_copy(ixgbe_rx_ring_t *rx_ring, uint32_t index, uint32_t pkt_len)
+{
+	rx_control_block_t *rcb = rx_ring->work_list[index];
+	mblk_t *nmp;
+
+	/* Make the received data visible to the CPU before reading it */
+	DMA_SYNC(&rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);
+
+	if (ixgbe_check_dma_handle(rcb->rx_buf.dma_handle) != DDI_FM_OK) {
+		ddi_fm_service_impact(rx_ring->ixgbe->dip,
+		    DDI_SERVICE_DEGRADED);
+	}
+
+	/*
+	 * Get a message block big enough for the frame plus the slack
+	 * needed to keep the IP header aligned.
+	 */
+	nmp = allocb(pkt_len + IPHDR_ALIGN_ROOM, 0);
+	if (nmp == NULL) {
+		ixgbe_log(rx_ring->ixgbe,
+		    "ixgbe_rx_copy: allocate buffer failed");
+		return (NULL);
+	}
+
+	/* Leave the alignment gap, then copy the frame in behind it */
+	nmp->b_rptr += IPHDR_ALIGN_ROOM;
+	bcopy(rcb->rx_buf.address, nmp->b_rptr, pkt_len);
+	nmp->b_wptr = nmp->b_rptr + pkt_len;
+
+	return (nmp);
+}
+
+/*
+ * ixgbe_rx_bind - Use the existing DMA buffer to build an mblk for receiving.
+ *
+ * Zero-copy receive path: the current control block's pre-bound DMA
+ * buffer is sent upstream inside a desballoc'd mblk, and a spare
+ * control block taken from the free list replaces it in the work
+ * list.  Returns NULL when this cannot be done, in which case the
+ * caller falls back to ixgbe_rx_copy().
+ */
+static mblk_t *
+ixgbe_rx_bind(ixgbe_rx_ring_t *rx_ring, uint32_t index, uint32_t pkt_len)
+{
+	rx_control_block_t *current_rcb;
+	rx_control_block_t *free_rcb;
+	uint32_t free_index;
+	mblk_t *mp;
+
+	/*
+	 * If the free list is empty, we cannot proceed to send
+	 * the current DMA buffer upstream. We'll have to return
+	 * and use bcopy to process the packet.  The reservation also
+	 * excludes a concurrent ixgbe_rx_recycle() on this counter.
+	 */
+	if (ixgbe_atomic_reserve(&rx_ring->rcb_free, 1) < 0)
+		return (NULL);
+
+	current_rcb = rx_ring->work_list[index];
+	/*
+	 * If the mp of the rx control block is NULL (the earlier
+	 * desballoc in the recycle path failed), try desballoc again.
+	 */
+	if (current_rcb->mp == NULL) {
+		current_rcb->mp = desballoc((unsigned char *)
+		    (current_rcb->rx_buf.address - IPHDR_ALIGN_ROOM),
+		    (current_rcb->rx_buf.size + IPHDR_ALIGN_ROOM),
+		    0, &current_rcb->free_rtn);
+		/*
+		 * If building an mblk over the current DMA buffer
+		 * failed, give back the reserved free-list slot and
+		 * let the caller fall back to the bcopy path.
+		 */
+		if (current_rcb->mp == NULL) {
+			atomic_inc_32(&rx_ring->rcb_free);
+			return (NULL);
+		}
+	}
+	/*
+	 * Sync up the data received
+	 */
+	DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);
+
+	if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
+	    DDI_FM_OK) {
+		ddi_fm_service_impact(rx_ring->ixgbe->dip,
+		    DDI_SERVICE_DEGRADED);
+	}
+
+	/* Detach the mblk; RCB_SENDUP marks the buffer as loaned upstream */
+	mp = current_rcb->mp;
+	current_rcb->mp = NULL;
+	current_rcb->state = RCB_SENDUP;
+
+	mp->b_wptr = mp->b_rptr + pkt_len;
+	mp->b_next = mp->b_cont = NULL;
+
+	/*
+	 * Strip off one free rx control block from the free list
+	 * (the reservation above guarantees one is present).
+	 */
+	free_index = rx_ring->rcb_head;
+	free_rcb = rx_ring->free_list[free_index];
+	ASSERT(free_rcb != NULL);
+	rx_ring->free_list[free_index] = NULL;
+	rx_ring->rcb_head = NEXT_INDEX(free_index, 1, rx_ring->free_list_size);
+
+	/*
+	 * Put the spare rx control block into the work list in place of
+	 * the one just sent upstream.
+	 */
+	rx_ring->work_list[index] = free_rcb;
+
+	return (mp);
+}
+
+/*
+ * ixgbe_rx_assoc_hcksum - Translate the descriptor checksum status
+ * bits into hcksum flags and attach them to the mblk.
+ */
+static void
+ixgbe_rx_assoc_hcksum(mblk_t *mp, uint32_t status_error)
+{
+	uint32_t flags = 0;
+
+	/*
+	 * L4 (TCP/UDP) checksum: hardware verified it and reported
+	 * no error, so the full checksum is known good.
+	 */
+	if ((status_error & IXGBE_RXD_STAT_L4CS) &&
+	    !(status_error & IXGBE_RXDADV_ERR_TCPE))
+		flags |= HCK_FULLCKSUM | HCK_FULLCKSUM_OK;
+
+	/*
+	 * IPv4 header checksum: verified and clean.
+	 */
+	if ((status_error & IXGBE_RXD_STAT_IPCS) &&
+	    !(status_error & IXGBE_RXDADV_ERR_IPE))
+		flags |= HCK_IPV4_HDRCKSUM;
+
+	/* Nothing verified by hardware: leave the mblk untouched */
+	if (flags == 0)
+		return;
+
+	(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, flags, 0);
+}
+
+/*
+ * ixgbe_rx - Receive the data of one ring.
+ *
+ * This function goes through the h/w descriptors of one specified rx
+ * ring and receives the data when a descriptor's status shows it is
+ * ready.  It returns a chain of mblks containing the received data,
+ * to be passed up to mac_rx().
+ *
+ * Fix: the local "hw" was initialized but never used (the register
+ * write used &ixgbe->hw directly), drawing an unused-variable
+ * warning; the write now goes through the cached pointer.
+ */
+mblk_t *
+ixgbe_rx(ixgbe_rx_ring_t *rx_ring)
+{
+	union ixgbe_adv_rx_desc *current_rbd;
+	rx_control_block_t *current_rcb;
+	mblk_t *mp;
+	mblk_t *mblk_head;
+	mblk_t **mblk_tail;
+	uint32_t rx_next;
+	uint32_t rx_tail;
+	uint32_t pkt_len;
+	uint32_t status_error;
+	uint32_t pkt_num;
+	ixgbe_t *ixgbe = rx_ring->ixgbe;
+	struct ixgbe_hw *hw = &ixgbe->hw;
+
+	mblk_head = NULL;
+	mblk_tail = &mblk_head;
+
+	/*
+	 * Sync the receive descriptors before accepting the packets
+	 */
+	DMA_SYNC(&rx_ring->rbd_area, DDI_DMA_SYNC_FORKERNEL);
+
+	if (ixgbe_check_dma_handle(rx_ring->rbd_area.dma_handle) != DDI_FM_OK) {
+		ddi_fm_service_impact(rx_ring->ixgbe->dip,
+		    DDI_SERVICE_DEGRADED);
+	}
+
+	/*
+	 * Get the start point of the rx bd ring which should be examined
+	 * during this cycle.
+	 */
+	rx_next = rx_ring->rbd_next;
+
+	current_rbd = &rx_ring->rbd_ring[rx_next];
+	pkt_num = 0;
+	status_error = current_rbd->wb.upper.status_error;
+	while (status_error & IXGBE_RXD_STAT_DD) {
+		/*
+		 * If the adapter has found errors, but the error
+		 * is a hardware checksum error, do not discard the
+		 * packet: let the upper layer compute the checksum;
+		 * otherwise discard the packet.  Frames without EOP
+		 * (multi-descriptor) are also discarded here.
+		 */
+		if ((status_error & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) ||
+		    !(status_error & IXGBE_RXD_STAT_EOP)) {
+			IXGBE_DEBUG_STAT(rx_ring->stat_frame_error);
+			goto rx_discard;
+		}
+
+		IXGBE_DEBUG_STAT_COND(rx_ring->stat_cksum_error,
+		    (status_error & IXGBE_RXDADV_ERR_TCPE) ||
+		    (status_error & IXGBE_RXDADV_ERR_IPE));
+
+		pkt_len = current_rbd->wb.upper.length;
+		mp = NULL;
+		/*
+		 * For packets with length more than the copy threshold,
+		 * we'll first try to use the existing DMA buffer to build
+		 * an mblk and send the mblk upstream.
+		 *
+		 * If the first method fails, or the packet length is less
+		 * than the copy threshold, we'll allocate a new mblk and
+		 * copy the packet data to the new mblk.
+		 */
+		if (pkt_len > rx_ring->copy_thresh)
+			mp = ixgbe_rx_bind(rx_ring, rx_next, pkt_len);
+
+		if (mp == NULL)
+			mp = ixgbe_rx_copy(rx_ring, rx_next, pkt_len);
+
+		if (mp != NULL) {
+			/*
+			 * Check h/w checksum offload status
+			 */
+			if (ixgbe->rx_hcksum_enable)
+				ixgbe_rx_assoc_hcksum(mp, status_error);
+
+			/* Append to the chain through the tail pointer */
+			*mblk_tail = mp;
+			mblk_tail = &mp->b_next;
+		}
+
+rx_discard:
+		/*
+		 * Reset rx descriptor read bits so the hardware can
+		 * reuse this slot (the work-list buffer may have been
+		 * swapped by ixgbe_rx_bind).
+		 */
+		current_rcb = rx_ring->work_list[rx_next];
+		current_rbd->read.pkt_addr = current_rcb->rx_buf.dma_address;
+		current_rbd->read.hdr_addr = 0;
+
+		rx_next = NEXT_INDEX(rx_next, 1, rx_ring->ring_size);
+
+		/*
+		 * The receive function is in interrupt context, so
+		 * limit_per_intr is used here to avoid spending too long
+		 * receiving per interrupt.
+		 */
+		if (++pkt_num > rx_ring->limit_per_intr) {
+			IXGBE_DEBUG_STAT(rx_ring->stat_exceed_pkt);
+			break;
+		}
+
+		current_rbd = &rx_ring->rbd_ring[rx_next];
+		status_error = current_rbd->wb.upper.status_error;
+	}
+
+	DMA_SYNC(&rx_ring->rbd_area, DDI_DMA_SYNC_FORDEV);
+
+	rx_ring->rbd_next = rx_next;
+
+	/*
+	 * Update the h/w tail accordingly
+	 */
+	rx_tail = PREV_INDEX(rx_next, 1, rx_ring->ring_size);
+
+	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_tail);
+
+	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
+		ddi_fm_service_impact(rx_ring->ixgbe->dip,
+		    DDI_SERVICE_DEGRADED);
+	}
+
+	return (mblk_head);
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_stat.c b/usr/src/uts/common/io/ixgbe/ixgbe_stat.c
new file mode 100644
index 0000000000..fa7cbf28e8
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_stat.c
@@ -0,0 +1,265 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_sw.h"
+
+/*
+ * Update driver private statistics.
+ *
+ * ks_update(9E) callback for the "statistics" kstat created by
+ * ixgbe_init_stats().  Refreshes the named-kstat snapshot from the
+ * driver's per-ring software counters and the adapter's hardware
+ * statistics registers.  Hardware register values are accumulated
+ * with "+=" (presumably the registers are clear-on-read -- confirm
+ * against the 82598 datasheet); software sums are recomputed from
+ * scratch each call.
+ */
+static int
+ixgbe_update_stats(kstat_t *ks, int rw)
+{
+	ixgbe_t *ixgbe;
+	struct ixgbe_hw *hw;
+	ixgbe_stat_t *ixgbe_ks;
+	int i;
+
+	/* This kstat is read-only */
+	if (rw == KSTAT_WRITE)
+		return (EACCES);
+
+	ixgbe = (ixgbe_t *)ks->ks_private;
+	ixgbe_ks = (ixgbe_stat_t *)ks->ks_data;
+	hw = &ixgbe->hw;
+
+	/* Serialize against state changes (reset/stop) on the instance */
+	mutex_enter(&ixgbe->gen_lock);
+
+	/*
+	 * Basic information
+	 */
+	ixgbe_ks->link_speed.value.ui64 = ixgbe->link_speed;
+
+#ifdef IXGBE_DEBUG
+	ixgbe_ks->reset_count.value.ui64 = ixgbe->reset_count;
+
+	/* Sum per-rx-ring software counters into driver-wide totals */
+	ixgbe_ks->rx_frame_error.value.ui64 = 0;
+	ixgbe_ks->rx_cksum_error.value.ui64 = 0;
+	ixgbe_ks->rx_exceed_pkt.value.ui64 = 0;
+	for (i = 0; i < ixgbe->num_rx_rings; i++) {
+		ixgbe_ks->rx_frame_error.value.ui64 +=
+		    ixgbe->rx_rings[i].stat_frame_error;
+		ixgbe_ks->rx_cksum_error.value.ui64 +=
+		    ixgbe->rx_rings[i].stat_cksum_error;
+		ixgbe_ks->rx_exceed_pkt.value.ui64 +=
+		    ixgbe->rx_rings[i].stat_exceed_pkt;
+	}
+
+	/* Sum per-tx-ring software counters into driver-wide totals */
+	ixgbe_ks->tx_overload.value.ui64 = 0;
+	ixgbe_ks->tx_fail_no_tbd.value.ui64 = 0;
+	ixgbe_ks->tx_fail_no_tcb.value.ui64 = 0;
+	ixgbe_ks->tx_fail_dma_bind.value.ui64 = 0;
+	ixgbe_ks->tx_reschedule.value.ui64 = 0;
+	for (i = 0; i < ixgbe->num_tx_rings; i++) {
+		ixgbe_ks->tx_overload.value.ui64 +=
+		    ixgbe->tx_rings[i].stat_overload;
+		ixgbe_ks->tx_fail_no_tbd.value.ui64 +=
+		    ixgbe->tx_rings[i].stat_fail_no_tbd;
+		ixgbe_ks->tx_fail_no_tcb.value.ui64 +=
+		    ixgbe->tx_rings[i].stat_fail_no_tcb;
+		ixgbe_ks->tx_fail_dma_bind.value.ui64 +=
+		    ixgbe->tx_rings[i].stat_fail_dma_bind;
+		ixgbe_ks->tx_reschedule.value.ui64 +=
+		    ixgbe->tx_rings[i].stat_reschedule;
+	}
+
+	/*
+	 * Hardware calculated statistics.
+	 * Per-queue good packet/byte counts are summed over 16 queues.
+	 * NOTE(review): gprc/gptc and the prc*/ptc* fields below use
+	 * .value.ul although they are initialized as KSTAT_DATA_UINT64
+	 * in ixgbe_init_stats() -- confirm this union-member mismatch
+	 * is benign on 32-bit kernels.
+	 */
+	for (i = 0; i < 16; i++) {
+		ixgbe_ks->gprc.value.ul += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+		ixgbe_ks->gptc.value.ul += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+		ixgbe_ks->tor.value.ui64 += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+		ixgbe_ks->got.value.ui64 += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+	}
+	/* Good octets received mirrors the total-octets accumulator */
+	ixgbe_ks->gor.value.ui64 = ixgbe_ks->tor.value.ui64;
+
+	/* Packet-size histogram registers (rx then tx buckets) */
+	ixgbe_ks->prc64.value.ul += IXGBE_READ_REG(hw, IXGBE_PRC64);
+	ixgbe_ks->prc127.value.ul += IXGBE_READ_REG(hw, IXGBE_PRC127);
+	ixgbe_ks->prc255.value.ul += IXGBE_READ_REG(hw, IXGBE_PRC255);
+	ixgbe_ks->prc511.value.ul += IXGBE_READ_REG(hw, IXGBE_PRC511);
+	ixgbe_ks->prc1023.value.ul += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+	ixgbe_ks->prc1522.value.ul += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+	ixgbe_ks->ptc64.value.ul += IXGBE_READ_REG(hw, IXGBE_PTC64);
+	ixgbe_ks->ptc127.value.ul += IXGBE_READ_REG(hw, IXGBE_PTC127);
+	ixgbe_ks->ptc255.value.ul += IXGBE_READ_REG(hw, IXGBE_PTC255);
+	ixgbe_ks->ptc511.value.ul += IXGBE_READ_REG(hw, IXGBE_PTC511);
+	ixgbe_ks->ptc1023.value.ul += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+	ixgbe_ks->ptc1522.value.ul += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+#endif
+
+	/* Error/flow-control counters, maintained even in non-debug builds */
+	ixgbe_ks->mspdc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_MSPDC);
+	for (i = 0; i < 8; i++)
+		ixgbe_ks->mpc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_MPC(i));
+	ixgbe_ks->mlfc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_MLFC);
+	ixgbe_ks->mrfc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_MRFC);
+	ixgbe_ks->rlec.value.ui64 += IXGBE_READ_REG(hw, IXGBE_RLEC);
+	ixgbe_ks->lxontxc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+	ixgbe_ks->lxonrxc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+	ixgbe_ks->lxofftxc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+	ixgbe_ks->lxoffrxc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+	ixgbe_ks->ruc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_RUC);
+	ixgbe_ks->rfc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_RFC);
+	ixgbe_ks->roc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_ROC);
+	ixgbe_ks->rjc.value.ui64 += IXGBE_READ_REG(hw, IXGBE_RJC);
+
+	mutex_exit(&ixgbe->gen_lock);
+
+	/* Register access errors only degrade stats, not service */
+	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK)
+		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_UNAFFECTED);
+
+	return (0);
+}
+
+/*
+ * Create and initialize the driver private statistics.
+ *
+ * Creates a named kstat ("statistics", class "net") with one entry per
+ * field of ixgbe_stat_t, hooks up ixgbe_update_stats() as its on-demand
+ * update callback, and installs it on the system kstat chain.
+ * Returns IXGBE_SUCCESS, or IXGBE_FAILURE if kstat_create fails.
+ *
+ * Fix: several packet-size bucket labels were off by one and
+ * inconsistent between the rx (prc*) and tx (ptc*) sets; the hardware
+ * buckets are 64, 65-127, 128-255, 256-511, 512-1023 and 1024-1522
+ * bytes, and the labels now agree on both sides.
+ */
+int
+ixgbe_init_stats(ixgbe_t *ixgbe)
+{
+	kstat_t *ks;
+	ixgbe_stat_t *ixgbe_ks;
+
+	/*
+	 * Create and init kstat
+	 */
+	ks = kstat_create(MODULE_NAME, ddi_get_instance(ixgbe->dip),
+	    "statistics", "net", KSTAT_TYPE_NAMED,
+	    sizeof (ixgbe_stat_t) / sizeof (kstat_named_t), 0);
+
+	if (ks == NULL) {
+		ixgbe_error(ixgbe,
+		    "Could not create kernel statistics");
+		return (IXGBE_FAILURE);
+	}
+
+	ixgbe->ixgbe_ks = ks;
+
+	ixgbe_ks = (ixgbe_stat_t *)ks->ks_data;
+
+	/*
+	 * Initialize all the statistics.
+	 */
+	kstat_named_init(&ixgbe_ks->link_speed, "link_speed",
+	    KSTAT_DATA_UINT64);
+
+#ifdef IXGBE_DEBUG
+	kstat_named_init(&ixgbe_ks->reset_count, "reset_count",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->rx_frame_error, "rx_frame_error",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->rx_cksum_error, "rx_cksum_error",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->rx_exceed_pkt, "rx_exceed_pkt",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->tx_overload, "tx_overload",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->tx_fail_no_tbd, "tx_fail_no_tbd",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->tx_fail_no_tcb, "tx_fail_no_tcb",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->tx_fail_dma_bind, "tx_fail_dma_bind",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->tx_reschedule, "tx_reschedule",
+	    KSTAT_DATA_UINT64);
+
+	kstat_named_init(&ixgbe_ks->gprc, "good_pkts_recvd",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->gptc, "good_pkts_xmitd",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->gor, "good_octets_recvd",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->got, "good_octets_xmitd",
+	    KSTAT_DATA_UINT64);
+	/* rx packet-size histogram buckets */
+	kstat_named_init(&ixgbe_ks->prc64, "pkts_recvd_(  64b)",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->prc127, "pkts_recvd_(  65- 127b)",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->prc255, "pkts_recvd_( 128- 255b)",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->prc511, "pkts_recvd_( 256- 511b)",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->prc1023, "pkts_recvd_( 512-1023b)",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->prc1522, "pkts_recvd_(1024-1522b)",
+	    KSTAT_DATA_UINT64);
+	/* tx packet-size histogram buckets (same ranges as rx) */
+	kstat_named_init(&ixgbe_ks->ptc64, "pkts_xmitd_(  64b)",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->ptc127, "pkts_xmitd_(  65- 127b)",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->ptc255, "pkts_xmitd_( 128- 255b)",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->ptc511, "pkts_xmitd_( 256- 511b)",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->ptc1023, "pkts_xmitd_( 512-1023b)",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->ptc1522, "pkts_xmitd_(1024-1522b)",
+	    KSTAT_DATA_UINT64);
+#endif
+
+	kstat_named_init(&ixgbe_ks->mspdc, "mac_short_packet_discard",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->mpc, "missed_packets",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->mlfc, "mac_local_fault",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->mrfc, "mac_remote_fault",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->rlec, "recv_length_err",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->lxontxc, "link_xon_xmitd",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->lxonrxc, "link_xon_recvd",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->lxofftxc, "link_xoff_xmitd",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->lxoffrxc, "link_xoff_recvd",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->ruc, "recv_undersize",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->rfc, "recv_fragment",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->roc, "recv_oversize",
+	    KSTAT_DATA_UINT64);
+	kstat_named_init(&ixgbe_ks->rjc, "recv_jabber",
+	    KSTAT_DATA_UINT64);
+
+	/*
+	 * Function to provide kernel stat update on demand
+	 */
+	ks->ks_update = ixgbe_update_stats;
+
+	ks->ks_private = (void *)ixgbe;
+
+	/*
+	 * Add kstat to systems kstat chain
+	 */
+	kstat_install(ks);
+
+	return (IXGBE_SUCCESS);
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_sw.h b/usr/src/uts/common/io/ixgbe/ixgbe_sw.h
new file mode 100644
index 0000000000..d45af917ed
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_sw.h
@@ -0,0 +1,830 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#ifndef _IXGBE_SW_H
+#define _IXGBE_SW_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/debug.h>
+#include <sys/stropts.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/strlog.h>
+#include <sys/kmem.h>
+#include <sys/stat.h>
+#include <sys/kstat.h>
+#include <sys/modctl.h>
+#include <sys/errno.h>
+#include <sys/dlpi.h>
+#include <sys/mac.h>
+#include <sys/mac_ether.h>
+#include <sys/vlan.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/pci.h>
+#include <sys/pcie.h>
+#include <sys/sdt.h>
+#include <sys/ethernet.h>
+#include <sys/pattr.h>
+#include <sys/strsubr.h>
+#include <sys/netlb.h>
+#include <sys/random.h>
+#include <inet/common.h>
+#include <inet/ip.h>
+#include <inet/mi.h>
+#include <inet/nd.h>
+#include <sys/bitmap.h>
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/io/ddi.h>
+#include "ixgbe_api.h"
+
+#define MODULE_NAME "ixgbe" /* module name */
+
+#define IXGBE_FAILURE DDI_FAILURE
+
+#define IXGBE_UNKNOWN 0x00
+#define IXGBE_INITIALIZED 0x01
+#define IXGBE_STARTED 0x02
+#define IXGBE_SUSPENDED 0x04
+
+#define MAX_NUM_UNICAST_ADDRESSES 0x10
+#define MAX_NUM_MULTICAST_ADDRESSES 0x1000
+#define IXGBE_INTR_NONE 0
+#define IXGBE_INTR_MSIX 1
+#define IXGBE_INTR_MSI 2
+#define IXGBE_INTR_LEGACY 3
+
+#define MAX_COOKIE 16
+#define MIN_NUM_TX_DESC 2
+
+/*
+ * Maximum values for user configurable parameters
+ */
+
+/*
+ * MAX_xx_QUEUE_NUM values need to be the maximum of all supported
+ * silicon types.
+ */
+#define MAX_TX_QUEUE_NUM 32
+#define MAX_RX_QUEUE_NUM 64
+
+#define MAX_TX_RING_SIZE 4096
+#define MAX_RX_RING_SIZE 4096
+
+#define MAX_MTU 16366
+#define MAX_RX_LIMIT_PER_INTR 4096
+#define MAX_INTR_THROTTLING 65535
+
+#define MAX_RX_COPY_THRESHOLD 9216
+#define MAX_TX_COPY_THRESHOLD 9216
+#define MAX_TX_RECYCLE_THRESHOLD DEFAULT_TX_RING_SIZE
+#define MAX_TX_OVERLOAD_THRESHOLD DEFAULT_TX_RING_SIZE
+#define MAX_TX_RESCHED_THRESHOLD DEFAULT_TX_RING_SIZE
+
+/*
+ * Minimum values for user configurable parameters
+ */
+#define MIN_TX_QUEUE_NUM 1
+#define MIN_RX_QUEUE_NUM 1
+#define MIN_TX_RING_SIZE 64
+#define MIN_RX_RING_SIZE 64
+
+#define MIN_MTU ETHERMIN
+#define MIN_RX_LIMIT_PER_INTR 16
+#define MIN_INTR_THROTTLING 0
+#define MIN_TX_COPY_THRESHOLD 0
+#define MIN_RX_COPY_THRESHOLD 0
+#define MIN_TX_RECYCLE_THRESHOLD MIN_NUM_TX_DESC
+#define MIN_TX_OVERLOAD_THRESHOLD MIN_NUM_TX_DESC
+#define MIN_TX_RESCHED_THRESHOLD MIN_NUM_TX_DESC
+
+/*
+ * Default values for user configurable parameters
+ */
+#define DEFAULT_TX_QUEUE_NUM 1
+#define DEFAULT_RX_QUEUE_NUM 1
+#define DEFAULT_TX_RING_SIZE 512
+#define DEFAULT_RX_RING_SIZE 512
+
+#define DEFAULT_MTU ETHERMTU
+#define DEFAULT_RX_LIMIT_PER_INTR 256
+#define DEFAULT_INTR_THROTTLING 200 /* In unit of 256 nsec */
+#define DEFAULT_RX_COPY_THRESHOLD 128
+#define DEFAULT_TX_COPY_THRESHOLD 512
+#define DEFAULT_TX_RECYCLE_THRESHOLD MAX_COOKIE
+#define DEFAULT_TX_OVERLOAD_THRESHOLD MIN_NUM_TX_DESC
+#define DEFAULT_TX_RESCHED_THRESHOLD 128
+#define DEFAULT_FCRTH 0x20000
+#define DEFAULT_FCRTL 0x10000
+#define DEFAULT_FCPAUSE 0xFFFF
+
+#define TX_DRAIN_TIME 200
+#define RX_DRAIN_TIME 200
+
+#define STALL_WATCHDOG_TIMEOUT 8 /* 8 seconds */
+#define MAX_LINK_DOWN_TIMEOUT 8 /* 8 seconds */
+
+/*
+ * limits on msi-x vectors for 82598
+ */
+#define IXGBE_MAX_INTR_VECTOR 18
+#define IXGBE_MAX_OTHER_VECTOR 2
+#define IXGBE_MAX_RING_VECTOR (IXGBE_MAX_INTR_VECTOR - IXGBE_MAX_OTHER_VECTOR)
+
+/*
+ * Extra register bit masks for 82598
+ */
+#define IXGBE_PCS1GANA_FDC 0x20
+#define IXGBE_PCS1GANLP_LPFD 0x20
+#define IXGBE_PCS1GANLP_LPHD 0x40
+
+
+/*
+ * Defined for IP header alignment.
+ */
+#define IPHDR_ALIGN_ROOM 2
+
+/*
+ * Bit flags for attach_progress
+ */
+#define ATTACH_PROGRESS_PCI_CONFIG 0x0001 /* PCI config setup */
+#define ATTACH_PROGRESS_REGS_MAP 0x0002 /* Registers mapped */
+#define ATTACH_PROGRESS_PROPS 0x0004 /* Properties initialized */
+#define ATTACH_PROGRESS_ALLOC_INTR 0x0008 /* Interrupts allocated */
+#define ATTACH_PROGRESS_ALLOC_RINGS 0x0010 /* Rings allocated */
+#define ATTACH_PROGRESS_ADD_INTR 0x0020 /* Intr handlers added */
+#define ATTACH_PROGRESS_LOCKS 0x0040 /* Locks initialized */
+#define ATTACH_PROGRESS_INIT 0x0080 /* Device initialized */
+#define ATTACH_PROGRESS_INIT_RINGS 0x0100 /* Rings initialized */
+#define ATTACH_PROGRESS_STATS 0x0200 /* Kstats created */
+#define ATTACH_PROGRESS_NDD 0x0400 /* NDD initialized */
+#define ATTACH_PROGRESS_MAC 0x0800 /* MAC registered */
+#define ATTACH_PROGRESS_ENABLE_INTR 0x1000 /* DDI interrupts enabled */
+#define ATTACH_PROGRESS_FM_INIT 0x2000 /* FMA initialized */
+
+#define PROP_DEFAULT_MTU "default_mtu"
+#define PROP_FLOW_CONTROL "flow_control"
+#define PROP_TX_QUEUE_NUM "tx_queue_number"
+#define PROP_TX_RING_SIZE "tx_ring_size"
+#define PROP_RX_QUEUE_NUM "rx_queue_number"
+#define PROP_RX_RING_SIZE "rx_ring_size"
+
+#define PROP_INTR_FORCE "intr_force"
+#define PROP_TX_HCKSUM_ENABLE "tx_hcksum_enable"
+#define PROP_RX_HCKSUM_ENABLE "rx_hcksum_enable"
+#define PROP_LSO_ENABLE "lso_enable"
+#define PROP_TX_HEAD_WB_ENABLE "tx_head_wb_enable"
+#define PROP_TX_COPY_THRESHOLD "tx_copy_threshold"
+#define PROP_TX_RECYCLE_THRESHOLD "tx_recycle_threshold"
+#define PROP_TX_OVERLOAD_THRESHOLD "tx_overload_threshold"
+#define PROP_TX_RESCHED_THRESHOLD "tx_resched_threshold"
+#define PROP_RX_COPY_THRESHOLD "rx_copy_threshold"
+#define PROP_RX_LIMIT_PER_INTR "rx_limit_per_intr"
+#define PROP_INTR_THROTTLING "intr_throttling"
+#define PROP_FM_CAPABLE "fm_capable"
+
+#define IXGBE_LB_NONE 0
+#define IXGBE_LB_EXTERNAL 1
+#define IXGBE_LB_INTERNAL_MAC 2
+#define IXGBE_LB_INTERNAL_PHY 3
+#define IXGBE_LB_INTERNAL_SERDES 4
+
+/*
+ * Shorthand for the NDD parameters.
+ *
+ * Fix: the three *_100fdx_cap shorthands previously aliased the
+ * PARAM_*1000FDX_CAP slots (copy/paste error), so the 100fdx ndd
+ * variables silently read and wrote the 1000fdx parameter.  They now
+ * use the PARAM_*100FDX_CAP indexes defined in the enum below.
+ */
+#define	param_autoneg_cap	nd_params[PARAM_AUTONEG_CAP].val
+#define	param_pause_cap		nd_params[PARAM_PAUSE_CAP].val
+#define	param_asym_pause_cap	nd_params[PARAM_ASYM_PAUSE_CAP].val
+#define	param_10000fdx_cap	nd_params[PARAM_10000FDX_CAP].val
+#define	param_1000fdx_cap	nd_params[PARAM_1000FDX_CAP].val
+#define	param_100fdx_cap	nd_params[PARAM_100FDX_CAP].val
+#define	param_rem_fault		nd_params[PARAM_REM_FAULT].val
+
+#define	param_adv_autoneg_cap	nd_params[PARAM_ADV_AUTONEG_CAP].val
+#define	param_adv_pause_cap	nd_params[PARAM_ADV_PAUSE_CAP].val
+#define	param_adv_asym_pause_cap nd_params[PARAM_ADV_ASYM_PAUSE_CAP].val
+#define	param_adv_10000fdx_cap	nd_params[PARAM_ADV_10000FDX_CAP].val
+#define	param_adv_1000fdx_cap	nd_params[PARAM_ADV_1000FDX_CAP].val
+#define	param_adv_100fdx_cap	nd_params[PARAM_ADV_100FDX_CAP].val
+#define	param_adv_rem_fault	nd_params[PARAM_ADV_REM_FAULT].val
+
+#define	param_lp_autoneg_cap	nd_params[PARAM_LP_AUTONEG_CAP].val
+#define	param_lp_pause_cap	nd_params[PARAM_LP_PAUSE_CAP].val
+#define	param_lp_asym_pause_cap	nd_params[PARAM_LP_ASYM_PAUSE_CAP].val
+#define	param_lp_10000fdx_cap	nd_params[PARAM_LP_10000FDX_CAP].val
+#define	param_lp_1000fdx_cap	nd_params[PARAM_LP_1000FDX_CAP].val
+#define	param_lp_100fdx_cap	nd_params[PARAM_LP_100FDX_CAP].val
+#define	param_lp_rem_fault	nd_params[PARAM_LP_REM_FAULT].val
+
+enum ioc_reply {
+ IOC_INVAL = -1, /* bad, NAK with EINVAL */
+ IOC_DONE, /* OK, reply sent */
+ IOC_ACK, /* OK, just send ACK */
+ IOC_REPLY /* OK, just send reply */
+};
+
+#define MBLK_LEN(mp) ((uintptr_t)(mp)->b_wptr - \
+ (uintptr_t)(mp)->b_rptr)
+
+#define DMA_SYNC(area, flag) ((void) ddi_dma_sync((area)->dma_handle, \
+ 0, 0, (flag)))
+
+/*
+ * Defined for ring index operations
+ * ASSERT(index < limit)
+ * ASSERT(step < limit)
+ * ASSERT(index1 < limit)
+ * ASSERT(index2 < limit)
+ */
+#define NEXT_INDEX(index, step, limit) (((index) + (step)) < (limit) ? \
+ (index) + (step) : (index) + (step) - (limit))
+#define PREV_INDEX(index, step, limit) ((index) >= (step) ? \
+ (index) - (step) : (index) + (limit) - (step))
+#define OFFSET(index1, index2, limit) ((index1) <= (index2) ? \
+ (index2) - (index1) : (index2) + (limit) - (index1))
+
+/*
+ * Minimal singly-linked list support.  _LH is a link_list_t; elements
+ * embed a single_link_t.  Note these are statement macros, not
+ * expressions -- beware of using them in unbraced if/else arms.
+ */
+#define	LINK_LIST_INIT(_LH)	\
+	(_LH)->head = (_LH)->tail = NULL
+
+/* First element of the list, or NULL when empty */
+#define	LIST_GET_HEAD(_LH)	((single_link_t *)((_LH)->head))
+
+/*
+ * Yields the current head (first line), then unlinks it; the block
+ * also clears tail when the list becomes empty.
+ */
+#define	LIST_POP_HEAD(_LH)	\
+	(single_link_t *)(_LH)->head; \
+	{ \
+		if ((_LH)->head != NULL) { \
+			(_LH)->head = (_LH)->head->link; \
+			if ((_LH)->head == NULL) \
+				(_LH)->tail = NULL; \
+		} \
+	}
+
+/* Last element of the list, or NULL when empty */
+#define	LIST_GET_TAIL(_LH)	((single_link_t *)((_LH)->tail))
+
+/* Append element _E, handling both empty and non-empty lists */
+#define	LIST_PUSH_TAIL(_LH, _E)	\
+	if ((_LH)->tail != NULL) { \
+		(_LH)->tail->link = (single_link_t *)(_E); \
+		(_LH)->tail = (single_link_t *)(_E); \
+	} else { \
+		(_LH)->head = (_LH)->tail = (single_link_t *)(_E); \
+	} \
+	(_E)->link = NULL;
+
+/* Element after _E, or NULL when _E is the tail */
+#define	LIST_GET_NEXT(_LH, _E)	\
+	(((_LH)->tail == (single_link_t *)(_E)) ? \
+	NULL : ((single_link_t *)(_E))->link)
+
+
+typedef struct single_link {
+ struct single_link *link;
+} single_link_t;
+
+typedef struct link_list {
+ single_link_t *head;
+ single_link_t *tail;
+} link_list_t;
+
+/*
+ * Property lookups
+ */
+#define IXGBE_PROP_EXISTS(d, n) ddi_prop_exists(DDI_DEV_T_ANY, (d), \
+ DDI_PROP_DONTPASS, (n))
+#define IXGBE_PROP_GET_INT(d, n) ddi_prop_get_int(DDI_DEV_T_ANY, (d), \
+ DDI_PROP_DONTPASS, (n), -1)
+
+
+/*
+ * Named Data (ND) Parameter Management Structure
+ *
+ * One entry per ndd tunable; indexed by the PARAM_* enum that follows
+ * in this header.
+ */
+typedef struct {
+	struct ixgbe *private;	/* back pointer to the owning instance */
+	uint32_t info;		/* presumably r/w attribute flags -- confirm */
+	uint32_t min;		/* minimum allowed value */
+	uint32_t max;		/* maximum allowed value */
+	uint32_t val;		/* current value */
+	char *name;		/* ndd-visible parameter name */
+} nd_param_t;
+
+/*
+ * NDD parameter indexes, divided into:
+ *
+ * read-only parameters describing the hardware's capabilities
+ * read-write parameters controlling the advertised capabilities
+ * read-only parameters describing the partner's capabilities
+ * read-write parameters controlling the force speed and duplex
+ * read-only parameters describing the link state
+ * read-only parameters describing the driver properties
+ * read-write parameters controlling the driver properties
+ */
+enum {
+ PARAM_AUTONEG_CAP,
+ PARAM_PAUSE_CAP,
+ PARAM_ASYM_PAUSE_CAP,
+ PARAM_10000FDX_CAP,
+ PARAM_1000FDX_CAP,
+ PARAM_100FDX_CAP,
+ PARAM_REM_FAULT,
+
+ PARAM_ADV_AUTONEG_CAP,
+ PARAM_ADV_PAUSE_CAP,
+ PARAM_ADV_ASYM_PAUSE_CAP,
+ PARAM_ADV_10000FDX_CAP,
+ PARAM_ADV_1000FDX_CAP,
+ PARAM_ADV_100FDX_CAP,
+ PARAM_ADV_REM_FAULT,
+
+ PARAM_LP_AUTONEG_CAP,
+ PARAM_LP_PAUSE_CAP,
+ PARAM_LP_ASYM_PAUSE_CAP,
+ PARAM_LP_10000FDX_CAP,
+ PARAM_LP_1000FDX_CAP,
+ PARAM_LP_100FDX_CAP,
+ PARAM_LP_REM_FAULT,
+
+ PARAM_LINK_STATUS,
+ PARAM_LINK_SPEED,
+ PARAM_LINK_DUPLEX,
+
+ PARAM_COUNT
+};
+
+typedef union ixgbe_ether_addr {
+ struct {
+ uint32_t high;
+ uint32_t low;
+ } reg;
+ struct {
+ uint8_t set;
+ uint8_t redundant;
+ uint8_t addr[ETHERADDRL];
+ } mac;
+} ixgbe_ether_addr_t;
+
+typedef enum {
+ USE_NONE,
+ USE_COPY,
+ USE_DMA
+} tx_type_t;
+
+typedef enum {
+ RCB_FREE,
+ RCB_SENDUP
+} rcb_state_t;
+
+typedef struct hcksum_context {
+ uint32_t hcksum_flags;
+ uint32_t ip_hdr_len;
+ uint32_t mac_hdr_len;
+ uint32_t l4_proto;
+} hcksum_context_t;
+
+/*
+ * Hold address/length of each DMA segment
+ */
+typedef struct sw_desc {
+ uint64_t address;
+ size_t length;
+} sw_desc_t;
+
+/*
+ * Handles and addresses of a DMA buffer: one allocation together with
+ * its DDI access/DMA handles and both CPU and device views of it.
+ */
+typedef struct dma_buffer {
+ caddr_t address; /* Virtual address */
+ uint64_t dma_address; /* DMA (Hardware) address */
+ ddi_acc_handle_t acc_handle; /* Data access handle */
+ ddi_dma_handle_t dma_handle; /* DMA handle */
+ size_t size; /* Buffer size */
+ size_t len; /* Data length in the buffer */
+} dma_buffer_t;
+
+/*
+ * Tx Control Block: per-fragment(-group) transmit state. Small
+ * fragments are bcopy'd into tx_buf; a large fragment is DMA-bound
+ * through tx_dma_handle. desc[] accumulates the address/length pairs
+ * that will later be written into the tx descriptor ring.
+ */
+typedef struct tx_control_block {
+ single_link_t link; /* linkage for free/pending lists */
+ uint32_t frag_num; /* number of mblk fragments consumed */
+ uint32_t desc_num; /* number of tx descriptors required */
+ mblk_t *mp; /* mblk chain; attached to the packet's last tcb */
+ tx_type_t tx_type; /* USE_COPY or USE_DMA */
+ ddi_dma_handle_t tx_dma_handle; /* handle used for DMA binding */
+ dma_buffer_t tx_buf; /* preallocated copy buffer */
+ sw_desc_t desc[MAX_COOKIE]; /* saved address/length pairs */
+} tx_control_block_t;
+
+/*
+ * RX Control Block: per-buffer receive state.
+ */
+typedef struct rx_control_block {
+ mblk_t *mp; /* mblk wrapping the receive buffer */
+ rcb_state_t state; /* RCB_FREE or RCB_SENDUP */
+ dma_buffer_t rx_buf; /* receive DMA buffer */
+ frtn_t free_rtn; /* free-callback descriptor (see ixgbe_rx_recycle) */
+ struct ixgbe_rx_ring *rx_ring; /* owning rx ring */
+} rx_control_block_t;
+
+/*
+ * Software Data Structure for Tx Ring
+ */
+typedef struct ixgbe_tx_ring {
+ uint32_t index; /* Ring index */
+ uint32_t intr_vector; /* Interrupt vector index */
+ uint32_t vect_bit; /* vector's bit in register */
+
+ /*
+ * Mutexes
+ */
+ kmutex_t tx_lock; /* protects descriptor ring fill */
+ kmutex_t recycle_lock;
+ kmutex_t tcb_head_lock;
+ kmutex_t tcb_tail_lock;
+
+ /*
+ * Tx descriptor ring definitions
+ */
+ dma_buffer_t tbd_area; /* DMA buffer of tx desc ring */
+ union ixgbe_adv_tx_desc *tbd_ring;
+ uint32_t tbd_head; /* Index of next tbd to recycle */
+ uint32_t tbd_tail; /* Index of next tbd to transmit */
+ uint32_t tbd_free; /* Number of free tbd */
+
+ /*
+ * Tx control block list definitions
+ */
+ tx_control_block_t *tcb_area; /* backing array of tcbs */
+ tx_control_block_t **work_list; /* tcb slot per descriptor index */
+ tx_control_block_t **free_list; /* free tcbs awaiting use */
+ uint32_t tcb_head; /* Head index of free list */
+ uint32_t tcb_tail; /* Tail index of free list */
+ uint32_t tcb_free; /* Number of free tcb in free list */
+
+ uint32_t *tbd_head_wb; /* Head write-back */
+ uint32_t (*tx_recycle)(struct ixgbe_tx_ring *); /* legacy or head-wb recycle routine */
+
+ /*
+ * TCP/UDP checksum offload: context of the last context
+ * descriptor written to the ring (see ixgbe_tx_fill_ring).
+ */
+ hcksum_context_t hcksum_context;
+
+ /*
+ * Tx ring settings and status
+ */
+ uint32_t ring_size; /* Tx descriptor ring size */
+ uint32_t free_list_size; /* Tx free list size */
+ uint32_t copy_thresh; /* bcopy vs. DMA-bind cutoff (bytes) */
+ uint32_t recycle_thresh; /* free-tbd level triggering a recycle */
+ uint32_t overload_thresh; /* free-tbd level that asserts overload */
+ uint32_t resched_thresh; /* NOTE(review): presumably the free-tbd level at which a blocked ring is rescheduled — confirm in tx_recycle */
+
+ boolean_t reschedule; /* set when tx must be retried later */
+ uint32_t recycle_fail;
+ uint32_t stall_watchdog;
+
+#ifdef IXGBE_DEBUG
+ /*
+ * Debug statistics
+ */
+ uint32_t stat_overload;
+ uint32_t stat_fail_no_tbd;
+ uint32_t stat_fail_no_tcb;
+ uint32_t stat_fail_dma_bind;
+ uint32_t stat_reschedule;
+#endif
+
+ /*
+ * Pointer to the ixgbe struct
+ */
+ struct ixgbe *ixgbe;
+
+} ixgbe_tx_ring_t;
+
+/*
+ * Software Receive Ring
+ */
+typedef struct ixgbe_rx_ring {
+ uint32_t index; /* Ring index */
+ uint32_t intr_vector; /* Interrupt vector index */
+ uint32_t vect_bit; /* vector's bit in register */
+
+ /*
+ * Mutexes
+ */
+ kmutex_t rx_lock; /* Rx access lock */
+ kmutex_t recycle_lock; /* Recycle lock, for rcb_tail */
+
+ /*
+ * Rx descriptor ring definitions
+ */
+ dma_buffer_t rbd_area; /* DMA buffer of rx desc ring */
+ union ixgbe_adv_rx_desc *rbd_ring; /* Rx desc ring */
+ uint32_t rbd_next; /* Index of next rx desc */
+
+ /*
+ * Rx control block list definitions
+ */
+ rx_control_block_t *rcb_area; /* backing array of rcbs */
+ rx_control_block_t **work_list; /* Work list of rcbs */
+ rx_control_block_t **free_list; /* Free list of rcbs */
+ uint32_t rcb_head; /* Index of next free rcb */
+ uint32_t rcb_tail; /* Index to put recycled rcb */
+ uint32_t rcb_free; /* Number of free rcbs */
+
+ /*
+ * Rx ring settings and status
+ */
+ uint32_t ring_size; /* Rx descriptor ring size */
+ uint32_t free_list_size; /* Rx free list size */
+ uint32_t limit_per_intr; /* Max packets per interrupt */
+ uint32_t copy_thresh; /* copy threshold (bytes); see ixgbe_rx.c */
+
+#ifdef IXGBE_DEBUG
+ /*
+ * Debug statistics
+ */
+ uint32_t stat_frame_error;
+ uint32_t stat_cksum_error;
+ uint32_t stat_exceed_pkt;
+#endif
+
+ struct ixgbe *ixgbe; /* Pointer to ixgbe struct */
+
+} ixgbe_rx_ring_t;
+
+/*
+ * Structure to map rx/tx ring cleanup work onto one MSI-X vector.
+ * Rings are recorded as bits in the rx_map/tx_map bitmaps.
+ */
+typedef struct ixgbe_ring_vector {
+ struct ixgbe *ixgbe; /* point to my adapter */
+ ulong_t rx_map[BT_BITOUL(MAX_RX_QUEUE_NUM)]; /* bitmap of rx rings */
+ int rxr_cnt; /* count rx rings */
+ ulong_t tx_map[BT_BITOUL(MAX_TX_QUEUE_NUM)]; /* bitmap of tx rings */
+ int txr_cnt; /* count tx rings */
+} ixgbe_ring_vector_t;
+
+/*
+ * Software adapter state: one instance per attached device.
+ */
+typedef struct ixgbe {
+ int instance; /* driver instance number */
+ mac_handle_t mac_hdl; /* GLDv3 mac handle */
+ dev_info_t *dip; /* devinfo node */
+ struct ixgbe_hw hw; /* shared-code hardware state */
+ struct ixgbe_osdep osdep; /* OS-dependent glue */
+
+ uint32_t ixgbe_state; /* IXGBE_STARTED/IXGBE_SUSPENDED flags */
+ link_state_t link_state;
+ uint32_t link_speed;
+ uint32_t link_duplex;
+ uint32_t link_down_timeout;
+
+ uint32_t reset_count;
+ uint32_t attach_progress;
+ uint32_t loopback_mode;
+ uint32_t default_mtu;
+ uint32_t max_frame_size; /* includes FCS (see ixgbe_tx) */
+
+ /*
+ * Each msi-x vector: map vector to ring cleanup
+ */
+ ixgbe_ring_vector_t vect_map[IXGBE_MAX_RING_VECTOR];
+
+ /*
+ * Receive Rings
+ */
+ ixgbe_rx_ring_t *rx_rings; /* Array of rx rings */
+ uint32_t num_rx_rings; /* Number of rx rings in use */
+ uint32_t rx_ring_size; /* Rx descriptor ring size */
+ uint32_t rx_buf_size; /* Rx buffer size */
+
+ /*
+ * Transmit Rings
+ */
+ ixgbe_tx_ring_t *tx_rings; /* Array of tx rings */
+ uint32_t num_tx_rings; /* Number of tx rings in use */
+ uint32_t tx_ring_size; /* Tx descriptor ring size */
+ uint32_t tx_buf_size; /* Tx buffer size */
+
+ boolean_t tx_head_wb_enable; /* Tx head write-back */
+ boolean_t tx_hcksum_enable; /* Tx h/w cksum offload */
+ boolean_t lso_enable; /* Large Segment Offload */
+ uint32_t tx_copy_thresh; /* Tx copy threshold */
+ uint32_t tx_recycle_thresh; /* Tx recycle threshold */
+ uint32_t tx_overload_thresh; /* Tx overload threshold */
+ uint32_t tx_resched_thresh; /* Tx reschedule threshold */
+ boolean_t rx_hcksum_enable; /* Rx h/w cksum offload */
+ uint32_t rx_copy_thresh; /* Rx copy threshold */
+ uint32_t rx_limit_per_intr; /* Rx pkts per interrupt */
+ uint32_t intr_throttling[IXGBE_MAX_RING_VECTOR];
+ uint32_t intr_force;
+ int fm_capabilities; /* FMA capabilities */
+
+ /*
+ * Interrupt resources
+ */
+ int intr_type;
+ int intr_cnt;
+ int intr_cap;
+ size_t intr_size;
+ uint_t intr_pri;
+ ddi_intr_handle_t *htable;
+ uint32_t eims_mask;
+
+ kmutex_t gen_lock; /* General lock for device access */
+ kmutex_t watchdog_lock;
+
+ boolean_t watchdog_enable;
+ boolean_t watchdog_start;
+ timeout_id_t watchdog_tid;
+
+ /*
+ * Unicast/multicast address tables
+ */
+ boolean_t unicst_init;
+ uint32_t unicst_avail;
+ uint32_t unicst_total;
+ ixgbe_ether_addr_t unicst_addr[MAX_NUM_UNICAST_ADDRESSES];
+ uint32_t mcast_count;
+ struct ether_addr mcast_table[MAX_NUM_MULTICAST_ADDRESSES];
+
+ /*
+ * Kstat definitions
+ */
+ kstat_t *ixgbe_ks;
+
+ /*
+ * NDD definitions
+ */
+ caddr_t nd_data;
+ nd_param_t nd_params[PARAM_COUNT];
+
+} ixgbe_t;
+
+/*
+ * Named kstat counters exported by the driver (see ixgbe_stat.c).
+ * NOTE(review): the short names presumably mirror the hardware
+ * statistics registers of the same names — confirm in ixgbe_stat.c.
+ */
+typedef struct ixgbe_stat {
+
+ kstat_named_t link_speed; /* Link Speed */
+#ifdef IXGBE_DEBUG
+ kstat_named_t reset_count; /* Reset Count */
+
+ kstat_named_t rx_frame_error; /* Rx Error in Packet */
+ kstat_named_t rx_cksum_error; /* Rx Checksum Error */
+ kstat_named_t rx_exceed_pkt; /* Rx Exceed Max Pkt Count */
+
+ kstat_named_t tx_overload; /* Tx Desc Ring Overload */
+ kstat_named_t tx_fail_no_tcb; /* Tx Fail Freelist Empty */
+ kstat_named_t tx_fail_no_tbd; /* Tx Fail Desc Ring Empty */
+ kstat_named_t tx_fail_dma_bind; /* Tx Fail DMA bind */
+ kstat_named_t tx_reschedule; /* Tx Reschedule */
+
+ kstat_named_t gprc; /* Good Packets Received Count */
+ kstat_named_t gptc; /* Good Packets Xmitted Count */
+ kstat_named_t gor; /* Good Octets Received Count */
+ kstat_named_t got; /* Good Octets Xmitd Count */
+ kstat_named_t prc64; /* Packets Received - 64b */
+ kstat_named_t prc127; /* Packets Received - 65-127b */
+ kstat_named_t prc255; /* Packets Received - 127-255b */
+ kstat_named_t prc511; /* Packets Received - 256-511b */
+ kstat_named_t prc1023; /* Packets Received - 511-1023b */
+ kstat_named_t prc1522; /* Packets Received - 1024-1522b */
+ kstat_named_t ptc64; /* Packets Xmitted (64b) */
+ kstat_named_t ptc127; /* Packets Xmitted (64-127b) */
+ kstat_named_t ptc255; /* Packets Xmitted (128-255b) */
+ kstat_named_t ptc511; /* Packets Xmitted (255-511b) */
+ kstat_named_t ptc1023; /* Packets Xmitted (512-1023b) */
+ kstat_named_t ptc1522; /* Packets Xmitted (1024-1522b) */
+#endif
+ kstat_named_t crcerrs; /* CRC Error Count */
+ kstat_named_t illerrc; /* Illegal Byte Error Count */
+ kstat_named_t errbc; /* Error Byte Count */
+ kstat_named_t mspdc; /* MAC Short Packet Discard Count */
+ kstat_named_t mpc; /* Missed Packets Count */
+ kstat_named_t mlfc; /* MAC Local Fault Count */
+ kstat_named_t mrfc; /* MAC Remote Fault Count */
+ kstat_named_t rlec; /* Receive Length Error Count */
+ kstat_named_t lxontxc; /* Link XON Transmitted Count */
+ kstat_named_t lxonrxc; /* Link XON Received Count */
+ kstat_named_t lxofftxc; /* Link XOFF Transmitted Count */
+ kstat_named_t lxoffrxc; /* Link XOFF Received Count */
+ kstat_named_t bprc; /* Broadcasts Pkts Received Count */
+ kstat_named_t mprc; /* Multicast Pkts Received Count */
+ kstat_named_t rnbc; /* Receive No Buffers Count */
+ kstat_named_t ruc; /* Receive Undersize Count */
+ kstat_named_t rfc; /* Receive Frag Count */
+ kstat_named_t roc; /* Receive Oversize Count */
+ kstat_named_t rjc; /* Receive Jabber Count */
+ kstat_named_t tor; /* Total Octets Recvd Count */
+ kstat_named_t tpr; /* Total Packets Received */
+ kstat_named_t tpt; /* Total Packets Xmitted */
+ kstat_named_t mptc; /* Multicast Packets Xmitted Count */
+ kstat_named_t bptc; /* Broadcast Packets Xmitted Count */
+} ixgbe_stat_t;
+
+/*
+ * Function prototypes in ixgbe_buf.c
+ */
+int ixgbe_alloc_dma(ixgbe_t *);
+void ixgbe_free_dma(ixgbe_t *);
+void ixgbe_set_fma_flags(int, int);
+
+/*
+ * Function prototypes in ixgbe_main.c
+ */
+int ixgbe_start(ixgbe_t *);
+void ixgbe_stop(ixgbe_t *);
+int ixgbe_driver_setup_link(ixgbe_t *, boolean_t);
+int ixgbe_unicst_set(ixgbe_t *, const uint8_t *, mac_addr_slot_t);
+int ixgbe_multicst_add(ixgbe_t *, const uint8_t *);
+int ixgbe_multicst_remove(ixgbe_t *, const uint8_t *);
+enum ioc_reply ixgbe_loopback_ioctl(ixgbe_t *, struct iocblk *, mblk_t *);
+
+void ixgbe_enable_watchdog_timer(ixgbe_t *);
+void ixgbe_disable_watchdog_timer(ixgbe_t *);
+int ixgbe_atomic_reserve(uint32_t *, uint32_t);
+
+int ixgbe_check_acc_handle(ddi_acc_handle_t handle);
+int ixgbe_check_dma_handle(ddi_dma_handle_t handle);
+void ixgbe_fm_ereport(ixgbe_t *, char *);
+
+/*
+ * Function prototypes in ixgbe_gld.c
+ */
+int ixgbe_m_start(void *);
+void ixgbe_m_stop(void *);
+int ixgbe_m_promisc(void *, boolean_t);
+int ixgbe_m_multicst(void *, boolean_t, const uint8_t *);
+int ixgbe_m_unicst(void *, const uint8_t *);
+int ixgbe_m_stat(void *, uint_t, uint64_t *);
+void ixgbe_m_resources(void *);
+void ixgbe_m_ioctl(void *, queue_t *, mblk_t *);
+int ixgbe_m_unicst_add(void *, mac_multi_addr_t *);
+int ixgbe_m_unicst_remove(void *, mac_addr_slot_t);
+int ixgbe_m_unicst_modify(void *, mac_multi_addr_t *);
+int ixgbe_m_unicst_get(void *, mac_multi_addr_t *);
+boolean_t ixgbe_m_getcapab(void *, mac_capab_t, void *);
+
+/*
+ * Function prototypes in ixgbe_rx.c
+ */
+mblk_t *ixgbe_rx(ixgbe_rx_ring_t *);
+void ixgbe_rx_recycle(caddr_t arg);
+
+/*
+ * Function prototypes in ixgbe_tx.c
+ */
+mblk_t *ixgbe_m_tx(void *, mblk_t *);
+void ixgbe_free_tcb(tx_control_block_t *);
+void ixgbe_put_free_list(ixgbe_tx_ring_t *, link_list_t *);
+uint32_t ixgbe_tx_recycle_legacy(ixgbe_tx_ring_t *);
+uint32_t ixgbe_tx_recycle_head_wb(ixgbe_tx_ring_t *);
+
+/*
+ * Function prototypes in ixgbe_log.c
+ */
+void ixgbe_notice(void *, const char *, ...);
+void ixgbe_log(void *, const char *, ...);
+void ixgbe_error(void *, const char *, ...);
+
+/*
+ * Function prototypes in ixgbe_ndd.c
+ */
+int ixgbe_nd_init(ixgbe_t *);
+void ixgbe_nd_cleanup(ixgbe_t *);
+enum ioc_reply ixgbe_nd_ioctl(ixgbe_t *, queue_t *, mblk_t *, struct iocblk *);
+
+/*
+ * Function prototypes in ixgbe_stat.c
+ */
+int ixgbe_init_stats(ixgbe_t *);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _IXGBE_SW_H */
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_tx.c b/usr/src/uts/common/io/ixgbe/ixgbe_tx.c
new file mode 100644
index 0000000000..ad6cac1e8d
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_tx.c
@@ -0,0 +1,1320 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_sw.h"
+
+static boolean_t ixgbe_tx(ixgbe_tx_ring_t *, mblk_t *);
+static int ixgbe_tx_copy(ixgbe_tx_ring_t *, tx_control_block_t *, mblk_t *,
+ uint32_t, boolean_t, boolean_t);
+static int ixgbe_tx_bind(ixgbe_tx_ring_t *, tx_control_block_t *, mblk_t *,
+ uint32_t);
+static int ixgbe_tx_fill_ring(ixgbe_tx_ring_t *, link_list_t *,
+ hcksum_context_t *);
+static void ixgbe_save_desc(tx_control_block_t *, uint64_t, size_t);
+static tx_control_block_t *ixgbe_get_free_list(ixgbe_tx_ring_t *);
+
+static void ixgbe_get_hcksum_context(mblk_t *, hcksum_context_t *);
+static boolean_t ixgbe_check_hcksum_context(ixgbe_tx_ring_t *,
+ hcksum_context_t *);
+static void ixgbe_fill_hcksum_context(struct ixgbe_adv_tx_context_desc *,
+ hcksum_context_t *);
+
+#ifndef IXGBE_DEBUG
+#pragma inline(ixgbe_save_desc)
+#pragma inline(ixgbe_get_hcksum_context)
+#pragma inline(ixgbe_check_hcksum_context)
+#pragma inline(ixgbe_fill_hcksum_context)
+#endif
+
+/*
+ * ixgbe_m_tx
+ *
+ * The GLDv3 interface to call driver's tx routine to transmit
+ * the mblks.
+ *
+ * Returns NULL when every mblk was consumed (transmitted, or dropped
+ * because the device is suspended/stopped or the link is down);
+ * otherwise returns the chain of unsent mblks so the framework can
+ * retry them later.
+ */
+mblk_t *
+ixgbe_m_tx(void *arg, mblk_t *mp)
+{
+ ixgbe_t *ixgbe = (ixgbe_t *)arg;
+ mblk_t *next;
+ ixgbe_tx_ring_t *tx_ring;
+
+ /*
+ * If the adapter is suspended, or it is not started, or the link
+ * is not up, the mblks are simply dropped.
+ */
+ if (((ixgbe->ixgbe_state & IXGBE_SUSPENDED) != 0) ||
+ ((ixgbe->ixgbe_state & IXGBE_STARTED) == 0) ||
+ (ixgbe->link_state != LINK_STATE_UP)) {
+ /* Free the mblk chain */
+ while (mp != NULL) {
+ next = mp->b_next;
+ mp->b_next = NULL;
+
+ freemsg(mp);
+ mp = next;
+ }
+
+ return (NULL);
+ }
+
+ /*
+ * Decide which tx ring is used to transmit the packets.
+ * This needs to be updated later to fit the new interface
+ * of the multiple rings support.
+ */
+ tx_ring = &ixgbe->tx_rings[0];
+
+ while (mp != NULL) {
+ next = mp->b_next;
+ mp->b_next = NULL;
+
+ /* On failure, reattach the remainder and hand it back */
+ if (!ixgbe_tx(tx_ring, mp)) {
+ mp->b_next = next;
+ break;
+ }
+
+ mp = next;
+ }
+
+ return (mp);
+}
+
+/*
+ * ixgbe_tx - Main transmit processing
+ *
+ * Called from ixgbe_m_tx with an mblk ready to transmit. This
+ * routine sets up the transmit descriptors and sends data to
+ * the wire.
+ *
+ * One mblk can consist of several fragments, each fragment
+ * will be processed with different methods based on the size.
+ * For the fragments with size less than the bcopy threshold,
+ * they will be processed by using bcopy; otherwise, they will
+ * be processed by using DMA binding.
+ *
+ * To process the mblk, a tx control block is taken from the
+ * free list. One tx control block contains one tx buffer, which
+ * is used to copy mblk fragments' data; and one tx DMA handle,
+ * which is used to bind a mblk fragment with DMA resource.
+ *
+ * Several small mblk fragments can be copied into one tx control
+ * block's buffer, and then the buffer will be transmitted with
+ * one tx descriptor.
+ *
+ * A large fragment only binds with one tx control block's DMA
+ * handle, and it can span several tx descriptors for transmitting.
+ *
+ * So to transmit a packet (mblk), several tx control blocks can
+ * be used. After the processing, those tx control blocks will
+ * be put to the work list.
+ *
+ * Returns B_TRUE when the mblk was consumed (transmitted, or dropped
+ * because it is oversized); B_FALSE when transmit must be rescheduled
+ * (descriptor/tcb shortage) and the caller should retry the mblk.
+ */
+static boolean_t
+ixgbe_tx(ixgbe_tx_ring_t *tx_ring, mblk_t *mp)
+{
+ ixgbe_t *ixgbe = tx_ring->ixgbe;
+ tx_type_t current_flag, next_flag;
+ uint32_t current_len, next_len;
+ uint32_t desc_total;
+ size_t mbsize;
+ int desc_num;
+ boolean_t copy_done, eop;
+ mblk_t *current_mp, *next_mp, *nmp;
+ tx_control_block_t *tcb;
+ hcksum_context_t hcksum_context, *hcksum;
+ link_list_t pending_list;
+
+ /* Get the mblk size */
+ mbsize = 0;
+ for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
+ mbsize += MBLK_LEN(nmp);
+ }
+
+ /*
+ * If the mblk size exceeds the max frame size,
+ * discard this mblk, and return B_TRUE
+ */
+ if (mbsize > (ixgbe->max_frame_size - ETHERFCSL)) {
+ freemsg(mp);
+ IXGBE_DEBUGLOG_0(ixgbe, "ixgbe_tx: packet oversize");
+ return (B_TRUE);
+ }
+
+ /*
+ * Check and recycle tx descriptors.
+ * The recycle threshold here should be selected carefully
+ */
+ if (tx_ring->tbd_free < tx_ring->recycle_thresh)
+ tx_ring->tx_recycle(tx_ring);
+
+ /*
+ * After the recycling, if the tbd_free is less than the
+ * overload_threshold, assert overload, return B_FALSE;
+ * and we need to re-schedule the tx again.
+ */
+ if (tx_ring->tbd_free < tx_ring->overload_thresh) {
+ tx_ring->reschedule = B_TRUE;
+ IXGBE_DEBUG_STAT(tx_ring->stat_overload);
+ return (B_FALSE);
+ }
+
+ /*
+ * The pending_list is a linked list that is used to save
+ * the tx control blocks that have packet data processed
+ * but have not put the data to the tx descriptor ring.
+ * It is used to reduce the lock contention of the tx_lock.
+ */
+ LINK_LIST_INIT(&pending_list);
+ desc_num = 0;
+ desc_total = 0;
+
+ current_mp = mp;
+ current_len = MBLK_LEN(current_mp);
+ /*
+ * Decide which method to use for the first fragment
+ */
+ current_flag = (current_len <= tx_ring->copy_thresh) ?
+ USE_COPY : USE_DMA;
+ /*
+ * If the mblk includes several contiguous small fragments,
+ * they may be copied into one buffer. This flag is used to
+ * indicate whether there are pending fragments that need to
+ * be copied to the current tx buffer.
+ *
+ * If this flag is B_TRUE, it indicates that a new tx control
+ * block is needed to process the next fragment using either
+ * copy or DMA binding.
+ *
+ * Otherwise, it indicates that the next fragment will be
+ * copied to the current tx buffer that is maintained by the
+ * current tx control block. No new tx control block is needed.
+ */
+ copy_done = B_TRUE;
+ while (current_mp) {
+ next_mp = current_mp->b_cont;
+ eop = (next_mp == NULL); /* Last fragment of the packet? */
+ next_len = eop ? 0: MBLK_LEN(next_mp);
+
+ /*
+ * When the current fragment is an empty fragment, if
+ * the next fragment will still be copied to the current
+ * tx buffer, we cannot skip this fragment here. Because
+ * the copy processing is pending for completion. We have
+ * to process this empty fragment in the tx_copy routine.
+ *
+ * If the copy processing is completed or a DMA binding
+ * processing is just completed, we can just skip this
+ * empty fragment.
+ */
+ if ((current_len == 0) && (copy_done)) {
+ current_mp = next_mp;
+ current_len = next_len;
+ current_flag = (current_len <= tx_ring->copy_thresh) ?
+ USE_COPY : USE_DMA;
+ continue;
+ }
+
+ if (copy_done) {
+ /*
+ * Get a new tx control block from the free list
+ */
+ tcb = ixgbe_get_free_list(tx_ring);
+
+ if (tcb == NULL) {
+ IXGBE_DEBUG_STAT(tx_ring->stat_fail_no_tcb);
+ goto tx_failure;
+ }
+
+ /*
+ * Push the tx control block to the pending list
+ * to avoid using lock too early
+ */
+ LIST_PUSH_TAIL(&pending_list, &tcb->link);
+ }
+
+ if (current_flag == USE_COPY) {
+ /*
+ * Check whether to use bcopy or DMA binding to process
+ * the next fragment, and if using bcopy, whether we
+ * need to continue copying the next fragment into the
+ * current tx buffer.
+ */
+ ASSERT((tcb->tx_buf.len + current_len) <=
+ tcb->tx_buf.size);
+
+ if (eop) {
+ /*
+ * This is the last fragment of the packet, so
+ * the copy processing will be completed with
+ * this fragment.
+ */
+ next_flag = USE_NONE;
+ copy_done = B_TRUE;
+ } else if ((tcb->tx_buf.len + current_len + next_len) >
+ tcb->tx_buf.size) {
+ /*
+ * If the next fragment is too large to be
+ * copied to the current tx buffer, we need
+ * to complete the current copy processing.
+ */
+ next_flag = (next_len > tx_ring->copy_thresh) ?
+ USE_DMA: USE_COPY;
+ copy_done = B_TRUE;
+ } else if (next_len > tx_ring->copy_thresh) {
+ /*
+ * The next fragment needs to be processed with
+ * DMA binding. So the copy processing will be
+ * completed with the current fragment.
+ */
+ next_flag = USE_DMA;
+ copy_done = B_TRUE;
+ } else {
+ /*
+ * Continue to copy the next fragment to the
+ * current tx buffer.
+ */
+ next_flag = USE_COPY;
+ copy_done = B_FALSE;
+ }
+
+ desc_num = ixgbe_tx_copy(tx_ring, tcb, current_mp,
+ current_len, copy_done, eop);
+ } else {
+ /*
+ * Check whether to use bcopy or DMA binding to process
+ * the next fragment.
+ */
+ next_flag = (next_len > tx_ring->copy_thresh) ?
+ USE_DMA: USE_COPY;
+ ASSERT(copy_done == B_TRUE);
+
+ desc_num = ixgbe_tx_bind(tx_ring, tcb, current_mp,
+ current_len);
+ }
+
+ /* desc_num < 0 signals a DMA bind failure */
+ if (desc_num > 0)
+ desc_total += desc_num;
+ else if (desc_num < 0)
+ goto tx_failure;
+
+ current_mp = next_mp;
+ current_len = next_len;
+ current_flag = next_flag;
+ }
+
+ /*
+ * Attach the mblk to the last tx control block.
+ * NOTE(review): tcb is only assigned inside the loop; if every
+ * fragment were empty, tcb would be used uninitialized here —
+ * presumably an all-zero-length chain cannot reach this point,
+ * but confirm.
+ */
+ ASSERT(tcb);
+ ASSERT(tcb->mp == NULL);
+ tcb->mp = mp;
+
+ if (ixgbe->tx_hcksum_enable) {
+ /*
+ * Retrieve checksum context information from the mblk that will
+ * be used to decide whether/how to fill the context descriptor.
+ */
+ hcksum = &hcksum_context;
+ ixgbe_get_hcksum_context(mp, hcksum);
+ } else {
+ hcksum = NULL;
+ }
+
+ /*
+ * Before fill the tx descriptor ring with the data, we need to
+ * ensure there are adequate free descriptors for transmit
+ * (including one context descriptor).
+ */
+ if (tx_ring->tbd_free < (desc_total + 1)) {
+ tx_ring->tx_recycle(tx_ring);
+ }
+
+ mutex_enter(&tx_ring->tx_lock);
+
+ /*
+ * If the number of free tx descriptors is not enough for transmit
+ * then return failure.
+ *
+ * Note: we must put this check under the mutex protection to
+ * ensure the correctness when multiple threads access it in
+ * parallel.
+ */
+ if (tx_ring->tbd_free < (desc_total + 1)) {
+ IXGBE_DEBUG_STAT(tx_ring->stat_fail_no_tbd);
+ mutex_exit(&tx_ring->tx_lock);
+ goto tx_failure;
+ }
+
+ desc_num = ixgbe_tx_fill_ring(tx_ring, &pending_list, hcksum);
+
+ /* desc_total + 1 accounts for an optional context descriptor */
+ ASSERT((desc_num == desc_total) || (desc_num == (desc_total + 1)));
+
+ mutex_exit(&tx_ring->tx_lock);
+
+ return (B_TRUE);
+
+tx_failure:
+ /*
+ * Discard the mblk and free the used resources
+ */
+ tcb = (tx_control_block_t *)LIST_GET_HEAD(&pending_list);
+ while (tcb) {
+ tcb->mp = NULL;
+
+ ixgbe_free_tcb(tcb);
+
+ tcb = (tx_control_block_t *)
+ LIST_GET_NEXT(&pending_list, &tcb->link);
+ }
+
+ /*
+ * Return the tx control blocks in the pending list to the free list.
+ */
+ ixgbe_put_free_list(tx_ring, &pending_list);
+
+ /* Transmit failed, do not drop the mblk, reschedule the transmit */
+ tx_ring->reschedule = B_TRUE;
+
+ return (B_FALSE);
+}
+
+/*
+ * ixgbe_tx_copy
+ *
+ * Copy the mblk fragment to the pre-allocated tx buffer.
+ *
+ * len is the fragment length; copy_done indicates this fragment
+ * closes out the current copy buffer, and eop that it is the last
+ * fragment of the packet. Returns the number of tx descriptors
+ * consumed: 0 while copying is still pending, 1 once the buffer is
+ * closed out and its descriptor data saved.
+ */
+static int
+ixgbe_tx_copy(ixgbe_tx_ring_t *tx_ring, tx_control_block_t *tcb, mblk_t *mp,
+ uint32_t len, boolean_t copy_done, boolean_t eop)
+{
+ dma_buffer_t *tx_buf;
+ uint32_t desc_num;
+ _NOTE(ARGUNUSED(tx_ring));
+
+ tx_buf = &tcb->tx_buf;
+
+ /*
+ * Copy the packet data of the mblk fragment into the
+ * pre-allocated tx buffer, which is maintained by the
+ * tx control block.
+ *
+ * Several mblk fragments can be copied into one tx buffer.
+ * The destination address of the current copied fragment in
+ * the tx buffer is next to the end of the previous copied
+ * fragment.
+ */
+ if (len > 0) {
+ bcopy(mp->b_rptr, tx_buf->address + tx_buf->len, len);
+
+ tx_buf->len += len;
+ tcb->frag_num++;
+ }
+
+ desc_num = 0;
+
+ /*
+ * If it is the last fragment copied to the current tx buffer,
+ * in other words, if there's no remaining fragment or the remaining
+ * fragment requires a new tx control block to process, we need to
+ * complete the current copy processing by syncing up the current
+ * DMA buffer and saving the descriptor data.
+ */
+ if (copy_done) {
+ /*
+ * For the packet smaller than 64 bytes, we need to
+ * pad it to 60 bytes. The NIC hardware will add 4
+ * bytes of CRC.
+ */
+ if (eop && (tx_buf->len < ETHERMIN)) {
+ bzero(tx_buf->address + tx_buf->len,
+ ETHERMIN - tx_buf->len);
+ tx_buf->len = ETHERMIN;
+ }
+
+ /*
+ * Sync the DMA buffer of the packet data
+ */
+ DMA_SYNC(tx_buf, DDI_DMA_SYNC_FORDEV);
+
+ tcb->tx_type = USE_COPY;
+
+ /*
+ * Save the address and length to the private data structure
+ * of the tx control block, which will be used to fill the
+ * tx descriptor ring after all the fragments are processed.
+ */
+ ixgbe_save_desc(tcb, tx_buf->dma_address, tx_buf->len);
+ desc_num++;
+ }
+
+ return (desc_num);
+}
+
+/*
+ * ixgbe_tx_bind
+ *
+ * Bind the mblk fragment with DMA.
+ *
+ * Returns -1 if the DMA bind fails; otherwise the number of DMA
+ * cookies produced, which equals the number of tx descriptors that
+ * will be needed for this fragment.
+ */
+static int
+ixgbe_tx_bind(ixgbe_tx_ring_t *tx_ring, tx_control_block_t *tcb, mblk_t *mp,
+ uint32_t len)
+{
+ int status, i;
+ ddi_dma_cookie_t dma_cookie;
+ uint_t ncookies;
+ int desc_num;
+
+ /*
+ * Use DMA binding to process the mblk fragment
+ */
+ status = ddi_dma_addr_bind_handle(tcb->tx_dma_handle, NULL,
+ (caddr_t)mp->b_rptr, len,
+ DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
+ 0, &dma_cookie, &ncookies);
+
+ if (status != DDI_DMA_MAPPED) {
+ IXGBE_DEBUG_STAT(tx_ring->stat_fail_dma_bind);
+ return (-1);
+ }
+
+ /* The handle stays bound; it is unbound later via ixgbe_free_tcb */
+ tcb->frag_num++;
+ tcb->tx_type = USE_DMA;
+ /*
+ * Each fragment can span several cookies. One cookie will have
+ * one tx descriptor to transmit.
+ */
+ desc_num = 0;
+ for (i = ncookies; i > 0; i--) {
+ /*
+ * Save the address and length to the private data structure
+ * of the tx control block, which will be used to fill the
+ * tx descriptor ring after all the fragments are processed.
+ */
+ ixgbe_save_desc(tcb,
+ dma_cookie.dmac_laddress,
+ dma_cookie.dmac_size);
+
+ desc_num++;
+
+ if (i > 1)
+ ddi_dma_nextcookie(tcb->tx_dma_handle, &dma_cookie);
+ }
+
+ return (desc_num);
+}
+
+/*
+ * ixgbe_get_hcksum_context
+ *
+ * Get the hcksum context information from the mblk: the HCK_* flags,
+ * the MAC header length (plain or VLAN-tagged), the L4 start offset,
+ * and the L4 protocol number parsed from the IP(v6) header.
+ *
+ * NOTE(review): on the early-return paths only hcksum->hcksum_flags
+ * is written; mac_hdr_len/ip_hdr_len/l4_proto are left stale. Callers
+ * gate on hcksum_flags != 0 before using the other fields — confirm
+ * this holds for the unknown-ethertype path, where flags stay nonzero.
+ */
+static void
+ixgbe_get_hcksum_context(mblk_t *mp, hcksum_context_t *hcksum)
+{
+ uint32_t start;
+ uint32_t flags;
+ uint32_t len;
+ uint32_t size;
+ uint32_t offset;
+ unsigned char *pos;
+ ushort_t etype;
+ uint32_t mac_hdr_len;
+ uint32_t l4_proto;
+
+ ASSERT(mp != NULL);
+
+ hcksum_retrieve(mp, NULL, NULL, &start, NULL, NULL, NULL, &flags);
+
+ hcksum->hcksum_flags = flags;
+
+ if (flags == 0)
+ return;
+
+ etype = 0;
+ mac_hdr_len = 0;
+ l4_proto = 0;
+
+ /*
+ * Firstly get the position of the ether_type/ether_tpid.
+ * Here we don't assume the ether (VLAN) header is fully included
+ * in one mblk fragment, so we go through the fragments to parse
+ * the ether type.
+ *
+ * NOTE(review): the 16-bit reads below do assume the ether_type
+ * field itself does not straddle an mblk boundary — confirm.
+ */
+ size = len = MBLK_LEN(mp);
+ offset = offsetof(struct ether_header, ether_type);
+ while (size <= offset) {
+ mp = mp->b_cont;
+ ASSERT(mp != NULL);
+ len = MBLK_LEN(mp);
+ size += len;
+ }
+ pos = mp->b_rptr + offset + len - size;
+
+ etype = ntohs(*(ushort_t *)(uintptr_t)pos);
+ if (etype == ETHERTYPE_VLAN) {
+ /*
+ * Get the position of the ether_type in VLAN header
+ */
+ offset = offsetof(struct ether_vlan_header, ether_type);
+ while (size <= offset) {
+ mp = mp->b_cont;
+ ASSERT(mp != NULL);
+ len = MBLK_LEN(mp);
+ size += len;
+ }
+ pos = mp->b_rptr + offset + len - size;
+
+ etype = ntohs(*(ushort_t *)(uintptr_t)pos);
+ mac_hdr_len = sizeof (struct ether_vlan_header);
+ } else {
+ mac_hdr_len = sizeof (struct ether_header);
+ }
+
+ /*
+ * Here we don't assume the IP(V6) header is fully included in
+ * one mblk fragment, so we go through the fragments to parse
+ * the protocol type.
+ */
+ switch (etype) {
+ case ETHERTYPE_IP:
+ offset = offsetof(ipha_t, ipha_protocol) + mac_hdr_len;
+ while (size <= offset) {
+ mp = mp->b_cont;
+ ASSERT(mp != NULL);
+ len = MBLK_LEN(mp);
+ size += len;
+ }
+ pos = mp->b_rptr + offset + len - size;
+
+ l4_proto = *(uint8_t *)pos;
+ break;
+ case ETHERTYPE_IPV6:
+ offset = offsetof(ip6_t, ip6_nxt) + mac_hdr_len;
+ while (size <= offset) {
+ mp = mp->b_cont;
+ ASSERT(mp != NULL);
+ len = MBLK_LEN(mp);
+ size += len;
+ }
+ pos = mp->b_rptr + offset + len - size;
+
+ l4_proto = *(uint8_t *)pos;
+ break;
+ default:
+ /* Unrecoverable error */
+ IXGBE_DEBUGLOG_0(NULL, "Ether type error with tx hcksum");
+ return;
+ }
+
+ hcksum->mac_hdr_len = mac_hdr_len;
+ hcksum->ip_hdr_len = start;
+ hcksum->l4_proto = l4_proto;
+}
+
+/*
+ * ixgbe_check_hcksum_context
+ *
+ * Check if a new context descriptor is needed.
+ *
+ * Returns B_TRUE when the per-packet checksum context differs from
+ * the last context written to this tx ring; B_FALSE when the hardware
+ * can keep using the previously loaded context (or hcksum is NULL /
+ * no offload flags are set).
+ */
+static boolean_t
+ixgbe_check_hcksum_context(ixgbe_tx_ring_t *tx_ring, hcksum_context_t *hcksum)
+{
+ hcksum_context_t *last;
+
+ if (hcksum == NULL)
+ return (B_FALSE);
+
+ /*
+ * Compare the checksum data retrieved from the mblk and the
+ * stored checksum data of the last context descriptor. The data
+ * need to be checked are:
+ * hcksum_flags
+ * l4_proto
+ * mac_hdr_len
+ * ip_hdr_len
+ * Either one of the above data is changed, a new context descriptor
+ * will be needed.
+ */
+ last = &tx_ring->hcksum_context;
+
+ if (hcksum->hcksum_flags != 0) {
+ if ((hcksum->hcksum_flags != last->hcksum_flags) ||
+ (hcksum->l4_proto != last->l4_proto) ||
+ (hcksum->mac_hdr_len != last->mac_hdr_len) ||
+ (hcksum->ip_hdr_len != last->ip_hdr_len)) {
+
+ return (B_TRUE);
+ }
+ }
+
+ return (B_FALSE);
+}
+
+/*
+ * ixgbe_fill_hcksum_context
+ *
+ * Fill the (advanced) context descriptor with hardware checksum
+ * information: header lengths, IPv4 flag and L4 protocol type.
+ */
+static void
+ixgbe_fill_hcksum_context(struct ixgbe_adv_tx_context_desc *ctx_tbd,
+ hcksum_context_t *hcksum)
+{
+ /*
+ * Fill the context descriptor with the checksum
+ * context information we've got
+ */
+ ctx_tbd->vlan_macip_lens = hcksum->ip_hdr_len;
+ ctx_tbd->vlan_macip_lens |= hcksum->mac_hdr_len <<
+ IXGBE_ADVTXD_MACLEN_SHIFT;
+
+ ctx_tbd->type_tucmd_mlhl =
+ IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+ if (hcksum->hcksum_flags & HCK_IPV4_HDRCKSUM)
+ ctx_tbd->type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+ if (hcksum->hcksum_flags & HCK_PARTIALCKSUM) {
+ switch (hcksum->l4_proto) {
+ case IPPROTO_TCP:
+ ctx_tbd->type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case IPPROTO_UDP:
+ /*
+ * We don't have to explicitly set:
+ * ctx_tbd->type_tucmd_mlhl |=
+ * IXGBE_ADVTXD_TUCMD_L4T_UDP;
+ * Because IXGBE_ADVTXD_TUCMD_L4T_UDP == 0b
+ */
+ break;
+ default:
+ /* Unrecoverable error */
+ IXGBE_DEBUGLOG_0(NULL, "L4 type error with tx hcksum");
+ break;
+ }
+ }
+
+ ctx_tbd->seqnum_seed = 0;
+ ctx_tbd->mss_l4len_idx = 0;
+}
+
+/*
+ * ixgbe_tx_fill_ring
+ *
+ * Fill the tx descriptor ring with the data
+ *
+ * The address/length pairs previously saved in the tx control blocks of
+ * the pending list are written into the hardware descriptor ring. If a
+ * checksum context is supplied and differs from the one currently loaded,
+ * a context descriptor is prepended before the data descriptors. Finally
+ * the hardware tail pointer (TDT) is advanced to hand the descriptors to
+ * the hardware.
+ *
+ * Returns the total number of tx descriptors consumed (including the
+ * optional context descriptor).
+ *
+ * Caller must hold tx_ring->tx_lock (asserted below).
+ */
+static int
+ixgbe_tx_fill_ring(ixgbe_tx_ring_t *tx_ring, link_list_t *pending_list,
+    hcksum_context_t *hcksum)
+{
+	struct ixgbe_hw *hw = &tx_ring->ixgbe->hw;
+	boolean_t load_context;
+	uint32_t index, tcb_index, desc_num;
+	union ixgbe_adv_tx_desc *tbd, *first_tbd;
+	tx_control_block_t *tcb, *first_tcb;
+	uint32_t hcksum_flags;
+	int i;
+
+	ASSERT(mutex_owned(&tx_ring->tx_lock));
+
+	tbd = NULL;
+	first_tbd = NULL;
+	first_tcb = NULL;
+	desc_num = 0;
+	hcksum_flags = 0;
+	load_context = B_FALSE;
+
+	/*
+	 * Get the index of the first tx descriptor that will be filled,
+	 * and the index of the first work list item that will be attached
+	 * with the first used tx control block in the pending list.
+	 * Note: the two indexes are the same.
+	 */
+	index = tx_ring->tbd_tail;
+	tcb_index = tx_ring->tbd_tail;
+
+	if (hcksum != NULL) {
+		hcksum_flags = hcksum->hcksum_flags;
+
+		/*
+		 * Check if a new context descriptor is needed for this packet
+		 */
+		load_context = ixgbe_check_hcksum_context(tx_ring, hcksum);
+		if (load_context) {
+			/*
+			 * Remember the first tx control block; the context
+			 * descriptor written here is accounted against it
+			 * below so the recycle path frees the right number
+			 * of descriptors.
+			 */
+			first_tcb = (tx_control_block_t *)
+			    LIST_GET_HEAD(pending_list);
+			tbd = &tx_ring->tbd_ring[index];
+
+			/*
+			 * Fill the context descriptor with the
+			 * hardware checksum offload informations.
+			 */
+			ixgbe_fill_hcksum_context(
+			    (struct ixgbe_adv_tx_context_desc *)tbd, hcksum);
+
+			index = NEXT_INDEX(index, 1, tx_ring->ring_size);
+			desc_num++;
+
+			/*
+			 * Store the checksum context data if
+			 * a new context descriptor is added
+			 */
+			tx_ring->hcksum_context = *hcksum;
+		}
+	}
+
+	first_tbd = &tx_ring->tbd_ring[index];
+
+	/*
+	 * Fill tx data descriptors with the data saved in the pending list.
+	 * The tx control blocks in the pending list are added to the work list
+	 * at the same time.
+	 *
+	 * The work list is strictly 1:1 corresponding to the descriptor ring.
+	 * One item of the work list corresponds to one tx descriptor. Because
+	 * one tx control block can span multiple tx descriptors, the tx
+	 * control block will be added to the first work list item that
+	 * corresponds to the first tx descriptor generated from that tx
+	 * control block.
+	 */
+	tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list);
+	while (tcb != NULL) {
+
+		for (i = 0; i < tcb->desc_num; i++) {
+			tbd = &tx_ring->tbd_ring[index];
+
+			tbd->read.buffer_addr = tcb->desc[i].address;
+			tbd->read.cmd_type_len = tcb->desc[i].length;
+
+			tbd->read.cmd_type_len |= IXGBE_ADVTXD_DCMD_RS |
+			    IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_DATA;
+
+			tbd->read.olinfo_status = 0;
+
+			index = NEXT_INDEX(index, 1, tx_ring->ring_size);
+			desc_num++;
+		}
+
+		if (first_tcb != NULL) {
+			/*
+			 * Count the checksum context descriptor for
+			 * the first tx control block.
+			 */
+			first_tcb->desc_num++;
+			first_tcb = NULL;
+		}
+
+		/*
+		 * Add the tx control block to the work list
+		 */
+		ASSERT(tx_ring->work_list[tcb_index] == NULL);
+		tx_ring->work_list[tcb_index] = tcb;
+
+		tcb_index = index;
+		tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list);
+	}
+
+	/*
+	 * The Insert Ethernet CRC (IFCS) bit and the checksum fields are only
+	 * valid in the first descriptor of the packet.
+	 */
+	ASSERT(first_tbd != NULL);
+	first_tbd->read.cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS;
+
+	/* Set hardware checksum bits */
+	if (hcksum_flags != 0) {
+		if (hcksum_flags & HCK_IPV4_HDRCKSUM)
+			first_tbd->read.olinfo_status |=
+			    IXGBE_TXD_POPTS_IXSM << 8;
+		if (hcksum_flags & HCK_PARTIALCKSUM)
+			first_tbd->read.olinfo_status |=
+			    IXGBE_TXD_POPTS_TXSM << 8;
+	}
+
+	/*
+	 * The last descriptor of packet needs End Of Packet (EOP),
+	 * and Report Status (RS) bits set
+	 */
+	ASSERT(tbd != NULL);
+	tbd->read.cmd_type_len |=
+	    IXGBE_ADVTXD_DCMD_EOP | IXGBE_ADVTXD_DCMD_RS;
+
+	/*
+	 * Sync the DMA buffer of the tx descriptor ring
+	 */
+	DMA_SYNC(&tx_ring->tbd_area, DDI_DMA_SYNC_FORDEV);
+
+	if (ixgbe_check_dma_handle(tx_ring->tbd_area.dma_handle) != DDI_FM_OK) {
+		ddi_fm_service_impact(tx_ring->ixgbe->dip,
+		    DDI_SERVICE_DEGRADED);
+	}
+
+	/*
+	 * Update the number of the free tx descriptors.
+	 * The mutual exclusion between the transmission and the recycling
+	 * (for the tx descriptor ring and the work list) is implemented
+	 * with the atomic operation on the number of the free tx descriptors.
+	 *
+	 * Note: we should always decrement the counter tbd_free before
+	 * advancing the hardware TDT pointer to avoid the race condition -
+	 * before the counter tbd_free is decremented, the transmit of the
+	 * tx descriptors has done and the counter tbd_free is increased by
+	 * the tx recycling.
+	 */
+	i = ixgbe_atomic_reserve(&tx_ring->tbd_free, desc_num);
+	ASSERT(i >= 0);
+
+	tx_ring->tbd_tail = index;
+
+	/*
+	 * Advance the hardware TDT pointer of the tx descriptor ring
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), index);
+
+	if (ixgbe_check_acc_handle(tx_ring->ixgbe->osdep.reg_handle) !=
+	    DDI_FM_OK) {
+		ddi_fm_service_impact(tx_ring->ixgbe->dip,
+		    DDI_SERVICE_DEGRADED);
+	}
+
+	return (desc_num);
+}
+
+/*
+ * ixgbe_save_desc
+ *
+ * Save the address/length pair to the private array
+ * of the tx control block. The address/length pairs
+ * will be filled into the tx descriptor ring later.
+ *
+ * Increments tcb->desc_num for each pair saved; the caller is
+ * responsible for not exceeding the capacity of tcb->desc[].
+ */
+static void
+ixgbe_save_desc(tx_control_block_t *tcb, uint64_t address, size_t length)
+{
+	sw_desc_t *desc;
+
+	desc = &tcb->desc[tcb->desc_num];
+	desc->address = address;
+	desc->length = length;
+
+	tcb->desc_num++;
+}
+
+/*
+ * ixgbe_tx_recycle_legacy
+ *
+ * Recycle the tx descriptors and tx control blocks.
+ *
+ * The work list is traversed to check if the corresponding
+ * tx descriptors have been transmitted. If so, the resources
+ * bound to the tx control blocks will be freed, and those
+ * tx control blocks will be returned to the free list.
+ *
+ * "Legacy" here means completion is detected via the Descriptor Done
+ * (DD) status bit written back into each last descriptor, as opposed
+ * to the head write-back scheme in ixgbe_tx_recycle_head_wb().
+ *
+ * Returns the number of tx descriptors recycled, or 0 if the recycle
+ * lock is contended or nothing has completed yet.
+ */
+uint32_t
+ixgbe_tx_recycle_legacy(ixgbe_tx_ring_t *tx_ring)
+{
+	uint32_t index, last_index;
+	int desc_num;
+	boolean_t desc_done;
+	tx_control_block_t *tcb;
+	link_list_t pending_list;
+
+	/*
+	 * The mutex_tryenter() is used to avoid unnecessary
+	 * lock contention.
+	 */
+	if (mutex_tryenter(&tx_ring->recycle_lock) == 0)
+		return (0);
+
+	ASSERT(tx_ring->tbd_free <= tx_ring->ring_size);
+
+	if (tx_ring->tbd_free == tx_ring->ring_size) {
+		/* Ring is completely free - nothing to recycle */
+		tx_ring->recycle_fail = 0;
+		tx_ring->stall_watchdog = 0;
+		mutex_exit(&tx_ring->recycle_lock);
+		return (0);
+	}
+
+	/*
+	 * Sync the DMA buffer of the tx descriptor ring
+	 */
+	DMA_SYNC(&tx_ring->tbd_area, DDI_DMA_SYNC_FORKERNEL);
+
+	if (ixgbe_check_dma_handle(tx_ring->tbd_area.dma_handle) != DDI_FM_OK) {
+		ddi_fm_service_impact(tx_ring->ixgbe->dip,
+		    DDI_SERVICE_DEGRADED);
+	}
+
+	LINK_LIST_INIT(&pending_list);
+	desc_num = 0;
+	index = tx_ring->tbd_head; /* Index of next tbd/tcb to recycle */
+
+	tcb = tx_ring->work_list[index];
+	ASSERT(tcb != NULL);
+
+	desc_done = B_TRUE;
+	while (desc_done && (tcb != NULL)) {
+
+		/*
+		 * Get the last tx descriptor of the tx control block.
+		 * If the last tx descriptor is done, it is done with
+		 * all the tx descriptors of the tx control block.
+		 * Then the tx control block and all the corresponding
+		 * tx descriptors can be recycled.
+		 */
+		last_index = NEXT_INDEX(index, tcb->desc_num - 1,
+		    tx_ring->ring_size);
+
+		/*
+		 * Check if the Descriptor Done bit is set
+		 */
+		desc_done = tx_ring->tbd_ring[last_index].wb.status &
+		    IXGBE_TXD_STAT_DD;
+		if (desc_done) {
+			/*
+			 * Strip off the tx control block from the work list,
+			 * and add it to the pending list.
+			 */
+			tx_ring->work_list[index] = NULL;
+			LIST_PUSH_TAIL(&pending_list, &tcb->link);
+
+			/*
+			 * Count the total number of the tx descriptors recycled
+			 */
+			desc_num += tcb->desc_num;
+
+			/*
+			 * Advance the index of the tx descriptor ring
+			 */
+			index = NEXT_INDEX(last_index, 1, tx_ring->ring_size);
+
+			tcb = tx_ring->work_list[index];
+		}
+	}
+
+	/*
+	 * If no tx descriptors are recycled, no need to do more processing
+	 */
+	if (desc_num == 0) {
+		tx_ring->recycle_fail++;
+		mutex_exit(&tx_ring->recycle_lock);
+		return (0);
+	}
+
+	tx_ring->recycle_fail = 0;
+	tx_ring->stall_watchdog = 0;
+
+	/*
+	 * Update the head index of the tx descriptor ring
+	 */
+	tx_ring->tbd_head = index;
+
+	/*
+	 * Update the number of the free tx descriptors with atomic operations
+	 */
+	atomic_add_32(&tx_ring->tbd_free, desc_num);
+
+	mutex_exit(&tx_ring->recycle_lock);
+
+	/*
+	 * Free the resources used by the tx control blocks
+	 * in the pending list
+	 */
+	tcb = (tx_control_block_t *)LIST_GET_HEAD(&pending_list);
+	while (tcb != NULL) {
+		/*
+		 * Release the resources occupied by the tx control block
+		 */
+		ixgbe_free_tcb(tcb);
+
+		tcb = (tx_control_block_t *)
+		    LIST_GET_NEXT(&pending_list, &tcb->link);
+	}
+
+	/*
+	 * Add the tx control blocks in the pending list to the free list.
+	 */
+	ixgbe_put_free_list(tx_ring, &pending_list);
+
+	return (desc_num);
+}
+
+/*
+ * ixgbe_tx_recycle_head_wb
+ *
+ * Check the head write-back, and recycle all the transmitted
+ * tx descriptors and tx control blocks.
+ *
+ * In head write-back mode the hardware periodically writes the index
+ * of the next descriptor it will process into an extra slot at the end
+ * of the descriptor ring DMA area (tbd_head_wb); everything between
+ * tbd_head and that value has completed transmission.
+ *
+ * Returns the number of tx descriptors recycled, or 0 if the recycle
+ * lock is contended or nothing has completed yet.
+ */
+uint32_t
+ixgbe_tx_recycle_head_wb(ixgbe_tx_ring_t *tx_ring)
+{
+	uint32_t index;
+	uint32_t head_wb;
+	int desc_num;
+	tx_control_block_t *tcb;
+	link_list_t pending_list;
+
+	/*
+	 * The mutex_tryenter() is used to avoid unnecessary
+	 * lock contention.
+	 */
+	if (mutex_tryenter(&tx_ring->recycle_lock) == 0)
+		return (0);
+
+	ASSERT(tx_ring->tbd_free <= tx_ring->ring_size);
+
+	if (tx_ring->tbd_free == tx_ring->ring_size) {
+		/* Ring is completely free - nothing to recycle */
+		tx_ring->recycle_fail = 0;
+		tx_ring->stall_watchdog = 0;
+		mutex_exit(&tx_ring->recycle_lock);
+		return (0);
+	}
+
+	/*
+	 * Sync the DMA buffer of the tx descriptor ring
+	 *
+	 * Note: For head write-back mode, the tx descriptors will not
+	 * be written back, but the head write-back value is stored at
+	 * the last extra tbd at the end of the DMA area, we still need
+	 * to sync the head write-back value for kernel.
+	 *
+	 * DMA_SYNC(&tx_ring->tbd_area, DDI_DMA_SYNC_FORKERNEL);
+	 */
+	(void) ddi_dma_sync(tx_ring->tbd_area.dma_handle,
+	    sizeof (union ixgbe_adv_tx_desc) * tx_ring->ring_size,
+	    sizeof (uint32_t),
+	    DDI_DMA_SYNC_FORKERNEL);
+
+	if (ixgbe_check_dma_handle(tx_ring->tbd_area.dma_handle) != DDI_FM_OK) {
+		ddi_fm_service_impact(tx_ring->ixgbe->dip,
+		    DDI_SERVICE_DEGRADED);
+	}
+
+	LINK_LIST_INIT(&pending_list);
+	desc_num = 0;
+	index = tx_ring->tbd_head; /* Next index to clean */
+
+	/*
+	 * Get the value of head write-back
+	 */
+	head_wb = *tx_ring->tbd_head_wb;
+	while (index != head_wb) {
+		tcb = tx_ring->work_list[index];
+		ASSERT(tcb != NULL);
+
+		if (OFFSET(index, head_wb, tx_ring->ring_size) <
+		    tcb->desc_num) {
+			/*
+			 * The current tx control block is not
+			 * completely transmitted, stop recycling
+			 */
+			break;
+		}
+
+		/*
+		 * Strip off the tx control block from the work list,
+		 * and add it to the pending list.
+		 */
+		tx_ring->work_list[index] = NULL;
+		LIST_PUSH_TAIL(&pending_list, &tcb->link);
+
+		/*
+		 * Advance the index of the tx descriptor ring
+		 */
+		index = NEXT_INDEX(index, tcb->desc_num, tx_ring->ring_size);
+
+		/*
+		 * Count the total number of the tx descriptors recycled
+		 */
+		desc_num += tcb->desc_num;
+	}
+
+	/*
+	 * If no tx descriptors are recycled, no need to do more processing
+	 */
+	if (desc_num == 0) {
+		tx_ring->recycle_fail++;
+		mutex_exit(&tx_ring->recycle_lock);
+		return (0);
+	}
+
+	tx_ring->recycle_fail = 0;
+	tx_ring->stall_watchdog = 0;
+
+	/*
+	 * Update the head index of the tx descriptor ring
+	 */
+	tx_ring->tbd_head = index;
+
+	/*
+	 * Update the number of the free tx descriptors with atomic operations
+	 */
+	atomic_add_32(&tx_ring->tbd_free, desc_num);
+
+	mutex_exit(&tx_ring->recycle_lock);
+
+	/*
+	 * Free the resources used by the tx control blocks
+	 * in the pending list
+	 */
+	tcb = (tx_control_block_t *)LIST_GET_HEAD(&pending_list);
+	while (tcb) {
+		/*
+		 * Release the resources occupied by the tx control block
+		 */
+		ixgbe_free_tcb(tcb);
+
+		tcb = (tx_control_block_t *)
+		    LIST_GET_NEXT(&pending_list, &tcb->link);
+	}
+
+	/*
+	 * Add the tx control blocks in the pending list to the free list.
+	 */
+	ixgbe_put_free_list(tx_ring, &pending_list);
+
+	return (desc_num);
+}
+
+/*
+ * ixgbe_free_tcb - free up the tx control block
+ *
+ * Free the resources of the tx control block, including
+ * unbind the previously bound DMA handle, and reset other
+ * control fields.
+ *
+ * Note: this releases the resources held by the tcb (DMA binding,
+ * mblk chain) and resets its bookkeeping fields; it does not free
+ * the tcb structure itself, which is returned to the free list by
+ * the caller via ixgbe_put_free_list().
+ */
+void
+ixgbe_free_tcb(tx_control_block_t *tcb)
+{
+	switch (tcb->tx_type) {
+	case USE_COPY:
+		/*
+		 * Reset the buffer length that is used for copy
+		 */
+		tcb->tx_buf.len = 0;
+		break;
+	case USE_DMA:
+		/*
+		 * Release the DMA resource that is used for
+		 * DMA binding.
+		 */
+		(void) ddi_dma_unbind_handle(tcb->tx_dma_handle);
+		break;
+	default:
+		/* USE_NONE: nothing bound, nothing to release */
+		break;
+	}
+
+	/*
+	 * Free the mblk
+	 */
+	if (tcb->mp != NULL) {
+		freemsg(tcb->mp);
+		tcb->mp = NULL;
+	}
+
+	tcb->tx_type = USE_NONE;
+	tcb->frag_num = 0;
+	tcb->desc_num = 0;
+}
+
+/*
+ * ixgbe_get_free_list - Get a free tx control block from the free list
+ *
+ * The atomic operation on the number of the available tx control block
+ * in the free list is used to keep this routine mutual exclusive with
+ * the routine ixgbe_put_free_list.
+ *
+ * Returns a tx control block on success, or NULL if the free list
+ * is exhausted.
+ */
+static tx_control_block_t *
+ixgbe_get_free_list(ixgbe_tx_ring_t *tx_ring)
+{
+	tx_control_block_t *tcb;
+
+	/*
+	 * Check and update the number of the free tx control block
+	 * in the free list.
+	 */
+	if (ixgbe_atomic_reserve(&tx_ring->tcb_free, 1) < 0)
+		return (NULL);
+
+	mutex_enter(&tx_ring->tcb_head_lock);
+
+	tcb = tx_ring->free_list[tx_ring->tcb_head];
+	ASSERT(tcb != NULL);
+	tx_ring->free_list[tx_ring->tcb_head] = NULL;
+	tx_ring->tcb_head = NEXT_INDEX(tx_ring->tcb_head, 1,
+	    tx_ring->free_list_size);
+
+	mutex_exit(&tx_ring->tcb_head_lock);
+
+	return (tcb);
+}
+
+/*
+ * ixgbe_put_free_list
+ *
+ * Put a list of used tx control blocks back to the free list
+ *
+ * A mutex is used here to ensure the serialization. The mutual exclusion
+ * between ixgbe_get_free_list and ixgbe_put_free_list is implemented with
+ * the atomic operation on the counter tcb_free.
+ *
+ * The pending list is consumed: every tx control block is popped off
+ * and appended at the free list's tail.
+ */
+void
+ixgbe_put_free_list(ixgbe_tx_ring_t *tx_ring, link_list_t *pending_list)
+{
+	uint32_t index;
+	int tcb_num;
+	tx_control_block_t *tcb;
+
+	mutex_enter(&tx_ring->tcb_tail_lock);
+
+	index = tx_ring->tcb_tail;
+
+	tcb_num = 0;
+	tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list);
+	while (tcb != NULL) {
+		ASSERT(tx_ring->free_list[index] == NULL);
+		tx_ring->free_list[index] = tcb;
+
+		tcb_num++;
+
+		index = NEXT_INDEX(index, 1, tx_ring->free_list_size);
+
+		tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list);
+	}
+
+	tx_ring->tcb_tail = index;
+
+	/*
+	 * Update the number of the free tx control block
+	 * in the free list. This operation must be placed
+	 * under the protection of the lock.
+	 */
+	atomic_add_32(&tx_ring->tcb_free, tcb_num);
+
+	mutex_exit(&tx_ring->tcb_tail_lock);
+}
diff --git a/usr/src/uts/common/io/ixgbe/ixgbe_type.h b/usr/src/uts/common/io/ixgbe/ixgbe_type.h
new file mode 100644
index 0000000000..2bc27bf005
--- /dev/null
+++ b/usr/src/uts/common/io/ixgbe/ixgbe_type.h
@@ -0,0 +1,1494 @@
+/*
+ * CDDL HEADER START
+ *
+ * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at:
+ * http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When using or redistributing this file, you may do so under the
+ * License only. No other modification of this header is permitted.
+ *
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms of the CDDL.
+ */
+
+/* IntelVersion: 1.164 v2008-03-04 */
+
+#ifndef _IXGBE_TYPE_H
+#define _IXGBE_TYPE_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ixgbe_osdep.h"
+
+/* Vendor ID */
+#define IXGBE_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
+#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
+#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
+
+/* General Registers */
+#define IXGBE_CTRL 0x00000
+#define IXGBE_STATUS 0x00008
+#define IXGBE_CTRL_EXT 0x00018
+#define IXGBE_ESDP 0x00020
+#define IXGBE_EODSDP 0x00028
+#define IXGBE_LEDCTL 0x00200
+#define IXGBE_FRTIMER 0x00048
+#define IXGBE_TCPTIMER 0x0004C
+
+/* NVM Registers */
+#define IXGBE_EEC 0x10010
+#define IXGBE_EERD 0x10014
+#define IXGBE_FLA 0x1001C
+#define IXGBE_EEMNGCTL 0x10110
+#define IXGBE_EEMNGDATA 0x10114
+#define IXGBE_FLMNGCTL 0x10118
+#define IXGBE_FLMNGDATA 0x1011C
+#define IXGBE_FLMNGCNT 0x10120
+#define IXGBE_FLOP 0x1013C
+#define IXGBE_GRC 0x10200
+
+/* Interrupt Registers */
+#define IXGBE_EICR 0x00800
+#define IXGBE_EICS 0x00808
+#define IXGBE_EIMS 0x00880
+#define IXGBE_EIMC 0x00888
+#define IXGBE_EIAC 0x00810
+#define IXGBE_EIAM 0x00890
+#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */
+#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
+#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
+#define IXGBE_GPIE 0x00898
+
+/* Flow Control Registers */
+#define IXGBE_PFCTOP 0x03008
+#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTV 0x032A0
+#define IXGBE_TFCS 0x0CE00
+
+/* Receive DMA Registers */
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? \
+ (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40)))
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? \
+ (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40)))
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? \
+ (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40)))
+#define IXGBE_RDH(_i) (((_i) < 64) ? \
+ (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40)))
+#define IXGBE_RDT(_i) (((_i) < 64) ? \
+ (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? \
+ (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40)))
+/*
+ * Split and Replication Receive Control Registers
+ * 00-15 : 0x02100 + n*4
+ * 16-63 : 0x01014 + n*0x40
+ * 64-127: 0x0D014 + (n-64)*0x40
+ */
+#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (0x0D014 + ((_i - 64) * 0x40))))
+/*
+ * Rx DCA Control Register:
+ * 00-15 : 0x02200 + n*4
+ * 16-63 : 0x0100C + n*0x40
+ * 64-127: 0x0D00C + (n-64)*0x40
+ */
+#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+ (0x0D00C + ((_i - 64) * 0x40))))
+#define IXGBE_RDRXCTL 0x02F00
+/* 8 of these 0x03C00 - 0x03C1C */
+#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
+#define IXGBE_RXCTRL 0x03000
+#define IXGBE_DROPEN 0x03D04
+#define IXGBE_RXPBSIZE_SHIFT 10
+
+/* Receive Registers */
+#define IXGBE_RXCSUM 0x05000
+#define IXGBE_RFCTL 0x05008
+/* Multicast Table Array - 128 entries */
+#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
+#define IXGBE_RAL(_i) (((_i) <= 15) ? \
+ (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i) (((_i) <= 15) ? \
+ (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8)))
+/* Packet split receive type */
+#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? \
+ (0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4)))
+/* array of 4096 1-bit vlan filters */
+#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
+/* array of 4096 4-bit vlan vmdq indices */
+#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define IXGBE_FCTRL 0x05080
+#define IXGBE_VLNCTRL 0x05088
+#define IXGBE_MCSTCTRL 0x05090
+#define IXGBE_MRQC 0x05818
+#define IXGBE_VMD_CTL 0x0581C
+#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIRVP 0x05AC0
+#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
+#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+
+/* Transmit DMA registers */
+#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31) */
+#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
+#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40))
+#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define IXGBE_DTXCTL 0x07E00
+
+#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
+#define IXGBE_TIPG 0x0CB00
+#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04)) /* 8 of these */
+#define IXGBE_MNGTXMAP 0x0CD10
+#define IXGBE_TIPG_FIBER_DEFAULT 3
+#define IXGBE_TXPBSIZE_SHIFT 10
+
+/* Wake up registers */
+#define IXGBE_WUC 0x05800
+#define IXGBE_WUFC 0x05808
+#define IXGBE_WUS 0x05810
+#define IXGBE_IPAV 0x05838
+#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
+#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
+#define IXGBE_WUPL 0x05900
+#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */
+
+/* Music registers */
+#define IXGBE_RMCS 0x03D00
+#define IXGBE_DPMCS 0x07F40
+#define IXGBE_PDPMCS 0x0CD00
+#define IXGBE_RUPPBMR 0x050A0
+#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+
+
+/* Stats registers */
+#define IXGBE_CRCERRS 0x04000
+#define IXGBE_ILLERRC 0x04004
+#define IXGBE_ERRBC 0x04008
+#define IXGBE_MSPDC 0x04010
+#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC */
+#define IXGBE_MLFC 0x04034
+#define IXGBE_MRFC 0x04038
+#define IXGBE_RLEC 0x04040
+#define IXGBE_LXONTXC 0x03F60
+#define IXGBE_LXONRXC 0x0CF60
+#define IXGBE_LXOFFTXC 0x03F68
+#define IXGBE_LXOFFRXC 0x0CF68
+#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C */
+#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C */
+#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C */
+#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C */
+#define IXGBE_PRC64 0x0405C
+#define IXGBE_PRC127 0x04060
+#define IXGBE_PRC255 0x04064
+#define IXGBE_PRC511 0x04068
+#define IXGBE_PRC1023 0x0406C
+#define IXGBE_PRC1522 0x04070
+#define IXGBE_GPRC 0x04074
+#define IXGBE_BPRC 0x04078
+#define IXGBE_MPRC 0x0407C
+#define IXGBE_GPTC 0x04080
+#define IXGBE_GORCL 0x04088
+#define IXGBE_GORCH 0x0408C
+#define IXGBE_GOTCL 0x04090
+#define IXGBE_GOTCH 0x04094
+#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC */
+#define IXGBE_RUC 0x040A4
+#define IXGBE_RFC 0x040A8
+#define IXGBE_ROC 0x040AC
+#define IXGBE_RJC 0x040B0
+#define IXGBE_MNGPRC 0x040B4
+#define IXGBE_MNGPDC 0x040B8
+#define IXGBE_MNGPTC 0x0CF90
+#define IXGBE_TORL 0x040C0
+#define IXGBE_TORH 0x040C4
+#define IXGBE_TPR 0x040D0
+#define IXGBE_TPT 0x040D4
+#define IXGBE_PTC64 0x040D8
+#define IXGBE_PTC127 0x040DC
+#define IXGBE_PTC255 0x040E0
+#define IXGBE_PTC511 0x040E4
+#define IXGBE_PTC1023 0x040E8
+#define IXGBE_PTC1522 0x040EC
+#define IXGBE_MPTC 0x040F0
+#define IXGBE_BPTC 0x040F4
+#define IXGBE_XEC 0x04120
+
+#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? \
+ (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4)))
+
+#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+
+/* Management */
+#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MANC 0x05820
+#define IXGBE_MFVAL 0x05824
+#define IXGBE_MANC2H 0x05860
+#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MIPAF 0x058B0
+#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
+
+/* ARC Subsystem registers */
+#define IXGBE_HICR 0x15F00
+#define IXGBE_FWSTS 0x15F0C
+#define IXGBE_HSMC0R 0x15F04
+#define IXGBE_HSMC1R 0x15F08
+#define IXGBE_SWSR 0x15F10
+#define IXGBE_HFDR 0x15FE8
+#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */
+
+/* PCI-E registers */
+#define IXGBE_GCR 0x11000
+#define IXGBE_GTV 0x11004
+#define IXGBE_FUNCTAG 0x11008
+#define IXGBE_GLT 0x1100C
+#define IXGBE_GSCL_1 0x11010
+#define IXGBE_GSCL_2 0x11014
+#define IXGBE_GSCL_3 0x11018
+#define IXGBE_GSCL_4 0x1101C
+#define IXGBE_GSCN_0 0x11020
+#define IXGBE_GSCN_1 0x11024
+#define IXGBE_GSCN_2 0x11028
+#define IXGBE_GSCN_3 0x1102C
+#define IXGBE_FACTPS 0x10150
+#define IXGBE_PCIEANACTL 0x11040
+#define IXGBE_SWSM 0x10140
+#define IXGBE_FWSM 0x10148
+#define IXGBE_GSSR 0x10160
+#define IXGBE_MREVID 0x11064
+#define IXGBE_DCA_ID 0x11070
+#define IXGBE_DCA_CTRL 0x11074
+
+/* Diagnostic Registers */
+#define IXGBE_RDSTATCTL 0x02C20
+#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN 0x02F08
+#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE 0x02F20
+#define IXGBE_TDSTATCTL 0x07C20
+#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
+#define IXGBE_TDHMPN 0x07F08
+#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
+#define IXGBE_TDPROBE 0x07F20
+#define IXGBE_TXBUFCTRL 0x0C600
+#define IXGBE_TXBUFDATA0 0x0C610
+#define IXGBE_TXBUFDATA1 0x0C614
+#define IXGBE_TXBUFDATA2 0x0C618
+#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_RXBUFCTRL 0x03600
+#define IXGBE_RXBUFDATA0 0x03610
+#define IXGBE_RXBUFDATA1 0x03614
+#define IXGBE_RXBUFDATA2 0x03618
+#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_RFVAL 0x050A4
+#define IXGBE_MDFTC1 0x042B8
+#define IXGBE_MDFTC2 0x042C0
+#define IXGBE_MDFTFIFO1 0x042C4
+#define IXGBE_MDFTFIFO2 0x042C8
+#define IXGBE_MDFTS 0x042CC
+#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C */
+#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C */
+#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C */
+#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C */
+#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C */
+#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C */
+#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C */
+#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C */
+#define IXGBE_PCIEECCCTL 0x1106C
+#define IXGBE_PBTXECC 0x0C300
+#define IXGBE_PBRXECC 0x03300
+#define IXGBE_GHECCR 0x110B0
+
+/* MAC Registers */
+#define IXGBE_PCS1GCFIG 0x04200
+#define IXGBE_PCS1GLCTL 0x04208
+#define IXGBE_PCS1GLSTA 0x0420C
+#define IXGBE_PCS1GDBG0 0x04210
+#define IXGBE_PCS1GDBG1 0x04214
+#define IXGBE_PCS1GANA 0x04218
+#define IXGBE_PCS1GANLP 0x0421C
+#define IXGBE_PCS1GANNP 0x04220
+#define IXGBE_PCS1GANLPNP 0x04224
+#define IXGBE_HLREG0 0x04240
+#define IXGBE_HLREG1 0x04244
+#define IXGBE_PAP 0x04248
+#define IXGBE_MACA 0x0424C
+#define IXGBE_APAE 0x04250
+#define IXGBE_ARD 0x04254
+#define IXGBE_AIS 0x04258
+#define IXGBE_MSCA 0x0425C
+#define IXGBE_MSRWD 0x04260
+#define IXGBE_MLADD 0x04264
+#define IXGBE_MHADD 0x04268
+#define IXGBE_TREG 0x0426C
+#define IXGBE_PCSS1 0x04288
+#define IXGBE_PCSS2 0x0428C
+#define IXGBE_XPCSS 0x04290
+#define IXGBE_SERDESC 0x04298
+#define IXGBE_MACS 0x0429C
+#define IXGBE_AUTOC 0x042A0
+#define IXGBE_LINKS 0x042A4
+#define IXGBE_AUTOC2 0x042A8
+#define IXGBE_AUTOC3 0x042AC
+#define IXGBE_ANLP1 0x042B0
+#define IXGBE_ANLP2 0x042B4
+#define IXGBE_ATLASCTL 0x04800
+
+
+/* CTRL Bit Masks */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+
+/* FACTPS */
+#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
+
+/* MHADD Bit Masks */
+#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT 16
+
+/* Extended Device Control */
+#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+
+/* Direct Cache Access (DCA) definitions */
+#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+
+#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
+
+/* MSCA Bit Masks */
+#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF
+ /* MDI Address (new protocol) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT 0
+#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000
+ /* Device Type (new protocol) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT	16 /* Register Address (old protocol) */
+#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift */
+#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */
+#define IXGBE_MSCA_READ 0x08000000 /* OP CODE 10 (read) */
+#define IXGBE_MSCA_READ_AUTOINC 0x0C000000
+ /* OP CODE 11 (read, auto inc) */
+#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000
+ /* ST CODE 00 (new protocol) */
+#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000
+ /* ST CODE 01 (old protocol) */
+#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */
+
+/* MSRWD bit masks */
+#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT 16
+
+/* Atlas registers */
+#define IXGBE_ATLAS_PDN_LPBK 0x24
+#define IXGBE_ATLAS_PDN_10G 0xB
+#define IXGBE_ATLAS_PDN_1G 0xC
+#define IXGBE_ATLAS_PDN_AN 0xD
+
+/* Atlas bit masks */
+#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000
+#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10
+#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
+
+/* Device Type definitions for new protocol MDIO commands */
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
+
+#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
+
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
+
+#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
+#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
+#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg */
+#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg */
+#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
+#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
+#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
+
+/* MII clause 22/28 definitions */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
+
+#define IXGBE_MII_SPEED_SELECTION_REG 0x10
+#define IXGBE_MII_RESTART 0x200
+#define IXGBE_MII_AUTONEG_COMPLETE 0x20
+#define IXGBE_MII_AUTONEG_REG 0x0
+
+#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
+#define IXGBE_MAX_PHY_ADDR 32
+
+/* PHY IDs */
+#define QT2022_PHY_ID 0x0043A400
+
+/* General purpose Interrupt Enable */
+#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
+#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME 0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+
+/* Transmit Flow Control status */
+#define IXGBE_TFCS_TXOFF 0x00000001
+#define IXGBE_TFCS_TXOFF0 0x00000100
+#define IXGBE_TFCS_TXOFF1 0x00000200
+#define IXGBE_TFCS_TXOFF2 0x00000400
+#define IXGBE_TFCS_TXOFF3 0x00000800
+#define IXGBE_TFCS_TXOFF4 0x00001000
+#define IXGBE_TFCS_TXOFF5 0x00002000
+#define IXGBE_TFCS_TXOFF6 0x00004000
+#define IXGBE_TFCS_TXOFF7 0x00008000
+
+/* TCP Timer */
+#define IXGBE_TCPTIMER_KS 0x00000100
+#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200
+#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400
+#define IXGBE_TCPTIMER_LOOP 0x00000800
+#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
+
+/* HLREG0 Bit Masks */
+#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */
+#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */
+#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */
+#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */
+#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */
+#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */
+#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */
+#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */
+#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */
+#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */
+#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */
+#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */
+#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */
+#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */
+#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */
+
+/* VMD_CTL bitmasks */
+#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001
+#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
+
+/* RDHMPN and TDHMPN bitmasks */
+#define IXGBE_RDHMPN_RDICADDR 0x007FF800
+#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
+#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
+#define IXGBE_TDHMPN_TDICADDR 0x003FF800
+#define IXGBE_TDHMPN_TDICRDREQ 0x00800000
+#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
+
+/* Receive Checksum Control */
+#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* FCRTL Bit Masks */
+#define IXGBE_FCRTL_XONE 0x80000000 /* bit 31, XON enable */
+#define IXGBE_FCRTH_FCEN 0x80000000 /* Rx Flow control enable */
+
+/* PAP bit masks */
+#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
+
+/* RMCS Bit Masks */
+#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RMCS_RAC 0x00000004
+#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
+#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx link (802.3x) flow control ena */
+#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */
+#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
+
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
+#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
+#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
+#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0
+ /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1
+ /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
+#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0
+ /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1
+ /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
+#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
+#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0
+ /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1
+ /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
+#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+ IXGBE_EIMS_RTX_QUEUE | IXGBE_EIMS_LSC | \
+ IXGBE_EIMS_TCP_TIMER | IXGBE_EIMS_OTHER)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
+#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
+#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */
+
+/* Interrupt clear mask */
+#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_REG_NUM 25
+#define IXGBE_IVAR_TXRX_ENTRY 96
+#define IXGBE_IVAR_RX_ENTRY 64
+#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i))
+#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i))
+#define IXGBE_IVAR_TX_ENTRY 32
+
+#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */
+#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */
+
+#define IXGBE_MSIX_VECTOR(_i) (0 + (_i))
+
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+/* VLAN Control Bit Masks */
+#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
+#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
+#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */
+#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
+#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
+
+
+#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
+
+/* STATUS Bit Masks */
+#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
+
+#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
+#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
+
+/* ESDP Bit Masks */
+#define IXGBE_ESDP_SDP4 0x00000001 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5 0x00000002 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP5_DIR 0x00000008 /* SDP5 IO direction */
+
+/* LEDCTL Bit Masks */
+#define IXGBE_LED_IVRT_BASE 0x00000040
+#define IXGBE_LED_BLINK_BASE 0x00000080
+#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
+#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
+#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
+#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
+#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
+#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+
+/* LED modes */
+#define IXGBE_LED_LINK_UP 0x0
+#define IXGBE_LED_LINK_10G 0x1
+#define IXGBE_LED_MAC 0x2
+#define IXGBE_LED_FILTER 0x3
+#define IXGBE_LED_LINK_ACTIVE 0x4
+#define IXGBE_LED_LINK_1G 0x5
+#define IXGBE_LED_ON 0xE
+#define IXGBE_LED_OFF 0xF
+
+/* AUTOC Bit Masks */
+#define IXGBE_AUTOC_KX4_SUPP 0x80000000
+#define IXGBE_AUTOC_KX_SUPP 0x40000000
+#define IXGBE_AUTOC_PAUSE 0x30000000
+#define IXGBE_AUTOC_RF 0x08000000
+#define IXGBE_AUTOC_PD_TMR 0x06000000
+#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
+#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
+#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
+#define IXGBE_AUTOC_AN_RESTART 0x00001000
+#define IXGBE_AUTOC_FLU 0x00000001
+#define IXGBE_AUTOC_LMS_SHIFT 13
+#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
+#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+
+/* LINKS Bit Masks */
+#define IXGBE_LINKS_KX_AN_COMP 0x80000000
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+#define IXGBE_LINKS_MODE 0x18000000
+#define IXGBE_LINKS_RX_MODE 0x06000000
+#define IXGBE_LINKS_TX_MODE 0x01800000
+#define IXGBE_LINKS_XGXS_EN 0x00400000
+#define IXGBE_LINKS_PCS_1G_EN 0x00200000
+#define IXGBE_LINKS_1G_AN_EN 0x00100000
+#define IXGBE_LINKS_KX_AN_IDLE 0x00080000
+#define IXGBE_LINKS_1G_SYNC 0x00040000
+#define IXGBE_LINKS_10G_ALIGN 0x00020000
+#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
+#define IXGBE_LINKS_TL_FAULT 0x00001000
+#define IXGBE_LINKS_SIGNAL 0x00000F00
+
+#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+
+/* SW Semaphore Register bitmasks */
+#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+
+/* GSSR definitions */
+#define IXGBE_GSSR_EEP_SM 0x0001
+#define IXGBE_GSSR_PHY0_SM 0x0002
+#define IXGBE_GSSR_PHY1_SM 0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM 0x0010
+
+/* EEC Register */
+#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
+#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */
+#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */
+#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */
+#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */
+#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define IXGBE_EEC_FWE_SHIFT 4
+#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */
+#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
+#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
+#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
+/* EEPROM Addressing bits based on type (0-small, 1-large) */
+#define IXGBE_EEC_ADDR_SIZE 0x00000400
+#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
+
+#define IXGBE_EEC_SIZE_SHIFT 11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
+#define IXGBE_EEPROM_OPCODE_BITS 8
+
+/* Checksum and EEPROM pointers */
+#define IXGBE_EEPROM_CHECKSUM 0x3F
+#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_PCIE_ANALOG_PTR 0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR 0x09
+#define IXGBE_CORE1_PTR 0x0A
+#define IXGBE_MAC0_PTR 0x0B
+#define IXGBE_MAC1_PTR 0x0C
+#define IXGBE_CSR0_CONFIG_PTR 0x0D
+#define IXGBE_CSR1_CONFIG_PTR 0x0E
+#define IXGBE_FW_PTR 0x0F
+#define IXGBE_PBANUM0_PTR 0x15
+#define IXGBE_PBANUM1_PTR 0x16
+
+/* Legacy EEPROM word offsets */
+#define IXGBE_ISCSI_BOOT_CAPS 0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
+
+/* EEPROM Commands - SPI */
+#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
+/* EEPROM reset Write Enable latch */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+
+/* EEPROM Read Register */
+#define IXGBE_EEPROM_READ_REG_DATA 16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_READ_REG_DONE 2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_READ_REG_START 1 /* First bit to start operation */
+#define IXGBE_EEPROM_READ_ADDR_SHIFT 2 /* Shift to the address bits */
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
+#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+
+#ifndef IXGBE_EERD_ATTEMPTS
+/* Number of 5-microsecond intervals to wait for an EERD read to complete */
+#define IXGBE_EERD_ATTEMPTS 100000
+#endif
+
+/* PCI Bus Info */
+#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_WIDTH 0x3F0
+#define IXGBE_PCI_LINK_WIDTH_1 0x10
+#define IXGBE_PCI_LINK_WIDTH_2 0x20
+#define IXGBE_PCI_LINK_WIDTH_4 0x40
+#define IXGBE_PCI_LINK_WIDTH_8 0x80
+#define IXGBE_PCI_LINK_SPEED 0xF
+#define IXGBE_PCI_LINK_SPEED_2500 0x1
+#define IXGBE_PCI_LINK_SPEED_5000 0x2
+#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
+#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+
+/* Number of 100-microsecond intervals to wait for PCI Express master disable */
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+
+/* PHY Types */
+#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+
+/* Check whether an address is multicast. This is a little-endian-specific check. */
+#define IXGBE_IS_MULTICAST(Address) \
+ (bool)(((u8 *)(Address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define IXGBE_IS_BROADCAST(Address) \
+ ((((u8 *)(Address))[0] == ((u8)0xff)) && \
+ (((u8 *)(Address))[1] == ((u8)0xff)))
+
+/* RAH */
+#define IXGBE_RAH_VIND_MASK 0x003C0000
+#define IXGBE_RAH_VIND_SHIFT 18
+#define IXGBE_RAH_AV 0x80000000
+
+/* Header split receive */
+#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
+#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
+#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_NFSW_DIS 0x00000040
+#define IXGBE_RFCTL_NFSR_DIS 0x00000080
+#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
+#define IXGBE_RFCTL_NFS_VER_SHIFT 8
+#define IXGBE_RFCTL_NFS_VER_2 0
+#define IXGBE_RFCTL_NFS_VER_3 1
+#define IXGBE_RFCTL_NFS_VER_4 2
+#define IXGBE_RFCTL_IPV6_DIS 0x00000400
+#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800
+#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000
+#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000
+#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
+/* Enable short packet padding to 64 bytes */
+#define IXGBE_TX_PAD_ENABLE 0x00000400
+#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
+/* This allows for 16K packets + 4k for vlan */
+#define IXGBE_MAX_FRAME_SZ 0x40040000
+
+#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. # write-back enable */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+
+#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena */
+#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
+#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
+#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
+#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
+/* Receive Priority Flow Control Enable */
+#define IXGBE_FCTRL_RPFCE 0x00004000
+#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
+
+/* Multiple Receive Queue Control */
+#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
+#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000
+ /* Descriptor extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* packet buffer size in 1 KB units */
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
+
+#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
+#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
+
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor. */
+#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+
+/* Masks to determine if packets should be dropped due to frame errors */
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | IXGBE_RXD_ERR_OSE | IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | IXGBE_RXDADV_ERR_OSE | IXGBE_RXDADV_ERR_USE)
+
+/* Multicast bit mask */
+#define IXGBE_MCSTCTRL_MFE 0x4
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Vlan-specific macros */
+#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
+#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+
+#ifndef __le16
+/* Little Endian defines */
+#define __le8 u8
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+#endif
+/* Transmit Descriptor - Legacy */
+struct ixgbe_legacy_tx_desc {
+ u64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ __le8 cso; /* Checksum offset */
+ __le8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ __le8 status; /* Descriptor status */
+ __le8 css; /* Checksum start */
+ __le16 vlan;
+ } fields;
+ } upper;
+};
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor - Legacy */
+struct ixgbe_legacy_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ __le8 status; /* Descriptor status */
+ __le8 errors; /* Descriptor Errors */
+ __le16 vlan;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ /* RSS type, Packet type */
+ __le16 pkt_info;
+ /* Split Header, header len */
+ __le16 hdr_info;
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */
+#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800
+ /* 1st&Last TSO-full iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+/* Autonegotiation advertised speeds */
+typedef u32 ixgbe_autoneg_advertised;
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_100_FULL 0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+
+enum ixgbe_eeprom_type {
+ ixgbe_eeprom_uninitialized = 0,
+ ixgbe_eeprom_spi,
+ ixgbe_eeprom_none /* No NVM support */
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82598EB,
+ ixgbe_num_macs
+};
+
+enum ixgbe_phy_type {
+ ixgbe_phy_unknown = 0,
+ ixgbe_phy_qt,
+ ixgbe_phy_xaui,
+ ixgbe_phy_generic
+};
+
+enum ixgbe_media_type {
+ ixgbe_media_type_unknown = 0,
+ ixgbe_media_type_fiber,
+ ixgbe_media_type_copper,
+ ixgbe_media_type_backplane,
+ ixgbe_media_type_virtual
+};
+
+/* Flow Control Settings */
+enum ixgbe_fc_type {
+ ixgbe_fc_none = 0,
+ ixgbe_fc_rx_pause,
+ ixgbe_fc_tx_pause,
+ ixgbe_fc_full,
+ ixgbe_fc_default
+};
+
+/* PCI bus types */
+enum ixgbe_bus_type {
+ ixgbe_bus_type_unknown = 0,
+ ixgbe_bus_type_pci,
+ ixgbe_bus_type_pcix,
+ ixgbe_bus_type_pci_express,
+ ixgbe_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum ixgbe_bus_speed {
+ ixgbe_bus_speed_unknown = 0,
+ ixgbe_bus_speed_33,
+ ixgbe_bus_speed_66,
+ ixgbe_bus_speed_100,
+ ixgbe_bus_speed_120,
+ ixgbe_bus_speed_133,
+ ixgbe_bus_speed_2500,
+ ixgbe_bus_speed_5000,
+ ixgbe_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum ixgbe_bus_width {
+ ixgbe_bus_width_unknown = 0,
+ ixgbe_bus_width_pcie_x1,
+ ixgbe_bus_width_pcie_x2,
+ ixgbe_bus_width_pcie_x4 = 4,
+ ixgbe_bus_width_pcie_x8 = 8,
+ ixgbe_bus_width_32,
+ ixgbe_bus_width_64,
+ ixgbe_bus_width_reserved
+};
+
+struct ixgbe_addr_filter_info {
+ u32 num_mc_addrs;
+ u32 rar_used_count;
+ u32 mc_addr_in_rar_count;
+ u32 mta_in_use;
+ u32 overflow_promisc;
+ bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct ixgbe_bus_info {
+ enum ixgbe_bus_speed speed;
+ enum ixgbe_bus_width width;
+ enum ixgbe_bus_type type;
+};
+
+/* Flow control parameters */
+struct ixgbe_fc_info {
+ u32 high_water; /* Flow Control High-water */
+ u32 low_water; /* Flow Control Low-water */
+ u16 pause_time; /* Flow Control Pause timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ enum ixgbe_fc_type type; /* Type of flow control */
+ enum ixgbe_fc_type original_type;
+};
+
+/* Statistics counters collected by the MAC; names follow the hw register mnemonics */
+struct ixgbe_hw_stats {
+	u64 crcerrs;		/* CRC errors */
+	u64 illerrc;		/* illegal byte errors */
+	u64 errbc;		/* error bytes */
+	u64 mspdc;		/* MAC short packet discards */
+	u64 mpctotal;		/* missed packets, total */
+	u64 mpc[8];		/* missed packets, one counter per packet buffer */
+	u64 mlfc;		/* MAC local faults */
+	u64 mrfc;		/* MAC remote faults */
+	u64 rlec;		/* receive length errors */
+	u64 lxontxc;		/* link XON transmitted */
+	u64 lxonrxc;		/* link XON received */
+	u64 lxofftxc;		/* link XOFF transmitted */
+	u64 lxoffrxc;		/* link XOFF received */
+	u64 pxontxc[8];		/* priority XON transmitted, per priority */
+	u64 pxonrxc[8];		/* priority XON received, per priority */
+	u64 pxofftxc[8];	/* priority XOFF transmitted, per priority */
+	u64 pxoffrxc[8];	/* priority XOFF received, per priority */
+	u64 prc64;		/* packets received, by size bucket: 64 ... */
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;		/* ... up to 1522 bytes */
+	u64 gprc;		/* good packets received */
+	u64 bprc;		/* broadcast packets received */
+	u64 mprc;		/* multicast packets received */
+	u64 gptc;		/* good packets transmitted */
+	u64 gorc;		/* good octets received */
+	u64 gotc;		/* good octets transmitted */
+	u64 rnbc[8];		/* receive no-buffer count */
+	u64 ruc;		/* receive undersize count */
+	u64 rfc;		/* receive fragment count */
+	u64 roc;		/* receive oversize count */
+	u64 rjc;		/* receive jabber count */
+	u64 mngprc;		/* management packets received */
+	u64 mngpdc;		/* management packets dropped */
+	u64 mngptc;		/* management packets transmitted */
+	u64 tor;		/* total octets received */
+	u64 tpr;		/* total packets received */
+	u64 tpt;		/* total packets transmitted */
+	u64 ptc64;		/* packets transmitted, same size buckets as prc* */
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;		/* multicast packets transmitted */
+	u64 bptc;		/* broadcast packets transmitted */
+	u64 xec;		/* checksum errors (XEC) — confirm vs. datasheet */
+	u64 rqsmr[16];		/* rx queue statistic mapping registers */
+	u64 tqsmr[8];		/* tx queue statistic mapping registers */
+	u64 qprc[16];		/* per-queue packets received */
+	u64 qptc[16];		/* per-queue packets transmitted */
+	u64 qbrc[16];		/* per-queue bytes received */
+	u64 qbtc[16];		/* per-queue bytes transmitted */
+};
+
+/* forward declaration; the full definition of ixgbe_hw appears below */
+struct ixgbe_hw;
+
+/* iterator for walking multicast lists: yields an address, advances *mc_addr_ptr */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+				u32 *vmdq);
+
+/* Function pointer table */
+struct ixgbe_eeprom_operations {
+	s32 (*init_params)(struct ixgbe_hw *);	/* fill in type/word_size/address_bits */
+	s32 (*read)(struct ixgbe_hw *, u16, u16 *);	/* read one word at offset */
+	s32 (*write)(struct ixgbe_hw *, u16, u16);	/* write one word at offset */
+	s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
+	s32 (*update_checksum)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mac_operations {	/* MAC-layer dispatch table; all return s32 status */
+	s32 (*init_hw)(struct ixgbe_hw *);
+	s32 (*reset_hw)(struct ixgbe_hw *);
+	s32 (*start_hw)(struct ixgbe_hw *);
+	s32 (*clear_hw_cntrs)(struct ixgbe_hw *);	/* zero hw statistics counters */
+	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+	s32 (*stop_adapter)(struct ixgbe_hw *);
+	s32 (*get_bus_info)(struct ixgbe_hw *);		/* cf. struct ixgbe_bus_info */
+	s32 (*read_analog_reg8)(struct ixgbe_hw *, u32, u8*);
+	s32 (*write_analog_reg8)(struct ixgbe_hw *, u32, u8);
+
+	/* Link bring-up and status */
+	s32 (*setup_link)(struct ixgbe_hw *);
+	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
+				bool);
+	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+				bool *);
+
+	/* LED control */
+	s32 (*led_on)(struct ixgbe_hw *, u32);
+	s32 (*led_off)(struct ixgbe_hw *, u32);
+	s32 (*blink_led_start)(struct ixgbe_hw *, u32);
+	s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+
+	/* RAR, Multicast, VLAN filtering */
+	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+	s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+	s32 (*init_rx_addrs)(struct ixgbe_hw *);
+	s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+				ixgbe_mc_addr_itr);
+	s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+				ixgbe_mc_addr_itr);
+	s32 (*enable_mc)(struct ixgbe_hw *);
+	s32 (*disable_mc)(struct ixgbe_hw *);
+	s32 (*clear_vfta)(struct ixgbe_hw *);
+	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+
+	/* Flow Control */
+	s32 (*setup_fc)(struct ixgbe_hw *, s32);
+};
+
+struct ixgbe_phy_operations {	/* PHY-layer dispatch table */
+	s32 (*identify)(struct ixgbe_hw *);
+	s32 (*reset)(struct ixgbe_hw *);
+	s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);	/* PHY register read */
+	s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);	/* PHY register write */
+	s32 (*setup_link)(struct ixgbe_hw *);
+	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
+				bool);
+};
+
+struct ixgbe_eeprom_info {
+	struct ixgbe_eeprom_operations ops;
+	enum ixgbe_eeprom_type type;
+	u16 word_size;		/* EEPROM size in 16-bit words */
+	u16 address_bits;	/* addressing width of the part */
+};
+
+struct ixgbe_mac_info {
+	struct ixgbe_mac_operations ops;
+	enum ixgbe_mac_type type;
+	u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];		/* current MAC address */
+	u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];	/* permanent (factory) address */
+	s32 mc_filter_type;	/* multicast hash filter type */
+	u32 mcft_size;		/* multicast filter table size */
+	u32 vft_size;		/* VLAN filter table size */
+	u32 num_rar_entries;	/* number of receive address registers */
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 link_attach_type;	/* NOTE(review): raw link config words — confirm source */
+	u32 link_mode_select;
+	bool link_settings_loaded;	/* set once link settings have been read */
+	bool autoneg;
+	bool autoneg_failed;
+};
+
+struct ixgbe_phy_info {
+	struct ixgbe_phy_operations ops;
+	enum ixgbe_phy_type type;
+	u32 addr;		/* PHY address */
+	u32 id;			/* PHY identifier */
+	u32 revision;
+	enum ixgbe_media_type media_type;
+	ixgbe_autoneg_advertised autoneg_advertised;	/* speeds to advertise */
+	bool autoneg_wait_to_complete;	/* block until autonegotiation finishes */
+};
+
+struct ixgbe_hw {
+	u8 *hw_addr;		/* device register base (mapped by the OS layer) */
+	void *back;		/* opaque back-pointer for the OS-support code */
+	struct ixgbe_mac_info mac;
+	struct ixgbe_addr_filter_info addr_ctrl;
+	struct ixgbe_fc_info fc;
+	struct ixgbe_phy_info phy;
+	struct ixgbe_eeprom_info eeprom;
+	struct ixgbe_bus_info bus;
+	u16 device_id;			/* PCI device id */
+	u16 vendor_id;			/* PCI vendor id */
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	u8 revision_id;
+	bool adapter_stopped;	/* set once the adapter has been stopped */
+};
+
+#define ixgbe_call_func(hw, func, params, error) \
+ (func != NULL) ? func params: error
+
+/* Error Codes: IXGBE_SUCCESS is zero, failures are negative */
+#define IXGBE_SUCCESS				0
+#define IXGBE_ERR_EEPROM			-1
+#define IXGBE_ERR_EEPROM_CHECKSUM		-2
+#define IXGBE_ERR_PHY				-3
+#define IXGBE_ERR_CONFIG			-4
+#define IXGBE_ERR_PARAM				-5
+#define IXGBE_ERR_MAC_TYPE			-6
+#define IXGBE_ERR_UNKNOWN_PHY			-7
+#define IXGBE_ERR_LINK_SETUP			-8
+#define IXGBE_ERR_ADAPTER_STOPPED		-9
+#define IXGBE_ERR_INVALID_MAC_ADDR		-10
+#define IXGBE_ERR_DEVICE_NOT_SUPPORTED		-11
+#define IXGBE_ERR_MASTER_REQUESTS_PENDING	-12
+#define IXGBE_ERR_INVALID_LINK_SETTINGS		-13
+#define IXGBE_ERR_AUTONEG_NOT_COMPLETE		-14
+#define IXGBE_ERR_RESET_FAILED			-15
+#define IXGBE_ERR_SWFW_SYNC			-16
+#define IXGBE_ERR_PHY_ADDR_INVALID		-17
+#define IXGBE_NOT_IMPLEMENTED			0x7FFFFFFF	/* positive sentinel: op not supported */
+
+#ifndef UNREFERENCED_PARAMETER
+#define UNREFERENCED_PARAMETER(_p)	/* expands to nothing; quiets unused-arg lint */
+#endif
+
+#endif /* _IXGBE_TYPE_H */
diff --git a/usr/src/uts/intel/Makefile.intel.shared b/usr/src/uts/intel/Makefile.intel.shared
index 9375f78a59..42cbae5ee6 100644
--- a/usr/src/uts/intel/Makefile.intel.shared
+++ b/usr/src/uts/intel/Makefile.intel.shared
@@ -363,6 +363,7 @@ DRV_KMODS += rge
DRV_KMODS += sfe
DRV_KMODS += amd8111s
DRV_KMODS += igb
+DRV_KMODS += ixgbe
$(CLOSED_BUILD)CLOSED_DRV_KMODS += ixgb
#
diff --git a/usr/src/uts/intel/ixgbe/Makefile b/usr/src/uts/intel/ixgbe/Makefile
new file mode 100644
index 0000000000..8c7893819b
--- /dev/null
+++ b/usr/src/uts/intel/ixgbe/Makefile
@@ -0,0 +1,90 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+# uts/intel/ixgbe/Makefile
+#
+# This makefile drives the production of the ixgbe
+# network driver kernel module.
+#
+# intel architecture dependent
+#
+
+#
+# Paths to the base of the uts directory trees
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE		= ixgbe
+OBJECTS		= $(IXGBE_OBJS:%=$(OBJS_DIR)/%)
+LINTS		= $(IXGBE_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE	= $(ROOT_DRV_DIR)/$(MODULE)
+CONF_SRCDIR	= $(UTSBASE)/common/io/ixgbe
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+
+#
+# Define the build, lint and install target sets
+#
+ALL_TARGET	= $(BINARY) $(CONFMOD)
+LINT_TARGET	= $(MODULE).lint
+INSTALL_TARGET	= $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# Driver depends on MAC & IP; -dy -N records them as dynamic dependencies
+#
+LDFLAGS		+= -dy -N misc/mac -N drv/ip
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def:		$(DEF_DEPS)
+
+all:		$(ALL_DEPS)
+
+clean:		$(CLEAN_DEPS)
+
+clobber:	$(CLOBBER_DEPS)
+
+lint:		$(LINT_DEPS)
+
+modlintlib:	$(MODLINTLIB_DEPS)
+
+clean.lint:	$(CLEAN_LINT_DEPS)
+
+install:	$(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ
diff --git a/usr/src/uts/intel/os/minor_perm b/usr/src/uts/intel/os/minor_perm
index 93b00060fd..1c9ae9c32f 100644
--- a/usr/src/uts/intel/os/minor_perm
+++ b/usr/src/uts/intel/os/minor_perm
@@ -122,6 +122,7 @@ kssl:* 0666 root sys
lx_ptm:lx_ptmajor 0666 root sys
lx_systrace:* 0644 root sys
clone:bge 0666 root sys
+clone:ixgbe 0666 root sys
clone:rge 0666 root sys
clone:xge 0666 root sys
clone:nge 0666 root sys
@@ -143,6 +144,7 @@ clone:dmfe 0666 root sys
clone:afe 0666 root sys
clone:mxfe 0666 root sys
bge:* 0666 root sys
+ixgbe:* 0666 root sys
rge:* 0666 root sys
xge:* 0666 root sys
nge:* 0666 root sys
diff --git a/usr/src/uts/sparc/Makefile.sparc.shared b/usr/src/uts/sparc/Makefile.sparc.shared
index 44cfda748b..288d181d2c 100644
--- a/usr/src/uts/sparc/Makefile.sparc.shared
+++ b/usr/src/uts/sparc/Makefile.sparc.shared
@@ -264,6 +264,7 @@ DRV_KMODS += rge
DRV_KMODS += sfe
DRV_KMODS += aac
DRV_KMODS += igb
+DRV_KMODS += ixgbe
$(CLOSED_BUILD)CLOSED_DRV_KMODS += ixgb
#
diff --git a/usr/src/uts/sparc/ixgbe/Makefile b/usr/src/uts/sparc/ixgbe/Makefile
new file mode 100644
index 0000000000..9652f597fa
--- /dev/null
+++ b/usr/src/uts/sparc/ixgbe/Makefile
@@ -0,0 +1,106 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident	"%Z%%M%	%I%	%E% SMI"
+#
+# uts/sparc/ixgbe/Makefile
+#
+# This makefile drives the production of the ixgbe
+# network driver kernel module.
+#
+# sparc architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE		= ixgbe
+OBJECTS		= $(IXGBE_OBJS:%=$(OBJS_DIR)/%)
+LINTS		= $(IXGBE_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE	= $(ROOT_DRV_DIR)/$(MODULE)
+CONF_SRCDIR	= $(UTSBASE)/common/io/ixgbe
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sparc/Makefile.sparc
+
+#
+# Define the build, lint and install target sets
+#
+ALL_TARGET	= $(BINARY) $(SRC_CONFFILE)
+LINT_TARGET	= $(MODULE).lint
+INSTALL_TARGET	= $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# Override defaults so headers are found in the common ixgbe source dir
+#
+INC_PATH	+= -I$(CONF_SRCDIR)
+
+#
+# lint pass one enforcement
+#
+CFLAGS		+= $(CCVERBOSE)
+
+#
+# Turn on doubleword alignment for 64 bit registers
+#
+CFLAGS		+= -dalign
+
+#
+# Driver depends on MAC & IP; -dy -N records them as dynamic dependencies
+#
+LDFLAGS		+= -dy -N misc/mac -N drv/ip
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def:		$(DEF_DEPS)
+
+all:		$(ALL_DEPS)
+
+clean:		$(CLEAN_DEPS)
+
+clobber:	$(CLOBBER_DEPS)
+
+lint:		$(LINT_DEPS)
+
+modlintlib:	$(MODLINTLIB_DEPS)
+
+clean.lint:	$(CLEAN_LINT_DEPS)
+
+install:	$(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sparc/Makefile.targ
+
diff --git a/usr/src/uts/sparc/os/minor_perm b/usr/src/uts/sparc/os/minor_perm
index 6697b448f4..566da72d4a 100644
--- a/usr/src/uts/sparc/os/minor_perm
+++ b/usr/src/uts/sparc/os/minor_perm
@@ -150,6 +150,7 @@ zfs:zfs 0666 root sys
scsi_vhci:* 0666 root sys
kssl:* 0666 root sys
clone:bge 0666 root sys
+clone:ixgbe 0666 root sys
clone:rge 0666 root sys
clone:xge 0666 root sys
clone:nge 0666 root sys
@@ -164,6 +165,7 @@ clone:mxfe 0666 root sys
clone:rtls 0666 root sys
clone:vnic 0666 root sys
bge:* 0666 root sys
+ixgbe:* 0666 root sys
rge:* 0666 root sys
xge:* 0666 root sys
nge:* 0666 root sys