diff options
author | xy150489 <none@none> | 2008-01-07 15:27:37 -0800 |
---|---|---|
committer | xy150489 <none@none> | 2008-01-07 15:27:37 -0800 |
commit | c869993e79c1eafbec61a56bf6cea848fe754c71 (patch) | |
tree | 75c8fdcbf99f8d83e6e2affc8148afaf6f4ec8a1 /usr/src | |
parent | b96e88d7982efdf721857ee071c9e6739bab83e9 (diff) | |
download | illumos-gate-c869993e79c1eafbec61a56bf6cea848fe754c71.tar.gz |
PSARC 2007/624 Intel 82575 1Gb PCI Express NIC Driver
6591981 A new driver is needed to support Intel Zoar gigabit NIC
Diffstat (limited to 'usr/src')
43 files changed, 22341 insertions, 2 deletions
diff --git a/usr/src/pkgdefs/Makefile b/usr/src/pkgdefs/Makefile index 50133d0158..1209b6f594 100644 --- a/usr/src/pkgdefs/Makefile +++ b/usr/src/pkgdefs/Makefile @@ -20,7 +20,7 @@ # # -# Copyright 2007 Sun Microsystems, Inc. All rights reserved. +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # ident "%Z%%M% %I% %E% SMI" @@ -232,6 +232,7 @@ COMMON_SUBDIRS= \ SUNWib \ SUNWibsdpu \ SUNWibsdp \ + SUNWigb \ SUNWintgige \ SUNWiotu \ SUNWioth \ diff --git a/usr/src/pkgdefs/SUNWigb/Makefile b/usr/src/pkgdefs/SUNWigb/Makefile new file mode 100644 index 0000000000..19df9a110b --- /dev/null +++ b/usr/src/pkgdefs/SUNWigb/Makefile @@ -0,0 +1,38 @@ +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. 
+# +# ident "%Z%%M% %I% %E% SMI" +# + +include ../Makefile.com + +DATAFILES += depend i.renamenew + +.KEEP_STATE: + +all: $(FILES) postinstall postremove +install: all pkg + +include ../Makefile.targ diff --git a/usr/src/pkgdefs/SUNWigb/pkginfo.tmpl b/usr/src/pkgdefs/SUNWigb/pkginfo.tmpl new file mode 100644 index 0000000000..0d546fa557 --- /dev/null +++ b/usr/src/pkgdefs/SUNWigb/pkginfo.tmpl @@ -0,0 +1,47 @@ +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# +# ident "%Z%%M% %I% %E% SMI" +# + +PKG=SUNWigb +NAME=Intel 82575 1Gb PCI Express NIC Driver +ARCH="ISA" +VERSION="ONVERS,REV=0.0.0" +SUNW_PRODNAME="SunOS" +SUNW_PRODVERS="RELEASE/VERSION" +SUNW_PKGVERS="1.0" +SUNW_PKGTYPE="root" +MAXINST="1000" +CATEGORY=system +VENDOR="Sun Microsystems, Inc." 
+DESC="Intel 82575 1Gb PCI Express NIC Driver" +CLASSES="none renamenew" +HOTLINE="Please contact your local service provider" +EMAIL="" +BASEDIR=/ +SUNW_PKG_ALLZONES="true" +SUNW_PKG_HOLLOW="true" +SUNW_PKG_THISZONE="false" diff --git a/usr/src/pkgdefs/SUNWigb/postinstall b/usr/src/pkgdefs/SUNWigb/postinstall new file mode 100644 index 0000000000..f5284bfbe6 --- /dev/null +++ b/usr/src/pkgdefs/SUNWigb/postinstall @@ -0,0 +1,135 @@ +#!/sbin/sh +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# +# ident "%Z%%M% %I% %E% SMI" +# + +# Function: check_add_drv() +# +# This function will check if the module has an entry in etc/name_to_major +# If not simply calls add_drv with the arguments given. If there is +# such an entry in name_to_major file, it adds entries in driver_aliases +# driver_classes and minor_perm if necessary. +# The syntax of this function is the same as add_drv. 
+ +check_add_drv() +{ + if [ "$BASEDIR" = "" ] + then + BASEDIR=/ + fi + alias="" + class="" + ADD_ALIAS=0 + ADD_CLASS=0 + ADD_MINOR=0 + OPTIND=1 + IS_NET_DRIVER=0 + + cmd="add_drv" + + NO_CMD= + while getopts i:b:m:c:N opt + do + case $opt in + N ) NO_CMD=1;; + i ) ADD_ALIAS=1 + alias=$OPTARG + cmd=$cmd" -i '$alias'" + ;; + m ) ADD_MINOR=1 + minor=$OPTARG + cmd=$cmd" -m '$minor'" + ;; + c) ADD_CLASS=1 + class=$OPTARG + cmd=$cmd" -c $class" + ;; + b) BASEDIR=$OPTARG + cmd=$cmd" -b $BASEDIR" + ;; + \?) echo "check_add_drv can not handle this option" + return + ;; + esac + done + shift `/usr/bin/expr $OPTIND - 1` + + drvname=$1 + + cmd=$cmd" "$drvname + + drvname=`echo $drvname | /usr/bin/sed 's;.*/;;g'` + + /usr/bin/grep "^$drvname[ ]" $BASEDIR/etc/name_to_major > /dev/null 2>&1 + + if [ "$NO_CMD" = "" -a $? -ne 0 ] + then + eval $cmd + else + # entry already in name_to_major, add alias, class, minorperm + # if necessary + if [ $ADD_ALIAS = 1 ] + then + for i in $alias + do + /usr/bin/egrep "^$drvname[ ]+$i" $BASEDIR/etc/driver_aliases>/dev/null 2>&1 + if [ $? -ne 0 ] + then + echo "$drvname $i" >> $BASEDIR/etc/driver_aliases + fi + done + fi + + if [ $ADD_CLASS = 1 ] + then + /usr/bin/egrep "^$drvname[ ]+$class( | |$)" $BASEDIR/etc/driver_classes > /dev/null 2>&1 + if [ $? -ne 0 ] + then + echo "$drvname\t$class" >> $BASEDIR/etc/driver_classes + fi + fi + + if [ $ADD_MINOR = 1 ] + then + /usr/bin/grep "^$drvname:" $BASEDIR/etc/minor_perm > /dev/null 2>&1 + if [ $? 
-ne 0 ] + then + minorentry="$drvname:$minor" + echo $minorentry >> $BASEDIR/etc/minor_perm + fi + fi + + fi + + +} + +check_add_drv -i \ + '"pciex8086,10a7" + "pciex8086,10a9" + "pciex8086,10d6"' \ + -b "$BASEDIR" igb diff --git a/usr/src/pkgdefs/SUNWigb/postremove b/usr/src/pkgdefs/SUNWigb/postremove new file mode 100644 index 0000000000..0b7379f595 --- /dev/null +++ b/usr/src/pkgdefs/SUNWigb/postremove @@ -0,0 +1,39 @@ +#!/sbin/sh +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# +# ident "%Z%%M% %I% %E% SMI" +# + +BD=${BASEDIR:-/} +if grep -w igb $BD/etc/name_to_major > /dev/null 2>&1 +then + rem_drv -b ${BD} igb + if [ $? -ne 0 ] + then + exit 1 + fi +fi +exit 0 diff --git a/usr/src/pkgdefs/SUNWigb/prototype_com b/usr/src/pkgdefs/SUNWigb/prototype_com new file mode 100644 index 0000000000..0873cc9174 --- /dev/null +++ b/usr/src/pkgdefs/SUNWigb/prototype_com @@ -0,0 +1,52 @@ +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. 
+# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# +# ident "%Z%%M% %I% %E% SMI" +# + +# +# This required package information file contains a list of package contents. +# The 'pkgmk' command uses this file to identify the contents of a package +# and their location on the development machine when building the package. +# Can be created via a text editor or through use of the 'pkgproto' command. + +#!search <pathname pathname ...> # where to find pkg objects +#!include <filename> # include another 'prototype' file +#!default <mode> <owner> <group> # default used if not specified on entry +#!<param>=<value> # puts parameter in pkg environment + +# packaging files +i pkginfo +i copyright +i depend +i postinstall +i postremove +i i.renamenew +# +# Intel 1Gb Ethernet Driver common files +# +d none kernel 0755 root sys +d none kernel/drv 0755 root sys +e renamenew kernel/drv/igb.conf 0644 root sys diff --git a/usr/src/pkgdefs/SUNWigb/prototype_i386 b/usr/src/pkgdefs/SUNWigb/prototype_i386 new file mode 100644 index 0000000000..c3f83de3d8 --- /dev/null +++ b/usr/src/pkgdefs/SUNWigb/prototype_i386 @@ -0,0 +1,49 @@ +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. 
+# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# +# ident "%Z%%M% %I% %E% SMI" +# + +# +# This required package information file contains a list of package contents. +# The 'pkgmk' command uses this file to identify the contents of a package +# and their location on the development machine when building the package. +# Can be created via a text editor or through use of the 'pkgproto' command. + +#!search <pathname pathname ...> # where to find pkg objects +#!include <filename> # include another 'prototype' file +#!default <mode> <owner> <group> # default used if not specified on entry +#!<param>=<value> # puts parameter in pkg environment + +# +# Include ISA independent files (prototype_com) +# +!include prototype_com +# +# Intel 1Gb Ethernet Driver i386 specific files +# +f none kernel/drv/igb 0755 root sys +d none kernel/drv/amd64 0755 root sys +f none kernel/drv/amd64/igb 0755 root sys diff --git a/usr/src/pkgdefs/SUNWigb/prototype_sparc b/usr/src/pkgdefs/SUNWigb/prototype_sparc new file mode 100644 index 0000000000..bb65939cc0 --- /dev/null +++ b/usr/src/pkgdefs/SUNWigb/prototype_sparc @@ -0,0 +1,48 @@ +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. 
+# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# + +# +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# +# ident "%Z%%M% %I% %E% SMI" +# + +# +# This required package information file contains a list of package contents. +# The 'pkgmk' command uses this file to identify the contents of a package +# and their location on the development machine when building the package. +# Can be created via a text editor or through use of the 'pkgproto' command. 
+ +#!search <pathname pathname ...> # where to find pkg objects +#!include <filename> # include another 'prototype' file +#!default <mode> <owner> <group> # default used if not specified on entry +#!<param>=<value> # puts parameter in pkg environment + +# +# Include ISA independent files (prototype_com) +# +!include prototype_com +# +# Intel 1Gb Ethernet Driver SPARC specific files +# +d none kernel/drv/sparcv9 0755 root sys +f none kernel/drv/sparcv9/igb 0755 root sys diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files index 916735e982..8adce7c1b1 100644 --- a/usr/src/uts/common/Makefile.files +++ b/usr/src/uts/common/Makefile.files @@ -1543,6 +1543,15 @@ E1000G_OBJS += e1000_80003es2lan.o e1000_82540.o e1000_82541.o e1000_82542.o \ e1000_mac.o e1000_manage.o e1000_nvm.o e1000_osdep.o \ e1000_phy.o e1000g_debug.o e1000g_main.o e1000g_alloc.o \ e1000g_tx.o e1000g_rx.o e1000g_stat.o e1000g_ndd.o + +# +# Intel 82575 1G NIC driver module +# +IGB_OBJS = igb_82575.o igb_api.o igb_mac.o igb_manage.o \ + igb_nvm.o igb_osdep.o igb_phy.o igb_buf.o \ + igb_debug.o igb_gld.o igb_log.o igb_main.o \ + igb_ndd.o igb_rx.o igb_stat.o igb_tx.o + # # NIU 10G/1G driver module # diff --git a/usr/src/uts/common/Makefile.rules b/usr/src/uts/common/Makefile.rules index 35a9c003b6..4b2282b3ef 100644 --- a/usr/src/uts/common/Makefile.rules +++ b/usr/src/uts/common/Makefile.rules @@ -20,7 +20,7 @@ # # -# Copyright 2007 Sun Microsystems, Inc. All rights reserved. +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. 
# # ident "%Z%%M% %I% %E% SMI" @@ -887,6 +887,10 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/common/io/e1000g/%.c $(COMPILE.c) -o $@ $< $(CTFCONVERT_O) +$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/igb/%.c + $(COMPILE.c) -o $@ $< + $(CTFCONVERT_O) + $(OBJS_DIR)/%.o: $(UTSBASE)/common/ipp/%.c $(COMPILE.c) -o $@ $< $(CTFCONVERT_O) @@ -1691,6 +1695,9 @@ $(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/xge/hal/xgehal/%.c $(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/e1000g/%.c @($(LHEAD) $(LINT.c) $< $(LTAIL)) +$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/igb/%.c + @($(LHEAD) $(LINT.c) $< $(LTAIL)) + $(LINTS_DIR)/%.ln: $(UTSBASE)/common/ipp/%.c @($(LHEAD) $(LINT.c) $< $(LTAIL)) diff --git a/usr/src/uts/common/io/igb/igb.conf b/usr/src/uts/common/io/igb/igb.conf new file mode 100644 index 0000000000..c2ae8d4cd3 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb.conf @@ -0,0 +1,162 @@ +# +# CDDL HEADER START +# +# Copyright(c) 2007-2008 Intel Corporation. All rights reserved. +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at: +# http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When using or redistributing this file, you may do so under the +# License only. No other modification of this header is permitted. +# +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# +# +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms of the CDDL. 
+# +# +# ident "%Z%%M% %I% %E% SMI" +# +# +# Driver.conf file for Intel 1Gb ethernet driver (igb) +# +# -------------------- Link Configuration -------------------- +# The parameters of link configuration: +# +# adv_cap_autoneg +# Advertise capability of auto-negotiation. +# Allowed values: 0, 1 +# Default value: 1 +# +# adv_cap_1000fdx +# Advertise capability of 1000Mbps full duplex +# Allowed values: 0, 1 +# Default value: 1 +# +# adv_cap_100fdx +# Advertise capability of 100Mbps full duplex +# Allowed values: 0, 1 +# Default value: 1 +# +# adv_cap_100hdx +# Advertise capability of 100Mbps half duplex +# Allowed values: 0, 1 +# Default value: 1 +# +# adv_cap_10fdx +# Advertise capability of 10Mbps full duplex +# Allowed values: 0, 1 +# Default value: 1 +# +# adv_cap_10hdx +# Advertise capability of 10Mbps half duplex +# Allowed values: 0, 1 +# Default value: 1 +# +# There are two methods to configure the link: +# autonegotiation or forced link +# +# The parameter "adv_cap_autoneg" is used to enable autonegotiation or disable +# it (forced link mode). +# +# If autonegotiation is enabled (the default mode), all the "adv_cap_*" +# parameters control which capabilities are advertised to the partner. The +# default is to advertise all the capabilities that the hardware supports. +# The advertised capabilities can also be restricted to a subset. It is not +# possible to advertise a capability that the hardware does not support. +# +# The autonegotiation process will then automatically select the fastest speed/ +# duplex mode supported by both partners. +# +# If autonegotiation is disabled (forced link mode), the link speed/duplex is +# determined by the first link capability that is enabled, in highest-to-lowest +# speed/duplex order. +# +# For example, if adv_cap_1000fdx is enabled, all other values will be ignored; +# to force 10hdx mode, all the faster modes must be explicitly disabled. +# +# Note: +# 1. 1000M half duplex is not supported with igb. +# 2. 
1000M speed is not supported with the forced link mode (the value of +# adv_cap_1000fdx will be ignored). +# 3. The forced link mode may result in a non-working link or a half duplex +# link. If forced link mode is used, both the partners should be forced to +# the same link/speed mode. +# +# adv_cap_autoneg = 1; +# adv_cap_1000fdx = 1; +# adv_cap_100fdx = 1; +# adv_cap_100hdx = 1; +# adv_cap_10fdx = 1; +# adv_cap_10hdx = 1; +# +# -------------------- Jumbo Frame -------------------- +# default_mtu +# The size of the default MTU (payload without the ethernet header) +# Allowed values: 1500 - 9000 +# Default value: 1500 +# +# default_mtu = 1500; +# +# -------------------- Flow Control -------------------- +# flow_control +# Ethernet flow control +# Allowed values: 0 - Disable +# 1 - Receive only +# 2 - Transmit only +# 3 - Receive and transmit +# 4 - Use NVROM-programmed factory default setting +# default value: 3 +# +# flow_control = 3; +# +# -------------------- Transmit/Receive Queues -------------------- +# tx_queue_number +# The number of the transmit queues +# Allowed values: 1 - 4 +# Default value: 1 +# +# tx_ring_size +# The number of the transmit descriptors per transmit queue +# Allowed values: 64 - 4096 +# Default value: 512 +# +# rx_queue_number +# The number of the receive queues +# Allowed values: 1 - 4 +# Default value: 1 +# +# rx_ring_size +# The number of the receive descriptors per receive queue +# Allowed values: 64 - 4096 +# Default value: 512 +# +# Note: The final values of tx_queue_number and rx_queue_number are decided +# by the number of interrupt vectors obtained by the driver. They could be +# less than the specified values because of limited interrupt vector number. +# +# -------- How to set parameters for a particular interface --------- +# The example below shows how to locate the device path and set a parameter +# for a particular igb interface. 
(Using flow_control as an example) +# +# Use the following command to find out the device paths for igb, +# more /etc/path_to_inst | grep igb +# +# For example, if you see, +# "/pci@0,0/pci10de,5d@d/pci8086,0@0" 0 "igb" +# "/pci@0,0/pci10de,5d@d/pci8086,0@0,1" 1 "igb" +# +# name = "pciex8086,10a7" parent = "/pci@0,0/pci10de,5d@d" unit-address = "0" +# flow_control = 1; +# name = "pciex8086,10a7" parent = "/pci@0,0/pci10de,5d@d" unit-address = "0,1" +# flow_control = 3; diff --git a/usr/src/uts/common/io/igb/igb_82575.c b/usr/src/uts/common/io/igb/igb_82575.c new file mode 100644 index 0000000000..c00cdc7582 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_82575.c @@ -0,0 +1,1464 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * e1000_82575 + * e1000_82576 + */ + +#include "igb_api.h" +#include "igb_82575.h" + +static s32 e1000_init_phy_params_82575(struct e1000_hw *hw); +static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); +static s32 e1000_init_mac_params_82575(struct e1000_hw *hw); +static s32 e1000_acquire_phy_82575(struct e1000_hw *hw); +static void e1000_release_phy_82575(struct e1000_hw *hw); +static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); +static void e1000_release_nvm_82575(struct e1000_hw *hw); +static s32 e1000_check_for_link_82575(struct e1000_hw *hw); +static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +static s32 e1000_init_hw_82575(struct e1000_hw *hw); +static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); +static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data); +static void e1000_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index); +static s32 e1000_reset_hw_82575(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, + bool active); +static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw); +static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, + u32 offset, u16 data); +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +static s32 e1000_configure_pcs_link_82575(struct e1000_hw *hw); +static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +static s32 e1000_get_phy_id_82575(struct e1000_hw *hw); +static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +static bool e1000_sgmii_active_82575(struct e1000_hw *hw); +static s32 e1000_reset_init_script_82575(struct e1000_hw *hw); +static s32 
e1000_read_mac_addr_82575(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); + + +struct e1000_dev_spec_82575 { + bool sgmii_active; +}; + +/* + * e1000_init_phy_params_82575 - Init PHY func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + */ +static s32 +e1000_init_phy_params_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_phy_params_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } else { + func->power_up_phy = e1000_power_up_phy_copper; + func->power_down_phy = e1000_power_down_phy_copper_82575; + } + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + func->acquire_phy = e1000_acquire_phy_82575; + func->check_reset_block = e1000_check_reset_block_generic; + func->commit_phy = e1000_phy_sw_reset_generic; + func->get_cfg_done = e1000_get_cfg_done_82575; + func->release_phy = e1000_release_phy_82575; + + if (e1000_sgmii_active_82575(hw)) { + func->reset_phy = e1000_phy_hw_reset_sgmii_82575; + func->read_phy_reg = e1000_read_phy_reg_sgmii_82575; + func->write_phy_reg = e1000_write_phy_reg_sgmii_82575; + } else { + func->reset_phy = e1000_phy_hw_reset_generic; + func->read_phy_reg = e1000_read_phy_reg_igp; + func->write_phy_reg = e1000_write_phy_reg_igp; + } + + /* Set phy->phy_addr and phy->id. 
*/ + ret_val = e1000_get_phy_id_82575(hw); + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + func->check_polarity = e1000_check_polarity_m88; + func->get_phy_info = e1000_get_phy_info_m88; + func->get_cable_length = e1000_get_cable_length_m88; + func->force_speed_duplex = e1000_phy_force_speed_duplex_m88; + break; + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + func->check_polarity = e1000_check_polarity_igp; + func->get_phy_info = e1000_get_phy_info_igp; + func->get_cable_length = e1000_get_cable_length_igp_2; + func->force_speed_duplex = e1000_phy_force_speed_duplex_igp; + func->set_d0_lplu_state = e1000_set_d0_lplu_state_82575; + func->set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + */ +static s32 +e1000_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_functions *func = &hw->func; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82575"); + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. 
+ */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + + /* Function Pointers */ + func->acquire_nvm = e1000_acquire_nvm_82575; + func->read_nvm = e1000_read_nvm_eerd; + func->release_nvm = e1000_release_nvm_82575; + func->update_nvm = e1000_update_nvm_checksum_generic; + func->valid_led_default = e1000_valid_led_default_generic; + func->validate_nvm = e1000_validate_nvm_checksum_generic; + func->write_nvm = e1000_write_nvm_spi; + + return (E1000_SUCCESS); +} + +/* + * e1000_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + */ +static s32 +e1000_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_functions *func = &hw->func; + struct e1000_dev_spec_82575 *dev_spec; + u32 ctrl_ext = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_mac_params_82575"); + + hw->dev_spec_size = sizeof (struct e1000_dev_spec_82575); + + /* Device-specific structure allocation */ + ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size); + if (ret_val) + goto out; + + dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec; + + /* Set media type */ + /* + * The 82575 uses bits 22:23 for link mode. The mode can be changed + * based on the EEPROM. We cannot rely upon device ID. There + * is no distinguishable difference between fiber and internal + * SerDes mode on the 82575. There can be an external PHY attached + * on the SGMII interface. For this, we'll set sgmii_active to TRUE. 
+ */ + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = FALSE; + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + if ((ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) == + E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES) { + hw->phy.media_type = e1000_media_type_internal_serdes; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else if (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII) { + dev_spec->sgmii_active = TRUE; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = TRUE; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? TRUE : FALSE; + + /* Function pointers */ + + /* bus type/speed/width */ + func->get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + func->reset_hw = e1000_reset_hw_82575; + /* hw initialization */ + func->init_hw = e1000_init_hw_82575; + /* link setup */ + func->setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + func->setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? 
e1000_setup_copper_link_82575 + : e1000_setup_fiber_serdes_link_82575; + /* check for link */ + func->check_for_link = e1000_check_for_link_82575; + /* receive address register setting */ + func->rar_set = e1000_rar_set_82575; + /* read mac address */ + func->read_mac_addr = e1000_read_mac_addr_82575; + /* multicast address update */ + func->update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + func->write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + func->clear_vfta = e1000_clear_vfta_generic; + /* setting MTA */ + func->mta_set = e1000_mta_set_generic; + /* blink LED */ + func->blink_led = e1000_blink_led_generic; + /* setup LED */ + func->setup_led = e1000_setup_led_generic; + /* cleanup LED */ + func->cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + func->led_on = e1000_led_on_generic; + func->led_off = e1000_led_off_generic; + /* remove device */ + func->remove_device = e1000_remove_device_generic; + /* clear hardware counters */ + func->clear_hw_cntrs = e1000_clear_hw_cntrs_82575; + /* link info */ + func->get_link_up_info = e1000_get_link_up_info_82575; + +out: + return (ret_val); +} + +/* + * e1000_init_function_pointers_82575 - Init func ptrs. + * @hw: pointer to the HW structure + * + * The only function explicitly called by the api module to initialize + * all function pointers and parameters. + */ +void +e1000_init_function_pointers_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82575"); + + hw->func.init_mac_params = e1000_init_mac_params_82575; + hw->func.init_nvm_params = e1000_init_nvm_params_82575; + hw->func.init_phy_params = e1000_init_phy_params_82575; +} + +/* + * e1000_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. 
 */
static s32
e1000_acquire_phy_82575(struct e1000_hw *hw)
{
	u16 mask;

	DEBUGFUNC("e1000_acquire_phy_82575");

	/* PCI function 0 owns PHY0's semaphore, function 1 owns PHY1's */
	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;

	return (e1000_acquire_swfw_sync_82575(hw, mask));
}

/*
 * e1000_release_phy_82575 - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY.  This is a
 * function pointer entry point called by the api module.
 */
static void
e1000_release_phy_82575(struct e1000_hw *hw)
{
	u16 mask;

	DEBUGFUNC("e1000_release_phy_82575");

	/* Must match the mask chosen in e1000_acquire_phy_82575() */
	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
	e1000_release_swfw_sync_82575(hw, mask);
}

/*
 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset using the serial gigabit media independent
 * interface and stores the retrieved information in data.
 * Returns E1000_SUCCESS, -E1000_ERR_PARAM for an out-of-range offset,
 * or -E1000_ERR_PHY on an I2C timeout/error.
 */
static s32
e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 *data)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, i2ccmd = 0;

	DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		DEBUGOUT1("PHY Address %u is out of range\n", offset);
		return (-E1000_ERR_PARAM);
	}

	/*
	 * Set up Op-code, Phy Address, and register address in the I2CCMD
	 * register.  The MAC will take care of interfacing with the
	 * PHY to retrieve the desired data.
	 */
	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
	    (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
	    (E1000_I2CCMD_OPCODE_READ));

	E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);

	/* Poll the ready bit to see if the I2C read completed */
	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
		usec_delay(50);
		i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
		if (i2ccmd & E1000_I2CCMD_READY)
			break;
	}
	if (!(i2ccmd & E1000_I2CCMD_READY)) {
		DEBUGOUT("I2CCMD Read did not complete\n");
		return (-E1000_ERR_PHY);
	}
	if (i2ccmd & E1000_I2CCMD_ERROR) {
		DEBUGOUT("I2CCMD Error bit set\n");
		return (-E1000_ERR_PHY);
	}

	/* Need to byte-swap the 16-bit value.  I2C delivers it big-endian. */
	*data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);

	return (E1000_SUCCESS);
}

/*
 * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset using the serial gigabit
 * media independent interface.
 * Returns E1000_SUCCESS, -E1000_ERR_PARAM for an out-of-range offset,
 * or -E1000_ERR_PHY on an I2C timeout/error.
 */
static s32
e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 data)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, i2ccmd = 0;
	u16 phy_data_swapped;

	DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");

	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		DEBUGOUT1("PHY Address %d is out of range\n", offset);
		return (-E1000_ERR_PARAM);
	}

	/* Swap the data bytes for the I2C interface (big-endian on the wire) */
	phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);

	/*
	 * Set up Op-code, Phy Address, and register address in the I2CCMD
	 * register.  The MAC will take care of interfacing with the
	 * PHY to retrieve the desired data.
	 */
	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
	    (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
	    E1000_I2CCMD_OPCODE_WRITE |
	    phy_data_swapped);

	E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);

	/* Poll the ready bit to see if the I2C write completed */
	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
		usec_delay(50);
		i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
		if (i2ccmd & E1000_I2CCMD_READY)
			break;
	}
	if (!(i2ccmd & E1000_I2CCMD_READY)) {
		DEBUGOUT("I2CCMD Write did not complete\n");
		return (-E1000_ERR_PHY);
	}
	if (i2ccmd & E1000_I2CCMD_ERROR) {
		DEBUGOUT("I2CCMD Error bit set\n");
		return (-E1000_ERR_PHY);
	}

	return (E1000_SUCCESS);
}

/*
 * e1000_get_phy_id_82575 - Retrieve PHY addr and id
 * @hw: pointer to the HW structure
 *
 * Retrieves the PHY address and ID for both PHYs which do and do not use
 * the sgmii interface.
 */
static s32
e1000_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = E1000_SUCCESS;
	u16 phy_id;

	DEBUGFUNC("e1000_get_phy_id_82575");

	/*
	 * For SGMII PHYs, we try the list of possible addresses until
	 * we find one that works.  For non-SGMII PHYs
	 * (e.g. integrated copper PHYs), an address of 1 should
	 * work.  The result of this function should mean phy->phy_addr
	 * and phy->id are set correctly.
	 */
	if (!(e1000_sgmii_active_82575(hw))) {
		phy->addr = 1;
		ret_val = e1000_get_phy_id(hw);
		goto out;
	}

	/*
	 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
	 * Therefore, we need to test 1-7
	 */
	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == E1000_SUCCESS) {
			DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
			    phy_id,
			    phy->addr);
			/*
			 * At the time of this writing, The M88 part is
			 * the only supported SGMII PHY product.
			 */
			if (phy_id == M88_VENDOR)
				break;
		} else {
			DEBUGOUT1("PHY address %u was unreadable\n",
			    phy->addr);
		}
	}

	/* A valid PHY type couldn't be found.  Loop exhausted addresses 1-7. */
	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

	ret_val = e1000_get_phy_id(hw);

out:
	return (ret_val);
}

/*
 * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
 * @hw: pointer to the HW structure
 *
 * Resets the PHY using the serial gigabit media independent interface.
 */
static s32
e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");

	/*
	 * This isn't a true "hard" reset, but is the only reset
	 * available to us at this time.
	 */

	DEBUGOUT("Soft resetting SGMII attached PHY...\n");

	/*
	 * SFP documentation requires the following to configure the SFP module
	 * to work on SGMII.  No further documentation is given.
	 */
	ret_val = e1000_write_phy_reg(hw, 0x1B, 0x8084);
	if (ret_val)
		goto out;

	ret_val = e1000_phy_commit(hw);

out:
	return (ret_val);
}

/*
 * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: TRUE to enable LPLU, FALSE to disable
 *
 * Sets the LPLU D0 state according to the active flag.  When
 * activating LPLU this function also disables smart speed
 * and vice versa.  LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
+ */ +static s32 +e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82575"); + + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1000_write_phy_reg(hw, + IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1000_write_phy_reg(hw, + IGP02E1000_PHY_POWER_MGMT, + data); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } + +out: + return (ret_val); +} + +/* + * e1000_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclussive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. 
+ * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + */ +static s32 +e1000_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_82575"); + + ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + ret_val = e1000_acquire_nvm_generic(hw); + + if (ret_val) + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + +out: + return (ret_val); +} + +/* + * e1000_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + */ +static void +e1000_release_nvm_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_82575"); + + e1000_release_nvm_generic(hw); + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); +} + +/* + * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
 */
static s32
e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* firmware owns the upper 16 bits */
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200;	/* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_82575");

	while (i < timeout) {
		/* SW_FW_SYNC itself is protected by the HW semaphore */
		if (e1000_get_hw_semaphore_generic(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim the resource; HW semaphore is still held at this point */
	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

out:
	return (ret_val);
}

/*
 * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 * will also specify which port we're releasing the lock for.
 */
static void
e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_82575");

	/* Spin until the HW semaphore guarding SW_FW_SYNC is ours */
	while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) {
		/* Empty */
	}

	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);
}

/*
 * e1000_get_cfg_done_82575 - Read config done bit
 * @hw: pointer to the HW structure
 *
 * Read the management control register for the config done bit for
 * completion status.  NOTE: silicon which is EEPROM-less will fail trying
 * to read the config done bit, so an error is *ONLY* logged and returns
 * E1000_SUCCESS.  If we were to return with error, EEPROM-less silicon
 * would not be able to be reset or change link.
 */
static s32
e1000_get_cfg_done_82575(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = E1000_SUCCESS;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	DEBUGFUNC("e1000_get_cfg_done_82575");

	if (hw->bus.func == 1)
		mask = E1000_NVM_CFG_DONE_PORT_1;

	while (timeout) {
		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
			break;
		msec_delay(1);
		timeout--;
	}
	if (!timeout) {
		/* Deliberately non-fatal: see NOTE in the header comment */
		DEBUGOUT("MNG configuration cycle has not completed.\n");
	}

	/* If EEPROM is not marked present, init the PHY manually */
	if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
	    (hw->phy.type == e1000_phy_igp_3)) {
		(void) e1000_phy_init_script_igp3(hw);
	}
	return (ret_val);
}

/*
 * e1000_get_link_up_info_82575 - Get link speed/duplex info
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * This is a wrapper function, if using the serial gigabit media independent
 * interface, use pcs to retrieve the link speed and duplex information.
 * Otherwise, use the generic function to get the link speed and duplex info.
 */
static s32
e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex)
{
	s32 ret_val;

	DEBUGFUNC("e1000_get_link_up_info_82575");

	/* Non-copper media and SGMII both report link state through the PCS */
	if (hw->phy.media_type != e1000_media_type_copper ||
	    e1000_sgmii_active_82575(hw)) {
		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
		    duplex);
	} else {
		ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
		    duplex);
	}

	return (ret_val);
}

/*
 * e1000_check_for_link_82575 - Check for link
 * @hw: pointer to the HW structure
 *
 * If sgmii is enabled, then use the pcs register to determine link, otherwise
 * use the generic interface for determining link.
 */
static s32
e1000_check_for_link_82575(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 speed, duplex;

	DEBUGFUNC("e1000_check_for_link_82575");

	/* SGMII link check is done through the PCS register. */
	if ((hw->phy.media_type != e1000_media_type_copper) ||
	    (e1000_sgmii_active_82575(hw)))
		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
		    &duplex);
	else
		ret_val = e1000_check_for_copper_link_generic(hw);

	return (ret_val);
}

/*
 * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Using the physical coding sub-layer (PCS), retrieve the current speed and
 * duplex, then store the values in the pointers provided.  Also updates
 * mac->serdes_has_link as a side effect.  Speed and duplex are left as 0
 * when no stable link is present.
 */
static s32
e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
    u16 *speed, u16 *duplex)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 pcs;

	DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");

	/* Set up defaults for the return values of this function */
	mac->serdes_has_link = FALSE;
	*speed = 0;
	*duplex = 0;

	/*
	 * Read the PCS Status register for link state.  For non-copper mode,
	 * the status register is not accurate.  The PCS status register is
	 * used instead.
	 */
	pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);

	/*
	 * The link up bit determines when link is up on autoneg.  The sync ok
	 * gets set once both sides sync up and agree upon link.  Stable link
	 * can be determined by checking for both link up and link sync ok
	 */
	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
		mac->serdes_has_link = TRUE;

		/* Detect and store PCS speed */
		if (pcs & E1000_PCS_LSTS_SPEED_1000) {
			*speed = SPEED_1000;
		} else if (pcs & E1000_PCS_LSTS_SPEED_100) {
			*speed = SPEED_100;
		} else {
			*speed = SPEED_10;
		}

		/* Detect and store PCS duplex */
		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
			*duplex = FULL_DUPLEX;
		} else {
			*duplex = HALF_DUPLEX;
		}
	}
	return (E1000_SUCCESS);
}

/*
 * e1000_rar_set_82575 - Set receive address register
 * @hw: pointer to the HW structure
 * @addr: pointer to the receive address
 * @index: receive address array register
 *
 * Sets the receive address array register at index to the address passed
 * in by addr.  Out-of-range indices are silently ignored.
 */
static void
e1000_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index)
{
	DEBUGFUNC("e1000_rar_set_82575");

	if (index < E1000_RAR_ENTRIES_82575) {
		e1000_rar_set_generic(hw, addr, index);
	}
}

/*
 * e1000_reset_hw_82575 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state.  This is a
 * function pointer entry point called by the api module.
 */
static s32
e1000_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_82575");

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val) {
		/* Non-fatal: proceed with the reset regardless */
		DEBUGOUT("PCI-E Master disable polling has failed.\n");
	}

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/* Quiesce RX/TX before issuing the global reset */
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	DEBUGOUT("Issuing a global reset to MAC\n");
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = e1000_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error.  This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
		(void) e1000_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	(void) E1000_READ_REG(hw, E1000_ICR);	/* read-to-clear */

	(void) e1000_check_alt_mac_addr_generic(hw);

	return (ret_val);
}

/*
 * e1000_init_hw_82575 - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation.
 */
static s32
e1000_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	DEBUGFUNC("e1000_init_hw_82575");

	/* Initialize identification LED */
	ret_val = e1000_id_led_init_generic(hw);
	if (ret_val) {
		DEBUGOUT("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	DEBUGOUT("Initializing the IEEE VLAN\n");
	e1000_clear_vfta(hw);

	/* Setup the receive address */
	e1000_init_rx_addrs_generic(hw, rar_count);
	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Setup link and flow control */
	ret_val = e1000_setup_link(hw);

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_82575(hw);

	return (ret_val);
}

/*
 * e1000_setup_copper_link_82575 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex.  Then we check
 * for link, once link is established calls to configure collision distance
 * and flow control are called.
 */
static s32
e1000_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl, led_ctrl;
	s32 ret_val;
	bool link;

	DEBUGFUNC("e1000_setup_copper_link_82575");

	/* Force link up and let speed/duplex come from the PHY */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	switch (hw->phy.type) {
	case e1000_phy_m88:
		ret_val = e1000_copper_link_setup_m88(hw);
		break;
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		/* Setup activity LED */
		led_ctrl = E1000_READ_REG(hw, E1000_LEDCTL);
		led_ctrl &= IGP_ACTIVITY_LED_MASK;
		led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
		E1000_WRITE_REG(hw, E1000_LEDCTL, led_ctrl);
		break;
	default:
		/* No other PHY types are supported on this MAC */
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	if (hw->mac.autoneg) {
		/*
		 * Setup autoneg and flow control advertisement
		 * and perform autonegotiation.
		 */
		ret_val = e1000_copper_link_autoneg(hw);
		if (ret_val)
			goto out;
	} else {
		/*
		 * PHY will be set to 10H, 10F, 100H or 100F
		 * depending on user settings.
		 */
		DEBUGOUT("Forcing Speed and Duplex\n");
		ret_val = e1000_phy_force_speed_duplex(hw);
		if (ret_val) {
			DEBUGOUT("Error Forcing Speed and Duplex\n");
			goto out;
		}
	}

	/* Needed for SGMII: keep the PCS in sync with the PHY settings */
	ret_val = e1000_configure_pcs_link_82575(hw);
	if (ret_val)
		goto out;

	/*
	 * Check link status.  Wait up to 100 microseconds for link to become
	 * valid.
	 */
	ret_val = e1000_phy_has_link_generic(hw,
	    COPPER_LINK_UP_LIMIT,
	    10,
	    &link);
	if (ret_val)
		goto out;

	if (link) {
		DEBUGOUT("Valid link established!!!\n");
		/* Config the MAC and PHY after link is up */
		e1000_config_collision_dist_generic(hw);
		ret_val = e1000_config_fc_after_link_up_generic(hw);
	} else {
		/* Not an error: caller may retry link checks later */
		DEBUGOUT("Unable to establish link!!!\n");
	}

out:
	return (ret_val);
}

/*
 * e1000_setup_fiber_serdes_link_82575 - Setup link for fiber/serdes
 * @hw: pointer to the HW structure
 *
 * Configures speed and duplex for fiber and serdes links.
 */
static s32
e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	DEBUGFUNC("e1000_setup_fiber_serdes_link_82575");

	/*
	 * On the 82575, SerDes loopback mode persists until it is
	 * explicitly turned off or a power cycle is performed.  A read to
	 * the register does not indicate its status.  Therefore, we ensure
	 * loopback mode is disabled during initialization.
	 */
	E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

	/* Force link up, set 1gb, set both sw defined pins */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_SLU |
	    E1000_CTRL_SPD_1000 |
	    E1000_CTRL_FRCSPD |
	    E1000_CTRL_SWDPIN0 |
	    E1000_CTRL_SWDPIN1;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	/* Set switch control to serdes energy detect */
	reg = E1000_READ_REG(hw, E1000_CONNSW);
	reg |= E1000_CONNSW_ENRGSRC;
	E1000_WRITE_REG(hw, E1000_CONNSW, reg);

	/*
	 * New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb.  Autoneg should be default set by most drivers.  This is the
	 * mode that will be compatible with older link partners and switches.
	 * However, both are supported by the hardware and some drivers/tools.
	 */
	reg = E1000_READ_REG(hw, E1000_PCS_LCTL);

	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
	    E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

	if (hw->mac.autoneg) {
		/* Set PCS register for autoneg */
		reg |= E1000_PCS_LCTL_FSV_1000 |	/* Force 1000 */
		    E1000_PCS_LCTL_FDV_FULL |		/* SerDes Full duplex */
		    E1000_PCS_LCTL_AN_ENABLE |		/* Enable Autoneg */
		    E1000_PCS_LCTL_AN_RESTART;		/* Restart autoneg */
		DEBUGOUT1("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
	} else {
		/* Set PCS register for forced speed */
		reg |= E1000_PCS_LCTL_FLV_LINK_UP |	/* Force link up */
		    E1000_PCS_LCTL_FSV_1000 |		/* Force 1000 */
		    E1000_PCS_LCTL_FDV_FULL |		/* SerDes Full duplex */
		    E1000_PCS_LCTL_FSD |		/* Force Speed */
		    E1000_PCS_LCTL_FORCE_LINK;		/* Force Link */
		DEBUGOUT1("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
	}
	E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);

	return (E1000_SUCCESS);
}

/*
 * e1000_configure_pcs_link_82575 - Configure PCS link
 * @hw: pointer to the HW structure
 *
 * Configure the physical coding sub-layer (PCS) link.  The PCS link is
 * only used on copper connections where the serialized gigabit media
 * independent interface (sgmii) is being used.  Configures the link
 * for auto-negotiation or forces speed/duplex.
 */
static s32
e1000_configure_pcs_link_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 reg = 0;

	DEBUGFUNC("e1000_configure_pcs_link_82575");

	/* No-op unless this is a copper SGMII link */
	if (hw->phy.media_type != e1000_media_type_copper ||
	    !(e1000_sgmii_active_82575(hw)))
		goto out;

	/* For SGMII, we need to issue a PCS autoneg restart */
	reg = E1000_READ_REG(hw, E1000_PCS_LCTL);

	/* AN time out should be disabled for SGMII mode */
	reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);

	if (mac->autoneg) {
		/* Make sure forced speed and force link are not set */
		reg &= ~(E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

		/*
		 * The PHY should be setup prior to calling this function.
		 * All we need to do is restart autoneg and enable autoneg.
		 */
		reg |= E1000_PCS_LCTL_AN_RESTART | E1000_PCS_LCTL_AN_ENABLE;
	} else {
		/* Set PCS register for forced speed */

		/* Turn off bits for full duplex, speed, and autoneg */
		reg &= ~(E1000_PCS_LCTL_FSV_1000 |
		    E1000_PCS_LCTL_FSV_100 |
		    E1000_PCS_LCTL_FDV_FULL |
		    E1000_PCS_LCTL_AN_ENABLE);

		/* Check for duplex first */
		if (mac->forced_speed_duplex & E1000_ALL_FULL_DUPLEX)
			reg |= E1000_PCS_LCTL_FDV_FULL;

		/* Now set speed */
		if (mac->forced_speed_duplex & E1000_ALL_100_SPEED)
			reg |= E1000_PCS_LCTL_FSV_100;

		/* Force speed and force link */
		reg |= E1000_PCS_LCTL_FSD |
		    E1000_PCS_LCTL_FORCE_LINK |
		    E1000_PCS_LCTL_FLV_LINK_UP;

		DEBUGOUT1("Wrote 0x%08X to PCS_LCTL to configure forced link\n",
		    reg);
	}
	E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);

out:
	return (E1000_SUCCESS);
}

/*
 * e1000_sgmii_active_82575 - Return sgmii state
 * @hw: pointer to the HW structure
 *
 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
 * which can be enabled for use in the embedded applications.  Simply
 * return the current state of the sgmii interface.  Always FALSE for
 * non-82575 MAC types.
 */
static bool
e1000_sgmii_active_82575(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82575 *dev_spec;
	bool ret_val;

	DEBUGFUNC("e1000_sgmii_active_82575");

	if (hw->mac.type != e1000_82575) {
		ret_val = FALSE;
		goto out;
	}

	dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec;

	/* sgmii_active is latched at init time from CTRL_EXT link mode bits */
	ret_val = dev_spec->sgmii_active;

out:
	return (ret_val);
}

/*
 * e1000_reset_init_script_82575 - Inits HW defaults after reset
 * @hw: pointer to the HW structure
 *
 * Inits recommended HW defaults after a reset when there is no EEPROM
 * detected.  This is only for the 82575.
 */
static s32
e1000_reset_init_script_82575(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_reset_init_script_82575");

	if (hw->mac.type == e1000_82575) {
		DEBUGOUT("Running reset init script for 82575\n");
		/*
		 * The register/value pairs below are vendor-recommended
		 * post-reset defaults; do not change them.
		 */
		/* SerDes configuration via SERDESCTRL */
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		(void) e1000_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return (E1000_SUCCESS);
}

/*
 * e1000_read_mac_addr_82575 - Read device MAC address
 * @hw: pointer to the HW structure
 *
 * Prefers the alternate MAC address from the NVM when one is present;
 * otherwise falls back to the generic MAC address read.
 */
static s32
e1000_read_mac_addr_82575(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_mac_addr_82575");
	if (e1000_check_alt_mac_addr_generic(hw))
		ret_val = e1000_read_mac_addr_generic(hw);

	return (ret_val);
}

/*
 * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
 */
static void
e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(e1000_check_mng_mode(hw) || e1000_check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);
}

/*
 * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers
 * (they are clear-on-read; the values are intentionally discarded).
 */
static void
e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_clear_hw_cntrs_82575");

	e1000_clear_hw_cntrs_base_generic(hw);

	(void) E1000_READ_REG(hw, E1000_PRC64);
	(void) E1000_READ_REG(hw, E1000_PRC127);
	(void) E1000_READ_REG(hw, E1000_PRC255);
	(void) E1000_READ_REG(hw, E1000_PRC511);
	(void) E1000_READ_REG(hw, E1000_PRC1023);
	(void) E1000_READ_REG(hw, E1000_PRC1522);
	(void) E1000_READ_REG(hw, E1000_PTC64);
	(void) E1000_READ_REG(hw, E1000_PTC127);
	(void) E1000_READ_REG(hw, E1000_PTC255);
	(void) E1000_READ_REG(hw, E1000_PTC511);
	(void) E1000_READ_REG(hw, E1000_PTC1023);
	(void) E1000_READ_REG(hw, E1000_PTC1522);

	(void) E1000_READ_REG(hw, E1000_ALGNERRC);
	(void) E1000_READ_REG(hw, E1000_RXERRC);
	(void) E1000_READ_REG(hw, E1000_TNCRS);
	(void) E1000_READ_REG(hw, E1000_CEXTERR);
	(void) E1000_READ_REG(hw, E1000_TSCTC);
	(void) E1000_READ_REG(hw, E1000_TSCTFC);

	(void) E1000_READ_REG(hw, E1000_MGTPRC);
	(void) E1000_READ_REG(hw, E1000_MGTPDC);
	(void) E1000_READ_REG(hw, E1000_MGTPTC);

	(void) E1000_READ_REG(hw, E1000_IAC);
	(void) E1000_READ_REG(hw, E1000_ICRXOC);

	(void) E1000_READ_REG(hw, E1000_ICRXPTC);
	(void) E1000_READ_REG(hw, E1000_ICRXATC);
	(void) E1000_READ_REG(hw, E1000_ICTXPTC);
	(void) E1000_READ_REG(hw, E1000_ICTXATC);
	(void) E1000_READ_REG(hw, E1000_ICTXQEC);
	(void) E1000_READ_REG(hw, E1000_ICTXQMTC);
	(void) E1000_READ_REG(hw, E1000_ICRXDMTC);

	(void) E1000_READ_REG(hw, E1000_CBTMPC);
	(void) E1000_READ_REG(hw, E1000_HTDPMC);
	(void)
E1000_READ_REG(hw, E1000_CBRMPC); + (void) E1000_READ_REG(hw, E1000_RPTHC); + (void) E1000_READ_REG(hw, E1000_HGPTC); + (void) E1000_READ_REG(hw, E1000_HTCBDPC); + (void) E1000_READ_REG(hw, E1000_HGORCL); + (void) E1000_READ_REG(hw, E1000_HGORCH); + (void) E1000_READ_REG(hw, E1000_HGOTCL); + (void) E1000_READ_REG(hw, E1000_HGOTCH); + (void) E1000_READ_REG(hw, E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if (hw->phy.media_type == e1000_media_type_internal_serdes) + (void) E1000_READ_REG(hw, E1000_SCVPC); +} diff --git a/usr/src/uts/common/io/igb/igb_82575.h b/usr/src/uts/common/io/igb/igb_82575.h new file mode 100644 index 0000000000..1d2f235c2c --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_82575.h @@ -0,0 +1,326 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#ifndef _IGB_82575_H +#define _IGB_82575_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Receive Address Register Count + * Number of high/low register pairs in the RAR. 
The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * These entries are also used for MAC-based filtering. + */ +#define E1000_RAR_ENTRIES_82575 16 + +#ifdef E1000_BIT_FIELDS +struct e1000_adv_data_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u32 datalen :16; /* Data buffer length */ + u32 rsvd :4; + u32 dtyp :4; /* Descriptor type */ + u32 dcmd :8; /* Descriptor command */ + } config; + } lower; + union { + u32 data; + struct { + u32 status :4; /* Descriptor status */ + u32 idx :4; + u32 popts :6; /* Packet Options */ + u32 paylen :18; /* Payload length */ + } options; + } upper; +}; + +#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ +#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ +#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ +#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ +#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ +#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ +#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ +#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ +#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ +#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ +/* Extended Device Control */ +#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ + +struct e1000_adv_context_desc { + union { + u32 ip_config; + struct { + u32 iplen :9; + u32 maclen :7; + u32 vlan_tag :16; + } fields; + } ip_setup; + u32 seq_num; + union { + u64 l4_config; + struct { + u32 mkrloc :9; + u32 tucmd :11; + u32 dtyp :4; + u32 adv :8; + u32 rsvd :4; + u32 idx :4; + u32 l4len :8; + u32 mss :16; + } fields; + } l4_setup; +}; +#endif + 
+/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +#define E1000_TX_HEAD_WB_ENABLE 0x1 +#define E1000_TX_SEQNUM_WB_ENABLE 0x2 + +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 + +#define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE +#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE + +#define EIMS_ENABLE_MASK ( \ + E1000_EIMS_RX_QUEUE | \ + E1000_EIMS_TX_QUEUE | \ + E1000_EIMS_TCP_TIMER | \ + E1000_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + /* RSS type, Packet type */ + u16 pkt_info; + /* Split Header, header buffer length */ + u16 hdr_info; + } lo_dword; + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_RSSTYPE_MASK 0x0000F000 +#define E1000_RXDADV_RSSTYPE_SHIFT 12 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 +#define E1000_RXDADV_SPH 0x8000 +#define E1000_RXDADV_HBO 0x00800000 + +/* RSS Hash results */ +#define E1000_RXDADV_RSSTYPE_NONE 0x00000000 +#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 +#define 
E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor */ +#define E1000_RXDADV_PKTTYPE_NONE 0x00000000 +#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + u64 buffer_addr; /* Address of descriptor's data buf */ + u32 cmd_type_len; + u32 olinfo_status; + } read; + struct { + u64 rsvd; /* Reserved */ + u32 nxtseq_seed; + u32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* 
NXTSEQ/SEED present in WB */ +#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define E1000_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit in RDMA DDP hdr */ +#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st&Last TSO-full iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + u32 vlan_macip_lens; + u32 seqnum_seed; + u32 type_tucmd_mlhl; + u32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 +/* Req requires Markers and CRC */ +#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF +/* Adv ctxt IPSec ESP len mask */ +#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF + +/* Additional Transmit Descriptor Control definitions */ +/* Enable specific Tx Queue */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 +/* Tx Desc. 
write-back flushing */ +#define E1000_TXDCTL_SWFLSH 0x04000000 +/* Tx Queue Arbitration Priority 0=low, 1=high */ +#define E1000_TXDCTL_PRIORITY 0x08000000 + +/* Additional Receive Descriptor Control definitions */ +/* Enable specific Rx Queue */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 +/* Rx Desc. write-back flushing */ +#define E1000_RXDCTL_SWFLSH 0x04000000 + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ + + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_82575_H */ diff --git a/usr/src/uts/common/io/igb/igb_api.c b/usr/src/uts/common/io/igb/igb_api.c new file mode 100644 index 0000000000..5a8f7d49b1 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_api.c @@ -0,0 +1,1091 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
+ * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_api.h" +#include "igb_mac.h" +#include "igb_nvm.h" +#include "igb_phy.h" + +/* + * e1000_init_mac_params - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the MAC + * set of functions. Called by drivers or by e1000_setup_init_funcs. + */ +s32 +e1000_init_mac_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->func.init_mac_params) { + ret_val = hw->func.init_mac_params(hw); + if (ret_val) { + DEBUGOUT("MAC Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mac.init_mac_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return (ret_val); +} + +/* + * e1000_init_nvm_params - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the NVM + * set of functions. Called by drivers or by e1000_setup_init_funcs. 
+ */ +s32 +e1000_init_nvm_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->func.init_nvm_params) { + ret_val = hw->func.init_nvm_params(hw); + if (ret_val) { + DEBUGOUT("NVM Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("nvm.init_nvm_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return (ret_val); +} + +/* + * e1000_init_phy_params - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by e1000_setup_init_funcs. + */ +s32 +e1000_init_phy_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->func.init_phy_params) { + ret_val = hw->func.init_phy_params(hw); + if (ret_val) { + DEBUGOUT("PHY Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("phy.init_phy_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return (ret_val); +} + +/* + * e1000_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * device ID stored in the hw structure. + * MUST BE FIRST FUNCTION CALLED (explicitly or through + * e1000_setup_init_funcs()). + */ +s32 +e1000_set_mac_type(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_mac_type"); + + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + default: + /* Should never have loaded on this device */ + ret_val = -E1000_ERR_MAC_INIT; + break; + } + + return (ret_val); +} + +/* + * e1000_setup_init_funcs - Initializes function pointers + * @hw: pointer to the HW structure + * @init_device: TRUE will initialize the rest of the function pointers + * getting the device ready for use. 
FALSE will only set + * MAC type and the function pointers for the other init + * functions. Passing FALSE will not generate any hardware + * reads or writes. + * + * This function must be called by a driver in order to use the rest + * of the 'shared' code files. Called by drivers only. + */ +s32 +e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device) +{ + s32 ret_val; + + /* Can't do much good without knowing the MAC type. */ + ret_val = e1000_set_mac_type(hw); + if (ret_val) { + DEBUGOUT("ERROR: MAC type could not be set properly.\n"); + goto out; + } + + if (!hw->hw_addr) { + DEBUGOUT("ERROR: Registers not mapped\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Init some generic function pointers that are currently all pointing + * to generic implementations. We do this first allowing a driver + * module to override it afterwards. + */ + hw->func.config_collision_dist = e1000_config_collision_dist_generic; + hw->func.rar_set = e1000_rar_set_generic; + hw->func.validate_mdi_setting = e1000_validate_mdi_setting_generic; + hw->func.mng_host_if_write = e1000_mng_host_if_write_generic; + hw->func.mng_write_cmd_header = e1000_mng_write_cmd_header_generic; + hw->func.mng_enable_host_if = e1000_mng_enable_host_if_generic; + hw->func.wait_autoneg = e1000_wait_autoneg_generic; + hw->func.reload_nvm = e1000_reload_nvm_generic; + + /* + * Set up the init function pointers. These are functions within the + * adapter family file that sets up function pointers for the rest of + * the functions in that family. + */ + switch (hw->mac.type) { + case e1000_82575: + e1000_init_function_pointers_82575(hw); + break; + default: + DEBUGOUT("Hardware not supported\n"); + ret_val = -E1000_ERR_CONFIG; + break; + } + + /* + * Initialize the rest of the function pointers. These require some + * register reads/writes in some cases. 
+ */ + if (!(ret_val) && init_device) { + ret_val = e1000_init_mac_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_nvm_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_phy_params(hw); + if (ret_val) + goto out; + + } + +out: + return (ret_val); +} + +/* + * e1000_remove_device - Free device specific structure + * @hw: pointer to the HW structure + * + * If a device specific structure was allocated, this function will + * free it. This is a function pointer entry point called by drivers. + */ +void +e1000_remove_device(struct e1000_hw *hw) +{ + if (hw->func.remove_device) + hw->func.remove_device(hw); +} + +/* + * e1000_get_bus_info - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This will obtain information about the HW bus for which the + * adaper is attached and stores it in the hw structure. This is a + * function pointer entry point called by drivers. + */ +s32 +e1000_get_bus_info(struct e1000_hw *hw) +{ + if (hw->func.get_bus_info) + return (hw->func.get_bus_info(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * This clears the VLAN filter table on the adapter. This is a function + * pointer entry point called by drivers. + */ +void +e1000_clear_vfta(struct e1000_hw *hw) +{ + if (hw->func.clear_vfta) + hw->func.clear_vfta(hw); +} + +/* + * e1000_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: the 32-bit offset in which to write the value to. + * @value: the 32-bit value to write at location offset. + * + * This writes a 32-bit value to a 32-bit offset in the VLAN filter + * table. This is a function pointer entry point called by drivers. 
+ */ +void +e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + if (hw->func.write_vfta) + hw->func.write_vfta(hw, offset, value); +} + +/* + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. Currently no func pointer + * exists and all implementations are handled in the generic version of this + * function. + */ +void +e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, u32 rar_used_count, u32 rar_count) +{ + if (hw->func.update_mc_addr_list) + hw->func.update_mc_addr_list(hw, + mc_addr_list, + mc_addr_count, + rar_used_count, + rar_count); +} + +/* + * e1000_force_mac_fc - Force MAC flow control + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Currently no func pointer exists + * and all implementations are handled in the generic version of this + * function. + */ +s32 +e1000_force_mac_fc(struct e1000_hw *hw) +{ + return (e1000_force_mac_fc_generic(hw)); +} + +/* + * e1000_check_for_link - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. This is a function pointer entry + * point called by drivers. 
+ */ +s32 +e1000_check_for_link(struct e1000_hw *hw) +{ + if (hw->func.check_for_link) + return (hw->func.check_for_link(hw)); + + return (-E1000_ERR_CONFIG); +} + +/* + * e1000_check_mng_mode - Check management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has manageability enabled. + * This is a function pointer entry point called by drivers. + */ +bool +e1000_check_mng_mode(struct e1000_hw *hw) +{ + if (hw->func.check_mng_mode) + return (hw->func.check_mng_mode(hw)); + + return (FALSE); +} + +/* + * e1000_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + */ +s32 +e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + return (e1000_mng_write_dhcp_info_generic(hw, buffer, length)); +} + +/* + * e1000_reset_hw - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a function pointer + * entry point called by drivers. + */ +s32 +e1000_reset_hw(struct e1000_hw *hw) +{ + if (hw->func.reset_hw) + return (hw->func.reset_hw(hw)); + + return (-E1000_ERR_CONFIG); +} + +/* + * e1000_init_hw - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a function + * pointer entry point called by drivers. + */ +s32 +e1000_init_hw(struct e1000_hw *hw) +{ + if (hw->func.init_hw) + return (hw->func.init_hw(hw)); + + return (-E1000_ERR_CONFIG); +} + +/* + * e1000_setup_link - Configures link and flow control + * @hw: pointer to the HW structure + * + * This configures link and flow control settings for the adapter. This + * is a function pointer entry point called by drivers. While modules can + * also call this, they probably call their own version of this function. 
+ */ +s32 +e1000_setup_link(struct e1000_hw *hw) +{ + if (hw->func.setup_link) + return (hw->func.setup_link(hw)); + + return (-E1000_ERR_CONFIG); +} + +/* + * e1000_get_speed_and_duplex - Returns current speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to a 16-bit value to store the speed + * @duplex: pointer to a 16-bit value to store the duplex. + * + * This returns the speed and duplex of the adapter in the two 'out' + * variables passed in. This is a function pointer entry point called + * by drivers. + */ +s32 +e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + if (hw->func.get_link_up_info) + return (hw->func.get_link_up_info(hw, speed, duplex)); + + return (-E1000_ERR_CONFIG); +} + +/* + * e1000_setup_led - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. This is a function pointer entry + * point called by drivers. + */ +s32 +e1000_setup_led(struct e1000_hw *hw) +{ + if (hw->func.setup_led) + return (hw->func.setup_led(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_cleanup_led - Restores SW controllable LED + * @hw: pointer to the HW structure + * + * This restores the SW controllable LED to the value saved off by + * e1000_setup_led. This is a function pointer entry point called by drivers. + */ +s32 +e1000_cleanup_led(struct e1000_hw *hw) +{ + if (hw->func.cleanup_led) + return (hw->func.cleanup_led(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_blink_led - Blink SW controllable LED + * @hw: pointer to the HW structure + * + * This starts the adapter LED blinking. Request the LED to be setup first + * and cleaned up after. This is a function pointer entry point called by + * drivers. 
+ */ +s32 +e1000_blink_led(struct e1000_hw *hw) +{ + if (hw->func.blink_led) + return (hw->func.blink_led(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_led_on - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. This is a function pointer entry point + * called by drivers. + */ +s32 +e1000_led_on(struct e1000_hw *hw) +{ + if (hw->func.led_on) + return (hw->func.led_on(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_led_off - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. This is a function pointer entry point + * called by drivers. + */ +s32 +e1000_led_off(struct e1000_hw *hw) +{ + if (hw->func.led_off) + return (hw->func.led_off(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_reset_adaptive - Reset adaptive IFS + * @hw: pointer to the HW structure + * + * Resets the adaptive IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +void +e1000_reset_adaptive(struct e1000_hw *hw) +{ + e1000_reset_adaptive_generic(hw); +} + +/* + * e1000_update_adaptive - Update adaptive IFS + * @hw: pointer to the HW structure + * + * Updates adapter IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +void +e1000_update_adaptive(struct e1000_hw *hw) +{ + e1000_update_adaptive_generic(hw); +} + +/* + * e1000_disable_pcie_master - Disable PCI-Express master access + * @hw: pointer to the HW structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. Currently no func pointer exists and all implementations are + * handled in the generic version of this function. 
+ */ +s32 +e1000_disable_pcie_master(struct e1000_hw *hw) +{ + return (e1000_disable_pcie_master_generic(hw)); +} + +/* + * e1000_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + */ +void +e1000_config_collision_dist(struct e1000_hw *hw) +{ + if (hw->func.config_collision_dist) + hw->func.config_collision_dist(hw); +} + +/* + * e1000_rar_set - Sets a receive address register + * @hw: pointer to the HW structure + * @addr: address to set the RAR to + * @index: the RAR to set + * + * Sets a Receive Address Register (RAR) to the specified address. + */ +void +e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + if (hw->func.rar_set) + hw->func.rar_set(hw, addr, index); +} + +/* + * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state + * @hw: pointer to the HW structure + * + * Ensures that the MDI/MDIX SW state is valid. + */ +s32 +e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (hw->func.validate_mdi_setting) + return (hw->func.validate_mdi_setting(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_mta_set - Sets multicast table bit + * @hw: pointer to the HW structure + * @hash_value: Multicast hash value. + * + * This sets the bit in the multicast table corresponding to the + * hash value. This is a function pointer entry point called by drivers. + */ +void +e1000_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + if (hw->func.mta_set) + hw->func.mta_set(hw, hash_value); +} + +/* + * e1000_hash_mc_addr - Determines address location in multicast table + * @hw: pointer to the HW structure + * @mc_addr: Multicast address to hash. + * + * This hashes an address to determine its location in the multicast + * table. Currently no func pointer exists and all implementations + * are handled in the generic version of this function. 
+ */ +u32 +e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + return (e1000_hash_mc_addr_generic(hw, mc_addr)); +} + +/* + * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + */ +bool +e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + return (e1000_enable_tx_pkt_filtering_generic(hw)); +} + +/* + * e1000_mng_host_if_write - Writes to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + */ +s32 +e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum) +{ + if (hw->func.mng_host_if_write) + return (hw->func.mng_host_if_write(hw, buffer, length, offset, + sum)); + + return (E1000_NOT_IMPLEMENTED); +} + +/* + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. 
+ */ +s32 +e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + if (hw->func.mng_write_cmd_header) + return (hw->func.mng_write_cmd_header(hw, hdr)); + + return (E1000_NOT_IMPLEMENTED); +} + +/* + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operaton + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + */ +s32 +e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + if (hw->func.mng_enable_host_if) + return (hw->func.mng_enable_host_if(hw)); + + return (E1000_NOT_IMPLEMENTED); +} + +/* + * e1000_wait_autoneg - Waits for autonegotiation completion + * @hw: pointer to the HW structure + * + * Waits for autoneg to complete. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +s32 +e1000_wait_autoneg(struct e1000_hw *hw) +{ + if (hw->func.wait_autoneg) + return (hw->func.wait_autoneg(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_check_reset_block - Verifies PHY can be reset + * @hw: pointer to the HW structure + * + * Checks if the PHY is in a state that can be reset or if manageability + * has it tied up. This is a function pointer entry point called by drivers. + */ +s32 +e1000_check_reset_block(struct e1000_hw *hw) +{ + if (hw->func.check_reset_block) + return (hw->func.check_reset_block(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_read_phy_reg - Reads PHY register + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the buffer to store the 16-bit read. + * + * Reads the PHY register and returns the value in data. + * This is a function pointer entry point called by drivers. 
+ */ +s32 +e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->func.read_phy_reg) + return (hw->func.read_phy_reg(hw, offset, data)); + + return (E1000_SUCCESS); +} + +/* + * e1000_write_phy_reg - Writes PHY register + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + */ +s32 +e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->func.write_phy_reg) + return (hw->func.write_phy_reg(hw, offset, data)); + + return (E1000_SUCCESS); +} + +/* + * e1000_read_kmrn_reg - Reads register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the location to store the 16-bit value read. + * + * Reads a register out of the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + */ +s32 +e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return (e1000_read_kmrn_reg_generic(hw, offset, data)); +} + +/* + * e1000_write_kmrn_reg - Writes register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes a register to the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + */ +s32 +e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return (e1000_write_kmrn_reg_generic(hw, offset, data)); +} + +/* + * e1000_get_cable_length - Retrieves cable length estimation + * @hw: pointer to the HW structure + * + * This function estimates the cable length and stores them in + * hw->phy.min_length and hw->phy.max_length. This is a function pointer + * entry point called by drivers. 
+ */ +s32 +e1000_get_cable_length(struct e1000_hw *hw) +{ + if (hw->func.get_cable_length) + return (hw->func.get_cable_length(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_get_phy_info - Retrieves PHY information from registers + * @hw: pointer to the HW structure + * + * This function gets some information from various PHY registers and + * populates hw->phy values with it. This is a function pointer entry + * point called by drivers. + */ +s32 +e1000_get_phy_info(struct e1000_hw *hw) +{ + if (hw->func.get_phy_info) + return (hw->func.get_phy_info(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_phy_hw_reset - Hard PHY reset + * @hw: pointer to the HW structure + * + * Performs a hard PHY reset. This is a function pointer entry point called + * by drivers. + */ +s32 +e1000_phy_hw_reset(struct e1000_hw *hw) +{ + if (hw->func.reset_phy) + return (hw->func.reset_phy(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_phy_commit - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers. + */ +s32 +e1000_phy_commit(struct e1000_hw *hw) +{ + if (hw->func.commit_phy) + return (hw->func.commit_phy(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_set_d3_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is true, else clear lplu for D0 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. 
+ */ +s32 +e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->func.set_d0_lplu_state) + return (hw->func.set_d0_lplu_state(hw, active)); + + return (E1000_SUCCESS); +} + +/* + * e1000_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + */ +s32 +e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->func.set_d3_lplu_state) + return (hw->func.set_d3_lplu_state(hw, active)); + + return (E1000_SUCCESS); +} + +/* + * e1000_read_mac_addr - Reads MAC address + * @hw: pointer to the HW structure + * + * Reads the MAC address out of the adapter and stores it in the HW structure. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + */ +s32 +e1000_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->func.read_mac_addr) + return (hw->func.read_mac_addr(hw)); + + return (e1000_read_mac_addr_generic(hw)); +} + +/* + * e1000_read_pba_num - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. 
+ */ +s32 +e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num) +{ + return (e1000_read_pba_num_generic(hw, pba_num)); +} + +/* + * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Validates the NVM checksum is correct. This is a function pointer entry + * point called by drivers. + */ +s32 +e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->func.validate_nvm) + return (hw->func.validate_nvm(hw)); + + return (-E1000_ERR_CONFIG); +} + +/* + * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Updates the NVM checksum. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +s32 +e1000_update_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->func.update_nvm) + return (hw->func.update_nvm(hw)); + + return (-E1000_ERR_CONFIG); +} + +/* + * e1000_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + */ +void +e1000_reload_nvm(struct e1000_hw *hw) +{ + if (hw->func.reload_nvm) + hw->func.reload_nvm(hw); +} + +/* + * e1000_read_nvm - Reads NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to read + * @data: pointer to the properly sized buffer for the data. + * + * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + */ +s32 +e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->func.read_nvm) + return (hw->func.read_nvm(hw, offset, words, data)); + + return (-E1000_ERR_CONFIG); +} + +/* + * e1000_write_nvm - Writes to NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to write + * @data: pointer to the properly sized buffer for the data. 
+ * + * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + */ +s32 +e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->func.write_nvm) + return (hw->func.write_nvm(hw, offset, words, data)); + + return (E1000_SUCCESS); +} + +/* + * e1000_write_8bit_ctrl_reg - Writes 8bit Control register + * @hw: pointer to the HW structure + * @reg: 32bit register offset + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + */ +s32 +e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, u8 data) +{ + return (e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data)); +} + +/* + * e1000_power_up_phy - Restores link in case of PHY power down + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + */ +void +e1000_power_up_phy(struct e1000_hw *hw) +{ + if (hw->func.power_up_phy) + hw->func.power_up_phy(hw); + + (void) e1000_setup_link(hw); +} + +/* + * e1000_power_down_phy - Power down PHY + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + */ +void +e1000_power_down_phy(struct e1000_hw *hw) +{ + if (hw->func.power_down_phy) + hw->func.power_down_phy(hw); +} diff --git a/usr/src/uts/common/io/igb/igb_api.h b/usr/src/uts/common/io/igb/igb_api.h new file mode 100644 index 0000000000..94d7afe12d --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_api.h @@ -0,0 +1,161 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. 
+ * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#ifndef _IGB_API_H +#define _IGB_API_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "igb_hw.h" + +extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); + +s32 e1000_set_mac_type(struct e1000_hw *hw); +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device); +s32 e1000_init_mac_params(struct e1000_hw *hw); +s32 e1000_init_nvm_params(struct e1000_hw *hw); +s32 e1000_init_phy_params(struct e1000_hw *hw); +void e1000_remove_device(struct e1000_hw *hw); +s32 e1000_get_bus_info(struct e1000_hw *hw); +void e1000_clear_vfta(struct e1000_hw *hw); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 e1000_force_mac_fc(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); +s32 e1000_disable_pcie_master(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw 
*hw); +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); +void e1000_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count); +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_check_reset_block(struct e1000_hw *hw); +s32 e1000_blink_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +s32 e1000_get_cable_length(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); +s32 e1000_get_phy_info(struct e1000_hw *hw); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_commit(struct e1000_hw *hw); +void e1000_power_up_phy(struct e1000_hw *hw); +void e1000_power_down_phy(struct e1000_hw *hw); +s32 e1000_read_mac_addr(struct e1000_hw *hw); +s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num); +void e1000_reload_nvm(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw); +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_wait_autoneg(struct e1000_hw *hw); +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +bool e1000_check_mng_mode(struct 
e1000_hw *hw); +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if(struct e1000_hw *hw); +s32 e1000_mng_host_if_write(struct e1000_hw *hw, + u8 *buffer, u16 length, u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, + u8 *buffer, u16 length); +#ifndef FIFO_WORKAROUND +s32 e1000_fifo_workaround_82547(struct e1000_hw *hw, u16 length); +void e1000_update_tx_fifo_head_82547(struct e1000_hw *hw, u32 length); +void e1000_set_ttl_workaround_state_82541(struct e1000_hw *hw, bool state); +bool e1000_ttl_workaround_enabled_82541(struct e1000_hw *hw); +s32 e1000_igp_ttl_workaround_82547(struct e1000_hw *hw); +#endif + + +/* + * TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the Rx descriptor with EOP set + * error = the 8 bit error field of the Rx descriptor with EOP set + * length = the sum of all the length fields of the Rx descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = TRUE; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = FALSE; + * } + * ... + */ + +/* The carrier extension symbol, as received by the NIC. 
*/ +#define CARRIER_EXTENSION 0x0F + +#define TBI_ACCEPT(a, status, errors, length, last_byte, \ + min_frame_size, max_frame_size) \ + (e1000_tbi_sbp_enabled_82543(a) && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= (max_frame_size + 1))) : \ + (((length) > min_frame_size) && \ + ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1))))) + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_API_H */ diff --git a/usr/src/uts/common/io/igb/igb_buf.c b/usr/src/uts/common/io/igb/igb_buf.c new file mode 100644 index 0000000000..3503653923 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_buf.c @@ -0,0 +1,871 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_sw.h" + +static int igb_alloc_tbd_ring(igb_tx_ring_t *); +static void igb_free_tbd_ring(igb_tx_ring_t *); +static int igb_alloc_rbd_ring(igb_rx_ring_t *); +static void igb_free_rbd_ring(igb_rx_ring_t *); +static int igb_alloc_dma_buffer(igb_t *, dma_buffer_t *, size_t); +static void igb_free_dma_buffer(dma_buffer_t *); +static int igb_alloc_tcb_lists(igb_tx_ring_t *); +static void igb_free_tcb_lists(igb_tx_ring_t *); +static int igb_alloc_rcb_lists(igb_rx_ring_t *); +static void igb_free_rcb_lists(igb_rx_ring_t *); + +#ifdef __sparc +#define IGB_DMA_ALIGNMENT 0x0000000000002000ull +#else +#define IGB_DMA_ALIGNMENT 0x0000000000001000ull +#endif + +/* + * DMA attributes for tx/rx descriptors + */ +static ddi_dma_attr_t igb_desc_dma_attr = { + DMA_ATTR_V0, /* version number */ + 0x0000000000000000ull, /* low address */ + 0xFFFFFFFFFFFFFFFFull, /* high address */ + 0x00000000FFFFFFFFull, /* dma counter max */ + IGB_DMA_ALIGNMENT, /* alignment */ + 0x00000FFF, /* burst sizes */ + 0x00000001, /* minimum transfer size */ + 0x00000000FFFFFFFFull, /* maximum transfer size */ + 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */ + 1, /* scatter/gather list length */ + 0x00000001, /* granularity */ + 0 /* DMA flags */ +}; + +/* + * DMA attributes for tx/rx buffers + */ +static ddi_dma_attr_t igb_buf_dma_attr = { + DMA_ATTR_V0, /* version number */ + 0x0000000000000000ull, /* low address */ + 0xFFFFFFFFFFFFFFFFull, /* high address */ + 0x00000000FFFFFFFFull, /* dma counter max */ + IGB_DMA_ALIGNMENT, /* alignment */ + 0x00000FFF, /* burst sizes */ + 0x00000001, /* minimum transfer size */ + 0x00000000FFFFFFFFull, /* maximum transfer size */ + 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */ + 1, /* scatter/gather list length */ + 0x00000001, /* granularity */ + 0 /* DMA flags */ +}; + +/* + * DMA attributes for transmit + */ +static ddi_dma_attr_t igb_tx_dma_attr = { + DMA_ATTR_V0, /* version number */ + 
0x0000000000000000ull, /* low address */ + 0xFFFFFFFFFFFFFFFFull, /* high address */ + 0x00000000FFFFFFFFull, /* dma counter max */ + 1, /* alignment */ + 0x00000FFF, /* burst sizes */ + 0x00000001, /* minimum transfer size */ + 0x00000000FFFFFFFFull, /* maximum transfer size */ + 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */ + MAX_COOKIE, /* scatter/gather list length */ + 0x00000001, /* granularity */ + 0 /* DMA flags */ +}; + +/* + * DMA access attributes for descriptors. + */ +static ddi_device_acc_attr_t igb_desc_acc_attr = { + DDI_DEVICE_ATTR_V0, + DDI_STRUCTURE_LE_ACC, + DDI_STRICTORDER_ACC +}; + +/* + * DMA access attributes for buffers. + */ +static ddi_device_acc_attr_t igb_buf_acc_attr = { + DDI_DEVICE_ATTR_V0, + DDI_NEVERSWAP_ACC, + DDI_STRICTORDER_ACC +}; + + +/* + * igb_alloc_dma - Allocate DMA resources for all rx/tx rings + */ +int +igb_alloc_dma(igb_t *igb) +{ + igb_rx_ring_t *rx_ring; + igb_tx_ring_t *tx_ring; + int i; + + for (i = 0; i < igb->num_rx_rings; i++) { + /* + * Allocate receive desciptor ring and control block lists + */ + rx_ring = &igb->rx_rings[i]; + + if (igb_alloc_rbd_ring(rx_ring) != IGB_SUCCESS) + goto alloc_dma_failure; + + if (igb_alloc_rcb_lists(rx_ring) != IGB_SUCCESS) + goto alloc_dma_failure; + } + + for (i = 0; i < igb->num_tx_rings; i++) { + /* + * Allocate transmit desciptor ring and control block lists + */ + tx_ring = &igb->tx_rings[i]; + + if (igb_alloc_tbd_ring(tx_ring) != IGB_SUCCESS) + goto alloc_dma_failure; + + if (igb_alloc_tcb_lists(tx_ring) != IGB_SUCCESS) + goto alloc_dma_failure; + } + + return (IGB_SUCCESS); + +alloc_dma_failure: + igb_free_dma(igb); + + return (IGB_FAILURE); +} + + +/* + * igb_free_dma - Free all the DMA resources of all rx/tx rings + */ +void +igb_free_dma(igb_t *igb) +{ + igb_rx_ring_t *rx_ring; + igb_tx_ring_t *tx_ring; + int i; + + /* + * Free DMA resources of rx rings + */ + for (i = 0; i < igb->num_rx_rings; i++) { + rx_ring = &igb->rx_rings[i]; + igb_free_rbd_ring(rx_ring); + 
igb_free_rcb_lists(rx_ring); + } + + /* + * Free DMA resources of tx rings + */ + for (i = 0; i < igb->num_tx_rings; i++) { + tx_ring = &igb->tx_rings[i]; + igb_free_tbd_ring(tx_ring); + igb_free_tcb_lists(tx_ring); + } +} + +/* + * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring. + */ +static int +igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring) +{ + int ret; + size_t size; + size_t len; + uint_t cookie_num; + dev_info_t *devinfo; + ddi_dma_cookie_t cookie; + igb_t *igb = tx_ring->igb; + + devinfo = igb->dip; + size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size; + + /* + * If tx head write-back is enabled, an extra tbd is allocated + * to save the head write-back value + */ + if (igb->tx_head_wb_enable) { + size += sizeof (union e1000_adv_tx_desc); + } + + /* + * Allocate a DMA handle for the transmit descriptor + * memory area. + */ + ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr, + DDI_DMA_DONTWAIT, NULL, + &tx_ring->tbd_area.dma_handle); + + if (ret != DDI_SUCCESS) { + igb_error(igb, + "Could not allocate tbd dma handle: %x", ret); + tx_ring->tbd_area.dma_handle = NULL; + + return (IGB_FAILURE); + } + + /* + * Allocate memory to DMA data to and from the transmit + * descriptors. 
+ */ + ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle, + size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT, + DDI_DMA_DONTWAIT, NULL, + (caddr_t *)&tx_ring->tbd_area.address, + &len, &tx_ring->tbd_area.acc_handle); + + if (ret != DDI_SUCCESS) { + igb_error(igb, + "Could not allocate tbd dma memory: %x", ret); + tx_ring->tbd_area.acc_handle = NULL; + tx_ring->tbd_area.address = NULL; + if (tx_ring->tbd_area.dma_handle != NULL) { + ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle); + tx_ring->tbd_area.dma_handle = NULL; + } + return (IGB_FAILURE); + } + + /* + * Initialize the entire transmit buffer descriptor area to zero + */ + bzero(tx_ring->tbd_area.address, len); + + /* + * Allocates DMA resources for the memory that was allocated by + * the ddi_dma_mem_alloc call. The DMA resources then get bound to the + * the memory address + */ + ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle, + NULL, (caddr_t)tx_ring->tbd_area.address, + len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, + DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num); + + if (ret != DDI_DMA_MAPPED) { + igb_error(igb, + "Could not bind tbd dma resource: %x", ret); + tx_ring->tbd_area.dma_address = NULL; + if (tx_ring->tbd_area.acc_handle != NULL) { + ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle); + tx_ring->tbd_area.acc_handle = NULL; + tx_ring->tbd_area.address = NULL; + } + if (tx_ring->tbd_area.dma_handle != NULL) { + ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle); + tx_ring->tbd_area.dma_handle = NULL; + } + return (IGB_FAILURE); + } + + ASSERT(cookie_num == 1); + + tx_ring->tbd_area.dma_address = cookie.dmac_laddress; + tx_ring->tbd_area.size = len; + + tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t) + tx_ring->tbd_area.address; + + return (IGB_SUCCESS); +} + +/* + * igb_free_tbd_ring - Free the tx descriptors of one ring. 
+ */ +static void +igb_free_tbd_ring(igb_tx_ring_t *tx_ring) +{ + if (tx_ring->tbd_area.dma_handle != NULL) { + (void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle); + } + if (tx_ring->tbd_area.acc_handle != NULL) { + ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle); + tx_ring->tbd_area.acc_handle = NULL; + } + if (tx_ring->tbd_area.dma_handle != NULL) { + ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle); + tx_ring->tbd_area.dma_handle = NULL; + } + tx_ring->tbd_area.address = NULL; + tx_ring->tbd_area.dma_address = NULL; + tx_ring->tbd_area.size = 0; + + tx_ring->tbd_ring = NULL; +} + +/* + * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring. + */ +static int +igb_alloc_rbd_ring(igb_rx_ring_t *rx_ring) +{ + int ret; + size_t size; + size_t len; + uint_t cookie_num; + dev_info_t *devinfo; + ddi_dma_cookie_t cookie; + igb_t *igb = rx_ring->igb; + + devinfo = igb->dip; + size = sizeof (union e1000_adv_rx_desc) * rx_ring->ring_size; + + /* + * Allocate a new DMA handle for the receive descriptor + * memory area. + */ + ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr, + DDI_DMA_DONTWAIT, NULL, + &rx_ring->rbd_area.dma_handle); + + if (ret != DDI_SUCCESS) { + igb_error(igb, + "Could not allocate rbd dma handle: %x", ret); + rx_ring->rbd_area.dma_handle = NULL; + return (IGB_FAILURE); + } + + /* + * Allocate memory to DMA data to and from the receive + * descriptors. 
+ */ + ret = ddi_dma_mem_alloc(rx_ring->rbd_area.dma_handle, + size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT, + DDI_DMA_DONTWAIT, NULL, + (caddr_t *)&rx_ring->rbd_area.address, + &len, &rx_ring->rbd_area.acc_handle); + + if (ret != DDI_SUCCESS) { + igb_error(igb, + "Could not allocate rbd dma memory: %x", ret); + rx_ring->rbd_area.acc_handle = NULL; + rx_ring->rbd_area.address = NULL; + if (rx_ring->rbd_area.dma_handle != NULL) { + ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle); + rx_ring->rbd_area.dma_handle = NULL; + } + return (IGB_FAILURE); + } + + /* + * Initialize the entire transmit buffer descriptor area to zero + */ + bzero(rx_ring->rbd_area.address, len); + + /* + * Allocates DMA resources for the memory that was allocated by + * the ddi_dma_mem_alloc call. + */ + ret = ddi_dma_addr_bind_handle(rx_ring->rbd_area.dma_handle, + NULL, (caddr_t)rx_ring->rbd_area.address, + len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, + DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num); + + if (ret != DDI_DMA_MAPPED) { + igb_error(igb, + "Could not bind rbd dma resource: %x", ret); + rx_ring->rbd_area.dma_address = NULL; + if (rx_ring->rbd_area.acc_handle != NULL) { + ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle); + rx_ring->rbd_area.acc_handle = NULL; + rx_ring->rbd_area.address = NULL; + } + if (rx_ring->rbd_area.dma_handle != NULL) { + ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle); + rx_ring->rbd_area.dma_handle = NULL; + } + return (IGB_FAILURE); + } + + ASSERT(cookie_num == 1); + + rx_ring->rbd_area.dma_address = cookie.dmac_laddress; + rx_ring->rbd_area.size = len; + + rx_ring->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t) + rx_ring->rbd_area.address; + + return (IGB_SUCCESS); +} + +/* + * igb_free_rbd_ring - Free the rx descriptors of one ring. 
+ */ +static void +igb_free_rbd_ring(igb_rx_ring_t *rx_ring) +{ + if (rx_ring->rbd_area.dma_handle != NULL) { + (void) ddi_dma_unbind_handle(rx_ring->rbd_area.dma_handle); + } + if (rx_ring->rbd_area.acc_handle != NULL) { + ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle); + rx_ring->rbd_area.acc_handle = NULL; + } + if (rx_ring->rbd_area.dma_handle != NULL) { + ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle); + rx_ring->rbd_area.dma_handle = NULL; + } + rx_ring->rbd_area.address = NULL; + rx_ring->rbd_area.dma_address = NULL; + rx_ring->rbd_area.size = 0; + + rx_ring->rbd_ring = NULL; +} + + +/* + * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer + */ +static int +igb_alloc_dma_buffer(igb_t *igb, + dma_buffer_t *buf, size_t size) +{ + int ret; + dev_info_t *devinfo = igb->dip; + ddi_dma_cookie_t cookie; + size_t len; + uint_t cookie_num; + + ret = ddi_dma_alloc_handle(devinfo, + &igb_buf_dma_attr, DDI_DMA_DONTWAIT, + NULL, &buf->dma_handle); + + if (ret != DDI_SUCCESS) { + buf->dma_handle = NULL; + igb_error(igb, + "Could not allocate dma buffer handle: %x", ret); + return (IGB_FAILURE); + } + + ret = ddi_dma_mem_alloc(buf->dma_handle, + size, &igb_buf_acc_attr, DDI_DMA_STREAMING, + DDI_DMA_DONTWAIT, NULL, &buf->address, + &len, &buf->acc_handle); + + if (ret != DDI_SUCCESS) { + buf->acc_handle = NULL; + buf->address = NULL; + if (buf->dma_handle != NULL) { + ddi_dma_free_handle(&buf->dma_handle); + buf->dma_handle = NULL; + } + igb_error(igb, + "Could not allocate dma buffer memory: %x", ret); + return (IGB_FAILURE); + } + + ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL, + buf->address, + len, DDI_DMA_RDWR | DDI_DMA_STREAMING, + DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num); + + if (ret != DDI_DMA_MAPPED) { + buf->dma_address = NULL; + if (buf->acc_handle != NULL) { + ddi_dma_mem_free(&buf->acc_handle); + buf->acc_handle = NULL; + buf->address = NULL; + } + if (buf->dma_handle != NULL) { + ddi_dma_free_handle(&buf->dma_handle); + 
buf->dma_handle = NULL; + } + igb_error(igb, + "Could not bind dma buffer handle: %x", ret); + return (IGB_FAILURE); + } + + ASSERT(cookie_num == 1); + + buf->dma_address = cookie.dmac_laddress; + buf->size = len; + buf->len = 0; + + return (IGB_SUCCESS); +} + +/* + * igb_free_dma_buffer - Free one allocated area of dma memory and handle + */ +static void +igb_free_dma_buffer(dma_buffer_t *buf) +{ + if (buf->dma_handle != NULL) { + (void) ddi_dma_unbind_handle(buf->dma_handle); + buf->dma_address = NULL; + } else { + return; + } + + if (buf->acc_handle != NULL) { + ddi_dma_mem_free(&buf->acc_handle); + buf->acc_handle = NULL; + buf->address = NULL; + } + + if (buf->dma_handle != NULL) { + ddi_dma_free_handle(&buf->dma_handle); + buf->dma_handle = NULL; + } + + buf->size = 0; + buf->len = 0; +} + +/* + * igb_alloc_tcb_lists - Memory allocation for the transmit control bolcks + * of one ring. + */ +static int +igb_alloc_tcb_lists(igb_tx_ring_t *tx_ring) +{ + int i; + int ret; + tx_control_block_t *tcb; + dma_buffer_t *tx_buf; + igb_t *igb = tx_ring->igb; + dev_info_t *devinfo = igb->dip; + + /* + * Allocate memory for the work list. + */ + tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) * + tx_ring->ring_size, KM_NOSLEEP); + + if (tx_ring->work_list == NULL) { + igb_error(igb, + "Cound not allocate memory for tx work list"); + return (IGB_FAILURE); + } + + /* + * Allocate memory for the free list. + */ + tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) * + tx_ring->free_list_size, KM_NOSLEEP); + + if (tx_ring->free_list == NULL) { + kmem_free(tx_ring->work_list, + sizeof (tx_control_block_t *) * tx_ring->ring_size); + tx_ring->work_list = NULL; + + igb_error(igb, + "Cound not allocate memory for tx free list"); + return (IGB_FAILURE); + } + + /* + * Allocate memory for the tx control blocks of free list. 
+ */ + tx_ring->tcb_area = + kmem_zalloc(sizeof (tx_control_block_t) * + tx_ring->free_list_size, KM_NOSLEEP); + + if (tx_ring->tcb_area == NULL) { + kmem_free(tx_ring->work_list, + sizeof (tx_control_block_t *) * tx_ring->ring_size); + tx_ring->work_list = NULL; + + kmem_free(tx_ring->free_list, + sizeof (tx_control_block_t *) * tx_ring->free_list_size); + tx_ring->free_list = NULL; + + igb_error(igb, + "Cound not allocate memory for tx control blocks"); + return (IGB_FAILURE); + } + + /* + * Allocate dma memory for the tx control block of free list. + */ + tcb = tx_ring->tcb_area; + for (i = 0; i < tx_ring->free_list_size; i++, tcb++) { + ASSERT(tcb != NULL); + + tx_ring->free_list[i] = tcb; + + /* + * Pre-allocate dma handles for transmit. These dma handles + * will be dynamically bound to the data buffers passed down + * from the upper layers at the time of transmitting. + */ + ret = ddi_dma_alloc_handle(devinfo, + &igb_tx_dma_attr, + DDI_DMA_DONTWAIT, NULL, + &tcb->tx_dma_handle); + if (ret != DDI_SUCCESS) { + tcb->tx_dma_handle = NULL; + igb_error(igb, + "Could not allocate tx dma handle: %x", ret); + goto alloc_tcb_lists_fail; + } + + /* + * Pre-allocate transmit buffers for packets that the + * size is less than bcopy_thresh. + */ + tx_buf = &tcb->tx_buf; + + ret = igb_alloc_dma_buffer(igb, + tx_buf, igb->tx_buf_size); + + if (ret != IGB_SUCCESS) { + ASSERT(tcb->tx_dma_handle != NULL); + ddi_dma_free_handle(&tcb->tx_dma_handle); + tcb->tx_dma_handle = NULL; + igb_error(igb, "Allocate tx dma buffer failed"); + goto alloc_tcb_lists_fail; + } + } + + return (IGB_SUCCESS); + +alloc_tcb_lists_fail: + igb_free_tcb_lists(tx_ring); + + return (IGB_FAILURE); +} + +/* + * igb_free_tcb_lists - Release the memory allocated for + * the transmit control bolcks of one ring. 
+ */ +static void +igb_free_tcb_lists(igb_tx_ring_t *tx_ring) +{ + int i; + tx_control_block_t *tcb; + + tcb = tx_ring->tcb_area; + if (tcb == NULL) + return; + + for (i = 0; i < tx_ring->free_list_size; i++, tcb++) { + ASSERT(tcb != NULL); + + /* Free the tx dma handle for dynamical binding */ + if (tcb->tx_dma_handle != NULL) { + ddi_dma_free_handle(&tcb->tx_dma_handle); + tcb->tx_dma_handle = NULL; + } else { + /* + * If the dma handle is NULL, then we don't + * have to check the remaining. + */ + break; + } + + igb_free_dma_buffer(&tcb->tx_buf); + } + + if (tx_ring->tcb_area != NULL) { + kmem_free(tx_ring->tcb_area, + sizeof (tx_control_block_t) * tx_ring->free_list_size); + tx_ring->tcb_area = NULL; + } + + if (tx_ring->work_list != NULL) { + kmem_free(tx_ring->work_list, + sizeof (tx_control_block_t *) * tx_ring->ring_size); + tx_ring->work_list = NULL; + } + + if (tx_ring->free_list != NULL) { + kmem_free(tx_ring->free_list, + sizeof (tx_control_block_t *) * tx_ring->free_list_size); + tx_ring->free_list = NULL; + } +} + +/* + * igb_alloc_rcb_lists - Memory allocation for the receive control blocks + * of one ring. + */ +static int +igb_alloc_rcb_lists(igb_rx_ring_t *rx_ring) +{ + int i; + int ret; + rx_control_block_t *rcb; + igb_t *igb = rx_ring->igb; + dma_buffer_t *rx_buf; + uint32_t rcb_count; + + /* + * Allocate memory for the work list. + */ + rx_ring->work_list = kmem_zalloc(sizeof (rx_control_block_t *) * + rx_ring->ring_size, KM_NOSLEEP); + + if (rx_ring->work_list == NULL) { + igb_error(igb, + "Could not allocate memory for rx work list"); + return (IGB_FAILURE); + } + + /* + * Allocate memory for the free list. 
+ */ + rx_ring->free_list = kmem_zalloc(sizeof (rx_control_block_t *) * + rx_ring->free_list_size, KM_NOSLEEP); + + if (rx_ring->free_list == NULL) { + kmem_free(rx_ring->work_list, + sizeof (rx_control_block_t *) * rx_ring->ring_size); + rx_ring->work_list = NULL; + + igb_error(igb, + "Cound not allocate memory for rx free list"); + return (IGB_FAILURE); + } + + /* + * Allocate memory for the rx control blocks for work list and + * free list. + */ + rcb_count = rx_ring->ring_size + rx_ring->free_list_size; + rx_ring->rcb_area = + kmem_zalloc(sizeof (rx_control_block_t) * rcb_count, + KM_NOSLEEP); + + if (rx_ring->rcb_area == NULL) { + kmem_free(rx_ring->work_list, + sizeof (rx_control_block_t *) * rx_ring->ring_size); + rx_ring->work_list = NULL; + + kmem_free(rx_ring->free_list, + sizeof (rx_control_block_t *) * rx_ring->free_list_size); + rx_ring->free_list = NULL; + + igb_error(igb, + "Cound not allocate memory for rx control blocks"); + return (IGB_FAILURE); + } + + /* + * Allocate dma memory for the rx control blocks + */ + rcb = rx_ring->rcb_area; + for (i = 0; i < rcb_count; i++, rcb++) { + ASSERT(rcb != NULL); + + if (i < rx_ring->ring_size) { + /* Attach the rx control block to the work list */ + rx_ring->work_list[i] = rcb; + } else { + /* Attach the rx control block to the free list */ + rx_ring->free_list[i - rx_ring->ring_size] = rcb; + } + + rx_buf = &rcb->rx_buf; + ret = igb_alloc_dma_buffer(igb, + rx_buf, igb->rx_buf_size); + + if (ret != IGB_SUCCESS) { + igb_error(igb, "Allocate rx dma buffer failed"); + goto alloc_rcb_lists_fail; + } + + rx_buf->size -= IPHDR_ALIGN_ROOM; + rx_buf->address += IPHDR_ALIGN_ROOM; + rx_buf->dma_address += IPHDR_ALIGN_ROOM; + + rcb->state = RCB_FREE; + rcb->rx_ring = (igb_rx_ring_t *)rx_ring; + rcb->free_rtn.free_func = igb_rx_recycle; + rcb->free_rtn.free_arg = (char *)rcb; + + rcb->mp = desballoc((unsigned char *) + rx_buf->address - IPHDR_ALIGN_ROOM, + rx_buf->size + IPHDR_ALIGN_ROOM, + 0, &rcb->free_rtn); + + if 
(rcb->mp != NULL) { + rcb->mp->b_rptr += IPHDR_ALIGN_ROOM; + rcb->mp->b_wptr += IPHDR_ALIGN_ROOM; + } + } + + return (IGB_SUCCESS); + +alloc_rcb_lists_fail: + igb_free_rcb_lists(rx_ring); + + return (IGB_FAILURE); +} + +/* + * igb_free_rcb_lists - Free the receive control blocks of one ring. + */ +static void +igb_free_rcb_lists(igb_rx_ring_t *rx_ring) +{ + int i; + rx_control_block_t *rcb; + uint32_t rcb_count; + + rcb = rx_ring->rcb_area; + if (rcb == NULL) + return; + + rcb_count = rx_ring->ring_size + rx_ring->free_list_size; + for (i = 0; i < rcb_count; i++, rcb++) { + ASSERT(rcb != NULL); + ASSERT(rcb->state == RCB_FREE); + + if (rcb->mp != NULL) { + freemsg(rcb->mp); + rcb->mp = NULL; + } + + igb_free_dma_buffer(&rcb->rx_buf); + } + + if (rx_ring->rcb_area != NULL) { + kmem_free(rx_ring->rcb_area, + sizeof (rx_control_block_t) * rcb_count); + rx_ring->rcb_area = NULL; + } + + if (rx_ring->work_list != NULL) { + kmem_free(rx_ring->work_list, + sizeof (rx_control_block_t *) * rx_ring->ring_size); + rx_ring->work_list = NULL; + } + + if (rx_ring->free_list != NULL) { + kmem_free(rx_ring->free_list, + sizeof (rx_control_block_t *) * rx_ring->free_list_size); + rx_ring->free_list = NULL; + } +} diff --git a/usr/src/uts/common/io/igb/igb_debug.c b/usr/src/uts/common/io/igb/igb_debug.c new file mode 100644 index 0000000000..021656cbaa --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_debug.c @@ -0,0 +1,297 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
+ * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_sw.h" +#include "igb_debug.h" + +#ifdef IGB_DEBUG +extern ddi_device_acc_attr_t igb_regs_acc_attr; + +void +pci_dump(void *arg) +{ + igb_t *igb = (igb_t *)arg; + ddi_acc_handle_t handle; + uint8_t cap_ptr; + uint8_t next_ptr; + uint32_t msix_bar; + uint32_t msix_ctrl; + uint32_t msix_tbl_sz; + uint32_t tbl_offset; + uint32_t tbl_bir; + uint32_t pba_offset; + uint32_t pba_bir; + off_t offset; + off_t mem_size; + uintptr_t base; + ddi_acc_handle_t acc_hdl; + int i; + + handle = igb->osdep.cfg_handle; + + igb_log(igb, "Begin dump PCI config space"); + + igb_log(igb, + "PCI_CONF_VENID:\t0x%x\n", + pci_config_get16(handle, PCI_CONF_VENID)); + igb_log(igb, + "PCI_CONF_DEVID:\t0x%x\n", + pci_config_get16(handle, PCI_CONF_DEVID)); + igb_log(igb, + "PCI_CONF_COMMAND:\t0x%x\n", + pci_config_get16(handle, PCI_CONF_COMM)); + igb_log(igb, + "PCI_CONF_STATUS:\t0x%x\n", + pci_config_get16(handle, PCI_CONF_STAT)); + igb_log(igb, + "PCI_CONF_REVID:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_REVID)); + igb_log(igb, + "PCI_CONF_PROG_CLASS:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_PROGCLASS)); + igb_log(igb, + "PCI_CONF_SUB_CLASS:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_SUBCLASS)); + igb_log(igb, + "PCI_CONF_BAS_CLASS:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_BASCLASS)); + igb_log(igb, + "PCI_CONF_CACHE_LINESZ:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_CACHE_LINESZ)); + igb_log(igb, + 
"PCI_CONF_LATENCY_TIMER:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_LATENCY_TIMER)); + igb_log(igb, + "PCI_CONF_HEADER_TYPE:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_HEADER)); + igb_log(igb, + "PCI_CONF_BIST:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_BIST)); + igb_log(igb, + "PCI_CONF_BASE0:\t0x%x\n", + pci_config_get32(handle, PCI_CONF_BASE0)); + igb_log(igb, + "PCI_CONF_BASE1:\t0x%x\n", + pci_config_get32(handle, PCI_CONF_BASE1)); + igb_log(igb, + "PCI_CONF_BASE2:\t0x%x\n", + pci_config_get32(handle, PCI_CONF_BASE2)); + + /* MSI-X BAR */ + msix_bar = pci_config_get32(handle, PCI_CONF_BASE3); + igb_log(igb, + "PCI_CONF_BASE3:\t0x%x\n", msix_bar); + + igb_log(igb, + "PCI_CONF_BASE4:\t0x%x\n", + pci_config_get32(handle, PCI_CONF_BASE4)); + igb_log(igb, + "PCI_CONF_BASE5:\t0x%x\n", + pci_config_get32(handle, PCI_CONF_BASE5)); + igb_log(igb, + "PCI_CONF_CIS:\t0x%x\n", + pci_config_get32(handle, PCI_CONF_CIS)); + igb_log(igb, + "PCI_CONF_SUBVENID:\t0x%x\n", + pci_config_get16(handle, PCI_CONF_SUBVENID)); + igb_log(igb, + "PCI_CONF_SUBSYSID:\t0x%x\n", + pci_config_get16(handle, PCI_CONF_SUBSYSID)); + igb_log(igb, + "PCI_CONF_ROM:\t0x%x\n", + pci_config_get32(handle, PCI_CONF_ROM)); + + cap_ptr = pci_config_get8(handle, PCI_CONF_CAP_PTR); + + igb_log(igb, + "PCI_CONF_CAP_PTR:\t0x%x\n", cap_ptr); + igb_log(igb, + "PCI_CONF_ILINE:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_ILINE)); + igb_log(igb, + "PCI_CONF_IPIN:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_IPIN)); + igb_log(igb, + "PCI_CONF_MIN_G:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_MIN_G)); + igb_log(igb, + "PCI_CONF_MAX_L:\t0x%x\n", + pci_config_get8(handle, PCI_CONF_MAX_L)); + + /* Power Management */ + offset = cap_ptr; + + igb_log(igb, + "PCI_PM_CAP_ID:\t0x%x\n", + pci_config_get8(handle, offset)); + + next_ptr = pci_config_get8(handle, offset + 1); + + igb_log(igb, + "PCI_PM_NEXT_PTR:\t0x%x\n", next_ptr); + igb_log(igb, + "PCI_PM_CAP:\t0x%x\n", + pci_config_get16(handle, offset + PCI_PMCAP)); + 
igb_log(igb, + "PCI_PM_CSR:\t0x%x\n", + pci_config_get16(handle, offset + PCI_PMCSR)); + igb_log(igb, + "PCI_PM_CSR_BSE:\t0x%x\n", + pci_config_get8(handle, offset + PCI_PMCSR_BSE)); + igb_log(igb, + "PCI_PM_DATA:\t0x%x\n", + pci_config_get8(handle, offset + PCI_PMDATA)); + + /* MSI Configuration */ + offset = next_ptr; + + igb_log(igb, + "PCI_MSI_CAP_ID:\t0x%x\n", + pci_config_get8(handle, offset)); + + next_ptr = pci_config_get8(handle, offset + 1); + + igb_log(igb, + "PCI_MSI_NEXT_PTR:\t0x%x\n", next_ptr); + igb_log(igb, + "PCI_MSI_CTRL:\t0x%x\n", + pci_config_get16(handle, offset + PCI_MSI_CTRL)); + igb_log(igb, + "PCI_MSI_ADDR:\t0x%x\n", + pci_config_get32(handle, offset + PCI_MSI_ADDR_OFFSET)); + igb_log(igb, + "PCI_MSI_ADDR_HI:\t0x%x\n", + pci_config_get32(handle, offset + 0x8)); + igb_log(igb, + "PCI_MSI_DATA:\t0x%x\n", + pci_config_get16(handle, offset + 0xC)); + + /* MSI-X Configuration */ + offset = next_ptr; + + igb_log(igb, + "PCI_MSIX_CAP_ID:\t0x%x\n", + pci_config_get8(handle, offset)); + + next_ptr = pci_config_get8(handle, offset + 1); + igb_log(igb, + "PCI_MSIX_NEXT_PTR:\t0x%x\n", next_ptr); + + msix_ctrl = pci_config_get16(handle, offset + PCI_MSIX_CTRL); + msix_tbl_sz = msix_ctrl & 0x7ff; + igb_log(igb, + "PCI_MSIX_CTRL:\t0x%x\n", msix_ctrl); + + tbl_offset = pci_config_get32(handle, offset + PCI_MSIX_TBL_OFFSET); + tbl_bir = tbl_offset & PCI_MSIX_TBL_BIR_MASK; + tbl_offset = tbl_offset & ~PCI_MSIX_TBL_BIR_MASK; + igb_log(igb, + "PCI_MSIX_TBL_OFFSET:\t0x%x\n", tbl_offset); + igb_log(igb, + "PCI_MSIX_TBL_BIR:\t0x%x\n", tbl_bir); + + pba_offset = pci_config_get32(handle, offset + PCI_MSIX_PBA_OFFSET); + pba_bir = pba_offset & PCI_MSIX_PBA_BIR_MASK; + pba_offset = pba_offset & ~PCI_MSIX_PBA_BIR_MASK; + igb_log(igb, + "PCI_MSIX_PBA_OFFSET:\t0x%x\n", pba_offset); + igb_log(igb, + "PCI_MSIX_PBA_BIR:\t0x%x\n", pba_bir); + + /* PCI Express Configuration */ + offset = next_ptr; + + igb_log(igb, + "PCIE_CAP_ID:\t0x%x\n", + pci_config_get8(handle, offset + 
PCIE_CAP_ID)); + + next_ptr = pci_config_get8(handle, offset + PCIE_CAP_NEXT_PTR); + + igb_log(igb, + "PCIE_CAP_NEXT_PTR:\t0x%x\n", next_ptr); + igb_log(igb, + "PCIE_PCIECAP:\t0x%x\n", + pci_config_get16(handle, offset + PCIE_PCIECAP)); + igb_log(igb, + "PCIE_DEVCAP:\t0x%x\n", + pci_config_get32(handle, offset + PCIE_DEVCAP)); + igb_log(igb, + "PCIE_DEVCTL:\t0x%x\n", + pci_config_get16(handle, offset + PCIE_DEVCTL)); + igb_log(igb, + "PCIE_DEVSTS:\t0x%x\n", + pci_config_get16(handle, offset + PCIE_DEVSTS)); + igb_log(igb, + "PCIE_LINKCAP:\t0x%x\n", + pci_config_get32(handle, offset + PCIE_LINKCAP)); + igb_log(igb, + "PCIE_LINKCTL:\t0x%x\n", + pci_config_get16(handle, offset + PCIE_LINKCTL)); + igb_log(igb, + "PCIE_LINKSTS:\t0x%x\n", + pci_config_get16(handle, offset + PCIE_LINKSTS)); + + /* MSI-X Memory Space */ + if (ddi_dev_regsize(igb->dip, 4, &mem_size) != DDI_SUCCESS) { + igb_log(igb, "ddi_dev_regsize() failed"); + return; + } + + if ((ddi_regs_map_setup(igb->dip, 4, (caddr_t *)&base, 0, mem_size, + &igb_regs_acc_attr, &acc_hdl)) != DDI_SUCCESS) { + igb_log(igb, "ddi_regs_map_setup() failed"); + return; + } + + igb_log(igb, "MSI-X Memory Space: (mem_size = %d, base = %x)", + mem_size, base); + + for (i = 0; i <= msix_tbl_sz; i++) { + igb_log(igb, "MSI-X Table Entry(%d):", i); + igb_log(igb, "lo_addr:\t%x", + ddi_get32(acc_hdl, + (uint32_t *)(base + tbl_offset + (i * 16)))); + igb_log(igb, "up_addr:\t%x", + ddi_get32(acc_hdl, + (uint32_t *)(base + tbl_offset + (i * 16) + 4))); + igb_log(igb, "msg_data:\t%x", + ddi_get32(acc_hdl, + (uint32_t *)(base + tbl_offset + (i * 16) + 8))); + igb_log(igb, "vct_ctrl:\t%x", + ddi_get32(acc_hdl, + (uint32_t *)(base + tbl_offset + (i * 16) + 12))); + } + + igb_log(igb, "MSI-X Pending Bits:\t%x", + ddi_get32(acc_hdl, (uint32_t *)(base + pba_offset))); + + ddi_regs_map_free(&acc_hdl); +} +#endif diff --git a/usr/src/uts/common/io/igb/igb_debug.h b/usr/src/uts/common/io/igb/igb_debug.h new file mode 100644 index 
0000000000..a21745537e --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_debug.h @@ -0,0 +1,83 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#ifndef _IGB_DEBUG_H +#define _IGB_DEBUG_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + + +#ifdef DEBUG +#define IGB_DEBUG +#endif + +#ifdef IGB_DEBUG + +#define IGB_DEBUGLOG_0(adapter, fmt) \ + igb_log((adapter), (fmt)) +#define IGB_DEBUGLOG_1(adapter, fmt, d1) \ + igb_log((adapter), (fmt), (d1)) +#define IGB_DEBUGLOG_2(adapter, fmt, d1, d2) \ + igb_log((adapter), (fmt), (d1), (d2)) +#define IGB_DEBUGLOG_3(adapter, fmt, d1, d2, d3) \ + igb_log((adapter), (fmt), (d1), (d2), (d3)) + +#define IGB_DEBUG_STAT_COND(val, cond) if (cond) (val)++; +#define IGB_DEBUG_STAT(val) (val)++; + +#else + +#define IGB_DEBUGLOG_0(adapter, fmt) +#define IGB_DEBUGLOG_1(adapter, fmt, d1) +#define IGB_DEBUGLOG_2(adapter, fmt, d1, d2) +#define IGB_DEBUGLOG_3(adapter, fmt, d1, d2, d3) + +#define IGB_DEBUG_STAT_COND(val, cond) +#define IGB_DEBUG_STAT(val) + +#endif /* IGB_DEBUG */ + +#define IGB_STAT(val) (val)++; + +#ifdef IGB_DEBUG + +void pci_dump(void *); + +#endif /* IGB_DEBUG */ + +extern void igb_log(void *, const char *, ...); + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_DEBUG_H */ diff --git a/usr/src/uts/common/io/igb/igb_defines.h b/usr/src/uts/common/io/igb/igb_defines.h new file mode 100644 index 0000000000..abed173c46 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_defines.h @@ -0,0 +1,1474 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. 
+ * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#ifndef _IGB_DEFINES_H +#define _IGB_DEFINES_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_LSCWE 0x00000010 /* Link Status wake up enable */ +#define E1000_WUC_LSCWO 0x00000020 /* Link Status wake up override */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO_BM 0x00000800 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0_BM 0x00001000 /* Flexible Filter 0 Enable */ +#define 
E1000_WUFC_FLX1_BM 0x00002000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2_BM 0x00004000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3_BM 0x00008000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +/* Mask for all wakeup filters */ +#define E1000_WUFC_ALL_FILTERS_BM 0x0000F0FF +/* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_OFFSET_BM 12 +/* Mask for the 4 flexible filters */ +#define E1000_WUFC_FLX_FILTERS_BM 0x0000F000 +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC +#define E1000_WUS_ARP E1000_WUFC_ARP +#define E1000_WUS_IPV4 E1000_WUFC_IPV4 +#define E1000_WUS_IPV6 E1000_WUFC_IPV6 +#define E1000_WUS_FLX0_BM E1000_WUFC_FLX0_BM +#define E1000_WUS_FLX1_BM E1000_WUFC_FLX1_BM +#define E1000_WUS_FLX2_BM E1000_WUFC_FLX2_BM +#define E1000_WUS_FLX3_BM E1000_WUFC_FLX3_BM +#define E1000_WUS_FLX_FILTERS_BM E1000_WUFC_FLX_FILTERS_BM +#define E1000_WUS_FLX0 E1000_WUFC_FLX0 +#define E1000_WUS_FLX1 E1000_WUFC_FLX1 +#define E1000_WUS_FLX2 E1000_WUFC_FLX2 +#define E1000_WUS_FLX3 E1000_WUFC_FLX3 +#define E1000_WUS_FLX_FILTERS E1000_WUFC_FLX_FILTERS + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +/* Four Flexible Filters are supported */ +#define 
E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +/* Reserved (bits 4,5) in >= 82575 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 
0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_CANC 0x04000000 /* Interrupt delay cancellation */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +/* IAME enable bit (27) was removed in >= 82575 */ +/* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_IAME 0x08000000 +/* Clear Interrupt timers after IMS clear */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 +/* packet buffer parity error detection enabled */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 +/* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_REG_ADDR 0x00FF0000 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_PHY_ADDR 0x07000000 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_RESET 0x10000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_INTERRUPT_ENA 0x40000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 + +/* Receive Decriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ 
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define 
E1000_MRQC_ENABLE_MASK 0x00000007 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +/* Enable Neighbor Discovery Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 +/* Enable IP address filtering */ +#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 +#define E1000_MANC_EN_XSUM_FILTER 
0x00800000 /* Enable cksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc 
ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* + * Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define 
E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 + +/* FACTPS Definitions */ +#define E1000_FACTPS_LFS 0x40000000 /* LAN Function Select */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Block new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +/* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 +/* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 +/* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 +#define 
E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +/* Initiate an interrupt to manageability engine */ +#define E1000_CTRL_SW2FW_INT 0x02000000 +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +/* + * Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_10 0 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_LOW_LINK_LATCH 0x40 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_PCS_LCTL_AN_SGMII_BYPASS 0x80000 +#define E1000_PCS_LCTL_AN_SGMII_TRIGGER 0x100000 +#define E1000_PCS_LCTL_FAST_LINK_TIMER 0x1000000 +#define E1000_PCS_LCTL_LINK_OK_FIX 0x2000000 +#define E1000_PCS_LCTL_CRS_ON_NI 0x4000000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_10 0 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 +#define E1000_PCS_LSTS_AN_PAGE_RX 0x20000 +#define E1000_PCS_LSTS_AN_TIMED_OUT 0x40000 +#define E1000_PCS_LSTS_AN_REMOTE_FAULT 0x80000 +#define E1000_PCS_LSTS_AN_ERROR_RWS 0x100000 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* 
transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +/* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_DOCK_CI 0x00000800 +/* Status of Master requests. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +/* BMC external code execution disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to intrepret the masked PCI-X bus speed. 
*/ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define PHY_FORCE_TIME 20 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x00002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 
0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_SHIFT 8 /* POPTS shift */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0=legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define 
E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +/* Extended desc bits for Linksec and timesync */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Transmit Arbitration Count */ +#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define 
E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_12K 0x000C /* 12KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ +#define 
E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB */ +#define E1000_PBA_64K 0x0040 /* 64KB */ + +#define E1000_PBS_16K E1000_PBA_16K +#define E1000_PBS_24K E1000_PBA_24K + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +/* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 +/* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 +/* host arb read buffer parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +/* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 +/* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +/* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_DSW 0x00000020 +/* LAN connected device generates an interrupt */ +#define E1000_ICR_PHYINT 0x00001000 +#define E1000_ICR_EPRST 0x00100000 /* ME handware reset occurs */ +#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ +#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ +#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ +#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ + +/* Extended Interrupt Cause Read */ 
+#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ +#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ +#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ +#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ +#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ + +/* + * This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* + * This defines the bits that are set in the Interrupt Mask + * Set/Read Register. 
Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +/* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 +/* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 +/* host arb read buffer parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR +/* packet buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR +/* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 +/* 
queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST +#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ +#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ +#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ +#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ +#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ + +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +/* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 +/* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 +/* host arb read buffer parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR +/* packet buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR +/* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 +/* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Extended Interrupt Cause Set */ +#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define 
E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of descriptors still to be processed. */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. 
+ */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +/* PHY Control Register */ 
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 
/* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ + +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 
+ +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_NVM_GRANT_ATTEMPTS +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ 
+#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 + +#define E1000_NVM_SWDPIN0 0x0001 /* SWDPIN 0 NVM Value */ +#define E1000_NVM_LED_LOGIC 0x0020 /* Led Logic Word */ +/* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DATA 16 +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_VERSION 0x0005 +/* For SERDES output amplitude adjustment. 
*/ +#define NVM_SERDES_AMPLITUDE 0x0006 +#define NVM_PHY_CLASS_WORD 0x0007 +#define NVM_INIT_CONTROL1_REG 0x000A +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_FLASH_VERSION 0x0032 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F + +#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 +#define NVM_WORD0F_ANE 0x0800 +#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0 +#define NVM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. 
*/ +#define NVM_SUM 0xBABA + +#define NVM_MAC_ADDR_OFFSET 0 +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PHY_CLASS_A 0x8000 +#define NVM_SERDES_AMPLITUDE_MASK 0x000F +#define NVM_SIZE_MASK 0x1C00 +#define NVM_SIZE_SHIFT 10 +#define NVM_WORD_SIZE_BASE_SHIFT 6 +#define NVM_SWDPIO_EXT_SHIFT 4 + +/* NVM Commands - Microwire */ +#define NVM_READ_OPCODE_MICROWIRE 0x6 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_MICROWIRE 0x5 /* NVM write opcode */ +#define NVM_ERASE_OPCODE_MICROWIRE 0x7 /* NVM erase opcode */ +#define NVM_EWEN_OPCODE_MICROWIRE 0x13 /* NVM erase/write enable */ +#define NVM_EWDS_OPCODE_MICROWIRE 0x10 /* NVM erase/write disable */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_WRDI_OPCODE_SPI 0x04 /* NVM reset Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ +#define NVM_WRSR_OPCODE_SPI 0x01 /* NVM write Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 +#define NVM_STATUS_WEN_SPI 0x02 +#define NVM_STATUS_BP0_SPI 0x04 +#define NVM_STATUS_BP1_SPI 0x08 +#define NVM_STATUS_WPEN_SPI 0x80 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define 
IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 +#define PCIX_STATUS_LO_FUNC_MASK 0x7 +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. */ +/* + * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define BME1000_E_PHY_ID 0x01410CB0 +#define BME1000_E_PHY_ID_R2 0x01410CB1 +#define M88_VENDOR 0x0141 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 
0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +/* 1=CLK125 low, 0=CLK125 toggling */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* + * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold + * 0=Normal 10BASE-T Rx Threshold + */ +#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080 +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* + * 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define 
M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +/* + * 1 = Lost lock detect enabled. + * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* BME1000 PHY Specific Control Register */ +#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ + +/* + * Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL \ + GG82563_REG(0, 16) /* PHY Specific Control */ +#define GG82563_PHY_SPEC_STATUS \ + GG82563_REG(0, 17) /* PHY Specific Status */ +#define GG82563_PHY_INT_ENABLE \ + GG82563_REG(0, 18) /* Interrupt Enable */ +#define GG82563_PHY_SPEC_STATUS_2 \ + GG82563_REG(0, 19) /* PHY Specific Status 2 */ +#define GG82563_PHY_RX_ERR_CNTR \ + GG82563_REG(0, 21) /* Receive Error Counter */ +#define GG82563_PHY_PAGE_SELECT \ + GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 \ + GG82563_REG(0, 26) /* PHY Specific Control 2 */ +#define GG82563_PHY_PAGE_SELECT_ALT \ + GG82563_REG(0, 29) /* Alternate Page Select */ +#define GG82563_PHY_TEST_CLK_CTRL \ + GG82563_REG(0, 30) /* Test Clock Control (use reg. 
29 to select) */ + +#define GG82563_PHY_MAC_SPEC_CTRL \ + GG82563_REG(2, 21) /* MAC Specific Control Register */ +#define GG82563_PHY_MAC_SPEC_CTRL_2 \ + GG82563_REG(2, 26) /* MAC Specific Control 2 */ + +#define GG82563_PHY_DSP_DISTANCE \ + GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +#define GG82563_PHY_KMRN_MODE_CTRL \ + GG82563_REG(193, 16) /* Kumeran Mode Control */ +#define GG82563_PHY_PORT_RESET \ + GG82563_REG(193, 17) /* Port Reset */ +#define GG82563_PHY_REVISION_ID \ + GG82563_REG(193, 18) /* Revision ID */ +#define GG82563_PHY_DEVICE_ID \ + GG82563_REG(193, 19) /* Device ID */ +#define GG82563_PHY_PWR_MGMT_CTRL \ + GG82563_REG(193, 20) /* Power Management Control */ +#define GG82563_PHY_RATE_ADAPT_CTRL \ + GG82563_REG(193, 25) /* Rate Adaptation Control */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \ + GG82563_REG(194, 16) /* FIFO's Control/Status */ +#define GG82563_PHY_KMRN_CTRL \ + GG82563_REG(194, 17) /* Control */ +#define GG82563_PHY_INBAND_CTRL \ + GG82563_REG(194, 18) /* Inband Control */ +#define GG82563_PHY_KMRN_DIAGNOSTIC \ + GG82563_REG(194, 19) /* Diagnostic */ +#define GG82563_PHY_ACK_TIMEOUTS \ + GG82563_REG(194, 20) /* Acknowledge Timeouts */ +#define GG82563_PHY_ADV_ABILITY \ + GG82563_REG(194, 21) /* Advertised Ability */ +#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \ + GG82563_REG(194, 23) /* Link Partner Advertised Ability */ +#define GG82563_PHY_ADV_NEXT_PAGE \ + GG82563_REG(194, 24) /* Advertised Next Page */ +#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \ + GG82563_REG(194, 25) /* Link Partner Advertised Next page */ +#define GG82563_PHY_KMRN_MISC \ + GG82563_REG(194, 26) /* Misc. 
*/ + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +#ifndef UNREFERENCED_PARAMETER +#define UNREFERENCED_PARAMETER(_p) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_DEFINES_H */ diff --git a/usr/src/uts/common/io/igb/igb_gld.c b/usr/src/uts/common/io/igb/igb_gld.c new file mode 100644 index 0000000000..a493c624a4 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_gld.c @@ -0,0 +1,888 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_sw.h" + +int +igb_m_stat(void *arg, uint_t stat, uint64_t *val) +{ + igb_t *igb = (igb_t *)arg; + struct e1000_hw *hw = &igb->hw; + igb_stat_t *igb_ks; + uint32_t low_val, high_val; + + igb_ks = (igb_stat_t *)igb->igb_ks->ks_data; + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_SUSPENDED) { + mutex_exit(&igb->gen_lock); + return (ECANCELED); + } + + switch (stat) { + case MAC_STAT_IFSPEED: + *val = igb->link_speed * 1000000ull; + break; + + case MAC_STAT_MULTIRCV: + igb_ks->mprc.value.ui64 += + E1000_READ_REG(hw, E1000_MPRC); + *val = igb_ks->mprc.value.ui64; + break; + + case MAC_STAT_BRDCSTRCV: + igb_ks->bprc.value.ui64 += + E1000_READ_REG(hw, E1000_BPRC); + *val = igb_ks->bprc.value.ui64; + break; + + case MAC_STAT_MULTIXMT: + igb_ks->mptc.value.ui64 += + E1000_READ_REG(hw, E1000_MPTC); + *val = igb_ks->mptc.value.ui64; + break; + + case MAC_STAT_BRDCSTXMT: + igb_ks->bptc.value.ui64 += + E1000_READ_REG(hw, E1000_BPTC); + *val = igb_ks->bptc.value.ui64; + break; + + case MAC_STAT_NORCVBUF: + igb_ks->rnbc.value.ui64 += + E1000_READ_REG(hw, E1000_RNBC); + *val = igb_ks->rnbc.value.ui64; + break; + + case MAC_STAT_IERRORS: + igb_ks->rxerrc.value.ui64 += + E1000_READ_REG(hw, E1000_RXERRC); + igb_ks->algnerrc.value.ui64 += + E1000_READ_REG(hw, E1000_ALGNERRC); + igb_ks->rlec.value.ui64 += + E1000_READ_REG(hw, E1000_RLEC); + igb_ks->crcerrs.value.ui64 += + E1000_READ_REG(hw, E1000_CRCERRS); + igb_ks->cexterr.value.ui64 += + E1000_READ_REG(hw, E1000_CEXTERR); + *val = igb_ks->rxerrc.value.ui64 + + igb_ks->algnerrc.value.ui64 + + igb_ks->rlec.value.ui64 + + igb_ks->crcerrs.value.ui64 + + igb_ks->cexterr.value.ui64; + break; + + case MAC_STAT_NOXMTBUF: + *val = 0; + break; + + case MAC_STAT_OERRORS: + igb_ks->ecol.value.ui64 += + E1000_READ_REG(hw, E1000_ECOL); + *val = igb_ks->ecol.value.ui64; + break; + + case MAC_STAT_COLLISIONS: + igb_ks->colc.value.ui64 += + E1000_READ_REG(hw, E1000_COLC); + 
*val = igb_ks->colc.value.ui64; + break; + + case MAC_STAT_RBYTES: + /* + * The 64-bit register will reset whenever the upper + * 32 bits are read. So we need to read the lower + * 32 bits first, then read the upper 32 bits. + */ + low_val = E1000_READ_REG(hw, E1000_TORL); + high_val = E1000_READ_REG(hw, E1000_TORH); + igb_ks->tor.value.ui64 += + (uint64_t)high_val << 32 | (uint64_t)low_val; + *val = igb_ks->tor.value.ui64; + break; + + case MAC_STAT_IPACKETS: + igb_ks->tpr.value.ui64 += + E1000_READ_REG(hw, E1000_TPR); + *val = igb_ks->tpr.value.ui64; + break; + + case MAC_STAT_OBYTES: + /* + * The 64-bit register will reset whenever the upper + * 32 bits are read. So we need to read the lower + * 32 bits first, then read the upper 32 bits. + */ + low_val = E1000_READ_REG(hw, E1000_TOTL); + high_val = E1000_READ_REG(hw, E1000_TOTH); + igb_ks->tot.value.ui64 += + (uint64_t)high_val << 32 | (uint64_t)low_val; + *val = igb_ks->tot.value.ui64; + break; + + case MAC_STAT_OPACKETS: + igb_ks->tpt.value.ui64 += + E1000_READ_REG(hw, E1000_TPT); + *val = igb_ks->tpt.value.ui64; + break; + + /* RFC 1643 stats */ + case ETHER_STAT_ALIGN_ERRORS: + igb_ks->algnerrc.value.ui64 += + E1000_READ_REG(hw, E1000_ALGNERRC); + *val = igb_ks->algnerrc.value.ui64; + break; + + case ETHER_STAT_FCS_ERRORS: + igb_ks->crcerrs.value.ui64 += + E1000_READ_REG(hw, E1000_CRCERRS); + *val = igb_ks->crcerrs.value.ui64; + break; + + case ETHER_STAT_FIRST_COLLISIONS: + igb_ks->scc.value.ui64 += + E1000_READ_REG(hw, E1000_SCC); + *val = igb_ks->scc.value.ui64; + break; + + case ETHER_STAT_MULTI_COLLISIONS: + igb_ks->mcc.value.ui64 += + E1000_READ_REG(hw, E1000_MCC); + *val = igb_ks->mcc.value.ui64; + break; + + case ETHER_STAT_SQE_ERRORS: + igb_ks->sec.value.ui64 += + E1000_READ_REG(hw, E1000_SEC); + *val = igb_ks->sec.value.ui64; + break; + + case ETHER_STAT_DEFER_XMTS: + igb_ks->dc.value.ui64 += + E1000_READ_REG(hw, E1000_DC); + *val = igb_ks->dc.value.ui64; + break; + + case 
ETHER_STAT_TX_LATE_COLLISIONS: + igb_ks->latecol.value.ui64 += + E1000_READ_REG(hw, E1000_LATECOL); + *val = igb_ks->latecol.value.ui64; + break; + + case ETHER_STAT_EX_COLLISIONS: + igb_ks->ecol.value.ui64 += + E1000_READ_REG(hw, E1000_ECOL); + *val = igb_ks->ecol.value.ui64; + break; + + case ETHER_STAT_MACXMT_ERRORS: + igb_ks->ecol.value.ui64 += + E1000_READ_REG(hw, E1000_ECOL); + *val = igb_ks->ecol.value.ui64; + break; + + case ETHER_STAT_CARRIER_ERRORS: + igb_ks->cexterr.value.ui64 += + E1000_READ_REG(hw, E1000_CEXTERR); + *val = igb_ks->cexterr.value.ui64; + break; + + case ETHER_STAT_TOOLONG_ERRORS: + igb_ks->roc.value.ui64 += + E1000_READ_REG(hw, E1000_ROC); + *val = igb_ks->roc.value.ui64; + break; + + case ETHER_STAT_MACRCV_ERRORS: + igb_ks->rxerrc.value.ui64 += + E1000_READ_REG(hw, E1000_RXERRC); + *val = igb_ks->rxerrc.value.ui64; + break; + + /* MII/GMII stats */ + case ETHER_STAT_XCVR_ADDR: + /* The Internal PHY's MDI address for each MAC is 1 */ + *val = 1; + break; + + case ETHER_STAT_XCVR_ID: + *val = hw->phy.id | hw->phy.revision; + break; + + case ETHER_STAT_XCVR_INUSE: + switch (igb->link_speed) { + case SPEED_1000: + *val = + (hw->phy.media_type == e1000_media_type_copper) ? + XCVR_1000T : XCVR_1000X; + break; + case SPEED_100: + *val = + (hw->phy.media_type == e1000_media_type_copper) ? + (igb->param_100t4_cap == 1) ? 
+ XCVR_100T4 : XCVR_100T2 : XCVR_100X; + break; + case SPEED_10: + *val = XCVR_10; + break; + default: + *val = XCVR_NONE; + break; + } + break; + + case ETHER_STAT_CAP_1000FDX: + *val = igb->param_1000fdx_cap; + break; + + case ETHER_STAT_CAP_1000HDX: + *val = igb->param_1000hdx_cap; + break; + + case ETHER_STAT_CAP_100FDX: + *val = igb->param_100fdx_cap; + break; + + case ETHER_STAT_CAP_100HDX: + *val = igb->param_100hdx_cap; + break; + + case ETHER_STAT_CAP_10FDX: + *val = igb->param_10fdx_cap; + break; + + case ETHER_STAT_CAP_10HDX: + *val = igb->param_10hdx_cap; + break; + + case ETHER_STAT_CAP_ASMPAUSE: + *val = igb->param_asym_pause_cap; + break; + + case ETHER_STAT_CAP_PAUSE: + *val = igb->param_pause_cap; + break; + + case ETHER_STAT_CAP_AUTONEG: + *val = igb->param_autoneg_cap; + break; + + case ETHER_STAT_ADV_CAP_1000FDX: + *val = igb->param_adv_1000fdx_cap; + break; + + case ETHER_STAT_ADV_CAP_1000HDX: + *val = igb->param_adv_1000hdx_cap; + break; + + case ETHER_STAT_ADV_CAP_100FDX: + *val = igb->param_adv_100fdx_cap; + break; + + case ETHER_STAT_ADV_CAP_100HDX: + *val = igb->param_adv_100hdx_cap; + break; + + case ETHER_STAT_ADV_CAP_10FDX: + *val = igb->param_adv_10fdx_cap; + break; + + case ETHER_STAT_ADV_CAP_10HDX: + *val = igb->param_adv_10hdx_cap; + break; + + case ETHER_STAT_ADV_CAP_ASMPAUSE: + *val = igb->param_adv_asym_pause_cap; + break; + + case ETHER_STAT_ADV_CAP_PAUSE: + *val = igb->param_adv_pause_cap; + break; + + case ETHER_STAT_ADV_CAP_AUTONEG: + *val = hw->mac.autoneg; + break; + + case ETHER_STAT_LP_CAP_1000FDX: + *val = igb->param_lp_1000fdx_cap; + break; + + case ETHER_STAT_LP_CAP_1000HDX: + *val = igb->param_lp_1000hdx_cap; + break; + + case ETHER_STAT_LP_CAP_100FDX: + *val = igb->param_lp_100fdx_cap; + break; + + case ETHER_STAT_LP_CAP_100HDX: + *val = igb->param_lp_100hdx_cap; + break; + + case ETHER_STAT_LP_CAP_10FDX: + *val = igb->param_lp_10fdx_cap; + break; + + case ETHER_STAT_LP_CAP_10HDX: + *val = igb->param_lp_10hdx_cap; + 
break; + + case ETHER_STAT_LP_CAP_ASMPAUSE: + *val = igb->param_lp_asym_pause_cap; + break; + + case ETHER_STAT_LP_CAP_PAUSE: + *val = igb->param_lp_pause_cap; + break; + + case ETHER_STAT_LP_CAP_AUTONEG: + *val = igb->param_lp_autoneg_cap; + break; + + case ETHER_STAT_LINK_ASMPAUSE: + *val = igb->param_asym_pause_cap; + break; + + case ETHER_STAT_LINK_PAUSE: + *val = igb->param_pause_cap; + break; + + case ETHER_STAT_LINK_AUTONEG: + *val = hw->mac.autoneg; + break; + + case ETHER_STAT_LINK_DUPLEX: + *val = (igb->link_duplex == FULL_DUPLEX) ? + LINK_DUPLEX_FULL : LINK_DUPLEX_HALF; + break; + + case ETHER_STAT_TOOSHORT_ERRORS: + igb_ks->ruc.value.ui64 += + E1000_READ_REG(hw, E1000_RUC); + *val = igb_ks->ruc.value.ui64; + break; + + case ETHER_STAT_CAP_REMFAULT: + *val = igb->param_rem_fault; + break; + + case ETHER_STAT_ADV_REMFAULT: + *val = igb->param_adv_rem_fault; + break; + + case ETHER_STAT_LP_REMFAULT: + *val = igb->param_lp_rem_fault; + break; + + case ETHER_STAT_JABBER_ERRORS: + igb_ks->rjc.value.ui64 += + E1000_READ_REG(hw, E1000_RJC); + *val = igb_ks->rjc.value.ui64; + break; + + case ETHER_STAT_CAP_100T4: + *val = igb->param_100t4_cap; + break; + + case ETHER_STAT_ADV_CAP_100T4: + *val = igb->param_adv_100t4_cap; + break; + + case ETHER_STAT_LP_CAP_100T4: + *val = igb->param_lp_100t4_cap; + break; + + default: + mutex_exit(&igb->gen_lock); + return (ENOTSUP); + } + + mutex_exit(&igb->gen_lock); + + return (0); +} + +/* + * Bring the device out of the reset/quiesced state that it + * was in when the interface was registered. 
+ */ +int +igb_m_start(void *arg) +{ + igb_t *igb = (igb_t *)arg; + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_SUSPENDED) { + mutex_exit(&igb->gen_lock); + return (ECANCELED); + } + + if (igb_start(igb) != IGB_SUCCESS) { + mutex_exit(&igb->gen_lock); + return (EIO); + } + + igb->igb_state |= IGB_STARTED; + + mutex_exit(&igb->gen_lock); + + /* + * Enable and start the watchdog timer + */ + igb_enable_watchdog_timer(igb); + + return (0); +} + +/* + * Stop the device and put it in a reset/quiesced state such + * that the interface can be unregistered. + */ +void +igb_m_stop(void *arg) +{ + igb_t *igb = (igb_t *)arg; + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_SUSPENDED) { + mutex_exit(&igb->gen_lock); + return; + } + + igb->igb_state &= ~IGB_STARTED; + + igb_stop(igb); + + mutex_exit(&igb->gen_lock); + + /* + * Disable and stop the watchdog timer + */ + igb_disable_watchdog_timer(igb); +} + +/* + * Set the promiscuity of the device. + */ +int +igb_m_promisc(void *arg, boolean_t on) +{ + igb_t *igb = (igb_t *)arg; + uint32_t reg_val; + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_SUSPENDED) { + mutex_exit(&igb->gen_lock); + return (ECANCELED); + } + + reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL); + + if (on) + reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + else + reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE)); + + E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val); + + mutex_exit(&igb->gen_lock); + + return (0); +} + +/* + * Add/remove the addresses to/from the set of multicast + * addresses for which the device will receive packets. + */ +int +igb_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr) +{ + igb_t *igb = (igb_t *)arg; + int result; + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_SUSPENDED) { + mutex_exit(&igb->gen_lock); + return (ECANCELED); + } + + result = (add) ? 
igb_multicst_add(igb, mcst_addr) + : igb_multicst_remove(igb, mcst_addr); + + mutex_exit(&igb->gen_lock); + + return (result); +} + +/* + * Set a new device unicast address. + */ +int +igb_m_unicst(void *arg, const uint8_t *mac_addr) +{ + igb_t *igb = (igb_t *)arg; + int result; + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_SUSPENDED) { + mutex_exit(&igb->gen_lock); + return (ECANCELED); + } + + /* + * Store the new MAC address. + */ + bcopy(mac_addr, igb->hw.mac.addr, ETHERADDRL); + + /* + * Set MAC address in address slot 0, which is the default address. + */ + result = igb_unicst_set(igb, mac_addr, 0); + + mutex_exit(&igb->gen_lock); + + return (result); +} + +/* + * Pass on M_IOCTL messages passed to the DLD, and support + * private IOCTLs for debugging and ndd. + */ +void +igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp) +{ + igb_t *igb = (igb_t *)arg; + struct iocblk *iocp; + enum ioc_reply status; + + iocp = (struct iocblk *)(uintptr_t)mp->b_rptr; + iocp->ioc_error = 0; + + switch (iocp->ioc_cmd) { + case LB_GET_INFO_SIZE: + case LB_GET_INFO: + case LB_GET_MODE: + case LB_SET_MODE: + status = igb_loopback_ioctl(igb, iocp, mp); + break; + + case ND_GET: + case ND_SET: + status = igb_nd_ioctl(igb, q, mp, iocp); + break; + + default: + status = IOC_INVAL; + break; + } + + /* + * Decide how to reply + */ + switch (status) { + default: + case IOC_INVAL: + /* + * Error, reply with a NAK and EINVAL or the specified error + */ + miocnak(q, mp, 0, iocp->ioc_error == 0 ? + EINVAL : iocp->ioc_error); + break; + + case IOC_DONE: + /* + * OK, reply already sent + */ + break; + + case IOC_ACK: + /* + * OK, reply with an ACK + */ + miocack(q, mp, 0, 0); + break; + + case IOC_REPLY: + /* + * OK, send prepared reply as ACK or NAK + */ + mp->b_datap->db_type = iocp->ioc_error == 0 ? 
+ M_IOCACK : M_IOCNAK; + qreply(q, mp); + break; + } +} + + +/* + * Find an unused address slot, set the address to it, reserve + * this slot and enable the device to start filtering on the + * new address. + */ +int +igb_m_unicst_add(void *arg, mac_multi_addr_t *maddr) +{ + igb_t *igb = (igb_t *)arg; + mac_addr_slot_t slot; + int err; + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_SUSPENDED) { + mutex_exit(&igb->gen_lock); + return (ECANCELED); + } + + if (mac_unicst_verify(igb->mac_hdl, + maddr->mma_addr, maddr->mma_addrlen) == B_FALSE) { + mutex_exit(&igb->gen_lock); + return (EINVAL); + } + + if (igb->unicst_avail == 0) { + /* no slots available */ + mutex_exit(&igb->gen_lock); + return (ENOSPC); + } + + /* + * Primary/default address is in slot 0. The next addresses + * are the multiple MAC addresses. So multiple MAC address 0 + * is in slot 1, 1 in slot 2, and so on. So the first multiple + * MAC address resides in slot 1. + */ + for (slot = 1; slot < igb->unicst_total; slot++) { + if (igb->unicst_addr[slot].mac.set == 0) { + igb->unicst_addr[slot].mac.set = 1; + break; + } + } + + ASSERT((slot > 0) && (slot < igb->unicst_total)); + + igb->unicst_avail--; + mutex_exit(&igb->gen_lock); + + maddr->mma_slot = slot; + + if ((err = igb_unicst_set(igb, maddr->mma_addr, slot)) != 0) { + mutex_enter(&igb->gen_lock); + igb->unicst_addr[slot].mac.set = 0; + igb->unicst_avail++; + mutex_exit(&igb->gen_lock); + } + + return (err); +} + + +/* + * Removes a MAC address that was added before. 
+ */ +int +igb_m_unicst_remove(void *arg, mac_addr_slot_t slot) +{ + igb_t *igb = (igb_t *)arg; + int err; + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_SUSPENDED) { + mutex_exit(&igb->gen_lock); + return (ECANCELED); + } + + if ((slot <= 0) || (slot >= igb->unicst_total)) { + mutex_exit(&igb->gen_lock); + return (EINVAL); + } + + if (igb->unicst_addr[slot].mac.set == 1) { + igb->unicst_addr[slot].mac.set = 0; + igb->unicst_avail++; + + /* Copy the default address to the passed slot */ + if ((err = igb_unicst_set(igb, + igb->unicst_addr[0].mac.addr, slot)) != 0) { + igb->unicst_addr[slot].mac.set = 1; + igb->unicst_avail--; + } + + mutex_exit(&igb->gen_lock); + + return (err); + } + mutex_exit(&igb->gen_lock); + + return (EINVAL); +} + +/* + * Modifies the value of an address that has been added before. + * The new address length and the slot number that was returned + * in the call to add should be passed in. mma_flags should be + * set to 0. + * Returns 0 on success. + */ +int +igb_m_unicst_modify(void *arg, mac_multi_addr_t *maddr) +{ + igb_t *igb = (igb_t *)arg; + mac_addr_slot_t slot; + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_SUSPENDED) { + mutex_exit(&igb->gen_lock); + return (ECANCELED); + } + + if (mac_unicst_verify(igb->mac_hdl, + maddr->mma_addr, maddr->mma_addrlen) == B_FALSE) { + mutex_exit(&igb->gen_lock); + return (EINVAL); + } + + slot = maddr->mma_slot; + + if ((slot <= 0) || (slot >= igb->unicst_total)) { + mutex_exit(&igb->gen_lock); + return (EINVAL); + } + + if (igb->unicst_addr[slot].mac.set == 1) { + mutex_exit(&igb->gen_lock); + + return (igb_unicst_set(igb, maddr->mma_addr, slot)); + } + mutex_exit(&igb->gen_lock); + + return (EINVAL); +} + +/* + * Get the MAC address and all other information related to + * the address slot passed in mac_multi_addr_t. + * mma_flags should be set to 0 in the call. 
+ * On return, mma_flags can take the following values: + * 1) MMAC_SLOT_UNUSED + * 2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR + * 3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR + * 4) MMAC_SLOT_USED + */ +int +igb_m_unicst_get(void *arg, mac_multi_addr_t *maddr) +{ + igb_t *igb = (igb_t *)arg; + mac_addr_slot_t slot; + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_SUSPENDED) { + mutex_exit(&igb->gen_lock); + return (ECANCELED); + } + + slot = maddr->mma_slot; + + if ((slot <= 0) || (slot >= igb->unicst_total)) { + mutex_exit(&igb->gen_lock); + return (EINVAL); + } + + if (igb->unicst_addr[slot].mac.set == 1) { + bcopy(igb->unicst_addr[slot].mac.addr, + maddr->mma_addr, ETHERADDRL); + maddr->mma_flags = MMAC_SLOT_USED; + } else { + maddr->mma_flags = MMAC_SLOT_UNUSED; + } + mutex_exit(&igb->gen_lock); + + return (0); +} + +/* + * Obtain the MAC's capabilities and associated data from + * the driver. + */ +boolean_t +igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) +{ + igb_t *igb = (igb_t *)arg; + + switch (cap) { + case MAC_CAPAB_HCKSUM: { + uint32_t *tx_hcksum_flags = cap_data; + + /* + * We advertise our capabilities only if tx hcksum offload is + * enabled. On receive, the stack will accept checksummed + * packets anyway, even if we haven't said we can deliver + * them. + */ + if (!igb->tx_hcksum_enable) + return (B_FALSE); + + *tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM; + break; + } + case MAC_CAPAB_MULTIADDRESS: { + multiaddress_capab_t *mmacp = cap_data; + + /* + * The number of MAC addresses made available by + * this capability is one less than the total as + * the primary address in slot 0 is counted in + * the total. 
+ */ + mmacp->maddr_naddr = igb->unicst_total - 1; + mmacp->maddr_naddrfree = igb->unicst_avail; + /* No multiple factory addresses, set mma_flag to 0 */ + mmacp->maddr_flag = 0; + mmacp->maddr_handle = igb; + mmacp->maddr_add = igb_m_unicst_add; + mmacp->maddr_remove = igb_m_unicst_remove; + mmacp->maddr_modify = igb_m_unicst_modify; + mmacp->maddr_get = igb_m_unicst_get; + mmacp->maddr_reserve = NULL; + break; + } + default: + return (B_FALSE); + } + return (B_TRUE); +} diff --git a/usr/src/uts/common/io/igb/igb_hw.h b/usr/src/uts/common/io/igb/igb_hw.h new file mode 100644 index 0000000000..21b1532193 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_hw.h @@ -0,0 +1,626 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#ifndef _IGB_HW_H +#define _IGB_HW_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "igb_osdep.h" +#include "igb_regs.h" +#include "igb_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 + +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 + +typedef enum { + e1000_undefined = 0, + e1000_82575, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ +} e1000_mac_type; + +typedef enum { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_eeprom_microwire, + e1000_nvm_flash_hw, + e1000_nvm_flash_sw +} e1000_nvm_type; + +typedef enum { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, + e1000_nvm_override_microwire_small, + e1000_nvm_override_microwire_large +} e1000_nvm_override; + +typedef enum { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_bm +} e1000_phy_type; + +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +} e1000_bus_type; + +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +} e1000_bus_speed; + +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + 
e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +} e1000_fc_type; + +/* Receive Descriptor */ +struct e1000_rx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + u16 length; /* Length of data DMAed into data buffer */ + u16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + u16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + u64 buffer_addr; + u64 reserved; + } read; + struct { + struct { + u32 mrq; /* Multiple Rx Queues */ + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + u64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + u32 mrq; /* Multiple Rx Queues */ + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length0; /* length of buffer 0 */ + u16 vlan; /* VLAN tag */ + } middle; + struct { + u16 header_status; + u16 length[3]; /* length of 
buffers 1-3 */ + } upper; + u64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + u16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + u32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + u16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + u32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + u16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + u32 cmd_and_length; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + u16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + u64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + u32 data; + struct { + u16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + u16 special; + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + 
u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "igb_mac.h" +#include "igb_phy.h" +#include "igb_nvm.h" +#include "igb_manage.h" + +struct e1000_functions { + /* Function pointers for the MAC. 
*/ + s32 (*init_mac_params)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *hw); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); + void (*remove_device)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*mta_set)(struct e1000_hw *, u32); + void (*config_collision_dist)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*validate_mdi_setting)(struct e1000_hw *); + s32 (*mng_host_if_write)(struct e1000_hw *, u8*, u16, u16, u8*); + s32 (*mng_write_cmd_header)(struct e1000_hw *hw, + struct e1000_host_mng_command_header *); + s32 (*mng_enable_host_if)(struct e1000_hw *); + s32 (*wait_autoneg)(struct e1000_hw *); + + /* Function pointers for the PHY. 
*/ + s32 (*init_phy_params)(struct e1000_hw *); + s32 (*acquire_phy)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit_phy)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); + void (*release_phy)(struct e1000_hw *); + s32 (*reset_phy)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); + void (*power_up_phy)(struct e1000_hw *); + void (*power_down_phy)(struct e1000_hw *); + + /* Function pointers for the NVM. */ + s32 (*init_nvm_params)(struct e1000_hw *); + s32 (*acquire_nvm)(struct e1000_hw *); + s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *); + void (*release_nvm)(struct e1000_hw *); + void (*reload_nvm)(struct e1000_hw *); + s32 (*update_nvm)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate_nvm)(struct e1000_hw *); + s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + u8 addr[6]; + u8 perm_addr[6]; + + e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_av; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; +}; + +struct e1000_phy_info { + 
e1000_phy_type type; + + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; + e1000_ms_type ms_type; + e1000_ms_type original_ms_type; + e1000_rev_polarity cable_polarity; + e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + e1000_nvm_type type; + e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + e1000_bus_type type; + e1000_bus_speed speed; + e1000_bus_width width; + + u32 snoop; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + e1000_fc_type type; /* Type of flow control */ + e1000_fc_type original_type; +}; + +struct e1000_hw { + void *back; + void *dev_spec; + + u8 *hw_addr; + u8 *flash_address; + unsigned long io_base; + + struct e1000_functions func; + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + u32 dev_spec_size; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* These functions must be implemented by drivers */ +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +s32 
e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, u32 size); +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_free_dev_spec_struct(struct e1000_hw *hw); +void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_HW_H */ diff --git a/usr/src/uts/common/io/igb/igb_log.c b/usr/src/uts/common/io/igb/igb_log.c new file mode 100644 index 0000000000..48b1e0d502 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_log.c @@ -0,0 +1,93 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_sw.h" + +#define LOG_BUF_LEN 128 + +/* + * igb_notice - Report a run-time event (CE_NOTE, to console & log) + */ +void +igb_notice(void *arg, const char *fmt, ...) 
+{ + igb_t *igbp = (igb_t *)arg; + char buf[LOG_BUF_LEN]; + va_list ap; + + va_start(ap, fmt); + (void) vsnprintf(buf, sizeof (buf), fmt, ap); + va_end(ap); + + if (igbp != NULL) + cmn_err(CE_NOTE, "%s%d: %s", MODULE_NAME, igbp->instance, buf); + else + cmn_err(CE_NOTE, "%s: %s", MODULE_NAME, buf); +} + +/* + * igb_log - Log a run-time event (CE_NOTE, to log only) + */ +void +igb_log(void *arg, const char *fmt, ...) +{ + igb_t *igbp = (igb_t *)arg; + char buf[LOG_BUF_LEN]; + va_list ap; + + va_start(ap, fmt); + (void) vsnprintf(buf, sizeof (buf), fmt, ap); + va_end(ap); + + if (igbp != NULL) + cmn_err(CE_NOTE, "!%s%d: %s", MODULE_NAME, igbp->instance, buf); + else + cmn_err(CE_NOTE, "!%s: %s", MODULE_NAME, buf); +} + +/* + * igb_error - Log a run-time problem (CE_WARN, to log only) + */ +void +igb_error(void *arg, const char *fmt, ...) +{ + igb_t *igbp = (igb_t *)arg; + char buf[LOG_BUF_LEN]; + va_list ap; + + va_start(ap, fmt); + (void) vsnprintf(buf, sizeof (buf), fmt, ap); + va_end(ap); + + if (igbp != NULL) + cmn_err(CE_WARN, "!%s%d: %s", MODULE_NAME, igbp->instance, buf); + else + cmn_err(CE_WARN, "!%s: %s", MODULE_NAME, buf); +} diff --git a/usr/src/uts/common/io/igb/igb_mac.c b/usr/src/uts/common/io/igb/igb_mac.c new file mode 100644 index 0000000000..37a87d191d --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_mac.c @@ -0,0 +1,2071 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. 
+ * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_api.h" +#include "igb_mac.h" + +/* + * e1000_remove_device_generic - Free device specific structure + * @hw: pointer to the HW structure + * + * If a device specific structure was allocated, this function will + * free it. + */ +void +e1000_remove_device_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_remove_device_generic"); + + /* Freeing the dev_spec member of e1000_hw structure */ + e1000_free_dev_spec_struct(hw); +} + +/* + * e1000_get_bus_info_pci_generic - Get PCI(x) bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCI/PCIx), and PCI(-x) function. + */ +s32 +e1000_get_bus_info_pci_generic(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 status = E1000_READ_REG(hw, E1000_STATUS); + s32 ret_val = E1000_SUCCESS; + u16 pci_header_type; + + DEBUGFUNC("e1000_get_bus_info_pci_generic"); + + /* PCI or PCI-X? */ + bus->type = (status & E1000_STATUS_PCIX_MODE) + ? e1000_bus_type_pcix : e1000_bus_type_pci; + + /* Bus speed */ + if (bus->type == e1000_bus_type_pci) { + bus->speed = (status & E1000_STATUS_PCI66) + ? 
e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + bus->speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + bus->speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + bus->speed = e1000_bus_speed_133; + break; + default: + bus->speed = e1000_bus_speed_reserved; + break; + } + } + + /* Bus width */ + bus->width = (status & E1000_STATUS_BUS64) + ? e1000_bus_width_64 : e1000_bus_width_32; + + /* Which PCI(-X) function? */ + e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type); + if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) + bus->func = (status & E1000_STATUS_FUNC_MASK) + >> E1000_STATUS_FUNC_SHIFT; + else + bus->func = 0; + + return (ret_val); +} + +/* + * e1000_get_bus_info_pcie_generic - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. 
+ */ +s32 +e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u32 status; + u16 pcie_link_status, pci_header_type; + + DEBUGFUNC("e1000_get_bus_info_pcie_generic"); + + bus->type = e1000_bus_type_pci_express; + bus->speed = e1000_bus_speed_2500; + + ret_val = e1000_read_pcie_cap_reg(hw, + PCIE_LINK_STATUS, &pcie_link_status); + if (ret_val) + bus->width = e1000_bus_width_unknown; + else + bus->width = (e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); + + e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type); + if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) { + status = E1000_READ_REG(hw, E1000_STATUS); + bus->func = (status & E1000_STATUS_FUNC_MASK) + >> E1000_STATUS_FUNC_SHIFT; + } else { + bus->func = 0; + } + + return (E1000_SUCCESS); +} + +/* + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + */ +void +e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + DEBUGFUNC("e1000_clear_vfta_generic"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + E1000_WRITE_FLUSH(hw); + } +} + +/* + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
+ */ +void +e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) +{ + DEBUGFUNC("e1000_write_vfta_generic"); + + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + E1000_WRITE_FLUSH(hw); +} + +/* + * e1000_init_rx_addrs_generic - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setups the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + */ +void +e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + + DEBUGFUNC("e1000_init_rx_addrs_generic"); + + /* Setup the receive address */ + DEBUGOUT("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set_generic(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(hw); + } +} + +/* + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is saved in the hw struct and + * programmed into RAR0 and the function returns success, otherwise the + * function returns an error. 
+ */ +s32 +e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = E1000_SUCCESS; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ADDR_LEN]; + + DEBUGFUNC("e1000_check_alt_mac_addr_generic"); + + ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (nvm_alt_mac_addr_offset == 0xFFFF) { + ret_val = -(E1000_NOT_IMPLEMENTED); + goto out; + } + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += ETH_ADDR_LEN / sizeof (u16); + + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (alt_mac_addr[0] & 0x01) { + ret_val = -(E1000_NOT_IMPLEMENTED); + goto out; + } + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i]; + + e1000_rar_set(hw, hw->mac.perm_addr, 0); + +out: + return (ret_val); +} + +/* + * e1000_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. 
+ */ +void +e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("e1000_rar_set_generic"); + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) { + if (!hw->mac.disable_av) + rar_high |= E1000_RAH_AV; + } + + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); +} + +/* + * e1000_mta_set_generic - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + */ +void +e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + DEBUGFUNC("e1000_mta_set_generic"); + /* + * The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. 
+ */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); + + mta |= (1 << hash_bit); + + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); + E1000_WRITE_FLUSH(hw); +} + +/* + * e1000_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. + */ +void +e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) +{ + u32 hash_value; + u32 i; + + DEBUGFUNC("e1000_update_mc_addr_list_generic"); + + /* + * Load the first set of multicast addresses into the exact + * filters (RAR). If there are not enough to fill the RAR + * array, clear the filters. + */ + for (i = rar_used_count; i < rar_count; i++) { + if (mc_addr_count) { + e1000_rar_set(hw, mc_addr_list, i); + mc_addr_count--; + mc_addr_list += ETH_ADDR_LEN; + } else { + E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(hw); + } + } + + /* Clear the old settings from the MTA */ + DEBUGOUT("Clearing MTA\n"); + for (i = 0; i < hw->mac.mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + E1000_WRITE_FLUSH(hw); + } + + /* Load any remaining multicast addresses into the hash table. 
*/ + for (; mc_addr_count > 0; mc_addr_count--) { + hash_value = e1000_hash_mc_addr(hw, mc_addr_list); + DEBUGOUT1("Hash value = 0x%03X\n", hash_value); + e1000_mta_set(hw, hash_value); + mc_addr_list += ETH_ADDR_LEN; + } +} + +/* + * e1000_hash_mc_addr_generic - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * e1000_mta_set_generic() + */ +u32 +e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + DEBUGFUNC("e1000_hash_mc_addr_generic"); + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* + * The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... 
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+ switch (hw->mac.mc_filter_type) {
+ default:
+ case 0:
+ break;
+ case 1:
+ bit_shift += 1;
+ break;
+ case 2:
+ bit_shift += 2;
+ break;
+ case 3:
+ bit_shift += 4;
+ break;
+ }
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16) mc_addr[5]) << bit_shift)));
+
+ return (hash_value);
+}
+
+/*
+ * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
+ * @hw: pointer to the HW structure
+ *
+ * In certain situations, a system BIOS may report that the PCIx maximum
+ * memory read byte count (MMRBC) value is higher than the actual
+ * value. We check the PCIx command register with the current PCIx status
+ * register.
+ */ +void +e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw) +{ + u16 cmd_mmrbc; + u16 pcix_cmd; + u16 pcix_stat_hi_word; + u16 stat_mmrbc; + + DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic"); + + /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */ + if (hw->bus.type != e1000_bus_type_pcix) + return; + + e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); + cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >> + PCIX_COMMAND_MMRBC_SHIFT; + stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> + PCIX_STATUS_HI_MMRBC_SHIFT; + if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) + stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; + if (cmd_mmrbc > stat_mmrbc) { + pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK; + pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; + e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + } +} + +/* + * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
+ */ +void +e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_base_generic"); + + (void) E1000_READ_REG(hw, E1000_CRCERRS); + (void) E1000_READ_REG(hw, E1000_SYMERRS); + (void) E1000_READ_REG(hw, E1000_MPC); + (void) E1000_READ_REG(hw, E1000_SCC); + (void) E1000_READ_REG(hw, E1000_ECOL); + (void) E1000_READ_REG(hw, E1000_MCC); + (void) E1000_READ_REG(hw, E1000_LATECOL); + (void) E1000_READ_REG(hw, E1000_COLC); + (void) E1000_READ_REG(hw, E1000_DC); + (void) E1000_READ_REG(hw, E1000_SEC); + (void) E1000_READ_REG(hw, E1000_RLEC); + (void) E1000_READ_REG(hw, E1000_XONRXC); + (void) E1000_READ_REG(hw, E1000_XONTXC); + (void) E1000_READ_REG(hw, E1000_XOFFRXC); + (void) E1000_READ_REG(hw, E1000_XOFFTXC); + (void) E1000_READ_REG(hw, E1000_FCRUC); + (void) E1000_READ_REG(hw, E1000_GPRC); + (void) E1000_READ_REG(hw, E1000_BPRC); + (void) E1000_READ_REG(hw, E1000_MPRC); + (void) E1000_READ_REG(hw, E1000_GPTC); + (void) E1000_READ_REG(hw, E1000_GORCL); + (void) E1000_READ_REG(hw, E1000_GORCH); + (void) E1000_READ_REG(hw, E1000_GOTCL); + (void) E1000_READ_REG(hw, E1000_GOTCH); + (void) E1000_READ_REG(hw, E1000_RNBC); + (void) E1000_READ_REG(hw, E1000_RUC); + (void) E1000_READ_REG(hw, E1000_RFC); + (void) E1000_READ_REG(hw, E1000_ROC); + (void) E1000_READ_REG(hw, E1000_RJC); + (void) E1000_READ_REG(hw, E1000_TORL); + (void) E1000_READ_REG(hw, E1000_TORH); + (void) E1000_READ_REG(hw, E1000_TOTL); + (void) E1000_READ_REG(hw, E1000_TOTH); + (void) E1000_READ_REG(hw, E1000_TPR); + (void) E1000_READ_REG(hw, E1000_TPT); + (void) E1000_READ_REG(hw, E1000_MPTC); + (void) E1000_READ_REG(hw, E1000_BPTC); +} + +/* + * e1000_check_for_copper_link_generic - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. 
+ */ +s32 +e1000_check_for_copper_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_check_for_copper_link"); + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = FALSE; + + /* + * Check if there was DownShift, must be checked + * immediately after link-up + */ + (void) e1000_check_downshift_generic(hw); + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + e1000_config_collision_dist_generic(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + } + +out: + return (ret_val); +} + +/* + * e1000_check_for_fiber_link_generic - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. 
+ */ +s32 +e1000_check_for_fiber_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_check_for_fiber_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && + (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + goto out; + } + DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. 
+ */ + DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = TRUE; + } + +out: + return (ret_val); +} + +/* + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +s32 +e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_check_for_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + goto out; + } + DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. 
*/ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = TRUE; + } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { + /* + * If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + usec_delay(10); + if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, E1000_RXCW)) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = TRUE; + DEBUGOUT("SERDES: Link is up.\n"); + } + } else { + mac->serdes_has_link = FALSE; + DEBUGOUT("SERDES: Link is down.\n"); + } + } + + if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) { + status = E1000_READ_REG(hw, E1000_STATUS); + mac->serdes_has_link = (status & E1000_STATUS_LU) + ? TRUE : FALSE; + } + +out: + return (ret_val); +} + +/* + * e1000_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. 
+ */ +s32 +e1000_setup_link_generic(struct e1000_hw *hw) +{ + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_link_generic"); + + /* + * In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (e1000_check_reset_block(hw)) + goto out; + + /* + * If flow control is set to default, set flow control based on + * the EEPROM flow control settings. + */ + if (hw->fc.type == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + goto out; + } + + /* + * We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.original_type = hw->fc.type; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.type); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = func->setup_physical_interface(hw); + if (ret_val) + goto out; + + /* + * Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing the Flow Control address,type and timer regs\n"); + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + + ret_val = e1000_set_fc_watermarks_generic(hw); + +out: + return (ret_val); +} + +/* + * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. 
+ */ +s32 +e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + e1000_config_collision_dist_generic(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + goto out; + + /* + * Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + DEBUGOUT("Auto-negotiation enabled\n"); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + + /* + * For these adapters, the SW defineable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + DEBUGOUT("No signal detected\n"); + } + +out: + return (ret_val); +} + +/* + * e1000_config_collision_dist_generic - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. 
+ */ +void +e1000_config_collision_dist_generic(struct e1000_hw *hw) +{ + u32 tctl; + + DEBUGFUNC("e1000_config_collision_dist_generic"); + + tctl = E1000_READ_REG(hw, E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_FLUSH(hw); +} + +/* + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register, if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + */ +s32 +e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); + + /* + * If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * seconds (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + msec_delay(10); + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + DEBUGOUT("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = 1; + /* + * AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. 
+ */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + DEBUGOUT("Error while checking for link\n"); + goto out; + } + mac->autoneg_failed = 0; + } else { + mac->autoneg_failed = 0; + DEBUGOUT("Valid Link Found\n"); + } + +out: + return (ret_val); +} + +/* + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * base on the flow control settings in e1000_mac_info. + */ +s32 +e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_commit_fc_settings_generic"); + + /* + * Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.type) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric RX + * PAUSE. 
Later, we will disable the adapter's ability to send
+ * PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case e1000_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case e1000_fc_full:
+ /*
+ * Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+ mac->txcw = txcw;
+
+out:
+ return (ret_val);
+}
+
+/*
+ * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers. If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ */
+s32
+e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 fcrtl = 0, fcrth = 0;
+
+ DEBUGFUNC("e1000_set_fc_watermarks_generic");
+
+ /*
+ * Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (hw->fc.type & e1000_fc_tx_pause) {
+ /*
+ * We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
+ */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl); + E1000_WRITE_REG(hw, E1000_FCRTH, fcrth); + + return (ret_val); +} + +/* + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + */ +s32 +e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 nvm_data; + + DEBUGFUNC("e1000_set_default_fc_generic"); + + /* + * Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.type = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.type = e1000_fc_tx_pause; + else + hw->fc.type = e1000_fc_full; + +out: + return (ret_val); +} + +/* + * e1000_force_mac_fc_generic - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. 
+ */
+s32
+e1000_force_mac_fc_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_force_mac_fc_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /*
+ * Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disables flow control
+ * according to the "hw->fc.type" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause
+ * frames but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) is enabled.
+ * other: No other values should be possible at this point.
+ */
+ DEBUGOUT1("hw->fc.type = %u\n", hw->fc.type);
+
+ switch (hw->fc.type) {
+ case e1000_fc_none:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case e1000_fc_rx_pause:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case e1000_fc_tx_pause:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case e1000_fc_full:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+ return (ret_val);
+}
+
+/*
+ * e1000_config_fc_after_link_up_generic - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced. If the link needed to be forced, then
+ * flow control needs to be forced also. If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ */ +s32 +e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + DEBUGFUNC("e1000_config_fc_after_link_up_generic"); + + /* + * Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000_force_mac_fc_generic(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = e1000_force_mac_fc_generic(hw); + } + + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + goto out; + } + + /* + * Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* + * Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + DEBUGOUT("Copper PHY and Auto Neg " + "has not completed.\n"); + goto out; + } + + /* + * The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. 
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + + /* + * Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + * ------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + * ------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. 
+ */ + if (hw->fc.original_type == e1000_fc_full) { + hw->fc.type = e1000_fc_full; + DEBUGOUT("Flow Control = FULL.\r\n"); + } else { + hw->fc.type = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = " + "RX PAUSE frames only.\r\n"); + } + } + /* + * For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + * ------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.type = e1000_fc_tx_pause; + DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n"); + } + /* + * For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + * ------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.type = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n"); + } else { + /* + * Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.type = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\r\n"); + } + + /* + * Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.type = e1000_fc_none; + + /* + * Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. 
+ */
+ ret_val = e1000_force_mac_fc_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ goto out;
+ }
+ }
+
+out:
+ return (ret_val);
+}
+
+/*
+ * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Read the status register for the current speed/duplex and store the current
+ * speed and duplex for copper connections.
+ */
+s32
+e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ u32 status;
+
+ DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ DEBUGOUT("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ DEBUGOUT("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ }
+
+ return (E1000_SUCCESS);
+}
+
+/*
+ * e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Sets the speed and duplex to gigabit full duplex (the only possible option)
+ * for fiber/serdes links.
+ */ +s32 +e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex) +{ + DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); + UNREFERENCED_PARAMETER(hw); + + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return (E1000_SUCCESS); +} + +/* + * e1000_get_hw_semaphore_generic - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +s32 +e1000_get_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = E1000_SUCCESS; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_generic"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_put_hw_semaphore_generic - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + */ +void +e1000_put_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("e1000_put_hw_semaphore_generic"); + + swsm = E1000_READ_REG(hw, E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + E1000_WRITE_REG(hw, E1000_SWSM, swsm); +} + +/* + * e1000_get_auto_rd_done_generic - Check for auto read completion + * @hw: 
pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + */ +s32 +e1000_get_auto_rd_done_generic(struct e1000_hw *hw) +{ + s32 i = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_get_auto_rd_done_generic"); + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD) + break; + msec_delay(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + DEBUGOUT("Auto read by HW from NVM has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_valid_led_default_generic - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + */ +s32 +e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_generic"); + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + +out: + return (ret_val); +} + +/* + * e1000_id_led_init_generic - + * @hw: pointer to the HW structure + * + */ +s32 +e1000_id_led_init_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + DEBUGFUNC("e1000_id_led_init_generic"); + + ret_val = hw->func.valid_led_default(hw, &data); + if (ret_val) + goto out; + + mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case 
ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + +out: + return (ret_val); +} + +/* + * e1000_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. 
+ */ +s32 +e1000_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_led_generic"); + + if (hw->func.setup_led != e1000_setup_led_generic) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + } + +out: + return (ret_val); +} + +/* + * e1000_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + */ +s32 +e1000_cleanup_led_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_cleanup_led_generic"); + + if (hw->func.cleanup_led != e1000_cleanup_led_generic) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + +out: + return (ret_val); +} + +/* + * e1000_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the led's which are set to be on. 
+ */ +s32 +e1000_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + DEBUGFUNC("e1000_blink_led_generic"); + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* + * set the blink bit for each LED that's "on" (0x0E) + * in ledctl_mode2 + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << + (i * 8)); + } + + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink); + + return (E1000_SUCCESS); +} + +/* + * e1000_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + */ +s32 +e1000_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_on_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return (E1000_SUCCESS); +} + +/* + * e1000_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. 
+ */ +s32 +e1000_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_off_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return (E1000_SUCCESS); +} + +/* + * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + */ +void +e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + DEBUGFUNC("e1000_set_pcie_no_snoop_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + return; + + if (no_snoop) { + gcr = E1000_READ_REG(hw, E1000_GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + E1000_WRITE_REG(hw, E1000_GCR, gcr); + } +} + +/* + * e1000_disable_pcie_master_generic - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (E1000_SUCCESS) if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. 
+ */ +s32 +e1000_disable_pcie_master_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_disable_pcie_master_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + while (timeout) { + if (!(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + usec_delay(100); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Master requests are pending.\n"); + ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + */ +void +e1000_reset_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_reset_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + if (!mac->ifs_params_forced) { + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + } + + mac->in_ifs_mode = FALSE; + E1000_WRITE_REG(hw, E1000_AIT, 0); +} + +/* + * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. 
+ */ +void +e1000_update_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_update_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = TRUE; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + E1000_WRITE_REG(hw, E1000_AIT, + mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = FALSE; + E1000_WRITE_REG(hw, E1000_AIT, 0); + } + } +} + +/* + * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotitation that MDI/MDIx is correctly + * set, which is forced to MDI mode only. + */ +s32 +e1000_validate_mdi_setting_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_validate_mdi_setting_generic"); + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + DEBUGOUT("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. 
+ */ +s32 +e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic"); + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + E1000_WRITE_REG(hw, reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + usec_delay(5); + regvalue = E1000_READ_REG(hw, reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + DEBUGOUT1("Reg %08x did not indicate ready\n", reg); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return (ret_val); +} diff --git a/usr/src/uts/common/io/igb/igb_mac.h b/usr/src/uts/common/io/igb/igb_mac.h new file mode 100644 index 0000000000..ac6abbb481 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_mac.h @@ -0,0 +1,96 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#ifndef _IGB_MAC_H +#define _IGB_MAC_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Functions that should not be called directly from drivers but can be used + * by other files in this 'shared code' + */ +s32 e1000_blink_led_generic(struct e1000_hw *hw); +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw); +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000_id_led_init_generic(struct e1000_hw *hw); +s32 e1000_led_on_generic(struct e1000_hw *hw); +s32 e1000_led_off_generic(struct e1000_hw *hw); +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count); +s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_set_default_fc_generic(struct e1000_hw *hw); +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_setup_led_generic(struct e1000_hw *hw); +s32 e1000_setup_link_generic(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 
offset, u8 data); + +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); + +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000_config_collision_dist_generic(struct e1000_hw *hw); +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); +void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value); +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); +void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000_remove_device_generic(struct e1000_hw *hw); +void e1000_reset_adaptive_generic(struct e1000_hw *hw); +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); +void e1000_update_adaptive_generic(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_MAC_H */ diff --git a/usr/src/uts/common/io/igb/igb_main.c b/usr/src/uts/common/io/igb/igb_main.c new file mode 100644 index 0000000000..0f13a82cdb --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_main.c @@ -0,0 +1,4012 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. 
+ * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_sw.h" + +static char ident[] = "Intel 1Gb Ethernet 1.1.0"; + +/* + * Local function protoypes + */ +static int igb_register_mac(igb_t *); +static int igb_identify_hardware(igb_t *); +static int igb_regs_map(igb_t *); +static void igb_init_properties(igb_t *); +static int igb_init_driver_settings(igb_t *); +static void igb_init_locks(igb_t *); +static void igb_destroy_locks(igb_t *); +static int igb_init(igb_t *); +static int igb_chip_start(igb_t *); +static void igb_chip_stop(igb_t *); +static int igb_reset(igb_t *); +static void igb_tx_clean(igb_t *); +static boolean_t igb_tx_drain(igb_t *); +static boolean_t igb_rx_drain(igb_t *); +static int igb_alloc_rings(igb_t *); +static int igb_init_rings(igb_t *); +static void igb_free_rings(igb_t *); +static void igb_fini_rings(igb_t *); +static void igb_setup_rings(igb_t *); +static void igb_setup_rx(igb_t *); +static void igb_setup_tx(igb_t *); +static void igb_setup_rx_ring(igb_rx_ring_t *); +static void igb_setup_tx_ring(igb_tx_ring_t *); +static void igb_setup_rss(igb_t *); +static void igb_init_unicst(igb_t *); +static void igb_setup_multicst(igb_t *); +static void igb_get_phy_state(igb_t *); +static void igb_get_conf(igb_t *); +static int igb_get_prop(igb_t *, char *, int, int, int); +static boolean_t igb_is_link_up(igb_t *); +static boolean_t igb_link_check(igb_t *); +static void igb_local_timer(void *); +static void igb_arm_watchdog_timer(igb_t *); +static void igb_start_watchdog_timer(igb_t *); +static void igb_restart_watchdog_timer(igb_t *); +static void igb_stop_watchdog_timer(igb_t *); +static 
void igb_disable_adapter_interrupts(igb_t *); +static void igb_enable_adapter_interrupts(igb_t *); +static boolean_t is_valid_mac_addr(uint8_t *); +static boolean_t igb_stall_check(igb_t *); +static boolean_t igb_set_loopback_mode(igb_t *, uint32_t); +static void igb_set_external_loopback(igb_t *); +static void igb_set_internal_mac_loopback(igb_t *); +static void igb_set_internal_phy_loopback(igb_t *); +static void igb_set_internal_serdes_loopback(igb_t *); +static boolean_t igb_find_mac_address(igb_t *); +static int igb_alloc_intrs(igb_t *); +static int igb_alloc_intrs_msix(igb_t *); +static int igb_alloc_intrs_msi(igb_t *); +static int igb_alloc_intrs_legacy(igb_t *); +static int igb_add_intr_handlers(igb_t *); +static void igb_rem_intr_handlers(igb_t *); +static void igb_rem_intrs(igb_t *); +static int igb_enable_intrs(igb_t *); +static int igb_disable_intrs(igb_t *); +static void igb_setup_adapter_msix(igb_t *); +static uint_t igb_intr_legacy(void *, void *); +static uint_t igb_intr_msi(void *, void *); +static uint_t igb_intr_rx(void *, void *); +static uint_t igb_intr_tx_other(void *, void *); +static void igb_intr_rx_work(igb_rx_ring_t *); +static void igb_intr_tx_work(igb_tx_ring_t *); +static void igb_intr_other_work(igb_t *); +static void igb_get_driver_control(struct e1000_hw *); +static void igb_release_driver_control(struct e1000_hw *); + +static int igb_attach(dev_info_t *, ddi_attach_cmd_t); +static int igb_detach(dev_info_t *, ddi_detach_cmd_t); +static int igb_resume(dev_info_t *); +static int igb_suspend(dev_info_t *); +static void igb_unconfigure(dev_info_t *, igb_t *); + +static struct cb_ops igb_cb_ops = { + nulldev, /* cb_open */ + nulldev, /* cb_close */ + nodev, /* cb_strategy */ + nodev, /* cb_print */ + nodev, /* cb_dump */ + nodev, /* cb_read */ + nodev, /* cb_write */ + nodev, /* cb_ioctl */ + nodev, /* cb_devmap */ + nodev, /* cb_mmap */ + nodev, /* cb_segmap */ + nochpoll, /* cb_chpoll */ + ddi_prop_op, /* cb_prop_op */ + NULL, /* 
cb_stream */ + D_MP | D_HOTPLUG, /* cb_flag */ + CB_REV, /* cb_rev */ + nodev, /* cb_aread */ + nodev /* cb_awrite */ +}; + +static struct dev_ops igb_dev_ops = { + DEVO_REV, /* devo_rev */ + 0, /* devo_refcnt */ + NULL, /* devo_getinfo */ + nulldev, /* devo_identify */ + nulldev, /* devo_probe */ + igb_attach, /* devo_attach */ + igb_detach, /* devo_detach */ + nodev, /* devo_reset */ + &igb_cb_ops, /* devo_cb_ops */ + NULL, /* devo_bus_ops */ + ddi_power /* devo_power */ +}; + +static struct modldrv igb_modldrv = { + &mod_driverops, /* Type of module. This one is a driver */ + ident, /* Discription string */ + &igb_dev_ops, /* driver ops */ +}; + +static struct modlinkage igb_modlinkage = { + MODREV_1, &igb_modldrv, NULL +}; + +/* Access attributes for register mapping */ +ddi_device_acc_attr_t igb_regs_acc_attr = { + DDI_DEVICE_ATTR_V0, + DDI_STRUCTURE_LE_ACC, + DDI_STRICTORDER_ACC, +}; + +#define IGB_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB) + +static mac_callbacks_t igb_m_callbacks = { + IGB_M_CALLBACK_FLAGS, + igb_m_stat, + igb_m_start, + igb_m_stop, + igb_m_promisc, + igb_m_multicst, + igb_m_unicst, + igb_m_tx, + NULL, + igb_m_ioctl, + igb_m_getcapab +}; + + +/* + * Module Initialization Functions + */ + +int +_init(void) +{ + int status; + + mac_init_ops(&igb_dev_ops, MODULE_NAME); + + status = mod_install(&igb_modlinkage); + + if (status != DDI_SUCCESS) { + mac_fini_ops(&igb_dev_ops); + } + + return (status); +} + +int +_fini(void) +{ + int status; + + status = mod_remove(&igb_modlinkage); + + if (status == DDI_SUCCESS) { + mac_fini_ops(&igb_dev_ops); + } + + return (status); + +} + +int +_info(struct modinfo *modinfop) +{ + int status; + + status = mod_info(&igb_modlinkage, modinfop); + + return (status); +} + +/* + * igb_attach - driver attach + * + * This function is the device specific initialization entry + * point. This entry point is required and must be written. + * The DDI_ATTACH command must be provided in the attach entry + * point. 
When attach() is called with cmd set to DDI_ATTACH, + * all normal kernel services (such as kmem_alloc(9F)) are + * available for use by the driver. + * + * The attach() function will be called once for each instance + * of the device on the system with cmd set to DDI_ATTACH. + * Until attach() succeeds, the only driver entry points which + * may be called are open(9E) and getinfo(9E). + */ +static int +igb_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) +{ + igb_t *igb; + struct igb_osdep *osdep; + struct e1000_hw *hw; + int instance; + + /* + * Check the command and perform corresponding operations + */ + switch (cmd) { + default: + return (DDI_FAILURE); + + case DDI_RESUME: + return (igb_resume(devinfo)); + + case DDI_ATTACH: + break; + } + + /* Get the device instance */ + instance = ddi_get_instance(devinfo); + + /* Allocate memory for the instance data structure */ + igb = kmem_zalloc(sizeof (igb_t), KM_SLEEP); + + igb->dip = devinfo; + igb->instance = instance; + + hw = &igb->hw; + osdep = &igb->osdep; + hw->back = osdep; + osdep->igb = igb; + + /* Attach the instance pointer to the dev_info data structure */ + ddi_set_driver_private(devinfo, igb); + + /* + * Map PCI config space registers + */ + if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { + igb_error(igb, "Failed to map PCI configurations"); + goto attach_fail; + } + igb->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; + + /* + * Identify the chipset family + */ + if (igb_identify_hardware(igb) != IGB_SUCCESS) { + igb_error(igb, "Failed to identify hardware"); + goto attach_fail; + } + + /* + * Map device registers + */ + if (igb_regs_map(igb) != IGB_SUCCESS) { + igb_error(igb, "Failed to map device registers"); + goto attach_fail; + } + igb->attach_progress |= ATTACH_PROGRESS_REGS_MAP; + + /* + * Initialize driver parameters + */ + igb_init_properties(igb); + igb->attach_progress |= ATTACH_PROGRESS_PROPS; + + /* + * Allocate interrupts + */ + if (igb_alloc_intrs(igb) != 
IGB_SUCCESS) { + igb_error(igb, "Failed to allocate interrupts"); + goto attach_fail; + } + igb->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; + + /* + * Allocate rx/tx rings based on the ring numbers. + * The actual numbers of rx/tx rings are decided by the number of + * allocated interrupt vectors, so we should allocate the rings after + * interrupts are allocated. + */ + if (igb_alloc_rings(igb) != IGB_SUCCESS) { + igb_error(igb, "Failed to allocate rx and tx rings"); + goto attach_fail; + } + igb->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS; + + /* + * Add interrupt handlers + */ + if (igb_add_intr_handlers(igb) != IGB_SUCCESS) { + igb_error(igb, "Failed to add interrupt handlers"); + goto attach_fail; + } + igb->attach_progress |= ATTACH_PROGRESS_ADD_INTR; + + /* + * Initialize driver parameters + */ + if (igb_init_driver_settings(igb) != IGB_SUCCESS) { + igb_error(igb, "Failed to initialize driver settings"); + goto attach_fail; + } + + /* + * Initialize mutexes for this device. 
+ * Do this before enabling the interrupt handler and + * register the softint to avoid the condition where + * interrupt handler can try using uninitialized mutex + */ + igb_init_locks(igb); + igb->attach_progress |= ATTACH_PROGRESS_LOCKS; + + /* + * Initialize chipset hardware + */ + if (igb_init(igb) != IGB_SUCCESS) { + igb_error(igb, "Failed to initialize adapter"); + goto attach_fail; + } + igb->attach_progress |= ATTACH_PROGRESS_INIT; + + /* + * Initialize DMA and hardware settings for rx/tx rings + */ + if (igb_init_rings(igb) != IGB_SUCCESS) { + igb_error(igb, "Failed to initialize rings"); + goto attach_fail; + } + igb->attach_progress |= ATTACH_PROGRESS_INIT_RINGS; + + /* + * Initialize statistics + */ + if (igb_init_stats(igb) != IGB_SUCCESS) { + igb_error(igb, "Failed to initialize statistics"); + goto attach_fail; + } + igb->attach_progress |= ATTACH_PROGRESS_STATS; + + /* + * Initialize NDD parameters + */ + if (igb_nd_init(igb) != IGB_SUCCESS) { + igb_error(igb, "Failed to initialize ndd"); + goto attach_fail; + } + igb->attach_progress |= ATTACH_PROGRESS_NDD; + + /* + * Register the driver to the MAC + */ + if (igb_register_mac(igb) != IGB_SUCCESS) { + igb_error(igb, "Failed to register MAC"); + goto attach_fail; + } + igb->attach_progress |= ATTACH_PROGRESS_MAC; + + /* + * Now that mutex locks are initialized, and the chip is also + * initialized, enable interrupts. + */ + if (igb_enable_intrs(igb) != IGB_SUCCESS) { + igb_error(igb, "Failed to enable DDI interrupts"); + goto attach_fail; + } + igb->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; + + igb->igb_state |= IGB_INITIALIZED; + + return (DDI_SUCCESS); + +attach_fail: + igb_unconfigure(devinfo, igb); + return (DDI_FAILURE); +} + +/* + * igb_detach - driver detach + * + * The detach() function is the complement of the attach routine. 
+ * If cmd is set to DDI_DETACH, detach() is used to remove the + * state associated with a given instance of a device node + * prior to the removal of that instance from the system. + * + * The detach() function will be called once for each instance + * of the device for which there has been a successful attach() + * once there are no longer any opens on the device. + * + * Interrupts routine are disabled, All memory allocated by this + * driver are freed. + */ +static int +igb_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) +{ + igb_t *igb; + + /* + * Check detach command + */ + switch (cmd) { + default: + return (DDI_FAILURE); + + case DDI_SUSPEND: + return (igb_suspend(devinfo)); + + case DDI_DETACH: + break; + } + + + /* + * Get the pointer to the driver private data structure + */ + igb = (igb_t *)ddi_get_driver_private(devinfo); + if (igb == NULL) + return (DDI_FAILURE); + + /* + * Unregister MAC. If failed, we have to fail the detach + */ + if (mac_unregister(igb->mac_hdl) != 0) { + igb_error(igb, "Failed to unregister MAC"); + return (DDI_FAILURE); + } + igb->attach_progress &= ~ATTACH_PROGRESS_MAC; + + /* + * If the device is still running, it needs to be stopped first. + * This check is necessary because under some specific circumstances, + * the detach routine can be called without stopping the interface + * first. + */ + mutex_enter(&igb->gen_lock); + if (igb->igb_state & IGB_STARTED) { + igb->igb_state &= ~IGB_STARTED; + igb_stop(igb); + mutex_exit(&igb->gen_lock); + /* Disable and stop the watchdog timer */ + igb_disable_watchdog_timer(igb); + } else + mutex_exit(&igb->gen_lock); + + /* + * Check if there are still rx buffers held by the upper layer. + * If so, fail the detach. 
+ */ + if (!igb_rx_drain(igb)) + return (DDI_FAILURE); + + /* + * Do the remaining unconfigure routines + */ + igb_unconfigure(devinfo, igb); + + return (DDI_SUCCESS); +} + +static void +igb_unconfigure(dev_info_t *devinfo, igb_t *igb) +{ + /* + * Disable interrupt + */ + if (igb->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { + (void) igb_disable_intrs(igb); + } + + /* + * Unregister MAC + */ + if (igb->attach_progress & ATTACH_PROGRESS_MAC) { + (void) mac_unregister(igb->mac_hdl); + } + + /* + * Free ndd parameters + */ + if (igb->attach_progress & ATTACH_PROGRESS_NDD) { + igb_nd_cleanup(igb); + } + + /* + * Free statistics + */ + if (igb->attach_progress & ATTACH_PROGRESS_STATS) { + kstat_delete((kstat_t *)igb->igb_ks); + } + + /* + * Remove interrupt handlers + */ + if (igb->attach_progress & ATTACH_PROGRESS_ADD_INTR) { + igb_rem_intr_handlers(igb); + } + + /* + * Remove interrupts + */ + if (igb->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) { + igb_rem_intrs(igb); + } + + /* + * Remove driver properties + */ + if (igb->attach_progress & ATTACH_PROGRESS_PROPS) { + (void) ddi_prop_remove_all(devinfo); + } + + /* + * Release the DMA resources of rx/tx rings + */ + if (igb->attach_progress & ATTACH_PROGRESS_INIT_RINGS) { + igb_fini_rings(igb); + } + + /* + * Stop the chipset + */ + if (igb->attach_progress & ATTACH_PROGRESS_INIT) { + mutex_enter(&igb->gen_lock); + igb_chip_stop(igb); + mutex_exit(&igb->gen_lock); + } + + /* + * Free register handle + */ + if (igb->attach_progress & ATTACH_PROGRESS_REGS_MAP) { + if (igb->osdep.reg_handle != NULL) + ddi_regs_map_free(&igb->osdep.reg_handle); + } + + /* + * Free PCI config handle + */ + if (igb->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { + if (igb->osdep.cfg_handle != NULL) + pci_config_teardown(&igb->osdep.cfg_handle); + } + + /* + * Free locks + */ + if (igb->attach_progress & ATTACH_PROGRESS_LOCKS) { + igb_destroy_locks(igb); + } + + /* + * Free the rx/tx rings + */ + if (igb->attach_progress & 
ATTACH_PROGRESS_ALLOC_RINGS) { + igb_free_rings(igb); + } + + /* + * Free device specific structure + */ + e1000_remove_device(&igb->hw); + + /* + * Free the driver data structure + */ + kmem_free(igb, sizeof (igb_t)); + + ddi_set_driver_private(devinfo, NULL); +} + +/* + * igb_register_mac - Register the driver and its function pointers with + * the GLD interface + */ +static int +igb_register_mac(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + mac_register_t *mac; + int status; + + if ((mac = mac_alloc(MAC_VERSION)) == NULL) + return (IGB_FAILURE); + + mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; + mac->m_driver = igb; + mac->m_dip = igb->dip; + mac->m_src_addr = hw->mac.addr; + mac->m_callbacks = &igb_m_callbacks; + mac->m_min_sdu = 0; + mac->m_max_sdu = igb->max_frame_size - + sizeof (struct ether_vlan_header) - ETHERFCSL; + + status = mac_register(mac, &igb->mac_hdl); + + mac_free(mac); + + return ((status == 0) ? IGB_SUCCESS : IGB_FAILURE); +} + +/* + * igb_identify_hardware - Identify the type of the chipset + */ +static int +igb_identify_hardware(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + struct igb_osdep *osdep = &igb->osdep; + + /* + * Get the device id + */ + hw->vendor_id = + pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); + hw->device_id = + pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); + hw->revision_id = + pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); + hw->subsystem_device_id = + pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); + hw->subsystem_vendor_id = + pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); + + /* + * Set the mac type of the adapter based on the device id + */ + if (e1000_set_mac_type(hw) != E1000_SUCCESS) { + return (IGB_FAILURE); + } + + return (IGB_SUCCESS); +} + +/* + * igb_regs_map - Map the device registers + */ +static int +igb_regs_map(igb_t *igb) +{ + dev_info_t *devinfo = igb->dip; + struct e1000_hw *hw = &igb->hw; + struct igb_osdep *osdep = &igb->osdep; + off_t mem_size; + + /* + * 
First get the size of device registers to be mapped. + */ + if (ddi_dev_regsize(devinfo, 1, &mem_size) != DDI_SUCCESS) { + return (IGB_FAILURE); + } + + /* + * Call ddi_regs_map_setup() to map registers + */ + if ((ddi_regs_map_setup(devinfo, 1, + (caddr_t *)&hw->hw_addr, 0, + mem_size, &igb_regs_acc_attr, + &osdep->reg_handle)) != DDI_SUCCESS) { + return (IGB_FAILURE); + } + + return (IGB_SUCCESS); +} + +/* + * igb_init_properties - Initialize driver properties + */ +static void +igb_init_properties(igb_t *igb) +{ + /* + * Get conf file properties, including link settings + * jumbo frames, ring number, descriptor number, etc. + */ + igb_get_conf(igb); +} + +/* + * igb_init_driver_settings - Initialize driver settings + * + * The settings include hardware function pointers, bus information, + * rx/tx rings settings, link state, and any other parameters that + * need to be setup during driver initialization. + */ +static int +igb_init_driver_settings(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + igb_rx_ring_t *rx_ring; + igb_tx_ring_t *tx_ring; + uint32_t rx_size; + uint32_t tx_size; + int i; + + /* + * Initialize chipset specific hardware function pointers + */ + if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) { + return (IGB_FAILURE); + } + + /* + * Get bus information + */ + if (e1000_get_bus_info(hw) != E1000_SUCCESS) { + return (IGB_FAILURE); + } + + /* + * Set rx buffer size + * The IP header alignment room is counted in the calculation. + * The rx buffer size is in unit of 1K that is required by the + * chipset hardware. + */ + rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM; + igb->rx_buf_size = ((rx_size >> 10) + + ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10; + + /* + * Set tx buffer size + */ + tx_size = igb->max_frame_size; + igb->tx_buf_size = ((tx_size >> 10) + + ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 
1 : 0)) << 10; + + /* + * Initialize rx/tx rings parameters + */ + for (i = 0; i < igb->num_rx_rings; i++) { + rx_ring = &igb->rx_rings[i]; + rx_ring->index = i; + rx_ring->igb = igb; + + rx_ring->ring_size = igb->rx_ring_size; + rx_ring->free_list_size = igb->rx_ring_size; + rx_ring->copy_thresh = igb->rx_copy_thresh; + rx_ring->limit_per_intr = igb->rx_limit_per_intr; + } + + for (i = 0; i < igb->num_tx_rings; i++) { + tx_ring = &igb->tx_rings[i]; + tx_ring->index = i; + tx_ring->igb = igb; + if (igb->tx_head_wb_enable) + tx_ring->tx_recycle = igb_tx_recycle_head_wb; + else + tx_ring->tx_recycle = igb_tx_recycle_legacy; + + tx_ring->ring_size = igb->tx_ring_size; + tx_ring->free_list_size = igb->tx_ring_size + + (igb->tx_ring_size >> 1); + tx_ring->copy_thresh = igb->tx_copy_thresh; + tx_ring->recycle_thresh = igb->tx_recycle_thresh; + tx_ring->overload_thresh = igb->tx_overload_thresh; + tx_ring->resched_thresh = igb->tx_resched_thresh; + } + + /* + * Initialize values of interrupt throttling rate + */ + for (i = 1; i < MAX_NUM_EITR; i++) + igb->intr_throttling[i] = igb->intr_throttling[0]; + + /* + * The initial link state should be "unknown" + */ + igb->link_state = LINK_STATE_UNKNOWN; + + return (IGB_SUCCESS); +} + +/* + * igb_init_locks - Initialize locks + */ +static void +igb_init_locks(igb_t *igb) +{ + igb_rx_ring_t *rx_ring; + igb_tx_ring_t *tx_ring; + int i; + + for (i = 0; i < igb->num_rx_rings; i++) { + rx_ring = &igb->rx_rings[i]; + mutex_init(&rx_ring->rx_lock, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri)); + mutex_init(&rx_ring->recycle_lock, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri)); + } + + for (i = 0; i < igb->num_tx_rings; i++) { + tx_ring = &igb->tx_rings[i]; + mutex_init(&tx_ring->tx_lock, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri)); + mutex_init(&tx_ring->recycle_lock, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri)); + mutex_init(&tx_ring->tcb_head_lock, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri)); + 
mutex_init(&tx_ring->tcb_tail_lock, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri)); + } + + mutex_init(&igb->gen_lock, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri)); + + mutex_init(&igb->watchdog_lock, NULL, + MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri)); +} + +/* + * igb_destroy_locks - Destroy locks + */ +static void +igb_destroy_locks(igb_t *igb) +{ + igb_rx_ring_t *rx_ring; + igb_tx_ring_t *tx_ring; + int i; + + for (i = 0; i < igb->num_rx_rings; i++) { + rx_ring = &igb->rx_rings[i]; + mutex_destroy(&rx_ring->rx_lock); + mutex_destroy(&rx_ring->recycle_lock); + } + + for (i = 0; i < igb->num_tx_rings; i++) { + tx_ring = &igb->tx_rings[i]; + mutex_destroy(&tx_ring->tx_lock); + mutex_destroy(&tx_ring->recycle_lock); + mutex_destroy(&tx_ring->tcb_head_lock); + mutex_destroy(&tx_ring->tcb_tail_lock); + } + + mutex_destroy(&igb->gen_lock); + mutex_destroy(&igb->watchdog_lock); +} + +static int +igb_resume(dev_info_t *devinfo) +{ + igb_t *igb; + + igb = (igb_t *)ddi_get_driver_private(devinfo); + if (igb == NULL) + return (DDI_FAILURE); + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_STARTED) { + if (igb_start(igb) != IGB_SUCCESS) { + mutex_exit(&igb->gen_lock); + return (DDI_FAILURE); + } + + /* + * Enable and start the watchdog timer + */ + igb_enable_watchdog_timer(igb); + } + + igb->igb_state &= ~IGB_SUSPENDED; + + mutex_exit(&igb->gen_lock); + + return (DDI_SUCCESS); +} + +static int +igb_suspend(dev_info_t *devinfo) +{ + igb_t *igb; + + igb = (igb_t *)ddi_get_driver_private(devinfo); + if (igb == NULL) + return (DDI_FAILURE); + + mutex_enter(&igb->gen_lock); + + igb->igb_state |= IGB_SUSPENDED; + + igb_stop(igb); + + mutex_exit(&igb->gen_lock); + + /* + * Disable and stop the watchdog timer + */ + igb_disable_watchdog_timer(igb); + + return (DDI_SUCCESS); +} + +/* + * igb_init - Initialize the device + */ +static int +igb_init(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + uint32_t pba; + uint32_t high_water; + + 
mutex_enter(&igb->gen_lock); + + /* + * Reset chipset to put the hardware in a known state + * before we try to do anything with the eeprom + */ + (void) e1000_reset_hw(hw); + + /* + * NVM validation + */ + if (e1000_validate_nvm_checksum(hw) < 0) { + /* + * Some PCI-E parts fail the first check due to + * the link being in sleep state. Call it again, + * if it fails a second time its a real issue. + */ + if (e1000_validate_nvm_checksum(hw) < 0) { + igb_error(igb, + "Invalid NVM checksum. Please contact " + "the vendor to update the NVM."); + goto init_fail; + } + } + + /* + * Set the FIFO size + */ + pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ + E1000_WRITE_REG(hw, E1000_PBA, pba); + + /* + * Setup flow control + * + * These parameters set thresholds for the adapter's generation(Tx) + * and response(Rx) to Ethernet PAUSE frames. These are just threshold + * settings. Flow control is enabled or disabled in the configuration + * file. + * High-water mark is set down from the top of the rx fifo (not + * sensitive to max_frame_size) and low-water is set just below + * high-water mark. + * The high water mark must be low enough to fit one full frame above + * it in the rx FIFO. Should be the lower of: + * 90% of the Rx FIFO size, or the full Rx FIFO size minus one full + * frame. + */ + high_water = min(((pba << 10) * 9 / 10), + ((pba << 10) - igb->max_frame_size)); + + hw->fc.high_water = high_water & 0xFFF8; + hw->fc.low_water = hw->fc.high_water - 8; + hw->fc.pause_time = E1000_FC_PAUSE_TIME; + hw->fc.send_xon = B_TRUE; + + /* + * Reset the chipset hardware the second time to validate + * the PBA setting. 
+ */ + (void) e1000_reset_hw(hw); + + /* + * Don't wait for auto-negotiation to complete + */ + hw->phy.autoneg_wait_to_complete = B_FALSE; + + /* + * Copper options + */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = 0; /* AUTO_ALL_MODES */ + hw->phy.disable_polarity_correction = B_FALSE; + hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */ + } + + /* + * Initialize link settings + */ + (void) igb_setup_link(igb, B_FALSE); + + /* + * Initialize the chipset hardware + */ + if (igb_chip_start(igb) != IGB_SUCCESS) { + goto init_fail; + } + + mutex_exit(&igb->gen_lock); + return (IGB_SUCCESS); + +init_fail: + /* + * Reset PHY if possible + */ + if (e1000_check_reset_block(hw) == E1000_SUCCESS) + (void) e1000_phy_hw_reset(hw); + + mutex_exit(&igb->gen_lock); + return (IGB_FAILURE); +} + +/* + * igb_init_rings - Allocate DMA resources for all rx/tx rings and + * initialize relevant hardware settings. + */ +static int +igb_init_rings(igb_t *igb) +{ + int i; + + /* + * Allocate buffers for all the rx/tx rings + */ + if (igb_alloc_dma(igb) != IGB_SUCCESS) + return (IGB_FAILURE); + + /* + * Setup the rx/tx rings + */ + mutex_enter(&igb->gen_lock); + + for (i = 0; i < igb->num_rx_rings; i++) + mutex_enter(&igb->rx_rings[i].rx_lock); + for (i = 0; i < igb->num_tx_rings; i++) + mutex_enter(&igb->tx_rings[i].tx_lock); + + igb_setup_rings(igb); + + for (i = igb->num_tx_rings - 1; i >= 0; i--) + mutex_exit(&igb->tx_rings[i].tx_lock); + for (i = igb->num_rx_rings - 1; i >= 0; i--) + mutex_exit(&igb->rx_rings[i].rx_lock); + + mutex_exit(&igb->gen_lock); + + return (IGB_SUCCESS); +} + +/* + * igb_fini_rings - Release DMA resources of all rx/tx rings + */ +static void +igb_fini_rings(igb_t *igb) +{ + /* + * Release the DMA/memory resources of rx/tx rings + */ + igb_free_dma(igb); +} + +/* + * igb_chip_start - Initialize and start the chipset hardware + */ +static int +igb_chip_start(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + int i; + + 
ASSERT(mutex_owned(&igb->gen_lock)); + + /* + * Get the mac address + * This function should handle SPARC case correctly. + */ + if (!igb_find_mac_address(igb)) { + igb_error(igb, "Failed to get the mac address"); + return (IGB_FAILURE); + } + + /* Validate mac address */ + if (!is_valid_mac_addr(hw->mac.addr)) { + igb_error(igb, "Invalid mac address"); + return (IGB_FAILURE); + } + + /* Disable wakeup control by default */ + E1000_WRITE_REG(hw, E1000_WUC, 0); + + /* + * Configure/Initialize hardware + */ + if (e1000_init_hw(hw) != E1000_SUCCESS) { + igb_error(igb, "Failed to initialize hardware"); + return (IGB_FAILURE); + } + + /* + * Make sure driver has control + */ + igb_get_driver_control(hw); + + /* + * Setup MSI-X interrupts + */ + if (igb->intr_type == DDI_INTR_TYPE_MSIX) + igb_setup_adapter_msix(igb); + + /* + * Initialize unicast addresses. + */ + igb_init_unicst(igb); + + /* + * Setup and initialize the mctable structures. + */ + igb_setup_multicst(igb); + + /* + * Set interrupt throttling rate + */ + for (i = 0; i < igb->intr_cnt; i++) + E1000_WRITE_REG(hw, E1000_EITR(i), igb->intr_throttling[i]); + + /* Enable PCI-E master */ + if (hw->bus.type == e1000_bus_type_pci_express) { + e1000_enable_pciex_master(hw); + } + + /* + * Save the state of the phy + */ + igb_get_phy_state(igb); + + return (IGB_SUCCESS); +} + +/* + * igb_chip_stop - Stop the chipset hardware + */ +static void +igb_chip_stop(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + + ASSERT(mutex_owned(&igb->gen_lock)); + + /* Tell firmware driver is no longer in control */ + igb_release_driver_control(hw); + + /* + * Reset the chipset + */ + (void) e1000_reset_hw(hw); + + /* + * Reset PHY if possible + */ + if (e1000_check_reset_block(hw) == E1000_SUCCESS) + (void) e1000_phy_hw_reset(hw); +} + +/* + * igb_reset - Reset the chipset and restart the driver. + * + * It involves stopping and re-starting the chipset, + * and re-configuring the rx/tx rings. 
+ */ +static int +igb_reset(igb_t *igb) +{ + int i; + + mutex_enter(&igb->gen_lock); + + ASSERT(igb->igb_state & IGB_STARTED); + + /* + * Disable the adapter interrupts to stop any rx/tx activities + * before draining pending data and resetting hardware. + */ + igb_disable_adapter_interrupts(igb); + + /* + * Drain the pending transmit packets + */ + (void) igb_tx_drain(igb); + + for (i = 0; i < igb->num_rx_rings; i++) + mutex_enter(&igb->rx_rings[i].rx_lock); + for (i = 0; i < igb->num_tx_rings; i++) + mutex_enter(&igb->tx_rings[i].tx_lock); + + /* + * Stop the chipset hardware + */ + igb_chip_stop(igb); + + /* + * Clean the pending tx data/resources + */ + igb_tx_clean(igb); + + /* + * Start the chipset hardware + */ + if (igb_chip_start(igb) != IGB_SUCCESS) { + goto reset_failure; + } + + /* + * Setup the rx/tx rings + */ + igb_setup_rings(igb); + + /* + * Enable adapter interrupts + * The interrupts must be enabled after the driver state is START + */ + igb_enable_adapter_interrupts(igb); + + for (i = igb->num_tx_rings - 1; i >= 0; i--) + mutex_exit(&igb->tx_rings[i].tx_lock); + for (i = igb->num_rx_rings - 1; i >= 0; i--) + mutex_exit(&igb->rx_rings[i].rx_lock); + + mutex_exit(&igb->gen_lock); + + return (IGB_SUCCESS); + +reset_failure: + for (i = igb->num_tx_rings - 1; i >= 0; i--) + mutex_exit(&igb->tx_rings[i].tx_lock); + for (i = igb->num_rx_rings - 1; i >= 0; i--) + mutex_exit(&igb->rx_rings[i].rx_lock); + + mutex_exit(&igb->gen_lock); + + return (IGB_FAILURE); +} + +/* + * igb_tx_clean - Clean the pending transmit packets and DMA resources + */ +static void +igb_tx_clean(igb_t *igb) +{ + igb_tx_ring_t *tx_ring; + tx_control_block_t *tcb; + link_list_t pending_list; + uint32_t desc_num; + int i, j; + + LINK_LIST_INIT(&pending_list); + + for (i = 0; i < igb->num_tx_rings; i++) { + tx_ring = &igb->tx_rings[i]; + + mutex_enter(&tx_ring->recycle_lock); + + /* + * Clean the pending tx data - the pending packets in the + * work_list that have no chances to be 
transmitted again. + * + * We must ensure the chipset is stopped or the link is down + * before cleaning the transmit packets. + */ + desc_num = 0; + for (j = 0; j < tx_ring->ring_size; j++) { + tcb = tx_ring->work_list[j]; + if (tcb != NULL) { + desc_num += tcb->desc_num; + + tx_ring->work_list[j] = NULL; + + igb_free_tcb(tcb); + + LIST_PUSH_TAIL(&pending_list, &tcb->link); + } + } + + if (desc_num > 0) { + atomic_add_32(&tx_ring->tbd_free, desc_num); + ASSERT(tx_ring->tbd_free == tx_ring->ring_size); + + /* + * Reset the head and tail pointers of the tbd ring + */ + tx_ring->tbd_head = 0; + tx_ring->tbd_tail = 0; + + E1000_WRITE_REG(&igb->hw, E1000_TDH(tx_ring->index), 0); + E1000_WRITE_REG(&igb->hw, E1000_TDT(tx_ring->index), 0); + } + + mutex_exit(&tx_ring->recycle_lock); + + /* + * Add the tx control blocks in the pending list to + * the free list. + */ + igb_put_free_list(tx_ring, &pending_list); + } +} + +/* + * igb_tx_drain - Drain the tx rings to allow pending packets to be transmitted + */ +static boolean_t +igb_tx_drain(igb_t *igb) +{ + igb_tx_ring_t *tx_ring; + boolean_t done; + int i, j; + + /* + * Wait for a specific time to allow pending tx packets + * to be transmitted. + * + * Check the counter tbd_free to see if transmission is done. + * No lock protection is needed here. + * + * Return B_TRUE if all pending packets have been transmitted; + * Otherwise return B_FALSE; + */ + for (i = 0; i < TX_DRAIN_TIME; i++) { + + done = B_TRUE; + for (j = 0; j < igb->num_tx_rings; j++) { + tx_ring = &igb->tx_rings[j]; + done = done && + (tx_ring->tbd_free == tx_ring->ring_size); + } + + if (done) + break; + + msec_delay(1); + } + + return (done); +} + +/* + * igb_rx_drain - Wait for all rx buffers to be released by upper layer + */ +static boolean_t +igb_rx_drain(igb_t *igb) +{ + igb_rx_ring_t *rx_ring; + boolean_t done; + int i, j; + + /* + * Polling the rx free list to check if those rx buffers held by + * the upper layer are released. 
+ * + * Check the counter rcb_free to see if all pending buffers are + * released. No lock protection is needed here. + * + * Return B_TRUE if all pending buffers have been released; + * Otherwise return B_FALSE; + */ + for (i = 0; i < RX_DRAIN_TIME; i++) { + + done = B_TRUE; + for (j = 0; j < igb->num_rx_rings; j++) { + rx_ring = &igb->rx_rings[j]; + done = done && + (rx_ring->rcb_free == rx_ring->free_list_size); + } + + if (done) + break; + + msec_delay(1); + } + + return (done); +} + +/* + * igb_start - Start the driver/chipset + */ +int +igb_start(igb_t *igb) +{ + int i; + + ASSERT(mutex_owned(&igb->gen_lock)); + + for (i = 0; i < igb->num_rx_rings; i++) + mutex_enter(&igb->rx_rings[i].rx_lock); + for (i = 0; i < igb->num_tx_rings; i++) + mutex_enter(&igb->tx_rings[i].tx_lock); + + /* + * Start the chipset hardware + */ + if (igb_chip_start(igb) != IGB_SUCCESS) { + goto start_failure; + } + + /* + * Setup the rx/tx rings + */ + igb_setup_rings(igb); + + /* + * Enable adapter interrupts + * The interrupts must be enabled after the driver state is START + */ + igb_enable_adapter_interrupts(igb); + + for (i = igb->num_tx_rings - 1; i >= 0; i--) + mutex_exit(&igb->tx_rings[i].tx_lock); + for (i = igb->num_rx_rings - 1; i >= 0; i--) + mutex_exit(&igb->rx_rings[i].rx_lock); + + return (IGB_SUCCESS); + +start_failure: + for (i = igb->num_tx_rings - 1; i >= 0; i--) + mutex_exit(&igb->tx_rings[i].tx_lock); + for (i = igb->num_rx_rings - 1; i >= 0; i--) + mutex_exit(&igb->rx_rings[i].rx_lock); + + return (IGB_FAILURE); +} + +/* + * igb_stop - Stop the driver/chipset + */ +void +igb_stop(igb_t *igb) +{ + int i; + + ASSERT(mutex_owned(&igb->gen_lock)); + + /* + * Disable the adapter interrupts + */ + igb_disable_adapter_interrupts(igb); + + /* + * Drain the pending tx packets + */ + (void) igb_tx_drain(igb); + + for (i = 0; i < igb->num_rx_rings; i++) + mutex_enter(&igb->rx_rings[i].rx_lock); + for (i = 0; i < igb->num_tx_rings; i++) + 
mutex_enter(&igb->tx_rings[i].tx_lock); + + /* + * Stop the chipset hardware + */ + igb_chip_stop(igb); + + /* + * Clean the pending tx data/resources + */ + igb_tx_clean(igb); + + for (i = igb->num_tx_rings - 1; i >= 0; i--) + mutex_exit(&igb->tx_rings[i].tx_lock); + for (i = igb->num_rx_rings - 1; i >= 0; i--) + mutex_exit(&igb->rx_rings[i].rx_lock); +} + +/* + * igb_alloc_rings - Allocate memory space for rx/tx rings + */ +static int +igb_alloc_rings(igb_t *igb) +{ + /* + * Allocate memory space for rx rings + */ + igb->rx_rings = kmem_zalloc( + sizeof (igb_rx_ring_t) * igb->num_rx_rings, + KM_NOSLEEP); + + if (igb->rx_rings == NULL) { + return (IGB_FAILURE); + } + + /* + * Allocate memory space for tx rings + */ + igb->tx_rings = kmem_zalloc( + sizeof (igb_tx_ring_t) * igb->num_tx_rings, + KM_NOSLEEP); + + if (igb->tx_rings == NULL) { + kmem_free(igb->rx_rings, + sizeof (igb_rx_ring_t) * igb->num_rx_rings); + igb->rx_rings = NULL; + return (IGB_FAILURE); + } + + return (IGB_SUCCESS); +} + +/* + * igb_free_rings - Free the memory space of rx/tx rings. + */ +static void +igb_free_rings(igb_t *igb) +{ + if (igb->rx_rings != NULL) { + kmem_free(igb->rx_rings, + sizeof (igb_rx_ring_t) * igb->num_rx_rings); + igb->rx_rings = NULL; + } + + if (igb->tx_rings != NULL) { + kmem_free(igb->tx_rings, + sizeof (igb_tx_ring_t) * igb->num_tx_rings); + igb->tx_rings = NULL; + } +} + +/* + * igb_setup_rings - Setup rx/tx rings + */ +static void +igb_setup_rings(igb_t *igb) +{ + /* + * Setup the rx/tx rings, including the following: + * + * 1. Setup the descriptor ring and the control block buffers; + * 2. Initialize necessary registers for receive/transmit; + * 3. 
Initialize software pointers/parameters for receive/transmit; + */ + igb_setup_rx(igb); + + igb_setup_tx(igb); +} + +static void +igb_setup_rx_ring(igb_rx_ring_t *rx_ring) +{ + igb_t *igb = rx_ring->igb; + struct e1000_hw *hw = &igb->hw; + rx_control_block_t *rcb; + union e1000_adv_rx_desc *rbd; + uint32_t size; + uint32_t buf_low; + uint32_t buf_high; + uint32_t reg_val; + int i; + + ASSERT(mutex_owned(&rx_ring->rx_lock)); + ASSERT(mutex_owned(&igb->gen_lock)); + + for (i = 0; i < igb->rx_ring_size; i++) { + rcb = rx_ring->work_list[i]; + rbd = &rx_ring->rbd_ring[i]; + + rbd->read.pkt_addr = rcb->rx_buf.dma_address; + rbd->read.hdr_addr = NULL; + } + + /* + * Initialize the length register + */ + size = rx_ring->ring_size * sizeof (union e1000_adv_rx_desc); + E1000_WRITE_REG(hw, E1000_RDLEN(rx_ring->index), size); + + /* + * Initialize the base address registers + */ + buf_low = (uint32_t)rx_ring->rbd_area.dma_address; + buf_high = (uint32_t)(rx_ring->rbd_area.dma_address >> 32); + E1000_WRITE_REG(hw, E1000_RDBAH(rx_ring->index), buf_high); + E1000_WRITE_REG(hw, E1000_RDBAL(rx_ring->index), buf_low); + + /* + * Setup head & tail pointers + */ + E1000_WRITE_REG(hw, E1000_RDT(rx_ring->index), rx_ring->ring_size - 1); + E1000_WRITE_REG(hw, E1000_RDH(rx_ring->index), 0); + + rx_ring->rbd_next = 0; + + /* + * Note: Considering the case that the chipset is being reset + * and there are still some buffers held by the upper layer, + * we should not reset the values of rcb_head, rcb_tail and + * rcb_free; + */ + if (igb->igb_state == IGB_UNKNOWN) { + rx_ring->rcb_head = 0; + rx_ring->rcb_tail = 0; + rx_ring->rcb_free = rx_ring->free_list_size; + } + + /* + * Setup the Receive Descriptor Control Register (RXDCTL) + */ + reg_val = E1000_READ_REG(hw, E1000_RXDCTL(rx_ring->index)); + reg_val |= E1000_RXDCTL_QUEUE_ENABLE; + reg_val &= 0xFFF00000; + reg_val |= 16; /* pthresh */ + reg_val |= 8 << 8; /* hthresh */ + reg_val |= 1 << 16; /* wthresh */ + E1000_WRITE_REG(hw, 
E1000_RXDCTL(rx_ring->index), reg_val); + + /* + * Setup the Split and Replication Receive Control Register. + * Set the rx buffer size and the advanced descriptor type. + */ + reg_val = (igb->rx_buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) | + E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + + E1000_WRITE_REG(hw, E1000_SRRCTL(rx_ring->index), reg_val); +} + +static void +igb_setup_rx(igb_t *igb) +{ + igb_rx_ring_t *rx_ring; + struct e1000_hw *hw = &igb->hw; + uint32_t reg_val; + int i; + + for (i = 0; i < igb->num_rx_rings; i++) { + rx_ring = &igb->rx_rings[i]; + igb_setup_rx_ring(rx_ring); + } + + /* + * Setup the Receive Control Register (RCTL), and ENABLE the + * receiver. The initial configuration is to: Enable the receiver, + * accept broadcasts, discard bad packets (and long packets), + * disable VLAN filter checking, set the receive descriptor + * minimum threshold size to 1/2, and the receive buffer size to + * 2k. + */ + reg_val = E1000_RCTL_EN | /* Enable Receive Unit */ + E1000_RCTL_BAM | /* Accept Broadcast Packets */ + E1000_RCTL_LPE | /* Large Packet Enable bit */ + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT) | + E1000_RCTL_RDMTS_HALF | + E1000_RCTL_SECRC | /* Strip Ethernet CRC */ + E1000_RCTL_LBM_NO; /* Loopback Mode = none */ + + E1000_WRITE_REG(hw, E1000_RCTL, reg_val); + + /* + * Setup the Rx Long Packet Max Length register + */ + E1000_WRITE_REG(hw, E1000_RLPML, igb->max_frame_size); + + /* + * Hardware checksum settings + */ + if (igb->rx_hcksum_enable) { + reg_val = + E1000_RXCSUM_TUOFL | /* TCP/UDP checksum */ + E1000_RXCSUM_IPOFL; /* IP checksum */ + + E1000_WRITE_REG(hw, E1000_RXCSUM, reg_val); + } + + /* + * Setup RSS for multiple receive queues + */ + if (igb->num_rx_rings > 1) + igb_setup_rss(igb); +} + +static void +igb_setup_tx_ring(igb_tx_ring_t *tx_ring) +{ + igb_t *igb = tx_ring->igb; + struct e1000_hw *hw = &igb->hw; + uint32_t size; + uint32_t buf_low; + uint32_t buf_high; + uint32_t reg_val; + + ASSERT(mutex_owned(&tx_ring->tx_lock)); + 
ASSERT(mutex_owned(&igb->gen_lock)); + + /* + * Initialize the length register + */ + size = tx_ring->ring_size * sizeof (union e1000_adv_tx_desc); + E1000_WRITE_REG(hw, E1000_TDLEN(tx_ring->index), size); + + /* + * Initialize the base address registers + */ + buf_low = (uint32_t)tx_ring->tbd_area.dma_address; + buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32); + E1000_WRITE_REG(hw, E1000_TDBAL(tx_ring->index), buf_low); + E1000_WRITE_REG(hw, E1000_TDBAH(tx_ring->index), buf_high); + + /* + * Setup head & tail pointers + */ + E1000_WRITE_REG(hw, E1000_TDH(tx_ring->index), 0); + E1000_WRITE_REG(hw, E1000_TDT(tx_ring->index), 0); + + /* + * Setup head write-back + */ + if (igb->tx_head_wb_enable) { + /* + * The memory of the head write-back is allocated using + * the extra tbd beyond the tail of the tbd ring. + */ + tx_ring->tbd_head_wb = (uint32_t *) + ((uintptr_t)tx_ring->tbd_area.address + size); + + buf_low = (uint32_t) + (tx_ring->tbd_area.dma_address + size); + buf_high = (uint32_t) + ((tx_ring->tbd_area.dma_address + size) >> 32); + + /* Set the head write-back enable bit */ + buf_low |= E1000_TX_HEAD_WB_ENABLE; + + E1000_WRITE_REG(hw, E1000_TDWBAL(tx_ring->index), buf_low); + E1000_WRITE_REG(hw, E1000_TDWBAH(tx_ring->index), buf_high); + + /* + * Turn off relaxed ordering for head write back or it will + * cause problems with the tx recycling + */ + reg_val = E1000_READ_REG(hw, + E1000_DCA_TXCTRL(tx_ring->index)); + reg_val &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; + E1000_WRITE_REG(hw, + E1000_DCA_TXCTRL(tx_ring->index), reg_val); + } else { + tx_ring->tbd_head_wb = NULL; + } + + tx_ring->tbd_head = 0; + tx_ring->tbd_tail = 0; + tx_ring->tbd_free = tx_ring->ring_size; + + /* + * Note: Considering the case that the chipset is being reset, + * and there are still some buffers held by the upper layer, + * we should not reset the values of tcb_head, tcb_tail. 
+ */ + if (igb->igb_state == IGB_UNKNOWN) { + tx_ring->tcb_head = 0; + tx_ring->tcb_tail = 0; + tx_ring->tcb_free = tx_ring->free_list_size; + } else { + ASSERT(tx_ring->tcb_free == tx_ring->free_list_size); + } + + /* + * Initialize hardware checksum offload settings + */ + tx_ring->hcksum_context.hcksum_flags = 0; + tx_ring->hcksum_context.ip_hdr_len = 0; + tx_ring->hcksum_context.mac_hdr_len = 0; + tx_ring->hcksum_context.l4_proto = 0; +} + +static void +igb_setup_tx(igb_t *igb) +{ + igb_tx_ring_t *tx_ring; + struct e1000_hw *hw = &igb->hw; + uint32_t reg_val; + int i; + + for (i = 0; i < igb->num_tx_rings; i++) { + tx_ring = &igb->tx_rings[i]; + igb_setup_tx_ring(tx_ring); + } + + /* + * Setup the Transmit Control Register (TCTL) + */ + reg_val = E1000_TCTL_PSP | E1000_TCTL_EN | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) | + (E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT) | + E1000_TCTL_RTLC; + + /* Enable the MULR bit */ + if (hw->bus.type == e1000_bus_type_pci_express) + reg_val |= E1000_TCTL_MULR; + + E1000_WRITE_REG(hw, E1000_TCTL, reg_val); + + /* + * Set the default values for the Tx Inter Packet Gap timer + */ + if (hw->phy.media_type == e1000_media_type_fiber) + reg_val = DEFAULT_82543_TIPG_IPGT_FIBER; + else + reg_val = DEFAULT_82543_TIPG_IPGT_COPPER; + reg_val |= + DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; + reg_val |= + DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; + + E1000_WRITE_REG(hw, E1000_TIPG, reg_val); +} + +/* + * igb_setup_rss - Setup receive-side scaling feature + */ +static void +igb_setup_rss(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + uint32_t i, mrqc, rxcsum; + int shift; + uint32_t random; + union e1000_reta { + uint32_t dword; + uint8_t bytes[4]; + } reta; + + /* Setup the Redirection Table */ + shift = 6; + for (i = 0; i < (32 * 4); i++) { + reta.bytes[i & 3] = (i % igb->num_rx_rings) << shift; + if ((i & 3) == 3) { + E1000_WRITE_REG(hw, + (E1000_RETA(0) + (i & ~3)), reta.dword); + } + } + + /* Fill out 
hash function seeds */ + for (i = 0; i < 10; i++) { + (void) random_get_pseudo_bytes((uint8_t *)&random, + sizeof (uint32_t)); + E1000_WRITE_REG(hw, E1000_RSSRK(i), random); + } + + /* Setup the Multiple Receive Queue Control register */ + mrqc = E1000_MRQC_ENABLE_RSS_4Q; + mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV4_UDP | + E1000_MRQC_RSS_FIELD_IPV6_UDP | + E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); + + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); + + /* + * Disable Packet Checksum to enable RSS for multiple receive queues. + * + * The Packet Checksum is not ethernet CRC. It is another kind of + * checksum offloading provided by the 82575 chipset besides the IP + * header checksum offloading and the TCP/UDP checksum offloading. + * The Packet Checksum is by default computed over the entire packet + * from the first byte of the DA through the last byte of the CRC, + * including the Ethernet and IP headers. + * + * It is a hardware limitation that Packet Checksum is mutually + * exclusive with RSS. + */ + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); +} + +/* + * igb_init_unicst - Initialize the unicast addresses + */ +static void +igb_init_unicst(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + int slot; + + /* + * Here we should consider two situations: + * + * 1. Chipset is initialized the first time + * Initialize the multiple unicast addresses, and + * save the default mac address. + * + * 2. Chipset is reset + * Recover the multiple unicast addresses from the + * software data structure to the RAR registers. 
+ */ + if (!igb->unicst_init) { + /* Initialize the multiple unicast addresses */ + igb->unicst_total = MAX_NUM_UNICAST_ADDRESSES; + + igb->unicst_avail = igb->unicst_total - 1; + + /* Store the default mac address */ + e1000_rar_set(hw, hw->mac.addr, 0); + + bcopy(hw->mac.addr, igb->unicst_addr[0].mac.addr, + ETHERADDRL); + igb->unicst_addr[0].mac.set = 1; + + for (slot = 1; slot < igb->unicst_total; slot++) + igb->unicst_addr[slot].mac.set = 0; + + igb->unicst_init = B_TRUE; + } else { + /* Recover the default mac address */ + bcopy(igb->unicst_addr[0].mac.addr, hw->mac.addr, + ETHERADDRL); + + /* Store the default mac address */ + e1000_rar_set(hw, hw->mac.addr, 0); + + /* Re-configure the RAR registers */ + for (slot = 1; slot < igb->unicst_total; slot++) + e1000_rar_set(hw, + igb->unicst_addr[slot].mac.addr, slot); + } +} + +/* + * igb_unicst_set - Set the unicast address to the specified slot + */ +int +igb_unicst_set(igb_t *igb, const uint8_t *mac_addr, + mac_addr_slot_t slot) +{ + struct e1000_hw *hw = &igb->hw; + + ASSERT(mutex_owned(&igb->gen_lock)); + + /* + * Save the unicast address in the software data structure + */ + bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL); + + /* + * Set the unicast address to the RAR register + */ + e1000_rar_set(hw, (uint8_t *)mac_addr, slot); + + return (0); +} + +/* + * igb_multicst_add - Add a multicst address + */ +int +igb_multicst_add(igb_t *igb, const uint8_t *multiaddr) +{ + ASSERT(mutex_owned(&igb->gen_lock)); + + if ((multiaddr[0] & 01) == 0) { + return (EINVAL); + } + + if (igb->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) { + return (ENOENT); + } + + bcopy(multiaddr, + &igb->mcast_table[igb->mcast_count], ETHERADDRL); + igb->mcast_count++; + + /* + * Update the multicast table in the hardware + */ + igb_setup_multicst(igb); + + return (0); +} + +/* + * igb_multicst_remove - Remove a multicst address + */ +int +igb_multicst_remove(igb_t *igb, const uint8_t *multiaddr) +{ + int i; + + 
ASSERT(mutex_owned(&igb->gen_lock)); + + for (i = 0; i < igb->mcast_count; i++) { + if (bcmp(multiaddr, &igb->mcast_table[i], + ETHERADDRL) == 0) { + for (i++; i < igb->mcast_count; i++) { + igb->mcast_table[i - 1] = + igb->mcast_table[i]; + } + igb->mcast_count--; + break; + } + } + + /* + * Update the multicast table in the hardware + */ + igb_setup_multicst(igb); + + return (0); +} + +/* + * igb_setup_multicast - setup multicast data structures + * + * This routine initializes all of the multicast related structures + * and save them in the hardware registers. + */ +static void +igb_setup_multicst(igb_t *igb) +{ + uint8_t *mc_addr_list; + uint32_t mc_addr_count; + struct e1000_hw *hw = &igb->hw; + + ASSERT(mutex_owned(&igb->gen_lock)); + + ASSERT(igb->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES); + + mc_addr_list = (uint8_t *)igb->mcast_table; + mc_addr_count = igb->mcast_count; + + /* + * Update the multicase addresses to the MTA registers + */ + e1000_update_mc_addr_list(hw, mc_addr_list, mc_addr_count, + igb->unicst_total, hw->mac.rar_entry_count); +} + +/* + * igb_get_conf - Get driver configurations set in driver.conf + * + * This routine gets user-configured values out of the configuration + * file igb.conf. + * + * For each configurable value, there is a minimum, a maximum, and a + * default. + * If user does not configure a value, use the default. + * If user configures below the minimum, use the minumum. + * If user configures above the maximum, use the maxumum. + */ +static void +igb_get_conf(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + uint32_t default_mtu; + uint32_t flow_control; + + /* + * igb driver supports the following user configurations: + * + * Link configurations: + * adv_autoneg_cap + * adv_1000fdx_cap + * adv_100fdx_cap + * adv_100hdx_cap + * adv_10fdx_cap + * adv_10hdx_cap + * Note: 1000hdx is not supported. 
+ * + * Jumbo frame configuration: + * default_mtu + * + * Ethernet flow control configuration: + * flow_control + * + * Multiple rings configurations: + * tx_queue_number + * tx_ring_size + * rx_queue_number + * rx_ring_size + * + * Call igb_get_prop() to get the value for a specific + * configuration parameter. + */ + + /* + * Link configurations + */ + igb->param_adv_autoneg_cap = igb_get_prop(igb, + PROP_ADV_AUTONEG_CAP, 0, 1, 1); + igb->param_adv_1000fdx_cap = igb_get_prop(igb, + PROP_ADV_1000FDX_CAP, 0, 1, 1); + igb->param_adv_100fdx_cap = igb_get_prop(igb, + PROP_ADV_100FDX_CAP, 0, 1, 1); + igb->param_adv_100hdx_cap = igb_get_prop(igb, + PROP_ADV_100HDX_CAP, 0, 1, 1); + igb->param_adv_10fdx_cap = igb_get_prop(igb, + PROP_ADV_10FDX_CAP, 0, 1, 1); + igb->param_adv_10hdx_cap = igb_get_prop(igb, + PROP_ADV_10HDX_CAP, 0, 1, 1); + + /* + * Jumbo frame configurations + */ + default_mtu = igb_get_prop(igb, PROP_DEFAULT_MTU, + MIN_MTU, MAX_MTU, DEFAULT_MTU); + + igb->max_frame_size = default_mtu + + sizeof (struct ether_vlan_header) + ETHERFCSL; + + /* + * Ethernet flow control configuration + */ + flow_control = igb_get_prop(igb, PROP_FLOW_CONTROL, + e1000_fc_none, 4, e1000_fc_full); + if (flow_control == 4) + flow_control = e1000_fc_default; + + hw->fc.type = flow_control; + + /* + * Multiple rings configurations + */ + igb->num_tx_rings = igb_get_prop(igb, PROP_TX_QUEUE_NUM, + MIN_TX_QUEUE_NUM, MAX_TX_QUEUE_NUM, DEFAULT_TX_QUEUE_NUM); + igb->tx_ring_size = igb_get_prop(igb, PROP_TX_RING_SIZE, + MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE); + + igb->num_rx_rings = igb_get_prop(igb, PROP_RX_QUEUE_NUM, + MIN_RX_QUEUE_NUM, MAX_RX_QUEUE_NUM, DEFAULT_RX_QUEUE_NUM); + igb->rx_ring_size = igb_get_prop(igb, PROP_RX_RING_SIZE, + MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE); + + /* + * Tunable used to force an interrupt type. The only use is + * for testing of the lesser interrupt types. 
+ * 0 = don't force interrupt type + * 1 = force interrupt type MSIX + * 2 = force interrupt type MSI + * 3 = force interrupt type Legacy + */ + igb->intr_force = igb_get_prop(igb, PROP_INTR_FORCE, + IGB_INTR_NONE, IGB_INTR_LEGACY, IGB_INTR_MSI); + + igb->tx_hcksum_enable = igb_get_prop(igb, PROP_TX_HCKSUM_ENABLE, + 0, 1, 1); + igb->rx_hcksum_enable = igb_get_prop(igb, PROP_RX_HCKSUM_ENABLE, + 0, 1, 1); + igb->lso_enable = igb_get_prop(igb, PROP_LSO_ENABLE, + 0, 1, 0); + igb->tx_head_wb_enable = igb_get_prop(igb, PROP_TX_HEAD_WB_ENABLE, + 0, 1, 1); + + igb->tx_copy_thresh = igb_get_prop(igb, PROP_TX_COPY_THRESHOLD, + MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD, + DEFAULT_TX_COPY_THRESHOLD); + igb->tx_recycle_thresh = igb_get_prop(igb, PROP_TX_RECYCLE_THRESHOLD, + MIN_TX_RECYCLE_THRESHOLD, MAX_TX_RECYCLE_THRESHOLD, + DEFAULT_TX_RECYCLE_THRESHOLD); + igb->tx_overload_thresh = igb_get_prop(igb, PROP_TX_OVERLOAD_THRESHOLD, + MIN_TX_OVERLOAD_THRESHOLD, MAX_TX_OVERLOAD_THRESHOLD, + DEFAULT_TX_OVERLOAD_THRESHOLD); + igb->tx_resched_thresh = igb_get_prop(igb, PROP_TX_RESCHED_THRESHOLD, + MIN_TX_RESCHED_THRESHOLD, MAX_TX_RESCHED_THRESHOLD, + DEFAULT_TX_RESCHED_THRESHOLD); + + igb->rx_copy_thresh = igb_get_prop(igb, PROP_RX_COPY_THRESHOLD, + MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD, + DEFAULT_RX_COPY_THRESHOLD); + igb->rx_limit_per_intr = igb_get_prop(igb, PROP_RX_LIMIT_PER_INTR, + MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR, + DEFAULT_RX_LIMIT_PER_INTR); + + igb->intr_throttling[0] = igb_get_prop(igb, PROP_INTR_THROTTLING, + MIN_INTR_THROTTLING, MAX_INTR_THROTTLING, + DEFAULT_INTR_THROTTLING); +} + +/* + * igb_get_prop - Get a property value out of the configuration file igb.conf + * + * Caller provides the name of the property, a default value, a minimum + * value, and a maximum value. + * + * Return configured value of the property, with default, minimum and + * maximum properly applied. 
+ */
+static int
+igb_get_prop(igb_t *igb,
+ char *propname, /* name of the property */
+ int minval, /* minimum acceptable value */
+ int maxval, /* maximum acceptable value */
+ int defval) /* default value */
+{
+ int value;
+
+ /*
+ * Call ddi_prop_get_int() to read the conf settings
+ */
+ value = ddi_prop_get_int(DDI_DEV_T_ANY, igb->dip,
+ DDI_PROP_DONTPASS, propname, defval);
+
+ if (value > maxval)
+ value = maxval;
+
+ if (value < minval)
+ value = minval;
+
+ return (value);
+}
+
+/*
+ * igb_setup_link - Using the link properties to setup the link
+ */
+int
+igb_setup_link(igb_t *igb, boolean_t setup_hw)
+{
+ struct e1000_mac_info *mac;
+ struct e1000_phy_info *phy;
+ boolean_t invalid;
+
+ mac = &igb->hw.mac;
+ phy = &igb->hw.phy;
+ invalid = B_FALSE;
+
+ if (igb->param_adv_autoneg_cap == 1) {
+ mac->autoneg = B_TRUE;
+ phy->autoneg_advertised = 0;
+
+ /*
+ * 1000hdx is not supported for autonegotiation
+ */
+ if (igb->param_adv_1000fdx_cap == 1)
+ phy->autoneg_advertised |= ADVERTISE_1000_FULL;
+
+ if (igb->param_adv_100fdx_cap == 1)
+ phy->autoneg_advertised |= ADVERTISE_100_FULL;
+
+ if (igb->param_adv_100hdx_cap == 1)
+ phy->autoneg_advertised |= ADVERTISE_100_HALF;
+
+ if (igb->param_adv_10fdx_cap == 1)
+ phy->autoneg_advertised |= ADVERTISE_10_FULL;
+
+ if (igb->param_adv_10hdx_cap == 1)
+ phy->autoneg_advertised |= ADVERTISE_10_HALF;
+
+ if (phy->autoneg_advertised == 0)
+ invalid = B_TRUE;
+ } else {
+ mac->autoneg = B_FALSE;
+
+ /*
+ * 1000fdx and 1000hdx are not supported for forced link
+ */
+ if (igb->param_adv_100fdx_cap == 1)
+ mac->forced_speed_duplex = ADVERTISE_100_FULL;
+ else if (igb->param_adv_100hdx_cap == 1)
+ mac->forced_speed_duplex = ADVERTISE_100_HALF;
+ else if (igb->param_adv_10fdx_cap == 1)
+ mac->forced_speed_duplex = ADVERTISE_10_FULL;
+ else if (igb->param_adv_10hdx_cap == 1)
+ mac->forced_speed_duplex = ADVERTISE_10_HALF;
+ else
+ invalid = B_TRUE;
+ }
+
+ if (invalid) {
+ igb_notice(igb, "Invalid link settings.
Setup link to " + "autonegotiation with full link capabilities."); + mac->autoneg = B_TRUE; + phy->autoneg_advertised = ADVERTISE_1000_FULL | + ADVERTISE_100_FULL | ADVERTISE_100_HALF | + ADVERTISE_10_FULL | ADVERTISE_10_HALF; + } + + if (setup_hw) { + if (e1000_setup_link(&igb->hw) != E1000_SUCCESS) + return (IGB_FAILURE); + } + + return (IGB_SUCCESS); +} + + +/* + * igb_is_link_up - Check if the link is up + */ +static boolean_t +igb_is_link_up(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + boolean_t link_up; + + ASSERT(mutex_owned(&igb->gen_lock)); + + (void) e1000_check_for_link(hw); + + if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) || + ((hw->phy.media_type == e1000_media_type_internal_serdes) && + (hw->mac.serdes_has_link))) { + link_up = B_TRUE; + } else { + link_up = B_FALSE; + } + + return (link_up); +} + +/* + * igb_link_check - Link status processing + */ +static boolean_t +igb_link_check(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + uint16_t speed = 0, duplex = 0; + boolean_t link_changed = B_FALSE; + + ASSERT(mutex_owned(&igb->gen_lock)); + + if (igb_is_link_up(igb)) { + /* + * The Link is up, check whether it was marked as down earlier + */ + if (igb->link_state != LINK_STATE_UP) { + (void) e1000_get_speed_and_duplex(hw, &speed, &duplex); + igb->link_speed = speed; + igb->link_duplex = duplex; + igb->link_state = LINK_STATE_UP; + igb->link_down_timeout = 0; + link_changed = B_TRUE; + } + } else { + if (igb->link_state != LINK_STATE_DOWN) { + igb->link_speed = 0; + igb->link_duplex = 0; + igb->link_state = LINK_STATE_DOWN; + link_changed = B_TRUE; + } + + if (igb->igb_state & IGB_STARTED) { + if (igb->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) { + igb->link_down_timeout++; + } else if (igb->link_down_timeout == + MAX_LINK_DOWN_TIMEOUT) { + igb_tx_clean(igb); + igb->link_down_timeout++; + } + } + } + + return (link_changed); +} + +/* + * igb_local_timer - driver watchdog function + * + * This function will handle the transmit stall 
check, link status check and + * other routines. + */ +static void +igb_local_timer(void *arg) +{ + igb_t *igb = (igb_t *)arg; + struct e1000_hw *hw = &igb->hw; + boolean_t link_changed; + + if (igb_stall_check(igb)) { + igb->reset_count++; + (void) igb_reset(igb); + } + + mutex_enter(&igb->gen_lock); + link_changed = igb_link_check(igb); + mutex_exit(&igb->gen_lock); + + if (link_changed) + mac_link_update(igb->mac_hdl, igb->link_state); + + /* + * Set Timer Interrupts + */ + if (igb->intr_type != DDI_INTR_TYPE_MSIX) + E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); + + igb_restart_watchdog_timer(igb); +} + +/* + * igb_stall_check - check for transmit stall + * + * This function checks if the adapter is stalled (in transmit). + * + * It is called each time the watchdog timeout is invoked. + * If the transmit descriptor reclaim continuously fails, + * the watchdog value will increment by 1. If the watchdog + * value exceeds the threshold, the igb is assumed to + * have stalled and need to be reset. 
+ */ +static boolean_t +igb_stall_check(igb_t *igb) +{ + igb_tx_ring_t *tx_ring; + boolean_t result; + int i; + + if (igb->link_state != LINK_STATE_UP) + return (B_FALSE); + + /* + * If any tx ring is stalled, we'll reset the chipset + */ + result = B_FALSE; + for (i = 0; i < igb->num_tx_rings; i++) { + tx_ring = &igb->tx_rings[i]; + + if (tx_ring->recycle_fail > 0) + tx_ring->stall_watchdog++; + else + tx_ring->stall_watchdog = 0; + + if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { + result = B_TRUE; + break; + } + } + + if (result) { + tx_ring->stall_watchdog = 0; + tx_ring->recycle_fail = 0; + } + + return (result); +} + + +/* + * is_valid_mac_addr - Check if the mac address is valid + */ +static boolean_t +is_valid_mac_addr(uint8_t *mac_addr) +{ + const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; + const uint8_t addr_test2[6] = + { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + + if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || + !(bcmp(addr_test2, mac_addr, ETHERADDRL))) + return (B_FALSE); + + return (B_TRUE); +} + +static boolean_t +igb_find_mac_address(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; +#ifdef __sparc + uchar_t *bytes; + struct ether_addr sysaddr; + uint_t nelts; + int err; + boolean_t found = B_FALSE; + + /* + * The "vendor's factory-set address" may already have + * been extracted from the chip, but if the property + * "local-mac-address" is set we use that instead. + * + * We check whether it looks like an array of 6 + * bytes (which it should, if OBP set it). If we can't + * make sense of it this way, we'll ignore it. + */ + err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip, + DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); + if (err == DDI_PROP_SUCCESS) { + if (nelts == ETHERADDRL) { + while (nelts--) + hw->mac.addr[nelts] = bytes[nelts]; + found = B_TRUE; + } + ddi_prop_free(bytes); + } + + /* + * Look up the OBP property "local-mac-address?". If the user has set + * 'local-mac-address? 
= false', use "the system address" instead. + */ + if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip, 0, + "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { + if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { + if (localetheraddr(NULL, &sysaddr) != 0) { + bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); + found = B_TRUE; + } + } + ddi_prop_free(bytes); + } + + /* + * Finally(!), if there's a valid "mac-address" property (created + * if we netbooted from this interface), we must use this instead + * of any of the above to ensure that the NFS/install server doesn't + * get confused by the address changing as Solaris takes over! + */ + err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip, + DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); + if (err == DDI_PROP_SUCCESS) { + if (nelts == ETHERADDRL) { + while (nelts--) + hw->mac.addr[nelts] = bytes[nelts]; + found = B_TRUE; + } + ddi_prop_free(bytes); + } + + if (found) { + bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); + return (B_TRUE); + } +#endif + + /* + * Read the device MAC address from the EEPROM + */ + if (e1000_read_mac_addr(hw) != E1000_SUCCESS) + return (B_FALSE); + + return (B_TRUE); +} + +#pragma inline(igb_arm_watchdog_timer) + +static void +igb_arm_watchdog_timer(igb_t *igb) +{ + /* + * Fire a watchdog timer + */ + igb->watchdog_tid = + timeout(igb_local_timer, + (void *)igb, 1 * drv_usectohz(1000000)); + +} + +/* + * igb_enable_watchdog_timer - Enable and start the driver watchdog timer + */ +void +igb_enable_watchdog_timer(igb_t *igb) +{ + mutex_enter(&igb->watchdog_lock); + + if (!igb->watchdog_enable) { + igb->watchdog_enable = B_TRUE; + igb->watchdog_start = B_TRUE; + igb_arm_watchdog_timer(igb); + } + + mutex_exit(&igb->watchdog_lock); + +} + +/* + * igb_disable_watchdog_timer - Disable and stop the driver watchdog timer + */ +void +igb_disable_watchdog_timer(igb_t *igb) +{ + timeout_id_t tid; + + mutex_enter(&igb->watchdog_lock); + + igb->watchdog_enable = B_FALSE; + 
igb->watchdog_start = B_FALSE; + tid = igb->watchdog_tid; + igb->watchdog_tid = 0; + + mutex_exit(&igb->watchdog_lock); + + if (tid != 0) + (void) untimeout(tid); + +} + +/* + * igb_start_watchdog_timer - Start the driver watchdog timer + */ +static void +igb_start_watchdog_timer(igb_t *igb) +{ + mutex_enter(&igb->watchdog_lock); + + if (igb->watchdog_enable) { + if (!igb->watchdog_start) { + igb->watchdog_start = B_TRUE; + igb_arm_watchdog_timer(igb); + } + } + + mutex_exit(&igb->watchdog_lock); +} + +/* + * igb_restart_watchdog_timer - Restart the driver watchdog timer + */ +static void +igb_restart_watchdog_timer(igb_t *igb) +{ + mutex_enter(&igb->watchdog_lock); + + if (igb->watchdog_start) + igb_arm_watchdog_timer(igb); + + mutex_exit(&igb->watchdog_lock); +} + +/* + * igb_stop_watchdog_timer - Stop the driver watchdog timer + */ +static void +igb_stop_watchdog_timer(igb_t *igb) +{ + timeout_id_t tid; + + mutex_enter(&igb->watchdog_lock); + + igb->watchdog_start = B_FALSE; + tid = igb->watchdog_tid; + igb->watchdog_tid = 0; + + mutex_exit(&igb->watchdog_lock); + + if (tid != 0) + (void) untimeout(tid); +} + +/* + * igb_disable_adapter_interrupts - Clear/disable all hardware interrupts + */ +static void +igb_disable_adapter_interrupts(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + + /* + * Set the IMC register to mask all the interrupts, + * including the tx interrupts. 
+ */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + /* + * Additional disabling for MSI-X + */ + if (igb->intr_type == DDI_INTR_TYPE_MSIX) { + E1000_WRITE_REG(hw, E1000_EIMC, 0xffffffff); + E1000_WRITE_REG(hw, E1000_EIAC, 0x0); + } + + E1000_WRITE_FLUSH(hw); +} + +/* + * igb_enable_adapter_interrupts - Mask/enable all hardware interrupts + */ +static void +igb_enable_adapter_interrupts(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + uint32_t reg; + + if (igb->intr_type == DDI_INTR_TYPE_MSIX) { + /* Interrupt enabling for MSI-X */ + E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask); + E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask); + E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC); + + /* Enable MSI-X PBA support */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_PBA_CLR; + + /* Non-selective interrupt clear-on-read */ + reg |= E1000_CTRL_EXT_IRCA; /* Called NSICR in the EAS */ + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + } else { + /* Interrupt enabling for MSI and legacy */ + E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK); + } + + E1000_WRITE_FLUSH(hw); +} + +/* + * Loopback Support + */ +static lb_property_t lb_normal = + { normal, "normal", IGB_LB_NONE }; +static lb_property_t lb_external = + { external, "External", IGB_LB_EXTERNAL }; +static lb_property_t lb_mac = + { internal, "MAC", IGB_LB_INTERNAL_MAC }; +static lb_property_t lb_phy = + { internal, "PHY", IGB_LB_INTERNAL_PHY }; +static lb_property_t lb_serdes = + { internal, "SerDes", IGB_LB_INTERNAL_SERDES }; + +enum ioc_reply +igb_loopback_ioctl(igb_t *igb, struct iocblk *iocp, mblk_t *mp) +{ + lb_info_sz_t *lbsp; + lb_property_t *lbpp; + struct e1000_hw *hw; + uint32_t *lbmp; + uint32_t size; + uint32_t value; + + hw = &igb->hw; + + if (mp->b_cont == NULL) + return (IOC_INVAL); + + switch (iocp->ioc_cmd) { + default: + return (IOC_INVAL); + + case LB_GET_INFO_SIZE: + size = sizeof (lb_info_sz_t); + if (iocp->ioc_count != size) + return (IOC_INVAL); + + value = sizeof 
(lb_normal); + value += sizeof (lb_mac); + if (hw->phy.media_type == e1000_media_type_copper) + value += sizeof (lb_phy); + else + value += sizeof (lb_serdes); + value += sizeof (lb_external); + + lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; + *lbsp = value; + break; + + case LB_GET_INFO: + value = sizeof (lb_normal); + value += sizeof (lb_mac); + if (hw->phy.media_type == e1000_media_type_copper) + value += sizeof (lb_phy); + else + value += sizeof (lb_serdes); + value += sizeof (lb_external); + + size = value; + if (iocp->ioc_count != size) + return (IOC_INVAL); + + value = 0; + lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; + + lbpp[value++] = lb_normal; + lbpp[value++] = lb_mac; + if (hw->phy.media_type == e1000_media_type_copper) + lbpp[value++] = lb_phy; + else + lbpp[value++] = lb_serdes; + lbpp[value++] = lb_external; + break; + + case LB_GET_MODE: + size = sizeof (uint32_t); + if (iocp->ioc_count != size) + return (IOC_INVAL); + + lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; + *lbmp = igb->loopback_mode; + break; + + case LB_SET_MODE: + size = 0; + if (iocp->ioc_count != sizeof (uint32_t)) + return (IOC_INVAL); + + lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; + if (!igb_set_loopback_mode(igb, *lbmp)) + return (IOC_INVAL); + break; + } + + iocp->ioc_count = size; + iocp->ioc_error = 0; + + return (IOC_REPLY); +} + +/* + * igb_set_loopback_mode - Setup loopback based on the loopback mode + */ +static boolean_t +igb_set_loopback_mode(igb_t *igb, uint32_t mode) +{ + struct e1000_hw *hw; + + if (mode == igb->loopback_mode) + return (B_TRUE); + + hw = &igb->hw; + + igb->loopback_mode = mode; + + if (mode == IGB_LB_NONE) { + /* Reset the chip */ + hw->phy.autoneg_wait_to_complete = B_TRUE; + (void) igb_reset(igb); + hw->phy.autoneg_wait_to_complete = B_FALSE; + return (B_TRUE); + } + + mutex_enter(&igb->gen_lock); + + switch (mode) { + default: + mutex_exit(&igb->gen_lock); + return (B_FALSE); + + case IGB_LB_EXTERNAL: + 
igb_set_external_loopback(igb); + break; + + case IGB_LB_INTERNAL_MAC: + igb_set_internal_mac_loopback(igb); + break; + + case IGB_LB_INTERNAL_PHY: + igb_set_internal_phy_loopback(igb); + break; + + case IGB_LB_INTERNAL_SERDES: + igb_set_internal_serdes_loopback(igb); + break; + } + + mutex_exit(&igb->gen_lock); + + return (B_TRUE); +} + +/* + * igb_set_external_loopback - Set the external loopback mode + */ +static void +igb_set_external_loopback(igb_t *igb) +{ + struct e1000_hw *hw; + + hw = &igb->hw; + + /* Set phy to known state */ + (void) e1000_phy_hw_reset(hw); + + (void) e1000_write_phy_reg(hw, 0x0, 0x0140); + (void) e1000_write_phy_reg(hw, 0x9, 0x1b00); + (void) e1000_write_phy_reg(hw, 0x12, 0x1610); + (void) e1000_write_phy_reg(hw, 0x1f37, 0x3f1c); +} + +/* + * igb_set_internal_mac_loopback - Set the internal MAC loopback mode + */ +static void +igb_set_internal_mac_loopback(igb_t *igb) +{ + struct e1000_hw *hw; + uint32_t ctrl; + uint32_t rctl; + + hw = &igb->hw; + + /* Set the Receive Control register */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= ~E1000_RCTL_LBM_TCVR; + rctl |= E1000_RCTL_LBM_MAC; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* Set the Device Control register */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl |= (E1000_CTRL_SLU | /* Force link up */ + E1000_CTRL_FRCSPD | /* Force speed */ + E1000_CTRL_FRCDPX | /* Force duplex */ + E1000_CTRL_SPD_1000 | /* Force speed to 1000 */ + E1000_CTRL_FD); /* Force full duplex */ + ctrl &= ~E1000_CTRL_ILOS; /* Clear ILOS when there's a link */ + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); +} + +/* + * igb_set_internal_phy_loopback - Set the internal PHY loopback mode + */ +static void +igb_set_internal_phy_loopback(igb_t *igb) +{ + struct e1000_hw *hw; + uint32_t ctrl_ext; + uint16_t phy_ctrl; + uint16_t phy_pconf; + + hw = &igb->hw; + + /* Set link mode to PHY (00b) in the Extended Control register */ + ctrl_ext = E1000_READ_REG(hw, 
E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + /* + * Set PHY control register (0x4140): + * Set full duplex mode + * Set loopback bit + * Clear auto-neg enable bit + * Set PHY speed + */ + phy_ctrl = MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000 | MII_CR_LOOPBACK; + (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl); + + /* Set the link disable bit in the Port Configuration register */ + (void) e1000_read_phy_reg(hw, 0x10, &phy_pconf); + phy_pconf |= (uint16_t)1 << 14; + (void) e1000_write_phy_reg(hw, 0x10, phy_pconf); +} + +/* + * igb_set_internal_serdes_loopback - Set the internal SerDes loopback mode + */ +static void +igb_set_internal_serdes_loopback(igb_t *igb) +{ + struct e1000_hw *hw; + uint32_t ctrl_ext; + uint32_t ctrl; + uint32_t pcs_lctl; + uint32_t connsw; + + hw = &igb->hw; + + /* Set link mode to SerDes (11b) in the Extended Control register */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + /* Configure the SerDes to loopback */ + E1000_WRITE_REG(hw, E1000_SCTL, 0x410); + + /* Set Device Control register */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_FD | /* Force full duplex */ + E1000_CTRL_SLU); /* Force link up */ + ctrl &= ~(E1000_CTRL_RFCE | /* Disable receive flow control */ + E1000_CTRL_TFCE | /* Disable transmit flow control */ + E1000_CTRL_LRST); /* Clear link reset */ + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Set PCS Link Control register */ + pcs_lctl = E1000_READ_REG(hw, E1000_PCS_LCTL); + pcs_lctl |= (E1000_PCS_LCTL_FORCE_LINK | + E1000_PCS_LCTL_FSD | + E1000_PCS_LCTL_FDV_FULL | + E1000_PCS_LCTL_FLV_LINK_UP); + pcs_lctl &= ~E1000_PCS_LCTL_AN_ENABLE; + E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_lctl); + + /* Set the Copper/Fiber Switch Control - CONNSW register */ + connsw = E1000_READ_REG(hw, E1000_CONNSW); + connsw &= ~E1000_CONNSW_ENRGSRC; + 
E1000_WRITE_REG(hw, E1000_CONNSW, connsw); +} + +#pragma inline(igb_intr_rx_work) +/* + * igb_intr_rx_work - rx processing of ISR + */ +static void +igb_intr_rx_work(igb_rx_ring_t *rx_ring) +{ + mblk_t *mp; + + mutex_enter(&rx_ring->rx_lock); + mp = igb_rx(rx_ring); + mutex_exit(&rx_ring->rx_lock); + + if (mp != NULL) + mac_rx(rx_ring->igb->mac_hdl, NULL, mp); +} + +#pragma inline(igb_intr_tx_work) +/* + * igb_intr_tx_work - tx processing of ISR + */ +static void +igb_intr_tx_work(igb_tx_ring_t *tx_ring) +{ + /* Recycle the tx descriptors */ + tx_ring->tx_recycle(tx_ring); + + /* Schedule the re-transmit */ + if (tx_ring->reschedule && + (tx_ring->tbd_free >= tx_ring->resched_thresh)) { + tx_ring->reschedule = B_FALSE; + mac_tx_update(tx_ring->igb->mac_hdl); + IGB_DEBUG_STAT(tx_ring->stat_reschedule); + } +} + +#pragma inline(igb_intr_other_work) +/* + * igb_intr_other_work - other processing of ISR + */ +static void +igb_intr_other_work(igb_t *igb) +{ + boolean_t link_changed; + + igb_stop_watchdog_timer(igb); + + mutex_enter(&igb->gen_lock); + + /* + * Because we got a link-status-change interrupt, force + * e1000_check_for_link() to look at phy + */ + igb->hw.mac.get_link_status = B_TRUE; + + /* igb_link_check takes care of link status change */ + link_changed = igb_link_check(igb); + + /* Get new phy state */ + igb_get_phy_state(igb); + + mutex_exit(&igb->gen_lock); + + if (link_changed) + mac_link_update(igb->mac_hdl, igb->link_state); + + igb_start_watchdog_timer(igb); +} + +/* + * igb_intr_legacy - Interrupt handler for legacy interrupts + */ +static uint_t +igb_intr_legacy(void *arg1, void *arg2) +{ + igb_t *igb = (igb_t *)arg1; + igb_tx_ring_t *tx_ring; + uint32_t icr; + mblk_t *mp; + boolean_t tx_reschedule; + boolean_t link_changed; + uint_t result; + + _NOTE(ARGUNUSED(arg2)); + + mutex_enter(&igb->gen_lock); + + if (igb->igb_state & IGB_SUSPENDED) { + mutex_exit(&igb->gen_lock); + return (DDI_INTR_UNCLAIMED); + } + + mp = NULL; + tx_reschedule = 
B_FALSE; + link_changed = B_FALSE; + icr = E1000_READ_REG(&igb->hw, E1000_ICR); + + if (icr & E1000_ICR_INT_ASSERTED) { + /* + * E1000_ICR_INT_ASSERTED bit was set: + * Read(Clear) the ICR, claim this interrupt, + * look for work to do. + */ + ASSERT(igb->num_rx_rings == 1); + ASSERT(igb->num_tx_rings == 1); + + if (icr & E1000_ICR_RXT0) { + mp = igb_rx(&igb->rx_rings[0]); + } + + if (icr & E1000_ICR_TXDW) { + tx_ring = &igb->tx_rings[0]; + + /* Recycle the tx descriptors */ + tx_ring->tx_recycle(tx_ring); + + /* Schedule the re-transmit */ + tx_reschedule = (tx_ring->reschedule && + (tx_ring->tbd_free >= tx_ring->resched_thresh)); + } + + if (icr & E1000_ICR_LSC) { + /* + * Because we got a link-status-change interrupt, force + * e1000_check_for_link() to look at phy + */ + igb->hw.mac.get_link_status = B_TRUE; + + /* igb_link_check takes care of link status change */ + link_changed = igb_link_check(igb); + + /* Get new phy state */ + igb_get_phy_state(igb); + } + + result = DDI_INTR_CLAIMED; + } else { + /* + * E1000_ICR_INT_ASSERTED bit was not set: + * Don't claim this interrupt. + */ + result = DDI_INTR_UNCLAIMED; + } + + mutex_exit(&igb->gen_lock); + + /* + * Do the following work outside of the gen_lock + */ + if (mp != NULL) + mac_rx(igb->mac_hdl, NULL, mp); + + if (tx_reschedule) { + tx_ring->reschedule = B_FALSE; + mac_tx_update(igb->mac_hdl); + IGB_DEBUG_STAT(tx_ring->stat_reschedule); + } + + if (link_changed) + mac_link_update(igb->mac_hdl, igb->link_state); + + return (result); +} + +/* + * igb_intr_msi - Interrupt handler for MSI + */ +static uint_t +igb_intr_msi(void *arg1, void *arg2) +{ + igb_t *igb = (igb_t *)arg1; + uint32_t icr; + + _NOTE(ARGUNUSED(arg2)); + + icr = E1000_READ_REG(&igb->hw, E1000_ICR); + + /* + * For MSI interrupt, we have only one vector, + * so we have only one rx ring and one tx ring enabled. 
+ */
+ ASSERT(igb->num_rx_rings == 1);
+ ASSERT(igb->num_tx_rings == 1);
+
+ if (icr & E1000_ICR_RXT0) {
+ igb_intr_rx_work(&igb->rx_rings[0]);
+ }
+
+ if (icr & E1000_ICR_TXDW) {
+ igb_intr_tx_work(&igb->tx_rings[0]);
+ }
+
+ if (icr & E1000_ICR_LSC) {
+ igb_intr_other_work(igb);
+ }
+
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * igb_intr_rx - Interrupt handler for rx
+ */
+static uint_t
+igb_intr_rx(void *arg1, void *arg2)
+{
+ igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)arg1;
+
+ _NOTE(ARGUNUSED(arg2));
+
+ /*
+ * Only used via MSI-X vector so don't check cause bits
+ * and only clean the given ring.
+ */
+ igb_intr_rx_work(rx_ring);
+
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * igb_intr_tx_other - Interrupt handler for both tx and other
+ *
+ * Always look for Tx cleanup work. Only look for other work if the right
+ * bits are set in the Interrupt Cause Register.
+ */
+static uint_t
+igb_intr_tx_other(void *arg1, void *arg2)
+{
+ igb_t *igb = (igb_t *)arg1;
+ uint32_t icr;
+
+ _NOTE(ARGUNUSED(arg2));
+
+ icr = E1000_READ_REG(&igb->hw, E1000_ICR);
+
+ /*
+ * Always look for Tx cleanup work. We don't have separate
+ * transmit vectors, so we have only one tx ring enabled.
+ */
+ ASSERT(igb->num_tx_rings == 1);
+ igb_intr_tx_work(&igb->tx_rings[0]);
+
+ /*
+ * Check for "other" causes.
+ */
+ if (icr & E1000_ICR_LSC) {
+ igb_intr_other_work(igb);
+ }
+
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * igb_alloc_intrs - Allocate interrupts for the driver
+ *
+ * Normal sequence is to try MSI-X; if not successful, try MSI;
+ * if not successful, try Legacy.
+ * igb->intr_force can be used to force sequence to start with
+ * any of the 3 types.
+ * If MSI-X is not used, number of tx/rx rings is forced to 1.
+ */ +static int +igb_alloc_intrs(igb_t *igb) +{ + dev_info_t *devinfo; + int intr_types; + int rc; + + devinfo = igb->dip; + + /* Get supported interrupt types */ + rc = ddi_intr_get_supported_types(devinfo, &intr_types); + + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Get supported interrupt types failed: %d", rc); + return (IGB_FAILURE); + } + IGB_DEBUGLOG_1(igb, "Supported interrupt types: %x", intr_types); + + igb->intr_type = 0; + + /* Install MSI-X interrupts */ + if ((intr_types & DDI_INTR_TYPE_MSIX) && + (igb->intr_force <= IGB_INTR_MSIX)) { + rc = igb_alloc_intrs_msix(igb); + + if (rc == IGB_SUCCESS) + return (IGB_SUCCESS); + + igb_log(igb, + "Allocate MSI-X failed, trying MSI interrupts..."); + } + + /* MSI-X not used, force rings to 1 */ + igb->num_rx_rings = 1; + igb->num_tx_rings = 1; + igb_log(igb, + "MSI-X not used, force rx and tx queue number to 1"); + + /* Install MSI interrupts */ + if ((intr_types & DDI_INTR_TYPE_MSI) && + (igb->intr_force <= IGB_INTR_MSI)) { + rc = igb_alloc_intrs_msi(igb); + + if (rc == IGB_SUCCESS) + return (IGB_SUCCESS); + + igb_log(igb, + "Allocate MSI failed, trying Legacy interrupts..."); + } + + /* Install legacy interrupts */ + if (intr_types & DDI_INTR_TYPE_FIXED) { + rc = igb_alloc_intrs_legacy(igb); + + if (rc == IGB_SUCCESS) + return (IGB_SUCCESS); + + igb_log(igb, + "Allocate Legacy interrupts failed"); + } + + /* If none of the 3 types succeeded, return failure */ + return (IGB_FAILURE); +} + +/* + * igb_alloc_intrs_msix - Allocate the MSIX interrupts + * + * If fewer than 2 vectors are available, return failure. + * Upon success, this sets the number of Rx rings to a number that + * matches the vectors available for Rx interrupts. + */ +static int +igb_alloc_intrs_msix(igb_t *igb) +{ + dev_info_t *devinfo; + int request, count, avail, actual; + int rx_rings; + int rc; + + devinfo = igb->dip; + + /* + * Currently only 1 tx ring is supported. More tx rings + * will be supported with future enhancement. 
+ */ + if (igb->num_tx_rings > 1) { + igb->num_tx_rings = 1; + igb_log(igb, + "Use only 1 MSI-X vector for tx, " + "force tx queue number to 1"); + } + + /* + * Best number of vectors for the adapter is + * # rx rings + # tx rings + 1 for other + * But currently we only support number of vectors of + * # rx rings + 1 for tx & other + */ + request = igb->num_rx_rings + 1; + IGB_DEBUGLOG_1(igb, "MSI-X interrupts requested: %d", request); + + /* Get number of supported interrupts */ + rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSIX, &count); + if ((rc != DDI_SUCCESS) || (count == 0)) { + igb_log(igb, + "Get interrupt number failed. Return: %d, count: %d", + rc, count); + return (IGB_FAILURE); + } + IGB_DEBUGLOG_1(igb, "MSI-X interrupts supported: %d", count); + + /* Get number of available interrupts */ + rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSIX, &avail); + if ((rc != DDI_SUCCESS) || (avail == 0)) { + igb_log(igb, + "Get interrupt available number failed. " + "Return: %d, available: %d", rc, avail); + return (IGB_FAILURE); + } + IGB_DEBUGLOG_1(igb, "MSI-X interrupts available: %d", avail); + + if (avail < request) { + igb_log(igb, + "Request %d MSI-X vectors, %d available", + request, avail); + request = avail; + } + + actual = 0; + igb->intr_cnt = 0; + + /* Allocate an array of interrupt handles */ + igb->intr_size = request * sizeof (ddi_intr_handle_t); + igb->htable = kmem_alloc(igb->intr_size, KM_SLEEP); + + /* Call ddi_intr_alloc() */ + rc = ddi_intr_alloc(devinfo, igb->htable, DDI_INTR_TYPE_MSIX, 0, + request, &actual, DDI_INTR_ALLOC_NORMAL); + if (rc != DDI_SUCCESS) { + igb_log(igb, "Allocate MSI-X interrupts failed. " + "return: %d, request: %d, actual: %d", + rc, request, actual); + goto alloc_msix_fail; + } + IGB_DEBUGLOG_1(igb, "MSI-X interrupts actually allocated: %d", actual); + + igb->intr_cnt = actual; + + /* + * Now we know the actual number of vectors. 
Here we assume that + * tx and other will share 1 vector and all remaining (must be at + * least 1 remaining) will be used for rx. + */ + if (actual < 2) { + igb_log(igb, "Insufficient MSI-X interrupts available: %d", + actual); + goto alloc_msix_fail; + } + + rx_rings = actual - 1; + if (rx_rings < igb->num_rx_rings) { + igb_log(igb, "MSI-X vectors force Rx queue number to %d", + rx_rings); + igb->num_rx_rings = rx_rings; + } + + /* Get priority for first vector, assume remaining are all the same */ + rc = ddi_intr_get_pri(igb->htable[0], &igb->intr_pri); + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Get interrupt priority failed: %d", rc); + goto alloc_msix_fail; + } + + rc = ddi_intr_get_cap(igb->htable[0], &igb->intr_cap); + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Get interrupt cap failed: %d", rc); + goto alloc_msix_fail; + } + + igb->intr_type = DDI_INTR_TYPE_MSIX; + + return (IGB_SUCCESS); + +alloc_msix_fail: + igb_rem_intrs(igb); + + return (IGB_FAILURE); +} + +/* + * igb_alloc_intrs_msi - Allocate the MSI interrupts + */ +static int +igb_alloc_intrs_msi(igb_t *igb) +{ + dev_info_t *devinfo; + int request, count, avail, actual; + int rc; + + devinfo = igb->dip; + + /* Request 1 MSI interrupt vector */ + request = 1; + IGB_DEBUGLOG_1(igb, "MSI interrupts requested: %d", request); + + /* Get number of supported interrupts */ + rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count); + if ((rc != DDI_SUCCESS) || (count == 0)) { + igb_log(igb, + "Get MSI supported number failed. Return: %d, count: %d", + rc, count); + return (IGB_FAILURE); + } + IGB_DEBUGLOG_1(igb, "MSI interrupts supported: %d", count); + + /* Get number of available interrupts */ + rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail); + if ((rc != DDI_SUCCESS) || (avail == 0)) { + igb_log(igb, + "Get MSI available number failed. 
" + "Return: %d, available: %d", rc, avail); + return (IGB_FAILURE); + } + IGB_DEBUGLOG_1(igb, "MSI interrupts available: %d", avail); + + actual = 0; + igb->intr_cnt = 0; + + /* Allocate an array of interrupt handles */ + igb->intr_size = request * sizeof (ddi_intr_handle_t); + igb->htable = kmem_alloc(igb->intr_size, KM_SLEEP); + + /* Call ddi_intr_alloc() */ + rc = ddi_intr_alloc(devinfo, igb->htable, DDI_INTR_TYPE_MSI, 0, + request, &actual, DDI_INTR_ALLOC_NORMAL); + if ((rc != DDI_SUCCESS) || (actual == 0)) { + igb_log(igb, + "Allocate MSI interrupts failed: %d", rc); + goto alloc_msi_fail; + } + + ASSERT(actual == 1); + igb->intr_cnt = actual; + + /* Get priority for first msi, assume remaining are all the same */ + rc = ddi_intr_get_pri(igb->htable[0], &igb->intr_pri); + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Get interrupt priority failed: %d", rc); + goto alloc_msi_fail; + } + + rc = ddi_intr_get_cap(igb->htable[0], &igb->intr_cap); + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Get interrupt cap failed: %d\n", rc); + goto alloc_msi_fail; + + } + + igb->intr_type = DDI_INTR_TYPE_MSI; + + return (IGB_SUCCESS); + +alloc_msi_fail: + igb_rem_intrs(igb); + + return (IGB_FAILURE); +} + +/* + * igb_alloc_intrs_legacy - Allocate the Legacy interrupts + */ +static int +igb_alloc_intrs_legacy(igb_t *igb) +{ + dev_info_t *devinfo; + int request, count, avail, actual; + int rc; + + devinfo = igb->dip; + + /* Request 1 Legacy interrupt vector */ + request = 1; + IGB_DEBUGLOG_1(igb, "Legacy interrupts requested: %d", request); + + /* Get number of supported interrupts */ + rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count); + if ((rc != DDI_SUCCESS) || (count == 0)) { + igb_log(igb, + "Get Legacy supported number failed. 
Return: %d, count: %d", + rc, count); + return (IGB_FAILURE); + } + IGB_DEBUGLOG_1(igb, "Legacy interrupts supported: %d", count); + + /* Get number of available interrupts */ + rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_FIXED, &avail); + if ((rc != DDI_SUCCESS) || (avail == 0)) { + igb_log(igb, + "Get Legacy available number failed. " + "Return: %d, available: %d", rc, avail); + return (IGB_FAILURE); + } + IGB_DEBUGLOG_1(igb, "Legacy interrupts available: %d", avail); + + actual = 0; + igb->intr_cnt = 0; + + /* Allocate an array of interrupt handles */ + igb->intr_size = request * sizeof (ddi_intr_handle_t); + igb->htable = kmem_alloc(igb->intr_size, KM_SLEEP); + + /* Call ddi_intr_alloc() */ + rc = ddi_intr_alloc(devinfo, igb->htable, DDI_INTR_TYPE_FIXED, 0, + request, &actual, DDI_INTR_ALLOC_NORMAL); + if ((rc != DDI_SUCCESS) || (actual == 0)) { + igb_log(igb, + "Allocate Legacy interrupts failed: %d", rc); + goto alloc_legacy_fail; + } + + ASSERT(actual == 1); + igb->intr_cnt = actual; + + /* Get priority for first msi, assume remaining are all the same */ + rc = ddi_intr_get_pri(igb->htable[0], &igb->intr_pri); + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Get interrupt priority failed: %d", rc); + goto alloc_legacy_fail; + } + + rc = ddi_intr_get_cap(igb->htable[0], &igb->intr_cap); + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Get interrupt cap failed: %d\n", rc); + goto alloc_legacy_fail; + } + + igb->intr_type = DDI_INTR_TYPE_FIXED; + + return (IGB_SUCCESS); + +alloc_legacy_fail: + igb_rem_intrs(igb); + + return (IGB_FAILURE); +} + +/* + * igb_add_intr_handlers - Add interrupt handlers based on the interrupt type + * + * Before adding the interrupt handlers, the interrupt vectors have + * been allocated, and the rx/tx rings have also been allocated. 
+ */ +static int +igb_add_intr_handlers(igb_t *igb) +{ + igb_rx_ring_t *rx_ring; + int vector; + int rc; + int i; + + vector = 0; + + switch (igb->intr_type) { + case DDI_INTR_TYPE_MSIX: + /* Add interrupt handler for tx + other */ + rc = ddi_intr_add_handler(igb->htable[vector], + (ddi_intr_handler_t *)igb_intr_tx_other, + (void *)igb, NULL); + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Add tx/other interrupt handler failed: %d", rc); + return (IGB_FAILURE); + } + vector++; + + /* Add interrupt handler for each rx ring */ + for (i = 0; i < igb->num_rx_rings; i++) { + rx_ring = &igb->rx_rings[i]; + + rc = ddi_intr_add_handler(igb->htable[vector], + (ddi_intr_handler_t *)igb_intr_rx, + (void *)rx_ring, NULL); + + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Add rx interrupt handler failed. " + "return: %d, rx ring: %d", rc, i); + for (vector--; vector >= 0; vector--) { + (void) ddi_intr_remove_handler( + igb->htable[vector]); + } + return (IGB_FAILURE); + } + + rx_ring->intr_vector = vector; + + vector++; + } + break; + + case DDI_INTR_TYPE_MSI: + /* Add interrupt handlers for the only vector */ + rc = ddi_intr_add_handler(igb->htable[vector], + (ddi_intr_handler_t *)igb_intr_msi, + (void *)igb, NULL); + + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Add MSI interrupt handler failed: %d", rc); + return (IGB_FAILURE); + } + + rx_ring = &igb->rx_rings[0]; + rx_ring->intr_vector = vector; + + vector++; + break; + + case DDI_INTR_TYPE_FIXED: + /* Add interrupt handlers for the only vector */ + rc = ddi_intr_add_handler(igb->htable[vector], + (ddi_intr_handler_t *)igb_intr_legacy, + (void *)igb, NULL); + + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Add legacy interrupt handler failed: %d", rc); + return (IGB_FAILURE); + } + + rx_ring = &igb->rx_rings[0]; + rx_ring->intr_vector = vector; + + vector++; + break; + + default: + return (IGB_FAILURE); + } + + ASSERT(vector == igb->intr_cnt); + + return (IGB_SUCCESS); +} + +/* + * igb_setup_adapter_msix - setup the adapter to use 
MSI-X interrupts + * + * For each vector enabled on the adapter, Set the MSIXBM register accordingly + */ +static void +igb_setup_adapter_msix(igb_t *igb) +{ + uint32_t eims = 0; + int i, vector; + struct e1000_hw *hw = &igb->hw; + + /* + * Set vector for Tx + Other causes + * NOTE assumption that there is only one of these and it is vector 0 + */ + vector = 0; + igb->eims_mask = E1000_EICR_TX_QUEUE0 | E1000_EICR_OTHER; + E1000_WRITE_REG(hw, E1000_MSIXBM(vector), igb->eims_mask); + + vector++; + for (i = 0; i < igb->num_rx_rings; i++) { + /* + * Set vector for each rx ring + */ + eims = (E1000_EICR_RX_QUEUE0 << i); + E1000_WRITE_REG(hw, E1000_MSIXBM(vector), eims); + + /* + * Accumulate bits to enable in igb_enable_adapter_interrupts() + */ + igb->eims_mask |= eims; + + vector++; + } + + ASSERT(vector == igb->intr_cnt); + + /* + * Disable IAM for ICR interrupt bits + */ + E1000_WRITE_REG(hw, E1000_IAM, 0); + E1000_WRITE_FLUSH(hw); +} + +/* + * igb_rem_intr_handlers - remove the interrupt handlers + */ +static void +igb_rem_intr_handlers(igb_t *igb) +{ + int i; + int rc; + + for (i = 0; i < igb->intr_cnt; i++) { + rc = ddi_intr_remove_handler(igb->htable[i]); + if (rc != DDI_SUCCESS) { + IGB_DEBUGLOG_1(igb, + "Remove intr handler failed: %d", rc); + } + } +} + +/* + * igb_rem_intrs - remove the allocated interrupts + */ +static void +igb_rem_intrs(igb_t *igb) +{ + int i; + int rc; + + for (i = 0; i < igb->intr_cnt; i++) { + rc = ddi_intr_free(igb->htable[i]); + if (rc != DDI_SUCCESS) { + IGB_DEBUGLOG_1(igb, + "Free intr failed: %d", rc); + } + } + + kmem_free(igb->htable, igb->intr_size); + igb->htable = NULL; +} + +/* + * igb_enable_intrs - enable all the ddi interrupts + */ +static int +igb_enable_intrs(igb_t *igb) +{ + int i; + int rc; + + /* Enable interrupts */ + if (igb->intr_cap & DDI_INTR_FLAG_BLOCK) { + /* Call ddi_intr_block_enable() for MSI */ + rc = ddi_intr_block_enable(igb->htable, igb->intr_cnt); + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Enable 
block intr failed: %d", rc); + return (IGB_FAILURE); + } + } else { + /* Call ddi_intr_enable() for Legacy/MSI non block enable */ + for (i = 0; i < igb->intr_cnt; i++) { + rc = ddi_intr_enable(igb->htable[i]); + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Enable intr failed: %d", rc); + return (IGB_FAILURE); + } + } + } + + return (IGB_SUCCESS); +} + +/* + * igb_disable_intrs - disable all the ddi interrupts + */ +static int +igb_disable_intrs(igb_t *igb) +{ + int i; + int rc; + + /* Disable all interrupts */ + if (igb->intr_cap & DDI_INTR_FLAG_BLOCK) { + rc = ddi_intr_block_disable(igb->htable, igb->intr_cnt); + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Disable block intr failed: %d", rc); + return (IGB_FAILURE); + } + } else { + for (i = 0; i < igb->intr_cnt; i++) { + rc = ddi_intr_disable(igb->htable[i]); + if (rc != DDI_SUCCESS) { + igb_log(igb, + "Disable intr failed: %d", rc); + return (IGB_FAILURE); + } + } + } + + return (IGB_SUCCESS); +} + +/* + * igb_get_phy_state - Get and save the parameters read from PHY registers + */ +static void +igb_get_phy_state(igb_t *igb) +{ + struct e1000_hw *hw = &igb->hw; + uint16_t phy_ctrl; + uint16_t phy_status; + uint16_t phy_an_adv; + uint16_t phy_an_exp; + uint16_t phy_ext_status; + uint16_t phy_1000t_ctrl; + uint16_t phy_1000t_status; + uint16_t phy_lp_able; + + ASSERT(mutex_owned(&igb->gen_lock)); + + (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); + (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status); + (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &phy_an_adv); + (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_an_exp); + (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, &phy_ext_status); + (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_1000t_ctrl); + (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_1000t_status); + (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_lp_able); + + igb->param_autoneg_cap = + (phy_status & MII_SR_AUTONEG_CAPS) ? 
1 : 0; + igb->param_pause_cap = + (phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; + igb->param_asym_pause_cap = + (phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; + igb->param_1000fdx_cap = ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || + (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0; + igb->param_1000hdx_cap = ((phy_ext_status & IEEE_ESR_1000T_HD_CAPS) || + (phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0; + igb->param_100t4_cap = + (phy_status & MII_SR_100T4_CAPS) ? 1 : 0; + igb->param_100fdx_cap = ((phy_status & MII_SR_100X_FD_CAPS) || + (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0; + igb->param_100hdx_cap = ((phy_status & MII_SR_100X_HD_CAPS) || + (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0; + igb->param_10fdx_cap = + (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0; + igb->param_10hdx_cap = + (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0; + igb->param_rem_fault = + (phy_status & MII_SR_REMOTE_FAULT) ? 1 : 0; + + igb->param_adv_autoneg_cap = hw->mac.autoneg; + igb->param_adv_pause_cap = + (phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; + igb->param_adv_asym_pause_cap = + (phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; + igb->param_adv_1000hdx_cap = + (phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0; + igb->param_adv_100t4_cap = + (phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0; + igb->param_adv_rem_fault = + (phy_an_adv & NWAY_AR_REMOTE_FAULT) ? 1 : 0; + if (igb->param_adv_autoneg_cap == 1) { + igb->param_adv_1000fdx_cap = + (phy_1000t_ctrl & CR_1000T_FD_CAPS) ? 1 : 0; + igb->param_adv_100fdx_cap = + (phy_an_adv & NWAY_AR_100TX_FD_CAPS) ? 1 : 0; + igb->param_adv_100hdx_cap = + (phy_an_adv & NWAY_AR_100TX_HD_CAPS) ? 1 : 0; + igb->param_adv_10fdx_cap = + (phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0; + igb->param_adv_10hdx_cap = + (phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0; + } + + igb->param_lp_autoneg_cap = + (phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0; + igb->param_lp_pause_cap = + (phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0; + igb->param_lp_asym_pause_cap = + (phy_lp_able & NWAY_LPAR_ASM_DIR) ? 
1 : 0; + igb->param_lp_1000fdx_cap = + (phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0; + igb->param_lp_1000hdx_cap = + (phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0; + igb->param_lp_100t4_cap = + (phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0; + igb->param_lp_100fdx_cap = + (phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0; + igb->param_lp_100hdx_cap = + (phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0; + igb->param_lp_10fdx_cap = + (phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0; + igb->param_lp_10hdx_cap = + (phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0; + igb->param_lp_rem_fault = + (phy_lp_able & NWAY_LPAR_REMOTE_FAULT) ? 1 : 0; +} + +/* + * igb_get_driver_control + */ +static void +igb_get_driver_control(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + + /* Notify firmware that driver is in control of device */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_DRV_LOAD; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); +} + +/* + * igb_release_driver_control + */ +static void +igb_release_driver_control(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + + /* Notify firmware that driver is no longer in control of device */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_DRV_LOAD; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); +} + +/* + * igb_atomic_reserve - Atomic decrease operation + */ +int +igb_atomic_reserve(uint32_t *count_p, uint32_t n) +{ + uint32_t oldval; + uint32_t newval; + + /* ATOMICALLY */ + do { + oldval = *count_p; + if (oldval < n) + return (-1); + newval = oldval - n; + } while (atomic_cas_32(count_p, oldval, newval) != oldval); + + return (newval); +} diff --git a/usr/src/uts/common/io/igb/igb_manage.c b/usr/src/uts/common/io/igb/igb_manage.c new file mode 100644 index 0000000000..6cf9459e99 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_manage.c @@ -0,0 +1,393 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. 
+ * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_api.h" +#include "igb_manage.h" + +static u8 e1000_calculate_checksum(u8 *buffer, u32 length); + +/* + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + */ +static u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("e1000_calculate_checksum"); + + if (!buffer) + return (0); + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/* + * e1000_mng_enable_host_if_generic - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operaton + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. 
+ */ +s32 +e1000_mng_enable_host_if_generic(struct e1000_hw *hw) +{ + u32 hicr; + s32 ret_val = E1000_SUCCESS; + u8 i; + + DEBUGFUNC("e1000_mng_enable_host_if_generic"); + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if ((hicr & E1000_HICR_EN) == 0) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay_irq(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + DEBUGOUT("Previous command timeout failed .\n"); + ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_check_mng_mode_generic - Generic check managament mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + */ +bool +e1000_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm; + + DEBUGFUNC("e1000_check_mng_mode_generic"); + + fwsm = E1000_READ_REG(hw, E1000_FWSM); + + return ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/* + * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on TX + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. 
+ */ +bool +e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + bool tx_filter = TRUE; + + DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic"); + + /* No manageability, no filtering */ + if (!e1000_check_mng_mode(hw)) { + tx_filter = FALSE; + goto out; + } + + /* + * If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val != E1000_SUCCESS) { + tx_filter = FALSE; + goto out; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) { + *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, + E1000_HOST_IF, offset + i); + } + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* + * If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if (hdr_csum != csum) + goto out; + if (hdr->signature != E1000_IAMT_SIGNATURE) + goto out; + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) + tx_filter = FALSE; + +out: + hw->mac.tx_pkt_filtering = tx_filter; + return (tx_filter); +} + +/* + * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. 
+ */ +s32 +e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, + u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) + goto out; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write(hw, buffer, length, + sizeof (hdr), &(hdr.checksum)); + if (ret_val) + goto out; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val) + goto out; + + /* Tell the ARC a new command is pending. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + +out: + return (ret_val); +} + +/* + * e1000_mng_write_cmd_header_generic - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + */ +s32 +e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof (struct e1000_host_mng_command_header); + + DEBUGFUNC("e1000_mng_write_cmd_header_generic"); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. 
*/ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *)(uintptr_t)hdr + i)); + E1000_WRITE_FLUSH(hw); + } + + return (E1000_SUCCESS); +} + +/* + * e1000_mng_host_if_write_generic - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + */ +s32 +e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + s32 ret_val = E1000_SUCCESS; + u16 remaining, i, j, prev_bytes; + + DEBUGFUNC("e1000_mng_host_if_write_generic"); + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) { + ret_val = -E1000_ERR_PARAM; + goto out; + } + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof (u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* + * The device driver writes the relevant command block into the + * ram area. 
+ */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof (u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof (u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + offset + i, data); + } + +out: + return (ret_val); +} + +/* + * e1000_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to allow ARPs to be processed by the host. + */ +bool +e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = FALSE; + + DEBUGFUNC("e1000_enable_mng_pass_thru"); + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = E1000_READ_REG(hw, E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + factps = E1000_READ_REG(hw, E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = TRUE; + goto out; + } + } else { + if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = TRUE; + goto out; + } + } + +out: + return (ret_val); +} diff --git a/usr/src/uts/common/io/igb/igb_manage.h b/usr/src/uts/common/io/igb/igb_manage.h new file mode 100644 index 0000000000..6801e23fbd --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_manage.h @@ -0,0 +1,92 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#ifndef _IGB_MANAGE_H +#define _IGB_MANAGE_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +bool e1000_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, + u8 *buffer, u16 length); + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +} e1000_mng_mode; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define 
E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +/* Process HI command limit */ +#define E1000_HI_COMMAND_TIMEOUT 500 + +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_MANAGE_H */ diff --git a/usr/src/uts/common/io/igb/igb_ndd.c b/usr/src/uts/common/io/igb/igb_ndd.c new file mode 100644 index 0000000000..9e4e8d0c68 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_ndd.c @@ -0,0 +1,368 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_sw.h" + +/* Function prototypes */ +static int igb_nd_get(queue_t *, mblk_t *, caddr_t, cred_t *); +static int igb_nd_set(queue_t *, mblk_t *, char *, caddr_t, cred_t *); +static int igb_nd_param_load(igb_t *); +static void igb_nd_get_param_val(nd_param_t *); +static void igb_nd_set_param_val(nd_param_t *, uint32_t); + +/* + * Notes: + * The first character of the <name> field encodes the read/write + * status of the parameter: + * '-' => read-only + * '+' => read/write, + * '?' => read/write on copper, read-only on serdes + * '!' => invisible! + * + * For writable parameters, we check for a driver property with the + * same name; if found, and its value is in range, we initialise + * the parameter from the property, overriding the default in the + * table below. + * + * A NULL in the <name> field terminates the array. + * + * The <info> field is used here to provide the index of the + * parameter to be initialised; thus it doesn't matter whether + * this table is kept ordered or not. + * + * The <info> field in the per-instance copy, on the other hand, + * is used to count assignments so that we can tell when a magic + * parameter has been set via ndd (see igb_nd_set()). 
+ */ +static const nd_param_t nd_template[] = { +/* igb info min max init r/w+name */ + +/* Our hardware capabilities */ +{ NULL, PARAM_AUTONEG_CAP, 0, 1, 1, "-autoneg_cap" }, +{ NULL, PARAM_PAUSE_CAP, 0, 1, 1, "-pause_cap" }, +{ NULL, PARAM_ASYM_PAUSE_CAP, 0, 1, 1, "-asym_pause_cap" }, +{ NULL, PARAM_1000FDX_CAP, 0, 1, 1, "-1000fdx_cap" }, +{ NULL, PARAM_1000HDX_CAP, 0, 1, 1, "-1000hdx_cap" }, +{ NULL, PARAM_100T4_CAP, 0, 1, 0, "-100T4_cap" }, +{ NULL, PARAM_100FDX_CAP, 0, 1, 1, "-100fdx_cap" }, +{ NULL, PARAM_100HDX_CAP, 0, 1, 1, "-100hdx_cap" }, +{ NULL, PARAM_10FDX_CAP, 0, 1, 1, "-10fdx_cap" }, +{ NULL, PARAM_10HDX_CAP, 0, 1, 1, "-10hdx_cap" }, +{ NULL, PARAM_REM_FAULT, 0, 1, 0, "-rem_fault" }, + +/* Our advertised capabilities */ +{ NULL, PARAM_ADV_AUTONEG_CAP, 0, 1, 1, "?adv_autoneg_cap" }, +{ NULL, PARAM_ADV_PAUSE_CAP, 0, 1, 1, "-adv_pause_cap" }, +{ NULL, PARAM_ADV_ASYM_PAUSE_CAP, 0, 1, 1, "-adv_asym_pause_cap" }, +{ NULL, PARAM_ADV_1000FDX_CAP, 0, 1, 1, "?adv_1000fdx_cap" }, +{ NULL, PARAM_ADV_1000HDX_CAP, 0, 1, 1, "-adv_1000hdx_cap" }, +{ NULL, PARAM_ADV_100T4_CAP, 0, 1, 0, "-adv_100T4_cap" }, +{ NULL, PARAM_ADV_100FDX_CAP, 0, 1, 1, "?adv_100fdx_cap" }, +{ NULL, PARAM_ADV_100HDX_CAP, 0, 1, 1, "?adv_100hdx_cap" }, +{ NULL, PARAM_ADV_10FDX_CAP, 0, 1, 1, "?adv_10fdx_cap" }, +{ NULL, PARAM_ADV_10HDX_CAP, 0, 1, 1, "?adv_10hdx_cap" }, +{ NULL, PARAM_ADV_REM_FAULT, 0, 1, 0, "-adv_rem_fault" }, + +/* Partner's advertised capabilities */ +{ NULL, PARAM_LP_AUTONEG_CAP, 0, 1, 0, "-lp_autoneg_cap" }, +{ NULL, PARAM_LP_PAUSE_CAP, 0, 1, 0, "-lp_pause_cap" }, +{ NULL, PARAM_LP_ASYM_PAUSE_CAP, 0, 1, 0, "-lp_asym_pause_cap" }, +{ NULL, PARAM_LP_1000FDX_CAP, 0, 1, 0, "-lp_1000fdx_cap" }, +{ NULL, PARAM_LP_1000HDX_CAP, 0, 1, 0, "-lp_1000hdx_cap" }, +{ NULL, PARAM_LP_100T4_CAP, 0, 1, 0, "-lp_100T4_cap" }, +{ NULL, PARAM_LP_100FDX_CAP, 0, 1, 0, "-lp_100fdx_cap" }, +{ NULL, PARAM_LP_100HDX_CAP, 0, 1, 0, "-lp_100hdx_cap" }, +{ NULL, PARAM_LP_10FDX_CAP, 0, 1, 0, "-lp_10fdx_cap" 
}, +{ NULL, PARAM_LP_10HDX_CAP, 0, 1, 0, "-lp_10hdx_cap" }, +{ NULL, PARAM_LP_REM_FAULT, 0, 1, 0, "-lp_rem_fault" }, + +/* Current operating modes */ +{ NULL, PARAM_LINK_STATUS, 0, 1, 0, "-link_status" }, +{ NULL, PARAM_LINK_SPEED, 0, 1000, 0, "-link_speed" }, +{ NULL, PARAM_LINK_DUPLEX, 0, 2, 0, "-link_duplex" }, + +/* Terminator */ +{ NULL, PARAM_COUNT, 0, 0, 0, NULL } +}; + + +/* + * igb_nd_get - ndd get parameter values + */ +static int +igb_nd_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp) +{ + nd_param_t *nd = (nd_param_t *)(uintptr_t)cp; + _NOTE(ARGUNUSED(q)); + _NOTE(ARGUNUSED(credp)); + + igb_nd_get_param_val(nd); + (void) mi_mpprintf(mp, "%d", nd->val); + + return (0); +} + +/* + * igb_nd_set - ndd set parameter values + */ +static int +igb_nd_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp) +{ + nd_param_t *nd = (nd_param_t *)(uintptr_t)cp; + long new_value; + char *end; + _NOTE(ARGUNUSED(q)); + _NOTE(ARGUNUSED(mp)); + _NOTE(ARGUNUSED(credp)); + + new_value = mi_strtol(value, &end, 10); + if (end == value) + return (EINVAL); + if (new_value < nd->min || new_value > nd->max) + return (EINVAL); + + igb_nd_set_param_val(nd, new_value); + + return (0); +} + +/* + * igb_nd_param_load + */ +static int +igb_nd_param_load(igb_t *igb) +{ + const nd_param_t *tmpnd; + nd_param_t *nd; + caddr_t *ndd; + pfi_t setfn; + char *nm; + int value; + + ndd = &igb->nd_data; + ASSERT(*ndd == NULL); + + for (tmpnd = nd_template; tmpnd->name != NULL; ++tmpnd) { + /* + * Copy the template from nd_template[] into the + * proper slot in the per-instance parameters, + * then register the parameter with nd_load() + */ + nd = &igb->nd_params[tmpnd->info]; + *nd = *tmpnd; + nd->private = igb; + igb_nd_get_param_val(nd); + + nm = &nd->name[0]; + setfn = igb_nd_set; + + if (igb->hw.phy.media_type != e1000_media_type_copper) { + switch (*nm) { + default: + break; + + case '?': + setfn = NULL; + break; + } + } + + switch (*nm) { + default: + case '!': + continue; 
+ + case '+': + case '?': + break; + + case '-': + setfn = NULL; + break; + } + + if (!nd_load(ndd, ++nm, igb_nd_get, setfn, (caddr_t)nd)) + goto nd_fail; + + /* + * If the parameter is writable, and there's a property + * with the same name, and its value is in range, we use + * it to initialise the parameter. If it exists but is + * out of range, it's ignored. + */ + if (setfn && IGB_PROP_EXISTS(igb->dip, nm)) { + value = IGB_PROP_GET_INT(igb->dip, nm); + if (value >= nd->min && value <= nd->max) + nd->val = value; + } + } + + return (IGB_SUCCESS); + +nd_fail: + igb_log(igb, + "igb_nd_param_load: failed at index %d [info %d]", + (tmpnd - nd_template), tmpnd->info); + nd_free(ndd); + return (IGB_FAILURE); +} + + +/* + * igb_nd_get_param_val + */ +static void +igb_nd_get_param_val(nd_param_t *nd) +{ + igb_t *igb = (igb_t *)nd->private; + + mutex_enter(&igb->gen_lock); + + switch (nd->info) { + case PARAM_LINK_STATUS: + nd->val = (igb->link_state == LINK_STATE_UP) ? 1 : 0; + break; + case PARAM_LINK_SPEED: + nd->val = igb->link_speed; + break; + case PARAM_LINK_DUPLEX: + nd->val = igb->link_duplex; + break; + default: + break; + } + + mutex_exit(&igb->gen_lock); +} + +/* + * igb_nd_set_param_val + */ +static void +igb_nd_set_param_val(nd_param_t *nd, uint32_t value) +{ + igb_t *igb = (igb_t *)nd->private; + + mutex_enter(&igb->gen_lock); + + if (nd->val == value) { + mutex_exit(&igb->gen_lock); + return; + } + + switch (nd->info) { + case PARAM_ADV_AUTONEG_CAP: + case PARAM_ADV_1000FDX_CAP: + case PARAM_ADV_100FDX_CAP: + case PARAM_ADV_100HDX_CAP: + case PARAM_ADV_10FDX_CAP: + case PARAM_ADV_10HDX_CAP: + nd->val = value; + (void) igb_setup_link(igb, B_TRUE); + break; + + default: + break; + } + + mutex_exit(&igb->gen_lock); +} + +/* + * comment describing function + */ +int +igb_nd_init(igb_t *igb) +{ + /* + * Register all the per-instance properties, initialising + * them from the table above or from driver properties set + * in the .conf file + */ + if 
(igb_nd_param_load(igb) != IGB_SUCCESS) + return (IGB_FAILURE); + + return (IGB_SUCCESS); +} + + +/* + * Free the Named Dispatch Table by calling nd_free + */ +void +igb_nd_cleanup(igb_t *igb) +{ + nd_free(&igb->nd_data); +} + +/* + * comment describing function + */ +enum ioc_reply +igb_nd_ioctl(igb_t *igb, queue_t *q, + mblk_t *mp, struct iocblk *ioc) +{ + boolean_t ok; + int cmd; + + cmd = ioc->ioc_cmd; + switch (cmd) { + default: + /* NOTREACHED */ + ASSERT(FALSE); + return (IOC_INVAL); + + case ND_GET: + /* + * If nd_getset() returns B_FALSE, the command was + * not valid (e.g. unknown name), so we just tell the + * top-level ioctl code to send a NAK (with code EINVAL). + * + * Otherwise, nd_getset() will have built the reply to + * be sent (but not actually sent it), so we tell the + * caller to send the prepared reply. + */ + ok = nd_getset(q, igb->nd_data, mp); + return (ok ? IOC_REPLY : IOC_INVAL); + + case ND_SET: + /* + * All adv_* parameters are locked (read-only) while + * the device is in any sort of loopback mode ... + */ + if (igb->loopback_mode != IGB_LB_NONE) { + ioc->ioc_error = EBUSY; + return (IOC_INVAL); + } + + ok = nd_getset(q, igb->nd_data, mp); + return (ok ? IOC_REPLY : IOC_INVAL); + } +} diff --git a/usr/src/uts/common/io/igb/igb_nvm.c b/usr/src/uts/common/io/igb/igb_nvm.c new file mode 100644 index 0000000000..175aa9122c --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_nvm.c @@ -0,0 +1,916 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
+ * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_api.h" +#include "igb_nvm.h" + +/* + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + */ +static void +e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/* + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + */ +static void +e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/* + * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. 
+ */ +static void +e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u32 mask; + + DEBUGFUNC("e1000_shift_out_eec_bits"); + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + + usec_delay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/* + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. 
+ */ +static u16 +e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + DEBUGFUNC("e1000_shift_in_eec_bits"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return (data); +} + +/* + * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + */ +s32 +e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + s32 ret_val = -E1000_ERR_NVM; + + DEBUGFUNC("e1000_poll_eerd_eewr_done"); + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = E1000_READ_REG(hw, E1000_EERD); + else + reg = E1000_READ_REG(hw, E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) { + ret_val = E1000_SUCCESS; + break; + } + + usec_delay(5); + } + + return (ret_val); +} + +/* + * e1000_acquire_nvm_generic - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). 
+ */ +s32 +e1000_acquire_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_acquire_nvm_generic"); + + E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ); + eecd = E1000_READ_REG(hw, E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + usec_delay(5); + eecd = E1000_READ_REG(hw, E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + DEBUGOUT("Could not acquire NVM grant\n"); + ret_val = -E1000_ERR_NVM; + } + + return (ret_val); +} + +/* + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + */ +static void +e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + + DEBUGFUNC("e1000_standby_nvm"); + + if (nvm->type == e1000_nvm_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + e1000_lower_eec_clk(hw, &eecd); + } else if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + } +} + +/* + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. 
+ */ +void +e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_stop_nvm"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) { + /* CS on Microcwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + } +} + +/* + * e1000_release_nvm_generic - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + */ +void +e1000_release_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_release_nvm_generic"); + + e1000_stop_nvm(hw); + + eecd = E1000_READ_REG(hw, E1000_EECD); + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/* + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + */ +static s32 +e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + s32 ret_val = E1000_SUCCESS; + u16 timeout = 0; + u8 spi_stat_reg; + + DEBUGFUNC("e1000_ready_nvm_eeprom"); + + if (nvm->type == e1000_nvm_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + /* Set CS */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + } else if (nvm->type == e1000_nvm_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + usec_delay(1); + timeout = NVM_MAX_RETRY_SPI; + + /* + * Read "Status Register" repeatedly until the LSB is cleared. 
+ * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + usec_delay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + DEBUGOUT("SPI NVM Status error\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + } + +out: + return (ret_val); +} + +/* + * e1000_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + */ +s32 +e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + DEBUGFUNC("e1000_read_nvm_spi"); + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = e1000_acquire_nvm(hw); + if (ret_val) + goto out; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* + * Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. 
This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + e1000_release_nvm(hw); + +out: + return (ret_val); +} + +/* + * e1000_read_nvm_microwire - Reads EEPROM's using microwire + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + */ +s32 +e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u8 read_opcode = NVM_READ_OPCODE_MICROWIRE; + + DEBUGFUNC("e1000_read_nvm_microwire"); + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = e1000_acquire_nvm(hw); + if (ret_val) + goto out; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset + i), + nvm->address_bits); + + /* + * Read the data. For microwire, each word requires the + * overhead of setup and tear-down. + */ + data[i] = e1000_shift_in_eec_bits(hw, 16); + e1000_standby_nvm(hw); + } + +release: + e1000_release_nvm(hw); + +out: + return (ret_val); +} + +/* + * e1000_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. 
+ */ +s32 +e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_nvm_eerd"); + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + E1000_WRITE_REG(hw, E1000_EERD, eerd); + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (E1000_READ_REG(hw, E1000_EERD) >> + E1000_NVM_RW_REG_DATA); + } + +out: + return (ret_val); +} + +/* + * e1000_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likley contain an invalid checksum. + */ +s32 +e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 widx = 0; + + DEBUGFUNC("e1000_write_nvm_spi"); + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = e1000_acquire_nvm(hw); + if (ret_val) + goto out; + + msec_delay(10); + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + } + + msec_delay(10); +release: + e1000_release_nvm(hw); + +out: + return (ret_val); +} + +/* + * e1000_write_nvm_microwire - Writes EEPROM using microwire + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using microwire interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likley contain an invalid checksum. 
+ */ +s32 +e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u32 eecd; + u16 words_written = 0; + u16 widx = 0; + + DEBUGFUNC("e1000_write_nvm_microwire"); + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = e1000_acquire_nvm(hw); + if (ret_val) + goto out; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + + e1000_standby_nvm(hw); + + while (words_written < words) { + e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE, + nvm->opcode_bits); + + e1000_shift_out_eec_bits(hw, (u16)(offset + words_written), + nvm->address_bits); + + e1000_shift_out_eec_bits(hw, data[words_written], 16); + + e1000_standby_nvm(hw); + + for (widx = 0; widx < 200; widx++) { + eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & E1000_EECD_DO) + break; + usec_delay(50); + } + + if (widx == 200) { + DEBUGOUT("NVM Write did not complete\n"); + ret_val = -E1000_ERR_NVM; + goto release; + } + + e1000_standby_nvm(hw); + + words_written++; + } + + e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + +release: + e1000_release_nvm(hw); + +out: + return (ret_val); +} + +/* + * e1000_read_pba_num_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. 
+ */ +s32 +e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num) +{ + s32 ret_val; + u16 nvm_data; + + DEBUGFUNC("e1000_read_pba_num_generic"); + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + *pba_num = (u32)(nvm_data << 16); + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + *pba_num |= nvm_data; + +out: + return (ret_val); +} + +/* + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + */ +s32 +e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 offset, nvm_data, i; + + DEBUGFUNC("e1000_read_mac_addr"); + + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = i >> 1; + ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); + hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); + } + + /* Flip last bit of mac address if we're on second port */ + if (hw->bus.func == E1000_FUNC_1) + hw->mac.perm_addr[5] ^= 1; + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + +out: + return (ret_val); +} + +/* + * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
+ */ +s32 +e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_generic"); + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + */ +s32 +e1000_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum"); + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) { + DEBUGOUT("NVM Write Error while updating checksum.\n"); + } + +out: + return (ret_val); +} + +/* + * e1000_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. 
+ */ +void +e1000_reload_nvm_generic(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + DEBUGFUNC("e1000_reload_nvm_generic"); + + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); +} + +/* Function pointers local to this file and not intended for public use */ + +/* + * e1000_acquire_nvm - Acquire exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * For those silicon families which have implemented a NVM acquire function, + * run the defined function else return success. + */ +s32 +e1000_acquire_nvm(struct e1000_hw *hw) +{ + if (hw->func.acquire_nvm) + return (hw->func.acquire_nvm(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * For those silicon families which have implemented a NVM release function, + * run the defined fucntion else return success. + */ +void +e1000_release_nvm(struct e1000_hw *hw) +{ + if (hw->func.release_nvm) + hw->func.release_nvm(hw); +} diff --git a/usr/src/uts/common/io/igb/igb_nvm.h b/usr/src/uts/common/io/igb/igb_nvm.h new file mode 100644 index 0000000000..0b4cb6e7af --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_nvm.h @@ -0,0 +1,71 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. 
+ * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#ifndef _IGB_NVM_H +#define _IGB_NVM_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); + +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); +s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num); +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); +s32 e1000_write_nvm_eewr(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); +void e1000_stop_nvm(struct e1000_hw *hw); +void e1000_release_nvm_generic(struct e1000_hw *hw); +void e1000_reload_nvm_generic(struct e1000_hw *hw); + +/* Function pointers */ +s32 e1000_acquire_nvm(struct e1000_hw *hw); +void e1000_release_nvm(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_NVM_H */ diff --git a/usr/src/uts/common/io/igb/igb_osdep.c b/usr/src/uts/common/io/igb/igb_osdep.c new file mode 100644 index 0000000000..9d03c05494 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_osdep.c 
@@ -0,0 +1,116 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_osdep.h" +#include "igb_api.h" + + +s32 +e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, u32 size) +{ + hw->dev_spec = kmem_zalloc(size, KM_SLEEP); + + return (E1000_SUCCESS); +} + +void +e1000_free_dev_spec_struct(struct e1000_hw *hw) +{ + if (hw->dev_spec == NULL) + return; + + kmem_free(hw->dev_spec, hw->dev_spec_size); + hw->dev_spec = NULL; +} + +void +e1000_pci_set_mwi(struct e1000_hw *hw) +{ + uint16_t val = hw->bus.pci_cmd_word | CMD_MEM_WRT_INVALIDATE; + + e1000_write_pci_cfg(hw, PCI_COMMAND_REGISTER, &val); +} + +void +e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + uint16_t val = hw->bus.pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE; + + e1000_write_pci_cfg(hw, PCI_COMMAND_REGISTER, &val); +} + +void +e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) +{ + pci_config_put16(OS_DEP(hw)->cfg_handle, reg, *value); +} + +void +e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) +{ + *value = + pci_config_get16(OS_DEP(hw)->cfg_handle, reg); +} + +/* + * The real intent of this routine is to return the value from pci-e + * config space at offset reg into the capability space. + * ICH devices are "PCI Express"-ish. They have a configuration space, + * but do not contain PCI Express Capability registers, so this returns + * the equivalent of "not supported" + */ +int32_t +e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) +{ + *value = pci_config_get16(OS_DEP(hw)->cfg_handle, + PCI_EX_CONF_CAP + reg); + + return (0); +} + +/* + * Enables PCI-Express master access. + * + * hw: Struct containing variables accessed by shared code + * + * returns: - none. 
+ */ +void +e1000_enable_pciex_master(struct e1000_hw *hw) +{ + uint32_t ctrl; + + if (hw->bus.type != e1000_bus_type_pci_express) + return; + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); +} diff --git a/usr/src/uts/common/io/igb/igb_osdep.h b/usr/src/uts/common/io/igb/igb_osdep.h new file mode 100644 index 0000000000..43e0765b6d --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_osdep.h @@ -0,0 +1,167 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#ifndef _IGB_OSDEP_H +#define _IGB_OSDEP_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +#include <sys/types.h> +#include <sys/conf.h> +#include <sys/debug.h> +#include <sys/stropts.h> +#include <sys/stream.h> +#include <sys/strlog.h> +#include <sys/kmem.h> +#include <sys/stat.h> +#include <sys/kstat.h> +#include <sys/modctl.h> +#include <sys/errno.h> +#include <sys/ddi.h> +#include <sys/dditypes.h> +#include <sys/sunddi.h> +#include <sys/pci.h> +#include <sys/atomic.h> +#include <sys/note.h> +#include "igb_debug.h" + +#define usec_delay(x) drv_usecwait(x) +#define msec_delay(x) drv_usecwait(x * 1000) + +#ifdef IGB_DEBUG +#define DEBUGOUT(S) IGB_DEBUGLOG_0(NULL, S) +#define DEBUGOUT1(S, A) IGB_DEBUGLOG_1(NULL, S, A) +#define DEBUGOUT2(S, A, B) IGB_DEBUGLOG_2(NULL, S, A, B) +#define DEBUGOUT3(S, A, B, C) IGB_DEBUGLOG_3(NULL, S, A, B, C) +#define DEBUGFUNC(F) +#else +#define DEBUGOUT(S) +#define DEBUGOUT1(S, A) +#define DEBUGOUT2(S, A, B) +#define DEBUGOUT3(S, A, B, C) +#define DEBUGFUNC(F) +#endif + +#define OS_DEP(hw) ((struct igb_osdep *)((hw)->back)) + +#define FALSE 0 +#define TRUE 1 + +#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */ +#define PCI_COMMAND_REGISTER 0x04 +#define PCI_EX_CONF_CAP 0xE0 + + +/* + * Constants used in setting flow control thresholds + */ +#define E1000_PBA_MASK 0xffff +#define E1000_PBA_SHIFT 10 +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ +#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* PHY Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define E1000_WRITE_FLUSH(a) (void) E1000_READ_REG(a, E1000_STATUS) + +#define E1000_WRITE_REG(hw, reg, value) \ 
+ ddi_put32((OS_DEP(hw))->reg_handle, \ + (uint32_t *)((uintptr_t)(hw)->hw_addr + reg), (value)) + +#define E1000_READ_REG(hw, reg) \ + ddi_get32((OS_DEP(hw))->reg_handle, \ + (uint32_t *)((uintptr_t)(hw)->hw_addr + reg)) + +#define E1000_WRITE_REG_ARRAY(hw, reg, offset, value) \ + ddi_put32((OS_DEP(hw))->reg_handle, \ + (uint32_t *)((uintptr_t)(hw)->hw_addr + reg + ((offset) << 2)), \ + (value)) + +#define E1000_READ_REG_ARRAY(hw, reg, offset) \ + ddi_get32((OS_DEP(hw))->reg_handle, \ + (uint32_t *)((uintptr_t)(hw)->hw_addr + reg + ((offset) << 2))) + +#define E1000_WRITE_REG_ARRAY_DWORD(a, reg, offset, value) \ + E1000_WRITE_REG_ARRAY(a, reg, offset, value) +#define E1000_READ_REG_ARRAY_DWORD(a, reg, offset) \ + E1000_READ_REG_ARRAY(a, reg, offset) + +#define msec_delay_irq msec_delay + +#define UNREFERENCED_PARAMETER(x) _NOTE(ARGUNUSED(x)) + +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef uint8_t UCHAR; /* 8-bit unsigned */ +typedef UCHAR UINT8; /* 8-bit unsigned */ +typedef uint16_t USHORT; /* 16-bit unsigned */ +typedef uint16_t UINT16; /* 16-bit unsigned */ +typedef uint32_t ULONG; /* 32-bit unsigned */ +typedef uint32_t UINT32; +typedef uint32_t UINT; /* 32-bit unsigned */ +typedef UCHAR BOOLEAN; +typedef BOOLEAN bool; +typedef UCHAR *PUCHAR; +typedef UINT *PUINT; +typedef ULONG *PLONG; +typedef ULONG NDIS_STATUS; +typedef USHORT *PUSHORT; +typedef PUSHORT PUINT16; /* 16-bit unsigned pointer */ +typedef ULONG E1000_32_BIT_PHYSICAL_ADDRESS, + *PFX_32_BIT_PHYSICAL_ADDRESS; +typedef uint64_t E1000_64_BIT_PHYSICAL_ADDRESS, + *PFX_64_BIT_PHYSICAL_ADDRESS; + +struct igb_osdep { + ddi_acc_handle_t reg_handle; + ddi_acc_handle_t cfg_handle; + struct igb *igb; +}; + + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_OSDEP_H */ diff --git a/usr/src/uts/common/io/igb/igb_phy.c b/usr/src/uts/common/io/igb/igb_phy.c new file mode 
100644 index 0000000000..e85b54a315 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_phy.c @@ -0,0 +1,2556 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_api.h" +#include "igb_phy.h" + +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static void e1000_release_phy(struct e1000_hw *hw); +static s32 e1000_acquire_phy(struct e1000_hw *hw); +static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = + { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; + +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof (e1000_m88_cable_length_table) / \ + sizeof (e1000_m88_cable_length_table[0])) + +static const u16 e1000_igp_2_cable_length_table[] = + { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, + 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, + 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, + 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, + 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, + 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, + 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, + 104, 109, 114, 118, 121, 124}; + +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof (e1000_igp_2_cable_length_table) / \ + sizeof (e1000_igp_2_cable_length_table[0])) + +/* + * e1000_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise + * return E1000_BLK_PHY_RESET (12). + */ +s32 +e1000_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + DEBUGFUNC("e1000_check_reset_block"); + + manc = E1000_READ_REG(hw, E1000_MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? 
+ E1000_BLK_PHY_RESET : E1000_SUCCESS; +} + +/* + * e1000_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + */ +s32 +e1000_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_id; + + DEBUGFUNC("e1000_get_phy_id"); + + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usec_delay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return (ret_val); +} + +/* + * e1000_phy_reset_dsp_generic - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + */ +s32 +e1000_phy_reset_dsp_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_phy_reset_dsp_generic"); + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + +out: + return (ret_val); +} + +/* + * e1000_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control regsiter in the PHY at offset and stores the + * information read to data. + */ +s32 +e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* + * Set up Op-code, Phy Address, and register offset in the MDI + * Control register. 
The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* + * Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Read did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + *data = (u16) mdic; + +out: + return (ret_val); +} + +/* + * e1000_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + */ +s32 +e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* + * Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* + * Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Write did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + */ +s32 +e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_m88"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, data); + + e1000_release_phy(hw); + +out: + return (ret_val); +} + +/* + * e1000_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ */ +s32 +e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_m88"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, data); + + e1000_release_phy(hw); + +out: + return (ret_val); +} + +/* + * e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + */ +s32 +e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_igp"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, (u16)offset); + if (ret_val) { + e1000_release_phy(hw); + goto out; + } + } + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, data); + + e1000_release_phy(hw); + +out: + return (ret_val); +} + +/* + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ */ +s32 +e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_igp"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, (u16)offset); + if (ret_val) { + e1000_release_phy(hw); + goto out; + } + } + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, data); + + e1000_release_phy(hw); + +out: + return (ret_val); +} + +/* + * e1000_read_kmrn_reg_generic - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + */ +s32 +e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + DEBUGFUNC("e1000_read_kmrn_reg_generic"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + + usec_delay(2); + + kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + e1000_release_phy(hw); + +out: + return (ret_val); +} + +/* + * e1000_write_kmrn_reg_generic - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. 
+ */ +s32 +e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + DEBUGFUNC("e1000_write_kmrn_reg_generic"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + + usec_delay(2); + e1000_release_phy(hw); + +out: + return (ret_val); +} + +/* + * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + */ +s32 +e1000_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88"); + + if (phy->reset_disable) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* For newer PHYs this bit is downshift enable */ + if (phy->type == e1000_phy_m88) + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* + * Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* + * Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift on BM (disabled by default) */ + if (phy->type == e1000_phy_bm) + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + if ((phy->type == e1000_phy_m88) && + (phy->revision < E1000_REVISION_4) && + (phy->id != BME1000_E_PHY_ID_R2)) { + /* + * Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = e1000_read_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. 
*/ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + } + + /* Commit the changes. */ + ret_val = e1000_phy_commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + */ +s32 +e1000_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_copper_link_setup_igp"); + + if (phy->reset_disable) { + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + goto out; + } + + /* Wait 15ms for MAC to configure PHY from NVM settings. */ + msec_delay(15); + + /* + * The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. 
+ */ + if (phy->type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, FALSE); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D3\n"); + goto out; + } + } + + /* disable lplu d0 during driver init */ + ret_val = e1000_set_d0_lplu_state(hw, FALSE); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D0\n"); + goto out; + } + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + goto out; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* + * when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + + /* Set auto Master/Slave resolution process */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? 
+ e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + */ +s32 +e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("e1000_copper_link_autoneg"); + + /* + * Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* + * If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + goto out; + } + DEBUGOUT("Restarting Auto-Neg\n"); + + /* + * Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. 
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* + * Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error while waiting for " + "autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = TRUE; + +out: + return (ret_val); +} + +/* + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + */ +s32 +e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + DEBUGFUNC("e1000_phy_setup_autoneg"); + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + goto out; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, + PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + + /* + * Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. 
+ */ + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + DEBUGOUT("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + DEBUGOUT("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + DEBUGOUT("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + DEBUGOUT("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) { + DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + DEBUGOUT("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* + * Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. 
+ * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.type) { + case e1000_fc_none: + /* + * Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* + * Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a software + * over-ride. 
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + goto out; + + DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = e1000_write_phy_reg(hw, + PHY_1000T_CTRL, mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_setup_copper_link_generic - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + */ +s32 +e1000_setup_copper_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_setup_copper_link_generic"); + + if (hw->mac.autoneg) { + /* + * Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* + * PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* + * Check link status. Wait up to 100 microseconds for link to become + * valid. 
+ */ + ret_val = e1000_phy_has_link_generic(hw, + COPPER_LINK_UP_LIMIT, + 10, + &link); + if (ret_val) + goto out; + + if (link) { + DEBUGOUT("Valid link established!!!\n"); + e1000_config_collision_dist_generic(hw); + ret_val = e1000_config_fc_after_link_up_generic(hw); + } else { + DEBUGOUT("Unable to establish link!!!\n"); + } + +out: + return (ret_val); +} + +/* + * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + */ +s32 +e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_igp"); + + ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* + * Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. 
+ */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + goto out; + + DEBUGOUT1("IGP PSCR: %X\n", phy_data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) { + DEBUGOUT("Link taking longer than expected.\n"); + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + +out: + return (ret_val); +} + +/* + * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + */ +s32 +e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); + + /* + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. 
+ */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); + + ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= MII_CR_RESET; + + ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) { + /* + * We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000_write_phy_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + goto out; + ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) + goto out; + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* + * Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + /* + * In addition, we must re-enable CRS on Tx for both half and full + * duplex. 
+ */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+out:
+	return (ret_val);
+}
+
+/*
+ * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ * Forces speed and duplex on the PHY by doing the following: disable flow
+ * control, force speed/duplex on the MAC, disable auto speed detection,
+ * disable auto-negotiation, configure duplex, configure speed, configure
+ * the collision distance, write configuration to CTRL register.  The
+ * caller must write to the PHY_CONTROL register for these settings to
+ * take effect.
+ */
+void
+e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
+
+	/* Turn off flow control when forcing speed/duplex */
+	hw->fc.type = e1000_fc_none;
+
+	/* Force speed/duplex on the mac */
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~E1000_CTRL_SPD_SEL;
+
+	/* Disable Auto Speed Detection */
+	ctrl &= ~E1000_CTRL_ASDE;
+
+	/* Disable autoneg on the phy */
+	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+	/* Forcing Full or Half Duplex? */
+	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+		ctrl &= ~E1000_CTRL_FD;
+		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+		DEBUGOUT("Half Duplex\n");
+	} else {
+		ctrl |= E1000_CTRL_FD;
+		*phy_ctrl |= MII_CR_FULL_DUPLEX;
+		DEBUGOUT("Full Duplex\n");
+	}
+
+	/* Forcing 10mb or 100mb?
*/ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + DEBUGOUT("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + /* LINTED */ + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + DEBUGOUT("Forcing 10mb\n"); + } + + e1000_config_collision_dist_generic(hw); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); +} + +/* + * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + */ +s32 +e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_generic"); + + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = e1000_write_phy_reg(hw, + IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			    IGP01E1000_PHY_PORT_CONFIG,
+			    &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			    IGP01E1000_PHY_PORT_CONFIG,
+			    data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			    IGP01E1000_PHY_PORT_CONFIG,
+			    &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			    IGP01E1000_PHY_PORT_CONFIG,
+			    data);
+			if (ret_val)
+				goto out;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+	    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+	    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= IGP02E1000_PM_D3_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		    IGP02E1000_PHY_POWER_MGMT,
+		    data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		    IGP01E1000_PHY_PORT_CONFIG,
+		    &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		    IGP01E1000_PHY_PORT_CONFIG,
+		    data);
+	}
+
+out:
+	return (ret_val);
+}
+
+/*
+ * e1000_check_downshift_generic - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * A downshift is detected by querying the PHY link health.
+ */ +s32 +e1000_check_downshift_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("e1000_check_downshift_generic"); + + switch (phy->type) { + case e1000_phy_m88: + case e1000_phy_gg82563: + case e1000_phy_bm: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = FALSE; + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = e1000_read_phy_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask) ? TRUE : FALSE; + +out: + return (ret_val); +} + +/* + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + */ +s32 +e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_check_polarity_m88"); + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return (ret_val); +} + +/* + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). 
+ */
+s32
+e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data, offset, mask;
+
+	DEBUGFUNC("e1000_check_polarity_igp");
+
+	/*
+	 * Polarity is determined based on the speed of
+	 * our connection.
+	 */
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		offset = IGP01E1000_PHY_PCS_INIT_REG;
+		mask = IGP01E1000_PHY_POLARITY_MASK;
+	} else {
+		/*
+		 * This really only applies to 10Mbps since
+		 * there is no polarity for 100Mbps (always 0).
+		 */
+		offset = IGP01E1000_PHY_PORT_STATUS;
+		mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, offset, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & mask)
+		    ? e1000_rev_polarity_reversed
+		    : e1000_rev_polarity_normal;
+
+out:
+	return (ret_val);
+}
+
+/*
+ * e1000_wait_autoneg_generic - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ */
+s32
+e1000_wait_autoneg_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, phy_status;
+
+	DEBUGFUNC("e1000_wait_autoneg_generic");
+
+	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+			break;
+		msec_delay(100);
+	}
+
+	/*
+	 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+	 * has completed.
+ */ + return (ret_val); +} + +/* + * e1000_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + */ +s32 +e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + + DEBUGFUNC("e1000_phy_has_link_generic"); + + for (i = 0; i < iterations; i++) { + /* + * Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + msec_delay_irq(usec_interval/1000); + else + usec_delay(usec_interval); + } + + *success = (i < iterations) ? TRUE : FALSE; + + return (ret_val); +} + +/* + * e1000_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. 
The m88 PHY has four
+ * possible cable length values, which are:
+ *	Register Value		Cable Length
+ *	0			< 50 meters
+ *	1			50 - 80 meters
+ *	2			80 - 110 meters
+ *	3			110 - 140 meters
+ *	4			> 140 meters
+ */
+s32
+e1000_get_cable_length_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	DEBUGFUNC("e1000_get_cable_length_m88");
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+	    M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	phy->min_cable_length = e1000_m88_cable_length_table[index];
+	phy->max_cable_length = e1000_m88_cable_length_table[index+1];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return (ret_val);
+}
+
+/*
+ * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+ * received signal, adjusting for the attenuation produced by the
+ * cable.  By reading the AGC registers, which represent the
+ * combination of coarse and fine gain value, the value can be put
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ */ +s32 +e1000_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = + {IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D}; + + DEBUGFUNC("e1000_get_cable_length_igp_2"); + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + goto out; + + /* + * Getting bits 15:9, which represent the combination of + * course and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? 
+ (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return (ret_val); +} + +/* + * e1000_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + */ +s32 +e1000_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_m88"); + + if (hw->phy.media_type != e1000_media_type_copper) { + DEBUGOUT("Phy info is only valid for copper media\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) + ? TRUE + : FALSE; + + ret_val = e1000_check_polarity_m88(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? TRUE : FALSE; + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = e1000_get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? 
e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return (ret_val); +} + +/* + * e1000_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + */ +s32 +e1000_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_igp"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = TRUE; + + ret_val = e1000_check_polarity_igp(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? TRUE : FALSE; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = e1000_get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? 
e1000_1000t_rx_status_ok
+		    : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return (ret_val);
+}
+
+/*
+ * e1000_phy_sw_reset_generic - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register and
+ * setting/writing the control register reset bit to the PHY.
+ */
+s32
+e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	usec_delay(1);
+
+out:
+	return (ret_val);
+}
+
+/*
+ * e1000_phy_hw_reset_generic - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting.  Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY.  Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ */ +s32 +e1000_phy_hw_reset_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + DEBUGFUNC("e1000_phy_hw_reset_generic"); + + ret_val = e1000_check_reset_block(hw); + if (ret_val) { + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(hw); + + usec_delay(phy->reset_delay_us); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + + usec_delay(150); + + e1000_release_phy(hw); + + ret_val = e1000_get_phy_cfg_done(hw); + +out: + return (ret_val); +} + +/* + * e1000_get_cfg_done_generic - Generic configuration done + * @hw: pointer to the HW structure + * + * Generic function to wait 10 milli-seconds for configuration to complete + * and return success. + */ +s32 +e1000_get_cfg_done_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_get_cfg_done_generic"); + UNREFERENCED_PARAMETER(hw); + + msec_delay_irq(10); + + return (E1000_SUCCESS); +} + +/* Internal function pointers */ + +/* + * e1000_get_phy_cfg_done - Generic PHY configuration done + * @hw: pointer to the HW structure + * + * Return success if silicon family did not implement a family specific + * get_cfg_done function. + */ +static s32 +e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + if (hw->func.get_cfg_done) + return (hw->func.get_cfg_done(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_release_phy - Generic release PHY + * @hw: pointer to the HW structure + * + * Return if silicon family does not require a semaphore when accessing the + * PHY. + */ +static void +e1000_release_phy(struct e1000_hw *hw) +{ + if (hw->func.release_phy) + hw->func.release_phy(hw); +} + +/* + * e1000_acquire_phy - Generic acquire PHY + * @hw: pointer to the HW structure + * + * Return success if silicon family does not require a semaphore when + * accessing the PHY. 
+ */ +static s32 +e1000_acquire_phy(struct e1000_hw *hw) +{ + if (hw->func.acquire_phy) + return (hw->func.acquire_phy(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex + * @hw: pointer to the HW structure + * + * When the silicon family has not implemented a forced speed/duplex + * function for the PHY, simply return (E1000_SUCCESS). + */ +s32 +e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + if (hw->func.force_speed_duplex) + return (hw->func.force_speed_duplex(hw)); + + return (E1000_SUCCESS); +} + +/* + * e1000_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. + */ +s32 +e1000_phy_init_script_igp3(struct e1000_hw *hw) +{ + DEBUGOUT("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + (void) e1000_write_phy_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + (void) e1000_write_phy_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + (void) e1000_write_phy_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + (void) e1000_write_phy_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to Tx amplitude in Giga mode */ + (void) e1000_write_phy_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + (void) e1000_write_phy_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + (void) e1000_write_phy_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + (void) e1000_write_phy_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + (void) e1000_write_phy_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + (void) e1000_write_phy_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + (void) e1000_write_phy_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + (void) e1000_write_phy_reg(hw, 0x1F30, 
0x1606); + /* Disable NPDFE */ + (void) e1000_write_phy_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + (void) e1000_write_phy_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + (void) e1000_write_phy_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + (void) e1000_write_phy_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + (void) e1000_write_phy_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + (void) e1000_write_phy_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + (void) e1000_write_phy_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + (void) e1000_write_phy_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + (void) e1000_write_phy_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + (void) e1000_write_phy_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + (void) e1000_write_phy_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + (void) e1000_write_phy_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + (void) e1000_write_phy_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + (void) e1000_write_phy_reg(hw, 0x1798, 0xD008); + /* + * Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + (void) e1000_write_phy_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + (void) e1000_write_phy_reg(hw, 0x187A, 0x0800); + /* + * Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + (void) e1000_write_phy_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + (void) e1000_write_phy_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + (void) e1000_write_phy_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + (void) e1000_write_phy_reg(hw, 0x0000, 0x1340); + + return (E1000_SUCCESS); +} + +/* + * e1000_get_phy_type_from_id - Get 
PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. + */ +e1000_phy_type +e1000_get_phy_type_from_id(u32 phy_id) +{ + e1000_phy_type phy_type = e1000_phy_unknown; + + switch (phy_id) { + case M88E1000_I_PHY_ID: + case M88E1000_E_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1011_I_PHY_ID: + phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + phy_type = e1000_phy_igp_2; + break; + case GG82563_E_PHY_ID: + phy_type = e1000_phy_gg82563; + break; + case IGP03E1000_E_PHY_ID: + phy_type = e1000_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy_type = e1000_phy_ife; + break; + case BME1000_E_PHY_ID: + case BME1000_E_PHY_ID_R2: + phy_type = e1000_phy_bm; + break; + default: + phy_type = e1000_phy_unknown; + break; + } + return (phy_type); +} + +/* + * e1000_determine_phy_address - Determines PHY address. + * @hw: pointer to the HW structure + * + * This uses a trial and error method to loop through possible PHY + * addresses. It tests each by reading the PHY ID registers and + * checking for a match. + */ +s32 +e1000_determine_phy_address(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_PHY_TYPE; + u32 phy_addr = 0; + u32 i = 0; + e1000_phy_type phy_type = e1000_phy_unknown; + + do { + for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { + hw->phy.addr = phy_addr; + (void) e1000_get_phy_id(hw); + phy_type = e1000_get_phy_type_from_id(hw->phy.id); + + /* + * If phy_type is valid, break - we found our + * PHY address + */ + if (phy_type != e1000_phy_unknown) { + ret_val = E1000_SUCCESS; + break; + } + } + i++; + } while ((ret_val != E1000_SUCCESS) && (i < 100)); + + return (ret_val); +} + +/* + * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address + * @page: page to access + * + * Returns the phy address for the page requested. 
+ */ +static u32 +e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) +{ + u32 phy_addr = 2; + + if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) + phy_addr = 1; + + return (phy_addr); +} + +/* + * e1000_write_phy_reg_bm - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 +e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select = 0; + u32 page = offset >> IGP_PAGE_SHIFT; + u32 page_shift = 0; + + DEBUGFUNC("e1000_write_phy_reg_bm"); + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, + offset, &data, FALSE); + goto out; + } + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* + * Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. 
+ */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) { + e1000_release_phy(hw); + goto out; + } + } + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + e1000_release_phy(hw); + +out: + return (ret_val); +} + +/* + * e1000_read_phy_reg_bm - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + */ +s32 +e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select = 0; + u32 page = offset >> IGP_PAGE_SHIFT; + u32 page_shift = 0; + + DEBUGFUNC("e1000_write_phy_reg_bm"); + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, + offset, data, TRUE); + goto out; + } + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* + * Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. 
+ */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) { + e1000_release_phy(hw); + goto out; + } + } + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + e1000_release_phy(hw); + +out: + return (ret_val); +} + +/* + * e1000_read_phy_reg_bm2 - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + */ +s32 +e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + DEBUGFUNC("e1000_write_phy_reg_bm2"); + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, + offset, data, TRUE); + goto out; + } + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, + BM_PHY_PAGE_SELECT, + page); + + if (ret_val) { + e1000_release_phy(hw); + goto out; + } + } + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + e1000_release_phy(hw); + +out: + return (ret_val); +} + +/* + * e1000_write_phy_reg_bm2 - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. 
Release any acquired semaphores before exiting. + */ +s32 +e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + DEBUGFUNC("e1000_write_phy_reg_bm2"); + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, + offset, &data, FALSE); + goto out; + } + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, + BM_PHY_PAGE_SELECT, + page); + + if (ret_val) { + e1000_release_phy(hw); + goto out; + } + } + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + e1000_release_phy(hw); + +out: + return (ret_val); +} + +/* + * e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to read or write + * @read: determines if operation is read or write + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. Note that procedure to read the wakeup + * registers are different. 
It works as such: + * 1) Set page 769, register 17, bit 2 = 1 + * 2) Set page to 800 for host (801 if we were manageability) + * 3) Write the address using the address opcode (0x11) + * 4) Read or write the data using the data opcode (0x12) + * 5) Restore 769_17.2 to its original value + */ +s32 +e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, + u32 offset, u16 *data, bool read) +{ + s32 ret_val; + u16 reg = ((u16)offset) & PHY_REG_MASK; + u16 phy_reg = 0; + u8 phy_acquired = 1; + + DEBUGFUNC("e1000_read_phy_wakeup_reg_bm"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) { + DEBUGOUT("Couldnt acquire PHY\n"); + phy_acquired = 0; + goto out; + } + + /* All operations in this function are phy address 1 */ + hw->phy.addr = 1; + + /* Set page 769 */ + (void) e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); + + ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg); + if (ret_val) { + DEBUGOUT("Couldnt read PHY page 769\n"); + goto out; + } + + /* First clear bit 4 to avoid a power state change */ + phy_reg &= ~(BM_WUC_HOST_WU_BIT); + ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) { + DEBUGOUT("Couldnt clear PHY page 769 bit 4\n"); + goto out; + } + + /* Write bit 2 = 1, and clear bit 4 to 769_17 */ + ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, + phy_reg | BM_WUC_ENABLE_BIT); + if (ret_val) { + DEBUGOUT("Couldnt write PHY page 769 bit 2\n"); + goto out; + } + + /* Select page 800 */ + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (BM_WUC_PAGE << IGP_PAGE_SHIFT)); + + /* Write the page 800 offset value using opcode 0x11 */ + ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); + if (ret_val) { + DEBUGOUT("Couldnt write address opcode to page 800\n"); + goto out; + } + + if (read) { + /* Read the page 800 value using opcode 0x12 */ + ret_val = e1000_read_phy_reg_mdic(hw, + BM_WUC_DATA_OPCODE, + data); + } else { 
+ /* Read the page 800 value using opcode 0x12 */ + ret_val = e1000_write_phy_reg_mdic(hw, + BM_WUC_DATA_OPCODE, + *data); + } + + if (ret_val) { + DEBUGOUT("Couldnt read data value from page 800\n"); + goto out; + } + + /* + * Restore 769_17.2 to its original value + * Set page 769 + */ + (void) e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); + + /* Clear 769_17.2 */ + ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) { + DEBUGOUT("Couldnt clear PHY page 769 bit 2\n"); + goto out; + } + +out: + if (phy_acquired == 1) + e1000_release_phy(hw); + return (ret_val); +} + +/* + * e1000_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, restore the link to previous + * settings. + */ +void +e1000_power_up_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + (void) e1000_read_phy_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + (void) e1000_write_phy_reg(hw, PHY_CONTROL, mii_reg); +} + +/* + * e1000_power_down_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, restore the link to previous + * settings. 
+ */ +void +e1000_power_down_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + (void) e1000_read_phy_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + (void) e1000_write_phy_reg(hw, PHY_CONTROL, mii_reg); + msec_delay(1); +} diff --git a/usr/src/uts/common/io/igb/igb_phy.h b/usr/src/uts/common/io/igb/igb_phy.h new file mode 100644 index 0000000000..f60d059bd1 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_phy.h @@ -0,0 +1,197 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#ifndef _IGB_PHY_H +#define _IGB_PHY_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +s32 e1000_check_downshift_generic(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000_copper_link_autoneg(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000_get_phy_id(struct e1000_hw *hw); +s32 e1000_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); +s32 e1000_wait_autoneg_generic(struct e1000_hw *hw); +s32 e1000_write_kmrn_reg_generic(struct e1000_hw 
*hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); +e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); +s32 e1000_determine_phy_address(struct e1000_hw *hw); +s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data, + bool read); +s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); +void e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); + +#define E1000_MAX_PHY_ADDR 4 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +#define BM_WUC_PAGE 800 +#define BM_WUC_ADDRESS_OPCODE 0x11 +#define BM_WUC_DATA_OPCODE 0x12 +#define BM_WUC_ENABLE_PAGE 769 +#define BM_WUC_ENABLE_REG 17 +#define BM_WUC_ENABLE_BIT (1 << 2) +#define 
BM_WUC_HOST_WU_BIT (1 << 4) + +/* BM PHY Copper Specific Control 1 */ +#define BM_CS_CTRL1 16 +#define BM_CR_CTRL1_ENERGY_DETECT 0x0300 /* Enable Energy Detect */ + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +/* Enable flexible speed on link-up */ +#define IGP01E1000_GMII_FLEX_SPD 0x0010 +#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */ + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0008 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define IGP03E1000_PHY_MISC_CTRL 0x1B +#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Manually Set Duplex */ + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* 
IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_PHY_H */ diff --git a/usr/src/uts/common/io/igb/igb_regs.h b/usr/src/uts/common/io/igb/igb_regs.h new file mode 100644 index 0000000000..d208b1bc03 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_regs.h @@ -0,0 +1,367 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. 
+ */ + +#ifndef _IGB_REGS_H +#define _IGB_REGS_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. 
Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ +#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFPCQ(_n) (0x02430 + (0x4 * 
(_n))) +#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +/* Split and Replication Rx Control - RW */ +#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ +#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ +#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ +#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ +#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +/* + * Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 \ + ? (0x02800 + ((_n) * 0x100)) \ + : (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 \ + ? (0x02804 + ((_n) * 0x100)) \ + : (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 \ + ? (0x02808 + ((_n) * 0x100)) \ + : (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 \ + ? (0x0280C + ((_n) * 0x100)) \ + : (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 \ + ? (0x02810 + ((_n) * 0x100)) \ + : (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 \ + ? (0x02818 + ((_n) * 0x100)) \ + : (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 \ + ? (0x02828 + ((_n) * 0x100)) \ + : (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 \ + ? (0x03800 + ((_n) * 0x100)) \ + : (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 \ + ? (0x03804 + ((_n) * 0x100)) \ + : (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 \ + ? (0x03808 + ((_n) * 0x100)) \ + : (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 \ + ? (0x03810 + ((_n) * 0x100)) \ + : (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 \ + ? 
(0x03818 + ((_n) * 0x100)) \ + : (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 \ + ? (0x03828 + ((_n) * 0x100)) \ + : (0x0E028 + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + (_n << 8)) +#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) +#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_TDWBAL(_n) ((_n) < 4 \ + ? (0x03838 + ((_n) * 0x100)) \ + : (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 \ + ? (0x0383C + ((_n) * 0x100)) \ + : (0x0E03C + ((_n) * 0x40))) +#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (0x05400 + ((_i) * 8)) +#define E1000_RAH(_i) (0x05404 + ((_i) * 8)) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TDPUMB 0x0357C /* DMA Tx Descriptor uC Mail Box - RW */ +#define E1000_TDPUAD 0x03580 /* DMA Tx Descriptor uC Addr Command - RW */ +#define E1000_TDPUWD 0x03584 /* DMA Tx Descriptor uC Data Write - RW */ +#define E1000_TDPURD 0x03588 /* DMA Tx Descriptor uC Data Read - RW */ +#define E1000_TDPUCTL 0x0358C /* DMA Tx Descriptor uC Control - RW */ +#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay 
Value - RW */ +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 
0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 
0x04100 /* Interrupt Assertion Count */ +/* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXPTC 0x04104 +/* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICRXATC 0x04108 +/* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C +/* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXATC 0x04110 +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +/* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICTXQMTC 0x0411C +/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ + +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 
0x04220 /* AN Next Page Transmit - RW */ +/* Link Partner Ability Next Page - RW */ +#define E1000_PCS_LPABNP 0x04224 +/* 1GSTAT Code Violation Packet Count - RW */ +#define E1000_1GSTAT_RCV 0x04228 +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ +#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Software-Firmware Synchronization - RW */ +#define E1000_SW_FW_SYNC 0x05B5C +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 
0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +/* Function Active and Power State to MNG */ +#define E1000_FACTPS 0x05B30 +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Inteface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +/* Immediate Interrupt Ext */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) +#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */ +/* MSI-X Allocation Register (_i) - RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) +/* MSI-X Table entry addr low reg 0 - RW */ +#define E1000_MSIXTADD(_i) (0x0C000 + ((_i) * 0x10)) +/* MSI-X Table entry addr upper reg 0 - RW */ +#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) +/* MSI-X Table entry message reg 0 - RW */ +#define E1000_MSIXTMSG(_i) (0x0C008 + ((_i) * 0x10)) +/* MSI-X Table entry vector ctrl reg 0 - RW */ +#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) +#define E1000_MSIXPBA 0x0E000 /* MSI-X Pending bit array */ +/* Redirection Table - RW Array */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) +/* RSS Random Key - RW Array */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 
/* RSS Interrupt Request */ + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_REGS_H */ diff --git a/usr/src/uts/common/io/igb/igb_rx.c b/usr/src/uts/common/io/igb/igb_rx.c new file mode 100644 index 0000000000..f217a33e3d --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_rx.c @@ -0,0 +1,364 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_sw.h" + +/* function prototypes */ +static mblk_t *igb_rx_bind(igb_rx_ring_t *, uint32_t, uint32_t); +static mblk_t *igb_rx_copy(igb_rx_ring_t *, uint32_t, uint32_t); +static void igb_rx_assoc_hcksum(mblk_t *, uint32_t); + +#ifndef IGB_DEBUG +#pragma inline(igb_rx_assoc_hcksum) +#endif + + +/* + * igb_rx_recycle - the call-back function to reclaim rx buffer + * + * This function is called when an mp is freed by the user thru + * freeb call (Only for mp constructed through desballoc call). + * It returns back the freed buffer to the free list. 
+ */ +void +igb_rx_recycle(caddr_t arg) +{ + igb_rx_ring_t *rx_ring; + rx_control_block_t *recycle_rcb; + uint32_t free_index; + + recycle_rcb = (rx_control_block_t *)(uintptr_t)arg; + rx_ring = recycle_rcb->rx_ring; + + if (recycle_rcb->state == RCB_FREE) + return; + + recycle_rcb->state = RCB_FREE; + + ASSERT(recycle_rcb->mp == NULL); + + /* + * Using the recycled data buffer to generate a new mblk + */ + recycle_rcb->mp = desballoc((unsigned char *) + (recycle_rcb->rx_buf.address - IPHDR_ALIGN_ROOM), + (recycle_rcb->rx_buf.size + IPHDR_ALIGN_ROOM), + 0, &recycle_rcb->free_rtn); + if (recycle_rcb->mp != NULL) { + recycle_rcb->mp->b_rptr += IPHDR_ALIGN_ROOM; + recycle_rcb->mp->b_wptr += IPHDR_ALIGN_ROOM; + } + + /* + * Put the recycled rx control block into free list + */ + mutex_enter(&rx_ring->recycle_lock); + + free_index = rx_ring->rcb_tail; + ASSERT(rx_ring->free_list[free_index] == NULL); + + rx_ring->free_list[free_index] = recycle_rcb; + rx_ring->rcb_tail = NEXT_INDEX(free_index, 1, rx_ring->free_list_size); + + mutex_exit(&rx_ring->recycle_lock); + + /* + * The atomic operation on the number of the available rx control + * blocks in the free list is used to make the recycling mutual + * exclusive with the receiving. 
+ */ + atomic_inc_32(&rx_ring->rcb_free); + ASSERT(rx_ring->rcb_free <= rx_ring->free_list_size); +} + +/* + * igb_rx_copy - Use copy to process the received packet + * + * This function will use bcopy to process the packet + * and send the copied packet upstream + */ +static mblk_t * +igb_rx_copy(igb_rx_ring_t *rx_ring, uint32_t index, uint32_t pkt_len) +{ + rx_control_block_t *current_rcb; + mblk_t *mp; + + current_rcb = rx_ring->work_list[index]; + + DMA_SYNC(¤t_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL); + + /* + * Allocate buffer to receive this packet + */ + mp = allocb(pkt_len + IPHDR_ALIGN_ROOM, 0); + if (mp == NULL) { + igb_log(rx_ring->igb, "igb_rx_copy: allocate buffer failed"); + return (NULL); + } + + /* + * Copy the data received into the new cluster + */ + mp->b_rptr += IPHDR_ALIGN_ROOM; + bcopy(current_rcb->rx_buf.address, mp->b_rptr, pkt_len); + mp->b_wptr = mp->b_rptr + pkt_len; + + return (mp); +} + +/* + * igb_rx_bind - Use existing DMA buffer to build mblk for receiving + * + * This function will use pre-bound DMA buffer to receive the packet + * and build mblk that will be sent upstream. + */ +static mblk_t * +igb_rx_bind(igb_rx_ring_t *rx_ring, uint32_t index, uint32_t pkt_len) +{ + rx_control_block_t *current_rcb; + rx_control_block_t *free_rcb; + uint32_t free_index; + mblk_t *mp; + + /* + * If the free list is empty, we cannot proceed to send + * the current DMA buffer upstream. We'll have to return + * and use bcopy to process the packet. + */ + if (igb_atomic_reserve(&rx_ring->rcb_free, 1) < 0) + return (NULL); + + current_rcb = rx_ring->work_list[index]; + /* + * If the mp of the rx control block is NULL, try to do + * desballoc again. 
+ */ + if (current_rcb->mp == NULL) { + current_rcb->mp = desballoc((unsigned char *) + (current_rcb->rx_buf.address - IPHDR_ALIGN_ROOM), + (current_rcb->rx_buf.size + IPHDR_ALIGN_ROOM), + 0, ¤t_rcb->free_rtn); + /* + * If it is failed to built a mblk using the current + * DMA buffer, we have to return and use bcopy to + * process the packet. + */ + if (current_rcb->mp == NULL) { + atomic_inc_32(&rx_ring->rcb_free); + return (NULL); + } + } + /* + * Sync up the data received + */ + DMA_SYNC(¤t_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL); + + mp = current_rcb->mp; + current_rcb->mp = NULL; + current_rcb->state = RCB_SENDUP; + + mp->b_wptr = mp->b_rptr + pkt_len; + mp->b_next = mp->b_cont = NULL; + + /* + * Strip off one free rx control block from the free list + */ + free_index = rx_ring->rcb_head; + free_rcb = rx_ring->free_list[free_index]; + ASSERT(free_rcb != NULL); + rx_ring->free_list[free_index] = NULL; + rx_ring->rcb_head = NEXT_INDEX(free_index, 1, rx_ring->free_list_size); + + /* + * Put the rx control block to the work list + */ + rx_ring->work_list[index] = free_rcb; + + return (mp); +} + +/* + * igb_rx_assoc_hcksum + * + * Check the rx hardware checksum status and associate the hcksum flags + */ +static void +igb_rx_assoc_hcksum(mblk_t *mp, uint32_t status_error) +{ + uint32_t hcksum_flags = 0; + + /* Ignore Checksum Indication */ + if (status_error & E1000_RXD_STAT_IXSM) + return; + + /* + * Check TCP/UDP checksum + */ + if (((status_error & E1000_RXD_STAT_TCPCS) || + (status_error & E1000_RXD_STAT_UDPCS)) && + !(status_error & E1000_RXDEXT_STATERR_TCPE)) + hcksum_flags |= HCK_FULLCKSUM | HCK_FULLCKSUM_OK; + + /* + * Check IP Checksum + */ + if ((status_error & E1000_RXD_STAT_IPCS) && + !(status_error & E1000_RXDEXT_STATERR_IPE)) + hcksum_flags |= HCK_IPV4_HDRCKSUM; + + if (hcksum_flags != 0) { + (void) hcksum_assoc(mp, + NULL, NULL, 0, 0, 0, 0, hcksum_flags, 0); + } +} + +/* + * igb_rx - Receive the data of one ring + * + * This function goes throught h/w 
descriptor in one specified rx ring, + * receives the data if the descriptor status shows the data is ready. + * It returns a chain of mblks containing the received data, to be + * passed up to mac_rx(). + */ +mblk_t * +igb_rx(igb_rx_ring_t *rx_ring) +{ + union e1000_adv_rx_desc *current_rbd; + rx_control_block_t *current_rcb; + mblk_t *mp; + mblk_t *mblk_head; + mblk_t **mblk_tail; + uint32_t rx_next; + uint32_t rx_tail; + uint32_t pkt_len; + uint32_t status_error; + uint32_t pkt_num; + igb_t *igb = rx_ring->igb; + + mblk_head = NULL; + mblk_tail = &mblk_head; + + /* + * Sync the receive descriptors before + * accepting the packets + */ + DMA_SYNC(&rx_ring->rbd_area, DDI_DMA_SYNC_FORKERNEL); + + /* + * Get the start point of rx bd ring which should be examined + * during this cycle. + */ + rx_next = rx_ring->rbd_next; + + current_rbd = &rx_ring->rbd_ring[rx_next]; + pkt_num = 0; + status_error = current_rbd->wb.upper.status_error; + while (status_error & E1000_RXD_STAT_DD) { + /* + * If hardware has found the errors, but the error + * is hardware checksum error, here does not discard the + * packet, and let upper layer compute the checksum; + * Otherwise discard the packet. + */ + if ((status_error & E1000_RXDEXT_ERR_FRAME_ERR_MASK) || + !(status_error & E1000_RXD_STAT_EOP)) { + IGB_DEBUG_STAT(rx_ring->stat_frame_error); + goto rx_discard; + } + + IGB_DEBUG_STAT_COND(rx_ring->stat_cksum_error, + (status_error & E1000_RXDEXT_STATERR_TCPE) || + (status_error & E1000_RXDEXT_STATERR_IPE)); + + pkt_len = current_rbd->wb.upper.length; + mp = NULL; + /* + * For packets with length more than the copy threshold, + * we'll firstly try to use the existed DMA buffer to built + * a mblk and send the mblk upstream. + * + * If the first method fails, or the packet length is less + * than the copy threshold, we'll allocate a new mblk and + * copy the packet data to the mblk. 
+ */ + if (pkt_len > rx_ring->copy_thresh) + mp = igb_rx_bind(rx_ring, rx_next, pkt_len); + + if (mp == NULL) + mp = igb_rx_copy(rx_ring, rx_next, pkt_len); + + if (mp != NULL) { + /* + * Check h/w checksum offload status + */ + if (igb->rx_hcksum_enable) + igb_rx_assoc_hcksum(mp, status_error); + + *mblk_tail = mp; + mblk_tail = &mp->b_next; + } + +rx_discard: + /* + * Reset rx descriptor read bits + */ + current_rcb = rx_ring->work_list[rx_next]; + current_rbd->read.pkt_addr = current_rcb->rx_buf.dma_address; + current_rbd->read.hdr_addr = 0; + + rx_next = NEXT_INDEX(rx_next, 1, rx_ring->ring_size); + + /* + * The receive function is in interrupt context, so here + * limit_per_intr is used to avoid doing receiving too long + * per interrupt. + */ + if (++pkt_num > rx_ring->limit_per_intr) { + IGB_DEBUG_STAT(rx_ring->stat_exceed_pkt); + break; + } + + current_rbd = &rx_ring->rbd_ring[rx_next]; + status_error = current_rbd->wb.upper.status_error; + } + + DMA_SYNC(&rx_ring->rbd_area, DDI_DMA_SYNC_FORDEV); + + rx_ring->rbd_next = rx_next; + + /* + * Update the h/w tail accordingly + */ + rx_tail = PREV_INDEX(rx_next, 1, rx_ring->ring_size); + + E1000_WRITE_REG(&igb->hw, E1000_RDT(rx_ring->index), rx_tail); + + return (mblk_head); +} diff --git a/usr/src/uts/common/io/igb/igb_stat.c b/usr/src/uts/common/io/igb/igb_stat.c new file mode 100644 index 0000000000..b44e4e9b3e --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_stat.c @@ -0,0 +1,268 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
+ * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_sw.h" + +/* + * Update driver private statistics. + */ +static int +igb_update_stats(kstat_t *ks, int rw) +{ + igb_t *igb; + struct e1000_hw *hw; + igb_stat_t *igb_ks; + uint32_t val_low, val_high; +#ifdef IGB_DEBUG + int i; +#endif + + if (rw == KSTAT_WRITE) + return (EACCES); + + igb = (igb_t *)ks->ks_private; + igb_ks = (igb_stat_t *)ks->ks_data; + hw = &igb->hw; + + mutex_enter(&igb->gen_lock); + + /* + * Basic information. + */ + igb_ks->link_speed.value.ui64 = igb->link_speed; + +#ifdef IGB_DEBUG + igb_ks->reset_count.value.ui64 = igb->reset_count; + + igb_ks->rx_frame_error.value.ui64 = 0; + igb_ks->rx_cksum_error.value.ui64 = 0; + igb_ks->rx_exceed_pkt.value.ui64 = 0; + for (i = 0; i < igb->num_rx_rings; i++) { + igb_ks->rx_frame_error.value.ui64 += + igb->rx_rings[i].stat_frame_error; + igb_ks->rx_cksum_error.value.ui64 += + igb->rx_rings[i].stat_cksum_error; + igb_ks->rx_exceed_pkt.value.ui64 += + igb->rx_rings[i].stat_exceed_pkt; + } + + igb_ks->tx_overload.value.ui64 = 0; + igb_ks->tx_fail_no_tbd.value.ui64 = 0; + igb_ks->tx_fail_no_tcb.value.ui64 = 0; + igb_ks->tx_fail_dma_bind.value.ui64 = 0; + igb_ks->tx_reschedule.value.ui64 = 0; + for (i = 0; i < igb->num_tx_rings; i++) { + igb_ks->tx_overload.value.ui64 += + igb->tx_rings[i].stat_overload; + igb_ks->tx_fail_no_tbd.value.ui64 += + igb->tx_rings[i].stat_fail_no_tbd; + igb_ks->tx_fail_no_tcb.value.ui64 += + igb->tx_rings[i].stat_fail_no_tcb; + 
igb_ks->tx_fail_dma_bind.value.ui64 += + igb->tx_rings[i].stat_fail_dma_bind; + igb_ks->tx_reschedule.value.ui64 += + igb->tx_rings[i].stat_reschedule; + } + + /* + * Hardware calculated statistics. + */ + igb_ks->gprc.value.ul += E1000_READ_REG(hw, E1000_GPRC); + igb_ks->gptc.value.ul += E1000_READ_REG(hw, E1000_GPTC); + igb_ks->prc64.value.ul += E1000_READ_REG(hw, E1000_PRC64); + igb_ks->prc127.value.ul += E1000_READ_REG(hw, E1000_PRC127); + igb_ks->prc255.value.ul += E1000_READ_REG(hw, E1000_PRC255); + igb_ks->prc511.value.ul += E1000_READ_REG(hw, E1000_PRC511); + igb_ks->prc1023.value.ul += E1000_READ_REG(hw, E1000_PRC1023); + igb_ks->prc1522.value.ul += E1000_READ_REG(hw, E1000_PRC1522); + igb_ks->ptc64.value.ul += E1000_READ_REG(hw, E1000_PTC64); + igb_ks->ptc127.value.ul += E1000_READ_REG(hw, E1000_PTC127); + igb_ks->ptc255.value.ul += E1000_READ_REG(hw, E1000_PTC255); + igb_ks->ptc511.value.ul += E1000_READ_REG(hw, E1000_PTC511); + igb_ks->ptc1023.value.ul += E1000_READ_REG(hw, E1000_PTC1023); + igb_ks->ptc1522.value.ul += E1000_READ_REG(hw, E1000_PTC1522); + + /* + * The 64-bit register will reset whenever the upper + * 32 bits are read. So we need to read the lower + * 32 bits first, then read the upper 32 bits. 
+ */ + val_low = E1000_READ_REG(hw, E1000_GORCL); + val_high = E1000_READ_REG(hw, E1000_GORCH); + igb_ks->gor.value.ui64 += (uint64_t)val_high << 32 | (uint64_t)val_low; + + val_low = E1000_READ_REG(hw, E1000_GOTCL); + val_high = E1000_READ_REG(hw, E1000_GOTCH); + igb_ks->got.value.ui64 += (uint64_t)val_high << 32 | (uint64_t)val_low; +#endif + + igb_ks->symerrs.value.ui64 += E1000_READ_REG(hw, E1000_SYMERRS); + igb_ks->mpc.value.ui64 += E1000_READ_REG(hw, E1000_MPC); + igb_ks->rlec.value.ui64 += E1000_READ_REG(hw, E1000_RLEC); + igb_ks->fcruc.value.ui64 += E1000_READ_REG(hw, E1000_FCRUC); + igb_ks->rfc.value.ul += E1000_READ_REG(hw, E1000_RFC); + igb_ks->tncrs.value.ul += E1000_READ_REG(hw, E1000_TNCRS); + igb_ks->tsctc.value.ul += E1000_READ_REG(hw, E1000_TSCTC); + igb_ks->tsctfc.value.ul += E1000_READ_REG(hw, E1000_TSCTFC); + igb_ks->xonrxc.value.ui64 += E1000_READ_REG(hw, E1000_XONRXC); + igb_ks->xontxc.value.ui64 += E1000_READ_REG(hw, E1000_XONTXC); + igb_ks->xoffrxc.value.ui64 += E1000_READ_REG(hw, E1000_XOFFRXC); + igb_ks->xofftxc.value.ui64 += E1000_READ_REG(hw, E1000_XOFFTXC); + + mutex_exit(&igb->gen_lock); + + return (0); +} + +/* + * Create and initialize the driver private statistics. + */ +int +igb_init_stats(igb_t *igb) +{ + kstat_t *ks; + igb_stat_t *igb_ks; + + /* + * Create and init kstat + */ + ks = kstat_create(MODULE_NAME, ddi_get_instance(igb->dip), + "statistics", "net", KSTAT_TYPE_NAMED, + sizeof (igb_stat_t) / sizeof (kstat_named_t), 0); + + if (ks == NULL) { + igb_error(igb, + "Could not create kernel statistics"); + return (IGB_FAILURE); + } + + igb->igb_ks = ks; + + igb_ks = (igb_stat_t *)ks->ks_data; + + /* + * Initialize all the statistics. 
+ */ + kstat_named_init(&igb_ks->link_speed, "link_speed", + KSTAT_DATA_UINT64); + +#ifdef IGB_DEBUG + kstat_named_init(&igb_ks->reset_count, "reset_count", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->rx_frame_error, "rx_frame_error", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->rx_cksum_error, "rx_cksum_error", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->rx_exceed_pkt, "rx_exceed_pkt", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->tx_overload, "tx_overload", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->tx_fail_no_tbd, "tx_fail_no_tbd", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->tx_fail_no_tcb, "tx_fail_no_tcb", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->tx_fail_dma_bind, "tx_fail_dma_bind", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->tx_reschedule, "tx_reschedule", + KSTAT_DATA_UINT64); + + kstat_named_init(&igb_ks->gprc, "good_pkts_recvd", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->gptc, "good_pkts_xmitd", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->gor, "good_octets_recvd", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->got, "good_octets_xmitd", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->prc64, "pkts_recvd_( 64b)", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->prc127, "pkts_recvd_( 65- 127b)", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->prc255, "pkts_recvd_( 127- 255b)", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->prc511, "pkts_recvd_( 256- 511b)", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->prc1023, "pkts_recvd_( 511-1023b)", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->prc1522, "pkts_recvd_(1024-1522b)", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->ptc64, "pkts_xmitd_( 64b)", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->ptc127, "pkts_xmitd_( 65- 127b)", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->ptc255, "pkts_xmitd_( 128- 255b)", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->ptc511, "pkts_xmitd_( 255- 511b)", + KSTAT_DATA_UINT64); + 
kstat_named_init(&igb_ks->ptc1023, "pkts_xmitd_( 512-1023b)", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->ptc1522, "pkts_xmitd_(1024-1522b)", + KSTAT_DATA_UINT64); +#endif + + kstat_named_init(&igb_ks->symerrs, "recv_symbol_errors", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->mpc, "recv_missed_packets", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->rlec, "recv_length_errors", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->fcruc, "recv_unsupport_FC_pkts", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->rfc, "recv_frag", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->tncrs, "xmit_with_no_CRS", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->tsctc, "xmit_TCP_seg_contexts", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->tsctfc, "xmit_TCP_seg_contexts_fail", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->xonrxc, "XONs_recvd", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->xontxc, "XONs_xmitd", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->xoffrxc, "XOFFs_recvd", + KSTAT_DATA_UINT64); + kstat_named_init(&igb_ks->xofftxc, "XOFFs_xmitd", + KSTAT_DATA_UINT64); + + /* + * Function to provide kernel stat update on demand + */ + ks->ks_update = igb_update_stats; + + ks->ks_private = (void *)igb; + + /* + * Add kstat to systems kstat chain + */ + kstat_install(ks); + + return (IGB_SUCCESS); +} diff --git a/usr/src/uts/common/io/igb/igb_sw.h b/usr/src/uts/common/io/igb/igb_sw.h new file mode 100644 index 0000000000..bd376c28db --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_sw.h @@ -0,0 +1,829 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. 
+ * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. + * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#ifndef _IGB_SW_H +#define _IGB_SW_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +#include <sys/types.h> +#include <sys/conf.h> +#include <sys/debug.h> +#include <sys/stropts.h> +#include <sys/stream.h> +#include <sys/strsun.h> +#include <sys/strlog.h> +#include <sys/kmem.h> +#include <sys/stat.h> +#include <sys/kstat.h> +#include <sys/modctl.h> +#include <sys/errno.h> +#include <sys/dlpi.h> +#include <sys/mac.h> +#include <sys/mac_ether.h> +#include <sys/vlan.h> +#include <sys/ddi.h> +#include <sys/sunddi.h> +#include <sys/pci.h> +#include <sys/pcie.h> +#include <sys/sdt.h> +#include <sys/ethernet.h> +#include <sys/pattr.h> +#include <sys/strsubr.h> +#include <sys/netlb.h> +#include <sys/random.h> +#include <inet/common.h> +#include <inet/ip.h> +#include <inet/mi.h> +#include <inet/nd.h> +#include "igb_api.h" +#include "igb_82575.h" + + +#define MODULE_NAME "igb" /* module name */ + +#define IGB_SUCCESS DDI_SUCCESS +#define IGB_FAILURE DDI_FAILURE + +#define IGB_UNKNOWN 0x00 +#define IGB_INITIALIZED 0x01 +#define IGB_STARTED 0x02 +#define IGB_SUSPENDED 0x04 + +#define IGB_INTR_NONE 0 +#define IGB_INTR_MSIX 1 +#define IGB_INTR_MSI 2 +#define IGB_INTR_LEGACY 3 + +#define MAX_NUM_UNICAST_ADDRESSES E1000_RAR_ENTRIES +#define MAX_NUM_MULTICAST_ADDRESSES 256 +#define MAX_NUM_EITR 10 +#define MAX_COOKIE 16 +#define 
MIN_NUM_TX_DESC 2 + +/* + * Maximum values for user configurable parameters + */ +#define MAX_TX_QUEUE_NUM 4 +#define MAX_RX_QUEUE_NUM 4 +#define MAX_TX_RING_SIZE 4096 +#define MAX_RX_RING_SIZE 4096 + +#define MAX_MTU 9000 +#define MAX_RX_LIMIT_PER_INTR 4096 +#define MAX_RX_INTR_DELAY 65535 +#define MAX_RX_INTR_ABS_DELAY 65535 +#define MAX_TX_INTR_DELAY 65535 +#define MAX_TX_INTR_ABS_DELAY 65535 +#define MAX_INTR_THROTTLING 65535 + +#define MAX_RX_COPY_THRESHOLD 9216 +#define MAX_TX_COPY_THRESHOLD 9216 +#define MAX_TX_RECYCLE_THRESHOLD DEFAULT_TX_RING_SIZE +#define MAX_TX_OVERLOAD_THRESHOLD DEFAULT_TX_RING_SIZE +#define MAX_TX_RESCHED_THRESHOLD DEFAULT_TX_RING_SIZE + +/* + * Minimum values for user configurable parameters + */ +#define MIN_TX_QUEUE_NUM 1 +#define MIN_RX_QUEUE_NUM 1 +#define MIN_TX_RING_SIZE 64 +#define MIN_RX_RING_SIZE 64 + +#define MIN_MTU ETHERMIN +#define MIN_RX_LIMIT_PER_INTR 16 +#define MIN_RX_INTR_DELAY 0 +#define MIN_RX_INTR_ABS_DELAY 0 +#define MIN_TX_INTR_DELAY 0 +#define MIN_TX_INTR_ABS_DELAY 0 +#define MIN_INTR_THROTTLING 0 +#define MIN_RX_COPY_THRESHOLD 0 +#define MIN_TX_COPY_THRESHOLD 0 +#define MIN_TX_RECYCLE_THRESHOLD MIN_NUM_TX_DESC +#define MIN_TX_OVERLOAD_THRESHOLD MIN_NUM_TX_DESC +#define MIN_TX_RESCHED_THRESHOLD MIN_NUM_TX_DESC + +/* + * Default values for user configurable parameters + */ +#define DEFAULT_TX_QUEUE_NUM 1 +#define DEFAULT_RX_QUEUE_NUM 1 +#define DEFAULT_TX_RING_SIZE 512 +#define DEFAULT_RX_RING_SIZE 512 + +#define DEFAULT_MTU ETHERMTU +#define DEFAULT_RX_LIMIT_PER_INTR 256 +#define DEFAULT_RX_INTR_DELAY 0 +#define DEFAULT_RX_INTR_ABS_DELAY 0 +#define DEFAULT_TX_INTR_DELAY 300 +#define DEFAULT_TX_INTR_ABS_DELAY 0 +#define DEFAULT_INTR_THROTTLING 200 /* In unit of 256 nsec */ +#define DEFAULT_RX_COPY_THRESHOLD 128 +#define DEFAULT_TX_COPY_THRESHOLD 512 +#define DEFAULT_TX_RECYCLE_THRESHOLD MAX_COOKIE +#define DEFAULT_TX_OVERLOAD_THRESHOLD MIN_NUM_TX_DESC +#define DEFAULT_TX_RESCHED_THRESHOLD 128 + +#define 
TX_DRAIN_TIME 200 +#define RX_DRAIN_TIME 200 + +#define STALL_WATCHDOG_TIMEOUT 8 /* 8 seconds */ +#define MAX_LINK_DOWN_TIMEOUT 8 /* 8 seconds */ + +/* + * Defined for IP header alignment. + */ +#define IPHDR_ALIGN_ROOM 2 + +/* + * Bit flags for attach_progress + */ +#define ATTACH_PROGRESS_PCI_CONFIG 0x0001 /* PCI config setup */ +#define ATTACH_PROGRESS_REGS_MAP 0x0002 /* Registers mapped */ +#define ATTACH_PROGRESS_PROPS 0x0004 /* Properties initialized */ +#define ATTACH_PROGRESS_ALLOC_INTR 0x0008 /* Interrupts allocated */ +#define ATTACH_PROGRESS_ALLOC_RINGS 0x0010 /* Rings allocated */ +#define ATTACH_PROGRESS_ADD_INTR 0x0020 /* Intr handlers added */ +#define ATTACH_PROGRESS_LOCKS 0x0040 /* Locks initialized */ +#define ATTACH_PROGRESS_INIT 0x0080 /* Device initialized */ +#define ATTACH_PROGRESS_INIT_RINGS 0x0100 /* Rings initialized */ +#define ATTACH_PROGRESS_STATS 0x0200 /* Kstats created */ +#define ATTACH_PROGRESS_NDD 0x0400 /* NDD initialized */ +#define ATTACH_PROGRESS_MAC 0x0800 /* MAC registered */ +#define ATTACH_PROGRESS_ENABLE_INTR 0x1000 /* DDI interrupts enabled */ + + +#define PROP_ADV_AUTONEG_CAP "adv_autoneg_cap" +#define PROP_ADV_1000FDX_CAP "adv_1000fdx_cap" +#define PROP_ADV_1000HDX_CAP "adv_1000hdx_cap" +#define PROP_ADV_100FDX_CAP "adv_100fdx_cap" +#define PROP_ADV_100HDX_CAP "adv_100hdx_cap" +#define PROP_ADV_10FDX_CAP "adv_10fdx_cap" +#define PROP_ADV_10HDX_CAP "adv_10hdx_cap" +#define PROP_DEFAULT_MTU "default_mtu" +#define PROP_FLOW_CONTROL "flow_control" +#define PROP_TX_QUEUE_NUM "tx_queue_number" +#define PROP_TX_RING_SIZE "tx_ring_size" +#define PROP_RX_QUEUE_NUM "rx_queue_number" +#define PROP_RX_RING_SIZE "rx_ring_size" + +#define PROP_INTR_FORCE "intr_force" +#define PROP_TX_HCKSUM_ENABLE "tx_hcksum_enable" +#define PROP_RX_HCKSUM_ENABLE "rx_hcksum_enable" +#define PROP_LSO_ENABLE "lso_enable" +#define PROP_TX_HEAD_WB_ENABLE "tx_head_wb_enable" +#define PROP_TX_COPY_THRESHOLD "tx_copy_threshold" +#define 
PROP_TX_RECYCLE_THRESHOLD "tx_recycle_threshold" +#define PROP_TX_OVERLOAD_THRESHOLD "tx_overload_threshold" +#define PROP_TX_RESCHED_THRESHOLD "tx_resched_threshold" +#define PROP_RX_COPY_THRESHOLD "rx_copy_threshold" +#define PROP_RX_LIMIT_PER_INTR "rx_limit_per_intr" +#define PROP_INTR_THROTTLING "intr_throttling" + +#define IGB_LB_NONE 0 +#define IGB_LB_EXTERNAL 1 +#define IGB_LB_INTERNAL_MAC 2 +#define IGB_LB_INTERNAL_PHY 3 +#define IGB_LB_INTERNAL_SERDES 4 + +/* + * Shorthand for the NDD parameters + */ +#define param_autoneg_cap nd_params[PARAM_AUTONEG_CAP].val +#define param_pause_cap nd_params[PARAM_PAUSE_CAP].val +#define param_asym_pause_cap nd_params[PARAM_ASYM_PAUSE_CAP].val +#define param_1000fdx_cap nd_params[PARAM_1000FDX_CAP].val +#define param_1000hdx_cap nd_params[PARAM_1000HDX_CAP].val +#define param_100t4_cap nd_params[PARAM_100T4_CAP].val +#define param_100fdx_cap nd_params[PARAM_100FDX_CAP].val +#define param_100hdx_cap nd_params[PARAM_100HDX_CAP].val +#define param_10fdx_cap nd_params[PARAM_10FDX_CAP].val +#define param_10hdx_cap nd_params[PARAM_10HDX_CAP].val +#define param_rem_fault nd_params[PARAM_REM_FAULT].val + +#define param_adv_autoneg_cap nd_params[PARAM_ADV_AUTONEG_CAP].val +#define param_adv_pause_cap nd_params[PARAM_ADV_PAUSE_CAP].val +#define param_adv_asym_pause_cap nd_params[PARAM_ADV_ASYM_PAUSE_CAP].val +#define param_adv_1000fdx_cap nd_params[PARAM_ADV_1000FDX_CAP].val +#define param_adv_1000hdx_cap nd_params[PARAM_ADV_1000HDX_CAP].val +#define param_adv_100t4_cap nd_params[PARAM_ADV_100T4_CAP].val +#define param_adv_100fdx_cap nd_params[PARAM_ADV_100FDX_CAP].val +#define param_adv_100hdx_cap nd_params[PARAM_ADV_100HDX_CAP].val +#define param_adv_10fdx_cap nd_params[PARAM_ADV_10FDX_CAP].val +#define param_adv_10hdx_cap nd_params[PARAM_ADV_10HDX_CAP].val +#define param_adv_rem_fault nd_params[PARAM_ADV_REM_FAULT].val + +#define param_lp_autoneg_cap nd_params[PARAM_LP_AUTONEG_CAP].val +#define param_lp_pause_cap 
nd_params[PARAM_LP_PAUSE_CAP].val +#define param_lp_asym_pause_cap nd_params[PARAM_LP_ASYM_PAUSE_CAP].val +#define param_lp_1000fdx_cap nd_params[PARAM_LP_1000FDX_CAP].val +#define param_lp_1000hdx_cap nd_params[PARAM_LP_1000HDX_CAP].val +#define param_lp_100t4_cap nd_params[PARAM_LP_100T4_CAP].val +#define param_lp_100fdx_cap nd_params[PARAM_LP_100FDX_CAP].val +#define param_lp_100hdx_cap nd_params[PARAM_LP_100HDX_CAP].val +#define param_lp_10fdx_cap nd_params[PARAM_LP_10FDX_CAP].val +#define param_lp_10hdx_cap nd_params[PARAM_LP_10HDX_CAP].val +#define param_lp_rem_fault nd_params[PARAM_LP_REM_FAULT].val + +enum ioc_reply { + IOC_INVAL = -1, /* bad, NAK with EINVAL */ + IOC_DONE, /* OK, reply sent */ + IOC_ACK, /* OK, just send ACK */ + IOC_REPLY /* OK, just send reply */ +}; + +#define MBLK_LEN(mp) ((uintptr_t)(mp)->b_wptr - \ + (uintptr_t)(mp)->b_rptr) + +#define DMA_SYNC(area, flag) ((void) ddi_dma_sync((area)->dma_handle, \ + 0, 0, (flag))) + +/* + * Defined for ring index operations + * ASSERT(index < limit) + * ASSERT(step < limit) + * ASSERT(index1 < limit) + * ASSERT(index2 < limit) + */ +#define NEXT_INDEX(index, step, limit) (((index) + (step)) < (limit) ? \ + (index) + (step) : (index) + (step) - (limit)) +#define PREV_INDEX(index, step, limit) ((index) >= (step) ? \ + (index) - (step) : (index) + (limit) - (step)) +#define OFFSET(index1, index2, limit) ((index1) <= (index2) ? 
\ + (index2) - (index1) : (index2) + (limit) - (index1)) + +#define LINK_LIST_INIT(_LH) \ + (_LH)->head = (_LH)->tail = NULL + +#define LIST_GET_HEAD(_LH) ((single_link_t *)((_LH)->head)) + +#define LIST_POP_HEAD(_LH) \ + (single_link_t *)(_LH)->head; \ + { \ + if ((_LH)->head != NULL) { \ + (_LH)->head = (_LH)->head->link; \ + if ((_LH)->head == NULL) \ + (_LH)->tail = NULL; \ + } \ + } + +#define LIST_GET_TAIL(_LH) ((single_link_t *)((_LH)->tail)) + +#define LIST_PUSH_TAIL(_LH, _E) \ + if ((_LH)->tail != NULL) { \ + (_LH)->tail->link = (single_link_t *)(_E); \ + (_LH)->tail = (single_link_t *)(_E); \ + } else { \ + (_LH)->head = (_LH)->tail = (single_link_t *)(_E); \ + } \ + (_E)->link = NULL; + +#define LIST_GET_NEXT(_LH, _E) \ + (((_LH)->tail == (single_link_t *)(_E)) ? \ + NULL : ((single_link_t *)(_E))->link) + + +typedef struct single_link { + struct single_link *link; +} single_link_t; + +typedef struct link_list { + single_link_t *head; + single_link_t *tail; +} link_list_t; + +/* + * Property lookups + */ +#define IGB_PROP_EXISTS(d, n) ddi_prop_exists(DDI_DEV_T_ANY, (d), \ + DDI_PROP_DONTPASS, (n)) +#define IGB_PROP_GET_INT(d, n) ddi_prop_get_int(DDI_DEV_T_ANY, (d), \ + DDI_PROP_DONTPASS, (n), -1) + + +/* + * Named Data (ND) Parameter Management Structure + */ +typedef struct { + struct igb *private; + uint32_t info; + uint32_t min; + uint32_t max; + uint32_t val; + char *name; +} nd_param_t; + +/* + * NDD parameter indexes, divided into: + * + * read-only parameters describing the hardware's capabilities + * read-write parameters controlling the advertised capabilities + * read-only parameters describing the partner's capabilities + * read-write parameters controlling the force speed and duplex + * read-only parameters describing the link state + * read-only parameters describing the driver properties + * read-write parameters controlling the driver properties + */ +enum { + PARAM_AUTONEG_CAP, + PARAM_PAUSE_CAP, + PARAM_ASYM_PAUSE_CAP, + 
PARAM_1000FDX_CAP, + PARAM_1000HDX_CAP, + PARAM_100T4_CAP, + PARAM_100FDX_CAP, + PARAM_100HDX_CAP, + PARAM_10FDX_CAP, + PARAM_10HDX_CAP, + PARAM_REM_FAULT, + + PARAM_ADV_AUTONEG_CAP, + PARAM_ADV_PAUSE_CAP, + PARAM_ADV_ASYM_PAUSE_CAP, + PARAM_ADV_1000FDX_CAP, + PARAM_ADV_1000HDX_CAP, + PARAM_ADV_100T4_CAP, + PARAM_ADV_100FDX_CAP, + PARAM_ADV_100HDX_CAP, + PARAM_ADV_10FDX_CAP, + PARAM_ADV_10HDX_CAP, + PARAM_ADV_REM_FAULT, + + PARAM_LP_AUTONEG_CAP, + PARAM_LP_PAUSE_CAP, + PARAM_LP_ASYM_PAUSE_CAP, + PARAM_LP_1000FDX_CAP, + PARAM_LP_1000HDX_CAP, + PARAM_LP_100T4_CAP, + PARAM_LP_100FDX_CAP, + PARAM_LP_100HDX_CAP, + PARAM_LP_10FDX_CAP, + PARAM_LP_10HDX_CAP, + PARAM_LP_REM_FAULT, + + PARAM_LINK_STATUS, + PARAM_LINK_SPEED, + PARAM_LINK_DUPLEX, + + PARAM_COUNT +}; + +typedef union igb_ether_addr { + struct { + uint32_t high; + uint32_t low; + } reg; + struct { + uint8_t set; + uint8_t redundant; + uint8_t addr[ETHERADDRL]; + } mac; +} igb_ether_addr_t; + +typedef enum { + USE_NONE, + USE_COPY, + USE_DMA +} tx_type_t; + +typedef enum { + RCB_FREE, + RCB_SENDUP +} rcb_state_t; + +typedef struct hcksum_context { + uint32_t hcksum_flags; + uint32_t ip_hdr_len; + uint32_t mac_hdr_len; + uint32_t l4_proto; +} hcksum_context_t; + +/* Hold address/length of each DMA segment */ +typedef struct sw_desc { + uint64_t address; + size_t length; +} sw_desc_t; + +/* Handles and addresses of DMA buffer */ +typedef struct dma_buffer { + caddr_t address; /* Virtual address */ + uint64_t dma_address; /* DMA (Hardware) address */ + ddi_acc_handle_t acc_handle; /* Data access handle */ + ddi_dma_handle_t dma_handle; /* DMA handle */ + size_t size; /* Buffer size */ + size_t len; /* Data length in the buffer */ +} dma_buffer_t; + +/* + * Tx Control Block + */ +typedef struct tx_control_block { + single_link_t link; + uint32_t frag_num; + uint32_t desc_num; + mblk_t *mp; + tx_type_t tx_type; + ddi_dma_handle_t tx_dma_handle; + dma_buffer_t tx_buf; + sw_desc_t desc[MAX_COOKIE]; +} 
tx_control_block_t; + +/* + * RX Control Block + */ +typedef struct rx_control_block { + mblk_t *mp; + rcb_state_t state; + dma_buffer_t rx_buf; + frtn_t free_rtn; + struct igb_rx_ring *rx_ring; +} rx_control_block_t; + +/* + * Software Data Structure for Tx Ring + */ +typedef struct igb_tx_ring { + uint32_t index; /* Ring index */ + + /* + * Mutexes + */ + kmutex_t tx_lock; + kmutex_t recycle_lock; + kmutex_t tcb_head_lock; + kmutex_t tcb_tail_lock; + + /* + * Tx descriptor ring definitions + */ + dma_buffer_t tbd_area; + union e1000_adv_tx_desc *tbd_ring; + uint32_t tbd_head; /* Index of next tbd to recycle */ + uint32_t tbd_tail; /* Index of next tbd to transmit */ + uint32_t tbd_free; /* Number of free tbd */ + + /* + * Tx control block list definitions + */ + tx_control_block_t *tcb_area; + tx_control_block_t **work_list; + tx_control_block_t **free_list; + uint32_t tcb_head; /* Head index of free list */ + uint32_t tcb_tail; /* Tail index of free list */ + uint32_t tcb_free; /* Number of free tcb in free list */ + + uint32_t *tbd_head_wb; /* Head write-back */ + uint32_t (*tx_recycle)(struct igb_tx_ring *); + + /* + * TCP/UDP checksum offload + */ + hcksum_context_t hcksum_context; + + /* + * Tx ring settings and status + */ + uint32_t ring_size; /* Tx descriptor ring size */ + uint32_t free_list_size; /* Tx free list size */ + uint32_t copy_thresh; + uint32_t recycle_thresh; + uint32_t overload_thresh; + uint32_t resched_thresh; + + boolean_t reschedule; + uint32_t recycle_fail; + uint32_t stall_watchdog; + +#ifdef IGB_DEBUG + /* + * Debug statistics + */ + uint32_t stat_overload; + uint32_t stat_fail_no_tbd; + uint32_t stat_fail_no_tcb; + uint32_t stat_fail_dma_bind; + uint32_t stat_reschedule; +#endif + + /* + * Pointer to the igb struct + */ + struct igb *igb; + +} igb_tx_ring_t; + +/* + * Software Receive Ring + */ +typedef struct igb_rx_ring { + uint32_t index; /* Ring index */ + uint32_t intr_vector; /* Interrupt vector index */ + + /* + * Mutexes + */ 
+ kmutex_t rx_lock; /* Rx access lock */ + kmutex_t recycle_lock; /* Recycle lock, for rcb_tail */ + + /* + * Rx descriptor ring definitions + */ + dma_buffer_t rbd_area; /* DMA buffer of rx desc ring */ + union e1000_adv_rx_desc *rbd_ring; /* Rx desc ring */ + uint32_t rbd_next; /* Index of next rx desc */ + + /* + * Rx control block list definitions + */ + rx_control_block_t *rcb_area; + rx_control_block_t **work_list; /* Work list of rcbs */ + rx_control_block_t **free_list; /* Free list of rcbs */ + uint32_t rcb_head; /* Index of next free rcb */ + uint32_t rcb_tail; /* Index to put recycled rcb */ + uint32_t rcb_free; /* Number of free rcbs */ + + /* + * Rx ring settings and status + */ + uint32_t ring_size; /* Rx descriptor ring size */ + uint32_t free_list_size; /* Rx free list size */ + uint32_t limit_per_intr; /* Max packets per interrupt */ + uint32_t copy_thresh; + +#ifdef IGB_DEBUG + /* + * Debug statistics + */ + uint32_t stat_frame_error; + uint32_t stat_cksum_error; + uint32_t stat_exceed_pkt; +#endif + + struct igb *igb; /* Pointer to igb struct */ + +} igb_rx_ring_t; + +typedef struct igb { + int instance; + mac_handle_t mac_hdl; + dev_info_t *dip; + struct e1000_hw hw; + struct igb_osdep osdep; + + uint32_t igb_state; + link_state_t link_state; + uint32_t link_speed; + uint32_t link_duplex; + uint32_t link_down_timeout; + + uint32_t reset_count; + uint32_t attach_progress; + uint32_t loopback_mode; + uint32_t max_frame_size; + + /* + * Receive Rings + */ + igb_rx_ring_t *rx_rings; /* Array of rx rings */ + uint32_t num_rx_rings; /* Number of rx rings in use */ + uint32_t rx_ring_size; /* Rx descriptor ring size */ + uint32_t rx_buf_size; /* Rx buffer size */ + + /* + * Transmit Rings + */ + igb_tx_ring_t *tx_rings; /* Array of tx rings */ + uint32_t num_tx_rings; /* Number of tx rings in use */ + uint32_t tx_ring_size; /* Tx descriptor ring size */ + uint32_t tx_buf_size; /* Tx buffer size */ + + boolean_t tx_head_wb_enable; /* Tx head wrtie-back 
*/ + boolean_t tx_hcksum_enable; /* Tx h/w cksum offload */ + boolean_t lso_enable; /* Large Segment Offload */ + uint32_t tx_copy_thresh; /* Tx copy threshold */ + uint32_t tx_recycle_thresh; /* Tx recycle threshold */ + uint32_t tx_overload_thresh; /* Tx overload threshold */ + uint32_t tx_resched_thresh; /* Tx reschedule threshold */ + boolean_t rx_hcksum_enable; /* Rx h/w cksum offload */ + uint32_t rx_copy_thresh; /* Rx copy threshold */ + uint32_t rx_limit_per_intr; /* Rx pkts per interrupt */ + uint32_t intr_throttling[MAX_NUM_EITR]; + uint32_t intr_force; + + int intr_type; + int intr_cnt; + int intr_cap; + size_t intr_size; + uint_t intr_pri; + ddi_intr_handle_t *htable; + uint32_t eims_mask; + + kmutex_t gen_lock; /* General lock for device access */ + kmutex_t watchdog_lock; + + boolean_t watchdog_enable; + boolean_t watchdog_start; + timeout_id_t watchdog_tid; + + boolean_t unicst_init; + uint32_t unicst_avail; + uint32_t unicst_total; + igb_ether_addr_t unicst_addr[MAX_NUM_UNICAST_ADDRESSES]; + uint32_t mcast_count; + struct ether_addr mcast_table[MAX_NUM_MULTICAST_ADDRESSES]; + + /* + * Kstat definitions + */ + kstat_t *igb_ks; + + /* + * NDD definitions + */ + caddr_t nd_data; + nd_param_t nd_params[PARAM_COUNT]; + +} igb_t; + +typedef struct igb_stat { + + kstat_named_t link_speed; /* Link Speed */ +#ifdef IGB_DEBUG + kstat_named_t reset_count; /* Reset Count */ + + kstat_named_t rx_frame_error; /* Rx Error in Packet */ + kstat_named_t rx_cksum_error; /* Rx Checksum Error */ + kstat_named_t rx_exceed_pkt; /* Rx Exceed Max Pkt Count */ + + kstat_named_t tx_overload; /* Tx Desc Ring Overload */ + kstat_named_t tx_fail_no_tcb; /* Tx Fail Freelist Empty */ + kstat_named_t tx_fail_no_tbd; /* Tx Fail Desc Ring Empty */ + kstat_named_t tx_fail_dma_bind; /* Tx Fail DMA bind */ + kstat_named_t tx_reschedule; /* Tx Reschedule */ + + kstat_named_t gprc; /* Good Packets Received Count */ + kstat_named_t gptc; /* Good Packets Xmitted Count */ + kstat_named_t 
gor; /* Good Octets Received Count */ + kstat_named_t got; /* Good Octets Xmitd Count */ + kstat_named_t prc64; /* Packets Received - 64b */ + kstat_named_t prc127; /* Packets Received - 65-127b */ + kstat_named_t prc255; /* Packets Received - 127-255b */ + kstat_named_t prc511; /* Packets Received - 256-511b */ + kstat_named_t prc1023; /* Packets Received - 511-1023b */ + kstat_named_t prc1522; /* Packets Received - 1024-1522b */ + kstat_named_t ptc64; /* Packets Xmitted (64b) */ + kstat_named_t ptc127; /* Packets Xmitted (64-127b) */ + kstat_named_t ptc255; /* Packets Xmitted (128-255b) */ + kstat_named_t ptc511; /* Packets Xmitted (255-511b) */ + kstat_named_t ptc1023; /* Packets Xmitted (512-1023b) */ + kstat_named_t ptc1522; /* Packets Xmitted (1024-1522b */ +#endif + kstat_named_t crcerrs; /* CRC Error Count */ + kstat_named_t symerrs; /* Symbol Error Count */ + kstat_named_t mpc; /* Missed Packet Count */ + kstat_named_t scc; /* Single Collision Count */ + kstat_named_t ecol; /* Excessive Collision Count */ + kstat_named_t mcc; /* Multiple Collision Count */ + kstat_named_t latecol; /* Late Collision Count */ + kstat_named_t colc; /* Collision Count */ + kstat_named_t dc; /* Defer Count */ + kstat_named_t sec; /* Sequence Error Count */ + kstat_named_t rlec; /* Receive Length Error Count */ + kstat_named_t xonrxc; /* XON Received Count */ + kstat_named_t xontxc; /* XON Xmitted Count */ + kstat_named_t xoffrxc; /* XOFF Received Count */ + kstat_named_t xofftxc; /* Xoff Xmitted Count */ + kstat_named_t fcruc; /* Unknown Flow Conrol Packet Rcvd Count */ + kstat_named_t bprc; /* Broadcasts Pkts Received Count */ + kstat_named_t mprc; /* Multicast Pkts Received Count */ + kstat_named_t rnbc; /* Receive No Buffers Count */ + kstat_named_t ruc; /* Receive Undersize Count */ + kstat_named_t rfc; /* Receive Frag Count */ + kstat_named_t roc; /* Receive Oversize Count */ + kstat_named_t rjc; /* Receive Jabber Count */ + kstat_named_t tor; /* Total Octets Recvd Count 
*/ + kstat_named_t tot; /* Total Octets Xmted Count */ + kstat_named_t tpr; /* Total Packets Received */ + kstat_named_t tpt; /* Total Packets Xmitted */ + kstat_named_t mptc; /* Multicast Packets Xmited Count */ + kstat_named_t bptc; /* Broadcast Packets Xmited Count */ + kstat_named_t algnerrc; /* Alignment Error count */ + kstat_named_t rxerrc; /* Rx Error Count */ + kstat_named_t tncrs; /* Transmit with no CRS */ + kstat_named_t cexterr; /* Carrier Extension Error count */ + kstat_named_t tsctc; /* TCP seg contexts xmit count */ + kstat_named_t tsctfc; /* TCP seg contexts xmit fail count */ +} igb_stat_t; + +/* + * Function prototypes in e1000_osdep.c + */ +void e1000_enable_pciex_master(struct e1000_hw *); + +/* + * Function prototypes in igb_buf.c + */ +int igb_alloc_dma(igb_t *); +void igb_free_dma(igb_t *); + +/* + * Function prototypes in igb_main.c + */ +int igb_start(igb_t *); +void igb_stop(igb_t *); +int igb_setup_link(igb_t *, boolean_t); +int igb_unicst_set(igb_t *, const uint8_t *, mac_addr_slot_t); +int igb_multicst_add(igb_t *, const uint8_t *); +int igb_multicst_remove(igb_t *, const uint8_t *); +enum ioc_reply igb_loopback_ioctl(igb_t *, struct iocblk *, mblk_t *); +void igb_enable_watchdog_timer(igb_t *); +void igb_disable_watchdog_timer(igb_t *); +int igb_atomic_reserve(uint32_t *, uint32_t); + +/* + * Function prototypes in igb_gld.c + */ +int igb_m_start(void *); +void igb_m_stop(void *); +int igb_m_promisc(void *, boolean_t); +int igb_m_multicst(void *, boolean_t, const uint8_t *); +int igb_m_unicst(void *, const uint8_t *); +int igb_m_stat(void *, uint_t, uint64_t *); +void igb_m_resources(void *); +void igb_m_ioctl(void *, queue_t *, mblk_t *); +int igb_m_unicst_add(void *, mac_multi_addr_t *); +int igb_m_unicst_remove(void *, mac_addr_slot_t); +int igb_m_unicst_modify(void *, mac_multi_addr_t *); +int igb_m_unicst_get(void *, mac_multi_addr_t *); +boolean_t igb_m_getcapab(void *, mac_capab_t, void *); + +/* + * Function prototypes in 
igb_rx.c + */ +mblk_t *igb_rx(igb_rx_ring_t *); +void igb_rx_recycle(caddr_t arg); + +/* + * Function prototypes in igb_tx.c + */ +mblk_t *igb_m_tx(void *, mblk_t *); +void igb_free_tcb(tx_control_block_t *); +void igb_put_free_list(igb_tx_ring_t *, link_list_t *); +uint32_t igb_tx_recycle_legacy(igb_tx_ring_t *); +uint32_t igb_tx_recycle_head_wb(igb_tx_ring_t *); + +/* + * Function prototypes in igb_log.c + */ +void igb_notice(void *, const char *, ...); +void igb_log(void *, const char *, ...); +void igb_error(void *, const char *, ...); + +/* + * Function prototypes in igb_ndd.c + */ +int igb_nd_init(igb_t *); +void igb_nd_cleanup(igb_t *); +enum ioc_reply igb_nd_ioctl(igb_t *, queue_t *, mblk_t *, struct iocblk *); + +/* + * Function prototypes in igb_stat.c + */ +int igb_init_stats(igb_t *); + + +#ifdef __cplusplus +} +#endif + +#endif /* _IGB_SW_H */ diff --git a/usr/src/uts/common/io/igb/igb_tx.c b/usr/src/uts/common/io/igb/igb_tx.c new file mode 100644 index 0000000000..2261022c49 --- /dev/null +++ b/usr/src/uts/common/io/igb/igb_tx.c @@ -0,0 +1,1297 @@ +/* + * CDDL HEADER START + * + * Copyright(c) 2007-2008 Intel Corporation. All rights reserved. + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at: + * http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When using or redistributing this file, you may do so under the + * License only. No other modification of this header is permitted. 
+ * + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms of the CDDL. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "igb_sw.h" + +static boolean_t igb_tx(igb_tx_ring_t *, mblk_t *); +static int igb_tx_copy(igb_tx_ring_t *, tx_control_block_t *, mblk_t *, + uint32_t, boolean_t, boolean_t); +static int igb_tx_bind(igb_tx_ring_t *, tx_control_block_t *, mblk_t *, + uint32_t); +static int igb_tx_fill_ring(igb_tx_ring_t *, link_list_t *, hcksum_context_t *); +static void igb_save_desc(tx_control_block_t *, uint64_t, size_t); +static tx_control_block_t *igb_get_free_list(igb_tx_ring_t *); + +static void igb_get_hcksum_context(mblk_t *, hcksum_context_t *); +static boolean_t igb_check_hcksum_context(igb_tx_ring_t *, hcksum_context_t *); +static void igb_fill_hcksum_context(struct e1000_adv_tx_context_desc *, + hcksum_context_t *); + +#ifndef IGB_DEBUG +#pragma inline(igb_save_desc) +#pragma inline(igb_get_hcksum_context) +#pragma inline(igb_check_hcksum_context) +#pragma inline(igb_fill_hcksum_context) +#endif + +/* + * igb_m_tx + * + * The GLDv3 interface to call driver's tx routine to transmit + * the mblks. + */ +mblk_t * +igb_m_tx(void *arg, mblk_t *mp) +{ + igb_t *igb = (igb_t *)arg; + mblk_t *next; + igb_tx_ring_t *tx_ring; + + /* + * If the adapter is suspended, or it is not started, or the link + * is not up, the mblks are simply dropped. 
+ */ + if (((igb->igb_state & IGB_SUSPENDED) != 0) || + ((igb->igb_state & IGB_STARTED) == 0) || + (igb->link_state != LINK_STATE_UP)) { + /* Free the mblk chain */ + while (mp != NULL) { + next = mp->b_next; + mp->b_next = NULL; + + freemsg(mp); + mp = next; + } + + return (NULL); + } + + /* + * Decide which tx ring is used to transmit the packets. + * This needs to be updated later to fit the new interface + * of the multiple rings support. + */ + tx_ring = &igb->tx_rings[0]; + + while (mp != NULL) { + next = mp->b_next; + mp->b_next = NULL; + + if (!igb_tx(tx_ring, mp)) { + mp->b_next = next; + break; + } + + mp = next; + } + + return (mp); +} + +/* + * igb_tx - Main transmit processing + * + * Called from igb_m_tx with an mblk ready to transmit. this + * routine sets up the transmit descriptors and sends data to + * the wire. + * + * One mblk can consist of several fragments, each fragment + * will be processed with different methods based on the size. + * For the fragments with size less than the bcopy threshold, + * they will be processed by using bcopy; otherwise, they will + * be processed by using DMA binding. + * + * To process the mblk, a tx control block is got from the + * free list. One tx control block contains one tx buffer, which + * is used to copy mblk fragments' data; and one tx DMA handle, + * which is used to bind a mblk fragment with DMA resource. + * + * Several small mblk fragments can be copied into one tx control + * block's buffer, and then the buffer will be transmitted with + * one tx descriptor. + * + * A large fragment only binds with one tx control block's DMA + * handle, and it can span several tx descriptors for transmitting. + * + * So to transmit a packet (mblk), several tx control blocks can + * be used. After the processing, those tx control blocks will + * be put to the work list. 
+ */ +static boolean_t +igb_tx(igb_tx_ring_t *tx_ring, mblk_t *mp) +{ + igb_t *igb = tx_ring->igb; + tx_type_t current_flag, next_flag; + uint32_t current_len, next_len; + uint32_t desc_total; + size_t mbsize; + int desc_num; + boolean_t copy_done, eop; + mblk_t *current_mp, *next_mp, *nmp; + tx_control_block_t *tcb; + hcksum_context_t hcksum_context, *hcksum; + link_list_t pending_list; + + /* Get the mblk size */ + mbsize = 0; + for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) { + mbsize += MBLK_LEN(nmp); + } + + /* + * If the mblk size exceeds the max frame size, + * discard this mblk, and return B_TRUE + */ + if (mbsize > (igb->max_frame_size - ETHERFCSL)) { + freemsg(mp); + IGB_DEBUGLOG_0(igb, "igb_tx: packet oversize"); + return (B_TRUE); + } + + /* + * Check and recycle tx descriptors. + * The recycle threshold here should be selected carefully + */ + if (tx_ring->tbd_free < tx_ring->recycle_thresh) + tx_ring->tx_recycle(tx_ring); + + /* + * After the recycling, if the tbd_free is less than the + * overload_threshold, assert overload, return B_FALSE; + * and we need to re-schedule the tx again. + */ + if (tx_ring->tbd_free < tx_ring->overload_thresh) { + tx_ring->reschedule = B_TRUE; + IGB_DEBUG_STAT(tx_ring->stat_overload); + return (B_FALSE); + } + + /* + * The pending_list is a linked list that is used to save + * the tx control blocks that have packet data processed + * but have not put the data to the tx descriptor ring. + * It is used to reduce the lock contention of the tx_lock. + */ + LINK_LIST_INIT(&pending_list); + desc_num = 0; + desc_total = 0; + + current_mp = mp; + current_len = MBLK_LEN(current_mp); + /* + * Decide which method to use for the first fragment + */ + current_flag = (current_len <= tx_ring->copy_thresh) ? + USE_COPY : USE_DMA; + /* + * If the mblk includes several contiguous small fragments, + * they may be copied into one buffer. 
This flag is used to + * indicate whether there are pending fragments that need to + * be copied to the current tx buffer. + * + * If this flag is B_TRUE, it indicates that a new tx control + * block is needed to process the next fragment using either + * copy or DMA binding. + * + * Otherwise, it indicates that the next fragment will be + * copied to the current tx buffer that is maintained by the + * current tx control block. No new tx control block is needed. + */ + copy_done = B_TRUE; + while (current_mp) { + next_mp = current_mp->b_cont; + eop = (next_mp == NULL); /* Last fragment of the packet? */ + next_len = eop ? 0: MBLK_LEN(next_mp); + + /* + * When the current fragment is an empty fragment, if + * the next fragment will still be copied to the current + * tx buffer, we cannot skip this fragment here. Because + * the copy processing is pending for completion. We have + * to process this empty fragment in the tx_copy routine. + * + * If the copy processing is completed or a DMA binding + * processing is just completed, we can just skip this + * empty fragment. + */ + if ((current_len == 0) && (copy_done)) { + current_mp = next_mp; + current_len = next_len; + current_flag = (current_len <= tx_ring->copy_thresh) ? + USE_COPY : USE_DMA; + continue; + } + + if (copy_done) { + /* + * Get a new tx control block from the free list + */ + tcb = igb_get_free_list(tx_ring); + + if (tcb == NULL) { + IGB_DEBUG_STAT(tx_ring->stat_fail_no_tcb); + goto tx_failure; + } + + /* + * Push the tx control block to the pending list + * to avoid using lock too early + */ + LIST_PUSH_TAIL(&pending_list, &tcb->link); + } + + if (current_flag == USE_COPY) { + /* + * Check whether to use bcopy or DMA binding to process + * the next fragment, and if using bcopy, whether we + * need to continue copying the next fragment into the + * current tx buffer. 
+ */ + ASSERT((tcb->tx_buf.len + current_len) <= + tcb->tx_buf.size); + + if (eop) { + /* + * This is the last fragment of the packet, so + * the copy processing will be completed with + * this fragment. + */ + next_flag = USE_NONE; + copy_done = B_TRUE; + } else if ((tcb->tx_buf.len + current_len + next_len) > + tcb->tx_buf.size) { + /* + * If the next fragment is too large to be + * copied to the current tx buffer, we need + * to complete the current copy processing. + */ + next_flag = (next_len > tx_ring->copy_thresh) ? + USE_DMA: USE_COPY; + copy_done = B_TRUE; + } else if (next_len > tx_ring->copy_thresh) { + /* + * The next fragment needs to be processed with + * DMA binding. So the copy prcessing will be + * completed with the current fragment. + */ + next_flag = USE_DMA; + copy_done = B_TRUE; + } else { + /* + * Continue to copy the next fragment to the + * current tx buffer. + */ + next_flag = USE_COPY; + copy_done = B_FALSE; + } + + desc_num = igb_tx_copy(tx_ring, tcb, current_mp, + current_len, copy_done, eop); + } else { + /* + * Check whether to use bcopy or DMA binding to process + * the next fragment. + */ + next_flag = (next_len > tx_ring->copy_thresh) ? + USE_DMA: USE_COPY; + ASSERT(copy_done == B_TRUE); + + desc_num = igb_tx_bind(tx_ring, tcb, current_mp, + current_len); + } + + if (desc_num > 0) + desc_total += desc_num; + else if (desc_num < 0) + goto tx_failure; + + current_mp = next_mp; + current_len = next_len; + current_flag = next_flag; + } + + /* + * Attach the mblk to the last tx control block + */ + ASSERT(tcb); + ASSERT(tcb->mp == NULL); + tcb->mp = mp; + + if (igb->tx_hcksum_enable) { + /* + * Retrieve checksum context information from the mblk that will + * be used to decide whether/how to fill the context descriptor. 
+ */ + hcksum = &hcksum_context; + igb_get_hcksum_context(mp, hcksum); + } else { + hcksum = NULL; + } + + /* + * Before fill the tx descriptor ring with the data, we need to + * ensure there are adequate free descriptors for transmit + * (including one context descriptor). + */ + if (tx_ring->tbd_free < (desc_total + 1)) { + tx_ring->tx_recycle(tx_ring); + } + + mutex_enter(&tx_ring->tx_lock); + + /* + * If the number of free tx descriptors is not enough for transmit + * then return failure. + * + * Note: we must put this check under the mutex protection to + * ensure the correctness when multiple threads access it in + * parallel. + */ + if (tx_ring->tbd_free < (desc_total + 1)) { + IGB_DEBUG_STAT(tx_ring->stat_fail_no_tbd); + mutex_exit(&tx_ring->tx_lock); + goto tx_failure; + } + + desc_num = igb_tx_fill_ring(tx_ring, &pending_list, hcksum); + + ASSERT((desc_num == desc_total) || (desc_num == (desc_total + 1))); + + mutex_exit(&tx_ring->tx_lock); + + return (B_TRUE); + +tx_failure: + /* + * Discard the mblk and free the used resources + */ + tcb = (tx_control_block_t *)LIST_GET_HEAD(&pending_list); + while (tcb) { + tcb->mp = NULL; + + igb_free_tcb(tcb); + + tcb = (tx_control_block_t *) + LIST_GET_NEXT(&pending_list, &tcb->link); + } + + /* + * Return the tx control blocks in the pending list to the free list. + */ + igb_put_free_list(tx_ring, &pending_list); + + /* Transmit failed, do not drop the mblk, rechedule the transmit */ + tx_ring->reschedule = B_TRUE; + + return (B_FALSE); +} + +/* + * igb_tx_copy + * + * Copy the mblk fragment to the pre-allocated tx buffer + */ +static int +igb_tx_copy(igb_tx_ring_t *tx_ring, tx_control_block_t *tcb, mblk_t *mp, + uint32_t len, boolean_t copy_done, boolean_t eop) +{ + dma_buffer_t *tx_buf; + uint32_t desc_num; + _NOTE(ARGUNUSED(tx_ring)); + + tx_buf = &tcb->tx_buf; + + /* + * Copy the packet data of the mblk fragment into the + * pre-allocated tx buffer, which is maintained by the + * tx control block. 
+ * + * Several mblk fragments can be copied into one tx buffer. + * The destination address of the current copied fragment in + * the tx buffer is next to the end of the previous copied + * fragment. + */ + if (len > 0) { + bcopy(mp->b_rptr, tx_buf->address + tx_buf->len, len); + + tx_buf->len += len; + tcb->frag_num++; + } + + desc_num = 0; + + /* + * If it is the last fragment copied to the current tx buffer, + * in other words, if there's no remaining fragment or the remaining + * fragment requires a new tx control block to process, we need to + * complete the current copy processing by syncing up the current + * DMA buffer and saving the descriptor data. + */ + if (copy_done) { + /* + * For the packet smaller than 64 bytes, we need to + * pad it to 60 bytes. The NIC hardware will add 4 + * bytes of CRC. + */ + if (eop && (tx_buf->len < ETHERMIN)) { + bzero(tx_buf->address + tx_buf->len, + ETHERMIN - tx_buf->len); + tx_buf->len = ETHERMIN; + } + + /* + * Sync the DMA buffer of the packet data + */ + DMA_SYNC(tx_buf, DDI_DMA_SYNC_FORDEV); + + tcb->tx_type = USE_COPY; + + /* + * Save the address and length to the private data structure + * of the tx control block, which will be used to fill the + * tx descriptor ring after all the fragments are processed. 
+ */ + igb_save_desc(tcb, tx_buf->dma_address, tx_buf->len); + desc_num++; + } + + return (desc_num); +} + +/* + * igb_tx_bind + * + * Bind the mblk fragment with DMA + */ +static int +igb_tx_bind(igb_tx_ring_t *tx_ring, tx_control_block_t *tcb, mblk_t *mp, + uint32_t len) +{ + int status, i; + ddi_dma_cookie_t dma_cookie; + uint_t ncookies; + int desc_num; + + /* + * Use DMA binding to process the mblk fragment + */ + status = ddi_dma_addr_bind_handle(tcb->tx_dma_handle, NULL, + (caddr_t)mp->b_rptr, len, + DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, + 0, &dma_cookie, &ncookies); + + if (status != DDI_DMA_MAPPED) { + IGB_DEBUG_STAT(tx_ring->stat_fail_dma_bind); + return (-1); + } + + tcb->frag_num++; + tcb->tx_type = USE_DMA; + /* + * Each fragment can span several cookies. One cookie will have + * one tx descriptor to transmit. + */ + desc_num = 0; + for (i = ncookies; i > 0; i--) { + /* + * Save the address and length to the private data structure + * of the tx control block, which will be used to fill the + * tx descriptor ring after all the fragments are processed. + */ + igb_save_desc(tcb, + dma_cookie.dmac_laddress, + dma_cookie.dmac_size); + + desc_num++; + + if (i > 1) + ddi_dma_nextcookie(tcb->tx_dma_handle, &dma_cookie); + } + + return (desc_num); +} + +/* + * igb_get_hcksum_context + * + * Get the hcksum context information from the mblk + */ +static void +igb_get_hcksum_context(mblk_t *mp, hcksum_context_t *hcksum) +{ + uint32_t start; + uint32_t flags; + uint32_t len; + uint32_t size; + uint32_t offset; + unsigned char *pos; + ushort_t etype; + uint32_t mac_hdr_len; + uint32_t l4_proto; + + ASSERT(mp != NULL); + + hcksum_retrieve(mp, NULL, NULL, &start, NULL, NULL, NULL, &flags); + + hcksum->hcksum_flags = flags; + + if (flags == 0) + return; + + etype = 0; + mac_hdr_len = 0; + l4_proto = 0; + + /* + * Firstly get the position of the ether_type/ether_tpid. 
+ * Here we don't assume the ether (VLAN) header is fully included + * in one mblk fragment, so we go thourgh the fragments to parse + * the ether type. + */ + size = len = MBLK_LEN(mp); + offset = offsetof(struct ether_header, ether_type); + while (size <= offset) { + mp = mp->b_cont; + ASSERT(mp != NULL); + len = MBLK_LEN(mp); + size += len; + } + pos = mp->b_rptr + offset + len - size; + + etype = ntohs(*(ushort_t *)(uintptr_t)pos); + if (etype == ETHERTYPE_VLAN) { + /* + * Get the position of the ether_type in VLAN header + */ + offset = offsetof(struct ether_vlan_header, ether_type); + while (size <= offset) { + mp = mp->b_cont; + ASSERT(mp != NULL); + len = MBLK_LEN(mp); + size += len; + } + pos = mp->b_rptr + offset + len - size; + + etype = ntohs(*(ushort_t *)(uintptr_t)pos); + mac_hdr_len = sizeof (struct ether_vlan_header); + } else { + mac_hdr_len = sizeof (struct ether_header); + } + + /* + * Here we don't assume the IP(V6) header is fully included in + * one mblk fragment, so we go thourgh the fragments to parse + * the protocol type. 
+ */ + switch (etype) { + case ETHERTYPE_IP: + offset = offsetof(ipha_t, ipha_protocol) + mac_hdr_len; + while (size <= offset) { + mp = mp->b_cont; + ASSERT(mp != NULL); + len = MBLK_LEN(mp); + size += len; + } + pos = mp->b_rptr + offset + len - size; + + l4_proto = *(uint8_t *)pos; + break; + case ETHERTYPE_IPV6: + offset = offsetof(ip6_t, ip6_nxt) + mac_hdr_len; + while (size <= offset) { + mp = mp->b_cont; + ASSERT(mp != NULL); + len = MBLK_LEN(mp); + size += len; + } + pos = mp->b_rptr + offset + len - size; + + l4_proto = *(uint8_t *)pos; + break; + default: + /* Unrecoverable error */ + IGB_DEBUGLOG_0(NULL, "Ether type error with tx hcksum"); + return; + } + + hcksum->mac_hdr_len = mac_hdr_len; + hcksum->ip_hdr_len = start; + hcksum->l4_proto = l4_proto; +} + +/* + * igb_check_hcksum_context + * + * Check if a new context descriptor is needed + */ +static boolean_t +igb_check_hcksum_context(igb_tx_ring_t *tx_ring, hcksum_context_t *hcksum) +{ + hcksum_context_t *last; + + if (hcksum == NULL) + return (B_FALSE); + + /* + * Compare the checksum data retrieved from the mblk and the + * stored checksum data of the last context descriptor. The data + * need to be checked are: + * hcksum_flags + * l4_proto + * mac_hdr_len + * ip_hdr_len + * Either one of the above data is changed, a new context descriptor + * will be needed. 
+ */ + last = &tx_ring->hcksum_context; + + if (hcksum->hcksum_flags != 0) { + if ((hcksum->hcksum_flags != last->hcksum_flags) || + (hcksum->l4_proto != last->l4_proto) || + (hcksum->mac_hdr_len != last->mac_hdr_len) || + (hcksum->ip_hdr_len != last->ip_hdr_len)) { + + return (B_TRUE); + } + } + + return (B_FALSE); +} + +/* + * igb_fill_hcksum_context + * + * Fill the context descriptor with hardware checksum information + */ +static void +igb_fill_hcksum_context(struct e1000_adv_tx_context_desc *ctx_tbd, + hcksum_context_t *hcksum) +{ + /* + * Fill the context descriptor with the checksum + * context information we've got + */ + ctx_tbd->vlan_macip_lens = hcksum->ip_hdr_len; + ctx_tbd->vlan_macip_lens |= hcksum->mac_hdr_len << + E1000_ADVTXD_MACLEN_SHIFT; + + ctx_tbd->type_tucmd_mlhl = + E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; + + if (hcksum->hcksum_flags & HCK_IPV4_HDRCKSUM) + ctx_tbd->type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; + + if (hcksum->hcksum_flags & HCK_PARTIALCKSUM) { + switch (hcksum->l4_proto) { + case IPPROTO_TCP: + ctx_tbd->type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; + break; + case IPPROTO_UDP: + /* + * We don't have to explicitly set: + * ctx_tbd->type_tucmd_mlhl |= + * E1000_ADVTXD_TUCMD_L4T_UDP; + * Because E1000_ADVTXD_TUCMD_L4T_UDP == 0b + */ + break; + default: + /* Unrecoverable error */ + IGB_DEBUGLOG_0(NULL, "L4 type error with tx hcksum"); + break; + } + } + + ctx_tbd->seqnum_seed = 0; + ctx_tbd->mss_l4len_idx = 0; +} + +/* + * igb_tx_fill_ring + * + * Fill the tx descriptor ring with the data + */ +static int +igb_tx_fill_ring(igb_tx_ring_t *tx_ring, link_list_t *pending_list, + hcksum_context_t *hcksum) +{ + struct e1000_hw *hw = &tx_ring->igb->hw; + boolean_t load_context; + uint32_t index, tcb_index, desc_num; + union e1000_adv_tx_desc *tbd, *first_tbd; + tx_control_block_t *tcb, *first_tcb; + uint32_t hcksum_flags; + int i; + + ASSERT(mutex_owned(&tx_ring->tx_lock)); + + tbd = NULL; + first_tbd = NULL; + first_tcb = 
NULL; + desc_num = 0; + hcksum_flags = 0; + load_context = B_FALSE; + + /* + * Get the index of the first tx descriptor that will be filled, + * and the index of the first work list item that will be attached + * with the first used tx control block in the pending list. + * Note: the two indexes are the same. + */ + index = tx_ring->tbd_tail; + tcb_index = tx_ring->tbd_tail; + + if (hcksum != NULL) { + hcksum_flags = hcksum->hcksum_flags; + + /* + * Check if a new context descriptor is needed for this packet + */ + load_context = igb_check_hcksum_context(tx_ring, hcksum); + if (load_context) { + first_tcb = (tx_control_block_t *) + LIST_GET_HEAD(pending_list); + tbd = &tx_ring->tbd_ring[index]; + + /* + * Fill the context descriptor with the + * hardware checksum offload information. + */ + igb_fill_hcksum_context( + (struct e1000_adv_tx_context_desc *)tbd, hcksum); + + index = NEXT_INDEX(index, 1, tx_ring->ring_size); + desc_num++; + + /* + * Store the checksum context data if + * a new context descriptor is added + */ + tx_ring->hcksum_context = *hcksum; + } + } + + first_tbd = &tx_ring->tbd_ring[index]; + + /* + * Fill tx data descriptors with the data saved in the pending list. + * The tx control blocks in the pending list are added to the work list + * at the same time. + * + * The work list is strictly 1:1 corresponding to the descriptor ring. + * One item of the work list corresponds to one tx descriptor. Because + * one tx control block can span multiple tx descriptors, the tx + * control block will be added to the first work list item that + * corresponds to the first tx descriptor generated from that tx + * control block. 
+ */ + tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list); + while (tcb != NULL) { + + for (i = 0; i < tcb->desc_num; i++) { + tbd = &tx_ring->tbd_ring[index]; + + tbd->read.buffer_addr = tcb->desc[i].address; + tbd->read.cmd_type_len = tcb->desc[i].length; + + tbd->read.cmd_type_len |= E1000_ADVTXD_DCMD_RS | + E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_DATA; + + tbd->read.olinfo_status = 0; + + index = NEXT_INDEX(index, 1, tx_ring->ring_size); + desc_num++; + } + + if (first_tcb != NULL) { + /* + * Count the checksum context descriptor for + * the first tx control block. + */ + first_tcb->desc_num++; + first_tcb = NULL; + } + + /* + * Add the tx control block to the work list + */ + ASSERT(tx_ring->work_list[tcb_index] == NULL); + tx_ring->work_list[tcb_index] = tcb; + + tcb_index = index; + tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list); + } + + /* + * The Insert Ethernet CRC (IFCS) bit and the checksum fields are only + * valid in the first descriptor of the packet. + */ + ASSERT(first_tbd != NULL); + first_tbd->read.cmd_type_len |= E1000_ADVTXD_DCMD_IFCS; + + /* Set hardware checksum bits */ + if (hcksum_flags != 0) { + if (hcksum_flags & HCK_IPV4_HDRCKSUM) + first_tbd->read.olinfo_status |= + E1000_TXD_POPTS_IXSM << 8; + if (hcksum_flags & HCK_PARTIALCKSUM) + first_tbd->read.olinfo_status |= + E1000_TXD_POPTS_TXSM << 8; + } + + /* + * The last descriptor of packet needs End Of Packet (EOP), + * and Report Status (RS) bits set + */ + ASSERT(tbd != NULL); + tbd->read.cmd_type_len |= + E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS; + + /* + * Sync the DMA buffer of the tx descriptor ring + */ + DMA_SYNC(&tx_ring->tbd_area, DDI_DMA_SYNC_FORDEV); + + /* + * Update the number of the free tx descriptors. + * The mutual exclusion between the transmission and the recycling + * (for the tx descriptor ring and the work list) is implemented + * with the atomic operation on the number of the free tx descriptors. 
+ * + * Note: we should always decrement the counter tbd_free before + * advancing the hardware TDT pointer to avoid the race condition - + * before the counter tbd_free is decremented, the transmit of the + * tx descriptors has been done and the counter tbd_free is increased by + * the tx recycling. + */ + i = igb_atomic_reserve(&tx_ring->tbd_free, desc_num); + ASSERT(i >= 0); + + tx_ring->tbd_tail = index; + + /* + * Advance the hardware TDT pointer of the tx descriptor ring + */ + E1000_WRITE_REG(hw, E1000_TDT(tx_ring->index), index); + + return (desc_num); +} + +/* + * igb_save_desc + * + * Save the address/length pair to the private array + * of the tx control block. The address/length pairs + * will be filled into the tx descriptor ring later. + */ +static void +igb_save_desc(tx_control_block_t *tcb, uint64_t address, size_t length) +{ + sw_desc_t *desc; + + desc = &tcb->desc[tcb->desc_num]; + desc->address = address; + desc->length = length; + + tcb->desc_num++; +} + +/* + * igb_tx_recycle_legacy + * + * Recycle the tx descriptors and tx control blocks. + * + * The work list is traversed to check if the corresponding + * tx descriptors have been transmitted. If so, the resources + * bound to the tx control blocks will be freed, and those + * tx control blocks will be returned to the free list. + */ +uint32_t +igb_tx_recycle_legacy(igb_tx_ring_t *tx_ring) +{ + uint32_t index, last_index; + int desc_num; + boolean_t desc_done; + tx_control_block_t *tcb; + link_list_t pending_list; + + /* + * The mutex_tryenter() is used to avoid unnecessary + * lock contention. 
+ */ + if (mutex_tryenter(&tx_ring->recycle_lock) == 0) + return (0); + + ASSERT(tx_ring->tbd_free <= tx_ring->ring_size); + + if (tx_ring->tbd_free == tx_ring->ring_size) { + tx_ring->recycle_fail = 0; + tx_ring->stall_watchdog = 0; + mutex_exit(&tx_ring->recycle_lock); + return (0); + } + + /* + * Sync the DMA buffer of the tx descriptor ring + */ + DMA_SYNC(&tx_ring->tbd_area, DDI_DMA_SYNC_FORKERNEL); + + LINK_LIST_INIT(&pending_list); + desc_num = 0; + index = tx_ring->tbd_head; /* Index of next tbd/tcb to recycle */ + + tcb = tx_ring->work_list[index]; + ASSERT(tcb != NULL); + + desc_done = B_TRUE; + while (desc_done && (tcb != NULL)) { + + /* + * Get the last tx descriptor of the tx control block. + * If the last tx descriptor is done, it is done with + * all the tx descriptors of the tx control block. + * Then the tx control block and all the corresponding + * tx descriptors can be recycled. + */ + last_index = NEXT_INDEX(index, tcb->desc_num - 1, + tx_ring->ring_size); + + /* + * Check if the Descriptor Done bit is set + */ + desc_done = tx_ring->tbd_ring[last_index].wb.status & + E1000_TXD_STAT_DD; + if (desc_done) { + /* + * Strip off the tx control block from the work list, + * and add it to the pending list. 
+ */ + tx_ring->work_list[index] = NULL; + LIST_PUSH_TAIL(&pending_list, &tcb->link); + + /* + * Count the total number of the tx descriptors recycled + */ + desc_num += tcb->desc_num; + + /* + * Advance the index of the tx descriptor ring + */ + index = NEXT_INDEX(last_index, 1, tx_ring->ring_size); + + tcb = tx_ring->work_list[index]; + } + } + + /* + * If no tx descriptors are recycled, no need to do more processing + */ + if (desc_num == 0) { + tx_ring->recycle_fail++; + mutex_exit(&tx_ring->recycle_lock); + return (0); + } + + tx_ring->recycle_fail = 0; + tx_ring->stall_watchdog = 0; + + /* + * Update the head index of the tx descriptor ring + */ + tx_ring->tbd_head = index; + + /* + * Update the number of the free tx descriptors with atomic operations + */ + atomic_add_32(&tx_ring->tbd_free, desc_num); + + mutex_exit(&tx_ring->recycle_lock); + + /* + * Free the resources used by the tx control blocks + * in the pending list + */ + tcb = (tx_control_block_t *)LIST_GET_HEAD(&pending_list); + while (tcb != NULL) { + /* + * Release the resources occupied by the tx control block + */ + igb_free_tcb(tcb); + + tcb = (tx_control_block_t *) + LIST_GET_NEXT(&pending_list, &tcb->link); + } + + /* + * Add the tx control blocks in the pending list to the free list. + */ + igb_put_free_list(tx_ring, &pending_list); + + return (desc_num); +} + +/* + * igb_tx_recycle_head_wb + * + * Check the head write-back, and recycle all the transmitted + * tx descriptors and tx control blocks. + */ +uint32_t +igb_tx_recycle_head_wb(igb_tx_ring_t *tx_ring) +{ + uint32_t index; + uint32_t head_wb; + int desc_num; + tx_control_block_t *tcb; + link_list_t pending_list; + + /* + * The mutex_tryenter() is used to avoid unnecessary + * lock contention. 
+ */ + if (mutex_tryenter(&tx_ring->recycle_lock) == 0) + return (0); + + ASSERT(tx_ring->tbd_free <= tx_ring->ring_size); + + if (tx_ring->tbd_free == tx_ring->ring_size) { + tx_ring->recycle_fail = 0; + tx_ring->stall_watchdog = 0; + mutex_exit(&tx_ring->recycle_lock); + return (0); + } + + /* + * Sync the DMA buffer of the tx descriptor ring + * + * Note: For head write-back mode, the tx descriptors will not + * be written back, but the head write-back value is stored at + * the last extra tbd at the end of the DMA area, we still need + * to sync the head write-back value for kernel. + * + * DMA_SYNC(&tx_ring->tbd_area, DDI_DMA_SYNC_FORKERNEL); + */ + (void) ddi_dma_sync(tx_ring->tbd_area.dma_handle, + sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size, + sizeof (uint32_t), + DDI_DMA_SYNC_FORKERNEL); + + LINK_LIST_INIT(&pending_list); + desc_num = 0; + index = tx_ring->tbd_head; /* Next index to clean */ + + /* + * Get the value of head write-back + */ + head_wb = *tx_ring->tbd_head_wb; + while (index != head_wb) { + tcb = tx_ring->work_list[index]; + ASSERT(tcb != NULL); + + if (OFFSET(index, head_wb, tx_ring->ring_size) < + tcb->desc_num) { + /* + * The current tx control block is not + * completely transmitted, stop recycling + */ + break; + } + + /* + * Strip off the tx control block from the work list, + * and add it to the pending list. 
+ */ + tx_ring->work_list[index] = NULL; + LIST_PUSH_TAIL(&pending_list, &tcb->link); + + /* + * Advance the index of the tx descriptor ring + */ + index = NEXT_INDEX(index, tcb->desc_num, tx_ring->ring_size); + + /* + * Count the total number of the tx descriptors recycled + */ + desc_num += tcb->desc_num; + } + + /* + * If no tx descriptors are recycled, no need to do more processing + */ + if (desc_num == 0) { + tx_ring->recycle_fail++; + mutex_exit(&tx_ring->recycle_lock); + return (0); + } + + tx_ring->recycle_fail = 0; + tx_ring->stall_watchdog = 0; + + /* + * Update the head index of the tx descriptor ring + */ + tx_ring->tbd_head = index; + + /* + * Update the number of the free tx descriptors with atomic operations + */ + atomic_add_32(&tx_ring->tbd_free, desc_num); + + mutex_exit(&tx_ring->recycle_lock); + + /* + * Free the resources used by the tx control blocks + * in the pending list + */ + tcb = (tx_control_block_t *)LIST_GET_HEAD(&pending_list); + while (tcb) { + /* + * Release the resources occupied by the tx control block + */ + igb_free_tcb(tcb); + + tcb = (tx_control_block_t *) + LIST_GET_NEXT(&pending_list, &tcb->link); + } + + /* + * Add the tx control blocks in the pending list to the free list. + */ + igb_put_free_list(tx_ring, &pending_list); + + return (desc_num); +} + +/* + * igb_free_tcb - free up the tx control block + * + * Free the resources of the tx control block, including + * unbind the previously bound DMA handle, and reset other + * control fields. + */ +void +igb_free_tcb(tx_control_block_t *tcb) +{ + switch (tcb->tx_type) { + case USE_COPY: + /* + * Reset the buffer length that is used for copy + */ + tcb->tx_buf.len = 0; + break; + case USE_DMA: + /* + * Release the DMA resource that is used for + * DMA binding. 
+ */ + (void) ddi_dma_unbind_handle(tcb->tx_dma_handle); + break; + default: + break; + } + + /* + * Free the mblk + */ + if (tcb->mp != NULL) { + freemsg(tcb->mp); + tcb->mp = NULL; + } + + tcb->tx_type = USE_NONE; + tcb->frag_num = 0; + tcb->desc_num = 0; +} + +/* + * igb_get_free_list - Get a free tx control block from the free list + * + * The atomic operation on the number of the available tx control block + * in the free list is used to keep this routine mutual exclusive with + * the routine igb_put_check_list. + */ +static tx_control_block_t * +igb_get_free_list(igb_tx_ring_t *tx_ring) +{ + tx_control_block_t *tcb; + + /* + * Check and update the number of the free tx control block + * in the free list. + */ + if (igb_atomic_reserve(&tx_ring->tcb_free, 1) < 0) + return (NULL); + + mutex_enter(&tx_ring->tcb_head_lock); + + tcb = tx_ring->free_list[tx_ring->tcb_head]; + ASSERT(tcb != NULL); + tx_ring->free_list[tx_ring->tcb_head] = NULL; + tx_ring->tcb_head = NEXT_INDEX(tx_ring->tcb_head, 1, + tx_ring->free_list_size); + + mutex_exit(&tx_ring->tcb_head_lock); + + return (tcb); +} + +/* + * igb_put_free_list + * + * Put a list of used tx control blocks back to the free list + * + * A mutex is used here to ensure the serialization. The mutual exclusion + * between igb_get_free_list and igb_put_free_list is implemented with + * the atomic operation on the counter tcb_free. 
+ */ +void +igb_put_free_list(igb_tx_ring_t *tx_ring, link_list_t *pending_list) +{ + uint32_t index; + int tcb_num; + tx_control_block_t *tcb; + + mutex_enter(&tx_ring->tcb_tail_lock); + + index = tx_ring->tcb_tail; + + tcb_num = 0; + tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list); + while (tcb != NULL) { + ASSERT(tx_ring->free_list[index] == NULL); + tx_ring->free_list[index] = tcb; + + tcb_num++; + + index = NEXT_INDEX(index, 1, tx_ring->free_list_size); + + tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list); + } + + tx_ring->tcb_tail = index; + + /* + * Update the number of the free tx control block + * in the free list. This operation must be placed + * under the protection of the lock. + */ + atomic_add_32(&tx_ring->tcb_free, tcb_num); + + mutex_exit(&tx_ring->tcb_tail_lock); +} diff --git a/usr/src/uts/intel/Makefile.intel.shared b/usr/src/uts/intel/Makefile.intel.shared index 39d95c1fca..891f268c4c 100644 --- a/usr/src/uts/intel/Makefile.intel.shared +++ b/usr/src/uts/intel/Makefile.intel.shared @@ -354,6 +354,7 @@ DRV_KMODS += nge DRV_KMODS += rge DRV_KMODS += sfe DRV_KMODS += amd8111s +DRV_KMODS += igb $(CLOSED_BUILD)CLOSED_DRV_KMODS += ixgb # diff --git a/usr/src/uts/intel/igb/Makefile b/usr/src/uts/intel/igb/Makefile new file mode 100644 index 0000000000..72ff7af3c7 --- /dev/null +++ b/usr/src/uts/intel/igb/Makefile @@ -0,0 +1,90 @@ +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# +# +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# +# ident "%Z%%M% %I% %E% SMI" +# +# uts/intel/igb/Makefile +# +# This makefile drives the production of the igb +# network driver kernel module. +# +# intel architecture dependent +# + +# +# Paths to the base of the uts directory trees +# +UTSBASE = ../.. + +# +# Define the module and object file sets. +# +MODULE = igb +OBJECTS = $(IGB_OBJS:%=$(OBJS_DIR)/%) +LINTS = $(IGB_OBJS:%.o=$(LINTS_DIR)/%.ln) +ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE) +CONF_SRCDIR = $(UTSBASE)/common/io/igb + +# +# Include common rules. +# +include $(UTSBASE)/intel/Makefile.intel + +# +# Define targets +# +ALL_TARGET = $(BINARY) $(CONFMOD) +LINT_TARGET = $(MODULE).lint +INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE) + +# +# Driver depends on MAC & IP +# +LDFLAGS += -dy -N misc/mac -N drv/ip + +# +# Default build targets. +# +.KEEP_STATE: + +def: $(DEF_DEPS) + +all: $(ALL_DEPS) + +clean: $(CLEAN_DEPS) + +clobber: $(CLOBBER_DEPS) + +lint: $(LINT_DEPS) + +modlintlib: $(MODLINTLIB_DEPS) + +clean.lint: $(CLEAN_LINT_DEPS) + +install: $(INSTALL_DEPS) + +# +# Include common targets. 
+# +include $(UTSBASE)/intel/Makefile.targ diff --git a/usr/src/uts/sparc/Makefile.sparc.shared b/usr/src/uts/sparc/Makefile.sparc.shared index 93edd00412..d747b59a74 100644 --- a/usr/src/uts/sparc/Makefile.sparc.shared +++ b/usr/src/uts/sparc/Makefile.sparc.shared @@ -262,6 +262,7 @@ DRV_KMODS += pcwl DRV_KMODS += rge DRV_KMODS += sfe DRV_KMODS += aac +DRV_KMODS += igb $(CLOSED_BUILD)CLOSED_DRV_KMODS += ixgb # diff --git a/usr/src/uts/sparc/igb/Makefile b/usr/src/uts/sparc/igb/Makefile new file mode 100644 index 0000000000..4183caf54b --- /dev/null +++ b/usr/src/uts/sparc/igb/Makefile @@ -0,0 +1,106 @@ +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE +# or http://www.opensolaris.org/os/licensing. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at usr/src/OPENSOLARIS.LICENSE. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# +# +# Copyright 2008 Sun Microsystems, Inc. All rights reserved. +# Use is subject to license terms. +# +# ident "%Z%%M% %I% %E% SMI" +# +# uts/sparc/igb/Makefile +# +# This makefile drives the production of the igb +# network driver kernel module. +# +# intel architecture dependent +# + +# +# Path to the base of the uts directory tree (usually /usr/src/uts). +# +UTSBASE = ../.. + +# +# Define the module and object file sets. 
+# +MODULE = igb +OBJECTS = $(IGB_OBJS:%=$(OBJS_DIR)/%) +LINTS = $(IGB_OBJS:%.o=$(LINTS_DIR)/%.ln) +ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE) +CONF_SRCDIR = $(UTSBASE)/common/io/igb + +# +# Include common rules. +# +include $(UTSBASE)/sparc/Makefile.sparc + +# +# Define targets +# +ALL_TARGET = $(BINARY) $(SRC_CONFFILE) +LINT_TARGET = $(MODULE).lint +INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE) + +# +# Override defaults +# +INC_PATH += -I$(CONF_SRCDIR) + +# +# lint pass one enforcement +# +CFLAGS += $(CCVERBOSE) + +# +# Turn on doubleword alignment for 64 bit registers +# +CFLAGS += -dalign + +# +# Driver depends on MAC & IP +# +LDFLAGS += -dy -N misc/mac -N drv/ip + +# +# Default build targets. +# +.KEEP_STATE: + +def: $(DEF_DEPS) + +all: $(ALL_DEPS) + +clean: $(CLEAN_DEPS) + +clobber: $(CLOBBER_DEPS) + +lint: $(LINT_DEPS) + +modlintlib: $(MODLINTLIB_DEPS) + +clean.lint: $(CLEAN_LINT_DEPS) + +install: $(INSTALL_DEPS) + +# +# Include common targets. +# +include $(UTSBASE)/sparc/Makefile.targ + |