author    Sukumar Swaminathan <Sukumar.Swaminathan@Sun.COM>  2009-11-07 09:32:37 -0800
committer Sukumar Swaminathan <Sukumar.Swaminathan@Sun.COM>  2009-11-07 09:32:37 -0800
commit    bafec74292ca6805e5acb387856f4e60a5314b37 (patch)
tree      7f1194f208bebb7bcb9433b054e9b513bcd09b2c
parent    24fd5dc40967be3acf455139813c114a29e7c529 (diff)
download  illumos-joyent-bafec74292ca6805e5acb387856f4e60a5314b37.tar.gz
PSARC/2009/525 qlge - QLogic PCIe converged NIC driver
6871527 FCoE, qlge driver - Add NIC side of support for new Qlogic FCoE adapter, Europa
-rw-r--r--  usr/src/pkgdefs/SUNWqlc/Makefile                                7
-rw-r--r--  usr/src/pkgdefs/SUNWqlc/pkginfo.tmpl                            6
-rw-r--r--  usr/src/pkgdefs/SUNWqlc/postinstall                           138
-rw-r--r--  usr/src/pkgdefs/SUNWqlc/postinstall.tmpl                       60
-rw-r--r--  usr/src/pkgdefs/SUNWqlc/preremove.tmpl (renamed from usr/src/pkgdefs/SUNWqlc/preremove)  16
-rw-r--r--  usr/src/pkgdefs/SUNWqlc/prototype_i386                          2
-rw-r--r--  usr/src/pkgdefs/SUNWqlc/prototype_sparc                         1
-rw-r--r--  usr/src/uts/common/Makefile.files                               2
-rw-r--r--  usr/src/uts/common/Makefile.rules                               7
-rw-r--r--  usr/src/uts/common/io/fibre-channel/fca/qlge/qlge.c          7287
-rw-r--r--  usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_dbg.c      2943
-rw-r--r--  usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_flash.c    1403
-rw-r--r--  usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_gld.c       919
-rw-r--r--  usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_mpi.c      1194
-rw-r--r--  usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge.h          904
-rw-r--r--  usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_dbg.h      104
-rw-r--r--  usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_hw.h      2503
-rw-r--r--  usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_open.h      49
-rw-r--r--  usr/src/uts/intel/Makefile.intel.shared                         1
-rw-r--r--  usr/src/uts/intel/qlge/Makefile                                93
-rw-r--r--  usr/src/uts/sparc/Makefile.sparc.shared                         1
-rw-r--r--  usr/src/uts/sparc/qlge/Makefile                                93
22 files changed, 17581 insertions, 152 deletions
diff --git a/usr/src/pkgdefs/SUNWqlc/Makefile b/usr/src/pkgdefs/SUNWqlc/Makefile
index 158abd2b31..cf4d24ae51 100644
--- a/usr/src/pkgdefs/SUNWqlc/Makefile
+++ b/usr/src/pkgdefs/SUNWqlc/Makefile
@@ -20,21 +20,22 @@
#
#
-# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
include ../Makefile.com
+TMPLFILES += postinstall preremove
DATAFILES += i.qlc
-
LICENSEFILES += $(PKGDEFS)/SUNWqlc/lic_Qlogic
.KEEP_STATE:
-all: $(FILES) depend preremove postinstall
+all: $(FILES) depend
install: all pkg
include ../Makefile.targ
+include ../Makefile.prtarg
diff --git a/usr/src/pkgdefs/SUNWqlc/pkginfo.tmpl b/usr/src/pkgdefs/SUNWqlc/pkginfo.tmpl
index 6d4d263fcd..1c0ec55292 100644
--- a/usr/src/pkgdefs/SUNWqlc/pkginfo.tmpl
+++ b/usr/src/pkgdefs/SUNWqlc/pkginfo.tmpl
@@ -19,7 +19,7 @@
# CDDL HEADER END
#
#
-# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
@@ -30,14 +30,14 @@
#
PKG="SUNWqlc"
-NAME="Qlogic ISP 2200/2202 Fibre Channel Device Driver"
+NAME="Qlogic ISP Fibre Channel Device Driver and GLDv3 NIC driver"
ARCH="ISA"
CATEGORY="system"
BASEDIR=/
SUNW_PKGVERS="1.0"
SUNW_PKGTYPE="root"
CLASSES="none qlc"
-DESC="Qlogic ISP 2200/2202 Fibre Channel Device Driver"
+DESC="Qlogic ISP Fibre Channel Device Driver and GLDv3 NIC driver"
SUNW_PRODNAME="SunOS"
SUNW_PRODVERS="RELEASE/VERSION"
VERSION="ONVERS,REV=0.0.0"
diff --git a/usr/src/pkgdefs/SUNWqlc/postinstall b/usr/src/pkgdefs/SUNWqlc/postinstall
deleted file mode 100644
index 5e9fb16e61..0000000000
--- a/usr/src/pkgdefs/SUNWqlc/postinstall
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/bin/sh
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-#
-#
-# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
-# Use is subject to license terms.
-#
-
-PATH="/usr/bin:/usr/sbin:$PATH"; export PATH
-
-# Driver definitions
-DRVR_NAME=qlc
-PERM_OPT="-m '* 0600 root sys'"
-CLASS_OPT="-c fibre-channel"
-
-DRVR_ALIASES_sparc="\
- pci1077,2200 \
- pci1077,2300 \
- pci1077,2312 \
- pci1077,2422 \
- pciex1077,2432 \
- pciex1077,2532 \
- pciex1077,5432 \
- pciex1077,8001 \
- "
-
-DRVR_ALIASES_i386="\
- pci1077,2200 \
- pci1077,2300 \
- pci1077,2312 \
- pci1077,132 \
- pci1077,2422 \
- pciex1077,2432 \
- pciex1077,2532 \
- pciex1077,5432 \
- pciex1077,8001 \
- "
-
-HARDWARE_STRINGS_sparc="\
- SUNW,qlc \
- pci1077,2422 \
- pciex1077,2432 \
- pci1077,2432 \
- pciex1077,2532 \
- pciex1077,5432 \
- pciex1077,8001 \
- "
-
-HARDWARE_STRING_i386="\
- SUNW,qlc \
- pci1077,132 \
- pci1077,2422 \
- pciex1077,2432 \
- pci1077,2432 \
- pciex1077,2532 \
- pciex1077,5432 \
- pciex1077,8001 \
- "
-
-if [ ${ARCH} = "sparc" ]; then
- DRVR_ALIASES_LIST=$DRVR_ALIASES_sparc
- HARDWARE_STRINGS=$HARDWARE_STRINGS_sparc
-elif [ ${ARCH} = "i386" ]; then
- DRVR_ALIASES_LIST=$DRVR_ALIASES_i386
- HARDWARE_STRINGS=$HARDWARE_STRINGS_i386
-else
- echo "\n$0 Failed: ${ARCH} is not supported.\n" >&2
- exit 1
-fi
-
-
-
-for ALIAS in $DRVR_ALIASES_LIST ; do
- if [ -z "$ALIASES_OPT" ] ; then
- ALIASES_OPT="-i '\"$ALIAS\""
- else
- ALIASES_OPT="$ALIASES_OPT \"$ALIAS\""
- fi
-done
-ALIASES_OPT="$ALIASES_OPT'"
-
-
-for STRING in $HARDWARE_STRINGS ; do
- if [ -z "$HARDWARE_LIST" ] ; then
- HARDWARE_LIST="$STRING"
- else
- # Seperate items with pipe to represent or with egrep.
- HARDWARE_LIST="$HARDWARE_LIST|$STRING"
- fi
-done
-
-
-if [ -z "${BASEDIR}" ]; then
- echo "\n$0 Failed: BASEDIR is not set.\n" >&2
- exit 1
-fi
-
-
-# Remove existing definition, if it exists.
-/usr/sbin/rem_drv -b "${BASEDIR}" ${DRVR_NAME} > /dev/null 2>&1
-
-# Check for hardware
-prtconf -pv | egrep "${HARDWARE_LIST}" > /dev/null 2>&1
-if [ $? -eq 0 ]; then
- # Hardware is present; use command to attach the drivers
- ADD_DRV="add_drv -b ${BASEDIR}"
-else
- # No hardware found on the system, prevent attachment
- ADD_DRV="add_drv -n -b ${BASEDIR}"
-fi
-
-eval ${ADD_DRV} "${PERM_OPT}" ${CLASS_OPT} "${ALIASES_OPT}" ${DRVR_NAME}
-if [ $? -ne 0 ]; then
- echo "\nCommand Failed:\n${ADD_DRV} "${PERM_OPT}" \
- ${CLASS_OPT} "${ALIASES_OPT}" ${DRVR_NAME}\n" >&2
- exit 1
-fi
-
-exit 0
diff --git a/usr/src/pkgdefs/SUNWqlc/postinstall.tmpl b/usr/src/pkgdefs/SUNWqlc/postinstall.tmpl
new file mode 100644
index 0000000000..67b93ebaf3
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWqlc/postinstall.tmpl
@@ -0,0 +1,60 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+include drv_utils
+
+ret=0
+
+if [ ${ARCH} = "sparc" ]; then
+ pkg_drvadd -i \
+ '"pci1077,2200"
+ "pci1077,2300"
+ "pci1077,2312"
+ "pci1077,2422"
+ "pciex1077,2432"
+ "pciex1077,2532"
+ "pciex1077,5432"
+ "pciex1077,8001"' \
+ -m '* 0666 root sys' -c "fibre-channel" qlc || ret=1
+elif [ ${ARCH} = "i386" ]; then
+ pkg_drvadd -i \
+ '"pci1077,2200"
+ "pci1077,2300"
+ "pci1077,2312"
+ "pci1077,132"
+ "pci1077,2422"
+ "pciex1077,2432"
+ "pciex1077,2532"
+ "pciex1077,5432"
+ "pciex1077,8001"' \
+ -m '* 0666 root sys' -c "fibre-channel" qlc || ret=1
+else
+ echo "\n$0 Failed: ${ARCH} is not supported.\n" >&2
+ ret=1
+fi
+
+pkg_drvadd -i "pciex1077,8000" -m '* 0666 root sys' qlge || ret=1
+
+exit $ret
diff --git a/usr/src/pkgdefs/SUNWqlc/preremove b/usr/src/pkgdefs/SUNWqlc/preremove.tmpl
index 141a053b22..f775fc7344 100644
--- a/usr/src/pkgdefs/SUNWqlc/preremove
+++ b/usr/src/pkgdefs/SUNWqlc/preremove.tmpl
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/sbin/sh
#
# CDDL HEADER START
#
@@ -19,15 +19,15 @@
#
# CDDL HEADER END
#
-#
-#
-# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
-DRVR_NAME=qlc
+include drv_utils
+
+ret=0
-# Remove the driver entries but leave it attached.
-/usr/sbin/rem_drv -b ${BASEDIR} ${DRVR_NAME}
+pkg_drvrem qlc || ret=1
+pkg_drvrem qlge || ret=1
-exit 0
+exit $ret
diff --git a/usr/src/pkgdefs/SUNWqlc/prototype_i386 b/usr/src/pkgdefs/SUNWqlc/prototype_i386
index 251e157868..51dbd420cd 100644
--- a/usr/src/pkgdefs/SUNWqlc/prototype_i386
+++ b/usr/src/pkgdefs/SUNWqlc/prototype_i386
@@ -46,8 +46,10 @@
# SUNWqlc
#
f none kernel/drv/qlc 0755 root sys
+f none kernel/drv/qlge 0755 root sys
d none kernel/drv/amd64 0755 root sys
f none kernel/drv/amd64/qlc 0755 root sys
+f none kernel/drv/amd64/qlge 0755 root sys
f none kernel/misc/qlc/qlc_fw_2200 0755 root sys
f none kernel/misc/qlc/qlc_fw_2300 0755 root sys
f none kernel/misc/qlc/qlc_fw_2400 0755 root sys
diff --git a/usr/src/pkgdefs/SUNWqlc/prototype_sparc b/usr/src/pkgdefs/SUNWqlc/prototype_sparc
index 6121197726..b21d9e901a 100644
--- a/usr/src/pkgdefs/SUNWqlc/prototype_sparc
+++ b/usr/src/pkgdefs/SUNWqlc/prototype_sparc
@@ -47,6 +47,7 @@
#
d none kernel/drv/sparcv9 0755 root sys
f none kernel/drv/sparcv9/qlc 0755 root sys
+f none kernel/drv/sparcv9/qlge 0755 root sys
d none kernel/misc/qlc/sparcv9 0755 root sys
f none kernel/misc/qlc/sparcv9/qlc_fw_2200 0755 root sys
f none kernel/misc/qlc/sparcv9/qlc_fw_2300 0755 root sys
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index 3afb9a6bf6..f4c848bac8 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -1028,6 +1028,8 @@ QLC_FW_6322_OBJS += ql_fw_6322.o
QLC_FW_8100_OBJS += ql_fw_8100.o
+QLGE_OBJS += qlge.o qlge_dbg.o qlge_flash.o qlge_gld.o qlge_mpi.o
+
ZCONS_OBJS += zcons.o
NV_SATA_OBJS += nv_sata.o
diff --git a/usr/src/uts/common/Makefile.rules b/usr/src/uts/common/Makefile.rules
index 277ac46685..fac41fb41a 100644
--- a/usr/src/uts/common/Makefile.rules
+++ b/usr/src/uts/common/Makefile.rules
@@ -1042,6 +1042,10 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/common/io/fibre-channel/fca/qlc/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
+$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/fibre-channel/fca/qlge/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/fibre-channel/fca/emlxs/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
@@ -2229,6 +2233,9 @@ $(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/fibre-channel/impl/%.c
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/fibre-channel/fca/qlc/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
+$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/fibre-channel/fca/qlge/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/fibre-channel/fca/emlxs/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
diff --git a/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge.c b/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge.c
new file mode 100644
index 0000000000..f4fa8e964e
--- /dev/null
+++ b/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge.c
@@ -0,0 +1,7287 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 QLogic Corporation. All rights reserved.
+ */
+
+#include <qlge.h>
+#include <sys/atomic.h>
+#include <sys/strsubr.h>
+#include <sys/pattr.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+#include <inet/ip.h>
+
+
+
+/*
+ * Local variables
+ */
+static struct ether_addr ql_ether_broadcast_addr =
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+static char version[] = "QLogic GLDv3 Driver " VERSIONSTR;
+
+/*
+ * Local function prototypes
+ */
+static void ql_free_resources(dev_info_t *, qlge_t *);
+static void ql_fini_kstats(qlge_t *);
+static uint32_t ql_get_link_state(qlge_t *);
+static void ql_read_conf(qlge_t *);
+static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
+ ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
+ size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
+static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
+static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
+static int ql_route_initialize(qlge_t *);
+static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
+static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
+static int ql_bringdown_adapter(qlge_t *);
+static int ql_bringup_adapter(qlge_t *);
+static int ql_asic_reset(qlge_t *);
+static void ql_wake_mpi_reset_soft_intr(qlge_t *);
+static void ql_stop_timer(qlge_t *qlge);
+
+/*
+ * TX DMA mapping handles allow multiple scatter-gather lists
+ */
+ddi_dma_attr_t tx_mapping_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ QL_DMA_LOW_ADDRESS, /* low DMA address range */
+ QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
+ QL_DMA_XFER_COUNTER, /* DMA counter register */
+ QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment, default - 8 */
+ QL_DMA_BURSTSIZES, /* DMA burstsizes */
+ QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
+ QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
+ QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
+ QL_MAX_TX_DMA_HANDLES, /* s/g list length */
+ QL_DMA_GRANULARITY, /* granularity of device */
+ QL_DMA_XFER_FLAGS /* DMA transfer flags */
+};
+
+/*
+ * Receive buffers and Request/Response queues do not allow scatter-gather lists
+ */
+ddi_dma_attr_t dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ QL_DMA_LOW_ADDRESS, /* low DMA address range */
+ QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
+ QL_DMA_XFER_COUNTER, /* DMA counter register */
+ QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment, default - 8 */
+ QL_DMA_BURSTSIZES, /* DMA burstsizes */
+ QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
+ QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
+ QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
+ 1, /* s/g list length, i.e no sg list */
+ QL_DMA_GRANULARITY, /* granularity of device */
+ QL_DMA_XFER_FLAGS /* DMA transfer flags */
+};
+
+/*
+ * DMA access attribute structure.
+ */
+/* device register access from host */
+ddi_device_acc_attr_t ql_dev_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+/* host ring descriptors */
+ddi_device_acc_attr_t ql_desc_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+/* host ring buffer */
+ddi_device_acc_attr_t ql_buf_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+/*
+ * Hash key table for Receive Side Scaling (RSS) support
+ */
+const uint8_t key_data[] = {
+ 0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
+ 0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
+ 0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
+ 0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
+
+/*
+ * Shadow Registers:
+ * Outbound queues have a consumer index that is maintained by the chip.
+ * Inbound queues have a producer index that is maintained by the chip.
+ * For lower overhead, these registers are "shadowed" to host memory
+ * which allows the device driver to track the queue progress without
+ * PCI reads. When an entry is placed on an inbound queue, the chip will
+ * update the relevant index register and then copy the value to the
+ * shadow register in host memory.
+ */
+
+static inline unsigned int
+ql_read_sh_reg(const volatile void *addr)
+{
+ return (*(volatile uint32_t *)addr);
+}
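
A minimal usage sketch of the shadow-register scheme described above (an editorial illustration, not part of this change; the prod_idx_sh_reg and cnsmr_idx field names are assumptions): the driver can learn whether a completion queue has new entries from a plain host-memory read instead of a PCI register read.

    static boolean_t
    ql_cq_has_work(struct rx_ring *rx_ring)
    {
            /* the chip copied its producer index into host memory */
            uint32_t prod_idx = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);

            /* new completions exist while the producer leads our consumer */
            return (prod_idx != rx_ring->cnsmr_idx);
    }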
+
+/*
+ * Read 32 bit atomically
+ */
+uint32_t
+ql_atomic_read_32(volatile uint32_t *target)
+{
+ /*
+ * atomic_add_32_nv returns the new value after the add,
+ * we are adding 0 so we should get the original value
+ */
+ return (atomic_add_32_nv(target, 0));
+}
+
+/*
+ * Set 32 bit atomically
+ */
+void
+ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
+{
+ (void) atomic_swap_32(target, newval);
+}
+
+
+/*
+ * Setup device PCI configuration registers.
+ * Kernel context.
+ */
+static void
+ql_pci_config(qlge_t *qlge)
+{
+ uint16_t w;
+
+ qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
+ PCI_CONF_VENID);
+ qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
+ PCI_CONF_DEVID);
+
+ /*
+ * we want to respect framework's setting of PCI
+ * configuration space command register and also
+ * want to make sure that all bits of interest to us
+ * are properly set in PCI Command register(0x04).
+ * PCI_COMM_IO 0x1 I/O access enable
+ * PCI_COMM_MAE 0x2 Memory access enable
+ * PCI_COMM_ME 0x4 bus master enable
+ * PCI_COMM_MEMWR_INVAL 0x10 memory write and invalidate enable.
+ */
+ w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
+ w = (uint16_t)(w & (~PCI_COMM_IO));
+ w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
+ /* PCI_COMM_MEMWR_INVAL | */
+ PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
+
+ pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);
+
+ ql_dump_pci_config(qlge);
+}
+
+/*
+ * This routine performs the necessary steps to set GLD mac information
+ * such as Function number, xgmac mask and shift bits
+ */
+static int
+ql_set_mac_info(qlge_t *qlge)
+{
+ uint32_t value;
+ int rval = DDI_SUCCESS;
+ uint32_t fn0_net, fn1_net;
+
+ /* set default value */
+ qlge->fn0_net = FN0_NET;
+ qlge->fn1_net = FN1_NET;
+
+ if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) read MPI register failed",
+ __func__, qlge->instance);
+ } else {
+ fn0_net = (value >> 1) & 0x07;
+ fn1_net = (value >> 5) & 0x07;
+ if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
+ cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, \n"
+ "nic0 function number %d,"
+ "nic1 function number %d "
+ "use default\n",
+ __func__, qlge->instance, value, fn0_net, fn1_net);
+ } else {
+ qlge->fn0_net = fn0_net;
+ qlge->fn1_net = fn1_net;
+ }
+ }
+
+ /* Get the function number that the driver is associated with */
+ value = ql_read_reg(qlge, REG_STATUS);
+ qlge->func_number = (uint8_t)((value >> 6) & 0x03);
+ QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
+ value, qlge->func_number));
+
+ /* The driver is loaded on a non-NIC function? */
+ if ((qlge->func_number != qlge->fn0_net) &&
+ (qlge->func_number != qlge->fn1_net)) {
+ cmn_err(CE_WARN,
+ "Invalid function number = 0x%x\n", qlge->func_number);
+ return (DDI_FAILURE);
+ }
+ /* network port 0? */
+ if (qlge->func_number == qlge->fn0_net) {
+ qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
+ qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
+ } else {
+ qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
+ qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
+ }
+
+ return (rval);
+
+}
+
+/*
+ * write to doorbell register
+ */
+void
+ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
+{
+ ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
+}
+
+/*
+ * read from doorbell register
+ */
+uint32_t
+ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
+{
+ uint32_t ret;
+
+ ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);
+
+ return (ret);
+}
+
+/*
+ * This function waits for a specific bit to come ready
+ * in a given register. It is used mostly by the initialize
+ * process, but is also used in kernel thread API such as
+ * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
+ */
+static int
+ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
+{
+ uint32_t temp;
+ int count = UDELAY_COUNT;
+
+ while (count) {
+ temp = ql_read_reg(qlge, reg);
+
+ /* check for errors */
+ if ((temp & err_bit) != 0) {
+ break;
+ } else if ((temp & bit) != 0)
+ return (DDI_SUCCESS);
+ qlge_delay(UDELAY_DELAY);
+ count--;
+ }
+ cmn_err(CE_WARN,
+ "Waiting for reg %x to come ready failed.", reg);
+ return (DDI_FAILURE);
+}
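
A hedged usage sketch of the polling helper above (not part of this change; REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW and MAC_ADDR_E are assumed names for illustration): callers wait for the ready bit before touching an indexed register, and bail out if the error bit fires first.

    /* wait for the MAC-address index register before programming it */
    if (ql_wait_reg_rdy(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
        MAC_ADDR_MW, MAC_ADDR_E) != DDI_SUCCESS)
            return (DDI_FAILURE);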
+
+/*
+ * The CFG register is used to download TX and RX control blocks
+ * to the chip. This function waits for an operation to complete.
+ */
+static int
+ql_wait_cfg(qlge_t *qlge, uint32_t bit)
+{
+ int count = UDELAY_COUNT;
+ uint32_t temp;
+
+ while (count) {
+ temp = ql_read_reg(qlge, REG_CONFIGURATION);
+ if ((temp & CFG_LE) != 0) {
+ break;
+ }
+ if ((temp & bit) == 0)
+ return (DDI_SUCCESS);
+ qlge_delay(UDELAY_DELAY);
+ count--;
+ }
+ cmn_err(CE_WARN,
+ "Waiting for cfg register bit %x failed.", bit);
+ return (DDI_FAILURE);
+}
+
+
+/*
+ * Used to issue init control blocks to hw. Maps control block,
+ * sets address, triggers download, waits for completion.
+ */
+static int
+ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
+{
+ int status = DDI_SUCCESS;
+ uint32_t mask;
+ uint32_t value;
+
+ status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
+ if (status != DDI_SUCCESS) {
+ goto exit;
+ }
+ status = ql_wait_cfg(qlge, bit);
+ if (status != DDI_SUCCESS) {
+ goto exit;
+ }
+
+ ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
+ ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));
+
+ mask = CFG_Q_MASK | (bit << 16);
+ value = bit | (q_id << CFG_Q_SHIFT);
+ ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));
+
+ /*
+ * Wait for the bit to clear after signaling hw.
+ */
+ status = ql_wait_cfg(qlge, bit);
+ ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */
+
+exit:
+ return (status);
+}
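
A sketch of how the download routine above is typically driven (not part of this change; CFG_LCQ and the cqicb_dma field are assumed names): each ring keeps its init control block in DMA memory and hands it to the chip by queue id.

    /* download this ring's completion-queue ICB; the chip DMA-reads it */
    if (ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
        rx_ring->cq_id) != DDI_SUCCESS)
            return (DDI_FAILURE);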
+
+/*
+ * Initialize adapter instance
+ */
+static int
+ql_init_instance(qlge_t *qlge)
+{
+ int i;
+
+ /* Default value */
+ qlge->mac_flags = QL_MAC_INIT;
+ qlge->mtu = ETHERMTU; /* set normal size as default */
+ qlge->page_size = VM_PAGE_SIZE; /* default page size */
+ /* Set up the default ring sizes. */
+ qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
+ qlge->rx_ring_size = NUM_RX_RING_ENTRIES;
+
+ /* Set up the coalescing parameters. */
+ qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
+ qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
+ qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
+ qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
+ qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
+ qlge->ql_dbgprnt = 0;
+#if QL_DEBUG
+ qlge->ql_dbgprnt = QL_DEBUG;
+#endif /* QL_DEBUG */
+
+ /*
+ * TODO: Should be obtained from configuration or based off
+ * number of active cpus SJP 4th Mar. 09
+ */
+ qlge->tx_ring_count = 1;
+ qlge->rss_ring_count = 4;
+ qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
+
+ for (i = 0; i < MAX_RX_RINGS; i++) {
+ qlge->rx_polls[i] = 0;
+ qlge->rx_interrupts[i] = 0;
+ }
+
+ /*
+ * Set up the operating parameters.
+ */
+ qlge->multicast_list_count = 0;
+
+ /*
+ * Set up the max number of unicast list
+ */
+ qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
+ qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;
+
+ /*
+ * read user defined properties in .conf file
+ */
+ ql_read_conf(qlge); /* mtu, pause, LSO etc */
+
+ QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));
+
+ /* choose Memory Space mapping and get Vendor Id, Device ID etc */
+ ql_pci_config(qlge);
+ qlge->ip_hdr_offset = 0;
+
+ if (qlge->device_id == 0x8000) {
+ /* Schultz card */
+ qlge->cfg_flags |= CFG_CHIP_8100;
+ /* enable just ipv4 chksum offload for Schultz */
+ qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
+ /*
+ * Schultz firmware does not do pseudo IP header checksum
+ * calculation; it needs to be done by the driver
+ */
+ qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
+ if (qlge->lso_enable)
+ qlge->cfg_flags |= CFG_LSO;
+ qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
+ /* Schultz must split packet header */
+ qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
+ qlge->max_read_mbx = 5;
+ qlge->ip_hdr_offset = 2;
+ }
+
+ /* Set Function Number and some of the iocb mac information */
+ if (ql_set_mac_info(qlge) != DDI_SUCCESS)
+ return (DDI_FAILURE);
+
+ /* Read network settings from NVRAM */
+ /* After nvram is read successfully, update dev_addr */
+ if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
+ QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
+ for (i = 0; i < ETHERADDRL; i++) {
+ qlge->dev_addr.ether_addr_octet[i] =
+ qlge->nic_config.factory_MAC[i];
+ }
+ } else {
+ cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+
+ bcopy(qlge->dev_addr.ether_addr_octet,
+ qlge->unicst_addr[0].addr.ether_addr_octet,
+ ETHERADDRL);
+ QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
+ &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);
+
+ qlge->port_link_state = LS_DOWN;
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * This hardware semaphore provides the mechanism for exclusive access to
+ * resources shared between the NIC driver, MPI firmware,
+ * FCOE firmware and the FC driver.
+ */
+static int
+ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
+{
+ uint32_t sem_bits = 0;
+
+ switch (sem_mask) {
+ case SEM_XGMAC0_MASK:
+ sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
+ break;
+ case SEM_XGMAC1_MASK:
+ sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
+ break;
+ case SEM_ICB_MASK:
+ sem_bits = SEM_SET << SEM_ICB_SHIFT;
+ break;
+ case SEM_MAC_ADDR_MASK:
+ sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
+ break;
+ case SEM_FLASH_MASK:
+ sem_bits = SEM_SET << SEM_FLASH_SHIFT;
+ break;
+ case SEM_PROBE_MASK:
+ sem_bits = SEM_SET << SEM_PROBE_SHIFT;
+ break;
+ case SEM_RT_IDX_MASK:
+ sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
+ break;
+ case SEM_PROC_REG_MASK:
+ sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
+ break;
+ default:
+ cmn_err(CE_WARN, "Bad Semaphore mask!.");
+ return (DDI_FAILURE);
+ }
+
+ ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
+ return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
+}
+
+/*
+ * Lock a specific bit of Semaphore register to gain
+ * access to a particular shared register
+ */
+int
+ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
+{
+ unsigned int wait_count = 30;
+
+ while (wait_count) {
+ if (!ql_sem_trylock(qlge, sem_mask))
+ return (DDI_SUCCESS);
+ qlge_delay(100);
+ wait_count--;
+ }
+ cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
+ __func__, qlge->instance, sem_mask);
+ return (DDI_FAILURE);
+}
+
+/*
+ * Unlock a specific bit of Semaphore register to release
+ * access to a particular shared register
+ */
+void
+ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
+{
+ ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
+ ql_read_reg(qlge, REG_SEMAPHORE); /* flush */
+}
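
A typical lock/unlock pairing, shown as a sketch (not part of this change): any access to a shared resource, flash for example, is bracketed by the two calls above.

    if (ql_sem_spinlock(qlge, SEM_FLASH_MASK) != DDI_SUCCESS)
            return (DDI_FAILURE);   /* firmware or the FC driver holds it */
    /* ... access the shared flash interface here ... */
    ql_sem_unlock(qlge, SEM_FLASH_MASK);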
+
+/*
+ * Get property value from configuration file.
+ *
+ * string = property string pointer.
+ *
+ * Returns:
+ * 0xFFFFFFFF = no property else property value.
+ */
+static uint32_t
+ql_get_prop(qlge_t *qlge, char *string)
+{
+ char buf[256];
+ uint32_t data;
+
+ /* Get adapter instance parameter. */
+ (void) sprintf(buf, "hba%d-%s", qlge->instance, string);
+ data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
+ (int)0xffffffff);
+
+ /* Adapter instance parameter found? */
+ if (data == 0xffffffff) {
+ /* No, get default parameter. */
+ data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
+ string, (int)0xffffffff);
+ }
+
+ return (data);
+}
+
+/*
+ * Read user setting from configuration file.
+ */
+static void
+ql_read_conf(qlge_t *qlge)
+{
+ uint32_t data;
+
+ /* clear configuration flags */
+ qlge->cfg_flags = 0;
+
+ /* Get default rx_copy enable/disable. */
+ if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
+ data == 0) {
+ qlge->cfg_flags &= ~CFG_RX_COPY_MODE;
+ qlge->rx_copy = B_FALSE;
+ QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
+ } else if (data == 1) {
+ qlge->cfg_flags |= CFG_RX_COPY_MODE;
+ qlge->rx_copy = B_TRUE;
+ QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
+ }
+
+ /* Get mtu packet size. */
+ data = ql_get_prop(qlge, "mtu");
+ if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
+ if (qlge->mtu != data) {
+ qlge->mtu = data;
+ cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
+ }
+ }
+
+ /* Get pause mode, default is Per Priority mode. */
+ qlge->pause = PAUSE_MODE_PER_PRIORITY;
+ data = ql_get_prop(qlge, "pause");
+ if (data <= PAUSE_MODE_PER_PRIORITY) {
+ if (qlge->pause != data) {
+ qlge->pause = data;
+ cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
+ }
+ }
+
+ /* Get tx_max_coalesced_frames. */
+ qlge->tx_max_coalesced_frames = 5;
+ data = ql_get_prop(qlge, "tx_max_coalesced_frames");
+ /* if data is valid */
+ if ((data != 0xffffffff) && data) {
+ if (qlge->tx_max_coalesced_frames != data) {
+ qlge->tx_max_coalesced_frames = (uint16_t)data;
+ }
+ }
+
+ /* Get split header payload_copy_thresh. */
+ qlge->payload_copy_thresh = 6;
+ data = ql_get_prop(qlge, "payload_copy_thresh");
+ /* if data is valid */
+ if ((data != 0xffffffff) && (data != 0)) {
+ if (qlge->payload_copy_thresh != data) {
+ qlge->payload_copy_thresh = data;
+ }
+ }
+
+ /* large send offload (LSO) capability. */
+ qlge->lso_enable = 1;
+ data = ql_get_prop(qlge, "lso_enable");
+ /* if data is valid */
+ if (data != 0xffffffff) {
+ if (qlge->lso_enable != data) {
+ qlge->lso_enable = (uint16_t)data;
+ }
+ }
+}
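
Given the lookup scheme in ql_get_prop() and the keys read above, a driver.conf fragment would look roughly like this (a hypothetical example, not shipped by this change); a hba<instance>- prefixed key overrides the bare key for that one instance.

    # hypothetical qlge.conf entries
    mtu=9000;            # jumbo frames for all instances
    hba0-mtu=1500;       # ...except instance 0
    lso_enable=1;
    force-rx-copy=0;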
+
+/*
+ * Enable global interrupt
+ */
+static void
+ql_enable_global_interrupt(qlge_t *qlge)
+{
+ ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
+ (INTR_EN_EI << 16) | INTR_EN_EI);
+ qlge->flags |= INTERRUPTS_ENABLED;
+}
+
+/*
+ * Disable global interrupt
+ */
+static void
+ql_disable_global_interrupt(qlge_t *qlge)
+{
+ ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
+ qlge->flags &= ~INTERRUPTS_ENABLED;
+}
+
+/*
+ * Enable one ring interrupt
+ */
+void
+ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
+{
+ struct intr_ctx *ctx = qlge->intr_ctx + intr;
+
+ QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
+ __func__, qlge->instance, intr, ctx->irq_cnt));
+
+ if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
+ /*
+ * Always enable if we're MSIX multi interrupts and
+ * it's not the default (zeroeth) interrupt.
+ */
+ ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
+ return;
+ }
+
+ if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
+ mutex_enter(&qlge->hw_mutex);
+ ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
+ mutex_exit(&qlge->hw_mutex);
+ QL_PRINT(DBG_INTR,
+ ("%s(%d): write %x to intr enable register \n",
+ __func__, qlge->instance, ctx->intr_en_mask));
+ }
+}
+
+/*
+ * ql_forced_disable_completion_interrupt
+ * Used by call from OS, may be called without
+ * a pending interrupt so force the disable
+ */
+uint32_t
+ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
+{
+ uint32_t var = 0;
+ struct intr_ctx *ctx = qlge->intr_ctx + intr;
+
+ QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
+ __func__, qlge->instance, intr, ctx->irq_cnt));
+
+ if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
+ ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
+ var = ql_read_reg(qlge, REG_STATUS);
+ return (var);
+ }
+
+ mutex_enter(&qlge->hw_mutex);
+ ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
+ var = ql_read_reg(qlge, REG_STATUS);
+ mutex_exit(&qlge->hw_mutex);
+
+ return (var);
+}
+
+/*
+ * Disable a completion interrupt
+ */
+void
+ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
+{
+ struct intr_ctx *ctx;
+
+ ctx = qlge->intr_ctx + intr;
+ QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
+ __func__, qlge->instance, intr, ctx->irq_cnt));
+ /*
+ * HW disables for us if we're MSIX multi interrupts and
+ * it's not the default (zeroeth) interrupt.
+ */
+ if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
+ return;
+
+ if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
+ mutex_enter(&qlge->hw_mutex);
+ ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
+ mutex_exit(&qlge->hw_mutex);
+ }
+ atomic_inc_32(&ctx->irq_cnt);
+}
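
Taken together, the enable and disable routines implement a nesting counter: irq_cnt is bumped by every disable and dropped by every enable, and the hardware is re-armed only when the count returns to zero. A behavioral sketch for the default interrupt (not part of this change):

    ql_disable_completion_interrupt(qlge, 0); /* irq_cnt 0 -> 1, masked */
    ql_disable_completion_interrupt(qlge, 0); /* irq_cnt 1 -> 2 */
    ql_enable_completion_interrupt(qlge, 0);  /* irq_cnt 2 -> 1, still masked */
    ql_enable_completion_interrupt(qlge, 0);  /* irq_cnt 1 -> 0, re-armed */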
+
+/*
+ * Enable all completion interrupts
+ */
+static void
+ql_enable_all_completion_interrupts(qlge_t *qlge)
+{
+ int i;
+ uint32_t value = 1;
+
+ for (i = 0; i < qlge->intr_cnt; i++) {
+ /*
+ * Set the count to 1 for Legacy / MSI interrupts or for the
+ * default interrupt (0)
+ */
+ if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
+ ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
+ }
+ ql_enable_completion_interrupt(qlge, i);
+ }
+}
+
+/*
+ * Disable all completion interrupts
+ */
+static void
+ql_disable_all_completion_interrupts(qlge_t *qlge)
+{
+ int i;
+ uint32_t value = 0;
+
+ for (i = 0; i < qlge->intr_cnt; i++) {
+
+ /*
+ * Set the count to 0 for Legacy / MSI interrupts or for the
+ * default interrupt (0)
+ */
+ if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
+ ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
+
+ ql_disable_completion_interrupt(qlge, i);
+ }
+}
+
+/*
+ * Update small buffer queue producer index
+ */
+static void
+ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
+{
+ /* Update the buffer producer index */
+ QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
+ rx_ring->sbq_prod_idx));
+ ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
+ rx_ring->sbq_prod_idx);
+}
+
+/*
+ * Update large buffer queue producer index
+ */
+static void
+ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
+{
+ /* Update the buffer producer index */
+ QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
+ rx_ring->lbq_prod_idx));
+ ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
+ rx_ring->lbq_prod_idx);
+}
+
+/*
+ * Adds a small buffer descriptor to the end of its in-use list;
+ * assumes sbq_lock is already taken
+ */
+static void
+ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
+ struct bq_desc *sbq_desc)
+{
+ uint32_t inuse_idx = rx_ring->sbq_use_tail;
+
+ rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
+ inuse_idx++;
+ if (inuse_idx >= rx_ring->sbq_len)
+ inuse_idx = 0;
+ rx_ring->sbq_use_tail = inuse_idx;
+ atomic_inc_32(&rx_ring->sbuf_in_use_count);
+ ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
+}
+
+/*
+ * Get a small buffer descriptor from its in use list
+ */
+static struct bq_desc *
+ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
+{
+ struct bq_desc *sbq_desc = NULL;
+ uint32_t inuse_idx;
+
+ /* Pick from head of in use list */
+ inuse_idx = rx_ring->sbq_use_head;
+ sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
+ rx_ring->sbuf_in_use[inuse_idx] = NULL;
+
+ if (sbq_desc != NULL) {
+ inuse_idx++;
+ if (inuse_idx >= rx_ring->sbq_len)
+ inuse_idx = 0;
+ rx_ring->sbq_use_head = inuse_idx;
+ atomic_dec_32(&rx_ring->sbuf_in_use_count);
+ atomic_inc_32(&rx_ring->rx_indicate);
+ sbq_desc->upl_inuse = 1;
+ /* if mp is NULL */
+ if (sbq_desc->mp == NULL) {
+ /* try to remap mp again */
+ sbq_desc->mp =
+ desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
+ rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
+ }
+ }
+
+ return (sbq_desc);
+}
+
+/*
+ * Add a small buffer descriptor to its free list
+ */
+static void
+ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
+ struct bq_desc *sbq_desc)
+{
+ uint32_t free_idx;
+
+ /* Add to the end of free list */
+ free_idx = rx_ring->sbq_free_tail;
+ rx_ring->sbuf_free[free_idx] = sbq_desc;
+ ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
+ free_idx++;
+ if (free_idx >= rx_ring->sbq_len)
+ free_idx = 0;
+ rx_ring->sbq_free_tail = free_idx;
+ atomic_inc_32(&rx_ring->sbuf_free_count);
+}
+
+/*
+ * Get a small buffer descriptor from its free list
+ */
+static struct bq_desc *
+ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
+{
+ struct bq_desc *sbq_desc;
+ uint32_t free_idx;
+
+ free_idx = rx_ring->sbq_free_head;
+ /* Pick from top of free list */
+ sbq_desc = rx_ring->sbuf_free[free_idx];
+ rx_ring->sbuf_free[free_idx] = NULL;
+ if (sbq_desc != NULL) {
+ free_idx++;
+ if (free_idx >= rx_ring->sbq_len)
+ free_idx = 0;
+ rx_ring->sbq_free_head = free_idx;
+ atomic_dec_32(&rx_ring->sbuf_free_count);
+ ASSERT(rx_ring->sbuf_free_count != 0);
+ }
+ return (sbq_desc);
+}
+
+/*
+ * Add a large buffer descriptor to its in use list
+ */
+static void
+ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
+ struct bq_desc *lbq_desc)
+{
+ uint32_t inuse_idx;
+
+ inuse_idx = rx_ring->lbq_use_tail;
+
+ rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
+ inuse_idx++;
+ if (inuse_idx >= rx_ring->lbq_len)
+ inuse_idx = 0;
+ rx_ring->lbq_use_tail = inuse_idx;
+ atomic_inc_32(&rx_ring->lbuf_in_use_count);
+}
+
+/*
+ * Get a large buffer descriptor from in use list
+ */
+static struct bq_desc *
+ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc;
+ uint32_t inuse_idx;
+
+ /* Pick from head of in use list */
+ inuse_idx = rx_ring->lbq_use_head;
+ lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
+ rx_ring->lbuf_in_use[inuse_idx] = NULL;
+
+ if (lbq_desc != NULL) {
+ inuse_idx++;
+ if (inuse_idx >= rx_ring->lbq_len)
+ inuse_idx = 0;
+ rx_ring->lbq_use_head = inuse_idx;
+ atomic_dec_32(&rx_ring->lbuf_in_use_count);
+ atomic_inc_32(&rx_ring->rx_indicate);
+ lbq_desc->upl_inuse = 1;
+
+ /* if mp is NULL */
+ if (lbq_desc->mp == NULL) {
+ /* try to remap mp again */
+ lbq_desc->mp =
+ desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
+ rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
+ }
+ }
+ return (lbq_desc);
+}
+
+/*
+ * Add a large buffer descriptor to free list
+ */
+static void
+ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
+ struct bq_desc *lbq_desc)
+{
+ uint32_t free_idx;
+
+ /* Add to the end of free list */
+ free_idx = rx_ring->lbq_free_tail;
+ rx_ring->lbuf_free[free_idx] = lbq_desc;
+ free_idx++;
+ if (free_idx >= rx_ring->lbq_len)
+ free_idx = 0;
+ rx_ring->lbq_free_tail = free_idx;
+ atomic_inc_32(&rx_ring->lbuf_free_count);
+ ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
+}
+
+/*
+ * Get a large buffer descriptor from its free list
+ */
+static struct bq_desc *
+ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc;
+ uint32_t free_idx;
+
+ free_idx = rx_ring->lbq_free_head;
+ /* Pick from head of free list */
+ lbq_desc = rx_ring->lbuf_free[free_idx];
+ rx_ring->lbuf_free[free_idx] = NULL;
+
+ if (lbq_desc != NULL) {
+ free_idx++;
+ if (free_idx >= rx_ring->lbq_len)
+ free_idx = 0;
+ rx_ring->lbq_free_head = free_idx;
+ atomic_dec_32(&rx_ring->lbuf_free_count);
+ ASSERT(rx_ring->lbuf_free_count != 0);
+ }
+ return (lbq_desc);
+}
+
+/*
+ * Add a small buffer descriptor to free list
+ */
+static void
+ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
+{
+ struct rx_ring *rx_ring = sbq_desc->rx_ring;
+ uint64_t *sbq_entry;
+ qlge_t *qlge = (qlge_t *)rx_ring->qlge;
+ /*
+ * Sync access
+ */
+ mutex_enter(&rx_ring->sbq_lock);
+
+ sbq_desc->upl_inuse = 0;
+
+ /*
+ * If we are freeing the buffers as a result of adapter unload, get out
+ */
+ if ((sbq_desc->free_buf != NULL) ||
+ (qlge->mac_flags == QL_MAC_DETACH)) {
+ if (sbq_desc->free_buf == NULL)
+ atomic_dec_32(&rx_ring->rx_indicate);
+ mutex_exit(&rx_ring->sbq_lock);
+ return;
+ }
+#ifdef QLGE_LOAD_UNLOAD
+ if (rx_ring->rx_indicate == 0)
+ cmn_err(CE_WARN, "sbq: indicate wrong");
+#endif
+#ifdef QLGE_TRACK_BUFFER_USAGE
+ uint32_t sb_consumer_idx;
+ uint32_t sb_producer_idx;
+ uint32_t num_free_buffers;
+ uint32_t temp;
+
+ temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
+ sb_producer_idx = temp & 0x0000ffff;
+ sb_consumer_idx = (temp >> 16);
+
+ if (sb_consumer_idx > sb_producer_idx)
+ num_free_buffers = NUM_SMALL_BUFFERS -
+ (sb_consumer_idx - sb_producer_idx);
+ else
+ num_free_buffers = sb_producer_idx - sb_consumer_idx;
+
+ if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
+ qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;
+
+#endif
+
+ ASSERT(sbq_desc->mp == NULL);
+
+#ifdef QLGE_LOAD_UNLOAD
+ if (rx_ring->rx_indicate > 0xFF000000)
+ cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
+ " sbq_desc index %d.",
+ rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
+ sbq_desc->index);
+#endif
+ if (alloc_memory) {
+ sbq_desc->mp =
+ desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
+ rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
+ if (sbq_desc->mp == NULL) {
+ rx_ring->rx_failed_sbq_allocs++;
+ }
+ }
+
+	/* Got the packet back from the stack; decrement rx_indicate count */
+ atomic_dec_32(&rx_ring->rx_indicate);
+
+ ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
+
+ /* Rearm if possible */
+ if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
+ (qlge->mac_flags == QL_MAC_STARTED)) {
+ sbq_entry = rx_ring->sbq_dma.vaddr;
+ sbq_entry += rx_ring->sbq_prod_idx;
+
+ while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
+ /* Get first one from free list */
+ sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
+
+ *sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
+ sbq_entry++;
+ rx_ring->sbq_prod_idx++;
+ if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
+ rx_ring->sbq_prod_idx = 0;
+ sbq_entry = rx_ring->sbq_dma.vaddr;
+ }
+ /* Add to end of in use list */
+ ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
+ }
+
+ /* Update small buffer queue producer index */
+ ql_update_sbq_prod_idx(qlge, rx_ring);
+ }
+
+ mutex_exit(&rx_ring->sbq_lock);
+ QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
+ __func__, qlge->instance, rx_ring->sbuf_free_count));
+}
+
+/*
+ * rx recycle call back function
+ */
+static void
+ql_release_to_sbuf_free_list(caddr_t p)
+{
+ struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
+
+ if (sbq_desc == NULL)
+ return;
+ ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
+}
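
The recycle path depends on the desballoc(9F) contract: the mblk wraps driver-owned DMA memory, and when the stack eventually frees it, the registered free routine runs instead of the memory being released. A minimal sketch of the wiring (not part of this change; the driver embeds the frtn_t in the descriptor as rx_recycle, shown here as a local for brevity):

    frtn_t recycle;

    recycle.free_func = ql_release_to_sbuf_free_list;
    recycle.free_arg = (caddr_t)sbq_desc;

    /* freemsg() anywhere upstream now lands back in the free list */
    mp = desballoc((unsigned char *)sbq_desc->bd_dma.vaddr,
        rx_ring->sbq_buf_size, 0, &recycle);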
+
+/*
+ * Add a large buffer descriptor to free list
+ */
+static void
+ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
+{
+ struct rx_ring *rx_ring = lbq_desc->rx_ring;
+ uint64_t *lbq_entry;
+ qlge_t *qlge = rx_ring->qlge;
+
+ /* Sync access */
+ mutex_enter(&rx_ring->lbq_lock);
+
+ lbq_desc->upl_inuse = 0;
+ /*
+ * If we are freeing the buffers as a result of adapter unload, get out
+ */
+ if ((lbq_desc->free_buf != NULL) ||
+ (qlge->mac_flags == QL_MAC_DETACH)) {
+ if (lbq_desc->free_buf == NULL)
+ atomic_dec_32(&rx_ring->rx_indicate);
+ mutex_exit(&rx_ring->lbq_lock);
+ return;
+ }
+#ifdef QLGE_LOAD_UNLOAD
+ if (rx_ring->rx_indicate == 0)
+ cmn_err(CE_WARN, "lbq: indicate wrong");
+#endif
+#ifdef QLGE_TRACK_BUFFER_USAGE
+ uint32_t lb_consumer_idx;
+ uint32_t lb_producer_idx;
+ uint32_t num_free_buffers;
+ uint32_t temp;
+
+ temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);
+
+ lb_producer_idx = temp & 0x0000ffff;
+ lb_consumer_idx = (temp >> 16);
+
+ if (lb_consumer_idx > lb_producer_idx)
+ num_free_buffers = NUM_LARGE_BUFFERS -
+ (lb_consumer_idx - lb_producer_idx);
+ else
+ num_free_buffers = lb_producer_idx - lb_consumer_idx;
+
+ if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
+ qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
+ }
+#endif
+
+ ASSERT(lbq_desc->mp == NULL);
+#ifdef QLGE_LOAD_UNLOAD
+ if (rx_ring->rx_indicate > 0xFF000000)
+ cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
+ "lbq_desc index %d",
+ rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
+ lbq_desc->index);
+#endif
+ if (alloc_memory) {
+ lbq_desc->mp =
+ desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
+ rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
+ if (lbq_desc->mp == NULL) {
+ rx_ring->rx_failed_lbq_allocs++;
+ }
+ }
+
+	/* Got the packet back from the stack; decrement rx_indicate count */
+ atomic_dec_32(&rx_ring->rx_indicate);
+
+ ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
+
+ /* Rearm if possible */
+ if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
+ (qlge->mac_flags == QL_MAC_STARTED)) {
+ lbq_entry = rx_ring->lbq_dma.vaddr;
+ lbq_entry += rx_ring->lbq_prod_idx;
+ while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
+ /* Get first one from free list */
+ lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
+
+ *lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
+ lbq_entry++;
+ rx_ring->lbq_prod_idx++;
+ if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
+ rx_ring->lbq_prod_idx = 0;
+ lbq_entry = rx_ring->lbq_dma.vaddr;
+ }
+
+ /* Add to end of in use list */
+ ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
+ }
+
+ /* Update large buffer queue producer index */
+ ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
+ }
+
+ mutex_exit(&rx_ring->lbq_lock);
+	QL_PRINT(DBG_RX_RING, ("%s exited, lbuf_free_count %d\n",
+ __func__, rx_ring->lbuf_free_count));
+}
+/*
+ * rx recycle call back function
+ */
+static void
+ql_release_to_lbuf_free_list(caddr_t p)
+{
+ struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
+
+ if (lbq_desc == NULL)
+ return;
+ ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
+}
+
+/*
+ * free small buffer queue buffers
+ */
+static void
+ql_free_sbq_buffers(struct rx_ring *rx_ring)
+{
+ struct bq_desc *sbq_desc;
+ uint32_t i;
+ uint32_t j = rx_ring->sbq_free_head;
+ int force_cnt = 0;
+
+ for (i = 0; i < rx_ring->sbuf_free_count; i++) {
+ sbq_desc = rx_ring->sbuf_free[j];
+ sbq_desc->free_buf = 1;
+ j++;
+ if (j >= rx_ring->sbq_len) {
+ j = 0;
+ }
+ if (sbq_desc->mp != NULL) {
+ freemsg(sbq_desc->mp);
+ sbq_desc->mp = NULL;
+ }
+ }
+ rx_ring->sbuf_free_count = 0;
+
+ j = rx_ring->sbq_use_head;
+ for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
+ sbq_desc = rx_ring->sbuf_in_use[j];
+ sbq_desc->free_buf = 1;
+ j++;
+ if (j >= rx_ring->sbq_len) {
+ j = 0;
+ }
+ if (sbq_desc->mp != NULL) {
+ freemsg(sbq_desc->mp);
+ sbq_desc->mp = NULL;
+ }
+ }
+ rx_ring->sbuf_in_use_count = 0;
+
+ sbq_desc = &rx_ring->sbq_desc[0];
+ for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
+ /*
+ * Set flag so that the callback does not allocate a new buffer
+ */
+ sbq_desc->free_buf = 1;
+ if (sbq_desc->upl_inuse != 0) {
+ force_cnt++;
+ }
+ if (sbq_desc->bd_dma.dma_handle != NULL) {
+ ql_free_phys(&sbq_desc->bd_dma.dma_handle,
+ &sbq_desc->bd_dma.acc_handle);
+ sbq_desc->bd_dma.dma_handle = NULL;
+ sbq_desc->bd_dma.acc_handle = NULL;
+ }
+ }
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n",
+ rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
+#endif
+ if (rx_ring->sbuf_in_use != NULL) {
+ kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
+ sizeof (struct bq_desc *)));
+ rx_ring->sbuf_in_use = NULL;
+ }
+
+ if (rx_ring->sbuf_free != NULL) {
+ kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
+ sizeof (struct bq_desc *)));
+ rx_ring->sbuf_free = NULL;
+ }
+}
+
+/* Allocate small buffers */
+static int
+ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
+{
+ struct bq_desc *sbq_desc;
+ int i;
+ ddi_dma_cookie_t dma_cookie;
+
+ rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
+ sizeof (struct bq_desc *), KM_NOSLEEP);
+ if (rx_ring->sbuf_free == NULL) {
+ cmn_err(CE_WARN,
+ "!%s: sbuf_free_list alloc: failed",
+ __func__);
+ rx_ring->sbuf_free_count = 0;
+ goto alloc_sbuf_err;
+ }
+
+ rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
+ sizeof (struct bq_desc *), KM_NOSLEEP);
+ if (rx_ring->sbuf_in_use == NULL) {
+ cmn_err(CE_WARN,
+ "!%s: sbuf_inuse_list alloc: failed",
+ __func__);
+ rx_ring->sbuf_in_use_count = 0;
+ goto alloc_sbuf_err;
+ }
+ rx_ring->sbq_use_head = 0;
+ rx_ring->sbq_use_tail = 0;
+ rx_ring->sbq_free_head = 0;
+ rx_ring->sbq_free_tail = 0;
+ sbq_desc = &rx_ring->sbq_desc[0];
+
+ for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
+ /* Allocate buffer */
+ if (ql_alloc_phys(qlge->dip, &sbq_desc->bd_dma.dma_handle,
+ &ql_buf_acc_attr,
+ DDI_DMA_READ | DDI_DMA_STREAMING,
+ &sbq_desc->bd_dma.acc_handle,
+ (size_t)rx_ring->sbq_buf_size, /* mem size */
+ (size_t)0, /* default alignment */
+ (caddr_t *)&sbq_desc->bd_dma.vaddr,
+ &dma_cookie) != 0) {
+ cmn_err(CE_WARN,
+ "!%s: ddi_dma_alloc_handle: failed",
+ __func__);
+ goto alloc_sbuf_err;
+ }
+
+ /* Set context for Return buffer callback */
+ sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
+ sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
+ sbq_desc->rx_recycle.free_arg = (caddr_t)sbq_desc;
+ sbq_desc->rx_ring = rx_ring;
+ sbq_desc->upl_inuse = 0;
+ sbq_desc->free_buf = 0;
+
+ sbq_desc->mp =
+ desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
+ rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
+ if (sbq_desc->mp == NULL) {
+ cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
+ goto alloc_sbuf_err;
+ }
+ ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
+ }
+
+ return (DDI_SUCCESS);
+
+alloc_sbuf_err:
+ ql_free_sbq_buffers(rx_ring);
+ return (DDI_FAILURE);
+}
+
+static void
+ql_free_lbq_buffers(struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc;
+ uint32_t i, j;
+ int force_cnt = 0;
+
+ j = rx_ring->lbq_free_head;
+ for (i = 0; i < rx_ring->lbuf_free_count; i++) {
+ lbq_desc = rx_ring->lbuf_free[j];
+ lbq_desc->free_buf = 1;
+ j++;
+ if (j >= rx_ring->lbq_len)
+ j = 0;
+ if (lbq_desc->mp != NULL) {
+ freemsg(lbq_desc->mp);
+ lbq_desc->mp = NULL;
+ }
+ }
+ rx_ring->lbuf_free_count = 0;
+
+ j = rx_ring->lbq_use_head;
+ for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
+ lbq_desc = rx_ring->lbuf_in_use[j];
+ lbq_desc->free_buf = 1;
+ j++;
+ if (j >= rx_ring->lbq_len) {
+ j = 0;
+ }
+ if (lbq_desc->mp != NULL) {
+ freemsg(lbq_desc->mp);
+ lbq_desc->mp = NULL;
+ }
+ }
+ rx_ring->lbuf_in_use_count = 0;
+
+ lbq_desc = &rx_ring->lbq_desc[0];
+ for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
+ /* Set flag so that callback will not allocate a new buffer */
+ lbq_desc->free_buf = 1;
+ if (lbq_desc->upl_inuse != 0) {
+ force_cnt++;
+ }
+ if (lbq_desc->bd_dma.dma_handle != NULL) {
+ ql_free_phys(&lbq_desc->bd_dma.dma_handle,
+ &lbq_desc->bd_dma.acc_handle);
+ lbq_desc->bd_dma.dma_handle = NULL;
+ lbq_desc->bd_dma.acc_handle = NULL;
+ }
+ }
+#ifdef QLGE_LOAD_UNLOAD
+ if (force_cnt) {
+ cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
+ rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
+ force_cnt);
+ }
+#endif
+ if (rx_ring->lbuf_in_use != NULL) {
+ kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
+ sizeof (struct bq_desc *)));
+ rx_ring->lbuf_in_use = NULL;
+ }
+
+ if (rx_ring->lbuf_free != NULL) {
+ kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
+ sizeof (struct bq_desc *)));
+ rx_ring->lbuf_free = NULL;
+ }
+}
+
+/* Allocate large buffers */
+static int
+ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc;
+ ddi_dma_cookie_t dma_cookie;
+ int i;
+ uint32_t lbq_buf_size;
+
+ rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
+ sizeof (struct bq_desc *), KM_NOSLEEP);
+ if (rx_ring->lbuf_free == NULL) {
+ cmn_err(CE_WARN,
+ "!%s: lbuf_free_list alloc: failed",
+ __func__);
+ rx_ring->lbuf_free_count = 0;
+ goto alloc_lbuf_err;
+ }
+
+ rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
+ sizeof (struct bq_desc *), KM_NOSLEEP);
+
+ if (rx_ring->lbuf_in_use == NULL) {
+ cmn_err(CE_WARN,
+ "!%s: lbuf_inuse_list alloc: failed",
+ __func__);
+ rx_ring->lbuf_in_use_count = 0;
+ goto alloc_lbuf_err;
+ }
+ rx_ring->lbq_use_head = 0;
+ rx_ring->lbq_use_tail = 0;
+ rx_ring->lbq_free_head = 0;
+ rx_ring->lbq_free_tail = 0;
+
+ lbq_buf_size = (qlge->mtu == ETHERMTU) ?
+ NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
+
+ lbq_desc = &rx_ring->lbq_desc[0];
+ for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
+ rx_ring->lbq_buf_size = lbq_buf_size;
+ /* Allocate buffer */
+ if (ql_alloc_phys(qlge->dip, &lbq_desc->bd_dma.dma_handle,
+ &ql_buf_acc_attr,
+ DDI_DMA_READ | DDI_DMA_STREAMING,
+ &lbq_desc->bd_dma.acc_handle,
+ (size_t)rx_ring->lbq_buf_size, /* mem size */
+ (size_t)0, /* default alignment */
+ (caddr_t *)&lbq_desc->bd_dma.vaddr,
+ &dma_cookie) != 0) {
+ cmn_err(CE_WARN,
+ "!%s: ddi_dma_alloc_handle: failed",
+ __func__);
+ goto alloc_lbuf_err;
+ }
+
+ /* Set context for Return buffer callback */
+ lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
+ lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
+ lbq_desc->rx_recycle.free_arg = (caddr_t)lbq_desc;
+ lbq_desc->rx_ring = rx_ring;
+ lbq_desc->upl_inuse = 0;
+ lbq_desc->free_buf = 0;
+
+ lbq_desc->mp =
+ desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
+ rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
+ if (lbq_desc->mp == NULL) {
+ cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
+ goto alloc_lbuf_err;
+ }
+ ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
+ } /* For all large buffers */
+
+ return (DDI_SUCCESS);
+
+alloc_lbuf_err:
+ ql_free_lbq_buffers(rx_ring);
+ return (DDI_FAILURE);
+}
+
+/*
+ * Free rx buffers
+ */
+static void
+ql_free_rx_buffers(qlge_t *qlge)
+{
+ int i;
+ struct rx_ring *rx_ring;
+
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ if (rx_ring->type != TX_Q) {
+ ql_free_lbq_buffers(rx_ring);
+ ql_free_sbq_buffers(rx_ring);
+ }
+ }
+}
+
+/*
+ * Allocate rx buffers
+ */
+static int
+ql_alloc_rx_buffers(qlge_t *qlge)
+{
+ struct rx_ring *rx_ring;
+ int i;
+
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ if (rx_ring->type != TX_Q) {
+ if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
+ goto alloc_err;
+ if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
+ goto alloc_err;
+ }
+ }
+#ifdef QLGE_TRACK_BUFFER_USAGE
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ if (qlge->rx_ring[i].type == RX_Q) {
+ qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
+ qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
+ }
+ qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
+ }
+#endif
+ return (DDI_SUCCESS);
+
+alloc_err:
+
+ return (DDI_FAILURE);
+}
+
+/*
+ * Initialize large buffer queue ring
+ */
+static void
+ql_init_lbq_ring(struct rx_ring *rx_ring)
+{
+ uint16_t i;
+ struct bq_desc *lbq_desc;
+
+ bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
+ for (i = 0; i < rx_ring->lbq_len; i++) {
+ lbq_desc = &rx_ring->lbq_desc[i];
+ lbq_desc->index = i;
+ }
+}
+
+/*
+ * Initialize small buffer queue ring
+ */
+static void
+ql_init_sbq_ring(struct rx_ring *rx_ring)
+{
+ uint16_t i;
+ struct bq_desc *sbq_desc;
+
+ bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
+ for (i = 0; i < rx_ring->sbq_len; i++) {
+ sbq_desc = &rx_ring->sbq_desc[i];
+ sbq_desc->index = i;
+ }
+}
+
+/*
+ * Calculate the pseudo-header checksum when the hardware cannot do it
+ */
+static void
+ql_pseudo_cksum(uint8_t *buf)
+{
+ uint32_t cksum;
+ uint16_t iphl;
+ uint16_t proto;
+
+ iphl = (uint16_t)(4 * (buf[0] & 0xF));
+ cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
+ cksum += proto = buf[9];
+ cksum += (((uint16_t)buf[12])<<8) + buf[13];
+ cksum += (((uint16_t)buf[14])<<8) + buf[15];
+ cksum += (((uint16_t)buf[16])<<8) + buf[17];
+ cksum += (((uint16_t)buf[18])<<8) + buf[19];
+ cksum = (cksum>>16) + (cksum & 0xFFFF);
+ cksum = (cksum>>16) + (cksum & 0xFFFF);
+
+ /*
+ * Point it to the TCP/UDP header, and
+ * update the checksum field.
+ */
+ buf += iphl + ((proto == IPPROTO_TCP) ?
+ TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
+
+ *(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
+
+}
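
To make the arithmetic above concrete, a worked example with assumed values (not part of this change): an IPv4 header of 20 bytes, total length 0x0034, protocol TCP (6), source 192.168.0.1, destination 192.168.0.2.

    /*
     * sum  = 0x0034 - 20          TCP length  = 0x0020
     * sum += 0x0006               protocol
     * sum += 0xc0a8 + 0x0001      source address, 16 bits at a time
     * sum += 0xc0a8 + 0x0002      destination address -> sum = 0x18179
     * fold the carries twice:     0x8179 + 1  = 0x817a
     *
     * 0x817a is then seeded into the TCP checksum field, so the
     * hardware presumably only has to fold in the payload sum.
     */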
+
+/*
+ * Transmit an incoming packet.
+ */
+mblk_t *
+ql_ring_tx(void *arg, mblk_t *mp)
+{
+ struct tx_ring *tx_ring = (struct tx_ring *)arg;
+ qlge_t *qlge = tx_ring->qlge;
+ mblk_t *next;
+ int rval;
+ uint32_t tx_count = 0;
+
+ if (qlge->port_link_state == LS_DOWN) {
+		/* cannot send messages while the link is down */
+ mblk_t *tp;
+ cmn_err(CE_WARN, "tx failed due to link down");
+
+ while (mp != NULL) {
+ tp = mp->b_next;
+ mp->b_next = NULL;
+ freemsg(mp);
+ mp = tp;
+ }
+ goto exit;
+ }
+
+ mutex_enter(&tx_ring->tx_lock);
+	/* if the mac is not started, the driver is not ready and cannot send */
+ if (tx_ring->mac_flags != QL_MAC_STARTED) {
+ cmn_err(CE_WARN, "%s(%d)ring not started, mode %d "
+ " return packets",
+ __func__, qlge->instance, tx_ring->mac_flags);
+ mutex_exit(&tx_ring->tx_lock);
+ goto exit;
+ }
+
+ /* we must try to send all */
+ while (mp != NULL) {
+ /*
+ * if number of available slots is less than a threshold,
+ * then quit
+ */
+ if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
+ tx_ring->queue_stopped = 1;
+ rval = DDI_FAILURE;
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_WARN, "%s(%d) no resources",
+ __func__, qlge->instance);
+#endif
+ tx_ring->defer++;
+ /*
+ * If we return the buffer back we are expected to call
+ * mac_tx_ring_update() when resources are available
+ */
+ break;
+ }
+
+ next = mp->b_next;
+ mp->b_next = NULL;
+
+ rval = ql_send_common(tx_ring, mp);
+
+ if (rval != DDI_SUCCESS) {
+ mp->b_next = next;
+ break;
+ }
+ tx_count++;
+ mp = next;
+ }
+
+ /*
+ * After all msg blocks are mapped or copied to tx buffer,
+ * trigger the hardware to send!
+ */
+ if (tx_count > 0) {
+ ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
+ tx_ring->prod_idx);
+ }
+
+ mutex_exit(&tx_ring->tx_lock);
+exit:
+ return (mp);
+}
+
+
+/*
+ * This function builds an mblk list for the given inbound
+ * completion.
+ */
+
+static mblk_t *
+ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+ mblk_t *mp = NULL;
+ mblk_t *mp1 = NULL; /* packet header */
+ mblk_t *mp2 = NULL; /* packet content */
+ struct bq_desc *lbq_desc;
+ struct bq_desc *sbq_desc;
+ uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
+ uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
+ uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ uint32_t pkt_len = payload_len + header_len;
+ uint32_t done;
+ uint64_t *curr_ial_ptr;
+ uint32_t ial_data_addr_low;
+ uint32_t actual_data_addr_low;
+ mblk_t *mp_ial = NULL; /* ial chained packets */
+ uint32_t size;
+
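+	/*
+	 * A completion describes the frame in one of several layouts:
+	 * an optional split-off header in a small buffer (HV/HS), then
+	 * the payload either in a single small buffer (DS), in a single
+	 * large buffer (DL), or chained through an indirect address
+	 * list (IAL) spanning several large buffers.
+	 */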
+ /*
+ * Check if error flags are set
+ */
+ if (err_flag != 0) {
+ if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
+ rx_ring->frame_too_long++;
+ if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
+ rx_ring->frame_too_short++;
+ if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
+ rx_ring->fcs_err++;
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
+#endif
+ QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
+ (uint8_t *)ib_mac_rsp, 8,
+ (size_t)sizeof (struct ib_mac_iocb_rsp));
+ }
+
+ /* header should not be in large buffer */
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
+ cmn_err(CE_WARN, "header in large buffer or invalid!");
+ err_flag |= 1;
+ }
+	/*
+	 * Handle the header buffer if present:
+	 * the packet header must be valid and saved in one small buffer;
+	 * broadcast/multicast packet headers are not split.
+	 */
+ if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
+ (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
+ QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
+ header_len));
+ /* Sync access */
+ sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
+
+ ASSERT(sbq_desc != NULL);
+
+ /*
+ * Validate addresses from the ASIC with the
+ * expected sbuf address
+ */
+ if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
+ != ib_mac_rsp->hdr_addr) {
+ /* Small buffer address mismatch */
+ cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
+ " in wrong small buffer",
+ __func__, qlge->instance, rx_ring->cq_id);
+			goto fatal_error;
+ }
+ /* get this packet */
+ mp1 = sbq_desc->mp;
+		if ((err_flag != 0) || (mp1 == NULL)) {
+ /* failed on this packet, put it back for re-arming */
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_WARN, "get header from small buffer fail");
+#endif
+ ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
+ mp1 = NULL;
+ } else {
+ /* Flush DMA'd data */
+ (void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
+ 0, header_len, DDI_DMA_SYNC_FORKERNEL);
+
+			if ((qlge->ip_hdr_offset != 0) &&
+			    (header_len < SMALL_BUFFER_SIZE)) {
+				/*
+				 * copy the entire header to a 2-byte
+				 * aligned address for 8100 adapters so
+				 * that the IP header lands on a 4-byte
+				 * boundary
+				 */
+ bcopy(mp1->b_rptr,
+ (mp1->b_rptr + SMALL_BUFFER_SIZE +
+ qlge->ip_hdr_offset),
+ header_len);
+ mp1->b_rptr += SMALL_BUFFER_SIZE +
+ qlge->ip_hdr_offset;
+ }
+
+ /*
+ * Adjust the mp payload_len to match
+ * the packet header payload_len
+ */
+ mp1->b_wptr = mp1->b_rptr + header_len;
+ mp1->b_next = mp1->b_cont = NULL;
+ QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
+ (uint8_t *)mp1->b_rptr, 8, header_len);
+ }
+ }
+
+	/*
+	 * The packet data, or the whole packet, can be in a small buffer
+	 * or in one or more large buffers.
+	 */
+ if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+ /*
+ * The data is in a single small buffer.
+ */
+ sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
+
+ ASSERT(sbq_desc != NULL);
+
+ QL_PRINT(DBG_RX,
+ ("%d bytes in a single small buffer, sbq_desc = %p, "
+ "sbq_desc->bd_dma.dma_addr = %x,"
+ " ib_mac_rsp->data_addr = %x, mp = %p\n",
+ payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
+ ib_mac_rsp->data_addr, sbq_desc->mp));
+
+ /*
+ * Validate addresses from the ASIC with the
+ * expected sbuf address
+ */
+ if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
+ != ib_mac_rsp->data_addr) {
+ /* Small buffer address mismatch */
+ cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
+ " in wrong small buffer",
+ __func__, qlge->instance, rx_ring->cq_id);
+			goto fatal_error;
+ }
+ /* get this packet */
+ mp2 = sbq_desc->mp;
+ if ((err_flag != 0) || (mp2 == NULL)) {
+#ifdef QLGE_LOAD_UNLOAD
+ /* failed on this packet, put it back for re-arming */
+ cmn_err(CE_WARN, "ignore bad data from small buffer");
+#endif
+ ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
+ mp2 = NULL;
+ } else {
+ /* Adjust the buffer length to match the payload_len */
+ mp2->b_wptr = mp2->b_rptr + payload_len;
+ mp2->b_next = mp2->b_cont = NULL;
+ /* Flush DMA'd data */
+ (void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
+ 0, payload_len, DDI_DMA_SYNC_FORKERNEL);
+ QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
+ (uint8_t *)mp2->b_rptr, 8, payload_len);
+			/*
+			 * if the payload is small enough, copy it to
+			 * the end of the packet header
+			 */
+ if ((mp1 != NULL) &&
+ (payload_len <= qlge->payload_copy_thresh) &&
+ (pkt_len <
+ (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
+ bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
+ mp1->b_wptr += payload_len;
+ freemsg(mp2);
+ mp2 = NULL;
+ }
+ }
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+ /*
+ * The data is in a single large buffer.
+ */
+ lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
+
+ QL_PRINT(DBG_RX,
+ ("%d bytes in a single large buffer, lbq_desc = %p, "
+ "lbq_desc->bd_dma.dma_addr = %x,"
+ " ib_mac_rsp->data_addr = %x, mp = %p\n",
+ payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
+ ib_mac_rsp->data_addr, lbq_desc->mp));
+
+ ASSERT(lbq_desc != NULL);
+
+ /*
+ * Validate addresses from the ASIC with
+ * the expected lbuf address
+ */
+ if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
+ != ib_mac_rsp->data_addr) {
+ /* Large buffer address mismatch */
+ cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
+ " in wrong large buffer",
+ __func__, qlge->instance, rx_ring->cq_id);
+			goto fatal_error;
+ }
+ mp2 = lbq_desc->mp;
+ if ((err_flag != 0) || (mp2 == NULL)) {
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_WARN, "ignore bad data from large buffer");
+#endif
+ /* failed on this packet, put it back for re-arming */
+ ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
+ mp2 = NULL;
+ } else {
+ /*
+ * Adjust the buffer length to match
+ * the packet payload_len
+ */
+ mp2->b_wptr = mp2->b_rptr + payload_len;
+ mp2->b_next = mp2->b_cont = NULL;
+ /* Flush DMA'd data */
+ (void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
+ 0, payload_len, DDI_DMA_SYNC_FORKERNEL);
+ QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
+ (uint8_t *)mp2->b_rptr, 8, payload_len);
+			/*
+			 * if the payload is small enough, copy it to
+			 * the end of the packet header
+			 */
+			if ((mp1 != NULL) &&
+			    (payload_len <= qlge->payload_copy_thresh) &&
+			    (pkt_len <
+			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
+ bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
+ mp1->b_wptr += payload_len;
+ freemsg(mp2);
+ mp2 = NULL;
+ }
+ }
+ } else if (payload_len) {
+		/*
+		 * payload present but in neither a small nor a large
+		 * buffer; its fragments are described by an indirect
+		 * address list (IAL)
+		 */
+#ifdef QLGE_LOAD_UNLOAD
+		cmn_err(CE_NOTE, "packet chained in IAL");
+#endif
+ /* lrg buf addresses are saved in one small buffer */
+ sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
+ curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
+ done = 0;
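+		/*
+		 * Each little-endian 64-bit IAL entry carries a large
+		 * buffer DMA address; bit 0 of the entry marks the last
+		 * fragment of the packet.
+		 */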
+ while (!done) {
+ ial_data_addr_low =
+ (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
+ 0xFFFFFFFE);
+ /* check if this is the last packet fragment */
+ done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
+ curr_ial_ptr++;
+ /*
+ * The data is in one or several large buffer(s).
+ */
+ lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
+ actual_data_addr_low =
+ (uint32_t)(lbq_desc->bd_dma.dma_addr &
+ 0xFFFFFFFE);
+ if (ial_data_addr_low != actual_data_addr_low) {
+ cmn_err(CE_WARN,
+ "packet saved in wrong ial lrg buffer"
+ " expected %x, actual %lx",
+ ial_data_addr_low,
+ (uintptr_t)lbq_desc->bd_dma.dma_addr);
+				goto fatal_error;
+ }
+
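+			/*
+			 * mp_ial heads the fragment chain; mp2 trails as
+			 * the tail so each fragment is appended via b_cont.
+			 */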
+ if (mp_ial == NULL) {
+ mp_ial = mp2 = lbq_desc->mp;
+ } else {
+ mp2->b_cont = lbq_desc->mp;
+ mp2 = lbq_desc->mp;
+ }
+ mp2->b_next = NULL;
+ mp2->b_cont = NULL;
+			size = (payload_len < rx_ring->lbq_buf_size) ?
+			    payload_len : rx_ring->lbq_buf_size;
+ mp2->b_wptr = mp2->b_rptr + size;
+ /* Flush DMA'd data */
+ (void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
+ 0, size, DDI_DMA_SYNC_FORKERNEL);
+ payload_len -= size;
+ QL_DUMP(DBG_RX, "\t Mac data dump:\n",
+ (uint8_t *)mp2->b_rptr, 8, size);
+ }
+ mp2 = mp_ial;
+ freemsg(sbq_desc->mp);
+ }
+	/*
+	 * If the packet header was not split out, mp2 alone is sent
+	 * upstream; otherwise the payload mp2 is concatenated to the
+	 * tail of the header mblk, mp1.
+	 */
+ if (!err_flag) {
+ if (mp1) {
+ if (mp2) {
+ QL_PRINT(DBG_RX, ("packet in mp1 and mp2\n"));
+ linkb(mp1, mp2); /* mp1->b_cont = mp2; */
+ mp = mp1;
+ } else {
+ QL_PRINT(DBG_RX, ("packet in mp1 only\n"));
+ mp = mp1;
+ }
+ } else if (mp2) {
+ QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
+ mp = mp2;
+ }
+ }
+ return (mp);
+
+fatal_error:
+	/* fatal error: free anything already chained and drop the packet */
+	if (mp1 != NULL)
+		freemsg(mp1);
+	if (mp_ial != NULL)
+		freemsg(mp_ial);
+	else if (mp2 != NULL)
+		freemsg(mp2);
+	return (NULL);
+
+}
+
+/*
+ * Bump completion queue consumer index.
+ */
+static void
+ql_update_cq(struct rx_ring *rx_ring)
+{
+ rx_ring->cnsmr_idx++;
+ rx_ring->curr_entry++;
+ if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
+ rx_ring->cnsmr_idx = 0;
+ rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
+ }
+}
+
+/*
+ * Write the completion queue consumer index to its doorbell register.
+ */
+static void
+ql_write_cq_idx(struct rx_ring *rx_ring)
+{
+ qlge_t *qlge = rx_ring->qlge;
+
+ ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
+ rx_ring->cnsmr_idx);
+}
+
+/*
+ * Process a SYS-Chip Event Notification Completion Event.
+ * The incoming notification describes a link up/down transition
+ * or an error condition.
+ */
+static void
+ql_process_chip_ae_intr(qlge_t *qlge,
+ struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
+{
+ uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
+ uint32_t soft_req = 0;
+
+ switch (eventType) {
+ case SYS_EVENT_PORT_LINK_UP: /* 0x0h */
+ QL_PRINT(DBG_MBX, ("Port Link Up\n"));
+ break;
+
+ case SYS_EVENT_PORT_LINK_DOWN: /* 0x1h */
+ QL_PRINT(DBG_MBX, ("Port Link Down\n"));
+ break;
+
+	case SYS_EVENT_MULTIPLE_CAM_HITS: /* 0x6h */
+		cmn_err(CE_WARN, "A multiple-CAM-hits lookup error "
+		    "occurred");
+ soft_req |= NEED_HW_RESET;
+ break;
+
+ case SYS_EVENT_SOFT_ECC_ERR: /* 0x7h */
+ cmn_err(CE_WARN, "Soft ECC error detected");
+ soft_req |= NEED_HW_RESET;
+ break;
+
+ case SYS_EVENT_MGMT_FATAL_ERR: /* 0x8h */
+		cmn_err(CE_WARN, "Management (MPI) Processor fatal"
+		    " error occurred");
+ soft_req |= NEED_MPI_RESET;
+ break;
+
+ case SYS_EVENT_MAC_INTERRUPT: /* 0x9h */
+ QL_PRINT(DBG_MBX, ("MAC Interrupt"));
+ break;
+
+ case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF: /* 0x40h */
+		cmn_err(CE_WARN, "PCI error reading small/large "
+		    "buffers occurred");
+ soft_req |= NEED_HW_RESET;
+ break;
+
+ default:
+		QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
+		    "type 0x%x occurred",
+		    __func__, qlge->instance, eventType));
+ break;
+ }
+
+ if ((soft_req & NEED_MPI_RESET) != 0) {
+ ql_wake_mpi_reset_soft_intr(qlge);
+ } else if ((soft_req & NEED_HW_RESET) != 0) {
+ ql_wake_asic_reset_soft_intr(qlge);
+ }
+}
+
+/*
+ * set received packet checksum flag
+ */
+void
+ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
+{
+ uint32_t flags;
+
+	/* Not a TCP or UDP packet? Nothing more to do */
+ if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
+ ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
+ return;
+
+ /* No CKO support for IPv6 */
+ if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
+ return;
+
+ /*
+ * If checksum error, don't set flags; stack will calculate
+ * checksum, detect the error and update statistics
+ */
+ if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
+ ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
+ return;
+
+	/*
+	 * TCP or UDP packet with a valid checksum: the early return above
+	 * guarantees RSP_T or RSP_U is set, so only RSP_NU remains to check.
+	 */
+	if ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0) {
+		flags = HCK_FULLCKSUM | HCK_FULLCKSUM_OK;
+		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, flags, 0);
+	}
+}
+
+/*
+ * This function walks the h/w descriptors of one specified rx ring,
+ * receiving the data whenever a descriptor's status shows it is
+ * ready. It returns a chain of mblks containing the received data,
+ * to be passed up to mac_rx_ring().
+ */
+mblk_t *
+ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
+{
+ qlge_t *qlge = rx_ring->qlge;
+ uint32_t prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ struct ib_mac_iocb_rsp *net_rsp;
+ mblk_t *mp;
+ mblk_t *mblk_head;
+ mblk_t **mblk_tail;
+ uint32_t received_bytes = 0;
+ boolean_t done = B_FALSE;
+ uint32_t length;
+
+#ifdef QLGE_TRACK_BUFFER_USAGE
+ uint32_t consumer_idx;
+ uint32_t producer_idx;
+ uint32_t num_free_entries;
+ uint32_t temp;
+
+ temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
+ consumer_idx = temp & 0x0000ffff;
+ producer_idx = (temp >> 16);
+
+ if (consumer_idx > producer_idx)
+ num_free_entries = (consumer_idx - producer_idx);
+ else
+ num_free_entries = NUM_RX_RING_ENTRIES - (
+ producer_idx - consumer_idx);
+
+ if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
+ qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
+
+#endif
+ mblk_head = NULL;
+ mblk_tail = &mblk_head;
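+	/*
+	 * mblk_tail always points at the b_next field of the last mblk
+	 * in the chain (or at mblk_head while the chain is empty), so
+	 * each received packet is appended in constant time.
+	 */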
+
+ while (!done && (prod != rx_ring->cnsmr_idx)) {
+ QL_PRINT(DBG_RX,
+ ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
+ __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
+
+ net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
+ (void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
+ (off_t)((uintptr_t)net_rsp -
+ (uintptr_t)rx_ring->cq_dma.vaddr),
+ (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
+ QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
+ rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));
+
+ switch (net_rsp->opcode) {
+
+ case OPCODE_IB_MAC_IOCB:
+ /* Adding length of pkt header and payload */
+ length = le32_to_cpu(net_rsp->data_len) +
+ le32_to_cpu(net_rsp->hdr_len);
+ if ((poll_bytes != QLGE_POLL_ALL) &&
+ ((received_bytes + length) > poll_bytes)) {
+ done = B_TRUE;
+ continue;
+ }
+ received_bytes += length;
+
+ mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
+ if (mp != NULL) {
+ if (rx_ring->mac_flags != QL_MAC_STARTED) {
+ /*
+ * Increment number of packets we have
+ * indicated to the stack, should be
+ * decremented when we get it back
+ * or when freemsg is called
+ */
+ ASSERT(rx_ring->rx_indicate
+ <= rx_ring->cq_len);
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_WARN, "%s do not send to OS,"
+ " mac_flags %d, indicate %d",
+ __func__, rx_ring->mac_flags,
+ rx_ring->rx_indicate);
+#endif
+ QL_PRINT(DBG_RX,
+ ("cq_id = %d, packet "
+ "dropped, mac not "
+ "enabled.\n",
+ rx_ring->cq_id));
+ rx_ring->rx_pkt_dropped_mac_unenabled++;
+
+ /* rx_lock is expected to be held */
+ mutex_exit(&rx_ring->rx_lock);
+ freemsg(mp);
+ mutex_enter(&rx_ring->rx_lock);
+ mp = NULL;
+ }
+
+ if (mp != NULL) {
+ /*
+ * IP full packet has been
+ * successfully verified by
+ * H/W and is correct
+ */
+ ql_set_rx_cksum(mp, net_rsp);
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += length;
+ *mblk_tail = mp;
+ mblk_tail = &mp->b_next;
+ }
+ } else {
+ QL_PRINT(DBG_RX,
+ ("cq_id = %d, packet dropped\n",
+ rx_ring->cq_id));
+ rx_ring->rx_packets_dropped_no_buffer++;
+ }
+ break;
+
+ case OPCODE_IB_SYS_EVENT_IOCB:
+ ql_process_chip_ae_intr(qlge,
+ (struct ib_sys_event_iocb_rsp *)
+ net_rsp);
+ break;
+
+ default:
+ cmn_err(CE_WARN,
+ "%s Ring(%d)Hit default case, not handled!"
+ " dropping the packet, "
+ "opcode = %x.", __func__, rx_ring->cq_id,
+ net_rsp->opcode);
+ break;
+ }
+ /* increment cnsmr_idx and curr_entry */
+ ql_update_cq(rx_ring);
+ prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+
+ }
+ /* update cnsmr_idx */
+ ql_write_cq_idx(rx_ring);
+ /* do not enable interrupt for polling mode */
+ if (poll_bytes == QLGE_POLL_ALL)
+ ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
+ return (mblk_head);
+}
+
+/* Process an outbound completion from an rx ring. */
+static void
+ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
+{
+ struct tx_ring *tx_ring;
+ struct tx_ring_desc *tx_ring_desc;
+ int j;
+
+ tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
+ tx_ring_desc = tx_ring->wq_desc;
+ tx_ring_desc += mac_rsp->tid;
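+	/* tid indexes the tx descriptor this completion refers to */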
+
+ if (tx_ring_desc->tx_type == USE_DMA) {
+ QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
+ __func__, qlge->instance));
+
+ /*
+ * Release the DMA resource that is used for
+ * DMA binding.
+ */
+ for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
+ (void) ddi_dma_unbind_handle(
+ tx_ring_desc->tx_dma_handle[j]);
+ }
+
+ tx_ring_desc->tx_dma_handle_used = 0;
+ /*
+ * Free the mblk after sending completed
+ */
+ if (tx_ring_desc->mp != NULL) {
+ freemsg(tx_ring_desc->mp);
+ tx_ring_desc->mp = NULL;
+ }
+ }
+
+ tx_ring->obytes += tx_ring_desc->tx_bytes;
+ tx_ring->opackets++;
+
+ if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
+ OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
+ tx_ring->errxmt++;
+ if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
+ /* EMPTY */
+ QL_PRINT(DBG_TX,
+ ("Total descriptor length did not match "
+ "transfer length.\n"));
+ }
+ if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
+ /* EMPTY */
+ QL_PRINT(DBG_TX,
+ ("Frame too short to be legal, not sent.\n"));
+ }
+ if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
+ /* EMPTY */
+ QL_PRINT(DBG_TX,
+ ("Frame too long, but sent anyway.\n"));
+ }
+ if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
+ /* EMPTY */
+ QL_PRINT(DBG_TX,
+ ("PCI backplane error. Frame not sent.\n"));
+ }
+ }
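+	/* each completion reclaims exactly one request-queue slot */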
+ atomic_inc_32(&tx_ring->tx_free_count);
+}
+
+/*
+ * clean up tx completion iocbs
+ */
+static int
+ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
+{
+ qlge_t *qlge = rx_ring->qlge;
+ uint32_t prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ struct ob_mac_iocb_rsp *net_rsp = NULL;
+ int count = 0;
+ struct tx_ring *tx_ring;
+ boolean_t resume_tx = B_FALSE;
+
+ mutex_enter(&rx_ring->rx_lock);
+#ifdef QLGE_TRACK_BUFFER_USAGE
+ {
+ uint32_t consumer_idx;
+ uint32_t producer_idx;
+ uint32_t num_free_entries;
+ uint32_t temp;
+
+ temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
+ consumer_idx = temp & 0x0000ffff;
+ producer_idx = (temp >> 16);
+
+ if (consumer_idx > producer_idx)
+ num_free_entries = (consumer_idx - producer_idx);
+ else
+ num_free_entries = NUM_RX_RING_ENTRIES -
+ (producer_idx - consumer_idx);
+
+ if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
+ qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
+
+ }
+#endif
+ /* While there are entries in the completion queue. */
+ while (prod != rx_ring->cnsmr_idx) {
+
+ QL_PRINT(DBG_RX,
+ ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
+
+ net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
+ (void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
+ (off_t)((uintptr_t)net_rsp -
+ (uintptr_t)rx_ring->cq_dma.vaddr),
+ (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
+
+ QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
+ "response packet data\n",
+ rx_ring->curr_entry, 8,
+ (size_t)sizeof (*net_rsp));
+
+ switch (net_rsp->opcode) {
+
+ case OPCODE_OB_MAC_OFFLOAD_IOCB:
+ case OPCODE_OB_MAC_IOCB:
+ ql_process_mac_tx_intr(qlge, net_rsp);
+ break;
+
+ default:
+ cmn_err(CE_WARN,
+ "%s Hit default case, not handled! "
+ "dropping the packet,"
+ " opcode = %x.",
+ __func__, net_rsp->opcode);
+ break;
+ }
+ count++;
+ ql_update_cq(rx_ring);
+ prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ }
+ ql_write_cq_idx(rx_ring);
+
+ mutex_exit(&rx_ring->rx_lock);
+
+	/* net_rsp is NULL when no completion entry was processed */
+	if (net_rsp != NULL) {
+		tx_ring = &qlge->tx_ring[net_rsp->txq_idx];
+
+		mutex_enter(&tx_ring->tx_lock);
+
+		if (tx_ring->queue_stopped &&
+		    (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
+			/*
+			 * The queue got stopped because the tx_ring was
+			 * full. Wake it up, because it's now at least
+			 * 25% empty.
+			 */
+			tx_ring->queue_stopped = 0;
+			resume_tx = B_TRUE;
+		}
+
+		mutex_exit(&tx_ring->tx_lock);
+		/* Don't hold the lock during the OS callback */
+		if (resume_tx)
+			RESUME_TX(tx_ring);
+	}
+ return (count);
+}
+
+/*
+ * Reset the ASIC when a fatal error occurs
+ */
+/* ARGSUSED */
+static uint_t
+ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
+{
+ qlge_t *qlge = (qlge_t *)((void *)arg1);
+ int status;
+
+ mutex_enter(&qlge->gen_mutex);
+ status = ql_bringdown_adapter(qlge);
+ if (status != DDI_SUCCESS)
+ goto error;
+
+ status = ql_bringup_adapter(qlge);
+ if (status != DDI_SUCCESS)
+ goto error;
+ mutex_exit(&qlge->gen_mutex);
+ return (DDI_INTR_CLAIMED);
+
+error:
+ mutex_exit(&qlge->gen_mutex);
+ cmn_err(CE_WARN,
+ "qlge up/down cycle failed, closing device");
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * Reset MPI
+ */
+/* ARGSUSED */
+static uint_t
+ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
+{
+ qlge_t *qlge = (qlge_t *)((void *)arg1);
+
+ ql_reset_mpi_risc(qlge);
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * Process MPI mailbox messages
+ */
+/* ARGSUSED */
+static uint_t
+ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
+{
+ qlge_t *qlge = (qlge_t *)((void *)arg1);
+
+ ql_do_mpi_intr(qlge);
+ return (DDI_INTR_CLAIMED);
+}
+
+/* Fire up a handler to reset the adapter (ASIC). */
+void
+ql_wake_asic_reset_soft_intr(qlge_t *qlge)
+{
+ (void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
+}
+
+static void
+ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
+{
+ (void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
+}
+
+static void
+ql_wake_mpi_event_soft_intr(qlge_t *qlge)
+{
+ (void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
+}
+
+/*
+ * This handles a fatal error, MPI activity, and the default
+ * rx_ring in an MSI-X multiple interrupt vector environment.
+ * In an MSI/legacy environment it also processes the rest of
+ * the rx_rings.
+ */
+/* ARGSUSED */
+static uint_t
+ql_isr(caddr_t arg1, caddr_t arg2)
+{
+ struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
+ qlge_t *qlge = rx_ring->qlge;
+ struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
+ uint32_t var, prod;
+ int i;
+ int work_done = 0;
+
+ mblk_t *mp;
+
+ _NOTE(ARGUNUSED(arg2));
+
+ ++qlge->rx_interrupts[rx_ring->cq_id];
+
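+	/*
+	 * A non-zero irq_cnt means this interrupt level is currently
+	 * masked; read the status registers (which presumably deasserts
+	 * the source) and claim the interrupt without further work.
+	 */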
+ if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
+ ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
+ var = ql_read_reg(qlge, REG_ERROR_STATUS);
+ var = ql_read_reg(qlge, REG_STATUS);
+ var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
+ return (DDI_INTR_CLAIMED);
+ }
+
+ ql_disable_completion_interrupt(qlge, intr_ctx->intr);
+
+ /*
+ * Check the default queue and wake handler if active.
+ */
+ rx_ring = &qlge->rx_ring[0];
+ prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
+ prod, rx_ring->cnsmr_idx));
+ /* check if interrupt is due to incoming packet */
+ if (prod != rx_ring->cnsmr_idx) {
+ QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
+ ql_disable_completion_interrupt(qlge, intr_ctx->intr);
+ mutex_enter(&rx_ring->rx_lock);
+ mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
+ mutex_exit(&rx_ring->rx_lock);
+
+ if (mp != NULL)
+ RX_UPSTREAM(rx_ring, mp);
+ work_done++;
+ } else {
+ /*
+ * If interrupt is not due to incoming packet, read status
+ * register to see if error happens or mailbox interrupt.
+ */
+ var = ql_read_reg(qlge, REG_STATUS);
+ if ((var & STATUS_FE) != 0) {
+ ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
+
+ cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
+ var = ql_read_reg(qlge, REG_ERROR_STATUS);
+ cmn_err(CE_WARN,
+ "Resetting chip. Error Status Register = 0x%x",
+ var);
+ ql_wake_asic_reset_soft_intr(qlge);
+ return (DDI_INTR_CLAIMED);
+ }
+
+ /*
+ * Check MPI processor activity.
+ */
+ if ((var & STATUS_PI) != 0) {
+ /*
+ * We've got an async event or mailbox completion.
+ * Handle it and clear the source of the interrupt.
+ */
+ ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
+
+ QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
+ ql_disable_completion_interrupt(qlge, intr_ctx->intr);
+ ql_wake_mpi_event_soft_intr(qlge);
+ work_done++;
+ }
+ }
+
+ if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
+ /*
+ * Start the DPC for each active queue.
+ */
+ for (i = 1; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+
+ if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
+ rx_ring->cnsmr_idx) {
+ QL_PRINT(DBG_INTR,
+ ("Waking handler for rx_ring[%d].\n", i));
+
+ ql_disable_completion_interrupt(qlge,
+ rx_ring->irq);
+ if (rx_ring->type == TX_Q) {
+ ql_clean_outbound_rx_ring(rx_ring);
+ ql_enable_completion_interrupt(
+ rx_ring->qlge, rx_ring->irq);
+ } else {
+ mutex_enter(&rx_ring->rx_lock);
+ mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
+ mutex_exit(&rx_ring->rx_lock);
+ if (mp != NULL)
+ RX_UPSTREAM(rx_ring, mp);
+#ifdef QLGE_LOAD_UNLOAD
+ if (rx_ring->mac_flags ==
+ QL_MAC_STOPPED)
+ cmn_err(CE_NOTE,
+ "%s rx_indicate(%d) %d\n",
+ __func__, i,
+ rx_ring->rx_indicate);
+#endif
+ }
+ work_done++;
+ }
+ }
+ }
+
+ ql_enable_completion_interrupt(qlge, intr_ctx->intr);
+
+ return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
+}
+
+/*
+ * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
+ */
+/* ARGSUSED */
+static uint_t
+ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
+{
+ struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
+ qlge_t *qlge = rx_ring->qlge;
+ _NOTE(ARGUNUSED(arg2));
+
+ ++qlge->rx_interrupts[rx_ring->cq_id];
+ ql_clean_outbound_rx_ring(rx_ring);
+ ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
+
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * Poll n_bytes of chained incoming packets
+ */
+mblk_t *
+ql_ring_rx_poll(void *arg, int n_bytes)
+{
+ struct rx_ring *rx_ring = (struct rx_ring *)arg;
+ qlge_t *qlge = rx_ring->qlge;
+ mblk_t *mp = NULL;
+ uint32_t var;
+
+ ASSERT(n_bytes >= 0);
+ QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
+ __func__, rx_ring->cq_id, n_bytes));
+
+ ++qlge->rx_polls[rx_ring->cq_id];
+
+ if (n_bytes == 0)
+ return (mp);
+ mutex_enter(&rx_ring->rx_lock);
+ mp = ql_ring_rx(rx_ring, n_bytes);
+ mutex_exit(&rx_ring->rx_lock);
+
+ if ((rx_ring->cq_id == 0) && (mp == NULL)) {
+ var = ql_read_reg(qlge, REG_STATUS);
+ /*
+ * Check for fatal error.
+ */
+ if ((var & STATUS_FE) != 0) {
+ ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
+ var = ql_read_reg(qlge, REG_ERROR_STATUS);
+ cmn_err(CE_WARN, "Got fatal error %x.", var);
+ ql_wake_asic_reset_soft_intr(qlge);
+ }
+ /*
+ * Check MPI processor activity.
+ */
+ if ((var & STATUS_PI) != 0) {
+ /*
+ * We've got an async event or mailbox completion.
+ * Handle it and clear the source of the interrupt.
+ */
+ ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
+ ql_do_mpi_intr(qlge);
+ }
+ }
+
+ return (mp);
+}
+
+/*
+ * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
+ */
+/* ARGSUSED */
+static uint_t
+ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
+{
+ struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
+ qlge_t *qlge = rx_ring->qlge;
+ mblk_t *mp;
+ _NOTE(ARGUNUSED(arg2));
+
+ QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
+
+ ++qlge->rx_interrupts[rx_ring->cq_id];
+
+ mutex_enter(&rx_ring->rx_lock);
+ mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
+ mutex_exit(&rx_ring->rx_lock);
+
+ if (mp != NULL)
+ RX_UPSTREAM(rx_ring, mp);
+
+ return (DDI_INTR_CLAIMED);
+}
+
+
+/*
+ * Allocate DMA buffer for ioctl service
+ */
+static int
+ql_alloc_ioctl_dma_buf(qlge_t *qlge)
+{
+ uint64_t phy_addr;
+ uint64_t alloc_size;
+ ddi_dma_cookie_t dma_cookie;
+
+ alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
+ max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
+ if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
+ &ql_buf_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &qlge->ioctl_buf_dma_attr.acc_handle,
+ (size_t)alloc_size, /* mem size */
+ (size_t)0, /* alignment */
+ (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
+ &dma_cookie) != 0) {
+ cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+
+ phy_addr = dma_cookie.dmac_laddress;
+
+ if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
+ cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+
+ qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;
+
+ QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
+ "phy_addr = 0x%lx\n",
+ __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * Function to free physical memory.
+ */
+static void
+ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
+{
+ if (dma_handle != NULL) {
+ (void) ddi_dma_unbind_handle(*dma_handle);
+ if (acc_handle != NULL)
+ ddi_dma_mem_free(acc_handle);
+ ddi_dma_free_handle(dma_handle);
+ }
+}
+
+/*
+ * Function to free ioctl dma buffer.
+ */
+static void
+ql_free_ioctl_dma_buf(qlge_t *qlge)
+{
+ if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
+ ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
+ &qlge->ioctl_buf_dma_attr.acc_handle);
+
+ qlge->ioctl_buf_dma_attr.vaddr = NULL;
+ qlge->ioctl_buf_dma_attr.dma_handle = NULL;
+ }
+}
+
+/*
+ * Free shadow register space used for request and completion queues
+ */
+static void
+ql_free_shadow_space(qlge_t *qlge)
+{
+ if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
+ ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
+ &qlge->host_copy_shadow_dma_attr.acc_handle);
+ bzero(&qlge->host_copy_shadow_dma_attr,
+ sizeof (qlge->host_copy_shadow_dma_attr));
+ }
+
+ if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
+ ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
+ &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
+ bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
+ sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
+ }
+}
+
+/*
+ * Allocate shadow register space for request and completion queues
+ */
+static int
+ql_alloc_shadow_space(qlge_t *qlge)
+{
+ ddi_dma_cookie_t dma_cookie;
+
+ if (ql_alloc_phys(qlge->dip,
+ &qlge->host_copy_shadow_dma_attr.dma_handle,
+ &ql_dev_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &qlge->host_copy_shadow_dma_attr.acc_handle,
+ (size_t)VM_PAGE_SIZE, /* mem size */
+ (size_t)4, /* 4 bytes alignment */
+ (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
+ &dma_cookie) != 0) {
+ bzero(&qlge->host_copy_shadow_dma_attr,
+ sizeof (qlge->host_copy_shadow_dma_attr));
+
+ cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
+ "response shadow registers", __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+
+ qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
+
+ if (ql_alloc_phys(qlge->dip,
+ &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
+ &ql_desc_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
+ (size_t)VM_PAGE_SIZE, /* mem size */
+ (size_t)4, /* 4 bytes alignment */
+ (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
+ &dma_cookie) != 0) {
+ bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
+ sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
+
+ cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
+ "for request shadow registers",
+ __func__, qlge->instance);
+ goto err_wqp_sh_area;
+ }
+ qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
+
+ return (DDI_SUCCESS);
+
+err_wqp_sh_area:
+ ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
+ &qlge->host_copy_shadow_dma_attr.acc_handle);
+ bzero(&qlge->host_copy_shadow_dma_attr,
+ sizeof (qlge->host_copy_shadow_dma_attr));
+
+ return (DDI_FAILURE);
+}
+
+/*
+ * Initialize a tx ring
+ */
+static void
+ql_init_tx_ring(struct tx_ring *tx_ring)
+{
+ int i;
+ struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr;
+ struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc;
+
+ for (i = 0; i < tx_ring->wq_len; i++) {
+ tx_ring_desc->index = i;
+ tx_ring_desc->queue_entry = mac_iocb_ptr;
+ mac_iocb_ptr++;
+ tx_ring_desc++;
+ }
+ tx_ring->tx_free_count = tx_ring->wq_len;
+ tx_ring->queue_stopped = 0;
+}
+
+/*
+ * Free one tx ring resources
+ */
+static void
+ql_free_tx_resources(struct tx_ring *tx_ring)
+{
+ struct tx_ring_desc *tx_ring_desc;
+ int i, j;
+
+ ql_free_phys(&tx_ring->wq_dma.dma_handle, &tx_ring->wq_dma.acc_handle);
+ bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
+
+ if (tx_ring->wq_desc != NULL) {
+ tx_ring_desc = tx_ring->wq_desc;
+ for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
+ for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
+ if (tx_ring_desc->tx_dma_handle[j]) {
+ /*
+ * The unbinding will happen in tx
+ * completion, here we just free the
+ * handles
+ */
+ ddi_dma_free_handle(
+ &(tx_ring_desc->tx_dma_handle[j]));
+ tx_ring_desc->tx_dma_handle[j] = NULL;
+ }
+ }
+ if (tx_ring_desc->oal != NULL) {
+ tx_ring_desc->oal_dma_addr = 0;
+ tx_ring_desc->oal = NULL;
+ tx_ring_desc->copy_buffer = NULL;
+ tx_ring_desc->copy_buffer_dma_addr = 0;
+
+ ql_free_phys(&tx_ring_desc->oal_dma.dma_handle,
+ &tx_ring_desc->oal_dma.acc_handle);
+ }
+ }
+ kmem_free(tx_ring->wq_desc,
+ tx_ring->wq_len * sizeof (struct tx_ring_desc));
+ tx_ring->wq_desc = NULL;
+ }
+ /* free the wqicb struct */
+ if (tx_ring->wqicb_dma.dma_handle) {
+ ql_free_phys(&tx_ring->wqicb_dma.dma_handle,
+ &tx_ring->wqicb_dma.acc_handle);
+ bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
+ }
+}
+
+/*
+ * Allocate work (request) queue memory and transmit
+ * descriptors for this transmit ring
+ */
+static int
+ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring)
+{
+ ddi_dma_cookie_t dma_cookie;
+ struct tx_ring_desc *tx_ring_desc;
+ int i, j;
+ uint32_t length;
+
+ /* allocate dma buffers for obiocbs */
+ if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle,
+ &ql_desc_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &tx_ring->wq_dma.acc_handle,
+ (size_t)tx_ring->wq_size, /* mem size */
+ (size_t)128, /* alignment:128 bytes boundary */
+ (caddr_t *)&tx_ring->wq_dma.vaddr,
+ &dma_cookie) != 0) {
+		bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
+ cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+ tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress;
+
+ tx_ring->wq_desc =
+ kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc),
+ KM_NOSLEEP);
+ if (tx_ring->wq_desc == NULL) {
+ goto err;
+ } else {
+ tx_ring_desc = tx_ring->wq_desc;
+ /*
+ * Allocate a large enough structure to hold the following
+ * 1. oal buffer MAX_SGELEMENTS * sizeof (oal_entry) bytes
+ * 2. copy buffer of QL_MAX_COPY_LENGTH bytes
+ */
+ for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
+ length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)
+ + QL_MAX_COPY_LENGTH;
+
+ if (ql_alloc_phys(qlge->dip,
+ &tx_ring_desc->oal_dma.dma_handle,
+ &ql_desc_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &tx_ring_desc->oal_dma.acc_handle,
+ (size_t)length, /* mem size */
+ (size_t)0, /* default alignment:8 bytes boundary */
+ (caddr_t *)&tx_ring_desc->oal_dma.vaddr,
+ &dma_cookie) != 0) {
+ bzero(&tx_ring_desc->oal_dma,
+ sizeof (tx_ring_desc->oal_dma));
+ cmn_err(CE_WARN, "%s(%d): reqQ tx buf &"
+ "oal alloc failed.",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+
+ tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr;
+ tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress;
+ tx_ring_desc->copy_buffer =
+ (caddr_t)((uint8_t *)tx_ring_desc->oal
+ + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
+ tx_ring_desc->copy_buffer_dma_addr =
+ (tx_ring_desc->oal_dma_addr
+ + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
+
+ /* Allocate dma handles for transmit buffers */
+ for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
+ if (ddi_dma_alloc_handle(qlge->dip,
+ &tx_mapping_dma_attr,
+ DDI_DMA_DONTWAIT,
+ 0, &tx_ring_desc->tx_dma_handle[j])
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "!%s: ddi_dma_alloc_handle: "
+ "tx_dma_handle "
+ "alloc failed", __func__);
+ goto err;
+ }
+ }
+ }
+ }
+ /* alloc a wqicb control block to load this tx ring to hw */
+ if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle,
+ &ql_desc_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &tx_ring->wqicb_dma.acc_handle,
+ (size_t)sizeof (struct wqicb_t), /* mem size */
+	    (size_t)0, /* default alignment */
+ (caddr_t *)&tx_ring->wqicb_dma.vaddr,
+ &dma_cookie) != 0) {
+ bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
+ cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+ tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress;
+
+ return (DDI_SUCCESS);
+
+err:
+ ql_free_tx_resources(tx_ring);
+ return (DDI_FAILURE);
+}
+
+/*
+ * Free one rx ring resources
+ */
+static void
+ql_free_rx_resources(struct rx_ring *rx_ring)
+{
+ /* Free the small buffer queue. */
+ if (rx_ring->sbq_dma.dma_handle) {
+ ql_free_phys(&rx_ring->sbq_dma.dma_handle,
+ &rx_ring->sbq_dma.acc_handle);
+ bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
+ }
+
+ /* Free the small buffer queue control blocks. */
+ kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len *
+ sizeof (struct bq_desc));
+ rx_ring->sbq_desc = NULL;
+
+ /* Free the large buffer queue. */
+ if (rx_ring->lbq_dma.dma_handle) {
+ ql_free_phys(&rx_ring->lbq_dma.dma_handle,
+ &rx_ring->lbq_dma.acc_handle);
+ bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
+ }
+
+ /* Free the large buffer queue control blocks. */
+ kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len *
+ sizeof (struct bq_desc));
+ rx_ring->lbq_desc = NULL;
+
+ /* Free cqicb struct */
+ if (rx_ring->cqicb_dma.dma_handle) {
+ ql_free_phys(&rx_ring->cqicb_dma.dma_handle,
+ &rx_ring->cqicb_dma.acc_handle);
+ bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
+ }
+ /* Free the rx queue. */
+ if (rx_ring->cq_dma.dma_handle) {
+ ql_free_phys(&rx_ring->cq_dma.dma_handle,
+ &rx_ring->cq_dma.acc_handle);
+ bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
+ }
+}
+
+/*
+ * Allocate queues and buffers for this completions queue based
+ * on the values in the parameter structure.
+ */
+static int
+ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring)
+{
+ ddi_dma_cookie_t dma_cookie;
+
+ if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle,
+ &ql_desc_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &rx_ring->cq_dma.acc_handle,
+ (size_t)rx_ring->cq_size, /* mem size */
+ (size_t)128, /* alignment:128 bytes boundary */
+ (caddr_t *)&rx_ring->cq_dma.vaddr,
+ &dma_cookie) != 0) {
+ bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
+ cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+ rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress;
+
+ if (rx_ring->sbq_len != 0) {
+ /*
+ * Allocate small buffer queue.
+ */
+ if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle,
+ &ql_desc_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &rx_ring->sbq_dma.acc_handle,
+ (size_t)rx_ring->sbq_size, /* mem size */
+ (size_t)128, /* alignment:128 bytes boundary */
+ (caddr_t *)&rx_ring->sbq_dma.vaddr,
+ &dma_cookie) != 0) {
+ bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
+ cmn_err(CE_WARN,
+ "%s(%d): small buffer queue allocation failed.",
+ __func__, qlge->instance);
+ goto err_mem;
+ }
+ rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress;
+
+ /*
+ * Allocate small buffer queue control blocks.
+ */
+ rx_ring->sbq_desc =
+ kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc),
+ KM_NOSLEEP);
+ if (rx_ring->sbq_desc == NULL) {
+ cmn_err(CE_WARN,
+ "sbq control block allocation failed.");
+ goto err_mem;
+ }
+
+ ql_init_sbq_ring(rx_ring);
+ }
+
+ if (rx_ring->lbq_len != 0) {
+ /*
+ * Allocate large buffer queue.
+ */
+ if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle,
+ &ql_desc_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &rx_ring->lbq_dma.acc_handle,
+ (size_t)rx_ring->lbq_size, /* mem size */
+ (size_t)128, /* alignment:128 bytes boundary */
+ (caddr_t *)&rx_ring->lbq_dma.vaddr,
+ &dma_cookie) != 0) {
+ bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
+ cmn_err(CE_WARN, "%s(%d): lbq allocation failed.",
+ __func__, qlge->instance);
+ goto err_mem;
+ }
+ rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress;
+
+ /*
+ * Allocate large buffer queue control blocks.
+ */
+ rx_ring->lbq_desc =
+ kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc),
+ KM_NOSLEEP);
+ if (rx_ring->lbq_desc == NULL) {
+ cmn_err(CE_WARN,
+ "Large buffer queue control block allocation "
+ "failed.");
+ goto err_mem;
+ }
+ ql_init_lbq_ring(rx_ring);
+ }
+
+ if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle,
+ &ql_desc_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &rx_ring->cqicb_dma.acc_handle,
+ (size_t)sizeof (struct cqicb_t), /* mem size */
+	    (size_t)0, /* default alignment */
+ (caddr_t *)&rx_ring->cqicb_dma.vaddr,
+ &dma_cookie) != 0) {
+ bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
+ cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+ rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress;
+
+ return (DDI_SUCCESS);
+
+err_mem:
+ ql_free_rx_resources(rx_ring);
+ return (DDI_FAILURE);
+}
+
+/*
+ * Frees tx/rx queues memory resources
+ */
+static void
+ql_free_mem_resources(qlge_t *qlge)
+{
+ int i;
+
+ if (qlge->ricb_dma.dma_handle) {
+ /* free the ricb struct */
+ ql_free_phys(&qlge->ricb_dma.dma_handle,
+ &qlge->ricb_dma.acc_handle);
+ bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
+ }
+
+ ql_free_rx_buffers(qlge);
+
+ ql_free_ioctl_dma_buf(qlge);
+
+ for (i = 0; i < qlge->tx_ring_count; i++)
+ ql_free_tx_resources(&qlge->tx_ring[i]);
+
+ for (i = 0; i < qlge->rx_ring_count; i++)
+ ql_free_rx_resources(&qlge->rx_ring[i]);
+
+ ql_free_shadow_space(qlge);
+}
+
+/*
+ * Allocate buffer queues, large buffers and small buffers etc
+ *
+ * This API is called in the gld_attach member function. It is called
+ * only once; later resets and reboots must not re-allocate all the
+ * rings and buffers.
+ */
+static int
+ql_alloc_mem_resources(qlge_t *qlge)
+{
+ int i;
+ ddi_dma_cookie_t dma_cookie;
+
+ /* Allocate space for our shadow registers */
+ if (ql_alloc_shadow_space(qlge))
+ return (DDI_FAILURE);
+
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) {
+ cmn_err(CE_WARN, "RX resource allocation failed.");
+ goto err_mem;
+ }
+ }
+ /* Allocate tx queue resources */
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) {
+ cmn_err(CE_WARN, "Tx resource allocation failed.");
+ goto err_mem;
+ }
+ }
+
+ if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) {
+ goto err_mem;
+ }
+
+ if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed",
+ __func__, qlge->instance);
+ goto err_mem;
+ }
+
+ qlge->sequence |= INIT_ALLOC_RX_BUF;
+
+ if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle,
+ &ql_desc_acc_attr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ &qlge->ricb_dma.acc_handle,
+ (size_t)sizeof (struct ricb), /* mem size */
+	    (size_t)0, /* default alignment */
+ (caddr_t *)&qlge->ricb_dma.vaddr,
+ &dma_cookie) != 0) {
+ bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
+ cmn_err(CE_WARN, "%s(%d): ricb allocation failed.",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+ qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress;
+
+ return (DDI_SUCCESS);
+
+err_mem:
+ ql_free_mem_resources(qlge);
+ return (DDI_FAILURE);
+}
+
+
+/*
+ * Function used to allocate physical memory and zero it.
+ */
+
+static int
+ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
+ ddi_device_acc_attr_t *device_acc_attr,
+ uint_t dma_flags,
+ ddi_acc_handle_t *acc_handle,
+ size_t size,
+ size_t alignment,
+ caddr_t *vaddr,
+ ddi_dma_cookie_t *dma_cookie)
+{
+ size_t rlen;
+ uint_t cnt;
+
+ /*
+	 * Workaround: SUN XMITS buffers must start and end on an 8-byte
+	 * boundary, or the hardware will overrun the buffer. The simple
+	 * fix is to make sure the buffer has enough room for the overrun.
+ */
+ if (size & 7) {
+ size += 8 - (size & 7);
+ }
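+	/* e.g. a 1500-byte request is rounded up to 1504 bytes */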
+
+ /* Adjust the alignment if requested */
+ if (alignment) {
+ dma_attr.dma_attr_align = alignment;
+ }
+
+ /*
+ * Allocate DMA handle
+ */
+ if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_SLEEP, NULL,
+ dma_handle) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, QL_BANG "%s: ddi_dma_alloc_handle FAILED",
+ __func__);
+ return (QL_ERROR);
+ }
+ /*
+ * Allocate DMA memory
+ */
+	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
+	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING), DDI_DMA_SLEEP,
+	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "alloc_phys: Memory alloc Failed");
+		ddi_dma_free_handle(dma_handle);
+		return (QL_ERROR);
+	}
+
+ if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
+ dma_flags, DDI_DMA_SLEEP, NULL,
+ dma_cookie, &cnt) != DDI_DMA_MAPPED) {
+ ddi_dma_mem_free(acc_handle);
+
+ ddi_dma_free_handle(dma_handle);
+ cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
+ __func__);
+ return (QL_ERROR);
+ }
+
+ if (cnt != 1) {
+
+ ql_free_phys(dma_handle, acc_handle);
+
+ cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
+ __func__);
+ return (QL_ERROR);
+ }
+
+ bzero((caddr_t)*vaddr, rlen);
+
+ return (0);
+}
+
+/*
+ * Add interrupt handlers based on the interrupt type.
+ * Before adding the interrupt handlers, the interrupt vectors should
+ * have been allocated, and the rx/tx rings have also been allocated.
+ */
+static int
+ql_add_intr_handlers(qlge_t *qlge)
+{
+ int vector = 0;
+ int rc, i;
+	uint32_t value = 0;
+ struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
+
+ switch (qlge->intr_type) {
+ case DDI_INTR_TYPE_MSIX:
+ /*
+ * Add interrupt handler for rx and tx rings: vector[0 -
+ * (qlge->intr_cnt -1)].
+ */
+ value = 0;
+ for (vector = 0; vector < qlge->intr_cnt; vector++) {
+ ql_atomic_set_32(&intr_ctx->irq_cnt, value);
+
+ /*
+ * associate interrupt vector with interrupt handler
+ */
+ rc = ddi_intr_add_handler(qlge->htable[vector],
+ (ddi_intr_handler_t *)intr_ctx->handler,
+ (void *)&qlge->rx_ring[vector], NULL);
+
+ if (rc != DDI_SUCCESS) {
+ QL_PRINT(DBG_INIT,
+ ("Add rx interrupt handler failed. "
+ "return: %d, vector: %d", rc, vector));
+ for (vector--; vector >= 0; vector--) {
+ (void) ddi_intr_remove_handler(
+ qlge->htable[vector]);
+ }
+ return (DDI_FAILURE);
+ }
+ intr_ctx++;
+ }
+ break;
+
+ case DDI_INTR_TYPE_MSI:
+ /*
+ * Add interrupt handlers for the only vector
+ */
+ ql_atomic_set_32(&intr_ctx->irq_cnt, value);
+
+ rc = ddi_intr_add_handler(qlge->htable[vector],
+ ql_isr,
+ (caddr_t)&qlge->rx_ring[0], NULL);
+
+ if (rc != DDI_SUCCESS) {
+ QL_PRINT(DBG_INIT,
+ ("Add MSI interrupt handler failed: %d\n", rc));
+ return (DDI_FAILURE);
+ }
+ break;
+
+ case DDI_INTR_TYPE_FIXED:
+ /*
+ * Add interrupt handlers for the only vector
+ */
+ ql_atomic_set_32(&intr_ctx->irq_cnt, value);
+
+ rc = ddi_intr_add_handler(qlge->htable[vector],
+ ql_isr,
+ (caddr_t)&qlge->rx_ring[0], NULL);
+
+ if (rc != DDI_SUCCESS) {
+ QL_PRINT(DBG_INIT,
+ ("Add legacy interrupt handler failed: %d\n", rc));
+ return (DDI_FAILURE);
+ }
+ break;
+
+ default:
+ return (DDI_FAILURE);
+ }
+
+ /* Enable interrupts */
+ /* Block enable */
+ if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
+ QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n",
+ qlge->intr_cnt));
+ (void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt);
+ } else { /* Non block enable */
+ for (i = 0; i < qlge->intr_cnt; i++) {
+ QL_PRINT(DBG_INIT, ("Non Block Enabling interrupt %d\n,"
+ "handle 0x%x\n", i, qlge->htable[i]));
+ (void) ddi_intr_enable(qlge->htable[i]);
+ }
+ }
+ qlge->sequence |= INIT_INTR_ENABLED;
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Here we build the intr_ctx structures based on
+ * our rx_ring count and intr vector count.
+ * The intr_ctx structure is used to hook each vector
+ * to possibly different handlers.
+ */
+static void
+ql_resolve_queues_to_irqs(qlge_t *qlge)
+{
+ int i = 0;
+ struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
+
+ if (qlge->intr_type == DDI_INTR_TYPE_MSIX) {
+ /*
+ * Each rx_ring has its own intr_ctx since we
+ * have separate vectors for each queue.
+		 * This is only true when MSI-X is enabled.
+ */
+ for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) {
+ qlge->rx_ring[i].irq = i;
+ intr_ctx->intr = i;
+ intr_ctx->qlge = qlge;
+
+ /*
+			 * We set up each vector's enable/disable/read bits
+			 * so there are no bit/mask calculations in the
+			 * critical path.
+ */
+ intr_ctx->intr_en_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
+ INTR_EN_IHD | i;
+ intr_ctx->intr_dis_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
+ INTR_EN_IHD | i;
+ intr_ctx->intr_read_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
+ | i;
+
+ if (i == 0) {
+ /*
+ * Default queue handles bcast/mcast plus
+ * async events.
+ */
+ intr_ctx->handler = ql_isr;
+ } else if (qlge->rx_ring[i].type == TX_Q) {
+ /*
+ * Outbound queue is for outbound completions
+ * only.
+ */
+ intr_ctx->handler = ql_msix_tx_isr;
+ } else {
+ /*
+ * Inbound queues handle unicast frames only.
+ */
+ intr_ctx->handler = ql_msix_rx_isr;
+ }
+ }
+ } else {
+ /*
+ * All rx_rings use the same intr_ctx since
+ * there is only one vector.
+ */
+ intr_ctx->intr = 0;
+ intr_ctx->qlge = qlge;
+ /*
+	 * We set up each vector's enable/disable/read bits so
+	 * there are no bit/mask calculations in the critical path.
+ */
+ intr_ctx->intr_en_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_ENABLE;
+ intr_ctx->intr_dis_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_DISABLE;
+ intr_ctx->intr_read_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_READ;
+ /*
+ * Single interrupt means one handler for all rings.
+ */
+ intr_ctx->handler = ql_isr;
+ for (i = 0; i < qlge->rx_ring_count; i++)
+ qlge->rx_ring[i].irq = 0;
+ }
+}
+
+
+/*
+ * Free allocated interrupts.
+ */
+static void
+ql_free_irq_vectors(qlge_t *qlge)
+{
+ int i;
+ int rc;
+
+ if (qlge->sequence & INIT_INTR_ENABLED) {
+ /* Disable all interrupts */
+ if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
+ /* Call ddi_intr_block_disable() */
+ (void) ddi_intr_block_disable(qlge->htable,
+ qlge->intr_cnt);
+ } else {
+ for (i = 0; i < qlge->intr_cnt; i++) {
+ (void) ddi_intr_disable(qlge->htable[i]);
+ }
+ }
+
+ qlge->sequence &= ~INIT_INTR_ENABLED;
+ }
+
+ for (i = 0; i < qlge->intr_cnt; i++) {
+
+ if (qlge->sequence & INIT_ADD_INTERRUPT)
+ (void) ddi_intr_remove_handler(qlge->htable[i]);
+
+ if (qlge->sequence & INIT_INTR_ALLOC) {
+ rc = ddi_intr_free(qlge->htable[i]);
+ if (rc != DDI_SUCCESS) {
+ /* EMPTY */
+ QL_PRINT(DBG_INIT, ("Free intr failed: %d",
+ rc));
+ }
+ }
+ }
+ if (qlge->sequence & INIT_INTR_ALLOC)
+ qlge->sequence &= ~INIT_INTR_ALLOC;
+
+ if (qlge->sequence & INIT_ADD_INTERRUPT)
+ qlge->sequence &= ~INIT_ADD_INTERRUPT;
+
+ if (qlge->htable) {
+ kmem_free(qlge->htable, qlge->intr_size);
+ qlge->htable = NULL;
+ }
+}
+
+/*
+ * Allocate interrupt vectors
+ * For legacy and MSI, only 1 handle is needed.
+ * For MSI-X, if fewer than 2 vectors are available, return failure.
+ * Upon success, this maps the vectors to rx and tx rings for
+ * interrupts.
+ */
+static int
+ql_request_irq_vectors(qlge_t *qlge, int intr_type)
+{
+ dev_info_t *devinfo;
+ uint32_t request, orig;
+ int count, avail, actual;
+ int minimum;
+ int rc;
+
+ devinfo = qlge->dip;
+
+ switch (intr_type) {
+ case DDI_INTR_TYPE_FIXED:
+ request = 1; /* Request 1 legacy interrupt handle */
+ minimum = 1;
+ QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
+ break;
+
+ case DDI_INTR_TYPE_MSI:
+ request = 1; /* Request 1 MSI interrupt handle */
+ minimum = 1;
+ QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
+ break;
+
+ case DDI_INTR_TYPE_MSIX:
+ /*
+		 * Ideally we want one vector per rx ring: the rss rings
+		 * plus the tx completion rings, with ring 0 also serving
+		 * as the default completion queue.
+ */
+ request = qlge->rx_ring_count;
+
+ orig = request;
+ if (request > (MAX_RX_RINGS))
+ request = MAX_RX_RINGS;
+ minimum = 2;
+ QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
+ break;
+
+ default:
+ QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
+ return (DDI_FAILURE);
+ }
+
+ QL_PRINT(DBG_INIT, ("interrupt handles requested: %d minimum: %d\n",
+ request, minimum));
+
+ /*
+ * Get number of supported interrupts
+ */
+ rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
+ if ((rc != DDI_SUCCESS) || (count < minimum)) {
+ QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
+ "count: %d\n", rc, count));
+ return (DDI_FAILURE);
+ }
+ QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));
+
+ /*
+ * Get number of available interrupts
+ */
+ rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
+ if ((rc != DDI_SUCCESS) || (avail < minimum)) {
+ QL_PRINT(DBG_INIT,
+ ("Get interrupt available number failed. Return:"
+ " %d, available: %d\n", rc, avail));
+ return (DDI_FAILURE);
+ }
+ QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));
+
+ if (avail < request) {
+ QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
+ request, avail));
+ request = avail;
+ }
+
+ actual = 0;
+ qlge->intr_cnt = 0;
+
+ /*
+ * Allocate an array of interrupt handles
+ */
+ qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
+ qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);
+
+ rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
+ (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
+ if (rc != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
+ " %d, request: %d, actual: %d",
+ __func__, qlge->instance, rc, request, actual);
+ goto ql_intr_alloc_fail;
+ }
+ qlge->intr_cnt = actual;
+
+ qlge->sequence |= INIT_INTR_ALLOC;
+
+ /*
+	 * If the actual number of vectors is less than the minimum
+ * then fail.
+ */
+ if (actual < minimum) {
+ cmn_err(CE_WARN,
+ "Insufficient interrupt handles available: %d", actual);
+ goto ql_intr_alloc_fail;
+ }
+
+	/*
+	 * For MSI-X, a smaller actual vector count forces us to reduce
+	 * the number of tx & rx rings: keep one tx completion ring and
+	 * give the remaining vectors to the rss rx rings.
+	 */
+ if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
+ if (actual < MAX_RX_RINGS) {
+ qlge->tx_ring_count = 1;
+ qlge->rss_ring_count = actual - 1;
+ qlge->rx_ring_count = qlge->tx_ring_count +
+ qlge->rss_ring_count;
+ }
+ }
+ /*
+ * Get priority for first vector, assume remaining are all the same
+ */
+ rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
+ if (rc != DDI_SUCCESS) {
+ QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
+ goto ql_intr_alloc_fail;
+ }
+
+ rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
+ if (rc != DDI_SUCCESS) {
+ QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
+ goto ql_intr_alloc_fail;
+ }
+
+ qlge->intr_type = intr_type;
+
+ return (DDI_SUCCESS);
+
+ql_intr_alloc_fail:
+ ql_free_irq_vectors(qlge);
+
+ return (DDI_FAILURE);
+}
+
+/*
+ * Allocate interrupt vector(s) for one of the following interrupt types:
+ * MSI-X, MSI, or legacy. In MSI and legacy modes we support only a single
+ * receive and transmit queue.
+ */
+int
+ql_alloc_irqs(qlge_t *qlge)
+{
+ int intr_types;
+ int rval;
+
+ /*
+ * Get supported interrupt types
+ */
+ if (ddi_intr_get_supported_types(qlge->dip, &intr_types)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed",
+ __func__, qlge->instance);
+
+ return (DDI_FAILURE);
+ }
+
+ QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n",
+ __func__, qlge->instance, intr_types));
+
+ /* Install MSI-X interrupts */
+ if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
+ QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n",
+ __func__, qlge->instance, intr_types));
+ rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX);
+ if (rval == DDI_SUCCESS) {
+ return (rval);
+ }
+ QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed,"
+ " trying MSI interrupts ...\n", __func__, qlge->instance));
+ }
+
+ /*
+ * We will have 2 completion queues in MSI / Legacy mode,
+ * Queue 0 for default completions
+ * Queue 1 for transmit completions
+ */
+ qlge->rss_ring_count = 1; /* Default completion queue (0) for all */
+ qlge->tx_ring_count = 1; /* Single tx completion queue */
+ qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
+
+	QL_PRINT(DBG_INIT, ("%s(%d) Falling back to single completion queue\n",
+ __func__, qlge->instance));
+ /*
+ * Add the h/w interrupt handler and initialise mutexes
+ */
+ rval = DDI_FAILURE;
+
+ /*
+	 * If the OS supports MSI-X but allocation failed above, try MSI.
+	 * If MSI allocation also fails, roll back to a fixed (legacy)
+	 * interrupt.
+ */
+ if (intr_types & DDI_INTR_TYPE_MSI) {
+ rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI);
+ if (rval == DDI_SUCCESS) {
+ qlge->intr_type = DDI_INTR_TYPE_MSI;
+ QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n",
+ __func__, qlge->instance));
+ }
+ }
+
+ /* Try Fixed interrupt Legacy mode */
+ if (rval != DDI_SUCCESS) {
+ rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED);
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt "
+ "allocation failed",
+ __func__, qlge->instance);
+ } else {
+ qlge->intr_type = DDI_INTR_TYPE_FIXED;
+ QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n",
+ __func__, qlge->instance));
+ }
+ }
+
+ return (rval);
+}
+
+static void
+ql_free_rx_tx_locks(qlge_t *qlge)
+{
+ int i;
+ struct rx_ring *rx_ring;
+ struct tx_ring *tx_ring;
+
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ tx_ring = &qlge->tx_ring[i];
+ mutex_destroy(&tx_ring->tx_lock);
+ }
+
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ mutex_destroy(&rx_ring->rx_lock);
+ mutex_destroy(&rx_ring->sbq_lock);
+ mutex_destroy(&rx_ring->lbq_lock);
+ }
+}
+
+/*
+ * Frees all resources allocated during attach.
+ *
+ * Input:
+ * dip = pointer to device information structure.
+ *	qlge = adapter state structure; qlge->sequence bits indicate
+ *	which resources to free.
+ *
+ * Context:
+ * Kernel context.
+ */
+static void
+ql_free_resources(dev_info_t *dip, qlge_t *qlge)
+{
+ /* Disable driver timer */
+ ql_stop_timer(qlge);
+
+ if (qlge->sequence & INIT_MAC_REGISTERED) {
+ mac_unregister(qlge->mh);
+ qlge->sequence &= ~INIT_MAC_REGISTERED;
+ }
+
+ if (qlge->sequence & INIT_MAC_ALLOC) {
+ /* Nothing to do, macp is already freed */
+ qlge->sequence &= ~INIT_MAC_ALLOC;
+ }
+
+ if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
+ pci_config_teardown(&qlge->pci_handle);
+ qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
+ }
+
+ if (qlge->sequence & INIT_ADD_INTERRUPT) {
+ ql_free_irq_vectors(qlge);
+ qlge->sequence &= ~INIT_ADD_INTERRUPT;
+ }
+
+ if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
+ (void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
+ (void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
+ (void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
+ qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
+ }
+
+ if (qlge->sequence & INIT_KSTATS) {
+ ql_fini_kstats(qlge);
+ qlge->sequence &= ~INIT_KSTATS;
+ }
+
+ if (qlge->sequence & INIT_MUTEX) {
+ mutex_destroy(&qlge->gen_mutex);
+ mutex_destroy(&qlge->hw_mutex);
+ mutex_destroy(&qlge->mbx_mutex);
+ cv_destroy(&qlge->cv_mbx_intr);
+ qlge->sequence &= ~INIT_MUTEX;
+ }
+
+ if (qlge->sequence & INIT_LOCKS_CREATED) {
+ ql_free_rx_tx_locks(qlge);
+ qlge->sequence &= ~INIT_LOCKS_CREATED;
+ }
+
+ if (qlge->sequence & INIT_MEMORY_ALLOC) {
+ ql_free_mem_resources(qlge);
+ qlge->sequence &= ~INIT_MEMORY_ALLOC;
+ }
+
+ if (qlge->sequence & INIT_REGS_SETUP) {
+ ddi_regs_map_free(&qlge->dev_handle);
+ qlge->sequence &= ~INIT_REGS_SETUP;
+ }
+
+ if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
+ ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
+ qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
+ }
+
+ /*
+	 * free the flash FLT table allocated during attach
+ */
+	if ((qlge->flt.ql_flt_entry_ptr != NULL) &&
+ (qlge->flt.header.length != 0)) {
+ kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
+ qlge->flt.ql_flt_entry_ptr = NULL;
+ }
+
+ /* finally, free qlge structure */
+ if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
+ kmem_free(qlge, sizeof (qlge_t));
+ }
+
+ ddi_prop_remove_all(dip);
+ ddi_set_driver_private(dip, NULL);
+
+}
+
+/*
+ * Set promiscuous mode of the device.
+ * Caller must hold the HW lock.
+ */
+void
+ql_set_promiscuous(qlge_t *qlge, int mode)
+{
+ if (mode) {
+ ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
+ RT_IDX_VALID, 1);
+ } else {
+ ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
+ RT_IDX_VALID, 0);
+ }
+}
+
+/*
+ * Write 'data1' to Mac Protocol Address Index Register and
+ * 'data2' to Mac Protocol Address Data Register
+ * Assuming that the Mac Protocol semaphore lock has been acquired.
+ */
+static int
+ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2)
+{
+ int return_value = DDI_SUCCESS;
+
+ if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
+ MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register "
+ "timeout.");
+ return_value = DDI_FAILURE;
+ goto out;
+ }
+ ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1);
+ ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2);
+out:
+ return (return_value);
+}
+
+/*
+ * Enable the 'index'ed multicast address in the host memory's multicast_list
+ */
+int
+ql_add_multicast_address(qlge_t *qlge, int index)
+{
+ int rtn_val = DDI_FAILURE;
+ uint32_t offset;
+ uint32_t value1, value2;
+
+ /* Acquire the required semaphore */
+ if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
+ return (rtn_val);
+ }
+
+ /* Program Offset0 - lower 32 bits of the MAC address */
+ offset = 0;
+ value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
+ (index << 4) | offset;
+ value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
+ |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
+ |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
+ |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
+ if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS)
+ goto out;
+
+ /* Program offset1: upper 16 bits of the MAC address */
+ offset = 1;
+ value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
+	    (index << 4) | offset;
+ value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8)
+ |qlge->multicast_list[index].addr.ether_addr_octet[1]);
+ if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
+ goto out;
+ }
+ rtn_val = DDI_SUCCESS;
+out:
+ ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
+ return (rtn_val);
+}
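+
+/*
+ * Register layout illustration (hypothetical address, not driver code):
+ * adding 01:00:5e:01:02:03 at index 2 performs the two writes above via
+ * ql_write_mac_proto_regs():
+ *	offset 0: data = 0x5e010203	(octets 2..5, lower 32 bits)
+ *	offset 1: data = 0x00000100	(octets 0..1, upper 16 bits)
+ * with the index register carrying
+ * MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
+ * (2 << 4) | offset.
+ */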
+
+/*
+ * Disable the 'index'ed multicast address in the host memory's multicast_list
+ */
+int
+ql_remove_multicast_address(qlge_t *qlge, int index)
+{
+ int rtn_val = DDI_FAILURE;
+ uint32_t offset;
+ uint32_t value1, value2;
+
+ /* Acquire the required semaphore */
+ if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
+ return (rtn_val);
+ }
+ /* Program Offset0 - lower 32 bits of the MAC address */
+ offset = 0;
+	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset) | (index << 4);
+ value2 =
+ ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
+ |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
+ |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
+ |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
+ if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
+ goto out;
+ }
+ /* Program offset1: upper 16 bits of the MAC address */
+ offset = 1;
+	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset) | (index << 4);
+ value2 = 0;
+ if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
+ goto out;
+ }
+ rtn_val = DDI_SUCCESS;
+out:
+ ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
+ return (rtn_val);
+}
+
+/*
+ * Add a new multicast address to the list of supported multicast addresses.
+ * This function is called after the OS calls gld_set_multicast (GLDv2)
+ * or m_multicst (GLDv3).
+ *
+ * Restriction:
+ * The maximum number of multicast addresses is limited by the hardware.
+ */
+int
+ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep)
+{
+ uint32_t index = qlge->multicast_list_count;
+ int rval = DDI_SUCCESS;
+ int status;
+
+ if ((ep[0] & 01) == 0) {
+ rval = EINVAL;
+ goto exit;
+ }
+
+	/* if there is available space in multicast_list, then add it */
+ if (index < MAX_MULTICAST_LIST_SIZE) {
+ bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet,
+ ETHERADDRL);
+		(void) ql_add_multicast_address(qlge, index);
+		/* increment the total number of addresses in multicast list */
+		qlge->multicast_list_count++;
+ QL_PRINT(DBG_GLD,
+ ("%s(%d): added to index of multicast list= 0x%x, "
+ "total %d\n", __func__, qlge->instance, index,
+ qlge->multicast_list_count));
+
+ if (index > MAX_MULTICAST_HW_SIZE) {
+ if (!qlge->multicast_promisc) {
+ status = ql_set_routing_reg(qlge,
+ RT_IDX_ALLMULTI_SLOT,
+ RT_IDX_MCAST, 1);
+ if (status) {
+ cmn_err(CE_WARN,
+ "Failed to init routing reg "
+ "for mcast promisc mode.");
+ rval = ENOENT;
+ goto exit;
+ }
+ qlge->multicast_promisc = B_TRUE;
+ }
+ }
+ } else {
+ rval = ENOENT;
+ }
+exit:
+ return (rval);
+}
+
+/*
+ * Remove an old multicast address from the list of supported multicast
+ * addresses. This function is called after the OS calls gld_set_multicast
+ * (GLDv2) or m_multicst (GLDv3).
+ * The maximum number of multicast addresses is limited by the hardware.
+ */
+int
+ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
+{
+ uint32_t total = qlge->multicast_list_count;
+ int i = 0;
+ int rmv_index = 0;
+ size_t length = sizeof (ql_multicast_addr);
+ int status;
+
+ for (i = 0; i < total; i++) {
+ if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
+ continue;
+ }
+
+ rmv_index = i;
+		/* block-move the rest of the multicast addresses forward */
+ length = ((total -1) -i) * sizeof (ql_multicast_addr);
+ if (length > 0) {
+ bcopy(&qlge->multicast_list[i+1],
+ &qlge->multicast_list[i], length);
+ }
+ qlge->multicast_list_count--;
+ if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
+ /*
+ * there is a deletion in multicast list table,
+ * re-enable them
+ */
+ for (i = rmv_index; i < qlge->multicast_list_count;
+ i++) {
+ ql_add_multicast_address(qlge, i);
+ }
+ /* and disable the last one */
+ ql_remove_multicast_address(qlge, i);
+
+ /* disable multicast promiscuous mode */
+ if (qlge->multicast_promisc) {
+ status = ql_set_routing_reg(qlge,
+ RT_IDX_ALLMULTI_SLOT,
+ RT_IDX_MCAST, 0);
+ if (status) {
+ cmn_err(CE_WARN,
+ "Failed to init routing reg for "
+ "mcast promisc mode.");
+ goto exit;
+ }
+ /* write to config register */
+ qlge->multicast_promisc = B_FALSE;
+ }
+ }
+ break;
+ }
+exit:
+ return (DDI_SUCCESS);
+}
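+
+/*
+ * Removal illustration (hypothetical list, not driver code): with five
+ * entries A B C D E and C removed, D and E are block-moved down to give
+ * A B D E, entries 2..3 are re-programmed into the hardware, and the
+ * now-stale hardware slot 4 is disabled via ql_remove_multicast_address().
+ */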
+
+/*
+ * Read an XGMAC register
+ */
+int
+ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
+{
+ int rtn_val = DDI_FAILURE;
+
+ /* wait for XGMAC Address register RDY bit set */
+ if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
+ BIT_SET, 10) != DDI_SUCCESS) {
+ goto out;
+ }
+ /* start rx transaction */
+ ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);
+
+ /*
+ * wait for XGMAC Address register RDY bit set,
+ * which indicates data is ready
+ */
+ if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
+ BIT_SET, 10) != DDI_SUCCESS) {
+ goto out;
+ }
+	/* read data from the XGMAC_DATA register */
+ *val = ql_read_reg(qlge, REG_XGMAC_DATA);
+ rtn_val = DDI_SUCCESS;
+out:
+ return (rtn_val);
+}
+
+/*
+ * Implement checksum offload for IPv4 packets
+ */
+static void
+ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
+ struct ob_mac_iocb_req *mac_iocb_ptr)
+{
+ struct ip *iphdr = NULL;
+ struct ether_header *ethhdr;
+ struct ether_vlan_header *ethvhdr;
+ struct tcphdr *tcp_hdr;
+ uint32_t etherType;
+ int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
+ int ip_hdr_off, tcp_udp_hdr_off, hdr_off;
+
+ ethhdr = (struct ether_header *)((void *)bp);
+ ethvhdr = (struct ether_vlan_header *)((void *)bp);
+	/* Is this a VLAN packet? */
+ if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
+ mac_hdr_len = sizeof (struct ether_vlan_header);
+ etherType = ntohs(ethvhdr->ether_type);
+ } else {
+ mac_hdr_len = sizeof (struct ether_header);
+ etherType = ntohs(ethhdr->ether_type);
+ }
+ /* Is this IPv4 or IPv6 packet? */
+ if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
+ IPV4_VERSION) {
+ if (etherType == ETHERTYPE_IP /* 0800 */) {
+ iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
+ } else {
+ /* EMPTY */
+			QL_PRINT(DBG_TX,
+			    ("%s(%d): non-IP packet, EtherType 0x%x\n",
+			    __func__, qlge->instance, etherType));
+ }
+ }
+ /* ipV4 packets */
+ if (iphdr != NULL) {
+
+ ip_hdr_len = IPH_HDR_LENGTH(iphdr);
+ QL_PRINT(DBG_TX,
+ ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
+ " %d bytes \n", __func__, qlge->instance, ip_hdr_len));
+
+ ip_hdr_off = mac_hdr_len;
+ QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
+ __func__, qlge->instance, ip_hdr_len));
+
+ mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
+ OB_MAC_IOCB_REQ_IPv4);
+
+ if (pflags & HCK_IPV4_HDRCKSUM) {
+ QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
+ __func__, qlge->instance));
+ mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
+ mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
+ OB_MAC_IOCB_REQ_IC);
+ iphdr->ip_sum = 0;
+ mac_iocb_ptr->hdr_off = (uint16_t)
+ cpu_to_le16(ip_hdr_off);
+ }
+ if (pflags & HCK_FULLCKSUM) {
+ if (iphdr->ip_p == IPPROTO_TCP) {
+ tcp_hdr =
+ (struct tcphdr *)(void *)
+ ((uint8_t *)(void *)iphdr + ip_hdr_len);
+ QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
+ __func__, qlge->instance));
+ mac_iocb_ptr->opcode =
+ OPCODE_OB_MAC_OFFLOAD_IOCB;
+ mac_iocb_ptr->flag1 =
+ (uint8_t)(mac_iocb_ptr->flag1 |
+ OB_MAC_IOCB_REQ_TC);
+ mac_iocb_ptr->flag2 =
+ (uint8_t)(mac_iocb_ptr->flag2 |
+ OB_MAC_IOCB_REQ_IC);
+ iphdr->ip_sum = 0;
+ tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
+ tcp_udp_hdr_len = tcp_hdr->th_off*4;
+ QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
+ __func__, qlge->instance, tcp_udp_hdr_len));
+ hdr_off = ip_hdr_off;
+ tcp_udp_hdr_off <<= 6;
+ hdr_off |= tcp_udp_hdr_off;
+ mac_iocb_ptr->hdr_off =
+ (uint16_t)cpu_to_le16(hdr_off);
+ mac_iocb_ptr->protocol_hdr_len = (uint16_t)
+ cpu_to_le16(mac_hdr_len + ip_hdr_len +
+ tcp_udp_hdr_len);
+
+ /*
+				 * if the chip is unable to do the pseudo-
+				 * header checksum calculation, do it here
+				 * and place the result in the data passed
+				 * to the chip
+ */
+ if (qlge->cfg_flags &
+ CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
+ ql_pseudo_cksum((uint8_t *)iphdr);
+ }
+ } else if (iphdr->ip_p == IPPROTO_UDP) {
+ QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
+ __func__, qlge->instance));
+ mac_iocb_ptr->opcode =
+ OPCODE_OB_MAC_OFFLOAD_IOCB;
+ mac_iocb_ptr->flag1 =
+ (uint8_t)(mac_iocb_ptr->flag1 |
+ OB_MAC_IOCB_REQ_UC);
+ mac_iocb_ptr->flag2 =
+ (uint8_t)(mac_iocb_ptr->flag2 |
+ OB_MAC_IOCB_REQ_IC);
+ iphdr->ip_sum = 0;
+ tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
+ tcp_udp_hdr_len = sizeof (struct udphdr);
+ QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
+ __func__, qlge->instance, tcp_udp_hdr_len));
+ hdr_off = ip_hdr_off;
+ tcp_udp_hdr_off <<= 6;
+ hdr_off |= tcp_udp_hdr_off;
+ mac_iocb_ptr->hdr_off =
+ (uint16_t)cpu_to_le16(hdr_off);
+ mac_iocb_ptr->protocol_hdr_len = (uint16_t)
+ cpu_to_le16(mac_hdr_len + ip_hdr_len
+ + tcp_udp_hdr_len);
+
+ /*
+				 * if the chip is unable to calculate the
+				 * pseudo-header checksum, do it here and
+				 * place the result in the data passed to
+				 * the chip
+ */
+ if (qlge->cfg_flags &
+ CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
+ ql_pseudo_cksum((uint8_t *)iphdr);
+ }
+ }
+ }
+ }
+}
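+
+/*
+ * hdr_off encoding illustration (example values, not driver code): for
+ * an untagged TCP/IPv4 frame with no IP options,
+ *	mac_hdr_len = 14, ip_hdr_len = 20
+ *	ip_hdr_off = 14, tcp_udp_hdr_off = 34
+ * so hdr_off = 14 | (34 << 6) = 0x88e: the low six bits carry the IP
+ * header offset and the upper bits carry the transport header offset.
+ */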
+
+/*
+ * For TSO/LSO:
+ * MAC frame transmission with TCP large segment offload is performed in the
+ * same way as the MAC frame transmission with checksum offload with the
+ * exception that the maximum TCP segment size (MSS) must be specified to
+ * allow the chip to segment the data into legal sized frames.
+ * The host also needs to calculate a pseudo-header checksum over the
+ * following fields:
+ * Source IP Address, Destination IP Address, and the Protocol.
+ * The TCP length is not included in the pseudo-header calculation.
+ * The pseudo-header checksum is placed in the TCP checksum field of the
+ * prototype header.
+ */
+static void
+ql_lso_pseudo_cksum(uint8_t *buf)
+{
+ uint32_t cksum;
+ uint16_t iphl;
+ uint16_t proto;
+
+ /*
+ * Calculate the LSO pseudo-header checksum.
+ */
+ iphl = (uint16_t)(4 * (buf[0] & 0xF));
+ cksum = proto = buf[9];
+ cksum += (((uint16_t)buf[12])<<8) + buf[13];
+ cksum += (((uint16_t)buf[14])<<8) + buf[15];
+ cksum += (((uint16_t)buf[16])<<8) + buf[17];
+ cksum += (((uint16_t)buf[18])<<8) + buf[19];
+ cksum = (cksum>>16) + (cksum & 0xFFFF);
+ cksum = (cksum>>16) + (cksum & 0xFFFF);
+
+ /*
+ * Point it to the TCP/UDP header, and
+ * update the checksum field.
+ */
+ buf += iphl + ((proto == IPPROTO_TCP) ?
+ TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
+
+ *(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
+}
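+
+/*
+ * Worked example (hypothetical addresses): for a TCP packet from
+ * 192.168.1.1 to 192.168.1.2 the 16-bit one's-complement sum built
+ * above is
+ *	0x0006 (protocol) + 0xc0a8 + 0x0101 + 0xc0a8 + 0x0102
+ *	= 0x18359 -> fold the carry -> 0x835a
+ * and 0x835a is stored, via htons(), in the TCP checksum field of the
+ * prototype header; the chip is expected to fold in the TCP length and
+ * payload and perform the final complement when it builds each segment.
+ */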
+
+/*
+ * Tell the hardware to do Large Send Offload (LSO)
+ *
+ * Some fields in the ob_mac_iocb must be set so the hardware knows what
+ * kind of packet is being sent (TCP or UDP), whether a VLAN tag needs to
+ * be inserted in the right place in the packet, etc., so that the hardware
+ * can process the packet correctly.
+ */
+static void
+ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp,
+ struct ob_mac_iocb_req *mac_iocb_ptr)
+{
+ struct ip *iphdr = NULL;
+ struct ether_header *ethhdr;
+ struct ether_vlan_header *ethvhdr;
+ struct tcphdr *tcp_hdr;
+ struct udphdr *udp_hdr;
+ uint32_t etherType;
+ uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
+ uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off;
+
+ ethhdr = (struct ether_header *)(void *)bp;
+ ethvhdr = (struct ether_vlan_header *)(void *)bp;
+
+	/* Is this a VLAN packet? */
+ if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
+ mac_hdr_len = sizeof (struct ether_vlan_header);
+ etherType = ntohs(ethvhdr->ether_type);
+ } else {
+ mac_hdr_len = sizeof (struct ether_header);
+ etherType = ntohs(ethhdr->ether_type);
+ }
+ /* Is this IPv4 or IPv6 packet? */
+ if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) ==
+ IPV4_VERSION) {
+ if (etherType == ETHERTYPE_IP /* 0800 */) {
+ iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
+ } else {
+ /* EMPTY */
+			QL_PRINT(DBG_TX, ("%s(%d): non-IP packet,"
+			    " EtherType 0x%x\n",
+			    __func__, qlge->instance, etherType));
+ }
+ }
+
+ if (iphdr != NULL) { /* ipV4 packets */
+ ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr);
+ QL_PRINT(DBG_TX,
+ ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
+ " bytes \n", __func__, qlge->instance, ip_hdr_len));
+
+ ip_hdr_off = mac_hdr_len;
+ QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
+ __func__, qlge->instance, ip_hdr_len));
+
+ mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
+ OB_MAC_IOCB_REQ_IPv4);
+ if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) {
+ if (iphdr->ip_p == IPPROTO_TCP) {
+ tcp_hdr = (struct tcphdr *)(void *)
+ ((uint8_t *)(void *)iphdr +
+ ip_hdr_len);
+ QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP "
+ "packet\n",
+ __func__, qlge->instance));
+ mac_iocb_ptr->opcode =
+ OPCODE_OB_MAC_OFFLOAD_IOCB;
+ mac_iocb_ptr->flag1 =
+ (uint8_t)(mac_iocb_ptr->flag1 |
+ OB_MAC_IOCB_REQ_LSO);
+ iphdr->ip_sum = 0;
+ tcp_udp_hdr_off =
+ (uint16_t)(mac_hdr_len+ip_hdr_len);
+ tcp_udp_hdr_len =
+ (uint16_t)(tcp_hdr->th_off*4);
+ QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
+ __func__, qlge->instance, tcp_udp_hdr_len));
+ hdr_off = ip_hdr_off;
+ tcp_udp_hdr_off <<= 6;
+ hdr_off |= tcp_udp_hdr_off;
+ mac_iocb_ptr->hdr_off =
+ (uint16_t)cpu_to_le16(hdr_off);
+ mac_iocb_ptr->protocol_hdr_len = (uint16_t)
+ cpu_to_le16(mac_hdr_len + ip_hdr_len +
+ tcp_udp_hdr_len);
+ mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
+
+ /*
+				 * if the chip is unable to calculate the
+				 * pseudo-header checksum, do it here and
+				 * place the result in the data passed to
+				 * the chip
+ */
+ if (qlge->cfg_flags &
+ CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
+ ql_lso_pseudo_cksum((uint8_t *)iphdr);
+ } else if (iphdr->ip_p == IPPROTO_UDP) {
+ udp_hdr = (struct udphdr *)(void *)
+ ((uint8_t *)(void *)iphdr
+ + ip_hdr_len);
+ QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP "
+ "packet\n",
+ __func__, qlge->instance));
+ mac_iocb_ptr->opcode =
+ OPCODE_OB_MAC_OFFLOAD_IOCB;
+ mac_iocb_ptr->flag1 =
+ (uint8_t)(mac_iocb_ptr->flag1 |
+ OB_MAC_IOCB_REQ_LSO);
+ iphdr->ip_sum = 0;
+ tcp_udp_hdr_off =
+ (uint16_t)(mac_hdr_len+ip_hdr_len);
+ tcp_udp_hdr_len =
+ (uint16_t)(udp_hdr->uh_ulen*4);
+ QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
+ __func__, qlge->instance, tcp_udp_hdr_len));
+ hdr_off = ip_hdr_off;
+ tcp_udp_hdr_off <<= 6;
+ hdr_off |= tcp_udp_hdr_off;
+ mac_iocb_ptr->hdr_off =
+ (uint16_t)cpu_to_le16(hdr_off);
+ mac_iocb_ptr->protocol_hdr_len = (uint16_t)
+ cpu_to_le16(mac_hdr_len + ip_hdr_len +
+ tcp_udp_hdr_len);
+ mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
+
+ /*
+				 * if the chip is unable to do the pseudo-
+				 * header checksum calculation, do it here,
+				 * then place the result in the data passed
+				 * to the chip
+ */
+ if (qlge->cfg_flags &
+ CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
+ ql_lso_pseudo_cksum((uint8_t *)iphdr);
+ }
+ }
+ }
+}
+
+/*
+ * Generic packet-sending function, used to send one packet.
+ */
+int
+ql_send_common(struct tx_ring *tx_ring, mblk_t *mp)
+{
+ struct tx_ring_desc *tx_cb;
+ struct ob_mac_iocb_req *mac_iocb_ptr;
+ mblk_t *tp;
+ size_t msg_len = 0;
+ size_t off;
+ caddr_t bp;
+ size_t nbyte, total_len;
+ uint_t i = 0;
+ int j = 0, frags = 0;
+ uint32_t phy_addr_low, phy_addr_high;
+ uint64_t phys_addr;
+ clock_t now;
+ uint32_t pflags = 0;
+ uint32_t mss = 0;
+ enum tx_mode_t tx_mode;
+ struct oal_entry *oal_entry;
+ int status;
+ uint_t ncookies, oal_entries, max_oal_entries;
+ size_t max_seg_len = 0;
+ boolean_t use_lso = B_FALSE;
+ struct oal_entry *tx_entry = NULL;
+ struct oal_entry *last_oal_entry;
+ qlge_t *qlge = tx_ring->qlge;
+ ddi_dma_cookie_t dma_cookie;
+ size_t tx_buf_len = QL_MAX_COPY_LENGTH;
+ int force_pullup = 0;
+
+ tp = mp;
+ total_len = msg_len = 0;
+	max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS - 1;
+
+ /* Calculate number of data and segments in the incoming message */
+ for (tp = mp; tp != NULL; tp = tp->b_cont) {
+ nbyte = MBLKL(tp);
+ total_len += nbyte;
+ max_seg_len = max(nbyte, max_seg_len);
+		QL_PRINT(DBG_TX, ("segment %d: %d bytes\n", frags, nbyte));
+ frags++;
+ }
+
+ if (total_len >= QL_LSO_MAX) {
+ freemsg(mp);
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n",
+ __func__, (int)total_len);
+#endif
+		/* the mblk has been consumed (freed), so report success */
+		return (DDI_SUCCESS);
+ }
+
+ bp = (caddr_t)mp->b_rptr;
+ if (bp[0] & 1) {
+ if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet,
+ ETHERADDRL) == 0) {
+ QL_PRINT(DBG_TX, ("Broadcast packet\n"));
+ tx_ring->brdcstxmt++;
+ } else {
+ QL_PRINT(DBG_TX, ("multicast packet\n"));
+ tx_ring->multixmt++;
+ }
+ }
+
+ tx_ring->obytes += total_len;
+	tx_ring->opackets++;
+
+ QL_PRINT(DBG_TX, ("total requested sending data length: %d, in %d segs,"
+ " max seg len: %d\n", total_len, frags, max_seg_len));
+
+ /* claim a free slot in tx ring */
+ tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];
+
+ /* get the tx descriptor */
+ mac_iocb_ptr = tx_cb->queue_entry;
+
+ bzero((void *)mac_iocb_ptr, sizeof (*mac_iocb_ptr));
+
+ ASSERT(tx_cb->mp == NULL);
+
+ /*
+ * Decide to use DMA map or copy mode.
+ * DMA map mode must be used when the total msg length is more than the
+ * tx buffer length.
+ */
+
+ if (total_len > tx_buf_len)
+ tx_mode = USE_DMA;
+ else if (max_seg_len > QL_MAX_COPY_LENGTH)
+ tx_mode = USE_DMA;
+ else
+ tx_mode = USE_COPY;
+
+ if (qlge->chksum_cap) {
+ hcksum_retrieve(mp, NULL, NULL, NULL,
+ NULL, NULL, NULL, &pflags);
+ QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
+ "is 0x%x \n", pflags, qlge->chksum_cap));
+ if (qlge->lso_enable) {
+ uint32_t lso_flags = 0;
+ lso_info_get(mp, &mss, &lso_flags);
+ use_lso = (lso_flags == HW_LSO);
+ }
+ QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
+ mss, use_lso));
+ }
+
+do_pullup:
+
+ /* concatenate all frags into one large packet if too fragmented */
+	if (((tx_mode == USE_DMA) && (frags > QL_MAX_TX_DMA_HANDLES)) ||
+ force_pullup) {
+ mblk_t *mp1;
+ if ((mp1 = msgpullup(mp, -1)) != NULL) {
+ freemsg(mp);
+ mp = mp1;
+ frags = 1;
+ } else {
+ tx_ring->tx_fail_dma_bind++;
+ goto bad;
+ }
+ }
+
+ tx_cb->tx_bytes = (uint32_t)total_len;
+ tx_cb->mp = mp;
+ tx_cb->tx_dma_handle_used = 0;
+
+ if (tx_mode == USE_DMA) {
+ msg_len = total_len;
+
+ mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
+ mac_iocb_ptr->tid = tx_ring->prod_idx;
+ mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
+ mac_iocb_ptr->txq_idx = tx_ring->wq_id;
+
+ tx_entry = &mac_iocb_ptr->oal_entry[0];
+ oal_entry = NULL;
+
+ for (tp = mp, oal_entries = j = 0; tp != NULL;
+ tp = tp->b_cont) {
+ /* if too many tx dma handles needed */
+ if (j >= QL_MAX_TX_DMA_HANDLES) {
+ tx_ring->tx_no_dma_handle++;
+ if (!force_pullup) {
+ force_pullup = 1;
+ goto do_pullup;
+ } else {
+ goto bad;
+ }
+ }
+ nbyte = (uint16_t)MBLKL(tp);
+ if (nbyte == 0)
+ continue;
+
+ status = ddi_dma_addr_bind_handle(
+ tx_cb->tx_dma_handle[j], NULL,
+ (caddr_t)tp->b_rptr, nbyte,
+ DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
+ 0, &dma_cookie, &ncookies);
+
+ QL_PRINT(DBG_TX, ("map sending data segment: %d, "
+ "length: %d, spans in %d cookies\n",
+ j, nbyte, ncookies));
+
+ if (status != DDI_DMA_MAPPED) {
+ goto bad;
+ }
+ /*
+ * Each fragment can span several cookies. One cookie
+ * will use one tx descriptor to transmit.
+ */
+ for (i = ncookies; i > 0; i--, tx_entry++,
+ oal_entries++) {
+ /*
+ * The number of TX descriptors that can be
+ * saved in tx iocb and oal list is limited
+ */
+ if (oal_entries > max_oal_entries) {
+ tx_ring->tx_no_dma_cookie++;
+ if (!force_pullup) {
+ force_pullup = 1;
+ goto do_pullup;
+ } else {
+ goto bad;
+ }
+ }
+
+ if ((oal_entries == TX_DESC_PER_IOCB) &&
+ !oal_entry) {
+ /*
+				 * Time to switch to an OAL list. The
+				 * last entry should be copied to the
+				 * first entry in the OAL list.
+ */
+ oal_entry = tx_cb->oal;
+ tx_entry =
+ &mac_iocb_ptr->oal_entry[
+ TX_DESC_PER_IOCB-1];
+ bcopy(tx_entry, oal_entry,
+ sizeof (*oal_entry));
+
+ /*
+ * last entry should be updated to
+ * point to the extended oal list itself
+ */
+ tx_entry->buf_addr_low =
+ cpu_to_le32(
+ LS_64BITS(tx_cb->oal_dma_addr));
+ tx_entry->buf_addr_high =
+ cpu_to_le32(
+ MS_64BITS(tx_cb->oal_dma_addr));
+ /*
+ * Point tx_entry to the oal list
+ * second entry
+ */
+ tx_entry = &oal_entry[1];
+ }
+
+ tx_entry->buf_len =
+ (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
+ phys_addr = dma_cookie.dmac_laddress;
+ tx_entry->buf_addr_low =
+ cpu_to_le32(LS_64BITS(phys_addr));
+ tx_entry->buf_addr_high =
+ cpu_to_le32(MS_64BITS(phys_addr));
+
+ last_oal_entry = tx_entry;
+
+ if (i > 1)
+ ddi_dma_nextcookie(
+ tx_cb->tx_dma_handle[j],
+ &dma_cookie);
+ }
+ j++;
+ }
+ /*
+ * if OAL is used, the last oal entry in tx iocb indicates
+ * number of additional address/len pairs in OAL
+ */
+ if (oal_entries > TX_DESC_PER_IOCB) {
+ tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
+ tx_entry->buf_len = (uint32_t)
+ (cpu_to_le32((sizeof (struct oal_entry) *
+			    (oal_entries - TX_DESC_PER_IOCB + 1)) |
+			    OAL_CONT_ENTRY));
+ }
+ last_oal_entry->buf_len = cpu_to_le32(
+		    le32_to_cpu(last_oal_entry->buf_len) | OAL_LAST_ENTRY);
+
+ tx_cb->tx_dma_handle_used = j;
+ QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
+ j, oal_entries));
+
+ bp = (caddr_t)mp->b_rptr;
+ }
+ if (tx_mode == USE_COPY) {
+ bp = tx_cb->copy_buffer;
+ off = 0;
+ nbyte = 0;
+ frags = 0;
+ /*
+ * Copy up to tx_buf_len of the transmit data
+ * from mp to tx buffer
+ */
+ for (tp = mp; tp != NULL; tp = tp->b_cont) {
+ nbyte = MBLKL(tp);
+ if ((off + nbyte) <= tx_buf_len) {
+ bcopy(tp->b_rptr, &bp[off], nbyte);
+ off += nbyte;
+				frags++;
+ }
+ }
+
+ msg_len = off;
+
+ mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
+ mac_iocb_ptr->tid = tx_ring->prod_idx;
+ mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
+ mac_iocb_ptr->txq_idx = tx_ring->wq_id;
+
+		QL_PRINT(DBG_TX, ("Copy Mode: actual sent data length is: %d, "
+		    "from %d segments\n", msg_len, frags));
+
+ phys_addr = tx_cb->copy_buffer_dma_addr;
+ phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
+ phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));
+
+ QL_DUMP(DBG_TX, "\t requested sending data:\n",
+ (uint8_t *)tx_cb->copy_buffer, 8, total_len);
+
+ mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
+ cpu_to_le32(msg_len | OAL_LAST_ENTRY);
+ mac_iocb_ptr->oal_entry[0].buf_addr_low = phy_addr_low;
+ mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;
+
+		freemsg(mp); /* no longer needed; the data has been copied */
+ tx_cb->mp = NULL;
+ } /* End of Copy Mode */
+
+ /* Do TSO/LSO on TCP packet? */
+ if (use_lso && mss) {
+ ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
+ } else if (pflags & qlge->chksum_cap) {
+ /* Do checksum offloading */
+ ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
+ }
+
+ /* let device know the latest outbound IOCB */
+ (void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
+ (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
+ (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);
+
+ if (tx_mode == USE_DMA) {
+ /* let device know the latest outbound OAL if necessary */
+ if (oal_entries > TX_DESC_PER_IOCB) {
+ (void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
+ (off_t)0,
+ (sizeof (struct oal_entry) *
+ (oal_entries -TX_DESC_PER_IOCB+1)),
+ DDI_DMA_SYNC_FORDEV);
+ }
+ } else { /* for USE_COPY mode, tx buffer has changed */
+ /* let device know the latest change */
+ (void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
+ /* copy buf offset */
+ (off_t)(sizeof (oal_entry) * MAX_SG_ELEMENTS),
+ msg_len, DDI_DMA_SYNC_FORDEV);
+ }
+
+ /* save how the packet was sent */
+ tx_cb->tx_type = tx_mode;
+
+ QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
+ /* reduce the number of available tx slot */
+ atomic_dec_32(&tx_ring->tx_free_count);
+
+ tx_ring->prod_idx++;
+ if (tx_ring->prod_idx >= tx_ring->wq_len)
+ tx_ring->prod_idx = 0;
+
+ now = ddi_get_lbolt();
+ qlge->last_tx_time = now;
+
+ return (DDI_SUCCESS);
+
+bad:
+ /*
+	 * if for any reason the driver cannot send, free
+	 * the message pointer, mp
+ */
+ now = ddi_get_lbolt();
+ freemsg(mp);
+ mp = NULL;
+ for (i = 0; i < j; i++)
+ (void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);
+
+ QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x",
+ __func__, qlge->instance, (int)now));
+
+ return (DDI_SUCCESS);
+}
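+
+/*
+ * OAL chaining sketch (assuming, for illustration only, that
+ * TX_DESC_PER_IOCB is 8): with 11 DMA cookies, cookies 0..6 fill the
+ * first seven IOCB entries; when entry 7 is reached it is copied to
+ * tx_cb->oal[0] and rewritten to point at the external OAL, so cookies
+ * 7..10 land in oal[0..3]. The continuation entry's buf_len is OR'ed
+ * with OAL_CONT_ENTRY and the final entry's with OAL_LAST_ENTRY, as
+ * done in ql_send_common() above.
+ */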
+
+/*
+ * Initializes hardware and driver software flags before the driver
+ * is finally ready to work.
+ */
+int
+ql_do_start(qlge_t *qlge)
+{
+ int i;
+ struct rx_ring *rx_ring;
+ uint16_t lbq_buf_size;
+ int rings_done;
+
+ ASSERT(qlge != NULL);
+
+ mutex_enter(&qlge->hw_mutex);
+
+ /* Reset adapter */
+ ql_asic_reset(qlge);
+
+ lbq_buf_size = (uint16_t)
+ ((qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE);
+ if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
+ qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
+#endif
+ /*
+ * Check if any ring has buffers still with upper layers
+ * If buffers are pending with upper layers, we use the
+ * existing buffers and don't reallocate new ones
+ * Unfortunately there is no way to evict buffers from
+ * upper layers. Using buffers with the current size may
+ * cause slightly sub-optimal performance, but that seems
+ * to be the easiest way to handle this situation.
+ */
+ rings_done = 0;
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ if (rx_ring->rx_indicate == 0)
+ rings_done++;
+ else
+ break;
+ }
+ /*
+ * No buffers pending with upper layers;
+		 * reallocate them for the new MTU size
+ */
+ if (rings_done >= qlge->rx_ring_count) {
+ /* free large buffer pool */
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ if (rx_ring->type != TX_Q) {
+ ql_free_sbq_buffers(rx_ring);
+ ql_free_lbq_buffers(rx_ring);
+ }
+ }
+ /* reallocate large buffer pool */
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ if (rx_ring->type != TX_Q) {
+ ql_alloc_sbufs(qlge, rx_ring);
+ ql_alloc_lbufs(qlge, rx_ring);
+ }
+ }
+ }
+ }
+
+ if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "qlge bringup adapter failed");
+ ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
+ mutex_exit(&qlge->hw_mutex);
+ return (DDI_FAILURE);
+ }
+
+ mutex_exit(&qlge->hw_mutex);
+
+ /* Get current link state */
+ qlge->port_link_state = ql_get_link_state(qlge);
+
+ if (qlge->port_link_state == LS_UP) {
+ QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
+ __func__, qlge->instance));
+ /* If driver detects a carrier on */
+ CARRIER_ON(qlge);
+ } else {
+ QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
+ __func__, qlge->instance));
+ /* If driver detects a lack of carrier */
+ CARRIER_OFF(qlge);
+ }
+ qlge->mac_flags = QL_MAC_STARTED;
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Stop the currently running driver.
+ * The driver must stop routing new packets to itself and wait until
+ * all pending tx/rx buffers have been freed.
+ */
+int
+ql_do_stop(qlge_t *qlge)
+{
+ int rc = DDI_FAILURE;
+ uint32_t i, j, k;
+ struct bq_desc *sbq_desc, *lbq_desc;
+ struct rx_ring *rx_ring;
+
+ ASSERT(qlge != NULL);
+
+ CARRIER_OFF(qlge);
+
+	rc = ql_bringdown_adapter(qlge);
+	if (rc != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "qlge bringdown adapter failed.");
+	}
+
+ for (k = 0; k < qlge->rx_ring_count; k++) {
+ rx_ring = &qlge->rx_ring[k];
+ if (rx_ring->type != TX_Q) {
+ j = rx_ring->lbq_use_head;
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_NOTE, "ring %d: move %d lbufs in use list"
+ " to free list %d\n total %d\n",
+ k, rx_ring->lbuf_in_use_count,
+ rx_ring->lbuf_free_count,
+ rx_ring->lbuf_in_use_count +
+ rx_ring->lbuf_free_count);
+#endif
+ for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
+ lbq_desc = rx_ring->lbuf_in_use[j];
+ j++;
+ if (j >= rx_ring->lbq_len) {
+ j = 0;
+ }
+ if (lbq_desc->mp) {
+ atomic_inc_32(&rx_ring->rx_indicate);
+ freemsg(lbq_desc->mp);
+ }
+ }
+ rx_ring->lbq_use_head = j;
+ rx_ring->lbq_use_tail = j;
+ rx_ring->lbuf_in_use_count = 0;
+ j = rx_ring->sbq_use_head;
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_NOTE, "ring %d: move %d sbufs in use list,"
+ " to free list %d\n total %d \n",
+ k, rx_ring->sbuf_in_use_count,
+ rx_ring->sbuf_free_count,
+ rx_ring->sbuf_in_use_count +
+ rx_ring->sbuf_free_count);
+#endif
+ for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
+ sbq_desc = rx_ring->sbuf_in_use[j];
+ j++;
+ if (j >= rx_ring->sbq_len) {
+ j = 0;
+ }
+ if (sbq_desc->mp) {
+ atomic_inc_32(&rx_ring->rx_indicate);
+ freemsg(sbq_desc->mp);
+ }
+ }
+ rx_ring->sbq_use_head = j;
+ rx_ring->sbq_use_tail = j;
+ rx_ring->sbuf_in_use_count = 0;
+ }
+ }
+
+ qlge->mac_flags = QL_MAC_STOPPED;
+
+ return (rc);
+}
+
+/*
+ * Support
+ */
+
+void
+ql_disable_isr(qlge_t *qlge)
+{
+ /*
+ * disable the hardware interrupt
+ */
+ ISP_DISABLE_GLOBAL_INTRS(qlge);
+
+ qlge->flags &= ~INTERRUPTS_ENABLED;
+}
+
+/*
+ * busy wait for 'usecs' microseconds.
+ */
+void
+qlge_delay(clock_t usecs)
+{
+ drv_usecwait(usecs);
+}
+
+/*
+ * Retrieve a pointer to the saved PCI configuration.
+ */
+pci_cfg_t *
+ql_get_pci_config(qlge_t *qlge)
+{
+ return (&(qlge->pci_cfg));
+}
+
+/*
+ * Get current Link status
+ */
+static uint32_t
+ql_get_link_state(qlge_t *qlge)
+{
+ uint32_t bitToCheck = 0;
+ uint32_t temp, linkState;
+
+ if (qlge->func_number == qlge->fn0_net) {
+ bitToCheck = STS_PL0;
+ } else {
+ bitToCheck = STS_PL1;
+ }
+ temp = ql_read_reg(qlge, REG_STATUS);
+ QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
+ __func__, qlge->instance, temp));
+
+ if (temp & bitToCheck) {
+ linkState = LS_UP;
+ } else {
+ linkState = LS_DOWN;
+ }
+ if (CFG_IST(qlge, CFG_CHIP_8100)) {
+ /* for Schultz, link Speed is fixed to 10G, full duplex */
+ qlge->speed = SPEED_10G;
+ qlge->duplex = 1;
+ }
+ return (linkState);
+}
+
+/*
+ * Get current link status and report to OS
+ */
+static void
+ql_get_and_report_link_state(qlge_t *qlge)
+{
+ uint32_t cur_link_state;
+
+ /* Get current link state */
+ cur_link_state = ql_get_link_state(qlge);
+ /* if link state has changed */
+ if (cur_link_state != qlge->port_link_state) {
+
+ qlge->port_link_state = cur_link_state;
+
+ if (qlge->port_link_state == LS_UP) {
+ QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
+ __func__, qlge->instance));
+ /* If driver detects a carrier on */
+ CARRIER_ON(qlge);
+ } else {
+ QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
+ __func__, qlge->instance));
+ /* If driver detects a lack of carrier */
+ CARRIER_OFF(qlge);
+ }
+ }
+}
+
+/*
+ * timer callback function executed after timer expires
+ */
+static void
+ql_timer(void* arg)
+{
+ ql_get_and_report_link_state((qlge_t *)arg);
+}
+
+/*
+ * stop the running timer if activated
+ */
+static void
+ql_stop_timer(qlge_t *qlge)
+{
+ timeout_id_t timer_id;
+ /* Disable driver timer */
+ if (qlge->ql_timer_timeout_id != NULL) {
+ timer_id = qlge->ql_timer_timeout_id;
+ qlge->ql_timer_timeout_id = NULL;
+ (void) untimeout(timer_id);
+ }
+}
+
+/*
+ * stop then restart timer
+ */
+void
+ql_restart_timer(qlge_t *qlge)
+{
+ ql_stop_timer(qlge);
+ qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
+ qlge->ql_timer_timeout_id = timeout(ql_timer,
+ (void *)qlge, qlge->ql_timer_ticks);
+}
+
+/* ************************************************************************* */
+/*
+ * Hardware K-Stats Data Structures and Subroutines
+ */
+/* ************************************************************************* */
+static const ql_ksindex_t ql_kstats_hw[] = {
+ /* PCI related hardware information */
+ { 0, "Vendor Id" },
+ { 1, "Device Id" },
+ { 2, "Command" },
+ { 3, "Status" },
+ { 4, "Revision Id" },
+ { 5, "Cache Line Size" },
+ { 6, "Latency Timer" },
+ { 7, "Header Type" },
+ { 9, "I/O base addr" },
+ { 10, "Control Reg Base addr low" },
+ { 11, "Control Reg Base addr high" },
+ { 12, "Doorbell Reg Base addr low" },
+ { 13, "Doorbell Reg Base addr high" },
+ { 14, "Subsystem Vendor Id" },
+ { 15, "Subsystem Device ID" },
+ { 16, "PCIe Device Control" },
+ { 17, "PCIe Link Status" },
+
+ { -1, NULL },
+};
+
+/*
+ * kstat update function for PCI registers
+ */
+static int
+ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
+{
+ qlge_t *qlge;
+ kstat_named_t *knp;
+
+ if (flag != KSTAT_READ)
+ return (EACCES);
+
+ qlge = ksp->ks_private;
+ knp = ksp->ks_data;
+ (knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
+ (knp++)->value.ui32 = qlge->pci_cfg.device_id;
+ (knp++)->value.ui32 = qlge->pci_cfg.command;
+ (knp++)->value.ui32 = qlge->pci_cfg.status;
+ (knp++)->value.ui32 = qlge->pci_cfg.revision;
+ (knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
+ (knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
+ (knp++)->value.ui32 = qlge->pci_cfg.header_type;
+ (knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
+ (knp++)->value.ui32 =
+ qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
+ (knp++)->value.ui32 =
+ qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
+ (knp++)->value.ui32 =
+ qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
+ (knp++)->value.ui32 =
+ qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
+ (knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
+ (knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
+ (knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
+ (knp++)->value.ui32 = qlge->pci_cfg.link_status;
+
+ return (0);
+}
+
+static const ql_ksindex_t ql_kstats_mii[] = {
+ /* MAC/MII related hardware information */
+ { 0, "mtu"},
+
+ { -1, NULL},
+};
+
+
+/*
+ * kstat update function for MII related information.
+ */
+static int
+ql_kstats_mii_update(kstat_t *ksp, int flag)
+{
+ qlge_t *qlge;
+ kstat_named_t *knp;
+
+ if (flag != KSTAT_READ)
+ return (EACCES);
+
+ qlge = ksp->ks_private;
+ knp = ksp->ks_data;
+
+ (knp++)->value.ui32 = qlge->mtu;
+
+ return (0);
+}
+
+static const ql_ksindex_t ql_kstats_reg[] = {
+ /* Register information */
+ { 0, "System (0x08)" },
+	{ 1, "Reset/Fail Over (0x0C)" },
+	{ 2, "Function Specific Control (0x10)" },
+ { 3, "Status (0x30)" },
+ { 4, "Intr Enable (0x34)" },
+ { 5, "Intr Status1 (0x3C)" },
+ { 6, "Error Status (0x54)" },
+	{ 7, "XGMAC Flow Control (0x11C)" },
+	{ 8, "XGMAC Tx Pause Frames (0x230)" },
+	{ 9, "XGMAC Rx Pause Frames (0x388)" },
+	{ 10, "XGMAC Rx FIFO Drop Count (0x5B8)" },
+ { 11, "interrupts actually allocated" },
+ { 12, "interrupts on rx ring 0" },
+ { 13, "interrupts on rx ring 1" },
+ { 14, "interrupts on rx ring 2" },
+ { 15, "interrupts on rx ring 3" },
+ { 16, "interrupts on rx ring 4" },
+ { 17, "interrupts on rx ring 5" },
+ { 18, "interrupts on rx ring 6" },
+ { 19, "interrupts on rx ring 7" },
+ { 20, "polls on rx ring 0" },
+ { 21, "polls on rx ring 1" },
+ { 22, "polls on rx ring 2" },
+ { 23, "polls on rx ring 3" },
+ { 24, "polls on rx ring 4" },
+ { 25, "polls on rx ring 5" },
+ { 26, "polls on rx ring 6" },
+ { 27, "polls on rx ring 7" },
+ { 28, "tx no resource on ring 0" },
+ { 29, "tx dma bind fail on ring 0" },
+ { 30, "tx dma no handle on ring 0" },
+ { 31, "tx dma no cookie on ring 0" },
+ { 32, "MPI firmware major version"},
+ { 33, "MPI firmware minor version"},
+ { 34, "MPI firmware sub version"},
+
+ { -1, NULL},
+};
+
+
+/*
+ * kstat update function for device register set
+ */
+static int
+ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag)
+{
+ qlge_t *qlge;
+ kstat_named_t *knp;
+ uint32_t val32;
+ int i = 0;
+ struct tx_ring *tx_ring;
+
+ if (flag != KSTAT_READ)
+ return (EACCES);
+
+ qlge = ksp->ks_private;
+ knp = ksp->ks_data;
+
+ (knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
+ (knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
+ (knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
+ (knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
+ (knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
+ (knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
+ (knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);
+
+ if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
+ return (0);
+ }
+ ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
+ (knp++)->value.ui32 = val32;
+
+ ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
+ (knp++)->value.ui32 = val32;
+
+ ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
+ (knp++)->value.ui32 = val32;
+
+ ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
+ (knp++)->value.ui32 = val32;
+
+ ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
+
+ (knp++)->value.ui32 = qlge->intr_cnt;
+
+ for (i = 0; i < 8; i++) {
+ (knp++)->value.ui32 = qlge->rx_interrupts[i];
+ }
+
+ for (i = 0; i < 8; i++) {
+ (knp++)->value.ui32 = qlge->rx_polls[i];
+ }
+
+ tx_ring = &qlge->tx_ring[0];
+ (knp++)->value.ui32 = tx_ring->defer;
+ (knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
+ (knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
+ (knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;
+
+ (knp++)->value.ui32 = qlge->fw_version_info.major_version;
+ (knp++)->value.ui32 = qlge->fw_version_info.minor_version;
+ (knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;
+
+ return (0);
+}
+
+
+static kstat_t *
+ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
+ const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
+{
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ char *np;
+ int type;
+
+ size /= sizeof (ql_ksindex_t);
+ ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
+ KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
+ if (ksp == NULL)
+ return (NULL);
+
+ ksp->ks_private = qlge;
+ ksp->ks_update = update;
+ for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
+ switch (*np) {
+ default:
+ type = KSTAT_DATA_UINT32;
+ break;
+ case '&':
+ np += 1;
+ type = KSTAT_DATA_CHAR;
+ break;
+ }
+ kstat_named_init(knp, np, (uint8_t)type);
+ }
+ kstat_install(ksp);
+
+ return (ksp);
+}
+
+/*
+ * Set up the various kstats
+ */
+int
+ql_init_kstats(qlge_t *qlge)
+{
+ /* Hardware KStats */
+ qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
+ qlge->instance, "chip", ql_kstats_hw,
+ sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
+ if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
+ return (DDI_FAILURE);
+ }
+
+ /* MII KStats */
+ qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
+ qlge->instance, "mii", ql_kstats_mii,
+ sizeof (ql_kstats_mii), ql_kstats_mii_update);
+ if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
+ return (DDI_FAILURE);
+ }
+
+ /* REG KStats */
+ qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
+ qlge->instance, "reg", ql_kstats_reg,
+ sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
+ if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Delete all kstats
+ */
+void
+ql_fini_kstats(qlge_t *qlge)
+{
+ int i;
+
+ for (i = 0; i < QL_KSTAT_COUNT; i++) {
+ if (qlge->ql_kstats[i] != NULL)
+ kstat_delete(qlge->ql_kstats[i]);
+ }
+}
+
+/* ************************************************************************* */
+/*
+ * kstat end
+ */
+/* ************************************************************************* */
+
+/*
+ * Setup the parameters for receive and transmit rings including buffer sizes
+ * and completion queue sizes
+ */
+static int
+ql_setup_rings(qlge_t *qlge)
+{
+ uint8_t i;
+ struct rx_ring *rx_ring;
+ struct tx_ring *tx_ring;
+ uint16_t lbq_buf_size;
+
+ lbq_buf_size = (uint16_t)
+ ((qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE);
+
+ /*
+ * rx_ring[0] is always the default queue.
+ */
+ /*
+ * qlge->rx_ring_count:
+ * Total number of rx_rings. This includes a number
+ * of outbound completion handler rx_rings, and a
+ * number of inbound completion handler rx_rings.
+	 * RSS is only enabled if we have more than one rx
+	 * completion queue; with a single rx completion queue,
+	 * all rx completions go to that queue.
+ */
+
+ qlge->tx_ring_first_cq_id = qlge->rss_ring_count;
+
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ tx_ring = &qlge->tx_ring[i];
+ bzero((void *)tx_ring, sizeof (*tx_ring));
+ tx_ring->qlge = qlge;
+ tx_ring->wq_id = i;
+ tx_ring->wq_len = qlge->tx_ring_size;
+ tx_ring->wq_size = (uint32_t)(
+ tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));
+
+ /*
+ * The completion queue ID for the tx rings start
+ * immediately after the last rss completion queue.
+ */
+ tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
+ }
+
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ bzero((void *)rx_ring, sizeof (*rx_ring));
+ rx_ring->qlge = qlge;
+ rx_ring->cq_id = i;
+		rx_ring->cpu = i % qlge->rx_ring_count;
+
+ if (i < qlge->rss_ring_count) {
+ /*
+ * Inbound completions (RSS) queues
+ * Default queue is queue 0 which handles
+ * unicast plus bcast/mcast and async events.
+ * Other inbound queues handle unicast frames only.
+ */
+ rx_ring->cq_len = qlge->rx_ring_size;
+ rx_ring->cq_size = (uint32_t)
+ (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
+ rx_ring->lbq_len = NUM_LARGE_BUFFERS;
+ rx_ring->lbq_size = (uint32_t)
+ (rx_ring->lbq_len * sizeof (uint64_t));
+ rx_ring->lbq_buf_size = lbq_buf_size;
+ rx_ring->sbq_len = NUM_SMALL_BUFFERS;
+ rx_ring->sbq_size = (uint32_t)
+ (rx_ring->sbq_len * sizeof (uint64_t));
+ rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
+ rx_ring->type = RX_Q;
+
+ QL_PRINT(DBG_GLD,
+ ("%s(%d)Allocating rss completion queue %d "
+ "on cpu %d\n", __func__, qlge->instance,
+ rx_ring->cq_id, rx_ring->cpu));
+ } else {
+ /*
+ * Outbound queue handles outbound completions only
+ */
+ /* outbound cq is same size as tx_ring it services. */
+ rx_ring->cq_len = qlge->tx_ring_size;
+ rx_ring->cq_size = (uint32_t)
+ (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
+ rx_ring->lbq_len = 0;
+ rx_ring->lbq_size = 0;
+ rx_ring->lbq_buf_size = 0;
+ rx_ring->sbq_len = 0;
+ rx_ring->sbq_size = 0;
+ rx_ring->sbq_buf_size = 0;
+ rx_ring->type = TX_Q;
+
+ QL_PRINT(DBG_GLD,
+ ("%s(%d)Allocating TX completion queue %d on"
+ " cpu %d\n", __func__, qlge->instance,
+ rx_ring->cq_id, rx_ring->cpu));
+ }
+ }
+
+ return (DDI_SUCCESS);
+}
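+
+/*
+ * Completion queue layout example (hypothetical counts): with
+ * rss_ring_count = 4 and tx_ring_count = 2, rx_ring_count = 6 and
+ * tx_ring_first_cq_id = 4, so cq 0..3 are the inbound (RSS) queues,
+ * cq 0 doubling as the default queue, and cq 4..5 are the outbound
+ * completion queues servicing tx rings 0 and 1.
+ */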
+
+static int
+ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring)
+{
+ struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr;
+ void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
+ (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
+ /* first shadow area is used by wqicb's host copy of consumer index */
+ + sizeof (uint64_t);
+ uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
+ (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
+ + sizeof (uint64_t);
+ /* lrg/sml bufq pointers */
+ uint8_t *buf_q_base_reg =
+ (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr +
+ (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
+ uint64_t buf_q_base_reg_dma =
+ qlge->buf_q_ptr_base_addr_dma_attr.dma_addr +
+ (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
+ caddr_t doorbell_area =
+ qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id));
+ int err = 0;
+ uint16_t bq_len;
+ uint64_t tmp;
+ uint64_t *base_indirect_ptr;
+ int page_entries;
+
+ /* Set up the shadow registers for this ring. */
+ rx_ring->prod_idx_sh_reg = shadow_reg;
+ rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+
+ rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
+ rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;
+
+ QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
+ " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
+ rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));
+
+ buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
+ buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
+ rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
+ rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;
+
+ /* PCI doorbell mem area + 0x00 for consumer index register */
+ rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
+ rx_ring->cnsmr_idx = 0;
+ *rx_ring->prod_idx_sh_reg = 0;
+ rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
+
+ /* PCI doorbell mem area + 0x04 for valid register */
+ rx_ring->valid_db_reg = (uint32_t *)(void *)
+ ((uint8_t *)(void *)doorbell_area + 0x04);
+
+ /* PCI doorbell mem area + 0x18 for large buffer consumer */
+ rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
+ ((uint8_t *)(void *)doorbell_area + 0x18);
+
+ /* PCI doorbell mem area + 0x1c */
+ rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
+ ((uint8_t *)(void *)doorbell_area + 0x1c);
+
+ bzero((void *)cqicb, sizeof (*cqicb));
+
+ cqicb->msix_vect = (uint8_t)rx_ring->irq;
+
+ bq_len = (uint16_t)((rx_ring->cq_len == 65536) ?
+ (uint16_t)0 : (uint16_t)rx_ring->cq_len);
+ cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
+
+ cqicb->cq_base_addr_lo =
+ cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
+ cqicb->cq_base_addr_hi =
+ cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));
+
+ cqicb->prod_idx_addr_lo =
+ cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
+ cqicb->prod_idx_addr_hi =
+ cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));
+
+ /*
+ * Set up the control block load flags.
+ */
+ cqicb->flags = FLAGS_LC | /* Load queue base address */
+ FLAGS_LV | /* Load MSI-X vector */
+ FLAGS_LI; /* Load irq delay values */
+ if (rx_ring->lbq_len) {
+ /* Load lbq values */
+ cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
+ tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
+ base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
+ page_entries = 0;
+ do {
+ *base_indirect_ptr = cpu_to_le64(tmp);
+ tmp += VM_PAGE_SIZE;
+ base_indirect_ptr++;
+ page_entries++;
+ } while (page_entries < (int)(
+ ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
+
+ cqicb->lbq_addr_lo =
+ cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
+ cqicb->lbq_addr_hi =
+ cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
+ bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
+ (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
+ cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
+ bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
+ (uint16_t)rx_ring->lbq_len);
+ cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
+ rx_ring->lbq_prod_idx = 0;
+ rx_ring->lbq_curr_idx = 0;
+ }
+ if (rx_ring->sbq_len) {
+ /* Load sbq values */
+ cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
+ tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
+ base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
+ page_entries = 0;
+
+ do {
+ *base_indirect_ptr = cpu_to_le64(tmp);
+ tmp += VM_PAGE_SIZE;
+ base_indirect_ptr++;
+ page_entries++;
+ } while (page_entries < (uint32_t)
+ (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
+
+ cqicb->sbq_addr_lo =
+ cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
+ cqicb->sbq_addr_hi =
+ cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
+ cqicb->sbq_buf_size = (uint16_t)
+ cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
+ bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
+ (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
+ cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
+ rx_ring->sbq_prod_idx = 0;
+ rx_ring->sbq_curr_idx = 0;
+ }
+ switch (rx_ring->type) {
+ case TX_Q:
+ cqicb->irq_delay = (uint16_t)
+ cpu_to_le16(qlge->tx_coalesce_usecs);
+ cqicb->pkt_delay = (uint16_t)
+ cpu_to_le16(qlge->tx_max_coalesced_frames);
+ break;
+
+ case DEFAULT_Q:
+ cqicb->irq_delay = 0;
+ cqicb->pkt_delay = 0;
+ break;
+
+ case RX_Q:
+ /*
+		 * Inbound completion handling rx_rings are serviced
+		 * in separate interrupt or polling contexts.
+ */
+ cqicb->irq_delay = (uint16_t)
+ cpu_to_le16(qlge->rx_coalesce_usecs);
+ cqicb->pkt_delay = (uint16_t)
+ cpu_to_le16(qlge->rx_max_coalesced_frames);
+ break;
+ default:
+ cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
+ rx_ring->type);
+ }
+ QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
+ rx_ring->cq_id));
+ /* QL_DUMP_CQICB(qlge, cqicb); */
+ err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
+ rx_ring->cq_id);
+ if (err) {
+ cmn_err(CE_WARN, "Failed to load CQICB.");
+ return (err);
+ }
+
+ rx_ring->rx_packets_dropped_no_buffer = 0;
+ rx_ring->rx_pkt_dropped_mac_unenabled = 0;
+ rx_ring->rx_failed_sbq_allocs = 0;
+ rx_ring->rx_failed_lbq_allocs = 0;
+ rx_ring->rx_packets = 0;
+ rx_ring->rx_bytes = 0;
+ rx_ring->frame_too_long = 0;
+ rx_ring->frame_too_short = 0;
+ rx_ring->fcs_err = 0;
+
+ return (err);
+}
+
+/*
+ * start RSS
+ */
+static int
+ql_start_rss(qlge_t *qlge)
+{
+ struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
+ int status = 0;
+ int i;
+ uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;
+
+ bzero((void *)ricb, sizeof (*ricb));
+
+ ricb->base_cq = RSS_L4K;
+ ricb->flags =
+ (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
+ RSS_RT6);
+ ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);
+
+ /*
+ * Fill out the Indirection Table.
+ */
+ for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
+ hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));
+
+ (void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
+ (void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);
+
+ QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));
+
+ status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
+ if (status) {
+ cmn_err(CE_WARN, "Failed to load RICB.");
+ return (status);
+ }
+
+ return (status);
+}
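+
+/*
+ * Indirection table example: with rss_ring_count = 4 (a power of two,
+ * which the mask arithmetic above assumes), hash_cq_id[] becomes the
+ * repeating pattern 0, 1, 2, 3, 0, 1, ... so the RSS_HASH_CQ_ID_MAX
+ * hash buckets are spread round-robin across the four inbound
+ * completion queues.
+ */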
+
+/*
+ * load a tx ring control block to hw and start this ring
+ */
+static int
+ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
+{
+ struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
+ caddr_t doorbell_area =
+ qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
+ void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
+ (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
+ uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
+ (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
+ int err = 0;
+
+ /*
+ * Assign doorbell registers for this tx_ring.
+ */
+
+ /* TX PCI doorbell mem area for tx producer index */
+ tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
+ tx_ring->prod_idx = 0;
+ /* TX PCI doorbell mem area + 0x04 */
+ tx_ring->valid_db_reg = (uint32_t *)(void *)
+ ((uint8_t *)(void *)doorbell_area + 0x04);
+
+ /*
+ * Assign shadow registers for this tx_ring.
+ */
+ tx_ring->cnsmr_idx_sh_reg = shadow_reg;
+ tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
+ *tx_ring->cnsmr_idx_sh_reg = 0;
+
+ QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
+ " phys_addr 0x%lx\n",
+ __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
+ tx_ring->cnsmr_idx_sh_reg_dma));
+
+ wqicb->len =
+ (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
+ wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
+ Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
+ wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
+ wqicb->rid = 0;
+ wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
+ wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
+ wqicb->cnsmr_idx_addr_lo =
+ cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
+ wqicb->cnsmr_idx_addr_hi =
+ cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
+
+ ql_init_tx_ring(tx_ring);
+ /* QL_DUMP_WQICB(qlge, wqicb); */
+ err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
+ tx_ring->wq_id);
+
+ if (err) {
+ cmn_err(CE_WARN, "Failed to load WQICB.");
+ return (err);
+ }
+ return (err);
+}
+
+/*
+ * Set up a MAC, multicast or VLAN address for the
+ * inbound frame matching.
+ */
+int
+ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
+ uint16_t index)
+{
+ uint32_t offset = 0;
+ int status = DDI_SUCCESS;
+
+ switch (type) {
+ case MAC_ADDR_TYPE_MULTI_MAC:
+ case MAC_ADDR_TYPE_CAM_MAC: {
+ uint32_t cam_output;
+ uint32_t upper = (addr[0] << 8) | addr[1];
+ uint32_t lower =
+ (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+ (addr[5]);
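+
+		/*
+		 * For illustration: a (hypothetical) MAC address of
+		 * 00:c0:dd:11:22:33 gives upper = 0x00c0 (the first two
+		 * octets) and lower = 0xdd112233 (the remaining four),
+		 * which are written into the CAM by the register writes
+		 * below.
+		 */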
+
+ QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
+ MAC_ADDR_TYPE_MULTI_MAC) ?
+ "MULTICAST" : "UNICAST"));
+ QL_PRINT(DBG_INIT,
+ ("addr %02x %02x %02x %02x %02x %02x at index %d in "
+ "the CAM.\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4],
+ addr[5], index));
+
+ status = ql_wait_reg_rdy(qlge,
+ REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ /* offset 0 - lower 32 bits of the MAC address */
+ ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
+ (offset++) |
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type); /* type */
+ ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
+ status = ql_wait_reg_rdy(qlge,
+ REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ /* offset 1 - upper 16 bits of the MAC address */
+ ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
+ (offset++) |
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type); /* type */
+ ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
+ status = ql_wait_reg_rdy(qlge,
+ REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ /* offset 2 - CQ ID associated with this MAC address */
+ ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
+ (offset) | (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type); /* type */
+ /*
+ * This field should also include the queue id
+ * and possibly the function id. Right now we hardcode
+ * the route field to NIC core.
+ */
+ if (type == MAC_ADDR_TYPE_CAM_MAC) {
+ cam_output = (CAM_OUT_ROUTE_NIC |
+ (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
+ (0 <<
+ CAM_OUT_CQ_ID_SHIFT));
+
+ /* route to NIC core */
+ ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
+ cam_output);
+ }
+ break;
+ }
+ default:
+ cmn_err(CE_WARN,
+ "Address type %d not yet supported.", type);
+ status = DDI_FAILURE;
+ }
+exit:
+ return (status);
+}
+
+/*
+ * The NIC function for this chip has 16 routing indexes. Each one can be used
+ * to route different frame types to various inbound queues. We send
+ * broadcast/multicast/error frames to the default queue for slow handling,
+ * and CAM hit/RSS frames to the fast handling queues.
+ */
+static int
+ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
+{
+ int status;
+ uint32_t value = 0;
+
+ QL_PRINT(DBG_INIT,
+ ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
+ (enable ? "Adding" : "Removing"),
+ ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
+ ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
+ ((index ==
+ RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
+ ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
+ ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
+ ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
+ ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
+ ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
+ ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
+ ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
+ ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
+ ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
+ ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
+ ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
+ ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
+ ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
+ (enable ? "to" : "from")));
+
+ switch (mask) {
+ case RT_IDX_CAM_HIT:
+ value = RT_IDX_DST_CAM_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
+ break;
+
+ case RT_IDX_VALID: /* Promiscuous Mode frames. */
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
+ break;
+
+ case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
+ break;
+
+ case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
+ break;
+
+ case RT_IDX_MCAST: /* Pass up All Multicast frames. */
+ value = RT_IDX_DST_CAM_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
+ break;
+
+ case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
+ value = RT_IDX_DST_CAM_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
+ break;
+
+ case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
+ value = RT_IDX_DST_RSS | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
+ break;
+
+ case 0: /* Clear the E-bit on an entry. */
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (index << RT_IDX_IDX_SHIFT); /* index */
+ break;
+
+ default:
+ cmn_err(CE_WARN, "Mask type %d not yet supported.",
+ mask);
+ status = -EPERM;
+ goto exit;
+ }
+
+ if (value != 0) {
+ status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
+ if (status)
+ goto exit;
+ value |= (enable ? RT_IDX_E : 0);
+ ql_write_reg(qlge, REG_ROUTING_INDEX, value);
+ ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
+ }
+
+exit:
+ return (status);
+}
+
+/*
+ * Clear all the entries in the routing table.
+ * Caller must get semaphore in advance.
+static int
+static int
+ql_stop_routing(qlge_t *qlge)
+{
+ int status = 0;
+ int i;
+ /* Clear all the entries in the routing table. */
+ for (i = 0; i < 16; i++) {
+ status = ql_set_routing_reg(qlge, i, 0, 0);
+ if (status) {
+			cmn_err(CE_WARN, "Stop routing failed.");
+ }
+ }
+ return (status);
+}
+
+/* Initialize the frame-to-queue routing. */
+static int
+ql_route_initialize(qlge_t *qlge)
+{
+ int status = 0;
+
+ status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
+ if (status != DDI_SUCCESS)
+ return (status);
+
+ /* Clear all the entries in the routing table. */
+ status = ql_stop_routing(qlge);
+ if (status) {
+ goto exit;
+ }
+ status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
+ if (status) {
+ cmn_err(CE_WARN,
+ "Failed to init routing register for broadcast packets.");
+ goto exit;
+ }
+ /*
+ * If we have more than one inbound queue, then turn on RSS in the
+ * routing block.
+ */
+ if (qlge->rss_ring_count > 1) {
+ status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
+ RT_IDX_RSS_MATCH, 1);
+ if (status) {
+ cmn_err(CE_WARN,
+ "Failed to init routing register for MATCH RSS "
+ "packets.");
+ goto exit;
+ }
+ }
+
+ status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
+ RT_IDX_CAM_HIT, 1);
+ if (status) {
+ cmn_err(CE_WARN,
+ "Failed to init routing register for CAM packets.");
+ goto exit;
+ }
+
+ status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
+ RT_IDX_MCAST_MATCH, 1);
+ if (status) {
+ cmn_err(CE_WARN,
+ "Failed to init routing register for Multicast "
+ "packets.");
+ }
+
+exit:
+ ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
+ return (status);
+}
+
+/*
+ * Initialize hardware
+ */
+static int
+ql_device_initialize(qlge_t *qlge)
+{
+ uint32_t value, mask, required_max_frame_size;
+ int i;
+ int status = 0;
+ uint16_t pause = PAUSE_MODE_DISABLED;
+ boolean_t update_port_config = B_FALSE;
+ /*
+ * Set up the System register to halt on errors.
+ */
+ value = SYS_EFE | SYS_FAE;
+ mask = value << 16;
+ ql_write_reg(qlge, REG_SYSTEM, mask | value);
+
+ /* Set the default queue. */
+ value = NIC_RCV_CFG_DFQ;
+ mask = NIC_RCV_CFG_DFQ_MASK;
+
+ ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);
+
+ /* Enable the MPI interrupt. */
+ ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
+ | INTR_MASK_PI);
+ /* Enable the function, set pagesize, enable error checking. */
+ value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
+ FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
+ /* Set/clear header splitting. */
+ if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
+ value |= FSC_SH;
+ ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
+ }
+ mask = FSC_VM_PAGESIZE_MASK |
+ FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
+ ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
+ /*
+	 * Check the current port max frame size; if it differs from the
+	 * OS setting, we need to update it.
+ */
+ required_max_frame_size =
+ (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
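+
+	/*
+	 * For example, an MTU of 1500 (ETHERMTU) selects the normal frame
+	 * size, while any other configured MTU (e.g. a 9000-byte jumbo
+	 * MTU) selects the jumbo frame size.
+	 */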
+
+ if (ql_get_port_cfg(qlge) == DDI_SUCCESS) {
+ /* if correct frame size but different from required size */
+ if (qlge->port_cfg_info.max_frame_size !=
+ required_max_frame_size) {
+ QL_PRINT(DBG_MBX,
+ ("update frame size, current %d, new %d\n",
+ qlge->port_cfg_info.max_frame_size,
+ required_max_frame_size));
+ qlge->port_cfg_info.max_frame_size =
+ required_max_frame_size;
+ update_port_config = B_TRUE;
+ }
+ if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
+ pause = PAUSE_MODE_STANDARD;
+ else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
+ pause = PAUSE_MODE_PER_PRIORITY;
+ if (pause != qlge->pause) {
+ update_port_config = B_TRUE;
+ }
+ /*
+ * Always update port config for now to work around
+ * a hardware bug
+ */
+ update_port_config = B_TRUE;
+
+ /* if need to update port configuration */
+ if (update_port_config)
+ ql_set_port_cfg(qlge);
+ } else
+ cmn_err(CE_WARN, "ql_get_port_cfg failed");
+
+ /* Start up the rx queues. */
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
+ if (status) {
+ cmn_err(CE_WARN,
+ "Failed to start rx ring[%d]", i);
+ return (status);
+ }
+ }
+
+ /*
+ * If there is more than one inbound completion queue
+ * then download a RICB to configure RSS.
+ */
+ if (qlge->rss_ring_count > 1) {
+ status = ql_start_rss(qlge);
+ if (status) {
+ cmn_err(CE_WARN, "Failed to start RSS.");
+ return (status);
+ }
+ }
+
+ /* Start up the tx queues. */
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
+ if (status) {
+ cmn_err(CE_WARN,
+ "Failed to start tx ring[%d]", i);
+ return (status);
+ }
+ }
+ qlge->selected_tx_ring = 0;
+ /* Set the frame routing filter. */
+ status = ql_route_initialize(qlge);
+ if (status) {
+ cmn_err(CE_WARN,
+ "Failed to init CAM/Routing tables.");
+ return (status);
+ }
+
+ return (status);
+}
+
+/*
+ * Issue soft reset to chip.
+ */
+static int
+ql_asic_reset(qlge_t *qlge)
+{
+ uint32_t value;
+ int max_wait_time = 3;
+ int status = DDI_SUCCESS;
+
+ ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
+ |FUNCTION_RESET);
+
+ max_wait_time = 3;
+ do {
+ value = ql_read_reg(qlge, REG_RESET_FAILOVER);
+ if ((value & FUNCTION_RESET) == 0)
+ break;
+ qlge_delay(QL_ONE_SEC_DELAY);
+ } while ((--max_wait_time));
+
+ if (max_wait_time == 0) {
+ cmn_err(CE_WARN,
+		    "Timed out waiting for the chip reset to complete!");
+ status = DDI_FAILURE;
+ }
+
+ return (status);
+}
+
+/*
+ * Move small buffer descriptors from the free list to the in-use list
+ * so the hardware can use them: up to (sbq_len - MIN_BUFFERS_ARM_COUNT)
+ * descriptors are armed; with fewer free descriptors, the count is
+ * rounded down to a multiple of 16.
+ */
+static void
+ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
+{
+ struct bq_desc *sbq_desc;
+ int i;
+ uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
+ uint32_t arm_count;
+
+ if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
+ arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
+ else {
+ /* Adjust to a multiple of 16 */
+ arm_count = (rx_ring->sbuf_free_count / 16) * 16;
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
+#endif
+ }
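+
+	/*
+	 * A worked example of the adjustment above: with a free count of
+	 * 37, arm_count becomes (37 / 16) * 16 = 32, so buffers are always
+	 * handed to the chip in multiples of 16.
+	 */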
+ for (i = 0; i < arm_count; i++) {
+ sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
+ if (sbq_desc == NULL)
+ break;
+ /* Arm asic */
+ *sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
+ sbq_entry++;
+
+ /* link the descriptors to in_use_list */
+ ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
+ rx_ring->sbq_prod_idx++;
+ }
+ ql_update_sbq_prod_idx(qlge, rx_ring);
+}
+
+/*
+ * Move large buffer descriptors from the free list to the in-use list
+ * so the hardware can use them: up to (lbq_len - MIN_BUFFERS_ARM_COUNT)
+ * descriptors are armed; with fewer free descriptors, the count is
+ * rounded down to a multiple of 16.
+ */
+static void
+ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc;
+ int i;
+ uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
+ uint32_t arm_count;
+
+ if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
+ arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
+ else {
+ /* Adjust to a multiple of 16 */
+ arm_count = (rx_ring->lbuf_free_count / 16) * 16;
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
+#endif
+ }
+ for (i = 0; i < arm_count; i++) {
+ lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
+ if (lbq_desc == NULL)
+ break;
+ /* Arm asic */
+ *lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
+ lbq_entry++;
+
+ /* link the descriptors to in_use_list */
+ ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
+ rx_ring->lbq_prod_idx++;
+ }
+ ql_update_lbq_prod_idx(qlge, rx_ring);
+}
+
+
+/*
+ * Initialize the adapter by configuring the request and response queues,
+ * then allocate and arm the small and large receive buffers for the
+ * hardware.
+ */
+static int
+ql_bringup_adapter(qlge_t *qlge)
+{
+ int i;
+
+ if (ql_device_initialize(qlge) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
+ __func__, qlge->instance);
+ goto err_bringup;
+ }
+ qlge->sequence |= INIT_ADAPTER_UP;
+
+#ifdef QLGE_TRACK_BUFFER_USAGE
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ if (qlge->rx_ring[i].type != TX_Q) {
+ qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
+ qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
+ }
+ qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
+ }
+#endif
+ /* Arm buffers */
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ if (qlge->rx_ring[i].type != TX_Q) {
+ ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
+ ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
+ }
+ }
+
+ /* Enable work/request queues */
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ if (qlge->tx_ring[i].valid_db_reg)
+ ql_write_doorbell_reg(qlge,
+ qlge->tx_ring[i].valid_db_reg,
+ REQ_Q_VALID);
+ }
+
+ /* Enable completion queues */
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ if (qlge->rx_ring[i].valid_db_reg)
+ ql_write_doorbell_reg(qlge,
+ qlge->rx_ring[i].valid_db_reg,
+ RSP_Q_VALID);
+ }
+
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ mutex_enter(&qlge->tx_ring[i].tx_lock);
+ qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
+ mutex_exit(&qlge->tx_ring[i].tx_lock);
+ }
+
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ mutex_enter(&qlge->rx_ring[i].rx_lock);
+ qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
+ mutex_exit(&qlge->rx_ring[i].rx_lock);
+ }
+
+ /* This mutex will get re-acquired in enable_completion interrupt */
+ mutex_exit(&qlge->hw_mutex);
+ /* Traffic can start flowing now */
+ ql_enable_all_completion_interrupts(qlge);
+ mutex_enter(&qlge->hw_mutex);
+
+ ql_enable_global_interrupt(qlge);
+
+ qlge->sequence |= ADAPTER_INIT;
+ return (DDI_SUCCESS);
+
+err_bringup:
+ ql_asic_reset(qlge);
+ return (DDI_FAILURE);
+}
+
+/*
+ * Initialize mutexes of each rx/tx rings
+ */
+static int
+ql_init_rx_tx_locks(qlge_t *qlge)
+{
+ struct tx_ring *tx_ring;
+ struct rx_ring *rx_ring;
+ int i;
+
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ tx_ring = &qlge->tx_ring[i];
+ mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(qlge->intr_pri));
+ }
+
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(qlge->intr_pri));
+ mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(qlge->intr_pri));
+ mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(qlge->intr_pri));
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * ql_attach - Driver attach.
+ */
+static int
+ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int instance;
+ qlge_t *qlge;
+ int rval;
+ uint16_t w;
+ mac_register_t *macp = NULL;
+ rval = DDI_FAILURE;
+
+ /* first get the instance */
+ instance = ddi_get_instance(dip);
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ /*
+ * Check that hardware is installed in a DMA-capable slot
+ */
+ if (ddi_slaveonly(dip) == DDI_SUCCESS) {
+ cmn_err(CE_WARN, "?%s(%d): Not installed in a "
+ "DMA-capable slot", ADAPTER_NAME, instance);
+ break;
+ }
+
+ /*
+ * No support for high-level interrupts
+ */
+ if (ddi_intr_hilevel(dip, 0) != 0) {
+ cmn_err(CE_WARN, "?%s(%d): No support for high-level"
+ " intrs", ADAPTER_NAME, instance);
+ break;
+ }
+
+ /*
+ * Allocate our per-device-instance structure
+ */
+
+ qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP);
+ ASSERT(qlge != NULL);
+
+ qlge->sequence |= INIT_SOFTSTATE_ALLOC;
+
+ qlge->dip = dip;
+ qlge->instance = instance;
+
+ /*
+ * Setup the ISP8x00 registers address mapping to be
+ * accessed by this particular driver.
+ * 0x0 Configuration Space
+ * 0x1 I/O Space
+ * 0x2 1st Memory Space address - Control Register Set
+ * 0x3 2nd Memory Space address - Doorbell Memory Space
+ */
+
+ w = 2;
+ if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0,
+ sizeof (dev_reg_t), &ql_dev_acc_attr,
+ &qlge->dev_handle) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d): Unable to map device "
+ "registers", ADAPTER_NAME, instance);
+ ql_free_resources(dip, qlge);
+ break;
+ }
+
+ QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n",
+ qlge->iobase));
+
+ qlge->sequence |= INIT_REGS_SETUP;
+
+ /* map Doorbell memory space */
+ w = 3;
+ if (ddi_regs_map_setup(dip, w,
+ (caddr_t *)&qlge->doorbell_reg_iobase, 0,
+ 0x100000 /* sizeof (dev_doorbell_reg_t) */,
+ &ql_dev_acc_attr,
+ &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell "
+ "registers",
+ ADAPTER_NAME, instance);
+ ql_free_resources(dip, qlge);
+ break;
+ }
+
+ QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n",
+ qlge->doorbell_reg_iobase));
+
+ qlge->sequence |= INIT_DOORBELL_REGS_SETUP;
+
+ /*
+ * Allocate a macinfo structure for this instance
+ */
+ if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
+ cmn_err(CE_WARN, "%s(%d): mac_alloc failed",
+ __func__, instance);
+ ql_free_resources(dip, qlge);
+			break;
+ }
+ /* save adapter status to dip private data */
+ ddi_set_driver_private(dip, qlge);
+ QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n",
+ ADAPTER_NAME, instance));
+
+ qlge->sequence |= INIT_MAC_ALLOC;
+
+ /*
+ * Attach this instance of the device
+ */
+ /* Setup PCI Local Bus Configuration resource. */
+ if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d):Unable to get PCI resources",
+ ADAPTER_NAME, instance);
+ ql_free_resources(dip, qlge);
+ break;
+ }
+
+ qlge->sequence |= INIT_PCI_CONFIG_SETUP;
+
+ if (ql_init_instance(qlge) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d): Unable to initialize device "
+ "instance", ADAPTER_NAME, instance);
+ ql_free_resources(dip, qlge);
+ break;
+ }
+
+ /* Setup interrupt vectors */
+ if (ql_alloc_irqs(qlge) != DDI_SUCCESS) {
+ ql_free_resources(dip, qlge);
+ break;
+ }
+ qlge->sequence |= INIT_INTR_ALLOC;
+
+ /* Configure queues */
+ if (ql_setup_rings(qlge) != DDI_SUCCESS) {
+ ql_free_resources(dip, qlge);
+ break;
+ }
+
+ qlge->sequence |= INIT_SETUP_RINGS;
+ /*
+ * Map queues to interrupt vectors
+ */
+ ql_resolve_queues_to_irqs(qlge);
+ /*
+ * Add interrupt handlers
+ */
+ if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "Failed to add interrupt "
+ "handlers");
+ ql_free_resources(dip, qlge);
+ break;
+ }
+
+ qlge->sequence |= INIT_ADD_INTERRUPT;
+ QL_PRINT(DBG_GLD, ("%s(%d): Add interrupt handler done\n",
+ ADAPTER_NAME, instance));
+
+ /* Initialize mutex, need the interrupt priority */
+ ql_init_rx_tx_locks(qlge);
+
+ qlge->sequence |= INIT_LOCKS_CREATED;
+
+ /*
+		 * Use a soft interrupt to do work that we do not want to
+		 * do in regular network functions or while mutexes are held
+ */
+ if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl,
+ DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge)
+ != DDI_SUCCESS) {
+ ql_free_resources(dip, qlge);
+ break;
+ }
+
+ if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl,
+ DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge)
+ != DDI_SUCCESS) {
+ ql_free_resources(dip, qlge);
+ break;
+ }
+
+ if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl,
+ DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge)
+ != DDI_SUCCESS) {
+ ql_free_resources(dip, qlge);
+ break;
+ }
+
+ qlge->sequence |= INIT_ADD_SOFT_INTERRUPT;
+
+ /*
+ * mutex to protect the adapter state structure.
+ * initialize mutexes according to the interrupt priority
+ */
+ mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(qlge->intr_pri));
+ mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(qlge->intr_pri));
+ mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(qlge->intr_pri));
+
+ /* Mailbox wait and interrupt conditional variable. */
+ cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL);
+
+ qlge->sequence |= INIT_MUTEX;
+
+ /*
+ * KStats
+ */
+ if (ql_init_kstats(qlge) != DDI_SUCCESS) {
+			cmn_err(CE_WARN, "%s(%d): KStats initialization failed",
+ ADAPTER_NAME, instance);
+ ql_free_resources(dip, qlge);
+ break;
+ }
+ qlge->sequence |= INIT_KSTATS;
+
+ /*
+ * Initialize gld macinfo structure
+ */
+ ql_gld3_init(qlge, macp);
+
+ if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d): mac_register failed",
+ __func__, instance);
+ ql_free_resources(dip, qlge);
+ break;
+ }
+ qlge->sequence |= INIT_MAC_REGISTERED;
+ QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n",
+ ADAPTER_NAME, instance));
+
+ mac_free(macp);
+ macp = NULL;
+
+ qlge->mac_flags = QL_MAC_ATTACHED;
+
+ /*
+ * Allocate memory resources
+ */
+ if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d): memory allocation failed",
+ __func__, qlge->instance);
+ ql_free_mem_resources(qlge);
+ ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
+ return (DDI_FAILURE);
+ }
+ qlge->sequence |= INIT_MEMORY_ALLOC;
+
+ ddi_report_dev(dip);
+
+ rval = DDI_SUCCESS;
+ break;
+/*
+ * DDI_RESUME
+ * When called with cmd set to DDI_RESUME, attach() must
+ * restore the hardware state of a device (power may have been
+ * removed from the device), allow pending requests to continue,
+ * and service new requests. In this case, the driver
+ * must not make any assumptions about the state of the
+ * hardware, but must restore the state of the device except
+ * for the power level of components.
+ *
+ */
+ case DDI_RESUME:
+
+ if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL)
+ return (DDI_FAILURE);
+
+ QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n",
+ __func__, qlge->instance));
+
+ mutex_enter(&qlge->gen_mutex);
+ rval = ql_do_start(qlge);
+ mutex_exit(&qlge->gen_mutex);
+ break;
+
+ default:
+ break;
+ }
+ return (rval);
+}
+
+/*
+ * Unbind all pending tx dma handles during driver bring down
+ */
+static void
+ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring)
+{
+ struct tx_ring_desc *tx_ring_desc;
+ int i, j;
+
+ if (tx_ring->wq_desc) {
+ tx_ring_desc = tx_ring->wq_desc;
+ for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
+ for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
+ if (tx_ring_desc->tx_dma_handle[j]) {
+ (void) ddi_dma_unbind_handle(
+ tx_ring_desc->tx_dma_handle[j]);
+ }
+ }
+ tx_ring_desc->tx_dma_handle_used = 0;
+ } /* end of for loop */
+ }
+}
+
+/*
+ * Wait for all the packets sent to the chip to finish transmission, to
+ * prevent buffers from being unmapped before or during a transmit
+ * operation
+ */
+static int
+ql_wait_tx_quiesce(qlge_t *qlge)
+{
+ int count = MAX_TX_WAIT_COUNT, i;
+ int rings_done;
+ volatile struct tx_ring *tx_ring;
+ uint32_t consumer_idx;
+ uint32_t producer_idx;
+ uint32_t temp;
+ int done = 0;
+ int rval = DDI_FAILURE;
+
+ while (!done) {
+ rings_done = 0;
+
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ tx_ring = &qlge->tx_ring[i];
+ temp = ql_read_doorbell_reg(qlge,
+ tx_ring->prod_idx_db_reg);
+ producer_idx = temp & 0x0000ffff;
+ consumer_idx = (temp >> 16);
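+
+			/*
+			 * Example of the packing above: a doorbell value of
+			 * 0x00fa000a unpacks to producer_idx = 0x000a (10)
+			 * and consumer_idx = 0x00fa (250).
+			 */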
+
+ /*
+ * Get the pending iocb count, ones which have not been
+ * pulled down by the chip
+ */
+ if (producer_idx >= consumer_idx)
+ temp = (producer_idx - consumer_idx);
+ else
+ temp = (tx_ring->wq_len - consumer_idx) +
+ producer_idx;
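+
+			/*
+			 * Continuing the example: with wq_len = 256,
+			 * producer_idx = 10 and consumer_idx = 250, there
+			 * are (256 - 250) + 10 = 16 iocbs the chip has not
+			 * yet pulled down.
+			 */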
+
+ if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len)
+ rings_done++;
+			else {
+				/*
+				 * This ring still has pending iocbs; no
+				 * need to check the rest, retry after the
+				 * short delay below.
+				 */
+				break;
+			}
+ }
+
+ /* If all the rings are done */
+ if (rings_done >= qlge->tx_ring_count) {
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_NOTE, "%s(%d) done successfully \n",
+ __func__, qlge->instance);
+#endif
+ rval = DDI_SUCCESS;
+ break;
+ }
+
+ qlge_delay(100);
+
+ count--;
+ if (!count) {
+
+ count = MAX_TX_WAIT_COUNT;
+#ifdef QLGE_LOAD_UNLOAD
+ volatile struct rx_ring *rx_ring;
+ cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending"
+			    " transmits on queue %d to complete.\n",
+ __func__, qlge->instance,
+ (qlge->tx_ring[i].wq_len -
+ qlge->tx_ring[i].tx_free_count),
+ i);
+
+ rx_ring = &qlge->rx_ring[i+1];
+ temp = ql_read_doorbell_reg(qlge,
+ rx_ring->cnsmr_idx_db_reg);
+ consumer_idx = temp & 0x0000ffff;
+ producer_idx = (temp >> 16);
+ cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d,"
+ " Producer %d, Consumer %d\n",
+ __func__, qlge->instance,
+ i+1,
+ producer_idx, consumer_idx);
+
+ temp = ql_read_doorbell_reg(qlge,
+ tx_ring->prod_idx_db_reg);
+ producer_idx = temp & 0x0000ffff;
+ consumer_idx = (temp >> 16);
+ cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d,"
+ " Producer %d, Consumer %d\n",
+ __func__, qlge->instance, i,
+ producer_idx, consumer_idx);
+#endif
+
+ /* For now move on */
+ break;
+ }
+ }
+ /* Stop the request queue */
+ mutex_enter(&qlge->hw_mutex);
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ if (qlge->tx_ring[i].valid_db_reg) {
+ ql_write_doorbell_reg(qlge,
+ qlge->tx_ring[i].valid_db_reg, 0);
+ }
+ }
+ mutex_exit(&qlge->hw_mutex);
+ return (rval);
+}
+
+/*
+ * Wait for all the receives indicated to the stack to come back
+ */
+static int
+ql_wait_rx_complete(qlge_t *qlge)
+{
+ int i;
+ /* Disable all the completion queues */
+ mutex_enter(&qlge->hw_mutex);
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ if (qlge->rx_ring[i].valid_db_reg) {
+ ql_write_doorbell_reg(qlge,
+ qlge->rx_ring[i].valid_db_reg, 0);
+ }
+ }
+ mutex_exit(&qlge->hw_mutex);
+
+ /* Wait for OS to return all rx buffers */
+ qlge_delay(QL_ONE_SEC_DELAY);
+ return (DDI_SUCCESS);
+}
+
+/*
+ * stop the driver
+ */
+static int
+ql_bringdown_adapter(qlge_t *qlge)
+{
+ int i;
+ int status = DDI_SUCCESS;
+
+ qlge->mac_flags = QL_MAC_BRINGDOWN;
+ if (qlge->sequence & ADAPTER_INIT) {
+ /* stop forwarding external packets to driver */
+ status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
+ if (status)
+ return (status);
+ ql_stop_routing(qlge);
+ ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
+ /*
+ * Set the flag for receive and transmit
+ * operations to cease
+ */
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ mutex_enter(&qlge->tx_ring[i].tx_lock);
+ qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED;
+ mutex_exit(&qlge->tx_ring[i].tx_lock);
+ }
+
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ mutex_enter(&qlge->rx_ring[i].rx_lock);
+ qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED;
+ mutex_exit(&qlge->rx_ring[i].rx_lock);
+ }
+
+ /*
+ * Need interrupts to be running while the transmit
+ * completions are cleared. Wait for the packets
+ * queued to the chip to be sent out
+ */
+ (void) ql_wait_tx_quiesce(qlge);
+ /* Interrupts not needed from now */
+ ql_disable_all_completion_interrupts(qlge);
+
+ mutex_enter(&qlge->hw_mutex);
+ /* Disable Global interrupt */
+ ql_disable_global_interrupt(qlge);
+ mutex_exit(&qlge->hw_mutex);
+
+ /* Wait for all the indicated packets to come back */
+ status = ql_wait_rx_complete(qlge);
+
+ mutex_enter(&qlge->hw_mutex);
+ /* Reset adapter */
+ ql_asic_reset(qlge);
+ /*
+ * Unbind all tx dma handles to prevent pending tx descriptors'
+ * dma handles from being re-used.
+ */
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]);
+ }
+
+ qlge->sequence &= ~ADAPTER_INIT;
+
+ mutex_exit(&qlge->hw_mutex);
+ }
+ return (status);
+}
+
+/*
+ * ql_detach
+ * Used to remove all the states associated with a given
+ * instances of a device node prior to the removal of that
+ * instance from the system.
+ */
+static int
+ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ qlge_t *qlge;
+ int rval;
+
+ rval = DDI_SUCCESS;
+
+ switch (cmd) {
+ case DDI_DETACH:
+
+ if ((qlge = QL_GET_DEV(dip)) == NULL)
+ return (DDI_FAILURE);
+ rval = ql_bringdown_adapter(qlge);
+ if (rval != DDI_SUCCESS)
+ break;
+
+ qlge->mac_flags = QL_MAC_DETACH;
+
+ /* free memory resources */
+ if (qlge->sequence & INIT_MEMORY_ALLOC) {
+ ql_free_mem_resources(qlge);
+ qlge->sequence &= ~INIT_MEMORY_ALLOC;
+ }
+ ql_free_resources(dip, qlge);
+
+ break;
+
+ case DDI_SUSPEND:
+ if ((qlge = QL_GET_DEV(dip)) == NULL)
+ return (DDI_FAILURE);
+
+ mutex_enter(&qlge->gen_mutex);
+ if ((qlge->mac_flags == QL_MAC_ATTACHED) ||
+ (qlge->mac_flags == QL_MAC_STARTED)) {
+ ql_do_stop(qlge);
+ }
+ qlge->mac_flags = QL_MAC_SUSPENDED;
+ mutex_exit(&qlge->gen_mutex);
+
+ break;
+ default:
+ rval = DDI_FAILURE;
+ break;
+ }
+
+ return (rval);
+}
+
+/*
+ * quiesce(9E) entry point.
+ *
+ * This function is called when the system is single-threaded at high
+ * PIL with preemption disabled. Therefore, this function must not
+ * block.
+ *
+ * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
+ */
+int
+ql_quiesce(dev_info_t *dip)
+{
+ qlge_t *qlge;
+ int i;
+
+ if ((qlge = QL_GET_DEV(dip)) == NULL)
+ return (DDI_FAILURE);
+
+ if (CFG_IST(qlge, CFG_CHIP_8100)) {
+ /* stop forwarding external packets to driver */
+ ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
+ ql_stop_routing(qlge);
+ ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
+ /* Stop all the request queues */
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ if (qlge->tx_ring[i].valid_db_reg) {
+ ql_write_doorbell_reg(qlge,
+ qlge->tx_ring[i].valid_db_reg, 0);
+ }
+ }
+ qlge_delay(QL_ONE_SEC_DELAY/4);
+ /* Interrupts not needed from now */
+ /* Disable MPI interrupt */
+ ql_write_reg(qlge, REG_INTERRUPT_MASK,
+ (INTR_MASK_PI << 16));
+ ql_disable_global_interrupt(qlge);
+
+ /* Disable all the rx completion queues */
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ if (qlge->rx_ring[i].valid_db_reg) {
+ ql_write_doorbell_reg(qlge,
+ qlge->rx_ring[i].valid_db_reg, 0);
+ }
+ }
+ qlge_delay(QL_ONE_SEC_DELAY/4);
+ qlge->mac_flags = QL_MAC_STOPPED;
+ /* Reset adapter */
+ ql_asic_reset(qlge);
+ qlge_delay(100);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);
+
+/*
+ * Loadable Driver Interface Structures.
+ * Declare and initialize the module configuration section...
+ */
+static struct modldrv modldrv = {
+ &mod_driverops, /* type of module: driver */
+ version, /* name of module */
+ &ql_ops /* driver dev_ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, &modldrv, NULL
+};
+
+/*
+ * Loadable Module Routines
+ */
+
+/*
+ * _init
+ * Initializes a loadable module. It is called before any other
+ * routine in a loadable module.
+ */
+int
+_init(void)
+{
+ int rval;
+
+ mac_init_ops(&ql_ops, ADAPTER_NAME);
+ rval = mod_install(&modlinkage);
+ if (rval != DDI_SUCCESS) {
+ mac_fini_ops(&ql_ops);
+ cmn_err(CE_WARN, "?Unable to install/attach driver '%s'",
+ ADAPTER_NAME);
+ }
+
+ return (rval);
+}
+
+/*
+ * _fini
+ * Prepares a module for unloading. It is called when the system
+ * wants to unload a module. If the module determines that it can
+ * be unloaded, then _fini() returns the value returned by
+ * mod_remove(). Upon successful return from _fini() no other
+ * routine in the module will be called before _init() is called.
+ */
+int
+_fini(void)
+{
+ int rval;
+
+ rval = mod_remove(&modlinkage);
+ if (rval == DDI_SUCCESS) {
+ mac_fini_ops(&ql_ops);
+ }
+
+ return (rval);
+}
+
+/*
+ * _info
+ * Returns information about loadable module.
+ */
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
diff --git a/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_dbg.c b/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_dbg.c
new file mode 100644
index 0000000000..5493b30729
--- /dev/null
+++ b/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_dbg.c
@@ -0,0 +1,2943 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 QLogic Corporation. All rights reserved.
+ */
+
+#include <qlge.h>
+
+static uint32_t ql_dump_buf_8(uint8_t *, uint32_t, uint32_t);
+static uint32_t ql_dump_buf_16(uint16_t *, uint32_t, uint32_t);
+static uint32_t ql_dump_buf_32(uint32_t *, uint32_t, uint32_t);
+static uint32_t ql_dump_buf_64(uint64_t *, uint32_t, uint32_t);
+static int ql_binary_core_dump(qlge_t *, uint32_t, uint32_t *);
+
+static char ISP_8100_REGION[] = {
+ "nic: nic_boot, nic_param, nic_vpd \n"
+ "mpi: mpi_fw, mpi_config, edc_fw\n"
+ "fc: fc_boot, fc_fw, fc_nvram, fc_vpd"};
+static char ISP_8100_AVAILABLE_DUMPS[] = {"core,register,all"};
+
+/*
+ * Get byte from I/O port
+ */
+uint8_t
+ql_get8(qlge_t *qlge, uint32_t index)
+{
+ uint8_t ret;
+
+ ret = (uint8_t)ddi_get8(qlge->dev_handle,
+ (uint8_t *)(((caddr_t)qlge->iobase) + index));
+ return (ret);
+}
+
+/*
+ * Get word from I/O port
+ */
+uint16_t
+ql_get16(qlge_t *qlge, uint32_t index)
+{
+ uint16_t ret;
+
+ ret = (uint16_t)ddi_get16(qlge->dev_handle,
+ (uint16_t *)(void *)(((caddr_t)qlge->iobase) + index));
+ return (ret);
+}
+
+/*
+ * Get double word from I/O port
+ */
+uint32_t
+ql_get32(qlge_t *qlge, uint32_t index)
+{
+ uint32_t ret;
+
+ ret = ddi_get32(qlge->dev_handle,
+ (uint32_t *)(void *)(((caddr_t)qlge->iobase) + index));
+ return (ret);
+}
+
+/*
+ * Send byte to I/O port
+ */
+void
+ql_put8(qlge_t *qlge, uint32_t index, uint8_t data)
+{
+ ddi_put8(qlge->dev_handle,
+ (uint8_t *)(((caddr_t)qlge->iobase) + index), data);
+}
+
+/*
+ * Send word to I/O port
+ */
+void
+ql_put16(qlge_t *qlge, uint32_t index, uint16_t data)
+{
+ ddi_put16(qlge->dev_handle,
+ (uint16_t *)(void *)(((caddr_t)qlge->iobase) + index), data);
+}
+
+/*
+ * Send double word to I/O port
+ */
+void
+ql_put32(qlge_t *qlge, uint32_t index, uint32_t data)
+{
+ ddi_put32(qlge->dev_handle,
+ (uint32_t *)(void *)(((caddr_t)qlge->iobase) + index), data);
+}
+
+/*
+ * Read from a register
+ */
+uint32_t
+ql_read_reg(qlge_t *qlge, uint32_t reg)
+{
+ uint32_t data = ql_get32(qlge, reg);
+
+ return (data);
+}
+
+/*
+ * Write 32 bit data to a register
+ */
+void
+ql_write_reg(qlge_t *qlge, uint32_t reg, uint32_t data)
+{
+ ql_put32(qlge, reg, data);
+}
+
+/*
+ * Set semaphore register bit to lock access to a shared register
+ */
+int
+ql_sem_lock(qlge_t *qlge, uint32_t sem_mask, uint32_t sem_bits)
+{
+ uint32_t value;
+
+ ql_put32(qlge, REG_SEMAPHORE, (sem_mask | sem_bits));
+ value = ql_get32(qlge, REG_SEMAPHORE);
+ return ((value & (sem_mask >> 16)) == sem_bits);
+}
+
+/*
+ * Wait up to "delay" seconds until the register "reg"'s "wait_bit"
+ * is set or cleared, as selected by "set".
+ * The default wait time is 5 seconds if "delay" was not set.
+ */
+int
+ql_wait_reg_bit(qlge_t *qlge, uint32_t reg, uint32_t wait_bit, int set,
+ uint32_t delay)
+{
+ uint32_t reg_status;
+ uint32_t timer = 5; /* 5 second */
+ int rtn_val = DDI_SUCCESS;
+ uint32_t delay_ticks;
+
+ if (delay != 0)
+ timer = delay;
+
+ delay_ticks = timer * 100;
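+
+	/*
+	 * With the 5 second default this polls up to 500 times, sleeping
+	 * QL_ONE_SEC_DELAY / 100 (roughly 10 ms) between register reads.
+	 */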
+ /*
+	 * Poll the register until the wait bit reaches the requested
+	 * state; until then, the device is still busy.
+ */
+ do {
+ reg_status = ql_read_reg(qlge, reg);
+ /* wait for bit set or reset? */
+ if (set == BIT_SET) {
+ if (reg_status & wait_bit)
+ break;
+ else
+ qlge_delay(QL_ONE_SEC_DELAY / 100);
+ } else {
+ if (reg_status & wait_bit)
+ qlge_delay(QL_ONE_SEC_DELAY / 100);
+ else
+ break;
+ }
+ } while (--delay_ticks);
+
+ if (delay_ticks == 0) {
+ rtn_val = DDI_FAILURE;
+ }
+ return (rtn_val);
+}
+
+/*
+ * Dump the value of control registers
+ */
+void
+ql_dump_all_contrl_regs(qlge_t *qlge)
+{
+ int i;
+ uint32_t data;
+
+ for (i = 0; i < 0xff; i = i+4) {
+ data = ql_read_reg(qlge, i);
+ ql_printf("\tregister# 0x%x value: 0x%x\n", i, data);
+ }
+}
+
+/*
+ * Prints string plus buffer.
+ */
+void
+ql_dump_buf(char *string, uint8_t *buffer, uint8_t wd_size,
+ uint32_t count)
+{
+ uint32_t offset = 0;
+
+ if (strcmp(string, "") != 0)
+ ql_printf(string);
+
+ if ((buffer == NULL) || (count == 0))
+ return;
+
+ switch (wd_size) {
+ case 8:
+ while (count) {
+ count = ql_dump_buf_8(buffer, count, offset);
+ offset += 8;
+ buffer += 8;
+ }
+ break;
+
+ case 16:
+ while (count) {
+ count = ql_dump_buf_16((uint16_t *)(void *)buffer,
+ count, offset);
+ offset += 16;
+ buffer += 16;
+ }
+ break;
+ case 32:
+ while (count) {
+ count = ql_dump_buf_32((uint32_t *)(void *)buffer,
+ count, offset);
+ offset += 16;
+ buffer += 16;
+ }
+ break;
+ case 64:
+ while (count) {
+ count = ql_dump_buf_64((uint64_t *)(void *)buffer,
+ count, offset);
+ offset += 16;
+ buffer += 16;
+ }
+ break;
+ default:
+ break;
+ }
+}
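+
+/*
+ * A minimal usage sketch (illustrative only): dump a completion queue
+ * control block as 32-bit words,
+ *
+ *	ql_dump_buf("CQICB:\n", (uint8_t *)cqicb,
+ *	    32, sizeof (struct cqicb_t) / 4);
+ *
+ * where wd_size selects the 8/16/32/64-bit formatting below and count
+ * is the number of words of that size to print.
+ */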
+
+/*
+ * Print as 8bit bytes
+ */
+static uint32_t
+ql_dump_buf_8(uint8_t *bp, uint32_t count, uint32_t offset)
+{
+ switch (count) {
+ case 1:
+ ql_printf("0x%016x : %02x\n",
+ offset,
+ *bp);
+ break;
+
+ case 2:
+ ql_printf("0x%016x : %02x %02x\n",
+ offset,
+ *bp, *(bp+1));
+ break;
+
+ case 3:
+ ql_printf("0x%016x : %02x %02x %02x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2));
+ break;
+
+ case 4:
+ ql_printf("0x%016x : %02x %02x %02x %02x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2), *(bp+3));
+ break;
+
+ case 5:
+ ql_printf("0x%016x : %02x %02x %02x %02x %02x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2), *(bp+3), *(bp+4));
+ break;
+
+ case 6:
+ ql_printf("0x%016x : %02x %02x %02x %02x %02x %02x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2), *(bp+3), *(bp+4), *(bp+5));
+ break;
+
+ case 7:
+ ql_printf("0x%016x : %02x %02x %02x %02x %02x %02x %02x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2), *(bp+3), *(bp+4), *(bp+5), *(bp+6));
+ break;
+
+ default:
+ ql_printf("0x%016x : %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2), *(bp+3), *(bp+4), *(bp+5), *(bp+6),
+ *(bp+7));
+ break;
+
+ }
+
+ if (count < 8) {
+ count = 0;
+ } else {
+ count -= 8;
+ }
+
+ return (count);
+}
+
+/*
+ * Print as 16bit
+ */
+static uint32_t
+ql_dump_buf_16(uint16_t *bp, uint32_t count, uint32_t offset)
+{
+
+ switch (count) {
+ case 1:
+ ql_printf("0x%016x : %04x\n",
+ offset,
+ *bp);
+ break;
+
+ case 2:
+ ql_printf("0x%016x : %04x %04x\n",
+ offset,
+ *bp, *(bp+1));
+ break;
+
+ case 3:
+ ql_printf("0x%016x : %04x %04x %04x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2));
+ break;
+
+ case 4:
+ ql_printf("0x%016x : %04x %04x %04x %04x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2), *(bp+3));
+ break;
+
+ case 5:
+ ql_printf("0x%016x : %04x %04x %04x %04x %04x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2), *(bp+3), *(bp+4));
+ break;
+
+ case 6:
+ ql_printf("0x%016x : %04x %04x %04x %04x %04x %04x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2), *(bp+3), *(bp+4), *(bp+5));
+ break;
+
+ case 7:
+ ql_printf("0x%016x : %04x %04x %04x %04x %04x %04x %04x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2), *(bp+3), *(bp+4), *(bp+5), *(bp+6));
+ break;
+
+ default:
+ ql_printf("0x%016x : %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2), *(bp+3), *(bp+4), *(bp+5), *(bp+6),
+ *(bp+7));
+ break;
+ }
+
+ if (count < 8) {
+ count = 0;
+ } else {
+ count -= 8;
+ }
+
+ return (count);
+}
+
+/*
+ * Print as 32bit
+ */
+static uint32_t
+ql_dump_buf_32(uint32_t *bp, uint32_t count, uint32_t offset)
+{
+
+ switch (count) {
+ case 1:
+ ql_printf("0x%016x : %08x\n",
+ offset,
+ *bp);
+ break;
+
+ case 2:
+ ql_printf("0x%016x : %08x %08x\n",
+ offset,
+ *bp, *(bp+1));
+ break;
+
+ case 3:
+ ql_printf("0x%016x : %08x %08x %08x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2));
+ break;
+
+ default:
+ ql_printf("0x%016x : %08x %08x %08x %08x\n",
+ offset,
+ *bp, *(bp+1), *(bp+2), *(bp+3));
+ break;
+ }
+
+ if (count < 4) {
+ count = 0;
+ } else {
+ count -= 4;
+ }
+
+ return (count);
+}
+
+/*
+ * Print as 64bit
+ */
+static uint32_t
+ql_dump_buf_64(uint64_t *bp, uint32_t count, uint32_t offset)
+{
+
+ switch (count) {
+ case 1:
+ ql_printf("0x%016x : %016x\n",
+ offset,
+ *bp);
+ break;
+
+ default:
+ ql_printf("0x%016x : %016x %016x\n",
+ offset,
+ *bp, *(bp+1));
+ break;
+
+ }
+
+ if (count < 2) {
+ count = 0;
+ } else {
+ count -= 2;
+ }
+
+ return (count);
+}
+
+/*
+ * Print CQICB control block information
+ */
+/* ARGSUSED */
+void
+ql_dump_cqicb(qlge_t *qlge, struct cqicb_t *cqicb)
+{
+ _NOTE(ARGUNUSED(qlge));
+ ASSERT(qlge != NULL);
+ ASSERT(cqicb != NULL);
+ ql_printf("ql_dump_cqicb:entered\n");
+
+ ql_printf("\t msix_vect = 0x%x\n",
+ cqicb->msix_vect);
+ ql_printf("\t reserved1 = 0x%x\n",
+ cqicb->reserved1);
+ ql_printf("\t reserved2 = 0x%x\n",
+ cqicb->reserved2);
+ ql_printf("\t flags = 0x%x\n",
+ cqicb->flags);
+ ql_printf("\t len = 0x%x\n",
+ le16_to_cpu(cqicb->len));
+ ql_printf("\t rid = 0x%x\n",
+ le16_to_cpu(cqicb->rid));
+ ql_printf("\t cq_base_addr_lo = 0x%x\n",
+ le32_to_cpu(cqicb->cq_base_addr_lo));
+ ql_printf("\t cq_base_addr_hi = 0x%x\n",
+ le32_to_cpu(cqicb->cq_base_addr_hi));
+ ql_printf("\t prod_idx_addr_lo = %x\n",
+ le32_to_cpu(cqicb->prod_idx_addr_lo));
+ ql_printf("\t prod_idx_addr_hi = %x\n",
+ le32_to_cpu(cqicb->prod_idx_addr_hi));
+ ql_printf("\t pkt_delay = %d\n",
+ le16_to_cpu(cqicb->pkt_delay));
+ ql_printf("\t irq_delay = 0x%x\n",
+ le16_to_cpu(cqicb->irq_delay));
+ ql_printf("\t lbq_addr_lo = 0x%x\n",
+ le32_to_cpu(cqicb->lbq_addr_lo));
+ ql_printf("\t lbq_addr_hi = 0x%x\n",
+ le32_to_cpu(cqicb->lbq_addr_hi));
+ ql_printf("\t lbq_buf_size = 0x%x\n",
+ le16_to_cpu(cqicb->lbq_buf_size));
+ ql_printf("\t lbq_len = 0x%x\n",
+ le16_to_cpu(cqicb->lbq_len));
+ ql_printf("\t sbq_addr_lo = 0x%x\n",
+ le32_to_cpu(cqicb->sbq_addr_lo));
+ ql_printf("\t sbq_addr_hi = 0x%x\n",
+ le32_to_cpu(cqicb->sbq_addr_hi));
+ ql_printf("\t sbq_buf_size = 0x%x\n",
+ le16_to_cpu(cqicb->sbq_buf_size));
+ ql_printf("\t sbq_len = 0x%x\n",
+ le16_to_cpu(cqicb->sbq_len));
+
+ ql_printf("ql_dump_cqicb:exiting\n");
+}
+
+/*
+ * Print WQICB control block information
+ */
+/* ARGSUSED */
+void
+ql_dump_wqicb(qlge_t *qlge, struct wqicb_t *wqicb)
+{
+ _NOTE(ARGUNUSED(qlge));
+ ASSERT(qlge != NULL);
+ ASSERT(wqicb != NULL);
+
+ ql_printf("ql_dump_wqicb:entered\n");
+
+ ql_printf("\t len = %x\n",
+ le16_to_cpu(wqicb->len));
+ ql_printf("\t flags = %x\n",
+ le16_to_cpu(wqicb->flags));
+ ql_printf("\t cq_id_rss = %x\n",
+ le16_to_cpu(wqicb->cq_id_rss));
+ ql_printf("\t rid = 0x%x\n",
+ le16_to_cpu(wqicb->rid));
+ ql_printf("\t wq_addr_lo = 0x%x\n",
+ le32_to_cpu(wqicb->wq_addr_lo));
+ ql_printf("\t wq_addr_hi = 0x%x\n",
+ le32_to_cpu(wqicb->wq_addr_hi));
+ ql_printf("\t cnsmr_idx_addr_lo = %x\n",
+ le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
+ ql_printf("\t cnsmr_idx_addr_hi = %x\n",
+ le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
+
+ ql_printf("ql_dump_wqicb:exit\n");
+}
+
+/*
+ * Print request descriptor information
+ */
+void
+ql_dump_req_pkt(qlge_t *qlge, struct ob_mac_iocb_req *pkt, void *oal,
+ int number)
+{
+ int i = 0;
+ struct oal_entry *oal_entry;
+
+ ql_printf("ql_dump_req_pkt(%d):enter\n", qlge->instance);
+
+ ql_printf("\t opcode = 0x%x\n",
+ pkt->opcode);
+ ql_printf("\t flag0 = 0x%x\n",
+ pkt->flag0);
+ ql_printf("\t flag1 = 0x%x\n",
+ pkt->flag1);
+ ql_printf("\t flag2 = 0x%x\n",
+ pkt->flag2);
+ ql_printf("\t frame_len = 0x%x\n",
+ le16_to_cpu(pkt->frame_len));
+ ql_printf("\t transaction_id_low = 0x%x\n",
+ le16_to_cpu(pkt->tid));
+ ql_printf("\t txq_idx = 0x%x\n",
+ le16_to_cpu(pkt->txq_idx));
+ ql_printf("\t protocol_hdr_len = 0x%x\n",
+ le16_to_cpu(pkt->protocol_hdr_len));
+ ql_printf("\t hdr_off = %d\n",
+ le16_to_cpu(pkt->hdr_off));
+ ql_printf("\t vlan_tci = %d\n",
+ le16_to_cpu(pkt->vlan_tci));
+ ql_printf("\t mss = %d\n",
+ le16_to_cpu(pkt->mss));
+
+ /* if OAL is needed */
+ if (number > TX_DESC_PER_IOCB) {
+ for (i = 0; i < TX_DESC_PER_IOCB; i++) {
+ ql_printf("\t buf_addr%d_low = 0x%x\n",
+ i, pkt->oal_entry[i].buf_addr_low);
+ ql_printf("\t buf_addr%d_high = 0x%x\n",
+ i, pkt->oal_entry[i].buf_addr_high);
+ ql_printf("\t buf%d_len = 0x%x\n",
+ i, pkt->oal_entry[i].buf_len);
+ }
+ oal_entry = (struct oal_entry *)oal;
+ ql_printf("\t additional %d tx descriptors in OAL\n",
+ (number - TX_DESC_PER_IOCB + 1));
+ for (i = 0; i < (number-TX_DESC_PER_IOCB + 1); i++) {
+ ql_printf("\t buf_addr%d_low = 0x%x\n",
+ i, oal_entry[i].buf_addr_low);
+ ql_printf("\t buf_addr%d_high = 0x%x\n",
+ i, oal_entry[i].buf_addr_high);
+ ql_printf("\t buf%d_len = 0x%x\n",
+ i, oal_entry[i].buf_len);
+ }
+ } else {
+ for (i = 0; i < number; i++) {
+ ql_printf("\t buf_addr%d_low = 0x%x\n",
+ i, pkt->oal_entry[i].buf_addr_low);
+ ql_printf("\t buf_addr%d_high = 0x%x\n",
+ i, pkt->oal_entry[i].buf_addr_high);
+ ql_printf("\t buf%d_len = 0x%x\n",
+ i, pkt->oal_entry[i].buf_len);
+ }
+ }
+ ql_printf("ql_dump_req_pkt:exiting\n");
+}
+
+/*
+ * Print PCI configuration
+ */
+void
+ql_dump_pci_config(qlge_t *qlge)
+{
+ qlge->pci_cfg.vendor_id = (uint16_t)
+ pci_config_get16(qlge->pci_handle, PCI_CONF_VENID);
+
+ qlge->pci_cfg.device_id = (uint16_t)
+ pci_config_get16(qlge->pci_handle, PCI_CONF_DEVID);
+
+ qlge->pci_cfg.command = (uint16_t)
+ pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
+
+ qlge->pci_cfg.status = (uint16_t)
+ pci_config_get16(qlge->pci_handle, PCI_CONF_STAT);
+
+ qlge->pci_cfg.revision = (uint8_t)
+ pci_config_get8(qlge->pci_handle, PCI_CONF_REVID);
+
+ qlge->pci_cfg.prog_class = (uint8_t)
+ pci_config_get8(qlge->pci_handle, PCI_CONF_PROGCLASS);
+
+ qlge->pci_cfg.sub_class = (uint8_t)
+ pci_config_get8(qlge->pci_handle, PCI_CONF_SUBCLASS);
+
+ qlge->pci_cfg.base_class = (uint8_t)
+ pci_config_get8(qlge->pci_handle, PCI_CONF_BASCLASS);
+
+ qlge->pci_cfg.cache_line_size = (uint8_t)
+ pci_config_get8(qlge->pci_handle, PCI_CONF_CACHE_LINESZ);
+
+ qlge->pci_cfg.latency_timer = (uint8_t)
+ pci_config_get8(qlge->pci_handle, PCI_CONF_LATENCY_TIMER);
+
+ qlge->pci_cfg.header_type = (uint8_t)
+ pci_config_get8(qlge->pci_handle, PCI_CONF_HEADER);
+
+ qlge->pci_cfg.io_base_address =
+ pci_config_get32(qlge->pci_handle, PCI_CONF_BASE0);
+
+ qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower =
+ pci_config_get32(qlge->pci_handle, PCI_CONF_BASE1);
+
+ qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper =
+ pci_config_get32(qlge->pci_handle, PCI_CONF_BASE2);
+
+ qlge->pci_cfg.pci_doorbell_mem_base_address_lower =
+ pci_config_get32(qlge->pci_handle, PCI_CONF_BASE3);
+
+ qlge->pci_cfg.pci_doorbell_mem_base_address_upper =
+ pci_config_get32(qlge->pci_handle, PCI_CONF_BASE4);
+
+ qlge->pci_cfg.sub_vendor_id = (uint16_t)
+ pci_config_get16(qlge->pci_handle, PCI_CONF_SUBVENID);
+
+ qlge->pci_cfg.sub_device_id = (uint16_t)
+ pci_config_get16(qlge->pci_handle, PCI_CONF_SUBSYSID);
+
+ qlge->pci_cfg.expansion_rom =
+ pci_config_get32(qlge->pci_handle, PCI_CONF_ROM);
+
+ qlge->pci_cfg.intr_line = (uint8_t)
+ pci_config_get8(qlge->pci_handle, PCI_CONF_ILINE);
+
+ qlge->pci_cfg.intr_pin = (uint8_t)
+ pci_config_get8(qlge->pci_handle, PCI_CONF_IPIN);
+
+ qlge->pci_cfg.min_grant = (uint8_t)
+ pci_config_get8(qlge->pci_handle, PCI_CONF_MIN_G);
+
+ qlge->pci_cfg.max_latency = (uint8_t)
+ pci_config_get8(qlge->pci_handle, PCI_CONF_MAX_L);
+
+ qlge->pci_cfg.pcie_device_control = (uint16_t)
+ pci_config_get16(qlge->pci_handle, 0x54);
+
+ qlge->pci_cfg.link_status = (uint16_t)
+ pci_config_get16(qlge->pci_handle, 0x5e);
+
+ qlge->pci_cfg.msi_msg_control = (uint16_t)
+ pci_config_get16(qlge->pci_handle, 0x8a);
+
+ qlge->pci_cfg.msi_x_msg_control = (uint16_t)
+ pci_config_get16(qlge->pci_handle, 0xa2);
+
+ if (qlge->ql_dbgprnt & DBG_GLD) {
+ ql_printf("%s(%d): enter\n",
+ __func__, qlge->instance);
+ ql_printf("\tvendorid =0x%x.\n",
+ qlge->pci_cfg.vendor_id);
+ ql_printf("\tdeviceid =0x%x.\n",
+ qlge->pci_cfg.device_id);
+ ql_printf("\tcommand =0x%x.\n",
+ qlge->pci_cfg.command);
+ ql_printf("\tstatus =0x%x.\n",
+ qlge->pci_cfg.status);
+ ql_printf("\trevision id =0x%x.\n",
+ qlge->pci_cfg.revision);
+ ql_printf("\tprogram class =0x%x.\n",
+ qlge->pci_cfg.prog_class);
+ ql_printf("\tsubclass code =0x%x.\n",
+ qlge->pci_cfg.sub_class);
+ ql_printf("\tbase class code =0x%x.\n",
+ qlge->pci_cfg.base_class);
+ ql_printf("\tcache line size =0x%x.\n",
+ qlge->pci_cfg.cache_line_size);
+ ql_printf("\tlatency timer =0x%x.\n",
+ qlge->pci_cfg.latency_timer);
+ ql_printf("\theader =0x%x.\n",
+ qlge->pci_cfg.header_type);
+ ql_printf("\tI/O Base Register Address0 =0x%x.\n",
+ qlge->pci_cfg.io_base_address);
+ ql_printf("\tpci_cntl_reg_set_mem_base_address_lower =0x%x.\n",
+ qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower);
+ ql_printf("\tpci_cntl_reg_set_mem_base_address_upper =0x%x.\n",
+ qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper);
+ ql_printf("\tpci_doorbell_mem_base_address_lower =0x%x.\n",
+ qlge->pci_cfg.pci_doorbell_mem_base_address_lower);
+ ql_printf("\tpci_doorbell_mem_base_address_upper =0x%x.\n",
+ qlge->pci_cfg.pci_doorbell_mem_base_address_upper);
+		ql_printf("\tSubsystem Vendor Id =0x%x.\n",
+		    qlge->pci_cfg.sub_vendor_id);
+		ql_printf("\tSubsystem Id =0x%x.\n",
+		    qlge->pci_cfg.sub_device_id);
+ ql_printf("\tExpansion ROM Base Register =0x%x.\n",
+ qlge->pci_cfg.expansion_rom);
+ ql_printf("\tInterrupt Line =0x%x.\n",
+ qlge->pci_cfg.intr_line);
+ ql_printf("\tInterrupt Pin =0x%x.\n",
+ qlge->pci_cfg.intr_pin);
+ ql_printf("\tMin Grant =0x%x.\n",
+ qlge->pci_cfg.min_grant);
+		ql_printf("\tMax Latency =0x%x.\n",
+ qlge->pci_cfg.max_latency);
+ ql_printf("\tdevice_control =0x%x.\n",
+ qlge->pci_cfg.pcie_device_control);
+ ql_printf("\tlink_status =0x%x.\n",
+ qlge->pci_cfg.link_status);
+ ql_printf("\tmsi_msg_control =0x%x.\n",
+ qlge->pci_cfg.msi_msg_control);
+ ql_printf("\tmsi_x_msg_control =0x%x.\n",
+ qlge->pci_cfg.msi_x_msg_control);
+
+ ql_printf("%s(%d): exit\n", __func__, qlge->instance);
+ }
+}
+
+/*
+ * Print a formatted string
+ */
+void
+ql_printf(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vcmn_err(CE_CONT, fmt, ap);
+ va_end(ap);
+}
+
+/*
+ * Read all control registers value and save in a string
+ */
+static uint32_t
+read_ctrl_reg_set(qlge_t *qlge, caddr_t bufp)
+{
+ int i, j;
+ uint32_t data;
+ caddr_t bp = bufp;
+ uint32_t cnt;
+
+	/* read registers 0 - 0xfc */
+ for (i = 0, j = 0; i <= 0xfc; i += 4) {
+ data = ql_read_reg(qlge, i);
+ (void) sprintf(bp, "Register[%x] = 0x%x\n", i, data);
+ bp += strlen(bp);
+ if (i == REG_INTERRUPT_ENABLE) {
+ /* Read */
+ data = INTR_EN_TYPE_READ;
+ ql_write_reg(qlge, i, (data | (data << 16)));
+ data = ql_read_reg(qlge, i);
+ if (data & INTR_EN_EN) {
+ (void) sprintf(bp, "Intr0 enabled: 0x%x\n",
+ data);
+ bp += strlen(bp);
+ } else {
+ (void) sprintf(bp, "Intr0 disabled: 0x%x\n",
+ data);
+ bp += strlen(bp);
+ }
+ }
+ j++;
+ }
+ *bp = '\0';
+ bp++;
+ cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
+ QL_PRINT(DBG_GLD, ("%s(%d) %x bytes to export\n",
+ __func__, qlge->instance, cnt));
+ return (cnt);
+}
+
+/*
+ * Get address and size of image tables in flash memory
+ */
+static int
+ql_get_flash_table_region_info(qlge_t *qlge, uint32_t region, uint32_t *addr,
+ uint32_t *size)
+{
+ int rval = DDI_SUCCESS;
+
+ switch (region) {
+ case FLT_REGION_FDT:
+ *addr = ISP_8100_FDT_ADDR;
+ *size = ISP_8100_FDT_SIZE;
+ break;
+ case FLT_REGION_FLT:
+ *addr = ISP_8100_FLT_ADDR;
+ *size = ISP_8100_FLT_SIZE;
+ break;
+ case FLT_REGION_NIC_BOOT_CODE:
+ *addr = ISP_8100_NIC_BOOT_CODE_ADDR;
+ *size = ISP_8100_NIC_BOOT_CODE_SIZE;
+ break;
+ case FLT_REGION_MPI_FW_USE:
+ *addr = ISP_8100_MPI_FW_USE_ADDR;
+ *size = ISP_8100_MPI_FW_USE_SIZE;
+ break;
+ case FLT_REGION_MPI_RISC_FW:
+ *addr = ISP_8100_MPI_RISC_FW_ADDR;
+ *size = ISP_8100_MPI_RISC_FW_SIZE;
+ break;
+ case FLT_REGION_VPD0:
+ *addr = ISP_8100_VPD0_ADDR;
+ *size = ISP_8100_VPD0_SIZE;
+ break;
+ case FLT_REGION_NIC_PARAM0:
+ *addr = ISP_8100_NIC_PARAM0_ADDR;
+ *size = ISP_8100_NIC_PARAM0_SIZE;
+ break;
+ case FLT_REGION_VPD1:
+ *addr = ISP_8100_VPD1_ADDR;
+ *size = ISP_8100_VPD1_SIZE;
+ break;
+ case FLT_REGION_NIC_PARAM1:
+ *addr = ISP_8100_NIC_PARAM1_ADDR;
+ *size = ISP_8100_NIC_PARAM1_SIZE;
+ break;
+ case FLT_REGION_MPI_CFG:
+ *addr = ISP_8100_MPI_CFG_ADDR;
+ *size = ISP_8100_MPI_CFG_SIZE;
+ break;
+ case FLT_REGION_EDC_PHY_FW:
+ *addr = ISP_8100_EDC_PHY_FW_ADDR;
+ *size = ISP_8100_EDC_PHY_FW_SIZE;
+ break;
+ case FLT_REGION_FC_BOOT_CODE:
+ *addr = ISP_8100_FC_BOOT_CODE_ADDR;
+ *size = ISP_8100_FC_BOOT_CODE_SIZE;
+ break;
+ case FLT_REGION_FC_FW:
+ *addr = ISP_8100_FC_FW_ADDR;
+ *size = ISP_8100_FC_FW_SIZE;
+ break;
+ default:
+ cmn_err(CE_WARN, "%s(%d): Unknown region code %x!",
+ __func__, qlge->instance, region);
+ rval = DDI_FAILURE;
+ }
+ return (rval);
+}
+
+/*
+ * Get PCI bus information
+ */
+static int
+ql_get_pci_bus_info(qlge_t *qlge, uint32_t *pci_bus_info_ptr)
+{
+ dev_info_t *dip;
+ int *options;
+ unsigned int noptions;
+ int rval = DDI_FAILURE;
+
+ dip = qlge->dip;
+ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
+ "assigned-addresses", &options, &noptions) == DDI_PROP_SUCCESS) {
+ QL_PRINT(DBG_GLD, ("%s(%d) %d options\n",
+ __func__, qlge->instance, noptions));
+
+ if (noptions != 0) {
+ *pci_bus_info_ptr = options[0];
+ rval = DDI_SUCCESS;
+ }
+
+ ddi_prop_free(options);
+ }
+ return (rval);
+}
+
+/*
+ * Build the first packet header when more than 1 KB of data must be transferred
+ */
+void
+build_init_pkt_header(qlge_t *qlge, ioctl_header_info_t *pheader, uint32_t size)
+{
+ qlge->ioctl_total_length = size;
+ QL_PRINT(DBG_GLD, ("%d bytes used in kernel buffer\n",
+ qlge->ioctl_total_length));
+ qlge->expected_trans_times =
+ (uint16_t)(qlge->ioctl_total_length / IOCTL_MAX_DATA_LEN);
+ if ((qlge->ioctl_total_length % IOCTL_MAX_DATA_LEN) != 0)
+ qlge->expected_trans_times++;
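+
+	/*
+	 * This is a ceiling division: if, say, IOCTL_MAX_DATA_LEN were
+	 * 1024 bytes (an illustrative value only), a 2500 byte transfer
+	 * would give 2500 / 1024 = 2 full transactions plus one more for
+	 * the 452 byte remainder, i.e. 3 expected transactions.
+	 */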
+	QL_PRINT(DBG_GLD, ("expected transfer times %d\n",
+ qlge->expected_trans_times));
+ qlge->ioctl_transferred_bytes = 0;
+ /*
+	 * tell the user the total number of bytes to expect in the
+	 * following transactions
+ */
+ pheader->version = 0;
+ pheader->total_length = qlge->ioctl_total_length;
+ pheader->payload_length = 0;
+ pheader->expected_trans_times = qlge->expected_trans_times;
+}
+
+/*
+ * Do ioctl on hardware
+ */
+/* ARGSUSED */
+enum ioc_reply
+ql_chip_ioctl(qlge_t *qlge, queue_t *q, mblk_t *mp)
+{
+ mblk_t *dmp;
+ int cmd, i, rval;
+ struct ql_device_reg *reg;
+ struct ql_pci_reg *pci_reg;
+ struct ql_flash_io_info *flash_io_info_ptr;
+ pci_cfg_t *pci_cfg;
+ uint32_t *pvalue;
+ struct qlnic_prop_info *prop_ptr;
+ ql_adapter_info_t *adapter_info_ptr;
+ uint16_t payload_len;
+ uint32_t remaining_bytes;
+ ioctl_header_info_t *pheader;
+ caddr_t bp, bdesc;
+ uint32_t len;
+ uint32_t addr, size, region;
+ struct iocblk *iocp = (struct iocblk *)(void *)mp->b_rptr;
+ uint16_t iltds_image_entry_regions[] = {
+ FLT_REGION_NIC_BOOT_CODE, FLT_REGION_MPI_RISC_FW,
+ FLT_REGION_EDC_PHY_FW, FLT_REGION_FC_BOOT_CODE,
+ FLT_REGION_FC_FW};
+ ql_iltds_description_header_t *iltds_ptr;
+ ql_iltds_header_t *ql_iltds_header_ptr;
+ uint32_t offset;
+ uint16_t requested_dump;
+
+ /*
+ * There should be a M_DATA mblk following
+ * the initial M_IOCTL mblk
+ */
+ if ((dmp = mp->b_cont) == NULL) {
+ cmn_err(CE_WARN, "%s(%d) b_count NULL",
+ __func__, qlge->instance);
+ return (IOC_INVAL);
+ }
+
+ cmd = iocp->ioc_cmd;
+
+ reg = (struct ql_device_reg *)(void *)dmp->b_rptr;
+ pci_reg = (struct ql_pci_reg *)(void *)dmp->b_rptr;
+ pvalue = (uint32_t *)(void *)dmp->b_rptr;
+ flash_io_info_ptr = (struct ql_flash_io_info *)(void *)dmp->b_rptr;
+ adapter_info_ptr = (ql_adapter_info_t *)(void *)dmp->b_rptr;
+
+ switch (cmd) {
+ case QLA_GET_DBGLEAVEL:
+ if (iocp->ioc_count != sizeof (*pvalue)) {
+ return (IOC_INVAL);
+ }
+ *pvalue = qlge->ql_dbgprnt;
+ break;
+
+ case QLA_SET_DBGLEAVEL:
+ if (iocp->ioc_count != sizeof (*pvalue)) {
+ return (IOC_INVAL);
+ }
+ qlge->ql_dbgprnt = *pvalue;
+ break;
+
+ case QLA_WRITE_REG:
+ if (iocp->ioc_count != sizeof (*reg)) {
+ return (IOC_INVAL);
+ }
+ ql_write_reg(qlge, reg->addr, reg->value);
+ break;
+
+ case QLA_READ_PCI_REG:
+ if (iocp->ioc_count != sizeof (*pci_reg)) {
+ return (IOC_INVAL);
+ }
+ /* protect against bad addr values */
+ if (pci_reg->addr > 0xff)
+ return (IOC_INVAL);
+ pci_reg->value =
+ (uint16_t)pci_config_get16(qlge->pci_handle,
+ pci_reg->addr);
+ break;
+
+ case QLA_WRITE_PCI_REG:
+ if (iocp->ioc_count != sizeof (*pci_reg)) {
+ return (IOC_INVAL);
+ }
+ /* protect against bad addr values */
+ if (pci_reg->addr > 0xff)
+ return (IOC_INVAL);
+ pci_config_put16(qlge->pci_handle, pci_reg->addr,
+ pci_reg->value);
+ break;
+
+ case QLA_PCI_STATUS:
+ len = (uint32_t)iocp->ioc_count;
+ if (len != sizeof (pci_cfg_t)) {
+ cmn_err(CE_WARN, "QLA_PCI_STATUS size error, "
+ "driver size 0x%x not 0x%x ",
+ (int)MBLKL(dmp),
+ (int)sizeof (pci_cfg_t));
+ return (IOC_INVAL);
+ }
+ pci_cfg = (pci_cfg_t *)(void *)dmp->b_rptr;
+ /* get PCI configuration */
+ bcopy((const void *)(&qlge->pci_cfg),
+ (void *)pci_cfg, len);
+ break;
+
+ case QLA_GET_PROP:
+ len = (uint32_t)iocp->ioc_count;
+ if (len != sizeof (struct qlnic_prop_info)) {
+ cmn_err(CE_WARN, "QLA_GET_PROP size error, "
+ "driver size 0x%x not 0x%x ",
+ (int)MBLKL(dmp),
+			    (int)sizeof (struct qlnic_prop_info));
+ return (IOC_INVAL);
+ }
+ prop_ptr =
+ (struct qlnic_prop_info *)(void *)dmp->b_rptr;
+ /* get various properties */
+ ql_get_firmware_version(qlge,
+ &prop_ptr->mpi_version);
+ ql_get_fw_state(qlge, &prop_ptr->fw_state);
+ qlge_get_link_status(qlge, &prop_ptr->link_status);
+ break;
+
+ case QLA_LIST_ADAPTER_INFO:
+		/* count must match exactly */
+ if (iocp->ioc_count != sizeof (ql_adapter_info_t)) {
+ return (IOC_INVAL);
+ }
+ if (ql_get_pci_bus_info(qlge,
+ &(adapter_info_ptr->pci_binding)) != DDI_SUCCESS) {
+ return (IOC_INVAL);
+ }
+ adapter_info_ptr->vendor_id =
+ qlge->pci_cfg.vendor_id;
+ adapter_info_ptr->sub_vendor_id =
+ qlge->pci_cfg.sub_vendor_id;
+ adapter_info_ptr->device_id =
+ qlge->pci_cfg.device_id;
+ adapter_info_ptr->sub_device_id =
+ qlge->pci_cfg.sub_device_id;
+
+ bcopy(qlge->unicst_addr[0].addr.ether_addr_octet,
+ &(adapter_info_ptr->cur_addr), ETHERADDRL);
+ break;
+
+ case QLA_SHOW_REGION:
+ len = (uint32_t)iocp->ioc_count;
+ bdesc = (caddr_t)dmp->b_rptr;
+ if (CFG_IST(qlge, CFG_CHIP_8100))
+ (void) sprintf(bdesc, "ISP 8100 available "
+ "regions %s", ISP_8100_REGION);
+ break;
+
+ case QLA_CONTINUE_COPY_OUT:
+ if (qlge->ioctl_buf_ptr == NULL)
+ return (IOC_INVAL);
+ len = (uint32_t)iocp->ioc_count;
+ bp = qlge->ioctl_buf_ptr;
+ bp += qlge->ioctl_transferred_bytes;
+ remaining_bytes =
+ qlge->ioctl_total_length -
+ qlge->ioctl_transferred_bytes;
+ /* how many data bytes sent this time */
+ payload_len =
+ (uint16_t)min(IOCTL_MAX_DATA_LEN, remaining_bytes);
+ /* create packet header */
+ pheader = (ioctl_header_info_t *)(void *)dmp->b_rptr;
+ pheader->version = 0;
+ pheader->total_length = qlge->ioctl_total_length;
+ pheader->expected_trans_times =
+ qlge->expected_trans_times;
+ pheader->payload_length = payload_len;
+ /* create packet payload */
+ bdesc = (caddr_t)dmp->b_rptr;
+ bdesc += IOCTL_HEADER_LEN;
+ bcopy(bp, bdesc, pheader->payload_length);
+ qlge->ioctl_transferred_bytes +=
+ pheader->payload_length;
+ QL_PRINT(DBG_GLD, ("QLA_CONTINUE_COPY_OUT, %d bytes"
+ " exported \n", payload_len));
+ if (qlge->ioctl_transferred_bytes >=
+ qlge->ioctl_total_length) {
+ QL_PRINT(DBG_GLD, ("all data out,clean up \n"));
+ kmem_free(qlge->ioctl_buf_ptr,
+ qlge->ioctl_buf_lenth);
+ qlge->ioctl_buf_ptr = NULL;
+ qlge->ioctl_buf_lenth = 0;
+ }
+ iocp->ioc_count = len;
+ break;
+
+ case QLA_CONTINUE_COPY_IN:
+ if (qlge->ioctl_buf_ptr == NULL)
+ return (IOC_INVAL);
+ len = (uint32_t)iocp->ioc_count;
+ bdesc = qlge->ioctl_buf_ptr;
+ bdesc += qlge->ioctl_transferred_bytes;
+ remaining_bytes = qlge->ioctl_total_length -
+ qlge->ioctl_transferred_bytes;
+ pheader = (ioctl_header_info_t *)(void *)dmp->b_rptr;
+ payload_len = pheader->payload_length;
+ /* create packet header */
+ pheader->version = 0;
+ pheader->total_length = qlge->ioctl_total_length;
+ pheader->expected_trans_times =
+ qlge->expected_trans_times;
+ /* get packet payload */
+ bp = (caddr_t)dmp->b_rptr;
+ bp += IOCTL_HEADER_LEN;
+ bcopy(bp, bdesc, pheader->payload_length);
+ qlge->ioctl_transferred_bytes +=
+ pheader->payload_length;
+ QL_PRINT(DBG_GLD, ("QLA_CONTINUE_COPY_IN, %d bytes "
+ "received \n", payload_len));
+ if (qlge->ioctl_transferred_bytes >=
+ qlge->ioctl_total_length) {
+ region = pheader->option[0];
+ ql_get_flash_table_region_info(qlge, region,
+ &addr, &size);
+ QL_PRINT(DBG_GLD, ("write data to region 0x%x,"
+ " addr 0x%x, max size %d bytes\n",
+ region, addr, size));
+ qlge_load_flash(qlge,
+ (uint8_t *)qlge->ioctl_buf_ptr,
+ qlge->ioctl_transferred_bytes /* size */,
+ addr);
+ QL_PRINT(DBG_GLD, ("all %d data written, do "
+ "clean up \n",
+ qlge->ioctl_transferred_bytes));
+ kmem_free(qlge->ioctl_buf_ptr,
+ qlge->ioctl_buf_lenth);
+ qlge->ioctl_buf_ptr = NULL;
+ qlge->ioctl_buf_lenth = 0;
+ }
+ iocp->ioc_count = len;
+ break;
+
+ case QLA_READ_CONTRL_REGISTERS:
+ if (qlge->ioctl_buf_ptr == NULL) {
+ qlge->ioctl_buf_lenth =
+ IOCTL_MAX_BUF_SIZE; /* 512k */
+ qlge->ioctl_buf_ptr =
+ kmem_zalloc(qlge->ioctl_buf_lenth,
+ KM_SLEEP);
+ if (qlge->ioctl_buf_ptr == NULL) {
+ cmn_err(CE_WARN, "%s(%d): Unable to "
+ "allocate ioctl buffer",
+ __func__, qlge->instance);
+ return (IOC_INVAL);
+ }
+ }
+ len = read_ctrl_reg_set(qlge, qlge->ioctl_buf_ptr);
+ pheader = (ioctl_header_info_t *)(void *)dmp->b_rptr;
+ /* build initial ioctl packet header */
+ build_init_pkt_header(qlge, pheader, len);
+ iocp->ioc_count = sizeof (*pheader);
+ break;
+
+ case QLA_SUPPORTED_DUMP_TYPES: /* show available regions */
+ len = (uint32_t)iocp->ioc_count;
+ bdesc = (caddr_t)dmp->b_rptr;
+ if (CFG_IST(qlge, CFG_CHIP_8100))
+ (void) sprintf(bdesc, "ISP 8100 supported dump"
+ " types: %s", ISP_8100_AVAILABLE_DUMPS);
+ break;
+
+ case QLA_GET_BINARY_CORE_DUMP:
+ len = (uint32_t)iocp->ioc_count;
+ requested_dump = *((uint16_t *)(void *)dmp->b_rptr);
+ rval = ql_binary_core_dump(qlge, requested_dump, &len);
+ if (rval == DDI_SUCCESS) {
+ pheader =
+ (ioctl_header_info_t *)(void *)dmp->b_rptr;
+ /* build initial ioctl packet header */
+ build_init_pkt_header(qlge, pheader, len);
+ iocp->ioc_count = sizeof (*pheader);
+ } else {
+ cmn_err(CE_WARN, "ql_binary_core_dump error");
+ return (IOC_INVAL);
+ }
+ break;
+
+ case QLA_TRIGGER_SYS_ERROR_EVENT:
+ ql_trigger_system_error_event(qlge);
+ break;
+
+ case QLA_READ_VPD:
+ if (qlge->ioctl_buf_ptr == NULL) {
+ qlge->ioctl_buf_lenth =
+ IOCTL_MAX_BUF_SIZE; /* 512k */
+ qlge->ioctl_buf_ptr =
+ kmem_zalloc(qlge->ioctl_buf_lenth,
+ KM_SLEEP);
+ if (qlge->ioctl_buf_ptr == NULL) {
+ cmn_err(CE_WARN, "%s(%d): Unable to "
+ "allocate ioctl buffer",
+ __func__, qlge->instance);
+ return (IOC_INVAL);
+ }
+ }
+ len = (uint32_t)iocp->ioc_count;
+ QL_PRINT(DBG_GLD, (" 0x%x user buffer available \n",
+ len));
+ ql_flash_vpd(qlge, (uint8_t *)qlge->ioctl_buf_ptr);
+ pheader = (ioctl_header_info_t *)(void *)dmp->b_rptr;
+ /* build initial ioctl packet header */
+ build_init_pkt_header(qlge, pheader,
+ ISP_8100_VPD0_SIZE);
+ iocp->ioc_count = sizeof (*pheader);
+ break;
+
+ case QLA_MANUAL_READ_FLASH:
+ if (qlge->ioctl_buf_ptr == NULL) {
+ qlge->ioctl_buf_lenth =
+ IOCTL_MAX_BUF_SIZE; /* 512k */
+ qlge->ioctl_buf_ptr =
+ kmem_zalloc(qlge->ioctl_buf_lenth,
+ KM_SLEEP);
+ if (qlge->ioctl_buf_ptr == NULL) {
+ cmn_err(CE_WARN, "%s(%d): Unable to "
+ "allocate ioctl buffer",
+ __func__, qlge->instance);
+ return (IOC_INVAL);
+ }
+ }
+ len = (uint32_t)iocp->ioc_count;
+ rval = qlge_dump_fcode(qlge,
+ (uint8_t *)qlge->ioctl_buf_ptr,
+ flash_io_info_ptr->size,
+ flash_io_info_ptr->addr);
+ if (rval != DDI_SUCCESS) {
+ return (IOC_INVAL);
+ }
+ pheader = (ioctl_header_info_t *)(void *)dmp->b_rptr;
+ /* build initial ioctl packet header */
+ build_init_pkt_header(qlge, pheader,
+ flash_io_info_ptr->size);
+ iocp->ioc_count = sizeof (*pheader);
+ break;
+
+ case QLA_READ_FLASH:
+ if (qlge->ioctl_buf_ptr == NULL) {
+ qlge->ioctl_buf_lenth = IOCTL_MAX_BUF_SIZE;
+ qlge->ioctl_buf_ptr =
+ kmem_zalloc(qlge->ioctl_buf_lenth,
+ KM_SLEEP);
+ if (qlge->ioctl_buf_ptr == NULL) {
+ cmn_err(CE_WARN, "%s(%d): Unable to"
+ "allocate ioctl buffer",
+ __func__, qlge->instance);
+ return (IOC_INVAL);
+ }
+ }
+ len = (uint32_t)iocp->ioc_count;
+ region = *pvalue;
+ if (ql_get_flash_table_region_info(qlge, region, &addr,
+ &size) != DDI_SUCCESS)
+ return (IOC_INVAL);
+ rval = qlge_dump_fcode(qlge,
+ (uint8_t *)qlge->ioctl_buf_ptr,
+ size, addr);
+ if (rval != DDI_SUCCESS) {
+ return (IOC_INVAL);
+ }
+ pheader = (ioctl_header_info_t *)(void *)dmp->b_rptr;
+ /* build initial ioctl packet header */
+ build_init_pkt_header(qlge, pheader, size);
+ iocp->ioc_count = sizeof (*pheader);
+ break;
+
+ case QLA_WRITE_FLASH:
+ len = (uint32_t)iocp->ioc_count;
+ pheader = (ioctl_header_info_t *)(void *)dmp->b_rptr;
+ region = pheader->option[0];
+ qlge->ioctl_buf_lenth = pheader->total_length;
+ qlge->ioctl_total_length = pheader->total_length;
+ qlge->expected_trans_times =
+ pheader->expected_trans_times;
+ qlge->ioctl_transferred_bytes = 0;
+ if (qlge->ioctl_buf_ptr == NULL) {
+ qlge->ioctl_buf_ptr =
+ kmem_zalloc(qlge->ioctl_buf_lenth,
+ KM_SLEEP);
+ if (qlge->ioctl_buf_ptr == NULL) {
+ cmn_err(CE_WARN, "%s(%d): Unable to "
+ "allocate ioctl buffer",
+ __func__, qlge->instance);
+ return (IOC_INVAL);
+ }
+ }
+ QL_PRINT(DBG_GLD, ("QLA_WRITE_FLASH write to region "
+ "%x, total buffer size 0x%x bytes\n",
+ region, qlge->ioctl_buf_lenth));
+ iocp->ioc_count = sizeof (*pheader);
+ break;
+
+ case QLA_READ_FW_IMAGE:
+ if (qlge->ioctl_buf_ptr != NULL) {
+ kmem_free(qlge->ioctl_buf_ptr,
+ qlge->ioctl_buf_lenth);
+ }
+ qlge->ioctl_buf_lenth = IOCTL_MAX_BUF_SIZE * 4;
+ qlge->ioctl_buf_ptr = kmem_zalloc(qlge->ioctl_buf_lenth,
+ KM_SLEEP);
+ if (qlge->ioctl_buf_ptr == NULL) {
+ cmn_err(CE_WARN, "%s(%d): Unable to "
+ "allocate ioctl buffer",
+ __func__, qlge->instance);
+ return (IOC_INVAL);
+ }
+ len = (uint32_t)iocp->ioc_count;
+ iltds_ptr = (ql_iltds_description_header_t *)
+ (void *)qlge->ioctl_buf_ptr;
+ iltds_ptr->iltds_table_header.signature =
+ FLASH_ILTDS_SIGNATURE;
+ iltds_ptr->iltds_table_header.table_version = 1;
+ iltds_ptr->iltds_table_header.length =
+ ILTDS_DESCRIPTION_HEADERS_LEN;
+ iltds_ptr->iltds_table_header.number_entries =
+ IMAGE_TABLE_IMAGE_DEFAULT_ENTRIES +
+ 1 /* timestamp */;
+ iltds_ptr->iltds_table_header.reserved = 0;
+ iltds_ptr->iltds_table_header.version = 1;
+ /* where is the flash data saved */
+ bdesc = qlge->ioctl_buf_ptr +
+ ILTDS_DESCRIPTION_HEADERS_LEN;
+ offset = iltds_ptr->iltds_table_header.length;
+ for (i = 0; i < IMAGE_TABLE_IMAGE_DEFAULT_ENTRIES;
+ i++) {
+ region = iltds_image_entry_regions[i];
+ if (ql_get_flash_table_region_info(qlge,
+ region, &addr, &size) != DDI_SUCCESS)
+ return (IOC_INVAL);
+ QL_PRINT(DBG_GLD, ("region %x addr 0x%x, 0x%x "
+ "bytes\n", region, addr, size));
+ /* Dump one image entry */
+ rval = qlge_dump_fcode(qlge, (uint8_t *)bdesc,
+ size, addr);
+ if (rval != DDI_SUCCESS) {
+ return (IOC_INVAL);
+ }
+ bdesc += size;
+ iltds_ptr->img_entry[i].region_type =
+ (uint16_t)region;
+ iltds_ptr->img_entry[i].region_version_len = 0;
+ iltds_ptr->img_entry[i].region_version[0] = 0;
+ iltds_ptr->img_entry[i].region_version[1] = 0;
+ iltds_ptr->img_entry[i].region_version[2] = 0;
+ iltds_ptr->img_entry[i].offset_lo = LSW(offset);
+ iltds_ptr->img_entry[i].offset_hi = MSW(offset);
+ iltds_ptr->img_entry[i].size_lo = LSW(size);
+ iltds_ptr->img_entry[i].size_hi = MSW(size);
+ iltds_ptr->img_entry[i].swap_mode = 0;
+ iltds_ptr->img_entry[i].card_type = 0;
+ QL_PRINT(DBG_GLD, ("image offset %x size %x "
+ "bytes\n", offset, size));
+ QL_PRINT(DBG_GLD, ("offset %x lsw %x msw %x"
+ " \n", offset, LSW(offset), MSW(offset)));
+ offset += size;
+ }
+ /* Last entry */
+ iltds_ptr->time_stamp.region_type =
+ FLT_REGION_TIME_STAMP;
+ iltds_ptr->time_stamp.region_version_len = 0;
+ iltds_ptr->time_stamp.region_version[0] = 0;
+ iltds_ptr->time_stamp.region_version[1] = 0;
+ iltds_ptr->time_stamp.region_version[2] = 0;
+ iltds_ptr->time_stamp.year = 0x09;
+ iltds_ptr->time_stamp.month = 0x01;
+ iltds_ptr->time_stamp.day = 0x20;
+ iltds_ptr->time_stamp.hour = 0x14;
+ iltds_ptr->time_stamp.min = 0x20;
+ iltds_ptr->time_stamp.sec = 0x50;
+
+ pheader = (ioctl_header_info_t *)(void *)dmp->b_rptr;
+ /* build initial ioctl packet header */
+ build_init_pkt_header(qlge, pheader, offset);
+ iocp->ioc_count = sizeof (*pheader);
+ break;
+
+ case QLA_WRITE_FW_IMAGE_HEADERS:
+ len = (uint32_t)iocp->ioc_count;
+ if (len == 0)
+ return (IOC_INVAL);
+ ql_iltds_header_ptr =
+ (ql_iltds_header_t *)(void *)dmp->b_rptr;
+ if (len != ql_iltds_header_ptr->length) {
+ cmn_err(CE_WARN, "QLA_WRITE_FW_IMAGE_HEADERS "
+ "data length error!"
+ " %x bytes expected, %x received",
+ ql_iltds_header_ptr->length, len);
+ return (IOC_INVAL);
+ }
+ QL_PRINT(DBG_GLD, ("Fw Image header len 0x%x bytes, "
+ "0x%x entries\n",
+ len, ql_iltds_header_ptr->number_entries));
+ ql_dump_buf("all copy in data:\n",
+ (uint8_t *)dmp->b_rptr, 8, len);
+ mp->b_cont = NULL;
+ break;
+
+ case QLA_SOFT_RESET:
+ iocp->ioc_count = 0;
+ ql_wake_asic_reset_soft_intr(qlge);
+ QL_PRINT(DBG_GLD, ("QLA_SOFT_RESET started \n"));
+ break;
+
+ default:
+ return (IOC_INVAL);
+ }
+
+ return (IOC_REPLY);
+}
+
+/*
+ * Loopback ioctl code
+ */
+static lb_property_t loopmodes[] = {
+ { normal, "normal", QLGE_LOOP_NONE },
+ { internal, "parallel", QLGE_LOOP_INTERNAL_PARALLEL },
+ { internal, "serial", QLGE_LOOP_INTERNAL_SERIAL },
+};
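+
+/*
+ * These entries are returned verbatim by LB_GET_INFO below, and
+ * LB_SET_MODE accepts the value field of any entry in this table.
+ */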
+
+/*
+ * Set Loopback mode
+ */
+static enum ioc_reply
+qlge_set_loop_mode(qlge_t *qlge, uint32_t mode)
+{
+ /*
+ * If the mode is same as current mode ...
+ */
+ if (mode == qlge->loop_back_mode)
+ return (IOC_ACK);
+
+ /*
+ * Validate the requested mode
+ */
+ switch (mode) {
+ default:
+ return (IOC_INVAL);
+
+ case QLGE_LOOP_NONE:
+ case QLGE_LOOP_INTERNAL_PARALLEL:
+ case QLGE_LOOP_INTERNAL_SERIAL:
+ break;
+ }
+
+ /*
+ * All OK; reprogram for the new mode ...
+ */
+ qlge->loop_back_mode = mode;
+ mutex_enter(&qlge->mbx_mutex);
+ ql_set_port_cfg(qlge);
+ mutex_exit(&qlge->mbx_mutex);
+ return (IOC_REPLY);
+}
+/*
+ * Loopback ioctl
+ */
+/* ARGSUSED */
+enum ioc_reply
+ql_loop_ioctl(qlge_t *qlge, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
+{
+ lb_info_sz_t *lbsp;
+ lb_property_t *lbpp;
+ uint32_t *lbmp;
+ int cmd;
+
+ _NOTE(ARGUNUSED(wq))
+ /*
+ * Validate format of ioctl
+ */
+ if (mp->b_cont == NULL)
+ return (IOC_INVAL);
+
+ cmd = iocp->ioc_cmd;
+ switch (cmd) {
+ default:
+ QL_PRINT(DBG_GLD, ("%s(%d) invalid cmd 0x%x\n",
+ __func__, qlge->instance, cmd));
+ return (IOC_INVAL);
+
+ case LB_GET_INFO_SIZE:
+ if (iocp->ioc_count != sizeof (lb_info_sz_t))
+ return (IOC_INVAL);
+ lbsp = (void *)mp->b_cont->b_rptr;
+ *lbsp = sizeof (loopmodes);
+ return (IOC_REPLY);
+
+ case LB_GET_INFO:
+ if (iocp->ioc_count != sizeof (loopmodes))
+ return (IOC_INVAL);
+ lbpp = (void *)mp->b_cont->b_rptr;
+ bcopy(loopmodes, lbpp, sizeof (loopmodes));
+ return (IOC_REPLY);
+
+ case LB_GET_MODE:
+ if (iocp->ioc_count != sizeof (uint32_t))
+ return (IOC_INVAL);
+ lbmp = (void *)mp->b_cont->b_rptr;
+ *lbmp = qlge->loop_back_mode;
+ return (IOC_REPLY);
+
+ case LB_SET_MODE:
+ if (iocp->ioc_count != sizeof (uint32_t))
+ return (IOC_INVAL);
+ lbmp = (void *)mp->b_cont->b_rptr;
+ return (qlge_set_loop_mode(qlge, *lbmp));
+ }
+}
+
+/*
+ * Dumps binary data from firmware.
+ */
+static int
+ql_8xxx_binary_core_dump_with_header(qlge_t *qlge, caddr_t buf,
+ uint32_t *len_ptr)
+{
+ caddr_t bp = buf;
+ int rval = DDI_SUCCESS;
+ ql_dump_image_header_t *ql_dump_image_header_ptr =
+ (ql_dump_image_header_t *)(void *)bp;
+
+ ql_dump_image_header_ptr->signature = DUMP_IMAGE_HEADER_SIGNATURE;
+ ql_dump_image_header_ptr->version = 1;
+ ql_dump_image_header_ptr->header_length = 16;
+ ql_dump_image_header_ptr->data_type = DUMP_TYPE_CORE_DUMP;
+ /* point to real dump data area */
+ bp += sizeof (ql_dump_image_header_t);
+ bcopy(&qlge->ql_mpi_coredump, bp, sizeof (ql_mpi_coredump_t));
+ ql_dump_image_header_ptr->data_length = sizeof (ql_mpi_coredump_t);
+	/* simple additive checksum over the header fields */
+	ql_dump_image_header_ptr->checksum = (uint16_t)
+	    (ql_dump_image_header_ptr->signature +
+	    ql_dump_image_header_ptr->version +
+	    ql_dump_image_header_ptr->header_length +
+	    ql_dump_image_header_ptr->data_type +
+	    ql_dump_image_header_ptr->data_length);
+
+	/* total length: header + data image */
+ *len_ptr = ql_dump_image_header_ptr->header_length +
+ ql_dump_image_header_ptr->data_length;
+ QL_PRINT(DBG_GLD, ("%s done,core dump lenth %d bytes\n",
+ __func__, *len_ptr));
+ return (rval);
+}
+
+/*
+ * Dump registers value in binary format
+ */
+static int
+ql_8xxx_binary_register_dump_with_header(qlge_t *qlge, caddr_t buf,
+ uint32_t *len_ptr)
+{
+ caddr_t bp = buf;
+ int i;
+ uint32_t *data_ptr;
+ int rval = DDI_SUCCESS;
+
+ ql_dump_image_header_t *ql_dump_image_header_ptr =
+ (ql_dump_image_header_t *)(void *)bp;
+ ql_dump_image_header_ptr->signature =
+ DUMP_IMAGE_HEADER_SIGNATURE;
+ ql_dump_image_header_ptr->version = 1;
+ ql_dump_image_header_ptr->header_length = 16;
+ ql_dump_image_header_ptr->data_type = DUMP_TYPE_REGISTER_DUMP;
+ /* point to real dump data area */
+ bp += sizeof (ql_dump_image_header_t);
+ data_ptr = (uint32_t *)(void *)bp;
+
+ for (i = 0; i <= 0xfc; i += 4) {
+ *data_ptr = ql_read_reg(qlge, i);
+ data_ptr++;
+ }
+ ql_dump_image_header_ptr->data_length = 0x100; /* 0 ~ 0xFF */
+	/* simple additive checksum over the header fields */
+	ql_dump_image_header_ptr->checksum = (uint16_t)
+	    (ql_dump_image_header_ptr->signature +
+	    ql_dump_image_header_ptr->version +
+	    ql_dump_image_header_ptr->header_length +
+	    ql_dump_image_header_ptr->data_type +
+	    ql_dump_image_header_ptr->data_length);
+
+	/* total length: header + data image */
+ *len_ptr = ql_dump_image_header_ptr->header_length +
+ ql_dump_image_header_ptr->data_length;
+
+ QL_PRINT(DBG_GLD, ("%s done, dump lenth %x bytes\n", __func__,
+ *len_ptr));
+
+ return (rval);
+}
+
+/*
+ * Core dump in binary format
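+ * Layout: a 16-byte description header, one or two image sections
+ * (core dump and/or register dump, each preceded by its own image
+ * header), and a 16-byte footer; header and footer both carry a
+ * microsecond timestamp.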
+ */
+static int
+ql_binary_core_dump(qlge_t *qlge, uint32_t requested_dumps, uint32_t *len_ptr)
+{
+ int rval = DDI_FAILURE;
+ uint32_t length, size = 0;
+ uint64_t timestamp;
+ caddr_t bp;
+ ql_dump_header_t *ql_dump_header_ptr;
+ ql_dump_footer_t *ql_dump_footer_ptr;
+
+ if (qlge->ioctl_buf_ptr == NULL) {
+ qlge->ioctl_buf_lenth = IOCTL_MAX_BUF_SIZE; /* 512k */
+ qlge->ioctl_buf_ptr =
+ kmem_zalloc(qlge->ioctl_buf_lenth, KM_SLEEP);
+ if (qlge->ioctl_buf_ptr == NULL) {
+ cmn_err(CE_WARN,
+ "%s(%d): Unable to allocate ioctl buffer",
+ __func__, qlge->instance);
+ goto out;
+ }
+ }
+
+ /* description info header */
+ ql_dump_header_ptr = (ql_dump_header_t *)(void *)qlge->ioctl_buf_ptr;
+ /* add QTSB signature */
+ ql_dump_header_ptr->signature = DUMP_DESCRIPTION_HEADER_SIGNATURE;
+ ql_dump_header_ptr->version = 1;
+ ql_dump_header_ptr->length = 16;
+ ql_dump_header_ptr->reserved = 0;
+ /* get dump creation timestamp */
+ timestamp = ddi_get_time();
+ timestamp *= 1000000;
+ ql_dump_header_ptr->time_stamp_lo = LSW(timestamp);
+ ql_dump_header_ptr->time_stamp_hi = MSW(timestamp);
+ /* point to first image header area */
+ length = sizeof (ql_dump_header_t);
+ bp = (caddr_t)qlge->ioctl_buf_ptr + length;
+
+ if (CFG_IST(qlge, CFG_CHIP_8100)) {
+ /* if dumping all */
+ if ((requested_dumps & DUMP_REQUEST_ALL) != 0) {
+ ql_dump_header_ptr->num_dumps = 2;
+ ql_8xxx_binary_core_dump_with_header(qlge, bp, &size);
+ length += size;
+ bp = (caddr_t)qlge->ioctl_buf_ptr + length;
+ ql_8xxx_binary_register_dump_with_header(qlge,
+ bp, &size);
+ length += size;
+ bp = (caddr_t)qlge->ioctl_buf_ptr + length;
+ } else if ((requested_dumps & DUMP_REQUEST_CORE) != 0) {
+ ql_dump_header_ptr->num_dumps = 1;
+ ql_8xxx_binary_core_dump_with_header(qlge, bp, &size);
+ length += size;
+ bp = (caddr_t)qlge->ioctl_buf_ptr + length;
+ } else if ((requested_dumps & DUMP_REQUEST_REGISTER) != 0) {
+ ql_dump_header_ptr->num_dumps = 1;
+ ql_8xxx_binary_register_dump_with_header(qlge,
+ bp, &size);
+ length += size;
+ bp = (caddr_t)qlge->ioctl_buf_ptr + length;
+ } else {
+ cmn_err(CE_WARN, "%s(%d): not supported dump type %d",
+ __func__, qlge->instance, requested_dumps);
+ goto out;
+ }
+ }
+
+ ql_dump_footer_ptr = (ql_dump_footer_t *)(void *)bp;
+ ql_dump_footer_ptr->signature = DUMP_DESCRIPTION_FOOTER_SIGNATURE;
+ ql_dump_footer_ptr->version = 1;
+ ql_dump_footer_ptr->length = 16;
+ ql_dump_footer_ptr->reserved = 0;
+ timestamp = ddi_get_time();
+ timestamp *= 1000000;
+ ql_dump_footer_ptr->time_stamp_lo = LSW(timestamp);
+ ql_dump_footer_ptr->time_stamp_hi = MSW(timestamp);
+ length += ql_dump_footer_ptr->length;
+ rval = DDI_SUCCESS;
+ *len_ptr = length;
+ QL_PRINT(DBG_MBX, ("%s(%d): exiting,total %x bytes\n",
+ __func__, qlge->instance, length));
+out:
+ return (rval);
+}
+
+/*
+ * build core dump segment header
+ */
+static void
+ql_build_coredump_seg_header(mpi_coredump_segment_header_t *seg_hdr,
+ uint32_t seg_number, uint32_t seg_size, uint8_t *desc)
+{
+ (void) memset(seg_hdr, 0, sizeof (mpi_coredump_segment_header_t));
+ seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+ seg_hdr->seg_number = seg_number;
+ seg_hdr->seg_size = seg_size;
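+	/*
+	 * seg_hdr was zeroed above, so copying at most size - 1 bytes
+	 * keeps the description NUL-terminated
+	 */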
+ (void) memcpy(seg_hdr->description, desc,
+ (sizeof (seg_hdr->description))-1);
+}
+
+/*
+ * Unpause MPI risc
+ */
+static int
+ql_unpause_mpi_risc(qlge_t *qlge)
+{
+ uint32_t tmp;
+
+ /* Un-pause the RISC */
+ tmp = ql_read_reg(qlge, REG_HOST_CMD_STATUS);
+ if ((tmp & CSR_RP) == 0)
+ return (DDI_FAILURE);
+
+ ql_write_reg(qlge, REG_HOST_CMD_STATUS, CSR_CMD_CLR_PAUSE);
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Pause MPI risc
+ */
+static int
+ql_pause_mpi_risc(qlge_t *qlge)
+{
+ uint32_t tmp;
+ int count = 10;
+
+ /* Pause the RISC */
+ ql_write_reg(qlge, REG_HOST_CMD_STATUS, CSR_CMD_SET_PAUSE);
+ do {
+ tmp = ql_read_reg(qlge, REG_HOST_CMD_STATUS);
+ if ((tmp & CSR_RP) != 0)
+ break;
+ qlge_delay(10);
+ count--;
+ } while (count);
+ return ((count == 0) ? DDI_FAILURE : DDI_SUCCESS);
+}
+
+/*
+ * Get Interrupt Status registers value
+ */
+static void
+ql_get_intr_states(qlge_t *qlge, uint32_t *buf)
+{
+ int i;
+
+ for (i = 0; i < MAX_RX_RINGS; i++, buf++) {
+ /* read the interrupt enable register for each rx ring */
+ ql_write_reg(qlge, REG_INTERRUPT_ENABLE, 0x037f0300 + i);
+ *buf = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
+ }
+}
+
+/*
+ * Read serdes register
+ */
+static int
+ql_read_serdes_reg(qlge_t *qlge, uint32_t reg, uint32_t *data)
+{
+ int rtn_val = DDI_FAILURE;
+
+ /* wait for reg to come ready */
+ if (ql_wait_reg_bit(qlge, REG_XG_SERDES_ADDR,
+ XG_SERDES_ADDR_RDY, BIT_SET, 0) != DDI_SUCCESS)
+ goto exit;
+ /* set up for reg read */
+ ql_write_reg(qlge, REG_XG_SERDES_ADDR, reg | PROC_ADDR_R);
+ /* wait for reg to come ready */
+ if (ql_wait_reg_bit(qlge, REG_XG_SERDES_ADDR,
+ XG_SERDES_ADDR_RDY, BIT_SET, 0) != DDI_SUCCESS)
+ goto exit;
+ /* get the data */
+ *data = ql_read_reg(qlge, REG_XG_SERDES_DATA);
+ rtn_val = DDI_SUCCESS;
+exit:
+ return (rtn_val);
+}
+
+/*
+ * Read XGMAC register
+ */
+static int
+ql_get_xgmac_regs(qlge_t *qlge, uint32_t *buf)
+{
+	int status = DDI_SUCCESS;
+ int i;
+
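+	/*
+	 * Walk the XGMAC register space, capturing only the offsets
+	 * listed below; unlisted offsets are skipped.
+	 */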
+ for (i = 0; i < XGMAC_REGISTER_END; i += 4, buf ++) {
+ switch (i) {
+ case PAUSE_SRC_LO :
+ case PAUSE_SRC_HI :
+ case GLOBAL_CFG :
+ case TX_CFG :
+ case RX_CFG :
+ case FLOW_CTL :
+ case PAUSE_OPCODE :
+ case PAUSE_TIMER :
+ case PAUSE_FRM_DEST_LO :
+ case PAUSE_FRM_DEST_HI :
+ case MAC_TX_PARAMS :
+ case MAC_RX_PARAMS :
+ case MAC_SYS_INT :
+ case MAC_SYS_INT_MASK :
+ case MAC_MGMT_INT :
+ case MAC_MGMT_IN_MASK :
+ case EXT_ARB_MODE :
+ case TX_PKTS :
+ case TX_PKTS_LO :
+ case TX_BYTES :
+ case TX_BYTES_LO :
+ case TX_MCAST_PKTS :
+ case TX_MCAST_PKTS_LO :
+ case TX_BCAST_PKTS :
+ case TX_BCAST_PKTS_LO :
+ case TX_UCAST_PKTS :
+ case TX_UCAST_PKTS_LO :
+ case TX_CTL_PKTS :
+ case TX_CTL_PKTS_LO :
+ case TX_PAUSE_PKTS :
+ case TX_PAUSE_PKTS_LO :
+ case TX_64_PKT :
+ case TX_64_PKT_LO :
+ case TX_65_TO_127_PKT :
+ case TX_65_TO_127_PKT_LO :
+ case TX_128_TO_255_PKT :
+ case TX_128_TO_255_PKT_LO :
+ case TX_256_511_PKT :
+ case TX_256_511_PKT_LO :
+ case TX_512_TO_1023_PKT :
+ case TX_512_TO_1023_PKT_LO :
+ case TX_1024_TO_1518_PKT :
+ case TX_1024_TO_1518_PKT_LO :
+ case TX_1519_TO_MAX_PKT :
+ case TX_1519_TO_MAX_PKT_LO :
+ case TX_UNDERSIZE_PKT :
+ case TX_UNDERSIZE_PKT_LO :
+ case TX_OVERSIZE_PKT :
+ case TX_OVERSIZE_PKT_LO :
+ case RX_HALF_FULL_DET :
+ case TX_HALF_FULL_DET_LO :
+ case RX_OVERFLOW_DET :
+ case TX_OVERFLOW_DET_LO :
+ case RX_HALF_FULL_MASK :
+ case TX_HALF_FULL_MASK_LO :
+ case RX_OVERFLOW_MASK :
+ case TX_OVERFLOW_MASK_LO :
+ case STAT_CNT_CTL :
+ case AUX_RX_HALF_FULL_DET :
+ case AUX_TX_HALF_FULL_DET :
+ case AUX_RX_OVERFLOW_DET :
+ case AUX_TX_OVERFLOW_DET :
+ case AUX_RX_HALF_FULL_MASK :
+ case AUX_TX_HALF_FULL_MASK :
+ case AUX_RX_OVERFLOW_MASK :
+ case AUX_TX_OVERFLOW_MASK :
+ case RX_BYTES :
+ case RX_BYTES_LO :
+ case RX_BYTES_OK :
+ case RX_BYTES_OK_LO :
+ case RX_PKTS :
+ case RX_PKTS_LO :
+ case RX_PKTS_OK :
+ case RX_PKTS_OK_LO :
+ case RX_BCAST_PKTS :
+ case RX_BCAST_PKTS_LO :
+ case RX_MCAST_PKTS :
+ case RX_MCAST_PKTS_LO :
+ case RX_UCAST_PKTS :
+ case RX_UCAST_PKTS_LO :
+ case RX_UNDERSIZE_PKTS :
+ case RX_UNDERSIZE_PKTS_LO :
+ case RX_OVERSIZE_PKTS :
+ case RX_OVERSIZE_PKTS_LO :
+ case RX_JABBER_PKTS :
+ case RX_JABBER_PKTS_LO :
+ case RX_UNDERSIZE_FCERR_PKTS :
+ case RX_UNDERSIZE_FCERR_PKTS_LO :
+ case RX_DROP_EVENTS :
+ case RX_DROP_EVENTS_LO :
+ case RX_FCERR_PKTS :
+ case RX_FCERR_PKTS_LO :
+ case RX_ALIGN_ERR :
+ case RX_ALIGN_ERR_LO :
+ case RX_SYMBOL_ERR :
+ case RX_SYMBOL_ERR_LO :
+ case RX_MAC_ERR :
+ case RX_MAC_ERR_LO :
+ case RX_CTL_PKTS :
+ case RX_CTL_PKTS_LO :
+ case RX_PAUSE_PKTS :
+ case RX_PAUSE_PKTS_LO :
+ case RX_64_PKTS :
+ case RX_64_PKTS_LO :
+ case RX_65_TO_127_PKTS :
+ case RX_65_TO_127_PKTS_LO :
+ case RX_128_255_PKTS :
+ case RX_128_255_PKTS_LO :
+ case RX_256_511_PKTS :
+ case RX_256_511_PKTS_LO :
+ case RX_512_TO_1023_PKTS :
+ case RX_512_TO_1023_PKTS_LO :
+ case RX_1024_TO_1518_PKTS :
+ case RX_1024_TO_1518_PKTS_LO :
+ case RX_1519_TO_MAX_PKTS :
+ case RX_1519_TO_MAX_PKTS_LO :
+ case RX_LEN_ERR_PKTS :
+ case RX_LEN_ERR_PKTS_LO :
+ case MDIO_TX_DATA :
+ case MDIO_RX_DATA :
+ case MDIO_CMD :
+ case MDIO_PHY_ADDR :
+ case MDIO_PORT :
+ case MDIO_STATUS :
+ case TX_CBFC_PAUSE_FRAMES0 :
+ case TX_CBFC_PAUSE_FRAMES0_LO :
+ case TX_CBFC_PAUSE_FRAMES1 :
+ case TX_CBFC_PAUSE_FRAMES1_LO :
+ case TX_CBFC_PAUSE_FRAMES2 :
+ case TX_CBFC_PAUSE_FRAMES2_LO :
+ case TX_CBFC_PAUSE_FRAMES3 :
+ case TX_CBFC_PAUSE_FRAMES3_LO :
+ case TX_CBFC_PAUSE_FRAMES4 :
+ case TX_CBFC_PAUSE_FRAMES4_LO :
+ case TX_CBFC_PAUSE_FRAMES5 :
+ case TX_CBFC_PAUSE_FRAMES5_LO :
+ case TX_CBFC_PAUSE_FRAMES6 :
+ case TX_CBFC_PAUSE_FRAMES6_LO :
+ case TX_CBFC_PAUSE_FRAMES7 :
+ case TX_CBFC_PAUSE_FRAMES7_LO :
+ case TX_FCOE_PKTS :
+ case TX_FCOE_PKTS_LO :
+ case TX_MGMT_PKTS :
+ case TX_MGMT_PKTS_LO :
+ case RX_CBFC_PAUSE_FRAMES0 :
+ case RX_CBFC_PAUSE_FRAMES0_LO :
+ case RX_CBFC_PAUSE_FRAMES1 :
+ case RX_CBFC_PAUSE_FRAMES1_LO :
+ case RX_CBFC_PAUSE_FRAMES2 :
+ case RX_CBFC_PAUSE_FRAMES2_LO :
+ case RX_CBFC_PAUSE_FRAMES3 :
+ case RX_CBFC_PAUSE_FRAMES3_LO :
+ case RX_CBFC_PAUSE_FRAMES4 :
+ case RX_CBFC_PAUSE_FRAMES4_LO :
+ case RX_CBFC_PAUSE_FRAMES5 :
+ case RX_CBFC_PAUSE_FRAMES5_LO :
+ case RX_CBFC_PAUSE_FRAMES6 :
+ case RX_CBFC_PAUSE_FRAMES6_LO :
+ case RX_CBFC_PAUSE_FRAMES7 :
+ case RX_CBFC_PAUSE_FRAMES7_LO :
+ case RX_FCOE_PKTS :
+ case RX_FCOE_PKTS_LO :
+ case RX_MGMT_PKTS :
+ case RX_MGMT_PKTS_LO :
+ case RX_NIC_FIFO_DROP :
+ case RX_NIC_FIFO_DROP_LO :
+ case RX_FCOE_FIFO_DROP :
+ case RX_FCOE_FIFO_DROP_LO :
+ case RX_MGMT_FIFO_DROP :
+ case RX_MGMT_FIFO_DROP_LO :
+ case RX_PKTS_PRIORITY0 :
+ case RX_PKTS_PRIORITY0_LO :
+ case RX_PKTS_PRIORITY1 :
+ case RX_PKTS_PRIORITY1_LO :
+ case RX_PKTS_PRIORITY2 :
+ case RX_PKTS_PRIORITY2_LO :
+ case RX_PKTS_PRIORITY3 :
+ case RX_PKTS_PRIORITY3_LO :
+ case RX_PKTS_PRIORITY4 :
+ case RX_PKTS_PRIORITY4_LO :
+ case RX_PKTS_PRIORITY5 :
+ case RX_PKTS_PRIORITY5_LO :
+ case RX_PKTS_PRIORITY6 :
+ case RX_PKTS_PRIORITY6_LO :
+ case RX_PKTS_PRIORITY7 :
+ case RX_PKTS_PRIORITY7_LO :
+ case RX_OCTETS_PRIORITY0 :
+ case RX_OCTETS_PRIORITY0_LO :
+ case RX_OCTETS_PRIORITY1 :
+ case RX_OCTETS_PRIORITY1_LO :
+ case RX_OCTETS_PRIORITY2 :
+ case RX_OCTETS_PRIORITY2_LO :
+ case RX_OCTETS_PRIORITY3 :
+ case RX_OCTETS_PRIORITY3_LO :
+ case RX_OCTETS_PRIORITY4 :
+ case RX_OCTETS_PRIORITY4_LO :
+ case RX_OCTETS_PRIORITY5 :
+ case RX_OCTETS_PRIORITY5_LO :
+ case RX_OCTETS_PRIORITY6 :
+ case RX_OCTETS_PRIORITY6_LO :
+ case RX_OCTETS_PRIORITY7 :
+ case RX_OCTETS_PRIORITY7_LO :
+ case TX_PKTS_PRIORITY0 :
+ case TX_PKTS_PRIORITY0_LO :
+ case TX_PKTS_PRIORITY1 :
+ case TX_PKTS_PRIORITY1_LO :
+ case TX_PKTS_PRIORITY2 :
+ case TX_PKTS_PRIORITY2_LO :
+ case TX_PKTS_PRIORITY3 :
+ case TX_PKTS_PRIORITY3_LO :
+ case TX_PKTS_PRIORITY4 :
+ case TX_PKTS_PRIORITY4_LO :
+ case TX_PKTS_PRIORITY5 :
+ case TX_PKTS_PRIORITY5_LO :
+ case TX_PKTS_PRIORITY6 :
+ case TX_PKTS_PRIORITY6_LO :
+ case TX_PKTS_PRIORITY7 :
+ case TX_PKTS_PRIORITY7_LO :
+ case TX_OCTETS_PRIORITY0 :
+ case TX_OCTETS_PRIORITY0_LO :
+ case TX_OCTETS_PRIORITY1 :
+ case TX_OCTETS_PRIORITY1_LO :
+ case TX_OCTETS_PRIORITY2 :
+ case TX_OCTETS_PRIORITY2_LO :
+ case TX_OCTETS_PRIORITY3 :
+ case TX_OCTETS_PRIORITY3_LO :
+ case TX_OCTETS_PRIORITY4 :
+ case TX_OCTETS_PRIORITY4_LO :
+ case TX_OCTETS_PRIORITY5 :
+ case TX_OCTETS_PRIORITY5_LO :
+ case TX_OCTETS_PRIORITY6 :
+ case TX_OCTETS_PRIORITY6_LO :
+ case TX_OCTETS_PRIORITY7 :
+ case TX_OCTETS_PRIORITY7_LO :
+ case RX_DISCARD_PRIORITY0 :
+ case RX_DISCARD_PRIORITY0_LO :
+ case RX_DISCARD_PRIORITY1 :
+ case RX_DISCARD_PRIORITY1_LO :
+ case RX_DISCARD_PRIORITY2 :
+ case RX_DISCARD_PRIORITY2_LO :
+ case RX_DISCARD_PRIORITY3 :
+ case RX_DISCARD_PRIORITY3_LO :
+ case RX_DISCARD_PRIORITY4 :
+ case RX_DISCARD_PRIORITY4_LO :
+ case RX_DISCARD_PRIORITY5 :
+ case RX_DISCARD_PRIORITY5_LO :
+ case RX_DISCARD_PRIORITY6 :
+ case RX_DISCARD_PRIORITY6_LO :
+ case RX_DISCARD_PRIORITY7 :
+ case RX_DISCARD_PRIORITY7_LO :
+ status = ql_read_xgmac_reg(qlge, i, buf);
+ if (status != DDI_SUCCESS)
+ goto err;
+ break;
+
+ default:
+ break;
+ }
+ }
+err:
+ return (status);
+}
+
+/*
+ * Read MPI related registers
+ */
+static int
+ql_get_mpi_regs(qlge_t *qlge, uint32_t *buf, uint32_t offset, uint32_t count)
+{
+ int i, rtn_val = DDI_FAILURE;
+
+ for (i = 0; i < count; i++, buf++) {
+ if (ql_read_processor_data(qlge, offset + i, buf)
+ != DDI_SUCCESS) {
+ goto out;
+ }
+ }
+ rtn_val = DDI_SUCCESS;
+out:
+ return (rtn_val);
+}
+
+/*
+ * Read processor "shadow" register "addr" value and save it
+ * in "data". Assumes all the locks and semaphores have been acquired.
+ */
+static int
+ql_get_mpi_shadow_regs(qlge_t *qlge, uint32_t *buf)
+{
+ uint32_t i;
+ int rtn_val = DDI_FAILURE;
+
+#define RISC_124 0x0003007c
+#define RISC_127 0x0003007f
+#define SHADOW_OFFSET 0xb0000000
+
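+	/*
+	 * Each shadow register is selected by writing its index (shifted
+	 * into bits 23:20 and OR'ed with the 0xb0000000 shadow window
+	 * base) to RISC register 124; the value is then read back
+	 * through RISC register 127.
+	 */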
+ for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
+ if (ql_write_processor_data(qlge, RISC_124,
+ (SHADOW_OFFSET | i << 20)) != DDI_SUCCESS)
+ goto end;
+ if (ql_read_processor_data(qlge, RISC_127, buf) != DDI_SUCCESS)
+ goto end;
+ }
+ rtn_val = DDI_SUCCESS;
+
+end:
+ return (rtn_val);
+}
+
+#define SYS_CLOCK 0x00
+#define PCI_CLOCK 0x80
+#define FC_CLOCK 0x140
+#define XGM_CLOCK 0x180
+#define ADDRESS_REGISTER_ENABLE 0x00010000
+#define UP 0x00008000
+#define MAX_MUX 0x40
+#define MAX_MODULES 0x1F
+
+static uint32_t *
+ql_get_probe(qlge_t *qlge, uint32_t clock, uint8_t *valid, uint32_t *buf)
+{
+ uint32_t module, mux_sel, probe, lo_val, hi_val;
+
+ for (module = 0; module < MAX_MODULES; module ++) {
+ if (valid[module]) {
+ for (mux_sel = 0; mux_sel < MAX_MUX; mux_sel++) {
+ probe = clock | ADDRESS_REGISTER_ENABLE |
+ mux_sel |(module << 9);
+
+ ql_write_reg(qlge, REG_PRB_MX_ADDR, probe);
+ lo_val = ql_read_reg(qlge, REG_PRB_MX_DATA);
+ if (mux_sel == 0) {
+ *buf = probe;
+ buf ++;
+ }
+ probe |= UP;
+ ql_write_reg(qlge, REG_PRB_MX_ADDR, probe);
+ hi_val = ql_read_reg(qlge, REG_PRB_MX_DATA);
+ *buf = lo_val;
+ buf++;
+ *buf = hi_val;
+ buf++;
+ }
+ }
+ }
+ return (buf);
+}
+
+static int
+ql_get_probe_dump(qlge_t *qlge, uint32_t *buf)
+{
+ uint8_t sys_clock_valid_modules[0x20] = {
+ 1, /* 0x00 */
+ 1, /* 0x01 */
+ 1, /* 0x02 */
+ 0, /* 0x03 */
+ 1, /* 0x04 */
+ 1, /* 0x05 */
+ 1, /* 0x06 */
+ 1, /* 0x07 */
+ 1, /* 0x08 */
+ 1, /* 0x09 */
+ 1, /* 0x0A */
+ 1, /* 0x0B */
+ 1, /* 0x0C */
+ 1, /* 0x0D */
+ 1, /* 0x0E */
+ 0, /* 0x0F */
+ 1, /* 0x10 */
+ 1, /* 0x11 */
+ 1, /* 0x12 */
+ 1, /* 0x13 */
+ 0, /* 0x14 */
+ 0, /* 0x15 */
+ 0, /* 0x16 */
+ 0, /* 0x17 */
+ 0, /* 0x18 */
+ 0, /* 0x19 */
+ 0, /* 0x1A */
+ 0, /* 0x1B */
+ 0, /* 0x1C */
+ 0, /* 0x1D */
+ 0, /* 0x1E */
+ 0 /* 0x1F */
+ };
+
+ unsigned char pci_clock_valid_modules[0x20] = {
+ 1, /* 0x00 */
+ 0, /* 0x01 */
+ 0, /* 0x02 */
+ 0, /* 0x03 */
+ 0, /* 0x04 */
+ 0, /* 0x05 */
+ 1, /* 0x06 */
+ 1, /* 0x07 */
+ 0, /* 0x08 */
+ 0, /* 0x09 */
+ 0, /* 0x0A */
+ 0, /* 0x0B */
+ 0, /* 0x0C */
+ 0, /* 0x0D */
+ 1, /* 0x0E */
+ 0, /* 0x0F */
+ 0, /* 0x10 */
+ 0, /* 0x11 */
+ 0, /* 0x12 */
+ 0, /* 0x13 */
+ 0, /* 0x14 */
+ 0, /* 0x15 */
+ 0, /* 0x16 */
+ 0, /* 0x17 */
+ 0, /* 0x18 */
+ 0, /* 0x19 */
+ 0, /* 0x1A */
+ 0, /* 0x1B */
+ 0, /* 0x1C */
+ 0, /* 0x1D */
+ 0, /* 0x1E */
+ 0 /* 0x1F */
+ };
+
+ unsigned char xgm_clock_valid_modules[0x20] = {
+ 1, /* 0x00 */
+ 0, /* 0x01 */
+ 0, /* 0x02 */
+ 1, /* 0x03 */
+ 0, /* 0x04 */
+ 0, /* 0x05 */
+ 0, /* 0x06 */
+ 0, /* 0x07 */
+ 1, /* 0x08 */
+ 1, /* 0x09 */
+ 0, /* 0x0A */
+ 0, /* 0x0B */
+ 1, /* 0x0C */
+ 1, /* 0x0D */
+ 1, /* 0x0E */
+ 0, /* 0x0F */
+ 1, /* 0x10 */
+ 1, /* 0x11 */
+ 0, /* 0x12 */
+ 0, /* 0x13 */
+ 0, /* 0x14 */
+ 0, /* 0x15 */
+ 0, /* 0x16 */
+ 0, /* 0x17 */
+ 0, /* 0x18 */
+ 0, /* 0x19 */
+ 0, /* 0x1A */
+ 0, /* 0x1B */
+ 0, /* 0x1C */
+ 0, /* 0x1D */
+ 0, /* 0x1E */
+ 0 /* 0x1F */
+ };
+
+ unsigned char fc_clock_valid_modules[0x20] = {
+ 1, /* 0x00 */
+ 0, /* 0x01 */
+ 0, /* 0x02 */
+ 0, /* 0x03 */
+ 0, /* 0x04 */
+ 0, /* 0x05 */
+ 0, /* 0x06 */
+ 0, /* 0x07 */
+ 0, /* 0x08 */
+ 0, /* 0x09 */
+ 0, /* 0x0A */
+ 0, /* 0x0B */
+ 1, /* 0x0C */
+ 1, /* 0x0D */
+ 0, /* 0x0E */
+ 0, /* 0x0F */
+ 0, /* 0x10 */
+ 0, /* 0x11 */
+ 0, /* 0x12 */
+ 0, /* 0x13 */
+ 0, /* 0x14 */
+ 0, /* 0x15 */
+ 0, /* 0x16 */
+ 0, /* 0x17 */
+ 0, /* 0x18 */
+ 0, /* 0x19 */
+ 0, /* 0x1A */
+ 0, /* 0x1B */
+ 0, /* 0x1C */
+ 0, /* 0x1D */
+ 0, /* 0x1E */
+ 0 /* 0x1F */
+ };
+
+ /*
+ * First we have to enable the probe mux
+ */
+ ql_write_processor_data(qlge, 0x100e, 0x18a20000);
+
+ buf = ql_get_probe(qlge, SYS_CLOCK, sys_clock_valid_modules, buf);
+
+ buf = ql_get_probe(qlge, PCI_CLOCK, pci_clock_valid_modules, buf);
+
+ buf = ql_get_probe(qlge, XGM_CLOCK, xgm_clock_valid_modules, buf);
+
+ buf = ql_get_probe(qlge, FC_CLOCK, fc_clock_valid_modules, buf);
+
+ return (0);
+
+}
+
+/*
+ * Dump routing index registers
+ */
+void
+ql_get_routing_index_registers(qlge_t *qlge, uint32_t *buf)
+{
+ uint32_t type, index, index_max;
+ uint32_t result_index;
+ uint32_t result_data;
+ uint32_t val;
+
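+	/*
+	 * Each of the four routing tables is dumped by writing a read
+	 * request (bit 26 set, with type and index) to the routing
+	 * index register, polling bit 30 for completion, then reading
+	 * the routing data register.
+	 */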
+ for (type = 0; type < 4; type ++) {
+ if (type < 2) {
+ index_max = 8;
+ } else {
+ index_max = 16;
+ }
+ for (index = 0; index < index_max; index ++) {
+ val = 0x04000000 | (type << 16) | (index << 8);
+ ql_write_reg(qlge, REG_ROUTING_INDEX, val);
+ result_index = 0;
+ while ((result_index & 0x40000000) == 0) {
+ result_index =
+ ql_read_reg(qlge, REG_ROUTING_INDEX);
+ }
+ result_data = ql_read_reg(qlge, REG_ROUTING_DATA);
+ *buf = type;
+ buf ++;
+ *buf = index;
+ buf ++;
+ *buf = result_index;
+ buf ++;
+ *buf = result_data;
+ buf ++;
+ }
+ }
+}
+
+/*
+ * Dump mac protocol registers
+ */
+void
+ql_get_mac_protocol_registers(qlge_t *qlge, uint32_t *buf)
+{
+#define RS_AND_ADR 0x06000000
+#define RS_ONLY 0x04000000
+#define NUM_TYPES 10
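+	/*
+	 * Each table entry is fetched by writing a read request (type,
+	 * index and word offset) to the address-index register, polling
+	 * bit 30 for completion, then reading the data register; CAM
+	 * entries are requested with RS_AND_ADR, all others with
+	 * RS_ONLY.
+	 */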
+ uint32_t result_index, result_data;
+ uint32_t type;
+ uint32_t index;
+ uint32_t offset;
+ uint32_t val;
+ uint32_t initial_val;
+ uint32_t max_index;
+ uint32_t max_offset;
+
+ for (type = 0; type < NUM_TYPES; type ++) {
+ switch (type) {
+
+ case 0: /* CAM */
+ initial_val = RS_AND_ADR;
+ max_index = 512;
+ max_offset = 3;
+ break;
+
+ case 1: /* Multicast MAC Address */
+ initial_val = RS_ONLY;
+ max_index = 32;
+ max_offset = 2;
+ break;
+
+ case 2: /* VLAN filter mask */
+ case 3: /* MC filter mask */
+ initial_val = RS_ONLY;
+ max_index = 4096;
+ max_offset = 1;
+ break;
+
+ case 4: /* FC MAC addresses */
+ initial_val = RS_ONLY;
+ max_index = 4;
+ max_offset = 2;
+ break;
+
+ case 5: /* Mgmt MAC addresses */
+ initial_val = RS_ONLY;
+ max_index = 8;
+ max_offset = 2;
+ break;
+
+ case 6: /* Mgmt VLAN addresses */
+ initial_val = RS_ONLY;
+ max_index = 16;
+ max_offset = 1;
+ break;
+
+ case 7: /* Mgmt IPv4 address */
+ initial_val = RS_ONLY;
+ max_index = 4;
+ max_offset = 1;
+ break;
+
+ case 8: /* Mgmt IPv6 address */
+ initial_val = RS_ONLY;
+ max_index = 4;
+ max_offset = 4;
+ break;
+
+ case 9: /* Mgmt TCP/UDP Dest port */
+ initial_val = RS_ONLY;
+ max_index = 4;
+ max_offset = 1;
+ break;
+
+ default:
+ cmn_err(CE_WARN, "Bad type!!! 0x%08x", type);
+ max_index = 0;
+ max_offset = 0;
+ break;
+ }
+ for (index = 0; index < max_index; index ++) {
+ for (offset = 0; offset < max_offset; offset ++) {
+ val = initial_val | (type << 16) | (index << 4)
+ | (offset);
+ ql_write_reg(qlge,
+ REG_MAC_PROTOCOL_ADDRESS_INDEX, val);
+ result_index = 0;
+ while ((result_index & 0x40000000) == 0) {
+ result_index = ql_read_reg(qlge,
+ REG_MAC_PROTOCOL_ADDRESS_INDEX);
+ }
+ result_data =
+ ql_read_reg(qlge, REG_MAC_PROTOCOL_DATA);
+ *buf = result_index;
+ buf ++;
+ *buf = result_data;
+ buf ++;
+ }
+ }
+ }
+}
+
+/*
+ * Dump serdes registers
+ */
+static int
+ql_get_serdes_regs(qlge_t *qlge, struct ql_mpi_coredump *mpi_coredump)
+{
+ uint32_t i, j;
+ int status;
+
+	for (i = 0, j = 0; i <= 0x34; i += 4) {
+ status = ql_read_serdes_reg(qlge, i,
+ &mpi_coredump->serdes_xaui_an[j++]);
+ if (status != DDI_SUCCESS) {
+ goto err;
+ }
+ }
+
+ for (i = 0x800, j = 0; i <= 0x880; i += 4) {
+ status = ql_read_serdes_reg(qlge, i,
+ &mpi_coredump->serdes_xaui_hss_pcs[j++]);
+ if (status != DDI_SUCCESS) {
+ goto err;
+ }
+ }
+
+ for (i = 0x1000, j = 0; i <= 0x1034; i += 4) {
+ status = ql_read_serdes_reg(qlge, i,
+ &mpi_coredump->serdes_xfi_an[j++]);
+ if (status != DDI_SUCCESS) {
+ goto err;
+ }
+ }
+
+ for (i = 0x1050, j = 0; i <= 0x107c; i += 4) {
+ status = ql_read_serdes_reg(qlge, i,
+ &mpi_coredump->serdes_xfi_train[j++]);
+ if (status != DDI_SUCCESS) {
+ goto err;
+ }
+ }
+
+ for (i = 0x1800, j = 0; i <= 0x1838; i += 4) {
+ status = ql_read_serdes_reg(qlge, i,
+ &mpi_coredump->serdes_xfi_hss_pcs[j++]);
+ if (status != DDI_SUCCESS) {
+ goto err;
+ }
+ }
+
+	for (i = 0x1c00, j = 0; i <= 0x1c1f; i++) {
+		status = ql_read_serdes_reg(qlge, i,
+		    &mpi_coredump->serdes_xfi_hss_tx[j++]);
+ if (status != DDI_SUCCESS) {
+ goto err;
+ }
+ }
+
+	for (i = 0x1c40, j = 0; i <= 0x1c5f; i++) {
+		status = ql_read_serdes_reg(qlge, i,
+		    &mpi_coredump->serdes_xfi_hss_rx[j++]);
+ if (status != DDI_SUCCESS) {
+ goto err;
+ }
+ }
+
+	for (i = 0x1e00, j = 0; i <= 0x1e1f; i++) {
+		status = ql_read_serdes_reg(qlge, i,
+		    &mpi_coredump->serdes_xfi_hss_pll[j++]);
+ if (status != DDI_SUCCESS) {
+ goto err;
+ }
+ }
+
+err:
+ if (status != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "Serdes register 0x%x access error", i);
+ }
+
+ return (status);
+}
+
+/*
+ * Dump ETS registers
+ */
+static int
+ql_get_ets_regs(qlge_t *qlge, uint32_t *buf)
+{
+ int i;
+
+ /*
+ * First read out the NIC ETS
+ */
+ for (i = 0; i < 8; i++, buf++) {
+ ql_write_reg(qlge, REG_NIC_ENHANCED_TX_SCHEDULE,
+ i << 29 | 0x08000000);
+ /* wait for reg to come ready */
+ /* get the data */
+ *buf = ql_read_reg(qlge, REG_NIC_ENHANCED_TX_SCHEDULE);
+ }
+ /*
+ * Now read out the CNA ETS
+ */
+ for (i = 0; i < 2; i ++, buf ++) {
+ ql_write_reg(qlge, REG_CNA_ENHANCED_TX_SCHEDULE,
+ i << 29 | 0x08000000);
+ /* wait for reg to come ready */
+ *buf = ql_read_reg(qlge, REG_CNA_ENHANCED_TX_SCHEDULE);
+ }
+
+ return (0);
+}
+
+/*
+ * Core dump in binary format
+ */
+int
+ql_8xxx_binary_core_dump(qlge_t *qlge, ql_mpi_coredump_t *mpi_coredump)
+{
+ int rtn_val = DDI_FAILURE;
+ uint64_t timestamp, phy_addr;
+ uint32_t addr;
+ int i;
+
+ if (ql_sem_spinlock(qlge, QL_PROCESSOR_SEM_MASK) != DDI_SUCCESS) {
+ return (rtn_val);
+ }
+
+ /* pause the risc */
+ if (ql_pause_mpi_risc(qlge) != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "%s(%d) Wait for RISC paused timeout.",
+ __func__, qlge->instance);
+ goto out;
+ }
+
+ /* 0:make core dump header */
+ bzero(&(mpi_coredump->mpi_global_header),
+ sizeof (mpi_coredump_global_header_t));
+ mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+ (void) strcpy(mpi_coredump->mpi_global_header.id_string,
+ "MPI Coredump");
+ timestamp = ddi_get_time();
+ timestamp *= 1000000;
+ mpi_coredump->mpi_global_header.time_lo = LSW(timestamp);
+ mpi_coredump->mpi_global_header.time_hi = MSW(timestamp);
+ mpi_coredump->mpi_global_header.total_image_size =
+ (uint32_t)(sizeof (ql_mpi_coredump_t));
+ mpi_coredump->mpi_global_header.global_header_size =
+ sizeof (mpi_coredump_global_header_t);
+ (void) strcpy(mpi_coredump->mpi_global_header.driver_info,
+ "driver version is "VERSIONSTR);
+
+ /* 1:MPI Core Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
+ CORE_SEG_NUM, sizeof (mpi_coredump->core_regs_seg_hdr) +
+ sizeof (mpi_coredump->mpi_core_regs) +
+ sizeof (mpi_coredump->mpi_core_sh_regs),
+ (uint8_t *)"Core Registers");
+
+ /* first, read 127 core registers */
+ ql_get_mpi_regs(qlge, &mpi_coredump->mpi_core_regs[0],
+ MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
+ /* read the next 16 shadow registers */
+ ql_get_mpi_shadow_regs(qlge, &mpi_coredump->mpi_core_sh_regs[0]);
+
+ /* 2:MPI Test Logic Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
+ TEST_LOGIC_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->test_logic_regs),
+ (uint8_t *)"Test Logic Regs");
+
+ ql_get_mpi_regs(qlge, &mpi_coredump->test_logic_regs[0],
+ TEST_REGS_ADDR, TEST_REGS_CNT);
+
+ /* 3:RMII Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
+ RMII_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->rmii_regs),
+ (uint8_t *)"RMII Registers");
+ ql_get_mpi_regs(qlge, &mpi_coredump->rmii_regs[0],
+ RMII_REGS_ADDR, RMII_REGS_CNT);
+
+ /* 4:FCMAC1 Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
+ FCMAC1_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->fcmac1_regs),
+ (uint8_t *)"FCMAC1 Registers");
+ ql_get_mpi_regs(qlge, &mpi_coredump->fcmac1_regs[0],
+ FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
+
+ /* 5:FCMAC2 Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
+ FCMAC2_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->fcmac2_regs),
+ (uint8_t *)"FCMAC2 Registers");
+ ql_get_mpi_regs(qlge, &mpi_coredump->fcmac2_regs[0],
+ FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
+
+ /* 6:FC1 Mailbox Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
+ FC1_MBOX_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->fc1_mbx_regs),
+ (uint8_t *)"FC1 MBox Regs");
+ ql_get_mpi_regs(qlge, &mpi_coredump->fc1_mbx_regs[0],
+ FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+
+ /* 7:IDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
+ IDE_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->ide_regs),
+ (uint8_t *)"IDE Registers");
+ ql_get_mpi_regs(qlge, &mpi_coredump->ide_regs[0],
+ IDE_REGS_ADDR, IDE_REGS_CNT);
+
+ /* 8:Host1 Mailbox Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
+ NIC1_MBOX_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->nic1_mbx_regs),
+ (uint8_t *)"NIC1 MBox Regs");
+ ql_get_mpi_regs(qlge, &mpi_coredump->nic1_mbx_regs[0],
+ NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+
+ /* 9:SMBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
+ SMBUS_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->smbus_regs),
+ (uint8_t *)"SMBus Registers");
+ ql_get_mpi_regs(qlge, &mpi_coredump->smbus_regs[0],
+ SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
+
+ /* 10:FC2 Mailbox Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
+ FC2_MBOX_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->fc2_mbx_regs),
+ (uint8_t *)"FC2 MBox Regs");
+ ql_get_mpi_regs(qlge, &mpi_coredump->fc2_mbx_regs[0],
+ FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+
+ /* 11:Host2 Mailbox Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
+ NIC2_MBOX_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->nic2_mbx_regs),
+ (uint8_t *)"NIC2 MBox Regs");
+ ql_get_mpi_regs(qlge, &mpi_coredump->nic2_mbx_regs[0],
+ NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+
+ /* 12:i2C Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
+ I2C_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->i2c_regs),
+ (uint8_t *)"I2C Registers");
+ ql_get_mpi_regs(qlge, &mpi_coredump->i2c_regs[0],
+ I2C_REGS_ADDR, I2C_REGS_CNT);
+
+ /* 13:MEMC Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
+ MEMC_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->memc_regs),
+ (uint8_t *)"MEMC Registers");
+ ql_get_mpi_regs(qlge, &mpi_coredump->memc_regs[0],
+ MEMC_REGS_ADDR, MEMC_REGS_CNT);
+
+ /* 14:PBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
+ PBUS_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->pbus_regs),
+ (uint8_t *)"PBUS Registers");
+ ql_get_mpi_regs(qlge, &mpi_coredump->pbus_regs[0],
+ PBUS_REGS_ADDR, PBUS_REGS_CNT);
+
+ /* 15:MDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
+ MDE_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->mde_regs),
+ (uint8_t *)"MDE Registers");
+ ql_get_mpi_regs(qlge, &mpi_coredump->mde_regs[0],
+ MDE_REGS_ADDR, MDE_REGS_CNT);
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
+ XAUI_AN_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->serdes_xaui_an),
+ (uint8_t *)"XAUI AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
+ XAUI_HSS_PCS_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->serdes_xaui_hss_pcs),
+ (uint8_t *)"XAUI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr,
+ XFI_AN_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->serdes_xfi_an),
+ (uint8_t *)"XFI AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
+ XFI_TRAIN_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->serdes_xfi_train),
+ (uint8_t *)"XFI TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
+ XFI_HSS_PCS_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->serdes_xfi_hss_pcs),
+ (uint8_t *)"XFI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
+ XFI_HSS_TX_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->serdes_xfi_hss_tx),
+ (uint8_t *)"XFI HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
+ XFI_HSS_RX_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->serdes_xfi_hss_rx),
+ (uint8_t *)"XFI HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
+ XFI_HSS_PLL_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->serdes_xfi_hss_pll),
+ (uint8_t *)"XFI HSS PLL Registers");
+
+ ql_get_serdes_regs(qlge, mpi_coredump);
+
+ /* 16:NIC Ctrl Registers Port1 */
+ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->nic_regs),
+ (uint8_t *)"NIC Registers");
+ i = 0;
+ for (addr = 0; addr <= 0xFC; i++) {
+ mpi_coredump->nic_regs[i] = ql_read_reg(qlge, addr);
+ addr += 4;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->intr_states),
+ (uint8_t *)"INTR States");
+ ql_get_intr_states(qlge, &mpi_coredump->intr_states[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac_seg_hdr,
+ NIC1_XGMAC_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->xgmac),
+ (uint8_t *)"NIC XGMac Registers");
+ ql_get_xgmac_regs(qlge, &mpi_coredump->xgmac[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
+ PROBE_DUMP_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->probe_dump),
+ (uint8_t *)"Probe Dump");
+ ql_get_probe_dump(qlge, &mpi_coredump->probe_dump[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
+ ROUTING_INDEX_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->routing_regs),
+ (uint8_t *)"Routing Regs");
+
+ ql_get_routing_index_registers(qlge, &mpi_coredump->routing_regs[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
+ MAC_PROTOCOL_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->mac_prot_regs),
+ (uint8_t *)"MAC Prot Regs");
+
+ ql_get_mac_protocol_registers(qlge, &mpi_coredump->mac_prot_regs[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->ets),
+ (uint8_t *)"ETS Registers");
+
+ ql_get_ets_regs(qlge, &mpi_coredump->ets[0]);
+
+ /* clear the pause */
+ if (ql_unpause_mpi_risc(qlge) != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "Failed RISC unpause.");
+ goto out;
+ }
+
+ /* Reset the MPI Processor */
+ if (ql_reset_mpi_risc(qlge) != DDI_SUCCESS) {
+ goto out;
+ }
+
+ /* 22:WCS MPI Ram ?? */
+ ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
+ WCS_RAM_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->code_ram),
+ (uint8_t *)"WCS RAM");
+ phy_addr = qlge->ioctl_buf_dma_attr.dma_addr;
+ if (ql_read_risc_ram(qlge, CODE_RAM_ADDR, phy_addr, CODE_RAM_CNT)
+ == DDI_SUCCESS) {
+ (void) ddi_dma_sync(qlge->ioctl_buf_dma_attr.dma_handle, 0,
+ sizeof (mpi_coredump->code_ram), DDI_DMA_SYNC_FORKERNEL);
+ bcopy(qlge->ioctl_buf_dma_attr.vaddr,
+ mpi_coredump->code_ram,
+ sizeof (mpi_coredump->code_ram));
+ } else {
+ mutex_exit(&qlge->mbx_mutex);
+ goto out;
+ }
+
+ /* 23:MEMC Ram ?? */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
+ MEMC_RAM_SEG_NUM,
+ sizeof (mpi_coredump_segment_header_t) +
+ sizeof (mpi_coredump->memc_ram),
+ (uint8_t *)"MEMC RAM");
+ phy_addr = qlge->ioctl_buf_dma_attr.dma_addr;
+ if (ql_read_risc_ram(qlge, MEMC_RAM_ADDR, phy_addr, MEMC_RAM_CNT)
+ == DDI_SUCCESS) {
+ (void) ddi_dma_sync(qlge->ioctl_buf_dma_attr.dma_handle, 0,
+ sizeof (mpi_coredump->memc_ram), DDI_DMA_SYNC_FORKERNEL);
+ bcopy(qlge->ioctl_buf_dma_attr.vaddr, mpi_coredump->memc_ram,
+ sizeof (mpi_coredump->memc_ram));
+ } else {
+ mutex_exit(&qlge->mbx_mutex);
+ goto out;
+ }
+ /*
+ * 24. Restart MPI
+ */
+ if (ql_write_processor_data(qlge, 0x1010, 1) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "MPI restart failure.");
+ }
+
+ rtn_val = DDI_SUCCESS;
+out:
+ ql_sem_unlock(qlge, QL_PROCESSOR_SEM_MASK);
+ return (rtn_val);
+}
diff --git a/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_flash.c b/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_flash.c
new file mode 100644
index 0000000000..6231f9dd03
--- /dev/null
+++ b/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_flash.c
@@ -0,0 +1,1403 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 QLogic Corporation. All rights reserved.
+ */
+
+#include <qlge.h>
+/*
+ * Local Function Prototypes.
+ */
+static int ql_read_flash(qlge_t *, uint32_t, uint32_t *);
+static int ql_write_flash(qlge_t *, uint32_t, uint32_t);
+static int ql_protect_flash(qlge_t *);
+static int ql_unprotect_flash(qlge_t *);
+
+/*
+ * ql_flash_id
+ * The flash memory chip exports 3 ID bytes, in order: manufacturer,
+ * ID, capability
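+ * (read with the 0x9F RDID command, after waking the part with the
+ * 0xAB release-from-deep-power-down command)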
+ */
+int
+ql_flash_id(qlge_t *qlge)
+{
+ int rval;
+ uint32_t fdata = 0;
+
+ /*
+ * Send Restore command (0xAB) to release Flash from
+ * possible deep power down state
+ */
+ rval = ql_read_flash(qlge, FLASH_CONF_ADDR | 0x300 | FLASH_RES_CMD,
+ &fdata);
+ QL_PRINT(DBG_FLASH, ("%s(%d) flash electronic signature is %x \n",
+ __func__, qlge->instance, fdata));
+ fdata = 0;
+
+ /* 0x9F */
+ rval = ql_read_flash(qlge, FLASH_CONF_ADDR | 0x0400 | FLASH_RDID_CMD,
+ &fdata);
+
+ if ((rval != DDI_SUCCESS) || (fdata == 0)) {
+ cmn_err(CE_WARN, "%s(%d) read_flash failed 0x%x.",
+ __func__, qlge->instance, fdata);
+ } else {
+ qlge->flash_info.flash_manuf = LSB(LSW(fdata));
+ qlge->flash_info.flash_id = MSB(LSW(fdata));
+ qlge->flash_info.flash_cap = LSB(MSW(fdata));
+ QL_PRINT(DBG_FLASH, ("%s(%d) flash manufacturer 0x%x,"
+ " flash id 0x%x, flash cap 0x%x\n",
+ __func__, qlge->instance,
+ qlge->flash_info.flash_manuf, qlge->flash_info.flash_id,
+ qlge->flash_info.flash_cap));
+ }
+ return (rval);
+}
+
+/*
+ * qlge_dump_fcode
+ * Dumps fcode from flash.
+ */
+int
+qlge_dump_fcode(qlge_t *qlge, uint8_t *dp, uint32_t size, uint32_t startpos)
+{
+ uint32_t cnt, data, addr;
+ int rval = DDI_SUCCESS;
+
+ QL_PRINT(DBG_FLASH, ("%s(%d) entered to read address %x, %x bytes\n",
+ __func__, qlge->instance, startpos, size));
+
+ /* make sure startpos+size doesn't exceed flash */
+ if (size + startpos > qlge->fdesc.flash_size) {
+ cmn_err(CE_WARN, "%s(%d) exceeded flash range, sz=%xh, stp=%xh,"
+ " flsz=%xh", __func__, qlge->instance,
+ size, startpos, qlge->fdesc.flash_size);
+ return (DDI_FAILURE);
+ }
+
+	/* the start address must be 32-bit (4-byte) aligned for M25Pxx */
+	if ((startpos & 0x3) != 0) {
+		cmn_err(CE_WARN, "%s(%d) incorrect address alignment",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+
+ /* adjust flash start addr for 32 bit words */
+ addr = startpos / 4;
+
+ /* Read fcode data from flash. */
+ cnt = startpos;
+ size += startpos;
+ while (cnt < size) {
+ /* Allow other system activity. */
+ if (cnt % 0x1000 == 0) {
+ drv_usecwait(1);
+ }
+ rval = ql_read_flash(qlge, addr++, &data);
+ if (rval != DDI_SUCCESS) {
+ break;
+ }
+ *dp++ = LSB(LSW(data));
+ *dp++ = MSB(LSW(data));
+ *dp++ = LSB(MSW(data));
+ *dp++ = MSB(MSW(data));
+ cnt += 4;
+ }
+
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "failed, rval = %xh", rval);
+ }
+ return (rval);
+}
+
+int
+ql_erase_and_write_to_flash(qlge_t *qlge, uint8_t *dp, uint32_t size,
+ uint32_t faddr)
+{
+ int rval = DDI_FAILURE;
+ uint32_t cnt, rest_addr, fdata;
+
+ QL_PRINT(DBG_FLASH, ("%s(%d) entered to write addr %x, %d bytes\n",
+ __func__, qlge->instance, faddr, size));
+
+ /* start address must be 32 bit word aligned */
+ if ((faddr & 0x3) != 0) {
+ cmn_err(CE_WARN, "%s(%d) incorrect buffer size alignment",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+
+ /* setup mask of address range within a sector */
+ rest_addr = (qlge->fdesc.block_size - 1) >> 2;
+
+ faddr = faddr >> 2; /* flash gets 32 bit words */
+
+ /*
+ * Write data to flash.
+ */
+ cnt = 0;
+ size = (size + 3) >> 2; /* Round up & convert to dwords */
+ while (cnt < size) {
+ /* Beginning of a sector? do a sector erase */
+ if ((faddr & rest_addr) == 0) {
+ fdata = (faddr & ~rest_addr) << 2;
+ fdata = (fdata & 0xff00) |
+ (fdata << 16 & 0xff0000) |
+ (fdata >> 16 & 0xff);
+ /* 64k bytes sector erase */
+ rval = ql_write_flash(qlge, /* 0xd8 */
+ FLASH_CONF_ADDR | 0x0300 | qlge->fdesc.erase_cmd,
+ fdata);
+
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "Unable to flash sector: "
+ "address=%xh", faddr);
+ goto out;
+ }
+ }
+ /* Write data */
+ fdata = *dp++;
+ fdata |= *dp++ << 8;
+ fdata |= *dp++ << 16;
+ fdata |= *dp++ << 24;
+
+ rval = ql_write_flash(qlge, faddr, fdata);
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "Unable to program flash "
+ "address=%xh data=%xh", faddr,
+ *dp);
+ goto out;
+ }
+ cnt++;
+ faddr++;
+
+ /* Allow other system activity. */
+ if (cnt % 0x1000 == 0) {
+ qlge_delay(10000);
+ }
+ }
+ rval = DDI_SUCCESS;
+out:
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d failed=%xh",
+ __func__, qlge->instance, rval);
+ }
+ return (rval);
+}
+
+void
+get_sector_number(qlge_t *qlge, uint32_t faddr, uint32_t *psector)
+{
+ *psector = faddr / qlge->fdesc.block_size; /* 0x10000 */
+}
+
+/*
+ * qlge_load_flash
+ * Write "size" bytes from memory "dp" to flash address "faddr".
+ * faddr = 32bit word flash address.
+ */
+int
+qlge_load_flash(qlge_t *qlge, uint8_t *dp, uint32_t len, uint32_t faddr)
+{
+ int rval = DDI_FAILURE;
+ uint32_t start_block, end_block;
+ uint32_t start_byte, end_byte;
+ uint32_t num;
+ uint32_t sector_size, addr_src, addr_desc;
+ uint8_t *temp;
+ caddr_t bp, bdesc;
+
+ QL_PRINT(DBG_FLASH, ("%s(%d) entered to write addr %x, %d bytes\n",
+ __func__, qlge->instance, faddr, len));
+
+ sector_size = qlge->fdesc.block_size;
+
+ if (faddr > qlge->fdesc.flash_size) {
+ cmn_err(CE_WARN, "%s(%d): invalid flash write address %x",
+ __func__, qlge->instance, faddr);
+ return (DDI_FAILURE);
+ }
+ /* Get semaphore to access Flash Address and Flash Data Registers */
+ if (ql_sem_spinlock(qlge, QL_FLASH_SEM_MASK) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ temp = kmem_zalloc(sector_size, KM_SLEEP);
+ if (temp == NULL) {
+ cmn_err(CE_WARN, "%s(%d): Unable to allocate buffer",
+ __func__, qlge->instance);
+ ql_sem_unlock(qlge, QL_FLASH_SEM_MASK);
+ return (DDI_FAILURE);
+ }
+
+ ql_unprotect_flash(qlge);
+
+ get_sector_number(qlge, faddr, &start_block);
+ get_sector_number(qlge, faddr + len - 1, &end_block);
+
+ QL_PRINT(DBG_FLASH, ("%s(%d) start_block %x, end_block %x\n",
+ __func__, qlge->instance, start_block, end_block));
+
+ for (num = start_block; num <= end_block; num++) {
+ QL_PRINT(DBG_FLASH,
+ ("%s(%d) sector_size 0x%x, sector read addr %x\n",
+ __func__, qlge->instance, sector_size, num * sector_size));
+ /* read one whole sector flash data to buffer */
+ rval = qlge_dump_fcode(qlge, (uint8_t *)temp, sector_size,
+ num * sector_size);
+
+ start_byte = num * sector_size;
+		end_byte = start_byte + sector_size - 1;
+ if (start_byte < faddr)
+ start_byte = faddr;
+		if (end_byte >= (faddr + len))
+ end_byte = (faddr + len - 1);
+
+ addr_src = start_byte - faddr;
+ addr_desc = start_byte - num * sector_size;
+ bp = (caddr_t)dp + addr_src;
+ bdesc = (caddr_t)temp + addr_desc;
+ bcopy(bp, bdesc, (end_byte - start_byte + 1));
+
+ /* write the whole sector data to flash */
+ if (ql_erase_and_write_to_flash(qlge, temp, sector_size,
+ num * sector_size) != DDI_SUCCESS)
+ goto out;
+ }
+ rval = DDI_SUCCESS;
+out:
+ ql_protect_flash(qlge);
+ kmem_free(temp, sector_size);
+
+ ql_sem_unlock(qlge, QL_FLASH_SEM_MASK);
+
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d failed=%xh",
+ __func__, qlge->instance, rval);
+ }
+
+ return (rval);
+}
+
+
+/*
+ * ql_check_pci
+ * checks the passed buffer for a valid pci signature and
+ * expected (and in range) pci length values.
+ * On a successful check, *nextpos is advanced past this image to the
+ * next pci header.
+ */
+static int
+ql_check_pci(qlge_t *qlge, uint8_t *buf, uint32_t *nextpos)
+{
+ pci_header_t *pcih;
+ pci_data_t *pcid;
+ uint32_t doff;
+ uint8_t *pciinfo;
+ uint32_t image_size = 0;
+ int rval = CONTINUE_SEARCH;
+
+ QL_PRINT(DBG_FLASH, ("%s(%d) check image at 0x%x\n",
+ __func__, qlge->instance, *nextpos));
+
+ if (buf != NULL) {
+ pciinfo = buf;
+ } else {
+ cmn_err(CE_WARN, "%s(%d) failed, null buf ptr passed",
+ __func__, qlge->instance);
+ return (STOP_SEARCH);
+ }
+
+ /* get the pci header image length */
+ pcih = (pci_header_t *)pciinfo;
+
+ doff = pcih->dataoffset[1];
+ doff <<= 8;
+ doff |= pcih->dataoffset[0];
+
+ /* some header section sanity check */
+ if (pcih->signature[0] != PCI_HEADER0 /* '55' */ ||
+ pcih->signature[1] != PCI_HEADER1 /* 'AA' */ || doff > 50) {
+ cmn_err(CE_WARN, "%s(%d) image format error: s0=%xh, s1=%xh,"
+ "off=%xh\n", __func__, qlge->instance,
+ pcih->signature[0], pcih->signature[1], doff);
+ return (STOP_SEARCH);
+ }
+
+ pcid = (pci_data_t *)(pciinfo + doff);
+
+ /* a slight sanity data section check */
+ if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
+ pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
+ cmn_err(CE_WARN, "%s(%d) failed, data sig mismatch!",
+ __func__, qlge->instance);
+ return (STOP_SEARCH);
+ }
+ image_size =
+	    (pcid->imagelength[0] | (pcid->imagelength[1] << 8)) *
+ PCI_SECTOR_SIZE /* 512 */;
+
+ switch (pcid->codetype) {
+ case PCI_CODE_X86PC:
+ QL_PRINT(DBG_FLASH, ("%s(%d) boot image is FTYPE_BIOS \n",
+ __func__, qlge->instance));
+ break;
+ case PCI_CODE_FCODE:
+ QL_PRINT(DBG_FLASH, ("%s(%d) boot image is FTYPE_FCODE \n",
+ __func__, qlge->instance));
+ break;
+ case PCI_CODE_EFI:
+ QL_PRINT(DBG_FLASH, ("%s(%d) boot image is FTYPE_EFI \n",
+ __func__, qlge->instance));
+ break;
+ case PCI_CODE_HPPA:
+ QL_PRINT(DBG_FLASH, ("%s(%d) boot image is PCI_CODE_HPPA \n",
+ __func__, qlge->instance));
+ break;
+ default:
+ QL_PRINT(DBG_FLASH, ("%s(%d) boot image is FTYPE_UNKNOWN \n",
+ __func__, qlge->instance));
+ break;
+ }
+
+ QL_PRINT(DBG_FLASH, ("%s(%d) image size %x at %x\n",
+ __func__, qlge->instance, image_size, *nextpos));
+
+ if (pcid->indicator == PCI_IND_LAST_IMAGE) {
+ QL_PRINT(DBG_FLASH, ("%s(%d) last boot image found \n",
+ __func__, qlge->instance));
+ rval = LAST_IMAGE_FOUND;
+ } else {
+ rval = CONTINUE_SEARCH;
+ }
+ /* Get the next flash image address */
+ *nextpos += image_size;
+
+ return (rval);
+}
+
+/*
+ * ql_find_flash_layout_table_data_structure_addr
+ * Find the Flash Layout Table Data Structure (FLTDS), which is
+ * located at the end of the last boot image.
+ * Assume the FLTDS is located within the first 2M bytes.
+ * Note:
+ * The driver must be in a stalled state prior to entering, or add
+ * code to this function, prior to calling ql_setup_flash()
+ */
+int
+ql_find_flash_layout_table_data_structure_addr(qlge_t *qlge)
+{
+ int rval = DDI_FAILURE;
+ int result = CONTINUE_SEARCH;
+ uint32_t freadpos = 0;
+ uint8_t buf[FBUFSIZE];
+
+ if (qlge->flash_fltds_addr != 0) {
+ QL_PRINT(DBG_FLASH, ("%s(%d) done already\n",
+ __func__, qlge->instance));
+ return (DDI_SUCCESS);
+ }
+ /*
+	 * Temporarily set the fdesc.flash_size to a 1M flash size
+	 * to avoid failing the range check in qlge_dump_fcode.
+ */
+ qlge->fdesc.flash_size = FLASH_FIRMWARE_IMAGE_ADDR;
+
+ while (result == CONTINUE_SEARCH) {
+
+ if ((rval = qlge_dump_fcode(qlge, buf, FBUFSIZE, freadpos))
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) qlge_dump_fcode failed"
+ " pos=%xh rval=%xh",
+ __func__, qlge->instance, freadpos, rval);
+ break;
+ }
+ /*
+ * checkout the pci boot image format
+ * and get next read address
+ */
+ result = ql_check_pci(qlge, buf, &freadpos);
+ /*
+ * find last image? If so, then the freadpos
+ * is the address of FLTDS
+ */
+ if (result == LAST_IMAGE_FOUND) {
+ QL_PRINT(DBG_FLASH,
+ ("%s(%d) flash layout table data structure "
+ "(FLTDS) address is at %x \n", __func__,
+ qlge->instance, freadpos));
+ qlge->flash_fltds_addr = freadpos;
+ rval = DDI_SUCCESS;
+ break;
+ } else if (result == STOP_SEARCH) {
+ cmn_err(CE_WARN, "%s(%d) flash header incorrect,"
+ "stop searching",
+ __func__, qlge->instance);
+ break;
+ }
+ }
+ return (rval);
+}
+
+/*
+ * ql_flash_fltds
+ * Get flash layout table data structure table.
+ */
+static int
+ql_flash_fltds(qlge_t *qlge)
+{
+ uint32_t cnt;
+ uint16_t chksum, *bp, data;
+ int rval;
+
+ rval = qlge_dump_fcode(qlge, (uint8_t *)&qlge->fltds,
+ sizeof (ql_fltds_t), qlge->flash_fltds_addr);
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d)read error",
+ __func__, qlge->instance);
+ bzero(&qlge->fltds, sizeof (ql_fltds_t));
+ return (rval);
+ }
+
+ QL_DUMP(DBG_FLASH, "flash layout table data structure:\n",
+ &qlge->fltds, 8, sizeof (ql_fltds_t));
+
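+	/*
+	 * The FLTDS is valid when its 16-bit little-endian words,
+	 * including the stored checksum field, sum to zero.
+	 */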
+ chksum = 0;
+ data = 0;
+ bp = (uint16_t *)&qlge->fltds;
+ for (cnt = 0; cnt < (sizeof (ql_fltds_t)) / 2; cnt++) {
+ data = *bp;
+ LITTLE_ENDIAN_16(&data);
+ chksum += data;
+ bp++;
+ }
+
+ LITTLE_ENDIAN_32(&qlge->fltds.signature);
+ LITTLE_ENDIAN_16(&qlge->fltds.flt_addr_lo);
+ LITTLE_ENDIAN_16(&qlge->fltds.flt_addr_hi);
+ LITTLE_ENDIAN_16(&qlge->fltds.checksum);
+
+ QL_PRINT(DBG_FLASH, ("%s(%d) signature %xh\n",
+ __func__, qlge->instance, qlge->fltds.signature));
+ QL_PRINT(DBG_FLASH, ("%s(%d) flt_addr_lo %xh\n",
+ __func__, qlge->instance, qlge->fltds.flt_addr_lo));
+ QL_PRINT(DBG_FLASH, ("%s(%d) flt_addr_hi %xh\n",
+ __func__, qlge->instance, qlge->fltds.flt_addr_hi));
+ QL_PRINT(DBG_FLASH, ("%s(%d) version %xh\n",
+ __func__, qlge->instance, qlge->fltds.version));
+ QL_PRINT(DBG_FLASH, ("%s(%d) checksum %xh\n",
+ __func__, qlge->instance, qlge->fltds.checksum));
+	/* the signature should read "QFLT" */
+ if (chksum != 0 || qlge->fltds.signature != FLASH_FLTDS_SIGNATURE) {
+ cmn_err(CE_WARN, "%s(%d) invalid flash layout table data"
+ " structure", __func__, qlge->instance);
+ bzero(&qlge->fltds, sizeof (ql_fltds_t));
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+}
+
+/*
+ * ql_flash_flt
+ * Get flash layout table.
+ */
+int
+ql_flash_flt(qlge_t *qlge)
+{
+ uint32_t addr, cnt;
+ int rval = DDI_FAILURE;
+ ql_flt_entry_t *entry;
+ uint8_t region;
+
+ addr = qlge->fltds.flt_addr_hi;
+ addr <<= 16;
+ addr |= qlge->fltds.flt_addr_lo;
+
+	/* 1. read the flt header to learn how long the table is */
+ rval = qlge_dump_fcode(qlge, (uint8_t *)&qlge->flt.header,
+ sizeof (ql_flt_header_t), addr);
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) read flt header at %x error",
+ __func__, qlge->instance, addr);
+ bzero(&qlge->flt, sizeof (ql_flt_header_t));
+ return (rval);
+ }
+
+ LITTLE_ENDIAN_16(&qlge->flt.header.version);
+ LITTLE_ENDIAN_16(&qlge->flt.header.length);
+ LITTLE_ENDIAN_16(&qlge->flt.header.checksum);
+ LITTLE_ENDIAN_16(&qlge->flt.header.reserved);
+
+ if ((qlge->flt.header.version != 1) &&
+ (qlge->flt.header.version != 0)) {
+ cmn_err(CE_WARN, "%s(%d) read flt header at %x error",
+ __func__, qlge->instance, addr);
+ bzero(&qlge->flt, sizeof (ql_flt_header_t));
+ return (DDI_FAILURE);
+ }
+	/* 2. allocate memory to save all flt table entries */
+ if ((qlge->flt.ql_flt_entry_ptr = (ql_flt_entry_t *)
+ (kmem_zalloc(qlge->flt.header.length, KM_SLEEP))) == NULL) {
+ cmn_err(CE_WARN, "%s(%d) flt table alloc failed",
+ __func__, qlge->instance);
+ goto err;
+ }
+	/* how many entries? */
+ qlge->flt.num_entries = (uint16_t)(qlge->flt.header.length /
+ sizeof (ql_flt_entry_t));
+
+ /* 3. read the rest of flt table */
+ addr += (uint32_t)sizeof (ql_flt_header_t);
+ QL_PRINT(DBG_FLASH, ("%s(%d) flt has %x entries \n",
+ __func__, qlge->instance, qlge->flt.num_entries));
+ rval = qlge_dump_fcode(qlge,
+ (uint8_t *)qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length,
+ addr);
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "read flt table entry error");
+ goto err;
+ }
+
+ entry = (ql_flt_entry_t *)qlge->flt.ql_flt_entry_ptr;
+ for (cnt = 0; cnt < qlge->flt.num_entries; cnt++) {
+ LITTLE_ENDIAN_32(&entry->size);
+ LITTLE_ENDIAN_32(&entry->begin_addr);
+ LITTLE_ENDIAN_32(&entry->end_addr);
+ entry++;
+ }
+	/* TODO: 4. checksum verification */
+
+	/* 5. search index of Flash Descriptor Table in Flash Layout Table */
+ entry = (ql_flt_entry_t *)qlge->flt.ql_flt_entry_ptr;
+ qlge->flash_fdt_addr = 0;
+ for (cnt = 0; cnt < qlge->flt.num_entries; cnt++) {
+ if (entry->region == FLT_REGION_FDT) {
+ qlge->flash_flt_fdt_index = cnt;
+ qlge->flash_fdt_addr = entry->begin_addr;
+ qlge->flash_fdt_size = entry->size;
+ QL_PRINT(DBG_FLASH, ("%s(%d) flash_flt_fdt_index is"
+ " %x, addr %x,size %x \n", __func__,
+ qlge->instance,
+ cnt, entry->begin_addr, entry->size));
+ break;
+ }
+ entry++;
+ }
+
+ if (qlge->flash_fdt_addr == 0) {
+ cmn_err(CE_WARN, "%s(%d) flash descriptor table not found",
+ __func__, qlge->instance);
+ goto err;
+ }
+	/* 6. search index of Nic Config Table in the Flash Layout Table */
+ entry = (ql_flt_entry_t *)qlge->flt.ql_flt_entry_ptr;
+ if (qlge->func_number == qlge->fn0_net)
+ region = FLT_REGION_NIC_PARAM0;
+ else
+ region = FLT_REGION_NIC_PARAM1;
+ qlge->flash_nic_config_table_addr = 0;
+ for (cnt = 0; cnt < qlge->flt.num_entries; cnt++) {
+ if (entry->region == region) {
+ qlge->flash_flt_nic_config_table_index = cnt;
+ qlge->flash_nic_config_table_addr = entry->begin_addr;
+ qlge->flash_nic_config_table_size = entry->size;
+ QL_PRINT(DBG_FLASH, ("%s(%d) "
+ "flash_flt_nic_config_table_index "
+ "is %x, address %x, size %x \n",
+ __func__, qlge->instance,
+ cnt, entry->begin_addr, entry->size));
+ break;
+ }
+ entry++;
+ }
+ if (qlge->flash_nic_config_table_addr == 0) {
+ cmn_err(CE_WARN, "%s(%d) NIC Configuration Table not found",
+ __func__, qlge->instance);
+ goto err;
+ }
+
+ return (DDI_SUCCESS);
+err:
+	/*
+	 * free the entry buffer before wiping the header, since the
+	 * header holds the allocation length needed by kmem_free()
+	 */
+	if (qlge->flt.ql_flt_entry_ptr != NULL) {
+		kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
+		qlge->flt.ql_flt_entry_ptr = NULL;
+	}
+	bzero(&qlge->flt, sizeof (ql_flt_header_t));
+ cmn_err(CE_WARN, "%s(%d) read FLT failed", __func__, qlge->instance);
+ return (DDI_FAILURE);
+}
+
+/*
+ * ql_flash_desc
+ * Get flash descriptor table.
+ */
+static int
+ql_flash_desc(qlge_t *qlge)
+{
+ uint8_t w8;
+ uint32_t cnt, addr;
+ uint16_t chksum, *bp, data;
+ int rval;
+
+ addr = qlge->flash_fdt_addr;
+
+ rval = qlge_dump_fcode(qlge, (uint8_t *)&qlge->fdesc,
+ sizeof (flash_desc_t), addr);
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) read Flash Descriptor Table error",
+ __func__, qlge->instance);
+ bzero(&qlge->fdesc, sizeof (flash_desc_t));
+ return (rval);
+ }
+
+ chksum = 0;
+ data = 0;
+ bp = (uint16_t *)&qlge->fdesc;
+ for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
+ data = *bp;
+ LITTLE_ENDIAN_16(&data);
+ chksum += data;
+ bp++;
+ }
+ /* endian adjustment */
+ LITTLE_ENDIAN_32(&qlge->fdesc.flash_valid);
+ LITTLE_ENDIAN_16(&qlge->fdesc.flash_version);
+ LITTLE_ENDIAN_16(&qlge->fdesc.flash_len);
+ LITTLE_ENDIAN_16(&qlge->fdesc.flash_checksum);
+ LITTLE_ENDIAN_16(&qlge->fdesc.flash_unused);
+ LITTLE_ENDIAN_16(&qlge->fdesc.flash_manuf);
+ LITTLE_ENDIAN_16(&qlge->fdesc.flash_id);
+ LITTLE_ENDIAN_32(&qlge->fdesc.block_size);
+ LITTLE_ENDIAN_32(&qlge->fdesc.alt_block_size);
+ LITTLE_ENDIAN_32(&qlge->fdesc.flash_size);
+ LITTLE_ENDIAN_32(&qlge->fdesc.write_enable_data);
+ LITTLE_ENDIAN_32(&qlge->fdesc.read_timeout);
+
+	/* flash size in the desc table is in units of 1024 bytes */
+ QL_PRINT(DBG_FLASH, ("flash_valid=%xh\n", qlge->fdesc.flash_valid));
+ QL_PRINT(DBG_FLASH, ("flash_version=%xh\n", qlge->fdesc.flash_version));
+ QL_PRINT(DBG_FLASH, ("flash_len=%xh\n", qlge->fdesc.flash_len));
+ QL_PRINT(DBG_FLASH, ("flash_checksum=%xh\n",
+ qlge->fdesc.flash_checksum));
+
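+	/* temporarily NUL-terminate flash_model so it prints safely */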
+ w8 = qlge->fdesc.flash_model[15];
+ qlge->fdesc.flash_model[15] = 0;
+ QL_PRINT(DBG_FLASH, ("flash_model=%s\n", qlge->fdesc.flash_model));
+ qlge->fdesc.flash_model[15] = w8;
+ QL_PRINT(DBG_FLASH, ("flash_size=%xK bytes\n", qlge->fdesc.flash_size));
+ qlge->fdesc.flash_size = qlge->fdesc.flash_size * 0x400;
+ qlge->flash_info.flash_size = qlge->fdesc.flash_size;
+
+ if (chksum != 0 || qlge->fdesc.flash_valid != FLASH_DESC_VAILD ||
+ qlge->fdesc.flash_version != FLASH_DESC_VERSION) {
+ cmn_err(CE_WARN, "invalid descriptor table");
+ bzero(&qlge->fdesc, sizeof (flash_desc_t));
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * ql_flash_nic_config
+ * Get flash NIC Configuration table.
+ */
+static int
+ql_flash_nic_config(qlge_t *qlge)
+{
+ uint32_t cnt, addr;
+ uint16_t chksum, *bp, data;
+ int rval;
+
+ addr = qlge->flash_nic_config_table_addr;
+
+ rval = qlge_dump_fcode(qlge, (uint8_t *)&qlge->nic_config,
+ sizeof (ql_nic_config_t), addr);
+
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "fail to read nic_cfg image %xh", rval);
+ bzero(&qlge->nic_config, sizeof (ql_nic_config_t));
+ return (rval);
+ }
+
+ chksum = 0;
+ data = 0;
+ bp = (uint16_t *)&qlge->nic_config;
+ for (cnt = 0; cnt < (sizeof (ql_nic_config_t)) / 2; cnt++) {
+ data = *bp;
+ LITTLE_ENDIAN_16(&data);
+ chksum += data;
+ bp++;
+ }
+
+ LITTLE_ENDIAN_32(&qlge->nic_config.signature);
+ LITTLE_ENDIAN_16(&qlge->nic_config.version);
+ LITTLE_ENDIAN_16(&qlge->nic_config.size);
+ LITTLE_ENDIAN_16(&qlge->nic_config.checksum);
+ LITTLE_ENDIAN_16(&qlge->nic_config.total_data_size);
+ LITTLE_ENDIAN_16(&qlge->nic_config.num_of_entries);
+ LITTLE_ENDIAN_16(&qlge->nic_config.vlan_id);
+ LITTLE_ENDIAN_16(&qlge->nic_config.last_entry);
+ LITTLE_ENDIAN_16(&qlge->nic_config.subsys_vendor_id);
+ LITTLE_ENDIAN_16(&qlge->nic_config.subsys_device_id);
+
+ QL_PRINT(DBG_FLASH, ("(%d): signature=%xh\n",
+ qlge->instance, qlge->nic_config.signature));
+ QL_PRINT(DBG_FLASH, ("(%d): size=%xh\n",
+ qlge->instance, qlge->nic_config.size));
+ QL_PRINT(DBG_FLASH, ("(%d): checksum=%xh\n",
+ qlge->instance, qlge->nic_config.checksum));
+ QL_PRINT(DBG_FLASH, ("(%d): version=%xh\n",
+ qlge->instance, qlge->nic_config.version));
+ QL_PRINT(DBG_FLASH, ("(%d): total_data_size=%xh\n",
+ qlge->instance, qlge->nic_config.total_data_size));
+ QL_PRINT(DBG_FLASH, ("(%d): num_of_entries=%xh\n",
+ qlge->instance, qlge->nic_config.num_of_entries));
+ QL_PRINT(DBG_FLASH, ("(%d): data_type=%xh\n",
+ qlge->instance, qlge->nic_config.factory_data_type));
+ QL_PRINT(DBG_FLASH, ("(%d): data_type_size=%xh\n",
+ qlge->instance, qlge->nic_config.factory_data_type_size));
+ QL_PRINT(DBG_FLASH,
+ ("(%d): factory mac=%02x %02x %02x %02x %02x %02x h\n",
+ qlge->instance,
+ qlge->nic_config.factory_MAC[0],
+ qlge->nic_config.factory_MAC[1],
+ qlge->nic_config.factory_MAC[2],
+ qlge->nic_config.factory_MAC[3],
+ qlge->nic_config.factory_MAC[4],
+ qlge->nic_config.factory_MAC[5]));
+
+ QL_PRINT(DBG_FLASH, ("(%d): data_type=%xh\n",
+ qlge->instance, qlge->nic_config.clp_data_type));
+ QL_PRINT(DBG_FLASH, ("(%d): data_type_size=%xh\n",
+ qlge->instance, qlge->nic_config.clp_data_type_size));
+ QL_PRINT(DBG_FLASH, ("(%d): clp mac=%x %x %x %x %x %x h\n",
+ qlge->instance,
+ qlge->nic_config.clp_MAC[0],
+ qlge->nic_config.clp_MAC[1],
+ qlge->nic_config.clp_MAC[2],
+ qlge->nic_config.clp_MAC[3],
+ qlge->nic_config.clp_MAC[4],
+ qlge->nic_config.clp_MAC[5]));
+
+ QL_PRINT(DBG_FLASH, ("(%d): data_type=%xh\n",
+ qlge->instance, qlge->nic_config.clp_vlan_data_type));
+ QL_PRINT(DBG_FLASH, ("(%d): data_type_size=%xh\n",
+ qlge->instance, qlge->nic_config.clp_vlan_data_type_size));
+ QL_PRINT(DBG_FLASH, ("(%d): vlan_id=%xh\n",
+ qlge->instance, qlge->nic_config.vlan_id));
+
+ QL_PRINT(DBG_FLASH, ("(%d): data_type=%xh\n",
+ qlge->instance, qlge->nic_config.last_data_type));
+ QL_PRINT(DBG_FLASH, ("(%d): data_type_size=%xh\n",
+ qlge->instance, qlge->nic_config.last_data_type_size));
+ QL_PRINT(DBG_FLASH, ("(%d): last_entry=%xh\n",
+ qlge->instance, qlge->nic_config.last_entry));
+
+ QL_PRINT(DBG_FLASH, ("(%d): subsys_vendor_id=%xh\n",
+ qlge->instance, qlge->nic_config.subsys_vendor_id));
+ QL_PRINT(DBG_FLASH, ("(%d): subsys_device_id=%xh\n",
+ qlge->instance, qlge->nic_config.subsys_device_id));
+
+ if (chksum != 0 || qlge->nic_config.signature !=
+ FLASH_NIC_CONFIG_SIGNATURE || qlge->nic_config.version != 1) {
+ cmn_err(CE_WARN,
+ "invalid flash nic configuration table: chksum %x, "
+ "signature %x, version %x",
+ chksum, qlge->nic_config.signature,
+ qlge->nic_config.version);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+int
+ql_flash_vpd(qlge_t *qlge, uint8_t *buf)
+{
+ uint32_t cnt;
+ uint16_t chksum, *bp, data;
+ int rval;
+ uint32_t vpd_size;
+
+ if (buf == NULL) {
+ cmn_err(CE_WARN, "%s(%d) buffer is not available.",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+
+	if (!qlge->flash_vpd_addr) {
+		if (qlge->func_number == qlge->fn0_net)
+			qlge->flash_vpd_addr = ISP_8100_VPD0_ADDR;
+		else
+			qlge->flash_vpd_addr = ISP_8100_VPD1_ADDR;
+	}
+	/*
+	 * Both VPD regions are assumed to be ISP_8100_VPD0_SIZE bytes;
+	 * set the size unconditionally so it is never used uninitialized.
+	 */
+	vpd_size = ISP_8100_VPD0_SIZE;
+ rval = qlge_dump_fcode(qlge, buf, vpd_size, qlge->flash_vpd_addr);
+
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d)read error",
+ __func__, qlge->instance);
+ bzero(buf, vpd_size);
+ return (rval);
+ }
+
+ QL_DUMP(DBG_FLASH, "flash vpd table raw data:\n", buf, 8, vpd_size);
+
+ chksum = 0;
+ data = 0;
+ bp = (uint16_t *)(void *)buf;
+ for (cnt = 0; cnt < (vpd_size/2); cnt++) {
+ data = *bp;
+ LITTLE_ENDIAN_16(&data);
+ chksum += data;
+ bp++;
+ }
+ if (chksum != 0) {
+ cmn_err(CE_WARN, "%s(%d) invalid flash vpd table",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+}
+
+int
+ql_get_flash_params(qlge_t *qlge)
+{
+ int rval = DDI_SUCCESS;
+
+ /* Get semaphore to access Flash Address and Flash Data Registers */
+ if (ql_sem_spinlock(qlge, QL_FLASH_SEM_MASK)) {
+ rval = DDI_FAILURE;
+ goto out;
+ }
+ /* do test read of flash ID */
+ rval = ql_flash_id(qlge);
+ if (rval != DDI_SUCCESS)
+ goto out;
+
+ /*
+	 * Temporarily set the fdesc.flash_size to a 4M flash size
+	 * to avoid failing the range check in qlge_dump_fcode.
+ */
+ qlge->fdesc.flash_size = 4096 * 1024; /* ie. 4M bytes */
+
+ /* Default flash descriptor table. */
+ qlge->fdesc.write_statusreg_cmd = 1;
+ qlge->fdesc.write_enable_bits = 0;
+ qlge->fdesc.unprotect_sector_cmd = 0;
+ qlge->fdesc.protect_sector_cmd = 0;
+ qlge->fdesc.write_disable_bits = 0x9c;
+ qlge->fdesc.block_size = 0x10000;
+ qlge->fdesc.erase_cmd = 0xd8;
+
+	/* TODO: these values should be read from the FLTDS instead */
+ qlge->fltds.flt_addr_hi = 0x36;
+ qlge->fltds.flt_addr_lo = 0x1000;
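+	/* i.e. the FLT lives at flash byte address 0x361000 (hi << 16 | lo) */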
+ /* read all other tables from Flash memory */
+ if (ql_flash_flt(qlge) != DDI_SUCCESS) {
+ if (CFG_IST(qlge, CFG_CHIP_8100)) {
+ qlge->flash_fdt_addr = ISP_8100_FDT_ADDR; /* 0x360000 */
+ if (qlge->func_number == qlge->fn0_net)
+ /* 0x140200 */
+ qlge->flash_nic_config_table_addr =
+ ISP_8100_NIC_PARAM0_ADDR;
+ else
+ /* 0x140600 */
+ qlge->flash_nic_config_table_addr =
+ ISP_8100_NIC_PARAM1_ADDR;
+ }
+ }
+ ql_flash_desc(qlge);
+ ql_flash_nic_config(qlge);
+
+out:
+ ql_sem_unlock(qlge, QL_FLASH_SEM_MASK);
+
+ return (rval);
+}
+
+/*
+ * ql_setup_flash
+ * Gets the manufacturer and id number of the flash chip,
+ * and sets up the size parameter.
+ */
+int
+ql_setup_flash(qlge_t *qlge)
+{
+ int rval = DDI_SUCCESS;
+
+ if (qlge->flash_fltds_addr != 0) {
+ return (rval);
+ }
+ if (ql_sem_spinlock(qlge, QL_FLASH_SEM_MASK)) {
+ rval = DDI_FAILURE;
+ goto out;
+ }
+ /* try reading flash ID */
+ rval = ql_flash_id(qlge);
+ if (rval != DDI_SUCCESS)
+ goto out;
+
+ /* Default flash descriptor table. */
+ qlge->fdesc.write_statusreg_cmd = 1;
+ qlge->fdesc.write_enable_bits = 0;
+ qlge->fdesc.unprotect_sector_cmd = 0;
+ qlge->fdesc.protect_sector_cmd = 0;
+ qlge->fdesc.write_disable_bits = 0x9c;
+ qlge->fdesc.block_size = 0x10000;
+ qlge->fdesc.erase_cmd = 0xd8;
+	/* 1. get the location of Flash Layout Table Data Structure (FLTDS) */
+ if (ql_find_flash_layout_table_data_structure_addr(qlge)
+ == DDI_SUCCESS) {
+		/* 2. read the FLTDS */
+ if (ql_flash_fltds(qlge) == DDI_SUCCESS) {
+ /*
+			 * 3. search for flash descriptor table (FDT)
+ * and Nic Configuration Table indices
+ */
+ if ((qlge->flash_fdt_addr == 0) ||
+ (qlge->flash_nic_config_table_addr == 0)) {
+ rval = ql_flash_flt(qlge);
+ if (rval == DDI_SUCCESS) {
+ ql_flash_desc(qlge);
+ ql_flash_nic_config(qlge);
+ } else {
+ rval = DDI_FAILURE;
+ goto out;
+ }
+ }
+ } else {
+ rval = DDI_FAILURE;
+ goto out;
+ }
+ } else {
+ rval = DDI_FAILURE;
+ goto out;
+ }
+out:
+ ql_sem_unlock(qlge, QL_FLASH_SEM_MASK);
+
+ return (rval);
+
+}
+
+/*
+ * ql_change_endian
+ * Change the endianness of a byte array, i.e. reverse it in place.
+ */
+void
+ql_change_endian(uint8_t buf[], size_t size)
+{
+ uint8_t byte;
+ size_t cnt1;
+ size_t cnt;
+
+ cnt1 = size - 1;
+ for (cnt = 0; cnt < size / 2; cnt++) {
+ byte = buf[cnt1];
+ buf[cnt1] = buf[cnt];
+ buf[cnt] = byte;
+ cnt1--;
+ }
+}
+
+static int
+ql_wait_flash_reg_ready(qlge_t *qlge, uint32_t wait_bit)
+{
+ uint32_t reg_status;
+ int rtn_val = DDI_SUCCESS;
+ uint32_t delay = 300000;
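+	/* poll every 10us; 300000 iterations gives a ~3 second timeout */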
+
+ do {
+ reg_status = ql_read_reg(qlge, REG_FLASH_ADDRESS);
+ if (reg_status & FLASH_ERR_FLAG) {
+ cmn_err(CE_WARN,
+ "%s(%d) flash address register error bit set!",
+ __func__, qlge->instance);
+ rtn_val = DDI_FAILURE;
+ break;
+ }
+ if (reg_status & wait_bit) {
+ break;
+ }
+ drv_usecwait(10);
+ } while (--delay);
+
+ if (delay == 0) {
+ cmn_err(CE_WARN,
+ "%s(%d) timeout error!", __func__, qlge->instance);
+ rtn_val = DDI_FAILURE;
+ }
+ return (rtn_val);
+}
+
+/*
+ * ql_read_flash
+ * Reads a 32bit word from FLASH.
+ */
+static int
+ql_read_flash(qlge_t *qlge, uint32_t faddr, uint32_t *bp)
+{
+ int rval = DDI_SUCCESS;
+
+ ql_write_reg(qlge, REG_FLASH_ADDRESS, faddr | FLASH_R_FLAG);
+
+ /* Wait for READ cycle to complete. */
+ rval = ql_wait_flash_reg_ready(qlge, FLASH_RDY_FLAG);
+
+ if (rval == DDI_SUCCESS) {
+ *bp = ql_read_reg(qlge, REG_FLASH_DATA);
+ }
+ return (rval);
+}
+
+static int
+ql_read_flash_status(qlge_t *qlge, uint8_t *value)
+{
+ int rtn_val = DDI_SUCCESS;
+ uint32_t data, cmd = FLASH_CONF_ADDR | FLASH_R_FLAG;
+
+ if ((rtn_val = ql_wait_flash_reg_ready(qlge, FLASH_RDY_FLAG))
+ != DDI_SUCCESS) {
+ return (rtn_val);
+ }
+ cmd |= FLASH_RDSR_CMD /* 0x05 */;
+ ql_write_reg(qlge, REG_FLASH_ADDRESS, cmd);
+ if ((rtn_val = ql_wait_flash_reg_ready(qlge,
+ FLASH_RDY_FLAG | FLASH_R_FLAG)) != DDI_SUCCESS) {
+ return (rtn_val);
+ }
+ data = ql_read_reg(qlge, REG_FLASH_DATA);
+ *value = (uint8_t)(data & 0xff);
+ return (rtn_val);
+}
+
+static int
+ql_flash_write_enable(qlge_t *qlge)
+{
+ uint8_t reg_status;
+ int rtn_val = DDI_SUCCESS;
+ uint32_t cmd = FLASH_CONF_ADDR;
+ uint32_t delay = 300000;
+
+ if ((rtn_val = ql_wait_flash_reg_ready(qlge, FLASH_RDY_FLAG))
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "%s(%d) timeout!", __func__, qlge->instance);
+ rtn_val = DDI_FAILURE;
+ return (rtn_val);
+ }
+ cmd |= qlge->fdesc.write_enable_cmd;
+ ql_write_reg(qlge, REG_FLASH_ADDRESS, cmd);
+ /* wait for WEL bit set */
+ if ((rtn_val = ql_wait_flash_reg_ready(qlge, FLASH_RDY_FLAG))
+ == DDI_SUCCESS) {
+ do {
+ ql_read_flash_status(qlge, &reg_status);
+ if (reg_status & BIT_1)
+ break;
+ drv_usecwait(10);
+ } while (--delay);
+ }
+ if (delay == 0) {
+ cmn_err(CE_WARN,
+ "%s(%d) timeout error! flash status reg: %x",
+ __func__, qlge->instance, reg_status);
+ rtn_val = DDI_FAILURE;
+ }
+ return (rtn_val);
+}
+
+static int
+ql_flash_erase_sector(qlge_t *qlge, uint32_t sectorAddr)
+{
+ int rtn_val = DDI_SUCCESS;
+ uint32_t data, cmd = FLASH_CONF_ADDR;
+ uint32_t delay = 300000;
+ uint8_t flash_status;
+
+ if ((rtn_val = ql_wait_flash_reg_ready(qlge, FLASH_RDY_FLAG))
+ != DDI_SUCCESS) {
+ return (rtn_val);
+ }
+
+ cmd |= (0x0300 | qlge->fdesc.erase_cmd);
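+	/* the 24-bit sector address is byte-swapped: MSB goes out first */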
+ data = ((sectorAddr & 0xff) << 16) | (sectorAddr & 0xff00) |
+ ((sectorAddr & 0xff0000) >> 16);
+
+ ql_write_reg(qlge, REG_FLASH_DATA, data);
+ ql_write_reg(qlge, REG_FLASH_ADDRESS, cmd);
+
+ if ((rtn_val = ql_wait_flash_reg_ready(qlge, FLASH_RDY_FLAG))
+ == DDI_SUCCESS) {
+		/* wait for the Write In Progress (WIP) bit to clear */
+ do {
+ ql_read_flash_status(qlge, &flash_status);
+ if ((flash_status & BIT_0 /* WIP */) == 0)
+ break;
+ drv_usecwait(10);
+ } while (--delay);
+ } else {
+ return (rtn_val);
+ }
+
+ if (delay == 0) {
+ cmn_err(CE_WARN,
+ "%s(%d) timeout error! flash status reg: %x",
+ __func__, qlge->instance, flash_status);
+ rtn_val = DDI_FAILURE;
+ }
+ return (rtn_val);
+}
+
+/*
+ * ql_write_flash
+ * Writes a 32bit word to FLASH.
+ */
+static int
+ql_write_flash(qlge_t *qlge, uint32_t addr, uint32_t data)
+{
+ int rval = DDI_SUCCESS;
+ uint32_t delay = 300000;
+ uint8_t flash_status;
+
+ ql_write_reg(qlge, REG_FLASH_DATA, data);
+ ql_read_reg(qlge, REG_FLASH_DATA);
+ ql_write_reg(qlge, REG_FLASH_ADDRESS, addr);
+
+ if ((rval = ql_wait_flash_reg_ready(qlge, FLASH_RDY_FLAG))
+ == DDI_SUCCESS) {
+ if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
+			/* wait for the Write In Progress (WIP) bit to clear */
+ do {
+ ql_read_flash_status(qlge, &flash_status);
+ if ((flash_status & BIT_0 /* WIP */) == 0)
+ break;
+ drv_usecwait(10);
+ } while (--delay);
+ }
+ } else {
+ return (rval);
+ }
+
+ if (delay == 0) {
+ cmn_err(CE_WARN,
+ "%s(%d) timeout error! flash status reg: %x",
+ __func__, qlge->instance, flash_status);
+ rval = DDI_FAILURE;
+ }
+
+ return (rval);
+}
+
+/*
+ * ql_unprotect_flash
+ * Enable writes
+ */
+static int
+ql_unprotect_flash(qlge_t *qlge)
+{
+ int fdata, rtn_val;
+
+ if ((rtn_val = ql_flash_write_enable(qlge)) != DDI_SUCCESS) {
+ return (rtn_val);
+ }
+
+ if ((rtn_val = ql_wait_flash_reg_ready(qlge, FLASH_RDY_FLAG))
+ != DDI_SUCCESS) {
+ return (rtn_val);
+ }
+
+ /*
+ * Remove block write protection (SST and ST) and
+ * Sector/Block Protection Register Lock (SST, ST, ATMEL).
+ * Unprotect sectors.
+ */
+ (void) ql_write_flash(qlge,
+ FLASH_CONF_ADDR | 0x100 | qlge->fdesc.write_statusreg_cmd,
+ qlge->fdesc.write_enable_bits);
+
+ if (qlge->fdesc.unprotect_sector_cmd != 0) {
+ for (fdata = 0; fdata < 0x10; fdata++) {
+ (void) ql_write_flash(qlge, FLASH_CONF_ADDR |
+ 0x300 | qlge->fdesc.unprotect_sector_cmd, fdata);
+ }
+
+ (void) ql_write_flash(qlge, FLASH_CONF_ADDR | 0x300 |
+ qlge->fdesc.unprotect_sector_cmd, 0x00400f);
+ (void) ql_write_flash(qlge, FLASH_CONF_ADDR | 0x300 |
+ qlge->fdesc.unprotect_sector_cmd, 0x00600f);
+ (void) ql_write_flash(qlge, FLASH_CONF_ADDR | 0x300 |
+ qlge->fdesc.unprotect_sector_cmd, 0x00800f);
+ }
+ rtn_val = ql_wait_flash_reg_ready(qlge, FLASH_RDY_FLAG);
+ return (rtn_val);
+}
+
+/*
+ * ql_protect_flash
+ * Disable writes
+ */
+static int
+ql_protect_flash(qlge_t *qlge)
+{
+ int fdata, rtn_val;
+
+ if ((rtn_val = ql_flash_write_enable(qlge)) != DDI_SUCCESS) {
+ return (rtn_val);
+ }
+
+ if ((rtn_val = ql_wait_flash_reg_ready(qlge, FLASH_RDY_FLAG))
+ != DDI_SUCCESS) {
+ return (rtn_val);
+ }
+ /*
+ * Protect sectors.
+ * Set block write protection (SST and ST) and
+ * Sector/Block Protection Register Lock (SST, ST, ATMEL).
+ */
+
+ if (qlge->fdesc.protect_sector_cmd != 0) {
+ for (fdata = 0; fdata < 0x10; fdata++) {
+ (void) ql_write_flash(qlge, FLASH_CONF_ADDR |
+ 0x330 | qlge->fdesc.protect_sector_cmd, fdata);
+ }
+ (void) ql_write_flash(qlge, FLASH_CONF_ADDR | 0x330 |
+ qlge->fdesc.protect_sector_cmd, 0x00400f);
+ (void) ql_write_flash(qlge, FLASH_CONF_ADDR | 0x330 |
+ qlge->fdesc.protect_sector_cmd, 0x00600f);
+ (void) ql_write_flash(qlge, FLASH_CONF_ADDR | 0x330 |
+ qlge->fdesc.protect_sector_cmd, 0x00800f);
+
+ (void) ql_write_flash(qlge,
+ FLASH_CONF_ADDR | 0x101, 0x80);
+ } else {
+ (void) ql_write_flash(qlge,
+ FLASH_CONF_ADDR | 0x100 | qlge->fdesc.write_statusreg_cmd,
+ qlge->fdesc.write_disable_bits /* 0x9c */);
+ }
+
+ rtn_val = ql_wait_flash_reg_ready(qlge, FLASH_RDY_FLAG);
+ return (rtn_val);
+}
+
+/*
+ * ql_write_flash_test
+ * test write to a flash sector that is not being used
+ */
+void
+ql_write_flash_test(qlge_t *qlge, uint32_t test_addr)
+{
+ uint32_t old_data, data;
+ uint32_t addr = 0;
+
+ addr = (test_addr / 4);
+ ql_read_flash(qlge, addr, &old_data);
+ QL_PRINT(DBG_FLASH, ("read addr %x old value %x\n", test_addr,
+ old_data));
+
+ /* enable writing to flash */
+ ql_unprotect_flash(qlge);
+
+ /* erase the sector */
+ ql_flash_erase_sector(qlge, test_addr);
+ ql_read_flash(qlge, addr, &data);
+ QL_PRINT(DBG_FLASH, ("after sector erase, addr %x value %x\n",
+ test_addr, data));
+
+ /* write new value to it and read back to confirm */
+ data = 0x33445566;
+ ql_write_flash(qlge, addr, data);
+ QL_PRINT(DBG_FLASH, ("new value written to addr %x value %x\n",
+ test_addr, data));
+ ql_read_flash(qlge, addr, &data);
+ if (data != 0x33445566) {
+ cmn_err(CE_WARN, "flash write test failed, get data %x"
+ " after writing", data);
+ }
+
+ /* write old value to it and read back to restore */
+ ql_flash_erase_sector(qlge, test_addr);
+ ql_write_flash(qlge, addr, old_data);
+ ql_read_flash(qlge, addr, &data);
+ QL_PRINT(DBG_FLASH, ("write back old value addr %x value %x\n",
+ test_addr, data));
+
+	/* test done, protect the flash to forbid any more flash writing */
+ ql_protect_flash(qlge);
+
+}
+
+
+void
+ql_write_flash_test2(qlge_t *qlge, uint32_t test_addr)
+{
+ uint32_t data, old_data;
+
+ qlge_dump_fcode(qlge, (uint8_t *)&old_data, sizeof (old_data),
+ test_addr);
+ QL_PRINT(DBG_FLASH, ("read addr %x old value %x\n",
+ test_addr, old_data));
+
+ data = 0x12345678;
+
+ QL_PRINT(DBG_FLASH, ("write new test value %x\n", data));
+ qlge_load_flash(qlge, (uint8_t *)&data, sizeof (data), test_addr);
+ qlge_dump_fcode(qlge, (uint8_t *)&data, sizeof (data), test_addr);
+ if (data != 0x12345678) {
+ cmn_err(CE_WARN,
+ "flash write test failed, get data %x after writing",
+ data);
+ }
+ /* write old value to it and read back to restore */
+ qlge_load_flash(qlge, (uint8_t *)&old_data, sizeof (old_data),
+ test_addr);
+ qlge_dump_fcode(qlge, (uint8_t *)&data, sizeof (data),
+ test_addr);
+ QL_PRINT(DBG_FLASH, ("write back old value addr %x value %x verified\n",
+ test_addr, data));
+}
+
+/*
+ * ql_sem_flash_lock
+ * Flash memory is a shared resource among the various PCI functions,
+ * so whoever wants to access it must take this lock first.
+ */
+int
+ql_sem_flash_lock(qlge_t *qlge)
+{
+ int rval = DDI_SUCCESS;
+
+ /* Get semaphore to access Flash Address and Flash Data Registers */
+ if (ql_sem_spinlock(qlge, QL_FLASH_SEM_MASK)) {
+ rval = DDI_FAILURE;
+ }
+ return (rval);
+}
+
+void
+ql_sem_flash_unlock(qlge_t *qlge)
+{
+ ql_sem_unlock(qlge, QL_FLASH_SEM_MASK);
+}
diff --git a/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_gld.c b/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_gld.c
new file mode 100644
index 0000000000..1accd358c7
--- /dev/null
+++ b/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_gld.c
@@ -0,0 +1,919 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 QLogic Corporation. All rights reserved.
+ */
+
+#include <qlge.h>
+#include <sys/strsubr.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <inet/ip.h>
+
+/*
+ * GLDv3 functions prototypes
+ */
+static int ql_m_getstat(void *, uint_t, uint64_t *);
+static int ql_m_start(void *);
+static void ql_m_stop(void *);
+static int ql_m_setpromiscuous(void *, boolean_t);
+static int ql_m_multicst(void *, boolean_t, const uint8_t *);
+static int ql_m_unicst(void *, const uint8_t *);
+static mblk_t *ql_m_tx(void *, mblk_t *);
+static void ql_m_ioctl(void *, queue_t *, mblk_t *);
+static boolean_t ql_m_getcapab(void *, mac_capab_t, void *);
+static int ql_unicst_set(qlge_t *qlge, const uint8_t *macaddr, int slot);
+
+static int ql_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
+ const void *);
+static int ql_m_getprop(void *, const char *, mac_prop_id_t, uint_t, uint_t,
+ void *, uint_t *);
+#define QL_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
+static mac_callbacks_t ql_m_callbacks = {
+ QL_M_CALLBACK_FLAGS,
+ ql_m_getstat,
+ ql_m_start,
+ ql_m_stop,
+ ql_m_setpromiscuous,
+ ql_m_multicst,
+ NULL,
+ NULL,
+ ql_m_ioctl,
+ ql_m_getcapab,
+ NULL,
+ NULL,
+ ql_m_setprop,
+ ql_m_getprop
+};
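+/*
+ * The mc_unicst and mc_tx entries are left NULL above; ql_gld3_init()
+ * fills them in before the mac is registered.
+ */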
+mac_priv_prop_t qlge_priv_prop[] = {
+ {"_adv_pause_mode", MAC_PROP_PERM_RW}
+};
+
+#define QLGE_MAX_PRIV_PROPS \
+ (sizeof (qlge_priv_prop) / sizeof (mac_priv_prop_t))
+
+/*
+ * This function starts the driver
+ */
+static int
+ql_m_start(void *arg)
+{
+ qlge_t *qlge = (qlge_t *)arg;
+
+ /*
+ * reset chip, re-initialize everything but do not
+ * re-allocate memory
+ */
+ mutex_enter(&qlge->gen_mutex);
+ if (qlge->mac_flags == QL_MAC_SUSPENDED) {
+ mutex_exit(&qlge->gen_mutex);
+ return (ECANCELED);
+ }
+ mutex_enter(&qlge->hw_mutex);
+ qlge->mac_flags = QL_MAC_INIT;
+ /*
+ * Write default ethernet address to chip register Mac
+ * Address slot 0 and Enable Primary Mac Function.
+ */
+ ql_unicst_set(qlge,
+ (uint8_t *)qlge->unicst_addr[0].addr.ether_addr_octet, 0);
+ qlge->stats.rpackets = 0;
+ qlge->stats.rbytes = 0;
+ qlge->stats.opackets = 0;
+ qlge->stats.obytes = 0;
+ mutex_exit(&qlge->hw_mutex);
+
+ ql_do_start(qlge);
+ mutex_exit(&qlge->gen_mutex);
+
+ mutex_enter(&qlge->mbx_mutex);
+ ql_get_firmware_version(qlge, NULL);
+ mutex_exit(&qlge->mbx_mutex);
+
+ return (0);
+}
+
+/*
+ * This function stops the driver
+ */
+static void
+ql_m_stop(void *arg)
+{
+ qlge_t *qlge = (qlge_t *)arg;
+
+ mutex_enter(&qlge->gen_mutex);
+ if (qlge->mac_flags == QL_MAC_SUSPENDED) {
+ mutex_exit(&qlge->gen_mutex);
+ return;
+ }
+ ql_do_stop(qlge);
+ mutex_exit(&qlge->gen_mutex);
+ qlge->mac_flags = QL_MAC_STOPPED;
+}
+
+/*
+ * Add or remove a multicast address
+ */
+static int
+ql_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
+{
+ qlge_t *qlge = (qlge_t *)arg;
+ int ret = DDI_SUCCESS;
+
+ mutex_enter(&qlge->gen_mutex);
+ if (qlge->mac_flags == QL_MAC_SUSPENDED) {
+ mutex_exit(&qlge->gen_mutex);
+ return (ECANCELED);
+ }
+
+ if (qlge->mac_flags == QL_MAC_DETACH) {
+ mutex_exit(&qlge->gen_mutex);
+ return (ECANCELED);
+ }
+ if (add) {
+ QL_DUMP(DBG_GLD, "add to multicast list:\n",
+ (uint8_t *)ep, 8, ETHERADDRL);
+ ret = ql_add_to_multicast_list(qlge, (uint8_t *)ep);
+ } else {
+ QL_DUMP(DBG_GLD, "remove from multicast list:\n",
+ (uint8_t *)ep, 8, ETHERADDRL);
+ ret = ql_remove_from_multicast_list(qlge, (uint8_t *)ep);
+ }
+ mutex_exit(&qlge->gen_mutex);
+
+ return ((ret == DDI_SUCCESS) ? 0 : EIO);
+}
+
+/*
+ * Enable or disable promiscuous mode
+ */
+static int
+ql_m_setpromiscuous(void *arg, boolean_t on)
+{
+ qlge_t *qlge = (qlge_t *)arg;
+ int mode;
+
+ mutex_enter(&qlge->gen_mutex);
+ if (qlge->mac_flags == QL_MAC_SUSPENDED) {
+ mutex_exit(&qlge->gen_mutex);
+ return (ECANCELED);
+ }
+
+	/* enable or disable reception of all packets on the medium */
+ if (on) {
+ mode = 1;
+ QL_PRINT(DBG_GLD, ("%s(%d) enable promiscuous mode\n",
+ __func__, qlge->instance));
+ } else {
+ mode = 0;
+ QL_PRINT(DBG_GLD, ("%s(%d) disable promiscuous mode\n",
+ __func__, qlge->instance));
+ }
+
+ mutex_enter(&qlge->hw_mutex);
+ ql_set_promiscuous(qlge, mode);
+ mutex_exit(&qlge->hw_mutex);
+ mutex_exit(&qlge->gen_mutex);
+ return (DDI_SUCCESS);
+}
+
+
+static int
+ql_m_getstat(void *arg, uint_t stat, uint64_t *valp)
+{
+ qlge_t *qlge = (qlge_t *)arg;
+ struct ql_stats *cur_stats;
+ uint64_t val = 0;
+ int i;
+ uint32_t val32;
+ struct rx_ring *rx_ring;
+ struct tx_ring *tx_ring;
+
+ ASSERT(qlge != NULL);
+ mutex_enter(&qlge->gen_mutex);
+ if (qlge->mac_flags == QL_MAC_SUSPENDED) {
+ mutex_exit(&qlge->gen_mutex);
+ return (ECANCELED);
+ }
+
+ cur_stats = &qlge->stats;
+ /* these stats are maintained in software */
+ switch (stat) {
+
+ case MAC_STAT_IFSPEED /* 1000 */ :
+ if (CFG_IST(qlge, CFG_CHIP_8100) != 0) {
+ qlge->speed = SPEED_10G;
+ }
+ val = qlge->speed * 1000000ull;
+ break;
+
+ case MAC_STAT_MULTIRCV:
+ val = cur_stats->multircv;
+ break;
+
+ case MAC_STAT_BRDCSTRCV:
+ val = cur_stats->brdcstrcv;
+ break;
+
+ case MAC_STAT_MULTIXMT:
+ cur_stats->multixmt = 0;
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ tx_ring = &qlge->tx_ring[i];
+ cur_stats->multixmt += tx_ring->multixmt;
+ }
+ val = cur_stats->multixmt;
+ break;
+
+ case MAC_STAT_BRDCSTXMT:
+ cur_stats->brdcstxmt = 0;
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ tx_ring = &qlge->tx_ring[i];
+ cur_stats->brdcstxmt += tx_ring->brdcstxmt;
+ }
+ val = cur_stats->brdcstxmt;
+ break;
+
+ case MAC_STAT_NORCVBUF:
+ val = cur_stats->norcvbuf;
+ break;
+
+ case MAC_STAT_IERRORS:
+ val = cur_stats->errrcv;
+ break;
+
+ case MAC_STAT_OBYTES:
+ cur_stats->obytes = 0;
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ tx_ring = &qlge->tx_ring[i];
+ cur_stats->obytes += tx_ring->obytes;
+ }
+ val = cur_stats->obytes;
+ break;
+
+ case MAC_STAT_OPACKETS:
+ cur_stats->opackets = 0;
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ tx_ring = &qlge->tx_ring[i];
+ cur_stats->opackets += tx_ring->opackets;
+ }
+ val = cur_stats->opackets;
+ break;
+
+ case ETHER_STAT_DEFER_XMTS:
+ cur_stats->defer = 0;
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ tx_ring = &qlge->tx_ring[i];
+ cur_stats->defer += (tx_ring->defer);
+ }
+ val = cur_stats->defer;
+ break;
+
+ case MAC_STAT_OERRORS:
+ cur_stats->errxmt = 0;
+ for (i = 0; i < qlge->tx_ring_count; i++) {
+ tx_ring = &qlge->tx_ring[i];
+ cur_stats->errxmt += tx_ring->errxmt;
+ }
+ val = cur_stats->errxmt;
+ break;
+
+ case MAC_STAT_RBYTES:
+ cur_stats->rbytes = 0;
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ cur_stats->rbytes += rx_ring->rx_bytes;
+ }
+ val = cur_stats->rbytes;
+ break;
+
+ case MAC_STAT_IPACKETS:
+ cur_stats->rpackets = 0;
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ cur_stats->rpackets += rx_ring->rx_packets;
+ }
+ val = cur_stats->rpackets;
+ break;
+
+ case ETHER_STAT_FCS_ERRORS:
+ cur_stats->crc = 0;
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ cur_stats->crc += rx_ring->fcs_err;
+ }
+ val = cur_stats->crc;
+ break;
+
+ case ETHER_STAT_TOOLONG_ERRORS:
+ cur_stats->frame_too_long = 0;
+ for (i = 0; i < qlge->rx_ring_count; i++) {
+ rx_ring = &qlge->rx_ring[i];
+ cur_stats->frame_too_long +=
+ rx_ring->frame_too_long;
+ }
+ val = cur_stats->frame_too_long;
+ break;
+
+ case ETHER_STAT_XCVR_INUSE:
+ val = XCVR_1000X;
+ break;
+ case ETHER_STAT_JABBER_ERRORS:
+ if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask) !=
+ DDI_SUCCESS) {
+ break;
+ }
+ ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_JABBER_PKTS,
+ &val32);
+ val = val32;
+ ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
+ QL_PRINT(DBG_STATS, ("%s(%d) MAC_STAT_JABBER_ERRORS "
+ "status %d\n", __func__, qlge->instance, val));
+ break;
+ case ETHER_STAT_LINK_DUPLEX:
+ if (qlge->duplex == 1)
+ val = LINK_DUPLEX_FULL;
+ else
+ val = LINK_DUPLEX_HALF;
+ break;
+
+	/* statistics saved in hw */
+ case ETHER_STAT_MACRCV_ERRORS:
+ val = 0;
+ if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask) !=
+ DDI_SUCCESS) {
+ break;
+ }
+ ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_ALIGN_ERR,
+ &val32);
+ val += val32;
+ ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_FCS_ERR, &val32);
+ val += val32;
+ ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_JABBER_PKTS,
+ &val32);
+ val += val32;
+ ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_SYM_ERR,
+ &val32);
+ val += val32;
+ ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_INT_ERR,
+ &val32);
+ val += val32;
+ ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
+ break;
+
+ default:
+ mutex_exit(&qlge->gen_mutex);
+ return (ENOTSUP);
+ }
+ *valp = val;
+ mutex_exit(&qlge->gen_mutex);
+
+ return (0);
+
+}
+
+/*
+ * Set the physical network address
+ */
+static int
+ql_unicst_set(qlge_t *qlge, const uint8_t *macaddr, int slot)
+{
+ int status;
+
+ status = ql_sem_spinlock(qlge, SEM_MAC_ADDR_MASK);
+ if (status != DDI_SUCCESS)
+ return (EIO);
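+	/*
+	 * The CAM index is the function number times the per-function
+	 * block size (MAX_CQ) plus the slot within that block.
+	 */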
+ status = ql_set_mac_addr_reg(qlge, (uint8_t *)macaddr,
+ MAC_ADDR_TYPE_CAM_MAC,
+ (uint16_t)(qlge->func_number * MAX_CQ + slot));
+ ql_sem_unlock(qlge, SEM_MAC_ADDR_MASK);
+
+ return ((status == DDI_SUCCESS) ? 0 : EIO);
+}
+
+/*
+ * Set default MAC address
+ * Each function has a total of 128 mac addresses: function 0 uses
+ * slots 0~127, function 1 uses slots 128~255, etc., i.e. slot
+ * func_number * 128 + n (n = 0~127). We only support one MAC
+ * address per function, so its slot is func_number * 128 + 0.
+ */
+static int
+ql_m_unicst(void *arg, const uint8_t *mac)
+{
+ qlge_t *qlge = (qlge_t *)arg;
+ int status;
+
+ ASSERT(qlge->mac_flags != QL_MAC_DETACH);
+ mutex_enter(&qlge->gen_mutex);
+ if (qlge->mac_flags == QL_MAC_SUSPENDED) {
+ mutex_exit(&qlge->gen_mutex);
+ return (ECANCELED);
+ }
+
+ mutex_enter(&qlge->hw_mutex);
+ bcopy(mac, qlge->unicst_addr[0].addr.ether_addr_octet, ETHERADDRL);
+ /* Set Mac Address to slot 0 and Enable Primary Mac Function */
+ status = ql_unicst_set(qlge, mac, 0);
+ mutex_exit(&qlge->hw_mutex);
+ mutex_exit(&qlge->gen_mutex);
+
+ return (status);
+}
+
+/*
+ * ql_m_tx is used only for sending data packets onto the ethernet wire.
+ */
+static mblk_t *
+ql_m_tx(void *arg, mblk_t *mp)
+{
+ qlge_t *qlge = (qlge_t *)arg;
+ struct tx_ring *tx_ring;
+ mblk_t *next;
+ int rval;
+ uint32_t tx_count = 0;
+
+ if (qlge->port_link_state == LS_DOWN) {
+ cmn_err(CE_WARN, "%s(%d): exit due to link down",
+ __func__, qlge->instance);
+ freemsgchain(mp);
+ mp = NULL;
+ goto tx_exit;
+ }
+
+ /*
+ * Always send this packet through tx ring 0 for now.
+ * Will use multiple tx rings when Crossbow is supported
+ */
+ tx_ring = &qlge->tx_ring[0];
+ mutex_enter(&tx_ring->tx_lock);
+ if (tx_ring->mac_flags != QL_MAC_STARTED) {
+ mutex_exit(&tx_ring->tx_lock);
+ goto tx_exit;
+ }
+
+ /* we must try to send all */
+ while (mp != NULL) {
+ /*
+ * if number of available slots is less than a threshold,
+ * then quit
+ */
+ if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
+ tx_ring->queue_stopped = 1;
+ rval = DDI_FAILURE;
+ /*
+ * If we return the buffer back we are expected to
+ * call mac_tx_ring_update() when
+ * resources are available
+ */
+ tx_ring->defer++;
+ break;
+ }
+ next = mp->b_next;
+ mp->b_next = NULL;
+
+ rval = ql_send_common(tx_ring, mp);
+
+ if (rval != DDI_SUCCESS) {
+ mp->b_next = next;
+ break;
+ }
+ tx_count++;
+ mp = next;
+ }
+ /*
+ * After all msg blocks are mapped or copied to tx buffer,
+ * trigger the hardware to send the msg!
+ */
+ if (tx_count > 0) {
+ ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
+ tx_ring->prod_idx);
+ }
+ mutex_exit(&tx_ring->tx_lock);
+tx_exit:
+ return (mp);
+}
+
+static void
+ql_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
+{
+ qlge_t *qlge = (qlge_t *)arg;
+ struct iocblk *iocp;
+ boolean_t need_privilege = B_TRUE;
+ int err, cmd;
+ enum ioc_reply status;
+
+ /*
+ * Validate the command before bothering with the mutex...
+ */
+ iocp = (struct iocblk *)(void *)mp->b_rptr;
+ iocp->ioc_error = 0;
+ cmd = iocp->ioc_cmd;
+
+ mutex_enter(&qlge->gen_mutex);
+ if (qlge->mac_flags == QL_MAC_SUSPENDED) {
+ mutex_exit(&qlge->gen_mutex);
+ miocnak(wq, mp, 0, EINVAL);
+ return;
+ }
+ switch (cmd) {
+ default:
+ QL_PRINT(DBG_GLD, ("unknown ioctl cmd \n"));
+ miocnak(wq, mp, 0, EINVAL);
+ mutex_exit(&qlge->gen_mutex);
+ return;
+ case QLA_PCI_STATUS:
+ case QLA_WRITE_REG:
+ case QLA_READ_PCI_REG:
+ case QLA_WRITE_PCI_REG:
+ case QLA_GET_DBGLEAVEL:
+ case QLA_SET_DBGLEAVEL:
+ case QLA_READ_CONTRL_REGISTERS:
+ case QLA_MANUAL_READ_FLASH:
+ case QLA_MANUAL_WRITE_FLASH:
+ case QLA_GET_BINARY_CORE_DUMP:
+ case QLA_SUPPORTED_DUMP_TYPES:
+ case QLA_TRIGGER_SYS_ERROR_EVENT:
+ case QLA_READ_FLASH:
+ case QLA_WRITE_FLASH:
+ case QLA_READ_VPD:
+ case QLA_GET_PROP:
+ case QLA_SHOW_REGION:
+ case QLA_LIST_ADAPTER_INFO:
+ case QLA_READ_FW_IMAGE:
+ case QLA_WRITE_FW_IMAGE_HEADERS:
+ case QLA_CONTINUE_COPY_IN:
+ case QLA_CONTINUE_COPY_OUT:
+ case QLA_SOFT_RESET:
+ break;
+ case LB_GET_INFO_SIZE:
+ case LB_GET_INFO:
+ case LB_GET_MODE:
+ need_privilege = B_FALSE;
+ /* FALLTHRU */
+ case LB_SET_MODE:
+ break;
+ }
+
+ if (need_privilege) {
+ /*
+ * Check for specific net_config privilege
+ */
+ err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
+ if (err != 0) {
+ miocnak(wq, mp, 0, err);
+ mutex_exit(&qlge->gen_mutex);
+ return;
+ }
+ }
+ /*
+ * Implement ioctl
+ */
+ switch (cmd) {
+ case QLA_PCI_STATUS:
+ case QLA_WRITE_REG:
+ case QLA_READ_PCI_REG:
+ case QLA_WRITE_PCI_REG:
+ case QLA_GET_DBGLEAVEL:
+ case QLA_SET_DBGLEAVEL:
+ case QLA_READ_CONTRL_REGISTERS:
+ case QLA_MANUAL_READ_FLASH:
+ case QLA_MANUAL_WRITE_FLASH:
+ case QLA_GET_BINARY_CORE_DUMP:
+ case QLA_SUPPORTED_DUMP_TYPES:
+ case QLA_TRIGGER_SYS_ERROR_EVENT:
+ case QLA_READ_FLASH:
+ case QLA_WRITE_FLASH:
+ case QLA_READ_VPD:
+ case QLA_GET_PROP:
+ case QLA_SHOW_REGION:
+ case QLA_LIST_ADAPTER_INFO:
+ case QLA_READ_FW_IMAGE:
+ case QLA_WRITE_FW_IMAGE_HEADERS:
+ case QLA_CONTINUE_COPY_IN:
+ case QLA_CONTINUE_COPY_OUT:
+ case QLA_SOFT_RESET:
+ status = ql_chip_ioctl(qlge, wq, mp);
+ break;
+ case LB_GET_INFO_SIZE:
+ case LB_GET_INFO:
+ case LB_GET_MODE:
+ case LB_SET_MODE:
+ status = ql_loop_ioctl(qlge, wq, mp, iocp);
+ break;
+ default:
+ status = IOC_INVAL;
+ break;
+ }
+
+ /*
+ * Decide how to reply
+ */
+ switch (status) {
+ default:
+ case IOC_INVAL:
+ /*
+ * Error, reply with a NAK and EINVAL or the specified error
+ */
+ miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
+ EINVAL : iocp->ioc_error);
+ break;
+
+ case IOC_DONE:
+ /*
+ * OK, reply already sent
+ */
+ break;
+
+ case IOC_ACK:
+ /*
+ * OK, reply with an ACK
+ */
+ miocack(wq, mp, 0, 0);
+ break;
+
+ case IOC_REPLY:
+ /*
+ * OK, send prepared reply as ACK or NAK
+ */
+ mp->b_datap->db_type = (uint8_t)(iocp->ioc_error == 0 ?
+ M_IOCACK : M_IOCNAK);
+ qreply(wq, mp);
+ break;
+ }
+ mutex_exit(&qlge->gen_mutex);
+}
+/* ARGSUSED */
+static int
+qlge_set_priv_prop(qlge_t *qlge, const char *pr_name, uint_t pr_valsize,
+ const void *pr_val)
+{
+ int err = 0;
+ long result;
+
+ if (strcmp(pr_name, "_adv_pause_mode") == 0) {
+ (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
+ if (result > PAUSE_MODE_PER_PRIORITY ||
+ result < PAUSE_MODE_DISABLED) {
+ err = EINVAL;
+ } else if (qlge->pause != (uint32_t)result) {
+ qlge->pause = (uint32_t)result;
+ if (qlge->flags & INTERRUPTS_ENABLED) {
+ mutex_enter(&qlge->mbx_mutex);
+ if (ql_set_port_cfg(qlge) == DDI_FAILURE)
+ err = EINVAL;
+ mutex_exit(&qlge->mbx_mutex);
+ }
+ }
+ return (err);
+ }
+ return (ENOTSUP);
+}
+
+static int
+qlge_get_priv_prop(qlge_t *qlge, const char *pr_name, uint_t pr_flags,
+ uint_t pr_valsize, void *pr_val)
+{
+ int err = ENOTSUP;
+ boolean_t is_default = (boolean_t)(pr_flags & MAC_PROP_DEFAULT);
+ uint32_t value;
+
+ if (strcmp(pr_name, "_adv_pause_mode") == 0) {
+		value = (is_default ? 2 : qlge->pause);
+ err = 0;
+ goto done;
+ }
+
+done:
+ if (err == 0) {
+ (void) snprintf(pr_val, pr_valsize, "%d", value);
+ }
+ return (err);
+}
+
+/*
+ * callback functions for set/get of properties
+ */
+/* ARGSUSED */
+static int
+ql_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_valsize, const void *pr_val)
+{
+ qlge_t *qlge = barg;
+ int err = 0;
+ uint32_t cur_mtu, new_mtu;
+
+ mutex_enter(&qlge->gen_mutex);
+ if (qlge->mac_flags == QL_MAC_SUSPENDED) {
+ mutex_exit(&qlge->gen_mutex);
+ return (ECANCELED);
+ }
+
+ switch (pr_num) {
+ case MAC_PROP_MTU:
+ cur_mtu = qlge->mtu;
+ bcopy(pr_val, &new_mtu, sizeof (new_mtu));
+
+ QL_PRINT(DBG_GLD, ("%s(%d) new mtu %d \n",
+ __func__, qlge->instance, new_mtu));
+ if (new_mtu == cur_mtu) {
+ err = 0;
+ break;
+ }
+ if ((new_mtu != ETHERMTU) && (new_mtu != JUMBO_MTU)) {
+ err = EINVAL;
+ break;
+ }
+ /*
+		 * do not change the MTU on the fly; allow the change
+		 * only while the driver is neither started nor detached
+ */
+ if ((qlge->mac_flags == QL_MAC_STARTED) ||
+ (qlge->mac_flags == QL_MAC_DETACH)) {
+ err = EBUSY;
+ cmn_err(CE_WARN, "%s(%d) new mtu %d ignored, "
+ "driver busy, mac_flags %d", __func__,
+ qlge->instance, new_mtu, qlge->mac_flags);
+ break;
+ }
+ qlge->mtu = new_mtu;
+ err = mac_maxsdu_update(qlge->mh, qlge->mtu);
+ if (err == 0) {
+ /* EMPTY */
+ QL_PRINT(DBG_GLD, ("%s(%d) new mtu %d set success\n",
+ __func__, qlge->instance,
+ new_mtu));
+ }
+ break;
+ case MAC_PROP_PRIVATE:
+ mutex_exit(&qlge->gen_mutex);
+ err = qlge_set_priv_prop(qlge, pr_name, pr_valsize,
+ pr_val);
+ mutex_enter(&qlge->gen_mutex);
+ break;
+ default:
+ err = ENOTSUP;
+ break;
+ }
+ mutex_exit(&qlge->gen_mutex);
+ return (err);
+}
+
+/* ARGSUSED */
+static int
+ql_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
+{
+ qlge_t *qlge = barg;
+ uint64_t speed;
+ link_state_t link_state;
+ link_duplex_t link_duplex;
+ int err = 0;
+
+ mutex_enter(&qlge->gen_mutex);
+ if (qlge->mac_flags == QL_MAC_SUSPENDED) {
+ err = ECANCELED;
+ goto out;
+ }
+
+ if (pr_valsize == 0) {
+ err = EINVAL;
+ goto out;
+ }
+ bzero(pr_val, pr_valsize);
+ /* mostly read only */
+ *perm = MAC_PROP_PERM_READ;
+
+ switch (pr_num) {
+ case MAC_PROP_DUPLEX:
+ if (pr_valsize < sizeof (link_duplex_t)) {
+ err = EINVAL;
+ goto out;
+ }
+ if (qlge->duplex)
+ link_duplex = LINK_DUPLEX_FULL;
+ else
+ link_duplex = LINK_DUPLEX_HALF;
+
+ bcopy(&link_duplex, pr_val,
+ sizeof (link_duplex_t));
+ break;
+ case MAC_PROP_SPEED:
+ if (pr_valsize < sizeof (speed)) {
+ err = EINVAL;
+ goto out;
+ }
+ speed = qlge->speed * 1000000ull;
+ bcopy(&speed, pr_val, sizeof (speed));
+ break;
+ case MAC_PROP_STATUS:
+ if (pr_valsize < sizeof (link_state_t)) {
+ err = EINVAL;
+ goto out;
+ }
+ if (qlge->port_link_state == LS_DOWN)
+ link_state = LINK_STATE_DOWN;
+ else
+ link_state = LINK_STATE_UP;
+ bcopy(&link_state, pr_val,
+ sizeof (link_state_t));
+ break;
+
+ case MAC_PROP_PRIVATE:
+ err = qlge_get_priv_prop(qlge, pr_name, pr_flags,
+ pr_valsize, pr_val);
+ break;
+
+ default:
+ err = ENOTSUP;
+ }
+out:
+ mutex_exit(&qlge->gen_mutex);
+ return (err);
+}
+
+/* ARGSUSED */
+static boolean_t
+ql_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
+{
+ int ret = B_FALSE;
+ uint32_t cksum = 0;
+ qlge_t *qlge = (qlge_t *)arg;
+
+ switch (cap) {
+ case MAC_CAPAB_HCKSUM:
+ if ((qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) != 0) {
+ cksum |= HCKSUM_INET_FULL_V4;
+ }
+ if ((qlge->cfg_flags & CFG_CKSUM_FULL_IPv6) != 0) {
+ cksum |= HCKSUM_INET_FULL_V6;
+ }
+ if ((qlge->cfg_flags & CFG_CKSUM_HEADER_IPv4) != 0) {
+ cksum |= HCKSUM_IPHDRCKSUM;
+ }
+ if ((qlge->cfg_flags & CFG_CKSUM_PARTIAL) != 0) {
+ cksum |= HCKSUM_INET_PARTIAL;
+ }
+ qlge->chksum_cap = cksum;
+ *(uint32_t *)cap_data = cksum;
+ ret = B_TRUE;
+ break;
+
+ case MAC_CAPAB_LSO: {
+ mac_capab_lso_t *cap_lso = (mac_capab_lso_t *)cap_data;
+ uint32_t page_size;
+
+ if ((qlge->cfg_flags & CFG_LSO)&&
+ (qlge->cfg_flags & CFG_SUPPORT_SCATTER_GATHER)) {
+ cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
+ page_size = ddi_ptob(qlge->dip, (ulong_t)1);
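+			/*
+			 * Bound the largest LSO send to one page per
+			 * usable TX DMA handle.
+			 */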
+ cap_lso->lso_basic_tcp_ipv4.lso_max = page_size *
+ (QL_MAX_TX_DMA_HANDLES-1);
+ ret = B_TRUE;
+ }
+ break;
+ }
+
+ default:
+ return (B_FALSE);
+ }
+ return (ret);
+}
+
+void
+ql_gld3_init(qlge_t *qlge, mac_register_t *macp)
+{
+ macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
+ macp->m_driver = qlge;
+ macp->m_dip = qlge->dip;
+ /* This is the mac address from flash to be used by the port */
+ macp->m_src_addr = qlge->dev_addr.ether_addr_octet;
+ macp->m_min_sdu = 0;
+ macp->m_max_sdu = qlge->mtu;
+ macp->m_margin = VLAN_TAGSZ;
+ macp->m_priv_props = qlge_priv_prop;
+ macp->m_priv_prop_count = QLGE_MAX_PRIV_PROPS;
+ macp->m_v12n = 0;
+ ql_m_callbacks.mc_unicst = ql_m_unicst;
+ ql_m_callbacks.mc_tx = ql_m_tx;
+ macp->m_callbacks = &ql_m_callbacks;
+}
diff --git a/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_mpi.c b/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_mpi.c
new file mode 100644
index 0000000000..c1f5b70adb
--- /dev/null
+++ b/usr/src/uts/common/io/fibre-channel/fca/qlge/qlge_mpi.c
@@ -0,0 +1,1194 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 QLogic Corporation. All rights reserved.
+ */
+
+#include <qlge.h>
+
+static int ql_async_event_parser(qlge_t *, mbx_data_t *);
+
+/*
+ * Wait up to "timeout" seconds for a Processor Interrupt;
+ * if timeout is 0, wait for the default wait time
+ */
+static int
+ql_poll_processor_intr(qlge_t *qlge, uint8_t timeout)
+{
+ int rtn_val = DDI_SUCCESS;
+
+ if (ql_wait_reg_bit(qlge, REG_STATUS, STS_PI, BIT_SET, timeout)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "Polling for processor interrupt failed.");
+ rtn_val = DDI_FAILURE;
+ }
+ return (rtn_val);
+}
+
+/*
+ * Wait for mailbox Processor Register Ready
+ */
+static int
+ql_wait_processor_addr_reg_ready(qlge_t *qlge)
+{
+ int rtn_val = DDI_SUCCESS;
+
+ if (ql_wait_reg_bit(qlge, REG_PROCESSOR_ADDR,
+ PROCESSOR_ADDRESS_RDY, BIT_SET, 0) != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "Wait for processor address register ready timeout.");
+ rtn_val = DDI_FAILURE;
+ }
+ return (rtn_val);
+}
+
+/*
+ * Read and write MPI registers using the indirect register interface.
+ * Assumes all the necessary locks and semaphores have been acquired.
+ */
+int
+ql_write_processor_data(qlge_t *qlge, uint32_t addr, uint32_t data)
+{
+ int rtn_val = DDI_FAILURE;
+
+ /* wait for processor address register ready */
+ if (ql_wait_processor_addr_reg_ready(qlge) == DDI_FAILURE)
+ goto out;
+ /* write the data to the data reg */
+ ql_write_reg(qlge, REG_PROCESSOR_DATA, data);
+ /* trigger the write */
+ ql_write_reg(qlge, REG_PROCESSOR_ADDR, addr);
+ /* wait for register to come ready */
+ if (ql_wait_processor_addr_reg_ready(qlge) == DDI_FAILURE)
+ goto out;
+
+ rtn_val = DDI_SUCCESS;
+
+out:
+ return (rtn_val);
+
+}
+
+/*
+ * Read from processor register
+ */
+int
+ql_read_processor_data(qlge_t *qlge, uint32_t addr, uint32_t *data)
+{
+ int rtn_val = DDI_FAILURE;
+
+ /* enable read operation */
+ addr |= PROCESSOR_ADDRESS_READ;
+ /* wait for processor address register ready */
+ if (ql_wait_processor_addr_reg_ready(qlge) == DDI_FAILURE)
+ goto out;
+
+ /* Write read address, wait for data ready in Data register */
+ ql_write_reg(qlge, REG_PROCESSOR_ADDR, addr);
+ /* wait for data ready */
+ if (ql_wait_processor_addr_reg_ready(qlge) == DDI_FAILURE)
+ goto out;
+ /* read data */
+ *data = ql_read_reg(qlge, REG_PROCESSOR_DATA);
+
+ rtn_val = DDI_SUCCESS;
+
+out:
+ return (rtn_val);
+
+}
+
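+/*
+ * Illustrative sketch, not part of the original patch: the two
+ * indirect-access helpers above can be combined for a read-modify-write
+ * of an MPI register.  The semaphore handling mirrors
+ * ql_read_mailbox_cmd() below; "addr" and "bits" are arbitrary.
+ */
+#ifdef QLGE_EXAMPLE_SKETCH
+static int
+ql_example_set_processor_bits(qlge_t *qlge, uint32_t addr, uint32_t bits)
+{
+	uint32_t val;
+	int rtn_val = DDI_FAILURE;
+
+	/* serialize access to the Processor Address/Data registers */
+	if (ql_sem_spinlock(qlge, QL_PROCESSOR_SEM_MASK) != DDI_SUCCESS)
+		return (DDI_FAILURE);
+	if (ql_read_processor_data(qlge, addr, &val) == DDI_SUCCESS) {
+		val |= bits;
+		rtn_val = ql_write_processor_data(qlge, addr, val);
+	}
+	ql_sem_unlock(qlge, QL_PROCESSOR_SEM_MASK);
+	return (rtn_val);
+}
+#endif /* QLGE_EXAMPLE_SKETCH */
+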
+/*
+ * Read "count" outgoing mailbox registers, starting from mailbox #0;
+ * if count is 0, read all mailboxes
+ */
+static int
+ql_read_mailbox_cmd(qlge_t *qlge, mbx_data_t *mbx_buf, uint32_t count)
+{
+ int rtn_val = DDI_FAILURE;
+ uint32_t reg_status;
+ uint32_t addr;
+ int i;
+
+ if (ql_sem_spinlock(qlge, QL_PROCESSOR_SEM_MASK) != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "%s(%d) get QL_PROCESSOR_SEM_MASK time out error",
+ __func__, qlge->instance);
+ return (DDI_FAILURE);
+ }
+
+ if (qlge->func_number == qlge->fn0_net)
+ addr = FUNC_0_OUT_MAILBOX_0_REG_OFFSET;
+ else
+ addr = FUNC_1_OUT_MAILBOX_0_REG_OFFSET;
+
+ if (count == 0)
+ count = NUM_MAILBOX_REGS;
+ for (i = 0; i < count; i++) {
+ if (ql_read_processor_data(qlge, addr, &reg_status)
+ == DDI_FAILURE)
+ goto out;
+ QL_PRINT(DBG_MBX, ("%s(%d) mailbox %d value 0x%x\n",
+ __func__, qlge->instance, i, reg_status));
+ mbx_buf->mb[i] = reg_status;
+		addr++;
+ }
+
+ rtn_val = DDI_SUCCESS;
+
+out:
+ ql_sem_unlock(qlge, QL_PROCESSOR_SEM_MASK);
+
+ return (rtn_val);
+
+}
+
+/*
+ * Write mailbox commands (up to 16) to the MPI firmware
+ */
+int
+ql_issue_mailbox_cmd(qlge_t *qlge, mbx_cmd_t *mbx_cmd)
+{
+ int rtn_val = DDI_FAILURE;
+ uint32_t addr;
+ int i;
+ /*
+ * Get semaphore to access Processor Address and
+ * Processor Data Registers
+ */
+ if (ql_sem_spinlock(qlge, QL_PROCESSOR_SEM_MASK) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+	/* ensure we do not overwrite the current command */
+ if (ql_wait_reg_bit(qlge, REG_HOST_CMD_STATUS,
+ HOST_TO_MPI_INTR_NOT_DONE, BIT_RESET, 0) != DDI_SUCCESS) {
+ goto out;
+ }
+
+ if (qlge->func_number == qlge->fn0_net)
+ addr = FUNC_0_IN_MAILBOX_0_REG_OFFSET;
+ else
+ addr = FUNC_1_IN_MAILBOX_0_REG_OFFSET;
+
+ /* wait for mailbox registers to be ready to access */
+ if (ql_wait_processor_addr_reg_ready(qlge) == DDI_FAILURE)
+ goto out;
+
+ /* issue mailbox command one by one */
+ for (i = 0; i < NUM_MAILBOX_REGS; i++) {
+ /* write sending cmd to mailbox data register */
+ ql_write_reg(qlge, REG_PROCESSOR_DATA, mbx_cmd->mb[i]);
+ /* write mailbox address to address register */
+ ql_write_reg(qlge, REG_PROCESSOR_ADDR, addr);
+ QL_PRINT(DBG_MBX, ("%s(%d) write %x to mailbox(%x) addr %x \n",
+ __func__, qlge->instance, mbx_cmd->mb[i], i, addr));
+ addr++;
+ /*
+ * wait for mailbox cmd to be written before
+ * next write can start
+ */
+ if (ql_wait_processor_addr_reg_ready(qlge) == DDI_FAILURE)
+ goto out;
+ }
+ /* inform MPI that new mailbox commands are available */
+ ql_write_reg(qlge, REG_HOST_CMD_STATUS, HOST_CMD_SET_RISC_INTR);
+ rtn_val = DDI_SUCCESS;
+out:
+ ql_sem_unlock(qlge, QL_PROCESSOR_SEM_MASK);
+ return (rtn_val);
+}
+
+/*
+ * Send mailbox commands (up to 16) to the MPI firmware and poll for
+ * the MPI mailbox completion response when interrupts are not enabled.
+ * The MBX_LOCK mutex should be held and released externally.
+ */
+int
+ql_issue_mailbox_cmd_and_poll_rsp(qlge_t *qlge, mbx_cmd_t *mbx_cmd,
+ mbx_data_t *p_results)
+{
+ int rtn_val = DDI_FAILURE;
+ boolean_t done;
+ int max_wait;
+
+ if (mbx_cmd == NULL)
+ goto err;
+
+ rtn_val = ql_issue_mailbox_cmd(qlge, mbx_cmd);
+ if (rtn_val != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) ql_issue_mailbox_cmd failed",
+ __func__, qlge->instance);
+ goto err;
+ }
+ done = B_FALSE;
+	max_wait = 5; /* wait for up to 5 PI interrupts */
+	/* wait until the processor interrupt is received */
+ while ((done != B_TRUE) && (max_wait--)) {
+ /* wait up to 5s for PI interrupt */
+ if (ql_poll_processor_intr(qlge, (uint8_t)mbx_cmd->timeout)
+ == DDI_SUCCESS) {
+ QL_PRINT(DBG_MBX, ("%s(%d) PI Intr received",
+ __func__, qlge->instance));
+ ql_read_mailbox_cmd(qlge, p_results, 0);
+			/*
+			 * Sometimes the incoming message is not what we are
+			 * waiting for, i.e. an async event; in that case,
+			 * continue to wait.  If it is the result of the
+			 * previous mailbox command, we are done.  Either
+			 * way, send HOST_CMD_CLEAR_RISC_TO_HOST_INTR to
+			 * clear each PI interrupt
+			 */
+ if (ql_async_event_parser(qlge, p_results) == B_FALSE) {
+ /*
+				 * we got what we were waiting for;
+				 * clear the interrupt
+ */
+ rtn_val = DDI_SUCCESS;
+ done = B_TRUE;
+ } else {
+ /*EMPTY*/
+ QL_PRINT(DBG_MBX,
+				    ("%s(%d) result ignored, not what we wait for\n",
+ __func__, qlge->instance));
+ }
+ ql_write_reg(qlge, REG_HOST_CMD_STATUS,
+ HOST_CMD_CLEAR_RISC_TO_HOST_INTR);
+		} else { /* timeout */
+			rtn_val = DDI_FAILURE;
+			done = B_TRUE;
+		}
+	}
+err:
+ return (rtn_val);
+}
+
+/*
+ * Send mailbox commands (up to 16) to the MPI firmware and wait for the
+ * MPI mailbox completion response, which is saved by the interrupt
+ * handler.  Thus, this function can only be used after interrupts have
+ * been enabled.  The MBX mutex must be held before calling this function.
+ */
+static int
+ql_issue_mailbox_cmd_and_wait_rsp(qlge_t *qlge, mbx_cmd_t *mbx_cmd)
+{
+ int rtn_val = DDI_FAILURE;
+ clock_t timer;
+ int i;
+ int done = 0;
+
+ if (mbx_cmd == NULL)
+ goto err;
+
+ ASSERT(mutex_owned(&qlge->mbx_mutex));
+
+	/* if interrupts are not enabled, poll until results are available */
+ if (!(qlge->flags & INTERRUPTS_ENABLED)) {
+ rtn_val = ql_issue_mailbox_cmd_and_poll_rsp(qlge, mbx_cmd,
+ &qlge->received_mbx_cmds);
+ if (rtn_val == DDI_SUCCESS) {
+ for (i = 0; i < NUM_MAILBOX_REGS; i++)
+ mbx_cmd->mb[i] = qlge->received_mbx_cmds.mb[i];
+ }
+ } else {
+ rtn_val = ql_issue_mailbox_cmd(qlge, mbx_cmd);
+ if (rtn_val != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) ql_issue_mailbox_cmd failed",
+ __func__, qlge->instance);
+ goto err;
+ }
+ qlge->mbx_wait_completion = 1;
+ while (!done && qlge->mbx_wait_completion && !ddi_in_panic()) {
+ /* default 5 seconds from now to timeout */
+ timer = ddi_get_lbolt();
+ if (mbx_cmd->timeout) {
+ timer +=
+ mbx_cmd->timeout * drv_usectohz(1000000);
+ } else {
+ timer += 5 * drv_usectohz(1000000);
+ }
+ if (cv_timedwait(&qlge->cv_mbx_intr, &qlge->mbx_mutex,
+ timer) == -1) {
+				/*
+				 * The timeout 'timer' expired without
+				 * the condition being signaled.
+				 */
+ cmn_err(CE_WARN, "%s(%d) Wait for Mailbox cmd "
+ "complete timeout.",
+ __func__, qlge->instance);
+ rtn_val = DDI_FAILURE;
+ done = 1;
+ } else {
+ QL_PRINT(DBG_MBX,
+ ("%s(%d) mailbox completion signal received"
+ " \n", __func__, qlge->instance));
+ for (i = 0; i < NUM_MAILBOX_REGS; i++) {
+ mbx_cmd->mb[i] =
+ qlge->received_mbx_cmds.mb[i];
+ }
+ rtn_val = DDI_SUCCESS;
+ done = 1;
+ }
+ }
+ }
+err:
+ return (rtn_val);
+}
+
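+/*
+ * Illustrative sketch, not part of the original patch: a typical
+ * synchronous mailbox exchange.  The caller takes mbx_mutex, as the
+ * ASSERT in ql_issue_mailbox_cmd_and_wait_rsp() requires;
+ * MBC_GET_FIRMWARE_STATE is reused here only as a convenient
+ * single-word command.
+ */
+#ifdef QLGE_EXAMPLE_SKETCH
+static int
+ql_example_mbx_exchange(qlge_t *qlge)
+{
+	mbx_cmd_t mbx_cmds = {0};
+	int rv;
+
+	mbx_cmds.mb[0] = MBC_GET_FIRMWARE_STATE;
+	mutex_enter(&qlge->mbx_mutex);
+	rv = ql_issue_mailbox_cmd_and_wait_rsp(qlge, &mbx_cmds);
+	mutex_exit(&qlge->mbx_mutex);
+	/* mb[0] carries the completion status, mb[1..] the results */
+	if ((rv != DDI_SUCCESS) || (mbx_cmds.mb[0] != MBS_COMMAND_COMPLETE))
+		return (DDI_FAILURE);
+	return (DDI_SUCCESS);
+}
+#endif /* QLGE_EXAMPLE_SKETCH */
+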
+/*
+ * Interpret incoming asynchronous events
+ */
+static int
+ql_async_event_parser(qlge_t *qlge, mbx_data_t *mbx_cmds)
+{
+ uint32_t link_status, cmd;
+ uint8_t link_speed;
+ uint8_t link_type;
+ boolean_t proc_done = B_TRUE;
+ mbx_cmd_t reply_cmd = {0};
+
+ switch (mbx_cmds->mb[0]) {
+ case MBA_IDC_INTERMEDIATE_COMPLETE /* 1000h */:
+ QL_PRINT(DBG_MBX, ("%s(%d):"
+ "MBA_IDC_INTERMEDIATE_COMPLETE received\n",
+ __func__, qlge->instance));
+ break;
+ case MBA_SYSTEM_ERR /* 8002h */:
+ cmn_err(CE_WARN, "%s(%d): MBA_SYSTEM_ERR received",
+ __func__, qlge->instance);
+		cmn_err(CE_WARN, "%s(%d): File id %x, Line # %x, "
+		    "Firmware Ver# %x",
+ __func__, qlge->instance, mbx_cmds->mb[1],
+ mbx_cmds->mb[2], mbx_cmds->mb[3]);
+ ql_8xxx_binary_core_dump(qlge, &qlge->ql_mpi_coredump);
+ break;
+ case MBA_LINK_UP /* 8011h */:
+ QL_PRINT(DBG_MBX, ("%s(%d): MBA_LINK_UP received\n",
+ __func__, qlge->instance));
+ link_status = mbx_cmds->mb[1];
+ QL_PRINT(DBG_MBX, ("%s(%d): Link Status %x \n",
+ __func__, qlge->instance, link_status));
+ link_speed = (uint8_t)((link_status >> 3) & 0x07);
+
+ if (link_speed == 0) {
+ qlge->speed = SPEED_100;
+ QL_PRINT(DBG_MBX, ("%s(%d):Link speed 100M\n",
+ __func__, qlge->instance));
+ } else if (link_speed == 1) {
+ qlge->speed = SPEED_1000;
+ QL_PRINT(DBG_MBX, ("%s(%d):Link speed 1G\n",
+ __func__, qlge->instance));
+ } else if (link_speed == 2) {
+ qlge->speed = SPEED_10G;
+ QL_PRINT(DBG_MBX, ("%s(%d):Link speed 10G\n",
+ __func__, qlge->instance));
+ }
+
+ qlge->link_type = link_type = (uint8_t)(link_status & 0x07);
+
+ if (link_type == XFI_NETWORK_INTERFACE) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d):Link type XFI_NETWORK_INTERFACE\n",
+ __func__, qlge->instance));
+ } else if (link_type == XAUI_NETWORK_INTERFACE) {
+ /* EMPTY */
+			QL_PRINT(DBG_MBX, ("%s(%d): Link type "
+ "XAUI_NETWORK_INTERFACE\n",
+ __func__, qlge->instance));
+ } else if (link_type == XFI_BACKPLANE_INTERFACE) {
+ /* EMPTY */
+			QL_PRINT(DBG_MBX, ("%s(%d): Link type "
+ "XFI_BACKPLANE_INTERFACE\n",
+ __func__, qlge->instance));
+ } else if (link_type == XAUI_BACKPLANE_INTERFACE) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX, ("%s(%d):Link type "
+ "XAUI_BACKPLANE_INTERFACE\n",
+ __func__, qlge->instance));
+ } else if (link_type == EXT_10GBASE_T_PHY) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d):Link type EXT_10GBASE_T_PHY\n",
+ __func__, qlge->instance));
+ } else if (link_type == EXT_EXT_EDC_PHY) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d):Link type EXT_EXT_EDC_PHY\n",
+ __func__, qlge->instance));
+ } else {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d):unknown Link type \n",
+ __func__, qlge->instance));
+ }
+
+ cmn_err(CE_NOTE, "mpi link up! link_status %x \n", link_status);
+		/*
+		 * start the timer, if not already running, to wait a
+		 * while and then check if the link is really up or down
+		 */
+ ql_restart_timer(qlge);
+
+ break;
+ case MBA_LINK_DOWN /* 8012h */:
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): MBA_LINK_DOWN received\n",
+ __func__, qlge->instance));
+
+ link_status = mbx_cmds->mb[1];
+
+ QL_PRINT(DBG_MBX, ("%s(%d): Link Status %x \n",
+ __func__, qlge->instance, link_status));
+ if (link_status & 0x1) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX, ("%s(%d): Loss of signal \n",
+ __func__, qlge->instance));
+ }
+ if (link_status & 0x2) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): Auto-Negotiation Failed \n",
+ __func__, qlge->instance));
+ }
+ if (link_status & 0x4) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): XTI-Training Failed \n",
+ __func__, qlge->instance));
+ }
+
+ cmn_err(CE_NOTE, "mpi link down! link_status %x \n",
+ link_status);
+ ql_restart_timer(qlge);
+ break;
+ case MBA_IDC_COMPLETE /* 8100h */:
+
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): MBA_IDC_COMPLETE received\n",
+ __func__, qlge->instance));
+ cmd = mbx_cmds->mb[1];
+ if (cmd == MBC_STOP_FIRMWARE) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): STOP_FIRMWARE event completed\n",
+ __func__, qlge->instance));
+ } else if (cmd == MBC_IDC_REQUEST) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): IDC_REQUEST event completed\n",
+ __func__, qlge->instance));
+ } else if (cmd == MBC_PORT_RESET) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): PORT_RESET event completed\n",
+ __func__, qlge->instance));
+ } else if (cmd == MBC_SET_PORT_CONFIG) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): SET_PORT_CONFIG event "
+ "completed\n", __func__, qlge->instance));
+ } else {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): unknown IDC completion request"
+ " event %x %x\n", __func__, qlge->instance,
+ mbx_cmds->mb[1], mbx_cmds->mb[2]));
+ }
+ proc_done = B_FALSE;
+ break;
+
+ case MBA_IDC_REQUEST_NOTIFICATION /* 8101h */:
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): MBA_IDC_REQUEST_NOTIFICATION "
+ "received\n", __func__, qlge->instance));
+ cmd = mbx_cmds->mb[1];
+ if (cmd == MBC_STOP_FIRMWARE) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): STOP_FIRMWARE notification"
+ " received\n", __func__, qlge->instance));
+ } else if (cmd == MBC_IDC_REQUEST) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): IDC_REQUEST notification "
+ "received\n", __func__, qlge->instance));
+ } else if (cmd == MBC_PORT_RESET) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX, ("%s(%d): PORT_RESET "
+ "notification received\n",
+ __func__, qlge->instance));
+ } else if (cmd == MBC_SET_PORT_CONFIG) {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): SET_PORT_CONFIG notification "
+ "received\n", __func__, qlge->instance));
+ } else {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX, ("%s(%d): "
+ "unknown request received %x %x\n",
+ __func__, qlge->instance, mbx_cmds->mb[1],
+ mbx_cmds->mb[2]));
+ }
+ reply_cmd.mb[0] = MBC_IDC_ACK;
+ reply_cmd.mb[1] = mbx_cmds->mb[1];
+ reply_cmd.mb[2] = mbx_cmds->mb[2];
+ reply_cmd.mb[3] = mbx_cmds->mb[3];
+ reply_cmd.mb[4] = mbx_cmds->mb[4];
+ if (ql_issue_mailbox_cmd(qlge, &reply_cmd)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "%s(%d) send IDC Ack failed.",
+ __func__, qlge->instance);
+ }
+ /*
+		 * verify that the returned outbound mailbox value is what
+ * we just sent
+ */
+ if (mbx_cmds->mb[0] == MBS_COMMAND_COMPLETE) {
+ /* 0x4000 */
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): IDC Ack sent success.\n",
+ __func__, qlge->instance));
+ } else {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): IDC Ack reply error %x %x %x.\n",
+ __func__, qlge->instance, mbx_cmds->mb[0],
+ mbx_cmds->mb[1], mbx_cmds->mb[2]));
+ }
+ break;
+ case MBA_IDC_TIME_EXTENDED /* 8102 */:
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): MBA_IDC_TIME_EXTENDED received\n",
+ __func__, qlge->instance));
+ break;
+ case MBA_DCBX_CONFIG_CHANGE /* 8110 */:
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): MBA_DCBX_CONFIG_CHANGE received\n",
+ __func__, qlge->instance));
+ break;
+ case MBA_NOTIFICATION_LOST /* 8120 */:
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): MBA_NOTIFICATION_LOST received\n",
+ __func__, qlge->instance));
+ break;
+ case MBA_SFT_TRANSCEIVER_INSERTION /* 8130 */:
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): MBA_SFT_TRANSCEIVER_INSERTION "
+ "received\n", __func__, qlge->instance));
+ break;
+ case MBA_SFT_TRANSCEIVER_REMOVAL /* 8140 */:
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): MBA_SFT_TRANSCEIVER_REMOVAL "
+ "received\n", __func__, qlge->instance));
+ break;
+ case MBA_FIRMWARE_INIT_COMPLETE /* 8400 */:
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): MBA_FIRMWARE_INIT_COMPLETE "
+ "received\n", __func__, qlge->instance));
+ QL_PRINT(DBG_MBX,
+ ("%s(%d): mbx[1] %x, mbx[2] %x\n", __func__,
+ qlge->instance, mbx_cmds->mb[1], mbx_cmds->mb[2]));
+ qlge->fw_init_complete = B_TRUE;
+ qlge->fw_version_info.major_version =
+ LSB(MSW(mbx_cmds->mb[1]));
+ qlge->fw_version_info.minor_version =
+ MSB(LSW(mbx_cmds->mb[1]));
+ qlge->fw_version_info.sub_minor_version =
+ LSB(LSW(mbx_cmds->mb[1]));
+ qlge->phy_version_info.major_version =
+ LSB(MSW(mbx_cmds->mb[2]));
+ qlge->phy_version_info.minor_version =
+ MSB(LSW(mbx_cmds->mb[2]));
+ qlge->phy_version_info.sub_minor_version =
+ LSB(LSW(mbx_cmds->mb[2]));
+ break;
+ case MBA_FIRMWARE_INIT_FAILED /* 8401 */:
+ cmn_err(CE_WARN, "%s(%d):"
+ "ASYNC_EVENT_FIRMWARE_INIT_FAILURE "
+ "received: mbx[1] %x, mbx[2] %x",
+ __func__, qlge->instance,
+ mbx_cmds->mb[1], mbx_cmds->mb[2]);
+ break;
+ default:
+ if (mbx_cmds->mb[0] > 0x8000) {
+ cmn_err(CE_WARN, "%s(%d): "
+ "Unknown Async event received: mbx[0] %x ,"
+ "mbx[1] %x; mbx[2] %x",
+ __func__, qlge->instance,
+ mbx_cmds->mb[0], mbx_cmds->mb[1],
+ mbx_cmds->mb[2]);
+ proc_done = B_TRUE;
+ } else {
+ proc_done = B_FALSE;
+ }
+ break;
+ }
+ return (proc_done);
+}
+
+
+/*
+ * MPI Interrupt handler
+ * Caller must have MBX_LOCK
+ */
+void
+ql_do_mpi_intr(qlge_t *qlge)
+{
+	/*
+	 * we only need to read the first few mailboxes that this
+	 * adapter's MPI will write its response to.
+	 */
+ mutex_enter(&qlge->mbx_mutex);
+
+ ql_read_mailbox_cmd(qlge, &qlge->received_mbx_cmds, qlge->max_read_mbx);
+
+ /*
+ * process PI interrupt as async events, if not done,
+ * then pass to mailbox processing
+ */
+ if (ql_async_event_parser(qlge, &qlge->received_mbx_cmds) == B_FALSE) {
+ QL_PRINT(DBG_MBX, ("%s(%d) mailbox completion interrupt\n",
+ __func__, qlge->instance));
+ /*
+ * If another thread is waiting for the mail box
+ * completion event to occur
+ */
+ if (qlge->mbx_wait_completion == 1) {
+ qlge->mbx_wait_completion = 0;
+ cv_broadcast(&qlge->cv_mbx_intr);
+ QL_PRINT(DBG_MBX,
+ ("%s(%d) mailbox completion signaled \n",
+ __func__, qlge->instance));
+ }
+ }
+ /* inform MPI Firmware to clear the interrupt */
+ ql_write_reg(qlge, REG_HOST_CMD_STATUS,
+ HOST_CMD_CLEAR_RISC_TO_HOST_INTR /* 0x0A */);
+ mutex_exit(&qlge->mbx_mutex);
+ ql_enable_completion_interrupt(qlge, 0); /* MPI is on irq 0 */
+}
+
+/*
+ * Test whether mailbox communication works.
+ * This is used when interrupts are not enabled
+ */
+int
+ql_mbx_test(qlge_t *qlge)
+{
+ mbx_cmd_t mbx_cmds;
+ mbx_data_t mbx_results;
+ int i, test_ok = 1;
+ int rtn_val = DDI_FAILURE;
+
+ for (i = 0; i < NUM_MAILBOX_REGS; i++)
+ mbx_cmds.mb[i] = i;
+
+ mbx_cmds.mb[0] = MBC_MAILBOX_REGISTER_TEST; /* 0x06 */
+ if (ql_issue_mailbox_cmd(qlge, &mbx_cmds) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) ql_issue_mailbox_cmd timeout.",
+ __func__, qlge->instance);
+ goto out;
+ }
+
+	/* wait until the processor interrupt is received */
+ if (ql_poll_processor_intr(qlge, (uint8_t)mbx_cmds.timeout)
+ == DDI_SUCCESS) {
+ QL_PRINT(DBG_MBX, ("%s(%d) PI Intr received",
+ __func__, qlge->instance));
+ ql_read_mailbox_cmd(qlge, &mbx_results, 0);
+
+ ql_write_reg(qlge, REG_HOST_CMD_STATUS,
+ HOST_CMD_CLEAR_RISC_TO_HOST_INTR);
+
+ if (mbx_results.mb[0] != MBS_COMMAND_COMPLETE /* 0x4000 */) {
+ test_ok = 0;
+ } else {
+ for (i = 1; i < NUM_MAILBOX_REGS; i++) {
+ if (mbx_results.mb[i] != i) {
+ test_ok = 0;
+ break;
+ }
+ }
+ }
+ if (test_ok) {
+ rtn_val = DDI_SUCCESS;
+ } else {
+ cmn_err(CE_WARN, "%s(%d) mailbox test failed!",
+ __func__, qlge->instance);
+ }
+ } else {
+ cmn_err(CE_WARN, "%s(%d) mailbox testing error: "
+ "PI Intr not received ", __func__, qlge->instance);
+ }
+out:
+ return (rtn_val);
+}
+
+/*
+ * ql_mbx_test2
+ * Test whether mailbox communication works.
+ * This is used when interrupts are enabled
+ * mailbox cmd:0x06h
+ */
+int
+ql_mbx_test2(qlge_t *qlge)
+{
+ mbx_cmd_t mbx_cmds = {0};
+ int i, test_ok = 1;
+ int rtn_val = DDI_FAILURE;
+
+ for (i = 0; i < NUM_MAILBOX_REGS; i++)
+ mbx_cmds.mb[i] = i;
+
+ mbx_cmds.mb[0] = MBC_MAILBOX_REGISTER_TEST; /* 0x06 */
+ if (ql_issue_mailbox_cmd_and_wait_rsp(qlge, &mbx_cmds) != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "%s(%d) ql_issue_mailbox_cmd_and_wait_rsp failed.",
+ __func__, qlge->instance);
+ goto out;
+ }
+
+	/* verify that the returned outbound mailbox value is what we sent */
+ if (mbx_cmds.mb[0] != MBS_COMMAND_COMPLETE /* 0x4000 */) {
+ test_ok = 0;
+ } else {
+ for (i = 1; i < qlge->max_read_mbx; i++) {
+ if (mbx_cmds.mb[i] != i) {
+ test_ok = 0;
+ break;
+ }
+ }
+ }
+ if (test_ok) {
+ rtn_val = DDI_SUCCESS;
+ } else {
+ cmn_err(CE_WARN, "%s(%d) mailbox test failed!",
+ __func__, qlge->instance);
+ }
+out:
+ return (rtn_val);
+}
+
+/*
+ * ql_get_fw_state
+ * Get fw state.
+ * mailbox cmd:0x69h
+ */
+int
+ql_get_fw_state(qlge_t *qlge, uint32_t *fw_state_ptr)
+{
+ int rtn_val = DDI_FAILURE;
+ mbx_cmd_t mbx_cmds = {0};
+
+ mbx_cmds.mb[0] = MBC_GET_FIRMWARE_STATE;
+
+ if (ql_issue_mailbox_cmd_and_wait_rsp(qlge, &mbx_cmds)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) ql_issue_mailbox_cmd_and_wait_rsp"
+ " failed.", __func__, qlge->instance);
+ goto out;
+ }
+	/* verify that the transaction completed successfully */
+ if (mbx_cmds.mb[0] != MBS_COMMAND_COMPLETE /* 0x4000 */) {
+ cmn_err(CE_WARN, "%s(%d) failed, 0x%x",
+ __func__, qlge->instance, mbx_cmds.mb[0]);
+ } else {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX, ("firmware state: 0x%x\n", mbx_cmds.mb[1]));
+ }
+ if (fw_state_ptr != NULL)
+ *fw_state_ptr = mbx_cmds.mb[1];
+ rtn_val = DDI_SUCCESS;
+out:
+ return (rtn_val);
+}
+
+/*
+ * ql_set_IDC_Req
+ * Send an IDC Request to the firmware to notify all functions
+ * or any specific function on the same port
+ * mailbox cmd:0x100h
+ */
+int
+ql_set_IDC_Req(qlge_t *qlge, uint8_t dest_functions, uint8_t timeout)
+{
+ int rtn_val = DDI_FAILURE;
+ mbx_cmd_t mbx_cmds = {0};
+
+ mbx_cmds.mb[0] = MBC_IDC_REQUEST /* 0x100 */;
+ mbx_cmds.mb[1] = (timeout<<8) | qlge->func_number;
+
+ switch (dest_functions) {
+ case IDC_REQ_DEST_FUNC_ALL:
+ mbx_cmds.mb[1] |= IDC_REQ_ALL_DEST_FUNC_MASK;
+ mbx_cmds.mb[2] = 0;
+ break;
+ case IDC_REQ_DEST_FUNC_0:
+ mbx_cmds.mb[2] = IDC_REQ_DEST_FUNC_0_MASK;
+ break;
+ case IDC_REQ_DEST_FUNC_1:
+ mbx_cmds.mb[2] = IDC_REQ_DEST_FUNC_1_MASK;
+ break;
+ case IDC_REQ_DEST_FUNC_2:
+ mbx_cmds.mb[2] = IDC_REQ_DEST_FUNC_2_MASK;
+ break;
+ case IDC_REQ_DEST_FUNC_3:
+ mbx_cmds.mb[2] = IDC_REQ_DEST_FUNC_3_MASK;
+ break;
+ default:
+ cmn_err(CE_WARN, "Wrong dest functions %x",
+ dest_functions);
+ }
+
+ if (ql_issue_mailbox_cmd_and_wait_rsp(qlge, &mbx_cmds) != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "%s(%d) ql_issue_mailbox_cmd_and_wait_rsp failed.",
+ __func__, qlge->instance);
+ goto out;
+ }
+	/* verify that the transaction completed successfully */
+ if (mbx_cmds.mb[0] == MBA_IDC_INTERMEDIATE_COMPLETE /* 0x1000 */) {
+ QL_PRINT(DBG_MBX, ("%s(%d) mbx1: 0x%x, mbx2: 0x%x\n",
+ __func__, qlge->instance, mbx_cmds.mb[1], mbx_cmds.mb[2]));
+ rtn_val = DDI_SUCCESS;
+ } else if (mbx_cmds.mb[0] == MBS_COMMAND_COMPLETE /* 0x4000 */) {
+		QL_PRINT(DBG_MBX, ("%s(%d) cmd sent successfully, 0x%x\n",
+		    __func__, qlge->instance, mbx_cmds.mb[0]));
+ rtn_val = DDI_SUCCESS;
+ } else if (mbx_cmds.mb[0] == MBS_COMMAND_ERROR /* 0x4005 */) {
+ cmn_err(CE_WARN, "%s(%d) failed: COMMAND_ERROR",
+ __func__, qlge->instance);
+ } else if (mbx_cmds.mb[0] == MBS_COMMAND_PARAMETER_ERROR /* 0x4006 */) {
+ cmn_err(CE_WARN, "%s(%d) failed: COMMAND_PARAMETER_ERROR",
+ __func__, qlge->instance);
+ } else {
+		cmn_err(CE_WARN, "%s(%d) unknown result: mbx[0]: 0x%x; mbx[1]:"
+ " 0x%x; mbx[2]: 0x%x", __func__, qlge->instance,
+ mbx_cmds.mb[0], mbx_cmds.mb[1], mbx_cmds.mb[2]);
+ }
+
+out:
+ return (rtn_val);
+}
+
+/*
+ * ql_set_mpi_port_config
+ * Send a new port configuration to the MPI.
+ * mailbox cmd:0x122h
+ */
+static int
+ql_set_mpi_port_config(qlge_t *qlge, port_cfg_info_t new_cfg)
+{
+ int rtn_val = DDI_FAILURE;
+ mbx_cmd_t mbx_cmds = {0};
+
+ mbx_cmds.mb[0] = MBC_SET_PORT_CONFIG /* 0x122 */;
+ mbx_cmds.mb[1] = new_cfg.link_cfg;
+ mbx_cmds.mb[2] = new_cfg.max_frame_size;
+
+ if (ql_issue_mailbox_cmd_and_wait_rsp(qlge, &mbx_cmds) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) ql_issue_mailbox_cmd_and_wait_rsp"
+ " failed.", __func__, qlge->instance);
+ goto out;
+ }
+	/* verify that the transaction completed successfully */
+ if ((mbx_cmds.mb[0] != MBS_COMMAND_COMPLETE /* 0x4000 */) &&
+ (mbx_cmds.mb[0] != MBA_IDC_COMPLETE /* 0x8100 */)) {
+ cmn_err(CE_WARN, "%s(%d) failed, 0x%x",
+ __func__, qlge->instance, mbx_cmds.mb[0]);
+ } else
+ rtn_val = DDI_SUCCESS;
+out:
+ return (rtn_val);
+}
+
+/*
+ * ql_set_port_cfg
+ * Set new port configuration
+ */
+int
+ql_set_port_cfg(qlge_t *qlge)
+{
+ uint32_t loop_back_bit_mask = 0x0e; /* bit 1-3 */
+ uint32_t pause_bit_mask = 0x60; /* bit 5-6 */
+
+ /* clear pause bits */
+ qlge->port_cfg_info.link_cfg &= ~pause_bit_mask;
+ /* clear loop back bits */
+ qlge->port_cfg_info.link_cfg &= ~loop_back_bit_mask;
+ /* set new pause mode */
+ if (qlge->pause == PAUSE_MODE_STANDARD)
+ qlge->port_cfg_info.link_cfg |= STD_PAUSE;
+ else if (qlge->pause == PAUSE_MODE_PER_PRIORITY)
+ qlge->port_cfg_info.link_cfg |= PP_PAUSE;
+
+ /* loop back cfg: bit1-3 */
+ if (qlge->loop_back_mode == QLGE_LOOP_INTERNAL_PARALLEL)
+ qlge->port_cfg_info.link_cfg |= LOOP_INTERNAL_PARALLEL;
+ else if (qlge->loop_back_mode == QLGE_LOOP_INTERNAL_SERIAL)
+ qlge->port_cfg_info.link_cfg |= LOOP_INTERNAL_SERIAL;
+
+ /* max frame size */
+ if (qlge->mtu == ETHERMTU) {
+ qlge->port_cfg_info.link_cfg &= ~ENABLE_JUMBO;
+ qlge->port_cfg_info.max_frame_size = NORMAL_FRAME_SIZE;
+ } else {
+ qlge->port_cfg_info.link_cfg |= ENABLE_JUMBO;
+ qlge->port_cfg_info.max_frame_size = JUMBO_FRAME_SIZE;
+ }
+
+ return (ql_set_mpi_port_config(qlge, qlge->port_cfg_info));
+
+}
+
+/*
+ * ql_get_port_cfg
+ * Get port configuration.
+ * mailbox cmd:0x123h
+ */
+int
+ql_get_port_cfg(qlge_t *qlge)
+{
+ int rtn_val = DDI_FAILURE;
+ mbx_cmd_t mbx_cmds = {0};
+
+ mbx_cmds.mb[0] = MBC_GET_PORT_CONFIG /* 0x123 */;
+ if (ql_issue_mailbox_cmd_and_wait_rsp(qlge, &mbx_cmds) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) ql_issue_mailbox_cmd_and_wait_rsp"
+ " failed.", __func__, qlge->instance);
+ goto out;
+ }
+	/* verify that the transaction completed successfully */
+ if (mbx_cmds.mb[0] != MBS_COMMAND_COMPLETE /* 0x4000 */) {
+ cmn_err(CE_WARN, "%s(%d) failed, 0x%x",
+ __func__, qlge->instance, mbx_cmds.mb[0]);
+ } else { /* verify frame size */
+ if ((mbx_cmds.mb[2] == NORMAL_FRAME_SIZE) ||
+ (mbx_cmds.mb[2] == JUMBO_FRAME_SIZE)) {
+ qlge->port_cfg_info.link_cfg = mbx_cmds.mb[1];
+ qlge->port_cfg_info.max_frame_size = mbx_cmds.mb[2];
+ QL_PRINT(DBG_MBX, ("link_cfg: 0x%x, max_frame_size:"
+ " %d bytes\n", mbx_cmds.mb[1], mbx_cmds.mb[2]));
+ rtn_val = DDI_SUCCESS;
+ } else {
+ cmn_err(CE_WARN, "bad link_cfg: 0x%x, max_frame_size:"
+ " %d bytes", mbx_cmds.mb[1], mbx_cmds.mb[2]);
+ }
+ }
+out:
+ return (rtn_val);
+}
+
+/*
+ * qlge_get_link_status
+ * Get link status.
+ * mailbox cmd:0x124h
+ */
+int
+qlge_get_link_status(qlge_t *qlge,
+ struct qlnic_link_status_info *link_status_ptr)
+{
+ int rtn_val = DDI_FAILURE;
+ mbx_cmd_t mbx_cmds = {0};
+
+ mbx_cmds.mb[0] = MBC_GET_LINK_STATUS /* 0x124 */;
+
+ if (ql_issue_mailbox_cmd_and_wait_rsp(qlge, &mbx_cmds)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "%s(%d) ql_issue_mailbox_cmd_and_wait_rsp failed.",
+ __func__, qlge->instance);
+ goto out;
+ }
+	/* verify that the transaction completed successfully */
+ if (mbx_cmds.mb[0] != MBS_COMMAND_COMPLETE /* 0x4000 */) {
+ cmn_err(CE_WARN, "%s(%d) failed, 0x%x", __func__,
+ qlge->instance, mbx_cmds.mb[0]);
+ } else {
+ /* EMPTY */
+ QL_PRINT(DBG_MBX,
+ ("link status: status1 : 0x%x, status2 : 0x%x, "
+ "status3 : 0x%x\n",
+ mbx_cmds.mb[1], mbx_cmds.mb[2], mbx_cmds.mb[3]));
+ }
+ if (link_status_ptr != NULL) {
+ link_status_ptr->link_status_info = mbx_cmds.mb[1];
+ link_status_ptr->additional_info = mbx_cmds.mb[2];
+ link_status_ptr->network_hw_info = mbx_cmds.mb[3];
+ link_status_ptr->dcbx_frame_counters_info = mbx_cmds.mb[4];
+ link_status_ptr->change_counters_info = mbx_cmds.mb[5];
+ }
+ rtn_val = DDI_SUCCESS;
+out:
+
+ return (rtn_val);
+}
+
+/*
+ * ql_get_firmware_version
+ * Get firmware version.
+ */
+int
+ql_get_firmware_version(qlge_t *qlge,
+ struct qlnic_mpi_version_info *mpi_version_ptr)
+{
+ int rtn_val = DDI_FAILURE;
+ mbx_cmd_t mbx_cmds = {0};
+
+ mbx_cmds.mb[0] = MBC_ABOUT_FIRMWARE /* 0x08 */;
+
+ if (ql_issue_mailbox_cmd_and_wait_rsp(qlge, &mbx_cmds)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "%s(%d) ql_issue_mailbox_cmd_and_wait_rsp failed.",
+ __func__, qlge->instance);
+ goto out;
+ }
+
+	/* verify that the transaction completed successfully */
+ if (mbx_cmds.mb[0] != MBS_COMMAND_COMPLETE /* 0x4000 */) {
+ cmn_err(CE_WARN, "%s(%d) failed, 0x%x", __func__,
+ qlge->instance, mbx_cmds.mb[0]);
+ } else {
+ qlge->fw_version_info.major_version =
+ LSB(MSW(mbx_cmds.mb[1]));
+ qlge->fw_version_info.minor_version =
+ MSB(LSW(mbx_cmds.mb[1]));
+ qlge->fw_version_info.sub_minor_version =
+ LSB(LSW(mbx_cmds.mb[1]));
+ qlge->phy_version_info.major_version =
+ LSB(MSW(mbx_cmds.mb[2]));
+ qlge->phy_version_info.minor_version =
+ MSB(LSW(mbx_cmds.mb[2]));
+ qlge->phy_version_info.sub_minor_version =
+ LSB(LSW(mbx_cmds.mb[2]));
+#ifdef QLGE_LOAD_UNLOAD
+ cmn_err(CE_NOTE, "firmware version: %d.%d.%d\n",
+ qlge->fw_version_info.major_version,
+ qlge->fw_version_info.minor_version,
+ qlge->fw_version_info.sub_minor_version);
+#endif
+ if (mpi_version_ptr != NULL) {
+ mpi_version_ptr->fw_version =
+ (qlge->fw_version_info.major_version<<16)
+ |(qlge->fw_version_info.minor_version<<8)
+ |(qlge->fw_version_info.sub_minor_version);
+ mpi_version_ptr->phy_version =
+ (qlge->phy_version_info.major_version<<16)
+ |(qlge->phy_version_info.minor_version<<8)
+ |(qlge->phy_version_info.sub_minor_version);
+ }
+ }
+ rtn_val = DDI_SUCCESS;
+out:
+ return (rtn_val);
+}
+
+/*
+ * Trigger a system error event
+ */
+int
+ql_trigger_system_error_event(qlge_t *qlge)
+{
+ mbx_cmd_t mbx_cmds = {0};
+ int rtn_val = DDI_FAILURE;
+
+ mbx_cmds.mb[0] = MBC_GENERATE_SYS_ERROR; /* 0x2A */
+ if (ql_issue_mailbox_cmd(qlge, &mbx_cmds) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s(%d) ql_issue_mailbox_cmd timeout.",
+ __func__, qlge->instance);
+ goto out;
+ }
+ rtn_val = DDI_SUCCESS;
+out:
+ return (rtn_val);
+}
+
+/*
+ * Reset the MPI RISC Processor
+ */
+int
+ql_reset_mpi_risc(qlge_t *qlge)
+{
+ int rtn_val = DDI_FAILURE;
+
+ /* Reset the MPI Processor */
+ ql_write_reg(qlge, REG_HOST_CMD_STATUS, HOST_CMD_SET_RISC_RESET);
+ if (ql_wait_reg_bit(qlge, REG_HOST_CMD_STATUS, RISC_RESET,
+ BIT_SET, 0) != DDI_SUCCESS) {
+ ql_read_reg(qlge, REG_HOST_CMD_STATUS);
+ goto out;
+ }
+ ql_write_reg(qlge, REG_HOST_CMD_STATUS, HOST_CMD_CLEAR_RISC_RESET);
+ rtn_val = DDI_SUCCESS;
+out:
+ return (rtn_val);
+}
+
+int
+ql_read_risc_ram(qlge_t *qlge, uint32_t risc_address, uint64_t bp,
+ uint32_t word_count)
+{
+ int rval = DDI_FAILURE;
+ mbx_cmd_t mc = {0};
+ mbx_cmd_t *mcp = &mc;
+ mbx_data_t mbx_results;
+
+	QL_PRINT(DBG_MBX, ("%s(%d): read risc addr:0x%x, "
+	    "phys_addr %llx, %x words\n", __func__, qlge->instance,
+	    risc_address, (unsigned long long)bp, word_count));
+ if (CFG_IST(qlge, CFG_CHIP_8100)) {
+ mcp->mb[0] = MBC_DUMP_RISC_RAM /* 0x0C */;
+ mcp->mb[1] = LSW(risc_address);
+ mcp->mb[2] = MSW(LSD(bp));
+ mcp->mb[3] = LSW(LSD(bp));
+ mcp->mb[4] = MSW(word_count);
+ mcp->mb[5] = LSW(word_count);
+ mcp->mb[6] = MSW(MSD(bp));
+ mcp->mb[7] = LSW(MSD(bp));
+ mcp->mb[8] = MSW(risc_address);
+ }
+ mcp->timeout = 10 /* MAILBOX_TOV */;
+
+ if (ql_issue_mailbox_cmd_and_poll_rsp(qlge, mcp, &mbx_results)
+ != DDI_SUCCESS) {
+ goto out;
+ } else {
+ QL_PRINT(DBG_MBX, ("%s(%d) PI Intr received",
+ __func__, qlge->instance));
+ if (mbx_results.mb[0] == MBS_COMMAND_COMPLETE /* 0x4000 */) {
+ QL_PRINT(DBG_MBX, ("%s(%d): success\n",
+ __func__, qlge->instance));
+ rval = DDI_SUCCESS;
+ } else {
+ cmn_err(CE_WARN, "%s(%d): failed , status %x",
+ __func__, qlge->instance, mbx_results.mb[0]);
+ }
+ }
+out:
+ return (rval);
+}
diff --git a/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge.h b/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge.h
new file mode 100644
index 0000000000..77725fde4c
--- /dev/null
+++ b/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge.h
@@ -0,0 +1,904 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 QLogic Corporation. All rights reserved.
+ */
+
+#ifndef _QLGE_H
+#define _QLGE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunmdi.h>
+#include <sys/modctl.h>
+#include <sys/pci.h>
+#include <sys/dlpi.h>
+#include <sys/sdt.h>
+#include <sys/mac_provider.h>
+#include <sys/mac.h>
+#include <sys/mac_flow.h>
+#include <sys/mac_ether.h>
+#include <sys/vlan.h>
+#include <sys/netlb.h>
+#include <sys/kmem.h>
+#include <sys/file.h>
+#include <sys/proc.h>
+#include <sys/callb.h>
+#include <sys/disp.h>
+#include <sys/strsun.h>
+#include <sys/ethernet.h>
+#include <sys/miiregs.h>
+#include <sys/kstat.h>
+#include <sys/byteorder.h>
+
+#include <qlge_hw.h>
+#include <qlge_dbg.h>
+#include <qlge_open.h>
+
+#define ADAPTER_NAME "qlge"
+
+/*
+ * Local Macro Definitions.
+ */
+#ifdef TRUE
+#undef TRUE
+#endif
+#define TRUE 1
+
+#ifdef FALSE
+#undef FALSE
+#endif
+#define FALSE 0
+
+/* #define QLGE_TRACK_BUFFER_USAGE */
+/*
+ * byte order, sparc is big endian, x86 is little endian,
+ * but PCI is little endian only
+ */
+#ifdef sparc
+#define cpu_to_le64(x) BSWAP_64(x)
+#define cpu_to_le32(x) BSWAP_32(x)
+#define cpu_to_le16(x) BSWAP_16(x)
+#define le64_to_cpu(x) cpu_to_le64(x)
+#define le32_to_cpu(x) cpu_to_le32(x)
+#define le16_to_cpu(x) cpu_to_le16(x)
+#else
+#define cpu_to_le64(x) (x)
+#define cpu_to_le32(x) (x)
+#define cpu_to_le16(x) (x)
+#define le64_to_cpu(x) (x)
+#define le32_to_cpu(x) (x)
+#define le16_to_cpu(x) (x)
+#endif
+
+/*
+ * Macros to help code, maintain, etc.
+ */
+
+#define LSB(x) (uint8_t)(x)
+#define MSB(x) (uint8_t)((uint16_t)(x) >> 8)
+
+#define MSW(x) (uint16_t)((uint32_t)(x) >> 16)
+#define LSW(x) (uint16_t)(x)
+
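+/*
+ * Worked example: for a mailbox word x = 0x00010203, MSW(x) is 0x0001
+ * and LSW(x) is 0x0203, so LSB(MSW(x)) == 0x01, MSB(LSW(x)) == 0x02 and
+ * LSB(LSW(x)) == 0x03; this is the major.minor.sub_minor firmware
+ * version layout decoded in qlge_mpi.c.
+ */
+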
+#define MS32(x) (uint32_t)((uint64_t)(x) >> 32)
+#define LS32(x) (uint32_t)(x)
+
+#define MSW_LSB(x) (uint8_t)(LSB(MSW(x)))
+#define MSW_MSB(x) (uint8_t)(MSB(MSW(x)))
+
+#define LSD(x) (uint32_t)(x)
+#define MSD(x) (uint32_t)((uint64_t)(x) >> 32)
+
+#define SHORT_TO_LONG(a, b) (uint32_t)((uint16_t)b << 16 | (uint16_t)a)
+#define CHAR_TO_SHORT(a, b) (uint16_t)((uint8_t)b << 8 | (uint8_t)a)
+
+#define SWAP_ENDIAN_16(x) ((LSB(x) << 8) | MSB(x))
+
+#define SWAP_ENDIAN_32(x) ((SWAP_ENDIAN_16(LSW(x)) << 16) | \
+ SWAP_ENDIAN_16(MSW(x)))
+
+#define SWAP_ENDIAN_64(x) (((uint64_t)SWAP_ENDIAN_32(LS32(x)) << 32) | \
+			SWAP_ENDIAN_32(MS32(x)))
+
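+/*
+ * Illustrative sketch, not part of the original patch: the chip reads
+ * descriptors in little-endian byte order, so a 64-bit DMA address is
+ * split with LS32()/MS32() and converted with cpu_to_le32() before it
+ * is written into a queue entry, mirroring how the buffer queue
+ * elements are filled elsewhere in the driver.
+ */
+#ifdef QLGE_EXAMPLE_SKETCH
+static void
+ql_example_store_dma_addr(uint32_t *addr_lo, uint32_t *addr_hi,
+    uint64_t dma_addr)
+{
+	*addr_lo = cpu_to_le32(LS32(dma_addr));	/* low 32 bits */
+	*addr_hi = cpu_to_le32(MS32(dma_addr));	/* high 32 bits */
+}
+#endif /* QLGE_EXAMPLE_SKETCH */
+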
+#define QL_MIN(x, y) (((x) < (y)) ? (x) : (y))
+
+#define CARRIER_ON(qlge) mac_link_update((qlge)->mh, LINK_STATE_UP)
+#define CARRIER_OFF(qlge) mac_link_update((qlge)->mh, LINK_STATE_DOWN)
+
+/*
+ * qlge local function return status codes
+ */
+#define QL_ERROR 1
+#define QL_SUCCESS 0
+/*
+ * Solaris version compatibility definitions.
+ */
+#define QL_GET_LBOLT(timer) timer = ddi_get_lbolt()
+#define QL_DMA_XFER_COUNTER (uint64_t)0xffffffff
+#define QL_DRIVER_NAME(dip) ddi_driver_name(ddi_get_parent(dip))
+
+#define MINOR_NODE_FLAG 8
+
+/*
+ * Host adapter default definitions.
+ */
+
+/* Timeout timer counts in seconds (must be greater than 1 second). */
+#define USEC_PER_TICK drv_hztousec(1)
+#define TICKS_PER_SEC drv_usectohz(1000000)
+#define QL_ONE_SEC_DELAY 1000000
+#define QL_ONE_MSEC_DELAY 1000
+#define TX_TIMEOUT (3 * TICKS_PER_SEC)
+/*
+ * DMA attributes definitions.
+ */
+#define QL_DMA_LOW_ADDRESS (uint64_t)0
+#define QL_DMA_HIGH_64BIT_ADDRESS (uint64_t)0xffffffffffffffffull
+#define QL_DMA_HIGH_32BIT_ADDRESS (uint64_t)0xffffffff
+#define QL_DMA_ADDRESS_ALIGNMENT (uint64_t)8
+#define QL_DMA_ALIGN_8_BYTE_BOUNDARY (uint64_t)BIT_3
+#define QL_DMA_RING_ADDRESS_ALIGNMENT (uint64_t)64
+#define QL_DMA_ALIGN_64_BYTE_BOUNDARY (uint64_t)BIT_6
+#define QL_DMA_BURSTSIZES 0xfff
+#define QL_DMA_MIN_XFER_SIZE 1
+#define QL_DMA_MAX_XFER_SIZE (uint64_t)0xffffffff
+#define QL_DMA_SEGMENT_BOUNDARY (uint64_t)0xffffffff
+#define QL_DMA_GRANULARITY 1
+#define QL_DMA_XFER_FLAGS 0
+#define QL_MAX_COOKIES 16
+
+/*
+ * ISP PCI Configuration.
+ */
+#define QL_INTR_INTERVAL 128 /* default interrupt interval 128us */
+#define QL_INTR_PKTS 8 /* default packet count threshold, 8 packets */
+
+/* GLD */
+#define QL_STREAM_OPS(dev_ops, attach, detach) \
+ DDI_DEFINE_STREAM_OPS(dev_ops, nulldev, nulldev, attach, detach, \
+ nodev, NULL, D_MP, NULL, ql_quiesce)
+
+#define QL_GET_DEV(dip) ((qlge_t *)(ddi_get_driver_private(dip)))
+#define RESUME_TX(tx_ring) mac_tx_update(tx_ring->qlge->mh);
+#define RX_UPSTREAM(rx_ring, mp) mac_rx(rx_ring->qlge->mh, \
+ rx_ring->qlge->handle, mp);
+
+/* GLD DMA */
+extern ddi_device_acc_attr_t ql_dev_acc_attr;
+extern ddi_device_acc_attr_t ql_desc_acc_attr;
+extern ddi_device_acc_attr_t ql_buf_acc_attr;
+
+struct dma_info {
+ void *vaddr;
+ ddi_dma_handle_t dma_handle;
+ ddi_acc_handle_t acc_handle;
+ uint64_t dma_addr;
+ size_t mem_len; /* allocated size */
+ offset_t offset; /* relative to handle */
+};
+
+/*
+ * Sync a DMA area described by a dma_info
+ */
+#define DMA_SYNC(area, flag) ((void) ddi_dma_sync((area).dma_handle, \
+ (area).offset, (area).mem_len, (flag)))
+
+/*
+ * Find the (kernel virtual) address of block of memory
+ * described by a dma_info
+ */
+#define DMA_VPTR(area) ((area).vaddr)
+
+/*
+ * Zero a block of memory described by a dma_info
+ */
+#define DMA_ZERO(area) bzero(DMA_VPTR(area), (area).mem_len)
+
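+/*
+ * Illustrative sketch, not part of the original patch: typical use of
+ * the dma_info helpers above, assuming "area" has already been
+ * allocated and bound by the driver's DMA setup code.
+ */
+#ifdef QLGE_EXAMPLE_SKETCH
+static void
+ql_example_publish_area(struct dma_info *area)
+{
+	/* clear the whole mapped region */
+	DMA_ZERO(*area);
+	/* ... fill in descriptors through DMA_VPTR(*area) ... */
+	/* make the CPU writes visible to the device */
+	DMA_SYNC(*area, DDI_DMA_SYNC_FORDEV);
+}
+#endif /* QLGE_EXAMPLE_SKETCH */
+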
+#define MAX_SG_ELEMENTS 16
+#define QL_MAX_TX_DMA_HANDLES MAX_SG_ELEMENTS
+#define TOTAL_SG_ELEMENTS (MAX_SG_ELEMENTS + TX_DESC_PER_IOCB)
+
+/*
+ * ISP PCI Configuration.
+ */
+
+/* Initialize steps */
+#define INIT_SOFTSTATE_ALLOC BIT_0
+#define INIT_REGS_SETUP BIT_1
+#define INIT_DOORBELL_REGS_SETUP BIT_2
+#define INIT_MAC_ALLOC BIT_3
+#define INIT_PCI_CONFIG_SETUP BIT_4
+#define INIT_SETUP_RINGS BIT_5
+#define INIT_MEMORY_ALLOC BIT_6
+#define INIT_INTR_ALLOC BIT_7
+#define INIT_ADD_INTERRUPT BIT_8
+#define INIT_LOCKS_CREATED BIT_9
+#define INIT_ADD_SOFT_INTERRUPT BIT_10
+#define INIT_MUTEX BIT_11
+#define ADAPTER_INIT BIT_12
+#define INIT_MAC_REGISTERED BIT_13
+#define INIT_KSTATS BIT_14
+#define INIT_ADAPTER_UP BIT_15
+#define INIT_ALLOC_RX_BUF BIT_16
+#define INIT_INTR_ENABLED BIT_17
+
+
+#define LS_64BITS(x) (uint32_t)(0xffffffff & ((uint64_t)x))
+#define MS_64BITS(x) (uint32_t)(0xffffffff & (((uint64_t)x)>>16>>16))
+
+typedef uint64_t dma_addr_t;
+extern int ql_quiesce(dev_info_t *dip);
+
+/*
+ * LSO can support up to 65536 bytes of data, but it cannot all be sent in
+ * one IOCB, which has only 8 TX OALs; additional OALs must be supplied
+ * separately.
+ */
+#define QL_LSO_MAX 65536 /* Maximum supported LSO data Length */
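+
+/*
+ * Worked example: ql_m_getcapab() advertises page_size *
+ * (QL_MAX_TX_DMA_HANDLES - 1) as the LSO limit; with 4 KB pages that is
+ * 4096 * 15 = 61440 bytes, safely below QL_LSO_MAX.
+ */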
+
+enum tx_mode_t {
+ USE_DMA,
+ USE_COPY
+};
+
+#define QL_MAX_COPY_LENGTH 256
+
+#define MAX_FRAGMENTS_IN_IOCB 7
+
+#ifndef VLAN_ID_MASK
+#define VLAN_ID_MASK 0x0fffu
+#endif
+#ifndef VLAN_TAGSZ
+#define VLAN_TAGSZ 4
+#endif
+
+#ifndef ETHERTYPE_VLAN
+#define ETHERTYPE_VLAN 0x8100
+#endif
+
+#ifndef MBLKL
+#define MBLKL(mp) ((uintptr_t)(mp)->b_wptr - (uintptr_t)(mp)->b_rptr)
+#endif
+/*
+ * Checksum Offload
+ */
+#define TCP_CKSUM_OFFSET 16
+#define UDP_CKSUM_OFFSET 6
+#define IPPROTO_IPv6OVERv4 41
+
+/*
+ * Driver must be in one of these states
+ */
+enum mac_state {
+ QL_MAC_INIT, /* in the initialization stage */
+ QL_MAC_ATTACHED, /* driver attached */
+ QL_MAC_STARTED, /* interrupt enabled, driver is ready */
+ QL_MAC_BRINGDOWN, /* in the bring down process */
+	QL_MAC_STOPPED,		/* stopped, no more interrupts */
+ QL_MAC_DETACH, /* to be detached */
+ QL_MAC_SUSPENDED
+};
+
+/*
+ * Soft Request Flag
+ */
+#define NEED_HW_RESET BIT_0 /* need hardware reset */
+#define NEED_MPI_RESET BIT_1 /* need MPI RISC reset */
+
+/*
+ * (Internal) return values from ioctl subroutines
+ */
+enum ioc_reply {
+ IOC_INVAL = -1, /* bad, NAK with EINVAL */
+ IOC_DONE, /* OK, reply sent */
+ IOC_ACK, /* OK, just send ACK */
+ IOC_REPLY, /* OK, just send reply */
+ IOC_RESTART_ACK, /* OK, restart & ACK */
+ IOC_RESTART_REPLY /* OK, restart & reply */
+};
+
+/*
+ * Link Speed,in Mbps
+ */
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_10G 10000
+
+/*
+ * Multicast List
+ */
+typedef struct {
+ struct ether_addr addr;
+ unsigned char reserved[2];
+} ql_multicast_addr;
+
+#define MAX_MULTICAST_LIST_SIZE 128
+
+typedef struct {
+ struct ether_addr addr; /* in canonical form */
+ boolean_t set; /* B_TRUE => valid */
+} qlge_mac_addr_t;
+
+#define MAX_UNICAST_LIST_SIZE 128
+
+/*
+ * Device kstate structure.
+ */
+enum {
+ QL_KSTAT_CHIP = 0,
+ QL_KSTAT_LINK,
+ QL_KSTAT_REG,
+ QL_KSTAT_COUNT
+};
+
+/*
+ * Register Bit Set/Reset
+ */
+enum {
+ BIT_SET = 0,
+ BIT_RESET
+};
+
+/*
+ * Flash Image Search State
+ */
+enum { STOP_SEARCH, /* Image address bad, no more search */
+ CONTINUE_SEARCH, /* Image address ok, continue search */
+ LAST_IMAGE_FOUND /* Found last image and FLTDS address */
+};
+
+/*
+ * Loop Back Modes
+ */
+enum { QLGE_LOOP_NONE,
+ QLGE_LOOP_INTERNAL_PARALLEL,
+ QLGE_LOOP_INTERNAL_SERIAL,
+ QLGE_LOOP_EXTERNAL_PHY
+};
+
+/* for soft state routine */
+typedef struct {
+ offset_t index;
+ char *name;
+} ql_ksindex_t;
+
+struct bq_desc {
+ struct dma_info bd_dma;
+ struct bq_desc *next;
+ struct rx_ring *rx_ring;
+ mblk_t *mp;
+ frtn_t rx_recycle; /* recycle function - called after mp */
+ /* is to be freed by OS */
+ uint16_t index;
+ uint16_t free_buf; /* Set to indicate the buffer is */
+ /* being freed, new one should not */
+ /* be allocated */
+ uint32_t upl_inuse; /* buffer in use by upper layers */
+};
+
+#define VM_PAGE_SIZE 4096
+
+#define QLGE_POLL_ALL -1
+
+#define SMALL_BUFFER_SIZE 512
+#define LARGE_BUFFER_SIZE 4096
+
+#define MAX_TX_WAIT_COUNT 1000
+#define MAX_RX_WAIT_COUNT 25 /* 25 seconds */
+
+#define MIN_BUFFERS_ARM_COUNT 16
+#define MIN_BUFFERS_FREE_COUNT 32 /* If the free buffer count goes over */
+					/* this value, arm the chip */
+/* if fewer than 16 free large buffer nodes are in the free list, then */
+/* rx has to use the copy method to send packets upstream */
+#define RX_COPY_MODE_THRESHOLD (MIN_BUFFERS_ARM_COUNT/4)
+/* if there are more than TX_STOP_THRESHOLD free tx buffers, try to send */
+#define TX_STOP_THRESHOLD 16
+#define TX_RESUME_THRESHOLD 8
+
+struct tx_ring_desc {
+ struct ob_mac_iocb_req *queue_entry; /* tx descriptor of this */
+ struct dma_info dma_mem_area; /* tx buffer */
+ ddi_dma_handle_t tx_dma_handle[QL_MAX_TX_DMA_HANDLES];
+ int tx_dma_handle_used;
+ enum tx_mode_t tx_type; /* map mode or copy mode */
+ mblk_t *mp; /* requested sending packet */
+ uint32_t index;
+ caddr_t copy_buffer;
+ uint64_t copy_buffer_dma_addr;
+ struct dma_info oal_dma; /* oal is premapped */
+ uint64_t oal_dma_addr; /* oal dma address premapped */
+ uint32_t tx_bytes;
+ void *oal;
+};
+
+struct tx_ring {
+ struct qlge *qlge;
+ struct dma_info wqicb_dma;
+ uint16_t cq_id; /* completion (rx) queue for */
+ /* tx completions */
+ uint8_t wq_id;
+ uint32_t wq_size;
+ uint32_t wq_len;
+ kmutex_t tx_lock;
+ struct dma_info wq_dma;
+ volatile uint32_t tx_free_count;
+ uint32_t tx_mode;
+ boolean_t queue_stopped; /* Tx no resource */
+ uint32_t *prod_idx_db_reg;
+ uint16_t prod_idx;
+ uint32_t *valid_db_reg; /* PCI doorbell mem area + 4 */
+ struct tx_ring_desc *wq_desc;
+ /* shadow copy of consumer idx */
+ uint32_t *cnsmr_idx_sh_reg;
+ /* dma-shadow copy consumer */
+ uint64_t cnsmr_idx_sh_reg_dma;
+ uint32_t defer; /* tx no resource */
+ uint64_t obytes;
+ uint64_t opackets;
+ uint32_t errxmt;
+ uint64_t brdcstxmt;
+ uint64_t multixmt;
+ uint64_t tx_fail_dma_bind;
+ uint64_t tx_no_dma_handle;
+ uint64_t tx_no_dma_cookie;
+
+ enum mac_state mac_flags;
+};
+
+struct bq_element {
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+};
+
+/*
+ * Type of inbound queue.
+ */
+enum {
+ DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
+ TX_Q = 3, /* Handles outbound completions. */
+ RX_Q = 4, /* Handles inbound completions. */
+};
+
+struct rx_ring {
+ struct dma_info cqicb_dma;
+
+ /* GLD required flags */
+ uint64_t ring_gen_num;
+ /* statistics */
+ uint64_t rx_packets;
+ uint64_t rx_bytes;
+ uint32_t frame_too_long;
+ uint32_t frame_too_short;
+ uint32_t fcs_err;
+ uint32_t rx_packets_dropped_no_buffer;
+ uint32_t rx_pkt_dropped_mac_unenabled;
+ volatile uint32_t rx_indicate;
+
+ /* miscellaneous */
+ int type; /* DEFAULT_Q, TX_Q, RX_Q */
+ kmutex_t rx_lock;
+ uint32_t irq;
+ struct qlge *qlge;
+ uint32_t cpu; /* Which CPU this should run on. */
+ enum mac_state mac_flags;
+ /* completion queue */
+ struct dma_info cq_dma; /* virtual addr and phy addr */
+ uint32_t cq_size;
+ uint32_t cq_len;
+ uint16_t cq_id;
+ volatile uint32_t *prod_idx_sh_reg; /* Shadowed prod reg */
+ uint64_t prod_idx_sh_reg_dma; /* Physical address */
+ uint32_t *cnsmr_idx_db_reg; /* PCI db mem area 0 */
+ uint32_t cnsmr_idx; /* current sw idx */
+ struct net_rsp_iocb *curr_entry; /* next entry on queue */
+ uint32_t *valid_db_reg; /* PCI doorbell mem area + 4 */
+
+ /* large buffer queue */
+ uint32_t lbq_len; /* entry count */
+ uint32_t lbq_size; /* size in bytes */
+ uint32_t lbq_buf_size;
+ struct dma_info lbq_dma; /* lbq dma info */
+ uint64_t *lbq_base_indirect;
+ uint64_t lbq_base_indirect_dma;
+ kmutex_t lbq_lock;
+ struct bq_desc **lbuf_in_use;
+ volatile uint32_t lbuf_in_use_count;
+ struct bq_desc **lbuf_free;
+ volatile uint32_t lbuf_free_count; /* free lbuf desc cnt */
+ uint32_t *lbq_prod_idx_db_reg; /* PCI db mem area+0x18 */
+ uint32_t lbq_prod_idx; /* current sw prod idx */
+ uint32_t lbq_curr_idx; /* next entry we expect */
+ uint32_t lbq_free_tail; /* free tail */
+ uint32_t lbq_free_head; /* free head */
+ uint32_t lbq_use_tail; /* inuse tail */
+ uint32_t lbq_use_head; /* inuse head */
+
+ struct bq_desc *lbq_desc;
+
+ /* small buffer queue */
+ uint32_t sbq_len; /* entry count */
+ uint32_t sbq_size; /* size in bytes of queue */
+ uint32_t sbq_buf_size;
+ struct dma_info sbq_dma; /* sbq dma info */
+ uint64_t *sbq_base_indirect;
+ uint64_t sbq_base_indirect_dma;
+ kmutex_t sbq_lock;
+ struct bq_desc **sbuf_in_use;
+ volatile uint32_t sbuf_in_use_count;
+ struct bq_desc **sbuf_free;
+ volatile uint32_t sbuf_free_count; /* free buffer desc cnt */
+ uint32_t *sbq_prod_idx_db_reg; /* PCI db mem area+0x1c */
+ uint32_t sbq_prod_idx; /* current sw prod idx */
+ uint32_t sbq_curr_idx; /* next entry we expect */
+ uint32_t sbq_free_tail; /* free tail */
+ uint32_t sbq_free_head; /* free head */
+ uint32_t sbq_use_tail; /* inuse tail */
+ uint32_t sbq_use_head; /* inuse head */
+ struct bq_desc *sbq_desc;
+ /* for test purpose */
+ uint32_t rx_failed_sbq_allocs;
+ uint32_t rx_failed_lbq_allocs;
+ uint32_t sbuf_copy_count;
+ uint32_t lbuf_copy_count;
+
+};
+
+struct intr_ctx {
+ struct qlge *qlge;
+ uint32_t intr;
+ uint32_t hooked;
+ uint32_t intr_en_mask;
+ uint32_t intr_dis_mask;
+ uint32_t intr_read_mask;
+ /*
+ * It's incremented for
+ * each irq handler that is scheduled.
+ * When each handler finishes it
+ * decrements irq_cnt and enables
+ * interrupts if it's zero.
+ */
+ uint32_t irq_cnt;
+ uint_t (*handler)(caddr_t, caddr_t);
+};
+
+struct tx_buf_desc {
+ uint64_t addr;
+ uint32_t len;
+#define TX_DESC_LEN_MASK 0x000fffff
+#define TX_DESC_C 0x40000000
+#define TX_DESC_E 0x80000000
+};
+
+typedef struct qlge {
+ /*
+ * Solaris adapter configuration data
+ */
+ dev_info_t *dip;
+ int instance;
+ ddi_acc_handle_t dev_handle;
+ caddr_t iobase;
+ ddi_acc_handle_t dev_doorbell_reg_handle;
+ caddr_t doorbell_reg_iobase;
+ pci_cfg_t pci_cfg;
+ ddi_acc_handle_t pci_handle;
+ uint32_t page_size;
+ uint32_t sequence;
+ struct intr_ctx intr_ctx[MAX_RX_RINGS];
+ struct dma_info ricb_dma;
+
+ enum mac_state mac_flags;
+
+ volatile uint32_t cfg_flags;
+
+#define CFG_JUMBLE_PACKET BIT_1
+#define CFG_RX_COPY_MODE BIT_2
+#define CFG_SUPPORT_MULTICAST BIT_3
+#define CFG_HW_UNABLE_PSEUDO_HDR_CKSUM BIT_4
+#define CFG_CKSUM_HEADER_IPv4 BIT_5
+#define CFG_CKSUM_PARTIAL BIT_6
+#define CFG_CKSUM_FULL_IPv4 BIT_7
+#define CFG_CKSUM_FULL_IPv6 BIT_8
+#define CFG_LSO BIT_9
+#define CFG_SUPPORT_SCATTER_GATHER BIT_10
+#define CFG_ENABLE_SPLIT_HEADER BIT_11
+#define CFG_ENABLE_EXTENDED_LOGGING BIT_15
+ uint32_t chksum_cap;
+ volatile uint32_t flags;
+#define CFG_CHIP_8100 BIT_16
+
+#define CFG_IST(qlge, cfgflags) (qlge->cfg_flags & cfgflags)
+
+ /* For Shadow Registers, used by adapter to write to host memory */
+ struct dma_info host_copy_shadow_dma_attr;
+ /*
+ * Extra 2x8 bytes memory saving large/small buf queue base address
+ * for each CQICB and read by chip, new request since 8100
+ */
+ struct dma_info buf_q_ptr_base_addr_dma_attr;
+ /*
+ * Debugging
+ */
+ uint32_t ql_dbgprnt;
+ /*
+ * GLD
+ */
+ mac_handle_t mh;
+ mac_resource_handle_t handle;
+ ql_stats_t stats;
+ kstat_t *ql_kstats[QL_KSTAT_COUNT];
+ /*
+ * mutex
+ */
+ kmutex_t gen_mutex; /* general adapter mutex */
+ kmutex_t hw_mutex; /* common hw(nvram)access */
+
+ /*
+ * Generic timer
+ */
+ timeout_id_t ql_timer_timeout_id;
+ clock_t ql_timer_ticks;
+
+ /*
+ * Interrupt
+ */
+ int intr_type;
+ /* for legacy interrupt */
+ ddi_iblock_cookie_t iblock_cookie;
+ /* for MSI and Fixed interrupts */
+ ddi_intr_handle_t *htable; /* For array of interrupts */
+ int intr_cnt; /* # of intrs actually allocated */
+ uint_t intr_pri; /* Interrupt priority */
+ int intr_cap; /* Interrupt capabilities */
+ size_t intr_size; /* size of the allocated */
+ /* interrupt handlers */
+ /* Power management context. */
+ uint8_t power_level;
+#define LOW_POWER_LEVEL (BIT_1 | BIT_0)
+#define MAX_POWER_LEVEL 0
+
+ /*
+ * General NIC
+ */
+ uint32_t xgmac_sem_mask;
+ uint32_t xgmac_sem_bits;
+ uint32_t func_number;
+ uint32_t fn0_net; /* network function 0 port */
+ uint32_t fn1_net; /* network function 1 port */
+
+ uint32_t mtu;
+ uint32_t port_link_state;
+ uint32_t speed;
+ uint16_t link_type;
+ uint32_t duplex;
+ uint32_t pause; /* flow-control mode */
+ uint32_t loop_back_mode;
+ uint32_t lso_enable;
+ /*
+ * PCI status
+ */
+ uint16_t vendor_id;
+ uint16_t device_id;
+
+ /*
+ * Multicast list
+ */
+ uint32_t multicast_list_count;
+ ql_multicast_addr multicast_list[MAX_MULTICAST_LIST_SIZE];
+ boolean_t multicast_promisc;
+ /*
+ * MAC address information
+ */
+ struct ether_addr dev_addr; /* ethernet address read from nvram */
+ qlge_mac_addr_t unicst_addr[MAX_UNICAST_LIST_SIZE];
+ uint32_t unicst_total; /* total unicst addresses */
+ uint32_t unicst_avail;
+ /*
+ * Soft Interrupt handlers
+ */
+ /* soft interrupt handle for MPI interrupt */
+ ddi_softint_handle_t mpi_event_intr_hdl;
+ /* soft interrupt handle for asic reset */
+ ddi_softint_handle_t asic_reset_intr_hdl;
+ /* soft interrupt handle for mpi reset */
+ ddi_softint_handle_t mpi_reset_intr_hdl;
+ /*
+ * IOCTL
+ */
+	/* new ioctl admin flags to work around the 1024-byte max copy in/out */
+ caddr_t ioctl_buf_ptr;
+ uint32_t ioctl_buf_lenth;
+ uint16_t expected_trans_times;
+ uint32_t ioctl_total_length;
+ uint32_t ioctl_transferred_bytes;
+ ql_mpi_coredump_t ql_mpi_coredump;
+ /*
+ * Mailbox lock and flags
+ */
+ boolean_t fw_init_complete;
+ kmutex_t mbx_mutex;
+ boolean_t mbx_wait_completion;
+ kcondvar_t cv_mbx_intr;
+ mbx_data_t received_mbx_cmds;
+ uint_t max_read_mbx;
+ firmware_version_info_t fw_version_info;
+ phy_firmware_version_info_t phy_version_info;
+ port_cfg_info_t port_cfg_info;
+ struct dma_info ioctl_buf_dma_attr;
+
+ /*
+ * Flash
+ */
+ uint32_t flash_fltds_addr;
+ uint32_t flash_flt_fdt_index;
+ uint32_t flash_fdt_addr;
+ uint32_t flash_fdt_size;
+ uint32_t flash_flt_nic_config_table_index;
+ uint32_t flash_nic_config_table_addr;
+ uint32_t flash_nic_config_table_size;
+ uint32_t flash_vpd_addr;
+ ql_flash_info_t flash_info;
+ ql_fltds_t fltds;
+ ql_flt_t flt;
+ uint16_t flash_len; /* size of Flash memory */
+ ql_nic_config_t nic_config;
+ flash_desc_t fdesc;
+ /*
+ * TX / RX
+ */
+ clock_t last_tx_time;
+ boolean_t rx_copy;
+ uint16_t rx_coalesce_usecs;
+ uint16_t rx_max_coalesced_frames;
+ uint16_t tx_coalesce_usecs;
+ uint16_t tx_max_coalesced_frames;
+ uint32_t payload_copy_thresh;
+
+ uint32_t xg_sem_mask;
+
+ uint32_t ip_hdr_offset;
+ uint32_t selected_tx_ring;
+
+ struct rx_ring rx_ring[MAX_RX_RINGS];
+ struct tx_ring tx_ring[MAX_TX_RINGS];
+ uint32_t rx_polls[MAX_RX_RINGS];
+ uint32_t rx_interrupts[MAX_RX_RINGS];
+
+ int tx_ring_size;
+ int rx_ring_size;
+ uint32_t rx_ring_count;
+ uint32_t rss_ring_count;
+ uint32_t tx_ring_first_cq_id;
+ uint32_t tx_ring_count;
+#ifdef QLGE_TRACK_BUFFER_USAGE
+ /* Count no of times the buffers fell below 32 */
+ uint32_t rx_sb_low_count[MAX_RX_RINGS];
+ uint32_t rx_lb_low_count[MAX_RX_RINGS];
+ uint32_t cq_low_count[MAX_RX_RINGS];
+#endif
+} qlge_t;
+
+
+/*
+ * Reconfiguring the network devices requires the net_config privilege
+ * in Solaris 10+.
+ */
+extern int secpolicy_net_config(const cred_t *, boolean_t);
+
+/*
+ * Global Function Prototypes in qlge_dbg.c source file.
+ */
+extern int ql_fw_dump(qlge_t *);
+extern uint8_t ql_get8(qlge_t *, uint32_t);
+extern uint16_t ql_get16(qlge_t *, uint32_t);
+extern uint32_t ql_get32(qlge_t *, uint32_t);
+extern void ql_put8(qlge_t *, uint32_t, uint8_t);
+extern void ql_put16(qlge_t *, uint32_t, uint16_t);
+extern void ql_put32(qlge_t *, uint32_t, uint32_t);
+extern uint32_t ql_read_reg(qlge_t *, uint32_t);
+extern void ql_write_reg(qlge_t *, uint32_t, uint32_t);
+extern void ql_dump_all_contrl_regs(qlge_t *);
+extern int ql_wait_reg_bit(qlge_t *, uint32_t, uint32_t, int, uint32_t);
+extern void ql_dump_pci_config(qlge_t *);
+extern void ql_dump_host_pci_regs(qlge_t *);
+extern void ql_dump_req_pkt(qlge_t *, struct ob_mac_iocb_req *, void *, int);
+extern void ql_dump_cqicb(qlge_t *, struct cqicb_t *);
+extern void ql_dump_wqicb(qlge_t *, struct wqicb_t *);
+extern void ql_gld3_init(qlge_t *, mac_register_t *);
+enum ioc_reply ql_chip_ioctl(qlge_t *, queue_t *, mblk_t *);
+enum ioc_reply ql_loop_ioctl(qlge_t *, queue_t *, mblk_t *, struct iocblk *);
+extern int ql_8xxx_binary_core_dump(qlge_t *, ql_mpi_coredump_t *);
+/*
+ * Global Data in qlge.c source file.
+ */
+extern void qlge_delay(clock_t usecs);
+extern int ql_sem_spinlock(qlge_t *, uint32_t);
+extern void ql_sem_unlock(qlge_t *, uint32_t);
+extern int ql_sem_lock(qlge_t *, uint32_t, uint32_t);
+extern int ql_init_misc_registers(qlge_t *);
+extern int ql_init_mem_resources(qlge_t *);
+extern int ql_do_start(qlge_t *);
+extern int ql_do_stop(qlge_t *);
+extern int ql_add_to_multicast_list(qlge_t *, uint8_t *ep);
+extern int ql_remove_from_multicast_list(qlge_t *, uint8_t *);
+extern void ql_set_promiscuous(qlge_t *, int);
+extern void ql_get_hw_stats(qlge_t *);
+extern int ql_send_common(struct tx_ring *, mblk_t *);
+extern void ql_wake_asic_reset_soft_intr(qlge_t *);
+extern void ql_write_doorbell_reg(qlge_t *, uint32_t *, uint32_t);
+extern uint32_t ql_read_doorbell_reg(qlge_t *, uint32_t *);
+extern int ql_set_mac_addr_reg(qlge_t *, uint8_t *, uint32_t, uint16_t);
+extern int ql_read_xgmac_reg(qlge_t *, uint32_t, uint32_t *);
+extern void ql_enable_completion_interrupt(qlge_t *, uint32_t);
+extern mblk_t *ql_ring_rx_poll(void *, int);
+extern void ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr);
+extern mblk_t *ql_ring_tx(void *arg, mblk_t *mp);
+extern void ql_atomic_set_32(volatile uint32_t *target, uint32_t newval);
+extern uint32_t ql_atomic_read_32(volatile uint32_t *target);
+extern void ql_restart_timer(qlge_t *qlge);
+/*
+ * Global Function Prototypes in qlge_flash.c source file.
+ */
+extern int ql_sem_flash_lock(qlge_t *);
+extern void ql_sem_flash_unlock(qlge_t *);
+extern int qlge_load_flash(qlge_t *, uint8_t *, uint32_t, uint32_t);
+extern int qlge_dump_fcode(qlge_t *, uint8_t *, uint32_t, uint32_t);
+extern int ql_flash_vpd(qlge_t *qlge, uint8_t *buf);
+extern int ql_get_flash_params(qlge_t *qlge);
+/*
+ * Global Function Prototypes in qlge_mpi.c source file.
+ */
+extern void ql_do_mpi_intr(qlge_t *qlge);
+extern int ql_reset_mpi_risc(qlge_t *);
+extern int ql_get_fw_state(qlge_t *, uint32_t *);
+extern int qlge_get_link_status(qlge_t *, struct qlnic_link_status_info *);
+extern int ql_mbx_test(qlge_t *qlge);
+extern int ql_mbx_test2(qlge_t *qlge);
+extern int ql_get_port_cfg(qlge_t *qlge);
+extern int ql_set_port_cfg(qlge_t *qlge);
+extern int ql_get_LED_config(qlge_t *);
+extern int ql_dump_sfp(qlge_t *, void *bp, int mode);
+extern int ql_set_IDC_Req(qlge_t *, uint8_t dest_functions, uint8_t timeout);
+extern void ql_write_flash_test(qlge_t *qlge, uint32_t testAddr);
+extern void ql_write_flash_test2(qlge_t *qlge, uint32_t testAddr);
+extern int ql_get_firmware_version(qlge_t *,
+ struct qlnic_mpi_version_info *);
+extern int ql_read_processor_data(qlge_t *, uint32_t, uint32_t *);
+extern int ql_write_processor_data(qlge_t *, uint32_t, uint32_t);
+extern int ql_read_risc_ram(qlge_t *, uint32_t, uint64_t, uint32_t);
+extern int ql_trigger_system_error_event(qlge_t *qlge);
+
+extern void ql_core_dump(qlge_t *);
+extern void ql_dump_crash_record(qlge_t *);
+extern void ql_dump_buf(char *, uint8_t *, uint8_t, uint32_t);
+extern void ql_printf(const char *, ...);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _QLGE_H */
diff --git a/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_dbg.h b/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_dbg.h
new file mode 100644
index 0000000000..29aec27b5f
--- /dev/null
+++ b/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_dbg.h
@@ -0,0 +1,104 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 QLogic Corporation. All rights reserved.
+ */
+
+#ifndef _QLGE_DBG_H
+#define _QLGE_DBG_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Driver debug definitions in makefile.
+ */
+
+#define QL_DEBUG_LEVELS 0x2
+
+#define DBG_NVRAM 0x01 /* Registers, PCI */
+#define DBG_INIT 0x02
+#define DBG_GLD 0x04
+#define DBG_MBX 0x08
+#define DBG_FLASH 0x08
+#define DBG_RX 0x10
+#define DBG_RX_RING 0x20
+#define DBG_TX 0x40
+#define DBG_STATS 0x80
+#define DBG_INTR 0x100
+
+#ifdef QL_DUMPFW
+#define QLA_CORE_DUMP(qlge) ql_core_dump(qlge);
+#define QLA_DUMP_CRASH_RECORD(qlge) ql_dump_crash_record(qlge)
+#else
+#define QLA_CORE_DUMP(qlge)
+#define QLA_DUMP_CRASH_RECORD(qlge)
+#endif
+
+#if QL_DEBUG
+
+#define QL_DUMP_BUFFER(a, b, c, d) \
+ ql_dump_buf((char *)a, (uint8_t *)b, (uint8_t)c, (uint32_t)d)
+
+#define QL_PRINT_1(x) ql_printf x
+
+#define QL_PRINT(dbg_level, x) \
+ if (qlge->ql_dbgprnt & dbg_level) ql_printf x
+#define QL_DUMP(dbg_level, a, b, c, d) \
+ if (qlge->ql_dbgprnt & dbg_level) QL_DUMP_BUFFER(a, b, c, d)
+
+#define QL_DUMP_REQ_PKT(qlge, pkt, oal, num) if (qlge->ql_dbgprnt & DBG_TX) \
+ ql_dump_req_pkt(qlge, pkt, oal, num)
+
+#define QL_DUMP_CQICB(qlge, cqicb) if (qlge->ql_dbgprnt & DBG_INIT) \
+ ql_dump_cqicb(qlge, cqicb)
+
+#define QL_DUMP_WQICB(qlge, wqicb) if (qlge->ql_dbgprnt & DBG_INIT) \
+ ql_dump_wqicb(qlge, wqicb)
+
+#else
+
+#define QLA_HOST_PCI_REGS(qlge)
+
+#define QL_DUMP_BUFFER(a, b, c, d)
+#define QL_DUMP(dbg_level, a, b, c, d)
+#define QL_DEBUG_PRINT(x)
+#define QL_PRINT(dbg_level, x)
+#define QL_DUMP_REQ_PKT(qlge, pkt, oal, num)
+#define	QL_DUMP_CQICB(qlge, cqicb)
+#define	QL_DUMP_WQICB(qlge, wqicb)
+
+#endif /* QL_DEBUG */
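+
+/*
+ * Illustrative usage (a sketch, not driver code): with QL_DEBUG defined,
+ * a call such as
+ *
+ *	QL_PRINT(DBG_INIT, ("%d rx rings active\n", qlge->rx_ring_count));
+ *
+ * expands to ql_printf() only when DBG_INIT is set in qlge->ql_dbgprnt;
+ * in non-debug builds it compiles away entirely. Note the extra
+ * parentheses around the argument list, which the macro requires.
+ */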
+
+/*
+ * Error and Extended Logging Macros.
+ */
+#define QL_BANG "!"
+#define QL_QUESTION "?"
+#define QL_CAROT "^"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _QLGE_DBG_H */
diff --git a/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_hw.h b/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_hw.h
new file mode 100644
index 0000000000..d20846fe0f
--- /dev/null
+++ b/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_hw.h
@@ -0,0 +1,2503 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 QLogic Corporation. All rights reserved.
+ */
+
+#ifndef _QLGE_HW_H
+#define _QLGE_HW_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ISP_SCHULTZ 0x8000
+
+#define MB_REG_COUNT 8
+#define MB_DATA_REG_COUNT (MB_REG_COUNT-1)
+
+
+#define QLA_SCHULTZ(qlge) ((qlge)->device_id == ISP_SCHULTZ)
+
+/*
+ * Data bit definitions.
+ */
+#define BIT_0 0x1
+#define BIT_1 0x2
+#define BIT_2 0x4
+#define BIT_3 0x8
+#define BIT_4 0x10
+#define BIT_5 0x20
+#define BIT_6 0x40
+#define BIT_7 0x80
+#define BIT_8 0x100
+#define BIT_9 0x200
+#define BIT_10 0x400
+#define BIT_11 0x800
+#define BIT_12 0x1000
+#define BIT_13 0x2000
+#define BIT_14 0x4000
+#define BIT_15 0x8000
+#define BIT_16 0x10000
+#define BIT_17 0x20000
+#define BIT_18 0x40000
+#define BIT_19 0x80000
+#define BIT_20 0x100000
+#define BIT_21 0x200000
+#define BIT_22 0x400000
+#define BIT_23 0x800000
+#define BIT_24 0x1000000
+#define BIT_25 0x2000000
+#define BIT_26 0x4000000
+#define BIT_27 0x8000000
+#define BIT_28 0x10000000
+#define BIT_29 0x20000000
+#define BIT_30 0x40000000
+#define BIT_31 0x80000000
+
+typedef struct ql_stats
+{
+ uint32_t intr_type;
+	/* software statistics */
+ uint32_t intr;
+ uint64_t speed;
+ uint32_t duplex;
+ uint32_t media;
+ /* TX */
+ uint64_t obytes;
+ uint64_t opackets;
+ uint32_t nocarrier;
+ uint32_t defer;
+ /* RX */
+ uint64_t rbytes;
+ uint64_t rpackets;
+ uint32_t norcvbuf;
+ uint32_t frame_too_long;
+ uint32_t crc;
+ ulong_t multircv;
+ ulong_t brdcstrcv;
+ uint32_t errrcv;
+ uint32_t frame_too_short;
+	/* statistics kept by hw */
+ uint32_t errxmt;
+ uint32_t frame_err;
+ ulong_t multixmt;
+ ulong_t brdcstxmt;
+ uint32_t phy_addr;
+ uint32_t jabber_err;
+} ql_stats_t;
+
+
+#define ETHERNET_CRC_SIZE 4
+
+/*
+ * Register Definitions...
+ */
+#define MAILBOX_COUNT 16
+/* System Register 0x00 */
+#define PROC_ADDR_RDY BIT_31
+#define PROC_ADDR_R BIT_30
+#define PROC_ADDR_ERR BIT_29
+#define PROC_ADDR_DA BIT_28
+#define PROC_ADDR_FUNC0_MBI 0x00001180
+#define PROC_ADDR_FUNC0_MBO (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT)
+#define PROC_ADDR_FUNC0_CTL 0x000011a1
+#define PROC_ADDR_FUNC2_MBI 0x00001280
+#define PROC_ADDR_FUNC2_MBO (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT)
+#define PROC_ADDR_FUNC2_CTL 0x000012a1
+#define PROC_ADDR_MPI_RISC 0x00000000
+#define PROC_ADDR_MDE 0x00010000
+#define PROC_ADDR_REGBLOCK 0x00020000
+#define PROC_ADDR_RISC_REG 0x00030000
+
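+/*
+ * Illustrative sketch: a word of MPI RISC memory can be read through the
+ * processor address/data register pair via ql_read_processor_data()
+ * (declared in qlge.h), e.g.
+ *
+ *	uint32_t data;
+ *	(void) ql_read_processor_data(qlge,
+ *	    PROC_ADDR_MPI_RISC | risc_addr, &data);
+ *
+ * where risc_addr is a hypothetical word offset in the RISC address space.
+ */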
+
+/* System Register 0x08 */
+#define SYSTEM_EFE_FAE 0x3u
+#define SYSTEM_EFE_FAE_MASK (SYSTEM_EFE_FAE<<16)
+enum {
+ SYS_EFE = (1 << 0),
+ SYS_FAE = (1 << 1),
+ SYS_MDC = (1 << 2),
+ SYS_DST = (1 << 3),
+ SYS_DWC = (1 << 4),
+ SYS_EVW = (1 << 5),
+ SYS_OMP_DLY_MASK = 0x3f000000,
+ /*
+ * There are no values defined as of edit #15.
+ */
+ SYS_ODI = (1 << 14)
+};
+
+/*
+ * Reset/Failover Register (RST_FO) bit definitions.
+ */
+
+#define RST_FO_TFO (1 << 0)
+#define RST_FO_RR_MASK 0x00060000
+#define RST_FO_RR_CQ_CAM 0x00000000
+#define RST_FO_RR_DROP 0x00000001
+#define RST_FO_RR_DQ 0x00000002
+#define RST_FO_RR_RCV_FUNC_CQ 0x00000003
+#define RST_FO_FRB BIT_12
+#define RST_FO_MOP BIT_13
+#define RST_FO_REG BIT_14
+#define RST_FO_FR 0x8000u
+
+/*
+ * Function Specific Control Register (FSC) bit definitions.
+ */
+enum {
+ FSC_DBRST_MASK = 0x00070000,
+ FSC_DBRST_256 = 0x00000000,
+ FSC_DBRST_512 = 0x00000001,
+ FSC_DBRST_768 = 0x00000002,
+ FSC_DBRST_1024 = 0x00000003,
+ FSC_DBL_MASK = 0x00180000,
+ FSC_DBL_DBRST = 0x00000000,
+ FSC_DBL_MAX_PLD = 0x00000008,
+ FSC_DBL_MAX_BRST = 0x00000010,
+ FSC_DBL_128_BYTES = 0x00000018,
+ FSC_EC = (1 << 5),
+ FSC_EPC_MASK = 0x00c00000,
+ FSC_EPC_INBOUND = (1 << 6),
+ FSC_EPC_OUTBOUND = (1 << 7),
+ FSC_VM_PAGESIZE_MASK = 0x07000000,
+ FSC_VM_PAGE_2K = 0x00000100,
+ FSC_VM_PAGE_4K = 0x00000200,
+ FSC_VM_PAGE_8K = 0x00000300,
+ FSC_VM_PAGE_64K = 0x00000600,
+ FSC_SH = (1 << 11),
+ FSC_DSB = (1 << 12),
+ FSC_STE = (1 << 13),
+ FSC_FE = (1 << 15)
+};
+
+/*
+ * Host Command Status Register (CSR) bit definitions.
+ */
+#define CSR_ERR_STS_MASK 0x0000003f
+/*
+ * There are no values defined as of edit #15.
+ */
+#define CSR_RR BIT_8
+#define CSR_HRI BIT_9
+#define CSR_RP BIT_10
+#define CSR_CMD_PARM_SHIFT 22
+#define CSR_CMD_NOP 0x00000000
+#define CSR_CMD_SET_RST 0x1000000
+#define CSR_CMD_CLR_RST 0x20000000
+#define CSR_CMD_SET_PAUSE 0x30000000
+#define CSR_CMD_CLR_PAUSE 0x40000000
+#define CSR_CMD_SET_H2R_INT 0x50000000
+#define CSR_CMD_CLR_H2R_INT 0x60000000
+#define CSR_CMD_PAR_EN 0x70000000
+#define CSR_CMD_SET_BAD_PAR 0x80000000u
+#define CSR_CMD_CLR_BAD_PAR 0x90000000u
+#define CSR_CMD_CLR_R2PCI_INT 0xa0000000u
+
+/*
+ * Configuration Register (CFG) bit definitions.
+ */
+enum {
+ CFG_LRQ = (1 << 0),
+ CFG_DRQ = (1 << 1),
+ CFG_LR = (1 << 2),
+ CFG_DR = (1 << 3),
+ CFG_LE = (1 << 5),
+ CFG_LCQ = (1 << 6),
+ CFG_DCQ = (1 << 7),
+ CFG_Q_SHIFT = 8,
+ CFG_Q_MASK = 0x7f000000
+};
+
+/*
+ * Status Register (STS) bit definitions.
+ */
+enum {
+ STS_FE = (1 << 0),
+ STS_PI = (1 << 1),
+ STS_PL0 = (1 << 2),
+ STS_PL1 = (1 << 3),
+ STS_PI0 = (1 << 4),
+ STS_PI1 = (1 << 5),
+ STS_FUNC_ID_MASK = 0x000000c0,
+ STS_FUNC_ID_SHIFT = 6,
+ STS_F0E = (1 << 8),
+ STS_F1E = (1 << 9),
+ STS_F2E = (1 << 10),
+ STS_F3E = (1 << 11),
+ STS_NFE = (1 << 12)
+};
+
+/*
+ * Register (REV_ID) bit definitions.
+ */
+enum {
+ REV_ID_MASK = 0x0000000f,
+ REV_ID_NICROLL_SHIFT = 0,
+ REV_ID_NICREV_SHIFT = 4,
+ REV_ID_XGROLL_SHIFT = 8,
+ REV_ID_XGREV_SHIFT = 12,
+ REV_ID_CHIPREV_SHIFT = 28
+};
+
+/*
+ * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
+ */
+enum {
+ FRC_ECC_ERR_VW = (1 << 12),
+ FRC_ECC_ERR_VB = (1 << 13),
+ FRC_ECC_ERR_NI = (1 << 14),
+ FRC_ECC_ERR_NO = (1 << 15),
+ FRC_ECC_PFE_SHIFT = 16,
+ FRC_ECC_ERR_DO = (1 << 18),
+ FRC_ECC_P14 = (1 << 19)
+};
+
+/*
+ * Error Status Register (ERR_STS) bit definitions.
+ */
+enum {
+ ERR_STS_NOF = (1 << 0),
+ ERR_STS_NIF = (1 << 1),
+ ERR_STS_DRP = (1 << 2),
+ ERR_STS_XGP = (1 << 3),
+ ERR_STS_FOU = (1 << 4),
+ ERR_STS_FOC = (1 << 5),
+ ERR_STS_FOF = (1 << 6),
+ ERR_STS_FIU = (1 << 7),
+ ERR_STS_FIC = (1 << 8),
+ ERR_STS_FIF = (1 << 9),
+ ERR_STS_MOF = (1 << 10),
+ ERR_STS_TA = (1 << 11),
+ ERR_STS_MA = (1 << 12),
+ ERR_STS_MPE = (1 << 13),
+ ERR_STS_SCE = (1 << 14),
+ ERR_STS_STE = (1 << 15),
+ ERR_STS_FOW = (1 << 16),
+ ERR_STS_UE = (1 << 17),
+ ERR_STS_MCH = (1 << 26),
+ ERR_STS_LOC_SHIFT = 27
+};
+
+/*
+ * Semaphore Register (SEM) bit definitions.
+ */
+/*
+ * Example:
+ * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
+ */
+#define SEM_CLEAR 0
+#define SEM_SET 1
+#define SEM_FORCE 3
+#define SEM_XGMAC0_SHIFT 0
+#define SEM_XGMAC1_SHIFT 2
+#define SEM_ICB_SHIFT 4
+#define SEM_MAC_ADDR_SHIFT 6
+#define SEM_FLASH_SHIFT 8
+#define SEM_PROBE_SHIFT 10
+#define SEM_RT_IDX_SHIFT 12
+#define SEM_PROC_REG_SHIFT 14
+#define SEM_XGMAC0_MASK 0x00030000
+#define SEM_XGMAC1_MASK 0x000c0000
+#define SEM_ICB_MASK 0x00300000
+#define SEM_MAC_ADDR_MASK 0x00c00000
+#define SEM_FLASH_MASK 0x03000000
+#define SEM_PROBE_MASK 0x0c000000
+#define SEM_RT_IDX_MASK 0x30000000
+#define SEM_PROC_REG_MASK 0xc0000000
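+
+/*
+ * Sketch of the expected locking pattern (illustrative only, assuming the
+ * helpers declared in qlge.h return DDI_SUCCESS on success): take the
+ * flash semaphore around a flash access, e.g.
+ *
+ *	if (ql_sem_spinlock(qlge, SEM_FLASH_MASK) != DDI_SUCCESS)
+ *		return (DDI_FAILURE);
+ *	... access flash registers ...
+ *	ql_sem_unlock(qlge, SEM_FLASH_MASK);
+ */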
+
+/*
+ * Stop CQ Processing Register (CQ_STOP) bit definitions.
+ */
+enum {
+ CQ_STOP_QUEUE_MASK = (0x007f0000),
+ CQ_STOP_TYPE_MASK = (0x03000000),
+ CQ_STOP_TYPE_START = 0x00000100,
+ CQ_STOP_TYPE_STOP = 0x00000200,
+ CQ_STOP_TYPE_READ = 0x00000300,
+ CQ_STOP_EN = (1 << 15)
+};
+
+/*
+ * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
+ */
+#define MAC_ADDR_IDX_SHIFT 4
+#define MAC_ADDR_TYPE_SHIFT 16
+#define MAC_ADDR_TYPE_MASK 0x000f0000
+#define MAC_ADDR_TYPE_CAM_MAC 0x00000000
+#define MAC_ADDR_TYPE_MULTI_MAC 0x00010000
+#define MAC_ADDR_TYPE_VLAN 0x00020000
+#define MAC_ADDR_TYPE_MULTI_FLTR 0x00030000
+#define MAC_ADDR_TYPE_FC_MAC 0x00040000
+#define MAC_ADDR_TYPE_MGMT_MAC 0x00050000
+#define MAC_ADDR_TYPE_MGMT_VLAN 0x00060000
+#define MAC_ADDR_TYPE_MGMT_V4 0x00070000
+#define MAC_ADDR_TYPE_MGMT_V6 0x00080000
+#define MAC_ADDR_TYPE_MGMT_TU_DP 0x00090000
+#define MAC_ADDR_ADR BIT_25
+#define MAC_ADDR_RS BIT_26
+#define MAC_ADDR_E BIT_27
+#define MAC_ADDR_MR BIT_30
+#define MAC_ADDR_MW BIT_31
+#define MAX_MULTICAST_HW_SIZE 32
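+
+/*
+ * Illustrative sketch: a unicast CAM entry would be programmed through
+ * ql_set_mac_addr_reg() (declared in qlge.h), e.g.
+ *
+ *	(void) ql_set_mac_addr_reg(qlge, mac_addr,
+ *	    MAC_ADDR_TYPE_CAM_MAC, 0);
+ *
+ * where mac_addr is a hypothetical pointer to the six-byte station
+ * address and 0 is the CAM index.
+ */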
+
+/*
+ * Split Header Register (SPLT_HDR, 0xC0) bit definitions.
+ */
+#define SPLT_HDR_EP BIT_31
+
+/*
+ * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
+ */
+enum {
+ NIC_RCV_CFG_PPE = (1 << 0),
+ NIC_RCV_CFG_VLAN_MASK = 0x00060000,
+ NIC_RCV_CFG_VLAN_ALL = 0x00000000,
+ NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
+ NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
+ NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
+ NIC_RCV_CFG_RV = (1 << 3),
+ NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
+ NIC_RCV_CFG_DFQ_SHIFT = 8,
+ NIC_RCV_CFG_DFQ = 0 /* HARDCODE default queue to 0. */
+};
+
+/*
+ * Routing Index Register (RT_IDX) bit definitions.
+ */
+#define RT_IDX_IDX_SHIFT 8
+#define RT_IDX_TYPE_MASK 0x000f0000
+#define RT_IDX_TYPE_RT 0x00000000
+#define RT_IDX_TYPE_RT_INV 0x00010000
+#define RT_IDX_TYPE_NICQ 0x00020000
+#define RT_IDX_TYPE_NICQ_INV 0x00030000
+#define RT_IDX_DST_MASK 0x00700000
+#define RT_IDX_DST_RSS 0x00000000
+#define RT_IDX_DST_CAM_Q 0x00100000
+#define RT_IDX_DST_COS_Q 0x00200000
+#define RT_IDX_DST_DFLT_Q 0x00300000
+#define RT_IDX_DST_DEST_Q 0x00400000
+#define RT_IDX_RS BIT_26
+#define RT_IDX_E BIT_27
+#define RT_IDX_MR BIT_30
+#define RT_IDX_MW BIT_31
+
+/* Nic Queue format - type 2 bits */
+#define RT_IDX_BCAST 1
+#define RT_IDX_MCAST BIT_1
+#define RT_IDX_MCAST_MATCH BIT_2
+#define RT_IDX_MCAST_REG_MATCH BIT_3
+#define RT_IDX_MCAST_HASH_MATCH BIT_4
+#define RT_IDX_FC_MACH BIT_5
+#define RT_IDX_ETH_FCOE BIT_6
+#define RT_IDX_CAM_HIT BIT_7
+#define RT_IDX_CAM_BIT0 BIT_8
+#define RT_IDX_CAM_BIT1 BIT_9
+#define RT_IDX_VLAN_TAG BIT_10
+#define RT_IDX_VLAN_MATCH BIT_11
+#define RT_IDX_VLAN_FILTER BIT_12
+#define RT_IDX_ETH_SKIP1 BIT_13
+#define RT_IDX_ETH_SKIP2 BIT_14
+#define RT_IDX_BCAST_MCAST_MATCH BIT_15
+#define RT_IDX_802_3 BIT_16
+#define RT_IDX_LLDP BIT_17
+#define RT_IDX_UNUSED018 BIT_18
+#define RT_IDX_UNUSED019 BIT_19
+#define RT_IDX_UNUSED20 BIT_20
+#define RT_IDX_UNUSED21 BIT_21
+#define RT_IDX_ERR BIT_22
+#define RT_IDX_VALID BIT_23
+#define RT_IDX_TU_CSUM_ERR BIT_24
+#define RT_IDX_IP_CSUM_ERR BIT_25
+#define RT_IDX_MAC_ERR BIT_26
+#define RT_IDX_RSS_TCP6 BIT_27
+#define RT_IDX_RSS_TCP4 BIT_28
+#define RT_IDX_RSS_IPV6 BIT_29
+#define RT_IDX_RSS_IPV4 BIT_30
+#define RT_IDX_RSS_MATCH BIT_31
+
+/* Hierarchy for the NIC Queue Mask */
+enum {
+ RT_IDX_ALL_ERR_SLOT = 0,
+ RT_IDX_MAC_ERR_SLOT = 0,
+ RT_IDX_IP_CSUM_ERR_SLOT = 1,
+ RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
+ RT_IDX_BCAST_SLOT = 3,
+ RT_IDX_MCAST_MATCH_SLOT = 4,
+ RT_IDX_ALLMULTI_SLOT = 5,
+ RT_IDX_UNUSED6_SLOT = 6,
+ RT_IDX_UNUSED7_SLOT = 7,
+ RT_IDX_RSS_MATCH_SLOT = 8,
+ RT_IDX_RSS_IPV4_SLOT = 8,
+ RT_IDX_RSS_IPV6_SLOT = 9,
+ RT_IDX_RSS_TCP4_SLOT = 10,
+ RT_IDX_RSS_TCP6_SLOT = 11,
+ RT_IDX_CAM_HIT_SLOT = 12,
+ RT_IDX_UNUSED013 = 13,
+ RT_IDX_UNUSED014 = 14,
+ RT_IDX_PROMISCUOUS_SLOT = 15,
+ RT_IDX_MAX_SLOTS = 16
+};
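+
+/*
+ * Illustrative sketch: a routing index register value combines a type, a
+ * slot and the enable bit; e.g. to route broadcast frames:
+ *
+ *	value = RT_IDX_E | RT_IDX_TYPE_NICQ |
+ *	    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);
+ *
+ * The matching write to the routing data register (REG_ROUTING_DATA)
+ * then selects the destination queue.
+ */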
+
+enum {
+ CAM_OUT_ROUTE_FC = 0,
+ CAM_OUT_ROUTE_NIC = 1,
+ CAM_OUT_FUNC_SHIFT = 2,
+ CAM_OUT_RV = (1 << 4),
+ CAM_OUT_SH = (1 << 15),
+ CAM_OUT_CQ_ID_SHIFT = 5
+};
+
+/* Reset/Failover Register 0C */
+#define FUNCTION_RESET 0x8000u
+#define FUNCTION_RESET_MASK (FUNCTION_RESET<<16)
+
+/* Function Specific Control Register 0x10 */
+#define FSC_MASK (0x97ffu << 16)
+#define FSC_FE 0x8000
+
+/* Configuration Register 0x28 */
+#define LOAD_LCQ 0x40
+#define LOAD_LCQ_MASK (0x7F40u << 16)
+#define LOAD_ICB_ERR 0x20
+#define LOAD_LRQ 0x01
+#define LOAD_LRQ_MASK (0x7F01u << 16)
+
+#define FN0_NET 0
+#define FN1_NET 1
+#define FN0_FC 2
+#define FN1_FC 3
+
+/* Interrupt Status Register-1 0x3C */
+#define CQ_0_NOT_EMPTY BIT_0
+#define CQ_1_NOT_EMPTY BIT_1
+#define CQ_2_NOT_EMPTY BIT_2
+#define CQ_3_NOT_EMPTY BIT_3
+#define CQ_4_NOT_EMPTY BIT_4
+#define CQ_5_NOT_EMPTY BIT_5
+#define CQ_6_NOT_EMPTY BIT_6
+#define CQ_7_NOT_EMPTY BIT_7
+#define CQ_8_NOT_EMPTY BIT_8
+#define CQ_9_NOT_EMPTY BIT_9
+#define CQ_10_NOT_EMPTY BIT_10
+#define CQ_11_NOT_EMPTY BIT_11
+#define CQ_12_NOT_EMPTY BIT_12
+#define CQ_13_NOT_EMPTY BIT_13
+#define CQ_14_NOT_EMPTY BIT_14
+#define CQ_15_NOT_EMPTY BIT_15
+#define CQ_16_NOT_EMPTY BIT_16
+/* Processor Address Register 0x00 */
+#define PROCESSOR_ADDRESS_RDY (0x8000u<<16)
+#define PROCESSOR_ADDRESS_READ (0x4000u<<16)
+/* Host Command/Status Register 0x14 */
+#define HOST_CMD_SET_RISC_RESET 0x10000000u
+#define HOST_CMD_CLEAR_RISC_RESET 0x20000000u
+#define HOST_CMD_SET_RISC_PAUSE 0x30000000u
+#define HOST_CMD_RELEASE_RISC_PAUSE 0x40000000u
+#define HOST_CMD_SET_RISC_INTR 0x50000000u
+#define HOST_CMD_CLEAR_RISC_INTR 0x60000000u
+#define HOST_CMD_SET_PARITY_ENABLE 0x70000000u
+#define HOST_CMD_FORCE_BAD_PARITY 0x80000000u
+#define HOST_CMD_RELEASE_BAD_PARITY 0x90000000u
+#define HOST_CMD_CLEAR_RISC_TO_HOST_INTR 0xA0000000u
+#define HOST_TO_MPI_INTR_NOT_DONE 0x200
+
+#define RISC_RESET BIT_8
+#define RISC_PAUSED BIT_10
+/* Semaphore Register 0x64 */
+#define QL_SEM_BITS_BASE_CODE 0x1u
+#define QL_PORT0_XGMAC_SEM_BITS (QL_SEM_BITS_BASE_CODE)
+#define QL_PORT1_XGMAC_SEM_BITS (QL_SEM_BITS_BASE_CODE << 2)
+#define QL_ICB_ACCESS_ADDRESS_SEM_BITS (QL_SEM_BITS_BASE_CODE << 4)
+#define QL_MAC_PROTOCOL_SEM_BITS (QL_SEM_BITS_BASE_CODE << 6)
+#define QL_FLASH_SEM_BITS (QL_SEM_BITS_BASE_CODE << 8)
+#define QL_PROBE_MUX_SEM_BITS (QL_SEM_BITS_BASE_CODE << 10)
+#define QL_ROUTING_INDEX_SEM_BITS (QL_SEM_BITS_BASE_CODE << 12)
+#define QL_PROCESSOR_SEM_BITS (QL_SEM_BITS_BASE_CODE << 14)
+#define QL_NIC_RECV_CONFIG_SEM_BITS (QL_SEM_BITS_BASE_CODE << 14)
+
+#define QL_SEM_MASK_BASE_CODE 0x30000u
+#define QL_PORT0_XGMAC_SEM_MASK (QL_SEM_MASK_BASE_CODE)
+#define QL_PORT1_XGMAC_SEM_MASK (QL_SEM_MASK_BASE_CODE << 2)
+#define QL_ICB_ACCESS_ADDRESS_SEM_MASK (QL_SEM_MASK_BASE_CODE << 4)
+#define QL_MAC_PROTOCOL_SEM_MASK (QL_SEM_MASK_BASE_CODE << 6)
+#define QL_FLASH_SEM_MASK (QL_SEM_MASK_BASE_CODE << 8)
+#define QL_PROBE_MUX_SEM_MASK (QL_SEM_MASK_BASE_CODE << 10)
+#define QL_ROUTING_INDEX_SEM_MASK (QL_SEM_MASK_BASE_CODE << 12)
+#define QL_PROCESSOR_SEM_MASK (QL_SEM_MASK_BASE_CODE << 14)
+#define QL_NIC_RECV_CONFIG_SEM_MASK (QL_SEM_MASK_BASE_CODE << 14)
+
+/* XGMAC Address Register 0x78 */
+#define XGMAC_ADDRESS_RDY (0x8000u<<16)
+#define XGMAC_ADDRESS_READ_TRANSACT (0x4000u<<16)
+#define XGMAC_ADDRESS_ACCESS_ERROR (0x2000u<<16)
+
+/* XGMAC Register Set */
+#define REG_XGMAC_GLOBAL_CONFIGURATION 0x108
+#define GLOBAL_CONFIG_JUMBO_MODE 0x40
+
+#define REG_XGMAC_MAC_TX_CONFIGURATION 0x10C
+#define XGMAC_MAC_TX_ENABLE 0x02
+
+#define REG_XGMAC_MAC_RX_CONFIGURATION 0x110
+#define XGMAC_MAC_RX_ENABLE 0x02
+
+#define REG_XGMAC_FLOW_CONTROL 0x11C
+
+#define REG_XGMAC_MAC_TX_PARAM 0x134
+#define REG_XGMAC_MAC_RX_PARAM 0x138
+
+#define REG_XGMAC_MAC_TX_PKTS 0x200
+#define REG_XGMAC_MAC_TX_OCTETS 0x208
+#define REG_XGMAC_MAC_TX_MULTCAST_PKTS 0x210
+#define REG_XGMAC_MAC_TX_BROADCAST_PKTS 0x218
+#define REG_XGMAC_MAC_TX_PAUSE_PKTS 0x230
+
+#define REG_XGMAC_MAC_RX_OCTETS 0x300
+#define REG_XGMAC_MAC_RX_OCTETS_OK 0x308
+#define REG_XGMAC_MAC_RX_PKTS 0x310
+#define REG_XGMAC_MAC_RX_PKTS_OK 0x318
+#define REG_XGMAC_MAC_RX_BROADCAST_PKTS 0x320
+#define REG_XGMAC_MAC_RX_MULTCAST_PKTS 0x328
+#define REG_XGMAC_MAC_RX_JABBER_PKTS 0x348
+#define REG_XGMAC_MAC_FCS_ERR 0x360
+#define REG_XGMAC_MAC_ALIGN_ERR 0x368
+#define REG_XGMAC_MAC_RX_SYM_ERR 0x370
+#define REG_XGMAC_MAC_RX_INT_ERR 0x378
+#define REG_XGMAC_MAC_RX_PAUSE_PKTS 0x388
+#define REG_XGMAC_MAC_PHY_ADDR 0x430
+#define REG_XGMAC_MAC_RX_FIFO_DROPS 0x5B8
+
+
+/* MAC Protocol Address Index Register Set 0xA8 */
+#define MAC_PROTOCOL_ADDRESS_INDEX_MW (0x8000u<<16)
+#define MAC_PROTOCOL_ADDRESS_ENABLE (1 << 27)
+#define MAC_PROTOCOL_TYPE_CAM_MAC (0x0)
+#define MAC_PROTOCOL_TYPE_MULTICAST (0x10000u)
+
+/* NIC Receive Configuration Register 0xD4 */
+#define RECV_CONFIG_DEFAULT_Q_MASK (0x7F000000u)
+#define RECV_CONFIG_VTAG_REMOVAL_MASK (0x80000u)
+#define RECV_CONFIG_VTAG_RV 0x08
+
+/*
+ * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
+ */
+#define XGMAC_ADDR_RDY (1 << 31)
+#define XGMAC_ADDR_R (1 << 30)
+#define XGMAC_ADDR_XME (1 << 29)
+
+#define PAUSE_SRC_LO 0x00000100
+#define PAUSE_SRC_HI 0x00000104
+#define GLOBAL_CFG 0x00000108
+#define GLOBAL_CFG_RESET (1 << 0)
+#define GLOBAL_CFG_JUMBO (1 << 6)
+#define GLOBAL_CFG_TX_STAT_EN (1 << 10)
+#define GLOBAL_CFG_RX_STAT_EN (1 << 11)
+#define TX_CFG 0x0000010c
+#define TX_CFG_RESET (1 << 0)
+#define TX_CFG_EN (1 << 1)
+#define TX_CFG_PREAM (1 << 2)
+#define RX_CFG 0x00000110
+#define RX_CFG_RESET (1 << 0)
+#define RX_CFG_EN (1 << 1)
+#define RX_CFG_PREAM (1 << 2)
+#define FLOW_CTL 0x0000011c
+#define PAUSE_OPCODE 0x00000120
+#define PAUSE_TIMER 0x00000124
+#define PAUSE_FRM_DEST_LO 0x00000128
+#define PAUSE_FRM_DEST_HI 0x0000012c
+#define MAC_TX_PARAMS 0x00000134
+#define MAC_TX_PARAMS_JUMBO (1 << 31)
+#define MAC_TX_PARAMS_SIZE_SHIFT 16
+#define MAC_RX_PARAMS 0x00000138
+#define MAC_SYS_INT 0x00000144
+#define MAC_SYS_INT_MASK 0x00000148
+#define MAC_MGMT_INT 0x0000014c
+#define MAC_MGMT_IN_MASK 0x00000150
+#define EXT_ARB_MODE 0x000001fc
+#define TX_PKTS 0x00000200
+#define TX_PKTS_LO 0x00000204
+#define TX_BYTES 0x00000208
+#define TX_BYTES_LO 0x0000020C
+#define TX_MCAST_PKTS 0x00000210
+#define TX_MCAST_PKTS_LO 0x00000214
+#define TX_BCAST_PKTS 0x00000218
+#define TX_BCAST_PKTS_LO 0x0000021C
+#define TX_UCAST_PKTS 0x00000220
+#define TX_UCAST_PKTS_LO 0x00000224
+#define TX_CTL_PKTS 0x00000228
+#define TX_CTL_PKTS_LO 0x0000022c
+#define TX_PAUSE_PKTS 0x00000230
+#define TX_PAUSE_PKTS_LO 0x00000234
+#define TX_64_PKT 0x00000238
+#define TX_64_PKT_LO 0x0000023c
+#define TX_65_TO_127_PKT 0x00000240
+#define TX_65_TO_127_PKT_LO 0x00000244
+#define TX_128_TO_255_PKT 0x00000248
+#define TX_128_TO_255_PKT_LO 0x0000024c
+#define TX_256_511_PKT 0x00000250
+#define TX_256_511_PKT_LO 0x00000254
+#define TX_512_TO_1023_PKT 0x00000258
+#define TX_512_TO_1023_PKT_LO 0x0000025c
+#define TX_1024_TO_1518_PKT 0x00000260
+#define TX_1024_TO_1518_PKT_LO 0x00000264
+#define TX_1519_TO_MAX_PKT 0x00000268
+#define TX_1519_TO_MAX_PKT_LO 0x0000026c
+#define TX_UNDERSIZE_PKT 0x00000270
+#define TX_UNDERSIZE_PKT_LO 0x00000274
+#define TX_OVERSIZE_PKT 0x00000278
+#define TX_OVERSIZE_PKT_LO 0x0000027c
+#define RX_HALF_FULL_DET 0x000002a0
+#define TX_HALF_FULL_DET_LO 0x000002a4
+#define RX_OVERFLOW_DET 0x000002a8
+#define TX_OVERFLOW_DET_LO 0x000002ac
+#define RX_HALF_FULL_MASK 0x000002b0
+#define TX_HALF_FULL_MASK_LO 0x000002b4
+#define RX_OVERFLOW_MASK 0x000002b8
+#define TX_OVERFLOW_MASK_LO 0x000002bc
+#define STAT_CNT_CTL 0x000002c0
+#define STAT_CNT_CTL_CLEAR_TX (1 << 0) /* Control */
+#define STAT_CNT_CTL_CLEAR_RX (1 << 1) /* Control */
+#define AUX_RX_HALF_FULL_DET 0x000002d0
+#define AUX_TX_HALF_FULL_DET 0x000002d4
+#define AUX_RX_OVERFLOW_DET 0x000002d8
+#define AUX_TX_OVERFLOW_DET 0x000002dc
+#define AUX_RX_HALF_FULL_MASK 0x000002f0
+#define AUX_TX_HALF_FULL_MASK 0x000002f4
+#define AUX_RX_OVERFLOW_MASK 0x000002f8
+#define AUX_TX_OVERFLOW_MASK 0x000002fc
+#define RX_BYTES 0x00000300
+#define RX_BYTES_LO 0x00000304
+#define RX_BYTES_OK 0x00000308
+#define RX_BYTES_OK_LO 0x0000030c
+#define RX_PKTS 0x00000310
+#define RX_PKTS_LO 0x00000314
+#define RX_PKTS_OK 0x00000318
+#define RX_PKTS_OK_LO 0x0000031c
+#define RX_BCAST_PKTS 0x00000320
+#define RX_BCAST_PKTS_LO 0x00000324
+#define RX_MCAST_PKTS 0x00000328
+#define RX_MCAST_PKTS_LO 0x0000032c
+#define RX_UCAST_PKTS 0x00000330
+#define RX_UCAST_PKTS_LO 0x00000334
+#define RX_UNDERSIZE_PKTS 0x00000338
+#define RX_UNDERSIZE_PKTS_LO 0x0000033c
+#define RX_OVERSIZE_PKTS 0x00000340
+#define RX_OVERSIZE_PKTS_LO 0x00000344
+#define RX_JABBER_PKTS 0x00000348
+#define RX_JABBER_PKTS_LO 0x0000034c
+#define RX_UNDERSIZE_FCERR_PKTS 0x00000350
+#define RX_UNDERSIZE_FCERR_PKTS_LO 0x00000354
+#define RX_DROP_EVENTS 0x00000358
+#define RX_DROP_EVENTS_LO 0x0000035c
+#define RX_FCERR_PKTS 0x00000360
+#define RX_FCERR_PKTS_LO 0x00000364
+#define RX_ALIGN_ERR 0x00000368
+#define RX_ALIGN_ERR_LO 0x0000036c
+#define RX_SYMBOL_ERR 0x00000370
+#define RX_SYMBOL_ERR_LO 0x00000374
+#define RX_MAC_ERR 0x00000378
+#define RX_MAC_ERR_LO 0x0000037c
+#define RX_CTL_PKTS 0x00000380
+#define RX_CTL_PKTS_LO 0x00000384
+#define RX_PAUSE_PKTS 0x00000388
+#define RX_PAUSE_PKTS_LO 0x0000038c
+#define RX_64_PKTS 0x00000390
+#define RX_64_PKTS_LO 0x00000394
+#define RX_65_TO_127_PKTS 0x00000398
+#define RX_65_TO_127_PKTS_LO 0x0000039c
+#define RX_128_255_PKTS 0x000003a0
+#define RX_128_255_PKTS_LO 0x000003a4
+#define RX_256_511_PKTS 0x000003a8
+#define RX_256_511_PKTS_LO 0x000003ac
+#define RX_512_TO_1023_PKTS 0x000003b0
+#define RX_512_TO_1023_PKTS_LO 0x000003b4
+#define RX_1024_TO_1518_PKTS 0x000003b8
+#define RX_1024_TO_1518_PKTS_LO 0x000003bc
+#define RX_1519_TO_MAX_PKTS 0x000003c0
+#define RX_1519_TO_MAX_PKTS_LO 0x000003c4
+#define RX_LEN_ERR_PKTS 0x000003c8
+#define RX_LEN_ERR_PKTS_LO 0x000003cc
+#define MDIO_TX_DATA 0x00000400
+#define MDIO_RX_DATA 0x00000410
+#define MDIO_CMD 0x00000420
+#define MDIO_PHY_ADDR 0x00000430
+#define MDIO_PORT 0x00000440
+#define MDIO_STATUS 0x00000450
+#define TX_CBFC_PAUSE_FRAMES0 0x00000500
+#define TX_CBFC_PAUSE_FRAMES0_LO 0x00000504
+#define TX_CBFC_PAUSE_FRAMES1 0x00000508
+#define TX_CBFC_PAUSE_FRAMES1_LO 0x0000050C
+#define TX_CBFC_PAUSE_FRAMES2 0x00000510
+#define TX_CBFC_PAUSE_FRAMES2_LO 0x00000514
+#define TX_CBFC_PAUSE_FRAMES3 0x00000518
+#define TX_CBFC_PAUSE_FRAMES3_LO 0x0000051C
+#define TX_CBFC_PAUSE_FRAMES4 0x00000520
+#define TX_CBFC_PAUSE_FRAMES4_LO 0x00000524
+#define TX_CBFC_PAUSE_FRAMES5 0x00000528
+#define TX_CBFC_PAUSE_FRAMES5_LO 0x0000052C
+#define TX_CBFC_PAUSE_FRAMES6 0x00000530
+#define TX_CBFC_PAUSE_FRAMES6_LO 0x00000534
+#define TX_CBFC_PAUSE_FRAMES7 0x00000538
+#define TX_CBFC_PAUSE_FRAMES7_LO 0x0000053C
+#define TX_FCOE_PKTS 0x00000540
+#define TX_FCOE_PKTS_LO 0x00000544
+#define TX_MGMT_PKTS 0x00000548
+#define TX_MGMT_PKTS_LO 0x0000054C
+#define RX_CBFC_PAUSE_FRAMES0 0x00000568
+#define RX_CBFC_PAUSE_FRAMES0_LO 0x0000056C
+#define RX_CBFC_PAUSE_FRAMES1 0x00000570
+#define RX_CBFC_PAUSE_FRAMES1_LO 0x00000574
+#define RX_CBFC_PAUSE_FRAMES2 0x00000578
+#define RX_CBFC_PAUSE_FRAMES2_LO 0x0000057C
+#define RX_CBFC_PAUSE_FRAMES3 0x00000580
+#define RX_CBFC_PAUSE_FRAMES3_LO 0x00000584
+#define RX_CBFC_PAUSE_FRAMES4 0x00000588
+#define RX_CBFC_PAUSE_FRAMES4_LO 0x0000058C
+#define RX_CBFC_PAUSE_FRAMES5 0x00000590
+#define RX_CBFC_PAUSE_FRAMES5_LO 0x00000594
+#define RX_CBFC_PAUSE_FRAMES6 0x00000598
+#define RX_CBFC_PAUSE_FRAMES6_LO 0x0000059C
+#define RX_CBFC_PAUSE_FRAMES7 0x000005A0
+#define RX_CBFC_PAUSE_FRAMES7_LO 0x000005A4
+#define RX_FCOE_PKTS 0x000005A8
+#define RX_FCOE_PKTS_LO 0x000005AC
+#define RX_MGMT_PKTS 0x000005B0
+#define RX_MGMT_PKTS_LO 0x000005B4
+#define RX_NIC_FIFO_DROP 0x000005B8
+#define RX_NIC_FIFO_DROP_LO 0x000005BC
+#define RX_FCOE_FIFO_DROP 0x000005C0
+#define RX_FCOE_FIFO_DROP_LO 0x000005C4
+#define RX_MGMT_FIFO_DROP 0x000005C8
+#define RX_MGMT_FIFO_DROP_LO 0x000005CC
+#define RX_PKTS_PRIORITY0 0x00000600
+#define RX_PKTS_PRIORITY0_LO 0x00000604
+#define RX_PKTS_PRIORITY1 0x00000608
+#define RX_PKTS_PRIORITY1_LO 0x0000060C
+#define RX_PKTS_PRIORITY2 0x00000610
+#define RX_PKTS_PRIORITY2_LO 0x00000614
+#define RX_PKTS_PRIORITY3 0x00000618
+#define RX_PKTS_PRIORITY3_LO 0x0000061C
+#define RX_PKTS_PRIORITY4 0x00000620
+#define RX_PKTS_PRIORITY4_LO 0x00000624
+#define RX_PKTS_PRIORITY5 0x00000628
+#define RX_PKTS_PRIORITY5_LO 0x0000062C
+#define RX_PKTS_PRIORITY6 0x00000630
+#define RX_PKTS_PRIORITY6_LO 0x00000634
+#define RX_PKTS_PRIORITY7 0x00000638
+#define RX_PKTS_PRIORITY7_LO 0x0000063C
+#define RX_OCTETS_PRIORITY0 0x00000640
+#define RX_OCTETS_PRIORITY0_LO 0x00000644
+#define RX_OCTETS_PRIORITY1 0x00000648
+#define RX_OCTETS_PRIORITY1_LO 0x0000064C
+#define RX_OCTETS_PRIORITY2 0x00000650
+#define RX_OCTETS_PRIORITY2_LO 0x00000654
+#define RX_OCTETS_PRIORITY3 0x00000658
+#define RX_OCTETS_PRIORITY3_LO 0x0000065C
+#define RX_OCTETS_PRIORITY4 0x00000660
+#define RX_OCTETS_PRIORITY4_LO 0x00000664
+#define RX_OCTETS_PRIORITY5 0x00000668
+#define RX_OCTETS_PRIORITY5_LO 0x0000066C
+#define RX_OCTETS_PRIORITY6 0x00000670
+#define RX_OCTETS_PRIORITY6_LO 0x00000674
+#define RX_OCTETS_PRIORITY7 0x00000678
+#define RX_OCTETS_PRIORITY7_LO 0x0000067C
+#define TX_PKTS_PRIORITY0 0x00000680
+#define TX_PKTS_PRIORITY0_LO 0x00000684
+#define TX_PKTS_PRIORITY1 0x00000688
+#define TX_PKTS_PRIORITY1_LO 0x0000068C
+#define TX_PKTS_PRIORITY2 0x00000690
+#define TX_PKTS_PRIORITY2_LO 0x00000694
+#define TX_PKTS_PRIORITY3 0x00000698
+#define TX_PKTS_PRIORITY3_LO 0x0000069C
+#define TX_PKTS_PRIORITY4 0x000006A0
+#define TX_PKTS_PRIORITY4_LO 0x000006A4
+#define TX_PKTS_PRIORITY5 0x000006A8
+#define TX_PKTS_PRIORITY5_LO 0x000006AC
+#define TX_PKTS_PRIORITY6 0x000006B0
+#define TX_PKTS_PRIORITY6_LO 0x000006B4
+#define TX_PKTS_PRIORITY7 0x000006B8
+#define TX_PKTS_PRIORITY7_LO 0x000006BC
+#define TX_OCTETS_PRIORITY0 0x000006C0
+#define TX_OCTETS_PRIORITY0_LO 0x000006C4
+#define TX_OCTETS_PRIORITY1 0x000006C8
+#define TX_OCTETS_PRIORITY1_LO 0x000006CC
+#define TX_OCTETS_PRIORITY2 0x000006D0
+#define TX_OCTETS_PRIORITY2_LO 0x000006D4
+#define TX_OCTETS_PRIORITY3 0x000006D8
+#define TX_OCTETS_PRIORITY3_LO 0x000006DC
+#define TX_OCTETS_PRIORITY4 0x000006E0
+#define TX_OCTETS_PRIORITY4_LO 0x000006E4
+#define TX_OCTETS_PRIORITY5 0x000006E8
+#define TX_OCTETS_PRIORITY5_LO 0x000006EC
+#define TX_OCTETS_PRIORITY6 0x000006F0
+#define TX_OCTETS_PRIORITY6_LO 0x000006F4
+#define TX_OCTETS_PRIORITY7 0x000006F8
+#define TX_OCTETS_PRIORITY7_LO 0x000006FC
+#define RX_DISCARD_PRIORITY0 0x00000700
+#define RX_DISCARD_PRIORITY0_LO 0x00000704
+#define RX_DISCARD_PRIORITY1 0x00000708
+#define RX_DISCARD_PRIORITY1_LO 0x0000070C
+#define RX_DISCARD_PRIORITY2 0x00000710
+#define RX_DISCARD_PRIORITY2_LO 0x00000714
+#define RX_DISCARD_PRIORITY3 0x00000718
+#define RX_DISCARD_PRIORITY3_LO 0x0000071C
+#define RX_DISCARD_PRIORITY4 0x00000720
+#define RX_DISCARD_PRIORITY4_LO 0x00000724
+#define RX_DISCARD_PRIORITY5 0x00000728
+#define RX_DISCARD_PRIORITY5_LO 0x0000072C
+#define RX_DISCARD_PRIORITY6 0x00000730
+#define RX_DISCARD_PRIORITY6_LO 0x00000734
+#define RX_DISCARD_PRIORITY7 0x00000738
+#define RX_DISCARD_PRIORITY7_LO 0x0000073C
+
+
+#define CQ0_ID 0x0
+#define NIC_CORE 0x1
+/* Routing Index Register 0xE4 */
+#define ROUTING_INDEX_MW BIT_31
+#define ROUTING_INDEX_DEFAULT_ENABLE_MASK (0x8320000u)
+#define ROUTING_INDEX_DEFAULT_DISABLE_MASK (0x0320000u)
+
+/* Routing Data Register 0xE8 */
+#define ROUTE_AS_CAM_HIT 0x80
+#define ROUTE_AS_BCAST_MCAST_MATCH 0x8000u
+#define ROUTE_AS_VALID_PKT 0x800000u /* promiscuous mode? */
+
+enum {
+ ROUTING_MASK_INDEX_CAM_HIT,
+ ROUTING_MASK_INDEX_BCAST_MCAST_MATCH,
+ ROUTING_MASK_INDEX_VALID_PKT,
+ ROUTING_MASK_INDEX_TOTAL
+};
+
+#define ROUTING_MASK_INDEX_MAX 16
+/*
+ * General definitions...
+ */
+
+/*
+ * Below are a number of compiler switches for controlling driver behavior.
+ * Some are not supported under certain conditions and are noted as such.
+ */
+
+/* MTU & Frame Size stuff */
+#define JUMBO_MTU 9000
+#define NORMAL_FRAME_SIZE 2500 /* ETHERMTU,1500 */
+#define JUMBO_FRAME_SIZE 9600
+#define VLAN_ID_LEN 2
+#define VLAN_HEADER_LEN sizeof (struct ether_vlan_header) /* 18 */
+#define ETHER_HEADER_LEN sizeof (struct ether_header) /* 14 */
+
+#define NUM_TX_RING_ENTRIES (2048*2)
+#define NUM_RX_RING_ENTRIES (2048)
+
+#define NUM_SMALL_BUFFERS (2048)
+#define NUM_LARGE_BUFFERS (2048)
+
+#define	RX_TX_RING_SHADOW_SPACE	2	/* 1st is for wqicb, 2nd for cqicb */
+#define BUF_Q_PTR_SPACE ((((NUM_SMALL_BUFFERS * sizeof (uint64_t)) \
+ / VM_PAGE_SIZE) + 1) + \
+ (((NUM_LARGE_BUFFERS * sizeof (uint64_t)) \
+ / VM_PAGE_SIZE) + 1))
+
+#define MAX_CQ 128
+#define DFLT_RX_COALESCE_WAIT 500 /* usec wait for coalescing */
+#define DFLT_RX_INTER_FRAME_WAIT 25 /* max interframe-wait for */
+ /* coalescing */
+#define DFLT_TX_COALESCE_WAIT 800 /* usec wait for coalescing */
+#define DFLT_TX_INTER_FRAME_WAIT 5 /* max interframe-wait for */
+ /* coalescing */
+#define DFLT_PAYLOAD_COPY_THRESH 6 /* must be at least 6 usec */
+
+#define UDELAY_COUNT 3
+#define UDELAY_DELAY 10
+
+#define MAX_RX_RINGS 128
+#define MAX_TX_RINGS 16
+
+/*
+ * Large & Small Buffers for Receives
+ */
+struct lrg_buf_q_entry {
+ uint32_t addr0_lower;
+#define IAL_LAST_ENTRY 0x00000001
+#define IAL_CONT_ENTRY 0x00000002
+#define IAL_FLAG_MASK 0x00000003
+ uint32_t addr0_upper;
+};
+
+struct bufq_addr_element {
+ uint32_t addr_low;
+ uint32_t addr_high;
+};
+
+#define QL_NO_RESET 0
+#define QL_DO_RESET 1
+
+/* Link must be in one of these states */
+enum link_state_t {
+ LS_DOWN,
+ LS_UP
+};
+
+/* qlge->flags definitions. */
+#define QL_RESET_DONE BIT_0 /* Reset finished. */
+#define QL_RESET_ACTIVE BIT_1 /* Waiting for reset to finish. */
+#define QL_RESET_START BIT_2 /* Please reset the chip. */
+#define QL_LINK_MASTER BIT_5 /* This driver controls the link */
+#define QL_ADAPTER_UP BIT_6 /* Adapter has been brought up. */
+#define QL_LINK_OPTICAL BIT_12
+#define QL_MSI_ENABLED BIT_13
+#define INTERRUPTS_ENABLED BIT_14
+#define ADAPTER_SUSPENDED BIT_15
+#define QLA_PM_CAPABLE BIT_16
+
+/*
+ * ISP PCI Configuration Register Set structure definitions.
+ */
+typedef volatile struct
+{
+volatile uint16_t vendor_id;
+volatile uint16_t device_id;
+volatile uint16_t command;
+volatile uint16_t status;
+volatile uint8_t revision;
+volatile uint8_t prog_class;
+volatile uint8_t sub_class;
+volatile uint8_t base_class;
+volatile uint8_t cache_line_size;
+volatile uint8_t latency_timer;
+volatile uint8_t header_type;
+volatile uint32_t io_base_address;
+volatile uint32_t pci_cntl_reg_set_mem_base_address_lower;
+volatile uint32_t pci_cntl_reg_set_mem_base_address_upper;
+volatile uint32_t pci_doorbell_mem_base_address_lower;
+volatile uint32_t pci_doorbell_mem_base_address_upper;
+
+volatile uint16_t sub_vendor_id;
+volatile uint16_t sub_device_id;
+volatile uint32_t expansion_rom;
+volatile uint8_t intr_line;
+volatile uint8_t intr_pin;
+volatile uint8_t min_grant;
+volatile uint8_t max_latency;
+volatile uint16_t pcie_device_control;
+volatile uint16_t link_status;
+volatile uint16_t msi_msg_control;
+volatile uint16_t msi_x_msg_control;
+
+} pci_cfg_t;
+
+
+/*
+ * Schultz Control Registers Index
+ */
+#define REG_PROCESSOR_ADDR 0x00
+#define REG_PROCESSOR_DATA 0x04
+#define REG_SYSTEM 0x08
+#define REG_RESET_FAILOVER 0x0C
+#define REG_FUNCTION_SPECIFIC_CONTROL 0x10
+#define REG_HOST_CMD_STATUS 0x14
+#define REG_ICB_RID 0x1C
+#define REG_ICB_ACCESS_ADDRESS_LOWER 0x20
+#define REG_ICB_ACCESS_ADDRESS_UPPER 0x24
+#define REG_CONFIGURATION 0x28
+
+#define INTR_EN_INTR_MASK 0x007f0000
+#define INTR_EN_TYPE_MASK 0x03000000
+#define INTR_EN_TYPE_ENABLE 0x00000100
+#define INTR_EN_TYPE_DISABLE 0x00000200
+#define INTR_EN_TYPE_READ 0x00000300
+#define INTR_EN_IHD 0x00002000
+#define INTR_EN_IHD_MASK (INTR_EN_IHD << 16)
+#define INTR_EN_EI 0x00004000
+#define INTR_EN_EN 0x00008000
+
+#define REG_STATUS 0x30
+#define REG_INTERRUPT_ENABLE 0x34
+#define REG_INTERRUPT_MASK 0x38
+#define REG_INTERRUPT_STATUS_1 0x3C
+
+#define REG_ERROR_STATUS 0x54
+
+#define REG_SEMAPHORE 0x64
+
+#define REG_XGMAC_ADDRESS 0x78
+#define REG_XGMAC_DATA 0x7C
+#define REG_NIC_ENHANCED_TX_SCHEDULE 0x80
+#define REG_CNA_ENHANCED_TX_SCHEDULE 0x84
+#define REG_FLASH_ADDRESS 0x88
+#define REG_FLASH_DATA 0x8C
+
+#define REG_STOP_CQ_PROCESSING 0x90
+#define REG_PAGE_TABLE_RID 0x94
+#define REG_WQ_PAGE_TABLE_BASE_ADDR_LOWER 0x98
+#define REG_WQ_PAGE_TABLE_BASE_ADDR_UPPER 0x9C
+#define REG_CQ_PAGE_TABLE_BASE_ADDR_LOWER 0xA0
+#define REG_CQ_PAGE_TABLE_BASE_ADDR_UPPER 0xA4
+#define REG_MAC_PROTOCOL_ADDRESS_INDEX 0xA8
+#define REG_MAC_PROTOCOL_DATA 0xAC
+#define REG_SPLIT_HEADER 0xC0
+#define REG_NIC_RECEIVE_CONFIGURATION 0xD4
+
+#define REG_MGMT_RCV_CFG 0xE0
+#define REG_ROUTING_INDEX 0xE4
+#define REG_ROUTING_DATA 0xE8
+#define REG_RSVD7 0xEC
+#define REG_XG_SERDES_ADDR 0xF0
+#define REG_XG_SERDES_DATA 0xF4
+#define REG_PRB_MX_ADDR 0xF8
+#define REG_PRB_MX_DATA 0xFC
+
+#define INTR_MASK_PI 0x00000001
+#define INTR_MASK_HL0 0x00000002
+#define INTR_MASK_LH0 0x00000004
+#define INTR_MASK_HL1 0x00000008
+#define INTR_MASK_LH1 0x00000010
+#define INTR_MASK_SE 0x00000020
+#define INTR_MASK_LSC 0x00000040
+#define INTR_MASK_MC 0x00000080
+#define	INTR_MASK_LINK_IRQS	(INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC)
+
+/* Interrupt Enable Register 0x34 */
+#define INTR_ENABLED 0x8000
+#define GLOBAL_ENABLE_INTR 0x4000
+#define ENABLE_MSI_MULTI_INTR 0x2000
+#define ONE_INTR_MASK 0x3FF0000u
+#define ENABLE_INTR 0x0100
+#define DISABLE_INTR 0x0200
+#define VERIFY_INTR_ENABLED 0x0300
+#define ISP_ENABLE_INTR(qlge) ql_put32(qlge, \
+ REG_INTERRUPT_ENABLE,\
+ (ONE_INTR_MASK | ENABLE_INTR))
+#define ISP_DISABLE_INTR(qlge) ql_put32(qlge, \
+ REG_INTERRUPT_ENABLE, \
+ (ONE_INTR_MASK | DISABLE_INTR))
+#define ISP_ENABLE_PI_INTR(qlge) ql_put32(qlge, \
+ REG_INTERRUPT_MASK, (BIT_16|1))
+#define ISP_DISABLE_PI_INTR(qlge) ql_put32(qlge, \
+ REG_INTERRUPT_MASK, BIT_16)
+
+#define ISP_ENABLE_GLOBAL_INTRS(qlge) { \
+ ql_put32(qlge, REG_INTERRUPT_ENABLE, \
+ (0x40000000u | GLOBAL_ENABLE_INTR)); \
+ qlge->flags |= INTERRUPTS_ENABLED; \
+ }
+#define ISP_DISABLE_GLOBAL_INTRS(qlge) { \
+ ql_put32(qlge, \
+ REG_INTERRUPT_ENABLE, (0x40000000u)); \
+ qlge->flags &= ~INTERRUPTS_ENABLED; \
+ }
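+
+/*
+ * Illustrative usage: interrupts are toggled through these macros, e.g.
+ * ISP_ENABLE_GLOBAL_INTRS(qlge) on attach/resume and
+ * ISP_DISABLE_GLOBAL_INTRS(qlge) on quiesce; both also keep the
+ * INTERRUPTS_ENABLED flag in qlge->flags consistent.
+ */
+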
+#define REQ_Q_VALID 0x10
+#define RSP_Q_VALID 0x10
+
+/*
+ * Mailbox Registers
+ */
+#define MPI_REG 0x1002
+#define NUM_MAILBOX_REGS 16
+#define FUNC_0_IN_MAILBOX_0_REG_OFFSET 0x1180
+#define FUNC_0_OUT_MAILBOX_0_REG_OFFSET 0x1190
+#define FUNC_1_IN_MAILBOX_0_REG_OFFSET 0x1280
+#define FUNC_1_OUT_MAILBOX_0_REG_OFFSET 0x1290
+
+/*
+ * Control Register Set definitions.
+ */
+typedef volatile struct
+{
+volatile uint32_t processor_address; /* 0x00 */
+volatile uint32_t processor_data; /* 0x04 */
+volatile uint32_t system_data; /* 0x08 */
+volatile uint32_t reset_failover; /* 0x0C */
+
+volatile uint32_t function_specific_control; /* 0x10 */
+volatile uint32_t host_command_status; /* 0x14 */
+volatile uint32_t led; /* 0x18 */
+volatile uint32_t icb_rid; /* 0x1c */
+
+volatile uint32_t idb_access_address_low; /* 0x20 */
+volatile uint32_t idb_access_address_high; /* 0x24 */
+volatile uint32_t configuration; /* 0x28 */
+volatile uint32_t bios_base; /* 0x2C */
+
+volatile uint32_t status; /* 0x30 */
+volatile uint32_t interrupt_enable; /* 0x34 */
+volatile uint32_t interrupt_mask; /* 0x38 */
+volatile uint32_t interrupt_status_1; /* 0x3c */
+
+volatile uint32_t interrupt_status_2; /* 0x40 */
+volatile uint32_t interrupt_status_3; /* 0x44 */
+volatile uint32_t interrupt_status_4; /* 0x48 */
+volatile uint32_t rev_id; /* 0x4c */
+
+volatile uint32_t force_ecc_error; /* 0x50 */
+volatile uint32_t error_status; /* 0x54 */
+volatile uint32_t internal_ram_debug_address; /* 0x58 */
+volatile uint32_t internal_ram_data; /* 0x5c */
+
+volatile uint32_t correctable_ecc_error; /* 0x60 */
+volatile uint32_t semaphore; /* 0x64 */
+
+volatile uint32_t gpio1; /* 0x68 */
+volatile uint32_t gpio2; /* 0x6c */
+
+volatile uint32_t gpio3; /* 0x70 */
+volatile uint32_t reserved1; /* 0x74 */
+volatile uint32_t xgmac_address; /* 0x78 */
+volatile uint32_t xgmac_data; /* 0x7c */
+
+volatile uint32_t nic_enhanced_tx_schedule; /* 0x80 */
+volatile uint32_t cna_enhanced_tx_schedule; /* 0x84 */
+volatile uint32_t flash_address; /* 0x88 */
+volatile uint32_t flash_data; /* 0x8c */
+
+volatile uint32_t stop_cq; /* 0x90 */
+volatile uint32_t page_table_rid; /* 0x94 */
+volatile uint32_t wq_page_table_base_address_lower; /* 0x98 */
+volatile uint32_t wq_page_table_base_address_upper; /* 0x9c */
+
+volatile uint32_t cq_page_table_base_address_lower; /* 0xA0 */
+volatile uint32_t cq_page_table_base_address_upper; /* 0xA4 */
+volatile uint32_t mac_protocol_address_index; /* 0xA8 */
+volatile uint32_t mac_protocol_data; /* 0xAc */
+
+volatile uint32_t cos_default_cq_reg1; /* 0xB0 */
+volatile uint32_t cos_default_cq_reg2; /* 0xB4 */
+volatile uint32_t ethertype_skip_reg1; /* 0xB8 */
+volatile uint32_t ethertype_skip_reg2; /* 0xBC */
+
+volatile uint32_t split_header; /* 0xC0 */
+volatile uint32_t fcoe_pause_threshold; /* 0xC4 */
+volatile uint32_t nic_pause_threshold; /* 0xC8 */
+volatile uint32_t fc_ethertype; /* 0xCC */
+
+volatile uint32_t fcoe_recv_configuration; /* 0xD0 */
+volatile uint32_t nic_recv_configuration; /* 0xD4 */
+volatile uint32_t cos_tags_in_fcoe_fifo; /* 0xD8 */
+volatile uint32_t cos_tags_in_nic_fifo; /* 0xDc */
+
+volatile uint32_t mgmt_recv_configuration; /* 0xE0 */
+volatile uint32_t routing_index; /* 0xE4 */
+volatile uint32_t routing_data; /* 0xE8 */
+volatile uint32_t reserved2; /* 0xEc */
+
+volatile uint32_t xg_serdes_address; /* 0xF0 */
+volatile uint32_t xg_serdes_data; /* 0xF4 */
+volatile uint32_t probe_mux_address; /* 0xF8 */
+volatile uint32_t probe_mux_read_data; /* 0xFc */
+
+#define INTR_PENDING (uint32_t)(CSR_COMPLETION_INTR)
+
+} dev_reg_t;
+
+typedef volatile struct
+{
+ volatile uint32_t doorbell_reg_address[256]; /* 0x00 */
+} dev_doorbell_reg_t;
+
+#define SET_RMASK(val) ((val & 0xffff) | (val << 16))
+#define CLR_RMASK(val) (0 | (val << 16))
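+
+/*
+ * Worked example: many control registers take a 16-bit write mask in the
+ * upper half. SET_RMASK(FSC_FE) evaluates to (0x8000 | (0x8000 << 16)),
+ * i.e. 0x80008000, the FE bit plus its mask bit, while CLR_RMASK(FSC_FE)
+ * yields 0x80000000, clearing FE under the same mask.
+ */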
+
+/*
+ * DMA registers read only
+ */
+typedef volatile struct
+{
+ volatile uint32_t req_q_out;
+ volatile uint32_t rsp_q_in;
+
+} iop_dmaregs_t;
+
+#define DMAREGS_SIZE (sizeof (iop_dmaregs_t))
+#define DUMMY_SIZE (32*1024)
+
+#ifdef QL_DEBUG
+typedef struct crash_record {
+uint16_t fw_major_version; /* 00 - 01 */
+uint16_t fw_minor_version; /* 02 - 03 */
+uint16_t fw_patch_version; /* 04 - 05 */
+uint16_t fw_build_version; /* 06 - 07 */
+
+uint8_t build_date[16]; /* 08 - 17 */
+uint8_t build_time[16]; /* 18 - 27 */
+uint8_t build_user[16]; /* 28 - 37 */
+uint8_t card_serial_num[16]; /* 38 - 47 */
+
+uint32_t time_of_crash_in_secs; /* 48 - 4B */
+uint32_t time_of_crash_in_ms; /* 4C - 4F */
+
+uint16_t outb_risc_sd_num_frames; /* 50 - 51 */
+uint16_t oap_sd_length; /* 52 - 53 */
+uint16_t iap_sd_num_frames; /* 54 - 55 */
+uint16_t inb_risc_sd_length; /* 56 - 57 */
+
+uint8_t reserved[28]; /* 58 - 7F */
+
+uint8_t outb_risc_reg_dump[256]; /* 80 -17F */
+uint8_t inb_risc_reg_dump[256]; /* 180 -27F */
+uint8_t inb_outb_risc_stack_dump[1]; /* 280 - ??? */
+} crash_record_t;
+#endif
+
+/*
+ * I/O register access macros.
+ */
+
+#define RD_REG_BYTE(qlge, addr) \
+ ddi_get8(qlge->dev_handle, (uint8_t *)addr)
+#define RD_REG_DWORD(qlge, addr) \
+ ddi_get32(qlge->dev_handle, (uint32_t *)addr)
+#define WRT_REG_BYTE(qlge, addr, data) \
+ ddi_put8(qlge->dev_handle, (uint8_t *)addr, data)
+#define WRT_REG_WORD(qlge, addr, data) \
+ ddi_put16(qlge->dev_handle, (uint16_t *)addr, data)
+#define WRT_REG_DWORD(qlge, addr, data) \
+ ddi_put32(qlge->dev_handle, (uint32_t *)addr, data)
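+
+/*
+ * Illustrative usage (regs is a hypothetical dev_reg_t pointer into the
+ * mapped register BAR): read the chip status and assert a function reset:
+ *
+ *	uint32_t sts = RD_REG_DWORD(qlge, &regs->status);
+ *	WRT_REG_DWORD(qlge, &regs->reset_failover,
+ *	    FUNCTION_RESET_MASK | FUNCTION_RESET);
+ */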
+
+/*
+ * QLGE-specific ioctls ...
+ */
+#define QLA_IOC ((((('Q' << 8) + 'L') << 8) + 'A') << 8)
+
+/*
+ * Definition of ioctl commands
+ */
+#define QLA_PCI_STATUS (QLA_IOC|1) /* Read all PCI registers */
+
+#define QLA_WRITE_REG (QLA_IOC|3)
+#define QLA_READ_PCI_REG (QLA_IOC|4)
+#define QLA_WRITE_PCI_REG (QLA_IOC|5)
+#define QLA_GET_DBGLEAVEL (QLA_IOC|6)
+#define QLA_SET_DBGLEAVEL (QLA_IOC|7)
+#define QLA_READ_CONTRL_REGISTERS (QLA_IOC|8)
+
+#define QLA_MANUAL_READ_FLASH (QLA_IOC|9)
+#define QLA_MANUAL_WRITE_FLASH (QLA_IOC|10)
+#define QLA_SUPPORTED_DUMP_TYPES (QLA_IOC|11)
+#define QLA_GET_BINARY_CORE_DUMP (QLA_IOC|12)
+#define QLA_TRIGGER_SYS_ERROR_EVENT (QLA_IOC|13)
+
+#define QLA_READ_FLASH (QLA_IOC|15)
+#define QLA_WRITE_FLASH (QLA_IOC|16)
+#define QLA_READ_VPD (QLA_IOC|17)
+#define QLA_GET_PROP (QLA_IOC|18)
+#define QLA_SHOW_REGION (QLA_IOC|19)
+#define QLA_LIST_ADAPTER_INFO (QLA_IOC|20)
+#define QLA_READ_FW_IMAGE (QLA_IOC|21)
+#define QLA_WRITE_FW_IMAGE_HEADERS (QLA_IOC|22)
+
+#define QLA_CONTINUE_COPY_IN (QLA_IOC|29)
+#define QLA_CONTINUE_COPY_OUT (QLA_IOC|30)
+#define QLA_SOFT_RESET (QLA_IOC|31)
+
+#define QLA_IOCTL_CMD_FIRST QLA_PCI_STATUS
+#define QLA_IOCTL_CMD_LAST QLA_SOFT_RESET
+
+/* A Solaris ioctl(2) can copy in/out up to 1024 bytes at a time */
+#define IOCTL_BUFFER_SIZE 1024
+#define IOCTL_MAX_BUF_SIZE (IOCTL_BUFFER_SIZE*512) /* 512k */
+
+typedef struct ioctl_header_info {
+uint8_t version;
+uint8_t reserved;
+uint8_t option[2];
+uint16_t expected_trans_times;
+uint16_t payload_length;
+uint32_t total_length;
+} ioctl_header_info_t;
+
+#define IOCTL_HEADER_LEN sizeof (ioctl_header_info_t)
+#define IOCTL_MAX_DATA_LEN (IOCTL_BUFFER_SIZE - IOCTL_HEADER_LEN)
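+
+/*
+ * Illustrative sketch (userland side; fd is a hypothetical descriptor on
+ * the qlge device node): a transfer larger than IOCTL_BUFFER_SIZE is
+ * carried out in IOCTL_MAX_DATA_LEN chunks, each prefixed with an
+ * ioctl_header_info_t, e.g.
+ *
+ *	char buf[IOCTL_BUFFER_SIZE];
+ *	if (ioctl(fd, QLA_READ_CONTRL_REGISTERS, buf) < 0)
+ *		perror("qlge ioctl");
+ */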
+
+struct ql_pci_reg {
+uint16_t addr; /* register number [0..ff] */
+uint16_t value; /* data to write/data read */
+};
+
+struct ql_device_reg {
+uint32_t addr; /* address to write/data read */
+uint32_t value; /* data to write/data read */
+};
+
+struct ql_flash_io_info {
+uint32_t addr; /* register number [0..ff] */
+uint32_t size; /* number of data to write/data read */
+};
+
+struct qlnic_mpi_version_info {
+uint32_t fw_version;
+uint32_t phy_version;
+};
+
+struct qlnic_link_status_info {
+uint32_t link_status_info;
+uint32_t additional_info;
+uint32_t network_hw_info;
+uint32_t dcbx_frame_counters_info;
+uint32_t change_counters_info;
+};
+
+struct qlnic_prop_info {
+struct qlnic_mpi_version_info mpi_version; /* MPI Version */
+uint32_t fw_state; /* MPI state */
+struct qlnic_link_status_info link_status; /* Link Status */
+};
+
+typedef struct ql_adapter_info {
+uint32_t pci_binding;	/* /bus/dev/func number per IEEE 1275 format */
+uint16_t vendor_id;
+uint16_t device_id;
+uint16_t sub_vendor_id;
+uint16_t sub_device_id;
+struct ether_addr cur_addr;
+} ql_adapter_info_t;
+
+#define DUMP_DESCRIPTION_HEADER_SIGNATURE 0x42535451 /* "QTSB" */
+typedef struct ql_dump_header {
+uint32_t signature; /* QTSB */
+uint8_t version;
+uint8_t length;
+uint8_t num_dumps;
+uint8_t reserved;
+uint32_t time_stamp_lo;
+uint32_t time_stamp_hi;
+} ql_dump_header_t;
+
+#define DUMP_IMAGE_HEADER_SIGNATURE 0x504D4451 /* "QDMP" */
+
+typedef struct ql_dump_image_header {
+uint32_t signature; /* QDMP */
+uint8_t version;
+uint8_t header_length;
+uint16_t checksum;
+uint32_t data_type;
+#define DUMP_TYPE_CORE_DUMP 1
+#define DUMP_TYPE_REGISTER_DUMP 2
+#define DUMP_TYPE_DRIVER_DUMP 3
+uint32_t data_length;
+} ql_dump_image_header_t;
+
+/* utility request */
+#define DUMP_REQUEST_CORE BIT_1
+#define DUMP_REQUEST_REGISTER BIT_2
+#define DUMP_REQUEST_DRIVER BIT_3
+
+#define DUMP_REQUEST_ALL BIT_7
+
+#define DUMP_DESCRIPTION_FOOTER_SIGNATURE 0x45535451 /* "QTSE" */
+typedef struct ql_dump_footer {
+uint32_t signature; /* QTSE */
+uint8_t version;
+uint8_t length;
+uint16_t reserved;
+uint32_t time_stamp_lo;
+uint32_t time_stamp_hi;
+} ql_dump_footer_t;
+
+
+/*
+ * Solaris qlnic exit status.
+ */
+#define QN_ERR_BASE 0x30000000
+#define	QN_ERR_OK		(QN_ERR_BASE | 0) /* Success */
+#define	QN_ERR_NOT_SUPPORTED	(QN_ERR_BASE | 1) /* Command not supported */
+#define	QN_ERR_INVALID_PARAM	(QN_ERR_BASE | 2) /* Invalid parameter */
+#define	QN_ERR_WRONG_NO_PARAM	(QN_ERR_BASE | 3) /* Wrong number of parameters */
+#define	QN_ERR_FILE_NOT_FOUND	(QN_ERR_BASE | 4) /* File not found */
+#define	QN_ERR_FILE_READ_ERR	(QN_ERR_BASE | 5) /* File read err */
+#define	QN_ERR_FILE_WRITE_ERR	(QN_ERR_BASE | 6) /* File write err */
+#define	QN_ERR_NO_MEMORY	(QN_ERR_BASE | 7) /* No Memory */
+
+#define FLT_REGION_FDT 0x1A
+#define ISP_8100_FDT_ADDR 0x360000
+#define ISP_8100_FDT_SIZE 0x80
+
+#define FLT_REGION_FLT 0x1C
+#define ISP_8100_FLT_ADDR 0x361000
+#define ISP_8100_FLT_SIZE 0x1000
+
+#define FLT_REGION_NIC_BOOT_CODE 0x2E
+#define ISP_8100_NIC_BOOT_CODE_ADDR 0x0
+#define ISP_8100_NIC_BOOT_CODE_SIZE 0x80000
+
+#define FLT_REGION_MPI_FW_USE 0x42
+#define ISP_8100_MPI_FW_USE_ADDR 0xF0000
+#define ISP_8100_MPI_FW_USE_SIZE 0x10000
+
+#define FLT_REGION_MPI_RISC_FW 0x40
+#define ISP_8100_MPI_RISC_FW_ADDR 0x100000
+#define ISP_8100_MPI_RISC_FW_SIZE 0x10000
+
+#define FLT_REGION_VPD0 0x2C
+#define ISP_8100_VPD0_ADDR 0x140000
+#define ISP_8100_VPD0_SIZE 0x200
+
+#define FLT_REGION_NIC_PARAM0 0x46
+#define ISP_8100_NIC_PARAM0_ADDR 0x140200
+#define ISP_8100_NIC_PARAM0_SIZE 0x200
+
+#define FLT_REGION_VPD1 0x2D
+#define ISP_8100_VPD1_ADDR 0x140400
+#define ISP_8100_VPD1_SIZE 0x200
+
+#define FLT_REGION_NIC_PARAM1 0x47
+#define ISP_8100_NIC_PARAM1_ADDR 0x140600
+#define ISP_8100_NIC_PARAM1_SIZE 0x200
+
+#define FLT_REGION_MPI_CFG 0x41
+#define ISP_8100_MPI_CFG_ADDR 0x150000
+#define ISP_8100_MPI_CFG_SIZE 0x10000
+
+#define FLT_REGION_EDC_PHY_FW 0x45
+#define ISP_8100_EDC_PHY_FW_ADDR 0x170000
+#define ISP_8100_EDC_PHY_FW_SIZE 0x20000
+
+#define FLT_REGION_FC_BOOT_CODE 0x07
+#define ISP_8100_FC_BOOT_CODE_ADDR 0x200000
+#define ISP_8100_FC_BOOT_CODE_SIZE 0x80000
+
+#define FLT_REGION_FC_FW 0x01
+#define ISP_8100_FC_FW_ADDR 0x280000
+#define ISP_8100_FC_FW_SIZE 0x80000
+
+#define FLT_REGION_FC_VPD0 0x14
+#define ISP_8100_FC_VPD0_ADDR 0x340000
+#define ISP_8100_FC_VPD0_SIZE 0x200
+
+#define FLT_REGION_FC_NVRAM0 0x15
+#define ISP_8100_FC_NVRAM0_ADDR 0x340200
+#define ISP_8100_FC_NVRAM0_SIZE 0x200
+
+#define FLT_REGION_FC_VPD1 0x16
+#define ISP_8100_FC_VPD1_ADDR 0x340400
+#define ISP_8100_FC_VPD1_SIZE 0x200
+
+#define FLT_REGION_FC_NVRAM1 0x17
+#define ISP_8100_FC_NVRAM1_ADDR 0x340600
+#define ISP_8100_FC_NVRAM1_SIZE 0x200
+
+#define FLT_REGION_TIME_STAMP 0x60
+
+/* flash region for testing */
+#define FLT_REGION_WIN_FW_DUMP0 0x48
+#define ISP_8100_WIN_FW_DUMP0_ADDR 0x190000
+#define ISP_8100_WIN_FW_DUMP0_SIZE 0x30000
+
+#define ISP_8100_FLASH_TEST_REGION_ADDR ISP_8100_WIN_FW_DUMP0_ADDR
+#define ISP_8100_FLASH_TEST_REGION_SIZE 0x10000
+
+/* mailbox */
+#define QL_8XXX_SFP_SIZE 256
+
+#define MAILBOX_TOV 30 /* Default Timeout value. */
+/*
+ * ISP mailbox commands from Host
+ */
+#define MBC_NO_OPERATION 0 /* No Operation. */
+#define MBC_LOAD_RAM 1 /* Load RAM. */
+#define MBC_EXECUTE_FIRMWARE 2 /* Execute firmware. */
+#define MBC_MAILBOX_REGISTER_TEST 6 /* Mailbox echo test */
+#define MBC_VERIFY_CHECKSUM 7 /* Verify checksum. */
+#define MBC_ABOUT_FIRMWARE 8 /* About Firmware. */
+#define MBC_RISC_MEMORY_COPY 0xA /* Copy RISC memory. */
+#define MBC_LOAD_RISC_RAM 0xB /* Load RISC RAM command. */
+#define MBC_DUMP_RISC_RAM 0xC /* Dump RISC RAM command. */
+#define MBC_INIT_RISC_RAM 0xE
+#define MBC_READ_RAM_WORD 0xF /* Read RAM */
+#define MBC_STOP_FIRMWARE 0x14 /* Stop firmware */
+#define MBC_GENERATE_SYS_ERROR 0x2A /* Generate System Error */
+#define MBC_WRITE_SFP 0x30 /* Write SFP. */
+#define MBC_READ_SFP 0x31 /* Read SFP. */
+#define MBC_INITIALIZE_FIRMWARE 0x60 /* Initialize firmware */
+#define MBC_GET_INIT_CTRL_BLOCK 0x61 /* Get Initialization CBLK */
+#define MBC_GET_FIRMWARE_STATE 0x69 /* Get firmware state. */
+#define MBC_IDC_REQUEST 0x100 /* IDC Request. */
+#define IDC_REQ_ALL_DEST_FUNC_MASK BIT_4 /* Mailbox 1 */
+
+#define IDC_REQ_DEST_FUNC_0_MASK BIT_0 /* Mailbox 2 */
+#define IDC_REQ_DEST_FUNC_1_MASK BIT_1
+#define IDC_REQ_DEST_FUNC_2_MASK BIT_2
+#define IDC_REQ_DEST_FUNC_3_MASK BIT_3
+
+enum IDC_REQ_DEST_FUNC {
+IDC_REQ_DEST_FUNC_0,
+IDC_REQ_DEST_FUNC_1,
+IDC_REQ_DEST_FUNC_2,
+IDC_REQ_DEST_FUNC_3,
+IDC_REQ_DEST_FUNC_ALL = 0x0F
+};
+
+#define IDC_REQ_TIMEOUT_MASK 0x01
+
+#define MBC_IDC_ACK 0x101 /* IDC Acknowledge. */
+#define MBC_IDC_TIME_EXTENDED 0x102 /* IDC Time Extended. */
+
+#define MBC_SET_WAKE_ON_LANE_MODE 0x110
+#define MBC_SET_WAKE_ON_LANE_FILTER 0x111
+#define MBC_CLEAR_WAKE_ON_LANE_FILTER 0x112
+#define MBC_SET_WAKE_ON_LANE_MAGIC_PKT 0x113
+#define MBC_CLEAR_WAKE_ON_LANE_MAGIC_PKT 0x114
+
+#define MBC_PORT_RESET 0x120
+#define MBC_SET_PORT_CONFIG 0x122
+#define MBC_GET_PORT_CONFIG 0x123
+#define ENABLE_JUMBO_FRAME_SIZE_MASK BIT_16
+#define MBC_GET_LINK_STATUS 0x124
+
+#define MBC_SET_LED_CONFIG 0x125
+#define MBC_GET_LED_CONFIG 0x126
+
+/*
+ * ISP mailbox command complete status codes
+ */
+#define MBS_COMMAND_COMPLETE 0x4000
+#define MBS_INVALID_COMMAND 0x4001
+#define MBS_HOST_INTERFACE_ERROR 0x4002
+#define MBS_TEST_FAILED 0x4003
+#define MBS_POST_ERROR 0x4004
+#define MBS_COMMAND_ERROR 0x4005
+#define MBS_COMMAND_PARAMETER_ERROR 0x4006
+#define MBS_PORT_ID_USED 0x4007
+#define MBS_LOOP_ID_USED 0x4008
+#define MBS_ALL_IDS_IN_USE 0x4009
+#define MBS_NOT_LOGGED_IN 0x400A
+#define MBS_LOOP_DOWN 0x400B
+#define MBS_LOOP_BACK_ERROR 0x400C
+#define MBS_CHECKSUM_ERROR 0x4010
+
+/* Async Event Status */
+#define MBA_IDC_INTERMEDIATE_COMPLETE 0x1000
+#define MBA_ASYNC_EVENT 0x8000 /* Asynchronous event. */
+#define MBA_SYSTEM_ERR 0x8002
+#define MBA_LINK_UP 0x8011
+enum {
+ XFI_NETWORK_INTERFACE = 1,
+ XAUI_NETWORK_INTERFACE,
+ XFI_BACKPLANE_INTERFACE,
+ XAUI_BACKPLANE_INTERFACE,
+ EXT_10GBASE_T_PHY,
+ EXT_EXT_EDC_PHY
+};
+#define MBA_LINK_DOWN 0x8012
+#define MBA_IDC_COMPLETE 0x8100
+#define MBA_IDC_REQUEST_NOTIFICATION 0x8101
+#define MBA_IDC_TIME_EXTENDED 0x8102
+#define MBA_DCBX_CONFIG_CHANGE 0x8110
+#define MBA_NOTIFICATION_LOST 0x8120
+#define MBA_SFT_TRANSCEIVER_INSERTION 0x8130
+#define MBA_SFT_TRANSCEIVER_REMOVAL 0x8131
+#define MBA_FIRMWARE_INIT_COMPLETE 0x8400
+#define MBA_FIRMWARE_INIT_FAILED 0x8401
+
+typedef struct firmware_version_info {
+uint8_t reserved;
+uint8_t major_version;
+uint8_t minor_version;
+uint8_t sub_minor_version;
+} firmware_version_info_t;
+
+typedef struct phy_firmware_version_info {
+uint8_t reserved;
+uint8_t major_version;
+uint8_t minor_version;
+uint8_t sub_minor_version;
+} phy_firmware_version_info_t;
+
+#define ENABLE_JUMBO BIT_16
+#define STD_PAUSE 0x20
+#define PP_PAUSE 0x40
+#define LOOP_INTERNAL_PARALLEL 0x02
+#define LOOP_INTERNAL_SERIAL 0x04
+#define LOOP_EXTERNAL_PHY 0x06
+
+typedef struct port_cfg_info {
+uint32_t link_cfg;
+uint32_t max_frame_size;
+} port_cfg_info_t;
+
+enum {
+ PAUSE_MODE_DISABLED,
+ PAUSE_MODE_STANDARD, /* Standard Ethernet Pause */
+ PAUSE_MODE_PER_PRIORITY /* Class Based Pause */
+};
+
+/* Mailbox command parameter structure definition. */
+typedef struct mbx_cmd {
+uint32_t from_mpi;	/* number of registers incoming from MPI to driver */
+uint32_t mb[NUM_MAILBOX_REGS];
+clock_t timeout; /* Timeout in seconds. */
+} mbx_cmd_t;
+
+/* Returned Mailbox registers. */
+typedef struct mbx_data {
+uint32_t from_mpi;	/* number of registers incoming from MPI to driver */
+uint32_t mb[NUM_MAILBOX_REGS];
+} mbx_data_t;
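+
+/*
+ * Illustrative sketch: a mailbox command fills an mbx_cmd_t and collects
+ * the result in an mbx_data_t, e.g. for MBC_ABOUT_FIRMWARE:
+ *
+ *	mbx_cmd_t mc = {0};
+ *	mbx_data_t md;
+ *	mc.mb[0] = MBC_ABOUT_FIRMWARE;
+ *	mc.timeout = MAILBOX_TOV;
+ *	... submit mc and wait, then read the version from md.mb[] ...
+ *
+ * The submit path itself is internal to qlge_mpi.c; this only shows how
+ * the structures compose.
+ */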
+
+/* Address/Length pairs for the coredump. */
+
+#define MPI_CORE_REGS_ADDR 0x00030000
+#define MPI_CORE_REGS_CNT 127
+#define MPI_CORE_SH_REGS_CNT 16
+#define TEST_REGS_ADDR 0x00001000
+#define TEST_REGS_CNT 23
+#define RMII_REGS_ADDR 0x00001040
+#define RMII_REGS_CNT 64
+#define FCMAC1_REGS_ADDR 0x00001080
+#define FCMAC2_REGS_ADDR 0x000010c0
+#define FCMAC_REGS_CNT 64
+#define FC1_MBX_REGS_ADDR 0x00001100
+#define FC2_MBX_REGS_ADDR 0x00001240
+#define FC_MBX_REGS_CNT 64
+#define IDE_REGS_ADDR 0x00001140
+#define IDE_REGS_CNT 64
+#define NIC1_MBX_REGS_ADDR 0x00001180
+#define NIC2_MBX_REGS_ADDR 0x00001280
+#define NIC_MBX_REGS_CNT 64
+#define SMBUS_REGS_ADDR 0x00001200
+#define SMBUS_REGS_CNT 64
+#define I2C_REGS_ADDR 0x00001fc0
+#define I2C_REGS_CNT 64
+#define MEMC_REGS_ADDR 0x00003000
+#define MEMC_REGS_CNT 256
+#define PBUS_REGS_ADDR 0x00007c00
+#define PBUS_REGS_CNT 256
+#define MDE_REGS_ADDR 0x00010000
+#define MDE_REGS_CNT 6
+#define CODE_RAM_ADDR 0x00020000
+#define CODE_RAM_CNT 0x2000
+#define MEMC_RAM_ADDR 0x00100000
+#define MEMC_RAM_CNT 0x2000
+
+/* 64 probes, 8 bytes per probe + 4 bytes to list the probe ID */
+#define PROBE_DATA_LENGTH_WORDS ((64 * 2) + 1)
+#define NUMBER_OF_PROBES 34
+#define NUMBER_ROUTING_REG_ENTRIES 48
+#define WORDS_PER_ROUTING_REG_ENTRY 4
+#define MAC_PROTOCOL_REGISTER_WORDS ((512 * 3) + (32 * 2) + (4096 * 1) + \
+ (4096 * 1) + (4 * 2) + (8 * 2) + \
+ (16 * 1) + (4 * 1) + (4 * 4) + \
+ (4 * 1))
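+/*
+ * For reference, the sum above works out to
+ * 1536 + 64 + 4096 + 4096 + 8 + 16 + 16 + 4 + 16 + 4 = 9856 words.
+ */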
+/* Save both the address and data register */
+#define WORDS_PER_MAC_PROT_ENTRY 2
+
+#define MPI_COREDUMP_COOKIE 0x5555aaaa
+typedef struct mpi_coredump_global_header {
+	uint32_t cookie;
+	char id_string[16];
+	uint32_t time_lo;
+	uint32_t time_hi;
+	uint32_t total_image_size;
+	uint32_t global_header_size;
+	char driver_info[0xE0];
+} mpi_coredump_global_header_t;
+
+typedef struct mpi_coredump_segment_header {
+	uint32_t cookie;
+	uint32_t seg_number;
+	uint32_t seg_size;
+	uint32_t extra;
+	char description[16];
+} mpi_coredump_segment_header_t;
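+
+/*
+ * Illustrative check (hypothetical helper, for exposition): every
+ * segment of a coredump begins with the same cookie, so a reader can
+ * validate a header before trusting seg_size.
+ */
+#define	COREDUMP_SEG_VALID(hdrp)	\
+	((hdrp)->cookie == MPI_COREDUMP_COOKIE)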
+
+typedef struct ql_mpi_coredump {
+	mpi_coredump_global_header_t mpi_global_header;
+
+	mpi_coredump_segment_header_t core_regs_seg_hdr;
+	uint32_t mpi_core_regs[MPI_CORE_REGS_CNT];
+	uint32_t mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
+
+	mpi_coredump_segment_header_t test_logic_regs_seg_hdr;
+	uint32_t test_logic_regs[TEST_REGS_CNT];
+
+	mpi_coredump_segment_header_t rmii_regs_seg_hdr;
+	uint32_t rmii_regs[RMII_REGS_CNT];
+
+	mpi_coredump_segment_header_t fcmac1_regs_seg_hdr;
+	uint32_t fcmac1_regs[FCMAC_REGS_CNT];
+
+	mpi_coredump_segment_header_t fcmac2_regs_seg_hdr;
+	uint32_t fcmac2_regs[FCMAC_REGS_CNT];
+
+	mpi_coredump_segment_header_t fc1_mbx_regs_seg_hdr;
+	uint32_t fc1_mbx_regs[FC_MBX_REGS_CNT];
+
+	mpi_coredump_segment_header_t ide_regs_seg_hdr;
+	uint32_t ide_regs[IDE_REGS_CNT];
+
+	mpi_coredump_segment_header_t nic1_mbx_regs_seg_hdr;
+	uint32_t nic1_mbx_regs[NIC_MBX_REGS_CNT];
+
+	mpi_coredump_segment_header_t smbus_regs_seg_hdr;
+	uint32_t smbus_regs[SMBUS_REGS_CNT];
+
+	mpi_coredump_segment_header_t fc2_mbx_regs_seg_hdr;
+	uint32_t fc2_mbx_regs[FC_MBX_REGS_CNT];
+
+	mpi_coredump_segment_header_t nic2_mbx_regs_seg_hdr;
+	uint32_t nic2_mbx_regs[NIC_MBX_REGS_CNT];
+
+	mpi_coredump_segment_header_t i2c_regs_seg_hdr;
+	uint32_t i2c_regs[I2C_REGS_CNT];
+
+	mpi_coredump_segment_header_t memc_regs_seg_hdr;
+	uint32_t memc_regs[MEMC_REGS_CNT];
+
+	mpi_coredump_segment_header_t pbus_regs_seg_hdr;
+	uint32_t pbus_regs[PBUS_REGS_CNT];
+
+	mpi_coredump_segment_header_t mde_regs_seg_hdr;
+	uint32_t mde_regs[MDE_REGS_CNT];
+
+	mpi_coredump_segment_header_t xaui_an_hdr;
+	uint32_t serdes_xaui_an[14];
+
+	mpi_coredump_segment_header_t xaui_hss_pcs_hdr;
+	uint32_t serdes_xaui_hss_pcs[33];
+
+	mpi_coredump_segment_header_t xfi_an_hdr;
+	uint32_t serdes_xfi_an[14];
+
+	mpi_coredump_segment_header_t xfi_train_hdr;
+	uint32_t serdes_xfi_train[12];
+
+	mpi_coredump_segment_header_t xfi_hss_pcs_hdr;
+	uint32_t serdes_xfi_hss_pcs[15];
+
+	mpi_coredump_segment_header_t xfi_hss_tx_hdr;
+	uint32_t serdes_xfi_hss_tx[32];
+
+	mpi_coredump_segment_header_t xfi_hss_rx_hdr;
+	uint32_t serdes_xfi_hss_rx[32];
+
+	mpi_coredump_segment_header_t xfi_hss_pll_hdr;
+	uint32_t serdes_xfi_hss_pll[32];
+
+	mpi_coredump_segment_header_t nic_regs_seg_hdr;
+	uint32_t nic_regs[64];
+
+	/* one interrupt state for each CQ */
+	mpi_coredump_segment_header_t intr_states_seg_hdr;
+	uint32_t intr_states[MAX_RX_RINGS];
+
+	mpi_coredump_segment_header_t xgmac_seg_hdr;
+#define XGMAC_REGISTER_END 0x740
+	uint32_t xgmac[XGMAC_REGISTER_END];
+
+	mpi_coredump_segment_header_t probe_dump_seg_hdr;
+	uint32_t probe_dump[PROBE_DATA_LENGTH_WORDS * NUMBER_OF_PROBES];
+
+	mpi_coredump_segment_header_t routing_reg_seg_hdr;
+	uint32_t routing_regs[NUMBER_ROUTING_REG_ENTRIES *
+	    WORDS_PER_ROUTING_REG_ENTRY];
+
+	mpi_coredump_segment_header_t mac_prot_reg_seg_hdr;
+	uint32_t mac_prot_regs[MAC_PROTOCOL_REGISTER_WORDS *
+	    WORDS_PER_MAC_PROT_ENTRY];
+
+	mpi_coredump_segment_header_t ets_seg_hdr;
+	uint32_t ets[8 + 2];
+
+	mpi_coredump_segment_header_t code_ram_seg_hdr;
+	uint32_t code_ram[CODE_RAM_CNT];
+
+	mpi_coredump_segment_header_t memc_ram_seg_hdr;
+	uint32_t memc_ram[MEMC_RAM_CNT];
+} ql_mpi_coredump_t;
+
+#define WCS_MPI_CODE_RAM_LENGTH (0x2000*4)
+#define MEMC_MPI_RAM_LENGTH (0x2000*4)
+
+#define XG_SERDES_ADDR_RDY BIT_31
+#define XG_SERDES_ADDR_R BIT_30
+
+#define CORE_SEG_NUM 1
+#define TEST_LOGIC_SEG_NUM 2
+#define RMII_SEG_NUM 3
+#define FCMAC1_SEG_NUM 4
+#define FCMAC2_SEG_NUM 5
+#define FC1_MBOX_SEG_NUM 6
+#define IDE_SEG_NUM 7
+#define NIC1_MBOX_SEG_NUM 8
+#define SMBUS_SEG_NUM 9
+#define FC2_MBOX_SEG_NUM 10
+#define NIC2_MBOX_SEG_NUM 11
+#define I2C_SEG_NUM 12
+#define MEMC_SEG_NUM 13
+#define PBUS_SEG_NUM 14
+#define MDE_SEG_NUM 15
+#define NIC1_CONTROL_SEG_NUM 16
+#define NIC2_CONTROL_SEG_NUM 17
+#define NIC1_XGMAC_SEG_NUM 18
+#define NIC2_XGMAC_SEG_NUM 19
+#define WCS_RAM_SEG_NUM 20
+#define MEMC_RAM_SEG_NUM 21
+#define XAUI_AN_SEG_NUM 22
+#define XAUI_HSS_PCS_SEG_NUM 23
+#define XFI_AN_SEG_NUM 24
+#define XFI_TRAIN_SEG_NUM 25
+#define XFI_HSS_PCS_SEG_NUM 26
+#define XFI_HSS_TX_SEG_NUM 27
+#define XFI_HSS_RX_SEG_NUM 28
+#define XFI_HSS_PLL_SEG_NUM 29
+#define INTR_STATES_SEG_NUM 31
+#define ETS_SEG_NUM 34
+#define PROBE_DUMP_SEG_NUM 35
+#define ROUTING_INDEX_SEG_NUM 36
+#define MAC_PROTOCOL_SEG_NUM 37
+
+/* Force byte packing for the following structures */
+#pragma pack(1)
+
+/*
+ * Work Queue (Request Queue) Initialization Control Block (WQICB)
+ */
+
+struct wqicb_t {
+ uint16_t len;
+#define Q_LEN_V (1 << 4)
+#define Q_LEN_CPP_CONT 0x0000
+#define Q_LEN_CPP_16 0x0001
+#define Q_LEN_CPP_32 0x0002
+#define Q_LEN_CPP_64 0x0003
+#define Q_LEN_CPP_512 0x0006
+ uint16_t flags;
+#define Q_PRI_SHIFT 1
+#define Q_FLAGS_LC 0x1000
+#define Q_FLAGS_LB 0x2000
+#define Q_FLAGS_LI 0x4000
+#define Q_FLAGS_LO 0x8000
+ uint16_t cq_id_rss;
+#define Q_CQ_ID_RSS_RV 0x8000
+ uint16_t rid;
+ uint32_t wq_addr_lo;
+ uint32_t wq_addr_hi;
+ uint32_t cnsmr_idx_addr_lo;
+ uint32_t cnsmr_idx_addr_hi;
+};
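+
+/*
+ * Example (illustrative; "tx_ring_size" is a placeholder): when a work
+ * queue is initialized, the ring size is encoded together with the
+ * valid bit and a coalescing page size, roughly
+ *
+ *	wqicb->len = (uint16_t)(tx_ring_size | Q_LEN_V | Q_LEN_CPP_16);
+ */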
+
+/*
+ * Completion Queue (Response Queue) Initialization Control Block (CQICB)
+ */
+
+struct cqicb_t {
+ uint8_t msix_vect;
+ uint8_t reserved1;
+ uint8_t reserved2;
+ uint8_t flags;
+#define FLAGS_LV 0x08
+#define FLAGS_LS 0x10
+#define FLAGS_LL 0x20
+#define FLAGS_LI 0x40
+#define FLAGS_LC 0x80
+ uint16_t len;
+#define LEN_V (1 << 4)
+#define LEN_CPP_CONT 0x0000
+#define LEN_CPP_32 0x0001
+#define LEN_CPP_64 0x0002
+#define LEN_CPP_128 0x0003
+ uint16_t rid;
+ uint32_t cq_base_addr_lo; /* completion queue base address */
+ uint32_t cq_base_addr_hi;
+	uint32_t prod_idx_addr_lo;	/* host shadow copy of the */
+					/* completion queue producer index */
+	uint32_t prod_idx_addr_hi;
+ uint16_t pkt_delay;
+ uint16_t irq_delay;
+ uint32_t lbq_addr_lo;
+ uint32_t lbq_addr_hi;
+ uint16_t lbq_buf_size;
+ uint16_t lbq_len; /* entry count */
+ uint32_t sbq_addr_lo;
+ uint32_t sbq_addr_hi;
+ uint16_t sbq_buf_size;
+ uint16_t sbq_len; /* entry count */
+};
+
+struct ricb {
+ uint8_t base_cq;
+#define RSS_L4K 0x80
+ uint8_t flags;
+#define RSS_L6K 0x01
+#define RSS_LI 0x02
+#define RSS_LB 0x04
+#define RSS_LM 0x08
+#define RSS_RI4 0x10
+#define RSS_RT4 0x20
+#define RSS_RI6 0x40
+#define RSS_RT6 0x80
+ uint16_t mask;
+#define RSS_HASH_CQ_ID_MAX 1024
+ uint8_t hash_cq_id[RSS_HASH_CQ_ID_MAX];
+ uint32_t ipv6_hash_key[10];
+ uint32_t ipv4_hash_key[4];
+};
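+
+/*
+ * Example (illustrative only): enabling 4-tuple RSS for TCP over both
+ * IPv4 and IPv6 might set
+ *
+ *	ricb->flags = RSS_L6K | RSS_LI | RSS_LB | RSS_RT4 | RSS_RT6;
+ *	ricb->mask = (uint16_t)(RSS_HASH_CQ_ID_MAX - 1);
+ */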
+
+/*
+ * Host Command IOCB Formats
+ */
+
+#define OPCODE_OB_MAC_IOCB 0x01
+#define OPCODE_OB_MAC_OFFLOAD_IOCB 0x02
+
+#define OPCODE_IB_MAC_IOCB 0x20
+#define OPCODE_IB_SYS_EVENT_IOCB 0x3f
+
+/*
+ * The following constants define control bits for buffer
+ * length fields for all IOCB's.
+ */
+#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */
+#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */
+
+struct oal_entry {
+	uint32_t buf_addr_low;
+	uint32_t buf_addr_high;
+	uint32_t buf_len;
+};
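+
+/*
+ * Example (illustrative; "entry" and "len" are placeholders): the
+ * transmit path terminates a scatter/gather list by tagging the length
+ * of its final element:
+ *
+ *	entry->buf_len = (uint32_t)(len | OAL_LAST_ENTRY);
+ */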
+
+/* 32 words, 128 bytes */
+#define TX_DESC_PER_IOCB 8 /* Number of descs in one TX IOCB */
+
+struct ob_mac_iocb_req {
+ uint8_t opcode;
+ uint8_t flag0;
+#define OB_MAC_IOCB_REQ_IPv6 0x80
+#define OB_MAC_IOCB_REQ_IPv4 0x40
+#define OB_MAC_IOCB_REQ_D 0x08 /* disable generation of comp. msg */
+#define OB_MAC_IOCB_REQ_I 0x02 /* disable generation of intr at comp */
+ uint8_t flag1;
+#define OB_MAC_IOCB_REQ_TC 0x80 /* enable TCP checksum offload */
+#define OB_MAC_IOCB_REQ_UC 0x40 /* enable UDP checksum offload */
+#define OB_MAC_IOCB_REQ_LSO 0x20 /* enable LSO offload */
+ uint8_t flag2;
+#define OB_MAC_IOCB_REQ_VLAN_OFFSET_MASK 0xF8 /* VLAN TCI insert */
+#define OB_MAC_IOCB_REQ_V 0x04 /* insert VLAN TCI */
+#define OB_MAC_IOCB_REQ_DFP 0x02 /* Drop for Failover port */
+#define OB_MAC_IOCB_REQ_IC 0x01 /* enable IP checksum offload */
+ uint32_t unused;
+ uint32_t reserved_cq_tag;
+	uint32_t frame_len;	/* max 9000 for non-LSO, 16M for LSO */
+ uint32_t tid;
+ uint32_t txq_idx;
+ uint16_t protocol_hdr_len;
+ uint16_t hdr_off; /* tcp/udp hdr offset */
+ uint16_t vlan_tci;
+ uint16_t mss;
+
+	struct oal_entry oal_entry[TX_DESC_PER_IOCB]; /* max 0xFFFFF (1M) bytes */
+
+};
+/* 16 words, 64 bytes */
+struct ob_mac_iocb_rsp {
+ uint8_t opcode;
+ uint8_t flags1;
+#define OB_MAC_IOCB_RSP_OI	0x01	/* override intr delay */
+#define OB_MAC_IOCB_RSP_I	0x02	/* disable intr generation */
+#define OB_MAC_IOCB_RSP_E	0x08
+#define OB_MAC_IOCB_RSP_S	0x10	/* frame too short */
+#define OB_MAC_IOCB_RSP_L	0x20	/* frame too large */
+#define OB_MAC_IOCB_RSP_P	0x40	/* padded */
+
+ uint8_t flags2;
+ uint8_t flags3;
+
+#define OB_MAC_IOCB_RSP_B 0x80
+
+ uint32_t tid;
+ uint32_t txq_idx;
+
+ uint32_t reserved[13];
+};
+
+#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff
+
+struct ib_mac_iocb_rsp {
+ uint8_t opcode; /* 0x20 */
+ uint8_t flags1;
+#define IB_MAC_IOCB_RSP_OI	0x01	/* Override intr delay */
+#define IB_MAC_IOCB_RSP_I	0x02	/* Disable intr generation */
+#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
+#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
+#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
+#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
+#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
+#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
+#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
+#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
+#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
+ uint8_t flags2;
+#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
+#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
+#define IB_MAC_IOCB_RSP_ERR_MASK	0x1c	/* error code mask */
+#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
+#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
+#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
+#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
+#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
+#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
+#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
+#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
+#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
+ uint8_t flags3;
+#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
+#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
+#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
+#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
+#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
+#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
+#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
+#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
+#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
+ uint32_t data_len;
+ uint64_t data_addr;
+ uint32_t rss;
+ uint16_t vlan_id; /* 12 bits */
+#define IB_MAC_IOCB_RSP_VLAN_ID_MASK 0xFFF
+#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
+#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
+
+ uint16_t reserved1;
+ uint32_t reserved2[6];
+ uint8_t reserved3[3];
+ uint8_t flags4;
+#define IB_MAC_IOCB_RSP_HV 0x20
+#define IB_MAC_IOCB_RSP_HS 0x40
+#define IB_MAC_IOCB_RSP_HL 0x80
+ uint32_t hdr_len;
+ uint64_t hdr_addr;
+};
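+
+/*
+ * Illustrative helper (hypothetical, for exposition): the receive error
+ * code occupies bits 2..4 of flags2, so a bad frame can be classified
+ * with
+ */
+#define	IB_MAC_IOCB_RSP_ERR(flags2)	\
+	((flags2) & IB_MAC_IOCB_RSP_ERR_MASK)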
+
+/* 16 words, 64 bytes */
+struct ib_sys_event_iocb_rsp {
+ uint8_t opcode;
+ uint8_t flag0;
+ uint8_t event_type;
+ uint8_t q_id;
+ uint32_t reserved[15];
+};
+#define SYS_EVENT_PORT_LINK_UP 0x0
+#define SYS_EVENT_PORT_LINK_DOWN 0x1
+#define SYS_EVENT_MULTIPLE_CAM_HITS 0x6
+#define SYS_EVENT_SOFT_ECC_ERR 0x7
+#define SYS_EVENT_MGMT_FATAL_ERR 0x8 /* MPI_PROCESSOR */
+#define SYS_EVENT_MAC_INTERRUPT 0x9
+#define SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF 0x40
+
+/*
+ * Status Register (#define STATUS) bit definitions.
+ */
+#define STATUS_FE (1 << 0)
+#define STATUS_PI (1 << 1)
+#define STATUS_PL0	(1 << 2)
+#define STATUS_PL1 (1 << 3)
+#define STATUS_PI0 (1 << 4)
+#define STATUS_PI1 (1 << 5)
+#define STATUS_FUNC_ID_MASK 0x000000c0
+#define STATUS_FUNC_ID_SHIFT 6
+#define STATUS_F0E (1 << 8)
+#define STATUS_F1E (1 << 9)
+#define STATUS_F2E (1 << 10)
+#define STATUS_F3E (1 << 11)
+#define STATUS_NFE (1 << 12)
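+
+/*
+ * Illustrative helper (hypothetical, for exposition): extract this
+ * function's ID from a STATUS register value.
+ */
+#define	STATUS_FUNC_ID(status)	\
+	(((status) & STATUS_FUNC_ID_MASK) >> STATUS_FUNC_ID_SHIFT)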
+
+/*
+ * Generic Response Queue IOCB format that abstracts the differences
+ * between the IB_MAC and OB_MAC IOCBs.
+ */
+struct net_rsp_iocb {
+ uint8_t opcode;
+ uint8_t flag0;
+ uint8_t flag1;
+ uint8_t flag2;
+ uint32_t reserved[15];
+};
+
+/* Restore original packing rules */
+#pragma pack()
+
+#define RESPONSE_ENTRY_SIZE (sizeof (struct net_rsp_iocb))
+#define REQUEST_ENTRY_SIZE (sizeof (struct ob_mac_iocb_req))
+
+/* flash */
+/* Little endian machine correction defines. */
+#ifdef _LITTLE_ENDIAN
+#define LITTLE_ENDIAN_16(x)
+#define LITTLE_ENDIAN_24(x)
+#define LITTLE_ENDIAN_32(x)
+#define LITTLE_ENDIAN_64(x)
+#define LITTLE_ENDIAN(bp, bytes)
+#define BIG_ENDIAN_16(x) ql_change_endian((uint8_t *)x, 2)
+#define BIG_ENDIAN_24(x) ql_change_endian((uint8_t *)x, 3)
+#define BIG_ENDIAN_32(x) ql_change_endian((uint8_t *)x, 4)
+#define BIG_ENDIAN_64(x) ql_change_endian((uint8_t *)x, 8)
+#define BIG_ENDIAN(bp, bytes) ql_change_endian((uint8_t *)bp, bytes)
+#endif /* _LITTLE_ENDIAN */
+
+/* Big endian machine correction defines. */
+#ifdef _BIG_ENDIAN
+#define LITTLE_ENDIAN_16(x) ql_change_endian((uint8_t *)x, 2)
+#define LITTLE_ENDIAN_24(x) ql_change_endian((uint8_t *)x, 3)
+#define LITTLE_ENDIAN_32(x) ql_change_endian((uint8_t *)x, 4)
+#define LITTLE_ENDIAN_64(x) ql_change_endian((uint8_t *)x, 8)
+#define LITTLE_ENDIAN(bp, bytes) ql_change_endian((uint8_t *)bp, bytes)
+#define BIG_ENDIAN_16(x)
+#define BIG_ENDIAN_24(x)
+#define BIG_ENDIAN_32(x)
+#define BIG_ENDIAN_64(x)
+#define BIG_ENDIAN(bp, bytes)
+#endif /* _BIG_ENDIAN */
+
+void ql_change_endian(uint8_t *, size_t);
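+
+/*
+ * A minimal sketch of the byte-swap routine declared above (the actual
+ * implementation lives in the driver sources; this is for exposition
+ * only): reverse the buffer in place.
+ *
+ *	void
+ *	ql_change_endian(uint8_t *buf, size_t size)
+ *	{
+ *		uint8_t byte;
+ *		size_t cnt = 0, cnt1 = size - 1;
+ *
+ *		while (cnt < cnt1) {
+ *			byte = buf[cnt1];
+ *			buf[cnt1] = buf[cnt];
+ *			buf[cnt] = byte;
+ *			cnt1--;
+ *			cnt++;
+ *		}
+ *	}
+ */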
+
+/* Flash Address Register 0x88 */
+#define FLASH_RDY_FLAG BIT_31
+#define FLASH_R_FLAG BIT_30
+#define FLASH_ERR_FLAG BIT_29
+#define FLASH_CONF_ADDR 0x7D0000u
+#define FLASH_ADDR_MASK 0x7F0000
+
+#define FLASH_WRSR_CMD 0x01
+#define FLASH_PP_CMD 0x02
+#define FLASH_READ_CMD 0x03
+#define FLASH_WRDI_CMD 0x04
+#define FLASH_RDSR_CMD 0x05
+#define FLASH_WREN_CMD 0x06
+#define FLASH_RDID_CMD 0x9F
+#define FLASH_RES_CMD 0xAB
+
+/*
+ * Flash definitions.
+ */
+typedef struct ql_flash_info {
+ uint32_t type; /* flash type */
+ uint32_t flash_size; /* length in bytes of flash */
+ uint32_t sec_mask; /* sector number mask */
+ uint8_t flash_manuf; /* flash chip manufacturer id */
+ uint8_t flash_id; /* flash chip id */
+ uint8_t flash_cap; /* flash chip capacity */
+} ql_flash_info_t;
+
+/*
+ * Flash Description Table
+ */
+#define FLASH_DESC_VERSION 1
+#define FLASH_DESC_VAILD 0x44494C51 /* "QLID" */
+typedef struct flash_desc {
+ uint32_t flash_valid;
+ uint16_t flash_version;
+ uint16_t flash_len; /* flash description table length */
+ uint16_t flash_checksum;
+ uint16_t flash_unused;
+ uint8_t flash_model[16];
+ uint16_t flash_manuf;
+ uint16_t flash_id;
+ uint8_t flash_flag;
+ uint8_t erase_cmd;
+ uint8_t alt_erase_cmd;
+ uint8_t write_enable_cmd;
+ uint8_t write_enable_bits;
+ uint8_t write_statusreg_cmd;
+ uint8_t unprotect_sector_cmd;
+ uint8_t read_manuf_cmd;
+ uint32_t block_size;
+ uint32_t alt_block_size;
+ uint32_t flash_size;
+ uint32_t write_enable_data;
+ uint8_t readid_address_len;
+ uint8_t write_disable_bits;
+ uint8_t read_device_id_len;
+ uint8_t chip_erase_cmd;
+ uint16_t read_timeout;
+ uint8_t protect_sector_cmd;
+ uint8_t exp_reserved[65];
+} flash_desc_t;
+
+/* flash manufacturer id's */
+#define AMD_FLASH 0x01 /* AMD / Spansion */
+#define ST_FLASH 0x20 /* ST Electronics */
+#define SST_FLASH 0xbf /* SST Electronics */
+#define MXIC_FLASH 0xc2 /* Macronix (MXIC) */
+#define ATMEL_FLASH 0x1f /* Atmel (AT26DF081A) */
+#define WINBOND_FLASH 0xef /* Winbond (W25X16,W25X32) */
+#define INTEL_FLASH 0x89 /* Intel (QB25F016S33B8) */
+
+/* flash id defines */
+#define AMD_FLASHID_128K 0x6e /* 128k AMD flash chip */
+#define AMD_FLASHID_512K 0x4f /* 512k AMD flash chip */
+#define AMD_FLASHID_512Kt 0xb9 /* 512k AMD flash chip - top boot blk */
+#define AMD_FLASHID_512Kb 0xba /* 512k AMD flash chip - btm boot blk */
+#define AMD_FLASHID_1024K 0x38 /* 1 MB AMD flash chip */
+#define ST_FLASHID_128K 0x23 /* 128k ST flash chip */
+#define ST_FLASHID_512K 0xe3 /* 512k ST flash chip */
+#define ST_FLASHID_M25PXX 0x20 /* M25Pxx ST flash chip */
+#define SST_FLASHID_128K 0xd5 /* 128k SST flash chip */
+#define SST_FLASHID_1024K 0xd8 /* 1 MB SST flash chip */
+#define SST_FLASHID_1024K_A 0x80 /* 1 MB SST 25LF080A flash chip */
+#define SST_FLASHID_1024K_B 0x8e /* 1 MB SST 25VF080B flash chip */
+#define SST_FLASHID_2048K 0x25 /* 2 MB SST 25VF016B flash chip */
+#define MXIC_FLASHID_512K 0x4f /* 512k MXIC flash chip */
+#define MXIC_FLASHID_1024K 0x38 /* 1 MB MXIC flash chip */
+#define MXIC_FLASHID_25LXX 0x20 /* 25Lxx MXIC flash chip */
+#define ATMEL_FLASHID_1024K 0x45 /* 1 MB ATMEL flash chip */
+#define SPAN_FLASHID_2048K 0x02 /* 2 MB Spansion flash chip */
+#define WINBOND_FLASHID 0x30 /* Winbond W25Xxx flash chip */
+#define INTEL_FLASHID 0x89 /* Intel QB25F016S33B8 flash chip */
+
+/* flash type defines */
+#define FLASH128 BIT_0
+#define FLASH512 BIT_1
+#define FLASH512S BIT_2
+#define FLASH1024 BIT_3
+#define FLASH2048 BIT_4
+#define FLASH4096 BIT_5
+#define FLASH8192 BIT_6
+#define FLASH_PAGE BIT_31
+#define FLASH_LEGACY (FLASH128 | FLASH512S)
+
+#define FLASH_FIRMWARE_IMAGE_ADDR 0x100000 /* 1M */
+typedef struct {
+ uint8_t signature[2];
+ uint8_t reserved[0x16];
+ uint8_t dataoffset[2];
+ uint8_t pad[6];
+} pci_header_t;
+
+typedef struct {
+ uint8_t signature[4];
+ uint8_t vid[2];
+ uint8_t did[2];
+ uint8_t reserved0[2];
+ uint8_t pcidatalen[2];
+ uint8_t pcidatarev;
+ uint8_t classcode[3];
+ uint8_t imagelength[2]; /* In sectors */
+ uint8_t revisionlevel[2];
+ uint8_t codetype;
+ uint8_t indicator;
+ uint8_t reserved1[2];
+ uint8_t pad[8];
+} pci_data_t;
+
+#define PCI_HEADER0 0x55
+#define PCI_HEADER1 0xAA
+#define PCI_DATASIG "PCIR"
+#define PCI_SECTOR_SIZE 0x200
+#define PCI_CODE_X86PC 0
+#define PCI_CODE_FCODE 1
+#define PCI_CODE_HPPA 2
+#define PCI_CODE_EFI 3
+#define PCI_CODE_FW 0xfe
+#define PCI_IND_LAST_IMAGE 0x80
+#define SBUS_CODE_FCODE 0xf1
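+
+/*
+ * Example (illustrative; "hdr" is a placeholder pci_header_t pointer):
+ * a PCI expansion ROM image is recognized by its 0x55 0xAA signature:
+ *
+ *	if (hdr->signature[0] == PCI_HEADER0 &&
+ *	    hdr->signature[1] == PCI_HEADER1)
+ *		... image header is plausible ...
+ */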
+
+#define FBUFSIZE 100
+/* Flash Layout Table Data Structure (FLTDS) */
+#define FLASH_FLTDS_SIGNATURE 0x544C4651 /* "QFLT" */
+
+typedef struct ql_fltds {
+ uint32_t signature;
+ uint16_t flt_addr_lo;
+ uint16_t flt_addr_hi;
+ uint8_t version;
+ uint8_t reserved;
+ uint16_t checksum;
+} ql_fltds_t;
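+
+/*
+ * Example (illustrative): the FLT address is split across two 16-bit
+ * fields and is reassembled as
+ *
+ *	uint32_t flt_addr = fltds->flt_addr_lo |
+ *	    ((uint32_t)fltds->flt_addr_hi << 16);
+ */
+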
+/* Image Layout Table Data Structure (ILTDS) */
+#define FLASH_ILTDS_SIGNATURE 0x4D494651 /* "QFIM" */
+typedef struct ql_iltds_header {
+ uint32_t signature;
+ uint16_t table_version; /* version of this structure */
+ uint16_t length; /* length of the table */
+ uint16_t checksum;
+ uint16_t number_entries; /* Number of type/len/size entries */
+ uint16_t reserved;
+ uint16_t version; /* version of the image */
+} ql_iltds_header_t;
+
+#define IMAGE_TABLE_HEADER_LEN sizeof (ql_iltds_header_t)
+
+#define ILTDS_REGION_VERSION_LEN_NA 0 /* version not applicable */
+typedef struct ql_iltds_img_entry {
+ uint16_t region_type;
+ uint8_t region_version_len;
+ uint8_t region_version[3];
+ uint16_t offset_lo;
+ uint16_t offset_hi;
+ uint16_t size_lo;
+ uint16_t size_hi;
+ uint8_t swap_mode;
+#define ILTDS_IMG_SWAP_NONE 0 /* no swap needed */
+#define ILTDS_IMG_SWAP_WORD 1
+
+ uint8_t card_type;
+#define ILTDS_IMG_CARD_TYPE_ALL 0 /* apply to all types */
+#define ILTDS_IMG_CARD_TYPE_SR 1 /* apply to SR/fc cards */
+#define ILTDS_IMG_CARD_TYPE_COPPER 2 /* apply to Copper cards */
+#define ILTDS_IMG_CARD_TYPE_MEZZ 4 /* apply to Mezz cards */
+} ql_iltds_img_entry_t;
+
+#define IMAGE_TABLE_ENTRY_LEN sizeof (ql_iltds_img_entry_t)
+
+typedef struct ql_iltds_time_stamp {
+ uint16_t region_type;
+ uint8_t region_version_len;
+ uint8_t region_version[3];
+ uint8_t year;
+ uint8_t month;
+ uint8_t day;
+ uint8_t hour;
+ uint8_t min;
+ uint8_t sec;
+ uint32_t reserved;
+} ql_iltds_time_stamp_t;
+
+#define IMAGE_TABLE_TIME_STAMP_LEN sizeof (ql_iltds_time_stamp_t)
+
+#define IMAGE_TABLE_IMAGE_DEFAULT_ENTRIES 5
+
+typedef struct ql_iltds_description_header {
+ ql_iltds_header_t iltds_table_header;
+ ql_iltds_img_entry_t img_entry[IMAGE_TABLE_IMAGE_DEFAULT_ENTRIES];
+ ql_iltds_time_stamp_t time_stamp;
+} ql_iltds_description_header_t;
+
+#define ILTDS_DESCRIPTION_HEADERS_LEN sizeof (ql_iltds_description_header_t)
+
+/* flash layout table definition */
+/* header */
+typedef struct ql_flt_header {
+ uint16_t version;
+	uint16_t length;	/* length of the FLT table, excluding this header */
+ uint16_t checksum;
+ uint16_t reserved;
+} ql_flt_header_t;
+
+/* table entry */
+typedef struct ql_flt_entry {
+ uint8_t region;
+ uint8_t reserved0;
+ uint8_t attr;
+#define FLT_ATTR_READ_ONLY BIT_0
+#define FLT_ATTR_NEED_FW_RESTART BIT_1
+#define FLT_ATTR_NEED_DATA_REALOAD BIT_2
+ uint8_t reserved1;
+ uint32_t size;
+ uint32_t begin_addr;
+ uint32_t end_addr;
+} ql_flt_entry_t;
+
+/* flt table */
+typedef struct ql_flt {
+ ql_flt_header_t header;
+ uint16_t num_entries;
+ ql_flt_entry_t *ql_flt_entry_ptr;
+} ql_flt_t;
+
+/* Nic Configuration Table */
+#define FLASH_NIC_CONFIG_SIGNATURE 0x30303038 /* "8000" */
+
+enum {
+ DATA_TYPE_NONE,
+ DATA_TYPE_FACTORY_MAC_ADDR,
+ DATA_TYPE_CLP_MAC_ADDR,
+ DATA_TYPE_CLP_VLAN_MAC_ADDR,
+ DATA_TYPE_RESERVED,
+ DATA_TYPE_LAST_ENTRY
+};
+
+typedef struct ql_nic_config {
+ uint32_t signature;
+ uint16_t version;
+ uint16_t size;
+ uint16_t checksum;
+ uint16_t reserved0;
+ uint16_t total_data_size;
+ uint16_t num_of_entries;
+ uint8_t factory_data_type;
+ uint8_t factory_data_type_size;
+ uint8_t factory_MAC[6];
+ uint8_t clp_data_type;
+ uint8_t clp_data_type_size;
+ uint8_t clp_MAC[6];
+ uint8_t clp_vlan_data_type;
+ uint8_t clp_vlan_data_type_size;
+ uint16_t vlan_id;
+ uint8_t last_data_type;
+ uint8_t last_data_type_size;
+ uint16_t last_entry;
+ uint8_t reserved1[464];
+ uint16_t subsys_vendor_id;
+ uint16_t subsys_device_id;
+ uint8_t reserved2[4];
+} ql_nic_config_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _QLGE_HW_H */
diff --git a/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_open.h b/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_open.h
new file mode 100644
index 0000000000..78aceebd0d
--- /dev/null
+++ b/usr/src/uts/common/sys/fibre-channel/fca/qlge/qlge_open.h
@@ -0,0 +1,49 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 QLogic Corporation. All rights reserved.
+ */
+
+#ifndef _QLGE_OPEN_H
+#define _QLGE_OPEN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef VERSIONSTR
+#define VERSIONSTR "20091001-v1.00"
+#endif
+
+#ifndef QL_DEBUG
+#define QL_DEBUG 0x0
+#endif
+
+#ifndef __func__
+#define __func__ "qlge"
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _QLGE_OPEN_H */
diff --git a/usr/src/uts/intel/Makefile.intel.shared b/usr/src/uts/intel/Makefile.intel.shared
index 6b40f9581f..f1ceb0257e 100644
--- a/usr/src/uts/intel/Makefile.intel.shared
+++ b/usr/src/uts/intel/Makefile.intel.shared
@@ -302,6 +302,7 @@ DRV_KMODS += ptc
DRV_KMODS += ptm
DRV_KMODS += pts
DRV_KMODS += ptsl
+DRV_KMODS += qlge
DRV_KMODS += radeon
DRV_KMODS += ral
DRV_KMODS += ramdisk
diff --git a/usr/src/uts/intel/qlge/Makefile b/usr/src/uts/intel/qlge/Makefile
new file mode 100644
index 0000000000..52f1c143a2
--- /dev/null
+++ b/usr/src/uts/intel/qlge/Makefile
@@ -0,0 +1,93 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# This makefile drives the production of the qlge driver kernel module.
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+COMMON_BASE = ../../../common
+
+
+ARCHDIR:sh = cd ..; basename `pwd`
+
+#
+# Define the module and object file sets.
+#
+MODULE = qlge
+OBJECTS = $(QLGE_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(QLGE_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+
+#
+# Include common rules.
+#
+include ../Makefile.$(ARCHDIR)
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# Overrides and depends_on
+#
+MODSTUBS_DIR = $(OBJS_DIR)
+
+INC_PATH += -I$(ROOT)/usr/include
+INC_PATH += -I$(UTSBASE)/common/sys
+INC_PATH += -I$(UTSBASE)/common/sys/fibre-channel/fca/qlge
+
+LDFLAGS += -dy -Nmisc/mac -Ndrv/ip
+
+C99MODE= -xc99=%all
+C99LMODE= -Xc99=%all
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include ../Makefile.targ
diff --git a/usr/src/uts/sparc/Makefile.sparc.shared b/usr/src/uts/sparc/Makefile.sparc.shared
index 27e995a8b8..7aa463978d 100644
--- a/usr/src/uts/sparc/Makefile.sparc.shared
+++ b/usr/src/uts/sparc/Makefile.sparc.shared
@@ -302,6 +302,7 @@ DRV_KMODS += fcip
DRV_KMODS += fcsm
DRV_KMODS += fp
DRV_KMODS += qlc
+DRV_KMODS += qlge
DRV_KMODS += dda
DRV_KMODS += dmd
DRV_KMODS += stmf
diff --git a/usr/src/uts/sparc/qlge/Makefile b/usr/src/uts/sparc/qlge/Makefile
new file mode 100644
index 0000000000..52f1c143a2
--- /dev/null
+++ b/usr/src/uts/sparc/qlge/Makefile
@@ -0,0 +1,93 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# This makefile drives the production of the qlge driver kernel module.
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+COMMON_BASE = ../../../common
+
+
+ARCHDIR:sh = cd ..; basename `pwd`
+
+#
+# Define the module and object file sets.
+#
+MODULE = qlge
+OBJECTS = $(QLGE_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(QLGE_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+
+#
+# Include common rules.
+#
+include ../Makefile.$(ARCHDIR)
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# Overrides and depends_on
+#
+MODSTUBS_DIR = $(OBJS_DIR)
+
+INC_PATH += -I$(ROOT)/usr/include
+INC_PATH += -I$(UTSBASE)/common/sys
+INC_PATH += -I$(UTSBASE)/common/sys/fibre-channel/fca/qlge
+
+LDFLAGS += -dy -Nmisc/mac -Ndrv/ip
+
+C99MODE= -xc99=%all
+C99LMODE= -Xc99=%all
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include ../Makefile.targ