summary | refs | log | tree | commit | diff
path: root/usr/src
diff options
context:
space:
mode:
Diffstat (limited to 'usr/src')
-rw-r--r--usr/src/cmd/fm/dicts/Makefile3
-rw-r--r--usr/src/cmd/fm/dicts/NIC.dict21
-rw-r--r--usr/src/cmd/fm/dicts/NIC.po98
-rw-r--r--usr/src/cmd/fm/eversholt/files/common/nic.esc127
-rw-r--r--usr/src/cmd/fm/eversholt/files/i386/Makefile3
-rw-r--r--usr/src/cmd/fm/eversholt/files/sparc/Makefile3
-rw-r--r--usr/src/cmd/fm/modules/common/eversholt/eval.c6
-rw-r--r--usr/src/cmd/fm/modules/common/eversholt/fme.c34
-rw-r--r--usr/src/cmd/fm/modules/common/eversholt/platform.c28
-rw-r--r--usr/src/cmd/fm/modules/common/eversholt/platform.h7
-rw-r--r--usr/src/cmd/mdb/common/mdb/mdb_fmt.c4
-rw-r--r--usr/src/cmd/mdb/common/mdb/mdb_io.c77
-rw-r--r--usr/src/cmd/mdb/common/mdb/mdb_io.h6
-rw-r--r--usr/src/cmd/mdb/common/mdb/mdb_print.c36
-rw-r--r--usr/src/cmd/mdb/common/mdb/mdb_set.c4
-rw-r--r--usr/src/data/hwdata/pci.ids635
-rw-r--r--usr/src/data/hwdata/usb.ids168
-rw-r--r--usr/src/lib/libdladm/common/libdladm.c3
-rw-r--r--usr/src/lib/libdladm/common/libdladm.h5
-rw-r--r--usr/src/lib/libdladm/common/libdlvnic.c27
-rw-r--r--usr/src/man/man7d/Makefile1
-rw-r--r--usr/src/man/man7d/mlxcx.7d340
-rw-r--r--usr/src/pkg/manifests/consolidation-osnet-osnet-message-files.mf1
-rw-r--r--usr/src/pkg/manifests/driver-network-mlxcx.mf54
-rw-r--r--usr/src/pkg/manifests/service-fault-management.mf5
-rw-r--r--usr/src/pkg/manifests/system-data-hardware-registry.mf5
-rw-r--r--usr/src/uts/common/Makefile.files6
-rw-r--r--usr/src/uts/common/Makefile.rules4
-rw-r--r--usr/src/uts/common/inet/ilb/ilb.c2
-rw-r--r--usr/src/uts/common/inet/ilb/ilb_conn.c9
-rw-r--r--usr/src/uts/common/inet/ip/conn_opt.c2
-rw-r--r--usr/src/uts/common/inet/ip/icmp.c6
-rw-r--r--usr/src/uts/common/inet/ip/igmp.c31
-rw-r--r--usr/src/uts/common/inet/ip/ip.c18
-rw-r--r--usr/src/uts/common/inet/ip/ip6.c2
-rw-r--r--usr/src/uts/common/inet/ip/ip6_ire.c2
-rw-r--r--usr/src/uts/common/inet/ip/ip6_output.c2
-rw-r--r--usr/src/uts/common/inet/ip/ip_ftable.c6
-rw-r--r--usr/src/uts/common/inet/ip/ip_if.c5
-rw-r--r--usr/src/uts/common/inet/ip/ip_ndp.c4
-rw-r--r--usr/src/uts/common/inet/ip/ip_output.c4
-rw-r--r--usr/src/uts/common/inet/ip/ip_rts.c17
-rw-r--r--usr/src/uts/common/inet/ip/ipclassifier.c1
-rw-r--r--usr/src/uts/common/inet/ip/ipmp.c1
-rw-r--r--usr/src/uts/common/inet/ip/ipsecah.c3
-rw-r--r--usr/src/uts/common/inet/ip/ipsecesp.c3
-rw-r--r--usr/src/uts/common/inet/ip/sadb.c38
-rw-r--r--usr/src/uts/common/inet/ip/spd.c6
-rw-r--r--usr/src/uts/common/inet/ip/tnet.c5
-rw-r--r--usr/src/uts/common/inet/sctp/sctp_asconf.c13
-rw-r--r--usr/src/uts/common/inet/sctp/sctp_common.c2
-rw-r--r--usr/src/uts/common/inet/sctp/sctp_cookie.c12
-rw-r--r--usr/src/uts/common/inet/sctp/sctp_input.c3
-rw-r--r--usr/src/uts/common/inet/sctp/sctp_opt_data.c3
-rw-r--r--usr/src/uts/common/inet/sctp/sctp_output.c10
-rw-r--r--usr/src/uts/common/inet/tcp/tcp_bind.c5
-rw-r--r--usr/src/uts/common/inet/tcp/tcp_fusion.c2
-rw-r--r--usr/src/uts/common/inet/tcp/tcp_input.c1
-rw-r--r--usr/src/uts/common/inet/tcp/tcp_misc.c1
-rw-r--r--usr/src/uts/common/inet/tcp/tcp_output.c2
-rw-r--r--usr/src/uts/common/inet/tcp/tcp_tpi.c4
-rw-r--r--usr/src/uts/common/inet/udp/udp.c6
-rw-r--r--usr/src/uts/common/inet/udp/udp_stats.c7
-rw-r--r--usr/src/uts/common/io/mlxcx/mlxcx.c2765
-rw-r--r--usr/src/uts/common/io/mlxcx/mlxcx.conf101
-rw-r--r--usr/src/uts/common/io/mlxcx/mlxcx.h1298
-rw-r--r--usr/src/uts/common/io/mlxcx/mlxcx_cmd.c3542
-rw-r--r--usr/src/uts/common/io/mlxcx/mlxcx_dma.c460
-rw-r--r--usr/src/uts/common/io/mlxcx/mlxcx_endint.h305
-rw-r--r--usr/src/uts/common/io/mlxcx/mlxcx_gld.c1254
-rw-r--r--usr/src/uts/common/io/mlxcx/mlxcx_intr.c1010
-rw-r--r--usr/src/uts/common/io/mlxcx/mlxcx_reg.h2481
-rw-r--r--usr/src/uts/common/io/mlxcx/mlxcx_ring.c2264
-rw-r--r--usr/src/uts/common/sys/fm/io/ddi.h11
-rw-r--r--usr/src/uts/intel/Makefile.intel1
-rw-r--r--usr/src/uts/intel/ipsecah/Makefile23
-rw-r--r--usr/src/uts/intel/ipsecesp/Makefile23
-rw-r--r--usr/src/uts/intel/mlxcx/Makefile44
-rw-r--r--usr/src/uts/intel/os/driver_aliases13
-rw-r--r--usr/src/uts/sparc/ipsecah/Makefile25
-rw-r--r--usr/src/uts/sparc/ipsecesp/Makefile24
81 files changed, 17280 insertions, 313 deletions
diff --git a/usr/src/cmd/fm/dicts/Makefile b/usr/src/cmd/fm/dicts/Makefile
index 22bebd3ae8..93e0303f83 100644
--- a/usr/src/cmd/fm/dicts/Makefile
+++ b/usr/src/cmd/fm/dicts/Makefile
@@ -38,7 +38,8 @@ common_DCNAMES = \
SCA1000 \
SENSOR \
STORAGE \
- TEST
+ TEST \
+ NIC
i386_DCNAMES = \
AMD \
diff --git a/usr/src/cmd/fm/dicts/NIC.dict b/usr/src/cmd/fm/dicts/NIC.dict
new file mode 100644
index 0000000000..670dc53d46
--- /dev/null
+++ b/usr/src/cmd/fm/dicts/NIC.dict
@@ -0,0 +1,21 @@
+#
+# Copyright 2020 the University of Queensland
+# Use is subject to license terms.
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+FMDICT: name=NIC version=1 maxkey=4
+
+fault.io.nic.transceiver.notsupp=0
+fault.io.nic.transceiver.whitelist=1
+fault.io.nic.transceiver.overtemp=2
+fault.io.nic.transceiver.hwfail=3
+fault.io.nic.transceiver.unknown=4
diff --git a/usr/src/cmd/fm/dicts/NIC.po b/usr/src/cmd/fm/dicts/NIC.po
new file mode 100644
index 0000000000..46f1c859b9
--- /dev/null
+++ b/usr/src/cmd/fm/dicts/NIC.po
@@ -0,0 +1,98 @@
+#
+# Copyright 2020 the University of Queensland
+# Use is subject to license terms.
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# code: NIC-8000-0Q
+# keys: fault.io.nic.transceiver.notsupp
+#
+msgid "NIC-8000-0Q.type"
+msgstr "Fault"
+msgid "NIC-8000-0Q.severity"
+msgstr "Critical"
+msgid "NIC-8000-0Q.description"
+msgstr "NIC transceiver module %<fault-list[0].resource.hc-specific.txr_index> (SFP/SFP+/QSFP+ etc.) in %<fault-list[0].resource.hc-specific.link-name> is of a type that is not supported. This may be due to an incompatible link type or speed. In some NICs, this may also be caused by enforcement of a vendor or part whitelist.\n\n NIC data link: %<fault-list[0].resource.hc-specific.link-name> (%<fault-list[0].resource.hc-specific.primary-mac-address>)\n Module vendor: %<fault-list[0].resource.hc-specific.vendor>\n Module part: %<fault-list[0].resource.part>\n Module serial: %<fault-list[0].resource.serial>\n\n Refer to %s for more information."
+msgid "NIC-8000-0Q.response"
+msgstr "The transceiver module has been disabled, and the network data link associated with it (%<fault-list[0].resource.hc-specific.link-name>) has been marked as down.\n"
+msgid "NIC-8000-0Q.impact"
+msgstr "No network traffic will pass through the data link or network interfaces associated with this transceiver slot.\n"
+msgid "NIC-8000-0Q.action"
+msgstr "Replace the transceiver module with one of a supported type.\n"
+
+#
+# code: NIC-8000-1C
+# keys: fault.io.nic.transceiver.whitelist
+#
+msgid "NIC-8000-1C.type"
+msgstr "Fault"
+msgid "NIC-8000-1C.severity"
+msgstr "Critical"
+msgid "NIC-8000-1C.description"
+msgstr "NIC transceiver module %<fault-list[0].resource.hc-specific.txr_index> (SFP/SFP+/QSFP+ etc.) in %<fault-list[0].resource.hc-specific.link-name> is of a type that is not allowed to be used with this NIC (due to a hardware-enforced vendor or part whitelist).\n\n NIC data link: %<fault-list[0].resource.hc-specific.link-name> (%<fault-list[0].resource.hc-specific.primary-mac-address>)\n Module vendor: %<fault-list[0].resource.hc-specific.vendor>\n Module part: %<fault-list[0].resource.part>\n Module serial: %<fault-list[0].resource.serial>\n\n Refer to %s for more information."
+msgid "NIC-8000-1C.response"
+msgstr "The transceiver module has been disabled, and the network data link associated with it (%<fault-list[0].resource.hc-specific.link-name>) has been marked as down.\n"
+msgid "NIC-8000-1C.impact"
+msgstr "No network traffic will pass through the data link or network\ninterfaces associated with this transceiver slot.\n"
+msgid "NIC-8000-1C.action"
+msgstr "Replace the transceiver module with one of a supported type.\n"
+
+#
+# code: NIC-8000-2R
+# keys: fault.io.nic.transceiver.overtemp
+#
+msgid "NIC-8000-2R.type"
+msgstr "Fault"
+msgid "NIC-8000-2R.severity"
+msgstr "Critical"
+msgid "NIC-8000-2R.description"
+msgstr "NIC transceiver module %<fault-list[0].resource.hc-specific.txr_index> (SFP/SFP+/QSFP+ etc.) in %<fault-list[0].resource.hc-specific.link-name> has overheated.\n\n NIC data link: %<fault-list[0].resource.hc-specific.link-name> (%<fault-list[0].resource.hc-specific.primary-mac-address>)\n Module vendor: %<fault-list[0].resource.hc-specific.vendor>\n Module part: %<fault-list[0].resource.part>\n Module serial: %<fault-list[0].resource.serial>\n\n Refer to %s for more information."
+msgid "NIC-8000-2R.response"
+msgstr "The transceiver module has been disabled, and the network data link associated with it (%<fault-list[0].resource.hc-specific.link-name>) has been marked as down.\n"
+msgid "NIC-8000-2R.impact"
+msgstr "No network traffic will pass through the data link or network interfaces associated with this transceiver slot.\n"
+msgid "NIC-8000-2R.action"
+msgstr "Remove the transceiver module and check for adequate ventilation\nand cooling. Re-inserting the module after it has cooled will restore service.\n"
+
+#
+# code: NIC-8000-34
+# keys: fault.io.nic.transceiver.hwfail
+#
+msgid "NIC-8000-34.type"
+msgstr "Fault"
+msgid "NIC-8000-34.severity"
+msgstr "Critical"
+msgid "NIC-8000-34.description"
+msgstr "NIC transceiver module %<fault-list[0].resource.hc-specific.txr_index> (SFP/SFP+/QSFP+ etc.) in %<fault-list[0].resource.hc-specific.link-name> has experienced a hardware failure.\n\n NIC data link: %<fault-list[0].resource.hc-specific.link-name> (%<fault-list[0].resource.hc-specific.primary-mac-address>)\n Module vendor: %<fault-list[0].resource.hc-specific.vendor>\n Module part: %<fault-list[0].resource.part>\n Module serial: %<fault-list[0].resource.serial>\n\n Refer to %s for more information."
+msgid "NIC-8000-34.response"
+msgstr "The transceiver module has been disabled, and the network data link associated with it (%<fault-list[0].resource.hc-specific.link-name>) has been marked as down.\n"
+msgid "NIC-8000-34.impact"
+msgstr "No network traffic will pass through the data link or network\ninterfaces associated with this transceiver slot.\n"
+msgid "NIC-8000-34.action"
+msgstr "Remove and check the transceiver module, and consider replacing it.\n"
+
+#
+# code: NIC-8000-4X
+# keys: fault.io.nic.transceiver.unknown
+#
+msgid "NIC-8000-4X.type"
+msgstr "Fault"
+msgid "NIC-8000-4X.severity"
+msgstr "Critical"
+msgid "NIC-8000-4X.description"
+msgstr "The slot for NIC transceiver module %<fault-list[0].resource.hc-specific.txr_index> (SFP/SFP+/QSFP+ etc.) in %<fault-list[0].resource.hc-specific.link-name> is occupied, but hardware did not find a valid transceiver in it.\n Refer to %s for more information."
+msgid "NIC-8000-4X.response"
+msgstr "The transceiver module slot has been disabled, and the network data link associated with it (%<fault-list[0].resource.hc-specific.link-name>) has been marked as down.\n"
+msgid "NIC-8000-4X.impact"
+msgstr "No network traffic will pass through the data link or network\ninterfaces associated with this transceiver slot.\n"
+msgid "NIC-8000-4X.action"
+msgstr "Remove and check the transceiver module. It may be faulty,\ninserted incorrectly, or not of the correct type for the slot.\nIf problems persist, consider replacing the module.\n"
diff --git a/usr/src/cmd/fm/eversholt/files/common/nic.esc b/usr/src/cmd/fm/eversholt/files/common/nic.esc
new file mode 100644
index 0000000000..6dfaf5fa5b
--- /dev/null
+++ b/usr/src/cmd/fm/eversholt/files/common/nic.esc
@@ -0,0 +1,127 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+/*
+ * Copyright 2020, the University of Queensland
+ */
+
+#pragma dictionary "NIC"
+
+/*
+ * Rules for the generic NIC (non-driver-specific) fault events.
+ */
+
+/*
+ * Transceiver events are emitted by drivers under ereport.io.nic.txr-err.
+ *
+ * These are emitted with detector = the PCI/PCIex function of the NIC.
+ * They must always have a string property "error", set to one of the
+ * generic transceiver fault type names (notsupp, whitelist, overtemp etc).
+ *
+ * As well as "error", they must have both the "port_index" and "txr_index"
+ * properties set in the event payload (both integer types).
+ *
+ * It is expected that drivers will call ddi_fm_service_impact() immediately
+ * after noticing a transceiver error, with an argument of DDI_SERVICE_LOST or
+ * DDI_SERVICE_DEGRADED (depending on the specific error -- at time of writing
+ * all the supported events expect DDI_SERVICE_LOST).
+ */
+
+asru pcifn;
+fru pcifn/port/transceiver;
+
+asru pciexfn;
+fru pciexfn/port/transceiver;
+
+#define EV_DECL_TXR_FAULT(TYPE) \
+ event fault.io.nic.transceiver.TYPE@pcifn/port/transceiver \
+ FRU=pcifn/port/transceiver, ASRU=pcifn; \
+ event fault.io.nic.transceiver.TYPE@pciexfn/port/transceiver \
+ FRU=pciexfn/port/transceiver, ASRU=pciexfn;
+
+EV_DECL_TXR_FAULT(notsupp)
+EV_DECL_TXR_FAULT(whitelist)
+EV_DECL_TXR_FAULT(overtemp)
+EV_DECL_TXR_FAULT(hwfail)
+EV_DECL_TXR_FAULT(unknown)
+
+event ereport.io.nic.txr-err@pcifn;
+event ereport.io.service.lost@pcifn;
+
+event ereport.io.nic.txr-err@pciexfn;
+event ereport.io.service.lost@pciexfn;
+
+#define EV_PROP_TXR_FAULT(TYPE) \
+ prop fault.io.nic.transceiver.TYPE@pcifn/port[pn]/transceiver[tn] (2) -> \
+ ereport.io.nic.txr-err@pcifn { \
+ payloadprop("txr_index") == tn && \
+ payloadprop("port_index") == pn && \
+ payloadprop("error") == "TYPE" && \
+ setpayloadprop("txr_index", tn) && \
+ setpayloadprop("link-name", confprop(pcifn/port[pn], "link-name")) && \
+ setpayloadprop("primary-mac-address", confprop(pcifn/port[pn], "primary-mac-address")) && \
+ (!confprop_defined(pcifn/port[pn]/transceiver[tn], "vendor") || \
+ setpayloadprop("vendor", confprop(pcifn/port[pn]/transceiver[tn], "vendor"))) \
+ }, \
+ ereport.io.service.lost@pcifn { within(1s) }; \
+ prop fault.io.nic.transceiver.TYPE@pciexfn/port[pn]/transceiver[tn] (2) -> \
+ ereport.io.nic.txr-err@pciexfn { \
+ payloadprop("txr_index") == tn && \
+ payloadprop("port_index") == pn && \
+ payloadprop("error") == "TYPE" && \
+ setpayloadprop("txr_index", tn) && \
+ setpayloadprop("link-name", confprop(pciexfn/port[pn], "link-name")) && \
+ setpayloadprop("primary-mac-address", confprop(pciexfn/port[pn], "primary-mac-address")) && \
+ (!confprop_defined(pciexfn/port[pn]/transceiver[tn], "vendor") || \
+ setpayloadprop("vendor", confprop(pciexfn/port[pn]/transceiver[tn], "vendor"))) \
+ }, \
+ ereport.io.service.lost@pciexfn { within(1s) };
+
+EV_PROP_TXR_FAULT(notsupp)
+EV_PROP_TXR_FAULT(whitelist)
+EV_PROP_TXR_FAULT(overtemp)
+EV_PROP_TXR_FAULT(hwfail)
+EV_PROP_TXR_FAULT(unknown)
+
+/*
+ * Allow drivers (e.g. i40e) which can't tell the difference between the events
+ * notsupp/unknown/whitelist to generate a single ereport covering all 3.
+ *
+ * If transceiver information is available in topo, we will turn it into
+ * a "notsupp" fault. If it isn't, we'll turn it into an "unknown" fault
+ * instead. The text in "notsupp" explicitly notes that certain drivers might
+ * have difficulty telling the difference between it and "whitelist".
+ *
+ * If you want this for a pcifn driver rather than pciexfn, you'll have to
+ * make another copy.
+ */
+prop fault.io.nic.transceiver.notsupp@pciexfn/port[pn]/transceiver[tn] (2) ->
+ ereport.io.nic.txr-err@pciexfn {
+ payloadprop("txr_index") == tn &&
+ payloadprop("port_index") == pn &&
+ payloadprop("error") == "notsupp/unknown" &&
+ confprop_defined(pciexfn/port[pn]/transceiver[tn], "vendor") &&
+ setpayloadprop("txr_index", tn) &&
+ setpayloadprop("link-name", confprop(pciexfn/port[pn], "link-name")) &&
+ setpayloadprop("primary-mac-address", confprop(pciexfn/port[pn], "primary-mac-address")) &&
+ setpayloadprop("vendor", confprop(pciexfn/port[pn]/transceiver[tn], "vendor"))
+ },
+ ereport.io.service.lost@pciexfn { within(1s) };
+prop fault.io.nic.transceiver.unknown@pciexfn/port[pn]/transceiver[tn] (2) ->
+ ereport.io.nic.txr-err@pciexfn {
+ payloadprop("txr_index") == tn &&
+ payloadprop("port_index") == pn &&
+ payloadprop("error") == "notsupp/unknown" &&
+ !confprop_defined(pciexfn/port[pn]/transceiver[tn], "vendor") &&
+ setpayloadprop("txr_index", tn) &&
+ setpayloadprop("link-name", confprop(pciexfn/port[pn], "link-name")) &&
+ setpayloadprop("primary-mac-address", confprop(pciexfn/port[pn], "primary-mac-address"))
+ },
+ ereport.io.service.lost@pciexfn { within(1s) };
diff --git a/usr/src/cmd/fm/eversholt/files/i386/Makefile b/usr/src/cmd/fm/eversholt/files/i386/Makefile
index bb6cda3b38..67caa4468e 100644
--- a/usr/src/cmd/fm/eversholt/files/i386/Makefile
+++ b/usr/src/cmd/fm/eversholt/files/i386/Makefile
@@ -33,7 +33,8 @@ EFT_COMMON_FILES= \
sca500.eft \
sca1000.eft \
sensor.eft \
- storage.eft
+ storage.eft \
+ nic.eft
include ../../../Makefile.subdirs
diff --git a/usr/src/cmd/fm/eversholt/files/sparc/Makefile b/usr/src/cmd/fm/eversholt/files/sparc/Makefile
index 4e5655cbf7..0482b12b33 100644
--- a/usr/src/cmd/fm/eversholt/files/sparc/Makefile
+++ b/usr/src/cmd/fm/eversholt/files/sparc/Makefile
@@ -34,7 +34,8 @@ EFT_COMMON_FILES= \
sca500.eft \
sca1000.eft \
sensor.eft \
- storage.eft
+ storage.eft \
+ nic.eft
include ../../../Makefile.subdirs
diff --git a/usr/src/cmd/fm/modules/common/eversholt/eval.c b/usr/src/cmd/fm/modules/common/eversholt/eval.c
index a3c47f91dc..78ce797d28 100644
--- a/usr/src/cmd/fm/modules/common/eversholt/eval.c
+++ b/usr/src/cmd/fm/modules/common/eversholt/eval.c
@@ -32,6 +32,7 @@
#include <stdlib.h>
#include <ctype.h>
#include <string.h>
+#include <fm/libtopo.h>
#include "alloc.h"
#include "out.h"
#include "stable.h"
@@ -507,7 +508,7 @@ eval_func(struct node *funcnp, struct lut *ex, struct node *events[],
valuep->v = 1;
return (1);
} else if (funcname == L_has_fault) {
- nvlist_t *asru = NULL, *fru = NULL, *rsrc = NULL;
+ nvlist_t *rsrc = NULL;
nodep = eval_getname(funcnp, ex, events, np->u.expr.left,
globals, croot, arrowp, try, &duped);
@@ -519,7 +520,8 @@ eval_func(struct node *funcnp, struct lut *ex, struct node *events[],
}
path = ipath2str(NULL, ipath(nodep));
- platform_units_translate(0, croot, &asru, &fru, &rsrc, path);
+ platform_unit_translate(0, croot, TOPO_PROP_RESOURCE,
+ &rsrc, path);
outfl(O_ALTFP|O_VERB2|O_NONL, np->file, np->line, "has_fault(");
ptree_name_iter(O_ALTFP|O_VERB2|O_NONL, nodep);
out(O_ALTFP|O_VERB2|O_NONL, "(%s), \"%s\") ", path,
diff --git a/usr/src/cmd/fm/modules/common/eversholt/fme.c b/usr/src/cmd/fm/modules/common/eversholt/fme.c
index e153385551..0f7edab994 100644
--- a/usr/src/cmd/fm/modules/common/eversholt/fme.c
+++ b/usr/src/cmd/fm/modules/common/eversholt/fme.c
@@ -38,6 +38,7 @@
#include <libnvpair.h>
#include <sys/fm/protocol.h>
#include <fm/fmd_api.h>
+#include <fm/libtopo.h>
#include "alloc.h"
#include "out.h"
#include "stats.h"
@@ -340,7 +341,7 @@ newfme(const char *e0class, const struct ipath *e0ipp, fmd_hdl_t *hdl,
ipathlastcomp(e0ipp);
pathstr = ipath2str(NULL, e0ipp);
cfgdata = config_snapshot();
- platform_units_translate(0, cfgdata->cooked, NULL, NULL,
+ platform_unit_translate(0, cfgdata->cooked, TOPO_PROP_RESOURCE,
&detector, pathstr);
FREE(pathstr);
structconfig_free(cfgdata->cooked);
@@ -392,7 +393,7 @@ newfme(const char *e0class, const struct ipath *e0ipp, fmd_hdl_t *hdl,
nvlist_free(detector);
pathstr = ipath2str(NULL, e0ipp);
cfgdata = config_snapshot();
- platform_units_translate(0, cfgdata->cooked, NULL, NULL,
+ platform_unit_translate(0, cfgdata->cooked, TOPO_PROP_RESOURCE,
&detector, pathstr);
FREE(pathstr);
platform_save_config(hdl, fmcase);
@@ -2181,7 +2182,8 @@ void
get_resources(struct event *sp, struct rsl *rsrcs, struct config *croot)
{
struct node *asrudef, *frudef;
- nvlist_t *asru, *fru;
+ const struct ipath *asrupath, *frupath;
+ nvlist_t *asru = NULL, *fru = NULL;
nvlist_t *rsrc = NULL;
char *pathstr;
@@ -2193,19 +2195,29 @@ get_resources(struct event *sp, struct rsl *rsrcs, struct config *croot)
frudef = eventprop_lookup(sp, L_FRU);
/*
- * Create FMRIs based on those definitions
+ * Create ipaths based on those definitions
*/
- asru = node2fmri(asrudef);
- fru = node2fmri(frudef);
- pathstr = ipath2str(NULL, sp->ipp);
+ asrupath = ipath(asrudef);
+ frupath = ipath(frudef);
/*
* Allow for platform translations of the FMRIs
*/
- platform_units_translate(is_defect(sp->t), croot, &asru, &fru, &rsrc,
- pathstr);
+ pathstr = ipath2str(NULL, sp->ipp);
+ platform_unit_translate(is_defect(sp->t), croot, TOPO_PROP_RESOURCE,
+ &rsrc, pathstr);
+ FREE(pathstr);
+ pathstr = ipath2str(NULL, asrupath);
+ platform_unit_translate(is_defect(sp->t), croot, TOPO_PROP_ASRU,
+ &asru, pathstr);
FREE(pathstr);
+
+ pathstr = ipath2str(NULL, frupath);
+ platform_unit_translate(is_defect(sp->t), croot, TOPO_PROP_FRU,
+ &fru, pathstr);
+ FREE(pathstr);
+
rsrcs->suspect = sp;
rsrcs->asru = asru;
rsrcs->fru = fru;
@@ -3115,8 +3127,8 @@ fme_undiagnosable(struct fme *f)
fmd_case_add_ereport(f->hdl, f->fmcase, ep->ffep);
pathstr = ipath2str(NULL, ipath(platform_getpath(ep->nvp)));
- platform_units_translate(0, f->config, NULL, NULL, &detector,
- pathstr);
+ platform_unit_translate(0, f->config, TOPO_PROP_RESOURCE,
+ &detector, pathstr);
FREE(pathstr);
/* add defect */
diff --git a/usr/src/cmd/fm/modules/common/eversholt/platform.c b/usr/src/cmd/fm/modules/common/eversholt/platform.c
index 1fe49bd9b4..a3972400e4 100644
--- a/usr/src/cmd/fm/modules/common/eversholt/platform.c
+++ b/usr/src/cmd/fm/modules/common/eversholt/platform.c
@@ -767,7 +767,7 @@ platform_config_snapshot(void)
}
static const char *
-cfgstrprop_lookup(struct config *croot, char *path, char *pname)
+cfgstrprop_lookup(struct config *croot, char *path, const char *pname)
{
struct config *cresource;
const char *fmristr;
@@ -790,43 +790,45 @@ cfgstrprop_lookup(struct config *croot, char *path, char *pname)
}
/*
- * Get resource FMRI from libtopo
+ * Get FMRI for a particular unit from libtopo. The unit is specified by the
+ * "path" argument (a stringified ipath). "prop" argument should be one
+ * of the constants TOPO_PROP_RESOURCE, TOPO_PROP_ASRU, TOPO_PROP_FRU, etc.
*/
/*ARGSUSED*/
void
-platform_units_translate(int isdefect, struct config *croot,
- nvlist_t **dfltasru, nvlist_t **dfltfru, nvlist_t **dfltrsrc, char *path)
+platform_unit_translate(int isdefect, struct config *croot, const char *prop,
+ nvlist_t **fmrip, char *path)
{
const char *fmristr;
char *serial;
- nvlist_t *rsrc;
+ nvlist_t *fmri;
int err;
- fmristr = cfgstrprop_lookup(croot, path, TOPO_PROP_RESOURCE);
+ fmristr = cfgstrprop_lookup(croot, path, prop);
if (fmristr == NULL) {
- out(O_ALTFP, "Cannot rewrite resource for %s.", path);
+ out(O_ALTFP, "Cannot rewrite unit FMRI for %s.", path);
return;
}
- if (topo_fmri_str2nvl(Eft_topo_hdl, fmristr, &rsrc, &err) < 0) {
+ if (topo_fmri_str2nvl(Eft_topo_hdl, fmristr, &fmri, &err) < 0) {
out(O_ALTFP, "Can not convert config info: %s",
topo_strerror(err));
- out(O_ALTFP, "Cannot rewrite resource for %s.", path);
+ out(O_ALTFP, "Cannot rewrite unit FMRI for %s.", path);
return;
}
/*
- * If we don't have a serial number in the resource then check if it
+ * If we don't have a serial number in the unit then check if it
* is available as a separate property and if so then add it.
*/
- if (nvlist_lookup_string(rsrc, FM_FMRI_HC_SERIAL_ID, &serial) != 0) {
+ if (nvlist_lookup_string(fmri, FM_FMRI_HC_SERIAL_ID, &serial) != 0) {
serial = (char *)cfgstrprop_lookup(croot, path,
FM_FMRI_HC_SERIAL_ID);
if (serial != NULL)
- (void) nvlist_add_string(rsrc, FM_FMRI_HC_SERIAL_ID,
+ (void) nvlist_add_string(fmri, FM_FMRI_HC_SERIAL_ID,
serial);
}
- *dfltrsrc = rsrc;
+ *fmrip = fmri;
}
/*
diff --git a/usr/src/cmd/fm/modules/common/eversholt/platform.h b/usr/src/cmd/fm/modules/common/eversholt/platform.h
index 30cab516ed..23e1f6f41e 100644
--- a/usr/src/cmd/fm/modules/common/eversholt/platform.h
+++ b/usr/src/cmd/fm/modules/common/eversholt/platform.h
@@ -28,8 +28,6 @@
#ifndef _EFT_PLATFORM_H
#define _EFT_PLATFORM_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <libnvpair.h>
#ifdef __cplusplus
@@ -38,6 +36,7 @@ extern "C" {
#include <config.h>
#include <fm/fmd_api.h>
+#include <fm/libtopo.h>
nvlist_t *Action_nvl; /* nvl for problem with action=... prop on it */
@@ -45,8 +44,8 @@ void platform_init(void);
void platform_fini(void);
void platform_run_poller(const char *poller);
void platform_set_payloadnvp(nvlist_t *nvlp);
-void platform_units_translate(int, struct config *, nvlist_t **, nvlist_t **,
- nvlist_t **, char *);
+void platform_unit_translate(int, struct config *, const char *, nvlist_t **,
+ char *);
struct cfgdata *platform_config_snapshot(void);
void platform_restore_config(fmd_hdl_t *hdl, fmd_case_t *fmcase);
diff --git a/usr/src/cmd/mdb/common/mdb/mdb_fmt.c b/usr/src/cmd/mdb/common/mdb/mdb_fmt.c
index c68f20b107..6a745b2ac4 100644
--- a/usr/src/cmd/mdb/common/mdb/mdb_fmt.c
+++ b/usr/src/cmd/mdb/common/mdb/mdb_fmt.c
@@ -21,7 +21,7 @@
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
- * Copyright 2019 Joyent, Inc.
+ * Copyright 2020 Joyent, Inc.
* Copyright (c) 2017 by Delphix. All rights reserved.
*/
@@ -739,7 +739,7 @@ mdb_fmt_print(mdb_tgt_t *t, mdb_tgt_as_t as,
* Unless a format has explicitly opted out, we force autowrap
* for the duration of mdb_fmt_print().
*/
- mdb.m_flags |= MDB_FL_AUTOWRAP;
+ mdb_iob_set_autowrap(mdb.m_out);
}
switch (FMT_TYPE(fp->f_type)) {
diff --git a/usr/src/cmd/mdb/common/mdb/mdb_io.c b/usr/src/cmd/mdb/common/mdb/mdb_io.c
index f1ad8d051c..752c6f5c35 100644
--- a/usr/src/cmd/mdb/common/mdb/mdb_io.c
+++ b/usr/src/cmd/mdb/common/mdb/mdb_io.c
@@ -24,7 +24,7 @@
*/
/*
- * Copyright (c) 2019, Joyent, Inc. All rights reserved.
+ * Copyright 2020 Joyent, Inc.
* Copyright (c) 2016 by Delphix. All rights reserved.
*/
@@ -1777,6 +1777,27 @@ mdb_iob_snprintf(char *buf, size_t nbytes, const char *format, ...)
return (nbytes);
}
+/*
+ * Return how many bytes we can copy into our buffer, limited by either cols or
+ * bufsiz depending on whether AUTOWRAP is on. Note that typically,
+ * mdb_iob_set_autowrap() will have already checked for an existing
+ * "->iob_nbytes > ->iob_cols" situation, but we double check here anyway.
+ */
+static size_t
+iob_bufleft(mdb_iob_t *iob)
+{
+ if (IOB_AUTOWRAP(iob)) {
+ if (iob->iob_cols < iob->iob_nbytes) {
+ mdb_iob_nl(iob);
+ ASSERT(iob->iob_cols >= iob->iob_nbytes);
+ }
+ return (iob->iob_cols - iob->iob_nbytes);
+ }
+
+ ASSERT(iob->iob_bufsiz >= iob->iob_nbytes);
+ return (iob->iob_bufsiz - iob->iob_nbytes);
+}
+
void
mdb_iob_nputs(mdb_iob_t *iob, const char *s, size_t nbytes)
{
@@ -1810,20 +1831,11 @@ mdb_iob_nputs(mdb_iob_t *iob, const char *s, size_t nbytes)
}
/*
- * For a given string component, we determine how many bytes (n) we can
- * copy into our buffer (limited by either cols or bufsiz depending
- * on whether AUTOWRAP is on), copy a chunk into the buffer, and
+ * For a given string component, we copy a chunk into the buffer, and
* flush the buffer if we reach the end of a line.
*/
while (nleft != 0) {
- if (IOB_AUTOWRAP(iob)) {
- ASSERT(iob->iob_cols >= iob->iob_nbytes);
- n = iob->iob_cols - iob->iob_nbytes;
- } else {
- ASSERT(iob->iob_bufsiz >= iob->iob_nbytes);
- n = iob->iob_bufsiz - iob->iob_nbytes;
- }
-
+ n = iob_bufleft(iob);
m = MIN(nleft, n); /* copy at most n bytes in this pass */
bcopy(q, iob->iob_bufp, m);
@@ -1884,14 +1896,7 @@ mdb_iob_fill(mdb_iob_t *iob, int c, size_t nfill)
ASSERT(iob->iob_flags & MDB_IOB_WRONLY);
while (nfill != 0) {
- if (IOB_AUTOWRAP(iob)) {
- ASSERT(iob->iob_cols >= iob->iob_nbytes);
- n = iob->iob_cols - iob->iob_nbytes;
- } else {
- ASSERT(iob->iob_bufsiz >= iob->iob_nbytes);
- n = iob->iob_bufsiz - iob->iob_nbytes;
- }
-
+ n = iob_bufleft(iob);
m = MIN(nfill, n); /* fill at most n bytes in this pass */
for (i = 0; i < m; i++)
@@ -2169,6 +2174,26 @@ mdb_iob_stack_size(mdb_iob_stack_t *stk)
}
/*
+ * This only enables autowrap for iobs that are already autowrap themselves such
+ * as mdb.m_out typically.
+ *
+ * Note that we might be the middle of the iob buffer at this point, and
+ * specifically, iob->iob_nbytes could be more than iob->iob_cols. As that's
+ * not a valid situation, we may need to do an autowrap *now*.
+ *
+ * In theory, we would need to do this across all MDB_IOB_AUTOWRAP iob's;
+ * instead, we have a failsafe in iob_bufleft().
+ */
+void
+mdb_iob_set_autowrap(mdb_iob_t *iob)
+{
+ mdb.m_flags |= MDB_FL_AUTOWRAP;
+ if (IOB_WRAPNOW(iob, 0))
+ mdb_iob_nl(iob);
+ ASSERT(iob->iob_cols >= iob->iob_nbytes);
+}
+
+/*
* Stub functions for i/o backend implementors: these stubs either act as
* pass-through no-ops or return ENOTSUP as appropriate.
*/
@@ -2267,14 +2292,14 @@ no_io_resume(mdb_io_t *io)
/*
* Iterate over the varargs. The first item indicates the mode:
* MDB_TBL_PRNT
- * pull out the next vararg as a const char * and pass it and the
- * remaining varargs to iob_doprnt; if we want to print the column,
- * direct the output to mdb.m_out otherwise direct it to mdb.m_null
+ * pull out the next vararg as a const char * and pass it and the
+ * remaining varargs to iob_doprnt; if we want to print the column,
+ * direct the output to mdb.m_out otherwise direct it to mdb.m_null
*
* MDB_TBL_FUNC
- * pull out the next vararg as type mdb_table_print_f and the
- * following one as a void * argument to the function; call the
- * function with the given argument if we want to print the column
+ * pull out the next vararg as type mdb_table_print_f and the
+ * following one as a void * argument to the function; call the
+ * function with the given argument if we want to print the column
*
* The second item indicates the flag; if the flag is set in the flags
* argument, then the column is printed. A flag value of 0 indicates
diff --git a/usr/src/cmd/mdb/common/mdb/mdb_io.h b/usr/src/cmd/mdb/common/mdb/mdb_io.h
index 2ef4677db4..0c41c5c740 100644
--- a/usr/src/cmd/mdb/common/mdb/mdb_io.h
+++ b/usr/src/cmd/mdb/common/mdb/mdb_io.h
@@ -21,13 +21,13 @@
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright 2020 Joyent, Inc.
*/
#ifndef _MDB_IO_H
#define _MDB_IO_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -149,6 +149,8 @@ extern size_t mdb_iob_lineno(mdb_iob_t *);
extern size_t mdb_iob_gettabstop(mdb_iob_t *);
extern size_t mdb_iob_getmargin(mdb_iob_t *);
+extern void mdb_iob_set_autowrap(mdb_iob_t *);
+
extern void mdb_iob_stack_create(mdb_iob_stack_t *);
extern void mdb_iob_stack_destroy(mdb_iob_stack_t *);
extern void mdb_iob_stack_push(mdb_iob_stack_t *, mdb_iob_t *, size_t);
diff --git a/usr/src/cmd/mdb/common/mdb/mdb_print.c b/usr/src/cmd/mdb/common/mdb/mdb_print.c
index 2c0af13a25..bd23ef681f 100644
--- a/usr/src/cmd/mdb/common/mdb/mdb_print.c
+++ b/usr/src/cmd/mdb/common/mdb/mdb_print.c
@@ -25,7 +25,7 @@
/*
* Copyright (c) 2012, 2014 by Delphix. All rights reserved.
- * Copyright 2018 Joyent, Inc.
+ * Copyright 2020 Joyent, Inc.
* Copyright (c) 2014 Nexenta Systems, Inc. All rights reserved.
*/
@@ -931,6 +931,30 @@ print_bitfield(ulong_t off, printarg_t *pap, ctf_encoding_t *ep)
}
/*
+ * We want to print an escaped char as e.g. '\0'. We don't use mdb_fmt_print()
+ * as it won't get auto-wrap right here (although even now, we don't include any
+ * trailing comma).
+ */
+static int
+print_char_val(mdb_tgt_addr_t addr, printarg_t *pap)
+{
+ char cval;
+ char *s;
+
+ if (mdb_tgt_aread(pap->pa_tgt, pap->pa_as, &cval, 1, addr) != 1)
+ return (1);
+
+ if (mdb.m_flags & MDB_FL_ADB)
+ s = strchr2adb(&cval, 1);
+ else
+ s = strchr2esc(&cval, 1);
+
+ mdb_printf("'%s'", s);
+ strfree(s);
+ return (0);
+}
+
+/*
* Print out a character or integer value. We use some simple heuristics,
* described below, to determine the appropriate radix to use for output.
*/
@@ -971,14 +995,8 @@ print_int_val(const char *type, ctf_encoding_t *ep, ulong_t off,
if (size > 8 || (ep->cte_bits % NBBY) != 0 || (size & (size - 1)) != 0)
return (print_bitfield(off, pap, ep));
- if (IS_CHAR(*ep)) {
- mdb_printf("'");
- if (mdb_fmt_print(pap->pa_tgt, pap->pa_as,
- addr, 1, 'C') == addr)
- return (1);
- mdb_printf("'");
- return (0);
- }
+ if (IS_CHAR(*ep))
+ return (print_char_val(addr, pap));
if (mdb_tgt_aread(pap->pa_tgt, pap->pa_as, &u.i8, size, addr) != size) {
mdb_warn("failed to read %lu bytes at %llx",
diff --git a/usr/src/cmd/mdb/common/mdb/mdb_set.c b/usr/src/cmd/mdb/common/mdb/mdb_set.c
index cd8926fbcc..8d4a1d30f7 100644
--- a/usr/src/cmd/mdb/common/mdb/mdb_set.c
+++ b/usr/src/cmd/mdb/common/mdb/mdb_set.c
@@ -24,7 +24,7 @@
*/
/*
- * Copyright 2017 Joyent, Inc.
+ * Copyright 2020 Joyent, Inc.
*/
/*
@@ -275,7 +275,7 @@ print_properties(void)
* we enable it for the duration of the command.
*/
oflags = mdb.m_flags;
- mdb.m_flags |= MDB_FL_AUTOWRAP;
+ mdb_iob_set_autowrap(mdb.m_out);
mdb_printf("follow_exec_mode=");
switch (mdb.m_execmode) {
diff --git a/usr/src/data/hwdata/pci.ids b/usr/src/data/hwdata/pci.ids
index 09ef6d8bf1..be8b8dfabb 100644
--- a/usr/src/data/hwdata/pci.ids
+++ b/usr/src/data/hwdata/pci.ids
@@ -1,8 +1,8 @@
#
# List of PCI ID's
#
-# Version: 2019.09.11
-# Date: 2019-09-11 03:15:02
+# Version: 2020.02.22
+# Date: 2020-02-22 03:15:04
#
# Maintained by Albert Pool, Martin Mares, and other volunteers from
# the PCI ID Project at https://pci-ids.ucw.cz/.
@@ -13,6 +13,10 @@
# This file can be distributed under either the GNU General Public License
# (version 2 or higher) or the 3-clause BSD License.
#
+# The database is a compilation of factual data, and as such the copyright
+# only covers the aggregation and formatting. The copyright is held by
+# Martin Mares and Albert Pool.
+#
# Vendors, devices and subsystems. Please keep sorted.
@@ -63,6 +67,7 @@
# 018a is not LevelOne but there is a board misprogrammed
018a LevelOne
0106 FPC-0106TX misprogrammed [RTL81xx]
+01de Oxide Computer Company
# 021b is not Compaq but there is a board misprogrammed
021b Compaq Computer Corporation
8139 HNE-300 (RealTek RTL8139c) [iPaq Networking]
@@ -432,6 +437,8 @@
1028 1fd1 PERC H730P MX
17aa 1052 ThinkServer RAID 720i
17aa 1053 ThinkServer RAID 720ix
+ 1bd4 0014 6G SAS3108 2G
+ 1bd4 0015 6G SAS3108 4G
1d49 0600 ThinkSystem RAID 730-8i 1GB Cache PCIe 12Gb Adapter
1d49 0608 ThinkSystem RAID 730-8i 2GB Flash PCIe 12Gb Adapter
1d49 0609 ThinkSystem RAID 730-8i 4GB Flash PCIe 12Gb Adapter
@@ -491,6 +498,7 @@
006e SAS2308 PCI-Express Fusion-MPT SAS-2
0070 SAS2004 PCI-Express Fusion-MPT SAS-2 [Spitfire]
1000 3010 SAS9211-4i
+ 1014 040e ServeRAID H1110
0071 MR SAS HBA 2004
0072 SAS2008 PCI-Express Fusion-MPT SAS-2 [Falcon]
1000 3040 9210-8i
@@ -502,6 +510,12 @@
1028 1f1f PERC H200 Modular
1028 1f20 PERC H200 Embedded
1028 1f22 PERC H200 Internal Tape Adapter
+# Fujitsu D2607 SAS2008 HBA controller
+ 1734 1177 HBA Ctrl SAS 6G 0/1 [D2607]
+ 1bd4 000d 6G SAS2008IT
+ 1bd4 000e 6G SAS2008IR
+ 1bd4 000f 6G SAS2008IT SA5248
+ 1bd4 0010 6G SAS2008IR SA5248
8086 350f RMS2LL040 RAID Controller
8086 3700 SSD 910 Series
0073 MegaRAID SAS 2008 [Falcon]
@@ -610,6 +624,8 @@
1590 0041 H220i
1590 0042 H221 / 9207-8e
1590 0044 H220i
+ 1bd4 0009 6G SAS2308IR
+ 1bd4 000a 6G SAS2308IT
8086 3000 RS25GB008 RAID Controller
8086 3060 RS25FB044 RAID Controller
8086 3516 RMS25JB080 RAID Controller
@@ -638,10 +654,22 @@
1028 1fd3 HBA330 MMZ
# Supermicro AOC-S3008L-L8e uses 0808 for their SAS3008 SAS controller
15d9 0808 AOC-S3008L-L8e
+ 1bd4 000b 12G SAS3008IR
+ 1bd4 000c 12G SAS3008IT
1bd4 0011 Inspur 12Gb 8i-3008 IT SAS HBA
+ 1bd4 0012 12Gb SAS3008IR UDM
+ 1bd4 0026 12G SAS3008IT RACK
+ 1bd4 0027 12G SAS3008IMR RACK
+ 1bd4 0028 12G SAS3008IR RACK
00ab SAS3516 Fusion-MPT Tri-Mode RAID On Chip (ROC)
+# 8 Internal and 8 External port channel 9400 HBA
+ 1000 3040 HBA 9400-8i8e
8086 3530 Integrated RAID Module RMSP3JD160J
00ac SAS3416 Fusion-MPT Tri-Mode I/O Controller Chip (IOC)
+# Channel 16 internal port HBA
+ 1000 3000 HBA 9400-16i
+# Channel 16 external port HBA
+ 1000 3020 HBA 9400-16e
1028 1fe3 HBA345 Adapter
1028 1fe4 HBA345 Front
1d49 0201 ThinkSystem 430-16i SAS/SATA 12Gb HBA
@@ -651,6 +679,8 @@
00ae SAS3508 Fusion-MPT Tri-Mode RAID On Chip (ROC)
00af SAS3408 Fusion-MPT Tri-Mode I/O Controller Chip (IOC)
1000 3010 HBA 9400-8i
+# 9400 Channel 8 external port HBA
+ 1000 3030 HBA 9400-8e
1d49 0200 ThinkSystem 430-8i SAS/SATA 12Gb HBA
1d49 0202 ThinkSystem 430-8e SAS/SATA 12Gb HBA
1d49 0204 ThinkSystem 430-8i SAS/SATA 12Gb Dense HBA
@@ -672,18 +702,39 @@
00cf MegaRAID SAS-3 3324 [Intruder]
1000 9370 MegaRAID SAS 9361-24i
00d0 SAS3716 Fusion-MPT Tri-Mode RAID Controller Chip (ROC)
+# 9405W 16 internal port channel HBA
+ 1000 3050 HBA 9405W-16i
+# 9405W 8 internal and 8 external port channel HBA
+ 1000 3070 HBA 9405W-8i8e
00d1 SAS3616 Fusion-MPT Tri-Mode I/O Controller Chip (IOC)
+# 9405W 16 external port Channel HBA
+ 1000 3080 HBA 9405W-16e
+# 9405W 16 internal port Channel HBA
+ 1000 3090 HBA 9405W-16i
00d3 MegaRAID Tri-Mode SAS3716W
00e0 Fusion-MPT 12GSAS/PCIe Unsupported SAS39xx
00e1 Fusion-MPT 12GSAS/PCIe SAS39xx
00e2 Fusion-MPT 12GSAS/PCIe Secure SAS39xx
00e3 Fusion-MPT 12GSAS/PCIe Unsupported SAS39xx
00e4 Fusion-MPT 12GSAS/PCIe Unsupported SAS38xx
+# Invalid part
+ 1028 200b HBA355i Adapter Invalid
+# Invalid part
+ 1028 200c HBA355i Front Invalid
+# Invalid part
+ 1028 200d HBA355e Adapter Invalid
+# Invalid part
+ 1028 200e HBA350i MX Invalid
+# Soft Secure
00e5 Fusion-MPT 12GSAS/PCIe SAS38xx
+# Soft Secure
1028 200b HBA355i Adapter
+# Soft Secure
1028 200c HBA355i Front
+# Soft Secure
1028 200d HBA355e Adapter
- 1028 200e HBA355i MX
+# Soft Secure
+ 1028 200e HBA350i MX
1d49 0205 ThinkSystem 440-16i SAS/SATA PCIe Gen4 12Gb Internal HBA
1d49 0206 ThinkSystem 440-16e SAS/SATA PCIe Gen4 12Gb HBA
00e6 Fusion-MPT 12GSAS/PCIe Secure SAS38xx
@@ -694,6 +745,14 @@
1d49 0205 ThinkSystem 440-16i SAS/SATA PCIe Gen4 12Gb Internal HBA
1d49 0206 ThinkSystem 440-16e SAS/SATA PCIe Gen4 12Gb HBA
00e7 Fusion-MPT 12GSAS/PCIe Unsupported SAS38xx
+# Tampered part
+ 1028 200b HBA355i Adapter Tampered
+# Tampered part
+ 1028 200c HBA355i Front Tampered
+# Tampered part
+ 1028 200d HBA355e Adapter Tampered
+# Tampered part
+ 1028 200e HBA350i MX Tampered
02b0 Virtual Endpoint on PCIe Switch
1d49 0001 ThinkSystem 1610-4P NVMe Switch Adapter
1d49 0002 ThinkSystem 810-4P NVMe Switch Adapter
@@ -770,6 +829,10 @@
0901 61C102
1000 63C815
10e0 MegaRAID 12GSAS/PCIe Unsupported SAS39xx
+ 1028 1ae0 PERC H755 Adapter - Invalid Device
+ 1028 1ae1 PERC H755 Front - Invalid Device
+ 1028 1ae2 PERC H755N Front - Invalid Device
+ 1028 1ae3 PERC H755 MX - Invalid Device
10e1 MegaRAID 12GSAS/PCIe SAS39xx
1028 1ae0 PERC H755 Adapter
1028 1ae1 PERC H755 Front
@@ -793,6 +856,10 @@
1d49 060e ThinkSystem RAID 940-32i 8GB Flash PCIe Gen4 12Gb Adapter
1d49 060f ThinkSystem RAID 940-8e 4GB Flash PCIe Gen4 12Gb Adapter
10e3 MegaRAID 12GSAS/PCIe Unsupported SAS39xx
+ 1028 1ae0 PERC H755 Adapter - Tampered Device
+ 1028 1ae1 PERC H755 Front - Tampered Device
+ 1028 1ae2 PERC H755N Front - Tampered Device
+ 1028 1ae3 PERC H755 MX - Tampered Device
10e4 MegaRAID 12GSAS/PCIe Unsupported SAS38xx
10e5 MegaRAID 12GSAS/PCIe SAS38xx
10e6 MegaRAID 12GSAS/PCIe Secure SAS38xx
@@ -829,7 +896,9 @@
1306 Kaveri
1307 Kaveri
1308 Kaveri HDMI/DP Audio Controller
+ 17aa 3988 Z50-75
1309 Kaveri [Radeon R6/R7 Graphics]
+ 17aa 3830 Z50-75
130a Kaveri [Radeon R6 Graphics]
130b Kaveri [Radeon R4 Graphics]
130c Kaveri [Radeon R7 Graphics]
@@ -850,17 +919,22 @@
131c Kaveri [Radeon R7 Graphics]
131d Kaveri [Radeon R6 Graphics]
13e9 Ariel
+ 1478 Navi 10 XL Upstream Port of PCI Express Switch
+ 1479 Navi 10 XL Downstream Port of PCI Express Switch
154c Kryptos
154e Garfield
1551 Arlene
1552 Pooky
1561 Anubis
15d8 Picasso
+ 103c 8615 Pavilion Laptop 15-cw1xxx
15dd Raven Ridge [Radeon Vega Series / Radeon Vega Mobile Series]
103c 83c6 Radeon Vega 8 Mobile
1458 d000 Radeon RX Vega 11
15de Raven/Raven2/Fenghuang HDMI/DP Audio Controller
+ 103c 8615 Pavilion Laptop 15-cw1xxx
15df Raven/Raven2/Fenghuang/Renoir Cryptographic Coprocessor
+ 103c 8615 Pavilion Laptop 15-cw1xxx
15ff Fenghuang [Zhongshan Subor Z+]
1607 Arden
1636 Renoir
@@ -1035,7 +1109,7 @@
4382 SB600 AC97 Audio
4383 SBx00 Azalia (Intel HDA)
1019 2120 A785GM-M
- 103c 1611 Pavilion DM1Z-3000
+ 103c 1611 Pavilion dm1z-3000
103c 280a DC5750 Microtower
1043 8230 M3A78-EH Motherboard
1043 836c M4A785TD Motherboard
@@ -1109,6 +1183,7 @@
1458 b002 GA-MA770-DS3rev2.0 Motherboard
1849 4390 Motherboard (one of many)
4391 SB7x0/SB8x0/SB9x0 SATA Controller [AHCI mode]
+ 103c 1609 ProLiant MicroServer N36L
103c 1611 Pavilion DM1Z-3000
1043 82ef M3A78-EH Motherboard
1043 8443 M5A88-V EVO
@@ -1122,6 +1197,7 @@
4395 SB8x0/SB9x0 SATA Controller [Storage mode]
4396 SB7x0/SB8x0/SB9x0 USB EHCI Controller
1019 2120 A785GM-M
+ 103c 1609 ProLiant MicroServer N36L
103c 1611 Pavilion DM1Z-3000
1043 82ef M3A78-EH Motherboard
1043 8443 M5A88-V EVO
@@ -1130,6 +1206,7 @@
174b 1001 PURE Fusion Mini
4397 SB7x0/SB8x0/SB9x0 USB OHCI0 Controller
1019 2120 A785GM-M
+ 103c 1609 ProLiant MicroServer N36L
103c 1611 Pavilion DM1Z-3000
1043 82ef M3A78-EH Motherboard
1043 8443 M5A88-V EVO
@@ -1150,10 +1227,12 @@
439c SB7x0/SB8x0/SB9x0 IDE Controller
1002 4392 MSI MS-7713 motherboard
1019 2120 A785GM-M
+ 103c 1609 ProLiant MicroServer N36L
1043 82ef M3A78-EH Motherboard
105b 0e13 N15235/A74MX mainboard / AMD SB700
439d SB7x0/SB8x0/SB9x0 LPC host controller
1019 2120 A785GM-M
+ 103c 1609 ProLiant MicroServer N36L
103c 1611 Pavilion DM1Z-3000
1043 82ef M3A78-EH Motherboard
1043 8443 M5A88-V EVO
@@ -1800,6 +1879,7 @@
1642 3f09 Radeon R7 350
6611 Oland [Radeon HD 8570 / R7 240/340 OEM]
1028 210b Radeon R5 240 OEM
+ 1642 1869 AMD Radeon 520
174b 4248 Radeon R7 240 OEM
174b a240 Radeon R7 240 OEM
174b d340 Radeon R7 340 OEM
@@ -1865,7 +1945,7 @@
17aa 3805 Radeon HD 8570M
6664 Jet XT [Radeon R5 M240]
6665 Jet PRO [Radeon R5 M230 / R7 M260DX / Radeon 520 Mobile]
- 17aa 1309 Radeon R7 M260DX
+ 17aa 1309 Z50-75 Radeon R7 M260DX
17aa 368f Radeon R5 A230
6667 Jet ULT [Radeon R5 M230]
666f Sun LE [Radeon HD 8550M / R5 M230]
@@ -2496,7 +2576,7 @@
1462 3418 Radeon RX 580 Armor 4G OC
1462 341e Radeon RX 570 Armor 4G OC
1462 8a92 Radeon RX 580
- 148c 2372 Radeon RX 480
+ 148c 2372 Radeon RX 480 [Red Dragon]
148c 2373 Radeon RX 470
1682 9470 Radeon RX 470
1682 9480 Radeon RX 480
@@ -2507,8 +2587,9 @@
1787 a470 Radeon RX 470
1787 a480 Radeon RX 480
1849 5001 Phantom Gaming X RX 580 OC
+ 1849 5030 Phantom Gaming D Radeon RX580 8G OC
1da2 e353 Radeon RX 570 Pulse 4GB
- 1da2 e366 Nitro+ Radeon RX 570/580
+ 1da2 e366 Nitro+ Radeon RX 570/580/590
67e0 Baffin [Radeon Pro WX 4170]
103c 8270 Radeon Pro WX 4170
103c 8272 Radeon Pro WX 4170
@@ -2526,6 +2607,7 @@
67e9 Baffin [Polaris11]
67eb Baffin [Radeon Pro V5300X]
67ef Baffin [Radeon RX 460/560D / Pro 450/455/460/555/555X/560/560X]
+ 1028 1703 RX 560D OEM OC 2 GB
103c 3421 Radeon RX 460
106b 0160 Radeon Pro 460
106b 0166 Radeon Pro 455
@@ -3446,7 +3528,12 @@
1043 04a0 Radeon R9 FURY X
174b e329 Radeon R9 FURY
7310 Navi 10
- 731f Navi 10 [Radeon RX 5700 / 5700 XT]
+ 7312 Navi 10 [Radeon Pro W5700]
+ 731f Navi 10 [Radeon RX 5600 OEM/5600 XT / 5700/5700 XT]
+ 7340 Navi 14 [Radeon RX 5500/5500M / Pro 5500M]
+ 7341 Navi 14 [Radeon Pro W5500]
+ 7347 Navi 14 [Radeon Pro W5500M]
+ 734f Navi 14 [Radeon Pro W5300M]
7833 RS350 Host Bridge
7834 RS350 [Radeon 9100 PRO/XT IGP]
7835 RS350M [Mobility Radeon 9000 IGP]
@@ -3682,6 +3769,7 @@
1019 2120 A785GM-M
1043 83a2 M4A785TD Motherboard
9712 RS880M [Mobility Radeon HD 4225/4250]
+ 103c 1609 ProLiant MicroServer N36L
9713 RS880M [Mobility Radeon HD 4100]
9714 RS880 [Radeon HD 4290]
9715 RS880 [Radeon HD 4250]
@@ -3700,6 +3788,7 @@
9830 Kabini [Radeon HD 8400 / R3 Series]
9831 Kabini [Radeon HD 8400E]
9832 Kabini [Radeon HD 8330]
+ 1849 9832 QC5000-ITX/PH
9833 Kabini [Radeon HD 8330E]
9834 Kabini [Radeon HD 8210]
9835 Kabini [Radeon HD 8310E]
@@ -3709,6 +3798,7 @@
9839 Kabini [Radeon HD 8180]
983d Temash [Radeon HD 8250/8280G]
9840 Kabini HDMI/DP Audio
+ 1849 9840 QC5000-ITX/PH
9850 Mullins [Radeon R3 Graphics]
9851 Mullins [Radeon R4/R5 Graphics]
1179 f928 Beema [Radeon R5 Graphics]
@@ -4477,6 +4567,7 @@
1467 Family 17h (Models 00h-0fh) Data Fabric: Device 18h; Function 7
1468 Zeppelin Cryptographic Coprocessor NTBCCP
1480 Starship/Matisse Root Complex
+ 1462 7c37 X570-A PRO motherboard
1481 Starship/Matisse IOMMU
1482 Starship/Matisse PCIe Dummy Host Bridge
1483 Starship/Matisse GPP Bridge
@@ -4484,6 +4575,7 @@
1485 Starship/Matisse Reserved SPP
1486 Starship/Matisse Cryptographic Coprocessor PSPCPP
1487 Starship/Matisse HD Audio Controller
+ 1462 9c37 X570-A PRO motherboard
1488 Starship Reserved SSP
1489 Starship Reserved SSP
148a Starship/Matisse PCIe Dummy Function
@@ -4505,6 +4597,7 @@
149a Starship PCIe GPP Bridge [1:0]
149b Starship Reserved SSP
149c Matisse USB 3.0 Host Controller
+ 1462 7c37 X570-A PRO motherboard
1510 Family 14h Processor Root Complex
174b 1001 PURE Fusion Mini
1512 Family 14h Processor Root Port
@@ -4519,6 +4612,7 @@
1534 Family 16h Processor Function 4
1535 Family 16h Processor Function 5
1536 Family 16h Processor Root Complex
+ 1849 1536 QC5000-ITX/PH
1537 Kabini/Mullins PSP-Platform Security Processor
1538 Family 16h Processor Function 0
1539 Kabini P2P Bridge for PCIe Ports[4:0]
@@ -4587,7 +4681,9 @@
15bc Stoney PCIe [GFX,GPP] Bridge [4:0]
15be Stoney Audio Processor
15d0 Raven/Raven2 Root Complex
+ 103c 8615 Pavilion Laptop 15-cw1xxx
15d1 Raven/Raven2 IOMMU
+ 103c 8615 Pavilion Laptop 15-cw1xxx
15d2 Raven/Raven2 PCIe Dummy Host Bridge
15d3 Raven/Raven2 PCIe GPP Bridge [6:0]
15d4 FireFlight USB 3.1
@@ -4598,12 +4694,16 @@
15de Raven/Raven2/FireFlight HD Audio Controller
15df Family 17h (Models 10h-1fh) Platform Security Processor
15e0 Raven USB 3.1
+ 103c 8615 Pavilion Laptop 15-cw1xxx
15e1 Raven USB 3.1
+ 103c 8615 Pavilion Laptop 15-cw1xxx
15e2 Raven/Raven2/FireFlight/Renoir Audio Processor
15e3 Family 17h (Models 10h-1fh) HD Audio Controller
+ 103c 8615 Pavilion Laptop 15-cw1xxx
15e4 Raven/Raven2/Renoir Sensor Fusion Hub
15e5 Raven2 USB 3.1
15e6 Raven/Raven2/Renoir Non-Sensor Fusion Hub KMDF driver
+ 1022 15e4 Raven/Raven2/Renoir Sensor Fusion Hub
15e8 Raven/Raven2 Device 24: Function 0
15e9 Raven/Raven2 Device 24: Function 1
15ea Raven/Raven2 Device 24: Function 2
@@ -4747,6 +4847,9 @@
43c7 400 Series Chipset PCIe Port
43c8 400 Series Chipset SATA Controller
43d5 400 Series Chipset USB 3.1 XHCI Controller
+ 57a3 Matisse PCIe GPP Bridge
+ 57a4 Matisse PCIe GPP Bridge
+ 57ad Matisse Switch Upstream
7006 AMD-751 [Irongate] System Controller
7007 AMD-751 [Irongate] AGP Bridge
700a AMD-IGR4 AGP Host to PCI Bridge
@@ -4805,6 +4908,8 @@
7801 FCH SATA Controller [AHCI mode]
103c 168b ProBook 4535s Notebook
103c 194e ProBook 455 G1 Notebook
+ 17aa 3988 Z50-75
+ 1849 7801 QC5000-ITX/PH
7802 FCH SATA Controller [RAID mode]
7803 FCH SATA Controller [RAID mode]
7804 FCH SATA Controller [AHCI mode]
@@ -4814,38 +4919,57 @@
7807 FCH USB OHCI Controller
103c 194e ProBook 455 G1 Notebook
103c 1985 Pavilion 17-e163sg Notebook PC
+ 17aa 3988 Z50-75
+ 1849 7807 QC5000-ITX/PH
7808 FCH USB EHCI Controller
103c 194e ProBook 455 G1 Notebook
103c 1985 Pavilion 17-e163sg Notebook PC
+ 17aa 3988 Z50-75
+ 1849 7808 QC5000-ITX/PH
7809 FCH USB OHCI Controller
103c 194e ProBook 455 G1 Notebook
+ 17aa 3988 Z50-75
780a Kabini/Mullins SATA Raid/AHCI Mode (DotHill driver)
780b FCH SMBus Controller
103c 194e ProBook 455 G1 Notebook
103c 1985 Pavilion 17-e163sg Notebook PC
+ 17aa 3988 Z50-75
+ 1849 780b QC5000-ITX/PH
780c FCH IDE Controller
780d FCH Azalia Controller
103c 194e ProBook 455 G1 Notebook
103c 1985 Pavilion 17-e163sg Notebook PC
1043 8444 F2A85-M Series
+ 17aa 3988 Z50-75
+ 1849 8892 QC5000-ITX/PH
780e FCH LPC Bridge
103c 194e ProBook 455 G1 Notebook
103c 1985 Pavilion 17-e163sg Notebook PC
+ 17aa 3988 Z50-75
+ 1849 780e QC5000-ITX/PH
780f FCH PCI Bridge
7812 FCH USB XHCI Controller
7813 FCH SD Flash Controller
7814 FCH USB XHCI Controller
103c 194e ProBook 455 G1 Notebook
103c 1985 Pavilion 17-e163sg Notebook PC
+ 17aa 3988 Z50-75
+ 1849 7814 QC5000-ITX/PH
7900 FCH SATA Controller [IDE mode]
7901 FCH SATA Controller [AHCI mode]
+ 103c 8615 Pavilion Laptop 15-cw1xxx
+ 1462 7c37 X570-A PRO motherboard
7902 FCH SATA Controller [RAID mode]
7903 FCH SATA Controller [RAID mode]
7904 FCH SATA Controller [AHCI mode]
7906 FCH SD Flash Controller
7908 FCH USB EHCI Controller
790b FCH SMBus Controller
+ 103c 8615 Pavilion Laptop 15-cw1xxx
+ 1462 7c37 X570-A PRO motherboard
790e FCH LPC Bridge
+ 103c 8615 Pavilion Laptop 15-cw1xxx
+ 1462 7c37 X570-A PRO motherboard
790f FCH PCI Bridge
7914 FCH USB XHCI Controller
9600 RS780 Host Bridge
@@ -4853,13 +4977,16 @@
1043 82f1 M3A78-EH Motherboard
9601 RS880 Host Bridge
1019 2120 A785GM-M
+ 103c 1609 ProLiant MicroServer N36L
1043 83a2 M4A785-M Mainboard
1043 843e M5A88-V EVO
9602 RS780/RS880 PCI to PCI bridge (int gfx)
9603 RS780 PCI to PCI bridge (ext gfx port 0)
+ 103c 1609 ProLiant MicroServer N36L
9604 RS780/RS880 PCI to PCI bridge (PCIE port 0)
9605 RS780/RS880 PCI to PCI bridge (PCIE port 1)
9606 RS780 PCI to PCI bridge (PCIE port 2)
+ 103c 1609 ProLiant MicroServer N36L
9607 RS780/RS880 PCI to PCI bridge (PCIE port 3)
9608 RS780/RS880 PCI to PCI bridge (PCIE port 4)
9609 RS780/RS880 PCI to PCI bridge (PCIE port 5)
@@ -5815,6 +5942,7 @@
4031 zx2 I/O Controller
4037 PCIe Local Bus Adapter
9602 AMD RS780/RS880 PCI to PCI bridge (int gfx)
+ 103c 1609 ProLiant MicroServer N36L
103e Solliday Engineering
103f Synopsys/Logic Modeling Group
1040 Accelgraphics Inc.
@@ -6400,6 +6528,8 @@
4802 Falcon
4803 Hawk
4806 CPX8216
+# MPC7410 PowerPC microprocessor and PCI host bridge
+ 480b MPC7410
4d68 20268
5600 SM56 PCI Modem
1057 0300 SM56 PCI Speakerphone Modem
@@ -6830,6 +6960,10 @@
1077 02e4 QLE2772 Dual Port 32GFC PCIe Gen4 x8 Adapter
1077 02ee QLE2870 Single Port 64GFC PCIe Gen4 x8 Adapter
1077 02f0 QLE2770 Single Port 32GFC PCIe Gen4 x8 Adapter
+ 1077 02f2 QLogic 1x32Gb QLE2770 FC HBA
+ 1077 02f3 QLogic 2x32Gb QLE2772 FC HBA
+ 1590 02d3 SN1610Q - 1P Enhanced 32GFC Single Port Fibre Channel Host Bus Adapter
+ 1590 02d4 SN1610Q – 2P Enhanced 32GFC Dual Port Fibre Channel Host Bus Adapter
2300 QLA2300 64-bit Fibre Channel Adapter
2312 ISP2312-based 2Gb Fibre Channel to PCI-X HBA
103c 0131 2Gb Fibre Channel - Single port [A7538A]
@@ -6901,6 +7035,20 @@
1077 0012 FastLinQ QL41112H 10GbE Adapter
1077 0019 QL41232HOCU - Dual Port 25/10GbE SFP28 OCP Adapter
1077 0039 QLogic QL41262 PCIe 25Gb 2-Port SFP28 Ethernet Adapter
+ 1077 0053 QLogic 2x25GE QL41232HQCU NIC
+ 1077 0054 2x10GE QL41132HQRJ NIC
+ 1077 0055 QLogic 2x10GE QL41132HQCU NIC
+ 1077 0056 2x10GE QL41132HxRJ NIC
+ 1077 0057 2x25GE QL41232HxCU NIC
+ 1077 0065 QLogic 4x10GE QL41154HQRJ CNA
+ 1077 0066 QLogic 4x10GE QL41154HQCU CNA
+ 1077 0068 10GbE 2p SFP+ QL41132HLCU-HC Adapter
+ 1077 0069 10GbE 2p BASE-T QL41132HQRJ-HC OCP3 Adapter
+ 1077 0070 10GbE 2p BASE-T QL41132HLRJ-HC Adapter
+ 1077 0071 10GbE 2p SFP+ QL41132HQCU-HC OCP3 Adapter
+ 1077 0072 10GbE 4p SFP+ QL41134HLCU-HC Adapter
+ 1077 0073 10/25GbE 2p SFP28 QL41232HQCU-HC OCP3 Adapter
+ 1077 0074 10/25GbE 2p SFP28 QL41232HLCU-HC Adapter
1590 021a 10GbE 2P QL41162HLRJ-HP Adapter
1590 021b 10GbE 2P QL41162HLRJ-HP Adapter
1590 021d 10/25GbE 2P QL41222HLCU-HP Adapter
@@ -6937,6 +7085,8 @@
1077 000d FastLinQ QL41262H 25GbE iSCSI Adapter
1077 000e FastLinQ QL41162H 10GbE iSCSI Adapter
1077 000f 2x25GE QL41262HMKR CNA
+ 1077 0065 QLogic 4x10GE QL41154HQRJ CNA
+ 1077 0066 QLogic 4x10GE QL41154HQCU CNA
1590 021a 10GbE 2P QL41162HLRJ-HP Adapter
1590 021b 10GbE 2P QL41162HLRJ-HP Adapter
8090 FastLinQ QL41000 Series Gigabit Ethernet Controller (SR-IOV VF)
@@ -6956,6 +7106,13 @@
1077 0010 2x25GE QL41232HMKR NIC
1077 0011 FastLinQ QL41212H 25GbE Adapter (SR-IOV VF)
1077 0012 FastLinQ QL41112H 10GbE Adapter (SR-IOV VF)
+ 1077 0053 QLogic 2x25GE QL41232HQCU NIC
+ 1077 0054 QLogic 2x10GE QL41132HQRJ NIC
+ 1077 0055 QLogic 2x10GE QL41132HQCU NIC
+ 1077 0056 2x10GE QL41132HxRJ NIC
+ 1077 0057 2x25GE QL41232HxCU NIC
+ 1077 0065 QLogic 4x10GE QL41154HQRJ CNA
+ 1077 0066 QLogic 4x10GE QL41154HQCU CNA
1590 021a 10GbE 2P QL41162HLRJ-HP Adapter
1590 021b 10GbE 2P QL41162HLRJ-HP Adapter
1590 021e 10/25GbE 2P QL41162HMRJ-HP Adapter
@@ -10792,7 +10949,7 @@
0e0f GK208 HDMI/DP Audio Controller
0e12 TegraK1 PCIe x4 Bridge
0e13 TegraK1 PCIe x1 Bridge
- 0e1a GK110 HDMI Audio
+ 0e1a GK110 High Definition Audio Controller
0e1b GK107 HDMI Audio Controller
103c 197b ZBook 15
1043 8428 GTX650-DC-1GD5
@@ -10997,6 +11154,7 @@
10f0 GP104 High Definition Audio Controller
10f1 GP106 High Definition Audio Controller
10f7 TU102 High Definition Audio Controller
+ 10f8 TU104 HD Audio Controller
10f9 TU106 High Definition Audio Controller
1043 8673 TURBO-RTX2070-8G
1140 GF117M [GeForce 610M/710M/810M/820M / GT 620M/625M/630M/720M]
@@ -11380,8 +11538,7 @@
106b 010d iMac 13,2
11a7 GK104M [GeForce GTX 675MX]
11af GK104GLM [GRID IceCube]
-# GRID K2 Quadro USM
- 11b0 GK104GL [GRID K240Q\K260Q vGPU]
+ 11b0 GK104GL [GRID K240Q / K260Q vGPU]
10de 101a GRID K240Q
10de 101b GRID K260Q
11b1 GK104GL [GRID K2 Tesla USM]
@@ -11593,6 +11750,7 @@
1406 GM206 [GeForce GTX 960 OEM]
1407 GM206 [GeForce GTX 750 v2]
1427 GM206M [GeForce GTX 965M]
+ 103c 825b OMEN-17-w001nv
1430 GM206GL [Quadro M2000]
1431 GM206GL [Tesla M4]
1436 GM206GLM [Quadro M2200 Mobile]
@@ -11616,14 +11774,19 @@
17c2 GM200 [GeForce GTX TITAN X]
17c8 GM200 [GeForce GTX 980 Ti]
17f0 GM200GL [Quadro M6000]
+ 10de 1141 VCA 6000
17f1 GM200GL [Quadro M6000 24GB]
17fd GM200GL [Tesla M40]
- 1ad6 TU102 USB 3.1 Controller
- 1ad7 TU102 UCSI Controller
+ 1ad6 TU102 USB 3.1 Host Controller
+ 1ad7 TU102 USB Type-C UCSI Controller
+ 1ad8 TU104 USB 3.1 Host Controller
+ 1ad9 TU104 USB Type-C UCSI Controller
1ada TU106 USB 3.1 Host Controller
1043 8673 TURBO-RTX2070-8G
- 1adb TU106 USB Type-C Port Policy Controller
+ 1adb TU106 USB Type-C UCSI Controller
1043 8673 TURBO-RTX2070-8G
+ 1aeb TU116 High Definition Audio Controller
+ 1aed TU116 [GeForce GTX 1650 SUPER]
1b00 GP102 [TITAN X]
1b01 GP102 [GeForce GTX 1080 Ti 10GB]
1b02 GP102 [TITAN Xp]
@@ -11685,6 +11848,7 @@
1414 0020 GTX 1060 Mobile
1c2d GP106M
1c30 GP106GL [Quadro P2000]
+ 1c31 GP106GL [Quadro P2200]
1c35 GP106
1c60 GP106BM [GeForce GTX 1060 Mobile 6GB]
103c 8390 GeForce GTX 1060 Max-Q 6GB
@@ -11693,6 +11857,8 @@
1c70 GP106GL
1c81 GP107 [GeForce GTX 1050]
1c82 GP107 [GeForce GTX 1050 Ti]
+ 1043 8613 PH-GTX1050TI-4G
+ 1458 3763 GV-N105TOC-4GD
1c83 GP107 [GeForce GTX 1050 3GB]
1c8c GP107M [GeForce GTX 1050 Ti Mobile]
1c8d GP107M [GeForce GTX 1050 Mobile]
@@ -11701,6 +11867,7 @@
1c90 GP107M [GeForce MX150]
1c91 GP107M [GeForce GTX 1050 3 GB Max-Q]
1c92 GP107M [GeForce GTX 1050 Mobile]
+ 1c94 GP107M [GeForce MX350]
1ca7 GP107GL
1ca8 GP107GL
1caa GP107GL
@@ -11717,8 +11884,11 @@
103c 842f P1000 [Zbook 17 G5 mobile workstation]
103c 8451 P1000 [Zbook Studio x360 G5 mobile workstation]
1cbc GP107GLM [Quadro P600 Mobile]
+ 1cbd GP107GLM [Quadro P620]
1ccc GP107BM [GeForce GTX 1050 Ti Mobile]
1ccd GP107BM [GeForce GTX 1050 Mobile]
+ 1cfa GP107GL [Quadro P2000]
+ 1cfb GP107GL [Quadro P1000]
1d01 GP108 [GeForce GT 1030]
1d10 GP108M [GeForce MX150]
17aa 225e ThinkPad T480
@@ -11726,18 +11896,26 @@
1d12 GP108M [GeForce MX150]
1d72 1701 Mi Notebook Pro [GeForce MX150]
1d13 GP108M [GeForce MX250]
+ 1d16 GP108M [GeForce MX330]
1d33 GP108GLM [Quadro P500 Mobile]
+ 1d34 GP108GLM [Quadro P520]
1d52 GP108BM [GeForce MX250]
1d81 GV100 [TITAN V]
1db1 GV100GL [Tesla V100 SXM2 16GB]
- 1db2 GV100GL [Tesla V100-DGXS-16GB]
+ 1db2 GV100GL [Tesla V100 DGXS 16GB]
1db3 GV100GL [Tesla V100 FHHL 16GB]
1db4 GV100GL [Tesla V100 PCIe 16GB]
1db5 GV100GL [Tesla V100 SXM2 32GB]
1db6 GV100GL [Tesla V100 PCIe 32GB]
1db7 GV100GL [Tesla V100 DGXS 32GB]
+ 1db8 GV100GL [Tesla V100 SXM3 32GB]
+ 10de 131d Tesla V100-SXM3-32GB-H
1dba GV100GL [Quadro GV100]
10de 12eb TITAN V CEO Edition
+ 1df0 GV100GL [Tesla PG500-216]
+ 1df2 GV100GL [Tesla PG503-216]
+ 1df5 GV100GL [Tesla V100 SXM2 16GB]
+ 1df6 GV100GL [Tesla V100S PCIe 32GB]
1e02 TU102 [TITAN RTX]
1e04 TU102 [GeForce RTX 2080 Ti]
1e07 TU102 [GeForce RTX 2080 Ti Rev. A]
@@ -11747,14 +11925,22 @@
1e30 TU102GL [Quadro RTX 6000/8000]
10de 129e Quadro RTX 8000
10de 12ba Quadro RTX 6000
+ 1e37 TU102GL [GRID RTX T10-4/T10-8/T10-16]
+ 10de 1347 GRID RTX T10-8
+ 10de 1348 GRID RTX T10-4
+ 10de 1370 GRID RTX T10-16
1e38 TU102GL
1e3c TU102GL
1e3d TU102GL
1e3e TU102GL
+ 1e78 TU102GL [Quadro RTX 6000/8000]
+ 10de 13d8 Quadro RTX 8000
+ 10de 13d9 Quadro RTX 6000
1e81 TU104 [GeForce RTX 2080 SUPER]
1e82 TU104 [GeForce RTX 2080]
1e84 TU104 [GeForce RTX 2070 SUPER]
1e87 TU104 [GeForce RTX 2080 Rev. A]
+ 1e89 TU104 [GeForce RTX 2060]
1e90 TU104M [GeForce RTX 2080 Mobile]
1eab TU104M
1eae TU104M
@@ -11765,6 +11951,8 @@
1eb8 TU104GL [Tesla T4]
1eb9 TU104GL
1ebe TU104GL
+ 1ec2 TU104 [GeForce RTX 2070 SUPER]
+ 1ec7 TU104 [GeForce RTX 2070 SUPER]
1ed0 TU104BM [GeForce RTX 2080 Mobile]
1f02 TU106 [GeForce RTX 2070]
1043 8673 TURBO RTX 2070
@@ -11776,11 +11964,15 @@
1f11 TU106M [GeForce RTX 2060 Mobile]
1f2e TU106M
1f36 TU106GLM [Quadro RTX 3000 Mobile / Max-Q]
+ 1f42 TU106 [GeForce RTX 2060 SUPER]
+ 1f47 TU106 [GeForce RTX 2060 SUPER]
1f50 TU106BM [GeForce RTX 2070 Mobile]
1f51 TU106BM [GeForce RTX 2060 Mobile]
1f81 TU117
1f82 TU117 [GeForce GTX 1650]
+ 1f91 TU117M [GeForce GTX 1650 Mobile / Max-Q]
1f92 TU117M [GeForce GTX 1650 Mobile]
+ 1f96 TU117M [GeForce GTX 1650 Mobile / Max-Q]
1fae TU117GL
1fb8 TU117GLM [Quadro T2000 Mobile / Max-Q]
1fb9 TU117GLM [Quadro T1000 Mobile]
@@ -11788,9 +11980,11 @@
2182 TU116 [GeForce GTX 1660 Ti]
2183 TU116
2184 TU116 [GeForce GTX 1660]
+ 2187 TU116 [GeForce GTX 1650 SUPER]
2191 TU116M [GeForce GTX 1660 Ti Mobile]
21ae TU116GL
21bf TU116GL
+ 21c4 TU116 [GeForce GTX 1660 SUPER]
21d1 TU116BM [GeForce GTX 1660 Ti Mobile]
10df Emulex Corporation
0720 OneConnect NIC (Skyhawk)
@@ -12005,6 +12199,7 @@
17aa 3832 Yoga 520
522a RTS522A PCI Express Card Reader
103c 8079 EliteBook 840 G3
+ 103c 825b OMEN-17-w001nv
5249 RTS5249 PCI Express Card Reader
103c 1909 ZBook 15
524a RTS524A PCI Express Card Reader
@@ -12026,6 +12221,7 @@
1186 0300 DE-528
1259 2400 AT-2400
1af4 1100 QEMU Virtual Machine
+ 8125 RTL8125 2.5GbE Controller
8129 RTL-8129
10ec 8129 RT8129 Fast Ethernet Adapter
11ec 8129 RTL8111/8168 PCIe Gigabit Ethernet (misconfigured)
@@ -12047,7 +12243,7 @@
103c 006a NX9500
103c 2a20 Pavilion t3030.de Desktop PC
103c 30d9 Presario C700
- 1043 1045 L8400B or L3C/S notebook
+ 1043 1045 L8400B, L3C/S, X58LE notebook
1043 8109 P5P800-MX Mainboard
1071 8160 MIM2000
10bd 0320 EP-320X-R
@@ -12105,6 +12301,9 @@
103c 1611 Pavilion DM1Z-3000
103c 1950 ProBook 450/455
103c 2a6f Asus IPIBL-LB Motherboard
+ 103c 825b OMEN-17-w001nv
+ 103c 8615 Pavilion Laptop 15-cw1xxx
+ 1043 11f5 Notebook motherboard (one of many models)
1043 16d5 U6V/U31J laptop
1043 81aa P5B
1043 82c6 M3A78 Series Motherboard
@@ -12120,7 +12319,9 @@
1462 368c K9AG Neo2
1462 4180 Wind PC MS-7418
1462 7522 X58 Pro-E
+ 1462 7c37 X570-A PRO motherboard
1775 11cc CC11/CL11
+ 17aa 3814 Z50-75
1849 8168 Motherboard (one of many)
7470 3468 TG-3468 Gigabit PCI Express Network Adapter
8086 2055 NUC Kit DN2820FYKH
@@ -12171,8 +12372,11 @@
8821 RTL8821AE 802.11ac PCIe Wireless Network Adapter
b723 RTL8723BE PCIe Wireless Network Adapter
10ec 8739 Dell Wireless 1801
+ 17aa b736 Z50-75
b822 RTL8822BE 802.11a/b/g/n/ac WiFi adapter
+ 103c 831b Realtek RTL8822BE 802.11ac 2 × 2 Wi-Fi + Bluetooth 4.2 Combo Adapter (MU-MIMO supported)
c821 RTL8821CE 802.11ac PCIe Wireless Network Adapter
+ c822 RTL8822CE 802.11ac PCIe Wireless Network Adapter
d723 RTL8723DE 802.11b/g/n PCIe Adapter
10ed Ascii Corporation
7310 V7310
@@ -13835,7 +14039,7 @@
1259 2975 AT-2970SX/2SC Gigabit Ethernet Adapter
1259 2976 AT-2970LX/2SC Gigabit Ethernet Adapter
1259 2977 AT-2970TX/2TX Gigabit Ethernet Adapter
- 4320 SK-9871 V2.0 Gigabit Ethernet 1000Base-ZX Adapter, PCI64, Fiber ZX/SC
+ 4320 SK-98xx V2.0 Gigabit Ethernet Adapter [Marvell 88E8001]
1148 0121 Marvell RDK-8001 Adapter
1148 0221 Marvell RDK-8002 Adapter
1148 0321 Marvell RDK-8003 Adapter
@@ -13854,9 +14058,23 @@
1148 5061 SK-9861 V2.0 Gigabit Ethernet 1000Base-SX Adapter
1148 5071 SK-9871 V2.0 Gigabit Ethernet 1000Base-ZX Adapter
1148 9521 SK-9521 10/100/1000Base-T Adapter
+ 1259 2916 AT-2916T
4400 SK-9Dxx Gigabit Ethernet Adapter
4500 SK-9Mxx Gigabit Ethernet Adapter
- 9000 SK-9S21 10/100/1000Base-T Server Adapter, PCI-X, Copper RJ-45
+ 9000 SK-9Sxx Gigabit Ethernet Server Adapter PCI-X [Marvell 88E8022]
+ 1148 2100 SK-9S21 10/100/1000Base-T Server Adapter, PCI-X, Copper RJ-45
+ 1148 2200 SK-9S22 10/100/1000Base-T Dual Port Server Adapter, PCI-X, 2 Copper RJ-45
+ 1148 2210 SK-9P22 10/100/1000 Base-T Dual Port PMC card
+ 1148 2220 TPMC-GBE-CO
+ 1148 8100 SK-9S81 1000Base-SX Server Adapter,PCI-X, Fiber SX/LC
+ 1148 8200 SK-9S82 1000Base-SX Dual Port Server Adapter, PCI-X, 2 Fiber SX/LC
+ 1148 8210 SK-9P82 1000 Base-SX Dual Port PMC card
+ 1148 8220 TPMC-GBE-FI
+ 1148 9100 SK-9S91 1000Base-LX Server Adapter,PCI-X, Fiber LX/LC
+ 1148 9200 SK-9S92 1000Base-LX Dual Port Server Adapter, PCI-X, 2 Fiber LX/LC
+ 1259 2973 AT-2971SX v2 Gigabit Adapter
+ 1259 2974 AT-2971T v2 Gigabit Adapter
+ 1259 2978 AT-2971LX Gigabit Adapter
9843 [Fujitsu] Gigabit Ethernet
9e00 SK-9E21D 10/100/1000Base-T Adapter, Copper RJ-45
1148 2100 SK-9E21 Server Adapter
@@ -14121,7 +14339,7 @@
1177 Silicon Engineering
1178 Alfa, Inc.
afa1 Fast Ethernet Adapter
-1179 Toshiba America Info Systems
+1179 Toshiba Corporation
0102 Extended IDE Controller
0103 EX-IDE Type-B
010f NVMe Controller
@@ -14130,6 +14348,8 @@
1028 1ffc Express Flash NVMe 1.92T (RI) U.2 (CD5)
1028 1ffd Express Flash NVMe 3.84T (RI) U.2 (CD5)
1028 1ffe Express Flash NVMe 7.68T (RI) U.2 (CD5)
+ 1179 0001 KIOXIA CM5-R series SSD
+ 1179 0021 KIOXIA CD5 series SSD
1d49 4039 Thinksystem U.2 CM5 NVMe SSD
1d49 403a Thinksystem AIC CM5 NVMe SSD
0113 BG3 NVMe SSD Controller
@@ -14223,6 +14443,7 @@
1028 0188 Inspiron 6000 laptop
103c 30c0 Compaq 6710b
103c 30c1 Compaq 6910p
+ 1043 1017 X58LE
1043 1237 A6J-Q008
1043 1967 V6800V
1043 1987 A4K and Z81K notebooks, possibly others ( mid-2005 machines )
@@ -14267,6 +14488,7 @@
103c 30b7 Presario V6133CL
103c 30cc Pavilion dv6700
103c 30cf Pavilion dv95xx/96xx/97xx/98xx series
+ 1043 1017 X58LE
1043 1237 A6J-Q008
1043 1967 V6800V
104d 9035 VAIO VGN-FW11ZRU
@@ -14287,6 +14509,7 @@
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
103c 30cf Pavilion dv9668eg Laptop
+ 1043 1017 X58LE
1043 1237 A6J-Q008
1043 1967 V6800V
10f7 8338 Panasonic CF-Y5 laptop
@@ -14318,6 +14541,7 @@
103c 1521 HP EliteBook 8540w
103c 30b7 Presario V6133CL
103c 30cf Pavilion dv9500/9600/9700 series
+ 1043 1017 X58LE
1183 0843 Alienware Aurora m9700
0852 xD-Picture Card Controller
1025 0121 Aspire 5920G
@@ -14660,6 +14884,8 @@
4380 88E8057 PCI-E Gigabit Ethernet Controller
# AVB = "Audio Video Bridging"
4381 Yukon Optima 88E8059 [PCIe Gigabit Ethernet Controller with AVB]
+ 1259 2803 AT-2814FX
+ 1259 2804 AT-2874xx
4611 GT-64115 System Controller
4620 GT-64120/64120A/64121A System Controller
4801 GT-48001
@@ -15029,7 +15255,7 @@
11fb Datel Inc
11fc Silicon Magic
11fd High Street Consultants
-11fe Comtrol Corporation
+11fe Pepperl+Fuchs Comtrol, Inc.
0001 RocketPort PCI 32-port w/external I/F
0002 RocketPort PCI 8-port w/external I/F
0003 RocketPort PCI 16-port w/external I/F
@@ -15272,7 +15498,7 @@
000e PM/PPC
1224 Interactive Images
1225 Power I/O, Inc.
-1227 Tech-Source
+1227 EIZO Rugged Solutions
0006 Raptor GFX 8P
0023 Raptor GFX [1100T]
0045 Raptor 4000-L [Linux version]
@@ -15861,7 +16087,7 @@
12b4 FutureTel Inc
12b5 Granite Systems Inc.
12b6 Natural Microsystems
-12b7 Cognex Modular Vision Systems Div. - Acumen Inc.
+12b7 Cognex Corporation
12b8 Korg
# Nee US Robotics
12b9 3Com Corp, Modem Division
@@ -15998,6 +16224,7 @@
12d8 Pericom Semiconductor
01a7 7C21P100 2-port PCI-X to PCI-X Bridge
2304 PI7C9X2G304 EL/SL PCIe2 3-Port/4-Lane Packet Switch
+ 2404 PI7C9X2G404 EL/SL PCIe2 4-Port/4-Lane Packet Switch
2608 PI7C9X2G608GP PCIe2 6-Port/8-Lane Packet Switch
400a PI7C9X442SL PCI Express Bridge Port
400e PI7C9X442SL USB OHCI Controller
@@ -17889,6 +18116,7 @@
1028 1ff8 Express Flash PM1725b 3.2TB AIC
1028 1ff9 Express Flash PM1725b 6.4TB AIC
1028 1ffa Express Flash PM1725b 12.8TB AIC
+ a824 NVMe SSD Controller PM173X
144e OLITEC
144f Askey Computer Corp.
1450 Octave Communications Ind.
@@ -18338,6 +18566,9 @@
10cf 1279 LifeBook E8010D
165f NetXtreme BCM5720 2-port Gigabit Ethernet PCIe
1028 04f7 PowerEdge R320 server
+ 1028 08fd PowerEdge R6515/R7515 LOM
+ 1028 08ff PowerEdge Rx5xx LOM Board
+ 1028 0900 PowerEdge C6525 LOM
103c 1786 NC332T Adapter
103c 193d NC332i Adapter
103c 2133 NC332i Adapter
@@ -18363,6 +18594,7 @@
1028 0179 Optiplex GX280
1028 0182 Latitude D610
1028 0187 Precision M70
+ 1028 01a3 Latitude X1
1028 01a8 Precision 380
1028 01ad OptiPlex GX620
103c 3006 DC7100 SFF(DX878AV)
@@ -18627,6 +18859,11 @@
1028 0209 XPS M1330
103c 30c0 Compaq 6710b
17aa 3a23 IdeaPad S10e
+ 1750 BCM57508 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb/200Gb Ethernet
+ 1751 BCM57504 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb/200Gb Ethernet
+ 1752 BCM57502 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet
+ 1806 BCM5750X NetXtreme-E Ethernet Virtual Function
+ 1807 BCM5750X NetXtreme-E RDMA Virtual Function
3352 BCM3352
3360 BCM3360
4210 BCM4210 iLine10 HomePNA 2.0
@@ -18811,6 +19048,7 @@
43bc BCM43602 802.11ac Wireless LAN SoC
43d3 BCM43567 802.11ac Wireless Network Adapter
43d9 BCM43570 802.11ac Wireless Network Adapter
+ 43dc BCM4355 802.11ac Wireless LAN SoC
43df BCM4354 802.11ac Wireless LAN SoC
43e9 BCM4358 802.11ac Wireless LAN SoC
43ec BCM4356 802.11ac Wireless Network Adapter
@@ -18827,7 +19065,9 @@
4412 BCM4412 10/100BaseT
4430 BCM44xx CardBus iLine32 HomePNA 2.0
4432 BCM4432 CardBus 10/100BaseT
- 4464 BCM4464 802.11ac Wireless Network Adapter
+ 4464 BCM4364 802.11ac Wireless Network Adapter
+# brcmfmac reports it as BCM4377/4 but macOS drivers call it BCM4377b
+ 4488 BCM4377b Wireless Network Adapter
4610 BCM4610 Sentry5 PCI to SB Bridge
4611 BCM4610 Sentry5 iLine32 HomePNA 1.0
4612 BCM4610 Sentry5 V.90 56k Modem
@@ -18874,6 +19114,7 @@
5841 BCM5841 Crypto Accelerator
5850 BCM5850 Crypto Accelerator
5e87 Valkyrie offload engine
+ 5e88 Viper Offload Engine
8602 BCM7400/BCM7405 Serial ATA Controller
9026 CN99xx [ThunderX2] Integrated USB 3.0 xHCI Host Controller
9027 CN99xx [ThunderX2] Integrated AHCI/SATA 3 Host Controller
@@ -19192,6 +19433,7 @@
0070 2259 WinTV HVR-1250
0070 6a18 WinTV-quadHD
0070 c108 WinTV-HVR-4400-HD model 1278
+ 1461 3100 CE310B SD PCIe Video Capture Card
5654 2389 GoTView X5 DVD Hybrid PCI-E
5654 2390 GoTView X5 3D HYBRID PCI-E
14f2 MOBILITY Electronics
@@ -19572,15 +19814,23 @@
0213 MT2892 Family [ConnectX-6 Dx Secure Flash Recovery]
0214 MT42822 Family [BlueField-2 SoC Flash Recovery]
0215 MT42822 Family [BlueField-2 Secure Flash Recovery]
+ 0216 MT2894 Family [ConnectX-6 Lx Flash Recovery]
+ 0217 MT2894 Family [ConnectX-6 Lx Secure Flash Recovery]
024e MT53100 [Spectrum-2, Flash recovery mode]
024f MT53100 [Spectrum-2, Secure Flash recovery mode]
0250 Spectrum-3, Flash recovery mode
0251 Spectrum-3, Secure Flash recovery mode
0252 Amos chiplet
+ 0254 Spectrum-4, Flash recovery mode
+ 0255 Spectrum-4, Secure Flash recovery mode
+ 0256 Ofek chiplet
+ 0257 Quantum-2 in Flash Recovery Mode
0262 MT27710 [ConnectX-4 Lx Programmable] EN
0263 MT27710 [ConnectX-4 Lx Programmable Virtual Function] EN
0264 Innova-2 Flex Burn image
0281 NPS-600 Flash Recovery
+ 0538 MT2910 Family [ConnectX-7 Flash Recovery]
+ 0539 MT2910 Family [ConnectX-7 Secure Flash Recovery]
1002 MT25400 Family [ConnectX-2 Virtual Function]
1003 MT27500 Family [ConnectX-3]
1014 04b5 PCIe3 40GbE RoCE Converged Host Bus Adapter for Power
@@ -19635,6 +19885,7 @@
15b3 0003 Mellanox Technologies ConnectX-4 Stand-up single-port 40GbE MCX413A-BCAT
15b3 0005 Mellanox Technologies ConnectX-4 Stand-up single-port 40GbE MCX415A-BCAT
15b3 0006 MCX416A-BCAT, ConnectX-4 EN, 40/56GbE 2P, PCIe3.0 x16
+ 15b3 0007 ConnectX-4 EN network interface card, 40/56GbE dual-port QSFP28, PCIe3.0 x16, tall bracket
15b3 0008 ConnectX-4 Stand-up dual-port 100GbE MCX416A-CCAT
15b3 0033 ConnectX-4 VPI IB EDR/100 GbE Single Port QSFP28 Adapter
15b3 0034 ConnectX-4 VPI IB EDR/100 GbE Dual Port QSFP28 Adapter
@@ -19662,9 +19913,9 @@
101c MT28908 Family [ConnectX-6 Virtual Function]
101d MT2892 Family [ConnectX-6 Dx]
101e ConnectX Family mlx5Gen Virtual Function
- 101f MT28851
+ 101f MT2894 Family [ConnectX-6 Lx]
1020 MT28860
- 1021 MT28861
+ 1021 MT2910 Family [ConnectX-7]
1974 MT28800 Family [ConnectX-5 PCIe Bridge]
1975 MT416842 Family [BlueField SoC PCIe Bridge]
1976 MT28908 Family [ConnectX-6 PCIe Bridge]
@@ -19672,6 +19923,7 @@
1978 MT42822 Family [BlueField-2 SoC PCIe Bridge]
4117 MT27712A0-FDCF-AE
1bd4 0039 SN10XMP2P25
+ 1bd4 003a 10G SFP28 SP EO251FM9 Adapter
1bd4 004d SN10XMP2P25,YZPC-01191-101
5274 MT21108 InfiniBridge
5a44 MT23108 InfiniHost
@@ -19736,7 +19988,9 @@
cf08 Switch-IB2
cf6c MT53100 [Spectrum-2]
cf70 Spectrum-3
+ cf80 Spectrum-4
d2f0 Quantum HDR (200Gbps) switch
+ d2f2 Quantum-2 NDR (400Gbps) switch
15b4 CCI/TRIAD
15b5 Cimetrics Inc
15b6 Texas Memory Systems Inc
@@ -20246,6 +20500,7 @@
1a32 0303 EM303 802.11bgn Wireless Mini PCIe Card [AR9281]
1a32 0306 EM306 802.11bgn Wireless Half-size Mini PCIe Card [AR9283]
1a3b 1067 AW-NE771 802.11bgn Wireless Mini PCIe Card [AR9281]
+ 1a3b 1071 AW-NE772 802.11abgn Wireless Mini PCIe Card [AR9280]
1a3b 1081 AW-NE773 802.11abgn Wireless Half-size Mini PCIe Card [AR9280]
002b AR9285 Wireless Network Adapter (PCI-Express)
1028 0204 Wireless 1502 802.11bgn Half-size Mini PCIe Card
@@ -20330,7 +20585,7 @@
16b8 Sonnet Technologies, Inc.
16be Creatix Polymedia GmbH
16c3 Synopsys, Inc.
- abcd DWC_usb3
+ abcd DWC_usb3 / PCIe bridge
abce DWC_usb3
abcf DWC_usb31
edda EPMockUp
@@ -20770,6 +21025,7 @@
17d3 1882 ARC-1882 8/12/16/24 Port PCIe 3.0 to SAS/SATA 6Gb RAID Controller
17d3 1883 ARC-1883 8/12/16/24 Port PCIe 3.0 to SAS/SATA 12Gb RAID Controller
1884 ARC-1884 series PCIe 3.0 to SAS/SATA 12/6Gb RAID Controller
+ 188a ARC-1886 series PCIe 4.0 to NVMe/SAS/SATA 16/12/6Gb RAID Controller
# nee Neterion Inc., previously S2io Inc.
17d5 Exar Corp.
5731 Xframe 10-Gigabit Ethernet PCI-X
@@ -20998,6 +21254,10 @@
# HT1000 uses 3 IDs 1166:024a (Native SATA Mode), 1166:024b (PATA/IDE Mode), 182f:000b (RAID Mode) depends on SATA BIOS setting
000b BCM5785 [HT1000] SATA (RAID Mode)
1830 Credence Systems Corporation
+ 8000 CPIn
+ 8001 CPId
+ 8002 CPIx
+ 8003 CPIq
183b MikroM GmbH
08a7 MVC100 DVI
08a8 MVC101 SDI
@@ -21524,7 +21784,7 @@
1026 AR8121/AR8113/AR8114 Gigabit or Fast Ethernet
1043 8304 P5KPL-CM Motherboard
1048 Attansic L1 Gigabit Ethernet
- 1043 8226 P5KPL-VM Motherboard
+ 1043 8226 P5B-MX/WiFi-AP, P5KPL-VM Motherboard
1062 AR8132 Fast Ethernet
1063 AR8131 Gigabit Ethernet
1458 e000 GA-G31M-ES2L Motherboard
@@ -21599,6 +21859,7 @@
1987 Phison Electronics Corporation
5007 E7 NVMe Controller
5012 E12 NVMe Controller
+ 5016 E16 PCIe4 NVMe Controller
1989 Montilio Inc.
0001 RapidFile Bridge
8001 RapidFile
@@ -21681,15 +21942,35 @@
19e5 3034 NVMe SSD ES3600C V3 1600GB HHHL AIC
19e5 3036 NVMe SSD ES3600C V3 3200GB HHHL AIC
0200 Hi1822 Family (2*100GE)
+ 19e5 d139 Hi1822 SP572 (2*100GE)
0202 Hi1822 Family (2*32G FC)
+ 19e5 d302 Hi1822 SP521 (2*32G FC)
+ 19e5 d304 Hi1822 SP526 (2*32G FC)
0203 Hi1822 Family (2*16G FC)
+ 19e5 d301 Hi1822 SP520 (2*16G FC)
+ 19e5 d305 Hi1822 SP525 (2*16G FC)
0205 Hi1822 Family (2*100GE)
+ 19e5 df27 Hi1822 MZ731 MEZZ (2*100GE)
+ 0206 Hi1822 Family (2*25GE)
+ 19e5 d138 Hi1822 SP582 (2*25GE)
0210 Hi1822 Family (4*25GE)
+ 19e5 df2e Hi1822 MZ532 MEZZ (4*25GE)
+ 0211 Hi1822 Family (4*25GE)
+ 19e5 d12f Hi1822 SP571 (4*25GE)
+ 19e5 d137 Hi1822 SP581 (4*25GE)
+ 19e5 d142 Hi1822 SP583 (4*25GE)
0212 Hi1822 Family (2*8G FC)
+ 19e5 d303 Hi1822 SP522 (2*8G FC)
+ 19e5 d306 Hi1822 SP523 (2*8G FC)
1710 iBMA Virtual Network Adapter
1711 Hi1710 [iBMC Intelligent Management system chip w/VGA support]
1822 Hi1822 Family (4*25GE)
+ 19e5 d129 Hi1822 SP570 (4*25GE)
+ 19e5 d136 Hi1822 SP580 (4*25GE)
+ 19e5 d141 Hi1822 SP583 (4*25GE)
371e Hi1822 Family Virtual Bridge
+ 375e Hi1822 Family Virtual Function
+ 379e Hi1822 Family Virtual Function
a120 HiSilicon PCIe Root Port with Gen4
a121 HiSilicon PCI-PCI Bridge
a122 HiSilicon Embedded DMA Engine
@@ -21752,6 +22033,7 @@
1a29 Fortinet, Inc.
4338 CP8 Content Processor ASIC
4e36 NP6 Network Processor
+ 4e37 NP7 Network Processor
1a2b Ascom AG
0000 GESP v1.2
0001 GESP v1.3
@@ -21845,6 +22127,8 @@
4005 Accelerated Virtual Video Adapter
4006 Memory Ballooning Controller
1ab9 Espia Srl
+1ac1 Global Unichip Corp.
+ 089a Coral Edge TPU
1ac8 Aeroflex Gaisler
1acc Point of View BV
1ad7 Spectracom Corporation
@@ -21874,6 +22158,8 @@
0310 Wil6200 802.11ad Wireless Network Adapter
1aea Alcor Micro
6601 AU6601 PCI-E Flash card reader controller
+ 6621 AU6621 PCI-E Flash card reader controller
+ 6625 AU6625 PCI-E Flash card reader controller
1aec Wolfson Microelectronics
# nee Fusion-io
1aed SanDisk
@@ -22051,6 +22337,7 @@
1b6f Etron Technology, Inc.
7023 EJ168 USB 3.0 Host Controller
7052 EJ188/EJ198 USB 3.0 Host Controller
+ 1849 7052 QC5000-ITX/PH
1b73 Fresco Logic
1000 FL1000G USB 3.0 Host Controller
1d5c 1000 Anker USB 3.0 Express Card
@@ -22177,25 +22464,34 @@
5001 25G-PCIE3-8B-2S Security Intelligent Adapter
1c1c Symphony
0001 82C101
+1c1f SoftLab-NSK
1c28 Lite-On IT Corp. / Plextor
0122 M6e PCI Express SSD [Marvell 88SS9183]
# previously Fiberblaze
1c2c Silicom Denmark
000a Capture
000f SmartNIC
- 00a0 FBC4G Capture 4x1Gb
- 00a1 FBC4XG Capture 4x10Gb
- 00a2 FBC8XG Capture 8x10Gb
- 00a3 FBC2XG Capture 2x10Gb
- 00a4 FBC4XGG3 Capture 4x10Gb
- 00a5 FBC2XLG Capture 2x40Gb
+ 00a0 FBC4G Capture 4x1Gb [Herculaneum]
+ 00a1 FBC4XG Capture 4x10Gb [Ancona]
+ 00a2 FBC8XG Capture 8x10Gb [Livorno]
+ 00a3 FBC2XG Capture 2x10Gb [Genoa]
+ 00a4 FBC4XGG3 Capture 4x10Gb [Livigno]
+ 00a5 FBC2XLG Capture 2x40Gb [Livorno]
00a6 FBC1CG Capture 1x100Gb
- 00a9 FBC2XGHH Capture 2x10Gb
+ 00a9 FBC2XGHH Capture 2x10Gb [Latina]
00ad FBC2CGG3HL Capture 2x100Gb [Padua]
00af Capture slave device
00e0 PacketMover 2x100Gb [Savona]
00e1 PacketMover 2x100Gb [Tivoli]
- a001 FBC2CGG3 Capture 2x100Gb [Mango]
+ 00e3 PacketMover 2x10Gb [Tivoli]
+ 00e5 PacketMover 2x10Gb [Corfu]
+ a000 FBC2CGG3 Capture 2x40Gb [Mango_02]
+ a001 FBC2CGG3 Capture 2x100Gb [Mango_02]
+ a003 FBC2CGG3 Capture 16x10Gb [Mango]
+ a007 FBC2CGG3 Capture 2x40Gb [Mango]
+ a008 FBC2CGG3 Capture 2x25Gb [Mango]
+ a009 FBC2CGG3 Capture 16x10Gb [Mango]
+ a00a FBC2CGG3 Capture 8x10Gb [Mango]
a00e FB2CG Capture 2x100Gb [Savona]
a00f FB2CG Capture 2x40Gb [Savona]
a011 FB2CG Capture 2x25Gb [Savona]
@@ -22223,9 +22519,17 @@
1c5c SK hynix
1283 PC300 NVMe Solid State Drive 256GB
1284 PC300 NVMe Solid State Drive 512GB
+ 1285 PC300 NVMe Solid State Drive 1TB
1504 SC300 512GB M.2 2280 SATA Solid State Drive
1c5f Beijing Memblaze Technology Co. Ltd.
+ 000d PBlaze5 520/526 AIC
+ 003d PBlaze5 920/926 AIC
+ 010d PBlaze5 520/526 U.2
+ 013d PBlaze5 920/926 U.2
0540 PBlaze4 NVMe SSD
+ 0550 PBlaze5 700/900
+ 0555 PBlaze5 510/516
+ 0557 PBlaze5 910/916
# http://www.nicevt.ru/ (in Russian)
1c63 Science and Research Centre of Computer Technology (JSC "NICEVT")
# http://www.radiotec.ru/catalog.php?cat=jr8&art=14109
@@ -22239,6 +22543,12 @@
1c8c Mobiveil, Inc.
1cb0 Shannon Systems
d000 Venice NVMe SSD
+ 1cb0 2f10 Venice-E Series U.2 SSD
+ 1cb0 2f11 Venice Series U.2 SSD
+ 1cb0 2f12 Venice-X Series U.2 SSD
+ 1cb0 af10 Venice-E Series AIC SSD
+ 1cb0 af11 Venice Series AIC SSD
+ 1cb0 af12 Venice-X Series AIC SSD
1cb1 Collion UG & Co.KG
1cb5 Focusrite Audio Engineering Ltd
0002 Clarett
@@ -22260,6 +22570,8 @@
0303 Simulyzer-RT CompactPCI Serial PSI5-SIM-1 card
0304 Simulyzer-RT CompactPCI Serial PWR-ANA-1 card
0305 Simulyzer-RT CompactPCI Serial CAN-1 card
+# supports 8x CAN (-FD) interfaces
+ 0306 Simulyzer-RT CompactPCI Serial CAN-2 card (CAN-FD)
1cd7 Nanjing Magewell Electronics Co., Ltd.
0010 Pro Capture Endpoint
0014 PRO CAPTURE AIO 4K PLUS
@@ -22309,6 +22621,7 @@
0722 ZX-200 PCIE P2C bridge
1000 ZX-D Standard Host Bridge
1001 ZX-D/ZX-E Miscellaneous Bus
+ 1003 ZX-E Standard Host Bridge
3001 ZX-100 Standard Host Bridge
300a ZX-100 Miscellaneous Bus
3038 ZX-100/ZX-200/ZX-E Standard Universal PCI to USB Host Controller
@@ -22325,6 +22638,7 @@
345b ZX-100/ZX-D/ZX-E Miscellaneous Bus
3a02 ZX-100 C-320 GPU
3a03 ZX-D C-860 GPU
+ 3a04 ZX-E C-960 GPU
9002 ZX-100/ZX-200 EIDE Controller
9003 ZX-100/ZX-E EIDE Controller
9045 ZX-100/ZX-D/ZX-E RAID Accelerator 0
@@ -22335,6 +22649,7 @@
9101 ZX-200 Traffic Controller
9141 ZX-100 High Definition Audio Controller
9142 ZX-D High Definition Audio Controller
+ 9144 ZX-E High Definition Audio Controller
9180 ZX-200 Networking Gigabit Ethernet Adapter
9202 ZX-100 USB eXtensible Host Controller
9203 ZX-200 USB eXtensible Host Controller
@@ -22402,6 +22717,10 @@
1d6c 2001 DPDK-Aware Virtual Function [Arkville VF]
100f AR-ARKA-FX1 [Arkville 64B DPDK Data Mover for Versal]
1010 AR-ARKA-FX1 [Arkville 64B DPDK Data Mover for Agilex]
+ 1011 AR-MAN-U50 [Manitou Class Accelerator for U50]
+ 1012 AR-MAN-U200 [Manitou Class Accelerator for U200]
+ 1013 AR-MAN-U250 [Manitou Class Accelerator for U250]
+ 1014 AR-MAN-U280 [Manitou Class Accelerator for U280]
4200 A5PL-E1-10GETI [10 GbE Ethernet Traffic Instrument]
1d72 Xiaomi
1d78 DERA
@@ -22455,9 +22774,40 @@
1da2 Sapphire Technology Limited
1da3 Habana Labs Ltd.
0001 HL-1000 AI Inference Accelerator [Goya]
+# PCIe accelerator card for Deep Learning training tasks
+ 1000 HL-2000 AI Training Accelerator [Gaudi]
1dbb NGD Systems, Inc.
1dbf Guizhou Huaxintong Semiconductor Technology Co., Ltd
0401 StarDragon4800 PCI Express Root Port
+1dc5 FADU Inc.
+1dcd Liqid Inc.
+1dd8 Pensando Systems Inc
+ 1000 DSC Capri Upstream Port
+ 1dd8 4000 Naples 100Gb 2-port QSFP28 x16 8GB
+ 1dd8 4001 Naples 100Gb 2-port QSFP28 x16 4GB
+ 1dd8 4002 Naples 25Gb 2-port SFP28 x8 4GB
+ 1001 DSC Virtual Downstream Port
+ 1dd8 4000 Naples 100Gb 2-port QSFP28 x16 8GB
+ 1dd8 4001 Naples 100Gb 2-port QSFP28 x16 4GB
+ 1dd8 4002 Naples 25Gb 2-port SFP28 x8 4GB
+ 1002 DSC Ethernet Controller
+ 1dd8 4000 Naples 100Gb 2-port QSFP28 x16 8GB
+ 1dd8 4001 Naples 100Gb 2-port QSFP28 x16 4GB
+ 1dd8 4002 Naples 25Gb 2-port SFP28 x8 4GB
+ 1003 DSC Ethernet Controller VF
+ 1dd8 4000 Naples 100Gb 2-port QSFP28 x16 8GB
+ 1dd8 4001 Naples 100Gb 2-port QSFP28 x16 4GB
+ 1dd8 4002 Naples 25Gb 2-port SFP28 x8 4GB
+ 1004 DSC Management Controller
+ 1dd8 4000 Naples 100Gb 2-port QSFP28 x16 8GB
+ 1dd8 4001 Naples 100Gb 2-port QSFP28 x16 4GB
+ 1dd8 4002 Naples 25Gb 2-port SFP28 x8 4GB
+ 1007 DSC Storage Accelerator
+ 1dd8 4000 Naples 100Gb 2-port QSFP28 x16 8GB
+ 1dd8 4001 Naples 100Gb 2-port QSFP28 x16 4GB
+ 1dd8 4002 Naples 25Gb 2-port SFP28 x8 4GB
+1de0 Groq
+ 0000 Q100 Tensor Streaming Processor
1de1 Tekram Technology Co.,Ltd.
0391 TRM-S1040 [DC-315 / DC-395 series]
2020 DC-390
@@ -22488,6 +22838,7 @@
1df3 0001 ENA2080F
1df3 0002 ENA2080FS
1df3 0003 ENA2100F
+ 1df3 0004 ENA2040F
0204 ACE-NIC-NID Programmable Network Accelerator
1df3 0001 ENA1020Z
1df3 0002 ENA1020ZS
@@ -22510,7 +22861,8 @@
# JungleCat VU35P Module
1635 JCM35
1e26 Fujitsu Client Computing Limited
-1e38 Thinci, Inc
+# nee Thinci, Inc
+1e38 Blaize, Inc
1e3d Burlywood, Inc
1e49 Yangtze Memory Technologies Co.,Ltd
1e4c GSI Technology
@@ -22518,10 +22870,14 @@
0010 Gemini [ Lida ]
1e4c 0120 SE120
1e57 Beijing Panyi Technology Co., Ltd
- 0001 PY8800
- 0100 PY8800
+ 0100 The device has already been deleted.
0000 0100 PY8800 64GB Accelerator
1e6b Axiado Corp.
+1e89 ID Quantique SA
+ 0002 Quantis-PCIe-40M
+ 0003 Quantis-PCIe-240M
+# aka SED Systems
+1e94 Calian SED
# nee Tumsan Oy
1fc0 Ascom (Finland) Oy
0300 E2200 Dual E1/Rawpipe Card
@@ -23406,6 +23762,7 @@
1043 108d VivoBook X202EV
1043 1477 N56VZ
1043 1517 Zenbook Prime UX31A
+ 10cf 16bf LIFEBOOK E752
0155 Xeon E3-1200 v2/3rd Gen Core processor PCI Express Root Port
8086 2010 Server Board S1200BTS
0156 3rd Gen Core processor Graphics Controller
@@ -23425,10 +23782,19 @@
0166 3rd Gen Core processor Graphics Controller
1043 1517 Zenbook Prime UX31A
1043 2103 N56VZ
+ 10cf 16c1 LIFEBOOK E752
016a Xeon E3-1200 v2/3rd Gen Core processor Graphics Controller
1043 844d P8B WS Motherboard
0172 Xeon E3-1200 v2/3rd Gen Core processor Graphics Controller
0176 3rd Gen Core processor Graphics Controller
+ 02a4 Comet Lake SPI (flash) Controller
+ 02a6 Comet Lake North Peak
+ 02d3 Comet Lake SATA AHCI Controller
+ 02e0 Comet Lake Management Engine Interface
+ 02e8 Serial IO I2C Host Controller
+ 02f0 Wireless-AC 9462
+ 02f9 Comet Lake Thermal Subsystem
+ 02fc Comet Lake Integrated Sensor Solution
0309 80303 I/O Processor PCI-to-PCI Bridge
030d 80312 I/O Companion Chip PCI-to-PCI Bridge
0326 6700/6702PXH I/OxAPIC Interrupt Controller A
@@ -24457,6 +24823,7 @@
104e Ethernet Controller X710 for 10 Gigabit SFP+
104f Ethernet Controller X710 for 10 Gigabit backplane
1050 82562EZ 10/100 Ethernet Controller
+ 1014 0287 ThinkCentre S50
1028 019d Dimension 3000
1462 728c 865PE Neo2 (MS-6728)
1462 758c MS-6758 (875P Neo)
@@ -25016,6 +25383,7 @@
8086 357a Server Board S1200BTS
1503 82579V Gigabit Network Connection
1043 849c P8P67 Deluxe Motherboard
+ 10cf 161c LIFEBOOK E752
1507 Ethernet Express Module X520-P2
1508 82598EB Gigabit BX Network Connection
1509 82580 Gigabit Network Connection
@@ -25090,6 +25458,7 @@
8086 00a1 Ethernet Server Adapter I350-T4
8086 00a2 Ethernet Server Adapter I350-T2
8086 00a3 Ethernet Network Adapter I350-T4 for OCP NIC 3.0
+ 8086 00aa Ethernet Network Adapter I350-T4 for OCP NIC 3.0
8086 5001 Ethernet Server Adapter I350-T4
8086 5002 Ethernet Server Adapter I350-T2
8086 5003 Ethernet 1G 4P I350-t OCP
@@ -25149,6 +25518,7 @@
1531 I210 Gigabit Unprogrammed
1533 I210 Gigabit Network Connection
103c 0003 Ethernet I210-T1 GbE NIC
+ 1059 0180 RD10019 1GbE interface
1093 7706 Compact Vision System Ethernet Adapter
10a9 802c UV300 BaseIO single-port GbE
10a9 802d UV3000 BaseIO GbE Network
@@ -25211,6 +25581,7 @@
18d4 0c08 X550 10Gb 2-port RJ45 OCP Mezz Card MOP81-I-10GT2
193d 1008 560T-B
193d 1009 560T-L
+ 193d 1011 UN-NIC-ETH563T-sL-2P
8086 0001 Ethernet Converged Network Adapter X550-T2
8086 001a Ethernet Converged Network Adapter X550-T2
8086 001b Ethernet Server Adapter X550-T2 for OCP
@@ -25342,6 +25713,22 @@
1137 0000 Ethernet Network Adapter XXV710
1137 0225 Ethernet Network Adapter XXV710
1137 02b4 Ethernet Network Adapter XXV710 OCP 2.0
+# UEFI PXE Disabled
+ 1374 0230 Single Port 25 Gigabit Ethernet PCI Express Server Adapter (PE325G1I71)
+# With UEFI PXE Enabled
+ 1374 0231 Single Port 25 Gigabit Ethernet PCI Express Server Adapter (PE325G1I71EU)
+# UEFI PXE Disabled
+ 1374 0234 Dual Port 25 Gigabit Ethernet PCI Express Server Adapter (PE325G2I71)
+# With UEFI PXE Enabled
+ 1374 0235 Dual Port 25 Gigabit Ethernet PCI Express Server Adapter (PE325G2I71EU)
+# PCIe x8 Bifurcated as x4x4, UEFI PXE Disabled, low profile
+ 1374 0238 Quad Port 25 Gigabit Ethernet PCI Express Server Adapter (PE325G4I71L)
+# PCIe x8 Bifurcated as x4x4, UEFI PXE Enabled, low profile
+ 1374 0239 Quad Port 25 Gigabit Ethernet PCI Express Server Adapter (PE325G4I71LEU)
+# PCIe x16 Bifurcated as x8x8, UEFI PXE Disabled, low profile
+ 1374 023a Quad Port 25 Gigabit Ethernet PCI Express Server Adapter (PE31625G4I71L)
+# PCIe x16 Bifurcated as x8x8, UEFI PXE Enabled, low profile
+ 1374 023b Quad Port 25 Gigabit Ethernet PCI Express Server Adapter (PE31625G4I71LEU)
1590 0000 Ethernet Network Adapter XXV710-2
1590 0253 Ethernet 10/25/Gb 2-port 661SFP28 Adapter
8086 0000 Ethernet Network Adapter XXV710
@@ -25358,7 +25745,18 @@
8086 4001 Ethernet Network Adapter XXV710-2
1591 Ethernet Controller E810-C for backplane
1592 Ethernet Controller E810-C for QSFP
+ 8086 0002 Ethernet Network Adapter E810-C-Q2
+ 8086 0004 Ethernet Network Adapter E810-C-Q2
+ 8086 0005 Ethernet Network Adapter E810-C-Q1 for OCP3.0
+ 8086 0006 Ethernet Network Adapter E810-C-Q2 for OCP3.0
+ 8086 0009 Ethernet Network Adapter E810-C-Q1
1593 Ethernet Controller E810-C for SFP
+ 8086 0002 Ethernet Network Adapter E810-L-2
+ 8086 0005 Ethernet Network Adapter E810-XXV-4
+ 8086 0006 Ethernet Network Adapter E810-XXV-4
+ 8086 0007 Ethernet Network Adapter E810-XXV-4
+ 8086 0008 Ethernet Network Adapter E810-XXV-2
+ 8086 0009 Ethernet Network Adapter E810-XXV-2 for OCP 2.0
15a0 Ethernet Connection (2) I218-LM
15a1 Ethernet Connection (2) I218-V
15a2 Ethernet Connection (3) I218-LM
@@ -25435,8 +25833,17 @@
15ec JHL7540 Thunderbolt 3 USB Controller [Titan Ridge 4C 2018]
15ef JHL7540 Thunderbolt 3 Bridge [Titan Ridge DD 2018]
15f0 JHL7540 Thunderbolt 3 USB Controller [Titan Ridge DD 2018]
+ 15f4 Ethernet Connection (15) I219-LM
+ 15f5 Ethernet Connection (15) I219-V
15f6 I210 Gigabit Ethernet Connection
+ 15f9 Ethernet Connection (14) I219-LM
+ 15fa Ethernet Connection (14) I219-V
+ 15fb Ethernet Connection (13) I219-LM
+ 15fc Ethernet Connection (13) I219-V
15ff Ethernet Controller X710 for 10GBASE-T
+ 1137 0000 X710TLG GbE RJ45 PCIe NIC
+ 1137 02c1 X710T2LG 2x10 GbE RJ45 PCIe NIC
+ 1137 02c2 X710T4LG 4x10 GbE RJ45 PCIe NIC
8086 0000 Ethernet Network Adapter X710-TL
8086 0001 Ethernet Network Adapter X710-T4L
8086 0002 Ethernet Network Adapter X710-T4L
@@ -25446,6 +25853,11 @@
8086 0006 Ethernet 10G 4P X710-T4L-t Adapter
8086 0007 Ethernet 10G 2P X710-T2L-t OCP
8086 0008 Ethernet 10G 4P X710-T4L-t OCP
+ 8086 0009 Ethernet Network Adapter X710-T4L for OCP 3.0
+ 8086 000a Ethernet Network Adapter X710-T4L for OCP 3.0
+ 8086 000b Ethernet Network Adapter X710-T2L for OCP 3.0
+ 8086 000c Ethernet Network Adapter X710-T2L for OCP 3.0
+ 8086 000f Ethernet Network Adapter X710-T2L for OCP 3.0
1600 Broadwell-U Host Bridge -OPI
1601 Broadwell-U PCI Express x16 Controller
1602 Broadwell-U Integrated Graphics
@@ -25493,6 +25905,7 @@
1903 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Thermal Subsystem
1028 06dc Latitude E7470
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
17aa 225d ThinkPad T480
1904 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
1028 06dc Latitude E7470
@@ -25508,6 +25921,7 @@
190f Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
1910 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
1911 Xeon E3-1200 v5/v6 / E3-1500 v5 / 6th/7th/8th Gen Core Processor Gaussian Mixture Model
17aa 2247 ThinkPad T570
17aa 224f ThinkPad X1 Carbon 5th Gen
@@ -25521,6 +25935,7 @@
1919 Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Imaging Unit
191b HD Graphics 530
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
191d HD Graphics P530
191e HD Graphics 515
191f Xeon E3-1200 v5/E3-1500 v5/6th Gen Core Processor Host Bridge/DRAM Registers
@@ -25608,6 +26023,10 @@
19df Atom Processor C3000 Series SMBus controller
19e0 Atom Processor C3000 Series SPI Controller
19e2 Atom Processor C3000 Series QuickAssist Technology
+ 1a1c Ethernet Connection (17) I219-LM
+ 1a1d Ethernet Connection (17) I219-V
+ 1a1e Ethernet Connection (16) I219-LM
+ 1a1f Ethernet Connection (16) I219-V
1a21 82840 840 [Carmel] Chipset Host Bridge (Hub A)
1a23 82840 840 [Carmel] Chipset AGP Bridge
1a24 82840 840 [Carmel] Chipset PCI Bridge (Hub B)
@@ -25858,6 +26277,7 @@
1043 108d VivoBook X202EV
1043 1477 N56VZ
1043 1517 Zenbook Prime UX31A
+ 10cf 16e2 LIFEBOOK E752
144d c652 NP300E5C series laptop
1e04 7 Series/C210 Series Chipset Family SATA Controller [RAID mode]
1e05 7 Series Chipset SATA Controller [RAID mode]
@@ -25872,6 +26292,7 @@
1043 1477 N56VZ
1043 1517 Zenbook Prime UX31A
1043 84ca P8H77-I Motherboard
+ 10cf 16e9 LIFEBOOK E752
144d c652 NP300E5C series laptop
1849 1e10 Motherboard
1e12 7 Series/C210 Series Chipset Family PCI Express Root Port 2
@@ -25879,6 +26300,7 @@
1043 1477 N56VZ
1043 1517 Zenbook Prime UX31A
1e14 7 Series/C210 Series Chipset Family PCI Express Root Port 3
+ 10cf 16e9 LIFEBOOK E752
1e16 7 Series/C216 Chipset Family PCI Express Root Port 4
1043 108d VivoBook X202EV
1043 1477 N56VZ
@@ -25891,6 +26313,7 @@
1849 1e1a Motherboard
1e1c 7 Series/C210 Series Chipset Family PCI Express Root Port 7
1e1e 7 Series/C210 Series Chipset Family PCI Express Root Port 8
+ 10cf 16e9 LIFEBOOK E752
1849 1e1e Motherboard
1e20 7 Series/C216 Chipset Family High Definition Audio Controller
1028 054b XPS One 2710
@@ -25899,6 +26322,7 @@
1043 1517 Zenbook Prime UX31A
1043 8415 P8H77-I Motherboard
1043 8445 P8Z77-V LX Motherboard
+ 10cf 1757 LIFEBOOK E752
144d c652 NP300E5C series laptop
1849 1898 Z77 Extreme4 motherboard
1e22 7 Series/C216 Chipset Family SMBus Controller
@@ -25906,6 +26330,7 @@
1043 1477 N56VZ
1043 1517 Zenbook Prime UX31A
1043 84ca P8 series motherboard
+ 10cf 16e6 LIFEBOOK E752
144d c652 NP300E5C series laptop
1849 1e22 Motherboard
1e24 7 Series/C210 Series Chipset Family Thermal Management Controller
@@ -25916,6 +26341,7 @@
1043 1477 N56VZ
1043 1517 Zenbook Prime UX31A
1043 84ca P8 series motherboard
+ 10cf 16e8 LIFEBOOK E752
144d c652 NP300E5C series laptop
1849 1e26 Motherboard
1e2d 7 Series/C216 Chipset Family USB Enhanced Host Controller #2
@@ -25923,6 +26349,7 @@
1043 1477 N56VZ
1043 1517 Zenbook Prime UX31A
1043 84ca P8 series motherboard
+ 10cf 16e8 LIFEBOOK E752
144d c652 NP300E5C series laptop
1849 1e2d Motherboard
1e31 7 Series/C210 Series Chipset Family USB xHCI Host Controller
@@ -25932,6 +26359,7 @@
1043 1477 N56VZ
1043 1517 Zenbook Prime UX31A
1043 84ca P8 series motherboard
+ 10cf 16ee LIFEBOOK E752
17aa 21f3 ThinkPad T430
1849 1e31 Motherboard
1e33 7 Series/C210 Series Chipset Family LAN Controller
@@ -25940,6 +26368,7 @@
1043 1477 N56VZ
1043 1517 Zenbook Prime UX31A
1043 84ca P8 series motherboard
+ 10cf 16ea LIFEBOOK E752
144d c652 NP300E5C series laptop
1849 1e3a Motherboard
1e3b 7 Series/C210 Series Chipset Family MEI Controller #2
@@ -25975,6 +26404,7 @@
1e59 HM76 Express Chipset LPC Controller
1043 1477 N56VZ
1043 1517 Zenbook Prime UX31A
+ 10cf 16e0 LIFEBOOK E752
1e5a 7 Series Chipset Family LPC Controller
1e5b UM77 Express Chipset LPC Controller
1e5c 7 Series Chipset Family LPC Controller
@@ -26047,6 +26477,7 @@
2018 Sky Lake-E M2PCI Registers
201a Sky Lake-E Non-Transparent Bridge Registers
201c Sky Lake-E Non-Transparent Bridge Registers
+ 201d Volume Management Device NVMe RAID Controller
2020 Sky Lake-E DMI3 Registers
15d9 095d X11SPM-TF
2021 Sky Lake-E CBDMA Registers
@@ -26102,7 +26533,7 @@
2241 Larrabee
2250 Xeon Phi coprocessor 5100 series
225c Xeon Phi coprocessor SE10/7120 series
- 225d Xeon Phi coprocessor 3120 series
+ 225d Xeon Phi coprocessor 3120 series
225e Xeon Phi coprocessor 31S1
2262 Xeon Phi coprocessor 7220
2280 Atom/Celeron/Pentium Processor x5-E8000/J3xxx/N3xxx Series SoC Transaction Register
@@ -26266,6 +26697,7 @@
103c 309f Compaq nx9420 Notebook
103c 30a3 Compaq nw8440
103c 30c1 Compaq 6910p
+ 1043 1017 X58LE
104d 902d VAIO VGN-NR120E
105b 0d7c D270S/D250S Motherboard
1071 8209 Medion MIM 2240 Notebook PC [MD98100]
@@ -26342,6 +26774,7 @@
# same ID possibly also on other ASUS boards
1043 8277 P5K PRO Motherboard
1043 844d P8 series motherboard
+ 1043 8534 ASUS B85-PLUS
1458 5000 Motherboard
1462 7345 MS-7345 Motherboard: Intel 82801I/IR [ICH9/ICH9R]
1462 7418 Wind PC MS-7418
@@ -26639,6 +27072,7 @@
8086 4c43 Desktop Board D865GLC
8086 524c D865PERL mainboard
24d2 82801EB/ER (ICH5/ICH5R) USB UHCI Controller #1
+ 1014 0287 ThinkCentre S50
1014 02dd eServer xSeries server mainboard
1014 02ed eServer xSeries server mainboard
1028 0168 Precision Workstation 670 Mainboard
@@ -26662,6 +27096,7 @@
8086 4c43 Desktop Board D865GLC
8086 524c D865PERL mainboard
24d3 82801EB/ER (ICH5/ICH5R) SMBus Controller
+ 1014 0287 ThinkCentre S50
1014 02dd eServer xSeries server mainboard
1014 02ed eServer xSeries server mainboard
1028 0156 Precision 360
@@ -26681,6 +27116,7 @@
8086 4c43 Desktop Board D865GLC
8086 524c D865PERL mainboard
24d4 82801EB/ER (ICH5/ICH5R) USB UHCI Controller #2
+ 1014 0287 ThinkCentre S50
1014 02dd eServer xSeries server mainboard
1014 02ed eServer xSeries server mainboard
1028 0168 Precision Workstation 670 Mainboard
@@ -26706,6 +27142,7 @@
8086 524c D865PERL mainboard
24d5 82801EB/ER (ICH5/ICH5R) AC'97 Audio Controller
100a 147b Abit IS7-E motherboard
+ 1014 0287 ThinkCentre S50
1028 0168 Precision Workstation 670 Mainboard
1028 0169 Precision 470
103c 006a NX9500
@@ -26723,6 +27160,7 @@
24d6 82801EB/ER (ICH5/ICH5R) AC'97 Modem Controller
103c 006a NX9500
24d7 82801EB/ER (ICH5/ICH5R) USB UHCI Controller #3
+ 1014 0287 ThinkCentre S50
1014 02ed xSeries server mainboard
1028 0168 Precision Workstation 670 Mainboard
1028 0169 Precision 470
@@ -26744,6 +27182,7 @@
8086 4c43 Desktop Board D865GLC
8086 524c D865PERL mainboard
24db 82801EB/ER (ICH5/ICH5R) IDE Controller
+ 1014 0287 ThinkCentre S50
1014 02dd eServer xSeries server mainboard
1014 02ed eServer xSeries server mainboard
1028 0168 Precision Workstation 670 Mainboard
@@ -26769,6 +27208,7 @@
8086 524c D865PERL mainboard
24dc 82801EB (ICH5) LPC Interface Bridge
24dd 82801EB/ER (ICH5/ICH5R) USB2 EHCI Controller
+ 1014 0287 ThinkCentre S50
1014 02dd eServer xSeries server mainboard
1014 02ed eServer xSeries server mainboard
1028 0168 Precision Workstation 670 Mainboard
@@ -26790,6 +27230,7 @@
8086 4c43 Desktop Board D865GLC
8086 524c D865PERL mainboard
24de 82801EB/ER (ICH5/ICH5R) USB UHCI Controller #4
+ 1014 0287 ThinkCentre S50
1014 02ed xSeries server mainboard
1028 0168 Precision Workstation 670 Mainboard
1028 0169 Precision 470
@@ -26897,6 +27338,7 @@
1458 2570 GA-8IPE1000 Pro2 motherboard (865PE)
2571 82865G/PE/P AGP Bridge
2572 82865G Integrated Graphics Controller
+ 1014 0287 ThinkCentre S50
1028 019d Dimension 3000
103c 12bc D530 sff(dc578av)
1043 80a5 P5P800-MX Mainboard
@@ -27426,7 +27868,7 @@
27b8 82801GB/GR (ICH7 Family) LPC Interface Bridge
1028 01e6 PowerEdge 860
103c 2a8c Compaq 500B Microtower
- 1043 8179 P5KPL-VM Motherboard
+ 1043 8179 P5B-MX/WiFi-AP, P5KPL-VM Motherboard
107b 5048 E4500
1462 7418 Wind PC MS-7418
1775 11cc CC11/CL11
@@ -27454,7 +27896,7 @@
1028 01df PowerEdge SC440
1028 01e6 PowerEdge 860
103c 2a8c Compaq 500B Microtower
- 1043 8179 P5KPL-VM Motherboard
+ 1043 8179 P5B-MX/WiFi-AP, P5KPL-VM Motherboard
107b 5048 E4500
1462 2310 MSI Hetis 945
1462 7236 945P Neo3-F Rev. 2.2 motherboard
@@ -27499,7 +27941,7 @@
103c 30a3 Compaq nw8440
103c 30d5 530 Laptop
1043 1237 A6J-Q008
- 1043 8179 P5KPL-VM,P5LD2-VM Mainboard
+ 1043 8179 P5B-MX/WiFi-AP, P5KPL-VM, P5LD2-VM Mainboard
1043 83ad Eee PC 1015PX
105b 0d7c D270S/D250S Motherboard
1071 8209 Medion MIM 2240 Notebook PC [MD98100]
@@ -27524,7 +27966,7 @@
103c 30a1 NC2400
103c 30a3 Compaq nw8440
1043 1237 A6J-Q008
- 1043 8179 P5KPL-VM,P5LD2-VM Mainboard
+ 1043 8179 P5B-MX/WiFi-AP, P5KPL-VM, P5LD2-VM Mainboard
1043 83ad Eee PC 1015PX
105b 0d7c D270S/D250S Motherboard
1071 8209 Medion MIM 2240 Notebook PC [MD98100]
@@ -27549,7 +27991,7 @@
103c 30a1 NC2400
103c 30a3 Compaq nw8440
1043 1237 A6J-Q008
- 1043 8179 P5KPL-VM,P5LD2-VM Mainboard
+ 1043 8179 P5B-MX/WiFi-AP, P5KPL-VM, P5LD2-VM Mainboard
1043 83ad Eee PC 1015PX
105b 0d7c D270S/D250S Motherboard
1071 8209 Medion MIM 2240 Notebook PC [MD98100]
@@ -27572,7 +28014,7 @@
103c 30a1 NC2400
103c 30a3 Compaq nw8440
1043 1237 A6J-Q008
- 1043 8179 P5KPL-VM,P5LD2-VM Mainboard
+ 1043 8179 P5B-MX/WiFi-AP, P5KPL-VM, P5LD2-VM Mainboard
1043 83ad Eee PC 1015PX
105b 0d7c D270S/D250S Motherboard
1071 8209 Medion MIM 2240 Notebook PC [MD98100]
@@ -27597,7 +28039,7 @@
103c 30a3 Compaq nw8440
103c 30d5 530 Laptop
1043 1237 A6J-Q008
- 1043 8179 P5KPL-VM,P5LD2-VM Mainboard
+ 1043 8179 P5B-MX/WiFi-AP, P5KPL-VM, P5LD2-VM Mainboard
1043 83ad Eee PC 1015PX
105b 0d7c D270S/D250S Motherboard
1071 8209 Medion MIM 2240 Notebook PC [MD98100]
@@ -27655,6 +28097,7 @@
1043 1123 A6J-Q008
1043 13c4 G2P
1043 817f P5LD2-VM Mainboard (Realtek ALC 882 codec)
+ 1043 8249 P5B-MX/WiFi-AP
1043 8290 P5KPL-VM Motherboard
1043 82ea P5KPL-CM Motherboard
1043 8437 Eee PC 1015PX
@@ -27685,7 +28128,7 @@
1028 01e6 PowerEdge 860
103c 2a3b Pavilion A1512X
103c 2a8c Compaq 500B Microtower
- 1043 8179 P5KPL-VM Motherboard
+ 1043 8179 P5B-MX/WiFi-AP, P5KPL-VM Motherboard
105b 0d7c D270S/D250S Motherboard
1071 8209 Medion MIM 2240 Notebook PC [MD98100]
10f7 8338 Panasonic CF-Y5 laptop
@@ -27716,7 +28159,7 @@
103c 30a3 Compaq nw8440
103c 30d5 530 Laptop
1043 1237 A6J-Q008
- 1043 8179 P5KPL-VM Motherboard
+ 1043 8179 P5B-MX/WiFi-AP, P5KPL-VM Motherboard
107b 5048 E4500
10f7 8338 Panasonic CF-Y5 laptop
1462 7418 Wind PC MS-7418
@@ -27743,6 +28186,7 @@
103c 30c0 Compaq 6710b
103c 30cc Pavilion dv6700
103c 30d9 Presario C700
+ 1043 1017 X58LE
104d 9005 Vaio VGN-FZ260E
104d 902d VAIO VGN-NR120E
17aa 20a5 ThinkPad R61
@@ -27782,6 +28226,7 @@
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
103c 30d9 Presario C700
+ 1043 1017 X58LE
104d 9005 Vaio VGN-FZ260E
104d 902d VAIO VGN-NR120E
17aa 20a7 ThinkPad T61/R61
@@ -27799,6 +28244,7 @@
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
103c 30d9 Presario C700
+ 1043 1017 X58LE
1043 81ec P5B
104d 9005 Vaio VGN-FZ260E
104d 902d VAIO VGN-NR120E
@@ -27815,6 +28261,7 @@
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
103c 30d9 Presario C700
+ 1043 1017 X58LE
1043 81ec P5B
104d 9005 Vaio VGN-FZ260E
104d 902d VAIO VGN-NR120E
@@ -27831,6 +28278,7 @@
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
103c 30d9 Presario C700
+ 1043 1017 X58LE
1043 81ec P5B
104d 9005 Vaio VGN-FZ260E
104d 902d VAIO VGN-NR120E
@@ -27847,6 +28295,7 @@
103c 30c0 Compaq 6710b
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
+ 1043 1017 X58LE
1043 81ec P5B
104d 9005 Vaio VGN-FZ260E
104d 902d VAIO VGN-NR120E
@@ -27862,6 +28311,7 @@
103c 30c0 Compaq 6710b
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
+ 1043 1017 X58LE
1043 81ec P5B
104d 9005 Vaio VGN-FZ260E
104d 902d VAIO VGN-NR120E
@@ -27877,6 +28327,7 @@
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
103c 30d9 Presario C700
+ 1043 1017 X58LE
1043 81ec P5B
104d 9005 Vaio VGN-FZ260E
104d 902d VAIO VGN-NR120E
@@ -27892,6 +28343,7 @@
103c 30c0 Compaq 6710b
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
+ 1043 1017 X58LE
1043 81ec P5B
104d 9005 Vaio VGN-FZ260E
104d 902d VAIO VGN-NR120E
@@ -27904,6 +28356,7 @@
1028 01f3 Inspiron 1420
1028 022f Inspiron 1525
103c 30d9 Presario C700
+ 1043 1017 X58LE
1043 81ec P5B
104d 9005 Vaio VGN-FZ260E
104d 9008 Vaio VGN-SZ79SN_C
@@ -27915,19 +28368,23 @@
283f 82801H (ICH8 Family) PCI Express Port 1
1028 01da OptiPlex 745
103c 30c1 Compaq 6910p
+ 1043 1017 X58LE
104d 902d VAIO VGN-NR120E
17aa 20ad ThinkPad T61/R61
17c0 4083 Medion WIM 2210 Notebook PC [MD96850]
2841 82801H (ICH8 Family) PCI Express Port 2
103c 30c1 Compaq 6910p
+ 1043 1017 X58LE
104d 902d VAIO VGN-NR120E
17aa 20ad ThinkPad T61/R61
17c0 4083 Medion WIM 2210 Notebook PC [MD96850]
2843 82801H (ICH8 Family) PCI Express Port 3
+ 1043 1017 X58LE
104d 902d VAIO VGN-NR120E
17aa 20ad ThinkPad T61/R61
17c0 4083 Medion WIM 2210 Notebook PC [MD96850]
2845 82801H (ICH8 Family) PCI Express Port 4
+ 1043 1017 X58LE
17aa 20ad ThinkPad T61/R61
17c0 4083 Medion WIM 2210 Notebook PC [MD96850]
2847 82801H (ICH8 Family) PCI Express Port 5
@@ -27951,6 +28408,7 @@
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
1043 1339 M51S series
+ 1043 17f3 X58LE
1043 81ec P5B
104d 9005 Vaio VGN-FZ260E
104d 9008 Vaio VGN-SZ79SN_C
@@ -27970,11 +28428,13 @@
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
103c 30d9 Presario C700
+ 1043 1017 X58LE
104d 9005 Vaio VGN-FZ260E
104d 902d VAIO VGN-NR120E
17aa 20a6 ThinkPad T61/R61
17c0 4083 Medion WIM 2210 Notebook PC [MD96850]
e4bf cc47 CCG-RUMBA
+ 28c0 Volume Management Device NVMe RAID Controller
2912 82801IH (ICH9DH) LPC Interface Controller
2914 82801IO (ICH9DO) LPC Interface Controller
1028 0211 Optiplex 755
@@ -28222,8 +28682,10 @@
294c 82566DC-2 Gigabit Network Connection
17aa 302e 82566DM-2 Gigabit Network Connection
2970 82946GZ/PL/GL Memory Controller Hub
+ 1043 823b P5B-MX/WiFi-AP
2971 82946GZ/PL/GL PCI Express Root Port
2972 82946GZ/GL Integrated Graphics Controller
+ 1043 823b P5B-MX/WiFi-AP
2973 82946GZ/GL Integrated Graphics Controller
2974 82946GZ/GL HECI Controller
2975 82946GZ/GL HECI Controller
@@ -28323,6 +28785,7 @@
103c 30c1 Compaq 6910p
103c 30cc Pavilion dv6700
103c 30d9 Presario C700
+ 1043 1017 X58LE
104d 9005 Vaio VGN-FZ260E
104d 902d VAIO VGN-NR120E
17aa 20b1 ThinkPad T61
@@ -28336,6 +28799,7 @@
1028 022f Inspiron 1525
103c 30c0 Compaq 6710b
103c 30d9 Presario C700
+ 1043 14e2 X58LE
104d 902d VAIO VGN-NR120E
17aa 20b5 GM965 [X3100] on ThinkPad T61/R61
17c0 4082 GM965 on Medion WIM 2210 Notebook PC [MD96850]
@@ -28345,6 +28809,7 @@
1028 022f Inspiron 1525
103c 30c0 Compaq 6710b
103c 30d9 Presario C700
+ 1043 14e2 X58LE
104d 902d VAIO VGN-NR120E
17aa 20b5 GM965 [X3100] on ThinkPad T61/R61
17c0 4082 GM965 on Medion WIM 2210 Notebook PC [MD96850]
@@ -28898,12 +29363,15 @@
34bc Ice Lake-LP PCI Express Root Port #5
34c5 Ice Lake-LP Serial IO I2c Controller #4
34c6 Ice Lake-LP Serial IO I2c Controller #5
+ 34c8 Smart Sound Technology Audio Controller
34d3 Ice Lake-LP SATA Controller [AHCI mode]
+ 34e0 Management Engine Interface
34e8 Ice Lake-LP Serial IO I2C Controller #0
34e9 Ice Lake-LP Serial IO I2C Controller #1
34ea Ice Lake-LP Serial IO I2C Controller #2
34eb Ice Lake-LP Serial IO I2C Controller #3
34ed Ice Lake-LP USB 3.1 xHCI Host Controller
+ 34f0 Killer Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)
34f8 Ice Lake-LP SD Controller
3500 6311ESB/6321ESB PCI Express Upstream Port
103c 31fe ProLiant DL140 G3
@@ -29123,6 +29591,7 @@
17aa 4022 Ethernet Connection X722 for 1GbE
17aa 4024 Ethernet Connection X722 for 1GbE
37d2 Ethernet Connection X722 for 10GBASE-T
+ 1059 0180 RD10019 10GbE interface
1170 37d2 Ethernet Connection X722 for 10GBASE-T
14cd 0030 Ethernet OCP 2x10G RJ45 Phy Card [USI-X557-10GbaseT]
1590 0218 Ethernet 10Gb 2-port 568FLR-MMT Adapter
@@ -29384,6 +29853,7 @@
1028 02da OptiPlex 980
1028 040a Latitude E6410
1028 040b Latitude E6510
+ 103c 1521 EliteBook 8540p
144d c06a R730 Laptop
15d9 060d C7SIM-Q Motherboard
17c0 10d2 Medion Akoya E7214 Notebook PC [MD98410]
@@ -29508,6 +29978,7 @@
3e1f 8th Gen Core 4-core Desktop Processor Host Bridge/DRAM Registers [Coffee Lake S]
1458 5000 Z370 AORUS Gaming K3-CF
3e30 8th Gen Core 8-core Desktop Processor Host Bridge/DRAM Registers [Coffee Lake S]
+ 3e33 8th/9th Gen Core Processor Host Bridge/DRAM Registers [Coffee Lake]
3e34 Coffee Lake HOST and DRAM Controller
3e81 8th Gen Core Processor PCIe Controller (x16)
3e85 8th Gen Core Processor PCIe Controller (x8)
@@ -29650,6 +30121,8 @@
8086 1311 WiMAX/WiFi Link 5150 AGN
8086 1316 WiMAX/WiFi Link 5150 ABG
444e Turbo Memory Controller
+ 467f Volume Management Device NVMe RAID Controller
+ 4c3d Volume Management Device NVMe RAID Controller
5001 LE80578
5002 LE80578 Graphics Processor Unit
5009 LE80578 Video Display Controller
@@ -29734,6 +30207,7 @@
5926 Iris Plus Graphics 640
5927 Iris Plus Graphics 650
5a84 Celeron N3350/Pentium N4200/Atom E3900 Series Integrated Graphics Controller
+ 5a85 HD Graphics 500
5a88 Celeron N3350/Pentium N4200/Atom E3900 Series Imaging Unit
5a98 Celeron N3350/Pentium N4200/Atom E3900 Series Audio Cluster
5a9a Celeron N3350/Pentium N4200/Atom E3900 Series Trusted Execution Engine
@@ -30129,6 +30603,7 @@
8a1f Ice Lake Thunderbolt 3 PCI Express Root Port #1
8a21 Ice Lake Thunderbolt 3 PCI Express Root Port #2
8a23 Ice Lake Thunderbolt 3 PCI Express Root Port #3
+ 8a52 Iris Plus Graphics G7
8c00 8 Series/C220 Series Chipset Family 4-port SATA Controller 1 [IDE mode]
8c01 8 Series Chipset Family 4-port SATA Controller 1 [IDE mode] - Mobile
8c02 8 Series/C220 Series Chipset Family 6-port SATA Controller 1 [AHCI mode]
@@ -30346,6 +30821,8 @@
9622 Integrated RAID
9641 Integrated RAID
96a1 Integrated RAID
+ 9a0b Volume Management Device NVMe RAID Controller
+ 9b41 UHD Graphics
9c00 8 Series SATA Controller 1 [IDE mode]
9c01 8 Series SATA Controller 1 [IDE mode]
9c02 8 Series SATA Controller 1 [AHCI mode]
@@ -30555,17 +31032,20 @@
9da4 Cannon Point-LP SPI Controller
9db0 Cannon Point-LP PCI Express Root Port #9
9db1 Cannon Point-LP PCI Express Root Port #10
+ 9db2 Cannon Point-LP PCI Express Root Port #1
9db4 Cannon Point-LP PCI Express Root Port #13
1028 089e Inspiron 5482
9db6 Cannon Point-LP PCI Express Root Port #15
9db8 Cannon Point-LP PCI Express Root Port #1
9dbc Cannon Point-LP PCI Express Root Port #5
+ 9dbe Cannon Point-LP PCI Express Root Port #7
9dbf Cannon Point PCI Express Root Port #8
9dc5 Cannon Point-LP Serial IO I2C Host Controller
9dc8 Cannon Point-LP High Definition Audio Controller
1028 089e Inspiron 5482
9dd3 Cannon Point-LP SATA Controller [AHCI Mode]
9de0 Cannon Point-LP MEI Controller #1
+ 9de3 Cannon Point-LP Keyboard and Text (KT) Redirection
9de8 Cannon Point-LP Serial IO I2C Controller #0
1028 089e Inspiron 5482
9de9 Cannon Point-LP Serial IO I2C Controller #1
@@ -30599,6 +31079,7 @@
a102 Q170/Q150/B150/H170/H110/Z170/CM236 Chipset SATA Controller [AHCI Mode]
a103 HM170/QM170 Chipset SATA Controller [AHCI Mode]
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
a105 Sunrise Point-H SATA Controller [RAID mode]
a106 Q170/H170/Z170/CM236 Chipset SATA Controller [RAID Mode]
a107 HM170/QM170 Chipset SATA Controller [RAID Mode]
@@ -30622,9 +31103,11 @@
a120 100 Series/C230 Series Chipset Family P2SB
a121 100 Series/C230 Series Chipset Family Power Management Controller
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
a122 Sunrise Point-H cAVS
a123 100 Series/C230 Series Chipset Family SMBus
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
a124 100 Series/C230 Series Chipset Family SPI Controller
a125 100 Series/C230 Series Chipset Family Gigabit Ethernet Controller
a126 100 Series/C230 Series Chipset Family Trace Hub
@@ -30634,13 +31117,16 @@
a12a 100 Series/C230 Series Chipset Family Serial IO GSPI #1
a12f 100 Series/C230 Series Chipset Family USB 3.0 xHCI Controller
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
a130 100 Series/C230 Series Chipset Family USB Device Controller (OTG)
a131 100 Series/C230 Series Chipset Family Thermal Subsystem
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
a133 Sunrise Point-H Northpeak ACPI Function
a135 100 Series/C230 Series Chipset Family Integrated Sensor Hub
a13a 100 Series/C230 Series Chipset Family MEI Controller #1
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
a13b 100 Series/C230 Series Chipset Family MEI Controller #2
a13c 100 Series/C230 Series Chipset Family IDE Redirection
a13d 100 Series/C230 Series Chipset Family KT Redirection
@@ -30661,6 +31147,7 @@
a14d QM170 Chipset LPC/eSPI Controller
a14e HM170 Chipset LPC/eSPI Controller
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
a14f Sunrise Point-H LPC Controller
a150 CM236 Chipset LPC/eSPI Controller
a151 Sunrise Point-H LPC Controller
@@ -30680,6 +31167,7 @@
a15f Sunrise Point-H LPC Controller
a160 100 Series/C230 Series Chipset Family Serial IO I2C Controller #0
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
a161 100 Series/C230 Series Chipset Family Serial IO I2C Controller #1
1028 06e4 XPS 15 9550
a162 100 Series/C230 Series Chipset Family Serial IO I2C Controller #2
@@ -30691,6 +31179,7 @@
a16a 100 Series/C230 Series Chipset Family PCI Express Root Port #20
a170 100 Series/C230 Series Chipset Family HD Audio Controller
1028 06e4 XPS 15 9550
+ 103c 825b OMEN-17-w001nv
a171 CM238 HD Audio Controller
a182 C620 Series Chipset Family SATA Controller [AHCI mode]
a186 C620 Series Chipset Family SATA Controller [RAID mode]
@@ -30821,6 +31310,7 @@
a30c QM370 Chipset LPC/eSPI Controller
a323 Cannon Lake PCH SMBus Controller
a324 Cannon Lake PCH SPI Controller
+ a328 Cannon Lake PCH Serial IO UART Host Controller
a32c Cannon Lake PCH PCI Express Root Port #21
a32d Cannon Lake PCH PCI Express Root Port #22
a32e Cannon Lake PCH PCI Express Root Port #23
@@ -30850,6 +31340,7 @@
a353 Cannon Lake Mobile PCH SATA AHCI Controller
a360 Cannon Lake PCH HECI Controller
a363 Cannon Lake PCH Active Management Technology - SOL
+ a364 Cannon Lake PCH HECI Controller #2
a368 Cannon Lake PCH Serial IO I2C Controller #0
a369 Cannon Lake PCH Serial IO I2C Controller #1
a36a Cannon Lake PCH Serial IO I2C Controller #2
@@ -30900,6 +31391,23 @@
f1a6 SSD Pro 7600p/760p/E 6100p Series
f1a8 SSD 660P Series
8088 Beijing Wangxun Technology Co., Ltd.
+ 0101 WX1860A2 Gigabit Ethernet Controller
+ 8088 0201 Dual-Port Ethernet Network Adaptor SF200T
+ 0102 WX1860A2S Gigabit Ethernet Controller
+ 8088 0210 Dual-Port Ethernet Network Adaptor SF200T-S
+ 0103 WX1860A4 Gigabit Ethernet Controller
+ 8088 0401 Qual-Port Ethernet Network Adaptor SF400T
+ 8088 0440 Qual-Port Ethernet Network Adaptor SF400-OCP
+ 0104 WX1860A4S Gigabit Ethernet Controller
+ 8088 0410 Qual-Port Ethernet Network Adaptor SF400T-S
+ 0105 WX1860AL2 Gigabit Ethernet Controller
+ 8088 0202 Dual-Port Ethernet Network Adaptor SF200HT
+ 0106 WX1860AL2S Gigabit Ethernet Controller
+ 8088 0220 Dual-Port Ethernet Network Adaptor SF200HT-S
+ 0107 WX1860AL4 Gigabit Ethernet Controller
+ 8088 0402 Qual-Port Ethernet Network Adaptor SF400HT
+ 0108 WX1860AL4S Gigabit Ethernet Controller
+ 8088 0420 Qual-Port Ethernet Network Adaptor SF400HT-S
1001 Ethernet Controller RP1000 for 10GbE SFP+
8088 0000 Ethernet Network Adaptor RP1000 for 10GbE SFP+
2001 Ethernet Controller RP2000 for 10GbE SFP+
@@ -31504,6 +32012,7 @@ bdbd Blackmagic Design
a1ff eGPU RX580
c001 TSI Telsys
c0a9 Micron/Crucial Technology
+ 2263 P1 NVMe PCIe SSD
c0de Motorola
c0fe Motion Engineering, Inc.
ca3b Cambrionix Ltd.
@@ -31560,6 +32069,10 @@ d161 Digium, Inc.
8010 Wildcard A4B 4-port analog card (PCI-Express)
8013 Wildcard TE236/TE436 quad-span T1/E1/J1 card
b410 Wildcard B410 quad-BRI card
+d209 Ultimarc
+ 1500 PAC Drive
+ 15a2 SpinTrak
+ 1601 AimTrak
d4d4 Dy4 Systems Inc
0601 PCI Mezzanine Card
d531 I+ME ACTIA GmbH
@@ -31735,14 +32248,38 @@ f1d0 AJA Video
c0ff Kona/Xena 2
cafe Kona SD
cfee Xena LS/SD-22-DA/SD-DA
+ dafe Corvid 1
daff KONA LHi
+ db00 IoExpress
db01 Corvid22
+ db02 Kona 3G
+ db03 Corvid 3G
+ db04 Kona 3G QUAD
+ db05 Kona LHe+
+ db06 IoXT
+ db07 Kona 3G P2P
+ db08 Kona 3G QUAD P2P
db09 Corvid 24
+ db11 T-Tap
dcaf Kona HD
dfee Xena HD-DA
+ eb07 Io4K
+ eb0a Io4K UFC
+ eb0b Kona 4
+ eb0c Kona 4 UFC
eb0d Corvid 88
eb0e Corvid 44
- eb1d Kona 5
+ eb16 Corvid HEVC
+ 10cf 1049 Corvid HEVC M31
+ eb18 Corvid HB-R
+ eb1a Kona IP 1SFP
+ eb1c Kona IP 2SFP
+ eb1d Io4KPlus
+ eb1e IoIP
+ eb1f Kona 5
+ eb23 Kona 1
+ eb24 Kona HDMI
+ eb25 Corvid 44 12g
efac Xena SD-MM/SD-22-MM
facd Xena HD-MM
f5f5 F5 Networks, Inc.
diff --git a/usr/src/data/hwdata/usb.ids b/usr/src/data/hwdata/usb.ids
index 7d136519fb..1e63bb5177 100644
--- a/usr/src/data/hwdata/usb.ids
+++ b/usr/src/data/hwdata/usb.ids
@@ -9,8 +9,8 @@
# The latest version can be obtained from
# http://www.linux-usb.org/usb.ids
#
-# Version: 2019.08.21
-# Date: 2019-08-21 20:34:05
+# Version: 2020.02.28
+# Date: 2020-02-28 20:34:06
#
# Vendors, devices and interfaces. Please keep sorted.
@@ -42,6 +42,7 @@
a001 Digitus DA-71114 SATA
0085 Boeye Technology Co., Ltd.
0600 eBook Reader
+0102 miniSTREAK
0105 Trust International B.V.
145f NW-3100 802.11b/g 54Mbps Wireless Network Adapter [zd1211]
0127 IBP
@@ -75,6 +76,7 @@
03e7 Intel
2150 Myriad VPU [Movidius Neural Compute Stick]
2485 Movidius MyriadX
+ f63b Myriad VPU [Movidius Neural Compute Stick]
03e8 EndPoints, Inc.
0004 SE401 Webcam
0008 101 Ethernet [klsi]
@@ -171,6 +173,7 @@
7617 AT76C505AS Wireless Adapter
7800 Mini Album
800c Airspy HF+
+ ff01 WootingOne
ff02 WootingTwo
ff07 Tux Droid fish dongle
03ec Iwatsu America, Inc.
@@ -252,6 +255,7 @@
0601 ScanJet 6300c
0604 DeskJet 840c
0605 ScanJet 2200c
+ 0610 Z24i Monitor Hub
0611 OfficeJet K60xi
0612 business inkjet 3000
0624 Bluetooth Dongle
@@ -274,6 +278,7 @@
0912 Printing Support
0917 LaserJet 3330
0924 Modular Smartcard Keyboard
+ 0941 X500 Optical Mouse
094a Optical Mouse [672662-001]
0a01 ScanJet 2400c
0a17 color LaserJet 3700
@@ -699,6 +704,7 @@
c102 PhotoSmart 8000 series
c111 Deskjet 1510
c202 PhotoSmart 8200 series
+ c211 Deskjet 2540 series
c302 DeskJet D2300
c402 PhotoSmart D5100 series
c502 PhotoSmart D6100 series
@@ -795,6 +801,7 @@
8370 7 Port Hub
8371 PS/2 Keyboard And Mouse
8372 FT8U100AX Serial Port
+ 8508 Selectronic SP PRO
87d0 Cressi Dive Computer Interface
8a28 Rainforest Automation ZigBee Controller
8a98 TIAO Multi-Protocol Adapter
@@ -809,6 +816,7 @@
9135 Rotary Pub alarm
9136 Pulsecounter
9e90 Marvell OpenRD Base/Client
+ 9f08 CIB-1894 Conclusion SmartLink Box:
9f80 Ewert Energy Systems CANdapter
a6d0 Texas Instruments XDS100v2 JTAG / BeagleBone A3
a951 HCP HIT GSM/GPRS modem [Cinterion MC55i]
@@ -961,6 +969,7 @@
1030 FV TouchCam N1 (Video)
3000 Optical dual-touch panel
3001 Optical Touch Screen
+ a060 HD Webcam
0409 NEC Corp.
0011 PC98 Series Layout Keyboard Mouse
0012 ATerm IT75DSU ISDN TA
@@ -1142,6 +1151,10 @@
402b Photo Printer 6850
402e 605 Photo Printer
4034 805 Photo Printer
+ 4035 7000 Photo Printer
+ 4037 7010 Photo Printer
+ 4038 7015 Photo Printer
+ 404d 8810 Photo Printer
404f 305 Photo Printer
4056 ESP 7200 Series AiO
4109 EasyShare Printer Dock Series 3
@@ -1157,6 +1170,7 @@
602a i900
040b Weltrend Semiconductor
0a68 Func MS-3 gaming mouse [WT6573F MCU]
+ 2000 wired Keyboard [Dynex DX-WRK1401]
2367 Human Interface Device [HP CalcPad 200 Calculator and Numeric Keypad]
6510 Weltrend Bar Code Reader
6520 Xploder Xbox Memory Unit (8MB)
@@ -1294,6 +1308,7 @@
7721 Memory Stick Reader/Writer
7722 Memory Stick Reader/Writer
7723 SD Card Reader
+ c141 Barcode Scanner
0417 Symbios Logic
0418 AST Research
0419 Samsung Info. Systems America, Inc.
@@ -1609,7 +1624,7 @@
0301 2500H Tracer Trainer
030a PETracer x1
1237 Andromeda Hub
-0424 Standard Microsystems Corp.
+0424 Microchip Technology, Inc. (formerly SMSC)
0001 Integrated Hub
0140 LPC47M14x hub
0acd Sitecom Internal Multi Memory reader/writer MD-005
@@ -1638,6 +1653,29 @@
4041 Hub and media card controller
4060 Ultra Fast Media Reader
4064 Ultra Fast Media Reader
+ 4712 USB4712 high-speed hub
+ 4713 USB4715 high-speed hub (2 ports disabled)
+ 4714 USB4715 high-speed hub (1 port disabled)
+ 4715 USB4715 high-speed hub
+ 4910 USB491x hub integrated functions (primary)
+ 4912 USB4912 high-speed hub (1 port disabled)
+ 4914 USB4914 high-speed hub
+ 4916 USB4916 high-speed hub
+ 4920 USB491x hub integrated functions (secondary)
+ 4925 USB4925 high-speed hub (primary upstream)
+ 4927 USB4927 high-speed hub (primary upstream)
+ 4931 USB4925/4927 high-speed hub (secondary upstream)
+ 4940 USB47xx/49xx hub integrated WinUSB
+ 4942 USB47xx/49xx hub integrated I2S audio port
+ 4943 USB47xx/49xx hub integrated I2S audio + HID port
+ 4944 USB47xx/49xx hub integrated serial port
+ 4946 USB47xx/49xx hub integrated serial + I2S audio port
+ 4947 USB47xx/49xx hub integrated serial + I2S audio + HID port
+ 494a USB47xx/49xx hub integrated WinUSB + I2S audio port
+ 494b USB47xx/49xx hub integrated WinUSB + I2S audio + HID port
+ 494c USB47xx/49xx hub integrated WinUSB + serial port
+ 494e USB47xx/49xx hub integrated WinUSB + serial + I2S audio port
+ 494f USB47xx/49xx hub integrated WinUSB + serial + I2S audio + HID port
5434 Hub
5534 Hub
5744 Hub
@@ -1676,6 +1714,7 @@
0083 109 Japanese Keyboard
00a2 Type 7 Keyboard
0100 3-button Mouse
+ 0502 Panasonic CF-19 HID Touch Panel
100e 24.1" LCD Monitor v4 / FID-638 Mouse
36ba Bus Powered Hub
a101 remote key/mouse for P3 chip
@@ -1699,6 +1738,7 @@
0005 CameraMate (DPCM_USB)
0437 Framatome Connectors USA
0438 Advanced Micro Devices, Inc.
+ 7900 Root Hub
0439 Voice Technologies Group
043d Lexmark International, Inc.
0001 Laser Printer
@@ -1808,6 +1848,7 @@
008c to CF/SM/SD/MS Card Reader
008e InkJet Color Printer
008f X422
+ 0091 Laser Printer E232
0093 X5250
0095 E220 Printer
0096 2200 series
@@ -1945,10 +1986,14 @@
b700 Tacticalboard
0450 DFI, Inc.
0451 Texas Instruments, Inc.
+ 0422 TUSB422 Port Controller with Power Delivery
1234 Bluetooth Device
1428 Hub
1446 TUSB2040/2070 Hub
+ 16a2 CC Debugger
16a6 BM-USBD1 BlueRobin RF heart rate sensor receiver
+ 16a8 CC2531 ZigBee
+ 16ae CC2531 Dongle
2036 TUSB2036 Hub
2046 TUSB2046 Hub
2077 TUSB2077 Hub
@@ -1976,7 +2021,17 @@
e003 TI-84 Plus Calculator
e004 TI-89 Titanium Calculator
e008 TI-84 Plus Silver Calculator
+ e00e TI-89 Titanium Presentation Link
+ e00f TI-84 Plus Presentation Link
+ e010 TI SmartPad Keyboard
+ e011 Nspire CAS+ prototype
e012 TI-Nspire Calculator
+ e013 Network Bridge
+ e01c Data Collection Sled [Nspire Lab Cradle, Nspire Datatracker Cradle]
+ e01e Nspire™ CX Navigator™ Access Point
+ e01f Python Adapter (firmware install mode)
+ e020 Python Adapter
+ e022 Nspire CX II
f430 MSP-FET430UIF JTAG Tool
f432 eZ430 Development Tool
ffff Bluetooth Device
@@ -2006,6 +2061,7 @@
0002 Genius NetMouse Pro
0003 Genius NetScroll+
0006 Easy Mouse+
+ 0007 Trackbar Emotion
000b NetMouse Wheel(P+U)
000c TACOMA Fingerprint V1.06.01
000e Genius NetScroll Optical
@@ -2033,6 +2089,7 @@
0100 EasyPen Tablet
0101 CueCat
011b NetScroll T220
+ 0186 Genius DX-120 Mouse
1001 Joystick
1002 Game Pad
1003 Genius VideoCam
@@ -2065,11 +2122,15 @@
3018 Wireless 2.4Ghz Game Pad
3019 10-Button USB Joystick with Vibration
301a MaxFire G-12U Vibration
+ 301c Genius MaxFighter F-16U
301d Genius MaxFire MiniPad
400f Genius TVGo DVB-T02Q MCE
4012 TVGo DVB-T03 [AF9015]
5003 G-pen 560 Tablet
5004 G-pen Tablet
+ 5005 Genius EasyPen M406
+ 5012 Genius EasyPen M406W
+ 5014 Genius EasyPen 340
505e Genius iSlim 330
6001 GF3000F Ethernet Adapter
7004 VideoCAM Express V2
@@ -2207,6 +2268,7 @@
00cb Basic Optical Mouse v2.0
00ce Generic PPC Flash device
00d1 Optical Mouse with Tilt Wheel
+ 00d2 Notebook Optical Mouse with Tilt Wheel
00da eHome Infrared Receiver
00db Natural Ergonomic Keyboard 4000 V1.0
00dd Comfort Curve Keyboard 2000 V1.0
@@ -2425,6 +2487,7 @@
07cd Surface Keyboard
07f8 Wired Keyboard 600 (model 1576)
07fd Nano Transceiver 1.1
+ 0810 LifeCam HD-3000
0900 Surface Dock Hub
0901 Surface Dock Hub
0902 Surface Dock Hub
@@ -2502,12 +2565,14 @@
4d62 HP Laser Mobile Mini Mouse
4d75 Rocketfish RF-FLBTAD Bluetooth Adapter
4d81 Dell N889 Optical Mouse
+ 4d8a HP Multimedia Keyboard
4d91 Laser mouse M-D16DL
4d92 Optical mouse M-D17DR
4db1 Dell Laptop Integrated Webcam 2Mpix
4de3 HP 5-Button Optical Comfort Mouse
4de7 webcam
4e04 Lenovo Keyboard KB1021
+ 4e6f Acer Wired Keyboard Model KBAY211
0463 MGE UPS Systems
0001 UPS
ffff UPS
@@ -2536,6 +2601,7 @@
00a1 SmartCard Reader Keyboard KC 1000 SC
0106 R-300 Wireless Mouse Receiver
010d MX-Board 3.0 Keyboard
+ 0180 Strait 3.0
b090 Keyboard
b091 Mouse
046b American Megatrends, Inc.
@@ -2693,6 +2759,7 @@
0a45 960 Headset
0a4d G430 Surround Sound Gaming Headset
0a5b G933 Wireless Headset Dongle
+ 0a5d G933 Headset Battery Charger
0a66 [G533 Wireless Headset Dongle]
0b02 C-UV35 [Bluetooth Mini-Receiver] (HID proxy mode)
8801 Video Camera
@@ -2822,6 +2889,7 @@
c231 G13 Virtual Mouse
c245 G400 Optical Mouse
c246 Gaming Mouse G300
+ c247 G100S Optical Gaming Mouse
c248 G105 Gaming Keyboard
c24a G600 Gaming Mouse
c24c G400s Optical Mouse
@@ -2870,6 +2938,7 @@
c31f Comfort Keyboard K290
c326 Washable Keyboard K310
c328 Corded Keyboard K280e
+ c32b G910 Orion Spark Mechanical Keyboard
c332 G502 Proteus Spectrum Optical Mouse
c335 G910 Orion Spectrum Mechanical Keyboard
c33a G413 Gaming Keyboard
@@ -2911,6 +2980,8 @@
c531 C-U0007 [Unifying Receiver]
c532 Unifying Receiver
c534 Unifying Receiver
+ c537 Cordless Mouse Receiver
+ c53a PowerPlay Wireless Charging System
c603 3Dconnexion Spacemouse Plus XT
c605 3Dconnexion CADman
c606 3Dconnexion Spacemouse Classic
@@ -3198,6 +3269,7 @@
f101 Atlas Modem
047f Plantronics, Inc.
0101 Bulk Driver
+ 02ee BT600
0301 Bulk Driver
0411 Savi Office Base Station
0ca1 USB DSP v4 Audio Interface
@@ -3207,6 +3279,7 @@
af01 DA80
c008 Audio 655 DSP
c00e Blackwire C310 headset
+ c03b HD1
0480 Toshiba America Inc
0001 InTouch Module
0004 InTouch Module
@@ -3216,6 +3289,7 @@
0200 External Disk
0820 Canvio Advance Disk
0821 Canvio Advance 2TB model DTC920
+ 0900 MQ04UBF100
a006 External Disk 1.5TB
a007 External Disk USB 3.0
a009 Stor.E Basics
@@ -3285,6 +3359,7 @@
8259 Probe
91d1 Sensor Hub
a171 ThermaData WiFi
+ a2e0 BMeasure instrument
df11 STM Device in DFU Mode
ff10 Swann ST56 Modem
0484 Specialix
@@ -3314,6 +3389,7 @@
048d Integrated Technology Express, Inc.
1165 IT1165 Flash Controller
1172 Flash Drive
+ 1234 Mass storage
1336 SD/MMC Cardreader
1345 Multi Cardreader
9006 IT9135 BDA Afatech DVB-T HDTV Dongle
@@ -3659,6 +3735,7 @@
10c9 PIXMA iP4600 Printer
10ca PIXMA iP3600 Printer
10e3 PIXMA iX6850 Printer
+ 12fe Printer in service mode
1404 W6400PG
1405 W8400PG
150f BIJ2350 PCL
@@ -3774,6 +3851,7 @@
178a PIXMA MG3600 Series
178d PIXMA MG6853
180b PIXMA MG3000 series
+ 1856 PIXMA TS6250
1900 CanoScan LiDE 90
1901 CanoScan 8800F
1904 CanoScan LiDE 100
@@ -3842,6 +3920,7 @@
2633 LASERCLASS 500
2634 PC-D300/FAX-L400/ICD300
2635 MPC190
+ 2636 LBP3200
2637 iR C6800
2638 iR C3100
263c PIXMA MP360
@@ -3856,8 +3935,10 @@
264f MF5650 (FAX)
2650 iR 6800C EUR
2651 iR 3100C EUR
+ 2654 LBP3600
2655 FP-L170/MF350/L380/L398
2656 iR1510-1670 CAPT Printer
+ 2657 LBP3210
2659 MF8100
265b CAPT Printer
265c iR C3220
@@ -3871,7 +3952,7 @@
2666 iR C5800
2667 iR85PLUS
2669 iR105PLUS
- 266a CAPT Device
+ 266a LBP3000
266b iR8070
266c iR9070
266d iR 5800C EUR
@@ -3886,31 +3967,46 @@
2676 LBP2900
2677 iR C2570
2678 iR 2570C EUR
- 2679 CAPT Device
+ 2679 LBP5000
267a iR2016
267b iR2020
267d MF7100 series
+ 267e LBP3300
2684 MF3200 series
2686 MF6500 series
2687 iR4530
2688 LBP3460
2689 FAX-L180/L380S/L398S
268a LC310/L390/L408S
+ 268b LBP3500
268c iR C6870
268d iR 6870C EUR
268e iR C5870
268f iR 5870C EUR
2691 iR7105
+ 26a1 LBP5300
26a3 MF4100 series
+ 26a4 LBP5100
26b0 MF4600 series
26b4 MF4010 series
26b5 MF4200 series
26b6 FAX-L140/L130
- 26da LBP3010B printer
+ 26b9 LBP3310
+ 26ba LBP5050
+ 26da LBP3010/LBP3018/LBP3050
+ 26db LBP3100/LBP3108/LBP3150
26e6 iR1024
+ 26ea LBP9100C
+ 26ee MF4320-4350
+ 26f1 LBP7200C
+ 26ff LBP6300
271a LBP6000
+ 271b LBP6200
+ 271c LBP7010C/7018C
2736 I-SENSYS MF4550d
2737 MF4410
+ 2771 LBP6020
+ 2796 LBP6230/6240
3041 PowerShot S10
3042 CanoScan FS4000US Film Scanner
3043 PowerShot S20
@@ -4313,6 +4409,7 @@
0429 D5100
042a D800 (ptp)
0430 D7100
+ 0436 D810
043f D5600
0f03 PD-10 Wireless Printer Adapter
4000 Coolscan LS 40 ED
@@ -4374,6 +4471,7 @@
4611 Storage Adapter FX2 (CY)
4616 Flash Disk (TPP)
4624 DS-Xtreme Flash Card
+ 4717 West Bridge
5201 Combi Keyboard-Hub (Hub)
5202 Combi Keyboard-Hub (Keyboard)
5500 HID->COM RS232 Adapter
@@ -11638,10 +11736,16 @@
09c1 Arris Interactive LLC
1337 TOUCHSTONE DEVICE
09c2 Nisca Corp.
-09c3 ActivCard, Inc.
+09c3 HID Global
0007 Reader V2
0008 ZFG-9800-AC SmartCard Reader
0014 ActivIdentity ActivKey SIM USB Token
+ 0028 Crescendo Key
+ 0029 Crescendo Key
+ 002a Crescendo Key
+ 002b Crescendo Key
+ 002c Crescendo Key
+ 002e Crescendo Key
09c4 ACTiSYS Corp.
0011 ACT-IR2000U IrDA Dongle
09c5 Memory Corp.
@@ -15165,7 +15269,7 @@
10c3 Universal Laser Systems, Inc.
00a4 ULS PLS Series Laser Engraver Firmware Loader
00a5 ULS Print Support
-10c4 Cygnal Integrated Products, Inc.
+10c4 Silicon Labs
0002 F32x USBXpress Device
0003 CommandIR
800a SPORTident
@@ -15204,12 +15308,29 @@
8973 C8051F38x HDMI Extender [UHBX-8X]
89c6 SPORTident HID device
89e1 C8051F38x HDMI Extender [UHBX-SW3-WP]
- ea60 CP2102/CP2109 UART Bridge Controller [CP210x family]
+ 89fb Qivicon ZigBee Stick
+ 8a3c C8051F38x HDBaseT Receiver [UHBX-R-XT]
+ 8a6c C8051F38x 4K HDMI Audio Extractor [EMX-AMP]
+ 8acb C8051F38x HDBaseT Wall Plate Receiver with IR, RS-232, and PoH [UHBX-R-WP]
+ 8af8 C8051F38x 4K HDMI Audio Extractor w/Audio Amplifier, HDBT Input, Line Audio Input RS-232 Ports and IP Control [VSA-X21]
+ 8b8c C8051F38x 4K HDMI Audio Extractor w/Audio Amplifier, HDBT Input, Line Audio Input RS-232 Ports and IP Control [SC-3H]
+ 8db5 C8051F38x CATx HDMI Receiver with USB [EX-HDU-R]
+ 8db6 C8051F38x CATx HDMI Receiver
+ ea60 CP210x UART Bridge
ea61 CP210x UART Bridge
- ea70 CP210x UART Bridge
- ea80 CP210x UART Bridge
+ ea63 CP210x UART Bridge
+ ea70 CP2105 Dual UART Bridge
+ ea71 CP2108 Quad UART Bridge
+ ea80 CP2110 HID UART Bridge
+ ea90 CP2112 HID I2C Bridge
+ ea91 CP2112 HID SMBus/I2C Bridge for CP2614 Evaluation Kit
+ ea93 CP2112 HID SMBus/I2C Bridge for CP2615 Evaluation Kit
+ eab0 CP2114 I2S Audio Bridge
+ eac0 CP2614 MFi Accessory Digital Audio Bridge
+ eac1 CP2615 I2S Audio Bridge
eac9 EFM8UB1 Bootloader
eaca EFM8UB2 Bootloader
+ eacb EFM8UB3 Bootloader
10c5 Sanei Electric, Inc.
819a FM Radio
10c6 Intec, Inc.
@@ -16548,6 +16669,7 @@
001b Emu [Ambit3 Peak]
001c Finch [Ambit3 Sport]
001d Greentit [Ambit2 R]
+ 001e Ibisbill [Ambit3 Run]
1497 Panstrong Company Ltd.
1498 Microtek International Inc.
a090 DVB-T Tuner
@@ -18704,6 +18826,7 @@
2cf6 Pyra Mouse (wireless)
2d50 Kova+ Mouse
2d51 Kone+ Mouse
+ 2e22 Kone XTD Mouse
30d4 Arvo Keyboard
1ea7 SHARKOON Technologies GmbH
0066 [Mediatrack Edge Mini Keyboard]
@@ -19030,6 +19153,14 @@
6323 USB Electronic Scale
2237 Kobo Inc.
4161 eReader White
+224f APDM
+ 0001 Access Point
+ 0002 Docking Station
+ 0004 V2 Opal ACM
+ 0005 V2 Opal
+ 0006 V2 Docking Station
+ 0007 V2 Access Point ACM
+ 0008 V2 Access Point
225d Morpho
0001 FINGER VP Multimodal Biometric Sensor
0008 CBM-E3 Fingerprint Sensor
@@ -19240,6 +19371,8 @@
2548 Pulse-Eight
1001 CEC Adapter
1002 CEC Adapter
+25b5 FlatFrog
+ 0002 Multitouch 3200
2632 TwinMOS
3209 7-in-1 Card Reader
2639 Xsens
@@ -19346,6 +19479,17 @@
930c CCD Webcam(PC370R)
27b8 ThingM
01ed blink(1)
+27c6 Shenzhen Goodix Technology Co.,Ltd.
+ 5117 Fingerprint Reader
+ 5201 Fingerprint Reader
+ 5301 Fingerprint Reader
+ 530c Fingerprint Reader
+ 5385 Fingerprint Reader
+ 538c Fingerprint Reader
+ 5395 Fingerprint Reader
+ 5584 Fingerprint Reader
+ 55b4 Fingerprint Reader
+ 5740 Fingerprint Reader
2821 ASUSTek Computer Inc.
0161 WL-161 802.11b Wireless Adapter [SiS 162U]
160f WL-160g 802.11g Wireless Adapter [Envara WiND512]
@@ -19440,6 +19584,8 @@
0296 BG96 CAT-M1/NB-IoT modem
0306 EG06/EP06/EM06 LTE-A modem
0435 AG35 LTE modem
+2cdc Sea & Sun Technology GmbH
+ f232 CTD48Mc CTD Probe
2dcf Dialog Semiconductor
c952 Audio Class 2.0 Devices
2fb2 Fujitsu, Ltd
diff --git a/usr/src/lib/libdladm/common/libdladm.c b/usr/src/lib/libdladm/common/libdladm.c
index 5f98de4e70..446a15893b 100644
--- a/usr/src/lib/libdladm/common/libdladm.c
+++ b/usr/src/lib/libdladm/common/libdladm.c
@@ -456,6 +456,9 @@ dladm_status2str(dladm_status_t status, char *buf)
case DLADM_STATUS_BAD_ENCAP:
s = "invalid encapsulation protocol";
break;
+ case DLADM_STATUS_PERSIST_ON_TEMP:
+ s = "can't create persistent object on top of temporary object";
+ break;
default:
s = "<unknown error>";
break;
diff --git a/usr/src/lib/libdladm/common/libdladm.h b/usr/src/lib/libdladm/common/libdladm.h
index f5ae0e6ace..685356fa64 100644
--- a/usr/src/lib/libdladm/common/libdladm.h
+++ b/usr/src/lib/libdladm/common/libdladm.h
@@ -24,7 +24,7 @@
*/
/*
- * Copyright 2019 OmniOS Community Edition (OmniOSce) Association
+ * Copyright 2020 OmniOS Community Edition (OmniOSce) Association
*/
#ifndef _LIBDLADM_H
@@ -185,7 +185,8 @@ typedef enum {
DLADM_STATUS_INVALID_PKEY_TBL_SIZE,
DLADM_STATUS_PORT_NOPROTO,
DLADM_STATUS_INVALID_MTU,
- DLADM_STATUS_BAD_ENCAP
+ DLADM_STATUS_BAD_ENCAP,
+ DLADM_STATUS_PERSIST_ON_TEMP
} dladm_status_t;
typedef enum {
diff --git a/usr/src/lib/libdladm/common/libdlvnic.c b/usr/src/lib/libdladm/common/libdlvnic.c
index 47d007a1e2..7ff0cd5530 100644
--- a/usr/src/lib/libdladm/common/libdlvnic.c
+++ b/usr/src/lib/libdladm/common/libdlvnic.c
@@ -21,6 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2015, Joyent Inc.
+ * Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
*/
#include <stdio.h>
@@ -406,6 +407,7 @@ dladm_vnic_create(dladm_handle_t handle, const char *vnic, datalink_id_t linkid,
datalink_id_t vnic_id;
datalink_class_t class;
uint32_t media = DL_ETHER;
+ uint32_t link_flags;
char name[MAXLINKNAMELEN];
uchar_t tmp_addr[MAXMACADDRLEN];
dladm_status_t status;
@@ -421,6 +423,15 @@ dladm_vnic_create(dladm_handle_t handle, const char *vnic, datalink_id_t linkid,
if ((flags & DLADM_OPT_ACTIVE) == 0)
return (DLADM_STATUS_NOTSUP);
+ /*
+ * It's an anchor VNIC - linkid must be set to DATALINK_INVALID_LINKID
+ * and the VLAN id must be 0
+ */
+ if ((flags & DLADM_OPT_ANCHOR) != 0 &&
+ (linkid != DATALINK_INVALID_LINKID || vid != 0)) {
+ return (DLADM_STATUS_BADARG);
+ }
+
is_vlan = ((flags & DLADM_OPT_VLAN) != 0);
if (is_vlan && ((vid < 1 || vid > 4094)))
return (DLADM_STATUS_VIDINVAL);
@@ -430,18 +441,20 @@ dladm_vnic_create(dladm_handle_t handle, const char *vnic, datalink_id_t linkid,
if (!dladm_vnic_macaddrtype2str(mac_addr_type))
return (DLADM_STATUS_INVALIDMACADDRTYPE);
- if ((flags & DLADM_OPT_ANCHOR) == 0) {
- if ((status = dladm_datalink_id2info(handle, linkid, NULL,
- &class, &media, NULL, 0)) != DLADM_STATUS_OK)
+ if (!is_etherstub) {
+ if ((status = dladm_datalink_id2info(handle, linkid,
+ &link_flags, &class, &media, NULL, 0)) != DLADM_STATUS_OK)
return (status);
+ /* Disallow persistent objects on top of temporary ones */
+ if ((flags & DLADM_OPT_PERSIST) != 0 &&
+ (link_flags & DLMGMT_PERSIST) == 0)
+ return (DLADM_STATUS_PERSIST_ON_TEMP);
+
+ /* Links cannot be created on top of these object types */
if (class == DATALINK_CLASS_VNIC ||
class == DATALINK_CLASS_VLAN)
return (DLADM_STATUS_BADARG);
- } else {
- /* it's an anchor VNIC */
- if (linkid != DATALINK_INVALID_LINKID || vid != 0)
- return (DLADM_STATUS_BADARG);
}
/*
diff --git a/usr/src/man/man7d/Makefile b/usr/src/man/man7d/Makefile
index 9de3a14886..a6b063bdfc 100644
--- a/usr/src/man/man7d/Makefile
+++ b/usr/src/man/man7d/Makefile
@@ -216,6 +216,7 @@ i386_MANFILES= ahci.7d \
iwi.7d \
iwn.7d \
mega_sas.7d \
+ mlxcx.7d \
npe.7d \
ntxn.7d \
nv_sata.7d \
diff --git a/usr/src/man/man7d/mlxcx.7d b/usr/src/man/man7d/mlxcx.7d
new file mode 100644
index 0000000000..5373b5bec5
--- /dev/null
+++ b/usr/src/man/man7d/mlxcx.7d
@@ -0,0 +1,340 @@
+.\"
+.\" This file and its contents are supplied under the terms of the
+.\" Common Development and Distribution License ("CDDL"), version 1.0.
+.\" You may only use this file in accordance with the terms of version
+.\" 1.0 of the CDDL.
+.\"
+.\" A full copy of the text of the CDDL should have accompanied this
+.\" source. A copy of the CDDL is also available via the Internet at
+.\" http://www.illumos.org/license/CDDL.
+.\"
+.\"
+.\" Copyright 2020 the University of Queensland
+.\"
+.Dd January 17, 2020
+.Dt MLXCX 7D
+.Os
+.Sh NAME
+.Nm mlxcx
+.Nd Mellanox ConnectX-4/5/6 Ethernet controller driver
+.Sh SYNOPSIS
+.Pa /dev/net/mlxcx*
+.Sh DESCRIPTION
+The
+.Sy mlxcx
+driver is a GLDv3 NIC driver for the ConnectX-4, ConnectX-4 Lx, ConnectX-5 and
+ConnectX-6 families of ethernet controllers from Mellanox.
+It supports the Data Link Provider Interface,
+.Xr dlpi 7P .
+.Pp
+This driver supports:
+.Bl -dash -offset indent
+.It
+Jumbo frames up to 9000 bytes.
+.It
+Checksum offload for TCP, UDP, IPv4 and IPv6.
+.It
+Group support with VLAN and MAC steering to avoid software classification
+when using VNICs.
+.It
+Promiscuous access via
+.Xr snoop 1M
+and
+.Xr dlpi 7P
+.It
+LED control
+.It
+Transceiver information
+.El
+.Pp
+At this time, the driver does not support Large Send Offload (LSO), energy
+efficient Ethernet (EEE), or the use of flow control through hardware pause
+frames.
+.Sh CONFIGURATION
+The
+.Sy mlxcx.conf
+file contains user configurable parameters, including the ability to set the
+number of rings and groups advertised to MAC, the sizes of rings and groups,
+and the maximum number of MAC address filters available.
+.Sh PROPERTIES
+The driver supports the following device properties which may be tuned through
+its driver.conf file,
+.Pa /kernel/drv/mlxcx.conf .
+These properties cannot be changed after the driver has been attached.
+.Pp
+These properties are not considered stable at this time, and may change.
+.Bl -hang -width Ds
+.It Sy eq_size_shift
+.Bd -filled -compact
+Minimum:
+.Sy 2 |
+Maximum:
+.Sy device dependent (up to 255)
+.Ed
+.Bd -filled
+The
+.Sy eq_size_shift
+property determines the number of entries on Event Queues for the device.
+The number of entries is calculated as
+.Dv (1 << eq_size_shift) ,
+so a value of 9 would mean 512 entries are created on each Event Queue.
+The default value is
+.Sy 9 .
+.Ed
+.It Sy cq_size_shift
+.Bd -filled -compact
+Minimum:
+.Sy 2 |
+Maximum:
+.Sy device dependent (up to 255)
+.Ed
+.Bd -filled
+The
+.Sy cq_size_shift
+property determines the number of entries on Completion Queues for the device.
+The number of entries is calculated as
+.Li (1 << cq_size_shift) ,
+so a value of 9 would mean 512 entries are created on each Completion Queue.
+The default value is
+.Sy 10 .
+This should be kept very close to the value set for
+.Sy rq_size_shift
+and
+.Sy sq_size_shift .
+.Ed
+.It Sy rq_size_shift
+.Bd -filled -compact
+Minimum:
+.Sy 2 |
+Maximum:
+.Sy device dependent (up to 255)
+.Ed
+.Bd -filled
+The
+.Sy rq_size_shift
+property determines the number of descriptors on Receive Queues for the device.
+The number of descriptors is calculated as
+.Dv (1 << rq_size_shift) ,
+so a value of 9 would mean 512 descriptors are created on each Receive Queue.
+This sets the number of packets on RX rings advertised to MAC.
+The default value is
+.Sy 10 .
+.Ed
+.It Sy sq_size_shift
+.Bd -filled -compact
+Minimum:
+.Sy 2 |
+Maximum:
+.Sy device dependent (up to 255)
+.Ed
+.Bd -filled
+The
+.Sy sq_size_shift
+property determines the number of descriptors on Send Queues for the device.
+The number of descriptors is calculated as
+.Dv (1 << sq_size_shift) ,
+so a value of 9 would mean 512 descriptors are created on each Send Queue.
+This sets the number of packets on TX rings advertised to MAC.
+The default value is
+.Sy 11 .
+Note that large packets often occupy more than one descriptor slot on the SQ,
+so it is sometimes a good idea to increase this if using a large MTU.
+.Ed
+.It Sy tx_ngroups
+.Bd -filled -compact
+Minimum:
+.Sy 1 |
+Maximum:
+.Sy device dependent
+.Ed
+.Bd -filled
+The
+.Sy tx_ngroups
+property determines the number of TX groups advertised to MAC.
+The default value is
+.Sy 1 .
+.Ed
+.It Sy tx_nrings_per_group
+.Bd -filled -compact
+Minimum:
+.Sy 1 |
+Maximum:
+.Sy device dependent
+.Ed
+.Bd -filled
+The
+.Sy tx_nrings_per_group
+property determines the number of rings in each TX group advertised to MAC.
+The default value is
+.Sy 64 .
+.Ed
+.It Sy rx_ngroups_large
+.Bd -filled -compact
+Minimum:
+.Sy 1 |
+Maximum:
+.Sy device dependent
+.Ed
+.Bd -filled
+The
+.Sy rx_ngroups_large
+property determines the number of "large" RX groups advertised to MAC.
+The size of "large" RX groups is set by the
+.Sy rx_nrings_per_large_group
+property.
+The default value is
+.Sy 2 .
+.Ed
+.It Sy rx_nrings_per_large_group
+.Bd -filled -compact
+Minimum:
+.Sy 1 |
+Maximum:
+.Sy device dependent
+.Ed
+.Bd -filled
+The
+.Sy rx_nrings_per_large_group
+property determines the number of rings in each "large" RX group advertised to
+MAC.
+The number of such groups is determined by the
+.Sy rx_ngroups_large
+property.
+The default value is
+.Sy 16 .
+.Ed
+.It Sy rx_ngroups_small
+.Bd -filled -compact
+Minimum:
+.Sy 1 |
+Maximum:
+.Sy device dependent
+.Ed
+.Bd -filled
+The
+.Sy rx_ngroups_small
+property determines the number of "small" RX groups advertised to MAC.
+The size of "small" RX groups is set by the
+.Sy rx_nrings_per_small_group
+property.
+It is recommended to use many small groups when using a large number of
+VNICs on top of the NIC (e.g. on a system with many zones).
+The default value is
+.Sy 256 .
+.Ed
+.It Sy rx_nrings_per_small_group
+.Bd -filled -compact
+Minimum:
+.Sy 1 |
+Maximum:
+.Sy device dependent
+.Ed
+.Bd -filled
+The
+.Sy rx_nrings_per_small_group
+property determines the number of rings in each "small" RX group advertised to
+MAC.
+The number of such groups is determined by the
+.Sy rx_ngroups_small
+property.
+The default value is
+.Sy 4 .
+.Ed
+.It Sy ftbl_root_size_shift
+.Bd -filled -compact
+Minimum:
+.Sy 4 |
+Maximum:
+.Sy device dependent
+.Ed
+.Bd -filled
+The
+.Sy ftbl_root_size_shift
+property determines the number of flow table entries on the root flow table,
+and therefore how many MAC addresses can be filtered into groups across the
+entire NIC.
+The number of flow entries is calculated as
+.Dv (1 << ftbl_root_size_shift) ,
+so a value of 9 would mean 512 entries are created in the root flow table.
+The default value is
+.Sy 12 .
+.Ed
+.It Sy cqemod_period_usec
+.Bd -filled -compact
+Minimum:
+.Sy 1 |
+Maximum:
+.Sy 65535
+.Ed
+.Bd -filled
+The
+.Sy cqemod_period_usec
+property determines the maximum delay after a completion event has occurred
+before an event queue entry (and thus an interrupt) is generated.
+The delay is measured in microseconds.
+The default value is
+.Sy 50 .
+.Ed
+.It Sy cqemod_count
+.Bd -filled -compact
+Minimum:
+.Sy 1 |
+Maximum:
+.Sy 65535
+.Ed
+.Bd -filled
+The
+.Sy cqemod_count
+property determines the maximum number of completion events that can have
+occurred before an event queue entry (and thus an interrupt) is generated.
+The default value is
+.Sy 80% of the CQ size .
+.Ed
+.It Sy intrmod_period_usec
+.Bd -filled -compact
+Minimum:
+.Sy 1 |
+Maximum:
+.Sy 65535
+.Ed
+.Bd -filled
+The
+.Sy intrmod_period_usec
+property determines the maximum delay after an event queue entry has been
+generated before an interrupt is raised.
+The delay is measured in microseconds.
+The default value is
+.Sy 10 .
+.Ed
+.It Sy tx_bind_threshold
+.Bd -filled -compact
+Minimum:
+.Sy 1 |
+Maximum:
+.Sy 65535
+.Ed
+.Bd -filled
+The
+.Sy tx_bind_threshold
+property determines the minimum number of bytes in a packet before the driver
+uses
+.Xr ddi_dma_addr_bind_handle 9F
+to bind the packet memory for DMA, rather than copying the memory as it does
+for small packets.
+DMA binds are expensive and involve taking locks in the PCI nexus driver, so it
+is seldom worth using them for small packets.
+The default value is
+.Sy 2048 .
+.Ed
+.El
+.Sh FILES
+.Bl -tag -width Pa
+.It Pa /kernel/drv/amd64/mlxcx
+Device driver (x86)
+.It Pa /kernel/drv/mlxcx.conf
+Driver configuration file containing user-configurable options
+.El
+.Sh SEE ALSO
+.Xr dladm 1M ,
+.Xr snoop 1M ,
+.Xr driver.conf 4 ,
+.Xr dlpi 7P
diff --git a/usr/src/pkg/manifests/consolidation-osnet-osnet-message-files.mf b/usr/src/pkg/manifests/consolidation-osnet-osnet-message-files.mf
index 373721a966..a4800aa033 100644
--- a/usr/src/pkg/manifests/consolidation-osnet-osnet-message-files.mf
+++ b/usr/src/pkg/manifests/consolidation-osnet-osnet-message-files.mf
@@ -282,6 +282,7 @@ file path=usr/lib/locale/C/LC_MESSAGES/FMD.po
file path=usr/lib/locale/C/LC_MESSAGES/FMNOTIFY.po
file path=usr/lib/locale/C/LC_MESSAGES/GMCA.po
file path=usr/lib/locale/C/LC_MESSAGES/INTEL.po
+file path=usr/lib/locale/C/LC_MESSAGES/NIC.po
file path=usr/lib/locale/C/LC_MESSAGES/NXGE.po
file path=usr/lib/locale/C/LC_MESSAGES/PCI.po
file path=usr/lib/locale/C/LC_MESSAGES/PCIEX.po
diff --git a/usr/src/pkg/manifests/driver-network-mlxcx.mf b/usr/src/pkg/manifests/driver-network-mlxcx.mf
new file mode 100644
index 0000000000..dec1aa726c
--- /dev/null
+++ b/usr/src/pkg/manifests/driver-network-mlxcx.mf
@@ -0,0 +1,54 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2020 the University of Queensland
+# Author: Alex Wilson <alex@uq.edu.au>
+#
+
+#
+# The default for payload-bearing actions in this package is to appear in the
+# global zone only. See the include file for greater detail, as well as
+# information about overriding the defaults.
+#
+<include global_zone_only_component>
+set name=pkg.fmri value=pkg:/driver/network/mlxcx@$(PKGVERS)
+set name=pkg.description value="Mellanox ConnectX-4/5/6 Ethernet Driver"
+set name=pkg.summary value="Mellanox ConnectX-4/5/6 Ethernet Driver"
+set name=info.classification \
+    value=org.opensolaris.category.2008:Drivers/Networking
+set name=variant.arch value=i386
+dir path=kernel group=sys
+dir path=kernel/drv group=sys
+dir path=kernel/drv/$(ARCH64) group=sys
+dir path=usr/share/man
+dir path=usr/share/man/man7d
+driver name=mlxcx \
+ alias=pciex15b3,1013 \
+ alias=pciex15b3,1014 \
+ alias=pciex15b3,1015 \
+ alias=pciex15b3,1016 \
+ alias=pciex15b3,1017 \
+ alias=pciex15b3,1018 \
+ alias=pciex15b3,1019 \
+ alias=pciex15b3,101a \
+ alias=pciex15b3,101b \
+ alias=pciex15b3,101c \
+ alias=pciex15b3,101d \
+ alias=pciex15b3,101e \
+ alias=pciex15b3,101f
+file path=kernel/drv/$(ARCH64)/mlxcx group=sys
+file path=kernel/drv/mlxcx.conf group=sys
+file path=usr/share/man/man7d/mlxcx.7d
+license lic_CDDL license=lic_CDDL
diff --git a/usr/src/pkg/manifests/service-fault-management.mf b/usr/src/pkg/manifests/service-fault-management.mf
index 3549793ce2..fee5ffe47d 100644
--- a/usr/src/pkg/manifests/service-fault-management.mf
+++ b/usr/src/pkg/manifests/service-fault-management.mf
@@ -335,6 +335,8 @@ $(i386_ONLY)file path=usr/lib/fm/dict/GMCA.dict mode=0444 \
variant.opensolaris.zone=__NODEFAULT
$(i386_ONLY)file path=usr/lib/fm/dict/INTEL.dict mode=0444 \
variant.opensolaris.zone=__NODEFAULT
+file path=usr/lib/fm/dict/NIC.dict mode=0444 \
+ variant.opensolaris.zone=__NODEFAULT
file path=usr/lib/fm/dict/NXGE.dict mode=0444 \
variant.opensolaris.zone=__NODEFAULT
file path=usr/lib/fm/dict/PCI.dict mode=0444 \
@@ -366,6 +368,7 @@ file path=usr/lib/fm/eft/disk.eft mode=0444 \
variant.opensolaris.zone=__NODEFAULT
file path=usr/lib/fm/eft/neptune_xaui.eft mode=0444
file path=usr/lib/fm/eft/neptune_xfp.eft mode=0444
+file path=usr/lib/fm/eft/nic.eft mode=0444
file path=usr/lib/fm/eft/pci.eft mode=0444
file path=usr/lib/fm/eft/pciex.eft mode=0444
file path=usr/lib/fm/eft/pciexrc.eft mode=0444
@@ -531,6 +534,8 @@ $(i386_ONLY)file path=usr/lib/locale/C/LC_MESSAGES/GMCA.mo mode=0444 \
variant.opensolaris.zone=__NODEFAULT
$(i386_ONLY)file path=usr/lib/locale/C/LC_MESSAGES/INTEL.mo mode=0444 \
variant.opensolaris.zone=__NODEFAULT
+file path=usr/lib/locale/C/LC_MESSAGES/NIC.mo mode=0444 \
+ variant.opensolaris.zone=__NODEFAULT
file path=usr/lib/locale/C/LC_MESSAGES/NXGE.mo mode=0444 \
variant.opensolaris.zone=__NODEFAULT
file path=usr/lib/locale/C/LC_MESSAGES/PCI.mo mode=0444 \
diff --git a/usr/src/pkg/manifests/system-data-hardware-registry.mf b/usr/src/pkg/manifests/system-data-hardware-registry.mf
index 2cbc143786..779d1613e3 100644
--- a/usr/src/pkg/manifests/system-data-hardware-registry.mf
+++ b/usr/src/pkg/manifests/system-data-hardware-registry.mf
@@ -21,10 +21,11 @@
#
# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
-# Copyright 2019 OmniOS Community Edition (OmniOSce) Association.
+# Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
#
-set name=pkg.fmri value=pkg:/system/data/hardware-registry@$(PKGVERS)
+set name=pkg.fmri \
+ value=pkg:/system/data/hardware-registry@2020.2.22,$(PKGVERS_BUILTON)-$(PKGVERS_BRANCH)
set name=pkg.description \
value="ASCII databases describing various PCI, USB and other hardware"
set name=pkg.summary value="Hardware data files"
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index 060036e676..ce7b7a3e6a 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -2318,3 +2318,9 @@ BNX_OBJS += \
bnx_lm_main.o \
bnx_lm_recv.o \
bnx_lm_send.o
+
+#
+# mlxcx(7D)
+#
+MLXCX_OBJS += mlxcx.o mlxcx_dma.o mlxcx_cmd.o mlxcx_intr.o mlxcx_gld.o \
+ mlxcx_ring.o
diff --git a/usr/src/uts/common/Makefile.rules b/usr/src/uts/common/Makefile.rules
index 1d052bdcc2..8a906a2e25 100644
--- a/usr/src/uts/common/Makefile.rules
+++ b/usr/src/uts/common/Makefile.rules
@@ -956,6 +956,10 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/common/io/mii/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
+$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/mlxcx/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/mr_sas/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
diff --git a/usr/src/uts/common/inet/ilb/ilb.c b/usr/src/uts/common/inet/ilb/ilb.c
index 8ab2a90116..91cd671b12 100644
--- a/usr/src/uts/common/inet/ilb/ilb.c
+++ b/usr/src/uts/common/inet/ilb/ilb.c
@@ -1679,6 +1679,8 @@ ilb_check(ilb_stack_t *ilbs, ill_t *ill, mblk_t *mp, in6_addr_t *src,
uint16_t nat_src_idx;
boolean_t busy;
+ ret = 0;
+
/*
* We don't really need to switch here since both protocols's
* ports are at the same offset. Just prepare for future protocol
diff --git a/usr/src/uts/common/inet/ilb/ilb_conn.c b/usr/src/uts/common/inet/ilb/ilb_conn.c
index 7f79d41dd6..24b0138fbf 100644
--- a/usr/src/uts/common/inet/ilb/ilb_conn.c
+++ b/usr/src/uts/common/inet/ilb/ilb_conn.c
@@ -132,6 +132,9 @@ ilb_conn_remove_common(ilb_conn_t *connp, boolean_t c2s)
ilb_conn_t **next, **prev;
ilb_conn_t **next_prev, **prev_next;
+ next_prev = NULL;
+ prev_next = NULL;
+
if (c2s) {
hash = connp->conn_c2s_hash;
ASSERT(MUTEX_HELD(&hash->ilb_conn_hash_lock));
@@ -698,6 +701,7 @@ update_conn_tcp(ilb_conn_t *connp, void *iph, tcpha_t *tcpha, int32_t pkt_len,
uint32_t ack, seq;
int32_t seg_len;
+ ack = 0;
if (tcpha->tha_flags & TH_RST)
return (B_FALSE);
@@ -903,6 +907,11 @@ ilb_check_icmp_conn(ilb_stack_t *ilbs, mblk_t *mp, int l3, void *out_iph,
uint32_t adj_ip_sum;
boolean_t full_nat;
+ in_iph4 = NULL;
+ in_iph6 = NULL;
+ icmph4 = NULL;
+ icmph6 = NULL;
+
if (l3 == IPPROTO_IP) {
in6_addr_t in_src, in_dst;
diff --git a/usr/src/uts/common/inet/ip/conn_opt.c b/usr/src/uts/common/inet/ip/conn_opt.c
index b4bff4d7b4..8a05a25b08 100644
--- a/usr/src/uts/common/inet/ip/conn_opt.c
+++ b/usr/src/uts/common/inet/ip/conn_opt.c
@@ -1209,6 +1209,7 @@ conn_opt_set_ip(conn_opt_arg_t *coa, t_scalar_t name, uint_t inlen,
return (EINVAL);
}
+ ifindex = UINT_MAX;
switch (name) {
case IP_TTL:
/* Don't allow zero */
@@ -1529,6 +1530,7 @@ conn_opt_set_ipv6(conn_opt_arg_t *coa, t_scalar_t name, uint_t inlen,
if (connp->conn_family != AF_INET6)
return (EINVAL);
+ ifindex = UINT_MAX;
switch (name) {
case IPV6_MULTICAST_IF:
/*
diff --git a/usr/src/uts/common/inet/ip/icmp.c b/usr/src/uts/common/inet/ip/icmp.c
index b1a77ae0cc..46c791298a 100644
--- a/usr/src/uts/common/inet/ip/icmp.c
+++ b/usr/src/uts/common/inet/ip/icmp.c
@@ -739,6 +739,11 @@ rawip_do_connect(conn_t *connp, const struct sockaddr *sa, socklen_t len,
}
ASSERT(sa != NULL && len != 0);
+ sin = NULL;
+ sin6 = NULL;
+ dstport = 0;
+ flowinfo = 0;
+ v4dst = INADDR_ANY;
/*
* Determine packet type based on type of address passed in
@@ -3592,6 +3597,7 @@ icmp_output_ancillary(conn_t *connp, sin_t *sin, sin6_t *sin6, mblk_t *mp,
}
} else {
/* Connected case */
+ dstport = connp->conn_fport;
v6dst = connp->conn_faddr_v6;
flowinfo = connp->conn_flowinfo;
}
diff --git a/usr/src/uts/common/inet/ip/igmp.c b/usr/src/uts/common/inet/ip/igmp.c
index 423bb2a816..de6a91877a 100644
--- a/usr/src/uts/common/inet/ip/igmp.c
+++ b/usr/src/uts/common/inet/ip/igmp.c
@@ -310,15 +310,15 @@ mld_start_timers(unsigned next, ip_stack_t *ipst)
mblk_t *
igmp_input(mblk_t *mp, ip_recv_attr_t *ira)
{
- igmpa_t *igmpa;
+ igmpa_t *igmpa;
ipha_t *ipha = (ipha_t *)(mp->b_rptr);
int iphlen, igmplen, mblklen;
- ilm_t *ilm;
+ ilm_t *ilm;
uint32_t src, dst;
- uint32_t group;
+ uint32_t group;
in6_addr_t v6group;
uint_t next;
- ipif_t *ipif;
+ ipif_t *ipif;
ill_t *ill = ira->ira_ill;
ip_stack_t *ipst = ill->ill_ipst;
@@ -778,7 +778,7 @@ igmp_joingroup(ilm_t *ilm)
ASSERT(RW_WRITE_HELD(&ill->ill_mcast_lock));
if (ilm->ilm_addr == htonl(INADDR_ALLHOSTS_GROUP)) {
- ilm->ilm_rtx.rtx_timer = INFINITY;
+ ilm->ilm_rtx.rtx_timer = timer = INFINITY;
ilm->ilm_state = IGMP_OTHERMEMBER;
} else {
ip1dbg(("Querier mode %d, sending report, group %x\n",
@@ -857,11 +857,10 @@ mld_joingroup(ilm_t *ilm)
ill = ilm->ilm_ill;
ASSERT(ill->ill_isv6);
-
ASSERT(RW_WRITE_HELD(&ill->ill_mcast_lock));
if (IN6_ARE_ADDR_EQUAL(&ipv6_all_hosts_mcast, &ilm->ilm_v6addr)) {
- ilm->ilm_rtx.rtx_timer = INFINITY;
+ ilm->ilm_rtx.rtx_timer = timer = INFINITY;
ilm->ilm_state = IGMP_OTHERMEMBER;
} else {
if (ill->ill_mcast_type == MLD_V1_ROUTER) {
@@ -1435,7 +1434,7 @@ igmp_timeout_handler(void *arg)
uint_t
mld_timeout_handler_per_ill(ill_t *ill)
{
- ilm_t *ilm;
+ ilm_t *ilm;
uint_t next = INFINITY, current;
mrec_t *rp, *rtxrp;
rtx_state_t *rtxp;
@@ -1832,7 +1831,7 @@ igmp_sendpkt(ilm_t *ilm, uchar_t type, ipaddr_t addr)
ipha_t *ipha;
int hdrlen = sizeof (ipha_t) + RTRALERT_LEN;
size_t size = hdrlen + sizeof (igmpa_t);
- ill_t *ill = ilm->ilm_ill;
+ ill_t *ill = ilm->ilm_ill;
ip_stack_t *ipst = ill->ill_ipst;
ASSERT(RW_LOCK_HELD(&ill->ill_mcast_lock));
@@ -1859,15 +1858,15 @@ igmp_sendpkt(ilm_t *ilm, uchar_t type, ipaddr_t addr)
ipha->ipha_version_and_hdr_length = (IP_VERSION << 4)
| (IP_SIMPLE_HDR_LENGTH_IN_WORDS + RTRALERT_LEN_IN_WORDS);
- ipha->ipha_type_of_service = 0;
+ ipha->ipha_type_of_service = 0;
ipha->ipha_length = htons(size);
ipha->ipha_ident = 0;
ipha->ipha_fragment_offset_and_flags = 0;
- ipha->ipha_ttl = IGMP_TTL;
- ipha->ipha_protocol = IPPROTO_IGMP;
- ipha->ipha_hdr_checksum = 0;
- ipha->ipha_dst = addr ? addr : igmpa->igmpa_group;
- ipha->ipha_src = INADDR_ANY;
+ ipha->ipha_ttl = IGMP_TTL;
+ ipha->ipha_protocol = IPPROTO_IGMP;
+ ipha->ipha_hdr_checksum = 0;
+ ipha->ipha_dst = addr ? addr : igmpa->igmpa_group;
+ ipha->ipha_src = INADDR_ANY;
ill_mcast_queue(ill, mp);
@@ -2448,7 +2447,7 @@ mld_sendpkt(ilm_t *ilm, uchar_t type, const in6_addr_t *v6addr)
{
mblk_t *mp;
mld_hdr_t *mldh;
- ip6_t *ip6h;
+ ip6_t *ip6h;
ip6_hbh_t *ip6hbh;
struct ip6_opt_router *ip6router;
size_t size = IPV6_HDR_LEN + sizeof (mld_hdr_t);
diff --git a/usr/src/uts/common/inet/ip/ip.c b/usr/src/uts/common/inet/ip/ip.c
index 8cbc98c817..925d06c62b 100644
--- a/usr/src/uts/common/inet/ip/ip.c
+++ b/usr/src/uts/common/inet/ip/ip.c
@@ -2404,6 +2404,7 @@ ipoptp_next(ipoptp_t *optp)
* its there, and make sure it points to either something
* inside this option, or the end of the option.
*/
+ pointer = IPOPT_EOL;
switch (opt) {
case IPOPT_RR:
case IPOPT_TS:
@@ -6338,6 +6339,9 @@ ip_opt_set_multicast_group(conn_t *connp, t_scalar_t name,
optfn = ip_opt_delete_group;
break;
default:
+ /* Should not be reached. */
+ fmode = MODE_IS_INCLUDE;
+ optfn = NULL;
ASSERT(0);
}
@@ -6467,6 +6471,9 @@ ip_opt_set_multicast_sources(conn_t *connp, t_scalar_t name,
optfn = ip_opt_delete_group;
break;
default:
+ /* Should not be reached. */
+ optfn = NULL;
+ fmode = 0;
ASSERT(0);
}
@@ -8935,6 +8942,8 @@ ip_forward_options(mblk_t *mp, ipha_t *ipha, ill_t *dst_ill,
ip2dbg(("ip_forward_options\n"));
dst = ipha->ipha_dst;
+ opt = NULL;
+
for (optval = ipoptp_first(&opts, ipha);
optval != IPOPT_EOL;
optval = ipoptp_next(&opts)) {
@@ -9021,6 +9030,7 @@ ip_forward_options(mblk_t *mp, ipha_t *ipha, ill_t *dst_ill,
opt[IPOPT_OFFSET] += IP_ADDR_LEN;
break;
case IPOPT_TS:
+ off = 0;
/* Insert timestamp if there is room */
switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
case IPOPT_TS_TSONLY:
@@ -9185,6 +9195,7 @@ ip_input_local_options(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
ip_stack_t *ipst = ill->ill_ipst;
ip2dbg(("ip_input_local_options\n"));
+ opt = NULL;
for (optval = ipoptp_first(&opts, ipha);
optval != IPOPT_EOL;
@@ -9247,6 +9258,7 @@ ip_input_local_options(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
opt[IPOPT_OFFSET] += IP_ADDR_LEN;
break;
case IPOPT_TS:
+ off = 0;
/* Insert timestamp if there is romm */
switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
case IPOPT_TS_TSONLY:
@@ -9340,6 +9352,7 @@ ip_input_options(ipha_t *ipha, ipaddr_t dst, mblk_t *mp,
ire_t *ire;
ip2dbg(("ip_input_options\n"));
+ opt = NULL;
*errorp = 0;
for (optval = ipoptp_first(&opts, ipha);
optval != IPOPT_EOL;
@@ -11888,6 +11901,7 @@ ip_output_local_options(ipha_t *ipha, ip_stack_t *ipst)
ipaddr_t dst;
uint32_t ts;
timestruc_t now;
+ uint32_t off = 0;
for (optval = ipoptp_first(&opts, ipha);
optval != IPOPT_EOL;
@@ -11896,7 +11910,6 @@ ip_output_local_options(ipha_t *ipha, ip_stack_t *ipst)
optlen = opts.ipoptp_len;
ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
switch (optval) {
- uint32_t off;
case IPOPT_SSRR:
case IPOPT_LSRR:
off = opt[IPOPT_OFFSET];
@@ -12544,6 +12557,7 @@ ip_process_ioctl(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *arg)
}
ci.ci_ipif = NULL;
+ extract_funcp = NULL;
switch (ipip->ipi_cmd_type) {
case MISC_CMD:
case MSFILT_CMD:
@@ -12725,6 +12739,7 @@ ip_wput_nondata(queue_t *q, mblk_t *mp)
else
connp = NULL;
+ iocp = NULL;
switch (DB_TYPE(mp)) {
case M_IOCTL:
/*
@@ -12935,6 +12950,7 @@ ip_output_options(mblk_t *mp, ipha_t *ipha, ip_xmit_attr_t *ixa, ill_t *ill)
ip2dbg(("ip_output_options\n"));
+ opt = NULL;
dst = ipha->ipha_dst;
for (optval = ipoptp_first(&opts, ipha);
optval != IPOPT_EOL;
diff --git a/usr/src/uts/common/inet/ip/ip6.c b/usr/src/uts/common/inet/ip/ip6.c
index afaf01024f..26e7be2fe8 100644
--- a/usr/src/uts/common/inet/ip/ip6.c
+++ b/usr/src/uts/common/inet/ip/ip6.c
@@ -2766,7 +2766,7 @@ ip_process_options_v6(mblk_t *mp, ip6_t *ip6h,
uint8_t *optptr, uint_t optlen, uint8_t hdr_type, ip_recv_attr_t *ira)
{
uint8_t opt_type;
- uint_t optused;
+ uint_t optused = 0;
int ret = 0;
const char *errtype;
ill_t *ill = ira->ira_ill;
diff --git a/usr/src/uts/common/inet/ip/ip6_ire.c b/usr/src/uts/common/inet/ip/ip6_ire.c
index ad738bc3b7..1145025588 100644
--- a/usr/src/uts/common/inet/ip/ip6_ire.c
+++ b/usr/src/uts/common/inet/ip/ip6_ire.c
@@ -687,7 +687,7 @@ ire_match_args_v6(ire_t *ire, const in6_addr_t *addr, const in6_addr_t *mask,
const in6_addr_t *gateway, int type, const ill_t *ill, zoneid_t zoneid,
const ts_label_t *tsl, int match_flags)
{
- in6_addr_t gw_addr_v6;
+ in6_addr_t gw_addr_v6 = { 0 };
ill_t *ire_ill = NULL, *dst_ill;
ip_stack_t *ipst = ire->ire_ipst;
diff --git a/usr/src/uts/common/inet/ip/ip6_output.c b/usr/src/uts/common/inet/ip/ip6_output.c
index dc074454e3..143077ed32 100644
--- a/usr/src/uts/common/inet/ip/ip6_output.c
+++ b/usr/src/uts/common/inet/ip/ip6_output.c
@@ -1023,7 +1023,7 @@ ire_send_wire_v6(ire_t *ire, mblk_t *mp, void *iph_arg,
*/
if (pktlen > ixa->ixa_fragsize ||
(ixaflags & (IXAF_IPSEC_SECURE|IXAF_IPV6_ADD_FRAGHDR))) {
- uint32_t ident;
+ uint32_t ident = 0;
if (ixaflags & IXAF_IPSEC_SECURE)
pktlen += ipsec_out_extra_length(ixa);
diff --git a/usr/src/uts/common/inet/ip/ip_ftable.c b/usr/src/uts/common/inet/ip/ip_ftable.c
index 980436b578..408b9d0ea1 100644
--- a/usr/src/uts/common/inet/ip/ip_ftable.c
+++ b/usr/src/uts/common/inet/ip/ip_ftable.c
@@ -76,7 +76,7 @@
(((ire)->ire_type & IRE_DEFAULT) || \
(((ire)->ire_type & IRE_INTERFACE) && ((ire)->ire_addr == 0)))
-#define IP_SRC_MULTIHOMING(isv6, ipst) \
+#define IP_SRC_MULTIHOMING(isv6, ipst) \
(isv6 ? ipst->ips_ipv6_strict_src_multihoming : \
ipst->ips_ip_strict_src_multihoming)
@@ -470,7 +470,7 @@ ire_get_bucket(ire_t *ire)
* routes to this destination, this routine will utilise the
* first route it finds to IP address
* Return values:
- * 0 - FAILURE
+ * 0 - FAILURE
* nonzero - ifindex
*/
uint_t
@@ -807,7 +807,7 @@ ire_round_robin(irb_t *irb_ptr, ire_ftable_args_t *margs, uint_t hash,
ire_t *orig_ire, ip_stack_t *ipst)
{
ire_t *ire, *maybe_ire = NULL;
- uint_t maybe_badcnt;
+ uint_t maybe_badcnt = 0;
uint_t maxwalk;
/* Fold in more bits from the hint/hash */
diff --git a/usr/src/uts/common/inet/ip/ip_if.c b/usr/src/uts/common/inet/ip/ip_if.c
index cb3cd011c3..a2ddcb3547 100644
--- a/usr/src/uts/common/inet/ip/ip_if.c
+++ b/usr/src/uts/common/inet/ip/ip_if.c
@@ -1509,9 +1509,9 @@ ill_capability_id_ack(ill_t *ill, mblk_t *mp, dl_capability_sub_t *outers)
id_ic = (dl_capab_id_t *)(outers + 1);
+ inners = &id_ic->id_subcap;
if (outers->dl_length < sizeof (*id_ic) ||
- (inners = &id_ic->id_subcap,
- inners->dl_length > (outers->dl_length - sizeof (*inners)))) {
+ inners->dl_length > (outers->dl_length - sizeof (*inners))) {
cmn_err(CE_WARN, "ill_capability_id_ack: malformed "
"encapsulated capab type %d too long for mblk",
inners->dl_cap);
@@ -4025,6 +4025,7 @@ ill_get_next_ifindex(uint_t index, boolean_t isv6, ip_stack_t *ipst)
phyint_t *phyi_initial;
uint_t ifindex;
+ phyi_initial = NULL;
rw_enter(&ipst->ips_ill_g_lock, RW_READER);
if (index == 0) {
diff --git a/usr/src/uts/common/inet/ip/ip_ndp.c b/usr/src/uts/common/inet/ip/ip_ndp.c
index 69506f77d4..2cee123d4a 100644
--- a/usr/src/uts/common/inet/ip/ip_ndp.c
+++ b/usr/src/uts/common/inet/ip/ip_ndp.c
@@ -2943,6 +2943,8 @@ nce_update(ncec_t *ncec, uint16_t new_state, uchar_t *new_ll_addr)
ASSERT(ncec->ncec_lladdr != NULL || new_state == ND_INITIAL ||
new_state == ND_INCOMPLETE);
}
+
+ tid = 0;
if (need_stop_timer || (ncec->ncec_flags & NCE_F_STATIC)) {
tid = ncec->ncec_timeout_id;
ncec->ncec_timeout_id = 0;
@@ -4433,6 +4435,7 @@ nce_resolve_src(ncec_t *ncec, in6_addr_t *src)
ASSERT(src != NULL);
ASSERT(IN6_IS_ADDR_UNSPECIFIED(src));
+ src4 = 0;
src6 = *src;
if (is_myaddr) {
src6 = ncec->ncec_addr;
@@ -4641,6 +4644,7 @@ nce_add_common(ill_t *ill, uchar_t *hw_addr, uint_t hw_addr_len,
ndp = ill->ill_ipst->ips_ndp4;
*retnce = NULL;
+ state = 0;
ASSERT(MUTEX_HELD(&ndp->ndp_g_lock));
diff --git a/usr/src/uts/common/inet/ip/ip_output.c b/usr/src/uts/common/inet/ip/ip_output.c
index 169859707e..a6ca2aabd5 100644
--- a/usr/src/uts/common/inet/ip/ip_output.c
+++ b/usr/src/uts/common/inet/ip/ip_output.c
@@ -1100,7 +1100,7 @@ ire_send_local_v4(ire_t *ire, mblk_t *mp, void *iph_arg,
int, 1);
if (HOOKS4_INTERESTED_LOOPBACK_OUT(ipst)) {
- int error;
+ int error = 0;
DTRACE_PROBE4(ip4__loopback__out__start, ill_t *, NULL,
ill_t *, ill, ipha_t *, ipha, mblk_t *, mp);
@@ -1156,7 +1156,7 @@ ire_send_local_v4(ire_t *ire, mblk_t *mp, void *iph_arg,
}
if (HOOKS4_INTERESTED_LOOPBACK_IN(ipst)) {
- int error;
+ int error = 0;
DTRACE_PROBE4(ip4__loopback__in__start, ill_t *, ill,
ill_t *, NULL, ipha_t *, ipha, mblk_t *, mp);
diff --git a/usr/src/uts/common/inet/ip/ip_rts.c b/usr/src/uts/common/inet/ip/ip_rts.c
index dece7be29d..5df5ad6447 100644
--- a/usr/src/uts/common/inet/ip/ip_rts.c
+++ b/usr/src/uts/common/inet/ip/ip_rts.c
@@ -114,7 +114,7 @@ rts_queue_input(mblk_t *mp, conn_t *o_connp, sa_family_t af, uint_t flags,
ip_stack_t *ipst)
{
mblk_t *mp1;
- conn_t *connp, *next_connp;
+ conn_t *connp, *next_connp;
/*
* Since we don't have an ill_t here, RTSQ_DEFAULT must already be
@@ -190,7 +190,7 @@ ip_rts_rtmsg(int type, ire_t *ire, int error, ip_stack_t *ipst)
mblk_t *mp;
rt_msghdr_t *rtm;
int rtm_addrs = (RTA_DST | RTA_NETMASK | RTA_GATEWAY);
- sa_family_t af;
+ sa_family_t af = { 0 };
in6_addr_t gw_addr_v6;
if (ire == NULL)
@@ -199,6 +199,7 @@ ip_rts_rtmsg(int type, ire_t *ire, int error, ip_stack_t *ipst)
ire->ire_ipversion == IPV6_VERSION);
ASSERT(!(ire->ire_type & IRE_IF_CLONE));
+ mp = NULL;
if (ire->ire_flags & RTF_SETSRC)
rtm_addrs |= RTA_SRC;
@@ -306,10 +307,14 @@ ip_rts_request_common(mblk_t *mp, conn_t *connp, cred_t *ioc_cr)
ts_label_t *tsl = NULL;
zoneid_t zoneid;
ip_stack_t *ipst;
- ill_t *ill = NULL;
+ ill_t *ill = NULL;
zoneid = connp->conn_zoneid;
ipst = connp->conn_netstack->netstack_ip;
+ net_mask = 0;
+ src_addr = 0;
+ dst_addr = 0;
+ gw_addr = 0;
if (mp->b_cont != NULL && !pullupmsg(mp, -1)) {
freemsg(mp);
@@ -1239,6 +1244,9 @@ rts_rtmget(mblk_t *mp, ire_t *ire, ire_t *ifire, const in6_addr_t *setsrc,
ipaddr_t v4setsrc;
rtm = (rt_msghdr_t *)mp->b_rptr;
+ ifaddr = 0;
+ brdaddr = 0;
+ rtm_flags = 0;
/*
* Find the ill used to send packets. This will be NULL in case
@@ -1406,7 +1414,7 @@ rts_setmetrics(ire_t *ire, uint_t which, rt_metrics_t *metrics)
ill_t *ill;
ifrt_t *ifrt;
mblk_t *mp;
- in6_addr_t gw_addr_v6;
+ in6_addr_t gw_addr_v6 = { 0 };
/* Need to add back some metrics to the IRE? */
/*
@@ -1422,6 +1430,7 @@ rts_setmetrics(ire_t *ire, uint_t which, rt_metrics_t *metrics)
* <net/route.h> says: rmx_rtt and rmx_rttvar are stored as
* microseconds.
*/
+ rtt = 0;
if (which & RTV_RTT)
rtt = metrics->rmx_rtt / 1000;
if (which & RTV_RTTVAR)
diff --git a/usr/src/uts/common/inet/ip/ipclassifier.c b/usr/src/uts/common/inet/ip/ipclassifier.c
index 77d9d8df7e..4f3ec2d817 100644
--- a/usr/src/uts/common/inet/ip/ipclassifier.c
+++ b/usr/src/uts/common/inet/ip/ipclassifier.c
@@ -613,6 +613,7 @@ ipcl_conn_create(uint32_t type, int sleep, netstack_t *ns)
break;
default:
+ conn_cache = NULL;
connp = NULL;
ASSERT(0);
}
diff --git a/usr/src/uts/common/inet/ip/ipmp.c b/usr/src/uts/common/inet/ip/ipmp.c
index 912b489c40..3106b6e2de 100644
--- a/usr/src/uts/common/inet/ip/ipmp.c
+++ b/usr/src/uts/common/inet/ip/ipmp.c
@@ -1909,6 +1909,7 @@ ipmp_phyint_join_grp(phyint_t *phyi, ipmp_grp_t *grp)
ASSERT(IAM_WRITER_IPSQ(ipsq));
ASSERT(phyi->phyint_illv4 != NULL || phyi->phyint_illv6 != NULL);
+ ill = NULL;
/*
* Send routing socket messages indicating that the phyint's ills
diff --git a/usr/src/uts/common/inet/ip/ipsecah.c b/usr/src/uts/common/inet/ip/ipsecah.c
index fc19d7f877..ced3696948 100644
--- a/usr/src/uts/common/inet/ip/ipsecah.c
+++ b/usr/src/uts/common/inet/ip/ipsecah.c
@@ -215,7 +215,7 @@ static int
ah_kstat_update(kstat_t *kp, int rw)
{
ah_kstats_t *ekp;
- netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
+ netstackid_t stackid;
netstack_t *ns;
ipsec_stack_t *ipss;
@@ -225,6 +225,7 @@ ah_kstat_update(kstat_t *kp, int rw)
if (rw == KSTAT_WRITE)
return (EACCES);
+ stackid = (netstackid_t)(uintptr_t)kp->ks_private;
ns = netstack_find_by_stackid(stackid);
if (ns == NULL)
return (-1);
diff --git a/usr/src/uts/common/inet/ip/ipsecesp.c b/usr/src/uts/common/inet/ip/ipsecesp.c
index b3dc7d350a..e0efbbf3ce 100644
--- a/usr/src/uts/common/inet/ip/ipsecesp.c
+++ b/usr/src/uts/common/inet/ip/ipsecesp.c
@@ -208,7 +208,7 @@ static int
esp_kstat_update(kstat_t *kp, int rw)
{
esp_kstats_t *ekp;
- netstackid_t stackid = (zoneid_t)(uintptr_t)kp->ks_private;
+ netstackid_t stackid;
netstack_t *ns;
ipsec_stack_t *ipss;
@@ -218,6 +218,7 @@ esp_kstat_update(kstat_t *kp, int rw)
if (rw == KSTAT_WRITE)
return (EACCES);
+ stackid = (zoneid_t)(uintptr_t)kp->ks_private;
ns = netstack_find_by_stackid(stackid);
if (ns == NULL)
return (-1);
diff --git a/usr/src/uts/common/inet/ip/sadb.c b/usr/src/uts/common/inet/ip/sadb.c
index 44ebb21db3..288c0e3e18 100644
--- a/usr/src/uts/common/inet/ip/sadb.c
+++ b/usr/src/uts/common/inet/ip/sadb.c
@@ -113,8 +113,8 @@ extern uint64_t ipsacq_maxpackets;
if (((sa)->ipsa_ ## exp) == 0) \
(sa)->ipsa_ ## exp = tmp; \
else \
- (sa)->ipsa_ ## exp = \
- MIN((sa)->ipsa_ ## exp, tmp); \
+ (sa)->ipsa_ ## exp = \
+ MIN((sa)->ipsa_ ## exp, tmp); \
} \
}
@@ -154,8 +154,6 @@ sadb_sa_refrele(void *target)
static time_t
sadb_add_time(time_t base, uint64_t delta)
{
- time_t sum;
-
/*
* Clip delta to the maximum possible time_t value to
* prevent "overwrapping" back into a shorter-than-desired
@@ -163,18 +161,12 @@ sadb_add_time(time_t base, uint64_t delta)
*/
if (delta > TIME_MAX)
delta = TIME_MAX;
- /*
- * This sum may still overflow.
- */
- sum = base + delta;
- /*
- * .. so if the result is less than the base, we overflowed.
- */
- if (sum < base)
- sum = TIME_MAX;
-
- return (sum);
+ if (base > 0) {
+ if (TIME_MAX - base < delta)
+ return (TIME_MAX); /* Overflow */
+ }
+ return (base + delta);
}
/*
@@ -1695,8 +1687,7 @@ sadb_pfkey_echo(queue_t *pfkey_q, mblk_t *mp, sadb_msg_t *samsg,
mp->b_cont = mp1;
break;
default:
- if (mp != NULL)
- freemsg(mp);
+ freemsg(mp);
return;
}
@@ -2941,7 +2932,7 @@ sadb_common_add(queue_t *pfkey_q, mblk_t *mp, sadb_msg_t *samsg,
boolean_t isupdate = (newbie != NULL);
uint32_t *src_addr_ptr, *dst_addr_ptr, *isrc_addr_ptr, *idst_addr_ptr;
ipsec_stack_t *ipss = ns->netstack_ipsec;
- ip_stack_t *ipst = ns->netstack_ip;
+ ip_stack_t *ipst = ns->netstack_ip;
ipsec_alginfo_t *alg;
int rcode;
boolean_t async = B_FALSE;
@@ -4386,8 +4377,8 @@ sadb_update_lifetimes(ipsa_t *assoc, sadb_lifetime_t *hard,
if (assoc->ipsa_idletime != 0) {
assoc->ipsa_idletime = min(assoc->ipsa_idletime,
assoc->ipsa_idleuselt);
- assoc->ipsa_idleexpiretime =
- current + assoc->ipsa_idletime;
+ assoc->ipsa_idleexpiretime =
+ current + assoc->ipsa_idletime;
} else {
assoc->ipsa_idleexpiretime =
current + assoc->ipsa_idleuselt;
@@ -5450,7 +5441,7 @@ sadb_acquire(mblk_t *datamp, ip_xmit_attr_t *ixa, boolean_t need_ah,
uint32_t seq;
uint64_t unique_id = 0;
boolean_t tunnel_mode = (ixa->ixa_flags & IXAF_IPSEC_TUNNEL) != 0;
- ts_label_t *tsl;
+ ts_label_t *tsl;
netstack_t *ns = ixa->ixa_ipst->ips_netstack;
ipsec_stack_t *ipss = ns->netstack_ipsec;
ipsecesp_stack_t *espstack = ns->netstack_ipsecesp;
@@ -6102,7 +6093,8 @@ sadb_label_from_sens(sadb_sens_t *sens, uint64_t *bitmap)
return (NULL);
bsllow(&sl);
- LCLASS_SET((_bslabel_impl_t *)&sl, sens->sadb_sens_sens_level);
+ LCLASS_SET((_bslabel_impl_t *)&sl,
+ (uint16_t)sens->sadb_sens_sens_level);
bcopy(bitmap, &((_bslabel_impl_t *)&sl)->compartments,
bitmap_len);
@@ -6629,7 +6621,7 @@ ipsec_find_listen_conn(uint16_t *pptr, ipsec_selector_t *sel, ip_stack_t *ipst)
static void
ipsec_tcp_pol(ipsec_selector_t *sel, ipsec_policy_t **ppp, ip_stack_t *ipst)
{
- connf_t *connfp;
+ connf_t *connfp;
conn_t *connp;
uint32_t ports;
uint16_t *pptr = (uint16_t *)&ports;
diff --git a/usr/src/uts/common/inet/ip/spd.c b/usr/src/uts/common/inet/ip/spd.c
index d703170c9f..85f06f3d02 100644
--- a/usr/src/uts/common/inet/ip/spd.c
+++ b/usr/src/uts/common/inet/ip/spd.c
@@ -163,7 +163,7 @@ int ipsec_weird_null_inbound_policy = 0;
* Inbound traffic should have matching identities for both SA's.
*/
-#define SA_IDS_MATCH(sa1, sa2) \
+#define SA_IDS_MATCH(sa1, sa2) \
(((sa1) == NULL) || ((sa2) == NULL) || \
(((sa1)->ipsa_src_cid == (sa2)->ipsa_src_cid) && \
(((sa1)->ipsa_dst_cid == (sa2)->ipsa_dst_cid))))
@@ -3178,6 +3178,7 @@ ipsec_act_find(const ipsec_act_t *a, int n, netstack_t *ns)
* TODO: should canonicalize a[] (i.e., zeroize any padding)
* so we can use a non-trivial policy_hash function.
*/
+ ap = NULL;
for (i = n-1; i >= 0; i--) {
hval = policy_hash(IPSEC_ACTION_HASH_SIZE, &a[i], &a[n]);
@@ -6282,6 +6283,9 @@ ipsec_fragcache_add(ipsec_fragcache_t *frag, mblk_t *iramp, mblk_t *mp,
#ifdef FRAGCACHE_DEBUG
cmn_err(CE_WARN, "Fragcache: %s\n", inbound ? "INBOUND" : "OUTBOUND");
#endif
+ v6_proto = 0;
+ fraghdr = NULL;
+
/*
* You're on the slow path, so insure that every packet in the
* cache is a single-mblk one.
diff --git a/usr/src/uts/common/inet/ip/tnet.c b/usr/src/uts/common/inet/ip/tnet.c
index e8c7b0c6e2..37a7402d52 100644
--- a/usr/src/uts/common/inet/ip/tnet.c
+++ b/usr/src/uts/common/inet/ip/tnet.c
@@ -692,7 +692,7 @@ tsol_get_pkt_label(mblk_t *mp, int version, ip_recv_attr_t *ira)
const void *src;
const ip6_t *ip6h;
cred_t *credp;
- int proto;
+ int proto;
ASSERT(DB_TYPE(mp) == M_DATA);
@@ -1477,6 +1477,9 @@ tsol_ip_forward(ire_t *ire, mblk_t *mp, const ip_recv_attr_t *ira)
*/
af = (ire->ire_ipversion == IPV4_VERSION) ? AF_INET : AF_INET6;
+ ipha = NULL;
+ ip6h = NULL;
+ gw_rhtp = NULL;
if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
ASSERT(ire->ire_ipversion == IPV4_VERSION);
diff --git a/usr/src/uts/common/inet/sctp/sctp_asconf.c b/usr/src/uts/common/inet/sctp/sctp_asconf.c
index f5edd1994f..db770df30e 100644
--- a/usr/src/uts/common/inet/sctp/sctp_asconf.c
+++ b/usr/src/uts/common/inet/sctp/sctp_asconf.c
@@ -47,7 +47,7 @@
typedef struct sctp_asconf_s {
mblk_t *head;
- uint32_t cid;
+ uint32_t cid;
} sctp_asconf_t;
/*
@@ -636,6 +636,12 @@ sctp_input_asconf_ack(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp)
ASSERT(ch->sch_id == CHUNK_ASCONF_ACK);
+ ainfo = NULL;
+ alist = NULL;
+ dlist = NULL;
+ aptr = NULL;
+ dptr = NULL;
+
snp = (uint32_t *)(ch + 1);
rlen = ntohs(ch->sch_len) - sizeof (*ch) - sizeof (*snp);
if (rlen < 0) {
@@ -915,9 +921,9 @@ sctp_wput_asconf(sctp_t *sctp, sctp_faddr_t *fp)
{
#define SCTP_SET_SENT_FLAG(mp) ((mp)->b_flag = SCTP_CHUNK_FLAG_SENT)
- mblk_t *mp;
+ mblk_t *mp;
mblk_t *ipmp;
- uint32_t *snp;
+ uint32_t *snp;
sctp_parm_hdr_t *ph;
boolean_t isv4;
sctp_stack_t *sctps = sctp->sctp_sctps;
@@ -1467,6 +1473,7 @@ sctp_add_ip(sctp_t *sctp, const void *addrs, uint32_t cnt)
* If deleting:
* o Must be part of the association
*/
+ sin6 = NULL;
for (i = 0; i < cnt; i++) {
switch (connp->conn_family) {
case AF_INET:
diff --git a/usr/src/uts/common/inet/sctp/sctp_common.c b/usr/src/uts/common/inet/sctp/sctp_common.c
index ef60f6d26a..a640ead3d1 100644
--- a/usr/src/uts/common/inet/sctp/sctp_common.c
+++ b/usr/src/uts/common/inet/sctp/sctp_common.c
@@ -804,6 +804,8 @@ sctp_unlink_faddr(sctp_t *sctp, sctp_faddr_t *fp)
{
sctp_faddr_t *fpp;
+ fpp = NULL;
+
if (!sctp->sctp_faddrs) {
return;
}
diff --git a/usr/src/uts/common/inet/sctp/sctp_cookie.c b/usr/src/uts/common/inet/sctp/sctp_cookie.c
index 53c35183dc..da86faa252 100644
--- a/usr/src/uts/common/inet/sctp/sctp_cookie.c
+++ b/usr/src/uts/common/inet/sctp/sctp_cookie.c
@@ -427,10 +427,10 @@ sctp_initialize_params(sctp_t *sctp, sctp_init_chunk_t *init,
/*
* Copy the peer's original source address into addr. This relies on the
* following format (see sctp_send_initack() below):
- * relative timestamp for the cookie (int64_t) +
- * cookie lifetime (uint32_t) +
- * local tie-tag (uint32_t) + peer tie-tag (uint32_t) +
- * Peer's original src ...
+ * relative timestamp for the cookie (int64_t) +
+ * cookie lifetime (uint32_t) +
+ * local tie-tag (uint32_t) + peer tie-tag (uint32_t) +
+ * Peer's original src ...
*/
int
cl_sctp_cookie_paddr(sctp_chunk_hdr_t *ch, in6_addr_t *addr)
@@ -454,7 +454,7 @@ cl_sctp_cookie_paddr(sctp_chunk_hdr_t *ch, in6_addr_t *addr)
sizeof (int64_t) + /* timestamp */ \
sizeof (uint32_t) + /* cookie lifetime */ \
sizeof (sctp_init_chunk_t) + /* INIT ACK */ \
- sizeof (in6_addr_t) + /* peer's original source */ \
+ sizeof (in6_addr_t) + /* peer's original source */ \
ntohs((initcp)->sch_len) + /* peer's INIT */ \
sizeof (uint32_t) + /* local tie-tag */ \
sizeof (uint32_t) + /* peer tie-tag */ \
@@ -946,6 +946,8 @@ sctp_send_cookie_echo(sctp_t *sctp, sctp_chunk_hdr_t *iackch, mblk_t *iackmp,
uint16_t old_num_str;
sctp_stack_t *sctps = sctp->sctp_sctps;
+ sdc = NULL;
+ seglen = 0;
iack = (sctp_init_chunk_t *)(iackch + 1);
cph = NULL;
diff --git a/usr/src/uts/common/inet/sctp/sctp_input.c b/usr/src/uts/common/inet/sctp/sctp_input.c
index 1b6449cfab..7d856fab28 100644
--- a/usr/src/uts/common/inet/sctp/sctp_input.c
+++ b/usr/src/uts/common/inet/sctp/sctp_input.c
@@ -831,7 +831,7 @@ sctp_try_partial_delivery(sctp_t *sctp, mblk_t *hmp, sctp_reass_t *srp,
* there is a break in the sequence. We want
* to chop the reassembly list as follows (the
* numbers are TSNs):
- * 10 -> 11 -> (end of chunks)
+ * 10 -> 11 -> (end of chunks)
* 10 -> 11 -> | 13 (break in sequence)
*/
prev = mp;
@@ -943,6 +943,7 @@ sctp_data_frag(sctp_t *sctp, mblk_t *dmp, sctp_data_hdr_t **dc, int *error,
uint32_t tsn;
uint16_t fraglen = 0;
+ reassq_curr = NULL;
*error = 0;
/*
diff --git a/usr/src/uts/common/inet/sctp/sctp_opt_data.c b/usr/src/uts/common/inet/sctp/sctp_opt_data.c
index 23abeccf96..476a6d921e 100644
--- a/usr/src/uts/common/inet/sctp/sctp_opt_data.c
+++ b/usr/src/uts/common/inet/sctp/sctp_opt_data.c
@@ -1057,7 +1057,10 @@ sctp_set_opt(sctp_t *sctp, int level, int name, const void *invalp,
/* In all cases, the size of the option must be bigger than int */
if (inlen >= sizeof (int32_t)) {
onoff = ONOFF(*i1);
+ } else {
+ return (EINVAL);
}
+
retval = 0;
RUN_SCTP(sctp);
diff --git a/usr/src/uts/common/inet/sctp/sctp_output.c b/usr/src/uts/common/inet/sctp/sctp_output.c
index eced6eccba..0564f5a416 100644
--- a/usr/src/uts/common/inet/sctp/sctp_output.c
+++ b/usr/src/uts/common/inet/sctp/sctp_output.c
@@ -990,8 +990,8 @@ sctp_output(sctp_t *sctp, uint_t num_pkt)
mblk_t *head;
mblk_t *meta = sctp->sctp_xmit_tail;
mblk_t *fill = NULL;
- uint16_t chunklen;
- uint32_t cansend;
+ uint16_t chunklen;
+ uint32_t cansend;
int32_t seglen;
int32_t xtralen;
int32_t sacklen;
@@ -1007,6 +1007,8 @@ sctp_output(sctp_t *sctp, uint_t num_pkt)
sctp_stack_t *sctps = sctp->sctp_sctps;
uint32_t tsn;
+ lfp = NULL;
+
if (sctp->sctp_ftsn == sctp->sctp_lastacked + 1) {
sacklen = 0;
} else {
@@ -1651,7 +1653,7 @@ sctp_check_adv_ack_pt(sctp_t *sctp, mblk_t *meta, mblk_t *mp)
* - the chunk is unsent, i.e. new data.
*/
#define SCTP_CHUNK_RX_CANBUNDLE(mp, fp) \
- (!SCTP_CHUNK_ABANDONED((mp)) && \
+ (!SCTP_CHUNK_ABANDONED((mp)) && \
((SCTP_CHUNK_ISSENT((mp)) && (SCTP_CHUNK_DEST(mp) == (fp) && \
!SCTP_CHUNK_ISACKED(mp))) || \
(((mp)->b_flag & (SCTP_CHUNK_FLAG_REXMIT|SCTP_CHUNK_FLAG_SENT)) != \
@@ -1694,7 +1696,7 @@ sctp_rexmit(sctp_t *sctp, sctp_faddr_t *oldfp)
*
* if the advanced peer ack point includes the next
* chunk to be retransmited - possibly the Forward
- * TSN was lost.
+ * TSN was lost.
*
* if we are PRSCTP aware and the next chunk to be
* retransmitted is now abandoned
diff --git a/usr/src/uts/common/inet/tcp/tcp_bind.c b/usr/src/uts/common/inet/tcp/tcp_bind.c
index ec2a5d4e29..876e7d48e6 100644
--- a/usr/src/uts/common/inet/tcp/tcp_bind.c
+++ b/usr/src/uts/common/inet/tcp/tcp_bind.c
@@ -324,7 +324,7 @@ tcp_bind_select_lport(tcp_t *tcp, in_port_t *requested_port_ptr,
boolean_t bind_to_req_port_only, cred_t *cr)
{
in_port_t mlp_port;
- mlp_type_t addrtype, mlptype;
+ mlp_type_t addrtype, mlptype;
boolean_t user_specified;
in_port_t allocated_port;
in_port_t requested_port = *requested_port_ptr;
@@ -333,6 +333,7 @@ tcp_bind_select_lport(tcp_t *tcp, in_port_t *requested_port_ptr,
tcp_stack_t *tcps = tcp->tcp_tcps;
in6_addr_t v6addr = connp->conn_laddr_v6;
+ zone = NULL;
/*
* XXX It's up to the caller to specify bind_to_req_port_only or not.
*/
@@ -697,7 +698,7 @@ tcp_bindi(tcp_t *tcp, in_port_t port, const in6_addr_t *laddr,
if (connp->conn_anon_priv_bind) {
/*
* loopmax =
- * (IPPORT_RESERVED-1) - tcp_min_anonpriv_port + 1
+ * (IPPORT_RESERVED-1) - tcp_min_anonpriv_port + 1
*/
loopmax = IPPORT_RESERVED -
tcps->tcps_min_anonpriv_port;
diff --git a/usr/src/uts/common/inet/tcp/tcp_fusion.c b/usr/src/uts/common/inet/tcp/tcp_fusion.c
index e73c34de34..f2cb8f6dbd 100644
--- a/usr/src/uts/common/inet/tcp/tcp_fusion.c
+++ b/usr/src/uts/common/inet/tcp/tcp_fusion.c
@@ -160,7 +160,7 @@ tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcpha_t *tcpha)
if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
tcp->tcp_xmit_head == NULL && peer_tcp->tcp_xmit_head == NULL) {
- mblk_t *mp;
+ mblk_t *mp = NULL;
queue_t *peer_rq = peer_connp->conn_rq;
ASSERT(!TCP_IS_DETACHED(peer_tcp));
diff --git a/usr/src/uts/common/inet/tcp/tcp_input.c b/usr/src/uts/common/inet/tcp/tcp_input.c
index ece2abbc04..0aaad871ba 100644
--- a/usr/src/uts/common/inet/tcp/tcp_input.c
+++ b/usr/src/uts/common/inet/tcp/tcp_input.c
@@ -2469,6 +2469,7 @@ tcp_input_data(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
tcp_unfuse(tcp);
}
+ mss = 0;
iphdr = mp->b_rptr;
rptr = mp->b_rptr;
ASSERT(OK_32PTR(rptr));
diff --git a/usr/src/uts/common/inet/tcp/tcp_misc.c b/usr/src/uts/common/inet/tcp/tcp_misc.c
index 4f6399c433..0896dd7611 100644
--- a/usr/src/uts/common/inet/tcp/tcp_misc.c
+++ b/usr/src/uts/common/inet/tcp/tcp_misc.c
@@ -291,6 +291,7 @@ tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *acp, int index, int *count,
startover:
nmatch = 0;
+ last = NULL;
mutex_enter(&connfp->connf_lock);
for (tconnp = connfp->connf_head; tconnp != NULL;
diff --git a/usr/src/uts/common/inet/tcp/tcp_output.c b/usr/src/uts/common/inet/tcp/tcp_output.c
index ae9efe863d..7a0472f3dd 100644
--- a/usr/src/uts/common/inet/tcp/tcp_output.c
+++ b/usr/src/uts/common/inet/tcp/tcp_output.c
@@ -1787,7 +1787,7 @@ tcp_send(tcp_t *tcp, const int mss, const int total_hdr_len,
uint32_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time)
{
int num_lso_seg = 1;
- uint_t lso_usable;
+ uint_t lso_usable = 0;
boolean_t do_lso_send = B_FALSE;
tcp_stack_t *tcps = tcp->tcp_tcps;
conn_t *connp = tcp->tcp_connp;
diff --git a/usr/src/uts/common/inet/tcp/tcp_tpi.c b/usr/src/uts/common/inet/tcp/tcp_tpi.c
index dbdc5b8dc7..6b32a0ad27 100644
--- a/usr/src/uts/common/inet/tcp/tcp_tpi.c
+++ b/usr/src/uts/common/inet/tcp/tcp_tpi.c
@@ -154,6 +154,10 @@ tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp, int *do_disconnectp,
opt_offset = tcresp->OPT_offset;
opt_lenp = (t_scalar_t *)&tcresp->OPT_length;
break;
+ default:
+ opt_lenp = 0;
+ opt_offset = 0;
+ break;
}
*t_errorp = 0;
diff --git a/usr/src/uts/common/inet/udp/udp.c b/usr/src/uts/common/inet/udp/udp.c
index 165adcb852..b2183405eb 100644
--- a/usr/src/uts/common/inet/udp/udp.c
+++ b/usr/src/uts/common/inet/udp/udp.c
@@ -4984,6 +4984,8 @@ udp_do_bind(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr,
mlp_type_t addrtype, mlptype;
udp_stack_t *us = udp->udp_us;
+ sin = NULL;
+ sin6 = NULL;
switch (len) {
case sizeof (sin_t): /* Complete IPv4 address */
sin = (sin_t *)sa;
@@ -5697,6 +5699,10 @@ udp_do_connect(conn_t *connp, const struct sockaddr *sa, socklen_t len,
udp = connp->conn_udp;
us = udp->udp_us;
+ sin = NULL;
+ sin6 = NULL;
+ v4dst = INADDR_ANY;
+ flowinfo = 0;
/*
* Address has been verified by the caller
diff --git a/usr/src/uts/common/inet/udp/udp_stats.c b/usr/src/uts/common/inet/udp/udp_stats.c
index 2f5202f693..4ed1ab9773 100644
--- a/usr/src/uts/common/inet/udp/udp_stats.c
+++ b/usr/src/uts/common/inet/udp/udp_stats.c
@@ -93,7 +93,12 @@ udp_snmp_get(queue_t *q, mblk_t *mpctl, boolean_t legacy_req)
*/
mp2ctl = copymsg(mpctl);
- mp_conn_ctl = mp_attr_ctl = mp6_conn_ctl = NULL;
+ mp6_info_ctl = NULL;
+ mp6_attr_ctl = NULL;
+ mp6_conn_ctl = NULL;
+ mp_info_ctl = NULL;
+ mp_attr_ctl = NULL;
+ mp_conn_ctl = NULL;
if (mpctl == NULL ||
(mpdata = mpctl->b_cont) == NULL ||
(mp_conn_ctl = copymsg(mpctl)) == NULL ||
diff --git a/usr/src/uts/common/io/mlxcx/mlxcx.c b/usr/src/uts/common/io/mlxcx/mlxcx.c
new file mode 100644
index 0000000000..12a8d52b3f
--- /dev/null
+++ b/usr/src/uts/common/io/mlxcx/mlxcx.c
@@ -0,0 +1,2765 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2020, The University of Queensland
+ * Copyright (c) 2018, Joyent, Inc.
+ */
+
+/*
+ * Mellanox Connect-X 4/5/6 driver.
+ */
+
+/*
+ * The PRM for this family of parts is freely available, and can be found at:
+ * https://www.mellanox.com/related-docs/user_manuals/ \
+ * Ethernet_Adapters_Programming_Manual.pdf
+ */
+/*
+ * ConnectX glossary
+ * -----------------
+ *
+ * WR Work Request: something we've asked the hardware to do by
+ * creating a Work Queue Entry (WQE), e.g. send or recv a packet
+ *
+ * WQE Work Queue Entry: a descriptor on a work queue descriptor ring
+ *
+ * WQ Work Queue: a descriptor ring that we can place WQEs on, usually
+ * either a Send Queue (SQ) or Receive Queue (RQ). Different WQ
+ * types have different WQE structures, different commands for
+ * creating and destroying them, etc, but share a common context
+ * structure, counter setup and state graph.
+ * SQ Send Queue, a specific type of WQ that sends packets
+ * RQ Receive Queue, a specific type of WQ that receives packets
+ *
+ * CQ Completion Queue: completion of WRs from a WQ are reported to
+ * one of these, as a CQE on its entry ring.
+ * CQE Completion Queue Entry: an entry in a CQ ring. Contains error
+ * info, as well as packet size, the ID of the WQ, and the index
+ * of the WQE which completed. Does not contain any packet data.
+ *
+ * EQ Event Queue: a ring of event structs from the hardware informing
+ * us when particular events happen. Many events can point at
+ * a particular CQ which we should then go look at.
+ * EQE Event Queue Entry: an entry on the EQ ring
+ *
+ * UAR User Access Region, a page of the device's PCI BAR which is
+ * tied to particular EQ/CQ/WQ sets and contains doorbells to
+ * ring to arm them for interrupts or wake them up for new work
+ *
+ * RQT RQ Table, a collection of indexed RQs used to refer to the group
+ * as a single unit (for e.g. hashing/RSS).
+ *
+ * TIR Transport Interface Receive, a bucket of resources for the
+ * reception of packets. TIRs have to point at either a single RQ
+ * or a table of RQs (RQT). They then serve as a target for flow
+ * table entries (FEs). TIRs that point at an RQT also contain the
+ * settings for hashing for RSS.
+ *
+ * TIS Transport Interface Send, a bucket of resources associated with
+ * the transmission of packets. In particular, the temporary
+ * resources used for LSO internally in the card are accounted to
+ * a TIS.
+ *
+ * FT Flow Table, a collection of FEs and FGs that can be referred to
+ * as a single entity (e.g. used as a target from another flow
+ * entry or set as the "root" table to handle incoming or outgoing
+ * packets). Packets arriving at a FT are matched against the
+ * FEs in the table until either one matches with a terminating
+ * action or all FEs are exhausted (it's first-match-wins but with
+ * some actions that are non-terminal, like counting actions).
+ *
+ * FG Flow Group, a group of FEs which share a common "mask" (i.e.
+ * they match on the same attributes of packets coming into the
+ * flow).
+ *
+ * FE Flow Entry, an individual set of values to match against
+ * packets entering the flow table, combined with an action to
+ * take upon a successful match. The action we use most is
+ * "forward", which sends the packets to a TIR or another flow
+ * table and then stops further processing within the FE's FT.
+ *
+ * lkey/mkey A reference to something similar to a page table but in the
+ * device's internal onboard MMU. Since Connect-X parts double as
+ * IB cards (lots of RDMA) they have extensive onboard memory mgmt
+ * features which we try very hard not to use. For our WQEs we use
+ * the "reserved" lkey, which is a special value which indicates
+ * that addresses we give are linear addresses and should not be
+ * translated.
+ *
+ * PD Protection Domain, an IB concept. We have to allocate one to
+ * provide as a parameter for new WQs, but we don't do anything
+ * with it.
+ *
+ * TDOM/TD Transport Domain, an IB concept. We allocate one in order to
+ * provide it as a parameter to TIR/TIS creation, but we don't do
+ * anything with it.
+ */
+/*
+ *
+ * Data flow overview
+ * ------------------
+ *
+ * This driver is a MAC ring-enabled driver which maps rings to send and recv
+ * queues in hardware on the device.
+ *
+ * Each SQ and RQ is set up to report to its own individual CQ, to ensure
+ * sufficient space, and simplify the logic needed to work out which buffer
+ * was completed.
+ *
+ * The CQs are then round-robin allocated onto EQs, of which we set up one per
+ * interrupt that the system gives us for the device. Normally this means we
+ * have 8 EQs.
+ *
+ * When we have >= 8 EQs available, we try to allocate only RX or only TX
+ * CQs on each one. The EQs are chosen for RX and TX in an alternating fashion.
+ *
+ * EQ #0 is reserved for all event types other than completion events, and has
+ * no CQs associated with it at any time. EQs #1 and upwards are only used for
+ * handling CQ completion events.
+ *
+ * +------+ +------+ +------+ +---------+
+ * | SQ 0 |---->| CQ 0 |-----+ | EQ 0 |------> | MSI-X 0 | mlxcx_intr_0
+ * +------+ +------+ | +------+ +---------+
+ * |
+ * +------+ +------+ |
+ * | SQ 1 |---->| CQ 1 |---+ | +------+
+ * +------+ +------+ | +---> | |
+ * | | |
+ * +------+ +------+ | | EQ 1 | +---------+
+ * | SQ 2 |---->| CQ 2 |---------> | |------> | MSI-X 1 | mlxcx_intr_n
+ * +------+ +------+ | +---> | | +---------+
+ * | | +------+
+ * | |
+ * ... | |
+ * | | +------+
+ * +------+ +------+ +-----> | |
+ * | RQ 0 |---->| CQ 3 |---------> | | +---------+
+ * +------+ +------+ | | EQ 2 |------> | MSI-X 2 | mlxcx_intr_n
+ * | | | +---------+
+ * +------+ +------+ | +-> | |
+ * | RQ 1 |---->| CQ 4 |-----+ | +------+
+ * +------+ +------+ |
+ * | ....
+ * +------+ +------+ |
+ * | RQ 2 |---->| CQ 5 |-------+
+ * +------+ +------+
+ *
+ * ... (note this diagram does not show RX-only or TX-only EQs)
+ *
+ * For TX, we advertise all of the SQs we create as plain rings to MAC with
+ * no TX groups. This puts MAC in "virtual group" mode where it will allocate
+ * and use the rings as it sees fit.
+ *
+ * For RX, we advertise actual groups in order to make use of hardware
+ * classification.
+ *
+ * The hardware classification we use is based around Flow Tables, and we
+ * currently ignore all of the eswitch features of the card. The NIC VPORT
+ * is always set to promisc mode so that the eswitch sends us all of the
+ * traffic that arrives on the NIC, and we use flow entries to manage
+ * everything.
+ *
+ * We use 2 layers of flow tables for classification: traffic arrives at the
+ * root RX flow table which contains MAC address filters. Those then send
+ * matched traffic to the per-group L1 VLAN filter tables which contain VLAN
+ * presence and VID filters.
+ *
+ * Since these parts only support doing RSS hashing on a single protocol at a
+ * time, we have to use a third layer of flow tables as well to break traffic
+ * down by L4 and L3 protocol (TCPv6, TCPv4, UDPv6, UDPv4, IPv6, IPv4 etc)
+ * so that it can be sent to the appropriate TIR for hashing.
+ *
+ * Incoming packets
+ * + +---------+ +---------+
+ * | +->| group 0 | | group 0 |
+ * | | | vlan ft | +-->| hash ft |
+ * v | | L1 | | | L2 |
+ * +----+----+ | +---------+ | +---------+ +-----+ +-----+------+
+ * | eswitch | | | | | | TCPv6 |--->| TIR |--->| | RQ0 |
+ * +----+----+ | | | | +---------+ +-----+ | +------+
+ * | | | | | | UDPv6 |--->| TIR |--->| | RQ1 |
+ * | | | | | +---------+ +-----+ | +------+
+ * | | | | | | TCPv4 |--->| TIR |--->| | RQ2 |
+ * v | | | | +---------+ +-----+ | RQT +------+
+ * +----+----+ | +---------+ | | UDPv4 |--->| TIR |--->| | ... |
+ * | root rx | | | default |--+ +---------+ +-----+ | | |
+ * | flow tb | | +---------+ | | IPv6 |--->| TIR |--->| | |
+ * | L0 | | | promisc |--+ +---------+ +-----+ | | |
+ * +---------+ | +---------+ ^ | IPv4 |--->| TIR |--->| | |
+ * | bcast |---|---------------+ +---------+ +-----+ +-----+------+
+ * +---------+ | ^ | other |-+
+ * | MAC 0 |---+ | +---------+ | +-----+ +-----+
+ * +---------+ | +->| TIR |--->| RQ0 |
+ * | MAC 1 |-+ | +-----+ +-----+
+ * +---------+ | +---------------+
+ * | MAC 2 |-+ | ^
+ * +---------+ | | |
+ * | MAC 3 |-+ | +---------+ | +---------+
+ * +---------+ | | | group 1 | | | group 1 |
+ * | ..... | +--->| vlan ft | | +>| hash ft |
+ * | | | | L1 | | | | L2 |
+ * +---------+ | +---------+ | | +---------+ +-----+ +-----+------+
+ * | promisc |---+ | VLAN 0 |----+ | TCPv6 |--->| TIR |--->| | RQ3 |
+ * +---------+ +---------+ | +---------+ +-----+ | +------+
+ * | ..... | | | UDPv6 |--->| TIR |--->| | RQ4 |
+ * | | | +---------+ +-----+ | +------+
+ * | | | | TCPv4 |--->| TIR |--->| | RQ5 |
+ * | | | +---------+ +-----+ | RQT +------+
+ * +---------+ | | UDPv4 |--->| TIR |--->| | ... |
+ * | | | +---------+ +-----+ | | |
+ * +---------+ | | IPv6 |--->| TIR |--->| | |
+ * | promisc |--+ +---------+ +-----+ | | |
+ * +---------+ | IPv4 |--->| TIR |--->| | |
+ * +---------+ +-----+ +-----+------+
+ * | other |-+
+ * +---------+ |
+ * ....... | +-----+ +-----+
+ * +->| TIR |--->| RQ3 |
+ * +-----+ +-----+
+ *
+ * Note that the "promisc" flow entries are only set/enabled when promisc
+ * mode is enabled for the NIC. All promisc flow entries point directly at
+ * group 0's hashing flowtable (so all promisc-only traffic lands on group 0,
+ * the "default group" in MAC).
+ *
+ * The "default" entry in the L1 VLAN filter flow tables is used when there
+ * are no VLANs set for the group, to accept any traffic regardless of tag. It
+ * is deleted as soon as a VLAN filter is added (and re-instated if the
+ * last VLAN filter is removed).
+ *
+ * The actual descriptor ring structures for RX on Connect-X4 don't contain any
+ * space for packet data (they're a collection of scatter pointers only). TX
+ * descriptors contain some space for "inline headers" (and the card requires
+ * us to put at least the L2 Ethernet headers there for the eswitch to look at)
+ * but all the rest of the data comes from the gather pointers.
+ *
+ * When we get completions back they simply contain the ring index number of
+ * the WR (work request) which completed. So, we manage the buffers for actual
+ * packet data completely independently of the descriptors in this driver. When
+ * a WR is enqueued in a WQE (work queue entry), we stamp the packet data buffer
+ * with the WQE index that we put it at, and therefore don't have to look at
+ * the original descriptor at all when handling completions.
+ *
+ * For RX, we create sufficient packet data buffers to fill 150% of the
+ * available descriptors for each ring. These all are pre-set-up for DMA and
+ * have an mblk_t associated with them (with desballoc()).
+ *
+ * For TX we either borrow the mblk's memory and DMA bind it (if the packet is
+ * large enough), or we copy it into a pre-allocated buffer set up in the same
+ * way as for RX.
+ */
+
+/*
+ * Buffer lifecycle: RX
+ * --------------------
+ *
+ * The lifecycle of an mlxcx_buffer_t (packet buffer) used for RX is pretty
+ * straightforward.
+ *
+ * It is created (and has all its memory allocated) at the time of starting up
+ * the RX ring it belongs to. Then it is placed on the "free" list in the
+ * mlxcx_buffer_shard_t associated with its RQ. When mlxcx_rq_refill() wants
+ * more buffers to add to the RQ, it takes one off and marks it as "on WQ"
+ * before making a WQE for it.
+ *
+ * After a completion event occurs, the packet is either discarded (and the
+ * buffer_t returned to the free list), or it is readied for loaning to MAC.
+ *
+ * Once MAC and the rest of the system have finished with the packet, they call
+ * freemsg() on its mblk, which will call mlxcx_buf_mp_return and return the
+ * buffer_t to the free list.
+ *
+ * At detach/teardown time, buffers are only ever destroyed from the free list.
+ *
+ *
+ * +
+ * |
+ * | mlxcx_buf_create
+ * |
+ * v
+ * +----+----+
+ * | created |
+ * +----+----+
+ * |
+ * |
+ * | mlxcx_buf_return
+ * |
+ * v
+ * mlxcx_buf_destroy +----+----+
+ * +---------| free |<---------------+
+ * | +----+----+ |
+ * | | |
+ * | | | mlxcx_buf_return
+ * v | mlxcx_buf_take |
+ * +---+--+ v |
+ * | dead | +---+---+ |
+ * +------+ | on WQ |- - - - - - - - >O
+ * +---+---+ ^
+ * | |
+ * | |
+ * | mlxcx_buf_loan | mlxcx_buf_mp_return
+ * v |
+ * +-------+--------+ |
+ * | on loan to MAC |----------->O
+ * +----------------+ freemsg()
+ *
+ */
+
+/*
+ * Buffer lifecycle: TX
+ * --------------------
+ *
+ * mlxcx_buffer_ts used for TX are divided into two kinds: regular buffers, and
+ * "foreign" buffers.
+ *
+ * The former have their memory allocated and DMA bound by this driver, while
+ * the latter (the "foreign" buffers) are on loan from MAC. Their memory is
+ * not owned by us, though we do DMA bind it (and take responsibility for
+ * un-binding it when we're done with them).
+ *
+ * We use separate mlxcx_buf_shard_ts for foreign and local buffers on each
+ * SQ. Thus, there is a separate free list and mutex for each kind.
+ *
+ * Since a TX packet might consist of multiple mblks, we translate each mblk
+ * into exactly one buffer_t. The buffer_ts are chained together in the same
+ * order as the mblks, using the mlb_tx_chain/mlb_tx_chain_entry list_t.
+ *
+ * Each chain of TX buffers may consist of foreign or driver buffers, in any
+ * mixture.
+ *
+ * The head of a TX buffer chain has mlb_tx_head == itself, which distinguishes
+ * it from the rest of the chain buffers.
+ *
+ * TX buffer chains are always returned to the free list by
+ * mlxcx_buf_return_chain(), which takes care of walking the mlb_tx_chain and
+ * freeing all of the members.
+ *
+ * We only call freemsg() once, on the head of the TX buffer chain's original
+ * mblk. This is true whether we copied it or bound it in a foreign buffer.
+ */
+
+/*
+ * Startup and command interface
+ * -----------------------------
+ *
+ * The command interface is the primary way in which we give control orders to
+ * the hardware (e.g. actions like "create this queue" or "delete this flow
+ * entry"). The command interface is never used to transmit or receive packets
+ * -- that takes place only on the queues that are set up through it.
+ *
+ * In mlxcx_cmd.c we implement our use of the command interface on top of a
+ * simple taskq. Since it's not performance critical, we busy-wait on command
+ * completions and only process a single command at a time.
+ *
+ * If this becomes a problem later we can wire command completions up to EQ 0
+ * once we have interrupts running.
+ *
+ * The startup/attach process for this card involves a bunch of different steps
+ * which are summarised pretty well in the PRM. We have to send a number of
+ * commands which do different things to start the card up, give it some pages
+ * of our own memory for it to use, then start creating all the entities that
+ * we need to use like EQs, CQs, WQs, as well as their dependencies like PDs
+ * and TDoms.
+ */
+
+/*
+ * UARs
+ * ----
+ *
+ * The pages of the PCI BAR other than the first few are reserved for use as
+ * "UAR" sections in this device. Each UAR section can be used as a set of
+ * doorbells for our queues.
+ *
+ * Currently we just make one single UAR for all of our queues. It doesn't
+ * seem to be a major limitation yet.
+ *
+ * When we're sending packets through an SQ, the PRM is not awfully clear about
+ * exactly how we're meant to use the first 16 bytes of the Blueflame buffers
+ * (it's clear on the pattern of alternation you're expected to use between
+ * even and odd for Blueflame sends, but not for regular doorbells).
+ *
+ * Currently we don't do the even-odd alternating pattern for ordinary
+ * doorbells, and we don't use Blueflame at all. This seems to work fine, at
+ * least on Connect-X4 Lx.
+ */
+
+/*
+ * Lock ordering
+ * -------------
+ *
+ * Interrupt side:
+ *
+ * - mleq_mtx
+ * - mlcq_mtx
+ * - mlcq_bufbmtx
+ * - mlwq_mtx
+ * - mlbs_mtx
+ * - mlp_mtx
+ *
+ * GLD side:
+ *
+ * - mlp_mtx
+ * - mlg_mtx
+ * - mlg_*.mlft_mtx
+ * - mlp_*.mlft_mtx
+ * - mlwq_mtx
+ * - mlbs_mtx
+ * - mlcq_bufbmtx
+ * - mleq_mtx
+ * - mlcq_mtx
+ *
+ */
+
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/devops.h>
+#include <sys/sysmacros.h>
+#include <sys/time.h>
+
+#include <sys/mac_provider.h>
+
+#include <mlxcx.h>
+
+CTASSERT((1 << MLXCX_RX_HASH_FT_SIZE_SHIFT) >= MLXCX_TIRS_PER_GROUP);
+
+#define MLXCX_MODULE_NAME "mlxcx"
+/*
+ * We give this to the firmware, so it has to be in a fixed format that it
+ * understands.
+ */
+#define MLXCX_DRIVER_VERSION "illumos,mlxcx,1.0.0,1,000,000000"
+
+/*
+ * Firmware may take a while to reclaim pages. Try a set number of times.
+ */
+clock_t mlxcx_reclaim_delay = 1000 * 50; /* 50 ms in us */
+uint_t mlxcx_reclaim_tries = 100; /* Wait at most 5000ms */
+
+static void *mlxcx_softstate;
+
+/*
+ * Fault detection thresholds.
+ */
+uint_t mlxcx_doorbell_tries = MLXCX_DOORBELL_TRIES_DFLT;
+uint_t mlxcx_stuck_intr_count = MLXCX_STUCK_INTR_COUNT_DFLT;
+
+/*
+ * Load the tunable driver parameters into mlxp->mlx_props. Each value is
+ * looked up as a driver.conf property (DDI_PROP_DONTPASS, so it is not
+ * inherited from parent nodes), falling back to the compile-time
+ * MLXCX_*_DFLT defaults when unset.
+ */
+static void
+mlxcx_load_props(mlxcx_t *mlxp)
+{
+	mlxcx_drv_props_t *p = &mlxp->mlx_props;
+
+	/* Queue sizes are expressed as powers of two (shift counts). */
+	p->mldp_eq_size_shift = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "eq_size_shift",
+	    MLXCX_EQ_SIZE_SHIFT_DFLT);
+	p->mldp_cq_size_shift = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "cq_size_shift",
+	    MLXCX_CQ_SIZE_SHIFT_DFLT);
+	p->mldp_sq_size_shift = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "sq_size_shift",
+	    MLXCX_SQ_SIZE_SHIFT_DFLT);
+	p->mldp_rq_size_shift = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rq_size_shift",
+	    MLXCX_RQ_SIZE_SHIFT_DFLT);
+
+	/* Interrupt/completion moderation knobs. */
+	p->mldp_cqemod_period_usec = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "cqemod_period_usec",
+	    MLXCX_CQEMOD_PERIOD_USEC_DFLT);
+	p->mldp_cqemod_count = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "cqemod_count",
+	    MLXCX_CQEMOD_COUNT_DFLT);
+	p->mldp_intrmod_period_usec = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "intrmod_period_usec",
+	    MLXCX_INTRMOD_PERIOD_USEC_DFLT);
+
+	/* MAC ring-group topology: TX groups/rings. */
+	p->mldp_tx_ngroups = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "tx_ngroups",
+	    MLXCX_TX_NGROUPS_DFLT);
+	p->mldp_tx_nrings_per_group = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "tx_nrings_per_group",
+	    MLXCX_TX_NRINGS_PER_GROUP_DFLT);
+
+	/* RX groups come in two sizes: "large" and "small". */
+	p->mldp_rx_ngroups_large = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rx_ngroups_large",
+	    MLXCX_RX_NGROUPS_LARGE_DFLT);
+	p->mldp_rx_ngroups_small = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rx_ngroups_small",
+	    MLXCX_RX_NGROUPS_SMALL_DFLT);
+	p->mldp_rx_nrings_per_large_group = ddi_getprop(DDI_DEV_T_ANY,
+	    mlxp->mlx_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
+	    "rx_nrings_per_large_group", MLXCX_RX_NRINGS_PER_LARGE_GROUP_DFLT);
+	p->mldp_rx_nrings_per_small_group = ddi_getprop(DDI_DEV_T_ANY,
+	    mlxp->mlx_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
+	    "rx_nrings_per_small_group", MLXCX_RX_NRINGS_PER_SMALL_GROUP_DFLT);
+
+	/* Flow table sizing (also power-of-two shifts). */
+	p->mldp_ftbl_root_size_shift = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "ftbl_root_size_shift",
+	    MLXCX_FTBL_ROOT_SIZE_SHIFT_DFLT);
+
+	/* Packets at or above this size are DMA-bound rather than copied. */
+	p->mldp_tx_bind_threshold = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "tx_bind_threshold",
+	    MLXCX_TX_BIND_THRESHOLD_DFLT);
+
+	p->mldp_ftbl_vlan_size_shift = ddi_getprop(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "ftbl_vlan_size_shift",
+	    MLXCX_FTBL_VLAN_SIZE_SHIFT_DFLT);
+
+	/*
+	 * Periodic health-check intervals; a value of 0 disables the
+	 * corresponding checktimer (see mlxcx_teardown_checktimers()).
+	 */
+	p->mldp_eq_check_interval_sec = ddi_getprop(DDI_DEV_T_ANY,
+	    mlxp->mlx_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
+	    "eq_check_interval_sec", MLXCX_EQ_CHECK_INTERVAL_SEC_DFLT);
+	p->mldp_cq_check_interval_sec = ddi_getprop(DDI_DEV_T_ANY,
+	    mlxp->mlx_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
+	    "cq_check_interval_sec", MLXCX_CQ_CHECK_INTERVAL_SEC_DFLT);
+	p->mldp_wq_check_interval_sec = ddi_getprop(DDI_DEV_T_ANY,
+	    mlxp->mlx_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
+	    "wq_check_interval_sec", MLXCX_WQ_CHECK_INTERVAL_SEC_DFLT);
+}
+
+/*
+ * Log a CE_NOTE-level message, tagged with the device instance when the
+ * mlxcx_t (and its dip) are available, otherwise via plain cmn_err.
+ */
+void
+mlxcx_note(mlxcx_t *mlxp, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	if (mlxp != NULL && mlxp->mlx_dip != NULL) {
+		vdev_err(mlxp->mlx_dip, CE_NOTE, fmt, ap);
+	} else {
+		vcmn_err(CE_NOTE, fmt, ap);
+	}
+	va_end(ap);
+}
+
+/*
+ * Log a CE_WARN-level message; same dispatch rules as mlxcx_note().
+ */
+void
+mlxcx_warn(mlxcx_t *mlxp, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	if (mlxp != NULL && mlxp->mlx_dip != NULL) {
+		vdev_err(mlxp->mlx_dip, CE_WARN, fmt, ap);
+	} else {
+		vcmn_err(CE_WARN, fmt, ap);
+	}
+	va_end(ap);
+}
+
+/*
+ * Log a CE_PANIC-level message and panic the system; same dispatch rules
+ * as mlxcx_note(). Does not return.
+ */
+void
+mlxcx_panic(mlxcx_t *mlxp, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	if (mlxp != NULL && mlxp->mlx_dip != NULL) {
+		vdev_err(mlxp->mlx_dip, CE_PANIC, fmt, ap);
+	} else {
+		vcmn_err(CE_PANIC, fmt, ap);
+	}
+	va_end(ap);
+}
+
+/*
+ * Read a 16-bit register at byte offset `off` from the mapped register BAR.
+ */
+uint16_t
+mlxcx_get16(mlxcx_t *mlxp, uintptr_t off)
+{
+	uintptr_t addr = off + (uintptr_t)mlxp->mlx_regs_base;
+	return (ddi_get16(mlxp->mlx_regs_handle, (void *)addr));
+}
+
+/*
+ * Read a 32-bit register at byte offset `off` from the mapped register BAR.
+ */
+uint32_t
+mlxcx_get32(mlxcx_t *mlxp, uintptr_t off)
+{
+	uintptr_t addr = off + (uintptr_t)mlxp->mlx_regs_base;
+	return (ddi_get32(mlxp->mlx_regs_handle, (void *)addr));
+}
+
+/*
+ * Read a 64-bit register at byte offset `off` from the mapped register BAR.
+ */
+uint64_t
+mlxcx_get64(mlxcx_t *mlxp, uintptr_t off)
+{
+	uintptr_t addr = off + (uintptr_t)mlxp->mlx_regs_base;
+	return (ddi_get64(mlxp->mlx_regs_handle, (void *)addr));
+}
+
+/*
+ * Write a 32-bit register at byte offset `off` in the mapped register BAR.
+ */
+void
+mlxcx_put32(mlxcx_t *mlxp, uintptr_t off, uint32_t val)
+{
+	uintptr_t addr = off + (uintptr_t)mlxp->mlx_regs_base;
+	ddi_put32(mlxp->mlx_regs_handle, (void *)addr, val);
+}
+
+/*
+ * Write a 64-bit register at byte offset `off` in the mapped register BAR.
+ */
+void
+mlxcx_put64(mlxcx_t *mlxp, uintptr_t off, uint64_t val)
+{
+	uintptr_t addr = off + (uintptr_t)mlxp->mlx_regs_base;
+	ddi_put64(mlxp->mlx_regs_handle, (void *)addr, val);
+}
+
+/*
+ * Write a 32-bit doorbell value at offset `off` within the given UAR
+ * section. The UAR offset (mlu_base) is relative to the start of the
+ * register BAR mapping.
+ */
+void
+mlxcx_uar_put32(mlxcx_t *mlxp, mlxcx_uar_t *mlu, uintptr_t off, uint32_t val)
+{
+	/*
+	 * The UAR is always inside the first BAR, which we mapped as
+	 * mlx_regs
+	 */
+	uintptr_t addr = off + (uintptr_t)mlu->mlu_base +
+	    (uintptr_t)mlxp->mlx_regs_base;
+	ddi_put32(mlxp->mlx_regs_handle, (void *)addr, val);
+}
+
+/*
+ * 64-bit variant of mlxcx_uar_put32(); same addressing scheme.
+ */
+void
+mlxcx_uar_put64(mlxcx_t *mlxp, mlxcx_uar_t *mlu, uintptr_t off, uint64_t val)
+{
+	uintptr_t addr = off + (uintptr_t)mlu->mlu_base +
+	    (uintptr_t)mlxp->mlx_regs_base;
+	ddi_put64(mlxp->mlx_regs_handle, (void *)addr, val);
+}
+
+/*
+ * Undo mlxcx_fm_init(): unregister the error callback, tear down PCI
+ * ereport support, and finalize FMA for this instance. A no-op when no
+ * FM capabilities were negotiated.
+ */
+static void
+mlxcx_fm_fini(mlxcx_t *mlxp)
+{
+	if (mlxp->mlx_fm_caps == 0)
+		return;
+
+	if (DDI_FM_ERRCB_CAP(mlxp->mlx_fm_caps))
+		ddi_fm_handler_unregister(mlxp->mlx_dip);
+
+	if (DDI_FM_EREPORT_CAP(mlxp->mlx_fm_caps) ||
+	    DDI_FM_ERRCB_CAP(mlxp->mlx_fm_caps))
+		pci_ereport_teardown(mlxp->mlx_dip);
+
+	ddi_fm_fini(mlxp->mlx_dip);
+
+	mlxp->mlx_fm_caps = 0;
+}
+
+/*
+ * Post an FMA ereport of class "DDI_FM_DEVICE.<detail>" with no payload
+ * beyond the version member. Silently does nothing if ereports are not
+ * among the negotiated FM capabilities.
+ */
+void
+mlxcx_fm_ereport(mlxcx_t *mlxp, const char *detail)
+{
+	uint64_t ena;
+	char buf[FM_MAX_CLASS];
+
+	if (!DDI_FM_EREPORT_CAP(mlxp->mlx_fm_caps))
+		return;
+
+	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
+	ena = fm_ena_generate(0, FM_ENA_FMT1);
+	ddi_fm_ereport_post(mlxp->mlx_dip, buf, ena, DDI_NOSLEEP,
+	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+	    NULL);
+}
+
+/*
+ * FMA error-handling callback, registered in mlxcx_fm_init() when
+ * DDI_FM_ERRCB_CAPABLE is negotiated. Forwards the error to the PCI
+ * ereport machinery and reports our handling status back to the framework.
+ */
+static int
+mlxcx_fm_errcb(dev_info_t *dip, ddi_fm_error_t *err, const void *arg)
+{
+	/*
+	 * as the driver can always deal with an error in any dma or
+	 * access handle, we can just return the fme_status value.
+	 */
+	pci_ereport_post(dip, err, NULL);
+	return (err->fme_status);
+}
+
+/*
+ * Negotiate and initialize FMA support for this instance. The requested
+ * capability set may be restricted via the "fm_capable" property, but is
+ * always clamped to the set this driver actually supports (`def`). When
+ * the result is zero, FMA is left disabled entirely.
+ */
+static void
+mlxcx_fm_init(mlxcx_t *mlxp)
+{
+	ddi_iblock_cookie_t iblk;
+	int def = DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
+	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE;
+
+	mlxp->mlx_fm_caps = ddi_prop_get_int(DDI_DEV_T_ANY, mlxp->mlx_dip,
+	    DDI_PROP_DONTPASS, "fm_capable", def);
+
+	/* A negative property value makes no sense; treat it as "none". */
+	if (mlxp->mlx_fm_caps < 0) {
+		mlxp->mlx_fm_caps = 0;
+	}
+	mlxp->mlx_fm_caps &= def;
+
+	if (mlxp->mlx_fm_caps == 0)
+		return;
+
+	ddi_fm_init(mlxp->mlx_dip, &mlxp->mlx_fm_caps, &iblk);
+	if (DDI_FM_EREPORT_CAP(mlxp->mlx_fm_caps) ||
+	    DDI_FM_ERRCB_CAP(mlxp->mlx_fm_caps)) {
+		pci_ereport_setup(mlxp->mlx_dip);
+	}
+	if (DDI_FM_ERRCB_CAP(mlxp->mlx_fm_caps)) {
+		ddi_fm_handler_register(mlxp->mlx_dip, mlxcx_fm_errcb,
+		    (void *)mlxp);
+	}
+}
+
+/*
+ * Destroy one buffer shard: block until every busy buffer has been
+ * returned to the free list, destroy all free buffers, then tear down
+ * the shard's lists, cv and mutex.
+ *
+ * NOTE(review): the cv_wait here relies on mlbs_free_nonempty being
+ * signalled whenever a buffer leaves the busy list — presumably by
+ * mlxcx_buf_return(); that code is outside this view, so confirm the
+ * wakeup actually fires for the "busy emptied" transition.
+ */
+static void
+mlxcx_mlbs_teardown(mlxcx_t *mlxp, mlxcx_buf_shard_t *s)
+{
+	mlxcx_buffer_t *buf;
+
+	mutex_enter(&s->mlbs_mtx);
+	while (!list_is_empty(&s->mlbs_busy))
+		cv_wait(&s->mlbs_free_nonempty, &s->mlbs_mtx);
+	while ((buf = list_head(&s->mlbs_free)) != NULL) {
+		mlxcx_buf_destroy(mlxp, buf);
+	}
+	list_destroy(&s->mlbs_free);
+	list_destroy(&s->mlbs_busy);
+	mutex_exit(&s->mlbs_mtx);
+
+	cv_destroy(&s->mlbs_free_nonempty);
+	mutex_destroy(&s->mlbs_mtx);
+}
+
+/*
+ * Tear down every buffer shard on the instance-wide shard list, then the
+ * list itself and the kmem cache backing all mlxcx_buffer_ts.
+ */
+static void
+mlxcx_teardown_bufs(mlxcx_t *mlxp)
+{
+	mlxcx_buf_shard_t *s;
+
+	while ((s = list_remove_head(&mlxp->mlx_buf_shards)) != NULL) {
+		mlxcx_mlbs_teardown(mlxp, s);
+		kmem_free(s, sizeof (mlxcx_buf_shard_t));
+	}
+	list_destroy(&mlxp->mlx_buf_shards);
+
+	kmem_cache_destroy(mlxp->mlx_bufs_cache);
+}
+
+/*
+ * Reclaim all pages previously given to the firmware and free them.
+ *
+ * We repeatedly ask the hardware to return up to
+ * MLXCX_MANAGE_PAGES_MAX_PAGES pages, match each returned PA against our
+ * mlx_pages AVL tree, and free the backing DMA memory. If the command
+ * fails outright, or the hardware returns zero pages mlxcx_reclaim_tries
+ * times in a row (sleeping mlxcx_reclaim_delay us between attempts), we
+ * give up and deliberately leak the remainder rather than freeing memory
+ * the device may still write to.
+ */
+static void
+mlxcx_teardown_pages(mlxcx_t *mlxp)
+{
+	uint_t nzeros = 0;
+
+	mutex_enter(&mlxp->mlx_pagemtx);
+
+	while (mlxp->mlx_npages > 0) {
+		int32_t req, ret;
+		uint64_t pas[MLXCX_MANAGE_PAGES_MAX_PAGES];
+
+		ASSERT0(avl_is_empty(&mlxp->mlx_pages));
+		req = MIN(mlxp->mlx_npages, MLXCX_MANAGE_PAGES_MAX_PAGES);
+
+		if (!mlxcx_cmd_return_pages(mlxp, req, pas, &ret)) {
+			mlxcx_warn(mlxp, "hardware refused to return pages, "
+			    "leaking %u remaining pages", mlxp->mlx_npages);
+			goto out;
+		}
+
+		for (int32_t i = 0; i < ret; i++) {
+			mlxcx_dev_page_t *mdp, probe;
+			bzero(&probe, sizeof (probe));
+			probe.mxdp_pa = pas[i];
+
+			mdp = avl_find(&mlxp->mlx_pages, &probe, NULL);
+
+			if (mdp != NULL) {
+				avl_remove(&mlxp->mlx_pages, mdp);
+				mlxp->mlx_npages--;
+				mlxcx_dma_free(&mdp->mxdp_dma);
+				kmem_free(mdp, sizeof (mlxcx_dev_page_t));
+			} else {
+				/* A PA we never handed out is a driver/fw bug. */
+				mlxcx_panic(mlxp, "hardware returned a page "
+				    "with PA 0x%" PRIx64 " but we have no "
+				    "record of giving out such a page", pas[i]);
+			}
+		}
+
+		/*
+		 * If no pages were returned, note that fact.
+		 */
+		if (ret == 0) {
+			nzeros++;
+			if (nzeros > mlxcx_reclaim_tries) {
+				mlxcx_warn(mlxp, "hardware refused to return "
+				    "pages, leaking %u remaining pages",
+				    mlxp->mlx_npages);
+				goto out;
+			}
+			delay(drv_usectohz(mlxcx_reclaim_delay));
+		}
+	}
+
+	avl_destroy(&mlxp->mlx_pages);
+
+out:
+	mutex_exit(&mlxp->mlx_pagemtx);
+	mutex_destroy(&mlxp->mlx_pagemtx);
+}
+
+/*
+ * Allocate and initialize the DMA ring backing an event queue. The number
+ * of entries comes from the eq_size_shift tunable; every entry's owner bit
+ * is pre-set to MLXCX_EQ_OWNER_INIT so the driver can detect which entries
+ * hardware has written. Sets MLXCX_EQ_ALLOC in mleq_state on success.
+ * Returns B_FALSE (with a warning) if DMA allocation fails.
+ */
+static boolean_t
+mlxcx_eq_alloc_dma(mlxcx_t *mlxp, mlxcx_event_queue_t *mleq)
+{
+	ddi_device_acc_attr_t acc;
+	ddi_dma_attr_t attr;
+	boolean_t ret;
+	size_t sz, i;
+
+	VERIFY0(mleq->mleq_state & MLXCX_EQ_ALLOC);
+
+	mleq->mleq_entshift = mlxp->mlx_props.mldp_eq_size_shift;
+	mleq->mleq_nents = (1 << mleq->mleq_entshift);
+	sz = mleq->mleq_nents * sizeof (mlxcx_eventq_ent_t);
+	/* Ring size must be a whole number of hardware pages. */
+	ASSERT3U(sz & (MLXCX_HW_PAGE_SIZE - 1), ==, 0);
+
+	mlxcx_dma_acc_attr(mlxp, &acc);
+	mlxcx_dma_queue_attr(mlxp, &attr);
+
+	ret = mlxcx_dma_alloc(mlxp, &mleq->mleq_dma, &attr, &acc,
+	    B_TRUE, sz, B_TRUE);
+	if (!ret) {
+		mlxcx_warn(mlxp, "failed to allocate EQ memory");
+		return (B_FALSE);
+	}
+
+	mleq->mleq_ent = (mlxcx_eventq_ent_t *)mleq->mleq_dma.mxdb_va;
+
+	for (i = 0; i < mleq->mleq_nents; ++i)
+		mleq->mleq_ent[i].mleqe_owner = MLXCX_EQ_OWNER_INIT;
+
+	mleq->mleq_state |= MLXCX_EQ_ALLOC;
+
+	return (B_TRUE);
+}
+
+/*
+ * Release the DMA ring behind an EQ. The queue must have been allocated,
+ * and if it was ever created in hardware it must already be destroyed —
+ * otherwise the device could still DMA into memory we are freeing.
+ */
+static void
+mlxcx_eq_rele_dma(mlxcx_t *mlxp, mlxcx_event_queue_t *mleq)
+{
+	VERIFY(mleq->mleq_state & MLXCX_EQ_ALLOC);
+	if (mleq->mleq_state & MLXCX_EQ_CREATED)
+		VERIFY(mleq->mleq_state & MLXCX_EQ_DESTROYED);
+
+	mlxcx_dma_free(&mleq->mleq_dma);
+	mleq->mleq_ent = NULL;
+
+	mleq->mleq_state &= ~MLXCX_EQ_ALLOC;
+}
+
+/*
+ * Destroy a flow table and everything in it: delete live entries (in
+ * reverse index order), destroy all flow groups, then the table itself in
+ * hardware, and finally free the host-side memory.
+ *
+ * Must be called with ft->mlft_mtx held; this function releases AND
+ * destroys that mutex (and frees `ft`), so the caller must not touch the
+ * table afterwards.
+ */
+void
+mlxcx_teardown_flow_table(mlxcx_t *mlxp, mlxcx_flow_table_t *ft)
+{
+	mlxcx_flow_group_t *fg;
+	mlxcx_flow_entry_t *fe;
+	int i;
+
+	ASSERT(mutex_owned(&ft->mlft_mtx));
+
+	for (i = ft->mlft_nents - 1; i >= 0; --i) {
+		fe = &ft->mlft_ent[i];
+		if (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
+			if (!mlxcx_cmd_delete_flow_table_entry(mlxp, fe)) {
+				mlxcx_panic(mlxp, "failed to delete flow "
+				    "entry %u on table %u", i,
+				    ft->mlft_num);
+			}
+		}
+	}
+
+	while ((fg = list_remove_head(&ft->mlft_groups)) != NULL) {
+		if (fg->mlfg_state & MLXCX_FLOW_GROUP_CREATED &&
+		    !(fg->mlfg_state & MLXCX_FLOW_GROUP_DESTROYED)) {
+			if (!mlxcx_cmd_destroy_flow_group(mlxp, fg)) {
+				mlxcx_panic(mlxp, "failed to destroy flow "
+				    "group %u", fg->mlfg_num);
+			}
+		}
+		kmem_free(fg, sizeof (mlxcx_flow_group_t));
+	}
+	list_destroy(&ft->mlft_groups);
+	if (ft->mlft_state & MLXCX_FLOW_TABLE_CREATED &&
+	    !(ft->mlft_state & MLXCX_FLOW_TABLE_DESTROYED)) {
+		if (!mlxcx_cmd_destroy_flow_table(mlxp, ft)) {
+			mlxcx_panic(mlxp, "failed to destroy flow table %u",
+			    ft->mlft_num);
+		}
+	}
+	kmem_free(ft->mlft_ent, ft->mlft_entsize);
+	ft->mlft_ent = NULL;
+	mutex_exit(&ft->mlft_mtx);
+	mutex_destroy(&ft->mlft_mtx);
+	kmem_free(ft, sizeof (mlxcx_flow_table_t));
+}
+
+/*
+ * Tear down every initialized port: destroy its root RX flow table (which
+ * takes ownership of the table's mutex and memory), destroy the port mutex
+ * and clear its init flag, then free the whole ports array.
+ */
+static void
+mlxcx_teardown_ports(mlxcx_t *mlxp)
+{
+	uint_t i;
+	mlxcx_port_t *p;
+	mlxcx_flow_table_t *ft;
+
+	for (i = 0; i < mlxp->mlx_nports; ++i) {
+		p = &mlxp->mlx_ports[i];
+		if (!(p->mlp_init & MLXCX_PORT_INIT))
+			continue;
+		mutex_enter(&p->mlp_mtx);
+		if ((ft = p->mlp_rx_flow) != NULL) {
+			mutex_enter(&ft->mlft_mtx);
+			/*
+			 * teardown_flow_table() will destroy the mutex, so
+			 * we don't release it here.
+			 */
+			mlxcx_teardown_flow_table(mlxp, ft);
+		}
+		mutex_exit(&p->mlp_mtx);
+		mutex_destroy(&p->mlp_mtx);
+		p->mlp_init &= ~MLXCX_PORT_INIT;
+	}
+
+	kmem_free(mlxp->mlx_ports, mlxp->mlx_ports_size);
+	mlxp->mlx_ports = NULL;
+}
+
+/*
+ * Tear down every work queue on the instance list, then the list itself.
+ * (mlxcx_wq_teardown() is expected to unlink the WQ; we re-read the head
+ * each iteration rather than removing it here.)
+ */
+static void
+mlxcx_teardown_wqs(mlxcx_t *mlxp)
+{
+	mlxcx_work_queue_t *mlwq;
+
+	while ((mlwq = list_head(&mlxp->mlx_wqs)) != NULL) {
+		mlxcx_wq_teardown(mlxp, mlwq);
+	}
+	list_destroy(&mlxp->mlx_wqs);
+}
+
+/*
+ * Tear down every completion queue on the instance list, then the list
+ * itself; same head-re-read pattern as mlxcx_teardown_wqs().
+ */
+static void
+mlxcx_teardown_cqs(mlxcx_t *mlxp)
+{
+	mlxcx_completion_queue_t *mlcq;
+
+	while ((mlcq = list_head(&mlxp->mlx_cqs)) != NULL) {
+		mlxcx_cq_teardown(mlxp, mlcq);
+	}
+	list_destroy(&mlxp->mlx_cqs);
+}
+
+/*
+ * Tear down all event queues (one per interrupt vector): destroy each
+ * created-but-not-yet-destroyed EQ in hardware (warn, don't panic, on
+ * failure) and release its DMA ring.
+ */
+static void
+mlxcx_teardown_eqs(mlxcx_t *mlxp)
+{
+	mlxcx_event_queue_t *mleq;
+	uint_t i;
+
+	for (i = 0; i < mlxp->mlx_intr_count; ++i) {
+		mleq = &mlxp->mlx_eqs[i];
+		mutex_enter(&mleq->mleq_mtx);
+		if ((mleq->mleq_state & MLXCX_EQ_CREATED) &&
+		    !(mleq->mleq_state & MLXCX_EQ_DESTROYED)) {
+			if (!mlxcx_cmd_destroy_eq(mlxp, mleq)) {
+				mlxcx_warn(mlxp, "failed to destroy "
+				    "event queue idx %u eqn %u",
+				    i, mleq->mleq_num);
+			}
+		}
+		if (mleq->mleq_state & MLXCX_EQ_ALLOC) {
+			mlxcx_eq_rele_dma(mlxp, mleq);
+		}
+		mutex_exit(&mleq->mleq_mtx);
+	}
+}
+
+/*
+ * Remove the periodic health-check timers. Each timer only exists when
+ * its interval property is > 0 (see mlxcx_setup_checktimers()), so the
+ * same conditions gate deletion here.
+ */
+static void
+mlxcx_teardown_checktimers(mlxcx_t *mlxp)
+{
+	if (mlxp->mlx_props.mldp_eq_check_interval_sec > 0)
+		ddi_periodic_delete(mlxp->mlx_eq_checktimer);
+	if (mlxp->mlx_props.mldp_cq_check_interval_sec > 0)
+		ddi_periodic_delete(mlxp->mlx_cq_checktimer);
+	if (mlxp->mlx_props.mldp_wq_check_interval_sec > 0)
+		ddi_periodic_delete(mlxp->mlx_wq_checktimer);
+}
+
+/*
+ * Full instance teardown, used both on detach and on mid-attach failure.
+ *
+ * mlx_attach is a bitmask recording which attach stages completed; each
+ * stage is undone here in reverse attach order and its bit cleared, so
+ * this function is safe to call with any partially-attached state. At the
+ * end, no bits may remain, and the softstate is freed.
+ */
+static void
+mlxcx_teardown(mlxcx_t *mlxp)
+{
+	uint_t i;
+	dev_info_t *dip = mlxp->mlx_dip;
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_GROUPS) {
+		mlxcx_teardown_groups(mlxp);
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_GROUPS;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_CHKTIMERS) {
+		mlxcx_teardown_checktimers(mlxp);
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_CHKTIMERS;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_WQS) {
+		mlxcx_teardown_wqs(mlxp);
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_WQS;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_CQS) {
+		mlxcx_teardown_cqs(mlxp);
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_CQS;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_BUFS) {
+		mlxcx_teardown_bufs(mlxp);
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_BUFS;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_PORTS) {
+		mlxcx_teardown_ports(mlxp);
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_PORTS;
+	}
+
+	/* EQs must go before the interrupts that service them. */
+	if (mlxp->mlx_attach & MLXCX_ATTACH_INTRS) {
+		mlxcx_teardown_eqs(mlxp);
+		mlxcx_intr_teardown(mlxp);
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_INTRS;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_UAR_PD_TD) {
+		if (mlxp->mlx_uar.mlu_allocated) {
+			if (!mlxcx_cmd_dealloc_uar(mlxp, &mlxp->mlx_uar)) {
+				mlxcx_warn(mlxp, "failed to release UAR");
+			}
+			for (i = 0; i < MLXCX_BF_PER_UAR; ++i)
+				mutex_destroy(&mlxp->mlx_uar.mlu_bf[i].mbf_mtx);
+		}
+		if (mlxp->mlx_pd.mlpd_allocated &&
+		    !mlxcx_cmd_dealloc_pd(mlxp, &mlxp->mlx_pd)) {
+			mlxcx_warn(mlxp, "failed to release PD");
+		}
+		if (mlxp->mlx_tdom.mltd_allocated &&
+		    !mlxcx_cmd_dealloc_tdom(mlxp, &mlxp->mlx_tdom)) {
+			mlxcx_warn(mlxp, "failed to release TDOM");
+		}
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_UAR_PD_TD;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_INIT_HCA) {
+		if (!mlxcx_cmd_teardown_hca(mlxp)) {
+			mlxcx_warn(mlxp, "failed to send teardown HCA "
+			    "command during device detach");
+		}
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_INIT_HCA;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_PAGE_LIST) {
+		mlxcx_teardown_pages(mlxp);
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_PAGE_LIST;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_ENABLE_HCA) {
+		if (!mlxcx_cmd_disable_hca(mlxp)) {
+			mlxcx_warn(mlxp, "failed to send DISABLE HCA command "
+			    "during device detach");
+		}
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_ENABLE_HCA;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_CMD) {
+		mlxcx_cmd_queue_fini(mlxp);
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_CMD;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_CAPS) {
+		kmem_free(mlxp->mlx_caps, sizeof (mlxcx_caps_t));
+		mlxp->mlx_caps = NULL;
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_CAPS;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_REGS) {
+		ddi_regs_map_free(&mlxp->mlx_regs_handle);
+		mlxp->mlx_regs_handle = NULL;
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_REGS;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_PCI_CONFIG) {
+		pci_config_teardown(&mlxp->mlx_cfg_handle);
+		mlxp->mlx_cfg_handle = NULL;
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_PCI_CONFIG;
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_FM) {
+		mlxcx_fm_fini(mlxp);
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_FM;
+	}
+
+	/* Every attach stage must now be undone. */
+	VERIFY3S(mlxp->mlx_attach, ==, 0);
+	ddi_soft_state_free(mlxcx_softstate, mlxp->mlx_inst);
+	ddi_set_driver_private(dip, NULL);
+}
+
+/*
+ * Map the device's main register BAR (register set MLXCX_REG_NUMBER) into
+ * mlx_regs_base/mlx_regs_handle, using big-endian, strictly-ordered access
+ * attributes, with DDI_FLAGERR_ACC when access-check FM capability was
+ * negotiated. Returns B_FALSE (with a warning) on failure.
+ */
+static boolean_t
+mlxcx_regs_map(mlxcx_t *mlxp)
+{
+	off_t memsize;
+	int ret;
+	ddi_device_acc_attr_t da;
+
+	if (ddi_dev_regsize(mlxp->mlx_dip, MLXCX_REG_NUMBER, &memsize) !=
+	    DDI_SUCCESS) {
+		mlxcx_warn(mlxp, "failed to get register set size");
+		return (B_FALSE);
+	}
+
+	/*
+	 * All data in the main BAR is kept in big-endian even though it's a PCI
+	 * device.
+	 */
+	bzero(&da, sizeof (ddi_device_acc_attr_t));
+	da.devacc_attr_version = DDI_DEVICE_ATTR_V0;
+	da.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
+	da.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
+	if (DDI_FM_ACC_ERR_CAP(mlxp->mlx_fm_caps)) {
+		da.devacc_attr_access = DDI_FLAGERR_ACC;
+	} else {
+		da.devacc_attr_access = DDI_DEFAULT_ACC;
+	}
+
+	ret = ddi_regs_map_setup(mlxp->mlx_dip, MLXCX_REG_NUMBER,
+	    &mlxp->mlx_regs_base, 0, memsize, &da, &mlxp->mlx_regs_handle);
+
+	if (ret != DDI_SUCCESS) {
+		mlxcx_warn(mlxp, "failed to map device registers: %d", ret);
+		return (B_FALSE);
+	}
+
+	return (B_TRUE);
+}
+
+/*
+ * Negotiate the Interface Step Sequence ID (ISSI) with firmware. The
+ * queried value is a bit vector of supported versions; we require that
+ * the bit for MLXCX_CURRENT_ISSI is set, then commit to that version
+ * with a SET_ISSI command. Returns B_FALSE on any failure or mismatch.
+ */
+static boolean_t
+mlxcx_check_issi(mlxcx_t *mlxp)
+{
+	uint32_t issi;
+
+	if (!mlxcx_cmd_query_issi(mlxp, &issi)) {
+		mlxcx_warn(mlxp, "failed to get ISSI");
+		return (B_FALSE);
+	}
+
+	if ((issi & (1 << MLXCX_CURRENT_ISSI)) == 0) {
+		mlxcx_warn(mlxp, "hardware does not support software ISSI, "
+		    "hw vector 0x%x, sw version %u", issi, MLXCX_CURRENT_ISSI);
+		return (B_FALSE);
+	}
+
+	if (!mlxcx_cmd_set_issi(mlxp, MLXCX_CURRENT_ISSI)) {
+		mlxcx_warn(mlxp, "failed to set ISSI to %u",
+		    MLXCX_CURRENT_ISSI);
+		return (B_FALSE);
+	}
+
+	return (B_TRUE);
+}
+
+/*
+ * Allocate `npages` hardware pages (MLXCX_HW_PAGE_SIZE each, DMA-bound)
+ * and hand them to the firmware in MLXCX_MANAGE_PAGES_MAX_PAGES-sized
+ * batches, tracking each page in the mlx_pages AVL tree by physical
+ * address. On any failure, all not-yet-given pages are freed and B_FALSE
+ * is returned; pages already accepted by firmware stay recorded in the
+ * tree (they are reclaimed later by mlxcx_teardown_pages()).
+ */
+boolean_t
+mlxcx_give_pages(mlxcx_t *mlxp, int32_t npages)
+{
+	ddi_device_acc_attr_t acc;
+	ddi_dma_attr_t attr;
+	int32_t i;
+	list_t plist;
+	mlxcx_dev_page_t *mdp;
+	const ddi_dma_cookie_t *ck;
+
+	/*
+	 * If there are no pages required, then we're done here.
+	 */
+	if (npages <= 0) {
+		return (B_TRUE);
+	}
+
+	list_create(&plist, sizeof (mlxcx_dev_page_t),
+	    offsetof(mlxcx_dev_page_t, mxdp_list));
+
+	for (i = 0; i < npages; i++) {
+		mdp = kmem_zalloc(sizeof (mlxcx_dev_page_t), KM_SLEEP);
+		mlxcx_dma_acc_attr(mlxp, &acc);
+		mlxcx_dma_page_attr(mlxp, &attr);
+		if (!mlxcx_dma_alloc(mlxp, &mdp->mxdp_dma, &attr, &acc,
+		    B_TRUE, MLXCX_HW_PAGE_SIZE, B_TRUE)) {
+			mlxcx_warn(mlxp, "failed to allocate 4k page %u/%u", i,
+			    npages);
+			kmem_free(mdp, sizeof (mlxcx_dev_page_t));
+			goto cleanup_npages;
+		}
+		/* Page attrs require a single cookie; record its PA. */
+		ck = mlxcx_dma_cookie_one(&mdp->mxdp_dma);
+		mdp->mxdp_pa = ck->dmac_laddress;
+
+		list_insert_tail(&plist, mdp);
+	}
+
+	/*
+	 * Now that all of the pages have been allocated, give them to hardware
+	 * in chunks.
+	 */
+	while (npages > 0) {
+		mlxcx_dev_page_t *pages[MLXCX_MANAGE_PAGES_MAX_PAGES];
+		int32_t togive = MIN(MLXCX_MANAGE_PAGES_MAX_PAGES, npages);
+
+		for (i = 0; i < togive; i++) {
+			pages[i] = list_remove_head(&plist);
+		}
+
+		if (!mlxcx_cmd_give_pages(mlxp,
+		    MLXCX_MANAGE_PAGES_OPMOD_GIVE_PAGES, togive, pages)) {
+			mlxcx_warn(mlxp, "!hardware refused our gift of %u "
+			    "pages!", togive);
+			/* Put this batch back so cleanup can free it. */
+			for (i = 0; i < togive; i++) {
+				list_insert_tail(&plist, pages[i]);
+			}
+			goto cleanup_npages;
+		}
+
+		mutex_enter(&mlxp->mlx_pagemtx);
+		for (i = 0; i < togive; i++) {
+			avl_add(&mlxp->mlx_pages, pages[i]);
+		}
+		mlxp->mlx_npages += togive;
+		mutex_exit(&mlxp->mlx_pagemtx);
+		npages -= togive;
+	}
+
+	list_destroy(&plist);
+
+	return (B_TRUE);
+
+cleanup_npages:
+	while ((mdp = list_remove_head(&plist)) != NULL) {
+		mlxcx_dma_free(&mdp->mxdp_dma);
+		kmem_free(mdp, sizeof (mlxcx_dev_page_t));
+	}
+	list_destroy(&plist);
+	return (B_FALSE);
+}
+
+/*
+ * Query how many pages the firmware wants for the given opmod `type`
+ * (e.g. boot/init pages) and give it that many via mlxcx_give_pages().
+ */
+static boolean_t
+mlxcx_init_pages(mlxcx_t *mlxp, uint_t type)
+{
+	int32_t npages;
+
+	if (!mlxcx_cmd_query_pages(mlxp, type, &npages)) {
+		mlxcx_warn(mlxp, "failed to determine boot pages");
+		return (B_FALSE);
+	}
+
+	return (mlxcx_give_pages(mlxp, npages));
+}
+
+/*
+ * kmem cache constructor for mlxcx_buffer_t. Zeroes the object, links it
+ * back to its mlxcx_t, marks it MLXCX_BUFFER_INIT and initializes the TX
+ * chain list. `kmflags` is unused; construction cannot fail.
+ */
+static int
+mlxcx_bufs_cache_constr(void *arg, void *cookie, int kmflags)
+{
+	mlxcx_t *mlxp = cookie;
+	mlxcx_buffer_t *b = arg;
+
+	bzero(b, sizeof (mlxcx_buffer_t));
+	b->mlb_mlx = mlxp;
+	b->mlb_state = MLXCX_BUFFER_INIT;
+	list_create(&b->mlb_tx_chain, sizeof (mlxcx_buffer_t),
+	    offsetof(mlxcx_buffer_t, mlb_tx_chain_entry));
+
+	return (0);
+}
+
+/*
+ * kmem cache destructor for mlxcx_buffer_t. Verifies the buffer has been
+ * returned to its pristine INIT state before the list is destroyed.
+ */
+static void
+mlxcx_bufs_cache_destr(void *arg, void *cookie)
+{
+	mlxcx_t *mlxp = cookie;
+	mlxcx_buffer_t *b = arg;
+	VERIFY3P(b->mlb_mlx, ==, mlxp);
+	VERIFY(b->mlb_state == MLXCX_BUFFER_INIT);
+	list_destroy(&b->mlb_tx_chain);
+}
+
+/*
+ * Allocate and initialize a new buffer shard (free/busy lists, mutex at
+ * interrupt priority, and the free-nonempty cv), and append it to the
+ * instance-wide shard list so mlxcx_teardown_bufs() can find it.
+ */
+mlxcx_buf_shard_t *
+mlxcx_mlbs_create(mlxcx_t *mlxp)
+{
+	mlxcx_buf_shard_t *s;
+
+	s = kmem_zalloc(sizeof (mlxcx_buf_shard_t), KM_SLEEP);
+
+	mutex_init(&s->mlbs_mtx, NULL, MUTEX_DRIVER,
+	    DDI_INTR_PRI(mlxp->mlx_intr_pri));
+	list_create(&s->mlbs_busy, sizeof (mlxcx_buffer_t),
+	    offsetof(mlxcx_buffer_t, mlb_entry));
+	list_create(&s->mlbs_free, sizeof (mlxcx_buffer_t),
+	    offsetof(mlxcx_buffer_t, mlb_entry));
+	cv_init(&s->mlbs_free_nonempty, NULL, CV_DRIVER, NULL);
+
+	list_insert_tail(&mlxp->mlx_buf_shards, s);
+
+	return (s);
+}
+
+/*
+ * Create the per-instance kmem cache for mlxcx_buffer_ts (named after the
+ * instance number) and the empty shard list. Always succeeds; returns
+ * boolean_t only for symmetry with the other setup steps.
+ */
+static boolean_t
+mlxcx_setup_bufs(mlxcx_t *mlxp)
+{
+	char namebuf[KSTAT_STRLEN];
+
+	(void) snprintf(namebuf, KSTAT_STRLEN, "mlxcx%d_bufs_cache",
+	    ddi_get_instance(mlxp->mlx_dip));
+	mlxp->mlx_bufs_cache = kmem_cache_create(namebuf,
+	    sizeof (mlxcx_buffer_t), sizeof (uint64_t),
+	    mlxcx_bufs_cache_constr, mlxcx_bufs_cache_destr,
+	    NULL, mlxp, NULL, 0);
+
+	list_create(&mlxp->mlx_buf_shards, sizeof (mlxcx_buf_shard_t),
+	    offsetof(mlxcx_buf_shard_t, mlbs_entry));
+
+	return (B_TRUE);
+}
+
+/*
+ * Post a "qstate.err" ereport describing a queue (EQ/CQ/SQ/RQ) found in a
+ * bad hardware state, including the queue type, number, and both string
+ * and numeric forms of the state, then mark the service degraded. No-op
+ * when ereports are not FM-capable.
+ */
+static void
+mlxcx_fm_qstate_ereport(mlxcx_t *mlxp, const char *qtype, uint32_t qnum,
+    const char *state, uint8_t statenum)
+{
+	uint64_t ena;
+	char buf[FM_MAX_CLASS];
+
+	if (!DDI_FM_EREPORT_CAP(mlxp->mlx_fm_caps))
+		return;
+
+	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
+	    MLXCX_FM_SERVICE_MLXCX, "qstate.err");
+	ena = fm_ena_generate(0, FM_ENA_FMT1);
+
+	ddi_fm_ereport_post(mlxp->mlx_dip, buf, ena, DDI_NOSLEEP,
+	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+	    "state", DATA_TYPE_STRING, state,
+	    "state_num", DATA_TYPE_UINT8, statenum,
+	    "qtype", DATA_TYPE_STRING, qtype,
+	    "qnum", DATA_TYPE_UINT32, qnum,
+	    NULL);
+	ddi_fm_service_impact(mlxp->mlx_dip, DDI_SERVICE_DEGRADED);
+}
+
+/*
+ * Periodic EQ health check (ddi_periodic callback, interval set by the
+ * eq_check_interval_sec property). For every live EQ we query its
+ * hardware context and:
+ *  - post a qstate ereport if the status is not OK;
+ *  - detect a stuck (disarmed) EQ: if the driver believes the EQ is armed
+ *    but hardware disagrees, and the consumer counter hasn't moved across
+ *    three consecutive checks, post a DEVICE_STALL ereport.
+ */
+static void
+mlxcx_eq_check(void *arg)
+{
+	mlxcx_t *mlxp = (mlxcx_t *)arg;
+	mlxcx_event_queue_t *eq;
+	mlxcx_eventq_ctx_t ctx;
+	const char *str;
+
+	uint_t i;
+
+	for (i = 0; i < mlxp->mlx_intr_count; ++i) {
+		eq = &mlxp->mlx_eqs[i];
+		if (!(eq->mleq_state & MLXCX_EQ_CREATED) ||
+		    (eq->mleq_state & MLXCX_EQ_DESTROYED))
+			continue;
+		mutex_enter(&eq->mleq_mtx);
+		if (!mlxcx_cmd_query_eq(mlxp, eq, &ctx)) {
+			mutex_exit(&eq->mleq_mtx);
+			continue;
+		}
+
+		str = "???";
+		switch (ctx.mleqc_status) {
+		case MLXCX_EQ_STATUS_OK:
+			break;
+		case MLXCX_EQ_STATUS_WRITE_FAILURE:
+			str = "WRITE_FAILURE";
+			break;
+		}
+		if (ctx.mleqc_status != MLXCX_EQ_STATUS_OK) {
+			mlxcx_fm_qstate_ereport(mlxp, "event",
+			    eq->mleq_num, str, ctx.mleqc_status);
+			mlxcx_warn(mlxp, "EQ %u is in bad status: %x (%s)",
+			    eq->mleq_intr_index, ctx.mleqc_status, str);
+		}
+
+		if (ctx.mleqc_state != MLXCX_EQ_ST_ARMED &&
+		    (eq->mleq_state & MLXCX_EQ_ARMED)) {
+			if (eq->mleq_cc == eq->mleq_check_disarm_cc &&
+			    ++eq->mleq_check_disarm_cnt >= 3) {
+				mlxcx_fm_ereport(mlxp, DDI_FM_DEVICE_STALL);
+				mlxcx_warn(mlxp, "EQ %u isn't armed",
+				    eq->mleq_intr_index);
+			}
+			eq->mleq_check_disarm_cc = eq->mleq_cc;
+		} else {
+			eq->mleq_check_disarm_cc = 0;
+			eq->mleq_check_disarm_cnt = 0;
+		}
+
+		mutex_exit(&eq->mleq_mtx);
+	}
+}
+
+/*
+ * Periodic CQ health check (ddi_periodic callback). For each live CQ not
+ * already reported (mlcq_fm_repd_qstate) and not being torn down, query
+ * its hardware context and:
+ *  - ereport + warn (once) on a bad status, tagging the message with the
+ *    direction ("rx "/"tx ") of any attached work queue;
+ *  - detect a stuck disarmed CQ exactly like mlxcx_eq_check() does for
+ *    EQs, except a CQ in polling mode is legitimately unarmed and skipped.
+ */
+static void
+mlxcx_cq_check(void *arg)
+{
+	mlxcx_t *mlxp = (mlxcx_t *)arg;
+	mlxcx_completion_queue_t *cq;
+	mlxcx_completionq_ctx_t ctx;
+	const char *str, *type;
+	uint_t v;
+
+	for (cq = list_head(&mlxp->mlx_cqs); cq != NULL;
+	    cq = list_next(&mlxp->mlx_cqs, cq)) {
+		mutex_enter(&cq->mlcq_mtx);
+		if (!(cq->mlcq_state & MLXCX_CQ_CREATED) ||
+		    (cq->mlcq_state & MLXCX_CQ_DESTROYED) ||
+		    (cq->mlcq_state & MLXCX_CQ_TEARDOWN)) {
+			mutex_exit(&cq->mlcq_mtx);
+			continue;
+		}
+		if (cq->mlcq_fm_repd_qstate) {
+			mutex_exit(&cq->mlcq_mtx);
+			continue;
+		}
+		if (!mlxcx_cmd_query_cq(mlxp, cq, &ctx)) {
+			mutex_exit(&cq->mlcq_mtx);
+			continue;
+		}
+		if (cq->mlcq_wq != NULL) {
+			mlxcx_work_queue_t *wq = cq->mlcq_wq;
+			if (wq->mlwq_type == MLXCX_WQ_TYPE_RECVQ)
+				type = "rx ";
+			else if (wq->mlwq_type == MLXCX_WQ_TYPE_SENDQ)
+				type = "tx ";
+			else
+				type = "";
+		} else {
+			type = "";
+		}
+
+		str = "???";
+		v = get_bits32(ctx.mlcqc_flags, MLXCX_CQ_CTX_STATUS);
+		switch (v) {
+		case MLXCX_CQC_STATUS_OK:
+			break;
+		case MLXCX_CQC_STATUS_OVERFLOW:
+			str = "OVERFLOW";
+			break;
+		case MLXCX_CQC_STATUS_WRITE_FAIL:
+			str = "WRITE_FAIL";
+			break;
+		case MLXCX_CQC_STATUS_INVALID:
+			str = "INVALID";
+			break;
+		}
+		if (v != MLXCX_CQC_STATUS_OK) {
+			mlxcx_fm_qstate_ereport(mlxp, "completion",
+			    cq->mlcq_num, str, v);
+			mlxcx_warn(mlxp, "%sCQ 0x%x is in bad status: %x (%s)",
+			    type, cq->mlcq_num, v, str);
+			cq->mlcq_fm_repd_qstate = B_TRUE;
+		}
+
+		v = get_bits32(ctx.mlcqc_flags, MLXCX_CQ_CTX_STATE);
+		if (v != MLXCX_CQC_STATE_ARMED &&
+		    (cq->mlcq_state & MLXCX_CQ_ARMED) &&
+		    !(cq->mlcq_state & MLXCX_CQ_POLLING)) {
+			if (cq->mlcq_cc == cq->mlcq_check_disarm_cc &&
+			    ++cq->mlcq_check_disarm_cnt >= 3) {
+				mlxcx_fm_ereport(mlxp, DDI_FM_DEVICE_STALL);
+				mlxcx_warn(mlxp, "%sCQ 0x%x (%p) isn't armed",
+				    type, cq->mlcq_num, cq);
+			}
+			cq->mlcq_check_disarm_cc = cq->mlcq_cc;
+		} else {
+			cq->mlcq_check_disarm_cnt = 0;
+			cq->mlcq_check_disarm_cc = 0;
+		}
+		mutex_exit(&cq->mlcq_mtx);
+	}
+}
+
+/*
+ * Health-check one send queue (caller holds mlwq_mtx). Queries the SQ
+ * hardware context and posts a qstate ereport whenever the hardware state
+ * disagrees with the driver's MLXCX_WQ_STARTED notion (RST while started,
+ * RDY while stopped), or is ERR/unknown. Sets mlwq_fm_repd_qstate so
+ * mlxcx_wq_check() reports each queue at most once.
+ */
+void
+mlxcx_check_sq(mlxcx_t *mlxp, mlxcx_work_queue_t *sq)
+{
+	mlxcx_sq_ctx_t ctx;
+	mlxcx_sq_state_t state;
+
+	ASSERT(mutex_owned(&sq->mlwq_mtx));
+
+	if (!mlxcx_cmd_query_sq(mlxp, sq, &ctx))
+		return;
+
+	/* The SQ must still be attached to the CQ we created it with. */
+	ASSERT3U(from_be24(ctx.mlsqc_cqn), ==, sq->mlwq_cq->mlcq_num);
+	state = get_bits32(ctx.mlsqc_flags, MLXCX_SQ_STATE);
+	switch (state) {
+	case MLXCX_SQ_STATE_RST:
+		if (sq->mlwq_state & MLXCX_WQ_STARTED) {
+			mlxcx_fm_qstate_ereport(mlxp, "send",
+			    sq->mlwq_num, "RST", state);
+			sq->mlwq_fm_repd_qstate = B_TRUE;
+		}
+		break;
+	case MLXCX_SQ_STATE_RDY:
+		if (!(sq->mlwq_state & MLXCX_WQ_STARTED)) {
+			mlxcx_fm_qstate_ereport(mlxp, "send",
+			    sq->mlwq_num, "RDY", state);
+			sq->mlwq_fm_repd_qstate = B_TRUE;
+		}
+		break;
+	case MLXCX_SQ_STATE_ERR:
+		mlxcx_fm_qstate_ereport(mlxp, "send",
+		    sq->mlwq_num, "ERR", state);
+		sq->mlwq_fm_repd_qstate = B_TRUE;
+		break;
+	default:
+		mlxcx_fm_qstate_ereport(mlxp, "send",
+		    sq->mlwq_num, "???", state);
+		sq->mlwq_fm_repd_qstate = B_TRUE;
+		break;
+	}
+}
+
+/*
+ * Periodic health check for a receive queue: query the hardware RQ
+ * context and compare its state against our software state, raising an
+ * FMA ereport on any mismatch or hardware error state.
+ *
+ * Caller must hold the work queue lock (mlwq_mtx).
+ */
+void
+mlxcx_check_rq(mlxcx_t *mlxp, mlxcx_work_queue_t *rq)
+{
+	mlxcx_rq_ctx_t ctx;
+	mlxcx_rq_state_t state;
+
+	ASSERT(mutex_owned(&rq->mlwq_mtx));
+
+	/* If the query command fails it reports its own errors. */
+	if (!mlxcx_cmd_query_rq(mlxp, rq, &ctx))
+		return;
+
+	/* The hardware's idea of our CQ binding must match ours. */
+	ASSERT3U(from_be24(ctx.mlrqc_cqn), ==, rq->mlwq_cq->mlcq_num);
+	state = get_bits32(ctx.mlrqc_flags, MLXCX_RQ_STATE);
+	switch (state) {
+	case MLXCX_RQ_STATE_RST:
+		/* RST is only wrong if we believe the queue is started. */
+		if (rq->mlwq_state & MLXCX_WQ_STARTED) {
+			mlxcx_fm_qstate_ereport(mlxp, "receive",
+			    rq->mlwq_num, "RST", state);
+			rq->mlwq_fm_repd_qstate = B_TRUE;
+		}
+		break;
+	case MLXCX_RQ_STATE_RDY:
+		/* Conversely, RDY is only wrong if we think it's stopped. */
+		if (!(rq->mlwq_state & MLXCX_WQ_STARTED)) {
+			mlxcx_fm_qstate_ereport(mlxp, "receive",
+			    rq->mlwq_num, "RDY", state);
+			rq->mlwq_fm_repd_qstate = B_TRUE;
+		}
+		break;
+	case MLXCX_RQ_STATE_ERR:
+		mlxcx_fm_qstate_ereport(mlxp, "receive",
+		    rq->mlwq_num, "ERR", state);
+		rq->mlwq_fm_repd_qstate = B_TRUE;
+		break;
+	default:
+		/* Unknown state value: report it too. */
+		mlxcx_fm_qstate_ereport(mlxp, "receive",
+		    rq->mlwq_num, "???", state);
+		rq->mlwq_fm_repd_qstate = B_TRUE;
+		break;
+	}
+}
+
+/*
+ * Periodic callback (installed by mlxcx_setup_checktimers()) that walks
+ * every work queue and cross-checks its hardware state against ours via
+ * mlxcx_check_sq()/mlxcx_check_rq().
+ */
+static void
+mlxcx_wq_check(void *arg)
+{
+	mlxcx_t *mlxp = (mlxcx_t *)arg;
+	mlxcx_work_queue_t *wq;
+
+	for (wq = list_head(&mlxp->mlx_wqs); wq != NULL;
+	    wq = list_next(&mlxp->mlx_wqs, wq)) {
+		mutex_enter(&wq->mlwq_mtx);
+		/* Skip queues that aren't fully created or are going away. */
+		if (!(wq->mlwq_state & MLXCX_WQ_CREATED) ||
+		    (wq->mlwq_state & MLXCX_WQ_DESTROYED) ||
+		    (wq->mlwq_state & MLXCX_WQ_TEARDOWN)) {
+			mutex_exit(&wq->mlwq_mtx);
+			continue;
+		}
+		/* Only report a bad queue state once per queue. */
+		if (wq->mlwq_fm_repd_qstate) {
+			mutex_exit(&wq->mlwq_mtx);
+			continue;
+		}
+		switch (wq->mlwq_type) {
+		case MLXCX_WQ_TYPE_SENDQ:
+			mlxcx_check_sq(mlxp, wq);
+			break;
+		case MLXCX_WQ_TYPE_RECVQ:
+			mlxcx_check_rq(mlxp, wq);
+			break;
+		}
+		mutex_exit(&wq->mlwq_mtx);
+	}
+}
+
+/*
+ * Install the periodic EQ/CQ/WQ health-check callbacks.  A check whose
+ * configured interval is zero is disabled.  Always returns B_TRUE; the
+ * boolean return keeps the signature consistent with the other setup
+ * routines called from attach.
+ */
+static boolean_t
+mlxcx_setup_checktimers(mlxcx_t *mlxp)
+{
+	if (mlxp->mlx_props.mldp_eq_check_interval_sec > 0) {
+		mlxp->mlx_eq_checktimer = ddi_periodic_add(mlxcx_eq_check, mlxp,
+		    mlxp->mlx_props.mldp_eq_check_interval_sec * NANOSEC,
+		    DDI_IPL_0);
+	}
+	if (mlxp->mlx_props.mldp_cq_check_interval_sec > 0) {
+		mlxp->mlx_cq_checktimer = ddi_periodic_add(mlxcx_cq_check, mlxp,
+		    mlxp->mlx_props.mldp_cq_check_interval_sec * NANOSEC,
+		    DDI_IPL_0);
+	}
+	if (mlxp->mlx_props.mldp_wq_check_interval_sec > 0) {
+		mlxp->mlx_wq_checktimer = ddi_periodic_add(mlxcx_wq_check, mlxp,
+		    mlxp->mlx_props.mldp_wq_check_interval_sec * NANOSEC,
+		    DDI_IPL_0);
+	}
+	return (B_TRUE);
+}
+
+/*
+ * AVL comparator for flow entries keyed by destination MAC address,
+ * with the VLAN id as a tie-breaker.  Returns -1/0/1 as avl_create()
+ * requires.
+ */
+int
+mlxcx_dmac_fe_compare(const void *arg0, const void *arg1)
+{
+	const mlxcx_flow_entry_t *a = arg0;
+	const mlxcx_flow_entry_t *b = arg1;
+	int diff;
+
+	diff = memcmp(a->mlfe_dmac, b->mlfe_dmac, sizeof (a->mlfe_dmac));
+	if (diff != 0)
+		return ((diff < 0) ? -1 : 1);
+	if (a->mlfe_vid != b->mlfe_vid)
+		return ((a->mlfe_vid < b->mlfe_vid) ? -1 : 1);
+	return (0);
+}
+
+/*
+ * AVL comparator for group MAC entries, ordered purely by the MAC
+ * address bytes.  Returns -1/0/1 as avl_create() requires.
+ */
+int
+mlxcx_grmac_compare(const void *arg0, const void *arg1)
+{
+	const mlxcx_group_mac_t *a = arg0;
+	const mlxcx_group_mac_t *b = arg1;
+	int diff;
+
+	diff = memcmp(a->mlgm_mac, b->mlgm_mac, sizeof (a->mlgm_mac));
+	if (diff != 0)
+		return ((diff < 0) ? -1 : 1);
+	return (0);
+}
+
+/*
+ * AVL comparator for device pages, ordered by physical address.
+ * Returns -1/0/1 as avl_create() requires.
+ */
+int
+mlxcx_page_compare(const void *arg0, const void *arg1)
+{
+	const mlxcx_dev_page_t *a = arg0;
+	const mlxcx_dev_page_t *b = arg1;
+
+	if (a->mxdp_pa != b->mxdp_pa)
+		return ((a->mxdp_pa < b->mxdp_pa) ? -1 : 1);
+	return (0);
+}
+
+/*
+ * Allocate and initialise the per-port state (mlxcx_port_t), query the
+ * hardware for each port's vport context, MTU, status and speed, and
+ * then build each port's root RX flow table with its three standard
+ * flow groups (broadcast, unicast/multicast MACs, promisc).
+ *
+ * On any failure, everything done so far is unwound via
+ * mlxcx_teardown_ports().
+ */
+static boolean_t
+mlxcx_setup_ports(mlxcx_t *mlxp)
+{
+	uint_t i, j;
+	mlxcx_port_t *p;
+	mlxcx_flow_table_t *ft;
+	mlxcx_flow_group_t *fg;
+	mlxcx_flow_entry_t *fe;
+
+	VERIFY3U(mlxp->mlx_nports, >, 0);
+	mlxp->mlx_ports_size = mlxp->mlx_nports * sizeof (mlxcx_port_t);
+	mlxp->mlx_ports = kmem_zalloc(mlxp->mlx_ports_size, KM_SLEEP);
+
+	/* First pass: create port structs and query hardware state. */
+	for (i = 0; i < mlxp->mlx_nports; ++i) {
+		p = &mlxp->mlx_ports[i];
+		p->mlp_num = i;
+		p->mlp_init |= MLXCX_PORT_INIT;
+		mutex_init(&p->mlp_mtx, NULL, MUTEX_DRIVER,
+		    DDI_INTR_PRI(mlxp->mlx_intr_pri));
+		mutex_enter(&p->mlp_mtx);
+		if (!mlxcx_cmd_query_nic_vport_ctx(mlxp, p)) {
+			mutex_exit(&p->mlp_mtx);
+			goto err;
+		}
+		if (!mlxcx_cmd_query_port_mtu(mlxp, p)) {
+			mutex_exit(&p->mlp_mtx);
+			goto err;
+		}
+		if (!mlxcx_cmd_query_port_status(mlxp, p)) {
+			mutex_exit(&p->mlp_mtx);
+			goto err;
+		}
+		if (!mlxcx_cmd_query_port_speed(mlxp, p)) {
+			mutex_exit(&p->mlp_mtx);
+			goto err;
+		}
+		if (!mlxcx_cmd_modify_nic_vport_ctx(mlxp, p,
+		    MLXCX_MODIFY_NIC_VPORT_CTX_PROMISC)) {
+			mutex_exit(&p->mlp_mtx);
+			goto err;
+		}
+
+		mutex_exit(&p->mlp_mtx);
+	}
+
+	/* Second pass: build each port's root RX flow table. */
+	for (i = 0; i < mlxp->mlx_nports; ++i) {
+		p = &mlxp->mlx_ports[i];
+		mutex_enter(&p->mlp_mtx);
+		p->mlp_rx_flow = (ft = kmem_zalloc(sizeof (mlxcx_flow_table_t),
+		    KM_SLEEP));
+		mutex_init(&ft->mlft_mtx, NULL, MUTEX_DRIVER,
+		    DDI_INTR_PRI(mlxp->mlx_intr_pri));
+
+		mutex_enter(&ft->mlft_mtx);
+
+		ft->mlft_type = MLXCX_FLOW_TABLE_NIC_RX;
+		ft->mlft_port = p;
+		/* Clamp the configured root table size to what hw allows. */
+		ft->mlft_entshift = mlxp->mlx_props.mldp_ftbl_root_size_shift;
+		if (ft->mlft_entshift > mlxp->mlx_caps->mlc_max_rx_ft_shift)
+			ft->mlft_entshift = mlxp->mlx_caps->mlc_max_rx_ft_shift;
+		ft->mlft_nents = (1 << ft->mlft_entshift);
+		ft->mlft_entsize = ft->mlft_nents * sizeof (mlxcx_flow_entry_t);
+		ft->mlft_ent = kmem_zalloc(ft->mlft_entsize, KM_SLEEP);
+		list_create(&ft->mlft_groups, sizeof (mlxcx_flow_group_t),
+		    offsetof(mlxcx_flow_group_t, mlfg_entry));
+
+		for (j = 0; j < ft->mlft_nents; ++j) {
+			ft->mlft_ent[j].mlfe_table = ft;
+			ft->mlft_ent[j].mlfe_index = j;
+		}
+
+		if (!mlxcx_cmd_create_flow_table(mlxp, ft)) {
+			mutex_exit(&ft->mlft_mtx);
+			mutex_exit(&p->mlp_mtx);
+			goto err;
+		}
+
+		if (!mlxcx_cmd_set_flow_table_root(mlxp, ft)) {
+			mutex_exit(&ft->mlft_mtx);
+			mutex_exit(&p->mlp_mtx);
+			goto err;
+		}
+
+		/*
+		 * We match broadcast at the top of the root flow table, then
+		 * all multicast/unicast MACs, then the promisc entry is down
+		 * the very bottom.
+		 *
+		 * This way when promisc is on, that entry simply catches any
+		 * remaining traffic that earlier flows haven't matched.
+		 */
+		fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+		list_insert_tail(&ft->mlft_groups, fg);
+		fg->mlfg_table = ft;
+		fg->mlfg_size = 1;
+		fg->mlfg_mask |= MLXCX_FLOW_MATCH_DMAC;
+		if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+			mutex_exit(&ft->mlft_mtx);
+			mutex_exit(&p->mlp_mtx);
+			goto err;
+		}
+		p->mlp_bcast = fg;
+		fe = list_head(&fg->mlfg_entries);
+		fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
+		/* Broadcast matches DMAC ff:ff:ff:ff:ff:ff. */
+		(void) memset(fe->mlfe_dmac, 0xff, sizeof (fe->mlfe_dmac));
+		fe->mlfe_state |= MLXCX_FLOW_ENTRY_DIRTY;
+
+		/* All remaining entries bar the promisc slot go to u/mcast. */
+		fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+		list_insert_tail(&ft->mlft_groups, fg);
+		fg->mlfg_table = ft;
+		fg->mlfg_size = ft->mlft_nents - 2;
+		fg->mlfg_mask |= MLXCX_FLOW_MATCH_DMAC;
+		if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+			mutex_exit(&ft->mlft_mtx);
+			mutex_exit(&p->mlp_mtx);
+			goto err;
+		}
+		p->mlp_umcast = fg;
+
+		/* Promisc: no match mask at all, catches everything. */
+		fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+		list_insert_tail(&ft->mlft_groups, fg);
+		fg->mlfg_table = ft;
+		fg->mlfg_size = 1;
+		if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+			mutex_exit(&ft->mlft_mtx);
+			mutex_exit(&p->mlp_mtx);
+			goto err;
+		}
+		p->mlp_promisc = fg;
+		fe = list_head(&fg->mlfg_entries);
+		fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
+		fe->mlfe_state |= MLXCX_FLOW_ENTRY_DIRTY;
+
+		/* Index from MAC address to flow entry, for umcast add/rem. */
+		avl_create(&p->mlp_dmac_fe, mlxcx_dmac_fe_compare,
+		    sizeof (mlxcx_flow_entry_t), offsetof(mlxcx_flow_entry_t,
+		    mlfe_dmac_entry));
+
+		mutex_exit(&ft->mlft_mtx);
+		mutex_exit(&p->mlp_mtx);
+	}
+
+	return (B_TRUE);
+
+err:
+	mlxcx_teardown_ports(mlxp);
+	return (B_FALSE);
+}
+
+/*
+ * Remove every VLAN filter entry on a ring group.  Before dropping the
+ * specific VLAN entries we re-enable the group's "default" (any-VLAN)
+ * entry, so traffic keeps flowing during the transition.
+ *
+ * Caller must hold the group lock (mlg_mtx).
+ */
+void
+mlxcx_remove_all_vlan_entries(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
+{
+	mlxcx_flow_table_t *ft = g->mlg_rx_vlan_ft;
+	mlxcx_flow_group_t *fg = g->mlg_rx_vlan_fg;
+	mlxcx_flow_group_t *dfg = g->mlg_rx_vlan_def_fg;
+	mlxcx_flow_entry_t *fe;
+	mlxcx_group_vlan_t *v;
+
+	ASSERT(mutex_owned(&g->mlg_mtx));
+
+	mutex_enter(&ft->mlft_mtx);
+
+	/* Re-enable the catch-all entry first (best-effort). */
+	if (!list_is_empty(&g->mlg_rx_vlans)) {
+		fe = list_head(&dfg->mlfg_entries);
+		(void) mlxcx_cmd_set_flow_table_entry(mlxp, fe);
+	}
+
+	while ((v = list_remove_head(&g->mlg_rx_vlans)) != NULL) {
+		fe = v->mlgv_fe;
+		ASSERT3P(fe->mlfe_table, ==, ft);
+		ASSERT3P(fe->mlfe_group, ==, fg);
+		kmem_free(v, sizeof (mlxcx_group_vlan_t));
+
+		/* Best-effort delete; the entry is unreserved regardless. */
+		(void) mlxcx_cmd_delete_flow_table_entry(mlxp, fe);
+		fe->mlfe_state &= ~MLXCX_FLOW_ENTRY_RESERVED;
+	}
+
+	mutex_exit(&ft->mlft_mtx);
+}
+
+/*
+ * Remove a single VLAN filter entry (tagged/vid pair) from a ring
+ * group.  Returns B_FALSE if the entry was not present or if a hardware
+ * command failed (in which case the software state is rolled back to
+ * match the hardware).
+ *
+ * Caller must hold the group lock (mlg_mtx).
+ */
+boolean_t
+mlxcx_remove_vlan_entry(mlxcx_t *mlxp, mlxcx_ring_group_t *g,
+    boolean_t tagged, uint16_t vid)
+{
+	mlxcx_flow_table_t *ft = g->mlg_rx_vlan_ft;
+	mlxcx_flow_group_t *fg = g->mlg_rx_vlan_fg;
+	mlxcx_flow_group_t *dfg = g->mlg_rx_vlan_def_fg;
+	mlxcx_flow_entry_t *fe;
+	mlxcx_group_vlan_t *v;
+	boolean_t found = B_FALSE;
+
+	ASSERT(mutex_owned(&g->mlg_mtx));
+
+	mutex_enter(&ft->mlft_mtx);
+
+	/* Find the software record for this (tagged, vid) pair. */
+	for (v = list_head(&g->mlg_rx_vlans); v != NULL;
+	    v = list_next(&g->mlg_rx_vlans, v)) {
+		if (v->mlgv_tagged == tagged && v->mlgv_vid == vid) {
+			found = B_TRUE;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_exit(&ft->mlft_mtx);
+		return (B_FALSE);
+	}
+
+	list_remove(&g->mlg_rx_vlans, v);
+
+	/*
+	 * If this is the last VLAN entry, we have to go back to accepting
+	 * any VLAN (which means re-enabling the default entry).
+	 *
+	 * Do this before we remove the flow entry for the last specific
+	 * VLAN so that we don't lose any traffic in the transition.
+	 */
+	if (list_is_empty(&g->mlg_rx_vlans)) {
+		fe = list_head(&dfg->mlfg_entries);
+		if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+			/* Roll back: put the record back on the list. */
+			list_insert_tail(&g->mlg_rx_vlans, v);
+			mutex_exit(&ft->mlft_mtx);
+			return (B_FALSE);
+		}
+	}
+
+	fe = v->mlgv_fe;
+	ASSERT(fe->mlfe_state & MLXCX_FLOW_ENTRY_RESERVED);
+	ASSERT(fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED);
+	ASSERT3P(fe->mlfe_table, ==, ft);
+	ASSERT3P(fe->mlfe_group, ==, fg);
+
+	if (!mlxcx_cmd_delete_flow_table_entry(mlxp, fe)) {
+		/* Roll back, including undoing the default-entry enable. */
+		list_insert_tail(&g->mlg_rx_vlans, v);
+		fe = list_head(&dfg->mlfg_entries);
+		if (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
+			(void) mlxcx_cmd_delete_flow_table_entry(mlxp, fe);
+		}
+		mutex_exit(&ft->mlft_mtx);
+		return (B_FALSE);
+	}
+
+	fe->mlfe_state &= ~MLXCX_FLOW_ENTRY_RESERVED;
+
+	kmem_free(v, sizeof (mlxcx_group_vlan_t));
+
+	mutex_exit(&ft->mlft_mtx);
+	return (B_TRUE);
+}
+
+/*
+ * Add a VLAN filter entry (tagged/vid pair) to a ring group.  Returns
+ * B_TRUE if the entry exists (or already existed), B_FALSE if the flow
+ * group is out of free entries or a hardware command failed.
+ *
+ * Caller must hold the group lock (mlg_mtx).
+ */
+boolean_t
+mlxcx_add_vlan_entry(mlxcx_t *mlxp, mlxcx_ring_group_t *g, boolean_t tagged,
+    uint16_t vid)
+{
+	mlxcx_flow_table_t *ft = g->mlg_rx_vlan_ft;
+	mlxcx_flow_group_t *fg = g->mlg_rx_vlan_fg;
+	mlxcx_flow_group_t *dfg = g->mlg_rx_vlan_def_fg;
+	mlxcx_flow_entry_t *fe;
+	mlxcx_group_vlan_t *v;
+	boolean_t found = B_FALSE;
+	boolean_t first = B_FALSE;
+
+	ASSERT(mutex_owned(&g->mlg_mtx));
+
+	mutex_enter(&ft->mlft_mtx);
+
+	/* Already present?  Nothing to do. */
+	for (v = list_head(&g->mlg_rx_vlans); v != NULL;
+	    v = list_next(&g->mlg_rx_vlans, v)) {
+		if (v->mlgv_tagged == tagged && v->mlgv_vid == vid) {
+			mutex_exit(&ft->mlft_mtx);
+			return (B_TRUE);
+		}
+	}
+	if (list_is_empty(&g->mlg_rx_vlans))
+		first = B_TRUE;
+
+	/* Find a free flow entry in the VLAN flow group. */
+	for (fe = list_head(&fg->mlfg_entries); fe != NULL;
+	    fe = list_next(&fg->mlfg_entries, fe)) {
+		if (!(fe->mlfe_state & MLXCX_FLOW_ENTRY_RESERVED)) {
+			found = B_TRUE;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_exit(&ft->mlft_mtx);
+		return (B_FALSE);
+	}
+
+	v = kmem_zalloc(sizeof (mlxcx_group_vlan_t), KM_SLEEP);
+	v->mlgv_fe = fe;
+	v->mlgv_tagged = tagged;
+	v->mlgv_vid = vid;
+
+	fe->mlfe_state |= MLXCX_FLOW_ENTRY_RESERVED;
+	fe->mlfe_state |= MLXCX_FLOW_ENTRY_DIRTY;
+	fe->mlfe_vid = vid;
+	if (tagged) {
+		fe->mlfe_vlan_type = MLXCX_VLAN_TYPE_CVLAN;
+	} else {
+		fe->mlfe_vlan_type = MLXCX_VLAN_TYPE_NONE;
+	}
+
+	if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+		/* Roll back the reservation on failure. */
+		fe->mlfe_state &= ~MLXCX_FLOW_ENTRY_DIRTY;
+		fe->mlfe_state &= ~MLXCX_FLOW_ENTRY_RESERVED;
+		kmem_free(v, sizeof (mlxcx_group_vlan_t));
+		mutex_exit(&ft->mlft_mtx);
+		return (B_FALSE);
+	}
+
+	list_insert_tail(&g->mlg_rx_vlans, v);
+
+	/*
+	 * If the vlan list was empty for this group before adding this one,
+	 * then we no longer want the "default" entry to allow all VLANs
+	 * through.
+	 */
+	if (first) {
+		fe = list_head(&dfg->mlfg_entries);
+		(void) mlxcx_cmd_delete_flow_table_entry(mlxp, fe);
+	}
+
+	mutex_exit(&ft->mlft_mtx);
+	return (B_TRUE);
+}
+
+/*
+ * Detach every unicast/multicast MAC entry on this port that belongs to
+ * the given ring group.  Flow entries still used by other groups are
+ * rewritten with the reduced destination list; entries with no users
+ * left are deleted from hardware and unreserved.
+ *
+ * Caller must hold both the port lock and the group lock.
+ */
+void
+mlxcx_remove_all_umcast_entries(mlxcx_t *mlxp, mlxcx_port_t *port,
+    mlxcx_ring_group_t *group)
+{
+	mlxcx_flow_entry_t *fe;
+	mlxcx_flow_table_t *ft = port->mlp_rx_flow;
+	mlxcx_group_mac_t *gm, *ngm;
+
+	ASSERT(mutex_owned(&port->mlp_mtx));
+	ASSERT(mutex_owned(&group->mlg_mtx));
+
+	mutex_enter(&ft->mlft_mtx);
+
+	/*
+	 * Note: ngm is saved up front because the inner loop below reuses
+	 * gm to walk the flow entry's remaining ring groups.
+	 */
+	gm = avl_first(&group->mlg_rx_macs);
+	for (; gm != NULL; gm = ngm) {
+		ngm = AVL_NEXT(&group->mlg_rx_macs, gm);
+
+		ASSERT3P(gm->mlgm_group, ==, group);
+		fe = gm->mlgm_fe;
+		ASSERT3P(fe->mlfe_table, ==, ft);
+
+		avl_remove(&group->mlg_rx_macs, gm);
+		list_remove(&fe->mlfe_ring_groups, gm);
+		kmem_free(gm, sizeof (mlxcx_group_mac_t));
+
+		/* Rebuild the destination list from the remaining groups. */
+		fe->mlfe_ndest = 0;
+		for (gm = list_head(&fe->mlfe_ring_groups); gm != NULL;
+		    gm = list_next(&fe->mlfe_ring_groups, gm)) {
+			fe->mlfe_dest[fe->mlfe_ndest++].mlfed_flow =
+			    gm->mlgm_group->mlg_rx_vlan_ft;
+		}
+		fe->mlfe_state |= MLXCX_FLOW_ENTRY_DIRTY;
+
+		if (fe->mlfe_ndest > 0) {
+			(void) mlxcx_cmd_set_flow_table_entry(mlxp, fe);
+			continue;
+		}
+
+		/*
+		 * There are no more ring groups left for this MAC (it wasn't
+		 * attached to any other groups since ndest == 0), so clean up
+		 * its flow entry.
+		 */
+		avl_remove(&port->mlp_dmac_fe, fe);
+		(void) mlxcx_cmd_delete_flow_table_entry(mlxp, fe);
+		list_destroy(&fe->mlfe_ring_groups);
+		fe->mlfe_state &= ~MLXCX_FLOW_ENTRY_RESERVED;
+	}
+
+	mutex_exit(&ft->mlft_mtx);
+}
+
+/*
+ * Detach a single unicast/multicast MAC from the given ring group.  If
+ * other ring groups still use the flow entry it is rewritten with the
+ * reduced destination list; otherwise it is deleted from hardware and
+ * unreserved.  Returns B_FALSE if the MAC was not found on this group
+ * or a hardware command failed.
+ *
+ * Caller must hold both the port lock and the group lock.
+ */
+boolean_t
+mlxcx_remove_umcast_entry(mlxcx_t *mlxp, mlxcx_port_t *port,
+    mlxcx_ring_group_t *group, const uint8_t *macaddr)
+{
+	mlxcx_flow_entry_t *fe;
+	mlxcx_flow_table_t *ft = port->mlp_rx_flow;
+	mlxcx_group_mac_t *gm, probe;
+
+	ASSERT(mutex_owned(&port->mlp_mtx));
+	ASSERT(mutex_owned(&group->mlg_mtx));
+
+	/* Stack-allocated key for the AVL lookup. */
+	bzero(&probe, sizeof (probe));
+	bcopy(macaddr, probe.mlgm_mac, sizeof (probe.mlgm_mac));
+
+	mutex_enter(&ft->mlft_mtx);
+
+	gm = avl_find(&group->mlg_rx_macs, &probe, NULL);
+	if (gm == NULL) {
+		mutex_exit(&ft->mlft_mtx);
+		return (B_FALSE);
+	}
+	ASSERT3P(gm->mlgm_group, ==, group);
+	ASSERT0(bcmp(macaddr, gm->mlgm_mac, sizeof (gm->mlgm_mac)));
+
+	fe = gm->mlgm_fe;
+	ASSERT3P(fe->mlfe_table, ==, ft);
+	ASSERT0(bcmp(macaddr, fe->mlfe_dmac, sizeof (fe->mlfe_dmac)));
+
+	list_remove(&fe->mlfe_ring_groups, gm);
+	avl_remove(&group->mlg_rx_macs, gm);
+	kmem_free(gm, sizeof (mlxcx_group_mac_t));
+
+	/* Rebuild the destination list from the remaining groups. */
+	fe->mlfe_ndest = 0;
+	for (gm = list_head(&fe->mlfe_ring_groups); gm != NULL;
+	    gm = list_next(&fe->mlfe_ring_groups, gm)) {
+		fe->mlfe_dest[fe->mlfe_ndest++].mlfed_flow =
+		    gm->mlgm_group->mlg_rx_vlan_ft;
+	}
+	fe->mlfe_state |= MLXCX_FLOW_ENTRY_DIRTY;
+
+	if (fe->mlfe_ndest > 0) {
+		if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+			mutex_exit(&ft->mlft_mtx);
+			return (B_FALSE);
+		}
+		mutex_exit(&ft->mlft_mtx);
+		return (B_TRUE);
+	}
+
+	/*
+	 * There are no more ring groups left for this MAC (it wasn't attached
+	 * to any other groups since ndest == 0), so clean up its flow entry.
+	 */
+	avl_remove(&port->mlp_dmac_fe, fe);
+	(void) mlxcx_cmd_delete_flow_table_entry(mlxp, fe);
+	list_destroy(&fe->mlfe_ring_groups);
+
+	fe->mlfe_state &= ~MLXCX_FLOW_ENTRY_RESERVED;
+
+	mutex_exit(&ft->mlft_mtx);
+
+	return (B_TRUE);
+}
+
+/*
+ * Attach a unicast/multicast MAC to a ring group.  If the port already
+ * has a flow entry for this MAC (shared with another group), the group's
+ * VLAN flow table is appended to its destination list; otherwise a free
+ * entry is claimed from the port's u/mcast flow group.  Returns B_FALSE
+ * if no entry is free or a hardware command failed.
+ *
+ * Caller must hold both the port lock and the group lock.
+ */
+boolean_t
+mlxcx_add_umcast_entry(mlxcx_t *mlxp, mlxcx_port_t *port,
+    mlxcx_ring_group_t *group, const uint8_t *macaddr)
+{
+	mlxcx_flow_group_t *fg;
+	mlxcx_flow_entry_t *fe, probe;
+	mlxcx_flow_table_t *ft = port->mlp_rx_flow;
+	mlxcx_group_mac_t *gm;
+	boolean_t found = B_FALSE;
+
+	ASSERT(mutex_owned(&port->mlp_mtx));
+	ASSERT(mutex_owned(&group->mlg_mtx));
+
+	/* Stack-allocated key for the AVL lookup. */
+	bzero(&probe, sizeof (probe));
+	bcopy(macaddr, probe.mlfe_dmac, sizeof (probe.mlfe_dmac));
+
+	mutex_enter(&ft->mlft_mtx);
+
+	fe = avl_find(&port->mlp_dmac_fe, &probe, NULL);
+
+	if (fe == NULL) {
+		/* New MAC: claim a free entry in the u/mcast flow group. */
+		fg = port->mlp_umcast;
+		for (fe = list_head(&fg->mlfg_entries); fe != NULL;
+		    fe = list_next(&fg->mlfg_entries, fe)) {
+			if (!(fe->mlfe_state & MLXCX_FLOW_ENTRY_RESERVED)) {
+				found = B_TRUE;
+				break;
+			}
+		}
+		if (!found) {
+			mutex_exit(&ft->mlft_mtx);
+			return (B_FALSE);
+		}
+		list_create(&fe->mlfe_ring_groups, sizeof (mlxcx_group_mac_t),
+		    offsetof(mlxcx_group_mac_t, mlgm_fe_entry));
+		fe->mlfe_state |= MLXCX_FLOW_ENTRY_RESERVED;
+		fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
+		bcopy(macaddr, fe->mlfe_dmac, sizeof (fe->mlfe_dmac));
+
+		avl_add(&port->mlp_dmac_fe, fe);
+	}
+
+	/* Add this group's VLAN flow table as another destination. */
+	fe->mlfe_dest[fe->mlfe_ndest++].mlfed_flow = group->mlg_rx_vlan_ft;
+	fe->mlfe_state |= MLXCX_FLOW_ENTRY_DIRTY;
+
+	if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+		/* Roll back; release the entry if we were its only user. */
+		fe->mlfe_state &= ~MLXCX_FLOW_ENTRY_DIRTY;
+		if (--fe->mlfe_ndest == 0) {
+			fe->mlfe_state &= ~MLXCX_FLOW_ENTRY_RESERVED;
+		}
+		mutex_exit(&ft->mlft_mtx);
+		return (B_FALSE);
+	}
+
+	gm = kmem_zalloc(sizeof (mlxcx_group_mac_t), KM_SLEEP);
+	gm->mlgm_group = group;
+	gm->mlgm_fe = fe;
+	bcopy(macaddr, gm->mlgm_mac, sizeof (gm->mlgm_mac));
+	avl_add(&group->mlg_rx_macs, gm);
+	list_insert_tail(&fe->mlfe_ring_groups, gm);
+
+	mutex_exit(&ft->mlft_mtx);
+
+	return (B_TRUE);
+}
+
+/*
+ * Carve mlfg_size contiguous entries out of a flow table for the given
+ * flow group, create the group in hardware, and link the entries onto
+ * the group's list.  Returns B_FALSE if the table lacks room or the
+ * hardware command failed.
+ *
+ * Caller must hold the flow table lock, and the table must already
+ * exist in hardware.
+ */
+boolean_t
+mlxcx_setup_flow_group(mlxcx_t *mlxp, mlxcx_flow_table_t *ft,
+    mlxcx_flow_group_t *fg)
+{
+	mlxcx_flow_entry_t *fe;
+	uint_t i, idx;
+
+	ASSERT(mutex_owned(&ft->mlft_mtx));
+	ASSERT(ft->mlft_state & MLXCX_FLOW_TABLE_CREATED);
+	ASSERT3P(fg->mlfg_table, ==, ft);
+
+	/* Groups are allocated sequentially from the table. */
+	if (ft->mlft_next_ent + fg->mlfg_size > ft->mlft_nents)
+		return (B_FALSE);
+	fg->mlfg_start_idx = ft->mlft_next_ent;
+
+	if (!mlxcx_cmd_create_flow_group(mlxp, fg)) {
+		return (B_FALSE);
+	}
+
+	list_create(&fg->mlfg_entries, sizeof (mlxcx_flow_entry_t),
+	    offsetof(mlxcx_flow_entry_t, mlfe_group_entry));
+	for (i = 0; i < fg->mlfg_size; ++i) {
+		idx = fg->mlfg_start_idx + i;
+		fe = &ft->mlft_ent[idx];
+		fe->mlfe_group = fg;
+		list_insert_tail(&fg->mlfg_entries, fe);
+	}
+	fg->mlfg_avail = fg->mlfg_size;
+	ft->mlft_next_ent += fg->mlfg_size;
+
+	return (B_TRUE);
+}
+
+/*
+ * Set up event queue 0, which is dedicated to control-plane events
+ * (page requests, port state changes, errors, etc.).  Enables and arms
+ * the interrupt for EQ 0 before returning.
+ */
+static boolean_t
+mlxcx_setup_eq0(mlxcx_t *mlxp)
+{
+	mlxcx_event_queue_t *mleq = &mlxp->mlx_eqs[0];
+
+	mutex_enter(&mleq->mleq_mtx);
+	if (!mlxcx_eq_alloc_dma(mlxp, mleq)) {
+		/* mlxcx_teardown_eqs() will clean this up */
+		mutex_exit(&mleq->mleq_mtx);
+		return (B_FALSE);
+	}
+	mleq->mleq_mlx = mlxp;
+	mleq->mleq_uar = &mlxp->mlx_uar;
+	/* Bitmask of event types EQ 0 subscribes to. */
+	mleq->mleq_events =
+	    (1ULL << MLXCX_EVENT_PAGE_REQUEST) |
+	    (1ULL << MLXCX_EVENT_PORT_STATE) |
+	    (1ULL << MLXCX_EVENT_INTERNAL_ERROR) |
+	    (1ULL << MLXCX_EVENT_PORT_MODULE) |
+	    (1ULL << MLXCX_EVENT_SENDQ_DRAIN) |
+	    (1ULL << MLXCX_EVENT_LAST_WQE) |
+	    (1ULL << MLXCX_EVENT_CQ_ERROR) |
+	    (1ULL << MLXCX_EVENT_WQ_CATASTROPHE) |
+	    (1ULL << MLXCX_EVENT_PAGE_FAULT) |
+	    (1ULL << MLXCX_EVENT_WQ_INVALID_REQ) |
+	    (1ULL << MLXCX_EVENT_WQ_ACCESS_VIOL) |
+	    (1ULL << MLXCX_EVENT_NIC_VPORT) |
+	    (1ULL << MLXCX_EVENT_DOORBELL_CONGEST);
+	if (!mlxcx_cmd_create_eq(mlxp, mleq)) {
+		/* mlxcx_teardown_eqs() will clean this up */
+		mutex_exit(&mleq->mleq_mtx);
+		return (B_FALSE);
+	}
+	if (ddi_intr_enable(mlxp->mlx_intr_handles[0]) != DDI_SUCCESS) {
+		/*
+		 * mlxcx_teardown_eqs() will handle calling cmd_destroy_eq and
+		 * eq_rele_dma
+		 */
+		mutex_exit(&mleq->mleq_mtx);
+		return (B_FALSE);
+	}
+	mlxcx_arm_eq(mlxp, mleq);
+	mutex_exit(&mleq->mleq_mtx);
+	return (B_TRUE);
+}
+
+/*
+ * AVL comparator for completion queues, ordered by CQ number.
+ * Returns -1/0/1 as avl_create() requires.
+ */
+int
+mlxcx_cq_compare(const void *arg0, const void *arg1)
+{
+	const mlxcx_completion_queue_t *a = arg0;
+	const mlxcx_completion_queue_t *b = arg1;
+
+	if (a->mlcq_num != b->mlcq_num)
+		return ((a->mlcq_num < b->mlcq_num) ? -1 : 1);
+	return (0);
+}
+
+/*
+ * Set up, enable and arm the remaining event queues (indices 1 and up;
+ * EQ 0 is special and handled by mlxcx_setup_eq0()).  These EQs service
+ * completion queue events.  On failure, mlxcx_teardown() unwinds
+ * whatever was created.
+ */
+static boolean_t
+mlxcx_setup_eqs(mlxcx_t *mlxp)
+{
+	uint_t i;
+	mlxcx_event_queue_t *mleq;
+
+	ASSERT3S(mlxp->mlx_intr_count, >, 0);
+
+	for (i = 1; i < mlxp->mlx_intr_count; ++i) {
+		mleq = &mlxp->mlx_eqs[i];
+		mutex_enter(&mleq->mleq_mtx);
+		if (!mlxcx_eq_alloc_dma(mlxp, mleq)) {
+			mutex_exit(&mleq->mleq_mtx);
+			return (B_FALSE);
+		}
+		mleq->mleq_uar = &mlxp->mlx_uar;
+		if (!mlxcx_cmd_create_eq(mlxp, mleq)) {
+			/* mlxcx_teardown() will handle calling eq_rele_dma */
+			mutex_exit(&mleq->mleq_mtx);
+			return (B_FALSE);
+		}
+		/* Interrupt moderation is optional (0 disables it). */
+		if (mlxp->mlx_props.mldp_intrmod_period_usec != 0 &&
+		    !mlxcx_cmd_set_int_mod(mlxp, i,
+		    mlxp->mlx_props.mldp_intrmod_period_usec)) {
+			mutex_exit(&mleq->mleq_mtx);
+			return (B_FALSE);
+		}
+		if (ddi_intr_enable(mlxp->mlx_intr_handles[i]) != DDI_SUCCESS) {
+			mutex_exit(&mleq->mleq_mtx);
+			return (B_FALSE);
+		}
+		mlxcx_arm_eq(mlxp, mleq);
+		mutex_exit(&mleq->mleq_mtx);
+	}
+
+	/* Round-robin assignment of CQs to EQs starts at index 1. */
+	mlxp->mlx_next_eq = 1;
+
+	return (B_TRUE);
+}
+
+/*
+ * Snapshot all of the hardware capabilities that we care about and then modify
+ * the HCA capabilities to get things moving.
+ *
+ * Returns B_FALSE (after freeing the caps structure) if the hardware
+ * fails a hard requirement: 4k pages, CQE version 1, ethernet ports,
+ * or RX flow table support.
+ */
+static boolean_t
+mlxcx_init_caps(mlxcx_t *mlxp)
+{
+	mlxcx_caps_t *c;
+
+	mlxp->mlx_caps = c = kmem_zalloc(sizeof (mlxcx_caps_t), KM_SLEEP);
+
+	/*
+	 * NOTE(review): failures of these query commands only warn and
+	 * fall through, so the checks below may read zeroed caps --
+	 * confirm this soft-failure behaviour is intended.
+	 */
+	if (!mlxcx_cmd_query_hca_cap(mlxp, MLXCX_HCA_CAP_GENERAL,
+	    MLXCX_HCA_CAP_MODE_CURRENT, &c->mlc_hca_cur)) {
+		mlxcx_warn(mlxp, "failed to obtain current HCA general caps");
+	}
+
+	if (!mlxcx_cmd_query_hca_cap(mlxp, MLXCX_HCA_CAP_GENERAL,
+	    MLXCX_HCA_CAP_MODE_MAX, &c->mlc_hca_max)) {
+		mlxcx_warn(mlxp, "failed to obtain maximum HCA general caps");
+	}
+
+	if (!mlxcx_cmd_query_hca_cap(mlxp, MLXCX_HCA_CAP_ETHERNET,
+	    MLXCX_HCA_CAP_MODE_CURRENT, &c->mlc_ether_cur)) {
+		mlxcx_warn(mlxp, "failed to obtain current HCA eth caps");
+	}
+
+	if (!mlxcx_cmd_query_hca_cap(mlxp, MLXCX_HCA_CAP_ETHERNET,
+	    MLXCX_HCA_CAP_MODE_MAX, &c->mlc_ether_max)) {
+		mlxcx_warn(mlxp, "failed to obtain maximum HCA eth caps");
+	}
+
+	if (!mlxcx_cmd_query_hca_cap(mlxp, MLXCX_HCA_CAP_NIC_FLOW,
+	    MLXCX_HCA_CAP_MODE_CURRENT, &c->mlc_nic_flow_cur)) {
+		mlxcx_warn(mlxp, "failed to obtain current HCA flow caps");
+	}
+
+	if (!mlxcx_cmd_query_hca_cap(mlxp, MLXCX_HCA_CAP_NIC_FLOW,
+	    MLXCX_HCA_CAP_MODE_MAX, &c->mlc_nic_flow_max)) {
+		mlxcx_warn(mlxp, "failed to obtain maximum HCA flow caps");
+	}
+
+	/*
+	 * Check the caps meet our requirements.
+	 */
+	const mlxcx_hca_cap_general_caps_t *gen = &c->mlc_hca_cur.mhc_general;
+
+	/* The driver's page handling assumes 4k device pages. */
+	if (gen->mlcap_general_log_pg_sz != 12) {
+		mlxcx_warn(mlxp, "!hardware has page size != 4k "
+		    "(log_pg_sz = %u)", (uint_t)gen->mlcap_general_log_pg_sz);
+		goto err;
+	}
+	if (gen->mlcap_general_cqe_version != 1) {
+		mlxcx_warn(mlxp, "!hardware does not support CQE v1 "
+		    "(cqe_ver = %u)", (uint_t)gen->mlcap_general_cqe_version);
+		goto err;
+	}
+	if (gen->mlcap_general_port_type !=
+	    MLXCX_CAP_GENERAL_PORT_TYPE_ETHERNET) {
+		mlxcx_warn(mlxp, "!hardware has non-ethernet ports");
+		goto err;
+	}
+	mlxp->mlx_nports = gen->mlcap_general_num_ports;
+	mlxp->mlx_max_sdu = (1 << (gen->mlcap_general_log_max_msg & 0x1F));
+
+	c->mlc_max_tir = (1 << gen->mlcap_general_log_max_tir);
+
+	c->mlc_checksum = get_bit32(c->mlc_ether_cur.mhc_eth.mlcap_eth_flags,
+	    MLXCX_ETH_CAP_CSUM_CAP);
+	c->mlc_vxlan = get_bit32(c->mlc_ether_cur.mhc_eth.mlcap_eth_flags,
+	    MLXCX_ETH_CAP_TUNNEL_STATELESS_VXLAN);
+
+	/* A max-LSO value of 1 (2^0) means LSO is not supported. */
+	c->mlc_max_lso_size = (1 << get_bits32(c->mlc_ether_cur.mhc_eth.
+	    mlcap_eth_flags, MLXCX_ETH_CAP_MAX_LSO_CAP));
+	if (c->mlc_max_lso_size == 1) {
+		c->mlc_max_lso_size = 0;
+		c->mlc_lso = B_FALSE;
+	} else {
+		c->mlc_lso = B_TRUE;
+	}
+
+	c->mlc_max_rqt_size = (1 << get_bits32(c->mlc_ether_cur.mhc_eth.
+	    mlcap_eth_flags, MLXCX_ETH_CAP_RSS_IND_TBL_CAP));
+
+	if (!get_bit32(c->mlc_nic_flow_cur.mhc_flow.mlcap_flow_nic_rx.
+	    mlcap_flow_prop_flags, MLXCX_FLOW_CAP_PROPS_SUPPORT)) {
+		mlxcx_warn(mlxp, "!hardware does not support rx flow tables");
+		goto err;
+	}
+	if (!get_bit32(c->mlc_nic_flow_cur.mhc_flow.mlcap_flow_nic_rx.
+	    mlcap_flow_prop_flags, MLXCX_FLOW_CAP_PROPS_MODIFY)) {
+		mlxcx_warn(mlxp, "!hardware does not support modifying rx "
+		    "flow table entries");
+		goto err;
+	}
+
+	c->mlc_max_rx_ft_shift = c->mlc_nic_flow_cur.mhc_flow.mlcap_flow_nic_rx.
+	    mlcap_flow_prop_log_max_ft_size;
+	c->mlc_max_rx_flows = (1 << c->mlc_nic_flow_cur.mhc_flow.
+	    mlcap_flow_nic_rx.mlcap_flow_prop_log_max_flow);
+	c->mlc_max_rx_fe_dest = (1 << c->mlc_nic_flow_cur.mhc_flow.
+	    mlcap_flow_nic_rx.mlcap_flow_prop_log_max_destination);
+
+	return (B_TRUE);
+
+err:
+	/*
+	 * NOTE(review): mlxp->mlx_caps is left pointing at freed memory
+	 * here -- confirm teardown never dereferences or re-frees it.
+	 */
+	kmem_free(mlxp->mlx_caps, sizeof (mlxcx_caps_t));
+	return (B_FALSE);
+}
+
+/*
+ * detach(9E) entry point.  Unregister from MAC first (which can fail if
+ * the datalink is still in use), then tear down everything attach set
+ * up, as tracked by the mlx_attach flag bits.
+ */
+static int
+mlxcx_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+	mlxcx_t *mlxp;
+
+	if (cmd != DDI_DETACH)
+		return (DDI_FAILURE);
+
+	mlxp = ddi_get_driver_private(dip);
+	if (mlxp == NULL) {
+		mlxcx_warn(NULL, "asked to detach, but missing instance "
+		    "private data");
+		return (DDI_FAILURE);
+	}
+
+	if (mlxp->mlx_attach & MLXCX_ATTACH_MAC_HDL) {
+		if (mac_unregister(mlxp->mlx_mac_hdl) != DDI_SUCCESS) {
+			return (DDI_FAILURE);
+		}
+		mlxp->mlx_attach &= ~MLXCX_ATTACH_MAC_HDL;
+	}
+
+	mlxcx_teardown(mlxp);
+	return (DDI_SUCCESS);
+}
+
+/*
+ * Work out how many RX ring groups we can advertise to MAC: start from
+ * the configured group counts and clamp against the hardware limits on
+ * TIRs, root flow table size, and total flow entries.
+ *
+ * Note: the size_t values are cast to uint_t for the %u conversions
+ * below -- passing a 64-bit size_t where %u expects an unsigned int is
+ * undefined behaviour in varargs calls on LP64.
+ */
+static size_t
+mlxcx_calc_rx_ngroups(mlxcx_t *mlxp)
+{
+	size_t ngroups = mlxp->mlx_props.mldp_rx_ngroups_large +
+	    mlxp->mlx_props.mldp_rx_ngroups_small;
+	size_t tirlim, flowlim, gflowlim;
+
+	/* Each ring group consumes a fixed number of TIRs. */
+	tirlim = mlxp->mlx_caps->mlc_max_tir / MLXCX_TIRS_PER_GROUP;
+	if (tirlim < ngroups) {
+		mlxcx_note(mlxp, "limiting number of rx groups to %u based "
+		    "on number of TIRs available", (uint_t)tirlim);
+		ngroups = tirlim;
+	}
+
+	/*
+	 * The root flow table needs an entry per group, with two entries
+	 * always reserved (broadcast and promisc).
+	 */
+	flowlim = (1 << mlxp->mlx_caps->mlc_max_rx_ft_shift) - 2;
+	if (flowlim < ngroups) {
+		mlxcx_note(mlxp, "limiting number of rx groups to %u based "
+		    "on max size of RX flow tables", (uint_t)flowlim);
+		ngroups = flowlim;
+	}
+
+	/*
+	 * Converge on a group count whose total flow entry demand (16 per
+	 * group plus the 2 reserved entries) fits within the device-wide
+	 * flow entry budget.
+	 */
+	do {
+		gflowlim = mlxp->mlx_caps->mlc_max_rx_flows - 16 * ngroups - 2;
+		if (gflowlim < ngroups) {
+			mlxcx_note(mlxp, "limiting number of rx groups to %u "
+			    "based on max total RX flows", (uint_t)gflowlim);
+			--ngroups;
+		}
+	} while (gflowlim < ngroups);
+
+	return (ngroups);
+}
+
+/*
+ * attach(9E) entry point: bring the device up from PCI config access
+ * all the way to MAC registration.  Each completed stage sets a bit in
+ * mlx_attach so that mlxcx_teardown() can unwind exactly what was done,
+ * both on the error path here and on detach.
+ */
+static int
+mlxcx_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+	mlxcx_t *mlxp;
+	uint_t i;
+	int inst, ret;
+
+	if (cmd != DDI_ATTACH)
+		return (DDI_FAILURE);
+
+	inst = ddi_get_instance(dip);
+	ret = ddi_soft_state_zalloc(mlxcx_softstate, inst);
+	if (ret != 0)
+		return (ret);
+
+	mlxp = ddi_get_soft_state(mlxcx_softstate, inst);
+	if (mlxp == NULL)
+		return (DDI_FAILURE);
+	mlxp->mlx_dip = dip;
+	mlxp->mlx_inst = inst;
+	ddi_set_driver_private(dip, mlxp);
+
+	/* Read tunables from mlxcx.conf into mlx_props. */
+	mlxcx_load_props(mlxp);
+
+	mlxcx_fm_init(mlxp);
+	mlxp->mlx_attach |= MLXCX_ATTACH_FM;
+
+	/* NOTE(review): message typo, "initial" -> "initialize". */
+	if (pci_config_setup(mlxp->mlx_dip, &mlxp->mlx_cfg_handle) !=
+	    DDI_SUCCESS) {
+		mlxcx_warn(mlxp, "failed to initial PCI config space");
+		goto err;
+	}
+	mlxp->mlx_attach |= MLXCX_ATTACH_PCI_CONFIG;
+
+	if (!mlxcx_regs_map(mlxp)) {
+		goto err;
+	}
+	mlxp->mlx_attach |= MLXCX_ATTACH_REGS;
+
+	if (!mlxcx_cmd_queue_init(mlxp)) {
+		goto err;
+	}
+	mlxp->mlx_attach |= MLXCX_ATTACH_CMD;
+
+	if (!mlxcx_cmd_enable_hca(mlxp)) {
+		goto err;
+	}
+	mlxp->mlx_attach |= MLXCX_ATTACH_ENABLE_HCA;
+
+	/* Negotiate the interface step sequence id with firmware. */
+	if (!mlxcx_check_issi(mlxp)) {
+		goto err;
+	}
+
+	/*
+	 * We have to get our interrupts now so we know what priority to
+	 * create pagemtx with.
+	 */
+	if (!mlxcx_intr_setup(mlxp)) {
+		goto err;
+	}
+	mlxp->mlx_attach |= MLXCX_ATTACH_INTRS;
+
+	mutex_init(&mlxp->mlx_pagemtx, NULL, MUTEX_DRIVER,
+	    DDI_INTR_PRI(mlxp->mlx_intr_pri));
+	avl_create(&mlxp->mlx_pages, mlxcx_page_compare,
+	    sizeof (mlxcx_dev_page_t), offsetof(mlxcx_dev_page_t, mxdp_tree));
+	mlxp->mlx_attach |= MLXCX_ATTACH_PAGE_LIST;
+
+	/* Give the device the pages it asks for at each init stage. */
+	if (!mlxcx_init_pages(mlxp, MLXCX_QUERY_PAGES_OPMOD_BOOT)) {
+		goto err;
+	}
+
+	if (!mlxcx_init_caps(mlxp)) {
+		goto err;
+	}
+	mlxp->mlx_attach |= MLXCX_ATTACH_CAPS;
+
+	if (!mlxcx_init_pages(mlxp, MLXCX_QUERY_PAGES_OPMOD_INIT)) {
+		goto err;
+	}
+
+	if (!mlxcx_cmd_init_hca(mlxp)) {
+		goto err;
+	}
+	mlxp->mlx_attach |= MLXCX_ATTACH_INIT_HCA;
+
+	if (!mlxcx_cmd_set_driver_version(mlxp, MLXCX_DRIVER_VERSION)) {
+		goto err;
+	}
+
+	/*
+	 * The User Access Region (UAR) is needed so we can ring EQ and CQ
+	 * doorbells.
+	 */
+	if (!mlxcx_cmd_alloc_uar(mlxp, &mlxp->mlx_uar)) {
+		goto err;
+	}
+	for (i = 0; i < MLXCX_BF_PER_UAR; ++i) {
+		mutex_init(&mlxp->mlx_uar.mlu_bf[i].mbf_mtx, NULL,
+		    MUTEX_DRIVER, DDI_INTR_PRI(mlxp->mlx_intr_pri));
+	}
+	mlxp->mlx_attach |= MLXCX_ATTACH_UAR_PD_TD;
+
+	/*
+	 * Set up event queue #0 -- it's special and only handles control
+	 * type events, like PAGE_REQUEST (which we will probably get during
+	 * the commands below).
+	 *
+	 * This will enable and arm the interrupt on EQ 0, too.
+	 */
+	if (!mlxcx_setup_eq0(mlxp)) {
+		goto err;
+	}
+
+	/*
+	 * Allocate a protection and transport domain. These don't really do
+	 * anything for us (they're IB concepts), but we need to give their
+	 * ID numbers in other commands.
+	 */
+	if (!mlxcx_cmd_alloc_pd(mlxp, &mlxp->mlx_pd)) {
+		goto err;
+	}
+	if (!mlxcx_cmd_alloc_tdom(mlxp, &mlxp->mlx_tdom)) {
+		goto err;
+	}
+	/*
+	 * Fetch the "reserved" lkey that lets us give linear addresses in
+	 * work queue entries, rather than having to mess with the NIC's
+	 * internal MMU.
+	 */
+	if (!mlxcx_cmd_query_special_ctxs(mlxp)) {
+		goto err;
+	}
+
+	/*
+	 * Query our port information and current state, populate the
+	 * mlxcx_port_t structs.
+	 *
+	 * This also sets up the root flow tables and flow groups.
+	 */
+	if (!mlxcx_setup_ports(mlxp)) {
+		goto err;
+	}
+	mlxp->mlx_attach |= MLXCX_ATTACH_PORTS;
+
+	/*
+	 * Set up, enable and arm the rest of the interrupt EQs which will
+	 * service events from CQs.
+	 *
+	 * The MLXCX_ATTACH_INTRS flag covers checking if these need to be
+	 * cleaned up.
+	 */
+	if (!mlxcx_setup_eqs(mlxp)) {
+		goto err;
+	}
+
+	/* Completion queues */
+	list_create(&mlxp->mlx_cqs, sizeof (mlxcx_completion_queue_t),
+	    offsetof(mlxcx_completion_queue_t, mlcq_entry));
+	mlxp->mlx_attach |= MLXCX_ATTACH_CQS;
+
+	/* Work queues (send queues, receive queues) */
+	list_create(&mlxp->mlx_wqs, sizeof (mlxcx_work_queue_t),
+	    offsetof(mlxcx_work_queue_t, mlwq_entry));
+	mlxp->mlx_attach |= MLXCX_ATTACH_WQS;
+
+	/* Set up periodic fault check timers which check the queue states */
+	if (!mlxcx_setup_checktimers(mlxp)) {
+		goto err;
+	}
+	mlxp->mlx_attach |= MLXCX_ATTACH_CHKTIMERS;
+
+	/*
+	 * Construct our arrays of mlxcx_ring_group_ts, which represent the
+	 * "groups" we advertise to MAC.
+	 */
+	mlxp->mlx_rx_ngroups = mlxcx_calc_rx_ngroups(mlxp);
+	mlxp->mlx_rx_groups_size = mlxp->mlx_rx_ngroups *
+	    sizeof (mlxcx_ring_group_t);
+	mlxp->mlx_rx_groups = kmem_zalloc(mlxp->mlx_rx_groups_size, KM_SLEEP);
+
+	mlxp->mlx_tx_ngroups = mlxp->mlx_props.mldp_tx_ngroups;
+	mlxp->mlx_tx_groups_size = mlxp->mlx_tx_ngroups *
+	    sizeof (mlxcx_ring_group_t);
+	mlxp->mlx_tx_groups = kmem_zalloc(mlxp->mlx_tx_groups_size, KM_SLEEP);
+
+	mlxp->mlx_attach |= MLXCX_ATTACH_GROUPS;
+
+	/*
+	 * Sets up the free/busy buffers list for keeping track of packet
+	 * buffers.
+	 */
+	if (!mlxcx_setup_bufs(mlxp))
+		goto err;
+	mlxp->mlx_attach |= MLXCX_ATTACH_BUFS;
+
+	/*
+	 * Before we tell MAC about our rings/groups, we need to do enough
+	 * setup on them to be sure about the numbers and configuration that
+	 * we have. This will do basically everything short of allocating
+	 * packet buffers and starting the rings up.
+	 */
+	for (i = 0; i < mlxp->mlx_tx_ngroups; ++i) {
+		if (!mlxcx_tx_group_setup(mlxp, &mlxp->mlx_tx_groups[i]))
+			goto err;
+	}
+	for (i = 0; i < mlxp->mlx_rx_ngroups; ++i) {
+		if (!mlxcx_rx_group_setup(mlxp, &mlxp->mlx_rx_groups[i]))
+			goto err;
+	}
+
+	/*
+	 * Finally, tell MAC that we exist!
+	 */
+	if (!mlxcx_register_mac(mlxp)) {
+		goto err;
+	}
+	mlxp->mlx_attach |= MLXCX_ATTACH_MAC_HDL;
+
+	return (DDI_SUCCESS);
+
+err:
+	/* Unwind whatever the mlx_attach bits say was completed. */
+	mlxcx_teardown(mlxp);
+	return (DDI_FAILURE);
+}
+
+/*
+ * Character/block device entry points.  mlxcx creates no minor nodes of
+ * its own (all access is via MAC/GLDv3), so everything is stubbed out
+ * with nulldev/nodev.
+ */
+static struct cb_ops mlxcx_cb_ops = {
+	.cb_open = nulldev,
+	.cb_close = nulldev,
+	.cb_strategy = nodev,
+	.cb_print = nodev,
+	.cb_dump = nodev,
+	.cb_read = nodev,
+	.cb_write = nodev,
+	.cb_ioctl = nodev,
+	.cb_devmap = nodev,
+	.cb_mmap = nodev,
+	.cb_segmap = nodev,
+	.cb_chpoll = nochpoll,
+	.cb_prop_op = ddi_prop_op,
+	.cb_flag = D_MP,
+	.cb_rev = CB_REV,
+	.cb_aread = nodev,
+	.cb_awrite = nodev
+};
+
+/*
+ * Device driver ops vector.  mac_init_ops() in _init() fills in the
+ * MAC-related pieces before mod_install() runs.
+ */
+static struct dev_ops mlxcx_dev_ops = {
+	.devo_rev = DEVO_REV,
+	.devo_refcnt = 0,
+	.devo_getinfo = NULL,
+	.devo_identify = nulldev,
+	.devo_probe = nulldev,
+	.devo_attach = mlxcx_attach,
+	.devo_detach = mlxcx_detach,
+	.devo_reset = nodev,
+	.devo_power = ddi_power,
+	.devo_quiesce = ddi_quiesce_not_supported,
+	.devo_cb_ops = &mlxcx_cb_ops
+};
+
+/* Loadable driver module description. */
+static struct modldrv mlxcx_modldrv = {
+	.drv_modops = &mod_driverops,
+	.drv_linkinfo = "Mellanox Connect-X 4/5/6",
+	.drv_dev_ops = &mlxcx_dev_ops
+};
+
+/* Module linkage: a single driver module. */
+static struct modlinkage mlxcx_modlinkage = {
+	.ml_rev = MODREV_1,
+	.ml_linkage = { &mlxcx_modldrv, NULL }
+};
+
+/*
+ * Module load entry point: initialise the soft-state, hook the MAC
+ * entry points into our dev_ops, and register with the module system.
+ */
+int
+_init(void)
+{
+	int ret;
+
+	ret = ddi_soft_state_init(&mlxcx_softstate, sizeof (mlxcx_t), 0);
+	if (ret != 0) {
+		return (ret);
+	}
+
+	/* Must happen before mod_install() so MAC fills in our dev_ops. */
+	mac_init_ops(&mlxcx_dev_ops, MLXCX_MODULE_NAME);
+
+	if ((ret = mod_install(&mlxcx_modlinkage)) != DDI_SUCCESS) {
+		/* Unwind in reverse order on failure. */
+		mac_fini_ops(&mlxcx_dev_ops);
+		ddi_soft_state_fini(&mlxcx_softstate);
+		return (ret);
+	}
+
+	return (DDI_SUCCESS);
+}
+
+/* Module information entry point. */
+int
+_info(struct modinfo *modinfop)
+{
+	return (mod_info(&mlxcx_modlinkage, modinfop));
+}
+
+/*
+ * Module unload entry point: remove the module first (which fails if
+ * any instance is still attached), then release MAC ops and soft-state.
+ */
+int
+_fini(void)
+{
+	int ret;
+
+	if ((ret = mod_remove(&mlxcx_modlinkage)) != DDI_SUCCESS) {
+		return (ret);
+	}
+
+	mac_fini_ops(&mlxcx_dev_ops);
+
+	ddi_soft_state_fini(&mlxcx_softstate);
+
+	return (DDI_SUCCESS);
+}
diff --git a/usr/src/uts/common/io/mlxcx/mlxcx.conf b/usr/src/uts/common/io/mlxcx/mlxcx.conf
new file mode 100644
index 0000000000..3569c4e5f5
--- /dev/null
+++ b/usr/src/uts/common/io/mlxcx/mlxcx.conf
@@ -0,0 +1,101 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2018, Joyent, Inc.
+# Copyright 2020, The University of Queensland
+#
+
+#
+# Driver.conf file for Mellanox Connect-X 4/5/6.
+# See mlxcx(7D) for valid options.
+#
+
+#
+# Sizing of event and completion queues.
+#
+# The number of entries on each queue will be (1 << *_size_shift) -- so
+# a value of 9 would mean 512 entries.
+#
+#eq_size_shift = 9;
+#cq_size_shift = 10;
+
+#
+# Sizing of send and receive queues.
+#
+# Note that this determines the size of the RX and TX rings that mlxcx will
+# advertise to MAC. It also determines how many packet buffers we will allocate
+# when starting the interface.
+#
+#sq_size_shift = 11;
+#rq_size_shift = 10;
+
+#
+# Number and configuration of TX groups and rings.
+#
+#tx_ngroups = 1;
+#tx_nrings_per_group = 64;
+
+#
+# Number and configuration of RX groups and rings.
+#
+#rx_ngroups_large = 2;
+#rx_nrings_per_large_group = 16;
+#rx_ngroups_small = 256;
+#rx_nrings_per_small_group = 4;
+
+#
+# Number of flow table entries allocated to root flow tables.
+#
+# This places an upper ceiling on how many MAC addresses can be filtered into
+# groups across the whole NIC. If you have a lot of VNICs you might want to
+# raise this (and probably also rx_ngroups_small).
+#
+#ftbl_root_size_shift = 12;
+
+#
+# Number of flow table entries allocated to each L1 VLAN filtering table.
+#
+# This places a limit on the number of VLANs that one MAC address can be
+# associated with before falling back to software classification. Two entries
+# are always reserved for the non-VLAN catch-all and promisc entries.
+#
+# Note: illumos MAC today only supports giving a single VLAN per MAC address
+# to hardware drivers anyway, so setting this higher is pointless until that
+# changes.
+#
+#ftbl_vlan_size_shift = 4;
+
+#
+# Interrupt and completion moderation.
+#
+#cqemod_period_usec = 50;
+#cqemod_count = <80% of cq_size>;
+#intrmod_period_usec = 10;
+
+#
+# Minimum packet size before we DMA-bind (ddi_dma_addr_bind_handle(9F)) rather than bcopy()
+# of the packet data. DMA binds are expensive and involve taking locks in the
+# PCI nexus driver, so it's seldom worth doing them for small packets.
+#
+#tx_bind_threshold = 2048;
+
+#
+# Interval between periodic double-checks of queue status against hardware
+# state. This is used to detect hardware stalls or errors, as well as guard
+# against driver bugs.
+#
+# If set to too high a frequency, checks may impact NIC performance. Can be
+# set to zero to disable periodic checking entirely.
+#
+#eq_check_interval_sec = 30;
+#cq_check_interval_sec = 300;
+#wq_check_interval_sec = 300;
diff --git a/usr/src/uts/common/io/mlxcx/mlxcx.h b/usr/src/uts/common/io/mlxcx/mlxcx.h
new file mode 100644
index 0000000000..3b58989961
--- /dev/null
+++ b/usr/src/uts/common/io/mlxcx/mlxcx.h
@@ -0,0 +1,1298 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2020, The University of Queensland
+ * Copyright (c) 2018, Joyent, Inc.
+ */
+
+/*
+ * Mellanox Connect-X 4/5/6 driver.
+ *
+ * More details in mlxcx.c
+ */
+
+#ifndef _MLXCX_H
+#define _MLXCX_H
+
+/*
+ * mlxcx(7D) definitions
+ */
+
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/ddifm.h>
+#include <sys/id_space.h>
+#include <sys/list.h>
+#include <sys/stddef.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/mac_provider.h>
+#include <sys/mac_ether.h>
+#include <sys/cpuvar.h>
+#include <sys/ethernet.h>
+
+#include <inet/ip.h>
+#include <inet/ip6.h>
+
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/io/ddi.h>
+
+#include <mlxcx_reg.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Get access to the first PCI BAR.
+ */
+#define MLXCX_REG_NUMBER 1
+
+/*
+ * The command queue is supposed to be a page, which is 4k.
+ */
+#define MLXCX_CMD_DMA_PAGE_SIZE 4096
+
+/*
+ * Queues can allocate in units of this much memory.
+ */
+#define MLXCX_QUEUE_DMA_PAGE_SIZE 4096
+
+/*
+ * We advertise two sizes of groups to MAC -- a certain number of "large"
+ * groups (including the default group, which is sized to at least ncpus)
+ * followed by a certain number of "small" groups.
+ *
+ * This allows us to have a larger amount of classification resources available
+ * for zones/VMs without resorting to software classification.
+ */
+#define MLXCX_RX_NGROUPS_LARGE_DFLT 2
+#define MLXCX_RX_NRINGS_PER_LARGE_GROUP_DFLT 16
+#define MLXCX_RX_NGROUPS_SMALL_DFLT 256
+#define MLXCX_RX_NRINGS_PER_SMALL_GROUP_DFLT 4
+
+#define MLXCX_TX_NGROUPS_DFLT 1
+#define MLXCX_TX_NRINGS_PER_GROUP_DFLT 64
+
+/*
+ * Queues will be sized to (1 << *Q_SIZE_SHIFT) entries long.
+ */
+#define MLXCX_EQ_SIZE_SHIFT_DFLT 9
+#define MLXCX_CQ_SIZE_SHIFT_DFLT 10
+
+/*
+ * Default to making SQs bigger than RQs for 9k MTU, since most packets will
+ * spill over into more than one slot. RQ WQEs are always 1 slot.
+ */
+#define MLXCX_SQ_SIZE_SHIFT_DFLT 11
+#define MLXCX_RQ_SIZE_SHIFT_DFLT 10
+
+#define MLXCX_CQ_HWM_GAP 16
+#define MLXCX_CQ_LWM_GAP 24
+
+#define MLXCX_RQ_REFILL_STEP 64
+
+/*
+ * CQ event moderation
+ */
+#define MLXCX_CQEMOD_PERIOD_USEC_DFLT 50
+#define MLXCX_CQEMOD_COUNT_DFLT \
+ (8 * ((1 << MLXCX_CQ_SIZE_SHIFT_DFLT) / 10))
+
+/*
+ * EQ interrupt moderation
+ */
+#define MLXCX_INTRMOD_PERIOD_USEC_DFLT 10
+
+/* Size of root flow tables */
+#define MLXCX_FTBL_ROOT_SIZE_SHIFT_DFLT 12
+
+/* Size of 2nd level flow tables for VLAN filtering */
+#define MLXCX_FTBL_VLAN_SIZE_SHIFT_DFLT 4
+
+/*
+ * How big does an mblk have to be before we dma_bind() it instead of
+ * bcopying?
+ */
+#define MLXCX_TX_BIND_THRESHOLD_DFLT 2048
+
+/*
+ * How often to check the status of completion queues for overflow and
+ * other problems.
+ */
+#define MLXCX_WQ_CHECK_INTERVAL_SEC_DFLT 300
+#define MLXCX_CQ_CHECK_INTERVAL_SEC_DFLT 300
+#define MLXCX_EQ_CHECK_INTERVAL_SEC_DFLT 30
+
+/*
+ * Number of attempts for doorbell writes before giving up -- presumably
+ * guarding against transient register-access failures; TODO confirm
+ * against the implementation. Tunable via the global below.
+ */
+#define MLXCX_DOORBELL_TRIES_DFLT 3
+extern uint_t mlxcx_doorbell_tries;
+
+/*
+ * NOTE(review): inferred from the name -- apparently how many
+ * consecutive interrupts with no work we tolerate before treating the
+ * interrupt as stuck; confirm in mlxcx_intr.c. Tunable via the global.
+ */
+#define MLXCX_STUCK_INTR_COUNT_DFLT 128
+extern uint_t mlxcx_stuck_intr_count;
+
+/* Max DMA-bind attempts per buffer. The "ATTEMTPS" typo is in the name. */
+#define MLXCX_BUF_BIND_MAX_ATTEMTPS 50
+
+#define MLXCX_MTU_OFFSET \
+ (sizeof (struct ether_vlan_header) + ETHERFCSL)
+
+/*
+ * This is the current version of the command structure that the driver expects
+ * to be found in the ISS.
+ */
+#define MLXCX_CMD_REVISION 5
+
+#ifdef DEBUG
+#define MLXCX_DMA_SYNC(dma, flag) VERIFY0(ddi_dma_sync( \
+ (dma).mxdb_dma_handle, 0, 0, \
+ (flag)))
+#else
+#define MLXCX_DMA_SYNC(dma, flag) (void) ddi_dma_sync( \
+ (dma).mxdb_dma_handle, 0, 0, \
+ (flag))
+#endif
+
+#define MLXCX_FM_SERVICE_MLXCX "mlxcx"
+
+/*
+ * This macro defines the expected value of the 'Interface Step Sequence ID'
+ * (issi) which represents the version of the start up and tear down sequence.
+ * We must check that hardware supports this and tell it which version we're
+ * using as well.
+ */
+#define MLXCX_CURRENT_ISSI 1
+
+/*
+ * This is the size of a page that the hardware expects from us when
+ * manipulating pages.
+ */
+#define MLXCX_HW_PAGE_SIZE 4096
+
+/*
+ * This is a special lkey value used to terminate a list of scatter pointers.
+ */
+#define MLXCX_NULL_LKEY 0x100
+
+/*
+ * Forwards
+ */
+struct mlxcx;
+typedef struct mlxcx mlxcx_t;
+
+/*
+ * Tracks which stages of DMA buffer setup (handle allocation, memory
+ * allocation, bind) have completed, so teardown can unwind exactly what
+ * was done. FOREIGN marks memory we did not allocate ourselves
+ * (presumably bound mblk data -- see mlxcx_dma_bind_mblk()).
+ */
+typedef enum {
+ MLXCX_DMABUF_HDL_ALLOC = 1 << 0,
+ MLXCX_DMABUF_MEM_ALLOC = 1 << 1,
+ MLXCX_DMABUF_BOUND = 1 << 2,
+ MLXCX_DMABUF_FOREIGN = 1 << 3,
+} mlxcx_dma_buffer_flags_t;
+
+/*
+ * A single DMA-able buffer: its kernel virtual address and length plus
+ * the DDI access/DMA handles that back it.
+ */
+typedef struct mlxcx_dma_buffer {
+ mlxcx_dma_buffer_flags_t mxdb_flags;
+ caddr_t mxdb_va; /* Buffer VA */
+ size_t mxdb_len; /* Buffer logical len */
+ ddi_acc_handle_t mxdb_acc_handle;
+ ddi_dma_handle_t mxdb_dma_handle;
+ uint_t mxdb_ncookies;
+} mlxcx_dma_buffer_t;
+
+/*
+ * A page handed to the hardware; kept on both a list and an AVL tree
+ * keyed by physical address (mxdp_pa) -- see mlx_pages in struct mlxcx.
+ */
+typedef struct mlxcx_dev_page {
+ list_node_t mxdp_list;
+ avl_node_t mxdp_tree;
+ uintptr_t mxdp_pa;
+ mlxcx_dma_buffer_t mxdp_dma;
+} mlxcx_dev_page_t;
+
+/*
+ * Data structure to keep track of all information related to the command queue.
+ */
+/* Lifecycle of the command queue itself. */
+typedef enum {
+ MLXCX_CMD_QUEUE_S_IDLE = 1,
+ MLXCX_CMD_QUEUE_S_BUSY,
+ MLXCX_CMD_QUEUE_S_BROKEN
+} mlxcx_cmd_queue_status_t;
+
+/* State for the firmware command queue (mlx_cmd in struct mlxcx). */
+typedef struct mlxcx_cmd_queue {
+ kmutex_t mcmd_lock;
+ kcondvar_t mcmd_cv;
+ mlxcx_dma_buffer_t mcmd_dma;
+ mlxcx_cmd_ent_t *mcmd_ent;
+
+ /* presumably log2 queue depth and entry stride -- TODO confirm */
+ uint8_t mcmd_size_l2;
+ uint8_t mcmd_stride_l2;
+
+ mlxcx_cmd_queue_status_t mcmd_status;
+
+ ddi_taskq_t *mcmd_taskq;
+ id_space_t *mcmd_tokens; /* allocator for mlcmd_token values */
+} mlxcx_cmd_queue_t;
+
+/*
+ * An extended mailbox carrying command input/output that does not fit in
+ * the inline command entry. NOTE(review): the struct tag "mlxcd" looks
+ * like a typo for "mlxcx"; harmless as long as nothing uses the tag.
+ */
+typedef struct mlxcd_cmd_mbox {
+ list_node_t mlbox_node;
+ mlxcx_dma_buffer_t mlbox_dma;
+ mlxcx_cmd_mailbox_t *mlbox_data;
+} mlxcx_cmd_mbox_t;
+
+typedef enum {
+ MLXCX_EQ_ALLOC = 1 << 0, /* dma mem alloc'd, size set */
+ MLXCX_EQ_CREATED = 1 << 1, /* CREATE_EQ sent to hw */
+ MLXCX_EQ_DESTROYED = 1 << 2, /* DESTROY_EQ sent to hw */
+ MLXCX_EQ_ARMED = 1 << 3, /* Armed through the UAR */
+ MLXCX_EQ_POLLING = 1 << 4, /* Currently being polled */
+} mlxcx_eventq_state_t;
+
+typedef struct mlxcx_bf {
+ kmutex_t mbf_mtx;
+ uint_t mbf_cnt;
+ uint_t mbf_even;
+ uint_t mbf_odd;
+} mlxcx_bf_t;
+
+typedef struct mlxcx_uar {
+ boolean_t mlu_allocated;
+ uint_t mlu_num;
+ uint_t mlu_base;
+
+ volatile uint_t mlu_bfcnt;
+ mlxcx_bf_t mlu_bf[MLXCX_BF_PER_UAR];
+} mlxcx_uar_t;
+
+typedef struct mlxcx_pd {
+ boolean_t mlpd_allocated;
+ uint32_t mlpd_num;
+} mlxcx_pd_t;
+
+typedef struct mlxcx_tdom {
+ boolean_t mltd_allocated;
+ uint32_t mltd_num;
+} mlxcx_tdom_t;
+
+typedef enum {
+ MLXCX_PORT_VPORT_PROMISC = 1 << 0,
+} mlxcx_port_flags_t;
+
+typedef struct mlxcx_flow_table mlxcx_flow_table_t;
+typedef struct mlxcx_flow_group mlxcx_flow_group_t;
+
+typedef struct {
+ uint64_t mlps_rx_drops;
+} mlxcx_port_stats_t;
+
+typedef enum {
+ MLXCX_PORT_INIT = 1 << 0
+} mlxcx_port_init_t;
+
+typedef struct mlxcx_port {
+ kmutex_t mlp_mtx;
+ mlxcx_port_init_t mlp_init;
+ mlxcx_t *mlp_mlx;
+ /*
+ * The mlp_num we have here starts at zero (it's an index), but the
+ * numbering we have to use for register access starts at 1. We
+ * currently write mlp_num into the other_vport fields in mlxcx_cmd.c
+ * (where 0 is a magic number meaning "my vport") so if we ever add
+ * support for virtualisation features and deal with more than one
+ * vport, we will probably have to change this.
+ */
+ uint_t mlp_num;
+ mlxcx_port_flags_t mlp_flags;
+ uint64_t mlp_guid;
+ uint8_t mlp_mac_address[ETHERADDRL];
+
+ uint_t mlp_mtu;
+ uint_t mlp_max_mtu;
+
+ mlxcx_port_status_t mlp_admin_status;
+ mlxcx_port_status_t mlp_oper_status;
+
+ boolean_t mlp_autoneg;
+ mlxcx_eth_proto_t mlp_max_proto;
+ mlxcx_eth_proto_t mlp_admin_proto;
+ mlxcx_eth_proto_t mlp_oper_proto;
+
+ mlxcx_eth_inline_mode_t mlp_wqe_min_inline;
+
+ /* Root flow tables */
+ mlxcx_flow_table_t *mlp_rx_flow;
+ mlxcx_flow_table_t *mlp_tx_flow;
+
+ mlxcx_flow_group_t *mlp_promisc;
+ mlxcx_flow_group_t *mlp_bcast;
+ mlxcx_flow_group_t *mlp_umcast;
+
+ avl_tree_t mlp_dmac_fe;
+
+ mlxcx_port_stats_t mlp_stats;
+
+ mlxcx_module_status_t mlp_last_modstate;
+ mlxcx_module_error_type_t mlp_last_moderr;
+} mlxcx_port_t;
+
+typedef enum {
+ MLXCX_EQ_TYPE_ANY,
+ MLXCX_EQ_TYPE_RX,
+ MLXCX_EQ_TYPE_TX
+} mlxcx_eventq_type_t;
+
+typedef struct mlxcx_event_queue {
+ kmutex_t mleq_mtx;
+ mlxcx_t *mleq_mlx;
+ mlxcx_eventq_state_t mleq_state;
+ mlxcx_eventq_type_t mleq_type;
+
+ mlxcx_dma_buffer_t mleq_dma;
+
+ size_t mleq_entshift;
+ size_t mleq_nents;
+ mlxcx_eventq_ent_t *mleq_ent;
+ uint32_t mleq_cc; /* consumer counter */
+ uint32_t mleq_cc_armed;
+
+ uint32_t mleq_events;
+
+ uint32_t mleq_badintrs;
+
+ /* Hardware eq number */
+ uint_t mleq_num;
+ /* Index into the mlxcx_t's interrupts array */
+ uint_t mleq_intr_index;
+
+ /* UAR region that has this EQ's doorbell in it */
+ mlxcx_uar_t *mleq_uar;
+
+ /* Tree of CQn => mlxcx_completion_queue_t */
+ avl_tree_t mleq_cqs;
+
+ uint32_t mleq_check_disarm_cc;
+ uint_t mleq_check_disarm_cnt;
+} mlxcx_event_queue_t;
+
+typedef enum {
+ MLXCX_TIS_CREATED = 1 << 0,
+ MLXCX_TIS_DESTROYED = 1 << 1,
+} mlxcx_tis_state_t;
+
+typedef struct mlxcx_tis {
+ mlxcx_tis_state_t mltis_state;
+ list_node_t mltis_entry;
+ uint_t mltis_num;
+ mlxcx_tdom_t *mltis_tdom;
+} mlxcx_tis_t;
+
+/*
+ * Where a packet buffer currently is in its lifecycle: free in a shard,
+ * posted on a work queue, loaned out (see mlxcx_buf_loan()), or part of
+ * a TX chain (see mlxcx_buf_return_chain()).
+ */
+typedef enum {
+ MLXCX_BUFFER_INIT,
+ MLXCX_BUFFER_FREE,
+ MLXCX_BUFFER_ON_WQ,
+ MLXCX_BUFFER_ON_LOAN,
+ MLXCX_BUFFER_ON_CHAIN,
+} mlxcx_buffer_state_t;
+
+/*
+ * A shard of buffers: busy and free lists protected by one mutex, with a
+ * CV for waiting until the free list becomes non-empty.
+ */
+typedef struct mlxcx_buf_shard {
+ list_node_t mlbs_entry;
+ kmutex_t mlbs_mtx;
+ list_t mlbs_busy;
+ list_t mlbs_free;
+ kcondvar_t mlbs_free_nonempty;
+} mlxcx_buf_shard_t;
+
+/* A single packet buffer and its DMA/mblk state. */
+typedef struct mlxcx_buffer {
+ mlxcx_buf_shard_t *mlb_shard;
+ list_node_t mlb_entry;
+ list_node_t mlb_cq_entry;
+
+ struct mlxcx_buffer *mlb_tx_head; /* head of tx chain */
+ list_t mlb_tx_chain;
+ list_node_t mlb_tx_chain_entry;
+
+ boolean_t mlb_foreign; /* data not allocated by us */
+ size_t mlb_used;
+ mblk_t *mlb_tx_mp;
+
+ mlxcx_t *mlb_mlx;
+ mlxcx_buffer_state_t mlb_state;
+ uint_t mlb_wqe_index;
+ mlxcx_dma_buffer_t mlb_dma;
+ mblk_t *mlb_mp;
+ frtn_t mlb_frtn; /* mblk free-routine descriptor */
+} mlxcx_buffer_t;
+
+typedef enum {
+ MLXCX_CQ_ALLOC = 1 << 0,
+ MLXCX_CQ_CREATED = 1 << 1,
+ MLXCX_CQ_DESTROYED = 1 << 2,
+ MLXCX_CQ_EQAVL = 1 << 3,
+ MLXCX_CQ_BLOCKED_MAC = 1 << 4,
+ MLXCX_CQ_TEARDOWN = 1 << 5,
+ MLXCX_CQ_POLLING = 1 << 6,
+ MLXCX_CQ_ARMED = 1 << 7,
+} mlxcx_completionq_state_t;
+
+typedef struct mlxcx_work_queue mlxcx_work_queue_t;
+
+typedef struct mlxcx_completion_queue {
+ kmutex_t mlcq_mtx;
+ mlxcx_t *mlcq_mlx;
+ mlxcx_completionq_state_t mlcq_state;
+
+ mlxcx_port_stats_t *mlcq_stats;
+
+ list_node_t mlcq_entry;
+ avl_node_t mlcq_eq_entry;
+
+ uint_t mlcq_num;
+
+ mlxcx_work_queue_t *mlcq_wq;
+ mlxcx_event_queue_t *mlcq_eq;
+
+ /* UAR region that has this CQ's UAR doorbell in it */
+ mlxcx_uar_t *mlcq_uar;
+
+ mlxcx_dma_buffer_t mlcq_dma;
+
+ size_t mlcq_entshift;
+ size_t mlcq_nents;
+ mlxcx_completionq_ent_t *mlcq_ent;
+ uint32_t mlcq_cc; /* consumer counter */
+ uint32_t mlcq_cc_armed; /* cc at last arm */
+ uint32_t mlcq_ec; /* event counter */
+ uint32_t mlcq_ec_armed; /* ec at last arm */
+
+ mlxcx_dma_buffer_t mlcq_doorbell_dma;
+ mlxcx_completionq_doorbell_t *mlcq_doorbell;
+
+ uint64_t mlcq_bufcnt;
+ size_t mlcq_bufhwm;
+ size_t mlcq_buflwm;
+ list_t mlcq_buffers;
+ kmutex_t mlcq_bufbmtx;
+ list_t mlcq_buffers_b;
+
+ uint_t mlcq_check_disarm_cnt;
+ uint64_t mlcq_check_disarm_cc;
+
+ uint_t mlcq_cqemod_period_usec;
+ uint_t mlcq_cqemod_count;
+
+ mac_ring_handle_t mlcq_mac_hdl;
+ uint64_t mlcq_mac_gen;
+
+ boolean_t mlcq_fm_repd_qstate;
+} mlxcx_completion_queue_t;
+
+typedef enum {
+ MLXCX_WQ_ALLOC = 1 << 0,
+ MLXCX_WQ_CREATED = 1 << 1,
+ MLXCX_WQ_STARTED = 1 << 2,
+ MLXCX_WQ_DESTROYED = 1 << 3,
+ MLXCX_WQ_TEARDOWN = 1 << 4,
+ MLXCX_WQ_BUFFERS = 1 << 5,
+} mlxcx_workq_state_t;
+
+typedef enum {
+ MLXCX_WQ_TYPE_SENDQ = 1,
+ MLXCX_WQ_TYPE_RECVQ
+} mlxcx_workq_type_t;
+
+typedef struct mlxcx_ring_group mlxcx_ring_group_t;
+
+struct mlxcx_work_queue {
+ kmutex_t mlwq_mtx;
+ mlxcx_t *mlwq_mlx;
+ mlxcx_workq_type_t mlwq_type;
+ mlxcx_workq_state_t mlwq_state;
+
+ list_node_t mlwq_entry;
+ list_node_t mlwq_group_entry;
+
+ mlxcx_ring_group_t *mlwq_group;
+
+ uint_t mlwq_num;
+
+ mlxcx_completion_queue_t *mlwq_cq;
+ mlxcx_pd_t *mlwq_pd;
+
+ /* Required for send queues */
+ mlxcx_tis_t *mlwq_tis;
+
+ /* UAR region that has this WQ's blueflame buffers in it */
+ mlxcx_uar_t *mlwq_uar;
+
+ mlxcx_dma_buffer_t mlwq_dma;
+
+ mlxcx_eth_inline_mode_t mlwq_inline_mode;
+ size_t mlwq_entshift;
+ size_t mlwq_nents;
+ /* Discriminate based on mwq_type */
+ union {
+ mlxcx_sendq_ent_t *mlwq_send_ent;
+ mlxcx_sendq_extra_ent_t *mlwq_send_extra_ent;
+ mlxcx_recvq_ent_t *mlwq_recv_ent;
+ mlxcx_sendq_bf_t *mlwq_bf_ent;
+ };
+ uint64_t mlwq_pc; /* producer counter */
+
+ mlxcx_dma_buffer_t mlwq_doorbell_dma;
+ mlxcx_workq_doorbell_t *mlwq_doorbell;
+
+ mlxcx_buf_shard_t *mlwq_bufs;
+ mlxcx_buf_shard_t *mlwq_foreign_bufs;
+
+ boolean_t mlwq_fm_repd_qstate;
+};
+
+#define MLXCX_RQT_MAX_SIZE 64
+
+typedef enum {
+ MLXCX_RQT_CREATED = 1 << 0,
+ MLXCX_RQT_DESTROYED = 1 << 1,
+ MLXCX_RQT_DIRTY = 1 << 2,
+} mlxcx_rqtable_state_t;
+
+typedef struct mlxcx_rqtable {
+ mlxcx_rqtable_state_t mlrqt_state;
+ list_node_t mlrqt_entry;
+ uint_t mlrqt_num;
+
+ size_t mlrqt_max;
+ size_t mlrqt_used;
+
+ size_t mlrqt_rq_size;
+ mlxcx_work_queue_t **mlrqt_rq;
+} mlxcx_rqtable_t;
+
+typedef enum {
+ MLXCX_TIR_CREATED = 1 << 0,
+ MLXCX_TIR_DESTROYED = 1 << 1,
+} mlxcx_tir_state_t;
+
+typedef struct mlxcx_tir {
+ mlxcx_tir_state_t mltir_state;
+ list_node_t mltir_entry;
+ uint_t mltir_num;
+ mlxcx_tdom_t *mltir_tdom;
+ mlxcx_tir_type_t mltir_type;
+ union {
+ mlxcx_rqtable_t *mltir_rqtable;
+ mlxcx_work_queue_t *mltir_rq;
+ };
+ mlxcx_tir_hash_fn_t mltir_hash_fn;
+ uint8_t mltir_toeplitz_key[40];
+ mlxcx_tir_rx_hash_l3_type_t mltir_l3_type;
+ mlxcx_tir_rx_hash_l4_type_t mltir_l4_type;
+ mlxcx_tir_rx_hash_fields_t mltir_hash_fields;
+} mlxcx_tir_t;
+
+typedef enum {
+ MLXCX_FLOW_GROUP_CREATED = 1 << 0,
+ MLXCX_FLOW_GROUP_BUSY = 1 << 1,
+ MLXCX_FLOW_GROUP_DESTROYED = 1 << 2,
+} mlxcx_flow_group_state_t;
+
+typedef enum {
+ MLXCX_FLOW_MATCH_SMAC = 1 << 0,
+ MLXCX_FLOW_MATCH_DMAC = 1 << 1,
+ MLXCX_FLOW_MATCH_VLAN = 1 << 2,
+ MLXCX_FLOW_MATCH_VID = 1 << 3,
+ MLXCX_FLOW_MATCH_IP_VER = 1 << 4,
+ MLXCX_FLOW_MATCH_SRCIP = 1 << 5,
+ MLXCX_FLOW_MATCH_DSTIP = 1 << 6,
+ MLXCX_FLOW_MATCH_IP_PROTO = 1 << 7,
+ MLXCX_FLOW_MATCH_SQN = 1 << 8,
+ MLXCX_FLOW_MATCH_VXLAN = 1 << 9,
+} mlxcx_flow_mask_t;
+
+struct mlxcx_flow_group {
+ list_node_t mlfg_entry;
+ list_node_t mlfg_role_entry;
+ mlxcx_flow_group_state_t mlfg_state;
+ mlxcx_flow_table_t *mlfg_table;
+ uint_t mlfg_num;
+ size_t mlfg_start_idx;
+ size_t mlfg_size;
+ size_t mlfg_avail;
+ list_t mlfg_entries;
+ mlxcx_flow_mask_t mlfg_mask;
+};
+
+typedef enum {
+ MLXCX_FLOW_ENTRY_RESERVED = 1 << 0,
+ MLXCX_FLOW_ENTRY_CREATED = 1 << 1,
+ MLXCX_FLOW_ENTRY_DELETED = 1 << 2,
+ MLXCX_FLOW_ENTRY_DIRTY = 1 << 3,
+} mlxcx_flow_entry_state_t;
+
+typedef struct {
+ mlxcx_tir_t *mlfed_tir;
+ mlxcx_flow_table_t *mlfed_flow;
+} mlxcx_flow_entry_dest_t;
+
+typedef struct mlxcx_flow_entry {
+ list_node_t mlfe_group_entry;
+ avl_node_t mlfe_dmac_entry;
+ mlxcx_flow_entry_state_t mlfe_state;
+ mlxcx_flow_table_t *mlfe_table;
+ mlxcx_flow_group_t *mlfe_group;
+ uint_t mlfe_index;
+
+ mlxcx_flow_action_t mlfe_action;
+
+ /* Criteria for match */
+ uint8_t mlfe_smac[ETHERADDRL];
+ uint8_t mlfe_dmac[ETHERADDRL];
+
+ mlxcx_vlan_type_t mlfe_vlan_type;
+ uint16_t mlfe_vid;
+
+ uint_t mlfe_ip_version;
+ uint8_t mlfe_srcip[IPV6_ADDR_LEN];
+ uint8_t mlfe_dstip[IPV6_ADDR_LEN];
+
+ uint_t mlfe_ip_proto;
+ uint16_t mlfe_sport;
+ uint16_t mlfe_dport;
+
+ uint32_t mlfe_sqn;
+ uint32_t mlfe_vxlan_vni;
+
+ /* Destinations */
+ size_t mlfe_ndest;
+ mlxcx_flow_entry_dest_t mlfe_dest[MLXCX_FLOW_MAX_DESTINATIONS];
+
+ /*
+ * mlxcx_group_mac_ts joining this entry to N ring groups
+ * only used by FEs on the root rx flow table
+ */
+ list_t mlfe_ring_groups;
+} mlxcx_flow_entry_t;
+
+typedef enum {
+ MLXCX_FLOW_TABLE_CREATED = 1 << 0,
+ MLXCX_FLOW_TABLE_DESTROYED = 1 << 1,
+ MLXCX_FLOW_TABLE_ROOT = 1 << 2
+} mlxcx_flow_table_state_t;
+
+struct mlxcx_flow_table {
+ kmutex_t mlft_mtx;
+ mlxcx_flow_table_state_t mlft_state;
+ uint_t mlft_level;
+ uint_t mlft_num;
+ mlxcx_flow_table_type_t mlft_type;
+
+ mlxcx_port_t *mlft_port;
+
+ size_t mlft_entshift;
+ size_t mlft_nents;
+
+ size_t mlft_entsize;
+ mlxcx_flow_entry_t *mlft_ent;
+
+ /* First entry not yet claimed by a group */
+ size_t mlft_next_ent;
+
+ list_t mlft_groups;
+};
+
+typedef enum {
+ MLXCX_GROUP_RX,
+ MLXCX_GROUP_TX
+} mlxcx_group_type_t;
+
+typedef enum {
+ MLXCX_GROUP_INIT = 1 << 0,
+ MLXCX_GROUP_WQS = 1 << 1,
+ MLXCX_GROUP_TIRTIS = 1 << 2,
+ MLXCX_GROUP_FLOWS = 1 << 3,
+ MLXCX_GROUP_RUNNING = 1 << 4,
+ MLXCX_GROUP_RQT = 1 << 5,
+} mlxcx_group_state_t;
+
+#define MLXCX_RX_HASH_FT_SIZE_SHIFT 4
+
+typedef enum {
+ MLXCX_TIR_ROLE_IPv4 = 0,
+ MLXCX_TIR_ROLE_IPv6,
+ MLXCX_TIR_ROLE_TCPv4,
+ MLXCX_TIR_ROLE_TCPv6,
+ MLXCX_TIR_ROLE_UDPv4,
+ MLXCX_TIR_ROLE_UDPv6,
+ MLXCX_TIR_ROLE_OTHER,
+
+ MLXCX_TIRS_PER_GROUP
+} mlxcx_tir_role_t;
+
+typedef struct {
+ avl_node_t mlgm_group_entry;
+ list_node_t mlgm_fe_entry;
+ mlxcx_ring_group_t *mlgm_group;
+ uint8_t mlgm_mac[6];
+ mlxcx_flow_entry_t *mlgm_fe;
+} mlxcx_group_mac_t;
+
+typedef struct {
+ list_node_t mlgv_entry;
+ boolean_t mlgv_tagged;
+ uint16_t mlgv_vid;
+ mlxcx_flow_entry_t *mlgv_fe;
+} mlxcx_group_vlan_t;
+
+struct mlxcx_ring_group {
+ kmutex_t mlg_mtx;
+ mlxcx_t *mlg_mlx;
+ mlxcx_group_state_t mlg_state;
+ mlxcx_group_type_t mlg_type;
+
+ mac_group_handle_t mlg_mac_hdl;
+
+ union {
+ mlxcx_tis_t mlg_tis;
+ mlxcx_tir_t mlg_tir[MLXCX_TIRS_PER_GROUP];
+ };
+ mlxcx_port_t *mlg_port;
+
+ size_t mlg_nwqs;
+ size_t mlg_wqs_size;
+ mlxcx_work_queue_t *mlg_wqs;
+
+ mlxcx_rqtable_t *mlg_rqt;
+
+ /*
+ * Flow table for matching VLAN IDs
+ */
+ mlxcx_flow_table_t *mlg_rx_vlan_ft;
+ mlxcx_flow_group_t *mlg_rx_vlan_fg;
+ mlxcx_flow_group_t *mlg_rx_vlan_def_fg;
+ mlxcx_flow_group_t *mlg_rx_vlan_promisc_fg;
+ list_t mlg_rx_vlans;
+
+ /*
+ * Flow table for separating out by protocol before hashing
+ */
+ mlxcx_flow_table_t *mlg_rx_hash_ft;
+
+ /*
+ * Links to flow entries on the root flow table which are pointing to
+ * our rx_vlan_ft.
+ */
+ avl_tree_t mlg_rx_macs;
+};
+
+typedef enum mlxcx_cmd_state {
+ MLXCX_CMD_S_DONE = 1 << 0,
+ MLXCX_CMD_S_ERROR = 1 << 1
+} mlxcx_cmd_state_t;
+
+typedef struct mlxcx_cmd {
+ struct mlxcx *mlcmd_mlxp;
+ kmutex_t mlcmd_lock;
+ kcondvar_t mlcmd_cv;
+
+ uint8_t mlcmd_token;
+ mlxcx_cmd_op_t mlcmd_op;
+
+ /*
+ * Command data and extended mailboxes for responses.
+ */
+ const void *mlcmd_in;
+ uint32_t mlcmd_inlen;
+ void *mlcmd_out;
+ uint32_t mlcmd_outlen;
+ list_t mlcmd_mbox_in;
+ uint8_t mlcmd_nboxes_in;
+ list_t mlcmd_mbox_out;
+ uint8_t mlcmd_nboxes_out;
+ /*
+ * Status information.
+ */
+ mlxcx_cmd_state_t mlcmd_state;
+ uint8_t mlcmd_status;
+} mlxcx_cmd_t;
+
+/*
+ * Our view of capabilities.
+ */
+typedef struct mlxcx_hca_cap {
+ mlxcx_hca_cap_mode_t mhc_mode;
+ mlxcx_hca_cap_type_t mhc_type;
+ union {
+ uint8_t mhc_bulk[MLXCX_HCA_CAP_SIZE];
+ mlxcx_hca_cap_general_caps_t mhc_general;
+ mlxcx_hca_cap_eth_caps_t mhc_eth;
+ mlxcx_hca_cap_flow_caps_t mhc_flow;
+ };
+} mlxcx_hca_cap_t;
+
+typedef struct {
+ /* Cooked values */
+ boolean_t mlc_checksum;
+ boolean_t mlc_lso;
+ boolean_t mlc_vxlan;
+ size_t mlc_max_lso_size;
+ size_t mlc_max_rqt_size;
+
+ size_t mlc_max_rx_ft_shift;
+ size_t mlc_max_rx_fe_dest;
+ size_t mlc_max_rx_flows;
+
+ size_t mlc_max_tir;
+
+ /* Raw caps data */
+ mlxcx_hca_cap_t mlc_hca_cur;
+ mlxcx_hca_cap_t mlc_hca_max;
+ mlxcx_hca_cap_t mlc_ether_cur;
+ mlxcx_hca_cap_t mlc_ether_max;
+ mlxcx_hca_cap_t mlc_nic_flow_cur;
+ mlxcx_hca_cap_t mlc_nic_flow_max;
+} mlxcx_caps_t;
+
+typedef struct {
+ uint_t mldp_eq_size_shift;
+ uint_t mldp_cq_size_shift;
+ uint_t mldp_rq_size_shift;
+ uint_t mldp_sq_size_shift;
+ uint_t mldp_cqemod_period_usec;
+ uint_t mldp_cqemod_count;
+ uint_t mldp_intrmod_period_usec;
+ uint_t mldp_rx_ngroups_large;
+ uint_t mldp_rx_ngroups_small;
+ uint_t mldp_rx_nrings_per_large_group;
+ uint_t mldp_rx_nrings_per_small_group;
+ uint_t mldp_tx_ngroups;
+ uint_t mldp_tx_nrings_per_group;
+ uint_t mldp_ftbl_root_size_shift;
+ size_t mldp_tx_bind_threshold;
+ uint_t mldp_ftbl_vlan_size_shift;
+ uint64_t mldp_eq_check_interval_sec;
+ uint64_t mldp_cq_check_interval_sec;
+ uint64_t mldp_wq_check_interval_sec;
+} mlxcx_drv_props_t;
+
+typedef enum {
+ MLXCX_ATTACH_FM = 1 << 0,
+ MLXCX_ATTACH_PCI_CONFIG = 1 << 1,
+ MLXCX_ATTACH_REGS = 1 << 2,
+ MLXCX_ATTACH_CMD = 1 << 3,
+ MLXCX_ATTACH_ENABLE_HCA = 1 << 4,
+ MLXCX_ATTACH_PAGE_LIST = 1 << 5,
+ MLXCX_ATTACH_INIT_HCA = 1 << 6,
+ MLXCX_ATTACH_UAR_PD_TD = 1 << 7,
+ MLXCX_ATTACH_INTRS = 1 << 8,
+ MLXCX_ATTACH_PORTS = 1 << 9,
+ MLXCX_ATTACH_MAC_HDL = 1 << 10,
+ MLXCX_ATTACH_CQS = 1 << 11,
+ MLXCX_ATTACH_WQS = 1 << 12,
+ MLXCX_ATTACH_GROUPS = 1 << 13,
+ MLXCX_ATTACH_BUFS = 1 << 14,
+ MLXCX_ATTACH_CAPS = 1 << 15,
+ MLXCX_ATTACH_CHKTIMERS = 1 << 16,
+} mlxcx_attach_progress_t;
+
+struct mlxcx {
+ /* entry on the mlxcx_glist */
+ list_node_t mlx_gentry;
+
+ dev_info_t *mlx_dip;
+ int mlx_inst;
+ mlxcx_attach_progress_t mlx_attach;
+
+ mlxcx_drv_props_t mlx_props;
+
+ /*
+ * Misc. data
+ */
+ uint16_t mlx_fw_maj;
+ uint16_t mlx_fw_min;
+ uint16_t mlx_fw_rev;
+ uint16_t mlx_cmd_rev;
+
+ /*
+ * Various capabilities of hardware.
+ */
+ mlxcx_caps_t *mlx_caps;
+
+ uint_t mlx_max_sdu;
+ uint_t mlx_sdu;
+
+ /*
+ * FM State
+ */
+ int mlx_fm_caps;
+
+ /*
+ * PCI Data
+ */
+ ddi_acc_handle_t mlx_cfg_handle;
+ ddi_acc_handle_t mlx_regs_handle;
+ caddr_t mlx_regs_base;
+
+ /*
+ * MAC handle
+ */
+ mac_handle_t mlx_mac_hdl;
+
+ /*
+ * Main command queue for issuing general FW control commands.
+ */
+ mlxcx_cmd_queue_t mlx_cmd;
+
+ /*
+ * Interrupts
+ */
+ uint_t mlx_intr_pri;
+ uint_t mlx_intr_type; /* always MSI-X */
+ int mlx_intr_count;
+ size_t mlx_intr_size; /* allocation size */
+ ddi_intr_handle_t *mlx_intr_handles;
+
+ /*
+ * Basic firmware resources which we use for a variety of things.
+ * The UAR is a reference to a page where CQ and EQ doorbells are
+ * located. It also holds all the BlueFlame stuff (which we don't
+ * use).
+ */
+ mlxcx_uar_t mlx_uar;
+ /*
+ * The PD (Protection Domain) and TDOM (Transport Domain) are opaque
+ * entities to us (they're Infiniband constructs we don't actually care
+ * about) -- we just allocate them and shove their ID numbers in
+ * whenever we're asked for one.
+ *
+ * The "reserved" LKEY is what we should put in queue entries that
+ * have references to memory to indicate that they're using linear
+ * addresses (comes from the QUERY_SPECIAL_CONTEXTS cmd).
+ */
+ mlxcx_pd_t mlx_pd;
+ mlxcx_tdom_t mlx_tdom;
+ uint_t mlx_rsvd_lkey;
+
+ /*
+ * Our event queues. These are 1:1 with interrupts.
+ */
+ size_t mlx_eqs_size; /* allocation size */
+ mlxcx_event_queue_t *mlx_eqs;
+
+ /*
+ * Page list. These represent the set of 4k pages we've given to
+ * hardware.
+ *
+ * We can add to this list at the request of hardware from interrupt
+ * context (the PAGE_REQUEST event), so it's protected by pagemtx.
+ */
+ kmutex_t mlx_pagemtx;
+ uint_t mlx_npages;
+ avl_tree_t mlx_pages;
+
+ /*
+ * Port state
+ */
+ uint_t mlx_nports;
+ size_t mlx_ports_size;
+ mlxcx_port_t *mlx_ports;
+
+ /*
+ * Completion queues (CQs). These are also indexed off the
+ * event_queue_ts that they each report to.
+ */
+ list_t mlx_cqs;
+
+ uint_t mlx_next_eq;
+
+ /*
+ * Work queues (WQs).
+ */
+ list_t mlx_wqs;
+
+ /*
+ * Ring groups
+ */
+ size_t mlx_rx_ngroups;
+ size_t mlx_rx_groups_size;
+ mlxcx_ring_group_t *mlx_rx_groups;
+
+ size_t mlx_tx_ngroups;
+ size_t mlx_tx_groups_size;
+ mlxcx_ring_group_t *mlx_tx_groups;
+
+ kmem_cache_t *mlx_bufs_cache;
+ list_t mlx_buf_shards;
+
+ ddi_periodic_t mlx_eq_checktimer;
+ ddi_periodic_t mlx_cq_checktimer;
+ ddi_periodic_t mlx_wq_checktimer;
+};
+
+/*
+ * Register access
+ */
+extern uint16_t mlxcx_get16(mlxcx_t *, uintptr_t);
+extern uint32_t mlxcx_get32(mlxcx_t *, uintptr_t);
+extern uint64_t mlxcx_get64(mlxcx_t *, uintptr_t);
+
+extern void mlxcx_put32(mlxcx_t *, uintptr_t, uint32_t);
+extern void mlxcx_put64(mlxcx_t *, uintptr_t, uint64_t);
+
+extern void mlxcx_uar_put32(mlxcx_t *, mlxcx_uar_t *, uintptr_t, uint32_t);
+extern void mlxcx_uar_put64(mlxcx_t *, mlxcx_uar_t *, uintptr_t, uint64_t);
+
+/*
+ * Logging functions.
+ */
+extern void mlxcx_warn(mlxcx_t *, const char *, ...);
+extern void mlxcx_note(mlxcx_t *, const char *, ...);
+extern void mlxcx_panic(mlxcx_t *, const char *, ...);
+
+extern void mlxcx_fm_ereport(mlxcx_t *, const char *);
+
+extern void mlxcx_check_sq(mlxcx_t *, mlxcx_work_queue_t *);
+extern void mlxcx_check_rq(mlxcx_t *, mlxcx_work_queue_t *);
+
+/*
+ * DMA Functions
+ */
+extern void mlxcx_dma_free(mlxcx_dma_buffer_t *);
+extern boolean_t mlxcx_dma_alloc(mlxcx_t *, mlxcx_dma_buffer_t *,
+ ddi_dma_attr_t *, ddi_device_acc_attr_t *, boolean_t, size_t, boolean_t);
+extern boolean_t mlxcx_dma_init(mlxcx_t *, mlxcx_dma_buffer_t *,
+ ddi_dma_attr_t *, boolean_t);
+extern boolean_t mlxcx_dma_bind_mblk(mlxcx_t *, mlxcx_dma_buffer_t *,
+ const mblk_t *, size_t, boolean_t);
+extern boolean_t mlxcx_dma_alloc_offset(mlxcx_t *, mlxcx_dma_buffer_t *,
+ ddi_dma_attr_t *, ddi_device_acc_attr_t *, boolean_t,
+ size_t, size_t, boolean_t);
+extern void mlxcx_dma_unbind(mlxcx_t *, mlxcx_dma_buffer_t *);
+extern void mlxcx_dma_acc_attr(mlxcx_t *, ddi_device_acc_attr_t *);
+extern void mlxcx_dma_page_attr(mlxcx_t *, ddi_dma_attr_t *);
+extern void mlxcx_dma_queue_attr(mlxcx_t *, ddi_dma_attr_t *);
+extern void mlxcx_dma_qdbell_attr(mlxcx_t *, ddi_dma_attr_t *);
+extern void mlxcx_dma_buf_attr(mlxcx_t *, ddi_dma_attr_t *);
+
+extern boolean_t mlxcx_give_pages(mlxcx_t *, int32_t);
+
+/*
+ * Thin wrapper around ddi_dma_cookie_iter(9F) that asserts the buffer is
+ * actually bound before walking its DMA cookies.
+ */
+static inline const ddi_dma_cookie_t *
+mlxcx_dma_cookie_iter(const mlxcx_dma_buffer_t *db,
+    const ddi_dma_cookie_t *prev)
+{
+ ASSERT(db->mxdb_flags & MLXCX_DMABUF_BOUND);
+ return (ddi_dma_cookie_iter(db->mxdb_dma_handle, prev));
+}
+
+/*
+ * Wrapper around ddi_dma_cookie_one(9F); asserts the buffer is bound
+ * before fetching its single cookie.
+ */
+static inline const ddi_dma_cookie_t *
+mlxcx_dma_cookie_one(const mlxcx_dma_buffer_t *db)
+{
+ ASSERT(db->mxdb_flags & MLXCX_DMABUF_BOUND);
+ return (ddi_dma_cookie_one(db->mxdb_dma_handle));
+}
+
+/*
+ * From mlxcx_intr.c
+ */
+extern boolean_t mlxcx_intr_setup(mlxcx_t *);
+extern void mlxcx_intr_teardown(mlxcx_t *);
+extern void mlxcx_arm_eq(mlxcx_t *, mlxcx_event_queue_t *);
+extern void mlxcx_arm_cq(mlxcx_t *, mlxcx_completion_queue_t *);
+
+extern mblk_t *mlxcx_rx_poll(mlxcx_t *, mlxcx_completion_queue_t *, size_t);
+
+/*
+ * From mlxcx_gld.c
+ */
+extern boolean_t mlxcx_register_mac(mlxcx_t *);
+
+/*
+ * From mlxcx_ring.c
+ */
+extern boolean_t mlxcx_cq_alloc_dma(mlxcx_t *, mlxcx_completion_queue_t *);
+extern void mlxcx_cq_rele_dma(mlxcx_t *, mlxcx_completion_queue_t *);
+extern boolean_t mlxcx_wq_alloc_dma(mlxcx_t *, mlxcx_work_queue_t *);
+extern void mlxcx_wq_rele_dma(mlxcx_t *, mlxcx_work_queue_t *);
+
+extern boolean_t mlxcx_buf_create(mlxcx_t *, mlxcx_buf_shard_t *,
+ mlxcx_buffer_t **);
+extern boolean_t mlxcx_buf_create_foreign(mlxcx_t *, mlxcx_buf_shard_t *,
+ mlxcx_buffer_t **);
+extern void mlxcx_buf_take(mlxcx_t *, mlxcx_work_queue_t *, mlxcx_buffer_t **);
+extern size_t mlxcx_buf_take_n(mlxcx_t *, mlxcx_work_queue_t *,
+ mlxcx_buffer_t **, size_t);
+extern boolean_t mlxcx_buf_loan(mlxcx_t *, mlxcx_buffer_t *);
+extern void mlxcx_buf_return(mlxcx_t *, mlxcx_buffer_t *);
+extern void mlxcx_buf_return_chain(mlxcx_t *, mlxcx_buffer_t *, boolean_t);
+extern void mlxcx_buf_destroy(mlxcx_t *, mlxcx_buffer_t *);
+
+extern boolean_t mlxcx_buf_bind_or_copy(mlxcx_t *, mlxcx_work_queue_t *,
+ mblk_t *, size_t, mlxcx_buffer_t **);
+
+extern boolean_t mlxcx_rx_group_setup(mlxcx_t *, mlxcx_ring_group_t *);
+extern boolean_t mlxcx_tx_group_setup(mlxcx_t *, mlxcx_ring_group_t *);
+
+extern boolean_t mlxcx_rx_group_start(mlxcx_t *, mlxcx_ring_group_t *);
+extern boolean_t mlxcx_tx_ring_start(mlxcx_t *, mlxcx_ring_group_t *,
+ mlxcx_work_queue_t *);
+extern boolean_t mlxcx_rx_ring_start(mlxcx_t *, mlxcx_ring_group_t *,
+ mlxcx_work_queue_t *);
+
+extern boolean_t mlxcx_rq_add_buffer(mlxcx_t *, mlxcx_work_queue_t *,
+ mlxcx_buffer_t *);
+extern boolean_t mlxcx_rq_add_buffers(mlxcx_t *, mlxcx_work_queue_t *,
+ mlxcx_buffer_t **, size_t);
+extern boolean_t mlxcx_sq_add_buffer(mlxcx_t *, mlxcx_work_queue_t *,
+ uint8_t *, size_t, uint32_t, mlxcx_buffer_t *);
+extern boolean_t mlxcx_sq_add_nop(mlxcx_t *, mlxcx_work_queue_t *);
+extern void mlxcx_rq_refill(mlxcx_t *, mlxcx_work_queue_t *);
+
+extern void mlxcx_teardown_groups(mlxcx_t *);
+extern void mlxcx_wq_teardown(mlxcx_t *, mlxcx_work_queue_t *);
+extern void mlxcx_cq_teardown(mlxcx_t *, mlxcx_completion_queue_t *);
+extern void mlxcx_teardown_rx_group(mlxcx_t *, mlxcx_ring_group_t *);
+extern void mlxcx_teardown_tx_group(mlxcx_t *, mlxcx_ring_group_t *);
+
+extern void mlxcx_tx_completion(mlxcx_t *, mlxcx_completion_queue_t *,
+ mlxcx_completionq_ent_t *, mlxcx_buffer_t *);
+extern mblk_t *mlxcx_rx_completion(mlxcx_t *, mlxcx_completion_queue_t *,
+ mlxcx_completionq_ent_t *, mlxcx_buffer_t *);
+
+extern mlxcx_buf_shard_t *mlxcx_mlbs_create(mlxcx_t *);
+
+/*
+ * Flow mgmt
+ */
+extern boolean_t mlxcx_add_umcast_entry(mlxcx_t *, mlxcx_port_t *,
+ mlxcx_ring_group_t *, const uint8_t *);
+extern boolean_t mlxcx_remove_umcast_entry(mlxcx_t *, mlxcx_port_t *,
+ mlxcx_ring_group_t *, const uint8_t *);
+extern void mlxcx_remove_all_umcast_entries(mlxcx_t *, mlxcx_port_t *,
+ mlxcx_ring_group_t *);
+extern boolean_t mlxcx_setup_flow_group(mlxcx_t *, mlxcx_flow_table_t *,
+ mlxcx_flow_group_t *);
+extern void mlxcx_teardown_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
+
+extern void mlxcx_remove_all_vlan_entries(mlxcx_t *, mlxcx_ring_group_t *);
+extern boolean_t mlxcx_remove_vlan_entry(mlxcx_t *, mlxcx_ring_group_t *,
+ boolean_t, uint16_t);
+extern boolean_t mlxcx_add_vlan_entry(mlxcx_t *, mlxcx_ring_group_t *,
+ boolean_t, uint16_t);
+
+/*
+ * Command functions
+ */
+extern boolean_t mlxcx_cmd_queue_init(mlxcx_t *);
+extern void mlxcx_cmd_queue_fini(mlxcx_t *);
+
+extern boolean_t mlxcx_cmd_enable_hca(mlxcx_t *);
+extern boolean_t mlxcx_cmd_disable_hca(mlxcx_t *);
+
+extern boolean_t mlxcx_cmd_query_issi(mlxcx_t *, uint_t *);
+extern boolean_t mlxcx_cmd_set_issi(mlxcx_t *, uint16_t);
+
+extern boolean_t mlxcx_cmd_query_pages(mlxcx_t *, uint_t, int32_t *);
+extern boolean_t mlxcx_cmd_give_pages(mlxcx_t *, uint_t, int32_t,
+ mlxcx_dev_page_t **);
+extern boolean_t mlxcx_cmd_return_pages(mlxcx_t *, int32_t, uint64_t *,
+ int32_t *);
+
+extern boolean_t mlxcx_cmd_query_hca_cap(mlxcx_t *, mlxcx_hca_cap_type_t,
+ mlxcx_hca_cap_mode_t, mlxcx_hca_cap_t *);
+
+extern boolean_t mlxcx_cmd_set_driver_version(mlxcx_t *, const char *);
+
+extern boolean_t mlxcx_cmd_init_hca(mlxcx_t *);
+extern boolean_t mlxcx_cmd_teardown_hca(mlxcx_t *);
+
+extern boolean_t mlxcx_cmd_alloc_uar(mlxcx_t *, mlxcx_uar_t *);
+extern boolean_t mlxcx_cmd_dealloc_uar(mlxcx_t *, mlxcx_uar_t *);
+
+extern boolean_t mlxcx_cmd_alloc_pd(mlxcx_t *, mlxcx_pd_t *);
+extern boolean_t mlxcx_cmd_dealloc_pd(mlxcx_t *, mlxcx_pd_t *);
+
+extern boolean_t mlxcx_cmd_alloc_tdom(mlxcx_t *, mlxcx_tdom_t *);
+extern boolean_t mlxcx_cmd_dealloc_tdom(mlxcx_t *, mlxcx_tdom_t *);
+
+extern boolean_t mlxcx_cmd_create_eq(mlxcx_t *, mlxcx_event_queue_t *);
+extern boolean_t mlxcx_cmd_destroy_eq(mlxcx_t *, mlxcx_event_queue_t *);
+extern boolean_t mlxcx_cmd_query_eq(mlxcx_t *, mlxcx_event_queue_t *,
+ mlxcx_eventq_ctx_t *);
+
+extern boolean_t mlxcx_cmd_create_cq(mlxcx_t *, mlxcx_completion_queue_t *);
+extern boolean_t mlxcx_cmd_destroy_cq(mlxcx_t *, mlxcx_completion_queue_t *);
+extern boolean_t mlxcx_cmd_query_cq(mlxcx_t *, mlxcx_completion_queue_t *,
+ mlxcx_completionq_ctx_t *);
+
+extern boolean_t mlxcx_cmd_create_rq(mlxcx_t *, mlxcx_work_queue_t *);
+extern boolean_t mlxcx_cmd_start_rq(mlxcx_t *, mlxcx_work_queue_t *);
+extern boolean_t mlxcx_cmd_stop_rq(mlxcx_t *, mlxcx_work_queue_t *);
+extern boolean_t mlxcx_cmd_destroy_rq(mlxcx_t *, mlxcx_work_queue_t *);
+extern boolean_t mlxcx_cmd_query_rq(mlxcx_t *, mlxcx_work_queue_t *,
+ mlxcx_rq_ctx_t *);
+
+extern boolean_t mlxcx_cmd_create_tir(mlxcx_t *, mlxcx_tir_t *);
+extern boolean_t mlxcx_cmd_destroy_tir(mlxcx_t *, mlxcx_tir_t *);
+
+extern boolean_t mlxcx_cmd_create_sq(mlxcx_t *, mlxcx_work_queue_t *);
+extern boolean_t mlxcx_cmd_start_sq(mlxcx_t *, mlxcx_work_queue_t *);
+extern boolean_t mlxcx_cmd_stop_sq(mlxcx_t *, mlxcx_work_queue_t *);
+extern boolean_t mlxcx_cmd_destroy_sq(mlxcx_t *, mlxcx_work_queue_t *);
+extern boolean_t mlxcx_cmd_query_sq(mlxcx_t *, mlxcx_work_queue_t *,
+ mlxcx_sq_ctx_t *);
+
+extern boolean_t mlxcx_cmd_create_tis(mlxcx_t *, mlxcx_tis_t *);
+extern boolean_t mlxcx_cmd_destroy_tis(mlxcx_t *, mlxcx_tis_t *);
+
+extern boolean_t mlxcx_cmd_query_nic_vport_ctx(mlxcx_t *, mlxcx_port_t *);
+extern boolean_t mlxcx_cmd_query_special_ctxs(mlxcx_t *);
+
+extern boolean_t mlxcx_cmd_modify_nic_vport_ctx(mlxcx_t *, mlxcx_port_t *,
+ mlxcx_modify_nic_vport_ctx_fields_t);
+
+extern boolean_t mlxcx_cmd_create_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
+extern boolean_t mlxcx_cmd_destroy_flow_table(mlxcx_t *, mlxcx_flow_table_t *);
+extern boolean_t mlxcx_cmd_set_flow_table_root(mlxcx_t *, mlxcx_flow_table_t *);
+
+extern boolean_t mlxcx_cmd_create_flow_group(mlxcx_t *, mlxcx_flow_group_t *);
+extern boolean_t mlxcx_cmd_set_flow_table_entry(mlxcx_t *,
+ mlxcx_flow_entry_t *);
+extern boolean_t mlxcx_cmd_delete_flow_table_entry(mlxcx_t *,
+ mlxcx_flow_entry_t *);
+extern boolean_t mlxcx_cmd_destroy_flow_group(mlxcx_t *, mlxcx_flow_group_t *);
+
+extern boolean_t mlxcx_cmd_access_register(mlxcx_t *, mlxcx_cmd_reg_opmod_t,
+ mlxcx_register_id_t, mlxcx_register_data_t *);
+extern boolean_t mlxcx_cmd_query_port_mtu(mlxcx_t *, mlxcx_port_t *);
+extern boolean_t mlxcx_cmd_query_port_status(mlxcx_t *, mlxcx_port_t *);
+extern boolean_t mlxcx_cmd_query_port_speed(mlxcx_t *, mlxcx_port_t *);
+
+extern boolean_t mlxcx_cmd_set_port_mtu(mlxcx_t *, mlxcx_port_t *);
+
+extern boolean_t mlxcx_cmd_create_rqt(mlxcx_t *, mlxcx_rqtable_t *);
+extern boolean_t mlxcx_cmd_destroy_rqt(mlxcx_t *, mlxcx_rqtable_t *);
+
+extern boolean_t mlxcx_cmd_set_int_mod(mlxcx_t *, uint_t, uint_t);
+
+extern boolean_t mlxcx_cmd_query_module_status(mlxcx_t *, uint_t,
+ mlxcx_module_status_t *, mlxcx_module_error_type_t *);
+extern boolean_t mlxcx_cmd_set_port_led(mlxcx_t *, mlxcx_port_t *, uint16_t);
+
+/* Comparator for avl_ts */
+extern int mlxcx_cq_compare(const void *, const void *);
+extern int mlxcx_dmac_fe_compare(const void *, const void *);
+extern int mlxcx_grmac_compare(const void *, const void *);
+extern int mlxcx_page_compare(const void *, const void *);
+
+extern void mlxcx_update_link_state(mlxcx_t *, mlxcx_port_t *);
+
+extern void mlxcx_eth_proto_to_string(mlxcx_eth_proto_t, char *, size_t);
+extern const char *mlxcx_port_status_string(mlxcx_port_status_t);
+
+extern const char *mlxcx_event_name(mlxcx_event_t);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MLXCX_H */
diff --git a/usr/src/uts/common/io/mlxcx/mlxcx_cmd.c b/usr/src/uts/common/io/mlxcx/mlxcx_cmd.c
new file mode 100644
index 0000000000..30fb7ca8ef
--- /dev/null
+++ b/usr/src/uts/common/io/mlxcx/mlxcx_cmd.c
@@ -0,0 +1,3542 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2020, The University of Queensland
+ * Copyright (c) 2018, Joyent, Inc.
+ */
+
+/*
+ * Controls the management of commands that are issued to and from the HCA
+ * command queue.
+ */
+
+#include <mlxcx.h>
+
+#include <sys/debug.h>
+#include <sys/sysmacros.h>
+
+/*
+ * When we start up the command queue, it will undergo some internal
+ * initialization after we set the command queue address. These values allow us
+ * to control how much time we should wait for that to occur.
+ */
+clock_t mlxcx_cmd_init_delay = 1000 * 10; /* 10 ms in us */
+uint_t mlxcx_cmd_init_trys = 100; /* Wait at most 1s */
+
+clock_t mlxcx_cmd_delay = 1000 * 1; /* 1 ms in us */
+uint_t mlxcx_cmd_tries = 5000; /* Wait at most 5s */
+
+/*
+ * This macro is used to identify that we care about our own function that we're
+ * communicating with. We always use this function.
+ */
+#define MLXCX_FUNCTION_SELF (to_be16(0))
+
+static const char *
+mlxcx_cmd_response_string(mlxcx_cmd_ret_t ret)
+{
+ switch (ret) {
+ case MLXCX_CMD_R_OK:
+ return ("MLXCX_CMD_R_OK");
+ case MLXCX_CMD_R_INTERNAL_ERR:
+ return ("MLXCX_CMD_R_INTERNAL_ERR");
+ case MLXCX_CMD_R_BAD_OP:
+ return ("MLXCX_CMD_R_BAD_OP");
+ case MLXCX_CMD_R_BAD_PARAM:
+ return ("MLXCX_CMD_R_BAD_PARAM");
+ case MLXCX_CMD_R_BAD_SYS_STATE:
+ return ("MLXCX_CMD_R_BAD_SYS_STATE");
+ case MLXCX_CMD_R_BAD_RESOURCE:
+ return ("MLXCX_CMD_R_BAD_RESOURCE");
+ case MLXCX_CMD_R_RESOURCE_BUSY:
+ return ("MLXCX_CMD_R_RESOURCE_BUSY");
+ case MLXCX_CMD_R_EXCEED_LIM:
+ return ("MLXCX_CMD_R_EXCEED_LIM");
+ case MLXCX_CMD_R_BAD_RES_STATE:
+ return ("MLXCX_CMD_R_BAD_RES_STATE");
+ case MLXCX_CMD_R_BAD_INDEX:
+ return ("MLXCX_CMD_R_BAD_INDEX");
+ case MLXCX_CMD_R_NO_RESOURCES:
+ return ("MLXCX_CMD_R_NO_RESOURCES");
+ case MLXCX_CMD_R_BAD_INPUT_LEN:
+ return ("MLXCX_CMD_R_BAD_INPUT_LEN");
+ case MLXCX_CMD_R_BAD_OUTPUT_LEN:
+ return ("MLXCX_CMD_R_BAD_OUTPUT_LEN");
+ case MLXCX_CMD_R_BAD_RESOURCE_STATE:
+ return ("MLXCX_CMD_R_BAD_RESOURCE_STATE");
+ case MLXCX_CMD_R_BAD_PKT:
+ return ("MLXCX_CMD_R_BAD_PKT");
+ case MLXCX_CMD_R_BAD_SIZE:
+ return ("MLXCX_CMD_R_BAD_SIZE");
+ default:
+ return ("Unknown command");
+ }
+}
+
+static const char *
+mlxcx_cmd_opcode_string(mlxcx_cmd_op_t op)
+{
+ switch (op) {
+ case MLXCX_OP_QUERY_HCA_CAP:
+ return ("MLXCX_OP_QUERY_HCA_CAP");
+ case MLXCX_OP_QUERY_ADAPTER:
+ return ("MLXCX_OP_QUERY_ADAPTER");
+ case MLXCX_OP_INIT_HCA:
+ return ("MLXCX_OP_INIT_HCA");
+ case MLXCX_OP_TEARDOWN_HCA:
+ return ("MLXCX_OP_TEARDOWN_HCA");
+ case MLXCX_OP_ENABLE_HCA:
+ return ("MLXCX_OP_ENABLE_HCA");
+ case MLXCX_OP_DISABLE_HCA:
+ return ("MLXCX_OP_DISABLE_HCA");
+ case MLXCX_OP_QUERY_PAGES:
+ return ("MLXCX_OP_QUERY_PAGES");
+ case MLXCX_OP_MANAGE_PAGES:
+ return ("MLXCX_OP_MANAGE_PAGES");
+ case MLXCX_OP_SET_HCA_CAP:
+ return ("MLXCX_OP_SET_HCA_CAP");
+ case MLXCX_OP_QUERY_ISSI:
+ return ("MLXCX_OP_QUERY_ISSI");
+ case MLXCX_OP_SET_ISSI:
+ return ("MLXCX_OP_SET_ISSI");
+ case MLXCX_OP_SET_DRIVER_VERSION:
+ return ("MLXCX_OP_SET_DRIVER_VERSION");
+ case MLXCX_OP_QUERY_OTHER_HCA_CAP:
+ return ("MLXCX_OP_QUERY_OTHER_HCA_CAP");
+ case MLXCX_OP_MODIFY_OTHER_HCA_CAP:
+ return ("MLXCX_OP_MODIFY_OTHER_HCA_CAP");
+ case MLXCX_OP_SET_TUNNELED_OPERATIONS:
+ return ("MLXCX_OP_SET_TUNNELED_OPERATIONS");
+ case MLXCX_OP_CREATE_MKEY:
+ return ("MLXCX_OP_CREATE_MKEY");
+ case MLXCX_OP_QUERY_MKEY:
+ return ("MLXCX_OP_QUERY_MKEY");
+ case MLXCX_OP_DESTROY_MKEY:
+ return ("MLXCX_OP_DESTROY_MKEY");
+ case MLXCX_OP_QUERY_SPECIAL_CONTEXTS:
+ return ("MLXCX_OP_QUERY_SPECIAL_CONTEXTS");
+ case MLXCX_OP_PAGE_FAULT_RESUME:
+ return ("MLXCX_OP_PAGE_FAULT_RESUME");
+ case MLXCX_OP_CREATE_EQ:
+ return ("MLXCX_OP_CREATE_EQ");
+ case MLXCX_OP_DESTROY_EQ:
+ return ("MLXCX_OP_DESTROY_EQ");
+ case MLXCX_OP_QUERY_EQ:
+ return ("MLXCX_OP_QUERY_EQ");
+ case MLXCX_OP_GEN_EQE:
+ return ("MLXCX_OP_GEN_EQE");
+ case MLXCX_OP_CREATE_CQ:
+ return ("MLXCX_OP_CREATE_CQ");
+ case MLXCX_OP_DESTROY_CQ:
+ return ("MLXCX_OP_DESTROY_CQ");
+ case MLXCX_OP_QUERY_CQ:
+ return ("MLXCX_OP_QUERY_CQ");
+ case MLXCX_OP_MODIFY_CQ:
+ return ("MLXCX_OP_MODIFY_CQ");
+ case MLXCX_OP_CREATE_QP:
+ return ("MLXCX_OP_CREATE_QP");
+ case MLXCX_OP_DESTROY_QP:
+ return ("MLXCX_OP_DESTROY_QP");
+ case MLXCX_OP_RST2INIT_QP:
+ return ("MLXCX_OP_RST2INIT_QP");
+ case MLXCX_OP_INIT2RTR_QP:
+ return ("MLXCX_OP_INIT2RTR_QP");
+ case MLXCX_OP_RTR2RTS_QP:
+ return ("MLXCX_OP_RTR2RTS_QP");
+ case MLXCX_OP_RTS2RTS_QP:
+ return ("MLXCX_OP_RTS2RTS_QP");
+ case MLXCX_OP_SQERR2RTS_QP:
+ return ("MLXCX_OP_SQERR2RTS_QP");
+ case MLXCX_OP__2ERR_QP:
+ return ("MLXCX_OP__2ERR_QP");
+ case MLXCX_OP__2RST_QP:
+ return ("MLXCX_OP__2RST_QP");
+ case MLXCX_OP_QUERY_QP:
+ return ("MLXCX_OP_QUERY_QP");
+ case MLXCX_OP_SQD_RTS_QP:
+ return ("MLXCX_OP_SQD_RTS_QP");
+ case MLXCX_OP_INIT2INIT_QP:
+ return ("MLXCX_OP_INIT2INIT_QP");
+ case MLXCX_OP_CREATE_PSV:
+ return ("MLXCX_OP_CREATE_PSV");
+ case MLXCX_OP_DESTROY_PSV:
+ return ("MLXCX_OP_DESTROY_PSV");
+ case MLXCX_OP_CREATE_SRQ:
+ return ("MLXCX_OP_CREATE_SRQ");
+ case MLXCX_OP_DESTROY_SRQ:
+ return ("MLXCX_OP_DESTROY_SRQ");
+ case MLXCX_OP_QUERY_SRQ:
+ return ("MLXCX_OP_QUERY_SRQ");
+ case MLXCX_OP_ARM_RQ:
+ return ("MLXCX_OP_ARM_RQ");
+ case MLXCX_OP_CREATE_XRC_SRQ:
+ return ("MLXCX_OP_CREATE_XRC_SRQ");
+ case MLXCX_OP_DESTROY_XRC_SRQ:
+ return ("MLXCX_OP_DESTROY_XRC_SRQ");
+ case MLXCX_OP_QUERY_XRC_SRQ:
+ return ("MLXCX_OP_QUERY_XRC_SRQ");
+ case MLXCX_OP_ARM_XRC_SRQ:
+ return ("MLXCX_OP_ARM_XRC_SRQ");
+ case MLXCX_OP_CREATE_DCT:
+ return ("MLXCX_OP_CREATE_DCT");
+ case MLXCX_OP_DESTROY_DCT:
+ return ("MLXCX_OP_DESTROY_DCT");
+ case MLXCX_OP_DRAIN_DCT:
+ return ("MLXCX_OP_DRAIN_DCT");
+ case MLXCX_OP_QUERY_DCT:
+ return ("MLXCX_OP_QUERY_DCT");
+ case MLXCX_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ return ("MLXCX_OP_ARM_DCT_FOR_KEY_VIOLATION");
+ case MLXCX_OP_CREATE_XRQ:
+ return ("MLXCX_OP_CREATE_XRQ");
+ case MLXCX_OP_DESTROY_XRQ:
+ return ("MLXCX_OP_DESTROY_XRQ");
+ case MLXCX_OP_QUERY_XRQ:
+ return ("MLXCX_OP_QUERY_XRQ");
+ case MLXCX_OP_CREATE_NVMF_BACKEND_CONTROLLER:
+ return ("MLXCX_OP_CREATE_NVMF_BACKEND_CONTROLLER");
+ case MLXCX_OP_DESTROY_NVMF_BACKEND_CONTROLLER:
+ return ("MLXCX_OP_DESTROY_NVMF_BACKEND_CONTROLLER");
+ case MLXCX_OP_QUERY_NVMF_BACKEND_CONTROLLER:
+ return ("MLXCX_OP_QUERY_NVMF_BACKEND_CONTROLLER");
+ case MLXCX_OP_ATTACH_NVMF_NAMESPACE:
+ return ("MLXCX_OP_ATTACH_NVMF_NAMESPACE");
+ case MLXCX_OP_DETACH_NVMF_NAMESPACE:
+ return ("MLXCX_OP_DETACH_NVMF_NAMESPACE");
+ case MLXCX_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
+ return ("MLXCX_OP_QUERY_XRQ_DC_PARAMS_ENTRY");
+ case MLXCX_OP_SET_XRQ_DC_PARAMS_ENTRY:
+ return ("MLXCX_OP_SET_XRQ_DC_PARAMS_ENTRY");
+ case MLXCX_OP_QUERY_XRQ_ERROR_PARAMS:
+ return ("MLXCX_OP_QUERY_XRQ_ERROR_PARAMS");
+ case MLXCX_OP_QUERY_VPORT_STATE:
+ return ("MLXCX_OP_QUERY_VPORT_STATE");
+ case MLXCX_OP_MODIFY_VPORT_STATE:
+ return ("MLXCX_OP_MODIFY_VPORT_STATE");
+ case MLXCX_OP_QUERY_ESW_VPORT_CONTEXT:
+ return ("MLXCX_OP_QUERY_ESW_VPORT_CONTEXT");
+ case MLXCX_OP_MODIFY_ESW_VPORT_CONTEXT:
+ return ("MLXCX_OP_MODIFY_ESW_VPORT_CONTEXT");
+ case MLXCX_OP_QUERY_NIC_VPORT_CONTEXT:
+ return ("MLXCX_OP_QUERY_NIC_VPORT_CONTEXT");
+ case MLXCX_OP_MODIFY_NIC_VPORT_CONTEXT:
+ return ("MLXCX_OP_MODIFY_NIC_VPORT_CONTEXT");
+ case MLXCX_OP_QUERY_ROCE_ADDRESS:
+ return ("MLXCX_OP_QUERY_ROCE_ADDRESS");
+ case MLXCX_OP_SET_ROCE_ADDRESS:
+ return ("MLXCX_OP_SET_ROCE_ADDRESS");
+ case MLXCX_OP_QUERY_HCA_VPORT_CONTEXT:
+ return ("MLXCX_OP_QUERY_HCA_VPORT_CONTEXT");
+ case MLXCX_OP_MODIFY_HCA_VPORT_CONTEXT:
+ return ("MLXCX_OP_MODIFY_HCA_VPORT_CONTEXT");
+ case MLXCX_OP_QUERY_HCA_VPORT_GID:
+ return ("MLXCX_OP_QUERY_HCA_VPORT_GID");
+ case MLXCX_OP_QUERY_HCA_VPORT_PKEY:
+ return ("MLXCX_OP_QUERY_HCA_VPORT_PKEY");
+ case MLXCX_OP_QUERY_VPORT_COUNTER:
+ return ("MLXCX_OP_QUERY_VPORT_COUNTER");
+ case MLXCX_OP_ALLOC_Q_COUNTER:
+ return ("MLXCX_OP_ALLOC_Q_COUNTER");
+ case MLXCX_OP_DEALLOC_Q_COUNTER:
+ return ("MLXCX_OP_DEALLOC_Q_COUNTER");
+ case MLXCX_OP_QUERY_Q_COUNTER:
+ return ("MLXCX_OP_QUERY_Q_COUNTER");
+ case MLXCX_OP_SET_PP_RATE_LIMIT:
+ return ("MLXCX_OP_SET_PP_RATE_LIMIT");
+ case MLXCX_OP_QUERY_PP_RATE_LIMIT:
+ return ("MLXCX_OP_QUERY_PP_RATE_LIMIT");
+ case MLXCX_OP_ALLOC_PD:
+ return ("MLXCX_OP_ALLOC_PD");
+ case MLXCX_OP_DEALLOC_PD:
+ return ("MLXCX_OP_DEALLOC_PD");
+ case MLXCX_OP_ALLOC_UAR:
+ return ("MLXCX_OP_ALLOC_UAR");
+ case MLXCX_OP_DEALLOC_UAR:
+ return ("MLXCX_OP_DEALLOC_UAR");
+ case MLXCX_OP_CONFIG_INT_MODERATION:
+ return ("MLXCX_OP_CONFIG_INT_MODERATION");
+ case MLXCX_OP_ACCESS_REG:
+ return ("MLXCX_OP_ACCESS_REG");
+ case MLXCX_OP_ATTACH_TO_MCG:
+ return ("MLXCX_OP_ATTACH_TO_MCG");
+ case MLXCX_OP_DETACH_FROM_MCG:
+ return ("MLXCX_OP_DETACH_FROM_MCG");
+ case MLXCX_OP_MAD_IFC:
+ return ("MLXCX_OP_MAD_IFC");
+ case MLXCX_OP_QUERY_MAD_DEMUX:
+ return ("MLXCX_OP_QUERY_MAD_DEMUX");
+ case MLXCX_OP_SET_MAD_DEMUX:
+ return ("MLXCX_OP_SET_MAD_DEMUX");
+ case MLXCX_OP_NOP:
+ return ("MLXCX_OP_NOP");
+ case MLXCX_OP_ALLOC_XRCD:
+ return ("MLXCX_OP_ALLOC_XRCD");
+ case MLXCX_OP_DEALLOC_XRCD:
+ return ("MLXCX_OP_DEALLOC_XRCD");
+ case MLXCX_OP_ALLOC_TRANSPORT_DOMAIN:
+ return ("MLXCX_OP_ALLOC_TRANSPORT_DOMAIN");
+ case MLXCX_OP_DEALLOC_TRANSPORT_DOMAIN:
+ return ("MLXCX_OP_DEALLOC_TRANSPORT_DOMAIN");
+ case MLXCX_OP_QUERY_CONG_STATUS:
+ return ("MLXCX_OP_QUERY_CONG_STATUS");
+ case MLXCX_OP_MODIFY_CONG_STATUS:
+ return ("MLXCX_OP_MODIFY_CONG_STATUS");
+ case MLXCX_OP_QUERY_CONG_PARAMS:
+ return ("MLXCX_OP_QUERY_CONG_PARAMS");
+ case MLXCX_OP_MODIFY_CONG_PARAMS:
+ return ("MLXCX_OP_MODIFY_CONG_PARAMS");
+ case MLXCX_OP_QUERY_CONG_STATISTICS:
+ return ("MLXCX_OP_QUERY_CONG_STATISTICS");
+ case MLXCX_OP_ADD_VXLAN_UDP_DPORT:
+ return ("MLXCX_OP_ADD_VXLAN_UDP_DPORT");
+ case MLXCX_OP_DELETE_VXLAN_UDP_DPORT:
+ return ("MLXCX_OP_DELETE_VXLAN_UDP_DPORT");
+ case MLXCX_OP_SET_L2_TABLE_ENTRY:
+ return ("MLXCX_OP_SET_L2_TABLE_ENTRY");
+ case MLXCX_OP_QUERY_L2_TABLE_ENTRY:
+ return ("MLXCX_OP_QUERY_L2_TABLE_ENTRY");
+ case MLXCX_OP_DELETE_L2_TABLE_ENTRY:
+ return ("MLXCX_OP_DELETE_L2_TABLE_ENTRY");
+ case MLXCX_OP_SET_WOL_ROL:
+ return ("MLXCX_OP_SET_WOL_ROL");
+ case MLXCX_OP_QUERY_WOL_ROL:
+ return ("MLXCX_OP_QUERY_WOL_ROL");
+ case MLXCX_OP_CREATE_TIR:
+ return ("MLXCX_OP_CREATE_TIR");
+ case MLXCX_OP_MODIFY_TIR:
+ return ("MLXCX_OP_MODIFY_TIR");
+ case MLXCX_OP_DESTROY_TIR:
+ return ("MLXCX_OP_DESTROY_TIR");
+ case MLXCX_OP_QUERY_TIR:
+ return ("MLXCX_OP_QUERY_TIR");
+ case MLXCX_OP_CREATE_SQ:
+ return ("MLXCX_OP_CREATE_SQ");
+ case MLXCX_OP_MODIFY_SQ:
+ return ("MLXCX_OP_MODIFY_SQ");
+ case MLXCX_OP_DESTROY_SQ:
+ return ("MLXCX_OP_DESTROY_SQ");
+ case MLXCX_OP_QUERY_SQ:
+ return ("MLXCX_OP_QUERY_SQ");
+ case MLXCX_OP_CREATE_RQ:
+ return ("MLXCX_OP_CREATE_RQ");
+ case MLXCX_OP_MODIFY_RQ:
+ return ("MLXCX_OP_MODIFY_RQ");
+ case MLXCX_OP_DESTROY_RQ:
+ return ("MLXCX_OP_DESTROY_RQ");
+ case MLXCX_OP_QUERY_RQ:
+ return ("MLXCX_OP_QUERY_RQ");
+ case MLXCX_OP_CREATE_RMP:
+ return ("MLXCX_OP_CREATE_RMP");
+ case MLXCX_OP_MODIFY_RMP:
+ return ("MLXCX_OP_MODIFY_RMP");
+ case MLXCX_OP_DESTROY_RMP:
+ return ("MLXCX_OP_DESTROY_RMP");
+ case MLXCX_OP_QUERY_RMP:
+ return ("MLXCX_OP_QUERY_RMP");
+ case MLXCX_OP_CREATE_TIS:
+ return ("MLXCX_OP_CREATE_TIS");
+ case MLXCX_OP_MODIFY_TIS:
+ return ("MLXCX_OP_MODIFY_TIS");
+ case MLXCX_OP_DESTROY_TIS:
+ return ("MLXCX_OP_DESTROY_TIS");
+ case MLXCX_OP_QUERY_TIS:
+ return ("MLXCX_OP_QUERY_TIS");
+ case MLXCX_OP_CREATE_RQT:
+ return ("MLXCX_OP_CREATE_RQT");
+ case MLXCX_OP_MODIFY_RQT:
+ return ("MLXCX_OP_MODIFY_RQT");
+ case MLXCX_OP_DESTROY_RQT:
+ return ("MLXCX_OP_DESTROY_RQT");
+ case MLXCX_OP_QUERY_RQT:
+ return ("MLXCX_OP_QUERY_RQT");
+ case MLXCX_OP_SET_FLOW_TABLE_ROOT:
+ return ("MLXCX_OP_SET_FLOW_TABLE_ROOT");
+ case MLXCX_OP_CREATE_FLOW_TABLE:
+ return ("MLXCX_OP_CREATE_FLOW_TABLE");
+ case MLXCX_OP_DESTROY_FLOW_TABLE:
+ return ("MLXCX_OP_DESTROY_FLOW_TABLE");
+ case MLXCX_OP_QUERY_FLOW_TABLE:
+ return ("MLXCX_OP_QUERY_FLOW_TABLE");
+ case MLXCX_OP_CREATE_FLOW_GROUP:
+ return ("MLXCX_OP_CREATE_FLOW_GROUP");
+ case MLXCX_OP_DESTROY_FLOW_GROUP:
+ return ("MLXCX_OP_DESTROY_FLOW_GROUP");
+ case MLXCX_OP_QUERY_FLOW_GROUP:
+ return ("MLXCX_OP_QUERY_FLOW_GROUP");
+ case MLXCX_OP_SET_FLOW_TABLE_ENTRY:
+ return ("MLXCX_OP_SET_FLOW_TABLE_ENTRY");
+ case MLXCX_OP_QUERY_FLOW_TABLE_ENTRY:
+ return ("MLXCX_OP_QUERY_FLOW_TABLE_ENTRY");
+ case MLXCX_OP_DELETE_FLOW_TABLE_ENTRY:
+ return ("MLXCX_OP_DELETE_FLOW_TABLE_ENTRY");
+ case MLXCX_OP_ALLOC_FLOW_COUNTER:
+ return ("MLXCX_OP_ALLOC_FLOW_COUNTER");
+ case MLXCX_OP_DEALLOC_FLOW_COUNTER:
+ return ("MLXCX_OP_DEALLOC_FLOW_COUNTER");
+ case MLXCX_OP_QUERY_FLOW_COUNTER:
+ return ("MLXCX_OP_QUERY_FLOW_COUNTER");
+ case MLXCX_OP_MODIFY_FLOW_TABLE:
+ return ("MLXCX_OP_MODIFY_FLOW_TABLE");
+ case MLXCX_OP_ALLOC_ENCAP_HEADER:
+ return ("MLXCX_OP_ALLOC_ENCAP_HEADER");
+ case MLXCX_OP_DEALLOC_ENCAP_HEADER:
+ return ("MLXCX_OP_DEALLOC_ENCAP_HEADER");
+ case MLXCX_OP_QUERY_ENCAP_HEADER:
+ return ("MLXCX_OP_QUERY_ENCAP_HEADER");
+ default:
+ return ("Unknown Opcode");
+ }
+}
+
+const char *
+mlxcx_port_status_string(mlxcx_port_status_t st)
+{
+ switch (st) {
+ case MLXCX_PORT_STATUS_UP:
+ return ("UP");
+ case MLXCX_PORT_STATUS_DOWN:
+ return ("DOWN");
+ case MLXCX_PORT_STATUS_UP_ONCE:
+ return ("UP_ONCE");
+ case MLXCX_PORT_STATUS_DISABLED:
+ return ("DISABLED");
+ default:
+ return ("UNKNOWN");
+ }
+}
+
+void
+mlxcx_eth_proto_to_string(mlxcx_eth_proto_t p, char *buf, size_t size)
+{
+ if (p & MLXCX_PROTO_SGMII)
+ (void) strlcat(buf, "SGMII|", size);
+ if (p & MLXCX_PROTO_1000BASE_KX)
+ (void) strlcat(buf, "1000BASE_KX|", size);
+ if (p & MLXCX_PROTO_10GBASE_CX4)
+ (void) strlcat(buf, "10GBASE_CX4|", size);
+ if (p & MLXCX_PROTO_10GBASE_KX4)
+ (void) strlcat(buf, "10GBASE_KX4|", size);
+ if (p & MLXCX_PROTO_10GBASE_KR)
+ (void) strlcat(buf, "10GBASE_KR|", size);
+ if (p & MLXCX_PROTO_40GBASE_CR4)
+ (void) strlcat(buf, "40GBASE_CR4|", size);
+ if (p & MLXCX_PROTO_40GBASE_KR4)
+ (void) strlcat(buf, "40GBASE_KR4|", size);
+ if (p & MLXCX_PROTO_SGMII_100BASE)
+ (void) strlcat(buf, "SGMII_100BASE|", size);
+ if (p & MLXCX_PROTO_10GBASE_CR)
+ (void) strlcat(buf, "10GBASE_CR|", size);
+ if (p & MLXCX_PROTO_10GBASE_SR)
+ (void) strlcat(buf, "10GBASE_SR|", size);
+ if (p & MLXCX_PROTO_10GBASE_ER_LR)
+ (void) strlcat(buf, "10GBASE_ER_LR|", size);
+ if (p & MLXCX_PROTO_40GBASE_SR4)
+ (void) strlcat(buf, "40GBASE_SR4|", size);
+ if (p & MLXCX_PROTO_40GBASE_LR4_ER4)
+ (void) strlcat(buf, "40GBASE_LR4_ER4|", size);
+ if (p & MLXCX_PROTO_50GBASE_SR2)
+ (void) strlcat(buf, "50GBASE_SR2|", size);
+ if (p & MLXCX_PROTO_100GBASE_CR4)
+ (void) strlcat(buf, "100GBASE_CR4|", size);
+ if (p & MLXCX_PROTO_100GBASE_SR4)
+ (void) strlcat(buf, "100GBASE_SR4|", size);
+ if (p & MLXCX_PROTO_100GBASE_KR4)
+ (void) strlcat(buf, "100GBASE_KR4|", size);
+ if (p & MLXCX_PROTO_25GBASE_CR)
+ (void) strlcat(buf, "25GBASE_CR|", size);
+ if (p & MLXCX_PROTO_25GBASE_KR)
+ (void) strlcat(buf, "25GBASE_KR|", size);
+ if (p & MLXCX_PROTO_25GBASE_SR)
+ (void) strlcat(buf, "25GBASE_SR|", size);
+ if (p & MLXCX_PROTO_50GBASE_CR2)
+ (void) strlcat(buf, "50GBASE_CR2|", size);
+ /* Chop off the trailing '|' */
+ if (strlen(buf) > 0)
+ buf[strlen(buf) - 1] = '\0';
+}
+
+void
+mlxcx_cmd_queue_fini(mlxcx_t *mlxp)
+{
+ mlxcx_cmd_queue_t *cmd = &mlxp->mlx_cmd;
+
+ mutex_enter(&cmd->mcmd_lock);
+ VERIFY3S(cmd->mcmd_status, ==, MLXCX_CMD_QUEUE_S_IDLE);
+ mutex_exit(&cmd->mcmd_lock);
+
+ if (cmd->mcmd_tokens != NULL) {
+ id_space_destroy(cmd->mcmd_tokens);
+ cmd->mcmd_tokens = NULL;
+ }
+
+ if (cmd->mcmd_taskq != NULL) {
+ ddi_taskq_destroy(cmd->mcmd_taskq);
+ cmd->mcmd_taskq = NULL;
+ }
+
+ cv_destroy(&cmd->mcmd_cv);
+ mutex_destroy(&cmd->mcmd_lock);
+
+ cmd->mcmd_ent = NULL;
+ mlxcx_dma_free(&cmd->mcmd_dma);
+}
+
+boolean_t
+mlxcx_cmd_queue_init(mlxcx_t *mlxp)
+{
+ uint32_t tmp, cmd_low, cmd_high, i;
+ mlxcx_cmd_queue_t *cmd = &mlxp->mlx_cmd;
+ char buf[64];
+ const ddi_dma_cookie_t *ck;
+
+ ddi_device_acc_attr_t acc;
+ ddi_dma_attr_t attr;
+
+ tmp = mlxcx_get32(mlxp, MLXCX_ISS_FIRMWARE);
+ mlxp->mlx_fw_maj = MLXCX_ISS_FW_MAJOR(tmp);
+ mlxp->mlx_fw_min = MLXCX_ISS_FW_MINOR(tmp);
+
+ tmp = mlxcx_get32(mlxp, MLXCX_ISS_FW_CMD);
+ mlxp->mlx_fw_rev = MLXCX_ISS_FW_REV(tmp);
+ mlxp->mlx_cmd_rev = MLXCX_ISS_CMD_REV(tmp);
+
+ if (mlxp->mlx_cmd_rev != MLXCX_CMD_REVISION) {
+ mlxcx_warn(mlxp, "found unsupported command revision: %u, "
+ "expected %u", mlxp->mlx_cmd_rev, MLXCX_CMD_REVISION);
+ return (B_FALSE);
+ }
+
+ cmd_low = mlxcx_get32(mlxp, MLXCX_ISS_CMD_LOW);
+ cmd->mcmd_size_l2 = MLXCX_ISS_CMDQ_SIZE(cmd_low);
+ cmd->mcmd_stride_l2 = MLXCX_ISS_CMDQ_STRIDE(cmd_low);
+
+ mutex_init(&cmd->mcmd_lock, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&cmd->mcmd_cv, NULL, CV_DRIVER, NULL);
+ cmd->mcmd_status = MLXCX_CMD_QUEUE_S_IDLE;
+
+ (void) snprintf(buf, sizeof (buf), "mlxcx_tokens_%d", mlxp->mlx_inst);
+ if ((cmd->mcmd_tokens = id_space_create(buf, 1, UINT8_MAX)) == NULL) {
+ mlxcx_warn(mlxp, "failed to allocate token id space");
+ mlxcx_cmd_queue_fini(mlxp);
+ return (B_FALSE);
+ }
+
+ (void) snprintf(buf, sizeof (buf), "mlxcx_cmdq_%d", mlxp->mlx_inst);
+ if ((cmd->mcmd_taskq = ddi_taskq_create(mlxp->mlx_dip, buf, 1,
+ TASKQ_DEFAULTPRI, 0)) == NULL) {
+ mlxcx_warn(mlxp, "failed to create command queue task queue");
+ mlxcx_cmd_queue_fini(mlxp);
+ return (B_FALSE);
+ }
+
+ mlxcx_dma_acc_attr(mlxp, &acc);
+ mlxcx_dma_page_attr(mlxp, &attr);
+
+ if (!mlxcx_dma_alloc(mlxp, &cmd->mcmd_dma, &attr, &acc, B_TRUE,
+ MLXCX_CMD_DMA_PAGE_SIZE, B_TRUE)) {
+ mlxcx_warn(mlxp, "failed to allocate command dma buffer");
+ mlxcx_cmd_queue_fini(mlxp);
+ return (B_FALSE);
+ }
+
+ ck = mlxcx_dma_cookie_one(&cmd->mcmd_dma);
+ cmd_high = (uint32_t)(ck->dmac_laddress >> 32);
+ cmd_low = (uint32_t)(ck->dmac_laddress & UINT32_MAX);
+
+ mlxcx_put32(mlxp, MLXCX_ISS_CMD_HIGH, cmd_high);
+ mlxcx_put32(mlxp, MLXCX_ISS_CMD_LOW, cmd_low);
+
+ /*
+ * Before this is ready, the initializing bit must become zero.
+ */
+ for (i = 0; i < mlxcx_cmd_init_trys; i++) {
+ uint32_t init = mlxcx_get32(mlxp, MLXCX_ISS_INIT);
+
+ if (MLXCX_ISS_INITIALIZING(init) == 0)
+ break;
+ delay(drv_usectohz(mlxcx_cmd_init_delay));
+ }
+ if (i == mlxcx_cmd_init_trys) {
+ mlxcx_warn(mlxp, "timed out initializing command queue");
+ mlxcx_cmd_queue_fini(mlxp);
+ return (B_FALSE);
+ }
+
+ cmd->mcmd_ent = (void *)cmd->mcmd_dma.mxdb_va;
+
+ return (B_TRUE);
+}
+
+static void
+mlxcx_cmd_in_header_init(mlxcx_cmd_t *cmd, mlxcx_cmd_in_t *in,
+ mlxcx_cmd_op_t op, uint16_t mod)
+{
+ ASSERT3U(op, <=, UINT16_MAX);
+ in->mci_opcode = to_be16(op);
+ in->mci_op_mod = to_be16(mod);
+ cmd->mlcmd_op = op;
+}
+
+static boolean_t
+mlxcx_cmd_mbox_alloc(mlxcx_t *mlxp, list_t *listp, uint8_t nblocks)
+{
+ uint8_t i;
+ ddi_device_acc_attr_t acc;
+ ddi_dma_attr_t attr;
+
+ mlxcx_dma_acc_attr(mlxp, &acc);
+ mlxcx_dma_page_attr(mlxp, &attr);
+
+ for (i = 0; i < nblocks; i++) {
+ mlxcx_cmd_mbox_t *mbox;
+
+ mbox = kmem_zalloc(sizeof (*mbox), KM_SLEEP);
+ if (!mlxcx_dma_alloc(mlxp, &mbox->mlbox_dma, &attr, &acc,
+ B_TRUE, sizeof (mlxcx_cmd_mailbox_t), B_TRUE)) {
+ mlxcx_warn(mlxp, "failed to allocate mailbox dma "
+ "buffer");
+ kmem_free(mbox, sizeof (*mbox));
+ /*
+ * mlxcx_cmd_fini will clean up any mboxes that we
+ * already placed onto listp.
+ */
+ return (B_FALSE);
+ }
+ mbox->mlbox_data = (void *)mbox->mlbox_dma.mxdb_va;
+ list_insert_tail(listp, mbox);
+ }
+
+ return (B_TRUE);
+}
+
+static void
+mlxcx_cmd_mbox_free(mlxcx_cmd_mbox_t *mbox)
+{
+ mlxcx_dma_free(&mbox->mlbox_dma);
+ kmem_free(mbox, sizeof (mlxcx_cmd_mbox_t));
+}
+
+static void
+mlxcx_cmd_fini(mlxcx_t *mlxp, mlxcx_cmd_t *cmd)
+{
+ mlxcx_cmd_mbox_t *mbox;
+
+ while ((mbox = list_remove_head(&cmd->mlcmd_mbox_out)) != NULL) {
+ mlxcx_cmd_mbox_free(mbox);
+ }
+ list_destroy(&cmd->mlcmd_mbox_out);
+ while ((mbox = list_remove_head(&cmd->mlcmd_mbox_in)) != NULL) {
+ mlxcx_cmd_mbox_free(mbox);
+ }
+ list_destroy(&cmd->mlcmd_mbox_in);
+ id_free(mlxp->mlx_cmd.mcmd_tokens, cmd->mlcmd_token);
+ cv_destroy(&cmd->mlcmd_cv);
+ mutex_destroy(&cmd->mlcmd_lock);
+}
+
+static void
+mlxcx_cmd_init(mlxcx_t *mlxp, mlxcx_cmd_t *cmd)
+{
+ bzero(cmd, sizeof (*cmd));
+ mutex_init(&cmd->mlcmd_lock, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&cmd->mlcmd_cv, NULL, CV_DRIVER, NULL);
+ cmd->mlcmd_token = id_alloc(mlxp->mlx_cmd.mcmd_tokens);
+ list_create(&cmd->mlcmd_mbox_in, sizeof (mlxcx_cmd_mbox_t),
+ offsetof(mlxcx_cmd_mbox_t, mlbox_node));
+ list_create(&cmd->mlcmd_mbox_out, sizeof (mlxcx_cmd_mbox_t),
+ offsetof(mlxcx_cmd_mbox_t, mlbox_node));
+}
+
+static void
+mlxcx_cmd_prep_input(mlxcx_cmd_ent_t *ent, mlxcx_cmd_t *cmd)
+{
+ uint32_t rem = cmd->mlcmd_inlen;
+ uint8_t i;
+ const void *in = cmd->mlcmd_in;
+ uint32_t copy;
+ mlxcx_cmd_mbox_t *mbox;
+ const ddi_dma_cookie_t *ck;
+
+ copy = MIN(MLXCX_CMD_INLINE_INPUT_LEN, rem);
+ bcopy(in, ent->mce_input, copy);
+
+ rem -= copy;
+ in += copy;
+
+ if (rem == 0) {
+ ent->mce_in_mbox = to_be64(0);
+ VERIFY3U(cmd->mlcmd_nboxes_in, ==, 0);
+ return;
+ }
+
+ mbox = list_head(&cmd->mlcmd_mbox_in);
+ ck = mlxcx_dma_cookie_one(&mbox->mlbox_dma);
+ ent->mce_in_mbox = to_be64(ck->dmac_laddress);
+ for (i = 0; mbox != NULL;
+ mbox = list_next(&cmd->mlcmd_mbox_in, mbox), i++) {
+ mlxcx_cmd_mbox_t *next;
+ mlxcx_cmd_mailbox_t *mp = mbox->mlbox_data;
+
+ copy = MIN(MLXCX_CMD_MAILBOX_LEN, rem);
+ bcopy(in, mp->mlxb_data, copy);
+ rem -= copy;
+ in += copy;
+
+ mp->mlxb_token = cmd->mlcmd_token;
+ mp->mlxb_blockno = to_be32(i);
+
+ next = list_next(&cmd->mlcmd_mbox_in, mbox);
+ if (next == NULL) {
+ mp->mlxb_nextp = to_be64(0);
+ } else {
+ ck = mlxcx_dma_cookie_one(&next->mlbox_dma);
+ mp->mlxb_nextp = to_be64(ck->dmac_laddress);
+ }
+ MLXCX_DMA_SYNC(mbox->mlbox_dma, DDI_DMA_SYNC_FORDEV);
+ }
+ VERIFY3U(i, ==, cmd->mlcmd_nboxes_in);
+ VERIFY0(rem);
+}
+
+static void
+mlxcx_cmd_prep_output(mlxcx_cmd_ent_t *ent, mlxcx_cmd_t *cmd)
+{
+ uint8_t i;
+ mlxcx_cmd_mbox_t *mbox;
+ const ddi_dma_cookie_t *ck;
+
+ if (cmd->mlcmd_nboxes_out == 0) {
+ ent->mce_out_mbox = to_be64(0);
+ return;
+ }
+
+ mbox = list_head(&cmd->mlcmd_mbox_out);
+ ck = mlxcx_dma_cookie_one(&mbox->mlbox_dma);
+ ent->mce_out_mbox = to_be64(ck->dmac_laddress);
+ for (i = 0, mbox = list_head(&cmd->mlcmd_mbox_out); mbox != NULL;
+ mbox = list_next(&cmd->mlcmd_mbox_out, mbox), i++) {
+ mlxcx_cmd_mbox_t *next;
+ mlxcx_cmd_mailbox_t *mp = mbox->mlbox_data;
+
+ mp->mlxb_token = cmd->mlcmd_token;
+ mp->mlxb_blockno = to_be32(i);
+
+ next = list_next(&cmd->mlcmd_mbox_out, mbox);
+ if (next == NULL) {
+ mp->mlxb_nextp = to_be64(0);
+ } else {
+ ck = mlxcx_dma_cookie_one(&next->mlbox_dma);
+ mp->mlxb_nextp = to_be64(ck->dmac_laddress);
+ }
+ MLXCX_DMA_SYNC(mbox->mlbox_dma, DDI_DMA_SYNC_FORDEV);
+ }
+ VERIFY3U(i, ==, cmd->mlcmd_nboxes_out);
+}
+
+static void
+mlxcx_cmd_copy_output(mlxcx_cmd_ent_t *ent, mlxcx_cmd_t *cmd)
+{
+ void *out = cmd->mlcmd_out;
+ uint32_t rem = cmd->mlcmd_outlen;
+ uint32_t copy;
+ mlxcx_cmd_mbox_t *mbox;
+
+ copy = MIN(rem, MLXCX_CMD_INLINE_OUTPUT_LEN);
+ bcopy(ent->mce_output, out, copy);
+ out += copy;
+ rem -= copy;
+
+ if (rem == 0) {
+ VERIFY0(cmd->mlcmd_nboxes_out);
+ return;
+ }
+
+ for (mbox = list_head(&cmd->mlcmd_mbox_out); mbox != NULL;
+ mbox = list_next(&cmd->mlcmd_mbox_out, mbox)) {
+ MLXCX_DMA_SYNC(mbox->mlbox_dma, DDI_DMA_SYNC_FORKERNEL);
+ copy = MIN(MLXCX_CMD_MAILBOX_LEN, rem);
+ bcopy(mbox->mlbox_data->mlxb_data, out, copy);
+ out += copy;
+ rem -= copy;
+ }
+ VERIFY0(rem);
+}
+
+/*
+ * Taskq worker that executes a single staged command against the hardware
+ * command queue. It serializes on the queue (waiting while another command
+ * is BUSY), fills in the queue entry, rings the doorbell, and then polls
+ * for hardware completion up to mlxcx_cmd_tries times with mlxcx_cmd_delay
+ * microseconds between polls. Results (or a timeout/internal error) are
+ * published under mlcmd_lock and waiters on mlcmd_cv are woken; after the
+ * broadcast the cmd must not be touched again by this thread. Finally the
+ * queue is marked IDLE and queue waiters are woken.
+ */
+static void
+mlxcx_cmd_taskq(void *arg)
+{
+ mlxcx_cmd_t *cmd = arg;
+ mlxcx_t *mlxp = cmd->mlcmd_mlxp;
+ mlxcx_cmd_queue_t *cmdq = &mlxp->mlx_cmd;
+ mlxcx_cmd_ent_t *ent;
+ uint_t poll;
+
+ ASSERT3S(cmd->mlcmd_op, !=, 0);
+
+ mutex_enter(&cmdq->mcmd_lock);
+ while (cmdq->mcmd_status == MLXCX_CMD_QUEUE_S_BUSY) {
+ cv_wait(&cmdq->mcmd_cv, &cmdq->mcmd_lock);
+ }
+
+ /* Any state other than IDLE here means the queue is unusable. */
+ if (cmdq->mcmd_status != MLXCX_CMD_QUEUE_S_IDLE) {
+ mutex_exit(&cmdq->mcmd_lock);
+
+ mutex_enter(&cmd->mlcmd_lock);
+ cmd->mlcmd_state = MLXCX_CMD_S_ERROR;
+ cv_broadcast(&cmd->mlcmd_cv);
+ mutex_exit(&cmd->mlcmd_lock);
+ return;
+ }
+
+ cmdq->mcmd_status = MLXCX_CMD_QUEUE_S_BUSY;
+ ent = cmdq->mcmd_ent;
+ mutex_exit(&cmdq->mcmd_lock);
+
+ /*
+ * Command queue is currently ours as we set busy.
+ */
+ bzero(ent, sizeof (*ent));
+ ent->mce_type = MLXCX_CMD_TRANSPORT_PCI;
+ ent->mce_in_length = to_be32(cmd->mlcmd_inlen);
+ ent->mce_out_length = to_be32(cmd->mlcmd_outlen);
+ ent->mce_token = cmd->mlcmd_token;
+ ent->mce_sig = 0;
+ /* Hand ownership of the entry to the hardware. */
+ ent->mce_status = MLXCX_CMD_HW_OWNED;
+ mlxcx_cmd_prep_input(ent, cmd);
+ mlxcx_cmd_prep_output(ent, cmd);
+ MLXCX_DMA_SYNC(cmdq->mcmd_dma, DDI_DMA_SYNC_FORDEV);
+
+ /* This assumes we only ever use the first command */
+ mlxcx_put32(mlxp, MLXCX_ISS_CMD_DOORBELL, 1);
+
+ /* Poll until the hardware clears its ownership bit, or we give up. */
+ for (poll = 0; poll < mlxcx_cmd_tries; poll++) {
+ delay(drv_usectohz(mlxcx_cmd_delay));
+ MLXCX_DMA_SYNC(cmdq->mcmd_dma, DDI_DMA_SYNC_FORKERNEL);
+ if ((ent->mce_status & MLXCX_CMD_HW_OWNED) == 0)
+ break;
+ }
+
+ /*
+ * Command is done (or timed out). Save relevant data. Once we broadcast
+ * on the CV and drop the lock, we must not touch the cmd again.
+ */
+ mutex_enter(&cmd->mlcmd_lock);
+
+ if (poll == mlxcx_cmd_tries) {
+ cmd->mlcmd_status = MLXCX_CMD_R_TIMEOUT;
+ cmd->mlcmd_state = MLXCX_CMD_S_ERROR;
+ mlxcx_fm_ereport(mlxp, DDI_FM_DEVICE_NO_RESPONSE);
+ } else {
+ cmd->mlcmd_status = MLXCX_CMD_STATUS(ent->mce_status);
+ cmd->mlcmd_state = MLXCX_CMD_S_DONE;
+ if (cmd->mlcmd_status == 0) {
+ mlxcx_cmd_copy_output(ent, cmd);
+ }
+ }
+ cv_broadcast(&cmd->mlcmd_cv);
+ mutex_exit(&cmd->mlcmd_lock);
+
+ /* Release the command queue for the next command. */
+ mutex_enter(&cmdq->mcmd_lock);
+ cmdq->mcmd_status = MLXCX_CMD_QUEUE_S_IDLE;
+ cv_broadcast(&cmdq->mcmd_cv);
+ mutex_exit(&cmdq->mcmd_lock);
+}
+
+/*
+ * Stage a command for execution. Input/output that does not fit in the
+ * queue entry's inline areas gets a chain of DMA mailboxes allocated here
+ * (the block count must fit in the entry's 8-bit field, hence the
+ * UINT8_MAX checks). The caller's buffers are recorded on the cmd and the
+ * request is handed to the command taskq for asynchronous execution.
+ * Returns B_FALSE on allocation or dispatch failure; the caller is
+ * expected to call mlxcx_cmd_fini() in that case, as all call sites do.
+ */
+static boolean_t
+mlxcx_cmd_send(mlxcx_t *mlxp, mlxcx_cmd_t *cmd, const void *in, uint32_t inlen,
+ void *out, uint32_t outlen)
+{
+ if (inlen > MLXCX_CMD_INLINE_INPUT_LEN) {
+ uint32_t need = inlen - MLXCX_CMD_INLINE_INPUT_LEN;
+ uint8_t nblocks;
+
+ if (need / MLXCX_CMD_MAILBOX_LEN + 1 > UINT8_MAX) {
+ mlxcx_warn(mlxp, "requested too many input blocks for "
+ "%u byte input len", inlen);
+ return (B_FALSE);
+ }
+
+ nblocks = need / MLXCX_CMD_MAILBOX_LEN + 1;
+ if (!mlxcx_cmd_mbox_alloc(mlxp, &cmd->mlcmd_mbox_in, nblocks)) {
+ mlxcx_warn(mlxp, "failed to allocate %u blocks of "
+ "input mailbox", nblocks);
+ return (B_FALSE);
+ }
+ cmd->mlcmd_nboxes_in = nblocks;
+ }
+
+ if (outlen > MLXCX_CMD_INLINE_OUTPUT_LEN) {
+ uint32_t need = outlen - MLXCX_CMD_INLINE_OUTPUT_LEN;
+ uint8_t nblocks;
+
+ if (need / MLXCX_CMD_MAILBOX_LEN + 1 > UINT8_MAX) {
+ mlxcx_warn(mlxp, "requested too many output blocks for "
+ "%u byte output len", outlen);
+ return (B_FALSE);
+ }
+
+ nblocks = need / MLXCX_CMD_MAILBOX_LEN + 1;
+ if (!mlxcx_cmd_mbox_alloc(mlxp, &cmd->mlcmd_mbox_out,
+ nblocks)) {
+ mlxcx_warn(mlxp, "failed to allocate %u blocks of "
+ "output mailbox", nblocks);
+ return (B_FALSE);
+ }
+ cmd->mlcmd_nboxes_out = nblocks;
+ }
+
+ cmd->mlcmd_in = in;
+ cmd->mlcmd_inlen = inlen;
+ cmd->mlcmd_out = out;
+ cmd->mlcmd_outlen = outlen;
+ cmd->mlcmd_mlxp = mlxp;
+
+ /*
+ * Now that all allocations have been done, all that remains is for us
+ * to dispatch the request to process this to the taskq for it to be
+ * processed.
+ */
+ if (ddi_taskq_dispatch(mlxp->mlx_cmd.mcmd_taskq, mlxcx_cmd_taskq, cmd,
+ DDI_SLEEP) != DDI_SUCCESS) {
+ mlxcx_warn(mlxp, "failed to submit command to taskq");
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Block until the taskq worker publishes a result for this command by
+ * setting mlcmd_state (from its initial zero) and broadcasting mlcmd_cv.
+ */
+static void
+mlxcx_cmd_wait(mlxcx_cmd_t *cmd)
+{
+ mutex_enter(&cmd->mlcmd_lock);
+ while (cmd->mlcmd_state == 0) {
+ cv_wait(&cmd->mlcmd_cv, &cmd->mlcmd_lock);
+ }
+ mutex_exit(&cmd->mlcmd_lock);
+}
+
+/*
+ * Evaluate a completed command, checking each failure layer in turn:
+ * an internal driver error (MLXCX_CMD_S_ERROR set in mlcmd_state), a
+ * command-queue delivery error (non-zero mlcmd_status), and finally the
+ * status code in the command's own output header. Logs a warning naming
+ * the opcode for whichever failure is found. Returns B_TRUE only if all
+ * three layers report success.
+ */
+static boolean_t
+mlxcx_cmd_evaluate(mlxcx_t *mlxp, mlxcx_cmd_t *cmd)
+{
+ mlxcx_cmd_out_t *out;
+
+ if ((cmd->mlcmd_state & MLXCX_CMD_S_ERROR) != 0) {
+ mlxcx_warn(mlxp, "command %s (0x%x) failed due to an internal "
+ "driver error",
+ mlxcx_cmd_opcode_string(cmd->mlcmd_op),
+ cmd->mlcmd_op);
+ return (B_FALSE);
+ }
+
+ if (cmd->mlcmd_status != 0) {
+ mlxcx_warn(mlxp, "command %s (0x%x) failed with command queue "
+ "error 0x%x",
+ mlxcx_cmd_opcode_string(cmd->mlcmd_op),
+ cmd->mlcmd_op, cmd->mlcmd_status);
+ return (B_FALSE);
+ }
+
+ out = cmd->mlcmd_out;
+ if (out->mco_status != MLXCX_CMD_R_OK) {
+ mlxcx_warn(mlxp, "command %s 0x%x failed with status code %s "
+ "(0x%x)", mlxcx_cmd_opcode_string(cmd->mlcmd_op),
+ cmd->mlcmd_op, mlxcx_cmd_response_string(out->mco_status),
+ out->mco_status);
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Issue DISABLE_HCA for this function (MLXCX_FUNCTION_SELF), following the
+ * standard init/send/wait/evaluate/fini command sequence.
+ */
+boolean_t
+mlxcx_cmd_disable_hca(mlxcx_t *mlxp)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_disable_hca_in_t in;
+ mlxcx_cmd_disable_hca_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_disable_hca_head,
+ MLXCX_OP_DISABLE_HCA, 0);
+ in.mlxi_disable_hca_func = MLXCX_FUNCTION_SELF;
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue ENABLE_HCA for this function (MLXCX_FUNCTION_SELF), following the
+ * standard init/send/wait/evaluate/fini command sequence.
+ */
+boolean_t
+mlxcx_cmd_enable_hca(mlxcx_t *mlxp)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_enable_hca_in_t in;
+ mlxcx_cmd_enable_hca_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_enable_hca_head,
+ MLXCX_OP_ENABLE_HCA, 0);
+ in.mlxi_enable_hca_func = MLXCX_FUNCTION_SELF;
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue QUERY_ISSI and return the supported-ISSI bitmask in *issip. Per
+ * the PRM, firmware that predates this command rejects it with BAD_OP;
+ * that case is treated as success with only ISSI version 1 supported.
+ * *issip is only written when B_TRUE is returned.
+ */
+boolean_t
+mlxcx_cmd_query_issi(mlxcx_t *mlxp, uint32_t *issip)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_query_issi_in_t in;
+ mlxcx_cmd_query_issi_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_query_issi_head,
+ MLXCX_OP_QUERY_ISSI, 0);
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ *issip = out.mlxo_supported_issi;
+ } else if (cmd.mlcmd_status == 0 &&
+ out.mlxo_query_issi_head.mco_status == MLXCX_CMD_R_BAD_OP) {
+ /*
+ * The PRM says that if we get a bad operation, that means this
+ * command isn't supported so it only supports version 1 of the
+ * ISSI, which means bit zero should be set.
+ */
+ ret = B_TRUE;
+ *issip = 1;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue SET_ISSI to select the interface version (big-endian on the wire)
+ * to be used for subsequent commands.
+ */
+boolean_t
+mlxcx_cmd_set_issi(mlxcx_t *mlxp, uint16_t issi)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_set_issi_in_t in;
+ mlxcx_cmd_set_issi_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_set_issi_head,
+ MLXCX_OP_SET_ISSI, 0);
+ in.mlxi_set_issi_current = to_be16(issi);
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue QUERY_PAGES to ask the firmware how many pages it needs at the
+ * given stage (boot/init/regular, passed as the opmod). On success the
+ * (possibly negative) page count is written to *npages; *npages is left
+ * untouched on failure.
+ */
+boolean_t
+mlxcx_cmd_query_pages(mlxcx_t *mlxp, uint_t type, int32_t *npages)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_query_pages_in_t in;
+ mlxcx_cmd_query_pages_out_t out;
+ boolean_t ret;
+
+ /* Only the three documented operation modes are accepted. */
+ switch (type) {
+ case MLXCX_QUERY_PAGES_OPMOD_BOOT:
+ case MLXCX_QUERY_PAGES_OPMOD_INIT:
+ case MLXCX_QUERY_PAGES_OPMOD_REGULAR:
+ break;
+ default:
+ mlxcx_warn(mlxp, "!passed invalid type to query pages: %u",
+ type);
+ return (B_FALSE);
+ }
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_query_pages_head,
+ MLXCX_OP_QUERY_PAGES, type);
+ in.mlxi_query_pages_func = MLXCX_FUNCTION_SELF;
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ *npages = from_be32(out.mlxo_query_pages_npages);
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+
+ return (ret);
+}
+
+/*
+ * Issue MANAGE_PAGES to hand pages to the firmware. With the GIVE_PAGES
+ * opmod, the physical address of each page (which must be 4KB-aligned and
+ * exactly MLXCX_HW_PAGE_SIZE long) is packed big-endian into the variable
+ * length PA array of the input struct; insize/outsize are trimmed to the
+ * portions of the command structs actually used. The ALLOC_FAIL opmod
+ * reports an allocation failure to firmware and must carry zero pages.
+ */
+boolean_t
+mlxcx_cmd_give_pages(mlxcx_t *mlxp, uint_t type, int32_t npages,
+ mlxcx_dev_page_t **pages)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_manage_pages_in_t in;
+ mlxcx_cmd_manage_pages_out_t out;
+ size_t insize, outsize;
+ boolean_t ret;
+ uint32_t i;
+ uint64_t pa;
+ const ddi_dma_cookie_t *ck;
+
+ switch (type) {
+ case MLXCX_MANAGE_PAGES_OPMOD_ALLOC_FAIL:
+ if (npages != 0) {
+ mlxcx_warn(mlxp, "passed non-zero number of pages (%d) "
+ "but asked to fail page allocation", npages);
+ return (B_FALSE);
+ }
+ break;
+ case MLXCX_MANAGE_PAGES_OPMOD_GIVE_PAGES:
+ if (npages <= 0 || npages > MLXCX_MANAGE_PAGES_MAX_PAGES) {
+ mlxcx_warn(mlxp, "passed invalid number of pages (%d) "
+ "to give pages", npages);
+ return (B_FALSE);
+ }
+ break;
+ default:
+ mlxcx_warn(mlxp, "!passed invalid type to give pages: %u",
+ type);
+ return (B_FALSE);
+ }
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+ insize = offsetof(mlxcx_cmd_manage_pages_in_t, mlxi_manage_pages_pas) +
+ npages * sizeof (uint64_t);
+ outsize = offsetof(mlxcx_cmd_manage_pages_out_t, mlxo_manage_pages_pas);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_manage_pages_head,
+ MLXCX_OP_MANAGE_PAGES, type);
+ in.mlxi_manage_pages_func = MLXCX_FUNCTION_SELF;
+ in.mlxi_manage_pages_npages = to_be32(npages);
+ for (i = 0; i < npages; i++) {
+ ck = mlxcx_dma_cookie_one(&pages[i]->mxdp_dma);
+ pa = ck->dmac_laddress;
+ /* Firmware pages must be page-aligned and exactly one page. */
+ ASSERT3U(pa & 0xfff, ==, 0);
+ ASSERT3U(ck->dmac_size, ==, MLXCX_HW_PAGE_SIZE);
+ in.mlxi_manage_pages_pas[i] = to_be64(pa);
+ }
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, insize, &out, outsize)) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ mlxcx_cmd_fini(mlxp, &cmd);
+
+ return (ret);
+}
+
+/*
+ * Issue MANAGE_PAGES with the RETURN_PAGES opmod to reclaim up to nreq
+ * pages from the firmware. On success, *nret is set to the number of
+ * pages the firmware actually returned and their physical addresses are
+ * copied (host byte order) into pas[], which the caller must size for at
+ * least nreq entries.
+ */
+boolean_t
+mlxcx_cmd_return_pages(mlxcx_t *mlxp, int32_t nreq, uint64_t *pas,
+ int32_t *nret)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_manage_pages_in_t in;
+ mlxcx_cmd_manage_pages_out_t out;
+ size_t insize, outsize;
+ boolean_t ret;
+ uint32_t i;
+
+ if (nreq <= 0) {
+ mlxcx_warn(mlxp, "passed invalid number of pages (%d) "
+ "to return pages", nreq);
+ return (B_FALSE);
+ }
+ VERIFY3S(nreq, <=, MLXCX_MANAGE_PAGES_MAX_PAGES);
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+ /* Input carries no PAs; output must have room for nreq of them. */
+ insize = offsetof(mlxcx_cmd_manage_pages_in_t, mlxi_manage_pages_pas);
+ outsize = offsetof(mlxcx_cmd_manage_pages_out_t,
+ mlxo_manage_pages_pas) + nreq * sizeof (uint64_t);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_manage_pages_head,
+ MLXCX_OP_MANAGE_PAGES, MLXCX_MANAGE_PAGES_OPMOD_RETURN_PAGES);
+ in.mlxi_manage_pages_func = MLXCX_FUNCTION_SELF;
+ in.mlxi_manage_pages_npages = to_be32(nreq);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, insize, &out, outsize)) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ *nret = from_be32(out.mlxo_manage_pages_npages);
+ for (i = 0; i < *nret; i++) {
+ pas[i] = from_be64(out.mlxo_manage_pages_pas[i]);
+ }
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+
+ return (ret);
+}
+
+/*
+ * Issue QUERY_HCA_CAP for a given capability type and mode (current vs.
+ * max, encoded together in the opmod as type << 1 | mode). The output
+ * struct is large, so it is heap-allocated rather than placed on the
+ * kernel stack. On success the raw capability bytes are copied into
+ * capp->mhc_bulk and the type/mode recorded alongside them; capp is left
+ * untouched on failure.
+ *
+ * Fix: previously this returned B_TRUE unconditionally, masking command
+ * failures from the caller (every sibling wrapper returns the result of
+ * mlxcx_cmd_evaluate()). Return ret instead.
+ */
+boolean_t
+mlxcx_cmd_query_hca_cap(mlxcx_t *mlxp, mlxcx_hca_cap_type_t type,
+ mlxcx_hca_cap_mode_t mode, mlxcx_hca_cap_t *capp)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_query_hca_cap_in_t in;
+ mlxcx_cmd_query_hca_cap_out_t *out;
+ boolean_t ret;
+ uint16_t opmode;
+
+ bzero(&in, sizeof (in));
+ out = kmem_zalloc(sizeof (mlxcx_cmd_query_hca_cap_out_t), KM_SLEEP);
+ mlxcx_cmd_init(mlxp, &cmd);
+
+ /* The opmod selects both the capability type and the cap mode. */
+ opmode = type << 1 | mode;
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_query_hca_cap_head,
+ MLXCX_OP_QUERY_HCA_CAP, opmode);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), out, sizeof (*out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ kmem_free(out, sizeof (mlxcx_cmd_query_hca_cap_out_t));
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ capp->mhc_mode = mode;
+ capp->mhc_type = type;
+ ASSERT3U(sizeof (out->mlxo_query_hca_cap_data), ==,
+ sizeof (capp->mhc_bulk));
+ bcopy(out->mlxo_query_hca_cap_data, capp->mhc_bulk,
+ sizeof (capp->mhc_bulk));
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+
+ kmem_free(out, sizeof (mlxcx_cmd_query_hca_cap_out_t));
+ return (ret);
+}
+
+/*
+ * Issue INIT_HCA, following the standard init/send/wait/evaluate/fini
+ * command sequence.
+ */
+boolean_t
+mlxcx_cmd_init_hca(mlxcx_t *mlxp)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_init_hca_in_t in;
+ mlxcx_cmd_init_hca_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_init_hca_head,
+ MLXCX_OP_INIT_HCA, 0);
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue SET_DRIVER_VERSION, reporting our version string to the firmware.
+ * The VERIFY3U around strlcpy() fires if the string would be truncated to
+ * fit the fixed-size field in the command input.
+ */
+boolean_t
+mlxcx_cmd_set_driver_version(mlxcx_t *mlxp, const char *version)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_set_driver_version_in_t in;
+ mlxcx_cmd_set_driver_version_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_set_driver_version_head,
+ MLXCX_OP_SET_DRIVER_VERSION, 0);
+ VERIFY3U(strlcpy(in.mlxi_set_driver_version_version, version,
+ sizeof (in.mlxi_set_driver_version_version)), <=,
+ sizeof (in.mlxi_set_driver_version_version));
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue ALLOC_UAR to allocate a User Access Region. On success, records
+ * the UAR number (24-bit, big-endian on the wire) and derives its BAR
+ * offset (one hardware page per UAR), then precomputes the even/odd
+ * blueflame doorbell offsets for each of the MLXCX_BF_PER_UAR pairs.
+ */
+boolean_t
+mlxcx_cmd_alloc_uar(mlxcx_t *mlxp, mlxcx_uar_t *mlup)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_alloc_uar_in_t in;
+ mlxcx_cmd_alloc_uar_out_t out;
+ boolean_t ret;
+ size_t i;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_alloc_uar_head,
+ MLXCX_OP_ALLOC_UAR, 0);
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlup->mlu_allocated = B_TRUE;
+ mlup->mlu_num = from_be24(out.mlxo_alloc_uar_uar);
+ VERIFY3U(mlup->mlu_num, >, 0);
+ mlup->mlu_base = mlup->mlu_num * MLXCX_HW_PAGE_SIZE;
+
+ for (i = 0; i < MLXCX_BF_PER_UAR; ++i) {
+ mlup->mlu_bf[i].mbf_even = mlup->mlu_base +
+ MLXCX_BF_BASE + MLXCX_BF_SIZE * 2 * i;
+ mlup->mlu_bf[i].mbf_odd = mlup->mlu_bf[i].mbf_even +
+ MLXCX_BF_SIZE;
+ }
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue DEALLOC_UAR to release a previously allocated UAR (the VERIFY
+ * enforces that it was allocated). On success, the in-memory UAR state is
+ * cleared.
+ */
+boolean_t
+mlxcx_cmd_dealloc_uar(mlxcx_t *mlxp, mlxcx_uar_t *mlup)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_dealloc_uar_in_t in;
+ mlxcx_cmd_dealloc_uar_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_dealloc_uar_head,
+ MLXCX_OP_DEALLOC_UAR, 0);
+ VERIFY(mlup->mlu_allocated);
+ in.mlxi_dealloc_uar_uar = to_be24(mlup->mlu_num);
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlup->mlu_allocated = B_FALSE;
+ mlup->mlu_num = 0;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue ALLOC_PD to allocate a Protection Domain, recording its 24-bit
+ * number on success.
+ */
+boolean_t
+mlxcx_cmd_alloc_pd(mlxcx_t *mlxp, mlxcx_pd_t *mlpd)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_alloc_pd_in_t in;
+ mlxcx_cmd_alloc_pd_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_alloc_pd_head,
+ MLXCX_OP_ALLOC_PD, 0);
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlpd->mlpd_allocated = B_TRUE;
+ mlpd->mlpd_num = from_be24(out.mlxo_alloc_pd_pdn);
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue DEALLOC_PD to release a previously allocated Protection Domain,
+ * clearing the in-memory PD state on success.
+ */
+boolean_t
+mlxcx_cmd_dealloc_pd(mlxcx_t *mlxp, mlxcx_pd_t *mlpd)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_dealloc_pd_in_t in;
+ mlxcx_cmd_dealloc_pd_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_dealloc_pd_head,
+ MLXCX_OP_DEALLOC_PD, 0);
+ VERIFY(mlpd->mlpd_allocated);
+ in.mlxi_dealloc_pd_pdn = to_be24(mlpd->mlpd_num);
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlpd->mlpd_allocated = B_FALSE;
+ mlpd->mlpd_num = 0;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue ALLOC_TRANSPORT_DOMAIN, recording the new domain's 24-bit number
+ * on success.
+ */
+boolean_t
+mlxcx_cmd_alloc_tdom(mlxcx_t *mlxp, mlxcx_tdom_t *mltd)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_alloc_tdom_in_t in;
+ mlxcx_cmd_alloc_tdom_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_alloc_tdom_head,
+ MLXCX_OP_ALLOC_TRANSPORT_DOMAIN, 0);
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mltd->mltd_allocated = B_TRUE;
+ mltd->mltd_num = from_be24(out.mlxo_alloc_tdom_tdomn);
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue DEALLOC_TRANSPORT_DOMAIN for a previously allocated transport
+ * domain, clearing the in-memory state on success.
+ */
+boolean_t
+mlxcx_cmd_dealloc_tdom(mlxcx_t *mlxp, mlxcx_tdom_t *mltd)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_dealloc_tdom_in_t in;
+ mlxcx_cmd_dealloc_tdom_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_dealloc_tdom_head,
+ MLXCX_OP_DEALLOC_TRANSPORT_DOMAIN, 0);
+ VERIFY(mltd->mltd_allocated);
+ in.mlxi_dealloc_tdom_tdomn = to_be24(mltd->mltd_num);
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mltd->mltd_allocated = B_FALSE;
+ mltd->mltd_num = 0;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue TEARDOWN_HCA with the graceful profile to shut the HCA down.
+ */
+boolean_t
+mlxcx_cmd_teardown_hca(mlxcx_t *mlxp)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_teardown_hca_in_t in;
+ mlxcx_cmd_teardown_hca_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_teardown_hca_head,
+ MLXCX_OP_TEARDOWN_HCA, 0);
+ in.mlxi_teardown_hca_profile = to_be16(MLXCX_TEARDOWN_HCA_GRACEFUL);
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue QUERY_NIC_VPORT_CONTEXT for a port's vNIC vport and, on success,
+ * populate the port's GUID, MTU, permanent MAC address, and minimum WQE
+ * inline mode from the returned context. The caller must hold the port
+ * mutex since the port structure is updated here.
+ */
+boolean_t
+mlxcx_cmd_query_nic_vport_ctx(mlxcx_t *mlxp, mlxcx_port_t *mlp)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_query_nic_vport_ctx_in_t in;
+ mlxcx_cmd_query_nic_vport_ctx_out_t out;
+ boolean_t ret;
+ const mlxcx_nic_vport_ctx_t *ctx;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlp->mlp_mtx));
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_query_nic_vport_ctx_head,
+ MLXCX_OP_QUERY_NIC_VPORT_CONTEXT, MLXCX_VPORT_TYPE_VNIC);
+
+ in.mlxi_query_nic_vport_ctx_vport_number = to_be16(mlp->mlp_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ ctx = &out.mlxo_query_nic_vport_ctx_context;
+ mlp->mlp_guid = from_be64(ctx->mlnvc_port_guid);
+ mlp->mlp_mtu = from_be16(ctx->mlnvc_mtu);
+ bcopy(ctx->mlnvc_permanent_address, mlp->mlp_mac_address,
+ sizeof (mlp->mlp_mac_address));
+ mlp->mlp_wqe_min_inline = get_bits64(ctx->mlnvc_flags,
+ MLXCX_VPORT_CTX_MIN_WQE_INLINE);
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Map a PRM register id onto a short printable name for use in warning
+ * messages; ids we do not recognise come back as "???".
+ */
+static const char *
+mlxcx_reg_name(mlxcx_register_id_t rid)
+{
+ const char *name = "???";
+
+ if (rid == MLXCX_REG_PMTU)
+ name = "PMTU";
+ else if (rid == MLXCX_REG_PAOS)
+ name = "PAOS";
+ else if (rid == MLXCX_REG_PTYS)
+ name = "PTYS";
+ else if (rid == MLXCX_REG_MSGI)
+ name = "MSGI";
+ else if (rid == MLXCX_REG_PMAOS)
+ name = "PMAOS";
+ else if (rid == MLXCX_REG_MLCR)
+ name = "MLCR";
+ else if (rid == MLXCX_REG_MCIA)
+ name = "MCIA";
+ else if (rid == MLXCX_REG_PPCNT)
+ name = "PPCNT";
+
+ return (name);
+}
+
+/*
+ * Issue ACCESS_REG to read or write (per the opmod) one of the supported
+ * PRM port/management registers. The register payload size depends on
+ * which register is targeted, and insize/outsize are trimmed accordingly.
+ * The caller's data is always copied into the input (needed for writes,
+ * and reads carry selector fields there too); on a successful read the
+ * returned payload is copied back out into *data. An unsupported rid
+ * trips VERIFY(0).
+ */
+boolean_t
+mlxcx_cmd_access_register(mlxcx_t *mlxp, mlxcx_cmd_reg_opmod_t opmod,
+ mlxcx_register_id_t rid, mlxcx_register_data_t *data)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_access_register_in_t in;
+ mlxcx_cmd_access_register_out_t out;
+ boolean_t ret;
+ size_t dsize, insize, outsize;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_access_register_head,
+ MLXCX_OP_ACCESS_REG, opmod);
+
+ in.mlxi_access_register_register_id = to_be16(rid);
+
+ /* Payload size is per-register. */
+ switch (rid) {
+ case MLXCX_REG_PMTU:
+ dsize = sizeof (mlxcx_reg_pmtu_t);
+ break;
+ case MLXCX_REG_PAOS:
+ dsize = sizeof (mlxcx_reg_paos_t);
+ break;
+ case MLXCX_REG_PTYS:
+ dsize = sizeof (mlxcx_reg_ptys_t);
+ break;
+ case MLXCX_REG_MLCR:
+ dsize = sizeof (mlxcx_reg_mlcr_t);
+ break;
+ case MLXCX_REG_PMAOS:
+ dsize = sizeof (mlxcx_reg_pmaos_t);
+ break;
+ case MLXCX_REG_MCIA:
+ dsize = sizeof (mlxcx_reg_mcia_t);
+ break;
+ case MLXCX_REG_PPCNT:
+ dsize = sizeof (mlxcx_reg_ppcnt_t);
+ break;
+ default:
+ dsize = 0;
+ /* Unreachable; the return keeps the compiler satisfied. */
+ VERIFY(0);
+ return (B_FALSE);
+ }
+ insize = dsize + offsetof(mlxcx_cmd_access_register_in_t,
+ mlxi_access_register_data);
+ outsize = dsize + offsetof(mlxcx_cmd_access_register_out_t,
+ mlxo_access_register_data);
+
+ bcopy(data, &in.mlxi_access_register_data, dsize);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, insize, &out, outsize)) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ bcopy(&out.mlxo_access_register_data, data, dsize);
+ } else {
+ mlxcx_warn(mlxp, "failed OP_ACCESS_REG was for register "
+ "%04x (%s)", rid, mlxcx_reg_name(rid));
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Read the PMTU register for a port (PRM local port numbers are 1-based,
+ * hence the + 1) and, on success, update the port's current and maximum
+ * MTU from the admin/max MTU fields.
+ */
+boolean_t
+mlxcx_cmd_query_port_mtu(mlxcx_t *mlxp, mlxcx_port_t *mlp)
+{
+ mlxcx_register_data_t data;
+ boolean_t ret;
+
+ /*
+ * Since we modify the port here we require that the caller is holding
+ * the port mutex.
+ */
+ ASSERT(mutex_owned(&mlp->mlp_mtx));
+ bzero(&data, sizeof (data));
+ data.mlrd_pmtu.mlrd_pmtu_local_port = mlp->mlp_num + 1;
+
+ ret = mlxcx_cmd_access_register(mlxp, MLXCX_CMD_ACCESS_REGISTER_READ,
+ MLXCX_REG_PMTU, &data);
+
+ if (ret) {
+ mlp->mlp_mtu = from_be16(data.mlrd_pmtu.mlrd_pmtu_admin_mtu);
+ mlp->mlp_max_mtu = from_be16(data.mlrd_pmtu.mlrd_pmtu_max_mtu);
+ }
+
+ return (ret);
+}
+
+/*
+ * Read the PMAOS register for a transceiver module and, on success,
+ * return its operational status and error type via the optional pstatus
+ * and perr out-pointers (either may be NULL if not wanted).
+ */
+boolean_t
+mlxcx_cmd_query_module_status(mlxcx_t *mlxp, uint_t id,
+ mlxcx_module_status_t *pstatus, mlxcx_module_error_type_t *perr)
+{
+ mlxcx_register_data_t data;
+ boolean_t ret;
+
+ bzero(&data, sizeof (data));
+ /* The PMAOS module field is only 8 bits wide. */
+ ASSERT3U(id, <, 0xff);
+ data.mlrd_pmaos.mlrd_pmaos_module = (uint8_t)id;
+
+ ret = mlxcx_cmd_access_register(mlxp, MLXCX_CMD_ACCESS_REGISTER_READ,
+ MLXCX_REG_PMAOS, &data);
+
+ if (ret) {
+ if (pstatus != NULL)
+ *pstatus = data.mlrd_pmaos.mlrd_pmaos_oper_status;
+ if (perr != NULL)
+ *perr = data.mlrd_pmaos.mlrd_pmaos_error_type;
+ }
+
+ return (ret);
+}
+
+/*
+ * Write the port's current mlp_mtu into the PMTU register's admin MTU
+ * field (1-based local port number). The caller must hold the port mutex.
+ */
+boolean_t
+mlxcx_cmd_set_port_mtu(mlxcx_t *mlxp, mlxcx_port_t *mlp)
+{
+ mlxcx_register_data_t data;
+ boolean_t ret;
+
+ ASSERT(mutex_owned(&mlp->mlp_mtx));
+ bzero(&data, sizeof (data));
+ data.mlrd_pmtu.mlrd_pmtu_local_port = mlp->mlp_num + 1;
+ data.mlrd_pmtu.mlrd_pmtu_admin_mtu = to_be16(mlp->mlp_mtu);
+
+ ret = mlxcx_cmd_access_register(mlxp, MLXCX_CMD_ACCESS_REGISTER_WRITE,
+ MLXCX_REG_PMTU, &data);
+
+ return (ret);
+}
+
+/*
+ * Write the MLCR register to beacon the port LED for the given number of
+ * seconds. The caller must hold the port mutex.
+ */
+boolean_t
+mlxcx_cmd_set_port_led(mlxcx_t *mlxp, mlxcx_port_t *mlp, uint16_t sec)
+{
+ mlxcx_register_data_t data;
+ boolean_t ret;
+
+ ASSERT(mutex_owned(&mlp->mlp_mtx));
+ bzero(&data, sizeof (data));
+ data.mlrd_mlcr.mlrd_mlcr_local_port = mlp->mlp_num + 1;
+ set_bits8(&data.mlrd_mlcr.mlrd_mlcr_flags, MLXCX_MLCR_LED_TYPE,
+ MLXCX_LED_TYPE_PORT);
+ data.mlrd_mlcr.mlrd_mlcr_beacon_duration = to_be16(sec);
+
+ ret = mlxcx_cmd_access_register(mlxp, MLXCX_CMD_ACCESS_REGISTER_WRITE,
+ MLXCX_REG_MLCR, &data);
+
+ return (ret);
+}
+
+/*
+ * Read the PAOS register for a port and, on success, update the port's
+ * cached admin and operational status. The caller must hold the port
+ * mutex.
+ */
+boolean_t
+mlxcx_cmd_query_port_status(mlxcx_t *mlxp, mlxcx_port_t *mlp)
+{
+ mlxcx_register_data_t data;
+ boolean_t ret;
+
+ ASSERT(mutex_owned(&mlp->mlp_mtx));
+ bzero(&data, sizeof (data));
+ data.mlrd_paos.mlrd_paos_local_port = mlp->mlp_num + 1;
+
+ ret = mlxcx_cmd_access_register(mlxp, MLXCX_CMD_ACCESS_REGISTER_READ,
+ MLXCX_REG_PAOS, &data);
+
+ if (ret) {
+ mlp->mlp_admin_status = data.mlrd_paos.mlrd_paos_admin_status;
+ mlp->mlp_oper_status = data.mlrd_paos.mlrd_paos_oper_status;
+ }
+
+ return (ret);
+}
+
+/*
+ * Read the PTYS register (ethernet protocol mask) for a port and, on
+ * success, update the port's autonegotiation flag and its supported,
+ * administratively enabled, and currently operating protocol sets. The
+ * caller must hold the port mutex.
+ */
+boolean_t
+mlxcx_cmd_query_port_speed(mlxcx_t *mlxp, mlxcx_port_t *mlp)
+{
+ mlxcx_register_data_t data;
+ boolean_t ret;
+
+ ASSERT(mutex_owned(&mlp->mlp_mtx));
+ bzero(&data, sizeof (data));
+ data.mlrd_ptys.mlrd_ptys_local_port = mlp->mlp_num + 1;
+ set_bit8(&data.mlrd_ptys.mlrd_ptys_proto_mask,
+ MLXCX_PTYS_PROTO_MASK_ETH);
+
+ ret = mlxcx_cmd_access_register(mlxp, MLXCX_CMD_ACCESS_REGISTER_READ,
+ MLXCX_REG_PTYS, &data);
+
+ if (ret) {
+ /* The flag in the register is "autoneg disabled". */
+ if (get_bit8(data.mlrd_ptys.mlrd_ptys_autoneg_flags,
+ MLXCX_AUTONEG_DISABLE)) {
+ mlp->mlp_autoneg = B_FALSE;
+ } else {
+ mlp->mlp_autoneg = B_TRUE;
+ }
+ mlp->mlp_max_proto =
+ from_bits32(data.mlrd_ptys.mlrd_ptys_proto_cap);
+ mlp->mlp_admin_proto =
+ from_bits32(data.mlrd_ptys.mlrd_ptys_proto_admin);
+ mlp->mlp_oper_proto =
+ from_bits32(data.mlrd_ptys.mlrd_ptys_proto_oper);
+ }
+
+ return (ret);
+}
+
+/*
+ * Issue MODIFY_NIC_VPORT_CONTEXT for a port's vNIC vport, changing only
+ * the fields named in the field-select bitmask: PROMISC enables
+ * all-promiscuous mode (and sets MLXCX_PORT_VPORT_PROMISC on the port on
+ * success), MTU pushes the port's current mlp_mtu to the vport context.
+ * The caller must hold the port mutex.
+ */
+boolean_t
+mlxcx_cmd_modify_nic_vport_ctx(mlxcx_t *mlxp, mlxcx_port_t *mlp,
+ mlxcx_modify_nic_vport_ctx_fields_t fields)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_modify_nic_vport_ctx_in_t in;
+ mlxcx_cmd_modify_nic_vport_ctx_out_t out;
+ boolean_t ret;
+ mlxcx_nic_vport_ctx_t *ctx;
+
+ ASSERT(mutex_owned(&mlp->mlp_mtx));
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_modify_nic_vport_ctx_head,
+ MLXCX_OP_MODIFY_NIC_VPORT_CONTEXT, MLXCX_VPORT_TYPE_VNIC);
+
+ in.mlxi_modify_nic_vport_ctx_vport_number = to_be16(mlp->mlp_num);
+ in.mlxi_modify_nic_vport_ctx_field_select = to_be32(fields);
+
+ ctx = &in.mlxi_modify_nic_vport_ctx_context;
+ if (fields & MLXCX_MODIFY_NIC_VPORT_CTX_PROMISC) {
+ set_bit16(&ctx->mlnvc_promisc_list_type,
+ MLXCX_VPORT_PROMISC_ALL);
+ }
+ if (fields & MLXCX_MODIFY_NIC_VPORT_CTX_MTU) {
+ ctx->mlnvc_mtu = to_be16(mlp->mlp_mtu);
+ }
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ if (fields & MLXCX_MODIFY_NIC_VPORT_CTX_PROMISC) {
+ mlp->mlp_flags |= MLXCX_PORT_VPORT_PROMISC;
+ }
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue CREATE_EQ for an event queue that has been allocated but not yet
+ * created. The EQ context (UAR page, log2 size, interrupt index) and the
+ * event bitmask come from the mleq; the queue's DMA memory is then walked
+ * cookie by cookie and decomposed into hardware-page-sized, page-aligned
+ * chunks whose physical addresses fill the variable-length PA array. On
+ * success, the queue is marked CREATED and its EQ number recorded. The
+ * caller must hold the EQ mutex.
+ */
+boolean_t
+mlxcx_cmd_create_eq(mlxcx_t *mlxp, mlxcx_event_queue_t *mleq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_create_eq_in_t in;
+ mlxcx_cmd_create_eq_out_t out;
+ boolean_t ret;
+ mlxcx_eventq_ctx_t *ctx;
+ size_t rem, insize;
+ const ddi_dma_cookie_t *c;
+ uint64_t pa, npages;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mleq->mleq_mtx));
+ VERIFY(mleq->mleq_state & MLXCX_EQ_ALLOC);
+ VERIFY0(mleq->mleq_state & MLXCX_EQ_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_create_eq_head,
+ MLXCX_OP_CREATE_EQ, 0);
+
+ ctx = &in.mlxi_create_eq_context;
+ ctx->mleqc_uar_page = to_be24(mleq->mleq_uar->mlu_num);
+ ctx->mleqc_log_eq_size = mleq->mleq_entshift;
+ ctx->mleqc_intr = mleq->mleq_intr_index;
+
+ in.mlxi_create_eq_event_bitmask = to_be64(mleq->mleq_events);
+
+ /* Split the queue's DMA cookies into hardware pages. */
+ npages = 0;
+ c = NULL;
+ while ((c = mlxcx_dma_cookie_iter(&mleq->mleq_dma, c)) != NULL) {
+ pa = c->dmac_laddress;
+ rem = c->dmac_size;
+ while (rem > 0) {
+ ASSERT3U(pa & 0xfff, ==, 0);
+ ASSERT3U(rem, >=, MLXCX_HW_PAGE_SIZE);
+ in.mlxi_create_eq_pas[npages++] = to_be64(pa);
+ rem -= MLXCX_HW_PAGE_SIZE;
+ pa += MLXCX_HW_PAGE_SIZE;
+ }
+ }
+ ASSERT3U(npages, <=, MLXCX_CREATE_QUEUE_MAX_PAGES);
+
+ /* Trim the input to the PAs actually used. */
+ insize = offsetof(mlxcx_cmd_create_eq_in_t, mlxi_create_eq_pas) +
+ sizeof (uint64_t) * npages;
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, insize, &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mleq->mleq_state |= MLXCX_EQ_CREATED;
+ mleq->mleq_num = out.mlxo_create_eq_eqn;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue QUERY_EQ for a created event queue, copying the returned EQ
+ * context (still in wire byte order) into *ctxp on success. The caller
+ * must hold the EQ mutex.
+ */
+boolean_t
+mlxcx_cmd_query_eq(mlxcx_t *mlxp, mlxcx_event_queue_t *mleq,
+ mlxcx_eventq_ctx_t *ctxp)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_query_eq_in_t in;
+ mlxcx_cmd_query_eq_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mleq->mleq_mtx));
+ VERIFY(mleq->mleq_state & MLXCX_EQ_ALLOC);
+ VERIFY(mleq->mleq_state & MLXCX_EQ_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_query_eq_head,
+ MLXCX_OP_QUERY_EQ, 0);
+
+ in.mlxi_query_eq_eqn = mleq->mleq_num;
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ bcopy(&out.mlxo_query_eq_context, ctxp,
+ sizeof (mlxcx_eventq_ctx_t));
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue DESTROY_EQ for a created event queue, marking it DESTROYED in the
+ * queue's state flags on success. The caller must hold the EQ mutex.
+ */
+boolean_t
+mlxcx_cmd_destroy_eq(mlxcx_t *mlxp, mlxcx_event_queue_t *mleq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_destroy_eq_in_t in;
+ mlxcx_cmd_destroy_eq_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mleq->mleq_mtx));
+ VERIFY(mleq->mleq_state & MLXCX_EQ_ALLOC);
+ VERIFY(mleq->mleq_state & MLXCX_EQ_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_destroy_eq_head,
+ MLXCX_OP_DESTROY_EQ, 0);
+
+ in.mlxi_destroy_eq_eqn = mleq->mleq_num;
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mleq->mleq_state |= MLXCX_EQ_DESTROYED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue QUERY_SPECIAL_CONTEXTS and, on success, record the reserved lkey
+ * from the output on the mlxcx instance.
+ */
+boolean_t
+mlxcx_cmd_query_special_ctxs(mlxcx_t *mlxp)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_query_special_ctxs_in_t in;
+ mlxcx_cmd_query_special_ctxs_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_query_special_ctxs_head,
+ MLXCX_OP_QUERY_SPECIAL_CONTEXTS, 0);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlxp->mlx_rsvd_lkey = from_be32(
+ out.mlxo_query_special_ctxs_resd_lkey);
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue CREATE_CQ for a completion queue that has been allocated but not
+ * yet created. The CQ context carries the UAR page, log2 size, associated
+ * EQ number, moderation parameters, and the physical address of the
+ * doorbell record; the queue's DMA memory is then decomposed cookie by
+ * cookie into page-aligned hardware pages filling the variable-length PA
+ * array. On success the queue is marked CREATED and its 24-bit CQ number
+ * recorded. The caller must hold the CQ mutex.
+ */
+boolean_t
+mlxcx_cmd_create_cq(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_create_cq_in_t in;
+ mlxcx_cmd_create_cq_out_t out;
+ boolean_t ret;
+ mlxcx_completionq_ctx_t *ctx;
+ size_t rem, insize;
+ const ddi_dma_cookie_t *c;
+ uint64_t pa, npages;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlcq->mlcq_mtx));
+ VERIFY(mlcq->mlcq_state & MLXCX_CQ_ALLOC);
+ VERIFY0(mlcq->mlcq_state & MLXCX_CQ_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_create_cq_head,
+ MLXCX_OP_CREATE_CQ, 0);
+
+ ctx = &in.mlxi_create_cq_context;
+ ctx->mlcqc_uar_page = to_be24(mlcq->mlcq_uar->mlu_num);
+ ctx->mlcqc_log_cq_size = mlcq->mlcq_entshift;
+ ctx->mlcqc_eqn = mlcq->mlcq_eq->mleq_num;
+ ctx->mlcqc_cq_period = to_be16(mlcq->mlcq_cqemod_period_usec);
+ ctx->mlcqc_cq_max_count = to_be16(mlcq->mlcq_cqemod_count);
+
+ /* Point the hardware at the doorbell record's physical address. */
+ c = mlxcx_dma_cookie_one(&mlcq->mlcq_doorbell_dma);
+ ctx->mlcqc_dbr_addr = to_be64(c->dmac_laddress);
+ ASSERT3U(c->dmac_size, >=, sizeof (mlxcx_completionq_doorbell_t));
+
+ /* Split the queue's DMA cookies into hardware pages. */
+ npages = 0;
+ c = NULL;
+ while ((c = mlxcx_dma_cookie_iter(&mlcq->mlcq_dma, c)) != NULL) {
+ pa = c->dmac_laddress;
+ rem = c->dmac_size;
+ while (rem > 0) {
+ ASSERT3U(pa & 0xfff, ==, 0);
+ ASSERT3U(rem, >=, MLXCX_HW_PAGE_SIZE);
+ in.mlxi_create_cq_pas[npages++] = to_be64(pa);
+ rem -= MLXCX_HW_PAGE_SIZE;
+ pa += MLXCX_HW_PAGE_SIZE;
+ }
+ }
+ ASSERT3U(npages, <=, MLXCX_CREATE_QUEUE_MAX_PAGES);
+
+ /* Trim the input to the PAs actually used. */
+ insize = offsetof(mlxcx_cmd_create_cq_in_t, mlxi_create_cq_pas) +
+ sizeof (uint64_t) * npages;
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, insize, &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlcq->mlcq_state |= MLXCX_CQ_CREATED;
+ mlcq->mlcq_num = from_be24(out.mlxo_create_cq_cqn);
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue QUERY_RQ to fetch the current hardware context of a receive
+ * queue.  Caller holds mlwq_mtx; the queue must be a created RQ.  On
+ * success the RQ context from the reply is copied into *ctxp.
+ */
+boolean_t
+mlxcx_cmd_query_rq(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq,
+ mlxcx_rq_ctx_t *ctxp)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_query_rq_in_t in;
+ mlxcx_cmd_query_rq_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_CREATED);
+ ASSERT3S(mlwq->mlwq_type, ==, MLXCX_WQ_TYPE_RECVQ);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_query_rq_head,
+ MLXCX_OP_QUERY_RQ, 0);
+
+ in.mlxi_query_rq_rqn = to_be24(mlwq->mlwq_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ bcopy(&out.mlxo_query_rq_context, ctxp,
+ sizeof (mlxcx_rq_ctx_t));
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue QUERY_SQ to fetch the current hardware context of a send
+ * queue.  Caller holds mlwq_mtx; the queue must be a created SQ.  On
+ * success the SQ context from the reply is copied into *ctxp.
+ */
+boolean_t
+mlxcx_cmd_query_sq(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq,
+ mlxcx_sq_ctx_t *ctxp)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_query_sq_in_t in;
+ mlxcx_cmd_query_sq_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_CREATED);
+ ASSERT3S(mlwq->mlwq_type, ==, MLXCX_WQ_TYPE_SENDQ);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_query_sq_head,
+ MLXCX_OP_QUERY_SQ, 0);
+
+ in.mlxi_query_sq_sqn = to_be24(mlwq->mlwq_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ bcopy(&out.mlxo_query_sq_context, ctxp,
+ sizeof (mlxcx_sq_ctx_t));
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue QUERY_CQ to fetch the current hardware context of a completion
+ * queue.  Caller holds mlcq_mtx; the CQ must be created.  On success
+ * the CQ context from the reply is copied into *ctxp.
+ */
+boolean_t
+mlxcx_cmd_query_cq(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq,
+ mlxcx_completionq_ctx_t *ctxp)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_query_cq_in_t in;
+ mlxcx_cmd_query_cq_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlcq->mlcq_mtx));
+ VERIFY(mlcq->mlcq_state & MLXCX_CQ_ALLOC);
+ VERIFY(mlcq->mlcq_state & MLXCX_CQ_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_query_cq_head,
+ MLXCX_OP_QUERY_CQ, 0);
+
+ in.mlxi_query_cq_cqn = to_be24(mlcq->mlcq_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ bcopy(&out.mlxo_query_cq_context, ctxp,
+ sizeof (mlxcx_completionq_ctx_t));
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue DESTROY_CQ to tear down a completion queue on the HCA.  Caller
+ * holds mlcq_mtx; the CQ must have been created.  On success the
+ * MLXCX_CQ_DESTROYED flag is added to the CQ state (the CREATED flag
+ * is left set as a record that the queue existed).
+ */
+boolean_t
+mlxcx_cmd_destroy_cq(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_destroy_cq_in_t in;
+ mlxcx_cmd_destroy_cq_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlcq->mlcq_mtx));
+ VERIFY(mlcq->mlcq_state & MLXCX_CQ_ALLOC);
+ VERIFY(mlcq->mlcq_state & MLXCX_CQ_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_destroy_cq_head,
+ MLXCX_OP_DESTROY_CQ, 0);
+
+ in.mlxi_destroy_cq_cqn = to_be24(mlcq->mlcq_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlcq->mlcq_state |= MLXCX_CQ_DESTROYED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue CREATE_RQ to create a receive work queue on the HCA.  Caller
+ * holds mlwq_mtx; the queue must be an allocated-but-not-created RQ.
+ * On success, marks the queue created and records the hardware RQN.
+ */
+boolean_t
+mlxcx_cmd_create_rq(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_create_rq_in_t in;
+ mlxcx_cmd_create_rq_out_t out;
+ boolean_t ret;
+ mlxcx_rq_ctx_t *ctx;
+ size_t rem, insize;
+ const ddi_dma_cookie_t *c;
+ uint64_t pa, npages;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+ VERIFY3U(mlwq->mlwq_type, ==, MLXCX_WQ_TYPE_RECVQ);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+ VERIFY0(mlwq->mlwq_state & MLXCX_WQ_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_create_rq_head,
+ MLXCX_OP_CREATE_RQ, 0);
+
+ ctx = &in.mlxi_create_rq_context;
+
+ /* RQ flags: reserved lkey, flush-in-error, no VLAN stripping. */
+ set_bit32(&ctx->mlrqc_flags, MLXCX_RQ_FLAGS_RLKEY);
+ set_bit32(&ctx->mlrqc_flags, MLXCX_RQ_FLAGS_FLUSH_IN_ERROR);
+ set_bit32(&ctx->mlrqc_flags, MLXCX_RQ_FLAGS_VLAN_STRIP_DISABLE);
+ ctx->mlrqc_cqn = to_be24(mlwq->mlwq_cq->mlcq_num);
+
+ /* Embedded work queue context: cyclic queue, PD, size, stride. */
+ set_bits32(&ctx->mlrqc_wq.mlwqc_flags, MLXCX_WORKQ_CTX_TYPE,
+ MLXCX_WORKQ_TYPE_CYCLIC);
+ ctx->mlrqc_wq.mlwqc_pd = to_be24(mlwq->mlwq_pd->mlpd_num);
+ ctx->mlrqc_wq.mlwqc_log_wq_sz = mlwq->mlwq_entshift;
+ ctx->mlrqc_wq.mlwqc_log_wq_stride = MLXCX_RECVQ_STRIDE_SHIFT;
+
+ /* The doorbell record must fit in a single DMA cookie. */
+ c = mlxcx_dma_cookie_one(&mlwq->mlwq_doorbell_dma);
+ ctx->mlrqc_wq.mlwqc_dbr_addr = to_be64(c->dmac_laddress);
+ ASSERT3U(c->dmac_size, >=, sizeof (mlxcx_workq_doorbell_t));
+
+ /*
+  * Walk the queue's DMA cookies and break each into page-aligned
+  * MLXCX_HW_PAGE_SIZE chunks for the physical address array.
+  */
+ npages = 0;
+ c = NULL;
+ while ((c = mlxcx_dma_cookie_iter(&mlwq->mlwq_dma, c)) != NULL) {
+ pa = c->dmac_laddress;
+ rem = c->dmac_size;
+ while (rem > 0) {
+ ASSERT3U(pa & 0xfff, ==, 0);
+ ASSERT3U(rem, >=, MLXCX_HW_PAGE_SIZE);
+ ctx->mlrqc_wq.mlwqc_pas[npages++] = to_be64(pa);
+ rem -= MLXCX_HW_PAGE_SIZE;
+ pa += MLXCX_HW_PAGE_SIZE;
+ }
+ }
+ ASSERT3U(npages, <=, MLXCX_WORKQ_CTX_MAX_ADDRESSES);
+
+ /* Truncate the input to only the PAS entries actually used. */
+ insize = offsetof(mlxcx_cmd_create_rq_in_t, mlxi_create_rq_context) +
+ offsetof(mlxcx_rq_ctx_t, mlrqc_wq) +
+ offsetof(mlxcx_workq_ctx_t, mlwqc_pas) +
+ sizeof (uint64_t) * npages;
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, insize, &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlwq->mlwq_state |= MLXCX_WQ_CREATED;
+ mlwq->mlwq_num = from_be24(out.mlxo_create_rq_rqn);
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Transition a receive queue from RST to RDY via MODIFY_RQ, zeroing its
+ * doorbell and producer counter first.  Caller holds both mlwq_mtx and
+ * the associated CQ's mlcq_mtx; the queue must be created but not yet
+ * started.  Returns B_FALSE on a DMA fault (via the FM error check) or
+ * command failure; sets MLXCX_WQ_STARTED on success.
+ */
+boolean_t
+mlxcx_cmd_start_rq(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_modify_rq_in_t in;
+ mlxcx_cmd_modify_rq_out_t out;
+ boolean_t ret;
+ ddi_fm_error_t err;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_CREATED);
+ VERIFY0(mlwq->mlwq_state & MLXCX_WQ_STARTED);
+
+ /*
+ * Before starting the queue, we have to be sure that it is
+ * empty and the doorbell and counters are set to 0.
+ */
+ ASSERT(mutex_owned(&mlwq->mlwq_cq->mlcq_mtx));
+ ASSERT(list_is_empty(&mlwq->mlwq_cq->mlcq_buffers));
+ ASSERT(list_is_empty(&mlwq->mlwq_cq->mlcq_buffers_b));
+
+ mlwq->mlwq_doorbell->mlwqd_recv_counter = to_be16(0);
+ MLXCX_DMA_SYNC(mlwq->mlwq_doorbell_dma, DDI_DMA_SYNC_FORDEV);
+ /* Check that the doorbell DMA sync did not take a fault. */
+ ddi_fm_dma_err_get(mlwq->mlwq_doorbell_dma.mxdb_dma_handle, &err,
+ DDI_FME_VERSION);
+ if (err.fme_status != DDI_FM_OK)
+ return (B_FALSE);
+ mlwq->mlwq_pc = 0;
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_modify_rq_head,
+ MLXCX_OP_MODIFY_RQ, 0);
+
+ in.mlxi_modify_rq_rqn = to_be24(mlwq->mlwq_num);
+
+ /* From state */
+ set_bits8(&in.mlxi_modify_rq_state, MLXCX_CMD_MODIFY_RQ_STATE,
+ MLXCX_RQ_STATE_RST);
+ /* To state */
+ set_bits32(&in.mlxi_modify_rq_context.mlrqc_flags, MLXCX_RQ_STATE,
+ MLXCX_RQ_STATE_RDY);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlwq->mlwq_state |= MLXCX_WQ_STARTED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Transition a receive queue from RDY back to RST via MODIFY_RQ.
+ * Caller holds mlwq_mtx; the queue must currently be started.  On
+ * success the MLXCX_WQ_STARTED flag is cleared.
+ */
+boolean_t
+mlxcx_cmd_stop_rq(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_modify_rq_in_t in;
+ mlxcx_cmd_modify_rq_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_CREATED);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_STARTED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_modify_rq_head,
+ MLXCX_OP_MODIFY_RQ, 0);
+
+ in.mlxi_modify_rq_rqn = to_be24(mlwq->mlwq_num);
+
+ /* From state */
+ set_bits8(&in.mlxi_modify_rq_state, MLXCX_CMD_MODIFY_RQ_STATE,
+ MLXCX_RQ_STATE_RDY);
+ /* To state */
+ set_bits32(&in.mlxi_modify_rq_context.mlrqc_flags, MLXCX_RQ_STATE,
+ MLXCX_RQ_STATE_RST);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlwq->mlwq_state &= ~MLXCX_WQ_STARTED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue DESTROY_RQ to tear down a receive queue on the HCA.  Caller
+ * holds mlwq_mtx; the queue must be created and already stopped (not
+ * STARTED).  On success the MLXCX_WQ_DESTROYED flag is added to the
+ * queue state.
+ */
+boolean_t
+mlxcx_cmd_destroy_rq(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_destroy_rq_in_t in;
+ mlxcx_cmd_destroy_rq_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_CREATED);
+ VERIFY0(mlwq->mlwq_state & MLXCX_WQ_STARTED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_destroy_rq_head,
+ MLXCX_OP_DESTROY_RQ, 0);
+
+ in.mlxi_destroy_rq_rqn = to_be24(mlwq->mlwq_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlwq->mlwq_state |= MLXCX_WQ_DESTROYED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue CREATE_TIR to create a Transport Interface Receive object.
+ * A TIR is either INDIRECT (spreads traffic across an RQ table using a
+ * configured RX hash) or DIRECT (feeds a single RQ).  The TIR must not
+ * already be created.  On success, marks it created and records the
+ * hardware TIR number.
+ */
+boolean_t
+mlxcx_cmd_create_tir(mlxcx_t *mlxp, mlxcx_tir_t *mltir)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_create_tir_in_t in;
+ mlxcx_cmd_create_tir_out_t out;
+ mlxcx_tir_ctx_t *ctx;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ VERIFY0(mltir->mltir_state & MLXCX_TIR_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_create_tir_head,
+ MLXCX_OP_CREATE_TIR, 0);
+
+ ctx = &in.mlxi_create_tir_context;
+ ctx->mltirc_transport_domain = to_be24(mltir->mltir_tdom->mltd_num);
+ set_bits8(&ctx->mltirc_disp_type, MLXCX_TIR_CTX_DISP_TYPE,
+ mltir->mltir_type);
+ switch (mltir->mltir_type) {
+ case MLXCX_TIR_INDIRECT:
+ /* Indirect TIRs need a created RQ table and hash config. */
+ VERIFY(mltir->mltir_rqtable != NULL);
+ VERIFY(mltir->mltir_rqtable->mlrqt_state & MLXCX_RQT_CREATED);
+ ctx->mltirc_indirect_table =
+ to_be24(mltir->mltir_rqtable->mlrqt_num);
+ set_bits8(&ctx->mltirc_hash_lb, MLXCX_TIR_RX_HASH_FN,
+ mltir->mltir_hash_fn);
+ bcopy(mltir->mltir_toeplitz_key,
+ ctx->mltirc_rx_hash_toeplitz_key,
+ sizeof (ctx->mltirc_rx_hash_toeplitz_key));
+ set_bits32(&ctx->mltirc_rx_hash_fields_outer,
+ MLXCX_RX_HASH_L3_TYPE, mltir->mltir_l3_type);
+ set_bits32(&ctx->mltirc_rx_hash_fields_outer,
+ MLXCX_RX_HASH_L4_TYPE, mltir->mltir_l4_type);
+ set_bits32(&ctx->mltirc_rx_hash_fields_outer,
+ MLXCX_RX_HASH_FIELDS, mltir->mltir_hash_fields);
+ break;
+ case MLXCX_TIR_DIRECT:
+ /* Direct TIRs point straight at a single created RQ. */
+ VERIFY(mltir->mltir_rq != NULL);
+ VERIFY(mltir->mltir_rq->mlwq_state & MLXCX_WQ_CREATED);
+ ctx->mltirc_inline_rqn = to_be24(mltir->mltir_rq->mlwq_num);
+ break;
+ default:
+ VERIFY(0);
+ }
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mltir->mltir_state |= MLXCX_TIR_CREATED;
+ mltir->mltir_num = from_be24(out.mlxo_create_tir_tirn);
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue DESTROY_TIR to tear down a Transport Interface Receive object.
+ * The TIR must be created and not already destroyed.  On success the
+ * MLXCX_TIR_DESTROYED flag is added to its state.
+ */
+boolean_t
+mlxcx_cmd_destroy_tir(mlxcx_t *mlxp, mlxcx_tir_t *mltir)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_destroy_tir_in_t in;
+ mlxcx_cmd_destroy_tir_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ VERIFY(mltir->mltir_state & MLXCX_TIR_CREATED);
+ VERIFY0(mltir->mltir_state & MLXCX_TIR_DESTROYED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_destroy_tir_head,
+ MLXCX_OP_DESTROY_TIR, 0);
+
+ in.mlxi_destroy_tir_tirn = to_be24(mltir->mltir_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mltir->mltir_state |= MLXCX_TIR_DESTROYED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue CREATE_TIS to create a Transport Interface Send object in the
+ * TIS's transport domain.  The TIS must not already be created.  On
+ * success, marks it created and records the hardware TIS number.
+ */
+boolean_t
+mlxcx_cmd_create_tis(mlxcx_t *mlxp, mlxcx_tis_t *mltis)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_create_tis_in_t in;
+ mlxcx_cmd_create_tis_out_t out;
+ mlxcx_tis_ctx_t *ctx;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ VERIFY0(mltis->mltis_state & MLXCX_TIS_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_create_tis_head,
+ MLXCX_OP_CREATE_TIS, 0);
+
+ ctx = &in.mlxi_create_tis_context;
+ ctx->mltisc_transport_domain = to_be24(mltis->mltis_tdom->mltd_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mltis->mltis_state |= MLXCX_TIS_CREATED;
+ mltis->mltis_num = from_be24(out.mlxo_create_tis_tisn);
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue DESTROY_TIS to tear down a Transport Interface Send object.
+ * The TIS must be created and not already destroyed.  On success the
+ * MLXCX_TIS_DESTROYED flag is added to its state.
+ */
+boolean_t
+mlxcx_cmd_destroy_tis(mlxcx_t *mlxp, mlxcx_tis_t *mltis)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_destroy_tis_in_t in;
+ mlxcx_cmd_destroy_tis_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ /*
+  * Check the TIS state flags.  These previously tested the TIR
+  * flag constants (MLXCX_TIR_CREATED/MLXCX_TIR_DESTROYED), a
+  * copy-paste from mlxcx_cmd_destroy_tir(); the success path below
+  * sets MLXCX_TIS_DESTROYED, so the TIS flags are the correct ones
+  * to verify here.
+  */
+ VERIFY(mltis->mltis_state & MLXCX_TIS_CREATED);
+ VERIFY0(mltis->mltis_state & MLXCX_TIS_DESTROYED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_destroy_tis_head,
+ MLXCX_OP_DESTROY_TIS, 0);
+
+ in.mlxi_destroy_tis_tisn = to_be24(mltis->mltis_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mltis->mltis_state |= MLXCX_TIS_DESTROYED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue CREATE_FLOW_TABLE to create a flow table of the given type,
+ * size (log2 entries) and level on the table's port.  Caller holds
+ * mlft_mtx; the table must not already be created.  On success the
+ * hardware table id is recorded and the table marked created.
+ */
+boolean_t
+mlxcx_cmd_create_flow_table(mlxcx_t *mlxp, mlxcx_flow_table_t *mlft)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_create_flow_table_in_t in;
+ mlxcx_cmd_create_flow_table_out_t out;
+ mlxcx_flow_table_ctx_t *ctx;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlft->mlft_mtx));
+ VERIFY0(mlft->mlft_state & MLXCX_FLOW_TABLE_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_create_flow_table_head,
+ MLXCX_OP_CREATE_FLOW_TABLE, 0);
+
+ in.mlxi_create_flow_table_vport_number =
+ to_be16(mlft->mlft_port->mlp_num);
+ in.mlxi_create_flow_table_table_type = mlft->mlft_type;
+ ctx = &in.mlxi_create_flow_table_context;
+ ctx->mlftc_log_size = mlft->mlft_entshift;
+ ctx->mlftc_level = mlft->mlft_level;
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlft->mlft_num = from_be24(out.mlxo_create_flow_table_table_id);
+ mlft->mlft_state |= MLXCX_FLOW_TABLE_CREATED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue DESTROY_FLOW_TABLE to tear down a flow table.  Caller holds
+ * mlft_mtx; the table must be created and not already destroyed.  On
+ * success the MLXCX_FLOW_TABLE_DESTROYED flag is added to its state.
+ */
+boolean_t
+mlxcx_cmd_destroy_flow_table(mlxcx_t *mlxp, mlxcx_flow_table_t *mlft)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_destroy_flow_table_in_t in;
+ mlxcx_cmd_destroy_flow_table_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlft->mlft_mtx));
+ VERIFY(mlft->mlft_state & MLXCX_FLOW_TABLE_CREATED);
+ VERIFY0(mlft->mlft_state & MLXCX_FLOW_TABLE_DESTROYED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_destroy_flow_table_head,
+ MLXCX_OP_DESTROY_FLOW_TABLE, 0);
+
+ in.mlxi_destroy_flow_table_vport_number =
+ to_be16(mlft->mlft_port->mlp_num);
+ in.mlxi_destroy_flow_table_table_type = mlft->mlft_type;
+ in.mlxi_destroy_flow_table_table_id = to_be24(mlft->mlft_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlft->mlft_state |= MLXCX_FLOW_TABLE_DESTROYED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue SET_FLOW_TABLE_ROOT to make this table the root of its flow
+ * namespace (the first table packets are matched against).  Caller
+ * holds mlft_mtx; the table must be created and not destroyed.  On
+ * success the MLXCX_FLOW_TABLE_ROOT flag is added to its state.
+ */
+boolean_t
+mlxcx_cmd_set_flow_table_root(mlxcx_t *mlxp, mlxcx_flow_table_t *mlft)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_set_flow_table_root_in_t in;
+ mlxcx_cmd_set_flow_table_root_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlft->mlft_mtx));
+ VERIFY(mlft->mlft_state & MLXCX_FLOW_TABLE_CREATED);
+ VERIFY0(mlft->mlft_state & MLXCX_FLOW_TABLE_DESTROYED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_set_flow_table_root_head,
+ MLXCX_OP_SET_FLOW_TABLE_ROOT, 0);
+
+ in.mlxi_set_flow_table_root_vport_number =
+ to_be16(mlft->mlft_port->mlp_num);
+ in.mlxi_set_flow_table_root_table_type = mlft->mlft_type;
+ in.mlxi_set_flow_table_root_table_id = to_be24(mlft->mlft_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlft->mlft_state |= MLXCX_FLOW_TABLE_ROOT;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue CREATE_FLOW_GROUP to register a group of flow table entries
+ * that share one match-criteria mask.  Bits set in mlfg_mask are
+ * translated into all-ones match criteria fields here; the concrete
+ * values to match are supplied later per-entry by
+ * mlxcx_cmd_set_flow_table_entry().  Caller holds the owning table's
+ * mlft_mtx; the table must be created and the group not yet created.
+ * On success the hardware group id is recorded and the group marked
+ * created.
+ */
+boolean_t
+mlxcx_cmd_create_flow_group(mlxcx_t *mlxp, mlxcx_flow_group_t *mlfg)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_create_flow_group_in_t in;
+ mlxcx_cmd_create_flow_group_out_t out;
+ boolean_t ret;
+ const mlxcx_flow_table_t *mlft;
+ mlxcx_flow_header_match_t *hdrs;
+ mlxcx_flow_params_match_t *params;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlft = mlfg->mlfg_table;
+ ASSERT(mutex_owned(&mlft->mlft_mtx));
+ VERIFY(mlft->mlft_state & MLXCX_FLOW_TABLE_CREATED);
+ VERIFY0(mlft->mlft_state & MLXCX_FLOW_TABLE_DESTROYED);
+ VERIFY0(mlfg->mlfg_state & MLXCX_FLOW_GROUP_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_create_flow_group_head,
+ MLXCX_OP_CREATE_FLOW_GROUP, 0);
+
+ in.mlxi_create_flow_group_vport_number =
+ to_be16(mlft->mlft_port->mlp_num);
+ in.mlxi_create_flow_group_table_type = mlft->mlft_type;
+ in.mlxi_create_flow_group_table_id = to_be24(mlft->mlft_num);
+ /* The group owns a contiguous range of entry indices. */
+ in.mlxi_create_flow_group_start_flow_index =
+ to_be32(mlfg->mlfg_start_idx);
+ in.mlxi_create_flow_group_end_flow_index =
+ to_be32(mlfg->mlfg_start_idx + (mlfg->mlfg_size - 1));
+
+ /* Enable all-ones masks for each field the group matches on. */
+ hdrs = &in.mlxi_create_flow_group_match_criteria.mlfm_outer_headers;
+ params = &in.mlxi_create_flow_group_match_criteria.mlfm_misc_parameters;
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_SMAC) {
+ in.mlxi_create_flow_group_match_criteria_en |=
+ MLXCX_FLOW_GROUP_MATCH_OUTER_HDRS;
+ (void) memset(&hdrs->mlfh_smac, 0xff, sizeof (hdrs->mlfh_smac));
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_DMAC) {
+ in.mlxi_create_flow_group_match_criteria_en |=
+ MLXCX_FLOW_GROUP_MATCH_OUTER_HDRS;
+ (void) memset(&hdrs->mlfh_dmac, 0xff, sizeof (hdrs->mlfh_dmac));
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_VLAN) {
+ in.mlxi_create_flow_group_match_criteria_en |=
+ MLXCX_FLOW_GROUP_MATCH_OUTER_HDRS;
+ set_bit24(&hdrs->mlfh_tcp_ip_flags, MLXCX_FLOW_HDR_CVLAN_TAG);
+ set_bit24(&hdrs->mlfh_tcp_ip_flags, MLXCX_FLOW_HDR_SVLAN_TAG);
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_VID) {
+ /* VID matching only makes sense with VLAN matching on. */
+ ASSERT(mlfg->mlfg_mask & MLXCX_FLOW_MATCH_VLAN);
+ set_bits16(&hdrs->mlfh_first_vid_flags,
+ MLXCX_FLOW_HDR_FIRST_VID, UINT16_MAX);
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_IP_VER) {
+ in.mlxi_create_flow_group_match_criteria_en |=
+ MLXCX_FLOW_GROUP_MATCH_OUTER_HDRS;
+ set_bits24(&hdrs->mlfh_tcp_ip_flags, MLXCX_FLOW_HDR_IP_VERSION,
+ UINT32_MAX);
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_SRCIP) {
+ ASSERT(mlfg->mlfg_mask & MLXCX_FLOW_MATCH_IP_VER);
+ (void) memset(&hdrs->mlfh_src_ip, 0xff,
+ sizeof (hdrs->mlfh_src_ip));
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_DSTIP) {
+ ASSERT(mlfg->mlfg_mask & MLXCX_FLOW_MATCH_IP_VER);
+ /*
+  * Fixed: this branch previously memset mlfh_src_ip, so a
+  * group matching on destination IP never had its dst-ip
+  * match criteria enabled in hardware.
+  */
+ (void) memset(&hdrs->mlfh_dst_ip, 0xff,
+ sizeof (hdrs->mlfh_dst_ip));
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_IP_PROTO) {
+ in.mlxi_create_flow_group_match_criteria_en |=
+ MLXCX_FLOW_GROUP_MATCH_OUTER_HDRS;
+ hdrs->mlfh_ip_protocol = UINT8_MAX;
+ }
+ /*
+  * (A verbatim duplicate of the SRCIP/DSTIP branches used to
+  * follow here; it was redundant and has been removed.)
+  */
+
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_SQN) {
+ in.mlxi_create_flow_group_match_criteria_en |=
+ MLXCX_FLOW_GROUP_MATCH_MISC_PARAMS;
+ params->mlfp_source_sqn = to_be24(UINT32_MAX);
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_VXLAN) {
+ in.mlxi_create_flow_group_match_criteria_en |=
+ MLXCX_FLOW_GROUP_MATCH_MISC_PARAMS;
+ params->mlfp_vxlan_vni = to_be24(UINT32_MAX);
+ }
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlfg->mlfg_state |= MLXCX_FLOW_GROUP_CREATED;
+ mlfg->mlfg_num = from_be24(out.mlxo_create_flow_group_group_id);
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue DESTROY_FLOW_GROUP to tear down a flow group.  Caller holds
+ * the owning table's mlft_mtx; both the table and the group must be
+ * created and not destroyed.  On success the group is marked
+ * MLXCX_FLOW_GROUP_DESTROYED.
+ */
+boolean_t
+mlxcx_cmd_destroy_flow_group(mlxcx_t *mlxp, mlxcx_flow_group_t *mlfg)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_destroy_flow_group_in_t in;
+ mlxcx_cmd_destroy_flow_group_out_t out;
+ boolean_t ret;
+ const mlxcx_flow_table_t *mlft;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlft = mlfg->mlfg_table;
+ ASSERT(mutex_owned(&mlft->mlft_mtx));
+ VERIFY(mlft->mlft_state & MLXCX_FLOW_TABLE_CREATED);
+ VERIFY0(mlft->mlft_state & MLXCX_FLOW_TABLE_DESTROYED);
+ VERIFY(mlfg->mlfg_state & MLXCX_FLOW_GROUP_CREATED);
+ VERIFY0(mlfg->mlfg_state & MLXCX_FLOW_GROUP_DESTROYED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_destroy_flow_group_head,
+ MLXCX_OP_DESTROY_FLOW_GROUP, 0);
+
+ in.mlxi_destroy_flow_group_vport_number =
+ to_be16(mlft->mlft_port->mlp_num);
+ in.mlxi_destroy_flow_group_table_type = mlft->mlft_type;
+ in.mlxi_destroy_flow_group_table_id = to_be24(mlft->mlft_num);
+ in.mlxi_destroy_flow_group_group_id = to_be32(mlfg->mlfg_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlfg->mlfg_state |= MLXCX_FLOW_GROUP_DESTROYED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue SET_FLOW_TABLE_ENTRY to install (or, when the entry is already
+ * created and dirty, modify) a single flow table entry.  The entry's
+ * match values are filled in for exactly the fields its group's mask
+ * enables; its action is ALLOW, DROP or FORWARD (to TIRs and/or other
+ * flow tables).  Caller holds the owning table's mlft_mtx.  On success
+ * the entry is marked created/clean and its group marked busy.
+ */
+boolean_t
+mlxcx_cmd_set_flow_table_entry(mlxcx_t *mlxp, mlxcx_flow_entry_t *mlfe)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_set_flow_table_entry_in_t in;
+ mlxcx_cmd_set_flow_table_entry_out_t out;
+ boolean_t ret;
+ size_t insize;
+ mlxcx_flow_entry_ctx_t *ctx;
+ const mlxcx_flow_table_t *mlft;
+ mlxcx_flow_group_t *mlfg;
+ mlxcx_flow_dest_t *d;
+ uint_t i;
+ mlxcx_flow_header_match_t *hdrs;
+ mlxcx_flow_params_match_t *params;
+ mlxcx_cmd_set_flow_table_entry_opmod_t opmod;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlft = mlfe->mlfe_table;
+ ASSERT(mutex_owned(&mlft->mlft_mtx));
+ VERIFY(mlft->mlft_state & MLXCX_FLOW_TABLE_CREATED);
+ VERIFY0(mlft->mlft_state & MLXCX_FLOW_TABLE_DESTROYED);
+
+ mlfg = mlfe->mlfe_group;
+ VERIFY(mlfg->mlfg_state & MLXCX_FLOW_GROUP_CREATED);
+ VERIFY0(mlfg->mlfg_state & MLXCX_FLOW_GROUP_DESTROYED);
+
+ /* An already-created entry may only be re-sent if it is dirty. */
+ opmod = MLXCX_CMD_FLOW_ENTRY_SET_NEW;
+ if (mlfe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
+ ASSERT(mlfe->mlfe_state & MLXCX_FLOW_ENTRY_DIRTY);
+ opmod = MLXCX_CMD_FLOW_ENTRY_MODIFY;
+ }
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_set_flow_table_entry_head,
+ MLXCX_OP_SET_FLOW_TABLE_ENTRY, opmod);
+
+ in.mlxi_set_flow_table_entry_vport_number =
+ to_be16(mlft->mlft_port->mlp_num);
+ in.mlxi_set_flow_table_entry_table_type = mlft->mlft_type;
+ in.mlxi_set_flow_table_entry_table_id = to_be24(mlft->mlft_num);
+ in.mlxi_set_flow_table_entry_flow_index = to_be32(mlfe->mlfe_index);
+
+ /* For a MODIFY, tell hardware which parts we are changing. */
+ if (mlfe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
+ set_bit8(&in.mlxi_set_flow_table_entry_modify_bitmask,
+ MLXCX_CMD_FLOW_ENTRY_SET_ACTION);
+ set_bit8(&in.mlxi_set_flow_table_entry_modify_bitmask,
+ MLXCX_CMD_FLOW_ENTRY_SET_DESTINATION);
+ }
+
+ ctx = &in.mlxi_set_flow_table_entry_context;
+ ctx->mlfec_group_id = to_be32(mlfg->mlfg_num);
+
+ /* Base input size; the FORWARD case grows it per destination. */
+ insize = offsetof(mlxcx_cmd_set_flow_table_entry_in_t,
+ mlxi_set_flow_table_entry_context) +
+ offsetof(mlxcx_flow_entry_ctx_t, mlfec_destination);
+
+ ctx->mlfec_action = to_be16(mlfe->mlfe_action);
+
+ switch (mlfe->mlfe_action) {
+ case MLXCX_FLOW_ACTION_ALLOW:
+ case MLXCX_FLOW_ACTION_DROP:
+ break;
+ case MLXCX_FLOW_ACTION_FORWARD:
+ ASSERT3U(mlfe->mlfe_ndest, <=, MLXCX_FLOW_MAX_DESTINATIONS);
+ ASSERT3U(mlfe->mlfe_ndest, <=,
+ mlxp->mlx_caps->mlc_max_rx_fe_dest);
+ ctx->mlfec_destination_list_size = to_be24(mlfe->mlfe_ndest);
+ for (i = 0; i < mlfe->mlfe_ndest; ++i) {
+ insize += sizeof (mlxcx_flow_dest_t);
+ d = &ctx->mlfec_destination[i];
+ if (mlfe->mlfe_dest[i].mlfed_tir != NULL) {
+ d->mlfd_destination_type = MLXCX_FLOW_DEST_TIR;
+ d->mlfd_destination_id = to_be24(
+ mlfe->mlfe_dest[i].mlfed_tir->mltir_num);
+ } else if (mlfe->mlfe_dest[i].mlfed_flow != NULL) {
+ d->mlfd_destination_type =
+ MLXCX_FLOW_DEST_FLOW_TABLE;
+ d->mlfd_destination_id = to_be24(
+ mlfe->mlfe_dest[i].mlfed_flow->mlft_num);
+ } else {
+ /* Invalid flow entry destination */
+ VERIFY(0);
+ }
+ }
+ break;
+ case MLXCX_FLOW_ACTION_COUNT:
+ /* We don't support count actions yet. */
+ VERIFY(0);
+ break;
+ case MLXCX_FLOW_ACTION_ENCAP:
+ case MLXCX_FLOW_ACTION_DECAP:
+ /* We don't support encap/decap actions yet. */
+ VERIFY(0);
+ break;
+ }
+
+ /* Fill in match values for the fields the group's mask enables. */
+ hdrs = &ctx->mlfec_match_value.mlfm_outer_headers;
+ params = &ctx->mlfec_match_value.mlfm_misc_parameters;
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_SMAC) {
+ bcopy(mlfe->mlfe_smac, hdrs->mlfh_smac,
+ sizeof (hdrs->mlfh_smac));
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_DMAC) {
+ bcopy(mlfe->mlfe_dmac, hdrs->mlfh_dmac,
+ sizeof (hdrs->mlfh_dmac));
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_VLAN) {
+ switch (mlfe->mlfe_vlan_type) {
+ case MLXCX_VLAN_TYPE_CVLAN:
+ set_bit24(&hdrs->mlfh_tcp_ip_flags,
+ MLXCX_FLOW_HDR_CVLAN_TAG);
+ break;
+ case MLXCX_VLAN_TYPE_SVLAN:
+ set_bit24(&hdrs->mlfh_tcp_ip_flags,
+ MLXCX_FLOW_HDR_SVLAN_TAG);
+ break;
+ default:
+ break;
+ }
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_VID) {
+ ASSERT(mlfg->mlfg_mask & MLXCX_FLOW_MATCH_VLAN);
+ set_bits16(&hdrs->mlfh_first_vid_flags,
+ MLXCX_FLOW_HDR_FIRST_VID, mlfe->mlfe_vid);
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_IP_VER) {
+ set_bits24(&hdrs->mlfh_tcp_ip_flags, MLXCX_FLOW_HDR_IP_VERSION,
+ mlfe->mlfe_ip_version);
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_SRCIP) {
+ ASSERT(mlfg->mlfg_mask & MLXCX_FLOW_MATCH_IP_VER);
+ bcopy(mlfe->mlfe_srcip, hdrs->mlfh_src_ip,
+ sizeof (hdrs->mlfh_src_ip));
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_DSTIP) {
+ ASSERT(mlfg->mlfg_mask & MLXCX_FLOW_MATCH_IP_VER);
+ /*
+  * Fixed: the destination address was previously copied into
+  * mlfh_src_ip, clobbering any source-IP match value and
+  * leaving the dst-ip match field zero.
+  */
+ bcopy(mlfe->mlfe_dstip, hdrs->mlfh_dst_ip,
+ sizeof (hdrs->mlfh_dst_ip));
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_IP_PROTO) {
+ hdrs->mlfh_ip_protocol = mlfe->mlfe_ip_proto;
+ }
+
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_SQN) {
+ params->mlfp_source_sqn = to_be24(mlfe->mlfe_sqn);
+ }
+ if (mlfg->mlfg_mask & MLXCX_FLOW_MATCH_VXLAN) {
+ params->mlfp_vxlan_vni = to_be24(mlfe->mlfe_vxlan_vni);
+ }
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, insize, &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlfe->mlfe_state |= MLXCX_FLOW_ENTRY_CREATED;
+ mlfe->mlfe_state &= ~MLXCX_FLOW_ENTRY_DIRTY;
+ mlfg->mlfg_state |= MLXCX_FLOW_GROUP_BUSY;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue DELETE_FLOW_TABLE_ENTRY to remove a single flow entry from its
+ * table.  Caller holds the owning table's mlft_mtx; the table must be
+ * created and not destroyed.  On success the entry's CREATED flag is
+ * cleared (see comment below) and DELETED is set.
+ */
+boolean_t
+mlxcx_cmd_delete_flow_table_entry(mlxcx_t *mlxp, mlxcx_flow_entry_t *mlfe)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_delete_flow_table_entry_in_t in;
+ mlxcx_cmd_delete_flow_table_entry_out_t out;
+ boolean_t ret;
+ const mlxcx_flow_table_t *mlft;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlft = mlfe->mlfe_table;
+ ASSERT(mutex_owned(&mlft->mlft_mtx));
+ VERIFY(mlft->mlft_state & MLXCX_FLOW_TABLE_CREATED);
+ VERIFY0(mlft->mlft_state & MLXCX_FLOW_TABLE_DESTROYED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_delete_flow_table_entry_head,
+ MLXCX_OP_DELETE_FLOW_TABLE_ENTRY, 0);
+
+ in.mlxi_delete_flow_table_entry_vport_number =
+ to_be16(mlft->mlft_port->mlp_num);
+ in.mlxi_delete_flow_table_entry_table_type = mlft->mlft_type;
+ in.mlxi_delete_flow_table_entry_table_id = to_be24(mlft->mlft_num);
+ in.mlxi_delete_flow_table_entry_flow_index = to_be32(mlfe->mlfe_index);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ /*
+ * Note that flow entries have a different lifecycle to most
+ * other things we create -- we have to be able to re-use them
+ * after they have been deleted, since they exist at a fixed
+ * position in their flow table.
+ *
+ * So we clear the CREATED bit here for them to let us call
+ * create_flow_table_entry() on the same entry again later.
+ */
+ mlfe->mlfe_state &= ~MLXCX_FLOW_ENTRY_CREATED;
+ mlfe->mlfe_state |= MLXCX_FLOW_ENTRY_DELETED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * Issue CREATE_SQ to create a send work queue on the HCA, bound to its
+ * CQ and (single) TIS.  Caller holds mlwq_mtx; the queue must be an
+ * allocated-but-not-created SQ.  On success, marks the queue created
+ * and records the hardware SQN.
+ */
+boolean_t
+mlxcx_cmd_create_sq(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_create_sq_in_t in;
+ mlxcx_cmd_create_sq_out_t out;
+ boolean_t ret;
+ mlxcx_sq_ctx_t *ctx;
+ size_t rem, insize;
+ const ddi_dma_cookie_t *c;
+ uint64_t pa, npages;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+ VERIFY3U(mlwq->mlwq_type, ==, MLXCX_WQ_TYPE_SENDQ);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+ VERIFY0(mlwq->mlwq_state & MLXCX_WQ_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_create_sq_head,
+ MLXCX_OP_CREATE_SQ, 0);
+
+ ctx = &in.mlxi_create_sq_context;
+
+ /* SQ flags: reserved lkey, flush-in-error, min inline mode. */
+ set_bit32(&ctx->mlsqc_flags, MLXCX_SQ_FLAGS_RLKEY);
+ set_bit32(&ctx->mlsqc_flags, MLXCX_SQ_FLAGS_FLUSH_IN_ERROR);
+ set_bits32(&ctx->mlsqc_flags, MLXCX_SQ_MIN_WQE_INLINE,
+ mlwq->mlwq_inline_mode);
+ ctx->mlsqc_cqn = to_be24(mlwq->mlwq_cq->mlcq_num);
+
+ /* Every SQ is attached to exactly one TIS. */
+ VERIFY(mlwq->mlwq_tis != NULL);
+ ctx->mlsqc_tis_lst_sz = to_be16(1);
+ ctx->mlsqc_tis_num = to_be24(mlwq->mlwq_tis->mltis_num);
+
+ /* Embedded work queue context: cyclic queue, PD, UAR, size, stride. */
+ set_bits32(&ctx->mlsqc_wq.mlwqc_flags, MLXCX_WORKQ_CTX_TYPE,
+ MLXCX_WORKQ_TYPE_CYCLIC);
+ ctx->mlsqc_wq.mlwqc_pd = to_be24(mlwq->mlwq_pd->mlpd_num);
+ ctx->mlsqc_wq.mlwqc_uar_page = to_be24(mlwq->mlwq_uar->mlu_num);
+ ctx->mlsqc_wq.mlwqc_log_wq_sz = mlwq->mlwq_entshift;
+ ctx->mlsqc_wq.mlwqc_log_wq_stride = MLXCX_SENDQ_STRIDE_SHIFT;
+
+ /* The doorbell record must fit in a single DMA cookie. */
+ c = mlxcx_dma_cookie_one(&mlwq->mlwq_doorbell_dma);
+ ctx->mlsqc_wq.mlwqc_dbr_addr = to_be64(c->dmac_laddress);
+ ASSERT3U(c->dmac_size, >=, sizeof (mlxcx_workq_doorbell_t));
+
+ /*
+  * Walk the queue's DMA cookies and break each into page-aligned
+  * MLXCX_HW_PAGE_SIZE chunks for the physical address array.
+  */
+ npages = 0;
+ c = NULL;
+ while ((c = mlxcx_dma_cookie_iter(&mlwq->mlwq_dma, c)) != NULL) {
+ pa = c->dmac_laddress;
+ rem = c->dmac_size;
+ while (rem > 0) {
+ ASSERT3U(pa & 0xfff, ==, 0);
+ ASSERT3U(rem, >=, MLXCX_HW_PAGE_SIZE);
+ ctx->mlsqc_wq.mlwqc_pas[npages++] = to_be64(pa);
+ rem -= MLXCX_HW_PAGE_SIZE;
+ pa += MLXCX_HW_PAGE_SIZE;
+ }
+ }
+ ASSERT3U(npages, <=, MLXCX_WORKQ_CTX_MAX_ADDRESSES);
+
+ /* Truncate the input to only the PAS entries actually used. */
+ insize = offsetof(mlxcx_cmd_create_sq_in_t, mlxi_create_sq_context) +
+ offsetof(mlxcx_sq_ctx_t, mlsqc_wq) +
+ offsetof(mlxcx_workq_ctx_t, mlwqc_pas) +
+ sizeof (uint64_t) * npages;
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, insize, &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlwq->mlwq_state |= MLXCX_WQ_CREATED;
+ mlwq->mlwq_num = from_be24(out.mlxo_create_sq_sqn);
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+boolean_t
+mlxcx_cmd_start_sq(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_modify_sq_in_t in;
+ mlxcx_cmd_modify_sq_out_t out;
+ boolean_t ret;
+ ddi_fm_error_t err;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+ ASSERT(mlwq->mlwq_cq != NULL);
+
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_CREATED);
+ VERIFY0(mlwq->mlwq_state & MLXCX_WQ_STARTED);
+
+ /*
+ * Before starting the queue, we have to be sure that it is
+ * empty and the doorbell and counters are set to 0.
+ */
+ ASSERT(mutex_owned(&mlwq->mlwq_cq->mlcq_mtx));
+ ASSERT(list_is_empty(&mlwq->mlwq_cq->mlcq_buffers));
+ ASSERT(list_is_empty(&mlwq->mlwq_cq->mlcq_buffers_b));
+
+ mlwq->mlwq_doorbell->mlwqd_recv_counter = to_be16(0);
+ MLXCX_DMA_SYNC(mlwq->mlwq_doorbell_dma, DDI_DMA_SYNC_FORDEV);
+ ddi_fm_dma_err_get(mlwq->mlwq_doorbell_dma.mxdb_dma_handle, &err,
+ DDI_FME_VERSION);
+ if (err.fme_status != DDI_FM_OK)
+ return (B_FALSE);
+ mlwq->mlwq_pc = 0;
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_modify_sq_head,
+ MLXCX_OP_MODIFY_SQ, 0);
+
+ in.mlxi_modify_sq_sqn = to_be24(mlwq->mlwq_num);
+
+ /* From state */
+ set_bits8(&in.mlxi_modify_sq_state, MLXCX_CMD_MODIFY_SQ_STATE,
+ MLXCX_SQ_STATE_RST);
+ /* To state */
+ set_bits32(&in.mlxi_modify_sq_context.mlsqc_flags, MLXCX_SQ_STATE,
+ MLXCX_SQ_STATE_RDY);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlwq->mlwq_state |= MLXCX_WQ_STARTED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+boolean_t
+mlxcx_cmd_stop_sq(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_modify_sq_in_t in;
+ mlxcx_cmd_modify_sq_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_CREATED);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_STARTED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_modify_sq_head,
+ MLXCX_OP_MODIFY_SQ, 0);
+
+ in.mlxi_modify_sq_sqn = to_be24(mlwq->mlwq_num);
+
+ /* From state */
+ set_bits8(&in.mlxi_modify_sq_state, MLXCX_CMD_MODIFY_SQ_STATE,
+ MLXCX_SQ_STATE_RDY);
+ /* To state */
+ set_bits32(&in.mlxi_modify_sq_context.mlsqc_flags, MLXCX_SQ_STATE,
+ MLXCX_SQ_STATE_RST);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlwq->mlwq_state &= ~MLXCX_WQ_STARTED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+boolean_t
+mlxcx_cmd_destroy_sq(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_destroy_sq_in_t in;
+ mlxcx_cmd_destroy_sq_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_CREATED);
+ VERIFY0(mlwq->mlwq_state & MLXCX_WQ_STARTED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_destroy_sq_head,
+ MLXCX_OP_DESTROY_SQ, 0);
+
+ in.mlxi_destroy_sq_sqn = to_be24(mlwq->mlwq_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlwq->mlwq_state |= MLXCX_WQ_DESTROYED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+boolean_t
+mlxcx_cmd_create_rqt(mlxcx_t *mlxp, mlxcx_rqtable_t *mlrqt)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_create_rqt_in_t in;
+ mlxcx_cmd_create_rqt_out_t out;
+ mlxcx_rqtable_ctx_t *ctx;
+ boolean_t ret;
+ uint_t i;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ VERIFY0(mlrqt->mlrqt_state & MLXCX_RQT_CREATED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_create_rqt_head,
+ MLXCX_OP_CREATE_RQT, 0);
+
+ ctx = &in.mlxi_create_rqt_context;
+ ASSERT3U(mlrqt->mlrqt_max, <=, MLXCX_RQT_MAX_RQ_REFS);
+ ASSERT3U(mlrqt->mlrqt_max, <=, mlxp->mlx_caps->mlc_max_rqt_size);
+ ctx->mlrqtc_max_size = to_be16(mlrqt->mlrqt_max);
+ ctx->mlrqtc_actual_size = to_be16(mlrqt->mlrqt_used);
+ for (i = 0; i < mlrqt->mlrqt_used; ++i) {
+ ctx->mlrqtc_rqref[i].mlrqtr_rqn = to_be24(
+ mlrqt->mlrqt_rq[i]->mlwq_num);
+ }
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlrqt->mlrqt_num = from_be24(out.mlxo_create_rqt_rqtn);
+ mlrqt->mlrqt_state |= MLXCX_RQT_CREATED;
+ mlrqt->mlrqt_state &= ~MLXCX_RQT_DIRTY;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+boolean_t
+mlxcx_cmd_destroy_rqt(mlxcx_t *mlxp, mlxcx_rqtable_t *mlrqt)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_destroy_rqt_in_t in;
+ mlxcx_cmd_destroy_rqt_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ VERIFY(mlrqt->mlrqt_state & MLXCX_RQT_CREATED);
+ VERIFY0(mlrqt->mlrqt_state & MLXCX_RQT_DESTROYED);
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_destroy_rqt_head,
+ MLXCX_OP_DESTROY_RQT, 0);
+
+ in.mlxi_destroy_rqt_rqtn = to_be24(mlrqt->mlrqt_num);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ if (ret) {
+ mlrqt->mlrqt_state |= MLXCX_RQT_DESTROYED;
+ }
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+boolean_t
+mlxcx_cmd_set_int_mod(mlxcx_t *mlxp, uint_t intr, uint_t min_delay)
+{
+ mlxcx_cmd_t cmd;
+ mlxcx_cmd_config_int_mod_in_t in;
+ mlxcx_cmd_config_int_mod_out_t out;
+ boolean_t ret;
+
+ bzero(&in, sizeof (in));
+ bzero(&out, sizeof (out));
+
+ mlxcx_cmd_init(mlxp, &cmd);
+ mlxcx_cmd_in_header_init(&cmd, &in.mlxi_config_int_mod_head,
+ MLXCX_OP_CONFIG_INT_MODERATION, MLXCX_CMD_CONFIG_INT_MOD_WRITE);
+
+ in.mlxi_config_int_mod_int_vector = to_be16(intr);
+ in.mlxi_config_int_mod_min_delay = to_be16(min_delay);
+
+ if (!mlxcx_cmd_send(mlxp, &cmd, &in, sizeof (in), &out, sizeof (out))) {
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (B_FALSE);
+ }
+ mlxcx_cmd_wait(&cmd);
+
+ ret = mlxcx_cmd_evaluate(mlxp, &cmd);
+ mlxcx_cmd_fini(mlxp, &cmd);
+ return (ret);
+}
+
+/*
+ * CTASSERTs here are for the structs in mlxcx_reg.h, to check they match
+ * against offsets from the PRM.
+ *
+ * They're not in the header file, to avoid them being used by multiple .c
+ * files.
+ */
+
+CTASSERT(offsetof(mlxcx_eventq_ent_t, mleqe_unknown_data) == 0x20);
+CTASSERT(offsetof(mlxcx_eventq_ent_t, mleqe_signature) == 0x3c + 2);
+CTASSERT(sizeof (mlxcx_eventq_ent_t) == 64);
+
+CTASSERT(offsetof(mlxcx_completionq_error_ent_t, mlcqee_byte_cnt) == 0x2C);
+CTASSERT(offsetof(mlxcx_completionq_error_ent_t, mlcqee_wqe_opcode) == 0x38);
+
+CTASSERT(sizeof (mlxcx_completionq_error_ent_t) ==
+ sizeof (mlxcx_completionq_ent_t));
+CTASSERT(sizeof (mlxcx_wqe_control_seg_t) == (1 << 4));
+
+CTASSERT(offsetof(mlxcx_wqe_eth_seg_t, mles_inline_headers) == 0x0e);
+CTASSERT(sizeof (mlxcx_wqe_eth_seg_t) == (1 << 5));
+
+CTASSERT(sizeof (mlxcx_wqe_data_seg_t) == (1 << 4));
+
+CTASSERT(sizeof (mlxcx_sendq_ent_t) == (1 << MLXCX_SENDQ_STRIDE_SHIFT));
+
+CTASSERT(sizeof (mlxcx_sendq_bf_t) == (1 << MLXCX_SENDQ_STRIDE_SHIFT));
+
+CTASSERT(sizeof (mlxcx_sendq_extra_ent_t) == (1 << MLXCX_SENDQ_STRIDE_SHIFT));
+
+CTASSERT(sizeof (mlxcx_recvq_ent_t) == (1 << MLXCX_RECVQ_STRIDE_SHIFT));
+
+CTASSERT(offsetof(mlxcx_workq_ctx_t, mlwqc_dbr_addr) == 0x10);
+CTASSERT(offsetof(mlxcx_workq_ctx_t, mlwqc_pas) == 0xc0);
+
+CTASSERT(offsetof(mlxcx_rq_ctx_t, mlrqc_cqn) == 0x09);
+CTASSERT(offsetof(mlxcx_rq_ctx_t, mlrqc_wq) == 0x30);
+
+CTASSERT(offsetof(mlxcx_sq_ctx_t, mlsqc_cqn) == 0x09);
+CTASSERT(offsetof(mlxcx_sq_ctx_t, mlsqc_tis_lst_sz) == 0x20);
+CTASSERT(offsetof(mlxcx_sq_ctx_t, mlsqc_tis_num) == 0x2d);
+CTASSERT(offsetof(mlxcx_sq_ctx_t, mlsqc_wq) == 0x30);
+
+CTASSERT(sizeof (mlxcx_tis_ctx_t) == 0xa0);
+CTASSERT(offsetof(mlxcx_tis_ctx_t, mltisc_transport_domain) == 0x25);
+
+CTASSERT(offsetof(mlxcx_rqtable_ctx_t, mlrqtc_max_size) == 0x16);
+CTASSERT(offsetof(mlxcx_rqtable_ctx_t, mlrqtc_rqref) == 0xF0);
+
+CTASSERT(offsetof(mlxcx_cmd_create_eq_in_t, mlxi_create_eq_event_bitmask) ==
+ 0x58);
+CTASSERT(offsetof(mlxcx_cmd_create_eq_in_t, mlxi_create_eq_pas) == 0x110);
+CTASSERT(offsetof(mlxcx_cmd_create_eq_in_t, mlxi_create_eq_context) == 0x10);
+
+CTASSERT(offsetof(mlxcx_cmd_create_tir_in_t, mlxi_create_tir_context) == 0x20);
+
+CTASSERT(offsetof(mlxcx_cmd_create_tis_in_t, mlxi_create_tis_context) == 0x20);
+
+CTASSERT(offsetof(mlxcx_cmd_query_special_ctxs_out_t,
+ mlxo_query_special_ctxs_resd_lkey) == 0x0c);
+
+CTASSERT(offsetof(mlxcx_cmd_query_cq_out_t, mlxo_query_cq_context) == 0x10);
+CTASSERT(offsetof(mlxcx_cmd_query_cq_out_t, mlxo_query_cq_pas) == 0x110);
+
+CTASSERT(offsetof(mlxcx_cmd_query_rq_out_t, mlxo_query_rq_context) == 0x20);
+
+CTASSERT(offsetof(mlxcx_cmd_create_sq_in_t, mlxi_create_sq_context) == 0x20);
+
+CTASSERT(offsetof(mlxcx_cmd_modify_sq_in_t, mlxi_modify_sq_context) == 0x20);
+
+CTASSERT(offsetof(mlxcx_cmd_query_sq_out_t, mlxo_query_sq_context) == 0x20);
+
+CTASSERT(offsetof(mlxcx_cmd_create_rqt_in_t, mlxi_create_rqt_context) == 0x20);
+
+CTASSERT(offsetof(mlxcx_reg_pmtu_t, mlrd_pmtu_oper_mtu) == 0x0C);
+
+CTASSERT(sizeof (mlxcx_reg_ptys_t) == 64);
+CTASSERT(offsetof(mlxcx_reg_ptys_t, mlrd_ptys_proto_cap) == 0x0c);
+CTASSERT(offsetof(mlxcx_reg_ptys_t, mlrd_ptys_proto_admin) == 0x18);
+CTASSERT(offsetof(mlxcx_reg_ptys_t, mlrd_ptys_proto_partner_advert) == 0x30);
+
+CTASSERT(offsetof(mlxcx_reg_mcia_t, mlrd_mcia_data) == 0x10);
+
+CTASSERT(offsetof(mlxcx_ppcnt_ieee_802_3_t,
+ mlppc_ieee_802_3_in_range_len_err) == 0x50);
+CTASSERT(offsetof(mlxcx_ppcnt_ieee_802_3_t,
+ mlppc_ieee_802_3_pause_tx) == 0x90);
+
+CTASSERT(sizeof (mlxcx_reg_ppcnt_t) == 256);
+CTASSERT(offsetof(mlxcx_reg_ppcnt_t, mlrd_ppcnt_data) == 0x08);
+
+CTASSERT(offsetof(mlxcx_cmd_access_register_in_t,
+ mlxi_access_register_argument) == 0x0C);
+CTASSERT(offsetof(mlxcx_cmd_access_register_in_t,
+ mlxi_access_register_data) == 0x10);
+
+CTASSERT(offsetof(mlxcx_cmd_access_register_out_t,
+ mlxo_access_register_data) == 0x10);
diff --git a/usr/src/uts/common/io/mlxcx/mlxcx_dma.c b/usr/src/uts/common/io/mlxcx/mlxcx_dma.c
new file mode 100644
index 0000000000..79b9bb3746
--- /dev/null
+++ b/usr/src/uts/common/io/mlxcx/mlxcx_dma.c
@@ -0,0 +1,460 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2020, The University of Queensland
+ * Copyright (c) 2018, Joyent, Inc.
+ */
+
+/*
+ * DMA allocation and tear down routines.
+ */
+
+#include <mlxcx.h>
+
+void
+mlxcx_dma_acc_attr(mlxcx_t *mlxp, ddi_device_acc_attr_t *accp)
+{
+ bzero(accp, sizeof (*accp));
+ accp->devacc_attr_version = DDI_DEVICE_ATTR_V0;
+ accp->devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
+ accp->devacc_attr_dataorder = DDI_STRICTORDER_ACC;
+
+ if (DDI_FM_DMA_ERR_CAP(mlxp->mlx_fm_caps)) {
+ accp->devacc_attr_access = DDI_FLAGERR_ACC;
+ } else {
+ accp->devacc_attr_access = DDI_DEFAULT_ACC;
+ }
+}
+
+void
+mlxcx_dma_page_attr(mlxcx_t *mlxp, ddi_dma_attr_t *attrp)
+{
+ bzero(attrp, sizeof (*attrp));
+ attrp->dma_attr_version = DMA_ATTR_V0;
+
+ /*
+ * This is a 64-bit PCIe device. We can use the entire address space.
+ */
+ attrp->dma_attr_addr_lo = 0x0;
+ attrp->dma_attr_addr_hi = UINT64_MAX;
+
+ /*
+ * The count max indicates the total amount that can fit into one
+ * cookie. Because we're creating a single page for tracking purposes,
+ * this can be a page in size. The alignment and segment are related to
+ * this same requirement. The alignment needs to be page aligned and the
+ * segment is the boundary that this can't cross, aka a 4k page.
+ */
+ attrp->dma_attr_count_max = MLXCX_CMD_DMA_PAGE_SIZE - 1;
+ attrp->dma_attr_align = MLXCX_CMD_DMA_PAGE_SIZE;
+ attrp->dma_attr_seg = MLXCX_CMD_DMA_PAGE_SIZE - 1;
+
+ attrp->dma_attr_burstsizes = 0xfff;
+
+ /*
+ * The minimum and and maximum sizes that we can send. We cap this based
+ * on the use of this, which is a page size.
+ */
+ attrp->dma_attr_minxfer = 0x1;
+ attrp->dma_attr_maxxfer = MLXCX_CMD_DMA_PAGE_SIZE;
+
+ /*
+ * This is supposed to be used for static data structures, therefore we
+ * keep this just to a page.
+ */
+ attrp->dma_attr_sgllen = 1;
+
+ /*
+ * The granularity describe the addressing graularity. That is, the
+ * hardware can ask for chunks in this units of bytes.
+ */
+ attrp->dma_attr_granular = MLXCX_CMD_DMA_PAGE_SIZE;
+
+ if (DDI_FM_DMA_ERR_CAP(mlxp->mlx_fm_caps)) {
+ attrp->dma_attr_flags = DDI_DMA_FLAGERR;
+ } else {
+ attrp->dma_attr_flags = 0;
+ }
+}
+
+/*
+ * DMA attributes for queue memory (EQ, CQ, WQ etc)
+ *
+ * These have to allocate in units of whole pages, but can be multiple
+ * pages and don't have to be physically contiguous.
+ */
+void
+mlxcx_dma_queue_attr(mlxcx_t *mlxp, ddi_dma_attr_t *attrp)
+{
+ bzero(attrp, sizeof (*attrp));
+ attrp->dma_attr_version = DMA_ATTR_V0;
+
+ /*
+ * This is a 64-bit PCIe device. We can use the entire address space.
+ */
+ attrp->dma_attr_addr_lo = 0x0;
+ attrp->dma_attr_addr_hi = UINT64_MAX;
+
+ attrp->dma_attr_count_max = MLXCX_QUEUE_DMA_PAGE_SIZE - 1;
+
+ attrp->dma_attr_align = MLXCX_QUEUE_DMA_PAGE_SIZE;
+
+ attrp->dma_attr_burstsizes = 0xfff;
+
+ /*
+ * The minimum and and maximum sizes that we can send. We cap this based
+ * on the use of this, which is a page size.
+ */
+ attrp->dma_attr_minxfer = MLXCX_QUEUE_DMA_PAGE_SIZE;
+ attrp->dma_attr_maxxfer = UINT32_MAX;
+
+ attrp->dma_attr_seg = UINT64_MAX;
+
+ attrp->dma_attr_granular = MLXCX_QUEUE_DMA_PAGE_SIZE;
+
+ /* But we can have more than one. */
+ attrp->dma_attr_sgllen = MLXCX_CREATE_QUEUE_MAX_PAGES;
+
+ if (DDI_FM_DMA_ERR_CAP(mlxp->mlx_fm_caps)) {
+ attrp->dma_attr_flags = DDI_DMA_FLAGERR;
+ } else {
+ attrp->dma_attr_flags = 0;
+ }
+}
+
+/*
+ * DMA attributes for packet buffers
+ */
+void
+mlxcx_dma_buf_attr(mlxcx_t *mlxp, ddi_dma_attr_t *attrp)
+{
+ bzero(attrp, sizeof (*attrp));
+ attrp->dma_attr_version = DMA_ATTR_V0;
+
+ /*
+ * This is a 64-bit PCIe device. We can use the entire address space.
+ */
+ attrp->dma_attr_addr_lo = 0x0;
+ attrp->dma_attr_addr_hi = UINT64_MAX;
+
+ /*
+ * Each scatter pointer has a 32-bit length field.
+ */
+ attrp->dma_attr_count_max = UINT32_MAX;
+
+ /*
+ * The PRM gives us no alignment requirements for scatter pointers,
+ * but it implies that units < 16bytes are a bad idea.
+ */
+ attrp->dma_attr_align = 16;
+ attrp->dma_attr_granular = 1;
+
+ attrp->dma_attr_burstsizes = 0xfff;
+
+ attrp->dma_attr_minxfer = 1;
+ attrp->dma_attr_maxxfer = UINT64_MAX;
+
+ attrp->dma_attr_seg = UINT64_MAX;
+
+ /*
+ * We choose how many scatter pointers we're allowed per packet when
+ * we set the recv queue stride. This macro is from mlxcx_reg.h where
+ * we fix that for all of our receive queues.
+ */
+ attrp->dma_attr_sgllen = MLXCX_RECVQ_MAX_PTRS;
+
+ if (DDI_FM_DMA_ERR_CAP(mlxp->mlx_fm_caps)) {
+ attrp->dma_attr_flags = DDI_DMA_FLAGERR;
+ } else {
+ attrp->dma_attr_flags = 0;
+ }
+}
+
+/*
+ * DMA attributes for queue doorbells
+ */
+void
+mlxcx_dma_qdbell_attr(mlxcx_t *mlxp, ddi_dma_attr_t *attrp)
+{
+ bzero(attrp, sizeof (*attrp));
+ attrp->dma_attr_version = DMA_ATTR_V0;
+
+ /*
+ * This is a 64-bit PCIe device. We can use the entire address space.
+ */
+ attrp->dma_attr_addr_lo = 0x0;
+ attrp->dma_attr_addr_hi = UINT64_MAX;
+
+ /*
+ * Queue doorbells are always exactly 16 bytes in length, but
+ * the ddi_dma functions don't like such small values of count_max.
+ *
+ * We tell some lies here.
+ */
+ attrp->dma_attr_count_max = MLXCX_QUEUE_DMA_PAGE_SIZE - 1;
+ attrp->dma_attr_align = 8;
+ attrp->dma_attr_burstsizes = 0x8;
+ attrp->dma_attr_minxfer = 1;
+ attrp->dma_attr_maxxfer = UINT16_MAX;
+ attrp->dma_attr_seg = MLXCX_QUEUE_DMA_PAGE_SIZE - 1;
+ attrp->dma_attr_granular = 1;
+ attrp->dma_attr_sgllen = 1;
+
+ if (DDI_FM_DMA_ERR_CAP(mlxp->mlx_fm_caps)) {
+ attrp->dma_attr_flags = DDI_DMA_FLAGERR;
+ } else {
+ attrp->dma_attr_flags = 0;
+ }
+}
+
+void
+mlxcx_dma_free(mlxcx_dma_buffer_t *mxdb)
+{
+ int ret;
+
+ if (mxdb->mxdb_flags & MLXCX_DMABUF_BOUND) {
+ VERIFY(mxdb->mxdb_dma_handle != NULL);
+ ret = ddi_dma_unbind_handle(mxdb->mxdb_dma_handle);
+ VERIFY3S(ret, ==, DDI_SUCCESS);
+ mxdb->mxdb_flags &= ~MLXCX_DMABUF_BOUND;
+ mxdb->mxdb_ncookies = 0;
+ }
+
+ if (mxdb->mxdb_flags & MLXCX_DMABUF_MEM_ALLOC) {
+ ddi_dma_mem_free(&mxdb->mxdb_acc_handle);
+ mxdb->mxdb_acc_handle = NULL;
+ mxdb->mxdb_va = NULL;
+ mxdb->mxdb_len = 0;
+ mxdb->mxdb_flags &= ~MLXCX_DMABUF_MEM_ALLOC;
+ }
+
+ if (mxdb->mxdb_flags & MLXCX_DMABUF_FOREIGN) {
+ /* The mblk will be freed separately */
+ mxdb->mxdb_va = NULL;
+ mxdb->mxdb_len = 0;
+ mxdb->mxdb_flags &= ~MLXCX_DMABUF_FOREIGN;
+ }
+
+ if (mxdb->mxdb_flags & MLXCX_DMABUF_HDL_ALLOC) {
+ ddi_dma_free_handle(&mxdb->mxdb_dma_handle);
+ mxdb->mxdb_dma_handle = NULL;
+ mxdb->mxdb_flags &= ~MLXCX_DMABUF_HDL_ALLOC;
+ }
+
+ ASSERT3U(mxdb->mxdb_flags, ==, 0);
+ ASSERT3P(mxdb->mxdb_dma_handle, ==, NULL);
+ ASSERT3P(mxdb->mxdb_va, ==, NULL);
+ ASSERT3U(mxdb->mxdb_len, ==, 0);
+ ASSERT3U(mxdb->mxdb_ncookies, ==, 0);
+}
+
+void
+mlxcx_dma_unbind(mlxcx_t *mlxp, mlxcx_dma_buffer_t *mxdb)
+{
+ int ret;
+
+ ASSERT(mxdb->mxdb_flags & MLXCX_DMABUF_HDL_ALLOC);
+ ASSERT(mxdb->mxdb_flags & MLXCX_DMABUF_BOUND);
+
+ if (mxdb->mxdb_flags & MLXCX_DMABUF_FOREIGN) {
+ /* The mblk will be freed separately */
+ mxdb->mxdb_va = NULL;
+ mxdb->mxdb_len = 0;
+ mxdb->mxdb_flags &= ~MLXCX_DMABUF_FOREIGN;
+ }
+
+ ret = ddi_dma_unbind_handle(mxdb->mxdb_dma_handle);
+ VERIFY3S(ret, ==, DDI_SUCCESS);
+ mxdb->mxdb_flags &= ~MLXCX_DMABUF_BOUND;
+ mxdb->mxdb_ncookies = 0;
+}
+
+boolean_t
+mlxcx_dma_init(mlxcx_t *mlxp, mlxcx_dma_buffer_t *mxdb,
+ ddi_dma_attr_t *attrp, boolean_t wait)
+{
+ int ret;
+ int (*memcb)(caddr_t);
+
+ if (wait == B_TRUE) {
+ memcb = DDI_DMA_SLEEP;
+ } else {
+ memcb = DDI_DMA_DONTWAIT;
+ }
+
+ ASSERT3S(mxdb->mxdb_flags, ==, 0);
+
+ ret = ddi_dma_alloc_handle(mlxp->mlx_dip, attrp, memcb, NULL,
+ &mxdb->mxdb_dma_handle);
+ if (ret != 0) {
+ mlxcx_warn(mlxp, "!failed to allocate DMA handle: %d", ret);
+ mxdb->mxdb_dma_handle = NULL;
+ return (B_FALSE);
+ }
+ mxdb->mxdb_flags |= MLXCX_DMABUF_HDL_ALLOC;
+
+ return (B_TRUE);
+}
+
+boolean_t
+mlxcx_dma_bind_mblk(mlxcx_t *mlxp, mlxcx_dma_buffer_t *mxdb,
+ const mblk_t *mp, size_t off, boolean_t wait)
+{
+ int ret;
+ uint_t flags = DDI_DMA_STREAMING;
+ int (*memcb)(caddr_t);
+
+ if (wait == B_TRUE) {
+ memcb = DDI_DMA_SLEEP;
+ } else {
+ memcb = DDI_DMA_DONTWAIT;
+ }
+
+ ASSERT(mxdb->mxdb_flags & MLXCX_DMABUF_HDL_ALLOC);
+ ASSERT0(mxdb->mxdb_flags &
+ (MLXCX_DMABUF_FOREIGN | MLXCX_DMABUF_MEM_ALLOC));
+ ASSERT0(mxdb->mxdb_flags & MLXCX_DMABUF_BOUND);
+
+ ASSERT3U(off, <=, MBLKL(mp));
+ mxdb->mxdb_va = (caddr_t)(mp->b_rptr + off);
+ mxdb->mxdb_len = MBLKL(mp) - off;
+ mxdb->mxdb_flags |= MLXCX_DMABUF_FOREIGN;
+
+ ret = ddi_dma_addr_bind_handle(mxdb->mxdb_dma_handle, NULL,
+ mxdb->mxdb_va, mxdb->mxdb_len, DDI_DMA_WRITE | flags, memcb, NULL,
+ NULL, NULL);
+ if (ret != DDI_DMA_MAPPED) {
+ mxdb->mxdb_va = NULL;
+ mxdb->mxdb_len = 0;
+ mxdb->mxdb_flags &= ~MLXCX_DMABUF_FOREIGN;
+ return (B_FALSE);
+ }
+ mxdb->mxdb_flags |= MLXCX_DMABUF_BOUND;
+ mxdb->mxdb_ncookies = ddi_dma_ncookies(mxdb->mxdb_dma_handle);
+
+ return (B_TRUE);
+}
+
+boolean_t
+mlxcx_dma_alloc(mlxcx_t *mlxp, mlxcx_dma_buffer_t *mxdb,
+ ddi_dma_attr_t *attrp, ddi_device_acc_attr_t *accp, boolean_t zero,
+ size_t size, boolean_t wait)
+{
+ int ret;
+ uint_t flags = DDI_DMA_CONSISTENT;
+ size_t len;
+ int (*memcb)(caddr_t);
+
+ if (wait == B_TRUE) {
+ memcb = DDI_DMA_SLEEP;
+ } else {
+ memcb = DDI_DMA_DONTWAIT;
+ }
+
+ ASSERT3U(mxdb->mxdb_flags, ==, 0);
+
+ ret = ddi_dma_alloc_handle(mlxp->mlx_dip, attrp, memcb, NULL,
+ &mxdb->mxdb_dma_handle);
+ if (ret != 0) {
+ mlxcx_warn(mlxp, "!failed to allocate DMA handle: %d", ret);
+ mxdb->mxdb_dma_handle = NULL;
+ return (B_FALSE);
+ }
+ mxdb->mxdb_flags |= MLXCX_DMABUF_HDL_ALLOC;
+
+ ret = ddi_dma_mem_alloc(mxdb->mxdb_dma_handle, size, accp, flags, memcb,
+ NULL, &mxdb->mxdb_va, &len, &mxdb->mxdb_acc_handle);
+ if (ret != DDI_SUCCESS) {
+ mlxcx_warn(mlxp, "!failed to allocate DMA memory: %d", ret);
+ mxdb->mxdb_va = NULL;
+ mxdb->mxdb_acc_handle = NULL;
+ mlxcx_dma_free(mxdb);
+ return (B_FALSE);
+ }
+ mxdb->mxdb_len = size;
+ mxdb->mxdb_flags |= MLXCX_DMABUF_MEM_ALLOC;
+
+ if (zero == B_TRUE)
+ bzero(mxdb->mxdb_va, len);
+
+ ret = ddi_dma_addr_bind_handle(mxdb->mxdb_dma_handle, NULL,
+ mxdb->mxdb_va, len, DDI_DMA_RDWR | flags, memcb, NULL, NULL,
+ NULL);
+ if (ret != 0) {
+ mlxcx_warn(mlxp, "!failed to bind DMA memory: %d", ret);
+ mlxcx_dma_free(mxdb);
+ return (B_FALSE);
+ }
+ mxdb->mxdb_flags |= MLXCX_DMABUF_BOUND;
+ mxdb->mxdb_ncookies = ddi_dma_ncookies(mxdb->mxdb_dma_handle);
+
+ return (B_TRUE);
+}
+
+boolean_t
+mlxcx_dma_alloc_offset(mlxcx_t *mlxp, mlxcx_dma_buffer_t *mxdb,
+ ddi_dma_attr_t *attrp, ddi_device_acc_attr_t *accp, boolean_t zero,
+ size_t size, size_t offset, boolean_t wait)
+{
+ int ret;
+ uint_t flags = DDI_DMA_STREAMING;
+ size_t len;
+ int (*memcb)(caddr_t);
+
+ if (wait == B_TRUE) {
+ memcb = DDI_DMA_SLEEP;
+ } else {
+ memcb = DDI_DMA_DONTWAIT;
+ }
+
+ ASSERT3U(mxdb->mxdb_flags, ==, 0);
+
+ ret = ddi_dma_alloc_handle(mlxp->mlx_dip, attrp, memcb, NULL,
+ &mxdb->mxdb_dma_handle);
+ if (ret != 0) {
+ mlxcx_warn(mlxp, "!failed to allocate DMA handle: %d", ret);
+ mxdb->mxdb_dma_handle = NULL;
+ return (B_FALSE);
+ }
+ mxdb->mxdb_flags |= MLXCX_DMABUF_HDL_ALLOC;
+
+ ret = ddi_dma_mem_alloc(mxdb->mxdb_dma_handle, size + offset, accp,
+ flags, memcb, NULL, &mxdb->mxdb_va, &len, &mxdb->mxdb_acc_handle);
+ if (ret != DDI_SUCCESS) {
+ mlxcx_warn(mlxp, "!failed to allocate DMA memory: %d", ret);
+ mxdb->mxdb_va = NULL;
+ mxdb->mxdb_acc_handle = NULL;
+ mlxcx_dma_free(mxdb);
+ return (B_FALSE);
+ }
+
+ if (zero == B_TRUE)
+ bzero(mxdb->mxdb_va, len);
+
+ mxdb->mxdb_va += offset;
+ len -= offset;
+ mxdb->mxdb_len = len;
+ mxdb->mxdb_flags |= MLXCX_DMABUF_MEM_ALLOC;
+
+ ret = ddi_dma_addr_bind_handle(mxdb->mxdb_dma_handle, NULL,
+ mxdb->mxdb_va, len, DDI_DMA_RDWR | flags, memcb, NULL, NULL,
+ NULL);
+ if (ret != 0) {
+ mlxcx_warn(mlxp, "!failed to bind DMA memory: %d", ret);
+ mlxcx_dma_free(mxdb);
+ return (B_FALSE);
+ }
+ mxdb->mxdb_flags |= MLXCX_DMABUF_BOUND;
+ mxdb->mxdb_ncookies = ddi_dma_ncookies(mxdb->mxdb_dma_handle);
+
+ return (B_TRUE);
+}
diff --git a/usr/src/uts/common/io/mlxcx/mlxcx_endint.h b/usr/src/uts/common/io/mlxcx/mlxcx_endint.h
new file mode 100644
index 0000000000..4ad69173c0
--- /dev/null
+++ b/usr/src/uts/common/io/mlxcx/mlxcx_endint.h
@@ -0,0 +1,305 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2020, The University of Queensland
+ */
+
+#ifndef _MLXCX_ENDINT_H
+#define _MLXCX_ENDINT_H
+
+#include <sys/types.h>
+#include <sys/byteorder.h>
+
+/*
+ * The inlines and structs in this file are used by mlxcx to ensure endian
+ * safety when dealing with memory-mapped structures from the device, and
+ * also simpler use of 24-bit integers (which Mellanox loves).
+ *
+ * By declaring all of these values in the memory-mapped structures as structs
+ * (e.g. uint32be_t) rather than bare integers (uint32_t) we ensure that the
+ * compiler will not allow them to be silently converted to integers and used
+ * without doing the necessary byte-swapping work.
+ *
+ * The uintXbe_t structs are designed to be used inside a #pragma pack(1)
+ * context only and we don't try to fix up their alignment.
+ *
+ * Also present in here are a number of bitsX_t types which can be used to
+ * gain a little bit of type safety when dealing with endian-swapped bitfields.
+ */
+
+#pragma pack(1)
+typedef struct { uint16_t be_val; } uint16be_t;
+typedef struct { uint8_t be_val[3]; } uint24be_t;
+typedef struct { uint32_t be_val; } uint32be_t;
+typedef struct { uint64_t be_val; } uint64be_t;
+#pragma pack()
+
+static inline uint16_t
+from_be16(uint16be_t v)
+{
+ return (BE_16(v.be_val));
+}
+
+static inline uint32_t
+from_be24(uint24be_t v)
+{
+ return (((uint32_t)v.be_val[0] << 16) |
+ ((uint32_t)v.be_val[1] << 8) |
+ ((uint32_t)v.be_val[2]));
+}
+
+static inline uint32_t
+from_be32(uint32be_t v)
+{
+ return (BE_32(v.be_val));
+}
+
+static inline uint64_t
+from_be64(uint64be_t v)
+{
+ return (BE_64(v.be_val));
+}
+
+static inline uint16be_t
+to_be16(uint16_t v)
+{
+ /* CSTYLED */
+ return ((uint16be_t){ .be_val = BE_16(v) });
+}
+
+static inline uint24be_t
+to_be24(uint32_t v)
+{
+ /* CSTYLED */
+ return ((uint24be_t){ .be_val = {
+ (v & 0xFF0000) >> 16,
+ (v & 0x00FF00) >> 8,
+ (v & 0x0000FF)
+ }});
+}
+
+static inline uint32be_t
+to_be32(uint32_t v)
+{
+ /* CSTYLED */
+ return ((uint32be_t){ .be_val = BE_32(v) });
+}
+
+static inline uint64be_t
+to_be64(uint64_t v)
+{
+ /* CSTYLED */
+ return ((uint64be_t){ .be_val = BE_64(v) });
+}
+
+#pragma pack(1)
+typedef struct { uint8_t bit_val; } bits8_t;
+typedef struct { uint16_t bit_val; } bits16_t;
+typedef struct { uint32_t bit_val; } bits32_t;
+typedef struct { uint24be_t bit_val; } bits24_t;
+typedef struct { uint64_t bit_val; } bits64_t;
+typedef struct { uint64_t bit_shift; uint64_t bit_mask; } bitdef_t;
+#pragma pack()
+
+static inline uint8_t
+get_bits8(bits8_t v, bitdef_t d)
+{
+ return ((v.bit_val & d.bit_mask) >> d.bit_shift);
+}
+static inline void
+set_bits8(bits8_t *v, bitdef_t d, uint8_t val)
+{
+ v->bit_val &= ~d.bit_mask;
+ v->bit_val |= (val << d.bit_shift) & d.bit_mask;
+}
+static inline uint8_t
+get_bit8(bits8_t v, uint8_t mask)
+{
+ return ((v.bit_val & mask) != 0);
+}
+static inline void
+set_bit8(bits8_t *v, uint8_t mask)
+{
+ v->bit_val |= mask;
+}
+static inline void
+clear_bit8(bits8_t *v, uint8_t mask)
+{
+ v->bit_val &= ~mask;
+}
+static inline bits8_t
+new_bits8(void)
+{
+ /* CSTYLED */
+ return ((bits8_t){ .bit_val = 0 });
+}
+static inline uint8_t
+from_bits8(bits8_t v)
+{
+ return (v.bit_val);
+}
+
+static inline uint16_t
+get_bits16(bits16_t v, bitdef_t d)
+{
+ return ((BE_16(v.bit_val) & d.bit_mask) >> d.bit_shift);
+}
+static inline void
+set_bits16(bits16_t *v, bitdef_t d, uint16_t val)
+{
+ v->bit_val &= BE_16(~d.bit_mask);
+ v->bit_val |= BE_16((val << d.bit_shift) & d.bit_mask);
+}
+static inline uint16_t
+get_bit16(bits16_t v, uint16_t mask)
+{
+ return ((BE_16(v.bit_val) & mask) != 0);
+}
+static inline void
+set_bit16(bits16_t *v, uint16_t mask)
+{
+ v->bit_val |= BE_16(mask);
+}
+static inline void
+clear_bit16(bits16_t *v, uint16_t mask)
+{
+ v->bit_val &= BE_16(~mask);
+}
+static inline bits16_t
+new_bits16(void)
+{
+ /* CSTYLED */
+ return ((bits16_t){ .bit_val = 0 });
+}
+static inline uint16_t
+from_bits16(bits16_t v)
+{
+ return (BE_16(v.bit_val));
+}
+
+static inline uint32_t
+get_bits32(bits32_t v, bitdef_t d)
+{
+ return ((BE_32(v.bit_val) & d.bit_mask) >> d.bit_shift);
+}
+static inline void
+set_bits32(bits32_t *v, bitdef_t d, uint32_t val)
+{
+ v->bit_val &= BE_32(~d.bit_mask);
+ v->bit_val |= BE_32((val << d.bit_shift) & d.bit_mask);
+}
+static inline uint32_t
+get_bit32(bits32_t v, uint32_t mask)
+{
+ return ((BE_32(v.bit_val) & mask) != 0);
+}
+static inline void
+set_bit32(bits32_t *v, uint32_t mask)
+{
+ v->bit_val |= BE_32(mask);
+}
+static inline void
+clear_bit32(bits32_t *v, uint32_t mask)
+{
+ v->bit_val &= BE_32(~mask);
+}
+static inline bits32_t
+new_bits32(void)
+{
+ /* CSTYLED */
+ return ((bits32_t){ .bit_val = 0 });
+}
+static inline uint32_t
+from_bits32(bits32_t v)
+{
+ return (BE_32(v.bit_val));
+}
+
+static inline uint32_t
+get_bits24(bits24_t v, bitdef_t d)
+{
+ return ((from_be24(v.bit_val) & d.bit_mask) >> d.bit_shift);
+}
+static inline void
+set_bits24(bits24_t *v, bitdef_t d, uint32_t val)
+{
+ uint32_t vv = from_be24(v->bit_val);
+ vv &= ~d.bit_mask;
+ vv |= (val << d.bit_shift) & d.bit_mask;
+ v->bit_val = to_be24(vv);
+}
+static inline uint32_t
+get_bit24(bits24_t v, uint32_t mask)
+{
+ return ((from_be24(v.bit_val) & mask) != 0);
+}
+static inline void
+set_bit24(bits24_t *v, uint32_t mask)
+{
+ v->bit_val = to_be24(from_be24(v->bit_val) | mask);
+}
+static inline void
+clear_bit24(bits24_t *v, uint32_t mask)
+{
+ v->bit_val = to_be24(from_be24(v->bit_val) & ~mask);
+}
+static inline bits24_t
+new_bits24(void)
+{
+ /* CSTYLED */
+ return ((bits24_t){ .bit_val = to_be24(0) });
+}
+static inline uint32_t
+from_bits24(bits24_t v)
+{
+ return (from_be24(v.bit_val));
+}
+
+static inline uint64_t
+get_bits64(bits64_t v, bitdef_t d)
+{
+ return ((BE_64(v.bit_val) & d.bit_mask) >> d.bit_shift);
+}
+static inline void
+set_bits64(bits64_t *v, bitdef_t d, uint64_t val)
+{
+ v->bit_val &= BE_64(~d.bit_mask);
+ v->bit_val |= BE_64((val << d.bit_shift) & d.bit_mask);
+}
+static inline uint64_t
+get_bit64(bits64_t v, uint64_t mask)
+{
+ return ((BE_64(v.bit_val) & mask) != 0);
+}
+static inline void
+set_bit64(bits64_t *v, uint64_t mask)
+{
+ v->bit_val |= BE_64(mask);
+}
+static inline void
+clear_bit64(bits64_t *v, uint64_t mask)
+{
+ v->bit_val &= BE_64(~mask);
+}
+static inline bits64_t
+new_bits64(void)
+{
+ /* CSTYLED */
+ return ((bits64_t){ .bit_val = 0 });
+}
+static inline uint64_t
+from_bits64(bits64_t v)
+{
+ return (BE_64(v.bit_val));
+}
+
+#endif /* _MLXCX_ENDINT_H */
diff --git a/usr/src/uts/common/io/mlxcx/mlxcx_gld.c b/usr/src/uts/common/io/mlxcx/mlxcx_gld.c
new file mode 100644
index 0000000000..871c4f30b3
--- /dev/null
+++ b/usr/src/uts/common/io/mlxcx/mlxcx_gld.c
@@ -0,0 +1,1254 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright (c) 2020, the University of Queensland
+ */
+
+/*
+ * Mellanox Connect-X 4/5/6 driver.
+ */
+
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/devops.h>
+#include <sys/sysmacros.h>
+#include <sys/vlan.h>
+
+#include <sys/pattr.h>
+#include <sys/dlpi.h>
+
+#include <sys/mac_provider.h>
+
+/* Need these for mac_vlan_header_info() */
+#include <sys/mac_client.h>
+#include <sys/mac_client_priv.h>
+
+#include <mlxcx.h>
+
+static char *mlxcx_priv_props[] = {
+ NULL
+};
+
+#define MBITS 1000000ULL
+#define GBITS (1000ULL * MBITS)
+
+static uint64_t
+mlxcx_speed_to_bits(mlxcx_eth_proto_t v)
+{
+ switch (v) {
+ case MLXCX_PROTO_SGMII_100BASE:
+ return (100ULL * MBITS);
+ case MLXCX_PROTO_SGMII:
+ case MLXCX_PROTO_1000BASE_KX:
+ return (1000ULL * MBITS);
+ case MLXCX_PROTO_10GBASE_CX4:
+ case MLXCX_PROTO_10GBASE_KX4:
+ case MLXCX_PROTO_10GBASE_KR:
+ case MLXCX_PROTO_10GBASE_CR:
+ case MLXCX_PROTO_10GBASE_SR:
+ case MLXCX_PROTO_10GBASE_ER_LR:
+ return (10ULL * GBITS);
+ case MLXCX_PROTO_40GBASE_CR4:
+ case MLXCX_PROTO_40GBASE_KR4:
+ case MLXCX_PROTO_40GBASE_SR4:
+ case MLXCX_PROTO_40GBASE_LR4_ER4:
+ return (40ULL * GBITS);
+ case MLXCX_PROTO_25GBASE_CR:
+ case MLXCX_PROTO_25GBASE_KR:
+ case MLXCX_PROTO_25GBASE_SR:
+ return (25ULL * GBITS);
+ case MLXCX_PROTO_50GBASE_SR2:
+ case MLXCX_PROTO_50GBASE_CR2:
+ case MLXCX_PROTO_50GBASE_KR2:
+ return (50ULL * GBITS);
+ case MLXCX_PROTO_100GBASE_CR4:
+ case MLXCX_PROTO_100GBASE_SR4:
+ case MLXCX_PROTO_100GBASE_KR4:
+ return (100ULL * GBITS);
+ default:
+ return (0);
+ }
+}
+
+static int
+mlxcx_mac_stat_rfc_2863(mlxcx_t *mlxp, mlxcx_port_t *port, uint_t stat,
+ uint64_t *val)
+{
+ int ret = 0;
+ boolean_t ok;
+ mlxcx_register_data_t data;
+ mlxcx_ppcnt_rfc_2863_t *st;
+
+ ASSERT(mutex_owned(&port->mlp_mtx));
+
+ bzero(&data, sizeof (data));
+ data.mlrd_ppcnt.mlrd_ppcnt_local_port = port->mlp_num + 1;
+ data.mlrd_ppcnt.mlrd_ppcnt_grp = MLXCX_PPCNT_GRP_RFC_2863;
+ data.mlrd_ppcnt.mlrd_ppcnt_clear = MLXCX_PPCNT_NO_CLEAR;
+
+ ok = mlxcx_cmd_access_register(mlxp, MLXCX_CMD_ACCESS_REGISTER_READ,
+ MLXCX_REG_PPCNT, &data);
+ if (!ok)
+ return (EIO);
+ st = &data.mlrd_ppcnt.mlrd_ppcnt_rfc_2863;
+
+ switch (stat) {
+ case MAC_STAT_RBYTES:
+ *val = from_be64(st->mlppc_rfc_2863_in_octets);
+ break;
+ case MAC_STAT_MULTIRCV:
+ *val = from_be64(st->mlppc_rfc_2863_in_mcast_pkts);
+ break;
+ case MAC_STAT_BRDCSTRCV:
+ *val = from_be64(st->mlppc_rfc_2863_in_bcast_pkts);
+ break;
+ case MAC_STAT_MULTIXMT:
+ *val = from_be64(st->mlppc_rfc_2863_out_mcast_pkts);
+ break;
+ case MAC_STAT_BRDCSTXMT:
+ *val = from_be64(st->mlppc_rfc_2863_out_bcast_pkts);
+ break;
+ case MAC_STAT_IERRORS:
+ *val = from_be64(st->mlppc_rfc_2863_in_errors);
+ break;
+ case MAC_STAT_UNKNOWNS:
+ *val = from_be64(st->mlppc_rfc_2863_in_unknown_protos);
+ break;
+ case MAC_STAT_OERRORS:
+ *val = from_be64(st->mlppc_rfc_2863_out_errors);
+ break;
+ case MAC_STAT_OBYTES:
+ *val = from_be64(st->mlppc_rfc_2863_out_octets);
+ break;
+ default:
+ ret = ENOTSUP;
+ }
+
+ return (ret);
+}
+
+static int
+mlxcx_mac_stat_ieee_802_3(mlxcx_t *mlxp, mlxcx_port_t *port, uint_t stat,
+ uint64_t *val)
+{
+ int ret = 0;
+ boolean_t ok;
+ mlxcx_register_data_t data;
+ mlxcx_ppcnt_ieee_802_3_t *st;
+
+ ASSERT(mutex_owned(&port->mlp_mtx));
+
+ bzero(&data, sizeof (data));
+ data.mlrd_ppcnt.mlrd_ppcnt_local_port = port->mlp_num + 1;
+ data.mlrd_ppcnt.mlrd_ppcnt_grp = MLXCX_PPCNT_GRP_IEEE_802_3;
+ data.mlrd_ppcnt.mlrd_ppcnt_clear = MLXCX_PPCNT_NO_CLEAR;
+
+ ok = mlxcx_cmd_access_register(mlxp, MLXCX_CMD_ACCESS_REGISTER_READ,
+ MLXCX_REG_PPCNT, &data);
+ if (!ok)
+ return (EIO);
+ st = &data.mlrd_ppcnt.mlrd_ppcnt_ieee_802_3;
+
+ switch (stat) {
+ case MAC_STAT_IPACKETS:
+ *val = from_be64(st->mlppc_ieee_802_3_frames_rx);
+ break;
+ case MAC_STAT_OPACKETS:
+ *val = from_be64(st->mlppc_ieee_802_3_frames_tx);
+ break;
+ case ETHER_STAT_ALIGN_ERRORS:
+ *val = from_be64(st->mlppc_ieee_802_3_align_err);
+ break;
+ case ETHER_STAT_FCS_ERRORS:
+ *val = from_be64(st->mlppc_ieee_802_3_fcs_err);
+ break;
+ case ETHER_STAT_TOOLONG_ERRORS:
+ *val = from_be64(st->mlppc_ieee_802_3_frame_too_long_err);
+ break;
+ default:
+ ret = ENOTSUP;
+ }
+
+ return (ret);
+}
+
+static int
+mlxcx_mac_stat(void *arg, uint_t stat, uint64_t *val)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mlxcx_port_t *port = &mlxp->mlx_ports[0];
+ int ret = 0;
+
+ mutex_enter(&port->mlp_mtx);
+
+ switch (stat) {
+ case MAC_STAT_IFSPEED:
+ *val = mlxcx_speed_to_bits(port->mlp_oper_proto);
+ break;
+ case ETHER_STAT_LINK_DUPLEX:
+ *val = LINK_DUPLEX_FULL;
+ break;
+ case MAC_STAT_RBYTES:
+ case MAC_STAT_MULTIRCV:
+ case MAC_STAT_BRDCSTRCV:
+ case MAC_STAT_MULTIXMT:
+ case MAC_STAT_BRDCSTXMT:
+ case MAC_STAT_IERRORS:
+ case MAC_STAT_UNKNOWNS:
+ case MAC_STAT_OERRORS:
+ case MAC_STAT_OBYTES:
+ ret = mlxcx_mac_stat_rfc_2863(mlxp, port, stat, val);
+ break;
+ case MAC_STAT_IPACKETS:
+ case MAC_STAT_OPACKETS:
+ case ETHER_STAT_ALIGN_ERRORS:
+ case ETHER_STAT_FCS_ERRORS:
+ case ETHER_STAT_TOOLONG_ERRORS:
+ ret = mlxcx_mac_stat_ieee_802_3(mlxp, port, stat, val);
+ break;
+ case MAC_STAT_NORCVBUF:
+ *val = port->mlp_stats.mlps_rx_drops;
+ break;
+ default:
+ ret = ENOTSUP;
+ }
+
+ mutex_exit(&port->mlp_mtx);
+
+ return (ret);
+}
+
+static int
+mlxcx_mac_led_set(void *arg, mac_led_mode_t mode, uint_t flags)
+{
+ mlxcx_t *mlxp = arg;
+ mlxcx_port_t *port = &mlxp->mlx_ports[0];
+ int ret = 0;
+
+ if (flags != 0) {
+ return (EINVAL);
+ }
+
+ mutex_enter(&port->mlp_mtx);
+
+ switch (mode) {
+ case MAC_LED_DEFAULT:
+ case MAC_LED_OFF:
+ if (!mlxcx_cmd_set_port_led(mlxp, port, 0)) {
+ ret = EIO;
+ break;
+ }
+ break;
+ case MAC_LED_IDENT:
+ if (!mlxcx_cmd_set_port_led(mlxp, port, UINT16_MAX)) {
+ ret = EIO;
+ break;
+ }
+ break;
+ default:
+ ret = ENOTSUP;
+ }
+
+ mutex_exit(&port->mlp_mtx);
+
+ return (ret);
+}
+
+static int
+mlxcx_mac_txr_info(void *arg, uint_t id, mac_transceiver_info_t *infop)
+{
+ mlxcx_t *mlxp = arg;
+ mlxcx_module_status_t st;
+
+ if (!mlxcx_cmd_query_module_status(mlxp, id, &st, NULL))
+ return (EIO);
+
+ if (st != MLXCX_MODULE_UNPLUGGED)
+ mac_transceiver_info_set_present(infop, B_TRUE);
+
+ if (st == MLXCX_MODULE_PLUGGED)
+ mac_transceiver_info_set_usable(infop, B_TRUE);
+
+ return (0);
+}
+
+static int
+mlxcx_mac_txr_read(void *arg, uint_t id, uint_t page, void *vbuf,
+ size_t nbytes, off_t offset, size_t *nread)
+{
+ mlxcx_t *mlxp = arg;
+ mlxcx_register_data_t data;
+ uint8_t *buf = vbuf;
+ boolean_t ok;
+ size_t take, done = 0;
+ uint8_t i2c_addr;
+
+ if (id != 0 || vbuf == NULL || nbytes == 0 || nread == NULL)
+ return (EINVAL);
+
+ if (nbytes > 256 || offset >= 256 || (offset + nbytes > 256))
+ return (EINVAL);
+
+ /*
+ * The PRM is really not very clear about any of this, but it seems
+ * that the i2c_device_addr field in MCIA is the SFP+ spec "page"
+ * number shifted right by 1 bit. They're written in the SFF spec
+ * like "1010000X" so Mellanox just dropped the X.
+ *
+ * This means that if we want page 0xA0, we put 0x50 in the
+ * i2c_device_addr field.
+ *
+ * The "page_number" field in MCIA means something else. Don't ask me
+ * what. FreeBSD leaves it as zero, so we will too!
+ */
+ i2c_addr = page >> 1;
+
+ while (done < nbytes) {
+ take = nbytes - done;
+ if (take > sizeof (data.mlrd_mcia.mlrd_mcia_data))
+ take = sizeof (data.mlrd_mcia.mlrd_mcia_data);
+
+ bzero(&data, sizeof (data));
+ ASSERT3U(id, <=, 0xff);
+ data.mlrd_mcia.mlrd_mcia_module = (uint8_t)id;
+ data.mlrd_mcia.mlrd_mcia_i2c_device_addr = i2c_addr;
+ data.mlrd_mcia.mlrd_mcia_device_addr = to_be16(offset);
+ data.mlrd_mcia.mlrd_mcia_size = to_be16(take);
+
+ ok = mlxcx_cmd_access_register(mlxp,
+ MLXCX_CMD_ACCESS_REGISTER_READ, MLXCX_REG_MCIA, &data);
+ if (!ok) {
+ *nread = 0;
+ return (EIO);
+ }
+
+ if (data.mlrd_mcia.mlrd_mcia_status != MLXCX_MCIA_STATUS_OK) {
+ *nread = 0;
+ return (EIO);
+ }
+
+ bcopy(data.mlrd_mcia.mlrd_mcia_data, &buf[done], take);
+
+ done += take;
+ offset += take;
+ }
+ *nread = done;
+ return (0);
+}
+
+static int
+mlxcx_mac_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
+{
+ mlxcx_work_queue_t *wq = (mlxcx_work_queue_t *)rh;
+ (void) wq;
+
+ /*
+ * We should add support for using hw flow counters and such to
+ * get per-ring statistics. Not done yet though!
+ */
+
+ switch (stat) {
+ default:
+ *val = 0;
+ return (ENOTSUP);
+ }
+
+ return (0);
+}
+
+static int
+mlxcx_mac_start(void *arg)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ (void) mlxp;
+ return (0);
+}
+
+static void
+mlxcx_mac_stop(void *arg)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ (void) mlxp;
+}
+
+static mblk_t *
+mlxcx_mac_ring_tx(void *arg, mblk_t *mp)
+{
+ mlxcx_work_queue_t *sq = (mlxcx_work_queue_t *)arg;
+ mlxcx_t *mlxp = sq->mlwq_mlx;
+ mlxcx_completion_queue_t *cq;
+ mlxcx_buffer_t *b;
+ mac_header_info_t mhi;
+ mblk_t *kmp, *nmp;
+ uint8_t inline_hdrs[MLXCX_MAX_INLINE_HEADERLEN];
+ size_t inline_hdrlen, rem, off;
+ uint32_t chkflags = 0;
+ boolean_t ok;
+ size_t take = 0;
+
+ VERIFY(mp->b_next == NULL);
+
+ mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &chkflags);
+
+ if (mac_vlan_header_info(mlxp->mlx_mac_hdl, mp, &mhi) != 0) {
+ /*
+ * We got given a frame without a valid L2 header on it. We
+ * can't really transmit that (mlx parts don't like it), so
+ * we will just drop it on the floor.
+ */
+ freemsg(mp);
+ return (NULL);
+ }
+
+ inline_hdrlen = rem = mhi.mhi_hdrsize;
+
+ kmp = mp;
+ off = 0;
+ while (rem > 0) {
+ const ptrdiff_t sz = MBLKL(kmp);
+ ASSERT3S(sz, >=, 0);
+ ASSERT3U(sz, <=, SIZE_MAX);
+ take = sz;
+ if (take > rem)
+ take = rem;
+ bcopy(kmp->b_rptr, inline_hdrs + off, take);
+ rem -= take;
+ off += take;
+ if (take == sz) {
+ take = 0;
+ kmp = kmp->b_cont;
+ }
+ }
+
+ if (!mlxcx_buf_bind_or_copy(mlxp, sq, kmp, take, &b)) {
+ /*
+ * Something went really wrong, and we probably will never be
+ * able to TX again (all our buffers are broken and DMA is
+ * failing). Drop the packet on the floor -- FMA should be
+ * reporting this error elsewhere.
+ */
+ freemsg(mp);
+ return (NULL);
+ }
+
+ mutex_enter(&sq->mlwq_mtx);
+ VERIFY3U(sq->mlwq_inline_mode, <=, MLXCX_ETH_INLINE_L2);
+ cq = sq->mlwq_cq;
+
+ /*
+ * state is a single int, so read-only access without the CQ lock
+ * should be fine.
+ */
+ if (cq->mlcq_state & MLXCX_CQ_TEARDOWN) {
+ mutex_exit(&sq->mlwq_mtx);
+ mlxcx_buf_return_chain(mlxp, b, B_FALSE);
+ return (NULL);
+ }
+
+ if (sq->mlwq_state & MLXCX_WQ_TEARDOWN) {
+ mutex_exit(&sq->mlwq_mtx);
+ mlxcx_buf_return_chain(mlxp, b, B_FALSE);
+ return (NULL);
+ }
+
+ /*
+ * Similar logic here: bufcnt is only manipulated atomically, and
+ * bufhwm is set at startup.
+ */
+ if (cq->mlcq_bufcnt >= cq->mlcq_bufhwm) {
+ atomic_or_uint(&cq->mlcq_state, MLXCX_CQ_BLOCKED_MAC);
+ mutex_exit(&sq->mlwq_mtx);
+ mlxcx_buf_return_chain(mlxp, b, B_TRUE);
+ return (mp);
+ }
+
+ ok = mlxcx_sq_add_buffer(mlxp, sq, inline_hdrs, inline_hdrlen,
+ chkflags, b);
+ if (!ok) {
+ atomic_or_uint(&cq->mlcq_state, MLXCX_CQ_BLOCKED_MAC);
+ mutex_exit(&sq->mlwq_mtx);
+ mlxcx_buf_return_chain(mlxp, b, B_TRUE);
+ return (mp);
+ }
+
+ /*
+ * Now that we've successfully enqueued the rest of the packet,
+ * free any mblks that we cut off while inlining headers.
+ */
+ for (; mp != kmp; mp = nmp) {
+ nmp = mp->b_cont;
+ freeb(mp);
+ }
+
+ mutex_exit(&sq->mlwq_mtx);
+
+ return (NULL);
+}
+
+static int
+mlxcx_mac_setpromisc(void *arg, boolean_t on)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mlxcx_port_t *port = &mlxp->mlx_ports[0];
+ mlxcx_flow_group_t *fg;
+ mlxcx_flow_entry_t *fe;
+ mlxcx_flow_table_t *ft;
+ mlxcx_ring_group_t *g;
+ int ret = 0;
+ uint_t idx;
+
+ mutex_enter(&port->mlp_mtx);
+
+ /*
+ * First, do the top-level flow entry on the root flow table for
+	 * the port. This catches all traffic that doesn't match any
+	 * MAC filters.
+ */
+ ft = port->mlp_rx_flow;
+ mutex_enter(&ft->mlft_mtx);
+ fg = port->mlp_promisc;
+ fe = list_head(&fg->mlfg_entries);
+ if (on && !(fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED)) {
+ if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+ ret = EIO;
+ }
+ } else if (!on && (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED)) {
+ if (!mlxcx_cmd_delete_flow_table_entry(mlxp, fe)) {
+ ret = EIO;
+ }
+ }
+ mutex_exit(&ft->mlft_mtx);
+
+ /*
+ * If we failed to change the top-level entry, don't bother with
+ * trying the per-group ones.
+ */
+ if (ret != 0) {
+ mutex_exit(&port->mlp_mtx);
+ return (ret);
+ }
+
+ /*
+ * Then, do the per-rx-group flow entries which catch traffic that
+ * matched a MAC filter but failed to match a VLAN filter.
+ */
+ for (idx = 0; idx < mlxp->mlx_rx_ngroups; ++idx) {
+ g = &mlxp->mlx_rx_groups[idx];
+
+ mutex_enter(&g->mlg_mtx);
+
+ ft = g->mlg_rx_vlan_ft;
+ mutex_enter(&ft->mlft_mtx);
+
+ fg = g->mlg_rx_vlan_promisc_fg;
+ fe = list_head(&fg->mlfg_entries);
+ if (on && !(fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED)) {
+ if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+ ret = EIO;
+ }
+ } else if (!on && (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED)) {
+ if (!mlxcx_cmd_delete_flow_table_entry(mlxp, fe)) {
+ ret = EIO;
+ }
+ }
+
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ }
+
+ mutex_exit(&port->mlp_mtx);
+ return (ret);
+}
+
+static int
+mlxcx_mac_multicast(void *arg, boolean_t add, const uint8_t *addr)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mlxcx_port_t *port = &mlxp->mlx_ports[0];
+ mlxcx_ring_group_t *g = &mlxp->mlx_rx_groups[0];
+ int ret = 0;
+
+ mutex_enter(&port->mlp_mtx);
+ mutex_enter(&g->mlg_mtx);
+ if (add) {
+ if (!mlxcx_add_umcast_entry(mlxp, port, g, addr)) {
+ ret = EIO;
+ }
+ } else {
+ if (!mlxcx_remove_umcast_entry(mlxp, port, g, addr)) {
+ ret = EIO;
+ }
+ }
+ mutex_exit(&g->mlg_mtx);
+ mutex_exit(&port->mlp_mtx);
+ return (ret);
+}
+
+static int
+mlxcx_group_add_mac(void *arg, const uint8_t *mac_addr)
+{
+ mlxcx_ring_group_t *g = arg;
+ mlxcx_t *mlxp = g->mlg_mlx;
+ mlxcx_port_t *port = g->mlg_port;
+ int ret = 0;
+
+ mutex_enter(&port->mlp_mtx);
+ mutex_enter(&g->mlg_mtx);
+ if (!mlxcx_add_umcast_entry(mlxp, port, g, mac_addr)) {
+ ret = EIO;
+ }
+ mutex_exit(&g->mlg_mtx);
+ mutex_exit(&port->mlp_mtx);
+
+ return (ret);
+}
+
+/*
+ * Support for VLAN steering into groups is not yet available in upstream
+ * illumos.
+ */
+#if defined(MAC_VLAN_UNTAGGED)
+
+static int
+mlxcx_group_add_vlan(mac_group_driver_t gh, uint16_t vid)
+{
+ mlxcx_ring_group_t *g = (mlxcx_ring_group_t *)gh;
+ mlxcx_t *mlxp = g->mlg_mlx;
+ int ret = 0;
+ boolean_t tagged = B_TRUE;
+
+ if (vid == MAC_VLAN_UNTAGGED) {
+ vid = 0;
+ tagged = B_FALSE;
+ }
+
+ mutex_enter(&g->mlg_mtx);
+ if (!mlxcx_add_vlan_entry(mlxp, g, tagged, vid)) {
+ ret = EIO;
+ }
+ mutex_exit(&g->mlg_mtx);
+
+ return (ret);
+}
+
+static int
+mlxcx_group_remove_vlan(mac_group_driver_t gh, uint16_t vid)
+{
+ mlxcx_ring_group_t *g = (mlxcx_ring_group_t *)gh;
+ mlxcx_t *mlxp = g->mlg_mlx;
+ int ret = 0;
+ boolean_t tagged = B_TRUE;
+
+ if (vid == MAC_VLAN_UNTAGGED) {
+ vid = 0;
+ tagged = B_FALSE;
+ }
+
+ mutex_enter(&g->mlg_mtx);
+ if (!mlxcx_remove_vlan_entry(mlxp, g, tagged, vid)) {
+ ret = EIO;
+ }
+ mutex_exit(&g->mlg_mtx);
+
+ return (ret);
+}
+
+#endif /* MAC_VLAN_UNTAGGED */
+
+static int
+mlxcx_group_remove_mac(void *arg, const uint8_t *mac_addr)
+{
+ mlxcx_ring_group_t *g = arg;
+ mlxcx_t *mlxp = g->mlg_mlx;
+ mlxcx_port_t *port = g->mlg_port;
+ int ret = 0;
+
+ mutex_enter(&port->mlp_mtx);
+ mutex_enter(&g->mlg_mtx);
+ if (!mlxcx_remove_umcast_entry(mlxp, port, g, mac_addr)) {
+ ret = EIO;
+ }
+ mutex_exit(&g->mlg_mtx);
+ mutex_exit(&port->mlp_mtx);
+
+ return (ret);
+}
+
+static int
+mlxcx_mac_ring_start(mac_ring_driver_t rh, uint64_t gen_num)
+{
+ mlxcx_work_queue_t *wq = (mlxcx_work_queue_t *)rh;
+ mlxcx_completion_queue_t *cq = wq->mlwq_cq;
+ mlxcx_ring_group_t *g = wq->mlwq_group;
+ mlxcx_t *mlxp = wq->mlwq_mlx;
+
+ ASSERT(cq != NULL);
+ ASSERT(g != NULL);
+
+ ASSERT(wq->mlwq_type == MLXCX_WQ_TYPE_SENDQ ||
+ wq->mlwq_type == MLXCX_WQ_TYPE_RECVQ);
+ if (wq->mlwq_type == MLXCX_WQ_TYPE_SENDQ &&
+ !mlxcx_tx_ring_start(mlxp, g, wq))
+ return (EIO);
+ if (wq->mlwq_type == MLXCX_WQ_TYPE_RECVQ &&
+ !mlxcx_rx_ring_start(mlxp, g, wq))
+ return (EIO);
+
+ mutex_enter(&cq->mlcq_mtx);
+ cq->mlcq_mac_gen = gen_num;
+ mutex_exit(&cq->mlcq_mtx);
+
+ return (0);
+}
+
+static void
+mlxcx_mac_ring_stop(mac_ring_driver_t rh)
+{
+ mlxcx_work_queue_t *wq = (mlxcx_work_queue_t *)rh;
+ mlxcx_completion_queue_t *cq = wq->mlwq_cq;
+ mlxcx_t *mlxp = wq->mlwq_mlx;
+ mlxcx_buf_shard_t *s;
+ mlxcx_buffer_t *buf;
+
+ mutex_enter(&cq->mlcq_mtx);
+ mutex_enter(&wq->mlwq_mtx);
+ if (wq->mlwq_state & MLXCX_WQ_STARTED) {
+ if (wq->mlwq_type == MLXCX_WQ_TYPE_RECVQ &&
+ !mlxcx_cmd_stop_rq(mlxp, wq)) {
+ mutex_exit(&wq->mlwq_mtx);
+ mutex_exit(&cq->mlcq_mtx);
+ return;
+ }
+ if (wq->mlwq_type == MLXCX_WQ_TYPE_SENDQ &&
+ !mlxcx_cmd_stop_sq(mlxp, wq)) {
+ mutex_exit(&wq->mlwq_mtx);
+ mutex_exit(&cq->mlcq_mtx);
+ return;
+ }
+ }
+ ASSERT0(wq->mlwq_state & MLXCX_WQ_STARTED);
+
+ if (wq->mlwq_state & MLXCX_WQ_BUFFERS) {
+ /* Return any outstanding buffers to the free pool. */
+ while ((buf = list_remove_head(&cq->mlcq_buffers)) != NULL) {
+ mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
+ }
+ mutex_enter(&cq->mlcq_bufbmtx);
+ while ((buf = list_remove_head(&cq->mlcq_buffers_b)) != NULL) {
+ mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
+ }
+ mutex_exit(&cq->mlcq_bufbmtx);
+ cq->mlcq_bufcnt = 0;
+
+ s = wq->mlwq_bufs;
+ mutex_enter(&s->mlbs_mtx);
+ while (!list_is_empty(&s->mlbs_busy))
+ cv_wait(&s->mlbs_free_nonempty, &s->mlbs_mtx);
+ while ((buf = list_head(&s->mlbs_free)) != NULL) {
+ mlxcx_buf_destroy(mlxp, buf);
+ }
+ mutex_exit(&s->mlbs_mtx);
+
+ s = wq->mlwq_foreign_bufs;
+ if (s != NULL) {
+ mutex_enter(&s->mlbs_mtx);
+ while (!list_is_empty(&s->mlbs_busy))
+ cv_wait(&s->mlbs_free_nonempty, &s->mlbs_mtx);
+ while ((buf = list_head(&s->mlbs_free)) != NULL) {
+ mlxcx_buf_destroy(mlxp, buf);
+ }
+ mutex_exit(&s->mlbs_mtx);
+ }
+
+ wq->mlwq_state &= ~MLXCX_WQ_BUFFERS;
+ }
+ ASSERT0(wq->mlwq_state & MLXCX_WQ_BUFFERS);
+
+ mutex_exit(&wq->mlwq_mtx);
+ mutex_exit(&cq->mlcq_mtx);
+}
+
+static int
+mlxcx_mac_group_start(mac_group_driver_t gh)
+{
+ mlxcx_ring_group_t *g = (mlxcx_ring_group_t *)gh;
+ mlxcx_t *mlxp = g->mlg_mlx;
+
+ VERIFY3S(g->mlg_type, ==, MLXCX_GROUP_RX);
+ ASSERT(mlxp != NULL);
+
+ if (g->mlg_state & MLXCX_GROUP_RUNNING)
+ return (0);
+
+ if (!mlxcx_rx_group_start(mlxp, g))
+ return (EIO);
+
+ return (0);
+}
+
+static void
+mlxcx_mac_fill_tx_ring(void *arg, mac_ring_type_t rtype, const int group_index,
+ const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mlxcx_ring_group_t *g;
+ mlxcx_work_queue_t *wq;
+ mac_intr_t *mintr = &infop->mri_intr;
+
+ if (rtype != MAC_RING_TYPE_TX)
+ return;
+ ASSERT3S(group_index, ==, -1);
+
+ g = &mlxp->mlx_tx_groups[0];
+ ASSERT(g->mlg_state & MLXCX_GROUP_INIT);
+ mutex_enter(&g->mlg_mtx);
+
+ ASSERT3S(ring_index, >=, 0);
+ ASSERT3S(ring_index, <, g->mlg_nwqs);
+
+ wq = &g->mlg_wqs[ring_index];
+
+ wq->mlwq_cq->mlcq_mac_hdl = rh;
+
+ infop->mri_driver = (mac_ring_driver_t)wq;
+ infop->mri_start = mlxcx_mac_ring_start;
+ infop->mri_stop = mlxcx_mac_ring_stop;
+ infop->mri_tx = mlxcx_mac_ring_tx;
+ infop->mri_stat = mlxcx_mac_ring_stat;
+
+ mintr->mi_ddi_handle = mlxp->mlx_intr_handles[
+ wq->mlwq_cq->mlcq_eq->mleq_intr_index];
+
+ mutex_exit(&g->mlg_mtx);
+}
+
+static int
+mlxcx_mac_ring_intr_enable(mac_intr_handle_t intrh)
+{
+ mlxcx_completion_queue_t *cq = (mlxcx_completion_queue_t *)intrh;
+ mlxcx_event_queue_t *eq = cq->mlcq_eq;
+ mlxcx_t *mlxp = cq->mlcq_mlx;
+
+ /*
+ * We are going to call mlxcx_arm_cq() here, so we take the EQ lock
+ * as well as the CQ one to make sure we don't race against
+ * mlxcx_intr_n().
+ */
+ mutex_enter(&eq->mleq_mtx);
+ mutex_enter(&cq->mlcq_mtx);
+ if (cq->mlcq_state & MLXCX_CQ_POLLING) {
+ cq->mlcq_state &= ~MLXCX_CQ_POLLING;
+ if (!(cq->mlcq_state & MLXCX_CQ_ARMED))
+ mlxcx_arm_cq(mlxp, cq);
+ }
+ mutex_exit(&cq->mlcq_mtx);
+ mutex_exit(&eq->mleq_mtx);
+
+ return (0);
+}
+
+static int
+mlxcx_mac_ring_intr_disable(mac_intr_handle_t intrh)
+{
+ mlxcx_completion_queue_t *cq = (mlxcx_completion_queue_t *)intrh;
+
+ atomic_or_uint(&cq->mlcq_state, MLXCX_CQ_POLLING);
+ mutex_enter(&cq->mlcq_mtx);
+ VERIFY(cq->mlcq_state & MLXCX_CQ_POLLING);
+ mutex_exit(&cq->mlcq_mtx);
+
+ return (0);
+}
+
+static mblk_t *
+mlxcx_mac_ring_rx_poll(void *arg, int poll_bytes)
+{
+ mlxcx_work_queue_t *wq = (mlxcx_work_queue_t *)arg;
+ mlxcx_completion_queue_t *cq = wq->mlwq_cq;
+ mlxcx_t *mlxp = wq->mlwq_mlx;
+ mblk_t *mp;
+
+ ASSERT(cq != NULL);
+ ASSERT3S(poll_bytes, >, 0);
+ if (poll_bytes == 0)
+ return (NULL);
+
+ mutex_enter(&cq->mlcq_mtx);
+ mp = mlxcx_rx_poll(mlxp, cq, poll_bytes);
+ mutex_exit(&cq->mlcq_mtx);
+
+ return (mp);
+}
+
+static void
+mlxcx_mac_fill_rx_ring(void *arg, mac_ring_type_t rtype, const int group_index,
+ const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mlxcx_ring_group_t *g;
+ mlxcx_work_queue_t *wq;
+ mac_intr_t *mintr = &infop->mri_intr;
+
+ if (rtype != MAC_RING_TYPE_RX)
+ return;
+ ASSERT3S(group_index, >=, 0);
+ ASSERT3S(group_index, <, mlxp->mlx_rx_ngroups);
+
+ g = &mlxp->mlx_rx_groups[group_index];
+ ASSERT(g->mlg_state & MLXCX_GROUP_INIT);
+ mutex_enter(&g->mlg_mtx);
+
+ ASSERT3S(ring_index, >=, 0);
+ ASSERT3S(ring_index, <, g->mlg_nwqs);
+
+ ASSERT(g->mlg_state & MLXCX_GROUP_WQS);
+ wq = &g->mlg_wqs[ring_index];
+
+ wq->mlwq_cq->mlcq_mac_hdl = rh;
+
+ infop->mri_driver = (mac_ring_driver_t)wq;
+ infop->mri_start = mlxcx_mac_ring_start;
+ infop->mri_stop = mlxcx_mac_ring_stop;
+ infop->mri_poll = mlxcx_mac_ring_rx_poll;
+ infop->mri_stat = mlxcx_mac_ring_stat;
+
+ mintr->mi_handle = (mac_intr_handle_t)wq->mlwq_cq;
+ mintr->mi_enable = mlxcx_mac_ring_intr_enable;
+ mintr->mi_disable = mlxcx_mac_ring_intr_disable;
+
+ mintr->mi_ddi_handle = mlxp->mlx_intr_handles[
+ wq->mlwq_cq->mlcq_eq->mleq_intr_index];
+
+ mutex_exit(&g->mlg_mtx);
+}
+
+static void
+mlxcx_mac_fill_rx_group(void *arg, mac_ring_type_t rtype, const int index,
+ mac_group_info_t *infop, mac_group_handle_t gh)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mlxcx_ring_group_t *g;
+
+ if (rtype != MAC_RING_TYPE_RX)
+ return;
+
+ ASSERT3S(index, >=, 0);
+ ASSERT3S(index, <, mlxp->mlx_rx_ngroups);
+ g = &mlxp->mlx_rx_groups[index];
+ ASSERT(g->mlg_state & MLXCX_GROUP_INIT);
+
+ g->mlg_mac_hdl = gh;
+
+ infop->mgi_driver = (mac_group_driver_t)g;
+ infop->mgi_start = mlxcx_mac_group_start;
+ infop->mgi_stop = NULL;
+ infop->mgi_addmac = mlxcx_group_add_mac;
+ infop->mgi_remmac = mlxcx_group_remove_mac;
+#if defined(MAC_VLAN_UNTAGGED)
+ infop->mgi_addvlan = mlxcx_group_add_vlan;
+ infop->mgi_remvlan = mlxcx_group_remove_vlan;
+#endif /* MAC_VLAN_UNTAGGED */
+
+ infop->mgi_count = g->mlg_nwqs;
+}
+
+static boolean_t
+mlxcx_mac_getcapab(void *arg, mac_capab_t cap, void *cap_data)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mac_capab_rings_t *cap_rings;
+ mac_capab_led_t *cap_leds;
+ mac_capab_transceiver_t *cap_txr;
+ uint_t i, n = 0;
+
+ switch (cap) {
+
+ case MAC_CAPAB_RINGS:
+ cap_rings = cap_data;
+ cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
+ switch (cap_rings->mr_type) {
+ case MAC_RING_TYPE_TX:
+ cap_rings->mr_gnum = 0;
+ cap_rings->mr_rnum = mlxp->mlx_tx_groups[0].mlg_nwqs;
+ cap_rings->mr_rget = mlxcx_mac_fill_tx_ring;
+ cap_rings->mr_gget = NULL;
+ cap_rings->mr_gaddring = NULL;
+ cap_rings->mr_gremring = NULL;
+ break;
+ case MAC_RING_TYPE_RX:
+ cap_rings->mr_gnum = mlxp->mlx_rx_ngroups;
+ for (i = 0; i < mlxp->mlx_rx_ngroups; ++i)
+ n += mlxp->mlx_rx_groups[i].mlg_nwqs;
+ cap_rings->mr_rnum = n;
+ cap_rings->mr_rget = mlxcx_mac_fill_rx_ring;
+ cap_rings->mr_gget = mlxcx_mac_fill_rx_group;
+ cap_rings->mr_gaddring = NULL;
+ cap_rings->mr_gremring = NULL;
+ break;
+ default:
+ return (B_FALSE);
+ }
+ break;
+
+ case MAC_CAPAB_HCKSUM:
+ if (mlxp->mlx_caps->mlc_checksum) {
+ *(uint32_t *)cap_data = HCKSUM_INET_FULL_V4 |
+ HCKSUM_INET_FULL_V6 | HCKSUM_IPHDRCKSUM;
+ }
+ break;
+
+ case MAC_CAPAB_LED:
+ cap_leds = cap_data;
+
+ cap_leds->mcl_flags = 0;
+ cap_leds->mcl_modes = MAC_LED_DEFAULT | MAC_LED_OFF |
+ MAC_LED_IDENT;
+ cap_leds->mcl_set = mlxcx_mac_led_set;
+ break;
+
+ case MAC_CAPAB_TRANSCEIVER:
+ cap_txr = cap_data;
+
+ cap_txr->mct_flags = 0;
+ cap_txr->mct_ntransceivers = 1;
+ cap_txr->mct_info = mlxcx_mac_txr_info;
+ cap_txr->mct_read = mlxcx_mac_txr_read;
+ break;
+
+ default:
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+static void
+mlxcx_mac_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ mac_prop_info_handle_t prh)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mlxcx_port_t *port = &mlxp->mlx_ports[0];
+
+ mutex_enter(&port->mlp_mtx);
+
+ switch (pr_num) {
+ case MAC_PROP_DUPLEX:
+ case MAC_PROP_SPEED:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ break;
+ case MAC_PROP_MTU:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
+ mac_prop_info_set_range_uint32(prh, MLXCX_MTU_OFFSET,
+ port->mlp_max_mtu);
+ mac_prop_info_set_default_uint32(prh,
+ port->mlp_mtu - MLXCX_MTU_OFFSET);
+ break;
+ case MAC_PROP_AUTONEG:
+ mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
+ mac_prop_info_set_default_uint8(prh, 1);
+ break;
+ default:
+ break;
+ }
+
+ mutex_exit(&port->mlp_mtx);
+}
+
+static int
+mlxcx_mac_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_valsize, const void *pr_val)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mlxcx_port_t *port = &mlxp->mlx_ports[0];
+ int ret = 0;
+ uint32_t new_mtu, new_hw_mtu, old_mtu;
+ mlxcx_buf_shard_t *sh;
+ boolean_t allocd = B_FALSE;
+
+ mutex_enter(&port->mlp_mtx);
+
+ switch (pr_num) {
+ case MAC_PROP_MTU:
+ bcopy(pr_val, &new_mtu, sizeof (new_mtu));
+ new_hw_mtu = new_mtu + MLXCX_MTU_OFFSET;
+ if (new_hw_mtu == port->mlp_mtu)
+ break;
+ if (new_hw_mtu > port->mlp_max_mtu) {
+ ret = EINVAL;
+ break;
+ }
+ sh = list_head(&mlxp->mlx_buf_shards);
+ for (; sh != NULL; sh = list_next(&mlxp->mlx_buf_shards, sh)) {
+ mutex_enter(&sh->mlbs_mtx);
+ if (!list_is_empty(&sh->mlbs_free) ||
+ !list_is_empty(&sh->mlbs_busy)) {
+ allocd = B_TRUE;
+ mutex_exit(&sh->mlbs_mtx);
+ break;
+ }
+ mutex_exit(&sh->mlbs_mtx);
+ }
+ if (allocd) {
+ ret = EBUSY;
+ break;
+ }
+ old_mtu = port->mlp_mtu;
+ ret = mac_maxsdu_update(mlxp->mlx_mac_hdl, new_mtu);
+ if (ret != 0)
+ break;
+ port->mlp_mtu = new_hw_mtu;
+ if (!mlxcx_cmd_modify_nic_vport_ctx(mlxp, port,
+ MLXCX_MODIFY_NIC_VPORT_CTX_MTU)) {
+ port->mlp_mtu = old_mtu;
+ (void) mac_maxsdu_update(mlxp->mlx_mac_hdl, old_mtu);
+ ret = EIO;
+ break;
+ }
+ if (!mlxcx_cmd_set_port_mtu(mlxp, port)) {
+ port->mlp_mtu = old_mtu;
+ (void) mac_maxsdu_update(mlxp->mlx_mac_hdl, old_mtu);
+ ret = EIO;
+ break;
+ }
+ break;
+ default:
+ ret = ENOTSUP;
+ break;
+ }
+
+ mutex_exit(&port->mlp_mtx);
+
+ return (ret);
+}
+
+static int
+mlxcx_mac_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
+ uint_t pr_valsize, void *pr_val)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mlxcx_port_t *port = &mlxp->mlx_ports[0];
+ uint64_t speed;
+ int ret = 0;
+
+ mutex_enter(&port->mlp_mtx);
+
+ switch (pr_num) {
+ case MAC_PROP_DUPLEX:
+ if (pr_valsize < sizeof (link_duplex_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ /* connectx parts only support full duplex */
+ *(link_duplex_t *)pr_val = LINK_DUPLEX_FULL;
+ break;
+ case MAC_PROP_SPEED:
+ if (pr_valsize < sizeof (uint64_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ speed = mlxcx_speed_to_bits(port->mlp_oper_proto);
+ bcopy(&speed, pr_val, sizeof (speed));
+ break;
+ case MAC_PROP_STATUS:
+ if (pr_valsize < sizeof (link_state_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ switch (port->mlp_oper_status) {
+ case MLXCX_PORT_STATUS_UP:
+ case MLXCX_PORT_STATUS_UP_ONCE:
+ *(link_state_t *)pr_val = LINK_STATE_UP;
+ break;
+ case MLXCX_PORT_STATUS_DOWN:
+ *(link_state_t *)pr_val = LINK_STATE_DOWN;
+ break;
+ default:
+ *(link_state_t *)pr_val = LINK_STATE_UNKNOWN;
+ }
+ break;
+ case MAC_PROP_AUTONEG:
+ if (pr_valsize < sizeof (uint8_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ *(uint8_t *)pr_val = port->mlp_autoneg;
+ break;
+ case MAC_PROP_MTU:
+ if (pr_valsize < sizeof (uint32_t)) {
+ ret = EOVERFLOW;
+ break;
+ }
+ *(uint32_t *)pr_val = port->mlp_mtu - MLXCX_MTU_OFFSET;
+ break;
+ default:
+ ret = ENOTSUP;
+ break;
+ }
+
+ mutex_exit(&port->mlp_mtx);
+
+ return (ret);
+}
+
+#define MLXCX_MAC_CALLBACK_FLAGS \
+ (MC_GETCAPAB | MC_GETPROP | MC_PROPINFO | MC_SETPROP)
+
+static mac_callbacks_t mlxcx_mac_callbacks = {
+ .mc_callbacks = MLXCX_MAC_CALLBACK_FLAGS,
+ .mc_getstat = mlxcx_mac_stat,
+ .mc_start = mlxcx_mac_start,
+ .mc_stop = mlxcx_mac_stop,
+ .mc_setpromisc = mlxcx_mac_setpromisc,
+ .mc_multicst = mlxcx_mac_multicast,
+ .mc_ioctl = NULL,
+ .mc_getcapab = mlxcx_mac_getcapab,
+ .mc_setprop = mlxcx_mac_setprop,
+ .mc_getprop = mlxcx_mac_getprop,
+ .mc_propinfo = mlxcx_mac_propinfo,
+ .mc_tx = NULL,
+ .mc_unicst = NULL,
+};
+
+boolean_t
+mlxcx_register_mac(mlxcx_t *mlxp)
+{
+ mac_register_t *mac = mac_alloc(MAC_VERSION);
+ mlxcx_port_t *port;
+ int ret;
+
+ if (mac == NULL)
+ return (B_FALSE);
+
+ VERIFY3U(mlxp->mlx_nports, ==, 1);
+ port = &mlxp->mlx_ports[0];
+
+ mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
+ mac->m_driver = mlxp;
+ mac->m_dip = mlxp->mlx_dip;
+ mac->m_src_addr = port->mlp_mac_address;
+ mac->m_callbacks = &mlxcx_mac_callbacks;
+ mac->m_min_sdu = MLXCX_MTU_OFFSET;
+ mac->m_max_sdu = port->mlp_mtu - MLXCX_MTU_OFFSET;
+ mac->m_margin = VLAN_TAGSZ;
+ mac->m_priv_props = mlxcx_priv_props;
+ mac->m_v12n = MAC_VIRT_LEVEL1;
+
+ ret = mac_register(mac, &mlxp->mlx_mac_hdl);
+ if (ret != 0) {
+ mlxcx_warn(mlxp, "mac_register() returned %d", ret);
+ }
+ mac_free(mac);
+
+ mlxcx_update_link_state(mlxp, port);
+
+ return (ret == 0);
+}
diff --git a/usr/src/uts/common/io/mlxcx/mlxcx_intr.c b/usr/src/uts/common/io/mlxcx/mlxcx_intr.c
new file mode 100644
index 0000000000..0516f86d6b
--- /dev/null
+++ b/usr/src/uts/common/io/mlxcx/mlxcx_intr.c
@@ -0,0 +1,1010 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright (c) 2020, the University of Queensland
+ */
+
+/*
+ * Mellanox Connect-X 4/5/6 driver.
+ */
+
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/devops.h>
+#include <sys/sysmacros.h>
+
+#include <sys/mac_provider.h>
+
+#include <mlxcx.h>
+
+/*
+ * Tear down all MSI-X interrupts and their associated event queues.
+ *
+ * Callers must have already destroyed the EQs themselves: we VERIFY that
+ * every created EQ has also been destroyed, and that any CQ trees are
+ * empty.  EQ 0 carries async events only and has no CQ AVL tree (it is
+ * only created for i != 0 in mlxcx_intr_setup()), hence the i != 0 guard.
+ *
+ * NOTE(review): this is also invoked from mlxcx_intr_setup() error paths
+ * that run before mlx_eqs is allocated -- on those paths mlx_eqs is NULL
+ * and the mleq dereferences below would fault; confirm.
+ */
+void
+mlxcx_intr_teardown(mlxcx_t *mlxp)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < mlxp->mlx_intr_count; ++i) {
+ mlxcx_event_queue_t *mleq = &mlxp->mlx_eqs[i];
+ mutex_enter(&mleq->mleq_mtx);
+ VERIFY0(mleq->mleq_state & MLXCX_EQ_ALLOC);
+ if (mleq->mleq_state & MLXCX_EQ_CREATED)
+ VERIFY(mleq->mleq_state & MLXCX_EQ_DESTROYED);
+ if (i != 0) {
+ VERIFY(avl_is_empty(&mleq->mleq_cqs));
+ avl_destroy(&mleq->mleq_cqs);
+ }
+ mutex_exit(&mleq->mleq_mtx);
+ (void) ddi_intr_disable(mlxp->mlx_intr_handles[i]);
+ (void) ddi_intr_remove_handler(mlxp->mlx_intr_handles[i]);
+ ret = ddi_intr_free(mlxp->mlx_intr_handles[i]);
+ if (ret != DDI_SUCCESS) {
+ mlxcx_warn(mlxp, "failed to free interrupt %d: %d",
+ i, ret);
+ }
+ mutex_destroy(&mleq->mleq_mtx);
+ }
+ kmem_free(mlxp->mlx_intr_handles, mlxp->mlx_intr_size);
+ kmem_free(mlxp->mlx_eqs, mlxp->mlx_eqs_size);
+ mlxp->mlx_intr_handles = NULL;
+ mlxp->mlx_eqs = NULL;
+}
+
+/*
+ * Get the next SW-owned entry on the event queue, or NULL if we reach the end.
+ *
+ * Ownership is tracked by a phase bit: since mleq_nents is a power of two
+ * (2^mleq_entshift), bit mleq_entshift of the consumer counter flips each
+ * time we wrap the ring, and an entry is software-owned when the low bit
+ * of its owner byte matches that phase.  On a match we advance mleq_cc;
+ * on a mismatch (HW still owns it) or a DMA fault we return NULL.
+ */
+static mlxcx_eventq_ent_t *
+mlxcx_eq_next(mlxcx_event_queue_t *mleq)
+{
+ mlxcx_eventq_ent_t *ent;
+ ddi_fm_error_t err;
+ uint_t ci;
+ const uint_t swowner = ((mleq->mleq_cc >> mleq->mleq_entshift) & 1);
+
+ ASSERT(mutex_owned(&mleq->mleq_mtx));
+ ASSERT(mleq->mleq_state & MLXCX_EQ_CREATED);
+ ASSERT0(mleq->mleq_state & MLXCX_EQ_DESTROYED);
+
+ /* mleq_nents is always a power of 2 */
+ ci = mleq->mleq_cc & (mleq->mleq_nents - 1);
+
+ ent = &mleq->mleq_ent[ci];
+ /* Sync only the one entry we are about to inspect. */
+ VERIFY0(ddi_dma_sync(mleq->mleq_dma.mxdb_dma_handle,
+ (uintptr_t)ent - (uintptr_t)mleq->mleq_ent,
+ sizeof (mlxcx_eventq_ent_t), DDI_DMA_SYNC_FORCPU));
+ ddi_fm_dma_err_get(mleq->mleq_dma.mxdb_dma_handle, &err,
+ DDI_FME_VERSION);
+ if (err.fme_status == DDI_FM_OK && (ent->mleqe_owner & 1) == swowner) {
+ /* The PRM says we have to membar here, so we're doing it */
+ membar_consumer();
+ ++mleq->mleq_cc;
+ return (ent);
+ }
+ /*
+ * In the case of a DMA error, we should re-arm this EQ and then come
+ * back and try again when the device wakes us back up.
+ *
+ * Hopefully the fault will be gone by then.
+ */
+ ddi_fm_dma_err_clear(mleq->mleq_dma.mxdb_dma_handle, DDI_FME_VERSION);
+
+ return (NULL);
+}
+
+/*
+ * Arm an event queue: mark it ARMED, record the consumer counter at arm
+ * time, and write the EQN/CI pair to the UAR ARM doorbell.  The doorbell
+ * write is retried up to mlxcx_doorbell_tries times on an FM-detected
+ * access error before declaring the service lost.
+ */
+void
+mlxcx_arm_eq(mlxcx_t *mlxp, mlxcx_event_queue_t *mleq)
+{
+ uint_t try = 0;
+ ddi_fm_error_t err;
+ bits32_t v = new_bits32();
+
+ ASSERT(mutex_owned(&mleq->mleq_mtx));
+ ASSERT(mleq->mleq_state & MLXCX_EQ_CREATED);
+ ASSERT0(mleq->mleq_state & MLXCX_EQ_DESTROYED);
+ /* Arming twice, or arming while polled, is a caller bug. */
+ ASSERT0(mleq->mleq_state & MLXCX_EQ_ARMED);
+ ASSERT0(mleq->mleq_state & MLXCX_EQ_POLLING);
+
+ mleq->mleq_state |= MLXCX_EQ_ARMED;
+ mleq->mleq_cc_armed = mleq->mleq_cc;
+
+ set_bits32(&v, MLXCX_EQ_ARM_EQN, mleq->mleq_num);
+ set_bits32(&v, MLXCX_EQ_ARM_CI, mleq->mleq_cc);
+
+retry:
+ mlxcx_uar_put32(mlxp, mleq->mleq_uar, MLXCX_UAR_EQ_ARM,
+ from_bits32(v));
+ ddi_fm_acc_err_get(mlxp->mlx_regs_handle, &err,
+ DDI_FME_VERSION);
+ if (err.fme_status == DDI_FM_OK)
+ return;
+ if (try++ < mlxcx_doorbell_tries) {
+ ddi_fm_acc_err_clear(mlxp->mlx_regs_handle, DDI_FME_VERSION);
+ goto retry;
+ }
+ ddi_fm_service_impact(mlxp->mlx_dip, DDI_SERVICE_LOST);
+}
+
+/*
+ * Push the current consumer counter to the hardware via the NOARM
+ * doorbell, without re-arming the EQ.  Used periodically while
+ * processing events so the HW view of CI doesn't fall too far behind
+ * (see the wrap-avoidance comment in mlxcx_intr_n()).
+ */
+static void
+mlxcx_update_eq(mlxcx_t *mlxp, mlxcx_event_queue_t *mleq)
+{
+ bits32_t v = new_bits32();
+ ddi_fm_error_t err;
+
+ ASSERT(mutex_owned(&mleq->mleq_mtx));
+ ASSERT(mleq->mleq_state & MLXCX_EQ_CREATED);
+ ASSERT0(mleq->mleq_state & MLXCX_EQ_DESTROYED);
+ ASSERT0(mleq->mleq_state & MLXCX_EQ_ARMED);
+
+ set_bits32(&v, MLXCX_EQ_ARM_EQN, mleq->mleq_num);
+ set_bits32(&v, MLXCX_EQ_ARM_CI, mleq->mleq_cc);
+
+ mlxcx_uar_put32(mlxp, mleq->mleq_uar, MLXCX_UAR_EQ_NOARM,
+ from_bits32(v));
+ ddi_fm_acc_err_get(mlxp->mlx_regs_handle, &err,
+ DDI_FME_VERSION);
+ ddi_fm_acc_err_clear(mlxp->mlx_regs_handle, DDI_FME_VERSION);
+ /*
+ * Ignore the error, if it's still happening when we try to re-arm the
+ * EQ, we will note the impact then.
+ */
+}
+
+/*
+ * Get the next SW-owned completion queue entry, or NULL.
+ *
+ * Same phase-bit ownership scheme as mlxcx_eq_next(): bit mlcq_entshift
+ * of the consumer counter flips per ring wrap, and the entry is ours
+ * when the low bit of its owner byte matches.  Advances mlcq_cc on
+ * success; returns NULL on HW ownership or a DMA fault.
+ */
+static mlxcx_completionq_ent_t *
+mlxcx_cq_next(mlxcx_completion_queue_t *mlcq)
+{
+ mlxcx_completionq_ent_t *ent;
+ ddi_fm_error_t err;
+ uint_t ci;
+ const uint_t swowner = ((mlcq->mlcq_cc >> mlcq->mlcq_entshift) & 1);
+
+ ASSERT(mutex_owned(&mlcq->mlcq_mtx));
+ ASSERT(mlcq->mlcq_state & MLXCX_CQ_CREATED);
+ ASSERT0(mlcq->mlcq_state & MLXCX_CQ_DESTROYED);
+
+ /* mlcq_nents is always a power of 2 */
+ ci = mlcq->mlcq_cc & (mlcq->mlcq_nents - 1);
+
+ ent = &mlcq->mlcq_ent[ci];
+ /* Sync only the entry we are inspecting. */
+ VERIFY0(ddi_dma_sync(mlcq->mlcq_dma.mxdb_dma_handle,
+ (uintptr_t)ent - (uintptr_t)mlcq->mlcq_ent,
+ sizeof (mlxcx_completionq_ent_t), DDI_DMA_SYNC_FORCPU));
+ ddi_fm_dma_err_get(mlcq->mlcq_dma.mxdb_dma_handle, &err,
+ DDI_FME_VERSION);
+ if (err.fme_status == DDI_FM_OK && (ent->mlcqe_owner & 1) == swowner) {
+ /* The PRM says we have to membar here, so we're doing it */
+ membar_consumer();
+ ++mlcq->mlcq_cc;
+ return (ent);
+ }
+ ddi_fm_dma_err_clear(mlcq->mlcq_dma.mxdb_dma_handle, DDI_FME_VERSION);
+
+ return (NULL);
+}
+
+/*
+ * Arm a completion queue.
+ *
+ * Arming a CQ requires two writes: the in-memory doorbell record (update
+ * CI + arm CI, DMA-synced to the device), then the UAR CQ ARM register
+ * carrying the sequence/CI word and the CQ number.  Both writes are
+ * retried on FM-detected faults; note that `try' is shared between the
+ * DMA-sync retry and the UAR retry, so the two paths share the
+ * mlxcx_doorbell_tries budget.  Silently does nothing if the CQ is being
+ * torn down.
+ */
+void
+mlxcx_arm_cq(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq)
+{
+ bits32_t dbval = new_bits32();
+ uint64_t udbval;
+ ddi_fm_error_t err;
+ uint_t try = 0;
+
+ ASSERT(mutex_owned(&mlcq->mlcq_mtx));
+ ASSERT(mlcq->mlcq_state & MLXCX_CQ_CREATED);
+ ASSERT0(mlcq->mlcq_state & MLXCX_CQ_DESTROYED);
+
+ /* Re-arming is only legal once a new event has been counted. */
+ if (mlcq->mlcq_state & MLXCX_CQ_ARMED)
+ ASSERT3U(mlcq->mlcq_ec, >, mlcq->mlcq_ec_armed);
+
+ if (mlcq->mlcq_state & MLXCX_CQ_TEARDOWN)
+ return;
+
+ mlcq->mlcq_state |= MLXCX_CQ_ARMED;
+ mlcq->mlcq_cc_armed = mlcq->mlcq_cc;
+ mlcq->mlcq_ec_armed = mlcq->mlcq_ec;
+
+ set_bits32(&dbval, MLXCX_CQ_ARM_SEQ, mlcq->mlcq_ec);
+ set_bits32(&dbval, MLXCX_CQ_ARM_CI, mlcq->mlcq_cc);
+
+ /* UAR word: arm value in the high 32 bits, 24-bit CQN in the low. */
+ udbval = (uint64_t)from_bits32(dbval) << 32;
+ udbval |= mlcq->mlcq_num & 0xffffff;
+
+ mlcq->mlcq_doorbell->mlcqd_update_ci = to_be24(mlcq->mlcq_cc);
+ mlcq->mlcq_doorbell->mlcqd_arm_ci = dbval;
+
+retry:
+ MLXCX_DMA_SYNC(mlcq->mlcq_doorbell_dma, DDI_DMA_SYNC_FORDEV);
+ ddi_fm_dma_err_get(mlcq->mlcq_doorbell_dma.mxdb_dma_handle, &err,
+ DDI_FME_VERSION);
+ if (err.fme_status != DDI_FM_OK) {
+ if (try++ < mlxcx_doorbell_tries) {
+ ddi_fm_dma_err_clear(
+ mlcq->mlcq_doorbell_dma.mxdb_dma_handle,
+ DDI_FME_VERSION);
+ goto retry;
+ } else {
+ goto err;
+ }
+ }
+
+ mlxcx_uar_put64(mlxp, mlcq->mlcq_uar, MLXCX_UAR_CQ_ARM, udbval);
+ ddi_fm_acc_err_get(mlxp->mlx_regs_handle, &err,
+ DDI_FME_VERSION);
+ if (err.fme_status == DDI_FM_OK)
+ return;
+ if (try++ < mlxcx_doorbell_tries) {
+ ddi_fm_acc_err_clear(mlxp->mlx_regs_handle, DDI_FME_VERSION);
+ goto retry;
+ }
+
+err:
+ ddi_fm_service_impact(mlxp->mlx_dip, DDI_SERVICE_LOST);
+}
+
+/*
+ * Return a printable name for an EQ event type, for diagnostics.
+ * Unrecognized values map to "UNKNOWN".
+ */
+const char *
+mlxcx_event_name(mlxcx_event_t evt)
+{
+ switch (evt) {
+ case MLXCX_EVENT_COMPLETION:
+ return ("COMPLETION");
+ case MLXCX_EVENT_PATH_MIGRATED:
+ return ("PATH_MIGRATED");
+ case MLXCX_EVENT_COMM_ESTABLISH:
+ return ("COMM_ESTABLISH");
+ case MLXCX_EVENT_SENDQ_DRAIN:
+ return ("SENDQ_DRAIN");
+ case MLXCX_EVENT_LAST_WQE:
+ return ("LAST_WQE");
+ case MLXCX_EVENT_SRQ_LIMIT:
+ return ("SRQ_LIMIT");
+ case MLXCX_EVENT_DCT_ALL_CLOSED:
+ return ("DCT_ALL_CLOSED");
+ case MLXCX_EVENT_DCT_ACCKEY_VIOL:
+ return ("DCT_ACCKEY_VIOL");
+ case MLXCX_EVENT_CQ_ERROR:
+ return ("CQ_ERROR");
+ case MLXCX_EVENT_WQ_CATASTROPHE:
+ return ("WQ_CATASTROPHE");
+ case MLXCX_EVENT_PATH_MIGRATE_FAIL:
+ return ("PATH_MIGRATE_FAIL");
+ case MLXCX_EVENT_PAGE_FAULT:
+ return ("PAGE_FAULT");
+ case MLXCX_EVENT_WQ_INVALID_REQ:
+ return ("WQ_INVALID_REQ");
+ case MLXCX_EVENT_WQ_ACCESS_VIOL:
+ return ("WQ_ACCESS_VIOL");
+ case MLXCX_EVENT_SRQ_CATASTROPHE:
+ return ("SRQ_CATASTROPHE");
+ case MLXCX_EVENT_INTERNAL_ERROR:
+ return ("INTERNAL_ERROR");
+ case MLXCX_EVENT_PORT_STATE:
+ return ("PORT_STATE");
+ case MLXCX_EVENT_GPIO:
+ return ("GPIO");
+ case MLXCX_EVENT_PORT_MODULE:
+ return ("PORT_MODULE");
+ case MLXCX_EVENT_TEMP_WARNING:
+ return ("TEMP_WARNING");
+ case MLXCX_EVENT_REMOTE_CONFIG:
+ return ("REMOTE_CONFIG");
+ case MLXCX_EVENT_DCBX_CHANGE:
+ return ("DCBX_CHANGE");
+ case MLXCX_EVENT_DOORBELL_CONGEST:
+ return ("DOORBELL_CONGEST");
+ case MLXCX_EVENT_STALL_VL:
+ return ("STALL_VL");
+ case MLXCX_EVENT_CMD_COMPLETION:
+ return ("CMD_COMPLETION");
+ case MLXCX_EVENT_PAGE_REQUEST:
+ return ("PAGE_REQUEST");
+ case MLXCX_EVENT_NIC_VPORT:
+ return ("NIC_VPORT");
+ case MLXCX_EVENT_EC_PARAMS_CHANGE:
+ return ("EC_PARAMS_CHANGE");
+ case MLXCX_EVENT_XRQ_ERROR:
+ return ("XRQ_ERROR");
+ }
+ return ("UNKNOWN");
+}
+
+/*
+ * Should be called only when link state has changed.
+ *
+ * Re-queries port status and speed from the hardware (best effort: the
+ * command results are deliberately ignored here, so on failure we report
+ * whatever status was last cached) and pushes the mapped link state to
+ * the MAC layer.  Anything other than UP/UP_ONCE/DOWN maps to UNKNOWN.
+ */
+void
+mlxcx_update_link_state(mlxcx_t *mlxp, mlxcx_port_t *port)
+{
+ link_state_t ls;
+
+ mutex_enter(&port->mlp_mtx);
+ (void) mlxcx_cmd_query_port_status(mlxp, port);
+ (void) mlxcx_cmd_query_port_speed(mlxp, port);
+
+ switch (port->mlp_oper_status) {
+ case MLXCX_PORT_STATUS_UP:
+ case MLXCX_PORT_STATUS_UP_ONCE:
+ ls = LINK_STATE_UP;
+ break;
+ case MLXCX_PORT_STATUS_DOWN:
+ ls = LINK_STATE_DOWN;
+ break;
+ default:
+ ls = LINK_STATE_UNKNOWN;
+ }
+ mac_link_update(mlxp->mlx_mac_hdl, ls);
+
+ mutex_exit(&port->mlp_mtx);
+}
+
+/*
+ * Satisfy one round of a hardware page request: allocate up to
+ * MLXCX_MANAGE_PAGES_MAX_PAGES DMA pages, hand them to the device, and
+ * track them in the mlx_pages AVL so they can be reclaimed later.
+ *
+ * NOTE(review): the cleanup_npages path has two apparent bugs.  (1) When
+ * the allocation loop fails at index i, we jump there while NOT holding
+ * mlx_pagemtx, yet the path ends in mutex_exit(&mlxp->mlx_pagemtx); and
+ * (2) the cleanup loop frees pages[0..togive-1] even though only the
+ * first i entries were ever allocated, so it touches uninitialized
+ * stack slots and double-handles the failed entry.  Both need fixing
+ * (bound the loop by the number actually allocated, and only drop the
+ * mutex on the path that took it).
+ */
+static void
+mlxcx_give_pages_once(mlxcx_t *mlxp, size_t npages)
+{
+ ddi_device_acc_attr_t acc;
+ ddi_dma_attr_t attr;
+ mlxcx_dev_page_t *mdp;
+ int32_t togive;
+ mlxcx_dev_page_t *pages[MLXCX_MANAGE_PAGES_MAX_PAGES];
+ uint_t i;
+ const ddi_dma_cookie_t *ck;
+
+ /* One command can carry at most MLXCX_MANAGE_PAGES_MAX_PAGES. */
+ togive = MIN(npages, MLXCX_MANAGE_PAGES_MAX_PAGES);
+
+ for (i = 0; i < togive; i++) {
+ mdp = kmem_zalloc(sizeof (mlxcx_dev_page_t), KM_SLEEP);
+ mlxcx_dma_acc_attr(mlxp, &acc);
+ mlxcx_dma_page_attr(mlxp, &attr);
+ if (!mlxcx_dma_alloc(mlxp, &mdp->mxdp_dma, &attr, &acc,
+ B_TRUE, MLXCX_HW_PAGE_SIZE, B_TRUE)) {
+ mlxcx_warn(mlxp, "failed to allocate 4k page %u/%u", i,
+ togive);
+ goto cleanup_npages;
+ }
+ ck = mlxcx_dma_cookie_one(&mdp->mxdp_dma);
+ mdp->mxdp_pa = ck->dmac_laddress;
+ pages[i] = mdp;
+ }
+
+ mutex_enter(&mlxp->mlx_pagemtx);
+
+ if (!mlxcx_cmd_give_pages(mlxp,
+ MLXCX_MANAGE_PAGES_OPMOD_GIVE_PAGES, togive, pages)) {
+ mlxcx_warn(mlxp, "!hardware refused our gift of %u "
+ "pages!", togive);
+ goto cleanup_npages;
+ }
+
+ for (i = 0; i < togive; i++) {
+ avl_add(&mlxp->mlx_pages, pages[i]);
+ }
+ mlxp->mlx_npages += togive;
+ mutex_exit(&mlxp->mlx_pagemtx);
+
+ return;
+
+cleanup_npages:
+ for (i = 0; i < togive; i++) {
+ mdp = pages[i];
+ mlxcx_dma_free(&mdp->mxdp_dma);
+ kmem_free(mdp, sizeof (mlxcx_dev_page_t));
+ }
+ /* Tell the hardware we had an allocation failure. */
+ (void) mlxcx_cmd_give_pages(mlxp, MLXCX_MANAGE_PAGES_OPMOD_ALLOC_FAIL,
+ 0, NULL);
+ mutex_exit(&mlxp->mlx_pagemtx);
+}
+
+/*
+ * Reclaim one round of pages the hardware wants to return: ask for up to
+ * MLXCX_MANAGE_PAGES_MAX_PAGES back, then look each returned PA up in
+ * the mlx_pages AVL and free the matching DMA allocation.  A PA we have
+ * no record of is logged but otherwise ignored.
+ *
+ * NOTE(review): the early return when mlxcx_cmd_return_pages() fails
+ * leaves mlx_pagemtx held -- that path needs a mutex_exit() (or the
+ * mutex_enter() should move below the command).
+ */
+static void
+mlxcx_take_pages_once(mlxcx_t *mlxp, size_t npages)
+{
+ uint_t i;
+ int32_t req, ret;
+ uint64_t pas[MLXCX_MANAGE_PAGES_MAX_PAGES];
+ mlxcx_dev_page_t *mdp, probe;
+
+ mutex_enter(&mlxp->mlx_pagemtx);
+
+ ASSERT0(avl_is_empty(&mlxp->mlx_pages));
+ req = MIN(npages, MLXCX_MANAGE_PAGES_MAX_PAGES);
+
+ if (!mlxcx_cmd_return_pages(mlxp, req, pas, &ret)) {
+ return;
+ }
+
+ /* ret is the number of pages the HW actually returned (<= req). */
+ for (i = 0; i < ret; i++) {
+ bzero(&probe, sizeof (probe));
+ probe.mxdp_pa = pas[i];
+
+ mdp = avl_find(&mlxp->mlx_pages, &probe, NULL);
+
+ if (mdp != NULL) {
+ avl_remove(&mlxp->mlx_pages, mdp);
+ mlxp->mlx_npages--;
+ mlxcx_dma_free(&mdp->mxdp_dma);
+ kmem_free(mdp, sizeof (mlxcx_dev_page_t));
+ } else {
+ mlxcx_warn(mlxp, "hardware returned a page "
+ "with PA 0x%" PRIx64 " but we have no "
+ "record of giving out such a page", pas[i]);
+ }
+ }
+
+ mutex_exit(&mlxp->mlx_pagemtx);
+}
+
+/*
+ * Return a printable name for a port-module error type, for the
+ * FM ereport payload built in mlxcx_report_module_error().
+ */
+static const char *
+mlxcx_module_error_string(mlxcx_module_error_type_t err)
+{
+ switch (err) {
+ case MLXCX_MODULE_ERR_POWER_BUDGET:
+ return ("POWER_BUDGET");
+ case MLXCX_MODULE_ERR_LONG_RANGE:
+ return ("LONG_RANGE");
+ case MLXCX_MODULE_ERR_BUS_STUCK:
+ return ("BUS_STUCK");
+ case MLXCX_MODULE_ERR_NO_EEPROM:
+ return ("NO_EEPROM");
+ case MLXCX_MODULE_ERR_ENFORCEMENT:
+ return ("ENFORCEMENT");
+ case MLXCX_MODULE_ERR_UNKNOWN_IDENT:
+ return ("UNKNOWN_IDENT");
+ case MLXCX_MODULE_ERR_HIGH_TEMP:
+ return ("HIGH_TEMP");
+ case MLXCX_MODULE_ERR_CABLE_SHORTED:
+ return ("CABLE_SHORTED");
+ default:
+ return ("UNKNOWN");
+ }
+}
+
+/*
+ * Translate a PORT_MODULE event into a generic NIC transceiver-error
+ * ereport.  Only the MLXCX_MODULE_ERROR status posts anything; all other
+ * module states return silently.  The device-specific error code is
+ * mapped onto the generic DDI_FM_TXR_ERROR_* classes, with the raw
+ * value preserved in the mlxcx_* payload members for diagnosis.
+ */
+static void
+mlxcx_report_module_error(mlxcx_t *mlxp, mlxcx_evdata_port_mod_t *evd)
+{
+ uint64_t ena;
+ char buf[FM_MAX_CLASS];
+ const char *lename;
+ const char *ename;
+ const char *stname;
+ uint_t eno = 0;
+ mlxcx_module_status_t state = evd->mled_port_mod_module_status;
+
+ switch (state) {
+ case MLXCX_MODULE_ERROR:
+ stname = "error";
+ eno = evd->mled_port_mod_error_type;
+ lename = mlxcx_module_error_string(eno);
+ switch (eno) {
+ case MLXCX_MODULE_ERR_ENFORCEMENT:
+ ename = DDI_FM_TXR_ERROR_WHITELIST;
+ break;
+ case MLXCX_MODULE_ERR_UNKNOWN_IDENT:
+ case MLXCX_MODULE_ERR_NO_EEPROM:
+ ename = DDI_FM_TXR_ERROR_NOTSUPP;
+ break;
+ case MLXCX_MODULE_ERR_HIGH_TEMP:
+ ename = DDI_FM_TXR_ERROR_OVERTEMP;
+ break;
+ case MLXCX_MODULE_ERR_POWER_BUDGET:
+ case MLXCX_MODULE_ERR_LONG_RANGE:
+ case MLXCX_MODULE_ERR_CABLE_SHORTED:
+ ename = DDI_FM_TXR_ERROR_HWFAIL;
+ break;
+ case MLXCX_MODULE_ERR_BUS_STUCK:
+ default:
+ ename = DDI_FM_TXR_ERROR_UNKNOWN;
+ }
+ break;
+ default:
+ return;
+ }
+
+ (void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
+ DDI_FM_NIC, DDI_FM_TXR_ERROR);
+ ena = fm_ena_generate(0, FM_ENA_FMT1);
+ /* If ereports aren't enabled for this instance, post nothing. */
+ if (!DDI_FM_EREPORT_CAP(mlxp->mlx_fm_caps))
+ return;
+
+ ddi_fm_ereport_post(mlxp->mlx_dip, buf, ena, DDI_NOSLEEP,
+ /* compulsory FM props */
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+ /* generic NIC txr error event props */
+ "error", DATA_TYPE_STRING, ename,
+ "port_index", DATA_TYPE_UINT8, 0,
+ "txr_index", DATA_TYPE_UINT8, evd->mled_port_mod_module,
+ /* local props */
+ "mlxcx_state", DATA_TYPE_STRING, stname,
+ "mlxcx_error", DATA_TYPE_STRING, lename,
+ "mlxcx_error_num", DATA_TYPE_UINT8, eno,
+ NULL);
+ ddi_fm_service_impact(mlxp->mlx_dip, DDI_SERVICE_LOST);
+}
+
+/*
+ * Interrupt handler for EQ 0, which carries only async events: page
+ * requests (positive count = give pages, negative = HW returning them),
+ * port state changes, and port module errors.  Drains the EQ, acts on
+ * the accumulated page delta, then re-arms.
+ */
+static uint_t
+mlxcx_intr_0(caddr_t arg, caddr_t arg2)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mlxcx_event_queue_t *mleq = (mlxcx_event_queue_t *)arg2;
+ mlxcx_eventq_ent_t *ent;
+ mlxcx_port_t *port;
+ uint_t portn;
+ int32_t npages = 0;
+
+ mutex_enter(&mleq->mleq_mtx);
+
+ if (!(mleq->mleq_state & MLXCX_EQ_ALLOC) ||
+ !(mleq->mleq_state & MLXCX_EQ_CREATED) ||
+ (mleq->mleq_state & MLXCX_EQ_DESTROYED)) {
+ mlxcx_warn(mlxp, "int0 on bad eq state");
+ mutex_exit(&mleq->mleq_mtx);
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ ent = mlxcx_eq_next(mleq);
+ if (ent == NULL) {
+ mlxcx_warn(mlxp, "spurious int 0?");
+ mutex_exit(&mleq->mleq_mtx);
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ ASSERT(mleq->mleq_state & MLXCX_EQ_ARMED);
+ mleq->mleq_state &= ~MLXCX_EQ_ARMED;
+
+ for (; ent != NULL; ent = mlxcx_eq_next(mleq)) {
+ switch (ent->mleqe_event_type) {
+ case MLXCX_EVENT_PAGE_REQUEST:
+ /* We only drive function 0 (no SR-IOV VFs). */
+ VERIFY3U(from_be16(ent->mleqe_page_request.
+ mled_page_request_function_id), ==, 0);
+ /* Signed: negative counts mean the HW returns pages. */
+ npages += (int32_t)from_be32(ent->mleqe_page_request.
+ mled_page_request_num_pages);
+ break;
+ case MLXCX_EVENT_PORT_STATE:
+ /* HW port numbers are 1-based. */
+ portn = get_bits8(
+ ent->mleqe_port_state.mled_port_state_port_num,
+ MLXCX_EVENT_PORT_NUM) - 1;
+ if (portn >= mlxp->mlx_nports)
+ break;
+ port = &mlxp->mlx_ports[portn];
+ mlxcx_update_link_state(mlxp, port);
+ break;
+ case MLXCX_EVENT_PORT_MODULE:
+ mlxcx_report_module_error(mlxp, &ent->mleqe_port_mod);
+ break;
+ default:
+ mlxcx_warn(mlxp, "unhandled event 0x%x on int0",
+ ent->mleqe_event_type);
+ }
+ }
+
+ if (npages > 0) {
+ mlxcx_give_pages_once(mlxp, npages);
+ } else if (npages < 0) {
+ mlxcx_take_pages_once(mlxp, -1 * npages);
+ }
+
+ mlxcx_arm_eq(mlxp, mleq);
+ mutex_exit(&mleq->mleq_mtx);
+
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * Poll-mode receive for a single RX completion queue (the mac ring poll
+ * entry point ends up here).  Consumes completions until the ring is
+ * empty, teardown is flagged, or `bytelim' bytes have been chained, and
+ * returns the resulting b_next-linked mblk chain (NULL if nothing).
+ * A bytelim of 0 means no byte limit.  Caller holds mlcq_mtx and must
+ * have put the CQ into POLLING state.
+ *
+ * NOTE(review): unlike mlxcx_intr_n(), this lookup does not fall back to
+ * merging mlcq_buffers_b into mlcq_buffers when a WQE counter is not
+ * found -- confirm whether poll mode can race the WQ enqueue path the
+ * same way the interrupt path can.
+ */
+mblk_t *
+mlxcx_rx_poll(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq, size_t bytelim)
+{
+ mlxcx_buffer_t *buf;
+ mblk_t *mp, *cmp, *nmp;
+ mlxcx_completionq_ent_t *cent;
+ size_t bytes = 0;
+ boolean_t found;
+
+ ASSERT(mutex_owned(&mlcq->mlcq_mtx));
+
+ ASSERT(mlcq->mlcq_wq != NULL);
+ ASSERT3U(mlcq->mlcq_wq->mlwq_type, ==, MLXCX_WQ_TYPE_RECVQ);
+
+ if (!(mlcq->mlcq_state & MLXCX_CQ_ALLOC) ||
+ !(mlcq->mlcq_state & MLXCX_CQ_CREATED) ||
+ (mlcq->mlcq_state & MLXCX_CQ_DESTROYED) ||
+ (mlcq->mlcq_state & MLXCX_CQ_TEARDOWN)) {
+ return (NULL);
+ }
+
+ ASSERT(mlcq->mlcq_state & MLXCX_CQ_POLLING);
+
+ nmp = cmp = mp = NULL;
+
+ cent = mlxcx_cq_next(mlcq);
+ for (; cent != NULL; cent = mlxcx_cq_next(mlcq)) {
+ /*
+ * Teardown and ring stop can atomic_or this flag
+ * into our state if they want us to stop early.
+ */
+ if (mlcq->mlcq_state & MLXCX_CQ_TEARDOWN)
+ break;
+
+ if (cent->mlcqe_opcode == MLXCX_CQE_OP_REQ &&
+ cent->mlcqe_send_wqe_opcode == MLXCX_WQE_OP_NOP) {
+ /* NOP */
+ goto nextcq;
+ }
+
+ /* Match the completion back to its posted buffer by WQE idx. */
+ buf = list_head(&mlcq->mlcq_buffers);
+ found = B_FALSE;
+ while (buf != NULL) {
+ if ((buf->mlb_wqe_index & UINT16_MAX) ==
+ from_be16(cent->mlcqe_wqe_counter)) {
+ found = B_TRUE;
+ break;
+ }
+ buf = list_next(&mlcq->mlcq_buffers, buf);
+ }
+ if (!found) {
+ buf = list_head(&mlcq->mlcq_buffers);
+ mlxcx_warn(mlxp, "got completion on CQ %x but "
+ "no buffer matching wqe found: %x (first "
+ "buffer counter = %x)", mlcq->mlcq_num,
+ from_be16(cent->mlcqe_wqe_counter),
+ buf == NULL ? UINT32_MAX : buf->mlb_wqe_index);
+ mlxcx_fm_ereport(mlxp, DDI_FM_DEVICE_INVAL_STATE);
+ goto nextcq;
+ }
+ list_remove(&mlcq->mlcq_buffers, buf);
+ atomic_dec_64(&mlcq->mlcq_bufcnt);
+
+ nmp = mlxcx_rx_completion(mlxp, mlcq, cent, buf);
+ if (nmp != NULL) {
+ bytes += from_be32(cent->mlcqe_byte_cnt);
+ /* Append to the b_next chain we hand back to mac. */
+ if (cmp != NULL) {
+ cmp->b_next = nmp;
+ cmp = nmp;
+ } else {
+ mp = cmp = nmp;
+ }
+ }
+nextcq:
+ mlcq->mlcq_doorbell->mlcqd_update_ci = to_be24(mlcq->mlcq_cc);
+
+ if (bytelim != 0 && bytes > bytelim)
+ break;
+ }
+
+ return (mp);
+}
+
+/*
+ * Interrupt handler for EQs 1..n, which carry only completion events.
+ * For each EQE we look up the CQ by number, drain its completions
+ * (dispatching to the TX or RX completion path), deliver any RX mblk
+ * chain to mac, re-arm the CQ, and finally re-arm the EQ.
+ *
+ * Repeated interrupts with no EQEs trip a bad-interrupt limit: we post
+ * an ereport and disable the vector rather than spin forever.
+ */
+static uint_t
+mlxcx_intr_n(caddr_t arg, caddr_t arg2)
+{
+ mlxcx_t *mlxp = (mlxcx_t *)arg;
+ mlxcx_event_queue_t *mleq = (mlxcx_event_queue_t *)arg2;
+ mlxcx_eventq_ent_t *ent;
+ mlxcx_completionq_ent_t *cent;
+ mlxcx_completion_queue_t *mlcq, probe;
+ mlxcx_buffer_t *buf;
+ mblk_t *mp, *cmp, *nmp;
+ boolean_t found, tellmac = B_FALSE, added;
+
+ mutex_enter(&mleq->mleq_mtx);
+
+ if (!(mleq->mleq_state & MLXCX_EQ_ALLOC) ||
+ !(mleq->mleq_state & MLXCX_EQ_CREATED) ||
+ (mleq->mleq_state & MLXCX_EQ_DESTROYED)) {
+ mutex_exit(&mleq->mleq_mtx);
+ return (DDI_INTR_CLAIMED);
+ }
+
+ ent = mlxcx_eq_next(mleq);
+ if (ent == NULL) {
+ /* Too many empty interrupts in a row: give up on the vector. */
+ if (++mleq->mleq_badintrs > mlxcx_stuck_intr_count) {
+ mlxcx_fm_ereport(mlxp, DDI_FM_DEVICE_BADINT_LIMIT);
+ ddi_fm_service_impact(mlxp->mlx_dip, DDI_SERVICE_LOST);
+ (void) ddi_intr_disable(mlxp->mlx_intr_handles[
+ mleq->mleq_intr_index]);
+ }
+ mutex_exit(&mleq->mleq_mtx);
+ return (DDI_INTR_CLAIMED);
+ }
+ mleq->mleq_badintrs = 0;
+
+ ASSERT(mleq->mleq_state & MLXCX_EQ_ARMED);
+ mleq->mleq_state &= ~MLXCX_EQ_ARMED;
+
+ for (; ent != NULL; ent = mlxcx_eq_next(mleq)) {
+ /* Non-completion events here mean the device is confused. */
+ if (ent->mleqe_event_type != MLXCX_EVENT_COMPLETION) {
+ mlxcx_fm_ereport(mlxp, DDI_FM_DEVICE_INVAL_STATE);
+ ddi_fm_service_impact(mlxp->mlx_dip, DDI_SERVICE_LOST);
+ (void) ddi_intr_disable(mlxp->mlx_intr_handles[
+ mleq->mleq_intr_index]);
+ mutex_exit(&mleq->mleq_mtx);
+ return (DDI_INTR_CLAIMED);
+ }
+ ASSERT3U(ent->mleqe_event_type, ==, MLXCX_EVENT_COMPLETION);
+
+ probe.mlcq_num =
+ from_be24(ent->mleqe_completion.mled_completion_cqn);
+ mlcq = avl_find(&mleq->mleq_cqs, &probe, NULL);
+
+ if (mlcq == NULL)
+ continue;
+
+ /*
+ * The polling function might have the mutex and stop us from
+ * getting the lock here, so we increment the event counter
+ * atomically from outside.
+ *
+ * This way at the end of polling when we go back to interrupts
+ * from this CQ, the event counter is still correct.
+ *
+ * Note that mlxcx_mac_ring_intr_enable() takes the EQ lock so
+ * as to avoid any possibility of racing against us here, so we
+ * only have to consider mlxcx_rx_poll().
+ */
+ atomic_inc_32(&mlcq->mlcq_ec);
+ atomic_and_uint(&mlcq->mlcq_state, ~MLXCX_CQ_ARMED);
+
+ if (mutex_tryenter(&mlcq->mlcq_mtx) == 0) {
+ /*
+ * If we failed to take the mutex because the polling
+ * function has it, just move on. We don't want to
+ * block other CQs behind this one.
+ */
+ if (mlcq->mlcq_state & MLXCX_CQ_POLLING)
+ continue;
+ /* Otherwise we will wait. */
+ mutex_enter(&mlcq->mlcq_mtx);
+ }
+
+ if (!(mlcq->mlcq_state & MLXCX_CQ_ALLOC) ||
+ !(mlcq->mlcq_state & MLXCX_CQ_CREATED) ||
+ (mlcq->mlcq_state & MLXCX_CQ_DESTROYED) ||
+ (mlcq->mlcq_state & MLXCX_CQ_TEARDOWN) ||
+ (mlcq->mlcq_state & MLXCX_CQ_POLLING)) {
+ mutex_exit(&mlcq->mlcq_mtx);
+ continue;
+ }
+
+ nmp = cmp = mp = NULL;
+ tellmac = B_FALSE;
+
+ cent = mlxcx_cq_next(mlcq);
+ for (; cent != NULL; cent = mlxcx_cq_next(mlcq)) {
+ /*
+ * Teardown and ring stop can atomic_or this flag
+ * into our state if they want us to stop early.
+ */
+ if (mlcq->mlcq_state & MLXCX_CQ_TEARDOWN)
+ break;
+ if (mlcq->mlcq_state & MLXCX_CQ_POLLING)
+ break;
+
+ if (cent->mlcqe_opcode == MLXCX_CQE_OP_REQ &&
+ cent->mlcqe_send_wqe_opcode == MLXCX_WQE_OP_NOP) {
+ /* NOP */
+ goto nextcq;
+ }
+
+lookagain:
+ /*
+ * Generally the buffer we're looking for will be
+ * at the front of the list, so this loop won't
+ * need to look far.
+ */
+ buf = list_head(&mlcq->mlcq_buffers);
+ found = B_FALSE;
+ while (buf != NULL) {
+ if ((buf->mlb_wqe_index & UINT16_MAX) ==
+ from_be16(cent->mlcqe_wqe_counter)) {
+ found = B_TRUE;
+ break;
+ }
+ buf = list_next(&mlcq->mlcq_buffers, buf);
+ }
+ if (!found) {
+ /*
+ * If there's any buffers waiting on the
+ * buffers_b list, then merge those into
+ * the main list and have another look.
+ *
+ * The wq enqueue routines push new buffers
+ * into buffers_b so that they can avoid
+ * taking the mlcq_mtx and blocking us for
+ * every single packet.
+ */
+ added = B_FALSE;
+ mutex_enter(&mlcq->mlcq_bufbmtx);
+ if (!list_is_empty(&mlcq->mlcq_buffers_b)) {
+ list_move_tail(&mlcq->mlcq_buffers,
+ &mlcq->mlcq_buffers_b);
+ added = B_TRUE;
+ }
+ mutex_exit(&mlcq->mlcq_bufbmtx);
+ if (added)
+ goto lookagain;
+ }
+ if (!found) {
+ buf = list_head(&mlcq->mlcq_buffers);
+ mlxcx_warn(mlxp, "got completion on CQ %x but "
+ "no buffer matching wqe found: %x (first "
+ "buffer counter = %x)", mlcq->mlcq_num,
+ from_be16(cent->mlcqe_wqe_counter),
+ buf == NULL ? UINT32_MAX :
+ buf->mlb_wqe_index);
+ mlxcx_fm_ereport(mlxp,
+ DDI_FM_DEVICE_INVAL_STATE);
+ goto nextcq;
+ }
+ list_remove(&mlcq->mlcq_buffers, buf);
+ atomic_dec_64(&mlcq->mlcq_bufcnt);
+
+ switch (mlcq->mlcq_wq->mlwq_type) {
+ case MLXCX_WQ_TYPE_SENDQ:
+ mlxcx_tx_completion(mlxp, mlcq, cent, buf);
+ break;
+ case MLXCX_WQ_TYPE_RECVQ:
+ nmp = mlxcx_rx_completion(mlxp, mlcq, cent,
+ buf);
+ if (nmp != NULL) {
+ if (cmp != NULL) {
+ cmp->b_next = nmp;
+ cmp = nmp;
+ } else {
+ mp = cmp = nmp;
+ }
+ }
+ break;
+ }
+
+nextcq:
+ /*
+ * Update the "doorbell" consumer counter for the queue
+ * every time. Unlike a UAR write, this is relatively
+ * cheap and doesn't require us to go out on the bus
+ * straight away (since it's our memory).
+ */
+ mlcq->mlcq_doorbell->mlcqd_update_ci =
+ to_be24(mlcq->mlcq_cc);
+
+ /* Wake mac TX once we've drained below the low water mark. */
+ if ((mlcq->mlcq_state & MLXCX_CQ_BLOCKED_MAC) &&
+ mlcq->mlcq_bufcnt < mlcq->mlcq_buflwm) {
+ mlcq->mlcq_state &= ~MLXCX_CQ_BLOCKED_MAC;
+ tellmac = B_TRUE;
+ }
+ }
+
+ mlxcx_arm_cq(mlxp, mlcq);
+ mutex_exit(&mlcq->mlcq_mtx);
+
+ if (tellmac) {
+ mac_tx_ring_update(mlxp->mlx_mac_hdl,
+ mlcq->mlcq_mac_hdl);
+ }
+ if (mp != NULL) {
+ mac_rx_ring(mlxp->mlx_mac_hdl, mlcq->mlcq_mac_hdl,
+ mp, mlcq->mlcq_mac_gen);
+ }
+
+ /*
+ * Updating the consumer counter for an EQ requires a write
+ * to the UAR, which is possibly expensive.
+ *
+ * Try to do it only often enough to stop us wrapping around.
+ */
+ if ((mleq->mleq_cc & 0x7) == 0)
+ mlxcx_update_eq(mlxp, mleq);
+ }
+
+ mlxcx_arm_eq(mlxp, mleq);
+ mutex_exit(&mleq->mleq_mtx);
+
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * Allocate and wire up MSI-X interrupts.  The driver requires MSI-X and
+ * at least two vectors: vector 0 is dedicated to async events
+ * (mlxcx_intr_0), vectors 1..n to completion EQs (mlxcx_intr_n).  With
+ * eight or more vectors, completion EQs are alternately typed RX/TX so
+ * the two directions don't share an EQ; otherwise they stay TYPE_ANY.
+ *
+ * NOTE(review): (1) the return value of ddi_intr_get_navail() is never
+ * checked against DDI_SUCCESS before navail is used; (2) the error paths
+ * between ddi_intr_alloc() and the kmem_zalloc of mlx_eqs call
+ * mlxcx_intr_teardown() while mlx_eqs is still NULL, which that function
+ * dereferences; (3) mlx_eqs[0].mleq_mtx is never mutex_init'd here even
+ * though a handler is added for vector 0 -- presumably initialized
+ * elsewhere before the EQ is used; confirm all three.
+ */
+boolean_t
+mlxcx_intr_setup(mlxcx_t *mlxp)
+{
+ dev_info_t *dip = mlxp->mlx_dip;
+ int ret;
+ int nintrs = 0;
+ int navail = 0;
+ int types, i;
+ mlxcx_eventq_type_t eqt = MLXCX_EQ_TYPE_ANY;
+
+ ret = ddi_intr_get_supported_types(dip, &types);
+ if (ret != DDI_SUCCESS) {
+ return (B_FALSE);
+ }
+
+ if (!(types & DDI_INTR_TYPE_MSIX)) {
+ mlxcx_warn(mlxp, "MSI-X interrupts not available, but mlxcx "
+ "requires MSI-X");
+ return (B_FALSE);
+ }
+
+ ret = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSIX, &nintrs);
+ if (ret != DDI_SUCCESS) {
+ return (B_FALSE);
+ }
+ if (nintrs < 2) {
+ mlxcx_warn(mlxp, "%d MSI-X interrupts available, but mlxcx "
+ "requires 2", nintrs);
+ return (B_FALSE);
+ }
+
+ ret = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSIX, &navail);
+ if (navail < 2) {
+ mlxcx_warn(mlxp, "%d MSI-X interrupts available, but mlxcx "
+ "requires 2", navail);
+ return (B_FALSE);
+ }
+
+ mlxp->mlx_intr_size = navail * sizeof (ddi_intr_handle_t);
+ mlxp->mlx_intr_handles = kmem_alloc(mlxp->mlx_intr_size, KM_SLEEP);
+
+ ret = ddi_intr_alloc(dip, mlxp->mlx_intr_handles, DDI_INTR_TYPE_MSIX,
+ 0, navail, &mlxp->mlx_intr_count, DDI_INTR_ALLOC_NORMAL);
+ if (ret != DDI_SUCCESS) {
+ mlxcx_intr_teardown(mlxp);
+ return (B_FALSE);
+ }
+ if (mlxp->mlx_intr_count < 2) {
+ mlxcx_intr_teardown(mlxp);
+ return (B_FALSE);
+ }
+ mlxp->mlx_intr_type = DDI_INTR_TYPE_MSIX;
+
+ ret = ddi_intr_get_pri(mlxp->mlx_intr_handles[0], &mlxp->mlx_intr_pri);
+ if (ret != DDI_SUCCESS) {
+ mlxcx_intr_teardown(mlxp);
+ return (B_FALSE);
+ }
+
+ mlxp->mlx_eqs_size = mlxp->mlx_intr_count *
+ sizeof (mlxcx_event_queue_t);
+ mlxp->mlx_eqs = kmem_zalloc(mlxp->mlx_eqs_size, KM_SLEEP);
+
+ /* Vector 0 is reserved for async events. */
+ ret = ddi_intr_add_handler(mlxp->mlx_intr_handles[0], mlxcx_intr_0,
+ (caddr_t)mlxp, (caddr_t)&mlxp->mlx_eqs[0]);
+ if (ret != DDI_SUCCESS) {
+ mlxcx_intr_teardown(mlxp);
+ return (B_FALSE);
+ }
+
+ /*
+ * If we have enough interrupts, set their "type" fields so that we
+ * avoid mixing RX and TX queues on the same EQs.
+ */
+ if (mlxp->mlx_intr_count >= 8) {
+ eqt = MLXCX_EQ_TYPE_RX;
+ }
+
+ for (i = 1; i < mlxp->mlx_intr_count; ++i) {
+ mutex_init(&mlxp->mlx_eqs[i].mleq_mtx, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(mlxp->mlx_intr_pri));
+ avl_create(&mlxp->mlx_eqs[i].mleq_cqs, mlxcx_cq_compare,
+ sizeof (mlxcx_completion_queue_t),
+ offsetof(mlxcx_completion_queue_t, mlcq_eq_entry));
+ mlxp->mlx_eqs[i].mleq_intr_index = i;
+
+ mlxp->mlx_eqs[i].mleq_type = eqt;
+ /*
+ * If eqt is still ANY, just leave it set to that
+ * (no else here).
+ */
+ if (eqt == MLXCX_EQ_TYPE_RX) {
+ eqt = MLXCX_EQ_TYPE_TX;
+ } else if (eqt == MLXCX_EQ_TYPE_TX) {
+ eqt = MLXCX_EQ_TYPE_RX;
+ }
+
+ ret = ddi_intr_add_handler(mlxp->mlx_intr_handles[i],
+ mlxcx_intr_n, (caddr_t)mlxp, (caddr_t)&mlxp->mlx_eqs[i]);
+ if (ret != DDI_SUCCESS) {
+ mlxcx_intr_teardown(mlxp);
+ return (B_FALSE);
+ }
+ }
+
+ return (B_TRUE);
+}
diff --git a/usr/src/uts/common/io/mlxcx/mlxcx_reg.h b/usr/src/uts/common/io/mlxcx/mlxcx_reg.h
new file mode 100644
index 0000000000..76d0da30e7
--- /dev/null
+++ b/usr/src/uts/common/io/mlxcx/mlxcx_reg.h
@@ -0,0 +1,2481 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2020, The University of Queensland
+ * Copyright (c) 2018, Joyent, Inc.
+ */
+
+#ifndef _MLXCX_REG_H
+#define _MLXCX_REG_H
+
+#include <sys/types.h>
+#include <sys/byteorder.h>
+
+#include <mlxcx_endint.h>
+
+#if !defined(_BIT_FIELDS_HTOL) && !defined(_BIT_FIELDS_LTOH)
+#error "Need _BIT_FIELDS_HTOL or _BIT_FIELDS_LTOH"
+#endif
+
+/*
+ * Register offsets.
+ */
+
+/*
+ * Initialization segment ("ISS") register offsets, plus macros for
+ * unpacking fields from the words read at those offsets.
+ */
+#define MLXCX_ISS_FIRMWARE 0x0000
+/* Major rev in the low 16 bits, minor in the high 16 bits. */
+#define MLXCX_ISS_FW_MAJOR(x) (((x) & 0xffff))
+#define MLXCX_ISS_FW_MINOR(x) (((x) >> 16) & 0xffff)
+#define MLXCX_ISS_FW_CMD 0x0004
+/* FW sub-minor rev in the low 16 bits, command i/f rev in the high 16. */
+#define MLXCX_ISS_FW_REV(x) (((x) & 0xffff))
+#define MLXCX_ISS_CMD_REV(x) (((x) >> 16) & 0xffff)
+/* Command queue base address (high/low words). */
+#define MLXCX_ISS_CMD_HIGH 0x0010
+#define MLXCX_ISS_CMD_LOW 0x0014
+/* Log2 queue size and stride packed into the low byte of CMD_LOW. */
+#define MLXCX_ISS_CMDQ_SIZE(x) (((x) >> 4) & 0xf)
+#define MLXCX_ISS_CMDQ_STRIDE(x) ((x) & 0xf)
+
+#define MLXCX_ISS_CMD_DOORBELL 0x0018
+#define MLXCX_ISS_INIT 0x01fc
+/* Top bit of the INIT word: set while the device is still initializing. */
+#define MLXCX_ISS_INITIALIZING(x) (((x) >> 31) & 0x1)
+#define MLXCX_ISS_HEALTH_BUF 0x0200
+#define MLXCX_ISS_NO_DRAM_NIC 0x0240
+#define MLXCX_ISS_TIMER 0x1000
+#define MLXCX_ISS_HEALTH_COUNT 0x1010
+#define MLXCX_ISS_HEALTH_SYND 0x1013
+
+/* Bytes of input/output data carried inline in a command queue entry. */
+#define MLXCX_CMD_INLINE_INPUT_LEN 16
+#define MLXCX_CMD_INLINE_OUTPUT_LEN 16
+
+/* Payload bytes in each command mailbox block. */
+#define MLXCX_CMD_MAILBOX_LEN 512
+
+#define MLXCX_CMD_TRANSPORT_PCI 7
+/* Bit 0 of the command entry status byte: set while hardware owns it. */
+#define MLXCX_CMD_HW_OWNED 0x01
+/* The remaining 7 bits of the status byte hold the delivery status. */
+#define MLXCX_CMD_STATUS(x) ((x) >> 1)
+
+/* Doorbell offsets within a UAR page for arming CQs and EQs. */
+#define MLXCX_UAR_CQ_ARM 0x0020
+#define MLXCX_UAR_EQ_ARM 0x0040
+#define MLXCX_UAR_EQ_NOARM 0x0048
+
+/* Number of blue flame reg pairs per UAR */
+#define MLXCX_BF_PER_UAR 2
+#define MLXCX_BF_PER_UAR_MASK 0x1
+#define MLXCX_BF_SIZE 0x100
+#define MLXCX_BF_BASE 0x0800
+
+/* Fields of the EQ arm doorbell word: EQ number and consumer index. */
+/* CSTYLED */
+#define MLXCX_EQ_ARM_EQN (bitdef_t){24, 0xff000000}
+/* CSTYLED */
+#define MLXCX_EQ_ARM_CI (bitdef_t){0, 0x00ffffff}
+
+/*
+ * Hardware structure that is used to represent a command.
+ */
+#pragma pack(1)
+/*
+ * A command queue entry. Up to MLXCX_CMD_INLINE_INPUT_LEN bytes of input
+ * (and MLXCX_CMD_INLINE_OUTPUT_LEN of output) travel inline in mce_input/
+ * mce_output; longer payloads presumably continue through the mailbox
+ * chains pointed at by mce_in_mbox/mce_out_mbox (lengths in mce_*_length).
+ * mce_status carries the HW-owned bit and delivery status (see
+ * MLXCX_CMD_HW_OWNED / MLXCX_CMD_STATUS above).
+ */
+typedef struct {
+ uint8_t mce_type;
+ uint8_t mce_rsvd[3];
+ uint32be_t mce_in_length;
+ uint64be_t mce_in_mbox;
+ uint8_t mce_input[MLXCX_CMD_INLINE_INPUT_LEN];
+ uint8_t mce_output[MLXCX_CMD_INLINE_OUTPUT_LEN];
+ uint64be_t mce_out_mbox;
+ uint32be_t mce_out_length;
+ uint8_t mce_token;
+ uint8_t mce_sig;
+ uint8_t mce_rsvd1;
+ uint8_t mce_status;
+} mlxcx_cmd_ent_t;
+
+/*
+ * One block in a command mailbox chain: MLXCX_CMD_MAILBOX_LEN bytes of
+ * payload, a pointer to the next block (mlxb_nextp) and this block's
+ * position in the chain (mlxb_blockno).
+ */
+typedef struct {
+ uint8_t mlxb_data[MLXCX_CMD_MAILBOX_LEN];
+ uint8_t mlxb_rsvd[48];
+ uint64be_t mlxb_nextp;
+ uint32be_t mlxb_blockno;
+ uint8_t mlxb_rsvd1;
+ uint8_t mlxb_token;
+ uint8_t mlxb_ctrl_sig;
+ uint8_t mlxb_sig;
+} mlxcx_cmd_mailbox_t;
+
+typedef struct {
+ uint8_t mled_page_request_rsvd[2];
+ uint16be_t mled_page_request_function_id;
+ uint32be_t mled_page_request_num_pages;
+} mlxcx_evdata_page_request_t;
+
+/* CSTYLED */
+#define MLXCX_EVENT_PORT_NUM (bitdef_t){ .bit_shift = 4, .bit_mask = 0xF0 }
+
+typedef struct {
+ uint8_t mled_port_state_rsvd[8];
+ bits8_t mled_port_state_port_num;
+} mlxcx_evdata_port_state_t;
+
+typedef enum {
+ MLXCX_MODULE_INITIALIZING = 0x0,
+ MLXCX_MODULE_PLUGGED = 0x1,
+ MLXCX_MODULE_UNPLUGGED = 0x2,
+ MLXCX_MODULE_ERROR = 0x3
+} mlxcx_module_status_t;
+
+typedef enum {
+ MLXCX_MODULE_ERR_POWER_BUDGET = 0x0,
+ MLXCX_MODULE_ERR_LONG_RANGE = 0x1,
+ MLXCX_MODULE_ERR_BUS_STUCK = 0x2,
+ MLXCX_MODULE_ERR_NO_EEPROM = 0x3,
+ MLXCX_MODULE_ERR_ENFORCEMENT = 0x4,
+ MLXCX_MODULE_ERR_UNKNOWN_IDENT = 0x5,
+ MLXCX_MODULE_ERR_HIGH_TEMP = 0x6,
+ MLXCX_MODULE_ERR_CABLE_SHORTED = 0x7,
+} mlxcx_module_error_type_t;
+
+typedef struct {
+ uint8_t mled_port_mod_rsvd;
+ uint8_t mled_port_mod_module;
+ uint8_t mled_port_mod_rsvd2;
+ uint8_t mled_port_mod_module_status;
+ uint8_t mled_port_mod_rsvd3[2];
+ uint8_t mled_port_mod_error_type;
+ uint8_t mled_port_mod_rsvd4;
+} mlxcx_evdata_port_mod_t;
+
+typedef struct {
+ uint8_t mled_completion_rsvd[25];
+ uint24be_t mled_completion_cqn;
+} mlxcx_evdata_completion_t;
+
+typedef enum {
+ MLXCX_EV_QUEUE_TYPE_QP = 0x0,
+ MLXCX_EV_QUEUE_TYPE_RQ = 0x1,
+ MLXCX_EV_QUEUE_TYPE_SQ = 0x2,
+} mlxcx_evdata_queue_type_t;
+
+typedef struct {
+ uint8_t mled_queue_rsvd[20];
+ uint8_t mled_queue_type;
+ uint8_t mled_queue_rsvd2[4];
+ uint24be_t mled_queue_num;
+} mlxcx_evdata_queue_t;
+
+#define MLXCX_EQ_OWNER_INIT 1
+
+/*
+ * An event queue entry. The anonymous union carries the per-event payload;
+ * which member is valid is selected by mleqe_event_type (see mlxcx_event_t
+ * below). mleqe_owner is presumably the ownership/valid toggle used when
+ * scanning the ring (cf. MLXCX_EQ_OWNER_INIT above) — confirm against the
+ * EQ processing code.
+ */
+typedef struct {
+ uint8_t mleqe_rsvd[1];
+ uint8_t mleqe_event_type;
+ uint8_t mleqe_rsvd2[1];
+ uint8_t mleqe_event_sub_type;
+ uint8_t mleqe_rsvd3[28];
+ union {
+ uint8_t mleqe_unknown_data[28];
+ mlxcx_evdata_completion_t mleqe_completion;
+ mlxcx_evdata_page_request_t mleqe_page_request;
+ mlxcx_evdata_port_state_t mleqe_port_state;
+ mlxcx_evdata_port_mod_t mleqe_port_mod;
+ mlxcx_evdata_queue_t mleqe_queue;
+ };
+ uint8_t mleqe_rsvd4[2];
+ uint8_t mleqe_signature;
+ uint8_t mleqe_owner;
+} mlxcx_eventq_ent_t;
+
+typedef enum {
+ MLXCX_CQE_L3_HDR_NONE = 0x0,
+ MLXCX_CQE_L3_HDR_RCV_BUF = 0x1,
+ MLXCX_CQE_L3_HDR_CQE = 0x2,
+} mlxcx_cqe_l3_hdr_placement_t;
+
+typedef enum {
+ MLXCX_CQE_CSFLAGS_L4_OK = 1 << 2,
+ MLXCX_CQE_CSFLAGS_L3_OK = 1 << 1,
+ MLXCX_CQE_CSFLAGS_L2_OK = 1 << 0,
+} mlxcx_cqe_csflags_t;
+
+typedef enum {
+ MLXCX_CQE_L4_TYPE_NONE = 0,
+ MLXCX_CQE_L4_TYPE_TCP = 1,
+ MLXCX_CQE_L4_TYPE_UDP = 2,
+ MLXCX_CQE_L4_TYPE_TCP_EMPTY_ACK = 3,
+ MLXCX_CQE_L4_TYPE_TCP_ACK = 4,
+} mlxcx_cqe_l4_hdr_type_t;
+
+typedef enum {
+ MLXCX_CQE_L3_TYPE_NONE = 0,
+ MLXCX_CQE_L3_TYPE_IPv6 = 1,
+ MLXCX_CQE_L3_TYPE_IPv4 = 2,
+} mlxcx_cqe_l3_hdr_type_t;
+
+typedef enum {
+ MLXCX_CQE_RX_HASH_NONE = 0,
+ MLXCX_CQE_RX_HASH_IPv4 = 1,
+ MLXCX_CQE_RX_HASH_IPv6 = 2,
+ MLXCX_CQE_RX_HASH_IPSEC_SPI = 3,
+} mlxcx_cqe_rx_hash_type_t;
+/* BEGIN CSTYLED */
+#define MLXCX_CQE_RX_HASH_IP_SRC (bitdef_t){0, 0x3}
+#define MLXCX_CQE_RX_HASH_IP_DEST (bitdef_t){2, (0x3 << 2)}
+#define MLXCX_CQE_RX_HASH_L4_SRC (bitdef_t){4, (0x3 << 4)}
+#define MLXCX_CQE_RX_HASH_L4_DEST (bitdef_t){6, (0x3 << 6)}
+/* END CSTYLED */
+
+typedef enum {
+ MLXCX_CQE_OP_REQ = 0x0,
+ MLXCX_CQE_OP_RESP_RDMA = 0x1,
+ MLXCX_CQE_OP_RESP = 0x2,
+ MLXCX_CQE_OP_RESP_IMMEDIATE = 0x3,
+ MLXCX_CQE_OP_RESP_INVALIDATE = 0x4,
+ MLXCX_CQE_OP_RESIZE_CQ = 0x5,
+ MLXCX_CQE_OP_SIG_ERR = 0x12,
+ MLXCX_CQE_OP_REQ_ERR = 0xd,
+ MLXCX_CQE_OP_RESP_ERR = 0xe,
+ MLXCX_CQE_OP_INVALID = 0xf
+} mlxcx_cqe_opcode_t;
+
+typedef enum {
+ MLXCX_CQE_FORMAT_BASIC = 0,
+ MLXCX_CQE_FORMAT_INLINE_32 = 1,
+ MLXCX_CQE_FORMAT_INLINE_64 = 2,
+ MLXCX_CQE_FORMAT_COMPRESSED = 3,
+} mlxcx_cqe_format_t;
+
+typedef enum {
+ MLXCX_CQE_OWNER_INIT = 1
+} mlxcx_cqe_owner_t;
+
+typedef enum {
+ MLXCX_VLAN_TYPE_NONE,
+ MLXCX_VLAN_TYPE_CVLAN,
+ MLXCX_VLAN_TYPE_SVLAN,
+} mlxcx_vlan_type_t;
+
+typedef enum {
+ MLXCX_CQ_ERR_LOCAL_LENGTH = 0x1,
+ MLXCX_CQ_ERR_LOCAL_QP_OP = 0x2,
+ MLXCX_CQ_ERR_LOCAL_PROTECTION = 0x4,
+ MLXCX_CQ_ERR_WR_FLUSHED = 0x5,
+ MLXCX_CQ_ERR_MEM_WINDOW_BIND = 0x6,
+ MLXCX_CQ_ERR_BAD_RESPONSE = 0x10,
+ MLXCX_CQ_ERR_LOCAL_ACCESS = 0x11,
+ MLXCX_CQ_ERR_XPORT_RETRY_CTR = 0x15,
+ MLXCX_CQ_ERR_RNR_RETRY_CTR = 0x16,
+ MLXCX_CQ_ERR_ABORTED = 0x22
+} mlxcx_cq_error_syndrome_t;
+
+typedef struct {
+ uint8_t mlcqee_rsvd[2];
+ uint16be_t mlcqee_wqe_id;
+ uint8_t mlcqee_rsvd2[29];
+ uint24be_t mlcqee_user_index;
+ uint8_t mlcqee_rsvd3[8];
+ uint32be_t mlcqee_byte_cnt;
+ uint8_t mlcqee_rsvd4[6];
+ uint8_t mlcqee_vendor_error_syndrome;
+ uint8_t mlcqee_syndrome;
+ uint8_t mlcqee_wqe_opcode;
+ uint24be_t mlcqee_flow_tag;
+ uint16be_t mlcqee_wqe_counter;
+ uint8_t mlcqee_signature;
+ struct {
+#if defined(_BIT_FIELDS_HTOL)
+ uint8_t mlcqe_opcode:4;
+ uint8_t mlcqe_rsvd5:3;
+ uint8_t mlcqe_owner:1;
+#elif defined(_BIT_FIELDS_LTOH)
+ uint8_t mlcqe_owner:1;
+ uint8_t mlcqe_rsvd5:3;
+ uint8_t mlcqe_opcode:4;
+#endif
+ };
+} mlxcx_completionq_error_ent_t;
+
+typedef struct {
+ uint8_t mlcqe_tunnel_flags;
+ uint8_t mlcqe_rsvd[3];
+ uint8_t mlcqe_lro_flags;
+ uint8_t mlcqe_lro_min_ttl;
+ uint16be_t mlcqe_lro_tcp_win;
+ uint32be_t mlcqe_lro_ack_seq_num;
+ uint32be_t mlcqe_rx_hash_result;
+ bits8_t mlcqe_rx_hash_type;
+ uint8_t mlcqe_ml_path;
+ uint8_t mlcqe_rsvd2[2];
+ uint16be_t mlcqe_checksum;
+ uint16be_t mlcqe_slid_smac_lo;
+ struct {
+#if defined(_BIT_FIELDS_HTOL)
+ uint8_t mlcqe_rsvd3:1;
+ uint8_t mlcqe_force_loopback:1;
+ uint8_t mlcqe_l3_hdr:2;
+ uint8_t mlcqe_sl_roce_pktype:4;
+#elif defined(_BIT_FIELDS_LTOH)
+ uint8_t mlcqe_sl_roce_pktype:4;
+ uint8_t mlcqe_l3_hdr:2;
+ uint8_t mlcqe_force_loopback:1;
+ uint8_t mlcqe_rsvd3:1;
+#endif
+ };
+ uint24be_t mlcqe_rqpn;
+ bits8_t mlcqe_csflags;
+ struct {
+#if defined(_BIT_FIELDS_HTOL)
+ uint8_t mlcqe_ip_frag:1;
+ uint8_t mlcqe_l4_hdr_type:3;
+ uint8_t mlcqe_l3_hdr_type:2;
+ uint8_t mlcqe_ip_ext_opts:1;
+ uint8_t mlcqe_cv:1;
+#elif defined(_BIT_FIELDS_LTOH)
+ uint8_t mlcqe_cv:1;
+ uint8_t mlcqe_ip_ext_opts:1;
+ uint8_t mlcqe_l3_hdr_type:2;
+ uint8_t mlcqe_l4_hdr_type:3;
+ uint8_t mlcqe_ip_frag:1;
+#endif
+ };
+ uint16be_t mlcqe_up_cfi_vid;
+ uint8_t mlcqe_lro_num_seg;
+ uint24be_t mlcqe_user_index;
+ uint32be_t mlcqe_immediate;
+ uint8_t mlcqe_rsvd4[4];
+ uint32be_t mlcqe_byte_cnt;
+ union {
+ struct {
+ uint32be_t mlcqe_lro_timestamp_value;
+ uint32be_t mlcqe_lro_timestamp_echo;
+ };
+ uint64be_t mlcqe_timestamp;
+ };
+ union {
+ uint8_t mlcqe_rx_drop_counter;
+ uint8_t mlcqe_send_wqe_opcode;
+ };
+ uint24be_t mlcqe_flow_tag;
+ uint16be_t mlcqe_wqe_counter;
+ uint8_t mlcqe_signature;
+ struct {
+#if defined(_BIT_FIELDS_HTOL)
+ uint8_t mlcqe_opcode:4;
+ uint8_t mlcqe_format:2;
+ uint8_t mlcqe_se:1;
+ uint8_t mlcqe_owner:1;
+#elif defined(_BIT_FIELDS_LTOH)
+ uint8_t mlcqe_owner:1;
+ uint8_t mlcqe_se:1;
+ uint8_t mlcqe_format:2;
+ uint8_t mlcqe_opcode:4;
+#endif
+ };
+} mlxcx_completionq_ent_t;
+
+typedef struct {
+ uint8_t mlcqe_data[64];
+ mlxcx_completionq_ent_t mlcqe_ent;
+} mlxcx_completionq_ent128_t;
+
+typedef enum {
+ MLXCX_WQE_OP_NOP = 0x00,
+ MLXCX_WQE_OP_SEND_INVALIDATE = 0x01,
+ MLXCX_WQE_OP_RDMA_W = 0x08,
+ MLXCX_WQE_OP_RDMA_W_IMMEDIATE = 0x09,
+ MLXCX_WQE_OP_SEND = 0x0A,
+ MLXCX_WQE_OP_SEND_IMMEDIATE = 0x0B,
+ MLXCX_WQE_OP_LSO = 0x0E,
+ MLXCX_WQE_OP_WAIT = 0x0F,
+ MLXCX_WQE_OP_RDMA_R = 0x10,
+} mlxcx_wqe_opcode_t;
+
+#define MLXCX_SQE_MAX_DS ((1 << 6) - 1)
+#define MLXCX_SQE_MAX_PTRS 61
+
+typedef enum {
+ MLXCX_SQE_FENCE_NONE = 0x0,
+ MLXCX_SQE_FENCE_WAIT_OTHERS = 0x1,
+ MLXCX_SQE_FENCE_START = 0x2,
+ MLXCX_SQE_FENCE_STRONG_ORDER = 0x3,
+ MLXCX_SQE_FENCE_START_WAIT = 0x4
+} mlxcx_sqe_fence_mode_t;
+
+typedef enum {
+ MLXCX_SQE_CQE_ON_EACH_ERROR = 0x0,
+ MLXCX_SQE_CQE_ON_FIRST_ERROR = 0x1,
+ MLXCX_SQE_CQE_ALWAYS = 0x2,
+ MLXCX_SQE_CQE_ALWAYS_PLUS_EQE = 0x3
+} mlxcx_sqe_completion_mode_t;
+
+#define MLXCX_SQE_SOLICITED (1 << 1)
+/* CSTYLED */
+#define MLXCX_SQE_FENCE_MODE (bitdef_t){5, 0xe0}
+/* CSTYLED */
+#define MLXCX_SQE_COMPLETION_MODE (bitdef_t){2, 0x0c}
+
+typedef struct {
+ uint8_t mlcs_opcode_mod;
+ uint16be_t mlcs_wqe_index;
+ uint8_t mlcs_opcode;
+ uint24be_t mlcs_qp_or_sq;
+ uint8_t mlcs_ds;
+ uint8_t mlcs_signature;
+ uint8_t mlcs_rsvd2[2];
+ bits8_t mlcs_flags;
+ uint32be_t mlcs_immediate;
+} mlxcx_wqe_control_seg_t;
+
+typedef enum {
+ MLXCX_SQE_ETH_CSFLAG_L4_CHECKSUM = 1 << 7,
+ MLXCX_SQE_ETH_CSFLAG_L3_CHECKSUM = 1 << 6,
+ MLXCX_SQE_ETH_CSFLAG_L4_INNER_CHECKSUM = 1 << 5,
+ MLXCX_SQE_ETH_CSFLAG_L3_INNER_CHECKSUM = 1 << 4,
+} mlxcx_wqe_eth_flags_t;
+
+/* CSTYLED */
+#define MLXCX_SQE_ETH_INLINE_HDR_SZ (bitdef_t){0, 0x03ff}
+#define MLXCX_SQE_ETH_SZFLAG_VLAN (1 << 15)
+#define MLXCX_MAX_INLINE_HEADERLEN 64
+
+/*
+ * The Ethernet segment of a send WQE: checksum-offload flags
+ * (mlxcx_wqe_eth_flags_t), MSS, the inline-header size/flags word
+ * (MLXCX_SQE_ETH_INLINE_HDR_SZ / MLXCX_SQE_ETH_SZFLAG_VLAN), and the
+ * first 18 bytes of inline packet headers.
+ */
+typedef struct {
+ uint8_t mles_rsvd[4];
+ bits8_t mles_csflags;
+ uint8_t mles_rsvd2[1];
+ /*
+ * NOTE(review): plain host-order uint16_t while every sibling field
+ * uses the be_t types — confirm this is intentional (e.g. unused by
+ * the driver) rather than a missing uint16be_t.
+ */
+ uint16_t mles_mss;
+ uint8_t mles_rsvd3[4];
+ bits16_t mles_szflags;
+ uint8_t mles_inline_headers[18];
+} mlxcx_wqe_eth_seg_t;
+
+typedef struct {
+ uint32be_t mlds_byte_count;
+ uint32be_t mlds_lkey;
+ uint64be_t mlds_address;
+} mlxcx_wqe_data_seg_t;
+
+#define MLXCX_SENDQ_STRIDE_SHIFT 6
+
+typedef struct {
+ mlxcx_wqe_control_seg_t mlsqe_control;
+ mlxcx_wqe_eth_seg_t mlsqe_eth;
+ mlxcx_wqe_data_seg_t mlsqe_data[1];
+} mlxcx_sendq_ent_t;
+
+typedef struct {
+ uint64be_t mlsqbf_qwords[8];
+} mlxcx_sendq_bf_t;
+
+typedef struct {
+ mlxcx_wqe_data_seg_t mlsqe_data[4];
+} mlxcx_sendq_extra_ent_t;
+
+#define MLXCX_RECVQ_STRIDE_SHIFT 7
+/*
+ * Each mlxcx_wqe_data_seg_t is 1<<4 bytes long (there's a CTASSERT to verify
+ * this in mlxcx_cmd.c), so the number of pointers is 1 << (shift - 4).
+ */
+#define MLXCX_RECVQ_MAX_PTRS (1 << (MLXCX_RECVQ_STRIDE_SHIFT - 4))
+typedef struct {
+ mlxcx_wqe_data_seg_t mlrqe_data[MLXCX_RECVQ_MAX_PTRS];
+} mlxcx_recvq_ent_t;
+
+/* CSTYLED */
+#define MLXCX_CQ_ARM_CI (bitdef_t){ .bit_shift = 0, \
+ .bit_mask = 0x00ffffff }
+/* CSTYLED */
+#define MLXCX_CQ_ARM_SEQ (bitdef_t){ .bit_shift = 28, \
+ .bit_mask = 0x30000000 }
+#define MLXCX_CQ_ARM_SOLICITED (1 << 24)
+
+/*
+ * CQ doorbell record: the consumer-index update plus the arm word
+ * (sequence/command and CI fields, see MLXCX_CQ_ARM_* above).
+ */
+typedef struct {
+ uint8_t mlcqd_rsvd;
+ uint24be_t mlcqd_update_ci;
+ bits32_t mlcqd_arm_ci;
+} mlxcx_completionq_doorbell_t;
+
+/*
+ * Work queue doorbell record: separate counters for the receive and send
+ * rings.
+ */
+typedef struct {
+ uint16be_t mlwqd_rsvd;
+ uint16be_t mlwqd_recv_counter;
+ uint16be_t mlwqd_rsvd2;
+ uint16be_t mlwqd_send_counter;
+} mlxcx_workq_doorbell_t;
+
+#define MLXCX_EQ_STATUS_OK (0x0 << 4)
+#define MLXCX_EQ_STATUS_WRITE_FAILURE (0xA << 4)
+
+#define MLXCX_EQ_OI (1 << 1)
+#define MLXCX_EQ_EC (1 << 2)
+
+#define MLXCX_EQ_ST_ARMED 0x9
+#define MLXCX_EQ_ST_FIRED 0xA
+
+/* CSTYLED */
+#define MLXCX_EQ_LOG_PAGE_SIZE (bitdef_t){ .bit_shift = 24, \
+ .bit_mask = 0x1F000000 }
+
+typedef struct {
+ uint8_t mleqc_status;
+ uint8_t mleqc_ecoi;
+ uint8_t mleqc_state;
+ uint8_t mleqc_rsvd[7];
+ uint16be_t mleqc_page_offset;
+ uint8_t mleqc_log_eq_size;
+ uint24be_t mleqc_uar_page;
+ uint8_t mleqc_rsvd3[7];
+ uint8_t mleqc_intr;
+ uint32be_t mleqc_log_page;
+ uint8_t mleqc_rsvd4[13];
+ uint24be_t mleqc_consumer_counter;
+ uint8_t mleqc_rsvd5;
+ uint24be_t mleqc_producer_counter;
+ uint8_t mleqc_rsvd6[16];
+} mlxcx_eventq_ctx_t;
+
+typedef enum {
+ MLXCX_CQC_CQE_SIZE_64 = 0x0,
+ MLXCX_CQC_CQE_SIZE_128 = 0x1,
+} mlxcx_cqc_cqe_sz_t;
+
+typedef enum {
+ MLXCX_CQC_STATUS_OK = 0x0,
+ MLXCX_CQC_STATUS_OVERFLOW = 0x9,
+ MLXCX_CQC_STATUS_WRITE_FAIL = 0xA,
+ MLXCX_CQC_STATUS_INVALID = 0xF
+} mlxcx_cqc_status_t;
+
+typedef enum {
+ MLXCX_CQC_STATE_ARMED_SOLICITED = 0x6,
+ MLXCX_CQC_STATE_ARMED = 0x9,
+ MLXCX_CQC_STATE_FIRED = 0xA
+} mlxcx_cqc_state_t;
+
+/* CSTYLED */
+#define MLXCX_CQ_CTX_STATUS (bitdef_t){28, 0xf0000000}
+/* CSTYLED */
+#define MLXCX_CQ_CTX_CQE_SZ (bitdef_t){21, 0x00e00000}
+/* CSTYLED */
+#define MLXCX_CQ_CTX_PERIOD_MODE (bitdef_t){15, 0x00018000}
+/* CSTYLED */
+#define MLXCX_CQ_CTX_MINI_CQE_FORMAT (bitdef_t){12, 0x00003000}
+/* CSTYLED */
+#define MLXCX_CQ_CTX_STATE (bitdef_t){8, 0x00000f00}
+
+typedef struct mlxcx_completionq_ctx {
+ bits32_t mlcqc_flags;
+
+ uint8_t mlcqc_rsvd4[4];
+
+ uint8_t mlcqc_rsvd5[2];
+ uint16be_t mlcqc_page_offset;
+
+ uint8_t mlcqc_log_cq_size;
+ uint24be_t mlcqc_uar_page;
+
+ uint16be_t mlcqc_cq_period;
+ uint16be_t mlcqc_cq_max_count;
+
+ uint8_t mlcqc_rsvd7[3];
+ uint8_t mlcqc_eqn;
+
+ uint8_t mlcqc_log_page_size;
+ uint8_t mlcqc_rsvd8[3];
+
+ uint8_t mlcqc_rsvd9[4];
+
+ uint8_t mlcqc_rsvd10;
+ uint24be_t mlcqc_last_notified_index;
+ uint8_t mlcqc_rsvd11;
+ uint24be_t mlcqc_last_solicit_index;
+ uint8_t mlcqc_rsvd12;
+ uint24be_t mlcqc_consumer_counter;
+ uint8_t mlcqc_rsvd13;
+ uint24be_t mlcqc_producer_counter;
+
+ uint8_t mlcqc_rsvd14[8];
+
+ uint64be_t mlcqc_dbr_addr;
+} mlxcx_completionq_ctx_t;
+
+typedef enum {
+ MLXCX_WORKQ_TYPE_LINKED_LIST = 0x0,
+ MLXCX_WORKQ_TYPE_CYCLIC = 0x1,
+ MLXCX_WORKQ_TYPE_LINKED_LIST_STRIDING = 0x2,
+ MLXCX_WORKQ_TYPE_CYCLIC_STRIDING = 0x3
+} mlxcx_workq_ctx_type_t;
+
+typedef enum {
+ MLXCX_WORKQ_END_PAD_NONE = 0x0,
+ MLXCX_WORKQ_END_PAD_ALIGN = 0x1
+} mlxcx_workq_end_padding_t;
+
+/* CSTYLED */
+#define MLXCX_WORKQ_CTX_TYPE (bitdef_t){ \
+ .bit_shift = 28, \
+ .bit_mask = 0xf0000000 }
+#define MLXCX_WORKQ_CTX_SIGNATURE (1 << 27)
+#define MLXCX_WORKQ_CTX_CD_SLAVE (1 << 24)
+/* CSTYLED */
+#define MLXCX_WORKQ_CTX_END_PADDING (bitdef_t){ \
+ .bit_shift = 25, \
+ .bit_mask = 0x06000000 }
+
+#define MLXCX_WORKQ_CTX_MAX_ADDRESSES 128
+
+typedef struct mlxcx_workq_ctx {
+ bits32_t mlwqc_flags;
+ uint8_t mlwqc_rsvd[2];
+ uint16be_t mlwqc_lwm;
+ uint8_t mlwqc_rsvd2;
+ uint24be_t mlwqc_pd;
+ uint8_t mlwqc_rsvd3;
+ uint24be_t mlwqc_uar_page;
+ uint64be_t mlwqc_dbr_addr;
+ uint32be_t mlwqc_hw_counter;
+ uint32be_t mlwqc_sw_counter;
+ uint8_t mlwqc_rsvd4;
+ uint8_t mlwqc_log_wq_stride;
+ uint8_t mlwqc_log_wq_pg_sz;
+ uint8_t mlwqc_log_wq_sz;
+ uint8_t mlwqc_rsvd5[2];
+ bits16_t mlwqc_strides;
+ uint8_t mlwqc_rsvd6[152];
+ uint64be_t mlwqc_pas[MLXCX_WORKQ_CTX_MAX_ADDRESSES];
+} mlxcx_workq_ctx_t;
+
+#define MLXCX_RQ_FLAGS_RLKEY (1UL << 31)
+#define MLXCX_RQ_FLAGS_SCATTER_FCS (1 << 29)
+#define MLXCX_RQ_FLAGS_VLAN_STRIP_DISABLE (1 << 28)
+#define MLXCX_RQ_FLAGS_FLUSH_IN_ERROR (1 << 18)
+/* CSTYLED */
+#define MLXCX_RQ_MEM_RQ_TYPE (bitdef_t){ \
+ .bit_shift = 24, \
+ .bit_mask = 0x0f000000 }
+/* CSTYLED */
+#define MLXCX_RQ_STATE (bitdef_t){ \
+ .bit_shift = 20, \
+ .bit_mask = 0x00f00000 }
+
+typedef struct mlxcx_rq_ctx {
+ bits32_t mlrqc_flags;
+ uint8_t mlrqc_rsvd;
+ uint24be_t mlrqc_user_index;
+ uint8_t mlrqc_rsvd2;
+ uint24be_t mlrqc_cqn;
+ uint8_t mlrqc_counter_set_id;
+ uint8_t mlrqc_rsvd3[4];
+ uint24be_t mlrqc_rmpn;
+ uint8_t mlrqc_rsvd4[28];
+ mlxcx_workq_ctx_t mlrqc_wq;
+} mlxcx_rq_ctx_t;
+
+#define MLXCX_SQ_FLAGS_RLKEY (1UL << 31)
+#define MLXCX_SQ_FLAGS_CD_MASTER (1 << 30)
+#define MLXCX_SQ_FLAGS_FRE (1 << 29)
+#define MLXCX_SQ_FLAGS_FLUSH_IN_ERROR (1 << 28)
+#define MLXCX_SQ_FLAGS_ALLOW_MULTI_PKT (1 << 27)
+#define MLXCX_SQ_FLAGS_REG_UMR (1 << 19)
+
+typedef enum {
+ MLXCX_ETH_CAP_INLINE_REQUIRE_L2 = 0,
+ MLXCX_ETH_CAP_INLINE_VPORT_CTX = 1,
+ MLXCX_ETH_CAP_INLINE_NOT_REQUIRED = 2
+} mlxcx_eth_cap_inline_mode_t;
+
+typedef enum {
+ MLXCX_ETH_INLINE_NONE = 0,
+ MLXCX_ETH_INLINE_L2 = 1,
+ MLXCX_ETH_INLINE_L3 = 2,
+ MLXCX_ETH_INLINE_L4 = 3,
+ MLXCX_ETH_INLINE_INNER_L2 = 5,
+ MLXCX_ETH_INLINE_INNER_L3 = 6,
+ MLXCX_ETH_INLINE_INNER_L4 = 7
+} mlxcx_eth_inline_mode_t;
+
+/* CSTYLED */
+#define MLXCX_SQ_MIN_WQE_INLINE (bitdef_t){ \
+ .bit_shift = 24, \
+ .bit_mask = 0x07000000 }
+/* CSTYLED */
+#define MLXCX_SQ_STATE (bitdef_t){ \
+ .bit_shift = 20, \
+ .bit_mask = 0x00f00000 }
+
+typedef struct mlxcx_sq_ctx {
+ bits32_t mlsqc_flags;
+ uint8_t mlsqc_rsvd;
+ uint24be_t mlsqc_user_index;
+ uint8_t mlsqc_rsvd2;
+ uint24be_t mlsqc_cqn;
+ uint8_t mlsqc_rsvd3[18];
+ uint16be_t mlsqc_packet_pacing_rate_limit_index;
+ uint16be_t mlsqc_tis_lst_sz;
+ uint8_t mlsqc_rsvd4[11];
+ uint24be_t mlsqc_tis_num;
+ mlxcx_workq_ctx_t mlsqc_wq;
+} mlxcx_sq_ctx_t;
+
+#define MLXCX_NIC_VPORT_CTX_MAX_ADDRESSES 64
+
+typedef enum {
+ MLXCX_VPORT_PROMISC_UCAST = 1 << 15,
+ MLXCX_VPORT_PROMISC_MCAST = 1 << 14,
+ MLXCX_VPORT_PROMISC_ALL = 1 << 13
+} mlxcx_nic_vport_ctx_promisc_t;
+
+#define MLXCX_VPORT_LIST_TYPE_MASK 0x07
+#define MLXCX_VPORT_LIST_TYPE_SHIFT 0
+
+/* CSTYLED */
+#define MLXCX_VPORT_CTX_MIN_WQE_INLINE (bitdef_t){56, 0x0700000000000000}
+
+typedef struct {
+ bits64_t mlnvc_flags;
+ uint8_t mlnvc_rsvd[28];
+ uint8_t mlnvc_rsvd2[2];
+ uint16be_t mlnvc_mtu;
+ uint64be_t mlnvc_system_image_guid;
+ uint64be_t mlnvc_port_guid;
+ uint64be_t mlnvc_node_guid;
+ uint8_t mlnvc_rsvd3[40];
+ uint16be_t mlnvc_qkey_violation_counter;
+ uint8_t mlnvc_rsvd4[2];
+ uint8_t mlnvc_rsvd5[132];
+ bits16_t mlnvc_promisc_list_type;
+ uint16be_t mlnvc_allowed_list_size;
+ uint8_t mlnvc_rsvd6[2];
+ uint8_t mlnvc_permanent_address[6];
+ uint8_t mlnvc_rsvd7[4];
+ uint64be_t mlnvc_address[MLXCX_NIC_VPORT_CTX_MAX_ADDRESSES];
+} mlxcx_nic_vport_ctx_t;
+
+typedef struct {
+ uint8_t mlftc_flags;
+ uint8_t mlftc_level;
+ uint8_t mlftc_rsvd;
+ uint8_t mlftc_log_size;
+ uint8_t mlftc_rsvd2;
+ uint24be_t mlftc_table_miss_id;
+ uint8_t mlftc_rsvd3[4];
+ uint8_t mlftc_rsvd4[28];
+} mlxcx_flow_table_ctx_t;
+
+/*
+ * Bit definitions for the packed VLAN/IP/TCP fields of
+ * mlxcx_flow_header_match_t below.
+ *
+ * NOTE(review): FIRST_PRIO uses shift 13 with mask 0x7000 (bits 14:12) —
+ * internally inconsistent; a 3-bit priority at shift 13 would be mask
+ * 0xe000. Also FIRST_VID's mask 0x07ff covers only 11 bits of the 12-bit
+ * VLAN VID. Confirm both against the PRM before relying on them.
+ */
+/* CSTYLED */
+#define MLXCX_FLOW_HDR_FIRST_VID (bitdef_t){0, 0x07ff}
+/* CSTYLED */
+#define MLXCX_FLOW_HDR_FIRST_PRIO (bitdef_t){13,0x7000}
+#define MLXCX_FLOW_HDR_FIRST_CFI (1 << 12)
+
+/* DSCP/ECN and flags within the 24-bit mlfh_tcp_ip_flags field. */
+#define MLXCX_FLOW_HDR_IP_DSCP_SHIFT 18
+#define MLXCX_FLOW_HDR_IP_DSCP_MASK 0xfc0000
+#define MLXCX_FLOW_HDR_IP_ECN_SHIFT 16
+#define MLXCX_FLOW_HDR_IP_ECN_MASK 0x030000
+#define MLXCX_FLOW_HDR_CVLAN_TAG (1 << 15)
+#define MLXCX_FLOW_HDR_SVLAN_TAG (1 << 14)
+#define MLXCX_FLOW_HDR_FRAG (1 << 13)
+/* CSTYLED */
+#define MLXCX_FLOW_HDR_IP_VERSION (bitdef_t){ \
+ .bit_shift = 9, \
+ .bit_mask = 0x001e00 }
+/* CSTYLED */
+#define MLXCX_FLOW_HDR_TCP_FLAGS (bitdef_t){ \
+ .bit_shift = 0, \
+ .bit_mask = 0x0001ff }
+
+typedef struct {
+ uint8_t mlfh_smac[6];
+ uint16be_t mlfh_ethertype;
+ uint8_t mlfh_dmac[6];
+ bits16_t mlfh_first_vid_flags;
+ uint8_t mlfh_ip_protocol;
+ bits24_t mlfh_tcp_ip_flags;
+ uint16be_t mlfh_tcp_sport;
+ uint16be_t mlfh_tcp_dport;
+ uint8_t mlfh_rsvd[3];
+ uint8_t mlfh_ip_ttl_hoplimit;
+ uint16be_t mlfh_udp_sport;
+ uint16be_t mlfh_udp_dport;
+ uint8_t mlfh_src_ip[16];
+ uint8_t mlfh_dst_ip[16];
+} mlxcx_flow_header_match_t;
+
+typedef struct {
+ uint8_t mlfp_rsvd;
+ uint24be_t mlfp_source_sqn;
+ uint8_t mlfp_rsvd2[2];
+ uint16be_t mlfp_source_port;
+ bits16_t mlfp_outer_second_vid_flags;
+ bits16_t mlfp_inner_second_vid_flags;
+ bits16_t mlfp_vlan_flags;
+ uint16be_t mlfp_gre_protocol;
+ uint32be_t mlfp_gre_key;
+ uint24be_t mlfp_vxlan_vni;
+ uint8_t mlfp_rsvd3;
+ uint8_t mlfp_rsvd4[4];
+ uint8_t mlfp_rsvd5;
+ uint24be_t mlfp_outer_ipv6_flow_label;
+ uint8_t mlfp_rsvd6;
+ uint24be_t mlfp_inner_ipv6_flow_label;
+ uint8_t mlfp_rsvd7[28];
+} mlxcx_flow_params_match_t;
+
+typedef struct {
+ mlxcx_flow_header_match_t mlfm_outer_headers;
+ mlxcx_flow_params_match_t mlfm_misc_parameters;
+ mlxcx_flow_header_match_t mlfm_inner_headers;
+ uint8_t mlfm_rsvd[320];
+} mlxcx_flow_match_t;
+
+#define MLXCX_FLOW_MAX_DESTINATIONS 64
+typedef enum {
+ MLXCX_FLOW_DEST_VPORT = 0x0,
+ MLXCX_FLOW_DEST_FLOW_TABLE = 0x1,
+ MLXCX_FLOW_DEST_TIR = 0x2,
+ MLXCX_FLOW_DEST_QP = 0x3
+} mlxcx_flow_destination_type_t;
+
+typedef struct {
+ uint8_t mlfd_destination_type;
+ uint24be_t mlfd_destination_id;
+ uint8_t mlfd_rsvd[4];
+} mlxcx_flow_dest_t;
+
+typedef enum {
+ MLXCX_FLOW_ACTION_ALLOW = 1 << 0,
+ MLXCX_FLOW_ACTION_DROP = 1 << 1,
+ MLXCX_FLOW_ACTION_FORWARD = 1 << 2,
+ MLXCX_FLOW_ACTION_COUNT = 1 << 3,
+ MLXCX_FLOW_ACTION_ENCAP = 1 << 4,
+ MLXCX_FLOW_ACTION_DECAP = 1 << 5
+} mlxcx_flow_action_t;
+
+typedef struct {
+ uint8_t mlfec_rsvd[4];
+ uint32be_t mlfec_group_id;
+ uint8_t mlfec_rsvd2;
+ uint24be_t mlfec_flow_tag;
+ uint8_t mlfec_rsvd3[2];
+ uint16be_t mlfec_action;
+ uint8_t mlfec_rsvd4;
+ uint24be_t mlfec_destination_list_size;
+ uint8_t mlfec_rsvd5;
+ uint24be_t mlfec_flow_counter_list_size;
+ uint32be_t mlfec_encap_id;
+ uint8_t mlfec_rsvd6[36];
+ mlxcx_flow_match_t mlfec_match_value;
+ uint8_t mlfec_rsvd7[192];
+ mlxcx_flow_dest_t mlfec_destination[MLXCX_FLOW_MAX_DESTINATIONS];
+} mlxcx_flow_entry_ctx_t;
+
+/* CSTYLED */
+#define MLXCX_TIR_CTX_DISP_TYPE (bitdef_t){ 4, 0xf0 }
+typedef enum {
+ MLXCX_TIR_DIRECT = 0x0,
+ MLXCX_TIR_INDIRECT = 0x1,
+} mlxcx_tir_type_t;
+
+/* CSTYLED */
+#define MLXCX_TIR_LRO_TIMEOUT (bitdef_t){ 12, 0x0ffff000 }
+/* CSTYLED */
+#define MLXCX_TIR_LRO_ENABLE_MASK (bitdef_t){ 8, 0x00000f00 }
+/* CSTYLED */
+#define MLXCX_TIR_LRO_MAX_MSG_SZ (bitdef_t){ 0, 0x000000ff }
+
+/* CSTYLED */
+#define MLXCX_TIR_RX_HASH_FN (bitdef_t){ 4, 0xf0 }
+typedef enum {
+ MLXCX_TIR_HASH_NONE = 0x0,
+ MLXCX_TIR_HASH_XOR8 = 0x1,
+ MLXCX_TIR_HASH_TOEPLITZ = 0x2
+} mlxcx_tir_hash_fn_t;
+#define MLXCX_TIR_LB_UNICAST (1 << 24)
+#define MLXCX_TIR_LB_MULTICAST (1 << 25)
+
+/* CSTYLED */
+#define MLXCX_RX_HASH_L3_TYPE (bitdef_t){ 31, 0x80000000 }
+typedef enum {
+ MLXCX_RX_HASH_L3_IPv4 = 0,
+ MLXCX_RX_HASH_L3_IPv6 = 1
+} mlxcx_tir_rx_hash_l3_type_t;
+/* CSTYLED */
+#define MLXCX_RX_HASH_L4_TYPE (bitdef_t){ 30, 0x40000000 }
+typedef enum {
+ MLXCX_RX_HASH_L4_TCP = 0,
+ MLXCX_RX_HASH_L4_UDP = 1
+} mlxcx_tir_rx_hash_l4_type_t;
+/* CSTYLED */
+#define MLXCX_RX_HASH_FIELDS (bitdef_t){ 0, 0x3fffffff }
+typedef enum {
+ MLXCX_RX_HASH_SRC_IP = 1 << 0,
+ MLXCX_RX_HASH_DST_IP = 1 << 1,
+ MLXCX_RX_HASH_L4_SPORT = 1 << 2,
+ MLXCX_RX_HASH_L4_DPORT = 1 << 3,
+ MLXCX_RX_HASH_IPSEC_SPI = 1 << 4
+} mlxcx_tir_rx_hash_fields_t;
+
+typedef struct {
+ uint8_t mltirc_rsvd[4];
+ bits8_t mltirc_disp_type;
+ uint8_t mltirc_rsvd2[11];
+ bits32_t mltirc_lro;
+ uint8_t mltirc_rsvd3[9];
+ uint24be_t mltirc_inline_rqn;
+ bits8_t mltirc_flags;
+ uint24be_t mltirc_indirect_table;
+ bits8_t mltirc_hash_lb;
+ uint24be_t mltirc_transport_domain;
+ uint8_t mltirc_rx_hash_toeplitz_key[40];
+ bits32_t mltirc_rx_hash_fields_outer;
+ bits32_t mltirc_rx_hash_fields_inner;
+ uint8_t mltirc_rsvd4[152];
+} mlxcx_tir_ctx_t;
+
+typedef struct {
+ uint8_t mltisc_rsvd;
+ uint8_t mltisc_prio_or_sl;
+ uint8_t mltisc_rsvd2[35];
+ uint24be_t mltisc_transport_domain;
+ uint8_t mltisc_rsvd3[120];
+} mlxcx_tis_ctx_t;
+
+#define MLXCX_RQT_MAX_RQ_REFS 64
+
+typedef struct {
+ uint8_t mlrqtr_rsvd;
+ uint24be_t mlrqtr_rqn;
+} mlxcx_rqtable_rq_ref_t;
+
+typedef struct {
+ uint8_t mlrqtc_rsvd[22];
+ uint16be_t mlrqtc_max_size;
+ uint8_t mlrqtc_rsvd2[2];
+ uint16be_t mlrqtc_actual_size;
+ uint8_t mlrqtc_rsvd3[212];
+ mlxcx_rqtable_rq_ref_t mlrqtc_rqref[MLXCX_RQT_MAX_RQ_REFS];
+} mlxcx_rqtable_ctx_t;
+
+#pragma pack()
+
+/*
+ * Event types delivered in mleqe_event_type of an EQ entry. Enumerators
+ * are grouped by category rather than sorted numerically, so the values
+ * are intentionally out of order here.
+ */
+typedef enum {
+ MLXCX_EVENT_COMPLETION = 0x00,
+ MLXCX_EVENT_PATH_MIGRATED = 0x01,
+ MLXCX_EVENT_COMM_ESTABLISH = 0x02,
+ MLXCX_EVENT_SENDQ_DRAIN = 0x03,
+ MLXCX_EVENT_LAST_WQE = 0x13,
+ MLXCX_EVENT_SRQ_LIMIT = 0x14,
+ MLXCX_EVENT_DCT_ALL_CLOSED = 0x1C,
+ MLXCX_EVENT_DCT_ACCKEY_VIOL = 0x1D,
+ MLXCX_EVENT_CQ_ERROR = 0x04,
+ MLXCX_EVENT_WQ_CATASTROPHE = 0x05,
+ MLXCX_EVENT_PATH_MIGRATE_FAIL = 0x07,
+ MLXCX_EVENT_PAGE_FAULT = 0x0C,
+ MLXCX_EVENT_WQ_INVALID_REQ = 0x10,
+ MLXCX_EVENT_WQ_ACCESS_VIOL = 0x11,
+ MLXCX_EVENT_SRQ_CATASTROPHE = 0x12,
+ MLXCX_EVENT_INTERNAL_ERROR = 0x08,
+ MLXCX_EVENT_PORT_STATE = 0x09,
+ MLXCX_EVENT_GPIO = 0x15,
+ MLXCX_EVENT_PORT_MODULE = 0x16,
+ MLXCX_EVENT_TEMP_WARNING = 0x17,
+ MLXCX_EVENT_REMOTE_CONFIG = 0x19,
+ MLXCX_EVENT_DCBX_CHANGE = 0x1E,
+ MLXCX_EVENT_DOORBELL_CONGEST = 0x1A,
+ MLXCX_EVENT_STALL_VL = 0x1B,
+ MLXCX_EVENT_CMD_COMPLETION = 0x0A,
+ MLXCX_EVENT_PAGE_REQUEST = 0x0B,
+ MLXCX_EVENT_NIC_VPORT = 0x0D,
+ MLXCX_EVENT_EC_PARAMS_CHANGE = 0x0E,
+ MLXCX_EVENT_XRQ_ERROR = 0x18
+} mlxcx_event_t;
+
+/*
+ * Command return/status codes (presumably reported via the command output
+ * status byte, mco_status — confirm against the command completion path).
+ * Values are not in strictly ascending order.
+ */
+typedef enum {
+ MLXCX_CMD_R_OK = 0x00,
+ MLXCX_CMD_R_INTERNAL_ERR = 0x01,
+ MLXCX_CMD_R_BAD_OP = 0x02,
+ MLXCX_CMD_R_BAD_PARAM = 0x03,
+ MLXCX_CMD_R_BAD_SYS_STATE = 0x04,
+ MLXCX_CMD_R_BAD_RESOURCE = 0x05,
+ MLXCX_CMD_R_RESOURCE_BUSY = 0x06,
+ MLXCX_CMD_R_EXCEED_LIM = 0x08,
+ MLXCX_CMD_R_BAD_RES_STATE = 0x09,
+ MLXCX_CMD_R_BAD_INDEX = 0x0a,
+ MLXCX_CMD_R_NO_RESOURCES = 0x0f,
+ MLXCX_CMD_R_BAD_INPUT_LEN = 0x50,
+ MLXCX_CMD_R_BAD_OUTPUT_LEN = 0x51,
+ MLXCX_CMD_R_BAD_RESOURCE_STATE = 0x10,
+ MLXCX_CMD_R_BAD_PKT = 0x30,
+ MLXCX_CMD_R_BAD_SIZE = 0x40,
+ MLXCX_CMD_R_TIMEOUT = 0xFF
+} mlxcx_cmd_ret_t;
+
+typedef enum {
+ MLXCX_OP_QUERY_HCA_CAP = 0x100,
+ MLXCX_OP_QUERY_ADAPTER = 0x101,
+ MLXCX_OP_INIT_HCA = 0x102,
+ MLXCX_OP_TEARDOWN_HCA = 0x103,
+ MLXCX_OP_ENABLE_HCA = 0x104,
+ MLXCX_OP_DISABLE_HCA = 0x105,
+ MLXCX_OP_QUERY_PAGES = 0x107,
+ MLXCX_OP_MANAGE_PAGES = 0x108,
+ MLXCX_OP_SET_HCA_CAP = 0x109,
+ MLXCX_OP_QUERY_ISSI = 0x10A,
+ MLXCX_OP_SET_ISSI = 0x10B,
+ MLXCX_OP_SET_DRIVER_VERSION = 0x10D,
+ MLXCX_OP_QUERY_OTHER_HCA_CAP = 0x10E,
+ MLXCX_OP_MODIFY_OTHER_HCA_CAP = 0x10F,
+ MLXCX_OP_SET_TUNNELED_OPERATIONS = 0x110,
+ MLXCX_OP_CREATE_MKEY = 0x200,
+ MLXCX_OP_QUERY_MKEY = 0x201,
+ MLXCX_OP_DESTROY_MKEY = 0x202,
+ MLXCX_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
+ MLXCX_OP_PAGE_FAULT_RESUME = 0x204,
+ MLXCX_OP_CREATE_EQ = 0x301,
+ MLXCX_OP_DESTROY_EQ = 0x302,
+ MLXCX_OP_QUERY_EQ = 0x303,
+ MLXCX_OP_GEN_EQE = 0x304,
+ MLXCX_OP_CREATE_CQ = 0x400,
+ MLXCX_OP_DESTROY_CQ = 0x401,
+ MLXCX_OP_QUERY_CQ = 0x402,
+ MLXCX_OP_MODIFY_CQ = 0x403,
+ MLXCX_OP_CREATE_QP = 0x500,
+ MLXCX_OP_DESTROY_QP = 0x501,
+ MLXCX_OP_RST2INIT_QP = 0x502,
+ MLXCX_OP_INIT2RTR_QP = 0x503,
+ MLXCX_OP_RTR2RTS_QP = 0x504,
+ MLXCX_OP_RTS2RTS_QP = 0x505,
+ MLXCX_OP_SQERR2RTS_QP = 0x506,
+ MLXCX_OP__2ERR_QP = 0x507,
+ MLXCX_OP__2RST_QP = 0x50A,
+ MLXCX_OP_QUERY_QP = 0x50B,
+ MLXCX_OP_SQD_RTS_QP = 0x50C,
+ MLXCX_OP_INIT2INIT_QP = 0x50E,
+ MLXCX_OP_CREATE_PSV = 0x600,
+ MLXCX_OP_DESTROY_PSV = 0x601,
+ MLXCX_OP_CREATE_SRQ = 0x700,
+ MLXCX_OP_DESTROY_SRQ = 0x701,
+ MLXCX_OP_QUERY_SRQ = 0x702,
+ MLXCX_OP_ARM_RQ = 0x703,
+ MLXCX_OP_CREATE_XRC_SRQ = 0x705,
+ MLXCX_OP_DESTROY_XRC_SRQ = 0x706,
+ MLXCX_OP_QUERY_XRC_SRQ = 0x707,
+ MLXCX_OP_ARM_XRC_SRQ = 0x708,
+ MLXCX_OP_CREATE_DCT = 0x710,
+ MLXCX_OP_DESTROY_DCT = 0x711,
+ MLXCX_OP_DRAIN_DCT = 0x712,
+ MLXCX_OP_QUERY_DCT = 0x713,
+ MLXCX_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714,
+ MLXCX_OP_CREATE_XRQ = 0x717,
+ MLXCX_OP_DESTROY_XRQ = 0x718,
+ MLXCX_OP_QUERY_XRQ = 0x719,
+ MLXCX_OP_CREATE_NVMF_BACKEND_CONTROLLER = 0x720,
+ MLXCX_OP_DESTROY_NVMF_BACKEND_CONTROLLER = 0x721,
+ MLXCX_OP_QUERY_NVMF_BACKEND_CONTROLLER = 0x722,
+ MLXCX_OP_ATTACH_NVMF_NAMESPACE = 0x723,
+ MLXCX_OP_DETACH_NVMF_NAMESPACE = 0x724,
+ MLXCX_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
+ MLXCX_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
+ MLXCX_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
+ MLXCX_OP_QUERY_VPORT_STATE = 0x750,
+ MLXCX_OP_MODIFY_VPORT_STATE = 0x751,
+ MLXCX_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
+ MLXCX_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
+ MLXCX_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
+ MLXCX_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
+ MLXCX_OP_QUERY_ROCE_ADDRESS = 0x760,
+ MLXCX_OP_SET_ROCE_ADDRESS = 0x761,
+ MLXCX_OP_QUERY_HCA_VPORT_CONTEXT = 0x762,
+ MLXCX_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
+ MLXCX_OP_QUERY_HCA_VPORT_GID = 0x764,
+ MLXCX_OP_QUERY_HCA_VPORT_PKEY = 0x765,
+ MLXCX_OP_QUERY_VPORT_COUNTER = 0x770,
+ MLXCX_OP_ALLOC_Q_COUNTER = 0x771,
+ MLXCX_OP_DEALLOC_Q_COUNTER = 0x772,
+ MLXCX_OP_QUERY_Q_COUNTER = 0x773,
+ MLXCX_OP_SET_PP_RATE_LIMIT = 0x780,
+ MLXCX_OP_QUERY_PP_RATE_LIMIT = 0x781,
+ MLXCX_OP_ALLOC_PD = 0x800,
+ MLXCX_OP_DEALLOC_PD = 0x801,
+ MLXCX_OP_ALLOC_UAR = 0x802,
+ MLXCX_OP_DEALLOC_UAR = 0x803,
+ MLXCX_OP_CONFIG_INT_MODERATION = 0x804,
+ MLXCX_OP_ACCESS_REG = 0x805,
+ MLXCX_OP_ATTACH_TO_MCG = 0x806,
+ MLXCX_OP_DETACH_FROM_MCG = 0x807,
+ MLXCX_OP_MAD_IFC = 0x50D,
+ MLXCX_OP_QUERY_MAD_DEMUX = 0x80B,
+ MLXCX_OP_SET_MAD_DEMUX = 0x80C,
+ MLXCX_OP_NOP = 0x80D,
+ MLXCX_OP_ALLOC_XRCD = 0x80E,
+ MLXCX_OP_DEALLOC_XRCD = 0x80F,
+ MLXCX_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
+ MLXCX_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817,
+ MLXCX_OP_QUERY_CONG_STATUS = 0x822,
+ MLXCX_OP_MODIFY_CONG_STATUS = 0x823,
+ MLXCX_OP_QUERY_CONG_PARAMS = 0x824,
+ MLXCX_OP_MODIFY_CONG_PARAMS = 0x825,
+ MLXCX_OP_QUERY_CONG_STATISTICS = 0x826,
+ MLXCX_OP_ADD_VXLAN_UDP_DPORT = 0x827,
+ MLXCX_OP_DELETE_VXLAN_UDP_DPORT = 0x828,
+ MLXCX_OP_SET_L2_TABLE_ENTRY = 0x829,
+ MLXCX_OP_QUERY_L2_TABLE_ENTRY = 0x82A,
+ MLXCX_OP_DELETE_L2_TABLE_ENTRY = 0x82B,
+ MLXCX_OP_SET_WOL_ROL = 0x830,
+ MLXCX_OP_QUERY_WOL_ROL = 0x831,
+ MLXCX_OP_CREATE_TIR = 0x900,
+ MLXCX_OP_MODIFY_TIR = 0x901,
+ MLXCX_OP_DESTROY_TIR = 0x902,
+ MLXCX_OP_QUERY_TIR = 0x903,
+ MLXCX_OP_CREATE_SQ = 0x904,
+ MLXCX_OP_MODIFY_SQ = 0x905,
+ MLXCX_OP_DESTROY_SQ = 0x906,
+ MLXCX_OP_QUERY_SQ = 0x907,
+ MLXCX_OP_CREATE_RQ = 0x908,
+ MLXCX_OP_MODIFY_RQ = 0x909,
+ MLXCX_OP_DESTROY_RQ = 0x90A,
+ MLXCX_OP_QUERY_RQ = 0x90B,
+ MLXCX_OP_CREATE_RMP = 0x90C,
+ MLXCX_OP_MODIFY_RMP = 0x90D,
+ MLXCX_OP_DESTROY_RMP = 0x90E,
+ MLXCX_OP_QUERY_RMP = 0x90F,
+ MLXCX_OP_CREATE_TIS = 0x912,
+ MLXCX_OP_MODIFY_TIS = 0x913,
+ MLXCX_OP_DESTROY_TIS = 0x914,
+ MLXCX_OP_QUERY_TIS = 0x915,
+ MLXCX_OP_CREATE_RQT = 0x916,
+ MLXCX_OP_MODIFY_RQT = 0x917,
+ MLXCX_OP_DESTROY_RQT = 0x918,
+ MLXCX_OP_QUERY_RQT = 0x919,
+ MLXCX_OP_SET_FLOW_TABLE_ROOT = 0x92f,
+ MLXCX_OP_CREATE_FLOW_TABLE = 0x930,
+ MLXCX_OP_DESTROY_FLOW_TABLE = 0x931,
+ MLXCX_OP_QUERY_FLOW_TABLE = 0x932,
+ MLXCX_OP_CREATE_FLOW_GROUP = 0x933,
+ MLXCX_OP_DESTROY_FLOW_GROUP = 0x934,
+ MLXCX_OP_QUERY_FLOW_GROUP = 0x935,
+ MLXCX_OP_SET_FLOW_TABLE_ENTRY = 0x936,
+ MLXCX_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
+ MLXCX_OP_DELETE_FLOW_TABLE_ENTRY = 0x938,
+ MLXCX_OP_ALLOC_FLOW_COUNTER = 0x939,
+ MLXCX_OP_DEALLOC_FLOW_COUNTER = 0x93a,
+ MLXCX_OP_QUERY_FLOW_COUNTER = 0x93b,
+ MLXCX_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLXCX_OP_ALLOC_ENCAP_HEADER = 0x93d,
+ MLXCX_OP_DEALLOC_ENCAP_HEADER = 0x93e,
+ MLXCX_OP_QUERY_ENCAP_HEADER = 0x93f
+} mlxcx_cmd_op_t;
+
+/*
+ * Definitions for relevant commands
+ *
+ * These structures describe the exact wire layout of command mailboxes
+ * exchanged with the device (presumably per the Mellanox PRM -- confirm
+ * against the vendor documentation).  pack(1) below ensures no padding is
+ * inserted; do not reorder or resize fields.
+ */
+#pragma pack(1)
+/* Common header beginning every command input mailbox. */
+typedef struct {
+ uint16be_t mci_opcode;
+ uint8_t mci_rsvd[4];
+ uint16be_t mci_op_mod;
+} mlxcx_cmd_in_t;
+
+/* Common header beginning every command output mailbox. */
+typedef struct {
+ uint8_t mco_status;
+ uint8_t mco_rsvd[3];
+ uint32be_t mco_syndrome;
+} mlxcx_cmd_out_t;
+
+/* ENABLE_HCA: bring the function identified by _func into the enabled state. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_enable_hca_head;
+ uint8_t mlxi_enable_hca_rsvd[2];
+ uint16be_t mlxi_enable_hca_func;
+ uint8_t mlxi_enable_hca_rsvd1[4];
+} mlxcx_cmd_enable_hca_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_enable_hca_head;
+ uint8_t mlxo_enable_hca_rsvd[8];
+} mlxcx_cmd_enable_hca_out_t;
+
+/* DISABLE_HCA: inverse of ENABLE_HCA; same function-number layout. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_disable_hca_head;
+ uint8_t mlxi_disable_hca_rsvd[2];
+ uint16be_t mlxi_disable_hca_func;
+ uint8_t mlxi_disable_hca_rsvd1[4];
+} mlxcx_cmd_disable_hca_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_disable_hca_head;
+ uint8_t mlxo_disable_hca_rsvd[8];
+} mlxcx_cmd_disable_hca_out_t;
+
+/*
+ * QUERY_ISSI / SET_ISSI: negotiate the Interface Step Sequence ID
+ * (command-interface version) with firmware.
+ */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_query_issi_head;
+ uint8_t mlxi_query_issi_rsvd[8];
+} mlxcx_cmd_query_issi_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_query_issi_head;
+ /* NOTE(review): "_rsv" is presumably a typo for "_rsvd"; left as-is. */
+ uint8_t mlxo_query_issi_rsv[2];
+ uint16be_t mlxo_query_issi_current;
+ uint8_t mlxo_query_issi_rsvd1[20];
+ /*
+ * To date we only support version 1 of the ISSI. The last byte has the
+ * ISSI data that we care about, therefore we phrase the struct this
+ * way.
+ */
+ uint8_t mlxo_query_issi_rsvd2[79];
+ uint8_t mlxo_supported_issi;
+} mlxcx_cmd_query_issi_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_set_issi_head;
+ uint8_t mlxi_set_issi_rsvd[2];
+ uint16be_t mlxi_set_issi_current;
+ /* NOTE(review): "_iss_" is presumably a typo for "_issi_"; left as-is. */
+ uint8_t mlxi_set_iss_rsvd1[4];
+} mlxcx_cmd_set_issi_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_set_issi_head;
+ uint8_t mlxo_set_issi_rsvd[8];
+} mlxcx_cmd_set_issi_out_t;
+
+/* INIT_HCA: no payload beyond the common header. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_init_hca_head;
+ uint8_t mlxi_init_hca_rsvd[8];
+} mlxcx_cmd_init_hca_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_init_hca_head;
+ uint8_t mlxo_init_hca_rsvd[8];
+} mlxcx_cmd_init_hca_out_t;
+
+/* Profile values for the TEARDOWN_HCA input below. */
+#define MLXCX_TEARDOWN_HCA_GRACEFUL 0x00
+#define MLXCX_TEARDOWN_HCA_FORCE 0x01
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_teardown_hca_head;
+ uint8_t mlxi_teardown_hca_rsvd[2];
+ uint16be_t mlxi_teardown_hca_profile;
+ uint8_t mlxi_teardown_hca_rsvd1[4];
+} mlxcx_cmd_teardown_hca_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_teardown_hca_head;
+ uint8_t mlxo_teardown_hca_rsvd[7];
+ uint8_t mlxo_teardown_hca_state;
+} mlxcx_cmd_teardown_hca_out_t;
+
+/* op_mod values selecting which page count QUERY_PAGES reports. */
+#define MLXCX_QUERY_PAGES_OPMOD_BOOT 0x01
+#define MLXCX_QUERY_PAGES_OPMOD_INIT 0x02
+#define MLXCX_QUERY_PAGES_OPMOD_REGULAR 0x03
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_query_pages_head;
+ uint8_t mlxi_query_pages_rsvd[2];
+ uint16be_t mlxi_query_pages_func;
+ uint8_t mlxi_query_pages_rsvd1[4];
+} mlxcx_cmd_query_pages_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_query_pages_head;
+ uint8_t mlxo_query_pages_rsvd[2];
+ uint16be_t mlxo_query_pages_func;
+ /* Number of pages firmware wants given (or, if negative, returned). */
+ uint32be_t mlxo_query_pages_npages;
+} mlxcx_cmd_query_pages_out_t;
+
+/* op_mod values for MANAGE_PAGES. */
+#define MLXCX_MANAGE_PAGES_OPMOD_ALLOC_FAIL 0x00
+#define MLXCX_MANAGE_PAGES_OPMOD_GIVE_PAGES 0x01
+#define MLXCX_MANAGE_PAGES_OPMOD_RETURN_PAGES 0x02
+
+/*
+ * This is an artificial limit that we're imposing on our actions.
+ * It bounds the _pas physical-address arrays below (and thus the mailbox
+ * size), not anything the hardware requires.
+ */
+#define MLXCX_MANAGE_PAGES_MAX_PAGES 512
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_manage_pages_head;
+ uint8_t mlxi_manage_pages_rsvd[2];
+ uint16be_t mlxi_manage_pages_func;
+ uint32be_t mlxi_manage_pages_npages;
+ /* Physical addresses of the pages being given to firmware. */
+ uint64be_t mlxi_manage_pages_pas[MLXCX_MANAGE_PAGES_MAX_PAGES];
+} mlxcx_cmd_manage_pages_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_manage_pages_head;
+ uint32be_t mlxo_manage_pages_npages;
+ uint8_t mlxo_manage_pages_rsvd[4];
+ /* Physical addresses of the pages firmware is returning to us. */
+ uint64be_t mlxo_manage_pages_pas[MLXCX_MANAGE_PAGES_MAX_PAGES];
+} mlxcx_cmd_manage_pages_out_t;
+
+/* QUERY_HCA_CAP op_mod low bit: report maximum vs. currently-set caps. */
+typedef enum {
+ MLXCX_HCA_CAP_MODE_MAX = 0x0,
+ MLXCX_HCA_CAP_MODE_CURRENT = 0x1
+} mlxcx_hca_cap_mode_t;
+
+/* Capability group selector for QUERY_HCA_CAP. */
+typedef enum {
+ MLXCX_HCA_CAP_GENERAL = 0x0,
+ MLXCX_HCA_CAP_ETHERNET = 0x1,
+ MLXCX_HCA_CAP_ODP = 0x2,
+ MLXCX_HCA_CAP_ATOMIC = 0x3,
+ MLXCX_HCA_CAP_ROCE = 0x4,
+ MLXCX_HCA_CAP_IPoIB = 0x5,
+ MLXCX_HCA_CAP_NIC_FLOW = 0x7,
+ MLXCX_HCA_CAP_ESWITCH_FLOW = 0x8,
+ MLXCX_HCA_CAP_ESWITCH = 0x9,
+ MLXCX_HCA_CAP_VECTOR = 0xb,
+ MLXCX_HCA_CAP_QoS = 0xc,
+ MLXCX_HCA_CAP_NVMEoF = 0xe
+} mlxcx_hca_cap_type_t;
+
+/* Values of the 2-bit port_type field in the general caps below. */
+typedef enum {
+ MLXCX_CAP_GENERAL_PORT_TYPE_IB = 0x0,
+ MLXCX_CAP_GENERAL_PORT_TYPE_ETHERNET = 0x1,
+} mlxcx_hca_cap_general_port_type_t;
+
+/* Bits within mlcap_general_flags_c that we care about. */
+typedef enum {
+ MLXCX_CAP_GENERAL_FLAGS_C_ESW_FLOW_TABLE = (1 << 8),
+ MLXCX_CAP_GENERAL_FLAGS_C_NIC_FLOW_TABLE = (1 << 9),
+} mlxcx_hca_cap_general_flags_c_t;
+
+/*
+ * Layout of the "general" device capabilities page returned by
+ * QUERY_HCA_CAP.  Fields are grouped in 4-byte rows matching the hardware
+ * layout; the many _rsvd members are padding for fields we do not use.
+ * The anonymous bitfield structs carve sub-byte fields out of single
+ * octets; both bit orders are provided for endian portability.
+ */
+typedef struct {
+ uint8_t mlcap_general_access_other_hca_roce;
+ uint8_t mlcap_general_rsvd[3];
+
+ uint8_t mlcap_general_rsvd2[12];
+
+ uint8_t mlcap_general_log_max_srq_sz;
+ uint8_t mlcap_general_log_max_qp_sz;
+ uint8_t mlcap_general_rsvd3[1];
+ uint8_t mlcap_general_log_max_qp;
+
+ uint8_t mlcap_general_rsvd4[1];
+ uint8_t mlcap_general_log_max_srq;
+ uint8_t mlcap_general_rsvd5[2];
+
+ uint8_t mlcap_general_rsvd6[1];
+ uint8_t mlcap_general_log_max_cq_sz;
+ uint8_t mlcap_general_rsvd7[1];
+ uint8_t mlcap_general_log_max_cq;
+
+ uint8_t mlcap_general_log_max_eq_sz;
+ uint8_t mlcap_general_log_max_mkey_flags;
+ uint8_t mlcap_general_rsvd8[1];
+ uint8_t mlcap_general_log_max_eq;
+
+ uint8_t mlcap_general_max_indirection;
+ uint8_t mlcap_general_log_max_mrw_sz_flags;
+ uint8_t mlcap_general_log_max_bsf_list_size_flags;
+ uint8_t mlcap_general_log_max_klm_list_size_flags;
+
+ uint8_t mlcap_general_rsvd9[1];
+ uint8_t mlcap_general_log_max_ra_req_dc;
+ uint8_t mlcap_general_rsvd10[1];
+ uint8_t mlcap_general_log_max_ra_res_dc;
+
+ uint8_t mlcap_general_rsvd11[1];
+ uint8_t mlcap_general_log_max_ra_req_qp;
+ uint8_t mlcap_general_rsvd12[1];
+ uint8_t mlcap_general_log_max_ra_res_qp;
+
+ uint16be_t mlcap_general_flags_a;
+ uint16be_t mlcap_general_gid_table_size;
+
+ bits16_t mlcap_general_flags_b;
+ uint16be_t mlcap_general_pkey_table_size;
+
+ /* See mlxcx_hca_cap_general_flags_c_t for bits of interest. */
+ bits16_t mlcap_general_flags_c;
+ struct {
+#if defined(_BIT_FIELDS_HTOL)
+ uint8_t mlcap_general_flags_d:6;
+ uint8_t mlcap_general_port_type:2;
+#elif defined(_BIT_FIELDS_LTOH)
+ uint8_t mlcap_general_port_type:2;
+ uint8_t mlcap_general_flags_d:6;
+#endif
+ };
+ uint8_t mlcap_general_num_ports;
+
+ struct {
+#if defined(_BIT_FIELDS_HTOL)
+ uint8_t mlcap_general_rsvd13:3;
+ uint8_t mlcap_general_log_max_msg:5;
+#elif defined(_BIT_FIELDS_LTOH)
+ uint8_t mlcap_general_log_max_msg:5;
+ uint8_t mlcap_general_rsvd13:3;
+#endif
+ };
+ uint8_t mlcap_general_max_tc;
+ bits16_t mlcap_general_flags_d_wol;
+
+ uint16be_t mlcap_general_state_rate_support;
+ uint8_t mlcap_general_rsvd14[1];
+ struct {
+#if defined(_BIT_FIELDS_HTOL)
+ uint8_t mlcap_general_rsvd15:4;
+ uint8_t mlcap_general_cqe_version:4;
+#elif defined(_BIT_FIELDS_LTOH)
+ uint8_t mlcap_general_cqe_version:4;
+ uint8_t mlcap_general_rsvd15:4;
+#endif
+ };
+
+ uint32be_t mlcap_general_flags_e;
+
+ uint32be_t mlcap_general_flags_f;
+
+ uint8_t mlcap_general_rsvd16[1];
+ uint8_t mlcap_general_uar_sz;
+ uint8_t mlcap_general_cnak;
+ uint8_t mlcap_general_log_pg_sz;
+ uint8_t mlcap_general_rsvd17[32];
+ bits8_t mlcap_general_log_max_rq_flags;
+ uint8_t mlcap_general_log_max_sq;
+ uint8_t mlcap_general_log_max_tir;
+ uint8_t mlcap_general_log_max_tis;
+} mlxcx_hca_cap_general_caps_t;
+
+/* Bits within mlcap_eth_flags (Ethernet offload capabilities). */
+typedef enum {
+ MLXCX_ETH_CAP_TUNNEL_STATELESS_VXLAN = 1 << 0,
+ MLXCX_ETH_CAP_TUNNEL_STATELESS_GRE = 1 << 1,
+ MLXCX_ETH_CAP_TUNNEL_LSO_CONST_OUT_IP_ID = 1 << 4,
+ MLXCX_ETH_CAP_SCATTER_FCS = 1 << 6,
+ MLXCX_ETH_CAP_REG_UMR_SQ = 1 << 7,
+ MLXCX_ETH_CAP_SELF_LB_UC = 1 << 21,
+ MLXCX_ETH_CAP_SELF_LB_MC = 1 << 22,
+ MLXCX_ETH_CAP_SELF_LB_EN_MODIFIABLE = 1 << 23,
+ MLXCX_ETH_CAP_WQE_VLAN_INSERT = 1 << 24,
+ MLXCX_ETH_CAP_LRO_TIME_STAMP = 1 << 27,
+ MLXCX_ETH_CAP_LRO_PSH_FLAG = 1 << 28,
+ MLXCX_ETH_CAP_LRO_CAP = 1 << 29,
+ MLXCX_ETH_CAP_VLAN_STRIP = 1 << 30,
+ /* 1UL avoids shifting a signed int into the sign bit. */
+ MLXCX_ETH_CAP_CSUM_CAP = 1UL << 31
+} mlxcx_hca_eth_cap_flags_t;
+
+/* Multi-bit fields within mlcap_eth_flags, as {shift, mask} bitdefs. */
+/* CSTYLED */
+#define MLXCX_ETH_CAP_RSS_IND_TBL_CAP (bitdef_t){8, 0x00000f00}
+/* CSTYLED */
+#define MLXCX_ETH_CAP_WQE_INLINE_MODE (bitdef_t){12, 0x00003000}
+/* CSTYLED */
+#define MLXCX_ETH_CAP_MULTI_PKT_SEND_WQE (bitdef_t){14, 0x0000c000}
+/* CSTYLED */
+#define MLXCX_ETH_CAP_MAX_LSO_CAP (bitdef_t){16, 0x001f0000}
+/* CSTYLED */
+#define MLXCX_ETH_CAP_LRO_MAX_MSG_SZ_MODE (bitdef_t){25, 0x06000000}
+
+/* Ethernet capability page returned by QUERY_HCA_CAP. */
+typedef struct {
+ bits32_t mlcap_eth_flags;
+ uint8_t mlcap_eth_rsvd[6];
+ uint16be_t mlcap_eth_lro_min_mss_size;
+ uint8_t mlcap_eth_rsvd2[36];
+ uint32be_t mlcap_eth_lro_timer_supported_periods[4];
+} mlxcx_hca_cap_eth_caps_t;
+
+/* Bits within mlcap_flow_prop_flags (per-flow-table-type properties). */
+typedef enum {
+ MLXCX_FLOW_CAP_PROPS_DECAP = 1 << 23,
+ MLXCX_FLOW_CAP_PROPS_ENCAP = 1 << 24,
+ MLXCX_FLOW_CAP_PROPS_MODIFY_TBL = 1 << 25,
+ MLXCX_FLOW_CAP_PROPS_MISS_TABLE = 1 << 26,
+ MLXCX_FLOW_CAP_PROPS_MODIFY_ROOT_TBL = 1 << 27,
+ MLXCX_FLOW_CAP_PROPS_MODIFY = 1 << 28,
+ MLXCX_FLOW_CAP_PROPS_COUNTER = 1 << 29,
+ MLXCX_FLOW_CAP_PROPS_TAG = 1 << 30,
+ /* 1UL avoids shifting a signed int into the sign bit. */
+ MLXCX_FLOW_CAP_PROPS_SUPPORT = 1UL << 31
+} mlxcx_hca_cap_flow_cap_props_flags_t;
+
+/* Properties for one flow-table type (sizes are log2 values). */
+typedef struct {
+ bits32_t mlcap_flow_prop_flags;
+ uint8_t mlcap_flow_prop_log_max_ft_size;
+ uint8_t mlcap_flow_prop_rsvd[2];
+ uint8_t mlcap_flow_prop_max_ft_level;
+ uint8_t mlcap_flow_prop_rsvd2[7];
+ uint8_t mlcap_flow_prop_log_max_ft_num;
+ uint8_t mlcap_flow_prop_rsvd3[2];
+ uint8_t mlcap_flow_prop_log_max_flow_counter;
+ uint8_t mlcap_flow_prop_log_max_destination;
+ uint8_t mlcap_flow_prop_rsvd4[3];
+ uint8_t mlcap_flow_prop_log_max_flow;
+ uint8_t mlcap_flow_prop_rsvd5[8];
+ bits32_t mlcap_flow_prop_support[4];
+ bits32_t mlcap_flow_prop_bitmask[4];
+} mlxcx_hca_cap_flow_cap_props_t;
+
+/* NIC flow capability page: one props block per table type. */
+typedef struct {
+ bits32_t mlcap_flow_flags;
+ uint8_t mlcap_flow_rsvd[60];
+ mlxcx_hca_cap_flow_cap_props_t mlcap_flow_nic_rx;
+ mlxcx_hca_cap_flow_cap_props_t mlcap_flow_nic_rx_rdma;
+ mlxcx_hca_cap_flow_cap_props_t mlcap_flow_nic_rx_sniffer;
+ mlxcx_hca_cap_flow_cap_props_t mlcap_flow_nic_tx;
+ mlxcx_hca_cap_flow_cap_props_t mlcap_flow_nic_tx_rdma;
+ mlxcx_hca_cap_flow_cap_props_t mlcap_flow_nic_tx_sniffer;
+} mlxcx_hca_cap_flow_caps_t;
+
+/*
+ * Size of a buffer that is required to hold the output data.
+ * The raw cap page is returned as opaque bytes and then reinterpreted as
+ * one of the mlxcx_hca_cap_*_caps_t structures above.
+ */
+#define MLXCX_HCA_CAP_SIZE 0x1000
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_query_hca_cap_head;
+ uint8_t mlxi_query_hca_cap_rsvd[8];
+} mlxcx_cmd_query_hca_cap_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_query_hca_cap_head;
+ uint8_t mlxo_query_hca_cap_rsvd[8];
+ uint8_t mlxo_query_hca_cap_data[MLXCX_HCA_CAP_SIZE];
+} mlxcx_cmd_query_hca_cap_out_t;
+
+/* SET_DRIVER_VERSION: report our driver version string to firmware. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_set_driver_version_head;
+ uint8_t mlxi_set_driver_version_rsvd[8];
+ char mlxi_set_driver_version_version[64];
+} mlxcx_cmd_set_driver_version_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_set_driver_version_head;
+ uint8_t mlxo_set_driver_version_rsvd[8];
+} mlxcx_cmd_set_driver_version_out_t;
+
+/* ALLOC_UAR / DEALLOC_UAR: User Access Region index management. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_alloc_uar_head;
+ uint8_t mlxi_alloc_uar_rsvd[8];
+} mlxcx_cmd_alloc_uar_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_alloc_uar_head;
+ uint8_t mlxo_alloc_uar_rsvd;
+ uint24be_t mlxo_alloc_uar_uar;
+ uint8_t mlxo_alloc_uar_rsvd2[4];
+} mlxcx_cmd_alloc_uar_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_dealloc_uar_head;
+ uint8_t mlxi_dealloc_uar_rsvd;
+ uint24be_t mlxi_dealloc_uar_uar;
+ uint8_t mlxi_dealloc_uar_rsvd2[4];
+} mlxcx_cmd_dealloc_uar_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_dealloc_uar_head;
+ uint8_t mlxo_dealloc_uar_rsvd[8];
+} mlxcx_cmd_dealloc_uar_out_t;
+
+/*
+ * This is an artificial limit that we're imposing on our actions.
+ * It bounds the _pas physical-address arrays for queue creation below.
+ */
+#define MLXCX_CREATE_QUEUE_MAX_PAGES 128
+
+/* CREATE_EQ: event queue creation (context + event mask + page list). */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_create_eq_head;
+ uint8_t mlxi_create_eq_rsvd[8];
+ mlxcx_eventq_ctx_t mlxi_create_eq_context;
+ uint8_t mlxi_create_eq_rsvd2[8];
+ uint64be_t mlxi_create_eq_event_bitmask;
+ uint8_t mlxi_create_eq_rsvd3[176];
+ uint64be_t mlxi_create_eq_pas[MLXCX_CREATE_QUEUE_MAX_PAGES];
+} mlxcx_cmd_create_eq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_create_eq_head;
+ uint8_t mlxo_create_eq_rsvd[3];
+ uint8_t mlxo_create_eq_eqn;
+ uint8_t mlxo_create_eq_rsvd2[4];
+} mlxcx_cmd_create_eq_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_query_eq_head;
+ uint8_t mlxi_query_eq_rsvd[3];
+ uint8_t mlxi_query_eq_eqn;
+ uint8_t mlxi_query_eq_rsvd2[4];
+} mlxcx_cmd_query_eq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_query_eq_head;
+ uint8_t mlxo_query_eq_rsvd[8];
+ mlxcx_eventq_ctx_t mlxo_query_eq_context;
+ /*
+ * NOTE(review): the fields below keep "mlxi_"/"create_eq" prefixes,
+ * presumably a copy-paste from the create-EQ input struct above; they
+ * are left unrenamed here since callers may reference them.
+ */
+ uint8_t mlxi_query_eq_rsvd2[8];
+ uint64be_t mlxi_query_eq_event_bitmask;
+ uint8_t mlxi_query_eq_rsvd3[176];
+ uint64be_t mlxi_create_eq_pas[MLXCX_CREATE_QUEUE_MAX_PAGES];
+} mlxcx_cmd_query_eq_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_destroy_eq_head;
+ uint8_t mlxi_destroy_eq_rsvd[3];
+ uint8_t mlxi_destroy_eq_eqn;
+ uint8_t mlxi_destroy_eq_rsvd2[4];
+} mlxcx_cmd_destroy_eq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_destroy_eq_head;
+ uint8_t mlxo_destroy_eq_rsvd[8];
+} mlxcx_cmd_destroy_eq_out_t;
+
+/* ALLOC_PD / DEALLOC_PD: protection domain number management. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_alloc_pd_head;
+ uint8_t mlxi_alloc_pd_rsvd[8];
+} mlxcx_cmd_alloc_pd_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_alloc_pd_head;
+ uint8_t mlxo_alloc_pd_rsvd;
+ uint24be_t mlxo_alloc_pd_pdn;
+ uint8_t mlxo_alloc_pd_rsvd2[4];
+} mlxcx_cmd_alloc_pd_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_dealloc_pd_head;
+ uint8_t mlxi_dealloc_pd_rsvd;
+ uint24be_t mlxi_dealloc_pd_pdn;
+ uint8_t mlxi_dealloc_pd_rsvd2[4];
+} mlxcx_cmd_dealloc_pd_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_dealloc_pd_head;
+ uint8_t mlxo_dealloc_pd_rsvd[8];
+} mlxcx_cmd_dealloc_pd_out_t;
+
+/* ALLOC/DEALLOC_TRANSPORT_DOMAIN: same shape as the PD commands. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_alloc_tdom_head;
+ uint8_t mlxi_alloc_tdom_rsvd[8];
+} mlxcx_cmd_alloc_tdom_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_alloc_tdom_head;
+ uint8_t mlxo_alloc_tdom_rsvd;
+ uint24be_t mlxo_alloc_tdom_tdomn;
+ uint8_t mlxo_alloc_tdom_rsvd2[4];
+} mlxcx_cmd_alloc_tdom_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_dealloc_tdom_head;
+ uint8_t mlxi_dealloc_tdom_rsvd;
+ uint24be_t mlxi_dealloc_tdom_tdomn;
+ uint8_t mlxi_dealloc_tdom_rsvd2[4];
+} mlxcx_cmd_dealloc_tdom_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_dealloc_tdom_head;
+ uint8_t mlxo_dealloc_tdom_rsvd[8];
+} mlxcx_cmd_dealloc_tdom_out_t;
+
+/* CREATE/DESTROY_TIR: transport interface receive objects. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_create_tir_head;
+ uint8_t mlxi_create_tir_rsvd[24];
+ mlxcx_tir_ctx_t mlxi_create_tir_context;
+} mlxcx_cmd_create_tir_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_create_tir_head;
+ uint8_t mlxo_create_tir_rsvd;
+ uint24be_t mlxo_create_tir_tirn;
+ uint8_t mlxo_create_tir_rsvd2[4];
+} mlxcx_cmd_create_tir_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_destroy_tir_head;
+ uint8_t mlxi_destroy_tir_rsvd;
+ uint24be_t mlxi_destroy_tir_tirn;
+ uint8_t mlxi_destroy_tir_rsvd2[4];
+} mlxcx_cmd_destroy_tir_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_destroy_tir_head;
+ uint8_t mlxo_destroy_tir_rsvd[8];
+} mlxcx_cmd_destroy_tir_out_t;
+
+/* CREATE/DESTROY_TIS: transport interface send objects. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_create_tis_head;
+ uint8_t mlxi_create_tis_rsvd[24];
+ mlxcx_tis_ctx_t mlxi_create_tis_context;
+} mlxcx_cmd_create_tis_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_create_tis_head;
+ uint8_t mlxo_create_tis_rsvd;
+ uint24be_t mlxo_create_tis_tisn;
+ uint8_t mlxo_create_tis_rsvd2[4];
+} mlxcx_cmd_create_tis_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_destroy_tis_head;
+ uint8_t mlxi_destroy_tis_rsvd;
+ uint24be_t mlxi_destroy_tis_tisn;
+ uint8_t mlxi_destroy_tis_rsvd2[4];
+} mlxcx_cmd_destroy_tis_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_destroy_tis_head;
+ uint8_t mlxo_destroy_tis_rsvd[8];
+} mlxcx_cmd_destroy_tis_out_t;
+
+/* QUERY_SPECIAL_CONTEXTS: fetch the reserved lkey and null mkey. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_query_special_ctxs_head;
+ uint8_t mlxi_query_special_ctxs_rsvd[8];
+} mlxcx_cmd_query_special_ctxs_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_query_special_ctxs_head;
+ uint8_t mlxo_query_special_ctxs_rsvd[4];
+ uint32be_t mlxo_query_special_ctxs_resd_lkey;
+ uint32be_t mlxo_query_special_ctxs_null_mkey;
+ uint8_t mlxo_query_special_ctxs_rsvd2[12];
+} mlxcx_cmd_query_special_ctxs_out_t;
+
+/* op_mod values used by the vport query/modify commands below. */
+typedef enum {
+ MLXCX_VPORT_TYPE_VNIC = 0x0,
+ MLXCX_VPORT_TYPE_ESWITCH = 0x1,
+ MLXCX_VPORT_TYPE_UPLINK = 0x2,
+} mlxcx_cmd_vport_op_mod_t;
+
+/*
+ * QUERY_NIC_VPORT_CONTEXT: _other_vport selects another function's vport
+ * (identified by _vport_number) rather than our own.
+ */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_query_nic_vport_ctx_head;
+ uint8_t mlxi_query_nic_vport_ctx_other_vport;
+ uint8_t mlxi_query_nic_vport_ctx_rsvd[1];
+ uint16be_t mlxi_query_nic_vport_ctx_vport_number;
+ uint8_t mlxi_query_nic_vport_ctx_allowed_list_type;
+ uint8_t mlxi_query_nic_vport_ctx_rsvd2[3];
+} mlxcx_cmd_query_nic_vport_ctx_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_query_nic_vport_ctx_head;
+ uint8_t mlxo_query_nic_vport_ctx_rsvd[8];
+ mlxcx_nic_vport_ctx_t mlxo_query_nic_vport_ctx_context;
+} mlxcx_cmd_query_nic_vport_ctx_out_t;
+
+/* Bits for the field_select mask of MODIFY_NIC_VPORT_CONTEXT. */
+typedef enum {
+ MLXCX_MODIFY_NIC_VPORT_CTX_ROCE_EN = 1 << 1,
+ MLXCX_MODIFY_NIC_VPORT_CTX_ADDR_LIST = 1 << 2,
+ MLXCX_MODIFY_NIC_VPORT_CTX_PERM_ADDR = 1 << 3,
+ MLXCX_MODIFY_NIC_VPORT_CTX_PROMISC = 1 << 4,
+ MLXCX_MODIFY_NIC_VPORT_CTX_EVENT = 1 << 5,
+ MLXCX_MODIFY_NIC_VPORT_CTX_MTU = 1 << 6,
+ MLXCX_MODIFY_NIC_VPORT_CTX_WQE_INLINE = 1 << 7,
+ MLXCX_MODIFY_NIC_VPORT_CTX_PORT_GUID = 1 << 8,
+ MLXCX_MODIFY_NIC_VPORT_CTX_NODE_GUID = 1 << 9,
+} mlxcx_modify_nic_vport_ctx_fields_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_modify_nic_vport_ctx_head;
+ uint8_t mlxi_modify_nic_vport_ctx_other_vport;
+ uint8_t mlxi_modify_nic_vport_ctx_rsvd[1];
+ uint16be_t mlxi_modify_nic_vport_ctx_vport_number;
+ /* Which fields of the context below to apply; see enum above. */
+ uint32be_t mlxi_modify_nic_vport_ctx_field_select;
+ uint8_t mlxi_modify_nic_vport_ctx_rsvd2[240];
+ mlxcx_nic_vport_ctx_t mlxi_modify_nic_vport_ctx_context;
+} mlxcx_cmd_modify_nic_vport_ctx_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_modify_nic_vport_ctx_head;
+ uint8_t mlxo_modify_nic_vport_ctx_rsvd[8];
+} mlxcx_cmd_modify_nic_vport_ctx_out_t;
+
+/* QUERY_VPORT_STATE: fetch admin/oper state for a vport. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_query_vport_state_head;
+ uint8_t mlxi_query_vport_state_other_vport;
+ uint8_t mlxi_query_vport_state_rsvd[1];
+ uint16be_t mlxi_query_vport_state_vport_number;
+ uint8_t mlxi_query_vport_state_rsvd2[4];
+} mlxcx_cmd_query_vport_state_in_t;
+
+/* Nibble positions within mlxo_query_vport_state_state below. */
+/* CSTYLED */
+#define MLXCX_VPORT_ADMIN_STATE (bitdef_t){4, 0xF0}
+/* CSTYLED */
+#define MLXCX_VPORT_OPER_STATE (bitdef_t){0, 0x0F}
+
+typedef enum {
+ MLXCX_VPORT_OPER_STATE_DOWN = 0x0,
+ MLXCX_VPORT_OPER_STATE_UP = 0x1,
+} mlxcx_vport_oper_state_t;
+
+typedef enum {
+ MLXCX_VPORT_ADMIN_STATE_DOWN = 0x0,
+ MLXCX_VPORT_ADMIN_STATE_UP = 0x1,
+ MLXCX_VPORT_ADMIN_STATE_FOLLOW = 0x2,
+} mlxcx_vport_admin_state_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_query_vport_state_head;
+ uint8_t mlxo_query_vport_state_rsvd[4];
+ uint16be_t mlxo_query_vport_state_max_tx_speed;
+ uint8_t mlxo_query_vport_state_rsvd2[1];
+ /* Packed admin/oper nibbles; see the bitdefs above. */
+ uint8_t mlxo_query_vport_state_state;
+} mlxcx_cmd_query_vport_state_out_t;
+
+/* CREATE/DESTROY/QUERY_CQ: completion queue management. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_create_cq_head;
+ uint8_t mlxi_create_cq_rsvd[8];
+ mlxcx_completionq_ctx_t mlxi_create_cq_context;
+ uint8_t mlxi_create_cq_rsvd2[192];
+ uint64be_t mlxi_create_cq_pas[MLXCX_CREATE_QUEUE_MAX_PAGES];
+} mlxcx_cmd_create_cq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_create_cq_head;
+ uint8_t mlxo_create_cq_rsvd;
+ uint24be_t mlxo_create_cq_cqn;
+ uint8_t mlxo_create_cq_rsvd2[4];
+} mlxcx_cmd_create_cq_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_destroy_cq_head;
+ uint8_t mlxi_destroy_cq_rsvd;
+ uint24be_t mlxi_destroy_cq_cqn;
+ uint8_t mlxi_destroy_cq_rsvd2[4];
+} mlxcx_cmd_destroy_cq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_destroy_cq_head;
+ uint8_t mlxo_destroy_cq_rsvd[8];
+} mlxcx_cmd_destroy_cq_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_query_cq_head;
+ uint8_t mlxi_query_cq_rsvd;
+ uint24be_t mlxi_query_cq_cqn;
+ uint8_t mlxi_query_cq_rsvd2[4];
+} mlxcx_cmd_query_cq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_query_cq_head;
+ uint8_t mlxo_query_cq_rsvd[8];
+ mlxcx_completionq_ctx_t mlxo_query_cq_context;
+ uint8_t mlxo_query_cq_rsvd2[192];
+ uint64be_t mlxo_query_cq_pas[MLXCX_CREATE_QUEUE_MAX_PAGES];
+} mlxcx_cmd_query_cq_out_t;
+
+/* CREATE/MODIFY/QUERY/DESTROY_RQ: receive queue management. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_create_rq_head;
+ uint8_t mlxi_create_rq_rsvd[24];
+ mlxcx_rq_ctx_t mlxi_create_rq_context;
+} mlxcx_cmd_create_rq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_create_rq_head;
+ uint8_t mlxo_create_rq_rsvd;
+ uint24be_t mlxo_create_rq_rqn;
+ uint8_t mlxo_create_rq_rsvd2[4];
+} mlxcx_cmd_create_rq_out_t;
+
+/* Location of the requested-state nibble in mlxi_modify_rq_state. */
+/* CSTYLED */
+#define MLXCX_CMD_MODIFY_RQ_STATE (bitdef_t){ \
+ .bit_shift = 4, .bit_mask = 0xF0 }
+
+/* Bits for mlxi_modify_rq_bitmask: which RQ fields to change. */
+typedef enum {
+ MLXCX_MODIFY_RQ_SCATTER_FCS = 1 << 2,
+ MLXCX_MODIFY_RQ_VSD = 1 << 1,
+ MLXCX_MODIFY_RQ_COUNTER_SET_ID = 1 << 3,
+ MLXCX_MODIFY_RQ_LWM = 1 << 0
+} mlxcx_cmd_modify_rq_bitmask_t;
+
+typedef enum {
+ MLXCX_RQ_STATE_RST = 0x0,
+ MLXCX_RQ_STATE_RDY = 0x1,
+ MLXCX_RQ_STATE_ERR = 0x3
+} mlxcx_rq_state_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_modify_rq_head;
+ bits8_t mlxi_modify_rq_state;
+ uint24be_t mlxi_modify_rq_rqn;
+ uint8_t mlxi_modify_rq_rsvd[4];
+ uint64be_t mlxi_modify_rq_bitmask;
+ uint8_t mlxi_modify_rq_rsvd2[8];
+ mlxcx_rq_ctx_t mlxi_modify_rq_context;
+} mlxcx_cmd_modify_rq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_modify_rq_head;
+ uint8_t mlxo_modify_rq_rsvd[8];
+} mlxcx_cmd_modify_rq_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_query_rq_head;
+ uint8_t mlxi_query_rq_rsvd;
+ uint24be_t mlxi_query_rq_rqn;
+ uint8_t mlxi_query_rq_rsvd2[4];
+} mlxcx_cmd_query_rq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_query_rq_head;
+ uint8_t mlxo_query_rq_rsvd[24];
+ mlxcx_rq_ctx_t mlxo_query_rq_context;
+} mlxcx_cmd_query_rq_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_destroy_rq_head;
+ uint8_t mlxi_destroy_rq_rsvd;
+ uint24be_t mlxi_destroy_rq_rqn;
+ uint8_t mlxi_destroy_rq_rsvd2[4];
+} mlxcx_cmd_destroy_rq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_destroy_rq_head;
+ uint8_t mlxo_destroy_rq_rsvd[8];
+} mlxcx_cmd_destroy_rq_out_t;
+
+/* CREATE/MODIFY/QUERY/DESTROY_SQ: send queue management (mirrors RQ). */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_create_sq_head;
+ uint8_t mlxi_create_sq_rsvd[24];
+ mlxcx_sq_ctx_t mlxi_create_sq_context;
+} mlxcx_cmd_create_sq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_create_sq_head;
+ uint8_t mlxo_create_sq_rsvd;
+ uint24be_t mlxo_create_sq_sqn;
+ uint8_t mlxo_create_sq_rsvd2[4];
+} mlxcx_cmd_create_sq_out_t;
+
+/* Location of the requested-state nibble in mlxi_modify_sq_state. */
+/* CSTYLED */
+#define MLXCX_CMD_MODIFY_SQ_STATE (bitdef_t){ \
+ .bit_shift = 4, .bit_mask = 0xF0 }
+
+typedef enum {
+ MLXCX_MODIFY_SQ_PACKET_PACING_INDEX = 1 << 0,
+} mlxcx_cmd_modify_sq_bitmask_t;
+
+typedef enum {
+ MLXCX_SQ_STATE_RST = 0x0,
+ MLXCX_SQ_STATE_RDY = 0x1,
+ MLXCX_SQ_STATE_ERR = 0x3
+} mlxcx_sq_state_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_modify_sq_head;
+ bits8_t mlxi_modify_sq_state;
+ uint24be_t mlxi_modify_sq_sqn;
+ uint8_t mlxi_modify_sq_rsvd[4];
+ uint64be_t mlxi_modify_sq_bitmask;
+ uint8_t mlxi_modify_sq_rsvd2[8];
+ mlxcx_sq_ctx_t mlxi_modify_sq_context;
+} mlxcx_cmd_modify_sq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_modify_sq_head;
+ uint8_t mlxo_modify_sq_rsvd[8];
+} mlxcx_cmd_modify_sq_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_query_sq_head;
+ uint8_t mlxi_query_sq_rsvd;
+ uint24be_t mlxi_query_sq_sqn;
+ uint8_t mlxi_query_sq_rsvd2[4];
+} mlxcx_cmd_query_sq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_query_sq_head;
+ uint8_t mlxo_query_sq_rsvd[24];
+ mlxcx_sq_ctx_t mlxo_query_sq_context;
+} mlxcx_cmd_query_sq_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_destroy_sq_head;
+ uint8_t mlxi_destroy_sq_rsvd;
+ uint24be_t mlxi_destroy_sq_sqn;
+ uint8_t mlxi_destroy_sq_rsvd2[4];
+} mlxcx_cmd_destroy_sq_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_destroy_sq_head;
+ uint8_t mlxo_destroy_sq_rsvd[8];
+} mlxcx_cmd_destroy_sq_out_t;
+
+/* CREATE/DESTROY_RQT: RQ table (RSS indirection) management. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_create_rqt_head;
+ uint8_t mlxi_create_rqt_rsvd[24];
+ mlxcx_rqtable_ctx_t mlxi_create_rqt_context;
+} mlxcx_cmd_create_rqt_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_create_rqt_head;
+ uint8_t mlxo_create_rqt_rsvd;
+ uint24be_t mlxo_create_rqt_rqtn;
+ uint8_t mlxo_create_rqt_rsvd2[4];
+} mlxcx_cmd_create_rqt_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_destroy_rqt_head;
+ uint8_t mlxi_destroy_rqt_rsvd;
+ uint24be_t mlxi_destroy_rqt_rqtn;
+ uint8_t mlxi_destroy_rqt_rsvd2[4];
+} mlxcx_cmd_destroy_rqt_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_destroy_rqt_head;
+ uint8_t mlxo_destroy_rqt_rsvd[8];
+} mlxcx_cmd_destroy_rqt_out_t;
+
+/* table_type values shared by the flow-table commands below. */
+typedef enum {
+ MLXCX_FLOW_TABLE_NIC_RX = 0x0,
+ MLXCX_FLOW_TABLE_NIC_TX = 0x1,
+ MLXCX_FLOW_TABLE_ESW_OUT = 0x2,
+ MLXCX_FLOW_TABLE_ESW_IN = 0x3,
+ MLXCX_FLOW_TABLE_ESW_FDB = 0x4,
+ MLXCX_FLOW_TABLE_NIC_RX_SNIFF = 0x5,
+ MLXCX_FLOW_TABLE_NIC_TX_SNIFF = 0x6,
+ MLXCX_FLOW_TABLE_NIC_RX_RDMA = 0x7,
+ MLXCX_FLOW_TABLE_NIC_TX_RDMA = 0x8
+} mlxcx_flow_table_type_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_create_flow_table_head;
+ uint8_t mlxi_create_flow_table_other_vport;
+ uint8_t mlxi_create_flow_table_rsvd;
+ uint16be_t mlxi_create_flow_table_vport_number;
+ uint8_t mlxi_create_flow_table_rsvd2[4];
+ uint8_t mlxi_create_flow_table_table_type;
+ uint8_t mlxi_create_flow_table_rsvd3[7];
+ mlxcx_flow_table_ctx_t mlxi_create_flow_table_context;
+} mlxcx_cmd_create_flow_table_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_create_flow_table_head;
+ uint8_t mlxo_create_flow_table_rsvd;
+ uint24be_t mlxo_create_flow_table_table_id;
+ uint8_t mlxo_create_flow_table_rsvd2[4];
+} mlxcx_cmd_create_flow_table_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_destroy_flow_table_head;
+ uint8_t mlxi_destroy_flow_table_other_vport;
+ uint8_t mlxi_destroy_flow_table_rsvd;
+ uint16be_t mlxi_destroy_flow_table_vport_number;
+ uint8_t mlxi_destroy_flow_table_rsvd2[4];
+ uint8_t mlxi_destroy_flow_table_table_type;
+ uint8_t mlxi_destroy_flow_table_rsvd3[4];
+ uint24be_t mlxi_destroy_flow_table_table_id;
+ uint8_t mlxi_destroy_flow_table_rsvd4[4];
+} mlxcx_cmd_destroy_flow_table_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_destroy_flow_table_head;
+ uint8_t mlxo_destroy_flow_table_rsvd[8];
+} mlxcx_cmd_destroy_flow_table_out_t;
+
+/* SET_FLOW_TABLE_ROOT: make the given table the root for its type. */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_set_flow_table_root_head;
+ uint8_t mlxi_set_flow_table_root_other_vport;
+ uint8_t mlxi_set_flow_table_root_rsvd;
+ uint16be_t mlxi_set_flow_table_root_vport_number;
+ uint8_t mlxi_set_flow_table_root_rsvd2[4];
+ uint8_t mlxi_set_flow_table_root_table_type;
+ uint8_t mlxi_set_flow_table_root_rsvd3[4];
+ uint24be_t mlxi_set_flow_table_root_table_id;
+ uint8_t mlxi_set_flow_table_root_rsvd4[4];
+} mlxcx_cmd_set_flow_table_root_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_set_flow_table_root_head;
+ uint8_t mlxo_set_flow_table_root_rsvd[8];
+} mlxcx_cmd_set_flow_table_root_out_t;
+
+/* Bits for match_criteria_en: which match sections a group uses. */
+typedef enum {
+ MLXCX_FLOW_GROUP_MATCH_OUTER_HDRS = 1 << 0,
+ MLXCX_FLOW_GROUP_MATCH_MISC_PARAMS = 1 << 1,
+ MLXCX_FLOW_GROUP_MATCH_INNER_HDRS = 1 << 2,
+} mlxcx_flow_group_match_criteria_t;
+
+/*
+ * CREATE_FLOW_GROUP: a group covers the flow-index range
+ * [start_flow_index, end_flow_index] of the given table.
+ */
+typedef struct {
+ mlxcx_cmd_in_t mlxi_create_flow_group_head;
+ uint8_t mlxi_create_flow_group_other_vport;
+ uint8_t mlxi_create_flow_group_rsvd;
+ uint16be_t mlxi_create_flow_group_vport_number;
+ uint8_t mlxi_create_flow_group_rsvd2[4];
+ uint8_t mlxi_create_flow_group_table_type;
+ uint8_t mlxi_create_flow_group_rsvd3[4];
+ uint24be_t mlxi_create_flow_group_table_id;
+ uint8_t mlxi_create_flow_group_rsvd4[4];
+ uint32be_t mlxi_create_flow_group_start_flow_index;
+ uint8_t mlxi_create_flow_group_rsvd5[4];
+ uint32be_t mlxi_create_flow_group_end_flow_index;
+ uint8_t mlxi_create_flow_group_rsvd6[23];
+ uint8_t mlxi_create_flow_group_match_criteria_en;
+ mlxcx_flow_match_t mlxi_create_flow_group_match_criteria;
+ uint8_t mlxi_create_flow_group_rsvd7[448];
+} mlxcx_cmd_create_flow_group_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_create_flow_group_head;
+ uint8_t mlxo_create_flow_group_rsvd;
+ uint24be_t mlxo_create_flow_group_group_id;
+ uint8_t mlxo_create_flow_group_rsvd2[4];
+} mlxcx_cmd_create_flow_group_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_destroy_flow_group_head;
+ uint8_t mlxi_destroy_flow_group_other_vport;
+ uint8_t mlxi_destroy_flow_group_rsvd;
+ uint16be_t mlxi_destroy_flow_group_vport_number;
+ uint8_t mlxi_destroy_flow_group_rsvd2[4];
+ uint8_t mlxi_destroy_flow_group_table_type;
+ uint8_t mlxi_destroy_flow_group_rsvd3[4];
+ uint24be_t mlxi_destroy_flow_group_table_id;
+ uint32be_t mlxi_destroy_flow_group_group_id;
+ uint8_t mlxi_destroy_flow_group_rsvd4[36];
+} mlxcx_cmd_destroy_flow_group_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_destroy_flow_group_head;
+ uint8_t mlxo_destroy_flow_group_rsvd[8];
+} mlxcx_cmd_destroy_flow_group_out_t;
+
+/* op_mod values for SET_FLOW_TABLE_ENTRY: create new vs. modify existing. */
+typedef enum {
+ MLXCX_CMD_FLOW_ENTRY_SET_NEW = 0,
+ MLXCX_CMD_FLOW_ENTRY_MODIFY = 1,
+} mlxcx_cmd_set_flow_table_entry_opmod_t;
+
+/* Bits for modify_bitmask: which parts of an existing entry to change. */
+typedef enum {
+ MLXCX_CMD_FLOW_ENTRY_SET_ACTION = 1 << 0,
+ MLXCX_CMD_FLOW_ENTRY_SET_FLOW_TAG = 1 << 1,
+ MLXCX_CMD_FLOW_ENTRY_SET_DESTINATION = 1 << 2,
+ MLXCX_CMD_FLOW_ENTRY_SET_COUNTERS = 1 << 3,
+ MLXCX_CMD_FLOW_ENTRY_SET_ENCAP = 1 << 4
+} mlxcx_cmd_set_flow_table_entry_bitmask_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_set_flow_table_entry_head;
+ uint8_t mlxi_set_flow_table_entry_other_vport;
+ uint8_t mlxi_set_flow_table_entry_rsvd;
+ uint16be_t mlxi_set_flow_table_entry_vport_number;
+ uint8_t mlxi_set_flow_table_entry_rsvd2[4];
+ uint8_t mlxi_set_flow_table_entry_table_type;
+ uint8_t mlxi_set_flow_table_entry_rsvd3[4];
+ uint24be_t mlxi_set_flow_table_entry_table_id;
+ uint8_t mlxi_set_flow_table_entry_rsvd4[3];
+ bits8_t mlxi_set_flow_table_entry_modify_bitmask;
+ uint8_t mlxi_set_flow_table_entry_rsvd5[4];
+ uint32be_t mlxi_set_flow_table_entry_flow_index;
+ uint8_t mlxi_set_flow_table_entry_rsvd6[28];
+ mlxcx_flow_entry_ctx_t mlxi_set_flow_table_entry_context;
+} mlxcx_cmd_set_flow_table_entry_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_set_flow_table_entry_head;
+ uint8_t mlxo_set_flow_table_entry_rsvd[8];
+} mlxcx_cmd_set_flow_table_entry_out_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_delete_flow_table_entry_head;
+ uint8_t mlxi_delete_flow_table_entry_other_vport;
+ uint8_t mlxi_delete_flow_table_entry_rsvd;
+ uint16be_t mlxi_delete_flow_table_entry_vport_number;
+ uint8_t mlxi_delete_flow_table_entry_rsvd2[4];
+ uint8_t mlxi_delete_flow_table_entry_table_type;
+ uint8_t mlxi_delete_flow_table_entry_rsvd3[4];
+ uint24be_t mlxi_delete_flow_table_entry_table_id;
+ uint8_t mlxi_delete_flow_table_entry_rsvd4[8];
+ uint32be_t mlxi_delete_flow_table_entry_flow_index;
+ uint8_t mlxi_delete_flow_table_entry_rsvd5[28];
+} mlxcx_cmd_delete_flow_table_entry_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_delete_flow_table_entry_head;
+ uint8_t mlxo_delete_flow_table_entry_rsvd[8];
+} mlxcx_cmd_delete_flow_table_entry_out_t;
+
+/* op_mod for CONFIG_INT_MODERATION: read back vs. write settings. */
+typedef enum {
+ MLXCX_CMD_CONFIG_INT_MOD_READ = 1,
+ MLXCX_CMD_CONFIG_INT_MOD_WRITE = 0
+} mlxcx_cmd_config_int_mod_opmod_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_config_int_mod_head;
+ uint16be_t mlxi_config_int_mod_min_delay;
+ uint16be_t mlxi_config_int_mod_int_vector;
+ uint8_t mlxi_config_int_mod_rsvd[4];
+} mlxcx_cmd_config_int_mod_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_config_int_mod_head;
+ uint16be_t mlxo_config_int_mod_min_delay;
+ uint16be_t mlxo_config_int_mod_int_vector;
+ uint8_t mlxo_config_int_mod_rsvd[4];
+} mlxcx_cmd_config_int_mod_out_t;
+
+/*
+ * PMTU port register layout (accessed via ACCESS_REG): max, admin, and
+ * operational MTU for a local port.
+ */
+typedef struct {
+ uint8_t mlrd_pmtu_rsvd;
+ uint8_t mlrd_pmtu_local_port;
+ uint8_t mlrd_pmtu_rsvd2[2];
+
+ uint16be_t mlrd_pmtu_max_mtu;
+ uint8_t mlrd_pmtu_rsvd3[2];
+
+ uint16be_t mlrd_pmtu_admin_mtu;
+ uint8_t mlrd_pmtu_rsvd4[2];
+
+ uint16be_t mlrd_pmtu_oper_mtu;
+ uint8_t mlrd_pmtu_rsvd5[2];
+} mlxcx_reg_pmtu_t;
+
+typedef enum {
+ MLXCX_PORT_STATUS_UP = 1,
+ MLXCX_PORT_STATUS_DOWN = 2,
+ MLXCX_PORT_STATUS_UP_ONCE = 3,
+ MLXCX_PORT_STATUS_DISABLED = 4,
+} mlxcx_port_status_t;
+
+typedef enum {
+ MLXCX_PAOS_ADMIN_ST_EN = 1UL << 31,
+} mlxcx_paos_flags_t;
+
+typedef struct {
+ uint8_t mlrd_paos_swid;
+ uint8_t mlrd_paos_local_port;
+ uint8_t mlrd_paos_admin_status;
+ uint8_t mlrd_paos_oper_status;
+ bits32_t mlrd_paos_flags;
+ uint8_t mlrd_paos_rsvd[8];
+} mlxcx_reg_paos_t;
+
+typedef enum {
+ MLXCX_PROTO_SGMII = 1 << 0,
+ MLXCX_PROTO_1000BASE_KX = 1 << 1,
+ MLXCX_PROTO_10GBASE_CX4 = 1 << 2,
+ MLXCX_PROTO_10GBASE_KX4 = 1 << 3,
+ MLXCX_PROTO_10GBASE_KR = 1 << 4,
+ MLXCX_PROTO_UNKNOWN_1 = 1 << 5,
+ MLXCX_PROTO_40GBASE_CR4 = 1 << 6,
+ MLXCX_PROTO_40GBASE_KR4 = 1 << 7,
+ MLXCX_PROTO_UNKNOWN_2 = 1 << 8,
+ MLXCX_PROTO_SGMII_100BASE = 1 << 9,
+ MLXCX_PROTO_UNKNOWN_3 = 1 << 10,
+ MLXCX_PROTO_UNKNOWN_4 = 1 << 11,
+ MLXCX_PROTO_10GBASE_CR = 1 << 12,
+ MLXCX_PROTO_10GBASE_SR = 1 << 13,
+ MLXCX_PROTO_10GBASE_ER_LR = 1 << 14,
+ MLXCX_PROTO_40GBASE_SR4 = 1 << 15,
+ MLXCX_PROTO_40GBASE_LR4_ER4 = 1 << 16,
+ MLXCX_PROTO_UNKNOWN_5 = 1 << 17,
+ MLXCX_PROTO_50GBASE_SR2 = 1 << 18,
+ MLXCX_PROTO_UNKNOWN_6 = 1 << 19,
+ MLXCX_PROTO_100GBASE_CR4 = 1 << 20,
+ MLXCX_PROTO_100GBASE_SR4 = 1 << 21,
+ MLXCX_PROTO_100GBASE_KR4 = 1 << 22,
+ MLXCX_PROTO_UNKNOWN_7 = 1 << 23,
+ MLXCX_PROTO_UNKNOWN_8 = 1 << 24,
+ MLXCX_PROTO_UNKNOWN_9 = 1 << 25,
+ MLXCX_PROTO_UNKNOWN_10 = 1 << 26,
+ MLXCX_PROTO_25GBASE_CR = 1 << 27,
+ MLXCX_PROTO_25GBASE_KR = 1 << 28,
+ MLXCX_PROTO_25GBASE_SR = 1 << 29,
+ MLXCX_PROTO_50GBASE_CR2 = 1 << 30,
+ MLXCX_PROTO_50GBASE_KR2 = 1UL << 31,
+} mlxcx_eth_proto_t;
+
+typedef enum {
+ MLXCX_AUTONEG_DISABLE_CAP = 1 << 5,
+ MLXCX_AUTONEG_DISABLE = 1 << 6
+} mlxcx_autoneg_flags_t;
+
+typedef enum {
+ MLXCX_PTYS_PROTO_MASK_IB = 1 << 0,
+ MLXCX_PTYS_PROTO_MASK_ETH = 1 << 2,
+} mlxcx_reg_ptys_proto_mask_t;
+
+/*
+ * PTYS register: port type and speed. Reports capability, admin,
+ * operational and partner-advertised protocol bitmasks
+ * (mlxcx_eth_proto_t values when proto_mask selects Ethernet).
+ */
+typedef struct {
+ bits8_t mlrd_ptys_autoneg_flags;
+ uint8_t mlrd_ptys_local_port;
+ uint8_t mlrd_ptys_rsvd;
+ bits8_t mlrd_ptys_proto_mask;
+
+ bits8_t mlrd_ptys_autoneg_status;
+ uint8_t mlrd_ptys_rsvd2;
+ uint16be_t mlrd_ptys_data_rate_oper;
+
+ uint8_t mlrd_ptys_rsvd3[4];
+
+ bits32_t mlrd_ptys_proto_cap;
+ uint8_t mlrd_ptys_rsvd4[8];
+ bits32_t mlrd_ptys_proto_admin;
+ uint8_t mlrd_ptys_rsvd5[8];
+ bits32_t mlrd_ptys_proto_oper;
+ uint8_t mlrd_ptys_rsvd6[8];
+ bits32_t mlrd_ptys_proto_partner_advert;
+ uint8_t mlrd_ptys_rsvd7[12];
+} mlxcx_reg_ptys_t;
+
+/* Which LED(s) a beacon request applies to (MLCR register). */
+typedef enum {
+ MLXCX_LED_TYPE_BOTH = 0x0,
+ MLXCX_LED_TYPE_UID = 0x1,
+ MLXCX_LED_TYPE_PORT = 0x2,
+} mlxcx_led_type_t;
+
+#define MLXCX_MLCR_INDIVIDUAL_ONLY (1 << 4)
+/* Bit 4 and the low nibble live in mlrd_mlcr_flags below. */
+/* CSTYLED */
+#define MLXCX_MLCR_LED_TYPE (bitdef_t){ 0, 0x0F }
+
+/* MLCR register: LED beacon control (duration/remaining in seconds?
+ * NOTE(review): units not visible here -- confirm against the PRM). */
+typedef struct {
+ uint8_t mlrd_mlcr_rsvd;
+ uint8_t mlrd_mlcr_local_port;
+ uint8_t mlrd_mlcr_rsvd2;
+ bits8_t mlrd_mlcr_flags;
+ uint8_t mlrd_mlcr_rsvd3[2];
+ uint16be_t mlrd_mlcr_beacon_duration;
+ uint8_t mlrd_mlcr_rsvd4[2];
+ uint16be_t mlrd_mlcr_beacon_remain;
+} mlxcx_reg_mlcr_t;
+
+/* PMAOS register: transceiver module admin/oper state and events. */
+typedef struct {
+ uint8_t mlrd_pmaos_rsvd;
+ uint8_t mlrd_pmaos_module;
+ uint8_t mlrd_pmaos_admin_status;
+ uint8_t mlrd_pmaos_oper_status;
+ bits8_t mlrd_pmaos_flags;
+ uint8_t mlrd_pmaos_rsvd2;
+ uint8_t mlrd_pmaos_error_type;
+ uint8_t mlrd_pmaos_event_en;
+ uint8_t mlrd_pmaos_rsvd3[8];
+} mlxcx_reg_pmaos_t;
+
+/* Status codes returned in the MCIA register's status field. */
+typedef enum {
+ MLXCX_MCIA_STATUS_OK = 0x0,
+ MLXCX_MCIA_STATUS_NO_EEPROM = 0x1,
+ MLXCX_MCIA_STATUS_NOT_SUPPORTED = 0x2,
+ MLXCX_MCIA_STATUS_NOT_CONNECTED = 0x3,
+ MLXCX_MCIA_STATUS_I2C_ERROR = 0x9,
+ MLXCX_MCIA_STATUS_DISABLED = 0x10
+} mlxcx_mcia_status_t;
+
+/* MCIA register: cable/module EEPROM access (up to 48 bytes per read). */
+typedef struct {
+ bits8_t mlrd_mcia_flags;
+ uint8_t mlrd_mcia_module;
+ uint8_t mlrd_mcia_rsvd;
+ uint8_t mlrd_mcia_status;
+ uint8_t mlrd_mcia_i2c_device_addr;
+ uint8_t mlrd_mcia_page_number;
+ uint16be_t mlrd_mcia_device_addr;
+ uint8_t mlrd_mcia_rsvd2[2];
+ uint16be_t mlrd_mcia_size;
+ uint8_t mlrd_mcia_rsvd3[4];
+ uint8_t mlrd_mcia_data[48];
+} mlxcx_reg_mcia_t;
+
+/* PPCNT counter set: IEEE 802.3 MAC statistics (all 64-bit, big-endian). */
+typedef struct {
+ uint64be_t mlppc_ieee_802_3_frames_tx;
+ uint64be_t mlppc_ieee_802_3_frames_rx;
+ uint64be_t mlppc_ieee_802_3_fcs_err;
+ uint64be_t mlppc_ieee_802_3_align_err;
+ uint64be_t mlppc_ieee_802_3_bytes_tx;
+ uint64be_t mlppc_ieee_802_3_bytes_rx;
+ uint64be_t mlppc_ieee_802_3_mcast_tx;
+ uint64be_t mlppc_ieee_802_3_bcast_tx;
+ uint64be_t mlppc_ieee_802_3_mcast_rx;
+ uint64be_t mlppc_ieee_802_3_bcast_rx;
+ uint64be_t mlppc_ieee_802_3_in_range_len_err;
+ uint64be_t mlppc_ieee_802_3_out_of_range_len_err;
+ uint64be_t mlppc_ieee_802_3_frame_too_long_err;
+ uint64be_t mlppc_ieee_802_3_symbol_err;
+ uint64be_t mlppc_ieee_802_3_mac_ctrl_tx;
+ uint64be_t mlppc_ieee_802_3_mac_ctrl_rx;
+ uint64be_t mlppc_ieee_802_3_unsup_opcodes_rx;
+ uint64be_t mlppc_ieee_802_3_pause_rx;
+ uint64be_t mlppc_ieee_802_3_pause_tx;
+} mlxcx_ppcnt_ieee_802_3_t;
+
+/* PPCNT counter set: RFC 2863 (IF-MIB) interface statistics. */
+typedef struct {
+ uint64be_t mlppc_rfc_2863_in_octets;
+ uint64be_t mlppc_rfc_2863_in_ucast_pkts;
+ uint64be_t mlppc_rfc_2863_in_discards;
+ uint64be_t mlppc_rfc_2863_in_errors;
+ uint64be_t mlppc_rfc_2863_in_unknown_protos;
+ uint64be_t mlppc_rfc_2863_out_octets;
+ uint64be_t mlppc_rfc_2863_out_ucast_pkts;
+ uint64be_t mlppc_rfc_2863_out_discards;
+ uint64be_t mlppc_rfc_2863_out_errors;
+ uint64be_t mlppc_rfc_2863_in_mcast_pkts;
+ uint64be_t mlppc_rfc_2863_in_bcast_pkts;
+ uint64be_t mlppc_rfc_2863_out_mcast_pkts;
+ uint64be_t mlppc_rfc_2863_out_bcast_pkts;
+} mlxcx_ppcnt_rfc_2863_t;
+
+/* PPCNT counter set: physical-layer statistics (BER as mantissa/exp). */
+typedef struct {
+ uint64be_t mlppc_phy_stats_time_since_last_clear;
+ uint64be_t mlppc_phy_stats_rx_bits;
+ uint64be_t mlppc_phy_stats_symbol_errs;
+ uint64be_t mlppc_phy_stats_corrected_bits;
+ uint8_t mlppc_phy_stats_rsvd[2];
+ uint8_t mlppc_phy_stats_raw_ber_mag;
+ uint8_t mlppc_phy_stats_raw_ber_coef;
+ uint8_t mlppc_phy_stats_rsvd2[2];
+ uint8_t mlppc_phy_stats_eff_ber_mag;
+ uint8_t mlppc_phy_stats_eff_ber_coef;
+} mlxcx_ppcnt_phy_stats_t;
+
+/* Counter group selector for the PPCNT register's grp field. */
+typedef enum {
+ MLXCX_PPCNT_GRP_IEEE_802_3 = 0x0,
+ MLXCX_PPCNT_GRP_RFC_2863 = 0x1,
+ MLXCX_PPCNT_GRP_RFC_2819 = 0x2,
+ MLXCX_PPCNT_GRP_RFC_3635 = 0x3,
+ MLXCX_PPCNT_GRP_ETH_EXTD = 0x5,
+ MLXCX_PPCNT_GRP_ETH_DISCARD = 0x6,
+ MLXCX_PPCNT_GRP_PER_PRIO = 0x10,
+ MLXCX_PPCNT_GRP_PER_TC = 0x11,
+ MLXCX_PPCNT_GRP_PER_TC_CONGEST = 0x13,
+ MLXCX_PPCNT_GRP_PHY_STATS = 0x16
+} mlxcx_ppcnt_grp_t;
+
+/* Clear bit for PPCNT: reading with CLEAR set zeroes the counters. */
+typedef enum {
+ MLXCX_PPCNT_CLEAR = (1 << 7),
+ MLXCX_PPCNT_NO_CLEAR = 0
+} mlxcx_ppcnt_clear_t;
+
+/*
+ * PPCNT register: port performance counters. The data area is
+ * interpreted according to the selected group (anonymous union).
+ */
+typedef struct {
+ uint8_t mlrd_ppcnt_swid;
+ uint8_t mlrd_ppcnt_local_port;
+ uint8_t mlrd_ppcnt_pnat;
+ uint8_t mlrd_ppcnt_grp;
+ uint8_t mlrd_ppcnt_clear;
+ uint8_t mlrd_ppcnt_rsvd[2];
+ uint8_t mlrd_ppcnt_prio_tc;
+ union {
+ uint8_t mlrd_ppcnt_data[248];
+ mlxcx_ppcnt_ieee_802_3_t mlrd_ppcnt_ieee_802_3;
+ mlxcx_ppcnt_rfc_2863_t mlrd_ppcnt_rfc_2863;
+ mlxcx_ppcnt_phy_stats_t mlrd_ppcnt_phy_stats;
+ };
+} mlxcx_reg_ppcnt_t;
+
+/* Register ids accepted by the ACCESS_REGISTER command. */
+typedef enum {
+ MLXCX_REG_PMTU = 0x5003,
+ MLXCX_REG_PTYS = 0x5004,
+ MLXCX_REG_PAOS = 0x5006,
+ MLXCX_REG_PMAOS = 0x5012,
+ MLXCX_REG_MSGI = 0x9021,
+ MLXCX_REG_MLCR = 0x902B,
+ MLXCX_REG_MCIA = 0x9014,
+ MLXCX_REG_PPCNT = 0x5008,
+} mlxcx_register_id_t;
+
+/* Union over all register payloads carried by ACCESS_REGISTER. */
+typedef union {
+ mlxcx_reg_pmtu_t mlrd_pmtu;
+ mlxcx_reg_paos_t mlrd_paos;
+ mlxcx_reg_ptys_t mlrd_ptys;
+ mlxcx_reg_mlcr_t mlrd_mlcr;
+ mlxcx_reg_pmaos_t mlrd_pmaos;
+ mlxcx_reg_mcia_t mlrd_mcia;
+ mlxcx_reg_ppcnt_t mlrd_ppcnt;
+} mlxcx_register_data_t;
+
+/* op_mod values for ACCESS_REGISTER (note: write is 0, read is 1). */
+typedef enum {
+ MLXCX_CMD_ACCESS_REGISTER_READ = 1,
+ MLXCX_CMD_ACCESS_REGISTER_WRITE = 0
+} mlxcx_cmd_reg_opmod_t;
+
+typedef struct {
+ mlxcx_cmd_in_t mlxi_access_register_head;
+ uint8_t mlxi_access_register_rsvd[2];
+ uint16be_t mlxi_access_register_register_id;
+ uint32be_t mlxi_access_register_argument;
+ mlxcx_register_data_t mlxi_access_register_data;
+} mlxcx_cmd_access_register_in_t;
+
+typedef struct {
+ mlxcx_cmd_out_t mlxo_access_register_head;
+ uint8_t mlxo_access_register_rsvd[8];
+ mlxcx_register_data_t mlxo_access_register_data;
+} mlxcx_cmd_access_register_out_t;
+
+#pragma pack()
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MLXCX_REG_H */
diff --git a/usr/src/uts/common/io/mlxcx/mlxcx_ring.c b/usr/src/uts/common/io/mlxcx/mlxcx_ring.c
new file mode 100644
index 0000000000..8337545b57
--- /dev/null
+++ b/usr/src/uts/common/io/mlxcx/mlxcx_ring.c
@@ -0,0 +1,2264 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2020, The University of Queensland
+ * Copyright (c) 2018, Joyent, Inc.
+ */
+
+/*
+ * Mellanox Connect-X 4/5/6 driver.
+ */
+
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/devops.h>
+#include <sys/sysmacros.h>
+#include <sys/atomic.h>
+#include <sys/cpuvar.h>
+
+#include <sys/pattr.h>
+#include <sys/dlpi.h>
+
+#include <sys/mac_provider.h>
+
+#include <sys/random.h>
+
+#include <mlxcx.h>
+
+/*
+ * Allocate the DMA memory backing a work queue: the WQE ring itself
+ * and the queue's doorbell record. Sizes the ring from the driver
+ * properties for the queue's type (send vs. receive), since SQ and RQ
+ * entries may differ in size. Sets MLXCX_WQ_ALLOC on success.
+ * Returns B_FALSE (with everything freed again) on failure.
+ */
+boolean_t
+mlxcx_wq_alloc_dma(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+ ddi_device_acc_attr_t acc;
+ ddi_dma_attr_t attr;
+ boolean_t ret;
+ size_t sz;
+
+ /* Must not already have ring memory allocated. */
+ VERIFY0(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+
+ /* Receive and send queue entries might be different sizes. */
+ switch (mlwq->mlwq_type) {
+ case MLXCX_WQ_TYPE_SENDQ:
+ mlwq->mlwq_entshift = mlxp->mlx_props.mldp_sq_size_shift;
+ mlwq->mlwq_nents = (1 << mlwq->mlwq_entshift);
+ sz = mlwq->mlwq_nents * sizeof (mlxcx_sendq_ent_t);
+ break;
+ case MLXCX_WQ_TYPE_RECVQ:
+ mlwq->mlwq_entshift = mlxp->mlx_props.mldp_rq_size_shift;
+ mlwq->mlwq_nents = (1 << mlwq->mlwq_entshift);
+ sz = mlwq->mlwq_nents * sizeof (mlxcx_recvq_ent_t);
+ break;
+ default:
+ VERIFY(0);
+ return (B_FALSE);
+ }
+ /* The ring must be a whole number of hardware pages. */
+ ASSERT3U(sz & (MLXCX_HW_PAGE_SIZE - 1), ==, 0);
+
+ mlxcx_dma_acc_attr(mlxp, &acc);
+ mlxcx_dma_queue_attr(mlxp, &attr);
+
+ ret = mlxcx_dma_alloc(mlxp, &mlwq->mlwq_dma, &attr, &acc,
+ B_TRUE, sz, B_TRUE);
+ if (!ret) {
+ mlxcx_warn(mlxp, "failed to allocate WQ memory");
+ return (B_FALSE);
+ }
+
+ /*
+ * Just set the first pointer in the union. Yes, this is a strict
+ * aliasing violation. No, I don't care.
+ */
+ mlwq->mlwq_send_ent = (mlxcx_sendq_ent_t *)mlwq->mlwq_dma.mxdb_va;
+
+ /* Doorbell memory uses its own (smaller) DMA attributes. */
+ mlxcx_dma_acc_attr(mlxp, &acc);
+ mlxcx_dma_qdbell_attr(mlxp, &attr);
+ sz = sizeof (mlxcx_workq_doorbell_t);
+ ret = mlxcx_dma_alloc(mlxp, &mlwq->mlwq_doorbell_dma, &attr, &acc,
+ B_TRUE, sz, B_TRUE);
+ if (!ret) {
+ mlxcx_warn(mlxp, "failed to allocate WQ doorbell memory");
+ /* Unwind the ring allocation so we leave no partial state. */
+ mlxcx_dma_free(&mlwq->mlwq_dma);
+ mlwq->mlwq_send_ent = NULL;
+ return (B_FALSE);
+ }
+
+ mlwq->mlwq_doorbell =
+ (mlxcx_workq_doorbell_t *)mlwq->mlwq_doorbell_dma.mxdb_va;
+
+ mlwq->mlwq_state |= MLXCX_WQ_ALLOC;
+
+ return (B_TRUE);
+}
+
+/*
+ * Release the DMA memory backing a work queue (WQE ring and doorbell).
+ * The WQ must currently be allocated, and if it was ever created on
+ * the hardware it must already have been destroyed. Clears
+ * MLXCX_WQ_ALLOC when done.
+ */
+void
+mlxcx_wq_rele_dma(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_ALLOC);
+ if (mlwq->mlwq_state & MLXCX_WQ_CREATED)
+ VERIFY(mlwq->mlwq_state & MLXCX_WQ_DESTROYED);
+
+ mlxcx_dma_free(&mlwq->mlwq_dma);
+ mlwq->mlwq_send_ent = NULL;
+ mlxcx_dma_free(&mlwq->mlwq_doorbell_dma);
+ mlwq->mlwq_doorbell = NULL;
+
+ /*
+ * Clear the WQ allocation flag. The original cleared
+ * MLXCX_CQ_ALLOC (a completion-queue flag) from this work-queue
+ * state field -- a copy-paste slip that is only harmless if the
+ * two enums happen to share a bit value.
+ */
+ mlwq->mlwq_state &= ~MLXCX_WQ_ALLOC;
+}
+
+/*
+ * Allocate the DMA memory backing a completion queue: the CQE ring and
+ * the CQ doorbell. Every ring entry is initialised to the invalid
+ * opcode with the initial owner bit so the driver/hardware ownership
+ * handshake starts in a known state. Sets MLXCX_CQ_ALLOC on success;
+ * returns B_FALSE (with everything freed again) on failure.
+ */
+boolean_t
+mlxcx_cq_alloc_dma(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq)
+{
+ ddi_device_acc_attr_t acc;
+ ddi_dma_attr_t attr;
+ boolean_t ret;
+ size_t sz, i;
+
+ /*
+ * This is a CQ state field, so check the CQ flag. The original
+ * tested MLXCX_EQ_ALLOC (the event-queue flag) here -- a
+ * copy-paste slip that is only correct if the two enums happen
+ * to share a bit value.
+ */
+ VERIFY0(mlcq->mlcq_state & MLXCX_CQ_ALLOC);
+
+ mlcq->mlcq_entshift = mlxp->mlx_props.mldp_cq_size_shift;
+ mlcq->mlcq_nents = (1 << mlcq->mlcq_entshift);
+ sz = mlcq->mlcq_nents * sizeof (mlxcx_completionq_ent_t);
+ /* The ring must be a whole number of hardware pages. */
+ ASSERT3U(sz & (MLXCX_HW_PAGE_SIZE - 1), ==, 0);
+
+ mlxcx_dma_acc_attr(mlxp, &acc);
+ mlxcx_dma_queue_attr(mlxp, &attr);
+
+ ret = mlxcx_dma_alloc(mlxp, &mlcq->mlcq_dma, &attr, &acc,
+ B_TRUE, sz, B_TRUE);
+ if (!ret) {
+ mlxcx_warn(mlxp, "failed to allocate CQ memory");
+ return (B_FALSE);
+ }
+
+ mlcq->mlcq_ent = (mlxcx_completionq_ent_t *)mlcq->mlcq_dma.mxdb_va;
+
+ for (i = 0; i < mlcq->mlcq_nents; ++i) {
+ mlcq->mlcq_ent[i].mlcqe_opcode = MLXCX_CQE_OP_INVALID;
+ mlcq->mlcq_ent[i].mlcqe_owner = MLXCX_CQE_OWNER_INIT;
+ }
+
+ mlxcx_dma_acc_attr(mlxp, &acc);
+ mlxcx_dma_qdbell_attr(mlxp, &attr);
+ sz = sizeof (mlxcx_completionq_doorbell_t);
+ ret = mlxcx_dma_alloc(mlxp, &mlcq->mlcq_doorbell_dma, &attr, &acc,
+ B_TRUE, sz, B_TRUE);
+ if (!ret) {
+ mlxcx_warn(mlxp, "failed to allocate CQ doorbell memory");
+ /* Unwind the ring allocation so we leave no partial state. */
+ mlxcx_dma_free(&mlcq->mlcq_dma);
+ mlcq->mlcq_ent = NULL;
+ return (B_FALSE);
+ }
+
+ mlcq->mlcq_doorbell =
+ (mlxcx_completionq_doorbell_t *)mlcq->mlcq_doorbell_dma.mxdb_va;
+
+ mlcq->mlcq_state |= MLXCX_CQ_ALLOC;
+
+ return (B_TRUE);
+}
+
+/*
+ * Release the DMA memory backing a completion queue (CQE ring and
+ * doorbell). The CQ must currently be allocated, and if it was ever
+ * created on the hardware it must already have been destroyed.
+ * Clears MLXCX_CQ_ALLOC when done.
+ */
+void
+mlxcx_cq_rele_dma(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq)
+{
+ VERIFY(mlcq->mlcq_state & MLXCX_CQ_ALLOC);
+ if (mlcq->mlcq_state & MLXCX_CQ_CREATED)
+ VERIFY(mlcq->mlcq_state & MLXCX_CQ_DESTROYED);
+
+ mlxcx_dma_free(&mlcq->mlcq_dma);
+ mlcq->mlcq_ent = NULL;
+ mlxcx_dma_free(&mlcq->mlcq_doorbell_dma);
+ mlcq->mlcq_doorbell = NULL;
+
+ mlcq->mlcq_state &= ~MLXCX_CQ_ALLOC;
+}
+
+/*
+ * Tear down a work queue: stop and destroy it on the hardware (if it
+ * was created and not yet destroyed), release its DMA memory, unlink
+ * it from its completion queue, and destroy its mutex. Failures to
+ * stop/destroy on the hardware are warned about but not fatal -- the
+ * local teardown proceeds regardless.
+ */
+void
+mlxcx_wq_teardown(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+ mlxcx_completion_queue_t *mlcq;
+
+ /*
+ * If something is holding the lock on a long operation like a
+ * refill, setting this flag asks them to exit early if possible.
+ */
+ atomic_or_uint(&mlwq->mlwq_state, MLXCX_WQ_TEARDOWN);
+
+ mutex_enter(&mlwq->mlwq_mtx);
+
+ list_remove(&mlxp->mlx_wqs, mlwq);
+
+ if ((mlwq->mlwq_state & MLXCX_WQ_CREATED) &&
+ !(mlwq->mlwq_state & MLXCX_WQ_DESTROYED)) {
+ /* Stop (if started) then destroy, per queue type. */
+ if (mlwq->mlwq_type == MLXCX_WQ_TYPE_RECVQ &&
+ mlwq->mlwq_state & MLXCX_WQ_STARTED &&
+ !mlxcx_cmd_stop_rq(mlxp, mlwq)) {
+ mlxcx_warn(mlxp, "failed to stop "
+ "recv queue num %x", mlwq->mlwq_num);
+ }
+ if (mlwq->mlwq_type == MLXCX_WQ_TYPE_SENDQ &&
+ mlwq->mlwq_state & MLXCX_WQ_STARTED &&
+ !mlxcx_cmd_stop_sq(mlxp, mlwq)) {
+ mlxcx_warn(mlxp, "failed to stop "
+ "send queue num %x", mlwq->mlwq_num);
+ }
+ if (mlwq->mlwq_type == MLXCX_WQ_TYPE_RECVQ &&
+ !mlxcx_cmd_destroy_rq(mlxp, mlwq)) {
+ mlxcx_warn(mlxp, "failed to destroy "
+ "recv queue num %x", mlwq->mlwq_num);
+ }
+ if (mlwq->mlwq_type == MLXCX_WQ_TYPE_SENDQ &&
+ !mlxcx_cmd_destroy_sq(mlxp, mlwq)) {
+ mlxcx_warn(mlxp, "failed to destroy "
+ "send queue num %x", mlwq->mlwq_num);
+ }
+ }
+ if (mlwq->mlwq_state & MLXCX_WQ_ALLOC) {
+ mlxcx_wq_rele_dma(mlxp, mlwq);
+ }
+ mlcq = mlwq->mlwq_cq;
+
+ /* These will be released by mlxcx_teardown_bufs() */
+ mlwq->mlwq_bufs = NULL;
+ mlwq->mlwq_foreign_bufs = NULL;
+
+ mutex_exit(&mlwq->mlwq_mtx);
+
+ /*
+ * Unlink from the CQ. The CQ mutex is taken before the WQ mutex
+ * elsewhere in this file, so we must drop ours and re-acquire in
+ * that order to keep lock ordering consistent.
+ */
+ mutex_enter(&mlcq->mlcq_mtx);
+ mutex_enter(&mlwq->mlwq_mtx);
+ ASSERT3P(mlcq->mlcq_wq, ==, mlwq);
+ mlcq->mlcq_wq = NULL;
+ mutex_exit(&mlwq->mlwq_mtx);
+ mutex_exit(&mlcq->mlcq_mtx);
+
+ mutex_destroy(&mlwq->mlwq_mtx);
+}
+
+/*
+ * Tear down a completion queue: destroy it on the hardware (if it was
+ * created and not yet destroyed), release its DMA memory, return any
+ * outstanding buffers to the free pool, detach it from its event
+ * queue's AVL tree, and finally free the CQ structure itself.
+ */
+void
+mlxcx_cq_teardown(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq)
+{
+ mlxcx_event_queue_t *mleq;
+ mlxcx_buffer_t *b;
+
+ /*
+ * If something is holding the lock on a long operation like polling
+ * which we're going to abort anyway, this flag asks them to exit
+ * early if possible.
+ */
+ atomic_or_uint(&mlcq->mlcq_state, MLXCX_CQ_TEARDOWN);
+
+ mutex_enter(&mlcq->mlcq_mtx);
+
+ list_remove(&mlxp->mlx_cqs, mlcq);
+
+ if ((mlcq->mlcq_state & MLXCX_CQ_CREATED) &&
+ !(mlcq->mlcq_state & MLXCX_CQ_DESTROYED)) {
+ if (!mlxcx_cmd_destroy_cq(mlxp, mlcq)) {
+ mlxcx_warn(mlxp, "failed to destroy "
+ "completion queue num %u",
+ mlcq->mlcq_num);
+ }
+ }
+ if (mlcq->mlcq_state & MLXCX_CQ_ALLOC) {
+ mlxcx_cq_rele_dma(mlxp, mlcq);
+ }
+ /*
+ * If we're on an EQ AVL tree, then we need to grab
+ * the EQ's mutex to take it off. The ISR always takes
+ * EQ mutex before CQ mutex, so we have to let go of
+ * the CQ mutex then come back again.
+ *
+ * The ISR will bail out if it tries to touch this CQ now since
+ * we added the CQ_TEARDOWN flag above.
+ */
+ if (mlcq->mlcq_state & MLXCX_CQ_EQAVL) {
+ mleq = mlcq->mlcq_eq;
+ } else {
+ mleq = NULL;
+ }
+
+ /* Return any outstanding buffers to the free pool. */
+ while ((b = list_remove_head(&mlcq->mlcq_buffers)) != NULL) {
+ mlxcx_buf_return_chain(mlxp, b, B_FALSE);
+ }
+ /* The "b" list has its own lock (see mlcq_bufbmtx). */
+ mutex_enter(&mlcq->mlcq_bufbmtx);
+ while ((b = list_remove_head(&mlcq->mlcq_buffers_b)) != NULL) {
+ mlxcx_buf_return_chain(mlxp, b, B_FALSE);
+ }
+ mutex_exit(&mlcq->mlcq_bufbmtx);
+
+ /*
+ * Since the interrupt handlers take the EQ lock before the CQ one,
+ * we must do the same here. That means letting go of the lock
+ * for a brief window here (we'll double-check the state when we
+ * get back in).
+ */
+ mutex_exit(&mlcq->mlcq_mtx);
+
+ if (mleq != NULL) {
+ mutex_enter(&mleq->mleq_mtx);
+ mutex_enter(&mlcq->mlcq_mtx);
+ /*
+ * Double-check the state, we let go of the
+ * mutex briefly.
+ */
+ if (mlcq->mlcq_state & MLXCX_CQ_EQAVL) {
+ avl_remove(&mleq->mleq_cqs, mlcq);
+ mlcq->mlcq_state &= ~MLXCX_CQ_EQAVL;
+ }
+ mutex_exit(&mlcq->mlcq_mtx);
+ mutex_exit(&mleq->mleq_mtx);
+ }
+
+ /* Nothing else should be set in the state by this point. */
+ mutex_enter(&mlcq->mlcq_mtx);
+ ASSERT0(mlcq->mlcq_state & ~(MLXCX_CQ_CREATED | MLXCX_CQ_DESTROYED |
+ MLXCX_CQ_TEARDOWN | MLXCX_CQ_ARMED));
+ mutex_exit(&mlcq->mlcq_mtx);
+
+ mutex_destroy(&mlcq->mlcq_mtx);
+ mutex_destroy(&mlcq->mlcq_bufbmtx);
+ list_destroy(&mlcq->mlcq_buffers);
+ list_destroy(&mlcq->mlcq_buffers_b);
+ kmem_free(mlcq, sizeof (mlxcx_completion_queue_t));
+}
+
+/*
+ * Allocate and initialise a completion queue, create it on the
+ * hardware, attach it to the given event queue and arm it. On success
+ * the new CQ is returned via *cqp. On failure the partially set-up CQ
+ * is left on mlxp->mlx_cqs (NOTE(review): presumably so the caller's
+ * teardown path reclaims it via mlxcx_cq_teardown -- confirm).
+ */
+static boolean_t
+mlxcx_cq_setup(mlxcx_t *mlxp, mlxcx_event_queue_t *eq,
+ mlxcx_completion_queue_t **cqp)
+{
+ mlxcx_completion_queue_t *cq;
+
+ cq = kmem_zalloc(sizeof (mlxcx_completion_queue_t), KM_SLEEP);
+ mutex_init(&cq->mlcq_mtx, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(mlxp->mlx_intr_pri));
+ mutex_init(&cq->mlcq_bufbmtx, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(mlxp->mlx_intr_pri));
+ list_create(&cq->mlcq_buffers, sizeof (mlxcx_buffer_t),
+ offsetof(mlxcx_buffer_t, mlb_cq_entry));
+ list_create(&cq->mlcq_buffers_b, sizeof (mlxcx_buffer_t),
+ offsetof(mlxcx_buffer_t, mlb_cq_entry));
+
+ cq->mlcq_mlx = mlxp;
+ list_insert_tail(&mlxp->mlx_cqs, cq);
+
+ mutex_enter(&cq->mlcq_mtx);
+
+ if (!mlxcx_cq_alloc_dma(mlxp, cq)) {
+ mutex_exit(&cq->mlcq_mtx);
+ return (B_FALSE);
+ }
+
+ /* Buffer watermarks are derived from the ring size. */
+ cq->mlcq_bufhwm = cq->mlcq_nents - MLXCX_CQ_HWM_GAP;
+ cq->mlcq_buflwm = cq->mlcq_nents - MLXCX_CQ_LWM_GAP;
+
+ cq->mlcq_uar = &mlxp->mlx_uar;
+ cq->mlcq_eq = eq;
+
+ cq->mlcq_cqemod_period_usec = mlxp->mlx_props.mldp_cqemod_period_usec;
+ cq->mlcq_cqemod_count = mlxp->mlx_props.mldp_cqemod_count;
+
+ if (!mlxcx_cmd_create_cq(mlxp, cq)) {
+ mutex_exit(&cq->mlcq_mtx);
+ return (B_FALSE);
+ }
+
+ mutex_exit(&cq->mlcq_mtx);
+
+ /*
+ * Attach to the EQ's AVL tree and arm. Lock order: EQ mutex
+ * before CQ mutex, matching the interrupt handlers.
+ */
+ mutex_enter(&eq->mleq_mtx);
+ mutex_enter(&cq->mlcq_mtx);
+ ASSERT0(cq->mlcq_state & MLXCX_CQ_EQAVL);
+ avl_add(&eq->mleq_cqs, cq);
+ cq->mlcq_state |= MLXCX_CQ_EQAVL;
+ mlxcx_arm_cq(mlxp, cq);
+ mutex_exit(&cq->mlcq_mtx);
+ mutex_exit(&eq->mleq_mtx);
+
+ *cqp = cq;
+ return (B_TRUE);
+}
+
+/*
+ * Initialise a receive work queue over the given completion queue:
+ * set up its mutex and buffer shard, allocate its DMA memory, create
+ * the RQ on the hardware, and cross-link it with the CQ. On failure
+ * the partial WQ remains on mlxp->mlx_wqs for the caller's teardown
+ * path to reclaim.
+ */
+static boolean_t
+mlxcx_rq_setup(mlxcx_t *mlxp, mlxcx_completion_queue_t *cq,
+ mlxcx_work_queue_t *wq)
+{
+ mutex_init(&wq->mlwq_mtx, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(mlxp->mlx_intr_pri));
+
+ list_insert_tail(&mlxp->mlx_wqs, wq);
+
+ mutex_enter(&wq->mlwq_mtx);
+
+ wq->mlwq_mlx = mlxp;
+ wq->mlwq_type = MLXCX_WQ_TYPE_RECVQ;
+ wq->mlwq_cq = cq;
+ wq->mlwq_pd = &mlxp->mlx_pd;
+ wq->mlwq_uar = &mlxp->mlx_uar;
+
+ wq->mlwq_bufs = mlxcx_mlbs_create(mlxp);
+
+ if (!mlxcx_wq_alloc_dma(mlxp, wq)) {
+ mutex_exit(&wq->mlwq_mtx);
+ return (B_FALSE);
+ }
+
+ if (!mlxcx_cmd_create_rq(mlxp, wq)) {
+ mutex_exit(&wq->mlwq_mtx);
+ return (B_FALSE);
+ }
+
+ mutex_exit(&wq->mlwq_mtx);
+
+ /* Link back from the CQ; lock order is CQ before WQ. */
+ mutex_enter(&cq->mlcq_mtx);
+ mutex_enter(&wq->mlwq_mtx);
+ ASSERT3P(cq->mlcq_wq, ==, NULL);
+ cq->mlcq_wq = wq;
+ mutex_exit(&wq->mlwq_mtx);
+ mutex_exit(&cq->mlcq_mtx);
+
+ return (B_TRUE);
+}
+
+/*
+ * Initialise a send work queue over the given completion queue and
+ * TIS: set up its mutex and buffer shards (local and foreign/loaned),
+ * configure the minimum inline mode, allocate DMA memory, create the
+ * SQ on the hardware, and cross-link it with the CQ. On failure the
+ * partial WQ remains on mlxp->mlx_wqs for the caller's teardown path.
+ */
+static boolean_t
+mlxcx_sq_setup(mlxcx_t *mlxp, mlxcx_port_t *port, mlxcx_completion_queue_t *cq,
+ mlxcx_tis_t *tis, mlxcx_work_queue_t *wq)
+{
+ mutex_init(&wq->mlwq_mtx, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(mlxp->mlx_intr_pri));
+
+ list_insert_tail(&mlxp->mlx_wqs, wq);
+
+ mutex_enter(&wq->mlwq_mtx);
+
+ wq->mlwq_mlx = mlxp;
+ wq->mlwq_type = MLXCX_WQ_TYPE_SENDQ;
+ wq->mlwq_cq = cq;
+ wq->mlwq_pd = &mlxp->mlx_pd;
+ wq->mlwq_uar = &mlxp->mlx_uar;
+ wq->mlwq_tis = tis;
+
+ wq->mlwq_bufs = mlxcx_mlbs_create(mlxp);
+ wq->mlwq_foreign_bufs = mlxcx_mlbs_create(mlxp);
+
+ /*
+ * We always inline at least the L2 header; the port must not
+ * require more than that.
+ */
+ VERIFY3U(port->mlp_wqe_min_inline, <=, MLXCX_ETH_INLINE_L2);
+ wq->mlwq_inline_mode = MLXCX_ETH_INLINE_L2;
+
+ if (!mlxcx_wq_alloc_dma(mlxp, wq)) {
+ mutex_exit(&wq->mlwq_mtx);
+ return (B_FALSE);
+ }
+
+ if (!mlxcx_cmd_create_sq(mlxp, wq)) {
+ mutex_exit(&wq->mlwq_mtx);
+ return (B_FALSE);
+ }
+
+ mutex_exit(&wq->mlwq_mtx);
+
+ /* Link back from the CQ; lock order is CQ before WQ. */
+ mutex_enter(&cq->mlcq_mtx);
+ mutex_enter(&wq->mlwq_mtx);
+ ASSERT3P(cq->mlcq_wq, ==, NULL);
+ cq->mlcq_wq = wq;
+ mutex_exit(&wq->mlwq_mtx);
+ mutex_exit(&cq->mlcq_mtx);
+
+ return (B_TRUE);
+}
+
+/*
+ * Tear down an RX ring group in reverse order of setup: flow-table
+ * state (unicast/VLAN/broadcast/promisc entries and the VLAN and RSS
+ * hash tables), then stop the RQs, destroy the TIRs and the RQ table,
+ * and finally tear down each WQ/CQ pair and free the WQ array.
+ * Clears each MLXCX_GROUP_* state bit as the matching resources go.
+ */
+void
+mlxcx_teardown_rx_group(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
+{
+ mlxcx_work_queue_t *wq;
+ mlxcx_completion_queue_t *cq;
+ mlxcx_flow_entry_t *fe;
+ mlxcx_flow_group_t *fg;
+ mlxcx_flow_table_t *ft;
+ uint_t i;
+
+ mutex_enter(&g->mlg_port->mlp_mtx);
+ mutex_enter(&g->mlg_mtx);
+
+ if (g->mlg_state & MLXCX_GROUP_FLOWS) {
+ mlxcx_remove_all_umcast_entries(mlxp, g->mlg_port, g);
+
+ if (g->mlg_rx_vlan_ft != NULL)
+ mlxcx_remove_all_vlan_entries(mlxp, g);
+
+ /*
+ * Only the default (first) group owns the port-level
+ * broadcast and promisc entries in the root flow table.
+ */
+ if (g == &mlxp->mlx_rx_groups[0]) {
+ ft = g->mlg_port->mlp_rx_flow;
+ mutex_enter(&ft->mlft_mtx);
+
+ fg = g->mlg_port->mlp_bcast;
+ fe = list_head(&fg->mlfg_entries);
+ if (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
+ (void) mlxcx_cmd_delete_flow_table_entry(
+ mlxp, fe);
+ }
+
+ fg = g->mlg_port->mlp_promisc;
+ fe = list_head(&fg->mlfg_entries);
+ if (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
+ (void) mlxcx_cmd_delete_flow_table_entry(
+ mlxp, fe);
+ }
+
+ mutex_exit(&ft->mlft_mtx);
+ }
+
+ if (g->mlg_rx_vlan_ft != NULL) {
+ mutex_enter(&g->mlg_rx_vlan_ft->mlft_mtx);
+ ASSERT(list_is_empty(&g->mlg_rx_vlans));
+ fg = g->mlg_rx_vlan_def_fg;
+ fe = list_head(&fg->mlfg_entries);
+ if (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
+ (void) mlxcx_cmd_delete_flow_table_entry(
+ mlxp, fe);
+ }
+ fg = g->mlg_rx_vlan_promisc_fg;
+ fe = list_head(&fg->mlfg_entries);
+ if (fe->mlfe_state & MLXCX_FLOW_ENTRY_CREATED) {
+ (void) mlxcx_cmd_delete_flow_table_entry(
+ mlxp, fe);
+ }
+ mlxcx_teardown_flow_table(mlxp, g->mlg_rx_vlan_ft);
+ list_destroy(&g->mlg_rx_vlans);
+
+ g->mlg_rx_vlan_ft = NULL;
+ }
+
+ mutex_enter(&g->mlg_rx_hash_ft->mlft_mtx);
+ mlxcx_teardown_flow_table(mlxp, g->mlg_rx_hash_ft);
+ g->mlg_rx_hash_ft = NULL;
+
+ avl_destroy(&g->mlg_rx_macs);
+ g->mlg_state &= ~MLXCX_GROUP_FLOWS;
+ }
+
+ if (g->mlg_state & MLXCX_GROUP_RUNNING) {
+ for (i = 0; i < g->mlg_nwqs; ++i) {
+ wq = &g->mlg_wqs[i];
+ mutex_enter(&wq->mlwq_mtx);
+ if (wq->mlwq_state & MLXCX_WQ_STARTED &&
+ !mlxcx_cmd_stop_rq(mlxp, wq)) {
+ mlxcx_warn(mlxp, "failed to stop rq %x",
+ wq->mlwq_num);
+ }
+ mutex_exit(&wq->mlwq_mtx);
+ }
+ g->mlg_state &= ~MLXCX_GROUP_RUNNING;
+ }
+
+ if (g->mlg_state & MLXCX_GROUP_TIRTIS) {
+ for (i = 0; i < MLXCX_TIRS_PER_GROUP; ++i) {
+ mlxcx_tir_t *tir = &g->mlg_tir[i];
+ if (tir->mltir_state & MLXCX_TIR_CREATED &&
+ !(tir->mltir_state & MLXCX_TIR_DESTROYED)) {
+ if (!mlxcx_cmd_destroy_tir(mlxp, tir)) {
+ mlxcx_warn(mlxp,
+ "failed to destroy tir %u "
+ "for rx ring", tir->mltir_num);
+ }
+ }
+ }
+ g->mlg_state &= ~MLXCX_GROUP_TIRTIS;
+ }
+
+ if (g->mlg_state & MLXCX_GROUP_RQT) {
+ if (g->mlg_rqt->mlrqt_state & MLXCX_RQT_CREATED &&
+ !(g->mlg_rqt->mlrqt_state & MLXCX_RQT_DESTROYED)) {
+ if (!mlxcx_cmd_destroy_rqt(mlxp, g->mlg_rqt)) {
+ mlxcx_warn(mlxp, "failed to destroy rqt %u "
+ "for rx ring", g->mlg_rqt->mlrqt_num);
+ }
+ kmem_free(g->mlg_rqt->mlrqt_rq,
+ g->mlg_rqt->mlrqt_rq_size);
+ g->mlg_rqt->mlrqt_rq = NULL;
+ kmem_free(g->mlg_rqt, sizeof (mlxcx_rqtable_t));
+ g->mlg_rqt = NULL;
+ }
+ g->mlg_state &= ~MLXCX_GROUP_RQT;
+ }
+
+ for (i = 0; i < g->mlg_nwqs; ++i) {
+ wq = &g->mlg_wqs[i];
+ cq = wq->mlwq_cq;
+ mlxcx_wq_teardown(mlxp, wq);
+ if (cq != NULL)
+ mlxcx_cq_teardown(mlxp, cq);
+ }
+ kmem_free(g->mlg_wqs, g->mlg_wqs_size);
+ g->mlg_wqs = NULL;
+ g->mlg_state &= ~MLXCX_GROUP_WQS;
+
+ mutex_exit(&g->mlg_mtx);
+ mutex_exit(&g->mlg_port->mlp_mtx);
+
+ mutex_destroy(&g->mlg_mtx);
+
+ g->mlg_state &= ~MLXCX_GROUP_INIT;
+ /* All state bits should now be clear. */
+ ASSERT3S(g->mlg_state, ==, 0);
+}
+
+/*
+ * Tear down a TX ring group: stop each SQ and tear down its WQ/CQ
+ * pair, free the WQ array, then destroy the group's TIS on the
+ * hardware. Clears each MLXCX_GROUP_* state bit as resources go.
+ */
+void
+mlxcx_teardown_tx_group(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
+{
+ mlxcx_work_queue_t *wq;
+ mlxcx_completion_queue_t *cq;
+ uint_t i;
+
+ mutex_enter(&g->mlg_mtx);
+
+ if (g->mlg_state & MLXCX_GROUP_WQS) {
+ for (i = 0; i < g->mlg_nwqs; ++i) {
+ wq = &g->mlg_wqs[i];
+ mutex_enter(&wq->mlwq_mtx);
+ /* Save the CQ pointer before the WQ is torn down. */
+ cq = wq->mlwq_cq;
+ if (wq->mlwq_state & MLXCX_WQ_STARTED &&
+ !mlxcx_cmd_stop_sq(mlxp, wq)) {
+ mlxcx_warn(mlxp, "failed to stop sq %x",
+ wq->mlwq_num);
+ }
+ mutex_exit(&wq->mlwq_mtx);
+ mlxcx_wq_teardown(mlxp, wq);
+ if (cq != NULL)
+ mlxcx_cq_teardown(mlxp, cq);
+ }
+ g->mlg_state &= ~MLXCX_GROUP_RUNNING;
+ kmem_free(g->mlg_wqs, g->mlg_wqs_size);
+ g->mlg_wqs = NULL;
+ g->mlg_state &= ~MLXCX_GROUP_WQS;
+ }
+
+ if ((g->mlg_state & MLXCX_GROUP_TIRTIS) &&
+ g->mlg_tis.mltis_state & MLXCX_TIS_CREATED &&
+ !(g->mlg_tis.mltis_state & MLXCX_TIS_DESTROYED)) {
+ if (!mlxcx_cmd_destroy_tis(mlxp, &g->mlg_tis)) {
+ mlxcx_warn(mlxp, "failed to destroy tis %u for tx ring",
+ g->mlg_tis.mltis_num);
+ }
+ }
+ g->mlg_state &= ~MLXCX_GROUP_TIRTIS;
+
+ mutex_exit(&g->mlg_mtx);
+ mutex_destroy(&g->mlg_mtx);
+ g->mlg_state &= ~MLXCX_GROUP_INIT;
+ /* All state bits should now be clear. */
+ ASSERT3S(g->mlg_state, ==, 0);
+}
+
+/*
+ * Tear down every initialised RX and TX ring group and free the group
+ * arrays themselves. Groups that never reached MLXCX_GROUP_INIT are
+ * skipped.
+ */
+void
+mlxcx_teardown_groups(mlxcx_t *mlxp)
+{
+ mlxcx_ring_group_t *g;
+ uint_t i;
+
+ for (i = 0; i < mlxp->mlx_rx_ngroups; ++i) {
+ g = &mlxp->mlx_rx_groups[i];
+ if (!(g->mlg_state & MLXCX_GROUP_INIT))
+ continue;
+ ASSERT3S(g->mlg_type, ==, MLXCX_GROUP_RX);
+ mlxcx_teardown_rx_group(mlxp, g);
+ }
+ kmem_free(mlxp->mlx_rx_groups, mlxp->mlx_rx_groups_size);
+ mlxp->mlx_rx_groups = NULL;
+
+ for (i = 0; i < mlxp->mlx_tx_ngroups; ++i) {
+ g = &mlxp->mlx_tx_groups[i];
+ if (!(g->mlg_state & MLXCX_GROUP_INIT))
+ continue;
+ ASSERT3S(g->mlg_type, ==, MLXCX_GROUP_TX);
+ mlxcx_teardown_tx_group(mlxp, g);
+ }
+ kmem_free(mlxp->mlx_tx_groups, mlxp->mlx_tx_groups_size);
+ mlxp->mlx_tx_groups = NULL;
+}
+
+boolean_t
+mlxcx_rx_group_setup(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
+{
+ mlxcx_event_queue_t *eq;
+ mlxcx_completion_queue_t *cq;
+ mlxcx_work_queue_t *rq;
+ mlxcx_flow_table_t *ft;
+ mlxcx_flow_group_t *fg;
+ mlxcx_flow_entry_t *fe;
+ uint_t i, j;
+
+ ASSERT3S(g->mlg_state, ==, 0);
+
+ mutex_init(&g->mlg_mtx, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(mlxp->mlx_intr_pri));
+ mutex_enter(&g->mlg_mtx);
+ g->mlg_mlx = mlxp;
+ g->mlg_type = MLXCX_GROUP_RX;
+ g->mlg_port = &mlxp->mlx_ports[0];
+ g->mlg_state |= MLXCX_GROUP_INIT;
+
+ g->mlg_nwqs = mlxp->mlx_props.mldp_rx_nrings_per_small_group;
+ i = g - &mlxp->mlx_rx_groups[0];
+ if (i < mlxp->mlx_props.mldp_rx_ngroups_large)
+ g->mlg_nwqs = mlxp->mlx_props.mldp_rx_nrings_per_large_group;
+
+ g->mlg_wqs_size = g->mlg_nwqs * sizeof (mlxcx_work_queue_t);
+ g->mlg_wqs = kmem_zalloc(g->mlg_wqs_size, KM_SLEEP);
+ g->mlg_state |= MLXCX_GROUP_WQS;
+
+ g->mlg_rqt = kmem_zalloc(sizeof (mlxcx_rqtable_t), KM_SLEEP);
+ g->mlg_rqt->mlrqt_max = 2;
+ while (g->mlg_rqt->mlrqt_max < g->mlg_nwqs)
+ g->mlg_rqt->mlrqt_max <<= 1;
+ g->mlg_rqt->mlrqt_rq_size = g->mlg_rqt->mlrqt_max *
+ sizeof (mlxcx_work_queue_t *);
+ g->mlg_rqt->mlrqt_rq = kmem_zalloc(g->mlg_rqt->mlrqt_rq_size, KM_SLEEP);
+ g->mlg_state |= MLXCX_GROUP_RQT;
+
+ for (i = 0; i < g->mlg_nwqs; ++i) {
+ eq = NULL;
+ while (eq == NULL) {
+ eq = &mlxp->mlx_eqs[mlxp->mlx_next_eq++];
+ if (mlxp->mlx_next_eq >= mlxp->mlx_intr_count)
+ mlxp->mlx_next_eq = 1;
+ if (eq->mleq_type != MLXCX_EQ_TYPE_ANY &&
+ eq->mleq_type != MLXCX_EQ_TYPE_RX) {
+ /* Try the next one */
+ eq = NULL;
+ }
+ }
+
+ if (!mlxcx_cq_setup(mlxp, eq, &cq)) {
+ g->mlg_nwqs = i;
+ break;
+ }
+ cq->mlcq_stats = &g->mlg_port->mlp_stats;
+
+ rq = &g->mlg_wqs[i];
+ if (!mlxcx_rq_setup(mlxp, cq, rq)) {
+ g->mlg_nwqs = i;
+ break;
+ }
+ g->mlg_rqt->mlrqt_rq[g->mlg_rqt->mlrqt_used++] = rq;
+ g->mlg_rqt->mlrqt_state |= MLXCX_RQT_DIRTY;
+ rq->mlwq_group = g;
+ }
+ if (g->mlg_nwqs == 0) {
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ if (!mlxcx_cmd_create_rqt(mlxp, g->mlg_rqt)) {
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ for (i = 0; i < MLXCX_TIRS_PER_GROUP; ++i) {
+ mlxcx_tir_t *tir = &g->mlg_tir[i];
+ tir->mltir_tdom = &mlxp->mlx_tdom;
+ switch (i) {
+ case MLXCX_TIR_ROLE_OTHER:
+ tir->mltir_type = MLXCX_TIR_DIRECT;
+ tir->mltir_rq = &g->mlg_wqs[0];
+ break;
+ case MLXCX_TIR_ROLE_IPv4:
+ case MLXCX_TIR_ROLE_IPv6:
+ case MLXCX_TIR_ROLE_TCPv4:
+ case MLXCX_TIR_ROLE_TCPv6:
+ case MLXCX_TIR_ROLE_UDPv4:
+ case MLXCX_TIR_ROLE_UDPv6:
+ tir->mltir_type = MLXCX_TIR_INDIRECT;
+ tir->mltir_rqtable = g->mlg_rqt;
+ tir->mltir_hash_fn = MLXCX_TIR_HASH_TOEPLITZ;
+ (void) random_get_pseudo_bytes(tir->mltir_toeplitz_key,
+ sizeof (tir->mltir_toeplitz_key));
+ break;
+ }
+ switch (i) {
+ case MLXCX_TIR_ROLE_OTHER:
+ break;
+ case MLXCX_TIR_ROLE_IPv4:
+ case MLXCX_TIR_ROLE_TCPv4:
+ case MLXCX_TIR_ROLE_UDPv4:
+ tir->mltir_l3_type = MLXCX_RX_HASH_L3_IPv4;
+ tir->mltir_hash_fields =
+ MLXCX_RX_HASH_SRC_IP | MLXCX_RX_HASH_DST_IP;
+ break;
+ case MLXCX_TIR_ROLE_IPv6:
+ case MLXCX_TIR_ROLE_TCPv6:
+ case MLXCX_TIR_ROLE_UDPv6:
+ tir->mltir_l3_type = MLXCX_RX_HASH_L3_IPv6;
+ tir->mltir_hash_fields =
+ MLXCX_RX_HASH_SRC_IP | MLXCX_RX_HASH_DST_IP;
+ break;
+ }
+ switch (i) {
+ case MLXCX_TIR_ROLE_OTHER:
+ case MLXCX_TIR_ROLE_IPv4:
+ case MLXCX_TIR_ROLE_IPv6:
+ break;
+ case MLXCX_TIR_ROLE_TCPv4:
+ case MLXCX_TIR_ROLE_TCPv6:
+ tir->mltir_l4_type = MLXCX_RX_HASH_L4_TCP;
+ tir->mltir_hash_fields |=
+ MLXCX_RX_HASH_L4_SPORT | MLXCX_RX_HASH_L4_DPORT;
+ break;
+ case MLXCX_TIR_ROLE_UDPv4:
+ case MLXCX_TIR_ROLE_UDPv6:
+ tir->mltir_l4_type = MLXCX_RX_HASH_L4_UDP;
+ tir->mltir_hash_fields |=
+ MLXCX_RX_HASH_L4_SPORT | MLXCX_RX_HASH_L4_DPORT;
+ break;
+ }
+
+ if (!mlxcx_cmd_create_tir(mlxp, tir)) {
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ g->mlg_state |= MLXCX_GROUP_TIRTIS;
+ }
+
+ /*
+ * Flow table: our RX hashing breakout table for RSS
+ */
+
+ g->mlg_rx_hash_ft = (ft = kmem_zalloc(sizeof (mlxcx_flow_table_t),
+ KM_SLEEP));
+ mutex_init(&ft->mlft_mtx, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(mlxp->mlx_intr_pri));
+ avl_create(&g->mlg_rx_macs, mlxcx_grmac_compare,
+ sizeof (mlxcx_group_mac_t),
+ offsetof(mlxcx_group_mac_t, mlgm_group_entry));
+ g->mlg_state |= MLXCX_GROUP_FLOWS;
+
+ mutex_enter(&ft->mlft_mtx);
+
+ ft->mlft_type = MLXCX_FLOW_TABLE_NIC_RX;
+ ft->mlft_level = 2;
+ ft->mlft_port = g->mlg_port;
+ ft->mlft_entshift = MLXCX_RX_HASH_FT_SIZE_SHIFT;
+ ft->mlft_nents = (1 << ft->mlft_entshift);
+ ASSERT3U(ft->mlft_nents, >=, MLXCX_TIRS_PER_GROUP);
+ ft->mlft_entsize = ft->mlft_nents * sizeof (mlxcx_flow_entry_t);
+ ft->mlft_ent = kmem_zalloc(ft->mlft_entsize, KM_SLEEP);
+ list_create(&ft->mlft_groups, sizeof (mlxcx_flow_group_t),
+ offsetof(mlxcx_flow_group_t, mlfg_entry));
+
+ for (j = 0; j < ft->mlft_nents; ++j) {
+ ft->mlft_ent[j].mlfe_table = ft;
+ ft->mlft_ent[j].mlfe_index = j;
+ }
+
+ if (!mlxcx_cmd_create_flow_table(mlxp, ft)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+ list_insert_tail(&ft->mlft_groups, fg);
+ fg->mlfg_table = ft;
+ fg->mlfg_size = 1;
+ fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER | MLXCX_FLOW_MATCH_IP_PROTO;
+ if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+ fe = list_head(&fg->mlfg_entries);
+ fe->mlfe_ip_version = 6;
+ fe->mlfe_ip_proto = IPPROTO_UDP;
+ fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
+ fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
+ &g->mlg_tir[MLXCX_TIR_ROLE_UDPv6];
+ if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+ list_insert_tail(&ft->mlft_groups, fg);
+ fg->mlfg_table = ft;
+ fg->mlfg_size = 1;
+ fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER | MLXCX_FLOW_MATCH_IP_PROTO;
+ if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+ fe = list_head(&fg->mlfg_entries);
+ fe->mlfe_ip_version = 4;
+ fe->mlfe_ip_proto = IPPROTO_UDP;
+ fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
+ fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
+ &g->mlg_tir[MLXCX_TIR_ROLE_UDPv4];
+ if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+ list_insert_tail(&ft->mlft_groups, fg);
+ fg->mlfg_table = ft;
+ fg->mlfg_size = 1;
+ fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER | MLXCX_FLOW_MATCH_IP_PROTO;
+ if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+ fe = list_head(&fg->mlfg_entries);
+ fe->mlfe_ip_version = 6;
+ fe->mlfe_ip_proto = IPPROTO_TCP;
+ fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
+ fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
+ &g->mlg_tir[MLXCX_TIR_ROLE_TCPv6];
+ if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+ list_insert_tail(&ft->mlft_groups, fg);
+ fg->mlfg_table = ft;
+ fg->mlfg_size = 1;
+ fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER | MLXCX_FLOW_MATCH_IP_PROTO;
+ if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+ fe = list_head(&fg->mlfg_entries);
+ fe->mlfe_ip_version = 4;
+ fe->mlfe_ip_proto = IPPROTO_TCP;
+ fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
+ fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
+ &g->mlg_tir[MLXCX_TIR_ROLE_TCPv4];
+ if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+ list_insert_tail(&ft->mlft_groups, fg);
+ fg->mlfg_table = ft;
+ fg->mlfg_size = 1;
+ fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER;
+ if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+ fe = list_head(&fg->mlfg_entries);
+ fe->mlfe_ip_version = 6;
+ fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
+ fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
+ &g->mlg_tir[MLXCX_TIR_ROLE_IPv6];
+ if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+ list_insert_tail(&ft->mlft_groups, fg);
+ fg->mlfg_table = ft;
+ fg->mlfg_size = 1;
+ fg->mlfg_mask |= MLXCX_FLOW_MATCH_IP_VER;
+ if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+ fe = list_head(&fg->mlfg_entries);
+ fe->mlfe_ip_version = 4;
+ fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
+ fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
+ &g->mlg_tir[MLXCX_TIR_ROLE_IPv4];
+ if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+ list_insert_tail(&ft->mlft_groups, fg);
+ fg->mlfg_table = ft;
+ fg->mlfg_size = 1;
+ if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+ fe = list_head(&fg->mlfg_entries);
+ fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
+ fe->mlfe_dest[fe->mlfe_ndest++].mlfed_tir =
+ &g->mlg_tir[MLXCX_TIR_ROLE_OTHER];
+ if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ mutex_exit(&ft->mlft_mtx);
+
+ /*
+ * Flow table: the VLAN breakout table for doing VLAN filtering after
+ * we've matched a MAC address.
+ */
+
+ g->mlg_rx_vlan_ft = (ft = kmem_zalloc(sizeof (mlxcx_flow_table_t),
+ KM_SLEEP));
+ mutex_init(&ft->mlft_mtx, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(mlxp->mlx_intr_pri));
+ list_create(&g->mlg_rx_vlans, sizeof (mlxcx_group_vlan_t),
+ offsetof(mlxcx_group_vlan_t, mlgv_entry));
+
+ mutex_enter(&ft->mlft_mtx);
+
+ ft->mlft_type = MLXCX_FLOW_TABLE_NIC_RX;
+ ft->mlft_level = 1;
+ ft->mlft_port = g->mlg_port;
+ ft->mlft_entshift = mlxp->mlx_props.mldp_ftbl_vlan_size_shift;
+ ft->mlft_nents = (1 << ft->mlft_entshift);
+ ft->mlft_entsize = ft->mlft_nents * sizeof (mlxcx_flow_entry_t);
+ ft->mlft_ent = kmem_zalloc(ft->mlft_entsize, KM_SLEEP);
+ list_create(&ft->mlft_groups, sizeof (mlxcx_flow_group_t),
+ offsetof(mlxcx_flow_group_t, mlfg_entry));
+
+ for (j = 0; j < ft->mlft_nents; ++j) {
+ fe = &ft->mlft_ent[j];
+ fe->mlfe_table = ft;
+ fe->mlfe_index = j;
+ fe->mlfe_action = MLXCX_FLOW_ACTION_FORWARD;
+ fe->mlfe_dest[fe->mlfe_ndest++].mlfed_flow = g->mlg_rx_hash_ft;
+ }
+
+ if (!mlxcx_cmd_create_flow_table(mlxp, ft)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ /* First group is all actual matched VLANs */
+ fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+ g->mlg_rx_vlan_fg = fg;
+ list_insert_tail(&ft->mlft_groups, fg);
+ fg->mlfg_table = ft;
+ fg->mlfg_size = ft->mlft_nents - 2;
+ fg->mlfg_mask |= MLXCX_FLOW_MATCH_VLAN;
+ fg->mlfg_mask |= MLXCX_FLOW_MATCH_VID;
+ if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ /*
+ * Then the "default" entry which we enable when we have no VLAN IDs
+ * added to the group (we start with this enabled).
+ */
+ fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+ g->mlg_rx_vlan_def_fg = fg;
+ list_insert_tail(&ft->mlft_groups, fg);
+ fg->mlfg_table = ft;
+ fg->mlfg_size = 1;
+ if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+ fe = list_head(&fg->mlfg_entries);
+ if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+
+ /*
+ * Finally, the promisc entry which points at the *hash ft* from the
+ * default group. We only enable this when we have promisc on.
+ */
+ fg = kmem_zalloc(sizeof (mlxcx_flow_group_t), KM_SLEEP);
+ g->mlg_rx_vlan_promisc_fg = fg;
+ list_insert_tail(&ft->mlft_groups, fg);
+ fg->mlfg_table = ft;
+ fg->mlfg_size = 1;
+ if (!mlxcx_setup_flow_group(mlxp, ft, fg)) {
+ mutex_exit(&ft->mlft_mtx);
+ mutex_exit(&g->mlg_mtx);
+ return (B_FALSE);
+ }
+ fe = list_head(&fg->mlfg_entries);
+ fe->mlfe_ndest = 1;
+ fe->mlfe_dest[0].mlfed_flow = mlxp->mlx_rx_groups[0].mlg_rx_hash_ft;
+
+ mutex_exit(&ft->mlft_mtx);
+
+ mutex_exit(&g->mlg_mtx);
+
+ return (B_TRUE);
+}
+
+/*
+ * Bring up a single RX ring (RQ) in group g: make sure the owning group is
+ * running, issue the firmware command to start the RQ, create its DMA
+ * buffers and post the initial receive descriptors.
+ *
+ * Returns B_TRUE on success; also returns B_TRUE if the WQ was already
+ * started (the call is idempotent).
+ */
+boolean_t
+mlxcx_rx_ring_start(mlxcx_t *mlxp, mlxcx_ring_group_t *g,
+    mlxcx_work_queue_t *rq)
+{
+	uint_t j;
+	mlxcx_buffer_t *b;
+	mlxcx_completion_queue_t *cq;
+
+	mutex_enter(&g->mlg_mtx);
+	/*
+	 * Sadly, even though MAC has the mgi_start callback, it is not always
+	 * called -- in particular when we are being managed under an aggr, the
+	 * mgi_start callback will only ever be called on the default group.
+	 *
+	 * So instead of asserting about the group state here, we have to
+	 * check it and call group start if needed.
+	 */
+	if (!(g->mlg_state & MLXCX_GROUP_RUNNING)) {
+		/* Drop mlg_mtx: mlxcx_rx_group_start() takes it itself. */
+		mutex_exit(&g->mlg_mtx);
+		if (!mlxcx_rx_group_start(mlxp, g))
+			return (B_FALSE);
+		mutex_enter(&g->mlg_mtx);
+	}
+	ASSERT(g->mlg_state & MLXCX_GROUP_RUNNING);
+
+	cq = rq->mlwq_cq;
+	ASSERT(cq != NULL);
+
+	/* Lock order used throughout this file: group, then CQ, then WQ. */
+	mutex_enter(&cq->mlcq_mtx);
+	mutex_enter(&rq->mlwq_mtx);
+
+	/* Already started by someone else: nothing to do. */
+	if (rq->mlwq_state & MLXCX_WQ_STARTED) {
+		mutex_exit(&rq->mlwq_mtx);
+		mutex_exit(&cq->mlcq_mtx);
+		mutex_exit(&g->mlg_mtx);
+		return (B_TRUE);
+	}
+
+	if (!mlxcx_cmd_start_rq(mlxp, rq)) {
+		mutex_exit(&rq->mlwq_mtx);
+		mutex_exit(&cq->mlcq_mtx);
+		mutex_exit(&g->mlg_mtx);
+		return (B_FALSE);
+	}
+	ASSERT(rq->mlwq_state & MLXCX_WQ_STARTED);
+
+	ASSERT0(rq->mlwq_state & MLXCX_WQ_BUFFERS);
+	rq->mlwq_state |= MLXCX_WQ_BUFFERS;
+
+	/*
+	 * Pre-create 1.5x the ring size worth of buffers (two passes below).
+	 * NOTE(review): presumably the extra half-ring provides headroom for
+	 * buffers out on loan to MAC -- confirm against the shard sizing.
+	 * Creation failures are tolerated; we just end up with fewer buffers.
+	 */
+	for (j = 0; j < rq->mlwq_nents; ++j) {
+		if (!mlxcx_buf_create(mlxp, rq->mlwq_bufs, &b))
+			break;
+		mlxcx_buf_return(mlxp, b);
+	}
+	for (j = 0; j < rq->mlwq_nents / 2; ++j) {
+		if (!mlxcx_buf_create(mlxp, rq->mlwq_bufs, &b))
+			break;
+		mlxcx_buf_return(mlxp, b);
+	}
+
+	/* Post initial descriptors to the hardware. */
+	mlxcx_rq_refill(mlxp, rq);
+
+	mutex_exit(&rq->mlwq_mtx);
+	mutex_exit(&cq->mlcq_mtx);
+	mutex_exit(&g->mlg_mtx);
+
+	return (B_TRUE);
+}
+
+/*
+ * Mark an RX ring group as running.  For the default group (group 0) this
+ * additionally wires the port's broadcast flow entry into the group's RSS
+ * hash fanout table, and prepares (but does not activate) the promisc
+ * entry.  Idempotent: returns B_TRUE immediately if already running.
+ */
+boolean_t
+mlxcx_rx_group_start(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
+{
+	mlxcx_flow_table_t *ft;
+	mlxcx_flow_group_t *fg;
+	mlxcx_flow_entry_t *fe;
+
+	mutex_enter(&g->mlg_mtx);
+
+	if (g->mlg_state & MLXCX_GROUP_RUNNING) {
+		mutex_exit(&g->mlg_mtx);
+		return (B_TRUE);
+	}
+
+	ASSERT0(g->mlg_state & MLXCX_GROUP_RUNNING);
+
+	/*
+	 * Set RUNNING before the flow-table work below; note that a failure
+	 * later in this function leaves the flag set.
+	 */
+	g->mlg_state |= MLXCX_GROUP_RUNNING;
+
+	if (g == &mlxp->mlx_rx_groups[0]) {
+		ft = g->mlg_port->mlp_rx_flow;
+		mutex_enter(&ft->mlft_mtx);
+
+		/*
+		 * Broadcast and promisc entries go directly to group 0's
+		 * RSS hash fanout flow table. They bypass VLAN filtering.
+		 */
+		fg = g->mlg_port->mlp_bcast;
+		fe = list_head(&fg->mlfg_entries);
+		fe->mlfe_dest[fe->mlfe_ndest++].mlfed_flow = g->mlg_rx_hash_ft;
+		if (!mlxcx_cmd_set_flow_table_entry(mlxp, fe)) {
+			mutex_exit(&ft->mlft_mtx);
+			mutex_exit(&g->mlg_mtx);
+			return (B_FALSE);
+		}
+
+		fg = g->mlg_port->mlp_promisc;
+		fe = list_head(&fg->mlfg_entries);
+		fe->mlfe_dest[fe->mlfe_ndest++].mlfed_flow = g->mlg_rx_hash_ft;
+		/*
+		 * Don't actually set the promisc entry until promisc is
+		 * enabled.
+		 */
+
+		mutex_exit(&ft->mlft_mtx);
+	}
+
+	mutex_exit(&g->mlg_mtx);
+
+	return (B_TRUE);
+}
+
+/*
+ * Set up the TX ring group: allocate its send work queues, create the TIS
+ * they all attach to, and create a CQ (on a suitably-typed EQ) plus SQ for
+ * each ring.  Interrupt vector 0 is reserved, so EQ selection cycles
+ * through vectors 1..mlx_intr_count-1, skipping EQs dedicated to RX.
+ *
+ * Returns B_TRUE on success.  On failure, partially-created state is left
+ * flagged in mlg_state for the teardown path to clean up.
+ */
+boolean_t
+mlxcx_tx_group_setup(mlxcx_t *mlxp, mlxcx_ring_group_t *g)
+{
+	mlxcx_event_queue_t *eq;
+	mlxcx_completion_queue_t *cq;
+	mlxcx_work_queue_t *sq;
+	uint_t i;
+
+	ASSERT3S(g->mlg_state, ==, 0);
+
+	mutex_init(&g->mlg_mtx, NULL, MUTEX_DRIVER,
+	    DDI_INTR_PRI(mlxp->mlx_intr_pri));
+	g->mlg_state |= MLXCX_GROUP_INIT;
+	mutex_enter(&g->mlg_mtx);
+
+	g->mlg_mlx = mlxp;
+	g->mlg_type = MLXCX_GROUP_TX;
+	g->mlg_port = &mlxp->mlx_ports[0];
+
+	g->mlg_nwqs = mlxp->mlx_props.mldp_tx_nrings_per_group;
+	g->mlg_wqs_size = g->mlg_nwqs * sizeof (mlxcx_work_queue_t);
+	g->mlg_wqs = kmem_zalloc(g->mlg_wqs_size, KM_SLEEP);
+	g->mlg_state |= MLXCX_GROUP_WQS;
+
+	g->mlg_tis.mltis_tdom = &mlxp->mlx_tdom;
+
+	if (!mlxcx_cmd_create_tis(mlxp, &g->mlg_tis)) {
+		mutex_exit(&g->mlg_mtx);
+		return (B_FALSE);
+	}
+
+	g->mlg_state |= MLXCX_GROUP_TIRTIS;
+
+	for (i = 0; i < g->mlg_nwqs; ++i) {
+		/* Pick the next EQ that accepts TX completion traffic. */
+		eq = NULL;
+		while (eq == NULL) {
+			eq = &mlxp->mlx_eqs[mlxp->mlx_next_eq++];
+			if (mlxp->mlx_next_eq >= mlxp->mlx_intr_count)
+				mlxp->mlx_next_eq = 1;
+			if (eq->mleq_type != MLXCX_EQ_TYPE_ANY &&
+			    eq->mleq_type != MLXCX_EQ_TYPE_TX) {
+				/* Try the next one */
+				eq = NULL;
+			}
+		}
+
+		if (!mlxcx_cq_setup(mlxp, eq, &cq)) {
+			/*
+			 * Bug fix: this error path previously returned
+			 * without dropping mlg_mtx, leaking the mutex
+			 * (every sibling failure path here releases it).
+			 */
+			mutex_exit(&g->mlg_mtx);
+			return (B_FALSE);
+		}
+		cq->mlcq_stats = &g->mlg_port->mlp_stats;
+
+		sq = &g->mlg_wqs[i];
+		if (!mlxcx_sq_setup(mlxp, g->mlg_port, cq, &g->mlg_tis, sq)) {
+			mutex_exit(&g->mlg_mtx);
+			return (B_FALSE);
+		}
+		sq->mlwq_group = g;
+	}
+
+	mutex_exit(&g->mlg_mtx);
+
+	return (B_TRUE);
+}
+
+/*
+ * Bring up a single TX ring (SQ) in group g: create its buffer pools (both
+ * "foreign" bind-capable buffers and ordinary copy buffers), start the SQ
+ * via firmware, and post a NOP WQE to verify the doorbell path works.
+ *
+ * Returns B_TRUE on success; idempotent if the WQ is already started.
+ */
+boolean_t
+mlxcx_tx_ring_start(mlxcx_t *mlxp, mlxcx_ring_group_t *g,
+    mlxcx_work_queue_t *sq)
+{
+	uint_t i;
+	mlxcx_buffer_t *b;
+	mlxcx_completion_queue_t *cq;
+
+	mutex_enter(&g->mlg_mtx);
+
+	cq = sq->mlwq_cq;
+	ASSERT(cq != NULL);
+
+	/* Lock order: group, then CQ, then WQ. */
+	mutex_enter(&cq->mlcq_mtx);
+	mutex_enter(&sq->mlwq_mtx);
+	if (sq->mlwq_state & MLXCX_WQ_STARTED) {
+		mutex_exit(&sq->mlwq_mtx);
+		mutex_exit(&cq->mlcq_mtx);
+		mutex_exit(&g->mlg_mtx);
+		return (B_TRUE);
+	}
+
+	/*
+	 * Pre-create buffers: 1.5x the ring size of foreign (mblk-bind)
+	 * buffers, plus a full ring's worth of copy buffers.  Creation
+	 * failures are tolerated -- we just end up with fewer buffers.
+	 */
+	ASSERT0(sq->mlwq_state & MLXCX_WQ_BUFFERS);
+	for (i = 0; i < sq->mlwq_nents; ++i) {
+		if (!mlxcx_buf_create_foreign(mlxp, sq->mlwq_foreign_bufs, &b))
+			break;
+		mlxcx_buf_return(mlxp, b);
+	}
+	for (i = 0; i < sq->mlwq_nents / 2; ++i) {
+		if (!mlxcx_buf_create_foreign(mlxp, sq->mlwq_foreign_bufs, &b))
+			break;
+		mlxcx_buf_return(mlxp, b);
+	}
+	for (i = 0; i < sq->mlwq_nents; ++i) {
+		if (!mlxcx_buf_create(mlxp, sq->mlwq_bufs, &b))
+			break;
+		mlxcx_buf_return(mlxp, b);
+	}
+	sq->mlwq_state |= MLXCX_WQ_BUFFERS;
+
+	if (!mlxcx_cmd_start_sq(mlxp, sq)) {
+		mutex_exit(&sq->mlwq_mtx);
+		mutex_exit(&cq->mlcq_mtx);
+		mutex_exit(&g->mlg_mtx);
+		return (B_FALSE);
+	}
+	g->mlg_state |= MLXCX_GROUP_RUNNING;
+
+	/* Post a NOP to exercise the SQ doorbell; best-effort. */
+	(void) mlxcx_sq_add_nop(mlxp, sq);
+
+	mutex_exit(&sq->mlwq_mtx);
+	mutex_exit(&cq->mlcq_mtx);
+	mutex_exit(&g->mlg_mtx);
+
+	return (B_TRUE);
+}
+
+/*
+ * Ring the doorbell for an SQ after new WQEs have been written: flush the
+ * doorbell record (producer counter) to the device, then write the first
+ * new WQE's control qword to the blue-flame register.  Both the DMA sync
+ * and the register write are checked via DDI FM and retried up to
+ * mlxcx_doorbell_tries times ('try' is shared across both retry sites).
+ *
+ * Returns B_FALSE (and reports DDI_SERVICE_LOST) on persistent failure.
+ * Caller must hold mlwq_mtx.
+ */
+static boolean_t
+mlxcx_sq_ring_dbell(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq, uint_t first)
+{
+	uint_t idx;
+	mlxcx_bf_t *bf;
+	ddi_fm_error_t err;
+	uint_t try = 0;
+
+	ASSERT3U(mlwq->mlwq_type, ==, MLXCX_WQ_TYPE_SENDQ);
+	ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+
+	mlwq->mlwq_doorbell->mlwqd_send_counter = to_be16(mlwq->mlwq_pc);
+
+	/* Select the blue-flame register for this CQ's interrupt vector. */
+	ASSERT(mlwq->mlwq_cq != NULL);
+	ASSERT(mlwq->mlwq_cq->mlcq_eq != NULL);
+	idx = mlwq->mlwq_cq->mlcq_eq->mleq_intr_index & MLXCX_BF_PER_UAR_MASK;
+	bf = &mlwq->mlwq_uar->mlu_bf[idx];
+
+retry:
+	MLXCX_DMA_SYNC(mlwq->mlwq_doorbell_dma, DDI_DMA_SYNC_FORDEV);
+	ddi_fm_dma_err_get(mlwq->mlwq_doorbell_dma.mxdb_dma_handle, &err,
+	    DDI_FME_VERSION);
+	if (err.fme_status != DDI_FM_OK) {
+		if (try++ < mlxcx_doorbell_tries) {
+			ddi_fm_dma_err_clear(
+			    mlwq->mlwq_doorbell_dma.mxdb_dma_handle,
+			    DDI_FME_VERSION);
+			goto retry;
+		} else {
+			goto err;
+		}
+	}
+
+	/* Write the first new WQE's control qword to blue-flame. */
+	mlxcx_put64(mlxp, bf->mbf_even, from_be64(
+	    mlwq->mlwq_bf_ent[first].mlsqbf_qwords[0]));
+	ddi_fm_acc_err_get(mlxp->mlx_regs_handle, &err,
+	    DDI_FME_VERSION);
+	if (err.fme_status == DDI_FM_OK)
+		return (B_TRUE);
+	if (try++ < mlxcx_doorbell_tries) {
+		ddi_fm_acc_err_clear(mlxp->mlx_regs_handle, DDI_FME_VERSION);
+		goto retry;
+	}
+
+err:
+	ddi_fm_service_impact(mlxp->mlx_dip, DDI_SERVICE_LOST);
+	return (B_FALSE);
+}
+
+/*
+ * Enqueue a single NOP WQE on an SQ and ring the doorbell.  Used at ring
+ * start to verify that the SQ/doorbell path is functional.  The entry
+ * requests a completion (CQE_ALWAYS), so a CQE will arrive for it.
+ *
+ * Caller must hold mlwq_mtx.  Returns B_FALSE on DMA/doorbell failure.
+ */
+boolean_t
+mlxcx_sq_add_nop(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+	uint_t index, start_pc;
+	mlxcx_sendq_ent_t *ent0;
+	ddi_fm_error_t err;
+
+	ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+
+	/* Claim the next ring slot; mlwq_nents is a power of two. */
+	index = mlwq->mlwq_pc & (mlwq->mlwq_nents - 1);
+	ent0 = &mlwq->mlwq_send_ent[index];
+	start_pc = mlwq->mlwq_pc;
+	++mlwq->mlwq_pc;
+
+	bzero(ent0, sizeof (mlxcx_sendq_ent_t));
+	ent0->mlsqe_control.mlcs_opcode = MLXCX_WQE_OP_NOP;
+	ent0->mlsqe_control.mlcs_qp_or_sq = to_be24(mlwq->mlwq_num);
+	ent0->mlsqe_control.mlcs_wqe_index = to_be16(start_pc);
+
+	set_bits8(&ent0->mlsqe_control.mlcs_flags,
+	    MLXCX_SQE_FENCE_MODE, MLXCX_SQE_FENCE_NONE);
+	set_bits8(&ent0->mlsqe_control.mlcs_flags,
+	    MLXCX_SQE_COMPLETION_MODE, MLXCX_SQE_CQE_ALWAYS);
+
+	/* One 16-byte descriptor segment (the control segment alone). */
+	ent0->mlsqe_control.mlcs_ds = 1;
+
+	/* Flush the WQE to the device before ringing the doorbell. */
+	VERIFY0(ddi_dma_sync(mlwq->mlwq_dma.mxdb_dma_handle,
+	    (uintptr_t)ent0 - (uintptr_t)mlwq->mlwq_send_ent,
+	    sizeof (mlxcx_sendq_ent_t), DDI_DMA_SYNC_FORDEV));
+	ddi_fm_dma_err_get(mlwq->mlwq_dma.mxdb_dma_handle, &err,
+	    DDI_FME_VERSION);
+	if (err.fme_status != DDI_FM_OK) {
+		return (B_FALSE);
+	}
+	if (!mlxcx_sq_ring_dbell(mlxp, mlwq, index)) {
+		return (B_FALSE);
+	}
+	return (B_TRUE);
+}
+
+/*
+ * Enqueue a TX packet on an SQ.  Builds one send WQE (control + eth +
+ * inline headers + scatter pointers) for the buffer chain headed by b0,
+ * spilling extra scatter pointers into additional "extra" ring entries
+ * as needed, then syncs the entries and rings the doorbell.
+ *
+ * inlinehdrs/inlinelen: packet headers copied inline into the WQE.
+ * chkflags: HCK_* checksum-offload flags from MAC.
+ * b0: head of the buffer chain (its mlb_tx_chain lists the rest).
+ *
+ * Caller must hold mlwq_mtx.  Returns B_FALSE on DMA/doorbell failure
+ * (b0 has already been placed on the CQ's buffer list by then).
+ */
+boolean_t
+mlxcx_sq_add_buffer(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq,
+    uint8_t *inlinehdrs, size_t inlinelen, uint32_t chkflags,
+    mlxcx_buffer_t *b0)
+{
+	uint_t index, first, ents = 0;
+	mlxcx_completion_queue_t *cq;
+	mlxcx_sendq_ent_t *ent0;
+	mlxcx_sendq_extra_ent_t *ent;
+	mlxcx_wqe_data_seg_t *seg;
+	uint_t ptri, nptr;
+	const ddi_dma_cookie_t *c;
+	size_t rem;
+	mlxcx_buffer_t *b;
+	ddi_fm_error_t err;
+
+	ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+	ASSERT3P(b0->mlb_tx_head, ==, b0);
+	ASSERT3U(b0->mlb_state, ==, MLXCX_BUFFER_ON_WQ);
+	cq = mlwq->mlwq_cq;
+
+	/* Claim the first ring slot for the control entry. */
+	index = mlwq->mlwq_pc & (mlwq->mlwq_nents - 1);
+	ent0 = &mlwq->mlwq_send_ent[index];
+	b0->mlb_wqe_index = mlwq->mlwq_pc;
+	++mlwq->mlwq_pc;
+	++ents;
+
+	first = index;
+
+	/*
+	 * Track the buffer on the CQ before the doorbell so the completion
+	 * handler can always find it.
+	 */
+	mutex_enter(&cq->mlcq_bufbmtx);
+	list_insert_tail(&cq->mlcq_buffers_b, b0);
+	atomic_inc_64(&cq->mlcq_bufcnt);
+	mutex_exit(&cq->mlcq_bufbmtx);
+
+	bzero(ent0, sizeof (mlxcx_sendq_ent_t));
+	ent0->mlsqe_control.mlcs_opcode = MLXCX_WQE_OP_SEND;
+	ent0->mlsqe_control.mlcs_qp_or_sq = to_be24(mlwq->mlwq_num);
+	ent0->mlsqe_control.mlcs_wqe_index = to_be16(b0->mlb_wqe_index);
+
+	set_bits8(&ent0->mlsqe_control.mlcs_flags,
+	    MLXCX_SQE_FENCE_MODE, MLXCX_SQE_FENCE_WAIT_OTHERS);
+	set_bits8(&ent0->mlsqe_control.mlcs_flags,
+	    MLXCX_SQE_COMPLETION_MODE, MLXCX_SQE_CQE_ALWAYS);
+
+	/* Copy the L2 (and possibly further) headers inline into the WQE. */
+	VERIFY3U(inlinelen, <=, sizeof (ent0->mlsqe_eth.mles_inline_headers));
+	set_bits16(&ent0->mlsqe_eth.mles_szflags,
+	    MLXCX_SQE_ETH_INLINE_HDR_SZ, inlinelen);
+	if (inlinelen > 0) {
+		bcopy(inlinehdrs, ent0->mlsqe_eth.mles_inline_headers,
+		    inlinelen);
+	}
+
+	/* DS counts 16-byte segments; start with control+eth, grow below. */
+	ent0->mlsqe_control.mlcs_ds =
+	    offsetof(mlxcx_sendq_ent_t, mlsqe_data) / 16;
+
+	/* Request hardware checksum offload where MAC asked for it. */
+	if (chkflags & HCK_IPV4_HDRCKSUM) {
+		ASSERT(mlxp->mlx_caps->mlc_checksum);
+		set_bit8(&ent0->mlsqe_eth.mles_csflags,
+		    MLXCX_SQE_ETH_CSFLAG_L3_CHECKSUM);
+	}
+	if (chkflags & HCK_FULLCKSUM) {
+		ASSERT(mlxp->mlx_caps->mlc_checksum);
+		set_bit8(&ent0->mlsqe_eth.mles_csflags,
+		    MLXCX_SQE_ETH_CSFLAG_L4_CHECKSUM);
+	}
+
+	/*
+	 * Walk the buffer chain, emitting one data segment per DMA cookie
+	 * (clamped to each buffer's mlb_used length).  When ent0's segment
+	 * array fills up, claim additional "extra" ring entries.
+	 */
+	b = b0;
+	ptri = 0;
+	nptr = sizeof (ent0->mlsqe_data) / sizeof (mlxcx_wqe_data_seg_t);
+	seg = ent0->mlsqe_data;
+	while (b != NULL) {
+		rem = b->mlb_used;
+
+		c = NULL;
+		while (rem > 0 &&
+		    (c = mlxcx_dma_cookie_iter(&b->mlb_dma, c)) != NULL) {
+			if (ptri >= nptr) {
+				index = mlwq->mlwq_pc & (mlwq->mlwq_nents - 1);
+				ent = &mlwq->mlwq_send_extra_ent[index];
+				++mlwq->mlwq_pc;
+				++ents;
+
+				seg = ent->mlsqe_data;
+				ptri = 0;
+				nptr = sizeof (ent->mlsqe_data) /
+				    sizeof (mlxcx_wqe_data_seg_t);
+			}
+
+			seg->mlds_lkey = to_be32(mlxp->mlx_rsvd_lkey);
+			if (c->dmac_size > rem) {
+				seg->mlds_byte_count = to_be32(rem);
+				rem = 0;
+			} else {
+				seg->mlds_byte_count = to_be32(c->dmac_size);
+				rem -= c->dmac_size;
+			}
+			seg->mlds_address = to_be64(c->dmac_laddress);
+			++seg;
+			++ptri;
+			++ent0->mlsqe_control.mlcs_ds;
+
+			ASSERT3U(ent0->mlsqe_control.mlcs_ds, <=,
+			    MLXCX_SQE_MAX_DS);
+		}
+
+		if (b == b0) {
+			b = list_head(&b0->mlb_tx_chain);
+		} else {
+			b = list_next(&b0->mlb_tx_chain, b);
+		}
+	}
+
+	/* Null-terminate any unused scatter pointers in the last entry. */
+	for (; ptri < nptr; ++ptri, ++seg) {
+		seg->mlds_lkey = to_be32(MLXCX_NULL_LKEY);
+		seg->mlds_byte_count = to_be32(0);
+		seg->mlds_address = to_be64(0);
+	}
+
+	/*
+	 * Make sure the workqueue entry is flushed out before updating
+	 * the doorbell.
+	 * NOTE(review): this sync range assumes the 'ents' entries are
+	 * contiguous starting at ent0; if the PC wrapped mid-chain the
+	 * extra entries live at the ring start -- confirm ring sizing or
+	 * caller constraints prevent that.
+	 */
+	VERIFY0(ddi_dma_sync(mlwq->mlwq_dma.mxdb_dma_handle,
+	    (uintptr_t)ent0 - (uintptr_t)mlwq->mlwq_send_ent,
+	    ents * sizeof (mlxcx_sendq_ent_t), DDI_DMA_SYNC_FORDEV));
+	ddi_fm_dma_err_get(mlwq->mlwq_dma.mxdb_dma_handle, &err,
+	    DDI_FME_VERSION);
+	if (err.fme_status != DDI_FM_OK) {
+		return (B_FALSE);
+	}
+	if (!mlxcx_sq_ring_dbell(mlxp, mlwq, first)) {
+		return (B_FALSE);
+	}
+	return (B_TRUE);
+}
+
+/*
+ * Post a single receive buffer to an RQ.  Convenience wrapper around
+ * mlxcx_rq_add_buffers(); the same locking requirements apply.
+ */
+boolean_t
+mlxcx_rq_add_buffer(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq,
+    mlxcx_buffer_t *buf)
+{
+	mlxcx_buffer_t *one = buf;
+
+	return (mlxcx_rq_add_buffers(mlxp, mlwq, &one, 1));
+}
+
+/*
+ * Post nbufs receive buffers to an RQ: for each buffer, claim a ring slot,
+ * track the buffer on the CQ, fill the WQE's scatter pointers from the
+ * buffer's DMA cookies, and flush the WQE.  Entries in bufs[] are NULLed
+ * as they are consumed -- on failure, the caller owns any still-non-NULL
+ * buffers; the rest belong to the WQ.  Finally the receive doorbell (and
+ * the CQ doorbell) are flushed to the device.
+ *
+ * Caller must hold both mlwq_mtx and the CQ's mlcq_mtx.
+ */
+boolean_t
+mlxcx_rq_add_buffers(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq,
+    mlxcx_buffer_t **bufs, size_t nbufs)
+{
+	uint_t index;
+	mlxcx_recvq_ent_t *ent;
+	mlxcx_completion_queue_t *cq;
+	mlxcx_wqe_data_seg_t *seg;
+	uint_t bi, ptri;
+	const ddi_dma_cookie_t *c;
+	mlxcx_buffer_t *buf;
+	ddi_fm_error_t err;
+
+	ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+	cq = mlwq->mlwq_cq;
+	ASSERT(mutex_owned(&cq->mlcq_mtx));
+
+	for (bi = 0; bi < nbufs; ++bi) {
+		/* Transfer ownership of this buffer to the WQ. */
+		buf = bufs[bi];
+		bufs[bi] = NULL;
+		ASSERT3U(buf->mlb_state, ==, MLXCX_BUFFER_ON_WQ);
+
+		index = mlwq->mlwq_pc & (mlwq->mlwq_nents - 1);
+		ent = &mlwq->mlwq_recv_ent[index];
+		buf->mlb_wqe_index = mlwq->mlwq_pc;
+
+		++mlwq->mlwq_pc;
+
+		/* Track on the CQ so the completion handler can find it. */
+		mutex_enter(&cq->mlcq_bufbmtx);
+		list_insert_tail(&cq->mlcq_buffers, buf);
+		atomic_inc_64(&cq->mlcq_bufcnt);
+		mutex_exit(&cq->mlcq_bufbmtx);
+
+		/* One scatter pointer per DMA cookie. */
+		ASSERT3U(buf->mlb_dma.mxdb_ncookies, <=, MLXCX_RECVQ_MAX_PTRS);
+		ptri = 0;
+		c = NULL;
+		while ((c = mlxcx_dma_cookie_iter(&buf->mlb_dma, c)) != NULL) {
+			seg = &ent->mlrqe_data[ptri++];
+			seg->mlds_lkey = to_be32(mlxp->mlx_rsvd_lkey);
+			seg->mlds_byte_count = to_be32(c->dmac_size);
+			seg->mlds_address = to_be64(c->dmac_laddress);
+		}
+		/*
+		 * Fill any unused scatter pointers with the special null
+		 * value.
+		 */
+		for (; ptri < MLXCX_RECVQ_MAX_PTRS; ++ptri) {
+			seg = &ent->mlrqe_data[ptri];
+			seg->mlds_lkey = to_be32(MLXCX_NULL_LKEY);
+			seg->mlds_byte_count = to_be32(0);
+			seg->mlds_address = to_be64(0);
+		}
+
+		/*
+		 * Make sure the workqueue entry is flushed out before updating
+		 * the doorbell.
+		 */
+		VERIFY0(ddi_dma_sync(mlwq->mlwq_dma.mxdb_dma_handle,
+		    (uintptr_t)ent - (uintptr_t)mlwq->mlwq_recv_ent,
+		    sizeof (mlxcx_recvq_ent_t), DDI_DMA_SYNC_FORDEV));
+		ddi_fm_dma_err_get(mlwq->mlwq_dma.mxdb_dma_handle, &err,
+		    DDI_FME_VERSION);
+		if (err.fme_status != DDI_FM_OK) {
+			return (B_FALSE);
+		}
+	}
+
+	mlwq->mlwq_doorbell->mlwqd_recv_counter = to_be16(mlwq->mlwq_pc);
+	/*
+	 * Flush the CQ doorbell as well so that HW knows how many
+	 * completions we've consumed.
+	 */
+	MLXCX_DMA_SYNC(cq->mlcq_doorbell_dma, DDI_DMA_SYNC_FORDEV);
+	ddi_fm_dma_err_get(cq->mlcq_doorbell_dma.mxdb_dma_handle, &err,
+	    DDI_FME_VERSION);
+	if (err.fme_status != DDI_FM_OK) {
+		return (B_FALSE);
+	}
+	MLXCX_DMA_SYNC(mlwq->mlwq_doorbell_dma, DDI_DMA_SYNC_FORDEV);
+	ddi_fm_dma_err_get(mlwq->mlwq_doorbell_dma.mxdb_dma_handle, &err,
+	    DDI_FME_VERSION);
+	if (err.fme_status != DDI_FM_OK) {
+		return (B_FALSE);
+	}
+	return (B_TRUE);
+}
+
+/*
+ * Top up an RQ with receive buffers.  The number of buffers outstanding is
+ * tracked via the CQ's mlcq_bufcnt; we refill in batches of
+ * MLXCX_RQ_REFILL_STEP until we're within one step of the ring size, and
+ * bail out early on teardown or buffer-shard exhaustion.
+ *
+ * Caller must hold both mlwq_mtx and the CQ's mlcq_mtx.
+ */
+void
+mlxcx_rq_refill(mlxcx_t *mlxp, mlxcx_work_queue_t *mlwq)
+{
+	size_t target, current, want, done, n;
+	mlxcx_completion_queue_t *cq;
+	mlxcx_buffer_t *b[MLXCX_RQ_REFILL_STEP];
+	uint_t i;
+
+	ASSERT(mutex_owned(&mlwq->mlwq_mtx));
+	cq = mlwq->mlwq_cq;
+	ASSERT(mutex_owned(&cq->mlcq_mtx));
+
+	ASSERT(mlwq->mlwq_state & MLXCX_WQ_BUFFERS);
+
+	/* Leave a refill-step's worth of slack at the top of the ring. */
+	target = mlwq->mlwq_nents - MLXCX_RQ_REFILL_STEP;
+
+	if (cq->mlcq_state & MLXCX_CQ_TEARDOWN)
+		return;
+
+	current = cq->mlcq_bufcnt;
+
+	/* Only refill once we're at least a full step short of target. */
+	if (current >= target - MLXCX_RQ_REFILL_STEP)
+		return;
+
+	want = target - current;
+	done = 0;
+
+	while (!(mlwq->mlwq_state & MLXCX_WQ_TEARDOWN) && done < want) {
+		n = mlxcx_buf_take_n(mlxp, mlwq, b, MLXCX_RQ_REFILL_STEP);
+		if (n == 0) {
+			/*
+			 * Fix: done/want are size_t -- print with %lu, not
+			 * %u, to avoid bogus values on 64-bit kernels.
+			 */
+			mlxcx_warn(mlxp, "!exiting rq refill early, done %lu "
+			    "but wanted %lu", done, want);
+			return;
+		}
+		if (mlwq->mlwq_state & MLXCX_WQ_TEARDOWN) {
+			for (i = 0; i < n; ++i)
+				mlxcx_buf_return(mlxp, b[i]);
+			return;
+		}
+		if (!mlxcx_rq_add_buffers(mlxp, mlwq, b, n)) {
+			/*
+			 * mlxcx_rq_add_buffers NULLs out the buffers as it
+			 * enqueues them, so any that are non-NULL we have to
+			 * free now. The others now belong to the WQ, even if
+			 * we failed.
+			 */
+			for (i = 0; i < n; ++i) {
+				if (b[i] != NULL) {
+					mlxcx_buf_return(mlxp, b[i]);
+				}
+			}
+			return;
+		}
+		done += n;
+	}
+}
+
+/*
+ * Map a CQ error syndrome code to a human-readable name for ereports.
+ * Unrecognised syndromes map to "UNKNOWN".
+ */
+static const char *
+mlxcx_cq_err_syndrome_string(mlxcx_cq_error_syndrome_t sy)
+{
+	static const struct {
+		mlxcx_cq_error_syndrome_t	mces_syndrome;
+		const char			*mces_name;
+	} mlxcx_cq_err_names[] = {
+		{ MLXCX_CQ_ERR_LOCAL_LENGTH,	"LOCAL_LENGTH" },
+		{ MLXCX_CQ_ERR_LOCAL_QP_OP,	"LOCAL_QP_OP" },
+		{ MLXCX_CQ_ERR_LOCAL_PROTECTION, "LOCAL_PROTECTION" },
+		{ MLXCX_CQ_ERR_WR_FLUSHED,	"WR_FLUSHED" },
+		{ MLXCX_CQ_ERR_MEM_WINDOW_BIND,	"MEM_WINDOW_BIND" },
+		{ MLXCX_CQ_ERR_BAD_RESPONSE,	"BAD_RESPONSE" },
+		{ MLXCX_CQ_ERR_LOCAL_ACCESS,	"LOCAL_ACCESS" },
+		{ MLXCX_CQ_ERR_XPORT_RETRY_CTR,	"XPORT_RETRY_CTR" },
+		{ MLXCX_CQ_ERR_RNR_RETRY_CTR,	"RNR_RETRY_CTR" },
+		{ MLXCX_CQ_ERR_ABORTED,		"ABORTED" }
+	};
+	uint_t i;
+
+	for (i = 0; i < sizeof (mlxcx_cq_err_names) /
+	    sizeof (mlxcx_cq_err_names[0]); ++i) {
+		if (mlxcx_cq_err_names[i].mces_syndrome == sy)
+			return (mlxcx_cq_err_names[i].mces_name);
+	}
+	return ("UNKNOWN");
+}
+
+/*
+ * Post a DDI FM ereport for an error CQE (class mlxcx.cqe.err), including
+ * the decoded syndrome, the vendor syndrome, and the WQ/CQ identifiers,
+ * then mark the service as degraded.  No-op if ereports are not enabled
+ * in the driver's FM capabilities.
+ */
+static void
+mlxcx_fm_cqe_ereport(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq,
+    mlxcx_completionq_error_ent_t *ent)
+{
+	uint64_t ena;
+	char buf[FM_MAX_CLASS];
+	const char *name = mlxcx_cq_err_syndrome_string(ent->mlcqee_syndrome);
+
+	if (!DDI_FM_EREPORT_CAP(mlxp->mlx_fm_caps))
+		return;
+
+	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
+	    MLXCX_FM_SERVICE_MLXCX, "cqe.err");
+	ena = fm_ena_generate(0, FM_ENA_FMT1);
+
+	ddi_fm_ereport_post(mlxp->mlx_dip, buf, ena, DDI_NOSLEEP,
+	    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
+	    "syndrome", DATA_TYPE_STRING, name,
+	    "syndrome_num", DATA_TYPE_UINT8, ent->mlcqee_syndrome,
+	    "vendor_syndrome", DATA_TYPE_UINT8,
+	    ent->mlcqee_vendor_error_syndrome,
+	    "wqe_counter", DATA_TYPE_UINT16, from_be16(ent->mlcqee_wqe_counter),
+	    "wq_type", DATA_TYPE_STRING,
+	    (mlcq->mlcq_wq->mlwq_type == MLXCX_WQ_TYPE_SENDQ) ? "send": "recv",
+	    "cq_num", DATA_TYPE_UINT32, mlcq->mlcq_num,
+	    "wq_num", DATA_TYPE_UINT32, mlcq->mlcq_wq->mlwq_num,
+	    NULL);
+	ddi_fm_service_impact(mlxp->mlx_dip, DDI_SERVICE_DEGRADED);
+}
+
+/*
+ * Handle a TX completion CQE for the buffer chain 'buf'.  On an error CQE
+ * we post an ereport and re-check the SQ state; on unexpected opcodes or
+ * formats we just warn.  In every case the buffer chain is returned to its
+ * shard (without freeing the mblk: keepmp == B_FALSE).
+ *
+ * Caller must hold mlcq_mtx.
+ */
+void
+mlxcx_tx_completion(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq,
+    mlxcx_completionq_ent_t *ent, mlxcx_buffer_t *buf)
+{
+	ASSERT(mutex_owned(&mlcq->mlcq_mtx));
+	if (ent->mlcqe_opcode == MLXCX_CQE_OP_REQ_ERR) {
+		mlxcx_completionq_error_ent_t *eent =
+		    (mlxcx_completionq_error_ent_t *)ent;
+		mlxcx_fm_cqe_ereport(mlxp, mlcq, eent);
+		mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
+		/* The SQ may have entered an error state; check it. */
+		mutex_enter(&mlcq->mlcq_wq->mlwq_mtx);
+		mlxcx_check_sq(mlxp, mlcq->mlcq_wq);
+		mutex_exit(&mlcq->mlcq_wq->mlwq_mtx);
+		return;
+	}
+
+	if (ent->mlcqe_opcode != MLXCX_CQE_OP_REQ) {
+		mlxcx_warn(mlxp, "!got weird cq opcode: %x", ent->mlcqe_opcode);
+		mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
+		return;
+	}
+
+	if (ent->mlcqe_send_wqe_opcode != MLXCX_WQE_OP_SEND) {
+		mlxcx_warn(mlxp, "!got weird cq wqe opcode: %x",
+		    ent->mlcqe_send_wqe_opcode);
+		mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
+		return;
+	}
+
+	if (ent->mlcqe_format != MLXCX_CQE_FORMAT_BASIC) {
+		mlxcx_warn(mlxp, "!got weird cq format: %x", ent->mlcqe_format);
+		mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
+		return;
+	}
+
+	/* Normal successful send completion. */
+	mlxcx_buf_return_chain(mlxp, buf, B_FALSE);
+}
+
+/*
+ * Handle an RX completion CQE: validate the CQE, sync the buffer's DMA
+ * memory for the CPU, loan the buffer's mblk up to MAC, set checksum
+ * offload results, and occasionally trigger an RQ refill.
+ *
+ * Returns the mblk to pass up to MAC, or NULL if the packet was dropped
+ * (in which case the buffer has been returned to its shard).
+ *
+ * Caller must hold mlcq_mtx.
+ */
+mblk_t *
+mlxcx_rx_completion(mlxcx_t *mlxp, mlxcx_completion_queue_t *mlcq,
+    mlxcx_completionq_ent_t *ent, mlxcx_buffer_t *buf)
+{
+	uint32_t chkflags = 0;
+	ddi_fm_error_t err;
+
+	ASSERT(mutex_owned(&mlcq->mlcq_mtx));
+
+	if (ent->mlcqe_opcode == MLXCX_CQE_OP_RESP_ERR) {
+		mlxcx_completionq_error_ent_t *eent =
+		    (mlxcx_completionq_error_ent_t *)ent;
+		mlxcx_fm_cqe_ereport(mlxp, mlcq, eent);
+		mlxcx_buf_return(mlxp, buf);
+		/* The RQ may have entered an error state; check it. */
+		mutex_enter(&mlcq->mlcq_wq->mlwq_mtx);
+		mlxcx_check_rq(mlxp, mlcq->mlcq_wq);
+		mutex_exit(&mlcq->mlcq_wq->mlwq_mtx);
+		return (NULL);
+	}
+
+	if (ent->mlcqe_opcode != MLXCX_CQE_OP_RESP) {
+		mlxcx_warn(mlxp, "!got weird cq opcode: %x", ent->mlcqe_opcode);
+		mlxcx_buf_return(mlxp, buf);
+		return (NULL);
+	}
+
+	if (ent->mlcqe_format != MLXCX_CQE_FORMAT_BASIC) {
+		mlxcx_warn(mlxp, "!got weird cq format: %x", ent->mlcqe_format);
+		mlxcx_buf_return(mlxp, buf);
+		return (NULL);
+	}
+
+	/* Account hardware-reported drops against the port stats. */
+	if (ent->mlcqe_rx_drop_counter > 0) {
+		atomic_add_64(&mlcq->mlcq_stats->mlps_rx_drops,
+		    ent->mlcqe_rx_drop_counter);
+	}
+
+	MLXCX_DMA_SYNC(buf->mlb_dma, DDI_DMA_SYNC_FORCPU);
+	ddi_fm_dma_err_get(buf->mlb_dma.mxdb_dma_handle, &err,
+	    DDI_FME_VERSION);
+	if (err.fme_status != DDI_FM_OK) {
+		ddi_fm_dma_err_clear(buf->mlb_dma.mxdb_dma_handle,
+		    DDI_FME_VERSION);
+		mlxcx_buf_return(mlxp, buf);
+		return (NULL);
+	}
+
+	/* Hand the buffer to MAC; it comes back via mlxcx_buf_mp_return. */
+	if (!mlxcx_buf_loan(mlxp, buf)) {
+		mlxcx_warn(mlxp, "!loan failed, dropping packet");
+		mlxcx_buf_return(mlxp, buf);
+		return (NULL);
+	}
+
+	/* Trim the mblk to the received length reported by the CQE. */
+	buf->mlb_mp->b_next = NULL;
+	buf->mlb_mp->b_cont = NULL;
+	buf->mlb_mp->b_wptr = buf->mlb_mp->b_rptr +
+	    from_be32(ent->mlcqe_byte_cnt);
+
+	/* Propagate hardware checksum verification results to MAC. */
+	if (get_bit8(ent->mlcqe_csflags, MLXCX_CQE_CSFLAGS_L4_OK)) {
+		chkflags |= HCK_FULLCKSUM_OK;
+	}
+	if (get_bit8(ent->mlcqe_csflags, MLXCX_CQE_CSFLAGS_L3_OK)) {
+		chkflags |= HCK_IPV4_HDRCKSUM_OK;
+	}
+	if (chkflags != 0) {
+		mac_hcksum_set(buf->mlb_mp, 0, 0, 0,
+		    from_be16(ent->mlcqe_checksum), chkflags);
+	}
+
+	/*
+	 * Don't check if a refill is needed on every single completion,
+	 * since checking involves taking the RQ lock.
+	 */
+	if ((buf->mlb_wqe_index & 0x7) == 0) {
+		mlxcx_work_queue_t *wq = mlcq->mlcq_wq;
+		ASSERT(wq != NULL);
+		mutex_enter(&wq->mlwq_mtx);
+		if (!(wq->mlwq_state & MLXCX_WQ_TEARDOWN))
+			mlxcx_rq_refill(mlxp, wq);
+		mutex_exit(&wq->mlwq_mtx);
+	}
+
+	return (buf->mlb_mp);
+}
+
+/*
+ * desballoc free callback: MAC has consumed (freed) the mblk loaned out
+ * with a buffer.  Clear the stale mlb_mp reference, and if the buffer is
+ * still out on loan, return it to its shard.  If it is in any other state
+ * (e.g. mid-teardown), only the mblk pointer is cleared.
+ */
+static void
+mlxcx_buf_mp_return(caddr_t arg)
+{
+	mlxcx_buffer_t *b = (mlxcx_buffer_t *)arg;
+	mlxcx_t *mlxp = b->mlb_mlx;
+
+	/* The mblk backing mlb_mp is gone either way; drop the reference. */
+	b->mlb_mp = NULL;
+
+	if (b->mlb_state == MLXCX_BUFFER_ON_LOAN)
+		mlxcx_buf_return(mlxp, b);
+}
+
+/*
+ * Create an ordinary (driver-owned, copy) RX/TX buffer in the given shard:
+ * allocate a buffer_t from the cache, allocate MTU-sized DMA memory for
+ * it, and pre-allocate an mblk via desballoc wired to mlxcx_buf_mp_return.
+ *
+ * NOTE(review): the desballoc() return value is not checked here; if it
+ * fails, mlb_mp is NULL and mlxcx_buf_loan() retries the allocation --
+ * confirm that is the intended recovery path.
+ *
+ * Returns B_TRUE and stores the buffer in *bp on success.
+ */
+boolean_t
+mlxcx_buf_create(mlxcx_t *mlxp, mlxcx_buf_shard_t *shard, mlxcx_buffer_t **bp)
+{
+	mlxcx_buffer_t *b;
+	ddi_device_acc_attr_t acc;
+	ddi_dma_attr_t attr;
+	boolean_t ret;
+
+	b = kmem_cache_alloc(mlxp->mlx_bufs_cache, KM_SLEEP);
+	b->mlb_shard = shard;
+	b->mlb_foreign = B_FALSE;
+
+	mlxcx_dma_acc_attr(mlxp, &acc);
+	mlxcx_dma_buf_attr(mlxp, &attr);
+
+	ret = mlxcx_dma_alloc_offset(mlxp, &b->mlb_dma, &attr, &acc,
+	    B_FALSE, mlxp->mlx_ports[0].mlp_mtu, 2, B_TRUE);
+	if (!ret) {
+		kmem_cache_free(mlxp->mlx_bufs_cache, b);
+		return (B_FALSE);
+	}
+
+	b->mlb_frtn.free_func = mlxcx_buf_mp_return;
+	b->mlb_frtn.free_arg = (caddr_t)b;
+	b->mlb_mp = desballoc((unsigned char *)b->mlb_dma.mxdb_va,
+	    b->mlb_dma.mxdb_len, 0, &b->mlb_frtn);
+
+	*bp = b;
+
+	return (B_TRUE);
+}
+
+/*
+ * Create a "foreign" TX buffer in the given shard: one with a DMA handle
+ * prepared for binding caller-owned (mblk) memory rather than carrying
+ * its own allocation.  Returns B_TRUE and stores the buffer in *bp on
+ * success.
+ */
+boolean_t
+mlxcx_buf_create_foreign(mlxcx_t *mlxp, mlxcx_buf_shard_t *shard,
+    mlxcx_buffer_t **bp)
+{
+	ddi_dma_attr_t attr;
+	mlxcx_buffer_t *b;
+
+	mlxcx_dma_buf_attr(mlxp, &attr);
+
+	b = kmem_cache_alloc(mlxp->mlx_bufs_cache, KM_SLEEP);
+	b->mlb_foreign = B_TRUE;
+	b->mlb_shard = shard;
+
+	if (!mlxcx_dma_init(mlxp, &b->mlb_dma, &attr, B_TRUE)) {
+		kmem_cache_free(mlxp->mlx_bufs_cache, b);
+		return (B_FALSE);
+	}
+
+	*bp = b;
+	return (B_TRUE);
+}
+
+/*
+ * Take one foreign buffer from a WQ's foreign-buffer shard, moving it from
+ * the free list to the busy list and marking it ON_WQ.  Blocks (cv_wait)
+ * until a buffer becomes free.
+ */
+static void
+mlxcx_buf_take_foreign(mlxcx_t *mlxp, mlxcx_work_queue_t *wq,
+    mlxcx_buffer_t **bp)
+{
+	mlxcx_buffer_t *b;
+	mlxcx_buf_shard_t *s = wq->mlwq_foreign_bufs;
+
+	mutex_enter(&s->mlbs_mtx);
+	while (list_is_empty(&s->mlbs_free))
+		cv_wait(&s->mlbs_free_nonempty, &s->mlbs_mtx);
+	b = list_remove_head(&s->mlbs_free);
+	ASSERT3U(b->mlb_state, ==, MLXCX_BUFFER_FREE);
+	ASSERT(b->mlb_foreign);
+	b->mlb_state = MLXCX_BUFFER_ON_WQ;
+	list_insert_tail(&s->mlbs_busy, b);
+	mutex_exit(&s->mlbs_mtx);
+
+	*bp = b;
+}
+
+/*
+ * Prepare the mblk chain mpb for transmit, skipping the first 'off' bytes
+ * (the headers already copied inline into the WQE).  For each mblk: if it
+ * is large enough, DMA-bind it in place using a foreign buffer; otherwise
+ * (or if binding fails) copy it into an ordinary buffer.  The resulting
+ * buffers are chained under the head buffer, returned in *bp.
+ *
+ * Returns B_FALSE (with *bp NULL) only if repeated copy attempts fail.
+ */
+boolean_t
+mlxcx_buf_bind_or_copy(mlxcx_t *mlxp, mlxcx_work_queue_t *wq,
+    mblk_t *mpb, size_t off, mlxcx_buffer_t **bp)
+{
+	mlxcx_buffer_t *b, *b0 = NULL;
+	boolean_t first = B_TRUE;
+	ddi_fm_error_t err;
+	mblk_t *mp;
+	uint8_t *rptr;
+	size_t sz;
+	size_t ncookies = 0;
+	boolean_t ret;
+	uint_t attempts = 0;
+
+	for (mp = mpb; mp != NULL; mp = mp->b_cont) {
+		rptr = mp->b_rptr;
+		sz = MBLKL(mp);
+
+		/* 'off' only applies to the first mblk (reset below). */
+		if (off > 0)
+			ASSERT3U(off, <, sz);
+		rptr += off;
+		sz -= off;
+
+		/* Small fragments aren't worth a DMA bind; copy them. */
+		if (sz < mlxp->mlx_props.mldp_tx_bind_threshold)
+			goto copyb;
+
+		mlxcx_buf_take_foreign(mlxp, wq, &b);
+		ret = mlxcx_dma_bind_mblk(mlxp, &b->mlb_dma, mp, off, B_FALSE);
+
+		if (!ret) {
+			/* Bind failed: fall through to the copy path. */
+			mlxcx_buf_return(mlxp, b);
+
+copyb:
+			mlxcx_buf_take(mlxp, wq, &b);
+			ASSERT3U(b->mlb_dma.mxdb_len, >=, sz);
+			bcopy(rptr, b->mlb_dma.mxdb_va, sz);
+			MLXCX_DMA_SYNC(b->mlb_dma, DDI_DMA_SYNC_FORDEV);
+			ddi_fm_dma_err_get(b->mlb_dma.mxdb_dma_handle, &err,
+			    DDI_FME_VERSION);
+			if (err.fme_status != DDI_FM_OK) {
+				ddi_fm_dma_err_clear(b->mlb_dma.mxdb_dma_handle,
+				    DDI_FME_VERSION);
+				mlxcx_buf_return(mlxp, b);
+				/* Give up after repeated FM sync failures. */
+				if (++attempts > MLXCX_BUF_BIND_MAX_ATTEMTPS) {
+					*bp = NULL;
+					return (B_FALSE);
+				}
+				goto copyb;
+			}
+		}
+
+		/*
+		 * We might overestimate here when we've copied data, since
+		 * the buffer might be longer than what we copied into it. This
+		 * is safe since it's always wrong in the conservative
+		 * direction (and we will blow up later when we actually
+		 * generate the WQE anyway).
+		 *
+		 * If the assert below ever blows, we'll have to come and fix
+		 * this up so we can transmit these packets.
+		 */
+		ncookies += b->mlb_dma.mxdb_ncookies;
+
+		if (first)
+			b0 = b;
+
+		/* Non-head buffers are chained under b0. */
+		if (!first)
+			b->mlb_state = MLXCX_BUFFER_ON_CHAIN;
+
+		b->mlb_tx_mp = mp;
+		b->mlb_tx_head = b0;
+		b->mlb_used = sz;
+
+		if (!first)
+			list_insert_tail(&b0->mlb_tx_chain, b);
+		first = B_FALSE;
+		off = 0;
+	}
+
+	ASSERT3U(ncookies, <=, MLXCX_SQE_MAX_PTRS);
+
+	*bp = b0;
+	return (B_TRUE);
+}
+
+/*
+ * Take a single buffer from the work queue's shard, blocking on the
+ * shard's condvar until one is free.  The buffer is moved from the free
+ * list to the busy list and marked MLXCX_BUFFER_ON_WQ before being
+ * returned through *bp.
+ */
+void
+mlxcx_buf_take(mlxcx_t *mlxp, mlxcx_work_queue_t *wq, mlxcx_buffer_t **bp)
+{
+ mlxcx_buffer_t *b;
+ mlxcx_buf_shard_t *s = wq->mlwq_bufs;
+
+ mutex_enter(&s->mlbs_mtx);
+ while (list_is_empty(&s->mlbs_free))
+ cv_wait(&s->mlbs_free_nonempty, &s->mlbs_mtx);
+ b = list_remove_head(&s->mlbs_free);
+ ASSERT3U(b->mlb_state, ==, MLXCX_BUFFER_FREE);
+ b->mlb_state = MLXCX_BUFFER_ON_WQ;
+ list_insert_tail(&s->mlbs_busy, b);
+ mutex_exit(&s->mlbs_mtx);
+
+ *bp = b;
+}
+
+/* Bound each wait for free buffers, and the number of empty wakeups. */
+#define MLXCX_BUF_TAKE_N_TIMEOUT_USEC 5000
+#define MLXCX_BUF_TAKE_N_MAX_RETRIES 3
+
+/*
+ * Take up to `nbufs' buffers from the work queue's shard into the array
+ * `bp'.  Unlike mlxcx_buf_take(), this does not block indefinitely: each
+ * wait on the free list is bounded by MLXCX_BUF_TAKE_N_TIMEOUT_USEC, and
+ * after MLXCX_BUF_TAKE_N_MAX_RETRIES empty wakeups we give up.  Returns
+ * the number of buffers actually taken, which may be fewer than `nbufs'.
+ */
+size_t
+mlxcx_buf_take_n(mlxcx_t *mlxp, mlxcx_work_queue_t *wq,
+ mlxcx_buffer_t **bp, size_t nbufs)
+{
+ mlxcx_buffer_t *b;
+ size_t done = 0, empty = 0;
+ clock_t wtime = drv_usectohz(MLXCX_BUF_TAKE_N_TIMEOUT_USEC);
+ mlxcx_buf_shard_t *s;
+
+ s = wq->mlwq_bufs;
+
+ mutex_enter(&s->mlbs_mtx);
+ while (done < nbufs) {
+ while (list_is_empty(&s->mlbs_free)) {
+ (void) cv_reltimedwait(&s->mlbs_free_nonempty,
+ &s->mlbs_mtx, wtime, TR_MILLISEC);
+ /* Give up after too many timed-out waits. */
+ if (list_is_empty(&s->mlbs_free) &&
+ empty++ >= MLXCX_BUF_TAKE_N_MAX_RETRIES) {
+ mutex_exit(&s->mlbs_mtx);
+ return (done);
+ }
+ }
+ b = list_remove_head(&s->mlbs_free);
+ ASSERT3U(b->mlb_state, ==, MLXCX_BUFFER_FREE);
+ b->mlb_state = MLXCX_BUFFER_ON_WQ;
+ list_insert_tail(&s->mlbs_busy, b);
+ bp[done++] = b;
+ }
+ mutex_exit(&s->mlbs_mtx);
+ return (done);
+}
+
+/*
+ * Prepare buffer `b' (currently on a work queue) to be loaned up the
+ * stack as an mblk.  The desballoc'd mblk wrapping the buffer's DMA
+ * memory is created lazily and cached on the buffer for re-use across
+ * loans.  Returns B_FALSE if desballoc() fails, leaving the buffer's
+ * state unchanged.
+ */
+boolean_t
+mlxcx_buf_loan(mlxcx_t *mlxp, mlxcx_buffer_t *b)
+{
+ VERIFY3U(b->mlb_state, ==, MLXCX_BUFFER_ON_WQ);
+ ASSERT3P(b->mlb_mlx, ==, mlxp);
+
+ if (b->mlb_mp == NULL) {
+ b->mlb_mp = desballoc((unsigned char *)b->mlb_dma.mxdb_va,
+ b->mlb_dma.mxdb_len, 0, &b->mlb_frtn);
+ if (b->mlb_mp == NULL)
+ return (B_FALSE);
+ }
+
+ b->mlb_state = MLXCX_BUFFER_ON_LOAN;
+ b->mlb_wqe_index = 0;
+ return (B_TRUE);
+}
+
+/*
+ * Return a TX buffer chain headed by `b0' to its shard.  If `b0' is not
+ * actually a chain head (its mlb_tx_head points at another buffer), only
+ * `b0' itself is returned.  When `keepmp' is set, the head's mblk
+ * pointers are cleared first so that mlxcx_buf_return() will not
+ * freemsg() the message -- used when the caller still owns the mblk.
+ */
+void
+mlxcx_buf_return_chain(mlxcx_t *mlxp, mlxcx_buffer_t *b0, boolean_t keepmp)
+{
+ mlxcx_buffer_t *b;
+
+ if (b0->mlb_tx_head != b0) {
+ mlxcx_buf_return(mlxp, b0);
+ return;
+ }
+
+ /* Returning a chained buffer unlinks it from mlb_tx_chain. */
+ while ((b = list_head(&b0->mlb_tx_chain)) != NULL) {
+ mlxcx_buf_return(mlxp, b);
+ }
+ if (keepmp) {
+ b0->mlb_tx_mp = NULL;
+ b0->mlb_tx_head = NULL;
+ }
+ mlxcx_buf_return(mlxp, b0);
+}
+
+/*
+ * Return a single buffer to its shard's free list, undoing whatever
+ * state it was in: removing it from the busy list and/or its chain
+ * head's mlb_tx_chain as appropriate, unbinding a foreign DMA mapping,
+ * and waking one waiter in mlxcx_buf_take().  If `b' was a TX chain
+ * head, its mblk is freed only after the shard lock is dropped (see the
+ * comment at the bottom).
+ */
+void
+mlxcx_buf_return(mlxcx_t *mlxp, mlxcx_buffer_t *b)
+{
+ /* Snapshot state before we reset the buffer below. */
+ mlxcx_buffer_state_t oldstate = b->mlb_state;
+ mlxcx_buffer_t *txhead = b->mlb_tx_head;
+ mlxcx_buf_shard_t *s = b->mlb_shard;
+ mblk_t *mp = b->mlb_tx_mp;
+
+ VERIFY3U(oldstate, !=, MLXCX_BUFFER_FREE);
+ ASSERT3P(b->mlb_mlx, ==, mlxp);
+ b->mlb_state = MLXCX_BUFFER_FREE;
+ b->mlb_wqe_index = 0;
+ b->mlb_tx_head = NULL;
+ b->mlb_tx_mp = NULL;
+ b->mlb_used = 0;
+ ASSERT(list_is_empty(&b->mlb_tx_chain));
+
+ mutex_enter(&s->mlbs_mtx);
+ switch (oldstate) {
+ case MLXCX_BUFFER_INIT:
+ /* Never made it onto any list. */
+ break;
+ case MLXCX_BUFFER_ON_WQ:
+ list_remove(&s->mlbs_busy, b);
+ break;
+ case MLXCX_BUFFER_ON_LOAN:
+ ASSERT(!b->mlb_foreign);
+ list_remove(&s->mlbs_busy, b);
+ break;
+ case MLXCX_BUFFER_FREE:
+ /* Double-return; cannot happen (VERIFY'd above). */
+ VERIFY(0);
+ break;
+ case MLXCX_BUFFER_ON_CHAIN:
+ ASSERT(txhead != NULL);
+ list_remove(&txhead->mlb_tx_chain, b);
+ list_remove(&s->mlbs_busy, b);
+ break;
+ }
+
+ /* Foreign buffers were bound to a caller's mblk; undo that. */
+ if (b->mlb_foreign) {
+ if (b->mlb_dma.mxdb_flags & MLXCX_DMABUF_BOUND) {
+ mlxcx_dma_unbind(mlxp, &b->mlb_dma);
+ }
+ }
+
+ list_insert_tail(&s->mlbs_free, b);
+ cv_signal(&s->mlbs_free_nonempty);
+
+ mutex_exit(&s->mlbs_mtx);
+
+ /*
+ * For TX chain heads, free the mblk_t after we let go of the lock.
+ * This might be a borrowed buf that we in turn loaned to MAC, in which
+ * case calling freemsg() on it will re-enter this very function -- so
+ * we better not be holding the lock!
+ */
+ if (txhead == b)
+ freemsg(mp);
+}
+
+/*
+ * Free buffer `b' back to the buffer kmem cache, releasing its cached
+ * mblk and DMA memory.  The buffer must be FREE or INIT, and the caller
+ * must hold the shard lock.
+ */
+void
+mlxcx_buf_destroy(mlxcx_t *mlxp, mlxcx_buffer_t *b)
+{
+ mlxcx_buf_shard_t *s = b->mlb_shard;
+ VERIFY(b->mlb_state == MLXCX_BUFFER_FREE ||
+ b->mlb_state == MLXCX_BUFFER_INIT);
+ ASSERT(mutex_owned(&s->mlbs_mtx));
+ if (b->mlb_state == MLXCX_BUFFER_FREE)
+ list_remove(&s->mlbs_free, b);
+
+ /*
+ * This is going back to the kmem cache, so it needs to be set up in
+ * the same way we expect a new buffer to come out (state INIT, other
+ * fields NULL'd)
+ */
+ b->mlb_state = MLXCX_BUFFER_INIT;
+ b->mlb_shard = NULL;
+ if (b->mlb_mp != NULL) {
+ freeb(b->mlb_mp);
+ /*
+ * NOTE(review): this relies on the mblk free routine
+ * (mlb_frtn) clearing mlb_mp -- confirm it does.
+ */
+ ASSERT(b->mlb_mp == NULL);
+ }
+ mlxcx_dma_free(&b->mlb_dma);
+ ASSERT(list_is_empty(&b->mlb_tx_chain));
+
+ kmem_cache_free(mlxp->mlx_bufs_cache, b);
+}
diff --git a/usr/src/uts/common/sys/fm/io/ddi.h b/usr/src/uts/common/sys/fm/io/ddi.h
index 75afff5c38..d8c772cdaf 100644
--- a/usr/src/uts/common/sys/fm/io/ddi.h
+++ b/usr/src/uts/common/sys/fm/io/ddi.h
@@ -66,6 +66,17 @@ extern "C" {
#define DVR_STACK_DEPTH "dvr-stack-depth"
#define DVR_ERR_SPECIFIC "dvr-error-specific"
+/* Generic NIC driver ereports. */
+#define DDI_FM_NIC "nic"
+#define DDI_FM_TXR_ERROR "txr-err"
+
+/* Valid values of the "error" field in txr-err ereports */
+#define DDI_FM_TXR_ERROR_WHITELIST "whitelist"
+#define DDI_FM_TXR_ERROR_NOTSUPP "notsupp"
+#define DDI_FM_TXR_ERROR_OVERTEMP "overtemp"
+#define DDI_FM_TXR_ERROR_HWFAIL "hwfail"
+#define DDI_FM_TXR_ERROR_UNKNOWN "unknown"
+
#ifdef __cplusplus
}
#endif
diff --git a/usr/src/uts/intel/Makefile.intel b/usr/src/uts/intel/Makefile.intel
index 820e0a4e31..aed47948a9 100644
--- a/usr/src/uts/intel/Makefile.intel
+++ b/usr/src/uts/intel/Makefile.intel
@@ -318,6 +318,7 @@ DRV_KMODS += log
DRV_KMODS += logindmux
DRV_KMODS += mega_sas
DRV_KMODS += mc-amd
+DRV_KMODS += mlxcx
DRV_KMODS += mm
DRV_KMODS += mouse8042
DRV_KMODS += mpt_sas
diff --git a/usr/src/uts/intel/ipsecah/Makefile b/usr/src/uts/intel/ipsecah/Makefile
index d744c131f1..dd8485f210 100644
--- a/usr/src/uts/intel/ipsecah/Makefile
+++ b/usr/src/uts/intel/ipsecah/Makefile
@@ -42,7 +42,6 @@ UTSBASE = ../..
#
MODULE = ipsecah
OBJECTS = $(IPSECAH_OBJS:%=$(OBJS_DIR)/%)
-LINTS = $(IPSECAH_OBJS:%.o=$(LINTS_DIR)/%.ln)
ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
ROOTLINK = $(ROOT_STRMOD_DIR)/$(MODULE)
CONF_SRCDIR = $(UTSBASE)/common/inet/ip
@@ -56,7 +55,6 @@ include $(UTSBASE)/intel/Makefile.intel
# Define targets
#
ALL_TARGET = $(BINARY) $(SRC_CONFFILE)
-LINT_TARGET = $(MODULE).lint
INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOTLINK) $(ROOT_CONFFILE)
#
@@ -64,24 +62,9 @@ INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOTLINK) $(ROOT_CONFFILE)
#
LDFLAGS += -dy -Ndrv/ip -Ndrv/tcp -Nmisc/kcf
-#
-# For now, disable these lint checks; maintainers should endeavor
-# to investigate and remove these for maximum lint coverage.
-# Please do not carry these forward to new Makefiles.
-#
-LINTTAGS += -erroff=E_BAD_PTR_CAST_ALIGN
-LINTTAGS += -erroff=E_PTRDIFF_OVERFLOW
-LINTTAGS += -erroff=E_SUSPICIOUS_COMPARISON
-LINTTAGS += -erroff=E_ASSIGN_NARROW_CONV
-
CERRWARN += -_gcc=-Wno-parentheses
CERRWARN += $(CNOWARN_UNINIT)
-# needs work
-$(OBJS_DIR)/ipsecahddi.o := SMOFF += index_overflow
-$(OBJS_DIR)/ipsecah.o := SMOFF += deref_check
-$(OBJS_DIR)/sadb.o := SMOFF += signed_integer_overflow_check,deref_check,indenting,shift_to_zero
-
#
# Default build targets.
#
@@ -95,12 +78,6 @@ clean: $(CLEAN_DEPS) $(SISCLEAN_DEPS)
clobber: $(CLOBBER_DEPS) $(SISCLEAN_DEPS)
-lint: $(LINT_DEPS)
-
-modlintlib: $(MODLINTLIB_DEPS)
-
-clean.lint: $(CLEAN_LINT_DEPS)
-
install: $(INSTALL_DEPS) $(SISCHECK_DEPS)
$(ROOTLINK): $(ROOT_STRMOD_DIR) $(ROOTMODULE)
diff --git a/usr/src/uts/intel/ipsecesp/Makefile b/usr/src/uts/intel/ipsecesp/Makefile
index 713ad82d7c..3ae4a4cac1 100644
--- a/usr/src/uts/intel/ipsecesp/Makefile
+++ b/usr/src/uts/intel/ipsecesp/Makefile
@@ -26,7 +26,7 @@
# Copyright (c) 2018, Joyent, Inc.
#
-# This makefile drives the production of the ipsecesp driver
+# This makefile drives the production of the ipsecesp driver
# kernel module.
#
# intel implementation architecture dependent
@@ -42,7 +42,6 @@ UTSBASE = ../..
#
MODULE = ipsecesp
OBJECTS = $(IPSECESP_OBJS:%=$(OBJS_DIR)/%)
-LINTS = $(IPSECESP_OBJS:%.o=$(LINTS_DIR)/%.ln)
ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
ROOTLINK = $(ROOT_STRMOD_DIR)/$(MODULE)
CONF_SRCDIR = $(UTSBASE)/common/inet/ip
@@ -56,7 +55,6 @@ include $(UTSBASE)/intel/Makefile.intel
# Define targets
#
ALL_TARGET = $(BINARY) $(SRC_CONFFILE)
-LINT_TARGET = $(MODULE).lint
INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOTLINK) $(ROOT_CONFFILE)
#
@@ -64,21 +62,8 @@ INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOTLINK) $(ROOT_CONFFILE)
#
LDFLAGS += -dy -Ndrv/ip -Ndrv/ipsecah -Nmisc/kcf
-#
-# For now, disable these lint checks; maintainers should endeavor
-# to investigate and remove these for maximum lint coverage.
-# Please do not carry these forward to new Makefiles.
-#
-LINTTAGS += -erroff=E_BAD_PTR_CAST_ALIGN
-LINTTAGS += -erroff=E_PTRDIFF_OVERFLOW
-LINTTAGS += -erroff=E_ASSIGN_NARROW_CONV
-
CERRWARN += $(CNOWARN_UNINIT)
-# needs work
-$(OBJS_DIR)/ipsecespddi.o := SMOFF += index_overflow
-$(OBJS_DIR)/ipsecesp.o := SMOFF += deref_check
-
#
# Default build targets.
#
@@ -92,12 +77,6 @@ clean: $(CLEAN_DEPS) $(SISCLEAN_DEPS)
clobber: $(CLOBBER_DEPS) $(SISCLEAN_DEPS)
-lint: $(LINT_DEPS)
-
-modlintlib: $(MODLINTLIB_DEPS)
-
-clean.lint: $(CLEAN_LINT_DEPS)
-
install: $(INSTALL_DEPS) $(SISCHECK_DEPS)
$(ROOTLINK): $(ROOT_STRMOD_DIR) $(ROOTMODULE)
diff --git a/usr/src/uts/intel/mlxcx/Makefile b/usr/src/uts/intel/mlxcx/Makefile
new file mode 100644
index 0000000000..27bdfa4b73
--- /dev/null
+++ b/usr/src/uts/intel/mlxcx/Makefile
@@ -0,0 +1,44 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2018 Joyent, Inc.
+#
+
+# Path to the base of the uts directory tree.
+UTSBASE = ../..
+
+# Module name and object set.  MLXCX_OBJS is presumably defined in the
+# shared Makefile.files list -- confirm when adding new sources.
+MODULE = mlxcx
+OBJECTS = $(MLXCX_OBJS:%=$(OBJS_DIR)/%)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+# Directory holding the driver's .conf file.
+CONF_SRCDIR = $(UTSBASE)/common/io/mlxcx
+
+# Include common intel-architecture build rules and definitions.
+include $(UTSBASE)/intel/Makefile.intel
+
+CPPFLAGS += -I$(UTSBASE)/common/io/mlxcx
+
+ALL_TARGET = $(BINARY) $(CONFMOD)
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+# The driver depends on the MAC (GLDv3) framework module at load time.
+LDFLAGS += -dy -N misc/mac
+
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+install: $(INSTALL_DEPS)
+
+# Common targets (the rules behind the DEPS variables above).
+include $(UTSBASE)/intel/Makefile.targ
diff --git a/usr/src/uts/intel/os/driver_aliases b/usr/src/uts/intel/os/driver_aliases
index f9b5129d3b..f5b0a08489 100644
--- a/usr/src/uts/intel/os/driver_aliases
+++ b/usr/src/uts/intel/os/driver_aliases
@@ -1085,6 +1085,19 @@ mega_sas "pci1028,15.1028.1f01"
mega_sas "pci1028,15.1028.1f02"
mega_sas "pci1028,15.1028.1f03"
mouse8042 "pnpPNP,f03"
+mlxcx "pciex15b3,1013"
+mlxcx "pciex15b3,1014"
+mlxcx "pciex15b3,1015"
+mlxcx "pciex15b3,1016"
+mlxcx "pciex15b3,1017"
+mlxcx "pciex15b3,1018"
+mlxcx "pciex15b3,1019"
+mlxcx "pciex15b3,101a"
+mlxcx "pciex15b3,101b"
+mlxcx "pciex15b3,101c"
+mlxcx "pciex15b3,101d"
+mlxcx "pciex15b3,101e"
+mlxcx "pciex15b3,101f"
mpt "pci1000,30"
mpt "pci1000,50"
mpt "pci1000,54"
diff --git a/usr/src/uts/sparc/ipsecah/Makefile b/usr/src/uts/sparc/ipsecah/Makefile
index 55ee48c88f..ad14fa4e5b 100644
--- a/usr/src/uts/sparc/ipsecah/Makefile
+++ b/usr/src/uts/sparc/ipsecah/Makefile
@@ -24,7 +24,7 @@
#
#
-# This makefile drives the production of the ipsecah driver
+# This makefile drives the production of the ipsecah driver
# kernel module.
#
# sparc architecture dependent
@@ -40,7 +40,6 @@ UTSBASE = ../..
#
MODULE = ipsecah
OBJECTS = $(IPSECAH_OBJS:%=$(OBJS_DIR)/%)
-LINTS = $(IPSECAH_OBJS:%.o=$(LINTS_DIR)/%.ln)
ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
ROOTLINK = $(ROOT_STRMOD_DIR)/$(MODULE)
CONF_SRCDIR = $(UTSBASE)/common/inet/ip
@@ -54,7 +53,6 @@ include $(UTSBASE)/sparc/Makefile.sparc
# Define targets
#
ALL_TARGET = $(BINARY) $(SRC_CONFFILE)
-LINT_TARGET = $(MODULE).lint
INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOTLINK) $(ROOT_CONFFILE)
#
@@ -62,21 +60,6 @@ INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOTLINK) $(ROOT_CONFFILE)
#
LDFLAGS += -dy -Ndrv/ip -Ndrv/tcp -Nmisc/kcf
-#
-# lint pass one enforcement
-#
-CFLAGS += $(CCVERBOSE)
-
-#
-# For now, disable these lint checks; maintainers should endeavor
-# to investigate and remove these for maximum lint coverage.
-# Please do not carry these forward to new Makefiles.
-#
-LINTTAGS += -erroff=E_BAD_PTR_CAST_ALIGN
-LINTTAGS += -erroff=E_PTRDIFF_OVERFLOW
-LINTTAGS += -erroff=E_ASSIGN_NARROW_CONV
-LINTTAGS += -erroff=E_SUSPICIOUS_COMPARISON
-
CERRWARN += -_gcc=-Wno-parentheses
CERRWARN += $(CNOWARN_UNINIT)
@@ -93,12 +76,6 @@ clean: $(CLEAN_DEPS) $(SISCLEAN_DEPS)
clobber: $(CLOBBER_DEPS) $(SISCLEAN_DEPS)
-lint: $(LINT_DEPS)
-
-modlintlib: $(MODLINTLIB_DEPS)
-
-clean.lint: $(CLEAN_LINT_DEPS)
-
install: $(INSTALL_DEPS) $(SISCHECK_DEPS)
$(ROOTLINK): $(ROOT_STRMOD_DIR) $(ROOTMODULE)
diff --git a/usr/src/uts/sparc/ipsecesp/Makefile b/usr/src/uts/sparc/ipsecesp/Makefile
index 1a36e4fbc7..931dc913a2 100644
--- a/usr/src/uts/sparc/ipsecesp/Makefile
+++ b/usr/src/uts/sparc/ipsecesp/Makefile
@@ -24,7 +24,7 @@
#
#
-# This makefile drives the production of the ipsecesp driver
+# This makefile drives the production of the ipsecesp driver
# kernel module.
#
# sparc architecture dependent
@@ -40,7 +40,6 @@ UTSBASE = ../..
#
MODULE = ipsecesp
OBJECTS = $(IPSECESP_OBJS:%=$(OBJS_DIR)/%)
-LINTS = $(IPSECESP_OBJS:%.o=$(LINTS_DIR)/%.ln)
ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
ROOTLINK = $(ROOT_STRMOD_DIR)/$(MODULE)
CONF_SRCDIR = $(UTSBASE)/common/inet/ip
@@ -54,7 +53,6 @@ include $(UTSBASE)/sparc/Makefile.sparc
# Define targets
#
ALL_TARGET = $(BINARY) $(SRC_CONFFILE)
-LINT_TARGET = $(MODULE).lint
INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOTLINK) $(ROOT_CONFFILE)
#
@@ -62,20 +60,6 @@ INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOTLINK) $(ROOT_CONFFILE)
#
LDFLAGS += -dy -Ndrv/ip -Ndrv/ipsecah -Nmisc/kcf
-#
-# lint pass one enforcement
-#
-CFLAGS += $(CCVERBOSE)
-
-#
-# For now, disable these lint checks; maintainers should endeavor
-# to investigate and remove these for maximum lint coverage.
-# Please do not carry these forward to new Makefiles.
-#
-LINTTAGS += -erroff=E_BAD_PTR_CAST_ALIGN
-LINTTAGS += -erroff=E_PTRDIFF_OVERFLOW
-LINTTAGS += -erroff=E_ASSIGN_NARROW_CONV
-
CERRWARN += $(CNOWARN_UNINIT)
#
@@ -91,12 +75,6 @@ clean: $(CLEAN_DEPS) $(SISCLEAN_DEPS)
clobber: $(CLOBBER_DEPS) $(SISCLEAN_DEPS)
-lint: $(LINT_DEPS)
-
-modlintlib: $(MODLINTLIB_DEPS)
-
-clean.lint: $(CLEAN_LINT_DEPS)
-
install: $(INSTALL_DEPS) $(SISCHECK_DEPS)
$(ROOTLINK): $(ROOT_STRMOD_DIR) $(ROOTMODULE)