author     dh142964 <David.Hollister@Sun.COM>  2009-09-30 13:40:27 -0600
committer  dh142964 <David.Hollister@Sun.COM>  2009-09-30 13:40:27 -0600
commit     4c06356b0f0fffb4fc1b6eccc8e5d8e2254a84d6 (patch)
tree       17ba947a21901975bb128b8c535cb0575d4c9a4a
parent     7b57f05abb8796d3c91c8d4d4c75dcafb5af6b69 (diff)
download   illumos-gate-4c06356b0f0fffb4fc1b6eccc8e5d8e2254a84d6.tar.gz
PSARC 2008/672 thebe SAS/SATA driver
PSARC 2008/755 ddi_ssoft_state(9F) and ddi_isoft_state(9F)
PSARC 2008/764 Cfgadm SCSI-Plugin MPxIO Support
PSARC 2009/125 scsi_device property interfaces
6726110 pmcs driver (driver for thebe)
6726867 SCSAv3
-rw-r--r--  usr/src/Makefile.master | 13
-rw-r--r--  usr/src/Makefile.master.64 | 1
-rw-r--r--  usr/src/cmd/devfsadm/Makefile.com | 2
-rw-r--r--  usr/src/cmd/devfsadm/disk_link.c | 117
-rw-r--r--  usr/src/cmd/devfsadm/sgen_link.c | 72
-rw-r--r--  usr/src/cmd/mdb/Makefile.common | 1
-rw-r--r--  usr/src/cmd/mdb/common/modules/genunix/Makefile.files | 2
-rw-r--r--  usr/src/cmd/mdb/common/modules/genunix/bitset.c | 212
-rw-r--r--  usr/src/cmd/mdb/common/modules/genunix/bitset.h | 43
-rw-r--r--  usr/src/cmd/mdb/common/modules/genunix/damap.c | 325
-rw-r--r--  usr/src/cmd/mdb/common/modules/genunix/damap.h | 43
-rw-r--r--  usr/src/cmd/mdb/common/modules/genunix/genunix.c | 146
-rw-r--r--  usr/src/cmd/mdb/common/modules/pmcs/pmcs.c | 1776
-rw-r--r--  usr/src/cmd/mdb/intel/amd64/pmcs/Makefile | 38
-rw-r--r--  usr/src/cmd/mdb/intel/ia32/pmcs/Makefile | 37
-rw-r--r--  usr/src/cmd/mdb/sparc/v9/pmcs/Makefile | 38
-rw-r--r--  usr/src/cmd/mpathadm/mpathadm.c | 364
-rw-r--r--  usr/src/cmd/prtconf/pdevinfo.c | 12
-rw-r--r--  usr/src/cmd/stmsboot/stmsboot.sh | 10
-rw-r--r--  usr/src/cmd/stmsboot/stmsboot_util.c | 5
-rw-r--r--  usr/src/common/devid/devid_scsi.c | 23
-rw-r--r--  usr/src/lib/cfgadm_plugins/scsi/common/cfga_ctl.c | 62
-rw-r--r--  usr/src/lib/cfgadm_plugins/scsi/common/cfga_cvt.c | 181
-rw-r--r--  usr/src/lib/cfgadm_plugins/scsi/common/cfga_list.c | 381
-rw-r--r--  usr/src/lib/cfgadm_plugins/scsi/common/cfga_rcm.c | 25
-rw-r--r--  usr/src/lib/cfgadm_plugins/scsi/common/cfga_scsi.c | 14
-rw-r--r--  usr/src/lib/cfgadm_plugins/scsi/common/cfga_scsi.h | 23
-rw-r--r--  usr/src/lib/cfgadm_plugins/scsi/common/cfga_utils.c | 241
-rw-r--r--  usr/src/lib/fm/topo/modules/common/disk/disk_common.c | 15
-rw-r--r--  usr/src/lib/libdevice/llib-ldevice | 59
-rw-r--r--  usr/src/lib/libdevid/libdevid.h | 1
-rw-r--r--  usr/src/lib/libdevid/mapfile-vers | 1
-rw-r--r--  usr/src/lib/libdevinfo/devinfo.c | 14
-rw-r--r--  usr/src/lib/libdevinfo/libdevinfo.h | 2
-rw-r--r--  usr/src/lib/libdevinfo/mapfile-vers | 1
-rw-r--r--  usr/src/lib/mpapi/libmpscsi_vhci/common/MP_GetMultipathLusPlugin.c | 50
-rw-r--r--  usr/src/pkgdefs/Makefile | 4
-rw-r--r--  usr/src/pkgdefs/SUNWhea/prototype_com | 2
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsr/Makefile | 38
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsr/pkginfo.tmpl | 46
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsr/postinstall.tmpl | 33
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsr/preremove.tmpl | 28
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsr/prototype_com | 51
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsr/prototype_i386 | 51
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsr/prototype_sparc | 49
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsu/Makefile | 36
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsu/pkginfo.tmpl | 46
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsu/prototype_com | 47
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsu/prototype_i386 | 47
-rw-r--r--  usr/src/pkgdefs/SUNWpmcsu/prototype_sparc | 46
-rw-r--r--  usr/src/pkgdefs/common_files/i.pmcsconf | 137
-rw-r--r--  usr/src/tools/scripts/bfu.sh | 1
-rw-r--r--  usr/src/uts/Makefile.targ | 3
-rw-r--r--  usr/src/uts/Makefile.uts | 1
-rw-r--r--  usr/src/uts/common/Makefile.files | 8
-rw-r--r--  usr/src/uts/common/Makefile.rules | 11
-rw-r--r--  usr/src/uts/common/fs/devfs/devfs_subr.c | 14
-rw-r--r--  usr/src/uts/common/fs/devfs/devfs_vnops.c | 26
-rw-r--r--  usr/src/uts/common/io/1394/targets/scsa1394/hba.c | 14
-rw-r--r--  usr/src/uts/common/io/devinfo.c | 27
-rw-r--r--  usr/src/uts/common/io/sata/impl/sata.c | 6
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/iscsi/iscsi.c | 26
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c | 22
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/pmcs/pmcs.conf | 59
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/pmcs/pmcs8001fw.version | 24
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_attach.c | 3026
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_fw_hdr.c | 56
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_fwlog.c | 1024
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_intr.c | 1654
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_nvram.c | 823
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_sata.c | 709
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_scsa.c | 2983
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_smhba.c | 287
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_subr.c | 8258
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/scsi_vhci/mpapi_impl.c | 139
-rw-r--r--  usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c | 145
-rw-r--r--  usr/src/uts/common/io/scsi/conf/scsi_confdata.c | 20
-rw-r--r--  usr/src/uts/common/io/scsi/conf/scsi_confsubr.c | 548
-rw-r--r--  usr/src/uts/common/io/scsi/impl/sas_transport.c | 23
-rw-r--r--  usr/src/uts/common/io/scsi/impl/scsi_control.c | 12
-rw-r--r--  usr/src/uts/common/io/scsi/impl/scsi_hba.c | 7229
-rw-r--r--  usr/src/uts/common/io/scsi/impl/scsi_resource.c | 2
-rw-r--r--  usr/src/uts/common/io/scsi/impl/scsi_transport.c | 5
-rw-r--r--  usr/src/uts/common/io/scsi/targets/sd.c | 8
-rw-r--r--  usr/src/uts/common/io/scsi/targets/ses.c | 20
-rw-r--r--  usr/src/uts/common/io/scsi/targets/sgen.conf | 57
-rw-r--r--  usr/src/uts/common/io/scsi/targets/smp.c | 65
-rw-r--r--  usr/src/uts/common/os/bitset.c | 79
-rw-r--r--  usr/src/uts/common/os/callout.c | 8
-rw-r--r--  usr/src/uts/common/os/clock.c | 116
-rw-r--r--  usr/src/uts/common/os/damap.c | 1258
-rw-r--r--  usr/src/uts/common/os/devcfg.c | 269
-rw-r--r--  usr/src/uts/common/os/id_space.c | 52
-rw-r--r--  usr/src/uts/common/os/modctl.c | 4
-rw-r--r--  usr/src/uts/common/os/sunddi.c | 344
-rw-r--r--  usr/src/uts/common/os/sunmdi.c | 1085
-rw-r--r--  usr/src/uts/common/os/sunndi.c | 30
-rw-r--r--  usr/src/uts/common/sys/Makefile | 2
-rw-r--r--  usr/src/uts/common/sys/autoconf.h | 2
-rw-r--r--  usr/src/uts/common/sys/bitset.h | 16
-rw-r--r--  usr/src/uts/common/sys/damap.h | 156
-rw-r--r--  usr/src/uts/common/sys/damap_impl.h | 181
-rw-r--r--  usr/src/uts/common/sys/ddi_impldefs.h | 42
-rw-r--r--  usr/src/uts/common/sys/devctl.h | 7
-rw-r--r--  usr/src/uts/common/sys/devinfo_impl.h | 6
-rw-r--r--  usr/src/uts/common/sys/id_space.h | 11
-rw-r--r--  usr/src/uts/common/sys/mdi_impldefs.h | 33
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/mpapi_impl.h | 5
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/mpapi_scsi_vhci.h | 11
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/ata.h | 279
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/ata8-acs.h | 122
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/atapi7v3.h | 228
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs.h | 627
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_def.h | 514
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_iomb.h | 724
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_mpi.h | 222
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_param.h | 138
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_proto.h | 353
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_reg.h | 378
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_scsa.h | 92
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_sgl.h | 86
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_smhba.h | 72
-rw-r--r--  usr/src/uts/common/sys/scsi/adapters/pmcs/smp_defs.h | 1100
-rw-r--r--  usr/src/uts/common/sys/scsi/conf/autoconf.h | 36
-rw-r--r--  usr/src/uts/common/sys/scsi/conf/device.h | 7
-rw-r--r--  usr/src/uts/common/sys/scsi/generic/sas.h | 199
-rw-r--r--  usr/src/uts/common/sys/scsi/generic/smp_frames.h | 1114
-rw-r--r--  usr/src/uts/common/sys/scsi/impl/sas_transport.h | 217
-rw-r--r--  usr/src/uts/common/sys/scsi/impl/transport.h | 143
-rw-r--r--  usr/src/uts/common/sys/scsi/scsi_address.h | 5
-rw-r--r--  usr/src/uts/common/sys/scsi/scsi_ctl.h | 20
-rw-r--r--  usr/src/uts/common/sys/scsi/scsi_pkt.h | 7
-rw-r--r--  usr/src/uts/common/sys/scsi/scsi_types.h | 20
-rw-r--r--  usr/src/uts/common/sys/scsi/targets/smp.h | 16
-rw-r--r--  usr/src/uts/common/sys/sunddi.h | 65
-rw-r--r--  usr/src/uts/common/sys/sunmdi.h | 23
-rw-r--r--  usr/src/uts/common/sys/sunndi.h | 44
-rw-r--r--  usr/src/uts/common/sys/systm.h | 1
-rw-r--r--  usr/src/uts/intel/Makefile.intel.shared | 1
-rw-r--r--  usr/src/uts/intel/os/driver_aliases | 2
-rw-r--r--  usr/src/uts/intel/pmcs/Makefile | 113
-rw-r--r--  usr/src/uts/sparc/Makefile.sparc.shared | 1
-rw-r--r--  usr/src/uts/sparc/os/driver_aliases | 2
-rw-r--r--  usr/src/uts/sparc/pmcs/Makefile | 113
144 files changed, 40806 insertions, 2341 deletions
diff --git a/usr/src/Makefile.master b/usr/src/Makefile.master
index 58f7f0e1b1..d1349cde23 100644
--- a/usr/src/Makefile.master
+++ b/usr/src/Makefile.master
@@ -132,6 +132,7 @@ NM= /usr/ccs/bin/nm
DIFF= /usr/bin/diff
GREP= /usr/bin/grep
EGREP= /usr/bin/egrep
+ELFWRAP= /usr/bin/elfwrap
KSH93= /usr/bin/ksh93
SED= /usr/bin/sed
NAWK= /usr/bin/nawk
@@ -536,6 +537,12 @@ CCFLAGS= $(CCOPTFLAG) $($(MACH)_CCFLAGS)
CCFLAGS64= $(CCOPTFLAG64) $($(MACH64)_CCFLAGS)
#
+#
+#
+ELFWRAP_FLAGS =
+ELFWRAP_FLAGS64 = -64
+
+#
# Various mapfiles that are used throughout the build, and delivered to
# /usr/lib/ld.
#
@@ -593,6 +600,8 @@ COMPILE.s= $(AS) $(ASFLAGS) $(AS_CPPFLAGS)
COMPILE64.s= $(AS) $(ASFLAGS) $($(MACH64)_AS_XARCH) $(AS_CPPFLAGS)
COMPILE.d= $(DTRACE) -G -32
COMPILE64.d= $(DTRACE) -G -64
+COMPILE.b= $(ELFWRAP) $(ELFWRAP_FLAGS$(CLASS))
+COMPILE64.b= $(ELFWRAP) $(ELFWRAP_FLAGS$(CLASS))
CLASSPATH= .
COMPILE.java= $(JAVAC) $(JAVAFLAGS) -classpath $(CLASSPATH)
@@ -972,6 +981,10 @@ PKGARCHIVE=$(SRC)/../../packages/$(MACH)/nightly$(PKGARCHIVESUFFIX)
$(POST_PROCESS_O)
$(RM) $*.c
+.bin.o:
+ $(COMPILE.b) -o $@ $<
+ $(POST_PROCESS_O)
+
.java.class:
$(COMPILE.java) $<
diff --git a/usr/src/Makefile.master.64 b/usr/src/Makefile.master.64
index 5256716715..cb01d1ab75 100644
--- a/usr/src/Makefile.master.64
+++ b/usr/src/Makefile.master.64
@@ -33,6 +33,7 @@ COMPILE.c= $(COMPILE64.c)
COMPILE.cc= $(COMPILE64.cc)
COMPILE.s= $(COMPILE64.s)
COMPILE.d= $(COMPILE64.d)
+COMPILE.b= $(COMPILE64.b)
LINK.c= $(LINK64.c)
LINK.cc= $(LINK64.cc)
LINT.c= $(LINT64.c)
diff --git a/usr/src/cmd/devfsadm/Makefile.com b/usr/src/cmd/devfsadm/Makefile.com
index 3468647dd5..63e212a96a 100644
--- a/usr/src/cmd/devfsadm/Makefile.com
+++ b/usr/src/cmd/devfsadm/Makefile.com
@@ -99,6 +99,8 @@ LINTFLAGS += -erroff=E_NAME_MULTIPLY_DEF2
LDLIBS += -ldevinfo
devfsadm := LDLIBS += -lgen -lsysevent -lnvpair -lzonecfg -lbsm
SUNW_md_link.so := LDLIBS += -lmeta
+SUNW_disk_link.so := LDLIBS += -ldevid
+SUNW_sgen_link.so := LDLIBS += -ldevid
# All libraries are built from the same SUNW_%.so rule (see below), and define
# their own SONAME using -h explicitly. Null the generic -h macro that gets
diff --git a/usr/src/cmd/devfsadm/disk_link.c b/usr/src/cmd/devfsadm/disk_link.c
index 95cda78a19..9e447875f1 100644
--- a/usr/src/cmd/devfsadm/disk_link.c
+++ b/usr/src/cmd/devfsadm/disk_link.c
@@ -29,8 +29,11 @@
#include <stdlib.h>
#include <limits.h>
#include <ctype.h>
+#include <sys/int_fmtio.h>
#include <sys/stat.h>
#include <bsm/devalloc.h>
+#include <sys/scsi/scsi_address.h>
+#include <sys/libdevid.h>
#define DISK_SUBPATH_MAX 100
#define RM_STALE 0x01
@@ -68,31 +71,31 @@ static int reserved_links_exist(di_node_t node, di_minor_t minor, int nflags);
static devfsadm_create_t disk_cbt[] = {
- { "disk", "ddi_block", NULL,
+ { "disk", DDI_NT_BLOCK, NULL,
TYPE_EXACT, ILEVEL_0, disk_callback_nchan
},
- { "disk", "ddi_block:channel", NULL,
+ { "disk", DDI_NT_BLOCK_CHAN, NULL,
TYPE_EXACT, ILEVEL_0, disk_callback_chan
},
- { "disk", "ddi_block:fabric", NULL,
+ { "disk", DDI_NT_BLOCK_FABRIC, NULL,
TYPE_EXACT, ILEVEL_0, disk_callback_fabric
},
- { "disk", "ddi_block:wwn", NULL,
+ { "disk", DDI_NT_BLOCK_WWN, NULL,
TYPE_EXACT, ILEVEL_0, disk_callback_wwn
},
- { "disk", "ddi_block:sas", NULL,
+ { "disk", DDI_NT_BLOCK_SAS, NULL,
TYPE_EXACT, ILEVEL_0, disk_callback_sas
},
- { "disk", "ddi_block:cdrom", NULL,
+ { "disk", DDI_NT_CD, NULL,
TYPE_EXACT, ILEVEL_0, disk_callback_nchan
},
- { "disk", "ddi_block:cdrom:channel", NULL,
+ { "disk", DDI_NT_CD_CHAN, NULL,
TYPE_EXACT, ILEVEL_0, disk_callback_chan
},
- { "disk", "ddi_block:xvmd", NULL,
+ { "disk", DDI_NT_BLOCK_XVMD, NULL,
TYPE_EXACT, ILEVEL_0, disk_callback_xvmd
},
- { "disk", "ddi_block:cdrom:xvmd", NULL,
+ { "disk", DDI_NT_CD_XVMD, NULL,
TYPE_EXACT, ILEVEL_0, disk_callback_xvmd
},
};
@@ -168,11 +171,13 @@ disk_callback_wwn(di_minor_t minor, di_node_t node)
int targ;
int *intp;
- if (di_prop_lookup_ints(DDI_DEV_T_ANY, node, "target", &intp) <= 0) {
+ if (di_prop_lookup_ints(DDI_DEV_T_ANY, node, SCSI_ADDR_PROP_TARGET,
+ &intp) <= 0) {
return (DEVFSADM_CONTINUE);
}
targ = *intp;
- if (di_prop_lookup_ints(DDI_DEV_T_ANY, node, "lun", &intp) <= 0) {
+ if (di_prop_lookup_ints(DDI_DEV_T_ANY, node, SCSI_ADDR_PROP_LUN,
+ &intp) <= 0) {
lun = 0;
} else {
lun = *intp;
@@ -207,7 +212,7 @@ disk_callback_fabric(di_minor_t minor, di_node_t node)
} else if (di_prop_lookup_bytes(DDI_DEV_T_ANY, node,
"port-wwn", &wwn) > 0) {
if (di_prop_lookup_ints(DDI_DEV_T_ANY, node,
- "lun", &intp) > 0) {
+ SCSI_ADDR_PROP_LUN, &intp) > 0) {
lun = *intp;
} else {
lun = 0;
@@ -236,38 +241,72 @@ static int
disk_callback_sas(di_minor_t minor, di_node_t node)
{
char disk[DISK_SUBPATH_MAX];
- int lun;
+ int lun64_found = 0;
+ scsi_lun64_t lun64, sl;
+ scsi_lun_t lun;
+ int64_t *lun64p;
+ uint64_t wwn;
int *intp;
- char *str;
- char *wwn;
-
- /*
- * get LUN property
- */
- if (di_prop_lookup_ints(DDI_DEV_T_ANY, node,
- "lun", &intp) > 0) {
- lun = *intp;
- } else {
- lun = 0;
+ char *tgt_port;
+ uchar_t addr_method;
+
+ /* Get lun property */
+ if (di_prop_lookup_int64(DDI_DEV_T_ANY, node,
+ SCSI_ADDR_PROP_LUN64, &lun64p) > 0) {
+ if (*lun64p != SCSI_LUN64_ILLEGAL) {
+ lun64_found = 1;
+ lun64 = (uint64_t)*lun64p;
+ }
}
+ if ((!lun64_found) && (di_prop_lookup_ints(DDI_DEV_T_ANY, node,
+ SCSI_ADDR_PROP_LUN, &intp) > 0)) {
+ lun64 = (uint64_t)*intp;
+ }
+
+ lun = scsi_lun64_to_lun(lun64);
+
+ addr_method = (lun.sl_lun1_msb & SCSI_LUN_AM_MASK);
+
if (di_prop_lookup_strings(DDI_DEV_T_ANY, node,
- "target-port", &wwn) > 0) {
- /*
- * If the target-port property exist
- * we use wwn format naming
- */
- for (str = wwn; *str != '\0'; str++) {
- *str = DISK_LINK_TO_UPPER(*str);
+ SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) > 0) {
+ (void) scsi_wwnstr_to_wwn(tgt_port, &wwn);
+ if ((addr_method == SCSI_LUN_AM_PDEV) &&
+ (lun.sl_lun2_msb == 0) && (lun.sl_lun2_lsb == 0) &&
+ (lun.sl_lun3_msb == 0) && (lun.sl_lun3_lsb == 0) &&
+ (lun.sl_lun4_msb == 0) && (lun.sl_lun4_lsb == 0)) {
+ (void) snprintf(disk, DISK_SUBPATH_MAX,
+ "t%"PRIX64"d%"PRId64, wwn, lun64);
+ } else if ((addr_method == SCSI_LUN_AM_FLAT) &&
+ (lun.sl_lun2_msb == 0) && (lun.sl_lun2_lsb == 0) &&
+ (lun.sl_lun3_msb == 0) && (lun.sl_lun3_lsb == 0) &&
+ (lun.sl_lun4_msb == 0) && (lun.sl_lun4_lsb == 0)) {
+ sl = (lun.sl_lun1_msb << 8) | lun.sl_lun1_lsb;
+ (void) snprintf(disk, DISK_SUBPATH_MAX,
+ "t%"PRIX64"d%"PRIX16, wwn, sl);
+ } else {
+ (void) snprintf(disk, DISK_SUBPATH_MAX,
+ "t%"PRIX64"d%"PRIX64, wwn, lun64);
}
- (void) snprintf(disk, DISK_SUBPATH_MAX, "t%sd%d", wwn, lun);
-
} else if (di_prop_lookup_ints(DDI_DEV_T_ANY, node,
- "sata-phy", &intp) > 0) {
- /*
- * For direct attached SATA device without Device Name,
- * no wwn exist, we use phy format naming
- */
- (void) snprintf(disk, DISK_SUBPATH_MAX, "t%dd%d", *intp, lun);
+ SCSI_ADDR_PROP_SATA_PHY, &intp) > 0) {
+ /* Use phy format naming, for SATA devices without wwn */
+ if ((addr_method == SCSI_LUN_AM_PDEV) &&
+ (lun.sl_lun2_msb == 0) && (lun.sl_lun2_lsb == 0) &&
+ (lun.sl_lun3_msb == 0) && (lun.sl_lun3_lsb == 0) &&
+ (lun.sl_lun4_msb == 0) && (lun.sl_lun4_lsb == 0)) {
+ (void) snprintf(disk, DISK_SUBPATH_MAX,
+ "t%sd%"PRId64, *intp, lun64);
+ } else if ((addr_method == SCSI_LUN_AM_FLAT) &&
+ (lun.sl_lun2_msb == 0) && (lun.sl_lun2_lsb == 0) &&
+ (lun.sl_lun3_msb == 0) && (lun.sl_lun3_lsb == 0) &&
+ (lun.sl_lun4_msb == 0) && (lun.sl_lun4_lsb == 0)) {
+ sl = (lun.sl_lun1_msb << 8) | lun.sl_lun1_lsb;
+ (void) snprintf(disk, DISK_SUBPATH_MAX,
+ "t%sd%"PRIX16, *intp, sl);
+ } else {
+ (void) snprintf(disk, DISK_SUBPATH_MAX,
+ "t%sd%"PRIX64, *intp, lun64);
+ }
} else {
return (DEVFSADM_CONTINUE);
}
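
The new disk_callback_sas() logic above derives the tXdY name suffix from the 64-bit lun property: it reads the SAM address method from the top bits of the first LUN level, prints single-level peripheral-device LUNs in decimal, single-level flat-space LUNs as a 16-bit hex value, and any other LUN as the full 64-bit value in hex. The standalone sketch below (not driver code) illustrates only that branch selection; the stand-in struct and the 0xC0/0x00/0x40 constants mirror what sys/scsi/scsi_address.h is assumed to define, so treat them as illustrative.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Assumed SAM address-method values; verify against sys/scsi/scsi_address.h */
#define	AM_MASK	0xC0		/* address-method bits of the level-1 MSB */
#define	AM_PDEV	0x00		/* peripheral device addressing */
#define	AM_FLAT	0x40		/* flat space addressing */

typedef struct {			/* stand-in for scsi_lun_t */
	uint8_t	lun1_msb, lun1_lsb;	/* first level */
	uint8_t	lun2_msb, lun2_lsb;
	uint8_t	lun3_msb, lun3_lsb;
	uint8_t	lun4_msb, lun4_lsb;
} sam_lun_t;

static void
format_lun(char *buf, size_t len, sam_lun_t l, uint64_t lun64)
{
	uint8_t am = l.lun1_msb & AM_MASK;
	int one_level = (l.lun2_msb | l.lun2_lsb | l.lun3_msb | l.lun3_lsb |
	    l.lun4_msb | l.lun4_lsb) == 0;

	if (am == AM_PDEV && one_level) {
		/* simple LUN: decimal, e.g. d0, d1, ... */
		(void) snprintf(buf, len, "d%" PRIu64, lun64);
	} else if (am == AM_FLAT && one_level) {
		/* single-level flat space: 16-bit first level in hex */
		uint16_t fl = (uint16_t)((l.lun1_msb << 8) | l.lun1_lsb);
		(void) snprintf(buf, len, "d%" PRIX16, fl);
	} else {
		/* multi-level or other address methods: full 64-bit hex */
		(void) snprintf(buf, len, "d%" PRIX64, lun64);
	}
}

int
main(void)
{
	sam_lun_t simple = { 0x00, 0x03, 0, 0, 0, 0, 0, 0 };
	char buf[32];

	format_lun(buf, sizeof (buf), simple, 3);
	(void) printf("%s\n", buf);	/* prints "d3" */
	return (0);
}

The same decode is repeated for the sata-phy branch of disk_callback_sas() and again in sgen_link.c below; only the leading "t..." portion of the name differs.
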
diff --git a/usr/src/cmd/devfsadm/sgen_link.c b/usr/src/cmd/devfsadm/sgen_link.c
index d3a6226698..92289050ac 100644
--- a/usr/src/cmd/devfsadm/sgen_link.c
+++ b/usr/src/cmd/devfsadm/sgen_link.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,18 +19,19 @@
* CDDL HEADER END
*/
/*
- * Copyright 2003 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <devfsadm.h>
#include <stdio.h>
#include <strings.h>
#include <stdlib.h>
#include <limits.h>
#include <ctype.h>
+#include <sys/int_fmtio.h>
+#include <sys/scsi/scsi_address.h>
+#include <sys/libdevid.h>
#define SGEN_LINK_RE "^scsi/.+/c[0-9]+t[0-9A-F]+d[0-9]+$"
#define SGEN_DIR "scsi"
@@ -65,17 +65,67 @@ sgen_callback(di_minor_t minor, di_node_t node)
{
char *baddr, *cnum, *tstr;
char lpath[PATH_MAX], buf[PATH_MAX];
- uchar_t *wwn;
+ uchar_t *wwnstr;
+ char *tgt_port;
+
if ((cnum = find_ctrlr(node, minor)) == NULL)
goto done;
+ /*
+ * SCSAv3 attached devices.
+ */
if (di_prop_lookup_strings(DDI_DEV_T_ANY, node,
- "client-guid", (char **)&wwn) > 0) {
+ SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) > 0) {
+ uint64_t wwn;
+ scsi_lun64_t sl;
+ scsi_lun_t lun;
+ int64_t lun64;
+ int64_t *lun64p;
+ int *intp;
+ uchar_t addr_method;
+
+ /* Get lun property */
+ if ((di_prop_lookup_int64(DDI_DEV_T_ANY, node,
+ SCSI_ADDR_PROP_LUN64, &lun64p) > 0) &&
+ (*lun64p != SCSI_LUN64_ILLEGAL)) {
+ lun64 = *lun64p;
+ } else if (di_prop_lookup_ints(DDI_DEV_T_ANY, node,
+ SCSI_ADDR_PROP_LUN, &intp) > 0) {
+ lun64 = (uint64_t)*intp;
+ }
+
+ lun = scsi_lun64_to_lun(lun64);
+
+ addr_method = (lun.sl_lun1_msb & SCSI_LUN_AM_MASK);
+
+ (void) scsi_wwnstr_to_wwn(tgt_port, &wwn);
+ if ((addr_method == SCSI_LUN_AM_PDEV) &&
+ (lun.sl_lun2_msb == 0) && (lun.sl_lun2_lsb == 0) &&
+ (lun.sl_lun3_msb == 0) && (lun.sl_lun3_lsb == 0) &&
+ (lun.sl_lun4_msb == 0) && (lun.sl_lun4_lsb == 0)) {
+ (void) snprintf(lpath, PATH_MAX,
+ "%s/%s/c%st%"PRIX64"d%"PRId64, SGEN_DIR,
+ di_minor_name(minor), cnum, wwn, lun64);
+ } else if ((addr_method == SCSI_LUN_AM_FLAT) &&
+ (lun.sl_lun2_msb == 0) && (lun.sl_lun2_lsb == 0) &&
+ (lun.sl_lun3_msb == 0) && (lun.sl_lun3_lsb == 0) &&
+ (lun.sl_lun4_msb == 0) && (lun.sl_lun4_lsb == 0)) {
+ sl = (lun.sl_lun1_msb << 8) | lun.sl_lun1_lsb;
+ (void) snprintf(lpath, PATH_MAX,
+ "%s/%s/c%st%"PRIX64"d%"PRIX16, SGEN_DIR,
+ di_minor_name(minor), cnum, wwn, sl);
+ } else {
+ (void) snprintf(lpath, PATH_MAX,
+ "%s/%s/c%st%"PRIX64"d%"PRIX64, SGEN_DIR,
+ di_minor_name(minor), cnum, wwn, lun64);
+ }
+ } else if (di_prop_lookup_strings(DDI_DEV_T_ANY, node,
+ "client-guid", (char **)&wwnstr) > 0) {
/*
* MPXIO-enabled devices; lun is always 0.
*/
- if (strlcpy((char *)buf, (char *)wwn, sizeof (buf)) >=
+ if (strlcpy((char *)buf, (char *)wwnstr, sizeof (buf)) >=
sizeof (buf))
goto done;
@@ -87,7 +137,7 @@ sgen_callback(di_minor_t minor, di_node_t node)
goto done;
} else if (di_prop_lookup_bytes(DDI_DEV_T_ANY, node,
- "port-wwn", &wwn) == 8) {
+ "port-wwn", &wwnstr) == 8) {
/*
* "normal" fibre channel devices
*/
@@ -98,7 +148,7 @@ sgen_callback(di_minor_t minor, di_node_t node)
lun = 0;
for (count = 0, tstr = buf; count < 8; count++, tstr += 2)
- (void) sprintf(tstr, "%02X", wwn[count]);
+ (void) sprintf(tstr, "%02X", wwnstr[count]);
*tstr = '\0';
if (snprintf(lpath, sizeof (lpath), "%s/%s/c%st%sd%d", SGEN_DIR,
diff --git a/usr/src/cmd/mdb/Makefile.common b/usr/src/cmd/mdb/Makefile.common
index a40f40b086..638bdcdb72 100644
--- a/usr/src/cmd/mdb/Makefile.common
+++ b/usr/src/cmd/mdb/Makefile.common
@@ -77,6 +77,7 @@ COMMON_MODULES_KVM = \
nca \
nsctl \
nsmb \
+ pmcs \
ptm \
qlc \
random \
diff --git a/usr/src/cmd/mdb/common/modules/genunix/Makefile.files b/usr/src/cmd/mdb/common/modules/genunix/Makefile.files
index 54b13c437f..1d45564e4b 100644
--- a/usr/src/cmd/mdb/common/modules/genunix/Makefile.files
+++ b/usr/src/cmd/mdb/common/modules/genunix/Makefile.files
@@ -31,11 +31,13 @@
GENUNIX_SRCS = \
avl.c \
bio.c \
+ bitset.c \
combined.c \
contract.c \
cpupart.c \
ctxop.c \
cyclic.c \
+ damap.c \
devinfo.c \
dist.c \
findstack.c \
diff --git a/usr/src/cmd/mdb/common/modules/genunix/bitset.c b/usr/src/cmd/mdb/common/modules/genunix/bitset.c
new file mode 100644
index 0000000000..e79a1b4fa5
--- /dev/null
+++ b/usr/src/cmd/mdb/common/modules/genunix/bitset.c
@@ -0,0 +1,212 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <mdb/mdb_modapi.h>
+#include <sys/bitset.h>
+
+#include "bitset.h" /* XXX work out ifdef in include file... */
+
+void
+bitset_help(void)
+{
+ mdb_printf("Print the bitset at the address given\n");
+}
+
+static void
+bitset_free(bitset_t *bs)
+{
+ if (bs == NULL)
+ return;
+ if (bs->bs_set && bs->bs_words)
+ mdb_free(bs->bs_set, bs->bs_words * sizeof (ulong_t));
+ mdb_free(bs, sizeof (*bs));
+}
+
+static bitset_t *
+bitset_get(uintptr_t bsaddr)
+{
+ bitset_t *bs;
+
+ bs = mdb_zalloc(sizeof (*bs), UM_SLEEP);
+ if (mdb_vread(bs, sizeof (*bs), bsaddr) == -1) {
+ mdb_warn("couldn't read bitset 0x%p", bsaddr);
+ bitset_free(bs);
+ return (NULL);
+ }
+
+ bsaddr = (uintptr_t)bs->bs_set;
+ bs->bs_set = mdb_alloc(bs->bs_words * sizeof (ulong_t), UM_SLEEP);
+ if (mdb_vread(bs->bs_set,
+ bs->bs_words * sizeof (ulong_t), bsaddr) == -1) {
+ mdb_warn("couldn't read bitset bs_set 0x%p", bsaddr);
+ bitset_free(bs);
+ return (NULL);
+ }
+ return (bs);
+
+}
+
+static int
+bitset_highbit(bitset_t *bs)
+{
+ int high;
+ int i;
+
+ if ((bs->bs_set == NULL) || (bs->bs_words == 0))
+ return (-1);
+
+ /* move backwards through words */
+ for (i = bs->bs_words; i >= 0; i--)
+ if (bs->bs_set[i])
+ break;
+ if (i < 0)
+ return (-1);
+
+ /* move backwards through bits */
+ high = i << BT_ULSHIFT;
+ for (i = BT_NBIPUL - 1; i; i--)
+ if (BT_TEST(bs->bs_set, high + i))
+ break;
+ return (high + i + 1);
+}
+
+static int
+pow10(int exp)
+{
+ int res;
+
+ for (res = 1; exp; exp--)
+ res *= 10;
+ return (res);
+}
+
+static int
+log10(int val)
+{
+ int res = 0;
+
+ do {
+ res++;
+ val /= 10;
+ } while (val);
+ return (res);
+}
+
+/*
+ * The following prints a bitset with a 'ruler' that look like this
+ *
+ * 11111111112222222222333333333344444444445555555555666666666677
+ * 012345678901234567890123456789012345678901234567890123456789012345678901
+ * xx:........................................................................
+ * 11111111111111111111111111111111111111111111
+ * 777777778888888888999999999900000000001111111111222222222233333333334444
+ * 234567890123456789012345678901234567890123456789012345678901234567890123
+ * ........................................................................
+ * 111111111111111111111111111111111111111111111111111111112222222222222222
+ * 444444555555555566666666667777777777888888888899999999990000000000111111
+ * 456789012345678901234567890123456789012345678901234567890123456789012345
+ * ........................................................................
+ * 2222222222
+ * 1111222222
+ * 6789012345
+ * ..........
+ *
+ * to identify individual bits that are set.
+ */
+static void
+bitset_print(bitset_t *bs, char *label, int width)
+{
+ int val_start;
+ int val_max;
+ int label_width;
+ int ruler_width;
+ int v, vm, vi;
+ int nl, l;
+ int i;
+ int p;
+ char c;
+
+ val_start = 0;
+ val_max = bitset_highbit(bs) + 1;
+ if (val_max <= val_start) {
+ mdb_printf("%s: empty-set", label);
+ return;
+ }
+
+ label_width = strlen(label) + 1;
+ ruler_width = width - label_width;
+
+ for (v = val_start; v < val_max; v = vm) {
+ if ((v + ruler_width) < val_max)
+ vm = v + ruler_width;
+ else
+ vm = val_max;
+
+ nl = log10(vm) - 1;
+ for (l = nl; l >= 0; l--) {
+ p = pow10(l);
+ for (i = 0; i < label_width; i++)
+ mdb_printf(" ");
+
+ for (vi = v; vi < vm; vi++) {
+ c = '0' + ((vi / p) % 10);
+ if ((l == nl) && (c == '0'))
+ c = ' ';
+ mdb_printf("%c", c);
+ }
+
+ mdb_printf("\n");
+ }
+
+ if (v == val_start) {
+ mdb_printf("%s:", label);
+ } else {
+ for (i = 0; i < label_width; i++)
+ mdb_printf(" ");
+ }
+ for (vi = v; vi < vm; vi++) {
+ if (BT_TEST(bs->bs_set, vi))
+ mdb_printf("X");
+ else
+ mdb_printf(".");
+ }
+ mdb_printf("\n");
+ }
+}
+
+/*ARGSUSED*/
+int
+bitset(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ bitset_t *bs;
+
+ bs = bitset_get(addr);
+ if (bs == NULL)
+ return (DCMD_ERR);
+
+ bitset_print(bs, "label", 80);
+ bitset_free(bs);
+ return (DCMD_OK);
+}
diff --git a/usr/src/cmd/mdb/common/modules/genunix/bitset.h b/usr/src/cmd/mdb/common/modules/genunix/bitset.h
new file mode 100644
index 0000000000..41544673fb
--- /dev/null
+++ b/usr/src/cmd/mdb/common/modules/genunix/bitset.h
@@ -0,0 +1,43 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _MDB_BITSET_H
+#define _MDB_BITSET_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <mdb/mdb_modapi.h>
+
+extern int bitset(uintptr_t, uint_t, int, const mdb_arg_t *);
+extern void bitset_help(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MDB_BITSET_H */
diff --git a/usr/src/cmd/mdb/common/modules/genunix/damap.c b/usr/src/cmd/mdb/common/modules/genunix/damap.c
new file mode 100644
index 0000000000..342e432bbf
--- /dev/null
+++ b/usr/src/cmd/mdb/common/modules/genunix/damap.c
@@ -0,0 +1,325 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <mdb/mdb_modapi.h>
+#include <sys/sysmacros.h>
+#include <sys/sunddi.h>
+#include <sys/damap_impl.h>
+
+#include "damap.h"
+
+void
+damap_help(void)
+{
+ mdb_printf("Print the damap at the address given.\n");
+ mdb_printf("\n");
+ mdb_printf("EXAMPLE: SCSI: To display the SCSI tgtmap damaps ");
+ mdb_printf("associated with a scsi HBA driver iport dip:\n");
+ mdb_printf("\n");
+ mdb_printf("::devbindings -q <driver_name>\n");
+ mdb_printf("\n");
+ mdb_printf("<iport-dip>::print struct dev_info devi_driver_data|");
+ mdb_printf("::print scsi_hba_tran_t tran_tgtmap|");
+ mdb_printf("::print impl_scsi_tgtmap_t ");
+ mdb_printf("tgtmap_dam[0] tgtmap_dam[1]|::damap\n");
+}
+
+static char *
+local_strdup(const char *s)
+{
+ if (s)
+ return (strcpy(mdb_alloc(strlen(s) + 1, UM_SLEEP), s));
+ else
+ return (NULL);
+}
+
+static void
+local_strfree(const char *s)
+{
+ if (s)
+ mdb_free((void *)s, strlen(s) + 1);
+}
+
+static void
+bitset_free(bitset_t *bs, int embedded)
+{
+ if (bs == NULL)
+ return;
+ if (bs->bs_set && bs->bs_words)
+ mdb_free(bs->bs_set, bs->bs_words * sizeof (ulong_t));
+ if (!embedded)
+ mdb_free(bs, sizeof (*bs)); /* not embedded, free */
+}
+
+static bitset_t *
+bitset_get(uintptr_t bsaddr)
+{
+ bitset_t *bs;
+
+ bs = mdb_zalloc(sizeof (*bs), UM_SLEEP);
+ if (mdb_vread(bs, sizeof (*bs), bsaddr) == -1) {
+ mdb_warn("couldn't read bitset 0x%p", bsaddr);
+ bitset_free(bs, 0);
+ return (NULL);
+ }
+
+ bsaddr = (uintptr_t)bs->bs_set;
+ bs->bs_set = mdb_alloc(bs->bs_words * sizeof (ulong_t), UM_SLEEP);
+ if (mdb_vread(bs->bs_set,
+ bs->bs_words * sizeof (ulong_t), bsaddr) == -1) {
+ mdb_warn("couldn't read bitset bs_set 0x%p", bsaddr);
+ bitset_free(bs, 0);
+ return (NULL);
+ }
+ return (bs);
+
+}
+
+static void
+damap_free(struct dam *dam, void **kdamda, int kdamda_n)
+{
+ int i;
+ struct i_ddi_soft_state *ss;
+ dam_da_t *da;
+
+ if (dam) {
+ /* free in dam_da_t softstate */
+ ss = (struct i_ddi_soft_state *)dam->dam_da;
+ if (ss) {
+ if (ss->n_items && ss->array) {
+ for (i = 0; i < ss->n_items; i++) {
+ da = ss->array[i];
+ if (da == NULL)
+ continue;
+ local_strfree(da->da_addr);
+ mdb_free(da, sizeof (*da));
+ }
+ }
+
+ mdb_free(ss, sizeof (*ss));
+ }
+
+ /* free dam_active/stable/report_set embedded in dam */
+ bitset_free(&dam->dam_report_set, 1);
+ bitset_free(&dam->dam_stable_set, 1);
+ bitset_free(&dam->dam_active_set, 1);
+
+ /* free dam_name */
+ local_strfree(dam->dam_name);
+
+ /* free dam */
+ mdb_free(dam, sizeof (*dam));
+ }
+
+ if (kdamda)
+ mdb_free(kdamda, kdamda_n * sizeof (void *));
+}
+
+/*
+ * The dam implementation uses a number of different abstractions. Given a
+ * pointer to a damap_t, this function make an mdb instantiation of the dam -
+ * many, but not all, of the different abstractions used in the dam
+ * implementation are also instantiated in mdb. This means that callers of
+ * damap_get can perform some (but not all) types of structure pointer
+ * traversals.
+ */
+struct dam *
+damap_get(uintptr_t damaddr, void ***pkdamda, int *pkdamda_n)
+{
+ /* variables that hold instantiation read from kernel */
+ struct dam kdam;
+ char kstring[MAXPATHLEN];
+ struct i_ddi_soft_state kss;
+ void **kssarray = NULL;
+ int array_sz = 0;
+
+ /* variables that hold mdb instantiation */
+ struct dam *dam = NULL;
+ struct i_ddi_soft_state *ss;
+ bitset_t *bs;
+ dam_da_t *da;
+
+ int i;
+
+ /* read kernel: dam */
+ if (mdb_vread(&kdam, sizeof (kdam), damaddr) == -1) {
+ mdb_warn("couldn't read dam 0x%p", damaddr);
+ goto err;
+ }
+
+ /* read kernel: dam->dam_name */
+ mdb_readstr(kstring, sizeof (kstring), (uintptr_t)kdam.dam_name);
+
+ /* read kernel: dam->dam_da (softstate) */
+ if (mdb_vread(&kss, sizeof (kss), (uintptr_t)kdam.dam_da) == -1) {
+ mdb_warn("couldn't read dam dam_da 0x%p",
+ (uintptr_t)kdam.dam_da);
+ goto err;
+ }
+
+ /* read kernel ((struct i_ddi_soft_state *)dam->dam_da)->array */
+ array_sz = kss.n_items * sizeof (void *);
+ kssarray = mdb_alloc(array_sz, UM_SLEEP);
+ if (mdb_vread(kssarray, array_sz, (uintptr_t)kss.array) == -1) {
+ mdb_warn("couldn't read dam dam_da array 0x%p",
+ (uintptr_t)kss.array);
+ goto err;
+ }
+
+ /*
+ * Produce mdb instantiation of kernel data structures.
+ *
+ * Structure copy kdam to dam, then clear out pointers in dam (some
+ * will be filled in by mdb instantiation code below).
+ */
+ dam = mdb_zalloc(sizeof (*dam), UM_SLEEP);
+ *dam = kdam;
+ dam->dam_name = NULL;
+
+ dam->dam_active_set.bs_set = NULL;
+ dam->dam_stable_set.bs_set = NULL;
+ dam->dam_report_set.bs_set = NULL;
+
+ dam->dam_da = NULL;
+ /* dam_addr_hash, dam_taskqp, dam_kstatp left as kernel addresses */
+
+ /* fill in dam_name */
+ dam->dam_name = local_strdup(kstring);
+
+ /* fill in dam_active/stable/report_set embedded in the dam */
+ bs = bitset_get(damaddr + (offsetof(struct dam, dam_active_set)));
+ if (bs) {
+ dam->dam_active_set = *bs;
+ mdb_free(bs, sizeof (*bs));
+ }
+ bs = bitset_get(damaddr + (offsetof(struct dam, dam_stable_set)));
+ if (bs) {
+ dam->dam_stable_set = *bs;
+ mdb_free(bs, sizeof (*bs));
+ }
+ bs = bitset_get(damaddr + (offsetof(struct dam, dam_report_set)));
+ if (bs) {
+ dam->dam_report_set = *bs;
+ mdb_free(bs, sizeof (*bs));
+ }
+
+ /* fill in dam_da_t softstate */
+ ss = mdb_zalloc(sizeof (struct i_ddi_soft_state), UM_SLEEP);
+ *ss = kss;
+ ss->next = NULL;
+ ss->array = mdb_zalloc(array_sz, UM_SLEEP);
+ dam->dam_da = ss;
+ for (i = 0; i < kss.n_items; i++) {
+ if (kssarray[i] == NULL)
+ continue;
+ da = ss->array[i] = mdb_zalloc(sizeof (*da), UM_SLEEP);
+ if (mdb_vread(da, sizeof (*da), (uintptr_t)kssarray[i]) == -1) {
+ mdb_warn("couldn't read dam dam_da %d 0x%p", i,
+ (uintptr_t)kss.array);
+ goto err;
+ }
+ /* da_nvl, da_ppriv_rpt, da_nvl_rpt left as kernel addresses */
+
+ /* read kernel: da->da_addr */
+ mdb_readstr(kstring, sizeof (kstring), (uintptr_t)da->da_addr);
+ da->da_addr = local_strdup(kstring);
+ }
+
+ /* return array of kernel dam_da_t pointers associated with each id */
+ *pkdamda = kssarray;
+ *pkdamda_n = array_sz / sizeof (void *);
+
+ /* return pointer to mdb instantiation of the dam */
+ return (dam);
+
+err: damap_free(dam, kssarray, array_sz / sizeof (void *));
+ *pkdamda = NULL;
+ *pkdamda_n = 0;
+ return (NULL);
+}
+
+/*ARGSUSED*/
+static void
+damap_print(struct dam *dam, void **kdamda, int kdamda_n)
+{
+ struct i_ddi_soft_state *ss;
+ dam_da_t *da;
+ int i;
+
+ mdb_printf("%s:\n", dam->dam_name);
+
+ ss = (struct i_ddi_soft_state *)dam->dam_da;
+ if (ss == NULL)
+ return;
+
+ if ((ss->n_items == 0) || (ss->array == NULL))
+ return;
+
+ for (i = 0; i < ss->n_items; i++) {
+ da = ss->array[i];
+ if (da == NULL)
+ continue;
+
+ /* Print index and address. */
+ mdb_printf(" %3d: %s [", i, da->da_addr);
+
+ /* Print shorthand of Active/Stable/Report set membership */
+ if (BT_TEST(dam->dam_active_set.bs_set, i))
+ mdb_printf("A");
+ else
+ mdb_printf(".");
+ if (BT_TEST(dam->dam_stable_set.bs_set, i))
+ mdb_printf("S");
+ else
+ mdb_printf(".");
+ if (BT_TEST(dam->dam_report_set.bs_set, i))
+ mdb_printf("R");
+ else
+ mdb_printf(".");
+
+ /* Print the reference count and priv */
+ mdb_printf("] %2d %0?lx %0?lx\n",
+ da->da_ref, da->da_cfg_priv, da->da_ppriv);
+
+ mdb_printf("\t\t\t\t%p::print -ta dam_da_t\n", kdamda[i]);
+ }
+}
+
+/*ARGSUSED*/
+int
+damap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ struct dam *dam;
+ void **kdamda;
+ int kdamda_n;
+
+ dam = damap_get(addr, &kdamda, &kdamda_n);
+ if (dam == NULL)
+ return (DCMD_ERR);
+
+ damap_print(dam, kdamda, kdamda_n);
+ damap_free(dam, kdamda, kdamda_n);
+ return (DCMD_OK);
+}
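
damap_get() above follows a common mdb-module pattern: read the top-level kernel structure with mdb_vread(), then replace each embedded kernel pointer with a locally allocated copy of what it points to, so the rest of the dcmd can dereference the mdb-side object directly. A condensed sketch of that pattern, using a hypothetical struct foo / foo_name rather than anything from this commit:

#include <mdb/mdb_modapi.h>
#include <string.h>

struct foo {
	char	*foo_name;	/* in the kernel: address of a string */
};

static struct foo *
foo_get(uintptr_t addr)
{
	char name[256];
	struct foo *fp = mdb_zalloc(sizeof (*fp), UM_SLEEP);

	/* copy the top-level structure out of the kernel image */
	if (mdb_vread(fp, sizeof (*fp), addr) == -1) {
		mdb_warn("couldn't read foo 0x%p", addr);
		mdb_free(fp, sizeof (*fp));
		return (NULL);
	}

	/* replace the kernel string pointer with an mdb-side copy */
	if (fp->foo_name != NULL &&
	    mdb_readstr(name, sizeof (name), (uintptr_t)fp->foo_name) != -1)
		fp->foo_name = strcpy(mdb_alloc(strlen(name) + 1, UM_SLEEP),
		    name);
	else
		fp->foo_name = NULL;

	return (fp);
}

Pointers that are not copied (here, as in damap_get(), things like dam_taskqp or da_nvl) stay as kernel addresses, which is why only some ::print-style traversals work on the mdb instantiation.
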
diff --git a/usr/src/cmd/mdb/common/modules/genunix/damap.h b/usr/src/cmd/mdb/common/modules/genunix/damap.h
new file mode 100644
index 0000000000..c568693606
--- /dev/null
+++ b/usr/src/cmd/mdb/common/modules/genunix/damap.h
@@ -0,0 +1,43 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _MDB_DAMAP_H
+#define _MDB_DAMAP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <mdb/mdb_modapi.h>
+
+extern int damap(uintptr_t, uint_t, int, const mdb_arg_t *);
+extern void damap_help(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MDB_DAMAP_H */
diff --git a/usr/src/cmd/mdb/common/modules/genunix/genunix.c b/usr/src/cmd/mdb/common/modules/genunix/genunix.c
index c2f2653466..912d5f7ce7 100644
--- a/usr/src/cmd/mdb/common/modules/genunix/genunix.c
+++ b/usr/src/cmd/mdb/common/modules/genunix/genunix.c
@@ -68,43 +68,45 @@
#include <sys/port_impl.h>
#include "avl.h"
+#include "bio.h"
+#include "bitset.h"
#include "combined.h"
#include "contract.h"
#include "cpupart_mdb.h"
+#include "ctxop.h"
+#include "cyclic.h"
+#include "damap.h"
#include "devinfo.h"
+#include "findstack.h"
+#include "fm.h"
+#include "group.h"
#include "irm.h"
+#include "kgrep.h"
+#include "kmem.h"
+#include "ldi.h"
#include "leaky.h"
#include "lgrp.h"
-#include "pg.h"
-#include "group.h"
#include "list.h"
#include "log.h"
-#include "kgrep.h"
-#include "kmem.h"
-#include "bio.h"
-#include "streams.h"
-#include "cyclic.h"
-#include "findstack.h"
-#include "ndievents.h"
+#include "mdi.h"
+#include "memory.h"
#include "mmd.h"
+#include "modhash.h"
+#include "ndievents.h"
#include "net.h"
#include "netstack.h"
#include "nvpair.h"
-#include "ctxop.h"
-#include "tsd.h"
-#include "thread.h"
-#include "memory.h"
+#include "pg.h"
+#include "rctl.h"
#include "sobj.h"
+#include "streams.h"
#include "sysevent.h"
-#include "rctl.h"
+#include "thread.h"
+#include "tsd.h"
#include "tsol.h"
#include "typegraph.h"
-#include "ldi.h"
#include "vfs.h"
#include "zone.h"
-#include "modhash.h"
-#include "mdi.h"
-#include "fm.h"
/*
* Surely this is defined somewhere...
@@ -4329,14 +4331,12 @@ static const mdb_dcmd_t dcmds[] = {
{ "whereopen", ":", "given a vnode, dumps procs which have it open",
whereopen },
- /* from zone.c */
- { "zone", "?", "display kernel zone(s)", zoneprt },
- { "zsd", ":[-v] [zsd_key]", "display zone-specific-data entries for "
- "selected zones", zsd },
-
/* from bio.c */
{ "bufpagefind", ":addr", "find page_t on buf_t list", bufpagefind },
+ /* from bitset.c */
+ { "bitset", ":", "display a bitset", bitset, bitset_help },
+
/* from contract.c */
{ "contract", "?", "display a contract", cmd_contract },
{ "ctevent", ":", "display a contract event", cmd_ctevent },
@@ -4352,6 +4352,9 @@ static const mdb_dcmd_t dcmds[] = {
{ "cyclic", ":", "developer information", cyclic },
{ "cyctrace", "?", "dump cyclic trace buffer", cyctrace },
+ /* from damap.c */
+ { "damap", ":", "display a damap_t", damap, damap_help },
+
/* from devinfo.c */
{ "devbindings", "?[-qs] [device-name | major-num]",
"print devinfo nodes bound to device-name or major-num",
@@ -4402,6 +4405,9 @@ static const mdb_dcmd_t dcmds[] = {
"print unique kernel thread stacks",
stacks, stacks_help },
+ /* from group.c */
+ { "group", "?[-q]", "display a group", group},
+
/* from irm.c */
{ "irmpools", NULL, "display interrupt pools", irmpools_dcmd },
{ "irmreqs", NULL, "display interrupt requests in an interrupt pool",
@@ -4458,6 +4464,22 @@ static const mdb_dcmd_t dcmds[] = {
/* from log.c */
{ "msgbuf", "?[-v]", "print most recent console messages", msgbuf },
+ /* from mdi.c */
+ { "mdipi", NULL, "given a path, dump mdi_pathinfo "
+ "and detailed pi_prop list", mdipi },
+ { "mdiprops", NULL, "given a pi_prop, dump the pi_prop list",
+ mdiprops },
+ { "mdiphci", NULL, "given a phci, dump mdi_phci and "
+ "list all paths", mdiphci },
+ { "mdivhci", NULL, "given a vhci, dump mdi_vhci and list "
+ "all phcis", mdivhci },
+ { "mdiclient_paths", NULL, "given a path, walk mdi_pathinfo "
+ "client links", mdiclient_paths },
+ { "mdiphci_paths", NULL, "given a path, walk through mdi_pathinfo "
+ "phci links", mdiphci_paths },
+ { "mdiphcis", NULL, "given a phci, walk through mdi_phci ph_next links",
+ mdiphcis },
+
/* from memory.c */
{ "page", "?", "display a summarized page_t", page },
{ "memstat", NULL, "display memory usage summary", memstat },
@@ -4507,10 +4529,7 @@ static const mdb_dcmd_t dcmds[] = {
/* from pg.c */
{ "pg", "?[-q]", "display a pg", pg},
- /* from group.c */
- { "group", "?[-q]", "display a group", group},
- /* from log.c */
/* from rctl.c */
{ "rctl_dict", "?", "print systemwide default rctl definitions",
rctl_dict },
@@ -4585,21 +4604,10 @@ static const mdb_dcmd_t dcmds[] = {
{ "pfiles", ":[-fp]", "print process file information", pfiles,
pfiles_help },
- /* from mdi.c */
- { "mdipi", NULL, "given a path, dump mdi_pathinfo "
- "and detailed pi_prop list", mdipi },
- { "mdiprops", NULL, "given a pi_prop, dump the pi_prop list",
- mdiprops },
- { "mdiphci", NULL, "given a phci, dump mdi_phci and "
- "list all paths", mdiphci },
- { "mdivhci", NULL, "given a vhci, dump mdi_vhci and list "
- "all phcis", mdivhci },
- { "mdiclient_paths", NULL, "given a path, walk mdi_pathinfo "
- "client links", mdiclient_paths },
- { "mdiphci_paths", NULL, "given a path, walk through mdi_pathinfo "
- "phci links", mdiphci_paths },
- { "mdiphcis", NULL, "given a phci, walk through mdi_phci ph_next links",
- mdiphcis },
+ /* from zone.c */
+ { "zone", "?", "display kernel zone(s)", zoneprt },
+ { "zsd", ":[-v] [zsd_key]", "display zone-specific-data entries for "
+ "selected zones", zsd },
{ NULL }
};
@@ -4671,12 +4679,6 @@ static const mdb_walker_t walkers[] = {
{ AVL_WALK_NAME, AVL_WALK_DESC,
avl_walk_init, avl_walk_step, avl_walk_fini },
- /* from zone.c */
- { "zone", "walk a list of kernel zones",
- zone_walk_init, zone_walk_step, NULL },
- { "zsd", "walk list of zsd entries for a zone",
- zsd_walk_init, zsd_walk_step, NULL },
-
/* from bio.c */
{ "buf", "walk the bio buf hash",
buf_walk_init, buf_walk_step, buf_walk_fini },
@@ -4745,6 +4747,10 @@ static const mdb_walker_t walkers[] = {
"walk a fault management handle cache active list",
devinfo_fmc_walk_init, devinfo_fmc_walk_step, NULL },
+ /* from group.c */
+ { "group", "walk all elements of a group",
+ group_walk_init, group_walk_step, NULL },
+
/* from irm.c */
{ "irmpools", "walk global list of interrupt pools",
irmpools_walk_init, list_walk_step, list_walk_fini },
@@ -4822,14 +4828,24 @@ static const mdb_walker_t walkers[] = {
{ "lgrp_rsrc_cpu", "walk lgroup CPU resources of given lgroup",
lgrp_rsrc_cpu_walk_init, lgrp_set_walk_step, NULL },
- /* from group.c */
- { "group", "walk all elements of a group",
- group_walk_init, group_walk_step, NULL },
-
/* from list.c */
{ LIST_WALK_NAME, LIST_WALK_DESC,
list_walk_init, list_walk_step, list_walk_fini },
+ /* from mdi.c */
+ { "mdipi_client_list", "Walker for mdi_pathinfo pi_client_link",
+ mdi_pi_client_link_walk_init,
+ mdi_pi_client_link_walk_step,
+ mdi_pi_client_link_walk_fini },
+ { "mdipi_phci_list", "Walker for mdi_pathinfo pi_phci_link",
+ mdi_pi_phci_link_walk_init,
+ mdi_pi_phci_link_walk_step,
+ mdi_pi_phci_link_walk_fini },
+ { "mdiphci_list", "Walker for mdi_phci ph_next link",
+ mdi_phci_ph_next_walk_init,
+ mdi_phci_ph_next_walk_step,
+ mdi_phci_ph_next_walk_fini },
+
/* from memory.c */
{ "page", "walk all pages, or those from the specified vnode",
page_walk_init, page_walk_step, page_walk_fini },
@@ -4875,6 +4891,10 @@ static const mdb_walker_t walkers[] = {
{ "udp_stacks", "walk all the udp_stack_t",
udp_stacks_walk_init, udp_stacks_walk_step, NULL },
+ /* from netstack.c */
+ { "netstack", "walk a list of kernel netstacks",
+ netstack_walk_init, netstack_walk_step, NULL },
+
/* from nvpair.c */
{ NVPAIR_WALKER_NAME, NVPAIR_WALKER_DESCR,
nvpair_walk_init, nvpair_walk_step, NULL },
@@ -4950,25 +4970,11 @@ static const mdb_walker_t walkers[] = {
{ "vfs", "walk file system list",
vfs_walk_init, vfs_walk_step },
- /* from mdi.c */
- { "mdipi_client_list", "Walker for mdi_pathinfo pi_client_link",
- mdi_pi_client_link_walk_init,
- mdi_pi_client_link_walk_step,
- mdi_pi_client_link_walk_fini },
-
- { "mdipi_phci_list", "Walker for mdi_pathinfo pi_phci_link",
- mdi_pi_phci_link_walk_init,
- mdi_pi_phci_link_walk_step,
- mdi_pi_phci_link_walk_fini },
-
- { "mdiphci_list", "Walker for mdi_phci ph_next link",
- mdi_phci_ph_next_walk_init,
- mdi_phci_ph_next_walk_step,
- mdi_phci_ph_next_walk_fini },
-
- /* from netstack.c */
- { "netstack", "walk a list of kernel netstacks",
- netstack_walk_init, netstack_walk_step, NULL },
+ /* from zone.c */
+ { "zone", "walk a list of kernel zones",
+ zone_walk_init, zone_walk_step, NULL },
+ { "zsd", "walk list of zsd entries for a zone",
+ zsd_walk_init, zsd_walk_step, NULL },
{ NULL }
};
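
The dcmds[] and walkers[] tables being reordered above are what ::dcmds and ::walkers ultimately list; every mdb module, including the new pmcs module added by this commit, exports such tables through an mdb_modinfo_t returned from its _mdb_init() entry point. A minimal sketch of that boilerplate, with a placeholder dcmd rather than the real pmcs or genunix ones:

#include <mdb/mdb_modapi.h>

/* placeholder dcmd: print the address it was given */
/*ARGSUSED*/
static int
example_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	if (!(flags & DCMD_ADDRSPEC) || argc != 0)
		return (DCMD_USAGE);

	mdb_printf("%p\n", addr);
	return (DCMD_OK);
}

static const mdb_dcmd_t dcmds[] = {
	{ "example", ":", "print the given address", example_dcmd },
	{ NULL }
};

static const mdb_walker_t walkers[] = {
	{ NULL }
};

static const mdb_modinfo_t modinfo = {
	MDB_API_VERSION, dcmds, walkers
};

const mdb_modinfo_t *
_mdb_init(void)
{
	return (&modinfo);
}

Once the module is loaded (::load), its dcmds and walkers appear alongside the genunix entries shown above.
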
diff --git a/usr/src/cmd/mdb/common/modules/pmcs/pmcs.c b/usr/src/cmd/mdb/common/modules/pmcs/pmcs.c
new file mode 100644
index 0000000000..5e43523c95
--- /dev/null
+++ b/usr/src/cmd/mdb/common/modules/pmcs/pmcs.c
@@ -0,0 +1,1776 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <limits.h>
+#include <sys/mdb_modapi.h>
+#include <sys/sysinfo.h>
+#include <sys/scsi/scsi.h>
+#include <sys/scsi/adapters/pmcs/pmcs.h>
+
+#define MDB_RD(a, b, c) mdb_vread(a, b, (uintptr_t)c)
+#define NOREAD(a, b) mdb_warn("could not read " #a " at 0x%p", b)
+
+static pmcs_hw_t ss;
+static pmcs_xscsi_t **targets = NULL;
+static int target_idx;
+
+static uint32_t sas_phys, sata_phys, exp_phys, num_expanders, empty_phys;
+
+static pmcs_phy_t *pmcs_next_sibling(pmcs_phy_t *phyp);
+
+static void
+print_sas_address(pmcs_phy_t *phy)
+{
+ int idx;
+
+ for (idx = 0; idx < 8; idx++) {
+ mdb_printf("%02x", phy->sas_address[idx]);
+ }
+}
+
+/*ARGSUSED*/
+static void
+display_ic(struct pmcs_hw m, int verbose)
+{
+ int msec_per_tick;
+
+ if (mdb_readvar(&msec_per_tick, "msec_per_tick") == -1) {
+ mdb_warn("can't read msec_per_tick");
+ msec_per_tick = 0;
+ }
+
+ mdb_printf("\n");
+ mdb_printf("Interrupt coalescing timer info\n");
+ mdb_printf("-------------------------------\n");
+ if (msec_per_tick == 0) {
+ mdb_printf("Quantum : ?? ms\n");
+ } else {
+ mdb_printf("Quantum : %d ms\n",
+ m.io_intr_coal.quantum * msec_per_tick);
+ }
+ mdb_printf("Timer enabled : ");
+ if (m.io_intr_coal.timer_on) {
+ mdb_printf("Yes\n");
+ mdb_printf("Coalescing timer value : %d us\n",
+ m.io_intr_coal.intr_coal_timer);
+ } else {
+ mdb_printf("No\n");
+ }
+ mdb_printf("Total nsecs between interrupts: %ld\n",
+ m.io_intr_coal.nsecs_between_intrs);
+ mdb_printf("Time of last I/O interrupt : %ld\n",
+ m.io_intr_coal.last_io_comp);
+ mdb_printf("Number of I/O interrupts : %d\n",
+ m.io_intr_coal.num_intrs);
+ mdb_printf("Number of I/O completions : %d\n",
+ m.io_intr_coal.num_io_completions);
+ mdb_printf("Max I/O completion interrupts : %d\n",
+ m.io_intr_coal.max_io_completions);
+ mdb_printf("Measured ECHO int latency : %d ns\n",
+ m.io_intr_coal.intr_latency);
+ mdb_printf("Interrupt threshold : %d\n",
+ m.io_intr_coal.intr_threshold);
+}
+
+/*ARGSUSED*/
+static int
+pmcs_iport_phy_walk_cb(uintptr_t addr, const void *wdata, void *priv)
+{
+ struct pmcs_phy phy;
+
+ if (mdb_vread(&phy, sizeof (struct pmcs_phy), addr) !=
+ sizeof (struct pmcs_phy)) {
+ return (DCMD_ERR);
+ }
+
+ mdb_printf("%16p %2d\n", addr, phy.phynum);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+pmcs_iport_walk_cb(uintptr_t addr, const void *wdata, void *priv)
+{
+ struct pmcs_iport iport;
+ uintptr_t list_addr;
+ char *ua_state;
+ char portid[4];
+ char unit_address[34];
+
+ if (mdb_vread(&iport, sizeof (struct pmcs_iport), addr) !=
+ sizeof (struct pmcs_iport)) {
+ return (DCMD_ERR);
+ }
+
+ if (mdb_readstr(unit_address, sizeof (unit_address),
+ (uintptr_t)(iport.ua)) == -1) {
+ strncpy(unit_address, "Unset", sizeof (unit_address));
+ }
+
+ if (iport.portid == 0xffff) {
+ mdb_snprintf(portid, sizeof (portid), "%s", "-");
+ } else {
+ mdb_snprintf(portid, sizeof (portid), "%d", iport.portid);
+ }
+
+ switch (iport.ua_state) {
+ case UA_INACTIVE:
+ ua_state = "Inactive";
+ break;
+ case UA_PEND_ACTIVATE:
+ ua_state = "PendActivate";
+ break;
+ case UA_ACTIVE:
+ ua_state = "Active";
+ break;
+ case UA_PEND_DEACTIVATE:
+ ua_state = "PendDeactivate";
+ break;
+ default:
+ ua_state = "Unknown";
+ }
+
+ if (strlen(unit_address) < 3) {
+ /* Standard iport unit address */
+ mdb_printf("UA %-16s %16s %8s %8s %16s", "Iport", "UA State",
+ "PortID", "NumPhys", "DIP\n");
+ mdb_printf("%2s %16p %16s %8s %8d %16p\n", unit_address, addr,
+ ua_state, portid, iport.nphy, iport.dip);
+ } else {
+ /* Temporary iport unit address */
+ mdb_printf("%-32s %16s %20s %8s %8s %16s", "UA", "Iport",
+ "UA State", "PortID", "NumPhys", "DIP\n");
+ mdb_printf("%32s %16p %20s %8s %8d %16p\n", unit_address, addr,
+ ua_state, portid, iport.nphy, iport.dip);
+ }
+
+ if (iport.nphy > 0) {
+ mdb_inc_indent(4);
+ mdb_printf("%-18s %8s", "Phy", "PhyNum\n");
+ mdb_inc_indent(2);
+ list_addr =
+ (uintptr_t)(addr + offsetof(struct pmcs_iport, phys));
+ if (mdb_pwalk("list", pmcs_iport_phy_walk_cb, NULL,
+ list_addr) == -1) {
+ mdb_warn("pmcs iport walk failed");
+ }
+ mdb_dec_indent(6);
+ mdb_printf("\n");
+ }
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static void
+display_iport(struct pmcs_hw m, uintptr_t addr, int verbose)
+{
+ uintptr_t list_addr;
+
+ if (m.iports_attached) {
+ mdb_printf("Iport information:\n");
+ mdb_printf("-----------------\n");
+ } else {
+ mdb_printf("No Iports found.\n\n");
+ return;
+ }
+
+ list_addr = (uintptr_t)(addr + offsetof(struct pmcs_hw, iports));
+
+ if (mdb_pwalk("list", pmcs_iport_walk_cb, NULL, list_addr) == -1) {
+ mdb_warn("pmcs iport walk failed");
+ }
+
+ mdb_printf("\n");
+}
+
+/*ARGSUSED*/
+static void
+display_hwinfo(struct pmcs_hw m, int verbose)
+{
+ struct pmcs_hw *mp = &m;
+ char *fwsupport;
+
+ switch (PMCS_FW_TYPE(mp)) {
+ case PMCS_FW_TYPE_RELEASED:
+ fwsupport = "Released";
+ break;
+ case PMCS_FW_TYPE_DEVELOPMENT:
+ fwsupport = "Development";
+ break;
+ case PMCS_FW_TYPE_ALPHA:
+ fwsupport = "Alpha";
+ break;
+ case PMCS_FW_TYPE_BETA:
+ fwsupport = "Beta";
+ break;
+ default:
+ fwsupport = "Special";
+ break;
+ }
+
+ mdb_printf("\nHardware information:\n");
+ mdb_printf("---------------------\n");
+
+ mdb_printf("Chip revision: %c\n", 'A' + m.chiprev);
+ mdb_printf("SAS WWID: %"PRIx64"\n", m.sas_wwns[0]);
+ mdb_printf("Firmware version: %x.%x.%x (%s)\n",
+ PMCS_FW_MAJOR(mp), PMCS_FW_MINOR(mp), PMCS_FW_MICRO(mp),
+ fwsupport);
+
+ mdb_printf("Number of PHYs: %d\n", m.nphy);
+ mdb_printf("Maximum commands: %d\n", m.max_cmd);
+ mdb_printf("Maximum devices: %d\n", m.max_dev);
+ mdb_printf("I/O queue depth: %d\n", m.ioq_depth);
+ if (m.fwlog == 0) {
+ mdb_printf("Firmware logging: Disabled\n");
+ } else {
+ mdb_printf("Firmware logging: Enabled (%d)\n", m.fwlog);
+ }
+}
+
+static void
+display_targets(struct pmcs_hw m, int verbose, int totals_only)
+{
+ char *dtype;
+ pmcs_xscsi_t xs;
+ pmcs_phy_t phy;
+ uint16_t max_dev, idx;
+ uint32_t sas_targets = 0, smp_targets = 0, sata_targets = 0;
+
+ max_dev = m.max_dev;
+
+ if (targets == NULL) {
+ targets = mdb_alloc(sizeof (targets) * max_dev, UM_SLEEP);
+ }
+
+ if (MDB_RD(targets, sizeof (targets) * max_dev, m.targets) == -1) {
+ NOREAD(targets, m.targets);
+ return;
+ }
+
+ if (!totals_only) {
+ mdb_printf("\nTarget information:\n");
+ mdb_printf("---------------------------------------\n");
+ mdb_printf("VTGT %-16s %-16s %-5s %8s %s", "SAS Address",
+ "PHY Address", "DType", "Active", "DS");
+ mdb_printf("\n");
+ }
+
+ for (idx = 0; idx < max_dev; idx++) {
+ if (targets[idx] == NULL) {
+ continue;
+ }
+
+ if (MDB_RD(&xs, sizeof (xs), targets[idx]) == -1) {
+ NOREAD(pmcs_xscsi_t, targets[idx]);
+ continue;
+ }
+
+ /*
+ * It has to be one of new, assigned or dying to be of interest.
+ */
+ if (xs.new == 0 && xs.assigned == 0 && xs.dying == 0) {
+ continue;
+ }
+
+ switch (xs.dtype) {
+ case NOTHING:
+ dtype = "None";
+ break;
+ case SATA:
+ dtype = "SATA";
+ sata_targets++;
+ break;
+ case SAS:
+ dtype = "SAS";
+ sas_targets++;
+ break;
+ case EXPANDER:
+ dtype = "SMP";
+ smp_targets++;
+ break;
+ }
+
+ if (totals_only) {
+ continue;
+ }
+
+ if (xs.phy) {
+ if (MDB_RD(&phy, sizeof (phy), xs.phy) == -1) {
+ NOREAD(pmcs_phy_t, xs.phy);
+ continue;
+ }
+ mdb_printf("%4d ", idx);
+ print_sas_address(&phy);
+ mdb_printf(" %16p", xs.phy);
+ } else {
+ mdb_printf("%4d %16s", idx, "<no phy avail>");
+ }
+ mdb_printf(" %5s", dtype);
+ mdb_printf(" %8d", xs.actv_cnt);
+ mdb_printf(" %2d", xs.dev_state);
+
+ if (verbose) {
+ if (xs.new) {
+ mdb_printf(" new");
+ } else if (xs.dying) {
+ mdb_printf(" dying");
+ } else if (xs.assigned) {
+ mdb_printf(" assigned");
+ }
+ if (xs.draining) {
+ mdb_printf(" draining");
+ }
+ if (xs.reset_wait) {
+ mdb_printf(" reset_wait");
+ }
+ if (xs.resetting) {
+ mdb_printf(" resetting");
+ }
+ if (xs.recover_wait) {
+ mdb_printf(" recover_wait");
+ }
+ if (xs.recovering) {
+ mdb_printf(" recovering");
+ }
+ if (xs.event_recovery) {
+ mdb_printf(" event recovery");
+ }
+ if (xs.special_running) {
+ mdb_printf(" special_active");
+ }
+ if (xs.ncq) {
+ mdb_printf(" ncq_tagmap=0x%x qdepth=%d",
+ xs.tagmap, xs.qdepth);
+ } else if (xs.pio) {
+ mdb_printf(" pio");
+ }
+ }
+
+ mdb_printf("\n");
+ }
+
+ if (!totals_only) {
+ mdb_printf("\n");
+ }
+
+ mdb_printf("%19s %d (%d SAS + %d SATA + %d SMP)\n",
+ "Configured targets:", (sas_targets + sata_targets + smp_targets),
+ sas_targets, sata_targets, smp_targets);
+}
+
+/*ARGSUSED1*/
+static void
+display_work(struct pmcs_hw m, int verbose)
+{
+ int idx;
+ int tgt;
+ int hdrp = 0;
+ pmcs_xscsi_t xs;
+ pmcs_phy_t phy;
+ char buf[16];
+ pmcwork_t work, *wp = &work;
+ uintptr_t _wp;
+ char *state;
+ char *path;
+
+ mdb_printf("\nActive Work structure information:\n");
+ mdb_printf("----------------------------------\n");
+
+ _wp = (uintptr_t)m.work;
+
+ for (idx = 0; idx < m.max_cmd; idx++, _wp += sizeof (pmcwork_t)) {
+ if (MDB_RD(&work, sizeof (pmcwork_t), _wp) == -1) {
+ NOREAD(pmcwork_t, _wp);
+ continue;
+ }
+ if (wp->htag == PMCS_TAG_TYPE_FREE) {
+ continue;
+ }
+ if (hdrp++ == 0) {
+ mdb_printf("%8s %10s %20s %8s %8s O D\n",
+ "HTag", "State", "Phy Path", "Target", "Timer");
+ }
+ switch (wp->state) {
+ case PMCS_WORK_STATE_NIL:
+ state = "N/A";
+ break;
+ case PMCS_WORK_STATE_READY:
+ state = "Ready";
+ break;
+ case PMCS_WORK_STATE_ONCHIP:
+ state = "On Chip";
+ break;
+ case PMCS_WORK_STATE_INTR:
+ state = "In Intr";
+ break;
+ case PMCS_WORK_STATE_IOCOMPQ:
+ state = "I/O Comp";
+ break;
+ case PMCS_WORK_STATE_ABORTED:
+ state = "I/O Aborted";
+ break;
+ case PMCS_WORK_STATE_TIMED_OUT:
+ state = "I/O Timed Out";
+ break;
+ default:
+ mdb_snprintf(buf, sizeof (buf), "STATE=%d", wp->state);
+ state = buf;
+ break;
+ }
+ if (wp->ssp_event && wp->ssp_event != 0xffffffff) {
+ mdb_printf("SSP event 0x%x", wp->ssp_event);
+ }
+ tgt = -1;
+ if (wp->xp) {
+ if (MDB_RD(&xs, sizeof (xs), wp->xp) == -1) {
+ NOREAD(pmcs_xscsi_t, wp->xp);
+ } else {
+ tgt = xs.target_num;
+ }
+ }
+ if (wp->phy) {
+ if (MDB_RD(&phy, sizeof (phy), wp->phy) == -1) {
+ NOREAD(pmcs_phy_t, wp->phy);
+ continue;
+ }
+ path = phy.path;
+ } else {
+ path = "????";
+ }
+ mdb_printf("%08x %10s %20s %8d %8u %1d %1d\n",
+ wp->htag, state, path, tgt, wp->timer,
+ wp->onwire, wp->dead);
+ }
+}
+
+static void
+print_spcmd(pmcs_cmd_t *sp, void *kaddr, int printhdr, int indent)
+{
+ if (indent)
+ mdb_inc_indent(4);
+ if (printhdr) {
+ mdb_printf("%16s %16s %16s %8s %s\n",
+ "Command", "SCSA pkt", "DMA Chunks", "HTAG", "SATL Tag");
+ }
+ mdb_printf("%16p %16p %16p %08x %08x\n",
+ kaddr, sp->cmd_pkt, sp->cmd_clist, sp->cmd_tag, sp->cmd_satltag);
+ if (indent)
+ mdb_dec_indent(4);
+}
+
+/*ARGSUSED1*/
+static void
+display_waitqs(struct pmcs_hw m, int verbose)
+{
+ pmcs_cmd_t *sp, s;
+ pmcs_xscsi_t xs;
+ int first, i;
+ int max_dev = m.max_dev;
+
+ sp = m.dq.stqh_first;
+ first = 1;
+ while (sp) {
+ if (first) {
+ mdb_printf("\nDead Command Queue:\n");
+ mdb_printf("---------------------------\n");
+ }
+ if (MDB_RD(&s, sizeof (s), sp) == -1) {
+ NOREAD(pmcs_cmd_t, sp);
+ break;
+ }
+ print_spcmd(&s, sp, first, 0);
+ sp = s.cmd_next.stqe_next;
+ first = 0;
+ }
+
+ sp = m.cq.stqh_first;
+ first = 1;
+ while (sp) {
+ if (first) {
+ mdb_printf("\nCompletion Command Queue:\n");
+ mdb_printf("---------------------------\n");
+ }
+ if (MDB_RD(&s, sizeof (s), sp) == -1) {
+ NOREAD(pmcs_cmd_t, sp);
+ break;
+ }
+ print_spcmd(&s, sp, first, 0);
+ sp = s.cmd_next.stqe_next;
+ first = 0;
+ }
+
+ if (targets == NULL) {
+ targets = mdb_alloc(sizeof (targets) * max_dev, UM_SLEEP);
+ }
+
+ if (MDB_RD(targets, sizeof (targets) * max_dev, m.targets) == -1) {
+ NOREAD(targets, m.targets);
+ return;
+ }
+
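+ /* Dump the wait, active and special queues of each configured target. */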
+ for (i = 0; i < max_dev; i++) {
+ if (targets[i] == NULL) {
+ continue;
+ }
+ if (MDB_RD(&xs, sizeof (xs), targets[i]) == -1) {
+ NOREAD(pmcs_xscsi_t, targets[i]);
+ continue;
+ }
+ sp = xs.wq.stqh_first;
+ first = 1;
+ while (sp) {
+ if (first) {
+ mdb_printf("\nTarget %u Wait Queue:\n",
+ xs.target_num);
+ mdb_printf("---------------------------\n");
+ }
+ if (MDB_RD(&s, sizeof (s), sp) == -1) {
+ NOREAD(pmcs_cmd_t, sp);
+ break;
+ }
+ print_spcmd(&s, sp, first, 0);
+ sp = s.cmd_next.stqe_next;
+ first = 0;
+ }
+ sp = xs.aq.stqh_first;
+ first = 1;
+ while (sp) {
+ if (first) {
+ mdb_printf("\nTarget %u Active Queue:\n",
+ xs.target_num);
+ mdb_printf("---------------------------\n");
+ }
+ if (MDB_RD(&s, sizeof (s), sp) == -1) {
+ NOREAD(pmcs_cmd_t, sp);
+ break;
+ }
+ print_spcmd(&s, sp, first, 0);
+ sp = s.cmd_next.stqe_next;
+ first = 0;
+ }
+ sp = xs.sq.stqh_first;
+ first = 1;
+ while (sp) {
+ if (first) {
+ mdb_printf("\nTarget %u Special Queue:\n",
+ xs.target_num);
+ mdb_printf("---------------------------\n");
+ }
+ if (MDB_RD(&s, sizeof (s), sp) == -1) {
+ NOREAD(pmcs_cmd_t, sp);
+ break;
+ }
+ print_spcmd(&s, sp, first, 0);
+ sp = s.cmd_next.stqe_next;
+ first = 0;
+ }
+ }
+}
+
+static char *
+ibq_type(int qnum)
+{
+ if (qnum < 0 || qnum >= PMCS_NIQ) {
+ return ("UNKNOWN");
+ }
+
+ if (qnum < PMCS_IQ_OTHER) {
+ return ("I/O");
+ }
+
+ return ("Other");
+}
+
+static char *
+obq_type(int qnum)
+{
+ switch (qnum) {
+ case PMCS_OQ_IODONE:
+ return ("I/O");
+ break;
+ case PMCS_OQ_GENERAL:
+ return ("General");
+ break;
+ case PMCS_OQ_EVENTS:
+ return ("Events");
+ break;
+ default:
+ return ("UNKNOWN");
+ }
+}
+
+static char *
+iomb_cat(uint32_t cat)
+{
+ switch (cat) {
+ case PMCS_IOMB_CAT_NET:
+ return ("NET");
+ break;
+ case PMCS_IOMB_CAT_FC:
+ return ("FC");
+ break;
+ case PMCS_IOMB_CAT_SAS:
+ return ("SAS");
+ break;
+ case PMCS_IOMB_CAT_SCSI:
+ return ("SCSI");
+ break;
+ default:
+ return ("???");
+ }
+}
+
+static char *
+inbound_iomb_opcode(uint32_t opcode)
+{
+ switch (opcode) {
+ case PMCIN_ECHO:
+ return ("ECHO");
+ break;
+ case PMCIN_GET_INFO:
+ return ("GET_INFO");
+ break;
+ case PMCIN_GET_VPD:
+ return ("GET_VPD");
+ break;
+ case PMCIN_PHY_START:
+ return ("PHY_START");
+ break;
+ case PMCIN_PHY_STOP:
+ return ("PHY_STOP");
+ break;
+ case PMCIN_SSP_INI_IO_START:
+ return ("INI_IO_START");
+ break;
+ case PMCIN_SSP_INI_TM_START:
+ return ("INI_TM_START");
+ break;
+ case PMCIN_SSP_INI_EXT_IO_START:
+ return ("INI_EXT_IO_START");
+ break;
+ case PMCIN_DEVICE_HANDLE_ACCEPT:
+ return ("DEVICE_HANDLE_ACCEPT");
+ break;
+ case PMCIN_SSP_TGT_IO_START:
+ return ("TGT_IO_START");
+ break;
+ case PMCIN_SSP_TGT_RESPONSE_START:
+ return ("TGT_RESPONSE_START");
+ break;
+ case PMCIN_SSP_INI_EDC_EXT_IO_START:
+ return ("INI_EDC_EXT_IO_START");
+ break;
+ case PMCIN_SSP_INI_EDC_EXT_IO_START1:
+ return ("INI_EDC_EXT_IO_START1");
+ break;
+ case PMCIN_SSP_TGT_EDC_IO_START:
+ return ("TGT_EDC_IO_START");
+ break;
+ case PMCIN_SSP_ABORT:
+ return ("SSP_ABORT");
+ break;
+ case PMCIN_DEREGISTER_DEVICE_HANDLE:
+ return ("DEREGISTER_DEVICE_HANDLE");
+ break;
+ case PMCIN_GET_DEVICE_HANDLE:
+ return ("GET_DEVICE_HANDLE");
+ break;
+ case PMCIN_SMP_REQUEST:
+ return ("SMP_REQUEST");
+ break;
+ case PMCIN_SMP_RESPONSE:
+ return ("SMP_RESPONSE");
+ break;
+ case PMCIN_SMP_ABORT:
+ return ("SMP_ABORT");
+ break;
+ case PMCIN_ASSISTED_DISCOVERY:
+ return ("ASSISTED_DISCOVERY");
+ break;
+ case PMCIN_REGISTER_DEVICE:
+ return ("REGISTER_DEVICE");
+ break;
+ case PMCIN_SATA_HOST_IO_START:
+ return ("SATA_HOST_IO_START");
+ break;
+ case PMCIN_SATA_ABORT:
+ return ("SATA_ABORT");
+ break;
+ case PMCIN_LOCAL_PHY_CONTROL:
+ return ("LOCAL_PHY_CONTROL");
+ break;
+ case PMCIN_GET_DEVICE_INFO:
+ return ("GET_DEVICE_INFO");
+ break;
+ case PMCIN_TWI:
+ return ("TWI");
+ break;
+ case PMCIN_FW_FLASH_UPDATE:
+ return ("FW_FLASH_UPDATE");
+ break;
+ case PMCIN_SET_VPD:
+ return ("SET_VPD");
+ break;
+ case PMCIN_GPIO:
+ return ("GPIO");
+ break;
+ case PMCIN_SAS_DIAG_MODE_START_END:
+ return ("SAS_DIAG_MODE_START_END");
+ break;
+ case PMCIN_SAS_DIAG_EXECUTE:
+ return ("SAS_DIAG_EXECUTE");
+ break;
+ case PMCIN_SAW_HW_EVENT_ACK:
+ return ("SAS_HW_EVENT_ACK");
+ break;
+ case PMCIN_GET_TIME_STAMP:
+ return ("GET_TIME_STAMP");
+ break;
+ case PMCIN_PORT_CONTROL:
+ return ("PORT_CONTROL");
+ break;
+ case PMCIN_GET_NVMD_DATA:
+ return ("GET_NVMD_DATA");
+ break;
+ case PMCIN_SET_NVMD_DATA:
+ return ("SET_NVMD_DATA");
+ break;
+ case PMCIN_SET_DEVICE_STATE:
+ return ("SET_DEVICE_STATE");
+ break;
+ case PMCIN_GET_DEVICE_STATE:
+ return ("GET_DEVICE_STATE");
+ break;
+ default:
+ return ("UNKNOWN");
+ break;
+ }
+}
+
+static char *
+outbound_iomb_opcode(uint32_t opcode)
+{
+ switch (opcode) {
+ case PMCOUT_ECHO:
+ return ("ECHO");
+ break;
+ case PMCOUT_GET_INFO:
+ return ("GET_INFO");
+ break;
+ case PMCOUT_GET_VPD:
+ return ("GET_VPD");
+ break;
+ case PMCOUT_SAS_HW_EVENT:
+ return ("SAS_HW_EVENT");
+ break;
+ case PMCOUT_SSP_COMPLETION:
+ return ("SSP_COMPLETION");
+ break;
+ case PMCOUT_SMP_COMPLETION:
+ return ("SMP_COMPLETION");
+ break;
+ case PMCOUT_LOCAL_PHY_CONTROL:
+ return ("LOCAL_PHY_CONTROL");
+ break;
+ case PMCOUT_SAS_ASSISTED_DISCOVERY_EVENT:
+ return ("SAS_ASSISTED_DISCOVERY_SENT");
+ break;
+ case PMCOUT_SATA_ASSISTED_DISCOVERY_EVENT:
+ return ("SATA_ASSISTED_DISCOVERY_SENT");
+ break;
+ case PMCOUT_DEVICE_REGISTRATION:
+ return ("DEVICE_REGISTRATION");
+ break;
+ case PMCOUT_DEREGISTER_DEVICE_HANDLE:
+ return ("DEREGISTER_DEVICE_HANDLE");
+ break;
+ case PMCOUT_GET_DEVICE_HANDLE:
+ return ("GET_DEVICE_HANDLE");
+ break;
+ case PMCOUT_SATA_COMPLETION:
+ return ("SATA_COMPLETION");
+ break;
+ case PMCOUT_SATA_EVENT:
+ return ("SATA_EVENT");
+ break;
+ case PMCOUT_SSP_EVENT:
+ return ("SSP_EVENT");
+ break;
+ case PMCOUT_DEVICE_HANDLE_ARRIVED:
+ return ("DEVICE_HANDLE_ARRIVED");
+ break;
+ case PMCOUT_SMP_REQUEST_RECEIVED:
+ return ("SMP_REQUEST_RECEIVED");
+ break;
+ case PMCOUT_SSP_REQUEST_RECEIVED:
+ return ("SSP_REQUEST_RECEIVED");
+ break;
+ case PMCOUT_DEVICE_INFO:
+ return ("DEVICE_INFO");
+ break;
+ case PMCOUT_FW_FLASH_UPDATE:
+ return ("FW_FLASH_UPDATE");
+ break;
+ case PMCOUT_SET_VPD:
+ return ("SET_VPD");
+ break;
+ case PMCOUT_GPIO:
+ return ("GPIO");
+ break;
+ case PMCOUT_GPIO_EVENT:
+ return ("GPIO_EVENT");
+ break;
+ case PMCOUT_GENERAL_EVENT:
+ return ("GENERAL_EVENT");
+ break;
+ case PMCOUT_TWI:
+ return ("TWI");
+ break;
+ case PMCOUT_SSP_ABORT:
+ return ("SSP_ABORT");
+ break;
+ case PMCOUT_SATA_ABORT:
+ return ("SATA_ABORT");
+ break;
+ case PMCOUT_SAS_DIAG_MODE_START_END:
+ return ("SAS_DIAG_MODE_START_END");
+ break;
+ case PMCOUT_SAS_DIAG_EXECUTE:
+ return ("SAS_DIAG_EXECUTE");
+ break;
+ case PMCOUT_GET_TIME_STAMP:
+ return ("GET_TIME_STAMP");
+ break;
+ case PMCOUT_SAS_HW_EVENT_ACK_ACK:
+ return ("SAS_HW_EVENT_ACK_ACK");
+ break;
+ case PMCOUT_PORT_CONTROL:
+ return ("PORT_CONTROL");
+ break;
+ case PMCOUT_SKIP_ENTRIES:
+ return ("SKIP_ENTRIES");
+ break;
+ case PMCOUT_SMP_ABORT:
+ return ("SMP_ABORT");
+ break;
+ case PMCOUT_GET_NVMD_DATA:
+ return ("GET_NVMD_DATA");
+ break;
+ case PMCOUT_SET_NVMD_DATA:
+ return ("SET_NVMD_DATA");
+ break;
+ case PMCOUT_DEVICE_HANDLE_REMOVED:
+ return ("DEVICE_HANDLE_REMOVED");
+ break;
+ case PMCOUT_SET_DEVICE_STATE:
+ return ("SET_DEVICE_STATE");
+ break;
+ case PMCOUT_GET_DEVICE_STATE:
+ return ("GET_DEVICE_STATE");
+ break;
+ case PMCOUT_SET_DEVICE_INFO:
+ return ("SET_DEVICE_INFO");
+ break;
+ default:
+ return ("UNKNOWN");
+ break;
+ }
+}
+
+static void
+dump_one_qentry_outbound(uint32_t *qentryp, int idx)
+{
+ int qeidx;
+ uint32_t word0 = LE_32(*qentryp);
+
+ mdb_printf("Entry #%02d\n", idx);
+ mdb_inc_indent(2);
+
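+ /* Word 0 is the IOMB header: flags, OBID, category and opcode. */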
+ mdb_printf("Header: 0x%08x (", word0);
+ if (word0 & PMCS_IOMB_VALID) {
+ mdb_printf("VALID, ");
+ }
+ if (word0 & PMCS_IOMB_HIPRI) {
+ mdb_printf("HIPRI, ");
+ }
+ mdb_printf("OBID=%d, ",
+ (word0 & PMCS_IOMB_OBID_MASK) >> PMCS_IOMB_OBID_SHIFT);
+ mdb_printf("CAT=%s, ",
+ iomb_cat((word0 & PMCS_IOMB_CAT_MASK) >> PMCS_IOMB_CAT_SHIFT));
+ mdb_printf("OPCODE=%s",
+ outbound_iomb_opcode(word0 & PMCS_IOMB_OPCODE_MASK));
+ mdb_printf(")\n");
+
+ mdb_printf("Remaining Payload:\n");
+
+ mdb_inc_indent(2);
+ for (qeidx = 1; qeidx < (PMCS_QENTRY_SIZE / 4); qeidx++) {
+ mdb_printf("%08x ", LE_32(*(qentryp + qeidx)));
+ }
+ mdb_printf("\n");
+ mdb_dec_indent(4);
+}
+
+static void
+display_outbound_queues(struct pmcs_hw ss, uint_t verbose)
+{
+ int idx, qidx;
+ uintptr_t obqp;
+ uint32_t *cip;
+ uint32_t *qentryp = mdb_alloc(PMCS_QENTRY_SIZE, UM_SLEEP);
+ uint32_t last_consumed, oqpi;
+
+ mdb_printf("\n");
+ mdb_printf("Outbound Queues\n");
+ mdb_printf("---------------\n");
+
+ mdb_inc_indent(2);
+
+ for (qidx = 0; qidx < PMCS_NOQ; qidx++) {
+ obqp = (uintptr_t)ss.oqp[qidx];
+
+ if (obqp == NULL) {
+ mdb_printf("No outbound queue ptr for queue #%d\n",
+ qidx);
+ continue;
+ }
+
+ mdb_printf("Outbound Queue #%d (Queue Type = %s)\n", qidx,
+ obq_type(qidx));
+ /*
+ * Chip is the producer, so read the actual producer index
+ * and not the driver's version
+ */
+ cip = (uint32_t *)((void *)ss.cip);
+ if (MDB_RD(&oqpi, 4, cip + OQPI_BASE_OFFSET +
+ (qidx * 4)) == -1) {
+ mdb_warn("Couldn't read oqpi\n");
+ break;
+ }
+
+ mdb_printf("Producer index: %d Consumer index: %d\n\n",
+ LE_32(oqpi), ss.oqci[qidx]);
+ mdb_inc_indent(2);
+
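+ /* Back up one (with wrap) from the consumer index to the last consumed entry. */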
+ if (ss.oqci[qidx] == 0) {
+ last_consumed = ss.ioq_depth - 1;
+ } else {
+ last_consumed = ss.oqci[qidx] - 1;
+ }
+
+ if (!verbose) {
+ mdb_printf("Last processed entry:\n");
+ if (MDB_RD(qentryp, PMCS_QENTRY_SIZE,
+ (obqp + (PMCS_QENTRY_SIZE * last_consumed)))
+ == -1) {
+ mdb_warn("Couldn't read queue entry at 0x%p\n",
+ (obqp + (PMCS_QENTRY_SIZE *
+ last_consumed)));
+ break;
+ }
+ dump_one_qentry_outbound(qentryp, last_consumed);
+ mdb_printf("\n");
+ mdb_dec_indent(2);
+ continue;
+ }
+
+ for (idx = 0; idx < ss.ioq_depth; idx++) {
+ if (MDB_RD(qentryp, PMCS_QENTRY_SIZE,
+ (obqp + (PMCS_QENTRY_SIZE * idx))) == -1) {
+ mdb_warn("Couldn't read queue entry at 0x%p\n",
+ (obqp + (PMCS_QENTRY_SIZE * idx)));
+ break;
+ }
+ dump_one_qentry_outbound(qentryp, idx);
+ }
+
+ mdb_printf("\n");
+ mdb_dec_indent(2);
+ }
+
+ mdb_dec_indent(2);
+ mdb_free(qentryp, PMCS_QENTRY_SIZE);
+}
+
+static void
+dump_one_qentry_inbound(uint32_t *qentryp, int idx)
+{
+ int qeidx;
+ uint32_t word0 = LE_32(*qentryp);
+
+ mdb_printf("Entry #%02d\n", idx);
+ mdb_inc_indent(2);
+
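+ /* Word 0 is the IOMB header; word 1 carries the host tag (HTAG). */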
+ mdb_printf("Header: 0x%08x (", word0);
+ if (word0 & PMCS_IOMB_VALID) {
+ mdb_printf("VALID, ");
+ }
+ if (word0 & PMCS_IOMB_HIPRI) {
+ mdb_printf("HIPRI, ");
+ }
+ mdb_printf("OBID=%d, ",
+ (word0 & PMCS_IOMB_OBID_MASK) >> PMCS_IOMB_OBID_SHIFT);
+ mdb_printf("CAT=%s, ",
+ iomb_cat((word0 & PMCS_IOMB_CAT_MASK) >> PMCS_IOMB_CAT_SHIFT));
+ mdb_printf("OPCODE=%s",
+ inbound_iomb_opcode(word0 & PMCS_IOMB_OPCODE_MASK));
+ mdb_printf(")\n");
+
+ mdb_printf("HTAG: 0x%08x\n", LE_32(*(qentryp + 1)));
+ mdb_printf("Remaining Payload:\n");
+
+ mdb_inc_indent(2);
+ for (qeidx = 2; qeidx < (PMCS_QENTRY_SIZE / 4); qeidx++) {
+ mdb_printf("%08x ", LE_32(*(qentryp + qeidx)));
+ }
+ mdb_printf("\n");
+ mdb_dec_indent(4);
+}
+
+static void
+display_inbound_queues(struct pmcs_hw ss, uint_t verbose)
+{
+ int idx, qidx, iqci, last_consumed;
+ uintptr_t ibqp;
+ uint32_t *qentryp = mdb_alloc(PMCS_QENTRY_SIZE, UM_SLEEP);
+ uint32_t *cip;
+
+ mdb_printf("\n");
+ mdb_printf("Inbound Queues\n");
+ mdb_printf("--------------\n");
+
+ mdb_inc_indent(2);
+
+ for (qidx = 0; qidx < PMCS_NIQ; qidx++) {
+ ibqp = (uintptr_t)ss.iqp[qidx];
+
+ if (ibqp == NULL) {
+ mdb_printf("No inbound queue ptr for queue #%d\n",
+ qidx);
+ continue;
+ }
+
+ mdb_printf("Inbound Queue #%d (Queue Type = %s)\n", qidx,
+ ibq_type(qidx));
+
+ cip = (uint32_t *)((void *)ss.cip);
+ if (MDB_RD(&iqci, 4, cip + (qidx * 4)) == -1) {
+ mdb_warn("Couldn't read iqci\n");
+ break;
+ }
+ iqci = LE_32(iqci);
+
+ mdb_printf("Producer index: %d Consumer index: %d\n\n",
+ ss.shadow_iqpi[qidx], iqci);
+ mdb_inc_indent(2);
+
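+ /* Back up one (with wrap) from the chip's consumer index to the last consumed entry. */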
+ if (iqci == 0) {
+ last_consumed = ss.ioq_depth - 1;
+ } else {
+ last_consumed = iqci - 1;
+ }
+
+ if (!verbose) {
+ mdb_printf("Last processed entry:\n");
+ if (MDB_RD(qentryp, PMCS_QENTRY_SIZE,
+ (ibqp + (PMCS_QENTRY_SIZE * last_consumed)))
+ == -1) {
+ mdb_warn("Couldn't read queue entry at 0x%p\n",
+ (ibqp + (PMCS_QENTRY_SIZE *
+ last_consumed)));
+ break;
+ }
+ dump_one_qentry_inbound(qentryp, last_consumed);
+ mdb_printf("\n");
+ mdb_dec_indent(2);
+ continue;
+ }
+
+ for (idx = 0; idx < ss.ioq_depth; idx++) {
+ if (MDB_RD(qentryp, PMCS_QENTRY_SIZE,
+ (ibqp + (PMCS_QENTRY_SIZE * idx))) == -1) {
+ mdb_warn("Couldn't read queue entry at 0x%p\n",
+ (ibqp + (PMCS_QENTRY_SIZE * idx)));
+ break;
+ }
+ dump_one_qentry_inbound(qentryp, idx);
+ }
+
+ mdb_printf("\n");
+ mdb_dec_indent(2);
+ }
+
+ mdb_dec_indent(2);
+ mdb_free(qentryp, PMCS_QENTRY_SIZE);
+}
+
+static void
+display_phy(struct pmcs_phy phy, int verbose, int totals_only)
+{
+ char *dtype, *speed;
+ char *yes = "Yes";
+ char *no = "No";
+ char *cfgd = no;
+ char *apend = no;
+ char *asent = no;
+ char *dead = no;
+ char *changed = no;
+
+ switch (phy.dtype) {
+ case NOTHING:
+ dtype = "None";
+ break;
+ case SATA:
+ dtype = "SATA";
+ if (phy.configured) {
+ ++sata_phys;
+ }
+ break;
+ case SAS:
+ dtype = "SAS";
+ if (phy.configured) {
+ ++sas_phys;
+ }
+ break;
+ case EXPANDER:
+ dtype = "EXP";
+ if (phy.configured) {
+ ++exp_phys;
+ }
+ break;
+ }
+
+ if (phy.dtype == NOTHING) {
+ empty_phys++;
+ } else if ((phy.dtype == EXPANDER) && phy.configured) {
+ num_expanders++;
+ }
+
+ if (totals_only) {
+ return;
+ }
+
+ switch (phy.link_rate) {
+ case SAS_LINK_RATE_1_5GBIT:
+ speed = "1.5Gb/s";
+ break;
+ case SAS_LINK_RATE_3GBIT:
+ speed = "3 Gb/s";
+ break;
+ case SAS_LINK_RATE_6GBIT:
+ speed = "6 Gb/s";
+ break;
+ default:
+ speed = "N/A";
+ break;
+ }
+
+ if ((phy.dtype != NOTHING) || verbose) {
+ print_sas_address(&phy);
+
+ if (phy.device_id != PMCS_INVALID_DEVICE_ID) {
+ mdb_printf(" %3d %4d %6s %4s ",
+ phy.device_id, phy.phynum, speed, dtype);
+ } else {
+ mdb_printf(" N/A %4d %6s %4s ",
+ phy.phynum, speed, dtype);
+ }
+
+ if (verbose) {
+ if (phy.abort_sent) {
+ asent = yes;
+ }
+ if (phy.abort_pending) {
+ apend = yes;
+ }
+ if (phy.configured) {
+ cfgd = yes;
+ }
+ if (phy.dead) {
+ dead = yes;
+ }
+ if (phy.changed) {
+ changed = yes;
+ }
+
+ mdb_printf("%-4s %-4s %-4s %-4s %-4s %3d "
+ "0x%p ", cfgd, apend, asent,
+ changed, dead, phy.ref_count, phy.phy_lock);
+ }
+
+ mdb_printf("Path: %s\n", phy.path);
+ }
+}
+
+static void
+display_phys(struct pmcs_hw ss, int verbose, struct pmcs_phy *parent, int level,
+ int totals_only)
+{
+ pmcs_phy_t phy;
+ pmcs_phy_t *pphy = parent;
+
+ mdb_inc_indent(3);
+
+ if (parent == NULL) {
+ pphy = (pmcs_phy_t *)ss.root_phys;
+ } else {
+ pphy = (pmcs_phy_t *)parent;
+ }
+
+ if (level == 0) {
+ sas_phys = 0;
+ sata_phys = 0;
+ exp_phys = 0;
+ num_expanders = 0;
+ empty_phys = 0;
+ }
+
+ if (!totals_only) {
+ if (level == 0) {
+ mdb_printf("PHY information\n");
+ }
+ mdb_printf("--------\n");
+ mdb_printf("Level %2d\n", level);
+ mdb_printf("--------\n");
+ mdb_printf("SAS Address Hdl Phy# Speed Type ");
+
+ if (verbose) {
+ mdb_printf("Cfgd AbtP AbtS Chgd Dead Ref Lock\n");
+ } else {
+ mdb_printf("\n");
+ }
+ }
+
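+ /* Walk this level's sibling chain, recursing into any child PHYs. */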
+ while (pphy) {
+ if (MDB_RD(&phy, sizeof (phy), (uintptr_t)pphy) == -1) {
+ NOREAD(pmcs_phy_t, phy);
+ break;
+ }
+
+ display_phy(phy, verbose, totals_only);
+
+ if (phy.children) {
+ display_phys(ss, verbose, phy.children, level + 1,
+ totals_only);
+ if (!totals_only) {
+ mdb_printf("\n");
+ }
+ }
+
+ pphy = phy.sibling;
+ }
+
+ mdb_dec_indent(3);
+
+ if (level == 0) {
+ if (verbose) {
+ mdb_printf("%19s %d (%d SAS + %d SATA + %d SMP) "
+ "(+%d subsidiary + %d empty)\n", "Occupied PHYs:",
+ (sas_phys + sata_phys + num_expanders),
+ sas_phys, sata_phys, num_expanders,
+ (exp_phys - num_expanders), empty_phys);
+ } else {
+ mdb_printf("%19s %d (%d SAS + %d SATA + %d SMP)\n",
+ "Occupied PHYs:",
+ (sas_phys + sata_phys + num_expanders),
+ sas_phys, sata_phys, num_expanders);
+ }
+ }
+}
+
+/*
+ * MAX_INST_STRLEN is the largest string size from which we will attempt
+ * to convert to an instance number. The string will be formed up as
+ * "0t<inst>\0" so that mdb_strtoull can parse it properly.
+ */
+#define MAX_INST_STRLEN 8
+
+static int
+pmcs_dump_tracelog(boolean_t filter, int instance)
+{
+ pmcs_tbuf_t *tbuf_addr;
+ uint_t tbuf_idx;
+ pmcs_tbuf_t tbuf;
+ boolean_t wrap, elem_filtered;
+ uint_t start_idx, elems_to_print, idx, tbuf_num_elems;
+ char *bufp;
+ char elem_inst[MAX_INST_STRLEN], ei_idx;
+
+ /* Get the address of the first element */
+ if (mdb_readvar(&tbuf_addr, "pmcs_tbuf") == -1) {
+ mdb_warn("can't read pmcs_tbuf");
+ return (DCMD_ERR);
+ }
+
+ /* Get the total number */
+ if (mdb_readvar(&tbuf_num_elems, "pmcs_tbuf_num_elems") == -1) {
+ mdb_warn("can't read pmcs_tbuf_num_elems");
+ return (DCMD_ERR);
+ }
+
+ /* Get the current index */
+ if (mdb_readvar(&tbuf_idx, "pmcs_tbuf_idx") == -1) {
+ mdb_warn("can't read pmcs_tbuf_idx");
+ return (DCMD_ERR);
+ }
+
+ /* Indicator as to whether the buffer has wrapped */
+ if (mdb_readvar(&wrap, "pmcs_tbuf_wrap") == -1) {
+ mdb_warn("can't read pmcs_tbuf_wrap");
+ return (DCMD_ERR);
+ }
+
+ /* Figure out where we start and stop */
+ if (wrap) {
+ start_idx = tbuf_idx;
+ elems_to_print = tbuf_num_elems;
+ } else {
+ start_idx = 0;
+ elems_to_print = tbuf_idx;
+ }
+
+ idx = start_idx;
+
+ /* Dump the buffer contents */
+ while (elems_to_print != 0) {
+ if (MDB_RD(&tbuf, sizeof (pmcs_tbuf_t), (tbuf_addr + idx))
+ == -1) {
+ NOREAD(tbuf, (tbuf_addr + idx));
+ return (DCMD_ERR);
+ }
+
+ elem_filtered = B_FALSE;
+
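+ /* When filtering, match the instance number in the message prefix. */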
+ if (filter) {
+ bufp = tbuf.buf;
+ /* Skip the driver name */
+ while (*bufp < '0' || *bufp > '9') {
+ bufp++;
+ }
+
+ ei_idx = 0;
+ elem_inst[ei_idx++] = '0';
+ elem_inst[ei_idx++] = 't';
+ while (*bufp != ':' && ei_idx < (MAX_INST_STRLEN - 1)) {
+ elem_inst[ei_idx++] = *bufp;
+ bufp++;
+ }
+ elem_inst[ei_idx] = 0;
+
+ /* Get the instance */
+ if ((int)mdb_strtoull(elem_inst) != instance) {
+ elem_filtered = B_TRUE;
+ }
+ }
+
+ if (!elem_filtered) {
+ mdb_printf("%Y.%09ld %s\n", tbuf.timestamp, tbuf.buf);
+ }
+
+ --elems_to_print;
+ if (++idx == tbuf_num_elems) {
+ idx = 0;
+ }
+ }
+
+ return (DCMD_OK);
+}
+
+/*
+ * Walkers
+ */
+static int
+targets_walk_i(mdb_walk_state_t *wsp)
+{
+ if (wsp->walk_addr == NULL) {
+ mdb_warn("Can not perform global walk\n");
+ return (WALK_ERR);
+ }
+
+ /*
+ * Address provided belongs to HBA softstate. Get the targets pointer
+ * to begin the walk.
+ */
+ if (mdb_vread(&ss, sizeof (pmcs_hw_t), wsp->walk_addr) !=
+ sizeof (pmcs_hw_t)) {
+ mdb_warn("Unable to read HBA softstate\n");
+ return (WALK_ERR);
+ }
+
+ if (targets == NULL) {
+ targets = mdb_alloc(sizeof (targets) * ss.max_dev, UM_SLEEP);
+ }
+
+ if (MDB_RD(targets, sizeof (targets) * ss.max_dev, ss.targets) == -1) {
+ NOREAD(targets, ss.targets);
+ return (WALK_ERR);
+ }
+
+ target_idx = 0;
+ wsp->walk_addr = (uintptr_t)(targets[0]);
+ wsp->walk_data = mdb_alloc(sizeof (pmcs_xscsi_t), UM_SLEEP);
+
+ return (WALK_NEXT);
+}
+
+static int
+targets_walk_s(mdb_walk_state_t *wsp)
+{
+ int status;
+
+ if (target_idx == ss.max_dev) {
+ return (WALK_DONE);
+ }
+
+ if (mdb_vread(wsp->walk_data, sizeof (pmcs_xscsi_t),
+ wsp->walk_addr) == -1) {
+ mdb_warn("Failed to read target at %p", (void *)wsp->walk_addr);
+ return (WALK_DONE);
+ }
+
+ status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
+ wsp->walk_cbdata);
+
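+ /* Skip NULL entries to find the next target in the array. */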
+ do {
+ wsp->walk_addr = (uintptr_t)(targets[++target_idx]);
+ } while ((wsp->walk_addr == NULL) && (target_idx < ss.max_dev));
+
+ if (target_idx == ss.max_dev) {
+ return (WALK_DONE);
+ }
+
+ return (status);
+}
+
+static void
+targets_walk_f(mdb_walk_state_t *wsp)
+{
+ mdb_free(wsp->walk_data, sizeof (pmcs_xscsi_t));
+}
+
+
+static pmcs_phy_t *
+pmcs_next_sibling(pmcs_phy_t *phyp)
+{
+ pmcs_phy_t parent;
+
+ /*
+ * First, if this is a root PHY, there are no more siblings
+ */
+ if (phyp->level == 0) {
+ return (NULL);
+ }
+
+ /*
+ * Otherwise, next sibling is the parent's sibling
+ */
+ while (phyp->level > 0) {
+ if (mdb_vread(&parent, sizeof (pmcs_phy_t),
+ (uintptr_t)phyp->parent) == -1) {
+ mdb_warn("pmcs_next_sibling: Failed to read PHY at %p",
+ (void *)phyp->parent);
+ return (NULL);
+ }
+
+ if (parent.sibling != NULL) {
+ break;
+ }
+
+ phyp = phyp->parent;
+ }
+
+ return (parent.sibling);
+}
+
+static int
+phy_walk_i(mdb_walk_state_t *wsp)
+{
+ if (wsp->walk_addr == NULL) {
+ mdb_warn("Can not perform global walk\n");
+ return (WALK_ERR);
+ }
+
+ /*
+ * Address provided belongs to HBA softstate. Get the root_phys pointer
+ * to begin the walk.
+ */
+ if (mdb_vread(&ss, sizeof (pmcs_hw_t), wsp->walk_addr) !=
+ sizeof (pmcs_hw_t)) {
+ mdb_warn("Unable to read HBA softstate\n");
+ return (WALK_ERR);
+ }
+
+ wsp->walk_addr = (uintptr_t)(ss.root_phys);
+ wsp->walk_data = mdb_alloc(sizeof (pmcs_phy_t), UM_SLEEP);
+
+ return (WALK_NEXT);
+}
+
+static int
+phy_walk_s(mdb_walk_state_t *wsp)
+{
+ pmcs_phy_t *phyp, *nphyp;
+ int status;
+
+ if (mdb_vread(wsp->walk_data, sizeof (pmcs_phy_t),
+ wsp->walk_addr) == -1) {
+ mdb_warn("phy_walk_s: Failed to read PHY at %p",
+ (void *)wsp->walk_addr);
+ return (WALK_DONE);
+ }
+
+ status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
+ wsp->walk_cbdata);
+
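+ /* Depth-first: visit children before moving on to siblings. */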
+ phyp = (pmcs_phy_t *)wsp->walk_data;
+ if (phyp->children) {
+ wsp->walk_addr = (uintptr_t)(phyp->children);
+ } else {
+ wsp->walk_addr = (uintptr_t)(phyp->sibling);
+ }
+
+ if (wsp->walk_addr == NULL) {
+ /*
+ * We reached the end of this sibling list. Trudge back up
+ * to the parent and find the next sibling after the expander
+ * we just finished traversing, if there is one.
+ */
+ nphyp = pmcs_next_sibling(phyp);
+
+ if (nphyp == NULL) {
+ return (WALK_DONE);
+ }
+
+ wsp->walk_addr = (uintptr_t)nphyp;
+ }
+
+ return (status);
+}
+
+static void
+phy_walk_f(mdb_walk_state_t *wsp)
+{
+ mdb_free(wsp->walk_data, sizeof (pmcs_phy_t));
+}
+
+static int
+pmcs_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ struct pmcs_hw ss;
+ uint_t verbose = FALSE;
+ uint_t phy_info = FALSE;
+ uint_t hw_info = FALSE;
+ uint_t target_info = FALSE;
+ uint_t work_info = FALSE;
+ uint_t ic_info = FALSE;
+ uint_t iport_info = FALSE;
+ uint_t waitqs_info = FALSE;
+ uint_t tracelog = FALSE;
+ uint_t ibq = FALSE;
+ uint_t obq = FALSE;
+ uint_t tgt_phy_count = FALSE;
+ int rv = DCMD_OK;
+ void *pmcs_state;
+ char *state_str;
+ struct dev_info dip;
+
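+ /* With no address, walk the soft state and run this dcmd per instance. */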
+ if (!(flags & DCMD_ADDRSPEC)) {
+ pmcs_state = NULL;
+ if (mdb_readvar(&pmcs_state, "pmcs_softc_state") == -1) {
+ mdb_warn("can't read pmcs_softc_state");
+ return (DCMD_ERR);
+ }
+ if (mdb_pwalk_dcmd("genunix`softstate", "pmcs`pmcs", argc, argv,
+ (uintptr_t)pmcs_state) == -1) {
+ mdb_warn("mdb_pwalk_dcmd failed");
+ return (DCMD_ERR);
+ }
+ return (DCMD_OK);
+ }
+
+ if (mdb_getopts(argc, argv,
+ 'h', MDB_OPT_SETBITS, TRUE, &hw_info,
+ 'i', MDB_OPT_SETBITS, TRUE, &ic_info,
+ 'I', MDB_OPT_SETBITS, TRUE, &iport_info,
+ 'l', MDB_OPT_SETBITS, TRUE, &tracelog,
+ 'p', MDB_OPT_SETBITS, TRUE, &phy_info,
+ 'q', MDB_OPT_SETBITS, TRUE, &ibq,
+ 'Q', MDB_OPT_SETBITS, TRUE, &obq,
+ 't', MDB_OPT_SETBITS, TRUE, &target_info,
+ 'T', MDB_OPT_SETBITS, TRUE, &tgt_phy_count,
+ 'v', MDB_OPT_SETBITS, TRUE, &verbose,
+ 'w', MDB_OPT_SETBITS, TRUE, &work_info,
+ 'W', MDB_OPT_SETBITS, TRUE, &waitqs_info,
+ NULL) != argc)
+ return (DCMD_USAGE);
+
+ if (MDB_RD(&ss, sizeof (ss), addr) == -1) {
+ NOREAD(pmcs_hw_t, addr);
+ return (DCMD_ERR);
+ }
+
+ if (MDB_RD(&dip, sizeof (struct dev_info), ss.dip) == -1) {
+ NOREAD(struct dev_info, ss.dip);
+ return (DCMD_ERR);
+ }
+
+ /*
+ * Dumping the trace log is special. It's global, not per-HBA.
+ * Thus, a provided address is ignored. In addition, other options
+ * cannot be specified at the same time.
+ */
+
+ if (tracelog) {
+ if (hw_info || ic_info || iport_info || phy_info || work_info ||
+ target_info || waitqs_info || ibq || obq || tgt_phy_count) {
+ return (DCMD_USAGE);
+ }
+
+ if ((flags & DCMD_ADDRSPEC) && !(flags & DCMD_LOOP)) {
+ return (pmcs_dump_tracelog(B_TRUE, dip.devi_instance));
+ } else if (flags & DCMD_LOOPFIRST) {
+ return (pmcs_dump_tracelog(B_FALSE, 0));
+ } else {
+ return (DCMD_OK);
+ }
+ }
+
+ /* Tracelog handling is complete; display the HBA summary and details. */
+
+ if (((flags & DCMD_ADDRSPEC) && !(flags & DCMD_LOOP)) ||
+ (flags & DCMD_LOOPFIRST) || phy_info || target_info || hw_info ||
+ work_info || waitqs_info || ibq || obq || tgt_phy_count) {
+ if ((flags & DCMD_LOOP) && !(flags & DCMD_LOOPFIRST))
+ mdb_printf("\n");
+ mdb_printf("%16s %9s %4s B C WorkFlags wserno DbgMsk %16s\n",
+ "Address", "State", "Inst", "DIP");
+ mdb_printf("================================="
+ "============================================\n");
+ }
+
+ switch (ss.state) {
+ case STATE_NIL:
+ state_str = "Invalid";
+ break;
+ case STATE_PROBING:
+ state_str = "Probing";
+ break;
+ case STATE_RUNNING:
+ state_str = "Running";
+ break;
+ case STATE_UNPROBING:
+ state_str = "Unprobing";
+ break;
+ case STATE_DEAD:
+ state_str = "Dead";
+ break;
+ }
+
+ mdb_printf("%16p %9s %4d %1d %1d 0x%08x 0x%04x 0x%04x %16p\n", addr,
+ state_str, dip.devi_instance, ss.blocked, ss.configuring,
+ ss.work_flags, ss.wserno, ss.debug_mask, ss.dip);
+ mdb_printf("\n");
+
+ mdb_inc_indent(4);
+
+ if (waitqs_info)
+ display_waitqs(ss, verbose);
+
+ if (hw_info)
+ display_hwinfo(ss, verbose);
+
+ if (phy_info || tgt_phy_count)
+ display_phys(ss, verbose, NULL, 0, tgt_phy_count);
+
+ if (target_info || tgt_phy_count)
+ display_targets(ss, verbose, tgt_phy_count);
+
+ if (work_info)
+ display_work(ss, verbose);
+
+ if (ic_info)
+ display_ic(ss, verbose);
+
+ if (ibq)
+ display_inbound_queues(ss, verbose);
+
+ if (obq)
+ display_outbound_queues(ss, verbose);
+
+ if (iport_info)
+ display_iport(ss, addr, verbose);
+
+ mdb_dec_indent(4);
+
+ return (rv);
+}
+
+void
+pmcs_help()
+{
+ mdb_printf("Prints summary information about each pmcs instance.\n"
+ " -h: Print more detailed hardware information\n"
+ " -i: Print interrupt coalescing information\n"
+ " -I: Print information about each iport\n"
+ " -l: Dump the trace log (cannot be used with other options)\n"
+ " -p: Print information about each attached PHY\n"
+ " -q: Dump inbound queues\n"
+ " -Q: Dump outbound queues\n"
+ " -t: Print information about each known target\n"
+ " -T: Print target and PHY count summary\n"
+ " -w: Dump work structures\n"
+ " -W: List pmcs cmds waiting on various queues\n"
+ " -v: Add verbosity to the above options\n");
+}
+
+static const mdb_dcmd_t dcmds[] = {
+ { "pmcs", "?[-hiIpQqtTwWv] | -l", "print pmcs information",
+ pmcs_dcmd, pmcs_help
+ },
+ { NULL }
+};
+
+static const mdb_walker_t walkers[] = {
+ { "pmcs_targets", "walk target structures",
+ targets_walk_i, targets_walk_s, targets_walk_f },
+ { "pmcs_phys", "walk PHY structures",
+ phy_walk_i, phy_walk_s, phy_walk_f },
+ { NULL }
+};
+
+static const mdb_modinfo_t modinfo = {
+ MDB_API_VERSION, dcmds, walkers
+};
+
+const mdb_modinfo_t *
+_mdb_init(void)
+{
+ return (&modinfo);
+}
diff --git a/usr/src/cmd/mdb/intel/amd64/pmcs/Makefile b/usr/src/cmd/mdb/intel/amd64/pmcs/Makefile
new file mode 100644
index 0000000000..e016f01a1d
--- /dev/null
+++ b/usr/src/cmd/mdb/intel/amd64/pmcs/Makefile
@@ -0,0 +1,38 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+MODULE = pmcs.so
+MDBTGT = kvm
+
+MODSRCS = pmcs.c
+
+include $(SRC)/cmd/Makefile.cmd
+include $(SRC)/cmd/Makefile.cmd.64
+include $(SRC)/cmd/mdb/intel/Makefile.amd64
+include $(SRC)/cmd/mdb/Makefile.module
+
+C99MODE = -xc99=%all
+
+CPPFLAGS += -I$(SRC)/uts/common
diff --git a/usr/src/cmd/mdb/intel/ia32/pmcs/Makefile b/usr/src/cmd/mdb/intel/ia32/pmcs/Makefile
new file mode 100644
index 0000000000..f68f0e6f9e
--- /dev/null
+++ b/usr/src/cmd/mdb/intel/ia32/pmcs/Makefile
@@ -0,0 +1,37 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+MODULE = pmcs.so
+MDBTGT = kvm
+
+MODSRCS = pmcs.c
+
+include $(SRC)/cmd/Makefile.cmd
+include $(SRC)/cmd/mdb/intel/Makefile.ia32
+include $(SRC)/cmd/mdb/Makefile.module
+
+C99MODE = -xc99=%all
+
+CPPFLAGS += -I$(SRC)/uts/common
diff --git a/usr/src/cmd/mdb/sparc/v9/pmcs/Makefile b/usr/src/cmd/mdb/sparc/v9/pmcs/Makefile
new file mode 100644
index 0000000000..27c8563bb2
--- /dev/null
+++ b/usr/src/cmd/mdb/sparc/v9/pmcs/Makefile
@@ -0,0 +1,38 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+MODULE = pmcs.so
+MDBTGT = kvm
+
+MODSRCS = pmcs.c
+
+include $(SRC)/cmd/Makefile.cmd
+include $(SRC)/cmd/Makefile.cmd.64
+include $(SRC)/cmd/mdb/sparc/Makefile.sparcv9
+include $(SRC)/cmd/mdb/Makefile.module
+
+C99MODE = -xc99=%all
+
+CPPFLAGS += -I$(SRC)/uts/common
diff --git a/usr/src/cmd/mpathadm/mpathadm.c b/usr/src/cmd/mpathadm/mpathadm.c
index c6f413fda0..468f772fd4 100644
--- a/usr/src/cmd/mpathadm/mpathadm.c
+++ b/usr/src/cmd/mpathadm/mpathadm.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -228,8 +228,7 @@ listMpathSupport(int operandLen, char *operand[])
MP_OID_LIST *pPluginOidList;
boolean_t shown = B_FALSE;
/* number of plugins listed */
- int i,
- op;
+ int i, op;
if ((mpstatus = MP_GetPluginOidList(&pPluginOidList))
!= MP_STATUS_SUCCESS) {
@@ -276,11 +275,12 @@ listMpathSupport(int operandLen, char *operand[])
TEXT_LB_MPATH_SUPPORT),
pluginProps.fileName);
} else {
- /* LINTED E_SEC_PRINTF_VAR_FMT */
- (void) fprintf(stderr,
- getTextString(
+ /* begin back-up indentation */
+ /* LINTED E_SEC_PRINTF_VAR_FMT */
+ (void) fprintf(stderr, getTextString(
ERR_CANT_FIND_MPATH_SUPPORT_WITH_NAME),
- operand[op]);
+ operand[op]);
+ /* end back-up indentation */
(void) printf("\n");
}
}
@@ -311,9 +311,7 @@ showMpathSupport(int operandLen, char *operand[])
MP_OID_LIST *deviceOidListArray;
MP_DEVICE_PRODUCT_PROPERTIES devProps;
boolean_t bListIt = B_FALSE;
- int op,
- i,
- j;
+ int op, i, j;
MP_LOAD_BALANCE_TYPE lb;
@@ -330,7 +328,7 @@ showMpathSupport(int operandLen, char *operand[])
}
for (op = 0; op < operandLen; op++) {
- bListIt = B_FALSE;
+ bListIt = B_FALSE;
for (i = 0; i < pPluginOidList->oidCount; i++) {
@@ -377,11 +375,11 @@ showMpathSupport(int operandLen, char *operand[])
getTextString(TEXT_LB_DEFAULT_LB));
/* don't ignore load balance type none. */
if (pluginProps.defaultloadBalanceType == 0) {
- (void) printf("%s",
- getTextString(TEXT_LBTYPE_NONE));
+ (void) printf("%s",
+ getTextString(TEXT_LBTYPE_NONE));
} else {
- displayLoadBalanceString(
- pluginProps.defaultloadBalanceType);
+ displayLoadBalanceString(
+ pluginProps.defaultloadBalanceType);
}
(void) printf("\n");
@@ -391,19 +389,20 @@ showMpathSupport(int operandLen, char *operand[])
/* check each bit, display string if found set */
if (pluginProps.supportedLoadBalanceTypes == 0) {
(void) printf("\t\t%s\n",
- getTextString(TEXT_LBTYPE_NONE));
+ getTextString(TEXT_LBTYPE_NONE));
} else {
- lb = 1;
- do {
- if (0 != (lb &
- pluginProps.supportedLoadBalanceTypes)) {
- (void) printf("\t\t");
- displayLoadBalanceString(lb &
- pluginProps.supportedLoadBalanceTypes);
- (void) printf("\n");
- }
- lb = lb<<1;
- } while (lb < 0x80000000);
+ lb = 1;
+ do {
+ if (0 != (lb & pluginProps.
+ supportedLoadBalanceTypes)) {
+ (void) printf("\t\t");
+ displayLoadBalanceString(lb &
+ pluginProps.
+ supportedLoadBalanceTypes);
+ (void) printf("\n");
+ }
+ lb = lb<<1;
+ } while (lb < 0x80000000);
}
(void) printf("\t%s %s\n",
@@ -493,9 +492,10 @@ showMpathSupport(int operandLen, char *operand[])
for (j = 0; j < deviceOidListArray->oidCount;
j++) {
- (void) memset(&devProps, 0,
- sizeof (MP_DEVICE_PRODUCT_PROPERTIES));
-
+ /* begin backup indentation */
+ (void) memset(&devProps, 0,
+ sizeof (MP_DEVICE_PRODUCT_PROPERTIES));
+ /* end backup indentation */
if ((mpstatus =
MP_GetDeviceProductProperties(\
deviceOidListArray->oids[j],
@@ -520,24 +520,24 @@ showMpathSupport(int operandLen, char *operand[])
(void) printf("\n\t\t%s\n",
getTextString(
TEXT_LB_SUPPORTED_LB));
- if (devProps.supportedLoadBalanceTypes == 0) {
- (void) printf("\t\t\t%s\n",
- getTextString(TEXT_LBTYPE_NONE));
- } else {
- lb = 1;
- do {
+ /* begin back-up indentation */
+ if (devProps.supportedLoadBalanceTypes == 0) {
+ (void) printf("\t\t\t%s\n",
+ getTextString(TEXT_LBTYPE_NONE));
+ } else {
+ lb = 1;
+ do {
if (0 != (lb &
devProps.supportedLoadBalanceTypes)) {
(void) printf("\t\t\t");
displayLoadBalanceString(lb &
- devProps.supportedLoadBalanceTypes);
+ devProps.supportedLoadBalanceTypes);
(void) printf("\n");
}
lb = lb<<1;
- } while (lb < 0x80000000);
- }
-
-
+ } while (lb < 0x80000000);
+ }
+ /* end back-up indentation */
(void) printf("\n");
} else {
@@ -582,18 +582,14 @@ showMpathSupport(int operandLen, char *operand[])
int
modifyMpathSupport(int operandLen, char *operand[], cmdOptions_t *options)
{
- MP_STATUS mpstatus = MP_STATUS_SUCCESS;
- MP_PLUGIN_PROPERTIES pluginProps;
- MP_OID_LIST *pPluginOidList;
- boolean_t bFoundIt = B_FALSE;
- MP_OID pluginOid;
- cmdOptions_t *optionList = options;
- char *cmdStr =
- getTextString(
- TEXT_UNKNOWN);
- int op,
- i,
- lbValue;
+ MP_STATUS mpstatus = MP_STATUS_SUCCESS;
+ MP_PLUGIN_PROPERTIES pluginProps;
+ MP_OID_LIST *pPluginOidList;
+ boolean_t bFoundIt = B_FALSE;
+ MP_OID pluginOid;
+ cmdOptions_t *optionList = options;
+ char *cmdStr = getTextString(TEXT_UNKNOWN);
+ int op, i, lbValue;
if ((mpstatus = MP_GetPluginOidList(&pPluginOidList))
!= MP_STATUS_SUCCESS) {
@@ -645,8 +641,8 @@ modifyMpathSupport(int operandLen, char *operand[], cmdOptions_t *options)
mpstatus =
MP_EnableAutoFailback(pluginOid);
} else if (0 ==
- strcasecmp(optionList->optarg,
- getTextString(TEXT_OFF))) {
+ strcasecmp(optionList->optarg,
+ getTextString(TEXT_OFF))) {
mpstatus =
MP_DisableAutoFailback(pluginOid);
} else {
@@ -667,8 +663,8 @@ modifyMpathSupport(int operandLen, char *operand[], cmdOptions_t *options)
mpstatus =
MP_EnableAutoProbing(pluginOid);
} else if (0 ==
- strcasecmp(optionList->optarg,
- getTextString(TEXT_OFF))) {
+ strcasecmp(optionList->optarg,
+ getTextString(TEXT_OFF))) {
mpstatus =
MP_DisableAutoProbing(pluginOid);
} else {
@@ -738,29 +734,18 @@ modifyMpathSupport(int operandLen, char *operand[], cmdOptions_t *options)
int
listLogicalUnit(int operandLen, char *operand[], cmdOptions_t *options)
{
- MP_STATUS mpstatus = MP_STATUS_SUCCESS;
- MP_MULTIPATH_LOGICAL_UNIT_PROPERTIES luProps;
- MP_PLUGIN_PROPERTIES pluginProps;
- MP_TARGET_PORT_PROPERTIES tportProps;
- MP_OID_LIST *pPluginOidList,
- *pLogicalUnitOidList,
- *pTpgOidListArray,
- *pTportOidListArray;
- boolean_t bListIt = B_FALSE,
- bFoundOperand = B_FALSE,
- *bFoundOption,
- bContinue = B_FALSE;
- MP_OID luOid;
- cmdOptions_t *optionList = options;
- int opListCount = 0,
- i = 0,
- lu = 0,
- tpg = 0,
- opoffset = 0,
- j = 0,
- opStart = 0,
- opEnd = 0,
- opIndex;
+ MP_STATUS mpstatus = MP_STATUS_SUCCESS;
+ MP_MULTIPATH_LOGICAL_UNIT_PROPERTIES luProps;
+ MP_PLUGIN_PROPERTIES pluginProps;
+ MP_TARGET_PORT_PROPERTIES tportProps;
+ MP_OID_LIST *pPluginOidList, *pLogicalUnitOidList,
+ *pTpgOidListArray, *pTportOidListArray;
+ boolean_t bListIt = B_FALSE, bFoundOperand = B_FALSE,
+ *bFoundOption, bContinue = B_FALSE;
+ MP_OID luOid;
+ cmdOptions_t *optionList = options;
+ int opListCount = 0, i = 0, lu = 0, tpg = 0, opoffset = 0, j = 0,
+ opStart = 0, opEnd = 0, opIndex;
/* count number of options */
for (; optionList->optval; optionList++) {
@@ -819,9 +804,11 @@ listLogicalUnit(int operandLen, char *operand[], cmdOptions_t *options)
}
for (lu = 0; lu < pLogicalUnitOidList->oidCount; lu++) {
- /* get lu properties so we can check the name */
- (void) memset(&luProps, 0,
- sizeof (MP_MULTIPATH_LOGICAL_UNIT_PROPERTIES));
+ /* begin backup indentation */
+ /* get lu properties so we can check the name */
+ (void) memset(&luProps, 0,
+ sizeof (MP_MULTIPATH_LOGICAL_UNIT_PROPERTIES));
+ /* end backup indentation */
mpstatus =
MP_GetMPLogicalUnitProperties(
pLogicalUnitOidList->oids[lu],
@@ -903,9 +890,11 @@ listLogicalUnit(int operandLen, char *operand[], cmdOptions_t *options)
(lu < pLogicalUnitOidList->oidCount);
lu++) {
bListIt = B_FALSE;
- /* get lu props & check the name */
- (void) memset(&luProps, 0,
- sizeof (MP_MULTIPATH_LOGICAL_UNIT_PROPERTIES));
+ /* begin backup indentation */
+ /* get lu props & check the name */
+ (void) memset(&luProps, 0,
+ sizeof (MP_MULTIPATH_LOGICAL_UNIT_PROPERTIES));
+ /* end backup indentation */
mpstatus =
MP_GetMPLogicalUnitProperties(
pLogicalUnitOidList->oids[lu],
@@ -968,7 +957,7 @@ switch (optionList->optval) {
(B_FALSE == bListIt); tpg++) {
mpstatus =
MP_GetTargetPortOidList(pTpgOidListArray->oids[tpg],
- &pTportOidListArray);
+ &pTportOidListArray);
if (mpstatus != MP_STATUS_SUCCESS) {
(void) fprintf(stderr, "%s: %s\n",
cmdName,
@@ -1148,8 +1137,7 @@ listIndividualLogicalUnit(MP_OID luOid,
MP_PATH_LOGICAL_UNIT_PROPERTIES pathProps;
MP_OID_LIST *pPathOidListArray;
MP_STATUS mpstatus = MP_STATUS_SUCCESS;
- int numOperationalPaths,
- pa;
+ int numOperationalPaths, pa;
(void) printf("\t");
displayArray(luProps.deviceFileName, sizeof (luProps.deviceFileName));
@@ -1161,7 +1149,7 @@ listIndividualLogicalUnit(MP_OID luOid,
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) fprintf(stderr,
getTextString(ERR_NO_LU_PATH_INFO_WITH_MISSING_LU_STR),
- getStringArray(luProps.deviceFileName,
+ getStringArray(luProps.deviceFileName,
sizeof (luProps.deviceFileName)));
(void) fprintf(stderr, "\n");
return (mpstatus);
@@ -1213,8 +1201,7 @@ showLogicalUnit(int operandLen, char *operand[])
MP_STATUS mpstatus = MP_STATUS_SUCCESS;
MP_MULTIPATH_LOGICAL_UNIT_PROPERTIES luProps;
MP_PLUGIN_PROPERTIES pluginProps;
- MP_OID luOid,
- pluginOid;
+ MP_OID luOid, pluginOid;
int op;
@@ -1292,21 +1279,18 @@ showIndividualLogicalUnit(MP_OID luOid,
MP_TARGET_PORT_GROUP_PROPERTIES tpgProps;
MP_TARGET_PORT_PROPERTIES tportProps;
MP_INITIATOR_PORT_PROPERTIES initProps;
- MP_OID_LIST *pPathOidListArray,
- *pTPGOidListArray,
- *pTportOidListArray;
+ MP_OID_LIST *pPathOidListArray, *pTPGOidListArray,
+ *pTportOidListArray;
MP_STATUS mpstatus = MP_STATUS_SUCCESS;
boolean_t showTportLabel = B_TRUE;
- int pa,
- tpg,
- tport;
+ int pa, tpg, tport;
(void) printf("%s ", getTextString(TEXT_LB_LOGICAL_UNIT));
displayArray(luProps.deviceFileName, sizeof (luProps.deviceFileName));
(void) printf("\n");
(void) printf("\t%s %s\n", getTextString(TEXT_LB_MPATH_SUPPORT),
- pluginProps.fileName);
+ pluginProps.fileName);
(void) printf("\t%s ", getTextString(TEXT_LB_VENDOR));
displayArray(luProps.vendor,
@@ -1467,14 +1451,16 @@ showIndividualLogicalUnit(MP_OID luOid,
getAccessStateStr(
tpgProps.accessState));
/* display label for each tpg. */
- (void) printf("\t\t%s\n",
- getTextString(TEXT_TPORT_LIST));
+ (void) printf("\t\t%s\n",
+ getTextString(TEXT_TPORT_LIST));
} else {
/* display label once for symmetric. */
if (B_TRUE == showTportLabel) {
- (void) printf("\t%s\n",
- getTextString(TEXT_TPORT_LIST));
- showTportLabel = B_FALSE;
+ /* begin back-up indentation */
+ (void) printf("\t%s\n",
+ getTextString(TEXT_TPORT_LIST));
+ showTportLabel = B_FALSE;
+ /* end back-up indentation */
}
}
@@ -1496,28 +1482,28 @@ showIndividualLogicalUnit(MP_OID luOid,
MP_GetTargetPortProperties(pTportOidListArray->oids[tport],
&tportProps)) != MP_STATUS_SUCCESS) {
(void) fprintf(stderr, "%s: %s",
- cmdName, getTextString(ERR_NO_PROPERTIES));
+ cmdName, getTextString(ERR_NO_PROPERTIES));
} else {
if (MP_TRUE == luProps.asymmetric) {
(void) printf("\t\t\t%s ",
- getTextString(TEXT_LB_NAME));
+ getTextString(TEXT_LB_NAME));
displayArray(tportProps.portID,
- sizeof (tportProps.portID));
+ sizeof (tportProps.portID));
(void) printf("\n\t\t\t%s %d\n",
- getTextString(TEXT_LB_RELATIVE_ID),
- tportProps.relativePortID);
+ getTextString(TEXT_LB_RELATIVE_ID),
+ tportProps.relativePortID);
} else {
(void) printf("\t\t%s ",
- getTextString(TEXT_LB_NAME));
+ getTextString(TEXT_LB_NAME));
displayArray(tportProps.portID,
- sizeof (tportProps.portID));
+ sizeof (tportProps.portID));
(void) printf("\n\t\t%s %d\n",
- getTextString(TEXT_LB_RELATIVE_ID),
- tportProps.relativePortID);
+ getTextString(TEXT_LB_RELATIVE_ID),
+ tportProps.relativePortID);
}
/* insert blank line if not the last target port. */
if (!(tport == (pTportOidListArray->oidCount - 1))) {
- (void) printf("\n");
+ (void) printf("\n");
}
}
} /* for each target port */
@@ -1551,9 +1537,7 @@ modifyLogicalUnit(int operandLen, char *operand[], cmdOptions_t *options)
MP_STATUS mpstatus = MP_STATUS_SUCCESS;
MP_OID luOid;
cmdOptions_t *optionList = options;
- char *cmdStr =
- getTextString(
- TEXT_UNKNOWN);
+ char *cmdStr = getTextString(TEXT_UNKNOWN);
int op;
for (op = 0; op < operandLen; op++) {
@@ -1580,14 +1564,15 @@ modifyLogicalUnit(int operandLen, char *operand[], cmdOptions_t *options)
mpstatus =
MP_DisableAutoFailback(luOid);
} else {
- /* LINTED E_SEC_PRINTF_VAR_FMT */
- (void) fprintf(stderr, getTextString(
+ /* begin back-up indentation */
+ /* LINTED E_SEC_PRINTF_VAR_FMT */
+ (void) fprintf(stderr, getTextString(
ERR_FAILED_TO_CHANGE_OPTION_WITH_REASON),
- cmdStr,
- getTextString(
- TEXT_ILLEGAL_ARGUMENT));
- (void) printf("\n");
- return (ERROR_CLI_FAILED);
+ cmdStr, getTextString(
+ TEXT_ILLEGAL_ARGUMENT));
+ (void) printf("\n");
+ return (ERROR_CLI_FAILED);
+ /* end back-up indentation */
}
break;
case 'p':
@@ -1598,18 +1583,19 @@ modifyLogicalUnit(int operandLen, char *operand[], cmdOptions_t *options)
mpstatus =
MP_EnableAutoProbing(luOid);
} else if (0 == strcasecmp(optionList->optarg,
- getTextString(TEXT_OFF))) {
+ getTextString(TEXT_OFF))) {
mpstatus =
MP_DisableAutoProbing(luOid);
} else {
- /* LINTED E_SEC_PRINTF_VAR_FMT */
- (void) fprintf(stderr, getTextString(
+ /* begin back-up indentation */
+ /* LINTED E_SEC_PRINTF_VAR_FMT */
+ (void) fprintf(stderr, getTextString(
ERR_FAILED_TO_CHANGE_OPTION_WITH_REASON),
- cmdStr,
- getTextString(
- TEXT_ILLEGAL_ARGUMENT));
- (void) printf("\n");
- return (ERROR_CLI_FAILED);
+ cmdStr, getTextString(
+ TEXT_ILLEGAL_ARGUMENT));
+ (void) printf("\n");
+ return (ERROR_CLI_FAILED);
+ /* end back-up indentation */
}
break;
case 'b':
@@ -1689,7 +1675,7 @@ failoverLogicalUnit(char *operand[])
MP_GetAssociatedTPGOidList(luOid, &pTpgOidListArray);
if (mpstatus != MP_STATUS_SUCCESS) {
(void) fprintf(stderr, "%s: %s\n",
- cmdName, getTextString(ERR_NO_ASSOC_TPGS));
+ cmdName, getTextString(ERR_NO_ASSOC_TPGS));
return (mpstatus);
}
@@ -1709,7 +1695,7 @@ failoverLogicalUnit(char *operand[])
}
if (MP_FALSE == tpgProps.explicitFailover) {
(void) fprintf(stderr, "%s: %s\n",
- cmdName, getTextString(ERR_NO_FAILOVER_ALLOWED));
+ cmdName, getTextString(ERR_NO_FAILOVER_ALLOWED));
return (ERROR_CLI_FAILED);
}
@@ -1726,12 +1712,14 @@ failoverLogicalUnit(char *operand[])
mpstatus =
MP_SetTPGAccess(luOid, 1, &tpgStatePair);
if (MP_STATUS_SUCCESS != mpstatus) {
- /* LINTED E_SEC_PRINTF_VAR_FMT */
- (void) fprintf(stderr, getTextString(
+ /* begin back-up indentation */
+ /* LINTED E_SEC_PRINTF_VAR_FMT */
+ (void) fprintf(stderr, getTextString(
ERR_FAILED_TO_FAILOVER_WITH_REASON),
getMpStatusStr(mpstatus));
- (void) printf("\n");
- return (mpstatus);
+ (void) printf("\n");
+ return (mpstatus);
+ /* end back-up indentation */
}
}
@@ -1765,15 +1753,13 @@ getLogicalUnitOid(MP_CHAR *luFileName, MP_OID *pluOid)
MP_STATUS mpstatus = MP_STATUS_SUCCESS;
MP_MULTIPATH_LOGICAL_UNIT_PROPERTIES luProps;
MP_PLUGIN_PROPERTIES pluginProps;
- MP_OID_LIST *pPluginOidList,
- *pLogicalUnitOidList;
+ MP_OID_LIST *pPluginOidList, *pLogicalUnitOidList;
boolean_t foundIt = B_FALSE;
- int i,
- lu;
+ int i, lu;
int fd1, fd2;
- ddi_devid_t devid1 = NULL, devid2 = NULL;
+ ddi_devid_t devid1, devid2;
if (NULL == pluOid) {
/* print some kind of error msg here - should never happen */
@@ -1788,14 +1774,14 @@ getLogicalUnitOid(MP_CHAR *luFileName, MP_OID *pluOid)
pluOid->ownerId = 0;
if ((mpstatus = MP_GetPluginOidList(&pPluginOidList))
- != MP_STATUS_SUCCESS) {
+ != MP_STATUS_SUCCESS) {
(void) fprintf(stderr, "%s: %s\n", cmdName,
getTextString(ERR_NO_MPATH_SUPPORT_LIST));
return (B_FALSE);
}
if ((NULL == pPluginOidList) || (pPluginOidList->oidCount < 1)) {
(void) fprintf(stderr, "%s: %s\n", cmdName,
- getTextString(ERR_NO_MPATH_SUPPORT_LIST));
+ getTextString(ERR_NO_MPATH_SUPPORT_LIST));
return (ERROR_CLI_FAILED);
}
for (i = 0; i < pPluginOidList->oidCount; i++) {
@@ -1805,9 +1791,9 @@ getLogicalUnitOid(MP_CHAR *luFileName, MP_OID *pluOid)
if ((mpstatus =
MP_GetPluginProperties(pPluginOidList->oids[i],
&pluginProps)) != MP_STATUS_SUCCESS) {
- (void) fprintf(stderr, "%s: %s\n",
- cmdName, getTextString(ERR_NO_PROPERTIES));
- return (B_FALSE);
+ (void) fprintf(stderr, "%s: %s\n",
+ cmdName, getTextString(ERR_NO_PROPERTIES));
+ return (B_FALSE);
}
/* attempt to find this logical unit */
@@ -1834,7 +1820,8 @@ getLogicalUnitOid(MP_CHAR *luFileName, MP_OID *pluOid)
return (B_FALSE);
}
- if (0 == strcmp(luFileName, luProps.deviceFileName)) {
+ if (compareLUName(luFileName, luProps.deviceFileName)
+ == B_TRUE) {
foundIt = B_TRUE;
} else {
/* user input didn't match, try via devid */
@@ -1845,10 +1832,11 @@ getLogicalUnitOid(MP_CHAR *luFileName, MP_OID *pluOid)
*/
fd1 = fd2 = -1;
+ devid1 = devid2 = NULL;
if (((fd1 = open(luFileName,
- O_RDONLY|O_NDELAY)) >= 0) &&
+ O_RDONLY|O_NDELAY)) >= 0) &&
((fd2 = open(luProps.deviceFileName,
- O_RDONLY|O_NDELAY)) >= 0) &&
+ O_RDONLY|O_NDELAY)) >= 0) &&
(devid_get(fd1, &devid1) == 0) &&
(devid_get(fd2, &devid2) == 0) &&
((NULL != devid1) && (NULL != devid2))) {
@@ -1905,14 +1893,11 @@ listInitiatorPort(int operandLen, char *operand[])
{
MP_STATUS mpstatus = MP_STATUS_SUCCESS;
MP_INITIATOR_PORT_PROPERTIES initProps;
- MP_OID_LIST *pPluginOidList,
- *pInitOidList;
+ MP_OID_LIST *pPluginOidList, *pInitOidList;
boolean_t bListIt = B_FALSE;
boolean_t *foundOp;
- int ol,
- i,
- iport;
+ int ol, i, iport;
foundOp = malloc((sizeof (boolean_t)) * operandLen);
if (NULL == foundOp) {
@@ -1926,7 +1911,7 @@ listInitiatorPort(int operandLen, char *operand[])
}
if ((mpstatus = MP_GetPluginOidList(&pPluginOidList))
- != MP_STATUS_SUCCESS) {
+ != MP_STATUS_SUCCESS) {
(void) fprintf(stderr, "%s: %s\n", cmdName,
getTextString(ERR_NO_MPATH_SUPPORT_LIST));
return (mpstatus);
@@ -1948,7 +1933,7 @@ listInitiatorPort(int operandLen, char *operand[])
getMpStatusStr(mpstatus));
(void) printf("\n");
} else if ((NULL == pInitOidList) ||
- (pInitOidList->oidCount < 1)) {
+ (pInitOidList->oidCount < 1)) {
(void) fprintf(stderr, "%s: %s\n", cmdName,
getTextString(ERR_NO_INIT_PORTS));
} else {
@@ -2032,7 +2017,7 @@ listIndividualInitiatorPort(MP_INITIATOR_PORT_PROPERTIES initProps)
(void) printf("%s ", getTextString(TEXT_LB_INITATOR_PORT));
displayArray(initProps.portID,
- sizeof (initProps.portID));
+ sizeof (initProps.portID));
(void) printf("\n");
return (mpstatus);
@@ -2056,16 +2041,12 @@ showInitiatorPort(int operandLen, char *operand[])
{
MP_STATUS mpstatus = MP_STATUS_SUCCESS;
MP_INITIATOR_PORT_PROPERTIES initProps;
- MP_OID_LIST *pPluginOidList,
- *pInitOidList;
- boolean_t bListIt = B_FALSE,
- bFoundIt = B_FALSE;
- int op,
- i,
- iport;
+ MP_OID_LIST *pPluginOidList, *pInitOidList;
+ boolean_t bListIt = B_FALSE, bFoundIt = B_FALSE;
+ int op, i, iport;
if ((mpstatus = MP_GetPluginOidList(&pPluginOidList))
- != MP_STATUS_SUCCESS) {
+ != MP_STATUS_SUCCESS) {
(void) fprintf(stderr, "%s: %s\n", cmdName,
getTextString(ERR_NO_MPATH_SUPPORT_LIST));
return (mpstatus);
@@ -2107,9 +2088,11 @@ showInitiatorPort(int operandLen, char *operand[])
pInitOidList->oids[iport],
&initProps))
!= MP_STATUS_SUCCESS) {
- (void) fprintf(stderr,
- "%s: %s\n", cmdName,
+ /* begin back-up indentation */
+ (void) fprintf(stderr,
+ "%s: %s\n", cmdName,
getTextString(ERR_NO_PROPERTIES));
+ /* end back-up indentation */
} else {
if (0 == strcmp(operand[op],
initProps.portID)) {
@@ -2120,8 +2103,8 @@ showInitiatorPort(int operandLen, char *operand[])
if (B_TRUE == bListIt) {
mpstatus =
- showIndividualInitiatorPort(
- initProps);
+ showIndividualInitiatorPort(
+ initProps);
if (0 != mpstatus) {
return (mpstatus);
}
@@ -2199,9 +2182,7 @@ enablePath(cmdOptions_t *options)
MP_OID pathOid;
cmdOptions_t *optionList = options;
- boolean_t bHaveInit = B_FALSE,
- bHaveTarg = B_FALSE,
- bHaveLu = B_FALSE;
+ boolean_t bHaveInit = B_FALSE, bHaveTarg = B_FALSE, bHaveLu = B_FALSE;
for (; optionList->optval; optionList++) {
switch (optionList->optval) {
@@ -2284,9 +2265,8 @@ disablePath(cmdOptions_t *options)
MP_OID pathOid;
cmdOptions_t *optionList = options;
- boolean_t bHaveInit = B_FALSE,
- bHaveTarg = B_FALSE,
- bHaveLu = B_FALSE;
+ boolean_t bHaveInit = B_FALSE, bHaveTarg = B_FALSE,
+ bHaveLu = B_FALSE;
for (; optionList->optval; optionList++) {
switch (optionList->optval) {
@@ -2471,24 +2451,18 @@ getPathOid(cmdOptions_t *options, MP_OID *pPathOid)
MP_INITIATOR_PORT_PROPERTIES initProps;
MP_TARGET_PORT_PROPERTIES targProps;
- MP_OID_LIST *pPluginOidList,
- *pLogicalUnitOidList,
- *pathOidListArray;
+ MP_OID_LIST *pPluginOidList, *pLogicalUnitOidList,
+ *pathOidListArray;
boolean_t bFoundIt = B_FALSE;
MP_CHAR initPortID[256];
MP_CHAR targetPortID[256];
MP_CHAR luDeviceFileName[256];
- boolean_t bHaveTarg = B_FALSE,
- bHaveLu = B_FALSE,
- bHaveInit = B_FALSE;
-
-
+ boolean_t bHaveTarg = B_FALSE, bHaveLu = B_FALSE,
+ bHaveInit = B_FALSE;
cmdOptions_t *optionList = options;
- int i,
- lu,
- pa;
+ int i, lu, pa;
if (NULL == pPathOid) {
return (B_FALSE);
}
@@ -2550,7 +2524,7 @@ getPathOid(cmdOptions_t *options, MP_OID *pPathOid)
}
for (lu = 0; (lu < pLogicalUnitOidList->oidCount) &&
- (B_FALSE == bFoundIt); lu++) {
+ (B_FALSE == bFoundIt); lu++) {
/* get lu properties so we can check the name */
(void) memset(&luProps, 0,
@@ -2563,8 +2537,8 @@ getPathOid(cmdOptions_t *options, MP_OID *pPathOid)
cmdName, getTextString(ERR_NO_PROPERTIES));
return (B_FALSE);
}
- if (0 == strcmp(luDeviceFileName,
- luProps.deviceFileName)) {
+ if (compareLUName(luDeviceFileName,
+ luProps.deviceFileName) == B_TRUE) {
/* get paths for this LU and search from here */
mpstatus =
MP_GetAssociatedPathOidList(
@@ -2580,8 +2554,8 @@ getPathOid(cmdOptions_t *options, MP_OID *pPathOid)
}
for (pa = 0;
- (pa < pathOidListArray->oidCount) &&
- (B_FALSE == bFoundIt); pa++) {
+ (pa < pathOidListArray->oidCount) &&
+ (B_FALSE == bFoundIt); pa++) {
mpstatus =
MP_GetPathLogicalUnitProperties
(pathOidListArray->oids[pa],
@@ -2767,7 +2741,7 @@ displayLogicalUnitNameTypeString(MP_LOGICAL_UNIT_NAME_TYPE typeVal)
break;
case MP_LU_NAME_TYPE_DEVICE_SPECIFIC:
typeString =
- getTextString(TEXT_NAME_TYPE_DEVICE_SPECIFIC);
+ getTextString(TEXT_NAME_TYPE_DEVICE_SPECIFIC);
break;
default:
typeString = getTextString(TEXT_UNKNOWN);
@@ -3203,7 +3177,7 @@ listFunc(int operandLen, char *operand[], int object, cmdOptions_t *options,
break;
default:
(void) fprintf(stderr, "%s: %s\n",
- cmdName, getTextString(TEXT_UNKNOWN_OBJECT));
+ cmdName, getTextString(TEXT_UNKNOWN_OBJECT));
ret = 1;
break;
}
diff --git a/usr/src/cmd/prtconf/pdevinfo.c b/usr/src/cmd/prtconf/pdevinfo.c
index 190ed0d0b7..fccc4ff2e7 100644
--- a/usr/src/cmd/prtconf/pdevinfo.c
+++ b/usr/src/cmd/prtconf/pdevinfo.c
@@ -770,7 +770,6 @@ dump_devs(di_node_t node, void *arg)
(void) printf(" (retired)");
} else if (di_state(node) & DI_DRIVER_DETACHED)
(void) printf(" (driver not attached)");
-
(void) printf("\n");
if (opts.o_verbose) {
@@ -1030,6 +1029,13 @@ dump_pathing_data(int ilev, di_node_t node)
return;
while ((pi = di_path_client_next_path(node, pi)) != DI_PATH_NIL) {
+
+ /* It is not really a path if we failed to capture the pHCI */
+ phci_node = di_path_phci_node(pi);
+ if (phci_node == DI_NODE_NIL)
+ continue;
+
+ /* Print header for the first path */
if (firsttime) {
indent_to_level(ilev);
firsttime = 0;
@@ -1042,10 +1048,9 @@ dump_pathing_data(int ilev, di_node_t node)
* the same as the /devices devifo path had the device been
* enumerated under pHCI.
*/
- phci_node = di_path_phci_node(pi);
phci_path = di_devfs_path(phci_node);
- path_instance = di_path_instance(pi);
if (phci_path) {
+ path_instance = di_path_instance(pi);
if (path_instance > 0) {
indent_to_level(ilev);
(void) printf("Path %d: %s/%s@%s\n",
@@ -1060,6 +1065,7 @@ dump_pathing_data(int ilev, di_node_t node)
indent_to_level(ilev);
(void) printf("%s#%d (%s)\n", di_driver_name(phci_node),
di_instance(phci_node), path_state_name(di_path_state(pi)));
+
(void) dump_prop_list(&pathprop_dumpops, NULL, ilev + 1,
pi, DDI_DEV_T_ANY, NULL);
}
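
The prtconf change above only prints a path once its pHCI node is confirmed present in the snapshot. Below is a standalone sketch of the same guard using only public libdevinfo calls; the "sd" driver name and the DINFOCACHE snapshot flag are illustrative assumptions, not anything mandated by this patch (compile with -ldevinfo).

#include <stdio.h>
#include <libdevinfo.h>

/*
 * Print every captured path of a client node; skip entries whose
 * pHCI node is missing from the snapshot, as prtconf now does.
 */
static void
print_paths(di_node_t client)
{
        di_path_t pi = DI_PATH_NIL;

        while ((pi = di_path_client_next_path(client, pi)) != DI_PATH_NIL) {
                di_node_t phci = di_path_phci_node(pi);

                /* not really a path if the pHCI was not captured */
                if (phci == DI_NODE_NIL)
                        continue;

                (void) printf("  path via %s%d @ %s\n",
                    di_driver_name(phci), di_instance(phci),
                    di_path_bus_addr(pi));
        }
}

int
main(void)
{
        di_node_t root, node;

        if ((root = di_init("/", DINFOCACHE)) == DI_NODE_NIL)
                return (1);

        /* "sd" is only an example of a multipath client driver */
        for (node = di_drv_first_node("sd", root); node != DI_NODE_NIL;
            node = di_drv_next_node(node)) {
                (void) printf("%s%d:\n", di_driver_name(node),
                    di_instance(node));
                print_paths(node);
        }

        di_fini(root);
        return (0);
}
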
diff --git a/usr/src/cmd/stmsboot/stmsboot.sh b/usr/src/cmd/stmsboot/stmsboot.sh
index 08caf6e749..504be16e2e 100644
--- a/usr/src/cmd/stmsboot/stmsboot.sh
+++ b/usr/src/cmd/stmsboot/stmsboot.sh
@@ -39,7 +39,7 @@ SAVEDIR=/etc/mpxio
BOOTDEVICES=$SAVEDIR/boot-devices
RECOVERFILE=$SAVEDIR/recover_instructions
SVCCFG_RECOVERY=$SAVEDIR/svccfg_recover
-SUPPORTED_DRIVERS="fp|mpt|mpt_sas"
+SUPPORTED_DRIVERS="fp|mpt|mpt_sas|pmcs"
USAGE=`gettext "Usage: stmsboot [-D $SUPPORTED_DRIVERS] -e | -d | -u | -L | -l controller_number"`
TEXTDOMAIN=SUNW_OST_OSCMD
export TEXTDOMAIN
@@ -290,7 +290,7 @@ update_sysfiles()
# Returns 0 on success, 1 on failure.
#
# Args: $cmd = {enable | disable}
-# $d = {fp | mpt | mpt_sas}
+# $d = {fp | mpt | mpt_sas | pmcs}
#
# the global variable $DRVLIST is used
#
@@ -417,7 +417,7 @@ get_newbootpath_for_stmsdev() {
# Emit a warning message to the user that by default we
# operate on all multipath-capable controllers that are
# attached to the system, and that if they want to operate
-# on only a specific controller type (fp|mpt|mpt_sas|....) then
+# on only a specific controller type (fp|mpt|mpt_sas|pmcs|....) then
# they need to re-invoke stmsboot with "-D $driver" in
# their argument list
#
@@ -437,7 +437,7 @@ emit_driver_warning_msg() {
echo ""
gettext "If you do NOT wish to operate on these controllers, please quit stmsboot\n"
- gettext "and re-invoke with -D { fp | mpt | mpt_sas} to specify which controllers you wish\n"
+ gettext "and re-invoke with -D { fp | mpt | mpt_sas | pmcs} to specify which controllers you wish\n"
gettext "to modify your multipathing configuration for.\n"
echo ""
@@ -480,7 +480,7 @@ if [ "x$cmd" = xnone ]; then
fi
if [ "x$DRV" = "x" ]; then
- DRVLIST="fp mpt mpt_sas"
+ DRVLIST="fp mpt mpt_sas pmcs"
else
DRVLIST=$DRV
fi
diff --git a/usr/src/cmd/stmsboot/stmsboot_util.c b/usr/src/cmd/stmsboot/stmsboot_util.c
index 4abfcce035..fc3231c161 100644
--- a/usr/src/cmd/stmsboot/stmsboot_util.c
+++ b/usr/src/cmd/stmsboot/stmsboot_util.c
@@ -143,6 +143,8 @@ main(int argc, char **argv)
devinfo_root));
print_mpx_capable(di_drv_first_node("mpt_sas",
devinfo_root));
+ print_mpx_capable(di_drv_first_node("pmcs",
+ devinfo_root));
}
di_fini(devinfo_root);
return (0);
@@ -401,7 +403,8 @@ parse_args(int argc, char *argv[])
/* update this if adding support for a new driver */
if ((strncmp(drvlimit, "fp", 2) == NULL) &&
(strncmp(drvlimit, "mpt", 3) == NULL) &&
- (strncmp(drvlimit, "mpt_sas", 7) == NULL)) {
+ (strncmp(drvlimit, "mpt_sas", 7) == NULL) &&
+ (strncmp(drvlimit, "pmcs", 4) == NULL)) {
logmsg(MSG_ERROR,
gettext("invalid parent driver (%s) "
"specified"), drvlimit);
diff --git a/usr/src/common/devid/devid_scsi.c b/usr/src/common/devid/devid_scsi.c
index 45aaacbf0d..6b2574a8db 100644
--- a/usr/src/common/devid/devid_scsi.c
+++ b/usr/src/common/devid/devid_scsi.c
@@ -41,6 +41,9 @@
#include <sys/dditypes.h>
#include <sys/ddi_impldefs.h>
#include <sys/scsi/scsi.h>
+#ifndef _KERNEL
+#include <sys/libdevid.h>
+#endif /* !_KERNEL */
#include "devid_impl.h"
#define SCSI_INQUIRY_VID_POS 9
@@ -1244,8 +1247,7 @@ scsi_wwnstr_to_wwn(const char *wwnstr, uint64_t *wwnp)
return (DDI_FAILURE);
/* Skip leading 'w' if wwnstr is in unit-address form */
- if (*wwnstr == 'w')
- wwnstr++;
+ wwnstr = scsi_wwnstr_skip_ua_prefix(wwnstr);
if (strlen(wwnstr) != 16)
return (DDI_FAILURE);
@@ -1328,6 +1330,23 @@ scsi_wwnstr_hexcase(char *wwnstr, int upper_case_hex)
}
/*
+ * Function: scsi_wwnstr_skip_ua_prefix
+ *
+ * Description: This routine skips the leading 'w' in wwnstr,
+ * if it's in unit-address form, and returns a pointer
+ * past it; wwnstr itself is not modified.
+ *
+ * Arguments: wwnstr - the string wwn to be transformed
+ *
+ */
+const char *
+scsi_wwnstr_skip_ua_prefix(const char *wwnstr)
+{
+ if (*wwnstr == 'w')
+ wwnstr++;
+ return (wwnstr);
+}
+
+/*
* Function: scsi_wwnstr_free
*
* Description: This routine frees a wwnstr returned by a call
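
For reference, a minimal sketch of how the new helper behaves: the unit-address form and the bare form of a WWN string compare equal once the optional 'w' prefix is skipped. The <sys/libdevid.h> header path is the one this patch uses for non-kernel builds, the WWN value is made up, and since the symbol is exported under SUNWprivate (see the libdevid mapfile change further down) the example assumes linking with -ldevid.

#include <stdio.h>
#include <string.h>
#include <sys/libdevid.h>

int
main(void)
{
        /* hypothetical WWN, once in unit-address form, once bare */
        const char *ua = "w5000c50000d756aa";
        const char *bare = "5000c50000d756aa";

        if (strcmp(scsi_wwnstr_skip_ua_prefix(ua),
            scsi_wwnstr_skip_ua_prefix(bare)) == 0)
                (void) printf("same WWN\n");

        return (0);
}
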
diff --git a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_ctl.c b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_ctl.c
index c9ce0c6e4f..f81b54a804 100644
--- a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_ctl.c
+++ b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_ctl.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "cfga_scsi.h"
struct larg {
@@ -299,44 +297,54 @@ dev_change_state(
break;
}
- /* When unconfiguring a device, first offline it through RCM. */
- if ((apidp->flags & FLAG_DISABLE_RCM) == 0) {
- if (cmd == SCFGA_DEV_UNCONFIGURE) {
- dev_list[0] = get_node_path(apidp->path);
- if (dev_list[0] == NULL) {
- ret = SCFGA_ERR;
- break;
- }
- if ((ret = scsi_rcm_offline(dev_list,
- errstring, flags)) != SCFGA_OK) {
- break;
+ if (apidp->dyntype == PATH_APID) {
+ /* call a scsi_vhci ioctl to do online/offline path. */
+ ret = path_apid_state_change(apidp, cmd,
+ flags, errstring, &l_errno, errid);
+ } else {
+ /*
+ * When unconfiguring a device, first offline it
+ * through RCM.
+ */
+ if ((apidp->flags & FLAG_DISABLE_RCM) == 0) {
+ if (cmd == SCFGA_DEV_UNCONFIGURE) {
+ dev_list[0] =
+ get_node_path(apidp->path);
+ if (dev_list[0] == NULL) {
+ ret = SCFGA_ERR;
+ break;
+ }
+ if ((ret = scsi_rcm_offline(dev_list,
+ errstring, flags)) != SCFGA_OK) {
+ break;
+ }
}
}
- }
- ret = devctl_cmd(apidp->path, cmd, NULL, &l_errno);
- if (ret != SCFGA_OK) {
- cfga_err(errstring, l_errno, errid, 0);
+ ret = devctl_cmd(apidp->path, cmd, NULL, &l_errno);
+ if (ret != SCFGA_OK) {
+ cfga_err(errstring, l_errno, errid, 0);
/*
* If an unconfigure fails, cancel the RCM offline.
* Discard any RCM failures so that the devctl
* failure will still be reported.
*/
- if ((apidp->flags & FLAG_DISABLE_RCM) == 0) {
- if (cmd == SCFGA_DEV_UNCONFIGURE)
- (void) scsi_rcm_online(dev_list,
- errstring, flags);
+ if ((apidp->flags & FLAG_DISABLE_RCM) == 0) {
+ if (cmd == SCFGA_DEV_UNCONFIGURE)
+ (void) scsi_rcm_online(dev_list,
+ errstring, flags);
+ }
+ break;
}
- break;
- }
- if ((apidp->flags & FLAG_DISABLE_RCM) == 0) {
+ if ((apidp->flags & FLAG_DISABLE_RCM) == 0) {
/*
* Unconfigure succeeded, call the RCM notify_remove.
*/
- if (cmd == SCFGA_DEV_UNCONFIGURE)
- (void) scsi_rcm_remove(dev_list,
+ if (cmd == SCFGA_DEV_UNCONFIGURE)
+ (void) scsi_rcm_remove(dev_list,
errstring, flags);
+ }
}
break;
diff --git a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_cvt.c b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_cvt.c
index e41debdb87..e3e260f9f5 100644
--- a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_cvt.c
+++ b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_cvt.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 1999, 2001-2002 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "cfga_scsi.h"
typedef struct {
@@ -78,6 +75,8 @@ static scfga_ret_t drv_to_dyncomp(di_node_t node, const char *phys,
char **dyncompp, int *l_errnop);
static scfga_ret_t get_hba_devlink(const char *hba_phys,
char **hba_logpp, int *l_errnop);
+static scfga_ret_t path_apid_dyn_to_path(const char *hba_phys, const char *dyn,
+ char **pathpp, int *l_errnop);
/* Globals */
@@ -263,19 +262,152 @@ apid_to_path(
/*
* If the dynamic component has a '/', it was derived from a devlink
* Else it was derived from driver name and instance number.
+ * If it is a pathinfo based ap_id, the dynamic component will
+ * contain the PATH_APID_DYN_SEP separator (',').
*/
if (strchr(dyncomp, '/') != NULL) {
ret = devlink_dyn_to_devpath(hba_phys, dyncomp, pathpp,
l_errnop);
+ } else if (strstr(dyncomp, PATH_APID_DYN_SEP) != NULL) {
+ ret = path_apid_dyn_to_path(hba_phys, dyncomp, pathpp,
+ l_errnop);
} else {
ret = drv_dyn_to_devpath(hba_phys, dyncomp, pathpp, l_errnop);
}
-
assert(ret != SCFGA_OK || *pathpp != NULL);
+
return (ret);
}
+/*
+ * Get the devfs path of the pathinfo node that is associated
+ * with the given dynamic component.
+ *
+ * input:
+ * hba_phys: physical path of HBA
+ * dyn : bus address of pathinfo node
+ * output:
+ * pathpp: devfs path of the pathinfo node.
+ */
+static scfga_ret_t
+path_apid_dyn_to_path(
+ const char *hba_phys,
+ const char *dyn,
+ char **pathpp,
+ int *l_errnop)
+{
+
+ di_node_t root, walk_root;
+ di_path_t pi_node = DI_PATH_NIL;
+ char *root_path, *devpath, *cp;
+ int len;
+
+ *l_errnop = 0;
+
+ /* If pathpp is non-NULL, *pathpp must be NULL on entry. */
+ if ((hba_phys == NULL) || (pathpp != NULL) && (*pathpp != NULL)) {
+ return (SCFGA_LIB_ERR);
+ }
+
+ if ((root_path = strdup(hba_phys)) == NULL) {
+ *l_errnop = errno;
+ return (SCFGA_LIB_ERR);
+ }
+
+ /* Fix up path for di_init() */
+ len = strlen(DEVICES_DIR);
+ if (strncmp(root_path, DEVICES_DIR SLASH,
+ len + strlen(SLASH)) == 0) {
+ cp = root_path + len;
+ (void) memmove(root_path, cp, strlen(cp) + 1);
+ } else if (*root_path != '/') {
+ *l_errnop = 0;
+ S_FREE(root_path);
+ return (SCFGA_ERR);
+ }
+
+ /* Remove dynamic component if any */
+ if ((cp = GET_DYN(root_path)) != NULL) {
+ *cp = '\0';
+ }
+
+ /* Remove minor name if any */
+ if ((cp = strrchr(root_path, ':')) != NULL) {
+ *cp = '\0';
+ }
+
+ /*
+ * Cached snapshots are always rooted at "/"
+ */
+
+ /* Get a snapshot */
+ if ((root = di_init("/", DINFOCACHE)) == DI_NODE_NIL) {
+ *l_errnop = errno;
+ S_FREE(root_path);
+ return (SCFGA_ERR);
+ }
+
+ /*
+ * Lookup the subtree of interest
+ */
+ walk_root = di_lookup_node(root, root_path);
+
+ if (walk_root == DI_NODE_NIL) {
+ *l_errnop = errno;
+ di_fini(root);
+ S_FREE(root_path);
+ return (SCFGA_LIB_ERR);
+ }
+
+ S_FREE(root_path);
+
+ if ((pi_node = di_path_next_client(walk_root, pi_node)) ==
+ DI_PATH_NIL) {
+ di_fini(root);
+ return (SCFGA_APID_NOEXIST);
+ }
+
+ /*
+ * now search the path info nodes for a matching bus address.
+ */
+ do {
+ /* check the length first. */
+ if (strlen(di_path_bus_addr(pi_node)) != strlen(dyn)) {
+ continue;
+ }
+
+ if (strcmp(di_path_bus_addr(pi_node), dyn) == 0) {
+ /* get the devfspath of pathinfo node. */
+ devpath = di_path_devfs_path(pi_node);
+ if (devpath == NULL) {
+ *l_errnop = errno;
+ di_fini(root);
+ return (SCFGA_ERR);
+ }
+
+ len = strlen(DEVICES_DIR) + strlen(devpath) + 1;
+ *pathpp = calloc(1, len);
+ if (*pathpp == NULL) {
+ *l_errnop = errno;
+ di_devfs_path_free(devpath);
+ di_fini(root);
+ return (SCFGA_ERR);
+ } else {
+ (void) snprintf(*pathpp, len, "%s%s",
+ DEVICES_DIR, devpath);
+ di_devfs_path_free(devpath);
+ di_fini(root);
+ return (SCFGA_OK);
+ }
+ }
+ pi_node = di_path_next_client(walk_root, pi_node);
+ } while (pi_node != DI_PATH_NIL);
+
+ di_fini(root);
+ return (SCFGA_APID_NOEXIST);
+}
+
static scfga_ret_t
drv_dyn_to_devpath(
const char *hba_phys,
@@ -497,7 +629,7 @@ make_dyncomp(
path = (char *)physpath;
} else {
match_minor = 1;
- snprintf(pathbuf, MAXPATHLEN, "%s:%s", physpath,
+ (void) snprintf(pathbuf, MAXPATHLEN, "%s:%s", physpath,
di_minor_name(minor));
path = pathbuf;
}
@@ -527,6 +659,36 @@ make_dyncomp(
return (ret);
}
+/*
+ * Create a dynamic component of path ap_id for the given path info node.
+ * The caller should free the buffer for the dynamic component.
+ */
+scfga_ret_t
+make_path_dyncomp(
+ di_path_t path,
+ char **dyncompp,
+ int *l_errnop)
+{
+ char *pi_addr;
+
+ if ((path == DI_PATH_NIL) || (*dyncompp != NULL)) {
+ return (SCFGA_LIB_ERR);
+ }
+
+ if ((pi_addr = di_path_bus_addr(path)) != NULL) {
+ *dyncompp = calloc(1, strlen(pi_addr) + 1);
+ if (*dyncompp == NULL) {
+ *l_errnop = errno;
+ return (SCFGA_LIB_ERR);
+ }
+ (void) strncpy(*dyncompp, pi_addr, strlen(pi_addr));
+ } else {
+ return (SCFGA_LIB_ERR);
+ }
+
+ return (SCFGA_OK);
+}
+
/*ARGSUSED*/
static scfga_ret_t
drv_to_dyncomp(di_node_t node, const char *phys, char **dyncompp, int *l_errnop)
@@ -723,7 +885,8 @@ tape_devlink_to_dyncomp(dyn_t *dyntp)
cp = strrchr(dyntp->dyncomp, '/');
/* Remove the mode part */
- while (isdigit(*(++cp)));
+ while (isdigit(*(++cp))) {
+ };
*cp = '\0';
diff --git a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_list.c b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_list.c
index a58847e280..5a010a3dd1 100644
--- a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_list.c
+++ b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_list.c
@@ -41,6 +41,7 @@ typedef struct {
uint_t itype;
const char *ntype;
const char *name;
+ const char *pathname;
} scfga_devtype_t;
/* The TYPE field is parseable and should not contain spaces */
@@ -57,28 +58,30 @@ static scfga_ret_t do_stat_dev(const di_node_t node, const char *nodepath,
scfga_list_t *lap, int limited_dev_stat);
static cfga_stat_t bus_devinfo_to_recep_state(uint_t bus_di_state);
static cfga_stat_t dev_devinfo_to_occupant_state(uint_t dev_di_state);
-static char *get_device_type(di_node_t);
-static void get_hw_info(di_node_t node, cfga_list_data_t *clp);
+static char *get_device_type(di_node_t, dyncomp_t);
+static void get_hw_info(di_node_t node, cfga_list_data_t *clp, dyncomp_t type);
+static scfga_ret_t create_pathinfo_ldata(di_path_t pi_node, scfga_list_t *lap,
+ int *l_errnop);
static scfga_devtype_t device_list[] = {
- { DTYPE_DIRECT, DDI_NT_BLOCK_CHAN, "disk"},
- { DTYPE_DIRECT, DDI_NT_BLOCK, "disk"},
- { DTYPE_DIRECT, DDI_NT_BLOCK_WWN, "disk"},
- { DTYPE_DIRECT, DDI_NT_BLOCK_FABRIC, "disk"},
- { DTYPE_DIRECT, DDI_NT_BLOCK_SAS, "disk"},
- { DTYPE_SEQUENTIAL, DDI_NT_TAPE, "tape"},
- { DTYPE_PRINTER, NULL, "printer"},
- { DTYPE_PROCESSOR, NULL, "processor"},
- { DTYPE_WORM, NULL, "WORM"},
- { DTYPE_RODIRECT, DDI_NT_CD_CHAN, "CD-ROM"},
- { DTYPE_RODIRECT, DDI_NT_CD, "CD-ROM"},
- { DTYPE_SCANNER, NULL, "scanner"},
- { DTYPE_OPTICAL, NULL, "optical"},
- { DTYPE_CHANGER, NULL, "med-changer"},
- { DTYPE_COMM, NULL, "comm-device"},
- { DTYPE_ARRAY_CTRL, NULL, "array-ctrl"},
- { DTYPE_ESI, NULL, "ESI"}
+ { DTYPE_DIRECT, DDI_NT_BLOCK_CHAN, "disk", "disk-path"},
+ { DTYPE_DIRECT, DDI_NT_BLOCK, "disk", "disk-path"},
+ { DTYPE_DIRECT, DDI_NT_BLOCK_WWN, "disk", "disk-path"},
+ { DTYPE_DIRECT, DDI_NT_BLOCK_FABRIC, "disk", "disk-path"},
+ { DTYPE_DIRECT, DDI_NT_BLOCK_SAS, "disk", "disk-path"},
+ { DTYPE_SEQUENTIAL, DDI_NT_TAPE, "tape", "tape-path"},
+ { DTYPE_PRINTER, NULL, "printer", "printer-path"},
+ { DTYPE_PROCESSOR, NULL, "processor", "PRCS-path"},
+ { DTYPE_WORM, NULL, "WORM", "WORM-path"},
+ { DTYPE_RODIRECT, DDI_NT_CD_CHAN, "CD-ROM", "CD-ROM-path"},
+ { DTYPE_RODIRECT, DDI_NT_CD, "CD-ROM", "CD-ROM-path"},
+ { DTYPE_SCANNER, NULL, "scanner", "scanner-path"},
+ { DTYPE_OPTICAL, NULL, "optical", "optical-path"},
+ { DTYPE_CHANGER, NULL, "med-changer", "MEDCHGR-path"},
+ { DTYPE_COMM, NULL, "comm-device", "COMDEV-path"},
+ { DTYPE_ARRAY_CTRL, NULL, "array-ctrl", "ARRCTRL-path"},
+ { DTYPE_ESI, NULL, "ESI", "ESI-path"}
};
#define N_DEVICE_TYPES (sizeof (device_list) / sizeof (device_list[0]))
@@ -182,15 +185,42 @@ do_list(
}
/* we need to stat at least 1 device for all commands */
- u.node_args.flags = DI_WALK_CLDFIRST;
- u.node_args.fcn = stat_dev;
+ if (apidp->dyntype == PATH_APID) {
+ /*
+ * The ap_id is pathinfo related (e.g. SCFGA_STAT_DEV on a
+ * path ap_id), so stat via the path info nodes.
+ */
+ ret = walk_tree(apidp->hba_phys, &larg, init_flag, NULL,
+ SCFGA_WALK_PATH, &larg.l_errno);
+ } else {
+ /* we need to stat at least 1 device for all commands */
+ u.node_args.flags = DI_WALK_CLDFIRST;
+ u.node_args.fcn = stat_dev;
- /*
- * Subtree is ALWAYS rooted at the HBA (not at the device) as
- * otherwise deadlock may occur if bus is disconnected.
- */
- ret = walk_tree(apidp->hba_phys, &larg, init_flag, &u,
- SCFGA_WALK_NODE, &larg.l_errno);
+ /*
+ * Subtree is ALWAYS rooted at the HBA (not at the device) as
+ * otherwise deadlock may occur if bus is disconnected.
+ */
+ ret = walk_tree(apidp->hba_phys, &larg, init_flag, &u,
+ SCFGA_WALK_NODE, &larg.l_errno);
+
+ /*
+ * Check path info under the following conditions:
+ *
+ * - chld_config is still set to CFGA_STAT_UNCONFIGURED for
+ * the SCFGA_STAT_BUS cmd after walking the child nodes.
+ * - walking the nodes succeeded for the SCFGA_STAT_ALL cmd
+ * (continue on to stat the path info nodes).
+ * - the ap_id is pathinfo associated and larg.ret is still
+ * set to SCFGA_APID_NOEXIST for the SCFGA_STAT_DEV cmd.
+ */
+ if (((cmd == SCFGA_STAT_BUS) &&
+ (larg.chld_config == CFGA_STAT_UNCONFIGURED)) ||
+ ((cmd == SCFGA_STAT_ALL) && (ret == SCFGA_OK))) {
+ ret = walk_tree(apidp->hba_phys, &larg, init_flag, NULL,
+ SCFGA_WALK_PATH, &larg.l_errno);
+ }
+ }
if (ret != SCFGA_OK || (ret = larg.ret) != SCFGA_OK) {
if (ret != SCFGA_APID_NOEXIST) {
@@ -380,10 +410,189 @@ out:
return (rv);
}
+/*
+ * Create a list data entry and add it to the ldata list.
+ */
+static scfga_ret_t
+create_pathinfo_ldata(di_path_t pi_node, scfga_list_t *lap, int *l_errnop)
+{
+ ldata_list_t *listp = NULL;
+ cfga_list_data_t *clp;
+ di_node_t client_node = DI_NODE_NIL;
+ di_minor_t minor;
+ scfga_ret_t ret;
+ di_path_state_t pi_state;
+ char *dyncomp = NULL, *client_path = NULL;
+ char pathbuf[MAXPATHLEN], *client_devlink = NULL;
+ int match_minor;
+
+ listp = calloc(1, sizeof (ldata_list_t));
+ if (listp == NULL) {
+ lap->l_errno = errno;
+ return (SCFGA_LIB_ERR);
+ }
+ clp = &listp->ldata;
+ ret = make_path_dyncomp(pi_node, &dyncomp, &lap->l_errno);
+ if (ret != SCFGA_OK) {
+ S_FREE(listp);
+ return (ret);
+ }
+
+ client_node = di_path_client_node(pi_node);
+ if (client_node == DI_NODE_NIL) {
+ *l_errnop = errno;
+ S_FREE(dyncomp);
+ return (SCFGA_LIB_ERR);
+ }
+
+ /* Create logical and physical ap_id */
+ (void) snprintf(clp->ap_log_id, sizeof (clp->ap_log_id), "%s%s%s",
+ lap->hba_logp, DYN_SEP, dyncomp);
+
+ (void) snprintf(clp->ap_phys_id, sizeof (clp->ap_phys_id), "%s%s%s",
+ lap->apidp->hba_phys, DYN_SEP, dyncomp);
+
+ S_FREE(dyncomp);
+
+ /* ap class filled in by libcfgadm */
+ clp->ap_class[0] = '\0';
+ clp->ap_r_state = lap->hba_rstate;
+ /* path info exists, so set to configured. */
+ clp->ap_o_state = CFGA_STAT_CONFIGURED;
+
+ /* now fill up ap_info field with client dev link and instance #. */
+ client_path = di_devfs_path(client_node);
+ if (client_path) {
+ /* get first minor node. */
+ minor = di_minor_next(client_node, DI_MINOR_NIL);
+ if (minor == DI_MINOR_NIL) {
+ match_minor = 0;
+ (void) snprintf(pathbuf, MAXPATHLEN, "%s:%s",
+ DEVICES_DIR, client_path);
+ } else {
+ match_minor = 1;
+ (void) snprintf(pathbuf, MAXPATHLEN, "%s%s:%s",
+ DEVICES_DIR, client_path, di_minor_name(minor));
+ }
+ (void) physpath_to_devlink(pathbuf, &client_devlink, l_errnop,
+ match_minor);
+ di_devfs_path_free(client_path);
+ }
+
+ if (client_devlink) {
+ (void) snprintf(clp->ap_info, CFGA_INFO_LEN,
+ "%s: %s", "Client Device", client_devlink);
+ S_FREE(client_devlink);
+ }
+
+ get_hw_info(client_node, clp, PATH_APID);
+
+ if ((pi_state = di_path_state(pi_node)) == DI_PATH_STATE_OFFLINE) {
+ clp->ap_o_state = CFGA_STAT_UNCONFIGURED;
+ }
+
+ if (pi_state == DI_PATH_STATE_FAULT) {
+ clp->ap_cond = CFGA_COND_FAILED;
+ } else {
+ clp->ap_cond = CFGA_COND_UNKNOWN;
+ }
+
+ /* no way to determine state change */
+ clp->ap_busy = 0;
+ clp->ap_status_time = (time_t)-1;
+
+ /* Link it in */
+ listp->next = lap->listp;
+ lap->listp = listp;
+
+ return (SCFGA_OK);
+}
+
+/*
+ * Routine to stat pathinfo nodes.
+ *
+ * If no pathinfo node is found, returns success.
+ * When cmd is SCFGA_STAT_DEV, finds the matching pathinfo node
+ * and creates ldata for it if found.
+ * When cmd is SCFGA_STAT_ALL, creates ldata for each pathinfo node.
+ * When cmd is SCFGA_STAT_BUS, checks whether any pathinfo node exists.
+ *
+ * Return:
+ * 0 for success
+ * -1 for failure.
+ */
+int
+stat_path_info(
+ di_node_t root,
+ void *arg,
+ int *l_errnop)
+{
+ scfga_list_t *lap = (scfga_list_t *)arg;
+ di_path_t pi_node;
+
+ if (root == DI_NODE_NIL) {
+ return (-1);
+ }
+
+ /*
+ * when there is no path_info node, return success.
+ */
+ if (di_path_next_client(root, DI_PATH_NIL) == DI_PATH_NIL) {
+ return (0);
+ }
+
+ if (lap->cmd == SCFGA_STAT_BUS) {
+ lap->chld_config = CFGA_STAT_CONFIGURED;
+ return (0);
+ } else if (lap->cmd == SCFGA_STAT_DEV) {
+ assert(lap->apidp->dyntype == PATH_APID);
+ for (pi_node = di_path_next_client(root, DI_PATH_NIL); pi_node;
+ pi_node = di_path_next_client(root, pi_node)) {
+ /*
+ * NOTE: apidt_create() validated pathinfo apid so
+ * the apid should have a valid format.
+ */
+
+ /* check the length first. */
+ if (strlen(di_path_bus_addr(pi_node)) !=
+ strlen(lap->apidp->dyncomp)) {
+ continue;
+ }
+
+ /* check for full match. */
+ if (strcmp(di_path_bus_addr(pi_node),
+ lap->apidp->dyncomp)) {
+ continue;
+ }
+
+ /* found match, record information */
+ if (create_pathinfo_ldata(pi_node, lap,
+ l_errnop) == SCFGA_OK) {
+ lap->ret = SCFGA_OK;
+ return (0);
+ } else {
+ return (-1);
+ }
+ }
+ } else { /* cmd == SCFGA_STAT_ALL */
+ /* set child config to configured */
+ lap->chld_config = CFGA_STAT_CONFIGURED;
+ for (pi_node = di_path_next_client(root, DI_PATH_NIL); pi_node;
+ pi_node = di_path_next_client(root, pi_node)) {
+ /* continue on even if there is an error on one path. */
+ (void) create_pathinfo_ldata(pi_node, lap, l_errnop);
+ }
+ }
+
+ lap->ret = SCFGA_OK;
+ return (0);
+
+}
struct bus_state {
int b_state;
int b_retired;
+ char iconnect_type[16];
};
static scfga_ret_t
@@ -395,6 +604,8 @@ do_stat_bus(scfga_list_t *lap, int limited_bus_stat)
struct bus_state bstate = {0};
walkarg_t u;
scfga_ret_t ret;
+ int i;
+ char itypelower[MAXNAMELEN];
assert(lap->hba_logp != NULL);
@@ -437,8 +648,24 @@ do_stat_bus(scfga_list_t *lap, int limited_bus_stat)
clp->ap_status_time = (time_t)-1;
clp->ap_info[0] = '\0';
- (void) snprintf(clp->ap_type, sizeof (clp->ap_type), "%s",
- SCFGA_BUS_TYPE);
+ if (bstate.iconnect_type) {
+ /*
+ * For SPI type, keep the existing SCFGA_BUS_TYPE.
+ * For other types, the ap type will be scsi-'interconnect-type'.
+ */
+ if (strcmp(bstate.iconnect_type, "SPI") == 0) {
+ (void) snprintf(clp->ap_type, sizeof (clp->ap_type),
+ "%s", SCFGA_BUS_TYPE);
+ } else {
+ for (i = 0; i < strlen(bstate.iconnect_type); i++) {
+ itypelower[i] =
+ tolower(bstate.iconnect_type[i]);
+ }
+ itypelower[i] = '\0';
+ (void) snprintf(clp->ap_type, sizeof (clp->ap_type),
+ "%s-%s", "scsi", itypelower);
+ }
+ }
/* Link it in */
listp->next = lap->listp;
@@ -451,9 +678,17 @@ static int
get_bus_state(di_node_t node, void *arg)
{
struct bus_state *bsp = (struct bus_state *)arg;
+ char *itype = NULL;
bsp->b_state = di_state(node);
bsp->b_retired = di_retired(node);
+ (void) di_prop_lookup_strings(DDI_DEV_T_ANY,
+ node, "initiator-interconnect-type", &itype);
+ if (itype != NULL) {
+ (void) strlcpy(bsp->iconnect_type, itype, 16);
+ } else {
+ bsp->iconnect_type[0] = '\0';
+ }
return (DI_WALK_TERMINATE);
}
@@ -521,7 +756,7 @@ do_stat_dev(
clp->ap_busy = 0; /* no way to determine state change */
clp->ap_status_time = (time_t)-1;
- get_hw_info(node, clp);
+ get_hw_info(node, clp, DEV_APID);
/* Link it in */
listp->next = lap->listp;
@@ -532,29 +767,50 @@ do_stat_dev(
/* fill in device type, vid, pid from properties */
static void
-get_hw_info(di_node_t node, cfga_list_data_t *clp)
+get_hw_info(di_node_t node, cfga_list_data_t *clp, dyncomp_t type)
{
char *cp = NULL;
char *inq_vid, *inq_pid;
+ char client_inst[MAXNAMELEN];
/*
* Fill in type information
*/
- cp = (char *)get_device_type(node);
+ cp = (char *)get_device_type(node, type);
if (cp == NULL) {
cp = (char *)GET_MSG_STR(ERR_UNAVAILABLE);
}
(void) snprintf(clp->ap_type, sizeof (clp->ap_type), "%s", S_STR(cp));
- /*
- * Fill in vendor and product ID.
- */
- if ((di_prop_lookup_strings(DDI_DEV_T_ANY, node,
- "inquiry-product-id", &inq_pid) == 1) &&
- (di_prop_lookup_strings(DDI_DEV_T_ANY, node,
- "inquiry-vendor-id", &inq_vid) == 1)) {
- (void) snprintf(clp->ap_info, sizeof (clp->ap_info),
- "%s %s", inq_vid, inq_pid);
+ if (type == DEV_APID) {
+ /*
+ * Fill in vendor and product ID.
+ */
+ if ((di_prop_lookup_strings(DDI_DEV_T_ANY, node,
+ "inquiry-product-id", &inq_pid) == 1) &&
+ (di_prop_lookup_strings(DDI_DEV_T_ANY, node,
+ "inquiry-vendor-id", &inq_vid) == 1)) {
+ (void) snprintf(clp->ap_info, sizeof (clp->ap_info),
+ "%s %s", inq_vid, inq_pid);
+ }
+ } else {
+ if ((di_driver_name(node) != NULL) &&
+ (di_instance(node) != -1)) {
+ if (clp->ap_info == NULL) {
+ (void) snprintf(client_inst, MAXNAMELEN - 1,
+ "%s%d", di_driver_name(node),
+ di_instance(node));
+ (void) snprintf(clp->ap_info, MAXNAMELEN - 1,
+ "Client Device: %s", client_inst);
+ } else {
+ (void) snprintf(client_inst, MAXNAMELEN - 1,
+ "(%s%d)", di_driver_name(node),
+ di_instance(node));
+ (void) strlcat(clp->ap_info, client_inst,
+ CFGA_INFO_LEN);
+ }
+ }
+
}
}
@@ -563,7 +819,7 @@ get_hw_info(di_node_t node, cfga_list_data_t *clp)
* derive it from minor node type
*/
static char *
-get_device_type(di_node_t node)
+get_device_type(di_node_t node, dyncomp_t type)
{
char *name = NULL;
int *inq_dtype;
@@ -582,7 +838,9 @@ get_device_type(di_node_t node)
if (device_list[i].itype == DTYPE_UNKNOWN)
continue;
if (itype == device_list[i].itype) {
- name = (char *)device_list[i].name;
+ name = (type == DEV_APID) ?
+ (char *)device_list[i].name :
+ (char *)device_list[i].pathname;
break;
}
}
@@ -599,7 +857,9 @@ get_device_type(di_node_t node)
if (device_list[i].ntype &&
(strcmp(nodetype, device_list[i].ntype)
== 0)) {
- name = (char *)device_list[i].name;
+ name = (type == DEV_APID) ?
+ (char *)device_list[i].name :
+ (char *)device_list[i].pathname;
break;
}
}
@@ -665,23 +925,10 @@ list_ext_postprocess(
static cfga_stat_t
bus_devinfo_to_recep_state(uint_t bus_di_state)
{
- cfga_stat_t rs;
+ if (bus_di_state & (DI_BUS_QUIESCED | DI_BUS_DOWN))
+ return (CFGA_STAT_DISCONNECTED);
- switch (bus_di_state) {
- case DI_BUS_QUIESCED:
- case DI_BUS_DOWN:
- rs = CFGA_STAT_DISCONNECTED;
- break;
- /*
- * NOTE: An explicit flag for active should probably be added to
- * libdevinfo.
- */
- default:
- rs = CFGA_STAT_CONNECTED;
- break;
- }
-
- return (rs);
+ return (CFGA_STAT_CONNECTED);
}
/*
@@ -690,15 +937,11 @@ bus_devinfo_to_recep_state(uint_t bus_di_state)
static cfga_stat_t
dev_devinfo_to_occupant_state(uint_t dev_di_state)
{
- /* Driver attached ? */
- if ((dev_di_state & DI_DRIVER_DETACHED) != DI_DRIVER_DETACHED) {
+ if (dev_di_state & (DI_DEVICE_OFFLINE | DI_DEVICE_DOWN))
+ return (CFGA_STAT_UNCONFIGURED);
+
+ if (!(dev_di_state & DI_DRIVER_DETACHED))
return (CFGA_STAT_CONFIGURED);
- }
- if ((dev_di_state & DI_DEVICE_OFFLINE) == DI_DEVICE_OFFLINE ||
- (dev_di_state & DI_DEVICE_DOWN) == DI_DEVICE_DOWN) {
- return (CFGA_STAT_UNCONFIGURED);
- } else {
- return (CFGA_STAT_NONE);
- }
+ return (CFGA_STAT_NONE);
}
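
The new ap_type logic in do_stat_bus reduces to a small string transform on the HBA's "initiator-interconnect-type" property. The sketch below restates it outside the plugin; the SCFGA_BUS_TYPE value ("scsi-bus") and the "SAS" sample property value are assumptions for illustration, with a SAS HBA such as pmcs expected to show up as "scsi-sas".

#include <stdio.h>
#include <string.h>
#include <ctype.h>

#define SCFGA_BUS_TYPE  "scsi-bus"      /* assumed SPI/default ap_type */

/*
 * Map an HBA's "initiator-interconnect-type" property value to the
 * cfgadm ap_type: SPI keeps the generic type, anything else becomes
 * "scsi-" plus the lower-cased interconnect name.
 */
static void
itype_to_ap_type(const char *itype, char *buf, size_t len)
{
        size_t i;

        if (itype == NULL || *itype == '\0' || strcmp(itype, "SPI") == 0) {
                (void) snprintf(buf, len, "%s", SCFGA_BUS_TYPE);
                return;
        }
        (void) snprintf(buf, len, "scsi-%s", itype);
        for (i = 0; buf[i] != '\0'; i++)
                buf[i] = tolower((unsigned char)buf[i]);
}

int
main(void)
{
        char ap_type[32];

        itype_to_ap_type("SAS", ap_type, sizeof (ap_type));
        (void) printf("%s\n", ap_type);         /* scsi-sas */
        itype_to_ap_type("SPI", ap_type, sizeof (ap_type));
        (void) printf("%s\n", ap_type);         /* scsi-bus */
        return (0);
}
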
diff --git a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_rcm.c b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_rcm.c
index 44bb5293c3..f65867f4ba 100644
--- a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_rcm.c
+++ b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_rcm.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "cfga_scsi.h"
#define MAX_FORMAT 80
@@ -59,7 +56,11 @@ scsi_rcm_offline(char **rsrclist, char **errstring, cfga_flags_t flags)
if ((rret = rcm_request_offline_list(rcm_handle, rsrclist, rflags,
&rinfo)) != RCM_SUCCESS) {
- cfga_err(errstring, 0, ERRARG_RCM_OFFLINE, 0);
+ if ((flags & FLAG_CLIENT_DEV) == FLAG_CLIENT_DEV) {
+ cfga_err(errstring, 0, ERRARG_RCM_CLIENT_OFFLINE, 0);
+ } else {
+ cfga_err(errstring, 0, ERRARG_RCM_OFFLINE, 0);
+ }
if (rinfo) {
(void) scsi_rcm_info_table(rinfo, errstring);
rcm_free_info(rinfo);
@@ -69,7 +70,7 @@ scsi_rcm_offline(char **rsrclist, char **errstring, cfga_flags_t flags)
rflags & ~RCM_FORCE, NULL);
ret = SCFGA_BUSY;
}
- rcm_free_handle(rcm_handle);
+ (void) rcm_free_handle(rcm_handle);
return (ret);
}
@@ -102,7 +103,7 @@ scsi_rcm_online(char **rsrclist, char **errstring, cfga_flags_t flags)
}
ret = SCFGA_BUSY;
}
- rcm_free_handle(rcm_handle);
+ (void) rcm_free_handle(rcm_handle);
return (ret);
}
@@ -136,7 +137,7 @@ scsi_rcm_remove(char **rsrclist, char **errstring, cfga_flags_t flags)
ret = SCFGA_BUSY;
}
- rcm_free_handle(rcm_handle);
+ (void) rcm_free_handle(rcm_handle);
return (ret);
}
@@ -182,7 +183,7 @@ scsi_rcm_suspend(char **rsrclist, char **errstring, cfga_flags_t flags,
(rflags & (~RCM_FORCE)), NULL);
ret = SCFGA_BUSY;
}
- rcm_free_handle(rcm_handle);
+ (void) rcm_free_handle(rcm_handle);
return (ret);
}
@@ -220,7 +221,7 @@ scsi_rcm_resume(char **rsrclist, char **errstring, cfga_flags_t flags,
}
ret = SCFGA_BUSY;
}
- rcm_free_handle(rcm_handle);
+ (void) rcm_free_handle(rcm_handle);
return (ret);
}
diff --git a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_scsi.c b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_scsi.c
index 07391c22d3..c11f48165f 100644
--- a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_scsi.c
+++ b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_scsi.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,13 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-
#include "cfga_scsi.h"
/*
@@ -142,6 +138,10 @@ cfga_private_func(
return (err_cvt(ret));
}
+ if (apidt.dyntype == PATH_APID) {
+ return (CFGA_OPNOTSUPP);
+ }
+
if (options != NULL)
apidt.flags |= FLAG_DISABLE_RCM;
diff --git a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_scsi.h b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_scsi.h
index 8e4748c4cf..b0aab187a7 100644
--- a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_scsi.h
+++ b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_scsi.h
@@ -20,15 +20,13 @@
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _CFGA_SCSI_H
#define _CFGA_SCSI_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -118,6 +116,7 @@ typedef enum {
SCFGA_REPLACE_DEV,
SCFGA_WALK_NODE,
SCFGA_WALK_MINOR,
+ SCFGA_WALK_PATH,
SCFGA_BUS_QUIESCE,
SCFGA_BUS_UNQUIESCE,
SCFGA_BUS_GETSTATE,
@@ -141,6 +140,12 @@ typedef enum {
SCFGA_CONTINUE
} scfga_recur_t;
+typedef enum {
+ NODYNCOMP = 1,
+ DEV_APID,
+ PATH_APID
+} dyncomp_t;
+
/* Structures for tree walking code */
@@ -180,7 +185,8 @@ typedef struct {
typedef struct {
char *hba_phys;
char *dyncomp;
- char *path;
+ dyncomp_t dyntype; /* is pathinfo or dev apid? */
+ char *path; /* for apid with device dyn comp. */
uint_t flags;
} apid_t;
@@ -192,6 +198,9 @@ typedef struct {
#define FLAG_DISABLE_RCM 0x01
#define FLAG_USE_DIFORCE 0x02
+/* internal use for handling pathinfo */
+#define FLAG_CLIENT_DEV 0x04
+
/* Message ids */
typedef enum {
@@ -239,6 +248,7 @@ ERR_RCM_HANDLE,
ERRARG_RCM_SUSPEND,
ERRARG_RCM_RESUME,
ERRARG_RCM_OFFLINE,
+ERRARG_RCM_CLIENT_OFFLINE,
ERRARG_RCM_ONLINE,
ERRARG_RCM_REMOVE,
@@ -323,6 +333,7 @@ typedef struct {
#define DYN_SEP "::"
#define MINOR_SEP ":"
+#define PATH_APID_DYN_SEP ","
#define S_FREE(x) (((x) != NULL) ? (free(x), (x) = NULL) : (void *)0)
#define S_STR(x) (((x) == NULL) ? "" : (x))
@@ -376,6 +387,7 @@ scfga_ret_t do_list(apid_t *apidp, scfga_cmd_t cmd,
ldata_list_t **llpp, int *nelem, char **errstring);
scfga_ret_t list_ext_postprocess(ldata_list_t **llpp, int nelem,
cfga_list_data_t **ap_id_list, int *nlistp, char **errstring);
+int stat_path_info(di_node_t root, void *arg, int *l_errnop);
/* Conversion routines */
@@ -385,6 +397,7 @@ scfga_ret_t apid_to_path(const char *hba_phys, const char *dyncomp,
char **pathpp, int *l_errnop);
scfga_ret_t make_dyncomp(di_node_t node, const char *physpath,
char **dyncompp, int *l_errnop);
+scfga_ret_t make_path_dyncomp(di_path_t path, char **dyncomp, int *l_errnop);
/* RCM routines */
@@ -411,6 +424,8 @@ void list_free(ldata_list_t **llpp);
int known_state(di_node_t node);
scfga_ret_t devctl_cmd(const char *ap_id, scfga_cmd_t cmd,
uint_t *statep, int *l_errnop);
+scfga_ret_t path_apid_state_change(apid_t *apidp, scfga_cmd_t cmd,
+ cfga_flags_t flags, char **errstring, int *l_errnop, msgid_t errid);
scfga_ret_t invoke_cmd(const char *func, apid_t *apidt, prompt_t *prp,
cfga_flags_t flags, char **errstring);
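
The dyncomp_t enum and PATH_APID_DYN_SEP above are what let the plugin tell a pathinfo ap_id from a device ap_id: a ',' in the dynamic component marks a path. A rough standalone restatement of that classification follows, mirroring the apidt_create change in cfga_utils.c further down; the sample dynamic components are hypothetical.

#include <stdio.h>
#include <string.h>

#define PATH_APID_DYN_SEP       ","     /* as defined in cfga_scsi.h above */

typedef enum { NODYNCOMP = 1, DEV_APID, PATH_APID } dyncomp_t;

/* Classify a dynamic component the way apidt_create does. */
static dyncomp_t
classify_dyncomp(const char *dyncomp)
{
        if (dyncomp == NULL)
                return (NODYNCOMP);
        if (strstr(dyncomp, PATH_APID_DYN_SEP) != NULL)
                return (PATH_APID);             /* pathinfo ap_id */
        return (DEV_APID);                      /* device ap_id */
}

int
main(void)
{
        /* hypothetical dynamic components, for illustration only */
        (void) printf("%d\n", classify_dyncomp("w5000c50000d756aa,0"));
        (void) printf("%d\n", classify_dyncomp("sd3"));
        (void) printf("%d\n", classify_dyncomp(NULL));
        return (0);
}
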
diff --git a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_utils.c b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_utils.c
index 65c4e9f96c..db570fd22a 100644
--- a/usr/src/lib/cfgadm_plugins/scsi/common/cfga_utils.c
+++ b/usr/src/lib/cfgadm_plugins/scsi/common/cfga_utils.c
@@ -20,12 +20,10 @@
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "cfga_scsi.h"
#include <libgen.h>
#include <limits.h>
@@ -125,6 +123,7 @@ msgcvt_t str_tbl[] = {
{ERRARG_RCM_SUSPEND, 0, 1, "failed to suspend: "},
{ERRARG_RCM_RESUME, 0, 1, "failed to resume: "},
{ERRARG_RCM_OFFLINE, 0, 1, "failed to offline: "},
+{ERRARG_RCM_CLIENT_OFFLINE, 0, 1, "failed to offline a client device: "},
{ERRARG_RCM_ONLINE, 0, 1, "failed to online: "},
{ERRARG_RCM_REMOVE, 0, 1, "failed to remove: "},
@@ -327,6 +326,7 @@ pathdup(const char *path, int *l_errnop)
return (dup);
}
+
scfga_ret_t
apidt_create(const char *ap_id, apid_t *apidp, char **errstring)
{
@@ -355,11 +355,22 @@ apidt_create(const char *ap_id, apid_t *apidp, char **errstring)
/* Remove the dynamic component from the base */
*dyn = '\0';
+ } else {
+ apidp->dyntype = NODYNCOMP;
+ }
+
+ /* get dyn comp type */
+ if (dyncomp != NULL) {
+ if (strstr(dyncomp, PATH_APID_DYN_SEP) != NULL) {
+ apidp->dyntype = PATH_APID;
+ } else {
+ apidp->dyntype = DEV_APID;
+ }
}
/* Create the path */
- if ((ret = apid_to_path(hba_phys, dyncomp, &path, &l_errno))
- != SCFGA_OK) {
+ if ((ret = apid_to_path(hba_phys, dyncomp, &path,
+ &l_errno)) != SCFGA_OK) {
cfga_err(errstring, l_errno, ERR_OP_FAILED, 0);
goto err;
}
@@ -471,6 +482,8 @@ walk_tree(
if (cmd == SCFGA_WALK_NODE) {
rv = di_walk_node(walk_root, up->node_args.flags, arg,
up->node_args.fcn);
+ } else if (cmd == SCFGA_WALK_PATH) {
+ rv = stat_path_info(walk_root, arg, l_errnop);
} else {
assert(cmd == SCFGA_WALK_MINOR);
rv = di_walk_minor(walk_root, up->minor_args.nodetype, 0, arg,
@@ -509,7 +522,8 @@ invoke_cmd(
* Determine if the func has an equal sign; only compare up to
* the equals
*/
- for (len = 0; func[len] != 0 && func[len] != '='; len++);
+ for (len = 0; func[len] != 0 && func[len] != '='; len++) {
+ };
for (i = 0; i < N_HW_CMDS; i++) {
const char *s = GET_MSG_STR(hw_cmds[i].str_id);
@@ -617,10 +631,10 @@ cfga_led_msg(struct cfga_msg *msgp, apid_t *apidp, led_strid_t led,
if ((apidp == NULL) || (apidp->dyncomp == NULL)) {
return;
}
- snprintf(led_msg, sizeof (led_msg), "%-23s\t%s=%s\n",
- basename(apidp->dyncomp),
- dgettext(TEXT_DOMAIN, led_strs[led]),
- dgettext(TEXT_DOMAIN, led_mode_strs[mode]));
+ (void) snprintf(led_msg, sizeof (led_msg), "%-23s\t%s=%s\n",
+ basename(apidp->dyncomp),
+ dgettext(TEXT_DOMAIN, led_strs[led]),
+ dgettext(TEXT_DOMAIN, led_mode_strs[mode]));
(void) (*msgp->message_routine)(msgp->appdata_ptr, led_msg);
}
@@ -722,6 +736,211 @@ out:
}
}
+/*
+ * Check to see if the given pi_node is the last path to the client device.
+ *
+ * Return:
+ * 0: if there is another path available.
+ * -1: if no other path is available.
+ */
+static int
+check_available_path(
+ di_node_t client_node,
+ di_path_t pi_node)
+{
+ di_path_state_t pi_state;
+ di_path_t next_pi = DI_PATH_NIL;
+
+ if (((pi_state = di_path_state(pi_node)) != DI_PATH_STATE_ONLINE) &&
+ (pi_state != DI_PATH_STATE_STANDBY)) {
+ /* it is not the last available path */
+ return (0);
+ }
+
+ while (next_pi = di_path_client_next_path(client_node, next_pi)) {
+ /* if another pi node is available, return 0 */
+ if ((next_pi != pi_node) &&
+ (((pi_state = di_path_state(next_pi)) ==
+ DI_PATH_STATE_ONLINE) ||
+ pi_state == DI_PATH_STATE_STANDBY)) {
+ return (0);
+ }
+ }
+ return (-1);
+}
+
+scfga_ret_t
+path_apid_state_change(
+ apid_t *apidp,
+ scfga_cmd_t cmd,
+ cfga_flags_t flags,
+ char **errstring,
+ int *l_errnop,
+ msgid_t errid)
+{
+ di_node_t root, walk_root, client_node;
+ di_path_t pi_node = DI_PATH_NIL;
+ char *root_path, *cp, *client_path, devpath[MAXPATHLEN];
+ int len, found = 0;
+ scfga_ret_t ret;
+ char *dev_list[2] = {NULL};
+
+ *l_errnop = 0;
+
+ /* Make sure apid is pathinfo associated apid. */
+ if ((apidp->dyntype != PATH_APID) || (apidp->dyncomp == NULL)) {
+ return (SCFGA_LIB_ERR);
+ }
+
+ if ((cmd != SCFGA_DEV_CONFIGURE) && (cmd != SCFGA_DEV_UNCONFIGURE)) {
+ return (SCFGA_LIB_ERR);
+ }
+
+ if ((root_path = strdup(apidp->hba_phys)) == NULL) {
+ *l_errnop = errno;
+ return (SCFGA_LIB_ERR);
+ }
+
+ /* Fix up path for di_init() */
+ len = strlen(DEVICES_DIR);
+ if (strncmp(root_path, DEVICES_DIR SLASH,
+ len + strlen(SLASH)) == 0) {
+ cp = root_path + len;
+ (void) memmove(root_path, cp, strlen(cp) + 1);
+ } else if (*root_path != '/') {
+ *l_errnop = 0;
+ S_FREE(root_path);
+ return (SCFGA_ERR);
+ }
+
+ /* Remove dynamic component if any */
+ if ((cp = GET_DYN(root_path)) != NULL) {
+ *cp = '\0';
+ }
+
+ /* Remove minor name if any */
+ if ((cp = strrchr(root_path, ':')) != NULL) {
+ *cp = '\0';
+ }
+
+ /*
+ * Cached snapshots are always rooted at "/"
+ */
+
+ /* Get a snapshot */
+ if ((root = di_init("/", DINFOCACHE)) == DI_NODE_NIL) {
+ *l_errnop = errno;
+ S_FREE(root_path);
+ return (SCFGA_ERR);
+ }
+
+ /*
+ * Lookup the subtree of interest
+ */
+ walk_root = di_lookup_node(root, root_path);
+
+ if (walk_root == DI_NODE_NIL) {
+ *l_errnop = errno;
+ di_fini(root);
+ S_FREE(root_path);
+ return (SCFGA_LIB_ERR);
+ }
+
+
+ if ((pi_node = di_path_next_client(walk_root, pi_node)) ==
+ DI_PATH_NIL) {
+ /* the path ap_id was not found */
+ di_fini(root);
+ S_FREE(root_path);
+ return (SCFGA_APID_NOEXIST);
+ }
+
+ do {
+ /* check the length first. */
+ if (strlen(di_path_bus_addr(pi_node)) !=
+ strlen(apidp->dyncomp)) {
+ continue;
+ }
+
+ /* compare bus addr. */
+ if (strcmp(di_path_bus_addr(pi_node), apidp->dyncomp) == 0) {
+ found = 1;
+ break;
+ }
+ pi_node = di_path_next_client(root, pi_node);
+ } while (pi_node != DI_PATH_NIL);
+
+ if (!found) {
+ di_fini(root);
+ S_FREE(root_path);
+ return (SCFGA_APID_NOEXIST);
+ }
+
+ /* Get client node path. */
+ client_node = di_path_client_node(pi_node);
+ if (client_node == DI_NODE_NIL) {
+ di_fini(root);
+ S_FREE(root_path);
+ return (SCFGA_ERR);
+ } else {
+ client_path = di_devfs_path(client_node);
+ if (client_path == NULL) {
+ di_fini(root);
+ S_FREE(root_path);
+ return (SCFGA_ERR);
+ }
+
+ if ((apidp->flags & FLAG_DISABLE_RCM) == 0) {
+ if (cmd == SCFGA_DEV_UNCONFIGURE) {
+ if (check_available_path(client_node,
+ pi_node) != 0) {
+ /*
+ * last path. check if unconfiguring
+ * is okay.
+ */
+ (void) snprintf(devpath,
+ strlen(DEVICES_DIR) +
+ strlen(client_path) + 1, "%s%s",
+ DEVICES_DIR, client_path);
+ dev_list[0] = devpath;
+ flags |= FLAG_CLIENT_DEV;
+ ret = scsi_rcm_offline(dev_list,
+ errstring, flags);
+ if (ret != SCFGA_OK) {
+ di_fini(root);
+ di_devfs_path_free(client_path);
+ S_FREE(root_path);
+ return (ret);
+ }
+ }
+ }
+ }
+ }
+
+ ret = devctl_cmd(apidp->path, cmd, NULL, l_errnop);
+ if (ret != SCFGA_OK) {
+ cfga_err(errstring, *l_errnop, errid, 0);
+
+ /*
+ * If an unconfigure fails, cancel the RCM offline.
+ * Discard any RCM failures so that the devctl
+ * failure will still be reported.
+ */
+ if ((apidp->flags & FLAG_DISABLE_RCM) == 0) {
+ if (cmd == SCFGA_DEV_UNCONFIGURE)
+ (void) scsi_rcm_online(dev_list,
+ errstring, flags);
+ }
+ }
+
+ di_devfs_path_free(client_path);
+ di_fini(root);
+ S_FREE(root_path);
+
+ return (ret);
+}
+
+
scfga_ret_t
devctl_cmd(
const char *physpath,
@@ -840,7 +1059,7 @@ known_state(di_node_t node)
* offline.
*/
if ((state & DI_DEVICE_OFFLINE) == DI_DEVICE_OFFLINE ||
- (state & DI_DRIVER_DETACHED) != DI_DRIVER_DETACHED) {
+ (state & DI_DRIVER_DETACHED) != DI_DRIVER_DETACHED) {
return (1);
}
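
check_available_path above answers one question for path_apid_state_change: is the path being unconfigured the last usable one, so that RCM must be consulted for the client device first? The fragment below sketches only the shape of that test with the public libdevinfo path interfaces; it is not the plugin's function itself.

#include <sys/types.h>
#include <libdevinfo.h>

/*
 * Return B_TRUE if the client still has an ONLINE or STANDBY path
 * other than "victim", i.e. unconfiguring "victim" would not remove
 * the last usable path to the device.
 */
boolean_t
other_path_available(di_node_t client, di_path_t victim)
{
        di_path_t pi = DI_PATH_NIL;
        di_path_state_t st;

        while ((pi = di_path_client_next_path(client, pi)) != DI_PATH_NIL) {
                if (pi == victim)
                        continue;
                st = di_path_state(pi);
                if (st == DI_PATH_STATE_ONLINE || st == DI_PATH_STATE_STANDBY)
                        return (B_TRUE);
        }
        return (B_FALSE);
}
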
diff --git a/usr/src/lib/fm/topo/modules/common/disk/disk_common.c b/usr/src/lib/fm/topo/modules/common/disk/disk_common.c
index 174d2dc64e..867e8a252e 100644
--- a/usr/src/lib/fm/topo/modules/common/disk/disk_common.c
+++ b/usr/src/lib/fm/topo/modules/common/disk/disk_common.c
@@ -611,7 +611,7 @@ disk_di_node_add(di_node_t node, char *devid, disk_cbdata_t *cbp)
pnode = NULL;
while ((pnode = di_path_client_next_path(node, pnode)) != NULL) {
if ((ret = di_path_prop_lookup_strings(pnode,
- "target-port", &s)) > 0)
+ SCSI_ADDR_PROP_TARGET_PORT, &s)) > 0)
portcount += ret;
pathcount++;
}
@@ -627,16 +627,17 @@ disk_di_node_add(di_node_t node, char *devid, disk_cbdata_t *cbp)
goto error;
if ((ret = di_prop_lookup_strings(DDI_DEV_T_ANY, node,
- "target-port", &s)) > 0) {
+ SCSI_ADDR_PROP_TARGET_PORT, &s)) > 0) {
if ((dnode->ddn_target_port = topo_mod_zalloc(mod,
ret * sizeof (uintptr_t))) == NULL)
goto error;
-
dnode->ddn_target_port_count = ret;
for (i = 0; i < ret; i++) {
if ((dnode->ddn_target_port[i] =
- topo_mod_strdup(mod, s)) == NULL)
+ topo_mod_strdup(mod,
+ scsi_wwnstr_skip_ua_prefix(s))) ==
+ NULL)
goto error;
s += strlen(s) + 1;
@@ -672,10 +673,12 @@ disk_di_node_add(di_node_t node, char *devid, disk_cbdata_t *cbp)
goto error;
if ((ret = di_path_prop_lookup_strings(pnode,
- "target-port", &s)) > 0) {
+ SCSI_ADDR_PROP_TARGET_PORT, &s)) > 0) {
for (i = 0; i < ret; i++) {
if ((dnode->ddn_target_port[portcount] =
- topo_mod_strdup(mod, s)) == NULL)
+ topo_mod_strdup(mod,
+ scsi_wwnstr_skip_ua_prefix(s))) ==
+ NULL)
goto error;
portcount++;
diff --git a/usr/src/lib/libdevice/llib-ldevice b/usr/src/lib/libdevice/llib-ldevice
index cb8c831cad..e836d588c6 100644
--- a/usr/src/lib/libdevice/llib-ldevice
+++ b/usr/src/lib/libdevice/llib-ldevice
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -23,60 +22,10 @@
/* PROTOLIB1 */
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/types.h>
+#include <sys/devctl.h>
#include "libdevice.h"
-
-/*
- * usr/src/lib/libdevice
- */
-
-/* devctl.c */
-devctl_hdl_t devctl_device_acquire(char *devfs_path, uint_t flags);
-devctl_hdl_t devctl_bus_acquire(char *devfs_path, uint_t flags);
-void devctl_release(devctl_hdl_t hdl);
-int devctl_pm_raisepower(devctl_hdl_t hdl);
-int devctl_pm_changepowerlow(devctl_hdl_t hdl);
-int devctl_pm_changepowerhigh(devctl_hdl_t hdl);
-int devctl_pm_idlecomponent(devctl_hdl_t hdl);
-int devctl_pm_busycomponent(devctl_hdl_t hdl);
-int devctl_pm_testbusy(devctl_hdl_t hdl, uint_t *busyp);
-int devctl_pm_failsuspend(devctl_hdl_t hdl);
-int devctl_pm_bus_teststrict(devctl_hdl_t hdl, uint_t *strict);
-int devctl_pm_device_changeonresume(devctl_hdl_t hdl);
-int devctl_pm_device_no_lower_power(devctl_hdl_t hdl);
-int devctl_pm_bus_no_invol(devctl_hdl_t hdl);
-int devctl_pm_device_promprintf(devctl_hdl_t hdl);
-int devctl_device_offline(devctl_hdl_t hdl);
-int devctl_device_online(devctl_hdl_t hdl);
-int devctl_device_reset(devctl_hdl_t hdl);
-int devctl_device_getstate(devctl_hdl_t hdl, uint_t *statep);
-int devctl_bus_quiesce(devctl_hdl_t hdl);
-int devctl_bus_unquiesce(devctl_hdl_t hdl);
-int devctl_bus_reset(devctl_hdl_t hdl);
-int devctl_bus_resetall(devctl_hdl_t hdl);
-int devctl_bus_getstate(devctl_hdl_t hdl, uint_t *statep);
-devctl_hdl_t devctl_ap_acquire(char *devfs_path, uint_t flags);
-devctl_hdl_t devctl_pm_dev_acquire(char *devfs_path, uint_t flags);
-devctl_hdl_t devctl_pm_bus_acquire(char *devfs_path, uint_t flags);
-int devctl_ap_insert(devctl_hdl_t hdl, nvlist_t *ap_data);
-int devctl_ap_remove(devctl_hdl_t hdl, nvlist_t *ap_data);
-int devctl_ap_connect(devctl_hdl_t hdl, nvlist_t *ap_data);
-int devctl_ap_disconnect(devctl_hdl_t hdl, nvlist_t *ap_data);
-int devctl_ap_configure(devctl_hdl_t hdl, nvlist_t *ap_data);
-int devctl_ap_unconfigure(devctl_hdl_t hdl, nvlist_t *ap_data);
-int devctl_ap_getstate(devctl_hdl_t hdl, nvlist_t *ap_data,
- devctl_ap_state_t *statep);
-devctl_ddef_t devctl_ddef_alloc(char *, int);
-void devctl_ddef_free(devctl_ddef_t);
-int devctl_ddef_int(devctl_ddef_t, char *, int32_t);
-int devctl_ddef_int_array(devctl_ddef_t, char *, int, int32_t *value);
-int devctl_ddef_string(devctl_ddef_t ddef_hdl, char *name, char *value);
-int devctl_ddef_string_array(devctl_ddef_t, char *, int, char **);
-int devctl_ddef_byte_array(devctl_ddef_t, char *, int, uchar_t *value);
-int devctl_bus_dev_create(devctl_hdl_t, devctl_ddef_t, uint_t, devctl_hdl_t *);
-char *devctl_get_pathname(devctl_hdl_t, char *, size_t);
diff --git a/usr/src/lib/libdevid/libdevid.h b/usr/src/lib/libdevid/libdevid.h
index f5df7c10e6..50a0dcd142 100644
--- a/usr/src/lib/libdevid/libdevid.h
+++ b/usr/src/lib/libdevid/libdevid.h
@@ -53,6 +53,7 @@ extern int scsi_wwnstr_to_wwn(const char *wwnstr, uint64_t *wwnp);
extern char *scsi_wwn_to_wwnstr(uint64_t wwn,
int unit_address_form, char *wwnstr);
extern void scsi_wwnstr_hexcase(char *wwnstr, int lower_case);
+extern const char *scsi_wwnstr_skip_ua_prefix(const char *wwnstr);
extern void scsi_free_wwnstr(char *wwnstr);
#ifdef SCSI_ADDR_PROP_LUN64
diff --git a/usr/src/lib/libdevid/mapfile-vers b/usr/src/lib/libdevid/mapfile-vers
index 48d9e987ad..fca14daa8f 100644
--- a/usr/src/lib/libdevid/mapfile-vers
+++ b/usr/src/lib/libdevid/mapfile-vers
@@ -67,6 +67,7 @@ SUNWprivate_1.1 {
scsi_lun_to_lun64;
scsi_wwn_to_wwnstr;
scsi_wwnstr_hexcase;
+ scsi_wwnstr_skip_ua_prefix;
scsi_wwnstr_to_wwn;
local:
*;
diff --git a/usr/src/lib/libdevinfo/devinfo.c b/usr/src/lib/libdevinfo/devinfo.c
index db6a62cab1..2431c87a96 100644
--- a/usr/src/lib/libdevinfo/devinfo.c
+++ b/usr/src/lib/libdevinfo/devinfo.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Interfaces for getting device configuration data from kernel
* through the devinfo driver.
@@ -977,9 +975,11 @@ di_state(di_node_t node)
if (DI_NODE(node)->state & DEVI_DEVICE_OFFLINE)
result |= DI_DEVICE_OFFLINE;
if (DI_NODE(node)->state & DEVI_DEVICE_DOWN)
- result |= DI_DEVICE_OFFLINE;
+ result |= DI_DEVICE_DOWN;
if (DI_NODE(node)->state & DEVI_DEVICE_DEGRADED)
result |= DI_DEVICE_DEGRADED;
+ if (DI_NODE(node)->state & DEVI_DEVICE_REMOVED)
+ result |= DI_DEVICE_REMOVED;
if (DI_NODE(node)->state & DEVI_BUS_QUIESCED)
result |= DI_BUS_QUIESCED;
if (DI_NODE(node)->state & DEVI_BUS_DOWN)
@@ -2028,6 +2028,12 @@ di_path_state(di_path_t path)
return ((di_path_state_t)DI_PATH(path)->path_state);
}
+uint_t
+di_path_flags(di_path_t path)
+{
+ return (DI_PATH(path)->path_flags);
+}
+
char *
di_path_node_name(di_path_t path)
{
diff --git a/usr/src/lib/libdevinfo/libdevinfo.h b/usr/src/lib/libdevinfo/libdevinfo.h
index 8103c3407f..520e5fb7a1 100644
--- a/usr/src/lib/libdevinfo/libdevinfo.h
+++ b/usr/src/lib/libdevinfo/libdevinfo.h
@@ -81,6 +81,7 @@ extern "C" {
#define DI_DEVICE_OFFLINE 0x1
#define DI_DEVICE_DOWN 0x2
#define DI_DEVICE_DEGRADED 0x4
+#define DI_DEVICE_REMOVED 0x8
#define DI_BUS_QUIESCED 0x100
#define DI_BUS_DOWN 0x200
@@ -192,6 +193,7 @@ extern char *di_path_node_name(di_path_t path);
extern char *di_path_bus_addr(di_path_t path);
extern int di_path_instance(di_path_t path);
extern di_path_state_t di_path_state(di_path_t path);
+extern uint_t di_path_flags(di_path_t path);
extern char *di_path_devfs_path(di_path_t path);
extern char *di_path_client_devfs_path(di_path_t path);
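
With DI_DEVICE_REMOVED added here, and devinfo.c above now reporting DI_DEVICE_DOWN as its own bit instead of folding it into DI_DEVICE_OFFLINE, a consumer can distinguish the three conditions. A minimal walker sketch follows; the DINFOCACHE snapshot and child-first walk order are assumptions, not requirements of the patch (compile with -ldevinfo).

#include <stdio.h>
#include <libdevinfo.h>

/* report nodes the snapshot marks offline, down or removed */
static int
show_state(di_node_t node, void *arg)
{
        uint_t st = di_state(node);
        char *drv = di_driver_name(node);

        if (st & (DI_DEVICE_OFFLINE | DI_DEVICE_DOWN | DI_DEVICE_REMOVED))
                (void) printf("%s%d: state 0x%x\n",
                    drv != NULL ? drv : "<none>", di_instance(node), st);
        return (DI_WALK_CONTINUE);
}

int
main(void)
{
        di_node_t root;

        if ((root = di_init("/", DINFOCACHE)) == DI_NODE_NIL)
                return (1);
        (void) di_walk_node(root, DI_WALK_CLDFIRST, NULL, show_state);
        di_fini(root);
        return (0);
}
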
diff --git a/usr/src/lib/libdevinfo/mapfile-vers b/usr/src/lib/libdevinfo/mapfile-vers
index b1d9b5a9e8..2f3527c3f0 100644
--- a/usr/src/lib/libdevinfo/mapfile-vers
+++ b/usr/src/lib/libdevinfo/mapfile-vers
@@ -206,6 +206,7 @@ SUNWprivate_1.1 {
di_minor_devinfo;
di_node_state;
di_parent_private_data;
+ di_path_flags;
# XXX remove: di_path_(addr,next,next_client,next_phci)
di_path_addr;
di_path_next;
diff --git a/usr/src/lib/mpapi/libmpscsi_vhci/common/MP_GetMultipathLusPlugin.c b/usr/src/lib/mpapi/libmpscsi_vhci/common/MP_GetMultipathLusPlugin.c
index b15a807dfa..2f4315fb56 100644
--- a/usr/src/lib/mpapi/libmpscsi_vhci/common/MP_GetMultipathLusPlugin.c
+++ b/usr/src/lib/mpapi/libmpscsi_vhci/common/MP_GetMultipathLusPlugin.c
@@ -32,9 +32,40 @@
#include <libdevinfo.h>
+/*
+ * Checks whether there is an online path or not.
+ * - no path found returns -1.
+ * - online/standby path found returns 1.
+ * - path exists but no online/standby path found returns 0.
+ */
+static int checkAvailablePath(di_node_t node)
+{
+ di_path_t path;
+ di_path_state_t state;
+
+ if ((path = di_path_client_next_path(node, DI_PATH_NIL))
+ == DI_PATH_NIL) {
+ log(LOG_INFO, "checkAvailablePath()",
+ " - No path found");
+ return (-1);
+ }
+
+ do {
+ /* ignore the path that is neither online nor standby. */
+ if (((state = di_path_state(path)) == DI_PATH_STATE_ONLINE) ||
+ (state == DI_PATH_STATE_STANDBY)) {
+ return (1);
+ }
+ } while ((path = di_path_client_next_path(node, path)) != DI_PATH_NIL);
+
+ /* return 0 for the case that there is no online path to the node. */
+ log(LOG_INFO, "checkAvailablePath()", " - No online path found");
+ return (0);
+}
+
static int getOidList(di_node_t root_node, MP_OID_LIST *pOidList)
{
- int numNodes = 0;
+ int numNodes = 0, state;
MP_UINT64 instNum = 0;
@@ -59,8 +90,21 @@ static int getOidList(di_node_t root_node, MP_OID_LIST *pOidList)
while (DI_NODE_NIL != sv_child_node) {
- /* Skip the node which is not online. */
- if (di_state(sv_child_node) != 0) {
+ /* skip the node which is offline, down or detached. */
+ state = di_state(sv_child_node);
+ if ((state & DI_DEVICE_DOWN) ||
+ (state & DI_DEVICE_OFFLINE)) {
+ sv_child_node = di_sibling_node(sv_child_node);
+ continue;
+ }
+
+ /*
+ * skip if the node doesn't have any path available.
+ * If any path is found in the DINFOCACHE snapshot,
+ * that means the driver keeps track of the path regardless
+ * of state.
+ */
+ if (checkAvailablePath(sv_child_node) == -1) {
sv_child_node = di_sibling_node(sv_child_node);
continue;
}
diff --git a/usr/src/pkgdefs/Makefile b/usr/src/pkgdefs/Makefile
index 6acc8f4491..2dfde21be1 100644
--- a/usr/src/pkgdefs/Makefile
+++ b/usr/src/pkgdefs/Makefile
@@ -84,6 +84,8 @@ sparc_SUBDIRS= \
SUNWiopc.u \
SUNWiopc.v \
SUNWpdu \
+ SUNWpmcsr \
+ SUNWpmcsu \
SUNWpstl.u \
SUNWqfed \
SUNWqus \
@@ -149,6 +151,8 @@ i386_SUBDIRS= \
SUNWonmtst.i \
SUNWos86r \
SUNWparted \
+ SUNWpmcsr \
+ SUNWpmcsu \
SUNWpsdcr \
SUNWpsdir \
SUNWpsh \
diff --git a/usr/src/pkgdefs/SUNWhea/prototype_com b/usr/src/pkgdefs/SUNWhea/prototype_com
index f8e18385d1..4e1d00c89b 100644
--- a/usr/src/pkgdefs/SUNWhea/prototype_com
+++ b/usr/src/pkgdefs/SUNWhea/prototype_com
@@ -744,6 +744,8 @@ f none usr/include/sys/cyclic.h 644 root bin
f none usr/include/sys/cyclic_impl.h 644 root bin
f none usr/include/sys/dacf.h 644 root bin
f none usr/include/sys/dacf_impl.h 644 root bin
+f none usr/include/sys/damap.h 644 root bin
+f none usr/include/sys/damap_impl.h 644 root bin
f none usr/include/sys/dc_ki.h 644 root bin
f none usr/include/sys/ddi.h 644 root bin
f none usr/include/sys/ddifm.h 644 root bin
diff --git a/usr/src/pkgdefs/SUNWpmcsr/Makefile b/usr/src/pkgdefs/SUNWpmcsr/Makefile
new file mode 100644
index 0000000000..c5b701afcd
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsr/Makefile
@@ -0,0 +1,38 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+include ../Makefile.com
+
+TMPLFILES += postinstall preremove
+DATAFILES += depend i.pmcsconf
+
+.KEEP_STATE:
+
+all: $(FILES)
+install: all pkg
+
+include ../Makefile.targ
+include ../Makefile.prtarg
diff --git a/usr/src/pkgdefs/SUNWpmcsr/pkginfo.tmpl b/usr/src/pkgdefs/SUNWpmcsr/pkginfo.tmpl
new file mode 100644
index 0000000000..1fa8538b73
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsr/pkginfo.tmpl
@@ -0,0 +1,46 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#
+
+PKG="SUNWpmcsr"
+NAME="PMC-Sierra SAS-2 HBA driver (root)"
+ARCH="ISA"
+VERSION="ONVERS,REV=0.0.0"
+SUNW_PRODNAME="SunOS"
+SUNW_PRODVERS="RELEASE/VERSION"
+SUNW_PKGVERS="1.0"
+SUNW_PKGTYPE="root"
+MAXINST="1000"
+CATEGORY=system
+VENDOR="Sun Microsystems, Inc."
+DESC="PMC-Sierra SAS-2 HBA Driver (root)"
+CLASSES="none pmcsconf"
+HOTLINE="Please contact your local service provider"
+EMAIL=""
+BASEDIR=/
+SUNW_PKG_ALLZONES="true"
+SUNW_PKG_HOLLOW="true"
+SUNW_PKG_THISZONE="false"
diff --git a/usr/src/pkgdefs/SUNWpmcsr/postinstall.tmpl b/usr/src/pkgdefs/SUNWpmcsr/postinstall.tmpl
new file mode 100644
index 0000000000..e477826c4a
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsr/postinstall.tmpl
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+include drv_utils
+
+ALIASES1="pciex11f8,8001"
+
+pkg_drvadd -i $ALIASES1 \
+ -m '* 0600 root sys' \
+ -c scsi-self-identifying \
+ pmcs || exit 1
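
The postinstall template registers the pmcs driver through the shared pkg_drvadd helper (brought in by the drv_utils include when the template is expanded). As a rough, hedged sketch of what that registration amounts to on an installed system, the manual equivalent would be an add_drv(1M) call along the following lines; the helper's exact option handling is assumed, not taken from this patch.

    # Sketch only: approximate manual equivalent of the pkg_drvadd call above.
    add_drv -i '"pciex11f8,8001"' \
        -m '* 0600 root sys' \
        -c scsi-self-identifying \
        pmcs
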
diff --git a/usr/src/pkgdefs/SUNWpmcsr/preremove.tmpl b/usr/src/pkgdefs/SUNWpmcsr/preremove.tmpl
new file mode 100644
index 0000000000..bce1665e1f
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsr/preremove.tmpl
@@ -0,0 +1,28 @@
+#!/sbin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+include drv_utils
+
+pkg_drvrem pmcs || exit 1
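
The preremove counterpart unbinds the driver via pkg_drvrem. A minimal sketch of the manual equivalent, assuming the helper wraps rem_drv(1M):

    # Sketch only: manual counterpart of pkg_drvrem.
    rem_drv pmcs
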
diff --git a/usr/src/pkgdefs/SUNWpmcsr/prototype_com b/usr/src/pkgdefs/SUNWpmcsr/prototype_com
new file mode 100644
index 0000000000..0b404b9e87
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsr/prototype_com
@@ -0,0 +1,51 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+#
+#
+i pkginfo
+i copyright
+i depend
+i postinstall
+i preremove
+i i.pmcsconf
+
+d none kernel 0755 root sys
+d none kernel/drv 0755 root sys
+e pmcsconf kernel/drv/pmcs.conf 0644 root sys
+d none kernel/kmdb 0755 root sys
+d none kernel/misc 0755 root sys
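
For readers less familiar with prototype(4) entries: the first field is the file type and the second is the install class. The only non-default entry above is the driver configuration file, which is typed 'e' (editable) and bound to the pmcsconf class, so pkgadd hands it to the i.pmcsconf class action script on install and upgrade instead of overwriting it. Annotated restatement of that entry:

    # e        editable file, preserved/merged across upgrades
    # pmcsconf install class; pkgadd runs i.pmcsconf for objects in this class
    e pmcsconf kernel/drv/pmcs.conf 0644 root sys
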
diff --git a/usr/src/pkgdefs/SUNWpmcsr/prototype_i386 b/usr/src/pkgdefs/SUNWpmcsr/prototype_i386
new file mode 100644
index 0000000000..84039a075c
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsr/prototype_i386
@@ -0,0 +1,51 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+#
+# Include ISA independent files (prototype_com)
+#
+!include prototype_com
+#
+#
+
+# PMC-Sierra (pmcs) SAS and SATA HBA Driver
+f none kernel/drv/pmcs 0755 root sys
+d none kernel/drv/amd64 0755 root sys
+f none kernel/drv/amd64/pmcs 0755 root sys
+f none kernel/kmdb/pmcs 0555 root sys
+d none kernel/kmdb/amd64 0755 root sys
+f none kernel/kmdb/amd64/pmcs 0555 root sys
diff --git a/usr/src/pkgdefs/SUNWpmcsr/prototype_sparc b/usr/src/pkgdefs/SUNWpmcsr/prototype_sparc
new file mode 100644
index 0000000000..21131a125a
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsr/prototype_sparc
@@ -0,0 +1,49 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+#
+# Include ISA independent files (prototype_com)
+#
+!include prototype_com
+#
+#
+
+# PMC-Sierra (pmcs) SAS and SATA HBA Driver
+d none kernel/drv/sparcv9 0755 root sys
+f none kernel/drv/sparcv9/pmcs 0755 root sys
+d none kernel/kmdb/sparcv9 0755 root sys
+f none kernel/kmdb/sparcv9/pmcs 0555 root sys
diff --git a/usr/src/pkgdefs/SUNWpmcsu/Makefile b/usr/src/pkgdefs/SUNWpmcsu/Makefile
new file mode 100644
index 0000000000..fe80ac480c
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsu/Makefile
@@ -0,0 +1,36 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+include ../Makefile.com
+
+DATAFILES += depend
+
+.KEEP_STATE:
+
+all: $(FILES)
+install: all pkg
+
+include ../Makefile.targ
diff --git a/usr/src/pkgdefs/SUNWpmcsu/pkginfo.tmpl b/usr/src/pkgdefs/SUNWpmcsu/pkginfo.tmpl
new file mode 100644
index 0000000000..fc75477b09
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsu/pkginfo.tmpl
@@ -0,0 +1,46 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#
+
+PKG=SUNWpmcsu
+NAME=PMC-Sierra SAS-2 HBA driver (usr)
+ARCH="ISA"
+VERSION="ONVERS,REV=0.0.0"
+SUNW_PRODNAME="SunOS"
+SUNW_PRODVERS="RELEASE/VERSION"
+SUNW_PKGVERS="1.0"
+SUNW_PKGTYPE="root"
+MAXINST="1000"
+CATEGORY=system
+VENDOR="Sun Microsystems, Inc."
+DESC="PMC-Sierra SAS-2 HBA driver (usr)"
+CLASSES="none"
+HOTLINE="Please contact your local service provider"
+EMAIL=""
+BASEDIR=/
+SUNW_PKG_ALLZONES="true"
+SUNW_PKG_HOLLOW="true"
+SUNW_PKG_THISZONE="false"
diff --git a/usr/src/pkgdefs/SUNWpmcsu/prototype_com b/usr/src/pkgdefs/SUNWpmcsu/prototype_com
new file mode 100644
index 0000000000..971a32c03c
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsu/prototype_com
@@ -0,0 +1,47 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+#
+#
+i pkginfo
+i copyright
+i depend
+
+d none usr 0755 root sys
+d none usr/lib 0755 root bin
+d none usr/lib/mdb 0755 root sys
+d none usr/lib/mdb/kvm 0755 root sys
diff --git a/usr/src/pkgdefs/SUNWpmcsu/prototype_i386 b/usr/src/pkgdefs/SUNWpmcsu/prototype_i386
new file mode 100644
index 0000000000..6e7cebd89e
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsu/prototype_i386
@@ -0,0 +1,47 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+#
+# Include ISA independent files (prototype_com)
+#
+!include prototype_com
+#
+#
+
+d none usr/lib/mdb/kvm/amd64 0755 root sys
+f none usr/lib/mdb/kvm/pmcs.so 0555 root sys
+f none usr/lib/mdb/kvm/amd64/pmcs.so 0555 root sys
diff --git a/usr/src/pkgdefs/SUNWpmcsu/prototype_sparc b/usr/src/pkgdefs/SUNWpmcsu/prototype_sparc
new file mode 100644
index 0000000000..56b7ba0bc1
--- /dev/null
+++ b/usr/src/pkgdefs/SUNWpmcsu/prototype_sparc
@@ -0,0 +1,46 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# This required package information file contains a list of package contents.
+# The 'pkgmk' command uses this file to identify the contents of a package
+# and their location on the development machine when building the package.
+# Can be created via a text editor or through use of the 'pkgproto' command.
+
+#!search <pathname pathname ...> # where to find pkg objects
+#!include <filename> # include another 'prototype' file
+#!default <mode> <owner> <group> # default used if not specified on entry
+#!<param>=<value> # puts parameter in pkg environment
+
+#
+# Include ISA independent files (prototype_com)
+#
+!include prototype_com
+#
+#
+
+d none usr/lib/mdb/kvm/sparcv9 0755 root sys
+f none usr/lib/mdb/kvm/sparcv9/pmcs.so 0555 root sys
diff --git a/usr/src/pkgdefs/common_files/i.pmcsconf b/usr/src/pkgdefs/common_files/i.pmcsconf
new file mode 100644
index 0000000000..d81d85fb73
--- /dev/null
+++ b/usr/src/pkgdefs/common_files/i.pmcsconf
@@ -0,0 +1,137 @@
+#!/bin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+PATH=/usr/bin:/usr/sbin:$PATH; export PATH
+PREFIX=/tmp/pmcs.conf.$$
+
+add_comment_for_vhci_class()
+{
+ if grep "^# As a pHCI driver, pmcs must spec" $1 > /dev/null 2>&1; then
+ return
+ fi
+ cat >> $1 << EOF
+#
+# As a pHCI driver, pmcs must specify the vHCI class it belongs to (scsi_vhci).
+#
+EOF
+}
+
+add_comment_for_mpxio_disable()
+{
+ if grep "^# By default, MPxIO will be enabled" $1 > /dev/null 2>&1; then
+ return
+ fi
+ cat >> $1 << EOF
+#
+# By default, MPxIO will be enabled on all pmcs controllers. To disable MPxIO
+# for all pmcs controllers, set:
+#
+#mpxio-disable="yes";
+
+#
+# You can disable MPxIO on a per-HBA basis. Per port settings override the
+# global setting for the specified ports. To disable MPxIO for the controller
+# whose unit-address is 0 and whose parent is /pci@0/pci10de,5d@e, set:
+#
+#name="pmcs" parent="/pci@0/pci10de,5d@e" unit-address="0" mpxio-disable="yes";
+
+EOF
+}
+
+add_comment_for_debug()
+{
+ if grep "^# Global debug settings may be set " $1 > /dev/null 2>&1; then
+ return
+ fi
+ cat >> $1 << EOF
+#
+# Global debug settings may be set using the 'pmcs-debug-mask' property.
+# Any combination of values may be set according to the following:
+#
+# 0x0001 - Basic info; shouldn't print anything during normal operation
+# 0x0002 - Small amount of I/O information during normal operation
+# 0x0004 - Much more info during I/O; will impact performance
+# 0x0008 - Very verbose at all times; definitely a performance impact
+# 0x0010 - Debug information with regard to discovery/configuration
+# 0x0020 - Debug information with regard to iport
+# 0x0040 - Debug information with regard to map
+# 0x0080 - Report on SCSI underflow status
+# 0x0100 - Report SCSI status for every command
+# 0x0200 - PHY lock/unlock debug (very noisy)
+# 0x0400 - Debug information with regard to device state
+#
+pmcs-debug-mask=0x71;
+
+EOF
+}
+
+update_pmcsconf()
+{
+ NEWHDR1=$PREFIX.hdr1
+ NEWHDR2=$PREFIX.hdr2
+ TMPFILE=$PREFIX.tmp
+
+ # replace old copyright with new one
+ HEADER="^#.* Copyright.*Sun Microsystems.*$"
+ if grep "$HEADER" $1 > $NEWHDR1 2>/dev/null; then
+ # replace / by \/
+ sed "s/\\//\\\\\\//g" $NEWHDR1 > $NEWHDR2 2>/dev/null
+ if sed "s/$HEADER/`cat $NEWHDR2`/" $2 > $TMPFILE 2>/dev/null
+ then
+ cp $TMPFILE $2
+ fi
+ fi
+
+ # ddi-vhci-class: Add comment
+ add_comment_for_vhci_class $2
+
+ # ddi-vhci-class: Add property
+ grep '^ddi-vhci-class' $2 > /dev/null 2>&1
+ if [ $? != 0 ] ; then
+ # add the property
+ echo 'ddi-vhci-class="scsi_vhci";' >> $2
+ echo '' >> $2
+ fi
+
+ # mpxio-disable: Add comment
+ add_comment_for_mpxio_disable $2
+
+ # pmcs-debug-mask: Add comment
+ add_comment_for_debug $2
+
+ rm -f $NEWHDR1 $NEWHDR2 $TMPFILE
+}
+
+while read src dest; do
+	if [ ! -f $dest ]; then
+		cp $src $dest
+	else
+		# upgrading: merge the new comments and properties into
+		# the existing pmcs.conf rather than overwriting it
+		update_pmcsconf $src $dest
+	fi
+done
+
+exit 0
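
Install class action scripts like this one are driven by pkgadd, which writes one "source destination" path pair per line to the script's stdin for every package object in the class; the read loop above copies the file on first install and merges it on upgrade. A hedged sketch of exercising the script by hand (paths are illustrative only):

    # Sketch only: emulate pkgadd feeding the pmcsconf class action script.
    echo "/tmp/staged/pmcs.conf /kernel/drv/pmcs.conf" | sh i.pmcsconf
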
diff --git a/usr/src/tools/scripts/bfu.sh b/usr/src/tools/scripts/bfu.sh
index 64414a36cf..59966df4b8 100644
--- a/usr/src/tools/scripts/bfu.sh
+++ b/usr/src/tools/scripts/bfu.sh
@@ -222,6 +222,7 @@ global_zone_only_files="
kernel/drv/mpt.conf
kernel/drv/mpt_sas.conf
kernel/drv/options.conf
+ kernel/drv/pmcs.conf
kernel/drv/qlc.conf
kernel/drv/ra.conf
kernel/drv/scsa2usb.conf
diff --git a/usr/src/uts/Makefile.targ b/usr/src/uts/Makefile.targ
index 81685eb5b5..11d748e662 100644
--- a/usr/src/uts/Makefile.targ
+++ b/usr/src/uts/Makefile.targ
@@ -194,6 +194,9 @@ $(ROOT_KGSS_DIR)/%: $(OBJS_DIR)/% $(ROOT_KGSS_DIR) FRC
$(ROOT_SCSI_VHCI_DIR)/%: $(OBJS_DIR)/% $(ROOT_SCSI_VHCI_DIR) FRC
$(INS.file)
+$(ROOT_PMCS_FW_DIR)/%: $(OBJS_DIR)/% $(ROOT_PMCS_FW_DIR) FRC
+ $(INS.file)
+
$(ROOT_QLC_FW_DIR)/%: $(OBJS_DIR)/% $(ROOT_QLC_FW_DIR) FRC
$(INS.file)
diff --git a/usr/src/uts/Makefile.uts b/usr/src/uts/Makefile.uts
index 70de98bd11..cb79b12fa0 100644
--- a/usr/src/uts/Makefile.uts
+++ b/usr/src/uts/Makefile.uts
@@ -430,7 +430,6 @@ ROOT_MISC_DIR_32 = $(ROOT_MOD_DIR)/misc
ROOT_KGSS_DIR_32 = $(ROOT_MOD_DIR)/misc/kgss
ROOT_SCSI_VHCI_DIR_32 = $(ROOT_MOD_DIR)/misc/scsi_vhci
ROOT_QLC_FW_DIR_32 = $(ROOT_MOD_DIR)/misc/qlc
-
ROOT_EMLXS_FW_DIR_32 = $(ROOT_MOD_DIR)/misc/emlxs
ROOT_NLMISC_DIR_32 = $(ROOT_MOD_DIR)/misc
ROOT_MACH_DIR_32 = $(ROOT_MOD_DIR)/mach
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index bf747815c4..a476c50628 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -121,6 +121,7 @@ GENUNIX_OBJS += \
cs_stubs.o \
dacf.o \
dacf_clnt.o \
+ damap.o \
cyclic.o \
ddi.o \
ddifm.o \
@@ -883,6 +884,13 @@ NSKERN_OBJS += nsc_ddi.o \
SV_OBJS += sv.o
+PMCS_OBJS += pmcs_attach.o pmcs_intr.o pmcs_nvram.o pmcs_sata.o \
+ pmcs_scsa.o pmcs_smhba.o pmcs_subr.o pmcs_fwlog.o
+
+PMCS8001FW_C_OBJS += pmcs_fw_hdr.o
+
+PMCS8001FW_OBJS += $(PMCS8001FW_C_OBJS) \
+ SPCBoot.o ila.o firmware.o
#
# Build up defines and paths.
diff --git a/usr/src/uts/common/Makefile.rules b/usr/src/uts/common/Makefile.rules
index 10bcb14bfb..24d164cded 100644
--- a/usr/src/uts/common/Makefile.rules
+++ b/usr/src/uts/common/Makefile.rules
@@ -332,6 +332,14 @@ $(OBJS_DIR)/%.o: $(COMMONBASE)/zfs/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
+$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/scsi/adapters/pmcs/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/scsi/adapters/pmcs/%.bin
+ $(COMPILE.b) -o $@ $<
+ $(CTFCONVERT_O)
+
KMECHKRB5_BASE=$(UTSBASE)/common/gssapi/mechs/krb5
KGSSDFLAGS=-I $(UTSBASE)/common/gssapi/include
@@ -2153,6 +2161,9 @@ $(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/scsi/adapters/%.c
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/scsi/adapters/blk2scsa/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
+$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/scsi/adapters/pmcs/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/scsi/adapters/scsi_vhci/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
diff --git a/usr/src/uts/common/fs/devfs/devfs_subr.c b/usr/src/uts/common/fs/devfs/devfs_subr.c
index 06dd76cc3d..341070c6b4 100644
--- a/usr/src/uts/common/fs/devfs/devfs_subr.c
+++ b/usr/src/uts/common/fs/devfs/devfs_subr.c
@@ -1184,6 +1184,17 @@ founddv:
found:
/*
+ * Fail lookup of device that has now become hidden (typically via
+ * hot removal of open device).
+ */
+ if (dv->dv_devi && ndi_dev_is_hidden_node(dv->dv_devi)) {
+ dcmn_err2(("dv_find: nm %s failed: hidden/removed\n", nm));
+ VN_RELE(vp);
+ rv = ENOENT;
+ goto notfound;
+ }
+
+ /*
* Skip non-kernel lookups of internal nodes.
* This use of kcred to distinguish between user and
* internal kernel lookups is unfortunate. The information
@@ -1192,6 +1203,7 @@ found:
* this distinction.
*/
if ((dv->dv_flags & DV_INTERNAL) && (cred != kcred)) {
+ dcmn_err2(("dv_find: nm %s failed: internal\n", nm));
VN_RELE(vp);
rv = ENOENT;
goto notfound;
@@ -1251,7 +1263,7 @@ dv_filldir(struct dv_node *ddv)
ndi_devi_enter(pdevi, &circ);
for (devi = ddi_get_child(pdevi); devi;
devi = ddi_get_next_sibling(devi)) {
- if (i_ddi_node_state(devi) < DS_PROBED)
+ if (i_ddi_node_state(devi) < DS_INITIALIZED)
continue;
/* skip hidden nodes */
diff --git a/usr/src/uts/common/fs/devfs/devfs_vnops.c b/usr/src/uts/common/fs/devfs/devfs_vnops.c
index d5fade9b12..359e9e1d6a 100644
--- a/usr/src/uts/common/fs/devfs/devfs_vnops.c
+++ b/usr/src/uts/common/fs/devfs/devfs_vnops.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* vnode ops for the devfs
*
@@ -59,7 +57,7 @@
#include <sys/debug.h>
#include <sys/policy.h>
#include <sys/modctl.h>
-
+#include <sys/sunndi.h>
#include <fs/fs_subr.h>
#include <sys/fs/dv_node.h>
@@ -960,13 +958,23 @@ devfs_readdir(struct vnode *dvp, struct uio *uiop, struct cred *cred, int *eofp,
diroff++;
for (dv = DV_FIRST_ENTRY(ddv); dv;
dv = DV_NEXT_ENTRY(ddv, dv), diroff++) {
+ /* skip entries until at correct directory offset */
+ if (diroff < soff)
+ continue;
+
+ /*
+ * hidden nodes are skipped (but they still occupy a
+ * directory offset).
+ */
+ if (dv->dv_devi && ndi_dev_is_hidden_node(dv->dv_devi))
+ continue;
+
/*
- * although DDM_INTERNAL_PATH minor nodes are skipped for
- * readdirs outside the kernel, they still occupy directory
- * offsets
+ * DDM_INTERNAL_PATH minor nodes are skipped for readdirs
+ * outside the kernel (but they still occupy a directory
+ * offset).
*/
- if (diroff < soff ||
- ((dv->dv_flags & DV_INTERNAL) && (cred != kcred)))
+ if ((dv->dv_flags & DV_INTERNAL) && (cred != kcred))
continue;
reclen = DIRENT64_RECLEN(strlen(dv->dv_name));
diff --git a/usr/src/uts/common/io/1394/targets/scsa1394/hba.c b/usr/src/uts/common/io/1394/targets/scsa1394/hba.c
index 463c749479..3aade0f089 100644
--- a/usr/src/uts/common/io/1394/targets/scsa1394/hba.c
+++ b/usr/src/uts/common/io/1394/targets/scsa1394/hba.c
@@ -813,13 +813,15 @@ scsa1394_create_children(scsa1394_state_t *sp)
ndi_devi_alloc_sleep(sp->s_dip, node_name,
(pnode_t)DEVI_SID_NODEID, &cdip);
- ret = ndi_prop_update_int(DDI_DEV_T_NONE, cdip, "target", 0);
+ ret = ndi_prop_update_int(DDI_DEV_T_NONE, cdip,
+ SCSI_ADDR_PROP_TARGET, 0);
if (ret != DDI_PROP_SUCCESS) {
(void) ndi_devi_free(cdip);
continue;
}
- ret = ndi_prop_update_int(DDI_DEV_T_NONE, cdip, "lun", i);
+ ret = ndi_prop_update_int(DDI_DEV_T_NONE, cdip,
+ SCSI_ADDR_PROP_LUN, i);
if (ret != DDI_PROP_SUCCESS) {
ddi_prop_remove_all(cdip);
(void) ndi_devi_free(cdip);
@@ -973,8 +975,8 @@ scsa1394_scsi_tgt_init(dev_info_t *dip, dev_info_t *cdip, scsi_hba_tran_t *tran,
int ret = DDI_FAILURE;
if (ddi_prop_op(DDI_DEV_T_ANY, cdip, PROP_LEN_AND_VAL_BUF,
- DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun", (caddr_t)&lun,
- &plen) != DDI_PROP_SUCCESS) {
+ DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, SCSI_ADDR_PROP_LUN,
+ (caddr_t)&lun, &plen) != DDI_PROP_SUCCESS) {
return (DDI_FAILURE);
}
@@ -1019,8 +1021,8 @@ scsa1394_scsi_tgt_free(dev_info_t *dip, dev_info_t *cdip, scsi_hba_tran_t *tran,
}
if (ddi_prop_op(DDI_DEV_T_ANY, cdip, PROP_LEN_AND_VAL_BUF,
- DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun", (caddr_t)&lun,
- &plen) != DDI_PROP_SUCCESS) {
+ DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, SCSI_ADDR_PROP_LUN,
+ (caddr_t)&lun, &plen) != DDI_PROP_SUCCESS) {
return;
}
diff --git a/usr/src/uts/common/io/devinfo.c b/usr/src/uts/common/io/devinfo.c
index cff9b3ac4f..d1d02ce378 100644
--- a/usr/src/uts/common/io/devinfo.c
+++ b/usr/src/uts/common/io/devinfo.c
@@ -647,7 +647,7 @@ di_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
rv = i_ddi_devs_attached(i);
modunload_enable();
- i_ddi_di_cache_invalidate(KM_SLEEP);
+ i_ddi_di_cache_invalidate();
return ((rv == DDI_SUCCESS)? 0 : ENXIO);
@@ -2638,6 +2638,19 @@ path_state_convert(mdi_pathinfo_state_t st)
}
}
+static uint_t
+path_flags_convert(uint_t pi_path_flags)
+{
+ uint_t di_path_flags = 0;
+
+ /* MDI_PATHINFO_FLAGS_HIDDEN nodes not in snapshot */
+
+ if (pi_path_flags & MDI_PATHINFO_FLAGS_DEVICE_REMOVED)
+ di_path_flags |= DI_PATH_FLAGS_DEVICE_REMOVED;
+
+ return (di_path_flags);
+}
+
static di_off_t
di_path_getprop(mdi_pathinfo_t *pip, di_off_t *off_p,
@@ -2793,13 +2806,19 @@ di_getpath_data(dev_info_t *dip, di_off_t *off_p, di_off_t noff,
pip = NULL;
while (pip = (*next_pip)(dip, pip)) {
- mdi_pathinfo_state_t state;
di_off_t stored_offset;
dcmn_err((CE_WARN, "marshalling pip = %p", (void *)pip));
mdi_pi_lock(pip);
+ /* We don't represent hidden paths in the snapshot */
+ if (mdi_pi_ishidden(pip)) {
+ dcmn_err((CE_WARN, "hidden, skip"));
+ mdi_pi_unlock(pip);
+ continue;
+ }
+
if (di_pip_find(st, pip, &stored_offset) != -1) {
/*
* We've already seen this pathinfo node so we need to
@@ -2856,8 +2875,8 @@ di_getpath_data(dev_info_t *dip, di_off_t *off_p, di_off_t noff,
*/
di_register_pip(st, pip, me->self);
- state = mdi_pi_get_state(pip);
- me->path_state = path_state_convert(state);
+ me->path_state = path_state_convert(mdi_pi_get_state(pip));
+ me->path_flags = path_flags_convert(mdi_pi_get_flags(pip));
me->path_instance = mdi_pi_get_path_instance(pip);
diff --git a/usr/src/uts/common/io/sata/impl/sata.c b/usr/src/uts/common/io/sata/impl/sata.c
index 9c7a713d06..eca21dde57 100644
--- a/usr/src/uts/common/io/sata/impl/sata.c
+++ b/usr/src/uts/common/io/sata/impl/sata.c
@@ -2035,12 +2035,12 @@ sata_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
}
if (vid)
- (void) scsi_hba_prop_update_inqstring(sd, INQUIRY_VENDOR_ID,
+ (void) scsi_device_prop_update_inqstring(sd, INQUIRY_VENDOR_ID,
vid, strlen(vid));
if (pid)
- (void) scsi_hba_prop_update_inqstring(sd, INQUIRY_PRODUCT_ID,
+ (void) scsi_device_prop_update_inqstring(sd, INQUIRY_PRODUCT_ID,
pid, strlen(pid));
- (void) scsi_hba_prop_update_inqstring(sd, INQUIRY_REVISION_ID,
+ (void) scsi_device_prop_update_inqstring(sd, INQUIRY_REVISION_ID,
fw, strlen(fw));
return (DDI_SUCCESS);
diff --git a/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi.c b/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi.c
index 326d27811e..1fce338611 100644
--- a/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi.c
+++ b/usr/src/uts/common/io/scsi/adapters/iscsi/iscsi.c
@@ -586,10 +586,11 @@ iscsi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
ihp->hba_isid[5]);
if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
- "initiator-port", init_port_name) !=
+ SCSI_ADDR_PROP_INITIATOR_PORT, init_port_name) !=
DDI_PROP_SUCCESS) {
cmn_err(CE_WARN, "iscsi_attach: Creating "
- "initiator-port property on iSCSI "
+ SCSI_ADDR_PROP_INITIATOR_PORT
+ " property on iSCSI "
"HBA(%s) with dip(%d) Failed",
(char *)ihp->hba_name,
ddi_get_instance(dip));
@@ -1758,10 +1759,10 @@ iscsi_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
ihp->hba_isid[5]);
if (ddi_prop_update_string(DDI_DEV_T_NONE,
- ihp->hba_dip, "initiator-port",
+ ihp->hba_dip, SCSI_ADDR_PROP_INITIATOR_PORT,
init_port_name) != DDI_PROP_SUCCESS) {
cmn_err(CE_WARN, "iscsi_ioctl: Updating "
- "initiator-port property on iSCSI "
+ SCSI_ADDR_PROP_INITIATOR_PORT " property on iSCSI "
"HBA(%s) with dip(%d) Failed",
(char *)ihp->hba_name,
ddi_get_instance(ihp->hba_dip));
@@ -4709,10 +4710,11 @@ iscsi_virt_lun_init(dev_info_t *hba_dip, dev_info_t *lun_dip,
ilp->lun_sess->sess_name, ilp->lun_sess->sess_tpgt_conf);
}
- if (mdi_prop_update_string(pip, "target-port",
- target_port_name) != DDI_PROP_SUCCESS) {
- cmn_err(CE_WARN, "iscsi_virt_lun_init: Creating 'target-port' "
- "property on Path(%p) for Target(%s), Lun(%d) Failed",
+ if (mdi_prop_update_string(pip,
+ SCSI_ADDR_PROP_TARGET_PORT, target_port_name) != DDI_PROP_SUCCESS) {
+ cmn_err(CE_WARN, "iscsi_virt_lun_init: Creating '"
+ SCSI_ADDR_PROP_TARGET_PORT "' property on Path(%p) "
+ "for Target(%s), Lun(%d) Failed",
(void *)pip, ilp->lun_sess->sess_name, ilp->lun_num);
}
@@ -4825,10 +4827,10 @@ iscsi_phys_lun_init(dev_info_t *hba_dip, dev_info_t *lun_dip,
}
if (ddi_prop_update_string(DDI_DEV_T_NONE, lun_dip,
- "target-port", target_port_name) != DDI_PROP_SUCCESS) {
- cmn_err(CE_WARN, "iscsi_phys_lun_init: Creating 'target-port' "
- "property on Target(%s), Lun(%d) Failed",
- ilp->lun_sess->sess_name, ilp->lun_num);
+ SCSI_ADDR_PROP_TARGET_PORT, target_port_name) != DDI_PROP_SUCCESS) {
+ cmn_err(CE_WARN, "iscsi_phys_lun_init: Creating '"
+ SCSI_ADDR_PROP_TARGET_PORT "' property on Target(%s), "
+ "Lun(%d) Failed", ilp->lun_sess->sess_name, ilp->lun_num);
}
return (rtn);
diff --git a/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c b/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
index e39ba442c8..4850f52870 100644
--- a/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
+++ b/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
@@ -1242,7 +1242,7 @@ intr_done:
}
hba_attach_setup++;
- mpt->m_smptran = sas_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
+ mpt->m_smptran = sas_hba_tran_alloc(dip);
ASSERT(mpt->m_smptran != NULL);
mpt->m_smptran->tran_hba_private = mpt;
mpt->m_smptran->tran_smp_start = mptsas_smp_start;
@@ -2664,7 +2664,7 @@ mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
LUN_PROP, 0);
if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
- "target-port", &sas_wwn) == DDI_PROP_SUCCESS) {
+ SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn) == DDI_PROP_SUCCESS) {
/*
* Stick in the address of the form "wWWN,LUN"
*/
@@ -2748,8 +2748,8 @@ mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
return (DDI_FAILURE);
}
- if (mdi_prop_lookup_string(pip, "target-port", &psas_wwn) ==
- MDI_SUCCESS) {
+ if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
+ &psas_wwn) == MDI_SUCCESS) {
if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
sas_wwn = 0;
}
@@ -2759,7 +2759,7 @@ mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
DDI_PROP_DONTPASS, LUN_PROP, 0);
if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
- DDI_PROP_DONTPASS, "target-port", &psas_wwn) ==
+ DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
DDI_PROP_SUCCESS) {
if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
sas_wwn = 0;
@@ -2860,12 +2860,12 @@ mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
* override SCSA "inquiry-*" properties
*/
if (vid)
- (void) scsi_hba_prop_update_inqstring(sd,
+ (void) scsi_device_prop_update_inqstring(sd,
INQUIRY_VENDOR_ID, vid, strlen(vid));
if (pid)
- (void) scsi_hba_prop_update_inqstring(sd,
+ (void) scsi_device_prop_update_inqstring(sd,
INQUIRY_PRODUCT_ID, pid, strlen(pid));
- (void) scsi_hba_prop_update_inqstring(sd,
+ (void) scsi_device_prop_update_inqstring(sd,
INQUIRY_REVISION_ID, fw, strlen(fw));
if (inq89 != NULL) {
@@ -12295,8 +12295,8 @@ mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
mdi_rtn = MDI_FAILURE;
goto virt_create_done;
}
- if (sas_wwn && (mdi_prop_update_string(*pip, "target-port",
- wwn_str) != DDI_PROP_SUCCESS)) {
+ if (sas_wwn && (mdi_prop_update_string(*pip,
+ SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
"create property for target %d lun %d "
"(target-port)", target, lun);
@@ -12449,7 +12449,7 @@ mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
- *lun_dip, "target-port", wwn_str)
+ *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
!= DDI_PROP_SUCCESS) {
mptsas_log(mpt, CE_WARN, "mptsas unable to "
"create property for SAS target %d lun %d "
diff --git a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs.conf b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs.conf
new file mode 100644
index 0000000000..b5641a8f4e
--- /dev/null
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs.conf
@@ -0,0 +1,59 @@
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# As a pHCI driver, pmcs must specify the vHCI class it belongs to (scsi_vhci).
+#
+ddi-vhci-class="scsi_vhci";
+
+#
+# By default, MPxIO will be enabled on all pmcs controllers. To disable MPxIO
+# for all pmcs controllers, set:
+#
+#mpxio-disable="yes";
+
+#
+# You can disable MPxIO on a per-HBA basis. Per port settings override the
+# global setting for the specified ports. To disable MPxIO for the controller
+# whose unit-address is 0 and whose parent is /pci@0/pci10de,5d@e, set:
+#
+#name="pmcs" parent="/pci@0/pci10de,5d@e" unit-address="0" mpxio-disable="yes";
+
+#
+# Global debug settings may be set using the 'pmcs-debug-mask' property.
+# Any combination of values may be set according to the following:
+#
+# 0x0001 - Basic info; shouldn't print anything during normal operation
+# 0x0002 - Small amount of I/O information during normal operation
+# 0x0004 - Much more info during I/O; will impact performance
+# 0x0008 - Very verbose at all times; definitely a performance impact
+# 0x0010 - Debug information with regard to discovery/configuration
+# 0x0020 - Debug information with regard to iport
+# 0x0040 - Debug information with regard to map
+# 0x0080 - Report on SCSI underflow status
+# 0x0100 - Report SCSI status for every command
+# 0x0200 - PHY lock/unlock debug (very noisy)
+# 0x0400 - Debug information with regard to device state
+#
+pmcs-debug-mask=0x71;
+
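
The default mask set above, 0x71, is just the OR of the basic-info (0x0001), discovery/configuration (0x0010), iport (0x0020) and map (0x0040) bits from the table. A quick shell check of that arithmetic:

    # 0x0001 | 0x0010 | 0x0020 | 0x0040 == 0x71
    printf '0x%x\n' $(( 0x0001 | 0x0010 | 0x0020 | 0x0040 ))
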
diff --git a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs8001fw.version b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs8001fw.version
new file mode 100644
index 0000000000..8c5820d51b
--- /dev/null
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs8001fw.version
@@ -0,0 +1,24 @@
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+PMCS_FW_VERSION=0x01080100
+PMCS_FW_VERSION_STRING="1.8.01 Released"
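
The packed PMCS_FW_VERSION appears to encode the release as byte fields (0x01, 0x08, 0x01, 0x00), which lines up with the "1.8.01" string above; that layout is an assumption here, not stated by the patch. A small shell sketch of the assumed decoding:

    # Sketch only: decode the packed firmware version, assuming a
    # major.minor.micro byte-field layout.
    v=0x01080100
    printf '%d.%d.%02d\n' $(( (v >> 24) & 0xff )) $(( (v >> 16) & 0xff )) $(( (v >> 8) & 0xff ))
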
diff --git a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_attach.c b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_attach.c
new file mode 100644
index 0000000000..1c03259812
--- /dev/null
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_attach.c
@@ -0,0 +1,3026 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+#include <sys/scsi/adapters/pmcs/pmcs.h>
+#include <sys/scsi/adapters/pmcs/pmcs_bldrev.h>
+
+#define PMCS_DRIVER_VERSION "PMC-Sierra HBA Driver "\
+ PMCS_BUILD_VERSION
+
+static char *pmcs_driver_rev = PMCS_DRIVER_VERSION;
+
+/*
+ * Non-DDI Compliant stuff
+ */
+extern char hw_serial[];
+
+/*
+ * Global driver data
+ */
+void *pmcs_softc_state = NULL;
+void *pmcs_iport_softstate = NULL;
+
+/*
+ * Tracing and Logging info
+ */
+pmcs_tbuf_t *pmcs_tbuf = NULL;
+uint32_t pmcs_tbuf_num_elems = 0;
+pmcs_tbuf_t *pmcs_tbuf_ptr;
+uint32_t pmcs_tbuf_idx = 0;
+boolean_t pmcs_tbuf_wrap = B_FALSE;
+static kmutex_t pmcs_trace_lock;
+
+/*
+ * If the pmcs_force_syslog value is non-zero, all messages put in the trace
+ * log are also sent to the system log.
+ */
+int pmcs_force_syslog = 0;
+int pmcs_console = 0;
+
+/*
+ * External References
+ */
+extern int ncpus_online;
+
+/*
+ * Local static data
+ */
+static int fwlog_level = 3;
+static int physpeed = PHY_LINK_ALL;
+static int phymode = PHY_LM_AUTO;
+static int block_mask = 0;
+static int phymap_usec = 3 * MICROSEC;
+static int iportmap_usec = 2 * MICROSEC;
+
+#ifdef DEBUG
+static int debug_mask = 1;
+#else
+static int debug_mask = 0;
+#endif
+
+#ifdef DISABLE_MSIX
+static int disable_msix = 1;
+#else
+static int disable_msix = 0;
+#endif
+
+#ifdef DISABLE_MSI
+static int disable_msi = 1;
+#else
+static int disable_msi = 0;
+#endif
+
+static uint16_t maxqdepth = 0xfffe;
+
+/*
+ * Local prototypes
+ */
+static int pmcs_attach(dev_info_t *, ddi_attach_cmd_t);
+static int pmcs_detach(dev_info_t *, ddi_detach_cmd_t);
+static int pmcs_unattach(pmcs_hw_t *);
+static int pmcs_iport_unattach(pmcs_iport_t *);
+static int pmcs_add_more_chunks(pmcs_hw_t *, unsigned long);
+static void pmcs_watchdog(void *);
+static int pmcs_setup_intr(pmcs_hw_t *);
+static int pmcs_teardown_intr(pmcs_hw_t *);
+
+static uint_t pmcs_nonio_ix(caddr_t, caddr_t);
+static uint_t pmcs_general_ix(caddr_t, caddr_t);
+static uint_t pmcs_event_ix(caddr_t, caddr_t);
+static uint_t pmcs_iodone_ix(caddr_t, caddr_t);
+static uint_t pmcs_fatal_ix(caddr_t, caddr_t);
+static uint_t pmcs_all_intr(caddr_t, caddr_t);
+static int pmcs_quiesce(dev_info_t *dip);
+static boolean_t pmcs_fabricate_wwid(pmcs_hw_t *);
+
+static void pmcs_create_phy_stats(pmcs_iport_t *);
+int pmcs_update_phy_stats(kstat_t *, int);
+static void pmcs_destroy_phy_stats(pmcs_iport_t *);
+
+static void pmcs_fm_fini(pmcs_hw_t *pwp);
+static void pmcs_fm_init(pmcs_hw_t *pwp);
+static int pmcs_fm_error_cb(dev_info_t *dip,
+ ddi_fm_error_t *err, const void *impl_data);
+
+/*
+ * Local configuration data
+ */
+static struct dev_ops pmcs_ops = {
+ DEVO_REV, /* devo_rev, */
+ 0, /* refcnt */
+ ddi_no_info, /* info */
+ nulldev, /* identify */
+ nulldev, /* probe */
+ pmcs_attach, /* attach */
+ pmcs_detach, /* detach */
+ nodev, /* reset */
+ NULL, /* driver operations */
+ NULL, /* bus operations */
+ ddi_power, /* power management */
+ pmcs_quiesce /* quiesce */
+};
+
+static struct modldrv modldrv = {
+ &mod_driverops,
+ PMCS_DRIVER_VERSION,
+ &pmcs_ops, /* driver ops */
+};
+static struct modlinkage modlinkage = {
+ MODREV_1, &modldrv, NULL
+};
+
+const ddi_dma_attr_t pmcs_dattr = {
+ DMA_ATTR_V0, /* dma_attr version */
+ 0x0000000000000000ull, /* dma_attr_addr_lo */
+ 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */
+ 0x00000000FFFFFFFFull, /* dma_attr_count_max */
+ 0x0000000000000001ull, /* dma_attr_align */
+ 0x00000078, /* dma_attr_burstsizes */
+ 0x00000001, /* dma_attr_minxfer */
+ 0x00000000FFFFFFFFull, /* dma_attr_maxxfer */
+ 0x00000000FFFFFFFFull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 512, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+static ddi_device_acc_attr_t rattr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_DEFAULT_ACC
+};
+
+
+/*
+ * Attach/Detach functions
+ */
+
+int
+_init(void)
+{
+ int ret;
+
+ ret = ddi_soft_state_init(&pmcs_softc_state, sizeof (pmcs_hw_t), 1);
+ if (ret != 0) {
+ cmn_err(CE_WARN, "?soft state init failed for pmcs");
+ return (ret);
+ }
+
+ if ((ret = scsi_hba_init(&modlinkage)) != 0) {
+ cmn_err(CE_WARN, "?scsi_hba_init failed for pmcs");
+ ddi_soft_state_fini(&pmcs_softc_state);
+ return (ret);
+ }
+
+ /*
+ * Allocate soft state for iports
+ */
+ ret = ddi_soft_state_init(&pmcs_iport_softstate,
+ sizeof (pmcs_iport_t), 2);
+ if (ret != 0) {
+ cmn_err(CE_WARN, "?iport soft state init failed for pmcs");
+ ddi_soft_state_fini(&pmcs_softc_state);
+ return (ret);
+ }
+
+ ret = mod_install(&modlinkage);
+ if (ret != 0) {
+ cmn_err(CE_WARN, "?mod_install failed for pmcs (%d)", ret);
+ scsi_hba_fini(&modlinkage);
+ ddi_soft_state_fini(&pmcs_iport_softstate);
+ ddi_soft_state_fini(&pmcs_softc_state);
+ return (ret);
+ }
+
+ /* Initialize the global trace lock */
+ mutex_init(&pmcs_trace_lock, NULL, MUTEX_DRIVER, NULL);
+
+ return (0);
+}
+
+int
+_fini(void)
+{
+ int ret;
+ if ((ret = mod_remove(&modlinkage)) != 0) {
+ return (ret);
+ }
+ scsi_hba_fini(&modlinkage);
+
+ /* Free pmcs log buffer and destroy the global lock */
+ if (pmcs_tbuf) {
+ kmem_free(pmcs_tbuf,
+ pmcs_tbuf_num_elems * sizeof (pmcs_tbuf_t));
+ pmcs_tbuf = NULL;
+ }
+ mutex_destroy(&pmcs_trace_lock);
+
+ ddi_soft_state_fini(&pmcs_iport_softstate);
+ ddi_soft_state_fini(&pmcs_softc_state);
+ return (0);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+static int
+pmcs_iport_attach(dev_info_t *dip)
+{
+ pmcs_iport_t *iport;
+ pmcs_hw_t *pwp;
+ scsi_hba_tran_t *tran;
+ void *ua_priv = NULL;
+ char *iport_ua;
+ char *init_port;
+ int hba_inst;
+ int inst;
+
+ hba_inst = ddi_get_instance(ddi_get_parent(dip));
+ inst = ddi_get_instance(dip);
+
+ pwp = ddi_get_soft_state(pmcs_softc_state, hba_inst);
+ if (pwp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+		    "%s: iport%d attach invoked with NULL parent (HBA) node",
+ __func__, inst);
+ return (DDI_FAILURE);
+ }
+
+ if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) {
+ return (DDI_FAILURE);
+ }
+
+ if ((iport_ua = scsi_hba_iport_unit_address(dip)) == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: invoked with NULL unit address, inst (%d)",
+ __func__, inst);
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_soft_state_zalloc(pmcs_iport_softstate, inst) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Failed to alloc soft state for iport %d", inst);
+ return (DDI_FAILURE);
+ }
+
+ iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
+ if (iport == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "cannot get iport soft state");
+ goto iport_attach_fail1;
+ }
+
+ mutex_init(&iport->lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(pwp->intr_pri));
+ cv_init(&iport->refcnt_cv, NULL, CV_DEFAULT, NULL);
+ mutex_init(&iport->refcnt_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(pwp->intr_pri));
+
+ /* Set some data on the iport handle */
+ iport->dip = dip;
+ iport->pwp = pwp;
+
+ /* Dup the UA into the iport handle */
+ iport->ua = strdup(iport_ua);
+
+ tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
+ tran->tran_hba_private = iport;
+
+ list_create(&iport->phys, sizeof (pmcs_phy_t),
+ offsetof(pmcs_phy_t, list_node));
+
+ /*
+ * If our unit address is active in the phymap, configure our
+ * iport's phylist.
+ */
+ mutex_enter(&iport->lock);
+ ua_priv = sas_phymap_lookup_uapriv(pwp->hss_phymap, iport->ua);
+ if (ua_priv) {
+ /* Non-NULL private data indicates the unit address is active */
+ iport->ua_state = UA_ACTIVE;
+ if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: failed to "
+ "configure phys on iport handle (0x%p), "
+			    "configure phys on iport handle (0x%p), "
+			    "unit address [%s]", __func__,
+ mutex_exit(&iport->lock);
+ goto iport_attach_fail2;
+ }
+ } else {
+ iport->ua_state = UA_INACTIVE;
+ }
+ mutex_exit(&iport->lock);
+
+ /* Allocate string-based soft state pool for targets */
+ iport->tgt_sstate = NULL;
+ if (ddi_soft_state_bystr_init(&iport->tgt_sstate,
+ sizeof (pmcs_xscsi_t), PMCS_TGT_SSTATE_SZ) != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "cannot get iport tgt soft state");
+ goto iport_attach_fail2;
+ }
+
+ /* Create this iport's target map */
+ if (pmcs_iport_tgtmap_create(iport) == B_FALSE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Failed to create tgtmap on iport %d", inst);
+ goto iport_attach_fail3;
+ }
+
+ /* Set up the 'initiator-port' DDI property on this iport */
+ init_port = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
+ if (pwp->separate_ports) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: separate ports not "
+ "supported", __func__);
+ } else {
+ /* Set initiator-port value to the HBA's base WWN */
+ (void) scsi_wwn_to_wwnstr(pwp->sas_wwns[0], 1,
+ init_port);
+ }
+ pmcs_smhba_add_iport_prop(iport, DATA_TYPE_STRING,
+ SCSI_ADDR_PROP_INITIATOR_PORT, init_port);
+ kmem_free(init_port, PMCS_MAX_UA_SIZE);
+
+ /* Set up a 'num-phys' DDI property for the iport node */
+ pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
+ &iport->nphy);
+
+ /* Create kstats for each of the phys in this port */
+ pmcs_create_phy_stats(iport);
+
+ /*
+ * Insert this iport handle into our list and set
+ * iports_attached on the HBA node.
+ */
+ rw_enter(&pwp->iports_lock, RW_WRITER);
+ ASSERT(!list_link_active(&iport->list_node));
+ list_insert_tail(&pwp->iports, iport);
+ pwp->iports_attached = 1;
+ pwp->num_iports++;
+ rw_exit(&pwp->iports_lock);
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, "iport%d attached", inst);
+ ddi_report_dev(dip);
+ return (DDI_SUCCESS);
+
+ /* teardown and fail */
+iport_attach_fail3:
+ ddi_soft_state_bystr_fini(&iport->tgt_sstate);
+iport_attach_fail2:
+ list_destroy(&iport->phys);
+ strfree(iport->ua);
+ mutex_destroy(&iport->refcnt_lock);
+ cv_destroy(&iport->refcnt_cv);
+ mutex_destroy(&iport->lock);
+iport_attach_fail1:
+ ddi_soft_state_free(pmcs_iport_softstate, inst);
+ return (DDI_FAILURE);
+}
+
+static int
+pmcs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ scsi_hba_tran_t *tran;
+ char chiprev, *fwsupport, hw_rev[24], fw_rev[24];
+ off_t set3size;
+ int inst, i;
+ int sm_hba = 1;
+ int protocol = 0;
+ int num_phys = 0;
+ pmcs_hw_t *pwp;
+ pmcs_phy_t *phyp;
+ uint32_t num_threads;
+ char buf[64];
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ break;
+
+ case DDI_PM_RESUME:
+ case DDI_RESUME:
+ tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
+ if (!tran) {
+ return (DDI_FAILURE);
+ }
+ /* No DDI_?_RESUME on iport nodes */
+ if (scsi_hba_iport_unit_address(dip) != NULL) {
+ return (DDI_SUCCESS);
+ }
+ pwp = TRAN2PMC(tran);
+ if (pwp == NULL) {
+ return (DDI_FAILURE);
+ }
+
+ mutex_enter(&pwp->lock);
+ pwp->suspended = 0;
+ if (pwp->tq) {
+ ddi_taskq_resume(pwp->tq);
+ }
+ mutex_exit(&pwp->lock);
+ return (DDI_SUCCESS);
+
+ default:
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * If this is an iport node, invoke iport attach.
+ */
+ if (scsi_hba_iport_unit_address(dip) != NULL) {
+ return (pmcs_iport_attach(dip));
+ }
+
+ /*
+ * From here on is attach for the HBA node
+ */
+
+#ifdef DEBUG
+ /*
+ * Check to see if this unit is to be disabled. We can't disable
+ * on a per-iport node. It's either the entire HBA or nothing.
+ */
+ (void) snprintf(buf, sizeof (buf),
+ "disable-instance-%d", ddi_get_instance(dip));
+ if (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, buf, 0)) {
+ cmn_err(CE_NOTE, "pmcs%d: disabled by configuration",
+ ddi_get_instance(dip));
+ return (DDI_FAILURE);
+ }
+#endif
+
+ /*
+ * Allocate softstate
+ */
+ inst = ddi_get_instance(dip);
+ if (ddi_soft_state_zalloc(pmcs_softc_state, inst) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "pmcs%d: Failed to alloc soft state", inst);
+ return (DDI_FAILURE);
+ }
+
+ pwp = ddi_get_soft_state(pmcs_softc_state, inst);
+ if (pwp == NULL) {
+ cmn_err(CE_WARN, "pmcs%d: cannot get soft state", inst);
+ ddi_soft_state_free(pmcs_softc_state, inst);
+ return (DDI_FAILURE);
+ }
+ pwp->dip = dip;
+ STAILQ_INIT(&pwp->dq);
+ STAILQ_INIT(&pwp->cq);
+ STAILQ_INIT(&pwp->wf);
+ STAILQ_INIT(&pwp->pf);
+ /*
+ * Create the list for iports
+ */
+ list_create(&pwp->iports, sizeof (pmcs_iport_t),
+ offsetof(pmcs_iport_t, list_node));
+
+ pwp->state = STATE_PROBING;
+
+ /*
+ * Get driver.conf properties
+ */
+ pwp->debug_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-debug-mask",
+ debug_mask);
+ pwp->phyid_block_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phyid-block-mask",
+ block_mask);
+ pwp->physpeed = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-physpeed", physpeed);
+ pwp->phymode = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phymode", phymode);
+ pwp->fwlog = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fwlog", fwlog_level);
+ if (pwp->fwlog > PMCS_FWLOG_MAX) {
+ pwp->fwlog = PMCS_FWLOG_MAX;
+ }
+
+ mutex_enter(&pmcs_trace_lock);
+ if (pmcs_tbuf == NULL) {
+ /* Allocate trace buffer */
+ pmcs_tbuf_num_elems = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-tbuf-num-elems",
+ PMCS_TBUF_NUM_ELEMS_DEF);
+ if ((pmcs_tbuf_num_elems == DDI_PROP_NOT_FOUND) ||
+ (pmcs_tbuf_num_elems == 0)) {
+ pmcs_tbuf_num_elems = PMCS_TBUF_NUM_ELEMS_DEF;
+ }
+
+ pmcs_tbuf = kmem_zalloc(pmcs_tbuf_num_elems *
+ sizeof (pmcs_tbuf_t), KM_SLEEP);
+ pmcs_tbuf_ptr = pmcs_tbuf;
+ pmcs_tbuf_idx = 0;
+ }
+ mutex_exit(&pmcs_trace_lock);
+
+ disable_msix = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msix",
+ disable_msix);
+ disable_msi = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msi",
+ disable_msi);
+ maxqdepth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-maxqdepth", maxqdepth);
+ pwp->fw_force_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fw-force-update", 0);
+ if (pwp->fw_force_update == 0) {
+ pwp->fw_disable_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
+ "pmcs-fw-disable-update", 0);
+ }
+ pwp->ioq_depth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-num-io-qentries",
+ PMCS_NQENTRY);
+
+ /*
+ * Initialize FMA
+ */
+ pwp->dev_acc_attr = pwp->reg_acc_attr = rattr;
+ pwp->iqp_dma_attr = pwp->oqp_dma_attr =
+ pwp->regdump_dma_attr = pwp->cip_dma_attr =
+ pwp->fwlog_dma_attr = pmcs_dattr;
+ pwp->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, pwp->dip,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "fm-capable",
+ DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
+ DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
+ pmcs_fm_init(pwp);
+
+ /*
+ * Map registers
+ */
+ if (pci_config_setup(dip, &pwp->pci_acc_handle)) {
+ pmcs_prt(pwp, PMCS_PRT_WARN, "pci config setup failed");
+ ddi_soft_state_free(pmcs_softc_state, inst);
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Get the size of register set 3.
+ */
+ if (ddi_dev_regsize(dip, PMCS_REGSET_3, &set3size) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "unable to get size of register set %d", PMCS_REGSET_3);
+ pci_config_teardown(&pwp->pci_acc_handle);
+ ddi_soft_state_free(pmcs_softc_state, inst);
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Map registers
+ */
+ pwp->reg_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
+
+ if (ddi_regs_map_setup(dip, PMCS_REGSET_0, (caddr_t *)&pwp->msg_regs,
+ 0, 0, &pwp->reg_acc_attr, &pwp->msg_acc_handle)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "failed to map Message Unit registers");
+ pci_config_teardown(&pwp->pci_acc_handle);
+ ddi_soft_state_free(pmcs_softc_state, inst);
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_regs_map_setup(dip, PMCS_REGSET_1, (caddr_t *)&pwp->top_regs,
+ 0, 0, &pwp->reg_acc_attr, &pwp->top_acc_handle)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "failed to map TOP registers");
+ ddi_regs_map_free(&pwp->msg_acc_handle);
+ pci_config_teardown(&pwp->pci_acc_handle);
+ ddi_soft_state_free(pmcs_softc_state, inst);
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_regs_map_setup(dip, PMCS_REGSET_2, (caddr_t *)&pwp->gsm_regs,
+ 0, 0, &pwp->reg_acc_attr, &pwp->gsm_acc_handle)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "failed to map GSM registers");
+ ddi_regs_map_free(&pwp->top_acc_handle);
+ ddi_regs_map_free(&pwp->msg_acc_handle);
+ pci_config_teardown(&pwp->pci_acc_handle);
+ ddi_soft_state_free(pmcs_softc_state, inst);
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_regs_map_setup(dip, PMCS_REGSET_3, (caddr_t *)&pwp->mpi_regs,
+ 0, 0, &pwp->reg_acc_attr, &pwp->mpi_acc_handle)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "failed to map MPI registers");
+ ddi_regs_map_free(&pwp->top_acc_handle);
+ ddi_regs_map_free(&pwp->gsm_acc_handle);
+ ddi_regs_map_free(&pwp->msg_acc_handle);
+ pci_config_teardown(&pwp->pci_acc_handle);
+ ddi_soft_state_free(pmcs_softc_state, inst);
+ return (DDI_FAILURE);
+ }
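+	/*
+	 * The MPI BAR value encodes the config space offset of BAR 5
+	 * ((5 << 2) + 0x10 == 0x24) shifted into place, OR'd with the
+	 * size of register set 3.
+	 */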
+ pwp->mpibar =
+ (((5U << 2) + 0x10) << PMCS_MSGU_MPI_BAR_SHIFT) | set3size;
+
+ /*
+ * Make sure we can support this card.
+ */
+ pwp->chiprev = pmcs_rd_topunit(pwp, PMCS_DEVICE_REVISION);
+
+ switch (pwp->chiprev) {
+ case PMCS_PM8001_REV_A:
+ case PMCS_PM8001_REV_B:
+ pmcs_prt(pwp, PMCS_PRT_ERR,
+ "Rev A/B Card no longer supported");
+ goto failure;
+ case PMCS_PM8001_REV_C:
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_ERR,
+ "Unknown chip revision (%d)", pwp->chiprev);
+ goto failure;
+ }
+
+ /*
+ * Allocate DMA addressable area for Inbound and Outbound Queue indices
+ * that the chip needs to access plus a space for scratch usage
+ */
+ pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t);
+ if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pwp->cip_acchdls,
+ &pwp->cip_handles, ptob(1), (caddr_t *)&pwp->cip,
+ &pwp->ciaddr) == B_FALSE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Failed to setup DMA for index/scratch");
+ goto failure;
+ }
+
+ bzero(pwp->cip, ptob(1));
+ pwp->scratch = &pwp->cip[PMCS_INDICES_SIZE];
+ pwp->scratch_dma = pwp->ciaddr + PMCS_INDICES_SIZE;
+
+ /*
+ * Allocate DMA S/G list chunks
+ */
+ (void) pmcs_add_more_chunks(pwp, ptob(1) * PMCS_MIN_CHUNK_PAGES);
+
+ /*
+ * Allocate a DMA addressable area for the firmware log (if needed)
+ */
+ if (pwp->fwlog) {
+ /*
+ * Align to event log header and entry size
+ */
+ pwp->fwlog_dma_attr.dma_attr_align = 32;
+ if (pmcs_dma_setup(pwp, &pwp->fwlog_dma_attr,
+ &pwp->fwlog_acchdl,
+ &pwp->fwlog_hndl, PMCS_FWLOG_SIZE,
+ (caddr_t *)&pwp->fwlogp,
+ &pwp->fwaddr) == B_FALSE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Failed to setup DMA for fwlog area");
+ pwp->fwlog = 0;
+ } else {
+ bzero(pwp->fwlogp, PMCS_FWLOG_SIZE);
+ }
+ }
+
+ if (pwp->flash_chunk_addr == NULL) {
+ pwp->regdump_dma_attr.dma_attr_align = PMCS_FLASH_CHUNK_SIZE;
+ if (pmcs_dma_setup(pwp, &pwp->regdump_dma_attr,
+ &pwp->regdump_acchdl,
+ &pwp->regdump_hndl, PMCS_FLASH_CHUNK_SIZE,
+ (caddr_t *)&pwp->flash_chunkp, &pwp->flash_chunk_addr) ==
+ B_FALSE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Failed to setup DMA for register dump area");
+ goto failure;
+ }
+ bzero(pwp->flash_chunkp, PMCS_FLASH_CHUNK_SIZE);
+ }
+
+ /*
+ * More bits of local initialization...
+ */
+ pwp->tq = ddi_taskq_create(dip, "_tq", 4, TASKQ_DEFAULTPRI, 0);
+ if (pwp->tq == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "unable to create worker taskq");
+ goto failure;
+ }
+
+ /*
+ * Cache of structures for dealing with I/O completion callbacks.
+ */
+ (void) snprintf(buf, sizeof (buf), "pmcs_iocomp_cb_cache%d", inst);
+ pwp->iocomp_cb_cache = kmem_cache_create(buf,
+ sizeof (pmcs_iocomp_cb_t), 16, NULL, NULL, NULL, NULL, NULL, 0);
+
+ /*
+ * Cache of PHY structures
+ */
+ (void) snprintf(buf, sizeof (buf), "pmcs_phy_cache%d", inst);
+ pwp->phy_cache = kmem_cache_create(buf, sizeof (pmcs_phy_t), 8,
+ pmcs_phy_constructor, pmcs_phy_destructor, NULL, (void *)pwp,
+ NULL, 0);
+
+ /*
+ * Allocate space for the I/O completion threads
+ */
+ num_threads = ncpus_online;
+ if (num_threads > PMCS_MAX_CQ_THREADS) {
+ num_threads = PMCS_MAX_CQ_THREADS;
+ }
+
+ pwp->cq_info.cq_thr_info = kmem_zalloc(sizeof (pmcs_cq_thr_info_t) *
+ num_threads, KM_SLEEP);
+ pwp->cq_info.cq_threads = num_threads;
+ pwp->cq_info.cq_next_disp_thr = 0;
+ pwp->cq_info.cq_stop = B_FALSE;
+
+ /*
+ * Set the quantum value in clock ticks for the I/O interrupt
+ * coalescing timer.
+ */
+ pwp->io_intr_coal.quantum = drv_usectohz(PMCS_QUANTUM_TIME_USECS);
+
+ /*
+ * We have a delicate dance here. We need to set up
+ * interrupts so we know how to set up some OQC
+ * tables. However, while we're setting up table
+ * access, we may need to flash new firmware and
+ * reset the card, which will take some finessing.
+ */
+
+ /*
+ * Set up interrupts here.
+ */
+ switch (pmcs_setup_intr(pwp)) {
+ case 0:
+ break;
+ case EIO:
+ pwp->stuck = 1;
+ /* FALLTHROUGH */
+ default:
+ goto failure;
+ }
+
+ /*
+	 * Set these up now because they are used to initialize the OQC tables.
+ *
+ * If we have MSI or MSI-X interrupts set up and we have enough
+ * vectors for each OQ, the Outbound Queue vectors can all be the
+ * same as the appropriate interrupt routine will have been called
+ * and the doorbell register automatically cleared.
+ * This keeps us from having to check the Outbound Doorbell register
+ * when the routines for these interrupts are called.
+ *
+ * If we have Legacy INT-X interrupts set up or we didn't have enough
+ * MSI/MSI-X vectors to uniquely identify each OQ, we point these
+ * vectors to the bits we would like to have set in the Outbound
+ * Doorbell register because pmcs_all_intr will read the doorbell
+ * register to find out why we have an interrupt and write the
+ * corresponding 'clear' bit for that interrupt.
+ */
+
+ switch (pwp->intr_cnt) {
+ case 1:
+ /*
+ * Only one vector, so we must check all OQs for MSI. For
+ * INT-X, there's only one vector anyway, so we can just
+ * use the outbound queue bits to keep from having to
+ * check each queue for each interrupt.
+ */
+ if (pwp->int_type == PMCS_INT_FIXED) {
+ pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
+ pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
+ pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
+ } else {
+ pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
+ pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_IODONE;
+ pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_IODONE;
+ }
+ break;
+ case 2:
+ /* With 2, we can at least isolate IODONE */
+ pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
+ pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
+ pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_GENERAL;
+ break;
+ case 4:
+ /* With 4 vectors, everybody gets one */
+ pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
+ pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
+ pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
+ break;
+ }
+
+ /*
+ * Do the first part of setup
+ */
+ if (pmcs_setup(pwp)) {
+ goto failure;
+ }
+ pmcs_report_fwversion(pwp);
+
+ /*
+	 * Now do some additional allocations based upon information
+ * gathered during MPI setup.
+ */
+ pwp->root_phys = kmem_zalloc(pwp->nphy * sizeof (pmcs_phy_t), KM_SLEEP);
+ ASSERT(pwp->nphy < SAS2_PHYNUM_MAX);
+ phyp = pwp->root_phys;
+ for (i = 0; i < pwp->nphy; i++) {
+ if (i < pwp->nphy-1) {
+ phyp->sibling = (phyp + 1);
+ }
+ mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(pwp->intr_pri));
+ phyp->phynum = i & SAS2_PHYNUM_MASK;
+ pmcs_phy_name(pwp, phyp, phyp->path, sizeof (phyp->path));
+ phyp->pwp = pwp;
+ phyp->device_id = PMCS_INVALID_DEVICE_ID;
+ phyp++;
+ }
+
+ pwp->work = kmem_zalloc(pwp->max_cmd * sizeof (pmcwork_t), KM_SLEEP);
+ for (i = 0; i < pwp->max_cmd - 1; i++) {
+ pmcwork_t *pwrk = &pwp->work[i];
+ mutex_init(&pwrk->lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(pwp->intr_pri));
+ cv_init(&pwrk->sleep_cv, NULL, CV_DRIVER, NULL);
+ STAILQ_INSERT_TAIL(&pwp->wf, pwrk, next);
+
+ }
+ pwp->targets = (pmcs_xscsi_t **)
+ kmem_zalloc(pwp->max_dev * sizeof (pmcs_xscsi_t *), KM_SLEEP);
+
+ pwp->iqpt = (pmcs_iqp_trace_t *)
+ kmem_zalloc(sizeof (pmcs_iqp_trace_t), KM_SLEEP);
+ pwp->iqpt->head = kmem_zalloc(PMCS_IQP_TRACE_BUFFER_SIZE, KM_SLEEP);
+ pwp->iqpt->curpos = pwp->iqpt->head;
+ pwp->iqpt->size_left = PMCS_IQP_TRACE_BUFFER_SIZE;
+
+ /*
+ * Start MPI communication.
+ */
+ if (pmcs_start_mpi(pwp)) {
+ if (pmcs_soft_reset(pwp, B_FALSE)) {
+ goto failure;
+ }
+ }
+
+ /*
+ * Do some initial acceptance tests.
+ * This tests interrupts and queues.
+ */
+ if (pmcs_echo_test(pwp)) {
+ goto failure;
+ }
+
+ /* Read VPD - if it exists */
+ if (pmcs_get_nvmd(pwp, PMCS_NVMD_VPD, PMCIN_NVMD_VPD, 0, NULL, 0)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Unable to read VPD: "
+ "attempting to fabricate", __func__);
+ /*
+ * When we release, this must goto failure and the call
+ * to pmcs_fabricate_wwid is removed.
+ */
+ /* goto failure; */
+ if (!pmcs_fabricate_wwid(pwp)) {
+ goto failure;
+ }
+ }
+
+ /*
+ * We're now officially running
+ */
+ pwp->state = STATE_RUNNING;
+
+ /*
+ * Check firmware versions and load new firmware
+ * if needed and reset.
+ */
+ if (pmcs_firmware_update(pwp)) {
+ pmcs_prt(pwp, PMCS_PRT_WARN, "%s: Firmware update failed",
+ __func__);
+ goto failure;
+ }
+
+ /*
+ * Create completion threads.
+ */
+ for (i = 0; i < pwp->cq_info.cq_threads; i++) {
+ pwp->cq_info.cq_thr_info[i].cq_pwp = pwp;
+ pwp->cq_info.cq_thr_info[i].cq_thread =
+ thread_create(NULL, 0, pmcs_scsa_cq_run,
+ &pwp->cq_info.cq_thr_info[i], 0, &p0, TS_RUN, minclsyspri);
+ }
+
+ /*
+ * Create one thread to deal with the updating of the interrupt
+ * coalescing timer.
+ */
+ pwp->ict_thread = thread_create(NULL, 0, pmcs_check_intr_coal,
+ pwp, 0, &p0, TS_RUN, minclsyspri);
+
+ /*
+ * Kick off the watchdog
+ */
+ pwp->wdhandle = timeout(pmcs_watchdog, pwp,
+ drv_usectohz(PMCS_WATCH_INTERVAL));
+ /*
+ * Do the SCSI attachment code (before starting phys)
+ */
+ if (pmcs_scsa_init(pwp, &pmcs_dattr)) {
+ goto failure;
+ }
+ pwp->hba_attached = 1;
+
+ /*
+ * Initialize the rwlock for the iport elements.
+ */
+ rw_init(&pwp->iports_lock, NULL, RW_DRIVER, NULL);
+
+ /* Check all acc & dma handles allocated in attach */
+ if (pmcs_check_acc_dma_handle(pwp)) {
+ ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
+ goto failure;
+ }
+
+ /*
+ * Create the phymap for this HBA instance
+ */
+ if (sas_phymap_create(dip, phymap_usec, PHYMAP_MODE_SIMPLE, NULL,
+ pwp, pmcs_phymap_activate, pmcs_phymap_deactivate,
+ &pwp->hss_phymap) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: pmcs%d phymap_create failed",
+ __func__, inst);
+ goto failure;
+ }
+ ASSERT(pwp->hss_phymap);
+
+ /*
+ * Create the iportmap for this HBA instance
+ */
+ if (scsi_hba_iportmap_create(dip, iportmap_usec, pwp->nphy,
+ &pwp->hss_iportmap) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: pmcs%d iportmap_create "
+ "failed", __func__, inst);
+ goto failure;
+ }
+ ASSERT(pwp->hss_iportmap);
+
+ /*
+ * Start the PHYs.
+ */
+ if (pmcs_start_phys(pwp)) {
+ goto failure;
+ }
+
+ /*
+ * From this point on, we can't fail.
+ */
+ ddi_report_dev(dip);
+
+ /* SM-HBA */
+ pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SMHBA_SUPPORTED,
+ &sm_hba);
+
+ /* SM-HBA */
+ pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_DRV_VERSION,
+ pmcs_driver_rev);
+
+ /* SM-HBA */
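+	/* Express the chip revision as a letter ('A' + numeric revision) */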
+ chiprev = 'A' + pwp->chiprev;
+ (void) snprintf(hw_rev, 2, "%s", &chiprev);
+ pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_HWARE_VERSION,
+ hw_rev);
+
+ /* SM-HBA */
+ switch (PMCS_FW_TYPE(pwp)) {
+ case PMCS_FW_TYPE_RELEASED:
+ fwsupport = "Released";
+ break;
+ case PMCS_FW_TYPE_DEVELOPMENT:
+ fwsupport = "Development";
+ break;
+ case PMCS_FW_TYPE_ALPHA:
+ fwsupport = "Alpha";
+ break;
+ case PMCS_FW_TYPE_BETA:
+ fwsupport = "Beta";
+ break;
+ default:
+ fwsupport = "Special";
+ break;
+ }
+ (void) snprintf(fw_rev, sizeof (fw_rev), "%x.%x.%x %s",
+ PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp),
+ fwsupport);
+ pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_FWARE_VERSION,
+ fw_rev);
+
+ /* SM-HBA */
+ num_phys = pwp->nphy;
+ pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_NUM_PHYS_HBA,
+ &num_phys);
+
+ /* SM-HBA */
+ protocol = SAS_SSP_SUPPORT | SAS_SATA_SUPPORT | SAS_SMP_SUPPORT;
+ pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SUPPORTED_PROTOCOL,
+ &protocol);
+
+ return (DDI_SUCCESS);
+
+failure:
+ if (pmcs_unattach(pwp)) {
+ pwp->stuck = 1;
+ }
+ return (DDI_FAILURE);
+}
+
+int
+pmcs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int inst = ddi_get_instance(dip);
+ pmcs_iport_t *iport = NULL;
+ pmcs_hw_t *pwp = NULL;
+ scsi_hba_tran_t *tran;
+
+ if (scsi_hba_iport_unit_address(dip) != NULL) {
+ /* iport node */
+ iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
+ ASSERT(iport);
+ if (iport == NULL) {
+ return (DDI_FAILURE);
+ }
+ pwp = iport->pwp;
+ } else {
+ /* hba node */
+ pwp = (pmcs_hw_t *)ddi_get_soft_state(pmcs_softc_state, inst);
+ ASSERT(pwp);
+ if (pwp == NULL) {
+ return (DDI_FAILURE);
+ }
+ }
+
+ switch (cmd) {
+ case DDI_DETACH:
+ if (iport) {
+ /* iport detach */
+ if (pmcs_iport_unattach(iport)) {
+ return (DDI_FAILURE);
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "iport%d detached", inst);
+ return (DDI_SUCCESS);
+ } else {
+ /* HBA detach */
+ if (pmcs_unattach(pwp)) {
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+ }
+
+ case DDI_SUSPEND:
+ case DDI_PM_SUSPEND:
+ /* No DDI_SUSPEND on iport nodes */
+ if (iport) {
+ return (DDI_SUCCESS);
+ }
+
+ if (pwp->stuck) {
+ return (DDI_FAILURE);
+ }
+ tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
+ if (!tran) {
+ return (DDI_FAILURE);
+ }
+
+ pwp = TRAN2PMC(tran);
+ if (pwp == NULL) {
+ return (DDI_FAILURE);
+ }
+ mutex_enter(&pwp->lock);
+ if (pwp->tq) {
+ ddi_taskq_suspend(pwp->tq);
+ }
+ pwp->suspended = 1;
+ mutex_exit(&pwp->lock);
+ pmcs_prt(pwp, PMCS_PRT_INFO, "PMC8X6G suspending");
+ return (DDI_SUCCESS);
+
+ default:
+ return (DDI_FAILURE);
+ }
+}
+
+static int
+pmcs_iport_unattach(pmcs_iport_t *iport)
+{
+ pmcs_hw_t *pwp = iport->pwp;
+
+ /*
+ * First, check if there are still any configured targets on this
+ * iport. If so, we fail detach.
+ */
+ if (pmcs_iport_has_targets(pwp, iport)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, "iport%d detach failure: "
+ "iport has targets (luns)", ddi_get_instance(iport->dip));
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Remove this iport from our list if it is inactive in the phymap.
+ */
+ rw_enter(&pwp->iports_lock, RW_WRITER);
+ mutex_enter(&iport->lock);
+
+ if (iport->ua_state == UA_ACTIVE) {
+ mutex_exit(&iport->lock);
+ rw_exit(&pwp->iports_lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, "iport%d detach failure: "
+ "iport unit address active in phymap",
+ ddi_get_instance(iport->dip));
+ return (DDI_FAILURE);
+ }
+
+ /* If it's our only iport, clear iports_attached */
+ ASSERT(pwp->num_iports >= 1);
+ if (--pwp->num_iports == 0) {
+ pwp->iports_attached = 0;
+ }
+
+ ASSERT(list_link_active(&iport->list_node));
+ list_remove(&pwp->iports, iport);
+ rw_exit(&pwp->iports_lock);
+
+ /*
+ * We have removed the iport handle from the HBA's iports list,
+	 * so there will be no new references to it.  Two things must be
+ * guarded against here. First, we could have PHY up events,
+ * adding themselves to the iport->phys list and grabbing ref's
+ * on our iport handle. Second, we could have existing references
+ * to this iport handle from a point in time prior to the list
+ * removal above.
+ *
+ * So first, destroy the phys list. Remove any phys that have snuck
+ * in after the phymap deactivate, dropping the refcnt accordingly.
+	 * If these PHYs are still up when the phymap reactivates
+ * (i.e. when this iport reattaches), we'll populate the list with
+ * them and bump the refcnt back up.
+ */
+ pmcs_remove_phy_from_iport(iport, NULL);
+ ASSERT(list_is_empty(&iport->phys));
+ list_destroy(&iport->phys);
+ mutex_exit(&iport->lock);
+
+ /*
+ * Second, wait for any other references to this iport to be
+ * dropped, then continue teardown.
+ */
+ mutex_enter(&iport->refcnt_lock);
+ while (iport->refcnt != 0) {
+ cv_wait(&iport->refcnt_cv, &iport->refcnt_lock);
+ }
+ mutex_exit(&iport->refcnt_lock);
+
+ /* Delete kstats */
+ pmcs_destroy_phy_stats(iport);
+
+ /* Destroy the iport target map */
+ if (pmcs_iport_tgtmap_destroy(iport) == B_FALSE) {
+ return (DDI_FAILURE);
+ }
+
+ /* Free the tgt soft state */
+ if (iport->tgt_sstate != NULL) {
+ ddi_soft_state_bystr_fini(&iport->tgt_sstate);
+ }
+
+ /* Free our unit address string */
+ strfree(iport->ua);
+
+ /* Finish teardown and free the softstate */
+ mutex_destroy(&iport->refcnt_lock);
+ ASSERT(iport->refcnt == 0);
+ cv_destroy(&iport->refcnt_cv);
+ mutex_destroy(&iport->lock);
+ ddi_soft_state_free(pmcs_iport_softstate, ddi_get_instance(iport->dip));
+
+ return (DDI_SUCCESS);
+}
+
+static int
+pmcs_unattach(pmcs_hw_t *pwp)
+{
+ int i;
+ enum pwpstate curstate;
+ pmcs_cq_thr_info_t *cqti;
+
+ /*
+ * Tear down the interrupt infrastructure.
+ */
+ if (pmcs_teardown_intr(pwp)) {
+ pwp->stuck = 1;
+ }
+ pwp->intr_cnt = 0;
+
+ /*
+ * Grab a lock, if initted, to set state.
+ */
+ if (pwp->locks_initted) {
+ mutex_enter(&pwp->lock);
+ if (pwp->state != STATE_DEAD) {
+ pwp->state = STATE_UNPROBING;
+ }
+ curstate = pwp->state;
+ mutex_exit(&pwp->lock);
+
+ /*
+ * Stop the I/O completion threads.
+ */
+ mutex_enter(&pwp->cq_lock);
+ pwp->cq_info.cq_stop = B_TRUE;
+ for (i = 0; i < pwp->cq_info.cq_threads; i++) {
+ if (pwp->cq_info.cq_thr_info[i].cq_thread) {
+ cqti = &pwp->cq_info.cq_thr_info[i];
+ mutex_enter(&cqti->cq_thr_lock);
+ cv_signal(&cqti->cq_cv);
+ mutex_exit(&cqti->cq_thr_lock);
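+				/* Drop cq_lock across thread_join() */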
+ mutex_exit(&pwp->cq_lock);
+ thread_join(cqti->cq_thread->t_did);
+ mutex_enter(&pwp->cq_lock);
+ }
+ }
+ mutex_exit(&pwp->cq_lock);
+
+ /*
+ * Stop the interrupt coalescing timer thread
+ */
+ if (pwp->ict_thread) {
+ mutex_enter(&pwp->ict_lock);
+ pwp->io_intr_coal.stop_thread = B_TRUE;
+ cv_signal(&pwp->ict_cv);
+ mutex_exit(&pwp->ict_lock);
+ thread_join(pwp->ict_thread->t_did);
+ }
+ } else {
+ if (pwp->state != STATE_DEAD) {
+ pwp->state = STATE_UNPROBING;
+ }
+ curstate = pwp->state;
+ }
+
+ if (&pwp->iports != NULL) {
+ /* Destroy the iports lock */
+ rw_destroy(&pwp->iports_lock);
+ /* Destroy the iports list */
+ ASSERT(list_is_empty(&pwp->iports));
+ list_destroy(&pwp->iports);
+ }
+
+ if (pwp->hss_iportmap != NULL) {
+ /* Destroy the iportmap */
+ scsi_hba_iportmap_destroy(pwp->hss_iportmap);
+ }
+
+ if (pwp->hss_phymap != NULL) {
+ /* Destroy the phymap */
+ sas_phymap_destroy(pwp->hss_phymap);
+ }
+
+ /*
+ * Make sure that any pending watchdog won't
+ * be called from this point on out.
+ */
+ (void) untimeout(pwp->wdhandle);
+ /*
+ * After the above action, the watchdog
+ * timer that starts up the worker task
+ * may trigger but will exit immediately
+ * on triggering.
+ *
+ * Now that this is done, we can destroy
+ * the task queue, which will wait if we're
+ * running something on it.
+ */
+ if (pwp->tq) {
+ ddi_taskq_destroy(pwp->tq);
+ pwp->tq = NULL;
+ }
+
+ pmcs_fm_fini(pwp);
+
+ if (pwp->hba_attached) {
+ (void) scsi_hba_detach(pwp->dip);
+ pwp->hba_attached = 0;
+ }
+
+ /*
+ * If the chip hasn't been marked dead, shut it down now
+ * to bring it back to a known state without attempting
+ * a soft reset.
+ */
+ if (curstate != STATE_DEAD && pwp->locks_initted) {
+ /*
+ * De-register all registered devices
+ */
+ pmcs_deregister_devices(pwp, pwp->root_phys);
+
+ /*
+ * Stop all the phys.
+ */
+ pmcs_stop_phys(pwp);
+
+ /*
+ * Shut Down Message Passing
+ */
+ (void) pmcs_stop_mpi(pwp);
+
+ /*
+ * Reset chip
+ */
+ (void) pmcs_soft_reset(pwp, B_FALSE);
+ }
+
+ /*
+ * Turn off interrupts on the chip
+ */
+ if (pwp->mpi_acc_handle) {
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
+ }
+
+ /* Destroy pwp's lock */
+ if (pwp->locks_initted) {
+ mutex_destroy(&pwp->lock);
+ mutex_destroy(&pwp->dma_lock);
+ mutex_destroy(&pwp->axil_lock);
+ mutex_destroy(&pwp->cq_lock);
+ mutex_destroy(&pwp->config_lock);
+ mutex_destroy(&pwp->ict_lock);
+ mutex_destroy(&pwp->wfree_lock);
+ mutex_destroy(&pwp->pfree_lock);
+ mutex_destroy(&pwp->dead_phylist_lock);
+#ifdef DEBUG
+ mutex_destroy(&pwp->dbglock);
+#endif
+ cv_destroy(&pwp->ict_cv);
+ cv_destroy(&pwp->drain_cv);
+ pwp->locks_initted = 0;
+ }
+
+ /*
+ * Free DMA handles and associated consistent memory
+ */
+ if (pwp->regdump_hndl) {
+ if (ddi_dma_unbind_handle(pwp->regdump_hndl) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check failed "
+ "at %s():%d", __func__, __LINE__);
+ }
+ ddi_dma_free_handle(&pwp->regdump_hndl);
+ ddi_dma_mem_free(&pwp->regdump_acchdl);
+ pwp->regdump_hndl = 0;
+ }
+ if (pwp->fwlog_hndl) {
+ if (ddi_dma_unbind_handle(pwp->fwlog_hndl) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check failed "
+ "at %s():%d", __func__, __LINE__);
+ }
+ ddi_dma_free_handle(&pwp->fwlog_hndl);
+ ddi_dma_mem_free(&pwp->fwlog_acchdl);
+ pwp->fwlog_hndl = 0;
+ }
+ if (pwp->cip_handles) {
+ if (ddi_dma_unbind_handle(pwp->cip_handles) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check failed "
+ "at %s():%d", __func__, __LINE__);
+ }
+ ddi_dma_free_handle(&pwp->cip_handles);
+ ddi_dma_mem_free(&pwp->cip_acchdls);
+ pwp->cip_handles = 0;
+ }
+ for (i = 0; i < PMCS_NOQ; i++) {
+ if (pwp->oqp_handles[i]) {
+ if (ddi_dma_unbind_handle(pwp->oqp_handles[i]) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check "
+ "failed at %s():%d", __func__, __LINE__);
+ }
+ ddi_dma_free_handle(&pwp->oqp_handles[i]);
+ ddi_dma_mem_free(&pwp->oqp_acchdls[i]);
+ pwp->oqp_handles[i] = 0;
+ }
+ }
+ for (i = 0; i < PMCS_NIQ; i++) {
+ if (pwp->iqp_handles[i]) {
+ if (ddi_dma_unbind_handle(pwp->iqp_handles[i]) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check "
+ "failed at %s():%d", __func__, __LINE__);
+ }
+ ddi_dma_free_handle(&pwp->iqp_handles[i]);
+ ddi_dma_mem_free(&pwp->iqp_acchdls[i]);
+ pwp->iqp_handles[i] = 0;
+ }
+ }
+
+ pmcs_free_dma_chunklist(pwp);
+
+ /*
+ * Unmap registers and destroy access handles
+ */
+ if (pwp->mpi_acc_handle) {
+ ddi_regs_map_free(&pwp->mpi_acc_handle);
+ pwp->mpi_acc_handle = 0;
+ }
+ if (pwp->top_acc_handle) {
+ ddi_regs_map_free(&pwp->top_acc_handle);
+ pwp->top_acc_handle = 0;
+ }
+ if (pwp->gsm_acc_handle) {
+ ddi_regs_map_free(&pwp->gsm_acc_handle);
+ pwp->gsm_acc_handle = 0;
+ }
+ if (pwp->msg_acc_handle) {
+ ddi_regs_map_free(&pwp->msg_acc_handle);
+ pwp->msg_acc_handle = 0;
+ }
+ if (pwp->pci_acc_handle) {
+ pci_config_teardown(&pwp->pci_acc_handle);
+ pwp->pci_acc_handle = 0;
+ }
+
+ /*
+ * Do memory allocation cleanup.
+ */
+ while (pwp->dma_freelist) {
+ pmcs_dmachunk_t *this = pwp->dma_freelist;
+ pwp->dma_freelist = this->nxt;
+ kmem_free(this, sizeof (pmcs_dmachunk_t));
+ }
+
+ /*
+ * Free pools
+ */
+ if (pwp->iocomp_cb_cache) {
+ kmem_cache_destroy(pwp->iocomp_cb_cache);
+ }
+
+ /*
+ * Free all PHYs (at level > 0), then free the cache
+ */
+ pmcs_free_all_phys(pwp, pwp->root_phys);
+ if (pwp->phy_cache) {
+ kmem_cache_destroy(pwp->phy_cache);
+ }
+
+ /*
+ * Free root PHYs
+ */
+ if (pwp->root_phys) {
+ pmcs_phy_t *phyp = pwp->root_phys;
+ for (i = 0; i < pwp->nphy; i++) {
+ mutex_destroy(&phyp->phy_lock);
+ phyp = phyp->sibling;
+ }
+ kmem_free(pwp->root_phys, pwp->nphy * sizeof (pmcs_phy_t));
+ pwp->root_phys = NULL;
+ pwp->nphy = 0;
+ }
+
+ /* Free the targets list */
+ if (pwp->targets) {
+ kmem_free(pwp->targets,
+ sizeof (pmcs_xscsi_t *) * pwp->max_dev);
+ }
+
+ /*
+ * Free work structures
+ */
+
+ if (pwp->work && pwp->max_cmd) {
+ for (i = 0; i < pwp->max_cmd - 1; i++) {
+ pmcwork_t *pwrk = &pwp->work[i];
+ mutex_destroy(&pwrk->lock);
+ cv_destroy(&pwrk->sleep_cv);
+ }
+ kmem_free(pwp->work, sizeof (pmcwork_t) * pwp->max_cmd);
+ pwp->work = NULL;
+ pwp->max_cmd = 0;
+ }
+
+ /*
+ * Do last property and SCSA cleanup
+ */
+ if (pwp->tran) {
+ scsi_hba_tran_free(pwp->tran);
+ pwp->tran = NULL;
+ }
+ if (pwp->reset_notify_listf) {
+ scsi_hba_reset_notify_tear_down(pwp->reset_notify_listf);
+ pwp->reset_notify_listf = NULL;
+ }
+ ddi_prop_remove_all(pwp->dip);
+ if (pwp->stuck) {
+ return (-1);
+ }
+
+ /* Free register dump area if allocated */
+ if (pwp->regdumpp) {
+ kmem_free(pwp->regdumpp, PMCS_REG_DUMP_SIZE);
+ pwp->regdumpp = NULL;
+ }
+ if (pwp->iqpt && pwp->iqpt->head) {
+ kmem_free(pwp->iqpt->head, PMCS_IQP_TRACE_BUFFER_SIZE);
+ pwp->iqpt->head = pwp->iqpt->curpos = NULL;
+ }
+ if (pwp->iqpt) {
+ kmem_free(pwp->iqpt, sizeof (pmcs_iqp_trace_t));
+ pwp->iqpt = NULL;
+ }
+
+ ddi_soft_state_free(pmcs_softc_state, ddi_get_instance(pwp->dip));
+ return (0);
+}
+
+/*
+ * quiesce (9E) entry point
+ *
+ * This function is called when the system is single-threaded at high PIL
+ * with preemption disabled. Therefore, the function must not block/wait/sleep.
+ *
+ * Returns DDI_SUCCESS or DDI_FAILURE.
+ *
+ */
+static int
+pmcs_quiesce(dev_info_t *dip)
+{
+ pmcs_hw_t *pwp;
+ scsi_hba_tran_t *tran;
+
+ if ((tran = ddi_get_driver_private(dip)) == NULL)
+ return (DDI_SUCCESS);
+
+ /* No quiesce necessary on a per-iport basis */
+ if (scsi_hba_iport_unit_address(dip) != NULL) {
+ return (DDI_SUCCESS);
+ }
+
+ if ((pwp = TRAN2PMC(tran)) == NULL)
+ return (DDI_SUCCESS);
+
+ /* Stop MPI & Reset chip (no need to re-initialize) */
+ (void) pmcs_stop_mpi(pwp);
+ (void) pmcs_soft_reset(pwp, B_TRUE);
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Called with xp->statlock and PHY lock and scratch acquired.
+ */
+static int
+pmcs_add_sata_device(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
+{
+ ata_identify_t *ati;
+ int result, i;
+ pmcs_phy_t *pptr;
+ uint16_t *a;
+ union {
+ uint8_t nsa[8];
+ uint16_t nsb[4];
+ } u;
+
+ /*
+ * Safe defaults - use only if this target is brand new (i.e. doesn't
+ * already have these settings configured)
+ */
+ if (xp->capacity == 0) {
+ xp->capacity = (uint64_t)-1;
+ xp->ca = 1;
+ xp->qdepth = 1;
+ xp->pio = 1;
+ }
+
+ pptr = xp->phy;
+
+ /*
+	 * We only try to issue an IDENTIFY for first level
+	 * (direct attached) devices. We don't try to
+ * set other quirks here (this will happen later,
+ * if the device is fully configured)
+ */
+ if (pptr->level) {
+ return (0);
+ }
+
+ mutex_exit(&xp->statlock);
+ result = pmcs_sata_identify(pwp, pptr);
+ mutex_enter(&xp->statlock);
+
+ if (result) {
+ return (result);
+ }
+ ati = pwp->scratch;
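+	/*
+	 * IDENTIFY words 108-111 hold the device World Wide Name;
+	 * byte-swap them into local storage for inspection below.
+	 */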
+ a = &ati->word108;
+ for (i = 0; i < 4; i++) {
+ u.nsb[i] = ddi_swap16(*a++);
+ }
+
+ /*
+ * Check the returned data for being a valid (NAA=5) WWN.
+ * If so, use that and override the SAS address we were
+ * given at Link Up time.
+ */
+ if ((u.nsa[0] >> 4) == 5) {
+ (void) memcpy(pptr->sas_address, u.nsa, 8);
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: %s has SAS ADDRESS " SAS_ADDR_FMT,
+ __func__, pptr->path, SAS_ADDR_PRT(pptr->sas_address));
+ return (0);
+}
+
+/*
+ * Called with PHY lock and target statlock held and scratch acquired
+ */
+static boolean_t
+pmcs_add_new_device(pmcs_hw_t *pwp, pmcs_xscsi_t *target)
+{
+ ASSERT(target != NULL);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: target = 0x%p",
+ __func__, (void *) target);
+
+ switch (target->phy->dtype) {
+ case SATA:
+ if (pmcs_add_sata_device(pwp, target) != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: add_sata_device failed for tgt 0x%p",
+ __func__, (void *) target);
+ return (B_FALSE);
+ }
+ break;
+ case SAS:
+ target->qdepth = maxqdepth;
+ break;
+ case EXPANDER:
+ target->qdepth = 1;
+ break;
+ }
+
+ target->new = 0;
+ target->assigned = 1;
+ target->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
+ target->dtype = target->phy->dtype;
+
+ /*
+ * Set the PHY's config stop time to 0. This is one of the final
+ * stops along the config path, so we're indicating that we
+ * successfully configured the PHY.
+ */
+ target->phy->config_stop = 0;
+
+ return (B_TRUE);
+}
+
+
+static void
+pmcs_rem_old_devices(pmcs_hw_t *pwp)
+{
+ pmcs_xscsi_t *xp;
+ int i;
+
+ mutex_enter(&pwp->lock);
+ for (i = 0; i < pwp->max_dev; i++) {
+ xp = pwp->targets[i];
+ if (xp == NULL) {
+ continue;
+ }
+ mutex_exit(&pwp->lock);
+
+ mutex_enter(&xp->statlock);
+ if (xp->dying && (xp->dip != NULL)) {
+ pmcs_clear_xp(pwp, xp);
+ /* Target is now gone */
+ }
+ mutex_exit(&xp->statlock);
+ mutex_enter(&pwp->lock);
+ }
+ mutex_exit(&pwp->lock);
+}
+
+
+void
+pmcs_worker(void *arg)
+{
+ pmcs_hw_t *pwp = arg;
+ ulong_t work_flags;
+
+ DTRACE_PROBE2(pmcs__worker, ulong_t, pwp->work_flags, boolean_t,
+ pwp->config_changed);
+
+ if (pwp->state != STATE_RUNNING) {
+ return;
+ }
+
+ work_flags = atomic_swap_ulong(&pwp->work_flags, 0);
+
+ if (work_flags & PMCS_WORK_FLAG_SAS_HW_ACK) {
+ pmcs_ack_events(pwp);
+ }
+
+ if (work_flags & PMCS_WORK_FLAG_SPINUP_RELEASE) {
+ mutex_enter(&pwp->lock);
+ pmcs_spinup_release(pwp, NULL);
+ mutex_exit(&pwp->lock);
+ }
+
+ if (work_flags & PMCS_WORK_FLAG_SSP_EVT_RECOVERY) {
+ pmcs_ssp_event_recovery(pwp);
+ }
+
+ if (work_flags & PMCS_WORK_FLAG_DS_ERR_RECOVERY) {
+ pmcs_dev_state_recovery(pwp, NULL);
+ }
+
+ if (work_flags & PMCS_WORK_FLAG_REM_DEVICES) {
+ pmcs_rem_old_devices(pwp);
+ }
+
+ if (work_flags & PMCS_WORK_FLAG_DISCOVER) {
+ pmcs_discover(pwp);
+ }
+
+ if (work_flags & PMCS_WORK_FLAG_ABORT_HANDLE) {
+ if (pmcs_abort_handler(pwp)) {
+ SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
+ }
+ }
+
+ if (work_flags & PMCS_WORK_FLAG_SATA_RUN) {
+ pmcs_sata_work(pwp);
+ }
+
+ if (work_flags & PMCS_WORK_FLAG_RUN_QUEUES) {
+ pmcs_scsa_wq_run(pwp);
+ mutex_enter(&pwp->lock);
+ PMCS_CQ_RUN(pwp);
+ mutex_exit(&pwp->lock);
+ }
+
+ if (work_flags & PMCS_WORK_FLAG_ADD_DMA_CHUNKS) {
+ if (pmcs_add_more_chunks(pwp,
+ ptob(1) * PMCS_ADDTL_CHUNK_PAGES)) {
+ SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
+ } else {
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ }
+ }
+}
+
+static int
+pmcs_add_more_chunks(pmcs_hw_t *pwp, unsigned long nsize)
+{
+ pmcs_dmachunk_t *dc;
+ unsigned long dl;
+ pmcs_chunk_t *pchunk = NULL;
+
+ pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t);
+
+ pchunk = kmem_zalloc(sizeof (pmcs_chunk_t), KM_SLEEP);
+ if (pchunk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Not enough memory for DMA chunks");
+ return (-1);
+ }
+
+ if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pchunk->acc_handle,
+ &pchunk->dma_handle, nsize, (caddr_t *)&pchunk->addrp,
+ &pchunk->dma_addr) == B_FALSE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Failed to setup DMA for chunks");
+ kmem_free(pchunk, sizeof (pmcs_chunk_t));
+ return (-1);
+ }
+
+ if ((pmcs_check_acc_handle(pchunk->acc_handle) != DDI_SUCCESS) ||
+ (pmcs_check_dma_handle(pchunk->dma_handle) != DDI_SUCCESS)) {
+ ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
+ return (-1);
+ }
+
+ bzero(pchunk->addrp, nsize);
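+	/*
+	 * Carve the region into PMCS_SGL_CHUNKSZ pieces, building a list
+	 * of chunk descriptors to hand to pmcs_idma_chunks().
+	 */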
+ dc = NULL;
+ for (dl = 0; dl < (nsize / PMCS_SGL_CHUNKSZ); dl++) {
+ pmcs_dmachunk_t *tmp;
+ tmp = kmem_alloc(sizeof (pmcs_dmachunk_t), KM_SLEEP);
+ tmp->nxt = dc;
+ dc = tmp;
+ }
+ mutex_enter(&pwp->dma_lock);
+ pmcs_idma_chunks(pwp, dc, pchunk, nsize);
+ pwp->nchunks++;
+ mutex_exit(&pwp->dma_lock);
+ return (0);
+}
+
+
+static void
+pmcs_check_commands(pmcs_hw_t *pwp)
+{
+ pmcs_cmd_t *sp;
+ size_t amt;
+ char path[32];
+ pmcwork_t *pwrk;
+ pmcs_xscsi_t *target;
+ pmcs_phy_t *phyp;
+
+ for (pwrk = pwp->work; pwrk < &pwp->work[pwp->max_cmd]; pwrk++) {
+ mutex_enter(&pwrk->lock);
+
+ /*
+ * If the command isn't active, we can't be timing it still.
+ * Active means the tag is not free and the state is "on chip".
+ */
+ if (!PMCS_COMMAND_ACTIVE(pwrk)) {
+ mutex_exit(&pwrk->lock);
+ continue;
+ }
+
+ /*
+ * No timer active for this command.
+ */
+ if (pwrk->timer == 0) {
+ mutex_exit(&pwrk->lock);
+ continue;
+ }
+
+ /*
+ * Knock off bits for the time interval.
+ */
+ if (pwrk->timer >= US2WT(PMCS_WATCH_INTERVAL)) {
+ pwrk->timer -= US2WT(PMCS_WATCH_INTERVAL);
+ } else {
+ pwrk->timer = 0;
+ }
+ if (pwrk->timer > 0) {
+ mutex_exit(&pwrk->lock);
+ continue;
+ }
+
+ /*
+ * The command has now officially timed out.
+ * Get the path for it. If it doesn't have
+ * a phy pointer any more, it's really dead
+ * and can just be put back on the free list.
+ * There should *not* be any commands associated
+ * with it any more.
+ */
+ if (pwrk->phy == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "dead command with gone phy being recycled");
+ ASSERT(pwrk->xp == NULL);
+ pmcs_pwork(pwp, pwrk);
+ continue;
+ }
+ amt = sizeof (path);
+ amt = min(sizeof (pwrk->phy->path), amt);
+ (void) memcpy(path, pwrk->phy->path, amt);
+
+ /*
+ * If this is a non-SCSA command, stop here. Eventually
+	 * we might do something with non-SCSA commands here,
+ * but so far their timeout mechanisms are handled in
+ * the WAIT_FOR macro.
+ */
+ if (pwrk->xp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: non-SCSA cmd tag 0x%x timed out",
+ path, pwrk->htag);
+ mutex_exit(&pwrk->lock);
+ continue;
+ }
+
+ sp = pwrk->arg;
+ ASSERT(sp != NULL);
+
+ /*
+ * Mark it as timed out.
+ */
+ CMD2PKT(sp)->pkt_reason = CMD_TIMEOUT;
+ CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
+#ifdef DEBUG
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: SCSA cmd tag 0x%x timed out (state %x) onwire=%d",
+ path, pwrk->htag, pwrk->state, pwrk->onwire);
+#else
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: SCSA cmd tag 0x%x timed out (state %x)",
+ path, pwrk->htag, pwrk->state);
+#endif
+ /*
+ * Mark the work structure as timed out.
+ */
+ pwrk->state = PMCS_WORK_STATE_TIMED_OUT;
+ phyp = pwrk->phy;
+ target = pwrk->xp;
+ mutex_exit(&pwrk->lock);
+
+ pmcs_lock_phy(phyp);
+ mutex_enter(&target->statlock);
+
+ /*
+ * No point attempting recovery if the device is gone
+ */
+ if (pwrk->xp->dev_gone) {
+ mutex_exit(&target->statlock);
+ pmcs_unlock_phy(phyp);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: tgt(0x%p) is gone. Returning CMD_DEV_GONE "
+ "for htag 0x%08x", __func__,
+ (void *)pwrk->xp, pwrk->htag);
+ mutex_enter(&pwrk->lock);
+ if (!PMCS_COMMAND_DONE(pwrk)) {
+ /* Complete this command here */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: "
+ "Completing cmd (htag 0x%08x) "
+ "anyway", __func__, pwrk->htag);
+ pwrk->dead = 1;
+ CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
+ CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
+ pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
+ } else {
+ mutex_exit(&pwrk->lock);
+ }
+ continue;
+ }
+
+ /*
+ * See if we're already waiting for device state recovery
+ */
+ if (target->recover_wait) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Target %p already in recovery", __func__,
+ (void *)target);
+ mutex_exit(&target->statlock);
+ pmcs_unlock_phy(phyp);
+ continue;
+ }
+
+ pmcs_start_dev_state_recovery(target, phyp);
+ mutex_exit(&target->statlock);
+ pmcs_unlock_phy(phyp);
+ }
+ /*
+ * Run any completions that may have been queued up.
+ */
+ PMCS_CQ_RUN(pwp);
+}
+
+static void
+pmcs_watchdog(void *arg)
+{
+ pmcs_hw_t *pwp = arg;
+
+ DTRACE_PROBE2(pmcs__watchdog, ulong_t, pwp->work_flags, boolean_t,
+ pwp->config_changed);
+
+ mutex_enter(&pwp->lock);
+
+ if (pwp->state != STATE_RUNNING) {
+ mutex_exit(&pwp->lock);
+ return;
+ }
+
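+	/*
+	 * atomic_cas_ulong() with old == new == 0 is just an atomic read
+	 * of work_flags; if any work is pending, dispatch the worker.
+	 */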
+ if (atomic_cas_ulong(&pwp->work_flags, 0, 0) != 0) {
+ if (ddi_taskq_dispatch(pwp->tq, pmcs_worker, pwp,
+ DDI_NOSLEEP) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Could not dispatch to worker thread");
+ }
+ }
+ pwp->wdhandle = timeout(pmcs_watchdog, pwp,
+ drv_usectohz(PMCS_WATCH_INTERVAL));
+ mutex_exit(&pwp->lock);
+ pmcs_check_commands(pwp);
+ pmcs_handle_dead_phys(pwp);
+}
+
+static int
+pmcs_remove_ihandlers(pmcs_hw_t *pwp, int icnt)
+{
+ int i, r, rslt = 0;
+ for (i = 0; i < icnt; i++) {
+ r = ddi_intr_remove_handler(pwp->ih_table[i]);
+ if (r == DDI_SUCCESS) {
+ continue;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: unable to remove interrupt handler %d", __func__, i);
+ rslt = -1;
+ break;
+ }
+ return (rslt);
+}
+
+static int
+pmcs_disable_intrs(pmcs_hw_t *pwp, int icnt)
+{
+ if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
+ int r = ddi_intr_block_disable(&pwp->ih_table[0],
+ pwp->intr_cnt);
+ if (r != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "unable to disable interrupt block");
+ return (-1);
+ }
+ } else {
+ int i;
+ for (i = 0; i < icnt; i++) {
+ if (ddi_intr_disable(pwp->ih_table[i]) == DDI_SUCCESS) {
+ continue;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "unable to disable interrupt %d", i);
+ return (-1);
+ }
+ }
+ return (0);
+}
+
+static int
+pmcs_free_intrs(pmcs_hw_t *pwp, int icnt)
+{
+ int i;
+ for (i = 0; i < icnt; i++) {
+ if (ddi_intr_free(pwp->ih_table[i]) == DDI_SUCCESS) {
+ continue;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "unable to free interrupt %d", i);
+ return (-1);
+ }
+ kmem_free(pwp->ih_table, pwp->ih_table_size);
+ pwp->ih_table_size = 0;
+ return (0);
+}
+
+/*
+ * Try to set up interrupts of type "type" with a minimum number of interrupts
+ * of "min".
+ */
+static void
+pmcs_setup_intr_impl(pmcs_hw_t *pwp, int type, int min)
+{
+ int rval, avail, count, actual, max;
+
+ rval = ddi_intr_get_nintrs(pwp->dip, type, &count);
+ if ((rval != DDI_SUCCESS) || (count < min)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: get_nintrs failed; type: %d rc: %d count: %d min: %d",
+ __func__, type, rval, count, min);
+ return;
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: nintrs = %d for type: %d", __func__, count, type);
+
+ rval = ddi_intr_get_navail(pwp->dip, type, &avail);
+ if ((rval != DDI_SUCCESS) || (avail < min)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: get_navail failed; type: %d rc: %d avail: %d min: %d",
+ __func__, type, rval, avail, min);
+ return;
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: navail = %d for type: %d", __func__, avail, type);
+
+ pwp->ih_table_size = avail * sizeof (ddi_intr_handle_t);
+ pwp->ih_table = kmem_alloc(pwp->ih_table_size, KM_SLEEP);
+
+ switch (type) {
+ case DDI_INTR_TYPE_MSIX:
+ pwp->int_type = PMCS_INT_MSIX;
+ max = PMCS_MAX_MSIX;
+ break;
+ case DDI_INTR_TYPE_MSI:
+ pwp->int_type = PMCS_INT_MSI;
+ max = PMCS_MAX_MSI;
+ break;
+ case DDI_INTR_TYPE_FIXED:
+ default:
+ pwp->int_type = PMCS_INT_FIXED;
+ max = PMCS_MAX_FIXED;
+ break;
+ }
+
+ rval = ddi_intr_alloc(pwp->dip, pwp->ih_table, type, 0, max, &actual,
+ DDI_INTR_ALLOC_NORMAL);
+ if (rval != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: ddi_intr_alloc failed; type: %d rc: %d",
+ __func__, type, rval);
+ kmem_free(pwp->ih_table, pwp->ih_table_size);
+ pwp->ih_table = NULL;
+ pwp->ih_table_size = 0;
+ pwp->intr_cnt = 0;
+ pwp->int_type = PMCS_INT_NONE;
+ return;
+ }
+
+ pwp->intr_cnt = actual;
+}
+
+/*
+ * Set up interrupts.
+ * We return one of three values:
+ *
+ * 0 - success
+ * EAGAIN - failure to set up interrupts
+ * EIO    - failure to set up interrupts; we're now stuck partly enabled
+ *
+ * If EIO is returned, we can't unload the driver.
+ */
+static int
+pmcs_setup_intr(pmcs_hw_t *pwp)
+{
+ int i, r, itypes, oqv_count;
+ ddi_intr_handler_t **iv_table;
+ size_t iv_table_size;
+ uint_t pri;
+
+ if (ddi_intr_get_supported_types(pwp->dip, &itypes) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "cannot get interrupt types");
+ return (EAGAIN);
+ }
+
+ if (disable_msix) {
+ itypes &= ~DDI_INTR_TYPE_MSIX;
+ }
+ if (disable_msi) {
+ itypes &= ~DDI_INTR_TYPE_MSI;
+ }
+
+ /*
+ * We won't know what firmware we're running until we call pmcs_setup,
+ * and we can't call pmcs_setup until we establish interrupts.
+ */
+
+ pwp->int_type = PMCS_INT_NONE;
+
+ /*
+ * We want PMCS_MAX_MSIX vectors for MSI-X. Anything less would be
+ * uncivilized.
+ */
+ if (itypes & DDI_INTR_TYPE_MSIX) {
+ pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSIX, PMCS_MAX_MSIX);
+ if (pwp->int_type == PMCS_INT_MSIX) {
+ itypes = 0;
+ }
+ }
+
+ if (itypes & DDI_INTR_TYPE_MSI) {
+ pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSI, 1);
+ if (pwp->int_type == PMCS_INT_MSI) {
+ itypes = 0;
+ }
+ }
+
+ if (itypes & DDI_INTR_TYPE_FIXED) {
+ pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_FIXED, 1);
+ if (pwp->int_type == PMCS_INT_FIXED) {
+ itypes = 0;
+ }
+ }
+
+ if (pwp->intr_cnt == 0) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, "No interrupts available");
+ return (EAGAIN);
+ }
+
+ iv_table_size = sizeof (ddi_intr_handler_t *) * pwp->intr_cnt;
+ iv_table = kmem_alloc(iv_table_size, KM_SLEEP);
+
+ /*
+ * Get iblock cookie and add handlers.
+ */
+ switch (pwp->intr_cnt) {
+ case 1:
+ iv_table[0] = pmcs_all_intr;
+ break;
+ case 2:
+ iv_table[0] = pmcs_iodone_ix;
+ iv_table[1] = pmcs_nonio_ix;
+ break;
+ case 4:
+ iv_table[PMCS_MSIX_GENERAL] = pmcs_general_ix;
+ iv_table[PMCS_MSIX_IODONE] = pmcs_iodone_ix;
+ iv_table[PMCS_MSIX_EVENTS] = pmcs_event_ix;
+ iv_table[PMCS_MSIX_FATAL] = pmcs_fatal_ix;
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: intr_cnt = %d - unexpected", __func__, pwp->intr_cnt);
+ kmem_free(iv_table, iv_table_size);
+ return (EAGAIN);
+ }
+
+ for (i = 0; i < pwp->intr_cnt; i++) {
+ r = ddi_intr_add_handler(pwp->ih_table[i], iv_table[i],
+ (caddr_t)pwp, NULL);
+ if (r != DDI_SUCCESS) {
+ kmem_free(iv_table, iv_table_size);
+ if (pmcs_remove_ihandlers(pwp, i)) {
+ return (EIO);
+ }
+ if (pmcs_free_intrs(pwp, i)) {
+ return (EIO);
+ }
+ pwp->intr_cnt = 0;
+ return (EAGAIN);
+ }
+ }
+
+ kmem_free(iv_table, iv_table_size);
+
+ if (ddi_intr_get_cap(pwp->ih_table[0], &pwp->intr_cap) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "unable to get int capabilities");
+ if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ pwp->intr_cnt = 0;
+ return (EAGAIN);
+ }
+
+ if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
+ r = ddi_intr_block_enable(&pwp->ih_table[0], pwp->intr_cnt);
+ if (r != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "intr blk enable failed");
+ if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ pwp->intr_cnt = 0;
+ return (EFAULT);
+ }
+ } else {
+ for (i = 0; i < pwp->intr_cnt; i++) {
+ r = ddi_intr_enable(pwp->ih_table[i]);
+ if (r == DDI_SUCCESS) {
+ continue;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "unable to enable interrupt %d", i);
+ if (pmcs_disable_intrs(pwp, i)) {
+ return (EIO);
+ }
+ if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ pwp->intr_cnt = 0;
+ return (EAGAIN);
+ }
+ }
+
+ /*
+ * Set up locks.
+ */
+ if (ddi_intr_get_pri(pwp->ih_table[0], &pri) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "unable to get interrupt priority");
+ if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ pwp->intr_cnt = 0;
+ return (EAGAIN);
+ }
+
+ pwp->locks_initted = 1;
+ pwp->intr_pri = pri;
+ mutex_init(&pwp->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
+ mutex_init(&pwp->dma_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
+ mutex_init(&pwp->axil_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
+ mutex_init(&pwp->cq_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
+ mutex_init(&pwp->ict_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
+ mutex_init(&pwp->config_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
+ mutex_init(&pwp->wfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
+ mutex_init(&pwp->pfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
+ mutex_init(&pwp->dead_phylist_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(pri));
+#ifdef DEBUG
+ mutex_init(&pwp->dbglock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
+#endif
+ cv_init(&pwp->ict_cv, NULL, CV_DRIVER, NULL);
+ cv_init(&pwp->drain_cv, NULL, CV_DRIVER, NULL);
+ for (i = 0; i < PMCS_NIQ; i++) {
+ mutex_init(&pwp->iqp_lock[i], NULL,
+ MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
+ }
+ for (i = 0; i < pwp->cq_info.cq_threads; i++) {
+ mutex_init(&pwp->cq_info.cq_thr_info[i].cq_thr_lock, NULL,
+ MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
+ cv_init(&pwp->cq_info.cq_thr_info[i].cq_cv, NULL,
+ CV_DRIVER, NULL);
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_INFO, "%d %s interrup%s configured",
+ pwp->intr_cnt, (pwp->int_type == PMCS_INT_MSIX)? "MSI-X" :
+ ((pwp->int_type == PMCS_INT_MSI)? "MSI" : "INT-X"),
+ pwp->intr_cnt == 1? "t" : "ts");
+
+
+ /*
+ * Enable Interrupts
+ */
+ if (pwp->intr_cnt > PMCS_NOQ) {
+ oqv_count = pwp->intr_cnt;
+ } else {
+ oqv_count = PMCS_NOQ;
+ }
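+	/*
+	 * Build the outbound doorbell mask: start with all bits masked
+	 * and clear one bit per outbound queue vector so those
+	 * interrupts are enabled.
+	 */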
+ for (pri = 0xffffffff, i = 0; i < oqv_count; i++) {
+ pri ^= (1 << i);
+ }
+
+ mutex_enter(&pwp->lock);
+ pwp->intr_mask = pri;
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
+ mutex_exit(&pwp->lock);
+
+ return (0);
+}
+
+static int
+pmcs_teardown_intr(pmcs_hw_t *pwp)
+{
+ if (pwp->intr_cnt) {
+ if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
+ return (EIO);
+ }
+ pwp->intr_cnt = 0;
+ }
+ return (0);
+}
+
+static uint_t
+pmcs_general_ix(caddr_t arg1, caddr_t arg2)
+{
+ pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
+ _NOTE(ARGUNUSED(arg2));
+ pmcs_general_intr(pwp);
+ return (DDI_INTR_CLAIMED);
+}
+
+static uint_t
+pmcs_event_ix(caddr_t arg1, caddr_t arg2)
+{
+ pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
+ _NOTE(ARGUNUSED(arg2));
+ pmcs_event_intr(pwp);
+ return (DDI_INTR_CLAIMED);
+}
+
+static uint_t
+pmcs_iodone_ix(caddr_t arg1, caddr_t arg2)
+{
+ _NOTE(ARGUNUSED(arg2));
+ pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
+
+ /*
+ * It's possible that if we just turned interrupt coalescing off
+ * (and thus, re-enabled auto clear for interrupts on the I/O outbound
+ * queue) that there was an interrupt already pending. We use
+ * io_intr_coal.int_cleared to ensure that we still drop in here and
+ * clear the appropriate interrupt bit one last time.
+ */
+ mutex_enter(&pwp->ict_lock);
+ if (pwp->io_intr_coal.timer_on ||
+ (pwp->io_intr_coal.int_cleared == B_FALSE)) {
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
+ (1 << PMCS_OQ_IODONE));
+ pwp->io_intr_coal.int_cleared = B_TRUE;
+ }
+ mutex_exit(&pwp->ict_lock);
+
+ pmcs_iodone_intr(pwp);
+
+ return (DDI_INTR_CLAIMED);
+}
+
+static uint_t
+pmcs_fatal_ix(caddr_t arg1, caddr_t arg2)
+{
+ pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
+ _NOTE(ARGUNUSED(arg2));
+ pmcs_fatal_handler(pwp);
+ return (DDI_INTR_CLAIMED);
+}
+
+static uint_t
+pmcs_nonio_ix(caddr_t arg1, caddr_t arg2)
+{
+ _NOTE(ARGUNUSED(arg2));
+ pmcs_hw_t *pwp = (void *)arg1;
+ uint32_t obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);
+
+ /*
+ * Check for Fatal Interrupts
+ */
+ if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
+ pmcs_fatal_handler(pwp);
+ return (DDI_INTR_CLAIMED);
+ }
+
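+	/*
+	 * With two vectors, the GENERAL and EVENTS outbound queues share
+	 * this vector, so service both here.
+	 */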
+ if (obdb & (1 << PMCS_OQ_GENERAL)) {
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
+ (1 << PMCS_OQ_GENERAL));
+ pmcs_general_intr(pwp);
+ pmcs_event_intr(pwp);
+ }
+
+ return (DDI_INTR_CLAIMED);
+}
+
+static uint_t
+pmcs_all_intr(caddr_t arg1, caddr_t arg2)
+{
+ _NOTE(ARGUNUSED(arg2));
+ pmcs_hw_t *pwp = (void *) arg1;
+ uint32_t obdb;
+ int handled = 0;
+
+ obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);
+
+ /*
+ * Check for Fatal Interrupts
+ */
+ if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
+ pmcs_fatal_handler(pwp);
+ return (DDI_INTR_CLAIMED);
+ }
+
+ /*
+ * Check for Outbound Queue service needed
+ */
+ if (obdb & (1 << PMCS_OQ_IODONE)) {
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
+ (1 << PMCS_OQ_IODONE));
+ obdb ^= (1 << PMCS_OQ_IODONE);
+ handled++;
+ pmcs_iodone_intr(pwp);
+ }
+ if (obdb & (1 << PMCS_OQ_GENERAL)) {
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
+ (1 << PMCS_OQ_GENERAL));
+ obdb ^= (1 << PMCS_OQ_GENERAL);
+ handled++;
+ pmcs_general_intr(pwp);
+ }
+ if (obdb & (1 << PMCS_OQ_EVENTS)) {
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
+ (1 << PMCS_OQ_EVENTS));
+ obdb ^= (1 << PMCS_OQ_EVENTS);
+ handled++;
+ pmcs_event_intr(pwp);
+ }
+ if (obdb) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "interrupt bits not handled (0x%x)", obdb);
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, obdb);
+ handled++;
+ }
+ if (pwp->int_type == PMCS_INT_MSI) {
+ handled++;
+ }
+ return (handled? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
+}
+
+void
+pmcs_fatal_handler(pmcs_hw_t *pwp)
+{
+ pmcs_prt(pwp, PMCS_PRT_ERR, "Fatal Interrupt caught");
+ mutex_enter(&pwp->lock);
+ pwp->state = STATE_DEAD;
+ pmcs_register_dump_int(pwp);
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
+ mutex_exit(&pwp->lock);
+ pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
+ ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
+
+#ifdef DEBUG
+ cmn_err(CE_PANIC, "PMCS Fatal Firmware Error");
+#endif
+}
+
+/*
+ * Called with PHY lock and target statlock held and scratch acquired.
+ */
+boolean_t
+pmcs_assign_device(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt)
+{
+ pmcs_phy_t *pptr = tgt->phy;
+
+ switch (pptr->dtype) {
+ case SAS:
+ case EXPANDER:
+ break;
+ case SATA:
+ tgt->ca = 1;
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Target %p has PHY %p with invalid dtype",
+ __func__, (void *)tgt, (void *)pptr);
+ return (B_FALSE);
+ }
+
+ tgt->new = 1;
+ tgt->dev_gone = 0;
+ tgt->dying = 0;
+ tgt->recover_wait = 0;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: config %s vtgt %u for " SAS_ADDR_FMT, __func__,
+ pptr->path, tgt->target_num, SAS_ADDR_PRT(pptr->sas_address));
+
+ if (pmcs_add_new_device(pwp, tgt) != B_TRUE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Failed for vtgt %u / WWN " SAS_ADDR_FMT, __func__,
+ tgt->target_num, SAS_ADDR_PRT(pptr->sas_address));
+ mutex_destroy(&tgt->statlock);
+ mutex_destroy(&tgt->wqlock);
+ mutex_destroy(&tgt->aqlock);
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Called with softstate lock held
+ */
+void
+pmcs_remove_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ pmcs_xscsi_t *xp;
+ unsigned int vtgt;
+
+ ASSERT(mutex_owned(&pwp->lock));
+
+ for (vtgt = 0; vtgt < pwp->max_dev; vtgt++) {
+ xp = pwp->targets[vtgt];
+ if (xp == NULL) {
+ continue;
+ }
+
+ mutex_enter(&xp->statlock);
+ if (xp->phy == pptr) {
+ if (xp->new) {
+ xp->new = 0;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "cancel config of vtgt %u", vtgt);
+ } else {
+ xp->assigned = 0;
+ xp->dying = 1;
+ SCHEDULE_WORK(pwp, PMCS_WORK_REM_DEVICES);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "Scheduling removal of tgt 0x%p vtgt %u",
+ (void *)xp, vtgt);
+ }
+ mutex_exit(&xp->statlock);
+ break;
+ }
+ mutex_exit(&xp->statlock);
+ }
+}
+
+void
+pmcs_prt_impl(pmcs_hw_t *pwp, pmcs_prt_level_t level, const char *fmt, ...)
+{
+ va_list ap;
+ int written = 0;
+ char *ptr;
+ uint32_t elem_size = PMCS_TBUF_ELEM_SIZE - 1;
+ boolean_t system_log;
+ int system_log_level;
+
+ switch (level) {
+ case PMCS_PRT_DEBUG_DEVEL:
+ case PMCS_PRT_DEBUG_DEV_STATE:
+ case PMCS_PRT_DEBUG_PHY_LOCKING:
+ case PMCS_PRT_DEBUG_SCSI_STATUS:
+ case PMCS_PRT_DEBUG_UNDERFLOW:
+ case PMCS_PRT_DEBUG_CONFIG:
+ case PMCS_PRT_DEBUG_IPORT:
+ case PMCS_PRT_DEBUG_MAP:
+ case PMCS_PRT_DEBUG3:
+ case PMCS_PRT_DEBUG2:
+ case PMCS_PRT_DEBUG1:
+ case PMCS_PRT_DEBUG:
+ system_log = B_FALSE;
+ break;
+ case PMCS_PRT_INFO:
+ system_log = B_TRUE;
+ system_log_level = CE_CONT;
+ break;
+ case PMCS_PRT_WARN:
+ system_log = B_TRUE;
+ system_log_level = CE_NOTE;
+ break;
+ case PMCS_PRT_ERR:
+ system_log = B_TRUE;
+ system_log_level = CE_WARN;
+ break;
+ default:
+ return;
+ }
+
+ mutex_enter(&pmcs_trace_lock);
+ gethrestime(&pmcs_tbuf_ptr->timestamp);
+ ptr = pmcs_tbuf_ptr->buf;
+ written += snprintf(ptr, elem_size, "pmcs%d:%d: ",
+ ddi_get_instance(pwp->dip), level);
+ ptr += strlen(ptr);
+ va_start(ap, fmt);
+ written += vsnprintf(ptr, elem_size - written, fmt, ap);
+ va_end(ap);
+ if (written > elem_size - 1) {
+ /* Indicate truncation */
+ pmcs_tbuf_ptr->buf[elem_size - 1] = '+';
+ }
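+	/* Advance to the next trace slot, wrapping at the end of the buffer */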
+ if (++pmcs_tbuf_idx == pmcs_tbuf_num_elems) {
+ pmcs_tbuf_ptr = pmcs_tbuf;
+ pmcs_tbuf_wrap = B_TRUE;
+ pmcs_tbuf_idx = 0;
+ } else {
+ ++pmcs_tbuf_ptr;
+ }
+ mutex_exit(&pmcs_trace_lock);
+
+ /*
+	 * When pmcs_force_syslog is non-zero, everything also goes
+	 * to syslog at CE_CONT level.
+ */
+ if (pmcs_force_syslog) {
+ system_log = B_TRUE;
+ system_log_level = CE_CONT;
+ }
+
+ /*
+ * Anything that comes in with PMCS_PRT_INFO, WARN, or ERR also
+ * goes to syslog.
+ */
+ if (system_log) {
+ char local[196];
+
+ switch (system_log_level) {
+ case CE_CONT:
+ (void) snprintf(local, sizeof (local), "%sINFO: ",
+ pmcs_console ? "" : "?");
+ break;
+ case CE_NOTE:
+ case CE_WARN:
+ local[0] = 0;
+ break;
+ default:
+ return;
+ }
+
+ ptr = local;
+ ptr += strlen(local);
+ (void) snprintf(ptr, (sizeof (local)) -
+ ((size_t)ptr - (size_t)local), "pmcs%d: ",
+ ddi_get_instance(pwp->dip));
+ ptr += strlen(ptr);
+ va_start(ap, fmt);
+ (void) vsnprintf(ptr,
+ (sizeof (local)) - ((size_t)ptr - (size_t)local), fmt, ap);
+ va_end(ap);
+ if (level == CE_CONT) {
+ (void) strlcat(local, "\n", sizeof (local));
+ }
+ cmn_err(system_log_level, local);
+ }
+
+}
+
+/*
+ * pmcs_acquire_scratch
+ *
+ * If "wait" is true, the caller will wait until it can acquire the scratch.
+ * This implies the caller needs to be in a context where spinning for an
+ * indeterminate amount of time is acceptable.
+ */
+int
+pmcs_acquire_scratch(pmcs_hw_t *pwp, boolean_t wait)
+{
+ int rval;
+
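+	/*
+	 * atomic_swap_8() returns the previous value of scratch_locked:
+	 * zero means we acquired the scratch, non-zero means it was held.
+	 */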
+ if (!wait) {
+ return (atomic_swap_8(&pwp->scratch_locked, 1));
+ }
+
+ /*
+ * Caller will wait for scratch.
+ */
+ while ((rval = atomic_swap_8(&pwp->scratch_locked, 1)) != 0) {
+ drv_usecwait(100);
+ }
+
+ return (rval);
+}
+
+void
+pmcs_release_scratch(pmcs_hw_t *pwp)
+{
+ pwp->scratch_locked = 0;
+}
+
+static void
+pmcs_create_phy_stats(pmcs_iport_t *iport)
+{
+ sas_phy_stats_t *ps;
+ pmcs_hw_t *pwp;
+ pmcs_phy_t *phyp;
+ int ndata;
+ char ks_name[KSTAT_STRLEN];
+
+ ASSERT(iport != NULL);
+ pwp = iport->pwp;
+ ASSERT(pwp != NULL);
+
+ mutex_enter(&iport->lock);
+
+ for (phyp = list_head(&iport->phys);
+ phyp != NULL;
+ phyp = list_next(&iport->phys, phyp)) {
+
+ pmcs_lock_phy(phyp);
+
+ if (phyp->phy_stats != NULL) {
+ pmcs_unlock_phy(phyp);
+ /* We've already created this kstat instance */
+ continue;
+ }
+
+ ndata = (sizeof (sas_phy_stats_t)/sizeof (kstat_named_t));
+
+ (void) snprintf(ks_name, sizeof (ks_name),
+ "%s.%llx.%d.%d", ddi_driver_name(iport->dip),
+ (longlong_t)pwp->sas_wwns[0],
+ ddi_get_instance(iport->dip), phyp->phynum);
+
+ phyp->phy_stats = kstat_create("pmcs",
+ ddi_get_instance(iport->dip), ks_name, KSTAT_SAS_PHY_CLASS,
+ KSTAT_TYPE_NAMED, ndata, 0);
+
+ if (phyp->phy_stats == NULL) {
+ pmcs_unlock_phy(phyp);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Failed to create %s kstats", __func__,
+ ks_name);
+ continue;
+ }
+
+ ps = (sas_phy_stats_t *)phyp->phy_stats->ks_data;
+
+ kstat_named_init(&ps->seconds_since_last_reset,
+ "SecondsSinceLastReset", KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&ps->tx_frames,
+ "TxFrames", KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&ps->rx_frames,
+ "RxFrames", KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&ps->tx_words,
+ "TxWords", KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&ps->rx_words,
+ "RxWords", KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&ps->invalid_dword_count,
+ "InvalidDwordCount", KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&ps->running_disparity_error_count,
+ "RunningDisparityErrorCount", KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&ps->loss_of_dword_sync_count,
+ "LossofDwordSyncCount", KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&ps->phy_reset_problem_count,
+ "PhyResetProblemCount", KSTAT_DATA_ULONGLONG);
+
+ phyp->phy_stats->ks_private = phyp;
+ phyp->phy_stats->ks_update = pmcs_update_phy_stats;
+ kstat_install(phyp->phy_stats);
+ pmcs_unlock_phy(phyp);
+ }
+
+ mutex_exit(&iport->lock);
+}
+
+int
+pmcs_update_phy_stats(kstat_t *ks, int rw)
+{
+ int val, ret = DDI_FAILURE;
+ pmcs_phy_t *pptr = (pmcs_phy_t *)ks->ks_private;
+ pmcs_hw_t *pwp = pptr->pwp;
+ sas_phy_stats_t *ps = ks->ks_data;
+
+ _NOTE(ARGUNUSED(rw));
+ ASSERT((pptr != NULL) && (pwp != NULL));
+
+ /*
+ * We just want to lock against other invocations of kstat;
+ * we don't need to pmcs_lock_phy() for this.
+ */
+ mutex_enter(&pptr->phy_lock);
+
+ /* Get Stats from Chip */
+ val = pmcs_get_diag_report(pwp, PMCS_INVALID_DWORD_CNT, pptr->phynum);
+ if (val == DDI_FAILURE)
+ goto fail;
+ ps->invalid_dword_count.value.ull = (unsigned long long)val;
+
+ val = pmcs_get_diag_report(pwp, PMCS_DISPARITY_ERR_CNT, pptr->phynum);
+ if (val == DDI_FAILURE)
+ goto fail;
+ ps->running_disparity_error_count.value.ull = (unsigned long long)val;
+
+ val = pmcs_get_diag_report(pwp, PMCS_LOST_DWORD_SYNC_CNT, pptr->phynum);
+ if (val == DDI_FAILURE)
+ goto fail;
+ ps->loss_of_dword_sync_count.value.ull = (unsigned long long)val;
+
+ val = pmcs_get_diag_report(pwp, PMCS_RESET_FAILED_CNT, pptr->phynum);
+ if (val == DDI_FAILURE)
+ goto fail;
+ ps->phy_reset_problem_count.value.ull = (unsigned long long)val;
+
+ ret = DDI_SUCCESS;
+fail:
+ mutex_exit(&pptr->phy_lock);
+ return (ret);
+}
+
+static void
+pmcs_destroy_phy_stats(pmcs_iport_t *iport)
+{
+ pmcs_phy_t *phyp;
+
+ ASSERT(iport != NULL);
+ mutex_enter(&iport->lock);
+ phyp = iport->pptr;
+ if (phyp == NULL) {
+ mutex_exit(&iport->lock);
+ return;
+ }
+
+ pmcs_lock_phy(phyp);
+ if (phyp->phy_stats != NULL) {
+ kstat_delete(phyp->phy_stats);
+ phyp->phy_stats = NULL;
+ }
+ pmcs_unlock_phy(phyp);
+
+ mutex_exit(&iport->lock);
+}
+
+/*ARGSUSED*/
+static int
+pmcs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
+{
+ /*
+ * as the driver can always deal with an error in any dma or
+ * access handle, we can just return the fme_status value.
+ */
+ pci_ereport_post(dip, err, NULL);
+ return (err->fme_status);
+}
+
+static void
+pmcs_fm_init(pmcs_hw_t *pwp)
+{
+ ddi_iblock_cookie_t fm_ibc;
+
+ /* Only register with IO Fault Services if we have some capability */
+ if (pwp->fm_capabilities) {
+ /* Adjust access and dma attributes for FMA */
+ pwp->reg_acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
+ pwp->dev_acc_attr.devacc_attr_access |= DDI_FLAGERR_ACC;
+ pwp->iqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
+ pwp->oqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
+ pwp->cip_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
+ pwp->fwlog_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
+
+ /*
+ * Register capabilities with IO Fault Services.
+ */
+ ddi_fm_init(pwp->dip, &pwp->fm_capabilities, &fm_ibc);
+
+ /*
+ * Initialize pci ereport capabilities if ereport
+		 * capable (which it should always be).
+ */
+ if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
+ DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
+ pci_ereport_setup(pwp->dip);
+ }
+
+ /*
+ * Register error callback if error callback capable.
+ */
+ if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
+ ddi_fm_handler_register(pwp->dip,
+ pmcs_fm_error_cb, (void *) pwp);
+ }
+ }
+}
+
+static void
+pmcs_fm_fini(pmcs_hw_t *pwp)
+{
+ /* Only unregister FMA capabilities if registered */
+ if (pwp->fm_capabilities) {
+ /*
+ * Un-register error callback if error callback capable.
+ */
+ if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
+ ddi_fm_handler_unregister(pwp->dip);
+ }
+
+ /*
+ * Release any resources allocated by pci_ereport_setup()
+ */
+ if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) ||
+ DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) {
+ pci_ereport_teardown(pwp->dip);
+ }
+
+ /* Unregister from IO Fault Services */
+ ddi_fm_fini(pwp->dip);
+
+ /* Adjust access and dma attributes for FMA */
+ pwp->reg_acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
+ pwp->dev_acc_attr.devacc_attr_access &= ~DDI_FLAGERR_ACC;
+ pwp->iqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
+ pwp->oqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
+ pwp->cip_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
+ pwp->fwlog_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
+ }
+}
+
+static boolean_t
+pmcs_fabricate_wwid(pmcs_hw_t *pwp)
+{
+ char *cp, c;
+ uint64_t adr;
+ int i;
+
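+	/* Seed the WWN from the system serial number, if one is available */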
+ cp = &c;
+ (void) ddi_strtoul(hw_serial, &cp, 10, (unsigned long *)&adr);
+ if (adr == 0) {
+ static const char foo[] = __DATE__ __TIME__;
+ /* Oh, dear, we're toast */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: No serial number available to fabricate WWN",
+ __func__);
+ for (i = 0; foo[i]; i++) {
+ adr += foo[i];
+ }
+ }
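+	/*
+	 * Build a NAA=5 format WWN: port index in the low byte, serial
+	 * number (or the fallback sum) above it, the driver instance at
+	 * bit 52 and the NAA nibble in the top four bits.
+	 */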
+ adr <<= 8;
+ adr |= ((uint64_t)ddi_get_instance(pwp->dip) << 52);
+ adr |= (5ULL << 60);
+ for (i = 0; i < PMCS_MAX_PORTS; i++) {
+ pwp->sas_wwns[i] = adr + i;
+ }
+
+ return (B_TRUE);
+}
diff --git a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_fw_hdr.c b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_fw_hdr.c
new file mode 100644
index 0000000000..ba2841b4b1
--- /dev/null
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_fw_hdr.c
@@ -0,0 +1,56 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * Solaris kernel module linkage code to make a minimal module.
+ */
+#include <sys/modctl.h>
+int pmcs8001_fwversion = PMCS_FIRMWARE_VERSION;
+
+static struct modlmisc modlmisc = {
+ &mod_miscops, "PMC-Sierra Firmware " PMCS_FIRMWARE_VERSION_STRING
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1,
+ (void *)&modlmisc
+};
+
+int
+_init()
+{
+ return (mod_install(&modlinkage));
+}
+
+int
+_fini()
+{
+ return (mod_remove(&modlinkage));
+}
+
+int
+_info(struct modinfo *mip)
+{
+ return (mod_info(&modlinkage, mip));
+}
diff --git a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_fwlog.c b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_fwlog.c
new file mode 100644
index 0000000000..76a9f9adc6
--- /dev/null
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_fwlog.c
@@ -0,0 +1,1024 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file contains firmware log routines.
+ */
+
+#include <sys/scsi/adapters/pmcs/pmcs.h>
+
+static int pmcs_dump_ioqs(pmcs_hw_t *, caddr_t, uint32_t);
+static int pmcs_dump_spc_ver(pmcs_hw_t *, caddr_t, uint32_t);
+static int pmcs_dump_mpi_table(pmcs_hw_t *, caddr_t, uint32_t);
+static int pmcs_dump_gsm_conf(pmcs_hw_t *, caddr_t, uint32_t);
+static int pmcs_dump_pcie_conf(pmcs_hw_t *, caddr_t, uint32_t);
+static uint32_t pmcs_get_axil(pmcs_hw_t *);
+static boolean_t pmcs_shift_axil(pmcs_hw_t *, uint32_t);
+static void pmcs_restore_axil(pmcs_hw_t *, uint32_t);
+static int pmcs_dump_gsm(pmcs_hw_t *, caddr_t, uint32_t);
+static int pmcs_dump_fwlog(pmcs_hw_t *, caddr_t, uint32_t);
+
+/*
+ * Dump internal registers. Used after a firmware crash.
+ * Dumps various registers for firmware forensics, including the
+ * MPI table, GSM configuration, firmware log and I/O queues.
+ */
+void
+pmcs_register_dump_int(pmcs_hw_t *pwp)
+{
+ int n = 0;
+ uint32_t size_left = 0;
+ uint8_t slice = 0;
+ caddr_t buf = NULL;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "pmcs%d: Internal register dump",
+ ddi_get_instance(pwp->dip));
+ ASSERT(mutex_owned(&pwp->lock));
+
+ if (pwp->regdumpp == NULL) {
+ pwp->regdumpp =
+ kmem_zalloc(PMCS_REG_DUMP_SIZE, KM_NOSLEEP);
+ if (pwp->regdumpp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: register"
+ " dump memory not allocated", __func__);
+ return;
+ }
+ }
+ buf = pwp->regdumpp;
+ size_left = PMCS_REG_DUMP_SIZE - 1;
+
+ n = pmcs_dump_spc_ver(pwp, buf, size_left);
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ n = pmcs_dump_gsm_conf(pwp, buf, size_left);
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ n = pmcs_dump_pcie_conf(pwp, buf, size_left);
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ n = pmcs_dump_mpi_table(pwp, buf, size_left);
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ n = pmcs_dump_ioqs(pwp, buf, size_left);
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+
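+	/*
+	 * The AAP1/IOP register dumps and event logs are read from flash
+	 * via pmcs_get_nvmd(), so pwp->lock is dropped for those reads.
+	 */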
+ mutex_exit(&pwp->lock);
+ slice = (PMCS_REGISTER_DUMP_FLASH_SIZE / PMCS_FLASH_CHUNK_SIZE);
+ n = snprintf(buf, size_left, "\nDump AAP1 register: \n"
+ "-----------------\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ for (uint8_t j = 0; j < slice; j++) {
+ n = pmcs_get_nvmd(pwp, PMCS_NVMD_REG_DUMP,
+ PMCIN_NVMD_AAP1, (j * PMCS_FLASH_CHUNK_SIZE),
+ buf, size_left);
+ if (n == PMCS_FLASH_CHUNK_SIZE) {
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ } else if ((n < PMCS_FLASH_CHUNK_SIZE) && (n > 0)) {
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ break;
+ } else if (n == 0) {
+ n = snprintf(buf, size_left, "AAP1: Content of "
+ "register dump on flash is NULL\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ break;
+ } else {
+ n = snprintf(buf, size_left,
+ "AAP1: Unable to obtain internal register dump\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ break;
+ }
+
+ }
+
+ n = snprintf(buf, size_left, "\nDump IOP register: \n"
+ "-----------------\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ for (uint8_t j = 0; j < slice; j++) {
+ n = pmcs_get_nvmd(pwp, PMCS_NVMD_REG_DUMP,
+ PMCIN_NVMD_IOP, (j * PMCS_FLASH_CHUNK_SIZE),
+ buf, size_left);
+ if (n == PMCS_FLASH_CHUNK_SIZE) {
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ } else if ((n < PMCS_FLASH_CHUNK_SIZE) && (n > 0)) {
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ break;
+ } else if (n == 0) {
+ n = snprintf(buf, size_left,
+ "IOP: Content of internal register dump is NULL\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ break;
+ } else {
+ n = snprintf(buf, size_left,
+ "IOP: Unable to obtain internal register dump\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ break;
+ }
+
+ }
+
+ n = snprintf(buf, size_left, "\nDump AAP1 event log: \n"
+ "-----------------\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ for (uint8_t j = 0; j < slice; j++) {
+ n = pmcs_get_nvmd(pwp, PMCS_NVMD_EVENT_LOG,
+ PMCIN_NVMD_AAP1, (j * PMCS_FLASH_CHUNK_SIZE),
+ buf, size_left);
+ if (n > 0) {
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ } else {
+ n = snprintf(buf, size_left,
+ "AAP1: Unable to obtain event log on flash\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ break;
+ }
+ }
+
+ n = snprintf(buf, size_left, "\nDump IOP event log: \n"
+ "-----------------\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ for (uint8_t j = 0; j < slice; j++) {
+ n = pmcs_get_nvmd(pwp, PMCS_NVMD_EVENT_LOG,
+ PMCIN_NVMD_IOP, (j * PMCS_FLASH_CHUNK_SIZE),
+ buf, size_left);
+ if (n > 0) {
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ } else {
+ n = snprintf(buf, size_left,
+ "IOP: Unable to obtain event log dump\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ break;
+ }
+ }
+ mutex_enter(&pwp->lock);
+
+ n = snprintf(buf, size_left, "\nDump firmware log: \n"
+ "-----------------\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+
+ n = pmcs_dump_fwlog(pwp, buf, size_left);
+
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+ n = snprintf(buf, size_left, "-----------------\n"
+ "\n------------ Dump internal registers end -------------\n");
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+
+ n = pmcs_dump_gsm(pwp, buf, size_left);
+ ASSERT(size_left >= n);
+ buf += n; size_left -= n;
+}
+
+static int
+pmcs_dump_fwlog(pmcs_hw_t *pwp, caddr_t buf, uint32_t size_left)
+{
+ pmcs_fw_event_hdr_t *evl_hdr;
+ int n = 0, retries = 0;
+ uint32_t evlog_latest_idx;
+ boolean_t log_is_current = B_FALSE;
+
+ if (pwp->fwlogp == NULL) {
+ n = snprintf(buf, size_left, "\nFirmware logging "
+ "not enabled\n");
+ return (n);
+ }
+
+ /*
+ * First, check to make sure all entries have been DMAed to the
+ * log buffer.
+ *
+ * We'll wait the required 50ms, but if the latest entry keeps
+	 * changing, we'll only retry twice.
+ */
+ evl_hdr = (pmcs_fw_event_hdr_t *)pwp->fwlogp;
+ evlog_latest_idx = evl_hdr->fw_el_latest_idx;
+
+ while ((log_is_current == B_FALSE) && (retries < 3)) {
+ drv_usecwait(50 * 1000);
+ if (evl_hdr->fw_el_latest_idx == evlog_latest_idx) {
+ log_is_current = B_TRUE;
+ } else {
+ ++retries;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: event log is still being updated... waiting",
+ __func__);
+ evlog_latest_idx = evl_hdr->fw_el_latest_idx;
+ }
+ }
+
+ n = pmcs_dump_binary(pwp, pwp->fwlogp, 0, (PMCS_FWLOG_SIZE >> 2),
+ buf, size_left);
+
+ return (n);
+}
+
+/*
+ * Dump Inbound and Outbound Queues.
+ */
+static int
+pmcs_dump_ioqs(pmcs_hw_t *pwp, caddr_t buf, uint32_t size_left)
+{
+ uint8_t i = 0, k = 0;
+ uint32_t j = 0, depth = 0;
+ int n = 0;
+ uint32_t *ptr = NULL;
+
+ n += snprintf(&buf[n], (size_left - n), "\nDump I/O queues: \n"
+ "-----------------\n");
+ for (i = 0; i < PMCS_NIQ; i++) {
+ depth = PMCS_IQDX(pmcs_rd_iqc_tbl(pwp, PMCS_IQC_PARMX(i)));
+ n += snprintf(&buf[n], (size_left - n),
+ "IQ[%d] Details:\n-----------------\n", i);
+ n += snprintf(&buf[n], (size_left - n),
+ " depth = 0x%04x\n", depth);
+ n += snprintf(&buf[n], (size_left - n),
+ " latest ci = 0x%02x\n", pmcs_rd_iqci(pwp, i));
+ n += snprintf(&buf[n], (size_left - n),
+ " latest pi = 0x%02x\n", pmcs_rd_iqpi(pwp, i));
+ for (j = 0; j < depth; j++) {
+ n += snprintf(&buf[n], (size_left - n),
+ "IOMB[%d]:\n", j);
+ ptr = &pwp->iqp[i][(j * PMCS_QENTRY_SIZE) >> 2];
+ for (k = 0; k < (PMCS_QENTRY_SIZE / sizeof (uint32_t));
+ k += 8) {
+ n += snprintf(&buf[n], (size_left - n),
+ "0x%08x 0x%08x 0x%08x 0x%08x "
+ "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ LE_32(ptr[k]), LE_32(ptr[k+1]),
+ LE_32(ptr[k+2]), LE_32(ptr[k+3]),
+ LE_32(ptr[k+4]), LE_32(ptr[k+5]),
+ LE_32(ptr[k+6]), LE_32(ptr[k+7]));
+ }
+ }
+ }
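+	/* Dump the outbound queues in the same format */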
+ for (i = 0; i < PMCS_NOQ; i++) {
+ depth = PMCS_OQDX(pmcs_rd_oqc_tbl(pwp, PMCS_OQC_PARMX(i)));
+ n += snprintf(&buf[n], (size_left - n),
+ "OQ[%d] Details:\n", i);
+ n += snprintf(&buf[n], (size_left - n),
+ " depth = 0x%04x\n", depth);
+ n += snprintf(&buf[n], (size_left - n),
+ " latest ci = 0x%02x\n", pmcs_rd_oqci(pwp, i));
+ n += snprintf(&buf[n], (size_left - n),
+ " latest pi = 0x%02x\n", pmcs_rd_oqpi(pwp, i));
+ for (j = 0; j < depth; j++) {
+ n += snprintf(&buf[n], (size_left - n),
+ "IOMB[%d]:\n", j);
+ ptr = &pwp->oqp[i][(j * PMCS_QENTRY_SIZE) >> 2];
+ for (k = 0; k < (PMCS_QENTRY_SIZE / sizeof (uint32_t));
+ k += 8) {
+ n += snprintf(&buf[n], (size_left - n),
+ "0x%08x 0x%08x 0x%08x 0x%08x "
+ "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ LE_32(ptr[k]), LE_32(ptr[k+1]),
+ LE_32(ptr[k+2]), LE_32(ptr[k+3]),
+ LE_32(ptr[k+4]), LE_32(ptr[k+5]),
+ LE_32(ptr[k+6]), LE_32(ptr[k+7]));
+ }
+ }
+
+ }
+ n += snprintf(&buf[n], (size_left - n), "-----------------\n"
+ "Dump I/O queues end \n");
+ return (n);
+}
+
+/*
+ * Dump SPC Version.
+ */
+static int
+pmcs_dump_spc_ver(pmcs_hw_t *pwp, caddr_t buf, uint32_t size_left)
+{
+ int n = 0;
+
+ n += snprintf(&buf[n], (size_left - n), "\nDump SPC version: \n"
+ "-----------------\n");
+ n += snprintf(&buf[n], (size_left - n), "Firmware Release Type = "
+ "0x%02x\n", PMCS_FW_TYPE(pwp));
+ n += snprintf(&buf[n], (size_left - n), " Sub-Minor Release "
+ "Number = 0x%02x\n", PMCS_FW_MICRO(pwp));
+ n += snprintf(&buf[n], (size_left - n), " Minor Release "
+ "Number = 0x%02x\n", PMCS_FW_MINOR(pwp));
+ n += snprintf(&buf[n], (size_left - n), " Major Release "
+ "Number = 0x%02x\n", PMCS_FW_MAJOR(pwp));
+ n += snprintf(&buf[n], (size_left - n), "SPC DeviceID = 0x%04x\n",
+ pmcs_rd_topunit(pwp, PMCS_SPC_DEVICE_ID));
+ n += snprintf(&buf[n], (size_left - n), "SPC Device Revision = "
+ "0x%08x\n", pmcs_rd_topunit(pwp, PMCS_DEVICE_REVISION));
+ n += snprintf(&buf[n], (size_left - n), "SPC BootStrap Register = "
+ "0x%08x\n", pmcs_rd_topunit(pwp, PMCS_SPC_BOOT_STRAP));
+ n += snprintf(&buf[n], (size_left - n), "SPC Reset Register = 0x%08x\n",
+ pmcs_rd_topunit(pwp, PMCS_SPC_RESET));
+ n += snprintf(&buf[n], (size_left - n), "-----------------\n"
+ "Dump SPC version end \n");
+ return (n);
+}
+
+/*
+ * Dump MPI Table.
+ */
+static int
+pmcs_dump_mpi_table(pmcs_hw_t *pwp, caddr_t buf, uint32_t size_left)
+{
+ int n = 0;
+
+ n += snprintf(&buf[n], (size_left - n), "\nDump MSGU registers: \n"
+ "-----------------\n");
+ n += snprintf(&buf[n], (size_left - n), "inb_doorbell = 0x%08x\n",
+ pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB));
+ n += snprintf(&buf[n], (size_left - n), "inb_doorbell_clear = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB_CLEAR));
+ n += snprintf(&buf[n], (size_left - n), "outb_doorbell = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB));
+ n += snprintf(&buf[n], (size_left - n), "outb_doorbell_clear = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR));
+ n += snprintf(&buf[n], (size_left - n), "scratch_pad0 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0));
+ n += snprintf(&buf[n], (size_left - n), "scratch_pad1 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1));
+ n += snprintf(&buf[n], (size_left - n), "scratch_pad2 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2));
+ n += snprintf(&buf[n], (size_left - n), "scratch_pad3 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH3));
+ n += snprintf(&buf[n], (size_left - n), "host_scratch_pad0 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0));
+ n += snprintf(&buf[n], (size_left - n), "host_scratch_pad1 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH1));
+ n += snprintf(&buf[n], (size_left - n), "host_scratch_pad2 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH2));
+ n += snprintf(&buf[n], (size_left - n), "host_scratch_pad3 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH3));
+ n += snprintf(&buf[n], (size_left - n), "host_scratch_pad4 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH4));
+ n += snprintf(&buf[n], (size_left - n), "host_scratch_pad5 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH5));
+ n += snprintf(&buf[n], (size_left - n), "host_scratch_pad6 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH6));
+ n += snprintf(&buf[n], (size_left - n), "host_scratch_pad7 = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH7));
+ n += snprintf(&buf[n], (size_left - n), "outb_doorbell_mask = 0x%08x"
+ "\n", pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_MASK));
+
+ n += snprintf(&buf[n], (size_left - n), "MPI Configuration Table: \n"
+ "-----------------\n");
+ n += snprintf(&buf[n], (size_left - n), "ASCII Signature = 0x%08x\n",
+ pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS));
+ n += snprintf(&buf[n], (size_left - n), "Firmware Release Type = "
+ "0x%08x\n", PMCS_FW_TYPE(pwp));
+ n += snprintf(&buf[n], (size_left - n), "Firmware Release Variant = "
+ "0x%08x\n", PMCS_FW_VARIANT(pwp));
+ n += snprintf(&buf[n], (size_left - n), "Firmware Sub-Minor Release "
+ "Number = 0x%08x\n", PMCS_FW_MICRO(pwp));
+ n += snprintf(&buf[n], (size_left - n), "Firmware Minor Release "
+ "Number = 0x%08x\n", PMCS_FW_MINOR(pwp));
+ n += snprintf(&buf[n], (size_left - n), "Firmware Major Release "
+ "Number = 0x%08x\n", PMCS_FW_MAJOR(pwp));
+ n += snprintf(&buf[n], (size_left - n), "Maximum Outstanding I/Os "
+ "supported = 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MOIO));
+ n += snprintf(&buf[n], (size_left - n), "Maximum Scatter-Gather List "
+ "Elements = 0x%08x\n",
+ PMCS_MSGL(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO0)));
+ n += snprintf(&buf[n], (size_left - n), "Maximum number of devices "
+ "connected to the SPC = 0x%08x\n",
+ PMCS_MD(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO0)));
+ n += snprintf(&buf[n], (size_left - n), "Maximum Number of IQs "
+ "supported = 0x%08x\n",
+ PMCS_MNIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)));
+ n += snprintf(&buf[n], (size_left - n), "Maximum Number of OQs "
+ "supported = 0x%08x\n",
+ PMCS_MNOQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)));
+ n += snprintf(&buf[n], (size_left - n), "High Priority Queue supported"
+ " = 0x%08x\n", PMCS_HPIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)));
+ n += snprintf(&buf[n], (size_left - n), "Interrupt Coalescing supported"
+ " = 0x%08x\n", PMCS_ICS(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)));
+ n += snprintf(&buf[n], (size_left - n), "Number of Phys = "
+ "0x%08x\n", PMCS_NPHY(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)));
+ n += snprintf(&buf[n], (size_left - n), "SAS Revision Specification = "
+ "0x%08x\n", PMCS_SASREV(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1)));
+ n += snprintf(&buf[n], (size_left - n), "General Status Table Offset = "
+ "0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_GSTO));
+ n += snprintf(&buf[n], (size_left - n), "Inbound Queue Configuration "
+ "Table Offset = 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IQCTO));
+ n += snprintf(&buf[n], (size_left - n), "Outbound Queue Configuration "
+ "Table Offset = 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_OQCTO));
+ n += snprintf(&buf[n], (size_left - n), "Inbound Queue Normal/High "
+ "Priority Processing Depth = 0x%02x 0x%02x\n",
+ (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO2) & IQ_NORMAL_PRI_DEPTH_MASK),
+ ((pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO2) &
+ IQ_HIPRI_PRI_DEPTH_MASK) >> IQ_HIPRI_PRI_DEPTH_SHIFT));
+ n += snprintf(&buf[n], (size_left - n), "General Event Notification "
+ "Queue = 0x%02x\n", (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO2) &
+ GENERAL_EVENT_OQ_MASK) >> GENERAL_EVENT_OQ_SHIFT);
+ n += snprintf(&buf[n], (size_left - n), "Device Handle Removed "
+ "Notification Queue = 0x%02x\n",
+ (uint32_t)(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO2) &
+ DEVICE_HANDLE_REMOVED_MASK) >> DEVICE_HANDLE_REMOVED_SHIFT);
+ for (uint8_t i = 0; i < pwp->nphy; i++) {
+ uint32_t woff = i / 4;
+ uint32_t shf = (i % 4) * 8;
+ n += snprintf(&buf[n], (size_left - n), "SAS HW Event "
+ "Notification Queue - PHY ID %d = 0x%02x\n", i,
+ (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_EVQS + (woff << 2)) >> shf)
+ & 0xff);
+ }
+ for (uint8_t i = 0; i < pwp->nphy; i++) {
+ uint32_t woff = i / 4;
+ uint32_t shf = (i % 4) * 8;
+ n += snprintf(&buf[n], (size_left - n), "SATA NCQ Error "
+ "Event Notification Queue - PHY ID %d = 0x%02x\n", i,
+ (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_SNCQ + (woff << 2)) >> shf)
+ & 0xff);
+ }
+ for (uint8_t i = 0; i < pwp->nphy; i++) {
+ uint32_t woff = i / 4;
+ uint32_t shf = (i % 4) * 8;
+ n += snprintf(&buf[n], (size_left - n), "I_T Nexus Target "
+ "Event Notification Queue - PHY ID %d = 0x%02x\n", i,
+ (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IT_NTENQ +
+ (woff << 2)) >> shf) & 0xff);
+ }
+ for (uint8_t i = 0; i < pwp->nphy; i++) {
+ uint32_t woff = i / 4;
+ uint32_t shf = (i % 4) * 8;
+ n += snprintf(&buf[n], (size_left - n), "SSP Target "
+ "Event Notification Queue - PHY ID %d = 0x%02x\n", i,
+ (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_SSP_TENQ +
+ (woff << 2)) >> shf) & 0xff);
+ }
+ for (uint8_t i = 0; i < pwp->nphy; i++) {
+ uint32_t woff = i / 4;
+ uint32_t shf = (i % 4) * 8;
+ n += snprintf(&buf[n], (size_left - n), "SMP Target "
+ "Event Notification Queue - PHY ID %d = 0x%02x\n", i,
+ (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_SMP_TENQ +
+ (woff << 2)) >> shf) & 0xff);
+ }
+ n += snprintf(&buf[n], (size_left - n), "MSGU Event Log Buffer Address "
+ "Higher = 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MELBAH));
+ n += snprintf(&buf[n], (size_left - n), "MSGU Event Log Buffer Address "
+ "Lower = 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MELBAL));
+ n += snprintf(&buf[n], (size_left - n), "MSGU Event Log Buffer Size "
+ "= 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MELBS));
+ n += snprintf(&buf[n], (size_left - n), "MSGU Event Log Severity "
+ "= 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MELSEV));
+ n += snprintf(&buf[n], (size_left - n), "IOP Event Log Buffer Address "
+ "Higher = 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IELBAH));
+ n += snprintf(&buf[n], (size_left - n), "IOP Event Log Buffer Address "
+ "Lower = 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IELBAL));
+ n += snprintf(&buf[n], (size_left - n), "IOP Event Log Buffer Size "
+ "= 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IELBS));
+ n += snprintf(&buf[n], (size_left - n), "IOP Event Log Severity "
+ "= 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IELSEV));
+ n += snprintf(&buf[n], (size_left - n), "Fatal Error Interrupt "
+ "= 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FERR));
+ n += snprintf(&buf[n], (size_left - n),
+ "Fatal Error Register Dump Offset "
+ "For MSGU = 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_FERDOMSGU));
+ n += snprintf(&buf[n], (size_left - n),
+ "Fatal Error Register Dump Length "
+ "For MSGU = 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_FERDLMSGU));
+ n += snprintf(&buf[n], (size_left - n),
+ "Fatal Error Register Dump Offset "
+ "For IOP = 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_FERDOIOP));
+ n += snprintf(&buf[n], (size_left - n),
+ "Fatal Error Register Dump Length "
+ "For IOP = 0x%08x\n", pmcs_rd_mpi_tbl(pwp, PMCS_FERDLIOP));
+
+ n += snprintf(&buf[n], (size_left - n), "Dump GS Table: \n"
+ "-----------------\n");
+ n += snprintf(&buf[n], (size_left - n), "GST MPI State: 0x%08x\n",
+ pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE));
+ n += snprintf(&buf[n], (size_left - n), "Inbound Queue Freeze State 0 "
+ "= 0x%08x\n", pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ0));
+ n += snprintf(&buf[n], (size_left - n), "Inbound Queue Freeze State 1 "
+ "= 0x%08x\n", pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ1));
+ n += snprintf(&buf[n], (size_left - n), "MSGU Tick Count = 0x%08x \n",
+ pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK));
+ n += snprintf(&buf[n], (size_left - n), "IOP Tick Count = 0x%08x\n",
+ pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK));
+ for (uint8_t i = 0; i < pwp->nphy; i++) {
+ n += snprintf(&buf[n], (size_left - n), " Phy %d state = "
+ "0x%08x\n", i, pmcs_rd_gst_tbl(pwp, PMCS_GST_PHY_INFO(i)));
+ }
+ for (uint8_t i = 0; i < pwp->nphy; i++) {
+ n += snprintf(&buf[n], (size_left - n), " Recoverable Error "
+ "Information %d = 0x%08x\n", i,
+ pmcs_rd_gst_tbl(pwp, PMCS_GST_RERR_INFO(i)));
+ }
+
+ n += snprintf(&buf[n], (size_left - n), "Dump IQCT Table\n"
+ "-----------------\n");
+ for (uint8_t i = 0; i < PMCS_NIQ; i++) {
+ n += snprintf(&buf[n], (size_left - n), "Inbound Queue "
+ "Configuration Table - [%d]:\n", i);
+ n += snprintf(&buf[n], (size_left - n), " Inbound Queue "
+ "Depth = 0x%08x\n",
+ PMCS_IQDX(pmcs_rd_iqc_tbl(pwp, PMCS_IQC_PARMX(i))));
+ n += snprintf(&buf[n], (size_left - n), " Inbound Queue "
+ "Element Size and Priority = 0x%08x 0x%08x\n",
+ PMCS_IQESX(pmcs_rd_iqc_tbl(pwp, PMCS_IQC_PARMX(i))),
+ PMCS_IQPX(pmcs_rd_iqc_tbl(pwp, PMCS_IQC_PARMX(i))));
+ n += snprintf(&buf[n], (size_left - n), " Inbound Queue "
+ "Base Address High = 0x%08x\n",
+ pmcs_rd_iqc_tbl(pwp, PMCS_IQBAHX(i)));
+ n += snprintf(&buf[n], (size_left - n), " Inbound Queue "
+ "Base Address Low = 0x%08x\n",
+ pmcs_rd_iqc_tbl(pwp, PMCS_IQBALX(i)));
+ n += snprintf(&buf[n], (size_left - n), " Inbound Queue "
+ "Consumer Index Base Address High = 0x%08x\n",
+ pmcs_rd_iqc_tbl(pwp, PMCS_IQCIBAHX(i)));
+ n += snprintf(&buf[n], (size_left - n), " Inbound Queue "
+ "Consumer Index Base Address Low = 0x%08x\n",
+ pmcs_rd_iqc_tbl(pwp, PMCS_IQCIBALX(i)));
+ n += snprintf(&buf[n], (size_left - n), " Inbound Queue "
+ "Producer Index PCI BAR = 0x%08x\n",
+ pmcs_rd_iqc_tbl(pwp, PMCS_IQPIBARX(i)));
+ n += snprintf(&buf[n], (size_left - n), " Inbound Queue "
+ "Producer Index PCI BAR offset = 0x%08x\n",
+ pmcs_rd_iqc_tbl(pwp, PMCS_IQPIOFFX(i)));
+ }
+
+ n += snprintf(&buf[n], (size_left - n), "Dump OQCT Table: \n"
+ "-----------------\n");
+ for (uint8_t i = 0; i < PMCS_NOQ; i++) {
+ n += snprintf(&buf[n], (size_left - n), "Outbound Queue "
+ "Configuration Table - [%d]:\n", i);
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Depth = 0x%08x\n",
+ PMCS_OQDX(pmcs_rd_oqc_tbl(pwp, PMCS_OQC_PARMX(i))));
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Element Size = 0x%08x\n",
+ PMCS_OQESX(pmcs_rd_oqc_tbl(pwp, PMCS_OQC_PARMX(i))));
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Base Address High = 0x%08x\n",
+ pmcs_rd_oqc_tbl(pwp, PMCS_OQBAHX(i)));
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Base Address Low = 0x%08x\n",
+ pmcs_rd_oqc_tbl(pwp, PMCS_OQBALX(i)));
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Producer Index Base Address High = 0x%08x\n",
+ pmcs_rd_oqc_tbl(pwp, PMCS_OQPIBAHX(i)));
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Producer Index Base Address Low = 0x%08x\n",
+ pmcs_rd_oqc_tbl(pwp, PMCS_OQPIBALX(i)));
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Consumer Index PCI BAR = 0x%08x\n",
+ pmcs_rd_oqc_tbl(pwp, PMCS_OQCIBARX(i)));
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Consumer Index PCI BAR offset = 0x%08x\n",
+ pmcs_rd_oqc_tbl(pwp, PMCS_OQCIOFFX(i)));
+
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Interrupt Coalescing Timeout = 0x%08x\n",
+ PMCS_OQICT(pmcs_rd_oqc_tbl(pwp, PMCS_OQIPARM(i))));
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Interrupt Coalescing Count = 0x%08x\n",
+ PMCS_OQICC(pmcs_rd_oqc_tbl(pwp, PMCS_OQIPARM(i))));
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Interrupt Vector = 0x%08x\n",
+ PMCS_OQIV(pmcs_rd_oqc_tbl(pwp, PMCS_OQIPARM(i))));
+ n += snprintf(&buf[n], (size_left - n), " Outbound Queue "
+ "Dynamic Interrupt Coalescing Timeout = 0x%08x\n",
+ pmcs_rd_oqc_tbl(pwp, PMCS_OQDICX(i)));
+
+ }
+ n += snprintf(&buf[n], (size_left - n), "-----------------\n"
+ "Dump MPI Table end\n");
+ return (n);
+}
+
+/*ARGSUSED*/
+int
+pmcs_dump_binary(pmcs_hw_t *pwp, uint32_t *addr, uint32_t off,
+ uint32_t words_to_read, caddr_t buf, uint32_t size_left)
+{
+ uint32_t i;
+ int n = 0;
+ char c = ' ';
+
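+	/*
+	 * Emit eight 32-bit words per line, each line prefixed with its
+	 * byte offset.
+	 */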
+ for (i = 0, n = 0; i < words_to_read; i++) {
+ if ((i & 7) == 0) {
+ n += snprintf(&buf[n], (size_left - n),
+ "%08x: ", (i << 2) + off);
+ }
+ if ((i + 1) & 7) {
+ c = ' ';
+ } else {
+ c = '\n';
+ }
+ n += snprintf(&buf[n], (size_left - n), "%08x%c", addr[i], c);
+ }
+ return (n);
+}
+
+/*
+ * Dump Global Shared Memory Configuration Registers
+ */
+static int
+pmcs_dump_gsm_conf(pmcs_hw_t *pwp, caddr_t buf, uint32_t size_left)
+{
+ int n = 0;
+
+ n += snprintf(&buf[n], (size_left - n), "\nDump GSM configuration "
+ "registers: \n -----------------\n");
+ n += snprintf(&buf[n], (size_left - n), "RB6 Access Register = "
+ "0x%08x\n", pmcs_rd_gsm_reg(pwp, RB6_ACCESS));
+ n += snprintf(&buf[n], (size_left - n), "CFG and RST = 0x%08x\n",
+ pmcs_rd_gsm_reg(pwp, GSM_CFG_AND_RESET));
+ n += snprintf(&buf[n], (size_left - n), "RAM ECC ERR INDICATOR= "
+ "0x%08x\n", pmcs_rd_gsm_reg(pwp, RAM_ECC_DOUBLE_ERROR_INDICATOR));
+ n += snprintf(&buf[n], (size_left - n), "READ ADR PARITY CHK EN = "
+ "0x%08x\n", pmcs_rd_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN));
+ n += snprintf(&buf[n], (size_left - n), "WRITE ADR PARITY CHK EN = "
+ "0x%08x\n", pmcs_rd_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN));
+ n += snprintf(&buf[n], (size_left - n), "WRITE DATA PARITY CHK EN= "
+ "0x%08x\n", pmcs_rd_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN));
+ n += snprintf(&buf[n], (size_left - n),
+ "READ ADR PARITY ERROR INDICATOR = 0x%08x\n",
+ pmcs_rd_gsm_reg(pwp, READ_ADR_PARITY_ERROR_INDICATOR));
+ n += snprintf(&buf[n], (size_left - n),
+ "WRITE ADR PARITY ERROR INDICATOR = 0x%08x\n",
+ pmcs_rd_gsm_reg(pwp, WRITE_ADR_PARITY_ERROR_INDICATOR));
+ n += snprintf(&buf[n], (size_left - n),
+ "WRITE DATA PARITY ERROR INDICATOR = 0x%08x\n",
+ pmcs_rd_gsm_reg(pwp, WRITE_DATA_PARITY_ERROR_INDICATOR));
+ n += snprintf(&buf[n], (size_left - n), "NMI Enable VPE0 IOP Register"
+ " = 0x%08x\n", pmcs_rd_gsm_reg(pwp, NMI_EN_VPE0_IOP));
+ n += snprintf(&buf[n], (size_left - n), "NMI Enable VPE0 AAP1 Register"
+ " = 0x%08x\n", pmcs_rd_gsm_reg(pwp, NMI_EN_VPE0_AAP1));
+ n += snprintf(&buf[n], (size_left - n), "-----------------\n"
+ "Dump GSM configuration registers end \n");
+ return (n);
+}
+
+/*
+ * Dump PCIe Configuration Registers.
+ */
+static int
+pmcs_dump_pcie_conf(pmcs_hw_t *pwp, caddr_t buf, uint32_t size_left)
+{
+ int n = 0;
+ uint32_t i = 0;
+
+ n += snprintf(&buf[n], (size_left - n), "\nDump PCIe configuration "
+ "registers: \n -----------------\n");
+ n += snprintf(&buf[n], (size_left - n), "VENID = 0x%04x\n",
+ pci_config_get16(pwp->pci_acc_handle, PCI_CONF_VENID));
+ n += snprintf(&buf[n], (size_left - n), "DEVICE_ID = 0x%04x\n",
+ pci_config_get16(pwp->pci_acc_handle, PCI_CONF_DEVID));
+ n += snprintf(&buf[n], (size_left - n), "CFGCMD = 0x%04x\n",
+ pci_config_get16(pwp->pci_acc_handle, PCI_CONF_COMM));
+ n += snprintf(&buf[n], (size_left - n), "CFGSTAT = 0x%04x\n",
+ pci_config_get16(pwp->pci_acc_handle, PCI_CONF_STAT));
+ n += snprintf(&buf[n], (size_left - n), "CLSCODE and REVID = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PCI_CONF_REVID));
+ n += snprintf(&buf[n], (size_left - n), "BIST HDRTYPE LATTIM CLSIZE = "
+ "0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PCI_CONF_CACHE_LINESZ));
+ n += snprintf(&buf[n], (size_left - n), "MEMBASE-I LOWER = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PCI_CONF_BASE0));
+ n += snprintf(&buf[n], (size_left - n), "MEMBASE-I UPPER = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PCI_CONF_BASE1));
+ n += snprintf(&buf[n], (size_left - n), "MEMBASE-II LOWER = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PCI_CONF_BASE2));
+ n += snprintf(&buf[n], (size_left - n), "MEMBASE-II UPPER = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PCI_CONF_BASE3));
+ n += snprintf(&buf[n], (size_left - n), "MEMBASE-III = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PCI_CONF_BASE4));
+ n += snprintf(&buf[n], (size_left - n), "MEMBASE-IV = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PCI_CONF_BASE5));
+ n += snprintf(&buf[n], (size_left - n), "SVID = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PCI_CONF_SUBVENID));
+ n += snprintf(&buf[n], (size_left - n), "ROMBASE = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PCI_CONF_ROM));
+ n += snprintf(&buf[n], (size_left - n), "CAP_PTR = 0x%02x\n",
+ pci_config_get8(pwp->pci_acc_handle, PCI_CONF_CAP_PTR));
+ n += snprintf(&buf[n], (size_left - n), "MAXLAT MINGNT INTPIN "
+ "INTLINE = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PCI_CONF_ILINE));
+ n += snprintf(&buf[n], (size_left - n), "PMC PM_NEXT_CAP PM_CAP_ID = "
+ "0x%08x\n", pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_PMC));
+ n += snprintf(&buf[n], (size_left - n), "PMCSR = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_PMCSR));
+ n += snprintf(&buf[n], (size_left - n),
+ "MC MSI_NEXT_CAP MSI_CAP_ID = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_MSI));
+ n += snprintf(&buf[n], (size_left - n), "MAL = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_MAL));
+ n += snprintf(&buf[n], (size_left - n), "MAU = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_MAU));
+ n += snprintf(&buf[n], (size_left - n), "MD = 0x%04x\n",
+ pci_config_get16(pwp->pci_acc_handle, PMCS_PCI_MD));
+ n += snprintf(&buf[n], (size_left - n),
+ "PCIE_CAP PCIE_NEXT_CAP PCIE_CAP_ID = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_PCIE));
+ n += snprintf(&buf[n], (size_left - n), "DEVICE_CAP = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_DEV_CAP));
+ n += snprintf(&buf[n], (size_left - n),
+ "DEVICE_STAT DEVICE_CTRL = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_DEV_CTRL));
+ n += snprintf(&buf[n], (size_left - n), "LINK_CAP = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_LINK_CAP));
+ n += snprintf(&buf[n], (size_left - n),
+ "LINK_STAT LINK_CTRL = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_LINK_CTRL));
+ n += snprintf(&buf[n], (size_left - n), "MSIX_CAP = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_MSIX_CAP));
+ n += snprintf(&buf[n], (size_left - n), "TBL_OFFSET = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_TBL_OFFSET));
+ n += snprintf(&buf[n], (size_left - n), "PBA_OFFSET = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_PBA_OFFSET));
+ n += snprintf(&buf[n], (size_left - n), "PCIE_CAP_HD = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_PCIE_CAP_HD));
+ n += snprintf(&buf[n], (size_left - n), "UE_STAT = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_UE_STAT));
+ n += snprintf(&buf[n], (size_left - n), "UE_MASK = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_UE_MASK));
+ n += snprintf(&buf[n], (size_left - n), "UE_SEV = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_UE_SEV));
+ n += snprintf(&buf[n], (size_left - n), "CE_STAT = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_CE_STAT));
+ n += snprintf(&buf[n], (size_left - n), "CE_MASK = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_CE_MASK));
+ n += snprintf(&buf[n], (size_left - n), "ADV_ERR_CTRL = 0x%08x\n",
+ pci_config_get32(pwp->pci_acc_handle, PMCS_PCI_ADV_ERR_CTRL));
+ for (i = 0; i < 4; i++) {
+ n += snprintf(&buf[n], (size_left - n), "HD_LOG_DW%d = "
+ "0x%08x\n", i, pci_config_get32(pwp->pci_acc_handle,
+ (PMCS_PCI_HD_LOG_DW + i * 4)));
+ }
+ n += snprintf(&buf[n], (size_left - n), "-----------------\n"
+ "Dump PCIe configuration registers end \n");
+ return (n);
+}
+/*
+ * Called with axil_lock held
+ */
+static boolean_t
+pmcs_shift_axil(pmcs_hw_t *pwp, uint32_t offset)
+{
+ uint32_t newaxil = offset & ~GSM_BASE_MASK;
+
+ ASSERT(mutex_owned(&pwp->axil_lock));
+ ddi_put32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
+ drv_usecwait(10);
+
+ if (ddi_get32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "AXIL register update failed");
+ return (B_FALSE);
+ }
+ return (B_TRUE);
+}
+
+static uint32_t
+pmcs_get_axil(pmcs_hw_t *pwp)
+{
+ uint32_t regval = 0;
+ mutex_enter(&pwp->axil_lock);
+ regval = ddi_get32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
+ mutex_exit(&pwp->axil_lock);
+ return (regval);
+}
+
+static void
+pmcs_restore_axil(pmcs_hw_t *pwp, uint32_t oldaxil)
+{
+ mutex_enter(&pwp->axil_lock);
+ ddi_put32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
+ drv_usecwait(10);
+
+ if (ddi_get32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "AXIL register restore failed");
+ }
+ mutex_exit(&pwp->axil_lock);
+}
+
+/*
+ * Dump GSM Memory Regions.
+ */
+static int
+pmcs_dump_gsm(pmcs_hw_t *pwp, caddr_t buf, uint32_t size_left)
+{
+ int n = 0;
+ uint32_t i = 0;
+ uint32_t oldaxil = 0;
+ uint32_t gsm_addr = 0;
+ uint32_t *local_buf = NULL;
+
+ local_buf = kmem_zalloc(GSM_SM_BLKSZ, KM_NOSLEEP);
+ if (local_buf == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: local_buf memory not allocated", __func__);
+ return (0);
+ }
+
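+	/*
+	 * Save the current AXI translation window, then walk each GSM
+	 * region by shifting the window and copying the block out.
+	 */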
+ oldaxil = pmcs_get_axil(pwp);
+ mutex_enter(&pwp->axil_lock);
+ n += snprintf(&buf[n], (size_left - n), "\nDump GSM IO Status Table: \n"
+ " -----------------\n");
+ for (i = 0; i < 4; i++) {
+ gsm_addr = IO_STATUS_TABLE_BASE + GSM_SM_BLKSZ * i;
+ if (pmcs_shift_axil(pwp, gsm_addr) == B_TRUE) {
+ gsm_addr &= GSM_BASE_MASK;
+ ddi_rep_get32(pwp->gsm_acc_handle, local_buf,
+ &pwp->gsm_regs[gsm_addr >> 2], GSM_SM_BLKSZ >> 2,
+ DDI_DEV_AUTOINCR);
+ n += pmcs_dump_binary(pwp, local_buf, i * GSM_SM_BLKSZ,
+ GSM_SM_BLKSZ >> 2, &buf[n], size_left - n);
+ }
+ }
+ n += snprintf(&buf[n], (size_left - n), "\n-----------------\n"
+ "Dump GSM IO Status Table end \n");
+ n += snprintf(&buf[n], (size_left - n), "\nDump Ring Buffer Storage: \n"
+ " -----------------\n");
+ for (i = 0; i < 2; i++) {
+ gsm_addr = RING_BUF_STORAGE_0 + GSM_SM_BLKSZ * i;
+ if (pmcs_shift_axil(pwp, gsm_addr) == B_TRUE) {
+ gsm_addr &= GSM_BASE_MASK;
+ ddi_rep_get32(pwp->gsm_acc_handle, local_buf,
+ &pwp->gsm_regs[gsm_addr >> 2], GSM_SM_BLKSZ >> 2,
+ DDI_DEV_AUTOINCR);
+ n += pmcs_dump_binary(pwp, local_buf, i * GSM_SM_BLKSZ,
+ GSM_SM_BLKSZ >> 2, &buf[n], size_left - n);
+ }
+ }
+ n += snprintf(&buf[n], (size_left - n), "\n-----------------\n"
+ "Dump Ring Buffer Storage end \n");
+
+ n += snprintf(&buf[n], (size_left - n), "\nDump Ring Buffer Pointers:\n"
+ " -----------------\n");
+ gsm_addr = RING_BUF_PTR_ACC_BASE + RING_BUF_PTR_OFF;
+ if (pmcs_shift_axil(pwp, gsm_addr) == B_TRUE) {
+ gsm_addr &= GSM_BASE_MASK;
+ ddi_rep_get32(pwp->gsm_acc_handle, local_buf,
+ &pwp->gsm_regs[gsm_addr >> 2],
+ RING_BUF_PTR_SIZE >> 2, DDI_DEV_AUTOINCR);
+ n += pmcs_dump_binary(pwp, local_buf, 0,
+ RING_BUF_PTR_SIZE >> 2, &buf[n], size_left - n);
+ }
+ n += snprintf(&buf[n], (size_left - n), "\n-----------------\n"
+ "Dump Ring Buffer Pointers end \n");
+
+ n += snprintf(&buf[n], (size_left - n), "\nDump Ring Buffer Access: \n"
+ " -----------------\n");
+ gsm_addr = RING_BUF_PTR_ACC_BASE + RING_BUF_ACC_OFF;
+ if (pmcs_shift_axil(pwp, gsm_addr) == B_TRUE) {
+ gsm_addr &= GSM_BASE_MASK;
+ ddi_rep_get32(pwp->gsm_acc_handle, local_buf,
+ &pwp->gsm_regs[gsm_addr >> 2],
+ RING_BUF_ACC_SIZE >> 2, DDI_DEV_AUTOINCR);
+ n += pmcs_dump_binary(pwp, local_buf, 0,
+ RING_BUF_ACC_SIZE >> 2, &buf[n], size_left - n);
+ }
+ n += snprintf(&buf[n], (size_left - n), "\n-----------------\n"
+ "Dump Ring Buffer Access end \n");
+
+ n += snprintf(&buf[n], (size_left - n), "\nDump GSM SM: \n"
+ " -----------------\n");
+ for (i = 0; i < 16; i++) {
+ gsm_addr = GSM_SM_BASE + GSM_SM_BLKSZ * i;
+ if (pmcs_shift_axil(pwp, gsm_addr) == B_TRUE) {
+ gsm_addr &= GSM_BASE_MASK;
+ ddi_rep_get32(pwp->gsm_acc_handle, local_buf,
+ &pwp->gsm_regs[gsm_addr >> 2],
+ GSM_SM_BLKSZ >> 2, DDI_DEV_AUTOINCR);
+ n += pmcs_dump_binary(pwp, local_buf, i * GSM_SM_BLKSZ,
+ GSM_SM_BLKSZ >> 2, &buf[n], size_left - n);
+ }
+ }
+ mutex_exit(&pwp->axil_lock);
+ pmcs_restore_axil(pwp, oldaxil);
+
+ n += snprintf(&buf[n], (size_left - n), "\n-----------------\n"
+ "Dump GSM SM end \n");
+ n += snprintf(&buf[n], (size_left - n), "-----------------\n"
+ "\n------------ Dump GSM Memory Regions end -------------\n");
+ if (local_buf) {
+ kmem_free(local_buf, GSM_SM_BLKSZ);
+ }
+ return (n);
+}
+
+/*
+ * Trace the current inbound message the host sent to the SPC.
+ */
+void
+pmcs_iqp_trace(pmcs_hw_t *pwp, uint32_t qnum)
+{
+ uint32_t k = 0, n = 0;
+ uint32_t *ptr = NULL;
+ char *tbuf = pwp->iqpt->curpos;
+ uint32_t size_left = pwp->iqpt->size_left;
+
+ if (tbuf == NULL) {
+		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: trace buffer is not ready;"
+		    " inbound message from host to SPC is not traced",
+ __func__);
+ return;
+ } else if (size_left < PMCS_QENTRY_SIZE * PMCS_QENTRY_SIZE) {
+ tbuf = pwp->iqpt->curpos = pwp->iqpt->head;
+ size_left = pwp->iqpt->size_left = PMCS_IQP_TRACE_BUFFER_SIZE;
+ }
+
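+	/* Format the IOMB at the shadow producer index into the trace buffer */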
+ ptr = &pwp->iqp[qnum][pwp->shadow_iqpi[qnum] *
+ (PMCS_QENTRY_SIZE >> 2)];
+ for (k = 0; k < (PMCS_QENTRY_SIZE / sizeof (uint32_t));
+ k += 8) {
+ n += snprintf(&tbuf[n], (size_left - n),
+ "0x%08x 0x%08x 0x%08x 0x%08x "
+ "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ LE_32(ptr[k]), LE_32(ptr[k+1]),
+ LE_32(ptr[k+2]), LE_32(ptr[k+3]),
+ LE_32(ptr[k+4]), LE_32(ptr[k+5]),
+ LE_32(ptr[k+6]), LE_32(ptr[k+7]));
+ }
+ pwp->iqpt->size_left -= n;
+ if (pwp->iqpt->size_left > 0) {
+ pwp->iqpt->curpos += n;
+ } else {
+ pwp->iqpt->curpos =
+ pwp->iqpt->head + PMCS_IQP_TRACE_BUFFER_SIZE - 1;
+ }
+}
+
+/*
+ * Dump fatal error register content from GSM.
+ */
+int
+pmcs_dump_feregs(pmcs_hw_t *pwp, uint32_t *addr, uint8_t nvmd,
+ caddr_t buf, uint32_t size_left)
+{
+ uint32_t offset = 0, length = 0;
+ int i = 0;
+ uint8_t *ptr = (uint8_t *)addr;
+
+ if ((addr == NULL) || (buf == NULL)) {
+ return (0);
+ }
+ switch (nvmd) {
+ case PMCIN_NVMD_AAP1:
+ offset = pmcs_rd_mpi_tbl(pwp, PMCS_FERDOMSGU);
+ length = pmcs_rd_mpi_tbl(pwp, PMCS_FERDLMSGU);
+ break;
+ case PMCIN_NVMD_IOP:
+ offset = pmcs_rd_mpi_tbl(pwp, PMCS_FERDOIOP);
+ length = pmcs_rd_mpi_tbl(pwp, PMCS_FERDLIOP);
+ break;
+ default:
+		pmcs_prt(pwp, PMCS_PRT_DEBUG, "UNKNOWN NVMD DEVICE "
+		    "%s():%d", __func__, __LINE__);
+ return (0);
+ }
+
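+	/*
+	 * Copy the ASCII dump text until the end of the region, a 0xff
+	 * pad byte or a NUL terminator is reached.
+	 */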
+ while ((i < length) && (ptr[i + offset] != 0xff) &&
+ (ptr[i + offset] != '\0')) {
+ i += snprintf(&buf[i], (size_left - i),
+ "%c", ptr[i + offset]);
+ }
+ return (i);
+}
diff --git a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_intr.c b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_intr.c
new file mode 100644
index 0000000000..926bed86fa
--- /dev/null
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_intr.c
@@ -0,0 +1,1654 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file contains functions that are called via interrupts.
+ */
+
+#include <sys/scsi/adapters/pmcs/pmcs.h>
+
+#ifdef DEBUG
+#define VALID_IOMB_CHECK(p, w, m, b, c) \
+ if (!(w & PMCS_IOMB_VALID)) { \
+ char l[64]; \
+ (void) snprintf(l, sizeof (l), \
+ "%s: INVALID IOMB (oq_ci=%u oq_pi=%u)", __func__, b, c); \
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG, l, m); \
+ STEP_OQ_ENTRY(pwp, PMCS_OQ_EVENTS, b, 1); \
+ continue; \
+ }
+#define WRONG_OBID_CHECK(pwp, w, q) \
+ if (((w & PMCS_IOMB_OBID_MASK) >> PMCS_IOMB_OBID_SHIFT) != q) { \
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, \
+ "%s: COMPLETION WITH WRONG OBID (0x%x)", __func__, \
+ (w & PMCS_IOMB_OBID_MASK) >> PMCS_IOMB_OBID_SHIFT); \
+ }
+#else
+#define VALID_IOMB_CHECK(a, b, c, d, e)
+#define WRONG_OBID_CHECK(a, b, c)
+#endif
+
+#define OQLIM_CHECK(p, l) \
+ if (++l == (p)->ioq_depth) { \
+ pmcs_prt(p, PMCS_PRT_DEBUG, \
+ "%s: possible ob queue overflow", \
+ __func__); \
+ break; \
+ }
+
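+/*
+ * Copy an outbound IOMB into a local buffer: one queue entry, or two
+ * when the buffer count indicates a multi-entry message; the amount
+ * copied is recorded as well.
+ */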
+#define COPY_OUTBOUND(p, w, l, n, a, x, q, c) \
+ n = ((w & PMCS_IOMB_BC_MASK) >> PMCS_IOMB_BC_SHIFT); \
+ a = PMCS_QENTRY_SIZE; \
+ (void) memcpy(l, x, PMCS_QENTRY_SIZE); \
+ if (n > 1) { \
+ a <<= 1; \
+ (void) memcpy(&l[PMCS_QENTRY_SIZE], \
+ GET_OQ_ENTRY(p, q, c, 1), PMCS_QENTRY_SIZE); \
+ } \
+ pmcs_prt(p, PMCS_PRT_DEBUG3, \
+ "%s: ptr %p ci %d w0 %x nbuf %d", \
+ __func__, (void *)x, ci, w0, n)
+
+#define EVT_PRT(hwp, msg, phy) \
+ pmcs_prt(hwp, PMCS_PRT_INFO, "Phy 0x%x: %s", phy, # msg)
+
+
+/*
+ * Map the link rate reported in the event to the SAS link rate value
+ */
+static uint8_t
+pmcs_link_rate(uint32_t event_link_rate)
+{
+ uint8_t sas_link_rate = 0;
+
+ switch (event_link_rate) {
+ case 1:
+ sas_link_rate = SAS_LINK_RATE_1_5GBIT;
+ break;
+ case 2:
+ sas_link_rate = SAS_LINK_RATE_3GBIT;
+ break;
+ case 4:
+ sas_link_rate = SAS_LINK_RATE_6GBIT;
+ break;
+ }
+
+ return (sas_link_rate);
+}
+
+/*
+ * Called with pwrk lock
+ */
+static void
+pmcs_complete_work(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *iomb, size_t amt)
+{
+#ifdef DEBUG
+ pwp->ltime[pwp->lti] = gethrtime();
+ pwp->ltags[pwp->lti++] = pwrk->htag;
+#endif
+ pwrk->htag |= PMCS_TAG_DONE;
+
+ /*
+ * If the command has timed out, leave it in that state.
+ */
+ if (pwrk->state != PMCS_WORK_STATE_TIMED_OUT) {
+ pwrk->state = PMCS_WORK_STATE_INTR;
+ }
+
+ pmcs_complete_work_impl(pwp, pwrk, iomb, amt);
+}
+
+static void
+pmcs_work_not_found(pmcs_hw_t *pwp, uint32_t htag, uint32_t *iomb)
+{
+#ifdef DEBUG
+ int i;
+ hrtime_t now;
+ char buf[64];
+
+ (void) snprintf(buf, sizeof (buf),
+ "unable to find work structure for tag 0x%x", htag);
+
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, iomb);
+ if (htag == 0) {
+ return;
+ }
+ now = gethrtime();
+ for (i = 0; i < 256; i++) {
+ mutex_enter(&pwp->dbglock);
+ if (pwp->ltags[i] == htag) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "same tag already completed (%llu us ago)",
+ (unsigned long long) (now - pwp->ltime[i]));
+ }
+ if (pwp->ftags[i] == htag) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "same tag started (line %d) (%llu ns ago)",
+ pwp->ftag_lines[i], (unsigned long long)
+ (now - pwp->ftime[i]));
+ }
+ mutex_exit(&pwp->dbglock);
+ }
+#else
+ char buf[64];
+ (void) snprintf(buf, sizeof (buf),
+ "unable to find work structure for tag 0x%x", htag);
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, iomb);
+#endif
+}
+
+
+static void
+pmcs_process_io_completion(pmcs_hw_t *pwp, pmcs_iocomp_cb_t *ioccb, size_t amt)
+{
+ pmcwork_t *pwrk;
+ uint32_t tag_type;
+ uint32_t htag = LE_32(((uint32_t *)((void *)ioccb->iomb))[1]);
+
+ pwrk = pmcs_tag2wp(pwp, htag);
+ if (pwrk == NULL) {
+ pmcs_work_not_found(pwp, htag, (void *)&ioccb->iomb);
+ kmem_cache_free(pwp->iocomp_cb_cache, ioccb);
+ return;
+ }
+
+ pwrk->htag |= PMCS_TAG_DONE;
+
+ /*
+ * If the command has timed out, leave it in that state.
+ */
+ if (pwrk->state != PMCS_WORK_STATE_TIMED_OUT) {
+ pwrk->state = PMCS_WORK_STATE_INTR;
+ }
+
+ /*
+ * Some SATA and SAS commands are run in "WAIT" mode.
+ * We can tell this from the tag type. In this case,
+ * we just do a wakeup (not a callback).
+ */
+ tag_type = PMCS_TAG_TYPE(pwrk->htag);
+ if (tag_type == PMCS_TAG_TYPE_WAIT) {
+ ASSERT(PMCS_TAG_TYPE(pwrk->htag) == PMCS_TAG_TYPE_WAIT);
+ if (pwrk->arg && amt) {
+ (void) memcpy(pwrk->arg, ioccb->iomb, amt);
+ }
+ cv_signal(&pwrk->sleep_cv);
+ mutex_exit(&pwrk->lock);
+ kmem_cache_free(pwp->iocomp_cb_cache, ioccb);
+ return;
+ }
+ ASSERT(tag_type == PMCS_TAG_TYPE_CBACK);
+
+#ifdef DEBUG
+ pwp->ltime[pwp->lti] = gethrtime();
+ pwp->ltags[pwp->lti++] = pwrk->htag;
+#endif
+
+ ioccb->pwrk = pwrk;
+
+ /*
+ * Only update state to IOCOMPQ if we were in the INTR state.
+ * Any other state (e.g. TIMED_OUT, ABORTED) needs to remain.
+ */
+ if (pwrk->state == PMCS_WORK_STATE_INTR) {
+ pwrk->state = PMCS_WORK_STATE_IOCOMPQ;
+ }
+
+ mutex_enter(&pwp->cq_lock);
+ if (pwp->iocomp_cb_tail) {
+ pwp->iocomp_cb_tail->next = ioccb;
+ pwp->iocomp_cb_tail = ioccb;
+ } else {
+ pwp->iocomp_cb_head = ioccb;
+ pwp->iocomp_cb_tail = ioccb;
+ }
+ ioccb->next = NULL;
+ mutex_exit(&pwp->cq_lock);
+
+ mutex_exit(&pwrk->lock);
+ /* Completion queue will be run at end of pmcs_iodone_intr */
+}
+
+
+static void
+pmcs_process_completion(pmcs_hw_t *pwp, void *iomb, size_t amt)
+{
+ pmcwork_t *pwrk;
+ uint32_t htag = LE_32(((uint32_t *)iomb)[1]);
+
+ pwrk = pmcs_tag2wp(pwp, htag);
+ if (pwrk == NULL) {
+ pmcs_work_not_found(pwp, htag, iomb);
+ return;
+ }
+
+ pmcs_complete_work(pwp, pwrk, iomb, amt);
+ /*
+ * The pwrk lock is now released
+ */
+}
+
+static void
+pmcs_kill_port(pmcs_hw_t *pwp, int portid)
+{
+ pmcs_phy_t *pptr = pwp->ports[portid];
+
+ if (pptr == NULL) {
+ return;
+ }
+
+ /*
+ * Clear any subsidiary phys
+ */
+ mutex_enter(&pwp->lock);
+
+ for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
+ pmcs_lock_phy(pptr);
+ if (pptr->link_rate && pptr->portid == portid &&
+ pptr->subsidiary) {
+ pmcs_clear_phy(pwp, pptr);
+ }
+ pmcs_unlock_phy(pptr);
+ }
+
+ pptr = pwp->ports[portid];
+ pwp->ports[portid] = NULL;
+ mutex_exit(&pwp->lock);
+
+ pmcs_lock_phy(pptr);
+ pmcs_kill_changed(pwp, pptr, 0);
+ pmcs_unlock_phy(pptr);
+
+ RESTART_DISCOVERY(pwp);
+ pmcs_prt(pwp, PMCS_PRT_INFO, "PortID 0x%x Cleared", portid);
+}
+
+void
+pmcs_process_sas_hw_event(pmcs_hw_t *pwp, void *iomb, size_t amt)
+{
+ uint32_t w1 = LE_32(((uint32_t *)iomb)[1]);
+ uint32_t w3 = LE_32(((uint32_t *)iomb)[3]);
+ char buf[32];
+ uint8_t phynum = IOP_EVENT_PHYNUM(w1);
+ uint8_t portid = IOP_EVENT_PORTID(w1);
+ pmcs_iport_t *iport;
+ pmcs_phy_t *pptr, *subphy, *tphyp;
+ int need_ack = 0;
+ int primary;
+
+ switch (IOP_EVENT_EVENT(w1)) {
+ case IOP_EVENT_PHY_STOP_STATUS:
+ if (IOP_EVENT_STATUS(w1)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "PORT %d failed to stop (0x%x)",
+ phynum, IOP_EVENT_STATUS(w1));
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "PHY 0x%x Stopped",
+ phynum);
+ mutex_enter(&pwp->lock);
+ pptr = pwp->root_phys + phynum;
+ pmcs_lock_phy(pptr);
+ mutex_exit(&pwp->lock);
+ if (pptr->configured) {
+ pmcs_kill_changed(pwp, pptr, 0);
+ } else {
+ pmcs_set_changed(pwp, pptr, B_TRUE, 0);
+ }
+ pmcs_unlock_phy(pptr);
+ RESTART_DISCOVERY(pwp);
+ }
+ /* Reposition htag to the 'expected' position. */
+ ((uint32_t *)iomb)[1] = ((uint32_t *)iomb)[2];
+ pmcs_process_completion(pwp, iomb, amt);
+ break;
+ case IOP_EVENT_SAS_PHY_UP:
+ {
+ static const uint8_t sas_identify_af_endian_xfvec[] = {
+ 0x5c, 0x5a, 0x56, 0x00
+ };
+ pmcs_phy_t *rp;
+ sas_identify_af_t af;
+
+ /*
+ * If we're not at running state, don't do anything
+ */
+ mutex_enter(&pwp->lock);
+ if (pwp->state != STATE_RUNNING) {
+ mutex_exit(&pwp->lock);
+ break;
+ }
+ pptr = pwp->root_phys + phynum;
+ pmcs_lock_phy(pptr);
+
+ rp = pwp->ports[portid];
+
+ /* rp and pptr may be the same */
+ if (rp && (rp != pptr)) {
+ pmcs_lock_phy(rp);
+ }
+ mutex_exit(&pwp->lock);
+
+ pmcs_endian_transform(pwp, &af, &((uint32_t *)iomb)[4],
+ sas_identify_af_endian_xfvec);
+
+ /* Copy the remote address into our phy handle */
+ (void) memcpy(pptr->sas_address, af.sas_address, 8);
+
+ /*
+ * Check to see if there is a PortID already active.
+ */
+ if (rp) {
+ if (rp->portid != portid) {
+ if (rp != pptr) {
+ pmcs_unlock_phy(rp);
+ }
+ pmcs_unlock_phy(pptr);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "PortID 0x%x"
+ ": PHY 0x%x SAS LINK UP IS FOR A "
+ "DIFFERENT PORTID 0x%x", rp->portid,
+ phynum, portid);
+ break;
+ }
+
+ /*
+ * If the dtype isn't NOTHING, then this is actually
+ * the primary PHY for this port. It probably went
+ * down and came back up, so be sure not to mark it
+ * as a subsidiary.
+ */
+ if (pptr->dtype == NOTHING) {
+ pptr->subsidiary = 1;
+ }
+ pptr->link_rate =
+ pmcs_link_rate(IOP_EVENT_LINK_RATE(w1));
+ pptr->portid = portid;
+ pptr->dead = 0;
+
+ if (pptr != rp) {
+ pmcs_unlock_phy(pptr);
+ }
+
+ rp->width = IOP_EVENT_NPIP(w3);
+
+ /* Add this PHY to the phymap */
+ if (sas_phymap_phy_add(pwp->hss_phymap, phynum,
+ pwp->sas_wwns[0],
+ pmcs_barray2wwn(pptr->sas_address)) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Unable to add phy %u for 0x%" PRIx64 ".0x%"
+ PRIx64, phynum, pwp->sas_wwns[rp->phynum],
+ pmcs_barray2wwn(pptr->sas_address));
+ }
+
+ /* Get our iport, if attached, and set it up */
+ if (pptr != rp) {
+ pmcs_lock_phy(pptr);
+ }
+ iport = pmcs_get_iport_by_phy(pwp, pptr);
+ if (iport) {
+ pptr->iport = iport;
+ primary = !pptr->subsidiary;
+
+ mutex_enter(&iport->lock);
+ if (primary) {
+ iport->pptr = pptr;
+ }
+ if (iport->ua_state == UA_ACTIVE) {
+ pmcs_add_phy_to_iport(iport, pptr);
+ }
+ mutex_exit(&iport->lock);
+ pmcs_rele_iport(iport);
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "PortID 0x%x: PHY"
+ " 0x%x SAS LINK UP WIDENS PORT TO %d PHYS",
+ portid, phynum, rp->width);
+
+ if (pptr != rp) {
+ pmcs_unlock_phy(pptr);
+ }
+ pmcs_unlock_phy(rp);
+ break;
+ }
+
+ /*
+ * Check to see if anything is here already
+ */
+ if (pptr->dtype != NOTHING && pptr->configured) {
+ pmcs_unlock_phy(pptr);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "PortID 0x%x: SAS PHY 0x%x "
+ "UP HITS EXISTING CONFIGURED TREE", portid, phynum);
+ break;
+ }
+
+ if (af.address_frame_type != SAS_AF_IDENTIFY) {
+ pmcs_unlock_phy(pptr);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "SAS link up on phy 0x%x, "
+ "but unexpected frame type 0x%x found", phynum,
+ af.address_frame_type);
+ break;
+ }
+ pptr->width = IOP_EVENT_NPIP(w3);
+ pptr->portid = portid;
+ pptr->dead = 0;
+ pptr->link_rate = pmcs_link_rate(IOP_EVENT_LINK_RATE(w1));
+
+ /*
+ * Check to see whether this is an expander or an endpoint
+ */
+ switch (af.device_type) {
+ case SAS_IF_DTYPE_ENDPOINT:
+ pptr->pend_dtype = SAS;
+ pptr->dtype = SAS;
+ break;
+ case SAS_IF_DTYPE_EDGE:
+ case SAS_IF_DTYPE_FANOUT:
+ pptr->pend_dtype = EXPANDER;
+ pptr->dtype = EXPANDER;
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "unknown device type 0x%x", af.device_type);
+ pptr->pend_dtype = NOTHING;
+ pptr->dtype = NOTHING;
+ break;
+ }
+
+ /*
+ * If this is a direct-attached SAS drive, do the spinup
+ * release now.
+ */
+ if (pptr->dtype == SAS) {
+ pptr->spinup_hold = 1;
+ pmcs_spinup_release(pwp, pptr);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "Release spinup hold on PHY 0x%x", phynum);
+ }
+
+ pmcs_set_changed(pwp, pptr, B_TRUE, 0);
+ if (pptr->width > 1) {
+			pmcs_prt(pwp, PMCS_PRT_INFO, "PortID 0x%x: PHY 0x%x SAS"
+			    " LINK UP @ %s Gb/s with %d phys", portid, phynum,
+			    pmcs_get_rate(pptr->link_rate), pptr->width);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_INFO, "PortID 0x%x: PHY 0x%x SAS"
+ " LINK UP @ %s Gb/s", portid, phynum,
+ pmcs_get_rate(pptr->link_rate));
+ }
+ pmcs_unlock_phy(pptr);
+
+ /* Add this PHY to the phymap */
+ if (sas_phymap_phy_add(pwp->hss_phymap, phynum,
+ pwp->sas_wwns[0],
+ pmcs_barray2wwn(pptr->sas_address)) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Unable to add phy %u for 0x%" PRIx64 ".0x%"
+ PRIx64, phynum, pwp->sas_wwns[pptr->phynum],
+ pmcs_barray2wwn(pptr->sas_address));
+ }
+
+ /* Get a pointer to our iport and set it up if attached */
+ pmcs_lock_phy(pptr);
+ iport = pmcs_get_iport_by_phy(pwp, pptr);
+ if (iport) {
+ pptr->iport = iport;
+ primary = !pptr->subsidiary;
+
+ mutex_enter(&iport->lock);
+ if (primary) {
+ iport->pptr = pptr;
+ }
+ if (iport->ua_state == UA_ACTIVE) {
+ pmcs_add_phy_to_iport(iport, pptr);
+ }
+ mutex_exit(&iport->lock);
+ pmcs_rele_iport(iport);
+ }
+
+ pmcs_smhba_log_sysevent(pwp, ESC_SAS_PHY_EVENT,
+ SAS_PHY_ONLINE, pptr);
+ pmcs_unlock_phy(pptr);
+
+ mutex_enter(&pwp->lock);
+ pwp->ports[portid] = pptr;
+ mutex_exit(&pwp->lock);
+ RESTART_DISCOVERY(pwp);
+
+ break;
+ }
+ case IOP_EVENT_SATA_PHY_UP:
+ /*
+ * If we're not at running state, don't do anything
+ */
+ mutex_enter(&pwp->lock);
+ if (pwp->state != STATE_RUNNING) {
+ mutex_exit(&pwp->lock);
+ break;
+ }
+
+ /*
+ * Check to see if anything is here already
+ */
+ pmcs_lock_phy(pwp->root_phys + phynum);
+ pptr = pwp->root_phys + phynum;
+ mutex_exit(&pwp->lock);
+
+ if (pptr->dtype != NOTHING && pptr->configured) {
+ pmcs_unlock_phy(pptr);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "PortID 0x%x: SATA PHY 0x%x"
+ " UP HITS EXISTING CONFIGURED TREE",
+ portid, phynum);
+ break;
+ }
+
+ pptr->width = 1;
+ pptr->dead = 0;
+
+ /*
+ * Install the PHY number in the least significant byte
+ * with a NAA=3 (locally assigned address) in the most
+		 * significant nibble.
+ *
+ * Later, we'll either use that or dig a
+ * WWN out of words 108..111.
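+		 *
+		 * For example, phy 5 would get the byte array
+		 * 30 00 00 00 00 00 00 05, i.e. a locally assigned
+		 * WWN of 0x3000000000000005.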
+ */
+ pptr->sas_address[0] = 0x30;
+ pptr->sas_address[1] = 0;
+ pptr->sas_address[2] = 0;
+ pptr->sas_address[3] = 0;
+ pptr->sas_address[4] = 0;
+ pptr->sas_address[5] = 0;
+ pptr->sas_address[6] = 0;
+ pptr->sas_address[7] = phynum;
+ pptr->portid = portid;
+ pptr->link_rate = pmcs_link_rate(IOP_EVENT_LINK_RATE(w1));
+ pptr->dtype = SATA;
+ pmcs_set_changed(pwp, pptr, B_TRUE, 0);
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "PortID 0x%x: PHY 0x%x SATA LINK UP @ %s Gb/s",
+ pptr->portid, phynum, pmcs_get_rate(pptr->link_rate));
+ pmcs_unlock_phy(pptr);
+
+ /* Add this PHY to the phymap */
+ if (sas_phymap_phy_add(pwp->hss_phymap, phynum,
+ pwp->sas_wwns[0],
+ pmcs_barray2wwn(pptr->sas_address)) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Unable to add phy %u for 0x%" PRIx64 ".0x%"
+ PRIx64, phynum, pwp->sas_wwns[pptr->phynum],
+ pmcs_barray2wwn(pptr->sas_address));
+ }
+
+ /* Get our iport, if attached, and set it up */
+ pmcs_lock_phy(pptr);
+ iport = pmcs_get_iport_by_phy(pwp, pptr);
+ if (iport) {
+ pptr->iport = iport;
+
+ mutex_enter(&iport->lock);
+ iport->pptr = pptr;
+ if (iport->ua_state == UA_ACTIVE) {
+ pmcs_add_phy_to_iport(iport, pptr);
+ ASSERT(iport->nphy == 1);
+ iport->nphy = 1;
+ }
+ mutex_exit(&iport->lock);
+ pmcs_rele_iport(iport);
+ }
+
+ pmcs_smhba_log_sysevent(pwp, ESC_SAS_PHY_EVENT,
+ SAS_PHY_ONLINE, pptr);
+ pmcs_unlock_phy(pptr);
+
+ mutex_enter(&pwp->lock);
+ pwp->ports[pptr->portid] = pptr;
+ mutex_exit(&pwp->lock);
+ RESTART_DISCOVERY(pwp);
+ break;
+
+ case IOP_EVENT_SATA_SPINUP_HOLD:
+ tphyp = (pmcs_phy_t *)(pwp->root_phys + phynum);
+ /*
+ * No need to lock the entire tree for this
+ */
+ mutex_enter(&tphyp->phy_lock);
+ tphyp->spinup_hold = 1;
+ pmcs_spinup_release(pwp, tphyp);
+ mutex_exit(&tphyp->phy_lock);
+ break;
+ case IOP_EVENT_PHY_DOWN:
+ /*
+ * If we're not at running state, don't do anything
+ */
+ mutex_enter(&pwp->lock);
+ if (pwp->state != STATE_RUNNING) {
+ mutex_exit(&pwp->lock);
+ break;
+ }
+ pptr = pwp->ports[portid];
+
+ subphy = pwp->root_phys + phynum;
+ /*
+ * subphy is a pointer to the PHY corresponding to the incoming
+ * event. pptr points to the primary PHY for the corresponding
+ * port. So, subphy and pptr may or may not be the same PHY,
+ * but that doesn't change what we need to do with each.
+ */
+ ASSERT(subphy);
+ mutex_exit(&pwp->lock);
+
+ if (pptr == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "PortID 0x%x: PHY 0x%x "
+		    "LINK DOWN - no portid pointer", portid, phynum);
+ break;
+ }
+ if (IOP_EVENT_PORT_STATE(w3) == IOP_EVENT_PS_NIL) {
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "PortID 0x%x: PHY 0x%x NOT VALID YET",
+ portid, phynum);
+ need_ack = 1;
+ break;
+ }
+ if (IOP_EVENT_PORT_STATE(w3) == IOP_EVENT_PS_IN_RESET) {
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "PortID 0x%x: PHY 0x%x IN RESET",
+ portid, phynum);
+ break;
+ }
+ if (IOP_EVENT_PORT_STATE(w3) == IOP_EVENT_PS_LOSTCOMM) {
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "PortID 0x%x: PHY 0x%x TEMPORARILY DOWN",
+ portid, phynum);
+ need_ack = 1;
+ break;
+ }
+
+ if (IOP_EVENT_PORT_STATE(w3) == IOP_EVENT_PS_VALID) {
+ /*
+ * Drop port width on the primary phy handle
+ * No need to lock the entire tree for this
+ */
+ mutex_enter(&pptr->phy_lock);
+ pptr->width = IOP_EVENT_NPIP(w3);
+ mutex_exit(&pptr->phy_lock);
+
+ /* Clear the iport reference on the subphy */
+ mutex_enter(&subphy->phy_lock);
+ iport = subphy->iport;
+ subphy->iport = NULL;
+ mutex_exit(&subphy->phy_lock);
+
+ /*
+ * If the iport was set on this phy, decrement its
+ * nphy count and remove this phy from the phys list.
+ */
+ if (iport) {
+ mutex_enter(&iport->lock);
+ pmcs_remove_phy_from_iport(iport, subphy);
+ mutex_exit(&iport->lock);
+ }
+
+ pmcs_lock_phy(subphy);
+ if (subphy->subsidiary)
+ pmcs_clear_phy(pwp, subphy);
+ pmcs_unlock_phy(subphy);
+
+ /* Remove this PHY from the phymap */
+ if (sas_phymap_phy_rem(pwp->hss_phymap, phynum) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Unable to remove phy %u for 0x%" PRIx64
+ ".0x%" PRIx64, phynum,
+ pwp->sas_wwns[pptr->phynum],
+ pmcs_barray2wwn((pwp->root_phys +
+				    pptr->phynum)->sas_address));
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_INFO, "PortID 0x%x: PHY 0x%x "
+ "LINK DOWN NARROWS PORT TO %d PHYS",
+ portid, phynum, pptr->width);
+ break;
+ }
+ if (IOP_EVENT_PORT_STATE(w3) != IOP_EVENT_PS_INVALID) {
+ pmcs_prt(pwp, PMCS_PRT_INFO, "PortID 0x%x: PHY 0x"
+ "%x LINK DOWN NOT HANDLED (state 0x%x)",
+ portid, phynum, IOP_EVENT_PORT_STATE(w3));
+ need_ack = 1;
+ break;
+ }
+ /* Remove this PHY from the phymap */
+ if (sas_phymap_phy_rem(pwp->hss_phymap, phynum) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Unable to remove phy %u for 0x%" PRIx64
+ ".0x%" PRIx64, phynum,
+ pwp->sas_wwns[pptr->phynum],
+ pmcs_barray2wwn(
+ (pwp->root_phys + pptr->phynum)->sas_address));
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "PortID 0x%x: PHY 0x%x LINK DOWN (port invalid)",
+ portid, phynum);
+
+ /*
+ * Last PHY on the port.
+ * Assumption: pptr and subphy are both "valid"
+ *
+ * Drop port width on the primary phy handle
+ * Report the event while we've got the lock
+ */
+ mutex_enter(&pptr->phy_lock);
+ pptr->width = 0;
+ pmcs_smhba_log_sysevent(pwp, ESC_SAS_PHY_EVENT,
+ SAS_PHY_OFFLINE, pptr);
+ mutex_exit(&pptr->phy_lock);
+
+ /* Clear the iport reference on the subphy */
+ mutex_enter(&subphy->phy_lock);
+ iport = subphy->iport;
+ subphy->iport = NULL;
+ mutex_exit(&subphy->phy_lock);
+
+ /*
+ * If the iport was set on this phy, decrement its
+ * nphy count and remove this phy from the phys list.
+ * Also, clear the iport's pptr as this port is now
+ * down.
+ */
+ if (iport) {
+ mutex_enter(&iport->lock);
+ pmcs_remove_phy_from_iport(iport, subphy);
+ iport->pptr = NULL;
+ iport->ua_state = UA_PEND_DEACTIVATE;
+ mutex_exit(&iport->lock);
+ }
+
+ pmcs_lock_phy(subphy);
+ if (subphy->subsidiary)
+ pmcs_clear_phy(pwp, subphy);
+ pmcs_unlock_phy(subphy);
+
+ /*
+ * Since we're now really dead, it's time to clean up.
+ */
+ pmcs_kill_port(pwp, portid);
+ need_ack = 1;
+
+ break;
+ case IOP_EVENT_BROADCAST_CHANGE:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "PortID 0x%x: PHY 0x%x Broadcast Change", portid, phynum);
+ need_ack = 1;
+ mutex_enter(&pwp->lock);
+ pptr = pwp->ports[portid];
+ if (pptr) {
+ pmcs_lock_phy(pptr);
+ if (pptr->phynum == phynum) {
+ pmcs_set_changed(pwp, pptr, B_TRUE, 0);
+ }
+ pmcs_smhba_log_sysevent(pwp, ESC_SAS_HBA_PORT_BROADCAST,
+ SAS_PORT_BROADCAST_CHANGE, pptr);
+ pmcs_unlock_phy(pptr);
+ }
+ mutex_exit(&pwp->lock);
+ RESTART_DISCOVERY(pwp);
+ break;
+ case IOP_EVENT_BROADCAST_SES:
+ EVT_PRT(pwp, IOP_EVENT_BROADCAST_SES, phynum);
+ mutex_enter(&pwp->lock);
+ pptr = pwp->ports[portid];
+ mutex_exit(&pwp->lock);
+ if (pptr) {
+ pmcs_lock_phy(pptr);
+ pmcs_smhba_log_sysevent(pwp, ESC_SAS_HBA_PORT_BROADCAST,
+ SAS_PORT_BROADCAST_SES, pptr);
+ pmcs_unlock_phy(pptr);
+ }
+ break;
+ case IOP_EVENT_PHY_ERR_INBOUND_CRC:
+ {
+ char buf[32];
+ (void) snprintf(buf, sizeof (buf), "Inbound PHY CRC error");
+ need_ack = 1;
+ break;
+ }
+ case IOP_EVENT_HARD_RESET_RECEIVED:
+ EVT_PRT(pwp, IOP_EVENT_HARD_RESET_RECEIVED, phynum);
+ break;
+ case IOP_EVENT_EVENT_ID_FRAME_TIMO:
+ EVT_PRT(pwp, IOP_EVENT_EVENT_ID_FRAME_TIMO, phynum);
+ break;
+ case IOP_EVENT_BROADCAST_EXP:
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "PortID 0x%x: PHY 0x%x Broadcast Exp Change",
+ portid, phynum);
+ /*
+ * Comparing Section 6.8.1.4 of SMHBA (rev 7) spec and Section
+ * 7.2.3 of SAS2 (Rev 15) spec,
+ * _BROADCAST_EXPANDER event corresponds to _D01_4 primitive
+ */
+ mutex_enter(&pwp->lock);
+ pptr = pwp->ports[portid];
+ mutex_exit(&pwp->lock);
+ if (pptr) {
+ pmcs_lock_phy(pptr);
+ pmcs_smhba_log_sysevent(pwp, ESC_SAS_HBA_PORT_BROADCAST,
+ SAS_PORT_BROADCAST_D01_4, pptr);
+ pmcs_unlock_phy(pptr);
+ }
+ break;
+ case IOP_EVENT_PHY_START_STATUS:
+ switch (IOP_EVENT_STATUS(w1)) {
+ case IOP_PHY_START_OK:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "PHY 0x%x Started",
+ phynum);
+ break;
+ case IOP_PHY_START_ALREADY:
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "PHY 0x%x Started (Already)", phynum);
+ break;
+ case IOP_PHY_START_INVALID:
+ pmcs_prt(pwp, PMCS_PRT_WARN,
+ "PHY 0x%x failed to start (invalid phy)", phynum);
+ break;
+ case IOP_PHY_START_ERROR:
+ pmcs_prt(pwp, PMCS_PRT_WARN,
+ "PHY 0x%x Start Error", phynum);
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_WARN, "PHY 0x%x failed to start "
+ "(0x%x)", phynum, IOP_EVENT_STATUS(w1));
+ break;
+ }
+ /* Reposition htag to the 'expected' position. */
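+		/*
+		 * (This event reports its tag in word 2, while the
+		 * completion path reads the tag from word 1.)
+		 */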
+ ((uint32_t *)iomb)[1] = ((uint32_t *)iomb)[2];
+ pmcs_process_completion(pwp, iomb, amt);
+ break;
+ case IOP_EVENT_PHY_ERR_INVALID_DWORD:
+ need_ack = 1;
+ EVT_PRT(pwp, IOP_EVENT_PHY_ERR_INVALID_DWORD, phynum);
+ break;
+ case IOP_EVENT_PHY_ERR_DISPARITY_ERROR:
+ need_ack = 1;
+ EVT_PRT(pwp, IOP_EVENT_PHY_ERR_DISPARITY_ERROR, phynum);
+ break;
+ case IOP_EVENT_PHY_ERR_CODE_VIOLATION:
+ need_ack = 1;
+ EVT_PRT(pwp, IOP_EVENT_PHY_ERR_CODE_VIOLATION, phynum);
+ break;
+ case IOP_EVENT_PHY_ERR_LOSS_OF_DWORD_SYN:
+ need_ack = 1;
+ EVT_PRT(pwp, IOP_EVENT_PHY_ERR_LOSS_OF_DWORD_SYN, phynum);
+ break;
+ case IOP_EVENT_PHY_ERR_PHY_RESET_FAILD:
+ need_ack = 1;
+ EVT_PRT(pwp, IOP_EVENT_PHY_ERR_PHY_RESET_FAILD, phynum);
+ break;
+ case IOP_EVENT_PORT_RECOVERY_TIMER_TMO:
+ EVT_PRT(pwp, IOP_EVENT_PORT_RECOVERY_TIMER_TMO, phynum);
+ break;
+ case IOP_EVENT_PORT_RECOVER:
+ EVT_PRT(pwp, IOP_EVENT_PORT_RECOVER, phynum);
+ break;
+ case IOP_EVENT_PORT_INVALID:
+ mutex_enter(&pwp->lock);
+ if (pwp->state != STATE_RUNNING) {
+ mutex_exit(&pwp->lock);
+ break;
+ }
+ mutex_exit(&pwp->lock);
+ pmcs_kill_port(pwp, portid);
+ pmcs_prt(pwp, PMCS_PRT_INFO, "PortID 0x%x: PORT Now Invalid",
+ portid);
+ break;
+ case IOP_EVENT_PORT_RESET_TIMER_TMO:
+ EVT_PRT(pwp, IOP_EVENT_PORT_RESET_TIMER_TMO, phynum);
+ break;
+ case IOP_EVENT_PORT_RESET_COMPLETE:
+ EVT_PRT(pwp, IOP_EVENT_PORT_RESET_COMPLETE, phynum);
+ break;
+ case IOP_EVENT_BROADCAST_ASYNC_EVENT:
+ EVT_PRT(pwp, IOP_EVENT_BROADCAST_ASYNC_EVENT, phynum);
+ /*
+ * Comparing Section 6.8.1.4 of SMHBA (rev 7) spec and Section
+ * 7.2.3 of SAS2 (Rev 15) spec,
+ * _BROADCAST_ASYNC event corresponds to _D04_7 primitive
+ */
+ mutex_enter(&pwp->lock);
+ pptr = pwp->ports[portid];
+ mutex_exit(&pwp->lock);
+ if (pptr) {
+ pmcs_lock_phy(pptr);
+ pmcs_smhba_log_sysevent(pwp, ESC_SAS_HBA_PORT_BROADCAST,
+ SAS_PORT_BROADCAST_D04_7, pptr);
+ pmcs_unlock_phy(pptr);
+ }
+ break;
+ default:
+ (void) snprintf(buf, sizeof (buf),
+ "unknown SAS H/W Event PHY 0x%x", phynum);
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, iomb);
+ break;
+ }
+ if (need_ack) {
+ mutex_enter(&pwp->lock);
+ /*
+ * Don't lock the entire tree for this. Just grab the mutex
+ * on the root PHY.
+ */
+ tphyp = pwp->root_phys + phynum;
+ mutex_enter(&tphyp->phy_lock);
+ tphyp->hw_event_ack = w1;
+ mutex_exit(&tphyp->phy_lock);
+ mutex_exit(&pwp->lock);
+ pmcs_ack_events(pwp);
+ }
+}
+
+static void
+pmcs_process_echo_completion(pmcs_hw_t *pwp, void *iomb, size_t amt)
+{
+ echo_test_t fred;
+ pmcwork_t *pwrk;
+ uint32_t *msg = iomb, htag = LE_32(msg[1]);
+ pwrk = pmcs_tag2wp(pwp, htag);
+ if (pwrk) {
+ (void) memcpy(&fred, &((uint32_t *)iomb)[2], sizeof (fred));
+ fred.ptr[0]++;
+ msg[2] = LE_32(PMCOUT_STATUS_OK);
+ pmcs_complete_work(pwp, pwrk, msg, amt);
+ } else {
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
+ "ECHO completion with no work structure", iomb);
+ }
+}
+
+static void
+pmcs_process_ssp_event(pmcs_hw_t *pwp, void *iomb, size_t amt)
+{
+ _NOTE(ARGUNUSED(amt));
+ uint32_t status, htag, *w;
+ pmcwork_t *pwrk;
+ char *path;
+
+ w = iomb;
+ htag = LE_32(w[1]);
+ status = LE_32(w[2]);
+
+ pwrk = pmcs_tag2wp(pwp, htag);
+ if (pwrk == NULL) {
+ path = "????";
+ } else {
+ path = pwrk->phy->path;
+ }
+
+ if (status != PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED) {
+ char buf[20];
+ const char *emsg = pmcs_status_str(status);
+
+ if (emsg == NULL) {
+ (void) snprintf(buf, sizeof (buf), "Status 0x%x",
+ status);
+ emsg = buf;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Bad SAS Status (tag 0x%x) "
+ "%s on %s", __func__, htag, emsg, path);
+ if (pwrk != NULL) {
+ /*
+			 * There may be a pending command on the target
+			 * device, or it may be a double fault.
+ */
+ pmcs_start_ssp_event_recovery(pwp, pwrk, iomb, amt);
+ }
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: tag %x put onto the wire for %s",
+ __func__, htag, path);
+ if (pwrk) {
+ pwrk->onwire = 1;
+ mutex_exit(&pwrk->lock);
+ }
+ }
+}
+
+static void
+pmcs_process_sata_event(pmcs_hw_t *pwp, void *iomb, size_t amt)
+{
+ _NOTE(ARGUNUSED(amt));
+ pmcwork_t *pwrk = NULL;
+ pmcs_phy_t *pptr;
+ uint32_t status, htag, *w;
+ char *path;
+
+ w = iomb;
+ htag = LE_32(w[1]);
+ status = LE_32(w[2]);
+
+ /*
+ * If the status is PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE,
+ * we have to issue a READ LOG EXT ATA (page 0x10) command
+ * to the device. In this case, htag is not valid.
+ *
+ * If the status is PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED, we're
+ * just noting that an I/O got put onto the wire.
+ *
+	 * Otherwise, other errors indicate that things need to
+	 * be aborted.
+ */
+ path = NULL;
+ if (htag) {
+ pwrk = pmcs_tag2wp(pwp, htag);
+ if (pwrk) {
+ pmcs_lock_phy(pwrk->phy);
+ pptr = pwrk->phy;
+ path = pptr->path;
+ }
+ }
+ if (path == NULL) {
+ mutex_enter(&pwp->lock);
+ pptr = pmcs_find_phy_by_devid(pwp, LE_32(w[4]));
+ /* This PHY is now locked */
+ mutex_exit(&pwp->lock);
+ if (pptr) {
+ path = pptr->path;
+ } else {
+ path = "????";
+ }
+ }
+
+ if (status != PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED) {
+ char buf[20];
+ const char *emsg = pmcs_status_str(status);
+
+ ASSERT(pptr != NULL);
+ if (emsg == NULL) {
+ (void) snprintf(buf, sizeof (buf), "Status 0x%x",
+ status);
+ emsg = buf;
+ }
+ if (status == PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE) {
+ ASSERT(pptr != NULL);
+ pptr->need_rl_ext = 1;
+ htag = 0;
+ } else {
+ pptr->abort_pending = 1;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Bad SATA Status (tag 0x%x) "
+ "%s on %s", __func__, htag, emsg, path);
+ SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
+ /*
+ * Unlike SSP devices, we let the abort we
+ * schedule above force the completion of
+ * problem commands.
+ */
+ if (pwrk) {
+ mutex_exit(&pwrk->lock);
+ }
+ } else if (status == PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: tag %x put onto the wire for %s",
+ __func__, htag, path);
+ if (pwrk) {
+ pwrk->onwire = 1;
+ mutex_exit(&pwrk->lock);
+ }
+ }
+
+ if (pptr) {
+ pmcs_unlock_phy(pptr);
+ }
+}
+
+static void
+pmcs_process_abort_completion(pmcs_hw_t *pwp, void *iomb, size_t amt)
+{
+ pmcs_phy_t *pptr;
+ struct pmcwork *pwrk;
+ uint32_t rtag;
+ uint32_t htag = LE_32(((uint32_t *)iomb)[1]);
+ uint32_t status = LE_32(((uint32_t *)iomb)[2]);
+ uint32_t scp = LE_32(((uint32_t *)iomb)[3]) & 0x1;
+ char *path;
+
+ pwrk = pmcs_tag2wp(pwp, htag);
+ if (pwrk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: cannot find work structure for ABORT", __func__);
+ return;
+ }
+ if (pwrk->ptr) {
+ rtag = *((uint32_t *)pwrk->ptr);
+ } else {
+ rtag = 0;
+ }
+
+ pptr = pwrk->phy;
+ if (pptr) {
+ pmcs_lock_phy(pptr);
+ pptr->abort_pending = 0;
+ pptr->abort_sent = 0;
+
+ /*
+ * Don't do this if the status was ABORT_IN_PROGRESS and
+ * the scope bit was set
+ */
+ if ((status != PMCOUT_STATUS_IO_ABORT_IN_PROGRESS) || !scp) {
+ pptr->abort_all_start = 0;
+ cv_signal(&pptr->abort_all_cv);
+ }
+ path = pptr->path;
+ pmcs_unlock_phy(pptr);
+ } else {
+ path = "(no phy)";
+ }
+
+ switch (status) {
+ case PMCOUT_STATUS_OK:
+ if (scp) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: abort all succeeded for %s. (htag=0x%x)",
+ __func__, path, htag);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: abort tag 0x%x succeeded for %s. (htag=0x%x)",
+ __func__, rtag, path, htag);
+ }
+ break;
+
+ case PMCOUT_STATUS_IO_NOT_VALID:
+ if (scp) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: ABORT %s failed (DEV NOT VALID) for %s. "
+ "(htag=0x%x)", __func__, scp ? "all" : "tag",
+ path, htag);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: ABORT %s failed (I/O NOT VALID) for %s. "
+ "(htag=0x%x)", __func__, scp ? "all" : "tag",
+ path, htag);
+ }
+ break;
+
+ case PMCOUT_STATUS_IO_ABORT_IN_PROGRESS:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: ABORT %s failed for "
+ "%s, htag 0x%x (ABORT IN PROGRESS)", __func__,
+ scp ? "all" : "tag", path, htag);
+ break;
+
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Unknown status %d for ABORT"
+ " %s, htag 0x%x, PHY %s", __func__, status,
+ scp ? "all" : "tag", htag, path);
+ break;
+ }
+
+ pmcs_complete_work(pwp, pwrk, iomb, amt);
+}
+
+static void
+pmcs_process_general_event(pmcs_hw_t *pwp, uint32_t *iomb)
+{
+ uint32_t htag;
+ char local[60];
+ struct pmcwork *pwrk;
+ int i;
+
+ if (LE_32(iomb[1]) == INBOUND_IOMB_V_BIT_NOT_SET) {
+ (void) snprintf(local, sizeof (local),
+ "VALID bit not set on INBOUND IOMB");
+ } else if (LE_32(iomb[1]) ==
+ INBOUND_IOMB_OPC_NOT_SUPPORTED) {
+ (void) snprintf(local, sizeof (local),
+		    "opcode not supported on INBOUND IOMB");
+ } else {
+ (void) snprintf(local, sizeof (local),
+ "unknown GENERAL EVENT status (0x%x)",
+ LE_32(iomb[1]));
+ }
+ /* Pull up bad IOMB into usual position */
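+	/*
+	 * The offending inbound IOMB is echoed back starting at word 2 of
+	 * this GENERAL EVENT, so shifting everything up by two words puts
+	 * the inbound tag at word 1 and lets the normal completion path
+	 * (which expects the tag in word 1 and a status in word 2) handle
+	 * it once we substitute a PROG_ERROR status below.
+	 */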
+ for (i = 0; i < PMCS_MSG_SIZE - 2; i++) {
+ iomb[i] = iomb[i+2];
+ }
+ /* overwrite status with an error */
+ iomb[2] = LE_32(PMCOUT_STATUS_PROG_ERROR);
+ iomb[PMCS_MSG_SIZE - 2] = 0;
+ iomb[PMCS_MSG_SIZE - 1] = 0;
+ htag = LE_32(iomb[1]);
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG, local, iomb);
+ pwrk = pmcs_tag2wp(pwp, htag);
+ if (pwrk) {
+ pmcs_complete_work(pwp, pwrk, iomb, PMCS_QENTRY_SIZE);
+ }
+}
+
+void
+pmcs_general_intr(pmcs_hw_t *pwp)
+{
+ char local[PMCS_QENTRY_SIZE << 1];
+ uint32_t w0, pi, ci;
+ uint32_t *ptr, nbuf, lim = 0;
+ size_t amt;
+
+ ci = pmcs_rd_oqci(pwp, PMCS_OQ_GENERAL);
+ pi = pmcs_rd_oqpi(pwp, PMCS_OQ_GENERAL);
+
+ while (ci != pi) {
+ OQLIM_CHECK(pwp, lim);
+ ptr = GET_OQ_ENTRY(pwp, PMCS_OQ_GENERAL, ci, 0);
+ w0 = LE_32(ptr[0]);
+ VALID_IOMB_CHECK(pwp, w0, ptr, ci, pi);
+ WRONG_OBID_CHECK(pwp, w0, PMCS_OQ_GENERAL);
+ COPY_OUTBOUND(pwp, w0, local, nbuf, amt, ptr,
+ PMCS_OQ_GENERAL, ci);
+
+ switch (w0 & PMCS_IOMB_OPCODE_MASK) {
+ case PMCOUT_SSP_COMPLETION:
+ /*
+ * We only get SSP completion here for Task Management
+ * completions.
+ */
+ case PMCOUT_SMP_COMPLETION:
+ case PMCOUT_LOCAL_PHY_CONTROL:
+ case PMCOUT_DEVICE_REGISTRATION:
+ case PMCOUT_DEREGISTER_DEVICE_HANDLE:
+ case PMCOUT_GET_NVMD_DATA:
+ case PMCOUT_SET_NVMD_DATA:
+ case PMCOUT_GET_DEVICE_STATE:
+ case PMCOUT_SET_DEVICE_STATE:
+ pmcs_process_completion(pwp, local, amt);
+ break;
+ case PMCOUT_SSP_ABORT:
+ case PMCOUT_SATA_ABORT:
+ case PMCOUT_SMP_ABORT:
+ pmcs_process_abort_completion(pwp, local, amt);
+ break;
+ case PMCOUT_SSP_EVENT:
+ pmcs_process_ssp_event(pwp, local, amt);
+ break;
+ case PMCOUT_ECHO:
+ pmcs_process_echo_completion(pwp, local, amt);
+ break;
+ case PMCOUT_SAS_HW_EVENT_ACK_ACK:
+ if (LE_32(ptr[2]) != SAS_HW_EVENT_ACK_OK) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "SAS H/W EVENT ACK/ACK Status=0x%b",
+ LE_32(ptr[2]), "\020\4InvParm\3"
+ "InvPort\2InvPhy\1InvSEA");
+ }
+ pmcs_process_completion(pwp, local, amt);
+ break;
+ case PMCOUT_SKIP_ENTRIES:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG3, "%s: skip %d entries",
+ __func__, nbuf);
+ break;
+ default:
+ (void) snprintf(local, sizeof (local),
+ "%s: unhandled message", __func__);
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG, local, ptr);
+ break;
+ }
+ STEP_OQ_ENTRY(pwp, PMCS_OQ_GENERAL, ci, nbuf);
+ }
+ if (lim) {
+ SYNC_OQ_ENTRY(pwp, PMCS_OQ_GENERAL, ci, pi);
+ }
+}
+
+/*
+ * pmcs_check_intr_coal
+ *
+ * This function makes a determination on the dynamic value of the
+ * interrupt coalescing timer register. We only use this for I/O
+ * completions.
+ *
+ * The basic algorithm is as follows:
+ *
+ * PMCS_MAX_IO_COMPS_PER_INTR: The maximum number of I/O completions per
+ * I/O completion interrupt. We won't increase the interrupt coalescing
+ * timer if we're already processing this many completions per interrupt
+ * beyond the threshold.
+ *
+ * Values in io_intr_coal structure:
+ *
+ * intr_latency: The average number of nsecs between interrupts during
+ * the echo test. Used to help determine whether to increase the coalescing
+ * timer.
+ *
+ * intr_threshold: Calculated number of interrupts beyond which we may
+ * increase the timer. This value is calculated based on the calculated
+ * interrupt latency during the ECHO test and the current value of the
+ * coalescing timer.
+ *
+ * nsecs_between_intrs: Total number of nsecs between all the interrupts
+ * in the current timeslice.
+ *
+ * last_io_comp: Time of the last I/O interrupt.
+ *
+ * num_io_completions: Number of I/O completions during the slice
+ *
+ * num_intrs: Number of I/O completion interrupts during the slice
+ *
+ * max_io_completions: Number of times we hit >= PMCS_MAX_IO_COMPS_PER_INTR
+ * during interrupt processing.
+ *
+ * PMCS_MAX_IO_COMPS_LOWAT_SHIFT/HIWAT_SHIFT
+ * Low and high marks used to determine whether we processed enough interrupts
+ * that contained the maximum number of I/O completions to warrant increasing
+ * the timer
+ *
+ * intr_coal_timer: The current value of the register (in usecs)
+ *
+ * timer_on: B_TRUE means we are using the timer
+ *
+ * The timer is increased if we processed more than intr_threshold interrupts
+ * during the quantum and the number of interrupts containing the maximum
+ * number of I/O completions falls between (num_intrs >>
+ * PMCS_MAX_IO_COMPS_LOWAT_SHIFT) and (num_intrs >>
+ * PMCS_MAX_IO_COMPS_HIWAT_SHIFT).
+ *
+ * If the average time between completions is greater than twice
+ * the current timer value, the timer value is decreased.
+ *
+ * If we did not take any interrupts during a quantum, we turn the timer off.
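+ *
+ * A worked example of the adjustment rule (numbers purely illustrative):
+ * if a quantum sees num_intrs = 1024 with intr_threshold = 800,
+ * max_io_completions = 100, and low/high water shifts of 4 and 1, then
+ * 1024 >> 4 = 64 < 100 < 512 = 1024 >> 1 and num_intrs exceeds the
+ * threshold, so the timer is increased.  If instead the average gap
+ * between interrupts were 50000 nsecs with a 20 usec timer,
+ * 50000 > 2 * 20 * 1000 = 40000 and the timer would be decreased.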
+ */
+void
+pmcs_check_intr_coal(void *arg)
+{
+ pmcs_hw_t *pwp = (pmcs_hw_t *)arg;
+ uint32_t avg_nsecs;
+ pmcs_io_intr_coal_t *ici;
+
+ ici = &pwp->io_intr_coal;
+ mutex_enter(&pwp->ict_lock);
+
+ while (ici->stop_thread == B_FALSE) {
+ /*
+ * Wait for next time quantum... collect stats
+ */
+ (void) cv_timedwait(&pwp->ict_cv, &pwp->ict_lock,
+ ddi_get_lbolt() + ici->quantum);
+
+ if (ici->stop_thread == B_TRUE) {
+ continue;
+ }
+
+ DTRACE_PROBE1(pmcs__check__intr__coal, pmcs_io_intr_coal_t *,
+ &pwp->io_intr_coal);
+
+ /*
+ * Determine whether to adjust timer
+ */
+ if (ici->num_intrs == 0) {
+ /*
+ * If timer is off, nothing more to do.
+ */
+ if (!pwp->io_intr_coal.timer_on) {
+ continue;
+ }
+
+ /*
+ * No interrupts. Turn off the timer.
+ */
+ pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL, 0);
+
+ if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) {
+ pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR,
+ pwp->odb_auto_clear);
+ }
+
+ ici->timer_on = B_FALSE;
+ ici->max_io_completions = 0;
+ ici->num_intrs = 0;
+ ici->int_cleared = B_FALSE;
+ ici->num_io_completions = 0;
+ DTRACE_PROBE1(pmcs__intr__coalesce__timer__off,
+ pmcs_io_intr_coal_t *, ici);
+ continue;
+ }
+
+ avg_nsecs = ici->nsecs_between_intrs / ici->num_intrs;
+
+ if ((ici->num_intrs > ici->intr_threshold) &&
+ (ici->max_io_completions > (ici->num_intrs >>
+ PMCS_MAX_IO_COMPS_LOWAT_SHIFT)) &&
+ (ici->max_io_completions < (ici->num_intrs >>
+ PMCS_MAX_IO_COMPS_HIWAT_SHIFT))) {
+ pmcs_set_intr_coal_timer(pwp, INCREASE_TIMER);
+ } else if (avg_nsecs >
+ (ici->intr_coal_timer * 1000 * 2)) {
+ pmcs_set_intr_coal_timer(pwp, DECREASE_TIMER);
+ }
+
+ /*
+ * Reset values for new sampling period.
+ */
+ ici->max_io_completions = 0;
+ ici->nsecs_between_intrs = 0;
+ ici->num_intrs = 0;
+ ici->num_io_completions = 0;
+ }
+
+ mutex_exit(&pwp->ict_lock);
+ thread_exit();
+}
+
+void
+pmcs_iodone_intr(pmcs_hw_t *pwp)
+{
+ char local[PMCS_QENTRY_SIZE << 1];
+ pmcs_iocomp_cb_t *ioccb;
+ uint32_t w0, ci, pi, nbuf, lim = 0, niodone = 0, iomb_opcode;
+ size_t amt;
+ uint32_t *ptr;
+ hrtime_t curtime = gethrtime();
+
+ ci = pmcs_rd_oqci(pwp, PMCS_OQ_IODONE);
+ pi = pmcs_rd_oqpi(pwp, PMCS_OQ_IODONE);
+
+ while (ci != pi) {
+ OQLIM_CHECK(pwp, lim);
+ ptr = GET_OQ_ENTRY(pwp, PMCS_OQ_IODONE, ci, 0);
+ w0 = LE_32(ptr[0]);
+ VALID_IOMB_CHECK(pwp, w0, ptr, ci, pi);
+ WRONG_OBID_CHECK(pwp, w0, PMCS_OQ_IODONE);
+ iomb_opcode = (w0 & PMCS_IOMB_OPCODE_MASK);
+
+ if ((iomb_opcode == PMCOUT_SSP_COMPLETION) ||
+ (iomb_opcode == PMCOUT_SATA_COMPLETION)) {
+ ioccb =
+ kmem_cache_alloc(pwp->iocomp_cb_cache, KM_NOSLEEP);
+ if (ioccb == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_WARN, "%s: "
+ "kmem_cache_alloc failed", __func__);
+ break;
+ }
+
+ COPY_OUTBOUND(pwp, w0, ioccb->iomb, nbuf, amt, ptr,
+ PMCS_OQ_IODONE, ci);
+
+ niodone++;
+ pmcs_process_io_completion(pwp, ioccb, amt);
+ } else {
+ COPY_OUTBOUND(pwp, w0, local, nbuf, amt, ptr,
+ PMCS_OQ_IODONE, ci);
+
+ switch (iomb_opcode) {
+ case PMCOUT_ECHO:
+ pmcs_process_echo_completion(pwp, local, amt);
+ break;
+ case PMCOUT_SATA_EVENT:
+ pmcs_process_sata_event(pwp, local, amt);
+ break;
+ case PMCOUT_SSP_EVENT:
+ pmcs_process_ssp_event(pwp, local, amt);
+ break;
+ case PMCOUT_SKIP_ENTRIES:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG3,
+ "%s: skip %d entries", __func__, nbuf);
+ break;
+ default:
+ (void) snprintf(local, sizeof (local),
+ "%s: unhandled message", __func__);
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG, local,
+ ptr);
+ break;
+ }
+ }
+
+ STEP_OQ_ENTRY(pwp, PMCS_OQ_IODONE, ci, nbuf);
+ }
+
+ if (lim != 0) {
+ SYNC_OQ_ENTRY(pwp, PMCS_OQ_IODONE, ci, pi);
+ }
+
+ /*
+ * Update the interrupt coalescing timer check stats and run
+ * completions for queued up commands.
+ */
+
+ if (niodone > 0) {
+ /*
+ * If we can't get the lock, then completions are either
+ * already running or will be scheduled to do so shortly.
+ */
+ if (mutex_tryenter(&pwp->cq_lock) != 0) {
+ PMCS_CQ_RUN_LOCKED(pwp);
+ mutex_exit(&pwp->cq_lock);
+ }
+
+ mutex_enter(&pwp->ict_lock);
+ pwp->io_intr_coal.nsecs_between_intrs +=
+ curtime - pwp->io_intr_coal.last_io_comp;
+ pwp->io_intr_coal.num_intrs++;
+ pwp->io_intr_coal.num_io_completions += niodone;
+ if (niodone >= PMCS_MAX_IO_COMPS_PER_INTR) {
+ pwp->io_intr_coal.max_io_completions++;
+ }
+ pwp->io_intr_coal.last_io_comp = gethrtime();
+ mutex_exit(&pwp->ict_lock);
+ }
+}
+
+void
+pmcs_event_intr(pmcs_hw_t *pwp)
+{
+ char local[PMCS_QENTRY_SIZE << 1];
+ uint32_t w0, ci, pi, nbuf, lim = 0;
+ size_t amt;
+ uint32_t *ptr;
+
+ ci = pmcs_rd_oqci(pwp, PMCS_OQ_EVENTS);
+ pi = pmcs_rd_oqpi(pwp, PMCS_OQ_EVENTS);
+
+ while (ci != pi) {
+ OQLIM_CHECK(pwp, lim);
+ ptr = GET_OQ_ENTRY(pwp, PMCS_OQ_EVENTS, ci, 0);
+ w0 = LE_32(ptr[0]);
+ VALID_IOMB_CHECK(pwp, w0, ptr, ci, pi);
+ WRONG_OBID_CHECK(pwp, w0, PMCS_OQ_EVENTS);
+ COPY_OUTBOUND(pwp, w0, local, nbuf, amt, ptr,
+ PMCS_OQ_EVENTS, ci);
+
+ switch (w0 & PMCS_IOMB_OPCODE_MASK) {
+ case PMCOUT_ECHO:
+ pmcs_process_echo_completion(pwp, local, amt);
+ break;
+ case PMCOUT_SATA_EVENT:
+ pmcs_process_sata_event(pwp, local, amt);
+ break;
+ case PMCOUT_SSP_EVENT:
+ pmcs_process_ssp_event(pwp, local, amt);
+ break;
+ case PMCOUT_GENERAL_EVENT:
+ pmcs_process_general_event(pwp, ptr);
+ break;
+ case PMCOUT_DEVICE_HANDLE_REMOVED:
+ {
+ uint32_t port = IOP_EVENT_PORTID(LE_32(ptr[1]));
+ uint32_t did = LE_32(ptr[2]);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "PortID 0x%x device_id 0x%x removed", port, did);
+ break;
+ }
+ case PMCOUT_SAS_HW_EVENT:
+ if (nbuf > 1) {
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "multiple SAS HW_EVENT (%d) responses "
+ "in EVENT OQ", nbuf);
+ }
+ pmcs_process_sas_hw_event(pwp, local, PMCS_QENTRY_SIZE);
+ break;
+ case PMCOUT_FW_FLASH_UPDATE:
+ case PMCOUT_GET_TIME_STAMP:
+ case PMCOUT_GET_DEVICE_STATE:
+ case PMCOUT_SET_DEVICE_STATE:
+ case PMCOUT_SAS_DIAG_EXECUTE:
+ pmcs_process_completion(pwp, local, amt);
+ break;
+ case PMCOUT_SKIP_ENTRIES:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG3, "%s: skip %d entries",
+ __func__, nbuf);
+ break;
+ default:
+ (void) snprintf(local, sizeof (local),
+ "%s: unhandled message", __func__);
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG, local, ptr);
+ break;
+ }
+ STEP_OQ_ENTRY(pwp, PMCS_OQ_EVENTS, ci, nbuf);
+ }
+ if (lim) {
+ SYNC_OQ_ENTRY(pwp, PMCS_OQ_EVENTS, ci, pi);
+ }
+}
+
+void
+pmcs_timed_out(pmcs_hw_t *pwp, uint32_t htag, const char *func)
+{
+#ifdef DEBUG
+ hrtime_t now = gethrtime();
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ if (pwp->ftags[i] == htag) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Inbound msg (tag 0x%8x) timed out - "
+ "was started %llu ns ago in %s:%d",
+ htag, (unsigned long long) (now - pwp->ftime[i]),
+ func, pwp->ftag_lines[i]);
+ return;
+ }
+ }
+#endif
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+	    "Inbound Message (tag 0x%08x) timed out - was started in %s",
+ htag, func);
+}
diff --git a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_nvram.c b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_nvram.c
new file mode 100644
index 0000000000..e163466d78
--- /dev/null
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_nvram.c
@@ -0,0 +1,823 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file contains various support routines.
+ */
+
+#include <sys/scsi/adapters/pmcs/pmcs.h>
+
+/*
+ * Firmware update and NVRAM/VPD support
+ */
+static int pmcs_flash_chunk(pmcs_hw_t *, uint8_t *);
+
+/*
+ * Check current firmware version for correctness
+ * and try to flash the correct firmware if what is
+ * running isn't correct.
+ *
+ * Must be called after setup and MPI setup and
+ * interrupts are enabled.
+ */
+
+int
+pmcs_firmware_update(pmcs_hw_t *pwp)
+{
+ ddi_modhandle_t modhp;
+ char buf[64];
+ int errno;
+ uint8_t *cstart, *cend; /* Firmware image file */
+ uint8_t *istart, *iend; /* ila */
+ uint8_t *sstart, *send; /* SPCBoot */
+ uint32_t *fwvp;
+ int defret = 0;
+
+ /*
+ * If updating is disabled, we're done.
+ */
+ if (pwp->fw_disable_update) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Firmware update disabled by conf file");
+ return (0);
+ }
+
+ /*
+ * If we're already running the right firmware, we're done.
+ */
+ if (pwp->fw == PMCS_FIRMWARE_VERSION) {
+ if (pwp->fw_force_update == 0) {
+ return (0);
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Firmware version matches, but still forcing update");
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_WARN,
+ "Upgrading firmware on card from 0x%x to 0x%x",
+ pwp->fw, PMCS_FIRMWARE_VERSION);
+ }
+
+ modhp = ddi_modopen(PMCS_FIRMWARE_FILENAME, KRTLD_MODE_FIRST, &errno);
+ if (errno) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Firmware module not available; will not upgrade",
+ __func__);
+ return (defret);
+ }
+
+ fwvp = ddi_modsym(modhp, PMCS_FIRMWARE_VERSION_NAME, &errno);
+ if (errno) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: unable to find symbol '%s'",
+ __func__, PMCS_FIRMWARE_VERSION_NAME);
+ (void) ddi_modclose(modhp);
+ return (defret);
+ }
+
+ /*
+ * If the firmware version from the module isn't what we expect,
+ * and force updating is disabled, return the default (for this
+ * mode of operation) value.
+ */
+ if (*fwvp != PMCS_FIRMWARE_VERSION) {
+ if (pwp->fw_force_update == 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: firmware module version wrong (0x%x)",
+ __func__, *fwvp);
+ (void) ddi_modclose(modhp);
+ return (defret);
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: firmware module version wrong (0x%x) - update forced",
+ __func__, *fwvp);
+ }
+
+ (void) snprintf(buf, sizeof (buf),
+ PMCS_FIRMWARE_CODE_NAME PMCS_FIRMWARE_START_SUF);
+ cstart = ddi_modsym(modhp, buf, &errno);
+ if (errno) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: unable to find symbol '%s'",
+ __func__, buf);
+ (void) ddi_modclose(modhp);
+ return (defret);
+ }
+
+ (void) snprintf(buf, sizeof (buf),
+ PMCS_FIRMWARE_CODE_NAME PMCS_FIRMWARE_END_SUF);
+ cend = ddi_modsym(modhp, buf, &errno);
+ if (errno) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: unable to find symbol '%s'",
+ __func__, buf);
+ (void) ddi_modclose(modhp);
+ return (defret);
+ }
+
+ (void) snprintf(buf, sizeof (buf),
+ PMCS_FIRMWARE_ILA_NAME PMCS_FIRMWARE_START_SUF);
+ istart = ddi_modsym(modhp, buf, &errno);
+ if (errno) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: unable to find symbol '%s'",
+ __func__, buf);
+ (void) ddi_modclose(modhp);
+ return (defret);
+ }
+
+ (void) snprintf(buf, sizeof (buf),
+ PMCS_FIRMWARE_ILA_NAME PMCS_FIRMWARE_END_SUF);
+ iend = ddi_modsym(modhp, buf, &errno);
+ if (errno) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: unable to find symbol '%s'",
+ __func__, buf);
+ (void) ddi_modclose(modhp);
+ return (defret);
+ }
+
+ (void) snprintf(buf, sizeof (buf),
+ PMCS_FIRMWARE_SPCBOOT_NAME PMCS_FIRMWARE_START_SUF);
+ sstart = ddi_modsym(modhp, buf, &errno);
+ if (errno) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: unable to find symbol '%s'",
+ __func__, buf);
+ (void) ddi_modclose(modhp);
+ return (defret);
+ }
+
+ (void) snprintf(buf, sizeof (buf),
+ PMCS_FIRMWARE_SPCBOOT_NAME PMCS_FIRMWARE_END_SUF);
+ send = ddi_modsym(modhp, buf, &errno);
+ if (errno) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: unable to find symbol '%s'",
+ __func__, buf);
+ (void) ddi_modclose(modhp);
+ return (defret);
+ }
+
+ /*
+ * The SPCBoot image must be updated first, and this is written to
+ * SEEPROM, not flash.
+ */
+ if (pmcs_set_nvmd(pwp, PMCS_NVMD_SPCBOOT, sstart,
+ (size_t)((size_t)send - (size_t)sstart)) == B_FALSE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: unable to flash '%s' segment",
+ __func__, PMCS_FIRMWARE_SPCBOOT_NAME);
+ (void) ddi_modclose(modhp);
+ return (-1);
+ }
+
+ if (pmcs_fw_flash(pwp, (void *)istart,
+ (uint32_t)((size_t)iend - (size_t)istart))) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: unable to flash '%s' segment",
+ __func__, PMCS_FIRMWARE_ILA_NAME);
+ (void) ddi_modclose(modhp);
+ return (-1);
+ }
+
+ if (pmcs_fw_flash(pwp, (void *)cstart,
+ (uint32_t)((size_t)cend - (size_t)cstart))) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: unable to flash '%s' segment",
+ __func__, PMCS_FIRMWARE_CODE_NAME);
+ (void) ddi_modclose(modhp);
+ return (-1);
+ }
+
+ (void) ddi_modclose(modhp);
+
+ if (pmcs_soft_reset(pwp, B_FALSE)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: soft reset after flash update failed", __func__);
+ return (-1);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_WARN,
+ "%s: Firmware successfully upgraded.", __func__);
+ }
+ return (0);
+}
+
+/*
+ * Flash firmware support
+ * Called unlocked.
+ */
+int
+pmcs_fw_flash(pmcs_hw_t *pwp, pmcs_fw_hdr_t *hdr, uint32_t length)
+{
+ pmcs_fw_hdr_t *hp;
+ uint8_t *wrk, *base;
+
+ /*
+ * Step 1- Validate firmware chunks within passed pointer.
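+	 *
+	 * The image is expected to be a sequence of chunks, each a
+	 * pmcs_fw_hdr_t immediately followed by firmware_length bytes of
+	 * payload (lengths stored big-endian), with the chunks exactly
+	 * accounting for 'length' bytes:
+	 *
+	 *   [hdr][payload][hdr][payload] ... [hdr][payload]
+	 *
+	 * A zero-length chunk, or one that runs past the end of the image,
+	 * is rejected with EINVAL.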
+ */
+ hp = hdr;
+ wrk = (uint8_t *)hdr;
+ base = wrk;
+ for (;;) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: partition 0x%x, Length 0x%x", __func__,
+ hp->destination_partition, ntohl(hp->firmware_length));
+ if (ntohl(hp->firmware_length) == 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: bad firmware length 0x%x",
+ __func__, ntohl(hp->firmware_length));
+ return (EINVAL);
+ }
+ wrk += (sizeof (pmcs_fw_hdr_t) + ntohl(hp->firmware_length));
+ if (wrk == base + length) {
+ break;
+ }
+ if (wrk > base + length) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: out of bounds firmware length", __func__);
+ return (EINVAL);
+ }
+ hp = (void *)wrk;
+ }
+
+ /*
+ * Step 2- acquire scratch
+ */
+ (void) pmcs_acquire_scratch(pwp, B_TRUE);
+
+ /*
+ * Step 3- loop through firmware chunks and send each one
+ * down to be flashed.
+ */
+ hp = hdr;
+ wrk = (uint8_t *)hdr;
+ base = wrk;
+ for (;;) {
+ if (pmcs_flash_chunk(pwp, wrk)) {
+ pmcs_release_scratch(pwp);
+ return (EIO);
+ }
+ wrk += (sizeof (pmcs_fw_hdr_t) + ntohl(hp->firmware_length));
+ if (wrk == base + length) {
+ break;
+ }
+ hp = (void *) wrk;
+ }
+ pmcs_release_scratch(pwp);
+ return (0);
+}
+
+static int
+pmcs_flash_chunk(pmcs_hw_t *pwp, uint8_t *chunk)
+{
+ pmcs_fw_hdr_t *hp;
+ pmcwork_t *pwrk;
+ uint32_t len, seg, off, result, amt, msg[PMCS_MSG_SIZE], *ptr;
+
+ hp = (void *)chunk;
+ len = sizeof (pmcs_fw_hdr_t) + ntohl(hp->firmware_length);
+
+ seg = off = 0;
+ while (off < len) {
+ amt = PMCS_SCRATCH_SIZE;
+ if (off + amt > len) {
+ amt = len - off;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: segment %d offset %u length %u",
+ __func__, seg, off, amt);
+ (void) memcpy(pwp->scratch, &chunk[off], amt);
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
+ if (pwrk == NULL) {
+ return (ENOMEM);
+ }
+ pwrk->arg = msg;
+ msg[0] = LE_32(PMCS_HIPRI(pwp,
+ PMCS_OQ_EVENTS, PMCIN_FW_FLASH_UPDATE));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(off);
+ msg[3] = LE_32(amt);
+ if (off == 0) {
+ msg[4] = LE_32(len);
+ } else {
+ msg[4] = 0;
+ }
+ msg[5] = 0;
+ msg[6] = 0;
+ msg[7] = 0;
+ msg[8] = 0;
+ msg[9] = 0;
+ msg[10] = 0;
+ msg[11] = 0;
+ msg[12] = LE_32(DWORD0(pwp->scratch_dma));
+ msg[13] = LE_32(DWORD1(pwp->scratch_dma));
+ msg[14] = LE_32(amt);
+ msg[15] = 0;
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ return (ENOMEM);
+ }
+ COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
+ (void) memset(msg, 0xaf, sizeof (msg));
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ WAIT_FOR(pwrk, 5000, result);
+ pmcs_pwork(pwp, pwrk);
+ if (result) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_timeo, __func__);
+ return (EIO);
+ }
+ switch (LE_32(msg[2])) {
+ case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: segment %d complete pending reboot",
+ __func__, seg);
+ break;
+ case FLASH_UPDATE_IN_PROGRESS:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: segment %d downloaded", __func__, seg);
+ break;
+ case FLASH_UPDATE_HDR_ERR:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: segment %d header error", __func__, seg);
+ return (EIO);
+ case FLASH_UPDATE_OFFSET_ERR:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: segment %d offset error", __func__, seg);
+ return (EIO);
+ case FLASH_UPDATE_UPDATE_CRC_ERR:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: segment %d update crc error", __func__, seg);
+ return (EIO);
+ case FLASH_UPDATE_LENGTH_ERR:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: segment %d length error", __func__, seg);
+ return (EIO);
+ case FLASH_UPDATE_HW_ERR:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: segment %d hw error", __func__, seg);
+ return (EIO);
+ case FLASH_UPDATE_DNLD_NOT_SUPPORTED:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: segment %d download not supported error",
+ __func__, seg);
+ return (EIO);
+ case FLASH_UPDATE_DISABLED:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: segment %d update disabled error",
+ __func__, seg);
+ return (EIO);
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: segment %d unknown error %x",
+ __func__, seg, msg[2]);
+ return (EIO);
+ }
+ off += amt;
+ seg++;
+ }
+ return (0);
+}
+
+/*
+ * pmcs_validate_vpd
+ *
+ * Input: softstate pointer and pointer to vpd data buffer
+ * Returns: B_TRUE if VPD data looks OK, B_FALSE otherwise
+ */
+static boolean_t
+pmcs_validate_vpd(pmcs_hw_t *pwp, uint8_t *data)
+{
+ pmcs_vpd_header_t *vpd_header;
+ uint8_t *bufp, kv_len, *chksump, chksum = 0;
+ char tbuf[80];
+ char prop[24];
+ int idx, str_len;
+ uint16_t strid_length, chksum_len;
+ uint64_t wwid;
+ pmcs_vpd_kv_t *vkvp;
+
+ vpd_header = (pmcs_vpd_header_t *)data;
+
+ /*
+ * Make sure we understand the format of this data
+ */
+
+ if (vpd_header->eeprom_version < PMCS_VPD_VERSION) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: VPD version(%d) out-of-date; (%d) required."
+ " Thebe card needs to be flashed.",
+ __func__, vpd_header->eeprom_version, PMCS_VPD_VERSION);
+ }
+ if ((vpd_header->eeprom_version != PMCS_VPD_VERSION) &&
+ (vpd_header->eeprom_version != (PMCS_VPD_VERSION - 1))) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: VPD version mismatch (%d != %d)",
+ __func__, vpd_header->eeprom_version, PMCS_VPD_VERSION);
+ return (B_FALSE);
+ }
+
+ /*
+ * Do we have a valid SAS WWID?
+ */
+ if (((vpd_header->hba_sas_wwid[0] & 0xf0) >> 4) != NAA_IEEE_REG) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: SAS WWN has invalid NAA (%d)", __func__,
+ ((vpd_header->hba_sas_wwid[0] & 0xf0) >> 4));
+ return (B_FALSE);
+ }
+ wwid = pmcs_barray2wwn(vpd_header->hba_sas_wwid);
+ for (idx = 0; idx < PMCS_MAX_PORTS; idx++) {
+ pwp->sas_wwns[idx] = wwid + idx;
+ }
+
+ if (vpd_header->vpd_start_byte != PMCS_VPD_START) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Didn't see VPD start byte", __func__);
+ return (B_FALSE);
+ }
+
+ /*
+	 * We only checksum the VPD data between (and including) the VPD start
+	 * byte and the checksum value byte. The length of this data for the
+	 * checksum is 15 less than the length indicated in the vpd_length
+	 * field of the header:
+ * 8 (SAS WWN) + 2 (subsystem ID) + 2 (subsystem vendor ID) +
+ * 1 (end tag) + 2 (hex byte CRC, different from this one) = 15 bytes
+ */
+ /*
+	 * VPD length (little endian format) is represented as a byte-array
+	 * field and is read as follows to avoid alignment issues (on SPARC).
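+	 *
+	 * For example (bytes chosen purely for illustration), a vpd_length
+	 * of { 0x80, 0x01 } decodes to 0x0180 = 384, giving a checksum span
+	 * of 384 - 15 = 369 bytes.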
+ */
+ chksum_len = ((vpd_header->vpd_length[1] << 8) |
+ (vpd_header->vpd_length[0])) - 15;
+ /* Validate VPD data checksum */
+ chksump = (uint8_t *)&vpd_header->vpd_start_byte;
+	ASSERT(*chksump == PMCS_VPD_START);
+ for (idx = 0; idx < chksum_len; idx++, chksump++) {
+ chksum += *chksump;
+ }
+	ASSERT(*chksump == PMCS_VPD_END);
+ if (chksum) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: VPD checksum(%d) non-zero."
+ " Checksum validation failed.", __func__, chksum);
+ }
+
+ /*
+ * Get length of string ID tag and read it.
+ */
+ bufp = (uint8_t *)&vpd_header->vpd_start_byte;
+ bufp += 3; /* Skip the start byte and length */
+ /*
+ * String ID tag length (little endian format) is represented as
+ * byte-array & read the following way to avoid alignment issues
+ * (in SPARC)
+ */
+ strid_length = (vpd_header->strid_length[1] << 8) |
+ (vpd_header->strid_length[0]);
+ if (strid_length > 79) {
+ strid_length = 79;
+ }
+ bcopy(bufp, tbuf, strid_length);
+ tbuf[strid_length] = 0;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: Product Name: '%s'", __func__, tbuf);
+ pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_MODEL_NAME, tbuf);
+
+ /*
+ * Skip VPD-R tag and length of read-only tag, then start reading
+ * keyword/value pairs
+ */
+ bufp += strid_length; /* Skip to VPD-R tag */
+ bufp += 3; /* Skip VPD-R tag and length of VPD-R data */
+
+ vkvp = (pmcs_vpd_kv_t *)bufp;
+
+ while (vkvp->keyword[0] != PMCS_VPD_END) {
+ tbuf[0] = 0;
+ str_len = snprintf(tbuf, 80, "VPD: %c%c = <",
+ vkvp->keyword[0], vkvp->keyword[1]);
+
+ kv_len = vkvp->value_length;
+ for (idx = 0; idx < kv_len; idx++) {
+ tbuf[str_len + idx] = vkvp->value[idx];
+ prop[idx] = vkvp->value[idx];
+ }
+ prop[idx] = '\0';
+ str_len += kv_len;
+ tbuf[str_len] = '>';
+ tbuf[str_len + 1] = 0;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s (Len: 0x%x)", tbuf, kv_len);
+
+ /* Keyword is Manufacturer */
+ if ((vkvp->keyword[0] == 'M') && (vkvp->keyword[1] == 'N')) {
+ pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING,
+ PMCS_MANUFACTURER, prop);
+ }
+ /* Keyword is Serial Number */
+ if ((vkvp->keyword[0] == 'S') && (vkvp->keyword[1] == 'N')) {
+ pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING,
+ PMCS_SERIAL_NUMBER, prop);
+ }
+
+ vkvp = (pmcs_vpd_kv_t *)(bufp + 3 + kv_len);
+ bufp += kv_len + 3;
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * pmcs_get_nvmd
+ *
+ * This function will read the requested data from the non-volatile
+ * storage on the card. This could mean SEEPROM, VPD, or other areas
+ * as defined by the PM8001 programmer's manual.
+ *
+ * nvmd_type: The data type being requested
+ * nvmd: NVM device to access (IOP/AAP1)
+ * offset: Must be 4K-aligned
+ * buf: Pointer to memory region for retrieved data
+ * size_left: Total available bytes left in buf
+ *
+ * Returns: non-negative on success, -1 on failure
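+ *
+ * A usage sketch (selector, offset and buffer size chosen purely for
+ * illustration):
+ *
+ *	char *elog = kmem_zalloc(PMCS_FLASH_CHUNK_SIZE, KM_SLEEP);
+ *	int n;
+ *
+ *	n = pmcs_get_nvmd(pwp, PMCS_NVMD_EVENT_LOG, PMCIN_NVMD_AAP1, 0,
+ *	    elog, PMCS_FLASH_CHUNK_SIZE);
+ *	if (n < 0)
+ *		... read failed ...
+ *	kmem_free(elog, PMCS_FLASH_CHUNK_SIZE);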
+ */
+
+/*ARGSUSED*/
+int
+pmcs_get_nvmd(pmcs_hw_t *pwp, pmcs_nvmd_type_t nvmd_type, uint8_t nvmd,
+ uint32_t offset, char *buf, uint32_t size_left)
+{
+ pmcs_get_nvmd_cmd_t iomb;
+ pmcwork_t *workp;
+ uint8_t *chunkp;
+ uint32_t *ptr, ibq, *iombp;
+ uint32_t dlen;
+ uint16_t status;
+ uint8_t tdas_nvmd, ip, tda, tbn_tdps;
+ uint8_t doa[3];
+ int32_t result = -1, i = 0;
+
+ switch (nvmd_type) {
+ case PMCS_NVMD_VPD:
+ tdas_nvmd = PMCIN_NVMD_TDPS_1 | PMCIN_NVMD_TWI;
+ tda = PMCIN_TDA_PAGE(2);
+ tbn_tdps = PMCIN_NVMD_TBN(0) | PMCIN_NVMD_TDPS_8;
+ ip = PMCIN_NVMD_INDIRECT_PLD;
+ dlen = LE_32(PMCS_SEEPROM_PAGE_SIZE);
+ doa[0] = 0;
+ doa[1] = 0;
+ doa[2] = 0;
+ break;
+ case PMCS_NVMD_REG_DUMP:
+ tdas_nvmd = nvmd;
+ tda = 0;
+ tbn_tdps = 0;
+ ip = PMCIN_NVMD_INDIRECT_PLD;
+ dlen = LE_32(PMCS_REGISTER_DUMP_BLOCK_SIZE);
+ doa[0] = offset & 0xff;
+ doa[1] = (offset >> 8) & 0xff;
+ doa[2] = (offset >> 16) & 0xff;
+ break;
+ case PMCS_NVMD_EVENT_LOG:
+ tdas_nvmd = nvmd;
+ tda = 0;
+ tbn_tdps = 0;
+ ip = PMCIN_NVMD_INDIRECT_PLD;
+ dlen = LE_32(PMCS_REGISTER_DUMP_BLOCK_SIZE);
+ offset = offset + PMCS_NVMD_EVENT_LOG_OFFSET;
+ doa[0] = offset & 0xff;
+ doa[1] = (offset >> 8) & 0xff;
+ doa[2] = (offset >> 16) & 0xff;
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Invalid nvmd type: %d", __func__, nvmd_type);
+ return (-1);
+ }
+
+ workp = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
+ if (workp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_WARN,
+ "%s: Unable to get work struct", __func__);
+ return (-1);
+ }
+
+ ptr = &iomb.header;
+ bzero(ptr, sizeof (pmcs_get_nvmd_cmd_t));
+ *ptr = LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_GENERAL, PMCIN_GET_NVMD_DATA));
+ workp->arg = (void *)&iomb;
+ iomb.htag = LE_32(workp->htag);
+ iomb.ip = ip;
+ iomb.tbn_tdps = tbn_tdps;
+ iomb.tda = tda;
+ iomb.tdas_nvmd = tdas_nvmd;
+ iomb.ipbal = LE_32(DWORD0(pwp->flash_chunk_addr));
+ iomb.ipbah = LE_32(DWORD1(pwp->flash_chunk_addr));
+ iomb.ipdl = dlen;
+ iomb.doa[0] = doa[0];
+ iomb.doa[1] = doa[1];
+ iomb.doa[2] = doa[2];
+
+ /*
+ * ptr will now point to the inbound queue message
+ */
+ GET_IO_IQ_ENTRY(pwp, ptr, 0, ibq);
+ if (ptr == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, "!%s: Unable to get IQ entry",
+ __func__);
+ pmcs_pwork(pwp, workp);
+ return (-1);
+ }
+
+ bzero(ptr, PMCS_MSG_SIZE << 2); /* PMCS_MSG_SIZE is in dwords */
+ iombp = (uint32_t *)&iomb;
+ COPY_MESSAGE(ptr, iombp, sizeof (pmcs_get_nvmd_cmd_t) >> 2);
+ workp->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, ibq);
+
+ WAIT_FOR(workp, 1000, result);
+ ptr = workp->arg;
+ if (result) {
+ pmcs_timed_out(pwp, workp->htag, __func__);
+ pmcs_pwork(pwp, workp);
+ return (-1);
+ }
+ status = LE_32(*(ptr + 3)) & 0xffff;
+ if (status != PMCS_NVMD_STAT_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Error, status = 0x%04x",
+ __func__, status);
+ pmcs_pwork(pwp, workp);
+ return (-1);
+ }
+
+ pmcs_pwork(pwp, workp);
+
+ if (ddi_dma_sync(pwp->cip_handles, 0, 0,
+ DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check failed at "
+ "%s():%d", __func__, __LINE__);
+ }
+ chunkp = (uint8_t *)pwp->flash_chunkp;
+
+ switch (nvmd) {
+ case PMCIN_NVMD_VPD:
+ if (pmcs_validate_vpd(pwp, chunkp)) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ break;
+ case PMCIN_NVMD_AAP1:
+ case PMCIN_NVMD_IOP:
+ ASSERT(buf);
+ i = 0;
+ if (nvmd_type == PMCS_NVMD_REG_DUMP) {
+ while ((i < PMCS_FLASH_CHUNK_SIZE) &&
+ (chunkp[i] != 0xff) && (chunkp[i] != '\0')) {
+ (void) snprintf(&buf[i], (size_left - i),
+ "%c", chunkp[i]);
+ i++;
+ }
+ } else if (nvmd_type == PMCS_NVMD_EVENT_LOG) {
+ i = pmcs_dump_binary(pwp, pwp->flash_chunkp, 0,
+ (PMCS_FLASH_CHUNK_SIZE >> 2), buf, size_left);
+ }
+ result = i;
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "UNKNOWN NVMD DEVICE");
+ return (-1);
+ }
+
+ return (result);
+}
+
+/*
+ * pmcs_set_nvmd
+ *
+ * This function will write the requested data to non-volatile storage
+ * on the HBA. This could mean SEEPROM, VPD, or other areas as defined by
+ * the PM8001 programmer's manual.
+ *
+ * nvmd_type: The data type to be written
+ * buf: Pointer to memory region for data to write
+ * len: Length of the data buffer
+ *
+ * Returns: B_TRUE on success, B_FALSE on failure
+ */
+
+boolean_t
+pmcs_set_nvmd(pmcs_hw_t *pwp, pmcs_nvmd_type_t nvmd_type, uint8_t *buf,
+ size_t len)
+{
+ pmcs_set_nvmd_cmd_t iomb;
+ pmcwork_t *workp;
+ uint32_t *ptr, ibq, *iombp;
+ uint32_t dlen;
+ uint16_t status;
+ uint8_t tdas_nvmd, ip;
+ int result;
+
+ switch (nvmd_type) {
+ case PMCS_NVMD_SPCBOOT:
+ tdas_nvmd = PMCIN_NVMD_SEEPROM;
+ ip = PMCIN_NVMD_INDIRECT_PLD;
+ ASSERT((len >= PMCS_SPCBOOT_MIN_SIZE) &&
+ (len <= PMCS_SPCBOOT_MAX_SIZE));
+ dlen = LE_32(len);
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Invalid nvmd type: %d", __func__, nvmd_type);
+ return (B_FALSE);
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEVEL, "%s: Request for nvmd type: %d",
+ __func__, nvmd_type);
+
+ workp = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
+ if (workp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_WARN,
+ "%s: Unable to get work struct", __func__);
+ return (B_FALSE);
+ }
+
+ ptr = &iomb.header;
+ bzero(ptr, sizeof (pmcs_set_nvmd_cmd_t));
+ *ptr = LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_GENERAL, PMCIN_SET_NVMD_DATA));
+ workp->arg = (void *)&iomb;
+ iomb.htag = LE_32(workp->htag);
+ iomb.ip = ip;
+ iomb.tdas_nvmd = tdas_nvmd;
+ iomb.signature = LE_32(PMCS_SEEPROM_SIGNATURE);
+ iomb.ipbal = LE_32(DWORD0(pwp->flash_chunk_addr));
+ iomb.ipbah = LE_32(DWORD1(pwp->flash_chunk_addr));
+ iomb.ipdl = dlen;
+
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG_DEVEL,
+ "PMCIN_SET_NVMD_DATA iomb", (void *)&iomb);
+
+ bcopy(buf, pwp->flash_chunkp, len);
+ if (ddi_dma_sync(pwp->cip_handles, 0, 0,
+ DDI_DMA_SYNC_FORDEV) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check failed at "
+ "%s():%d", __func__, __LINE__);
+ }
+
+ /*
+ * ptr will now point to the inbound queue message
+ */
+ GET_IO_IQ_ENTRY(pwp, ptr, 0, ibq);
+ if (ptr == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, "!%s: Unable to get IQ entry",
+ __func__);
+ pmcs_pwork(pwp, workp);
+ return (B_FALSE);
+ }
+
+ bzero(ptr, PMCS_MSG_SIZE << 2); /* PMCS_MSG_SIZE is in dwords */
+ iombp = (uint32_t *)&iomb;
+ COPY_MESSAGE(ptr, iombp, sizeof (pmcs_set_nvmd_cmd_t) >> 2);
+ workp->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, ibq);
+
+ WAIT_FOR(workp, 2000, result);
+
+ if (result) {
+ pmcs_timed_out(pwp, workp->htag, __func__);
+ pmcs_pwork(pwp, workp);
+ return (B_FALSE);
+ }
+
+ pmcs_pwork(pwp, workp);
+
+ status = LE_32(*(ptr + 3)) & 0xffff;
+ if (status != PMCS_NVMD_STAT_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Error, status = 0x%04x",
+ __func__, status);
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
diff --git a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_sata.c b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_sata.c
new file mode 100644
index 0000000000..1ee93abadc
--- /dev/null
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_sata.c
@@ -0,0 +1,709 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * SATA midlayer interface for the PMC driver.
+ */
+
+#include <sys/scsi/adapters/pmcs/pmcs.h>
+
+static void
+SATAcopy(pmcs_cmd_t *sp, void *kbuf, uint32_t amt)
+{
+ struct buf *bp = scsi_pkt2bp(CMD2PKT(sp));
+
+ bp_mapin(scsi_pkt2bp(CMD2PKT(sp)));
+ /* There is only one direction currently */
+ (void) memcpy(bp->b_un.b_addr, kbuf, amt);
+ CMD2PKT(sp)->pkt_resid -= amt;
+ CMD2PKT(sp)->pkt_state |= STATE_XFERRED_DATA;
+ bp_mapout(scsi_pkt2bp(CMD2PKT(sp)));
+}
+
+/*
+ * Run a non block-io command. Some commands are interpreted
+ * out of extant data. Some imply actually running a SATA command.
+ *
+ * Returns zero if we were able to run.
+ *
+ * Returns -1 only if other commands are active, either another
+ * command here or regular I/O active.
+ *
+ * Called with PHY lock and xp statlock held.
+ */
+#define SRESPSZ 128
+
+static int
+pmcs_sata_special_work(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
+{
+ int i;
+ int saq;
+ pmcs_cmd_t *sp;
+ struct scsi_pkt *pkt;
+ pmcs_phy_t *pptr;
+ uint8_t rp[SRESPSZ];
+ ata_identify_t *id;
+ uint32_t amt = 0;
+ uint8_t key = 0x05; /* illegal command */
+ uint8_t asc = 0;
+ uint8_t ascq = 0;
+ uint8_t status = STATUS_GOOD;
+
+ if (xp->actv_cnt) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1, "%s: target %p actv count %u",
+ __func__, (void *)xp, xp->actv_cnt);
+ return (-1);
+ }
+ if (xp->special_running) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: target %p special running already",
+ __func__, (void *)xp);
+ return (-1);
+ }
+ xp->special_needed = 0;
+
+ /*
+ * We're now running special.
+ */
+ xp->special_running = 1;
+ pptr = xp->phy;
+
+ sp = STAILQ_FIRST(&xp->sq);
+ if (sp == NULL) {
+ xp->special_running = 0;
+ return (0);
+ }
+
+ pkt = CMD2PKT(sp);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: target %p cmd %p cdb0 %x with actv_cnt %u",
+ __func__, (void *)xp, (void *)sp, pkt->pkt_cdbp[0], xp->actv_cnt);
+
+ if (pkt->pkt_cdbp[0] == SCMD_INQUIRY ||
+ pkt->pkt_cdbp[0] == SCMD_READ_CAPACITY) {
+ int retval;
+
+ if (pmcs_acquire_scratch(pwp, B_FALSE)) {
+ xp->special_running = 0;
+ return (-1);
+ }
+ saq = 1;
+
+ mutex_exit(&xp->statlock);
+ retval = pmcs_sata_identify(pwp, pptr);
+ mutex_enter(&xp->statlock);
+
+ if (retval) {
+ pmcs_release_scratch(pwp);
+ xp->special_running = 0;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: target %p identify failed %x",
+ __func__, (void *)xp, retval);
+ /*
+ * If the failure is due to not being
+ * able to get resources, return such
+ * that we'll try later. Otherwise,
+ * fail current command.
+ */
+ if (retval == ENOMEM) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: sata identify failed (ENOMEM) for "
+ "cmd %p", __func__, (void *)sp);
+ return (-1);
+ }
+ pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
+ STATE_SENT_CMD;
+ if (retval == ETIMEDOUT) {
+ pkt->pkt_reason = CMD_TIMEOUT;
+ pkt->pkt_statistics |= STAT_TIMEOUT;
+ } else {
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ }
+ goto out;
+ }
+
+ id = pwp->scratch;
+
+ /*
+ * Check to see if this device is an NCQ capable device.
+ * Yes, we'll end up doing this check for every INQUIRY
+ * if indeed we *are* only a pio device, but this is so
+ * infrequent that it's not really worth an extra bitfield.
+ *
+ * Note that PIO mode here means that the PMCS firmware
+		 * performs PIO - not us.
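+		 *
+		 * For example, an identify page whose word 76 has bit 8 set
+		 * and whose word 75 reads 0x1f advertises NCQ with a queue
+		 * depth of (0x1f & 0x1f) + 1 = 32.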
+ */
+ if (xp->ncq == 0) {
+ /*
+ * Reset existing stuff.
+ */
+ xp->pio = 0;
+ xp->qdepth = 1;
+ xp->tagmap = 0;
+
+ if (id->word76 != 0 && id->word76 != 0xffff &&
+ (LE_16(id->word76) & (1 << 8))) {
+ xp->ncq = 1;
+ xp->qdepth = (LE_16(id->word75) & 0x1f) + 1;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: device %s supports NCQ %u deep",
+ __func__, xp->phy->path, xp->qdepth);
+ } else {
+ /*
+ * Default back to PIO.
+ *
+ * Note that non-FPDMA would still be possible,
+ * but for this specific configuration, if it's
+ * not NCQ it's safest to assume PIO.
+ */
+ xp->pio = 1;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: device %s assumed PIO",
+ __func__, xp->phy->path);
+ }
+ }
+ } else {
+ saq = 0;
+ id = NULL;
+ }
+
+ bzero(rp, SRESPSZ);
+
+ switch (pkt->pkt_cdbp[0]) {
+ case SCMD_INQUIRY:
+ {
+ struct scsi_inquiry *inqp;
+ uint16_t *a, *b;
+
+ /* Check for illegal bits */
+ if ((pkt->pkt_cdbp[1] & 0xfc) || pkt->pkt_cdbp[5]) {
+ status = STATUS_CHECK;
+ asc = 0x24; /* invalid field in cdb */
+ break;
+ }
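+		/*
+		 * With the EVPD bit set, synthesize the requested vital
+		 * product data page (0x00 supported pages, 0x80 unit
+		 * serial number, 0x83 device identification) from the
+		 * ATA IDENTIFY data; otherwise build a standard INQUIRY
+		 * response.
+		 */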
+ if (pkt->pkt_cdbp[1] & 0x1) {
+ switch (pkt->pkt_cdbp[2]) {
+ case 0x0:
+ rp[3] = 3;
+ rp[5] = 0x80;
+ rp[6] = 0x83;
+ amt = 7;
+ break;
+ case 0x80:
+ rp[1] = 0x80;
+ rp[3] = 0x14;
+ a = (void *) &rp[4];
+ b = id->model_number;
+ for (i = 0; i < 5; i++) {
+ *a = ddi_swap16(*b);
+ a++;
+ b++;
+ }
+ amt = 24;
+ break;
+ case 0x83:
+ rp[1] = 0x83;
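+			/*
+			 * If IDENTIFY word 87 bit 8 reports a valid 64-bit
+			 * World Wide Name with NAA type 5 in word 108,
+			 * return it as a binary NAA designator; otherwise
+			 * fall back to a T10 vendor-ID ASCII designator
+			 * built from "ATA", the model and serial numbers.
+			 */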
+ if ((LE_16(id->word87) & 0x100) &&
+ (LE_16(id->word108) >> 12) == 5) {
+ rp[3] = 12;
+ rp[4] = 1;
+ rp[5] = 3;
+ rp[7] = 8;
+ rp[8] = LE_16(id->word108) >> 8;
+ rp[9] = LE_16(id->word108);
+ rp[10] = LE_16(id->word109) >> 8;
+ rp[11] = LE_16(id->word109);
+ rp[12] = LE_16(id->word110) >> 8;
+ rp[13] = LE_16(id->word110);
+ rp[14] = LE_16(id->word111) >> 8;
+ rp[15] = LE_16(id->word111);
+ amt = 16;
+ } else {
+ rp[3] = 64;
+ rp[4] = 2;
+ rp[5] = 1;
+ rp[7] = 60;
+ rp[8] = 'A';
+ rp[9] = 'T';
+ rp[10] = 'A';
+ rp[11] = ' ';
+ rp[12] = ' ';
+ rp[13] = ' ';
+ rp[14] = ' ';
+ rp[15] = ' ';
+ a = (void *) &rp[16];
+ b = id->model_number;
+ for (i = 0; i < 20; i++) {
+ *a = ddi_swap16(*b);
+ a++;
+ b++;
+ }
+ a = (void *) &rp[40];
+ b = id->serial_number;
+ for (i = 0; i < 10; i++) {
+ *a = ddi_swap16(*b);
+ a++;
+ b++;
+ }
+ amt = 68;
+ }
+ break;
+ default:
+ status = STATUS_CHECK;
+ asc = 0x24; /* invalid field in cdb */
+ break;
+ }
+ } else {
+ inqp = (struct scsi_inquiry *)rp;
+ inqp->inq_qual = (LE_16(id->word0) & 0x80) ? 0x80 : 0;
+ inqp->inq_ansi = 5; /* spc3 */
+ inqp->inq_rdf = 2; /* response format 2 */
+ inqp->inq_len = 32;
+
+ if (xp->ncq && (xp->qdepth > 1)) {
+ inqp->inq_cmdque = 1;
+ }
+
+			(void) memcpy(inqp->inq_vid, "ATA     ", 8);
+
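+			/*
+			 * ATA IDENTIFY strings carry two ASCII characters
+			 * per 16-bit word with the bytes swapped, so swap
+			 * each word while copying the model number into
+			 * the product id field.
+			 */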
+ a = (void *)inqp->inq_pid;
+ b = id->model_number;
+ for (i = 0; i < 8; i++) {
+ *a = ddi_swap16(*b);
+ a++;
+ b++;
+ }
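+			/*
+			 * The 8-character ATA firmware revision spans four
+			 * IDENTIFY words; if the last two words are blanks
+			 * (0x2020), the meaningful part is in the first two
+			 * words, otherwise it is in the last two.
+			 */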
+ if (id->firmware_revision[2] == 0x2020 &&
+ id->firmware_revision[3] == 0x2020) {
+ inqp->inq_revision[0] =
+ ddi_swap16(id->firmware_revision[0]) >> 8;
+ inqp->inq_revision[1] =
+ ddi_swap16(id->firmware_revision[0]);
+ inqp->inq_revision[2] =
+ ddi_swap16(id->firmware_revision[1]) >> 8;
+ inqp->inq_revision[3] =
+ ddi_swap16(id->firmware_revision[1]);
+ } else {
+ inqp->inq_revision[0] =
+ ddi_swap16(id->firmware_revision[2]) >> 8;
+ inqp->inq_revision[1] =
+ ddi_swap16(id->firmware_revision[2]);
+ inqp->inq_revision[2] =
+ ddi_swap16(id->firmware_revision[3]) >> 8;
+ inqp->inq_revision[3] =
+ ddi_swap16(id->firmware_revision[3]);
+ }
+ amt = 36;
+ }
+ amt = pmcs_set_resid(pkt, amt, pkt->pkt_cdbp[4]);
+ if (amt) {
+ if (xp->actv_cnt) {
+ xp->special_needed = 1;
+ xp->special_running = 0;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: @ line %d",
+ __func__, __LINE__);
+ if (saq) {
+ pmcs_release_scratch(pwp);
+ }
+ return (-1);
+ }
+ SATAcopy(sp, rp, amt);
+ }
+ break;
+ }
+ case SCMD_READ_CAPACITY:
+ {
+ uint64_t last_block;
+ uint32_t block_size = 512; /* XXXX */
+
+ xp->capacity = LBA_CAPACITY(id);
+ last_block = xp->capacity - 1;
+ /* Check for illegal bits */
+ if ((pkt->pkt_cdbp[1] & 0xfe) || pkt->pkt_cdbp[6] ||
+ (pkt->pkt_cdbp[8] & 0xfe) || pkt->pkt_cdbp[7] ||
+ pkt->pkt_cdbp[9]) {
+ status = STATUS_CHECK;
+ asc = 0x24; /* invalid field in cdb */
+ break;
+ }
+ for (i = 1; i < 10; i++) {
+ if (pkt->pkt_cdbp[i]) {
+ status = STATUS_CHECK;
+ asc = 0x24; /* invalid field in cdb */
+ break;
+ }
+ }
+ if (status != STATUS_GOOD) {
+ break;
+ }
+ if (last_block > 0xffffffffULL) {
+ last_block = 0xffffffffULL;
+ }
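+		/*
+		 * The READ CAPACITY (10) payload is 8 bytes: the last LBA
+		 * followed by the block size, both big-endian.  Capacities
+		 * beyond 32 bits of LBA are reported as 0xffffffff so the
+		 * initiator knows to issue READ CAPACITY (16).
+		 */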
+ rp[0] = (last_block >> 24) & 0xff;
+ rp[1] = (last_block >> 16) & 0xff;
+ rp[2] = (last_block >> 8) & 0xff;
+ rp[3] = (last_block) & 0xff;
+ rp[4] = (block_size >> 24) & 0xff;
+ rp[5] = (block_size >> 16) & 0xff;
+ rp[6] = (block_size >> 8) & 0xff;
+ rp[7] = (block_size) & 0xff;
+ amt = 8;
+ amt = pmcs_set_resid(pkt, amt, 8);
+ if (amt) {
+ if (xp->actv_cnt) {
+ xp->special_needed = 1;
+ xp->special_running = 0;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: @ line %d",
+ __func__, __LINE__);
+ if (saq) {
+ pmcs_release_scratch(pwp);
+ }
+ return (-1);
+ }
+ SATAcopy(sp, rp, amt);
+ }
+ break;
+ }
+ case SCMD_REPORT_LUNS: {
+ int rl_len;
+
+ /* Check for illegal bits */
+ if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[3] || pkt->pkt_cdbp[4] ||
+ pkt->pkt_cdbp[5] || pkt->pkt_cdbp[10] ||
+ pkt->pkt_cdbp[11]) {
+ status = STATUS_CHECK;
+ asc = 0x24; /* invalid field in cdb */
+ break;
+ }
+
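+		/*
+		 * Only LUN 0 exists: the 4-byte big-endian LUN list length
+		 * is 8 and the single LUN entry itself is all zeros.
+		 */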
+ rp[3] = 8;
+ rl_len = 16; /* list length (4) + reserved (4) + 1 LUN (8) */
+ amt = rl_len;
+ amt = pmcs_set_resid(pkt, amt, rl_len);
+
+ if (amt) {
+ if (xp->actv_cnt) {
+ xp->special_needed = 1;
+ xp->special_running = 0;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: @ line %d",
+ __func__, __LINE__);
+ if (saq) {
+ pmcs_release_scratch(pwp);
+ }
+ return (-1);
+ }
+ SATAcopy(sp, rp, rl_len);
+ }
+ break;
+ }
+
+ case SCMD_REQUEST_SENSE:
+ /* Check for illegal bits */
+ if ((pkt->pkt_cdbp[1] & 0xfe) || pkt->pkt_cdbp[2] ||
+ pkt->pkt_cdbp[3] || pkt->pkt_cdbp[5]) {
+ status = STATUS_CHECK;
+ asc = 0x24; /* invalid field in cdb */
+ break;
+ }
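+		/*
+		 * Return 18 bytes of fixed-format sense data (response
+		 * code 0x70 with the VALID bit, i.e. 0xf0) indicating
+		 * no sense information.
+		 */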
+ rp[0] = 0xf0;
+ amt = 18;
+ amt = pmcs_set_resid(pkt, amt, pkt->pkt_cdbp[4]);
+ if (amt) {
+ if (xp->actv_cnt) {
+ xp->special_needed = 1;
+ xp->special_running = 0;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: @ line %d",
+ __func__, __LINE__);
+ if (saq) {
+ pmcs_release_scratch(pwp);
+ }
+ return (-1);
+ }
+ SATAcopy(sp, rp, 18);
+ }
+ break;
+ case SCMD_START_STOP:
+ /* Check for illegal bits */
+ if ((pkt->pkt_cdbp[1] & 0xfe) || pkt->pkt_cdbp[2] ||
+ (pkt->pkt_cdbp[3] & 0xf0) || (pkt->pkt_cdbp[4] & 0x08) ||
+ pkt->pkt_cdbp[5]) {
+ status = STATUS_CHECK;
+ asc = 0x24; /* invalid field in cdb */
+ break;
+ }
+ break;
+ case SCMD_SYNCHRONIZE_CACHE:
+ /* Check for illegal bits */
+ if ((pkt->pkt_cdbp[1] & 0xf8) || (pkt->pkt_cdbp[6] & 0xe0) ||
+ pkt->pkt_cdbp[9]) {
+ status = STATUS_CHECK;
+ asc = 0x24; /* invalid field in cdb */
+ break;
+ }
+ break;
+ case SCMD_TEST_UNIT_READY:
+ /* Check for illegal bits */
+ if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2] || pkt->pkt_cdbp[3] ||
+ pkt->pkt_cdbp[4] || pkt->pkt_cdbp[5]) {
+ status = STATUS_CHECK;
+ asc = 0x24; /* invalid field in cdb */
+ break;
+ }
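+		/*
+		 * If a check condition is pending, report UNIT ATTENTION
+		 * (key 0x6) with ASC 0x28 (not ready to ready change)
+		 * exactly once and then clear it.
+		 */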
+ if (xp->ca) {
+ status = STATUS_CHECK;
+ key = 0x6;
+ asc = 0x28;
+ xp->ca = 0;
+ }
+ break;
+ default:
+ asc = 0x20; /* invalid operation command code */
+ status = STATUS_CHECK;
+ break;
+ }
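+	/*
+	 * On any failure, latch 18 bytes of fixed-format sense carrying
+	 * the key/ASC/ASCQ chosen above; otherwise latch clean status.
+	 */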
+ if (status != STATUS_GOOD) {
+ bzero(rp, 18);
+ rp[0] = 0xf0;
+ rp[2] = key;
+ rp[12] = asc;
+ rp[13] = ascq;
+ pmcs_latch_status(pwp, sp, status, rp, 18, pptr->path);
+ } else {
+ pmcs_latch_status(pwp, sp, status, NULL, 0, pptr->path);
+ }
+
+out:
+ STAILQ_REMOVE_HEAD(&xp->sq, cmd_next);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
+ __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
+ pkt->pkt_state, pkt->pkt_resid, status);
+
+ if (saq) {
+ pmcs_release_scratch(pwp);
+ }
+
+ if (xp->draining) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: waking up drain waiters", __func__);
+ cv_signal(&pwp->drain_cv);
+ }
+
+ mutex_exit(&xp->statlock);
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ PMCS_CQ_RUN_LOCKED(pwp);
+ mutex_exit(&pwp->cq_lock);
+ mutex_enter(&xp->statlock);
+ xp->special_running = 0;
+ return (0);
+}
+
+/*
+ * Run all special commands queued up for a SATA device.
+ * We're only called if the caller knows we have work to do.
+ *
+ * If things are still active for the device we can't run them;
+ * we return saying we didn't run anything.
+ *
+ * When we finish, wake up anyone waiting for active commands
+ * to go to zero.
+ *
+ * Called with PHY lock and xp statlock held.
+ */
+int
+pmcs_run_sata_special(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
+{
+ while (!STAILQ_EMPTY(&xp->sq)) {
+ if (pmcs_sata_special_work(pwp, xp)) {
+ return (-1);
+ }
+ }
+ return (0);
+}
+
+/*
+ * Search for SATA special commands to run and run them.
+ * If we succeed in running the special command(s), kick
+ * the normal commands into operation again. Call completion
+ * for any commands that were completed while we were here.
+ *
+ * Called unlocked.
+ */
+void
+pmcs_sata_work(pmcs_hw_t *pwp)
+{
+ pmcs_xscsi_t *xp;
+ int spinagain = 0;
+ uint16_t target;
+
+ for (target = 0; target < pwp->max_dev; target++) {
+ xp = pwp->targets[target];
+ if ((xp == NULL) || (xp->phy == NULL)) {
+ continue;
+ }
+ pmcs_lock_phy(xp->phy);
+ mutex_enter(&xp->statlock);
+ if (STAILQ_EMPTY(&xp->sq)) {
+ mutex_exit(&xp->statlock);
+ pmcs_unlock_phy(xp->phy);
+ continue;
+ }
+ if (xp->actv_cnt) {
+ xp->special_needed = 1;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: deferring until drained", __func__);
+ spinagain++;
+ } else {
+ if (pmcs_run_sata_special(pwp, xp)) {
+ spinagain++;
+ }
+ }
+ mutex_exit(&xp->statlock);
+ pmcs_unlock_phy(xp->phy);
+ }
+
+ if (spinagain) {
+ SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
+ } else {
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ }
+
+ /*
+ * Run completion on any commands ready for it.
+ */
+ PMCS_CQ_RUN(pwp);
+}
+
+/*
+ * Called with PHY lock held and scratch acquired
+ */
+int
+pmcs_sata_identify(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ fis_t fis;
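+	/*
+	 * Build a Register Host-to-Device FIS with the IDENTIFY DEVICE
+	 * command in the command field and the C bit (bit 15) set; the
+	 * remaining FIS words are zero.
+	 */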
+ fis[0] = (IDENTIFY_DEVICE << 16) | (1 << 15) | FIS_REG_H2DEV;
+ fis[1] = 0;
+ fis[2] = 0;
+ fis[3] = 0;
+ fis[4] = 0;
+ return (pmcs_run_sata_cmd(pwp, pptr, fis, SATA_PROTOCOL_PIO,
+ PMCIN_DATADIR_2_INI, sizeof (ata_identify_t)));
+}
+
+/*
+ * Called with PHY lock held and scratch held
+ */
+int
+pmcs_run_sata_cmd(pmcs_hw_t *pwp, pmcs_phy_t *pptr, fis_t fis, uint32_t mode,
+ uint32_t ddir, uint32_t dlen)
+{
+ struct pmcwork *pwrk;
+ uint32_t *ptr, msg[PMCS_MSG_SIZE];
+ uint32_t iq, htag;
+ int i, result = 0;
+
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
+ if (pwrk == NULL) {
+ return (ENOMEM);
+ }
+
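+	/*
+	 * Build the SATA_HOST_IO_START IOMB: tag, device id, transfer
+	 * length and protocol/direction, followed by the 5-dword FIS in
+	 * words 5-9 and, for data transfers, the scratch buffer's DMA
+	 * address and length in words 12-14.
+	 */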
+ msg[0] = LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE,
+ PMCIN_SATA_HOST_IO_START));
+ htag = pwrk->htag;
+ pwrk->arg = msg;
+ pwrk->dtype = SATA;
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(pptr->device_id);
+ msg[3] = LE_32(dlen);
+ msg[4] = LE_32(mode | ddir);
+ if (dlen) {
+ if (ddir == PMCIN_DATADIR_2_DEV) {
+ if (ddi_dma_sync(pwp->cip_handles, 0, 0,
+ DDI_DMA_SYNC_FORDEV) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check "
+ "failed at %s():%d", __func__, __LINE__);
+ }
+ }
+ msg[12] = LE_32(DWORD0(pwp->scratch_dma));
+ msg[13] = LE_32(DWORD1(pwp->scratch_dma));
+ msg[14] = LE_32(dlen);
+ msg[15] = 0;
+ } else {
+ msg[12] = 0;
+ msg[13] = 0;
+ msg[14] = 0;
+ msg[15] = 0;
+ }
+ for (i = 0; i < 5; i++) {
+ msg[5+i] = LE_32(fis[i]);
+ }
+ msg[10] = 0;
+ msg[11] = 0;
+ GET_IO_IQ_ENTRY(pwp, ptr, pptr->device_id, iq);
+ if (ptr == NULL) {
+ pmcs_pwork(pwp, pwrk);
+ return (ENOMEM);
+ }
+ COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, iq);
+
+ pmcs_unlock_phy(pptr);
+ WAIT_FOR(pwrk, 1000, result);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_lock_phy(pptr);
+
+ if (result) {
+ pmcs_timed_out(pwp, htag, __func__);
+ if (pmcs_abort(pwp, pptr, htag, 0, 1)) {
+ pptr->abort_pending = 1;
+ SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
+ }
+ return (ETIMEDOUT);
+ }
+
+ if (LE_32(msg[2]) != PMCOUT_STATUS_OK) {
+ return (EIO);
+ }
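+	/*
+	 * If the completion carried response data, copy back up to one
+	 * FIS worth of it so the caller can inspect the returned
+	 * register FIS.
+	 */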
+ if (LE_32(ptr[3]) != 0) {
+ size_t j, amt = LE_32(ptr[3]);
+ if (amt > sizeof (fis_t)) {
+ amt = sizeof (fis_t);
+ }
+ amt >>= 2;
+ for (j = 0; j < amt; j++) {
+ fis[j] = LE_32(msg[4 + j]);
+ }
+ }
+ if (dlen && ddir == PMCIN_DATADIR_2_INI) {
+ if (ddi_dma_sync(pwp->cip_handles, 0, 0,
+ DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition check "
+ "failed at %s():%d", __func__, __LINE__);
+ }
+ }
+ return (0);
+}
diff --git a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_scsa.c b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_scsa.c
new file mode 100644
index 0000000000..43c06cf155
--- /dev/null
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_scsa.c
@@ -0,0 +1,2983 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * SCSI (SCSA) midlayer interface for the PMC driver.
+ */
+
+#include <sys/scsi/adapters/pmcs/pmcs.h>
+
+extern scsi_lun_t scsi_lun64_to_lun(scsi_lun64_t lun64);
+
+static int pmcs_scsa_tran_tgt_init(dev_info_t *, dev_info_t *,
+ scsi_hba_tran_t *, struct scsi_device *);
+static void pmcs_scsa_tran_tgt_free(dev_info_t *, dev_info_t *,
+ scsi_hba_tran_t *, struct scsi_device *);
+static int pmcs_scsa_start(struct scsi_address *, struct scsi_pkt *);
+static int pmcs_scsa_abort(struct scsi_address *, struct scsi_pkt *);
+static int pmcs_scsa_reset(struct scsi_address *, int);
+static int pmcs_scsi_reset_notify(struct scsi_address *, int,
+ void (*)(caddr_t), caddr_t);
+static int pmcs_scsa_getcap(struct scsi_address *, char *, int);
+static int pmcs_scsa_setcap(struct scsi_address *, char *, int, int);
+static int pmcs_scsa_setup_pkt(struct scsi_pkt *, int (*)(caddr_t), caddr_t);
+static void pmcs_scsa_teardown_pkt(struct scsi_pkt *);
+static int pmcs_smp_start(struct smp_pkt *);
+static int pmcs_smp_getcap(struct sas_addr *, char *);
+static int pmcs_smp_init(dev_info_t *, dev_info_t *, sas_hba_tran_t *,
+ smp_device_t *);
+static void pmcs_smp_free(dev_info_t *, dev_info_t *, sas_hba_tran_t *,
+ smp_device_t *);
+
+static int pmcs_scsi_quiesce(dev_info_t *);
+static int pmcs_scsi_unquiesce(dev_info_t *);
+
+static int pmcs_cap(struct scsi_address *, char *, int, int, int);
+static pmcs_xscsi_t *
+ pmcs_addr2xp(struct scsi_address *, uint64_t *, pmcs_cmd_t *);
+static int pmcs_SAS_run(pmcs_cmd_t *, pmcwork_t *);
+static void pmcs_SAS_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);
+
+static int pmcs_SATA_run(pmcs_cmd_t *, pmcwork_t *);
+static void pmcs_SATA_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);
+static uint8_t pmcs_SATA_rwparm(uint8_t *, uint32_t *, uint64_t *, uint64_t);
+
+static void pmcs_ioerror(pmcs_hw_t *, pmcs_dtype_t pmcs_dtype,
+ pmcwork_t *, uint32_t *);
+
+
+int
+pmcs_scsa_init(pmcs_hw_t *pwp, const ddi_dma_attr_t *ap)
+{
+ scsi_hba_tran_t *tran;
+ ddi_dma_attr_t pmcs_scsa_dattr;
+ int flags;
+
+ (void) memcpy(&pmcs_scsa_dattr, ap, sizeof (ddi_dma_attr_t));
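+	/*
+	 * The SGL limit below assumes chained SGL chunks in which the
+	 * last entry of each chunk (except the final one) links to the
+	 * next chunk, leaving PMCS_SGL_NCHUNKS - 1 usable entries per
+	 * chained chunk.
+	 */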
+ pmcs_scsa_dattr.dma_attr_sgllen =
+ ((PMCS_SGL_NCHUNKS - 1) * (PMCS_MAX_CHUNKS - 1)) + PMCS_SGL_NCHUNKS;
+ pmcs_scsa_dattr.dma_attr_flags = DDI_DMA_RELAXED_ORDERING;
+ pmcs_scsa_dattr.dma_attr_flags |= DDI_DMA_FLAGERR;
+
+ /*
+ * Allocate a transport structure
+ */
+ tran = scsi_hba_tran_alloc(pwp->dip, SCSI_HBA_CANSLEEP);
+ if (tran == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "scsi_hba_tran_alloc failed");
+ return (DDI_FAILURE);
+ }
+
+ tran->tran_hba_private = pwp;
+ tran->tran_tgt_init = pmcs_scsa_tran_tgt_init;
+ tran->tran_tgt_free = pmcs_scsa_tran_tgt_free;
+ tran->tran_start = pmcs_scsa_start;
+ tran->tran_abort = pmcs_scsa_abort;
+ tran->tran_reset = pmcs_scsa_reset;
+ tran->tran_reset_notify = pmcs_scsi_reset_notify;
+ tran->tran_getcap = pmcs_scsa_getcap;
+ tran->tran_setcap = pmcs_scsa_setcap;
+ tran->tran_setup_pkt = pmcs_scsa_setup_pkt;
+ tran->tran_teardown_pkt = pmcs_scsa_teardown_pkt;
+ tran->tran_quiesce = pmcs_scsi_quiesce;
+ tran->tran_unquiesce = pmcs_scsi_unquiesce;
+ tran->tran_interconnect_type = INTERCONNECT_SAS;
+ tran->tran_hba_len = sizeof (pmcs_cmd_t);
+
+ /*
+ * Attach this instance of the hba
+ */
+
+ flags = SCSI_HBA_TRAN_SCB | SCSI_HBA_TRAN_CDB | SCSI_HBA_ADDR_COMPLEX |
+ SCSI_HBA_TRAN_PHCI | SCSI_HBA_HBA;
+
+ if (scsi_hba_attach_setup(pwp->dip, &pmcs_scsa_dattr, tran, flags)) {
+ scsi_hba_tran_free(tran);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "scsi_hba_attach failed");
+ return (DDI_FAILURE);
+ }
+ pwp->tran = tran;
+
+ /*
+ * Attach the SMP part of this hba
+ */
+ pwp->smp_tran = sas_hba_tran_alloc(pwp->dip);
+ ASSERT(pwp->smp_tran != NULL);
+ pwp->smp_tran->tran_hba_private = pwp;
+ pwp->smp_tran->tran_smp_start = pmcs_smp_start;
+ pwp->smp_tran->tran_sas_getcap = pmcs_smp_getcap;
+ pwp->smp_tran->tran_smp_init = pmcs_smp_init;
+ pwp->smp_tran->tran_smp_free = pmcs_smp_free;
+
+ if (sas_hba_attach_setup(pwp->dip, pwp->smp_tran) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "sas_hba_attach failed");
+ sas_hba_tran_free(pwp->smp_tran);
+ pwp->smp_tran = NULL;
+ scsi_hba_tran_free(tran);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * SCSA entry points
+ */
+
+static int
+pmcs_scsa_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
+ scsi_hba_tran_t *tran, struct scsi_device *sd)
+{
+ pmcs_hw_t *pwp = NULL;
+ int rval;
+ char *variant_prop = "sata";
+ char *tgt_port = NULL, *ua = NULL;
+ pmcs_xscsi_t *tgt = NULL;
+ pmcs_iport_t *iport;
+ pmcs_lun_t *lun = NULL;
+ pmcs_phy_t *phyp = NULL;
+ uint64_t lun_num;
+ boolean_t got_scratch = B_FALSE;
+
+ /*
+ * First, make sure we're an iport and get the pointer to the HBA
+ * node's softstate
+ */
+ if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
+ pmcs_prt(TRAN2PMC(tran), PMCS_PRT_DEBUG_CONFIG,
+ "%s: We don't enumerate devices on the HBA node", __func__);
+ goto tgt_init_fail;
+ }
+
+ pwp = ITRAN2PMC(tran);
+ iport = ITRAN2IPORT(tran);
+
+ /*
+ * Get the target address
+ */
+ rval = scsi_device_prop_lookup_string(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_TARGET_PORT, &tgt_port);
+ if (rval != DDI_PROP_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "Couldn't get target UA");
+ pwp = NULL;
+ goto tgt_init_fail;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG3, "got tgt_port '%s'", tgt_port);
+
+ /*
+ * Validate that this tran_tgt_init is for an active iport.
+ */
+ if (iport->ua_state == UA_INACTIVE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Got tran_tgt_init on inactive iport for '%s'",
+ __func__, tgt_port);
+ pwp = NULL;
+ goto tgt_init_fail;
+ }
+
+ /*
+ * Since we're going to wait for scratch, be sure to acquire it while
+ * we're not holding any other locks
+ */
+ (void) pmcs_acquire_scratch(pwp, B_TRUE);
+ got_scratch = B_TRUE;
+
+ mutex_enter(&pwp->lock);
+
+ /*
+ * See if there's already a target softstate. If not, allocate one.
+ */
+ tgt = pmcs_get_target(iport, tgt_port);
+
+ if (tgt == NULL) {
+ goto tgt_init_fail;
+ }
+
+ phyp = tgt->phy;
+ if (!IS_ROOT_PHY(phyp)) {
+ pmcs_inc_phy_ref_count(phyp);
+ }
+ ASSERT(mutex_owned(&phyp->phy_lock));
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "tgt = 0x%p, dip = 0x%p",
+ (void *)tgt, (void *)tgt_dip);
+
+ /*
+ * Now get the full "w<WWN>,LUN" unit-address (including LU).
+ */
+ ua = scsi_device_unit_address(sd);
+ if (ua == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "Couldn't get LU unit address");
+ goto tgt_init_fail;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "got lun ua '%s'", ua);
+
+ lun_num = scsi_device_prop_get_int64(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_LUN64, SCSI_LUN64_ILLEGAL);
+ if (lun_num == SCSI_LUN64_ILLEGAL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "No LUN for tgt %p",
+ (void *)tgt);
+ goto tgt_init_fail;
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: @%s tgt 0x%p phy 0x%p (%s)",
+ __func__, ua, (void *)tgt, (void *)phyp, phyp->path);
+
+ mutex_enter(&tgt->statlock);
+ tgt->dtype = phyp->dtype;
+ if (tgt->dtype != SAS && tgt->dtype != SATA) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "PHY 0x%p went away?",
+ (void *)phyp);
+ goto tgt_init_fail;
+ }
+
+ /* We don't support SATA devices at LUN > 0. */
+ if ((tgt->dtype == SATA) && (lun_num > 0)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: No support for SATA devices at LUN > 0 "
+ "(target = 0x%p)", __func__, (void *)tgt);
+ goto tgt_init_fail;
+ }
+
+ /*
+ * Allocate LU soft state. We use ddi_soft_state_bystr_zalloc instead
+ * of kmem_alloc because ddi_soft_state_bystr_zalloc allows us to
+ * verify that the framework never tries to initialize two scsi_device
+ * structures with the same unit-address at the same time.
+ */
+ if (ddi_soft_state_bystr_zalloc(tgt->lun_sstate, ua) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "Couldn't allocate LU soft state");
+ goto tgt_init_fail;
+ }
+
+ lun = ddi_soft_state_bystr_get(tgt->lun_sstate, ua);
+ if (lun == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "Couldn't get LU soft state");
+ goto tgt_init_fail;
+ }
+ scsi_device_hba_private_set(sd, lun);
+ lun->lun_num = lun_num;
+
+ /* convert the scsi_lun64_t value to SCSI standard form */
+ lun->scsi_lun = scsi_lun64_to_lun(lun_num);
+
+ ASSERT(strlen(ua) < (PMCS_MAX_UA_SIZE - 1));
+ bcopy(ua, lun->unit_address, strnlen(ua, PMCS_MAX_UA_SIZE - 1));
+
+ lun->target = tgt;
+
+ /*
+ * If this is the first tran_tgt_init, add this target to our list
+ */
+ if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
+ int target;
+ for (target = 0; target < pwp->max_dev; target++) {
+ if (pwp->targets[target] != NULL) {
+ continue;
+ }
+
+ pwp->targets[target] = tgt;
+ tgt->target_num = (uint16_t)target;
+ break;
+ }
+
+ if (target == pwp->max_dev) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "Target list full.");
+ goto tgt_init_fail;
+ }
+ }
+
+ tgt->dip = sd->sd_dev;
+
+ if (!pmcs_assign_device(pwp, tgt)) {
+ pmcs_release_scratch(pwp);
+ pwp->targets[tgt->target_num] = NULL;
+ tgt->target_num = PMCS_INVALID_TARGET_NUM;
+ tgt->phy = NULL;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: pmcs_assign_device failed for target 0x%p",
+ __func__, (void *)tgt);
+ goto tgt_init_fail;
+ }
+
+ pmcs_release_scratch(pwp);
+ tgt->ref_count++;
+
+ (void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_TARGET, (uint32_t)(tgt->target_num));
+
+ /* SM-HBA */
+ if (tgt->dtype == SATA) {
+ /* TCR in PSARC/1997/281 opinion */
+ (void) scsi_device_prop_update_string(sd,
+ SCSI_DEVICE_PROP_PATH, "variant", variant_prop);
+ }
+
+ tgt->phy_addressable = PMCS_PHY_ADDRESSABLE(phyp);
+
+ if (tgt->phy_addressable) {
+ (void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_SATA_PHY, phyp->phynum);
+ }
+
+ /* SM-HBA */
+ (void) pmcs_smhba_set_scsi_device_props(pwp, phyp, sd);
+
+ mutex_exit(&tgt->statlock);
+ pmcs_unlock_phy(phyp);
+ mutex_exit(&pwp->lock);
+ scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
+ return (DDI_SUCCESS);
+
+tgt_init_fail:
+ if (got_scratch) {
+ pmcs_release_scratch(pwp);
+ }
+ if (lun) {
+ ddi_soft_state_bystr_free(tgt->lun_sstate, ua);
+ }
+ if (phyp) {
+ mutex_exit(&tgt->statlock);
+ pmcs_unlock_phy(phyp);
+ /*
+ * phyp's ref count was incremented in pmcs_new_tport.
+ * We're failing configuration, we now need to decrement it.
+ */
+ if (!IS_ROOT_PHY(phyp)) {
+ pmcs_dec_phy_ref_count(phyp);
+ }
+ phyp->target = NULL;
+ }
+ if (tgt && tgt->ref_count == 0) {
+ ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
+ }
+ if (pwp) {
+ mutex_exit(&pwp->lock);
+ }
+ if (tgt_port) {
+ scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
+ }
+ return (DDI_FAILURE);
+}
+
+static void
+pmcs_scsa_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
+ scsi_hba_tran_t *tran, struct scsi_device *sd)
+{
+ _NOTE(ARGUNUSED(hba_dip, tgt_dip));
+ pmcs_hw_t *pwp;
+ pmcs_lun_t *lun;
+ pmcs_xscsi_t *target;
+ char *unit_address;
+ pmcs_phy_t *phyp;
+
+ if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
+ pwp = TRAN2PMC(tran);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: We don't enumerate devices on the HBA node", __func__);
+ return;
+ }
+
+ lun = (pmcs_lun_t *)scsi_device_hba_private_get(sd);
+
+ ASSERT((lun != NULL) && (lun->target != NULL));
+ ASSERT(lun->target->ref_count > 0);
+
+ target = lun->target;
+
+ unit_address = lun->unit_address;
+ ddi_soft_state_bystr_free(lun->target->lun_sstate, unit_address);
+
+ pwp = ITRAN2PMC(tran);
+ mutex_enter(&pwp->lock);
+ mutex_enter(&target->statlock);
+ ASSERT(target->phy);
+ phyp = target->phy;
+
+ /*
+ * If this target still has a PHY pointer and that PHY's target pointer
+ * has been cleared, then that PHY has been reaped. In that case, there
+ * would be no need to decrement the reference count
+ */
+ if (phyp && !IS_ROOT_PHY(phyp) && phyp->target) {
+ pmcs_dec_phy_ref_count(phyp);
+ }
+
+ if (--target->ref_count == 0) {
+ /*
+ * Remove this target from our list. The target soft
+ * state will remain, and the device will remain registered
+ * with the hardware unless/until we're told the device
+ * physically went away.
+ */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Free target 0x%p (vtgt %d)", __func__, (void *)target,
+ target->target_num);
+ pwp->targets[target->target_num] = NULL;
+ target->target_num = PMCS_INVALID_TARGET_NUM;
+ /*
+ * If the target still has a PHY pointer, break the linkage
+ */
+ if (phyp) {
+ phyp->target = NULL;
+ }
+ target->phy = NULL;
+ pmcs_destroy_target(target);
+ } else {
+ mutex_exit(&target->statlock);
+ }
+
+ mutex_exit(&pwp->lock);
+}
+
+static int
+pmcs_scsa_start(struct scsi_address *ap, struct scsi_pkt *pkt)
+{
+ pmcs_cmd_t *sp = PKT2CMD(pkt);
+ pmcs_hw_t *pwp = ADDR2PMC(ap);
+ pmcs_xscsi_t *xp;
+ boolean_t blocked;
+ uint32_t hba_state;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: pkt %p sd %p cdb0=0x%02x dl=%lu",
+ __func__, (void *)pkt,
+ (void *)scsi_address_device(&pkt->pkt_address),
+ pkt->pkt_cdbp[0] & 0xff, pkt->pkt_dma_len);
+
+ if (pkt->pkt_flags & FLAG_NOINTR) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG3, "%s: nointr pkt", __func__);
+ return (TRAN_BADPKT);
+ }
+
+ sp->cmd_tag = 0;
+ pkt->pkt_state = pkt->pkt_statistics = 0;
+ pkt->pkt_reason = CMD_INCOMPLETE;
+
+ mutex_enter(&pwp->lock);
+ hba_state = pwp->state;
+ blocked = pwp->blocked;
+ mutex_exit(&pwp->lock);
+
+ if (hba_state != STATE_RUNNING) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: hba dead", __func__);
+ return (TRAN_FATAL_ERROR);
+ }
+
+ xp = pmcs_addr2xp(ap, NULL, sp);
+ if (xp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: dropping due to null target", __func__);
+ goto dead_duck;
+ }
+ ASSERT(mutex_owned(&xp->statlock));
+
+ /*
+ * First, check to see if we're dying or unassigned.
+ */
+ if (xp->dying || xp->dev_gone) {
+ mutex_exit(&xp->statlock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG3,
+ "%s: dropping due to dying/dead target 0x%p",
+ __func__, (void *)xp);
+ goto dead_duck;
+ }
+
+ /*
+ * If we're blocked (quiesced) just return.
+ */
+ if (blocked) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: hba blocked", __func__);
+ mutex_exit(&xp->statlock);
+ mutex_enter(&xp->wqlock);
+ STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
+ mutex_exit(&xp->wqlock);
+ return (TRAN_ACCEPT);
+ }
+
+ /*
+ * If we're draining or resetting, queue and return.
+ */
+ if (xp->draining || xp->resetting || xp->recover_wait) {
+ mutex_exit(&xp->statlock);
+ mutex_enter(&xp->wqlock);
+ STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
+ mutex_exit(&xp->wqlock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: draining/resetting/recovering (cnt %u)",
+ __func__, xp->actv_cnt);
+ /*
+ * By the time we get here, draining or
+ * resetting may have come and gone, not
+ * yet noticing that we had put something
+ * on the wait queue, so schedule a worker
+ * to look at this later.
+ */
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ return (TRAN_ACCEPT);
+ }
+ mutex_exit(&xp->statlock);
+
+ /*
+ * Queue this command to the tail of the wait queue.
+	 * This keeps commands from being started out of order.
+ */
+ mutex_enter(&xp->wqlock);
+ STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
+ mutex_exit(&xp->wqlock);
+
+ /*
+ * Now run the queue for this device.
+ */
+ (void) pmcs_scsa_wq_run_one(pwp, xp);
+
+ return (TRAN_ACCEPT);
+
+dead_duck:
+ pkt->pkt_state = STATE_GOT_BUS;
+ pkt->pkt_reason = CMD_DEV_GONE;
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ PMCS_CQ_RUN_LOCKED(pwp);
+ mutex_exit(&pwp->cq_lock);
+ return (TRAN_ACCEPT);
+}
+
+static int
+pmcs_scsa_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
+{
+ pmcs_hw_t *pwp = ADDR2PMC(ap);
+ pmcs_cmd_t *sp = PKT2CMD(pkt);
+ pmcs_xscsi_t *xp = sp->cmd_target;
+ pmcs_phy_t *pptr;
+ uint32_t tag;
+ uint64_t lun;
+ pmcwork_t *pwrk;
+
+ mutex_enter(&pwp->lock);
+ if (pwp->state != STATE_RUNNING) {
+ mutex_exit(&pwp->lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: hba dead", __func__);
+ return (0);
+ }
+ mutex_exit(&pwp->lock);
+
+ if (sp->cmd_lun) {
+ lun = sp->cmd_lun->lun_num;
+ } else {
+ lun = 0;
+ }
+ if (xp == NULL) {
+ return (0);
+ }
+
+ /*
+ * See if we have a real work structure associated with this cmd.
+ */
+ pwrk = pmcs_tag2wp(pwp, sp->cmd_tag);
+ if (pwrk && pwrk->arg == sp) {
+ tag = pwrk->htag;
+ pptr = pwrk->phy;
+ pwrk->timer = 0; /* we don't time this here */
+ ASSERT(pwrk->state == PMCS_WORK_STATE_ONCHIP);
+ mutex_exit(&pwrk->lock);
+ pmcs_lock_phy(pptr);
+ if (pptr->dtype == SAS) {
+ if (pmcs_ssp_tmf(pwp, pptr, SAS_ABORT_TASK, tag, lun,
+ NULL)) {
+ pptr->abort_pending = 1;
+ pmcs_unlock_phy(pptr);
+ SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
+ return (0);
+ }
+ } else {
+ /*
+ * XXX: Was the command that was active an
+ * NCQ I/O command?
+ */
+ pptr->need_rl_ext = 1;
+ if (pmcs_sata_abort_ncq(pwp, pptr)) {
+ pptr->abort_pending = 1;
+ pmcs_unlock_phy(pptr);
+ SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
+ return (0);
+ }
+ }
+ pptr->abort_pending = 1;
+ pmcs_unlock_phy(pptr);
+ SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
+ return (1);
+ }
+ if (pwrk) {
+ mutex_exit(&pwrk->lock);
+ }
+ /*
+ * Okay, those weren't the droids we were looking for.
+ * See if the command is on any of the wait queues.
+ */
+ mutex_enter(&xp->wqlock);
+ sp = NULL;
+ STAILQ_FOREACH(sp, &xp->wq, cmd_next) {
+ if (sp == PKT2CMD(pkt)) {
+ STAILQ_REMOVE(&xp->wq, sp, pmcs_cmd, cmd_next);
+ break;
+ }
+ }
+ mutex_exit(&xp->wqlock);
+ if (sp) {
+ pkt->pkt_reason = CMD_ABORTED;
+ pkt->pkt_statistics |= STAT_ABORTED;
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ PMCS_CQ_RUN_LOCKED(pwp);
+ mutex_exit(&pwp->cq_lock);
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * SCSA reset functions
+ */
+static int
+pmcs_scsa_reset(struct scsi_address *ap, int level)
+{
+ pmcs_hw_t *pwp = ADDR2PMC(ap);
+ pmcs_phy_t *pptr;
+ pmcs_xscsi_t *xp;
+ uint64_t lun = (uint64_t)-1, *lp = NULL;
+ int rval;
+
+ mutex_enter(&pwp->lock);
+ if (pwp->state != STATE_RUNNING) {
+ mutex_exit(&pwp->lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: hba dead", __func__);
+ return (0);
+ }
+ mutex_exit(&pwp->lock);
+
+ switch (level) {
+ case RESET_ALL:
+ rval = 0;
+ break;
+ case RESET_LUN:
+ /*
+ * Point lp at lun so that pmcs_addr2xp
+ * will fill out the 64 bit lun number.
+ */
+ lp = &lun;
+ /* FALLTHROUGH */
+ case RESET_TARGET:
+ xp = pmcs_addr2xp(ap, lp, NULL);
+ if (xp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: no xp found for this scsi address", __func__);
+ return (0);
+ }
+
+ if (xp->dying || xp->dev_gone) {
+ mutex_exit(&xp->statlock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Target 0x%p has gone away", __func__,
+ (void *)xp);
+ return (0);
+ }
+
+ /*
+ * If we're already performing this action, or if device
+ * state recovery is already running, just return failure.
+ */
+ if (xp->resetting || xp->recover_wait) {
+ mutex_exit(&xp->statlock);
+ return (0);
+ }
+ xp->reset_wait = 0;
+ xp->reset_success = 0;
+ xp->resetting = 1;
+ pptr = xp->phy;
+ mutex_exit(&xp->statlock);
+
+ if (pmcs_reset_dev(pwp, pptr, lun)) {
+ rval = 0;
+ } else {
+ rval = 1;
+ }
+
+ mutex_enter(&xp->statlock);
+ if (rval == 1) {
+ xp->reset_success = 1;
+ }
+ if (xp->reset_wait) {
+ xp->reset_wait = 0;
+ cv_signal(&xp->reset_cv);
+ }
+ xp->resetting = 0;
+ mutex_exit(&xp->statlock);
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ break;
+ default:
+ rval = 0;
+ break;
+ }
+
+ return (rval);
+}
+
+static int
+pmcs_scsi_reset_notify(struct scsi_address *ap, int flag,
+ void (*callback)(caddr_t), caddr_t arg)
+{
+ pmcs_hw_t *pwp = ADDR2PMC(ap);
+ return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
+ &pwp->lock, &pwp->reset_notify_listf));
+}
+
+
+static int
+pmcs_cap(struct scsi_address *ap, char *cap, int val, int tonly, int set)
+{
+ _NOTE(ARGUNUSED(val, tonly));
+ int cidx, rval = 0;
+ pmcs_xscsi_t *xp;
+
+ cidx = scsi_hba_lookup_capstr(cap);
+ if (cidx == -1) {
+ return (-1);
+ }
+
+ xp = pmcs_addr2xp(ap, NULL, NULL);
+ if (xp == NULL) {
+ return (-1);
+ }
+
+ switch (cidx) {
+ case SCSI_CAP_DMA_MAX:
+ case SCSI_CAP_INITIATOR_ID:
+ if (set == 0) {
+ rval = INT_MAX; /* argh */
+ }
+ break;
+ case SCSI_CAP_DISCONNECT:
+ case SCSI_CAP_SYNCHRONOUS:
+ case SCSI_CAP_WIDE_XFER:
+ case SCSI_CAP_PARITY:
+ case SCSI_CAP_ARQ:
+ case SCSI_CAP_UNTAGGED_QING:
+ if (set == 0) {
+ rval = 1;
+ }
+ break;
+
+ case SCSI_CAP_TAGGED_QING:
+ rval = 1;
+ break;
+
+ case SCSI_CAP_MSG_OUT:
+ case SCSI_CAP_RESET_NOTIFICATION:
+ case SCSI_CAP_QFULL_RETRIES:
+ case SCSI_CAP_QFULL_RETRY_INTERVAL:
+ break;
+ case SCSI_CAP_SCSI_VERSION:
+ if (set == 0) {
+ rval = SCSI_VERSION_3;
+ }
+ break;
+ case SCSI_CAP_INTERCONNECT_TYPE:
+ if (set) {
+ break;
+ }
+ if (xp->phy_addressable) {
+ rval = INTERCONNECT_SATA;
+ } else {
+ rval = INTERCONNECT_SAS;
+ }
+ break;
+ case SCSI_CAP_CDB_LEN:
+ if (set == 0) {
+ rval = 16;
+ }
+ break;
+ case SCSI_CAP_LUN_RESET:
+ if (set) {
+ break;
+ }
+ if (xp->dtype == SATA) {
+ rval = 0;
+ } else {
+ rval = 1;
+ }
+ break;
+ default:
+ rval = -1;
+ break;
+ }
+ mutex_exit(&xp->statlock);
+ pmcs_prt(ADDR2PMC(ap), PMCS_PRT_DEBUG3,
+ "%s: cap %s val %d set %d rval %d",
+ __func__, cap, val, set, rval);
+ return (rval);
+}
+
+/*
+ * Returns with statlock held if the xp is found.
+ * Fills in pmcs_cmd_t with values if pmcs_cmd_t pointer non-NULL.
+ */
+static pmcs_xscsi_t *
+pmcs_addr2xp(struct scsi_address *ap, uint64_t *lp, pmcs_cmd_t *sp)
+{
+ pmcs_xscsi_t *xp;
+ pmcs_lun_t *lun = (pmcs_lun_t *)
+ scsi_device_hba_private_get(scsi_address_device(ap));
+
+ if ((lun == NULL) || (lun->target == NULL)) {
+ return (NULL);
+ }
+ xp = lun->target;
+ mutex_enter(&xp->statlock);
+
+ if (xp->dying || xp->dev_gone || (xp->phy == NULL)) {
+ mutex_exit(&xp->statlock);
+ return (NULL);
+ }
+
+ if (sp != NULL) {
+ sp->cmd_target = xp;
+ sp->cmd_lun = lun;
+ }
+ if (lp) {
+ *lp = lun->lun_num;
+ }
+ return (xp);
+}
+
+static int
+pmcs_scsa_getcap(struct scsi_address *ap, char *cap, int whom)
+{
+ int r;
+ if (cap == NULL) {
+ return (-1);
+ }
+ r = pmcs_cap(ap, cap, 0, whom, 0);
+ return (r);
+}
+
+static int
+pmcs_scsa_setcap(struct scsi_address *ap, char *cap, int value, int whom)
+{
+ int r;
+ if (cap == NULL) {
+ return (-1);
+ }
+ r = pmcs_cap(ap, cap, value, whom, 1);
+ return (r);
+}
+
+static int
+pmcs_scsa_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t),
+ caddr_t cbarg)
+{
+ _NOTE(ARGUNUSED(callback, cbarg));
+ pmcs_cmd_t *sp = pkt->pkt_ha_private;
+
+ bzero(sp, sizeof (pmcs_cmd_t));
+ sp->cmd_pkt = pkt;
+ return (0);
+}
+
+static void
+pmcs_scsa_teardown_pkt(struct scsi_pkt *pkt)
+{
+ pmcs_cmd_t *sp = pkt->pkt_ha_private;
+ sp->cmd_target = NULL;
+ sp->cmd_lun = NULL;
+}
+
+static int
+pmcs_smp_getcap(struct sas_addr *ap, char *cap)
+{
+ _NOTE(ARGUNUSED(ap));
+ int ckey = -1;
+ int ret = EINVAL;
+
+ ckey = sas_hba_lookup_capstr(cap);
+ if (ckey == -1)
+ return (EINVAL);
+
+ switch (ckey) {
+ case SAS_CAP_SMP_CRC:
+ ret = 0;
+ break;
+ default:
+ ret = EINVAL;
+ break;
+ }
+ return (ret);
+}
+
+static int
+pmcs_smp_start(struct smp_pkt *pktp)
+{
+ struct pmcwork *pwrk;
+ const uint_t rdoff = SAS_SMP_MAX_PAYLOAD;
+ uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status;
+ uint64_t wwn;
+ pmcs_hw_t *pwp = pktp->pkt_address->a_hba_tran->tran_hba_private;
+ pmcs_phy_t *pptr;
+ pmcs_xscsi_t *xp;
+ uint_t reqsz, rspsz, will_retry;
+ int result;
+
+ bcopy(pktp->pkt_address->a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1, "%s: starting for wwn 0x%" PRIx64,
+ __func__, wwn);
+
+ will_retry = pktp->pkt_will_retry;
+
+ (void) pmcs_acquire_scratch(pwp, B_TRUE);
+ reqsz = pktp->pkt_reqsize;
+ if (reqsz > SAS_SMP_MAX_PAYLOAD) {
+ reqsz = SAS_SMP_MAX_PAYLOAD;
+ }
+ (void) memcpy(pwp->scratch, pktp->pkt_req, reqsz);
+
+ rspsz = pktp->pkt_rspsize;
+ if (rspsz > SAS_SMP_MAX_PAYLOAD) {
+ rspsz = SAS_SMP_MAX_PAYLOAD;
+ }
+
+ /*
+ * The request size from the SMP driver always includes 4 bytes
+ * for the CRC. The PMCS chip, however, doesn't want to see those
+ * counts as part of the transfer size.
+ */
+ reqsz -= 4;
+
+ pptr = pmcs_find_phy_by_wwn(pwp, wwn);
+ /* PHY is now locked */
+ if (pptr == NULL || pptr->dtype != EXPANDER) {
+ if (pptr) {
+ pmcs_unlock_phy(pptr);
+ }
+ pmcs_release_scratch(pwp);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: could not find phy",
+ __func__);
+ pktp->pkt_reason = ENXIO;
+ return (DDI_FAILURE);
+ }
+
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
+ if (pwrk == NULL) {
+ pmcs_unlock_phy(pptr);
+ pmcs_release_scratch(pwp);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: could not get work structure", __func__);
+		pktp->pkt_reason = will_retry ? EAGAIN : EBUSY;
+ return (DDI_FAILURE);
+ }
+
+ pwrk->arg = msg;
+ pwrk->dtype = EXPANDER;
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ pmcs_pwork(pwp, pwrk);
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_unlock_phy(pptr);
+ pmcs_release_scratch(pwp);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: could not get IQ entry",
+ __func__);
+		pktp->pkt_reason = will_retry ? EAGAIN : EBUSY;
+ return (DDI_FAILURE);
+ }
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(pptr->device_id);
+ msg[3] = LE_32(SMP_INDIRECT_RESPONSE | SMP_INDIRECT_REQUEST);
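+	/*
+	 * Indirect SMP: the request frame sits at the start of scratch
+	 * (words 8-10) and the response buffer follows at offset rdoff
+	 * (words 12-14).
+	 */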
+ msg[8] = LE_32(DWORD0(pwp->scratch_dma));
+ msg[9] = LE_32(DWORD1(pwp->scratch_dma));
+ msg[10] = LE_32(reqsz);
+ msg[11] = 0;
+ msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
+ msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
+ msg[14] = LE_32(rspsz);
+ msg[15] = 0;
+
+ COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ htag = pwrk->htag;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ pmcs_unlock_phy(pptr);
+ WAIT_FOR(pwrk, pktp->pkt_timeout * 1000, result);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_lock_phy(pptr);
+
+ if (result) {
+ pmcs_timed_out(pwp, htag, __func__);
+ if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Unable to issue SMP ABORT for htag 0x%08x",
+ __func__, htag);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Issuing SMP ABORT for htag 0x%08x",
+ __func__, htag);
+ }
+ pmcs_unlock_phy(pptr);
+ pmcs_release_scratch(pwp);
+ pktp->pkt_reason = ETIMEDOUT;
+ return (DDI_FAILURE);
+ }
+ status = LE_32(msg[2]);
+ if (status == PMCOUT_STATUS_OVERFLOW) {
+ status = PMCOUT_STATUS_OK;
+ pktp->pkt_reason = EOVERFLOW;
+ }
+ if (status != PMCOUT_STATUS_OK) {
+ const char *emsg = pmcs_status_str(status);
+ if (emsg == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "SMP operation failed (0x%x)", status);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "SMP operation failed (%s)", emsg);
+ }
+
+ if ((status == PMCOUT_STATUS_ERROR_HW_TIMEOUT) ||
+ (status == PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT)) {
+ pktp->pkt_reason = will_retry ? EAGAIN : ETIMEDOUT;
+ result = DDI_FAILURE;
+ } else if (status ==
+ PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS) {
+ xp = pptr->target;
+ if (xp == NULL) {
+ pktp->pkt_reason = EIO;
+ result = DDI_FAILURE;
+ goto out;
+ }
+ if (xp->dev_state !=
+ PMCS_DEVICE_STATE_NON_OPERATIONAL) {
+ xp->dev_state =
+ PMCS_DEVICE_STATE_NON_OPERATIONAL;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Got _IT_NEXUS_LOSS SMP status. "
+ "Tgt(0x%p) dev_state set to "
+ "_NON_OPERATIONAL", __func__,
+ (void *)xp);
+ }
+ /* ABORT any pending commands related to this device */
+ if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) != 0) {
+ pptr->abort_pending = 1;
+ pktp->pkt_reason = EIO;
+ result = DDI_FAILURE;
+ }
+ } else {
+ pktp->pkt_reason = will_retry ? EAGAIN : EIO;
+ result = DDI_FAILURE;
+ }
+ } else {
+ (void) memcpy(pktp->pkt_rsp,
+ &((uint8_t *)pwp->scratch)[rdoff], rspsz);
+ if (pktp->pkt_reason == EOVERFLOW) {
+ result = DDI_FAILURE;
+ } else {
+ result = DDI_SUCCESS;
+ }
+ }
+out:
+ pmcs_unlock_phy(pptr);
+ pmcs_release_scratch(pwp);
+ return (result);
+}
+
+static int
+pmcs_smp_init(dev_info_t *self, dev_info_t *child,
+ sas_hba_tran_t *tran, smp_device_t *smp)
+{
+ _NOTE(ARGUNUSED(tran, smp));
+ pmcs_iport_t *iport;
+ pmcs_hw_t *pwp;
+ pmcs_xscsi_t *tgt;
+ pmcs_phy_t *phy, *pphy;
+ uint64_t wwn;
+ char *addr, *tgt_port;
+ int ua_form = 1;
+
+ iport = ddi_get_soft_state(pmcs_iport_softstate,
+ ddi_get_instance(self));
+ ASSERT(iport);
+ if (iport == NULL)
+ return (DDI_FAILURE);
+ pwp = iport->pwp;
+ ASSERT(pwp);
+ if (pwp == NULL)
+ return (DDI_FAILURE);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: %s", __func__,
+ ddi_get_name(child));
+
+ /* Get "target-port" prop from devinfo node */
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
+ SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Failed to lookup prop ("
+ SCSI_ADDR_PROP_TARGET_PORT")", __func__);
+		/* Don't fail _smp_init() because we couldn't get/set a prop */
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * Validate that this tran_tgt_init is for an active iport.
+ */
+ if (iport->ua_state == UA_INACTIVE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Init on inactive iport for '%s'",
+ __func__, tgt_port);
+ ddi_prop_free(tgt_port);
+ return (DDI_FAILURE);
+ }
+
+ mutex_enter(&pwp->lock);
+
+ /* Retrieve softstate using unit-address */
+ tgt = pmcs_get_target(iport, tgt_port);
+ if (tgt == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: tgt softstate not found",
+ __func__);
+ ddi_prop_free(tgt_port);
+ mutex_exit(&pwp->lock);
+ return (DDI_FAILURE);
+ }
+
+ phy = tgt->phy;
+ ASSERT(mutex_owned(&phy->phy_lock));
+
+ if (IS_ROOT_PHY(phy)) {
+ /* Expander attached to HBA - don't ref_count it */
+ wwn = pwp->sas_wwns[0];
+ } else {
+ pmcs_inc_phy_ref_count(phy);
+
+ /*
+ * Parent (in topology) is also an expander
+ * Now that we've increased the ref count on phy, it's OK
+ * to drop the lock so we can acquire the parent's lock.
+ */
+
+ pphy = phy->parent;
+ pmcs_unlock_phy(phy);
+ pmcs_lock_phy(pphy);
+ wwn = pmcs_barray2wwn(pphy->sas_address);
+ pmcs_unlock_phy(pphy);
+ pmcs_lock_phy(phy);
+ }
+
+ /*
+ * If this is the 1st smp_init, add this to our list.
+ */
+ if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
+ int target;
+ for (target = 0; target < pwp->max_dev; target++) {
+ if (pwp->targets[target] != NULL) {
+ continue;
+ }
+
+ pwp->targets[target] = tgt;
+ tgt->target_num = (uint16_t)target;
+ tgt->assigned = 1;
+ tgt->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
+ break;
+ }
+
+ if (target == pwp->max_dev) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "Target list full.");
+ goto smp_init_fail;
+ }
+ }
+
+ if (!pmcs_assign_device(pwp, tgt)) {
+ pwp->targets[tgt->target_num] = NULL;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: pmcs_assign_device failed for target 0x%p",
+ __func__, (void *)tgt);
+ goto smp_init_fail;
+ }
+
+ pmcs_unlock_phy(phy);
+ mutex_exit(&pwp->lock);
+
+ tgt->ref_count++;
+ tgt->dtype = phy->dtype;
+
+ addr = scsi_wwn_to_wwnstr(wwn, ua_form, NULL);
+ /* XXX: Update smp devinfo node using ndi_xxx */
+ if (ndi_prop_update_string(DDI_DEV_T_NONE, child,
+ SCSI_ADDR_PROP_ATTACHED_PORT, addr) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Failed to set prop ("
+ SCSI_ADDR_PROP_ATTACHED_PORT")", __func__);
+ }
+ (void) scsi_free_wwnstr(addr);
+ ddi_prop_free(tgt_port);
+ return (DDI_SUCCESS);
+
+smp_init_fail:
+ tgt->phy = NULL;
+ tgt->target_num = PMCS_INVALID_TARGET_NUM;
+ phy->target = NULL;
+ if (!IS_ROOT_PHY(phy)) {
+ pmcs_dec_phy_ref_count(phy);
+ }
+ pmcs_unlock_phy(phy);
+ mutex_exit(&pwp->lock);
+ ddi_soft_state_bystr_free(iport->tgt_sstate, tgt->unit_address);
+ ddi_prop_free(tgt_port);
+ return (DDI_FAILURE);
+}
+
+static void
+pmcs_smp_free(dev_info_t *self, dev_info_t *child,
+ sas_hba_tran_t *tran, smp_device_t *smp)
+{
+ _NOTE(ARGUNUSED(tran, smp));
+ pmcs_iport_t *iport;
+ pmcs_hw_t *pwp;
+ pmcs_xscsi_t *tgt;
+ char *tgt_port;
+
+ iport = ddi_get_soft_state(pmcs_iport_softstate,
+ ddi_get_instance(self));
+ ASSERT(iport);
+ if (iport == NULL)
+ return;
+
+ pwp = iport->pwp;
+ if (pwp == NULL)
+ return;
+ ASSERT(pwp);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: %s", __func__,
+ ddi_get_name(child));
+
+ /* Get "target-port" prop from devinfo node */
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
+ SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Failed to lookup prop ("
+ SCSI_ADDR_PROP_TARGET_PORT")", __func__);
+ return;
+ }
+ /* Retrieve softstate using unit-address */
+ tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);
+ ddi_prop_free(tgt_port);
+
+ if (tgt == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: tgt softstate not found",
+ __func__);
+ return;
+ }
+
+ mutex_enter(&pwp->lock);
+ mutex_enter(&tgt->statlock);
+ if (tgt->phy) {
+ if (!IS_ROOT_PHY(tgt->phy)) {
+ pmcs_dec_phy_ref_count(tgt->phy);
+ }
+ }
+
+ if (--tgt->ref_count == 0) {
+ /*
+ * Remove this target from our list. The softstate
+ * will remain, and the device will remain registered
+ * with the hardware unless/until we're told that the
+ * device physically went away.
+ */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "Removing target 0x%p (vtgt %d) from target list",
+ (void *)tgt, tgt->target_num);
+ pwp->targets[tgt->target_num] = NULL;
+ tgt->target_num = PMCS_INVALID_TARGET_NUM;
+ tgt->phy->target = NULL;
+ tgt->phy = NULL;
+ }
+
+ mutex_exit(&tgt->statlock);
+ mutex_exit(&pwp->lock);
+}
+
+static int
+pmcs_scsi_quiesce(dev_info_t *dip)
+{
+ pmcs_hw_t *pwp;
+ int totactive = -1;
+ pmcs_xscsi_t *xp;
+ uint16_t target;
+
+ if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
+ return (0); /* iport */
+
+ pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
+ if (pwp == NULL) {
+ return (-1);
+ }
+ mutex_enter(&pwp->lock);
+ if (pwp->state != STATE_RUNNING) {
+ mutex_exit(&pwp->lock);
+ return (-1);
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s called", __func__);
+ pwp->blocked = 1;
+ while (totactive) {
+ totactive = 0;
+ for (target = 0; target < pwp->max_dev; target++) {
+ xp = pwp->targets[target];
+ if (xp == NULL) {
+ continue;
+ }
+ mutex_enter(&xp->statlock);
+ if (xp->actv_cnt) {
+ totactive += xp->actv_cnt;
+ xp->draining = 1;
+ }
+ mutex_exit(&xp->statlock);
+ }
+ if (totactive) {
+ cv_wait(&pwp->drain_cv, &pwp->lock);
+ }
+ /*
+		 * pwp->blocked may have been cleared in the meantime,
+		 * e.g. by a SCSI bus reset.
+ */
+ pwp->blocked = 1;
+ }
+
+ for (target = 0; target < pwp->max_dev; target++) {
+ xp = pwp->targets[target];
+ if (xp == NULL) {
+ continue;
+ }
+ mutex_enter(&xp->statlock);
+ xp->draining = 0;
+ mutex_exit(&xp->statlock);
+ }
+
+ mutex_exit(&pwp->lock);
+ if (totactive == 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s drain complete", __func__);
+ }
+ return (0);
+}
+
+static int
+pmcs_scsi_unquiesce(dev_info_t *dip)
+{
+ pmcs_hw_t *pwp;
+
+ if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
+ return (0); /* iport */
+
+ pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
+ if (pwp == NULL) {
+ return (-1);
+ }
+ mutex_enter(&pwp->lock);
+ if (pwp->state != STATE_RUNNING) {
+ mutex_exit(&pwp->lock);
+ return (-1);
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s called", __func__);
+ pwp->blocked = 0;
+ mutex_exit(&pwp->lock);
+
+ /*
+ * Run all pending commands.
+ */
+ pmcs_scsa_wq_run(pwp);
+
+ /*
+ * Complete all completed commands.
+ * This also unlocks us.
+ */
+ PMCS_CQ_RUN(pwp);
+ return (0);
+}
+
+/*
+ * Start commands for a particular device
+ * If the actual start of a command fails, return B_FALSE. Any other result
+ * is a B_TRUE return.
+ */
+boolean_t
+pmcs_scsa_wq_run_one(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
+{
+ pmcs_cmd_t *sp;
+ pmcs_phy_t *phyp;
+ pmcwork_t *pwrk;
+ boolean_t run_one, blocked;
+ int rval;
+
+ /*
+ * First, check to see if we're blocked or resource limited
+ */
+ mutex_enter(&pwp->lock);
+ blocked = pwp->blocked;
+ /*
+ * If resource_limited is set, we're resource constrained and
+ * we will run only one work request for this target.
+ */
+ run_one = pwp->resource_limited;
+ mutex_exit(&pwp->lock);
+
+ if (blocked) {
+ /* Queues will get restarted when we get unblocked */
+ return (B_TRUE);
+ }
+
+ /*
+ * Might as well verify the queue is not empty before moving on
+ */
+ mutex_enter(&xp->wqlock);
+ if (STAILQ_EMPTY(&xp->wq)) {
+ mutex_exit(&xp->wqlock);
+ return (B_TRUE);
+ }
+ mutex_exit(&xp->wqlock);
+
+ /*
+ * If we're draining or resetting, just reschedule work queue and bail.
+ */
+ mutex_enter(&xp->statlock);
+ if (xp->draining || xp->resetting || xp->special_running ||
+ xp->special_needed) {
+ mutex_exit(&xp->statlock);
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ return (B_TRUE);
+ }
+
+ /*
+ * Next, check to see if the target is alive
+ */
+ if (xp->dying || xp->dev_gone) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Flushing wait queue for dying/dead tgt 0x%p", __func__,
+ (void *)xp);
+ pmcs_flush_target_queues(pwp, xp, PMCS_TGT_WAIT_QUEUE);
+ mutex_exit(&xp->statlock);
+ return (B_TRUE);
+ }
+
+ /*
+ * Increment the PHY's ref_count now so we know it won't go away
+ * after we drop the target lock. Drop it before returning. If the
+ * PHY dies, the commands we attempt to send will fail, but at least
+ * we know we have a real PHY pointer.
+ */
+ phyp = xp->phy;
+ pmcs_inc_phy_ref_count(phyp);
+ mutex_exit(&xp->statlock);
+
+ mutex_enter(&xp->wqlock);
+ while ((sp = STAILQ_FIRST(&xp->wq)) != NULL) {
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_CBACK, phyp);
+ if (pwrk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: out of work structures", __func__);
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ break;
+ }
+ STAILQ_REMOVE_HEAD(&xp->wq, cmd_next);
+ mutex_exit(&xp->wqlock);
+
+ pwrk->xp = xp;
+ pwrk->arg = sp;
+ sp->cmd_tag = pwrk->htag;
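+		/*
+		 * Convert the packet timeout (seconds) into watchdog
+		 * ticks, defaulting to one second if none was given.
+		 */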
+ pwrk->timer = US2WT(CMD2PKT(sp)->pkt_time * 1000000);
+ if (pwrk->timer == 0) {
+ pwrk->timer = US2WT(1000000);
+ }
+
+ pwrk->dtype = xp->dtype;
+
+ if (xp->dtype == SAS) {
+ pwrk->ptr = (void *) pmcs_SAS_done;
+ if ((rval = pmcs_SAS_run(sp, pwrk)) != 0) {
+				sp->cmd_tag = 0;
+ pmcs_dec_phy_ref_count(phyp);
+ pmcs_pwork(pwp, pwrk);
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ if (rval == PMCS_WQ_RUN_FAIL_RES) {
+ return (B_FALSE);
+ } else {
+ return (B_TRUE);
+ }
+ }
+ } else {
+ ASSERT(xp->dtype == SATA);
+ pwrk->ptr = (void *) pmcs_SATA_done;
+ if ((rval = pmcs_SATA_run(sp, pwrk)) != 0) {
+				sp->cmd_tag = 0;
+ pmcs_dec_phy_ref_count(phyp);
+ pmcs_pwork(pwp, pwrk);
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ if (rval == PMCS_WQ_RUN_FAIL_RES) {
+ return (B_FALSE);
+ } else {
+ return (B_TRUE);
+ }
+ }
+ }
+
+ if (run_one) {
+ goto wq_out;
+ }
+ mutex_enter(&xp->wqlock);
+ }
+
+ mutex_exit(&xp->wqlock);
+
+wq_out:
+ pmcs_dec_phy_ref_count(phyp);
+ return (B_TRUE);
+}
+
+/*
+ * Start commands for all devices.
+ */
+void
+pmcs_scsa_wq_run(pmcs_hw_t *pwp)
+{
+ pmcs_xscsi_t *xp;
+ uint16_t target_start, target;
+ boolean_t rval = B_TRUE;
+
+ mutex_enter(&pwp->lock);
+ target_start = pwp->last_wq_dev;
+ target = target_start;
+
+ do {
+ xp = pwp->targets[target];
+ if (xp == NULL) {
+ if (++target == pwp->max_dev) {
+ target = 0;
+ }
+ continue;
+ }
+
+ mutex_exit(&pwp->lock);
+ rval = pmcs_scsa_wq_run_one(pwp, xp);
+ if (rval == B_FALSE) {
+ mutex_enter(&pwp->lock);
+ break;
+ }
+ mutex_enter(&pwp->lock);
+ if (++target == pwp->max_dev) {
+ target = 0;
+ }
+ } while (target != target_start);
+
+ if (rval) {
+ pwp->resource_limited = 0; /* Not resource-constrained */
+ } else {
+ pwp->resource_limited = 1; /* Give others a chance */
+ }
+
+ pwp->last_wq_dev = target;
+ mutex_exit(&pwp->lock);
+}
+
+/*
+ * Pull the completion queue, drop the lock and complete all elements.
+ */
+
+void
+pmcs_scsa_cq_run(void *arg)
+{
+ pmcs_cq_thr_info_t *cqti = (pmcs_cq_thr_info_t *)arg;
+ pmcs_hw_t *pwp = cqti->cq_pwp;
+ pmcs_cmd_t *sp, *nxt;
+ struct scsi_pkt *pkt;
+ pmcs_iocomp_cb_t *ioccb, *ioccb_next;
+ pmcs_cb_t callback;
+ uint32_t niodone;
+
+ DTRACE_PROBE1(pmcs__scsa__cq__run__start, pmcs_cq_thr_info_t *, cqti);
+
+ mutex_enter(&pwp->cq_lock);
+
+ while (!pwp->cq_info.cq_stop) {
+ /*
+ * First, check the I/O completion callback queue.
+ */
+
+ ioccb = pwp->iocomp_cb_head;
+ pwp->iocomp_cb_head = NULL;
+ pwp->iocomp_cb_tail = NULL;
+ mutex_exit(&pwp->cq_lock);
+
+ niodone = 0;
+
+ while (ioccb) {
+ niodone++;
+ /*
+ * Grab the lock on the work structure. The callback
+ * routine is responsible for clearing it.
+ */
+ mutex_enter(&ioccb->pwrk->lock);
+ ioccb_next = ioccb->next;
+ callback = (pmcs_cb_t)ioccb->pwrk->ptr;
+ (*callback)(pwp, ioccb->pwrk,
+ (uint32_t *)((void *)ioccb->iomb));
+ kmem_cache_free(pwp->iocomp_cb_cache, ioccb);
+ ioccb = ioccb_next;
+ }
+
+ /*
+ * Next, run the completion queue
+ */
+
+ mutex_enter(&pwp->cq_lock);
+ sp = STAILQ_FIRST(&pwp->cq);
+ STAILQ_INIT(&pwp->cq);
+ mutex_exit(&pwp->cq_lock);
+
+ DTRACE_PROBE1(pmcs__scsa__cq__run__start__loop,
+ pmcs_cq_thr_info_t *, cqti);
+
+ if (sp && pmcs_check_acc_dma_handle(pwp)) {
+ ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
+ }
+
+ while (sp) {
+ nxt = STAILQ_NEXT(sp, cmd_next);
+ pkt = CMD2PKT(sp);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG3,
+ "%s: calling completion on %p for tgt %p", __func__,
+ (void *)sp, (void *)sp->cmd_target);
+ scsi_hba_pkt_comp(pkt);
+ sp = nxt;
+ }
+
+ DTRACE_PROBE1(pmcs__scsa__cq__run__end__loop,
+ pmcs_cq_thr_info_t *, cqti);
+
+ mutex_enter(&cqti->cq_thr_lock);
+ cv_wait(&cqti->cq_cv, &cqti->cq_thr_lock);
+ mutex_exit(&cqti->cq_thr_lock);
+
+ mutex_enter(&pwp->cq_lock);
+ }
+
+ mutex_exit(&pwp->cq_lock);
+ DTRACE_PROBE1(pmcs__scsa__cq__run__stop, pmcs_cq_thr_info_t *, cqti);
+ thread_exit();
+}
+
+/*
+ * Run a SAS command. Called with pwrk->lock held, returns unlocked.
+ */
+static int
+pmcs_SAS_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
+{
+ pmcs_hw_t *pwp = CMD2PMC(sp);
+ struct scsi_pkt *pkt = CMD2PKT(sp);
+ pmcs_xscsi_t *xp = pwrk->xp;
+ uint32_t iq, *ptr;
+ sas_ssp_cmd_iu_t sc;
+
+ mutex_enter(&xp->statlock);
+ if (xp->dying || !xp->assigned) {
+ mutex_exit(&xp->statlock);
+ return (PMCS_WQ_RUN_FAIL_OTHER);
+ }
+ if ((xp->actv_cnt >= xp->qdepth) || xp->recover_wait) {
+ mutex_exit(&xp->statlock);
+ mutex_enter(&xp->wqlock);
+ STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
+ mutex_exit(&xp->wqlock);
+ return (PMCS_WQ_RUN_FAIL_OTHER);
+ }
+ GET_IO_IQ_ENTRY(pwp, ptr, pwrk->phy->device_id, iq);
+ if (ptr == NULL) {
+ mutex_exit(&xp->statlock);
+ /*
+		 * This is a temporary failure that is not likely to be
+		 * unblocked by commands completing, as the test for
+		 * scheduling the restart of work is a per-device test.
+ */
+ mutex_enter(&xp->wqlock);
+ STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
+ mutex_exit(&xp->wqlock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Failed to get IO IQ entry for tgt %d",
+ __func__, xp->target_num);
+ return (PMCS_WQ_RUN_FAIL_RES);
+
+ }
+
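+	/*
+	 * Build the SSP_INI_IO_START IOMB in place: tag, device id,
+	 * transfer length and data direction, with the SSP command IU
+	 * copied in starting at word 5 and, for data transfers, SGL
+	 * entries loaded by pmcs_dma_load().
+	 */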
+ ptr[0] =
+ LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SSP_INI_IO_START));
+ ptr[1] = LE_32(pwrk->htag);
+ ptr[2] = LE_32(pwrk->phy->device_id);
+ ptr[3] = LE_32(pkt->pkt_dma_len);
+ if (ptr[3]) {
+ ASSERT(pkt->pkt_numcookies);
+ if (pkt->pkt_dma_flags & DDI_DMA_READ) {
+ ptr[4] = LE_32(PMCIN_DATADIR_2_INI);
+ } else {
+ ptr[4] = LE_32(PMCIN_DATADIR_2_DEV);
+ }
+ if (pmcs_dma_load(pwp, sp, ptr)) {
+ mutex_exit(&pwp->iqp_lock[iq]);
+ mutex_exit(&xp->statlock);
+ mutex_enter(&xp->wqlock);
+ if (STAILQ_EMPTY(&xp->wq)) {
+ STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
+ mutex_exit(&xp->wqlock);
+ } else {
+ mutex_exit(&xp->wqlock);
+ CMD2PKT(sp)->pkt_scbp[0] = STATUS_QFULL;
+ CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
+ CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS |
+ STATE_GOT_TARGET | STATE_SENT_CMD |
+ STATE_GOT_STATUS;
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ mutex_exit(&pwp->cq_lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Failed to dma_load for tgt %d (QF)",
+ __func__, xp->target_num);
+ }
+ return (PMCS_WQ_RUN_FAIL_RES);
+ }
+ } else {
+ ptr[4] = LE_32(PMCIN_DATADIR_NONE);
+ CLEAN_MESSAGE(ptr, 12);
+ }
+ xp->actv_cnt++;
+ if (xp->actv_cnt > xp->maxdepth) {
+ xp->maxdepth = xp->actv_cnt;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: max depth now %u",
+ pwrk->phy->path, xp->maxdepth);
+ }
+ mutex_exit(&xp->statlock);
+
+
+#ifdef DEBUG
+ /*
+ * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
+ * event when this goes out on the wire.
+ */
+ ptr[4] |= PMCIN_MESSAGE_REPORT;
+#endif
+ /*
+ * Fill in the SSP IU
+ */
+
+ bzero(&sc, sizeof (sas_ssp_cmd_iu_t));
+ bcopy((uint8_t *)&sp->cmd_lun->scsi_lun, sc.lun, sizeof (scsi_lun_t));
+
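+	/*
+	 * Map the SCSA tag flags onto SAS task attributes; untagged
+	 * commands default to SIMPLE.
+	 */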
+ switch (pkt->pkt_flags & FLAG_TAGMASK) {
+ case FLAG_HTAG:
+ sc.task_attribute = SAS_CMD_TASK_ATTR_HEAD;
+ break;
+ case FLAG_OTAG:
+ sc.task_attribute = SAS_CMD_TASK_ATTR_ORDERED;
+ break;
+ case FLAG_STAG:
+ default:
+ sc.task_attribute = SAS_CMD_TASK_ATTR_SIMPLE;
+ break;
+ }
+ (void) memcpy(sc.cdb, pkt->pkt_cdbp,
+ min(SCSA_CDBLEN(sp), sizeof (sc.cdb)));
+ (void) memcpy(&ptr[5], &sc, sizeof (sas_ssp_cmd_iu_t));
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ mutex_exit(&pwrk->lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: giving pkt %p (tag %x) to the hardware", __func__,
+ (void *)pkt, pwrk->htag);
+#ifdef DEBUG
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SAS INI Message", ptr);
+#endif
+ mutex_enter(&xp->aqlock);
+ STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
+ mutex_exit(&xp->aqlock);
+ INC_IQ_ENTRY(pwp, iq);
+
+ /*
+ * If we just submitted the last command queued from device state
+ * recovery, clear the wq_recovery_tail pointer.
+ */
+ mutex_enter(&xp->wqlock);
+ if (xp->wq_recovery_tail == sp) {
+ xp->wq_recovery_tail = NULL;
+ }
+ mutex_exit(&xp->wqlock);
+
+ return (PMCS_WQ_RUN_SUCCESS);
+}
+
+/*
+ * Complete a SAS command.
+ *
+ * Called with the pwrk lock held; freeing the pwrk releases that lock.
+ */
+
+static void
+pmcs_SAS_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
+{
+ pmcs_cmd_t *sp = pwrk->arg;
+ pmcs_phy_t *pptr = pwrk->phy;
+ pmcs_xscsi_t *xp = pwrk->xp;
+ struct scsi_pkt *pkt = CMD2PKT(sp);
+ int dead;
+ uint32_t sts;
+ boolean_t aborted = B_FALSE;
+ boolean_t do_ds_recovery = B_FALSE;
+
+ ASSERT(xp != NULL);
+ ASSERT(sp != NULL);
+ ASSERT(pptr != NULL);
+
+ DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
+ (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
+ hrtime_t, gethrtime());
+
+ dead = pwrk->dead;
+
+ if (msg) {
+ sts = LE_32(msg[2]);
+ } else {
+ sts = 0;
+ }
+
+ if (dead != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: dead cmd tag 0x%x for %s",
+ __func__, pwrk->htag, pptr->path);
+ goto out;
+ }
+
+ if (sts == PMCOUT_STATUS_ABORTED) {
+ aborted = B_TRUE;
+ }
+
+ if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: cmd 0x%p (tag 0x%x) timed out for %s",
+ __func__, (void *)sp, pwrk->htag, pptr->path);
+ do_ds_recovery = B_TRUE;
+ goto out;
+ }
+
+ /*
+ * If the status isn't okay but not underflow,
+ * step to the side and parse the (possible) error.
+ */
+#ifdef DEBUG
+ if (msg) {
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
+ }
+#endif
+ if (!msg) {
+ goto out;
+ }
+
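+	/*
+	 * Pre-process the completion status: flag statuses that require
+	 * device state recovery, and fold UNDERFLOW into an OK completion
+	 * after recording the residual.
+	 */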
+ switch (sts) {
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
+ case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: PHY %s requires device state recovery (status=%d)",
+ __func__, pptr->path, sts);
+ do_ds_recovery = B_TRUE;
+ break;
+ case PMCOUT_STATUS_UNDERFLOW:
+ (void) pmcs_set_resid(pkt, pkt->pkt_dma_len, LE_32(msg[3]));
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW,
+ "%s: underflow %u for cdb 0x%x",
+ __func__, LE_32(msg[3]), pkt->pkt_cdbp[0] & 0xff);
+ sts = PMCOUT_STATUS_OK;
+ msg[3] = 0;
+ break;
+ case PMCOUT_STATUS_OK:
+ pkt->pkt_resid = 0;
+ break;
+ }
+
+ if (sts != PMCOUT_STATUS_OK) {
+ pmcs_ioerror(pwp, SAS, pwrk, msg);
+ } else {
+ if (msg[3]) {
+ uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
+ sas_ssp_rsp_iu_t *rptr = (void *)local;
+ const int lim =
+ (PMCS_QENTRY_SIZE << 1) - SAS_RSP_HDR_SIZE;
+ static const uint8_t ssp_rsp_evec[] = {
+ 0x58, 0x61, 0x56, 0x72, 0x00
+ };
+
+			/*
+			 * Transform the first part of the response to host
+			 * canonical form. This gives us enough information
+			 * to figure out what to do with the rest (which
+			 * remains unchanged in the incoming message, which
+			 * can be up to two queue entries in length).
+			 */
+ pmcs_endian_transform(pwp, local, &msg[5],
+ ssp_rsp_evec);
+ xd = (uint8_t *)(&msg[5]);
+ xd += SAS_RSP_HDR_SIZE;
+
+ if (rptr->datapres == SAS_RSP_DATAPRES_RESPONSE_DATA) {
+ if (rptr->response_data_length != 4) {
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
+ "Bad SAS RESPONSE DATA LENGTH",
+ msg);
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ goto out;
+ }
+ (void) memcpy(&sts, xd, sizeof (uint32_t));
+ sts = BE_32(sts);
+ /*
+ * The only response code we should legally get
+ * here is an INVALID FRAME response code.
+ */
+ if (sts == SAS_RSP_INVALID_FRAME) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: pkt %p tgt %u path %s "
+ "completed: INVALID FRAME response",
+ __func__, (void *)pkt,
+ xp->target_num, pptr->path);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: pkt %p tgt %u path %s "
+ "completed: illegal response 0x%x",
+ __func__, (void *)pkt,
+ xp->target_num, pptr->path, sts);
+ }
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ goto out;
+ }
+ if (rptr->datapres == SAS_RSP_DATAPRES_SENSE_DATA) {
+ uint32_t slen;
+ slen = rptr->sense_data_length;
+ if (slen > lim) {
+ slen = lim;
+ }
+ pmcs_latch_status(pwp, sp, rptr->status, xd,
+ slen, pptr->path);
+ } else if (rptr->datapres == SAS_RSP_DATAPRES_NO_DATA) {
+ /*
+ * This is the case for a plain SCSI status.
+ */
+ pmcs_latch_status(pwp, sp, rptr->status, NULL,
+ 0, pptr->path);
+ } else {
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
+ "illegal SAS response", msg);
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ goto out;
+ }
+ } else {
+ pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
+ pptr->path);
+ }
+ if (pkt->pkt_dma_len) {
+ pkt->pkt_state |= STATE_XFERRED_DATA;
+ }
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
+ __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
+ pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);
+
+ if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
+ __func__, (void *)pkt, pptr->path, (void *)pwrk);
+ aborted = B_TRUE;
+ }
+
+out:
+ pmcs_pwork(pwp, pwrk);
+ pmcs_dma_unload(pwp, sp);
+
+ mutex_enter(&xp->statlock);
+ if (xp->dev_gone) {
+ mutex_exit(&xp->statlock);
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ mutex_exit(&pwp->cq_lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: Completing command for dead target 0x%p", __func__,
+ (void *)xp);
+ return;
+ }
+
+ ASSERT(xp->actv_cnt > 0);
+ if (--(xp->actv_cnt) == 0) {
+ if (xp->draining) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: waking up drain waiters", __func__);
+ cv_signal(&pwp->drain_cv);
+ }
+ }
+ mutex_exit(&xp->statlock);
+ if (dead == 0) {
+#ifdef DEBUG
+ pmcs_cmd_t *wp;
+ mutex_enter(&xp->aqlock);
+ STAILQ_FOREACH(wp, &xp->aq, cmd_next) {
+ if (wp == sp) {
+ break;
+ }
+ }
+ ASSERT(wp != NULL);
+#else
+ mutex_enter(&xp->aqlock);
+#endif
+ STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
+ if (aborted) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Aborted cmd for tgt 0x%p, signaling waiters",
+ __func__, (void *)xp);
+ cv_signal(&xp->abort_cv);
+ }
+ mutex_exit(&xp->aqlock);
+ }
+
+ /*
+ * If do_ds_recovery is set, we need to initiate device state
+ * recovery. In this case, we put this I/O back on the head of
+ * the wait queue to run again after recovery is complete
+ */
+ if (do_ds_recovery) {
+ mutex_enter(&xp->statlock);
+ pmcs_start_dev_state_recovery(xp, pptr);
+ mutex_exit(&xp->statlock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1, "%s: Putting cmd 0x%p back on "
+ "wq during recovery for tgt 0x%p", __func__, (void *)sp,
+ (void *)xp);
+ mutex_enter(&xp->wqlock);
+ if (xp->wq_recovery_tail == NULL) {
+ STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
+ } else {
+ /*
+ * If there are other I/Os waiting at the head due to
+ * device state recovery, add this one in the right spot
+ * to maintain proper order.
+ */
+ STAILQ_INSERT_AFTER(&xp->wq, xp->wq_recovery_tail, sp,
+ cmd_next);
+ }
+ xp->wq_recovery_tail = sp;
+ mutex_exit(&xp->wqlock);
+ } else {
+ /*
+ * If we're not initiating device state recovery and this
+ * command was not "dead", put it on the completion queue
+ */
+ if (!dead) {
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ mutex_exit(&pwp->cq_lock);
+ }
+ }
+}
+
+/*
+ * Run a SATA command (normal reads and writes),
+ * or block and schedule a SATL interpretation
+ * of the command.
+ *
+ * Called with pwrk lock held, returns unlocked.
+ */
+
+static int
+pmcs_SATA_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
+{
+ pmcs_hw_t *pwp = CMD2PMC(sp);
+ struct scsi_pkt *pkt = CMD2PKT(sp);
+ pmcs_xscsi_t *xp;
+ uint8_t cdb_base, asc, tag;
+ uint32_t *ptr, iq, nblk, i, mtype;
+ fis_t fis;
+ size_t amt;
+ uint64_t lba;
+
+ xp = pwrk->xp;
+
+ /*
+ * First, see if this is just a plain read/write command.
+ * If not, we have to queue it up for processing, block
+ * any additional commands from coming in, and wake up
+ * the thread that will process this command.
+ */
+ cdb_base = pkt->pkt_cdbp[0] & 0x1f;
+ if (cdb_base != SCMD_READ && cdb_base != SCMD_WRITE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1, "%s: special SATA cmd %p",
+ __func__, (void *)sp);
+
+ ASSERT(xp->phy != NULL);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_lock_phy(xp->phy);
+ mutex_enter(&xp->statlock);
+ xp->special_needed = 1; /* Set the special_needed flag */
+ STAILQ_INSERT_TAIL(&xp->sq, sp, cmd_next);
+ if (pmcs_run_sata_special(pwp, xp)) {
+ SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
+ }
+ mutex_exit(&xp->statlock);
+ pmcs_unlock_phy(xp->phy);
+
+ return (PMCS_WQ_RUN_SUCCESS);
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: regular cmd", __func__);
+
+ mutex_enter(&xp->statlock);
+ if (xp->dying || !xp->assigned) {
+ mutex_exit(&xp->statlock);
+ return (PMCS_WQ_RUN_FAIL_OTHER);
+ }
+ if (xp->special_running || xp->special_needed || xp->recover_wait) {
+ mutex_exit(&xp->statlock);
+ mutex_enter(&xp->wqlock);
+ STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
+ mutex_exit(&xp->wqlock);
+ /*
+ * By the time we get here the special
+ * commands running or waiting to be run
+ * may have come and gone, so kick our
+ * worker to run the waiting queues
+ * just in case.
+ */
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ return (PMCS_WQ_RUN_FAIL_OTHER);
+ }
+ lba = xp->capacity;
+ mutex_exit(&xp->statlock);
+
+ /*
+ * Extract data length and lba parameters out of the command. The
+ * function pmcs_SATA_rwparm returns a non-zero ASC value if the CDB
+ * values are considered illegal.
+ */
+ asc = pmcs_SATA_rwparm(pkt->pkt_cdbp, &nblk, &lba, lba);
+ if (asc) {
+ uint8_t sns[18];
+ bzero(sns, sizeof (sns));
+ sns[0] = 0xf0;
+ sns[2] = 0x5;
+ sns[12] = asc;
+ pmcs_latch_status(pwp, sp, STATUS_CHECK, sns, sizeof (sns),
+ pwrk->phy->path);
+ pmcs_pwork(pwp, pwrk);
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ PMCS_CQ_RUN_LOCKED(pwp);
+ mutex_exit(&pwp->cq_lock);
+ return (PMCS_WQ_RUN_SUCCESS);
+ }
+
+ /*
+ * If the command decodes as not moving any data, complete it here.
+ */
+ amt = nblk;
+ amt <<= 9;
+ amt = pmcs_set_resid(pkt, amt, nblk << 9);
+ if (amt == 0) {
+ pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
+ pwrk->phy->path);
+ pmcs_pwork(pwp, pwrk);
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ PMCS_CQ_RUN_LOCKED(pwp);
+ mutex_exit(&pwp->cq_lock);
+ return (PMCS_WQ_RUN_SUCCESS);
+ }
+
+ /*
+ * Get an inbound queue entry for this I/O
+ */
+ GET_IO_IQ_ENTRY(pwp, ptr, xp->phy->device_id, iq);
+ if (ptr == NULL) {
+		/*
+		 * This is a temporary failure not likely to be unblocked by
+		 * commands completing, as the test for scheduling the
+		 * restart of work is a per-device test.
+		 */
+ mutex_enter(&xp->wqlock);
+ STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
+ mutex_exit(&xp->wqlock);
+ pmcs_dma_unload(pwp, sp);
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Failed to get IO IQ entry for tgt %d",
+ __func__, xp->target_num);
+ return (PMCS_WQ_RUN_FAIL_RES);
+ }
+
+ /*
+ * Get a tag. At this point, hold statlock until the tagmap is
+ * updated (just prior to sending the cmd to the hardware).
+ */
+ mutex_enter(&xp->statlock);
+ for (tag = 0; tag < xp->qdepth; tag++) {
+ if ((xp->tagmap & (1 << tag)) == 0) {
+ break;
+ }
+ }
+
+ if (tag == xp->qdepth) {
+ mutex_exit(&xp->statlock);
+ mutex_exit(&pwp->iqp_lock[iq]);
+ mutex_enter(&xp->wqlock);
+ STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
+ mutex_exit(&xp->wqlock);
+ return (PMCS_WQ_RUN_FAIL_OTHER);
+ }
+
+ sp->cmd_satltag = (uint8_t)tag;
+
+ /*
+ * Set up the command
+ */
+ bzero(fis, sizeof (fis));
+ ptr[0] =
+ LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SATA_HOST_IO_START));
+ ptr[1] = LE_32(pwrk->htag);
+ ptr[2] = LE_32(pwrk->phy->device_id);
+ ptr[3] = LE_32(amt);
+
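+	/*
+	 * Build the host-to-device register FIS. NCQ-capable targets get
+	 * READ/WRITE FPDMA QUEUED with the tag allocated above; otherwise
+	 * use the PIO or DMA EXT opcodes as appropriate.
+	 */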
+ if (xp->ncq) {
+ mtype = SATA_PROTOCOL_FPDMA | (tag << 16);
+ fis[0] = ((nblk & 0xff) << 24) | (C_BIT << 8) | FIS_REG_H2DEV;
+ if (cdb_base == SCMD_READ) {
+ fis[0] |= (READ_FPDMA_QUEUED << 16);
+ } else {
+ fis[0] |= (WRITE_FPDMA_QUEUED << 16);
+ }
+ fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff);
+ fis[2] = ((nblk & 0xff00) << 16) | ((lba >> 24) & 0xffffff);
+ fis[3] = tag << 3;
+ } else {
+ int op;
+ fis[0] = (C_BIT << 8) | FIS_REG_H2DEV;
+ if (xp->pio) {
+ mtype = SATA_PROTOCOL_PIO;
+ if (cdb_base == SCMD_READ) {
+ op = READ_SECTORS_EXT;
+ } else {
+ op = WRITE_SECTORS_EXT;
+ }
+ } else {
+ mtype = SATA_PROTOCOL_DMA;
+ if (cdb_base == SCMD_READ) {
+ op = READ_DMA_EXT;
+ } else {
+ op = WRITE_DMA_EXT;
+ }
+ }
+ fis[0] |= (op << 16);
+ fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff);
+ fis[2] = (lba >> 24) & 0xffffff;
+ fis[3] = nblk;
+ }
+
+ if (cdb_base == SCMD_READ) {
+ ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_INI);
+ } else {
+ ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_DEV);
+ }
+#ifdef DEBUG
+ /*
+ * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
+ * event when this goes out on the wire.
+ */
+ ptr[4] |= PMCIN_MESSAGE_REPORT;
+#endif
+ for (i = 0; i < (sizeof (fis_t))/(sizeof (uint32_t)); i++) {
+ ptr[i+5] = LE_32(fis[i]);
+ }
+ if (pmcs_dma_load(pwp, sp, ptr)) {
+ mutex_exit(&xp->statlock);
+ mutex_exit(&pwp->iqp_lock[iq]);
+ mutex_enter(&xp->wqlock);
+ STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
+ mutex_exit(&xp->wqlock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Failed to dma_load for tgt %d",
+ __func__, xp->target_num);
+ return (PMCS_WQ_RUN_FAIL_RES);
+	}
+
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ mutex_exit(&pwrk->lock);
+ xp->tagmap |= (1 << tag);
+ xp->actv_cnt++;
+ if (xp->actv_cnt > xp->maxdepth) {
+ xp->maxdepth = xp->actv_cnt;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: max depth now %u",
+ pwrk->phy->path, xp->maxdepth);
+ }
+ mutex_exit(&xp->statlock);
+ mutex_enter(&xp->aqlock);
+ STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
+ mutex_exit(&xp->aqlock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: giving pkt %p to hardware",
+ __func__, (void *)pkt);
+#ifdef DEBUG
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SATA INI Message", ptr);
+#endif
+ INC_IQ_ENTRY(pwp, iq);
+
+ return (PMCS_WQ_RUN_SUCCESS);
+}
+
+/*
+ * Complete a SATA command. Called with pwrk lock held.
+ */
+void
+pmcs_SATA_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
+{
+ pmcs_cmd_t *sp = pwrk->arg;
+ struct scsi_pkt *pkt = CMD2PKT(sp);
+ pmcs_phy_t *pptr = pwrk->phy;
+ int dead;
+ uint32_t sts;
+ pmcs_xscsi_t *xp;
+ boolean_t aborted = B_FALSE;
+
+ xp = pwrk->xp;
+ ASSERT(xp != NULL);
+
+ DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
+ (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
+ hrtime_t, gethrtime());
+
+ dead = pwrk->dead;
+
+ if (msg) {
+ sts = LE_32(msg[2]);
+ } else {
+ sts = 0;
+ }
+
+ if (dead != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: dead cmd tag 0x%x for %s",
+ __func__, pwrk->htag, pptr->path);
+ goto out;
+ }
+ if ((pwrk->state == PMCS_WORK_STATE_TIMED_OUT) &&
+ (sts != PMCOUT_STATUS_ABORTED)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: cmd 0x%p (tag 0x%x) timed out for %s",
+ __func__, (void *)sp, pwrk->htag, pptr->path);
+ CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD;
+ /* pkt_reason already set to CMD_TIMEOUT */
+ ASSERT(CMD2PKT(sp)->pkt_reason == CMD_TIMEOUT);
+ CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
+ STATE_SENT_CMD;
+ CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
+ goto out;
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: pkt %p tgt %u done",
+ __func__, (void *)pkt, xp->target_num);
+
+	/*
+	 * Dump the outbound message when debugging; if there is no
+	 * message at all, there is nothing further to decode.
+	 */
+#ifdef DEBUG
+ if (msg) {
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
+ }
+#endif
+ if (!msg) {
+ goto out;
+ }
+
+ /*
+ * If the status isn't okay or we got a FIS response of some kind,
+ * step to the side and parse the (possible) error.
+ */
+ if ((sts != PMCOUT_STATUS_OK) || (LE_32(msg[3]) != 0)) {
+ if (sts == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) {
+ mutex_exit(&pwrk->lock);
+ pmcs_lock_phy(pptr);
+ mutex_enter(&xp->statlock);
+ if ((xp->resetting == 0) && (xp->reset_success != 0) &&
+ (xp->reset_wait == 0)) {
+ mutex_exit(&xp->statlock);
+ if (pmcs_reset_phy(pwp, pptr,
+ PMCS_PHYOP_LINK_RESET) != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: PHY "
+ "(%s) Local Control/Link Reset "
+ "FAILED as part of error recovery",
+ __func__, pptr->path);
+ }
+ mutex_enter(&xp->statlock);
+ }
+ mutex_exit(&xp->statlock);
+ pmcs_unlock_phy(pptr);
+ mutex_enter(&pwrk->lock);
+ }
+ pmcs_ioerror(pwp, SATA, pwrk, msg);
+ } else {
+ pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
+ pwrk->phy->path);
+ pkt->pkt_state |= STATE_XFERRED_DATA;
+ pkt->pkt_resid = 0;
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
+ __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
+ pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);
+
+ if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
+ __func__, (void *)pkt, pptr->path, (void *)pwrk);
+ aborted = B_TRUE;
+ }
+
+out:
+ pmcs_pwork(pwp, pwrk);
+ pmcs_dma_unload(pwp, sp);
+
+ mutex_enter(&xp->statlock);
+ xp->tagmap &= ~(1 << sp->cmd_satltag);
+
+ if (xp->dev_gone) {
+ mutex_exit(&xp->statlock);
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ mutex_exit(&pwp->cq_lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: Completing command for dead target 0x%p", __func__,
+ (void *)xp);
+ return;
+ }
+
+ ASSERT(xp->actv_cnt > 0);
+ if (--(xp->actv_cnt) == 0) {
+ if (xp->draining) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: waking up drain waiters", __func__);
+ cv_signal(&pwp->drain_cv);
+ } else if (xp->special_needed) {
+ SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
+ }
+ }
+ mutex_exit(&xp->statlock);
+
+ if (dead == 0) {
+#ifdef DEBUG
+ pmcs_cmd_t *wp;
+ mutex_enter(&xp->aqlock);
+ STAILQ_FOREACH(wp, &xp->aq, cmd_next) {
+ if (wp == sp) {
+ break;
+ }
+ }
+ ASSERT(wp != NULL);
+#else
+ mutex_enter(&xp->aqlock);
+#endif
+ STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
+ if (aborted) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Aborted cmd for tgt 0x%p, signaling waiters",
+ __func__, (void *)xp);
+ cv_signal(&xp->abort_cv);
+ }
+ mutex_exit(&xp->aqlock);
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ mutex_exit(&pwp->cq_lock);
+ }
+}
+
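+/*
+ * Decode the transfer length and starting LBA from a READ/WRITE CDB
+ * (6-, 10-, 12- or 16-byte form). Returns 0 if the parameters are
+ * acceptable, or an ASC value (0x24 invalid field in CDB, 0x21 LBA
+ * out of range) to be reported as a CHECK CONDITION.
+ */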
+static uint8_t
+pmcs_SATA_rwparm(uint8_t *cdb, uint32_t *xfr, uint64_t *lba, uint64_t lbamax)
+{
+ uint8_t asc = 0;
+ switch (cdb[0]) {
+ case SCMD_READ_G5:
+ case SCMD_WRITE_G5:
+ *xfr =
+ (((uint32_t)cdb[10]) << 24) |
+ (((uint32_t)cdb[11]) << 16) |
+ (((uint32_t)cdb[12]) << 8) |
+ ((uint32_t)cdb[13]);
+ *lba =
+ (((uint64_t)cdb[2]) << 56) |
+ (((uint64_t)cdb[3]) << 48) |
+ (((uint64_t)cdb[4]) << 40) |
+ (((uint64_t)cdb[5]) << 32) |
+ (((uint64_t)cdb[6]) << 24) |
+ (((uint64_t)cdb[7]) << 16) |
+ (((uint64_t)cdb[8]) << 8) |
+ ((uint64_t)cdb[9]);
+ /* Check for illegal bits */
+ if (cdb[15]) {
+ asc = 0x24; /* invalid field in cdb */
+ }
+ break;
+ case SCMD_READ_G4:
+ case SCMD_WRITE_G4:
+ *xfr =
+ (((uint32_t)cdb[6]) << 16) |
+ (((uint32_t)cdb[7]) << 8) |
+ ((uint32_t)cdb[8]);
+ *lba =
+ (((uint32_t)cdb[2]) << 24) |
+ (((uint32_t)cdb[3]) << 16) |
+ (((uint32_t)cdb[4]) << 8) |
+ ((uint32_t)cdb[5]);
+ /* Check for illegal bits */
+ if (cdb[11]) {
+ asc = 0x24; /* invalid field in cdb */
+ }
+ break;
+ case SCMD_READ_G1:
+ case SCMD_WRITE_G1:
+ *xfr = (((uint32_t)cdb[7]) << 8) | ((uint32_t)cdb[8]);
+ *lba =
+ (((uint32_t)cdb[2]) << 24) |
+ (((uint32_t)cdb[3]) << 16) |
+ (((uint32_t)cdb[4]) << 8) |
+ ((uint32_t)cdb[5]);
+ /* Check for illegal bits */
+ if (cdb[9]) {
+ asc = 0x24; /* invalid field in cdb */
+ }
+ break;
+ case SCMD_READ:
+ case SCMD_WRITE:
+ *xfr = cdb[4];
+ if (*xfr == 0) {
+ *xfr = 256;
+ }
+ *lba =
+ (((uint32_t)cdb[1] & 0x1f) << 16) |
+ (((uint32_t)cdb[2]) << 8) |
+ ((uint32_t)cdb[3]);
+ /* Check for illegal bits */
+ if (cdb[5]) {
+ asc = 0x24; /* invalid field in cdb */
+ }
+ break;
+ }
+
+ if (asc == 0) {
+ if ((*lba + *xfr) > lbamax) {
+ asc = 0x21; /* logical block out of range */
+ }
+ }
+ return (asc);
+}
+
+/*
+ * Called with pwrk lock held.
+ */
+static void
+pmcs_ioerror(pmcs_hw_t *pwp, pmcs_dtype_t t, pmcwork_t *pwrk, uint32_t *w)
+{
+ static uint8_t por[] = {
+ 0xf0, 0x0, 0x6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x28
+ };
+ static uint8_t parity[] = {
+ 0xf0, 0x0, 0xb, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x47, 5
+ };
+ const char *msg;
+ char buf[20];
+ pmcs_cmd_t *sp = pwrk->arg;
+ pmcs_phy_t *phyp = pwrk->phy;
+ struct scsi_pkt *pkt = CMD2PKT(sp);
+ uint32_t status;
+ uint32_t resid;
+
+ ASSERT(w != NULL);
+ status = LE_32(w[2]);
+ resid = LE_32(w[3]);
+
+ msg = pmcs_status_str(status);
+ if (msg == NULL) {
+ (void) snprintf(buf, sizeof (buf), "Error 0x%x", status);
+ msg = buf;
+ }
+
+ if (status != PMCOUT_STATUS_OK) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: device %s tag 0x%x status %s @ %llu", __func__,
+ phyp->path, pwrk->htag, msg,
+ (unsigned long long)gethrtime());
+ }
+
+ pkt->pkt_reason = CMD_CMPLT; /* default reason */
+
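+	/*
+	 * Translate the chip completion status into SCSA pkt_reason and
+	 * pkt_state, synthesizing SCSI status or sense data where that
+	 * gives the target driver something more useful to act on.
+	 */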
+ switch (status) {
+ case PMCOUT_STATUS_OK:
+ if (t == SATA) {
+ int i;
+ fis_t fis;
+ for (i = 0; i < sizeof (fis) / sizeof (fis[0]); i++) {
+ fis[i] = LE_32(w[4+i]);
+ }
+ if ((fis[0] & 0xff) != FIS_REG_D2H) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "unexpected fis code 0x%x", fis[0] & 0xff);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "FIS ERROR");
+ pmcs_fis_dump(pwp, fis);
+ }
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ break;
+ }
+ pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
+ break;
+
+ case PMCOUT_STATUS_ABORTED:
+ /*
+ * Command successfully aborted.
+ */
+ if (phyp->dead) {
+ pkt->pkt_reason = CMD_DEV_GONE;
+ pkt->pkt_state = STATE_GOT_BUS;
+ } else if (pwrk->ssp_event != 0) {
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ pkt->pkt_state = STATE_GOT_BUS;
+ } else if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
+ pkt->pkt_reason = CMD_TIMEOUT;
+ pkt->pkt_statistics |= STAT_TIMEOUT;
+ pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
+ STATE_SENT_CMD;
+ } else {
+ pkt->pkt_reason = CMD_ABORTED;
+ pkt->pkt_statistics |= STAT_ABORTED;
+ pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
+ STATE_SENT_CMD;
+ }
+
+ /*
+ * PMCS_WORK_STATE_TIMED_OUT doesn't need to be preserved past
+ * this point, so go ahead and mark it as aborted.
+ */
+ pwrk->state = PMCS_WORK_STATE_ABORTED;
+ break;
+
+ case PMCOUT_STATUS_UNDERFLOW:
+ /*
+ * This will only get called for SATA
+ */
+ pkt->pkt_resid = resid;
+ if (pkt->pkt_dma_len < pkt->pkt_resid) {
+ (void) pmcs_set_resid(pkt, pkt->pkt_dma_len, resid);
+ }
+ pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
+ break;
+
+ case PMCOUT_STATUS_NO_DEVICE:
+ case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
+ pkt->pkt_reason = CMD_DEV_GONE;
+ break;
+
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ /*
+ * Need to do rediscovery. We probably have
+ * the wrong device (disk swap), so kill
+ * this one.
+ */
+ case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_EROOR:
+ /*
+ * Need to do rediscovery.
+ */
+ if (!phyp->dead) {
+ mutex_exit(&pwrk->lock);
+ pmcs_lock_phy(pwrk->phy);
+ pmcs_kill_changed(pwp, pwrk->phy, 0);
+ pmcs_unlock_phy(pwrk->phy);
+ mutex_enter(&pwrk->lock);
+ pkt->pkt_reason = CMD_INCOMPLETE;
+ pkt->pkt_state = STATE_GOT_BUS;
+ } else {
+ pkt->pkt_reason = CMD_DEV_GONE;
+ }
+ break;
+
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
+ case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
+ /* cmd is pending on the target */
+ case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
+ case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
+ /* transitory - commands sent while in NCQ failure mode */
+ case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
+ /* NCQ failure */
+ case PMCOUT_STATUS_IO_PORT_IN_RESET:
+ case PMCOUT_STATUS_XFER_ERR_BREAK:
+ case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
+ pkt->pkt_reason = CMD_INCOMPLETE;
+ pkt->pkt_state = STATE_GOT_BUS;
+ break;
+
+ case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
+ pmcs_latch_status(pwp, sp, STATUS_BUSY, NULL, 0, phyp->path);
+ break;
+
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+ /* synthesize a RESERVATION CONFLICT */
+ pmcs_latch_status(pwp, sp, STATUS_RESERVATION_CONFLICT, NULL,
+ 0, phyp->path);
+ break;
+
+ case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
+ /* synthesize a power-on/reset */
+ pmcs_latch_status(pwp, sp, STATUS_CHECK, por, sizeof (por),
+ phyp->path);
+ break;
+
+ case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
+ case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
+ case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
+ case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
+ case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
+ case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
+ /* synthesize a PARITY ERROR */
+ pmcs_latch_status(pwp, sp, STATUS_CHECK, parity,
+ sizeof (parity), phyp->path);
+ break;
+
+ case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
+ case PMCOUT_STATUS_IO_NOT_VALID:
+ case PMCOUT_STATUS_PROG_ERROR:
+ case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
+ case PMCOUT_STATUS_XFER_ERROR_SATA: /* non-NCQ failure */
+ default:
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ break;
+ }
+}
+
+/*
+ * Latch up SCSI status
+ */
+
+void
+pmcs_latch_status(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint8_t status,
+ uint8_t *snsp, size_t snslen, char *path)
+{
+ static const char c1[] =
+ "%s: Status Byte 0x%02x for CDB0=0x%02x (%02x %02x %02x) "
+ "HTAG 0x%x @ %llu";
+ static const char c2[] =
+ "%s: Status Byte 0x%02x for CDB0=0x%02x HTAG 0x%x @ %llu";
+
+ CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
+ STATE_SENT_CMD | STATE_GOT_STATUS;
+ CMD2PKT(sp)->pkt_scbp[0] = status;
+
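+	/*
+	 * For CHECK CONDITION with sense data and room for an ARQ
+	 * structure in the status area, synthesize a successful
+	 * auto-request-sense completion around the supplied sense bytes.
+	 */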
+ if (status == STATUS_CHECK && snsp &&
+ (size_t)SCSA_STSLEN(sp) >= sizeof (struct scsi_arq_status)) {
+ struct scsi_arq_status *aqp =
+ (void *) CMD2PKT(sp)->pkt_scbp;
+ size_t amt = sizeof (struct scsi_extended_sense);
+ uint8_t key = scsi_sense_key(snsp);
+ uint8_t asc = scsi_sense_asc(snsp);
+ uint8_t ascq = scsi_sense_ascq(snsp);
+ if (amt > snslen) {
+ amt = snslen;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, c1, path, status,
+ CMD2PKT(sp)->pkt_cdbp[0] & 0xff, key, asc, ascq,
+ sp->cmd_tag, (unsigned long long)gethrtime());
+ CMD2PKT(sp)->pkt_state |= STATE_ARQ_DONE;
+ (*(uint8_t *)&aqp->sts_rqpkt_status) = STATUS_GOOD;
+ aqp->sts_rqpkt_statistics = 0;
+ aqp->sts_rqpkt_reason = CMD_CMPLT;
+ aqp->sts_rqpkt_state = STATE_GOT_BUS |
+ STATE_GOT_TARGET | STATE_SENT_CMD |
+ STATE_XFERRED_DATA | STATE_GOT_STATUS;
+ (void) memcpy(&aqp->sts_sensedata, snsp, amt);
+ if (aqp->sts_sensedata.es_class != CLASS_EXTENDED_SENSE) {
+ aqp->sts_rqpkt_reason = CMD_TRAN_ERR;
+ aqp->sts_rqpkt_state = 0;
+ aqp->sts_rqpkt_resid =
+ sizeof (struct scsi_extended_sense);
+ } else {
+ aqp->sts_rqpkt_resid =
+ sizeof (struct scsi_extended_sense) - amt;
+ }
+ } else if (status) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, c2,
+ path, status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff,
+ sp->cmd_tag, (unsigned long long)gethrtime());
+ }
+
+ CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
+}
+
+/*
+ * Calculate and set packet residual and return the amount
+ * left over after applying various filters.
+ */
+size_t
+pmcs_set_resid(struct scsi_pkt *pkt, size_t amt, uint32_t cdbamt)
+{
+ pkt->pkt_resid = cdbamt;
+ if (amt > pkt->pkt_resid) {
+ amt = pkt->pkt_resid;
+ }
+ if (amt > pkt->pkt_dma_len) {
+ amt = pkt->pkt_dma_len;
+ }
+ return (amt);
+}
+
+/*
+ * Return the target softstate for the given unit address, allocating a
+ * new one if none exists yet. On success the matching PHY is returned
+ * locked; the caller must release that lock once the target/PHY linkage
+ * is established.
+ */
+pmcs_xscsi_t *
+pmcs_get_target(pmcs_iport_t *iport, char *tgt_port)
+{
+ pmcs_hw_t *pwp = iport->pwp;
+ pmcs_phy_t *phyp;
+ pmcs_xscsi_t *tgt;
+ uint64_t wwn;
+ char unit_address[PMCS_MAX_UA_SIZE];
+ int ua_form = 1;
+
+ /*
+ * Find the PHY for this target
+ */
+ phyp = pmcs_find_phy_by_sas_address(pwp, iport, NULL, tgt_port);
+ if (phyp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG3, "%s: No PHY for target @ %s",
+ __func__, tgt_port);
+ return (NULL);
+ }
+
+ tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);
+
+ if (tgt) {
+ /*
+ * There's already a target. Check its PHY pointer to see
+ * if we need to clear the old linkages
+ */
+ if (tgt->phy && (tgt->phy != phyp)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Target PHY updated from %p to %p", __func__,
+ (void *)tgt->phy, (void *)phyp);
+ if (!IS_ROOT_PHY(tgt->phy)) {
+ pmcs_dec_phy_ref_count(tgt->phy);
+ pmcs_inc_phy_ref_count(phyp);
+ }
+ tgt->phy->target = NULL;
+ }
+
+ tgt->phy = phyp;
+ phyp->target = tgt;
+ return (tgt);
+ }
+
+ /*
+ * Make sure the PHY we found is on the correct iport
+ */
+ if (phyp->iport != iport) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: No target at %s on this iport", __func__, tgt_port);
+ pmcs_unlock_phy(phyp);
+ return (NULL);
+ }
+
+ /*
+ * Allocate the new softstate
+ */
+ wwn = pmcs_barray2wwn(phyp->sas_address);
+ (void) scsi_wwn_to_wwnstr(wwn, ua_form, unit_address);
+
+ if (ddi_soft_state_bystr_zalloc(iport->tgt_sstate, unit_address) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Couldn't alloc softstate for device at %s",
+ __func__, unit_address);
+ pmcs_unlock_phy(phyp);
+ return (NULL);
+ }
+
+ tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, unit_address);
+ STAILQ_INIT(&tgt->wq);
+ STAILQ_INIT(&tgt->aq);
+ STAILQ_INIT(&tgt->sq);
+ mutex_init(&tgt->statlock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(pwp->intr_pri));
+ mutex_init(&tgt->wqlock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(pwp->intr_pri));
+ mutex_init(&tgt->aqlock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(pwp->intr_pri));
+ cv_init(&tgt->reset_cv, NULL, CV_DRIVER, NULL);
+ cv_init(&tgt->abort_cv, NULL, CV_DRIVER, NULL);
+ tgt->qdepth = 1;
+ tgt->target_num = PMCS_INVALID_TARGET_NUM;
+ bcopy(unit_address, tgt->unit_address, PMCS_MAX_UA_SIZE);
+ tgt->pwp = pwp;
+ tgt->ua = strdup(iport->ua);
+ tgt->phy = phyp;
+ ASSERT((phyp->target == NULL) || (phyp->target == tgt));
+ if (phyp->target == NULL) {
+ phyp->target = tgt;
+ }
+
+ /*
+ * Don't allocate LUN softstate for SMP targets
+ */
+ if (phyp->dtype == EXPANDER) {
+ return (tgt);
+ }
+
+ if (ddi_soft_state_bystr_init(&tgt->lun_sstate,
+ sizeof (pmcs_lun_t), PMCS_LUN_SSTATE_SZ) != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: LUN soft_state_bystr_init failed", __func__);
+ ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
+ pmcs_unlock_phy(phyp);
+ return (NULL);
+ }
+
+ return (tgt);
+}
diff --git a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_smhba.c b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_smhba.c
new file mode 100644
index 0000000000..36a5ef2dc8
--- /dev/null
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_smhba.c
@@ -0,0 +1,287 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file contains SM-HBA support for the PMC-S driver.
+ */
+
+#include <sys/scsi/adapters/pmcs/pmcs.h>
+
+
+void
+pmcs_smhba_add_hba_prop(pmcs_hw_t *pwp, data_type_t dt,
+ char *prop_name, void *prop_val)
+{
+ ASSERT(pwp != NULL);
+
+ switch (dt) {
+ case DATA_TYPE_INT32:
+ if (ddi_prop_update_int(DDI_DEV_T_NONE, pwp->dip,
+ prop_name, *(int *)prop_val)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: %s prop update failed", __func__, prop_name);
+ }
+ break;
+ case DATA_TYPE_STRING:
+ if (ddi_prop_update_string(DDI_DEV_T_NONE, pwp->dip,
+ prop_name, (char *)prop_val)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: %s prop update failed", __func__, prop_name);
+ }
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Unhandled datatype(%d) for "
+ "(%s). Skipping prop update.", __func__, dt, prop_name);
+ }
+}
+
+
+void
+pmcs_smhba_add_iport_prop(pmcs_iport_t *iport, data_type_t dt,
+ char *prop_name, void *prop_val)
+{
+ ASSERT(iport != NULL);
+
+ switch (dt) {
+ case DATA_TYPE_INT32:
+ if (ddi_prop_update_int(DDI_DEV_T_NONE, iport->dip,
+ prop_name, *(int *)prop_val)) {
+ pmcs_prt(iport->pwp, PMCS_PRT_DEBUG,
+ "%s: %s prop update failed", __func__, prop_name);
+ }
+ break;
+ case DATA_TYPE_STRING:
+ if (ddi_prop_update_string(DDI_DEV_T_NONE, iport->dip,
+ prop_name, (char *)prop_val)) {
+ pmcs_prt(iport->pwp, PMCS_PRT_DEBUG,
+ "%s: %s prop update failed", __func__, prop_name);
+ }
+ break;
+ default:
+ pmcs_prt(iport->pwp, PMCS_PRT_DEBUG, "%s: Unhandled "
+		    "datatype(%d) for (%s). Skipping prop update.",
+ __func__, dt, prop_name);
+ }
+}
+
+
+void
+pmcs_smhba_add_tgt_prop(pmcs_xscsi_t *tgt, data_type_t dt,
+ char *prop_name, void *prop_val)
+{
+ ASSERT(tgt != NULL);
+
+ switch (dt) {
+ case DATA_TYPE_INT32:
+ if (ddi_prop_update_int(DDI_DEV_T_NONE, tgt->dip,
+ prop_name, *(int *)prop_val)) {
+ pmcs_prt(tgt->pwp, PMCS_PRT_DEBUG,
+ "%s: %s prop update failed", __func__, prop_name);
+ }
+ break;
+ case DATA_TYPE_STRING:
+ if (ddi_prop_update_string(DDI_DEV_T_NONE, tgt->dip,
+ prop_name, (char *)prop_val)) {
+ pmcs_prt(tgt->pwp, PMCS_PRT_DEBUG,
+ "%s: %s prop update failed", __func__, prop_name);
+ }
+ break;
+ default:
+ pmcs_prt(tgt->pwp, PMCS_PRT_DEBUG, "%s: Unhandled datatype(%d) "
+ "for (%s). Skipping prop update.", __func__, dt, prop_name);
+ }
+}
+
+/* ARGSUSED */
+void
+pmcs_smhba_set_scsi_device_props(pmcs_hw_t *pwp, pmcs_phy_t *pptr,
+ struct scsi_device *sd)
+{
+ char *addr;
+ int ua_form = 1;
+ uint64_t wwn;
+ pmcs_phy_t *pphy;
+
+ pphy = pptr->parent;
+
+ if (pphy != NULL) {
+ addr = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
+ wwn = pmcs_barray2wwn(pphy->sas_address);
+ (void) scsi_wwn_to_wwnstr(wwn, ua_form, addr);
+
+ if (pphy->dtype == SATA) {
+ (void) scsi_device_prop_update_string(sd,
+ SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_BRIDGE_PORT, addr);
+ }
+ if (pphy->dtype == EXPANDER) {
+ (void) scsi_device_prop_update_string(sd,
+ SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_ATTACHED_PORT, addr);
+ }
+ kmem_free(addr, PMCS_MAX_UA_SIZE);
+ }
+}
+
+void
+pmcs_smhba_set_phy_props(pmcs_iport_t *iport)
+{
+ int i;
+ size_t packed_size;
+ char *packed_data;
+ pmcs_hw_t *pwp = iport->pwp;
+ pmcs_phy_t *phy_ptr;
+ nvlist_t **phy_props;
+ nvlist_t *nvl;
+
+ mutex_enter(&iport->lock);
+ if (iport->nphy == 0) {
+ mutex_exit(&iport->lock);
+ return;
+ }
+
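+	/*
+	 * Build one nvlist per PHY, gather them into a single nvlist and
+	 * publish the packed result as the SAS_PHY_INFO property on the
+	 * iport devinfo node.
+	 */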
+ if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: nvlist_alloc() failed",
+ __func__);
+ }
+
+ phy_props = kmem_zalloc(sizeof (nvlist_t *) * iport->nphy, KM_SLEEP);
+
+ for (phy_ptr = list_head(&iport->phys), i = 0;
+ phy_ptr != NULL;
+ phy_ptr = list_next(&iport->phys, phy_ptr), i++) {
+ pmcs_lock_phy(phy_ptr);
+
+ (void) nvlist_alloc(&phy_props[i], NV_UNIQUE_NAME, 0);
+
+ (void) nvlist_add_uint8(phy_props[i], SAS_PHY_ID,
+ phy_ptr->phynum);
+ (void) nvlist_add_int8(phy_props[i], SAS_NEG_LINK_RATE,
+ phy_ptr->link_rate);
+ (void) nvlist_add_int8(phy_props[i], SAS_PROG_MIN_LINK_RATE,
+ phy_ptr->state.prog_min_rate);
+ (void) nvlist_add_int8(phy_props[i], SAS_HW_MIN_LINK_RATE,
+ phy_ptr->state.hw_min_rate);
+ (void) nvlist_add_int8(phy_props[i], SAS_PROG_MAX_LINK_RATE,
+ phy_ptr->state.prog_max_rate);
+ (void) nvlist_add_int8(phy_props[i], SAS_HW_MAX_LINK_RATE,
+ phy_ptr->state.hw_max_rate);
+
+ pmcs_unlock_phy(phy_ptr);
+ }
+
+ (void) nvlist_add_nvlist_array(nvl, SAS_PHY_INFO_NVL, phy_props,
+ iport->nphy);
+
+ (void) nvlist_size(nvl, &packed_size, NV_ENCODE_NATIVE);
+ packed_data = kmem_zalloc(packed_size, KM_SLEEP);
+ (void) nvlist_pack(nvl, &packed_data, &packed_size,
+ NV_ENCODE_NATIVE, 0);
+
+ (void) ddi_prop_update_byte_array(DDI_DEV_T_NONE, iport->dip,
+ SAS_PHY_INFO, (uchar_t *)packed_data, packed_size);
+
+ for (i = 0; i < iport->nphy && phy_props[i] != NULL; i++) {
+ nvlist_free(phy_props[i]);
+ }
+ nvlist_free(nvl);
+ kmem_free(phy_props, sizeof (nvlist_t *) * iport->nphy);
+ mutex_exit(&iport->lock);
+ kmem_free(packed_data, packed_size);
+}
+
+/*
+ * Called with PHY lock held on phyp
+ */
+void
+pmcs_smhba_log_sysevent(pmcs_hw_t *pwp, char *subclass, char *etype,
+ pmcs_phy_t *phyp)
+{
+ nvlist_t *attr_list;
+ char *pname;
+ char sas_addr[PMCS_MAX_UA_SIZE];
+ uint8_t phynum = 0;
+ uint8_t lrate = 0;
+ uint64_t wwn;
+ int ua_form = 0;
+
+ if (pwp->dip == NULL)
+ return;
+ if (phyp == NULL)
+ return;
+
+ pname = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP);
+ if (pname == NULL)
+ return;
+
+ if ((strcmp(subclass, ESC_SAS_PHY_EVENT) == 0) ||
+ (strcmp(subclass, ESC_SAS_HBA_PORT_BROADCAST) == 0)) {
+ ASSERT(phyp != NULL);
+ (void) strncpy(pname, phyp->path, strlen(phyp->path));
+ phynum = phyp->phynum;
+ wwn = pmcs_barray2wwn(phyp->sas_address);
+ (void) scsi_wwn_to_wwnstr(wwn, ua_form, sas_addr);
+ if (strcmp(etype, SAS_PHY_ONLINE) == 0) {
+ lrate = phyp->link_rate;
+ }
+ }
+ if (strcmp(subclass, ESC_SAS_HBA_PORT_BROADCAST) == 0) {
+ (void) ddi_pathname(pwp->dip, pname);
+ }
+
+ if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 0) != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Failed to post sysevent",
+ __func__);
+ kmem_free(pname, MAXPATHLEN);
+ return;
+ }
+
+ if (nvlist_add_int32(attr_list, SAS_DRV_INST,
+ ddi_get_instance(pwp->dip)) != 0)
+ goto fail;
+
+ if (nvlist_add_string(attr_list, SAS_PORT_ADDR, sas_addr) != 0)
+ goto fail;
+
+ if (nvlist_add_string(attr_list, SAS_DEVFS_PATH, pname) != 0)
+ goto fail;
+
+ if (nvlist_add_uint8(attr_list, SAS_PHY_ID, phynum) != 0)
+ goto fail;
+
+ if (strcmp(etype, SAS_PHY_ONLINE) == 0) {
+ if (nvlist_add_uint8(attr_list, SAS_LINK_RATE, lrate) != 0)
+ goto fail;
+ }
+
+ if (nvlist_add_string(attr_list, SAS_EVENT_TYPE, etype) != 0)
+ goto fail;
+
+ (void) ddi_log_sysevent(pwp->dip, DDI_VENDOR_SUNW, EC_HBA, subclass,
+ attr_list, NULL, DDI_NOSLEEP);
+
+fail:
+ kmem_free(pname, MAXPATHLEN);
+ nvlist_free(attr_list);
+}
diff --git a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_subr.c b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_subr.c
new file mode 100644
index 0000000000..2cc3eb95aa
--- /dev/null
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_subr.c
@@ -0,0 +1,8258 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file contains various support routines.
+ */
+
+#include <sys/scsi/adapters/pmcs/pmcs.h>
+
+/*
+ * Local static data
+ */
+static int tgtmap_usec = MICROSEC;
+
+/*
+ * SAS Topology Configuration
+ */
+static void pmcs_new_tport(pmcs_hw_t *, pmcs_phy_t *);
+static void pmcs_configure_expander(pmcs_hw_t *, pmcs_phy_t *, pmcs_iport_t *);
+
+static boolean_t pmcs_check_expanders(pmcs_hw_t *, pmcs_phy_t *);
+static void pmcs_check_expander(pmcs_hw_t *, pmcs_phy_t *);
+static void pmcs_clear_expander(pmcs_hw_t *, pmcs_phy_t *, int);
+
+static int pmcs_expander_get_nphy(pmcs_hw_t *, pmcs_phy_t *);
+static int pmcs_expander_content_discover(pmcs_hw_t *, pmcs_phy_t *,
+ pmcs_phy_t *);
+
+static int pmcs_smp_function_result(pmcs_hw_t *, smp_response_frame_t *);
+static boolean_t pmcs_validate_devid(pmcs_phy_t *, pmcs_phy_t *, uint32_t);
+static void pmcs_clear_phys(pmcs_hw_t *, pmcs_phy_t *);
+static int pmcs_configure_new_devices(pmcs_hw_t *, pmcs_phy_t *);
+static boolean_t pmcs_report_observations(pmcs_hw_t *);
+static boolean_t pmcs_report_iport_observations(pmcs_hw_t *, pmcs_iport_t *,
+ pmcs_phy_t *);
+static pmcs_phy_t *pmcs_find_phy_needing_work(pmcs_hw_t *, pmcs_phy_t *);
+static int pmcs_kill_devices(pmcs_hw_t *, pmcs_phy_t *);
+static void pmcs_lock_phy_impl(pmcs_phy_t *, int);
+static void pmcs_unlock_phy_impl(pmcs_phy_t *, int);
+static pmcs_phy_t *pmcs_clone_phy(pmcs_phy_t *);
+static boolean_t pmcs_configure_phy(pmcs_hw_t *, pmcs_phy_t *);
+static void pmcs_reap_dead_phy(pmcs_phy_t *);
+static pmcs_iport_t *pmcs_get_iport_by_ua(pmcs_hw_t *, char *);
+static boolean_t pmcs_phy_target_match(pmcs_phy_t *);
+static void pmcs_handle_ds_recovery_error(pmcs_phy_t *phyp,
+ pmcs_xscsi_t *tgt, pmcs_hw_t *pwp, const char *func_name, int line,
+ char *reason_string);
+
+/*
+ * Often used strings
+ */
+const char pmcs_nowrk[] = "%s: unable to get work structure";
+const char pmcs_nomsg[] = "%s: unable to get Inbound Message entry";
+const char pmcs_timeo[] = "!%s: command timed out";
+
+extern const ddi_dma_attr_t pmcs_dattr;
+
+/*
+ * Some Initial setup steps.
+ */
+
+int
+pmcs_setup(pmcs_hw_t *pwp)
+{
+ uint32_t barval = pwp->mpibar;
+ uint32_t i, scratch, regbar, regoff, barbar, baroff;
+ uint32_t new_ioq_depth, ferr = 0;
+
+ /*
+ * Check current state. If we're not at READY state,
+ * we can't go further.
+ */
+ scratch = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
+ if ((scratch & PMCS_MSGU_AAP_STATE_MASK) == PMCS_MSGU_AAP_STATE_ERROR) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: AAP Error State (0x%x)",
+ __func__, pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
+ PMCS_MSGU_AAP_ERROR_MASK);
+ pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE);
+ ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
+ return (-1);
+ }
+ if ((scratch & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: AAP unit not ready (state 0x%x)",
+ __func__, scratch & PMCS_MSGU_AAP_STATE_MASK);
+ pmcs_fm_ereport(pwp, DDI_FM_DEVICE_INVAL_STATE);
+ ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
+ return (-1);
+ }
+
+ /*
+ * Read the offset from the Message Unit scratchpad 0 register.
+ * This allows us to read the MPI Configuration table.
+ *
+ * Check its signature for validity.
+ */
+ baroff = barval;
+ barbar = barval >> PMCS_MSGU_MPI_BAR_SHIFT;
+ baroff &= PMCS_MSGU_MPI_OFFSET_MASK;
+
+ regoff = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0);
+ regbar = regoff >> PMCS_MSGU_MPI_BAR_SHIFT;
+ regoff &= PMCS_MSGU_MPI_OFFSET_MASK;
+
+ if (regoff > baroff) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: bad MPI Table Length "
+ "(register offset=0x%08x, passed offset=0x%08x)", __func__,
+ regoff, baroff);
+ return (-1);
+ }
+ if (regbar != barbar) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: bad MPI BAR (register "
+		    "BAR=0x%08x, passed BAR=0x%08x)", __func__,
+ regbar, barbar);
+ return (-1);
+ }
+ pwp->mpi_offset = regoff;
+ if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS) != PMCS_SIGNATURE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Bad MPI Configuration Table Signature 0x%x", __func__,
+ pmcs_rd_mpi_tbl(pwp, PMCS_MPI_AS));
+ return (-1);
+ }
+
+ if (pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR) != PMCS_MPI_REVISION1) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Bad MPI Configuration Revision 0x%x", __func__,
+ pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IR));
+ return (-1);
+ }
+
+ /*
+ * Generate offsets for the General System, Inbound Queue Configuration
+ * and Outbound Queue configuration tables. This way the macros to
+ * access those tables will work correctly.
+ */
+ pwp->mpi_gst_offset =
+ pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_GSTO);
+ pwp->mpi_iqc_offset =
+ pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_IQCTO);
+ pwp->mpi_oqc_offset =
+ pwp->mpi_offset + pmcs_rd_mpi_tbl(pwp, PMCS_MPI_OQCTO);
+
+ pwp->fw = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FW);
+
+ pwp->max_cmd = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_MOIO);
+ pwp->max_dev = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO0) >> 16;
+
+ pwp->max_iq = PMCS_MNIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
+ pwp->max_oq = PMCS_MNOQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
+ pwp->nphy = PMCS_NPHY(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1));
+ if (pwp->max_iq <= PMCS_NIQ) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: not enough Inbound Queues "
+		    "supported (need %d, max_iq=%d)", __func__, PMCS_NIQ,
+		    pwp->max_iq);
+ return (-1);
+ }
+ if (pwp->max_oq <= PMCS_NOQ) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: not enough Outbound Queues "
+		    "supported (need %d, max_oq=%d)", __func__, PMCS_NOQ,
+		    pwp->max_oq);
+ return (-1);
+ }
+ if (pwp->nphy == 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: zero phys reported",
+ __func__);
+ return (-1);
+ }
+ if (PMCS_HPIQ(pmcs_rd_mpi_tbl(pwp, PMCS_MPI_INFO1))) {
+ pwp->hipri_queue = (1 << PMCS_IQ_OTHER);
+ }
+
+ for (i = 0; i < pwp->nphy; i++) {
+ PMCS_MPI_EVQSET(pwp, PMCS_OQ_EVENTS, i);
+ PMCS_MPI_NCQSET(pwp, PMCS_OQ_EVENTS, i);
+ }
+
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_INFO2,
+ (PMCS_OQ_EVENTS << GENERAL_EVENT_OQ_SHIFT) |
+ (PMCS_OQ_EVENTS << DEVICE_HANDLE_REMOVED_SHIFT));
+
+ /*
+ * Verify that ioq_depth is valid (> 0 and not so high that it
+ * would cause us to overrun the chip with commands).
+ */
+ if (pwp->ioq_depth == 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: I/O queue depth set to 0. Setting to %d",
+ __func__, PMCS_NQENTRY);
+ pwp->ioq_depth = PMCS_NQENTRY;
+ }
+
+ if (pwp->ioq_depth < PMCS_MIN_NQENTRY) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: I/O queue depth set too low (%d). Setting to %d",
+ __func__, pwp->ioq_depth, PMCS_MIN_NQENTRY);
+ pwp->ioq_depth = PMCS_MIN_NQENTRY;
+ }
+
+ if (pwp->ioq_depth > (pwp->max_cmd / (PMCS_IO_IQ_MASK + 1))) {
+ new_ioq_depth = pwp->max_cmd / (PMCS_IO_IQ_MASK + 1);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: I/O queue depth set too high (%d). Setting to %d",
+ __func__, pwp->ioq_depth, new_ioq_depth);
+ pwp->ioq_depth = new_ioq_depth;
+ }
+
+ /*
+ * Allocate consistent memory for OQs and IQs.
+ */
+ pwp->iqp_dma_attr = pwp->oqp_dma_attr = pmcs_dattr;
+ pwp->iqp_dma_attr.dma_attr_align =
+ pwp->oqp_dma_attr.dma_attr_align = PMCS_QENTRY_SIZE;
+
+ /*
+ * The Rev C chip has the ability to do PIO to or from consistent
+ * memory anywhere in a 64 bit address space, but the firmware is
+ * not presently set up to do so.
+ */
+ pwp->iqp_dma_attr.dma_attr_addr_hi =
+ pwp->oqp_dma_attr.dma_attr_addr_hi = 0x000000FFFFFFFFFFull;
+
+ for (i = 0; i < PMCS_NIQ; i++) {
+ if (pmcs_dma_setup(pwp, &pwp->iqp_dma_attr,
+ &pwp->iqp_acchdls[i],
+ &pwp->iqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth,
+ (caddr_t *)&pwp->iqp[i], &pwp->iqaddr[i]) == B_FALSE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Failed to setup DMA for iqp[%d]", i);
+ return (-1);
+ }
+ bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
+ }
+
+ for (i = 0; i < PMCS_NOQ; i++) {
+ if (pmcs_dma_setup(pwp, &pwp->oqp_dma_attr,
+ &pwp->oqp_acchdls[i],
+ &pwp->oqp_handles[i], PMCS_QENTRY_SIZE * pwp->ioq_depth,
+ (caddr_t *)&pwp->oqp[i], &pwp->oqaddr[i]) == B_FALSE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "Failed to setup DMA for oqp[%d]", i);
+ return (-1);
+ }
+ bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
+ }
+
+ /*
+ * Install the IQ and OQ addresses (and null out the rest).
+ */
+ for (i = 0; i < pwp->max_iq; i++) {
+ pwp->iqpi_offset[i] = pmcs_rd_iqc_tbl(pwp, PMCS_IQPIOFFX(i));
+ if (i < PMCS_NIQ) {
+ if (i != PMCS_IQ_OTHER) {
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i),
+ pwp->ioq_depth | (PMCS_QENTRY_SIZE << 16));
+ } else {
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i),
+ (1 << 30) | pwp->ioq_depth |
+ (PMCS_QENTRY_SIZE << 16));
+ }
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i),
+ DWORD1(pwp->iqaddr[i]));
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i),
+ DWORD0(pwp->iqaddr[i]));
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i),
+ DWORD1(pwp->ciaddr+IQ_OFFSET(i)));
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i),
+ DWORD0(pwp->ciaddr+IQ_OFFSET(i)));
+ } else {
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0);
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0);
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0);
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0);
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0);
+ }
+ }
+
+ for (i = 0; i < pwp->max_oq; i++) {
+ pwp->oqci_offset[i] = pmcs_rd_oqc_tbl(pwp, PMCS_OQCIOFFX(i));
+ if (i < PMCS_NOQ) {
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), pwp->ioq_depth |
+ (PMCS_QENTRY_SIZE << 16) | OQIEX);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i),
+ DWORD1(pwp->oqaddr[i]));
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i),
+ DWORD0(pwp->oqaddr[i]));
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i),
+ DWORD1(pwp->ciaddr+OQ_OFFSET(i)));
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i),
+ DWORD0(pwp->ciaddr+OQ_OFFSET(i)));
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i),
+ pwp->oqvec[i] << 24);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
+ } else {
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
+ }
+ }
+
+ /*
+ * Set up logging, if defined.
+ */
+ if (pwp->fwlog) {
+ uint64_t logdma = pwp->fwaddr;
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAH, DWORD1(logdma));
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBAL, DWORD0(logdma));
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELBS, PMCS_FWLOG_SIZE >> 1);
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_MELSEV, pwp->fwlog);
+ logdma += (PMCS_FWLOG_SIZE >> 1);
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAH, DWORD1(logdma));
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBAL, DWORD0(logdma));
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELBS, PMCS_FWLOG_SIZE >> 1);
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_IELSEV, pwp->fwlog);
+ }
+
+ /*
+ * Interrupt vectors, outbound queues, and odb_auto_clear
+ *
+ * MSI/MSI-X:
+ * If we got 4 interrupt vectors, we'll assign one to each outbound
+ * queue as well as the fatal interrupt, and auto clear can be set
+ * for each.
+ *
+	 * If we only got 2 vectors, one is used for I/O completions and
+	 * the other for the remaining two outbound queues. In this case,
+	 * auto_clear can only be set for I/Os, which is fine. The fatal
+	 * interrupt will be mapped to the PMCS_FATAL_INTERRUPT bit, which
+	 * is not an interrupt vector.
+ *
+ * MSI/MSI-X/INT-X:
+ * If we only got 1 interrupt vector, auto_clear must be set to 0,
+ * and again the fatal interrupt will be mapped to the
+ * PMCS_FATAL_INTERRUPT bit (again, not an interrupt vector).
+ */
+
+ switch (pwp->int_type) {
+ case PMCS_INT_MSIX:
+ case PMCS_INT_MSI:
+ switch (pwp->intr_cnt) {
+ case 1:
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
+ (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
+ pwp->odb_auto_clear = 0;
+ break;
+ case 2:
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
+ (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
+ pwp->odb_auto_clear = (1 << PMCS_FATAL_INTERRUPT) |
+ (1 << PMCS_MSIX_IODONE);
+ break;
+ case 4:
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, PMCS_FERRIE |
+ (PMCS_MSIX_FATAL << PMCS_FERIV_SHIFT));
+ pwp->odb_auto_clear = (1 << PMCS_MSIX_FATAL) |
+ (1 << PMCS_MSIX_GENERAL) | (1 << PMCS_MSIX_IODONE) |
+ (1 << PMCS_MSIX_EVENTS);
+ break;
+ }
+ break;
+
+ case PMCS_INT_FIXED:
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR,
+ PMCS_FERRIE | (PMCS_FATAL_INTERRUPT << PMCS_FERIV_SHIFT));
+ pwp->odb_auto_clear = 0;
+ break;
+ }
+
+ /*
+ * Enable Interrupt Reassertion
+ * Default Delay 1000us
+ */
+ ferr = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_FERR);
+ if ((ferr & PMCS_MPI_IRAE) == 0) {
+ ferr &= ~(PMCS_MPI_IRAU | PMCS_MPI_IRAD_MASK);
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, ferr | PMCS_MPI_IRAE);
+ }
+
+ pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR, pwp->odb_auto_clear);
+ pwp->mpi_table_setup = 1;
+ return (0);
+}
+
+/*
+ * Start the Message Passing protocol with the PMC chip.
+ */
+int
+pmcs_start_mpi(pmcs_hw_t *pwp)
+{
+ int i;
+
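+	/*
+	 * Ring the inbound doorbell to start MPI and poll (for up to a
+	 * second) for the chip to acknowledge by clearing the bit.
+	 */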
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPIINI);
+ for (i = 0; i < 1000; i++) {
+ if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
+ PMCS_MSGU_IBDB_MPIINI) == 0) {
+ break;
+ }
+ drv_usecwait(1000);
+ }
+ if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPIINI) {
+ return (-1);
+ }
+ drv_usecwait(500000);
+
+ /*
+ * Check to make sure we got to INIT state.
+ */
+ if (PMCS_MPI_S(pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE)) !=
+ PMCS_MPI_STATE_INIT) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: MPI launch failed (GST 0x%x "
+ "DBCLR 0x%x)", __func__,
+ pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE),
+ pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB_CLEAR));
+ return (-1);
+ }
+ return (0);
+}
+
+/*
+ * Stop the Message Passing protocol with the PMC chip.
+ */
+int
+pmcs_stop_mpi(pmcs_hw_t *pwp)
+{
+ int i;
+
+ for (i = 0; i < pwp->max_iq; i++) {
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQC_PARMX(i), 0);
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQBAHX(i), 0);
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQBALX(i), 0);
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBAHX(i), 0);
+ pmcs_wr_iqc_tbl(pwp, PMCS_IQCIBALX(i), 0);
+ }
+ for (i = 0; i < pwp->max_oq; i++) {
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQC_PARMX(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQBAHX(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQBALX(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBAHX(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQPIBALX(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQIPARM(i), 0);
+ pmcs_wr_oqc_tbl(pwp, PMCS_OQDICX(i), 0);
+ }
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_FERR, 0);
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_IBDB, PMCS_MSGU_IBDB_MPICTU);
+ for (i = 0; i < 2000; i++) {
+ if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) &
+ PMCS_MSGU_IBDB_MPICTU) == 0) {
+ break;
+ }
+ drv_usecwait(1000);
+ }
+ if (pmcs_rd_msgunit(pwp, PMCS_MSGU_IBDB) & PMCS_MSGU_IBDB_MPICTU) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: MPI stop failed", __func__);
+ return (-1);
+ }
+ return (0);
+}
+
+/*
+ * Do a sequence of ECHO messages to test for MPI functionality,
+ * all inbound and outbound queue functionality and interrupts.
+ */
+int
+pmcs_echo_test(pmcs_hw_t *pwp)
+{
+ echo_test_t fred;
+ struct pmcwork *pwrk;
+ uint32_t *msg, count;
+ int iqe = 0, iqo = 0, result, rval = 0;
+ int iterations;
+ hrtime_t echo_start, echo_end, echo_total;
+
+ ASSERT(pwp->max_cmd > 0);
+
+ /*
+ * We want iterations to be max_cmd * 3 to ensure that we run the
+ * echo test enough times to iterate through every inbound queue
+ * at least twice.
+ */
+ iterations = pwp->max_cmd * 3;
+
+ echo_total = 0;
+ count = 0;
+
+ while (count < iterations) {
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
+ if (pwrk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nowrk, __func__);
+ rval = -1;
+ break;
+ }
+
+ mutex_enter(&pwp->iqp_lock[iqe]);
+ msg = GET_IQ_ENTRY(pwp, iqe);
+ if (msg == NULL) {
+ mutex_exit(&pwp->iqp_lock[iqe]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ rval = -1;
+ break;
+ }
+
+ bzero(msg, PMCS_QENTRY_SIZE);
+
+ if (iqe == PMCS_IQ_OTHER) {
+ /* This is on the high priority queue */
+ msg[0] = LE_32(PMCS_HIPRI(pwp, iqo, PMCIN_ECHO));
+ } else {
+ msg[0] = LE_32(PMCS_IOMB_IN_SAS(iqo, PMCIN_ECHO));
+ }
+ msg[1] = LE_32(pwrk->htag);
+ fred.signature = 0xdeadbeef;
+ fred.count = count;
+ fred.ptr = &count;
+ (void) memcpy(&msg[2], &fred, sizeof (fred));
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+
+ INC_IQ_ENTRY(pwp, iqe);
+
+ echo_start = gethrtime();
+ DTRACE_PROBE2(pmcs__echo__test__wait__start,
+ hrtime_t, echo_start, uint32_t, pwrk->htag);
+
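+		/*
+		 * Rotate through all inbound and outbound queues so each
+		 * queue pair gets exercised.
+		 */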
+ if (++iqe == PMCS_NIQ) {
+ iqe = 0;
+ }
+ if (++iqo == PMCS_NOQ) {
+ iqo = 0;
+ }
+
+ WAIT_FOR(pwrk, 250, result);
+
+ echo_end = gethrtime();
+ DTRACE_PROBE2(pmcs__echo__test__wait__end,
+ hrtime_t, echo_end, int, result);
+
+ echo_total += (echo_end - echo_start);
+
+ pmcs_pwork(pwp, pwrk);
+ if (result) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: command timed out on echo test #%d",
+ __func__, count);
+ rval = -1;
+ break;
+ }
+ count++;
+ }
+
+ /*
+ * Derive the interrupt coalescing latency from the average echo
+ * round trip, halved to remove overhead such as the delay in
+ * getting signaled for completion; intr_threshold is then derived
+ * from it via PMCS_INTR_THRESHOLD.
+ */
+ if (echo_total != 0) {
+ pwp->io_intr_coal.intr_latency =
+ (echo_total / iterations) / 2;
+ pwp->io_intr_coal.intr_threshold =
+ PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 /
+ pwp->io_intr_coal.intr_latency);
+ }
+
+ return (rval);
+}
+
+/*
+ * Start the (real) phys
+ */
+int
+pmcs_start_phy(pmcs_hw_t *pwp, int phynum, int linkmode, int speed)
+{
+ int result;
+ uint32_t *msg;
+ struct pmcwork *pwrk;
+ pmcs_phy_t *pptr;
+ sas_identify_af_t sap;
+
+ mutex_enter(&pwp->lock);
+ pptr = pwp->root_phys + phynum;
+ if (pptr == NULL) {
+ mutex_exit(&pwp->lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: cannot find port %d",
+ __func__, phynum);
+ return (0);
+ }
+
+ pmcs_lock_phy(pptr);
+ mutex_exit(&pwp->lock);
+
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
+ if (pwrk == NULL) {
+ pmcs_unlock_phy(pptr);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nowrk, __func__);
+ return (-1);
+ }
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ if (msg == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_unlock_phy(pptr);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ return (-1);
+ }
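+ /*
+ * Build the PHY START command: link mode, speed and phy number,
+ * followed by the SAS identify address frame for this phy.
+ */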
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_START));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(linkmode | speed | phynum);
+ bzero(&sap, sizeof (sap));
+ sap.device_type = SAS_IF_DTYPE_ENDPOINT;
+ sap.ssp_ini_port = 1;
+
+ if (pwp->separate_ports) {
+ pmcs_wwn2barray(pwp->sas_wwns[phynum], sap.sas_address);
+ } else {
+ pmcs_wwn2barray(pwp->sas_wwns[0], sap.sas_address);
+ }
+
+ ASSERT(phynum < SAS2_PHYNUM_MAX);
+ sap.phy_identifier = phynum & SAS2_PHYNUM_MASK;
+ (void) memcpy(&msg[3], &sap, sizeof (sas_identify_af_t));
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
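+ /* Record the programmed and hardware link rate limits for this phy. */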
+ pptr->state.prog_min_rate = (lowbit((ulong_t)speed) - 1);
+ pptr->state.prog_max_rate = (highbit((ulong_t)speed) - 1);
+ pptr->state.hw_min_rate = PMCS_HW_MIN_LINK_RATE;
+ pptr->state.hw_max_rate = PMCS_HW_MAX_LINK_RATE;
+
+ pmcs_unlock_phy(pptr);
+ WAIT_FOR(pwrk, 1000, result);
+ pmcs_pwork(pwp, pwrk);
+
+ if (result) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_timeo, __func__);
+ } else {
+ mutex_enter(&pwp->lock);
+ pwp->phys_started |= (1 << phynum);
+ mutex_exit(&pwp->lock);
+ }
+
+ return (0);
+}
+
+int
+pmcs_start_phys(pmcs_hw_t *pwp)
+{
+ int i;
+
+ for (i = 0; i < pwp->nphy; i++) {
+ if ((pwp->phyid_block_mask & (1 << i)) == 0) {
+ if (pmcs_start_phy(pwp, i,
+ (pwp->phymode << PHY_MODE_SHIFT),
+ pwp->physpeed << PHY_LINK_SHIFT)) {
+ return (-1);
+ }
+ if (pmcs_clear_diag_counters(pwp, i)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: failed to "
+ "reset counters on PHY (%d)", __func__, i);
+ }
+ }
+ }
+ return (0);
+}
+
+/*
+ * Called with PHY locked
+ */
+int
+pmcs_reset_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t type)
+{
+ uint32_t *msg;
+ uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
+ const char *mbar;
+ uint32_t amt;
+ uint32_t pdevid;
+ uint32_t stsoff;
+ uint32_t status;
+ int result, level, phynum;
+ struct pmcwork *pwrk;
+ uint32_t htag;
+
+ ASSERT(mutex_owned(&pptr->phy_lock));
+
+ bzero(iomb, PMCS_QENTRY_SIZE);
+ phynum = pptr->phynum;
+ level = pptr->level;
+ if (level > 0) {
+ pdevid = pptr->parent->device_id;
+ }
+
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
+
+ if (pwrk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nowrk, __func__);
+ return (ENOMEM);
+ }
+
+ pwrk->arg = iomb;
+
+ /*
+ * If level > 0, we need to issue an SMP_REQUEST with a PHY_CONTROL
+ * function to do either a link reset or hard reset. If level == 0,
+ * then we do a LOCAL_PHY_CONTROL IOMB to do link/hard reset to the
+ * root (local) PHY
+ */
+ if (level) {
+ stsoff = 2;
+ iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
+ PMCIN_SMP_REQUEST));
+ iomb[1] = LE_32(pwrk->htag);
+ iomb[2] = LE_32(pdevid);
+ iomb[3] = LE_32(40 << SMP_REQUEST_LENGTH_SHIFT);
+ /*
+ * Send SMP PHY CONTROL/HARD or LINK RESET
+ */
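+ /* SMP header: frame type 0x40 (SMP REQUEST), function 0x91 (PHY CONTROL) */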
+ iomb[4] = BE_32(0x40910000);
+ iomb[5] = 0;
+
+ if (type == PMCS_PHYOP_HARD_RESET) {
+ mbar = "SMP PHY CONTROL/HARD RESET";
+ iomb[6] = BE_32((phynum << 24) |
+ (PMCS_PHYOP_HARD_RESET << 16));
+ } else {
+ mbar = "SMP PHY CONTROL/LINK RESET";
+ iomb[6] = BE_32((phynum << 24) |
+ (PMCS_PHYOP_LINK_RESET << 16));
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: sending %s to %s for phy 0x%x",
+ __func__, mbar, pptr->parent->path, pptr->phynum);
+ amt = 7;
+ } else {
+ /*
+ * Unlike most other Outbound messages, status for
+ * a local phy operation is in DWORD 3.
+ */
+ stsoff = 3;
+ iomb[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
+ PMCIN_LOCAL_PHY_CONTROL));
+ iomb[1] = LE_32(pwrk->htag);
+ if (type == PMCS_PHYOP_LINK_RESET) {
+ mbar = "LOCAL PHY LINK RESET";
+ iomb[2] = LE_32((PMCS_PHYOP_LINK_RESET << 8) | phynum);
+ } else {
+ mbar = "LOCAL PHY HARD RESET";
+ iomb[2] = LE_32((PMCS_PHYOP_HARD_RESET << 8) | phynum);
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: sending %s to %s", __func__, mbar, pptr->path);
+ amt = 3;
+ }
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (msg == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ return (ENOMEM);
+ }
+ COPY_MESSAGE(msg, iomb, amt);
+ htag = pwrk->htag;
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ pmcs_unlock_phy(pptr);
+ WAIT_FOR(pwrk, 1000, result);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_lock_phy(pptr);
+
+ if (result) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_timeo, __func__);
+
+ if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Unable to issue SMP abort for htag 0x%08x",
+ __func__, htag);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Issuing SMP ABORT for htag 0x%08x",
+ __func__, htag);
+ }
+ return (EIO);
+ }
+ status = LE_32(iomb[stsoff]);
+
+ if (status != PMCOUT_STATUS_OK) {
+ char buf[32];
+ const char *es = pmcs_status_str(status);
+ if (es == NULL) {
+ (void) snprintf(buf, sizeof (buf), "Status 0x%x",
+ status);
+ es = buf;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: %s action returned %s for %s", __func__, mbar, es,
+ pptr->path);
+ return (EIO);
+ }
+
+ return (0);
+}
+
+/*
+ * Stop the (real) phys. No PHY or softstate locks are required as this only
+ * happens during detach.
+ */
+void
+pmcs_stop_phy(pmcs_hw_t *pwp, int phynum)
+{
+ int result;
+ pmcs_phy_t *pptr;
+ uint32_t *msg;
+ struct pmcwork *pwrk;
+
+ pptr = pwp->root_phys + phynum;
+ if (pptr == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: unable to find port %d", __func__, phynum);
+ return;
+ }
+
+ if (pwp->phys_started & (1 << phynum)) {
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
+
+ if (pwrk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nowrk, __func__);
+ return;
+ }
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ if (msg == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ return;
+ }
+
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_PHY_STOP));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(phynum);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ /*
+ * Make this unconfigured now.
+ */
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ WAIT_FOR(pwrk, 1000, result);
+
+ pmcs_pwork(pwp, pwrk);
+ if (result) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_timeo, __func__);
+ }
+
+ pwp->phys_started &= ~(1 << phynum);
+ }
+
+ pptr->configured = 0;
+}
+
+/*
+ * No locks should be required as this is only called during detach
+ */
+void
+pmcs_stop_phys(pmcs_hw_t *pwp)
+{
+ int i;
+ for (i = 0; i < pwp->nphy; i++) {
+ if ((pwp->phyid_block_mask & (1 << i)) == 0) {
+ pmcs_stop_phy(pwp, i);
+ }
+ }
+}
+
+/*
+ * Run SAS_DIAG_EXECUTE with cmd and cmd_desc passed.
+ * ERR_CNT_RESET: return status of cmd
+ * DIAG_REPORT_GET: return value of the counter
+ */
+int
+pmcs_sas_diag_execute(pmcs_hw_t *pwp, uint32_t cmd, uint32_t cmd_desc,
+ uint8_t phynum)
+{
+ uint32_t htag, *ptr, status, msg[PMCS_MSG_SIZE << 1];
+ int result;
+ struct pmcwork *pwrk;
+
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
+ if (pwrk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nowrk, __func__);
+ return (DDI_FAILURE);
+ }
+ pwrk->arg = msg;
+ htag = pwrk->htag;
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_SAS_DIAG_EXECUTE));
+ msg[1] = LE_32(htag);
+ msg[2] = LE_32((cmd << PMCS_DIAG_CMD_SHIFT) |
+ (cmd_desc << PMCS_DIAG_CMD_DESC_SHIFT) | phynum);
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ return (DDI_FAILURE);
+ }
+ COPY_MESSAGE(ptr, msg, 3);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ WAIT_FOR(pwrk, 1000, result);
+
+ pmcs_pwork(pwp, pwrk);
+
+ if (result) {
+ pmcs_timed_out(pwp, htag, __func__);
+ return (DDI_FAILURE);
+ }
+
+ status = LE_32(msg[3]);
+
+ /* Return for counter reset */
+ if (cmd == PMCS_ERR_CNT_RESET)
+ return (status);
+
+ /* Return for counter value */
+ if (status) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: failed, status (0x%x)",
+ __func__, status);
+ return (DDI_FAILURE);
+ }
+ return (LE_32(msg[4]));
+}
+
+/* Get the current value of the counter for desc on phynum and return it. */
+int
+pmcs_get_diag_report(pmcs_hw_t *pwp, uint32_t desc, uint8_t phynum)
+{
+ return (pmcs_sas_diag_execute(pwp, PMCS_DIAG_REPORT_GET, desc, phynum));
+}
+
+/* Clear all of the counters for phynum. Returns the status of the command. */
+int
+pmcs_clear_diag_counters(pmcs_hw_t *pwp, uint8_t phynum)
+{
+ uint32_t cmd = PMCS_ERR_CNT_RESET;
+ uint32_t cmd_desc;
+
+ cmd_desc = PMCS_INVALID_DWORD_CNT;
+ if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
+ return (DDI_FAILURE);
+
+ cmd_desc = PMCS_DISPARITY_ERR_CNT;
+ if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
+ return (DDI_FAILURE);
+
+ cmd_desc = PMCS_LOST_DWORD_SYNC_CNT;
+ if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
+ return (DDI_FAILURE);
+
+ cmd_desc = PMCS_RESET_FAILED_CNT;
+ if (pmcs_sas_diag_execute(pwp, cmd, cmd_desc, phynum))
+ return (DDI_FAILURE);
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Get firmware timestamp
+ */
+int
+pmcs_get_time_stamp(pmcs_hw_t *pwp, uint64_t *ts)
+{
+ uint32_t htag, *ptr, msg[PMCS_MSG_SIZE << 1];
+ int result;
+ struct pmcwork *pwrk;
+
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, NULL);
+ if (pwrk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nowrk, __func__);
+ return (-1);
+ }
+ pwrk->arg = msg;
+ htag = pwrk->htag;
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_EVENTS, PMCIN_GET_TIME_STAMP));
+ msg[1] = LE_32(pwrk->htag);
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ return (-1);
+ }
+ COPY_MESSAGE(ptr, msg, 2);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ WAIT_FOR(pwrk, 1000, result);
+
+ pmcs_pwork(pwp, pwrk);
+
+ if (result) {
+ pmcs_timed_out(pwp, htag, __func__);
+ return (-1);
+ }
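+ /*
+ * The 64-bit firmware timestamp is returned as two 32-bit words:
+ * low half in DWORD 2, high half in DWORD 3.
+ */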
+ *ts = LE_32(msg[2]) | (((uint64_t)LE_32(msg[3])) << 32);
+ return (0);
+}
+
+/*
+ * Dump all pertinent registers
+ */
+
+void
+pmcs_register_dump(pmcs_hw_t *pwp)
+{
+ int i;
+ uint32_t val;
+
+ pmcs_prt(pwp, PMCS_PRT_INFO, "pmcs%d: Register dump start",
+ ddi_get_instance(pwp->dip));
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "OBDB (intr): 0x%08x (mask): 0x%08x (clear): 0x%08x",
+ pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB),
+ pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_MASK),
+ pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR));
+ pmcs_prt(pwp, PMCS_PRT_INFO, "SCRATCH0: 0x%08x",
+ pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH0));
+ pmcs_prt(pwp, PMCS_PRT_INFO, "SCRATCH1: 0x%08x",
+ pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1));
+ pmcs_prt(pwp, PMCS_PRT_INFO, "SCRATCH2: 0x%08x",
+ pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2));
+ pmcs_prt(pwp, PMCS_PRT_INFO, "SCRATCH3: 0x%08x",
+ pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH3));
+ for (i = 0; i < PMCS_NIQ; i++) {
+ pmcs_prt(pwp, PMCS_PRT_INFO, "IQ %d: CI %u PI %u",
+ i, pmcs_rd_iqci(pwp, i), pmcs_rd_iqpi(pwp, i));
+ }
+ for (i = 0; i < PMCS_NOQ; i++) {
+ pmcs_prt(pwp, PMCS_PRT_INFO, "OQ %d: CI %u PI %u",
+ i, pmcs_rd_oqci(pwp, i), pmcs_rd_oqpi(pwp, i));
+ }
+ val = pmcs_rd_gst_tbl(pwp, PMCS_GST_BASE);
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "GST TABLE BASE: 0x%08x (STATE=0x%x QF=%d GSTLEN=%d HMI_ERR=0x%x)",
+ val, PMCS_MPI_S(val), PMCS_QF(val), PMCS_GSTLEN(val) * 4,
+ PMCS_HMI_ERR(val));
+ pmcs_prt(pwp, PMCS_PRT_INFO, "GST TABLE IQFRZ0: 0x%08x",
+ pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ0));
+ pmcs_prt(pwp, PMCS_PRT_INFO, "GST TABLE IQFRZ1: 0x%08x",
+ pmcs_rd_gst_tbl(pwp, PMCS_GST_IQFRZ1));
+ pmcs_prt(pwp, PMCS_PRT_INFO, "GST TABLE MSGU TICK: 0x%08x",
+ pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK));
+ pmcs_prt(pwp, PMCS_PRT_INFO, "GST TABLE IOP TICK: 0x%08x",
+ pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK));
+ for (i = 0; i < pwp->nphy; i++) {
+ uint32_t rerrf, pinfo, started = 0, link = 0;
+ pinfo = pmcs_rd_gst_tbl(pwp, PMCS_GST_PHY_INFO(i));
+ if (pinfo & 1) {
+ started = 1;
+ link = pinfo & 2;
+ }
+ rerrf = pmcs_rd_gst_tbl(pwp, PMCS_GST_RERR_INFO(i));
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "GST TABLE PHY%d STARTED=%d LINK=%d RERR=0x%08x",
+ i, started, link, rerrf);
+ }
+ pmcs_prt(pwp, PMCS_PRT_INFO, "pmcs%d: Register dump end",
+ ddi_get_instance(pwp->dip));
+}
+
+/*
+ * Handle SATA Abort and other error processing
+ */
+int
+pmcs_abort_handler(pmcs_hw_t *pwp)
+{
+ pmcs_phy_t *pptr, *pnext, *pnext_uplevel[PMCS_MAX_XPND];
+ int r, level = 0;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s", __func__);
+
+ mutex_enter(&pwp->lock);
+ pptr = pwp->root_phys;
+ mutex_exit(&pwp->lock);
+
+ while (pptr) {
+ /*
+ * XXX: Need to make sure this doesn't happen
+ * XXX: when non-NCQ commands are running.
+ */
+ pmcs_lock_phy(pptr);
+ if (pptr->need_rl_ext) {
+ ASSERT(pptr->dtype == SATA);
+ if (pmcs_acquire_scratch(pwp, B_FALSE)) {
+ goto next_phy;
+ }
+ r = pmcs_sata_abort_ncq(pwp, pptr);
+ pmcs_release_scratch(pwp);
+ if (r == ENOMEM) {
+ goto next_phy;
+ }
+ if (r) {
+ r = pmcs_reset_phy(pwp, pptr,
+ PMCS_PHYOP_LINK_RESET);
+ if (r == ENOMEM) {
+ goto next_phy;
+ }
+ /* what if other failures happened? */
+ pptr->abort_pending = 1;
+ pptr->abort_sent = 0;
+ }
+ }
+ if (pptr->abort_pending == 0 || pptr->abort_sent) {
+ goto next_phy;
+ }
+ pptr->abort_pending = 0;
+ if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) == ENOMEM) {
+ pptr->abort_pending = 1;
+ goto next_phy;
+ }
+ pptr->abort_sent = 1;
+
+next_phy:
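+ /*
+ * Depth-first traversal: descend into any children first, saving
+ * the sibling to resume at this level once the subtree is done.
+ */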
+ if (pptr->children) {
+ pnext = pptr->children;
+ pnext_uplevel[level++] = pptr->sibling;
+ } else {
+ pnext = pptr->sibling;
+ while ((pnext == NULL) && (level > 0)) {
+ pnext = pnext_uplevel[--level];
+ }
+ }
+
+ pmcs_unlock_phy(pptr);
+ pptr = pnext;
+ }
+
+ return (0);
+}
+
+/*
+ * Register a device (get a device handle for it).
+ * Called with PHY lock held.
+ */
+int
+pmcs_register_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ struct pmcwork *pwrk;
+ int result = 0;
+ uint32_t *msg;
+ uint32_t tmp, status;
+ uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ if (msg == NULL ||
+ (pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ result = ENOMEM;
+ goto out;
+ }
+
+ pwrk->arg = iomb;
+ pwrk->dtype = pptr->dtype;
+
+ msg[1] = LE_32(pwrk->htag);
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_REGISTER_DEVICE));
+ tmp = PMCS_DEVREG_TLR |
+ (pptr->link_rate << PMCS_DEVREG_LINK_RATE_SHIFT);
+ if (IS_ROOT_PHY(pptr)) {
+ msg[2] = LE_32(pptr->portid |
+ (pptr->phynum << PMCS_PHYID_SHIFT));
+ } else {
+ msg[2] = LE_32(pptr->portid);
+ }
+ if (pptr->dtype == SATA) {
+ if (IS_ROOT_PHY(pptr)) {
+ tmp |= PMCS_DEVREG_TYPE_SATA_DIRECT;
+ } else {
+ tmp |= PMCS_DEVREG_TYPE_SATA;
+ }
+ } else {
+ tmp |= PMCS_DEVREG_TYPE_SAS;
+ }
+ msg[3] = LE_32(tmp);
+ msg[4] = LE_32(PMCS_DEVREG_IT_NEXUS_TIMEOUT);
+ (void) memcpy(&msg[5], pptr->sas_address, 8);
+
+ CLEAN_MESSAGE(msg, 7);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ pmcs_unlock_phy(pptr);
+ WAIT_FOR(pwrk, 250, result);
+ pmcs_lock_phy(pptr);
+ pmcs_pwork(pwp, pwrk);
+
+ if (result) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_timeo, __func__);
+ result = ETIMEDOUT;
+ goto out;
+ }
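+ /*
+ * Status is in DWORD 2 of the reply; the assigned device handle
+ * is in DWORD 3.
+ */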
+ status = LE_32(iomb[2]);
+ tmp = LE_32(iomb[3]);
+ switch (status) {
+ case PMCS_DEVREG_OK:
+ case PMCS_DEVREG_DEVICE_ALREADY_REGISTERED:
+ case PMCS_DEVREG_PHY_ALREADY_REGISTERED:
+ if (pmcs_validate_devid(pwp->root_phys, pptr, tmp) == B_FALSE) {
+ result = EEXIST;
+ goto out;
+ } else if (status != PMCS_DEVREG_OK) {
+ if (tmp == 0xffffffff) { /* F/W bug */
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "%s: phy %s already has bogus devid 0x%x",
+ __func__, pptr->path, tmp);
+ result = EIO;
+ goto out;
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "%s: phy %s already has a device id 0x%x",
+ __func__, pptr->path, tmp);
+ }
+ }
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: status 0x%x when trying to "
+ "register device %s", __func__, status, pptr->path);
+ result = EIO;
+ goto out;
+ }
+ pptr->device_id = tmp;
+ pptr->valid_device_id = 1;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "Phy %s/" SAS_ADDR_FMT
+ " registered with device_id 0x%x (portid %d)", pptr->path,
+ SAS_ADDR_PRT(pptr->sas_address), tmp, pptr->portid);
+out:
+ return (result);
+}
+
+/*
+ * Deregister a device (remove a device handle).
+ * Called with PHY locked.
+ */
+void
+pmcs_deregister_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ struct pmcwork *pwrk;
+ uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
+ uint32_t iomb[(PMCS_QENTRY_SIZE << 1) >> 2];
+ int result;
+
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
+ if (pwrk == NULL) {
+ return;
+ }
+
+ pwrk->arg = iomb;
+ pwrk->dtype = pptr->dtype;
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ return;
+ }
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
+ PMCIN_DEREGISTER_DEVICE_HANDLE));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(pptr->device_id);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ COPY_MESSAGE(ptr, msg, 3);
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ pmcs_unlock_phy(pptr);
+ WAIT_FOR(pwrk, 250, result);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_lock_phy(pptr);
+
+ if (result) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_timeo, __func__);
+ return;
+ }
+ status = LE_32(iomb[2]);
+ if (status != PMCOUT_STATUS_OK) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: status 0x%x when trying to "
+ "deregister device %s", __func__, status, pptr->path);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: device %s deregistered",
+ __func__, pptr->path);
+ pptr->valid_device_id = 0;
+ pptr->device_id = PMCS_INVALID_DEVICE_ID;
+ }
+}
+
+/*
+ * Deregister all registered devices.
+ */
+void
+pmcs_deregister_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
+{
+ /*
+ * Start at the maximum level and walk back to level 0. This only
+ * gets done during detach after all threads and timers have been
+ * destroyed, so there's no need to hold the softstate or PHY lock.
+ */
+ while (phyp) {
+ if (phyp->children) {
+ pmcs_deregister_devices(pwp, phyp->children);
+ }
+ if (phyp->valid_device_id) {
+ pmcs_deregister_device(pwp, phyp);
+ }
+ phyp = phyp->sibling;
+ }
+}
+
+/*
+ * Perform a 'soft' reset on the PMC chip
+ */
+int
+pmcs_soft_reset(pmcs_hw_t *pwp, boolean_t no_restart)
+{
+ uint32_t s2, sfrbits, gsm, rapchk, wapchk, wdpchk, spc, tsmode;
+ pmcs_phy_t *pptr;
+ char *msg = NULL;
+ int i;
+
+ /*
+ * Disable interrupts
+ */
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
+
+ pmcs_prt(pwp, PMCS_PRT_INFO, "%s", __func__);
+
+ if (pwp->locks_initted) {
+ mutex_enter(&pwp->lock);
+ }
+ pwp->blocked = 1;
+
+ /*
+ * Step 1
+ */
+ s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2);
+ if ((s2 & PMCS_MSGU_HOST_SOFT_RESET_READY) == 0) {
+ pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE);
+ pmcs_wr_gsm_reg(pwp, RB6_ACCESS, RB6_NMI_SIGNATURE);
+ for (i = 0; i < 100; i++) {
+ s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
+ PMCS_MSGU_HOST_SOFT_RESET_READY;
+ if (s2) {
+ break;
+ }
+ drv_usecwait(10000);
+ }
+ s2 = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
+ PMCS_MSGU_HOST_SOFT_RESET_READY;
+ if (s2 == 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: PMCS_MSGU_HOST_"
+ "SOFT_RESET_READY never came ready", __func__);
+ pmcs_register_dump(pwp);
+ if ((pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
+ PMCS_MSGU_CPU_SOFT_RESET_READY) == 0 ||
+ (pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH2) &
+ PMCS_MSGU_CPU_SOFT_RESET_READY) == 0) {
+ pwp->state = STATE_DEAD;
+ pwp->blocked = 0;
+ if (pwp->locks_initted) {
+ mutex_exit(&pwp->lock);
+ }
+ return (-1);
+ }
+ }
+ }
+
+ /*
+ * Step 2
+ */
+ pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_IOP, 0);
+ drv_usecwait(10);
+ pmcs_wr_gsm_reg(pwp, NMI_EN_VPE0_AAP1, 0);
+ drv_usecwait(10);
+ pmcs_wr_topunit(pwp, PMCS_EVENT_INT_ENABLE, 0);
+ drv_usecwait(10);
+ pmcs_wr_topunit(pwp, PMCS_EVENT_INT_STAT,
+ pmcs_rd_topunit(pwp, PMCS_EVENT_INT_STAT));
+ drv_usecwait(10);
+ pmcs_wr_topunit(pwp, PMCS_ERROR_INT_ENABLE, 0);
+ drv_usecwait(10);
+ pmcs_wr_topunit(pwp, PMCS_ERROR_INT_STAT,
+ pmcs_rd_topunit(pwp, PMCS_ERROR_INT_STAT));
+ drv_usecwait(10);
+
+ sfrbits = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
+ PMCS_MSGU_AAP_SFR_PROGRESS;
+ sfrbits ^= PMCS_MSGU_AAP_SFR_PROGRESS;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "PMCS_MSGU_HOST_SCRATCH0 %08x -> %08x",
+ pmcs_rd_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0), HST_SFT_RESET_SIG);
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_HOST_SCRATCH0, HST_SFT_RESET_SIG);
+
+ /*
+ * Step 3
+ */
+ gsm = pmcs_rd_gsm_reg(pwp, GSM_CFG_AND_RESET);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "GSM %08x -> %08x", gsm,
+ gsm & ~PMCS_SOFT_RESET_BITS);
+ pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm & ~PMCS_SOFT_RESET_BITS);
+
+ /*
+ * Step 4
+ */
+ rapchk = pmcs_rd_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "READ_ADR_PARITY_CHK_EN %08x -> %08x",
+ rapchk, 0);
+ pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, 0);
+ wapchk = pmcs_rd_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "WRITE_ADR_PARITY_CHK_EN %08x -> %08x",
+ wapchk, 0);
+ pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, 0);
+ wdpchk = pmcs_rd_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "WRITE_DATA_PARITY_CHK_EN %08x -> %08x",
+ wdpchk, 0);
+ pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, 0);
+
+ /*
+ * Step 5
+ */
+ drv_usecwait(100);
+
+ /*
+ * Step 5.5 (Temporary workaround for 1.07.xx Beta)
+ */
+ tsmode = pmcs_rd_gsm_reg(pwp, PMCS_GPIO_TRISTATE_MODE_ADDR);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "GPIO TSMODE %08x -> %08x", tsmode,
+ tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1));
+ pmcs_wr_gsm_reg(pwp, PMCS_GPIO_TRISTATE_MODE_ADDR,
+ tsmode & ~(PMCS_GPIO_TSMODE_BIT0|PMCS_GPIO_TSMODE_BIT1));
+ drv_usecwait(10);
+
+ /*
+ * Step 6
+ */
+ spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "SPC_RESET %08x -> %08x", spc,
+ spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
+ pmcs_wr_topunit(pwp, PMCS_SPC_RESET,
+ spc & ~(PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
+ drv_usecwait(10);
+
+ /*
+ * Step 7
+ */
+ spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "SPC_RESET %08x -> %08x", spc,
+ spc & ~(BDMA_CORE_RSTB|OSSP_RSTB));
+ pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc & ~(BDMA_CORE_RSTB|OSSP_RSTB));
+
+ /*
+ * Step 8
+ */
+ drv_usecwait(100);
+
+ /*
+ * Step 9
+ */
+ spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "SPC_RESET %08x -> %08x", spc,
+ spc | (BDMA_CORE_RSTB|OSSP_RSTB));
+ pmcs_wr_topunit(pwp, PMCS_SPC_RESET, spc | (BDMA_CORE_RSTB|OSSP_RSTB));
+
+ /*
+ * Step 10
+ */
+ drv_usecwait(100);
+
+ /*
+ * Step 11
+ */
+ gsm = pmcs_rd_gsm_reg(pwp, GSM_CFG_AND_RESET);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "GSM %08x -> %08x", gsm,
+ gsm | PMCS_SOFT_RESET_BITS);
+ pmcs_wr_gsm_reg(pwp, GSM_CFG_AND_RESET, gsm | PMCS_SOFT_RESET_BITS);
+ drv_usecwait(10);
+
+ /*
+ * Step 12
+ */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "READ_ADR_PARITY_CHK_EN %08x -> %08x",
+ pmcs_rd_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN), rapchk);
+ pmcs_wr_gsm_reg(pwp, READ_ADR_PARITY_CHK_EN, rapchk);
+ drv_usecwait(10);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "WRITE_ADR_PARITY_CHK_EN %08x -> %08x",
+ pmcs_rd_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN), wapchk);
+ pmcs_wr_gsm_reg(pwp, WRITE_ADR_PARITY_CHK_EN, wapchk);
+ drv_usecwait(10);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "WRITE_DATA_PARITY_CHK_EN %08x -> %08x",
+ pmcs_rd_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN), wdpchk);
+ pmcs_wr_gsm_reg(pwp, WRITE_DATA_PARITY_CHK_EN, wdpchk);
+ drv_usecwait(10);
+
+ /*
+ * Step 13
+ */
+ spc = pmcs_rd_topunit(pwp, PMCS_SPC_RESET);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "SPC_RESET %08x -> %08x", spc,
+ spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
+ pmcs_wr_topunit(pwp, PMCS_SPC_RESET,
+ spc | (PCS_IOP_SS_RSTB|PCS_AAP1_SS_RSTB));
+
+ /*
+ * Step 14
+ */
+ drv_usecwait(100);
+
+ /*
+ * Step 15
+ */
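+ /* Poll for up to a second for the SFR progress bits to toggle. */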
+ for (spc = 0, i = 0; i < 1000; i++) {
+ drv_usecwait(1000);
+ spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
+ if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) == sfrbits) {
+ break;
+ }
+ }
+
+ if ((spc & PMCS_MSGU_AAP_SFR_PROGRESS) != sfrbits) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "SFR didn't toggle (sfr 0x%x)", spc);
+ pwp->state = STATE_DEAD;
+ pwp->blocked = 0;
+ if (pwp->locks_initted) {
+ mutex_exit(&pwp->lock);
+ }
+ return (-1);
+ }
+
+ /*
+ * Step 16
+ */
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff);
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
+
+ /*
+ * Wait up to 5 seconds for the AAP state to become either ready
+ * or error.
+ */
+ for (i = 0; i < 50; i++) {
+ spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1) &
+ PMCS_MSGU_AAP_STATE_MASK;
+ if (spc == PMCS_MSGU_AAP_STATE_ERROR ||
+ spc == PMCS_MSGU_AAP_STATE_READY) {
+ break;
+ }
+ drv_usecwait(100000);
+ }
+ spc = pmcs_rd_msgunit(pwp, PMCS_MSGU_SCRATCH1);
+ if ((spc & PMCS_MSGU_AAP_STATE_MASK) != PMCS_MSGU_AAP_STATE_READY) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "soft reset failed (state 0x%x)", spc);
+ pwp->state = STATE_DEAD;
+ pwp->blocked = 0;
+ if (pwp->locks_initted) {
+ mutex_exit(&pwp->lock);
+ }
+ return (-1);
+ }
+
+ if (pwp->state == STATE_DEAD || pwp->state == STATE_UNPROBING ||
+ pwp->state == STATE_PROBING || pwp->locks_initted == 0) {
+ pwp->blocked = 0;
+ if (pwp->locks_initted) {
+ mutex_exit(&pwp->lock);
+ }
+ return (0);
+ }
+
+ /*
+ * Return at this point if we don't need to start up.
+ */
+ if (no_restart) {
+ return (0);
+ }
+
+ ASSERT(pwp->locks_initted != 0);
+
+ /*
+ * Clean up various soft state.
+ */
+ bzero(pwp->ports, sizeof (pwp->ports));
+
+ pmcs_free_all_phys(pwp, pwp->root_phys);
+
+ for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
+ pmcs_lock_phy(pptr);
+ pmcs_clear_phy(pwp, pptr);
+ pmcs_unlock_phy(pptr);
+ }
+
+ if (pwp->targets) {
+ for (i = 0; i < pwp->max_dev; i++) {
+ pmcs_xscsi_t *xp = pwp->targets[i];
+
+ if (xp == NULL) {
+ continue;
+ }
+ mutex_enter(&xp->statlock);
+ if (xp->assigned == 0 && xp->dying == 0) {
+ if (xp->new) {
+ xp->new = 0;
+ xp->ca = 0;
+ xp->qdepth = 0;
+ xp->phy = NULL;
+ }
+ mutex_exit(&xp->statlock);
+ continue;
+ }
+ xp->tagmap = 0;
+ xp->dying = 1;
+ xp->assigned = 0;
+ mutex_exit(&xp->statlock);
+ SCHEDULE_WORK(pwp, PMCS_WORK_REM_DEVICES);
+ }
+ }
+
+ bzero(pwp->shadow_iqpi, sizeof (pwp->shadow_iqpi));
+ for (i = 0; i < PMCS_NIQ; i++) {
+ if (pwp->iqp[i]) {
+ bzero(pwp->iqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
+ pmcs_wr_iqpi(pwp, i, 0);
+ pmcs_wr_iqci(pwp, i, 0);
+ }
+ }
+ for (i = 0; i < PMCS_NOQ; i++) {
+ if (pwp->oqp[i]) {
+ bzero(pwp->oqp[i], PMCS_QENTRY_SIZE * pwp->ioq_depth);
+ pmcs_wr_oqpi(pwp, i, 0);
+ pmcs_wr_oqci(pwp, i, 0);
+ }
+ }
+ if (pwp->fwlogp) {
+ bzero(pwp->fwlogp, PMCS_FWLOG_SIZE);
+ }
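+ /* Reinitialize the work structure free list. */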
+ STAILQ_INIT(&pwp->wf);
+ bzero(pwp->work, sizeof (pmcwork_t) * pwp->max_cmd);
+ for (i = 0; i < pwp->max_cmd - 1; i++) {
+ pmcwork_t *pwrk = &pwp->work[i];
+ STAILQ_INSERT_TAIL(&pwp->wf, pwrk, next);
+ }
+
+ /*
+ * Clear out any leftover commands sitting in the work list
+ */
+ for (i = 0; i < pwp->max_cmd; i++) {
+ pmcwork_t *pwrk = &pwp->work[i];
+ mutex_enter(&pwrk->lock);
+ if (pwrk->state == PMCS_WORK_STATE_ONCHIP) {
+ switch (PMCS_TAG_TYPE(pwrk->htag)) {
+ case PMCS_TAG_TYPE_WAIT:
+ mutex_exit(&pwrk->lock);
+ break;
+ case PMCS_TAG_TYPE_CBACK:
+ case PMCS_TAG_TYPE_NONE:
+ pmcs_pwork(pwp, pwrk);
+ break;
+ default:
+ break;
+ }
+ } else if (pwrk->state == PMCS_WORK_STATE_IOCOMPQ) {
+ pwrk->dead = 1;
+ mutex_exit(&pwrk->lock);
+ } else {
+ /*
+ * The other states of NIL, READY and INTR
+ * should not be visible outside of a lock being held.
+ */
+ pmcs_pwork(pwp, pwrk);
+ }
+ }
+
+ /*
+ * Restore Interrupt Mask
+ */
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
+ pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
+
+ pwp->blocked = 0;
+ pwp->mpi_table_setup = 0;
+ mutex_exit(&pwp->lock);
+
+ /*
+ * Set up MPI again.
+ */
+ if (pmcs_setup(pwp)) {
+ msg = "unable to setup MPI tables again";
+ goto fail_restart;
+ }
+ pmcs_report_fwversion(pwp);
+
+ /*
+ * Restart MPI
+ */
+ if (pmcs_start_mpi(pwp)) {
+ msg = "unable to restart MPI again";
+ goto fail_restart;
+ }
+
+ mutex_enter(&pwp->lock);
+ pwp->blocked = 0;
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ mutex_exit(&pwp->lock);
+
+ /*
+ * Run any completions
+ */
+ PMCS_CQ_RUN(pwp);
+
+ /*
+ * Delay
+ */
+ drv_usecwait(1000000);
+ return (0);
+
+fail_restart:
+ mutex_enter(&pwp->lock);
+ pwp->state = STATE_DEAD;
+ mutex_exit(&pwp->lock);
+ pmcs_prt(pwp, PMCS_PRT_ERR, "%s: Failed: %s", __func__, msg);
+ return (-1);
+}
+
+/*
+ * Reset a device or a logical unit.
+ */
+int
+pmcs_reset_dev(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint64_t lun)
+{
+ int rval = 0;
+
+ if (pptr == NULL) {
+ return (ENXIO);
+ }
+
+ pmcs_lock_phy(pptr);
+ if (pptr->dtype == SAS) {
+ /*
+ * Some devices do not support SAS_I_T_NEXUS_RESET as
+ * it is not a mandatory (in SAM4) task management
+ * function, while LOGIC_UNIT_RESET is mandatory.
+ *
+ * The problem here is that we need to iterate over
+ * all known LUNs to emulate the semantics of
+ * "RESET_TARGET".
+ *
+ * XXX: FIX ME
+ */
+ if (lun == (uint64_t)-1) {
+ lun = 0;
+ }
+ rval = pmcs_ssp_tmf(pwp, pptr, SAS_LOGICAL_UNIT_RESET, 0, lun,
+ NULL);
+ } else if (pptr->dtype == SATA) {
+ if (lun != 0ull) {
+ pmcs_unlock_phy(pptr);
+ return (EINVAL);
+ }
+ rval = pmcs_reset_phy(pwp, pptr, PMCS_PHYOP_LINK_RESET);
+ } else {
+ pmcs_unlock_phy(pptr);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: cannot reset a SMP device yet (%s)",
+ __func__, pptr->path);
+ return (EINVAL);
+ }
+
+ /*
+ * Now harvest any commands killed by this action
+ * by issuing an ABORT for all commands on this device.
+ *
+ * We do this even if the TMF or reset fails (in case there
+ * are any dead commands around to be harvested *anyway*).
+ * We don't have to wait for the abort to complete.
+ */
+ if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
+ pptr->abort_pending = 1;
+ SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
+ }
+
+ pmcs_unlock_phy(pptr);
+ return (rval);
+}
+
+/*
+ * Called with PHY locked.
+ */
+static int
+pmcs_get_device_handle(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ if (pptr->valid_device_id == 0) {
+ int result = pmcs_register_device(pwp, pptr);
+
+ /*
+ * If we changed while registering, punt
+ */
+ if (pptr->changed) {
+ RESTART_DISCOVERY(pwp);
+ return (-1);
+ }
+
+ /*
+ * If we had a failure to register, check against errors.
+ * An ENOMEM error means we just retry (temp resource shortage).
+ */
+ if (result == ENOMEM) {
+ PHY_CHANGED(pwp, pptr);
+ RESTART_DISCOVERY(pwp);
+ return (-1);
+ }
+
+ /*
+ * An ETIMEDOUT error means we retry (if our counter isn't
+ * exhausted)
+ */
+ if (result == ETIMEDOUT) {
+ if (ddi_get_lbolt() < pptr->config_stop) {
+ PHY_CHANGED(pwp, pptr);
+ RESTART_DISCOVERY(pwp);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Retries exhausted for %s, killing",
+ __func__, pptr->path);
+ pptr->config_stop = 0;
+ pmcs_kill_changed(pwp, pptr, 0);
+ }
+ return (-1);
+ }
+ /*
+ * Other errors, or the lack of a valid device id, are fatal but
+ * don't preclude a future action.
+ */
+ if (result || pptr->valid_device_id == 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: %s could not "
+ "be registered", __func__, pptr->path);
+ return (-1);
+ }
+ }
+ return (0);
+}
+
+int
+pmcs_iport_tgtmap_create(pmcs_iport_t *iport)
+{
+ ASSERT(iport);
+ if (iport == NULL)
+ return (B_FALSE);
+
+ pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, "%s", __func__);
+
+ /* create target map */
+ if (scsi_hba_tgtmap_create(iport->dip, SCSI_TM_FULLSET, tgtmap_usec,
+ 2048, NULL, NULL, NULL, &iport->iss_tgtmap) != DDI_SUCCESS) {
+ pmcs_prt(iport->pwp, PMCS_PRT_DEBUG,
+ "%s: failed to create tgtmap", __func__);
+ return (B_FALSE);
+ }
+ return (B_TRUE);
+}
+
+int
+pmcs_iport_tgtmap_destroy(pmcs_iport_t *iport)
+{
+ ASSERT(iport && iport->iss_tgtmap);
+ if ((iport == NULL) || (iport->iss_tgtmap == NULL))
+ return (B_FALSE);
+
+ pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_MAP, "%s", __func__);
+
+ /* destroy target map */
+ scsi_hba_tgtmap_destroy(iport->iss_tgtmap);
+ return (B_TRUE);
+}
+
+/*
+ * Query the phymap and populate the iport handle passed in.
+ * Called with iport lock held.
+ */
+int
+pmcs_iport_configure_phys(pmcs_iport_t *iport)
+{
+ pmcs_hw_t *pwp;
+ pmcs_phy_t *pptr;
+ sas_phymap_phys_t *phys;
+ int phynum;
+ int inst;
+
+ ASSERT(iport);
+ ASSERT(mutex_owned(&iport->lock));
+ pwp = iport->pwp;
+ ASSERT(pwp);
+ inst = ddi_get_instance(iport->dip);
+
+ mutex_enter(&pwp->lock);
+ ASSERT(pwp->root_phys != NULL);
+
+ /*
+ * Query the phymap regarding the phys in this iport and populate
+ * the iport's phys list. Hereafter this list is maintained via
+ * port up and down events in pmcs_intr.c
+ */
+ ASSERT(list_is_empty(&iport->phys));
+ phys = sas_phymap_ua2phys(pwp->hss_phymap, iport->ua);
+ while ((phynum = sas_phymap_phys_next(phys)) != -1) {
+ /* Grab the phy pointer from root_phys */
+ pptr = pwp->root_phys + phynum;
+ ASSERT(pptr);
+ pmcs_lock_phy(pptr);
+ ASSERT(pptr->phynum == phynum);
+
+ /*
+ * Set a back pointer in the phy to this iport.
+ */
+ pptr->iport = iport;
+
+ /*
+ * If this phy is the primary, set a pointer to it on our
+ * iport handle, and set our portid from it.
+ */
+ if (!pptr->subsidiary) {
+ iport->pptr = pptr;
+ iport->portid = pptr->portid;
+ }
+
+ /*
+ * Finally, insert the phy into our list
+ */
+ pmcs_add_phy_to_iport(iport, pptr);
+ pmcs_unlock_phy(pptr);
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: found phy %d [0x%p] "
+ "on iport%d, refcnt(%d)", __func__, phynum,
+ (void *)pptr, inst, iport->refcnt);
+ }
+ mutex_exit(&pwp->lock);
+ sas_phymap_phys_free(phys);
+ RESTART_DISCOVERY(pwp);
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Return the iport that ua is associated with, or NULL. If an iport is
+ * returned, it will be held and the caller must release the hold.
+ */
+static pmcs_iport_t *
+pmcs_get_iport_by_ua(pmcs_hw_t *pwp, char *ua)
+{
+ pmcs_iport_t *iport = NULL;
+
+ rw_enter(&pwp->iports_lock, RW_READER);
+ for (iport = list_head(&pwp->iports);
+ iport != NULL;
+ iport = list_next(&pwp->iports, iport)) {
+ mutex_enter(&iport->lock);
+ if (strcmp(iport->ua, ua) == 0) {
+ mutex_exit(&iport->lock);
+ mutex_enter(&iport->refcnt_lock);
+ iport->refcnt++;
+ mutex_exit(&iport->refcnt_lock);
+ break;
+ }
+ mutex_exit(&iport->lock);
+ }
+ rw_exit(&pwp->iports_lock);
+
+ return (iport);
+}
+
+/*
+ * Return the iport that pptr is associated with, or NULL.
+ * If an iport is returned, there is a hold that the caller must release.
+ */
+pmcs_iport_t *
+pmcs_get_iport_by_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ pmcs_iport_t *iport = NULL;
+ char *ua;
+
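+ /*
+ * Look up the unit address for this phy in the phymap, using the
+ * HBA WWN and the phy's SAS address.
+ */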
+ ua = sas_phymap_lookup_ua(pwp->hss_phymap, pwp->sas_wwns[0],
+ pmcs_barray2wwn(pptr->sas_address));
+ if (ua) {
+ iport = pmcs_get_iport_by_ua(pwp, ua);
+ if (iport) {
+ mutex_enter(&iport->lock);
+ iport->ua_state = UA_ACTIVE;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: "
+ "found iport [0x%p] on ua (%s) for phy [0x%p], "
+ "refcnt (%d)", __func__, (void *)iport, ua,
+ (void *)pptr, iport->refcnt);
+ mutex_exit(&iport->lock);
+ }
+ }
+
+ return (iport);
+}
+
+void
+pmcs_rele_iport(pmcs_iport_t *iport)
+{
+ /*
+ * Release a refcnt on this iport. If this is the last reference,
+ * signal the potential waiter in pmcs_iport_unattach().
+ */
+ ASSERT(iport->refcnt > 0);
+ mutex_enter(&iport->refcnt_lock);
+ iport->refcnt--;
+ mutex_exit(&iport->refcnt_lock);
+ if (iport->refcnt == 0) {
+ cv_signal(&iport->refcnt_cv);
+ }
+ pmcs_prt(iport->pwp, PMCS_PRT_DEBUG_CONFIG, "%s: iport [0x%p] "
+ "refcnt (%d)", __func__, (void *)iport, iport->refcnt);
+}
+
+void
+pmcs_phymap_activate(void *arg, char *ua, void **privp)
+{
+ _NOTE(ARGUNUSED(privp));
+ pmcs_hw_t *pwp = arg;
+ pmcs_iport_t *iport = NULL;
+
+ mutex_enter(&pwp->lock);
+ if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) {
+ mutex_exit(&pwp->lock);
+ return;
+ }
+ pwp->phymap_active++;
+ mutex_exit(&pwp->lock);
+
+ if (scsi_hba_iportmap_iport_add(pwp->hss_iportmap, ua, NULL) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, "%s: failed to add "
+ "iport handle on unit address [%s]", __func__, ua);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, "%s: phymap_active count "
+ "(%d), added iport handle on unit address [%s]", __func__,
+ pwp->phymap_active, ua);
+ }
+
+ /* Set the HBA softstate as our private data for this unit address */
+ *privp = (void *)pwp;
+
+ /*
+ * We are waiting on attach for this iport node, unless it is still
+ * attached. This can happen if a consumer has an outstanding open
+ * on our iport node, but the port is down. If this is the case, we
+ * need to configure our iport here for reuse.
+ */
+ iport = pmcs_get_iport_by_ua(pwp, ua);
+ if (iport) {
+ mutex_enter(&iport->lock);
+ if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: "
+ "failed to configure phys on iport [0x%p] at "
+ "unit address (%s)", __func__, (void *)iport, ua);
+ }
+ iport->ua_state = UA_ACTIVE;
+ pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
+ &iport->nphy);
+ mutex_exit(&iport->lock);
+ pmcs_rele_iport(iport);
+ }
+}
+
+void
+pmcs_phymap_deactivate(void *arg, char *ua, void *privp)
+{
+ _NOTE(ARGUNUSED(privp));
+ pmcs_hw_t *pwp = arg;
+ pmcs_iport_t *iport;
+
+ mutex_enter(&pwp->lock);
+ pwp->phymap_active--;
+ mutex_exit(&pwp->lock);
+
+ if (scsi_hba_iportmap_iport_remove(pwp->hss_iportmap, ua) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, "%s: failed to remove "
+ "iport handle on unit address [%s]", __func__, ua);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP, "%s: phymap_active "
+ "count (%d), removed iport handle on unit address [%s]",
+ __func__, pwp->phymap_active, ua);
+ }
+
+ iport = pmcs_get_iport_by_ua(pwp, ua);
+
+ if (iport == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: failed lookup of "
+ "iport handle on unit address (%s)", __func__, ua);
+ return;
+ }
+
+ mutex_enter(&iport->lock);
+ iport->ua_state = UA_INACTIVE;
+ iport->portid = PMCS_IPORT_INVALID_PORT_ID;
+ pmcs_remove_phy_from_iport(iport, NULL);
+ mutex_exit(&iport->lock);
+ pmcs_rele_iport(iport);
+}
+
+/*
+ * Top-level discovery function
+ */
+void
+pmcs_discover(pmcs_hw_t *pwp)
+{
+ pmcs_phy_t *pptr;
+ pmcs_phy_t *root_phy;
+
+ DTRACE_PROBE2(pmcs__discover__entry, ulong_t, pwp->work_flags,
+ boolean_t, pwp->config_changed);
+
+ mutex_enter(&pwp->lock);
+
+ if (pwp->state != STATE_RUNNING) {
+ mutex_exit(&pwp->lock);
+ return;
+ }
+
+ /* Ensure we have at least one phymap active */
+ if (pwp->phymap_active == 0) {
+ mutex_exit(&pwp->lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: phymap inactive, exiting", __func__);
+ return;
+ }
+
+ mutex_exit(&pwp->lock);
+
+ /*
+ * If no iports have attached, but we have PHYs that are up, we
+ * are waiting for iport attach to complete. Restart discovery.
+ */
+ rw_enter(&pwp->iports_lock, RW_READER);
+ if (!pwp->iports_attached) {
+ rw_exit(&pwp->iports_lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: no iports attached, retry discovery", __func__);
+ SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
+ return;
+ }
+ rw_exit(&pwp->iports_lock);
+
+ mutex_enter(&pwp->config_lock);
+ if (pwp->configuring) {
+ mutex_exit(&pwp->config_lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: configuration already in progress", __func__);
+ return;
+ }
+
+ if (pmcs_acquire_scratch(pwp, B_FALSE)) {
+ mutex_exit(&pwp->config_lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: cannot allocate scratch", __func__);
+ SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
+ return;
+ }
+
+ pwp->configuring = 1;
+ pwp->config_changed = B_FALSE;
+ mutex_exit(&pwp->config_lock);
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "Discovery begin");
+
+ /*
+ * The order of the following traversals is important.
+ *
+ * The first one checks for changed expanders.
+ *
+ * The second one aborts commands for dead devices and deregisters them.
+ *
+ * The third one clears the contents of dead expanders from the tree.
+ *
+ * The fourth one clears now dead devices in expanders that remain.
+ */
+
+ /*
+ * 1. Check expanders marked changed (but not dead) to see if they still
+ * have the same number of phys and the same SAS address. Mark them,
+ * their subsidiary phys (if wide) and their descendants dead if
+ * anything has changed. Check the devices they contain to see if
+ * *they* have changed. If they've changed from type NOTHING we leave
+ * them marked changed to be configured later (picking up a new SAS
+ * address and link rate if possible). Otherwise, any change in type,
+ * SAS address or removal of target role will cause us to mark them
+ * (and their descendants) as dead (and cause any pending commands
+ * and associated devices to be removed).
+ */
+ root_phy = pwp->root_phys;
+ if (pmcs_check_expanders(pwp, root_phy) == B_TRUE) {
+ goto out;
+ }
+
+ /*
+ * 2. Descend the tree looking for dead devices and kill them
+ * by aborting all active commands and then deregistering them.
+ */
+ if (pmcs_kill_devices(pwp, root_phy)) {
+ goto out;
+ }
+
+ /*
+ * 3. Check for dead expanders and remove their children from the tree.
+ * By the time we get here, the devices and commands for them have
+ * already been terminated and removed.
+ *
+ * We do this independent of the configuration count changing so we can
+ * free any dead device PHYs that were discovered while checking
+ * expanders. We ignore any subsidiary phys as pmcs_clear_expander
+ * will take care of those.
+ *
+ * NOTE: pmcs_clear_expander requires softstate lock
+ */
+ mutex_enter(&pwp->lock);
+ for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
+ /*
+ * Call pmcs_clear_expander for every root PHY. It will
+ * recurse and determine which (if any) expanders actually
+ * need to be cleared.
+ */
+ pmcs_lock_phy(pptr);
+ pmcs_clear_expander(pwp, pptr, 0);
+ pmcs_unlock_phy(pptr);
+ }
+ mutex_exit(&pwp->lock);
+
+ /*
+ * 4. Check for dead devices and nullify them. By the time we get here,
+ * the devices and commands for them have already been terminated
+ * and removed. This is different from step 2 in that this just nulls
+ * phys that are part of expanders that are still here but used to
+ * be something but are no longer something (e.g., after a pulled
+ * disk drive). Note that dead expanders had their contained phys
+ * removed from the tree; here, the expanders themselves are
+ * nullified (unless they were removed by being contained in another
+ * expander phy).
+ */
+ pmcs_clear_phys(pwp, root_phy);
+
+ /*
+ * 5. Now check for and configure new devices.
+ */
+ if (pmcs_configure_new_devices(pwp, root_phy)) {
+ goto restart;
+ }
+
+out:
+ DTRACE_PROBE2(pmcs__discover__exit, ulong_t, pwp->work_flags,
+ boolean_t, pwp->config_changed);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "Discovery end");
+
+ mutex_enter(&pwp->config_lock);
+
+ if (pwp->config_changed == B_FALSE) {
+ /*
+ * Observation is stable, report what we currently see to
+ * the tgtmaps for delta processing. Start by setting
+ * BEGIN on all tgtmaps.
+ */
+ mutex_exit(&pwp->config_lock);
+ if (pmcs_report_observations(pwp) == B_FALSE) {
+ goto restart;
+ }
+ mutex_enter(&pwp->config_lock);
+ } else {
+ /*
+ * If config_changed is TRUE, we need to reschedule
+ * discovery now.
+ */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Config has changed, will re-run discovery", __func__);
+ SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
+ }
+
+ pmcs_release_scratch(pwp);
+ pwp->configuring = 0;
+ mutex_exit(&pwp->config_lock);
+
+#ifdef DEBUG
+ pptr = pmcs_find_phy_needing_work(pwp, pwp->root_phys);
+ if (pptr != NULL) {
+ if (!WORK_IS_SCHEDULED(pwp, PMCS_WORK_DISCOVER)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "PHY %s dead=%d changed=%d configured=%d "
+ "but no work scheduled", pptr->path, pptr->dead,
+ pptr->changed, pptr->configured);
+ }
+ pmcs_unlock_phy(pptr);
+ }
+#endif
+
+ return;
+
+restart:
+ /* Clean up and restart discovery */
+ pmcs_release_scratch(pwp);
+ mutex_enter(&pwp->config_lock);
+ pwp->configuring = 0;
+ RESTART_DISCOVERY_LOCKED(pwp);
+ mutex_exit(&pwp->config_lock);
+}
+
+/*
+ * Return any PHY that needs to have scheduled work done. The PHY is returned
+ * locked.
+ */
+static pmcs_phy_t *
+pmcs_find_phy_needing_work(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ pmcs_phy_t *cphyp, *pnext;
+
+ while (pptr) {
+ pmcs_lock_phy(pptr);
+
+ if (pptr->changed || (pptr->dead && pptr->valid_device_id)) {
+ return (pptr);
+ }
+
+ pnext = pptr->sibling;
+
+ if (pptr->children) {
+ cphyp = pptr->children;
+ pmcs_unlock_phy(pptr);
+ cphyp = pmcs_find_phy_needing_work(pwp, cphyp);
+ if (cphyp) {
+ return (cphyp);
+ }
+ } else {
+ pmcs_unlock_phy(pptr);
+ }
+
+ pptr = pnext;
+ }
+
+ return (NULL);
+}
+
+/*
+ * Report current observations to SCSA.
+ */
+static boolean_t
+pmcs_report_observations(pmcs_hw_t *pwp)
+{
+ pmcs_iport_t *iport;
+ scsi_hba_tgtmap_t *tgtmap;
+ char *ap;
+ pmcs_phy_t *pptr;
+ uint64_t wwn;
+
+ /*
+ * Observation is stable, report what we currently see to the tgtmaps
+ * for delta processing. Start by setting BEGIN on all tgtmaps.
+ */
+ rw_enter(&pwp->iports_lock, RW_READER);
+ for (iport = list_head(&pwp->iports); iport != NULL;
+ iport = list_next(&pwp->iports, iport)) {
+ /*
+ * Unless we have at least one phy up, skip this iport.
+ * Note we don't need to lock the iport for report_skip
+ * since it is only used here. We are doing the skip so that
+ * the phymap and iportmap stabilization times are honored -
+ * giving us the ability to recover port operation within the
+ * stabilization time without unconfiguring targets using the
+ * port.
+ */
+ if (!sas_phymap_uahasphys(pwp->hss_phymap, iport->ua)) {
+ iport->report_skip = 1;
+ continue; /* skip set_begin */
+ }
+ iport->report_skip = 0;
+
+ tgtmap = iport->iss_tgtmap;
+ ASSERT(tgtmap);
+ if (scsi_hba_tgtmap_set_begin(tgtmap) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP,
+ "%s: cannot set_begin tgtmap ", __func__);
+ rw_exit(&pwp->iports_lock);
+ return (B_FALSE);
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP,
+ "%s: set begin on tgtmap [0x%p]", __func__,
+ (void *)tgtmap);
+ }
+ rw_exit(&pwp->iports_lock);
+
+ /*
+ * Now, cycle through all levels of all phys and report
+ * observations into their respective tgtmaps.
+ */
+ pptr = pwp->root_phys;
+
+ while (pptr) {
+ pmcs_lock_phy(pptr);
+
+ /*
+ * Skip PHYs that have nothing attached or are dead.
+ */
+ if ((pptr->dtype == NOTHING) || pptr->dead) {
+ pmcs_unlock_phy(pptr);
+ pptr = pptr->sibling;
+ continue;
+ }
+
+ if (pptr->changed) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: oops, PHY %s changed; restart discovery",
+ __func__, pptr->path);
+ pmcs_unlock_phy(pptr);
+ return (B_FALSE);
+ }
+
+ /*
+ * Get the iport for this root PHY, then call the helper
+ * to report observations for this iport's targets
+ */
+ iport = pmcs_get_iport_by_phy(pwp, pptr);
+ if (iport == NULL) {
+ /* No iport for this tgt */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: no iport for this target",
+ __func__);
+ pmcs_unlock_phy(pptr);
+ pptr = pptr->sibling;
+ continue;
+ }
+
+ if (!iport->report_skip) {
+ if (pmcs_report_iport_observations(
+ pwp, iport, pptr) == B_FALSE) {
+ pmcs_rele_iport(iport);
+ pmcs_unlock_phy(pptr);
+ return (B_FALSE);
+ }
+ }
+ pmcs_rele_iport(iport);
+ pmcs_unlock_phy(pptr);
+ pptr = pptr->sibling;
+ }
+
+ /*
+ * The observation is complete, end sets. Note we will skip any
+ * iports that are active, but have no PHYs in them (i.e. awaiting
+ * unconfigure). Set to restart discovery if we find this.
+ */
+ rw_enter(&pwp->iports_lock, RW_READER);
+ for (iport = list_head(&pwp->iports);
+ iport != NULL;
+ iport = list_next(&pwp->iports, iport)) {
+
+ if (iport->report_skip)
+ continue; /* skip set_end */
+
+ tgtmap = iport->iss_tgtmap;
+ ASSERT(tgtmap);
+ if (scsi_hba_tgtmap_set_end(tgtmap, 0) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP,
+ "%s: cannot set_end tgtmap ", __func__);
+ rw_exit(&pwp->iports_lock);
+ return (B_FALSE);
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP,
+ "%s: set end on tgtmap [0x%p]", __func__,
+ (void *)tgtmap);
+ }
+
+ /*
+ * Now that discovery is complete, set up the necessary
+ * DDI properties on each iport node.
+ */
+ for (iport = list_head(&pwp->iports); iport != NULL;
+ iport = list_next(&pwp->iports, iport)) {
+ /* Set up the DDI properties on each phy */
+ pmcs_smhba_set_phy_props(iport);
+
+ /* Set up the 'attached-port' property on the iport */
+ ap = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
+ mutex_enter(&iport->lock);
+ pptr = iport->pptr;
+ mutex_exit(&iport->lock);
+ if (pptr == NULL) {
+ /*
+ * This iport is down, but has not been
+ * removed from our list (unconfigured).
+ * Set our value to '0'.
+ */
+ (void) snprintf(ap, PMCS_MAX_UA_SIZE, "%s", "0");
+ } else {
+ /* Otherwise, set it to remote phy's wwn */
+ pmcs_lock_phy(pptr);
+ wwn = pmcs_barray2wwn(pptr->sas_address);
+ (void) scsi_wwn_to_wwnstr(wwn, 1, ap);
+ pmcs_unlock_phy(pptr);
+ }
+ if (ndi_prop_update_string(DDI_DEV_T_NONE, iport->dip,
+ SCSI_ADDR_PROP_ATTACHED_PORT, ap) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Failed to "
+ "set prop ("SCSI_ADDR_PROP_ATTACHED_PORT")",
+ __func__);
+ }
+ kmem_free(ap, PMCS_MAX_UA_SIZE);
+ }
+ rw_exit(&pwp->iports_lock);
+
+ return (B_TRUE);
+}
+
+/*
+ * Report observations into a particular iport's target map
+ *
+ * Called with phyp (and all descendents) locked
+ */
+static boolean_t
+pmcs_report_iport_observations(pmcs_hw_t *pwp, pmcs_iport_t *iport,
+ pmcs_phy_t *phyp)
+{
+ pmcs_phy_t *lphyp;
+ scsi_hba_tgtmap_t *tgtmap;
+ scsi_tgtmap_tgt_type_t tgt_type;
+ char *ua;
+ uint64_t wwn;
+
+ tgtmap = iport->iss_tgtmap;
+ ASSERT(tgtmap);
+
+ lphyp = phyp;
+ while (lphyp) {
+ switch (lphyp->dtype) {
+ default: /* Skip unknown PHYs. */
+ /* for non-root phys, skip to sibling */
+ goto next_phy;
+
+ case SATA:
+ case SAS:
+ tgt_type = SCSI_TGT_SCSI_DEVICE;
+ break;
+
+ case EXPANDER:
+ tgt_type = SCSI_TGT_SMP_DEVICE;
+ break;
+ }
+
+ if (lphyp->dead) {
+ goto next_phy;
+ }
+
+ wwn = pmcs_barray2wwn(lphyp->sas_address);
+ ua = scsi_wwn_to_wwnstr(wwn, 1, NULL);
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP,
+ "iport_observation: adding %s on tgtmap [0x%p] phy [0x%p]",
+ ua, (void *)tgtmap, (void *)lphyp);
+
+ if (scsi_hba_tgtmap_set_add(tgtmap, tgt_type, ua, NULL) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_MAP,
+ "%s: failed to add address %s", __func__, ua);
+ scsi_free_wwnstr(ua);
+ return (B_FALSE);
+ }
+ scsi_free_wwnstr(ua);
+
+ if (lphyp->children) {
+ if (pmcs_report_iport_observations(pwp, iport,
+ lphyp->children) == B_FALSE) {
+ return (B_FALSE);
+ }
+ }
+
+ /* for non-root phys, report siblings too */
+next_phy:
+ if (IS_ROOT_PHY(lphyp)) {
+ lphyp = NULL;
+ } else {
+ lphyp = lphyp->sibling;
+ }
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Check for and configure new devices.
+ *
+ * If the changed device is a SATA device, add a SATA device.
+ *
+ * If the changed device is a SAS device, add a SAS device.
+ *
+ * If the changed device is an EXPANDER device, do a REPORT
+ * GENERAL SMP command to find out the number of contained phys.
+ *
+ * For each number of contained phys, allocate a phy, do a
+ * DISCOVERY SMP command to find out what kind of device it
+ * is and add it to the linked list of phys on the *next* level.
+ *
+ * NOTE: pptr passed in by the caller will be a root PHY
+ */
+static int
+pmcs_configure_new_devices(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ int rval = 0;
+ pmcs_iport_t *iport;
+ pmcs_phy_t *pnext, *orig_pptr = pptr, *root_phy, *pchild;
+
+ /*
+ * First, walk through each PHY at this level
+ */
+ while (pptr) {
+ pmcs_lock_phy(pptr);
+ pnext = pptr->sibling;
+
+ /*
+ * Set the new dtype if it has changed
+ */
+ if ((pptr->pend_dtype != NEW) &&
+ (pptr->pend_dtype != pptr->dtype)) {
+ pptr->dtype = pptr->pend_dtype;
+ }
+
+ if (pptr->changed == 0 || pptr->dead || pptr->configured) {
+ goto next_phy;
+ }
+
+ /*
+ * Confirm that this target's iport is configured
+ */
+ root_phy = pmcs_get_root_phy(pptr);
+ iport = pmcs_get_iport_by_phy(pwp, root_phy);
+ if (iport == NULL) {
+ /* No iport for this tgt, restart */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: iport not yet configured, "
+ "retry discovery", __func__);
+ pnext = NULL;
+ rval = -1;
+ goto next_phy;
+ }
+
+ switch (pptr->dtype) {
+ case NOTHING:
+ pptr->changed = 0;
+ break;
+ case SATA:
+ case SAS:
+ pptr->iport = iport;
+ pmcs_new_tport(pwp, pptr);
+ break;
+ case EXPANDER:
+ pmcs_configure_expander(pwp, pptr, iport);
+ break;
+ }
+ pmcs_rele_iport(iport);
+
+ mutex_enter(&pwp->config_lock);
+ if (pwp->config_changed) {
+ mutex_exit(&pwp->config_lock);
+ pnext = NULL;
+ goto next_phy;
+ }
+ mutex_exit(&pwp->config_lock);
+
+next_phy:
+ pmcs_unlock_phy(pptr);
+ pptr = pnext;
+ }
+
+ if (rval != 0) {
+ return (rval);
+ }
+
+ /*
+ * Now walk through each PHY again, recalling ourselves if they
+ * have children
+ */
+ pptr = orig_pptr;
+ while (pptr) {
+ pmcs_lock_phy(pptr);
+ pnext = pptr->sibling;
+ pchild = pptr->children;
+ pmcs_unlock_phy(pptr);
+
+ if (pchild) {
+ rval = pmcs_configure_new_devices(pwp, pchild);
+ if (rval != 0) {
+ break;
+ }
+ }
+
+ pptr = pnext;
+ }
+
+ return (rval);
+}
+
+/*
+ * Set all phys and descendant phys as changed if changed == B_TRUE;
+ * otherwise, mark them all as not changed.
+ *
+ * Called with parent PHY locked.
+ */
+void
+pmcs_set_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, boolean_t changed,
+ int level)
+{
+ pmcs_phy_t *pptr;
+
+ if (level == 0) {
+ if (changed) {
+ PHY_CHANGED(pwp, parent);
+ } else {
+ parent->changed = 0;
+ }
+ if (parent->dtype == EXPANDER && parent->level) {
+ parent->width = 1;
+ }
+ if (parent->children) {
+ pmcs_set_changed(pwp, parent->children, changed,
+ level + 1);
+ }
+ } else {
+ pptr = parent;
+ while (pptr) {
+ if (changed) {
+ PHY_CHANGED(pwp, pptr);
+ } else {
+ pptr->changed = 0;
+ }
+ if (pptr->dtype == EXPANDER && pptr->level) {
+ pptr->width = 1;
+ }
+ if (pptr->children) {
+ pmcs_set_changed(pwp, pptr->children, changed,
+ level + 1);
+ }
+ pptr = pptr->sibling;
+ }
+ }
+}
+
+/*
+ * Take the passed phy and mark it and its descendants as dead.
+ * Fire up reconfiguration to abort commands and bury it.
+ *
+ * Called with the parent PHY locked.
+ */
+void
+pmcs_kill_changed(pmcs_hw_t *pwp, pmcs_phy_t *parent, int level)
+{
+ pmcs_phy_t *pptr = parent;
+
+ while (pptr) {
+ pptr->link_rate = 0;
+ pptr->abort_sent = 0;
+ pptr->abort_pending = 1;
+ SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
+ pptr->need_rl_ext = 0;
+
+ if (pptr->dead == 0) {
+ PHY_CHANGED(pwp, pptr);
+ RESTART_DISCOVERY(pwp);
+ }
+
+ pptr->dead = 1;
+
+ if (pptr->children) {
+ pmcs_kill_changed(pwp, pptr->children, level + 1);
+ }
+
+ /*
+ * Only kill siblings at level > 0
+ */
+ if (level == 0) {
+ return;
+ }
+
+ pptr = pptr->sibling;
+ }
+}
+
+/*
+ * Go through every PHY and clear any that are dead (unless they're expanders)
+ */
+static void
+pmcs_clear_phys(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ pmcs_phy_t *pnext, *phyp;
+
+ phyp = pptr;
+ while (phyp) {
+ if (IS_ROOT_PHY(phyp)) {
+ pmcs_lock_phy(phyp);
+ }
+
+ if ((phyp->dtype != EXPANDER) && phyp->dead) {
+ pmcs_clear_phy(pwp, phyp);
+ }
+
+ if (phyp->children) {
+ pmcs_clear_phys(pwp, phyp->children);
+ }
+
+ pnext = phyp->sibling;
+
+ if (IS_ROOT_PHY(phyp)) {
+ pmcs_unlock_phy(phyp);
+ }
+
+ phyp = pnext;
+ }
+}
+
+/*
+ * Clear volatile parts of a phy. Called with PHY locked.
+ */
+void
+pmcs_clear_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: %s", __func__, pptr->path);
+ ASSERT(mutex_owned(&pptr->phy_lock));
+ /* keep sibling */
+ /* keep children */
+ /* keep parent */
+ pptr->device_id = PMCS_INVALID_DEVICE_ID;
+ /* keep hw_event_ack */
+ pptr->ncphy = 0;
+ /* keep phynum */
+ pptr->width = 0;
+ pptr->ds_recovery_retries = 0;
+ /* keep dtype */
+ pptr->config_stop = 0;
+ pptr->spinup_hold = 0;
+ pptr->atdt = 0;
+ /* keep portid */
+ pptr->link_rate = 0;
+ pptr->valid_device_id = 0;
+ pptr->abort_sent = 0;
+ pptr->abort_pending = 0;
+ pptr->need_rl_ext = 0;
+ pptr->subsidiary = 0;
+ pptr->configured = 0;
+ /* Only mark dead if it's not a root PHY and its dtype isn't NOTHING */
+ /* XXX: What about directly attached disks? */
+ if (!IS_ROOT_PHY(pptr) && (pptr->dtype != NOTHING))
+ pptr->dead = 1;
+ pptr->changed = 0;
+ /* keep SAS address */
+ /* keep path */
+ /* keep ref_count */
+ /* Don't clear iport on root PHYs - they are handled in pmcs_intr.c */
+ if (!IS_ROOT_PHY(pptr)) {
+ pptr->iport = NULL;
+ }
+}
+
+/*
+ * Allocate softstate for this target if there isn't already one. If there
+ * is, just redo our internal configuration. If it is actually "new", we'll
+ * soon get a tran_tgt_init for it.
+ *
+ * Called with PHY locked.
+ */
+static void
+pmcs_new_tport(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: phy 0x%p @ %s", __func__,
+ (void *)pptr, pptr->path);
+
+ if (pmcs_configure_phy(pwp, pptr) == B_FALSE) {
+ /*
+ * If the config failed, mark the PHY as changed.
+ */
+ PHY_CHANGED(pwp, pptr);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: pmcs_configure_phy failed for phy 0x%p", __func__,
+ (void *)pptr);
+ return;
+ }
+
+ /* Mark PHY as no longer changed */
+ pptr->changed = 0;
+
+ /*
+ * If the PHY has no target pointer, see if there's a dead PHY that
+ * matches.
+ */
+ if (pptr->target == NULL) {
+ pmcs_reap_dead_phy(pptr);
+ }
+
+ /*
+ * Only assign the device if there is a target for this PHY with a
+ * matching SAS address. If an iport is disconnected from one piece
+ * of storage and connected to another within the iport stabilization
+ * time, we can get the PHY/target mismatch situation.
+ *
+ * Otherwise, it'll get done in tran_tgt_init.
+ */
+ if (pptr->target) {
+ mutex_enter(&pptr->target->statlock);
+ if (pmcs_phy_target_match(pptr) == B_FALSE) {
+ mutex_exit(&pptr->target->statlock);
+ if (!IS_ROOT_PHY(pptr)) {
+ pmcs_dec_phy_ref_count(pptr);
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Not assigning existing tgt %p for PHY %p "
+ "(WWN mismatch)", __func__, (void *)pptr->target,
+ (void *)pptr);
+ pptr->target = NULL;
+ return;
+ }
+
+ if (!pmcs_assign_device(pwp, pptr->target)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: pmcs_assign_device failed for target 0x%p",
+ __func__, (void *)pptr->target);
+ }
+ mutex_exit(&pptr->target->statlock);
+ }
+}
+
+/*
+ * Called with PHY lock held.
+ */
+static boolean_t
+pmcs_configure_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ char *dtype;
+
+ ASSERT(mutex_owned(&pptr->phy_lock));
+
+ /*
+ * Mark this device as no longer changed.
+ */
+ pptr->changed = 0;
+
+ /*
+ * If we don't have a device handle, get one.
+ */
+ if (pmcs_get_device_handle(pwp, pptr)) {
+ return (B_FALSE);
+ }
+
+ pptr->configured = 1;
+
+ switch (pptr->dtype) {
+ case SAS:
+ dtype = "SAS";
+ break;
+ case SATA:
+ dtype = "SATA";
+ break;
+ case EXPANDER:
+ dtype = "SMP";
+ break;
+ default:
+ dtype = "???";
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "config_dev: %s dev %s "
+ SAS_ADDR_FMT " dev id 0x%x lr 0x%x", dtype, pptr->path,
+ SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);
+
+ return (B_TRUE);
+}
+
+/*
+ * Called with PHY locked
+ */
+static void
+pmcs_configure_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, pmcs_iport_t *iport)
+{
+ pmcs_phy_t *ctmp, *clist = NULL, *cnext;
+ int result, i, nphy = 0;
+ boolean_t root_phy = B_FALSE;
+
+ ASSERT(iport);
+
+ /*
+ * Step 1- clear our "changed" bit. If we need to retry/restart due
+ * to resource shortages, we'll set it again. While we're doing
+ * configuration, other events may set it again as well. If the PHY
+ * is a root PHY and is currently marked as having changed, reset the
+ * config_stop timer as well.
+ */
+ if (IS_ROOT_PHY(pptr) && pptr->changed) {
+ pptr->config_stop = ddi_get_lbolt() +
+ drv_usectohz(PMCS_MAX_CONFIG_TIME);
+ }
+ pptr->changed = 0;
+
+ /*
+ * Step 2- make sure we don't overflow
+ */
+ if (pptr->level == PMCS_MAX_XPND-1) {
+ pmcs_prt(pwp, PMCS_PRT_WARN,
+ "%s: SAS expansion tree too deep", __func__);
+ return;
+ }
+
+ /*
+ * Step 3- Check if this expander is part of a wide phy that has
+ * already been configured.
+ *
+	 * This is the case when there is another EXPANDER device at this
+	 * level with the same SAS address that is not already marked as a
+	 * subsidiary phy, and whose parent (if there are parents) has the
+	 * same SAS address as our parent.
+ */
+ if (!IS_ROOT_PHY(pptr)) {
+ /*
+ * No need to lock the parent here because we're in discovery
+ * and the only time a PHY's children pointer can change is
+ * in discovery; either in pmcs_clear_expander (which has
+ * already been called) or here, down below. Plus, trying to
+ * grab the parent's lock here can cause deadlock.
+ */
+ ctmp = pptr->parent->children;
+ } else {
+ ctmp = pwp->root_phys;
+ root_phy = B_TRUE;
+ }
+
+ while (ctmp) {
+ /*
+ * If we've checked all PHYs up to pptr, we stop. Otherwise,
+ * we'll be checking for a primary PHY with a higher PHY
+ * number than pptr, which will never happen. The primary
+ * PHY on non-root expanders will ALWAYS be the lowest
+ * numbered PHY.
+ */
+ if (ctmp == pptr) {
+ break;
+ }
+
+ /*
+ * If pptr and ctmp are root PHYs, just grab the mutex on
+ * ctmp. No need to lock the entire tree. If they are not
+ * root PHYs, there is no need to lock since a non-root PHY's
+ * SAS address and other characteristics can only change in
+ * discovery anyway.
+ */
+ if (root_phy) {
+ mutex_enter(&ctmp->phy_lock);
+ }
+
+ if (ctmp->dtype == EXPANDER && ctmp->width &&
+ memcmp(ctmp->sas_address, pptr->sas_address, 8) == 0) {
+ int widephy = 0;
+ /*
+			 * If these phys are not root PHYs, compare their
+			 * parents' SAS addresses too.
+ */
+ if (!root_phy) {
+ if (memcmp(ctmp->parent->sas_address,
+ pptr->parent->sas_address, 8) == 0) {
+ widephy = 1;
+ }
+ } else {
+ widephy = 1;
+ }
+ if (widephy) {
+ ctmp->width++;
+ pptr->subsidiary = 1;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: PHY "
+ "%s part of wide PHY %s (now %d wide)",
+ __func__, pptr->path, ctmp->path,
+ ctmp->width);
+ if (root_phy) {
+ mutex_exit(&ctmp->phy_lock);
+ }
+ return;
+ }
+ }
+
+ cnext = ctmp->sibling;
+ if (root_phy) {
+ mutex_exit(&ctmp->phy_lock);
+ }
+ ctmp = cnext;
+ }
+
+ /*
+ * Step 4- If we don't have a device handle, get one. Since this
+ * is the primary PHY, make sure subsidiary is cleared.
+ */
+ pptr->subsidiary = 0;
+ if (pmcs_get_device_handle(pwp, pptr)) {
+ goto out;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "Config expander %s "
+ SAS_ADDR_FMT " dev id 0x%x lr 0x%x", pptr->path,
+ SAS_ADDR_PRT(pptr->sas_address), pptr->device_id, pptr->link_rate);
+
+ /*
+ * Step 5- figure out how many phys are in this expander.
+ */
+ nphy = pmcs_expander_get_nphy(pwp, pptr);
+ if (nphy <= 0) {
+ if (nphy == 0 && ddi_get_lbolt() < pptr->config_stop) {
+ PHY_CHANGED(pwp, pptr);
+ RESTART_DISCOVERY(pwp);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Retries exhausted for %s, killing", __func__,
+ pptr->path);
+ pptr->config_stop = 0;
+ pmcs_kill_changed(pwp, pptr, 0);
+ }
+ goto out;
+ }
+
+ /*
+ * Step 6- Allocate a list of phys for this expander and figure out
+ * what each one is.
+ */
+ for (i = 0; i < nphy; i++) {
+ ctmp = kmem_cache_alloc(pwp->phy_cache, KM_SLEEP);
+ bzero(ctmp, sizeof (pmcs_phy_t));
+ ctmp->device_id = PMCS_INVALID_DEVICE_ID;
+ ctmp->sibling = clist;
+ ctmp->pend_dtype = NEW; /* Init pending dtype */
+ ctmp->config_stop = ddi_get_lbolt() +
+ drv_usectohz(PMCS_MAX_CONFIG_TIME);
+ clist = ctmp;
+ }
+
+ mutex_enter(&pwp->config_lock);
+ if (pwp->config_changed) {
+ RESTART_DISCOVERY_LOCKED(pwp);
+ mutex_exit(&pwp->config_lock);
+ /*
+ * Clean up the newly allocated PHYs and return
+ */
+ while (clist) {
+ ctmp = clist->sibling;
+ kmem_cache_free(pwp->phy_cache, clist);
+ clist = ctmp;
+ }
+ return;
+ }
+ mutex_exit(&pwp->config_lock);
+
+ /*
+ * Step 7- Now fill in the rest of the static portions of the phy.
+ */
+ for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
+ ctmp->parent = pptr;
+ ctmp->pwp = pwp;
+ ctmp->level = pptr->level+1;
+ ctmp->portid = pptr->portid;
+ if (ctmp->tolerates_sas2) {
+ ASSERT(i < SAS2_PHYNUM_MAX);
+ ctmp->phynum = i & SAS2_PHYNUM_MASK;
+ } else {
+ ASSERT(i < SAS_PHYNUM_MAX);
+ ctmp->phynum = i & SAS_PHYNUM_MASK;
+ }
+ pmcs_phy_name(pwp, ctmp, ctmp->path, sizeof (ctmp->path));
+ pmcs_lock_phy(ctmp);
+ }
+
+ /*
+ * Step 8- Discover things about each phy in the expander.
+ */
+ for (i = 0, ctmp = clist; ctmp; ctmp = ctmp->sibling, i++) {
+ result = pmcs_expander_content_discover(pwp, pptr, ctmp);
+ if (result <= 0) {
+ if (ddi_get_lbolt() < pptr->config_stop) {
+ PHY_CHANGED(pwp, pptr);
+ RESTART_DISCOVERY(pwp);
+ } else {
+ pptr->config_stop = 0;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Retries exhausted for %s, killing",
+ __func__, pptr->path);
+ pmcs_kill_changed(pwp, pptr, 0);
+ }
+ goto out;
+ }
+
+ /* Set pend_dtype to dtype for 1st time initialization */
+ ctmp->pend_dtype = ctmp->dtype;
+ }
+
+ /*
+ * Step 9- Install the new list on the next level. There should be
+ * no children pointer on this PHY. If there is, we'd need to know
+ * how it happened (The expander suddenly got more PHYs?).
+ */
+ ASSERT(pptr->children == NULL);
+ if (pptr->children != NULL) {
+		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Already child PHYs attached "
+		    "to PHY %s: This should never happen", __func__,
+		    pptr->path);
+ goto out;
+ } else {
+ pptr->children = clist;
+ }
+
+ clist = NULL;
+ pptr->ncphy = nphy;
+ pptr->configured = 1;
+
+ /*
+ * We only set width if we're greater than level 0.
+ */
+ if (pptr->level) {
+ pptr->width = 1;
+ }
+
+ /*
+ * Now tell the rest of the world about us, as an SMP node.
+ */
+ pptr->iport = iport;
+ pmcs_new_tport(pwp, pptr);
+
+out:
+ while (clist) {
+ ctmp = clist->sibling;
+ pmcs_unlock_phy(clist);
+ kmem_cache_free(pwp->phy_cache, clist);
+ clist = ctmp;
+ }
+}
+
+/*
+ * 2. Check expanders marked changed (but not dead) to see if they still have
+ * the same number of phys and the same SAS address. Mark them, their subsidiary
+ * phys (if wide) and their descendants dead if anything has changed. Check
+ * the devices they contain to see if *they* have changed. If they've changed
+ * from type NOTHING, we leave them marked changed to be configured later
+ * (picking up a new SAS address and link rate if possible). Otherwise, any
+ * change in type, SAS address, or removal of the target role will cause us to
+ * mark them (and their descendants) as dead and cause any pending commands
+ * and associated devices to be removed.
+ *
+ * Called with PHY (pptr) locked.
+ */
+
+static void
+pmcs_check_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ int nphy, result;
+ pmcs_phy_t *ctmp, *local, *local_list = NULL, *local_tail = NULL;
+ boolean_t kill_changed, changed;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: check %s", __func__, pptr->path);
+
+ /*
+ * Step 1: Mark phy as not changed. We will mark it changed if we need
+ * to retry.
+ */
+ pptr->changed = 0;
+
+ /*
+ * Reset the config_stop time. Although we're not actually configuring
+ * anything here, we do want some indication of when to give up trying
+ * if we can't communicate with the expander.
+ */
+ pptr->config_stop = ddi_get_lbolt() +
+ drv_usectohz(PMCS_MAX_CONFIG_TIME);
+
+ /*
+ * Step 2: Figure out how many phys are in this expander. If
+ * pmcs_expander_get_nphy returns 0 we ran out of resources,
+ * so reschedule and try later. If it returns another error,
+ * just return.
+ */
+ nphy = pmcs_expander_get_nphy(pwp, pptr);
+ if (nphy <= 0) {
+ if ((nphy == 0) && (ddi_get_lbolt() < pptr->config_stop)) {
+ PHY_CHANGED(pwp, pptr);
+ RESTART_DISCOVERY(pwp);
+ } else {
+ pptr->config_stop = 0;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Retries exhausted for %s, killing", __func__,
+ pptr->path);
+ pmcs_kill_changed(pwp, pptr, 0);
+ }
+ return;
+ }
+
+ /*
+	 * Step 3: If the number of phys doesn't agree, kill the old sub-tree.
+ */
+ if (nphy != pptr->ncphy) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: number of contained phys for %s changed from %d to %d",
+ __func__, pptr->path, pptr->ncphy, nphy);
+ /*
+ * Force a rescan of this expander after dead contents
+ * are cleared and removed.
+ */
+ pmcs_kill_changed(pwp, pptr, 0);
+ return;
+ }
+
+ /*
+ * Step 4: if we're at the bottom of the stack, we're done
+ * (we can't have any levels below us)
+ */
+ if (pptr->level == PMCS_MAX_XPND-1) {
+ return;
+ }
+
+ /*
+ * Step 5: Discover things about each phy in this expander. We do
+ * this by walking the current list of contained phys and doing a
+ * content discovery for it to a local phy.
+ */
+ ctmp = pptr->children;
+ ASSERT(ctmp);
+ if (ctmp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: No children attached to expander @ %s?", __func__,
+ pptr->path);
+ return;
+ }
+
+ while (ctmp) {
+ /*
+ * Allocate a local PHY to contain the proposed new contents
+ * and link it to the rest of the local PHYs so that they
+ * can all be freed later.
+ */
+ local = pmcs_clone_phy(ctmp);
+
+ if (local_list == NULL) {
+ local_list = local;
+ local_tail = local;
+ } else {
+ local_tail->sibling = local;
+ local_tail = local;
+ }
+
+ /*
+ * Need to lock the local PHY since pmcs_expander_content_
+ * discovery may call pmcs_clear_phy on it, which expects
+ * the PHY to be locked.
+ */
+ pmcs_lock_phy(local);
+ result = pmcs_expander_content_discover(pwp, pptr, local);
+ pmcs_unlock_phy(local);
+ if (result <= 0) {
+ if (ddi_get_lbolt() < pptr->config_stop) {
+ PHY_CHANGED(pwp, pptr);
+ RESTART_DISCOVERY(pwp);
+ } else {
+ pptr->config_stop = 0;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Retries exhausted for %s, killing",
+ __func__, pptr->path);
+ pmcs_kill_changed(pwp, pptr, 0);
+ }
+
+ /*
+ * Release all the local PHYs that we allocated.
+ */
+ pmcs_free_phys(pwp, local_list);
+ return;
+ }
+
+ ctmp = ctmp->sibling;
+ }
+
+ /*
+ * Step 6: Compare the local PHY's contents to our current PHY. If
+ * there are changes, take the appropriate action.
+ * This is done in two steps (step 5 above, and 6 here) so that if we
+ * have to bail during this process (e.g. pmcs_expander_content_discover
+ * fails), we haven't actually changed the state of any of the real
+ * PHYs. Next time we come through here, we'll be starting over from
+ * scratch. This keeps us from marking a changed PHY as no longer
+ * changed, but then having to bail only to come back next time and
+ * think that the PHY hadn't changed. If this were to happen, we
+ * would fail to properly configure the device behind this PHY.
+ */
+ local = local_list;
+ ctmp = pptr->children;
+
+ while (ctmp) {
+ changed = B_FALSE;
+ kill_changed = B_FALSE;
+
+ /*
+ * We set local to local_list prior to this loop so that we
+ * can simply walk the local_list while we walk this list. The
+ * two lists should be completely in sync.
+ *
+ * Clear the changed flag here.
+ */
+ ctmp->changed = 0;
+
+ if (ctmp->dtype != local->dtype) {
+ if (ctmp->dtype != NOTHING) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: %s "
+ "type changed from %s to %s (killing)",
+ __func__, ctmp->path, PHY_TYPE(ctmp),
+ PHY_TYPE(local));
+ /*
+ * Force a rescan of this expander after dead
+ * contents are cleared and removed.
+ */
+ changed = B_TRUE;
+ kill_changed = B_TRUE;
+ } else {
+ changed = B_TRUE;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: %s type changed from NOTHING to %s",
+ __func__, ctmp->path, PHY_TYPE(local));
+ }
+
+ } else if (ctmp->atdt != local->atdt) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: %s attached "
+ "device type changed from %d to %d (killing)",
+ __func__, ctmp->path, ctmp->atdt, local->atdt);
+ /*
+ * Force a rescan of this expander after dead
+ * contents are cleared and removed.
+ */
+ changed = B_TRUE;
+
+ if (local->atdt == 0) {
+ kill_changed = B_TRUE;
+ }
+ } else if (ctmp->link_rate != local->link_rate) {
+ pmcs_prt(pwp, PMCS_PRT_INFO, "%s: %s changed speed from"
+ " %s to %s", __func__, ctmp->path,
+ pmcs_get_rate(ctmp->link_rate),
+ pmcs_get_rate(local->link_rate));
+ /* If the speed changed from invalid, force rescan */
+ if (!PMCS_VALID_LINK_RATE(ctmp->link_rate)) {
+ changed = B_TRUE;
+ RESTART_DISCOVERY(pwp);
+ } else {
+ /* Just update to the new link rate */
+ ctmp->link_rate = local->link_rate;
+ }
+
+ if (!PMCS_VALID_LINK_RATE(local->link_rate)) {
+ kill_changed = B_TRUE;
+ }
+ } else if (memcmp(ctmp->sas_address, local->sas_address,
+ sizeof (ctmp->sas_address)) != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: SASAddr "
+ "for %s changed from " SAS_ADDR_FMT " to "
+ SAS_ADDR_FMT " (kill old tree)", __func__,
+ ctmp->path, SAS_ADDR_PRT(ctmp->sas_address),
+ SAS_ADDR_PRT(local->sas_address));
+ /*
+ * Force a rescan of this expander after dead
+ * contents are cleared and removed.
+ */
+ changed = B_TRUE;
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: %s looks the same (type %s)",
+ __func__, ctmp->path, PHY_TYPE(ctmp));
+ /*
+ * If EXPANDER, still mark it changed so we
+ * re-evaluate its contents. If it's not an expander,
+ * but it hasn't been configured, also mark it as
+ * changed so that it will undergo configuration.
+ */
+ if (ctmp->dtype == EXPANDER) {
+ changed = B_TRUE;
+ } else if ((ctmp->dtype != NOTHING) &&
+ !ctmp->configured) {
+ ctmp->changed = 1;
+ } else {
+ /* It simply hasn't changed */
+ ctmp->changed = 0;
+ }
+ }
+
+ /*
+ * If the PHY changed, call pmcs_kill_changed if indicated,
+ * update its contents to reflect its current state and mark it
+ * as changed.
+ */
+ if (changed) {
+ /*
+ * pmcs_kill_changed will mark the PHY as changed, so
+ * only do PHY_CHANGED if we did not do kill_changed.
+ */
+ if (kill_changed) {
+ pmcs_kill_changed(pwp, ctmp, 0);
+ } else {
+ /*
+ * If we're not killing the device, it's not
+ * dead. Mark the PHY as changed.
+ */
+ PHY_CHANGED(pwp, ctmp);
+
+ if (ctmp->dead) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Unmarking PHY %s dead, "
+ "restarting discovery",
+ __func__, ctmp->path);
+ ctmp->dead = 0;
+ RESTART_DISCOVERY(pwp);
+ }
+ }
+
+ /*
+ * If the dtype of this PHY is now NOTHING, mark it as
+ * unconfigured. Set pend_dtype to what the new dtype
+ * is. It'll get updated at the end of the discovery
+ * process.
+ */
+ if (local->dtype == NOTHING) {
+ bzero(ctmp->sas_address,
+ sizeof (local->sas_address));
+ ctmp->atdt = 0;
+ ctmp->link_rate = 0;
+ ctmp->pend_dtype = NOTHING;
+ ctmp->configured = 0;
+ } else {
+ (void) memcpy(ctmp->sas_address,
+ local->sas_address,
+ sizeof (local->sas_address));
+ ctmp->atdt = local->atdt;
+ ctmp->link_rate = local->link_rate;
+ ctmp->pend_dtype = local->dtype;
+ }
+ }
+
+ local = local->sibling;
+ ctmp = ctmp->sibling;
+ }
+
+ /*
+ * If we got to here, that means we were able to see all the PHYs
+ * and we can now update all of the real PHYs with the information
+ * we got on the local PHYs. Once that's done, free all the local
+ * PHYs.
+ */
+
+ pmcs_free_phys(pwp, local_list);
+}
+
+/*
+ * Top level routine to check expanders. We call pmcs_check_expander for
+ * each expander. Since we're not doing any configuration right now, it
+ * doesn't matter if this is breadth-first.
+ */
+static boolean_t
+pmcs_check_expanders(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ pmcs_phy_t *phyp, *pnext, *pchild;
+ boolean_t config_changed = B_FALSE;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: %s", __func__, pptr->path);
+
+ /*
+ * Check each expander at this level
+ */
+ phyp = pptr;
+ while (phyp && !config_changed) {
+ pmcs_lock_phy(phyp);
+
+ if ((phyp->dtype == EXPANDER) && phyp->changed &&
+ !phyp->dead && !phyp->subsidiary &&
+ phyp->configured) {
+ pmcs_check_expander(pwp, phyp);
+ }
+
+ pnext = phyp->sibling;
+ pmcs_unlock_phy(phyp);
+
+ mutex_enter(&pwp->config_lock);
+ config_changed = pwp->config_changed;
+ mutex_exit(&pwp->config_lock);
+
+ phyp = pnext;
+ }
+
+ if (config_changed) {
+ return (config_changed);
+ }
+
+ /*
+ * Now check the children
+ */
+ phyp = pptr;
+ while (phyp && !config_changed) {
+ pmcs_lock_phy(phyp);
+ pnext = phyp->sibling;
+ pchild = phyp->children;
+ pmcs_unlock_phy(phyp);
+
+ if (pchild) {
+ (void) pmcs_check_expanders(pwp, pchild);
+ }
+
+ mutex_enter(&pwp->config_lock);
+ config_changed = pwp->config_changed;
+ mutex_exit(&pwp->config_lock);
+
+ phyp = pnext;
+ }
+
+ /*
+ * We're done
+ */
+ return (config_changed);
+}
+
+/*
+ * Called with softstate and PHY locked
+ */
+static void
+pmcs_clear_expander(pmcs_hw_t *pwp, pmcs_phy_t *pptr, int level)
+{
+ pmcs_phy_t *ctmp;
+
+ ASSERT(mutex_owned(&pwp->lock));
+ ASSERT(mutex_owned(&pptr->phy_lock));
+ ASSERT(pptr->level < PMCS_MAX_XPND - 1);
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: checking %s", __func__,
+ pptr->path);
+
+ ctmp = pptr->children;
+ while (ctmp) {
+ /*
+ * If the expander is dead, mark its children dead
+ */
+ if (pptr->dead) {
+ ctmp->dead = 1;
+ }
+ if (ctmp->dtype == EXPANDER) {
+ pmcs_clear_expander(pwp, ctmp, level + 1);
+ }
+ ctmp = ctmp->sibling;
+ }
+
+ /*
+ * If this expander is not dead, we're done here.
+ */
+ if (!pptr->dead) {
+ return;
+ }
+
+ /*
+ * Now snip out the list of children below us and release them
+ */
+ ctmp = pptr->children;
+ while (ctmp) {
+ pmcs_phy_t *nxt = ctmp->sibling;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: dead PHY 0x%p (%s) (ref_count %d)", __func__,
+ (void *)ctmp, ctmp->path, ctmp->ref_count);
+ /*
+ * Put this PHY on the dead PHY list for the watchdog to
+ * clean up after any outstanding work has completed.
+ */
+ mutex_enter(&pwp->dead_phylist_lock);
+ ctmp->dead_next = pwp->dead_phys;
+ pwp->dead_phys = ctmp;
+ mutex_exit(&pwp->dead_phylist_lock);
+ pmcs_unlock_phy(ctmp);
+ ctmp = nxt;
+ }
+
+ pptr->children = NULL;
+
+ /*
+ * Clear subsidiary phys as well. Getting the parent's PHY lock
+ * is only necessary if level == 0 since otherwise the parent is
+ * already locked.
+ */
+ if (!IS_ROOT_PHY(pptr)) {
+ if (level == 0) {
+ mutex_enter(&pptr->parent->phy_lock);
+ }
+ ctmp = pptr->parent->children;
+ if (level == 0) {
+ mutex_exit(&pptr->parent->phy_lock);
+ }
+ } else {
+ ctmp = pwp->root_phys;
+ }
+
+ while (ctmp) {
+ if (ctmp == pptr) {
+ ctmp = ctmp->sibling;
+ continue;
+ }
+ /*
+ * We only need to lock subsidiary PHYs on the level 0
+ * expander. Any children of that expander, subsidiaries or
+ * not, will already be locked.
+ */
+ if (level == 0) {
+ pmcs_lock_phy(ctmp);
+ }
+ if (ctmp->dtype != EXPANDER || ctmp->subsidiary == 0 ||
+ memcmp(ctmp->sas_address, pptr->sas_address,
+ sizeof (ctmp->sas_address)) != 0) {
+ if (level == 0) {
+ pmcs_unlock_phy(ctmp);
+ }
+ ctmp = ctmp->sibling;
+ continue;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: subsidiary %s",
+ __func__, ctmp->path);
+ pmcs_clear_phy(pwp, ctmp);
+ if (level == 0) {
+ pmcs_unlock_phy(ctmp);
+ }
+ ctmp = ctmp->sibling;
+ }
+
+ pmcs_clear_phy(pwp, pptr);
+}
+
+/*
+ * Called with PHY locked and with scratch acquired. We return 0 if
+ * we fail to allocate resources or notice that the configuration
+ * count changed while we were running the command. We return
+ * less than zero if we had an I/O error or received an unsupported
+ * configuration. Otherwise we return the number of phys in the
+ * expander.
+ */
+#define DFM(m, y) if (m == NULL) m = y
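+/*
+ * DFM only sets the message pointer if it is still NULL, so in the
+ * fall-through switch statements below the first matching (most specific)
+ * case supplies the failure text and the later cases leave it alone.
+ */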
+static int
+pmcs_expander_get_nphy(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ struct pmcwork *pwrk;
+ char buf[64];
+ const uint_t rdoff = 0x100; /* returned data offset */
+ smp_response_frame_t *srf;
+ smp_report_general_resp_t *srgr;
+ uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status, ival;
+ int result;
+
+ ival = 0x40001100;
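+	/*
+	 * msg[4] below carries the start of the SMP request frame in
+	 * big-endian byte order: frame type 0x40 (SMP REQUEST), function
+	 * 0x00 (REPORT GENERAL), and what appears to be a SAS2-style
+	 * allocated response length in the 0x1100 portion; that portion is
+	 * stripped further down (ival &= ~0xff00) if the expander rejects
+	 * the SAS2-flavored request.
+	 */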
+again:
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
+ if (pwrk == NULL) {
+ result = 0;
+ goto out;
+ }
+ (void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
+ pwrk->arg = pwp->scratch;
+ pwrk->dtype = pptr->dtype;
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: GET_IQ_ENTRY failed",
+ __func__);
+ pmcs_pwork(pwp, pwrk);
+ result = 0;
+ goto out;
+ }
+
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(pptr->device_id);
+ msg[3] = LE_32((4 << SMP_REQUEST_LENGTH_SHIFT) | SMP_INDIRECT_RESPONSE);
+ /*
+ * Send SMP REPORT GENERAL (of either SAS1.1 or SAS2 flavors).
+ */
+ msg[4] = BE_32(ival);
+ msg[5] = 0;
+ msg[6] = 0;
+ msg[7] = 0;
+ msg[8] = 0;
+ msg[9] = 0;
+ msg[10] = 0;
+ msg[11] = 0;
+ msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
+ msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
+ msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff);
+ msg[15] = 0;
+
+ COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ htag = pwrk->htag;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
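+	/*
+	 * As in pmcs_expander_content_discover(), drop the PHY lock while
+	 * waiting so other completions aren't potentially blocked.
+	 */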
+ pmcs_unlock_phy(pptr);
+ WAIT_FOR(pwrk, 1000, result);
+ pmcs_lock_phy(pptr);
+ pmcs_pwork(pwp, pwrk);
+
+ mutex_enter(&pwp->config_lock);
+ if (pwp->config_changed) {
+ RESTART_DISCOVERY_LOCKED(pwp);
+ mutex_exit(&pwp->config_lock);
+ result = 0;
+ goto out;
+ }
+ mutex_exit(&pwp->config_lock);
+
+ if (result) {
+ pmcs_timed_out(pwp, htag, __func__);
+ if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Unable to issue SMP ABORT for htag 0x%08x",
+ __func__, htag);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Issuing SMP ABORT for htag 0x%08x",
+ __func__, htag);
+ }
+ result = 0;
+ goto out;
+ }
+ ptr = (void *)pwp->scratch;
+ status = LE_32(ptr[2]);
+ if (status == PMCOUT_STATUS_UNDERFLOW ||
+ status == PMCOUT_STATUS_OVERFLOW) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW,
+ "%s: over/underflow", __func__);
+ status = PMCOUT_STATUS_OK;
+ }
+ srf = (smp_response_frame_t *)&((uint32_t *)pwp->scratch)[rdoff >> 2];
+ srgr = (smp_report_general_resp_t *)
+ &((uint32_t *)pwp->scratch)[(rdoff >> 2)+1];
+
+ if (status != PMCOUT_STATUS_OK) {
+ char *nag = NULL;
+ (void) snprintf(buf, sizeof (buf),
+ "%s: SMP op failed (0x%x)", __func__, status);
+ switch (status) {
+ case PMCOUT_STATUS_IO_PORT_IN_RESET:
+ DFM(nag, "I/O Port In Reset");
+ /* FALLTHROUGH */
+ case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
+ DFM(nag, "Hardware Timeout");
+ /* FALLTHROUGH */
+ case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
+ DFM(nag, "Internal SMP Resource Failure");
+ /* FALLTHROUGH */
+ case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
+ DFM(nag, "PHY Not Ready");
+ /* FALLTHROUGH */
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ DFM(nag, "Connection Rate Not Supported");
+ /* FALLTHROUGH */
+ case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
+ DFM(nag, "Open Retry Timeout");
+ /* FALLTHROUGH */
+ case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
+ DFM(nag, "Response Connection Error");
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: expander %s SMP operation failed (%s)",
+ __func__, pptr->path, nag);
+ break;
+
+ /*
+ * For the IO_DS_NON_OPERATIONAL case, we need to kick off
+ * device state recovery and return 0 so that the caller
+ * doesn't assume this expander is dead for good.
+ */
+ case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: {
+ pmcs_xscsi_t *xp = pptr->target;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: expander %s device state non-operational",
+ __func__, pptr->path);
+
+ if (xp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: No target to do DS recovery for PHY "
+ "%p (%s), attempting PHY hard reset",
+ __func__, (void *)pptr, pptr->path);
+ (void) pmcs_reset_phy(pwp, pptr,
+ PMCS_PHYOP_HARD_RESET);
+ break;
+ }
+
+ mutex_enter(&xp->statlock);
+ pmcs_start_dev_state_recovery(xp, pptr);
+ mutex_exit(&xp->statlock);
+ break;
+ }
+
+ default:
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr);
+ result = -EIO;
+ break;
+ }
+ } else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: bad response frame type 0x%x",
+ __func__, srf->srf_frame_type);
+ result = -EINVAL;
+ } else if (srf->srf_function != SMP_FUNC_REPORT_GENERAL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: bad response function 0x%x",
+ __func__, srf->srf_function);
+ result = -EINVAL;
+ } else if (srf->srf_result != 0) {
+ /*
+		 * If the failure code is 3 and we were using a SAS2.0
+		 * allocation length value, retry the request without it.
+ */
+ if (srf->srf_result == 3 && (ival & 0xff00)) {
+ ival &= ~0xff00;
+			pmcs_prt(pwp, PMCS_PRT_DEBUG,
+			    "%s: err 0x%x with SAS2 request - retrying with SAS1",
+			    __func__, srf->srf_result);
+ goto again;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: bad response 0x%x",
+ __func__, srf->srf_result);
+ result = -EINVAL;
+ } else if (srgr->srgr_configuring) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: expander at phy %s is still configuring",
+ __func__, pptr->path);
+ result = 0;
+ } else {
+ result = srgr->srgr_number_of_phys;
+ if (ival & 0xff00) {
+ pptr->tolerates_sas2 = 1;
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s has %d phys and %s SAS2", pptr->path, result,
+		    pptr->tolerates_sas2 ? "tolerates" : "does not tolerate");
+ }
+out:
+ return (result);
+}
+
+/*
+ * Called with expander locked (and thus, pptr) as well as all PHYs up to
+ * the root, and scratch acquired. Return 0 if we fail to allocate resources
+ * or notice that the configuration changed while we were running the command.
+ *
+ * We return less than zero if we had an I/O error or received an
+ * unsupported configuration.
+ */
+static int
+pmcs_expander_content_discover(pmcs_hw_t *pwp, pmcs_phy_t *expander,
+ pmcs_phy_t *pptr)
+{
+ struct pmcwork *pwrk;
+ char buf[64];
+ uint8_t sas_address[8];
+ uint8_t att_sas_address[8];
+ smp_response_frame_t *srf;
+ smp_discover_resp_t *sdr;
+ const uint_t rdoff = 0x100; /* returned data offset */
+ uint8_t *roff;
+ uint32_t status, *ptr, msg[PMCS_MSG_SIZE], htag;
+ int result;
+ uint8_t ini_support;
+ uint8_t tgt_support;
+
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, expander);
+ if (pwrk == NULL) {
+ result = 0;
+ goto out;
+ }
+ (void) memset(pwp->scratch, 0x77, PMCS_SCRATCH_SIZE);
+ pwrk->arg = pwp->scratch;
+ pwrk->dtype = expander->dtype;
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(expander->device_id);
+ msg[3] = LE_32((12 << SMP_REQUEST_LENGTH_SHIFT) |
+ SMP_INDIRECT_RESPONSE);
+ /*
+ * Send SMP DISCOVER (of either SAS1.1 or SAS2 flavors).
+ */
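+	/*
+	 * As with REPORT GENERAL above, msg[4] holds the big-endian start of
+	 * the SMP request frame: frame type 0x40, function 0x10 (DISCOVER),
+	 * plus what appears to be the SAS2 allocated response length for
+	 * SAS2-tolerant expanders. The PHY being queried goes into msg[6].
+	 */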
+ if (expander->tolerates_sas2) {
+ msg[4] = BE_32(0x40101B00);
+ } else {
+ msg[4] = BE_32(0x40100000);
+ }
+ msg[5] = 0;
+ msg[6] = BE_32((pptr->phynum << 16));
+ msg[7] = 0;
+ msg[8] = 0;
+ msg[9] = 0;
+ msg[10] = 0;
+ msg[11] = 0;
+ msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
+ msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
+ msg[14] = LE_32(PMCS_SCRATCH_SIZE - rdoff);
+ msg[15] = 0;
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ result = 0;
+ goto out;
+ }
+
+ COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ htag = pwrk->htag;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ /*
+ * Drop PHY lock while waiting so other completions aren't potentially
+ * blocked.
+ */
+ pmcs_unlock_phy(expander);
+ WAIT_FOR(pwrk, 1000, result);
+ pmcs_lock_phy(expander);
+ pmcs_pwork(pwp, pwrk);
+
+ mutex_enter(&pwp->config_lock);
+ if (pwp->config_changed) {
+ RESTART_DISCOVERY_LOCKED(pwp);
+ mutex_exit(&pwp->config_lock);
+ result = 0;
+ goto out;
+ }
+ mutex_exit(&pwp->config_lock);
+
+ if (result) {
+ pmcs_prt(pwp, PMCS_PRT_WARN, pmcs_timeo, __func__);
+ if (pmcs_abort(pwp, expander, htag, 0, 0)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Unable to issue SMP ABORT for htag 0x%08x",
+ __func__, htag);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Issuing SMP ABORT for htag 0x%08x",
+ __func__, htag);
+ }
+ result = -ETIMEDOUT;
+ goto out;
+ }
+ ptr = (void *)pwp->scratch;
+ /*
+ * Point roff to the DMA offset for returned data
+ */
+ roff = pwp->scratch;
+ roff += rdoff;
+ srf = (smp_response_frame_t *)roff;
+ sdr = (smp_discover_resp_t *)(roff+4);
+ status = LE_32(ptr[2]);
+ if (status == PMCOUT_STATUS_UNDERFLOW ||
+ status == PMCOUT_STATUS_OVERFLOW) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW,
+ "%s: over/underflow", __func__);
+ status = PMCOUT_STATUS_OK;
+ }
+ if (status != PMCOUT_STATUS_OK) {
+ char *nag = NULL;
+ (void) snprintf(buf, sizeof (buf),
+ "%s: SMP op failed (0x%x)", __func__, status);
+ switch (status) {
+ case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
+ DFM(nag, "Hardware Timeout");
+ /* FALLTHROUGH */
+ case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
+ DFM(nag, "Internal SMP Resource Failure");
+ /* FALLTHROUGH */
+ case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
+ DFM(nag, "PHY Not Ready");
+ /* FALLTHROUGH */
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ DFM(nag, "Connection Rate Not Supported");
+ /* FALLTHROUGH */
+ case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
+ DFM(nag, "Open Retry Timeout");
+ /* FALLTHROUGH */
+ case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
+ DFM(nag, "Response Connection Error");
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: expander %s SMP operation failed (%s)",
+ __func__, pptr->path, nag);
+ break;
+ default:
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG, buf, ptr);
+ result = -EIO;
+ break;
+ }
+ goto out;
+ } else if (srf->srf_frame_type != SMP_FRAME_TYPE_RESPONSE) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: bad response frame type 0x%x",
+ __func__, srf->srf_frame_type);
+ result = -EINVAL;
+ goto out;
+ } else if (srf->srf_function != SMP_FUNC_DISCOVER) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: bad response function 0x%x",
+ __func__, srf->srf_function);
+ result = -EINVAL;
+ goto out;
+ } else if (srf->srf_result != SMP_RES_FUNCTION_ACCEPTED) {
+ result = pmcs_smp_function_result(pwp, srf);
+ /* Need not fail if PHY is Vacant */
+ if (result != SMP_RES_PHY_VACANT) {
+ result = -EINVAL;
+ goto out;
+ }
+ }
+
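+	/*
+	 * Collapse the attached initiator and target protocol flags into
+	 * bitmasks: bit 0 = SATA, bit 1 = SMP, bit 2 = STP, bit 3 = SSP.
+	 */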
+ ini_support = (sdr->sdr_attached_sata_host |
+ (sdr->sdr_attached_smp_initiator << 1) |
+ (sdr->sdr_attached_stp_initiator << 2) |
+ (sdr->sdr_attached_ssp_initiator << 3));
+
+ tgt_support = (sdr->sdr_attached_sata_device |
+ (sdr->sdr_attached_smp_target << 1) |
+ (sdr->sdr_attached_stp_target << 2) |
+ (sdr->sdr_attached_ssp_target << 3));
+
+ pmcs_wwn2barray(BE_64(sdr->sdr_sas_addr), sas_address);
+ pmcs_wwn2barray(BE_64(sdr->sdr_attached_sas_addr), att_sas_address);
+
+ switch (sdr->sdr_attached_device_type) {
+ case SAS_IF_DTYPE_ENDPOINT:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS="
+ SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x",
+ pptr->path,
+ sdr->sdr_attached_device_type,
+ sdr->sdr_negotiated_logical_link_rate,
+ ini_support,
+ tgt_support,
+ SAS_ADDR_PRT(sas_address),
+ SAS_ADDR_PRT(att_sas_address),
+ sdr->sdr_attached_phy_identifier);
+
+ if (sdr->sdr_attached_sata_device ||
+ sdr->sdr_attached_stp_target) {
+ pptr->dtype = SATA;
+ } else if (sdr->sdr_attached_ssp_target) {
+ pptr->dtype = SAS;
+ } else if (tgt_support || ini_support) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: %s has "
+ "tgt support=%x init support=(%x)",
+ __func__, pptr->path, tgt_support, ini_support);
+ }
+ break;
+ case SAS_IF_DTYPE_EDGE:
+ case SAS_IF_DTYPE_FANOUT:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS="
+ SAS_ADDR_FMT " attSAS=" SAS_ADDR_FMT " atPHY=%x",
+ pptr->path,
+ sdr->sdr_attached_device_type,
+ sdr->sdr_negotiated_logical_link_rate,
+ ini_support,
+ tgt_support,
+ SAS_ADDR_PRT(sas_address),
+ SAS_ADDR_PRT(att_sas_address),
+ sdr->sdr_attached_phy_identifier);
+ if (sdr->sdr_attached_smp_target) {
+ /*
+ * Avoid configuring phys that just point back
+ * at a parent phy
+ */
+ if (expander->parent &&
+ memcmp(expander->parent->sas_address,
+ att_sas_address,
+ sizeof (expander->parent->sas_address)) == 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG3,
+ "%s: skipping port back to parent "
+ "expander (%s)", __func__, pptr->path);
+ pptr->dtype = NOTHING;
+ break;
+ }
+ pptr->dtype = EXPANDER;
+
+ } else if (tgt_support || ini_support) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s has "
+ "tgt support=%x init support=(%x)",
+ pptr->path, tgt_support, ini_support);
+ pptr->dtype = EXPANDER;
+ }
+ break;
+ default:
+ pptr->dtype = NOTHING;
+ break;
+ }
+ if (pptr->dtype != NOTHING) {
+ pmcs_phy_t *ctmp;
+
+ /*
+ * If the attached device is a SATA device and the expander
+ * is (possibly) a SAS2 compliant expander, check for whether
+ * there is a NAA=5 WWN field starting at this offset and
+ * use that for the SAS Address for this device.
+ */
+ if (expander->tolerates_sas2 && pptr->dtype == SATA &&
+	    (roff[SAS_ATTACHED_NAME_OFFSET] >> 4) == 0x5) {
+ (void) memcpy(pptr->sas_address,
+ &roff[SAS_ATTACHED_NAME_OFFSET], 8);
+ } else {
+ (void) memcpy(pptr->sas_address, att_sas_address, 8);
+ }
+ pptr->atdt = (sdr->sdr_attached_device_type);
+ /*
+ * Now run up from the expander's parent up to the top to
+ * make sure we only use the least common link_rate.
+ */
+ for (ctmp = expander->parent; ctmp; ctmp = ctmp->parent) {
+ if (ctmp->link_rate <
+ sdr->sdr_negotiated_logical_link_rate) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: derating link rate from %x to %x due "
+ "to %s being slower", pptr->path,
+ sdr->sdr_negotiated_logical_link_rate,
+ ctmp->link_rate,
+ ctmp->path);
+ sdr->sdr_negotiated_logical_link_rate =
+ ctmp->link_rate;
+ }
+ }
+ pptr->link_rate = sdr->sdr_negotiated_logical_link_rate;
+ pptr->state.prog_min_rate = sdr->sdr_prog_min_phys_link_rate;
+ pptr->state.hw_min_rate = sdr->sdr_hw_min_phys_link_rate;
+ pptr->state.prog_max_rate = sdr->sdr_prog_max_phys_link_rate;
+ pptr->state.hw_max_rate = sdr->sdr_hw_max_phys_link_rate;
+ PHY_CHANGED(pwp, pptr);
+ } else {
+ pmcs_clear_phy(pwp, pptr);
+ }
+ result = 1;
+out:
+ return (result);
+}
+
+/*
+ * Get a work structure and assign it a tag with type and serial number
+ * If a structure is returned, it is returned locked.
+ */
+pmcwork_t *
+pmcs_gwork(pmcs_hw_t *pwp, uint32_t tag_type, pmcs_phy_t *phyp)
+{
+ pmcwork_t *p;
+ uint16_t snum;
+ uint32_t off;
+
+ mutex_enter(&pwp->wfree_lock);
+ p = STAILQ_FIRST(&pwp->wf);
+ if (p == NULL) {
+ /*
+ * If we couldn't get a work structure, it's time to bite
+ * the bullet, grab the pfree_lock and copy over all the
+ * work structures from the pending free list to the actual
+ * free list. This shouldn't happen all that often.
+ */
+		mutex_enter(&pwp->pfree_lock);
+		if (STAILQ_EMPTY(&pwp->pf)) {
+			/* Nothing on the pending free list either */
+			mutex_exit(&pwp->pfree_lock);
+			mutex_exit(&pwp->wfree_lock);
+			return (NULL);
+		}
+ pwp->wf.stqh_first = pwp->pf.stqh_first;
+ pwp->wf.stqh_last = pwp->pf.stqh_last;
+ STAILQ_INIT(&pwp->pf);
+ mutex_exit(&pwp->pfree_lock);
+
+ p = STAILQ_FIRST(&pwp->wf);
+ if (p == NULL) {
+ mutex_exit(&pwp->wfree_lock);
+ return (NULL);
+ }
+ }
+ STAILQ_REMOVE(&pwp->wf, p, pmcwork, next);
+ snum = pwp->wserno++;
+ mutex_exit(&pwp->wfree_lock);
+
+ off = p - pwp->work;
+
+ mutex_enter(&p->lock);
+ ASSERT(p->state == PMCS_WORK_STATE_NIL);
+ ASSERT(p->htag == PMCS_TAG_FREE);
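+	/*
+	 * The handle tag packs three fields: the tag type, a monotonically
+	 * increasing serial number, and this structure's index in the
+	 * pwp->work array, which lets pmcs_tag2wp() find the structure again
+	 * from the tag alone.
+	 */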
+ p->htag = (tag_type << PMCS_TAG_TYPE_SHIFT) & PMCS_TAG_TYPE_MASK;
+ p->htag |= ((snum << PMCS_TAG_SERNO_SHIFT) & PMCS_TAG_SERNO_MASK);
+ p->htag |= ((off << PMCS_TAG_INDEX_SHIFT) & PMCS_TAG_INDEX_MASK);
+ p->start = gethrtime();
+ p->state = PMCS_WORK_STATE_READY;
+ p->ssp_event = 0;
+ p->dead = 0;
+
+ if (phyp) {
+ p->phy = phyp;
+ pmcs_inc_phy_ref_count(phyp);
+ }
+
+ return (p);
+}
+
+/*
+ * Called with pwrk lock held. Returned with lock released.
+ */
+void
+pmcs_pwork(pmcs_hw_t *pwp, pmcwork_t *p)
+{
+ ASSERT(p != NULL);
+ ASSERT(mutex_owned(&p->lock));
+
+#ifdef DEBUG
+ p->last_ptr = p->ptr;
+ p->last_arg = p->arg;
+ p->last_phy = p->phy;
+ p->last_xp = p->xp;
+ p->last_htag = p->htag;
+ p->last_state = p->state;
+#endif
+ p->finish = gethrtime();
+
+ if (p->phy) {
+ pmcs_dec_phy_ref_count(p->phy);
+ }
+
+ p->state = PMCS_WORK_STATE_NIL;
+ p->htag = PMCS_TAG_FREE;
+ p->xp = NULL;
+ p->ptr = NULL;
+ p->arg = NULL;
+ p->phy = NULL;
+ p->timer = 0;
+ mutex_exit(&p->lock);
+
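+	/*
+	 * Return the structure to the free list. If the free-list lock is
+	 * contended, don't block here; park the structure on the pending
+	 * free list instead, which pmcs_gwork() drains when the free list
+	 * runs dry.
+	 */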
+ if (mutex_tryenter(&pwp->wfree_lock) == 0) {
+ mutex_enter(&pwp->pfree_lock);
+ STAILQ_INSERT_TAIL(&pwp->pf, p, next);
+ mutex_exit(&pwp->pfree_lock);
+ } else {
+ STAILQ_INSERT_TAIL(&pwp->wf, p, next);
+ mutex_exit(&pwp->wfree_lock);
+ }
+}
+
+/*
+ * Find a work structure based upon a tag and make sure that the tag
+ * serial number matches the work structure we've found.
+ * If a structure is found, its lock is held upon return.
+ */
+pmcwork_t *
+pmcs_tag2wp(pmcs_hw_t *pwp, uint32_t htag)
+{
+ pmcwork_t *p;
+ uint32_t idx = PMCS_TAG_INDEX(htag);
+
+ p = &pwp->work[idx];
+
+ mutex_enter(&p->lock);
+ if (p->htag == htag) {
+ return (p);
+ }
+ mutex_exit(&p->lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "INDEX 0x%x HTAG 0x%x got p->htag 0x%x",
+ idx, htag, p->htag);
+ return (NULL);
+}
+
+/*
+ * Issue an abort for a command or for all commands.
+ *
+ * Since this can be called from interrupt context,
+ * we don't wait for completion if wait is not set.
+ *
+ * Called with PHY lock held.
+ */
+int
+pmcs_abort(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint32_t tag, int all_cmds,
+ int wait)
+{
+ pmcwork_t *pwrk;
+ pmcs_xscsi_t *tgt;
+ uint32_t msg[PMCS_MSG_SIZE], *ptr;
+ int result, abt_type;
+ uint32_t abt_htag, status;
+
+ if (pptr->abort_all_start) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: ABORT_ALL for (%s) already"
+ " in progress.", __func__, pptr->path);
+ return (EBUSY);
+ }
+
+ switch (pptr->dtype) {
+ case SAS:
+ abt_type = PMCIN_SSP_ABORT;
+ break;
+ case SATA:
+ abt_type = PMCIN_SATA_ABORT;
+ break;
+ case EXPANDER:
+ abt_type = PMCIN_SMP_ABORT;
+ break;
+ default:
+ return (0);
+ }
+
+ pwrk = pmcs_gwork(pwp, wait ? PMCS_TAG_TYPE_WAIT : PMCS_TAG_TYPE_NONE,
+ pptr);
+
+ if (pwrk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nowrk, __func__);
+ return (ENOMEM);
+ }
+
+ pwrk->dtype = pptr->dtype;
+ if (wait) {
+ pwrk->arg = msg;
+ }
+ if (pptr->valid_device_id == 0) {
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Invalid DeviceID", __func__);
+ return (ENODEV);
+ }
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, abt_type));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(pptr->device_id);
+ if (all_cmds) {
+ msg[3] = 0;
+ msg[4] = LE_32(1);
+ pwrk->ptr = NULL;
+ pptr->abort_all_start = gethrtime();
+ } else {
+ msg[3] = LE_32(tag);
+ msg[4] = 0;
+ pwrk->ptr = &tag;
+ }
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ return (ENOMEM);
+ }
+
+ COPY_MESSAGE(ptr, msg, 5);
+ if (all_cmds) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: aborting all commands for %s device %s. (htag=0x%x)",
+ __func__, pmcs_get_typename(pptr->dtype), pptr->path,
+ msg[1]);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: aborting tag 0x%x for %s device %s. (htag=0x%x)",
+ __func__, tag, pmcs_get_typename(pptr->dtype), pptr->path,
+ msg[1]);
+ }
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (!wait) {
+ mutex_exit(&pwrk->lock);
+ return (0);
+ }
+
+ abt_htag = pwrk->htag;
+ pmcs_unlock_phy(pwrk->phy);
+ WAIT_FOR(pwrk, 1000, result);
+ pmcs_lock_phy(pwrk->phy);
+
+ tgt = pwrk->xp;
+ pmcs_pwork(pwp, pwrk);
+
+ if (tgt != NULL) {
+ mutex_enter(&tgt->aqlock);
+ if (!STAILQ_EMPTY(&tgt->aq)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Abort complete (result=0x%x), but "
+ "aq not empty (tgt 0x%p), waiting",
+ __func__, result, (void *)tgt);
+ cv_wait(&tgt->abort_cv, &tgt->aqlock);
+ }
+ mutex_exit(&tgt->aqlock);
+ }
+
+ if (all_cmds) {
+ pptr->abort_all_start = 0;
+ cv_signal(&pptr->abort_all_cv);
+ }
+
+ if (result) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Abort (htag 0x%08x) request timed out",
+ __func__, abt_htag);
+ if (tgt != NULL) {
+ mutex_enter(&tgt->statlock);
+ if ((tgt->dev_state != PMCS_DEVICE_STATE_IN_RECOVERY) &&
+ (tgt->dev_state !=
+ PMCS_DEVICE_STATE_NON_OPERATIONAL)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Trying DS error recovery for tgt 0x%p",
+ __func__, (void *)tgt);
+ (void) pmcs_send_err_recovery_cmd(pwp,
+ PMCS_DEVICE_STATE_IN_RECOVERY, tgt);
+ }
+ mutex_exit(&tgt->statlock);
+ }
+ return (ETIMEDOUT);
+ }
+
+ status = LE_32(msg[2]);
+ if (status != PMCOUT_STATUS_OK) {
+ /*
+		 * The only non-success statuses are IO_NOT_VALID and
+		 * IO_ABORT_IN_PROGRESS.
+		 * In the case of IO_ABORT_IN_PROGRESS, it's the other ABORT
+		 * command's status that is of concern, so this duplicate
+		 * command's status can be ignored.
+		 * IO_NOT_VALID is not an error per se.
+		 * For an abort of a single I/O, complete the command anyway.
+		 * If, however, we were aborting all commands, that is a
+		 * problem, as IO_NOT_VALID really means that the I/O or
+		 * device is not there; the discovery process will take care
+		 * of the cleanup.
+ */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: abort result 0x%x",
+ __func__, LE_32(msg[2]));
+ if (all_cmds) {
+ PHY_CHANGED(pwp, pptr);
+ RESTART_DISCOVERY(pwp);
+ } else {
+ return (EINVAL);
+ }
+
+ return (0);
+ }
+
+ if (tgt != NULL) {
+ mutex_enter(&tgt->statlock);
+ if (tgt->dev_state == PMCS_DEVICE_STATE_IN_RECOVERY) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Restoring OPERATIONAL dev_state for tgt 0x%p",
+ __func__, (void *)tgt);
+ (void) pmcs_send_err_recovery_cmd(pwp,
+ PMCS_DEVICE_STATE_OPERATIONAL, tgt);
+ }
+ mutex_exit(&tgt->statlock);
+ }
+
+ return (0);
+}
+
+/*
+ * Issue a task management function to an SSP device.
+ *
+ * Called with PHY lock held.
+ * statlock CANNOT be held upon entry.
+ */
+int
+pmcs_ssp_tmf(pmcs_hw_t *pwp, pmcs_phy_t *pptr, uint8_t tmf, uint32_t tag,
+ uint64_t lun, uint32_t *response)
+{
+ int result, ds;
+ uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
+ sas_ssp_rsp_iu_t *rptr = (void *)local;
+ static const uint8_t ssp_rsp_evec[] = {
+ 0x58, 0x61, 0x56, 0x72, 0x00
+ };
+ uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
+ struct pmcwork *pwrk;
+ pmcs_xscsi_t *xp;
+
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
+ if (pwrk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nowrk, __func__);
+ return (ENOMEM);
+ }
+ /*
+ * NB: We use the PMCS_OQ_GENERAL outbound queue
+ * NB: so as to not get entangled in normal I/O
+ * NB: processing.
+ */
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
+ PMCIN_SSP_INI_TM_START));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(pptr->device_id);
+ if (tmf == SAS_ABORT_TASK || tmf == SAS_QUERY_TASK) {
+ msg[3] = LE_32(tag);
+ } else {
+ msg[3] = 0;
+ }
+ msg[4] = LE_32(tmf);
+ msg[5] = BE_32((uint32_t)lun);
+ msg[6] = BE_32((uint32_t)(lun >> 32));
+ msg[7] = LE_32(PMCIN_MESSAGE_REPORT);
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ return (ENOMEM);
+ }
+ COPY_MESSAGE(ptr, msg, 7);
+ pwrk->arg = msg;
+ pwrk->dtype = pptr->dtype;
+
+ xp = pptr->target;
+ if (xp != NULL) {
+ mutex_enter(&xp->statlock);
+ if (xp->dev_state == PMCS_DEVICE_STATE_NON_OPERATIONAL) {
+ mutex_exit(&xp->statlock);
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Not sending '%s'"
+ " because DS is '%s'", __func__, pmcs_tmf2str(tmf),
+ pmcs_status_str
+ (PMCOUT_STATUS_IO_DS_NON_OPERATIONAL));
+ return (EIO);
+ }
+ mutex_exit(&xp->statlock);
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: sending '%s' to %s (lun %llu) tag 0x%x", __func__,
+ pmcs_tmf2str(tmf), pptr->path, (unsigned long long) lun, tag);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ pmcs_unlock_phy(pptr);
+ /*
+	 * This is a command sent to the target device, so it can take a
+	 * significant amount of time to complete when the path and device
+	 * are busy. Set the timeout to 20 seconds.
+ */
+ WAIT_FOR(pwrk, 20000, result);
+ pmcs_lock_phy(pptr);
+ pmcs_pwork(pwp, pwrk);
+
+ if (result) {
+ if (xp == NULL) {
+ return (ETIMEDOUT);
+ }
+
+ mutex_enter(&xp->statlock);
+ pmcs_start_dev_state_recovery(xp, pptr);
+ mutex_exit(&xp->statlock);
+ return (ETIMEDOUT);
+ }
+
+ status = LE_32(msg[2]);
+ if (status != PMCOUT_STATUS_OK) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: status %s for TMF %s action to %s, lun %llu",
+ __func__, pmcs_status_str(status), pmcs_tmf2str(tmf),
+ pptr->path, (unsigned long long) lun);
+ if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) ||
+ (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) ||
+ (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) {
+ ds = PMCS_DEVICE_STATE_NON_OPERATIONAL;
+ } else if (status == PMCOUT_STATUS_IO_DS_IN_RECOVERY) {
+ /*
+ * If the status is IN_RECOVERY, it's an indication
+ * that it's now time for us to request to have the
+ * device state set to OPERATIONAL since we're the ones
+ * that requested recovery to begin with.
+ */
+ ds = PMCS_DEVICE_STATE_OPERATIONAL;
+ } else {
+ ds = PMCS_DEVICE_STATE_IN_RECOVERY;
+ }
+ if (xp != NULL) {
+ mutex_enter(&xp->statlock);
+ if (xp->dev_state != ds) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Sending err recovery cmd"
+ " for tgt 0x%p (status = %s)",
+ __func__, (void *)xp,
+ pmcs_status_str(status));
+ (void) pmcs_send_err_recovery_cmd(pwp, ds, xp);
+ }
+ mutex_exit(&xp->statlock);
+ }
+ return (EIO);
+ } else {
+ ds = PMCS_DEVICE_STATE_OPERATIONAL;
+ if (xp != NULL) {
+ mutex_enter(&xp->statlock);
+ if (xp->dev_state != ds) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Sending err recovery cmd"
+ " for tgt 0x%p (status = %s)",
+ __func__, (void *)xp,
+ pmcs_status_str(status));
+ (void) pmcs_send_err_recovery_cmd(pwp, ds, xp);
+ }
+ mutex_exit(&xp->statlock);
+ }
+ }
+ if (LE_32(msg[3]) == 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "TMF completed with no response");
+ return (EIO);
+ }
+ pmcs_endian_transform(pwp, local, &msg[5], ssp_rsp_evec);
+ xd = (uint8_t *)(&msg[5]);
+ xd += SAS_RSP_HDR_SIZE;
+ if (rptr->datapres != SAS_RSP_DATAPRES_RESPONSE_DATA) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: TMF response not RESPONSE DATA (0x%x)",
+ __func__, rptr->datapres);
+ return (EIO);
+ }
+ if (rptr->response_data_length != 4) {
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
+ "Bad SAS RESPONSE DATA LENGTH", msg);
+ return (EIO);
+ }
+ (void) memcpy(&status, xd, sizeof (uint32_t));
+ status = BE_32(status);
+ if (response != NULL)
+ *response = status;
+ /*
+ * The status is actually in the low-order byte. The upper three
+ * bytes contain additional information for the TMFs that support them.
+ * However, at this time we do not issue any of those. In the other
+ * cases, the upper three bytes are supposed to be 0, but it appears
+ * they aren't always. Just mask them off.
+ */
+ switch (status & 0xff) {
+ case SAS_RSP_TMF_COMPLETE:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: TMF complete", __func__);
+ result = 0;
+ break;
+ case SAS_RSP_TMF_SUCCEEDED:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: TMF succeeded", __func__);
+ result = 0;
+ break;
+ case SAS_RSP_INVALID_FRAME:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: TMF returned INVALID FRAME", __func__);
+ result = EIO;
+ break;
+ case SAS_RSP_TMF_NOT_SUPPORTED:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: TMF returned TMF NOT SUPPORTED", __func__);
+ result = EIO;
+ break;
+ case SAS_RSP_TMF_FAILED:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: TMF returned TMF FAILED", __func__);
+ result = EIO;
+ break;
+ case SAS_RSP_TMF_INCORRECT_LUN:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: TMF returned INCORRECT LUN", __func__);
+ result = EIO;
+ break;
+ case SAS_RSP_OVERLAPPED_OIPTTA:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: TMF returned OVERLAPPED INITIATOR PORT TRANSFER TAG "
+ "ATTEMPTED", __func__);
+ result = EIO;
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: TMF returned unknown code 0x%x", __func__, status);
+ result = EIO;
+ break;
+ }
+ return (result);
+}
+
+/*
+ * Called with PHY lock held and scratch acquired
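+ *
+ * Issues a READ LOG EXT of log page 0x10 (the NCQ command error log) so
+ * that the failing tag can be identified and reported after an NCQ
+ * command failure.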
+ */
+int
+pmcs_sata_abort_ncq(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ const char *utag_fail_fmt = "%s: untagged NCQ command failure";
+ const char *tag_fail_fmt = "%s: NCQ command failure (tag 0x%x)";
+ uint32_t msg[PMCS_QENTRY_SIZE], *ptr, result, status;
+ uint8_t *fp = pwp->scratch, ds;
+ fis_t fis;
+ pmcwork_t *pwrk;
+ pmcs_xscsi_t *tgt;
+
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
+ if (pwrk == NULL) {
+ return (ENOMEM);
+ }
+ msg[0] = LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE,
+ PMCIN_SATA_HOST_IO_START));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(pptr->device_id);
+ msg[3] = LE_32(512);
+ msg[4] = LE_32(SATA_PROTOCOL_PIO | PMCIN_DATADIR_2_INI);
+ msg[5] = LE_32((READ_LOG_EXT << 16) | (C_BIT << 8) | FIS_REG_H2DEV);
+	msg[6] = LE_32(0x10);
+	msg[7] = 0;
+	msg[8] = LE_32(1);
+ msg[9] = 0;
+ msg[10] = 0;
+ msg[11] = 0;
+ msg[12] = LE_32(DWORD0(pwp->scratch_dma));
+ msg[13] = LE_32(DWORD1(pwp->scratch_dma));
+ msg[14] = LE_32(512);
+ msg[15] = 0;
+
+ pwrk->arg = msg;
+ pwrk->dtype = pptr->dtype;
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ return (ENOMEM);
+ }
+ COPY_MESSAGE(ptr, msg, PMCS_QENTRY_SIZE);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ pmcs_unlock_phy(pptr);
+ WAIT_FOR(pwrk, 250, result);
+ pmcs_lock_phy(pptr);
+ pmcs_pwork(pwp, pwrk);
+
+ if (result) {
+ pmcs_prt(pwp, PMCS_PRT_INFO, pmcs_timeo, __func__);
+ return (EIO);
+ }
+ status = LE_32(msg[2]);
+ if (status != PMCOUT_STATUS_OK || LE_32(msg[3])) {
+ tgt = pptr->target;
+ if (tgt == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: cannot find target for phy 0x%p for "
+ "dev state recovery", __func__, (void *)pptr);
+ return (EIO);
+ }
+
+ mutex_enter(&tgt->statlock);
+
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG, "READ LOG EXT", msg);
+ if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) ||
+ (status == PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK) ||
+ (status == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS)) {
+ ds = PMCS_DEVICE_STATE_NON_OPERATIONAL;
+ } else {
+ ds = PMCS_DEVICE_STATE_IN_RECOVERY;
+ }
+ if (tgt->dev_state != ds) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Trying SATA DS Error"
+ " Recovery for tgt(0x%p) for status(%s)",
+ __func__, (void *)tgt, pmcs_status_str(status));
+ (void) pmcs_send_err_recovery_cmd(pwp, ds, tgt);
+ }
+
+ mutex_exit(&tgt->statlock);
+ return (EIO);
+ }
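+	/*
+	 * Rebuild the D2H register FIS from the returned log data so it can
+	 * be dumped. Bit 7 of the first log byte indicates an untagged
+	 * (non-queued) command failure; otherwise the low five bits hold the
+	 * tag of the failed command.
+	 */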
+ fis[0] = (fp[4] << 24) | (fp[3] << 16) | (fp[2] << 8) | FIS_REG_D2H;
+ fis[1] = (fp[8] << 24) | (fp[7] << 16) | (fp[6] << 8) | fp[5];
+ fis[2] = (fp[12] << 24) | (fp[11] << 16) | (fp[10] << 8) | fp[9];
+ fis[3] = (fp[16] << 24) | (fp[15] << 16) | (fp[14] << 8) | fp[13];
+ fis[4] = 0;
+ if (fp[0] & 0x80) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, utag_fail_fmt, __func__);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, tag_fail_fmt, __func__,
+ fp[0] & 0x1f);
+ }
+ pmcs_fis_dump(pwp, fis);
+ pptr->need_rl_ext = 0;
+ return (0);
+}
+
+/*
+ * Transform a structure from CPU to Device endian format, or
+ * vice versa, based upon a transformation vector.
+ *
+ * A transformation vector is an array of bytes, each byte
+ * of which is defined thusly:
+ *
+ * bit 7: from CPU to desired endian, otherwise from desired endian
+ * to CPU format
+ * bit 6: Big Endian, else Little Endian
+ * bits 5-4:
+ *	00 Undefined
+ *	01 One-byte quantities
+ *	10 Two-byte quantities
+ *	11 Four-byte quantities
+ *
+ * bits 3-0:
+ *	0000 Undefined
+ *	Otherwise, the number of quantities to transform
+ *
+ * The vector is terminated by a 0 value.
+ */
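+/*
+ * Worked example (derived from the format above): the ssp_rsp_evec used by
+ * pmcs_ssp_tmf(), { 0x58, 0x61, 0x56, 0x72, 0x00 }, transforms 8 single
+ * bytes, one big-endian 16-bit quantity, 6 more single bytes, and two
+ * big-endian 32-bit quantities -- 24 bytes in all, which corresponds to the
+ * fixed-length portion of an SSP response IU. Note that bit 7 (the direction
+ * bit) is never examined by the code below; the byte swap is its own
+ * inverse, so the same vector works in either direction.
+ */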
+
+void
+pmcs_endian_transform(pmcs_hw_t *pwp, void *orig_out, void *orig_in,
+ const uint8_t *xfvec)
+{
+ uint8_t c, *out = orig_out, *in = orig_in;
+
+ if (xfvec == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: null xfvec", __func__);
+ return;
+ }
+ if (out == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: null out", __func__);
+ return;
+ }
+ if (in == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: null in", __func__);
+ return;
+ }
+ while ((c = *xfvec++) != 0) {
+ int nbyt = (c & 0xf);
+ int size = (c >> 4) & 0x3;
+ int bige = (c >> 4) & 0x4;
+
+ switch (size) {
+ case 1:
+ {
+ while (nbyt-- > 0) {
+ *out++ = *in++;
+ }
+ break;
+ }
+ case 2:
+ {
+ uint16_t tmp;
+ while (nbyt-- > 0) {
+ (void) memcpy(&tmp, in, sizeof (uint16_t));
+ if (bige) {
+ tmp = BE_16(tmp);
+ } else {
+ tmp = LE_16(tmp);
+ }
+ (void) memcpy(out, &tmp, sizeof (uint16_t));
+ out += sizeof (uint16_t);
+ in += sizeof (uint16_t);
+ }
+ break;
+ }
+ case 3:
+ {
+ uint32_t tmp;
+ while (nbyt-- > 0) {
+ (void) memcpy(&tmp, in, sizeof (uint32_t));
+ if (bige) {
+ tmp = BE_32(tmp);
+ } else {
+ tmp = LE_32(tmp);
+ }
+ (void) memcpy(out, &tmp, sizeof (uint32_t));
+ out += sizeof (uint32_t);
+ in += sizeof (uint32_t);
+ }
+ break;
+ }
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: bad size", __func__);
+ return;
+ }
+ }
+}
+
+const char *
+pmcs_get_rate(unsigned int linkrt)
+{
+ const char *rate;
+ switch (linkrt) {
+ case SAS_LINK_RATE_1_5GBIT:
+ rate = "1.5";
+ break;
+ case SAS_LINK_RATE_3GBIT:
+ rate = "3.0";
+ break;
+ case SAS_LINK_RATE_6GBIT:
+ rate = "6.0";
+ break;
+ default:
+ rate = "???";
+ break;
+ }
+ return (rate);
+}
+
+const char *
+pmcs_get_typename(pmcs_dtype_t type)
+{
+ switch (type) {
+ case NOTHING:
+ return ("NIL");
+ case SATA:
+ return ("SATA");
+ case SAS:
+ return ("SSP");
+ case EXPANDER:
+ return ("EXPANDER");
+ }
+ return ("????");
+}
+
+const char *
+pmcs_tmf2str(int tmf)
+{
+ switch (tmf) {
+ case SAS_ABORT_TASK:
+ return ("Abort Task");
+ case SAS_ABORT_TASK_SET:
+ return ("Abort Task Set");
+ case SAS_CLEAR_TASK_SET:
+ return ("Clear Task Set");
+ case SAS_LOGICAL_UNIT_RESET:
+ return ("Logical Unit Reset");
+ case SAS_I_T_NEXUS_RESET:
+ return ("I_T Nexus Reset");
+ case SAS_CLEAR_ACA:
+ return ("Clear ACA");
+ case SAS_QUERY_TASK:
+ return ("Query Task");
+ case SAS_QUERY_TASK_SET:
+ return ("Query Task Set");
+ case SAS_QUERY_UNIT_ATTENTION:
+ return ("Query Unit Attention");
+ default:
+ return ("Unknown");
+ }
+}
+
+const char *
+pmcs_status_str(uint32_t status)
+{
+ switch (status) {
+ case PMCOUT_STATUS_OK:
+ return ("OK");
+ case PMCOUT_STATUS_ABORTED:
+ return ("ABORTED");
+ case PMCOUT_STATUS_OVERFLOW:
+ return ("OVERFLOW");
+ case PMCOUT_STATUS_UNDERFLOW:
+ return ("UNDERFLOW");
+ case PMCOUT_STATUS_FAILED:
+ return ("FAILED");
+ case PMCOUT_STATUS_ABORT_RESET:
+ return ("ABORT_RESET");
+ case PMCOUT_STATUS_IO_NOT_VALID:
+ return ("IO_NOT_VALID");
+ case PMCOUT_STATUS_NO_DEVICE:
+ return ("NO_DEVICE");
+ case PMCOUT_STATUS_ILLEGAL_PARAMETER:
+ return ("ILLEGAL_PARAMETER");
+ case PMCOUT_STATUS_LINK_FAILURE:
+ return ("LINK_FAILURE");
+ case PMCOUT_STATUS_PROG_ERROR:
+ return ("PROG_ERROR");
+ case PMCOUT_STATUS_EDC_IN_ERROR:
+ return ("EDC_IN_ERROR");
+ case PMCOUT_STATUS_EDC_OUT_ERROR:
+ return ("EDC_OUT_ERROR");
+ case PMCOUT_STATUS_ERROR_HW_TIMEOUT:
+ return ("ERROR_HW_TIMEOUT");
+ case PMCOUT_STATUS_XFER_ERR_BREAK:
+ return ("XFER_ERR_BREAK");
+ case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
+ return ("XFER_ERR_PHY_NOT_READY");
+ case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
+ return ("OPEN_CNX_PROTOCOL_NOT_SUPPORTED");
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
+ return ("OPEN_CNX_ERROR_ZONE_VIOLATION");
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
+ return ("OPEN_CNX_ERROR_BREAK");
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ return ("OPEN_CNX_ERROR_IT_NEXUS_LOSS");
+ case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
+ return ("OPENCNX_ERROR_BAD_DESTINATION");
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+ return ("OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED");
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+ return ("OPEN_CNX_ERROR_STP_RESOURCES_BUSY");
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
+ return ("OPEN_CNX_ERROR_WRONG_DESTINATION");
+ case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_EROOR:
+ return ("OPEN_CNX_ERROR_UNKNOWN_EROOR");
+ case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
+ return ("IO_XFER_ERROR_NAK_RECEIVED");
+ case PMCOUT_STATUS_XFER_ERROR_ACK_NAK_TIMEOUT:
+ return ("XFER_ERROR_ACK_NAK_TIMEOUT");
+ case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
+ return ("XFER_ERROR_PEER_ABORTED");
+ case PMCOUT_STATUS_XFER_ERROR_RX_FRAME:
+ return ("XFER_ERROR_RX_FRAME");
+ case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
+ return ("IO_XFER_ERROR_DMA");
+ case PMCOUT_STATUS_XFER_ERROR_CREDIT_TIMEOUT:
+ return ("XFER_ERROR_CREDIT_TIMEOUT");
+ case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
+ return ("XFER_ERROR_SATA_LINK_TIMEOUT");
+ case PMCOUT_STATUS_XFER_ERROR_SATA:
+ return ("XFER_ERROR_SATA");
+ case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
+ return ("XFER_ERROR_REJECTED_NCQ_MODE");
+ case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
+ return ("XFER_ERROR_ABORTED_DUE_TO_SRST");
+ case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
+ return ("XFER_ERROR_ABORTED_NCQ_MODE");
+ case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
+ return ("IO_XFER_OPEN_RETRY_TIMEOUT");
+ case PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR:
+ return ("SMP_RESP_CONNECTION_ERROR");
+ case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
+ return ("XFER_ERROR_UNEXPECTED_PHASE");
+ case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
+ return ("XFER_ERROR_RDY_OVERRUN");
+ case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
+ return ("XFER_ERROR_RDY_NOT_EXPECTED");
+ case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
+ return ("XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT");
+ case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
+ return ("XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK");
+ case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
+ return ("XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK");
+ case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
+ return ("XFER_ERROR_OFFSET_MISMATCH");
+ case PMCOUT_STATUS_XFER_ERROR_ZERO_DATA_LEN:
+ return ("XFER_ERROR_ZERO_DATA_LEN");
+ case PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED:
+ return ("XFER_CMD_FRAME_ISSUED");
+ case PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE:
+ return ("ERROR_INTERNAL_SMP_RESOURCE");
+ case PMCOUT_STATUS_IO_PORT_IN_RESET:
+ return ("IO_PORT_IN_RESET");
+ case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
+ return ("DEVICE STATE NON-OPERATIONAL");
+ case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
+ return ("DEVICE STATE IN RECOVERY");
+ default:
+ return (NULL);
+ }
+}
+
+uint64_t
+pmcs_barray2wwn(uint8_t ba[8])
+{
+ uint64_t result = 0;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ result <<= 8;
+ result |= ba[i];
+ }
+ return (result);
+}
+
+void
+pmcs_wwn2barray(uint64_t wwn, uint8_t ba[8])
+{
+ int i;
+ for (i = 0; i < 8; i++) {
+ ba[7 - i] = wwn & 0xff;
+ wwn >>= 8;
+ }
+}
+
+void
+pmcs_report_fwversion(pmcs_hw_t *pwp)
+{
+ const char *fwsupport;
+ switch (PMCS_FW_TYPE(pwp)) {
+ case PMCS_FW_TYPE_RELEASED:
+ fwsupport = "Released";
+ break;
+ case PMCS_FW_TYPE_DEVELOPMENT:
+ fwsupport = "Development";
+ break;
+ case PMCS_FW_TYPE_ALPHA:
+ fwsupport = "Alpha";
+ break;
+ case PMCS_FW_TYPE_BETA:
+ fwsupport = "Beta";
+ break;
+ default:
+ fwsupport = "Special";
+ break;
+ }
+ pmcs_prt(pwp, PMCS_PRT_INFO,
+ "Chip Revision: %c; F/W Revision %x.%x.%x %s", 'A' + pwp->chiprev,
+ PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp),
+ fwsupport);
+}
+
+void
+pmcs_phy_name(pmcs_hw_t *pwp, pmcs_phy_t *pptr, char *obuf, size_t olen)
+{
+ if (pptr->parent) {
+ pmcs_phy_name(pwp, pptr->parent, obuf, olen);
+ (void) snprintf(obuf, olen, "%s.%02x", obuf, pptr->phynum);
+ } else {
+ (void) snprintf(obuf, olen, "pp%02x", pptr->phynum);
+ }
+}
+
+/*
+ * Implementation for pmcs_find_phy_by_devid.
+ * If the PHY is found, it is returned locked.
+ */
+static pmcs_phy_t *
+pmcs_find_phy_by_devid_impl(pmcs_phy_t *phyp, uint32_t device_id)
+{
+ pmcs_phy_t *match, *cphyp, *nphyp;
+
+ ASSERT(!mutex_owned(&phyp->phy_lock));
+
+ while (phyp) {
+ pmcs_lock_phy(phyp);
+
+ if ((phyp->valid_device_id) && (phyp->device_id == device_id)) {
+ return (phyp);
+ }
+ if (phyp->children) {
+ cphyp = phyp->children;
+ pmcs_unlock_phy(phyp);
+ match = pmcs_find_phy_by_devid_impl(cphyp, device_id);
+ if (match) {
+ ASSERT(mutex_owned(&match->phy_lock));
+ return (match);
+ }
+ pmcs_lock_phy(phyp);
+ }
+
+ if (IS_ROOT_PHY(phyp)) {
+ pmcs_unlock_phy(phyp);
+ phyp = NULL;
+ } else {
+ nphyp = phyp->sibling;
+ pmcs_unlock_phy(phyp);
+ phyp = nphyp;
+ }
+ }
+
+ return (NULL);
+}
+
+/*
+ * If the PHY is found, it is returned locked
+ */
+pmcs_phy_t *
+pmcs_find_phy_by_devid(pmcs_hw_t *pwp, uint32_t device_id)
+{
+ pmcs_phy_t *phyp, *match = NULL;
+
+ phyp = pwp->root_phys;
+
+ while (phyp) {
+ match = pmcs_find_phy_by_devid_impl(phyp, device_id);
+ if (match) {
+ ASSERT(mutex_owned(&match->phy_lock));
+ return (match);
+ }
+ phyp = phyp->sibling;
+ }
+
+ return (NULL);
+}
+
+/*
+ * This function is called as a sanity check to ensure that a newly registered
+ * PHY doesn't have a device_id that is already in use by another
+ * registered PHY.
+ */
+static boolean_t
+pmcs_validate_devid(pmcs_phy_t *parent, pmcs_phy_t *phyp, uint32_t device_id)
+{
+ pmcs_phy_t *pptr;
+ boolean_t rval;
+
+ pptr = parent;
+
+ while (pptr) {
+ if (pptr->valid_device_id && (pptr != phyp) &&
+ (pptr->device_id == device_id)) {
+ pmcs_prt(pptr->pwp, PMCS_PRT_DEBUG,
+ "%s: phy %s already exists as %s with "
+ "device id 0x%x", __func__, phyp->path,
+ pptr->path, device_id);
+ return (B_FALSE);
+ }
+
+ if (pptr->children) {
+ rval = pmcs_validate_devid(pptr->children, phyp,
+ device_id);
+ if (rval == B_FALSE) {
+ return (rval);
+ }
+ }
+
+ pptr = pptr->sibling;
+ }
+
+ /* This PHY and device_id are valid */
+ return (B_TRUE);
+}
+
+/*
+ * If the PHY is found, it is returned locked
+ */
+static pmcs_phy_t *
+pmcs_find_phy_by_wwn_impl(pmcs_phy_t *phyp, uint8_t *wwn)
+{
+ pmcs_phy_t *matched_phy, *cphyp, *nphyp;
+
+ ASSERT(!mutex_owned(&phyp->phy_lock));
+
+ while (phyp) {
+ pmcs_lock_phy(phyp);
+
+ if (phyp->valid_device_id) {
+ if (memcmp(phyp->sas_address, wwn, 8) == 0) {
+ return (phyp);
+ }
+ }
+
+ if (phyp->children) {
+ cphyp = phyp->children;
+ pmcs_unlock_phy(phyp);
+ matched_phy = pmcs_find_phy_by_wwn_impl(cphyp, wwn);
+ if (matched_phy) {
+ ASSERT(mutex_owned(&matched_phy->phy_lock));
+ return (matched_phy);
+ }
+ pmcs_lock_phy(phyp);
+ }
+
+ /*
+ * Only iterate through non-root PHYs
+ */
+ if (IS_ROOT_PHY(phyp)) {
+ pmcs_unlock_phy(phyp);
+ phyp = NULL;
+ } else {
+ nphyp = phyp->sibling;
+ pmcs_unlock_phy(phyp);
+ phyp = nphyp;
+ }
+ }
+
+ return (NULL);
+}
+
+pmcs_phy_t *
+pmcs_find_phy_by_wwn(pmcs_hw_t *pwp, uint64_t wwn)
+{
+ uint8_t ebstr[8];
+ pmcs_phy_t *pptr, *matched_phy;
+
+ pmcs_wwn2barray(wwn, ebstr);
+
+ pptr = pwp->root_phys;
+ while (pptr) {
+ matched_phy = pmcs_find_phy_by_wwn_impl(pptr, ebstr);
+ if (matched_phy) {
+ ASSERT(mutex_owned(&matched_phy->phy_lock));
+ return (matched_phy);
+ }
+
+ pptr = pptr->sibling;
+ }
+
+ return (NULL);
+}
+
+
+/*
+ * pmcs_find_phy_by_sas_address
+ *
+ * Find a PHY that both matches "sas_addr" and is on "iport".
+ * If a matching PHY is found, it is returned locked.
+ */
+pmcs_phy_t *
+pmcs_find_phy_by_sas_address(pmcs_hw_t *pwp, pmcs_iport_t *iport,
+ pmcs_phy_t *root, char *sas_addr)
+{
+ int ua_form = 1;
+ uint64_t wwn;
+ char addr[PMCS_MAX_UA_SIZE];
+ pmcs_phy_t *pptr, *pnext, *pchild;
+
+ if (root == NULL) {
+ pptr = pwp->root_phys;
+ } else {
+ pptr = root;
+ }
+
+ while (pptr) {
+ pmcs_lock_phy(pptr);
+ /*
+ * If the PHY is dead or does not have a valid device ID,
+ * skip it.
+ */
+ if ((pptr->dead) || (!pptr->valid_device_id)) {
+ goto next_phy;
+ }
+
+ if (pptr->iport != iport) {
+ goto next_phy;
+ }
+
+ wwn = pmcs_barray2wwn(pptr->sas_address);
+ (void) scsi_wwn_to_wwnstr(wwn, ua_form, addr);
+ if (strncmp(addr, sas_addr, strlen(addr)) == 0) {
+ return (pptr);
+ }
+
+ if (pptr->children) {
+ pchild = pptr->children;
+ pmcs_unlock_phy(pptr);
+ pnext = pmcs_find_phy_by_sas_address(pwp, iport, pchild,
+ sas_addr);
+ if (pnext) {
+ return (pnext);
+ }
+ pmcs_lock_phy(pptr);
+ }
+
+next_phy:
+ pnext = pptr->sibling;
+ pmcs_unlock_phy(pptr);
+ pptr = pnext;
+ }
+
+ return (NULL);
+}
+
+void
+pmcs_fis_dump(pmcs_hw_t *pwp, fis_t fis)
+{
+ switch (fis[0] & 0xff) {
+ case FIS_REG_H2DEV:
+ pmcs_prt(pwp, PMCS_PRT_INFO, "FIS REGISTER HOST TO DEVICE: "
+ "OP=0x%02x Feature=0x%04x Count=0x%04x Device=0x%02x "
+ "LBA=%llu", BYTE2(fis[0]), BYTE3(fis[2]) << 8 |
+ BYTE3(fis[0]), WORD0(fis[3]), BYTE3(fis[1]),
+ (unsigned long long)
+ (((uint64_t)fis[2] & 0x00ffffff) << 24 |
+ ((uint64_t)fis[1] & 0x00ffffff)));
+ break;
+ case FIS_REG_D2H:
+ pmcs_prt(pwp, PMCS_PRT_INFO, "FIS REGISTER DEVICE TO HOST: Stat"
+ "us=0x%02x Error=0x%02x Dev=0x%02x Count=0x%04x LBA=%llu",
+ BYTE2(fis[0]), BYTE3(fis[0]), BYTE3(fis[1]), WORD0(fis[3]),
+ (unsigned long long)(((uint64_t)fis[2] & 0x00ffffff) << 24 |
+ ((uint64_t)fis[1] & 0x00ffffff)));
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_INFO, "FIS: 0x%08x 0x%08x 0x%08x 0x%08x "
+ "0x%08x 0x%08x 0x%08x",
+ fis[0], fis[1], fis[2], fis[3], fis[4], fis[5], fis[6]);
+ break;
+ }
+}
+
+void
+pmcs_print_entry(pmcs_hw_t *pwp, int level, char *msg, void *arg)
+{
+ uint32_t *mb = arg;
+ size_t i;
+
+ pmcs_prt(pwp, level, msg);
+ for (i = 0; i < (PMCS_QENTRY_SIZE / sizeof (uint32_t)); i += 4) {
+ pmcs_prt(pwp, level, "Offset %2lu: 0x%08x 0x%08x 0x%08"
+ "x 0x%08x", i * sizeof (uint32_t), LE_32(mb[i]),
+ LE_32(mb[i+1]), LE_32(mb[i+2]),
+ LE_32(mb[i+3]));
+ }
+}
+
+/*
+ * If phyp == NULL we're being called from the worker thread, in which
+ * case we need to check all the PHYs. In this case, the softstate lock
+ * will be held.
+ * If phyp is non-NULL, just issue the spinup release for the specified PHY
+ * (which will already be locked).
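+ *
+ * Illustrative call forms (a sketch of the two modes described above):
+ *
+ * pmcs_spinup_release(pwp, NULL);  - worker thread, pwp->lock held
+ * pmcs_spinup_release(pwp, pptr);  - pptr->phy_lock held by the caller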
+ */
+void
+pmcs_spinup_release(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
+{
+ uint32_t *msg;
+ struct pmcwork *pwrk;
+ pmcs_phy_t *tphyp;
+
+ if (phyp != NULL) {
+ ASSERT(mutex_owned(&phyp->phy_lock));
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Issuing spinup release only for PHY %s", __func__,
+ phyp->path);
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (msg == NULL || (pwrk =
+ pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE);
+ return;
+ }
+
+ phyp->spinup_hold = 0;
+ bzero(msg, PMCS_QENTRY_SIZE);
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
+ PMCIN_LOCAL_PHY_CONTROL));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32((0x10 << 8) | phyp->phynum);
+
+ pwrk->dtype = phyp->dtype;
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ mutex_exit(&pwrk->lock);
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ return;
+ }
+
+ ASSERT(mutex_owned(&pwp->lock));
+
+ tphyp = pwp->root_phys;
+ while (tphyp) {
+ pmcs_lock_phy(tphyp);
+ if (tphyp->spinup_hold == 0) {
+ pmcs_unlock_phy(tphyp);
+ tphyp = tphyp->sibling;
+ continue;
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Issuing spinup release for PHY %s", __func__,
+ tphyp->path);
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ msg = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (msg == NULL || (pwrk =
+ pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) {
+ pmcs_unlock_phy(tphyp);
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ SCHEDULE_WORK(pwp, PMCS_WORK_SPINUP_RELEASE);
+ break;
+ }
+
+ tphyp->spinup_hold = 0;
+ bzero(msg, PMCS_QENTRY_SIZE);
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
+ PMCIN_LOCAL_PHY_CONTROL));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32((0x10 << 8) | tphyp->phynum);
+
+ pwrk->dtype = tphyp->dtype;
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ mutex_exit(&pwrk->lock);
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ pmcs_unlock_phy(tphyp);
+
+ tphyp = tphyp->sibling;
+ }
+}
+
+/*
+ * Abort commands on dead PHYs and deregister them as well as removing
+ * the associated targets.
+ */
+static int
+pmcs_kill_devices(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
+{
+ pmcs_phy_t *pnext, *pchild;
+ boolean_t remove_device;
+ int rval = 0;
+
+ while (phyp) {
+ pmcs_lock_phy(phyp);
+ pchild = phyp->children;
+ pnext = phyp->sibling;
+ pmcs_unlock_phy(phyp);
+
+ if (pchild) {
+ rval = pmcs_kill_devices(pwp, pchild);
+ if (rval) {
+ return (rval);
+ }
+ }
+
+ /*
+ * pmcs_remove_device requires the softstate lock.
+ */
+ mutex_enter(&pwp->lock);
+ pmcs_lock_phy(phyp);
+ if (phyp->dead && phyp->valid_device_id) {
+ remove_device = B_TRUE;
+ } else {
+ remove_device = B_FALSE;
+ }
+
+ if (remove_device) {
+ pmcs_remove_device(pwp, phyp);
+ mutex_exit(&pwp->lock);
+
+ rval = pmcs_kill_device(pwp, phyp);
+
+ if (rval) {
+ pmcs_unlock_phy(phyp);
+ return (rval);
+ }
+ } else {
+ mutex_exit(&pwp->lock);
+ }
+
+ pmcs_unlock_phy(phyp);
+ phyp = pnext;
+ }
+
+ return (rval);
+}
+
+/*
+ * Called with PHY locked
+ */
+int
+pmcs_kill_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr)
+{
+ int r, result;
+ uint32_t msg[PMCS_MSG_SIZE], *ptr, status;
+ struct pmcwork *pwrk;
+ pmcs_xscsi_t *tgt;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "kill %s device @ %s",
+ pmcs_get_typename(pptr->dtype), pptr->path);
+
+ /*
+ * There may be an outstanding ABORT_ALL running, which we wouldn't
+ * know just by checking abort_pending. We can, however, check
+ * abort_all_start. If it's non-zero, there is one, and we'll just
+ * sit here and wait for it to complete. If we don't, we'll remove
+ * the device while there are still commands pending.
+ */
+ if (pptr->abort_all_start) {
+ while (pptr->abort_all_start) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Waiting for outstanding ABORT_ALL on PHY 0x%p",
+ __func__, (void *)pptr);
+ cv_wait(&pptr->abort_all_cv, &pptr->phy_lock);
+ }
+ } else if (pptr->abort_pending) {
+ r = pmcs_abort(pwp, pptr, pptr->device_id, 1, 1);
+
+ if (r) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: ABORT_ALL returned non-zero status (%d) for "
+ "PHY 0x%p", __func__, r, (void *)pptr);
+ return (r);
+ }
+ pptr->abort_pending = 0;
+ }
+
+ /*
+ * Now that everything is aborted from the chip's perspective (or even
+ * if it is not), flush out the wait queue. We won't flush the active
+ * queue since it is possible that abort completions may follow after
+ * the notification that the abort all has completed.
+ */
+ tgt = pptr->target;
+ if (tgt) {
+ mutex_enter(&tgt->statlock);
+ pmcs_flush_target_queues(pwp, tgt, PMCS_TGT_WAIT_QUEUE);
+ mutex_exit(&tgt->statlock);
+ }
+
+ if (pptr->valid_device_id == 0) {
+ return (0);
+ }
+
+ if ((pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr)) == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nowrk, __func__);
+ return (ENOMEM);
+ }
+ pwrk->arg = msg;
+ pwrk->dtype = pptr->dtype;
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
+ PMCIN_DEREGISTER_DEVICE_HANDLE));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(pptr->device_id);
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ return (ENOMEM);
+ }
+
+ COPY_MESSAGE(ptr, msg, 3);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ pmcs_unlock_phy(pptr);
+ WAIT_FOR(pwrk, 250, result);
+ pmcs_lock_phy(pptr);
+ pmcs_pwork(pwp, pwrk);
+
+ if (result) {
+ return (ETIMEDOUT);
+ }
+ status = LE_32(msg[2]);
+ if (status != PMCOUT_STATUS_OK) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: status 0x%x when trying to deregister device %s",
+ __func__, status, pptr->path);
+ }
+
+ pptr->device_id = PMCS_INVALID_DEVICE_ID;
+ PHY_CHANGED(pwp, pptr);
+ RESTART_DISCOVERY(pwp);
+ pptr->valid_device_id = 0;
+ return (0);
+}
+
+/*
+ * Acknowledge the SAS h/w events that need acknowledgement.
+ * This is only needed for first level PHYs.
+ */
+void
+pmcs_ack_events(pmcs_hw_t *pwp)
+{
+ uint32_t msg[PMCS_MSG_SIZE], *ptr;
+ struct pmcwork *pwrk;
+ pmcs_phy_t *pptr;
+
+ for (pptr = pwp->root_phys; pptr; pptr = pptr->sibling) {
+ pmcs_lock_phy(pptr);
+ if (pptr->hw_event_ack == 0) {
+ pmcs_unlock_phy(pptr);
+ continue;
+ }
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ if ((ptr == NULL) || (pwrk =
+ pmcs_gwork(pwp, PMCS_TAG_TYPE_NONE, NULL)) == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_unlock_phy(pptr);
+ SCHEDULE_WORK(pwp, PMCS_WORK_SAS_HW_ACK);
+ break;
+ }
+
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
+ PMCIN_SAW_HW_EVENT_ACK));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(pptr->hw_event_ack);
+
+ mutex_exit(&pwrk->lock);
+ pwrk->dtype = pptr->dtype;
+ pptr->hw_event_ack = 0;
+ COPY_MESSAGE(ptr, msg, 3);
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ pmcs_unlock_phy(pptr);
+ }
+}
+
+/*
+ * Load DMA
+ */
+int
+pmcs_dma_load(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint32_t *msg)
+{
+ ddi_dma_cookie_t *sg;
+ pmcs_dmachunk_t *tc;
+ pmcs_dmasgl_t *sgl, *prior;
+ int seg, tsc;
+ uint64_t sgl_addr;
+
+ /*
+ * If we have no data segments, we're done.
+ */
+ if (CMD2PKT(sp)->pkt_numcookies == 0) {
+ return (0);
+ }
+
+ /*
+ * Get the S/G list pointer.
+ */
+ sg = CMD2PKT(sp)->pkt_cookies;
+
+ /*
+ * If we only have one dma segment, we can directly address that
+ * data within the Inbound message itself.
+ */
+ if (CMD2PKT(sp)->pkt_numcookies == 1) {
+ msg[12] = LE_32(DWORD0(sg->dmac_laddress));
+ msg[13] = LE_32(DWORD1(sg->dmac_laddress));
+ msg[14] = LE_32(sg->dmac_size);
+ msg[15] = 0;
+ return (0);
+ }
+
+ /*
+ * Otherwise, we'll need one or more external S/G list chunks.
+ * Get the first one and its dma address into the Inbound message.
+ */
+ mutex_enter(&pwp->dma_lock);
+ tc = pwp->dma_freelist;
+ if (tc == NULL) {
+ SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
+ mutex_exit(&pwp->dma_lock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: out of SG lists", __func__);
+ return (-1);
+ }
+ pwp->dma_freelist = tc->nxt;
+ mutex_exit(&pwp->dma_lock);
+
+ tc->nxt = NULL;
+ sp->cmd_clist = tc;
+ sgl = tc->chunks;
+ (void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ);
+ sgl_addr = tc->addr;
+ msg[12] = LE_32(DWORD0(sgl_addr));
+ msg[13] = LE_32(DWORD1(sgl_addr));
+ msg[14] = 0;
+ msg[15] = LE_32(PMCS_DMASGL_EXTENSION);
+
+ prior = sgl;
+ tsc = 0;
+
+ for (seg = 0; seg < CMD2PKT(sp)->pkt_numcookies; seg++) {
+ /*
+ * If the current segment count for this chunk is one less than
+ * the number s/g lists per chunk and we have more than one seg
+ * to go, we need another chunk. Get it, and make sure that the
+ * tail end of the previous chunk points to the new chunk
+ * (if remembering an offset can be called 'pointing to').
+ *
+ * Note that we can store the offset into our command area that
+ * represents the new chunk in the length field of the part
+ * that points the PMC chip at the next chunk; the PMC chip
+ * ignores this field when the EXTENSION bit is set.
+ *
+ * This is required for dma unloads later.
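+ *
+ * A rough sketch of the resulting chain (entry counts are only
+ * illustrative; the real size comes from PMCS_SGL_NCHUNKS):
+ *
+ * chunk 0: [seg 0] ... [seg N-2] [EXTENSION entry -> chunk 1]
+ * chunk 1: [seg N-1] [seg N] ...
+ *
+ * The EXTENSION entry carries the DMA address of the next chunk and
+ * has PMCS_DMASGL_EXTENSION set in its flags, while the host-side
+ * chunks stay linked through sp->cmd_clist so pmcs_dma_unload() can
+ * return them to the freelist.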
+ */
+ if (tsc == (PMCS_SGL_NCHUNKS - 1) &&
+ seg < (CMD2PKT(sp)->pkt_numcookies - 1)) {
+ mutex_enter(&pwp->dma_lock);
+ tc = pwp->dma_freelist;
+ if (tc == NULL) {
+ SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS);
+ mutex_exit(&pwp->dma_lock);
+ pmcs_dma_unload(pwp, sp);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: out of SG lists", __func__);
+ return (-1);
+ }
+ pwp->dma_freelist = tc->nxt;
+ tc->nxt = sp->cmd_clist;
+ mutex_exit(&pwp->dma_lock);
+
+ sp->cmd_clist = tc;
+ (void) memset(tc->chunks, 0, PMCS_SGL_CHUNKSZ);
+ sgl = tc->chunks;
+ sgl_addr = tc->addr;
+ prior[PMCS_SGL_NCHUNKS-1].sglal =
+ LE_32(DWORD0(sgl_addr));
+ prior[PMCS_SGL_NCHUNKS-1].sglah =
+ LE_32(DWORD1(sgl_addr));
+ prior[PMCS_SGL_NCHUNKS-1].sglen = 0;
+ prior[PMCS_SGL_NCHUNKS-1].flags =
+ LE_32(PMCS_DMASGL_EXTENSION);
+ prior = sgl;
+ tsc = 0;
+ }
+ sgl[tsc].sglal = LE_32(DWORD0(sg->dmac_laddress));
+ sgl[tsc].sglah = LE_32(DWORD1(sg->dmac_laddress));
+ sgl[tsc].sglen = LE_32(sg->dmac_size);
+ sgl[tsc++].flags = 0;
+ sg++;
+ }
+ return (0);
+}
+
+/*
+ * Unload DMA
+ */
+void
+pmcs_dma_unload(pmcs_hw_t *pwp, pmcs_cmd_t *sp)
+{
+ pmcs_dmachunk_t *cp;
+
+ mutex_enter(&pwp->dma_lock);
+ while ((cp = sp->cmd_clist) != NULL) {
+ sp->cmd_clist = cp->nxt;
+ cp->nxt = pwp->dma_freelist;
+ pwp->dma_freelist = cp;
+ }
+ mutex_exit(&pwp->dma_lock);
+}
+
+/*
+ * Take a chunk of consistent memory that has just been allocated and inserted
+ * into the cip indices and prepare it for DMA chunk usage and add it to the
+ * freelist.
+ *
+ * Called with dma_lock locked (except during attach when it's unnecessary)
+ */
+void
+pmcs_idma_chunks(pmcs_hw_t *pwp, pmcs_dmachunk_t *dcp,
+ pmcs_chunk_t *pchunk, unsigned long lim)
+{
+ unsigned long off, n;
+ pmcs_dmachunk_t *np = dcp;
+ pmcs_chunk_t *tmp_chunk;
+
+ if (pwp->dma_chunklist == NULL) {
+ pwp->dma_chunklist = pchunk;
+ } else {
+ tmp_chunk = pwp->dma_chunklist;
+ while (tmp_chunk->next) {
+ tmp_chunk = tmp_chunk->next;
+ }
+ tmp_chunk->next = pchunk;
+ }
+
+ /*
+ * Install offsets into chunk lists.
+ */
+ for (n = 0, off = 0; off < lim; off += PMCS_SGL_CHUNKSZ, n++) {
+ np->chunks = (void *)&pchunk->addrp[off];
+ np->addr = pchunk->dma_addr + off;
+ np->acc_handle = pchunk->acc_handle;
+ np->dma_handle = pchunk->dma_handle;
+ if ((off + PMCS_SGL_CHUNKSZ) < lim) {
+ np = np->nxt;
+ }
+ }
+ np->nxt = pwp->dma_freelist;
+ pwp->dma_freelist = dcp;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "added %lu DMA chunks ", n);
+}
+
+/*
+ * Change the value of the interrupt coalescing timer. This is done currently
+ * only for I/O completions. If we're using the "auto clear" feature, it can
+ * be turned back on when interrupt coalescing is turned off and must be
+ * turned off when the coalescing timer is on.
+ * NOTE: PMCS_MSIX_GENERAL and PMCS_OQ_IODONE are the same value. As long
+ * as that's true, we don't need to distinguish between them.
+ */
+
+void
+pmcs_set_intr_coal_timer(pmcs_hw_t *pwp, pmcs_coal_timer_adj_t adj)
+{
+ if (adj == DECREASE_TIMER) {
+ /* If the timer is already off, nothing to do. */
+ if (pwp->io_intr_coal.timer_on == B_FALSE) {
+ return;
+ }
+
+ pwp->io_intr_coal.intr_coal_timer -= PMCS_COAL_TIMER_GRAN;
+
+ if (pwp->io_intr_coal.intr_coal_timer == 0) {
+ /* Disable the timer */
+ pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL, 0);
+
+ if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) {
+ pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR,
+ pwp->odb_auto_clear);
+ }
+
+ pwp->io_intr_coal.timer_on = B_FALSE;
+ pwp->io_intr_coal.max_io_completions = B_FALSE;
+ pwp->io_intr_coal.num_intrs = 0;
+ pwp->io_intr_coal.int_cleared = B_FALSE;
+ pwp->io_intr_coal.num_io_completions = 0;
+
+ DTRACE_PROBE1(pmcs__intr__coalesce__timer__off,
+ pmcs_io_intr_coal_t *, &pwp->io_intr_coal);
+ } else {
+ pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER,
+ pwp->io_intr_coal.intr_coal_timer);
+ }
+ } else {
+ /*
+ * If the timer isn't on yet, do the setup for it now.
+ */
+ if (pwp->io_intr_coal.timer_on == B_FALSE) {
+ /* If auto clear is being used, turn it off. */
+ if (pwp->odb_auto_clear & (1 << PMCS_MSIX_IODONE)) {
+ pmcs_wr_topunit(pwp, PMCS_OBDB_AUTO_CLR,
+ (pwp->odb_auto_clear &
+ ~(1 << PMCS_MSIX_IODONE)));
+ }
+
+ pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_CONTROL,
+ (1 << PMCS_MSIX_IODONE));
+ pwp->io_intr_coal.timer_on = B_TRUE;
+ pwp->io_intr_coal.intr_coal_timer =
+ PMCS_COAL_TIMER_GRAN;
+
+ DTRACE_PROBE1(pmcs__intr__coalesce__timer__on,
+ pmcs_io_intr_coal_t *, &pwp->io_intr_coal);
+ } else {
+ pwp->io_intr_coal.intr_coal_timer +=
+ PMCS_COAL_TIMER_GRAN;
+ }
+
+ if (pwp->io_intr_coal.intr_coal_timer > PMCS_MAX_COAL_TIMER) {
+ pwp->io_intr_coal.intr_coal_timer = PMCS_MAX_COAL_TIMER;
+ }
+
+ pmcs_wr_topunit(pwp, PMCS_INT_COALESCING_TIMER,
+ pwp->io_intr_coal.intr_coal_timer);
+ }
+
+ /*
+ * Adjust the interrupt threshold based on the current timer value
+ */
+ pwp->io_intr_coal.intr_threshold =
+ PMCS_INTR_THRESHOLD(PMCS_QUANTUM_TIME_USECS * 1000 /
+ (pwp->io_intr_coal.intr_latency +
+ (pwp->io_intr_coal.intr_coal_timer * 1000)));
+}
+
+/*
+ * Register Access functions
+ */
+uint32_t
+pmcs_rd_iqci(pmcs_hw_t *pwp, uint32_t qnum)
+{
+ uint32_t iqci;
+
+ if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: ddi_dma_sync failed?",
+ __func__);
+ }
+
+ iqci = LE_32(
+ ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2]);
+
+ return (iqci);
+}
+
+uint32_t
+pmcs_rd_oqpi(pmcs_hw_t *pwp, uint32_t qnum)
+{
+ uint32_t oqpi;
+
+ if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: ddi_dma_sync failed?",
+ __func__);
+ }
+
+ oqpi = LE_32(
+ ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2]);
+
+ return (oqpi);
+}
+
+uint32_t
+pmcs_rd_gsm_reg(pmcs_hw_t *pwp, uint32_t off)
+{
+ uint32_t rv, newaxil, oldaxil;
+
+ newaxil = off & ~GSM_BASE_MASK;
+ off &= GSM_BASE_MASK;
+ mutex_enter(&pwp->axil_lock);
+ oldaxil = ddi_get32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
+ ddi_put32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
+ drv_usecwait(10);
+ if (ddi_get32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "AXIL register update failed");
+ }
+ rv = ddi_get32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2]);
+ ddi_put32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
+ drv_usecwait(10);
+ if (ddi_get32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "AXIL register restore failed");
+ }
+ mutex_exit(&pwp->axil_lock);
+ return (rv);
+}
+
+void
+pmcs_wr_gsm_reg(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
+{
+ uint32_t newaxil, oldaxil;
+
+ newaxil = off & ~GSM_BASE_MASK;
+ off &= GSM_BASE_MASK;
+ mutex_enter(&pwp->axil_lock);
+ oldaxil = ddi_get32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2]);
+ ddi_put32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2], newaxil);
+ drv_usecwait(10);
+ if (ddi_get32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != newaxil) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "AXIL register update failed");
+ }
+ ddi_put32(pwp->gsm_acc_handle, &pwp->gsm_regs[off >> 2], val);
+ ddi_put32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2], oldaxil);
+ drv_usecwait(10);
+ if (ddi_get32(pwp->top_acc_handle,
+ &pwp->top_regs[PMCS_AXI_TRANS >> 2]) != oldaxil) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "AXIL register restore failed");
+ }
+ mutex_exit(&pwp->axil_lock);
+}
+
+uint32_t
+pmcs_rd_topunit(pmcs_hw_t *pwp, uint32_t off)
+{
+ switch (off) {
+ case PMCS_SPC_RESET:
+ case PMCS_SPC_BOOT_STRAP:
+ case PMCS_SPC_DEVICE_ID:
+ case PMCS_DEVICE_REVISION:
+ off = pmcs_rd_gsm_reg(pwp, off);
+ break;
+ default:
+ off = ddi_get32(pwp->top_acc_handle,
+ &pwp->top_regs[off >> 2]);
+ break;
+ }
+ return (off);
+}
+
+void
+pmcs_wr_topunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
+{
+ switch (off) {
+ case PMCS_SPC_RESET:
+ case PMCS_DEVICE_REVISION:
+ pmcs_wr_gsm_reg(pwp, off, val);
+ break;
+ default:
+ ddi_put32(pwp->top_acc_handle, &pwp->top_regs[off >> 2], val);
+ break;
+ }
+}
+
+uint32_t
+pmcs_rd_msgunit(pmcs_hw_t *pwp, uint32_t off)
+{
+ return (ddi_get32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2]));
+}
+
+uint32_t
+pmcs_rd_mpi_tbl(pmcs_hw_t *pwp, uint32_t off)
+{
+ return (ddi_get32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2]));
+}
+
+uint32_t
+pmcs_rd_gst_tbl(pmcs_hw_t *pwp, uint32_t off)
+{
+ return (ddi_get32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2]));
+}
+
+uint32_t
+pmcs_rd_iqc_tbl(pmcs_hw_t *pwp, uint32_t off)
+{
+ return (ddi_get32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2]));
+}
+
+uint32_t
+pmcs_rd_oqc_tbl(pmcs_hw_t *pwp, uint32_t off)
+{
+ return (ddi_get32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2]));
+}
+
+uint32_t
+pmcs_rd_iqpi(pmcs_hw_t *pwp, uint32_t qnum)
+{
+ return (ddi_get32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2]));
+}
+
+uint32_t
+pmcs_rd_oqci(pmcs_hw_t *pwp, uint32_t qnum)
+{
+ return (ddi_get32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2]));
+}
+
+void
+pmcs_wr_msgunit(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
+{
+ ddi_put32(pwp->msg_acc_handle, &pwp->msg_regs[off >> 2], val);
+}
+
+void
+pmcs_wr_mpi_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
+{
+ ddi_put32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[(pwp->mpi_offset + off) >> 2], (val));
+}
+
+void
+pmcs_wr_gst_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
+{
+ ddi_put32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[(pwp->mpi_gst_offset + off) >> 2], val);
+}
+
+void
+pmcs_wr_iqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
+{
+ ddi_put32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[(pwp->mpi_iqc_offset + off) >> 2], val);
+}
+
+void
+pmcs_wr_oqc_tbl(pmcs_hw_t *pwp, uint32_t off, uint32_t val)
+{
+ ddi_put32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[(pwp->mpi_oqc_offset + off) >> 2], val);
+}
+
+void
+pmcs_wr_iqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
+{
+ ((uint32_t *)((void *)pwp->cip))[IQ_OFFSET(qnum) >> 2] = val;
+ if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: ddi_dma_sync failed?",
+ __func__);
+ }
+}
+
+void
+pmcs_wr_iqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
+{
+ ddi_put32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[pwp->iqpi_offset[qnum] >> 2], val);
+}
+
+void
+pmcs_wr_oqci(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
+{
+ ddi_put32(pwp->mpi_acc_handle,
+ &pwp->mpi_regs[pwp->oqci_offset[qnum] >> 2], val);
+}
+
+void
+pmcs_wr_oqpi(pmcs_hw_t *pwp, uint32_t qnum, uint32_t val)
+{
+ ((uint32_t *)((void *)pwp->cip))[OQ_OFFSET(qnum) >> 2] = val;
+ if (ddi_dma_sync(pwp->cip_handles, 0, 0, DDI_DMA_SYNC_FORDEV) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: ddi_dma_sync failed?",
+ __func__);
+ }
+}
+
+/*
+ * Check the status value of an outbound IOMB and report anything bad
+ */
+
+void
+pmcs_check_iomb_status(pmcs_hw_t *pwp, uint32_t *iomb)
+{
+ uint16_t opcode;
+ int offset;
+
+ if (iomb == NULL) {
+ return;
+ }
+
+ opcode = LE_32(iomb[0]) & 0xfff;
+
+ switch (opcode) {
+ /*
+ * The following have no status field, so ignore them
+ */
+ case PMCOUT_ECHO:
+ case PMCOUT_SAS_HW_EVENT:
+ case PMCOUT_GET_DEVICE_HANDLE:
+ case PMCOUT_SATA_EVENT:
+ case PMCOUT_SSP_EVENT:
+ case PMCOUT_DEVICE_HANDLE_ARRIVED:
+ case PMCOUT_SMP_REQUEST_RECEIVED:
+ case PMCOUT_GPIO:
+ case PMCOUT_GPIO_EVENT:
+ case PMCOUT_GET_TIME_STAMP:
+ case PMCOUT_SKIP_ENTRIES:
+ case PMCOUT_GET_NVMD_DATA: /* Actually lower 16 bits of word 3 */
+ case PMCOUT_SET_NVMD_DATA: /* but ignore - we don't use these */
+ case PMCOUT_DEVICE_HANDLE_REMOVED:
+ case PMCOUT_SSP_REQUEST_RECEIVED:
+ return;
+
+ case PMCOUT_GENERAL_EVENT:
+ offset = 1;
+ break;
+
+ case PMCOUT_SSP_COMPLETION:
+ case PMCOUT_SMP_COMPLETION:
+ case PMCOUT_DEVICE_REGISTRATION:
+ case PMCOUT_DEREGISTER_DEVICE_HANDLE:
+ case PMCOUT_SATA_COMPLETION:
+ case PMCOUT_DEVICE_INFO:
+ case PMCOUT_FW_FLASH_UPDATE:
+ case PMCOUT_SSP_ABORT:
+ case PMCOUT_SATA_ABORT:
+ case PMCOUT_SAS_DIAG_MODE_START_END:
+ case PMCOUT_SAS_HW_EVENT_ACK_ACK:
+ case PMCOUT_SMP_ABORT:
+ case PMCOUT_SET_DEVICE_STATE:
+ case PMCOUT_GET_DEVICE_STATE:
+ case PMCOUT_SET_DEVICE_INFO:
+ offset = 2;
+ break;
+
+ case PMCOUT_LOCAL_PHY_CONTROL:
+ case PMCOUT_SAS_DIAG_EXECUTE:
+ case PMCOUT_PORT_CONTROL:
+ offset = 3;
+ break;
+
+ case PMCOUT_GET_INFO:
+ case PMCOUT_GET_VPD:
+ case PMCOUT_SAS_ASSISTED_DISCOVERY_EVENT:
+ case PMCOUT_SATA_ASSISTED_DISCOVERY_EVENT:
+ case PMCOUT_SET_VPD:
+ case PMCOUT_TWI:
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
+ "Got response for deprecated opcode", iomb);
+ return;
+
+ default:
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
+ "Got response for unknown opcode", iomb);
+ return;
+ }
+
+ if (LE_32(iomb[offset]) != PMCOUT_STATUS_OK) {
+ pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
+ "bad status on TAG_TYPE_NONE command", iomb);
+ }
+}
+
+/*
+ * Called with statlock held
+ */
+void
+pmcs_clear_xp(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
+{
+ _NOTE(ARGUNUSED(pwp));
+
+ ASSERT(mutex_owned(&xp->statlock));
+ ASSERT(xp->dying);
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Device 0x%p is gone.", __func__,
+ (void *)xp);
+
+ /*
+ * Clear the dip now. This keeps pmcs_rem_old_devices from attempting
+ * to call us on the same device while we're still flushing queues.
+ * The only side effect is we can no longer update SM-HBA properties,
+ * but this device is going away anyway, so it doesn't matter.
+ */
+ xp->dip = NULL;
+
+ /*
+ * Flush all target queues
+ */
+ pmcs_flush_target_queues(pwp, xp, PMCS_TGT_ALL_QUEUES);
+
+ xp->special_running = 0;
+ xp->recovering = 0;
+ xp->recover_wait = 0;
+ xp->draining = 0;
+ xp->dying = 0;
+ xp->new = 0;
+ xp->assigned = 0;
+ xp->dev_state = 0;
+ xp->tagmap = 0;
+ xp->dev_gone = 1;
+ xp->event_recovery = 0;
+ xp->dtype = NOTHING;
+ xp->wq_recovery_tail = NULL;
+ /* Don't clear xp->phy */
+ /* Don't clear xp->actv_cnt */
+}
+
+static int
+pmcs_smp_function_result(pmcs_hw_t *pwp, smp_response_frame_t *srf)
+{
+ int result = srf->srf_result;
+
+ switch (result) {
+ case SMP_RES_UNKNOWN_FUNCTION:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: SMP DISCOVER Response "
+ "Function Result: Unknown SMP Function(0x%x)",
+ __func__, result);
+ break;
+ case SMP_RES_FUNCTION_FAILED:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: SMP DISCOVER Response "
+ "Function Result: SMP Function Failed(0x%x)",
+ __func__, result);
+ break;
+ case SMP_RES_INVALID_REQUEST_FRAME_LENGTH:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: SMP DISCOVER Response "
+ "Function Result: Invalid Request Frame Length(0x%x)",
+ __func__, result);
+ break;
+ case SMP_RES_INCOMPLETE_DESCRIPTOR_LIST:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: SMP DISCOVER Response "
+ "Function Result: Incomplete Descriptor List(0x%x)",
+ __func__, result);
+ break;
+ case SMP_RES_PHY_DOES_NOT_EXIST:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: SMP DISCOVER Response "
+ "Function Result: PHY does not exist(0x%x)",
+ __func__, result);
+ break;
+ case SMP_RES_PHY_VACANT:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: SMP DISCOVER Response "
+ "Function Result: PHY Vacant(0x%x)",
+ __func__, result);
+ break;
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: SMP DISCOVER Response "
+ "Function Result: (0x%x)",
+ __func__, result);
+ break;
+ }
+
+ return (result);
+}
+
+/*
+ * Do all the repetitive stuff necessary to setup for DMA
+ *
+ * pwp: Used for dip
+ * dma_attr: ddi_dma_attr_t to use for the mapping
+ * acch: ddi_acc_handle_t to use for the mapping
+ * dmah: ddi_dma_handle_t to use
+ * length: Amount of memory for mapping
+ * kvp: Pointer filled in with kernel virtual address on successful return
+ * dma_addr: Pointer filled in with DMA address on successful return
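+ *
+ * Illustrative usage (a sketch only; the attribute and length names
+ * below are hypothetical, not taken from this driver):
+ *
+ * ddi_acc_handle_t acch;
+ * ddi_dma_handle_t dmah;
+ * caddr_t kva;
+ * uint64_t dma_addr;
+ *
+ * if (pmcs_dma_setup(pwp, &some_dma_attr, &acch, &dmah,
+ *     some_length, &kva, &dma_addr) == B_FALSE) {
+ *         (handle the allocation failure)
+ * }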
+ */
+boolean_t
+pmcs_dma_setup(pmcs_hw_t *pwp, ddi_dma_attr_t *dma_attr, ddi_acc_handle_t *acch,
+ ddi_dma_handle_t *dmah, size_t length, caddr_t *kvp, uint64_t *dma_addr)
+{
+ dev_info_t *dip = pwp->dip;
+ ddi_dma_cookie_t cookie;
+ size_t real_length;
+ uint_t ddma_flag = DDI_DMA_CONSISTENT;
+ uint_t ddabh_flag = DDI_DMA_CONSISTENT | DDI_DMA_RDWR;
+ uint_t cookie_cnt;
+ ddi_device_acc_attr_t mattr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_DEFAULT_ACC
+ };
+
+ *acch = NULL;
+ *dmah = NULL;
+
+ if (ddi_dma_alloc_handle(dip, dma_attr, DDI_DMA_SLEEP, NULL, dmah) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Failed to allocate DMA handle");
+ return (B_FALSE);
+ }
+
+ if (ddi_dma_mem_alloc(*dmah, length, &mattr, ddma_flag, DDI_DMA_SLEEP,
+ NULL, kvp, &real_length, acch) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Failed to allocate DMA mem");
+ ddi_dma_free_handle(dmah);
+ *dmah = NULL;
+ return (B_FALSE);
+ }
+
+ if (ddi_dma_addr_bind_handle(*dmah, NULL, *kvp, real_length,
+ ddabh_flag, DDI_DMA_SLEEP, NULL, &cookie, &cookie_cnt)
+ != DDI_DMA_MAPPED) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Failed to bind DMA");
+ ddi_dma_free_handle(dmah);
+ ddi_dma_mem_free(acch);
+ *dmah = NULL;
+ *acch = NULL;
+ return (B_FALSE);
+ }
+
+ if (cookie_cnt != 1) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Multiple cookies");
+ if (ddi_dma_unbind_handle(*dmah) != DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition failed at "
+ "%s():%d", __func__, __LINE__);
+ }
+ ddi_dma_free_handle(dmah);
+ ddi_dma_mem_free(acch);
+ *dmah = NULL;
+ *acch = NULL;
+ return (B_FALSE);
+ }
+
+ *dma_addr = cookie.dmac_laddress;
+
+ return (B_TRUE);
+}
+
+/*
+ * Flush requested queues for a particular target. Called with statlock held
+ */
+void
+pmcs_flush_target_queues(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt, uint8_t queues)
+{
+ pmcs_cmd_t *sp;
+ pmcwork_t *pwrk;
+
+ ASSERT(pwp != NULL);
+ ASSERT(tgt != NULL);
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Flushing queues (%d) for target 0x%p", __func__,
+ queues, (void *)tgt);
+
+ /*
+ * Commands on the wait queue (or the special queue below) don't have
+ * work structures associated with them.
+ */
+ if (queues & PMCS_TGT_WAIT_QUEUE) {
+ mutex_enter(&tgt->wqlock);
+ while ((sp = STAILQ_FIRST(&tgt->wq)) != NULL) {
+ STAILQ_REMOVE(&tgt->wq, sp, pmcs_cmd, cmd_next);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: Removing cmd 0x%p from wq for target 0x%p",
+ __func__, (void *)sp, (void *)tgt);
+ CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
+ CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
+ mutex_exit(&tgt->wqlock);
+ pmcs_dma_unload(pwp, sp);
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ mutex_exit(&pwp->cq_lock);
+ mutex_enter(&tgt->wqlock);
+ }
+ mutex_exit(&tgt->wqlock);
+ }
+
+ /*
+ * Commands on the active queue will have work structures associated
+ * with them.
+ */
+ if (queues & PMCS_TGT_ACTIVE_QUEUE) {
+ mutex_enter(&tgt->aqlock);
+ while ((sp = STAILQ_FIRST(&tgt->aq)) != NULL) {
+ STAILQ_REMOVE(&tgt->aq, sp, pmcs_cmd, cmd_next);
+ pwrk = pmcs_tag2wp(pwp, sp->cmd_tag);
+ mutex_exit(&tgt->aqlock);
+ mutex_exit(&tgt->statlock);
+ /*
+ * If we found a work structure, mark it as dead
+ * and complete it
+ */
+ if (pwrk != NULL) {
+ pwrk->dead = 1;
+ CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
+ CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
+ pmcs_complete_work_impl(pwp, pwrk, NULL, 0);
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: Removing cmd 0x%p from aq for target 0x%p",
+ __func__, (void *)sp, (void *)tgt);
+ pmcs_dma_unload(pwp, sp);
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ mutex_exit(&pwp->cq_lock);
+ mutex_enter(&tgt->aqlock);
+ mutex_enter(&tgt->statlock);
+ }
+ mutex_exit(&tgt->aqlock);
+ }
+
+ if (queues & PMCS_TGT_SPECIAL_QUEUE) {
+ while ((sp = STAILQ_FIRST(&tgt->sq)) != NULL) {
+ STAILQ_REMOVE(&tgt->sq, sp, pmcs_cmd, cmd_next);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: Removing cmd 0x%p from sq for target 0x%p",
+ __func__, (void *)sp, (void *)tgt);
+ CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE;
+ CMD2PKT(sp)->pkt_state = STATE_GOT_BUS;
+ pmcs_dma_unload(pwp, sp);
+ mutex_enter(&pwp->cq_lock);
+ STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
+ mutex_exit(&pwp->cq_lock);
+ }
+ }
+}
+
+void
+pmcs_complete_work_impl(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *iomb,
+ size_t amt)
+{
+ switch (PMCS_TAG_TYPE(pwrk->htag)) {
+ case PMCS_TAG_TYPE_CBACK:
+ {
+ pmcs_cb_t callback = (pmcs_cb_t)pwrk->ptr;
+ (*callback)(pwp, pwrk, iomb);
+ break;
+ }
+ case PMCS_TAG_TYPE_WAIT:
+ if (pwrk->arg && iomb && amt) {
+ (void) memcpy(pwrk->arg, iomb, amt);
+ }
+ cv_signal(&pwrk->sleep_cv);
+ mutex_exit(&pwrk->lock);
+ break;
+ case PMCS_TAG_TYPE_NONE:
+#ifdef DEBUG
+ pmcs_check_iomb_status(pwp, iomb);
+#endif
+ pmcs_pwork(pwp, pwrk);
+ break;
+ default:
+ /*
+ * We will leak a structure here if we don't know
+ * what happened
+ */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Unknown PMCS_TAG_TYPE (%x)",
+ __func__, PMCS_TAG_TYPE(pwrk->htag));
+ break;
+ }
+}
+
+/*
+ * Determine if iport still has targets. During detach(9E), if SCSA is
+ * successful in its guarantee of tran_tgt_free(9E) before detach(9E),
+ * this should always return B_FALSE.
+ */
+boolean_t
+pmcs_iport_has_targets(pmcs_hw_t *pwp, pmcs_iport_t *iport)
+{
+ pmcs_xscsi_t *xp;
+ int i;
+
+ mutex_enter(&pwp->lock);
+
+ if (!pwp->targets || !pwp->max_dev) {
+ mutex_exit(&pwp->lock);
+ return (B_FALSE);
+ }
+
+ for (i = 0; i < pwp->max_dev; i++) {
+ xp = pwp->targets[i];
+ if ((xp == NULL) || (xp->phy == NULL) ||
+ (xp->phy->iport != iport)) {
+ continue;
+ }
+
+ mutex_exit(&pwp->lock);
+ return (B_TRUE);
+ }
+
+ mutex_exit(&pwp->lock);
+ return (B_FALSE);
+}
+
+/*
+ * Called with softstate lock held
+ */
+void
+pmcs_destroy_target(pmcs_xscsi_t *target)
+{
+ pmcs_hw_t *pwp = target->pwp;
+ pmcs_iport_t *iport;
+
+ ASSERT(pwp);
+ ASSERT(mutex_owned(&pwp->lock));
+
+ if (!target->ua) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: target %p iport addres is null",
+ __func__, (void *)target);
+ }
+
+ iport = pmcs_get_iport_by_ua(pwp, target->ua);
+ if (iport == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: no iport associated with tgt(0x%p)",
+ __func__, (void *)target);
+ return;
+ }
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: free target %p", __func__, (void *)target);
+ if (target->ua) {
+ strfree(target->ua);
+ }
+
+ mutex_destroy(&target->wqlock);
+ mutex_destroy(&target->aqlock);
+ mutex_destroy(&target->statlock);
+ cv_destroy(&target->reset_cv);
+ cv_destroy(&target->abort_cv);
+ ddi_soft_state_bystr_fini(&target->lun_sstate);
+ ddi_soft_state_bystr_free(iport->tgt_sstate, target->unit_address);
+ pmcs_rele_iport(iport);
+}
+
+/*
+ * Get device state. Called with statlock and PHY lock held.
+ */
+int
+pmcs_get_dev_state(pmcs_hw_t *pwp, pmcs_xscsi_t *xp, uint8_t *ds)
+{
+ uint32_t htag, *ptr, msg[PMCS_MSG_SIZE];
+ int result;
+ struct pmcwork *pwrk;
+ pmcs_phy_t *phyp;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG3, "%s: tgt(0x%p)", __func__, (void *)xp);
+ if (xp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Target is NULL", __func__);
+ return (-1);
+ }
+
+ ASSERT(mutex_owned(&xp->statlock));
+ phyp = xp->phy;
+ ASSERT(mutex_owned(&phyp->phy_lock));
+
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, phyp);
+ if (pwrk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nowrk, __func__);
+ return (-1);
+ }
+ pwrk->arg = msg;
+ pwrk->dtype = phyp->dtype;
+
+ if (phyp->valid_device_id == 0) {
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Invalid DeviceID", __func__);
+ return (-1);
+ }
+ htag = pwrk->htag;
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
+ PMCIN_GET_DEVICE_STATE));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(phyp->device_id);
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ return (-1);
+ }
+ COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ mutex_exit(&xp->statlock);
+ pmcs_unlock_phy(phyp);
+ WAIT_FOR(pwrk, 1000, result);
+ pmcs_lock_phy(phyp);
+ pmcs_pwork(pwp, pwrk);
+ mutex_enter(&xp->statlock);
+
+ if (result) {
+ pmcs_timed_out(pwp, htag, __func__);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: cmd timed out, returning ",
+ __func__);
+ return (-1);
+ }
+ if (LE_32(msg[2]) == 0) {
+ *ds = (uint8_t)(LE_32(msg[4]));
+ if (*ds != xp->dev_state) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: retrieved_ds=0x%x, target_ds=0x%x", __func__,
+ *ds, xp->dev_state);
+ }
+ return (0);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: cmd failed Status(0x%x), returning ", __func__,
+ LE_32(msg[2]));
+ return (-1);
+ }
+}
+
+/*
+ * Set device state. Called with target's statlock and PHY lock held.
+ */
+int
+pmcs_set_dev_state(pmcs_hw_t *pwp, pmcs_xscsi_t *xp, uint8_t ds)
+{
+ uint32_t htag, *ptr, msg[PMCS_MSG_SIZE];
+ int result;
+ uint8_t pds, nds;
+ struct pmcwork *pwrk;
+ pmcs_phy_t *phyp;
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, "%s: ds(0x%x), tgt(0x%p)",
+ __func__, ds, (void *)xp);
+ if (xp == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Target is Null", __func__);
+ return (-1);
+ }
+
+ phyp = xp->phy;
+ pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, phyp);
+ if (pwrk == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nowrk, __func__);
+ return (-1);
+ }
+ if (phyp == NULL) {
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, "%s: PHY is Null",
+ __func__);
+ return (-1);
+ }
+ if (phyp->valid_device_id == 0) {
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Invalid DeviceID", __func__);
+ return (-1);
+ }
+ pwrk->arg = msg;
+ pwrk->dtype = phyp->dtype;
+ htag = pwrk->htag;
+ msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL,
+ PMCIN_SET_DEVICE_STATE));
+ msg[1] = LE_32(pwrk->htag);
+ msg[2] = LE_32(phyp->device_id);
+ msg[3] = LE_32(ds);
+
+ mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+ if (ptr == NULL) {
+ mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
+ pmcs_pwork(pwp, pwrk);
+ pmcs_prt(pwp, PMCS_PRT_ERR, pmcs_nomsg, __func__);
+ return (-1);
+ }
+ COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
+ pwrk->state = PMCS_WORK_STATE_ONCHIP;
+ INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
+
+ mutex_exit(&xp->statlock);
+ pmcs_unlock_phy(phyp);
+ WAIT_FOR(pwrk, 1000, result);
+ pmcs_lock_phy(phyp);
+ pmcs_pwork(pwp, pwrk);
+ mutex_enter(&xp->statlock);
+
+ if (result) {
+ pmcs_timed_out(pwp, htag, __func__);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: cmd timed out, returning", __func__);
+ return (-1);
+ }
+ if (LE_32(msg[2]) == 0) {
+ pds = (uint8_t)(LE_32(msg[4]) >> 4);
+ nds = (uint8_t)(LE_32(msg[4]) & 0x0000000f);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, "%s: previous_ds=0x%x, "
+ "new_ds=0x%x", __func__, pds, nds);
+ xp->dev_state = nds;
+ return (0);
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: cmd failed Status(0x%x), returning ", __func__,
+ LE_32(msg[2]));
+ return (-1);
+ }
+}
+
+void
+pmcs_dev_state_recovery(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
+{
+ uint8_t ds;
+ int rc;
+ pmcs_xscsi_t *tgt;
+ pmcs_phy_t *pptr, *pnext, *pchild;
+
+ /*
+ * First time, check to see if we're already performing recovery
+ */
+ if (phyp == NULL) {
+ mutex_enter(&pwp->lock);
+ if (pwp->ds_err_recovering) {
+ mutex_exit(&pwp->lock);
+ SCHEDULE_WORK(pwp, PMCS_WORK_DS_ERR_RECOVERY);
+ return;
+ }
+
+ pwp->ds_err_recovering = 1;
+ pptr = pwp->root_phys;
+ mutex_exit(&pwp->lock);
+ } else {
+ pptr = phyp;
+ }
+
+ while (pptr) {
+ /*
+ * Since ds_err_recovering is set, we can be assured these
+ * PHYs won't disappear on us while we do this.
+ */
+ pmcs_lock_phy(pptr);
+ pchild = pptr->children;
+ pnext = pptr->sibling;
+ pmcs_unlock_phy(pptr);
+
+ if (pchild) {
+ pmcs_dev_state_recovery(pwp, pchild);
+ }
+
+ tgt = NULL;
+ pmcs_lock_phy(pptr);
+
+ if (pptr->dead) {
+ goto next_phy;
+ }
+
+ tgt = pptr->target;
+ if (tgt == NULL) {
+ if (pptr->dtype != NOTHING) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG2,
+ "%s: no target for DS error recovery for "
+ "PHY 0x%p", __func__, (void *)pptr);
+ }
+ goto next_phy;
+ }
+
+ mutex_enter(&tgt->statlock);
+
+ if (tgt->recover_wait == 0) {
+ goto next_phy;
+ }
+
+ if (tgt->dying) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Not doing DS recovery on dying target %p",
+ __func__, (void *)tgt);
+ goto next_phy;
+ }
+
+ /*
+ * Step 1: Put the device into the IN_RECOVERY state
+ */
+ rc = pmcs_get_dev_state(pwp, tgt, &ds);
+ if (rc != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: pmcs_get_dev_state on PHY %s "
+ "failed (rc=%d)",
+ __func__, pptr->path, rc);
+
+ pmcs_handle_ds_recovery_error(pptr, tgt, pwp,
+ __func__, __LINE__, "pmcs_get_dev_state");
+
+ goto next_phy;
+ }
+
+ if (tgt->dev_state == ds) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Target 0x%p already IN_RECOVERY", __func__,
+ (void *)tgt);
+ } else {
+ tgt->dev_state = ds;
+ ds = PMCS_DEVICE_STATE_IN_RECOVERY;
+ rc = pmcs_send_err_recovery_cmd(pwp, ds, tgt);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: pmcs_send_err_recovery_cmd "
+ "result(%d) tgt(0x%p) ds(0x%x) tgt->ds(0x%x)",
+ __func__, rc, (void *)tgt, ds, tgt->dev_state);
+
+ if (rc) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: pmcs_send_err_recovery_cmd to PHY %s "
+ "failed (rc=%d)",
+ __func__, pptr->path, rc);
+
+ pmcs_handle_ds_recovery_error(pptr, tgt, pwp,
+ __func__, __LINE__,
+ "pmcs_send_err_recovery_cmd");
+
+ goto next_phy;
+ }
+ }
+
+ /*
+ * Step 2: Perform a hard reset on the PHY
+ */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Issue HARD_RESET to PHY %s", __func__, pptr->path);
+ /*
+ * Must release statlock here because pmcs_reset_phy will
+ * drop and reacquire the PHY lock.
+ */
+ mutex_exit(&tgt->statlock);
+ rc = pmcs_reset_phy(pwp, pptr, PMCS_PHYOP_HARD_RESET);
+ mutex_enter(&tgt->statlock);
+ if (rc) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: HARD_RESET to PHY %s failed (rc=%d)",
+ __func__, pptr->path, rc);
+
+ pmcs_handle_ds_recovery_error(pptr, tgt, pwp,
+ __func__, __LINE__, "HARD_RESET");
+
+ goto next_phy;
+ }
+
+ /*
+ * Step 3: Abort all I/Os to the device
+ */
+ if (pptr->abort_all_start) {
+ while (pptr->abort_all_start) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Waiting for outstanding ABORT_ALL on "
+ "PHY 0x%p", __func__, (void *)pptr);
+ cv_wait(&pptr->abort_all_cv, &pptr->phy_lock);
+ }
+ } else {
+ mutex_exit(&tgt->statlock);
+ rc = pmcs_abort(pwp, pptr, pptr->device_id, 1, 1);
+ mutex_enter(&tgt->statlock);
+ if (rc != 0) {
+ pptr->abort_pending = 1;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: pmcs_abort to PHY %s failed (rc=%d)",
+ __func__, pptr->path, rc);
+
+ pmcs_handle_ds_recovery_error(pptr, tgt,
+ pwp, __func__, __LINE__, "pmcs_abort");
+
+ goto next_phy;
+ }
+ }
+
+ /*
+ * Step 4: Set the device back to OPERATIONAL state
+ */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Set PHY/tgt 0x%p/0x%p to OPERATIONAL state",
+ __func__, (void *)pptr, (void *)tgt);
+ rc = pmcs_set_dev_state(pwp, tgt,
+ PMCS_DEVICE_STATE_OPERATIONAL);
+ if (rc == 0) {
+ tgt->recover_wait = 0;
+ pptr->ds_recovery_retries = 0;
+ /*
+ * Don't bother to run the work queues if the PHY
+ * is dead.
+ */
+ if (tgt->phy && !tgt->phy->dead) {
+ SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
+ (void) ddi_taskq_dispatch(pwp->tq, pmcs_worker,
+ pwp, DDI_NOSLEEP);
+ }
+ } else {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Failed to SET tgt 0x%p to OPERATIONAL state",
+ __func__, (void *)tgt);
+
+ pmcs_handle_ds_recovery_error(pptr, tgt, pwp,
+ __func__, __LINE__, "SET tgt to OPERATIONAL state");
+
+ goto next_phy;
+ }
+
+next_phy:
+ if (tgt) {
+ mutex_exit(&tgt->statlock);
+ }
+ pmcs_unlock_phy(pptr);
+ pptr = pnext;
+ }
+
+ /*
+ * Only clear ds_err_recovering if we're exiting for good and not
+ * just unwinding from recursion
+ */
+ if (phyp == NULL) {
+ mutex_enter(&pwp->lock);
+ pwp->ds_err_recovering = 0;
+ mutex_exit(&pwp->lock);
+ }
+}
+
+/*
+ * Called with target's statlock and PHY lock held.
+ */
+int
+pmcs_send_err_recovery_cmd(pmcs_hw_t *pwp, uint8_t dev_state, pmcs_xscsi_t *tgt)
+{
+ pmcs_phy_t *pptr;
+ int rc = -1;
+
+ ASSERT(tgt != NULL);
+ ASSERT(mutex_owned(&tgt->statlock));
+
+ if (tgt->recovering) {
+ return (0);
+ }
+
+ tgt->recovering = 1;
+ pptr = tgt->phy;
+
+ if (pptr == NULL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, "%s: PHY is Null",
+ __func__);
+ return (-1);
+ }
+
+ ASSERT(mutex_owned(&pptr->phy_lock));
+
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE, "%s: ds: 0x%x, tgt ds(0x%x)",
+ __func__, dev_state, tgt->dev_state);
+
+ switch (dev_state) {
+ case PMCS_DEVICE_STATE_IN_RECOVERY:
+ if (tgt->dev_state == PMCS_DEVICE_STATE_IN_RECOVERY) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Target 0x%p already IN_RECOVERY", __func__,
+ (void *)tgt);
+ rc = 0; /* This is not an error */
+ goto no_action;
+ }
+
+ rc = pmcs_set_dev_state(pwp, tgt,
+ PMCS_DEVICE_STATE_IN_RECOVERY);
+ if (rc != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s(1): Failed to SET tgt(0x%p) to _IN_RECOVERY",
+ __func__, (void *)tgt);
+ }
+
+ break;
+
+ case PMCS_DEVICE_STATE_OPERATIONAL:
+ if (tgt->dev_state != PMCS_DEVICE_STATE_IN_RECOVERY) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Target 0x%p not ready to go OPERATIONAL",
+ __func__, (void *)tgt);
+ goto no_action;
+ }
+
+ rc = pmcs_set_dev_state(pwp, tgt,
+ PMCS_DEVICE_STATE_OPERATIONAL);
+ tgt->reset_success = 1;
+ if (rc != 0) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s(2): Failed to SET tgt(0x%p) to OPERATIONAL",
+ __func__, (void *)tgt);
+ tgt->reset_success = 0;
+ }
+
+ break;
+
+ case PMCS_DEVICE_STATE_NON_OPERATIONAL:
+ PHY_CHANGED(pwp, pptr);
+ RESTART_DISCOVERY(pwp);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Device at %s is non-operational",
+ __func__, pptr->path);
+ tgt->dev_state = PMCS_DEVICE_STATE_NON_OPERATIONAL;
+ rc = 0;
+
+ break;
+
+ default:
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Invalid state requested (%d)", __func__,
+ dev_state);
+ break;
+
+ }
+
+no_action:
+ tgt->recovering = 0;
+ return (rc);
+}
+
+/*
+ * pmcs_lock_phy_impl
+ *
+ * This function is what does the actual work for pmcs_lock_phy. It will
+ * lock all PHYs from phyp down in a top-down fashion.
+ *
+ * Locking notes:
+ * 1. level starts from 0 for the PHY ("parent") that's passed in. It is
+ * not a reflection of the actual level of the PHY in the SAS topology.
+ * 2. If parent is an expander, then parent is locked along with all its
+ * descendents.
+ * 3. Expander subsidiary PHYs at level 0 are not locked. It is the
+ * responsibility of the caller to individually lock expander subsidiary PHYs
+ * at level 0 if necessary.
+ * 4. Siblings at level 0 are not traversed due to the possibility that we're
+ * locking a PHY on the dead list. The siblings could be pointing to invalid
+ * PHYs. We don't lock siblings at level 0 anyway.
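+ *
+ * Illustrative usage from a caller's point of view (a sketch):
+ *
+ * pmcs_lock_phy(pptr);    - locks pptr and its whole subtree
+ * (operate on pptr and its descendents)
+ * pmcs_unlock_phy(pptr);  - unlocks the same subtree
+ *
+ * Siblings of the PHY that was passed in are left alone; only the
+ * subtree below it (plus the PHY itself) is covered.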
+ */
+static void
+pmcs_lock_phy_impl(pmcs_phy_t *phyp, int level)
+{
+ pmcs_phy_t *tphyp;
+
+ ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
+ (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));
+
+ /*
+ * Start walking the PHYs.
+ */
+ tphyp = phyp;
+ while (tphyp) {
+ /*
+ * If we're at the top level, only lock ourselves. For anything
+ * at level > 0, traverse children while locking everything.
+ */
+ if ((level > 0) || (tphyp == phyp)) {
+ pmcs_prt(tphyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
+ "%s: PHY 0x%p parent 0x%p path %s lvl %d",
+ __func__, (void *)tphyp, (void *)tphyp->parent,
+ tphyp->path, level);
+ mutex_enter(&tphyp->phy_lock);
+
+ if (tphyp->children) {
+ pmcs_lock_phy_impl(tphyp->children, level + 1);
+ }
+ }
+
+ if (level == 0) {
+ return;
+ }
+
+ tphyp = tphyp->sibling;
+ }
+}
+
+/*
+ * pmcs_lock_phy
+ *
+ * This function is responsible for locking a PHY and all its descendants
+ */
+void
+pmcs_lock_phy(pmcs_phy_t *phyp)
+{
+#ifdef DEBUG
+ char *callername = NULL;
+ ulong_t off;
+
+ ASSERT(phyp != NULL);
+
+ callername = modgetsymname((uintptr_t)caller(), &off);
+
+ if (callername == NULL) {
+ pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
+ "%s: PHY 0x%p path %s caller: unknown", __func__,
+ (void *)phyp, phyp->path);
+ } else {
+ pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
+ "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
+ (void *)phyp, phyp->path, callername, off);
+ }
+#else
+ pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
+ "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
+#endif
+ pmcs_lock_phy_impl(phyp, 0);
+}
+
+/*
+ * pmcs_unlock_phy_impl
+ *
+ * Unlock all PHYs from phyp down in a bottom-up fashion.
+ */
+static void
+pmcs_unlock_phy_impl(pmcs_phy_t *phyp, int level)
+{
+ pmcs_phy_t *phy_next;
+
+ ASSERT((phyp->dtype == SAS) || (phyp->dtype == SATA) ||
+ (phyp->dtype == EXPANDER) || (phyp->dtype == NOTHING));
+
+ /*
+ * Recurse down to the bottom PHYs
+ */
+ if (level == 0) {
+ if (phyp->children) {
+ pmcs_unlock_phy_impl(phyp->children, level + 1);
+ }
+ } else {
+ phy_next = phyp;
+ while (phy_next) {
+ if (phy_next->children) {
+ pmcs_unlock_phy_impl(phy_next->children,
+ level + 1);
+ }
+ phy_next = phy_next->sibling;
+ }
+ }
+
+ /*
+ * Iterate through PHYs unlocking all at level > 0 as well the top PHY
+ */
+ phy_next = phyp;
+ while (phy_next) {
+ if ((level > 0) || (phy_next == phyp)) {
+ pmcs_prt(phy_next->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
+ "%s: PHY 0x%p parent 0x%p path %s lvl %d",
+ __func__, (void *)phy_next,
+ (void *)phy_next->parent, phy_next->path, level);
+ mutex_exit(&phy_next->phy_lock);
+ }
+
+ if (level == 0) {
+ return;
+ }
+
+ phy_next = phy_next->sibling;
+ }
+}
+
+/*
+ * pmcs_unlock_phy
+ *
+ * Unlock a PHY and all its descendants
+ */
+void
+pmcs_unlock_phy(pmcs_phy_t *phyp)
+{
+#ifdef DEBUG
+ char *callername = NULL;
+ ulong_t off;
+
+ ASSERT(phyp != NULL);
+
+ callername = modgetsymname((uintptr_t)caller(), &off);
+
+ if (callername == NULL) {
+ pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
+ "%s: PHY 0x%p path %s caller: unknown", __func__,
+ (void *)phyp, phyp->path);
+ } else {
+ pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
+ "%s: PHY 0x%p path %s caller: %s+%lx", __func__,
+ (void *)phyp, phyp->path, callername, off);
+ }
+#else
+ pmcs_prt(phyp->pwp, PMCS_PRT_DEBUG_PHY_LOCKING,
+ "%s: PHY 0x%p path %s", __func__, (void *)phyp, phyp->path);
+#endif
+ pmcs_unlock_phy_impl(phyp, 0);
+}
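/*
 * Illustrative sketch (editorial addition, not part of this patch): the pair
 * above is meant to bracket any walk of a PHY subtree, since pmcs_lock_phy()
 * locks the PHY and all of its descendants and pmcs_unlock_phy() releases
 * them again bottom-up. The function below is hypothetical.
 */
static int
example_count_children(pmcs_phy_t *phyp)
{
	pmcs_phy_t *cp;
	int n = 0;

	pmcs_lock_phy(phyp);		/* locks phyp and its whole subtree */
	for (cp = phyp->children; cp != NULL; cp = cp->sibling) {
		n++;			/* children are already locked here */
	}
	pmcs_unlock_phy(phyp);		/* unlocks the whole subtree */
	return (n);
}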
+
+/*
+ * pmcs_get_root_phy
+ *
+ * For a given phy pointer return its root phy.
+ * The caller must be holding the lock on every PHY from phyp up to the root.
+ */
+pmcs_phy_t *
+pmcs_get_root_phy(pmcs_phy_t *phyp)
+{
+ ASSERT(phyp);
+
+ while (phyp) {
+ if (IS_ROOT_PHY(phyp)) {
+ break;
+ }
+ phyp = phyp->parent;
+ }
+
+ return (phyp);
+}
+
+/*
+ * pmcs_free_dma_chunklist
+ *
+ * Free DMA S/G chunk list
+ */
+void
+pmcs_free_dma_chunklist(pmcs_hw_t *pwp)
+{
+ pmcs_chunk_t *pchunk;
+
+ while (pwp->dma_chunklist) {
+ pchunk = pwp->dma_chunklist;
+ pwp->dma_chunklist = pwp->dma_chunklist->next;
+ if (pchunk->dma_handle) {
+ if (ddi_dma_unbind_handle(pchunk->dma_handle) !=
+ DDI_SUCCESS) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "Condition failed"
+ " at %s():%d", __func__, __LINE__);
+ }
+ ddi_dma_free_handle(&pchunk->dma_handle);
+ ddi_dma_mem_free(&pchunk->acc_handle);
+ }
+ kmem_free(pchunk, sizeof (pmcs_chunk_t));
+ }
+}
+
+
+/*
+ * Start SSP event recovery. We have to schedule the recovery operation
+ * because it involves sending multiple commands to the device and we should
+ * not do that in interrupt context.
+ * If this is the failure of a recovery command, let the recovery thread deal
+ * with it.
+ * Called with the pmcwork lock held.
+ */
+
+void
+pmcs_start_ssp_event_recovery(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *iomb,
+ size_t amt)
+{
+ pmcs_xscsi_t *tgt = pwrk->xp;
+ uint32_t event = LE_32(iomb[2]);
+ pmcs_phy_t *pptr = pwrk->phy;
+ uint32_t tag;
+
+ if (tgt != NULL) {
+ mutex_enter(&tgt->statlock);
+ if (tgt->dying || !tgt->assigned) {
+ if (pptr) {
+ pmcs_dec_phy_ref_count(pptr);
+ }
+ pptr = NULL;
+ pwrk->phy = NULL;
+ }
+ mutex_exit(&tgt->statlock);
+ }
+ if (pptr == NULL) {
+ /*
+ * No target or dying target. Need to run RE-DISCOVERY here.
+ */
+ if (pwrk->state != PMCS_WORK_STATE_TIMED_OUT) {
+ pwrk->state = PMCS_WORK_STATE_INTR;
+ }
+ /*
+ * Although we cannot mark the PHY to force an abort nor mark it
+ * as changed, killing the target will take care of aborting
+ * commands for the device.
+ */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: No valid target for event "
+ "processing found. Scheduling RECONFIGURE", __func__);
+ pmcs_pwork(pwp, pwrk);
+ RESTART_DISCOVERY(pwp);
+ return;
+ } else {
+ pmcs_lock_phy(pptr);
+ mutex_enter(&tgt->statlock);
+ if (event == PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS) {
+ if (tgt->dev_state !=
+ PMCS_DEVICE_STATE_NON_OPERATIONAL) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Device at "
+ "%s is non-operational", __func__,
+ pptr->path);
+ tgt->dev_state =
+ PMCS_DEVICE_STATE_NON_OPERATIONAL;
+ }
+ pptr->abort_pending = 1;
+ mutex_exit(&tgt->statlock);
+ pmcs_unlock_phy(pptr);
+ mutex_exit(&pwrk->lock);
+ SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
+ RESTART_DISCOVERY(pwp);
+ return;
+ }
+
+ /*
+ * If this command is run in WAIT mode, it is a failing recovery
+ * command. If so, just wake up recovery thread waiting for
+ * command completion.
+ */
+ tag = PMCS_TAG_TYPE(pwrk->htag);
+ if (tag == PMCS_TAG_TYPE_WAIT) {
+ pwrk->htag |= PMCS_TAG_DONE;
+ if (pwrk->arg && amt) {
+ (void) memcpy(pwrk->arg, iomb, amt);
+ }
+ cv_signal(&pwrk->sleep_cv);
+ mutex_exit(&tgt->statlock);
+ pmcs_unlock_phy(pptr);
+ mutex_exit(&pwrk->lock);
+ return;
+ }
+
+ /*
+ * To recover from primary failures,
+ * we need to schedule handling events recovery.
+ */
+ tgt->event_recovery = 1;
+ mutex_exit(&tgt->statlock);
+ pmcs_unlock_phy(pptr);
+ pwrk->ssp_event = event;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Scheduling SSP event recovery for tgt(0x%p) "
+ "pwrk(%p) tag(0x%x)", __func__, (void *)tgt, (void *)pwrk,
+ pwrk->htag);
+ mutex_exit(&pwrk->lock);
+ SCHEDULE_WORK(pwp, PMCS_WORK_SSP_EVT_RECOVERY);
+ }
+
+ /* Work cannot be completed until event recovery is completed. */
+}
+
+/*
+ * SSP target event recovery
+ * Entered with a phy lock held
+ * Pwrk lock is not needed - pwrk is on the target aq and no other thread
+ * will do anything with it until this thread starts the chain of recovery.
+ * Statlock may be acquired and released.
+ */
+
+void
+pmcs_tgt_event_recovery(pmcs_hw_t *pwp, pmcwork_t *pwrk)
+{
+ pmcs_phy_t *pptr = pwrk->phy;
+ pmcs_cmd_t *sp = pwrk->arg;
+ pmcs_lun_t *lun = sp->cmd_lun;
+ pmcs_xscsi_t *tgt = pwrk->xp;
+ uint32_t event;
+ uint32_t htag;
+ uint32_t status;
+ uint8_t dstate;
+ int rv;
+
+ ASSERT(pwrk->arg != NULL);
+ ASSERT(pwrk->xp != NULL);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: event recovery for "
+ "target 0x%p", __func__, (void *)pwrk->xp);
+ htag = pwrk->htag;
+ event = pwrk->ssp_event;
+ pwrk->ssp_event = 0xffffffff;
+ if (event == PMCOUT_STATUS_XFER_ERR_BREAK ||
+ event == PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY ||
+ event == PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT) {
+ /* Command may still be pending on the device */
+ rv = pmcs_ssp_tmf(pwp, pptr, SAS_QUERY_TASK, htag,
+ lun->lun_num, &status);
+ if (rv != 0) {
+ goto out;
+ }
+ if (status == SAS_RSP_TMF_COMPLETE) {
+ /* Command NOT pending on a device */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: No pending command for tgt 0x%p",
+ __func__, (void *)tgt);
+ /* Nothing more to do, just abort it on chip */
+ htag = 0;
+ }
+ }
+ /*
+ * All other events left the command pending in the host.
+ * Send an abort task and abort it on the chip.
+ */
+ if (htag != 0) {
+ if (pmcs_ssp_tmf(pwp, pptr, SAS_ABORT_TASK, htag,
+ lun->lun_num, &status))
+ goto out;
+ }
+ (void) pmcs_abort(pwp, pptr, pwrk->htag, 0, 1);
+ /*
+ * Abort either took care of work completion, or put device in
+ * a recovery state
+ */
+ return;
+out:
+ /* Abort failed, do full device recovery */
+ mutex_enter(&tgt->statlock);
+ if (!pmcs_get_dev_state(pwp, tgt, &dstate))
+ tgt->dev_state = dstate;
+
+ if ((tgt->dev_state != PMCS_DEVICE_STATE_IN_RECOVERY) &&
+ (tgt->dev_state != PMCS_DEVICE_STATE_NON_OPERATIONAL)) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Setting IN_RECOVERY for tgt 0x%p",
+ __func__, (void *)tgt);
+ (void) pmcs_send_err_recovery_cmd(pwp,
+ PMCS_DEVICE_STATE_IN_RECOVERY, tgt);
+ }
+ mutex_exit(&tgt->statlock);
+}
+
+/*
+ * SSP event recovery task.
+ */
+void
+pmcs_ssp_event_recovery(pmcs_hw_t *pwp)
+{
+ int idx;
+ pmcs_xscsi_t *tgt;
+ pmcs_cmd_t *cp;
+ pmcwork_t *pwrk;
+ pmcs_phy_t *pphy;
+ int er_flag;
+ uint32_t idxpwrk;
+
+restart:
+ for (idx = 0; idx < pwp->max_dev; idx++) {
+ mutex_enter(&pwp->lock);
+ tgt = pwp->targets[idx];
+ mutex_exit(&pwp->lock);
+ if (tgt != NULL) {
+ mutex_enter(&tgt->statlock);
+ if (tgt->dying || !tgt->assigned) {
+ mutex_exit(&tgt->statlock);
+ continue;
+ }
+ pphy = tgt->phy;
+ er_flag = tgt->event_recovery;
+ mutex_exit(&tgt->statlock);
+ if (pphy != NULL && er_flag != 0) {
+ pmcs_lock_phy(pphy);
+ mutex_enter(&tgt->statlock);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: found target(0x%p)", __func__,
+ (void *) tgt);
+
+ /* Check what cmd expects recovery */
+ mutex_enter(&tgt->aqlock);
+ STAILQ_FOREACH(cp, &tgt->aq, cmd_next) {
+ /*
+ * Since work structure is on this
+ * target aq, and only this thread
+ * is accessing it now, we do not need
+ * to lock it
+ */
+ idxpwrk = PMCS_TAG_INDEX(cp->cmd_tag);
+ pwrk = &pwp->work[idxpwrk];
+ if (pwrk->htag != cp->cmd_tag) {
+ /*
+ * aq may contain TMF commands,
+ * so we may not find work
+ * structure with htag
+ */
+ break;
+ }
+ if (pwrk->ssp_event != 0 &&
+ pwrk->ssp_event !=
+ PMCS_REC_EVENT) {
+ pmcs_prt(pwp,
+ PMCS_PRT_DEBUG,
+ "%s: pwrk(%p) ctag(0x%x)",
+ __func__, (void *) pwrk,
+ cp->cmd_tag);
+ mutex_exit(&tgt->aqlock);
+ mutex_exit(&tgt->statlock);
+ pmcs_tgt_event_recovery(
+ pwp, pwrk);
+ /*
+ * We dropped statlock, so
+ * restart scanning from scratch
+ */
+ pmcs_unlock_phy(pphy);
+ goto restart;
+ }
+ }
+ mutex_exit(&tgt->aqlock);
+ tgt->event_recovery = 0;
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: end of SSP event recovery for "
+ "target(0x%p)", __func__, (void *) tgt);
+ mutex_exit(&tgt->statlock);
+ pmcs_unlock_phy(pphy);
+ }
+ }
+ }
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: end of SSP event recovery for pwp(0x%p)", __func__,
+ (void *) pwp);
+}
+
+/*ARGSUSED2*/
+int
+pmcs_phy_constructor(void *buf, void *arg, int kmflags)
+{
+ pmcs_hw_t *pwp = (pmcs_hw_t *)arg;
+ pmcs_phy_t *phyp = (pmcs_phy_t *)buf;
+
+ mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(pwp->intr_pri));
+ cv_init(&phyp->abort_all_cv, NULL, CV_DRIVER, NULL);
+ return (0);
+}
+
+/*ARGSUSED1*/
+void
+pmcs_phy_destructor(void *buf, void *arg)
+{
+ pmcs_phy_t *phyp = (pmcs_phy_t *)buf;
+
+ cv_destroy(&phyp->abort_all_cv);
+ mutex_destroy(&phyp->phy_lock);
+}
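/*
 * Illustrative sketch (editorial addition, not part of this patch): the
 * constructor/destructor pair above is intended to be wired up with
 * kmem_cache_create(9F) during attach, roughly as follows. The cache name
 * string and helper name are hypothetical.
 */
static void
example_create_phy_cache(pmcs_hw_t *pwp)
{
	pwp->phy_cache = kmem_cache_create("pmcs_phy_cache",
	    sizeof (pmcs_phy_t), 8, pmcs_phy_constructor,
	    pmcs_phy_destructor, NULL, (void *)pwp, NULL, 0);
}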
+
+/*
+ * Free all PHYs from the kmem_cache starting at phyp as well as everything
+ * on the dead_phys list.
+ *
+ * NOTE: This function does not free root PHYs as they are not allocated
+ * from the kmem_cache.
+ *
+ * No PHY locks are acquired as this should only be called during DDI_DETACH
+ * or soft reset (while pmcs interrupts are disabled).
+ */
+void
+pmcs_free_all_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
+{
+ pmcs_phy_t *tphyp, *nphyp;
+
+ if (phyp == NULL) {
+ return;
+ }
+
+ tphyp = phyp;
+ while (tphyp) {
+ nphyp = tphyp->sibling;
+
+ if (tphyp->children) {
+ pmcs_free_all_phys(pwp, tphyp->children);
+ tphyp->children = NULL;
+ }
+ if (!IS_ROOT_PHY(tphyp)) {
+ kmem_cache_free(pwp->phy_cache, tphyp);
+ }
+
+ tphyp = nphyp;
+ }
+
+ tphyp = pwp->dead_phys;
+ while (tphyp) {
+ nphyp = tphyp->sibling;
+ kmem_cache_free(pwp->phy_cache, tphyp);
+ tphyp = nphyp;
+ }
+ pwp->dead_phys = NULL;
+}
+
+/*
+ * Free a list of PHYs linked together by the sibling pointer back to the
+ * kmem cache from whence they came. This function does not recurse, so the
+ * caller must ensure there are no children.
+ */
+void
+pmcs_free_phys(pmcs_hw_t *pwp, pmcs_phy_t *phyp)
+{
+ pmcs_phy_t *next_phy;
+
+ while (phyp) {
+ next_phy = phyp->sibling;
+ ASSERT(!mutex_owned(&phyp->phy_lock));
+ kmem_cache_free(pwp->phy_cache, phyp);
+ phyp = next_phy;
+ }
+}
+
+/*
+ * Make a copy of an existing PHY structure. This is used primarily in
+ * discovery to compare the contents of an existing PHY with what gets
+ * reported back by an expander.
+ *
+ * This function must only be called from a context where sleeping is
+ * allowed, since it allocates from the PHY cache with KM_SLEEP.
+ *
+ * The new PHY is returned unlocked.
+ */
+static pmcs_phy_t *
+pmcs_clone_phy(pmcs_phy_t *orig_phy)
+{
+ pmcs_phy_t *local;
+
+ local = kmem_cache_alloc(orig_phy->pwp->phy_cache, KM_SLEEP);
+
+ /*
+ * Go ahead and just copy everything...
+ */
+ *local = *orig_phy;
+
+ /*
+ * But the following must be set appropriately for this copy
+ */
+ local->sibling = NULL;
+ local->children = NULL;
+ mutex_init(&local->phy_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(orig_phy->pwp->intr_pri));
+
+ return (local);
+}
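/*
 * Illustrative sketch (editorial addition, not part of this patch): because
 * pmcs_clone_phy() gives the copy its own phy_lock and clears the sibling and
 * children pointers, a caller that only needs a snapshot for comparison can
 * dispose of the clone as shown here. The function below is hypothetical.
 */
static boolean_t
example_phy_address_changed(pmcs_phy_t *pptr, uint8_t *new_sas_address)
{
	pmcs_phy_t *snap = pmcs_clone_phy(pptr);
	boolean_t changed;

	changed = (memcmp(snap->sas_address, new_sas_address, 8) != 0);

	/* The clone owns its own lock; tear it down before freeing it */
	mutex_destroy(&snap->phy_lock);
	kmem_cache_free(pptr->pwp->phy_cache, snap);

	return (changed);
}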
+
+int
+pmcs_check_acc_handle(ddi_acc_handle_t handle)
+{
+ ddi_fm_error_t de;
+
+ if (handle == NULL) {
+ return (DDI_FAILURE);
+ }
+ ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
+ return (de.fme_status);
+}
+
+int
+pmcs_check_dma_handle(ddi_dma_handle_t handle)
+{
+ ddi_fm_error_t de;
+
+ if (handle == NULL) {
+ return (DDI_FAILURE);
+ }
+ ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
+ return (de.fme_status);
+}
+
+
+void
+pmcs_fm_ereport(pmcs_hw_t *pwp, char *detail)
+{
+ uint64_t ena;
+ char buf[FM_MAX_CLASS];
+
+ (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
+ ena = fm_ena_generate(0, FM_ENA_FMT1);
+ if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities)) {
+ ddi_fm_ereport_post(pwp->dip, buf, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
+ }
+}
+
+int
+pmcs_check_acc_dma_handle(pmcs_hw_t *pwp)
+{
+ pmcs_chunk_t *pchunk;
+ int i;
+
+ /* check all acc & dma handles allocated in attach */
+ if ((pmcs_check_acc_handle(pwp->pci_acc_handle) != DDI_SUCCESS) ||
+ (pmcs_check_acc_handle(pwp->msg_acc_handle) != DDI_SUCCESS) ||
+ (pmcs_check_acc_handle(pwp->top_acc_handle) != DDI_SUCCESS) ||
+ (pmcs_check_acc_handle(pwp->mpi_acc_handle) != DDI_SUCCESS) ||
+ (pmcs_check_acc_handle(pwp->gsm_acc_handle) != DDI_SUCCESS)) {
+ goto check_failed;
+ }
+
+ for (i = 0; i < PMCS_NIQ; i++) {
+ if ((pmcs_check_dma_handle(
+ pwp->iqp_handles[i]) != DDI_SUCCESS) ||
+ (pmcs_check_acc_handle(
+ pwp->iqp_acchdls[i]) != DDI_SUCCESS)) {
+ goto check_failed;
+ }
+ }
+
+ for (i = 0; i < PMCS_NOQ; i++) {
+ if ((pmcs_check_dma_handle(
+ pwp->oqp_handles[i]) != DDI_SUCCESS) ||
+ (pmcs_check_acc_handle(
+ pwp->oqp_acchdls[i]) != DDI_SUCCESS)) {
+ goto check_failed;
+ }
+ }
+
+ if ((pmcs_check_dma_handle(pwp->cip_handles) != DDI_SUCCESS) ||
+ (pmcs_check_acc_handle(pwp->cip_acchdls) != DDI_SUCCESS)) {
+ goto check_failed;
+ }
+
+ if (pwp->fwlog &&
+ ((pmcs_check_dma_handle(pwp->fwlog_hndl) != DDI_SUCCESS) ||
+ (pmcs_check_acc_handle(pwp->fwlog_acchdl) != DDI_SUCCESS))) {
+ goto check_failed;
+ }
+
+ if (pwp->regdump_hndl && pwp->regdump_acchdl &&
+ ((pmcs_check_dma_handle(pwp->regdump_hndl) != DDI_SUCCESS) ||
+ (pmcs_check_acc_handle(pwp->regdump_acchdl)
+ != DDI_SUCCESS))) {
+ goto check_failed;
+ }
+
+
+ pchunk = pwp->dma_chunklist;
+ while (pchunk) {
+ if ((pmcs_check_acc_handle(pchunk->acc_handle)
+ != DDI_SUCCESS) ||
+ (pmcs_check_dma_handle(pchunk->dma_handle)
+ != DDI_SUCCESS)) {
+ goto check_failed;
+ }
+ pchunk = pchunk->next;
+ }
+
+ return (0);
+
+check_failed:
+
+ return (1);
+}
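/*
 * Illustrative sketch (editorial addition, not part of this patch): a typical
 * consumer of the check above posts an ereport and reports the service impact
 * when any handle has faulted. ddi_fm_service_impact(9F) and the
 * DDI_FM_DEVICE_NO_RESPONSE class are standard FMA interfaces; the wrapper
 * name below is hypothetical.
 */
static void
example_fm_check(pmcs_hw_t *pwp)
{
	if (pmcs_check_acc_dma_handle(pwp) != 0) {
		pmcs_fm_ereport(pwp, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
	}
}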
+
+/*
+ * pmcs_handle_dead_phys
+ *
+ * If the PHY has no outstanding work associated with it, remove it from
+ * the dead PHY list and free it.
+ *
+ * If pwp->ds_err_recovering or pwp->configuring is set, don't run.
+ * This keeps routines that need to submit work to the chip from having to
+ * hold PHY locks to ensure that PHYs don't disappear while they do their work.
+ */
+void
+pmcs_handle_dead_phys(pmcs_hw_t *pwp)
+{
+ pmcs_phy_t *phyp, *nphyp, *pphyp;
+
+ mutex_enter(&pwp->lock);
+ mutex_enter(&pwp->config_lock);
+
+ if (pwp->configuring | pwp->ds_err_recovering) {
+ mutex_exit(&pwp->config_lock);
+ mutex_exit(&pwp->lock);
+ return;
+ }
+
+ /*
+ * Check every PHY in the dead PHY list
+ */
+ mutex_enter(&pwp->dead_phylist_lock);
+ phyp = pwp->dead_phys;
+ pphyp = NULL; /* Set previous PHY to NULL */
+
+ while (phyp != NULL) {
+ pmcs_lock_phy(phyp);
+ ASSERT(phyp->dead);
+
+ nphyp = phyp->dead_next;
+
+ /*
+ * Check for outstanding work
+ */
+ if (phyp->ref_count > 0) {
+ pmcs_unlock_phy(phyp);
+ pphyp = phyp; /* This PHY becomes "previous" */
+ } else if (phyp->target) {
+ pmcs_unlock_phy(phyp);
+ pmcs_prt(pwp, PMCS_PRT_DEBUG1,
+ "%s: Not freeing PHY 0x%p: target 0x%p is not free",
+ __func__, (void *)phyp, (void *)phyp->target);
+ pphyp = phyp;
+ } else {
+ /*
+ * No outstanding work or target references. Remove it
+ * from the list and free it
+ */
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: Freeing inactive dead PHY 0x%p @ %s "
+ "target = 0x%p", __func__, (void *)phyp,
+ phyp->path, (void *)phyp->target);
+ /*
+ * If pphyp is NULL, then phyp was the head of the list,
+ * so just reset the head to nphyp. Otherwise, the
+ * previous PHY will now point to nphyp (the next PHY)
+ */
+ if (pphyp == NULL) {
+ pwp->dead_phys = nphyp;
+ } else {
+ pphyp->dead_next = nphyp;
+ }
+ /*
+ * If the target still points to this PHY, remove
+ * that linkage now.
+ */
+ if (phyp->target) {
+ mutex_enter(&phyp->target->statlock);
+ if (phyp->target->phy == phyp) {
+ phyp->target->phy = NULL;
+ }
+ mutex_exit(&phyp->target->statlock);
+ }
+ kmem_cache_free(pwp->phy_cache, phyp);
+ }
+
+ phyp = nphyp;
+ }
+
+ mutex_exit(&pwp->dead_phylist_lock);
+ mutex_exit(&pwp->config_lock);
+ mutex_exit(&pwp->lock);
+}
+
+void
+pmcs_inc_phy_ref_count(pmcs_phy_t *phyp)
+{
+ atomic_inc_32(&phyp->ref_count);
+}
+
+void
+pmcs_dec_phy_ref_count(pmcs_phy_t *phyp)
+{
+ ASSERT(phyp->ref_count != 0);
+ atomic_dec_32(&phyp->ref_count);
+}
+
+/*
+ * pmcs_reap_dead_phy
+ *
+ * This function is called from pmcs_new_tport when we have a PHY
+ * without a target pointer. It's possible in that case that this PHY
+ * may have a "brother" on the dead_phys list. That is, it may be the same as
+ * this one but with a different root PHY number (e.g. pp05 vs. pp04). If
+ * that's the case, update the dead PHY and this new PHY. If that's not the
+ * case, we should get a tran_tgt_init on this after it's reported to SCSA.
+ *
+ * Called with PHY locked.
+ */
+static void
+pmcs_reap_dead_phy(pmcs_phy_t *phyp)
+{
+ pmcs_hw_t *pwp = phyp->pwp;
+ pmcs_phy_t *ctmp;
+
+ ASSERT(mutex_owned(&phyp->phy_lock));
+
+ /*
+ * Check the dead PHYs list
+ */
+ mutex_enter(&pwp->dead_phylist_lock);
+ ctmp = pwp->dead_phys;
+ while (ctmp) {
+ if ((ctmp->iport != phyp->iport) ||
+ (memcmp((void *)&ctmp->sas_address[0],
+ (void *)&phyp->sas_address[0], 8))) {
+ ctmp = ctmp->dead_next;
+ continue;
+ }
+
+ /*
+ * Same SAS address on same iport. Now check to see if
+ * the PHY path is the same with the possible exception
+ * of the root PHY number.
+ * The "5" is the string length of "pp00."
+ */
+ if ((strnlen(phyp->path, 5) >= 5) &&
+ (strnlen(ctmp->path, 5) >= 5)) {
+ if (memcmp((void *)&phyp->path[5],
+ (void *)&ctmp->path[5],
+ strnlen(phyp->path, 32) - 5) == 0) {
+ break;
+ }
+ }
+
+ ctmp = ctmp->dead_next;
+ }
+ mutex_exit(&pwp->dead_phylist_lock);
+
+ /*
+ * Found a match. Remove the target linkage and drop the
+ * ref count on the old PHY. Then, increment the ref count
+ * on the new PHY to compensate.
+ */
+ if (ctmp) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
+ "%s: Found match in dead PHY list for new PHY %s",
+ __func__, phyp->path);
+ if (ctmp->target) {
+ /*
+ * If there is a pointer to the target in the dead
+ * PHY, and that PHY's ref_count drops to 0, we can
+ * clear the target linkage now. If the PHY's
+ * ref_count is > 1, then there may be multiple
+ * LUNs still remaining, so leave the linkage.
+ */
+ pmcs_inc_phy_ref_count(phyp);
+ pmcs_dec_phy_ref_count(ctmp);
+ phyp->target = ctmp->target;
+ /*
+ * Update the target's linkage as well
+ */
+ mutex_enter(&phyp->target->statlock);
+ phyp->target->phy = phyp;
+ phyp->target->dtype = phyp->dtype;
+ mutex_exit(&phyp->target->statlock);
+
+ if (ctmp->ref_count == 0) {
+ ctmp->target = NULL;
+ }
+ }
+ }
+}
+
+/*
+ * Called with iport lock held
+ */
+void
+pmcs_add_phy_to_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp)
+{
+ ASSERT(mutex_owned(&iport->lock));
+ ASSERT(phyp);
+ ASSERT(!list_link_active(&phyp->list_node));
+ iport->nphy++;
+ pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
+ &iport->nphy);
+ list_insert_tail(&iport->phys, phyp);
+ mutex_enter(&iport->refcnt_lock);
+ iport->refcnt++;
+ mutex_exit(&iport->refcnt_lock);
+}
+
+/*
+ * Called with the iport lock held
+ */
+void
+pmcs_remove_phy_from_iport(pmcs_iport_t *iport, pmcs_phy_t *phyp)
+{
+ pmcs_phy_t *pptr, *next_pptr;
+
+ ASSERT(mutex_owned(&iport->lock));
+
+ /*
+ * If phyp is NULL, remove all PHYs from the iport
+ */
+ if (phyp == NULL) {
+ for (pptr = list_head(&iport->phys); pptr != NULL;
+ pptr = next_pptr) {
+ next_pptr = list_next(&iport->phys, pptr);
+ mutex_enter(&pptr->phy_lock);
+ pptr->iport = NULL;
+ mutex_exit(&pptr->phy_lock);
+ pmcs_rele_iport(iport);
+ list_remove(&iport->phys, pptr);
+ }
+ iport->nphy = 0;
+ return;
+ }
+
+ ASSERT(phyp);
+ ASSERT(iport->nphy > 0);
+ ASSERT(list_link_active(&phyp->list_node));
+ iport->nphy--;
+ pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
+ &iport->nphy);
+ list_remove(&iport->phys, phyp);
+ pmcs_rele_iport(iport);
+}
+
+/*
+ * This function checks to see if the target pointed to by phyp is still
+ * correct. This is done by comparing the target's unit address with the
+ * SAS address in phyp.
+ *
+ * Called with PHY locked and target statlock held
+ */
+static boolean_t
+pmcs_phy_target_match(pmcs_phy_t *phyp)
+{
+ uint64_t wwn;
+ char unit_address[PMCS_MAX_UA_SIZE];
+ boolean_t rval = B_FALSE;
+
+ ASSERT(phyp);
+ ASSERT(phyp->target);
+ ASSERT(mutex_owned(&phyp->phy_lock));
+ ASSERT(mutex_owned(&phyp->target->statlock));
+
+ wwn = pmcs_barray2wwn(phyp->sas_address);
+ (void) scsi_wwn_to_wwnstr(wwn, 1, unit_address);
+
+ if (memcmp((void *)unit_address, (void *)phyp->target->unit_address,
+ strnlen(phyp->target->unit_address, PMCS_MAX_UA_SIZE)) == 0) {
+ rval = B_TRUE;
+ }
+
+ return (rval);
+}
+
+void
+pmcs_start_dev_state_recovery(pmcs_xscsi_t *xp, pmcs_phy_t *phyp)
+{
+ ASSERT(mutex_owned(&xp->statlock));
+ ASSERT(xp->pwp != NULL);
+
+ if (xp->recover_wait == 0) {
+ pmcs_prt(xp->pwp, PMCS_PRT_DEBUG_DEV_STATE,
+ "%s: Start ds_recovery for tgt 0x%p/PHY 0x%p (%s)",
+ __func__, (void *)xp, (void *)phyp, phyp->path);
+ xp->recover_wait = 1;
+
+ /*
+ * Rather than waiting for the watchdog timer, we'll
+ * kick it right now.
+ */
+ SCHEDULE_WORK(xp->pwp, PMCS_WORK_DS_ERR_RECOVERY);
+ (void) ddi_taskq_dispatch(xp->pwp->tq, pmcs_worker, xp->pwp,
+ DDI_NOSLEEP);
+ }
+}
+
+/*
+ * Increment the phy ds error retry count.
+ * If too many retries, mark phy dead and restart discovery;
+ * otherwise schedule ds recovery.
+ */
+static void
+pmcs_handle_ds_recovery_error(pmcs_phy_t *phyp, pmcs_xscsi_t *tgt,
+ pmcs_hw_t *pwp, const char *func_name, int line, char *reason_string)
+{
+ ASSERT(mutex_owned(&phyp->phy_lock));
+
+ phyp->ds_recovery_retries++;
+
+ if (phyp->ds_recovery_retries > PMCS_MAX_DS_RECOVERY_RETRIES) {
+ pmcs_prt(pwp, PMCS_PRT_DEBUG,
+ "%s: retry limit reached after %s to PHY %s failed",
+ func_name, reason_string, phyp->path);
+ tgt->recover_wait = 0;
+ phyp->dead = 1;
+ PHY_CHANGED_AT_LOCATION(pwp, phyp, func_name, line);
+ RESTART_DISCOVERY(pwp);
+ } else {
+ SCHEDULE_WORK(pwp, PMCS_WORK_DS_ERR_RECOVERY);
+ }
+}
diff --git a/usr/src/uts/common/io/scsi/adapters/scsi_vhci/mpapi_impl.c b/usr/src/uts/common/io/scsi/adapters/scsi_vhci/mpapi_impl.c
index 4dc69ba6b6..38c2bf8f4e 100644
--- a/usr/src/uts/common/io/scsi/adapters/scsi_vhci/mpapi_impl.c
+++ b/usr/src/uts/common/io/scsi/adapters/scsi_vhci/mpapi_impl.c
@@ -908,6 +908,8 @@ vhci_get_path_list_for_mp_lu(struct scsi_vhci *vhci, mp_iocdata_t *mpioc,
uint64_t *oid = (uint64_t *)(input_data);
mpapi_item_list_t *ilist, *mplu_path_list = NULL;
mpapi_lu_data_t *mplup;
+ mpapi_path_data_t *mppathp;
+ mdi_pathinfo_t *pip;
ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_MULTIPATH_LU]->head;
@@ -934,14 +936,33 @@ vhci_get_path_list_for_mp_lu(struct scsi_vhci *vhci, mp_iocdata_t *mpioc,
}
while (mplu_path_list != NULL) {
- if (count < list_len) {
- oid_list[count] = (uint64_t)mplu_path_list->
- item->oid.raw_oid;
- } else {
- rval = MP_MORE_DATA;
+ mppathp = (mpapi_path_data_t *)(mplu_path_list->item->idata);
+ /* skip a path that should be hidden. */
+ if (!(mppathp->hide)) {
+ pip = (mdi_pathinfo_t *)mppathp->resp;
+ mdi_hold_path(pip);
+ /*
+ * Check if the pip is marked as device removed.
+ * When the pi_flag MDI_PATHINFO_FLAGS_DEVICE_REMOVED is set,
+ * the node should have been destroyed but was not, due to an
+ * open on the client node.
+ * The driver tracks such a node through the hide flag and
+ * doesn't report it through the ioctl response.
+ * The devinfo driver doesn't report such a path either.
+ */
+ if (!(MDI_PI_FLAGS_IS_DEVICE_REMOVED(pip))) {
+ if (count < list_len) {
+ oid_list[count] =
+ (uint64_t)mplu_path_list->
+ item->oid.raw_oid;
+ } else {
+ rval = MP_MORE_DATA;
+ }
+ count++;
+ }
+ mdi_rele_path(pip);
}
mplu_path_list = mplu_path_list->next;
- count++;
}
mpioc->mp_alen = (uint32_t)(count * sizeof (uint64_t));
@@ -972,6 +993,8 @@ vhci_get_path_list_for_init_port(struct scsi_vhci *vhci, mp_iocdata_t *mpioc,
uint64_t *oid = (uint64_t *)(input_data);
mpapi_item_list_t *ilist, *mpinit_path_list = NULL;
mpapi_initiator_data_t *mpinitp;
+ mpapi_path_data_t *mppathp;
+ mdi_pathinfo_t *pip;
ilist = vhci->mp_priv->
obj_hdr_list[MP_OBJECT_TYPE_INITIATOR_PORT]->head;
@@ -1016,14 +1039,33 @@ vhci_get_path_list_for_init_port(struct scsi_vhci *vhci, mp_iocdata_t *mpioc,
}
while (mpinit_path_list != NULL) {
- if (count < list_len) {
- oid_list[count] = (uint64_t)mpinit_path_list->
- item->oid.raw_oid;
- } else {
- rval = MP_MORE_DATA;
+ mppathp = (mpapi_path_data_t *)(mpinit_path_list->item->idata);
+ /* skip a path that should be hidden. */
+ if (!(mppathp->hide)) {
+ pip = (mdi_pathinfo_t *)mppathp->resp;
+ mdi_hold_path(pip);
+ /*
+ * Check if the pip is marked as device removed.
+ * When the pi_flag MDI_PATHINFO_FLAGS_DEVICE_REMOVED is set,
+ * the node should have been destroyed but was not, due to an
+ * open on the client node.
+ * The driver tracks such a node through the hide flag and
+ * doesn't report it through the ioctl response.
+ * The devinfo driver doesn't report such a path either.
+ */
+ if (!(MDI_PI_FLAGS_IS_DEVICE_REMOVED(pip))) {
+ if (count < list_len) {
+ oid_list[count] =
+ (uint64_t)mpinit_path_list->
+ item->oid.raw_oid;
+ } else {
+ rval = MP_MORE_DATA;
+ }
+ count++;
+ }
+ mdi_rele_path(pip);
}
mpinit_path_list = mpinit_path_list->next;
- count++;
}
mpioc->mp_alen = (uint32_t)(count * sizeof (uint64_t));
@@ -1054,6 +1096,8 @@ vhci_get_path_list_for_target_port(struct scsi_vhci *vhci, mp_iocdata_t *mpioc,
uint64_t *oid = (uint64_t *)(input_data);
mpapi_item_list_t *ilist, *mptp_path_list = NULL;
mpapi_tport_data_t *mptpp;
+ mpapi_path_data_t *mppathp;
+ mdi_pathinfo_t *pip;
ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_TARGET_PORT]->head;
@@ -1080,14 +1124,33 @@ vhci_get_path_list_for_target_port(struct scsi_vhci *vhci, mp_iocdata_t *mpioc,
}
while (mptp_path_list != NULL) {
- if (count < list_len) {
- oid_list[count] =
- (uint64_t)mptp_path_list->item->oid.raw_oid;
- } else {
- rval = MP_MORE_DATA;
+ mppathp = (mpapi_path_data_t *)(mptp_path_list->item->idata);
+ /* skip a path that should be hidden. */
+ if (!(mppathp->hide)) {
+ pip = (mdi_pathinfo_t *)mppathp->resp;
+ mdi_hold_path(pip);
+ /*
+ * Check if the pip is marked as device removed.
+ * When the pi_flag MDI_PATHINFO_FLAGS_DEVICE_REMOVED is set,
+ * the node should have been destroyed but was not, due to an
+ * open on the client node.
+ * The driver tracks such a node through the hide flag and
+ * doesn't report it through the ioctl response.
+ * The devinfo driver doesn't report such a path either.
+ */
+ if (!(MDI_PI_FLAGS_IS_DEVICE_REMOVED(pip))) {
+ if (count < list_len) {
+ oid_list[count] =
+ (uint64_t)mptp_path_list->
+ item->oid.raw_oid;
+ } else {
+ rval = MP_MORE_DATA;
+ }
+ count++;
+ }
+ mdi_rele_path(pip);
}
mptp_path_list = mptp_path_list->next;
- count++;
}
mpioc->mp_alen = (uint32_t)(count * sizeof (uint64_t));
@@ -2559,21 +2622,26 @@ vhci_mpapi_create_item(struct scsi_vhci *vhci, uint8_t obj_type, void* res)
if (strncmp(interconnect,
INTERCONNECT_FABRIC_STR,
strlen(interconnect)) == 0) {
- mp_interconnect_type = 2;
+ mp_interconnect_type =
+ MP_DRVR_TRANSPORT_TYPE_FC;
} else if (strncmp(interconnect,
INTERCONNECT_PARALLEL_STR,
strlen(interconnect)) == 0) {
- mp_interconnect_type = 3;
+ mp_interconnect_type =
+ MP_DRVR_TRANSPORT_TYPE_SPI;
} else if (strncmp(interconnect,
INTERCONNECT_ISCSI_STR,
strlen(interconnect)) == 0) {
- mp_interconnect_type = 4;
+ mp_interconnect_type =
+ MP_DRVR_TRANSPORT_TYPE_ISCSI;
} else if (strncmp(interconnect,
INTERCONNECT_IBSRP_STR,
strlen(interconnect)) == 0) {
- mp_interconnect_type = 5;
+ mp_interconnect_type =
+ MP_DRVR_TRANSPORT_TYPE_IFB;
} else {
- mp_interconnect_type = 0;
+ mp_interconnect_type =
+ MP_DRVR_TRANSPORT_TYPE_UNKNOWN;
}
init = kmem_zalloc(
@@ -2762,6 +2830,7 @@ vhci_mpapi_create_item(struct scsi_vhci *vhci, uint8_t obj_type, void* res)
path->resp = res;
path->path_name = pname;
path->valid = 1;
+ path->hide = 0;
path->prop.id = item->oid.raw_oid;
item->idata = (void *)path;
vhci_mpapi_log_sysevent(vhci->vhci_dip,
@@ -2886,6 +2955,7 @@ vhci_update_mpapi_data(struct scsi_vhci *vhci, scsi_vhci_lun_t *vlun,
*/
pd = path_list->item->idata;
pd->valid = 1;
+ pd->hide = 0;
pd->resp = pip;
}
@@ -3907,14 +3977,27 @@ vhci_mpapi_set_path_state(dev_info_t *vdip, mdi_pathinfo_t *pip, int state)
}
/*
+ * Check if the pathinfo is uninitialized (destroyed).
+ */
+ if (state == MP_DRVR_PATH_STATE_UNINIT) {
+ pp->hide = 1;
+ VHCI_DEBUG(6, (CE_NOTE, NULL, "vhci_mpapi_set_path_state: "
+ "path(pip: %p) is uninited(destroyed).",
+ (void *)pip));
+ } else {
+ pp->hide = 0;
+ }
+ /*
* Find if there are any paths at all to the lun
*/
if ((state == MP_DRVR_PATH_STATE_REMOVED) || (state ==
MP_DRVR_PATH_STATE_PATH_ERR) || (state ==
MP_DRVR_PATH_STATE_LU_ERR) || (state ==
- MP_DRVR_PATH_STATE_UNKNOWN)) {
+ MP_DRVR_PATH_STATE_UNKNOWN) || pp->hide) {
pp->valid = 0;
-
+ VHCI_DEBUG(6, (CE_NOTE, NULL, "vhci_mpapi_set_path_state: "
+ "path(pip: %p) is not okay state. Set to invalid.",
+ (void *)pip));
svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
svl = svp->svp_svl;
/*
@@ -3937,6 +4020,9 @@ vhci_mpapi_set_path_state(dev_info_t *vdip, mdi_pathinfo_t *pip, int state)
if (lu_list != NULL) {
ld = lu_list->item->idata;
ld->valid = 0;
+ VHCI_DEBUG(6, (CE_NOTE, NULL,
+ "vhci_mpapi_set_path_state: "
+ " Invalidated LU(%s)", svl->svl_lun_wwn));
}
}
}
@@ -4198,7 +4284,8 @@ vhci_mpapi_chk_last_path(mdi_pathinfo_t *pip)
MDI_PI_IS_STANDBY(ret_pip) ||
MDI_PI_IS_INIT(ret_pip)) &&
!(MDI_PI_IS_DISABLE(ret_pip) ||
- MDI_PI_IS_TRANSIENT(ret_pip))) {
+ MDI_PI_IS_TRANSIENT(ret_pip) ||
+ MDI_PI_FLAGS_IS_DEVICE_REMOVED(ret_pip))) {
count++;
}
mdi_pi_unlock(ret_pip);
diff --git a/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c b/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c
index e0a8f1551b..cd90f213cf 100644
--- a/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c
+++ b/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c
@@ -221,6 +221,7 @@ static void vhci_clean_print(dev_info_t *dev, uint_t level,
#endif
static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
static void vhci_uscsi_iodone(struct scsi_pkt *pkt);
+static void vhci_invalidate_mpapi_lu(struct scsi_vhci *, scsi_vhci_lun_t *);
/*
* MP-API related functions
@@ -1514,7 +1515,7 @@ vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
static int
vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
{
- dev_info_t *vdip, *pdip, *cdip;
+ dev_info_t *vdip, *cdip;
mdi_pathinfo_t *pip = NULL;
mdi_pathinfo_t *npip = NULL;
int rval = -1;
@@ -1572,12 +1573,10 @@ again:
if (hba->tran_reset != NULL) {
if (hba->tran_reset(pap, level) == 0) {
- pdip = mdi_pi_get_phci(pip);
- vhci_log(CE_WARN, vdip, "!(%s%d):"
- " path (%s%d), reset %d failed",
+ vhci_log(CE_WARN, vdip, "!%s%d: "
+ "path %s, reset %d failed",
ddi_driver_name(cdip), ddi_get_instance(cdip),
- ddi_driver_name(pdip), ddi_get_instance(pdip),
- level);
+ mdi_pi_spathname(pip), level);
/*
* Select next path and issue the reset, repeat
@@ -2052,28 +2051,32 @@ vhci_scsi_get_name_bus_addr(struct scsi_device *sd,
ASSERT(sd != NULL);
ASSERT(name != NULL);
+ *name = 0;
cdip = sd->sd_dev;
ASSERT(cdip != NULL);
- if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS) {
- name[0] = '\0';
+ if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS)
return (1);
- }
if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
- MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
- name[0] = '\0';
+ MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS)
return (1);
- }
+ /*
+ * Message is "sd# at scsi_vhci0: unit-address <guid>: <bus_addr>".
+ * <guid> bus_addr argument == 0
+ * <bus_addr> bus_addr argument != 0
+ * Since the <guid> is already provided with the unit-address, we just
+ * provide the failover-module name in <bus_addr> to keep the output shorter.
+ */
vlun = ADDR2VLUN(&sd->sd_address);
- if (bus_addr && vlun && vlun->svl_fops_name) {
- /* report the guid and the name of the failover module */
- (void) snprintf(name, len, "g%s %s", guid, vlun->svl_fops_name);
- } else {
- /* report the guid */
+ if (bus_addr == 0) {
+ /* report the guid: */
(void) snprintf(name, len, "g%s", guid);
+ } else if (vlun && vlun->svl_fops_name) {
+ /* report the name of the failover module */
+ (void) snprintf(name, len, "%s", vlun->svl_fops_name);
}
ddi_prop_free(guid);
@@ -2431,12 +2434,15 @@ bind_path:
sema_v(&vlun->svl_pgr_sema);
}
/*
- * Looks like a fatal error.
- * May be device disappeared underneath.
- * Give another chance to target driver for a retry to
- * get another path.
+ * Consider it a fatal error if b_error is set as a result of a
+ * DMA binding failure, as opposed to a condition of being
+ * temporarily out of some resource.
*/
- return (TRAN_BUSY);
+ if (geterror(vpkt->vpkt_tgt_init_bp))
+ return (TRAN_FATAL_ERROR);
+ else
+ return (TRAN_BUSY);
}
}
@@ -3054,8 +3060,8 @@ vhci_intr(struct scsi_pkt *pkt)
static char *timeout_err = "Command Timeout";
static char *parity_err = "Parity Error";
char *err_str = NULL;
- dev_info_t *vdip, *cdip, *pdip;
- char *cpath, *dpath;
+ dev_info_t *vdip, *cdip;
+ char *cpath;
ASSERT(vpkt != NULL);
tpkt = vpkt->vpkt_tgt_pkt;
@@ -3359,6 +3365,21 @@ vhci_intr(struct scsi_pkt *pkt)
break;
case CMD_DEV_GONE:
+ /*
+ * If this is the last path, report CMD_DEV_GONE to the
+ * target driver; otherwise report BUSY to trigger a retry.
+ */
+ if (vlun->svl_dip &&
+ (mdi_client_get_path_count(vlun->svl_dip) <= 1)) {
+ struct scsi_vhci *vhci;
+ vhci = ADDR2VHCI(&tpkt->pkt_address);
+ VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
+ "cmd_dev_gone on last path\n"));
+ (void) vhci_invalidate_mpapi_lu(vhci, vlun);
+ break;
+ }
+
+ /* Report CMD_CMPLT-with-BUSY to cause retry. */
VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
"cmd_dev_gone\n"));
tpkt->pkt_reason = CMD_CMPLT;
@@ -3414,17 +3435,13 @@ vhci_intr(struct scsi_pkt *pkt)
if ((err_str != NULL) && (pkt->pkt_reason !=
svp->svp_last_pkt_reason)) {
cdip = vlun->svl_dip;
- pdip = mdi_pi_get_phci(vpkt->vpkt_path);
vdip = ddi_get_parent(cdip);
cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
- dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
- vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s (%s%d)",
+ vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s",
ddi_pathname(cdip, cpath), ddi_driver_name(cdip),
ddi_get_instance(cdip), err_str,
- ddi_pathname(pdip, dpath), ddi_driver_name(pdip),
- ddi_get_instance(pdip));
+ mdi_pi_spathname(vpkt->vpkt_path));
kmem_free(cpath, MAXPATHLEN);
- kmem_free(dpath, MAXPATHLEN);
}
svp->svp_last_pkt_reason = pkt->pkt_reason;
VHCI_DECR_PATH_CMDCOUNT(svp);
@@ -3672,7 +3689,7 @@ void
vhci_update_pathstates(void *arg)
{
mdi_pathinfo_t *pip, *npip;
- dev_info_t *dip, *pdip;
+ dev_info_t *dip;
struct scsi_failover_ops *fo;
struct scsi_vhci_priv *svp;
struct scsi_device *psd;
@@ -3680,7 +3697,7 @@ vhci_update_pathstates(void *arg)
char *pclass, *tptr;
struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
int sps; /* mdi_select_path() status */
- char *cpath, *dpath;
+ char *cpath;
struct scsi_vhci *vhci;
struct scsi_pkt *pkt;
struct buf *bp;
@@ -3747,22 +3764,16 @@ vhci_update_pathstates(void *arg)
VHCI_DEBUG(1, (CE_NOTE, NULL,
"!vhci_update_pathstates: marking path"
" 0x%p as ONLINE\n", (void *)pip));
- pdip = mdi_pi_get_phci(pip);
cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
- dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
- vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s"
- " (%s%d): path %s (%s%d) target address %s"
- " is now ONLINE because of"
- " an externally initiated failover",
+ vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
+ "(%s%d): path %s "
+ "is now ONLINE because of "
+ "an externally initiated failover",
ddi_pathname(dip, cpath),
ddi_driver_name(dip),
ddi_get_instance(dip),
- ddi_pathname(pdip, dpath),
- ddi_driver_name(pdip),
- ddi_get_instance(pdip),
- mdi_pi_get_addr(pip));
+ mdi_pi_spathname(pip));
kmem_free(cpath, MAXPATHLEN);
- kmem_free(dpath, MAXPATHLEN);
mdi_pi_set_state(pip,
MDI_PATHINFO_STATE_ONLINE);
mdi_pi_set_preferred(pip,
@@ -3861,22 +3872,16 @@ vhci_update_pathstates(void *arg)
VHCI_DEBUG(1, (CE_NOTE, NULL,
"!vhci_update_pathstates: marking path"
" 0x%p as STANDBY\n", (void *)pip));
- pdip = mdi_pi_get_phci(pip);
cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
- dpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
- vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s"
- " (%s%d): path %s (%s%d) target address %s"
- " is now STANDBY because of"
- " an externally initiated failover",
+ vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
+ "(%s%d): path %s "
+ "is now STANDBY because of "
+ "an externally initiated failover",
ddi_pathname(dip, cpath),
ddi_driver_name(dip),
ddi_get_instance(dip),
- ddi_pathname(pdip, dpath),
- ddi_driver_name(pdip),
- ddi_get_instance(pdip),
- mdi_pi_get_addr(pip));
+ mdi_pi_spathname(pip));
kmem_free(cpath, MAXPATHLEN);
- kmem_free(dpath, MAXPATHLEN);
mdi_pi_set_state(pip,
MDI_PATHINFO_STATE_STANDBY);
mdi_pi_set_preferred(pip,
@@ -4103,7 +4108,7 @@ vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
hba = ddi_get_driver_private(pdip);
ASSERT(hba != NULL);
- vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
+ vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_UNINIT);
svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
if (svp == NULL) {
/* path already freed. Nothing to do. */
@@ -8647,3 +8652,33 @@ vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
}
}
#endif
+static void
+vhci_invalidate_mpapi_lu(struct scsi_vhci *vhci, scsi_vhci_lun_t *vlun)
+{
+ char *svl_wwn;
+ mpapi_item_list_t *ilist;
+ mpapi_lu_data_t *ld;
+
+ if (vlun == NULL) {
+ return;
+ } else {
+ svl_wwn = vlun->svl_lun_wwn;
+ }
+
+ ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_MULTIPATH_LU]->head;
+
+ while (ilist != NULL) {
+ ld = (mpapi_lu_data_t *)(ilist->item->idata);
+ if ((ld != NULL) && (strncmp(ld->prop.name, svl_wwn,
+ strlen(svl_wwn)) == 0)) {
+ ld->valid = 0;
+ VHCI_DEBUG(6, (CE_WARN, NULL,
+ "vhci_invalidate_mpapi_lu: "
+ "Invalidated LU(%s)", svl_wwn));
+ return;
+ }
+ ilist = ilist->next;
+ }
+ VHCI_DEBUG(6, (CE_WARN, NULL, "vhci_invalidate_mpapi_lu: "
+ "Could not find LU(%s) to invalidate.", svl_wwn));
+}
diff --git a/usr/src/uts/common/io/scsi/conf/scsi_confdata.c b/usr/src/uts/common/io/scsi/conf/scsi_confdata.c
index c41d105fc7..c229aca6ae 100644
--- a/usr/src/uts/common/io/scsi/conf/scsi_confdata.c
+++ b/usr/src/uts/common/io/scsi/conf/scsi_confdata.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -98,4 +98,22 @@ int scsi_watchdog_tick = 10;
*/
int scsi_fm_capable = DDI_FM_EREPORT_CAPABLE;
+/*
+ * The SCSI enumeration option definitions are kept in <scsi/conf/autoconf.h>.
+ * When scsi_enumeration is enabled, driver.conf enumeration is unnecessary.
+ *
+ * The global variable "scsi_enumeration" is used as the default value of the
+ * "scsi-enumeration" property. In addition to enabline/disabling enumeration
+ * (bit 0), target and lun threading can be specified.
+ *
+ * 0 driver.conf enumeration
+ * 1 dynamic enumeration with target/lun multi-threading.
+ * 3 dynamic enumeration with lun multi-threading disabled.
+ * 5 dynamic enumeration with target multi-threading disabled.
+ * 7 dynamic enumeration with target/lun multi-threading disabled.
+ *
+ * Default is currently driver.conf enumeration (0).
+ */
+int scsi_enumeration = 0;
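/*
 * Illustrative example (editorial addition, not part of this patch): since
 * scsi_enumeration is an ordinary kernel variable, its default could also be
 * overridden at boot with an /etc/system entry, assuming this file is built
 * into the "scsi" misc module:
 *
 *	set scsi:scsi_enumeration = 1
 *
 * As described above, the variable only supplies the default for the
 * "scsi-enumeration" property, so a per-driver property setting (e.g. in a
 * driver.conf file) would presumably still take precedence.
 */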
+
#endif /* _KERNEL */
diff --git a/usr/src/uts/common/io/scsi/conf/scsi_confsubr.c b/usr/src/uts/common/io/scsi/conf/scsi_confsubr.c
index e197c54365..cf0f4135a9 100644
--- a/usr/src/uts/common/io/scsi/conf/scsi_confsubr.c
+++ b/usr/src/uts/common/io/scsi/conf/scsi_confsubr.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -39,11 +39,11 @@
* macro for filling in lun value for scsi-1 support
*/
-#define FILL_SCSI1_LUN(devp, pkt) \
- if ((devp->sd_address.a_lun > 0) && \
- (devp->sd_inq->inq_ansi == 0x1)) { \
+#define FILL_SCSI1_LUN(sd, pkt) \
+ if ((sd->sd_address.a_lun > 0) && \
+ (sd->sd_inq->inq_ansi == 0x1)) { \
((union scsi_cdb *)(pkt)->pkt_cdbp)->scc_lun = \
- devp->sd_address.a_lun; \
+ sd->sd_address.a_lun; \
}
extern struct mod_ops mod_miscops;
@@ -64,6 +64,12 @@ static int scsi_check_ss2_LUN_limit(struct scsi_device *);
static void scsi_establish_LUN_limit(struct scsi_device *);
static void scsi_update_parent_ss2_prop(dev_info_t *, int, int);
+static int check_vpd_page_support8083(struct scsi_device *sd,
+ int (*callback)(), int *, int *);
+static int send_scsi_INQUIRY(struct scsi_device *sd,
+ int (*callback)(), uchar_t *bufaddr, size_t buflen,
+ uchar_t evpd, uchar_t page_code, size_t *lenp);
+
/*
* this int-array HBA-node property keeps track of strictly SCSI-2
* target IDs
@@ -209,10 +215,10 @@ _info(struct modinfo *modinfop)
return (mod_info(&modlinkage, modinfop));
}
-#define ROUTE (&devp->sd_address)
+#define ROUTE (&sd->sd_address)
static int
-scsi_slave_do_rqsense(struct scsi_device *devp, int (*callback)())
+scsi_slave_do_rqsense(struct scsi_device *sd, int (*callback)())
{
struct scsi_pkt *rq_pkt = NULL;
struct buf *rq_bp = NULL;
@@ -243,7 +249,7 @@ scsi_slave_do_rqsense(struct scsi_device *devp, int (*callback)())
(void) scsi_setup_cdb((union scsi_cdb *)rq_pkt->
pkt_cdbp, SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
- FILL_SCSI1_LUN(devp, rq_pkt);
+ FILL_SCSI1_LUN(sd, rq_pkt);
rq_pkt->pkt_flags = FLAG_NOINTR|FLAG_NOPARITY|FLAG_SENSING;
/*
@@ -275,11 +281,11 @@ out:
*
* SCSI slave probe routine - provided as a service to target drivers
*
- * Mostly attempts to allocate and fill devp inquiry data..
+ * Mostly attempts to allocate and fill sd inquiry data..
*/
int
-scsi_slave(struct scsi_device *devp, int (*callback)())
+scsi_slave(struct scsi_device *sd, int (*callback)())
{
struct scsi_pkt *pkt;
int rval = SCSIPROBE_EXISTS;
@@ -298,7 +304,7 @@ scsi_slave(struct scsi_device *devp, int (*callback)())
(void) scsi_setup_cdb((union scsi_cdb *)pkt->pkt_cdbp,
SCMD_TEST_UNIT_READY, 0, 0, 0);
- FILL_SCSI1_LUN(devp, pkt);
+ FILL_SCSI1_LUN(sd, pkt);
pkt->pkt_flags = FLAG_NOINTR|FLAG_NOPARITY;
if (scsi_poll(pkt) < 0) {
@@ -313,7 +319,7 @@ scsi_slave(struct scsi_device *devp, int (*callback)())
* scanner and processor devices can return a
* check condition here
*/
- rval = scsi_slave_do_rqsense(devp, callback);
+ rval = scsi_slave_do_rqsense(sd, callback);
}
if (rval != SCSIPROBE_EXISTS) {
@@ -338,18 +344,19 @@ scsi_slave(struct scsi_device *devp, int (*callback)())
*/
if ((pkt->pkt_state & STATE_ARQ_DONE) == 0) {
if (((struct scsi_status *)pkt->pkt_scbp)->sts_chk) {
- rval = scsi_slave_do_rqsense(devp, callback);
+ rval = scsi_slave_do_rqsense(sd, callback);
}
}
/*
* call scsi_probe to do the inquiry
- * XXX there is minor difference with the old scsi_slave implementation:
- * busy conditions are not handled in scsi_probe.
+ *
+ * NOTE: there is a minor difference with the old scsi_slave
+ * implementation: busy conditions are not handled in scsi_probe.
*/
scsi_destroy_pkt(pkt);
if (rval == SCSIPROBE_EXISTS) {
- return (scsi_probe(devp, callback));
+ return (scsi_probe(sd, callback));
} else {
return (rval);
}
@@ -363,7 +370,7 @@ scsi_slave(struct scsi_device *devp, int (*callback)())
*/
/*ARGSUSED*/
void
-scsi_unslave(struct scsi_device *devp)
+scsi_unslave(struct scsi_device *sd)
{
}
@@ -375,7 +382,7 @@ scsi_unslave(struct scsi_device *devp)
*/
/*ARGSUSED*/
void
-scsi_unprobe(struct scsi_device *devp)
+scsi_unprobe(struct scsi_device *sd)
{
}
@@ -445,12 +452,12 @@ exit:
* intact.
*/
int
-scsi_probe(struct scsi_device *devp, int (*callback)())
+scsi_probe(struct scsi_device *sd, int (*callback)())
{
- int ret;
- scsi_hba_tran_t *tran = devp->sd_address.a_hba_tran;
+ int ret;
+ scsi_hba_tran_t *tran = sd->sd_address.a_hba_tran;
- if (scsi_check_ss2_LUN_limit(devp) != 0) {
+ if (scsi_check_ss2_LUN_limit(sd) != 0) {
/*
* caller is trying to probe a strictly-SCSI-2 device
* with a LUN that is too large, so do not allow it
@@ -459,32 +466,43 @@ scsi_probe(struct scsi_device *devp, int (*callback)())
}
if (tran->tran_tgt_probe != NULL) {
- ret = (*tran->tran_tgt_probe)(devp, callback);
+ ret = (*tran->tran_tgt_probe)(sd, callback);
} else {
- ret = scsi_hba_probe(devp, callback);
+ ret = scsi_hba_probe(sd, callback);
}
if (ret == SCSIPROBE_EXISTS) {
- create_inquiry_props(devp);
+ create_inquiry_props(sd);
/* is this a strictly-SCSI-2 node ?? */
- scsi_establish_LUN_limit(devp);
+ scsi_establish_LUN_limit(sd);
}
return (ret);
}
+/*
+ * probe scsi device using any available path
+ *
+ */
+int
+scsi_hba_probe(struct scsi_device *sd, int (*callback)())
+{
+ return (scsi_hba_probe_pi(sd, callback, 0));
+}
/*
- * scsi_hba_probe does not do any test unit ready's which access the medium
+ * probe scsi device using specific path
+ *
+ * scsi_hba_probe_pi does not do any test unit ready's which access the medium
* and could cause busy or not ready conditions.
- * scsi_hba_probe does 2 inquiries and a rqsense to clear unit attention
+ * scsi_hba_probe_pi does 2 inquiries and a rqsense to clear unit attention
* and to allow sync negotiation to take place
- * finally, scsi_hba_probe does one more inquiry which should
+ * finally, scsi_hba_probe_pi does one more inquiry which should
* reliably tell us what kind of target we have.
* A scsi-2 compliant target should be able to return inquiry with 250ms
* and we actually wait more than a second after reset.
*/
int
-scsi_hba_probe(struct scsi_device *devp, int (*callback)())
+scsi_hba_probe_pi(struct scsi_device *sd, int (*callback)(), int pi)
{
struct scsi_pkt *inq_pkt = NULL;
struct scsi_pkt *rq_pkt = NULL;
@@ -494,11 +512,11 @@ scsi_hba_probe(struct scsi_device *devp, int (*callback)())
int (*cb_flag)();
int pass = 1;
- if (devp->sd_inq == NULL) {
- devp->sd_inq = (struct scsi_inquiry *)
+ if (sd->sd_inq == NULL) {
+ sd->sd_inq = (struct scsi_inquiry *)
kmem_alloc(SUN_INQSIZE, ((callback == SLEEP_FUNC) ?
KM_SLEEP : KM_NOSLEEP));
- if (devp->sd_inq == NULL) {
+ if (sd->sd_inq == NULL) {
goto out;
}
}
@@ -529,6 +547,14 @@ scsi_hba_probe(struct scsi_device *devp, int (*callback)())
inq_pkt->pkt_flags = FLAG_NOINTR|FLAG_NOPARITY;
/*
+ * set transport path
+ */
+ if (pi && scsi_pkt_allocated_correctly(inq_pkt)) {
+ inq_pkt->pkt_path_instance = pi;
+ inq_pkt->pkt_flags |= FLAG_PKT_PATH_INSTANCE;
+ }
+
+ /*
* the first inquiry will tell us whether a target
* responded
*
@@ -537,8 +563,8 @@ scsi_hba_probe(struct scsi_device *devp, int (*callback)())
* incorrect after we have real sd_inq data (for lun0) we will do a
* second pass during which FILL_SCSI1_LUN will place lun in CDB.
*/
- bzero((caddr_t)devp->sd_inq, SUN_INQSIZE);
-again: FILL_SCSI1_LUN(devp, inq_pkt);
+ bzero((caddr_t)sd->sd_inq, SUN_INQSIZE);
+again: FILL_SCSI1_LUN(sd, inq_pkt);
if (scsi_test(inq_pkt) < 0) {
if (inq_pkt->pkt_reason == CMD_INCOMPLETE) {
@@ -614,10 +640,18 @@ again: FILL_SCSI1_LUN(devp, inq_pkt);
(void) scsi_setup_cdb((union scsi_cdb *)rq_pkt->
pkt_cdbp, SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
- FILL_SCSI1_LUN(devp, rq_pkt);
+ FILL_SCSI1_LUN(sd, rq_pkt);
rq_pkt->pkt_flags = FLAG_NOINTR|FLAG_NOPARITY;
/*
+ * set transport path
+ */
+ if (pi && scsi_pkt_allocated_correctly(rq_pkt)) {
+ rq_pkt->pkt_path_instance = pi;
+ rq_pkt->pkt_flags |= FLAG_PKT_PATH_INSTANCE;
+ }
+
+ /*
* The FILL_SCSI1_LUN above will find "inq_ansi != 1"
* on first pass, see "again" comment above.
*
@@ -698,7 +732,7 @@ done:
} else {
ASSERT(inq_pkt->pkt_resid >= 0);
bcopy((caddr_t)inq_bp->b_un.b_addr,
- (caddr_t)devp->sd_inq, (SUN_INQSIZE - inq_pkt->pkt_resid));
+ (caddr_t)sd->sd_inq, (SUN_INQSIZE - inq_pkt->pkt_resid));
rval = SCSIPROBE_EXISTS;
}
@@ -708,9 +742,9 @@ out:
* the "real" lun needs to be embedded into the cdb.
*/
if ((rval == SCSIPROBE_EXISTS) && (pass == 1) &&
- (devp->sd_address.a_lun > 0) && (devp->sd_inq->inq_ansi == 0x1)) {
+ (sd->sd_address.a_lun > 0) && (sd->sd_inq->inq_ansi == 0x1)) {
pass++;
- if (devp->sd_address.a_lun <= 7)
+ if (sd->sd_address.a_lun <= 7)
goto again;
/*
@@ -739,70 +773,171 @@ out:
* Convert from a scsi_device structure pointer to a scsi_hba_tran structure
* pointer. The correct way to do this is
*
- * #define DEVP_TO_TRAN(devp) ((devp)->sd_address.a_hba_tran)
+ * #define DEVP_TO_TRAN(sd) ((sd)->sd_address.a_hba_tran)
*
* however we have some consumers that place their own vector in a_hba_tran. To
* avoid problems, we implement this using the sd_tran_safe. See
* scsi_hba_initchild for more details.
*/
-#define DEVP_TO_TRAN(devp) ((devp)->sd_tran_safe)
+#define DEVP_TO_TRAN(sd) ((sd)->sd_tran_safe)
/*
- * Function to get human readable REPORTDEV addressing information from
- * scsi address structure for SPI when tran_get_bus_addr is not implemented.
+ * Function, callable from SCSA framework, to get 'human' readable REPORTDEV
+ * addressing information from scsi_device properties.
*/
int
-scsi_get_bus_addr(struct scsi_device *devp, char *ba, int len)
+scsi_ua_get_reportdev(struct scsi_device *sd, char *ra, int len)
{
- struct scsi_address *ap;
+ /* use deprecated tran_get_bus_addr interface if it is defined */
+ /* NOTE: tran_get_bus_addr is a poor name choice for interface */
+ if (DEVP_TO_TRAN(sd)->tran_get_bus_addr)
+ return ((*DEVP_TO_TRAN(sd)->tran_get_bus_addr)(sd, ra, len));
+ return (scsi_hba_ua_get_reportdev(sd, ra, len));
+}
- /* use tran_get_bus_addr interface if it is defined */
- if (DEVP_TO_TRAN(devp)->tran_get_bus_addr)
- return ((*DEVP_TO_TRAN(devp)->tran_get_bus_addr)
- (devp, ba, len));
+/*
+ * Function, callable from HBA driver's tran_get_bus_addr(9E) implementation,
+ * to get standard form of human readable REPORTDEV addressing information
+ * from scsi_device properties.
+ */
+int
+scsi_hba_ua_get_reportdev(struct scsi_device *sd, char *ra, int len)
+{
+ int tgt, lun, sfunc;
+ char *tgt_port;
+ scsi_lun64_t lun64;
+
+ /* get device unit-address properties */
+ tgt = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_TARGET, -1);
+ if (scsi_device_prop_lookup_string(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_PROP_SUCCESS)
+ tgt_port = NULL;
+ if ((tgt == -1) && (tgt_port == NULL))
+ return (0); /* no target */
+
+ lun = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_LUN, 0);
+ lun64 = scsi_device_prop_get_int64(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_LUN64, lun);
+ sfunc = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_SFUNC, -1);
+
+ /*
+ * XXX should the default be to print this in decimal for
+ * "human readable" form, so it matches conf files?
+ */
+ if (tgt_port) {
+ if (sfunc == -1)
+ (void) snprintf(ra, len,
+ "%s %s lun %" PRIx64,
+ SCSI_ADDR_PROP_TARGET_PORT, tgt_port, lun64);
+ else
+ (void) snprintf(ra, len,
+ "%s %s lun %" PRIx64 " sfunc %x",
+ SCSI_ADDR_PROP_TARGET_PORT, tgt_port, lun64, sfunc);
+ scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
+ } else {
+ if (sfunc == -1)
+ (void) snprintf(ra, len,
+ "%s %x lun %" PRIx64,
+ SCSI_ADDR_PROP_TARGET, tgt, lun64);
+ else
+ (void) snprintf(ra, len,
+ "%s %x lun %" PRIx64 " sfunc %x",
+ SCSI_ADDR_PROP_TARGET, tgt, lun64, sfunc);
+ }
- ap = &devp->sd_address;
- (void) snprintf(ba, len, "target %x lun %x", ap->a_target, ap->a_lun);
return (1);
}
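/*
 * Illustrative example (editorial addition, not part of this patch): with the
 * property-based format above, and assuming SCSI_ADDR_PROP_TARGET_PORT and
 * SCSI_ADDR_PROP_TARGET expand to "target-port" and "target", a device with a
 * "target-port" property of "w5000c5000d34ae7f" and LUN 0 would produce a
 * REPORTDEV string such as "target-port w5000c5000d34ae7f lun 0", while a
 * legacy device with only a "target" property of 2 would produce
 * "target 2 lun 0". The WWN value is hypothetical.
 */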
/*
- * scsi_set_name: using properties, return "unit-address" string.
+ * scsi_ua_get: using properties, return "unit-address" string.
+ * Called by SCSA framework, may call HBAs tran function.
+ */
+int
+scsi_ua_get(struct scsi_device *sd, char *ua, int len)
+{
+ char *eua;
+
+ /* See if we already have established the unit-address. */
+ if ((eua = scsi_device_unit_address(sd)) != NULL) {
+ (void) strlcpy(ua, eua, len);
+ return (1);
+ }
+
+ /* Use deprecated tran_get_name interface if it is defined. */
+ /* NOTE: tran_get_name is a poor name choice for interface */
+ if (DEVP_TO_TRAN(sd)->tran_get_name)
+ return ((*DEVP_TO_TRAN(sd)->tran_get_name)(sd, ua, len));
+
+ /* Use generic property implementation */
+ return (scsi_hba_ua_get(sd, ua, len));
+}
+
+/*
+ * scsi_hba_ua_get: using properties, return "unit-address" string.
+ * This function may be called from an HBAs tran function.
*
* Function to get "unit-address" in "name@unit-address" /devices path
- * 'name' form from the unit-address properties on a node.
+ * component form from the scsi_device unit-address properties on a node.
*
- * NOTE: a better name for this function would be scsi_get_unit_address.
+ * NOTE: This function works in conjunction with scsi_hba_ua_set().
*/
int
-scsi_get_name(struct scsi_device *devp, char *ua, int len)
+scsi_hba_ua_get(struct scsi_device *sd, char *ua, int len)
{
- struct scsi_address *ap;
-
- /* use tran_get_name interface if it is defined */
- if (DEVP_TO_TRAN(devp)->tran_get_name)
- return ((*DEVP_TO_TRAN(devp)->tran_get_name)
- (devp, ua, len));
-
- ap = &devp->sd_address;
- (void) snprintf(ua, len, "%x,%x", ap->a_target, ap->a_lun);
+ int tgt, lun, sfunc;
+ char *tgt_port;
+ scsi_lun64_t lun64;
+
+ /* get device unit-address properties */
+ tgt = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_TARGET, -1);
+ if (scsi_device_prop_lookup_string(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_PROP_SUCCESS)
+ tgt_port = NULL;
+ if ((tgt == -1) && (tgt_port == NULL))
+ return (0); /* no target */
+
+ lun = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_LUN, 0);
+ lun64 = scsi_device_prop_get_int64(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_LUN64, lun);
+ sfunc = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
+ SCSI_ADDR_PROP_SFUNC, -1);
+ if (tgt_port) {
+ if (sfunc == -1)
+ (void) snprintf(ua, len, "%s,%" PRIx64,
+ tgt_port, lun64);
+ else
+ (void) snprintf(ua, len, "%s,%" PRIx64 ",%x",
+ tgt_port, lun64, sfunc);
+ scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
+ } else {
+ if (sfunc == -1)
+ (void) snprintf(ua, len, "%x,%" PRIx64, tgt, lun64);
+ else
+ (void) snprintf(ua, len, "%x,%" PRIx64 ",%x",
+ tgt, lun64, sfunc);
+ }
return (1);
}
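/*
 * Illustrative example (editorial addition, not part of this patch): using
 * the same properties, scsi_hba_ua_get() builds the /devices unit-address
 * component. Following the format strings above, a "target-port" of
 * "w5000c5000d34ae7f" with lun64 0 yields "w5000c5000d34ae7f,0", a legacy
 * target 2 / LUN 0 yields "2,0", and a secondary function adds a third
 * ",sfunc" field in hex. The WWN value is hypothetical.
 */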
void
-create_inquiry_props(struct scsi_device *devp)
+create_inquiry_props(struct scsi_device *sd)
{
- struct scsi_inquiry *inq = devp->sd_inq;
+ struct scsi_inquiry *inq = sd->sd_inq;
- (void) ndi_prop_update_int(DDI_DEV_T_NONE, devp->sd_dev,
+ (void) ndi_prop_update_int(DDI_DEV_T_NONE, sd->sd_dev,
INQUIRY_DEVICE_TYPE, (int)inq->inq_dtype);
/*
* Create the following properties:
*
- * inquiry-vendor-id Vendor id (INQUIRY data bytes 8-15)
- * inquiry-product-id Product id (INQUIRY data bytes 16-31)
- * inquiry-revision-id Product Rev level (INQUIRY data bytes 32-35)
+ * inquiry-vendor-id Vendor id (INQUIRY data bytes 8-15)
+ * inquiry-product-id Product id (INQUIRY data bytes 16-31)
+ * inquiry-revision-id Product Rev level (INQUIRY data bytes 32-35)
*
* Note we don't support creation of these properties for scsi-1
* devices (as the vid, pid and revision were not defined) and we
@@ -810,21 +945,21 @@ create_inquiry_props(struct scsi_device *devp)
* stripped of Nulls and spaces.
*/
if (inq->inq_ansi != 1) {
- if (ddi_prop_exists(DDI_DEV_T_NONE, devp->sd_dev,
+ if (ddi_prop_exists(DDI_DEV_T_NONE, sd->sd_dev,
DDI_PROP_TYPE_STRING, INQUIRY_VENDOR_ID) == 0)
- (void) scsi_hba_prop_update_inqstring(devp,
+ (void) scsi_device_prop_update_inqstring(sd,
INQUIRY_VENDOR_ID,
inq->inq_vid, sizeof (inq->inq_vid));
- if (ddi_prop_exists(DDI_DEV_T_NONE, devp->sd_dev,
+ if (ddi_prop_exists(DDI_DEV_T_NONE, sd->sd_dev,
DDI_PROP_TYPE_STRING, INQUIRY_PRODUCT_ID) == 0)
- (void) scsi_hba_prop_update_inqstring(devp,
+ (void) scsi_device_prop_update_inqstring(sd,
INQUIRY_PRODUCT_ID,
inq->inq_pid, sizeof (inq->inq_pid));
- if (ddi_prop_exists(DDI_DEV_T_NONE, devp->sd_dev,
+ if (ddi_prop_exists(DDI_DEV_T_NONE, sd->sd_dev,
DDI_PROP_TYPE_STRING, INQUIRY_REVISION_ID) == 0)
- (void) scsi_hba_prop_update_inqstring(devp,
+ (void) scsi_device_prop_update_inqstring(sd,
INQUIRY_REVISION_ID,
inq->inq_revision, sizeof (inq->inq_revision));
}
@@ -835,7 +970,7 @@ create_inquiry_props(struct scsi_device *devp)
* treatment to trim trailing blanks (etc) and ensure null termination.
*/
int
-scsi_hba_prop_update_inqstring(struct scsi_device *devp,
+scsi_device_prop_update_inqstring(struct scsi_device *sd,
char *name, char *data, size_t len)
{
int ilen;
@@ -851,7 +986,7 @@ scsi_hba_prop_update_inqstring(struct scsi_device *devp,
data_string = kmem_zalloc(ilen + 1, KM_SLEEP);
bcopy(data, data_string, ilen);
rv = ndi_prop_update_string(DDI_DEV_T_NONE,
- devp->sd_dev, name, data_string);
+ sd->sd_dev, name, data_string);
kmem_free(data_string, ilen + 1);
return (rv);
}
@@ -944,11 +1079,11 @@ scsi_device_hba_private_get(struct scsi_device *sd)
* else return 1, meaning do *NOT* probe this target/LUN
*/
static int
-scsi_check_ss2_LUN_limit(struct scsi_device *devp)
+scsi_check_ss2_LUN_limit(struct scsi_device *sd)
{
- struct scsi_address *ap = &(devp->sd_address);
+ struct scsi_address *ap = &(sd->sd_address);
dev_info_t *pdevi =
- (dev_info_t *)DEVI(devp->sd_dev)->devi_parent;
+ (dev_info_t *)DEVI(sd->sd_dev)->devi_parent;
int ret_val = 0; /* default return value */
uchar_t *tgt_list;
uint_t tgt_nelements;
@@ -1023,11 +1158,11 @@ scsi_check_ss2_LUN_limit(struct scsi_device *devp)
* and if it is we mark our parent node with this information
*/
static void
-scsi_establish_LUN_limit(struct scsi_device *devp)
+scsi_establish_LUN_limit(struct scsi_device *sd)
{
- struct scsi_address *ap = &(devp->sd_address);
- struct scsi_inquiry *inq = devp->sd_inq;
- dev_info_t *devi = devp->sd_dev;
+ struct scsi_address *ap = &(sd->sd_address);
+ struct scsi_inquiry *inq = sd->sd_inq;
+ dev_info_t *devi = sd->sd_dev;
char *vid = NULL;
char *pid = NULL;
char *rev = NULL;
@@ -1286,3 +1421,246 @@ scsi_update_parent_ss2_prop(dev_info_t *devi, int tgt, int add_tgt)
}
#endif
}
+
+
+/* XXX BEGIN: find a better place for this: inquiry.h? */
+/*
+ * Definitions used by device id registration routines
+ */
+#define VPD_HEAD_OFFSET 3 /* size of head for vpd page */
+#define VPD_PAGE_LENGTH 3 /* offset for page length data */
+#define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */
+
+/* size for devid inquiries */
+#define MAX_INQUIRY_SIZE 0xF0
+#define MAX_INQUIRY_SIZE_EVPD 0xFF /* XXX why is this longer */
+/* XXX END: find a better place for these */
+
+
+/*
+ * Decorate the devinfo node with identity properties using information obtained
+ * from the device. These properties are used by device enumeration code to derive
+ * the devid and guid for the device. These properties are also used to
+ * determine if a device should be enumerated under the physical HBA (PHCI) or
+ * the virtual HBA (VHCI, for mpxio support).
+ *
+ * Return zero on success. If commands that should succeed fail or allocations
+ * fail then return failure (non-zero). It is possible for this function to
+ * return success and not have decorated the node with any additional identity
+ * information if the device correctly responds indicating that the pages are
+ * not supported. When failure occurs the caller should consider not making the
+ * device accessible.
+ */
+int
+scsi_device_identity(struct scsi_device *sd, int (*callback)())
+{
+ dev_info_t *devi = sd->sd_dev;
+ uchar_t *inq80 = NULL;
+ uchar_t *inq83 = NULL;
+ int rval;
+ size_t len;
+ int pg80, pg83;
+
+ /* find out what pages are supported by device */
+ if (check_vpd_page_support8083(sd, callback, &pg80, &pg83) == -1)
+ return (-1);
+
+ /* if available, collect page 80 data and add as property */
+ if (pg80) {
+ inq80 = kmem_zalloc(MAX_INQUIRY_SIZE,
+ ((callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP));
+ if (inq80 == NULL) {
+ rval = -1;
+ goto out;
+ }
+
+ rval = send_scsi_INQUIRY(sd, callback, inq80,
+ MAX_INQUIRY_SIZE, 0x01, 0x80, &len);
+ if (rval)
+ goto out; /* should have worked */
+
+ if (len && (ndi_prop_update_byte_array(DDI_DEV_T_NONE, devi,
+ "inquiry-page-80", inq80, len) != DDI_PROP_SUCCESS)) {
+ cmn_err(CE_WARN, "scsi_device_identity: "
+ "failed to add page80 prop");
+ rval = -1;
+ goto out;
+ }
+ }
+
+ /* if available, collect page 83 data and add as property */
+ if (pg83) {
+ inq83 = kmem_zalloc(MAX_INQUIRY_SIZE,
+ ((callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP));
+ if (inq83 == NULL) {
+ rval = -1;
+ goto out;
+ }
+
+ rval = send_scsi_INQUIRY(sd, callback, inq83,
+ MAX_INQUIRY_SIZE, 0x01, 0x83, &len);
+ if (rval)
+ goto out; /* should have worked */
+
+ if (len && (ndi_prop_update_byte_array(DDI_DEV_T_NONE, devi,
+ "inquiry-page-83", inq83, len) != DDI_PROP_SUCCESS)) {
+ cmn_err(CE_WARN, "scsi_device_identity: "
+ "failed to add page83 prop");
+ rval = -1;
+ goto out;
+ }
+ }
+
+ /* Commands worked, identity information that exists has been added. */
+ rval = 0;
+
+ /* clean up resources */
+out: if (inq80 != NULL)
+ kmem_free(inq80, MAX_INQUIRY_SIZE);
+ if (inq83 != NULL)
+ kmem_free(inq83, MAX_INQUIRY_SIZE);
+
+ return (rval);
+}
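A hedged usage sketch for scsi_device_identity(): the wrapper name is hypothetical and simply reflects the contract in the block comment above (non-zero return means the caller should consider not exposing the device).

/* Hypothetical wrapper, not part of this change. */
static int
example_decorate_identity(struct scsi_device *sd)
{
	/* Enumeration context can block, so use SLEEP_FUNC as the callback. */
	if (scsi_device_identity(sd, SLEEP_FUNC) != 0) {
		cmn_err(CE_WARN, "identity collection failed, "
		    "not exposing device");
		return (DDI_FAILURE);
	}

	/* inquiry-page-80/inquiry-page-83 properties now exist if supported */
	return (DDI_SUCCESS);
}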
+
+/*
+ * Send an INQUIRY command with the EVPD bit set and a page code of 0x00 to
+ * the device, returning zero on success. Returned INQUIRY data is used to
+ * determine which vital product pages are supported. The device identity
+ * information we are looking for is in pages 0x83 and/or 0x80. If the device
+ * fails the EVPD inquiry then no pages are supported but the call succeeds.
+ * Return -1 (failure) if there were memory allocation failures or if a
+ * command failed that should have worked.
+ */
+static int
+check_vpd_page_support8083(struct scsi_device *sd, int (*callback)(),
+ int *ppg80, int *ppg83)
+{
+ uchar_t *page_list;
+ int counter;
+ int rval;
+
+ /* pages are not supported */
+ *ppg80 = 0;
+ *ppg83 = 0;
+
+ /*
+ * We'll set the page length to the maximum to save figuring it out
+ * with an additional call.
+ */
+ page_list = kmem_zalloc(MAX_INQUIRY_SIZE_EVPD,
+ ((callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP));
+ if (page_list == NULL)
+ return (-1); /* memory allocation problem */
+
+ /* issue page 0 (Supported VPD Pages) INQUIRY with evpd set */
+ rval = send_scsi_INQUIRY(sd, callback,
+ page_list, MAX_INQUIRY_SIZE_EVPD, 1, 0, NULL);
+
+ /*
+ * Now we must validate that the device accepted the command (some
+ * devices do not support it) and whether the identity pages we are
+ * interested in are supported.
+ */
+ if ((rval == 0) &&
+ (page_list[VPD_MODE_PAGE] == 0x00)) {
+ /* Loop to find one of the 2 pages we need */
+ counter = 4; /* Supported pages start at byte 4, with 0x00 */
+
+ /*
+ * Pages are returned in ascending order, and 0x83 is the
+ * last page we are hoping to find.
+ */
+ while ((page_list[counter] <= 0x83) &&
+ (counter <= (page_list[VPD_PAGE_LENGTH] +
+ VPD_HEAD_OFFSET))) {
+ /*
+ * Byte 3 (VPD_PAGE_LENGTH) holds the number of page
+ * codes that follow starting at byte 4, hence the
+ * VPD_HEAD_OFFSET adjustment in the loop bound above.
+ */
+
+ switch (page_list[counter]) {
+ case 0x80:
+ *ppg80 = 1;
+ break;
+ case 0x83:
+ *ppg83 = 1;
+ break;
+ }
+ counter++;
+ }
+ }
+
+ kmem_free(page_list, MAX_INQUIRY_SIZE_EVPD);
+ return (0);
+}
+
+/*
+ * Send INQUIRY command with specified EVPD and page code. Return
+ * zero on success. On success, the amount of data transferred
+ * is returned in *lenp.
+ */
+static int
+send_scsi_INQUIRY(struct scsi_device *sd, int (*callback)(),
+ uchar_t *bufaddr, size_t buflen,
+ uchar_t evpd, uchar_t page_code, size_t *lenp)
+{
+ int (*cb_flag)();
+ struct buf *inq_bp;
+ struct scsi_pkt *inq_pkt = NULL;
+ int rval = -1;
+
+ if (lenp)
+ *lenp = 0;
+ if (callback != SLEEP_FUNC && callback != NULL_FUNC)
+ cb_flag = NULL_FUNC;
+ else
+ cb_flag = callback;
+ inq_bp = scsi_alloc_consistent_buf(ROUTE,
+ (struct buf *)NULL, buflen, B_READ, cb_flag, NULL);
+ if (inq_bp == NULL)
+ goto out; /* memory allocation problem */
+
+ inq_pkt = scsi_init_pkt(ROUTE, (struct scsi_pkt *)NULL,
+ inq_bp, CDB_GROUP0, sizeof (struct scsi_arq_status),
+ 0, PKT_CONSISTENT, callback, NULL);
+ if (inq_pkt == NULL)
+ goto out; /* memory allocation problem */
+
+ ASSERT(inq_bp->b_error == 0);
+
+ /* form INQUIRY cdb with specified EVPD and page code */
+ (void) scsi_setup_cdb((union scsi_cdb *)inq_pkt->pkt_cdbp,
+ SCMD_INQUIRY, 0, buflen, 0);
+ inq_pkt->pkt_cdbp[1] = evpd;
+ inq_pkt->pkt_cdbp[2] = page_code;
+
+ inq_pkt->pkt_time = SCSI_POLL_TIMEOUT; /* in seconds */
+ inq_pkt->pkt_flags = FLAG_NOINTR|FLAG_NOPARITY;
+
+ /*
+ * Issue inquiry command thru scsi_test
+ *
+ * NOTE: This is important data about device identity, not sure why
+ * NOPARITY is used. Also seems like we should check pkt_state for
+ * STATE_XFERRED_DATA.
+ */
+ if ((scsi_test(inq_pkt) == 0) &&
+ (inq_pkt->pkt_reason == CMD_CMPLT) &&
+ (((*inq_pkt->pkt_scbp) & STATUS_MASK) == 0)) {
+ ASSERT(inq_pkt->pkt_resid >= 0);
+ ASSERT(inq_pkt->pkt_resid <= buflen);
+
+ bcopy(inq_bp->b_un.b_addr,
+ bufaddr, buflen - inq_pkt->pkt_resid);
+ if (lenp)
+ *lenp = (buflen - inq_pkt->pkt_resid);
+ rval = 0;
+ }
+
+out: if (inq_pkt)
+ scsi_destroy_pkt(inq_pkt);
+ if (inq_bp)
+ scsi_free_consistent_buf(inq_bp);
+ return (rval);
+}
diff --git a/usr/src/uts/common/io/scsi/impl/sas_transport.c b/usr/src/uts/common/io/scsi/impl/sas_transport.c
index 361a4fa167..a9db4e0689 100644
--- a/usr/src/uts/common/io/scsi/impl/sas_transport.c
+++ b/usr/src/uts/common/io/scsi/impl/sas_transport.c
@@ -55,23 +55,24 @@ int
sas_hba_probe_smp(struct smp_device *smp_devp)
{
smp_pkt_t smp_pkt_data, *smp_pkt = &smp_pkt_data;
- struct smp_report_general_req smp_req;
- struct smp_report_general_rsp smp_rsp;
+ uint8_t reqbuf[SMP_REQ_MINLEN];
+ uint8_t respbuf[SMP_RESP_MINLEN + sizeof (smp_report_general_resp_t)];
+ smp_request_frame_t *qfp = (smp_request_frame_t *)reqbuf;
+ smp_report_general_resp_t *sfp = (smp_report_general_resp_t *)respbuf;
int rval = DDI_PROBE_SUCCESS;
- bzero(&smp_req, sizeof (struct smp_report_general_req));
- bzero(&smp_rsp, sizeof (struct smp_report_general_rsp));
- smp_req.frametype = (uint8_t)SMP_FRAME_TYPE_REQUEST;
- smp_req.function = (uint8_t)SMP_REPORT_GENERAL;
- smp_req.reqsize = 0x00;
+ bzero(reqbuf, sizeof (reqbuf));
+ bzero(respbuf, sizeof (respbuf));
+ qfp->srf_frame_type = SMP_FRAME_TYPE_REQUEST;
+ qfp->srf_function = SMP_FUNC_REPORT_GENERAL;
bzero(smp_pkt, sizeof (struct smp_pkt));
smp_pkt->pkt_address = &smp_devp->smp_addr;
smp_pkt->pkt_reason = 0;
- smp_pkt->pkt_req = (caddr_t)&smp_req;
- smp_pkt->pkt_reqsize = sizeof (struct smp_report_general_req);
- smp_pkt->pkt_rsp = (caddr_t)&smp_rsp;
- smp_pkt->pkt_rspsize = sizeof (struct smp_report_general_rsp);
+ smp_pkt->pkt_req = (caddr_t)qfp;
+ smp_pkt->pkt_reqsize = sizeof (reqbuf);
+ smp_pkt->pkt_rsp = (caddr_t)sfp;
+ smp_pkt->pkt_rspsize = sizeof (respbuf);
smp_pkt->pkt_timeout = SMP_DEFAULT_TIMEOUT;
if (sas_smp_transport(smp_pkt) != DDI_SUCCESS) {
diff --git a/usr/src/uts/common/io/scsi/impl/scsi_control.c b/usr/src/uts/common/io/scsi/impl/scsi_control.c
index 8f8fc62e8c..0ccb1a2834 100644
--- a/usr/src/uts/common/io/scsi/impl/scsi_control.c
+++ b/usr/src/uts/common/io/scsi/impl/scsi_control.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 1996-2002 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Generic Abort, Reset and Misc Routines
*/
@@ -50,6 +47,9 @@ scsi_reset(struct scsi_address *ap, int level)
((*A_TO_TRAN(ap)->tran_getcap)(ap, "lun-reset", 1) != 1)) {
return (0);
}
+ if ((A_TO_TRAN(ap)->tran_reset) == NULL) {
+ return (0);
+ }
return (*A_TO_TRAN(ap)->tran_reset)(ap, level);
}
diff --git a/usr/src/uts/common/io/scsi/impl/scsi_hba.c b/usr/src/uts/common/io/scsi/impl/scsi_hba.c
index 69f3ad9a96..d3a710095d 100644
--- a/usr/src/uts/common/io/scsi/impl/scsi_hba.c
+++ b/usr/src/uts/common/io/scsi/impl/scsi_hba.c
@@ -29,19 +29,28 @@
* Generic SCSI Host Bus Adapter interface implementation
*/
#include <sys/scsi/scsi.h>
+#include <sys/scsi/generic/sas.h>
#include <sys/file.h>
+#include <sys/disp.h> /* for minclsyspri */
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
+#include <sys/sunndi.h>
#include <sys/ddi.h>
#include <sys/sunmdi.h>
+#include <sys/mdi_impldefs.h>
+#include <sys/callb.h>
#include <sys/epm.h>
+#include <sys/damap.h>
+#include <sys/time.h>
+#include <sys/sunldi.h>
extern struct scsi_pkt *scsi_init_cache_pkt(struct scsi_address *,
struct scsi_pkt *, struct buf *, int, int, int, int,
int (*)(caddr_t), caddr_t);
-extern void scsi_free_cache_pkt(struct scsi_address *, struct scsi_pkt *);
-extern void scsi_cache_dmafree(struct scsi_address *, struct scsi_pkt *);
-extern void scsi_sync_cache_pkt(struct scsi_address *, struct scsi_pkt *);
+extern void scsi_free_cache_pkt(struct scsi_address *, struct scsi_pkt *);
+extern void scsi_cache_dmafree(struct scsi_address *, struct scsi_pkt *);
+extern void scsi_sync_cache_pkt(struct scsi_address *, struct scsi_pkt *);
+extern int modrootloaded;
/*
* Round up all allocations so that we can guarantee
@@ -57,9 +66,112 @@ kmutex_t scsi_flag_nointr_mutex;
kcondvar_t scsi_flag_nointr_cv;
kmutex_t scsi_log_mutex;
+/* asynchronous probe barrier deletion data structures */
+static kmutex_t scsi_hba_barrier_mutex;
+static kcondvar_t scsi_hba_barrier_cv;
+static struct scsi_hba_barrier {
+ struct scsi_hba_barrier *barrier_next;
+ clock_t barrier_endtime;
+ dev_info_t *barrier_probe;
+} *scsi_hba_barrier_list;
+static int scsi_hba_devi_is_barrier(dev_info_t *probe);
+static void scsi_hba_barrier_tran_tgt_free(dev_info_t *probe);
+static void scsi_hba_barrier_add(dev_info_t *probe, int seconds);
+static int scsi_hba_remove_node(dev_info_t *child);
+static void scsi_hba_barrier_daemon(void *arg);
+
+/* LUN-change ASC/ASCQ processing data structures (stage1 and stage2) */
+static kmutex_t scsi_lunchg1_mutex;
+static kcondvar_t scsi_lunchg1_cv;
+static struct scsi_pkt *scsi_lunchg1_list;
+static void scsi_lunchg1_daemon(void *arg);
+static kmutex_t scsi_lunchg2_mutex;
+static kcondvar_t scsi_lunchg2_cv;
+static struct scsi_lunchg2 {
+ struct scsi_lunchg2 *lunchg2_next;
+ char *lunchg2_path;
+} *scsi_lunchg2_list;
+static void scsi_lunchg2_daemon(void *arg);
+
+static int scsi_hba_find_child(dev_info_t *self, char *name, char *addr,
+ int init, dev_info_t **dchildp, mdi_pathinfo_t **pchildp, int *ppi);
+
+/* return value defines for scsi_hba_find_child */
+#define CHILD_TYPE_NONE 0
+#define CHILD_TYPE_DEVINFO 1
+#define CHILD_TYPE_PATHINFO 2
+
+/*
+ * Enumeration code path currently being followed. SE_BUSCONFIG results in
+ * DEVI_SID_NODEID, and SE_HP (hotplug) results in DEVI_SID_HP_NODEID.
+ *
+ * Since hotplug enumeration is based on information obtained from hardware
+ * (tgtmap/report_lun) the type/severity of enumeration error messages is
+ * sometimes based on SE_HP (indirectly via ndi_dev_is_hotplug_node()). By
+ * convention, these messages all contain "enumeration failed during ...".
+ */
+typedef enum { SE_BUSCONFIG = 0, SE_HP = 1 } scsi_enum_t;
+
+/* compatible properties of driver to use during probe/enumeration operations */
+static char *compatible_probe = "scsa,probe";
+static char *compatible_nodev = "scsa,nodev";
+static char *scsi_probe_ascii[] = SCSIPROBE_ASCII;
+
+/* number of LUNs we attempt to get on the first SCMD_REPORT_LUNS command */
+int scsi_lunrpt_default_max = 256;
+int scsi_lunrpt_timeout = 3; /* seconds */
+
+/* 'scsi-binding-set' value for legacy enumerated 'spi' transports */
+char *scsi_binding_set_spi = "spi";
+
+/* enable NDI_DEVI_DEBUG for bus_[un]config operations */
+int scsi_hba_busconfig_debug = 0;
+
+/* number of probe serialization messages */
+int scsi_hba_wait_msg = 5;
+
/*
- * Prototypes for static functions
+ * Establish the timeout used to cache (in the probe node) the fact that the
+ * device does not exist. This replaces the target specific probe cache.
*/
+int scsi_hba_barrier_timeout = (60); /* seconds */
+
+/*
+ * Structure for scsi_hba_tgtmap_* implementation.
+ *
+ * Every call to scsi_hba_tgtmap_set_begin will increment tgtmap_reports,
+ * and a call to scsi_hba_tgtmap_set_end will reset tgtmap_reports to zero.
+ * If, in scsi_hba_tgtmap_set_begin, we detect a tgtmap_reports value of
+ * scsi_hba_tgtmap_reports_max we produce a message to indicate that
+ * the caller is never completing an observation (i.e. we are not making
+ * any forward progress). If this message occurs, it indicates that the
+ * Solaris hotplug state at the target and lun level is no longer
+ * tracking the hardware.
+ *
+ * NOTE: LUNMAPSIZE OK for now, but should be dynamic in reportlun code.
+ */
+typedef struct impl_scsi_tgtmap {
+ scsi_hba_tran_t *tgtmap_tran;
+ int tgtmap_reports; /* _begin without _end */
+ int tgtmap_noisy;
+ scsi_tgt_activate_cb_t tgtmap_activate_cb;
+ scsi_tgt_deactivate_cb_t tgtmap_deactivate_cb;
+ void *tgtmap_mappriv;
+ damap_t *tgtmap_dam[SCSI_TGT_NTYPES];
+} impl_scsi_tgtmap_t;
+#define LUNMAPSIZE 256 /* 256 LUNs/target */
+
+/* Produce warning if number of begins without an end exceeds this value */
+int scsi_hba_tgtmap_reports_max = 256;
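For context, a sketch of how an HBA discovery pass might drive one complete begin/end observation as described in the comment above. Only scsi_hba_tgtmap_set_begin and scsi_hba_tgtmap_set_end are named in that comment; the set_add call, all signatures, the SCSI_TGT_SCSI_DEVICE type and the xhba_* names are assumptions for illustration.

/* Sketch with assumed signatures; xhba_* names are hypothetical. */
static void
xhba_report_discovery(scsi_hba_tgtmap_t *tgtmap, char **tgt_addrs, int ntgts)
{
	int	i;

	/* One full observation: begin, report every visible target, end. */
	if (scsi_hba_tgtmap_set_begin(tgtmap) != DDI_SUCCESS)
		return;

	for (i = 0; i < ntgts; i++)
		(void) scsi_hba_tgtmap_set_add(tgtmap, SCSI_TGT_SCSI_DEVICE,
		    tgt_addrs[i], NULL);

	/* Completing the observation resets tgtmap_reports to zero. */
	(void) scsi_hba_tgtmap_set_end(tgtmap, 0);
}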
+
+/* Prototype for static dev_ops devo_*() functions */
+static int scsi_hba_info(
+ dev_info_t *self,
+ ddi_info_cmd_t infocmd,
+ void *arg,
+ void **result);
+
+/* Prototypes for static bus_ops bus_*() functions */
static int scsi_hba_bus_ctl(
dev_info_t *self,
dev_info_t *child,
@@ -106,12 +218,6 @@ static int scsi_hba_post_event(
ddi_eventcookie_t event,
void *bus_impldata);
-static int scsi_hba_info(
- dev_info_t *self,
- ddi_info_cmd_t infocmd,
- void *arg,
- void **result);
-
static int scsi_hba_bus_config(
dev_info_t *self,
uint_t flags,
@@ -138,7 +244,7 @@ static int scsi_hba_bus_power(
void *arg,
void *result);
-/* busops vector for SCSI HBA's. */
+/* bus_ops vector for SCSI HBA's. */
static struct bus_ops scsi_hba_busops = {
BUSO_REV,
nullbusmap, /* bus_map */
@@ -192,10 +298,47 @@ static struct cb_ops scsi_hba_cbops = {
nodev /* int (*cb_awrite)() */
};
+/* Prototypes for static scsi_hba.c/SCSA private lunmap interfaces */
+static int scsi_lunmap_create(
+ dev_info_t *self,
+ impl_scsi_tgtmap_t *tgtmap,
+ char *tgt_addr);
+static void scsi_lunmap_destroy(
+ dev_info_t *self,
+ impl_scsi_tgtmap_t *tgtmap,
+ char *tgt_addr);
+static void scsi_lunmap_set_begin(
+ dev_info_t *self,
+ damap_t *lundam);
+static int scsi_lunmap_set_add(
+ dev_info_t *self,
+ damap_t *lundam,
+ char *taddr,
+ scsi_lun64_t lun_num,
+ int lun_sfunc);
+static void scsi_lunmap_set_end(
+ dev_info_t *self,
+ damap_t *lundam);
+
+/* Prototypes for static misc. scsi_hba.c private bus_config interfaces */
+static int scsi_hba_bus_config_iports(dev_info_t *self, uint_t flags,
+ ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
+static int scsi_hba_bus_config_spi(dev_info_t *self, uint_t flags,
+ ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
+static dev_info_t *scsi_hba_bus_config_port(dev_info_t *self,
+ char *nameaddr, scsi_enum_t se);
+
+#ifdef sparc
+static int scsi_hba_bus_config_prom_node(dev_info_t *self, uint_t flags,
+ void *arg, dev_info_t **childp);
+#endif /* sparc */
+
+
/*
* SCSI_HBA_LOG is used for all messages. A logging level is specified when
* generating a message. Some levels correspond directly to cmn_err levels,
- * the others are associated with increasing levels diagnostic/debug output.
+ * some are associated with increasing levels of diagnostic/debug output (LOG1-4),
+ * and others are associated with specific levels of interface (LOGMAP).
* For _LOG() messages, a __func__ prefix will identify the function origin
* of the message. For _LOG_NF messages, there is no function prefix or
* self/child context. Filtering of messages is provided based on logging
@@ -216,7 +359,13 @@ static struct cb_ops scsi_hba_cbops = {
#define SCSI_HBA_LOG2 0x00000020 /* DIAG2 level enable */
#define SCSI_HBA_LOG3 0x00000040 /* DIAG3 level enable */
#define SCSI_HBA_LOG4 0x00000080 /* DIAG4 level enable */
-#define SCSI_HBA_LOGTRACE 0x00000100 /* TRACE enable */
+#define SCSI_HBA_LOGMAPPHY 0x00000100 /* MAPPHY level enable */
+#define SCSI_HBA_LOGMAPIPT 0x00000200 /* MAPIPT level enable */
+#define SCSI_HBA_LOGMAPTGT 0x00000400 /* MAPTGT level enable */
+#define SCSI_HBA_LOGMAPLUN 0x00000800 /* MAPLUN level enable */
+#define SCSI_HBA_LOGMAPCFG 0x00001000 /* MAPCFG level enable */
+#define SCSI_HBA_LOGMAPUNCFG 0x00002000 /* MAPUNCFG level enable */
+#define SCSI_HBA_LOGTRACE 0x00010000 /* TRACE enable */
#if (CE_CONT | CE_NOTE | CE_WARN | CE_PANIC | CE_IGNORE) > SCSI_HBA_LOG_CE_MASK
Error, problem with CE_ definitions
#endif
@@ -226,16 +375,23 @@ Error, problem with CE_ definitions
* SCSI_HBA_LOG_CE_MASK level messages or LOG_NF() messages.
*
* An example set of /etc/system tunings to simplify debug a SCSA pHCI HBA
- * driver called "pmcs", including "scsi_vhci" operation, might be:
+ * driver called "pmcs", including "scsi_vhci" operation, by capturing
+ * log information in the system log might be:
*
- * echo "set scsi:scsi_hba_log_filter_level=0xf0" >> /etc/system
+ * echo "set scsi:scsi_hba_log_filter_level=0x3ff0" >> /etc/system
* echo "set scsi:scsi_hba_log_filter_phci=\"pmcs\"" >> /etc/system
* echo "set scsi:scsi_hba_log_filter_vhci=\"scsi_vhci\"" >> /etc/system
+ *
+ * To capture information on just HBA-SCSAv3 *map operation, use
+ * echo "set scsi:scsi_hba_log_filter_level=0x3f10" >> /etc/system
+ *
+ * For debugging an HBA driver, you may also want to set:
+ *
* echo "set scsi:scsi_hba_log_align=1" >> /etc/system
- * echo "set scsi:scsi_hba_log_fcif=0x21" >> /etc/system
* echo "set scsi:scsi_hba_log_mt_disable=0x6" >> /etc/system
* echo "set mtc_off=1" >> /etc/system
* echo "set mdi_mtc_off=1" >> /etc/system
+ * echo "set scsi:scsi_hba_log_fcif=0" >> /etc/system
*/
int scsi_hba_log_filter_level =
SCSI_HBA_LOG1 |
@@ -243,9 +399,12 @@ int scsi_hba_log_filter_level =
char *scsi_hba_log_filter_phci = "\0\0\0\0\0\0\0\0\0\0\0\0";
char *scsi_hba_log_filter_vhci = "\0\0\0\0\0\0\0\0\0\0\0\0";
int scsi_hba_log_align = 0; /* NOTE: will not cause truncation */
-int scsi_hba_log_fcif = '\0'; /* "^!?" first char in format */
- /* ^==0x5e, !==0x21, ?==0x3F */
- /* See cmn_err(9F) */
+int scsi_hba_log_fcif = '!'; /* "^!?" first char in format */
+ /* NOTE: iff level > SCSI_HBA_LOG1 */
+ /* '\0' 0x00 -> console and system log */
+ /* '^' 0x5e -> console_only */
+ /* '!' 0x21 -> system log only */
+ /* '?' 0x3F -> See cmn_err(9F) */
int scsi_hba_log_info = /* augmentation: extra info output */
(0 << 0) | /* 0x0001: process information */
(0 << 1) | /* 0x0002: full /devices path */
@@ -265,11 +424,18 @@ static char scsi_hba_fmt[512];
/* Macros to use in scsi_hba.c source code below */
#define SCSI_HBA_LOG(x) scsi_hba_log x
#define _LOG(level) SCSI_HBA_LOG##level, __func__
+#define _MAP(map) SCSI_HBA_LOGMAP##map, __func__
#define _LOG_NF(level) SCSI_HBA_LOG##level, NULL, NULL, NULL
#define _LOG_TRACE _LOG(TRACE)
+#define _LOGLUN _MAP(LUN)
+#define _LOGTGT _MAP(TGT)
+#define _LOGIPT _MAP(IPT)
+#define _LOGPHY _MAP(PHY)
+#define _LOGCFG _MAP(CFG)
+#define _LOGUNCFG _MAP(UNCFG)
/*PRINTFLIKE5*/
-void
+static void
scsi_hba_log(int level, const char *func, dev_info_t *self, dev_info_t *child,
const char *fmt, ...)
{
@@ -383,7 +549,7 @@ scsi_hba_log(int level, const char *func, dev_info_t *self, dev_info_t *child,
f = scsi_hba_fmt;
if (fmt[0] && strchr("^!?", fmt[0]))
*f++ = fmt[0];
- else if (scsi_hba_log_fcif)
+ else if (scsi_hba_log_fcif && (level > SCSI_HBA_LOG1))
*f++ = (char)scsi_hba_log_fcif; /* add global fcif */
if (align)
(void) sprintf(f, "%s", "%-18.18s: %36.36s: %s%s");
@@ -400,6 +566,61 @@ scsi_hba_log(int level, const char *func, dev_info_t *self, dev_info_t *child,
}
/*
+ * scsi_hba version of [nm]di_devi_enter/[nm]di_devi_exit that detects if the
+ * HBA is a PHCI and chooses the mdi/ndi locking implementation accordingly.
+ */
+static void
+scsi_hba_devi_enter(dev_info_t *self, int *circp)
+{
+ if (MDI_PHCI(self))
+ mdi_devi_enter(self, circp);
+ else
+ ndi_devi_enter(self, circp);
+}
+
+static int
+scsi_hba_devi_tryenter(dev_info_t *self, int *circp)
+{
+ if (MDI_PHCI(self))
+ return (mdi_devi_tryenter(self, circp));
+ else
+ return (ndi_devi_tryenter(self, circp));
+}
+
+static void
+scsi_hba_devi_exit(dev_info_t *self, int circ)
+{
+ if (MDI_PHCI(self))
+ mdi_devi_exit(self, circ);
+ else
+ ndi_devi_exit(self, circ);
+}
+
+static void
+scsi_hba_devi_enter_phci(dev_info_t *self, int *circp)
+{
+ if (MDI_PHCI(self))
+ mdi_devi_enter_phci(self, circp);
+}
+
+static void
+scsi_hba_devi_exit_phci(dev_info_t *self, int circ)
+{
+ if (MDI_PHCI(self))
+ mdi_devi_exit_phci(self, circ);
+}
+
+static int
+scsi_hba_dev_is_sid(dev_info_t *child)
+{
+ /*
+ * Use ndi_dev_is_persistent_node instead of ddi_dev_is_sid to avoid
+ * any possible locking issues in mixed nexus devctl code (like usb).
+ */
+ return (ndi_dev_is_persistent_node(child));
+}
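A sketch showing the intended pairing of these wrappers around child-list traversal; the walker is hypothetical, and the point is only that the enter/exit discipline is identical whether or not the HBA is an mdi pHCI.

/* Hypothetical walker, not part of this change. */
static void
example_walk_children(dev_info_t *self)
{
	dev_info_t	*child;
	int		circ;

	scsi_hba_devi_enter(self, &circ);
	for (child = ddi_get_child(self); child != NULL;
	    child = ddi_get_next_sibling(child)) {
		if (scsi_hba_dev_is_sid(child))
			continue;	/* e.g. skip self-identifying nodes */
	}
	scsi_hba_devi_exit(self, circ);
}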
+
+/*
* Called from _init() when loading "scsi" module
*/
void
@@ -407,10 +628,61 @@ scsi_initialize_hba_interface()
{
SCSI_HBA_LOG((_LOG_TRACE, NULL, NULL, __func__));
+ /* We need "scsiprobe" and "scsinodev" as an alias or a driver. */
+ if (ddi_name_to_major(compatible_probe) == DDI_MAJOR_T_NONE) {
+ SCSI_HBA_LOG((_LOG_NF(WARN), "failed to resolve '%s' "
+ "driver alias, defaulting to 'nulldriver'",
+ compatible_probe));
+
+ /* If no "nulldriver" driver nothing will work... */
+ compatible_probe = "nulldriver";
+ if (ddi_name_to_major(compatible_probe) == DDI_MAJOR_T_NONE)
+ SCSI_HBA_LOG((_LOG_NF(WARN), "no probe '%s' driver, "
+ "system misconfigured", compatible_probe));
+ }
+ if (ddi_name_to_major(compatible_nodev) == DDI_MAJOR_T_NONE) {
+ SCSI_HBA_LOG((_LOG_NF(WARN), "failed to resolve '%s' "
+ "driver alias, defaulting to 'nulldriver'",
+ compatible_nodev));
+
+ /* If no "nulldriver" driver nothing will work... */
+ compatible_nodev = "nulldriver";
+ if (ddi_name_to_major(compatible_nodev) == DDI_MAJOR_T_NONE)
+ SCSI_HBA_LOG((_LOG_NF(WARN), "no nodev '%s' driver, "
+ "system misconfigured", compatible_nodev));
+ }
+
+ /*
+ * Verify our special node name "probe" will not be used in other ways.
+ * Don't expect things to work if it is.
+ */
+ if (ddi_major_to_name(ddi_name_to_major("probe")))
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "driver already using special node name 'probe'"));
+
mutex_init(&scsi_log_mutex, NULL, MUTEX_DRIVER, NULL);
mutex_init(&scsi_flag_nointr_mutex, NULL, MUTEX_DRIVER, NULL);
cv_init(&scsi_flag_nointr_cv, NULL, CV_DRIVER, NULL);
mutex_init(&scsi_hba_log_mutex, NULL, MUTEX_DRIVER, NULL);
+
+ /* initialize the asynchronous barrier deletion daemon */
+ mutex_init(&scsi_hba_barrier_mutex, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&scsi_hba_barrier_cv, NULL, CV_DRIVER, NULL);
+ (void) thread_create(NULL, 0,
+ (void (*)())scsi_hba_barrier_daemon, NULL,
+ 0, &p0, TS_RUN, minclsyspri);
+
+ /* initialize lun change ASC/ASCQ processing daemon (stage1 & stage2) */
+ mutex_init(&scsi_lunchg1_mutex, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&scsi_lunchg1_cv, NULL, CV_DRIVER, NULL);
+ (void) thread_create(NULL, 0,
+ (void (*)())scsi_lunchg1_daemon, NULL,
+ 0, &p0, TS_RUN, minclsyspri);
+ mutex_init(&scsi_lunchg2_mutex, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&scsi_lunchg2_cv, NULL, CV_DRIVER, NULL);
+ (void) thread_create(NULL, 0,
+ (void (*)())scsi_lunchg2_daemon, NULL,
+ 0, &p0, TS_RUN, minclsyspri);
}
int
@@ -451,8 +723,7 @@ scsi_hba_pkt_constructor(void *buf, void *arg, int kmflag)
*/
pktw->pcw_granular = tran->tran_dma_attr.dma_attr_granular;
- if (ddi_dma_alloc_handle(tran->tran_hba_dip,
- &tran->tran_dma_attr,
+ if (ddi_dma_alloc_handle(tran->tran_hba_dip, &tran->tran_dma_attr,
kmflag == KM_SLEEP ? SLEEP_FUNC: NULL_FUNC, NULL,
&pkt->pkt_handle) != DDI_SUCCESS) {
@@ -543,9 +814,9 @@ scsi_hba_init(struct modlinkage *modlp)
}
/*
- * Called by an HBA attach(9E) to allocate a scsi_hba_tran structure. An HBA
- * driver will then initialize the structure and then call
- * scsi_hba_attach_setup.
+ * Called by an HBA attach(9E) to allocate a scsi_hba_tran(9S) structure. An
+ * HBA driver will then initialize the structure and then call
+ * scsi_hba_attach_setup(9F).
*/
/*ARGSUSED*/
scsi_hba_tran_t *
@@ -565,6 +836,16 @@ scsi_hba_tran_alloc(
if (tran) {
tran->tran_interconnect_type = INTERCONNECT_PARALLEL;
+
+ /*
+ * HBA driver called scsi_hba_tran_alloc(), so tran structure
+ * is proper size and unused/newer fields are zero.
+ *
+ * NOTE: We use SCSI_HBA_SCSA_TA as an obtuse form of
+ * versioning to detect old HBA drivers that do not use
+ * scsi_hba_tran_alloc, and would present garbage data
+ * (instead of valid/zero data) for newer tran fields.
+ */
tran->tran_hba_flags |= SCSI_HBA_SCSA_TA;
}
@@ -613,222 +894,50 @@ scsi_tran_ext_free(
}
/*
- * Return the unit-address of an 'iport' node, or NULL for non-iport node.
- */
-char *
-scsi_hba_iport_unit_address(dev_info_t *self)
-{
- /*
- * NOTE: Since 'self' could be a SCSA iport node or a SCSA HBA node,
- * we can't use SCSA flavors: the flavor of a SCSA HBA node is not
- * established/owned by SCSA, it is established by the nexus that
- * created the SCSA HBA node (PCI) as a child.
- *
- * NOTE: If we want to support a node_name other than "iport" for
- * an iport node then we can add support for a "scsa-iport-node-name"
- * property on the SCSA HBA node. A SCSA HBA driver would set this
- * property on the SCSA HBA node prior to using the iport API.
- */
- if (strcmp(ddi_node_name(self), "iport") == 0)
- return (ddi_get_name_addr(self));
- else
- return (NULL);
-}
-
-/*
- * Define a SCSI initiator port (bus/channel) for an HBA card that needs to
- * support multiple SCSI ports, but only has a single HBA devinfo node. This
- * function should be called from the HBA's attach(9E) implementation (when
- * processing the HBA devinfo node attach) after the number of SCSI ports on
- * the card is known or DR handler once DR handler detects a new port added.
- * The function returns 0 on failure and 1 on success.
- *
- * The implementation will add the port value into the "scsi-iports" property
- * value maintained on the HBA node as. These properties are used by the generic
- * scsi bus_config implementation to dynamicaly enumerate the specified iport
- * children. The enumeration code will, on demand, create the appropriate
- * iport children with a "scsi-iport" unit address. This node will bind to the
- * same driver as the HBA node itself. This means that an HBA driver that
- * uses iports should expect probe(9E), attach(9E), and detach(9E) calls on
- * the iport children of the HBA. If configuration for all ports was already
- * done during HBA node attach, the driver should just return DDI_SUCCESS when
- * confronted with an iport node.
- *
- * A maximum of 32 iport ports are supported per HBA devinfo node.
- *
- * A NULL "port" can be used to indicate that the framework should enumerate
- * target children on the HBA node itself, in addition to enumerating target
- * children on any iport nodes declared. There are two reasons that an HBA may
- * wish to have target children enumerated on both the HBA node and iport
- * node(s):
- *
- * o If, in the past, HBA hardware had only a single physical port but now
- * supports multiple physical ports, the updated driver that supports
- * multiple physical ports may want to avoid /devices path upgrade issues
- * by enumerating the first physical port under the HBA instead of as a
- * iport.
- *
- * o Some hardware RAID HBA controllers (mlx, chs, etc) support multiple
- * SCSI physical ports configured so that various physical devices on
- * the physical ports are amalgamated into virtual devices on a virtual
- * port. Amalgamated physical devices no longer appear to the host OS
- * on the physical ports, but other non-amalgamated devices may still be
- * visible on the physical ports. These drivers use a model where the
- * physical ports are iport nodes and the HBA node is the virtual port to
- * the configured virtual devices.
- *
- */
-
-int
-scsi_hba_iport_register(dev_info_t *self, char *port)
-{
- unsigned int ports = 0;
- int rval, i;
- char **iports, **newiports;
-
- ASSERT(self);
- if (self == NULL)
- return (DDI_FAILURE);
-
- rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, self,
- DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "scsi-iports", &iports,
- &ports);
-
- if (ports == SCSI_HBA_MAX_IPORTS) {
- ddi_prop_free(iports);
- return (DDI_FAILURE);
- }
-
- if (rval == DDI_PROP_SUCCESS) {
- for (i = 0; i < ports; i++) {
- if (strcmp(port, iports[i]) == 0) {
- ddi_prop_free(iports);
- return (DDI_SUCCESS);
- }
- }
- }
-
- newiports = kmem_alloc((sizeof (char *) * (ports + 1)), KM_SLEEP);
-
- for (i = 0; i < ports; i++) {
- newiports[i] = strdup(iports[i]);
- }
- newiports[ports] = strdup(port);
- ports++;
-
- rval = 1;
-
- if (ddi_prop_update_string_array(DDI_DEV_T_NONE, self,
- "scsi-iports", newiports, ports) != DDI_PROP_SUCCESS) {
- SCSI_HBA_LOG((_LOG(WARN), self, NULL,
- "Failed to establish scsi-iport %s", port));
- rval = DDI_FAILURE;
- } else {
- rval = DDI_SUCCESS;
- }
-
- /* If there is iport exist, free property */
- if (ports > 1)
- ddi_prop_free(iports);
- for (i = 0; i < ports; i++) {
- strfree(newiports[i]);
- }
- kmem_free(newiports, (sizeof (char *)) * ports);
-
- return (rval);
-}
-
-/*
- * Check if the HBA is with scsi-iport under it
+ * Obsolete: Called by an HBA to attach an instance of the driver
+ * Implement this older interface in terms of the new.
*/
+/*ARGSUSED*/
int
-scsi_hba_iport_exist(dev_info_t *self)
-{
- unsigned int ports = 0;
- char **iports;
- int rval;
-
- rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, self,
- DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "scsi-iports", &iports,
- &ports);
-
- if (rval != DDI_PROP_SUCCESS)
- return (0);
-
- /* If there is now at least 1 iport, then iports is valid */
- if (ports > 0) {
- rval = 1;
- } else
- rval = 0;
- ddi_prop_free(iports);
-
- return (rval);
-}
-
-dev_info_t *
-scsi_hba_iport_find(dev_info_t *self, char *portnm)
+scsi_hba_attach(
+ dev_info_t *self,
+ ddi_dma_lim_t *hba_lim,
+ scsi_hba_tran_t *tran,
+ int flags,
+ void *hba_options)
{
- char *addr = NULL;
- char **iports;
- unsigned int num_iports = 0;
- int rval = DDI_FAILURE;
- int i = 0;
- dev_info_t *child = NULL;
-
- /* check to see if this is an HBA that defined scsi iports */
- rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, self,
- DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "scsi-iports", &iports,
- &num_iports);
-
- if (rval != DDI_SUCCESS) {
- return (NULL);
- }
- ASSERT(num_iports > 0);
-
- /* check to see if this port was registered */
- for (i = 0; i < num_iports; i++) {
- if (strcmp(iports[i], portnm) == 0)
- break;
- }
-
- if (i == num_iports) {
- child = NULL;
- goto out;
- }
+ ddi_dma_attr_t hba_dma_attr;
- addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
- (void) sprintf(addr, "iport@%s", portnm);
- rval = ndi_devi_config_one(self, addr, &child, NDI_NO_EVENT);
- kmem_free(addr, SCSI_MAXNAMELEN);
+ bzero(&hba_dma_attr, sizeof (ddi_dma_attr_t));
+ hba_dma_attr.dma_attr_burstsizes = hba_lim->dlim_burstsizes;
+ hba_dma_attr.dma_attr_minxfer = hba_lim->dlim_minxfer;
- if (rval != DDI_SUCCESS) {
- child = NULL;
- }
-out:
- ddi_prop_free(iports);
- return (child);
+ return (scsi_hba_attach_setup(self, &hba_dma_attr, tran, flags));
}
/*
* Common nexus teardown code: used by both scsi_hba_detach() on SCSA HBA node
- * and iport_devctl_uninitchild() on a SCSA HBA iport node (and for failure
- * cleanup). Undo scsa_nexus_setup in reverse order.
+ * and iport_postdetach_tran_scsi_device() on a SCSA HBA iport node (and for
+ * failure cleanup). Undo scsa_nexus_setup in reverse order.
+ *
+ * NOTE: Since we are in the Solaris IO framework, we can depend on
+ * undocumented cleanup operations performed by other parts of the framework:
+ * like detach_node() calling ddi_prop_remove_all() and
+ * ddi_remove_minor_node(,NULL).
*/
static void
scsa_nexus_teardown(dev_info_t *self, scsi_hba_tran_t *tran)
{
- if ((self == NULL) || (tran == NULL))
- return;
/* Teardown FMA. */
if (tran->tran_hba_flags & SCSI_HBA_SCSA_FM) {
ddi_fm_fini(self);
- tran->tran_hba_flags &= SCSI_HBA_SCSA_FM;
+ tran->tran_hba_flags &= ~SCSI_HBA_SCSA_FM;
}
}
/*
* Common nexus setup code: used by both scsi_hba_attach_setup() on SCSA HBA
- * node and iport_devctl_initchild() on a SCSA HBA iport node.
+ * node and iport_preattach_tran_scsi_device() on a SCSA HBA iport node.
*
* This code makes no assumptions about tran use by scsi_device children.
*/
@@ -853,34 +962,35 @@ scsa_nexus_setup(dev_info_t *self, scsi_hba_tran_t *tran)
tran->tran_fm_capable = ddi_prop_get_int(DDI_DEV_T_ANY, self,
DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
"fm-capable", scsi_fm_capable);
+
/*
* If an HBA is *not* doing its own fma support by calling
- * ddi_fm_init() prior to scsi_hba_attach_setup(), we provide a
- * minimal common SCSA implementation so that scsi_device children
- * can generate ereports via scsi_fm_ereport_post(). We use
- * ddi_fm_capable() to detect an HBA calling ddi_fm_init() prior to
- * scsi_hba_attach_setup().
+ * ddi_fm_init() prior to scsi_hba_attach_setup(), we provide a minimal
+ * common SCSA implementation so that scsi_device children can generate
+ * ereports via scsi_fm_ereport_post(). We use ddi_fm_capable() to
+ * detect an HBA calling ddi_fm_init() prior to scsi_hba_attach_setup().
*/
if (tran->tran_fm_capable &&
(ddi_fm_capable(self) == DDI_FM_NOT_CAPABLE)) {
/*
- * We are capable of something, pass our capabilities up
- * the tree, but use a local variable so our parent can't
- * limit our capabilities (we don't want our parent to
- * clear DDI_FM_EREPORT_CAPABLE).
+ * We are capable of something, pass our capabilities up the
+ * tree, but use a local variable so our parent can't limit
+ * our capabilities (we don't want our parent to clear
+ * DDI_FM_EREPORT_CAPABLE).
*
- * NOTE: iblock cookies are not important because scsi
- * HBAs always interrupt below LOCK_LEVEL.
+ * NOTE: iblock cookies are not important because scsi HBAs
+ * always interrupt below LOCK_LEVEL.
*/
capable = tran->tran_fm_capable;
ddi_fm_init(self, &capable, NULL);
/*
- * Set SCSI_HBA_SCSA_FM bit to mark us as usiung the
- * common minimal SCSA fm implementation - we called
- * ddi_fm_init(), so we are responsible for calling
- * ddi_fm_fini() in scsi_hba_detach().
- * NOTE: if ddi_fm_init fails in any reason, SKIP.
+ * Set SCSI_HBA_SCSA_FM bit to mark us as using the common
+ * minimal SCSA fm implementation - we called ddi_fm_init(),
+ * so we are responsible for calling ddi_fm_fini() in
+ * scsi_hba_detach().
+ *
+ * NOTE: if ddi_fm_init fails to establish handle, SKIP cleanup.
*/
if (DEVI(self)->devi_fmhdl)
tran->tran_hba_flags |= SCSI_HBA_SCSA_FM;
@@ -893,40 +1003,53 @@ scsa_nexus_setup(dev_info_t *self, scsi_hba_tran_t *tran)
INST2DEVCTL(ddi_get_instance(self)), DDI_NT_SCSI_NEXUS, 0) !=
DDI_SUCCESS))) {
SCSI_HBA_LOG((_LOG(WARN), self, NULL,
- "can't create devctl minor nodes"));
- scsa_nexus_teardown(self, tran);
- return (DDI_FAILURE);
+ "can't create :devctl minor node"));
+ goto fail;
}
return (DDI_SUCCESS);
+
+fail: scsa_nexus_teardown(self, tran);
+ return (DDI_FAILURE);
}
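To make the ddi_fm_capable() check above concrete, a sketch of what an HBA that handles its own FMA would do before calling scsi_hba_attach_setup(), so the minimal SCSA setup here is skipped; the xhba_* names are hypothetical.

/* Hypothetical driver-side FMA setup, not part of this change. */
static int			xhba_fm_capable;
static ddi_iblock_cookie_t	xhba_fm_ibc;

static void
xhba_fm_init(dev_info_t *self)
{
	/* Read fm-capable, defaulting to ereport capability only. */
	xhba_fm_capable = ddi_prop_get_int(DDI_DEV_T_ANY, self,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "fm-capable",
	    DDI_FM_EREPORT_CAPABLE);

	/* Registering here makes ddi_fm_capable() != DDI_FM_NOT_CAPABLE. */
	if (xhba_fm_capable != DDI_FM_NOT_CAPABLE)
		ddi_fm_init(self, &xhba_fm_capable, &xhba_fm_ibc);
}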
/*
- * Common tran teardown code: used by iport_devctl_uninitchild() on a SCSA HBA
- * iport node and (possibly) by scsi_hba_detach() on SCSA HBA node (and for
- * failure cleanup). Undo scsa_tran_setup in reverse order.
+ * Common tran teardown code: used by iport_postdetach_tran_scsi_device() on a
+ * SCSA HBA iport node and (possibly) by scsi_hba_detach() on SCSA HBA node
+ * (and for failure cleanup). Undo scsa_tran_setup in reverse order.
+ *
+ * NOTE: Since we are in the Solaris IO framework, we can depend on
+ * undocumented cleanup operations performed by other parts of the framework:
+ * like detach_node() calling ddi_prop_remove_all() and
+ * ddi_remove_minor_node(,NULL).
*/
-/*ARGSUSED*/
static void
scsa_tran_teardown(dev_info_t *self, scsi_hba_tran_t *tran)
{
- if (tran == NULL)
- return;
tran->tran_iport_dip = NULL;
- /*
- * In the future, if PHCI was registered in the SCSA
- * scsa_tran_teardown is able to unregiter PHCI
- */
+ /* Teardown pHCI registration */
+ if (tran->tran_hba_flags & SCSI_HBA_SCSA_PHCI) {
+ (void) mdi_phci_unregister(self, 0);
+ tran->tran_hba_flags &= ~SCSI_HBA_SCSA_PHCI;
+ }
}
+/*
+ * Common tran setup code: used by iport_preattach_tran_scsi_device() on a
+ * SCSA HBA iport node and (possibly) by scsi_hba_attach_setup() on SCSA HBA
+ * node.
+ */
static int
scsa_tran_setup(dev_info_t *self, scsi_hba_tran_t *tran)
{
int scsa_minor;
int id;
+ char *scsi_binding_set;
static const char *interconnect[] = INTERCONNECT_TYPE_ASCII;
+ SCSI_HBA_LOG((_LOG_TRACE, self, NULL, __func__));
+
+ /* If SCSA is responsible for minor nodes, create ":scsi" */
scsa_minor = (ddi_get_driver(self)->devo_cb_ops->cb_open ==
scsi_hba_open) ? 1 : 0;
@@ -934,8 +1057,7 @@ scsa_tran_setup(dev_info_t *self, scsi_hba_tran_t *tran)
INST2SCSI(ddi_get_instance(self)),
DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS)) {
SCSI_HBA_LOG((_LOG(WARN), self, NULL,
- "can't create scsi minor nodes"));
- scsa_nexus_teardown(self, tran);
+ "can't create :scsi minor node"));
goto fail;
}
@@ -943,36 +1065,34 @@ scsa_tran_setup(dev_info_t *self, scsi_hba_tran_t *tran)
* If the property does not already exist on self then see if we can
* pull it from further up the tree and define it on self. If the
* property does not exist above (including options.conf) then use the
- * default value specified (global variable).
+ * default value specified (global variable). We pull things down from
+ * above for faster "DDI_PROP_NOTPROM | DDI_PROP_DONTPASS" runtime
+ * access.
+ *
+ * Future: Should we avoid creating properties when value == global?
*/
-#define CONFIG_INT_PROP(s, p, dv) { \
- if ((ddi_prop_exists(DDI_DEV_T_ANY, s, \
- DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, p) == 0) && \
- (ndi_prop_update_int(DDI_DEV_T_NONE, s, p, \
- ddi_prop_get_int(DDI_DEV_T_ANY, ddi_get_parent(s), \
- DDI_PROP_NOTPROM, p, dv)) != DDI_PROP_SUCCESS)) \
- SCSI_HBA_LOG((_LOG(WARN), NULL, s, \
- "can't create property '%s'", p)); \
+#define CONFIG_INT_PROP(s, p, dv) { \
+ if ((ddi_prop_exists(DDI_DEV_T_ANY, s, \
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, p) == 0) && \
+ (ndi_prop_update_int(DDI_DEV_T_NONE, s, p, \
+ ddi_prop_get_int(DDI_DEV_T_ANY, ddi_get_parent(s), \
+ DDI_PROP_NOTPROM, p, dv)) != DDI_PROP_SUCCESS)) \
+ SCSI_HBA_LOG((_LOG(WARN), NULL, s, \
+ "can't create property '%s'", p)); \
}
- /*
- * Attach scsi configuration property parameters not already defined
- * via driver.conf to this instance of the HBA using global variable
- * value. Pulling things down from above us to use
- * "DDI_PROP_NOTPROM | DDI_PROP_DONTPASS" for faster access.
- */
+ /* Decorate with scsi configuration properties */
+ CONFIG_INT_PROP(self, "scsi-enumeration", scsi_enumeration);
CONFIG_INT_PROP(self, "scsi-options", scsi_options);
CONFIG_INT_PROP(self, "scsi-reset-delay", scsi_reset_delay);
- CONFIG_INT_PROP(self, "scsi-tag-age-limit", scsi_tag_age_limit);
CONFIG_INT_PROP(self, "scsi-watchdog-tick", scsi_watchdog_tick);
CONFIG_INT_PROP(self, "scsi-selection-timeout", scsi_selection_timeout);
+ CONFIG_INT_PROP(self, "scsi-tag-age-limit", scsi_tag_age_limit);
/*
- * cache the scsi-initiator-id as an property defined further up
- * the tree or defined by OBP on the HBA node so can use
- * "DDI_PROP_NOTPROM | DDI_PROP_DONTPASS" during enumeration.
- * We perform the same type of operation that an HBA driver would
- * use to obtain the 'initiator-id' capability.
+ * Pull down the scsi-initiator-id from further up the tree, or as
+ * defined by OBP. Place on node for faster access. NOTE: there is
+ * some confusion about what the name of the property should be.
*/
id = ddi_prop_get_int(DDI_DEV_T_ANY, self, 0, "initiator-id", -1);
if (id == -1)
@@ -981,7 +1101,10 @@ scsa_tran_setup(dev_info_t *self, scsi_hba_tran_t *tran)
if (id != -1)
CONFIG_INT_PROP(self, "scsi-initiator-id", id);
- /* Establish 'initiator-interconnect-type' */
+ /*
+ * If we are responsible for tran allocation, establish
+ * 'initiator-interconnect-type'.
+ */
if ((tran->tran_hba_flags & SCSI_HBA_SCSA_TA) &&
(tran->tran_interconnect_type > 0) &&
(tran->tran_interconnect_type < INTERCONNECT_MAX)) {
@@ -992,46 +1115,66 @@ scsa_tran_setup(dev_info_t *self, scsi_hba_tran_t *tran)
SCSI_HBA_LOG((_LOG(WARN), self, NULL,
"failed to establish "
"'initiator-interconnect-type'"));
- return (DDI_FAILURE);
+ goto fail;
}
}
- tran->tran_iport_dip = self;
/*
- * In the future SCSA v3, PHCI could be registered in the SCSA
- * here.
+ * The 'scsi-binding-set' property can be defined in driver.conf
+ * files of legacy drivers on an as-needed basis. If 'scsi-binding-set'
+ * is not driver.conf defined, and the HBA is not implementing its own
+ * private bus_config, we define scsi-binding-set to the default
+ * 'spi' legacy value.
+ *
+ * NOTE: This default 'spi' value will be deleted if an HBA driver
+ * ends up using the scsi_hba_tgtmap_create() enumeration services.
+ *
+ * NOTE: If we were ever to decide to derive 'scsi-binding-set' from
+ * the IEEE-1275 'device_type' property then this is where that code
+ * should go - there is not enough consistency in 'device_type' to do
+ * this correctly at this point in time.
*/
- return (DDI_SUCCESS);
-fail:
- scsa_tran_teardown(self, tran);
- return (DDI_FAILURE);
-}
-
-/*
- * Obsolete: Called by an HBA to attach an instance of the driver
- * Implement this older interface in terms of the new.
- */
-/*ARGSUSED*/
-int
-scsi_hba_attach(
- dev_info_t *self,
- ddi_dma_lim_t *hba_lim,
- scsi_hba_tran_t *tran,
- int flags,
- void *hba_options)
-{
- ddi_dma_attr_t hba_dma_attr;
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, self,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
+ &scsi_binding_set) == DDI_PROP_SUCCESS) {
+ SCSI_HBA_LOG((_LOG(2), NULL, self,
+ "external 'scsi-binding-set' \"%s\"", scsi_binding_set));
+ ddi_prop_free(scsi_binding_set);
+ } else if (scsi_binding_set_spi &&
+ ((tran->tran_bus_config == NULL) ||
+ (tran->tran_bus_config == scsi_hba_bus_config_spi))) {
+ if (ndi_prop_update_string(DDI_DEV_T_NONE, self,
+ "scsi-binding-set", scsi_binding_set_spi) !=
+ DDI_PROP_SUCCESS) {
+ SCSI_HBA_LOG((_LOG(WARN), self, NULL,
+ "failed to establish 'scsi_binding_set' default"));
+ goto fail;
+ }
+ SCSI_HBA_LOG((_LOG(2), NULL, self,
+ "default 'scsi-binding-set' \"%s\"", scsi_binding_set_spi));
+ } else
+ SCSI_HBA_LOG((_LOG(2), NULL, self,
+ "no 'scsi-binding-set'"));
- bzero(&hba_dma_attr, sizeof (ddi_dma_attr_t));
+ /*
+ * If SCSI_HBA_TRAN_PHCI is set, take care of pHCI registration of the
+ * initiator.
+ */
+ if ((tran->tran_hba_flags & SCSI_HBA_TRAN_PHCI) &&
+ (mdi_phci_register(MDI_HCI_CLASS_SCSI, self, 0) == MDI_SUCCESS))
+ tran->tran_hba_flags |= SCSI_HBA_SCSA_PHCI;
- hba_dma_attr.dma_attr_burstsizes = hba_lim->dlim_burstsizes;
- hba_dma_attr.dma_attr_minxfer = hba_lim->dlim_minxfer;
+ /* NOTE: tran_hba_dip is for DMA operation at the HBA node level */
+ tran->tran_iport_dip = self; /* for iport association */
+ return (DDI_SUCCESS);
- return (scsi_hba_attach_setup(self, &hba_dma_attr, tran, flags));
+fail: scsa_tran_teardown(self, tran);
+ return (DDI_FAILURE);
}
/*
- * Called by an HBA to attach an instance of the driver.
+ * Called by a SCSA HBA driver to attach an instance of the driver to
+ * SCSA HBA node enumerated by PCI.
*/
int
scsi_hba_attach_setup(
@@ -1040,78 +1183,100 @@ scsi_hba_attach_setup(
scsi_hba_tran_t *tran,
int flags)
{
+ int len;
+ char cache_name[96];
+
SCSI_HBA_LOG((_LOG_TRACE, self, NULL, __func__));
- /* Verify that we are a driver */
+ /*
+ * Verify that we are a driver so other code does not need to
+ * check for NULL ddi_get_driver() result.
+ */
if (ddi_get_driver(self) == NULL)
return (DDI_FAILURE);
+ /*
+ * Verify that we are called on a SCSA HBA node (function enumerated
+ * by PCI), not on an iport node.
+ */
ASSERT(scsi_hba_iport_unit_address(self) == NULL);
if (scsi_hba_iport_unit_address(self))
- return (DDI_FAILURE);
+ return (DDI_FAILURE); /* self can't be an iport */
+ /* Caller must provide the tran. */
ASSERT(tran);
if (tran == NULL)
return (DDI_FAILURE);
/*
* Verify correct scsi_hba_tran_t form:
- * o both or none of tran_get_name/tran_get_addr.
+ *
+ * o Both or none of tran_get_name/tran_get_bus_addr.
+ * NOTE: Older SCSA HBA drivers for SCSI transports with addressing
+ * that did not fit the SPI "struct scsi_address" model were required
+ * to implement tran_get_name and tran_get_bus_addr. This is no longer
+ * true - modern transport drivers should now use common SCSA
+ * enumeration services. The SCSA enumeration code will represent
+ * the unit-address using well-known address properties
+ * (SCSI_ADDR_PROP_TARGET_PORT, SCSI_ADDR_PROP_LUN64) during
+ * devinfo/pathinfo node creation. The HBA driver can obtain values
+ * using scsi_device_prop_lookup_*() from its tran_tgt_init(9E).
+ *
*/
- if ((tran->tran_get_name == NULL) ^
- (tran->tran_get_bus_addr == NULL)) {
+ if ((tran->tran_get_name == NULL) ^ (tran->tran_get_bus_addr == NULL)) {
+ SCSI_HBA_LOG((_LOG(WARN), self, NULL,
+ "should support both or neither: "
+ "tran_get_name, tran_get_bus_addr"));
return (DDI_FAILURE);
}
/*
- * Save all the important HBA information that must be accessed
- * later by scsi_hba_bus_ctl(), and scsi_hba_map().
+ * Establish the devinfo context of this tran structure, preserving
+ * knowledge of how the tran was allocated.
*/
- tran->tran_hba_dip = self;
- tran->tran_hba_flags &= SCSI_HBA_SCSA_TA;
- tran->tran_hba_flags |= (flags & ~SCSI_HBA_SCSA_TA);
+ tran->tran_hba_dip = self; /* for DMA */
+ tran->tran_hba_flags = (flags & ~SCSI_HBA_SCSA_TA) |
+ (tran->tran_hba_flags & SCSI_HBA_SCSA_TA);
/* Establish flavor of transport (and ddi_get_driver_private()) */
ndi_flavorv_set(self, SCSA_FLAVOR_SCSI_DEVICE, tran);
/*
- * Note: we only need dma_attr_minxfer and dma_attr_burstsizes
- * from the DMA attributes. scsi_hba_attach(9f) only
- * guarantees that these two fields are initialized properly.
- * If this changes, be sure to revisit the implementation
- * of scsi_hba_attach(9F).
+ * Note: We only need dma_attr_minxfer and dma_attr_burstsizes
+ * from the DMA attributes. scsi_hba_attach(9f) only guarantees
+ * that these two fields are initialized properly. If this
+ * changes, be sure to revisit the implementation of
+ * scsi_hba_attach(9F).
*/
(void) memcpy(&tran->tran_dma_attr, hba_dma_attr,
sizeof (ddi_dma_attr_t));
- /* create kmem_cache, if needed */
+ /* Create tran_setup_pkt(9E) kmem_cache. */
if (tran->tran_setup_pkt) {
- char tmp[96];
- int hbalen;
- int cmdlen = 0;
- int statuslen = 0;
-
ASSERT(tran->tran_init_pkt == NULL);
ASSERT(tran->tran_destroy_pkt == NULL);
+ if (tran->tran_init_pkt || tran->tran_destroy_pkt)
+ goto fail;
tran->tran_init_pkt = scsi_init_cache_pkt;
tran->tran_destroy_pkt = scsi_free_cache_pkt;
tran->tran_sync_pkt = scsi_sync_cache_pkt;
tran->tran_dmafree = scsi_cache_dmafree;
- hbalen = ROUNDUP(tran->tran_hba_len);
- if (flags & SCSI_HBA_TRAN_CDB)
- cmdlen = ROUNDUP(DEFAULT_CDBLEN);
- if (flags & SCSI_HBA_TRAN_SCB)
- statuslen = ROUNDUP(DEFAULT_SCBLEN);
+ len = sizeof (struct scsi_pkt_cache_wrapper);
+ len += ROUNDUP(tran->tran_hba_len);
+ if (tran->tran_hba_flags & SCSI_HBA_TRAN_CDB)
+ len += ROUNDUP(DEFAULT_CDBLEN);
+ if (tran->tran_hba_flags & SCSI_HBA_TRAN_SCB)
+ len += ROUNDUP(DEFAULT_SCBLEN);
+
+ (void) snprintf(cache_name, sizeof (cache_name),
+ "pkt_cache_%s_%d", ddi_driver_name(self),
+ ddi_get_instance(self));
- (void) snprintf(tmp, sizeof (tmp), "pkt_cache_%s_%d",
- ddi_driver_name(self), ddi_get_instance(self));
- tran->tran_pkt_cache_ptr = kmem_cache_create(tmp,
- sizeof (struct scsi_pkt_cache_wrapper) +
- hbalen + cmdlen + statuslen, 8,
- scsi_hba_pkt_constructor, scsi_hba_pkt_destructor,
- NULL, tran, NULL, 0);
+ tran->tran_pkt_cache_ptr = kmem_cache_create(
+ cache_name, len, 8, scsi_hba_pkt_constructor,
+ scsi_hba_pkt_destructor, NULL, tran, NULL, 0);
}
/* Perform node setup independent of initiator role */
@@ -1144,25 +1309,24 @@ scsi_hba_attach_setup(
return (DDI_SUCCESS);
-fail:
- (void) scsi_hba_detach(self);
+fail: (void) scsi_hba_detach(self);
return (DDI_FAILURE);
}
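A condensed sketch of the attach(9E) flow this function serves, using a hypothetical "xhba" driver. The xhba_* entry points, struct xhba_cmd and xhba_dma_attr are placeholders; the tran fields and flags shown with the cache-creation code above are used as-is, and the remaining assignments are assumptions.

/* Hypothetical "xhba" driver attach, not part of this change. */
static int
xhba_attach(dev_info_t *self, ddi_attach_cmd_t cmd)
{
	scsi_hba_tran_t	*tran;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/* Allocate a zeroed, correctly sized tran (SCSI_HBA_SCSA_TA set). */
	if ((tran = scsi_hba_tran_alloc(self, SCSI_HBA_CANSLEEP)) == NULL)
		return (DDI_FAILURE);

	tran->tran_hba_len = sizeof (struct xhba_cmd);	/* per-pkt private */
	tran->tran_tgt_init = xhba_tran_tgt_init;
	tran->tran_start = xhba_tran_start;
	tran->tran_setup_pkt = xhba_tran_setup_pkt;
	tran->tran_teardown_pkt = xhba_tran_teardown_pkt;

	/* Ask the framework to add CDB and SCB space to the pkt cache. */
	if (scsi_hba_attach_setup(self, &xhba_dma_attr, tran,
	    SCSI_HBA_TRAN_CDB | SCSI_HBA_TRAN_SCB) != DDI_SUCCESS) {
		scsi_hba_tran_free(tran);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}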
/*
- * Called by an HBA to detach an instance of the driver
+ * Called by an HBA to detach an instance of the driver. This may be called
+ * for SCSA HBA nodes and for SCSA iport nodes.
*/
int
scsi_hba_detach(dev_info_t *self)
{
- scsi_hba_tran_t *tran;
-
- SCSI_HBA_LOG((_LOG_TRACE, self, NULL, __func__));
+ scsi_hba_tran_t *tran;
ASSERT(scsi_hba_iport_unit_address(self) == NULL);
if (scsi_hba_iport_unit_address(self))
- return (DDI_FAILURE);
+ return (DDI_FAILURE); /* self can't be an iport */
+ /* Check all error return conditions upfront */
tran = ndi_flavorv_get(self, SCSA_FLAVOR_SCSI_DEVICE);
ASSERT(tran);
if (tran == NULL)
@@ -1176,26 +1340,23 @@ scsi_hba_detach(dev_info_t *self)
scsa_tran_teardown(self, tran);
scsa_nexus_teardown(self, tran);
- /*
- * XXX - scsi_transport.h states that these data fields should not be
- * referenced by the HBA. However, to be consistent with
- * scsi_hba_attach(), they are being reset.
- */
- tran->tran_hba_dip = (dev_info_t *)NULL;
- tran->tran_hba_flags = 0;
- (void) memset(&tran->tran_dma_attr, 0, sizeof (ddi_dma_attr_t));
-
- if (tran->tran_pkt_cache_ptr != NULL) {
+ /* Teardown tran_setup_pkt(9E) kmem_cache. */
+ if (tran->tran_pkt_cache_ptr) {
kmem_cache_destroy(tran->tran_pkt_cache_ptr);
tran->tran_pkt_cache_ptr = NULL;
}
+ (void) memset(&tran->tran_dma_attr, 0, sizeof (ddi_dma_attr_t));
+
/* Teardown flavor of transport (and ddi_get_driver_private()) */
ndi_flavorv_set(self, SCSA_FLAVOR_SCSI_DEVICE, NULL);
+ tran->tran_hba_dip = NULL;
+
return (DDI_SUCCESS);
}
+
/*
* Called by an HBA from _fini()
*/
@@ -1221,11 +1382,8 @@ scsi_hba_fini(struct modlinkage *modlp)
/*
* SAS specific functions
*/
-/*ARGSUSED*/
sas_hba_tran_t *
-sas_hba_tran_alloc(
- dev_info_t *self,
- int flags)
+sas_hba_tran_alloc(dev_info_t *self)
{
/* allocate SCSA flavors for self */
ndi_flavorv_alloc(self, SCSA_NFLAVORS);
@@ -1233,8 +1391,7 @@ sas_hba_tran_alloc(
}
void
-sas_hba_tran_free(
- sas_hba_tran_t *tran)
+sas_hba_tran_free(sas_hba_tran_t *tran)
{
kmem_free(tran, sizeof (sas_hba_tran_t));
}
@@ -1246,7 +1403,8 @@ sas_hba_attach_setup(
{
ASSERT(scsi_hba_iport_unit_address(self) == NULL);
if (scsi_hba_iport_unit_address(self))
- return (DDI_FAILURE);
+ return (DDI_FAILURE); /* self can't be an iport */
+
/*
* The owner of this devinfo_t was responsible
* for informing the framework already about
@@ -1261,7 +1419,7 @@ sas_hba_detach(dev_info_t *self)
{
ASSERT(scsi_hba_iport_unit_address(self) == NULL);
if (scsi_hba_iport_unit_address(self))
- return (DDI_FAILURE);
+ return (DDI_FAILURE); /* self can't be an iport */
ndi_flavorv_set(self, SCSA_FLAVOR_SMP, NULL);
return (DDI_SUCCESS);
@@ -1270,102 +1428,283 @@ sas_hba_detach(dev_info_t *self)
/*
* SMP child flavored functions
*/
+static int
+smp_busctl_ua(dev_info_t *child, char *addr, int maxlen)
+{
+ char *tport;
+ char *wwn;
+
+ /* limit ndi_devi_findchild_by_callback to expected flavor */
+ if (ndi_flavor_get(child) != SCSA_FLAVOR_SMP)
+ return (DDI_FAILURE);
+
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
+ SCSI_ADDR_PROP_TARGET_PORT, &tport) == DDI_SUCCESS) {
+ (void) snprintf(addr, maxlen, "%s", tport);
+ ddi_prop_free(tport);
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * NOTE: the following code should be deleted when mpt is changed to
+ * use SCSI_ADDR_PROP_TARGET_PORT instead of SMP_WWN.
+ */
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
+ SMP_WWN, &wwn) == DDI_SUCCESS) {
+ (void) snprintf(addr, maxlen, "w%s", wwn);
+ ddi_prop_free(wwn);
+ return (DDI_SUCCESS);
+ }
+ return (DDI_FAILURE);
+}
static int
smp_busctl_reportdev(dev_info_t *child)
{
- dev_info_t *self = ddi_get_parent(child);
- char *smp_wwn;
+ dev_info_t *self = ddi_get_parent(child);
+ char *tport;
+ char *wwn;
if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
- SMP_WWN, &smp_wwn) != DDI_SUCCESS) {
- return (DDI_FAILURE);
+ SCSI_ADDR_PROP_TARGET_PORT, &tport) == DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG_NF(CONT), "?%s%d at %s%d: target-port %s",
+ ddi_driver_name(child), ddi_get_instance(child),
+ ddi_driver_name(self), ddi_get_instance(self), tport));
+ ddi_prop_free(tport);
+ return (DDI_SUCCESS);
}
- SCSI_HBA_LOG((_LOG_NF(CONT), "?%s%d at %s%d: wwn %s\n",
- ddi_driver_name(child), ddi_get_instance(child),
- ddi_driver_name(self), ddi_get_instance(self), smp_wwn));
- ddi_prop_free(smp_wwn);
- return (DDI_SUCCESS);
+
+ /*
+ * NOTE: the following code should be deleted when mpt is changed to
+ * use SCSI_ADDR_PROP_TARGET_PORT instead of SMP_WWN.
+ */
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
+ SMP_WWN, &wwn) == DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG_NF(CONT), "?%s%d at %s%d: wwn %s",
+ ddi_driver_name(child), ddi_get_instance(child),
+ ddi_driver_name(self), ddi_get_instance(self), wwn));
+ ddi_prop_free(wwn);
+ return (DDI_SUCCESS);
+ }
+ return (DDI_FAILURE);
}
static int
smp_busctl_initchild(dev_info_t *child)
{
dev_info_t *self = ddi_get_parent(child);
- sas_hba_tran_t *tran = ndi_flavorv_get(self, SCSA_FLAVOR_SMP);
- struct smp_device *smp;
+ sas_hba_tran_t *tran;
+ dev_info_t *dup;
char addr[SCSI_MAXNAMELEN];
- dev_info_t *ndip;
- char *smp_wwn = NULL;
+ struct smp_device *smp;
uint64_t wwn;
+ tran = ndi_flavorv_get(self, SCSA_FLAVOR_SMP);
ASSERT(tran);
if (tran == NULL)
return (DDI_FAILURE);
- smp = kmem_zalloc(sizeof (struct smp_device), KM_SLEEP);
- smp->dip = child;
- smp->smp_addr.a_hba_tran = tran;
+ if (smp_busctl_ua(child, addr, sizeof (addr)) != DDI_SUCCESS)
+ return (DDI_NOT_WELL_FORMED);
+ if (scsi_wwnstr_to_wwn(addr, &wwn))
+ return (DDI_NOT_WELL_FORMED);
- if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
- DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
- SMP_WWN, &smp_wwn) != DDI_SUCCESS) {
- goto failure;
+ /* Prevent duplicate nodes. */
+ dup = ndi_devi_findchild_by_callback(self, ddi_node_name(child), addr,
+ smp_busctl_ua);
+ if (dup) {
+ ASSERT(ndi_flavor_get(dup) == SCSA_FLAVOR_SMP);
+ if (ndi_flavor_get(dup) != SCSA_FLAVOR_SMP) {
+ SCSI_HBA_LOG((_LOG(1), NULL, child,
+ "init failed: %s@%s: not SMP flavored",
+ ddi_node_name(child), addr));
+ return (DDI_FAILURE);
+ }
+ if (dup != child) {
+ SCSI_HBA_LOG((_LOG(4), NULL, child,
+ "init failed: %s@%s: detected duplicate %p",
+ ddi_node_name(child), addr, (void *)dup));
+ return (DDI_FAILURE);
+ }
}
- if (scsi_wwnstr_to_wwn(smp_wwn, &wwn)) {
- goto failure;
- }
- bcopy(&wwn, smp->smp_addr.a_wwn, SAS_WWN_BYTE_SIZE);
- (void) snprintf(addr, SCSI_MAXNAMELEN, "w%s", smp_wwn);
+ /* set the node @addr string */
+ ddi_set_name_addr(child, addr);
- /* Prevent duplicate nodes. */
- ndip = ndi_devi_find(self, ddi_node_name(child), addr);
- if (ndip && (ndip != child)) {
- goto failure;
- }
+ /* Allocate and initialize smp_device. */
+ smp = kmem_zalloc(sizeof (struct smp_device), KM_SLEEP);
+ smp->dip = child;
+ smp->smp_addr.a_hba_tran = tran;
+ bcopy(&wwn, smp->smp_addr.a_wwn, SAS_WWN_BYTE_SIZE);
- ddi_set_name_addr(child, addr);
ddi_set_driver_private(child, smp);
- ddi_prop_free(smp_wwn);
- return (DDI_SUCCESS);
-failure:
- kmem_free(smp, sizeof (struct smp_device));
- if (smp_wwn) {
- ddi_prop_free(smp_wwn);
+ if (tran->tran_smp_init &&
+ ((*tran->tran_smp_init)(self, child, tran, smp) != DDI_SUCCESS)) {
+ kmem_free(smp, sizeof (struct smp_device));
+ if (ndi_dev_is_hotplug_node(child))
+ SCSI_HBA_LOG((_LOG(WARN), NULL, child,
+ "enumeration failed during tran_smp_init"));
+ else
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "enumeration failed during tran_smp_init"));
+ ddi_set_driver_private(child, NULL);
+ ddi_set_name_addr(child, NULL);
+ return (DDI_FAILURE);
}
- return (DDI_FAILURE);
+
+ return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
smp_busctl_uninitchild(dev_info_t *child)
{
+ dev_info_t *self = ddi_get_parent(child);
struct smp_device *smp = ddi_get_driver_private(child);
+ sas_hba_tran_t *tran;
- ASSERT(smp);
- if (smp == NULL)
+ tran = ndi_flavorv_get(self, SCSA_FLAVOR_SMP);
+ ASSERT(smp && tran);
+ if ((smp == NULL) || (tran == NULL))
return (DDI_FAILURE);
+
+ if (tran->tran_smp_free) {
+ (*tran->tran_smp_free) (self, child, tran, smp);
+ }
kmem_free(smp, sizeof (*smp));
ddi_set_driver_private(child, NULL);
ddi_set_name_addr(child, NULL);
return (DDI_SUCCESS);
}
+/* Find an "smp" child at the specified address. */
+static dev_info_t *
+smp_hba_find_child(dev_info_t *self, char *addr)
+{
+ dev_info_t *child;
+
+ /* Search "smp" devinfo child at specified address. */
+ ASSERT(self && DEVI_BUSY_OWNED(self) && addr);
+ for (child = ddi_get_child(self); child;
+ child = ddi_get_next_sibling(child)) {
+ /* skip non-"smp" nodes */
+ if (ndi_flavor_get(child) != SCSA_FLAVOR_SMP)
+ continue;
+
+ /* Attempt initchild to establish unit-address */
+ if (i_ddi_node_state(child) < DS_INITIALIZED)
+ (void) ddi_initchild(self, child);
+
+ /* Verify state and non-NULL unit-address. */
+ if ((i_ddi_node_state(child) < DS_INITIALIZED) ||
+ (ddi_get_name_addr(child) == NULL))
+ continue;
+
+ /* Return "smp" child if unit-address matches. */
+ if (strcmp(ddi_get_name_addr(child), addr) == 0)
+ return (child);
+ }
+ return (NULL);
+}
+
+/*
+ * Search for "smp" child of self at the specified address. If found, online
+ * and return with a hold. Unlike general SCSI configuration, we can assume
+ * the the device is actually there when we are called (i.e., device is
+ * the device is actually there when we are called (i.e., the device is
+ */
+int
+smp_hba_bus_config(dev_info_t *self, char *addr, dev_info_t **childp)
+{
+ dev_info_t *child;
+ int circ;
+
+ ASSERT(self && addr && childp);
+ *childp = NULL;
+
+ /* Search for "smp" child. */
+ scsi_hba_devi_enter(self, &circ);
+ if ((child = smp_hba_find_child(self, addr)) == NULL) {
+ scsi_hba_devi_exit(self, circ);
+ return (NDI_FAILURE);
+ }
+
+ /* Attempt online. */
+ if (ndi_devi_online(child, 0) != NDI_SUCCESS) {
+ scsi_hba_devi_exit(self, circ);
+ return (NDI_FAILURE);
+ }
+
+ /* On success, return with active hold. */
+ ndi_hold_devi(child);
+ scsi_hba_devi_exit(self, circ);
+ *childp = child;
+ return (NDI_SUCCESS);
+}
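
Editor's aside, not part of the patch: the child returned by smp_hba_bus_config() carries an active hold, so a caller is expected to release it once done with the devinfo pointer. A minimal sketch, with a hypothetical helper name and caller context:

	/*
	 * Editor's sketch (not part of the patch): configure an "smp"
	 * child by unit-address and drop the hold when finished.
	 * The helper name and caller context are hypothetical.
	 */
	static void
	example_smp_config(dev_info_t *iport, char *ua)
	{
		dev_info_t	*child;

		if (smp_hba_bus_config(iport, ua, &child) != NDI_SUCCESS)
			return;

		/* ... use child ... */

		ndi_rele_devi(child);	/* drop the hold taken by bus_config */
	}
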
+
+/* Create "smp" child devinfo node at specified unit-address. */
+int
+smp_hba_bus_config_taddr(dev_info_t *self, char *addr)
+{
+ dev_info_t *child;
+ int circ;
+
+ /* Search for "smp" child. */
+ scsi_hba_devi_enter(self, &circ);
+ child = smp_hba_find_child(self, addr);
+ if (child) {
+ /* Child exists, note if this was a new reinsert. */
+ if (ndi_devi_device_insert(child))
+ SCSI_HBA_LOG((_LOGCFG, self, NULL,
+ "devinfo smp@%s device_reinsert", addr));
+
+ scsi_hba_devi_exit(self, circ);
+ return (NDI_SUCCESS);
+ }
+
+ /* Allocate "smp" child devinfo node and establish flavor of child. */
+ ndi_devi_alloc_sleep(self, "smp", DEVI_SID_HP_NODEID, &child);
+ ndi_flavor_set(child, SCSA_FLAVOR_SMP);
+
+ /* Add unit-address property to child. */
+ if (ndi_prop_update_string(DDI_DEV_T_NONE, child,
+ SCSI_ADDR_PROP_TARGET_PORT, addr) != DDI_PROP_SUCCESS) {
+ (void) ndi_devi_free(child);
+ scsi_hba_devi_exit(self, circ);
+ return (NDI_FAILURE);
+ }
+
+ /* Attempt to online the new "smp" node. */
+ (void) ndi_devi_online(child, 0);
+
+ scsi_hba_devi_exit(self, circ);
+ return (NDI_SUCCESS);
+}
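
Editor's aside, not part of the patch: a minimal sketch of how a discovery path with access to this interface might enumerate an expander once its SAS address is known. The helper name, the WWN argument, and the "w%016llx" unit-address form are assumptions for illustration only:

	/*
	 * Editor's sketch (not part of the patch): the helper name, WWN
	 * value, and unit-address formatting are illustrative assumptions.
	 */
	static void
	example_enumerate_expander(dev_info_t *iport, uint64_t wwn)
	{
		char	ua[SCSI_MAXNAMELEN];

		(void) snprintf(ua, sizeof (ua), "w%016llx",
		    (unsigned long long)wwn);

		if (smp_hba_bus_config_taddr(iport, ua) != NDI_SUCCESS)
			cmn_err(CE_WARN, "!smp enumeration failed for %s", ua);
	}
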
+
/*
- * Wrapper to scsi_get_name which takes a devinfo argument instead of a
+ * Wrapper to scsi_ua_get which takes a devinfo argument instead of a
* scsi_device structure.
*/
static int
-scsi_hba_name_child(dev_info_t *child, char *addr, int maxlen)
+scsi_busctl_ua(dev_info_t *child, char *addr, int maxlen)
{
- struct scsi_device *sd = ddi_get_driver_private(child);
+ struct scsi_device *sd;
+
+ /* limit ndi_devi_findchild_by_callback to expected flavor */
+ if (ndi_flavor_get(child) != SCSA_FLAVOR_SCSI_DEVICE)
+ return (DDI_FAILURE);
/* nodes are named by tran_get_name or default "tgt,lun" */
- if (sd && (scsi_get_name(sd, addr, maxlen) == 1))
+ sd = ddi_get_driver_private(child);
+ if (sd && (scsi_ua_get(sd, addr, maxlen) == 1))
return (DDI_SUCCESS);
return (DDI_FAILURE);
@@ -1375,20 +1714,21 @@ static int
scsi_busctl_reportdev(dev_info_t *child)
{
dev_info_t *self = ddi_get_parent(child);
- scsi_hba_tran_t *tran = ddi_get_driver_private(self);
struct scsi_device *sd = ddi_get_driver_private(child);
+ scsi_hba_tran_t *tran;
char ua[SCSI_MAXNAMELEN];
- char ba[SCSI_MAXNAMELEN];
+ char ra[SCSI_MAXNAMELEN];
SCSI_HBA_LOG((_LOG_TRACE, NULL, child, __func__));
+ tran = ndi_flavorv_get(self, SCSA_FLAVOR_SCSI_DEVICE);
ASSERT(tran && sd);
if ((tran == NULL) || (sd == NULL))
return (DDI_FAILURE);
/* get the unit_address and bus_addr information */
- if ((scsi_get_name(sd, ua, sizeof (ua)) == 0) ||
- (scsi_get_bus_addr(sd, ba, sizeof (ba)) == 0)) {
+ if ((scsi_ua_get(sd, ua, sizeof (ua)) == 0) ||
+ (scsi_ua_get_reportdev(sd, ra, sizeof (ra)) == 0)) {
SCSI_HBA_LOG((_LOG(WARN), NULL, child, "REPORTDEV failure"));
return (DDI_FAILURE);
}
@@ -1396,27 +1736,35 @@ scsi_busctl_reportdev(dev_info_t *child)
if (tran->tran_get_name == NULL)
SCSI_HBA_LOG((_LOG_NF(CONT), "?%s%d at %s%d: %s",
ddi_driver_name(child), ddi_get_instance(child),
- ddi_driver_name(self), ddi_get_instance(self), ba));
- else
+ ddi_driver_name(self), ddi_get_instance(self), ra));
+ else if (*ra)
SCSI_HBA_LOG((_LOG_NF(CONT),
"?%s%d at %s%d: unit-address %s: %s",
ddi_driver_name(child), ddi_get_instance(child),
- ddi_driver_name(self), ddi_get_instance(self), ua, ba));
+ ddi_driver_name(self), ddi_get_instance(self), ua, ra));
+ else
+ SCSI_HBA_LOG((_LOG_NF(CONT),
+ "?%s%d at %s%d: unit-address %s",
+ ddi_driver_name(child), ddi_get_instance(child),
+ ddi_driver_name(self), ddi_get_instance(self), ua));
+
return (DDI_SUCCESS);
}
+
/*
* scsi_busctl_initchild is called to initialize the SCSA transport for
* communication with a particular child scsi target device. Successful
* initialization requires properties on the node which describe the address
* of the target device. If the address of the target device can't be
* determined from properties then DDI_NOT_WELL_FORMED is returned. Nodes that
- * are DDI_NOT_WELL_FORMED are considered an implementation artifact.
+ * are DDI_NOT_WELL_FORMED are considered an implementation artifact and
+ * are hidden from devinfo snapshots by calling ndi_devi_set_hidden().
* The child may be one of the following types of devinfo nodes:
*
* OBP node:
* OBP does not enumerate target devices attached a SCSI bus. These
- * template/stub/wildcard nodes are a legacy artifact for support of old
+ * template/stub/wild-card nodes are a legacy artifact for support of old
* driver loading methods. Since they have no properties,
* DDI_NOT_WELL_FORMED will be returned.
*
@@ -1437,8 +1785,16 @@ scsi_busctl_reportdev(dev_info_t *child)
*
* For HBA drivers that implement the deprecated tran_get_name interface,
* "@addr" construction involves having that driver interpret properties via
- * scsi_hba_name_child -> scsi_get_name -> tran_get_name: there is no
- * requiremnt for the property names to be well-known.
+ * scsi_busctl_ua -> scsi_ua_get -> tran_get_name: there is no
+ * requirement for the property names to be well-known.
+ *
+ * NOTE: We don't currently support "merge". When this support is added a
+ * specific property, like "unit-address", should *always* identify a
+ * driver.conf node that needs to be merged into a specific SID node. When
+ * enumeration is enabled, a .conf node without the "unit-address" property
+ * should be ignored. The best way to establish the "unit-address" property
+ * would be to have the system assign parent= and unit-address= from an
+ * instance=# driver.conf entry (by using the instance tree).
*/
static int
scsi_busctl_initchild(dev_info_t *child)
@@ -1448,9 +1804,10 @@ scsi_busctl_initchild(dev_info_t *child)
scsi_hba_tran_t *tran;
struct scsi_device *sd;
scsi_hba_tran_t *tran_clone;
- int tgt = 0;
- int lun = 0;
- int sfunc = 0;
+ char *class;
+ int tgt;
+ int lun;
+ int sfunc;
int err = DDI_FAILURE;
char addr[SCSI_MAXNAMELEN];
@@ -1463,19 +1820,19 @@ scsi_busctl_initchild(dev_info_t *child)
* and have the load of fcp (which does scsi_hba_attach_setup)
* to fail. In this case we may get here with a NULL hba.
*/
- tran = ddi_get_driver_private(self);
+ tran = ndi_flavorv_get(self, SCSA_FLAVOR_SCSI_DEVICE);
if (tran == NULL)
return (DDI_NOT_WELL_FORMED);
/*
- * OBP may create template/stub/wildcard nodes for legacy driver
+ * OBP may create template/stub/wild-card nodes for legacy driver
* loading methods. These nodes have no properties, so we lack the
* addressing properties to initchild them. Hide the node and return
* DDI_NOT_WELL_FORMED.
*
- * XXX need ndi_devi_has_properties(dip) type interface?
+ * Future: define/use a ndi_devi_has_properties(dip) type interface.
*
- * XXX It would be nice if we could delete these ill formed nodes by
+ * NOTE: It would be nice if we could delete these ill formed nodes by
* implementing a DDI_NOT_WELL_FORMED_DELETE return code. This can't
* be done until leadville debug code removes its dependencies
* on the devinfo still being present after a failed ndi_devi_online.
@@ -1485,14 +1842,11 @@ scsi_busctl_initchild(dev_info_t *child)
(DEVI(child)->devi_sys_prop_ptr == NULL)) {
SCSI_HBA_LOG((_LOG(4), NULL, child,
"init failed: no properties"));
+ ndi_devi_set_hidden(child);
return (DDI_NOT_WELL_FORMED);
}
/* get legacy SPI addressing properties */
- sfunc = ddi_prop_get_int(DDI_DEV_T_ANY, child,
- DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, SCSI_ADDR_PROP_SFUNC, -1);
- lun = ddi_prop_get_int(DDI_DEV_T_ANY, child,
- DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
if ((tgt = ddi_prop_get_int(DDI_DEV_T_ANY, child,
DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
SCSI_ADDR_PROP_TARGET, -1)) == -1) {
@@ -1502,14 +1856,20 @@ scsi_busctl_initchild(dev_info_t *child)
* even if it is just a dummy that does not contain the real
* target address. However drivers that register devids may
* create stub driver.conf nodes without a target= property so
- * that pathological devid resolution works.
+ * that pathological devid resolution works. Hide the stub
+ * node and return DDI_NOT_WELL_FORMED.
*/
- if (ndi_dev_is_persistent_node(child) == 0) {
+ if (!scsi_hba_dev_is_sid(child)) {
SCSI_HBA_LOG((_LOG(4), NULL, child,
"init failed: stub .conf node"));
+ ndi_devi_set_hidden(child);
return (DDI_NOT_WELL_FORMED);
}
}
+ lun = ddi_prop_get_int(DDI_DEV_T_ANY, child,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
+ sfunc = ddi_prop_get_int(DDI_DEV_T_ANY, child,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, SCSI_ADDR_PROP_SFUNC, -1);
/*
* The scsi_address structure may not specify all the addressing
@@ -1521,12 +1881,13 @@ scsi_busctl_initchild(dev_info_t *child)
((tgt >= USHRT_MAX) || (lun >= 256))) {
SCSI_HBA_LOG((_LOG(1), NULL, child,
"init failed: illegal/missing properties"));
+ ndi_devi_set_hidden(child);
return (DDI_NOT_WELL_FORMED);
}
/*
* We need to initialize a fair amount of our environment to invoke
- * tran_get_name (via scsi_hba_name_child and scsi_get_name) to
+ * tran_get_name (via scsi_busctl_ua and scsi_ua_get) to
* produce the "@addr" name from addressing properties. Allocate and
* initialize scsi device structure.
*/
@@ -1534,6 +1895,7 @@ scsi_busctl_initchild(dev_info_t *child)
mutex_init(&sd->sd_mutex, NULL, MUTEX_DRIVER, NULL);
sd->sd_dev = child;
sd->sd_pathinfo = NULL;
+ sd->sd_uninit_prevent = 0;
ddi_set_driver_private(child, sd);
if (tran->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
@@ -1541,7 +1903,7 @@ scsi_busctl_initchild(dev_info_t *child)
* For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to
* scsi_device in the scsi_address structure. This allows an
* HBA driver to find its per-scsi_device private data
- * (accessable to the HBA given just the scsi_address by using
+ * (accessible to the HBA given just the scsi_address by using
* scsi_address_device(9F)/scsi_device_hba_private_get(9F)).
*/
sd->sd_address.a.a_sd = sd;
@@ -1562,10 +1924,9 @@ scsi_busctl_initchild(dev_info_t *child)
sd->sd_address.a_sublun = (uchar_t)sfunc + 1;
/*
- * XXX TODO: apply target/lun limitation logic for SPI
- * binding_set. If spi this is based on scsi_options WIDE
- * NLUNS some forms of lun limitation are based on the
- * device @lun 0
+ * NOTE: Don't limit LUNs to scsi_options value because a
+ * scsi_device discovered via SPI dynamic enumeration might
+ * still support SCMD_REPORT_LUNS.
*/
/*
@@ -1602,9 +1963,25 @@ scsi_busctl_initchild(dev_info_t *child)
*/
sd->sd_tran_safe = tran;
+ /*
+ * If the class property is not already established, set it to "scsi".
+ * This is done so that parent= driver.conf nodes have class.
+ */
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "class",
+ &class) == DDI_PROP_SUCCESS) {
+ ddi_prop_free(class);
+ } else if (ndi_prop_update_string(DDI_DEV_T_NONE, child,
+ "class", "scsi") != DDI_PROP_SUCCESS) {
+ SCSI_HBA_LOG((_LOG(2), NULL, child, "init failed: class"));
+ ndi_devi_set_hidden(child);
+ err = DDI_NOT_WELL_FORMED;
+ goto failure;
+ }
+
/* Establish the @addr name of the child. */
*addr = '\0';
- if (scsi_hba_name_child(child, addr, SCSI_MAXNAMELEN) != DDI_SUCCESS) {
+ if (scsi_busctl_ua(child, addr, sizeof (addr)) != DDI_SUCCESS) {
/*
* Some driver.conf files add bogus target properties (relative
* to their nexus representation of target) to their stub
@@ -1612,6 +1989,7 @@ scsi_busctl_initchild(dev_info_t *child)
*/
SCSI_HBA_LOG((_LOG(3), NULL, child,
"init failed: scsi_busctl_ua call"));
+ ndi_devi_set_hidden(child);
err = DDI_NOT_WELL_FORMED;
goto failure;
}
@@ -1622,24 +2000,40 @@ scsi_busctl_initchild(dev_info_t *child)
goto failure;
}
+ /* Prevent duplicate nodes. */
+ dup = ndi_devi_findchild_by_callback(self, ddi_node_name(child), addr,
+ scsi_busctl_ua);
+ if (dup) {
+ ASSERT(ndi_flavor_get(dup) == SCSA_FLAVOR_SCSI_DEVICE);
+ if (ndi_flavor_get(dup) != SCSA_FLAVOR_SCSI_DEVICE) {
+ SCSI_HBA_LOG((_LOG(1), NULL, child,
+ "init failed: %s@%s: not SCSI_DEVICE flavored",
+ ddi_node_name(child), addr));
+ goto failure;
+ }
+ if (dup != child) {
+ SCSI_HBA_LOG((_LOG(4), NULL, child,
+ "init failed: %s@%s: detected duplicate %p",
+ ddi_node_name(child), addr, (void *)dup));
+ goto failure;
+ }
+ }
+
/* set the node @addr string */
ddi_set_name_addr(child, addr);
- /* prevent sibling duplicates */
- dup = ndi_devi_find(self, ddi_node_name(child), addr);
- if (dup && (dup != child)) {
- SCSI_HBA_LOG((_LOG(4), NULL, child,
- "init failed: detected duplicate %p", (void *)dup));
- goto failure;
- }
-
/* call HBA's target init entry point if it exists */
if (tran->tran_tgt_init != NULL) {
SCSI_HBA_LOG((_LOG(4), NULL, child, "init tran_tgt_init"));
+
if ((*tran->tran_tgt_init)
(self, child, tran, sd) != DDI_SUCCESS) {
- SCSI_HBA_LOG((_LOG(2), NULL, child,
- "init failed: tran_tgt_init failed"));
+ if (ndi_dev_is_hotplug_node(child))
+ SCSI_HBA_LOG((_LOG(WARN), NULL, child,
+ "enumeration failed during tran_tgt_init"));
+ else
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "enumeration failed during tran_tgt_init"));
goto failure;
}
}
@@ -1662,16 +2056,35 @@ static int
scsi_busctl_uninitchild(dev_info_t *child)
{
dev_info_t *self = ddi_get_parent(child);
- scsi_hba_tran_t *tran = ddi_get_driver_private(self);
struct scsi_device *sd = ddi_get_driver_private(child);
+ scsi_hba_tran_t *tran;
scsi_hba_tran_t *tran_clone;
ASSERT(DEVI_BUSY_OWNED(self));
+ tran = ndi_flavorv_get(self, SCSA_FLAVOR_SCSI_DEVICE);
ASSERT(tran && sd);
if ((tran == NULL) || (sd == NULL))
return (DDI_FAILURE);
+ /*
+ * We use sd_uninit_prevent to avoid uninitializing barrier/probe
+ * nodes that are still in use. Since barrier/probe nodes are not
+ * attached we can't prevent their state demotion via ndi_hold_devi.
+ */
+ if (sd->sd_uninit_prevent) {
+ SCSI_HBA_LOG((_LOG(2), NULL, child, "uninit prevented"));
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Don't uninitialize a client node if it still has paths.
+ */
+ if (MDI_CLIENT(child) && mdi_client_get_path_count(child)) {
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "uninit prevented, client has paths"));
+ return (DDI_FAILURE);
+ }
SCSI_HBA_LOG((_LOG(3), NULL, child, "uninit begin"));
@@ -1703,19 +2116,36 @@ scsi_busctl_uninitchild(dev_info_t *child)
/*
* To simplify host adapter drivers we guarantee that multiple
* tran_tgt_init(9E) calls of the same unit address are never
- * active at the same time.
+ * active at the same time. This requires that we call
+ * tran_tgt_free on probe/barrier nodes directly prior to
+ * uninitchild.
+ *
+ * NOTE: To correctly support SCSI_HBA_TRAN_CLONE, we must use
+ * the (possibly cloned) hba_tran pointer from the scsi_device
+ * instead of hba_tran.
*/
- if (tran->tran_tgt_free)
- (*tran->tran_tgt_free) (self, child, tran, sd);
+ if (tran->tran_tgt_free) {
+ if (!scsi_hba_devi_is_barrier(child)) {
+ SCSI_HBA_LOG((_LOG(4), NULL, child,
+ "uninit tran_tgt_free"));
+
+ (*tran->tran_tgt_free) (self, child, tran, sd);
+ } else {
+ SCSI_HBA_LOG((_LOG(4), NULL, child,
+ "uninit tran_tgt_free already done"));
+ }
+ }
/*
* If a inquiry data is still allocated (by scsi_probe()) we
* free the allocation here. This keeps scsi_inq valid for the
* same duration as the corresponding inquiry properties. It
* also allows a tran_tgt_init() implementation that establishes
- * sd_inq (common/io/dktp/controller/ata/ata_disk.c) to deal
- * with deallocation in its tran_tgt_free (setting sd_inq back
- * to NULL) without upsetting the framework.
+ * sd_inq to deal with deallocation in its tran_tgt_free
+ * (setting sd_inq back to NULL) without upsetting the
+ * framework. Moving the inquiry free here also allows setting
+ * of sd_uninit_prevent to preserve the data for lun0 based
+ * scsi_get_device_type_scsi_options() calls.
*/
if (sd->sd_inq) {
kmem_free(sd->sd_inq, SUN_INQSIZE);
@@ -1734,31 +2164,56 @@ scsi_busctl_uninitchild(dev_info_t *child)
}
static int
-iport_busctl_reportdev(dev_info_t *child)
+iport_busctl_ua(dev_info_t *child, char *addr, int maxlen)
{
- dev_info_t *self = ddi_get_parent(child);
- char *iport;
+ char *iport_ua;
+ /* limit ndi_devi_findchild_by_callback to expected flavor */
+ if (ndi_flavor_get(child) != SCSA_FLAVOR_IPORT)
+ return (DDI_FAILURE);
if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
- "scsi-iport", &iport) != DDI_SUCCESS)
+ SCSI_ADDR_PROP_IPORTUA, &iport_ua) != DDI_SUCCESS) {
return (DDI_FAILURE);
+ }
- SCSI_HBA_LOG((_LOG_NF(CONT), "?%s%d at %s%d: iport %s\n",
- ddi_driver_name(child), ddi_get_instance(child),
- ddi_driver_name(self), ddi_get_instance(self),
- iport));
-
- ddi_prop_free(iport);
+ (void) snprintf(addr, maxlen, "%s", iport_ua);
+ ddi_prop_free(iport_ua);
return (DDI_SUCCESS);
}
-/* uninitchild SCSA iport 'child' node */
static int
-iport_busctl_uninitchild(dev_info_t *child)
+iport_busctl_reportdev(dev_info_t *child)
{
- ddi_set_name_addr(child, NULL);
+ dev_info_t *self = ddi_get_parent(child);
+ char *iport_ua;
+ char *initiator_port = NULL;
+
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
+ SCSI_ADDR_PROP_IPORTUA, &iport_ua) != DDI_SUCCESS)
+ return (DDI_FAILURE);
+
+ (void) ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
+ SCSI_ADDR_PROP_INITIATOR_PORT, &initiator_port);
+
+ if (initiator_port) {
+ SCSI_HBA_LOG((_LOG_NF(CONT),
+ "?%s%d at %s%d: %s %s %s %s",
+ ddi_driver_name(child), ddi_get_instance(child),
+ ddi_driver_name(self), ddi_get_instance(self),
+ SCSI_ADDR_PROP_INITIATOR_PORT, initiator_port,
+ SCSI_ADDR_PROP_IPORTUA, iport_ua));
+ ddi_prop_free(initiator_port);
+ } else {
+ SCSI_HBA_LOG((_LOG_NF(CONT), "?%s%d at %s%d: %s %s",
+ ddi_driver_name(child), ddi_get_instance(child),
+ ddi_driver_name(self), ddi_get_instance(self),
+ SCSI_ADDR_PROP_IPORTUA, iport_ua));
+ }
+ ddi_prop_free(iport_ua);
return (DDI_SUCCESS);
}
@@ -1769,29 +2224,40 @@ iport_busctl_initchild(dev_info_t *child)
dev_info_t *self = ddi_get_parent(child);
dev_info_t *dup = NULL;
char addr[SCSI_MAXNAMELEN];
- char *iport;
-
- if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
- DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
- "scsi-iport", &iport) != DDI_SUCCESS) {
+ if (iport_busctl_ua(child, addr, sizeof (addr)) != DDI_SUCCESS)
return (DDI_NOT_WELL_FORMED);
- }
-
- (void) snprintf(addr, SCSI_MAXNAMELEN, "%s", iport);
- ddi_prop_free(iport);
+ /* Prevent duplicate nodes. */
+ dup = ndi_devi_findchild_by_callback(self, ddi_node_name(child), addr,
+ iport_busctl_ua);
+ if (dup) {
+ ASSERT(ndi_flavor_get(dup) == SCSA_FLAVOR_IPORT);
+ if (ndi_flavor_get(dup) != SCSA_FLAVOR_IPORT) {
+ SCSI_HBA_LOG((_LOG(1), NULL, child,
+ "init failed: %s@%s: not IPORT flavored",
+ ddi_node_name(child), addr));
+ return (DDI_FAILURE);
+ }
+ if (dup != child) {
+ SCSI_HBA_LOG((_LOG(4), NULL, child,
+ "init failed: %s@%s: detected duplicate %p",
+ ddi_node_name(child), addr, (void *)dup));
+ return (DDI_FAILURE);
+ }
+ }
/* set the node @addr string */
ddi_set_name_addr(child, addr);
- /* Prevent duplicate nodes. */
- dup = ndi_devi_find(self, ddi_node_name(child), addr);
- if (dup && (dup != child)) {
- ddi_set_name_addr(child, NULL);
- return (DDI_FAILURE);
- }
+ return (DDI_SUCCESS);
+}
+/* uninitchild SCSA iport 'child' node */
+static int
+iport_busctl_uninitchild(dev_info_t *child)
+{
+ ddi_set_name_addr(child, NULL);
return (DDI_SUCCESS);
}
@@ -1812,13 +2278,7 @@ iport_postdetach_tran_scsi_device(dev_info_t *child)
scsi_hba_tran_free(tran);
}
-/*
- * Initialize scsi_device flavor of transport on SCSA iport 'child' node.
- *
- * NOTE: Given our past history with SCSI_HBA_TRAN_CLONE (structure-copy tran
- * per scsi_device), using structure-copy of tran at the iport level should
- * not be a problem (the most risky thing is likely tran_hba_dip).
- */
+/* Initialize scsi_device flavor of transport on SCSA iport 'child' node. */
static void
iport_preattach_tran_scsi_device(dev_info_t *child)
{
@@ -1828,14 +2288,11 @@ iport_preattach_tran_scsi_device(dev_info_t *child)
/* parent HBA node scsi_device tran is required */
htran = ndi_flavorv_get(hba, SCSA_FLAVOR_SCSI_DEVICE);
- ASSERT(htran != NULL);
- if (htran == NULL)
- return;
+ ASSERT(htran);
/* Allocate iport child's scsi_device transport vector */
tran = scsi_hba_tran_alloc(child, SCSI_HBA_CANSLEEP);
- if (tran == NULL)
- return;
+ ASSERT(tran);
/* Structure-copy scsi_device transport of HBA to iport. */
*tran = *htran;
@@ -1846,11 +2303,14 @@ iport_preattach_tran_scsi_device(dev_info_t *child)
*/
tran->tran_open_flag = 0;
tran->tran_hba_private = NULL;
+
+ /* Establish the devinfo context of this tran structure. */
tran->tran_iport_dip = child;
/* Clear SCSI_HBA_SCSA flags (except TA) */
- tran->tran_hba_flags &= ~SCSI_HBA_SCSA_FM; /* clear parent state */
- tran->tran_hba_flags |= SCSI_HBA_SCSA_TA;
+ tran->tran_hba_flags &=
+ ~(SCSI_HBA_SCSA_FM | SCSI_HBA_SCSA_PHCI); /* clear parent state */
+ tran->tran_hba_flags |= SCSI_HBA_SCSA_TA; /* always TA */
tran->tran_hba_flags &= ~SCSI_HBA_HBA; /* never HBA */
/* Establish flavor of transport (and ddi_get_driver_private()) */
@@ -1858,9 +2318,8 @@ iport_preattach_tran_scsi_device(dev_info_t *child)
/* Setup iport node */
if ((scsa_nexus_setup(child, tran) != DDI_SUCCESS) ||
- (scsa_tran_setup(child, tran) != DDI_SUCCESS)) {
+ (scsa_tran_setup(child, tran) != DDI_SUCCESS))
iport_postdetach_tran_scsi_device(child);
- }
}
/* Uninitialize smp_device flavor of transport on SCSA iport 'child' node. */
@@ -1893,7 +2352,7 @@ iport_preattach_tran_smp_device(dev_info_t *child)
}
/* Allocate iport child's smp_device transport vector */
- tran = sas_hba_tran_alloc(child, 0);
+ tran = sas_hba_tran_alloc(child);
/* Structure-copy smp_device transport of HBA to iport. */
*tran = *htran;
@@ -1902,7 +2361,6 @@ iport_preattach_tran_smp_device(dev_info_t *child)
ndi_flavorv_set(child, SCSA_FLAVOR_SMP, tran);
}
-
/*
* Generic bus_ctl operations for SCSI HBA's,
* hiding the busctl interface from the HBA.
@@ -1927,37 +2385,49 @@ scsi_hba_bus_ctl(
if ((op == DDI_CTLOPS_INITCHILD) || (op == DDI_CTLOPS_UNINITCHILD))
child = (dev_info_t *)arg;
- /* Determine the flavor of the child: smp .vs. scsi */
+ /* Determine the flavor of the child: scsi, smp, iport */
child_flavor = ndi_flavor_get(child);
switch (op) {
case DDI_CTLOPS_INITCHILD:
switch (child_flavor) {
- case SCSA_FLAVOR_IPORT:
- return (iport_busctl_initchild(child));
+ case SCSA_FLAVOR_SCSI_DEVICE:
+ return (scsi_busctl_initchild(child));
case SCSA_FLAVOR_SMP:
return (smp_busctl_initchild(child));
+ case SCSA_FLAVOR_IPORT:
+ return (iport_busctl_initchild(child));
default:
- return (scsi_busctl_initchild(child));
+ return (DDI_FAILURE);
}
+ /* NOTREACHED */
+
case DDI_CTLOPS_UNINITCHILD:
switch (child_flavor) {
- case SCSA_FLAVOR_IPORT:
- return (iport_busctl_uninitchild(child));
+ case SCSA_FLAVOR_SCSI_DEVICE:
+ return (scsi_busctl_uninitchild(child));
case SCSA_FLAVOR_SMP:
return (smp_busctl_uninitchild(child));
+ case SCSA_FLAVOR_IPORT:
+ return (iport_busctl_uninitchild(child));
default:
- return (scsi_busctl_uninitchild(child));
+ return (DDI_FAILURE);
}
+ /* NOTREACHED */
+
case DDI_CTLOPS_REPORTDEV:
switch (child_flavor) {
- case SCSA_FLAVOR_IPORT:
- return (iport_busctl_reportdev(child));
+ case SCSA_FLAVOR_SCSI_DEVICE:
+ return (scsi_busctl_reportdev(child));
case SCSA_FLAVOR_SMP:
return (smp_busctl_reportdev(child));
+ case SCSA_FLAVOR_IPORT:
+ return (iport_busctl_reportdev(child));
default:
- return (scsi_busctl_reportdev(child));
+ return (DDI_FAILURE);
}
+ /* NOTREACHED */
+
case DDI_CTLOPS_ATTACH:
as = (struct attachspec *)arg;
@@ -1976,6 +2446,7 @@ scsi_hba_bus_ctl(
iport_postdetach_tran_smp_device(child);
}
return (DDI_SUCCESS);
+
case DDI_CTLOPS_DETACH:
ds = (struct detachspec *)arg;
@@ -1990,6 +2461,7 @@ scsi_hba_bus_ctl(
iport_postdetach_tran_smp_device(child);
}
return (DDI_SUCCESS);
+
case DDI_CTLOPS_IOMIN:
tran = ddi_get_driver_private(self);
ASSERT(tran);
@@ -2015,7 +2487,6 @@ scsi_hba_bus_ctl(
return (ndi_dev_is_persistent_node(child) ?
DDI_SUCCESS : DDI_FAILURE);
- /* XXX these should be handled */
case DDI_CTLOPS_POWER:
return (DDI_SUCCESS);
@@ -2042,6 +2513,7 @@ scsi_hba_bus_ctl(
default:
return (ddi_ctlops(self, child, op, arg, result));
}
+ /* NOTREACHED */
}
/*
@@ -2064,7 +2536,7 @@ _NOTE(SCHEME_PROTECTS_DATA("Unshared Data", dev_ops))
/*ARGSUSED*/
struct scsi_pkt *
scsi_hba_pkt_alloc(
- dev_info_t *dip,
+ dev_info_t *self,
struct scsi_address *ap,
int cmdlen,
int statuslen,
@@ -2081,7 +2553,7 @@ scsi_hba_pkt_alloc(
/* Sanity check */
if (callback != SLEEP_FUNC && callback != NULL_FUNC)
- SCSI_HBA_LOG((_LOG(WARN), dip, NULL,
+ SCSI_HBA_LOG((_LOG(WARN), self, NULL,
"callback must be SLEEP_FUNC or NULL_FUNC"));
/*
@@ -2276,16 +2748,16 @@ scsi_device_size()
* SCSI_SIZE_CLEAN_VERIFY to ensure compliance (see scsi_pkt.h).
*/
void
-scsi_size_clean(dev_info_t *dip)
+scsi_size_clean(dev_info_t *self)
{
major_t major;
struct devnames *dnp;
- ASSERT(dip);
- major = ddi_driver_major(dip);
+ ASSERT(self);
+ major = ddi_driver_major(self);
ASSERT(major < devcnt);
if (major >= devcnt) {
- SCSI_HBA_LOG((_LOG(WARN), dip, NULL,
+ SCSI_HBA_LOG((_LOG(WARN), self, NULL,
"scsi_pkt_size: bogus major: %d", major));
return;
}
@@ -2353,7 +2825,7 @@ scsi_hba_in_panic()
/*ARGSUSED*/
static int
scsi_hba_map_fault(
- dev_info_t *dip,
+ dev_info_t *self,
dev_info_t *child,
struct hat *hat,
struct seg *seg,
@@ -2559,33 +3031,34 @@ scsi_hba_close(dev_t dev, int flag, int otyp, cred_t *credp)
/* ARGSUSED */
int
scsi_hba_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
- int *rvalp)
+ int *rvalp)
{
dev_info_t *self;
struct devctl_iocdata *dcp = NULL;
dev_info_t *child = NULL;
+ mdi_pathinfo_t *path = NULL;
struct scsi_device *sd;
scsi_hba_tran_t *tran;
uint_t bus_state;
int rv = 0;
int circ;
+ char *name;
+ char *addr;
- if ((self = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
+ self = e_ddi_hold_devi_by_dev(dev, 0);
+ if (self == NULL) {
rv = ENXIO;
goto out;
}
- if ((tran = ddi_get_driver_private(self)) == NULL) {
+ tran = ddi_get_driver_private(self);
+ if (tran == NULL) {
rv = ENXIO;
goto out;
}
/* Ioctls for which the generic implementation suffices. */
switch (cmd) {
- case DEVCTL_DEVICE_GETSTATE:
- case DEVCTL_DEVICE_ONLINE:
- case DEVCTL_DEVICE_OFFLINE:
- case DEVCTL_DEVICE_REMOVE:
case DEVCTL_BUS_GETSTATE:
rv = ndi_devctl_ioctl(self, cmd, arg, mode, 0);
goto out;
@@ -2599,65 +3072,106 @@ scsi_hba_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
/* Ioctls that require child identification */
switch (cmd) {
+ case DEVCTL_DEVICE_GETSTATE:
+ case DEVCTL_DEVICE_ONLINE:
+ case DEVCTL_DEVICE_OFFLINE:
+ case DEVCTL_DEVICE_REMOVE:
case DEVCTL_DEVICE_RESET:
- /* child identification from unit-address */
- if (ndi_dc_getname(dcp) == NULL ||
- ndi_dc_getaddr(dcp) == NULL) {
+ name = ndi_dc_getname(dcp);
+ addr = ndi_dc_getaddr(dcp);
+ if ((name == NULL) || (addr == NULL)) {
rv = EINVAL;
goto out;
}
- ndi_devi_enter(self, &circ);
- child = ndi_devi_find(self,
- ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
- if (child == NULL) {
- ndi_devi_exit(self, circ);
- rv = ENXIO;
+ /*
+ * Find child with name@addr - might find a devinfo
+ * child (child), a pathinfo child (path), or nothing.
+ */
+ scsi_hba_devi_enter(self, &circ);
+
+ (void) scsi_hba_find_child(self, name, addr,
+ 1, &child, &path, NULL);
+ if (path) {
+ /* Found a pathinfo */
+ ASSERT(path && (child == NULL));
+ mdi_hold_path(path);
+ scsi_hba_devi_exit_phci(self, circ);
+ sd = NULL;
+ } else if (child) {
+ /* Found a devinfo */
+ ASSERT(child && (path == NULL));
+
+ /* verify scsi_device of child */
+ if (ndi_flavor_get(child) == SCSA_FLAVOR_SCSI_DEVICE)
+ sd = ddi_get_driver_private(child);
+ else
+ sd = NULL;
+ } else {
+ ASSERT((path == NULL) && (child == NULL));
+ scsi_hba_devi_exit(self, circ);
+ rv = ENXIO; /* found nothing */
goto out;
}
- ndi_hold_devi(child);
- ndi_devi_exit(self, circ);
break;
- case DEVCTL_BUS_RESETALL:
+	case DEVCTL_BUS_RESETALL:	/* ioctl that operates on any child */
/*
- * Find a child's scsi_address so we can invoke tran_reset
- * below.
+ * Find a child's scsi_address so we can invoke tran_reset.
*
- * XXX If no child exists, one may to able to fake a child.
- * This will be a enhancement for the future.
- * For now, we fall back to BUS_RESET.
- * XXX We sould be looking at node state to get one
- * that is initialized...
+ * Future: If no child exists, we could fake a child. This will
+ * be a enhancement for the future - for now, we fall back to
+		 * be an enhancement for the future - for now, we fall back to
*/
- ndi_devi_enter(self, &circ);
+ scsi_hba_devi_enter(self, &circ);
child = ddi_get_child(self);
sd = NULL;
while (child) {
- /* XXX verify scsi_device 'flavor' of child */
- if ((sd = ddi_get_driver_private(child)) != NULL) {
+ /* verify scsi_device of child */
+ if (ndi_flavor_get(child) == SCSA_FLAVOR_SCSI_DEVICE)
+ sd = ddi_get_driver_private(child);
+ if (sd != NULL) {
+ /*
+ * NOTE: node has a scsi_device structure, so
+ * it must be initialized.
+ */
ndi_hold_devi(child);
break;
}
child = ddi_get_next_sibling(child);
}
- ndi_devi_exit(self, circ);
+ scsi_hba_devi_exit(self, circ);
break;
}
switch (cmd) {
+ case DEVCTL_DEVICE_GETSTATE:
+ if (path) {
+ if (mdi_dc_return_dev_state(path, dcp) != MDI_SUCCESS)
+ rv = EFAULT;
+ } else if (child) {
+ if (ndi_dc_return_dev_state(child, dcp) != NDI_SUCCESS)
+ rv = EFAULT;
+ } else {
+ rv = ENXIO;
+ }
+ break;
+
case DEVCTL_DEVICE_RESET:
- ASSERT(child);
- if (tran->tran_reset == NULL)
+ if (sd == NULL) {
+ rv = ENOTTY;
+ break;
+ }
+ if (tran->tran_reset == NULL) {
rv = ENOTSUP;
- else {
- sd = ddi_get_driver_private(child);
- /* XXX verify scsi_device 'flavor' of child */
- if ((sd == NULL) ||
- (tran->tran_reset(&sd->sd_address,
- RESET_TARGET) != 1))
- rv = EIO;
+ break;
}
+
+ /* Start with the small stick */
+ if (scsi_reset(&sd->sd_address, RESET_LUN) == 1)
+ break; /* LUN reset worked */
+ if (scsi_reset(&sd->sd_address, RESET_TARGET) != 1)
+ rv = EIO; /* Target reset failed */
break;
case DEVCTL_BUS_QUIESCE:
@@ -2665,11 +3179,11 @@ scsi_hba_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
(bus_state == BUS_QUIESCED))
rv = EALREADY;
else if (tran->tran_quiesce == NULL)
- rv = ENOTSUP;
- else if ((*tran->tran_quiesce)(self) != 0)
+ rv = ENOTSUP; /* man ioctl(7I) says ENOTTY */
+ else if (tran->tran_quiesce(self) != 0)
+ rv = EIO;
+ else if (ndi_set_bus_state(self, BUS_QUIESCED) != NDI_SUCCESS)
rv = EIO;
- else
- (void) ndi_set_bus_state(self, BUS_QUIESCED);
break;
case DEVCTL_BUS_UNQUIESCE:
@@ -2677,61 +3191,119 @@ scsi_hba_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
(bus_state == BUS_ACTIVE))
rv = EALREADY;
else if (tran->tran_unquiesce == NULL)
- rv = ENOTSUP;
- else if ((*tran->tran_unquiesce)(self) != 0)
+ rv = ENOTSUP; /* man ioctl(7I) says ENOTTY */
+ else if (tran->tran_unquiesce(self) != 0)
+ rv = EIO;
+ else if (ndi_set_bus_state(self, BUS_ACTIVE) != NDI_SUCCESS)
rv = EIO;
- else
- (void) ndi_set_bus_state(self, BUS_ACTIVE);
break;
case DEVCTL_BUS_RESET:
if (tran->tran_bus_reset == NULL)
- rv = ENOTSUP;
- else if ((*tran->tran_bus_reset)(self, RESET_BUS) != 1)
+ rv = ENOTSUP; /* man ioctl(7I) says ENOTTY */
+ else if (tran->tran_bus_reset(self, RESET_BUS) != 1)
rv = EIO;
break;
case DEVCTL_BUS_RESETALL:
- if (tran->tran_reset == NULL) {
- rv = ENOTSUP;
- } else {
- if (sd) {
- if ((*tran->tran_reset)
- (&sd->sd_address, RESET_ALL) != 1)
- rv = EIO;
- } else {
- if ((tran->tran_bus_reset == NULL) ||
- ((*tran->tran_bus_reset)
- (self, RESET_BUS) != 1))
- rv = EIO;
- }
+ if ((sd != NULL) &&
+ (scsi_reset(&sd->sd_address, RESET_ALL) == 1)) {
+ break; /* reset all worked */
}
+ if (tran->tran_bus_reset == NULL) {
+ rv = ENOTSUP; /* man ioctl(7I) says ENOTTY */
+ break;
+ }
+ if (tran->tran_bus_reset(self, RESET_BUS) != 1)
+ rv = EIO; /* bus reset failed */
break;
case DEVCTL_BUS_CONFIGURE:
- if (ndi_devi_config(self, NDI_DEVFS_CLEAN|
- NDI_DEVI_PERSIST|NDI_CONFIG_REPROBE) != NDI_SUCCESS) {
+ if (ndi_devi_config(self, NDI_DEVFS_CLEAN | NDI_DEVI_PERSIST |
+ NDI_CONFIG_REPROBE) != NDI_SUCCESS) {
rv = EIO;
}
break;
case DEVCTL_BUS_UNCONFIGURE:
if (ndi_devi_unconfig(self,
- NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
+ NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE) != NDI_SUCCESS) {
rv = EBUSY;
}
break;
+ case DEVCTL_DEVICE_ONLINE:
+ ASSERT(child || path);
+ if (path) {
+ if (mdi_pi_online(path, NDI_USER_REQ) != MDI_SUCCESS)
+ rv = EIO;
+ } else {
+ if (ndi_devi_online(child, 0) != NDI_SUCCESS)
+ rv = EIO;
+ }
+ break;
+
+ case DEVCTL_DEVICE_OFFLINE:
+ ASSERT(child || path);
+ if (sd != NULL)
+ (void) scsi_clear_task_set(&sd->sd_address);
+ if (path) {
+ if (mdi_pi_offline(path, NDI_USER_REQ) != MDI_SUCCESS)
+ rv = EIO;
+ } else {
+ if (ndi_devi_offline(child,
+ NDI_DEVFS_CLEAN) != NDI_SUCCESS)
+ rv = EIO;
+ }
+ break;
+
+ case DEVCTL_DEVICE_REMOVE:
+ ASSERT(child || path);
+ if (sd != NULL)
+ (void) scsi_clear_task_set(&sd->sd_address);
+ if (path) {
+ /* NOTE: don't pass NDI_DEVI_REMOVE to mdi_pi_offline */
+ if (mdi_pi_offline(path, NDI_USER_REQ) == MDI_SUCCESS) {
+ scsi_hba_devi_enter_phci(self, &circ);
+ mdi_rele_path(path);
+
+ /* ... here is the DEVICE_REMOVE part. */
+ (void) mdi_pi_free(path, 0);
+ path = NULL;
+ } else {
+ rv = EIO;
+ }
+ } else {
+ if (ndi_devi_offline(child,
+ NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE) != NDI_SUCCESS)
+ rv = EIO;
+ }
+ break;
+
default:
+ ASSERT(dcp != NULL);
rv = ENOTTY;
+ break;
}
-out: if (child)
- ndi_rele_devi(child);
+ /* all done -- clean up and return */
+out:
+ /* release hold on what we found */
+ if (path) {
+ scsi_hba_devi_enter_phci(self, &circ);
+ mdi_rele_path(path);
+ }
+ if (path || child)
+ scsi_hba_devi_exit(self, circ);
+
if (dcp)
ndi_dc_freehdl(dcp);
+
if (self)
ddi_release_devi(self);
+
+ *rvalp = rv;
+
return (rv);
}
@@ -2761,14 +3333,15 @@ scsi_hba_bus_power(dev_info_t *self, void *impl_arg, pm_bus_power_op_t op,
}
/*
- * Return the lun from an address string. Either the lun is after the
- * first ',' or the entire addr is the lun. Return SCSI_LUN64_ILLEGAL
- * if the format is incorrect.
+ * Return the lun64 value from an address string: "addr,lun[,sfunc]". Either
+ * the lun is after the first ',' or the entire address string is the lun.
+ * Return SCSI_LUN64_ILLEGAL if the format is incorrect. A lun64 is at most
+ * 16 hex digits long.
*
- * If the addr specified has incorrect syntax (busconfig one of
+ * If the address string specified has incorrect syntax (busconfig of a
* bogus /devices path) then scsi_addr_to_lun64 can return SCSI_LUN64_ILLEGAL.
*/
-scsi_lun64_t
+static scsi_lun64_t
scsi_addr_to_lun64(char *addr)
{
scsi_lun64_t lun64;
@@ -2776,9 +3349,9 @@ scsi_addr_to_lun64(char *addr)
int i;
if (addr) {
- s = strchr(addr, ','); /* "addr,lun[,sfunc]" */
+ s = strchr(addr, ','); /* "addr,lun" */
if (s)
- s++; /* skip ',' */
+ s++; /* skip ',', at lun */
else
s = addr; /* "lun" */
@@ -2792,7 +3365,7 @@ scsi_addr_to_lun64(char *addr)
else
break;
}
- if (*s && (*s != ',')) /* addr,lun[,sfunc] is OK */
+ if (*s && (*s != ',')) /* [,sfunc] is OK */
lun64 = SCSI_LUN64_ILLEGAL;
} else
lun64 = SCSI_LUN64_ILLEGAL;
@@ -2805,6 +3378,47 @@ scsi_addr_to_lun64(char *addr)
}
/*
+ * Return the sfunc value from an address string: "addr,lun[,sfunc]". Either the
+ * sfunc is after the second ',' or the entire address string is the sfunc.
+ * Return -1 if there is only one ',' in the address string or the string is
+ * invalid. An sfunc is at most two hex digits long.
+ */
+static int
+scsi_addr_to_sfunc(char *addr)
+{
+ int sfunc;
+ char *s;
+ int i;
+
+ if (addr) {
+ s = strchr(addr, ','); /* "addr,lun" */
+ if (s) {
+ s++; /* skip ',', at lun */
+ s = strchr(s, ','); /* "lun,sfunc]" */
+ if (s == NULL)
+ return (-1); /* no ",sfunc" */
+ s++; /* skip ',', at sfunc */
+ } else
+ s = addr; /* "sfunc" */
+
+ for (sfunc = 0, i = 0; *s && (i < 2); s++, i++) {
+ if (*s >= '0' && *s <= '9')
+ sfunc = (sfunc << 4) + (*s - '0');
+ else if (*s >= 'A' && *s <= 'F')
+ sfunc = (sfunc << 4) + 10 + (*s - 'A');
+ else if (*s >= 'a' && *s <= 'f')
+ sfunc = (sfunc << 4) + 10 + (*s - 'a');
+ else
+ break;
+ }
+ if (*s)
+ sfunc = -1; /* illegal */
+ } else
+ sfunc = -1;
+ return (sfunc);
+}
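
Editor's aside, not part of the patch: a small sketch (hypothetical helper, assumed unit-address strings) of the values the two parsers above produce:

	/*
	 * Editor's sketch (not part of the patch): expected parse results
	 * for some assumed unit-address strings.
	 */
	static void
	example_ua_parse_checks(void)
	{
		ASSERT(scsi_addr_to_lun64("w50000c0f0123abcd,1a,2") == 0x1a);
		ASSERT(scsi_addr_to_sfunc("w50000c0f0123abcd,1a,2") == 0x2);
		ASSERT(scsi_addr_to_lun64("3,0") == 0);
		ASSERT(scsi_addr_to_sfunc("3,0") == -1);	/* no ",sfunc" */
		ASSERT(scsi_addr_to_lun64("not-hex") == SCSI_LUN64_ILLEGAL);
	}
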
+
+/*
* Convert scsi ascii string data to NULL terminated (semi) legal IEEE 1275
* "compatible" (name) property form.
*
@@ -2908,37 +3522,37 @@ string_scsi_to_1275(char *s_1275, char *s_scsi, int len)
*
* where:
*
- * v is the letter 'v'. Denotest the
+ * v is the letter 'v'. Denotes the
* beginning of VVVVVVVV.
*
- * VVVVVVVV Translated scsi_vendor.
+ * VVVVVVVV Translated scsi_vendor.
*
- * p is the letter 'p'. Denotes the
+ * p is the letter 'p'. Denotes the
* beginning of PPPPPPPPPPPPPPPP.
*
* PPPPPPPPPPPPPPPP Translated scsi_product.
*
- * r is the letter 'r'. Denotes the
+ * r is the letter 'r'. Denotes the
* beginning of RRRR.
*
- * RRRR Translated scsi_revision.
+ * RRRR Translated scsi_revision.
*
- * DD is a two digit ASCII hexadecimal
+ * DD is a two digit ASCII hexadecimal
* number. The value of the two digits is
* based one the SCSI "Peripheral device
* type" command set associated with the
* node. On a primary node this is the
* scsi_dtype of the primary command set,
* on a secondary node this is the
- * scsi_dtype associated with the embedded
- * function command set.
+ * scsi_dtype associated with the secondary
+ * function embedded command set.
*
- * EE Same encoding used for DD. This form is
+ * EE Same encoding used for DD. This form is
* only generated on secondary function
- * nodes. The DD function is embedded in
- * an EE device.
+ * nodes. The DD secondary function is embedded
+ * in an EE device.
*
- * FFF Concatenation, in alphabetical order,
+ * FFF Concatenation, in alphabetical order,
* of the flag characters within a form-group.
* For a given form-group, the following
* flags are defined.
@@ -2961,10 +3575,10 @@ string_scsi_to_1275(char *s_1275, char *s_scsi, int len)
* if there are applicable flag
* characters.
*
- * b is the letter 'b'. Denotes the
+ * b is the letter 'b'. Denotes the
* beginning of BBBBBBBB.
*
- * BBBBBBBB Binding-set. Operating System Specific:
+ * BBBBBBBB Binding-set. Operating System Specific:
* scsi-binding-set property of HBA.
*/
#define NCOMPAT (1 + (13 + 2) + 1)
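
Editor's aside, not part of the patch: to make the form-groups documented above concrete, a hypothetical direct-access disk (invented vid/pid/rev, "sas" binding-set) would yield compatible forms roughly of the shape below; this is only an illustrative subset, the exact list and ordering come from the code that follows.

	/*
	 * Editor's illustration (invented vid "ACME", pid "ROCKET99",
	 * rev "0101", dtype 0x00, binding-set "sas"):
	 *
	 *	scsiclass,00.vACME.pROCKET99.r0101
	 *	scsiclass,00.vACME.pROCKET99
	 *	scsiclass,00
	 *	scsa,00.bsas
	 *	scsiclass
	 */
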
@@ -2976,10 +3590,11 @@ string_scsi_to_1275(char *s_1275, char *s_scsi, int len)
* to determine GUID FFF support.
*/
static void
-scsi_hba_identity_nodename_compatible_get(struct scsi_inquiry *inq,
+scsi_hba_ident_nodename_compatible_get(struct scsi_inquiry *inq,
uchar_t *inq80, size_t inq80len, uchar_t *inq83, size_t inq83len,
char *binding_set, int dtype_node, char *compat0,
- char **nodenamep, char ***compatiblep, int *ncompatiblep)
+ char **nodenamep, char **drivernamep,
+ char ***compatiblep, int *ncompatiblep)
{
char vid[sizeof (inq->inq_vid) + 1 ];
char pid[sizeof (inq->inq_pid) + 1];
@@ -3106,13 +3721,6 @@ scsi_hba_identity_nodename_compatible_get(struct scsi_inquiry *inq,
/* # for x86 fcp and vhci use generic nodenames */
#endif /* sparc */
-#ifdef notdef
- /*
- * The following binding-set specific mappings are not being
- * delivered at this time, but are listed here as an examples of
- * the type of mappings needed.
- */
-
/* # legacy mapping to driver nodenames for spi binding-set */
{"sd", "scsa,00.bspi"},
{"sd", "scsa,05.bspi"},
@@ -3138,7 +3746,6 @@ scsi_hba_identity_nodename_compatible_get(struct scsi_inquiry *inq,
{"comm", "scsa,09.busb"},
{"array_ctlr", "scsa,0c.busb"},
{"esi", "scsa,0d.busb"},
-#endif /* notdef */
/*
* mapping nodenames for mpt based on scsi dtype
@@ -3155,6 +3762,7 @@ scsi_hba_identity_nodename_compatible_get(struct scsi_inquiry *inq,
};
struct nodename_aliases *nap;
+ /* NOTE: drivernamep can be NULL */
ASSERT(nodenamep && compatiblep && ncompatiblep &&
(binding_set == NULL || (strlen(binding_set) <= 8)));
if ((nodenamep == NULL) || (compatiblep == NULL) ||
@@ -3186,7 +3794,7 @@ scsi_hba_identity_nodename_compatible_get(struct scsi_inquiry *inq,
/*
* Form flags in ***ALPHABETICAL*** order within form-group:
*
- * NOTE: When adding a new flag to an existing form-group, carefull
+ * NOTE: When adding a new flag to an existing form-group, careful
* consideration must be given to not breaking existing bindings
* based on that form-group.
*/
@@ -3203,7 +3811,7 @@ scsi_hba_identity_nodename_compatible_get(struct scsi_inquiry *inq,
*/
i = 0;
dtype_device = inq->inq_dtype & DTYPE_MASK;
- if (rootvp && (inq->inq_rmb ||
+ if (modrootloaded && (inq->inq_rmb ||
(dtype_device == DTYPE_WORM) ||
(dtype_device == DTYPE_RODIRECT) ||
(dtype_device == DTYPE_OPTICAL)))
@@ -3433,32 +4041,34 @@ scsi_hba_identity_nodename_compatible_get(struct scsi_inquiry *inq,
}
/*
- * If no nodename_aliases mapping exists then use the
- * driver_aliases specified driver binding as a nodename.
- * Determine the driver based on compatible (which may
- * have the passed in compat0 as the first item). The
- * driver_aliases file has entries like
+ * Determine the driver name based on compatible (which may
+ * have the passed in compat0 as the first item). The driver_aliases
+ * file has entries like
*
* sd "scsiclass,00"
*
- * that map compatible forms to specific drivers. These
- * entries are established by add_drv. We use the most specific
+ * that map compatible forms to specific drivers. These entries are
+ * established by add_drv/update_drv. We use the most specific
* driver binding as the nodename. This matches the eventual
* ddi_driver_compatible_major() binding that will be
* established by bind_node()
*/
- if (nname == NULL) {
- for (dname = NULL, csp = compatp; *csp; csp++) {
- major = ddi_name_to_major(*csp);
- if ((major == (major_t)-1) ||
- (devnamesp[major].dn_flags & DN_DRIVER_REMOVED))
- continue;
- if (dname = ddi_major_to_name(major))
- break;
- }
- nname = dname;
+ for (dname = NULL, csp = compatp; *csp; csp++) {
+ major = ddi_name_to_major(*csp);
+ if ((major == DDI_MAJOR_T_NONE) ||
+ (devnamesp[major].dn_flags & DN_DRIVER_REMOVED))
+ continue;
+ if (dname = ddi_major_to_name(major))
+ break;
}
+ /*
+ * If no nodename_aliases mapping exists then use the
+ * driver_aliases specified driver binding as a nodename.
+ */
+ if (nname == NULL)
+ nname = dname;
+
/* return results */
if (nname) {
*nodenamep = kmem_alloc(strlen(nname) + 1, KM_SLEEP);
@@ -3472,52 +4082,55 @@ scsi_hba_identity_nodename_compatible_get(struct scsi_inquiry *inq,
* compatible contains all compatible forms concatenated
* into a single string pointed to by the first element.
*/
- if (nname == NULL) {
- for (csp = compatp; *(csp + 1); csp++)
- *((*csp) + strlen(*csp)) = ' ';
- *(compatp + 1) = NULL;
- ncompat = 1;
- }
+ for (csp = compatp; *(csp + 1); csp++)
+ *((*csp) + strlen(*csp)) = ' ';
+ *(compatp + 1) = NULL;
+ ncompat = 1;
}
+ if (drivernamep) {
+ if (dname) {
+ *drivernamep = kmem_alloc(strlen(dname) + 1, KM_SLEEP);
+ (void) strcpy(*drivernamep, dname);
+ } else
+ *drivernamep = NULL;
+ }
*compatiblep = compatp;
*ncompatiblep = ncompat;
}
+/*
+ * Free allocations associated with scsi_hba_ident_nodename_compatible_get.
+ */
+static void
+scsi_hba_ident_nodename_compatible_free(char *nodename, char *drivername,
+ char **compatible)
+{
+ if (nodename)
+ kmem_free(nodename, strlen(nodename) + 1);
+ if (drivername)
+ kmem_free(drivername, strlen(drivername) + 1);
+ if (compatible)
+ kmem_free(compatible, (NCOMPAT * sizeof (char *)) +
+ (NCOMPAT * COMPAT_LONGEST));
+}
+
void
scsi_hba_nodename_compatible_get(struct scsi_inquiry *inq,
char *binding_set, int dtype_node, char *compat0,
char **nodenamep, char ***compatiblep, int *ncompatiblep)
{
- scsi_hba_identity_nodename_compatible_get(inq,
+ scsi_hba_ident_nodename_compatible_get(inq,
NULL, 0, NULL, 0, binding_set, dtype_node, compat0, nodenamep,
- compatiblep, ncompatiblep);
+ NULL, compatiblep, ncompatiblep);
}
-/*
- * Free allocations associated with scsi_hba_nodename_compatible_get or
- * scsi_hba_identity_nodename_compatible_get use.
- */
void
scsi_hba_nodename_compatible_free(char *nodename, char **compatible)
{
- if (nodename)
- kmem_free(nodename, strlen(nodename) + 1);
-
- if (compatible)
- kmem_free(compatible, (NCOMPAT * sizeof (char *)) +
- (NCOMPAT * COMPAT_LONGEST));
+ scsi_hba_ident_nodename_compatible_free(nodename, NULL, compatible);
}
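
Editor's aside, not part of the patch: a minimal sketch of the public nodename/compatible pair in use by an HBA enumeration path. The helper name, the "sas" binding-set, and the follow-on property/online steps are assumptions:

	/*
	 * Editor's sketch (not part of the patch): helper name, binding-set,
	 * and follow-on steps are illustrative assumptions.
	 */
	static void
	example_create_child(dev_info_t *self, struct scsi_inquiry *inq)
	{
		char		*nodename;
		char		**compatible;
		int		ncompatible;
		dev_info_t	*child;

		scsi_hba_nodename_compatible_get(inq, "sas", inq->inq_dtype,
		    NULL, &nodename, &compatible, &ncompatible);
		if (nodename == NULL)
			goto out;	/* no driver binding for this dtype */

		ndi_devi_alloc_sleep(self, nodename, DEVI_SID_NODEID, &child);
		(void) ndi_prop_update_string_array(DDI_DEV_T_NONE, child,
		    "compatible", compatible, ncompatible);
		/* ... add addressing properties, then online the node ... */
	out:
		scsi_hba_nodename_compatible_free(nodename, compatible);
	}
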
-/* scsi_device property interfaces */
-#define _TYPE_DEFINED(flags) \
- (((flags & SCSI_DEVICE_PROP_TYPE_MSK) == SCSI_DEVICE_PROP_PATH) || \
- ((flags & SCSI_DEVICE_PROP_TYPE_MSK) == SCSI_DEVICE_PROP_DEVICE))
-
-#define _DEVICE_PIP(sd, flags) \
- ((((flags & SCSI_DEVICE_PROP_TYPE_MSK) == SCSI_DEVICE_PROP_PATH) && \
- sd->sd_pathinfo) ? (mdi_pathinfo_t *)sd->sd_pathinfo : NULL)
-
/* return the unit_address associated with a scsi_device */
char *
scsi_device_unit_address(struct scsi_device *sd)
@@ -3528,13 +4141,22 @@ scsi_device_unit_address(struct scsi_device *sd)
if ((sd == NULL) || (sd->sd_dev == NULL))
return (NULL);
- pip = _DEVICE_PIP(sd, SCSI_DEVICE_PROP_PATH);
+ pip = (mdi_pathinfo_t *)sd->sd_pathinfo;
if (pip)
return (mdi_pi_get_addr(pip));
else
return (ddi_get_name_addr(sd->sd_dev));
}
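
Editor's aside, not part of the patch: the returned unit-address string is valid whether the device is represented by a pathinfo or a devinfo node, which makes it convenient for logging. A tiny sketch with a hypothetical helper name:

	/*
	 * Editor's sketch (not part of the patch): the helper name is
	 * hypothetical.
	 */
	static void
	example_log_ua(struct scsi_device *sd)
	{
		char	*ua = scsi_device_unit_address(sd);

		cmn_err(CE_CONT, "?device at unit-address %s\n",
		    ua ? ua : "<none>");
	}
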
+/* scsi_device property interfaces */
+#define _TYPE_DEFINED(flags) \
+ (((flags & SCSI_DEVICE_PROP_TYPE_MSK) == SCSI_DEVICE_PROP_PATH) || \
+ ((flags & SCSI_DEVICE_PROP_TYPE_MSK) == SCSI_DEVICE_PROP_DEVICE))
+
+#define _DEVICE_PIP(sd, flags) \
+ ((((flags & SCSI_DEVICE_PROP_TYPE_MSK) == SCSI_DEVICE_PROP_PATH) && \
+ sd->sd_pathinfo) ? (mdi_pathinfo_t *)sd->sd_pathinfo : NULL)
+
int
scsi_device_prop_get_int(struct scsi_device *sd, uint_t flags,
char *name, int defval)
@@ -3837,26 +4459,4086 @@ scsi_device_prop_free(struct scsi_device *sd, uint_t flags, void *data)
ddi_prop_free(data);
}
+/*
+ * scsi_hba_ua_set: given "unit-address" string, set properties.
+ *
+ * Function to set the properties on a devinfo or pathinfo node from
+ * the "unit-address" part of a "name@unit-address" /devices path 'name'
+ * string.
+ *
+ * This function works in conjunction with scsi_ua_get()/scsi_hba_ua_get()
+ * (and possibly with an HBA driver's tran_tgt_init() implementation).
+ */
+static int
+scsi_hba_ua_set(char *ua, dev_info_t *dchild, mdi_pathinfo_t *pchild)
+{
+ char *p;
+ int tgt;
+ char *tgt_port_end;
+ char *tgt_port;
+ int tgt_port_len;
+ int sfunc;
+ scsi_lun64_t lun64;
+
+ /* Caller must choose to decorate devinfo *or* pathinfo */
+ ASSERT((dchild != NULL) ^ (pchild != NULL));
+ if (dchild && pchild)
+ return (0);
+
+ /*
+ * generic implementation based on "tgt,lun[,sfunc]" address form.
+ * parse hex "tgt" part of "tgt,lun[,sfunc]"
+ */
+ p = ua;
+ tgt_port_end = NULL;
+ for (tgt = 0; *p && *p != ','; p++) {
+ if (*p >= '0' && *p <= '9')
+ tgt = (tgt << 4) + (*p - '0');
+ else if (*p >= 'a' && *p <= 'f')
+ tgt = (tgt << 4) + 10 + (*p - 'a');
+ else
+ tgt = -1; /* non-numeric */
+
+ /*
+		 * if non-numeric or out of range, set tgt to -1 and
+ * skip forward
+ */
+ if (tgt < 0) {
+ tgt = -1;
+ for (; *p && *p != ','; p++)
+ ;
+ break;
+ }
+ }
+ tgt_port_end = p;
+
+ /* parse hex ",lun" part of "tgt,lun[,sfunc]" */
+ if (*p)
+ p++;
+ for (lun64 = 0; *p && *p != ','; p++) {
+ if (*p >= '0' && *p <= '9')
+ lun64 = (lun64 << 4) + (*p - '0');
+ else if (*p >= 'a' && *p <= 'f')
+ lun64 = (lun64 << 4) + 10 + (*p - 'a');
+ else
+ return (0);
+ }
+
+ /* parse hex ",sfunc" part of "tgt,lun[,sfunc]" */
+ if (*p) {
+ p++;
+ for (sfunc = 0; *p; p++) {
+ if (*p >= '0' && *p <= '9')
+ sfunc = (sfunc << 4) + (*p - '0');
+ else if (*p >= 'a' && *p <= 'f')
+ sfunc = (sfunc << 4) + 10 + (*p - 'a');
+ else
+ return (0);
+ }
+ } else
+ sfunc = -1;
+
+ if (dchild) {
+ /*
+ * Decorate a devinfo node with unit address properties.
+		 * This adds the addressing properties needed to
+ * DDI_CTLOPS_UNINITCHILD the devinfo node (i.e. perform
+ * the reverse operation - form unit address from properties).
+ */
+ if ((tgt != -1) && (ndi_prop_update_int(DDI_DEV_T_NONE, dchild,
+ SCSI_ADDR_PROP_TARGET, tgt) != DDI_PROP_SUCCESS))
+ return (0);
+
+ if (tgt_port_end) {
+ tgt_port_len = tgt_port_end - ua + 1;
+ tgt_port = kmem_alloc(tgt_port_len, KM_SLEEP);
+ (void) strlcpy(tgt_port, ua, tgt_port_len);
+ if (ndi_prop_update_string(DDI_DEV_T_NONE, dchild,
+ SCSI_ADDR_PROP_TARGET_PORT, tgt_port) !=
+ DDI_PROP_SUCCESS) {
+ kmem_free(tgt_port, tgt_port_len);
+ return (0);
+ }
+ kmem_free(tgt_port, tgt_port_len);
+ }
+
+ /* Set the appropriate lun properties. */
+ if (lun64 < SCSI_32LUNS_PER_TARGET) {
+ if (ndi_prop_update_int(DDI_DEV_T_NONE, dchild,
+ SCSI_ADDR_PROP_LUN, (int)lun64) != DDI_PROP_SUCCESS)
+ return (0);
+ }
+ if (ndi_prop_update_int64(DDI_DEV_T_NONE, dchild,
+ SCSI_ADDR_PROP_LUN64, lun64) != DDI_PROP_SUCCESS)
+ return (0);
+
+ /* Set the sfunc property */
+ if ((sfunc != -1) &&
+ (ndi_prop_update_int(DDI_DEV_T_NONE, dchild,
+ SCSI_ADDR_PROP_SFUNC, (int)sfunc) != DDI_PROP_SUCCESS))
+ return (0);
+ } else if (pchild) {
+ /*
+ * Decorate a pathinfo node with unit address properties.
+ */
+ if ((tgt != -1) && (mdi_prop_update_int(pchild,
+ SCSI_ADDR_PROP_TARGET, tgt) != DDI_PROP_SUCCESS))
+ return (0);
+
+ if (tgt_port_end) {
+ tgt_port_len = tgt_port_end - ua + 1;
+ tgt_port = kmem_alloc(tgt_port_len, KM_SLEEP);
+ (void) strlcpy(tgt_port, ua, tgt_port_len);
+ if (mdi_prop_update_string(pchild,
+ SCSI_ADDR_PROP_TARGET_PORT, tgt_port) !=
+ DDI_PROP_SUCCESS) {
+ kmem_free(tgt_port, tgt_port_len);
+ return (0);
+ }
+ kmem_free(tgt_port, tgt_port_len);
+ }
+
+ /* Set the appropriate lun properties */
+ if (lun64 < SCSI_32LUNS_PER_TARGET) {
+ if (mdi_prop_update_int(pchild, SCSI_ADDR_PROP_LUN,
+ (int)lun64) != DDI_PROP_SUCCESS)
+ return (0);
+ }
+
+ if (mdi_prop_update_int64(pchild, SCSI_ADDR_PROP_LUN64,
+ lun64) != DDI_PROP_SUCCESS)
+ return (0);
+
+ /* Set the sfunc property */
+ if ((sfunc != -1) &&
+ (mdi_prop_update_int(pchild,
+ SCSI_ADDR_PROP_SFUNC, (int)sfunc) != DDI_PROP_SUCCESS))
+ return (0);
+ }
+ return (1);
+}
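+
+/*
+ * Illustrative call pattern for the unit-address decoration routine above
+ * (a sketch only; scsi_device_createchild() and scsi_device_config() below
+ * are the authoritative callers):
+ *
+ *	if (scsi_hba_ua_set(addr, dchild, NULL) == 0)
+ *		... decoration failed, remove/free the child node ...
+ */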
+
+/*
+ * Private ndi_devi_find/mdi_pi_find implementation - find the child
+ * dev_info/path_info of self whose phci name matches "name@caddr".
+ * We have our own implementation because we need to search with both
+ * forms of sibling lists (dev_info and path_info) and we need to be able
+ * to search with a NULL name in order to find siblings already associated
+ * with a given unit-address (same @addr). NOTE: NULL name search will never
+ * return probe node.
+ *
+ * If pchildp is NULL and we find a pathinfo child, we return the client
+ * devinfo node in *dchildp.
+ *
+ * The init flag argument should be clear when called from places where
+ * recursion could occur (like scsi_busctl_initchild) and when the caller
+ * has already performed a search for name@addr with init set (performance).
+ *
+ * Future: Integrate ndi_devi_findchild_by_callback into scsi_hba_find_child.
+ */
+static int
+scsi_hba_find_child(dev_info_t *self, char *name, char *addr, int init,
+ dev_info_t **dchildp, mdi_pathinfo_t **pchildp, int *ppi)
+{
+ dev_info_t *dchild; /* devinfo child */
+ mdi_pathinfo_t *pchild; /* pathinfo child */
+ int found = CHILD_TYPE_NONE;
+ char *daddr;
+
+ ASSERT(self && DEVI_BUSY_OWNED(self));
+ ASSERT(addr && dchildp);
+ if ((self == NULL) || (addr == NULL) || (dchildp == NULL))
+ return (CHILD_TYPE_NONE);
+
+ *dchildp = NULL;
+ if (pchildp)
+ *pchildp = NULL;
+ if (ppi)
+ *ppi = 0;
+
+ /* Walk devinfo child list to find a match */
+ for (dchild = ddi_get_child(self); dchild;
+ dchild = ddi_get_next_sibling(dchild)) {
+ if (i_ddi_node_state(dchild) < DS_INITIALIZED)
+ continue;
+
+ daddr = ddi_get_name_addr(dchild);
+ if (daddr && (strcmp(addr, daddr) == 0) &&
+ ((name == NULL) ||
+ (strcmp(name, DEVI(dchild)->devi_node_name) == 0))) {
+ /*
+ * If we are asked to find "anything" at a given
+			 * unit-address (name == NULL), we don't really want
+			 * to find the 'probe' node. A 'name == NULL' search
+			 * that encounters a probe node should fail; this
+			 * triggers slow-path code where we explicitly look
+			 * for, and synchronize against, a node named "probe"
+			 * at the unit-address.
+ */
+ if ((name == NULL) &&
+ scsi_hba_devi_is_barrier(dchild)) {
+ SCSI_HBA_LOG((_LOG(4), NULL, dchild,
+ "%s@%s 'probe' devinfo found, skip",
+ name ? name : "", addr));
+ continue;
+ }
+
+ /* We have found a match. */
+ found |= CHILD_TYPE_DEVINFO;
+ SCSI_HBA_LOG((_LOG(4), NULL, dchild,
+ "%s@%s devinfo found", name ? name : "", addr));
+ *dchildp = dchild; /* devinfo found */
+ break;
+ }
+ }
+
+ /*
+ * Walk pathinfo child list to find a match.
+ *
+ * NOTE: Unlike devinfo nodes, pathinfo nodes have a string searchable
+ * unit-address from creation - so there is no need for an 'init'
+ * search block of code for pathinfo nodes below.
+ */
+ pchild = mdi_pi_find(self, NULL, addr);
+ if (pchild) {
+ /*
+ * NOTE: If name specified and we match a pathinfo unit
+ * address, we don't check the client node name.
+ */
+ if (ppi)
+ *ppi = mdi_pi_get_path_instance(pchild);
+ found |= CHILD_TYPE_PATHINFO;
+
+ if (pchildp) {
+ SCSI_HBA_LOG((_LOG(4), self, NULL,
+ "%s pathinfo found", mdi_pi_spathname(pchild)));
+ *pchildp = pchild; /* pathinfo found */
+ } else if (*dchildp == NULL) {
+ /*
+ * Did not find a devinfo node, found a pathinfo node,
+ * but caller did not ask us to return a pathinfo node:
+ * we return the 'client' devinfo node instead (but
+ * with CHILD_TYPE_PATHINFO 'found' return value).
+ */
+ dchild = mdi_pi_get_client(pchild);
+ SCSI_HBA_LOG((_LOG(4), NULL, dchild,
+ "%s pathinfo found, client switch",
+ mdi_pi_spathname(pchild)));
+
+ /*
+ * A pathinfo node always has a 'client' devinfo node,
+ * but we need to ensure that the 'client' is
+ * initialized and has a scsi_device structure too.
+ */
+ ASSERT(dchild);
+ if (i_ddi_node_state(dchild) < DS_INITIALIZED) {
+ SCSI_HBA_LOG((_LOG(4), NULL, dchild,
+ "%s found client, initchild",
+ mdi_pi_spathname(pchild)));
+ (void) ddi_initchild(ddi_get_parent(dchild),
+ dchild);
+ }
+ if (i_ddi_node_state(dchild) >= DS_INITIALIZED) {
+ /* client found and initialized */
+ *dchildp = dchild;
+ } else {
+ SCSI_HBA_LOG((_LOG(4), NULL, dchild,
+ "%s found client, but failed initchild",
+ mdi_pi_spathname(pchild)));
+ }
+ }
+ }
+
+ /* Try devinfo again with initchild of uninitialized nodes */
+ if ((found == CHILD_TYPE_NONE) && init) {
+ for (dchild = ddi_get_child(self); dchild;
+ dchild = ddi_get_next_sibling(dchild)) {
+ /* skip if checked above */
+ if (i_ddi_node_state(dchild) >= DS_INITIALIZED)
+ continue;
+ /* attempt initchild to establish unit-address */
+ (void) ddi_initchild(self, dchild);
+ if (i_ddi_node_state(dchild) < DS_INITIALIZED)
+ continue;
+ daddr = ddi_get_name_addr(dchild);
+ if (daddr &&
+ ((name == NULL) || (strcmp(name,
+ DEVI(dchild)->devi_node_name) == 0)) &&
+ (strcmp(addr, daddr) == 0)) {
+ found |= CHILD_TYPE_DEVINFO;
+ SCSI_HBA_LOG((_LOG(4), NULL, dchild,
+ "%s@%s devinfo found post initchild",
+ name ? name : "", addr));
+ *dchildp = dchild; /* devinfo found */
+ break; /* node found */
+ }
+ }
+ }
+
+ /*
+ * We should never find devinfo and pathinfo at the same
+ * unit-address.
+ */
+ ASSERT(found != (CHILD_TYPE_DEVINFO | CHILD_TYPE_PATHINFO));
+ if (found == (CHILD_TYPE_DEVINFO | CHILD_TYPE_PATHINFO)) {
+ found = CHILD_TYPE_NONE;
+ *dchildp = NULL;
+ *pchildp = NULL;
+ }
+ return (found);
+}
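+
+/*
+ * Illustrative call pattern for scsi_hba_find_child() (sketch only; the
+ * caller must already hold the parent via scsi_hba_devi_enter()):
+ *
+ *	dev_info_t	*dchild;
+ *	mdi_pathinfo_t	*pchild;
+ *	int		pi;
+ *
+ *	switch (scsi_hba_find_child(self, name, addr, 1,
+ *	    &dchild, &pchild, &pi)) {
+ *	case CHILD_TYPE_DEVINFO:
+ *		... use dchild (valid while the parent is held) ...
+ *	case CHILD_TYPE_PATHINFO:
+ *		... use pchild and path_instance pi ...
+ *	case CHILD_TYPE_NONE:
+ *		... nothing found at name@addr ...
+ *	}
+ */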
+
+/*
+ * Given information about a child device (contained on the probe node),
+ * construct the dynamic SID devinfo or pathinfo node associated with the
+ * device, returned via dchildp/pchildp. In the creation of this SID node a
+ * compatible property for the
+ * device is formed and used to establish a nodename (via
+ * /etc/nodename_aliases) and to bind a driver (via /etc/driver_aliases).
+ *
+ * If this routine is called then we got a response from a device and
+ * obtained the inquiry data from the device. Some inquiry results indicate
+ * that the specific LUN we addressed does not exist, and we don't want to
+ * bind a standard target driver to the node we create. Even though the
+ * specific LUN is not usable, the framework may still want to bind a
+ * target driver to the device for internal communication with the device -
+ * an example would be issuing a report_lun to enumerate other LUNs under a
+ * DPQ_NEVER LUN0. Another example would be wanting to know that the
+ * DPQ_NEVER LUN0 device exists in BUS_CONFIG_ONE for non-existent LUN
+ * caching optimizations. To support this we let the caller specify a
+ * compatible property (or driver). If LUN0 inquiry data indicates that the
+ * LUN does not exist then we establish compat0 as the highest precedence (0)
+ * compatible form. If used, this compat0 driver will never be called on to
+ * issue external commands to the device.
+ *
+ * If no driver binds to the device using driver_alias we establish the driver
+ * passed in as the node name.
+ */
+static int
+scsi_device_createchild(dev_info_t *self, char *addr, scsi_enum_t se,
+ struct scsi_device *sdprobe, dev_info_t **dchildp, mdi_pathinfo_t **pchildp)
+{
+ scsi_lun64_t lun64;
+ int dtype;
+ int dpq;
+ int dpq_vu;
+ int dtype_node;
+ int lunexists;
+ char *compat0;
+ char *nname;
+ char **compat = NULL;
+ int ncompat;
+ dev_info_t *dchild = NULL;
+ mdi_pathinfo_t *pchild = NULL;
+ dev_info_t *probe = sdprobe->sd_dev;
+ struct scsi_inquiry *inq = sdprobe->sd_inq;
+ uchar_t *inq80 = NULL;
+ uchar_t *inq83 = NULL;
+ uint_t inq80len, inq83len;
+ char *binding_set = NULL;
+ char *dname = NULL;
+ ddi_devid_t devid;
+ int have_devid = 0;
+ char *devid_str;
+ char *guid = NULL;
+
+ ASSERT(self && addr && *addr && DEVI_BUSY_OWNED(self));
+ ASSERT(dchildp && pchildp);
+
+ /*
+ * Determine the lun and whether the lun exists. We may need to create
+ * a node for LUN0 (with compat0 driver binding) even if the lun does
+ * not exist - so we can run report_lun to find additional LUNs.
+ */
+ lun64 = scsi_addr_to_lun64(addr);
+ dtype = inq->inq_dtype & DTYPE_MASK; /* device */
+ dpq = inq->inq_dtype & DPQ_MASK;
+ dpq_vu = inq->inq_dtype & DPQ_VUNIQ ? 1 : 0;
+
+ dtype_node = scsi_addr_to_sfunc(addr); /* secondary function */
+ if (dtype_node == -1)
+ dtype_node = dtype; /* node for device */
+
+ lunexists = (dtype != dtype_node) || /* override */
+	    ((dpq_vu == 0) && (dpq == DPQ_POSSIBLE)) ||	/* ANSI */
+ (dpq_vu && (lun64 == 0)); /* VU LUN0 */
+ if (dtype == DTYPE_UNKNOWN)
+ lunexists = 0;
+
+ SCSI_HBA_LOG((_LOG(4), self, NULL,
+ "@%s dtype %x %x dpq_vu %d dpq %x: %d",
+ addr, dtype, dtype_node, dpq_vu, dpq, lunexists));
+
+ /* A non-existent LUN0 uses compatible_nodev. */
+ if (lunexists) {
+ compat0 = NULL; /* compat0 not needed */
+ } else if (lun64 == 0) {
+ compat0 = compatible_nodev;
+ SCSI_HBA_LOG((_LOG(2), self, NULL,
+ "@%s lun 0 with compat0 %s", addr, compat0));
+ } else
+ goto out; /* no node created */
+
+ /* Obtain identity information from probe node. */
+ if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, probe,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "inquiry-page-80",
+ &inq80, &inq80len) != DDI_PROP_SUCCESS)
+ inq80 = NULL;
+ if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, probe,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "inquiry-page-83",
+ &inq83, &inq83len) != DDI_PROP_SUCCESS)
+ inq83 = NULL;
+
+ /* Get "scsi-binding-set" property (if there is one). */
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, self,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
+ "scsi-binding-set", &binding_set) == DDI_PROP_SUCCESS)
+ SCSI_HBA_LOG((_LOG(2), NULL, probe,
+ "binding_set '%s'", binding_set));
+
+ /* determine the node name and compatible information */
+ scsi_hba_ident_nodename_compatible_get(inq,
+ inq80, inq80len, inq83, inq83len, binding_set, dtype_node,
+ compat0, &nname, &dname, &compat, &ncompat);
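+
+	/*
+	 * Illustrative result of the call above (values hypothetical): for a
+	 * direct-access disk the compatible list typically runs from a
+	 * specific form such as "scsiclass,00.vVENDOR.pPRODUCT" down to the
+	 * generic "scsiclass", with compat0 (when set) prepended at
+	 * precedence 0, and nname is the nodename selected from that list
+	 * (e.g. "disk").
+	 */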
+
+ if (nname == NULL) {
+ /*
+ * We will not be able to create a node because we could not
+ * determine a node name. Print out a NODRIVER level warning
+ * message with the compatible forms for the device. Note that
+ * there may be a driver.conf node that attaches to the device,
+ * which is why we only produce this warning message for debug
+ * kernels.
+ */
+ SCSI_HBA_LOG((_LOG(1), NULL, self,
+ "no node_name for device @%s:\n compatible: %s",
+ addr, *compat));
+ goto out;
+ }
+
+ /*
+ * FUTURE: some day we may want an accurate "compatible" on the probe
+ * node so that vhci_is_dev_supported() in scsi_vhci could, at
+ * least in part, determine/configure based on "compatible".
+ *
+ * if (ndi_prop_update_string_array(DDI_DEV_T_NONE, probe,
+ * "compatible", compat, ncompat) != DDI_PROP_SUCCESS) {
+ * SCSI_HBA_LOG((_LOG(3), self, NULL,
+ * "%s@%s failed probe compatible decoration",
+ * nname, addr));
+ * goto out;
+ * }
+ */
+
+ /* Encode devid from identity information. */
+ if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, dname,
+ (uchar_t *)inq, sizeof (*inq), inq80, inq80len, inq83, inq83len,
+ &devid) == DDI_SUCCESS) {
+ have_devid = 1;
+
+ /* Attempt to form guid from devid. */
+ guid = ddi_devid_to_guid(devid);
+
+ /* Produce string devid for debug. */
+ devid_str = ddi_devid_str_encode(devid, NULL);
+ SCSI_HBA_LOG((_LOG(3), self, probe, "devid '%s' guid '%s'",
+ devid_str ? devid_str : "NULL", guid ? guid : "NULL"));
+ ddi_devid_str_free(devid_str);
+ }
+
+ /*
+ * Determine if the device should be enumerated as under the vHCI
+ * (client node) or under the pHCI. By convention scsi_vhci expects
+ * the "cinfo" argument identity information to be represented as a
+ * devinfo node with the needed information (i.e. the pHCI probe node).
+ */
+ if ((guid == NULL) ||
+ (mdi_is_dev_supported(MDI_HCI_CLASS_SCSI, self, sdprobe) !=
+ MDI_SUCCESS)) {
+ SCSI_HBA_LOG((_LOG(3), self, probe, "==> devinfo"));
+
+ /*
+ * Enumerate under pHCI:
+ *
+ * Create dynamic SID dchild node. No attempt is made to
+ * transfer information (except the addressing and identity
+ * information) from the probe node to the dynamic node since
+ * there may be HBA specific side effects that the framework
+		 * does not know how to transfer.
+ */
+ ndi_devi_alloc_sleep(self, nname,
+ (se == SE_HP) ? DEVI_SID_HP_NODEID : DEVI_SID_NODEID,
+ &dchild);
+ ASSERT(dchild);
+
+ /*
+ * Decorate new node with addressing properties (via
+ * scsi_hba_ua_set()), compatible, identity information, and
+ * class.
+ */
+ if ((scsi_hba_ua_set(addr, dchild, NULL) == 0) ||
+ (ndi_prop_update_string_array(DDI_DEV_T_NONE, dchild,
+ "compatible", compat, ncompat) != DDI_PROP_SUCCESS) ||
+ (inq80 && (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
+ dchild, "inquiry-page-80", inq80, inq80len) !=
+ DDI_PROP_SUCCESS)) ||
+ (inq83 && (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
+ dchild, "inquiry-page-83", inq83, inq83len) !=
+ DDI_PROP_SUCCESS)) ||
+ (ndi_prop_update_string(DDI_DEV_T_NONE, dchild,
+ "class", "scsi") != DDI_PROP_SUCCESS)) {
+ SCSI_HBA_LOG((_LOG(2), self, NULL,
+ "@%s failed devinfo decoration", addr));
+ (void) scsi_hba_remove_node(dchild);
+ dchild = NULL;
+ goto out;
+ }
+
+ /* Bind the driver */
+ if (ndi_devi_bind_driver(dchild, 0) != NDI_SUCCESS) {
+ /* need to bind in order to register a devid */
+ SCSI_HBA_LOG((_LOGCFG, NULL, dchild,
+ "devinfo @%s created, no driver-> "
+ "no devid_register", addr));
+ goto out;
+ }
+
+ /* Register devid */
+ if (have_devid) {
+ if (ddi_devid_register(dchild, devid) == DDI_FAILURE)
+ SCSI_HBA_LOG((_LOG(1), NULL, dchild,
+ "devinfo @%s created, devid failed", addr));
+ else
+ SCSI_HBA_LOG((_LOG(2), NULL, dchild,
+ "devinfo @%s created devid", addr));
+ } else
+ SCSI_HBA_LOG((_LOG(2), NULL, dchild,
+ "devinfo @%s created, no devid", addr));
+ } else {
+ /*
+ * Enumerate under vHCI:
+ *
+ * Create a pathinfo pchild node.
+ */
+ SCSI_HBA_LOG((_LOG(3), self, probe, "==>pathinfo"));
+
+ if (mdi_pi_alloc_compatible(self, nname, guid, addr, compat,
+ ncompat, 0, &pchild) != MDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG(2), self, probe,
+ "pathinfo alloc failed"));
+ goto out;
+ }
+ ASSERT(pchild);
+
+ /*
+ * Decorate new node with addressing properties via
+ * scsi_hba_ua_set().
+ */
+ if (scsi_hba_ua_set(addr, NULL, pchild) == 0) {
+ SCSI_HBA_LOG((_LOG(1), self, NULL,
+ "%s pathinfo decoration failed",
+ mdi_pi_spathname(pchild)));
+
+ /* For hotplug, path exists even on failure. */
+ if (se != SE_HP)
+ (void) mdi_pi_free(pchild, 0);
+ goto out;
+ }
+ }
+
+ /* free the node name and compatible information */
+out: if (have_devid)
+ ddi_devid_free(devid);
+ if (guid)
+ ddi_devid_free_guid(guid);
+ if (compat)
+ scsi_hba_ident_nodename_compatible_free(nname, dname, compat);
+ if (inq80)
+ ddi_prop_free(inq80);
+ if (inq83)
+ ddi_prop_free(inq83);
+ if (binding_set)
+ ddi_prop_free(binding_set);
+
+ /* return child_type results */
+ ASSERT(!(dchild && pchild));
+ if (dchild) {
+ *dchildp = dchild;
+ *pchildp = NULL;
+ return (CHILD_TYPE_DEVINFO);
+ }
+ if (pchild) {
+ *dchildp = NULL;
+ *pchildp = pchild;
+ return (CHILD_TYPE_PATHINFO);
+ }
+ return (CHILD_TYPE_NONE);
+}
+
+/*
+ * Call scsi_device_createchild and then initchild the new node.
+ */
+static dev_info_t *
+scsi_device_configchild(dev_info_t *self, char *addr, scsi_enum_t se,
+ struct scsi_device *sdprobe, int *circp, int *ppi)
+{
+ int child_type;
+ dev_info_t *dchild;
+ mdi_pathinfo_t *pchild;
+ dev_info_t *child;
+ int rval;
+
+ ASSERT(self && addr && *addr && DEVI_BUSY_OWNED(self));
+ if (ppi)
+ *ppi = 0;
+
+ child_type = scsi_device_createchild(self, addr, se, sdprobe,
+ &dchild, &pchild);
+
+ /*
+ * Prevent multiple initialized (tran_tgt_init) nodes associated with
+ * the same @addr at the same time by calling tran_tgt_free() on the
+ * probe node prior to promotion of the 'real' node. After the call
+ * to scsi_hba_barrier_tran_tgt_free(), the HBA no longer has any
+ * probe node context.
+ */
+ scsi_hba_barrier_tran_tgt_free(sdprobe->sd_dev);
+
+ switch (child_type) {
+ case CHILD_TYPE_NONE:
+ child = NULL;
+ break;
+
+ case CHILD_TYPE_PATHINFO:
+ /*
+ * Online pathinfo: Hold the path and exit the pHCI while
+ * calling mdi_pi_online() to avoid deadlock with power
+ * management of pHCI.
+ */
+ ASSERT(MDI_PHCI(self));
+ mdi_hold_path(pchild);
+ scsi_hba_devi_exit_phci(self, *circp);
+
+ rval = mdi_pi_online(pchild, 0);
+
+ scsi_hba_devi_enter_phci(self, circp);
+ mdi_rele_path(pchild);
+
+ if (rval != MDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG(2), self, NULL,
+ "%s pathinfo online failed",
+ mdi_pi_spathname(pchild)));
+
+ /* For hotplug, path exists even when online fails */
+ if (se != SE_HP)
+ (void) mdi_pi_free(pchild, 0);
+ return (NULL);
+ }
+
+ /*
+ * Return the path_instance of the pathinfo node.
+ *
+ * NOTE: We assume that sd_inq is not path-specific.
+ */
+ if (ppi)
+ *ppi = mdi_pi_get_path_instance(pchild);
+
+ /*
+ * Fallthrough into CHILD_TYPE_DEVINFO code to promote
+ * the 'client' devinfo node as a dchild.
+ */
+ dchild = mdi_pi_get_client(pchild);
+ SCSI_HBA_LOG((_LOG(4), NULL, dchild,
+ "pathinfo online successful"));
+ /* FALLTHROUGH */
+
+ case CHILD_TYPE_DEVINFO:
+ /*
+ * For now, we ndi_devi_online() the child because some other
+ * parts of the IO framework, like degenerate devid code,
+ * depend on bus_config driving nodes to DS_ATTACHED. At some
+ * point in the future, to keep things light-weight, we would
+ * like to change the ndi_devi_online call below to be
+ *
+ * if (ddi_initchild(self, dchild) != DDI_SUCCESS)
+ *
+ * This would promote the node so that framework code could
+ * find the child with an @addr search, but does not incur
+ * attach(9E) overhead for BUS_CONFIG_ALL cases where the
+ * framework is not interested in attach of the node.
+ *
+		 * NOTE: If the specified addr has incorrect syntax (such as
+		 * bus_config of a bogus /devices path) then the call below
+		 * can fail.
+ */
+ if (ndi_devi_online(dchild, 0) != NDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG(2), NULL, dchild,
+ "devinfo online failed"));
+
+ /* failed online does not remove the node */
+ (void) scsi_hba_remove_node(dchild);
+ return (NULL);
+ }
+ SCSI_HBA_LOG((_LOG(4), NULL, dchild,
+ "devinfo initchild successful"));
+ child = dchild;
+ break;
+ }
+ return (child);
+}
+
+void
+scsi_hba_pkt_comp(struct scsi_pkt *pkt)
+{
+ scsi_hba_tran_t *tran;
+ uint8_t *sensep;
+
+ ASSERT(pkt);
+ if (pkt->pkt_comp == NULL)
+ return;
+
+ /*
+ * For HBA drivers that implement tran_setup_pkt(9E), if we are
+ * completing a 'consistent' mode DMA operation then we must
+ * perform dma_sync prior to calling pkt_comp to ensure that
+ * the target driver sees the correct data in memory.
+ */
+ ASSERT((pkt->pkt_flags & FLAG_NOINTR) == 0);
+ if (((pkt->pkt_dma_flags & DDI_DMA_CONSISTENT) &&
+ (pkt->pkt_dma_flags & DDI_DMA_READ)) &&
+ ((P_TO_TRAN(pkt)->tran_setup_pkt) != NULL)) {
+ scsi_sync_pkt(pkt);
+ }
+
+ /*
+ * If the HBA driver is using SCSAv3 scsi_hba_tgtmap_create enumeration
+ * then we detect the special ASC/ASCQ completion codes that indicate
+ * that the lun configuration of a target has changed. Since we need to
+ * determine the scsi_device given the scsi_address embedded in the
+ * scsi_pkt (via scsi_address_device(9F)), we also require use of
+ * SCSI_HBA_ADDR_COMPLEX.
+ */
+ tran = pkt->pkt_address.a_hba_tran;
+ ASSERT(tran);
+ if ((tran->tran_tgtmap == NULL) ||
+ !(tran->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX))
+ goto comp; /* not using tgtmap */
+
+ /*
+ * Check for lun-change notification and queue the scsi_pkt for
+ * lunchg1 processing. The 'pkt_comp' call to the target driver
+ * is part of lunchg1 processing.
+ */
+ if ((pkt->pkt_reason == CMD_CMPLT) &&
+ (((*pkt->pkt_scbp) & STATUS_MASK) == STATUS_CHECK) &&
+ (pkt->pkt_state & STATE_ARQ_DONE)) {
+ sensep = (uint8_t *)&(((struct scsi_arq_status *)(uintptr_t)
+ (pkt->pkt_scbp))->sts_sensedata);
+ if (((scsi_sense_key(sensep) == KEY_UNIT_ATTENTION) &&
+ (scsi_sense_asc(sensep) == 0x3f) &&
+ (scsi_sense_ascq(sensep) == 0x0e)) ||
+
+ ((scsi_sense_key(sensep) == KEY_UNIT_ATTENTION) &&
+ (scsi_sense_asc(sensep) == 0x25) &&
+ (scsi_sense_ascq(sensep) == 0x00))) {
+ /*
+			 * The host adapter is done with the packet, we use
+ * pkt_stmp stage-temporary to link the packet for
+ * lunchg1 processing.
+ *
+ * NOTE: pkt_ha_private is not available since its use
+ * extends to tran_teardown_pkt.
+ */
+ mutex_enter(&scsi_lunchg1_mutex);
+ pkt->pkt_stmp = scsi_lunchg1_list;
+ scsi_lunchg1_list = pkt;
+ if (pkt->pkt_stmp == NULL)
+ (void) cv_signal(&scsi_lunchg1_cv);
+ mutex_exit(&scsi_lunchg1_mutex);
+ return;
+ }
+ }
+
+comp: (*pkt->pkt_comp)(pkt);
+}
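+
+/*
+ * Sketch of how an HBA driver is expected to use scsi_hba_pkt_comp() (the
+ * driver and interrupt handler names are hypothetical):
+ *
+ *	static uint_t
+ *	myhba_intr(caddr_t arg1, caddr_t arg2)
+ *	{
+ *		struct scsi_pkt	*pkt;
+ *
+ *		for each completed command {
+ *			pkt = ...;
+ *			pkt->pkt_reason = CMD_CMPLT;
+ *			...
+ *			scsi_hba_pkt_comp(pkt);	 instead of (*pkt->pkt_comp)(pkt)
+ *		}
+ *		return (DDI_INTR_CLAIMED);
+ *	}
+ */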
+
+/*
+ * return 1 if the specified node is a barrier/probe node
+ */
+static int
+scsi_hba_devi_is_barrier(dev_info_t *probe)
+{
+ if (probe && (strcmp(ddi_node_name(probe), "probe") == 0))
+ return (1);
+ return (0);
+}
+
+/*
+ * A host adapter driver is easier to write if we prevent multiple initialized
+ * (tran_tgt_init) scsi_device structures for the same unit-address at the
+ * same time. We prevent this during the barrier/probe node to real child
+ * hand-off by calling scsi_hba_barrier_tran_tgt_free on the probe node
+ * prior to ddi_initchild of the 'real' node. As part of
+ * this early tran_tgt_free implementation, we must also call this function
+ * as we put a probe node on the scsi_hba_barrier_list.
+ */
+static void
+scsi_hba_barrier_tran_tgt_free(dev_info_t *probe)
+{
+ struct scsi_device *sdprobe;
+ dev_info_t *self;
+ scsi_hba_tran_t *tran;
+
+ ASSERT(probe && scsi_hba_devi_is_barrier(probe));
+
+ /* Return if we never called tran_tgt_init(9E). */
+ if (i_ddi_node_state(probe) < DS_INITIALIZED)
+ return;
+
+ sdprobe = ddi_get_driver_private(probe);
+ self = ddi_get_parent(probe);
+ ASSERT(sdprobe && self);
+ tran = ddi_get_driver_private(self);
+ ASSERT(tran);
+
+ if (tran->tran_tgt_free) {
+ /*
+ * To correctly support TRAN_CLONE, we need to use the same
+ * cloned scsi_hba_tran(9S) structure for both tran_tgt_init(9E)
+ * and tran_tgt_free(9E).
+ */
+ if (tran->tran_hba_flags & SCSI_HBA_TRAN_CLONE)
+ tran = sdprobe->sd_address.a_hba_tran;
+ SCSI_HBA_LOG((_LOG(4), NULL, probe, "tran_tgt_free EARLY"));
+
+ (*tran->tran_tgt_free) (self, probe, tran, sdprobe);
+ }
+}
+
+/*
+ * Add an entry to the list of barrier nodes to be asynchronously deleted by
+ * the scsi_hba_barrier_daemon after the specified timeout. Nodes on
+ * the barrier list are used to implement the bus_config probe cache
+ * of non-existent devices. The nodes are at DS_INITIALIZED, so their
+ * @addr is established for searching. Since devi_ref of a DS_INITIALIZED
+ * node will *not* prevent demotion, demotion is prevented by setting
+ * sd_uninit_prevent. Devinfo snapshots attempt to attach probe cache
+ * nodes, and on failure attempt to demote the node (without the participation
+ * of bus_unconfig) to DS_BOUND - this demotion is prevented via
+ * sd_uninit_prevent causing any attempted DDI_CTLOPS_UNINITCHILD to fail.
+ * Probe nodes are bound to nulldriver. The list is sorted by
+ * expiration time.
+ *
+ * NOTE: If we drove a probe node to DS_ATTACHED, we could use ndi_hold_devi()
+ * to prevent demotion (instead of sd_uninit_prevent).
+ */
+static void
+scsi_hba_barrier_add(dev_info_t *probe, int seconds)
+{
+ struct scsi_hba_barrier *nb;
+ struct scsi_hba_barrier *b;
+ struct scsi_hba_barrier **bp;
+ clock_t endtime;
+
+ ASSERT(scsi_hba_devi_is_barrier(probe));
+
+ /* HBA is no longer responsible for nodes on the barrier list. */
+ scsi_hba_barrier_tran_tgt_free(probe);
+
+ nb = kmem_alloc(sizeof (struct scsi_hba_barrier), KM_SLEEP);
+ mutex_enter(&scsi_hba_barrier_mutex);
+ endtime = ddi_get_lbolt() + drv_usectohz(seconds * MICROSEC);
+ for (bp = &scsi_hba_barrier_list; (b = *bp) != NULL;
+ bp = &b->barrier_next)
+ if (b->barrier_endtime > endtime)
+ break;
+ nb->barrier_next = *bp;
+ nb->barrier_endtime = endtime;
+ nb->barrier_probe = probe;
+ *bp = nb;
+ if (bp == &scsi_hba_barrier_list)
+ (void) cv_signal(&scsi_hba_barrier_cv);
+ mutex_exit(&scsi_hba_barrier_mutex);
+}
+
+/*
+ * Attempt to remove a devinfo node, return 1 if removed. We don't
+ * try to remove barrier nodes that have sd_uninit_prevent set
+ * (even though they should fail device_uninitchild).
+ */
+static int
+scsi_hba_remove_node(dev_info_t *child)
+{
+ dev_info_t *self = ddi_get_parent(child);
+ struct scsi_device *sd;
+ int circ;
+ int remove = 1;
+ int ret = 0;
+ char na[SCSI_MAXNAMELEN];
+
+ scsi_hba_devi_enter(self, &circ);
+
+ /* Honor sd_uninit_prevent on barrier nodes */
+ if (scsi_hba_devi_is_barrier(child)) {
+ sd = ddi_get_driver_private(child);
+ if (sd && sd->sd_uninit_prevent)
+ remove = 0;
+ }
+
+ if (remove) {
+ (void) ddi_deviname(child, na);
+ if (ddi_remove_child(child, 0) != DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "remove_node failed"));
+ } else {
+ child = NULL; /* child is gone */
+ SCSI_HBA_LOG((_LOG(4), self, NULL,
+ "remove_node removed %s", *na ? &na[1] : na));
+ ret = 1;
+ }
+ } else {
+ SCSI_HBA_LOG((_LOG(4), NULL, child, "remove_node prevented"));
+ }
+ scsi_hba_devi_exit(self, circ);
+ return (ret);
+}
+
+/*
+ * The asynchronous barrier deletion daemon. Waits for a barrier timeout
+ * to expire, then deletes the barrier (removes it as a child).
+ */
+/*ARGSUSED*/
+static void
+scsi_hba_barrier_daemon(void *arg)
+{
+ struct scsi_hba_barrier *b;
+ dev_info_t *probe;
+ callb_cpr_t cprinfo;
+ int circ;
+ dev_info_t *self;
+
+ CALLB_CPR_INIT(&cprinfo, &scsi_hba_barrier_mutex,
+ callb_generic_cpr, "scsi_hba_barrier_daemon");
+again: mutex_enter(&scsi_hba_barrier_mutex);
+ for (;;) {
+ b = scsi_hba_barrier_list;
+ if (b == NULL) {
+ /* all barriers expired, wait for barrier_add */
+ CALLB_CPR_SAFE_BEGIN(&cprinfo);
+ (void) cv_wait(&scsi_hba_barrier_cv,
+ &scsi_hba_barrier_mutex);
+ CALLB_CPR_SAFE_END(&cprinfo, &scsi_hba_barrier_mutex);
+ } else {
+ if (ddi_get_lbolt() >= b->barrier_endtime) {
+ /*
+				 * Drop and retry if there is a lock ordering
+				 * issue. Do this before calling
+				 * scsi_hba_remove_node() and deadlocking.
+ */
+ probe = b->barrier_probe;
+ self = ddi_get_parent(probe);
+ if (scsi_hba_devi_tryenter(self, &circ) == 0) {
+delay: mutex_exit(&scsi_hba_barrier_mutex);
+ delay_random(5);
+ goto again;
+ }
+
+ /* process expired barrier */
+ if (!scsi_hba_remove_node(probe)) {
+ /* remove failed, delay and retry */
+ SCSI_HBA_LOG((_LOG(4), NULL, probe,
+ "delay expire"));
+ scsi_hba_devi_exit(self, circ);
+ goto delay;
+ }
+ scsi_hba_barrier_list = b->barrier_next;
+ kmem_free(b, sizeof (struct scsi_hba_barrier));
+ scsi_hba_devi_exit(self, circ);
+ } else {
+ /* establish timeout for next barrier expire */
+ (void) cv_timedwait(&scsi_hba_barrier_cv,
+ &scsi_hba_barrier_mutex,
+ b->barrier_endtime);
+ }
+ }
+ }
+}
+
+/*
+ * Remove all barriers associated with the specified HBA. This is called
+ * from the bus_unconfig implementation to remove probe nodes associated
+ * with the specified HBA (self) so that probe nodes that have not expired
+ * will not prevent DR of the HBA.
+ */
+static void
+scsi_hba_barrier_purge(dev_info_t *self)
+{
+ struct scsi_hba_barrier **bp;
+ struct scsi_hba_barrier *b;
+
+ mutex_enter(&scsi_hba_barrier_mutex);
+ for (bp = &scsi_hba_barrier_list; (b = *bp) != NULL; ) {
+ if (ddi_get_parent(b->barrier_probe) == self) {
+ if (scsi_hba_remove_node(b->barrier_probe)) {
+ *bp = b->barrier_next;
+ kmem_free(b, sizeof (struct scsi_hba_barrier));
+ } else {
+ SCSI_HBA_LOG((_LOG(4), NULL, b->barrier_probe,
+ "skip purge"));
+ }
+ } else
+ bp = &b->barrier_next;
+ }
+
+ mutex_exit(&scsi_hba_barrier_mutex);
+}
+
+/*
+ * LUN-change processing daemons: processing occurs in two stages:
+ *
+ * Stage 1: Daemon waits for a lunchg1 queued scsi_pkt, dequeues the pkt,
+ * forms the path, completes the scsi_pkt (pkt_comp), and
+ * queues the path for stage 2 processing. The use of stage 1
+ * avoids issues related to memory allocation in interrupt context
+ * (scsi_hba_pkt_comp()). We delay the pkt_comp completion until
+ * after lunchg1 processing forms the path for stage 2 - this is
+ * done to prevent the target driver from detaching until the
+ * path formation is complete (driver with outstanding commands
+ * should not detach).
+ *
+ * Stage 2: Daemon waits for a lunchg2 queued request, dequeues the
+ * request, and opens the path using ldi_open_by_name(). The
+ * path opened uses a special "@taddr,*" unit address that will
+ * trigger lun enumeration in scsi_hba_bus_configone(). We
+ * trigger lun enumeration in stage 2 to avoid problems when
+ * initial ASC/ASCQ trigger occurs during discovery.
+ */
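+
+/*
+ * Informal flow sketch of the above: an interrupt completes a command ->
+ * scsi_hba_pkt_comp() detects the lun-change ASC/ASCQ and queues the pkt on
+ * scsi_lunchg1_list -> scsi_lunchg1_daemon() forms ".../luns@taddr,*",
+ * completes the pkt, and queues a scsi_lunchg2 element ->
+ * scsi_lunchg2_daemon() ldi_open()s that path, triggering lun enumeration
+ * in scsi_hba_bus_configone().
+ */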
+/*ARGSUSED*/
+static void
+scsi_lunchg1_daemon(void *arg)
+{
+ callb_cpr_t cprinfo;
+ struct scsi_pkt *pkt;
+ scsi_hba_tran_t *tran;
+ dev_info_t *self;
+ struct scsi_device *sd;
+ char *ua, *p;
+ char taddr[SCSI_MAXNAMELEN];
+ char path[MAXPATHLEN];
+ struct scsi_lunchg2 *lunchg2;
+
+ CALLB_CPR_INIT(&cprinfo, &scsi_lunchg1_mutex,
+ callb_generic_cpr, "scsi_lunchg1_daemon");
+ mutex_enter(&scsi_lunchg1_mutex);
+ for (;;) {
+ pkt = scsi_lunchg1_list;
+ if (pkt == NULL) {
+ /* All lunchg1 processing requests serviced, wait. */
+ CALLB_CPR_SAFE_BEGIN(&cprinfo);
+ (void) cv_wait(&scsi_lunchg1_cv,
+ &scsi_lunchg1_mutex);
+ CALLB_CPR_SAFE_END(&cprinfo, &scsi_lunchg1_mutex);
+ continue;
+ }
+
+ /* Unlink and perform lunchg1 processing on pkt. */
+ scsi_lunchg1_list = pkt->pkt_stmp;
+
+ /* Determine initiator port (self) from the pkt_address. */
+ tran = pkt->pkt_address.a_hba_tran;
+ ASSERT(tran && tran->tran_tgtmap && tran->tran_iport_dip);
+ self = tran->tran_iport_dip;
+
+ /*
+		 * Determine scsi_device from pkt_address (depends on
+ * SCSI_HBA_ADDR_COMPLEX).
+ */
+ sd = scsi_address_device(&(pkt->pkt_address));
+ ASSERT(sd);
+ if (sd == NULL) {
+ (*pkt->pkt_comp)(pkt);
+ continue;
+ }
+
+ /* Determine unit-address from scsi_device. */
+ ua = scsi_device_unit_address(sd);
+
+ /* Extract taddr from the unit-address. */
+ for (p = taddr; (*ua != ',') && (*ua != '\0'); )
+ *p++ = *ua++;
+ *p = '\0'; /* NULL terminate taddr */
+
+ /*
+ * Form path using special "@taddr,*" notation to trigger
+ * lun enumeration.
+ */
+ (void) ddi_pathname(self, path);
+ (void) strcat(path, "/luns@");
+ (void) strcat(path, taddr);
+ (void) strcat(path, ",*");
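+
+		/*
+		 * For example (path illustrative), an iport at .../iport@f
+		 * with a lun change reported by target port
+		 * w5000c50000d0377d would queue the path
+		 * .../iport@f/luns@w5000c50000d0377d,* for stage 2.
+		 */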
+
+ /*
+ * Now that we have the path, complete the pkt that
+ * triggered lunchg1 processing.
+ */
+ (*pkt->pkt_comp)(pkt);
+
+ /* Allocate element for stage2 processing queue. */
+ lunchg2 = kmem_alloc(sizeof (*lunchg2), KM_SLEEP);
+ lunchg2->lunchg2_path = strdup(path);
+
+ /* Queue and dispatch to stage 2. */
+ SCSI_HBA_LOG((_LOG(2), self, NULL,
+ "lunchg stage1: queue %s", lunchg2->lunchg2_path));
+ mutex_enter(&scsi_lunchg2_mutex);
+ lunchg2->lunchg2_next = scsi_lunchg2_list;
+ scsi_lunchg2_list = lunchg2;
+ if (lunchg2->lunchg2_next == NULL)
+ (void) cv_signal(&scsi_lunchg2_cv);
+ mutex_exit(&scsi_lunchg2_mutex);
+ }
+}
+
/*ARGSUSED*/
+static void
+scsi_lunchg2_daemon(void *arg)
+{
+ callb_cpr_t cprinfo;
+ struct scsi_lunchg2 *lunchg2;
+ ldi_ident_t li;
+ ldi_handle_t lh;
+
+ CALLB_CPR_INIT(&cprinfo, &scsi_lunchg2_mutex,
+ callb_generic_cpr, "scsi_lunchg2_daemon");
+
+ li = ldi_ident_from_anon();
+ mutex_enter(&scsi_lunchg2_mutex);
+ for (;;) {
+ lunchg2 = scsi_lunchg2_list;
+ if (lunchg2 == NULL) {
+ /* All lunchg2 processing requests serviced, wait. */
+ CALLB_CPR_SAFE_BEGIN(&cprinfo);
+ (void) cv_wait(&scsi_lunchg2_cv,
+ &scsi_lunchg2_mutex);
+ CALLB_CPR_SAFE_END(&cprinfo, &scsi_lunchg2_mutex);
+ continue;
+ }
+
+		/* Unlink and perform lunchg2 processing on the element. */
+ scsi_lunchg2_list = lunchg2->lunchg2_next;
+
+ /*
+ * Open and close the path to trigger lun enumeration. We
+ * don't expect the open to succeed, but we do expect code in
+ * scsi_hba_bus_configone() to trigger lun enumeration.
+ */
+ SCSI_HBA_LOG((_LOG(2), NULL, NULL,
+ "lunchg stage2: open %s", lunchg2->lunchg2_path));
+ if (ldi_open_by_name(lunchg2->lunchg2_path,
+ FREAD, kcred, &lh, li) == 0)
+ (void) ldi_close(lh, FREAD, kcred);
+
+ /* Free path and linked element. */
+ strfree(lunchg2->lunchg2_path);
+ kmem_free(lunchg2, sizeof (*lunchg2));
+ }
+}
+
+/*
+ * Enumerate a child at the specified @addr. If a device exists @addr then
+ * ensure that we have the appropriately named devinfo node for it. Name is
+ * NULL in the bus_config_all case. This routine has no knowledge of the
+ * format of an @addr string or associated addressing properties.
+ *
+ * The caller must guarantee that there is an open scsi_hba_devi_enter on the
+ * parent. We return the scsi_device structure for the child device. This
+ * scsi_device structure is valid until the caller does a scsi_hba_devi_exit
+ * of the parent. The caller can do an ndi_hold_devi of the child prior to the
+ * scsi_hba_devi_exit to extend the validity of the child.
+ *
+ * In some cases the returned scsi_device structure may be used to drive
+ * additional SCMD_REPORT_LUNS operations by bus_config_all callers.
+ *
+ * The first operation performed is to see if there is a dynamic SID node
+ * already attached at the specified "name@addr". This is the fastpath
+ * case for resolving a reference to a node that has already been created.
+ * All other references are serialized for a given @addr prior to probing
+ * to determine the type of device, if any, at the specified @addr.
+ * If no device is present then NULL is returned. The fact that a
+ * device does not exist may be determined via the barrier/probe cache,
+ * minimizing the probes of non-existent devices.
+ *
+ * When there is a device present the dynamic SID node is created based on
+ * the device found. If a driver.conf node exists for the same @addr it
+ * will either merge into the dynamic SID node (if the SID node bound to
+ * that driver), or exist independently. To prevent the actions of one driver
+ * causing side effects in another, code prevents multiple SID nodes from
+ * binding to the same "@addr" at the same time. There is autodetach code
+ * to allow one device to be replaced with another at the same @addr for
+ * slot addressed SCSI bus implementations (SPI). For compatibility with
+ * legacy driver.conf behavior, the code does not prevent multiple driver.conf
+ * nodes from attaching to the same @addr at the same time.
+ *
+ * This routine may have the side effect of creating nodes for devices other
+ * than the one being sought. It is possible that there is a different type of
+ * target device at that target/lun address than we were asking for. In that
+ * case it is the caller's responsibility to determine whether the device we
+ * found, if any, at the specified address, is the one it really wanted.
+ */
+static struct scsi_device *
+scsi_device_config(dev_info_t *self, char *name, char *addr, scsi_enum_t se,
+ int *circp, int *ppi)
+{
+ dev_info_t *child = NULL;
+ dev_info_t *probe = NULL;
+ struct scsi_device *sdchild;
+ struct scsi_device *sdprobe;
+ dev_info_t *dsearch;
+ mdi_pathinfo_t *psearch;
+ major_t major;
+ int sp;
+ int pi = 0;
+ int wait_msg = scsi_hba_wait_msg;
+ int chg;
+
+ ASSERT(self && addr && DEVI_BUSY_OWNED(self));
+
+ SCSI_HBA_LOG((_LOG(4), self, NULL, "%s@%s wanted",
+ name ? name : "", addr));
+
+ /* playing with "probe" node name is dangerous */
+ if (name && (strcmp(name, "probe") == 0))
+ return (NULL);
+
+ /*
+ * NOTE: use 'goto done;' or 'goto fail;'. There should only be one
+ * 'return' statement from here to the end of the function - the one
+ * on the last line of the function.
+ */
+
+ /*
+ * Fastpath: search to see if we are requesting a named SID node that
+ * already exists (we already created) - probe node does not count.
+ * scsi_hba_find_child() does not hold the returned devinfo node, but
+ * this is OK since the caller has a scsi_hba_devi_enter on the
+ * attached parent HBA (self). The caller is responsible for attaching
+ * and placing a hold on the child (directly via ndi_hold_devi or
+ * indirectly via ndi_busop_bus_config) before doing an
+ * scsi_hba_devi_exit on the parent.
+ *
+ * NOTE: This fastpath prevents detecting a driver binding change
+ * (autodetach) if the same nodename is used for old and new binding.
+ */
+ /* first call is with init set */
+ (void) scsi_hba_find_child(self, name, addr,
+ 1, &dsearch, NULL, &pi);
+ if (dsearch && scsi_hba_dev_is_sid(dsearch) &&
+ !scsi_hba_devi_is_barrier(dsearch)) {
+ SCSI_HBA_LOG((_LOG(4), NULL, dsearch,
+ "%s@%s devinfo fastpath", name ? name : "", addr));
+ child = dsearch;
+ goto done;
+ }
+
+ /*
+ * Create a barrier devinfo node used to "probe" the device with. We
+ * need to drive this node to DS_INITIALIZED so that the
+ * DDI_CTLOPS_INITCHILD has occurred, bringing the SCSA transport to
+ * a state useable state for issuing our "probe" commands. We establish
+	 * a usable state for issuing our "probe" commands. We establish
+ * property of "scsiprobe". The compatible property must be associated
+ * in /etc/driver_aliases with a scsi target driver available in the
+ * root file system (sd).
+ *
+ * The "probe" that we perform on the barrier node, after it is
+ * DS_INITIALIZED, is used to find the information needed to create a
+ * dynamic devinfo (SID) node. This "probe" is separate from the
+ * probe(9E) call associated with the transition of a node from
+ * DS_INITIALIZED to DS_PROBED. The probe(9E) call that eventually
+ * occurs against the created SID node should find ddi_dev_is_sid and
+ * just return DDI_PROBE_DONTCARE.
+ *
+ * Trying to avoid the use of a barrier node is not a good idea
+ * because we may have an HBA driver that uses generic bus_config
+ * (this code) but implements its own DDI_CTLOPS_INITCHILD with side
+ * effects that we can't duplicate (such as the ATA nexus driver).
+ *
+ * The probe/barrier node plays an integral part of the locking scheme.
+ * The objective is to single thread probes of the same device (same
+ * @addr) while allowing parallelism for probes of different devices
+ * with the same parent. At this point we are serialized on our self.
+ * For parallelism we will need to release our self. Prior to release
+ * we construct a barrier for probes of the same device to serialize
+ * against. The "probe@addr" node acts as this barrier. An entering
+ * thread must wait until the probe node does not exist - it can then
+ * create and link the probe node - dropping the HBA (self) lock after
+ * the node is linked and visible (after ddi_initchild). A side effect
+ * of this is that transports should not "go over the wire" (i.e. do
+	 * things that incur significant delays) until after tran_tgt_init(9E).
+	 * This means that the first "over the wire" operation should occur
+	 * at tran_tgt_probe(9E) time - when things are running in parallel
+ * again.
+ *
+ * If the probe node exists then another probe with the same @addr is
+ * in progress, we must wait until there is no probe in progress
+ * before proceeding, and when we proceed we must continue to hold the
+ * HBA (self) until we have linked a new probe node as a barrier.
+ *
+ * When a device is found to *not* exist, its probe/barrier node may be
+ * marked with DEVICE_REMOVED with node deletion scheduled for some
+ * future time (seconds). This asynchronous deletion allows the
+ * framework to detect repeated requests to the same non-existent
+ * device and avoid overhead associated with contacting a non-existent
+ * device again and again.
+ */
+ for (;;) {
+ /*
+ * Search for probe node - they should only exist as devinfo
+ * nodes.
+ */
+ (void) scsi_hba_find_child(self, "probe", addr,
+ 0, &probe, &psearch, NULL);
+ if (probe == NULL) {
+ if (psearch)
+ SCSI_HBA_LOG((_LOG(2), self,
+ mdi_pi_get_client(psearch),
+ "???? @%s 'probe' search found "
+ "pathinfo: %p", addr, (void *)psearch));
+ break;
+ }
+
+ /*
+ * The barrier node may cache the non-existence of a device
+ * by leaving the barrier node in place (with
+		 * DEVI_DEVICE_REMOVED flag set) for some amount of time after
+ * the failure of a probe. This flag is used to fail
+ * additional probes until the barrier probe node is deleted,
+ * which will occur from a timeout some time after a failed
+ * probe. The failed probe will use DEVI_SET_DEVICE_REMOVED
+		 * and schedule probe node deletion from a timeout. The caller's
+ * scsi_hba_devi_exit on the way out of the first failure will
+ * do the cv_broadcast associated with the cv_wait below - this
+ * handles threads that wait prior to DEVI_DEVICE_REMOVED being
+ * set.
+ */
+ if (DEVI_IS_DEVICE_REMOVED(probe)) {
+ SCSI_HBA_LOG((_LOG(3), NULL, probe,
+ "detected probe DEVICE_REMOVED"));
+ probe = NULL; /* deletion already scheduled */
+ goto fail;
+ }
+
+ /*
+ * Drop the lock on the HBA (self) and wait until the probe in
+		 * progress has completed. A change in the sibling list from
+ * removing the probe node will cause cv_wait to return
+ * (scsi_hba_devi_exit does the cv_broadcast).
+ */
+ if (wait_msg) {
+ wait_msg--;
+ SCSI_HBA_LOG((_LOG(2), NULL, probe,
+ "exists, probe already in progress: %s", wait_msg ?
+ "waiting..." : "last msg, but still waiting..."));
+ }
+
+ /*
+ * NOTE: we could avoid rare case of one second delay by
+ * implementing scsi_hba_devi_exit_and_wait based on
+ * ndi/mdi_devi_exit_and_wait (and consider switching devcfg.c
+ * code to use these ndi/mdi interfaces too).
+ */
+ scsi_hba_devi_exit(self, *circp);
+ mutex_enter(&DEVI(self)->devi_lock);
+ (void) cv_timedwait(&DEVI(self)->devi_cv,
+ &DEVI(self)->devi_lock,
+ ddi_get_lbolt() + drv_usectohz(MICROSEC));
+ mutex_exit(&DEVI(self)->devi_lock);
+ scsi_hba_devi_enter(self, circp);
+ }
+ ASSERT(probe == NULL);
+
+ /*
+ * Search to see if we are requesting a SID node that already exists.
+ * We hold the HBA (self) and there is not another probe in progress at
+ * the same @addr. scsi_hba_find_child() does not hold the returned
+ * devinfo node but this is OK since we hold the HBA (self).
+ */
+ if (name) {
+ (void) scsi_hba_find_child(self, name, addr,
+ 1, &dsearch, NULL, &pi);
+ if (dsearch && scsi_hba_dev_is_sid(dsearch)) {
+ SCSI_HBA_LOG((_LOG(4), NULL, dsearch,
+ "%s@%s probe devinfo fastpath",
+ name ? name : "", addr));
+ child = dsearch;
+ goto done;
+ }
+ }
+
+ /*
+ * We are looking for a SID node that does not exist or a driver.conf
+ * node.
+ *
+ * To avoid probe side effects, before we probe the device at the
+ * specified address we need to check to see if there is already an
+ * initialized child "@addr".
+ *
+ * o If we find an initialized SID child and name is NULL or matches
+	 *    the node name or the name of the attached driver then we return the
+ * existing node.
+ *
+ * o If we find a non-matching SID node, we will attempt to autodetach
+ * and remove the node in preference to our new node.
+ *
+ * o If SID node found does not match and can't be autodetached, we
+ * fail: we only allow one SID node at an address.
+ *
+ * NOTE: This code depends on SID nodes showing up prior to
+ * driver.conf nodes in the sibling list.
+ */
+ for (;;) {
+ /* first NULL name call is with init set */
+ (void) scsi_hba_find_child(self, NULL, addr,
+ 1, &dsearch, NULL, &pi);
+ if (dsearch == NULL)
+ break;
+ ASSERT(!scsi_hba_devi_is_barrier(dsearch));
+
+ /*
+ * To detect changes in driver binding that should attempt
+ * autodetach we determine the major number of the driver
+ * that should currently be associated with the device based
+ * on the compatible property.
+ */
+ major = DDI_MAJOR_T_NONE;
+ if (scsi_hba_dev_is_sid(dsearch))
+ major = ddi_compatible_driver_major(dsearch, NULL);
+ if ((major == DDI_MAJOR_T_NONE) && (name == NULL))
+ major = ddi_driver_major(dsearch);
+
+ if ((scsi_hba_dev_is_sid(dsearch) ||
+ (i_ddi_node_state(dsearch) >= DS_INITIALIZED)) &&
+ ((name == NULL) ||
+ (strcmp(ddi_node_name(dsearch), name) == 0) ||
+ (strcmp(ddi_driver_name(dsearch), name) == 0)) &&
+ (major == ddi_driver_major(dsearch))) {
+ SCSI_HBA_LOG((_LOG(3), NULL, dsearch,
+ "already attached @addr"));
+ child = dsearch;
+ goto done;
+ }
+
+ if (!scsi_hba_dev_is_sid(dsearch))
+ break; /* driver.conf node */
+
+ /*
+ * Implement autodetach of SID node for situations like a
+ * previously "scsinodev" LUN0 coming into existence (or a
+ * disk/tape on an SPI transport at same addr but never both
+ * powered on at the same time). Try to autodetach the existing
+ * SID node @addr. If that works, search again - otherwise fail.
+ */
+ SCSI_HBA_LOG((_LOG(2), NULL, dsearch,
+ "looking for %s@%s: SID @addr exists, autodetach",
+ name ? name : "", addr));
+ if (!scsi_hba_remove_node(dsearch)) {
+ SCSI_HBA_LOG((_LOG(2), NULL, dsearch,
+ "autodetach @%s failed: fail %s@%s",
+ addr, name ? name : "", addr));
+ goto fail;
+ }
+ SCSI_HBA_LOG((_LOG(2), self, NULL, "autodetach @%s OK", addr));
+ }
+
+ /*
+ * We will be creating a new SID node, allocate probe node
+ * used to find out information about the device located @addr.
+ * The probe node also acts as a barrier against additional
+ * configuration at the same address, and in the case of non-existent
+ * devices it will (for some amount of time) avoid re-learning that
+ * the device does not exist on every reference. Once the probe
+ * node is DS_LINKED we can drop the HBA (self).
+ *
+ * The probe node is allocated as a hidden node so that it does not
+ * show up in devinfo snapshots.
+ */
+ ndi_devi_alloc_sleep(self, "probe",
+ (se == SE_HP) ? DEVI_SID_HP_HIDDEN_NODEID : DEVI_SID_HIDDEN_NODEID,
+ &probe);
+ ASSERT(probe);
+
+ /*
+ * Decorate the probe node with the property representation of @addr
+ * unit-address string prior to initchild so that initchild can
+ * construct the name of the node from properties and tran_tgt_init
+ * implementation can determine what LUN is being referenced.
+ *
+	 * If the addr specified has incorrect syntax (bus_config of a bogus
+	 * /devices path) then scsi_hba_ua_set can fail. If the address
+ * is not understood by the SCSA HBA driver then this operation will
+ * work, but tran_tgt_init may still fail (for example the HBA
+ * driver may not support secondary functions).
+ */
+ if (scsi_hba_ua_set(addr, probe, NULL) == 0) {
+ SCSI_HBA_LOG((_LOG(2), NULL, probe,
+ "@%s failed scsi_hba_ua_set", addr));
+ goto fail;
+ }
+
+ /*
+ * Set the class property to "scsi". This is sufficient to distinguish
+ * the node for HBAs that have multiple classes of children (like uata
+ * - which has "dada" class for ATA children and "scsi" class for
+ * ATAPI children) and may not use our scsi_busctl_initchild()
+ * implementation. We also add a "compatible" property of "scsiprobe"
+ * to select the probe driver.
+ */
+ if ((ndi_prop_update_string(DDI_DEV_T_NONE, probe,
+ "class", "scsi") != DDI_PROP_SUCCESS) ||
+ (ndi_prop_update_string_array(DDI_DEV_T_NONE, probe,
+ "compatible", &compatible_probe, 1) != DDI_PROP_SUCCESS)) {
+ SCSI_HBA_LOG((_LOG(1), NULL, probe,
+ "@%s failed node decoration", addr));
+ goto fail;
+ }
+
+ /*
+ * Promote probe node to DS_INITIALIZED so that transport can be used
+ * for scsi_probe. After this the node is linked and visible as a
+ * barrier for serialization of other @addr operations.
+ *
+ * NOTE: If we attached the probe node, we could get rid of
+ * uninit_prevent.
+ */
+ if (ddi_initchild(self, probe) != DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG(2), NULL, probe,
+ "@%s failed initchild", addr));
+
+ /* probe node will be removed in fail exit path */
+ goto fail;
+ }
+
+ /* get the scsi_device structure of the probe node */
+ sdprobe = ddi_get_driver_private(probe);
+ ASSERT(sdprobe);
+
+ /*
+ * Do scsi_probe. The probe node is linked and visible as a barrier.
+ * We prevent uninitialization of the probe node and drop our HBA (self)
+ * while we run scsi_probe() of this "@addr". This allows the framework
+ * to support multiple scsi_probes for different devices attached to
+ * the same HBA (self) in parallel. We prevent node demotion of the
+ * probe node from DS_INITIALIZED by setting sd_uninit_prevent. The
+ * probe node can not be successfully demoted below DS_INITIALIZED
+ * (scsi_busctl_uninitchild will fail) until we zero sd_uninit_prevent
+ * as we are freeing the node via scsi_hba_remove_node(probe).
+ */
+ sdprobe->sd_uninit_prevent++;
+ scsi_hba_devi_exit(self, *circp);
+ sp = scsi_probe(sdprobe, SLEEP_FUNC);
+
+ /* Introduce a small delay here to increase parallelism. */
+ delay_random(5);
+
+ if (sp == SCSIPROBE_EXISTS) {
+ /*
+ * For a device that exists, while still running in parallel,
+ * also get identity information from device. This is done
+ * separate from scsi_probe/tran_tgt_probe/scsi_hba_probe
+ * since the probe code path may still be used for HBAs
+ * that don't use common bus_config services (we don't want
+ * to expose that code path to a behavior change). This
+ * operation is called 'identity' to avoid confusion with
+ * deprecated identify(9E).
+ *
+ * Future: We may eventually want to allow HBA customization via
+ * scsi_identity/tran_tgt_identity/scsi_device_identity, but for
+ * now we just scsi_device_identity.
+ *
+ * The identity operation will establish additional properties
+ * on the probe node related to device identity:
+ *
+ * "inquiry-page-80" byte array of SCSI page 80
+ * "inquiry-page-83" byte array of SCSI page 83
+ *
+ * These properties will be used to generate a devid
+ * (ddi_devid_scsi_encode) and guid - and to register
+ * (ddi_devid_register) a devid for the device.
+ *
+		 * If identity fails (non-zero return) because we had
+		 * allocation problems or the device returned inconsistent
+		 * results, then we pretend that the device does not exist.
+ */
+ if (scsi_device_identity(sdprobe, SLEEP_FUNC)) {
+ if (ndi_dev_is_hotplug_node(probe))
+ SCSI_HBA_LOG((_LOG(WARN), NULL, probe,
+ "enumeration failed during identity"));
+ else
+ SCSI_HBA_LOG((_LOG(2), NULL, probe,
+ "enumeration failed during identity"));
+ sp = SCSIPROBE_FAILURE;
+ }
+
+ /*
+ * Future: Is there anything more we can do here to help avoid
+ * serialization on iport parent during scsi_device attach(9E)?
+ */
+ }
+ scsi_hba_devi_enter(self, circp);
+ sdprobe->sd_uninit_prevent--;
+
+ if (sp != SCSIPROBE_EXISTS) {
+ if (ndi_dev_is_hotplug_node(probe))
+ SCSI_HBA_LOG((_LOG(WARN), NULL, probe,
+ "enumeration failed during probe"));
+ else
+ SCSI_HBA_LOG((_LOG(2), NULL, probe,
+			    "enumeration failed during probe"));
+
+ if (scsi_hba_barrier_timeout) {
+ /*
+ * Target does not exist. Mark the barrier probe node
+ * as DEVICE_REMOVED and schedule an asynchronous
+ * deletion of the node in scsi_hba_barrier_timeout
+ * seconds. We keep our hold on the probe node
+ * until we are ready perform the asynchronous node
+			 * until we are ready to perform the asynchronous node
+ */
+ SCSI_HBA_LOG((_LOG(3), NULL, probe,
+ "set probe DEVICE_REMOVED"));
+ mutex_enter(&DEVI(probe)->devi_lock);
+ DEVI_SET_DEVICE_REMOVED(probe);
+ mutex_exit(&DEVI(probe)->devi_lock);
+
+ scsi_hba_barrier_add(probe, scsi_hba_barrier_timeout);
+ probe = NULL;
+ }
+ goto fail;
+ }
+
+ /* Create the child node from the inquiry data in the probe node. */
+ if ((child = scsi_device_configchild(self, addr, se, sdprobe,
+ circp, &pi)) == NULL) {
+ /*
+ * This may fail because there was no driver binding identified
+ * via driver_alias. We may still have a conf node.
+ */
+ if (name) {
+ (void) scsi_hba_find_child(self, name, addr,
+ 0, &child, NULL, &pi);
+ if (child)
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "using driver.conf driver binding"));
+ }
+ if (child == NULL) {
+ SCSI_HBA_LOG((_LOG(2), NULL, probe,
+ "device not configured"));
+ goto fail;
+ }
+ }
+
+ /*
+ * Transfer the inquiry data from the probe node to the child
+ * SID node to avoid an extra scsi_probe. Callers depend on
+ * established inquiry data for the returned scsi_device.
+ */
+ sdchild = ddi_get_driver_private(child);
+ if (sdchild && (sdchild->sd_inq == NULL)) {
+ sdchild->sd_inq = sdprobe->sd_inq;
+ sdprobe->sd_inq = NULL;
+ }
+
+ /*
+ * If we are doing a bus_configone and the node we created has the
+ * wrong node and driver name then switch the return result to a
+ * driver.conf node with the correct name - if such a node exists.
+ */
+ if (name && (strcmp(ddi_node_name(child), name) != 0) &&
+ (strcmp(ddi_driver_name(child), name) != 0)) {
+ (void) scsi_hba_find_child(self, name, addr,
+ 0, &dsearch, NULL, &pi);
+ if (dsearch == NULL) {
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "wrong device configured %s@%s", name, addr));
+ /*
+ * We can't remove when modrootloaded == 0 in case
+			 * the boot-device uses a generic name and
+ * scsi_hba_nodename_compatible_get() returned a
+ * legacy binding-set driver oriented name.
+ */
+ if (modrootloaded) {
+ (void) scsi_hba_remove_node(child);
+ child = NULL;
+ goto fail;
+ }
+ } else {
+ SCSI_HBA_LOG((_LOG(2), NULL, dsearch,
+ "device configured, but switching to driver.conf"));
+ child = dsearch;
+ }
+ }
+
+ /* get the scsi_device structure from the node */
+ SCSI_HBA_LOG((_LOG(3), NULL, child, "device configured"));
+
+ if (child) {
+done: ASSERT(child);
+ sdchild = ddi_get_driver_private(child);
+ ASSERT(sdchild);
+ /*
+ * We may have ended up here after promotion of a previously
+ * demoted node, where demotion deleted sd_inq data in
+ * scsi_busctl_uninitchild. We redo the scsi_probe() to
+ * reestablish sd_inq. We also want to redo the scsi_probe
+ * for devices are currently device_isremove in order to
+		 * for devices that are currently device_isremoved in order to
+ */
+ if ((sdchild->sd_inq == NULL) ||
+ ndi_devi_device_isremoved(child)) {
+
+ /* hotplug_node can only be revived via hotplug. */
+ if ((se == SE_HP) || !ndi_dev_is_hotplug_node(child)) {
+ SCSI_HBA_LOG((_LOG(3), NULL, child,
+ "scsi_probe() demoted devinfo"));
+
+ sp = scsi_probe(sdchild, SLEEP_FUNC);
+
+ if (sp == SCSIPROBE_EXISTS) {
+ ASSERT(sdchild->sd_inq);
+
+ /*
+ * Devinfo child exists and we are
+ * talking to the device, report
+ * reinsert and note if this was a
+ * new reinsert.
+ */
+ chg = ndi_devi_device_insert(child);
+ SCSI_HBA_LOG((_LOGCFG, self, NULL,
+ "devinfo %s@%s device_reinsert%s",
+ name ? name : "", addr,
+ chg ? "" : "ed already"));
+ } else {
+ if (se == SE_HP)
+ SCSI_HBA_LOG((_LOG(WARN),
+ NULL, child,
+ "enumeration failed during "
+ "reprobe"));
+ else
+ SCSI_HBA_LOG((_LOG(2),
+ NULL, child,
+ "enumeration failed during "
+ "reprobe"));
+
+ chg = ndi_devi_device_remove(child);
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "%s device_remove%s",
+ (sp > (sizeof (scsi_probe_ascii) /
+ sizeof (scsi_probe_ascii[0]))) ?
+ "UNKNOWN" : scsi_probe_ascii[sp],
+ chg ? "" : "ed already"));
+
+ child = NULL;
+ sdchild = NULL;
+ }
+ } else {
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "no reprobe"));
+
+ child = NULL;
+ sdchild = NULL;
+ }
+ }
+ } else {
+fail: ASSERT(child == NULL);
+ sdchild = NULL;
+ }
+ if (probe) {
+ /*
+		 * Clean up the probe node, destroying the node (its
+		 * sd_uninit_prevent count should be back to zero here).
+		 * Destroying the probe node (deleting it from the sibling
+		 * list) will wake up anyone waiting on the probe node
+		 * barrier.
+ */
+ SCSI_HBA_LOG((_LOG(4), NULL, probe, "remove probe"));
+ if (!scsi_hba_remove_node(probe)) {
+ /*
+ * Probe node removal should not fail, but if it
+ * does we hand that responsibility over to the
+ * async barrier deletion thread - other references
+ * to the same unit-address can hang until the
+ * probe node delete completes.
+ */
+ SCSI_HBA_LOG((_LOG(4), NULL, probe,
+ "remove probe failed, go async"));
+ scsi_hba_barrier_add(probe, 1);
+ }
+ probe = NULL;
+ }
+
+ /*
+ * If we successfully resolved via a pathinfo node, we need to find
+ * the pathinfo node and ensure that it is online (if possible). This
+ * is done for the case where the device was open when
+ * scsi_device_unconfig occurred, so mdi_pi_free did not occur. If the
+ * device has now been reinserted, we want the path back online.
+ * NOTE: This needs to occur after destruction of the probe node to
+ * avoid ASSERT related to two nodes at the same unit-address.
+ */
+ if (sdchild && pi && (probe == NULL)) {
+ ASSERT(MDI_PHCI(self));
+
+ (void) scsi_hba_find_child(self, NULL, addr,
+ 0, &dsearch, &psearch, NULL);
+ ASSERT((psearch == NULL) ||
+ (mdi_pi_get_client(psearch) == child));
+
+ if (psearch && mdi_pi_device_isremoved(psearch)) {
+ /*
+ * Verify that we can talk to the device, and if
+ * so note if this is a new device_insert.
+ *
+ * NOTE: We depend on mdi_path_select(), when given
+ * a specific path_instance, to select that path
+ * even if the path is offline.
+ *
+ * NOTE: A Client node is not ndi_dev_is_hotplug_node().
+ */
+ if (se == SE_HP) {
+ SCSI_HBA_LOG((_LOG(3), NULL, child,
+ "%s scsi_probe() demoted pathinfo",
+ mdi_pi_spathname(psearch)));
+
+ sp = scsi_hba_probe_pi(sdchild, SLEEP_FUNC, pi);
+
+ if (sp == SCSIPROBE_EXISTS) {
+ /*
+ * Pathinfo child exists and we are
+ * talking to the device, report
+ * reinsert and note if this
+ * was a new reinsert.
+ */
+ chg = mdi_pi_device_insert(psearch);
+ SCSI_HBA_LOG((_LOGCFG, self, NULL,
+ "pathinfo %s device_reinsert%s",
+ mdi_pi_spathname(psearch),
+ chg ? "" : "ed already"));
+
+ if (chg)
+ (void) mdi_pi_online(psearch,
+ 0);
+
+ } else {
+ SCSI_HBA_LOG((_LOG(WARN), NULL, child,
+ "%s enumeration failed "
+ "during reprobe",
+ mdi_pi_spathname(psearch)));
+
+ child = NULL;
+ sdchild = NULL;
+ }
+
+ } else {
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "%s no reprobe",
+ mdi_pi_spathname(psearch)));
+
+ child = NULL;
+ sdchild = NULL;
+ }
+ }
+ }
+
+ /* If asked for path_instance, return it. */
+ if (ppi)
+ *ppi = pi;
+
+ return (sdchild);
+}
+
+static void
+scsi_device_unconfig(dev_info_t *self, char *name, char *addr, int *circp)
+{
+ dev_info_t *child = NULL;
+ mdi_pathinfo_t *path = NULL;
+ char *spathname;
+ int rval;
+
+ ASSERT(self && addr && DEVI_BUSY_OWNED(self));
+
+ /*
+ * We have a catch-22. We may have a demoted node that we need to find
+	 * and offline/remove. To find the node if it isn't demoted, we
+ * use scsi_hba_find_child. If it's demoted, we then use
+ * ndi_devi_findchild_by_callback.
+ */
+ (void) scsi_hba_find_child(self, name, addr, 0, &child, &path, NULL);
+
+ if ((child == NULL) && (path == NULL)) {
+ child = ndi_devi_findchild_by_callback(self, name, addr,
+ scsi_busctl_ua);
+ if (child) {
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "devinfo %s@%s found by callback",
+ name ? name : "", addr));
+ ASSERT(ndi_flavor_get(child) ==
+ SCSA_FLAVOR_SCSI_DEVICE);
+ if (ndi_flavor_get(child) != SCSA_FLAVOR_SCSI_DEVICE) {
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "devinfo %s@%s not SCSI_DEVICE flavored",
+ name ? name : "", addr));
+ child = NULL;
+ }
+ }
+ }
+
+ if (child) {
+ ASSERT(child && (path == NULL));
+
+ /* Don't unconfig probe nodes. */
+ if (scsi_hba_devi_is_barrier(child)) {
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "devinfo %s@%s is_barrier, skip",
+ name ? name : "", addr));
+ return;
+ }
+
+ /* Attempt to offline/remove the devinfo node */
+ if (ndi_devi_offline(child,
+ NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE) == DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "devinfo %s@%s offlined and removed",
+ name ? name : "", addr));
+ } else if (ndi_devi_device_remove(child)) {
+ /* Offline/remove failed, note new device_remove */
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "devinfo %s@%s offline failed, device_remove",
+ name ? name : "", addr));
+ }
+ } else if (path) {
+ ASSERT(path && (child == NULL));
+
+ /*
+ * Attempt to offline/remove the pathinfo node.
+ *
+ * NOTE: mdi_pi_offline of last path will fail if the
+ * device is open (i.e. the client can't be offlined).
+ *
+ * NOTE: For mdi there is no REMOVE flag for mdi_pi_offline().
+ * When mdi_pi_offline returns MDI_SUCCESS, we are responsible
+ * for remove via mdi_pi_free().
+ */
+ mdi_hold_path(path);
+ spathname = mdi_pi_spathname(path); /* valid after free */
+ scsi_hba_devi_exit_phci(self, *circp);
+ rval = mdi_pi_offline(path, 0);
+ scsi_hba_devi_enter_phci(self, circp);
+ if (rval == MDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "pathinfo %s offlined and removed", spathname));
+ } else if (mdi_pi_device_remove(path)) {
+ /* Offline/remove failed, note new device_remove */
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "pathinfo %s offline failed, "
+ "device_remove", spathname));
+ }
+ mdi_rele_path(path);
+ if ((rval == MDI_SUCCESS) &&
+ (mdi_pi_free(path, 0) != MDI_SUCCESS)) { /* REMOVE */
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "pathinfo %s mdi_pi_free failed, "
+ "device_remove", spathname));
+ (void) mdi_pi_device_remove(path);
+ }
+ } else {
+ ASSERT((path == NULL) && (child == NULL));
+
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "%s@%s not found", name ? name : "", addr));
+ }
+}
+
+/*
+ * configure the device at the specified "@addr" address.
+ */
+static struct scsi_device *
+scsi_hba_bus_configone_addr(dev_info_t *self, char *addr, scsi_enum_t se)
+{
+ int circ;
+ struct scsi_device *sd;
+
+ scsi_hba_devi_enter(self, &circ);
+ sd = scsi_device_config(self, NULL, addr, se, &circ, NULL);
+ scsi_hba_devi_exit(self, circ);
+ return (sd);
+}
+
+/*
+ * unconfigure the device at the specified "@addr" address.
+ */
+static void
+scsi_hba_bus_unconfigone_addr(dev_info_t *self, char *addr)
+{
+ int circ;
+
+ scsi_hba_devi_enter(self, &circ);
+ (void) scsi_device_unconfig(self, NULL, addr, &circ);
+ scsi_hba_devi_exit(self, circ);
+}
+
+/*
+ * The bus_config_all operations are multi-threaded for performance. A
+ * separate thread per target and per LUN is used. The config handle is used
+ * to coordinate all the threads at a given level and the config thread data
+ * contains the required information for a specific thread to identify what it
+ * is processing and the handle under which this is being processed.
+ */
+
+/* multi-threaded config handle */
+struct scsi_hba_mte_h {
+ dev_info_t *h_self; /* initiator port */
+ int h_thr_count;
+ kmutex_t h_lock;
+ kcondvar_t h_cv;
+};
+
+/* target of 'self' config thread data */
+struct scsi_hba_mte_td {
+ struct scsi_hba_mte_h *td_h;
+ char *td_taddr; /* target port */
+ int td_mt;
+ scsi_enum_t td_se;
+};
+
+/* Invoke callback on a vector of taddrs from multiple threads */
+static void
+scsi_hba_thread_taddrs(dev_info_t *self, char **taddrs, int mt,
+ scsi_enum_t se, void (*callback)(void *arg))
+{
+ struct scsi_hba_mte_h *h; /* HBA header */
+ struct scsi_hba_mte_td *td; /* target data */
+ char **taddr;
+
+ /* allocate and initialize the handle */
+ h = kmem_zalloc(sizeof (*h), KM_SLEEP);
+ mutex_init(&h->h_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&h->h_cv, NULL, CV_DEFAULT, NULL);
+ h->h_self = self;
+
+ /* loop over all the targets */
+ for (taddr = taddrs; *taddr; taddr++) {
+ /* allocate a thread data structure for target */
+ td = kmem_alloc(sizeof (*td), KM_SLEEP);
+ td->td_h = h;
+ td->td_taddr = *taddr;
+ td->td_mt = mt;
+ td->td_se = se;
+
+ /* process the target */
+ mutex_enter(&h->h_lock);
+ h->h_thr_count++;
+ mutex_exit(&h->h_lock);
+
+ mt |= scsi_hba_log_mt_disable;
+ if (mt & SCSI_ENUMERATION_MT_LUN_DISABLE)
+ callback((void *)td);
+ else
+ (void) thread_create(NULL, 0, callback, (void *)td,
+ 0, &p0, TS_RUN, minclsyspri);
+ }
+
+ /* wait for all the target threads to complete */
+ mutex_enter(&h->h_lock);
+ while (h->h_thr_count > 0)
+ cv_wait(&h->h_cv, &h->h_lock);
+ mutex_exit(&h->h_lock);
+
+ /* free the handle */
+ cv_destroy(&h->h_cv);
+ mutex_destroy(&h->h_lock);
+ kmem_free(h, sizeof (*h));
+}
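+
+/*
+ * Illustrative sketch (hypothetical, not part of this change): a caller of
+ * scsi_hba_thread_taddrs() supplies a NULL terminated vector of target-port
+ * address strings and a callback with the same contract as
+ * scsi_hba_taddr_config_thr() below: the callback receives a
+ * struct scsi_hba_mte_td, decrements h_thr_count under h_lock (broadcasting
+ * h_cv when it reaches zero), and frees its thread data. The example_*
+ * names and the address strings are assumptions for illustration only.
+ */
+#if 0	/* example only */
+static void
+example_taddr_thr(void *arg)
+{
+	struct scsi_hba_mte_td	*td = (struct scsi_hba_mte_td *)arg;
+	struct scsi_hba_mte_h	*h = td->td_h;
+
+	/* ... enumerate devices at td->td_taddr via h->h_self ... */
+
+	/* signal completion of this target thread */
+	mutex_enter(&h->h_lock);
+	if (--h->h_thr_count == 0)
+		cv_broadcast(&h->h_cv);
+	mutex_exit(&h->h_lock);
+
+	/* free config thread data */
+	kmem_free(td, sizeof (*td));
+}
+
+static void
+example_config_two_targets(dev_info_t *self, int mt)
+{
+	char	*taddrs[3];
+
+	taddrs[0] = "w5000c50001d2abcd";	/* hypothetical target ports */
+	taddrs[1] = "w5000c50001d2abce";
+	taddrs[2] = NULL;			/* NULL terminated vector */
+
+	scsi_hba_thread_taddrs(self, taddrs, mt, SE_HP, example_taddr_thr);
+}
+#endif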
+
+
+/* lun/secondary function of lun0 config thread data */
+struct scsi_hba_mte_ld {
+ struct scsi_hba_mte_h *ld_h;
+ char *ld_taddr; /* target port */
+ scsi_lun64_t ld_lun64; /* lun */
+ int ld_sfunc; /* secondary function */
+ scsi_enum_t ld_se;
+};
+
+/*
+ * Enumerate the LUNs and secondary functions of the specified target. The
+ * target portion of the "@addr" is already represented as a string in the
+ * thread data; we add a ",lun" representation to this and perform a
+ * bus_configone style of enumeration on that "@addr".
+ */
+static void
+scsi_hba_enum_lsf_of_tgt_thr(void *arg)
+{
+ struct scsi_hba_mte_ld *ld = (struct scsi_hba_mte_ld *)arg;
+ struct scsi_hba_mte_h *h = ld->ld_h;
+ dev_info_t *self = h->h_self;
+ char addr[SCSI_MAXNAMELEN];
+
+ /* make string form of "@taddr,lun[,sfunc]" and see if it exists */
+ if (ld->ld_sfunc == -1)
+ (void) snprintf(addr, sizeof (addr),
+ "%s,%" PRIx64, ld->ld_taddr, ld->ld_lun64);
+ else
+ (void) snprintf(addr, sizeof (addr),
+ "%s,%" PRIx64 ",%x",
+ ld->ld_taddr, ld->ld_lun64, ld->ld_sfunc);
+
+ /* configure device at that unit-address address */
+ (void) scsi_hba_bus_configone_addr(self, addr, ld->ld_se);
+
+ /* signal completion of this LUN thread to the target */
+ mutex_enter(&h->h_lock);
+ if (--h->h_thr_count == 0)
+ cv_broadcast(&h->h_cv);
+ mutex_exit(&h->h_lock);
+
+ /* free config thread data */
+ kmem_free(ld, sizeof (*ld));
+}
+
+/* Format of SCSI REPORT_LUNS report */
+typedef struct scsi_lunrpt {
+ uchar_t lunrpt_len_msb; /* # LUNs being reported */
+ uchar_t lunrpt_len_mmsb;
+ uchar_t lunrpt_len_mlsb;
+ uchar_t lunrpt_len_lsb;
+ uchar_t lunrpt_reserved[4];
+ scsi_lun_t lunrpt_luns[1]; /* LUNs, variable size */
+} scsi_lunrpt_t;
+
+/*
+ * scsi_device_reportluns()
+ *
+ * Callers of this routine should ensure that the 'sd0' scsi_device structure
+ * and 'pi' path_instance specified are associated with a responding LUN0.
+ * This should not be called for SCSI-1 devices.
+ *
+ * To get a LUN report, we must allocate a buffer. To know how big to make the
+ * buffer, we must know the number of LUNs. To know the number of LUNs, we must
+ * get a LUN report. We first issue a SCMD_REPORT_LUNS command using a
+ * reasonably sized buffer that's big enough to report all LUNs for most
+ * typical devices. If it turns out that we needed a bigger buffer, we attempt
+ * to allocate a buffer of sufficient size, and reissue the command. If the
+ * first command succeeds, but the second fails, we return whatever we were
+ * able to get the first time. We return enough information for the caller to
+ * tell whether he got all the LUNs or only a subset.
+ *
+ * If successful, we allocate an array of scsi_lun_t to hold the results. The
+ * caller must kmem_free(*lunarrayp, *sizep) when finished with it. Upon
+ * successful return, the return value is NDI_SUCCESS and:
+ *
+ * *lunarrayp points to the allocated array,
+ * *nlunsp is the number of valid LUN entries in the array,
+ * *tlunsp is the total number of LUNs in the target,
+ * *sizep is the size of the lunarrayp array, which must be freed.
+ *
+ * If *nlunsp is less than *tlunsp, then we were only able to retrieve a
+ * subset of the total set of LUNs in the target.
+ */
+static int
+scsi_device_reportluns(struct scsi_device *sd0, char *taddr, int pi,
+ scsi_lun_t **lunarrayp, uint32_t *nlunsp, uint32_t *tlunsp, size_t *sizep)
+{
+ struct buf *lunrpt_bp;
+ struct scsi_pkt *lunrpt_pkt;
+ scsi_lunrpt_t *lunrpt;
+ uint32_t bsize;
+ uint32_t tluns, nluns;
+ int default_maxluns = scsi_lunrpt_default_max;
+ dev_info_t *child;
+
+ ASSERT(sd0 && lunarrayp && nlunsp && tlunsp && sizep);
+
+ /*
+ * NOTE: child should only be used in SCSI_HBA_LOG context since with
+ * vHCI enumeration it may be the vHCI 'client' devinfo child instead
+ * of a child of the 'self' pHCI we are enumerating.
+ */
+ child = sd0->sd_dev;
+
+ /* first try, look for up to scsi_lunrpt_default_max LUNs */
+ nluns = default_maxluns;
+
+again: bsize = sizeof (struct scsi_lunrpt) +
+ ((nluns - 1) * sizeof (struct scsi_lun));
+
+ lunrpt_bp = scsi_alloc_consistent_buf(&sd0->sd_address,
+ (struct buf *)NULL, bsize, B_READ, SLEEP_FUNC, NULL);
+ if (lunrpt_bp == NULL) {
+ SCSI_HBA_LOG((_LOG(1), NULL, child, "failed alloc"));
+ return (NDI_NOMEM);
+ }
+
+ lunrpt_pkt = scsi_init_pkt(&sd0->sd_address,
+ (struct scsi_pkt *)NULL, lunrpt_bp, CDB_GROUP5,
+ sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
+ SLEEP_FUNC, NULL);
+ if (lunrpt_pkt == NULL) {
+ SCSI_HBA_LOG((_LOG(1), NULL, child, "failed init"));
+ scsi_free_consistent_buf(lunrpt_bp);
+ return (NDI_NOMEM);
+ }
+
+ (void) scsi_setup_cdb((union scsi_cdb *)lunrpt_pkt->pkt_cdbp,
+ SCMD_REPORT_LUNS, 0, bsize, 0);
+
+ lunrpt_pkt->pkt_time = scsi_lunrpt_timeout;
+
+ /*
+ * When sd0 is a vHCI scsi device, we need reportlun to be issued
+ * against a specific LUN0 path_instance that we are enumerating.
+ */
+ lunrpt_pkt->pkt_path_instance = pi;
+ lunrpt_pkt->pkt_flags |= FLAG_PKT_PATH_INSTANCE;
+
+ /*
+ * NOTE: scsi_poll may not allow HBA specific recovery from TRAN_BUSY.
+ */
+ if (scsi_poll(lunrpt_pkt) < 0) {
+ SCSI_HBA_LOG((_LOG(2), NULL, child, "reportlun not supported"));
+ scsi_destroy_pkt(lunrpt_pkt);
+ scsi_free_consistent_buf(lunrpt_bp);
+ return (NDI_FAILURE);
+ }
+
+ scsi_destroy_pkt(lunrpt_pkt);
+
+ lunrpt = (scsi_lunrpt_t *)lunrpt_bp->b_un.b_addr;
+
+ /* Compute the total number of LUNs in the target */
+ tluns = (((uint_t)lunrpt->lunrpt_len_msb << 24) |
+ ((uint_t)lunrpt->lunrpt_len_mmsb << 16) |
+ ((uint_t)lunrpt->lunrpt_len_mlsb << 8) |
+ ((uint_t)lunrpt->lunrpt_len_lsb)) >> 3;
+
+ if (tluns == 0) {
+ /* Illegal response -- this target is broken */
+ SCSI_HBA_LOG((_LOG(1), NULL, child, "illegal tluns of zero"));
+ scsi_free_consistent_buf(lunrpt_bp);
+ return (DDI_NOT_WELL_FORMED);
+ }
+
+ if (tluns > nluns) {
+ /* have more than we allocated space for */
+ if (nluns == default_maxluns) {
+ /* first time around, reallocate larger */
+ scsi_free_consistent_buf(lunrpt_bp);
+ nluns = tluns;
+ goto again;
+ }
+
+ /* uh oh, we got a different tluns the second time! */
+ SCSI_HBA_LOG((_LOG(1), NULL, child,
+ "tluns changed from %d to %d", nluns, tluns));
+ } else
+ nluns = tluns;
+
+ /*
+ * Now we have:
+ * lunrpt_bp is the buffer we're using;
+ * tluns is the total number of LUNs the target says it has;
+ * nluns is the number of LUNs we were able to get into the buffer.
+ *
+ * Copy the data out of scarce iopb memory into regular kmem.
+ * The caller must kmem_free(*lunarrayp, *sizep) when finished with it.
+ */
+ *lunarrayp = (scsi_lun_t *)kmem_alloc(
+ nluns * sizeof (scsi_lun_t), KM_SLEEP);
+ if (*lunarrayp == NULL) {
+ SCSI_HBA_LOG((_LOG(1), NULL, child, "NULL lunarray"));
+ scsi_free_consistent_buf(lunrpt_bp);
+ return (NDI_NOMEM);
+ }
+
+ *sizep = nluns * sizeof (scsi_lun_t);
+ *nlunsp = nluns;
+ *tlunsp = tluns;
+ bcopy((void *)&lunrpt->lunrpt_luns, (void *)*lunarrayp, *sizep);
+ scsi_free_consistent_buf(lunrpt_bp);
+ SCSI_HBA_LOG((_LOG(3), NULL, child,
+ "@%s,0 path %d: %d/%d luns", taddr, pi, nluns, tluns));
+ return (NDI_SUCCESS);
+}
+
+/*
+ * Enumerate all the LUNs and secondary functions of the specified 'taddr'
+ * target port as accessed via 'self' pHCI. Note that sd0 may be associated
+ * with a child of the vHCI instead of 'self' - in this case the 'pi'
+ * path_instance is used to ensure that the SCMD_REPORT_LUNS command is issued
+ * through the 'self' pHCI path.
+ *
+ * We multi-thread across all the LUNs and secondary functions and enumerate
+ * them. Which LUNs exist is based on SCMD_REPORT_LUNS data.
+ *
+ * The scsi_device we are called with should be for LUN0 and has been probed.
+ *
+ * This function is structured so that an HBA that has a different target
+ * addressing structure can still use this function to enumerate its
+ * LUNs if it uses "taddr,lun" for its LUN space.
+ *
+ * We make assumptions about other LUNs associated with the target:
+ *
+ *	For SCSI-2 and SCSI-3 targets we will issue the SCSI report_luns
+ *	command. If this fails, or we have a SCSI-1 target, the number of
+ * LUNs is determined based on SCSI_OPTIONS_NLUNS. For a SCSI-1
+ * target we never probe above LUN 8, even if SCSI_OPTIONS_NLUNS
+ * indicates we should.
+ *
+ * HBA drivers wanting a different set of assumptions should implement their
+ * own LUN enumeration code.
+ */
+static int
+scsi_hba_enum_lsf_of_t(struct scsi_device *sd0,
+ dev_info_t *self, char *taddr, int pi, int mt, scsi_enum_t se)
+{
+ dev_info_t *child;
+ scsi_hba_tran_t *tran;
+ impl_scsi_tgtmap_t *tgtmap;
+ damap_id_t tgtid;
+ damap_t *tgtdam;
+ damap_t *lundam = NULL;
+ struct scsi_hba_mte_h *h;
+ struct scsi_hba_mte_ld *ld;
+ int aver;
+ scsi_lun_t *lunp = NULL;
+ int lun;
+ uint32_t nluns;
+ uint32_t tluns;
+ size_t size;
+ scsi_lun64_t lun64;
+ int maxluns;
+
+ /*
+ * If LUN0 failed then we have no other LUNs.
+ *
+ * NOTE: We need sd_inq to be valid to check ansi version. Since
+	 * scsi_unprobe is now a noop (sd_inq freed in
+ * scsi_busctl_uninitchild) sd_inq remains valid even if a target
+ * driver detach(9E) occurs, resulting in a scsi_unprobe call
+ * (sd_uninit_prevent keeps sd_inq valid by failing any
+ * device_uninitchild attempts).
+ */
+ ASSERT(sd0 && sd0->sd_uninit_prevent && sd0->sd_dev && sd0->sd_inq);
+ if ((sd0 == NULL) || (sd0->sd_dev == NULL) || (sd0->sd_inq == NULL)) {
+ SCSI_HBA_LOG((_LOG(1), NULL, sd0 ? sd0->sd_dev : NULL,
+ "not setup correctly:%s%s%s",
+ (sd0 == NULL) ? " device" : "",
+ (sd0 && (sd0->sd_dev == NULL)) ? " dip" : "",
+ (sd0 && (sd0->sd_inq == NULL)) ? " inq" : ""));
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * NOTE: child should only be used in SCSI_HBA_LOG context since with
+ * vHCI enumeration it may be the vHCI 'client' devinfo child instead
+ * of a child of the 'self' pHCI we are enumerating.
+ */
+ child = sd0->sd_dev;
+
+ /* Determine if we are reporting lun observations into lunmap. */
+ tran = ndi_flavorv_get(self, SCSA_FLAVOR_SCSI_DEVICE);
+ tgtmap = (impl_scsi_tgtmap_t *)tran->tran_tgtmap;
+ if (tgtmap) {
+ tgtdam = tgtmap->tgtmap_dam[SCSI_TGT_SCSI_DEVICE];
+ tgtid = damap_lookup(tgtdam, taddr);
+ if (tgtid != NODAM) {
+ lundam = damap_id_priv_get(tgtdam, tgtid);
+ damap_id_rele(tgtdam, tgtid);
+ ASSERT(lundam);
+ }
+ }
+
+ if (lundam) {
+ /* If using lunmap, start the observation */
+ scsi_lunmap_set_begin(self, lundam);
+ } else {
+ /* allocate and initialize the LUN handle */
+ h = kmem_zalloc(sizeof (*h), KM_SLEEP);
+ mutex_init(&h->h_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&h->h_cv, NULL, CV_DEFAULT, NULL);
+ h->h_self = self;
+ }
+
+ /* See if SCMD_REPORT_LUNS works for SCSI-2 and beyond */
+ aver = sd0->sd_inq->inq_ansi;
+ if ((aver >= SCSI_VERSION_2) && (scsi_device_reportluns(sd0,
+ taddr, pi, &lunp, &nluns, &tluns, &size) == NDI_SUCCESS)) {
+
+ ASSERT(lunp && (size > 0) && (nluns > 0) && (tluns > 0));
+
+ /* loop over the reported LUNs */
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "@%s,0 path %d: enumerating %d reported lun%s", taddr, pi,
+ nluns, nluns > 1 ? "s" : ""));
+
+ for (lun = 0; lun < nluns; lun++) {
+ lun64 = scsi_lun_to_lun64(lunp[lun]);
+
+ if (lundam) {
+ if (scsi_lunmap_set_add(self, lundam,
+ taddr, lun64, -1) != DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "@%s,%" PRIx64 " failed to create",
+ taddr, lun64));
+ }
+ } else {
+ if (lun64 == 0)
+ continue;
+
+ /* allocate a thread data structure for LUN */
+ ld = kmem_alloc(sizeof (*ld), KM_SLEEP);
+ ld->ld_h = h;
+ ld->ld_taddr = taddr;
+ ld->ld_lun64 = lun64;
+ ld->ld_sfunc = -1;
+ ld->ld_se = se;
+
+ /* process the LUN */
+ mutex_enter(&h->h_lock);
+ h->h_thr_count++;
+ mutex_exit(&h->h_lock);
+
+ mt |= scsi_hba_log_mt_disable;
+ if (mt & SCSI_ENUMERATION_MT_TARGET_DISABLE)
+ scsi_hba_enum_lsf_of_tgt_thr(
+ (void *)ld);
+ else
+ (void) thread_create(NULL, 0,
+ scsi_hba_enum_lsf_of_tgt_thr,
+ (void *)ld, 0, &p0, TS_RUN,
+ minclsyspri);
+ }
+ }
+
+ /* free the LUN array allocated by scsi_device_reportluns */
+ kmem_free(lunp, size);
+ } else {
+ /* Couldn't get SCMD_REPORT_LUNS data */
+ if (aver >= SCSI_VERSION_3) {
+ if (se == SE_HP)
+ SCSI_HBA_LOG((_LOG(WARN), NULL, child,
+ "enumeration failed during report_lun"));
+ else
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "enumeration failed during report_lun"));
+ }
+
+ /* Determine the number of LUNs to enumerate. */
+ maxluns = scsi_get_scsi_maxluns(sd0);
+
+ /* loop over possible LUNs, skipping LUN0 */
+ if (maxluns > 1)
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "@%s,0 path %d: enumerating luns 1-%d", taddr, pi,
+ maxluns - 1));
+ else
+ SCSI_HBA_LOG((_LOG(2), NULL, child,
+ "@%s,0 path %d: enumerating just lun0", taddr, pi));
+
+ for (lun64 = 0; lun64 < maxluns; lun64++) {
+ if (lundam) {
+ if (scsi_lunmap_set_add(self, lundam,
+ taddr, lun64, -1) != DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "@%s,%" PRIx64 " failed to create",
+ taddr, lun64));
+ }
+ } else {
+ if (lun64 == 0)
+ continue;
+
+ /* allocate a thread data structure for LUN */
+ ld = kmem_alloc(sizeof (*ld), KM_SLEEP);
+ ld->ld_h = h;
+ ld->ld_taddr = taddr;
+ ld->ld_lun64 = lun64;
+ ld->ld_sfunc = -1;
+ ld->ld_se = se;
+
+ /* process the LUN */
+ mutex_enter(&h->h_lock);
+ h->h_thr_count++;
+ mutex_exit(&h->h_lock);
+
+ mt |= scsi_hba_log_mt_disable;
+ if (mt & SCSI_ENUMERATION_MT_TARGET_DISABLE)
+ scsi_hba_enum_lsf_of_tgt_thr(
+ (void *)ld);
+ else
+ (void) thread_create(NULL, 0,
+ scsi_hba_enum_lsf_of_tgt_thr,
+ (void *)ld, 0, &p0, TS_RUN,
+ minclsyspri);
+ }
+ }
+ }
+
+ /*
+ * If we have an embedded service as a secondary function on LUN0 and
+ * the primary LUN0 function is different than the secondary function
+ * then enumerate the secondary function. The sfunc value is the dtype
+ * associated with the embedded service.
+ *
+	 * inq_encserv: if the device reports an embedded enclosure service
+	 * and our dtype is not DTYPE_ESI or DTYPE_UNKNOWN, then create a
+	 * separate DTYPE_ESI node for enclosure service access.
+ */
+ ASSERT(sd0->sd_inq);
+ if (sd0->sd_inq->inq_encserv &&
+ ((sd0->sd_inq->inq_dtype & DTYPE_MASK) != DTYPE_UNKNOWN) &&
+ ((sd0->sd_inq->inq_dtype & DTYPE_MASK) != DTYPE_ESI) &&
+ ((sd0->sd_inq->inq_ansi >= SCSI_VERSION_3))) {
+ if (lundam) {
+ if (scsi_lunmap_set_add(self, lundam,
+ taddr, 0, DTYPE_ESI) != DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "@%s,0,%x failed to create",
+ taddr, DTYPE_ESI));
+ }
+ } else {
+ /* allocate a thread data structure for sfunc */
+ ld = kmem_alloc(sizeof (*ld), KM_SLEEP);
+ ld->ld_h = h;
+ ld->ld_taddr = taddr;
+ ld->ld_lun64 = 0;
+ ld->ld_sfunc = DTYPE_ESI;
+ ld->ld_se = se;
+
+ /* process the LUN */
+ mutex_enter(&h->h_lock);
+ h->h_thr_count++;
+ mutex_exit(&h->h_lock);
+
+ mt |= scsi_hba_log_mt_disable;
+ if (mt & SCSI_ENUMERATION_MT_TARGET_DISABLE)
+ scsi_hba_enum_lsf_of_tgt_thr((void *)ld);
+ else
+ (void) thread_create(NULL, 0,
+ scsi_hba_enum_lsf_of_tgt_thr, (void *)ld,
+ 0, &p0, TS_RUN, minclsyspri);
+ }
+ }
+
+ /*
+ * Future: Add secondary function support for:
+ * inq_mchngr (DTYPE_CHANGER)
+ * inq_sccs (DTYPE_ARRAY_CTRL)
+ */
+
+ if (lundam) {
+ /* If using lunmap, end the observation */
+ scsi_lunmap_set_end(self, lundam);
+ } else {
+ /* wait for all the LUN threads of this target to complete */
+ mutex_enter(&h->h_lock);
+ while (h->h_thr_count > 0)
+ cv_wait(&h->h_cv, &h->h_lock);
+ mutex_exit(&h->h_lock);
+
+ /* free the target handle */
+ cv_destroy(&h->h_cv);
+ mutex_destroy(&h->h_lock);
+ kmem_free(h, sizeof (*h));
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Enumerate LUN0 and all other LUNs and secondary functions associated with
+ * the specified target address.
+ *
+ * Return NDI_SUCCESS if we might have created a new node.
+ * Return NDI_FAILURE if we definitely did not create a new node.
+ */
+static int
+scsi_hba_bus_config_taddr(dev_info_t *self, char *taddr, int mt, scsi_enum_t se)
+{
+ char addr[SCSI_MAXNAMELEN];
+ struct scsi_device *sd;
+ int circ;
+ int ret;
+ int pi;
+
+ /* See if LUN0 of the specified target exists. */
+ (void) snprintf(addr, sizeof (addr), "%s,0", taddr);
+
+ scsi_hba_devi_enter(self, &circ);
+ sd = scsi_device_config(self, NULL, addr, se, &circ, &pi);
+
+ if (sd) {
+ /*
+ * LUN0 exists, enumerate all the other LUNs.
+ *
+ * With vHCI enumeration, when 'self' is a pHCI the sd
+ * scsi_device may be associated with the vHCI 'client'.
+		 * In this case 'pi' is the path_instance needed to continue
+		 * enumeration communication with LUN0 via the 'self' pHCI
+		 * and the specific 'taddr' target address.
+		 *
+		 * We prevent the removal of LUN0 (via sd_uninit_prevent)
+		 * until we are done because we must exit the parent for
+		 * the multi-threaded scsi_hba_enum_lsf_of_t().
+ *
+ * NOTE: scsi_unprobe is a noop, sd->sd_inq is valid until
+ * device_uninitchild - so sd_uninit_prevent keeps sd_inq valid
+ * by failing any device_uninitchild attempts.
+ */
+ ret = NDI_SUCCESS;
+ sd->sd_uninit_prevent++;
+ scsi_hba_devi_exit(self, circ);
+
+ (void) scsi_hba_enum_lsf_of_t(sd, self, taddr, pi, mt, se);
+
+ scsi_hba_devi_enter(self, &circ);
+ sd->sd_uninit_prevent--;
+ } else
+ ret = NDI_FAILURE;
+ scsi_hba_devi_exit(self, circ);
+ return (ret);
+}
+
+/* Config callout from scsi_hba_thread_taddrs */
+static void
+scsi_hba_taddr_config_thr(void *arg)
+{
+ struct scsi_hba_mte_td *td = (struct scsi_hba_mte_td *)arg;
+ struct scsi_hba_mte_h *h = td->td_h;
+
+ (void) scsi_hba_bus_config_taddr(h->h_self, td->td_taddr,
+ td->td_mt, td->td_se);
+
+ /* signal completion of this target thread to the HBA */
+ mutex_enter(&h->h_lock);
+ if (--h->h_thr_count == 0)
+ cv_broadcast(&h->h_cv);
+ mutex_exit(&h->h_lock);
+
+ /* free config thread data */
+ kmem_free(td, sizeof (*td));
+}
+
+/*
+ * Enumerate all the children of the specified SCSI parallel interface (spi).
+ * An HBA associated with a non-parallel scsi bus should be using another bus
+ * level enumeration implementation (possibly their own) and calling
+ * scsi_hba_bus_config_taddr to do enumeration of devices associated with a
+ * particular target address.
+ *
+ * On an spi bus the targets are sequentially enumerated based on the
+ * width of the bus. We also take care to skip the HBA's own initiator id.
+ * See scsi_hba_enum_lsf_of_t() for LUN and secondary function enumeration.
+ *
+ * Return NDI_SUCCESS if we might have created a new node.
+ * Return NDI_FAILURE if we definitely did not create a new node.
+ *
+ * Note: At some point we may want to expose this interface in transport.h
+ * if we find an HBA that implements bus_config but still uses spi-like target
+ * addresses.
+ */
+static int
+scsi_hba_bus_configall_spi(dev_info_t *self, int mt)
+{
+ int options;
+ int ntargets;
+ int id;
+ int tgt;
+ char **taddrs;
+ char **taddr;
+ char *tbuf;
+
+ /*
+ * Find the number of targets supported on the bus. Look at the per
+ * bus scsi-options property on the HBA node and check its
+ * SCSI_OPTIONS_WIDE setting.
+ */
+ options = ddi_prop_get_int(DDI_DEV_T_ANY, self,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-options", -1);
+ if ((options != -1) && ((options & SCSI_OPTIONS_WIDE) == 0))
+ ntargets = NTARGETS; /* 8 */
+ else
+ ntargets = NTARGETS_WIDE; /* 16 */
+
+ /*
+ * Find the initiator-id for the HBA so we can skip that. We get the
+ * cached value on the HBA node, established in scsi_hba_attach_setup.
+ * If we were unable to determine the id then we rely on the HBA to
+ * fail gracefully when asked to enumerate itself.
+ */
+ id = ddi_prop_get_int(DDI_DEV_T_ANY, self,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-initiator-id", -1);
+ if (id > ntargets) {
+ SCSI_HBA_LOG((_LOG(1), self, NULL,
+ "'scsi-initiator-id' bogus for %d target bus: %d",
+ ntargets, id));
+ id = -1;
+ }
+ SCSI_HBA_LOG((_LOG(2), self, NULL,
+ "enumerating targets 0-%d skip %d", ntargets, id));
+
+ /* form vector of target addresses */
+ taddrs = kmem_zalloc(sizeof (char *) * (ntargets + 1), KM_SLEEP);
+ for (tgt = 0, taddr = taddrs; tgt < ntargets; tgt++) {
+ /* skip initiator */
+ if (tgt == id)
+ continue;
+
+ /* convert to string and enumerate the target address */
+ tbuf = kmem_alloc(((tgt/16) + 1) + 1, KM_SLEEP);
+ (void) sprintf(tbuf, "%x", tgt);
+ ASSERT(strlen(tbuf) == ((tgt/16) + 1));
+ *taddr++ = tbuf;
+ }
+
+ /* null terminate vector of target addresses */
+ *taddr = NULL;
+
+ /* configure vector of target addresses */
+ scsi_hba_thread_taddrs(self, taddrs, mt, SE_BUSCONFIG,
+ scsi_hba_taddr_config_thr);
+
+ /* free vector of target addresses */
+ for (taddr = taddrs; *taddr; taddr++)
+ kmem_free(*taddr, strlen(*taddr) + 1);
+ kmem_free(taddrs, sizeof (char *) * (ntargets + 1));
+ return (NDI_SUCCESS);
+}
+
+/*
+ * Transport independent bus_configone BUS_CONFIG_ONE implementation. Takes
+ * same arguments, minus op, as scsi_hba_bus_config(), tran_bus_config(),
+ * and scsi_hba_bus_config_spi().
+ */
+int
+scsi_hba_bus_configone(dev_info_t *self, uint_t flags, char *arg,
+ dev_info_t **childp)
+{
+ int ret;
+ int circ;
+ char *name, *addr;
+ char *lcp;
+ char sc1, sc2;
+ char nameaddr[SCSI_MAXNAMELEN];
+ extern int i_ndi_make_spec_children(dev_info_t *, uint_t);
+ struct scsi_device *sd0, *sd;
+ scsi_lun64_t lun64;
+ int mt;
+
+ /* parse_name modifies arg1, we must duplicate "name@addr" */
+ (void) strcpy(nameaddr, arg);
+ i_ddi_parse_name(nameaddr, &name, &addr, NULL);
+
+ /* verify the form of the node - we need an @addr */
+ if ((name == NULL) || (addr == NULL) ||
+ (*name == '\0') || (*addr == '\0')) {
+ /*
+ * OBP may create ill formed template/stub/wild-card
+ * nodes (no @addr) for legacy driver loading methods -
+ * ignore them.
+ */
+ SCSI_HBA_LOG((_LOG(2), self, NULL, "%s ill formed", arg));
+ return (NDI_FAILURE);
+ }
+
+ /*
+ * Check to see if this is a non-scsi flavor configuration operation.
+ */
+ if (strcmp(name, "smp") == 0) {
+ /*
+ * Configure the child, and if we're successful return with
+ * active hold.
+ */
+ return (smp_hba_bus_config(self, addr, childp));
+ }
+
+ /*
+ * The framework does not ensure the creation of driver.conf
+ * nodes prior to calling a nexus bus_config. For legacy
+ * support of driver.conf file nodes we want to create our
+ * driver.conf file children now so that we can detect if we
+ * are being asked to bus_configone one of these nodes.
+ *
+ * Needing driver.conf file nodes prior to bus config is unique
+ * to scsi_enumeration mixed mode (legacy driver.conf and
+ * dynamic SID node) support. There is no general need for the
+ * framework to make driver.conf children prior to bus_config.
+ *
+ * We enter our HBA (self) prior to scsi_device_config, and
+ * pass it our circ. The scsi_device_config may exit the
+ * HBA around scsi_probe() operations to allow for parallelism.
+ * This is done after the probe node "@addr" is available as a
+ * barrier to prevent parallel probes of the same device. The
+ * probe node is also configured in a way that it can't be
+ * removed by the framework until we are done with it.
+ *
+ * NOTE: The framework is currently preventing many parallel
+ * sibling operations (such as attaches), so the parallelism
+ * we are providing is of marginal use until that is improved.
+ * The most logical way to solve this would be to have separate
+ * target and lun nodes. This would be a large change in the
+ * format of /devices paths and is not being pursued at this
+ * time. The need for parallelism will become more of an issue
+ * with top-down attach for mpxio/vhci and for iSCSI support.
+	 * We may eventually want a dual mode implementation,
+ * where the HBA determines if we should construct separate
+ * target and lun devinfo nodes.
+ */
+ scsi_hba_devi_enter(self, &circ);
+ SCSI_HBA_LOG((_LOG(4), self, NULL, "%s@%s config_one", name, addr));
+ (void) i_ndi_make_spec_children(self, flags);
+
+ /*
+ * For bus_configone, we make sure that we can find LUN0
+ * first. This allows the delayed probe/barrier deletion for a
+ * non-existent LUN0 (if enabled in scsi_device_config) to
+ * cover all LUNs on the target. This is done to minimize the
+ * number of independent target selection timeouts that occur
+ * when a target with many LUNs is no longer accessible
+ * (powered off). This removes the need for target driver
+ * probe cache implementations.
+ *
+ * This optimization may not be desirable in a pure bridge
+ * environment where targets on the other side of the bridge
+ * show up as LUNs to the host. If we ever need to support
+ * such a configuration then we should consider implementing a
+ * SCSI_OPTIONS_ILUN0 bit.
+ *
+ * NOTE: we are *not* applying any target limitation filtering
+ * to bus_configone, which means that we are relying on the
+ * HBA tran_tgt_init entry point invoked by scsi_busctl_initchild
+ * to fail.
+ */
+ sd0 = (struct scsi_device *)-1;
+ lcp = strchr(addr, ','); /* "addr,lun[,sfunc]" */
+ if (lcp) {
+ /*
+ * With "tgt,lun[,sfunc]" addressing, multiple addressing levels
+ * have been compressed into single devinfo node unit-address.
+ * This presents a mismatch - there is no bus_config to discover
+		 * LUNs below a specific target; the only choice is to
+ * BUS_CONFIG_ALL the HBA. To support BUS_CONFIG_ALL_LUNS below
+ * a specific target, a bus_configone with lun address of "*"
+ * triggers lun discovery below a target.
+ */
+ if (*(lcp + 1) == '*') {
+ mt = ddi_prop_get_int(DDI_DEV_T_ANY, self,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
+ "scsi-enumeration", scsi_enumeration);
+ SCSI_HBA_LOG((_LOG(2), self, NULL,
+ "%s@%s lun enumeration triggered", name, addr));
+ *lcp = '\0'; /* turn ',' into '\0' */
+ scsi_hba_devi_exit(self, circ);
+ (void) scsi_hba_bus_config_taddr(self, addr,
+ mt, SE_BUSCONFIG);
+ return (NDI_FAILURE);
+ }
+
+ /* convert hex lun number from ascii */
+ lun64 = scsi_addr_to_lun64(lcp + 1);
+
+ if ((lun64 != 0) && (lun64 != SCSI_LUN64_ILLEGAL)) {
+ /*
+ * configure ",0" lun first, saving off
+ * original lun characters.
+ */
+ sc1 = *(lcp + 1);
+ sc2 = *(lcp + 2);
+ *(lcp + 1) = '0';
+ *(lcp + 2) = '\0';
+ sd0 = scsi_device_config(self, NULL, addr,
+ SE_BUSCONFIG, &circ, NULL);
+
+ /* restore original lun */
+ *(lcp + 1) = sc1;
+ *(lcp + 2) = sc2;
+
+ /*
+ * Apply maxlun filtering.
+ *
+ * Future: We still have the kludged
+ * scsi_check_ss2_LUN_limit() filtering off
+ * scsi_probe() to catch bogus driver.conf
+ * entries.
+ */
+ if (sd0 && (lun64 < SCSI_32LUNS_PER_TARGET) &&
+ (lun64 >= scsi_get_scsi_maxluns(sd0))) {
+ sd0 = NULL;
+ SCSI_HBA_LOG((_LOG(4), self, NULL,
+ "%s@%s filtered", name, addr));
+ } else
+ SCSI_HBA_LOG((_LOG(4), self, NULL,
+ "%s@%s lun 0 %s", name, addr,
+ sd0 ? "worked" : "failed"));
+ }
+ }
+
+ /*
+ * configure the requested device if LUN0 exists or we were
+ * unable to determine the lun format to determine if LUN0
+ * exists.
+ */
+ if (sd0) {
+ sd = scsi_device_config(self, name, addr,
+ SE_BUSCONFIG, &circ, NULL);
+ } else {
+ sd = NULL;
+ SCSI_HBA_LOG((_LOG(2), self, NULL,
+ "%s@%s no lun 0 or filtered lun", name, addr));
+ }
+
+ /*
+	 * We know what we found; to reduce overhead we finish BUS_CONFIG_ONE
+	 * processing without calling back to the framework's
+	 * ndi_busop_bus_config (unless we goto framework below).
+ *
+ * If the reference is to a driver name and we created a generic name
+ * (bound to that driver) we will still succeed. This is important
+	 * for correctly resolving old drivername references to a device that
+	 * now uses a generic name across the transition to generic naming.
+	 * This is effectively an internal implementation of the
+	 * NDI_DRIVERNAME flag.
+ *
+	 * We also need to special case the resolve_pathname OBP boot-device
+	 * case (modrootloaded == 0), where the reference is to a generic name
+	 * but we created a legacy driver name node, by just returning the
+	 * node created.
+ */
+ if (sd && sd->sd_dev &&
+ ((strcmp(ddi_node_name(sd->sd_dev), name) == 0) ||
+ (strcmp(ddi_driver_name(sd->sd_dev), name) == 0) ||
+ (modrootloaded == 0)) &&
+ (ndi_devi_online(sd->sd_dev,
+ flags & NDI_NO_EVENT) == NDI_SUCCESS)) {
+
+ /* device attached, return devinfo node with hold */
+ ret = NDI_SUCCESS;
+ *childp = sd->sd_dev;
+ ndi_hold_devi(sd->sd_dev);
+ } else {
+ /*
+ * In the process of failing we may have added nodes to the HBA
+ * (self), clearing DEVI_MADE_CHILDREN. To reduce the overhead
+ * associated with the frameworks reaction to this we clear the
+ * flag here.
+ */
+ mutex_enter(&DEVI(self)->devi_lock);
+ DEVI(self)->devi_flags &= ~DEVI_MADE_CHILDREN;
+ mutex_exit(&DEVI(self)->devi_lock);
+ ret = NDI_FAILURE;
+
+ /*
+		 * The framework may still be able to succeed with
+		 * its GENERIC_PROP code.
+ */
+ scsi_hba_devi_exit(self, circ);
+ if (flags & NDI_DRV_CONF_REPROBE)
+ flags |= NDI_CONFIG_REPROBE;
+ flags |= NDI_MDI_FALLBACK; /* devinfo&pathinfo children */
+ return (ndi_busop_bus_config(self, flags, BUS_CONFIG_ONE,
+ (void *)arg, childp, 0));
+ }
+
+ scsi_hba_devi_exit(self, circ);
+ return (ret);
+}
+
+/*
+ * Perform SCSI Parallel Interconnect bus_config
+ */
+static int
+scsi_hba_bus_config_spi(dev_info_t *self, uint_t flags,
+ ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
+{
+ int ret;
+ int mt;
+
+ /*
+ * Enumerate scsi target devices: See if we are doing generic dynamic
+ * enumeration: if driver.conf has not specified the 'scsi-enumeration'
+ * knob then use the global scsi_enumeration knob.
+ */
+ mt = ddi_prop_get_int(DDI_DEV_T_ANY, self,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
+ "scsi-enumeration", scsi_enumeration);
+ if ((mt & SCSI_ENUMERATION_ENABLE) == 0) {
+ /*
+ * Static driver.conf file enumeration:
+ *
+ * Force reprobe for BUS_CONFIG_ONE or when manually
+ * reconfiguring via devfsadm(1m) to emulate deferred attach.
+ * Reprobe only discovers driver.conf enumerated nodes, more
+ * dynamic implementations probably require their own
+ * bus_config.
+ */
+ if ((op == BUS_CONFIG_ONE) || (flags & NDI_DRV_CONF_REPROBE))
+ flags |= NDI_CONFIG_REPROBE;
+ flags |= NDI_MDI_FALLBACK; /* devinfo&pathinfo children */
+ return (ndi_busop_bus_config(self, flags, op, arg, childp, 0));
+ }
+
+ if (scsi_hba_busconfig_debug)
+ flags |= NDI_DEVI_DEBUG;
+
+ /*
+ * Generic spi dynamic bus config enumeration to discover and enumerate
+ * the target device nodes we are looking for.
+ */
+ switch (op) {
+ case BUS_CONFIG_ONE: /* enumerate the named child */
+ ret = scsi_hba_bus_configone(self, flags, (char *)arg, childp);
+ break;
+
+ case BUS_CONFIG_ALL: /* enumerate all children on the bus */
+ case BUS_CONFIG_DRIVER: /* enumerate all children that bind to driver */
+ SCSI_HBA_LOG((_LOG(3), self, NULL,
+ "BUS_CONFIG_%s mt %x",
+ (op == BUS_CONFIG_ALL) ? "ALL" : "DRIVER", mt));
+
+ /*
+ * Enumerate targets on SCSI parallel interconnect and let the
+ * framework finish the operation (attach the nodes).
+ */
+ if ((ret = scsi_hba_bus_configall_spi(self, mt)) == NDI_SUCCESS)
+ ret = ndi_busop_bus_config(self, flags, op,
+ arg, childp, 0);
+ break;
+
+ default:
+ ret = NDI_FAILURE;
+ break;
+ }
+ return (ret);
+}
+
+/*
+ * Perform SCSI Parallel Interconnect bus_unconfig
+ */
+static int
+scsi_hba_bus_unconfig_spi(dev_info_t *self, uint_t flags,
+ ddi_bus_config_op_t op, void *arg)
+{
+ int mt;
+ int circ;
+ int ret;
+
+ /*
+ * See if we are doing generic dynamic enumeration: if driver.conf has
+ * not specified the 'scsi-enumeration' knob then use the global
+ * scsi_enumeration knob.
+ */
+ mt = ddi_prop_get_int(DDI_DEV_T_ANY, self,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
+ "scsi-enumeration", scsi_enumeration);
+ if ((mt & SCSI_ENUMERATION_ENABLE) == 0)
+ return (ndi_busop_bus_unconfig(self, flags, op, arg));
+
+ if (scsi_hba_busconfig_debug)
+ flags |= NDI_DEVI_DEBUG;
+
+ scsi_hba_devi_enter(self, &circ);
+ switch (op) {
+ case BUS_UNCONFIG_ONE:
+ SCSI_HBA_LOG((_LOG(3), self, NULL,
+ "unconfig one: %s", (char *)arg));
+ ret = NDI_SUCCESS;
+ break;
+
+ case BUS_UNCONFIG_ALL:
+ case BUS_UNCONFIG_DRIVER:
+ ret = NDI_SUCCESS;
+ break;
+
+ default:
+ ret = NDI_FAILURE;
+ break;
+ }
+
+ /* Perform the generic default bus unconfig */
+ if (ret == NDI_SUCCESS)
+ ret = ndi_busop_bus_unconfig(self, flags, op, arg);
+
+ scsi_hba_devi_exit(self, circ);
+
+ return (ret);
+}
+
+static int
+scsi_hba_bus_config_tgtmap(dev_info_t *self, uint_t flags,
+ ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
+{
+ int ret = NDI_FAILURE;
+
+ switch (op) {
+ case BUS_CONFIG_ONE:
+ ret = scsi_hba_bus_configone(self, flags, arg, childp);
+ break;
+
+ case BUS_CONFIG_ALL:
+ case BUS_CONFIG_DRIVER:
+ ret = ndi_busop_bus_config(self, flags, op, arg, childp, 0);
+ break;
+
+ default:
+ break;
+ }
+
+ return (ret);
+}
+
+static int
+scsi_hba_bus_unconfig_tgtmap(dev_info_t *self, uint_t flags,
+ ddi_bus_config_op_t op, void *arg)
+{
+ int ret = NDI_FAILURE;
+
+ switch (op) {
+ case BUS_UNCONFIG_ONE:
+ case BUS_UNCONFIG_DRIVER:
+ case BUS_UNCONFIG_ALL:
+ ret = NDI_SUCCESS;
+ break;
+ default:
+ break;
+ }
+
+ if (ret == NDI_SUCCESS) {
+ flags &= ~NDI_DEVI_REMOVE;
+ ret = ndi_busop_bus_unconfig(self, flags, op, arg);
+ }
+ return (ret);
+}
+
+static int
+scsi_hba_bus_config_iportmap(dev_info_t *self, uint_t flags,
+ ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
+{
+ dev_info_t *child;
+ int circ;
+ int ret = NDI_FAILURE;
+
+ /*
+ * MPXIO is never a sure thing (and we have mixed children), so
+	 * set NDI_MDI_FALLBACK so that ndi_busop_bus_config will
+ * search for both devinfo and pathinfo children.
+ *
+ * Future: Remove NDI_MDI_FALLBACK since devcfg.c now looks for
+ * devinfo/pathinfo children in parallel (instead of old way of
+ * looking for one form of child and then doing "fallback" to
+ * look for other form of child).
+ */
+ flags |= NDI_MDI_FALLBACK; /* devinfo&pathinfo children */
+ switch (op) {
+ case BUS_CONFIG_ONE:
+ scsi_hba_devi_enter(self, &circ);
+ /* create the iport node child */
+ if ((child = scsi_hba_bus_config_port(self, (char *)arg,
+ SE_BUSCONFIG)) != NULL) {
+ if (childp) {
+ ndi_hold_devi(child);
+ *childp = child;
+ }
+ ret = NDI_SUCCESS;
+ }
+ scsi_hba_devi_exit(self, circ);
+ break;
+
+ case BUS_CONFIG_ALL:
+ case BUS_CONFIG_DRIVER:
+ ret = ndi_busop_bus_config(self, flags, op, arg, childp, 0);
+ break;
+
+ default:
+ break;
+ }
+ return (ret);
+}
+
+static int
+scsi_hba_bus_unconfig_iportmap(dev_info_t *self, uint_t flags,
+ ddi_bus_config_op_t op, void *arg)
+{
+ flags &= ~NDI_DEVI_REMOVE;
+ return (ndi_busop_bus_unconfig(self, flags, op, arg));
+}
+
+/*
+ * SCSI HBA bus config enumeration entry point. Called via the bus_ops
+ * bus_config entry point for all SCSA HBA drivers.
+ *
+ * o If an HBA implements its own bus_config via tran_bus_config then we
+ * invoke it. An HBA that implements its own tran_bus_config entry point
+ *   may still call back into the common SCSA bus_config code for:
+ *
+ *   o SPI bus_config (scsi_hba_bus_config_spi)
+ * o LUN and secondary function enumeration (scsi_hba_enum_lsf_of_t()).
+ * o configuration of a specific device (scsi_device_config).
+ * o determining 1275 SCSI nodename and compatible property
+ * (scsi_hba_nodename_compatible_get/_free).
+ *
+ * o Otherwise we implement a SCSI parallel interface (spi) bus config.
+ *
+ * Return NDI_SUCCESS if we might have created a new node.
+ * Return NDI_FAILURE if we definitely did not create a new node.
+ */
+static int
+scsi_hba_bus_config(dev_info_t *self, uint_t flags,
+ ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
+{
+ scsi_hba_tran_t *tran;
+ int ret;
+
+ /* make sure that we will not disappear */
+ ASSERT(DEVI(self)->devi_ref);
+
+ tran = ndi_flavorv_get(self, SCSA_FLAVOR_SCSI_DEVICE);
+ if (tran == NULL) {
+ /* NULL tran driver.conf config (used by cmdk). */
+ if ((op == BUS_CONFIG_ONE) || (flags & NDI_DRV_CONF_REPROBE))
+ flags |= NDI_CONFIG_REPROBE;
+ return (ndi_busop_bus_config(self, flags, op, arg, childp, 0));
+ }
+
+ /* Check if self is HBA-only node. */
+ if (tran->tran_hba_flags & SCSI_HBA_HBA) {
+ /* The bus_config request is to configure iports below HBA. */
+
+#ifdef sparc
+ /*
+ * Sparc's 'boot-device' OBP property value lacks an /iport@X/
+ * component. Prior to the mount of root, we drive a disk@
+		 * BUS_CONFIG_ONE operation down a level to resolve an
+ * OBP 'boot-device' path.
+ *
+		 * Future: Add (modrootloaded == 0) below, and ensure that
+		 * all attempts to bus_config 'bo_name' (in OBP form) occur
+ * prior to 'modrootloaded = 1;' assignment in vfs_mountroot.
+ */
+ if ((op == BUS_CONFIG_ONE) &&
+ (strncmp((char *)arg, "disk@", strlen("disk@")) == 0)) {
+ return (scsi_hba_bus_config_prom_node(self,
+ flags, arg, childp));
+ }
+#endif /* sparc */
+
+ if (tran->tran_iportmap) {
+ /* config based on scsi_hba_iportmap API */
+ ret = scsi_hba_bus_config_iportmap(self,
+ flags, op, arg, childp);
+ } else {
+ /* config based on 'iport_register' API */
+ ret = scsi_hba_bus_config_iports(self,
+ flags, op, arg, childp);
+ }
+ return (ret);
+ }
+
+ /* Check to see how the iport/HBA does target/lun bus config. */
+ if (tran->tran_bus_config) {
+ /* HBA config based on Sun-private/legacy tran_bus_config */
+ ret = tran->tran_bus_config(self, flags, op, arg, childp);
+ } else if (tran->tran_tgtmap) {
+ /* SCSAv3 config based on scsi_hba_tgtmap_*() API */
+ ret = scsi_hba_bus_config_tgtmap(self, flags, op, arg, childp);
+ } else {
+ /* SCSA config based on SCSI Parallel Interconnect */
+ ret = scsi_hba_bus_config_spi(self, flags, op, arg, childp);
+ }
+ return (ret);
+}
+
+/*
+ * Called via the bus_ops bus_unconfig entry point for SCSI HBA drivers.
+ */
+static int
+scsi_hba_bus_unconfig(dev_info_t *self, uint_t flags,
+ ddi_bus_config_op_t op, void *arg)
+{
+ int circ;
+ scsi_hba_tran_t *tran;
+	int ret = NDI_FAILURE;
+
+ tran = ddi_get_driver_private(self);
+ if (tran == NULL) {
+ /* NULL tran driver.conf unconfig (used by cmdk). */
+ return (ndi_busop_bus_unconfig(self, flags, op, arg));
+ }
+
+ /*
+ * Purge barrier/probe node children. We do this prior to
+ * tran_bus_unconfig in case the unconfig implementation calls back
+	 * into the common code at a different enumeration level, such as
+ * scsi_device_config, which still creates barrier/probe nodes.
+ */
+ scsi_hba_devi_enter(self, &circ);
+ scsi_hba_barrier_purge(self);
+ scsi_hba_devi_exit(self, circ);
+
+ /* Check if self is HBA-only node. */
+ if (tran->tran_hba_flags & SCSI_HBA_HBA) {
+		/* The bus_unconfig request is to unconfigure iports below HBA. */
+ if (tran->tran_iportmap) {
+ /* unconfig based on scsi_hba_iportmap API */
+ ret = scsi_hba_bus_unconfig_iportmap(self,
+ flags, op, arg);
+ }
+ return (ret);
+ }
+
+ /* Check to see how the iport/HBA does target/lun bus unconfig. */
+ if (tran->tran_bus_unconfig) {
+ /* HBA unconfig based on Sun-private/legacy tran_bus_unconfig */
+ ret = tran->tran_bus_unconfig(self, flags, op, arg);
+ } else if (tran->tran_tgtmap) {
+ /* SCSAv3 unconfig based on scsi_hba_tgtmap_*() API */
+ ret = scsi_hba_bus_unconfig_tgtmap(self, flags, op, arg);
+ } else {
+ /* SCSA unconfig based on SCSI Parallel Interconnect */
+ ret = scsi_hba_bus_unconfig_spi(self, flags, op, arg);
+ }
+ return (ret);
+}
+
+static void
+scsi_tgtmap_scsi_config(void *arg, damap_t *mapp, damap_id_list_t id_list)
+{
+ scsi_hba_tran_t *tran = (scsi_hba_tran_t *)arg;
+ dev_info_t *self = tran->tran_iport_dip;
+ impl_scsi_tgtmap_t *tgtmap;
+ int mt;
+ damap_id_t tgtid;
+ int ntargets;
+ char **tgt_addrv;
+ char **tgt_addr;
+
+ tgtmap = (impl_scsi_tgtmap_t *)tran->tran_tgtmap;
+ mt = ddi_prop_get_int(DDI_DEV_T_ANY, self,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
+ "scsi-enumeration", scsi_enumeration);
+
+ /* count the number of targets we need to config */
+ for (ntargets = 0,
+ tgtid = damap_id_next(mapp, id_list, NODAM);
+ tgtid != NODAM;
+ tgtid = damap_id_next(mapp, id_list, tgtid))
+ ntargets++;
+
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s %d target%s",
+ damap_name(mapp), ntargets, (ntargets == 1) ? "" : "s"));
+ if (ntargets == 0)
+ return;
+
+ /* allocate and form vector of addresses */
+ tgt_addrv = kmem_zalloc(sizeof (char *) * (ntargets + 1), KM_SLEEP);
+ for (tgt_addr = tgt_addrv,
+ tgtid = damap_id_next(mapp, id_list, NODAM);
+ tgtid != NODAM;
+ tgtid = damap_id_next(mapp, id_list, tgtid),
+ tgt_addr++) {
+ *tgt_addr = damap_id2addr(mapp, tgtid);
+
+ if (scsi_lunmap_create(self, tgtmap, *tgt_addr) != DDI_SUCCESS)
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "failed to create lunmap for %s", *tgt_addr));
+ else
+ SCSI_HBA_LOG((_LOGTGT, self, NULL,
+ "%s @%s", damap_name(mapp), *tgt_addr));
+ }
+
+ /* null terminate vector */
+ *tgt_addr = NULL;
+
+ /* configure vector of addresses (with multi-threading) */
+ scsi_hba_thread_taddrs(self, tgt_addrv, mt, SE_HP,
+ scsi_hba_taddr_config_thr);
+
+ /* free vector */
+ kmem_free(tgt_addrv, sizeof (char *) * (ntargets + 1));
+}
+
+static void
+scsi_tgtmap_scsi_unconfig(void *arg, damap_t *mapp, damap_id_list_t id_list)
+{
+ scsi_hba_tran_t *tran = (scsi_hba_tran_t *)arg;
+ dev_info_t *self = tran->tran_iport_dip;
+ impl_scsi_tgtmap_t *tgtmap;
+ damap_id_t tgtid;
+ char *tgt_addr;
+
+ tgtmap = (impl_scsi_tgtmap_t *)tran->tran_tgtmap;
+
+ for (tgtid = damap_id_next(mapp, id_list, NODAM);
+ tgtid != NODAM;
+ tgtid = damap_id_next(mapp, id_list, tgtid)) {
+ tgt_addr = damap_id2addr(mapp, tgtid);
+
+ SCSI_HBA_LOG((_LOGTGT, self, NULL,
+ "%s @%s", damap_name(mapp), tgt_addr));
+ scsi_lunmap_destroy(self, tgtmap, tgt_addr);
+ }
+}
+
+static void
+scsi_tgtmap_smp_config(void *arg, damap_t *mapp, damap_id_list_t id_list)
+{
+ scsi_hba_tran_t *tran = (scsi_hba_tran_t *)arg;
+ dev_info_t *self = tran->tran_iport_dip;
+ damap_id_t tgtid;
+ char *addr;
+
+ for (tgtid = damap_id_next(mapp, id_list, NODAM);
+ tgtid != NODAM;
+ tgtid = damap_id_next(mapp, id_list, tgtid)) {
+ addr = damap_id2addr(mapp, tgtid);
+ SCSI_HBA_LOG((_LOGTGT, self, NULL,
+ "%s @%s", damap_name(mapp), addr));
+
+ (void) smp_hba_bus_config_taddr(self, addr);
+ }
+}
+
+static void
+scsi_tgtmap_smp_unconfig(void *arg, damap_t *mapp, damap_id_list_t id_list)
+{
+ scsi_hba_tran_t *tran = (scsi_hba_tran_t *)arg;
+ dev_info_t *self = tran->tran_iport_dip;
+ damap_id_t tgtid;
+ char *addr;
+ dev_info_t *child;
+ char nameaddr[SCSI_MAXNAMELEN];
+ int circ;
+
+ for (tgtid = damap_id_next(mapp, id_list, NODAM);
+ tgtid != NODAM;
+ tgtid = damap_id_next(mapp, id_list, tgtid)) {
+ addr = damap_id2addr(mapp, tgtid);
+ SCSI_HBA_LOG((_LOGTGT, self, NULL,
+ "%s @%s", damap_name(mapp), addr));
+
+ (void) snprintf(nameaddr, sizeof (nameaddr), "smp@%s", addr);
+ scsi_hba_devi_enter(self, &circ);
+ if ((child = ndi_devi_findchild(self, nameaddr)) == NULL) {
+ scsi_hba_devi_exit(self, circ);
+ continue;
+ }
+
+ if (ndi_devi_offline(child,
+ NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE) == DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "devinfo smp@%s offlined and removed", addr));
+ } else if (ndi_devi_device_remove(child)) {
+ /* Offline/remove failed, note new device_remove */
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "devinfo smp@%s offline failed, device_remove",
+ addr));
+ }
+ scsi_hba_devi_exit(self, circ);
+ }
+}
+
+/* ARGSUSED1 */
+static void
+scsi_tgtmap_smp_activate(void *map_priv, char *tgt_addr, int addrid,
+ void **tgt_privp)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)map_priv;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+
+ if (tgtmap->tgtmap_activate_cb) {
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s @%s activated",
+ damap_name(tgtmap->tgtmap_dam[SCSI_TGT_SMP_DEVICE]),
+ tgt_addr));
+
+ (*tgtmap->tgtmap_activate_cb)(tgtmap->tgtmap_mappriv,
+ tgt_addr, SCSI_TGT_SMP_DEVICE, tgt_privp);
+ }
+}
+
+/* ARGSUSED1 */
+static void
+scsi_tgtmap_smp_deactivate(void *map_priv, char *tgt_addr, int addrid,
+ void *tgt_privp)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)map_priv;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+
+ if (tgtmap->tgtmap_deactivate_cb) {
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s @%s deactivated",
+ damap_name(tgtmap->tgtmap_dam[SCSI_TGT_SMP_DEVICE]),
+ tgt_addr));
+
+ (*tgtmap->tgtmap_deactivate_cb)(tgtmap->tgtmap_mappriv,
+ tgt_addr, SCSI_TGT_SMP_DEVICE, tgt_privp);
+ }
+}
+
+/* ARGSUSED1 */
+static void
+scsi_tgtmap_scsi_activate(void *map_priv, char *tgt_addr, int addrid,
+ void **tgt_privp)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)map_priv;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+
+ if (tgtmap->tgtmap_activate_cb) {
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s @%s activated",
+ damap_name(tgtmap->tgtmap_dam[SCSI_TGT_SCSI_DEVICE]),
+ tgt_addr));
+
+ (*tgtmap->tgtmap_activate_cb)(tgtmap->tgtmap_mappriv,
+ tgt_addr, SCSI_TGT_SCSI_DEVICE, tgt_privp);
+ }
+}
+
+/* ARGSUSED1 */
+static void
+scsi_tgtmap_scsi_deactivate(void *map_priv, char *tgt_addr, int addrid,
+ void *tgt_privp)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)map_priv;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+
+ if (tgtmap->tgtmap_deactivate_cb) {
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s @%s deactivated",
+ damap_name(tgtmap->tgtmap_dam[SCSI_TGT_SCSI_DEVICE]),
+ tgt_addr));
+
+ (*tgtmap->tgtmap_deactivate_cb)(tgtmap->tgtmap_mappriv,
+ tgt_addr, SCSI_TGT_SCSI_DEVICE, tgt_privp);
+ }
+}
+
+
+int
+scsi_hba_tgtmap_create(dev_info_t *self, scsi_tgtmap_mode_t mode,
+ clock_t settle, int n_entries,
+ void *tgtmap_priv, scsi_tgt_activate_cb_t activate_cb,
+ scsi_tgt_deactivate_cb_t deactivate_cb,
+ scsi_hba_tgtmap_t **handle)
+{
+ scsi_hba_tran_t *tran;
+ damap_t *mapp;
+ char context[64];
+ impl_scsi_tgtmap_t *tgtmap;
+ damap_rptmode_t rpt_style;
+ char *scsi_binding_set;
+
+ if (self == NULL || settle == 0 || n_entries == 0 || handle == NULL)
+ return (DDI_FAILURE);
+
+ *handle = NULL;
+
+ if (scsi_hba_iport_unit_address(self) == NULL)
+ return (DDI_FAILURE);
+
+ switch (mode) {
+ case SCSI_TM_FULLSET:
+ rpt_style = DAMAP_REPORT_FULLSET;
+ break;
+ case SCSI_TM_PERADDR:
+ rpt_style = DAMAP_REPORT_PERADDR;
+ break;
+ default:
+ return (DDI_FAILURE);
+ }
+
+ tran = (scsi_hba_tran_t *)ddi_get_driver_private(self);
+ ASSERT(tran);
+ if (tran == NULL)
+ return (DDI_FAILURE);
+
+ tgtmap = kmem_zalloc(sizeof (*tgtmap), KM_SLEEP);
+ tgtmap->tgtmap_tran = tran;
+ tgtmap->tgtmap_activate_cb = activate_cb;
+ tgtmap->tgtmap_deactivate_cb = deactivate_cb;
+ tgtmap->tgtmap_mappriv = tgtmap_priv;
+
+ (void) snprintf(context, sizeof (context), "%s%d.tgtmap.scsi",
+ ddi_driver_name(self), ddi_get_instance(self));
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s", context));
+ if (damap_create(context, n_entries, rpt_style, settle,
+ tgtmap, scsi_tgtmap_scsi_activate, scsi_tgtmap_scsi_deactivate,
+ tran, scsi_tgtmap_scsi_config, scsi_tgtmap_scsi_unconfig,
+ &mapp) != DAM_SUCCESS) {
+ kmem_free(tgtmap, sizeof (*tgtmap));
+ return (DDI_FAILURE);
+ }
+ tgtmap->tgtmap_dam[SCSI_TGT_SCSI_DEVICE] = mapp;
+
+ (void) snprintf(context, sizeof (context), "%s%d.tgtmap.smp",
+ ddi_driver_name(self), ddi_get_instance(self));
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s", context));
+ if (damap_create(context, n_entries, rpt_style, settle,
+ tgtmap, scsi_tgtmap_smp_activate, scsi_tgtmap_smp_deactivate,
+ tran, scsi_tgtmap_smp_config, scsi_tgtmap_smp_unconfig,
+ &mapp) != DAM_SUCCESS) {
+ damap_destroy(tgtmap->tgtmap_dam[SCSI_TGT_SCSI_DEVICE]);
+ kmem_free(tgtmap, sizeof (*tgtmap));
+ return (DDI_FAILURE);
+ }
+ tgtmap->tgtmap_dam[SCSI_TGT_SMP_DEVICE] = mapp;
+
+ tran->tran_tgtmap = (scsi_hba_tgtmap_t *)tgtmap;
+ *handle = (scsi_hba_tgtmap_t *)tgtmap;
+
+ /*
+ * We have now set tran_tgtmap, marking the tran as using tgtmap
+ * enumeration services. To prevent the generation of legacy spi
+ * 'binding-set' compatible forms, remove the 'scsi-binding-set'
+ * property.
+ */
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, self,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
+ &scsi_binding_set) == DDI_PROP_SUCCESS) {
+ if (strcmp(scsi_binding_set, scsi_binding_set_spi) == 0)
+ (void) ndi_prop_remove(DDI_DEV_T_NONE, self,
+ "scsi-binding-set");
+ ddi_prop_free(scsi_binding_set);
+ }
+ return (DDI_SUCCESS);
+}
+
+void
+scsi_hba_tgtmap_destroy(scsi_hba_tgtmap_t *handle)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)handle;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+ int i;
+
+ for (i = 0; i < SCSI_TGT_NTYPES; i++) {
+ if (tgtmap->tgtmap_dam[i]) {
+ SCSI_HBA_LOG((_LOGTGT, self, NULL,
+ "%s", damap_name(tgtmap->tgtmap_dam[i])));
+
+ damap_destroy(tgtmap->tgtmap_dam[i]);
+ }
+ }
+ kmem_free(tgtmap, sizeof (*tgtmap));
+}
+
+static int
+scsi_tgtmap_sync(scsi_hba_tgtmap_t *handle)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)handle;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+ int empty = 1;
+ int i;
+
+ for (i = 0; i < SCSI_TGT_NTYPES; i++) {
+ if (tgtmap->tgtmap_dam[i]) {
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s sync begin",
+ damap_name(tgtmap->tgtmap_dam[i])));
+
+ /* return 1 if all maps ended up empty */
+ empty &= damap_sync(tgtmap->tgtmap_dam[i]);
+
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s sync end",
+ damap_name(tgtmap->tgtmap_dam[i])));
+ }
+ }
+ return (empty);
+}
+
+int
+scsi_hba_tgtmap_set_begin(scsi_hba_tgtmap_t *handle)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)handle;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+ char *context;
+ int rv = DDI_SUCCESS;
+ int i;
+
+ for (i = 0; i < SCSI_TGT_NTYPES; i++) {
+ if (tgtmap->tgtmap_dam[i] == NULL)
+ continue;
+
+ context = damap_name(tgtmap->tgtmap_dam[i]);
+
+ if (i == SCSI_TGT_SCSI_DEVICE) {
+ /*
+			 * We are in scsi_device context, so we have the
+			 * 'context' string; diagnose the case where the tgtmap
+			 * caller is failing to make forward progress, i.e. the
+			 * caller is never completing an observation by calling
+			 * scsi_hba_tgtmap_set_end. If this occurs, the Solaris
+			 * target/lun state may be out of sync with the hardware.
+ */
+ if (tgtmap->tgtmap_reports++ >=
+ scsi_hba_tgtmap_reports_max) {
+ tgtmap->tgtmap_noisy++;
+ if (tgtmap->tgtmap_noisy == 1)
+ SCSI_HBA_LOG((_LOG(WARN), self, NULL,
+ "%s: failing to complete a tgtmap "
+ "observation", context));
+ }
+ }
+
+ if (damap_addrset_begin(
+ tgtmap->tgtmap_dam[i]) != DAM_SUCCESS) {
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s FAIL", context));
+ rv = DDI_FAILURE;
+ continue;
+ }
+
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s", context));
+ }
+ return (rv);
+}
+
+
+int
+scsi_hba_tgtmap_set_add(scsi_hba_tgtmap_t *handle,
+ scsi_tgtmap_tgt_type_t tgt_type, char *tgt_addr, void *tgt_priv)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)handle;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+
+ if (tgt_type >= SCSI_TGT_NTYPES || !tgtmap->tgtmap_dam[tgt_type])
+ return (DDI_FAILURE);
+
+ SCSI_HBA_LOG((_LOGTGT, self, NULL,
+ "%s @%s", damap_name(tgtmap->tgtmap_dam[tgt_type]), tgt_addr));
+
+ return ((damap_addrset_add(tgtmap->tgtmap_dam[tgt_type], tgt_addr,
+ NULL, NULL, tgt_priv) == DAM_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
+}
+
+/*ARGSUSED*/
+int
+scsi_hba_tgtmap_set_end(scsi_hba_tgtmap_t *handle, uint_t flags)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)handle;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+ char *context;
+ int rv = DDI_SUCCESS;
+ int i;
+
+ tgtmap->tgtmap_reports = tgtmap->tgtmap_noisy = 0;
+
+ for (i = 0; i < SCSI_TGT_NTYPES; i++) {
+ if (tgtmap->tgtmap_dam[i] == NULL)
+ continue;
+
+ context = damap_name(tgtmap->tgtmap_dam[i]);
+ if (damap_addrset_end(
+ tgtmap->tgtmap_dam[i], 0) != DAM_SUCCESS) {
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s FAIL", context));
+ rv = DDI_FAILURE;
+ continue;
+ }
+
+ SCSI_HBA_LOG((_LOGTGT, self, NULL, "%s", context));
+ }
+ return (rv);
+}
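
As a usage illustration (not part of this change), an HBA discovery routine might drive the full-set observation cycle these three entry points implement roughly as follows; the myhba_* names and the per-driver device list are hypothetical:

	/* Sketch only: assumes a driver soft state holding a tgtmap handle. */
	static void
	myhba_report_targets(myhba_t *hba)
	{
		scsi_hba_tgtmap_t *map = hba->hba_tgtmap; /* scsi_hba_tgtmap_create() */
		int i;

		/* Open an observation; every currently-visible target must be re-added. */
		if (scsi_hba_tgtmap_set_begin(map) != DDI_SUCCESS)
			return;

		for (i = 0; i < hba->hba_ndevices; i++) {
			/* unit-address string, e.g. "w5000c50012345678" */
			(void) scsi_hba_tgtmap_set_add(map, SCSI_TGT_SCSI_DEVICE,
			    hba->hba_device[i].addr, NULL);
		}

		/* Close the observation; targets not re-added age out after settle time. */
		(void) scsi_hba_tgtmap_set_end(map, 0);
	}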
+
+int
+scsi_hba_tgtmap_tgt_add(scsi_hba_tgtmap_t *handle,
+ scsi_tgtmap_tgt_type_t tgt_type, char *tgt_addr, void *tgt_priv)
+
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)handle;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+
+ if (tgt_type >= SCSI_TGT_NTYPES || !tgtmap->tgtmap_dam[tgt_type])
+ return (DDI_FAILURE);
+
+ SCSI_HBA_LOG((_LOGTGT, self, NULL,
+ "%s @%s", damap_name(tgtmap->tgtmap_dam[tgt_type]), tgt_addr));
+
+ return ((damap_addr_add(tgtmap->tgtmap_dam[tgt_type], tgt_addr, NULL,
+ NULL, tgt_priv) == DAM_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
+}
+
+int
+scsi_hba_tgtmap_tgt_remove(scsi_hba_tgtmap_t *handle,
+ scsi_tgtmap_tgt_type_t tgt_type, char *tgt_addr)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)handle;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+
+ if (tgt_type >= SCSI_TGT_NTYPES || !tgtmap->tgtmap_dam[tgt_type])
+ return (DDI_FAILURE);
+
+ SCSI_HBA_LOG((_LOGTGT, self, NULL,
+ "%s @%s", damap_name(tgtmap->tgtmap_dam[tgt_type]), tgt_addr));
+
+ return ((damap_addr_del(tgtmap->tgtmap_dam[tgt_type],
+ tgt_addr) == DAM_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
+}
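
For a driver using per-address reporting rather than full-set observations, the same map can be driven directly from hotplug events; a sketch (the event-handler names are hypothetical):

	/* Sketch only: per-address style, one call per arrival/departure event. */
	static void
	myhba_target_arrived(myhba_t *hba, char *tgt_ua)
	{
		(void) scsi_hba_tgtmap_tgt_add(hba->hba_tgtmap,
		    SCSI_TGT_SCSI_DEVICE, tgt_ua, NULL);
	}

	static void
	myhba_target_departed(myhba_t *hba, char *tgt_ua)
	{
		(void) scsi_hba_tgtmap_tgt_remove(hba->hba_tgtmap,
		    SCSI_TGT_SCSI_DEVICE, tgt_ua);
	}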
+
+int
+scsi_hba_tgtmap_lookup(scsi_hba_tgtmap_t *handle,
+ char *tgt_addr, scsi_tgtmap_tgt_type_t *r_type)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)handle;
+ dev_info_t *self = tgtmap->tgtmap_tran->tran_iport_dip;
+ damap_id_t tgtid;
+ int i;
+
+ for (i = 0; i < SCSI_TGT_NTYPES; i++) {
+ tgtid = damap_lookup(tgtmap->tgtmap_dam[i], tgt_addr);
+ if (tgtid != NODAM) {
+ *r_type = i;
+ SCSI_HBA_LOG((_LOG(3), self, NULL,
+ "%s @%s found: type %d",
+ damap_name(tgtmap->tgtmap_dam[i]), tgt_addr, i));
+ damap_id_rele(tgtmap->tgtmap_dam[i], tgtid);
+ return (DDI_SUCCESS);
+ }
+ }
+
+ SCSI_HBA_LOG((_LOG(3), self, NULL,
+ "%s%d.tgtmap @%s not found",
+ ddi_driver_name(self), ddi_get_instance(self), tgt_addr));
+ return (DDI_FAILURE);
+}
+
+/*
+ * Return the unit-address of an 'iport' node, or NULL for a non-iport node.
+ */
+char *
+scsi_hba_iport_unit_address(dev_info_t *self)
+{
+ /*
+ * NOTE: Since 'self' could be a SCSA iport node or a SCSA HBA node,
+ * we can't use SCSA flavors: the flavor of a SCSA HBA node is not
+ * established/owned by SCSA, it is established by the nexus that
+ * created the SCSA HBA node (PCI) as a child.
+ *
+ * NOTE: If we want to support a node_name other than "iport" for
+ * an iport node then we can add support for a "scsa-iport-node-name"
+ * property on the SCSA HBA node. A SCSA HBA driver would set this
+ * property on the SCSA HBA node prior to using the iport API.
+ */
+ if (strcmp(ddi_node_name(self), "iport") == 0)
+ return (ddi_get_name_addr(self));
+ else
+ return (NULL);
+}
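
One common use of this helper, sketched under the assumption of a driver named "myhba": the shared attach(9E) routine dispatches on whether it was invoked for the HBA node or for one of its iport children (the helper functions are hypothetical):

	/* Sketch only: myhba_attach_hba/myhba_attach_iport are assumed helpers. */
	static int
	myhba_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
	{
		if (cmd != DDI_ATTACH)
			return (DDI_FAILURE);

		if (scsi_hba_iport_unit_address(dip) != NULL)
			return (myhba_attach_iport(dip));	/* "iport@..." child */

		return (myhba_attach_hba(dip));			/* the HBA node itself */
	}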
+
+/*
+ * Define a SCSI initiator port (bus/channel) for an HBA card that needs to
+ * support multiple SCSI ports, but only has a single HBA devinfo node. This
+ * function should be called from the HBA's attach(9E) implementation (when
+ * processing the HBA devinfo node attach) after the number of SCSI ports on
+ * the card is known or when the HBA driver DR handler detects a new port.
+ * The function returns DDI_SUCCESS on success and DDI_FAILURE on failure.
+ *
+ * The implementation adds the port value to the "scsi-iports" property
+ * value maintained on the HBA node. This property is used by the generic
+ * scsi bus_config implementation to dynamically enumerate the specified iport
+ * children. The enumeration code will, on demand, create the appropriate
+ * iport children with a SCSI_ADDR_PROP_IPORTUA unit address. These nodes
+ * bind to the same driver as the HBA node itself. This means that an HBA
+ * driver that uses iports should expect probe(9E), attach(9E), and detach(9E)
+ * calls on the iport children of the HBA. If configuration for all ports was
+ * already done during HBA node attach, the driver should just return
+ * DDI_SUCCESS when confronted with an iport node.
+ *
+ * A maximum of 32 iports are supported per HBA devinfo node.
+ *
+ * A NULL "port" can be used to indicate that the framework should enumerate
+ * target children on the HBA node itself, in addition to enumerating target
+ * children on any iport nodes declared. There are two reasons that an HBA may
+ * wish to have target children enumerated on both the HBA node and iport
+ * node(s):
+ *
+ * o If, in the past, HBA hardware had only a single physical port but now
+ * supports multiple physical ports, the updated driver that supports
+ * multiple physical ports may want to avoid /devices path upgrade issues
+ *    by enumerating the first physical port under the HBA instead of as an
+ *    iport.
+ *
+ * o Some hardware RAID HBA controllers (mlx, chs, etc) support multiple
+ * SCSI physical ports configured so that various physical devices on
+ * the physical ports are amalgamated into virtual devices on a virtual
+ * port. Amalgamated physical devices no longer appear to the host OS
+ * on the physical ports, but other non-amalgamated devices may still be
+ * visible on the physical ports. These drivers use a model where the
+ * physical ports are iport nodes and the HBA node is the virtual port to
+ * the configured virtual devices.
+ */
+int
+scsi_hba_iport_register(dev_info_t *self, char *port)
+{
+ unsigned int ports = 0;
+ int rval, i;
+ char **iports, **newiports;
+
+ ASSERT(self);
+ if (self == NULL)
+ return (DDI_FAILURE);
+
+ rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, self,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "scsi-iports", &iports,
+ &ports);
+
+ if (ports >= SCSI_HBA_MAX_IPORTS) {
+ ddi_prop_free(iports);
+ return (DDI_FAILURE);
+ }
+
+ if (rval == DDI_PROP_SUCCESS) {
+ for (i = 0; i < ports; i++) {
+ if (strcmp(port, iports[i]) == 0) {
+ /* iport already registered */
+ ddi_prop_free(iports);
+ return (DDI_SUCCESS);
+ }
+ }
+ }
+
+ newiports = kmem_alloc((sizeof (char *) * (ports + 1)), KM_SLEEP);
+
+ for (i = 0; i < ports; i++) {
+ newiports[i] = strdup(iports[i]);
+ }
+ newiports[ports] = strdup(port);
+ ports++;
+
+ if (ddi_prop_update_string_array(DDI_DEV_T_NONE, self,
+ "scsi-iports", newiports, ports) != DDI_PROP_SUCCESS) {
+ SCSI_HBA_LOG((_LOG(WARN), self, NULL,
+ "failed to establish %s %s",
+ SCSI_ADDR_PROP_IPORTUA, port));
+ rval = DDI_FAILURE;
+ } else {
+ rval = DDI_SUCCESS;
+ }
+
+	/* If iports already existed, free the looked-up property */
+ if (ports > 1)
+ ddi_prop_free(iports);
+ for (i = 0; i < ports; i++) {
+ strfree(newiports[i]);
+ }
+ kmem_free(newiports, (sizeof (char *)) * ports);
+
+ return (rval);
+}
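
A sketch of how an HBA attach path might declare its ports through this interface; the port naming scheme ("0", "1", ...) is the driver's own choice and only illustrative here:

	/* Sketch only: called while attaching the HBA node, once the port count is known. */
	static int
	myhba_register_ports(dev_info_t *self, int nports)
	{
		char port[8];
		int i;

		for (i = 0; i < nports; i++) {
			(void) snprintf(port, sizeof (port), "%d", i);
			if (scsi_hba_iport_register(self, port) != DDI_SUCCESS)
				return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}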
+
+/*
+ * Check if the HBA has any scsi_hba_iport_register()ed children.
+ */
+int
+scsi_hba_iport_exist(dev_info_t *self)
+{
+ unsigned int ports = 0;
+ char **iports;
+ int rval;
+
+ rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, self,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "scsi-iports", &iports,
+ &ports);
+
+ if (rval != DDI_PROP_SUCCESS)
+ return (0);
+
+ /* If there is now at least 1 iport, then iports is valid */
+ if (ports > 0) {
+ rval = 1;
+ } else
+ rval = 0;
+ ddi_prop_free(iports);
+
+ return (rval);
+}
+
+dev_info_t *
+scsi_hba_iport_find(dev_info_t *self, char *portnm)
+{
+ char *addr = NULL;
+ char **iports;
+ unsigned int num_iports = 0;
+ int rval = DDI_FAILURE;
+ int i = 0;
+ dev_info_t *child = NULL;
+
+ /* check to see if this is an HBA that defined scsi iports */
+ rval = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, self,
+ DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "scsi-iports", &iports,
+ &num_iports);
+
+ if (rval != DDI_SUCCESS) {
+ return (NULL);
+ }
+ ASSERT(num_iports > 0);
+
+ /* check to see if this port was registered */
+ for (i = 0; i < num_iports; i++) {
+ if (strcmp(iports[i], portnm) == 0)
+ break;
+ }
+
+ if (i == num_iports) {
+ child = NULL;
+ goto out;
+ }
+
+ addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
+ (void) snprintf(addr, SCSI_MAXNAMELEN, "iport@%s", portnm);
+ rval = ndi_devi_config_one(self, addr, &child, NDI_NO_EVENT);
+ kmem_free(addr, SCSI_MAXNAMELEN);
+
+ if (rval != DDI_SUCCESS) {
+ child = NULL;
+ }
+out:
+ ddi_prop_free(iports);
+ return (child);
+}
+
/*
* Search/create the specified iport node
*/
static dev_info_t *
-scsi_hba_bus_config_port(dev_info_t *self, char *nameaddr)
+scsi_hba_bus_config_port(dev_info_t *self, char *nameaddr, scsi_enum_t se)
{
- dev_info_t *child;
- char *mcompatible, *addr;
+ dev_info_t *child; /* iport child of HBA node */
+ scsi_hba_tran_t *tran;
+ char *addr;
+ char *compat;
/*
* See if the iport node already exists.
*/
-
+ addr = nameaddr + strlen("iport@");
if (child = ndi_devi_findchild(self, nameaddr)) {
+ if (ndi_devi_device_isremoved(child)) {
+ if ((se == SE_HP) || !ndi_dev_is_hotplug_node(child)) {
+ if (ndi_devi_device_insert(child))
+ SCSI_HBA_LOG((_LOGCFG, self, NULL,
+ "devinfo iport@%s device_reinsert",
+ addr));
+ } else
+ return (NULL);
+ }
return (child);
}
+
+ /*
+ * If config based on scsi_hba_iportmap API, only allow create
+ * from hotplug.
+ */
+ tran = ndi_flavorv_get(self, SCSA_FLAVOR_SCSI_DEVICE);
+ ASSERT(tran);
+ if (tran->tran_iportmap && (se != SE_HP))
+ return (NULL);
+
/* allocate and initialize a new "iport" node */
- ndi_devi_alloc_sleep(self, "iport", DEVI_SID_NODEID, &child);
+ ndi_devi_alloc_sleep(self, "iport",
+ (se == SE_HP) ? DEVI_SID_HP_NODEID : DEVI_SID_NODEID,
+ &child);
ASSERT(child);
/*
* Set the flavor of the child to be IPORT flavored
@@ -3864,12 +8546,13 @@ scsi_hba_bus_config_port(dev_info_t *self, char *nameaddr)
ndi_flavor_set(child, SCSA_FLAVOR_IPORT);
/*
- * Add the "scsi-iport" addressing property for this child. This
- * property is used to identify a iport node, and to represent the
+ * Add the SCSI_ADDR_PROP_IPORTUA addressing property for this child.
+ * This property is used to identify a iport node, and to represent the
* nodes @addr form via node properties.
*
* Add "compatible" property to the "scsi-iport" node to cause it bind
- * to the same driver as the HBA driver.
+ * to the same driver as the HBA driver. Use the "driver" name
+	 * instead of the "binding name" to distinguish it from the hw node.
*
* Give the HBA a chance, via tran_set_name_prop, to set additional
* iport node properties or to change the "compatible" binding
@@ -3878,31 +8561,40 @@ scsi_hba_bus_config_port(dev_info_t *self, char *nameaddr)
* NOTE: the order of these operations is important so that
* scsi_hba_iport works when called.
*/
- mcompatible = ddi_binding_name(self);
- addr = nameaddr + strlen("iport@");
-
+ compat = (char *)ddi_driver_name(self);
if ((ndi_prop_update_string(DDI_DEV_T_NONE, child,
- "scsi-iport", addr) != DDI_PROP_SUCCESS) ||
+ SCSI_ADDR_PROP_IPORTUA, addr) != DDI_PROP_SUCCESS) ||
(ndi_prop_update_string_array(DDI_DEV_T_NONE, child,
- "compatible", &mcompatible, 1) != DDI_PROP_SUCCESS) ||
+ "compatible", &compat, 1) != DDI_PROP_SUCCESS) ||
ddi_pathname_obp_set(child, NULL) != DDI_SUCCESS) {
- SCSI_HBA_LOG((_LOG(WARN), self, NULL,
- "scsi_hba_bus_config_port:%s failed dynamic decoration",
+ SCSI_HBA_LOG((_LOG_NF(WARN), "%s failed dynamic decoration",
nameaddr));
(void) ddi_remove_child(child, 0);
child = NULL;
} else {
- if (ddi_initchild(self, child) != DDI_SUCCESS) {
+ /*
+ * Online/attach in order to get events so devfsadm will
+ * create public names.
+ */
+ ndi_hold_devi(child);
+ if (ndi_devi_online(child, 0) != NDI_SUCCESS) {
+ ndi_rele_devi(child);
ndi_prop_remove_all(child);
(void) ndi_devi_free(child);
child = NULL;
- }
+ } else
+ ndi_rele_devi(child);
}
return (child);
}
#ifdef sparc
+/*
+ * Future: When iportmap boot support is added, consider rewriting this to
+ * perform a scsi_hba_bus_config(BUS_CONFIG_ALL) on self (HBA) followed by
+ * a scsi_hba_bus_config(BUS_CONFIG_ONE) on each child of self (each iport).
+ */
/* ARGSUSED */
static int
scsi_hba_bus_config_prom_node(dev_info_t *self, uint_t flags,
@@ -3930,7 +8622,7 @@ scsi_hba_bus_config_prom_node(dev_info_t *self, uint_t flags,
ret = NDI_FAILURE;
- ndi_devi_enter(self, &circ);
+ scsi_hba_devi_enter(self, &circ);
/* create iport nodes for each scsi port/bus */
for (i = 0; i < num_iports; i++) {
@@ -3938,7 +8630,7 @@ scsi_hba_bus_config_prom_node(dev_info_t *self, uint_t flags,
/* Prepend the iport name */
(void) snprintf(addr, SCSI_MAXNAMELEN, "iport@%s",
iports[i]);
- if (pdip = scsi_hba_bus_config_port(self, addr)) {
+ if (pdip = scsi_hba_bus_config_port(self, addr, SE_HP)) {
if (ndi_busop_bus_config(self, NDI_NO_EVENT,
BUS_CONFIG_ONE, addr, &pdip, 0) !=
NDI_SUCCESS) {
@@ -3956,7 +8648,7 @@ scsi_hba_bus_config_prom_node(dev_info_t *self, uint_t flags,
}
}
- ndi_devi_exit(self, circ);
+ scsi_hba_devi_exit(self, circ);
kmem_free(addr, SCSI_MAXNAMELEN);
@@ -3990,7 +8682,7 @@ scsi_hba_bus_config_iports(dev_info_t *self, uint_t flags,
ASSERT(num_iports > 0);
- ndi_devi_enter(self, &circ);
+ scsi_hba_devi_enter(self, &circ);
switch (op) {
case BUS_CONFIG_ONE:
@@ -3999,15 +8691,12 @@ scsi_hba_bus_config_iports(dev_info_t *self, uint_t flags,
if ((nameaddr == NULL) ||
(strncmp(nameaddr, "iport@", strlen("iport@")) != 0)) {
ret = NDI_FAILURE;
- ndi_devi_exit(self, circ);
+ scsi_hba_devi_exit(self, circ);
ddi_prop_free(iports);
return (ret);
}
- /*
- * parse the port number from "iport@%x"
- * XXX use atoi (hex)
- */
+ /* parse the port number from "iport@%s" */
addr = nameaddr + strlen("iport@");
/* check to see if this port was registered */
@@ -4021,11 +8710,12 @@ scsi_hba_bus_config_iports(dev_info_t *self, uint_t flags,
break;
}
- /* create the iport node */
- if (scsi_hba_bus_config_port(self, nameaddr)) {
+ /* create the iport node child */
+ if (scsi_hba_bus_config_port(self, nameaddr, SE_BUSCONFIG)) {
ret = NDI_SUCCESS;
}
break;
+
case BUS_CONFIG_ALL:
case BUS_CONFIG_DRIVER:
addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
@@ -4035,7 +8725,8 @@ scsi_hba_bus_config_iports(dev_info_t *self, uint_t flags,
/* Prepend the iport name */
(void) snprintf(addr, SCSI_MAXNAMELEN, "iport@%s",
iports[i]);
- (void) scsi_hba_bus_config_port(self, addr);
+ (void) scsi_hba_bus_config_port(self, addr,
+ SE_BUSCONFIG);
}
kmem_free(addr, SCSI_MAXNAMELEN);
@@ -4050,115 +8741,1125 @@ scsi_hba_bus_config_iports(dev_info_t *self, uint_t flags,
*/
flags &= (~NDI_PROMNAME);
#endif
+ flags |= NDI_MDI_FALLBACK; /* devinfo&pathinfo children */
ret = ndi_busop_bus_config(self, flags, op,
arg, childp, 0);
}
- ndi_devi_exit(self, circ);
+ scsi_hba_devi_exit(self, circ);
ddi_prop_free(iports);
return (ret);
}
+typedef struct impl_scsi_iportmap {
+ dev_info_t *iportmap_hba_dip;
+ damap_t *iportmap_dam;
+} impl_scsi_iportmap_t;
-/*ARGSUSED*/
-static int
-scsi_hba_bus_config(dev_info_t *self, uint_t flag, ddi_bus_config_op_t op,
- void *arg, dev_info_t **childp)
+static void
+scsi_iportmap_config(void *arg, damap_t *mapp, damap_id_list_t id_list)
{
- scsi_hba_tran_t *tran = NULL;
+ dev_info_t *self = (dev_info_t *)arg;
+ int circ;
+ damap_id_t tgtid;
+ char nameaddr[SCSI_MAXNAMELEN];
+ char *iport_addr;
+
+ scsi_hba_devi_enter(self, &circ);
+
+ for (tgtid = damap_id_next(mapp, id_list, NODAM);
+ tgtid != NODAM;
+ tgtid = damap_id_next(mapp, id_list, tgtid)) {
+ iport_addr = damap_id2addr(mapp, tgtid);
+ SCSI_HBA_LOG((_LOGIPT, self, NULL,
+ "%s @%s", damap_name(mapp), iport_addr));
+
+ (void) snprintf(nameaddr, sizeof (nameaddr),
+ "iport@%s", iport_addr);
+ (void) scsi_hba_bus_config_port(self, nameaddr, SE_HP);
+ }
- tran = ddi_get_driver_private(self);
+ scsi_hba_devi_exit(self, circ);
+}
- if (tran && (tran->tran_hba_flags & SCSI_HBA_HBA)) {
-#ifdef sparc
- char *nameaddr = NULL;
- nameaddr = (char *)arg;
- switch (op) {
- case BUS_CONFIG_ONE:
- if (nameaddr == NULL)
- return (NDI_FAILURE);
- if (strncmp(nameaddr, "iport", strlen("iport")) == 0) {
- break;
- }
- /*
- * If this operation is not against an iport node, it's
- * possible the operation is requested to configure
- * root disk by OBP. Unfortunately, prom path is without
- * iport string in the boot path.
- */
- if (strncmp(nameaddr, "disk@", strlen("disk@")) == 0) {
- return (scsi_hba_bus_config_prom_node(self,
- flag, arg, childp));
- }
- break;
- default:
- break;
+static void
+scsi_iportmap_unconfig(void *arg, damap_t *mapp, damap_id_list_t id_list)
+{
+ dev_info_t *self = arg;
+ dev_info_t *child; /* iport child of HBA node */
+ int circ;
+ damap_id_t tgtid;
+ char *addr;
+ char nameaddr[SCSI_MAXNAMELEN];
+ scsi_hba_tran_t *tran;
+ int empty;
+
+ for (tgtid = damap_id_next(mapp, id_list, NODAM);
+ tgtid != NODAM;
+ tgtid = damap_id_next(mapp, id_list, tgtid)) {
+ addr = damap_id2addr(mapp, tgtid);
+ SCSI_HBA_LOG((_LOGIPT, self, NULL,
+ "%s @%s", damap_name(mapp), addr));
+
+ (void) snprintf(nameaddr, sizeof (nameaddr), "iport@%s", addr);
+ scsi_hba_devi_enter(self, &circ);
+ if ((child = ndi_devi_findchild(self, nameaddr)) == NULL) {
+ scsi_hba_devi_exit(self, circ);
+ continue;
}
-#endif
+
+ tran = ddi_get_driver_private(child);
+ ASSERT(tran);
+
+ ndi_hold_devi(child);
+ scsi_hba_devi_exit(self, circ);
+
/*
- * The request is to configure multi-port HBA.
- * Now start to configure iports, for the end
- * devices attached to iport, should be configured
- * by bus_configure routine of iport
+ * A begin/end (clear) against the iport's
+ * tgtmap will trigger unconfigure of all
+ * targets on the iport.
+ *
+ * Future: This bit of code only works if the
+		 * target map reporting style is full-set
+		 * reports and not per-address. Maybe we
+ * should plan on handling this by
+ * auto-unconfiguration when destroying the
+ * target map(s).
*/
- return (scsi_hba_bus_config_iports(self, flag, op, arg,
- childp));
+ (void) scsi_hba_tgtmap_set_begin(
+ tran->tran_tgtmap);
+ (void) scsi_hba_tgtmap_set_end(
+ tran->tran_tgtmap, 0);
+
+ /* wait for unconfigure */
+ empty = scsi_tgtmap_sync(tran->tran_tgtmap);
+
+ scsi_hba_devi_enter(self, &circ);
+ ndi_rele_devi(child);
+
+ /* If begin/end/sync ends in empty map, offline/remove. */
+ if (empty) {
+ if (ndi_devi_offline(child,
+ NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE) == DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "devinfo iport@%s offlined and removed",
+ addr));
+ } else if (ndi_devi_device_remove(child)) {
+ /* Offline/rem failed, note new device_remove */
+ SCSI_HBA_LOG((_LOGUNCFG, self, NULL,
+ "devinfo iport@%s offline failed, "
+ "device_remove", addr));
+ }
+ }
+ scsi_hba_devi_exit(self, circ);
}
-#ifdef sparc
- if (scsi_hba_iport_unit_address(self)) {
- flag &= (~NDI_PROMNAME);
+}
+
+int
+scsi_hba_iportmap_create(dev_info_t *self, clock_t settle, int n_entries,
+ scsi_hba_iportmap_t **handle)
+{
+ scsi_hba_tran_t *tran;
+ damap_t *mapp;
+ char context[64];
+ impl_scsi_iportmap_t *iportmap;
+
+ if (self == NULL || settle == 0 || n_entries == 0 || handle == NULL)
+ return (DDI_FAILURE);
+
+ *handle = NULL;
+
+ if (scsi_hba_iport_unit_address(self) != NULL)
+ return (DDI_FAILURE);
+
+ tran = (scsi_hba_tran_t *)ddi_get_driver_private(self);
+ ASSERT(tran);
+ if (tran == NULL)
+ return (DDI_FAILURE);
+
+ (void) snprintf(context, sizeof (context), "%s%d.iportmap",
+ ddi_driver_name(self), ddi_get_instance(self));
+
+ if (damap_create(context, n_entries, DAMAP_REPORT_PERADDR, settle,
+ NULL, NULL, NULL, self,
+ scsi_iportmap_config, scsi_iportmap_unconfig, &mapp) !=
+ DAM_SUCCESS) {
+ return (DDI_FAILURE);
}
-#endif
- if (tran && tran->tran_bus_config) {
- return (tran->tran_bus_config(self, flag, op, arg, childp));
+ iportmap = kmem_zalloc(sizeof (*iportmap), KM_SLEEP);
+ iportmap->iportmap_hba_dip = self;
+ iportmap->iportmap_dam = mapp;
+
+ tran->tran_iportmap = (scsi_hba_iportmap_t *)iportmap;
+ *handle = (scsi_hba_iportmap_t *)iportmap;
+
+ SCSI_HBA_LOG((_LOGIPT, self, NULL, "%s", damap_name(mapp)));
+ return (DDI_SUCCESS);
+}
+
+void
+scsi_hba_iportmap_destroy(scsi_hba_iportmap_t *handle)
+{
+ impl_scsi_iportmap_t *iportmap = (impl_scsi_iportmap_t *)handle;
+ dev_info_t *self = iportmap->iportmap_hba_dip;
+
+ SCSI_HBA_LOG((_LOGIPT, self, NULL,
+ "%s", damap_name(iportmap->iportmap_dam)));
+
+ damap_destroy(iportmap->iportmap_dam);
+ kmem_free(iportmap, sizeof (*iportmap));
+}
+
+int
+scsi_hba_iportmap_iport_add(scsi_hba_iportmap_t *handle,
+ char *iport_addr, void *iport_priv)
+{
+ impl_scsi_iportmap_t *iportmap = (impl_scsi_iportmap_t *)handle;
+ dev_info_t *self = iportmap->iportmap_hba_dip;
+
+ SCSI_HBA_LOG((_LOGIPT, self, NULL,
+ "%s @%s", damap_name(iportmap->iportmap_dam), iport_addr));
+
+ return ((damap_addr_add(iportmap->iportmap_dam, iport_addr, NULL,
+ NULL, iport_priv) == DAM_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
+}
+
+int
+scsi_hba_iportmap_iport_remove(scsi_hba_iportmap_t *handle,
+ char *iport_addr)
+{
+ impl_scsi_iportmap_t *iportmap = (impl_scsi_iportmap_t *)handle;
+ dev_info_t *self = iportmap->iportmap_hba_dip;
+
+ SCSI_HBA_LOG((_LOGIPT, self, NULL,
+ "%s @%s", damap_name(iportmap->iportmap_dam), iport_addr));
+
+ return ((damap_addr_del(iportmap->iportmap_dam,
+ iport_addr) == DAM_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
+}
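
Taken together, a driver that discovers ports dynamically might use the iportmap interfaces roughly as follows; the settle time, map size, and myhba_* names are illustrative assumptions, not requirements of the interface:

	/* Sketch only: create the map at HBA attach, then report ports as they appear. */
	static int
	myhba_iportmap_setup(dev_info_t *self, myhba_t *hba)
	{
		/* 2 second settle time, up to 8 iports */
		if (scsi_hba_iportmap_create(self, drv_usectohz(2 * MICROSEC),
		    8, &hba->hba_iportmap) != DDI_SUCCESS)
			return (DDI_FAILURE);
		return (DDI_SUCCESS);
	}

	static void
	myhba_port_up(myhba_t *hba, char *iport_ua)
	{
		(void) scsi_hba_iportmap_iport_add(hba->hba_iportmap, iport_ua, NULL);
	}

	static void
	myhba_port_down(myhba_t *hba, char *iport_ua)
	{
		(void) scsi_hba_iportmap_iport_remove(hba->hba_iportmap, iport_ua);
	}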
+
+int
+scsi_hba_iportmap_lookup(scsi_hba_iportmap_t *handle,
+ char *iport_addr)
+{
+ impl_scsi_iportmap_t *iportmap = (impl_scsi_iportmap_t *)handle;
+ dev_info_t *self = iportmap->iportmap_hba_dip;
+ damap_id_t iportid;
+
+ iportid = damap_lookup(iportmap->iportmap_dam, iport_addr);
+ if (iportid != NODAM) {
+ SCSI_HBA_LOG((_LOG(3), self, NULL,
+ "%s @%s found",
+ damap_name(iportmap->iportmap_dam), iport_addr));
+ damap_id_rele(iportmap->iportmap_dam, iportid);
+ return (DDI_SUCCESS);
}
+ SCSI_HBA_LOG((_LOG(3), self, NULL,
+ "%s @%s not found",
+ damap_name(iportmap->iportmap_dam), iport_addr));
+ return (DDI_FAILURE);
+}
+
+static void
+scsi_lunmap_config(void *arg, damap_t *lundam, damap_id_list_t id_list)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)arg;
+ scsi_hba_tran_t *tran = tgtmap->tgtmap_tran;
+ dev_info_t *self = tran->tran_iport_dip;
+ damap_id_t lunid;
+ char *addr;
+
+ /* iterate over the LUNS we need to config */
+ for (lunid = damap_id_next(lundam, id_list, NODAM);
+ lunid != NODAM;
+ lunid = damap_id_next(lundam, id_list, lunid)) {
+ addr = damap_id2addr(lundam, lunid);
+ SCSI_HBA_LOG((_LOGLUN, self, NULL,
+ "%s @%s", damap_name(lundam), addr));
+
+ (void) scsi_hba_bus_configone_addr(self, addr, SE_HP);
+ }
+}
+
+static void
+scsi_lunmap_unconfig(void *arg, damap_t *lundam, damap_id_list_t id_list)
+{
+ impl_scsi_tgtmap_t *tgtmap = (impl_scsi_tgtmap_t *)arg;
+ scsi_hba_tran_t *tran = tgtmap->tgtmap_tran;
+ dev_info_t *self = tran->tran_iport_dip;
+ damap_id_t lunid;
+ char *addr;
+
+ for (lunid = damap_id_next(lundam, id_list, NODAM);
+ lunid != NODAM;
+ lunid = damap_id_next(lundam, id_list, lunid)) {
+ addr = damap_id2addr(lundam, lunid);
+ SCSI_HBA_LOG((_LOGLUN, self, NULL,
+ "%s @%s", damap_name(lundam), addr));
+
+ scsi_hba_bus_unconfigone_addr(self, addr);
+ }
+}
+
+static int
+scsi_lunmap_create(dev_info_t *self, impl_scsi_tgtmap_t *tgtmap, char *taddr)
+{
+ char context[64];
+ damap_t *tgtdam;
+ damap_id_t tgtid;
+ damap_t *lundam;
+
+ (void) snprintf(context, sizeof (context), "%s%d.%s.lunmap",
+ ddi_driver_name(self), ddi_get_instance(self), taddr);
+
+ tgtdam = tgtmap->tgtmap_dam[SCSI_TGT_SCSI_DEVICE];
+ tgtid = damap_lookup(tgtdam, taddr);
+ if (tgtid == NODAM) {
+ SCSI_HBA_LOG((_LOG(1), self, NULL,
+ "target %s not found", context));
+ return (DDI_FAILURE);
+ }
+
+ lundam = damap_id_priv_get(tgtdam, tgtid);
+ if (lundam) {
+ SCSI_HBA_LOG((_LOG(1), self, NULL,
+ "lunmap %s already created", context));
+ damap_id_rele(tgtdam, tgtid);
+ return (DDI_FAILURE);
+ }
+
+ /* NOTE: expected ref at tgtid/taddr: 2: caller + lookup. */
+	SCSI_HBA_LOG((_LOGLUN, self, NULL, "%s create, id %d ref %d",
+ context, tgtid, damap_id_ref(tgtdam, tgtid)));
+
+ /* create lundam */
+ if (damap_create(context, LUNMAPSIZE, DAMAP_REPORT_FULLSET, 1,
+ NULL, NULL, NULL,
+ tgtmap, scsi_lunmap_config, scsi_lunmap_unconfig,
+ &lundam) != DAM_SUCCESS) {
+ SCSI_HBA_LOG((_LOG(1), self, NULL,
+ "%s create failed, id %d ref %d",
+ context, tgtid, damap_id_ref(tgtdam, tgtid)));
+ damap_id_rele(tgtdam, tgtid);
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Return with damap_id_hold at tgtid/taddr from damap_lookup to
+ * account for damap_id_prv_set below.
+	 * account for damap_id_priv_set below.
+ damap_id_priv_set(tgtdam, tgtid, lundam);
+ return (DDI_SUCCESS);
+}
+
+static void
+scsi_lunmap_destroy(dev_info_t *self, impl_scsi_tgtmap_t *tgtmap, char *taddr)
+{
+ char context[64];
+ damap_t *tgtdam;
+ damap_id_t tgtid;
+ damap_t *lundam;
+
+ (void) snprintf(context, sizeof (context), "%s%d.%s.lunmap",
+ ddi_driver_name(self), ddi_get_instance(self), taddr);
+
+ tgtdam = tgtmap->tgtmap_dam[SCSI_TGT_SCSI_DEVICE];
+ tgtid = damap_lookup(tgtdam, taddr);
+ if (tgtid == NODAM) {
+ SCSI_HBA_LOG((_LOG(1), self, NULL,
+ "target %s not found", context));
+ return;
+ }
+
+ lundam = (damap_t *)damap_id_priv_get(tgtdam, tgtid);
+ if (lundam == NULL) {
+ damap_id_rele(tgtdam, tgtid); /* from damap_lookup */
+ SCSI_HBA_LOG((_LOG(1), self, NULL,
+ "lunmap %s already destroyed", context));
+ return;
+ }
+
+ /* NOTE: expected ref at tgtid/taddr: 3: priv_set + caller + lookup. */
+ SCSI_HBA_LOG((_LOGLUN, self, NULL, "%s, id %d ref %d",
+ damap_name(lundam), tgtid, damap_id_ref(tgtdam, tgtid)));
+
/*
- * Force reprobe for BUS_CONFIG_ONE or when manually reconfiguring
- * via devfsadm(1m) to emulate deferred attach.
- * Reprobe only discovers driver.conf enumerated nodes, more
- * dynamic implementations probably require their own bus_config.
+ * A begin/end (clear) against a target's lunmap will trigger
+ * unconfigure of all LUNs on the target.
*/
- if ((op == BUS_CONFIG_ONE) || (flag & NDI_DRV_CONF_REPROBE))
- flag |= NDI_CONFIG_REPROBE;
+ scsi_lunmap_set_begin(self, lundam);
+ scsi_lunmap_set_end(self, lundam);
+
+ SCSI_HBA_LOG((_LOGLUN, self, NULL,
+ "%s sync begin", damap_name(lundam)));
+
+ (void) damap_sync(lundam); /* wait for unconfigure */
+
+ SCSI_HBA_LOG((_LOGLUN, self, NULL,
+ "%s sync end", damap_name(lundam)));
+
+ damap_id_priv_set(tgtdam, tgtid, NULL);
+
+ /* release hold established by damap_lookup above */
+ damap_id_rele(tgtdam, tgtid);
- return (ndi_busop_bus_config(self, flag, op, arg, childp, 0));
+ /* release hold established since scsi_lunmap_create() */
+ damap_id_rele(tgtdam, tgtid);
+
+ damap_destroy(lundam);
+}
+
+static void
+scsi_lunmap_set_begin(dev_info_t *self, damap_t *lundam)
+{
+ SCSI_HBA_LOG((_LOGLUN, self, NULL, "%s", damap_name(lundam)));
+
+ (void) damap_addrset_begin(lundam);
}
static int
-scsi_hba_bus_unconfig(dev_info_t *self, uint_t flag, ddi_bus_config_op_t op,
- void *arg)
+scsi_lunmap_set_add(dev_info_t *self, damap_t *lundam,
+ char *taddr, scsi_lun64_t lun64, int sfunc)
{
- scsi_hba_tran_t *tran = NULL;
+ char ua[SCSI_MAXNAMELEN];
- tran = ddi_get_driver_private(self);
+ /* make unit address string form of "@taddr,lun[,sfunc]" */
+ if (sfunc == -1)
+ (void) snprintf(ua, sizeof (ua), "%s,%" PRIx64, taddr, lun64);
+ else
+ (void) snprintf(ua, sizeof (ua), "%s,%" PRIx64 ",%x",
+ taddr, lun64, sfunc);
+
+ SCSI_HBA_LOG((_LOGLUN, self, NULL, "%s @%s", damap_name(lundam), ua));
+
+ return ((damap_addrset_add(lundam, ua, NULL, NULL,
+ NULL) == DAM_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
+}
+
+static void
+scsi_lunmap_set_end(dev_info_t *self, damap_t *lundam)
+{
+ SCSI_HBA_LOG((_LOGLUN, self, NULL, "%s", damap_name(lundam)));
+
+ (void) damap_addrset_end(lundam, 0);
+}
+
+int
+scsi_lunmap_lookup(dev_info_t *self, damap_t *lundam, char *addr)
+{
+ damap_id_t lunid;
+
+ if ((lunid = damap_lookup(lundam, addr)) != NODAM) {
+ SCSI_HBA_LOG((_LOG(3), self, NULL,
+ "%s @%s found", damap_name(lundam), addr));
+ damap_id_rele(lundam, lunid);
+ return (DDI_SUCCESS);
+ }
+
+ SCSI_HBA_LOG((_LOG(3), self, NULL,
+ "%s @%s not found", damap_name(lundam), addr));
+ return (DDI_FAILURE);
+}
+
+/*
+ * phymap implementation
+ *
+ * We manage the timed aggregation of phys into a phy map by creating a
+ * SAS port construct (based upon 'name' of "local,remote" SAS addresses)
+ * upon the first link up. As time goes on additional phys may join that port.
+ * After an appropriate amount of settle time, we trigger the activation
+ * callback which will then take the resultant bit mask of phys (phymask) in
+ * the SAS port and use that to call the callback function
+ * provided by the caller.
+ *
+ * We cross check to make sure that phys only exist in one SAS port at a
+ * time by having soft state for each phy point back to the created
+ * SAS port.
+ *
+ * NOTE: Make SAS_PHY_UA_LEN max(SAS_PHY_PHYMASK_LEN, SAS_PHY_NAME_LEN)
+ * so we have enough space if the sas_phymap_bitset2phymaskua phymask address
+ * is already in use and we end up using the port name as the unit address.
+ */
+#define SAS_PHY_NAME_FMT "%" PRIx64 ",%" PRIx64
+#define SAS_PHY_NAME_LEN (16 + 1 + 16 + 1)
+#define SAS_PHY_NPHY (SAS2_PHYNUM_MAX + 1)
+#define SAS_PHY_PHYMASK_LEN ((roundup(SAS_PHY_NPHY, 4)) / 4)
+#if (SAS_PHY_PHYMASK_LEN > SAS_PHY_NAME_LEN)
+#define SAS_PHY_UA_LEN SAS_PHY_PHYMASK_LEN
+#else
+#define SAS_PHY_UA_LEN SAS_PHY_NAME_LEN
+#endif
+typedef struct impl_sas_phymap {
+ dev_info_t *phymap_self;
+
+ kmutex_t phymap_lock;
+ damap_t *phymap_dam;
+ void *phymap_phy2name;
+ ddi_soft_state_bystr *phymap_name2phys; /* bitset */
+ ddi_soft_state_bystr *phymap_name2ua;
+ ddi_soft_state_bystr *phymap_ua2name;
+
+ /* Noisy phy information - ensure forward progress for noisy phys */
+ int phymap_phy_max; /* max phy# */
+ int phymap_reports; /* per period */
+ int phymap_reports_max; /* scales */
+ int phymap_phys_noisy; /* detected */
+
+ /* These are for callbacks to the consumer. */
+ sas_phymap_activate_cb_t phymap_acp;
+ sas_phymap_deactivate_cb_t phymap_dcp;
+ void *phymap_private;
+} impl_sas_phymap_t;
+
+/* Detect noisy phy: max changes per stabilization period per phy. */
+static int sas_phymap_phy_max_factor = 16;
+
+/*
+ * Convert a bitset of phys into a unit-address string. The maximum string
+ * length is the maximum number of phys, rounded up to a multiple of 4 and
+ * divided by 4 (one hex digit per four phys). For example, a port whose
+ * bitset contains phys 0, 1, and 4 yields the phymask string "13".
+ */
+static void
+sas_phymap_bitset2phymaskua(bitset_t *phys, char *buf)
+{
+ char *ptr;
+ int grp;
+ int cur;
+ uint_t bit;
+
+ bit = roundup(SAS_PHY_NPHY, 4);
+ grp = 4;
+ ptr = buf;
+ cur = 0;
+ do {
+ bit -= 1;
+ grp -= 1;
+ if (bitset_in_set(phys, bit)) {
+ cur |= (1 << grp);
+ }
+ if (grp == 0) {
+ grp = 4;
+ if (cur || ptr != buf) {
+ *ptr++ = "0123456789abcdef"[cur];
+ *ptr = 0;
+ }
+ cur = 0;
+ }
+ } while (bit != 0);
+ if (ptr == buf) {
+ *ptr++ = '0';
+ *ptr = 0;
+ }
+}
+
+static void
+sas_phymap_config(void *arg, damap_t *phydam, damap_id_list_t dl)
+{
+ impl_sas_phymap_t *phymap = (impl_sas_phymap_t *)arg;
+ char *context = damap_name(phymap->phymap_dam);
+ int pairs;
+ damap_id_t phyid;
+ char *damn;
+ char *name;
+ bitset_t *phys;
+ char *ua;
+ void *ua_priv;
+
+ ASSERT(context);
+
+ mutex_enter(&phymap->phymap_lock);
+ phymap->phymap_reports = phymap->phymap_phys_noisy = 0;
+
+	/* count the number of local,remote pairs we need to config */
+ for (pairs = 0, phyid = damap_id_next(phydam, dl, NODAM);
+ phyid != NODAM;
+ phyid = damap_id_next(phydam, dl, phyid))
+ pairs++;
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL,
+ "%s: config %d local,remote pairs", context, pairs));
+
+ for (phyid = damap_id_next(phydam, dl, NODAM);
+ phyid != NODAM;
+ phyid = damap_id_next(phydam, dl, phyid)) {
+ /* Get the name ("local,remote" address string) from damap. */
+ damn = damap_id2addr(phydam, phyid);
+
+ /* Get the bitset of phys currently forming the port. */
+ phys = ddi_soft_state_bystr_get(phymap->phymap_name2phys, damn);
+ if (phys == NULL) {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: no phys", context, damn));
+ continue;
+ }
+
+ /* allocate, get, and initialize name index of name2ua map */
+ if (ddi_soft_state_bystr_zalloc(
+ phymap->phymap_name2ua, damn) != DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: failed name2ua alloc", context, damn));
+ continue;
+ }
+ ua = ddi_soft_state_bystr_get(phymap->phymap_name2ua, damn);
+ if (ua == NULL) {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: no name2ua", context, damn));
+ continue;
+ }
+ sas_phymap_bitset2phymaskua(phys, ua); /* set ua */
+
+ /* see if phymask ua index already allocated in ua2name map */
+ name = ddi_soft_state_bystr_get(phymap->phymap_ua2name, ua);
+ if (name) {
+ /*
+ * The 'phymask' sas_phymap_bitset2phymaskua ua is
+ * already in use. This means that original phys have
+ * formed into a new port, and that the original port
+ * still exists (it has migrated to some completely
+ * different set of phys). In this corner-case we use
+ * "local,remote" name as a 'temporary' unit address.
+ * Reset ua in name2ua map.
+ */
+ (void) strlcpy(ua, damn, SAS_PHY_NAME_LEN);
+
+ name = ddi_soft_state_bystr_get(
+ phymap->phymap_ua2name, ua);
+ if (name) {
+ /* The "local,remote" ua should be new... */
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s ua already configured",
+ context, ua));
+ continue;
+ }
+ }
- if (tran && tran->tran_bus_unconfig) {
- return (tran->tran_bus_unconfig(self, flag, op, arg));
+ /* allocate, get, and init ua index of ua2name map */
+ if (ddi_soft_state_bystr_zalloc(
+ phymap->phymap_ua2name, ua) != DDI_SUCCESS) {
+ ddi_soft_state_bystr_free(
+ phymap->phymap_name2ua, damn);
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: failed ua2name alloc",
+ context, damn));
+ continue;
+ }
+ name = ddi_soft_state_bystr_get(
+ phymap->phymap_ua2name, ua);
+ if (name == NULL) {
+ ddi_soft_state_bystr_free(
+ phymap->phymap_name2ua, damn);
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: no ua2name", context, ua));
+ continue;
+ }
+
+ /* set name in ua2name map */
+ (void) strlcpy(name, damn, SAS_PHY_NAME_LEN);
+
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL,
+ "%s: %s: ua %s: activate", context, damn, ua));
+ if (phymap->phymap_acp) {
+ mutex_exit(&phymap->phymap_lock);
+ ua_priv = NULL;
+ (phymap->phymap_acp)(phymap->phymap_private,
+ ua, &ua_priv);
+ mutex_enter(&phymap->phymap_lock);
+
+ damap_id_priv_set(phydam, phyid, ua_priv);
+ }
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL,
+ "%s: %s: ua %s: activate complete", context, damn, ua));
+ }
+
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL,
+ "%s: config complete", context));
+ mutex_exit(&phymap->phymap_lock);
+}
+
+/*ARGSUSED*/
+static void
+sas_phymap_unconfig(void *arg, damap_t *phydam, damap_id_list_t dl)
+{
+ impl_sas_phymap_t *phymap = (impl_sas_phymap_t *)arg;
+ char *context = damap_name(phymap->phymap_dam);
+ int pairs;
+ damap_id_t phyid;
+ char *damn;
+ char *ua;
+ void *ua_priv;
+
+ ASSERT(context);
+
+ mutex_enter(&phymap->phymap_lock);
+ phymap->phymap_reports = phymap->phymap_phys_noisy = 0;
+
+	/* count the number of local,remote pairs we need to unconfig */
+ for (pairs = 0, phyid = damap_id_next(phydam, dl, NODAM);
+ phyid != NODAM;
+ phyid = damap_id_next(phydam, dl, phyid))
+ pairs++;
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL,
+ "%s: unconfig %d local,remote pairs", context, pairs));
+
+ for (phyid = damap_id_next(phydam, dl, NODAM);
+ phyid != NODAM;
+ phyid = damap_id_next(phydam, dl, phyid)) {
+ /* Get the name ("local,remote" address string) from damap. */
+ damn = damap_id2addr(phydam, phyid);
+
+ ua = ddi_soft_state_bystr_get(phymap->phymap_name2ua, damn);
+ if (ua == NULL) {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: no name2ua", context, damn));
+ continue;
+ }
+
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL,
+ "%s: %s: ua %s: deactivate", context, damn, ua));
+ if (phymap->phymap_dcp) {
+ ua_priv = damap_id_priv_get(phydam, phyid);
+ mutex_exit(&phymap->phymap_lock);
+ (phymap->phymap_dcp)(phymap->phymap_private,
+ ua, ua_priv);
+ mutex_enter(&phymap->phymap_lock);
+ }
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL,
+ "%s: %s: ua %s: deactivate complete", context, damn, ua));
+
+ /* delete ua<->name mappings */
+ ddi_soft_state_bystr_free(phymap->phymap_ua2name, ua);
+ ddi_soft_state_bystr_free(phymap->phymap_name2ua, damn);
}
- return (ndi_busop_bus_unconfig(self, flag, op, arg));
+
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL,
+ "%s: unconfig complete", context));
+ mutex_exit(&phymap->phymap_lock);
+}
+
+int
+sas_phymap_create(dev_info_t *self, clock_t settle,
+ sas_phymap_mode_t mode, void *mode_argument, void *phymap_priv,
+ sas_phymap_activate_cb_t activate_cb,
+ sas_phymap_deactivate_cb_t deactivate_cb,
+ sas_phymap_t **handlep)
+{
+ _NOTE(ARGUNUSED(mode_argument));
+ char context[64];
+ impl_sas_phymap_t *phymap;
+
+ if (self == NULL || settle == 0 || handlep == NULL)
+ return (DDI_FAILURE);
+
+ if (mode != PHYMAP_MODE_SIMPLE)
+ return (DDI_FAILURE);
+
+ phymap = kmem_zalloc(sizeof (*phymap), KM_SLEEP);
+ phymap->phymap_self = self;
+ phymap->phymap_reports_max = 1 * sas_phymap_phy_max_factor;
+ phymap->phymap_acp = activate_cb;
+ phymap->phymap_dcp = deactivate_cb;
+ phymap->phymap_private = phymap_priv;
+ mutex_init(&phymap->phymap_lock, NULL, MUTEX_DRIVER, NULL);
+
+ (void) snprintf(context, sizeof (context), "%s%d.phymap",
+ ddi_driver_name(self), ddi_get_instance(self));
+ SCSI_HBA_LOG((_LOGPHY, self, NULL, "%s", context));
+
+ if (damap_create(context, SAS_PHY_NPHY, DAMAP_REPORT_PERADDR, settle,
+ NULL, NULL, NULL,
+ phymap, sas_phymap_config, sas_phymap_unconfig,
+ &phymap->phymap_dam) != DAM_SUCCESS)
+ goto fail;
+
+ if (ddi_soft_state_init(&phymap->phymap_phy2name,
+ SAS_PHY_NAME_LEN, SAS_PHY_NPHY) != 0)
+ goto fail;
+
+ if (ddi_soft_state_bystr_init(&phymap->phymap_name2phys,
+ sizeof (bitset_t), SAS_PHY_NPHY) != 0)
+ goto fail;
+
+ if (ddi_soft_state_bystr_init(&phymap->phymap_name2ua,
+ SAS_PHY_UA_LEN, SAS_PHY_NPHY) != 0)
+ goto fail;
+ if (ddi_soft_state_bystr_init(&phymap->phymap_ua2name,
+ SAS_PHY_NAME_LEN, SAS_PHY_NPHY) != 0)
+ goto fail;
+
+ *handlep = (sas_phymap_t *)phymap;
+ return (DDI_SUCCESS);
+
+fail: sas_phymap_destroy((sas_phymap_t *)phymap);
+ *handlep = NULL;
+ return (DDI_FAILURE);
}
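
As a usage illustration (not part of this change), an HBA driver might wire the phymap up as sketched below: create it at attach, feed it link events, and let the activate callback hand the resulting phymask unit address back to the driver. The callback bodies, the myhba_* names, and the choice to register the unit address as an iport are assumptions for the sketch, not requirements of the interface.

	/* Sketch only: activate/deactivate callbacks receive the phymask unit address. */
	static void
	myhba_phymap_activate(void *priv, char *ua, void **ua_privp)
	{
		myhba_t *hba = priv;

		/* e.g. surface the new SAS port as an iport */
		(void) scsi_hba_iportmap_iport_add(hba->hba_iportmap, ua, NULL);
		*ua_privp = NULL;
	}

	static void
	myhba_phymap_deactivate(void *priv, char *ua, void *ua_priv)
	{
		myhba_t *hba = priv;

		(void) scsi_hba_iportmap_iport_remove(hba->hba_iportmap, ua);
	}

	static int
	myhba_phymap_setup(dev_info_t *self, myhba_t *hba)
	{
		/* 1 second settle time */
		return (sas_phymap_create(self, drv_usectohz(1 * MICROSEC),
		    PHYMAP_MODE_SIMPLE, NULL, hba,
		    myhba_phymap_activate, myhba_phymap_deactivate,
		    &hba->hba_phymap));
	}

	/* On a link-up interrupt for phy 'n' attached to 'remote_wwn': */
	/*	(void) sas_phymap_phy_add(hba->hba_phymap, n, local_wwn, remote_wwn); */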
void
-scsi_hba_pkt_comp(struct scsi_pkt *pkt)
+sas_phymap_destroy(sas_phymap_t *handle)
{
- ASSERT(pkt);
- if (pkt->pkt_comp == NULL)
- return;
+ impl_sas_phymap_t *phymap = (impl_sas_phymap_t *)handle;
+ char *context;
+
+ context = phymap->phymap_dam ?
+ damap_name(phymap->phymap_dam) : "unknown";
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL, "%s", context));
+
+ if (phymap->phymap_ua2name)
+ ddi_soft_state_bystr_fini(&phymap->phymap_ua2name);
+ if (phymap->phymap_name2ua)
+ ddi_soft_state_bystr_fini(&phymap->phymap_name2ua);
+
+ if (phymap->phymap_name2phys)
+ ddi_soft_state_bystr_fini(&phymap->phymap_name2phys);
+
+ if (phymap->phymap_phy2name)
+ ddi_soft_state_fini(&phymap->phymap_phy2name);
+
+ if (phymap->phymap_dam)
+ damap_destroy(phymap->phymap_dam);
+ mutex_destroy(&phymap->phymap_lock);
+ kmem_free(phymap, sizeof (*phymap));
+}
+
+
+int
+sas_phymap_phy_add(sas_phymap_t *handle,
+ int phy, uint64_t local, uint64_t remote)
+{
+ impl_sas_phymap_t *phymap = (impl_sas_phymap_t *)handle;
+ char *context = damap_name(phymap->phymap_dam);
+ char port[SAS_PHY_NAME_LEN];
+ char *name;
+ bitset_t *phys;
+ int phy2name_allocated = 0;
+ int name2phys_allocated = 0;
+ int rv;
+
+ /* Create the SAS port name from the local and remote addresses. */
+ (void) snprintf(port, SAS_PHY_NAME_LEN, SAS_PHY_NAME_FMT,
+ local, remote);
+
+ mutex_enter(&phymap->phymap_lock);
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL, "%s: %s: add phy %d",
+ context, port, phy));
+
+ /* Check for conflict in phy2name map */
+ name = ddi_get_soft_state(phymap->phymap_phy2name, phy);
+ if (name) {
+ if (strcmp(name, port) != 0)
+ SCSI_HBA_LOG((_LOG_NF(WARN), "%s: %s: add phy %d: "
+ "already in %s", context, port, phy, name));
+ else
+ SCSI_HBA_LOG((_LOG_NF(WARN), "%s: %s: add phy %d: "
+ "duplicate add", context, port, phy));
+ mutex_exit(&phymap->phymap_lock);
+ return (DDI_FAILURE);
+ }
+
+ /* allocate, get, and initialize phy index of phy2name map */
+ if (ddi_soft_state_zalloc(
+ phymap->phymap_phy2name, phy) != DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: failed phy2name alloc", context, port));
+ goto fail;
+ }
+ name = ddi_get_soft_state(phymap->phymap_phy2name, phy);
+ if (name == NULL) {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: no phy2name", context, port));
+ goto fail;
+ }
+ phy2name_allocated = 1;
+ (void) strlcpy(name, port, SAS_PHY_NAME_LEN); /* set name */
+
+ /* Find/alloc, initialize name index of name2phys map */
+ phys = ddi_soft_state_bystr_get(phymap->phymap_name2phys, name);
+ if (phys == NULL) {
+ if (ddi_soft_state_bystr_zalloc(phymap->phymap_name2phys,
+ name) != DDI_SUCCESS) {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: failed name2phys alloc", context, name));
+ goto fail;
+ }
+ phys = ddi_soft_state_bystr_get(phymap->phymap_name2phys, name);
+ if (phys == NULL) {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: no name2phys", context, name));
+ goto fail;
+ }
+ name2phys_allocated = 1;
+
+ /* Initialize bitset of phys */
+ bitset_init(phys);
+ bitset_resize(phys, SAS_PHY_NPHY);
+
+ /* NOTE: no bitset_fini of phys needed */
+ }
+ ASSERT(phys);
+
+ /* Reflect 'add' in phys bitset. */
+ if (bitset_atomic_test_and_add(phys, phy) < 0) {
+ /* It is an error if the phy was already recorded. */
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: phy bit %d already in port", context, name, phy));
+ goto fail;
+ }
/*
- * For HBA drivers that implement tran_setup_pkt(9E), if we are
- * completing a 'consistent' mode DMA operation then we must
- * perform dma_sync prior to calling pkt_comp to ensure that
- * the target driver sees the correct data in memory.
+ * Check to see if we have a new phy_max for this map, and if so
+ * scale phymap_reports_max to the new number of phys.
*/
- ASSERT((pkt->pkt_flags & FLAG_NOINTR) == 0);
- if (((pkt->pkt_dma_flags & DDI_DMA_CONSISTENT) &&
- (pkt->pkt_dma_flags & DDI_DMA_READ)) &&
- ((P_TO_TRAN(pkt)->tran_setup_pkt) != NULL)) {
- scsi_sync_pkt(pkt);
+ if (phy > phymap->phymap_phy_max) {
+ phymap->phymap_phy_max = phy + 1;
+ phymap->phymap_reports_max = phymap->phymap_phy_max *
+ sas_phymap_phy_max_factor;
+ }
+
+ /*
+ * If we have not reached phymap_reports_max, start/restart the
+ * activate timer. Otherwise, if phymap->phymap_reports add/rem reports
+ * ever exceeds phymap_reports_max due to noisy phys, then report the
+ * noise and force stabilization by stopping reports into the damap.
+ *
+ * The first config/unconfig callout out of the damap will reset
+ * phymap->phymap_reports.
+ */
+ rv = DDI_SUCCESS;
+ if (phymap->phymap_reports++ < phymap->phymap_reports_max) {
+ if (damap_addr_add(phymap->phymap_dam, name,
+ NULL, NULL, NULL) == DAM_SUCCESS) {
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL,
+ "%s: %s: damap_addr_add", context, name));
+ } else {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: damap_addr_add failed", context, name));
+ rv = DDI_FAILURE;
+ }
+ } else {
+ phymap->phymap_phys_noisy++;
+ if (phymap->phymap_phys_noisy == 1)
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: noisy phys", context, name));
+ }
+ mutex_exit(&phymap->phymap_lock);
+ return (rv);
+
+fail: if (phy2name_allocated)
+ ddi_soft_state_free(phymap->phymap_phy2name, phy);
+ if (name2phys_allocated)
+ ddi_soft_state_bystr_free(phymap->phymap_name2phys, name);
+ mutex_exit(&phymap->phymap_lock);
+ return (DDI_FAILURE);
+}
+
+int
+sas_phymap_phy_rem(sas_phymap_t *handle, int phy)
+{
+ impl_sas_phymap_t *phymap = (impl_sas_phymap_t *)handle;
+ char *context = damap_name(phymap->phymap_dam);
+ char *name;
+ bitset_t *phys;
+ int rv = DDI_FAILURE;
+
+ ASSERT(context);
+
+ mutex_enter(&phymap->phymap_lock);
+ phymap->phymap_reports++;
+
+ /* Find and free phy index of phy2name map */
+ name = ddi_get_soft_state(phymap->phymap_phy2name, phy);
+ if (name == NULL) {
+ SCSI_HBA_LOG((_LOG_NF(WARN), "%s: rem phy %d: never added",
+ context, phy));
+ goto fail;
+ }
+ /* NOTE: always free phy index of phy2name map before return... */
+
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL, "%s: %s: rem phy %d",
+ context, name, phy));
+
+ /* Get bitset of phys currently associated with named port. */
+ phys = ddi_soft_state_bystr_get(phymap->phymap_name2phys, name);
+ if (phys == NULL) {
+ SCSI_HBA_LOG((_LOG_NF(WARN), "%s: %s: name2phys failed",
+ context, name));
+ goto fail;
+ }
+
+ /* Reflect 'rem' in phys bitset. */
+ if (bitset_atomic_test_and_del(phys, phy) < 0) {
+ /* It is an error if the phy wasn't one of the port's phys. */
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: phy bit %d not in port", context, name, phy));
+ goto fail;
+ }
+
+ /* If this was the last phy in the port, start the deactivate timer. */
+ if (bitset_is_null(phys) &&
+ (phymap->phymap_reports++ < phymap->phymap_reports_max)) {
+ if (damap_addr_del(phymap->phymap_dam, name) == DAM_SUCCESS) {
+ SCSI_HBA_LOG((_LOGPHY, phymap->phymap_self, NULL,
+ "%s: %s: damap_addr_del", context, name));
+ } else {
+ SCSI_HBA_LOG((_LOG_NF(WARN),
+ "%s: %s: damap_addr_del failure", context, name));
+ goto fail;
+ }
+ }
+ rv = DDI_SUCCESS;
+
+ /* free phy index of phy2name map */
+fail: if (name)
+ ddi_soft_state_free(phymap->phymap_phy2name, phy); /* free */
+ mutex_exit(&phymap->phymap_lock);
+ return (rv);
+}
+
+char *
+sas_phymap_lookup_ua(sas_phymap_t *handle, uint64_t local, uint64_t remote)
+{
+ impl_sas_phymap_t *phymap = (impl_sas_phymap_t *)handle;
+ char *context = damap_name(phymap->phymap_dam);
+ char name[SAS_PHY_NAME_LEN];
+ char *ua;
+
+ ASSERT(context);
+
+ (void) snprintf(name, SAS_PHY_NAME_LEN, SAS_PHY_NAME_FMT,
+ local, remote);
+
+ mutex_enter(&phymap->phymap_lock);
+ ua = ddi_soft_state_bystr_get(phymap->phymap_name2ua, name);
+ SCSI_HBA_LOG((_LOG(3), phymap->phymap_self, NULL,
+ "%s: %s: ua %s", context, name, ua ? ua : "NULL"));
+ mutex_exit(&phymap->phymap_lock);
+ return (ua);
+}
+
+void *
+sas_phymap_lookup_uapriv(sas_phymap_t *handle, char *ua)
+{
+ impl_sas_phymap_t *phymap = (impl_sas_phymap_t *)handle;
+ char *context = damap_name(phymap->phymap_dam);
+ char *name;
+ damap_id_t phyid;
+ void *ua_priv = NULL;
+
+ ASSERT(context);
+
+ mutex_enter(&phymap->phymap_lock);
+ name = ddi_soft_state_bystr_get(phymap->phymap_ua2name, ua);
+ if (name) {
+ phyid = damap_lookup(phymap->phymap_dam, name);
+ if (phyid != NODAM) {
+ ua_priv = damap_id_priv_get(phymap->phymap_dam, phyid);
+ damap_id_rele(phymap->phymap_dam, phyid);
+ }
+ }
+
+ SCSI_HBA_LOG((_LOG(3), phymap->phymap_self, NULL,
+ "%s: %s: ua %s ua_priv %p", context, name,
+ ua ? ua : "NULL", ua_priv));
+ mutex_exit(&phymap->phymap_lock);
+ return (ua_priv);
+}
+
+int
+sas_phymap_uahasphys(sas_phymap_t *handle, char *ua)
+{
+ impl_sas_phymap_t *phymap = (impl_sas_phymap_t *)handle;
+ char *name;
+ bitset_t *phys;
+ int n = 0;
+
+ mutex_enter(&phymap->phymap_lock);
+ name = ddi_soft_state_bystr_get(phymap->phymap_ua2name, ua);
+ if (name) {
+ phys = ddi_soft_state_bystr_get(phymap->phymap_name2phys, name);
+ if (phys)
+ n = bitset_is_null(phys) ? 0 : 1;
}
- (*pkt->pkt_comp)(pkt);
+ mutex_exit(&phymap->phymap_lock);
+ return (n);
+}
+
+sas_phymap_phys_t *
+sas_phymap_ua2phys(sas_phymap_t *handle, char *ua)
+{
+ impl_sas_phymap_t *phymap = (impl_sas_phymap_t *)handle;
+ char *name;
+ bitset_t *phys;
+ bitset_t *cphys = NULL;
+
+ mutex_enter(&phymap->phymap_lock);
+ name = ddi_soft_state_bystr_get(phymap->phymap_ua2name, ua);
+ if (name == NULL)
+ goto fail;
+
+ phys = ddi_soft_state_bystr_get(phymap->phymap_name2phys, name);
+ if (phys == NULL)
+ goto fail;
+
+ /* dup the phys and return */
+ cphys = kmem_alloc(sizeof (*cphys), KM_SLEEP);
+ bitset_init(cphys);
+ bitset_resize(cphys, SAS_PHY_NPHY);
+ bitset_copy(phys, cphys);
+
+fail: mutex_exit(&phymap->phymap_lock);
+ return ((sas_phymap_phys_t *)cphys);
+}
+
+int
+sas_phymap_phys_next(sas_phymap_phys_t *phys)
+{
+ bitset_t *cphys = (bitset_t *)phys;
+ int phy;
+
+ phy = bitset_find(cphys);
+ if (phy != -1)
+ bitset_del(cphys, phy);
+ return (phy);
+}
+
+void
+sas_phymap_phys_free(sas_phymap_phys_t *phys)
+{
+ bitset_t *cphys = (bitset_t *)phys;
+
+ if (cphys) {
+ bitset_fini(cphys);
+ kmem_free(cphys, sizeof (*cphys));
+ }
+}
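
These three calls form a small iterator; a consumer holding a phymask unit address can walk the member phys as sketched here (the function name is hypothetical):

	/* Sketch only: log each phy number currently in the port named by 'ua'. */
	static void
	myhba_walk_port_phys(sas_phymap_t *phymap, char *ua)
	{
		sas_phymap_phys_t *phys;
		int phy;

		phys = sas_phymap_ua2phys(phymap, ua);
		if (phys == NULL)
			return;

		while ((phy = sas_phymap_phys_next(phys)) != -1)
			cmn_err(CE_CONT, "port %s: phy %d\n", ua, phy);

		sas_phymap_phys_free(phys);
	}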
+
+char *
+sas_phymap_phy2ua(sas_phymap_t *handle, int phy)
+{
+ impl_sas_phymap_t *phymap = (impl_sas_phymap_t *)handle;
+ char *name;
+ char *ua;
+ char *rua = NULL;
+
+ mutex_enter(&phymap->phymap_lock);
+ name = ddi_get_soft_state(phymap->phymap_phy2name, phy);
+ if (name == NULL)
+ goto fail;
+ ua = ddi_soft_state_bystr_get(phymap->phymap_name2ua, name);
+ if (ua == NULL)
+ goto fail;
+
+ /* dup the ua and return */
+ rua = strdup(ua);
+
+fail: mutex_exit(&phymap->phymap_lock);
+ return (rua);
+}
+
+void
+sas_phymap_ua_free(char *ua)
+{
+ if (ua)
+ strfree(ua);
}
diff --git a/usr/src/uts/common/io/scsi/impl/scsi_resource.c b/usr/src/uts/common/io/scsi/impl/scsi_resource.c
index 592f2838a1..356ffee8eb 100644
--- a/usr/src/uts/common/io/scsi/impl/scsi_resource.c
+++ b/usr/src/uts/common/io/scsi/impl/scsi_resource.c
@@ -324,12 +324,12 @@ scsi_init_cache_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
pktw->pcw_flags = 0;
in_pktp = &(pktw->pcw_pkt);
in_pktp->pkt_address = *ap;
+
/*
* target drivers should initialize pkt_comp and
* pkt_time, but sometimes they don't so initialize
* them here to be safe.
*/
- in_pktp->pkt_address = *ap;
in_pktp->pkt_flags = 0;
in_pktp->pkt_time = 0;
in_pktp->pkt_resid = 0;
diff --git a/usr/src/uts/common/io/scsi/impl/scsi_transport.c b/usr/src/uts/common/io/scsi/impl/scsi_transport.c
index 29436c4cf8..81837d4fc1 100644
--- a/usr/src/uts/common/io/scsi/impl/scsi_transport.c
+++ b/usr/src/uts/common/io/scsi/impl/scsi_transport.c
@@ -188,7 +188,6 @@ scsi_transport(struct scsi_pkt *pkt)
} else {
uint_t savef;
void (*savec)();
- int status;
#ifdef SCSI_POLL_STAT
mutex_enter(&scsi_flag_nointr_mutex);
@@ -202,7 +201,7 @@ scsi_transport(struct scsi_pkt *pkt)
pkt->pkt_flags &= ~FLAG_NOINTR;
pkt->pkt_flags |= FLAG_IMMEDIATE_CB;
- if ((status = (*A_TO_TRAN(ap)->tran_start)(ap, pkt)) ==
+ if ((rval = (*A_TO_TRAN(ap)->tran_start)(ap, pkt)) ==
TRAN_ACCEPT) {
mutex_enter(&scsi_flag_nointr_mutex);
while (pkt->pkt_comp != CALLBACK_DONE) {
@@ -214,6 +213,6 @@ scsi_transport(struct scsi_pkt *pkt)
pkt->pkt_flags = savef;
pkt->pkt_comp = savec;
- return (status);
+ return (rval);
}
}
diff --git a/usr/src/uts/common/io/scsi/targets/sd.c b/usr/src/uts/common/io/scsi/targets/sd.c
index d8ed738be2..93b2d05519 100644
--- a/usr/src/uts/common/io/scsi/targets/sd.c
+++ b/usr/src/uts/common/io/scsi/targets/sd.c
@@ -16751,8 +16751,12 @@ sdintr(struct scsi_pkt *pktp)
* state if needed.
*/
if (pktp->pkt_reason == CMD_DEV_GONE) {
- scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
- "Command failed to complete...Device is gone\n");
+ /* Prevent multiple console messages for the same failure. */
+ if (un->un_last_pkt_reason != CMD_DEV_GONE) {
+ un->un_last_pkt_reason = CMD_DEV_GONE;
+ scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
+ "Command failed to complete...Device is gone\n");
+ }
if (un->un_mediastate != DKIO_DEV_GONE) {
un->un_mediastate = DKIO_DEV_GONE;
cv_broadcast(&un->un_state_cv);
diff --git a/usr/src/uts/common/io/scsi/targets/ses.c b/usr/src/uts/common/io/scsi/targets/ses.c
index b05041dff0..5e18e1c4a5 100644
--- a/usr/src/uts/common/io/scsi/targets/ses.c
+++ b/usr/src/uts/common/io/scsi/targets/ses.c
@@ -21,7 +21,7 @@
/*
* Enclosure Services Device target driver
*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -245,21 +245,27 @@ ses_probe(dev_info_t *dip)
* been able to get one to attach.
*
*/
+ if (dip == NULL)
+ return (DDI_PROBE_FAILURE);
+ /* SES_LOG(NULL, SES_CE_DEBUG1, "ses_probe: OK"); */
+ if (ddi_dev_is_sid(dip) == DDI_SUCCESS) {
+ return (DDI_PROBE_DONTCARE);
+ }
+ devp = ddi_get_driver_private(dip);
- if (dip == NULL)
+ /* Legacy: prevent driver.conf specified ses nodes on atapi. */
+ if (scsi_ifgetcap(&devp->sd_address, "interconnect-type", -1) ==
+ INTERCONNECT_ATAPI)
return (DDI_PROBE_FAILURE);
+
/*
* XXX: Breakage from the x86 folks.
*/
if (strcmp(ddi_get_name(ddi_get_parent(dip)), "ata") == 0) {
return (DDI_PROBE_FAILURE);
}
- /* SES_LOG(NULL, SES_CE_DEBUG1, "ses_probe: OK"); */
- if (ddi_dev_is_sid(dip) == DDI_SUCCESS) {
- return (DDI_PROBE_DONTCARE);
- }
- devp = ddi_get_driver_private(dip);
+
switch (err = scsi_probe(devp, SLEEP_FUNC)) {
case SCSIPROBE_EXISTS:
if (is_enc_dev(NULL, devp->sd_inq, SUN_INQSIZE, &ep)) {
diff --git a/usr/src/uts/common/io/scsi/targets/sgen.conf b/usr/src/uts/common/io/scsi/targets/sgen.conf
index 8f6a886e0b..e18c06928c 100644
--- a/usr/src/uts/common/io/scsi/targets/sgen.conf
+++ b/usr/src/uts/common/io/scsi/targets/sgen.conf
@@ -18,9 +18,7 @@
#
# CDDL HEADER END
#
-
-#
-# Copyright 1999 by Sun Microsystems, Inc. All rights reserved.
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
@@ -28,17 +26,38 @@
# Portions Copyright (c) Siemens 1999
# All rights reserved.
#
-#ident "%Z%%M% %I% %E% SMI"
-#
-#
# WARNING: enabling this driver may impact the security and data integrity of
# devices on your system. Please refer to sgen(7d) for details.
+#
+# There are two ways of configuring sgen: by establishing an association
+# between a compatible alias for a device and the sgen driver via
+# "add_drv -i", or by using this file (sgen.conf).
+
+#--------------------------add_drv binding method-----------------------------
+# SCSI target devices are now self-identifying in Solaris. Add_drv is the
+# preferred method to control driver binding; it avoids issues associated
+# with multiple driver.conf files associating more than one driver with a
+# device. The compatible property forms for SCSI target devices used in the
+# add_drv command are described in scsi(4).
+#
+# USAGE EXAMPLE (add_drv)
+#
+# In this example, sgen is configured to bind to all scanner and ocrw devices
+# in the system, as well as the UltraToast 4000 disk from ACME using the
+# add_drv configuration method.
+#
+# add_drv -i \
+# '"scsiclass,06" "scsiclass,0f" "scsiclass,00.vACME,pUltraToast_4000"' sgen
+#-------------------------sgen.conf binding method----------------------------
+# NOTE: Support for sgen.conf configuration may be removed in a future release
+# of Solaris.
#
-# sgen may be configured to bind to SCSI devices exporting a particular device
-# type, using the device-type-config-list, which is a ',' delimited list of
-# strings.
+# The remainder of this file is concerned with the .conf file
+# configuration method. Sgen may be configured to bind to SCSI devices
+# exporting a particular device type, using the device-type-config-list, which
+# is a ',' delimited list of strings.
#
#device-type-config-list=
# "direct" (type 0x00)
@@ -60,7 +79,6 @@
# "bridge" (type 0x10)
# "type_0x<typenum>" (types 0x11-0x1e are undefined by SCSI-3)
# "type_unknown" (type 0x1f)
-
#
# In addition to binding to device types, sgen can be configured to bind to one
# or more particular devices. The latter is accomplished by specifying the
@@ -69,9 +87,8 @@
# strings in the inquiry-config-list property, below. "*" may be substituted
# for the vendor ID as a wildcard. See sgen(7D) for details and extended usage
# examples.
-
#
-# USAGE EXAMPLE
+# USAGE EXAMPLE (sgen.conf)
#
# In this example, sgen is configured to bind to all scanner and ocrw devices in
# the system, as well as the UltraToast 4000 from ACME, and the PowerToast
@@ -81,15 +98,14 @@
#
#inquiry-config-list= "ACME", "UltraToast 4000",
# "*", "PowerToast";
-
-
#
-# After configuring the device-type-config-list and/or the inquiry-config-list,
-# the administrator must uncomment those target/lun pairs at which there are
-# devices for sgen to control. If it is expected that devices controlled by
-# sgen will be hotplugged or added into the system later, it is recommended
-# that all of the following lines be uncommented.
-
+# When using the sgen.conf method, after configuring the
+# device-type-config-list and/or the inquiry-config-list, the administrator
+# must uncomment those target/lun pairs at which there are devices for sgen to
+# control. If it is expected that devices controlled by sgen will be hotplugged
+# or added into the system later, it is recommended that all of the following
+# lines be uncommented.
+#
#name="sgen" class="scsi" target=0 lun=0;
#name="sgen" class="scsi" target=1 lun=0;
#name="sgen" class="scsi" target=2 lun=0;
@@ -106,3 +122,4 @@
#name="sgen" class="scsi" target=13 lun=0;
#name="sgen" class="scsi" target=14 lun=0;
#name="sgen" class="scsi" target=15 lun=0;
+
diff --git a/usr/src/uts/common/io/scsi/targets/smp.c b/usr/src/uts/common/io/scsi/targets/smp.c
index 12ecfc3392..ac3e089b03 100644
--- a/usr/src/uts/common/io/scsi/targets/smp.c
+++ b/usr/src/uts/common/io/scsi/targets/smp.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -65,7 +65,14 @@ static int smp_handle_func(dev_t, intptr_t, int, cred_t *, int *);
*/
static void smp_log(smp_state_t *, int, const char *, ...);
-int smp_retry_times = SMP_DEFAULT_RETRY_TIMES;
+int smp_retry_times = SMP_DEFAULT_RETRY_TIMES;
+int smp_retry_delay = 10000; /* 10msec */
+int smp_delay_cmd = 1; /* 1usec */
+int smp_single_command = 1; /* one command at a time */
+
+static int smp_retry_recovered = 0; /* retry recovery counter */
+static int smp_retry_failed = 0; /* retry failed counter */
+static int smp_failed = 0;
static struct cb_ops smp_cb_ops = {
smp_open, /* open */
@@ -147,7 +154,7 @@ _info(struct modinfo *modinfop)
/*
* smp_attach()
- * attach(9e) entrypoint.
+ * attach(9e) entrypoint.
*/
static int
smp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
@@ -224,7 +231,7 @@ smp_do_attach(dev_info_t *dip)
/*
* smp_detach()
- * detach(9E) entrypoint
+ * detach(9E) entrypoint
*/
static int
smp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
@@ -255,7 +262,7 @@ smp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
/*
* smp_do_detach()
- * detach the driver, tearing down resources.
+ * detach the driver, tearing down resources.
*/
static int
smp_do_detach(dev_info_t *dip)
@@ -459,7 +466,6 @@ smp_handle_func(dev_t dev,
DTRACE_PROBE1(smp__transport__start, caddr_t, smp_pkt->pkt_req);
smp_pkt->pkt_address = &smp_state->smp_dev->smp_addr;
- smp_pkt->pkt_reason = 0;
if (usmp_cmd->usmp_timeout <= 0) {
smp_pkt->pkt_timeout = SMP_DEFAULT_TIMEOUT;
} else {
@@ -469,20 +475,56 @@ smp_handle_func(dev_t dev,
/* call sas_smp_transport entry and send smp_pkt to HBA driver */
cmd_flags |= SMP_FLAG_XFER;
for (retrycount = 0; retrycount <= smp_retry_times; retrycount++) {
- if (sas_smp_transport(smp_pkt) == DDI_SUCCESS) {
- rval = DDI_SUCCESS;
+
+ /*
+ * To improve transport reliability, only allow one command
+ * outstanding at a time in sas_smp_transport().
+ *
+ * NOTE: Some expanders have issues with heavy smp load.
+ */
+ if (smp_single_command) {
+ mutex_enter(&smp_state->smp_mutex);
+ while (smp_state->smp_busy)
+ cv_wait(&smp_state->smp_cv,
+ &smp_state->smp_mutex);
+ smp_state->smp_busy = 1;
+ mutex_exit(&smp_state->smp_mutex);
+ }
+
+ /* Let the transport know if more retries are possible. */
+ smp_pkt->pkt_will_retry =
+ (retrycount < smp_retry_times) ? 1 : 0;
+
+ smp_pkt->pkt_reason = 0;
+ rval = sas_smp_transport(smp_pkt); /* put on the wire */
+
+ if (smp_delay_cmd)
+ delay(drv_usectohz(smp_delay_cmd));
+
+ if (smp_single_command) {
+ mutex_enter(&smp_state->smp_mutex);
+ smp_state->smp_busy = 0;
+ cv_signal(&smp_state->smp_cv);
+ mutex_exit(&smp_state->smp_mutex);
+ }
+
+ if (rval == DDI_SUCCESS) {
+ if (retrycount)
+ smp_retry_recovered++;
+ rval = 0;
break;
}
switch (smp_pkt->pkt_reason) {
case EAGAIN:
if (retrycount < smp_retry_times) {
- smp_pkt->pkt_reason = 0;
bzero(smp_pkt->pkt_rsp,
(size_t)usmp_cmd->usmp_rspsize);
- delay(drv_usectohz(10000)); /* 10 ms */
+ if (smp_retry_delay)
+ delay(drv_usectohz(smp_retry_delay));
continue;
} else {
+ smp_retry_failed++;
smp_log(smp_state, CE_NOTE,
"!sas_smp_transport failed, pkt_reason %d",
smp_pkt->pkt_reason);
@@ -516,6 +558,9 @@ done:
if ((cmd_flags & SMP_FLAG_RSPBUF) != 0) {
kmem_free(smp_pkt->pkt_rsp, smp_pkt->pkt_rspsize);
}
+
+ if (rval)
+ smp_failed++;
return (rval);
}
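The retry loop above serializes sas_smp_transport() calls with a classic busy-flag gate (mutex, condition variable, and a busy bit). As a minimal sketch of that pattern, with cmd_gate_t and its members as illustrative stand-ins for the smp_state_t fields used in smp_handle_func():

typedef struct cmd_gate {
	kmutex_t	cg_mutex;
	kcondvar_t	cg_cv;
	int		cg_busy;
} cmd_gate_t;

static void
cmd_gate_enter(cmd_gate_t *cg)
{
	mutex_enter(&cg->cg_mutex);
	while (cg->cg_busy)			/* another command outstanding */
		cv_wait(&cg->cg_cv, &cg->cg_mutex);
	cg->cg_busy = 1;			/* claim the single slot */
	mutex_exit(&cg->cg_mutex);
}

static void
cmd_gate_exit(cmd_gate_t *cg)
{
	mutex_enter(&cg->cg_mutex);
	cg->cg_busy = 0;			/* release the slot */
	cv_signal(&cg->cg_cv);			/* wake one waiter */
	mutex_exit(&cg->cg_mutex);
}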
diff --git a/usr/src/uts/common/os/bitset.c b/usr/src/uts/common/os/bitset.c
index 175b7f5f14..3277b4efa6 100644
--- a/usr/src/uts/common/os/bitset.c
+++ b/usr/src/uts/common/os/bitset.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -260,3 +260,80 @@ bitset_find(bitset_t *b)
return (elt);
}
+
+/*
+ * AND, OR, and XOR bitset computations
+ * Returns 1 if resulting bitset has any set bits
+ */
+int
+bitset_and(bitset_t *bs1, bitset_t *bs2, bitset_t *res)
+{
+ int i, anyset;
+
+ for (anyset = 0, i = 0; i < bs1->bs_words; i++) {
+ if ((res->bs_set[i] = (bs1->bs_set[i] & bs2->bs_set[i])) != 0)
+ anyset = 1;
+ }
+ return (anyset);
+}
+
+int
+bitset_or(bitset_t *bs1, bitset_t *bs2, bitset_t *res)
+{
+ int i, anyset;
+
+ for (anyset = 0, i = 0; i < bs1->bs_words; i++) {
+ if ((res->bs_set[i] = (bs1->bs_set[i] | bs2->bs_set[i])) != 0)
+ anyset = 1;
+ }
+ return (anyset);
+}
+
+int
+bitset_xor(bitset_t *bs1, bitset_t *bs2, bitset_t *res)
+{
+ int i;
+ int anyset = 0;
+
+ for (i = 0; i < bs1->bs_words; i++) {
+ if ((res->bs_set[i] = (bs1->bs_set[i] ^ bs2->bs_set[i])) != 0)
+ anyset = 1;
+ }
+ return (anyset);
+}
+
+/*
+ * Return 1 if the two bitsets are identical
+ */
+int
+bitset_match(bitset_t *bs1, bitset_t *bs2)
+{
+ int i;
+
+ if (bs1->bs_words != bs2->bs_words)
+ return (0);
+
+ for (i = 0; i < bs1->bs_words; i++)
+ if (bs1->bs_set[i] != bs2->bs_set[i])
+ return (0);
+ return (1);
+}
+
+/*
+ * Zero a bitset_t
+ */
+void
+bitset_zero(bitset_t *b)
+{
+ bzero(b->bs_set, sizeof (ulong_t) * b->bs_words);
+}
+
+
+/*
+ * Copy a bitset_t
+ */
+void
+bitset_copy(bitset_t *src, bitset_t *dest)
+{
+ bcopy(src->bs_set, dest->bs_set, sizeof (ulong_t) * src->bs_words);
+}
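Because the new AND/OR/XOR routines report whether the result has any bits set, callers can cheaply compute "what changed" between two same-sized bitsets; this is exactly how dam_activate_taskq() in the new damap code uses them. A minimal sketch (example_bitset_delta is a hypothetical name; both inputs are assumed to have been sized with the same bitset_resize()):

static void
example_bitset_delta(bitset_t *active, bitset_t *stable, int nbits)
{
	bitset_t delta, cfg, uncfg;

	bitset_init(&delta);
	bitset_resize(&delta, nbits);
	bitset_init(&cfg);
	bitset_resize(&cfg, nbits);
	bitset_init(&uncfg);
	bitset_resize(&uncfg, nbits);

	/* delta = bits that differ between the two sets */
	if (bitset_xor(active, stable, &delta)) {
		/* differing bits set in 'stable' are new arrivals */
		(void) bitset_and(&delta, stable, &cfg);
		/* differing bits set only in 'active' are departures */
		(void) bitset_and(&delta, active, &uncfg);
		/* ... act on 'cfg' and 'uncfg' here ... */
	}

	bitset_fini(&uncfg);
	bitset_fini(&cfg);
	bitset_fini(&delta);
}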
diff --git a/usr/src/uts/common/os/callout.c b/usr/src/uts/common/os/callout.c
index ed1ae9aa83..f671d2250a 100644
--- a/usr/src/uts/common/os/callout.c
+++ b/usr/src/uts/common/os/callout.c
@@ -55,10 +55,10 @@ static callout_cache_t *callout_caches; /* linked list of caches */
static callout_table_t *callout_table; /* global callout table array */
/*
- * We run normal callouts from PIL 10. This means that no other handler that
- * runs at PIL 10 is allowed to wait for normal callouts directly or indirectly
- * as it will cause a deadlock. This has always been an unwritten rule.
- * We are making it explicit here.
+ * We run 'realtime' callouts at PIL 1 (CY_LOW_LEVEL). For 'normal'
+ * callouts, from PIL 10 (CY_LOCK_LEVEL) we dispatch the callout,
+ * via taskq, to a thread that executes at PIL 0 - so we end up running
+ * 'normal' callouts at PIL 0.
*/
static volatile int callout_realtime_level = CY_LOW_LEVEL;
static volatile int callout_normal_level = CY_LOCK_LEVEL;
diff --git a/usr/src/uts/common/os/clock.c b/usr/src/uts/common/os/clock.c
index cacb9f5f85..384e17b57d 100644
--- a/usr/src/uts/common/os/clock.c
+++ b/usr/src/uts/common/os/clock.c
@@ -67,6 +67,8 @@
#include <sys/task.h>
#include <sys/sdt.h>
#include <sys/ddi_timer.h>
+#include <sys/random.h>
+#include <sys/modctl.h>
/*
* for NTP support
@@ -87,6 +89,7 @@ extern kcondvar_t fsflush_cv;
extern sysinfo_t sysinfo;
extern vminfo_t vminfo;
extern int idleswtch; /* flag set while idle in pswtch() */
+extern hrtime_t volatile devinfo_freeze;
/*
* high-precision avenrun values. These are needed to make the
@@ -269,6 +272,10 @@ static int tod_fault_reset_flag = 0;
/* patchable via /etc/system */
int tod_validate_enable = 1;
+/* Diagnose/Limit messages about delay(9F) called from interrupt context */
+int delay_from_interrupt_diagnose = 0;
+volatile uint32_t delay_from_interrupt_msg = 20;
+
/*
* On non-SPARC systems, TOD validation must be deferred until gethrtime
* returns non-zero values (after mach_clkinit's execution).
@@ -1575,59 +1582,124 @@ profil_tick(uintptr_t upc)
static void
delay_wakeup(void *arg)
{
- kthread_t *t = arg;
+ kthread_t *t = arg;
mutex_enter(&t->t_delay_lock);
cv_signal(&t->t_delay_cv);
mutex_exit(&t->t_delay_lock);
}
-void
-delay(clock_t ticks)
-{
- kthread_t *t = curthread;
- clock_t deadline = lbolt + ticks;
- clock_t timeleft;
- timeout_id_t id;
- extern hrtime_t volatile devinfo_freeze;
+/*
+ * The delay(9F) man page indicates that it can only be called from user or
+ * kernel context - detect and diagnose bad calls. The following macro will
+ * produce a limited number of messages identifying bad callers. This is done
+ * in a macro so that caller() is meaningful. When a bad caller is identified,
+ * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
+ */
+#define DELAY_CONTEXT_CHECK() { \
+ uint32_t m; \
+ char *f; \
+ ulong_t off; \
+ \
+ m = delay_from_interrupt_msg; \
+ if (delay_from_interrupt_diagnose && servicing_interrupt() && \
+ !panicstr && !devinfo_freeze && \
+ atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) { \
+ f = modgetsymname((uintptr_t)caller(), &off); \
+ cmn_err(CE_WARN, "delay(9F) called from " \
+ "interrupt context: %s`%s", \
+ mod_containing_pc(caller()), f ? f : "..."); \
+ } \
+}
- if ((panicstr || devinfo_freeze) && ticks > 0) {
- /*
- * Timeouts aren't running, so all we can do is spin.
- */
- drv_usecwait(TICK_TO_USEC(ticks));
+/*
+ * delay_common: common delay code.
+ */
+static void
+delay_common(clock_t ticks)
+{
+ kthread_t *t = curthread;
+ clock_t deadline;
+ clock_t timeleft;
+ callout_id_t id;
+
+	/* If timeouts aren't running, all we can do is spin. */
+ if (panicstr || devinfo_freeze) {
+ /* Convert delay(9F) call into drv_usecwait(9F) call. */
+ if (ticks > 0)
+ drv_usecwait(TICK_TO_USEC(ticks));
return;
}
+ deadline = lbolt + ticks;
while ((timeleft = deadline - lbolt) > 0) {
mutex_enter(&t->t_delay_lock);
- id = timeout(delay_wakeup, t, timeleft);
+ id = timeout_default(delay_wakeup, t, timeleft);
cv_wait(&t->t_delay_cv, &t->t_delay_lock);
mutex_exit(&t->t_delay_lock);
- (void) untimeout(id);
+ (void) untimeout_default(id, 0);
}
}
/*
+ * Delay specified number of clock ticks.
+ */
+void
+delay(clock_t ticks)
+{
+ DELAY_CONTEXT_CHECK();
+
+ delay_common(ticks);
+}
+
+/*
+ * Delay a random number of clock ticks between 1 and ticks.
+ */
+void
+delay_random(clock_t ticks)
+{
+ int r;
+
+ DELAY_CONTEXT_CHECK();
+
+ (void) random_get_pseudo_bytes((void *)&r, sizeof (r));
+ if (ticks == 0)
+ ticks = 1;
+ ticks = (r % ticks) + 1;
+ delay_common(ticks);
+}
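delay_random() is aimed at retry loops where a fixed delay would keep competing threads in lock step. A minimal sketch, with MAX_RETRY and try_op as hypothetical names:

#define	MAX_RETRY	5		/* illustrative retry budget */

static int
retry_with_jitter(int (*try_op)(void *), void *arg)
{
	int i, rv = -1;

	for (i = 0; i < MAX_RETRY; i++) {
		if ((rv = try_op(arg)) == 0)
			break;			/* success */
		delay_random(10);		/* sleep a random 1..10 ticks */
	}
	return (rv);
}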
+
+/*
* Like delay, but interruptible by a signal.
*/
int
delay_sig(clock_t ticks)
{
- clock_t deadline = lbolt + ticks;
- clock_t rc;
+ kthread_t *t = curthread;
+ clock_t deadline;
+ clock_t rc;
+
+	/* If timeouts aren't running, all we can do is spin. */
+ if (panicstr || devinfo_freeze) {
+ if (ticks > 0)
+ drv_usecwait(TICK_TO_USEC(ticks));
+ return (0);
+ }
- mutex_enter(&curthread->t_delay_lock);
+ deadline = lbolt + ticks;
+ mutex_enter(&t->t_delay_lock);
do {
- rc = cv_timedwait_sig(&curthread->t_delay_cv,
- &curthread->t_delay_lock, deadline);
+ rc = cv_timedwait_sig(&t->t_delay_cv,
+ &t->t_delay_lock, deadline);
+ /* loop until past deadline or signaled */
} while (rc > 0);
- mutex_exit(&curthread->t_delay_lock);
+ mutex_exit(&t->t_delay_lock);
if (rc == 0)
return (EINTR);
return (0);
}
+
#define SECONDS_PER_DAY 86400
/*
diff --git a/usr/src/uts/common/os/damap.c b/usr/src/uts/common/os/damap.c
new file mode 100644
index 0000000000..b25860e20e
--- /dev/null
+++ b/usr/src/uts/common/os/damap.c
@@ -0,0 +1,1258 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/note.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/kmem.h>
+#include <sys/cmn_err.h>
+#include <sys/debug.h>
+#include <sys/sunndi.h>
+#include <sys/kstat.h>
+#include <sys/conf.h>
+#include <sys/ddi_timer.h>
+#include <sys/devctl.h>
+#include <sys/callb.h>
+#include <sys/sysevent.h>
+#include <sys/taskq.h>
+#include <sys/ddi.h>
+#include <sys/bitset.h>
+#include <sys/damap.h>
+#include <sys/damap_impl.h>
+
+#ifdef DEBUG
+static int damap_debug = 0;
+#endif /* DEBUG */
+
+static void dam_addrset_activate(dam_t *, bitset_t *);
+static void dam_addrset_release(dam_t *, bitset_t *);
+static void dam_activate_taskq(void *);
+static void dam_addr_stable_cb(void *);
+static void dam_set_stable_cb(void *);
+static void dam_sched_tmo(dam_t *, clock_t, void (*tmo_cb)());
+static void dam_add_report(dam_t *, dam_da_t *, id_t, int);
+static void dam_release(dam_t *, id_t);
+static void dam_release_report(dam_t *, id_t);
+static void dam_deactivate_addr(dam_t *, id_t);
+static id_t dam_get_addrid(dam_t *, char *);
+static int dam_kstat_create(dam_t *);
+static void dam_kstat_destroy(dam_t *);
+
+#define DAM_INCR_STAT(mapp, stat) \
+ if ((mapp)->dam_kstatsp) { \
+ struct dam_kstats *stp = (mapp)->dam_kstatsp->ks_data; \
+ stp->stat.value.ui32++; \
+ }
+
+#define DAM_SET_STAT(mapp, stat, val) \
+ if ((mapp)->dam_kstatsp) { \
+ struct dam_kstats *stp = (mapp)->dam_kstatsp->ks_data; \
+ stp->stat.value.ui32 = (val); \
+ }
+
+/*
+ * Create new device address map
+ *
+ * ident: map name (kstat)
+ * size: max # of map entries
+ * rptmode: type or mode of reporting
+ * stable_usec: # of quiescent microseconds before report/map is stable
+ *
+ * activate_arg: address provider activation-callout private
+ * activate_cb: address provider activation callback handler
+ * deactivate_cb: address provider deactivation callback handler
+ *
+ * config_arg: configuration-callout private
+ * config_cb: class configuration callout
+ * unconfig_cb: class unconfiguration callout
+ *
+ * damapp: pointer to map handle (return)
+ *
+ * Returns: DAM_SUCCESS
+ * DAM_EINVAL Invalid argument(s)
+ * DAM_FAILURE General failure
+ */
+int
+damap_create(char *ident, size_t size, damap_rptmode_t rptmode,
+ clock_t stable_usec,
+ void *activate_arg, damap_activate_cb_t activate_cb,
+ damap_deactivate_cb_t deactivate_cb,
+ void *config_arg, damap_configure_cb_t configure_cb,
+ damap_unconfig_cb_t unconfig_cb,
+ damap_t **damapp)
+{
+ dam_t *mapp;
+ void *softstate_p;
+
+ DTRACE_PROBE1(damap__create__entry, char *, ident);
+ if ((configure_cb == NULL) || (unconfig_cb == NULL))
+ return (DAM_EINVAL);
+
+ if (ddi_soft_state_init(&softstate_p, sizeof (dam_da_t), size) !=
+ DDI_SUCCESS)
+ return (DAM_FAILURE);
+
+ mapp = kmem_zalloc(sizeof (*mapp), KM_SLEEP);
+ if (ddi_strid_init(&mapp->dam_addr_hash, size) != DDI_SUCCESS) {
+ ddi_soft_state_fini(&softstate_p);
+ kmem_free(mapp, sizeof (*mapp));
+ return (DAM_FAILURE);
+ }
+
+ mapp->dam_da = softstate_p;
+ mapp->dam_stabletmo = drv_usectohz(stable_usec);
+ mapp->dam_size = size;
+ mapp->dam_high = 1;
+ mapp->dam_rptmode = rptmode;
+
+ mapp->dam_activate_arg = activate_arg;
+ mapp->dam_activate_cb = (activate_cb_t)activate_cb;
+ mapp->dam_deactivate_cb = (deactivate_cb_t)deactivate_cb;
+
+ mapp->dam_config_arg = config_arg;
+ mapp->dam_configure_cb = (configure_cb_t)configure_cb;
+ mapp->dam_unconfig_cb = (unconfig_cb_t)unconfig_cb;
+
+ if (ident)
+ mapp->dam_name = i_ddi_strdup(ident, KM_SLEEP);
+
+ bitset_init(&mapp->dam_active_set);
+ bitset_resize(&mapp->dam_active_set, size);
+ bitset_init(&mapp->dam_stable_set);
+ bitset_resize(&mapp->dam_stable_set, size);
+ bitset_init(&mapp->dam_report_set);
+ bitset_resize(&mapp->dam_report_set, size);
+ mutex_init(&mapp->dam_lock, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&mapp->dam_cv, NULL, CV_DRIVER, NULL);
+ mapp->dam_taskqp = ddi_taskq_create(NULL, ident, 1, TASKQ_DEFAULTPRI,
+ 0);
+ *damapp = (damap_t *)mapp;
+ if (dam_kstat_create(mapp) != DDI_SUCCESS) {
+ damap_destroy((damap_t *)mapp);
+ return (DAM_FAILURE);
+ }
+
+ DTRACE_PROBE1(damap__create__exit, dam_t *, mapp);
+ return (DAM_SUCCESS);
+}
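For reference, an address provider (a SAS HBA driver, for example) would normally create its map once at attach time. The sketch below uses hypothetical xyz_* names; the configure/unconfigure callback signatures are inferred from the way dam_activate_taskq() invokes them further down, so the authoritative prototypes in damap.h should be consulted:

static void
xyz_config_cb(void *arg, damap_t *map, damap_id_list_t list)
{
	damap_id_t id;

	for (id = damap_id_next(map, list, 0); id != 0;
	    id = damap_id_next(map, list, id)) {
		/* configure the target at damap_id2addr(map, id) */
	}
}

static void
xyz_unconfig_cb(void *arg, damap_t *map, damap_id_list_t list)
{
	/* unconfigure the targets in 'list'; mirror of xyz_config_cb() */
}

static int
xyz_create_tgtmap(void *softc, damap_t **mapp)
{
	/* 256 addresses, per-address reports, 10 seconds of quiet to settle */
	return (damap_create("xyz_tgtmap", 256, DAMAP_REPORT_PERADDR,
	    10 * MICROSEC, NULL, NULL, NULL,
	    softc, xyz_config_cb, xyz_unconfig_cb, mapp));
}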
+
+/*
+ * Destroy device address map
+ *
+ * damapp: address map
+ *
+ * All pending reports and timeouts are cancelled, the taskq is drained
+ * and destroyed, any remaining addresses are released, and the map
+ * itself is freed.  No value is returned.
+ */
+void
+damap_destroy(damap_t *damapp)
+{
+ int i;
+ dam_t *mapp = (dam_t *)damapp;
+
+ ASSERT(mapp);
+
+ DTRACE_PROBE2(damap__destroy__entry, dam_t *, mapp, char *,
+ mapp->dam_name);
+
+ DAM_FLAG_SET(mapp, DAM_DESTROYPEND);
+ (void) damap_sync(damapp);
+
+ /*
+ * cancel pending timeouts and kill off the taskq
+ */
+ dam_sched_tmo(mapp, 0, NULL);
+ ddi_taskq_wait(mapp->dam_taskqp);
+ ddi_taskq_destroy(mapp->dam_taskqp);
+
+ for (i = 1; i < mapp->dam_high; i++) {
+ if (ddi_get_soft_state(mapp->dam_da, i) == NULL)
+ continue;
+ if (DAM_IN_REPORT(mapp, i))
+ dam_release_report(mapp, i);
+ if (DAM_IS_STABLE(mapp, i))
+ dam_deactivate_addr(mapp, i);
+ ddi_strid_free(mapp->dam_addr_hash, i);
+ ddi_soft_state_free(mapp->dam_da, i);
+ }
+ ddi_strid_fini(&mapp->dam_addr_hash);
+ ddi_soft_state_fini(&mapp->dam_da);
+ bitset_fini(&mapp->dam_active_set);
+ bitset_fini(&mapp->dam_stable_set);
+ bitset_fini(&mapp->dam_report_set);
+ dam_kstat_destroy(mapp);
+ mutex_destroy(&mapp->dam_lock);
+ cv_destroy(&mapp->dam_cv);
+ if (mapp->dam_name)
+ kmem_free(mapp->dam_name, strlen(mapp->dam_name) + 1);
+ kmem_free(mapp, sizeof (*mapp));
+ DTRACE_PROBE(damap__destroy__exit);
+}
+
+/*
+ * Wait for map stability.
+ *
+ * damapp: address map
+ */
+int
+damap_sync(damap_t *damapp)
+{
+
+#define WAITFOR_FLAGS (DAM_SETADD | DAM_SPEND | MAP_LOCK)
+
+ dam_t *mapp = (dam_t *)damapp;
+ int none_active;
+
+ ASSERT(mapp);
+
+ DTRACE_PROBE1(damap__sync__entry, dam_t *, mapp);
+
+ mutex_enter(&mapp->dam_lock);
+ while ((mapp->dam_flags & WAITFOR_FLAGS) ||
+ (!bitset_is_null(&mapp->dam_report_set)) || (mapp->dam_tid != 0)) {
+ cv_wait(&mapp->dam_cv, &mapp->dam_lock);
+ }
+
+ none_active = bitset_is_null(&mapp->dam_active_set);
+
+ mutex_exit(&mapp->dam_lock);
+ DTRACE_PROBE2(damap__sync__exit, dam_t *, mapp, int, none_active);
+
+ return (none_active);
+}
+
+/*
+ * Get the name of a device address map
+ *
+ * damapp: address map
+ *
+ * Returns: name
+ */
+char *
+damap_name(damap_t *damapp)
+{
+ dam_t *mapp = (dam_t *)damapp;
+
+ return (mapp ? mapp->dam_name : "UNKNOWN_damap");
+}
+
+/*
+ * Report an address to per-address report
+ *
+ * damapp: address map handle
+ * address: address in ascii string representation
+ * rindx: index if address stabilizes
+ * nvl: optional nvlist of configuration-private data
+ * addr_priv: optional provider-private (passed to activate/deactivate cb)
+ *
+ * Returns: DAM_SUCCESS
+ * DAM_EINVAL Invalid argument(s)
+ * DAM_MAPFULL address map exhausted
+ */
+int
+damap_addr_add(damap_t *damapp, char *address, damap_id_t *ridx, nvlist_t *nvl,
+ void *addr_priv)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ id_t addrid;
+ dam_da_t *passp;
+
+ DTRACE_PROBE2(damap__addr__add__entry, dam_t *, mapp,
+ char *, address);
+ if (!mapp || !address || (mapp->dam_rptmode != DAMAP_REPORT_PERADDR) ||
+ (mapp->dam_flags & DAM_DESTROYPEND))
+ return (DAM_EINVAL);
+
+ DAM_LOCK(mapp, ADDR_LOCK);
+ if ((addrid = dam_get_addrid(mapp, address)) == 0) {
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ return (DAM_MAPFULL);
+ }
+
+ passp = ddi_get_soft_state(mapp->dam_da, addrid);
+ ASSERT(passp != NULL);
+
+ /*
+ * If re-reporting the same address (add or remove) clear
+ * the existing report
+ */
+ if (DAM_IN_REPORT(mapp, addrid)) {
+ DAM_INCR_STAT(mapp, dam_rereport);
+ dam_release_report(mapp, addrid);
+ passp->da_jitter++;
+ }
+ passp->da_ppriv_rpt = addr_priv;
+ if (nvl)
+ (void) nvlist_dup(nvl, &passp->da_nvl_rpt, KM_SLEEP);
+
+ dam_add_report(mapp, passp, addrid, RPT_ADDR_ADD);
+ if (ridx != NULL)
+ *ridx = (damap_id_t)addrid;
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ DTRACE_PROBE3(damap__addr__add__exit, dam_t *, mapp, char *,
+ address, int, addrid);
+ return (DAM_SUCCESS);
+}
+
+/*
+ * Report removal of address from per-address report
+ *
+ * damapp: address map
+ * address: address in ascii string representation
+ *
+ * Returns: DAM_SUCCESS
+ * DAM_EINVAL Invalid argument(s)
+ * DAM_FAILURE General failure
+ */
+int
+damap_addr_del(damap_t *damapp, char *address)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ id_t addrid;
+ dam_da_t *passp;
+
+ DTRACE_PROBE2(damap__addr__del__entry, dam_t *, mapp,
+ char *, address);
+ if (!mapp || !address || (mapp->dam_rptmode != DAMAP_REPORT_PERADDR) ||
+ (mapp->dam_flags & DAM_DESTROYPEND))
+ return (DAM_EINVAL);
+
+ DAM_LOCK(mapp, ADDR_LOCK);
+ if (!(addrid = ddi_strid_str2id(mapp->dam_addr_hash, address))) {
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ return (DAM_SUCCESS);
+ }
+ passp = ddi_get_soft_state(mapp->dam_da, addrid);
+ ASSERT(passp);
+ if (DAM_IN_REPORT(mapp, addrid)) {
+ DAM_INCR_STAT(mapp, dam_rereport);
+ dam_release_report(mapp, addrid);
+ passp->da_jitter++;
+ }
+ dam_add_report(mapp, passp, addrid, RPT_ADDR_DEL);
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ DTRACE_PROBE3(damap__addr__del__exit, dam_t *, mapp,
+ char *, address, int, addrid);
+ return (DAM_SUCCESS);
+}
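Together, damap_addr_add() and damap_addr_del() give a per-address provider a small reporting surface. A sketch using the same hypothetical xyz_* naming as above, assuming a DAMAP_REPORT_PERADDR map:

static void
xyz_report_arrival(damap_t *map, char *ua)
{
	/* report the address; it is configured only after it stabilizes */
	if (damap_addr_add(map, ua, NULL, NULL, NULL) != DAM_SUCCESS)
		cmn_err(CE_WARN, "!%s: report of address %s failed",
		    damap_name(map), ua);
}

static void
xyz_report_departure(damap_t *map, char *ua)
{
	/* report removal; unconfiguration happens after stabilization */
	(void) damap_addr_del(map, ua);
}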
+
+/*
+ * Initiate full-set report
+ *
+ * damapp: address map
+ *
+ * Returns: DAM_SUCCESS
+ * DAM_EINVAL Invalid argument(s)
+ */
+int
+damap_addrset_begin(damap_t *damapp)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ int i;
+
+ DTRACE_PROBE1(damap__addrset__begin__entry, dam_t *, mapp);
+
+ if ((mapp->dam_rptmode != DAMAP_REPORT_FULLSET) ||
+ (mapp->dam_flags & DAM_DESTROYPEND))
+ return (DAM_EINVAL);
+
+ DAM_LOCK(mapp, MAP_LOCK);
+ /*
+ * reset any pending reports
+ */
+ if (mapp->dam_flags & DAM_SETADD) {
+ /*
+ * cancel stabilization timeout
+ */
+ dam_sched_tmo(mapp, 0, NULL);
+ DAM_INCR_STAT(mapp, dam_rereport);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ DAM_LOCK(mapp, ADDR_LOCK);
+ for (i = 1; i < mapp->dam_high; i++) {
+ if (DAM_IN_REPORT(mapp, i))
+ dam_release_report(mapp, i);
+ }
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ DAM_LOCK(mapp, MAP_LOCK);
+ }
+ DAM_FLAG_SET(mapp, DAM_SETADD);
+ bitset_zero(&mapp->dam_report_set);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ DTRACE_PROBE(damap__addrset__begin__exit);
+ return (DAM_SUCCESS);
+}
+
+/*
+ * Report address to full-set report
+ *
+ * damapp: address map handle
+ * address: address in ascii string representation
+ * rindx: index if address stabilizes
+ * nvl: optional nvlist of configuration-private data
+ * addr_priv: optional provider-private data (passed to activate/release cb)
+ *
+ * Returns: DAM_SUCCESS
+ * DAM_EINVAL Invalid argument(s)
+ * DAM_MAPFULL address map exhausted
+ * DAM_FAILURE General failure
+ */
+int
+damap_addrset_add(damap_t *damapp, char *address, damap_id_t *ridx,
+ nvlist_t *nvl, void *addr_priv)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ id_t addrid;
+ dam_da_t *passp;
+
+ DTRACE_PROBE2(damap__addrset__add__entry, dam_t *, mapp,
+ char *, address);
+
+ if (!mapp || !address || (mapp->dam_rptmode != DAMAP_REPORT_FULLSET) ||
+ (mapp->dam_flags & DAM_DESTROYPEND))
+ return (DAM_EINVAL);
+
+ if (!(mapp->dam_flags & DAM_SETADD))
+ return (DAM_FAILURE);
+
+ DAM_LOCK(mapp, ADDR_LOCK);
+ if ((addrid = dam_get_addrid(mapp, address)) == 0) {
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ return (DAM_MAPFULL);
+ }
+
+ passp = ddi_get_soft_state(mapp->dam_da, addrid);
+ ASSERT(passp);
+ if (DAM_IN_REPORT(mapp, addrid)) {
+ dam_release_report(mapp, addrid);
+ passp->da_jitter++;
+ }
+ passp->da_ppriv_rpt = addr_priv;
+ if (nvl)
+ (void) nvlist_dup(nvl, &passp->da_nvl_rpt, KM_SLEEP);
+ DAM_LOCK(mapp, MAP_LOCK);
+ bitset_add(&mapp->dam_report_set, addrid);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ if (ridx)
+ *ridx = (damap_id_t)addrid;
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ DTRACE_PROBE3(damap__addr__addset__exit, dam_t *, mapp, char *,
+ address, int, addrid);
+ return (DAM_SUCCESS);
+}
+
+/*
+ * Commit full-set report for stabilization
+ *
+ * damapp: address map handle
+ * flags: (currently 0)
+ *
+ * Returns: DAM_SUCCESS
+ * DAM_EINVAL Invalid argument(s)
+ * DAM_FAILURE General failure
+ */
+int
+damap_addrset_end(damap_t *damapp, int flags)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ int i;
+
+ DTRACE_PROBE1(damap__addrset__end__entry, dam_t *, mapp);
+
+ if (!mapp || (mapp->dam_rptmode != DAMAP_REPORT_FULLSET) ||
+ (mapp->dam_flags & DAM_DESTROYPEND))
+ return (DAM_EINVAL);
+
+ if (!(mapp->dam_flags & DAM_SETADD))
+ return (DAM_FAILURE);
+
+ if (flags & DAMAP_RESET) {
+ DAM_LOCK(mapp, MAP_LOCK);
+ dam_sched_tmo(mapp, 0, NULL);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ DAM_LOCK(mapp, ADDR_LOCK);
+ for (i = 1; i < mapp->dam_high; i++)
+ if (DAM_IN_REPORT(mapp, i))
+ dam_release_report(mapp, i);
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ } else {
+ mapp->dam_last_update = gethrtime();
+ DAM_LOCK(mapp, MAP_LOCK);
+ dam_sched_tmo(mapp, mapp->dam_stabletmo, dam_set_stable_cb);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ }
+ DTRACE_PROBE(damap__addrset__end__exit);
+ return (DAM_SUCCESS);
+}
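A full-set provider simply re-reports everything it can currently see (after a discovery pass, say) and lets the map compute arrivals and departures. A sketch with hypothetical xyz_* names, assuming a DAMAP_REPORT_FULLSET map and a ua_list produced by discovery:

static int
xyz_report_discovery(damap_t *map, char **ua_list, int ua_count)
{
	int i, rv;

	if ((rv = damap_addrset_begin(map)) != DAM_SUCCESS)
		return (rv);

	for (i = 0; i < ua_count; i++) {
		rv = damap_addrset_add(map, ua_list[i], NULL, NULL, NULL);
		if (rv != DAM_SUCCESS) {
			/* abandon this report; the active set is untouched */
			(void) damap_addrset_end(map, DAMAP_RESET);
			return (rv);
		}
	}

	/* commit the set; stabilization drives configure/unconfigure */
	return (damap_addrset_end(map, 0));
}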
+
+/*
+ * Return nvlist registered with reported address
+ *
+ * damapp: address map handle
+ * aid: address ID
+ *
+ * Returns:	nvlist_t * provider supplied via damap_addr{set}_add()
+ * NULL
+ */
+nvlist_t *
+damap_id2nvlist(damap_t *damapp, damap_id_t addrid)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ id_t aid = (id_t)addrid;
+ dam_da_t *pass;
+
+ if (ddi_strid_id2str(mapp->dam_addr_hash, aid)) {
+ if (pass = ddi_get_soft_state(mapp->dam_da, aid))
+ return (pass->da_nvl);
+ }
+ return (NULL);
+}
+
+/*
+ * Return address string
+ *
+ * damapp: address map handle
+ * aid: address ID
+ *
+ * Returns: char * Address string
+ * NULL
+ */
+char *
+damap_id2addr(damap_t *damapp, damap_id_t aid)
+{
+ dam_t *mapp = (dam_t *)damapp;
+
+ return (ddi_strid_id2str(mapp->dam_addr_hash, (id_t)aid));
+}
+
+/*
+ * Hold address reference in map
+ *
+ * damapp: address map handle
+ * aid: address ID
+ *
+ * Returns: DAM_SUCCESS
+ * DAM_FAILURE
+ */
+int
+damap_id_hold(damap_t *damapp, damap_id_t aid)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ dam_da_t *passp;
+
+
+ DAM_LOCK(mapp, ADDR_LOCK);
+ passp = ddi_get_soft_state(mapp->dam_da, (id_t)aid);
+ if (!passp) {
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ return (DAM_FAILURE);
+ }
+ passp->da_ref++;
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ return (DAM_SUCCESS);
+}
+
+/*
+ * Release address reference in map
+ *
+ * damapp: address map handle
+ * aid: address ID
+ */
+void
+damap_id_rele(damap_t *damapp, damap_id_t addrid)
+{
+ dam_t *mapp = (dam_t *)damapp;
+
+ DAM_LOCK(mapp, ADDR_LOCK);
+ dam_release(mapp, (id_t)addrid);
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+}
+
+/*
+ * Return current reference count on address reference in map
+ *
+ * damapp: address map handle
+ * aid: address ID
+ *
+ * Returns:	current reference count of the address
+ *		-1 if the address ID has no associated soft state
+ */
+int
+damap_id_ref(damap_t *damapp, damap_id_t aid)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ dam_da_t *passp;
+ int ref = -1;
+
+ DAM_LOCK(mapp, ADDR_LOCK);
+ passp = ddi_get_soft_state(mapp->dam_da, (id_t)aid);
+ if (passp)
+ ref = passp->da_ref;
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ return (ref);
+}
+
+/*
+ * Return next address ID in list
+ *
+ * damapp: address map handle
+ * damap_list: address ID list passed to config|unconfig
+ * returned by look by lookup_all
+ *			or returned by damap_lookup_all()
+ *
+ * Returns: addrid Next ID from the list
+ * 0 End of the list
+ */
+damap_id_t
+damap_id_next(damap_t *damapp, damap_id_list_t damap_list, damap_id_t last)
+{
+ int i, start;
+ dam_t *mapp = (dam_t *)damapp;
+ bitset_t *dam_list = (bitset_t *)damap_list;
+
+ if (!mapp || !dam_list)
+ return ((damap_id_t)0);
+
+ start = (int)last + 1;
+ for (i = start; i < mapp->dam_high; i++)
+ if (bitset_in_set(dam_list, i))
+ return ((damap_id_t)i);
+ return ((damap_id_t)0);
+}
+
+/*
+ * Set config private data
+ *
+ * damapp: address map handle
+ * aid: address ID
+ * cfg_priv: configuration private data
+ *
+ */
+void
+damap_id_priv_set(damap_t *damapp, damap_id_t aid, void *cfg_priv)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ dam_da_t *passp;
+
+
+ DAM_LOCK(mapp, ADDR_LOCK);
+ passp = ddi_get_soft_state(mapp->dam_da, (id_t)aid);
+ if (!passp) {
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ return;
+ }
+ passp->da_cfg_priv = cfg_priv;
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+}
+
+/*
+ * Get config private data
+ *
+ * damapp: address map handle
+ * aid: address ID
+ *
+ * Returns: configuration private data
+ */
+void *
+damap_id_priv_get(damap_t *damapp, damap_id_t aid)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ dam_da_t *passp;
+ void *rv;
+
+
+ DAM_LOCK(mapp, ADDR_LOCK);
+ passp = ddi_get_soft_state(mapp->dam_da, (id_t)aid);
+ if (!passp) {
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ return (NULL);
+ }
+ rv = passp->da_cfg_priv;
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ return (rv);
+}
+
+/*
+ * Lookup a single address in the active address map
+ *
+ * damapp: address map handle
+ * address: address string
+ *
+ * Returns: ID of active/stable address
+ * 0 Address not in stable set
+ *
+ * Future: Allow the caller to wait for stabilization before returning not found.
+ */
+damap_id_t
+damap_lookup(damap_t *damapp, char *address)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ id_t addrid = 0;
+ dam_da_t *passp = NULL;
+
+ DAM_LOCK(mapp, ADDR_LOCK);
+ addrid = ddi_strid_str2id(mapp->dam_addr_hash, address);
+ if (addrid) {
+ DAM_LOCK(mapp, MAP_LOCK);
+ if (DAM_IS_STABLE(mapp, addrid)) {
+ passp = ddi_get_soft_state(mapp->dam_da, addrid);
+ ASSERT(passp);
+ if (passp) {
+ passp->da_ref++;
+ } else {
+ addrid = 0;
+ }
+ } else {
+ addrid = 0;
+ }
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ }
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ return ((damap_id_t)addrid);
+}
+
+
+/*
+ * Return the list of stable addresses in the map
+ *
+ * damapp: address map handle
+ * id_listp: pointer to list of address IDs in stable map (returned)
+ *
+ * Returns:	# of entries returned in id_listp
+ */
+int
+damap_lookup_all(damap_t *damapp, damap_id_list_t *id_listp)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ int mapsz = mapp->dam_size;
+ int n_ids, i;
+ bitset_t *bsp;
+ dam_da_t *passp;
+
+ bsp = kmem_alloc(sizeof (*bsp), KM_SLEEP);
+ bitset_init(bsp);
+ bitset_resize(bsp, mapsz);
+ DAM_LOCK(mapp, MAP_LOCK);
+ bitset_copy(&mapp->dam_active_set, bsp);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ DAM_LOCK(mapp, ADDR_LOCK);
+ for (n_ids = 0, i = 1; i < mapsz; i++) {
+ if (bitset_in_set(bsp, i)) {
+ passp = ddi_get_soft_state(mapp->dam_da, i);
+ ASSERT(passp);
+ if (passp) {
+ passp->da_ref++;
+ n_ids++;
+ }
+ }
+ }
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ if (n_ids) {
+ *id_listp = (damap_id_list_t)bsp;
+ return (n_ids);
+ } else {
+ *id_listp = (damap_id_list_t)NULL;
+ bitset_fini(bsp);
+ kmem_free(bsp, sizeof (*bsp));
+ return (0);
+ }
+}
+
+/*
+ * Release the address list returned by damap_lookup_all()
+ *
+ * mapp: address map handle
+ * id_list: list of address IDs returned in damap_lookup_all()
+ */
+void
+damap_id_list_rele(damap_t *damapp, damap_id_list_t id_list)
+{
+ dam_t *mapp = (dam_t *)damapp;
+ int i;
+
+ if (id_list == NULL)
+ return;
+
+ DAM_LOCK(mapp, ADDR_LOCK);
+ for (i = 1; i < mapp->dam_high; i++) {
+ if (bitset_in_set((bitset_t *)id_list, i))
+ (void) dam_release(mapp, i);
+ }
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ bitset_fini((bitset_t *)id_list);
+ kmem_free((void *)id_list, sizeof (bitset_t));
+}
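A consumer walks the stable set with damap_lookup_all() and damap_id_next(). Each ID in the returned list is held, and damap_id_list_rele() both drops those holds and frees the list. A sketch (xyz_walk_stable is hypothetical):

static void
xyz_walk_stable(damap_t *map)
{
	damap_id_list_t list;
	damap_id_t id;

	if (damap_lookup_all(map, &list) == 0)
		return;				/* nothing stable */

	for (id = damap_id_next(map, list, 0); id != 0;
	    id = damap_id_next(map, list, id)) {
		cmn_err(CE_CONT, "?%s: stable address %s\n",
		    damap_name(map), damap_id2addr(map, id));
	}

	/* drop the per-ID holds taken by damap_lookup_all() and free the list */
	damap_id_list_rele(map, list);
}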
+
+/*
+ * Activate a set of stabilized addresses
+ */
+static void
+dam_addrset_activate(dam_t *mapp, bitset_t *active_set)
+{
+ dam_da_t *passp;
+ char *addrstr;
+ int i;
+ uint32_t n_active = 0;
+
+ for (i = 1; i < mapp->dam_high; i++) {
+ if (bitset_in_set(&mapp->dam_active_set, i))
+ n_active++;
+ if (!bitset_in_set(active_set, i))
+ continue;
+ n_active++;
+ passp = ddi_get_soft_state(mapp->dam_da, i);
+ ASSERT(passp);
+ if (mapp->dam_activate_cb) {
+ addrstr = ddi_strid_id2str(mapp->dam_addr_hash, i);
+ (*mapp->dam_activate_cb)(
+ mapp->dam_activate_arg, addrstr, i,
+ &passp->da_ppriv_rpt);
+ }
+ DTRACE_PROBE2(damap__addrset__activate, dam_t *, mapp, int, i);
+ DAM_LOCK(mapp, MAP_LOCK);
+ bitset_add(&mapp->dam_active_set, i);
+ /*
+ * copy the reported nvlist and provider private data
+ */
+ passp->da_nvl = passp->da_nvl_rpt;
+ passp->da_ppriv = passp->da_ppriv_rpt;
+ passp->da_ppriv_rpt = NULL;
+ passp->da_nvl_rpt = NULL;
+ passp->da_last_stable = gethrtime();
+ passp->da_stable_cnt++;
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ DAM_SET_STAT(mapp, dam_numstable, n_active);
+ }
+}
+
+/*
+ * Release a set of stabilized addresses
+ */
+static void
+dam_addrset_release(dam_t *mapp, bitset_t *release_set)
+{
+ int i;
+
+ DAM_LOCK(mapp, ADDR_LOCK);
+ for (i = 1; i < mapp->dam_high; i++) {
+ if (bitset_in_set(release_set, i)) {
+ DTRACE_PROBE2(damap__addrset__release, dam_t *, mapp,
+ int, i);
+ DAM_LOCK(mapp, MAP_LOCK);
+ bitset_del(&mapp->dam_active_set, i);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ (void) dam_release(mapp, i);
+ }
+ }
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+}
+
+/*
+ * release a previously activated address
+ */
+static void
+dam_release(dam_t *mapp, id_t addrid)
+{
+ dam_da_t *passp;
+
+ DAM_ASSERT_LOCKED(mapp, ADDR_LOCK);
+ passp = ddi_get_soft_state(mapp->dam_da, addrid);
+ ASSERT(passp);
+
+ /*
+ * invoke the deactivation callback to notify
+ * this address is no longer active
+ */
+ dam_deactivate_addr(mapp, addrid);
+
+ /*
+ * allow pending reports for this address to stabilize
+ */
+ if (DAM_IN_REPORT(mapp, addrid))
+ return;
+
+ /*
+ * defer teardown until outstanding references are released
+ */
+ if (--passp->da_ref) {
+ passp->da_flags |= DA_RELE;
+ return;
+ }
+ ddi_strid_free(mapp->dam_addr_hash, addrid);
+ ddi_soft_state_free(mapp->dam_da, addrid);
+}
+
+/*
+ * process stabilized address reports
+ */
+static void
+dam_activate_taskq(void *arg)
+{
+ dam_t *mapp = (dam_t *)arg;
+ bitset_t delta;
+ bitset_t cfg;
+ bitset_t uncfg;
+ int has_cfg, has_uncfg;
+
+ bitset_init(&delta);
+ bitset_resize(&delta, mapp->dam_size);
+ bitset_init(&cfg);
+ bitset_resize(&cfg, mapp->dam_size);
+ bitset_init(&uncfg);
+ bitset_resize(&uncfg, mapp->dam_size);
+
+ DTRACE_PROBE1(damap__activate__taskq__entry, dam_t, mapp);
+ DAM_LOCK(mapp, MAP_LOCK);
+ if (!bitset_xor(&mapp->dam_active_set, &mapp->dam_stable_set,
+ &delta)) {
+ bitset_zero(&mapp->dam_stable_set);
+ DAM_FLAG_CLR(mapp, DAM_SPEND);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ bitset_fini(&uncfg);
+ bitset_fini(&cfg);
+ bitset_fini(&delta);
+ return;
+ }
+ has_cfg = bitset_and(&delta, &mapp->dam_stable_set, &cfg);
+ has_uncfg = bitset_and(&delta, &mapp->dam_active_set, &uncfg);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ if (has_cfg) {
+ dam_addrset_activate(mapp, &cfg);
+ (*mapp->dam_configure_cb)(mapp->dam_config_arg, mapp, &cfg);
+ }
+ if (has_uncfg) {
+ (*mapp->dam_unconfig_cb)(mapp->dam_config_arg, mapp, &uncfg);
+ dam_addrset_release(mapp, &uncfg);
+ }
+ DAM_LOCK(mapp, MAP_LOCK);
+ bitset_zero(&mapp->dam_stable_set);
+ DAM_FLAG_CLR(mapp, DAM_SPEND);
+ mapp->dam_last_stable = gethrtime();
+ mapp->dam_stable_cnt++;
+ DAM_INCR_STAT(mapp, dam_stable);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ bitset_fini(&uncfg);
+ bitset_fini(&cfg);
+ bitset_fini(&delta);
+ DTRACE_PROBE1(damap__activate__taskq__exit, dam_t, mapp);
+}
+
+/*
+ * per-address stabilization timeout
+ */
+static void
+dam_addr_stable_cb(void *arg)
+{
+ dam_t *mapp = (dam_t *)arg;
+ int i;
+ dam_da_t *passp;
+ int spend = 0;
+ int tpend = 0;
+ int64_t next_tmov = mapp->dam_stabletmo;
+ int64_t tmo_delta;
+ int64_t ts = lbolt64;
+
+ DTRACE_PROBE1(damap__addr__stable__cb__entry, dam_t *, mapp);
+ DAM_LOCK(mapp, MAP_LOCK);
+ if (mapp->dam_tid == 0) {
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ return;
+ }
+ mapp->dam_tid = 0;
+ /*
+ * If still under stabilization, reschedule timeout,
+ * else dispatch the task to activate & deactivate the stable
+ * set.
+ */
+ if (mapp->dam_flags & DAM_SPEND) {
+ DAM_INCR_STAT(mapp, dam_stable_blocked);
+ mapp->dam_stable_overrun++;
+ dam_sched_tmo(mapp, mapp->dam_stabletmo, dam_addr_stable_cb);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ DTRACE_PROBE1(damap__addr__stable__cb__overrun,
+ dam_t *, mapp);
+ return;
+ }
+
+ bitset_copy(&mapp->dam_active_set, &mapp->dam_stable_set);
+ for (i = 1; i < mapp->dam_high; i++) {
+ if (!bitset_in_set(&mapp->dam_report_set, i))
+ continue;
+ /*
+ * Stabilize each address
+ */
+ passp = ddi_get_soft_state(mapp->dam_da, i);
+ ASSERT(passp);
+ if (!passp) {
+ cmn_err(CE_WARN, "Clearing report no softstate %d", i);
+ bitset_del(&mapp->dam_report_set, i);
+ continue;
+ }
+
+ /* report has stabilized */
+ if (passp->da_deadline <= ts) {
+ bitset_del(&mapp->dam_report_set, i);
+ if (passp->da_flags & DA_RELE) {
+ DTRACE_PROBE2(damap__addr__stable__del,
+ dam_t *, mapp, int, i);
+ bitset_del(&mapp->dam_stable_set, i);
+ } else {
+ DTRACE_PROBE2(damap__addr__stable__add,
+ dam_t *, mapp, int, i);
+ bitset_add(&mapp->dam_stable_set, i);
+ }
+ spend++;
+ continue;
+ }
+
+ /*
+ * not stabilized, determine next (future) map timeout
+ */
+ tpend++;
+ tmo_delta = passp->da_deadline - ts;
+ if (tmo_delta < next_tmov)
+ next_tmov = tmo_delta;
+ }
+
+ /*
+ * schedule taskq activation of stabilized reports
+ */
+ if (spend) {
+ if (ddi_taskq_dispatch(mapp->dam_taskqp, dam_activate_taskq,
+ mapp, DDI_NOSLEEP) == DDI_SUCCESS) {
+ DAM_FLAG_SET(mapp, DAM_SPEND);
+ } else
+ tpend++;
+ }
+
+ /*
+	 * schedule timeout to handle future stabilization of active reports
+ */
+ if (tpend)
+ dam_sched_tmo(mapp, (clock_t)next_tmov, dam_addr_stable_cb);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ DTRACE_PROBE1(damap__addr__stable__cb__exit, dam_t *, mapp);
+}
+
+/*
+ * fullset stabilization timeout
+ */
+static void
+dam_set_stable_cb(void *arg)
+{
+ dam_t *mapp = (dam_t *)arg;
+
+ DTRACE_PROBE1(damap__set__stable__cb__enter, dam_t *, mapp);
+
+ DAM_LOCK(mapp, MAP_LOCK);
+ if (mapp->dam_tid == 0) {
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ return;
+ }
+ mapp->dam_tid = 0;
+
+ /*
+ * If still under stabilization, reschedule timeout,
+ * else dispatch the task to activate & deactivate the stable
+ * set.
+ */
+ if (mapp->dam_flags & DAM_SPEND) {
+ DAM_INCR_STAT(mapp, dam_stable_blocked);
+ mapp->dam_stable_overrun++;
+ dam_sched_tmo(mapp, mapp->dam_stabletmo, dam_set_stable_cb);
+ DTRACE_PROBE1(damap__set__stable__cb__overrun,
+ dam_t *, mapp);
+ } else if (ddi_taskq_dispatch(mapp->dam_taskqp, dam_activate_taskq,
+ mapp, DDI_NOSLEEP) == DDI_FAILURE) {
+ dam_sched_tmo(mapp, mapp->dam_stabletmo, dam_set_stable_cb);
+ } else {
+ bitset_copy(&mapp->dam_report_set, &mapp->dam_stable_set);
+ bitset_zero(&mapp->dam_report_set);
+ DAM_FLAG_CLR(mapp, DAM_SETADD);
+ DAM_FLAG_SET(mapp, DAM_SPEND);
+ }
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ DTRACE_PROBE1(damap__set__stable__cb__exit, dam_t *, mapp);
+}
+
+/*
+ * cancel any pending map timeout and, if a callback is given, reschedule it 'tmo_ms' ticks out
+ */
+static void
+dam_sched_tmo(dam_t *mapp, clock_t tmo_ms, void (*tmo_cb)())
+{
+ timeout_id_t tid;
+
+ if ((tid = mapp->dam_tid) != 0) {
+ mapp->dam_tid = 0;
+ DAM_UNLOCK(mapp, MAP_LOCK);
+ (void) untimeout(tid);
+ DAM_LOCK(mapp, MAP_LOCK);
+ }
+
+ if (tmo_cb && (tmo_ms != 0))
+ mapp->dam_tid = timeout(tmo_cb, mapp, tmo_ms);
+}
+
+/*
+ * record report addition or removal of an address
+ */
+static void
+dam_add_report(dam_t *mapp, dam_da_t *passp, id_t addrid, int report)
+{
+ ASSERT(!DAM_IN_REPORT(mapp, addrid));
+ passp->da_last_report = gethrtime();
+ mapp->dam_last_update = gethrtime();
+ passp->da_report_cnt++;
+ passp->da_deadline = lbolt64 + mapp->dam_stabletmo;
+ if (report == RPT_ADDR_DEL)
+ passp->da_flags |= DA_RELE;
+ else if (report == RPT_ADDR_ADD)
+ passp->da_flags &= ~DA_RELE;
+ DAM_LOCK(mapp, MAP_LOCK);
+ bitset_add(&mapp->dam_report_set, addrid);
+ dam_sched_tmo(mapp, mapp->dam_stabletmo, dam_addr_stable_cb);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+
+}
+
+/*
+ * release an address report
+ */
+static void
+dam_release_report(dam_t *mapp, id_t addrid)
+{
+ dam_da_t *passp;
+
+ passp = ddi_get_soft_state(mapp->dam_da, addrid);
+ ASSERT(passp);
+ passp->da_ppriv_rpt = NULL;
+ if (passp->da_nvl_rpt)
+ nvlist_free(passp->da_nvl_rpt);
+ passp->da_nvl_rpt = NULL;
+ DAM_LOCK(mapp, MAP_LOCK);
+ bitset_del(&mapp->dam_report_set, addrid);
+ DAM_UNLOCK(mapp, MAP_LOCK);
+}
+
+/*
+ * deactivate a previously stable address
+ */
+static void
+dam_deactivate_addr(dam_t *mapp, id_t addrid)
+{
+ dam_da_t *passp;
+
+ passp = ddi_get_soft_state(mapp->dam_da, addrid);
+ ASSERT(passp);
+ if (passp == NULL)
+ return;
+ DAM_UNLOCK(mapp, ADDR_LOCK);
+ if (mapp->dam_deactivate_cb)
+ (*mapp->dam_deactivate_cb)(
+ mapp->dam_activate_arg,
+ ddi_strid_id2str(mapp->dam_addr_hash,
+ addrid), addrid, passp->da_ppriv);
+ DAM_LOCK(mapp, ADDR_LOCK);
+ passp->da_ppriv = NULL;
+ if (passp->da_nvl)
+ nvlist_free(passp->da_nvl);
+ passp->da_nvl = NULL;
+}
+
+/*
+ * return the map ID of an address
+ */
+static id_t
+dam_get_addrid(dam_t *mapp, char *address)
+{
+ damap_id_t addrid;
+ dam_da_t *passp;
+
+ if ((addrid = ddi_strid_str2id(mapp->dam_addr_hash, address)) == 0) {
+ if ((addrid = ddi_strid_fixed_alloc(mapp->dam_addr_hash,
+ address)) == (damap_id_t)0) {
+ return (0);
+ }
+ if (ddi_soft_state_zalloc(mapp->dam_da, addrid) !=
+ DDI_SUCCESS) {
+ ddi_strid_free(mapp->dam_addr_hash, addrid);
+ return (0);
+ }
+ if (addrid >= mapp->dam_high)
+ mapp->dam_high = addrid + 1;
+ }
+ passp = ddi_get_soft_state(mapp->dam_da, addrid);
+ if (passp == NULL)
+ return (0);
+ passp->da_ref++;
+ if (passp->da_addr == NULL)
+ passp->da_addr = ddi_strid_id2str(
+ mapp->dam_addr_hash, addrid); /* for mdb */
+ return (addrid);
+}
+
+/*
+ * create and install map statistics
+ */
+static int
+dam_kstat_create(dam_t *mapp)
+{
+ kstat_t *mapsp;
+ struct dam_kstats *statsp;
+
+ mapsp = kstat_create("dam", 0, mapp->dam_name, "damap",
+ KSTAT_TYPE_NAMED,
+ sizeof (struct dam_kstats) / sizeof (kstat_named_t), 0);
+ if (mapsp == NULL) {
+ return (DDI_FAILURE);
+ }
+
+ statsp = (struct dam_kstats *)mapsp->ks_data;
+ kstat_named_init(&statsp->dam_stable, "stable cycles",
+ KSTAT_DATA_UINT32);
+ kstat_named_init(&statsp->dam_stable_blocked,
+ "stable cycle overrun", KSTAT_DATA_UINT32);
+ kstat_named_init(&statsp->dam_rereport,
+ "restarted reports", KSTAT_DATA_UINT32);
+ kstat_named_init(&statsp->dam_numstable,
+ "# of stable map entries", KSTAT_DATA_UINT32);
+ kstat_install(mapsp);
+ mapp->dam_kstatsp = mapsp;
+ return (DDI_SUCCESS);
+}
+
+/*
+ * destroy map stats
+ */
+static void
+dam_kstat_destroy(dam_t *mapp)
+{
+
+ kstat_delete(mapp->dam_kstatsp);
+}
diff --git a/usr/src/uts/common/os/devcfg.c b/usr/src/uts/common/os/devcfg.c
index 525de8deb9..0c35c932ba 100644
--- a/usr/src/uts/common/os/devcfg.c
+++ b/usr/src/uts/common/os/devcfg.c
@@ -179,8 +179,6 @@ static void link_to_driver_list(dev_info_t *);
static void unlink_from_driver_list(dev_info_t *);
static void add_to_dn_list(struct devnames *, dev_info_t *);
static void remove_from_dn_list(struct devnames *, dev_info_t *);
-static dev_info_t *find_child_by_callback(dev_info_t *, char *, char *,
- int (*)(dev_info_t *, char *, int));
static dev_info_t *find_duplicate_child();
static void add_global_props(dev_info_t *);
static void remove_global_props(dev_info_t *);
@@ -277,11 +275,14 @@ i_ddi_alloc_node(dev_info_t *pdip, char *node_name, pnode_t nodeid,
* DEVI_PSEUDO_NODEID DDI_NC_PSEUDO A
* DEVI_SID_NODEID DDI_NC_PSEUDO A,P
* DEVI_SID_HIDDEN_NODEID DDI_NC_PSEUDO A,P,H
+ * DEVI_SID_HP_NODEID DDI_NC_PSEUDO A,P,h
+ * DEVI_SID_HP_HIDDEN_NODEID DDI_NC_PSEUDO A,P,H,h
* other DDI_NC_PROM P
*
* Where A = DDI_AUTO_ASSIGNED_NODEID (auto-assign a nodeid)
* and P = DDI_PERSISTENT
* and H = DDI_HIDDEN_NODE
+ * and h = DDI_HOTPLUG_NODE
*
* auto-assigned nodeids are also auto-freed.
*/
@@ -289,12 +290,23 @@ i_ddi_alloc_node(dev_info_t *pdip, char *node_name, pnode_t nodeid,
switch (nodeid) {
case DEVI_SID_HIDDEN_NODEID:
devi->devi_node_attributes |= DDI_HIDDEN_NODE;
- /*FALLTHROUGH*/
+ goto sid;
+
+ case DEVI_SID_HP_NODEID:
+ devi->devi_node_attributes |= DDI_HOTPLUG_NODE;
+ goto sid;
+
+ case DEVI_SID_HP_HIDDEN_NODEID:
+ devi->devi_node_attributes |= DDI_HIDDEN_NODE;
+ devi->devi_node_attributes |= DDI_HOTPLUG_NODE;
+ goto sid;
+
case DEVI_SID_NODEID:
- devi->devi_node_attributes |= DDI_PERSISTENT;
+sid: devi->devi_node_attributes |= DDI_PERSISTENT;
if ((elem = kmem_zalloc(sizeof (*elem), flag)) == NULL)
goto fail;
/*FALLTHROUGH*/
+
case DEVI_PSEUDO_NODEID:
devi->devi_node_attributes |= DDI_AUTO_ASSIGNED_NODEID;
devi->devi_node_class = DDI_NC_PSEUDO;
@@ -303,9 +315,11 @@ i_ddi_alloc_node(dev_info_t *pdip, char *node_name, pnode_t nodeid,
/*NOTREACHED*/
}
break;
+
default:
if ((elem = kmem_zalloc(sizeof (*elem), flag)) == NULL)
goto fail;
+
/*
* the nodetype is 'prom', try to 'take' the nodeid now.
* This requires memory allocation, so check for failure.
@@ -318,7 +332,7 @@ i_ddi_alloc_node(dev_info_t *pdip, char *node_name, pnode_t nodeid,
devi->devi_nodeid = nodeid;
devi->devi_node_class = DDI_NC_PROM;
devi->devi_node_attributes = DDI_PERSISTENT;
-
+ break;
}
if (ndi_dev_is_persistent_node((dev_info_t *)devi)) {
@@ -2142,7 +2156,9 @@ find_sibling(dev_info_t *head, char *cname, char *caddr, uint_t flag,
/*
* Walk the child list to find a match
*/
-
+ if (head == NULL)
+ return (NULL);
+ ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(head)));
for (dip = head; dip; dip = ddi_get_next_sibling(dip)) {
if (by == FIND_NODE_BY_NODENAME) {
/* match node name */
@@ -2210,12 +2226,15 @@ find_duplicate_child(dev_info_t *pdip, dev_info_t *dip)
* Find a child of a given name and address, using a callback to name
* unnamed children. cname is the binding name.
*/
-static dev_info_t *
-find_child_by_callback(dev_info_t *pdip, char *cname, char *caddr,
- int (*name_node)(dev_info_t *, char *, int))
+dev_info_t *
+ndi_devi_findchild_by_callback(dev_info_t *pdip, char *dname, char *ua,
+ int (*make_ua)(dev_info_t *, char *, int))
{
- return (find_sibling(ddi_get_child(pdip), cname, caddr,
- FIND_NODE_BY_DRIVER|FIND_ADDR_BY_CALLBACK, name_node));
+ int by = FIND_ADDR_BY_CALLBACK;
+
+ ASSERT(DEVI_BUSY_OWNED(pdip));
+ by |= dname ? FIND_NODE_BY_DRIVER : FIND_NODE_BY_ADDR;
+ return (find_sibling(ddi_get_child(pdip), dname, ua, by, make_ua));
}
/*
@@ -2531,15 +2550,15 @@ i_ddi_unload_drvconf(major_t major)
* It returns DDI_SUCCESS if the node is merged and DDI_FAILURE otherwise.
*/
int
-ndi_merge_node(dev_info_t *dip, int (*name_node)(dev_info_t *, char *, int))
+ndi_merge_node(dev_info_t *dip, int (*make_ua)(dev_info_t *, char *, int))
{
dev_info_t *hwdip;
ASSERT(ndi_dev_is_persistent_node(dip) == 0);
ASSERT(ddi_get_name_addr(dip) != NULL);
- hwdip = find_child_by_callback(ddi_get_parent(dip),
- ddi_binding_name(dip), ddi_get_name_addr(dip), name_node);
+ hwdip = ndi_devi_findchild_by_callback(ddi_get_parent(dip),
+ ddi_binding_name(dip), ddi_get_name_addr(dip), make_ua);
/*
* Look for the hardware node that is the target of the merge;
@@ -4506,11 +4525,17 @@ i_ndi_devi_report_status_change(dev_info_t *dip, char *path)
char *status;
if (!DEVI_NEED_REPORT(dip) ||
- (i_ddi_node_state(dip) < DS_INITIALIZED)) {
+ (i_ddi_node_state(dip) < DS_INITIALIZED) ||
+ ndi_dev_is_hidden_node(dip)) {
return;
}
- if (DEVI_IS_DEVICE_OFFLINE(dip)) {
+ /* Invalidate the devinfo snapshot cache */
+ i_ddi_di_cache_invalidate();
+
+ if (DEVI_IS_DEVICE_REMOVED(dip)) {
+ status = "removed";
+ } else if (DEVI_IS_DEVICE_OFFLINE(dip)) {
status = "offline";
} else if (DEVI_IS_DEVICE_DOWN(dip)) {
status = "down";
@@ -4547,26 +4572,25 @@ i_ndi_devi_report_status_change(dev_info_t *dip, char *path)
static int
i_log_devfs_add_devinfo(dev_info_t *dip, uint_t flags)
{
- int se_err;
- char *pathname;
- sysevent_t *ev;
- sysevent_id_t eid;
- sysevent_value_t se_val;
- sysevent_attr_list_t *ev_attr_list = NULL;
- char *class_name;
- int no_transport = 0;
+ int se_err;
+ char *pathname;
+ sysevent_t *ev;
+ sysevent_id_t eid;
+ sysevent_value_t se_val;
+ sysevent_attr_list_t *ev_attr_list = NULL;
+ char *class_name;
+ int no_transport = 0;
- ASSERT(dip);
-
- /*
- * Invalidate the devinfo snapshot cache
- */
- i_ddi_di_cache_invalidate(KM_SLEEP);
+ ASSERT(dip && ddi_get_parent(dip) &&
+ DEVI_BUSY_OWNED(ddi_get_parent(dip)));
/* do not generate ESC_DEVFS_DEVI_ADD event during boot */
if (!i_ddi_io_initialized())
return (DDI_SUCCESS);
+ /* Invalidate the devinfo snapshot cache */
+ i_ddi_di_cache_invalidate();
+
ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_DEVI_ADD, EP_DDI, SE_SLEEP);
pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
@@ -4649,18 +4673,19 @@ static int
i_log_devfs_remove_devinfo(char *pathname, char *class_name, char *driver_name,
int instance, uint_t flags)
{
- sysevent_t *ev;
- sysevent_id_t eid;
- sysevent_value_t se_val;
- sysevent_attr_list_t *ev_attr_list = NULL;
- int se_err;
- int no_transport = 0;
-
- i_ddi_di_cache_invalidate(KM_SLEEP);
+ sysevent_t *ev;
+ sysevent_id_t eid;
+ sysevent_value_t se_val;
+ sysevent_attr_list_t *ev_attr_list = NULL;
+ int se_err;
+ int no_transport = 0;
if (!i_ddi_io_initialized())
return (DDI_SUCCESS);
+ /* Invalidate the devinfo snapshot cache */
+ i_ddi_di_cache_invalidate();
+
ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_DEVI_REMOVE, EP_DDI, SE_SLEEP);
se_val.value_type = SE_DATA_TYPE_STRING;
@@ -4739,6 +4764,37 @@ fail:
return (DDI_SUCCESS);
}
+static void
+i_ddi_log_devfs_device_remove(dev_info_t *dip)
+{
+ char *path;
+
+ ASSERT(dip && ddi_get_parent(dip) &&
+ DEVI_BUSY_OWNED(ddi_get_parent(dip)));
+ ASSERT(DEVI_IS_DEVICE_REMOVED(dip));
+
+ ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
+ if (i_ddi_node_state(dip) < DS_INITIALIZED)
+ return;
+
+ path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
+ (void) i_log_devfs_remove_devinfo(ddi_pathname(dip, path),
+ i_ddi_devi_class(dip), (char *)ddi_driver_name(dip),
+ ddi_get_instance(dip), 0);
+ kmem_free(path, MAXPATHLEN);
+}
+
+static void
+i_ddi_log_devfs_device_insert(dev_info_t *dip)
+{
+ ASSERT(dip && ddi_get_parent(dip) &&
+ DEVI_BUSY_OWNED(ddi_get_parent(dip)));
+ ASSERT(!DEVI_IS_DEVICE_REMOVED(dip));
+
+ (void) i_log_devfs_add_devinfo(dip, 0);
+}
+
+
/*
* log an event that a dev_info branch has been configured or unconfigured.
*/
@@ -4756,6 +4812,9 @@ i_log_devfs_branch(char *node_path, char *subclass)
if (!i_ddi_io_initialized())
return (DDI_SUCCESS);
+ /* Invalidate the devinfo snapshot cache */
+ i_ddi_di_cache_invalidate();
+
ev = sysevent_alloc(EC_DEVFS, subclass, EP_DDI, SE_SLEEP);
se_val.value_type = SE_DATA_TYPE_STRING;
@@ -5461,7 +5520,6 @@ ndi_devi_config_one(dev_info_t *dip, char *devnm, dev_info_t **dipp, int flags)
* by the BUS_CONFIG_ONE.
*/
ASSERT(*dipp);
-
error = devi_config_common(*dipp, flags, DDI_MAJOR_T_NONE);
pm_post_config(dip, devnm);
@@ -6365,7 +6423,7 @@ ndi_devi_offline(dev_info_t *dip, uint_t flags)
}
ndi_devi_enter(pdip, &circ);
- if (i_ddi_node_state(dip) == DS_READY) {
+ if (i_ddi_devi_attached(dip)) {
/*
* If dip is in DS_READY state, there may be cached dv_nodes
* referencing this dip, so we invoke devfs code path.
@@ -6380,11 +6438,21 @@ ndi_devi_offline(dev_info_t *dip, uint_t flags)
ndi_devi_exit(vdip, v_circ);
/*
- * If we own parent lock, this is part of a branch
- * operation. We skip the devfs_clean() step.
+	 * If we are explicitly told to clean, then clean. If we own the
+ * parent lock then this is part of a branch operation, and we
+ * skip the devfs_clean() step.
+ *
+ * NOTE: A thread performing a devfs file system lookup/
+ * bus_config can't call devfs_clean to unconfig without
+ * causing rwlock problems in devfs. For ndi_devi_offline, this
+ * means that the NDI_DEVFS_CLEAN flag is safe from ioctl code
+ * or from an async hotplug thread, but is not safe from a
+ * nexus driver's bus_config implementation.
*/
- if (!DEVI_BUSY_OWNED(pdip))
+ if ((flags & NDI_DEVFS_CLEAN) ||
+ (!DEVI_BUSY_OWNED(pdip)))
(void) devfs_clean(pdip, devname + 1, DV_CLEAN_FORCE);
+
kmem_free(devname, MAXNAMELEN + 1);
rval = devi_unconfig_branch(dip, NULL, flags|NDI_UNCONFIG,
@@ -6416,7 +6484,7 @@ ndi_devi_offline(dev_info_t *dip, uint_t flags)
}
/*
- * Find the child dev_info node of parent nexus 'p' whose name
+ * Find the child dev_info node of parent nexus 'p' whose unit address
* matches "cname@caddr". Recommend use of ndi_devi_findchild() instead.
*/
dev_info_t *
@@ -6436,7 +6504,7 @@ ndi_devi_find(dev_info_t *pdip, char *cname, char *caddr)
}
/*
- * Find the child dev_info node of parent nexus 'p' whose name
+ * Find the child dev_info node of parent nexus 'p' whose unit address
* matches devname "name@addr". Permits caller to hold the parent.
*/
dev_info_t *
@@ -6573,7 +6641,7 @@ path_to_major(char *path)
* Some callers expect to be able to perform a hold_devi() while in a context
* where using ndi_devi_enter() to ensure the hold might cause deadlock (see
* open-from-attach code in consconfig_dacf.c). Such special-case callers
- * must ensure that an ndi_devi_enter(parent)/ndi_devi_hold() from a safe
+ * must ensure that an ndi_devi_enter(parent)/ndi_hold_devi() from a safe
* context is already active. The hold_devi() implementation must accommodate
* these callers.
*/
@@ -6601,7 +6669,7 @@ hold_devi(major_t major, int instance, int flags)
if (DEVI(dip)->devi_instance == instance) {
/*
* To accommodate callers that can't block in
- * ndi_devi_enter() we do an ndi_devi_hold(), and
+ * ndi_devi_enter() we do an ndi_hold_devi(), and
* afterwards check that the node is in a state where
* the hold prevents detach(). If we did not manage to
* prevent detach then we ndi_rele_devi() and perform
@@ -7713,7 +7781,7 @@ i_ddi_di_cache_free(struct di_cache *cache)
}
void
-i_ddi_di_cache_invalidate(int kmflag)
+i_ddi_di_cache_invalidate()
{
int cache_valid;
@@ -7729,13 +7797,18 @@ i_ddi_di_cache_invalidate(int kmflag)
/* Invalidate the in-core cache and dispatch free on valid->invalid */
cache_valid = atomic_swap_uint(&di_cache.cache_valid, 0);
if (cache_valid) {
+ /*
+ * This is an optimization to start cleaning up a cached
+ * snapshot early. For this reason, it is OK for
+	 * taskq_dispatch to fail (and it is OK to not track calling
+ * context relative to sleep, and assume NOSLEEP).
+ */
(void) taskq_dispatch(system_taskq, free_cache_task, NULL,
- (kmflag == KM_SLEEP) ? TQ_SLEEP : TQ_NOSLEEP);
+ TQ_NOSLEEP);
}
if (di_cache_debug) {
- cmn_err(CE_NOTE, "invalidation with km_flag: %s",
- kmflag == KM_SLEEP ? "KM_SLEEP" : "KM_NOSLEEP");
+ cmn_err(CE_NOTE, "invalidation");
}
}
@@ -7865,6 +7938,100 @@ ndi_devi_config_vhci(char *drvname, int flags)
}
/*
+ * Maintain DEVI_DEVICE_REMOVED hotplug devi_state for remove/reinsert hotplug
+ * of open devices. Currently, because of tight coupling between the devfs file
+ * system and the Solaris device tree, a driver can't always make the device
+ * tree state (esp devi_node_state) match device hardware hotplug state. Until
+ * resolved, to overcome this deficiency we use the following interfaces that
+ * maintain the DEVI_DEVICE_REMOVED devi_state status bit. These interface
+ * report current state, and drive operation (like events and cache
+ * invalidation) when a driver changes remove/insert state of an open device.
+ *
+ * The ndi_devi_device_isremoved() returns 1 if the device is currently removed.
+ *
+ * The ndi_devi_device_remove() interface declares the device as removed, and
+ * returns 1 if there was a state change associated with this declaration.
+ *
+ * The ndi_devi_device_insert() declares the device as inserted, and returns 1
+ * if there was a state change associated with this declaration.
+ */
+int
+ndi_devi_device_isremoved(dev_info_t *dip)
+{
+ return (DEVI_IS_DEVICE_REMOVED(dip));
+}
+
+int
+ndi_devi_device_remove(dev_info_t *dip)
+{
+ ASSERT(dip && ddi_get_parent(dip) &&
+ DEVI_BUSY_OWNED(ddi_get_parent(dip)));
+
+ /* Return if already marked removed. */
+ if (ndi_devi_device_isremoved(dip))
+ return (0);
+
+ /* Mark the device as having been physically removed. */
+ mutex_enter(&(DEVI(dip)->devi_lock));
+ ndi_devi_set_hidden(dip); /* invisible: lookup/snapshot */
+ DEVI_SET_DEVICE_REMOVED(dip);
+ DEVI_SET_EVREMOVE(dip); /* this clears EVADD too */
+ mutex_exit(&(DEVI(dip)->devi_lock));
+
+ /* report remove (as 'removed') */
+ i_ndi_devi_report_status_change(dip, NULL);
+
+ /*
+ * Invalidate the cache to ensure accurate
+ * (di_state() & DI_DEVICE_REMOVED).
+ */
+ i_ddi_di_cache_invalidate();
+
+ /*
+ * Generate sysevent for those interested in removal (either directly
+ * via EC_DEVFS or indirectly via devfsadmd generated EC_DEV).
+ */
+ i_ddi_log_devfs_device_remove(dip);
+
+ return (1); /* DEVICE_REMOVED state changed */
+}
+
+int
+ndi_devi_device_insert(dev_info_t *dip)
+{
+ ASSERT(dip && ddi_get_parent(dip) &&
+ DEVI_BUSY_OWNED(ddi_get_parent(dip)));
+
+ /* Return if not marked removed. */
+ if (!ndi_devi_device_isremoved(dip))
+ return (0);
+
+ /* Mark the device as having been physically reinserted. */
+ mutex_enter(&(DEVI(dip)->devi_lock));
+ ndi_devi_clr_hidden(dip); /* visible: lookup/snapshot */
+ DEVI_SET_DEVICE_REINSERTED(dip);
+ DEVI_SET_EVADD(dip); /* this clears EVREMOVE too */
+ mutex_exit(&(DEVI(dip)->devi_lock));
+
+ /* report insert (as 'online') */
+ i_ndi_devi_report_status_change(dip, NULL);
+
+ /*
+ * Invalidate the cache to ensure accurate
+ * (di_state() & DI_DEVICE_REMOVED).
+ */
+ i_ddi_di_cache_invalidate();
+
+ /*
+	 * Generate sysevent for those interested in insertion (either directly
+ * via EC_DEVFS or indirectly via devfsadmd generated EC_DEV).
+ */
+ i_ddi_log_devfs_device_insert(dip);
+
+ return (1); /* DEVICE_REMOVED state changed */
+}
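A driver that detects removal or re-insertion of an open device calls these while holding the parent busy with ndi_devi_enter(), as the ASSERTs above require. A minimal sketch with hypothetical xyz_* names:

static void
xyz_child_gone(dev_info_t *parent, dev_info_t *child)
{
	int circ;

	ndi_devi_enter(parent, &circ);
	/* returns 1 on a present->removed transition (sysevent posted) */
	(void) ndi_devi_device_remove(child);
	ndi_devi_exit(parent, circ);
}

static void
xyz_child_back(dev_info_t *parent, dev_info_t *child)
{
	int circ;

	ndi_devi_enter(parent, &circ);
	/* returns 1 on a removed->present transition */
	(void) ndi_devi_device_insert(child);
	ndi_devi_exit(parent, circ);
}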
+
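
As a usage illustration (not part of this patch): a hotplug-aware HBA driver could call the interfaces above when it detects surprise removal or reinsertion of an open child device. The xx_ names below are hypothetical; the busy-hold on the parent mirrors the ASSERTs in ndi_devi_device_remove()/ndi_devi_device_insert().

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>

/* Hypothetical helper: report a hardware presence change for 'child'. */
static void
xx_report_presence(dev_info_t *child, boolean_t present)
{
	dev_info_t	*parent = ddi_get_parent(child);
	int		circ;

	ndi_devi_enter(parent, &circ);	/* satisfy DEVI_BUSY_OWNED(parent) */
	if (present) {
		if (ndi_devi_device_insert(child))
			cmn_err(CE_NOTE, "!%s%d: device reinserted",
			    ddi_driver_name(child), ddi_get_instance(child));
	} else {
		if (ndi_devi_device_remove(child))
			cmn_err(CE_NOTE, "!%s%d: device removed",
			    ddi_driver_name(child), ddi_get_instance(child));
	}
	ndi_devi_exit(parent, circ);
}
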
+/*
* ibt_hw_is_present() returns 0 when there is no IB hardware actively
* running. This is primarily useful for modules like rpcmod which
* needs a quick check to decide whether or not it should try to use
diff --git a/usr/src/uts/common/os/id_space.c b/usr/src/uts/common/os/id_space.c
index 89f83dfc89..07b1a630ea 100644
--- a/usr/src/uts/common/os/id_space.c
+++ b/usr/src/uts/common/os/id_space.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/id_space.h>
#include <sys/debug.h>
@@ -39,17 +36,19 @@
* unless there are no larger slots remaining in the range. In this case,
* the ID space will return the first available slot in the lower part of the
* range (viewing the previous identifier as a partitioning element). If no
- * slots are available, id_alloc() will sleep until an identifier becomes
- * available. Accordingly, id_space allocations must be initiated from
- * contexts where sleeping is acceptable. id_alloc_nosleep() will return
- * -1 if no slots are available or if the system is low on memory. If
- * id_alloc_nosleep() fails, callers should not try to extend the ID
- * space. This is to avoid making a possible low-memory situation
- * worse.
+ * slots are available, id_alloc()/id_allocff() will sleep until an
+ * identifier becomes available. Accordingly, id_space allocations must be
+ * initiated from contexts where sleeping is acceptable. id_alloc_nosleep()/
+ * id_allocff_nosleep() will return -1 if no slots are available or if the
+ * system is low on memory. If id_alloc_nosleep() fails, callers should
+ * not try to extend the ID space. This is to avoid making a possible
+ * low-memory situation worse.
*
* As an ID space is designed for representing a range of id_t's, there
- * is a preexisting maximal range: [0, MAXUID]. ID space requests
- * outside that range will fail on a DEBUG kernel.
+ * is a preexisting maximal range: [0, MAXUID]. ID space requests outside
+ * that range will fail on a DEBUG kernel. The id_allocff*() functions
+ * return the first available id, and should be used when there is benefit
+ * to having a compact allocated range.
*
* (Presently, the id_space_t abstraction supports only direct allocations; ID
* reservation, in which an ID is allocated but placed in a internal
@@ -112,6 +111,29 @@ id_alloc_nosleep(id_space_t *isp)
}
/*
+ * Allocate an id_t from specified ID space using FIRSTFIT.
+ * Caller must be in a context in which VM_SLEEP is legal.
+ */
+id_t
+id_allocff(id_space_t *isp)
+{
+ return ((id_t)(uintptr_t)
+ vmem_alloc(isp, 1, VM_SLEEP | VM_FIRSTFIT) - 1);
+}
+
+/*
+ * Allocate an id_t from specified ID space using FIRSTFIT
+ * Returns -1 on failure (see module block comments for more information on
+ * failure modes).
+ */
+id_t
+id_allocff_nosleep(id_space_t *isp)
+{
+ return ((id_t)(uintptr_t)
+ vmem_alloc(isp, 1, VM_NOSLEEP | VM_FIRSTFIT) - 1);
+}
+
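A minimal sketch (not in the patch) of the compact-allocation property described in the block comment above, using a throwaway id space; the name "xx_ids" and the expected ids in the comments are illustrative, following the first-fit behavior documented here.

#include <sys/types.h>
#include <sys/id_space.h>

static void
xx_idspace_example(void)
{
	id_space_t	*ids;
	id_t		a, b, c;

	ids = id_space_create("xx_ids", 1, 128);	/* usable ids 1..127 */

	a = id_allocff(ids);	/* first available id (expect 1) */
	b = id_allocff(ids);	/* expect 2 */
	c = id_allocff(ids);	/* expect 3 */

	id_free(ids, b);	/* open a hole in the middle */

	/*
	 * FIRSTFIT refills the hole (expect 2 again), keeping the allocated
	 * range compact; plain id_alloc() would tend to hand out a larger
	 * id per the next-fit policy described in the block comment.
	 */
	b = id_allocff(ids);

	id_free(ids, a);
	id_free(ids, b);
	id_free(ids, c);
	id_space_destroy(ids);
}
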
+/*
* Free a previously allocated ID.
* No restrictions on caller's context.
*/
diff --git a/usr/src/uts/common/os/modctl.c b/usr/src/uts/common/os/modctl.c
index d21d601c90..08d88fed17 100644
--- a/usr/src/uts/common/os/modctl.c
+++ b/usr/src/uts/common/os/modctl.c
@@ -661,7 +661,7 @@ modctl_update_driver_aliases(int add, int *data)
* driver now exists.
*/
i_ddi_bind_devs();
- i_ddi_di_cache_invalidate(KM_SLEEP);
+ i_ddi_di_cache_invalidate();
error:
if (mc.num_aliases > 0) {
@@ -711,7 +711,7 @@ modctl_rem_major(major_t major)
(void) i_ddi_unload_drvconf(major);
i_ddi_unbind_devs(major);
i_ddi_bind_devs();
- i_ddi_di_cache_invalidate(KM_SLEEP);
+ i_ddi_di_cache_invalidate();
/* purge all the bindings to this driver */
purge_mbind(major, mb_hashtab);
diff --git a/usr/src/uts/common/os/sunddi.c b/usr/src/uts/common/os/sunddi.c
index 330399f31e..18a79c23d1 100644
--- a/usr/src/uts/common/os/sunddi.c
+++ b/usr/src/uts/common/os/sunddi.c
@@ -83,6 +83,7 @@
#include <net/if.h>
#include <sys/rctl.h>
#include <sys/zone.h>
+#include <sys/ddi.h>
extern pri_t minclsyspri;
@@ -106,7 +107,6 @@ static kthread_t *ddi_umem_unlock_thread;
static struct ddi_umem_cookie *ddi_umem_unlock_head = NULL;
static struct ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
-
/*
* DDI(Sun) Function and flag definitions:
*/
@@ -4597,7 +4597,7 @@ i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
/* for now we invalidate the entire cached snapshot */
if (dip && dp)
- i_ddi_di_cache_invalidate(KM_SLEEP);
+ i_ddi_di_cache_invalidate();
}
/* ARGSUSED */
@@ -4605,7 +4605,7 @@ void
ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
{
/* for now we invalidate the entire cached snapshot */
- i_ddi_di_cache_invalidate(KM_SLEEP);
+ i_ddi_di_cache_invalidate();
}
@@ -5750,7 +5750,7 @@ i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
- i_ddi_di_cache_invalidate(kmem_flag);
+ i_ddi_di_cache_invalidate();
#ifdef DEBUG
if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
@@ -5857,7 +5857,7 @@ i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
return (DDI_SUCCESS);
}
- i_ddi_di_cache_invalidate(KM_SLEEP);
+ i_ddi_di_cache_invalidate();
ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
if (ev == NULL) {
@@ -6244,10 +6244,10 @@ ddi_fls(long mask)
}
/*
- * The next five routines comprise generic storage management utilities
- * for driver soft state structures (in "the old days," this was done
- * with a statically sized array - big systems and dynamic loading
- * and unloading make heap allocation more attractive)
+ * The ddi_soft_state_* routines comprise generic storage management utilities
+ * for driver soft state structures (in "the old days," this was done with
+ * statically sized array - big systems and dynamic loading and unloading
+ * make heap allocation more attractive).
*/
/*
@@ -6276,9 +6276,9 @@ ddi_fls(long mask)
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
- struct i_ddi_soft_state *ss;
+ i_ddi_soft_state *ss;
- if (state_p == NULL || *state_p != NULL || size == 0)
+ if (state_p == NULL || size == 0)
return (EINVAL);
ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
@@ -6300,11 +6300,9 @@ ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);
*state_p = ss;
-
return (0);
}
-
/*
* Allocate a state structure of size 'size' to be associated
* with item 'item'.
@@ -6315,11 +6313,11 @@ ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
int
ddi_soft_state_zalloc(void *state, int item)
{
- struct i_ddi_soft_state *ss;
- void **array;
- void *new_element;
+ i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
+ void **array;
+ void *new_element;
- if ((ss = state) == NULL || item < 0)
+ if ((state == NULL) || (item < 0))
return (DDI_FAILURE);
mutex_enter(&ss->lock);
@@ -6350,9 +6348,9 @@ ddi_soft_state_zalloc(void *state, int item)
* Check if the array is big enough, if not, grow it.
*/
if (item >= ss->n_items) {
- void **new_array;
- size_t new_n_items;
- struct i_ddi_soft_state *dirty;
+ void **new_array;
+ size_t new_n_items;
+ struct i_ddi_soft_state *dirty;
/*
* Allocate a new array of the right length, copy
@@ -6405,7 +6403,6 @@ ddi_soft_state_zalloc(void *state, int item)
return (DDI_SUCCESS);
}
-
/*
* Fetch a pointer to the allocated soft state structure.
*
@@ -6428,9 +6425,9 @@ ddi_soft_state_zalloc(void *state, int item)
void *
ddi_get_soft_state(void *state, int item)
{
- struct i_ddi_soft_state *ss = state;
+ i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
- ASSERT(ss != NULL && item >= 0);
+ ASSERT((ss != NULL) && (item >= 0));
if (item < ss->n_items && ss->array != NULL)
return (ss->array[item]);
@@ -6450,12 +6447,12 @@ ddi_get_soft_state(void *state, int item)
void
ddi_soft_state_free(void *state, int item)
{
- struct i_ddi_soft_state *ss;
- void **array;
- void *element;
- static char msg[] = "ddi_soft_state_free:";
+ i_ddi_soft_state *ss = (i_ddi_soft_state *)state;
+ void **array;
+ void *element;
+ static char msg[] = "ddi_soft_state_free:";
- if ((ss = state) == NULL) {
+ if (ss == NULL) {
cmn_err(CE_WARN, "%s null handle: %s",
msg, mod_containing_pc(caller()));
return;
@@ -6482,7 +6479,6 @@ ddi_soft_state_free(void *state, int item)
kmem_free(element, ss->size);
}
-
/*
* Free the entire set of pointers, and any
* soft state structures contained therein.
@@ -6499,11 +6495,12 @@ ddi_soft_state_free(void *state, int item)
void
ddi_soft_state_fini(void **state_p)
{
- struct i_ddi_soft_state *ss, *dirty;
- int item;
- static char msg[] = "ddi_soft_state_fini:";
+ i_ddi_soft_state *ss, *dirty;
+ int item;
+ static char msg[] = "ddi_soft_state_fini:";
- if (state_p == NULL || (ss = *state_p) == NULL) {
+ if (state_p == NULL ||
+ (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
cmn_err(CE_WARN, "%s null handle: %s",
msg, mod_containing_pc(caller()));
return;
@@ -6536,6 +6533,287 @@ ddi_soft_state_fini(void **state_p)
*state_p = NULL;
}
+#define SS_N_ITEMS_PER_HASH 16
+#define SS_MIN_HASH_SZ 16
+#define SS_MAX_HASH_SZ 4096
+
+int
+ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
+ int n_items)
+{
+ i_ddi_soft_state_bystr *sss;
+ int hash_sz;
+
+ ASSERT(state_p && size && n_items);
+ if ((state_p == NULL) || (size == 0) || (n_items == 0))
+ return (EINVAL);
+
+ /* current implementation is based on hash, convert n_items to hash */
+ hash_sz = n_items / SS_N_ITEMS_PER_HASH;
+ if (hash_sz < SS_MIN_HASH_SZ)
+ hash_sz = SS_MIN_HASH_SZ;
+ else if (hash_sz > SS_MAX_HASH_SZ)
+ hash_sz = SS_MAX_HASH_SZ;
+
+ /* allocate soft_state pool */
+ sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
+ sss->ss_size = size;
+ sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
+ hash_sz, mod_hash_null_valdtor);
+ *state_p = (ddi_soft_state_bystr *)sss;
+ return (0);
+}
+
+int
+ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
+{
+ i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
+ void *sso;
+ char *dup_str;
+
+ ASSERT(sss && str && sss->ss_mod_hash);
+ if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
+ return (DDI_FAILURE);
+ sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
+ dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
+ if (mod_hash_insert(sss->ss_mod_hash,
+ (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
+ return (DDI_SUCCESS);
+
+ /*
+ * The only error from a strhash insert is caused by a duplicate key.
+ * We refuse to tread on an existing element, so free and fail.
+ */
+ kmem_free(dup_str, strlen(dup_str) + 1);
+ kmem_free(sso, sss->ss_size);
+ return (DDI_FAILURE);
+}
+
+void *
+ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
+{
+ i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
+ void *sso;
+
+ ASSERT(sss && str && sss->ss_mod_hash);
+ if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
+ return (NULL);
+
+ if (mod_hash_find(sss->ss_mod_hash,
+ (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
+ return (sso);
+ return (NULL);
+}
+
+void
+ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
+{
+ i_ddi_soft_state_bystr *sss = (i_ddi_soft_state_bystr *)state;
+ void *sso;
+
+ ASSERT(sss && str && sss->ss_mod_hash);
+ if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
+ return;
+
+ (void) mod_hash_remove(sss->ss_mod_hash,
+ (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
+ kmem_free(sso, sss->ss_size);
+}
+
+void
+ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
+{
+ i_ddi_soft_state_bystr *sss;
+
+ ASSERT(state_p);
+ if (state_p == NULL)
+ return;
+
+ sss = (i_ddi_soft_state_bystr *)(*state_p);
+ if (sss == NULL)
+ return;
+
+ ASSERT(sss->ss_mod_hash);
+ if (sss->ss_mod_hash) {
+ mod_hash_destroy_strhash(sss->ss_mod_hash);
+ sss->ss_mod_hash = NULL;
+ }
+
+ kmem_free(sss, sizeof (*sss));
+ *state_p = NULL;
+}
+
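A hedged sketch of how a target driver might use the by-string soft state added above, keyed by a unit-address string; the xx_ names, the element type, and the item-count estimate are all illustrative.

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/sunddi.h>

typedef struct xx_tgt {
	int	xt_busy;		/* example per-target field */
} xx_tgt_t;

static ddi_soft_state_bystr *xx_tgt_state;

static int
xx_state_init(void)
{
	/* returns 0 on success, EINVAL on bad arguments (see above) */
	return (ddi_soft_state_bystr_init(&xx_tgt_state,
	    sizeof (xx_tgt_t), 8));
}

static int
xx_tgt_setup(const char *ua)		/* ua: target unit-address string */
{
	xx_tgt_t	*xt;

	/* fails on a duplicate unit address */
	if (ddi_soft_state_bystr_zalloc(xx_tgt_state, ua) != DDI_SUCCESS)
		return (DDI_FAILURE);

	xt = ddi_soft_state_bystr_get(xx_tgt_state, ua);
	ASSERT(xt != NULL);
	xt->xt_busy = 0;
	return (DDI_SUCCESS);
}

static void
xx_tgt_teardown(const char *ua)
{
	ddi_soft_state_bystr_free(xx_tgt_state, ua);
}

static void
xx_state_fini(void)
{
	ddi_soft_state_bystr_fini(&xx_tgt_state);
}
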
+/*
+ * The ddi_strid_* routines provide string-to-index management utilities.
+ */
+/* allocate and initialize an strid set */
+int
+ddi_strid_init(ddi_strid **strid_p, int n_items)
+{
+ i_ddi_strid *ss;
+ int hash_sz;
+
+ if (strid_p == NULL)
+ return (DDI_FAILURE);
+
+ /* current implementation is based on hash, convert n_items to hash */
+ hash_sz = n_items / SS_N_ITEMS_PER_HASH;
+ if (hash_sz < SS_MIN_HASH_SZ)
+ hash_sz = SS_MIN_HASH_SZ;
+ else if (hash_sz > SS_MAX_HASH_SZ)
+ hash_sz = SS_MAX_HASH_SZ;
+
+ ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
+ ss->strid_space = id_space_create("strid", 1, n_items);
+ ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
+ mod_hash_null_valdtor);
+ ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
+ mod_hash_null_valdtor);
+ *strid_p = (ddi_strid *)ss;
+ return (DDI_SUCCESS);
+}
+
+#define ID_FIXED_SIZE 0x1
+
+/* allocate an id mapping within the specified set for str, return id */
+static id_t
+i_ddi_strid_alloc(ddi_strid *strid, char *str, int flags)
+{
+ i_ddi_strid *ss = (i_ddi_strid *)strid;
+ id_t id;
+ char *s;
+
+ ASSERT(ss && str);
+ if ((ss == NULL) || (str == NULL))
+ return (0);
+
+ /*
+ * Allocate an id using VM_FIRSTFIT in order to keep allocated id
+ * range as compressed as possible. This is important to minimize
+ * the amount of space used when the id is used as a ddi_soft_state
+ * index by the caller.
+ *
+ * If ID_FIXED_SIZE, use the _nosleep variant to fail rather
+ * than sleep in id_allocff().
+ */
+ if (flags & ID_FIXED_SIZE) {
+ id = id_allocff_nosleep(ss->strid_space);
+ if (id == (id_t)-1)
+ return (0);
+ } else {
+ id = id_allocff(ss->strid_space);
+ }
+
+ /*
+ * NOTE: since we create and destroy in unison we can save space by
+ * using bystr key as the byid value. This means destroy must occur
+ * in (byid, bystr) order.
+ */
+ s = i_ddi_strdup(str, KM_SLEEP);
+ if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
+ (mod_hash_val_t)(intptr_t)id) != 0) {
+ ddi_strid_free(strid, id);
+ return (0);
+ }
+ if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
+ (mod_hash_val_t)s) != 0) {
+ ddi_strid_free(strid, id);
+ return (0);
+ }
+
+ /* NOTE: s is freed on mod_hash_destroy by mod_hash_strval_dtor */
+ return (id);
+}
+
+/* allocate an id mapping within the specified set for str, return id */
+id_t
+ddi_strid_alloc(ddi_strid *strid, char *str)
+{
+ return (i_ddi_strid_alloc(strid, str, 0));
+}
+
+/* allocate an id mapping within the specified set for str, return id */
+id_t
+ddi_strid_fixed_alloc(ddi_strid *strid, char *str)
+{
+ return (i_ddi_strid_alloc(strid, str, ID_FIXED_SIZE));
+}
+
+/* return the id within the specified strid given the str */
+id_t
+ddi_strid_str2id(ddi_strid *strid, char *str)
+{
+ i_ddi_strid *ss = (i_ddi_strid *)strid;
+ id_t id = 0;
+ mod_hash_val_t hv;
+
+ ASSERT(ss && str);
+ if (ss && str && (mod_hash_find(ss->strid_bystr,
+ (mod_hash_key_t)str, &hv) == 0))
+ id = (int)(intptr_t)hv;
+ return (id);
+}
+
+/* return str within the specified strid given the id */
+char *
+ddi_strid_id2str(ddi_strid *strid, id_t id)
+{
+ i_ddi_strid *ss = (i_ddi_strid *)strid;
+ char *str = NULL;
+ mod_hash_val_t hv;
+
+ ASSERT(ss && id > 0);
+ if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
+ (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
+ str = (char *)hv;
+ return (str);
+}
+
+/* free the id mapping within the specified strid */
+void
+ddi_strid_free(ddi_strid *strid, id_t id)
+{
+ i_ddi_strid *ss = (i_ddi_strid *)strid;
+ char *str;
+
+ ASSERT(ss && id > 0);
+ if ((ss == NULL) || (id <= 0))
+ return;
+
+ /* bystr key is byid value: destroy order must be (byid, bystr) */
+ str = ddi_strid_id2str(strid, id);
+ (void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
+ id_free(ss->strid_space, id);
+
+ if (str)
+ (void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
+}
+
+/* destroy the strid set */
+void
+ddi_strid_fini(ddi_strid **strid_p)
+{
+ i_ddi_strid *ss;
+
+ ASSERT(strid_p);
+ if (strid_p == NULL)
+ return;
+
+ ss = (i_ddi_strid *)(*strid_p);
+ if (ss == NULL)
+ return;
+
+ /* bystr key is byid value: destroy order must be (byid, bystr) */
+ if (ss->strid_byid)
+ mod_hash_destroy_hash(ss->strid_byid);
+ if (ss->strid_bystr)
+ mod_hash_destroy_hash(ss->strid_bystr);
+ if (ss->strid_space)
+ id_space_destroy(ss->strid_space);
+ kmem_free(ss, sizeof (*ss));
+ *strid_p = NULL;
+}
+
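And a companion sketch for the ddi_strid set: mapping unit-address strings to compact ids (which could then index a conventional ddi_soft_state, as the i_ddi_strid_alloc() comment suggests). The xx_ names and the SAS-style address are made up for illustration.

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/debug.h>
#include <sys/sunddi.h>

static ddi_strid *xx_strid;

static void
xx_strid_example(void)
{
	char	ua[] = "w5000c50001234567,0";	/* hypothetical unit address */
	id_t	id;

	if (ddi_strid_init(&xx_strid, 128) != DDI_SUCCESS)
		return;

	id = ddi_strid_alloc(xx_strid, ua);	/* 0 means allocation failed */
	if (id != 0) {
		/* the mapping works in both directions */
		ASSERT(ddi_strid_str2id(xx_strid, ua) == id);
		ASSERT(strcmp(ddi_strid_id2str(xx_strid, id), ua) == 0);
		ddi_strid_free(xx_strid, id);
	}

	ddi_strid_fini(&xx_strid);
}
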
/*
* This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
* Storage is double buffered to prevent updates during devi_addr use -
diff --git a/usr/src/uts/common/os/sunmdi.c b/usr/src/uts/common/os/sunmdi.c
index 1f620f3b74..fe8d854afa 100644
--- a/usr/src/uts/common/os/sunmdi.c
+++ b/usr/src/uts/common/os/sunmdi.c
@@ -73,12 +73,15 @@
#include <sys/debug.h>
int mdi_debug = 1;
int mdi_debug_logonly = 0;
-#define MDI_DEBUG(level, stmnt) \
- if (mdi_debug >= (level)) i_mdi_log stmnt
-static void i_mdi_log(int, dev_info_t *, const char *fmt, ...);
+#define MDI_DEBUG(dbglevel, pargs) if (mdi_debug >= (dbglevel)) i_mdi_log pargs
+#define MDI_WARN CE_WARN, __func__
+#define MDI_NOTE CE_NOTE, __func__
+#define MDI_CONT CE_CONT, __func__
+static void i_mdi_log(int, const char *, dev_info_t *, const char *, ...);
#else /* !DEBUG */
-#define MDI_DEBUG(level, stmnt)
+#define MDI_DEBUG(dbglevel, pargs)
#endif /* DEBUG */
+int mdi_debug_consoleonly = 0;
extern pri_t minclsyspri;
extern int modrootloaded;
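
For readers tracking the debug-macro change above: MDI_WARN/MDI_NOTE/MDI_CONT bundle the cmn_err level with __func__, so call sites no longer spell out the function name by hand. A sketch of how one of the converted calls in this patch expands on a DEBUG kernel:

	/* call site, as used throughout this change */
	MDI_DEBUG(1, (MDI_WARN, pdip, "!not a valid pHCI"));

	/* after preprocessing */
	if (mdi_debug >= (1))
		i_mdi_log(CE_WARN, __func__, pdip, "!not a valid pHCI");

i_mdi_log() then prefixes "mdi: <driver><instance>: <function>: " and supplies the trailing newline, per the note in its block comment below.
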
@@ -167,6 +170,7 @@ static int mdi_pathmap_hash_size = 256;
static kmutex_t mdi_pathmap_mutex;
static mod_hash_t *mdi_pathmap_bypath; /* "path"->instance */
static mod_hash_t *mdi_pathmap_byinstance; /* instance->"path" */
+static mod_hash_t *mdi_pathmap_sbyinstance; /* inst->shortpath */
/*
* MDI component property name/value string definitions
@@ -333,6 +337,9 @@ i_mdi_init()
mdi_pathmap_byinstance = mod_hash_create_idhash(
"mdi_pathmap_byinstance", mdi_pathmap_hash_size,
mod_hash_null_valdtor);
+ mdi_pathmap_sbyinstance = mod_hash_create_idhash(
+ "mdi_pathmap_sbyinstance", mdi_pathmap_hash_size,
+ mod_hash_null_valdtor);
}
/*
@@ -618,7 +625,6 @@ mdi_phci_register(char *class, dev_info_t *pdip, int flags)
mdi_phci_t *ph;
mdi_vhci_t *vh;
char *data;
- char *pathname;
/*
* Some subsystems, like fcp, perform pHCI registration from a
@@ -630,9 +636,6 @@ mdi_phci_register(char *class, dev_info_t *pdip, int flags)
*/
ASSERT(DEVI_BUSY_CHANGING(ddi_get_parent(pdip)));
- pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
- (void) ddi_pathname(pdip, pathname);
-
/*
* Check for mpxio-disable property. Enable mpxio if the property is
* missing or not set to "yes".
@@ -641,20 +644,15 @@ mdi_phci_register(char *class, dev_info_t *pdip, int flags)
if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, 0, "mpxio-disable",
&data) == DDI_SUCCESS)) {
if (strcmp(data, "yes") == 0) {
- MDI_DEBUG(1, (CE_CONT, pdip,
- "?%s (%s%d) multipath capabilities "
- "disabled via %s.conf.\n", pathname,
- ddi_driver_name(pdip), ddi_get_instance(pdip),
+ MDI_DEBUG(1, (MDI_CONT, pdip,
+ "?multipath capabilities disabled via %s.conf.",
ddi_driver_name(pdip)));
ddi_prop_free(data);
- kmem_free(pathname, MAXPATHLEN);
return (MDI_FAILURE);
}
ddi_prop_free(data);
}
- kmem_free(pathname, MAXPATHLEN);
-
/*
* Search for a matching vHCI
*/
@@ -713,21 +711,20 @@ mdi_phci_unregister(dev_info_t *pdip, int flags)
mdi_phci_t *ph;
mdi_phci_t *tmp;
mdi_phci_t *prev = NULL;
+ mdi_pathinfo_t *pip;
ASSERT(DEVI_BUSY_CHANGING(ddi_get_parent(pdip)));
ph = i_devi_get_phci(pdip);
if (ph == NULL) {
- MDI_DEBUG(1, (CE_WARN, pdip,
- "!pHCI unregister: Not a valid pHCI"));
+ MDI_DEBUG(1, (MDI_WARN, pdip, "!not a valid pHCI"));
return (MDI_FAILURE);
}
vh = ph->ph_vhci;
ASSERT(vh != NULL);
if (vh == NULL) {
- MDI_DEBUG(1, (CE_WARN, pdip,
- "!pHCI unregister: Not a valid vHCI"));
+ MDI_DEBUG(1, (MDI_WARN, pdip, "!not a valid vHCI"));
return (MDI_FAILURE);
}
@@ -754,6 +751,13 @@ mdi_phci_unregister(dev_info_t *pdip, int flags)
vh->vh_phci_count--;
MDI_VHCI_PHCI_UNLOCK(vh);
+ /* Walk remaining pathinfo nodes and disassociate them from pHCI */
+ MDI_PHCI_LOCK(ph);
+ for (pip = (mdi_pathinfo_t *)ph->ph_path_head; pip;
+ pip = (mdi_pathinfo_t *)MDI_PI(pip)->pi_phci_link)
+ MDI_PI(pip)->pi_phci = NULL;
+ MDI_PHCI_UNLOCK(ph);
+
i_mdi_log_sysevent(pdip, ph->ph_vhci->vh_class,
ESC_DDI_INITIATOR_UNREGISTER);
vhcache_phci_remove(vh->vh_config, ph);
@@ -823,8 +827,15 @@ mdi_devi_enter(dev_info_t *phci_dip, int *circular)
} else if (DEVI_IS_DETACHING(phci_dip)) {
vcircular = -1;
break;
+ } else if (servicing_interrupt()) {
+ /*
+ * Don't delay an interrupt (and ensure adaptive
+ * mutex inversion support).
+ */
+ ndi_devi_enter(vdip, &vcircular);
+ break;
} else {
- delay(1);
+ delay_random(2);
}
}
@@ -896,6 +907,9 @@ mdi_devi_exit_phci(dev_info_t *phci_dip, int circular)
/* Verify calling context */
ASSERT(MDI_PHCI(phci_dip));
+ /* Keep hold on pHCI until we reenter in mdi_devi_enter_phci */
+ ndi_hold_devi(phci_dip);
+
pcircular = (short)(circular & 0xFFFF);
ndi_devi_exit(phci_dip, pcircular);
}
@@ -910,6 +924,9 @@ mdi_devi_enter_phci(dev_info_t *phci_dip, int *circular)
ndi_devi_enter(phci_dip, &pcircular);
+ /* Drop hold from mdi_devi_exit_phci. */
+ ndi_rele_devi(phci_dip);
+
/* verify matching mdi_devi_exit_phci/mdi_devi_enter_phci use */
ASSERT(pcircular == ((short)(*circular & 0xFFFF)));
}
@@ -1037,15 +1054,24 @@ i_mdi_phci_lock(mdi_phci_t *ph, mdi_pathinfo_t *pip)
if (pip) {
/* Reverse locking is requested. */
while (MDI_PHCI_TRYLOCK(ph) == 0) {
- /*
- * tryenter failed. Try to grab again
- * after a small delay
- */
- MDI_PI_HOLD(pip);
- MDI_PI_UNLOCK(pip);
- delay(1);
- MDI_PI_LOCK(pip);
- MDI_PI_RELE(pip);
+ if (servicing_interrupt()) {
+ MDI_PI_HOLD(pip);
+ MDI_PI_UNLOCK(pip);
+ MDI_PHCI_LOCK(ph);
+ MDI_PI_LOCK(pip);
+ MDI_PI_RELE(pip);
+ break;
+ } else {
+ /*
+ * tryenter failed. Try to grab again
+ * after a small delay
+ */
+ MDI_PI_HOLD(pip);
+ MDI_PI_UNLOCK(pip);
+ delay_random(2);
+ MDI_PI_LOCK(pip);
+ MDI_PI_RELE(pip);
+ }
}
} else {
MDI_PHCI_LOCK(ph);
@@ -1083,8 +1109,8 @@ i_mdi_devinfo_create(mdi_vhci_t *vh, char *name, char *guid,
ASSERT(cdip == NULL);
if (cdip) {
cmn_err(CE_WARN,
- "i_mdi_devinfo_create: client dip %p already exists",
- (void *)cdip);
+ "i_mdi_devinfo_create: client %s@%s already exists",
+ name ? name : "", guid ? guid : "");
}
ndi_devi_alloc_sleep(vh->vh_dip, name, DEVI_SID_NODEID, &cdip);
@@ -1169,10 +1195,10 @@ i_mdi_devinfo_remove(dev_info_t *vdip, dev_info_t *cdip, int flags)
if (i_mdi_is_child_present(vdip, cdip) == MDI_SUCCESS ||
(flags & MDI_CLIENT_FLAGS_DEV_NOT_SUPPORTED)) {
- rv = ndi_devi_offline(cdip, NDI_DEVI_REMOVE);
+ rv = ndi_devi_offline(cdip, NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);
if (rv != NDI_SUCCESS) {
- MDI_DEBUG(1, (CE_NOTE, NULL, "!i_mdi_devinfo_remove:"
- " failed. cdip = %p\n", (void *)cdip));
+ MDI_DEBUG(1, (MDI_NOTE, cdip,
+ "!failed: cdip %p", (void *)cdip));
}
/*
* Convert to MDI error code
@@ -1252,15 +1278,24 @@ i_mdi_client_lock(mdi_client_t *ct, mdi_pathinfo_t *pip)
* Reverse locking is requested.
*/
while (MDI_CLIENT_TRYLOCK(ct) == 0) {
- /*
- * tryenter failed. Try to grab again
- * after a small delay
- */
- MDI_PI_HOLD(pip);
- MDI_PI_UNLOCK(pip);
- delay(1);
- MDI_PI_LOCK(pip);
- MDI_PI_RELE(pip);
+ if (servicing_interrupt()) {
+ MDI_PI_HOLD(pip);
+ MDI_PI_UNLOCK(pip);
+ MDI_CLIENT_LOCK(ct);
+ MDI_PI_LOCK(pip);
+ MDI_PI_RELE(pip);
+ break;
+ } else {
+ /*
+ * tryenter failed. Try to grab again
+ * after a small delay
+ */
+ MDI_PI_HOLD(pip);
+ MDI_PI_UNLOCK(pip);
+ delay_random(2);
+ MDI_PI_LOCK(pip);
+ MDI_PI_RELE(pip);
+ }
}
} else {
MDI_CLIENT_LOCK(ct);
@@ -1543,8 +1578,8 @@ i_mdi_client_compute_state(mdi_client_t *ct, mdi_phci_t *ph)
if (online_count == 0) {
if (standby_count == 0) {
state = MDI_CLIENT_STATE_FAILED;
- MDI_DEBUG(2, (CE_NOTE, NULL, "!client state: failed"
- " ct = %p\n", (void *)ct));
+ MDI_DEBUG(2, (MDI_NOTE, ct->ct_dip,
+ "client state failed: ct = %p", (void *)ct));
} else if (standby_count == 1) {
state = MDI_CLIENT_STATE_DEGRADED;
} else {
@@ -1937,15 +1972,9 @@ i_mdi_lba_lb(mdi_client_t *ct, mdi_pathinfo_t **ret_pip, struct buf *bp)
MDI_PI_UNLOCK(pip);
pip = next;
}
- if (pip == NULL) {
- MDI_DEBUG(4, (CE_NOTE, NULL,
- "!lba %llx, no pip !!\n",
- bp->b_lblkno));
- } else {
- MDI_DEBUG(4, (CE_NOTE, NULL,
- "!lba %llx, no pip for path_index, "
- "pip %p\n", bp->b_lblkno, (void *)pip));
- }
+ MDI_DEBUG(4, (MDI_NOTE, ct->ct_dip,
+ "lba %llx: path %s %p",
+ bp->b_lblkno, mdi_pi_spathname(pip), (void *)pip));
}
return (MDI_FAILURE);
}
@@ -2026,7 +2055,6 @@ mdi_select_path(dev_info_t *cdip, struct buf *bp, int flags,
/* determine type of arg based on flags */
if (flags & MDI_SELECT_PATH_INSTANCE) {
- flags &= ~MDI_SELECT_PATH_INSTANCE;
path_instance = (int)(intptr_t)arg;
start_pip = NULL;
} else {
@@ -2056,8 +2084,8 @@ mdi_select_path(dev_info_t *cdip, struct buf *bp, int flags,
* Client is not ready to accept any I/O requests.
* Fail this request.
*/
- MDI_DEBUG(2, (CE_NOTE, cdip, "!mdi_select_path: "
- "client state offline ct = %p\n", (void *)ct));
+ MDI_DEBUG(2, (MDI_NOTE, cdip,
+ "client state offline ct = %p", (void *)ct));
MDI_CLIENT_UNLOCK(ct);
return (MDI_FAILURE);
}
@@ -2067,8 +2095,8 @@ mdi_select_path(dev_info_t *cdip, struct buf *bp, int flags,
* Check for Failover is in progress. If so tell the
* caller that this device is busy.
*/
- MDI_DEBUG(2, (CE_NOTE, cdip, "!mdi_select_path: "
- "client failover in progress ct = %p\n",
+ MDI_DEBUG(2, (MDI_NOTE, cdip,
+ "client failover in progress ct = %p",
(void *)ct));
MDI_CLIENT_UNLOCK(ct);
return (MDI_BUSY);
@@ -2080,8 +2108,8 @@ mdi_select_path(dev_info_t *cdip, struct buf *bp, int flags,
* (standby) and let the probe/attach process to continue.
*/
if (MDI_CLIENT_IS_DETACHED(ct) || !i_ddi_devi_attached(cdip)) {
- MDI_DEBUG(4, (CE_NOTE, cdip, "!Devi is onlining "
- "ct = %p\n", (void *)ct));
+ MDI_DEBUG(4, (MDI_NOTE, cdip,
+ "devi is onlining ct = %p", (void *)ct));
MDI_CLIENT_UNLOCK(ct);
return (MDI_DEVI_ONLINING);
}
@@ -2111,9 +2139,19 @@ mdi_select_path(dev_info_t *cdip, struct buf *bp, int flags,
return (MDI_FAILURE);
}
- /* verify state of path */
+ /*
+ * Verify state of path. When asked to select a specific
+ * path_instance, we select the requested path in any
+ * state (ONLINE, OFFLINE, STANDBY, FAULT) other than INIT.
+ * We don't however select paths where the pHCI has detached.
+ * NOTE: last pathinfo node of an opened client device may
+ * exist in an OFFLINE state after the pHCI associated with
+ * that path has detached (but pi_phci will be NULL if that
+ * has occurred).
+ */
MDI_PI_LOCK(pip);
- if (MDI_PI(pip)->pi_state != MDI_PATHINFO_STATE_ONLINE) {
+ if ((MDI_PI(pip)->pi_state == MDI_PATHINFO_STATE_INIT) ||
+ (MDI_PI(pip)->pi_phci == NULL)) {
MDI_PI_UNLOCK(pip);
MDI_CLIENT_UNLOCK(ct);
return (MDI_FAILURE);
@@ -2125,7 +2163,6 @@ mdi_select_path(dev_info_t *cdip, struct buf *bp, int flags,
*/
MDI_PI_HOLD(pip);
MDI_PI_UNLOCK(pip);
- ct->ct_path_last = pip;
*ret_pip = pip;
MDI_CLIENT_UNLOCK(ct);
return (MDI_SUCCESS);
@@ -2213,7 +2250,7 @@ mdi_select_path(dev_info_t *cdip, struct buf *bp, int flags,
return (MDI_SUCCESS);
}
}
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
case LOAD_BALANCE_RR:
/*
* Load balancing is Round Robin. Start looking for a online
@@ -2598,8 +2635,8 @@ mdi_pi_find(dev_info_t *pdip, char *caddr, char *paddr)
mdi_client_t *ct;
mdi_pathinfo_t *pip = NULL;
- MDI_DEBUG(2, (CE_NOTE, pdip, "!mdi_pi_find: %s %s",
- caddr ? caddr : "NULL", paddr ? paddr : "NULL"));
+ MDI_DEBUG(2, (MDI_NOTE, pdip,
+ "caddr@%s paddr@%s", caddr ? caddr : "", paddr ? paddr : ""));
if ((pdip == NULL) || (paddr == NULL)) {
return (NULL);
}
@@ -2608,8 +2645,7 @@ mdi_pi_find(dev_info_t *pdip, char *caddr, char *paddr)
/*
* Invalid pHCI device, Nothing more to do.
*/
- MDI_DEBUG(2, (CE_WARN, pdip,
- "!mdi_pi_find: invalid phci"));
+ MDI_DEBUG(2, (MDI_WARN, pdip, "invalid phci"));
return (NULL);
}
@@ -2618,8 +2654,7 @@ mdi_pi_find(dev_info_t *pdip, char *caddr, char *paddr)
/*
* Invalid vHCI device, Nothing more to do.
*/
- MDI_DEBUG(2, (CE_WARN, pdip,
- "!mdi_pi_find: invalid vhci"));
+ MDI_DEBUG(2, (MDI_WARN, pdip, "invalid vhci"));
return (NULL);
}
@@ -2633,8 +2668,8 @@ mdi_pi_find(dev_info_t *pdip, char *caddr, char *paddr)
*/
MDI_PHCI_LOCK(ph);
if (MDI_PHCI_IS_OFFLINE(ph)) {
- MDI_DEBUG(2, (CE_WARN, pdip,
- "!mdi_pi_find: offline phci %p", (void *)ph));
+ MDI_DEBUG(2, (MDI_WARN, pdip,
+ "offline phci %p", (void *)ph));
MDI_PHCI_UNLOCK(ph);
return (NULL);
}
@@ -2647,8 +2682,8 @@ mdi_pi_find(dev_info_t *pdip, char *caddr, char *paddr)
pip = (mdi_pathinfo_t *)MDI_PI(pip)->pi_phci_link;
}
MDI_PHCI_UNLOCK(ph);
- MDI_DEBUG(2, (CE_NOTE, pdip, "!mdi_pi_find: found %p",
- (void *)pip));
+ MDI_DEBUG(2, (MDI_NOTE, pdip,
+ "found %s %p", mdi_pi_spathname(pip), (void *)pip));
return (pip);
}
@@ -2676,8 +2711,8 @@ mdi_pi_find(dev_info_t *pdip, char *caddr, char *paddr)
* created yet.
*/
MDI_VHCI_CLIENT_UNLOCK(vh);
- MDI_DEBUG(2, (CE_NOTE, pdip, "!mdi_pi_find: client not "
- "found for caddr %s", caddr ? caddr : "NULL"));
+ MDI_DEBUG(2, (MDI_NOTE, pdip,
+ "client not found for caddr @%s", caddr ? caddr : ""));
return (NULL);
}
@@ -2705,7 +2740,8 @@ mdi_pi_find(dev_info_t *pdip, char *caddr, char *paddr)
pip = (mdi_pathinfo_t *)MDI_PI(pip)->pi_client_link;
}
MDI_CLIENT_UNLOCK(ct);
- MDI_DEBUG(2, (CE_NOTE, pdip, "!mdi_pi_find: found:: %p", (void *)pip));
+ MDI_DEBUG(2, (MDI_NOTE, pdip,
+ "found: %s %p", mdi_pi_spathname(pip), (void *)pip));
return (pip);
}
@@ -2742,9 +2778,9 @@ mdi_pi_alloc_compatible(dev_info_t *pdip, char *cname, char *caddr, char *paddr,
int rv = MDI_NOMEM;
int path_allocated = 0;
- MDI_DEBUG(2, (CE_NOTE, pdip, "!mdi_pi_alloc_compatible: %s %s %s",
- cname ? cname : "NULL", caddr ? caddr : "NULL",
- paddr ? paddr : "NULL"));
+ MDI_DEBUG(2, (MDI_NOTE, pdip,
+ "cname %s: caddr@%s paddr@%s",
+ cname ? cname : "", caddr ? caddr : "", paddr ? paddr : ""));
if (pdip == NULL || cname == NULL || caddr == NULL || paddr == NULL ||
ret_pip == NULL) {
@@ -2757,8 +2793,8 @@ mdi_pi_alloc_compatible(dev_info_t *pdip, char *cname, char *caddr, char *paddr,
/* No allocations on detaching pHCI */
if (DEVI_IS_DETACHING(pdip)) {
/* Invalid pHCI device, return failure */
- MDI_DEBUG(1, (CE_WARN, pdip,
- "!mdi_pi_alloc: detaching pHCI=%p", (void *)pdip));
+ MDI_DEBUG(1, (MDI_WARN, pdip,
+ "!detaching pHCI=%p", (void *)pdip));
return (MDI_FAILURE);
}
@@ -2766,8 +2802,8 @@ mdi_pi_alloc_compatible(dev_info_t *pdip, char *cname, char *caddr, char *paddr,
ASSERT(ph != NULL);
if (ph == NULL) {
/* Invalid pHCI device, return failure */
- MDI_DEBUG(1, (CE_WARN, pdip,
- "!mdi_pi_alloc: invalid pHCI=%p", (void *)pdip));
+ MDI_DEBUG(1, (MDI_WARN, pdip,
+ "!invalid pHCI=%p", (void *)pdip));
return (MDI_FAILURE);
}
@@ -2775,8 +2811,8 @@ mdi_pi_alloc_compatible(dev_info_t *pdip, char *cname, char *caddr, char *paddr,
vh = ph->ph_vhci;
if (vh == NULL) {
/* Invalid vHCI device, return failure */
- MDI_DEBUG(1, (CE_WARN, pdip,
- "!mdi_pi_alloc: invalid vHCI=%p", (void *)pdip));
+ MDI_DEBUG(1, (MDI_WARN, pdip,
+ "!invalid vHCI=%p", (void *)pdip));
MDI_PHCI_UNLOCK(ph);
return (MDI_FAILURE);
}
@@ -2786,8 +2822,8 @@ mdi_pi_alloc_compatible(dev_info_t *pdip, char *cname, char *caddr, char *paddr,
* Do not allow new node creation when pHCI is in
* offline/suspended states
*/
- MDI_DEBUG(1, (CE_WARN, pdip,
- "mdi_pi_alloc: pHCI=%p is not ready", (void *)ph));
+ MDI_DEBUG(1, (MDI_WARN, pdip,
+ "pHCI=%p is not ready", (void *)ph));
MDI_PHCI_UNLOCK(ph);
return (MDI_BUSY);
}
@@ -2857,8 +2893,8 @@ fail:
MDI_PHCI_UNLOCK(ph);
*ret_pip = pip;
- MDI_DEBUG(2, (CE_NOTE, pdip,
- "!mdi_pi_alloc_compatible: alloc %p", (void *)pip));
+ MDI_DEBUG(2, (MDI_NOTE, pdip,
+ "alloc %s %p", mdi_pi_spathname(pip), (void *)pip));
if (path_allocated)
vhcache_pi_add(vh->vh_config, MDI_PI(pip));
@@ -2888,7 +2924,7 @@ i_mdi_pi_alloc(mdi_phci_t *ph, char *paddr, mdi_client_t *ct)
mdi_pathinfo_t *pip;
int ct_circular;
int ph_circular;
- static char path[MAXPATHLEN];
+ static char path[MAXPATHLEN]; /* mdi_pathmap_mutex protects */
char *path_persistent;
int path_instance;
mod_hash_val_t hv;
@@ -2927,7 +2963,7 @@ i_mdi_pi_alloc(mdi_phci_t *ph, char *paddr, mdi_client_t *ct)
*/
mutex_enter(&mdi_pathmap_mutex);
(void) ddi_pathname(ph->ph_dip, path);
- (void) sprintf(path + strlen(path), "/%s@%s",
+ (void) sprintf(path + strlen(path), "/%s@%s",
mdi_pi_get_node_name(pip), mdi_pi_get_addr(pip));
if (mod_hash_find(mdi_pathmap_bypath, (mod_hash_key_t)path, &hv) == 0) {
path_instance = (uint_t)(intptr_t)hv;
@@ -2941,6 +2977,15 @@ i_mdi_pi_alloc(mdi_phci_t *ph, char *paddr, mdi_client_t *ct)
(void) mod_hash_insert(mdi_pathmap_byinstance,
(mod_hash_key_t)(intptr_t)path_instance,
(mod_hash_val_t)path_persistent);
+
+ /* create shortpath name */
+ (void) snprintf(path, sizeof(path), "%s%d/%s@%s",
+ ddi_driver_name(ph->ph_dip), ddi_get_instance(ph->ph_dip),
+ mdi_pi_get_node_name(pip), mdi_pi_get_addr(pip));
+ path_persistent = i_ddi_strdup(path, KM_SLEEP);
+ (void) mod_hash_insert(mdi_pathmap_sbyinstance,
+ (mod_hash_key_t)(intptr_t)path_instance,
+ (mod_hash_val_t)path_persistent);
}
mutex_exit(&mdi_pathmap_mutex);
MDI_PI(pip)->pi_path_instance = path_instance;
@@ -3000,6 +3045,29 @@ mdi_pi_pathname_by_instance(int path_instance)
}
/*
+ * mdi_pi_spathname_by_instance():
+ * Lookup of "shortpath" by 'path_instance'. Return "shortpath".
+ * NOTE: returned "shortpath" remains valid forever (until reboot).
+ */
+char *
+mdi_pi_spathname_by_instance(int path_instance)
+{
+ char *path;
+ mod_hash_val_t hv;
+
+ /* mdi_pathmap lookup of "path" by 'path_instance' */
+ mutex_enter(&mdi_pathmap_mutex);
+ if (mod_hash_find(mdi_pathmap_sbyinstance,
+ (mod_hash_key_t)(intptr_t)path_instance, &hv) == 0)
+ path = (char *)hv;
+ else
+ path = NULL;
+ mutex_exit(&mdi_pathmap_mutex);
+ return (path);
+}
+
+
+/*
* i_mdi_phci_add_path():
* Add a mdi_pathinfo node to pHCI list.
* Notes:
@@ -3068,8 +3136,9 @@ mdi_pi_free(mdi_pathinfo_t *pip, int flags)
/*
* Invalid pHCI device, return failure
*/
- MDI_DEBUG(1, (CE_WARN, NULL,
- "!mdi_pi_free: invalid pHCI pip=%p", (void *)pip));
+ MDI_DEBUG(1, (MDI_WARN, NULL,
+ "!invalid pHCI: pip %s %p",
+ mdi_pi_spathname(pip), (void *)pip));
MDI_PI_UNLOCK(pip);
return (MDI_FAILURE);
}
@@ -3078,8 +3147,9 @@ mdi_pi_free(mdi_pathinfo_t *pip, int flags)
ASSERT(vh != NULL);
if (vh == NULL) {
/* Invalid pHCI device, return failure */
- MDI_DEBUG(1, (CE_WARN, NULL,
- "!mdi_pi_free: invalid vHCI pip=%p", (void *)pip));
+ MDI_DEBUG(1, (MDI_WARN, ph->ph_dip,
+ "!invalid vHCI: pip %s %p",
+ mdi_pi_spathname(pip), (void *)pip));
MDI_PI_UNLOCK(pip);
return (MDI_FAILURE);
}
@@ -3090,8 +3160,9 @@ mdi_pi_free(mdi_pathinfo_t *pip, int flags)
/*
* Invalid Client device, return failure
*/
- MDI_DEBUG(1, (CE_WARN, NULL,
- "!mdi_pi_free: invalid client pip=%p", (void *)pip));
+ MDI_DEBUG(1, (MDI_WARN, ph->ph_dip,
+ "!invalid client: pip %s %p",
+ mdi_pi_spathname(pip), (void *)pip));
MDI_PI_UNLOCK(pip);
return (MDI_FAILURE);
}
@@ -3106,8 +3177,8 @@ mdi_pi_free(mdi_pathinfo_t *pip, int flags)
/*
* Node is busy
*/
- MDI_DEBUG(1, (CE_WARN, ct->ct_dip,
- "!mdi_pi_free: pathinfo node is busy pip=%p", (void *)pip));
+ MDI_DEBUG(1, (MDI_WARN, ct->ct_dip,
+ "!busy: pip %s %p", mdi_pi_spathname(pip), (void *)pip));
MDI_PI_UNLOCK(pip);
return (MDI_BUSY);
}
@@ -3116,9 +3187,10 @@ mdi_pi_free(mdi_pathinfo_t *pip, int flags)
/*
* Give a chance for pending I/Os to complete.
*/
- MDI_DEBUG(1, (CE_NOTE, ct->ct_dip, "!mdi_pi_free: "
- "%d cmds still pending on path: %p\n",
- MDI_PI(pip)->pi_ref_cnt, (void *)pip));
+ MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
+ "!%d cmds still pending on path: %s %p",
+ MDI_PI(pip)->pi_ref_cnt,
+ mdi_pi_spathname(pip), (void *)pip));
if (cv_timedwait(&MDI_PI(pip)->pi_ref_cv,
&MDI_PI(pip)->pi_mutex,
ddi_get_lbolt() + drv_usectohz(60 * 1000000)) == -1) {
@@ -3126,14 +3198,13 @@ mdi_pi_free(mdi_pathinfo_t *pip, int flags)
* The timeout time reached without ref_cnt being zero
* being signaled.
*/
- MDI_DEBUG(1, (CE_NOTE, ct->ct_dip,
- "!mdi_pi_free: "
- "Timeout reached on path %p without the cond\n",
- (void *)pip));
- MDI_DEBUG(1, (CE_NOTE, ct->ct_dip,
- "!mdi_pi_free: "
- "%d cmds still pending on path: %p\n",
- MDI_PI(pip)->pi_ref_cnt, (void *)pip));
+ MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
+ "!Timeout reached on path %s %p without the cond",
+ mdi_pi_spathname(pip), (void *)pip));
+ MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
+ "!%d cmds still pending on path %s %p",
+ MDI_PI(pip)->pi_ref_cnt,
+ mdi_pi_spathname(pip), (void *)pip));
MDI_PI_UNLOCK(pip);
return (MDI_BUSY);
}
@@ -3172,7 +3243,7 @@ mdi_pi_free(mdi_pathinfo_t *pip, int flags)
*/
if (rv == MDI_SUCCESS) {
if (client_held) {
- MDI_DEBUG(4, (CE_NOTE, ct->ct_dip, "mdi_pi_free "
+ MDI_DEBUG(4, (MDI_NOTE, ct->ct_dip,
"i_mdi_pm_rele_client\n"));
i_mdi_pm_rele_client(ct, 1);
}
@@ -3356,8 +3427,9 @@ i_mdi_pi_state_change(mdi_pathinfo_t *pip, mdi_pathinfo_state_t state, int flag)
* Invalid pHCI device, fail the request
*/
MDI_PI_UNLOCK(pip);
- MDI_DEBUG(1, (CE_WARN, NULL,
- "!mdi_pi_state_change: invalid phci pip=%p", (void *)pip));
+ MDI_DEBUG(1, (MDI_WARN, NULL,
+ "!invalid phci: pip %s %p",
+ mdi_pi_spathname(pip), (void *)pip));
return (MDI_FAILURE);
}
@@ -3368,8 +3440,9 @@ i_mdi_pi_state_change(mdi_pathinfo_t *pip, mdi_pathinfo_state_t state, int flag)
* Invalid vHCI device, fail the request
*/
MDI_PI_UNLOCK(pip);
- MDI_DEBUG(1, (CE_WARN, NULL,
- "!mdi_pi_state_change: invalid vhci pip=%p", (void *)pip));
+ MDI_DEBUG(1, (MDI_WARN, ph->ph_dip,
+ "!invalid vhci: pip %s %p",
+ mdi_pi_spathname(pip), (void *)pip));
return (MDI_FAILURE);
}
@@ -3380,9 +3453,9 @@ i_mdi_pi_state_change(mdi_pathinfo_t *pip, mdi_pathinfo_state_t state, int flag)
* Invalid client device, fail the request
*/
MDI_PI_UNLOCK(pip);
- MDI_DEBUG(1, (CE_WARN, NULL,
- "!mdi_pi_state_change: invalid client pip=%p",
- (void *)pip));
+ MDI_DEBUG(1, (MDI_WARN, ph->ph_dip,
+ "!invalid client: pip %s %p",
+ mdi_pi_spathname(pip), (void *)pip));
return (MDI_FAILURE);
}
@@ -3397,9 +3470,10 @@ i_mdi_pi_state_change(mdi_pathinfo_t *pip, mdi_pathinfo_state_t state, int flag)
if (f != NULL) {
rv = (*f)(vh->vh_dip, pip, 0);
if (rv != MDI_SUCCESS) {
- MDI_DEBUG(1, (CE_WARN, ct->ct_dip,
- "!vo_pi_init: failed vHCI=0x%p, pip=0x%p",
- (void *)vh, (void *)pip));
+ MDI_DEBUG(1, (MDI_WARN, ct->ct_dip,
+ "!vo_pi_init failed: vHCI %p, pip %s %p",
+ (void *)vh, mdi_pi_spathname(pip),
+ (void *)pip));
return (MDI_FAILURE);
}
}
@@ -3413,9 +3487,8 @@ i_mdi_pi_state_change(mdi_pathinfo_t *pip, mdi_pathinfo_state_t state, int flag)
*/
i_mdi_phci_lock(ph, pip);
if (MDI_PHCI_IS_READY(ph) == 0) {
- MDI_DEBUG(1, (CE_WARN, ct->ct_dip,
- "!mdi_pi_state_change: pHCI not ready, pHCI=%p",
- (void *)ph));
+ MDI_DEBUG(1, (MDI_WARN, ct->ct_dip,
+ "!pHCI not ready, pHCI=%p", (void *)ph));
MDI_PI_UNLOCK(pip);
i_mdi_phci_unlock(ph);
return (MDI_BUSY);
@@ -3474,15 +3547,18 @@ i_mdi_pi_state_change(mdi_pathinfo_t *pip, mdi_pathinfo_state_t state, int flag)
* ndi_devi_offline() cannot hold pip or ct locks.
*/
MDI_PI_UNLOCK(pip);
+
/*
- * Don't offline the client dev_info node unless we have
- * no available paths left at all.
+ * If this is a user-initiated path online->offline operation
+ * whose success would transition a client from DEGRADED to
+ * FAILED, then only proceed if we can offline the client first.
*/
cdip = ct->ct_dip;
- if ((flag & NDI_DEVI_REMOVE) &&
- (ct->ct_path_count == 1)) {
+ if ((flag & NDI_USER_REQ) &&
+ MDI_PI_IS_ONLINE(pip) &&
+ (MDI_CLIENT_STATE(ct) == MDI_CLIENT_STATE_DEGRADED)) {
i_mdi_client_unlock(ct);
- rv = ndi_devi_offline(cdip, 0);
+ rv = ndi_devi_offline(cdip, NDI_DEVFS_CLEAN);
if (rv != NDI_SUCCESS) {
/*
* Convert to MDI error code
@@ -3521,8 +3597,8 @@ i_mdi_pi_state_change(mdi_pathinfo_t *pip, mdi_pathinfo_state_t state, int flag)
MDI_CLIENT_SET_DEV_NOT_SUPPORTED(ct);
}
if (rv != MDI_SUCCESS) {
- MDI_DEBUG(2, (CE_WARN, ct->ct_dip,
- "!vo_pi_state_change: failed rv = %x", rv));
+ MDI_DEBUG(2, (MDI_WARN, ct->ct_dip,
+ "vo_pi_state_change failed: rv %x", rv));
}
if (MDI_PI_IS_TRANSIENT(pip)) {
if (rv == MDI_SUCCESS) {
@@ -3576,9 +3652,9 @@ i_mdi_pi_state_change(mdi_pathinfo_t *pip, mdi_pathinfo_state_t state, int flag)
* Reset client flags to
* offline.
*/
- MDI_DEBUG(1, (CE_WARN, cdip,
- "!ndi_devi_online: failed "
- " Error: %x", rv));
+ MDI_DEBUG(1, (MDI_WARN, cdip,
+ "!ndi_devi_online failed "
+ "error %x", rv));
MDI_CLIENT_SET_OFFLINE(ct);
}
if (rv != NDI_SUCCESS) {
@@ -3596,11 +3672,12 @@ i_mdi_pi_state_change(mdi_pathinfo_t *pip, mdi_pathinfo_state_t state, int flag)
* This is the last path case for
* non-user initiated events.
*/
- if (((flag & NDI_DEVI_REMOVE) == 0) &&
+ if (((flag & NDI_USER_REQ) == 0) &&
cdip && (i_ddi_node_state(cdip) >=
DS_INITIALIZED)) {
MDI_CLIENT_UNLOCK(ct);
- rv = ndi_devi_offline(cdip, 0);
+ rv = ndi_devi_offline(cdip,
+ NDI_DEVFS_CLEAN);
MDI_CLIENT_LOCK(ct);
if (rv != NDI_SUCCESS) {
@@ -3610,9 +3687,9 @@ i_mdi_pi_state_change(mdi_pathinfo_t *pip, mdi_pathinfo_state_t state, int flag)
* online as the path could not
* be offlined.
*/
- MDI_DEBUG(1, (CE_WARN, cdip,
- "!ndi_devi_offline: failed "
- " Error: %x", rv));
+ MDI_DEBUG(1, (MDI_WARN, cdip,
+ "!ndi_devi_offline failed: "
+ "error %x", rv));
MDI_CLIENT_SET_ONLINE(ct);
}
}
@@ -3663,8 +3740,6 @@ mdi_pi_online(mdi_pathinfo_t *pip, int flags)
mdi_client_t *ct = MDI_PI(pip)->pi_client;
int client_held = 0;
int rv;
- int se_flag;
- int kmem_flag;
ASSERT(ct != NULL);
rv = i_mdi_pi_state_change(pip, MDI_PATHINFO_STATE_ONLINE, flags);
@@ -3673,8 +3748,8 @@ mdi_pi_online(mdi_pathinfo_t *pip, int flags)
MDI_PI_LOCK(pip);
if (MDI_PI(pip)->pi_pm_held == 0) {
- MDI_DEBUG(4, (CE_NOTE, ct->ct_dip, "mdi_pi_online "
- "i_mdi_pm_hold_pip %p\n", (void *)pip));
+ MDI_DEBUG(4, (MDI_NOTE, ct->ct_dip,
+ "i_mdi_pm_hold_pip %p", (void *)pip));
i_mdi_pm_hold_pip(pip);
client_held = 1;
}
@@ -3686,19 +3761,12 @@ mdi_pi_online(mdi_pathinfo_t *pip, int flags)
rv = i_mdi_power_all_phci(ct);
}
- MDI_DEBUG(4, (CE_NOTE, ct->ct_dip, "mdi_pi_online "
- "i_mdi_pm_hold_client %p\n", (void *)ct));
+ MDI_DEBUG(4, (MDI_NOTE, ct->ct_dip,
+ "i_mdi_pm_hold_client %p", (void *)ct));
i_mdi_pm_hold_client(ct, 1);
MDI_CLIENT_UNLOCK(ct);
}
- /* determine interrupt context */
- se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
- kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
-
- /* A new path is online. Invalidate DINFOCACHE snap shot. */
- i_ddi_di_cache_invalidate(kmem_flag);
-
return (rv);
}
@@ -3741,8 +3809,16 @@ mdi_pi_offline(mdi_pathinfo_t *pip, int flags)
{
int ret, client_held = 0;
mdi_client_t *ct;
- int se_flag;
- int kmem_flag;
+
+ /*
+ * Original code overloaded NDI_DEVI_REMOVE to this interface, and
+ * used it to mean "user-initiated operation" (i.e., devctl). Callers
+ * should now just use NDI_USER_REQ.
+ */
+ if (flags & NDI_DEVI_REMOVE) {
+ flags &= ~NDI_DEVI_REMOVE;
+ flags |= NDI_USER_REQ;
+ }
ret = i_mdi_pi_state_change(pip, MDI_PATHINFO_STATE_OFFLINE, flags);
@@ -3756,18 +3832,11 @@ mdi_pi_offline(mdi_pathinfo_t *pip, int flags)
if (client_held) {
ct = MDI_PI(pip)->pi_client;
MDI_CLIENT_LOCK(ct);
- MDI_DEBUG(4, (CE_NOTE, ct->ct_dip,
- "mdi_pi_offline i_mdi_pm_rele_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, ct->ct_dip,
+ "i_mdi_pm_rele_client\n"));
i_mdi_pm_rele_client(ct, 1);
MDI_CLIENT_UNLOCK(ct);
}
-
- /* determine interrupt context */
- se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
- kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
-
- /* pathinfo is offlined. update DINFOCACHE. */
- i_ddi_di_cache_invalidate(kmem_flag);
}
return (ret);
@@ -3794,9 +3863,10 @@ i_mdi_pi_offline(mdi_pathinfo_t *pip, int flags)
/*
* Give a chance for pending I/Os to complete.
*/
- MDI_DEBUG(1, (CE_NOTE, ct->ct_dip, "!i_mdi_pi_offline: "
- "%d cmds still pending on path: %p\n",
- MDI_PI(pip)->pi_ref_cnt, (void *)pip));
+ MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
+ "!%d cmds still pending on path %s %p",
+ MDI_PI(pip)->pi_ref_cnt, mdi_pi_spathname(pip),
+ (void *)pip));
if (cv_timedwait(&MDI_PI(pip)->pi_ref_cv,
&MDI_PI(pip)->pi_mutex,
ddi_get_lbolt() + drv_usectohz(60 * 1000000)) == -1) {
@@ -3804,12 +3874,13 @@ i_mdi_pi_offline(mdi_pathinfo_t *pip, int flags)
* The timeout time reached without ref_cnt being zero
* being signaled.
*/
- MDI_DEBUG(1, (CE_NOTE, ct->ct_dip, "!i_mdi_pi_offline: "
- "Timeout reached on path %p without the cond\n",
- (void *)pip));
- MDI_DEBUG(1, (CE_NOTE, ct->ct_dip, "!i_mdi_pi_offline: "
- "%d cmds still pending on path: %p\n",
- MDI_PI(pip)->pi_ref_cnt, (void *)pip));
+ MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
+ "!Timeout reached on path %s %p without the cond",
+ mdi_pi_spathname(pip), (void *)pip));
+ MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
+ "!%d cmds still pending on path %s %p",
+ MDI_PI(pip)->pi_ref_cnt,
+ mdi_pi_spathname(pip), (void *)pip));
}
}
vh = ct->ct_vhci;
@@ -3825,9 +3896,10 @@ i_mdi_pi_offline(mdi_pathinfo_t *pip, int flags)
MDI_PI_UNLOCK(pip);
if ((rv = (*f)(vdip, pip, MDI_PATHINFO_STATE_OFFLINE, 0,
flags)) != MDI_SUCCESS) {
- MDI_DEBUG(1, (CE_WARN, ct->ct_dip,
- "!vo_path_offline failed "
- "vdip %p, pip %p", (void *)vdip, (void *)pip));
+ MDI_DEBUG(1, (MDI_WARN, ct->ct_dip,
+ "!vo_path_offline failed: vdip %s%d %p: path %s %p",
+ ddi_driver_name(vdip), ddi_get_instance(vdip),
+ (void *)vdip, mdi_pi_spathname(pip), (void *)pip));
}
MDI_PI_LOCK(pip);
}
@@ -3856,7 +3928,8 @@ i_mdi_pi_offline(mdi_pathinfo_t *pip, int flags)
(i_ddi_node_state(cdip) >=
DS_INITIALIZED)) {
MDI_CLIENT_UNLOCK(ct);
- rv = ndi_devi_offline(cdip, 0);
+ rv = ndi_devi_offline(cdip,
+ NDI_DEVFS_CLEAN);
MDI_CLIENT_LOCK(ct);
if (rv != NDI_SUCCESS) {
/*
@@ -3864,9 +3937,9 @@ i_mdi_pi_offline(mdi_pathinfo_t *pip, int flags)
* Reset client flags to
* online.
*/
- MDI_DEBUG(4, (CE_WARN, cdip,
- "!ndi_devi_offline: failed "
- " Error: %x", rv));
+ MDI_DEBUG(4, (MDI_WARN, cdip,
+ "ndi_devi_offline failed: "
+ "error %x", rv));
MDI_CLIENT_SET_ONLINE(ct);
}
}
@@ -3895,8 +3968,8 @@ i_mdi_pi_offline(mdi_pathinfo_t *pip, int flags)
/*
* Change in the mdi_pathinfo node state will impact the client state
*/
- MDI_DEBUG(2, (CE_NOTE, NULL, "!i_mdi_pi_offline ct = %p pip = %p",
- (void *)ct, (void *)pip));
+ MDI_DEBUG(2, (MDI_NOTE, ct->ct_dip,
+ "ct = %p pip = %p", (void *)ct, (void *)pip));
return (rv);
}
@@ -3966,6 +4039,25 @@ mdi_pi_pathname(mdi_pathinfo_t *pip)
return (mdi_pi_pathname_by_instance(mdi_pi_get_path_instance(pip)));
}
+/*
+ * mdi_pi_spathname():
+ * Return pointer to shortpath to pathinfo node. Used for debug
+ * messages, so return "" instead of NULL when unknown.
+ */
+char *
+mdi_pi_spathname(mdi_pathinfo_t *pip)
+{
+ char *spath = "";
+
+ if (pip) {
+ spath = mdi_pi_spathname_by_instance(
+ mdi_pi_get_path_instance(pip));
+ if (spath == NULL)
+ spath = "";
+ }
+ return (spath);
+}
+
char *
mdi_pi_pathname_obp(mdi_pathinfo_t *pip, char *path)
{
@@ -4007,7 +4099,7 @@ mdi_pi_pathname_obp_set(mdi_pathinfo_t *pip, char *component)
(void) strncat(obp_path, component, MAXPATHLEN);
}
rc = mdi_prop_update_string(pip, "obp-path", obp_path);
-
+
if (obp_path)
kmem_free(obp_path, MAXPATHLEN);
return (rc);
@@ -4040,8 +4132,12 @@ dev_info_t *
mdi_pi_get_phci(mdi_pathinfo_t *pip)
{
dev_info_t *dip = NULL;
+ mdi_phci_t *ph;
+
if (pip) {
- dip = MDI_PI(pip)->pi_phci->ph_dip;
+ ph = MDI_PI(pip)->pi_phci;
+ if (ph)
+ dip = ph->ph_dip;
}
return (dip);
}
@@ -4082,6 +4178,7 @@ caddr_t
mdi_pi_get_phci_private(mdi_pathinfo_t *pip)
{
caddr_t pprivate = NULL;
+
if (pip) {
pprivate = MDI_PI(pip)->pi_pprivate;
}
@@ -4125,6 +4222,16 @@ mdi_pi_get_state(mdi_pathinfo_t *pip)
}
/*
+ * mdi_pi_get_flags():
+ * Get the mdi_pathinfo node flags.
+ */
+uint_t
+mdi_pi_get_flags(mdi_pathinfo_t *pip)
+{
+ return (pip ? MDI_PI(pip)->pi_flags : 0);
+}
+
+/*
* Note that the following function needs to be the new interface for
* mdi_pi_get_state when mpxio gets integrated to ON.
*/
@@ -4188,6 +4295,9 @@ mdi_pi_set_state(mdi_pathinfo_t *pip, mdi_pathinfo_state_t state)
ext_state = MDI_PI(pip)->pi_state & MDI_PATHINFO_EXT_STATE_MASK;
MDI_PI(pip)->pi_state = state;
MDI_PI(pip)->pi_state |= ext_state;
+
+ /* Path has changed state, invalidate DINFOCACHE snapshot. */
+ i_ddi_di_cache_invalidate();
}
}
@@ -4258,7 +4368,7 @@ mdi_prop_remove(mdi_pathinfo_t *pip, char *name)
while (nvp) {
nvpair_t *next;
next = nvlist_next_nvpair(MDI_PI(pip)->pi_prop, nvp);
- (void) snprintf(nvp_name, MAXNAMELEN, "%s",
+ (void) snprintf(nvp_name, sizeof(nvp_name), "%s",
nvpair_name(nvp));
(void) nvlist_remove_all(MDI_PI(pip)->pi_prop,
nvp_name);
@@ -4632,19 +4742,21 @@ mdi_prop_free(void *data)
static void
i_mdi_report_path_state(mdi_client_t *ct, mdi_pathinfo_t *pip)
{
- char *phci_path, *ct_path;
+ char *ct_path;
char *ct_status;
char *status;
- dev_info_t *dip = ct->ct_dip;
+ dev_info_t *cdip = ct->ct_dip;
char lb_buf[64];
+ int report_lb_c = 0, report_lb_p = 0;
ASSERT(MDI_CLIENT_LOCKED(ct));
- if ((dip == NULL) || (ddi_get_instance(dip) == -1) ||
+ if ((cdip == NULL) || (ddi_get_instance(cdip) == -1) ||
(MDI_CLIENT_IS_REPORT_DEV_NEEDED(ct) == 0)) {
return;
}
if (MDI_CLIENT_STATE(ct) == MDI_CLIENT_STATE_OPTIMAL) {
ct_status = "optimal";
+ report_lb_c = 1;
} else if (MDI_CLIENT_STATE(ct) == MDI_CLIENT_STATE_DEGRADED) {
ct_status = "degraded";
} else if (MDI_CLIENT_STATE(ct) == MDI_CLIENT_STATE_FAILED) {
@@ -4653,10 +4765,15 @@ i_mdi_report_path_state(mdi_client_t *ct, mdi_pathinfo_t *pip)
ct_status = "unknown";
}
- if (MDI_PI_IS_OFFLINE(pip)) {
+ lb_buf[0] = 0; /* not interested in load balancing config */
+
+ if (MDI_PI_FLAGS_IS_DEVICE_REMOVED(pip)) {
+ status = "removed";
+ } else if (MDI_PI_IS_OFFLINE(pip)) {
status = "offline";
} else if (MDI_PI_IS_ONLINE(pip)) {
status = "online";
+ report_lb_p = 1;
} else if (MDI_PI_IS_STANDBY(pip)) {
status = "standby";
} else if (MDI_PI_IS_FAULT(pip)) {
@@ -4665,31 +4782,44 @@ i_mdi_report_path_state(mdi_client_t *ct, mdi_pathinfo_t *pip)
status = "unknown";
}
- if (ct->ct_lb == LOAD_BALANCE_LBA) {
- (void) snprintf(lb_buf, sizeof (lb_buf),
- "%s, region-size: %d", mdi_load_balance_lba,
- ct->ct_lb_args->region_size);
- } else if (ct->ct_lb == LOAD_BALANCE_NONE) {
- (void) snprintf(lb_buf, sizeof (lb_buf),
- "%s", mdi_load_balance_none);
- } else {
- (void) snprintf(lb_buf, sizeof (lb_buf), "%s",
- mdi_load_balance_rr);
- }
-
- if (dip) {
+ if (cdip) {
ct_path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
- phci_path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
- cmn_err(CE_CONT, "?%s (%s%d) multipath status: %s, "
- "path %s (%s%d) to target address: %s is %s"
- " Load balancing: %s\n",
- ddi_pathname(dip, ct_path), ddi_driver_name(dip),
- ddi_get_instance(dip), ct_status,
- ddi_pathname(MDI_PI(pip)->pi_phci->ph_dip, phci_path),
- ddi_driver_name(MDI_PI(pip)->pi_phci->ph_dip),
- ddi_get_instance(MDI_PI(pip)->pi_phci->ph_dip),
- MDI_PI(pip)->pi_addr, status, lb_buf);
- kmem_free(phci_path, MAXPATHLEN);
+
+ /*
+ * NOTE: Keeping "multipath status: %s" and
+ * "Load balancing: %s" format unchanged in case someone
+ * scrubs /var/adm/messages looking for these messages.
+ */
+ if (report_lb_c && report_lb_p) {
+ if (ct->ct_lb == LOAD_BALANCE_LBA) {
+ (void) snprintf(lb_buf, sizeof (lb_buf),
+ "%s, region-size: %d", mdi_load_balance_lba,
+ ct->ct_lb_args->region_size);
+ } else if (ct->ct_lb == LOAD_BALANCE_NONE) {
+ (void) snprintf(lb_buf, sizeof (lb_buf),
+ "%s", mdi_load_balance_none);
+ } else {
+ (void) snprintf(lb_buf, sizeof (lb_buf), "%s",
+ mdi_load_balance_rr);
+ }
+
+ cmn_err(mdi_debug_consoleonly ? CE_NOTE : CE_CONT,
+ "?%s (%s%d) multipath status: %s: "
+ "path %d %s is %s: Load balancing: %s\n",
+ ddi_pathname(cdip, ct_path), ddi_driver_name(cdip),
+ ddi_get_instance(cdip), ct_status,
+ mdi_pi_get_path_instance(pip),
+ mdi_pi_spathname(pip), status, lb_buf);
+ } else {
+ cmn_err(mdi_debug_consoleonly ? CE_NOTE : CE_CONT,
+ "?%s (%s%d) multipath status: %s: "
+ "path %d %s is %s\n",
+ ddi_pathname(cdip, ct_path), ddi_driver_name(cdip),
+ ddi_get_instance(cdip), ct_status,
+ mdi_pi_get_path_instance(pip),
+ mdi_pi_spathname(pip), status);
+ }
+
kmem_free(ct_path, MAXPATHLEN);
MDI_CLIENT_CLEAR_REPORT_DEV_NEEDED(ct);
}
@@ -4700,13 +4830,20 @@ i_mdi_report_path_state(mdi_client_t *ct, mdi_pathinfo_t *pip)
* i_mdi_log():
* Utility function for error message management
*
+ * NOTE: The implementation takes care of the trailing \n for cmn_err,
+ * so MDI_DEBUG fmt strings should not be terminated with \n.
+ *
+ * NOTE: If the level is >= 2, and there is no leading !?^
+ * then a leading ! is implied (but can be overridden via
+ * mdi_debug_consoleonly). If you are using kmdb on the console,
+ * consider setting mdi_debug_consoleonly to 1 as an aid.
*/
-/*PRINTFLIKE3*/
+/*PRINTFLIKE4*/
static void
-i_mdi_log(int level, dev_info_t *dip, const char *fmt, ...)
+i_mdi_log(int level, const char *func, dev_info_t *dip, const char *fmt, ...)
{
char name[MAXNAMELEN];
- char buf[MAXNAMELEN];
+ char buf[512];
char *bp;
va_list ap;
int log_only = 0;
@@ -4714,14 +4851,14 @@ i_mdi_log(int level, dev_info_t *dip, const char *fmt, ...)
int console_only = 0;
if (dip) {
- (void) snprintf(name, MAXNAMELEN, "%s%d: ",
+ (void) snprintf(name, sizeof(name), "%s%d: ",
ddi_driver_name(dip), ddi_get_instance(dip));
} else {
name[0] = 0;
}
va_start(ap, fmt);
- (void) vsnprintf(buf, MAXNAMELEN, fmt, ap);
+ (void) vsnprintf(buf, sizeof(buf), fmt, ap);
va_end(ap);
switch (buf[0]) {
@@ -4738,6 +4875,8 @@ i_mdi_log(int level, dev_info_t *dip, const char *fmt, ...)
console_only = 1;
break;
default:
+ if (level >= 2)
+ log_only = 1; /* ! implied */
bp = buf;
break;
}
@@ -4746,22 +4885,41 @@ i_mdi_log(int level, dev_info_t *dip, const char *fmt, ...)
boot_only = 0;
console_only = 0;
}
+ if (mdi_debug_consoleonly) {
+ log_only = 0;
+ boot_only = 0;
+ console_only = 1;
+ level = CE_NOTE;
+ goto console;
+ }
switch (level) {
case CE_NOTE:
level = CE_CONT;
/* FALLTHROUGH */
case CE_CONT:
+ if (boot_only) {
+ cmn_err(level, "?mdi: %s%s: %s\n", name, func, bp);
+ } else if (console_only) {
+ cmn_err(level, "^mdi: %s%s: %s\n", name, func, bp);
+ } else if (log_only) {
+ cmn_err(level, "!mdi: %s%s: %s\n", name, func, bp);
+ } else {
+ cmn_err(level, "mdi: %s%s: %s\n", name, func, bp);
+ }
+ break;
+
case CE_WARN:
case CE_PANIC:
+ console:
if (boot_only) {
- cmn_err(level, "?mdi: %s%s", name, bp);
+ cmn_err(level, "?mdi: %s%s: %s", name, func, bp);
} else if (console_only) {
- cmn_err(level, "^mdi: %s%s", name, bp);
+ cmn_err(level, "^mdi: %s%s: %s", name, func, bp);
} else if (log_only) {
- cmn_err(level, "!mdi: %s%s", name, bp);
+ cmn_err(level, "!mdi: %s%s: %s", name, func, bp);
} else {
- cmn_err(level, "mdi: %s%s", name, bp);
+ cmn_err(level, "mdi: %s%s: %s", name, func, bp);
}
break;
default:
@@ -4791,8 +4949,8 @@ i_mdi_client_online(dev_info_t *ct_dip)
if (ct->ct_power_cnt == 0)
(void) i_mdi_power_all_phci(ct);
- MDI_DEBUG(4, (CE_NOTE, ct_dip, "i_mdi_client_online "
- "i_mdi_pm_hold_client %p\n", (void *)ct));
+ MDI_DEBUG(4, (MDI_NOTE, ct_dip,
+ "i_mdi_pm_hold_client %p", (void *)ct));
i_mdi_pm_hold_client(ct, 1);
MDI_CLIENT_UNLOCK(ct);
@@ -4887,8 +5045,8 @@ i_mdi_phci_offline(dev_info_t *dip, uint_t flags)
* critical services.
*/
ph = i_devi_get_phci(dip);
- MDI_DEBUG(2, (CE_NOTE, dip, "!mdi_phci_offline called %p %p\n",
- (void *)dip, (void *)ph));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "called %p %p", (void *)dip, (void *)ph));
if (ph == NULL) {
return (rv);
}
@@ -4896,8 +5054,8 @@ i_mdi_phci_offline(dev_info_t *dip, uint_t flags)
MDI_PHCI_LOCK(ph);
if (MDI_PHCI_IS_OFFLINE(ph)) {
- MDI_DEBUG(1, (CE_WARN, dip, "!pHCI %p already offlined",
- (void *)ph));
+ MDI_DEBUG(1, (MDI_WARN, dip,
+ "!pHCI already offlined: %p", (void *)dip));
MDI_PHCI_UNLOCK(ph);
return (NDI_SUCCESS);
}
@@ -4906,10 +5064,10 @@ i_mdi_phci_offline(dev_info_t *dip, uint_t flags)
* Check to see if the pHCI can be offlined
*/
if (ph->ph_unstable) {
- MDI_DEBUG(1, (CE_WARN, dip,
- "!One or more target devices are in transient "
- "state. This device can not be removed at "
- "this moment. Please try again later."));
+ MDI_DEBUG(1, (MDI_WARN, dip,
+ "!One or more target devices are in transient state. "
+ "This device can not be removed at this moment. "
+ "Please try again later."));
MDI_PHCI_UNLOCK(ph);
return (NDI_BUSY);
}
@@ -4930,11 +5088,10 @@ i_mdi_phci_offline(dev_info_t *dip, uint_t flags)
/*
* Failover is in progress, Fail the DR
*/
- MDI_DEBUG(1, (CE_WARN, dip,
- "!pHCI device (%s%d) is Busy. %s",
- ddi_driver_name(dip), ddi_get_instance(dip),
- "This device can not be removed at "
- "this moment. Please try again later."));
+ MDI_DEBUG(1, (MDI_WARN, dip,
+ "!pHCI device is busy. "
+ "This device can not be removed at this moment. "
+ "Please try again later."));
MDI_PI_UNLOCK(pip);
i_mdi_client_unlock(ct);
MDI_PHCI_UNLOCK(ph);
@@ -4952,7 +5109,8 @@ i_mdi_phci_offline(dev_info_t *dip, uint_t flags)
MDI_CLIENT_STATE_FAILED)) {
i_mdi_client_unlock(ct);
MDI_PHCI_UNLOCK(ph);
- if (ndi_devi_offline(cdip, 0) != NDI_SUCCESS) {
+ if (ndi_devi_offline(cdip,
+ NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
/*
* ndi_devi_offline() failed.
* This pHCI provides the critical path
@@ -4960,11 +5118,10 @@ i_mdi_phci_offline(dev_info_t *dip, uint_t flags)
* Return busy.
*/
MDI_PHCI_LOCK(ph);
- MDI_DEBUG(1, (CE_WARN, dip,
- "!pHCI device (%s%d) is Busy. %s",
- ddi_driver_name(dip), ddi_get_instance(dip),
- "This device can not be removed at "
- "this moment. Please try again later."));
+ MDI_DEBUG(1, (MDI_WARN, dip,
+ "!pHCI device is busy. "
+ "This device can not be removed at this "
+ "moment. Please try again later."));
failed_pip = pip;
break;
} else {
@@ -5004,7 +5161,8 @@ i_mdi_phci_offline(dev_info_t *dip, uint_t flags)
MDI_PI_UNLOCK(pip);
i_mdi_client_unlock(ct);
MDI_PHCI_UNLOCK(ph);
- (void) ndi_devi_offline(cdip, 0);
+ (void) ndi_devi_offline(cdip,
+ NDI_DEVFS_CLEAN);
MDI_PHCI_LOCK(ph);
pip = next;
continue;
@@ -5039,7 +5197,7 @@ i_mdi_phci_offline(dev_info_t *dip, uint_t flags)
/*
* Give a chance for any pending commands to execute
*/
- delay(1);
+ delay_random(5);
MDI_PHCI_LOCK(ph);
pip = ph->ph_path_head;
while (pip != NULL) {
@@ -5048,11 +5206,10 @@ i_mdi_phci_offline(dev_info_t *dip, uint_t flags)
MDI_PI_LOCK(pip);
ct = MDI_PI(pip)->pi_client;
if (!MDI_PI_IS_OFFLINE(pip)) {
- MDI_DEBUG(1, (CE_WARN, dip,
- "!pHCI device (%s%d) is Busy. %s",
- ddi_driver_name(dip), ddi_get_instance(dip),
- "This device can not be removed at "
- "this moment. Please try again later."));
+ MDI_DEBUG(1, (MDI_WARN, dip,
+ "!pHCI device is busy. "
+ "This device can not be removed at this moment. "
+ "Please try again later."));
MDI_PI_UNLOCK(pip);
MDI_PHCI_SET_ONLINE(ph);
MDI_PHCI_UNLOCK(ph);
@@ -5166,7 +5323,7 @@ mdi_phci_retire_notify(dev_info_t *dip, int *constraint)
if ((MDI_CLIENT_IS_FAILOVER_IN_PROGRESS(ct)) ||
(ct->ct_unstable)) {
/*
- * Failover is in progress, can't check for constraints
+ * Failover is in progress, can't check for constraints
*/
MDI_PI_UNLOCK(pip);
i_mdi_client_unlock(ct);
@@ -5201,7 +5358,7 @@ mdi_phci_retire_notify(dev_info_t *dip, int *constraint)
}
/*
- * offline the path(s) hanging off the PHCI. If the
+ * offline the path(s) hanging off the pHCI. If the
* last path to any client, check that constraints
* have been applied.
*/
@@ -5291,8 +5448,9 @@ mdi_phci_retire_finalize(dev_info_t *dip, int phci_only)
* Cannot offline pip(s)
*/
if (unstable) {
- cmn_err(CE_WARN, "PHCI in transient state, cannot "
- "retire, dip = %p", (void *)dip);
+ cmn_err(CE_WARN, "%s%d: mdi_phci_retire_finalize: "
+ "pHCI in transient state, cannot retire",
+ ddi_driver_name(dip), ddi_get_instance(dip));
MDI_PHCI_UNLOCK(ph);
return;
}
@@ -5317,7 +5475,7 @@ mdi_phci_retire_finalize(dev_info_t *dip, int phci_only)
/*
* Give a chance for any pending commands to execute
*/
- delay(1);
+ delay_random(5);
MDI_PHCI_LOCK(ph);
pip = ph->ph_path_head;
while (pip != NULL) {
@@ -5326,8 +5484,10 @@ mdi_phci_retire_finalize(dev_info_t *dip, int phci_only)
MDI_PI_LOCK(pip);
ct = MDI_PI(pip)->pi_client;
if (!MDI_PI_IS_OFFLINE(pip)) {
- cmn_err(CE_WARN, "PHCI busy, cannot offline path: "
- "PHCI dip = %p", (void *)dip);
+ cmn_err(CE_WARN, "mdi_phci_retire_finalize: "
+ "path %d %s busy, cannot offline",
+ mdi_pi_get_path_instance(pip),
+ mdi_pi_spathname(pip));
MDI_PI_UNLOCK(pip);
MDI_PHCI_SET_ONLINE(ph);
MDI_PHCI_UNLOCK(ph);
@@ -5365,8 +5525,8 @@ i_mdi_client_offline(dev_info_t *dip, uint_t flags)
* accordingly
*/
ct = i_devi_get_client(dip);
- MDI_DEBUG(2, (CE_NOTE, dip, "!i_mdi_client_offline called %p %p\n",
- (void *)dip, (void *)ct));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "called %p %p", (void *)dip, (void *)ct));
if (ct != NULL) {
MDI_CLIENT_LOCK(ct);
if (ct->ct_unstable) {
@@ -5374,10 +5534,10 @@ i_mdi_client_offline(dev_info_t *dip, uint_t flags)
* One or more paths are in transient state,
* Dont allow offline of a client device
*/
- MDI_DEBUG(1, (CE_WARN, dip,
- "!One or more paths to this device is "
- "in transient state. This device can not "
- "be removed at this moment. "
+ MDI_DEBUG(1, (MDI_WARN, dip,
+ "!One or more paths to "
+ "this device are in transient state. "
+ "This device can not be removed at this moment. "
"Please try again later."));
MDI_CLIENT_UNLOCK(ct);
return (NDI_BUSY);
@@ -5387,11 +5547,10 @@ i_mdi_client_offline(dev_info_t *dip, uint_t flags)
* Failover is in progress, Dont allow DR of
* a client device
*/
- MDI_DEBUG(1, (CE_WARN, dip,
- "!Client device (%s%d) is Busy. %s",
- ddi_driver_name(dip), ddi_get_instance(dip),
- "This device can not be removed at "
- "this moment. Please try again later."));
+ MDI_DEBUG(1, (MDI_WARN, dip,
+ "!Client device is Busy. "
+ "This device can not be removed at this moment. "
+ "Please try again later."));
MDI_CLIENT_UNLOCK(ct);
return (NDI_BUSY);
}
@@ -5443,26 +5602,26 @@ mdi_post_attach(dev_info_t *dip, ddi_attach_cmd_t cmd, int error)
MDI_PHCI_LOCK(ph);
switch (cmd) {
case DDI_ATTACH:
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!pHCI post_attach: called %p\n", (void *)ph));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "phci post_attach called %p", (void *)ph));
if (error == DDI_SUCCESS) {
MDI_PHCI_SET_ATTACH(ph);
} else {
- MDI_DEBUG(1, (CE_NOTE, dip,
- "!pHCI post_attach: failed error=%d\n",
+ MDI_DEBUG(1, (MDI_NOTE, dip,
+ "!pHCI post_attach failed: error %d",
error));
MDI_PHCI_SET_DETACH(ph);
}
break;
case DDI_RESUME:
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!pHCI post_resume: called %p\n", (void *)ph));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "pHCI post_resume: called %p", (void *)ph));
if (error == DDI_SUCCESS) {
MDI_PHCI_SET_RESUME(ph);
} else {
- MDI_DEBUG(1, (CE_NOTE, dip,
- "!pHCI post_resume: failed error=%d\n",
+ MDI_DEBUG(1, (MDI_NOTE, dip,
+ "!pHCI post_resume failed: error %d",
error));
MDI_PHCI_SET_SUSPEND(ph);
}
@@ -5478,15 +5637,15 @@ mdi_post_attach(dev_info_t *dip, ddi_attach_cmd_t cmd, int error)
MDI_CLIENT_LOCK(ct);
switch (cmd) {
case DDI_ATTACH:
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!Client post_attach: called %p\n", (void *)ct));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "client post_attach called %p", (void *)ct));
if (error != DDI_SUCCESS) {
- MDI_DEBUG(1, (CE_NOTE, dip,
- "!Client post_attach: failed error=%d\n",
+ MDI_DEBUG(1, (MDI_NOTE, dip,
+ "!client post_attach failed: error %d",
error));
MDI_CLIENT_SET_DETACH(ct);
- MDI_DEBUG(4, (CE_WARN, dip,
- "mdi_post_attach i_mdi_pm_reset_client\n"));
+ MDI_DEBUG(4, (MDI_WARN, dip,
+ "i_mdi_pm_reset_client"));
i_mdi_pm_reset_client(ct);
break;
}
@@ -5503,13 +5662,13 @@ mdi_post_attach(dev_info_t *dip, ddi_attach_cmd_t cmd, int error)
break;
case DDI_RESUME:
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!Client post_attach: called %p\n", (void *)ct));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "client post_attach: called %p", (void *)ct));
if (error == DDI_SUCCESS) {
MDI_CLIENT_SET_RESUME(ct);
} else {
- MDI_DEBUG(1, (CE_NOTE, dip,
- "!Client post_resume: failed error=%d\n",
+ MDI_DEBUG(1, (MDI_NOTE, dip,
+ "!client post_resume failed: error %d",
error));
MDI_CLIENT_SET_SUSPEND(ct);
}
@@ -5559,17 +5718,16 @@ i_mdi_phci_pre_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
MDI_PHCI_LOCK(ph);
switch (cmd) {
case DDI_DETACH:
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!pHCI pre_detach: called %p\n", (void *)ph));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "pHCI pre_detach: called %p", (void *)ph));
if (!MDI_PHCI_IS_OFFLINE(ph)) {
/*
* mdi_pathinfo nodes are still attached to
* this pHCI. Fail the detach for this pHCI.
*/
- MDI_DEBUG(2, (CE_WARN, dip,
- "!pHCI pre_detach: "
- "mdi_pathinfo nodes are still attached "
- "%p\n", (void *)ph));
+ MDI_DEBUG(2, (MDI_WARN, dip,
+ "pHCI pre_detach: paths are still attached %p",
+ (void *)ph));
rv = DDI_FAILURE;
break;
}
@@ -5584,8 +5742,8 @@ i_mdi_phci_pre_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
* client devices before pHCI can be suspended.
*/
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!pHCI pre_suspend: called %p\n", (void *)ph));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "pHCI pre_suspend: called %p", (void *)ph));
/*
* Suspend all the client devices accessible through this pHCI
*/
@@ -5608,8 +5766,8 @@ i_mdi_phci_pre_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
* Suspend of one of the client
* device has failed.
*/
- MDI_DEBUG(1, (CE_WARN, dip,
- "!Suspend of device (%s%d) failed.",
+ MDI_DEBUG(1, (MDI_WARN, dip,
+ "!suspend of device (%s%d) failed.",
ddi_driver_name(cdip),
ddi_get_instance(cdip)));
failed_pip = pip;
@@ -5676,14 +5834,16 @@ i_mdi_client_pre_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
MDI_CLIENT_LOCK(ct);
switch (cmd) {
case DDI_DETACH:
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!Client pre_detach: called %p\n", (void *)ct));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "client pre_detach: called %p",
+ (void *)ct));
MDI_CLIENT_SET_DETACH(ct);
break;
case DDI_SUSPEND:
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!Client pre_suspend: called %p\n", (void *)ct));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "client pre_suspend: called %p",
+ (void *)ct));
MDI_CLIENT_SET_SUSPEND(ct);
break;
@@ -5736,15 +5896,17 @@ i_mdi_phci_post_detach(dev_info_t *dip, ddi_detach_cmd_t cmd, int error)
*/
switch (cmd) {
case DDI_DETACH:
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!pHCI post_detach: called %p\n", (void *)ph));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "pHCI post_detach: called %p",
+ (void *)ph));
if (error != DDI_SUCCESS)
MDI_PHCI_SET_ATTACH(ph);
break;
case DDI_SUSPEND:
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!pHCI post_suspend: called %p\n", (void *)ph));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "pHCI post_suspend: called %p",
+ (void *)ph));
if (error != DDI_SUCCESS)
MDI_PHCI_SET_RESUME(ph);
break;
@@ -5769,14 +5931,14 @@ i_mdi_client_post_detach(dev_info_t *dip, ddi_detach_cmd_t cmd, int error)
*/
switch (cmd) {
case DDI_DETACH:
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!Client post_detach: called %p\n", (void *)ct));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "client post_detach: called %p", (void *)ct));
if (DEVI_IS_ATTACHING(ct->ct_dip)) {
- MDI_DEBUG(4, (CE_NOTE, dip, "i_mdi_client_post_detach "
+ MDI_DEBUG(4, (MDI_NOTE, dip,
"i_mdi_pm_rele_client\n"));
i_mdi_pm_rele_client(ct, ct->ct_path_count);
} else {
- MDI_DEBUG(4, (CE_NOTE, dip, "i_mdi_client_post_detach "
+ MDI_DEBUG(4, (MDI_NOTE, dip,
"i_mdi_pm_reset_client\n"));
i_mdi_pm_reset_client(ct);
}
@@ -5785,8 +5947,8 @@ i_mdi_client_post_detach(dev_info_t *dip, ddi_detach_cmd_t cmd, int error)
break;
case DDI_SUSPEND:
- MDI_DEBUG(2, (CE_NOTE, dip,
- "!Client post_suspend: called %p\n", (void *)ct));
+ MDI_DEBUG(2, (MDI_NOTE, dip,
+ "called %p", (void *)ct));
if (error != DDI_SUCCESS)
MDI_CLIENT_SET_RESUME(ct);
break;
@@ -5926,18 +6088,19 @@ mdi_pi_enable_path(mdi_pathinfo_t *pip, int flags)
{
mdi_phci_t *ph;
- ph = i_devi_get_phci(mdi_pi_get_phci(pip));
+ ph = MDI_PI(pip)->pi_phci;
if (ph == NULL) {
- MDI_DEBUG(1, (CE_NOTE, NULL, "!mdi_pi_enable_path:"
- " failed. pip: %p ph = NULL\n", (void *)pip));
+ MDI_DEBUG(1, (MDI_NOTE, mdi_pi_get_phci(pip),
+ "!failed: path %s %p: NULL ph",
+ mdi_pi_spathname(pip), (void *)pip));
return (MDI_FAILURE);
}
(void) i_mdi_enable_disable_path(pip, ph->ph_vhci, flags,
MDI_ENABLE_OP);
- MDI_DEBUG(5, (CE_NOTE, NULL, "!mdi_pi_enable_path:"
- " Returning success pip = %p. ph = %p\n",
- (void *)pip, (void *)ph));
+ MDI_DEBUG(5, (MDI_NOTE, ph->ph_dip,
+ "!returning success pip = %p. ph = %p",
+ (void *)pip, (void *)ph));
return (MDI_SUCCESS);
}
@@ -5952,18 +6115,19 @@ mdi_pi_disable_path(mdi_pathinfo_t *pip, int flags)
{
mdi_phci_t *ph;
- ph = i_devi_get_phci(mdi_pi_get_phci(pip));
+ ph = MDI_PI(pip)->pi_phci;
if (ph == NULL) {
- MDI_DEBUG(1, (CE_NOTE, NULL, "!mdi_pi_disable_path:"
- " failed. pip: %p ph = NULL\n", (void *)pip));
+ MDI_DEBUG(1, (MDI_NOTE, mdi_pi_get_phci(pip),
+ "!failed: path %s %p: NULL ph",
+ mdi_pi_spathname(pip), (void *)pip));
return (MDI_FAILURE);
}
(void) i_mdi_enable_disable_path(pip,
- ph->ph_vhci, flags, MDI_DISABLE_OP);
- MDI_DEBUG(5, (CE_NOTE, NULL, "!mdi_pi_disable_path:"
- "Returning success pip = %p. ph = %p",
- (void *)pip, (void *)ph));
+ ph->ph_vhci, flags, MDI_DISABLE_OP);
+ MDI_DEBUG(5, (MDI_NOTE, ph->ph_dip,
+ "!returning success pip = %p. ph = %p",
+ (void *)pip, (void *)ph));
return (MDI_SUCCESS);
}
@@ -6035,8 +6199,8 @@ i_mdi_enable_disable_path(mdi_pathinfo_t *pip, mdi_vhci_t *vh, int flags,
MDI_EXT_STATE_CHANGE | sync_flag |
op | MDI_BEFORE_STATE_CHANGE);
if (rv != MDI_SUCCESS) {
- MDI_DEBUG(2, (CE_WARN, vh->vh_dip,
- "!vo_pi_state_change: failed rv = %x", rv));
+ MDI_DEBUG(2, (MDI_WARN, vh->vh_dip,
+ "vo_pi_state_change: failed rv = %x", rv));
}
}
MDI_PI_LOCK(pip);
@@ -6076,8 +6240,8 @@ i_mdi_enable_disable_path(mdi_pathinfo_t *pip, mdi_vhci_t *vh, int flags,
MDI_EXT_STATE_CHANGE | sync_flag |
op | MDI_AFTER_STATE_CHANGE);
if (rv != MDI_SUCCESS) {
- MDI_DEBUG(2, (CE_WARN, vh->vh_dip,
- "!vo_pi_state_change: failed rv = %x", rv));
+ MDI_DEBUG(2, (MDI_WARN, vh->vh_dip,
+ "vo_pi_state_change failed: rv = %x", rv));
}
}
return (next);
@@ -6099,18 +6263,18 @@ i_mdi_pi_enable_disable(dev_info_t *cdip, dev_info_t *pdip, int flags, int op)
int found_it;
ph = i_devi_get_phci(pdip);
- MDI_DEBUG(5, (CE_NOTE, NULL, "!i_mdi_pi_enable_disable: "
- "Op = %d pdip = %p cdip = %p\n", op, (void *)pdip,
- (void *)cdip));
+ MDI_DEBUG(5, (MDI_NOTE, cdip ? cdip : pdip,
+ "!op = %d pdip = %p cdip = %p", op, (void *)pdip,
+ (void *)cdip));
if (ph == NULL) {
- MDI_DEBUG(1, (CE_NOTE, NULL, "!i_mdi_pi_enable_disable:"
- "Op %d failed. ph = NULL\n", op));
+ MDI_DEBUG(1, (MDI_NOTE, cdip ? cdip : pdip,
+ "!failed: operation %d: NULL ph", op));
return (MDI_FAILURE);
}
if ((op != MDI_ENABLE_OP) && (op != MDI_DISABLE_OP)) {
- MDI_DEBUG(1, (CE_NOTE, NULL, "!i_mdi_pi_enable_disable: "
- "Op Invalid operation = %d\n", op));
+ MDI_DEBUG(1, (MDI_NOTE, cdip ? cdip : pdip,
+ "!failed: invalid operation %d", op));
return (MDI_FAILURE);
}
@@ -6120,8 +6284,8 @@ i_mdi_pi_enable_disable(dev_info_t *cdip, dev_info_t *pdip, int flags, int op)
/*
* Need to mark the Phci as enabled/disabled.
*/
- MDI_DEBUG(3, (CE_NOTE, NULL, "!i_mdi_pi_enable_disable: "
- "Op %d for the phci\n", op));
+ MDI_DEBUG(4, (MDI_NOTE, cdip ? cdip : pdip,
+ "op %d for the phci", op));
MDI_PHCI_LOCK(ph);
switch (flags) {
case USER_DISABLE:
@@ -6147,9 +6311,8 @@ i_mdi_pi_enable_disable(dev_info_t *cdip, dev_info_t *pdip, int flags, int op)
break;
default:
MDI_PHCI_UNLOCK(ph);
- MDI_DEBUG(1, (CE_NOTE, NULL,
- "!i_mdi_pi_enable_disable:"
- " Invalid flag argument= %d\n", flags));
+ MDI_DEBUG(1, (MDI_NOTE, cdip ? cdip : pdip,
+ "!invalid flag argument= %d", flags));
}
/*
@@ -6168,9 +6331,8 @@ i_mdi_pi_enable_disable(dev_info_t *cdip, dev_info_t *pdip, int flags, int op)
*/
ct = i_devi_get_client(cdip);
if (ct == NULL) {
- MDI_DEBUG(1, (CE_NOTE, NULL,
- "!i_mdi_pi_enable_disable:"
- " failed. ct = NULL operation = %d\n", op));
+ MDI_DEBUG(1, (MDI_NOTE, cdip ? cdip : pdip,
+ "!failed: operation = %d: NULL ct", op));
return (MDI_FAILURE);
}
@@ -6192,18 +6354,17 @@ i_mdi_pi_enable_disable(dev_info_t *cdip, dev_info_t *pdip, int flags, int op)
MDI_CLIENT_UNLOCK(ct);
if (found_it == 0) {
- MDI_DEBUG(1, (CE_NOTE, NULL,
- "!i_mdi_pi_enable_disable:"
- " failed. Could not find corresponding pip\n"));
+ MDI_DEBUG(1, (MDI_NOTE, cdip ? cdip : pdip,
+ "!failed. Could not find corresponding pip\n"));
return (MDI_FAILURE);
}
(void) i_mdi_enable_disable_path(pip, vh, flags, op);
}
- MDI_DEBUG(5, (CE_NOTE, NULL, "!i_mdi_pi_enable_disable: "
- "Op %d Returning success pdip = %p cdip = %p\n",
- op, (void *)pdip, (void *)cdip));
+ MDI_DEBUG(5, (MDI_NOTE, cdip ? cdip : pdip,
+ "!op %d returning success pdip = %p cdip = %p",
+ op, (void *)pdip, (void *)cdip));
return (MDI_SUCCESS);
}
@@ -6223,19 +6384,17 @@ i_mdi_pm_hold_pip(mdi_pathinfo_t *pip)
}
ph_dip = mdi_pi_get_phci(pip);
- MDI_DEBUG(4, (CE_NOTE, ph_dip, "i_mdi_pm_hold_pip for %s%d %p\n",
- ddi_driver_name(ph_dip), ddi_get_instance(ph_dip), (void *)pip));
+ MDI_DEBUG(4, (MDI_NOTE, ph_dip,
+ "%s %p", mdi_pi_spathname(pip), (void *)pip));
if (ph_dip == NULL) {
return;
}
MDI_PI_UNLOCK(pip);
- MDI_DEBUG(4, (CE_NOTE, ph_dip, "kidsupcnt was %d\n",
+ MDI_DEBUG(4, (MDI_NOTE, ph_dip, "kidsupcnt was %d",
DEVI(ph_dip)->devi_pm_kidsupcnt));
-
pm_hold_power(ph_dip);
-
- MDI_DEBUG(4, (CE_NOTE, ph_dip, "kidsupcnt is %d\n",
+ MDI_DEBUG(4, (MDI_NOTE, ph_dip, "kidsupcnt is %d",
DEVI(ph_dip)->devi_pm_kidsupcnt));
MDI_PI_LOCK(pip);
@@ -6262,17 +6421,17 @@ i_mdi_pm_rele_pip(mdi_pathinfo_t *pip)
ph_dip = mdi_pi_get_phci(pip);
ASSERT(ph_dip != NULL);
- MDI_PI_UNLOCK(pip);
- MDI_DEBUG(4, (CE_NOTE, ph_dip, "i_mdi_pm_rele_pip for %s%d %p\n",
- ddi_driver_name(ph_dip), ddi_get_instance(ph_dip), (void *)pip));
+ MDI_DEBUG(4, (MDI_NOTE, ph_dip,
+ "%s %p", mdi_pi_spathname(pip), (void *)pip));
- MDI_DEBUG(4, (CE_NOTE, ph_dip, "kidsupcnt was %d\n",
- DEVI(ph_dip)->devi_pm_kidsupcnt));
+ MDI_PI_UNLOCK(pip);
+ MDI_DEBUG(4, (MDI_NOTE, ph_dip,
+ "kidsupcnt was %d", DEVI(ph_dip)->devi_pm_kidsupcnt));
pm_rele_power(ph_dip);
- MDI_DEBUG(4, (CE_NOTE, ph_dip, "kidsupcnt is %d\n",
- DEVI(ph_dip)->devi_pm_kidsupcnt));
-
+ MDI_DEBUG(4, (MDI_NOTE, ph_dip,
+ "kidsupcnt is %d", DEVI(ph_dip)->devi_pm_kidsupcnt));
MDI_PI_LOCK(pip);
+
MDI_PI(pip)->pi_pm_held = 0;
}
@@ -6282,9 +6441,9 @@ i_mdi_pm_hold_client(mdi_client_t *ct, int incr)
ASSERT(MDI_CLIENT_LOCKED(ct));
ct->ct_power_cnt += incr;
- MDI_DEBUG(4, (CE_NOTE, ct->ct_dip, "i_mdi_pm_hold_client %p "
- "ct_power_cnt = %d incr = %d\n", (void *)ct,
- ct->ct_power_cnt, incr));
+ MDI_DEBUG(4, (MDI_NOTE, ct->ct_dip,
+ "%p ct_power_cnt = %d incr = %d",
+ (void *)ct, ct->ct_power_cnt, incr));
ASSERT(ct->ct_power_cnt >= 0);
}
@@ -6312,8 +6471,8 @@ i_mdi_pm_rele_client(mdi_client_t *ct, int decr)
if (i_ddi_devi_attached(ct->ct_dip)) {
ct->ct_power_cnt -= decr;
- MDI_DEBUG(4, (CE_NOTE, ct->ct_dip, "i_mdi_pm_rele_client %p "
- "ct_power_cnt = %d decr = %d\n",
+ MDI_DEBUG(4, (MDI_NOTE, ct->ct_dip,
+ "%p ct_power_cnt = %d decr = %d",
(void *)ct, ct->ct_power_cnt, decr));
}
@@ -6327,8 +6486,8 @@ i_mdi_pm_rele_client(mdi_client_t *ct, int decr)
static void
i_mdi_pm_reset_client(mdi_client_t *ct)
{
- MDI_DEBUG(4, (CE_NOTE, ct->ct_dip, "i_mdi_pm_reset_client %p "
- "ct_power_cnt = %d\n", (void *)ct, ct->ct_power_cnt));
+ MDI_DEBUG(4, (MDI_NOTE, ct->ct_dip,
+ "%p ct_power_cnt = %d", (void *)ct, ct->ct_power_cnt));
ASSERT(MDI_CLIENT_LOCKED(ct));
ct->ct_power_cnt = 0;
i_mdi_rele_all_phci(ct);
@@ -6350,15 +6509,15 @@ i_mdi_power_one_phci(mdi_pathinfo_t *pip)
MDI_PI_UNLOCK(pip);
/* bring all components of phci to full power */
- MDI_DEBUG(4, (CE_NOTE, ph_dip, "i_mdi_power_one_phci "
- "pm_powerup for %s%d %p\n", ddi_driver_name(ph_dip),
+ MDI_DEBUG(4, (MDI_NOTE, ph_dip,
+ "pm_powerup for %s%d %p", ddi_driver_name(ph_dip),
ddi_get_instance(ph_dip), (void *)pip));
ret = pm_powerup(ph_dip);
if (ret == DDI_FAILURE) {
- MDI_DEBUG(4, (CE_NOTE, ph_dip, "i_mdi_power_one_phci "
- "pm_powerup FAILED for %s%d %p\n",
+ MDI_DEBUG(4, (MDI_NOTE, ph_dip,
+ "pm_powerup FAILED for %s%d %p",
ddi_driver_name(ph_dip), ddi_get_instance(ph_dip),
(void *)pip));
@@ -6456,10 +6615,10 @@ mdi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
MDI_CLIENT_LOCK(ct);
switch (op) {
case BUS_POWER_PRE_NOTIFICATION:
- MDI_DEBUG(4, (CE_NOTE, bpc->bpc_dip, "mdi_bus_power "
+ MDI_DEBUG(4, (MDI_NOTE, bpc->bpc_dip,
"BUS_POWER_PRE_NOTIFICATION:"
- "%s@%s, olevel=%d, nlevel=%d, comp=%d\n",
- PM_NAME(bpc->bpc_dip), PM_ADDR(bpc->bpc_dip),
+ "%s@%s, olevel=%d, nlevel=%d, comp=%d",
+ ddi_node_name(bpc->bpc_dip), PM_ADDR(bpc->bpc_dip),
bpc->bpc_olevel, bpc->bpc_nlevel, bpc->bpc_comp));
/* serialize power level change per client */
@@ -6480,17 +6639,17 @@ mdi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
*/
if (bpc->bpc_nlevel > 0) {
if (!DEVI_IS_ATTACHING(ct->ct_dip)) {
- MDI_DEBUG(4, (CE_NOTE, bpc->bpc_dip,
- "mdi_bus_power i_mdi_pm_hold_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, bpc->bpc_dip,
+ "i_mdi_pm_hold_client\n"));
i_mdi_pm_hold_client(ct, ct->ct_path_count);
}
}
break;
case BUS_POWER_POST_NOTIFICATION:
- MDI_DEBUG(4, (CE_NOTE, bpc->bpc_dip, "mdi_bus_power "
+ MDI_DEBUG(4, (MDI_NOTE, bpc->bpc_dip,
"BUS_POWER_POST_NOTIFICATION:"
- "%s@%s, olevel=%d, nlevel=%d, comp=%d result=%d\n",
- PM_NAME(bpc->bpc_dip), PM_ADDR(bpc->bpc_dip),
+ "%s@%s, olevel=%d, nlevel=%d, comp=%d result=%d",
+ ddi_node_name(bpc->bpc_dip), PM_ADDR(bpc->bpc_dip),
bpc->bpc_olevel, bpc->bpc_nlevel, bpc->bpc_comp,
*(int *)result));
@@ -6505,21 +6664,21 @@ mdi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
/* release the hold we did in pre-notification */
if (bpc->bpc_nlevel > 0 && (*(int *)result != DDI_SUCCESS) &&
!DEVI_IS_ATTACHING(ct->ct_dip)) {
- MDI_DEBUG(4, (CE_NOTE, bpc->bpc_dip,
- "mdi_bus_power i_mdi_pm_rele_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, bpc->bpc_dip,
+ "i_mdi_pm_rele_client\n"));
i_mdi_pm_rele_client(ct, ct->ct_path_count);
}
if (bpc->bpc_nlevel == 0 && (*(int *)result == DDI_SUCCESS)) {
/* another thread might started attaching */
if (DEVI_IS_ATTACHING(ct->ct_dip)) {
- MDI_DEBUG(4, (CE_NOTE, bpc->bpc_dip,
- "mdi_bus_power i_mdi_pm_rele_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, bpc->bpc_dip,
+ "i_mdi_pm_rele_client\n"));
i_mdi_pm_rele_client(ct, ct->ct_path_count);
/* detaching has been taken care in pm_post_unconfig */
} else if (!DEVI_IS_DETACHING(ct->ct_dip)) {
- MDI_DEBUG(4, (CE_NOTE, bpc->bpc_dip,
- "mdi_bus_power i_mdi_pm_reset_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, bpc->bpc_dip,
+ "i_mdi_pm_reset_client\n"));
i_mdi_pm_reset_client(ct);
}
}
@@ -6531,10 +6690,10 @@ mdi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
/* need to do more */
case BUS_POWER_HAS_CHANGED:
- MDI_DEBUG(4, (CE_NOTE, bphc->bphc_dip, "mdi_bus_power "
+ MDI_DEBUG(4, (MDI_NOTE, bphc->bphc_dip,
"BUS_POWER_HAS_CHANGED:"
- "%s@%s, olevel=%d, nlevel=%d, comp=%d\n",
- PM_NAME(bphc->bphc_dip), PM_ADDR(bphc->bphc_dip),
+ "%s@%s, olevel=%d, nlevel=%d, comp=%d",
+ ddi_node_name(bphc->bphc_dip), PM_ADDR(bphc->bphc_dip),
bphc->bphc_olevel, bphc->bphc_nlevel, bphc->bphc_comp));
if (bphc->bphc_nlevel > 0 &&
@@ -6542,14 +6701,14 @@ mdi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
if (ct->ct_power_cnt == 0) {
ret = i_mdi_power_all_phci(ct);
}
- MDI_DEBUG(4, (CE_NOTE, bphc->bphc_dip,
- "mdi_bus_power i_mdi_pm_hold_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, bphc->bphc_dip,
+ "i_mdi_pm_hold_client\n"));
i_mdi_pm_hold_client(ct, ct->ct_path_count);
}
if (bphc->bphc_nlevel == 0 && bphc->bphc_olevel != -1) {
- MDI_DEBUG(4, (CE_NOTE, bphc->bphc_dip,
- "mdi_bus_power i_mdi_pm_rele_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, bphc->bphc_dip,
+ "i_mdi_pm_rele_client\n"));
i_mdi_pm_rele_client(ct, ct->ct_path_count);
}
break;
@@ -6575,23 +6734,20 @@ i_mdi_pm_pre_config_one(dev_info_t *child)
if (!MDI_CLIENT_IS_FAILED(ct)) {
MDI_CLIENT_UNLOCK(ct);
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_pre_config_one already configured\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "already configured\n"));
return (MDI_SUCCESS);
}
if (ct->ct_powercnt_config) {
MDI_CLIENT_UNLOCK(ct);
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_pre_config_one ALREADY held\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "already held\n"));
return (MDI_SUCCESS);
}
if (ct->ct_power_cnt == 0) {
ret = i_mdi_power_all_phci(ct);
}
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_pre_config_one i_mdi_pm_hold_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "i_mdi_pm_hold_client\n"));
i_mdi_pm_hold_client(ct, ct->ct_path_count);
ct->ct_powercnt_config = 1;
ct->ct_powercnt_reset = 0;
@@ -6644,23 +6800,20 @@ i_mdi_pm_pre_unconfig_one(dev_info_t *child, int *held, int flags)
cv_wait(&ct->ct_powerchange_cv, &ct->ct_mutex);
if (!i_ddi_devi_attached(ct->ct_dip)) {
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_pre_unconfig node detached already\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "node detached already\n"));
MDI_CLIENT_UNLOCK(ct);
return (MDI_SUCCESS);
}
if (MDI_CLIENT_IS_POWERED_DOWN(ct) &&
(flags & NDI_AUTODETACH)) {
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_pre_unconfig auto-modunload\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "auto-modunload\n"));
MDI_CLIENT_UNLOCK(ct);
return (MDI_FAILURE);
}
if (ct->ct_powercnt_unconfig) {
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_pre_unconfig ct_powercnt_held\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "ct_powercnt_held\n"));
MDI_CLIENT_UNLOCK(ct);
*held = 1;
return (MDI_SUCCESS);
@@ -6669,8 +6822,7 @@ i_mdi_pm_pre_unconfig_one(dev_info_t *child, int *held, int flags)
if (ct->ct_power_cnt == 0) {
ret = i_mdi_power_all_phci(ct);
}
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_pre_unconfig i_mdi_pm_hold_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "i_mdi_pm_hold_client\n"));
i_mdi_pm_hold_client(ct, ct->ct_path_count);
ct->ct_powercnt_unconfig = 1;
ct->ct_powercnt_reset = 0;
@@ -6728,16 +6880,14 @@ i_mdi_pm_post_config_one(dev_info_t *child)
cv_wait(&ct->ct_powerchange_cv, &ct->ct_mutex);
if (ct->ct_powercnt_reset || !ct->ct_powercnt_config) {
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_post_config_one NOT configured\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "not configured\n"));
MDI_CLIENT_UNLOCK(ct);
return;
}
/* client has not been updated */
if (MDI_CLIENT_IS_FAILED(ct)) {
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_post_config_one NOT configured\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "client failed\n"));
MDI_CLIENT_UNLOCK(ct);
return;
}
@@ -6747,15 +6897,13 @@ i_mdi_pm_post_config_one(dev_info_t *child)
!DEVI_IS_ATTACHING(ct->ct_dip)) ||
(!i_ddi_devi_attached(ct->ct_dip) &&
!DEVI_IS_ATTACHING(ct->ct_dip))) {
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_post_config i_mdi_pm_reset_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "i_mdi_pm_reset_client\n"));
i_mdi_pm_reset_client(ct);
} else {
mdi_pathinfo_t *pip, *next;
int valid_path_count = 0;
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_post_config i_mdi_pm_rele_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "i_mdi_pm_rele_client\n"));
pip = ct->ct_path_head;
while (pip != NULL) {
MDI_PI_LOCK(pip);
@@ -6812,8 +6960,7 @@ i_mdi_pm_post_unconfig_one(dev_info_t *child)
cv_wait(&ct->ct_powerchange_cv, &ct->ct_mutex);
if (!ct->ct_powercnt_unconfig || ct->ct_powercnt_reset) {
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_post_unconfig NOT held\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "not held\n"));
MDI_CLIENT_UNLOCK(ct);
return;
}
@@ -6823,15 +6970,13 @@ i_mdi_pm_post_unconfig_one(dev_info_t *child)
i_ddi_devi_attached(ct->ct_dip)) ||
(!i_ddi_devi_attached(ct->ct_dip) &&
!DEVI_IS_ATTACHING(ct->ct_dip))) {
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_post_unconfig i_mdi_pm_reset_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "i_mdi_pm_reset_client\n"));
i_mdi_pm_reset_client(ct);
} else {
mdi_pathinfo_t *pip, *next;
int valid_path_count = 0;
- MDI_DEBUG(4, (CE_NOTE, child,
- "i_mdi_pm_post_unconfig i_mdi_pm_rele_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, child, "i_mdi_pm_rele_client\n"));
pip = ct->ct_path_head;
while (pip != NULL) {
MDI_PI_LOCK(pip);
@@ -6857,8 +7002,7 @@ i_mdi_pm_post_unconfig(dev_info_t *vdip, dev_info_t *child, int held)
ASSERT(MDI_VHCI(vdip));
if (!held) {
- MDI_DEBUG(4, (CE_NOTE, vdip,
- "i_mdi_pm_post_unconfig held = %d\n", held));
+ MDI_DEBUG(4, (MDI_NOTE, vdip, "held = %d", held));
return;
}
@@ -6898,8 +7042,8 @@ mdi_power(dev_info_t *vdip, mdi_pm_op_t op, void *args, char *devnm, int flags)
client_dip = ndi_devi_findchild(vdip, devnm);
}
- MDI_DEBUG(4, (CE_NOTE, vdip, "mdi_power op = %d %s %p\n",
- op, devnm ? devnm : "NULL", (void *)client_dip));
+ MDI_DEBUG(4, (MDI_NOTE, vdip,
+ "op = %d %s %p", op, devnm ? devnm : "", (void *)client_dip));
switch (op) {
case MDI_PM_PRE_CONFIG:
@@ -6932,18 +7076,18 @@ mdi_power(dev_info_t *vdip, mdi_pm_op_t op, void *args, char *devnm, int flags)
if (op == MDI_PM_HOLD_POWER) {
if (ct->ct_power_cnt == 0) {
(void) i_mdi_power_all_phci(ct);
- MDI_DEBUG(4, (CE_NOTE, client_dip,
- "mdi_power i_mdi_pm_hold_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, client_dip,
+ "i_mdi_pm_hold_client\n"));
i_mdi_pm_hold_client(ct, ct->ct_path_count);
}
} else {
if (DEVI_IS_ATTACHING(ct->ct_dip)) {
- MDI_DEBUG(4, (CE_NOTE, client_dip,
- "mdi_power i_mdi_pm_rele_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, client_dip,
+ "i_mdi_pm_rele_client\n"));
i_mdi_pm_rele_client(ct, ct->ct_path_count);
} else {
- MDI_DEBUG(4, (CE_NOTE, client_dip,
- "mdi_power i_mdi_pm_reset_client\n"));
+ MDI_DEBUG(4, (MDI_NOTE, client_dip,
+ "i_mdi_pm_reset_client\n"));
i_mdi_pm_reset_client(ct);
}
}
@@ -7093,6 +7237,60 @@ mdi_phci_set_vhci_private(dev_info_t *dip, void *priv)
}
}
+int
+mdi_pi_ishidden(mdi_pathinfo_t *pip)
+{
+ return (MDI_PI_FLAGS_IS_HIDDEN(pip));
+}
+
+int
+mdi_pi_device_isremoved(mdi_pathinfo_t *pip)
+{
+ return (MDI_PI_FLAGS_IS_DEVICE_REMOVED(pip));
+}
+
+/*
+ * When processing hotplug, if the mdi_pi_offline()-mdi_pi_free() sequence
+ * fails, this interface is used to represent device removal.
+ */
+int
+mdi_pi_device_remove(mdi_pathinfo_t *pip)
+{
+ MDI_PI_LOCK(pip);
+ if (mdi_pi_device_isremoved(pip)) {
+ MDI_PI_UNLOCK(pip);
+ return (0);
+ }
+ MDI_PI_FLAGS_SET_DEVICE_REMOVED(pip);
+ MDI_PI_FLAGS_SET_HIDDEN(pip);
+ MDI_PI_UNLOCK(pip);
+
+ i_ddi_di_cache_invalidate();
+
+ return (1);
+}
+
+/*
+ * When processing hotplug, if a path marked mdi_pi_device_isremoved()
+ * is accessible again, this interface is used to represent device insertion.
+ */
+int
+mdi_pi_device_insert(mdi_pathinfo_t *pip)
+{
+ MDI_PI_LOCK(pip);
+ if (!mdi_pi_device_isremoved(pip)) {
+ MDI_PI_UNLOCK(pip);
+ return (0);
+ }
+ MDI_PI_FLAGS_CLR_DEVICE_REMOVED(pip);
+ MDI_PI_FLAGS_CLR_HIDDEN(pip);
+ MDI_PI_UNLOCK(pip);
+
+ i_ddi_di_cache_invalidate();
+
+ return (1);
+}
+
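
For orientation, a sketch (not part of this patch) of how a pHCI hotplug
handler might use the two interfaces above; the handler names and the zero
flags passed to mdi_pi_offline()/mdi_pi_free() are assumptions.

	/* Path has gone away: try normal teardown, else mark it removed. */
	static void
	xx_path_gone(mdi_pathinfo_t *pip)
	{
		if (mdi_pi_offline(pip, 0) != MDI_SUCCESS ||
		    mdi_pi_free(pip, 0) != MDI_SUCCESS)
			(void) mdi_pi_device_remove(pip);
	}

	/* Path is visible again: clear DEVICE_REMOVED/HIDDEN on the pip. */
	static void
	xx_path_back(mdi_pathinfo_t *pip)
	{
		(void) mdi_pi_device_insert(pip);
	}
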
/*
* List of vhci class names:
* A vhci class name must be in this list only if the corresponding vhci
@@ -7196,7 +7394,7 @@ setup_vhci_cache(mdi_vhci_t *vh)
vhcache->vhcache_flags |= MDI_VHCI_CACHE_SETUP_DONE;
else {
cmn_err(CE_WARN,
- "%s: data file corrupted, will recreate\n",
+ "%s: data file corrupted, will recreate",
vhc->vhc_vhcache_filename);
}
rw_exit(&vhcache->vhcache_lock);
@@ -7269,7 +7467,7 @@ stop_vhcache_async_threads(mdi_vhci_config_t *vhc)
while ((vhc->vhc_flags & MDI_VHC_VHCACHE_FLUSH_THREAD) ||
vhc->vhc_acc_thrcount != 0) {
mutex_exit(&vhc->vhc_lock);
- delay(1);
+ delay_random(5);
mutex_enter(&vhc->vhc_lock);
}
@@ -7314,7 +7512,7 @@ stop_vhcache_flush_thread(void *arg, int code)
while (vhc->vhc_flags & MDI_VHC_VHCACHE_FLUSH_THREAD) {
mutex_exit(&vhc->vhc_lock);
- delay(1);
+ delay_random(5);
mutex_enter(&vhc->vhc_lock);
}
@@ -7542,9 +7740,9 @@ caddrmapnvl_to_vhcache(mdi_vhci_cache_t *vhcache, nvlist_t *nvl,
* ...
* where pi_addr1, pi_addr2, ... are bus specific addresses of pathinfo nodes
* (so called pi_addrs, for example: "w2100002037cd9f72,0");
- * phci-ids are integers that identify PHCIs to which the
+ * phci-ids are integers that identify pHCIs to which
* the bus specific address belongs to. These integers are used as an index
- * into to the phcis string array in the main nvlist to get the PHCI path.
+ * into the phcis string array in the main nvlist to get the pHCI path.
*/
static int
mainnvl_to_vhcache(mdi_vhci_cache_t *vhcache, nvlist_t *nvl)
@@ -8977,8 +9175,8 @@ mdi_vhci_bus_config(dev_info_t *vdip, uint_t flags, ddi_bus_config_op_t op,
* the vhci node.
*/
if (DEVI_BUSY_OWNED(vdip)) {
- MDI_DEBUG(2, (CE_NOTE, vdip, "!MDI: vhci bus config: "
- "vhci dip is busy owned %p\n", (void *)vdip));
+ MDI_DEBUG(2, (MDI_NOTE, vdip,
+ "vhci dip is busy owned %p", (void *)vdip));
goto default_bus_config;
}
@@ -9063,10 +9261,10 @@ read_on_disk_vhci_cache(char *vhci_class)
kmem_free(filename, strlen(filename) + 1);
return (nvl);
} else if (err == EIO)
- cmn_err(CE_WARN, "%s: I/O error, will recreate\n", filename);
+ cmn_err(CE_WARN, "%s: I/O error, will recreate", filename);
else if (err == EINVAL)
cmn_err(CE_WARN,
- "%s: data file corrupted, will recreate\n", filename);
+ "%s: data file corrupted, will recreate", filename);
kmem_free(filename, strlen(filename) + 1);
return (NULL);
@@ -9298,8 +9496,7 @@ error:
return;
alloc_failed:
- MDI_DEBUG(1, (CE_WARN, dip,
- "!i_mdi_log_sysevent: Unable to send sysevent"));
+ MDI_DEBUG(1, (MDI_WARN, dip, "!unable to send sysevent"));
}
char **
diff --git a/usr/src/uts/common/os/sunndi.c b/usr/src/uts/common/os/sunndi.c
index 71f54e5429..04a07fde29 100644
--- a/usr/src/uts/common/os/sunndi.c
+++ b/usr/src/uts/common/os/sunndi.c
@@ -2174,6 +2174,12 @@ ndi_dev_is_hidden_node(dev_info_t *dip)
return ((DEVI(dip)->devi_node_attributes & DDI_HIDDEN_NODE) != 0);
}
+int
+ndi_dev_is_hotplug_node(dev_info_t *dip)
+{
+ return ((DEVI(dip)->devi_node_attributes & DDI_HOTPLUG_NODE) != 0);
+}
+
void
ndi_devi_set_hidden(dev_info_t *dip)
{
@@ -2556,15 +2562,15 @@ ndi_flavorv_alloc(dev_info_t *self, int nflavors)
void
ndi_flavorv_set(dev_info_t *self, ndi_flavor_t child_flavor, void *v)
{
- ASSERT(child_flavor < DEVI(self)->devi_flavorv_n &&
- DEVI(self)->devi_flavorv != NULL);
- if (child_flavor > DEVI(self)->devi_flavorv_n ||
- DEVI(self)->devi_flavorv == NULL) {
- return;
- }
if (child_flavor == NDI_FLAVOR_VANILLA) {
ddi_set_driver_private(self, v);
} else {
+ ASSERT(child_flavor < DEVI(self)->devi_flavorv_n &&
+ DEVI(self)->devi_flavorv != NULL);
+ if (child_flavor > DEVI(self)->devi_flavorv_n ||
+ DEVI(self)->devi_flavorv == NULL) {
+ return;
+ }
DEVI(self)->devi_flavorv[child_flavor - 1] = v;
}
}
@@ -2572,15 +2578,15 @@ ndi_flavorv_set(dev_info_t *self, ndi_flavor_t child_flavor, void *v)
void *
ndi_flavorv_get(dev_info_t *self, ndi_flavor_t child_flavor)
{
- ASSERT(child_flavor < DEVI(self)->devi_flavorv_n &&
- DEVI(self)->devi_flavorv != NULL);
- if (child_flavor > DEVI(self)->devi_flavorv_n ||
- DEVI(self)->devi_flavorv == NULL) {
- return (NULL);
- }
if (child_flavor == NDI_FLAVOR_VANILLA) {
return (ddi_get_driver_private(self));
} else {
+ ASSERT(child_flavor < DEVI(self)->devi_flavorv_n &&
+ DEVI(self)->devi_flavorv != NULL);
+ if (child_flavor > DEVI(self)->devi_flavorv_n ||
+ DEVI(self)->devi_flavorv == NULL) {
+ return (NULL);
+ }
return (DEVI(self)->devi_flavorv[child_flavor - 1]);
}
}
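
With the reordering above, a vanilla caller on a nexus that never allocated
a flavor vector (devi_flavorv == NULL) no longer reaches the flavor-vector
guard; a schematic illustration (the function name and my_priv are
placeholders, not part of this patch):

	static void
	xx_set_vanilla(dev_info_t *self, void *my_priv)
	{
		/* routed to ddi_set_driver_private() before any flavor checks */
		ndi_flavorv_set(self, NDI_FLAVOR_VANILLA, my_priv);
		ASSERT(ndi_flavorv_get(self, NDI_FLAVOR_VANILLA) == my_priv);
	}
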
diff --git a/usr/src/uts/common/sys/Makefile b/usr/src/uts/common/sys/Makefile
index 29518c52ea..5300263225 100644
--- a/usr/src/uts/common/sys/Makefile
+++ b/usr/src/uts/common/sys/Makefile
@@ -142,6 +142,8 @@ CHKHDRS= \
cyclic_impl.h \
dacf.h \
dacf_impl.h \
+ damap.h \
+ damap_impl.h \
dc_ki.h \
ddi.h \
ddifm.h \
diff --git a/usr/src/uts/common/sys/autoconf.h b/usr/src/uts/common/sys/autoconf.h
index 5aa9351981..df743597b7 100644
--- a/usr/src/uts/common/sys/autoconf.h
+++ b/usr/src/uts/common/sys/autoconf.h
@@ -265,7 +265,7 @@ extern void i_ddi_forceattach_drivers(void);
extern int i_ddi_io_initialized(void);
extern dev_info_t *i_ddi_create_branch(dev_info_t *, int);
extern void i_ddi_add_devimap(dev_info_t *dip);
-extern void i_ddi_di_cache_invalidate(int kmflag);
+extern void i_ddi_di_cache_invalidate(void);
extern void i_ddi_di_cache_free(struct di_cache *cache);
/* devname_state - for /dev to denote reconfig and system available */
diff --git a/usr/src/uts/common/sys/bitset.h b/usr/src/uts/common/sys/bitset.h
index b14636373b..daf608e835 100644
--- a/usr/src/uts/common/sys/bitset.h
+++ b/usr/src/uts/common/sys/bitset.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -72,6 +72,20 @@ int bitset_in_set(bitset_t *, uint_t);
int bitset_is_null(bitset_t *);
uint_t bitset_find(bitset_t *);
+/*
+ * Bitset computations
+ */
+int bitset_and(bitset_t *, bitset_t *, bitset_t *);
+int bitset_or(bitset_t *, bitset_t *, bitset_t *);
+int bitset_xor(bitset_t *, bitset_t *, bitset_t *);
+
+/*
+ * Miscellaneous bitset operations
+ */
+void bitset_zero(bitset_t *);
+void bitset_copy(bitset_t *, bitset_t *);
+int bitset_match(bitset_t *, bitset_t *);
+
#endif /* !_KERNEL && !_KMEMUSER */
#ifdef __cplusplus
diff --git a/usr/src/uts/common/sys/damap.h b/usr/src/uts/common/sys/damap.h
new file mode 100644
index 0000000000..59777e8033
--- /dev/null
+++ b/usr/src/uts/common/sys/damap.h
@@ -0,0 +1,156 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_DAMAP_H
+#define _SYS_DAMAP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Delta (device) Address Map Interfaces
+ *
+ * These interfaces provide time-stabilized sets of 'addresses',
+ * where addresses are string representations of device- or
+ * bus-specific addresses. The mechanisms include interfaces to
+ * report and remove addresses from a map, time stabilization, callouts
+ * to higher-level configuration and unconfiguration actions, and
+ * address lookup functions.
+ *
+ * Per Address Reports
+ * With per-address reporting, the caller reports the addition and removal
+ * of each address visible to it. Each report is independently time stabilized;
+ * once a report has stabilized, the reported address is either
+ * activated & configured, or unconfigured & released.
+ *
+ * Full Set Reports
+ * When using fullset reporting, the report provider enumerates the entire
+ * set of addresses visible to the provider at a given point in time.
+ * The entire set is then stabilized.
+ * Upon stabilizing, any newly reported addresses are activated & configured
+ * and any previously active addresses which are no longer visible are
+ * automatically unconfigured and released, freeing the provider from
+ * the need to explicitly unconfigure addresses no longer present.
+ *
+ * Stabilization
+ * Once an address has been reported (or reported as removed), the report
+ * is time stabilized before the framework initiates a configuration
+ * or unconfiguration action. If the address is re-reported while undergoing
+ * stabilization, the timer is reset for either the address or the full
+ * set of addresses reported to the map.
+ *
+ * Activation/Release
+ * Once a reported address has passed its stabilization, the address is
+ * 'activated' by the framework. Once activated, the address is passed
+ * to a configuration callout to perform whatever actions are necessary.
+ * If a reported address is deleted or fails to stabilize, the address
+ * is released by the map.
+ * A report provider may register callback functions to be invoked
+ * as part of the address activation & release process. In addition to
+ * the callbacks, a provider can also supply a handle to provider-private
+ * data at the time an address is reported. This handle is returned to the
+ * provider as an argument to the activation & release callbacks.
+ *
+ * Lookup/Access
+ * The set of stable addresses contained in a map can be obtained by
+ * calling interfaces to look up either a single address or the full
+ * list of stable addresses.
+ */
+
+/*
+ * damap_t: Handle to a delta address map
+ * damap_id_t: Handle to an entry of damap_t
+ * damap_id_list_t: List of damap_id_handles
+ */
+typedef struct __damap_dm *damap_t;
+typedef struct __damap_id_list *damap_id_list_t;
+typedef id_t damap_id_t;
+
+#define NODAM (damap_id_t)0
+
+/*
+ * activate_cb: Provider callback when reported address is activated
+ * deactivate_cb: Provider callback when address has been released
+ *
+ * configure_cb: Class callout to configure newly activated addresses
+ * unconfig_cb: Class callout to unconfigure deactivated addresses
+ */
+typedef void (*damap_activate_cb_t)(void *, char *, int, void **);
+typedef void (*damap_deactivate_cb_t)(void *, char *, int, void *);
+
+typedef void (*damap_configure_cb_t)(void *, damap_t *, damap_id_list_t);
+typedef void (*damap_unconfig_cb_t)(void *, damap_t *, damap_id_list_t);
+
+/*
+ * Map reporting mode
+ */
+typedef enum {DAMAP_REPORT_PERADDR, DAMAP_REPORT_FULLSET} damap_rptmode_t;
+
+#define DAMAP_RESET 1 /* flag to damap_addrset_end */
+
+int damap_create(char *, size_t, damap_rptmode_t, clock_t,
+ void *, damap_activate_cb_t, damap_deactivate_cb_t,
+ void *, damap_configure_cb_t, damap_unconfig_cb_t,
+ damap_t **);
+void damap_destroy(damap_t *);
+
+char *damap_name(damap_t *);
+int damap_sync(damap_t *);
+
+int damap_addr_add(damap_t *, char *, damap_id_t *, nvlist_t *, void *);
+int damap_addr_del(damap_t *, char *);
+int damap_addrid_del(damap_t *, int);
+
+int damap_addrset_begin(damap_t *);
+int damap_addrset_add(damap_t *, char *, damap_id_t *, nvlist_t *, void *);
+int damap_addrset_end(damap_t *, int);
+int damap_addrset_reset(damap_t *, int);
+
+damap_id_t damap_id_next(damap_t *, damap_id_list_t, damap_id_t);
+char *damap_id2addr(damap_t *, damap_id_t);
+nvlist_t *damap_id2nvlist(damap_t *, damap_id_t);
+int damap_id_hold(damap_t *, damap_id_t);
+void damap_id_rele(damap_t *, damap_id_t);
+int damap_id_ref(damap_t *, damap_id_t);
+void damap_id_list_rele(damap_t *, damap_id_list_t);
+void *damap_id_priv_get(damap_t *, damap_id_t);
+void damap_id_priv_set(damap_t *, damap_id_t, void *);
+damap_id_t damap_lookup(damap_t *, char *);
+int damap_lookup_all(damap_t *, damap_id_list_t *);
+
+#define DAM_SUCCESS 0
+#define DAM_EEXIST 1
+#define DAM_MAPFULL 2
+#define DAM_EINVAL 3
+#define DAM_FAILURE 4
+#define DAM_SHAME 5
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_DAMAP_H */
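
To make the report/stabilize/configure flow described above concrete, a
sketch of a per-address report provider is shown below; everything prefixed
"xx_", the map size of 256, the 10-second stabilization time, and the NULL
activation callbacks are assumptions for illustration, not part of this
interface definition.

	static void
	xx_cfg(void *arg, damap_t *mapp, damap_id_list_t config_ids)
	{
		/* configure each address that just became stable */
	}

	static void
	xx_uncfg(void *arg, damap_t *mapp, damap_id_list_t unconfig_ids)
	{
		/* unconfigure each address that fell out of the stable set */
	}

	static int
	xx_map_setup(void *softstate, damap_t **mapp)
	{
		/* 10s stabilization, per-address reports, no activation cbs */
		if (damap_create("xx_tgtmap", 256, DAMAP_REPORT_PERADDR,
		    drv_usectohz(10 * MICROSEC), NULL, NULL, NULL,
		    softstate, xx_cfg, xx_uncfg, mapp) != DAM_SUCCESS)
			return (DDI_FAILURE);

		/* report one address; xx_cfg runs once the report stabilizes */
		if (damap_addr_add(*mapp, "w2100002037cd9f72,0",
		    NULL, NULL, NULL) != DAM_SUCCESS) {
			damap_destroy(*mapp);
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}
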
diff --git a/usr/src/uts/common/sys/damap_impl.h b/usr/src/uts/common/sys/damap_impl.h
new file mode 100644
index 0000000000..1cbdad3716
--- /dev/null
+++ b/usr/src/uts/common/sys/damap_impl.h
@@ -0,0 +1,181 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_DAMAP_IMPL_H
+#define _SYS_DAMAP_IMPL_H
+
+#include <sys/isa_defs.h>
+#include <sys/dditypes.h>
+#include <sys/time.h>
+#include <sys/cmn_err.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/ddi_implfuncs.h>
+#include <sys/ddi_isa.h>
+#include <sys/model.h>
+#include <sys/devctl.h>
+#include <sys/nvpair.h>
+#include <sys/sysevent.h>
+#include <sys/bitset.h>
+#include <sys/sdt.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct dam dam_t;
+
+/*
+ * activate_cb: Provider callback when reported address is activated
+ * deactivate_cb: Provider callback when address has been released
+ *
+ * configure_cb: Class callout to configure newly activated addresses
+ * unconfig_cb: Class callout to unconfigure deactivated addresses
+ */
+typedef void (*activate_cb_t)(void *, char *addr, int idx, void **privp);
+typedef void (*deactivate_cb_t)(void *, char *addr, int idx, void *priv);
+
+typedef void (*configure_cb_t)(void *, dam_t *mapp, bitset_t *config_ids);
+typedef void (*unconfig_cb_t)(void *, dam_t *mapp, bitset_t *unconfig_ids);
+
+struct dam {
+ char *dam_name;
+ int dam_flags; /* state and locks */
+ int dam_options; /* options at map creation */
+ int dam_rptmode; /* report mode */
+ clock_t dam_stabletmo; /* stabilization (ticks) */
+ uint_t dam_size; /* max index for addr hash */
+ id_t dam_high; /* highest index allocated */
+ timeout_id_t dam_tid; /* timeout(9F) ID */
+
+ void *dam_activate_arg; /* activation private */
+ activate_cb_t dam_activate_cb; /* activation callback */
+ deactivate_cb_t dam_deactivate_cb; /* deactivation callback */
+
+ void *dam_config_arg; /* config-private */
+ configure_cb_t dam_configure_cb; /* configure callout */
+ unconfig_cb_t dam_unconfig_cb; /* unconfigure callout */
+
+ ddi_strid *dam_addr_hash; /* address to ID hash */
+ bitset_t dam_active_set; /* activated address set */
+ bitset_t dam_stable_set; /* stable address set */
+ bitset_t dam_report_set; /* reported address set */
+ void *dam_da; /* per-address soft state */
+ ddi_taskq_t *dam_taskqp; /* taskq for map activation */
+ hrtime_t dam_last_update; /* last map update */
+ hrtime_t dam_last_stable; /* last map stable */
+ int dam_stable_cnt; /* # of times map stabilized */
+ int dam_stable_overrun;
+ kcondvar_t dam_cv;
+ kmutex_t dam_lock;
+ kstat_t *dam_kstatsp;
+};
+
+/*
+ * damap.dam_flags
+ */
+#define ADDR_LOCK 0x1000 /* per-addr lock */
+#define MAP_LOCK 0x2000 /* global map lock */
+#define DAM_LOCK(m, l) { \
+ mutex_enter(&(m)->dam_lock); \
+ while ((m)->dam_flags & (l)) \
+ cv_wait(&(m)->dam_cv, &(m)->dam_lock); \
+ (m)->dam_flags |= (l); \
+ mutex_exit(&(m)->dam_lock); \
+}
+#define DAM_UNLOCK(m, l) { \
+ mutex_enter(&(m)->dam_lock); \
+ (m)->dam_flags &= ~(l); \
+ cv_signal(&(m)->dam_cv); \
+ mutex_exit(&(m)->dam_lock); \
+}
+#define DAM_ASSERT_LOCKED(m, l) ASSERT((m)->dam_flags & (l))
+
+#define DAM_SPEND 0x10 /* stable pending */
+#define DAM_DESTROYPEND 0x20 /* in process of being destroyed */
+#define DAM_SETADD 0x100 /* fullset update pending */
+#define DAM_FLAG_SET(m, f) { \
+ mutex_enter(&(m)->dam_lock); \
+ (m)->dam_flags |= f; \
+ mutex_exit(&(m)->dam_lock); \
+}
+#define DAM_FLAG_CLR(m, f) { \
+ mutex_enter(&(m)->dam_lock); \
+ (m)->dam_flags &= ~f; \
+ mutex_exit(&(m)->dam_lock); \
+}
+
+/*
+ * per-address soft state structure
+ */
+typedef struct {
+ uint_t da_flags; /* flags */
+ int da_jitter; /* address report count */
+ int da_ref; /* refcount on address */
+ void *da_ppriv; /* stable provider private */
+ void *da_cfg_priv; /* config/unconfig private */
+ nvlist_t *da_nvl; /* stable nvlist */
+ void *da_ppriv_rpt; /* reported provider-private */
+ nvlist_t *da_nvl_rpt; /* reported nvlist */
+ int64_t da_deadline; /* lbolt64 value when stable */
+ hrtime_t da_last_report; /* timestamp of last report */
+ int da_report_cnt; /* # of times address reported */
+ hrtime_t da_last_stable; /* timestamp of last stable address */
+ int da_stable_cnt; /* # of times address has stabilized */
+ char *da_addr; /* string in dam_addr_hash (for mdb) */
+} dam_da_t;
+
+/*
+ * dam_da_t.da_flags
+ */
+#define DA_INIT 0x1 /* address initialized */
+#define DA_ACTIVE 0x2 /* address stable */
+#define DA_RELE 0x4 /* address released */
+
+
+/*
+ * report type
+ */
+#define RPT_ADDR_ADD 0
+#define RPT_ADDR_DEL 1
+
+#define DAM_IN_REPORT(m, i) (bitset_in_set(&(m)->dam_report_set, (i)))
+#define DAM_IS_STABLE(m, i) (bitset_in_set(&(m)->dam_active_set, (i)))
+
+/*
+ * DAM statistics
+ */
+struct dam_kstats {
+ struct kstat_named dam_stable;
+ struct kstat_named dam_stable_blocked;
+ struct kstat_named dam_rereport;
+ struct kstat_named dam_numstable;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_DAMAP_IMPL_H */
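
The DAM_LOCK/DAM_UNLOCK macros above implement a sleeping flag-lock over
dam_lock/dam_cv rather than holding the mutex across the critical section;
a schematic of the intended usage follows (the function itself is
hypothetical):

	static void
	xx_update_map_locked(dam_t *mapp)
	{
		DAM_LOCK(mapp, MAP_LOCK);	/* sleeps until MAP_LOCK clears */
		DAM_ASSERT_LOCKED(mapp, MAP_LOCK);

		/* ... manipulate global map state (bitsets, dam_high, ...) ... */

		DAM_UNLOCK(mapp, MAP_LOCK);	/* clears the flag, wakes a waiter */
	}
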
diff --git a/usr/src/uts/common/sys/ddi_impldefs.h b/usr/src/uts/common/sys/ddi_impldefs.h
index 7be6c896ad..5b5d103e08 100644
--- a/usr/src/uts/common/sys/ddi_impldefs.h
+++ b/usr/src/uts/common/sys/ddi_impldefs.h
@@ -40,6 +40,9 @@
#include <sys/ddidmareq.h>
#include <sys/ddi_intr.h>
#include <sys/ddi_isa.h>
+#include <sys/id_space.h>
+#include <sys/modhash.h>
+#include <sys/bitset.h>
#ifdef __cplusplus
extern "C" {
@@ -105,7 +108,7 @@ typedef struct devi_port {
} port;
uint64_t type64;
} info;
- void *priv_p;
+ void *priv_p;
} devi_port_t;
typedef struct devi_bus_priv {
@@ -145,7 +148,7 @@ struct dev_info {
struct dev_info *devi_next; /* Next instance of this device */
kmutex_t devi_lock; /* Protects per-devinfo data */
- /* logical parents for busop primitives */
+ /* logical parents for busop primitives */
struct dev_info *devi_bus_map_fault; /* bus_map_fault parent */
struct dev_info *devi_bus_dma_map; /* bus_dma_map parent */
@@ -395,12 +398,13 @@ struct dev_info {
#define DEVI_SET_DEVICE_REMOVED(dip) { \
ASSERT(mutex_owned(&DEVI(dip)->devi_lock)); \
- DEVI(dip)->devi_state |= DEVI_DEVICE_REMOVED; \
+ DEVI(dip)->devi_state |= DEVI_DEVICE_REMOVED | DEVI_S_REPORT; \
}
#define DEVI_SET_DEVICE_REINSERTED(dip) { \
ASSERT(mutex_owned(&DEVI(dip)->devi_lock)); \
DEVI(dip)->devi_state &= ~DEVI_DEVICE_REMOVED; \
+ DEVI(dip)->devi_state |= DEVI_S_REPORT; \
}
/* Bus state change macros */
@@ -691,11 +695,14 @@ struct ddi_minor {
*
* DDI_HIDDEN_NODE indicates that the node should not show up in snapshots
* or in /devices.
+ *
+ * DDI_HOTPLUG_NODE indicates that the node was created by nexus hotplug.
*/
#define DDI_PERSISTENT 0x01
#define DDI_AUTO_ASSIGNED_NODEID 0x02
#define DDI_VHCI_NODE 0x04
#define DDI_HIDDEN_NODE 0x08
+#define DDI_HOTPLUG_NODE 0x10
#define DEVI_VHCI_NODE(dip) \
(DEVI(dip)->devi_node_attributes & DDI_VHCI_NODE)
@@ -741,16 +748,35 @@ struct ddi_parent_private_data {
#define sparc_pd_getintr(dev, n) (&DEVI_PD(dev)->par_intr[(n)])
#define sparc_pd_getrng(dev, n) (&DEVI_PD(dev)->par_rng[(n)])
+#ifdef _KERNEL
/*
- * This data structure is entirely private to the soft state allocator.
+ * This data structure is private to the indexed soft state allocator.
*/
-struct i_ddi_soft_state {
+typedef struct i_ddi_soft_state {
void **array; /* the array of pointers */
- kmutex_t lock; /* serialize access to this struct */
- size_t size; /* how many bytes per state struct */
+ kmutex_t lock; /* serialize access to this struct */
+ size_t size; /* how many bytes per state struct */
size_t n_items; /* how many structs herein */
struct i_ddi_soft_state *next; /* 'dirty' elements */
-};
+} i_ddi_soft_state;
+
+/*
+ * This data structure is private to the stringhashed soft state allocator.
+ */
+typedef struct i_ddi_soft_state_bystr {
+ size_t ss_size; /* how many bytes per state struct */
+ mod_hash_t *ss_mod_hash; /* hash implementation */
+} i_ddi_soft_state_bystr;
+
+/*
+ * This data structure is private to the ddi_strid_* implementation
+ */
+typedef struct i_ddi_strid {
+ id_space_t *strid_space;
+ mod_hash_t *strid_byid;
+ mod_hash_t *strid_bystr;
+} i_ddi_strid;
+#endif /* _KERNEL */
/*
* Solaris DDI DMA implementation structure and function definitions.
diff --git a/usr/src/uts/common/sys/devctl.h b/usr/src/uts/common/sys/devctl.h
index a201fded95..5dce50c72a 100644
--- a/usr/src/uts/common/sys/devctl.h
+++ b/usr/src/uts/common/sys/devctl.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -41,7 +41,7 @@ extern "C" {
* and nexus driver devctl IOCTL interface.
*
* Applications and nexus drivers may not access the contents of this
- * structure directly. Instead, drivers must use the ndi_dc_XXX(9n)
+ * structure directly. Instead, drivers must use the ndi_dc_*(9n)
* interfaces, while applications must use the interfaces provided by
* libdevice.so.1.
*/
@@ -230,6 +230,9 @@ typedef struct devctl_ap_state32 {
#define BUS_QUIESCED 0x20
#define BUS_SHUTDOWN 0x40
+#define DEVICE_STATES_ASCII "Dev_Online", "Dev_Busy", "Dev_Offline", \
+ "Dev_Down", "Bus_Active", "Bus_Quiesced", "Bus_Shutdown"
+
#define DC_DEVI_NODENAME "ndi_dc.devi_nodename"
#define DEVCTL_CONSTRUCT 0x1
diff --git a/usr/src/uts/common/sys/devinfo_impl.h b/usr/src/uts/common/sys/devinfo_impl.h
index e64f5fc601..4f874ea1e3 100644
--- a/usr/src/uts/common/sys/devinfo_impl.h
+++ b/usr/src/uts/common/sys/devinfo_impl.h
@@ -317,6 +317,7 @@ struct di_path {
uint_t path_snap_state; /* describes valid fields */
int path_instance; /* path instance */
uint64_t user_private_data;
+ uint_t path_flags; /* path flags */
};
/*
@@ -331,6 +332,11 @@ struct di_path {
#define DI_PATH_SNAP_LINKS 0x40 /* linkages have been postprocessed */
/*
+ * Flags for path_flags
+ */
+#define DI_PATH_FLAGS_DEVICE_REMOVED 0x01 /* peer of DI_DEVICE_REMOVED */
+
+/*
* path properties
*/
struct di_path_prop {
diff --git a/usr/src/uts/common/sys/id_space.h b/usr/src/uts/common/sys/id_space.h
index c6f7246658..1f7762316f 100644
--- a/usr/src/uts/common/sys/id_space.h
+++ b/usr/src/uts/common/sys/id_space.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _ID_SPACE_H
#define _ID_SPACE_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -47,6 +44,8 @@ void id_space_destroy(id_space_t *);
void id_space_extend(id_space_t *, id_t, id_t);
id_t id_alloc(id_space_t *);
id_t id_alloc_nosleep(id_space_t *);
+id_t id_allocff(id_space_t *);
+id_t id_allocff_nosleep(id_space_t *);
void id_free(id_space_t *, id_t);
#endif /* _KERNEL */
diff --git a/usr/src/uts/common/sys/mdi_impldefs.h b/usr/src/uts/common/sys/mdi_impldefs.h
index 0f2b0dfe58..563c62af08 100644
--- a/usr/src/uts/common/sys/mdi_impldefs.h
+++ b/usr/src/uts/common/sys/mdi_impldefs.h
@@ -342,7 +342,7 @@ typedef struct mdi_phci {
struct mdi_phci *ph_next; /* next pHCI link */
struct mdi_phci *ph_prev; /* prev pHCI link */
dev_info_t *ph_dip; /* pHCI devi handle */
- struct mdi_vhci *ph_vhci; /* pHCI back ref. to vHCI */
+ struct mdi_vhci *ph_vhci; /* pHCI back ref. to vHCI */
/* protected by MDI_PHCI_LOCK ph_mutex... */
kmutex_t ph_mutex; /* per-pHCI mutex */
@@ -398,7 +398,7 @@ typedef struct mdi_phci {
#define MDI_PHCI_IS_READY(ph) \
(((ph)->ph_flags & MDI_PHCI_DISABLE_MASK) == 0)
-#define MDI_PHCI_SET_OFFLINE(ph) {\
+#define MDI_PHCI_SET_OFFLINE(ph) {\
ASSERT(MDI_PHCI_LOCKED(ph)); \
(ph)->ph_flags |= MDI_PHCI_FLAGS_OFFLINE; }
#define MDI_PHCI_SET_ONLINE(ph) {\
@@ -407,7 +407,7 @@ typedef struct mdi_phci {
#define MDI_PHCI_IS_OFFLINE(ph) \
((ph)->ph_flags & MDI_PHCI_FLAGS_OFFLINE)
-#define MDI_PHCI_SET_SUSPEND(ph) {\
+#define MDI_PHCI_SET_SUSPEND(ph) {\
ASSERT(MDI_PHCI_LOCKED(ph)); \
(ph)->ph_flags |= MDI_PHCI_FLAGS_SUSPEND; }
#define MDI_PHCI_SET_RESUME(ph) {\
@@ -501,7 +501,7 @@ typedef struct mdi_phci {
* OPTIMAL - Client device has at least one redundant path.
* DEGRADED - No redundant paths (critical). Failure in the current active
* path would result in data access failures.
- * FAILED - No paths are available to access this device.
+ * FAILED - No paths are available to access this device.
*
* Locking order:
*
@@ -517,7 +517,7 @@ typedef struct mdi_client {
char *ct_drvname; /* client driver name */
char *ct_guid; /* client guid */
client_lb_t ct_lb; /* load balancing scheme */
- client_lb_args_t *ct_lb_args; /* load balancing args */
+ client_lb_args_t *ct_lb_args; /* load balancing args */
/* protected by MDI_CLIENT_LOCK ct_mutex... */
@@ -726,8 +726,9 @@ struct mdi_pathinfo {
kcondvar_t pi_ref_cv; /* condition variable */
struct mdi_pi_kstats *pi_kstats; /* aggregate kstats */
int pi_pm_held; /* phci's kidsup incremented */
- int pi_preferred; /* Preferred path */
+ int pi_preferred; /* Preferred path */
void *pi_vprivate; /* vhci private info */
+ uint_t pi_flags; /* path flags */
};
/*
@@ -959,6 +960,26 @@ struct pi_errs {
#define MDI_PI_IS_SUSPENDED(pip) \
((MDI_PI(pip))->pi_phci->ph_flags & MDI_PHCI_FLAGS_SUSPEND)
+#define MDI_PI_FLAGS_SET_HIDDEN(pip) {\
+ ASSERT(MDI_PI_LOCKED(pip)); \
+ MDI_PI(pip)->pi_flags |= MDI_PATHINFO_FLAGS_HIDDEN; }
+#define MDI_PI_FLAGS_CLR_HIDDEN(pip) {\
+ ASSERT(MDI_PI_LOCKED(pip)); \
+ MDI_PI(pip)->pi_flags &= ~MDI_PATHINFO_FLAGS_HIDDEN; }
+#define MDI_PI_FLAGS_IS_HIDDEN(pip) \
+ ((MDI_PI(pip)->pi_flags & MDI_PATHINFO_FLAGS_HIDDEN) == \
+ MDI_PATHINFO_FLAGS_HIDDEN)
+
+#define MDI_PI_FLAGS_SET_DEVICE_REMOVED(pip) {\
+ ASSERT(MDI_PI_LOCKED(pip)); \
+ MDI_PI(pip)->pi_flags |= MDI_PATHINFO_FLAGS_DEVICE_REMOVED; }
+#define MDI_PI_FLAGS_CLR_DEVICE_REMOVED(pip) {\
+ ASSERT(MDI_PI_LOCKED(pip)); \
+ MDI_PI(pip)->pi_flags &= ~MDI_PATHINFO_FLAGS_DEVICE_REMOVED; }
+#define MDI_PI_FLAGS_IS_DEVICE_REMOVED(pip) \
+ ((MDI_PI(pip)->pi_flags & MDI_PATHINFO_FLAGS_DEVICE_REMOVED) == \
+ MDI_PATHINFO_FLAGS_DEVICE_REMOVED)
+
/*
* mdi_vhcache_client, mdi_vhcache_pathinfo, and mdi_vhcache_phci structures
* hold the vhci to phci client mappings of the on-disk vhci busconfig cache.
diff --git a/usr/src/uts/common/sys/scsi/adapters/mpapi_impl.h b/usr/src/uts/common/sys/scsi/adapters/mpapi_impl.h
index b54df92613..1f69323c62 100644
--- a/usr/src/uts/common/sys/scsi/adapters/mpapi_impl.h
+++ b/usr/src/uts/common/sys/scsi/adapters/mpapi_impl.h
@@ -19,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_SCSI_ADAPTERS_MPAPI_IMPL_H
#define _SYS_SCSI_ADAPTERS_MPAPI_IMPL_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/sunmdi.h>
#include <sys/sunddi.h>
#include <sys/mdi_impldefs.h>
@@ -253,6 +251,7 @@ typedef struct mp_path_prop {
#define MP_DRVR_PATH_STATE_REMOVED 5
#define MP_DRVR_PATH_STATE_TRANSITIONING 6
#define MP_DRVR_PATH_STATE_UNKNOWN 7
+#define MP_DRVR_PATH_STATE_UNINIT 8
/* Structure for MP_PROPRIETARY_LOAD_BALANCE_PROPERTIES */
diff --git a/usr/src/uts/common/sys/scsi/adapters/mpapi_scsi_vhci.h b/usr/src/uts/common/sys/scsi/adapters/mpapi_scsi_vhci.h
index dcb359c13e..652258acff 100644
--- a/usr/src/uts/common/sys/scsi/adapters/mpapi_scsi_vhci.h
+++ b/usr/src/uts/common/sys/scsi/adapters/mpapi_scsi_vhci.h
@@ -19,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_SCSI_ADAPTERS_MPAPI_SCSI_VHCI_H
#define _SYS_SCSI_ADAPTERS_MPAPI_SCSI_VHCI_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -110,11 +108,18 @@ typedef struct mpapi_lu_data {
/*
* Structure to maintain mpapi path data.
+ *
+ * The hide flag is set when the pip was destroyed or should
+ * have been destroyed (MDI_PATHINFO_FLAGS_DEVICE_REMOVED).
+ * The valid flag is set to 0 when the path is in neither the
+ * online nor the standby state. When the hide flag is set, the
+ * valid flag is also set to 0.
*/
typedef struct mpapi_path_data {
void *resp; /* pip */
char *path_name;
int valid;
+ int hide;
char pclass[MPAPI_SCSI_MAXPCLASSLEN];
mp_path_prop_t prop;
} mpapi_path_data_t;
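As a rough illustration of the valid/hide semantics described in the comment above, a hypothetical helper could recompute both fields from the MDI path state and the new DEVICE_REMOVED flag. The helper name and the use of mdi_pi_get_state() here are illustrative assumptions; this is only a sketch, not necessarily how scsi_vhci maintains these fields.

    /*
     * Illustrative sketch only: recompute pd->valid and pd->hide for one
     * path, per the comment above.  Everything except the structure
     * fields and MDI_PI_FLAGS_IS_DEVICE_REMOVED() is an assumption.
     */
    static void
    mpapi_update_path_flags(mpapi_path_data_t *pd, mdi_pathinfo_t *pip)
    {
        mdi_pathinfo_state_t st = mdi_pi_get_state(pip);

        pd->hide = MDI_PI_FLAGS_IS_DEVICE_REMOVED(pip) ? 1 : 0;
        pd->valid = (pd->hide == 0 &&
            (st == MDI_PATHINFO_STATE_ONLINE ||
            st == MDI_PATHINFO_STATE_STANDBY)) ? 1 : 0;
    }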
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/ata.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/ata.h
new file mode 100644
index 0000000000..f503d856ff
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/ata.h
@@ -0,0 +1,279 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * Misc ATA definitions
+ */
+#ifndef _ATA_H
+#define _ATA_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "ata8-acs.h"
+#include "atapi7v3.h"
+
+/*
+ * IDENTIFY Data
+ */
+typedef struct {
+ uint16_t word0;
+ uint16_t word1;
+ uint16_t word2;
+ uint16_t word3;
+ uint16_t word4;
+ uint16_t word5;
+ uint16_t word6;
+ uint16_t word7;
+ uint16_t word8;
+ uint16_t word9;
+ uint16_t serial_number[10];
+ uint16_t word20;
+ uint16_t word21;
+ uint16_t word22;
+ uint16_t firmware_revision[4];
+ uint16_t model_number[20];
+ uint16_t word47;
+ uint16_t word48;
+ uint16_t word49;
+ uint16_t word50;
+ uint16_t word51;
+ uint16_t word52;
+ uint16_t word53;
+ uint16_t word54;
+ uint16_t word55;
+ uint16_t word56;
+ uint16_t word57;
+ uint16_t word58;
+ uint16_t word59;
+ uint16_t word60;
+ uint16_t word61;
+ uint16_t word62;
+ uint16_t word63;
+ uint16_t word64;
+ uint16_t word65;
+ uint16_t word66;
+ uint16_t word67;
+ uint16_t word68;
+ uint16_t word69;
+ uint16_t word70;
+ uint16_t word71;
+ uint16_t word72;
+ uint16_t word73;
+ uint16_t word74;
+ uint16_t word75;
+ uint16_t word76;
+ uint16_t word77;
+ uint16_t word78;
+ uint16_t word79;
+ uint16_t word80;
+ uint16_t word81;
+ uint16_t word82;
+ uint16_t word83;
+ uint16_t word84;
+ uint16_t word85;
+ uint16_t word86;
+ uint16_t word87;
+ uint16_t word88;
+ uint16_t word89;
+ uint16_t word90;
+ uint16_t word91;
+ uint16_t word92;
+ uint16_t word93;
+ uint16_t word94;
+ uint16_t word95;
+ uint16_t word96;
+ uint16_t word97;
+ uint16_t word98;
+ uint16_t word99;
+ uint16_t word100;
+ uint16_t word101;
+ uint16_t word102;
+ uint16_t word103;
+ uint16_t word104;
+ uint16_t word105;
+ uint16_t word106;
+ uint16_t word107;
+ uint16_t word108;
+ uint16_t word109;
+ uint16_t word110;
+ uint16_t word111;
+ uint16_t word112;
+ uint16_t word113;
+ uint16_t word114;
+ uint16_t word115;
+ uint16_t word116;
+ uint16_t word117;
+ uint16_t word118;
+ uint16_t word119;
+ uint16_t word120;
+ uint16_t word121;
+ uint16_t word122;
+ uint16_t word123;
+ uint16_t word124;
+ uint16_t word125;
+ uint16_t word126;
+ uint16_t word127;
+ uint16_t word128;
+ uint16_t word129;
+ uint16_t word130;
+ uint16_t word131;
+ uint16_t word132;
+ uint16_t word133;
+ uint16_t word134;
+ uint16_t word135;
+ uint16_t word136;
+ uint16_t word137;
+ uint16_t word138;
+ uint16_t word139;
+ uint16_t word140;
+ uint16_t word141;
+ uint16_t word142;
+ uint16_t word143;
+ uint16_t word144;
+ uint16_t word145;
+ uint16_t word146;
+ uint16_t word147;
+ uint16_t word148;
+ uint16_t word149;
+ uint16_t word150;
+ uint16_t word151;
+ uint16_t word152;
+ uint16_t word153;
+ uint16_t word154;
+ uint16_t word155;
+ uint16_t word156;
+ uint16_t word157;
+ uint16_t word158;
+ uint16_t word159;
+ uint16_t word160;
+ uint16_t word161;
+ uint16_t word162;
+ uint16_t word163;
+ uint16_t word164;
+ uint16_t word165;
+ uint16_t word166;
+ uint16_t word167;
+ uint16_t word168;
+ uint16_t word169;
+ uint16_t word170;
+ uint16_t word171;
+ uint16_t word172;
+ uint16_t word173;
+ uint16_t word174;
+ uint16_t word175;
+ uint16_t word176;
+ uint16_t word177;
+ uint16_t word178;
+ uint16_t word179;
+ uint16_t word180;
+ uint16_t word181;
+ uint16_t word182;
+ uint16_t word183;
+ uint16_t word184;
+ uint16_t word185;
+ uint16_t word186;
+ uint16_t word187;
+ uint16_t word188;
+ uint16_t word189;
+ uint16_t word190;
+ uint16_t word191;
+ uint16_t word192;
+ uint16_t word193;
+ uint16_t word194;
+ uint16_t word195;
+ uint16_t word196;
+ uint16_t word197;
+ uint16_t word198;
+ uint16_t word199;
+ uint16_t word200;
+ uint16_t word201;
+ uint16_t word202;
+ uint16_t word203;
+ uint16_t word204;
+ uint16_t word205;
+ uint16_t word206;
+ uint16_t word207;
+ uint16_t word208;
+ uint16_t word209;
+ uint16_t word210;
+ uint16_t word211;
+ uint16_t word212;
+ uint16_t word213;
+ uint16_t word214;
+ uint16_t word215;
+ uint16_t word216;
+ uint16_t word217;
+ uint16_t word218;
+ uint16_t word219;
+ uint16_t word220;
+ uint16_t word221;
+ uint16_t word222;
+ uint16_t word223;
+ uint16_t word224;
+ uint16_t word225;
+ uint16_t word226;
+ uint16_t word227;
+ uint16_t word228;
+ uint16_t word229;
+ uint16_t word230;
+ uint16_t word231;
+ uint16_t word232;
+ uint16_t word233;
+ uint16_t word234;
+ uint16_t word235;
+ uint16_t word236;
+ uint16_t word237;
+ uint16_t word238;
+ uint16_t word239;
+ uint16_t word240;
+ uint16_t word241;
+ uint16_t word242;
+ uint16_t word243;
+ uint16_t word244;
+ uint16_t word245;
+ uint16_t word246;
+ uint16_t word247;
+ uint16_t word248;
+ uint16_t word249;
+ uint16_t word250;
+ uint16_t word251;
+ uint16_t word252;
+ uint16_t word253;
+ uint16_t word254;
+ uint16_t word255;
+} ata_identify_t;
+
+#define LBA_CAPACITY(ati) \
+ ((LE_16(ati->word83) & (1 << 10)) == 0)? \
+ (LE_16(ati->word60) | ((LE_16(ati->word61)) << 16)) : \
+ ((LE_16(ati->word100)) | ((LE_16(ati->word101)) << 16) | \
+ (((uint64_t)LE_16(ati->word102)) << 32) | \
+ (((uint64_t)LE_16(ati->word103)) << 48))
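A minimal usage sketch of the macro above, assuming IDENTIFY data has already been copied into an ata_identify_t: LBA_CAPACITY() selects words 60-61 (28-bit) or words 100-103 (48-bit) based on bit 10 of word 83, so the caller only sees a sector count. The wrapper function is purely illustrative.

    /* Illustrative only: number of addressable sectors reported by IDENTIFY. */
    static uint64_t
    ata_sector_count(ata_identify_t *ati)
    {
        uint64_t nblocks = LBA_CAPACITY(ati);

        return (nblocks);
    }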
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _ATA_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/ata8-acs.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/ata8-acs.h
new file mode 100644
index 0000000000..6f0cc2d2cf
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/ata8-acs.h
@@ -0,0 +1,122 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * ATA8-ACS Definitions (subset) Working Draft AT Attachment 8 - ATA/ATAPI
+ * Command Set (D1699r4c)
+ */
+#ifndef _ATA8_ACS_H
+#define _ATA8_ACS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * ATA Command Set
+ */
+enum ata_opcode {
+ ATA_NOP = 0x00,
+ CFA_REQUEST_EXTENDED_ERROR = 0x03,
+ DEVICE_RESET = 0x08,
+ READ_SECTORS = 0x20,
+ READ_SECTORS_EXT = 0x24,
+ READ_DMA_EXT = 0x25,
+ READ_DMA_QUEUED_EXT = 0x26,
+ READ_NATIVE_MAX_ADDRESS_EXT = 0x27,
+ READ_MULTIPLE_EXT = 0x29,
+ READ_STREAM_DMA_EXT = 0x2A,
+ READ_STREAM_EXT = 0x2B,
+ READ_LOG_EXT = 0x2F,
+ WRITE_SECTORS = 0x30,
+ WRITE_SECTORS_EXT = 0x34,
+ WRITE_DMA_EXT = 0x35,
+ WRITE_DMA_QUEUED_EXT = 0x36,
+ SET_MAX_ADDRESS_EXT = 0x37,
+ CFA_WRITE_SECTORS_WITHOUT_ERASE = 0x38,
+ WRITE_MULTIPLE_EXT = 0x39,
+ WRITE_STREAM_DMA_EXT = 0x3A,
+ WRITE_STREAM_EXT = 0x3B,
+ WRITE_DMA_FUA_EXT = 0x3D,
+ WRITE_DMA_QUEUED_FUA_EXT = 0x3E,
+ WRITE_LOG_EXT = 0x3F,
+ READ_VERIFY_SECTORS = 0x40,
+ READ_VERIFY_SECTORS_EXT = 0x42,
+ WRITE_UNCORRECTABLE_EXT = 0x45,
+ READ_LOG_DMA_EXT = 0x47,
+ CONFIGURE_STREAM = 0x51,
+ WRITE_LOG_DMA_EXT = 0x57,
+ TRUSTED_NON_DATA = 0x5B,
+ TRUSTED_RECEIVE = 0x5C,
+ TRUSTED_RECEIVE_DMA = 0x5D,
+ TRUSTED_SEND = 0x5E,
+ TRUSTED_SEND_DMA = 0x5F,
+ READ_FPDMA_QUEUED = 0x60,
+ WRITE_FPDMA_QUEUED = 0x61,
+ CFA_TRANSLATE_SECTOR = 0x87,
+ EXECUTE_DEVICE_DIAGNOSTIC = 0x90,
+ DOWNLOAD_MICROCODE = 0x92,
+ PACKET = 0xA0,
+ IDENTIFY_PACKET_DEVICE = 0xA1,
+ SERVICE = 0xA2,
+ SMART = 0xB0,
+ DEVICE_CONFIGURATION_OVERLAY = 0xB1,
+ NV_CACHE = 0xB6,
+ CFA_ERASE_SECTORS = 0xC0,
+ READ_MULTIPLE = 0xC4,
+ WRITE_MULTIPLE = 0xC5,
+ SET_MULTIPLE_MODE = 0xC6,
+ READ_DMA_QUEUED = 0xC7,
+ READ_DMA = 0xC8,
+ WRITE_DMA = 0xCA,
+ WRITE_DMA_QUEUED = 0xCC,
+ CFA_WRITE_MULTIPLE_WITHOUT_ERASE = 0xCD,
+ WRITE_MULTIPLE_FUA_EXT = 0xCE,
+ CHECK_MEDIA_CARD_TYPE = 0xD1,
+ STANDBY_IMMEDIATE = 0xE0,
+ IDLE_IMMEDIATE = 0xE1,
+ STANDBY = 0xE2,
+ IDLE = 0xE3,
+ ATA_READ_BUFFER = 0xE4,
+ CHECK_POWER_MODE = 0xE5,
+ SLEEP = 0xE6,
+ FLUSH_CACHE = 0xE7,
+ ATA_WRITE_BUFFER = 0xE8,
+ FLUSH_CACHE_EXT = 0xEA,
+ IDENTIFY_DEVICE = 0xEC,
+ MEDIA_EJECT = 0xED,
+ SET_FEATURES = 0xEF,
+ SECURITY_SET_PASSWORD = 0xF1,
+ SECURITY_UNLOCK = 0xF2,
+ SECURITY_ERASE_PREPARE = 0xF3,
+ SECURITY_ERASE_UNIT = 0xF4,
+ SECURITY_FREEZE_LOCK = 0xF5,
+ SECURITY_DISABLE_PASSWORD = 0xF6,
+ READ_NATIVE_MAX_ADDRESS = 0xF8,
+ SET_MAX_ADDRESS = 0xF9
+};
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _ATA8_ACS_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/atapi7v3.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/atapi7v3.h
new file mode 100644
index 0000000000..e1bdf10af2
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/atapi7v3.h
@@ -0,0 +1,228 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * ATAPI-7 Definitions (subset) that include Serial ATA
+ * ATA/ATAPI-7 V3 (d1532v3r4b-ATA-ATAPI-7)
+ */
+#ifndef _ATAPI7V3_H
+#define _ATAPI7V3_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Register - Host to Device FIS
+ */
+typedef struct {
+ uint8_t fis_type;
+ uint8_t idcbits;
+ uint8_t cmd;
+ uint8_t features;
+#define FEATURE_LBA 0x40
+ uint8_t lba_low;
+ uint8_t lba_mid;
+ uint8_t lba_hi;
+ uint8_t device;
+ uint8_t lba_low_exp;
+ uint8_t lba_mid_exp;
+ uint8_t lba_hi_exp;
+ uint8_t features_exp;
+ uint8_t sector_count;
+ uint8_t sector_count_exp;
+ uint8_t reserved0;
+ uint8_t control;
+ uint8_t reserved1[4];
+} register_h2d_fis_t;
+
+/*
+ * Register - Device to Host FIS
+ */
+typedef struct {
+ uint8_t fis_type;
+ uint8_t idcbits;
+ uint8_t status;
+ uint8_t error;
+ uint8_t lba_low;
+ uint8_t lba_mid;
+ uint8_t lba_hi;
+ uint8_t device;
+ uint8_t lba_low_exp;
+ uint8_t lba_mid_exp;
+ uint8_t lba_hi_exp;
+ uint8_t reserved0;
+ uint8_t sector_count;
+ uint8_t sector_count_exp;
+ uint8_t reserved1[6];
+} register_d2h_fis_t;
+
+typedef struct {
+ uint8_t fis_type;
+ uint8_t idcbits;
+ uint8_t status_bits;
+#define STATUS_HI_MASK 0xE
+#define STATUS_HI_SHIFT 4
+#define STATUS_LO_MASK 0x7
+ uint8_t error;
+ uint8_t reserved;
+} set_device_bits_fis_t;
+
+typedef struct {
+ uint8_t fis_type;
+ uint8_t reserved[3];
+} dma_activate_fis_type;
+
+typedef struct {
+ uint8_t fis_type;
+ uint8_t idcbits;
+ uint8_t reserved0[2];
+ uint32_t dma_buffer_id_lo;
+ uint32_t dma_buffer_id_hi;
+ uint32_t reserved1;
+ uint32_t dma_buffer_offset;
+ uint32_t dma_buffer_count;
+ uint32_t reserved2;
+} dma_fpactivate_fis_t;
+
+typedef struct {
+ uint8_t fis_type;
+ uint8_t reserved0;
+ uint8_t bist_bits;
+ uint8_t reserved1;
+ uint8_t data[8];
+} bist_activate_fis_t;
+#define BIST_T 0x80
+#define BIST_A 0x40
+#define BIST_S 0x20
+#define BIST_L 0x10
+#define BIST_F 0x08
+#define BIST_P 0x04
+#define BIST_V 0x01
+
+typedef struct {
+ uint8_t fis_type;
+ uint8_t idcbits;
+ uint8_t status;
+ uint8_t error;
+ uint8_t lba_low;
+ uint8_t lba_mid;
+ uint8_t lba_high;
+ uint8_t device;
+ uint8_t lba_low_exp;
+ uint8_t lba_mid_exp;
+ uint8_t lba_high_exp;
+ uint8_t reserved0;
+ uint8_t sector_count;
+ uint8_t sector_count_exp;
+ uint8_t reserved1;
+ uint8_t E_status;
+ uint16_t transfer_count;
+ uint16_t reserved2;
+} pio_setup_fis_t;
+
+typedef struct {
+ uint8_t fis_type;
+ uint32_t dwords[1];
+} bidirectional_fis_t;
+
+/*
+ * FIS Types
+ */
+
+#define FIS_REG_H2DEV 0x27 /* 5 DWORDS */
+#define FIS_REG_D2H 0x34 /* 5 DWORDS */
+#define FIS_SET_DEVICE_BITS 0xA1 /* 2 DWORDS */
+#define FIS_DMA_ACTIVATE 0x39 /* 1 DWORD */
+#define FIS_DMA_FPSETUP 0x41 /* 7 DWORDS */
+#define FIS_BIST_ACTIVATE 0x58 /* 3 DWORDS */
+#define FIS_PIO_SETUP 0x5F /* 5 DWORDS */
+#define FIS_BI 0x46 /* 1 DWORD min, 2048 DWORD max */
+
+/*
+ * IDC bits
+ */
+#define C_BIT 0x80
+#define I_BIT 0x40
+#define D_BIT 0x20
+
+/*
+ * 28-Bit Command Mapping from ACS to FIS
+ *
+ * ACS Field FIS Field
+ * --------------------------------------
+ * Feature (7:0) -> Feature
+ * Count (7:0) -> Sector Count
+ * LBA (7:0) -> LBA Low
+ * LBA (15:8) -> LBA Mid
+ * LBA (23:16) -> LBA High
+ * LBA (27:24) -> Device (3:0)
+ * Device (15:12) -> Device (7:4)
+ * Command -> Command
+ *
+ * 48-Bit Command Mapping from ACS to FIS
+ *
+ * ACS Field FIS Field
+ * --------------------------------------
+ * Feature (7:0) -> Feature
+ * Feature (15:8) -> Feature (exp)
+ * Count (7:0) -> Sector Count
+ * Count (15:8) -> Sector Count (exp)
+ * LBA (7:0) -> LBA Low
+ * LBA (15:8) -> LBA Mid
+ * LBA (23:16) -> LBA High
+ * LBA (31:24) -> LBA Low (exp)
+ * LBA (39:32) -> LBA Mid (exp)
+ * LBA (47:40) -> LBA High (exp)
+ * Device (15:12) -> Device (7:4)
+ * Command -> Command
+ *
+ * FIS (FIS_REG_H2DEV) layout:
+ *
+ * 31.........24 23...........16 15....................8.7.............0
+ * FEATURE | COMMAND | C R R RESERVED | FIS TYPE 0x27
+ * DEVICE | LBA HIGH | LBA MID | LBA LOW
+ * FEATURE(exp) | LBA HIGH(exp) | LBA MID(exp) | LBA LOW(exp)
+ * CONTROL | RESERVED | Sector Count(exp) | Sector Count
+ * RESERVED | RESERVED | RESERVED | RESERVED
+ *
+ * FIS (FIS_REG_D2H) layout:
+ *
+ * 31.........24 23...........16 15....................8.7.............0
+ * ERROR | STATUS | R I R RESERVED | FIS TYPE 0x34
+ * DEVICE | LBA HIGH | LBA MID | LBA LOW
+ * RESERVED | LBA HIGH(exp) | LBA MID(exp) | LBA LOW(exp)
+ * RESERVED | RESERVED | Sector Count(exp) | Sector Count
+ * RESERVED | RESERVED | RESERVED | RESERVED
+ */
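To make the 48-bit mapping above concrete, here is a hedged sketch that fills a register_h2d_fis_t for READ DMA EXT. The helper name and argument choices are assumptions, and real code would still have to handle the device register, queueing, and endianness requirements of the transport it sits on.

    /*
     * Illustrative only: build a host-to-device register FIS for
     * READ DMA EXT (0x25) following the 48-bit field mapping above.
     */
    static void
    build_read_dma_ext_fis(register_h2d_fis_t *fp, uint64_t lba, uint16_t nsect)
    {
        bzero(fp, sizeof (*fp));
        fp->fis_type = FIS_REG_H2DEV;        /* 0x27 */
        fp->idcbits = C_BIT;                 /* command register update */
        fp->cmd = READ_DMA_EXT;              /* from ata8-acs.h */
        fp->lba_low = lba & 0xff;
        fp->lba_mid = (lba >> 8) & 0xff;
        fp->lba_hi = (lba >> 16) & 0xff;
        fp->lba_low_exp = (lba >> 24) & 0xff;
        fp->lba_mid_exp = (lba >> 32) & 0xff;
        fp->lba_hi_exp = (lba >> 40) & 0xff;
        fp->device = 0x40;                   /* LBA addressing bit */
        fp->sector_count = nsect & 0xff;
        fp->sector_count_exp = (nsect >> 8) & 0xff;
    }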
+
+
+/*
+ * Reasonable size to reserve for holding the most common FIS types.
+ */
+typedef uint32_t fis_t[5];
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _ATAPI7V3_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs.h
new file mode 100644
index 0000000000..074560eeb6
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs.h
@@ -0,0 +1,627 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is the principal header file for the PMCS driver
+ */
+#ifndef _PMCS_H
+#define _PMCS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#include <sys/cpuvar.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/modctl.h>
+#include <sys/pci.h>
+#include <sys/pcie.h>
+#include <sys/isa_defs.h>
+#include <sys/sunmdi.h>
+#include <sys/mdi_impldefs.h>
+#include <sys/scsi/scsi.h>
+#include <sys/scsi/impl/scsi_reset_notify.h>
+#include <sys/scsi/impl/sas_transport.h>
+#include <sys/scsi/generic/sas.h>
+#include <sys/atomic.h>
+#include <sys/byteorder.h>
+#include <sys/bitmap.h>
+#include <sys/queue.h>
+#include <sys/sdt.h>
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/io/ddi.h>
+#include <sys/scsi/impl/spc3_types.h>
+
+typedef struct pmcs_hw pmcs_hw_t;
+typedef struct pmcs_iport pmcs_iport_t;
+typedef struct pmcs_phy pmcs_phy_t;
+typedef struct lsas_cmd lsas_cmd_t;
+typedef struct lsas_result lsas_result_t;
+typedef struct lsata_cmd lsata_cmd_t;
+typedef struct lsata_result lsata_result_t;
+typedef struct pmcwork pmcwork_t;
+typedef struct pmcs_cmd pmcs_cmd_t;
+typedef struct pmcs_xscsi pmcs_xscsi_t;
+typedef struct pmcs_lun pmcs_lun_t;
+typedef struct pmcs_chunk pmcs_chunk_t;
+
+#include <sys/scsi/adapters/pmcs/pmcs_param.h>
+#include <sys/scsi/adapters/pmcs/pmcs_reg.h>
+#include <sys/scsi/adapters/pmcs/pmcs_mpi.h>
+#include <sys/scsi/adapters/pmcs/pmcs_iomb.h>
+#include <sys/scsi/adapters/pmcs/pmcs_sgl.h>
+
+#include <sys/scsi/adapters/pmcs/smp_defs.h>
+#include <sys/scsi/adapters/pmcs/ata.h>
+#include <sys/scsi/adapters/pmcs/pmcs_def.h>
+#include <sys/scsi/adapters/pmcs/pmcs_proto.h>
+#include <sys/scsi/adapters/pmcs/pmcs_scsa.h>
+#include <sys/scsi/adapters/pmcs/pmcs_smhba.h>
+
+#define PMCS_MAX_UA_SIZE 32
+
+struct pmcs_xscsi {
+ uint32_t
+ ca : 1, /* SATA specific */
+ ncq : 1, /* SATA specific */
+ pio : 1, /* SATA specific */
+ special_needed : 1, /* SATA specific */
+ special_running : 1, /* SATA specific */
+ reset_success : 1, /* last reset ok */
+ reset_wait : 1, /* wait for reset */
+ resetting : 1, /* now resetting */
+ recover_wait : 1, /* wait for recovery */
+ recovering : 1, /* now recovering */
+ event_recovery : 1, /* event recovery */
+ draining : 1,
+ dying : 1,
+ new : 1,
+ assigned : 1,
+ dev_gone : 1,
+ phy_addressable : 1, /* Direct attach SATA */
+ dev_state : 4;
+ uint16_t maxdepth;
+ uint16_t qdepth;
+ uint16_t actv_cnt;
+ uint16_t target_num;
+ /* statlock protects both target stats and the special queue (sq) */
+ kmutex_t statlock;
+ int32_t ref_count;
+ dev_info_t *dip; /* Solaris device dip */
+ pmcs_phy_t *phy;
+ STAILQ_HEAD(wqh, pmcs_cmd) wq;
+ pmcs_cmd_t *wq_recovery_tail; /* See below */
+ kmutex_t wqlock;
+ STAILQ_HEAD(aqh, pmcs_cmd) aq;
+ kmutex_t aqlock;
+ STAILQ_HEAD(sqh, pmcs_cmd) sq; /* SATA specific */
+ uint32_t tagmap; /* SATA specific */
+ pmcs_hw_t *pwp;
+ ddi_soft_state_bystr *lun_sstate;
+ uint64_t capacity; /* SATA specific */
+ char unit_address[PMCS_MAX_UA_SIZE];
+ kcondvar_t reset_cv;
+ kcondvar_t abort_cv;
+ char *ua;
+ pmcs_dtype_t dtype;
+};
+
+/*
+ * wq_recovery_tail in the pmcs_xscsi structure is a pointer to a command in
+ * the wait queue (wq). That pointer is the last command in the wait queue
+ * that needs to be reissued after device state recovery is complete. Commands
+ * that need to be retried are reinserted into the wq after wq_recovery_tail
+ * to maintain the order in which the commands were originally submitted.
+ */
+
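A sketch of the reinsertion rule described above. pmcs_cmd_t is defined elsewhere, so the STAILQ link field name (cmd_next) used here is an assumption, as is the helper itself.

    /*
     * Illustrative only: requeue a command that must be retried after
     * device state recovery, preserving original submission order.
     * The cmd_next link field name is an assumption.
     */
    static void
    requeue_after_recovery(pmcs_xscsi_t *xp, pmcs_cmd_t *sp)
    {
        ASSERT(mutex_owned(&xp->wqlock));
        if (xp->wq_recovery_tail == NULL) {
            STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
        } else {
            STAILQ_INSERT_AFTER(&xp->wq, xp->wq_recovery_tail, sp,
                cmd_next);
        }
        xp->wq_recovery_tail = sp;
    }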
+#define PMCS_INVALID_TARGET_NUM (uint16_t)-1
+
+#define PMCS_TGT_WAIT_QUEUE 0x01
+#define PMCS_TGT_ACTIVE_QUEUE 0x02
+#define PMCS_TGT_SPECIAL_QUEUE 0x04
+#define PMCS_TGT_ALL_QUEUES 0xff
+
+/*
+ * LUN representation. Just a LUN (number) and pointer to the target
+ * structure (pmcs_xscsi).
+ */
+
+struct pmcs_lun {
+ pmcs_xscsi_t *target;
+ uint64_t lun_num; /* lun64 */
+ scsi_lun_t scsi_lun; /* Wire format */
+ char unit_address[PMCS_MAX_UA_SIZE];
+};
+
+/*
+ * Interrupt coalescing values
+ */
+#define PMCS_MAX_IO_COMPS_PER_INTR 12
+#define PMCS_MAX_IO_COMPS_HIWAT_SHIFT 6
+#define PMCS_MAX_IO_COMPS_LOWAT_SHIFT 10
+#define PMCS_QUANTUM_TIME_USECS (1000000 / 10) /* 1/10th sec. */
+#define PMCS_MAX_COAL_TIMER 0x200 /* Don't set > than this */
+#define PMCS_MAX_CQ_THREADS 4
+#define PMCS_COAL_TIMER_GRAN 2 /* Go up/down by 2 usecs */
+#define PMCS_INTR_THRESHOLD(x) ((x) * 6 / 10)
+
+/*
+ * This structure is used to maintain state with regard to I/O interrupt
+ * coalescing.
+ */
+
+typedef struct pmcs_io_intr_coal_s {
+ hrtime_t nsecs_between_intrs;
+ hrtime_t last_io_comp;
+ clock_t quantum;
+ uint32_t num_io_completions;
+ uint32_t num_intrs;
+ uint32_t max_io_completions;
+ uint32_t intr_latency;
+ uint32_t intr_threshold;
+ uint16_t intr_coal_timer;
+ boolean_t timer_on;
+ boolean_t stop_thread;
+ boolean_t int_cleared;
+} pmcs_io_intr_coal_t;
+
+typedef struct pmcs_cq_thr_info_s {
+ kthread_t *cq_thread;
+ kmutex_t cq_thr_lock;
+ kcondvar_t cq_cv;
+ pmcs_hw_t *cq_pwp;
+} pmcs_cq_thr_info_t;
+
+typedef struct pmcs_cq_info_s {
+ uint32_t cq_threads;
+ uint32_t cq_next_disp_thr;
+ boolean_t cq_stop;
+ pmcs_cq_thr_info_t *cq_thr_info;
+} pmcs_cq_info_t;
+
+typedef struct pmcs_iocomp_cb_s {
+ pmcwork_t *pwrk;
+ char iomb[PMCS_QENTRY_SIZE << 1];
+ struct pmcs_iocomp_cb_s *next;
+} pmcs_iocomp_cb_t;
+
+typedef struct pmcs_iqp_trace_s {
+ char *head;
+ char *curpos;
+ uint32_t size_left;
+} pmcs_iqp_trace_t;
+
+/*
+ * Used by the string-based softstate as a hint to the likely size.
+ */
+
+#define PMCS_TGT_SSTATE_SZ 64
+#define PMCS_LUN_SSTATE_SZ 4
+
+/*
+ * HBA iport node softstate
+ */
+#define PMCS_IPORT_INVALID_PORT_ID 0xffff
+
+struct pmcs_iport {
+ kmutex_t lock; /* iport lock */
+ list_node_t list_node; /* list node for pwp->iports list_t */
+ kmutex_t refcnt_lock; /* refcnt lock */
+ kcondvar_t refcnt_cv; /* refcnt cv */
+ int refcnt; /* refcnt for this iport */
+ dev_info_t *dip; /* iport dip */
+ pmcs_hw_t *pwp; /* back pointer to HBA state */
+ pmcs_phy_t *pptr; /* pointer to this port's primary phy */
+ enum { /* unit address state in the phymap */
+ UA_INACTIVE,
+ UA_PEND_ACTIVATE,
+ UA_ACTIVE,
+ UA_PEND_DEACTIVATE
+ } ua_state;
+ char *ua; /* unit address (phy mask) */
+ int portid; /* portid */
+ int report_skip; /* skip or report during discovery */
+ list_t phys; /* list of phys on this port */
+ int nphy; /* number of phys in this port */
+ scsi_hba_tgtmap_t *iss_tgtmap; /* tgtmap */
+ ddi_soft_state_bystr *tgt_sstate; /* tgt softstate */
+};
+
+struct pmcs_chunk {
+ pmcs_chunk_t *next;
+ ddi_acc_handle_t acc_handle;
+ ddi_dma_handle_t dma_handle;
+ uint8_t *addrp;
+ uint64_t dma_addr;
+};
+
+/*
+ * HBA node (i.e. non-iport) softstate
+ */
+struct pmcs_hw {
+ /*
+ * Identity
+ */
+ dev_info_t *dip;
+
+ /*
+ * 16 possible initiator PHY WWNs
+ */
+ uint64_t sas_wwns[PMCS_MAX_PORTS];
+
+ /*
+ * Card State
+ */
+ enum pwpstate {
+ STATE_NIL,
+ STATE_PROBING,
+ STATE_RUNNING,
+ STATE_UNPROBING,
+ STATE_DEAD
+ } state;
+
+ uint32_t
+ fw_disable_update : 1,
+ fw_force_update : 1,
+ blocked : 1,
+ stuck : 1,
+ locks_initted : 1,
+ mpi_table_setup : 1,
+ hba_attached : 1,
+ iports_attached : 1,
+ suspended : 1,
+ separate_ports : 1,
+ fwlog : 4,
+ phymode : 3,
+ physpeed : 3,
+ resource_limited : 1,
+ configuring : 1,
+ ds_err_recovering : 1;
+
+ /*
+ * This HBA instance's iportmap and list of iport states.
+ * Note: iports_lock protects iports, iports_attached, and
+ * num_iports on the HBA softstate.
+ */
+ krwlock_t iports_lock;
+ scsi_hba_iportmap_t *hss_iportmap;
+ list_t iports;
+ int num_iports;
+
+ sas_phymap_t *hss_phymap;
+ int phymap_active;
+
+ /*
+ * Locks
+ */
+ kmutex_t lock;
+ kmutex_t dma_lock;
+ kmutex_t axil_lock;
+ kcondvar_t drain_cv;
+
+ /*
+ * FMA Capabilities
+ */
+ int fm_capabilities;
+
+ /*
+ * Register Access Handles
+ */
+ ddi_device_acc_attr_t dev_acc_attr;
+ ddi_device_acc_attr_t reg_acc_attr;
+ ddi_acc_handle_t pci_acc_handle;
+ ddi_acc_handle_t msg_acc_handle;
+ ddi_acc_handle_t top_acc_handle;
+ ddi_acc_handle_t mpi_acc_handle;
+ ddi_acc_handle_t gsm_acc_handle;
+ ddi_acc_handle_t iqp_acchdls[PMCS_MAX_IQ];
+ ddi_acc_handle_t oqp_acchdls[PMCS_MAX_IQ];
+ ddi_acc_handle_t cip_acchdls;
+ ddi_acc_handle_t fwlog_acchdl;
+ ddi_acc_handle_t regdump_acchdl;
+
+ /*
+ * DMA Handles
+ */
+ ddi_dma_attr_t iqp_dma_attr;
+ ddi_dma_attr_t oqp_dma_attr;
+ ddi_dma_attr_t cip_dma_attr;
+ ddi_dma_attr_t fwlog_dma_attr;
+ ddi_dma_attr_t regdump_dma_attr;
+ ddi_dma_handle_t iqp_handles[PMCS_MAX_IQ];
+ ddi_dma_handle_t oqp_handles[PMCS_MAX_OQ];
+ ddi_dma_handle_t cip_handles;
+ ddi_dma_handle_t fwlog_hndl;
+ ddi_dma_handle_t regdump_hndl;
+
+ /*
+ * Register Pointers
+ */
+ uint32_t *msg_regs; /* message unit registers */
+ uint32_t *top_regs; /* top unit registers */
+ uint32_t *mpi_regs; /* message passing unit registers */
+ uint32_t *gsm_regs; /* GSM registers */
+
+ /*
+ * Message Passing and other offsets.
+ *
+ * mpi_offset is the offset within the fourth register set (mpi_regs)
+ * that contains the base of the MPI structures. Since this is actually
+ * set by the card firmware, it can change from startup to startup.
+ *
+ * The other offsets (gst, iqc, oqc) are for similar tables in
+ * MPI space, typically only accessed during setup.
+ */
+ uint32_t mpi_offset;
+ uint32_t mpi_gst_offset;
+ uint32_t mpi_iqc_offset;
+ uint32_t mpi_oqc_offset;
+
+ /*
+ * Inbound and outbound queue depth
+ */
+ uint32_t ioq_depth;
+
+ /*
+ * Kernel addresses and offsets for Inbound Queue Producer Indices
+ *
+ * See comments in pmcs_iomb.h about Inbound Queues. Since it
+ * is relatively expensive to go across the PCIe bus to read or
+ * write inside the card, we maintain shadow copies in kernel
+ * memory and update the card as needed.
+ */
+ uint32_t shadow_iqpi[PMCS_MAX_IQ];
+ uint32_t iqpi_offset[PMCS_MAX_IQ];
+ uint32_t *iqp[PMCS_MAX_IQ];
+ kmutex_t iqp_lock[PMCS_NIQ];
+
+ pmcs_iqp_trace_t *iqpt;
+
+ /*
+ * Kernel addresses and offsets for Outbound Queue Consumer Indices
+ */
+ uint32_t *oqp[PMCS_MAX_OQ];
+ uint32_t oqci_offset[PMCS_MAX_OQ];
+
+ /*
+ * Driver's copy of the outbound queue indices
+ */
+
+ uint32_t oqci[PMCS_NOQ];
+ uint32_t oqpi[PMCS_NOQ];
+
+ /*
+ * DMA addresses for both Inbound and Outbound queues.
+ */
+ uint64_t oqaddr[PMCS_MAX_OQ];
+ uint64_t iqaddr[PMCS_MAX_IQ];
+
+ /*
+ * Producer/Queue Host Memory Pointers and scratch areas,
+ * as well as DMA scatter/gather chunk areas.
+ *
+ * See discussion in pmcs_def.h about how this is laid out.
+ */
+ uint8_t *cip;
+ uint64_t ciaddr;
+
+ /*
+ * Scratch area pointer and DMA address for SATA and SMP operations.
+ */
+ void *scratch;
+ uint64_t scratch_dma;
+ volatile uint8_t scratch_locked; /* Scratch area ownership */
+
+ /*
+ * Firmware log pointer
+ */
+ uint32_t *fwlogp;
+ uint64_t fwaddr;
+
+ /*
+ * Internal register dump region and flash chunk DMA info
+ */
+
+ caddr_t regdumpp;
+ uint32_t *flash_chunkp;
+ uint64_t flash_chunk_addr;
+
+ /*
+ * Card information, some determined during MPI setup
+ */
+ uint32_t fw; /* firmware version */
+ uint8_t max_iq; /* maximum inbound queues this card */
+ uint8_t max_oq; /* "" outbound "" */
+ uint8_t nphy; /* number of phys this card */
+ uint8_t chiprev; /* chip revision */
+ uint16_t max_cmd; /* max number of commands supported */
+ uint16_t max_dev; /* max number of devices supported */
+ uint16_t last_wq_dev; /* last dev whose wq was serviced */
+
+
+ /*
+ * Interrupt Setup stuff.
+ *
+ * int_type defines the kind of interrupt we're using with this card.
+ * oqvec defines the relationship between an Outbound Queue Number and
+ * a MSI-X vector.
+ */
+ enum {
+ PMCS_INT_NONE,
+ PMCS_INT_TIMER,
+ PMCS_INT_MSI,
+ PMCS_INT_MSIX,
+ PMCS_INT_FIXED
+ } int_type;
+ uint8_t oqvec[PMCS_NOQ];
+
+ /*
+ * Interrupt handle table and size
+ */
+ ddi_intr_handle_t *ih_table;
+ size_t ih_table_size;
+
+ timeout_id_t wdhandle;
+ uint32_t intr_mask;
+ int intr_cnt;
+ int intr_cap;
+ uint32_t odb_auto_clear;
+
+ /*
+ * DMA S/G chunk list
+ */
+ int nchunks;
+ pmcs_chunk_t *dma_chunklist;
+
+ /*
+ * Front of the DMA S/G chunk freelist
+ */
+ pmcs_dmachunk_t *dma_freelist;
+
+ /*
+ * PHY and Discovery Related Stuff
+ *
+ * The PMC chip can have up to 16 local phys. We build a level-first
+ * traversal tree of phys starting with the physical phys on the
+ * chip itself (i.e., treating the chip as if it were an expander).
+ *
+ * Our discovery process goes through a level and discovers what
+ * each entity is (and its phy number within that expander's
+ * address space). It then configures each non-empty item (SAS,
+ * SATA/STP, EXPANDER). For expanders, it then performs
+ * discovery on that expander itself via REPORT GENERAL and
+ * DISCOVERY SMP commands, attaching the discovered entities
+ * to the next level. Then we step down a level and continue
+ * (and so on).
+ *
+ * The PMC chip maintains an I_T_NEXUS notion based upon our
+ * registering each new device found (getting back a device handle).
+ *
+ * Like with the number of physical PHYS being a maximum of 16,
+ * there are a maximum number of PORTS also being 16. Some
+ * events apply to PORTS entirely, so we track PORTS as well (a
+ * simple walk sketch over the PHY tree follows this structure).
+ */
+ pmcs_phy_t *root_phys; /* HBA PHYs (level 0) */
+ pmcs_phy_t *ports[PMCS_MAX_PORTS];
+ kmutex_t dead_phylist_lock; /* Protects dead_phys */
+ pmcs_phy_t *dead_phys; /* PHYs waiting to be freed */
+
+ kmem_cache_t *phy_cache;
+
+ /*
+ * Discovery-related items.
+ * config_lock: Protects config_changed and should never be held
+ * outside of getting or setting the value of config_changed.
+ * config_changed: Boolean indicating whether discovery needs to
+ * be restarted.
+ * configuring: 1 = discovery is running, 0 = discovery not running.
+ * NOTE: configuring is now in the bitfield above.
+ */
+ kmutex_t config_lock;
+ volatile boolean_t config_changed;
+
+ /*
+ * Work Related Stuff
+ *
+ * Each command given to the PMC chip has an associated work structure.
+ * See the discussion in pmcs_def.h about work structures.
+ */
+ pmcwork_t *work; /* pool of work structures */
+ STAILQ_HEAD(wfh, pmcwork) wf; /* current freelist */
+ STAILQ_HEAD(pfh, pmcwork) pf; /* current pending freelist */
+ uint16_t wserno; /* rolling serial number */
+ kmutex_t wfree_lock; /* freelist/actvlist/wserno lock */
+ kmutex_t pfree_lock; /* freelist/actvlist/wserno lock */
+
+ /*
+ * Solaris/SCSA items.
+ */
+ scsi_hba_tran_t *tran;
+ sas_hba_tran_t *smp_tran;
+ struct scsi_reset_notify_entry *reset_notify_listf;
+
+ /*
+ * Thread Level stuff.
+ *
+ * A number of tasks are done off worker thread taskq.
+ */
+ ddi_taskq_t *tq; /* For the worker thread */
+ volatile ulong_t work_flags;
+
+ /*
+ * Solaris target representation.
+ * targets = array of pointers to xscsi structures
+ * allocated by ssoftstate.
+ */
+ pmcs_xscsi_t **targets;
+
+ STAILQ_HEAD(dqh, pmcs_cmd) dq; /* dead commands */
+ STAILQ_HEAD(cqh, pmcs_cmd) cq; /* completed commands */
+ kmutex_t cq_lock;
+ kmem_cache_t *iocomp_cb_cache;
+ pmcs_iocomp_cb_t *iocomp_cb_head;
+ pmcs_iocomp_cb_t *iocomp_cb_tail;
+
+ uint16_t debug_mask;
+ uint16_t phyid_block_mask;
+ uint16_t phys_started;
+ uint32_t hipri_queue;
+ uint32_t mpibar;
+ uint32_t intr_pri;
+
+ pmcs_io_intr_coal_t io_intr_coal;
+ pmcs_cq_info_t cq_info;
+ kmutex_t ict_lock;
+ kcondvar_t ict_cv;
+ kthread_t *ict_thread;
+
+#ifdef DEBUG
+ kmutex_t dbglock;
+ uint32_t ltags[256];
+ uint32_t ftags[256];
+ hrtime_t ltime[256];
+ hrtime_t ftime[256];
+ uint16_t ftag_lines[256];
+ uint8_t lti; /* last tag index */
+ uint8_t fti; /* first tag index */
+#endif
+};
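As noted in the PHY/discovery comment inside the structure above, the tree that discovery builds hangs off root_phys via the children and sibling links in pmcs_phy_t. The sketch below only illustrates walking that tree, one level of a subtree at a time; it is not the driver's discovery code and deliberately ignores phy_lock and ref_count.

    /* Illustrative only: visit the PHYs of one level, then descend. */
    static void
    walk_phy_tree(pmcs_phy_t *level_head, void (*visit)(pmcs_phy_t *))
    {
        pmcs_phy_t *p;

        for (p = level_head; p != NULL; p = p->sibling)
            visit(p);
        for (p = level_head; p != NULL; p = p->sibling)
            walk_phy_tree(p->children, visit);
    }

A caller would start the walk at the HBA's root PHYs, e.g. walk_phy_tree(pwp->root_phys, some_visit_fn).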
+
+extern void *pmcs_softc_state;
+extern void *pmcs_iport_softstate;
+
+/*
+ * Some miscellaneous, oft used strings
+ */
+extern const char pmcs_nowrk[];
+extern const char pmcs_nomsg[];
+extern const char pmcs_timeo[];
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _PMCS_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_def.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_def.h
new file mode 100644
index 0000000000..5ed39d0a79
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_def.h
@@ -0,0 +1,514 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+#ifndef _PMCS_DEF_H
+#define _PMCS_DEF_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ NOTHING, /* nothing connected here */
+ SATA, /* SATA connection */
+ SAS, /* direct or indirect SAS connection */
+ EXPANDER, /* connection to an expander */
+ NEW /* Brand new device (pending state) */
+} pmcs_dtype_t;
+
+/*
+ * This structure defines a PHY device that represents what we
+ * are connected to.
+ *
+ * The eight real physical PHYs that are in the PMC8X6G are represented
+ * as an array of eight of these structures which define what these
+ * real PHYs are connected to.
+ *
+ * Depending upon what is actually connected to each PHY, the
+ * type set will define what we're connected to. If it is
+ * a direct SATA connection, the phy will describe a SATA endpoint
+ * If it is a direct SAS connection, it will describe a SAS
+ * endpoint.
+ *
+ * If it is an EXPANDER, this will describe the edge of an expander.
+ * As we perform discovery on what is in an EXPANDER we define an
+ * additional list of phys that represent what the Expander is connected to.
+ */
+#define PMCS_HW_MIN_LINK_RATE SAS_LINK_RATE_1_5GBIT
+#define PMCS_HW_MAX_LINK_RATE SAS_LINK_RATE_6GBIT
+
+#define PMCS_INVALID_DEVICE_ID 0xffffffff
+
+struct pmcs_phy {
+ pmcs_phy_t *sibling; /* sibling phy */
+ pmcs_phy_t *parent; /* parent phy */
+ pmcs_phy_t *children; /* head of list of children */
+ pmcs_phy_t *dead_next; /* dead PHY list link */
+ list_node_t list_node; /* list element */
+ uint32_t device_id; /* PMC8X6G device handle */
+ uint32_t
+ ncphy : 8, /* # of contained phys for expander */
+ hw_event_ack : 24; /* XXX: first level phy event acked */
+ uint8_t phynum; /* phy number on parent expander */
+ uint8_t width; /* how many phys wide */
+ uint8_t ds_recovery_retries; /* # error retry attempts */
+ pmcs_dtype_t dtype; /* current dtype of the phy */
+ pmcs_dtype_t pend_dtype; /* new dtype (pending change) */
+ uint32_t
+ level : 8, /* level in expander tree */
+ tolerates_sas2 : 1, /* tolerates SAS2 SMP */
+ spinup_hold : 1, /* spinup hold needs releasing */
+ atdt : 3, /* attached device type */
+ portid : 4, /* PMC8X6G port context */
+ link_rate : 4, /* current supported speeds */
+ valid_device_id : 1, /* device id is valid */
+ abort_sent : 1, /* we've sent an abort */
+ abort_pending : 1, /* we have an abort pending */
+ need_rl_ext : 1, /* need SATA RL_EXT recovery */
+ subsidiary : 1, /* this is part of a wide phy */
+ configured : 1, /* is configured */
+ dead : 1, /* dead */
+ changed : 1; /* this phy is changing */
+ clock_t config_stop; /* When config attempts will stop */
+ hrtime_t abort_all_start;
+ kcondvar_t abort_all_cv; /* Wait for ABORT_ALL completion */
+ kmutex_t phy_lock;
+ volatile uint32_t ref_count; /* Targets & work on this PHY */
+ uint8_t sas_address[8]; /* SAS address for this PHY */
+ struct {
+ uint32_t
+ prog_min_rate :4,
+ hw_min_rate :4,
+ prog_max_rate :4,
+ hw_max_rate :4,
+ reserved :16;
+ } state;
+ char path[32]; /* path name for this phy */
+ pmcs_hw_t *pwp; /* back ptr to hba struct */
+ pmcs_iport_t *iport; /* back ptr to the iport handle */
+ pmcs_xscsi_t *target; /* back ptr to current target */
+ kstat_t *phy_stats; /* kstats for this phy */
+};
+
+/* maximum number of ds recovery retries (ds_recovery_retries) */
+#define PMCS_MAX_DS_RECOVERY_RETRIES 4
+
+
+/*
+ * Inbound and Outbound Queue Related Definitions.
+ *
+ * The PMC8X6G has a programmable number of inbound and outbound circular
+ * queues for use in message passing between the host and the PMC8X6G
+ * (up to 64 queues for the Rev C Chip). This driver does not use all
+ * possible queues.
+ *
+ * Each Queue is given 4K of consistent memory and we set a 64 byte size for
+ * the queue entry size (this gives us 256 queue entries per queue).
+ *
+ * This allocation then continues for a further PMCS_SCRATCH_SIZE bytes
+ * that the driver uses as a temporary scratch area for things like
+ * SMP discovery.
+ *
+ * This control area looks like this:
+ *
+ * Offset What
+ * ------------------------------------------------
+ * 0 IQ 0 Consumer Index
+ * 4 IQ 1 Consumer Index
+ * 8..251 ...
+ * 252..255 IQ 63 Consumer Index
+ * 256 OQ 0 Producer Index
+ * 260 OQ 1 Producer Index
+ * 264..507 ....
+ * 508..511 OQ 63 Producer Index
+ * 512..512+PMCS_SCRATCH_SIZE-1 Scratch area.
+ */
+#define IQCI_BASE_OFFSET 0
+#define IQ_OFFSET(qnum) (IQCI_BASE_OFFSET + (qnum << 2))
+#define OQPI_BASE_OFFSET 256
+#define OQ_OFFSET(qnum) (OQPI_BASE_OFFSET + (qnum << 2))
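For example, IQ_OFFSET(63) is 252 and OQ_OFFSET(63) is 508, matching the layout table above. A hedged sketch of reading an inbound queue consumer index from the host control area follows; the real driver would go through the cip DMA access handle (ddi_get32) rather than a raw dereference, so treat this as illustration only.

    /* Illustrative only: fetch IQ 'qnum' consumer index from host memory. */
    static uint32_t
    read_iq_ci(pmcs_hw_t *pwp, uint32_t qnum)
    {
        return (*(volatile uint32_t *)(void *)(pwp->cip + IQ_OFFSET(qnum)));
    }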
+
+/*
+ * Work related structures. Each one of these structures is paired
+ * with *any* command that is fed to the PMC8X6G via one of the
+ * Inbound Queues. The work structure has a tag to compare with
+ * the message that comes back out of an Outbound Queue. The
+ * work structure also points to the phy which this command is
+ * tied to. It also has a pointer to a callback function (if defined).
+ * See the TAG Architecture below for the various kinds of
+ * dispositions of a work structure.
+ */
+
+/*
+ * Work Structure States
+ *
+ * NIL -> READY
+ * READY -> NIL
+ * READY -> ONCHIP
+ * ONCHIP -> INTR
+ * INTR -> READY
+ * INTR -> NIL
+ * INTR -> ABORTED
+ * INTR -> TIMED_OUT
+ * ABORTED -> NIL
+ * TIMED_OUT -> NIL
+ */
+typedef enum {
+ PMCS_WORK_STATE_NIL = 0,
+ PMCS_WORK_STATE_READY,
+ PMCS_WORK_STATE_ONCHIP,
+ PMCS_WORK_STATE_INTR,
+ PMCS_WORK_STATE_IOCOMPQ,
+ PMCS_WORK_STATE_ABORTED,
+ PMCS_WORK_STATE_TIMED_OUT
+} pmcs_work_state_t;
+
+struct pmcwork {
+ STAILQ_ENTRY(pmcwork) next;
+ kmutex_t lock;
+ kcondvar_t sleep_cv;
+ void *ptr; /* linkage or callback function */
+ void *arg; /* command specific data */
+ pmcs_phy_t *phy; /* phy who owns this command */
+ pmcs_xscsi_t *xp; /* Back pointer to xscsi struct */
+ volatile uint32_t htag; /* tag for this structure */
+ uint32_t
+ timer : 27,
+ onwire : 1,
+ dead : 1,
+ state : 3;
+ hrtime_t start; /* timestamp start */
+ uint32_t ssp_event; /* ssp event */
+ pmcs_dtype_t dtype; /* stash, in case phy gets cleared */
+
+ /* DEBUG-only fields from here on */
+ void *last_ptr;
+ void *last_arg;
+ pmcs_phy_t *last_phy;
+ pmcs_xscsi_t *last_xp;
+ uint32_t last_htag;
+ pmcs_work_state_t last_state;
+ hrtime_t finish;
+};
+
+#define PMCS_REC_EVENT 0xffffffff /* event recovery */
+
+/*
+ * This structure describes the PMC-Sierra-defined firmware header.
+ */
+#pragma pack(4)
+typedef struct {
+ char vendor_id[8];
+ uint8_t product_id;
+ uint8_t hwrev;
+ uint8_t destination_partition;
+ uint8_t reserved0;
+ uint8_t fwrev[4];
+ uint32_t firmware_length;
+ uint32_t crc;
+ uint32_t start_address;
+ uint8_t data[];
+} pmcs_fw_hdr_t;
+#pragma pack()
+
+/*
+ * Offlevel work as a bit pattern.
+ */
+#define PMCS_WORK_DISCOVER 0
+#define PMCS_WORK_REM_DEVICES 2
+#define PMCS_WORK_ABORT_HANDLE 3
+#define PMCS_WORK_SPINUP_RELEASE 4
+#define PMCS_WORK_SAS_HW_ACK 5
+#define PMCS_WORK_SATA_RUN 6
+#define PMCS_WORK_RUN_QUEUES 7
+#define PMCS_WORK_ADD_DMA_CHUNKS 8
+#define PMCS_WORK_DS_ERR_RECOVERY 9
+#define PMCS_WORK_SSP_EVT_RECOVERY 10
+
+/*
+ * The actual values as they appear in work_flags
+ */
+#define PMCS_WORK_FLAG_DISCOVER (1 << 0)
+#define PMCS_WORK_FLAG_REM_DEVICES (1 << 2)
+#define PMCS_WORK_FLAG_ABORT_HANDLE (1 << 3)
+#define PMCS_WORK_FLAG_SPINUP_RELEASE (1 << 4)
+#define PMCS_WORK_FLAG_SAS_HW_ACK (1 << 5)
+#define PMCS_WORK_FLAG_SATA_RUN (1 << 6)
+#define PMCS_WORK_FLAG_RUN_QUEUES (1 << 7)
+#define PMCS_WORK_FLAG_ADD_DMA_CHUNKS (1 << 8)
+#define PMCS_WORK_FLAG_DS_ERR_RECOVERY (1 << 9)
+#define PMCS_WORK_FLAG_SSP_EVT_RECOVERY (1 << 10)
+
+/*
+ * This structure is used to test MPI (and interrupts)
+ * after MPI has been started to make sure it's working reliably.
+ */
+typedef struct {
+ uint32_t signature;
+ uint32_t count;
+ uint32_t *ptr;
+} echo_test_t;
+#define ECHO_SIGNATURE 0xbebebeef
+
+/*
+ * Tag Architecture. The PMC has 32 bit tags for MPI messages.
+ * We use the tag as follows.
+ *
+ * bits what
+ * ------------------------
+ * 31 done bit
+ * 30..28 tag type
+ * 27..12 rolling serial number
+ * 11..0 index into work area to get pmcwork structure
+ *
+ * A tag type of NONE means that nobody is waiting on any results,
+ * so the interrupt code frees the work structure that has this
+ * tag.
+ *
+ * A tag type of CBACK means that the interrupt handler
+ * takes the tag 'arg' in the work structure to be a callback
+ * function pointer (see pmcs_cb_t). The callee is responsible
+ * for freeing the work structure that has this tag.
+ *
+ * A tag type of WAIT means that the issuer of the work needs to
+ * be woken up from interrupt level when the command completes
+ * (or times out). If work structure tag 'arg' is non-null,
+ * up to 2*PMCS_QENTRY_SIZE bits of data from the Outbound Queue
+ * entry may be copied to the area pointed to by 'arg'. This
+ * allows issuers to get directly at the results of the command
+ * they issued. The synchronization point for the issuer and the
+ * interrupt code for command done notification is the setting
+ * of the 'DONE' bit in the tag as stored in the work structure.
+ */
+#define PMCS_TAG_TYPE_FREE 0
+#define PMCS_TAG_TYPE_NONE 1
+#define PMCS_TAG_TYPE_CBACK 2
+#define PMCS_TAG_TYPE_WAIT 3
+#define PMCS_TAG_TYPE_SHIFT 28
+#define PMCS_TAG_SERNO_SHIFT 12
+#define PMCS_TAG_INDEX_SHIFT 0
+#define PMCS_TAG_TYPE_MASK 0x70000000
+#define PMCS_TAG_DONE 0x80000000
+#define PMCS_TAG_SERNO_MASK 0x0ffff000
+#define PMCS_TAG_INDEX_MASK 0x00000fff
+#define PMCS_TAG_TYPE(x) \
+ (((x) & PMCS_TAG_TYPE_MASK) >> PMCS_TAG_TYPE_SHIFT)
+#define PMCS_TAG_SERNO(x) \
+ (((x) & PMCS_TAG_SERNO_MASK) >> PMCS_TAG_SERNO_SHIFT)
+#define PMCS_TAG_INDEX(x) \
+ (((x) & PMCS_TAG_INDEX_MASK) >> PMCS_TAG_INDEX_SHIFT)
+#define PMCS_TAG_FREE 0
+#define PMCS_COMMAND_DONE(x) \
+ (((x)->htag == PMCS_TAG_FREE) || (((x)->htag & PMCS_TAG_DONE) != 0))
+#define PMCS_COMMAND_ACTIVE(x) \
+ ((x)->htag != PMCS_TAG_FREE && (x)->state == PMCS_WORK_STATE_ONCHIP)
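A small sketch of packing and unpacking a tag with the macros above. In the driver the serial number and index come from the work-structure allocator; the values and the helper below are purely illustrative.

    /* Illustrative only: compose an MPI tag and pull its fields back out. */
    static uint32_t
    make_tag(uint32_t type, uint32_t serno, uint32_t index)
    {
        return (((type << PMCS_TAG_TYPE_SHIFT) & PMCS_TAG_TYPE_MASK) |
            ((serno << PMCS_TAG_SERNO_SHIFT) & PMCS_TAG_SERNO_MASK) |
            ((index << PMCS_TAG_INDEX_SHIFT) & PMCS_TAG_INDEX_MASK));
    }
    /*
     * For htag = make_tag(PMCS_TAG_TYPE_WAIT, 5, 42):
     *   PMCS_TAG_TYPE(htag) == PMCS_TAG_TYPE_WAIT
     *   PMCS_TAG_SERNO(htag) == 5, PMCS_TAG_INDEX(htag) == 42
     * and completion is signalled by setting PMCS_TAG_DONE in the
     * work structure's htag.
     */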
+
+/*
+ * Miscellaneous Definitions
+ */
+#define CLEAN_MESSAGE(m, x) { \
+ int _j = x; \
+ while (_j < PMCS_MSG_SIZE) { \
+ m[_j++] = 0; \
+ } \
+}
+
+#define COPY_MESSAGE(t, f, a) { \
+ int _j; \
+ for (_j = 0; _j < a; _j++) { \
+ t[_j] = f[_j]; \
+ } \
+ while (_j < PMCS_MSG_SIZE) { \
+ t[_j++] = 0; \
+ } \
+}
+
+#define PMCS_PHY_ADDRESSABLE(pp) \
+ ((pp)->level == 0 && (pp)->dtype == SATA && \
+ ((pp)->sas_address[0] >> 4) != 5)
+
+#define RESTART_DISCOVERY(pwp) \
+ ASSERT(!mutex_owned(&pwp->config_lock)); \
+ mutex_enter(&pwp->config_lock); \
+ pwp->config_changed = B_TRUE; \
+ mutex_exit(&pwp->config_lock); \
+ SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
+
+#define RESTART_DISCOVERY_LOCKED(pwp) \
+ ASSERT(mutex_owned(&pwp->config_lock)); \
+ pwp->config_changed = B_TRUE; \
+ SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
+
+#define PHY_CHANGED(pwp, p) \
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s changed in %s line %d", \
+ p->path, __func__, __LINE__); \
+ p->changed = 1
+
+#define PHY_CHANGED_AT_LOCATION(pwp, p, func, line) \
+ pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s changed in %s line %d", \
+ p->path, func, line); \
+ p->changed = 1
+
+#define PHY_TYPE(pptr) \
+ (((pptr)->dtype == NOTHING)? "NOTHING" : \
+ (((pptr)->dtype == SATA)? "SATA" : \
+ (((pptr)->dtype == SAS)? "SAS" : "EXPANDER")))
+
+#define IS_ROOT_PHY(pptr) (pptr->parent == NULL)
+
+#define PMCS_HIPRI(pwp, oq, c) \
+ (pwp->hipri_queue & (1 << PMCS_IQ_OTHER)) ? \
+ (PMCS_IOMB_HIPRI | PMCS_IOMB_IN_SAS(oq, c)) : \
+ (PMCS_IOMB_IN_SAS(oq, c))
+
+#define SCHEDULE_WORK(hwp, wrk) \
+ (void) atomic_set_long_excl(&hwp->work_flags, wrk)
+
+/*
+ * Check to see if the requested work bit is set. Either way, the bit will
+ * be cleared upon return.
+ */
+#define WORK_SCHEDULED(hwp, wrk) \
+ (atomic_clear_long_excl(&hwp->work_flags, wrk) == 0)
+
+/*
+ * Check to see if the requested work bit is set. The value will not be
+ * changed in this case. The atomic_xx_nv operations can be quite expensive
+ * so this should not be used in non-DEBUG code.
+ */
+#define WORK_IS_SCHEDULED(hwp, wrk) \
+ ((atomic_and_ulong_nv(&hwp->work_flags, (ulong_t)-1) & (1 << wrk)) != 0)
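A sketch of how the producer and consumer sides of these flags pair up; the two function names are assumptions and the bodies are placeholders.

    /* Illustrative only: request discovery work ... */
    static void
    kick_discovery(pmcs_hw_t *pwp)
    {
        SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
        /* the worker taskq (pwp->tq) must then be dispatched or woken */
    }

    /* ... and consume (and clear) the request on the worker side. */
    static void
    worker_one_pass(pmcs_hw_t *pwp)
    {
        if (WORK_SCHEDULED(pwp, PMCS_WORK_DISCOVER)) {
            /* run discovery here */
        }
    }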
+
+#define WAIT_FOR(p, t, r) \
+ r = 0; \
+ while (!PMCS_COMMAND_DONE(p)) { \
+ clock_t tmp = cv_timedwait(&p->sleep_cv, \
+ &p->lock, ddi_get_lbolt() + \
+ drv_usectohz(t * 1000)); \
+ if (!PMCS_COMMAND_DONE(p) && tmp < 0) { \
+ r = 1; \
+ break; \
+ } \
+ }
+
+/*
+ * Signal the next I/O completion thread to start running.
+ */
+
+#define PMCS_CQ_RUN_LOCKED(hwp) \
+ if (!STAILQ_EMPTY(&hwp->cq) || hwp->iocomp_cb_head) { \
+ pmcs_cq_thr_info_t *cqti; \
+ cqti = &hwp->cq_info.cq_thr_info \
+ [hwp->cq_info.cq_next_disp_thr]; \
+ hwp->cq_info.cq_next_disp_thr++; \
+ if (hwp->cq_info.cq_next_disp_thr == \
+ hwp->cq_info.cq_threads) { \
+ hwp->cq_info.cq_next_disp_thr = 0; \
+ } \
+ mutex_enter(&cqti->cq_thr_lock); \
+ cv_signal(&cqti->cq_cv); \
+ mutex_exit(&cqti->cq_thr_lock); \
+ } \
+
+#define PMCS_CQ_RUN(hwp) \
+ mutex_enter(&hwp->cq_lock); \
+ PMCS_CQ_RUN_LOCKED(hwp); \
+ mutex_exit(&hwp->cq_lock);
+
+
+/*
+ * Watchdog/SCSA timer definitions
+ */
+/* usecs to SCSA watchdog ticks */
+#define US2WT(x) ((x)/10)
+
+/*
+ * More misc
+ */
+#define BYTE0(x) (((x) >> 0) & 0xff)
+#define BYTE1(x) (((x) >> 8) & 0xff)
+#define BYTE2(x) (((x) >> 16) & 0xff)
+#define BYTE3(x) (((x) >> 24) & 0xff)
+#define BYTE4(x) (((x) >> 32) & 0xff)
+#define BYTE5(x) (((x) >> 40) & 0xff)
+#define BYTE6(x) (((x) >> 48) & 0xff)
+#define BYTE7(x) (((x) >> 56) & 0xff)
+#define WORD0(x) (((x) >> 0) & 0xffff)
+#define WORD1(x) (((x) >> 16) & 0xffff)
+#define WORD2(x) (((x) >> 32) & 0xffff)
+#define WORD3(x) (((x) >> 48) & 0xffff)
+#define DWORD0(x) ((uint32_t)(x))
+#define DWORD1(x) ((uint32_t)(((uint64_t)x) >> 32))
+
+#define SAS_ADDR_FMT "0x%02x%02x%02x%02x%02x%02x%02x%02x"
+#define SAS_ADDR_PRT(x) x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7]
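These pair with any printf-style interface; a hedged example follows (the buffer handling and helper name are illustrative).

    /* Illustrative only: render a PHY's SAS address as "0x...". */
    static void
    format_sas_addr(const pmcs_phy_t *pptr, char *buf, size_t len)
    {
        (void) snprintf(buf, len, SAS_ADDR_FMT,
            SAS_ADDR_PRT(pptr->sas_address));
    }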
+
+#define PMCS_VALID_LINK_RATE(r) \
+ ((r == SAS_LINK_RATE_1_5GBIT) || (r == SAS_LINK_RATE_3GBIT) || \
+ (r == SAS_LINK_RATE_6GBIT))
+
+/*
+ * This is here to avoid inclusion of <sys/ctype.h> which is not lint clean.
+ */
+#define HEXDIGIT(x) (((x) >= '0' && (x) <= '9') || \
+ ((x) >= 'a' && (x) <= 'f') || ((x) >= 'A' && (x) <= 'F'))
+
+
+typedef void (*pmcs_cb_t) (pmcs_hw_t *, pmcwork_t *, uint32_t *);
+
+/*
+ * Defines and structure used for tracing/logging information
+ */
+
+#define PMCS_TBUF_ELEM_SIZE 120
+
+#ifdef DEBUG
+#define PMCS_TBUF_NUM_ELEMS_DEF 100000
+#else
+#define PMCS_TBUF_NUM_ELEMS_DEF 15000
+#endif
+
+typedef struct {
+ timespec_t timestamp;
+ char buf[PMCS_TBUF_ELEM_SIZE];
+} pmcs_tbuf_t;
+
+/*
+ * Firmware event log header format
+ */
+
+typedef struct pmcs_fw_event_hdr_s {
+ uint32_t fw_el_signature;
+ uint32_t fw_el_entry_start_offset;
+ uint32_t fw_el_rsvd1;
+ uint32_t fw_el_buf_size;
+ uint32_t fw_el_rsvd2;
+ uint32_t fw_el_oldest_idx;
+ uint32_t fw_el_latest_idx;
+ uint32_t fw_el_entry_size;
+} pmcs_fw_event_hdr_t;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _PMCS_DEF_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_iomb.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_iomb.h
new file mode 100644
index 0000000000..0a397aee98
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_iomb.h
@@ -0,0 +1,724 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * PMC 8x6G IOMB Definitions
+ */
+#ifndef _PMCS_IOMB_H
+#define _PMCS_IOMB_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * An IOMB (IO Message Buffer) is the principal means of communication
+ * between the PMC and the HOST. The host places IOMBs on the Inbound
+ * Queues (IQ) which are in HOST memory and updates a producer index
+ * within the PMC. The PMC pulls the IOMB off the IQ and updates a
+ * consumer index in HOST memory. If appropriate, when the PMC is
+ * done with the action requested by the IOMB, the PMC writes a
+ * reply IOMB to host memory and updates its producer index and
+ * interrupts the HOST.
+ */
+/*
+ * The first word of all IOMBs is always laid out thusly:
+ *
+ * |Byte 3 |Byte 2 |Byte 1 |Byte 0 |
+ * +-------------+-------------+----------------------+
+ * |V Resvd BC|Resvd OBID |CAT | OPCODE |
+ * +--------------------------------------------------+
+ *
+ * V == Valid
+ * BC = Buffer Count
+ * OBID = Outbound Queue ID
+ * CAT = Category
+ * OPCODE = Well, uh, OPCODE.
+ */
+
+#define PMCS_IOMB_VALID (1U << 31)
+#define PMCS_IOMB_HIPRI (1U << 30)
+#define PMCS_IOMB_BC_SHIFT (24)
+#define PMCS_IOMB_BC_MASK (0xf << PMCS_IOMB_BC_SHIFT)
+#define PMCS_IOMB_OBID_SHIFT (16)
+#define PMCS_IOMB_OBID_MASK (0xf << PMCS_IOMB_OBID_SHIFT)
+#define PMCS_IOMB_CAT_SHIFT (12)
+#define PMCS_IOMB_CAT_MASK (0xf << PMCS_IOMB_CAT_SHIFT)
+#define PMCS_IOMB_OPCODE_MASK (0xfff)
+
+
+#define PMCS_IOMB_CAT_NET 0
+#define PMCS_IOMB_CAT_FC 1
+#define PMCS_IOMB_CAT_SAS 2
+#define PMCS_IOMB_CAT_SCSI 3
+
+/*
+ * Shorthand
+ */
+#define PMCS_IOMB_IN_SAS(q, opcode) \
+ (PMCS_IOMB_VALID | (1 << PMCS_IOMB_BC_SHIFT) | \
+ (PMCS_IOMB_CAT_SAS << PMCS_IOMB_CAT_SHIFT) | \
+ ((q << PMCS_IOMB_OBID_SHIFT) & PMCS_IOMB_OBID_MASK) | \
+ (opcode & PMCS_IOMB_OPCODE_MASK))
+
+/*
+ * PMC IOMB Inbound Queue Opcodes
+ */
+#define PMCIN_ECHO 0x01
+#define PMCIN_GET_INFO 0x02
+#define PMCIN_GET_VPD 0x03
+#define PMCIN_PHY_START 0x04
+#define PMCIN_PHY_STOP 0x05
+#define PMCIN_SSP_INI_IO_START 0x06
+#define PMCIN_SSP_INI_TM_START 0x07
+#define PMCIN_SSP_INI_EXT_IO_START 0x08
+#define PMCIN_DEVICE_HANDLE_ACCEPT 0x09
+#define PMCIN_SSP_TGT_IO_START 0x0A
+#define PMCIN_SSP_TGT_RESPONSE_START 0x0B
+#define PMCIN_SSP_INI_EDC_EXT_IO_START 0x0C
+#define PMCIN_SSP_INI_EDC_EXT_IO_START1 0x0D
+#define PMCIN_SSP_TGT_EDC_IO_START 0x0E
+#define PMCIN_SSP_ABORT 0x0F
+#define PMCIN_DEREGISTER_DEVICE_HANDLE 0x10
+#define PMCIN_GET_DEVICE_HANDLE 0x11
+#define PMCIN_SMP_REQUEST 0x12
+#define PMCIN_SMP_RESPONSE 0x13
+#define PMCIN_SMP_ABORT 0x14
+#define PMCIN_ASSISTED_DISCOVERY 0x15
+#define PMCIN_REGISTER_DEVICE 0x16
+#define PMCIN_SATA_HOST_IO_START 0x17
+#define PMCIN_SATA_ABORT 0x18
+#define PMCIN_LOCAL_PHY_CONTROL 0x19
+#define PMCIN_GET_DEVICE_INFO 0x1A
+#define PMCIN_TWI 0x1B
+#define PMCIN_FW_FLASH_UPDATE 0x20
+#define PMCIN_SET_VPD 0x21
+#define PMCIN_GPIO 0x22
+#define PMCIN_SAS_DIAG_MODE_START_END 0x23
+#define PMCIN_SAS_DIAG_EXECUTE 0x24
+#define PMCIN_SAW_HW_EVENT_ACK 0x25
+#define PMCIN_GET_TIME_STAMP 0x26
+#define PMCIN_PORT_CONTROL 0x27
+#define PMCIN_GET_NVMD_DATA 0x28
+#define PMCIN_SET_NVMD_DATA 0x29
+#define PMCIN_SET_DEVICE_STATE 0x2A
+#define PMCIN_GET_DEVICE_STATE 0x2B
+
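Putting the word-0 layout and the PMCS_IOMB_IN_SAS() shorthand together, a hedged example of composing the header word for an ECHO routed to outbound queue 0 and decoding it again; the function itself is illustrative only.

    /* Illustrative only: build and decode IOMB word 0 for PMCIN_ECHO. */
    static void
    iomb_header_example(void)
    {
        uint32_t w0 = PMCS_IOMB_IN_SAS(0, PMCIN_ECHO);
        uint32_t opcode = w0 & PMCS_IOMB_OPCODE_MASK;
        uint32_t cat = (w0 & PMCS_IOMB_CAT_MASK) >> PMCS_IOMB_CAT_SHIFT;
        uint32_t obid = (w0 & PMCS_IOMB_OBID_MASK) >> PMCS_IOMB_OBID_SHIFT;

        /*
         * opcode == PMCIN_ECHO, cat == PMCS_IOMB_CAT_SAS, obid == 0;
         * w0 also has the VALID bit and a buffer count (BC) of 1 set.
         */
    }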
+/*
+ * General Inbound Queue related parameters (DWORD 4)
+ */
+#define PMCIN_MESSAGE_REPORT (1 << 2)
+#define PMCIN_DS_ABORT_TASK (1 << 3)
+#define PMCIN_DS_IN_RECOVERY (1 << 4)
+#define PMCIN_DATADIR_NONE (0x00 << 8)
+#define PMCIN_DATADIR_2_INI (0x01 << 8)
+#define PMCIN_DATADIR_2_DEV (0x02 << 8)
+
+
+/*
+ * SATA Host IO Start ATA Protocol Types
+ * (placed into DWORD 4)
+ */
+
+#define SATA_PROTOCOL_SRST_ASSERT (0x01 << 10)
+#define SATA_PROTOCOL_SRT_DEASSERT (0x02 << 10)
+#define SATA_PROTOCOL_EXECDEVDIAG (0x03 << 10)
+#define SATA_PROTOCOL_NONDATA (0x04 << 10)
+#define SATA_PROTOCOL_PIO (0x05 << 10)
+#define SATA_PROTOCOL_DMA (0x06 << 10)
+#define SATA_PROTOCOL_FPDMA (0x07 << 10)
+
+/*
+ * SAS Host IO Start TLR definitions
+ * (placed into DWORD 4)
+ */
+#define SAS_TLR_ALL 0 /* SAS 1.1 and SAS 2.0 per device mode page */
+#define SAS_TLR_ON 1 /* unconditionally on */
+#define SAS_TLR_OFF 2 /* unconditionally off */
+#define SAS_TLR_SAS2 3 /* SAS 2.0 per device mode page */
+
+/*
+ * IOP SMP Request Information
+ */
+#define SMP_INDIRECT_RESPONSE 0x01
+#define SMP_INDIRECT_REQUEST 0x02
+#define SMP_PHY_OVERRIDE 0x04
+#define SMP_REQUEST_LENGTH_SHIFT 16
+
+/*
+ * PHY Start related definitions
+ */
+#define PHY_LINK_1_5 0x01
+#define PHY_LINK_3 0x02
+#define PHY_LINK_6 0x04
+#define PHY_LINK_ALL (PHY_LINK_1_5 | PHY_LINK_3 | PHY_LINK_6)
+#define PHY_LINK_SHIFT 8
+
+#define PHY_LM_SAS 1
+#define PHY_LM_SATA 2
+#define PHY_LM_AUTO 3
+#define PHY_MODE_SHIFT 12
+
+#define PHY_SPINUP_HOLD (1 << 14)
+
+/*
+ * LOCAL PHY CONTROL related definitions
+ */
+
+/*
+ * Device Registration related definitions
+ */
+#define PMCS_DEVREG_LINK_RATE_SHIFT 24
+#define PMCS_DEVREG_TYPE_SATA 0
+#define PMCS_DEVREG_TYPE_SAS (1 << 28)
+#define PMCS_DEVREG_TYPE_SATA_DIRECT (1 << 29)
+
+#define PMCS_PHYID_SHIFT 4 /* level 0 registration only */
+#define PMCS_DEVREG_TLR 0x1 /* Transport Layer Retry */
+
+#define PMCS_DEVREG_IT_NEXUS_TIMEOUT 200U
+
+#define PMCS_DEVREG_HA 0x2 /* Host Assigned upper 16 */
+ /* bits for device ID. */
+/*
+ * These are used for getting/setting data in the NVRAM (SEEPROM, VPD, etc.)
+ */
+
+typedef struct pmcs_get_nvmd_cmd_s {
+ uint32_t header; /* DWORD 0 */
+ uint32_t htag; /* DWORD 1 */
+ uint8_t tdas_nvmd; /* DWORD 2 */
+ uint8_t tbn_tdps;
+ uint8_t tda;
+ uint8_t ip;
+ uint8_t doa[3]; /* DWORD 3 Data Offset Addr */
+ uint8_t d_len; /* Direct Pld Data Len */
+ uint32_t rsvd[8]; /* DWORDS 4-11 */
+ uint32_t ipbal; /* 12 - Ind Pld buf addr low */
+ uint32_t ipbah; /* 13 - Ind Pld buf addr high */
+ uint32_t ipdl; /* 14 - Ind Pld data length */
+ uint32_t rsvd3;
+} pmcs_get_nvmd_cmd_t;
+
+typedef struct pmcs_set_nvmd_cmd_s {
+ uint32_t header; /* DWORD 0 */
+ uint32_t htag; /* DWORD 1 */
+ uint8_t tdas_nvmd; /* DWORD 2 */
+ uint8_t tbn_tdps;
+ uint8_t tda;
+ uint8_t ip;
+ uint8_t doa[3]; /* DWORD 3 Data Offset Addr */
+ uint8_t d_len; /* Direct Pld Data Len */
+ uint32_t signature; /* DWORD 4 */
+ uint32_t rsvd[7]; /* DWORDS 5-11 */
+ uint32_t ipbal; /* 12 - Ind Pld buf addr low */
+ uint32_t ipbah; /* 13 - Ind Pld buf addr high */
+ uint32_t ipdl; /* 14 - Ind Pld data length */
+ uint32_t rsvd2;
+} pmcs_set_nvmd_cmd_t;
+
+#define PMCIN_NVMD_DIRECT_PLD 0x00
+#define PMCIN_NVMD_INDIRECT_PLD 0x80
+
+/* TWI bus number is upper 4 bits of tbn_tdps */
+#define PMCIN_NVMD_TBN(x) (x << 4)
+
+/* TWI Device Page Size bits (lower 4 bits of tbn_tdps) */
+#define PMCIN_NVMD_TDPS_1 0 /* 1 byte */
+#define PMCIN_NVMD_TDPS_8 1 /* 8 bytes */
+#define PMCIN_NVMD_TDPS_16 2 /* 16 bytes */
+#define PMCIN_NVMD_TDPS_32 3 /* 32 bytes */
+
+/* TWI Device Address Size (upper 4 bits of tdas_nvmd) */
+#define PMCIN_NVMD_TDAS_1 (0 << 4) /* 1 byte */
+#define PMCIN_NVMD_TDAS_2 (1 << 4) /* 2 bytes */
+
+/*
+ * TWI Device Address
+ * The address used to access the TWI device for the 2KB SEEPROM device is
+ * arranged as follows:
+ * Bits 7-4 are fixed (0xA)
+ * Bits 3-1 are page numbers for each 256 byte page
+ * Bit 0: Set to "1" to read, "0" to write
+ * Bit 0 is set/reset by the firmware based on whether the command is a
+ * SET or a GET.
+ */
+#define PMCIN_TDA_BASE 0xA0
+#define PMCIN_TDA_PAGE(x) (PMCIN_TDA_BASE | (x << 1))
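+
+/*
+ * For example (illustrative only), the TWI device address for SEEPROM
+ * page 2 (the third 256-byte page) is
+ *
+ *	PMCIN_TDA_PAGE(2) == (0xA0 | (2 << 1)) == 0xA4
+ *
+ * with bit 0 subsequently set or cleared by the firmware depending on
+ * whether the command is a GET or a SET.
+ */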
+
+/* NVM Device bits (lower 4 bits of tdas_nvmd) */
+#define PMCIN_NVMD_TWI 0 /* TWI Device */
+#define PMCIN_NVMD_SEEPROM 1 /* SEEPROM Device */
+#define PMCIN_NVMD_VPD 4 /* VPD Flash Memory */
+#define PMCIN_NVMD_AAP1 5 /* AAP1 Register Dump */
+#define PMCIN_NVMD_IOP 6 /* IOP Register Dump */
+
+#define PMCS_SEEPROM_PAGE_SIZE 256
+
+/*
+ * Minimum and maximum sizes of SPCBoot image
+ */
+#define PMCS_SPCBOOT_MIN_SIZE 64
+#define PMCS_SPCBOOT_MAX_SIZE 512
+
+#define PMCS_SEEPROM_SIGNATURE 0xFEDCBA98
+
+/*
+ * Register dump information
+ *
+ * There are two 16KB regions for register dump information
+ */
+
+#define PMCS_REGISTER_DUMP_FLASH_SIZE (1024 * 16)
+#define PMCS_REGISTER_DUMP_BLOCK_SIZE 4096 /* Must be read 4K at a time */
+#define PMCS_FLASH_CHUNK_SIZE 4096 /* Must be read 4K at a time */
+#define PMCS_REG_DUMP_SIZE (1024 * 1024 * 12)
+#define PMCS_NVMD_EVENT_LOG_OFFSET 0x10000
+#define PMCS_IQP_TRACE_BUFFER_SIZE (1024 * 512)
+
+/*
+ * The list of items we can retrieve via the GET_NVMD_DATA command
+ */
+
+typedef enum {
+ PMCS_NVMD_VPD = 0,
+ PMCS_NVMD_REG_DUMP,
+ PMCS_NVMD_EVENT_LOG,
+ PMCS_NVMD_SPCBOOT
+} pmcs_nvmd_type_t;
+
+/*
+ * Command types, descriptors and offsets for SAS_DIAG_EXECUTE.
+ */
+#define PMCS_DIAG_CMD_DESC_SHIFT 8
+#define PMCS_DIAG_CMD_SHIFT 13
+#define PMCS_DIAG_REPORT_GET 0x04 /* Get counters */
+#define PMCS_ERR_CNT_RESET 0x05 /* Clear counters */
+#define PMCS_DISPARITY_ERR_CNT 0x02 /* Disparity error count */
+#define PMCS_LOST_DWORD_SYNC_CNT 0x05 /* Lost DWord sync count */
+#define PMCS_INVALID_DWORD_CNT 0x06 /* Invalid DWord count */
+#define PMCS_RESET_FAILED_CNT 0x0C /* PHY reset failed count */
+
+/*
+ * VPD data layout
+ */
+
+#define PMCS_VPD_DATA_PAGE 2 /* VPD starts at offset 512 */
+#define PMCS_VPD_VERSION 2 /* Expected version number */
+#define PMCS_VPD_RO_BYTE 0x90 /* Start of "read-only" data */
+#define PMCS_VPD_START 0x82 /* VPD start byte */
+#define PMCS_VPD_END 0x78 /* VPD end byte */
+
+/*
+ * This structure defines the "header" for the VPD data. Everything
+ * following this structure is self-defining. The consumer just needs
+ * to allocate a buffer large enough for vpd_length + 3 bytes of data.
+ */
+
+typedef struct {
+ uint8_t eeprom_version;
+ uint8_t vpd_length[2]; /* # bytes that follow, little-endian */
+ uint8_t hba_sas_wwid[8];
+ uint8_t subsys_pid[2];
+ uint8_t subsys_vid[2];
+ uint8_t vpd_start_byte; /* 0x82 */
+ uint8_t strid_length[2]; /* little-endian */
+ /* strid_length bytes follow */
+} pmcs_vpd_header_t;
+
+typedef struct {
+ char keyword[2];
+ uint8_t value_length;
+ char value[1]; /* Length is actually value_length */
+} pmcs_vpd_kv_t;
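+
+/*
+ * Minimal sketch (illustrative, not part of the interface) of stepping
+ * through the keyword/value pairs that follow the header; each pair
+ * occupies 3 + value_length bytes:
+ *
+ *	pmcs_vpd_kv_t *kv = (pmcs_vpd_kv_t *)ptr;
+ *	... examine kv->keyword[0..1] and kv->value[0..value_length - 1] ...
+ *	ptr += 3 + kv->value_length;	advance to the next pair
+ */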
+
+/*
+ * From here on out are definitions related to Outbound Queues
+ * (completions of Inbound Queue requests and async events)
+ */
+
+/*
+ * PMC IOMB Outbound Queue Opcodes
+ */
+#define PMCOUT_ECHO 0x01
+#define PMCOUT_GET_INFO 0x02
+#define PMCOUT_GET_VPD 0x03
+#define PMCOUT_SAS_HW_EVENT 0x04
+#define PMCOUT_SSP_COMPLETION 0x05
+#define PMCOUT_SMP_COMPLETION 0x06
+#define PMCOUT_LOCAL_PHY_CONTROL 0x07
+#define PMCOUT_SAS_ASSISTED_DISCOVERY_EVENT 0x08
+#define PMCOUT_SATA_ASSISTED_DISCOVERY_EVENT 0x09
+#define PMCOUT_DEVICE_REGISTRATION 0x0A
+#define PMCOUT_DEREGISTER_DEVICE_HANDLE 0x0B
+#define PMCOUT_GET_DEVICE_HANDLE 0x0C
+#define PMCOUT_SATA_COMPLETION 0x0D
+#define PMCOUT_SATA_EVENT 0x0E
+#define PMCOUT_SSP_EVENT 0x0F
+#define PMCOUT_DEVICE_HANDLE_ARRIVED 0x10
+#define PMCOUT_SMP_REQUEST_RECEIVED 0x11
+#define PMCOUT_SSP_REQUEST_RECEIVED 0x12
+#define PMCOUT_DEVICE_INFO 0x13
+#define PMCOUT_FW_FLASH_UPDATE 0x14
+#define PMCOUT_SET_VPD 0x15
+#define PMCOUT_GPIO 0x16
+#define PMCOUT_GPIO_EVENT 0x17
+#define PMCOUT_GENERAL_EVENT 0x18
+#define PMCOUT_TWI 0x19
+#define PMCOUT_SSP_ABORT 0x1A
+#define PMCOUT_SATA_ABORT 0x1B
+#define PMCOUT_SAS_DIAG_MODE_START_END 0x1C
+#define PMCOUT_SAS_DIAG_EXECUTE 0x1D
+#define PMCOUT_GET_TIME_STAMP 0x1E
+#define PMCOUT_SAS_HW_EVENT_ACK_ACK 0x1F
+#define PMCOUT_PORT_CONTROL 0x20
+#define PMCOUT_SKIP_ENTRIES 0x21
+#define PMCOUT_SMP_ABORT 0x22
+#define PMCOUT_GET_NVMD_DATA 0x23
+#define PMCOUT_SET_NVMD_DATA 0x24
+#define PMCOUT_DEVICE_HANDLE_REMOVED 0x25
+#define PMCOUT_SET_DEVICE_STATE 0x26
+#define PMCOUT_GET_DEVICE_STATE 0x27
+#define PMCOUT_SET_DEVICE_INFO 0x28
+
+/*
+ * General Outbound Status Definitions
+ */
+#define PMCOUT_STATUS_OK 0x00
+#define PMCOUT_STATUS_ABORTED 0x01
+#define PMCOUT_STATUS_OVERFLOW 0x02
+#define PMCOUT_STATUS_UNDERFLOW 0x03
+#define PMCOUT_STATUS_FAILED 0x04
+#define PMCOUT_STATUS_ABORT_RESET 0x05
+#define PMCOUT_STATUS_IO_NOT_VALID 0x06
+#define PMCOUT_STATUS_NO_DEVICE 0x07
+#define PMCOUT_STATUS_ILLEGAL_PARAMETER 0x08
+#define PMCOUT_STATUS_LINK_FAILURE 0x09
+#define PMCOUT_STATUS_PROG_ERROR 0x0A
+#define PMCOUT_STATUS_EDC_IN_ERROR 0x0B
+#define PMCOUT_STATUS_EDC_OUT_ERROR 0x0C
+#define PMCOUT_STATUS_ERROR_HW_TIMEOUT 0x0D
+#define PMCOUT_STATUS_XFER_ERR_BREAK 0x0E
+#define PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY 0x0F
+#define PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED 0x10
+#define PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11
+#define PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK 0x12
+#define PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13
+#define PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION 0x14
+#define PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15
+#define PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16
+#define PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17
+#define PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_EROOR 0x18
+#define PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED 0x19
+#define PMCOUT_STATUS_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A
+#define PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED 0x1B
+#define PMCOUT_STATUS_XFER_ERROR_RX_FRAME 0x1C
+#define PMCOUT_STATUS_IO_XFER_ERROR_DMA 0x1D
+#define PMCOUT_STATUS_XFER_ERROR_CREDIT_TIMEOUT 0x1E
+#define PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F
+#define PMCOUT_STATUS_XFER_ERROR_SATA 0x20
+#define PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE 0x21
+#define PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22
+#define PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE 0x23
+#define PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT 0x24
+#define PMCOUT_STATUS_SMP_RESP_CONNECTION_ERROR 0x25
+#define PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE 0x26
+#define PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN 0x27
+#define PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED 0x28
+/* 0x29 */
+/* 0x2A */
+/* 0x2B */
+/* 0x2C */
+/* 0x2D */
+/* 0x2E */
+/* 0x2F */
+#define PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30
+#define PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK 0x31
+#define PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32
+/* 0x33 */
+#define PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH 0x34
+#define PMCOUT_STATUS_XFER_ERROR_ZERO_DATA_LEN 0x35
+#define PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED 0x36
+#define PMCOUT_STATUS_ERROR_INTERNAL_SMP_RESOURCE 0x37
+#define PMCOUT_STATUS_IO_PORT_IN_RESET 0x38
+#define PMCOUT_STATUS_IO_DS_NON_OPERATIONAL 0x39
+#define PMCOUT_STATUS_IO_DS_IN_RECOVERY 0x3A
+#define PMCOUT_STATUS_IO_ABORT_IN_PROGRESS 0x40
+
+/*
+ * Device State definitions
+ */
+#define PMCS_DEVICE_STATE_OPERATIONAL 0x1
+#define PMCS_DEVICE_STATE_PORT_IN_RESET 0x2
+#define PMCS_DEVICE_STATE_IN_RECOVERY 0x3
+#define PMCS_DEVICE_STATE_IN_ERROR 0x4
+#define PMCS_DEVICE_STATE_NON_OPERATIONAL 0x7
+
+/*
+ * Reset Types
+ */
+#define PMCS_SSP_LINK_RESET 0x1
+#define PMCS_SSP_HARD_RESET 0x2
+#define PMCS_SMP_HARD_RESET 0x3
+
+/*
+ * PHYOP for LOCAL_PHY_CONTROL Command
+ */
+#define PMCS_PHYOP_LINK_RESET 0x01
+#define PMCS_PHYOP_HARD_RESET 0x02
+
+/*
+ * Specialized status values
+ */
+/* PHY Stop Status Results */
+#define IOP_PHY_STOP_OK 0x0
+#define IOP_PHY_STOP_INVALID 0x1
+#define IOP_PHY_STOP_ERROR 0x3
+#define IOP_PHY_STOP_ALREADY 0x4
+
+/* PHY Start Status Results */
+#define IOP_PHY_START_OK 0
+#define IOP_PHY_START_INVALID 1
+#define IOP_PHY_START_ALREADY 2
+#define IOP_PHY_START_ERROR 3
+
+/* SET/GET_NVMD status results */
+#define PMCS_NVMD_STAT_SUCCESS 0x0000
+#define PMCS_NVMD_STAT_PLD_NVMD_COMB_ERR 0x0001
+#define PMCS_NVMD_STAT_PLD_LEN_ERR 0x0002
+#define PMCS_NVMD_STAT_TWI_DEV_NACK 0x2001
+#define PMCS_NVMD_STAT_TWI_DEV_LOST_ARB 0x2002
+#define PMCS_NVMD_STAT_TWI_TIMEOUT 0x2021
+#define PMCS_NVMD_STAT_TWI_BUS_NACK 0x2081
+#define PMCS_NVMD_STAT_TWI_DEV_ARB_FAIL 0x2082
+#define PMCS_NVMD_STAT_TWI_BUS_SER_TIMEO 0x20FF
+#define PMCS_NVMD_STAT_PART_NOT_IN_FLASH 0x9001
+#define PMCS_NVMD_STAT_LEN_TOO_LARGE 0x9002
+#define PMCS_NVMD_STAT_FLASH_PRGRM_FAIL 0x9003
+#define PMCS_NVMD_STAT_DEVID_MATCH_FAIL 0x9004
+#define PMCS_NVMD_STAT_VENDID_MATCH_FAIL 0x9005
+#define PMCS_NVMD_STAT_SEC_ERASE_TIMEO 0x9006
+#define PMCS_NVMD_STAT_SEC_ERASE_CWE 0x9007
+#define PMCS_NVMD_STAT_FLASH_DEV_BUSY 0x9008
+#define PMCS_NVMD_STAT_FLASH_DEV_NOT_SUP 0x9009
+#define PMCS_NVMD_STAT_FLASH_NO_CFI 0x900A
+#define PMCS_NVMD_STAT_ERASE_BLOCKS 0x900B
+#define PMCS_NVMD_STAT_PART_READ_ONLY 0x900C
+#define PMCS_NVMD_STAT_PART_INV_MAP_TYPE 0x900D
+#define PMCS_NVMD_STAT_PART_INIT_STR_DIS 0x900E
+
+/*
+ * General Event Status Codes
+ */
+#define INBOUND_IOMB_V_BIT_NOT_SET 0x1
+#define INBOUND_IOMB_OPC_NOT_SUPPORTED 0x2
+
+/* Device Register Status Results */
+#define PMCS_DEVREG_OK 0x0
+#define PMCS_DEVREG_DEVICE_ALREADY_REGISTERED 0x2
+#define PMCS_DEVREG_PHY_ALREADY_REGISTERED 0x4
+
+/*
+ * Flash Update responses
+ */
+#define FLASH_UPDATE_COMPLETE_PENDING_REBOOT 0x0
+#define FLASH_UPDATE_IN_PROGRESS 0x1
+#define FLASH_UPDATE_HDR_ERR 0x2
+#define FLASH_UPDATE_OFFSET_ERR 0x3
+#define FLASH_UPDATE_UPDATE_CRC_ERR 0x4
+#define FLASH_UPDATE_LENGTH_ERR 0x5
+#define FLASH_UPDATE_HW_ERR 0x6
+#define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10
+#define FLASH_UPDATE_DISABLED 0x11
+
+/*
+ * IOP SAS HW Event Related definitions
+ */
+
+#define IOP_EVENT_LINK_RATE(x) ((x >> 28) & 0xf)
+#define IOP_EVENT_STATUS(x) ((x >> 24) & 0xf)
+#define IOP_EVENT_EVENT(x) ((x >> 8) & 0xffff)
+#define IOP_EVENT_PHYNUM(x) ((x >> 4) & 0xf)
+#define IOP_EVENT_PORTID(x) ((x) & 0xf)
+
+
+#define IOP_EVENT_PHY_STOP_STATUS 0x03
+#define IOP_EVENT_SAS_PHY_UP 0x04
+#define IOP_EVENT_SATA_PHY_UP 0x05
+#define IOP_EVENT_SATA_SPINUP_HOLD 0x06
+#define IOP_EVENT_PHY_DOWN 0x07
+#define IOP_EVENT_PORT_INVALID 0x08 /* < fw 1.6 */
+#define IOP_EVENT_BROADCAST_CHANGE 0x09
+#define IOP_EVENT_BROADCAST_SES 0x0B
+#define IOP_EVENT_PHY_ERR_INBOUND_CRC 0x0C
+#define IOP_EVENT_HARD_RESET_RECEIVED 0x0D
+#define IOP_EVENT_EVENT_ID_FRAME_TIMO 0x0F
+#define IOP_EVENT_BROADCAST_EXP 0x10
+#define IOP_EVENT_PHY_START_STATUS 0x11
+#define IOP_EVENT_PHY_ERR_INVALID_DWORD 0x12
+#define IOP_EVENT_PHY_ERR_DISPARITY_ERROR 0x13
+#define IOP_EVENT_PHY_ERR_CODE_VIOLATION 0x14
+#define IOP_EVENT_PHY_ERR_LOSS_OF_DWORD_SYN 0x15
+#define IOP_EVENT_PHY_ERR_PHY_RESET_FAILD 0x16
+#define IOP_EVENT_PORT_RECOVERY_TIMER_TMO 0x17
+#define IOP_EVENT_PORT_RECOVER 0x18
+#define IOP_EVENT_PORT_RESET_TIMER_TMO 0x19
+#define IOP_EVENT_PORT_RESET_COMPLETE 0x20
+#define IOP_EVENT_BROADCAST_ASYNC_EVENT 0x21
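+
+/*
+ * Illustrative use of the decode macros above on the event word 'w' from a
+ * SAS_HW_EVENT outbound IOMB (a sketch, not a prescription):
+ *
+ *	if (IOP_EVENT_EVENT(w) == IOP_EVENT_SAS_PHY_UP) {
+ *		uint32_t phynum = IOP_EVENT_PHYNUM(w);
+ *		uint32_t portid = IOP_EVENT_PORTID(w);
+ *		uint32_t rate = IOP_EVENT_LINK_RATE(w);
+ *		...
+ *	}
+ */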
+
+
+#define IOP_EVENT_PORT_STATE(x) ((x) & 0xf)
+#define IOP_EVENT_NPIP(x) (((x) >> 4) & 0xf)
+
+#define IOP_EVENT_PS_NIL 0x0 /* PORT_ID not valid yet */
+#define IOP_EVENT_PS_VALID 0x1 /* PORT_ID now valid */
+#define IOP_EVENT_PS_LOSTCOMM 0x2 /* Link temporarily down */
+#define IOP_EVENT_PS_IN_RESET 0x4 /* Port in reset */
+#define IOP_EVENT_PS_INVALID 0x8 /* PORT_ID now dead */
+
+/*
+ * HW Event Acknowledge Response Values
+ */
+#define SAS_HW_EVENT_ACK_OK 0x0
+#define SAS_HW_EVENT_ACK_INVALID_SEA 0x1
+#define SAS_HW_EVENT_ACK_INVALID_PHY 0x2
+#define SAS_HW_EVENT_ACK_INVALID_PORT 0x4
+#define SAS_HW_EVENT_ACK_INVALID_PARAM 0x8
+
+/*
+ * IOMB Queue definitions and Macros
+ */
+
+#define ADDQI(ix, n, qd) ((ix + n) & (qd - 1))
+#define INCQI(ix, qd) ix = ADDQI(ix, 1, qd)
+#define QI2O(ix, n, qd) (ADDQI(ix, n, qd) * PMCS_QENTRY_SIZE)
+
+/*
+ * Inbound Queue Producer Indices live inside the PMC card.
+ *
+ * Inbound Queue Consumer indices live in host memory. We use the Consumer
+ * Index to return a pointer to an Inbound Queue entry. We can then fill
+ * it with an IOMB. We then update the Producer index, which kicks the
+ * card into reading the IOMB we just wrote.
+ *
+ * There is one mutex for each inbound queue that is held from the time
+ * we get an entry until we increment the producer index, or released
+ * manually if we don't actually send the message.
+ */
+
+/*
+ * NB: the appropriate iqp_lock must be held
+ */
+#define GET_IQ_ENTRY(hwp, qnum) \
+ ((ADDQI(hwp->shadow_iqpi[qnum], 1, hwp->ioq_depth) == \
+ pmcs_rd_iqci(hwp, qnum)) ? NULL : \
+ &hwp->iqp[qnum][hwp->shadow_iqpi[qnum] * (PMCS_QENTRY_SIZE >> 2)])
+
+/*
+ * NB: This releases the lock on the Inbound Queue that a successful
+ * GET_IO_IQ_ENTRY (defined below) acquired.
+ */
+#ifdef DEBUG
+#define INC_IQ_ENTRY(hwp, qnum) \
+{ \
+ uint32_t htag; \
+ ASSERT(mutex_owned(&(hwp)->iqp_lock[qnum])); \
+ htag = hwp->iqp[qnum][(hwp->shadow_iqpi[qnum] * \
+ (PMCS_QENTRY_SIZE >> 2)) + 1]; \
+ mutex_enter(&(hwp)->dbglock); \
+ pmcs_iqp_trace(hwp, qnum); \
+ mutex_exit(&(hwp)->dbglock); \
+ INCQI(hwp->shadow_iqpi[qnum], hwp->ioq_depth); \
+ if (ddi_dma_sync(hwp->cip_handles, 0, 0, \
+ DDI_DMA_SYNC_FORDEV) != DDI_SUCCESS) { \
+ pmcs_prt(hwp, PMCS_PRT_DEBUG, "Condition failed at " \
+ " %s():%d", __func__, __LINE__); \
+ } \
+ pmcs_wr_iqpi(hwp, qnum, hwp->shadow_iqpi[qnum]); \
+ mutex_exit(&(hwp)->iqp_lock[qnum]); \
+ mutex_enter(&(hwp)->dbglock); \
+ hwp->ftag_lines[hwp->fti] = __LINE__; \
+ hwp->ftime[hwp->fti] = gethrtime(); \
+ hwp->ftags[hwp->fti++] = htag; \
+ mutex_exit(&(hwp)->dbglock); \
+}
+#else
+#define INC_IQ_ENTRY(hwp, qnum) \
+ INCQI(hwp->shadow_iqpi[qnum], hwp->ioq_depth); \
+ if (ddi_dma_sync(hwp->cip_handles, 0, 0, \
+ DDI_DMA_SYNC_FORDEV) != DDI_SUCCESS) { \
+ pmcs_prt(hwp, PMCS_PRT_DEBUG, "Condition failed at " \
+ " %s():%d", __func__, __LINE__); \
+ } \
+ pmcs_wr_iqpi(hwp, qnum, hwp->shadow_iqpi[qnum]); \
+ mutex_exit(&(hwp)->iqp_lock[qnum])
+#endif
+
+
+/*
+ * NB: successful acquisition of an IO Inbound Queue
+ * entry leaves the lock on that Inbound Queue held.
+ */
+#define GET_IO_IQ_ENTRY(pwp, msg, did, iq) \
+ iq = did & PMCS_IO_IQ_MASK; \
+ mutex_enter(&(pwp)->iqp_lock[iq]); \
+ msg = GET_IQ_ENTRY(pwp, iq); \
+ if (msg == NULL) { \
+ mutex_exit(&(pwp)->iqp_lock[iq]); \
+ for (iq = 0; iq <= PMCS_NON_HIPRI_QUEUES; iq++) { \
+ mutex_enter(&(pwp)->iqp_lock[iq]); \
+ msg = GET_IQ_ENTRY(pwp, iq); \
+ if (msg) { \
+ break; \
+ } \
+ mutex_exit(&(pwp->iqp_lock[iq])); \
+ } \
+ }
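+
+/*
+ * Typical send pattern using the macros above (a sketch only; 'pwp',
+ * 'device_id' and 'htag' are placeholders, and LE_32() is assumed to be
+ * the usual little-endian store macro):
+ *
+ *	uint32_t *msg;
+ *	uint32_t iq;
+ *
+ *	GET_IO_IQ_ENTRY(pwp, msg, device_id, iq);
+ *	if (msg == NULL) {
+ *		... no room on any IO queue; try again later ...
+ *	}
+ *	msg[0] = LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SSP_INI_IO_START));
+ *	msg[1] = LE_32(htag);
+ *	... fill in the remaining DWORDs of the IOMB ...
+ *	INC_IQ_ENTRY(pwp, iq);		this also drops the IQ lock
+ */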
+
+/*
+ * Outbound Queue Macros
+ *
+ * Outbound Queue Consumer indices live inside the card.
+ *
+ * Outbound Queue Producer indices live in host memory. When the card
+ * wants to send an IOMB, it uses the producer index to find the spot
+ * to write the IOMB. After it's done it updates the producer index
+ * and interrupts the host. The host reads the producer index (from
+ * host memory) and reads IOMBs up to but not including that index.
+ * It writes that index back to the consumer index on the card,
+ * signifying that it has consumed everything the card has sent so far.
+ */
+#define GET_OQ_ENTRY(hwp, qn, ix, o) \
+ &hwp->oqp[qn][QI2O(ix, o, hwp->ioq_depth) >> 2]
+
+#define STEP_OQ_ENTRY(hwp, qn, ix, n) ix = ADDQI(ix, n, hwp->ioq_depth)
+
+#define SYNC_OQ_ENTRY(hwp, qn, ci, pi) \
+ pmcs_wr_oqci(hwp, qn, ci); \
+ (hwp)->oqci[qn] = ci; \
+ (hwp)->oqpi[qn] = pi
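+
+/*
+ * Illustrative consumer loop (a simplified sketch): read the producer index
+ * the card placed in host memory, walk the entries up to it, then publish
+ * the new consumer index back to the card:
+ *
+ *	uint32_t ci = hwp->oqci[qnum];
+ *	uint32_t pi = pmcs_rd_oqpi(hwp, qnum);
+ *
+ *	while (ci != pi) {
+ *		uint32_t *iomb = GET_OQ_ENTRY(hwp, qnum, ci, 0);
+ *		... process the IOMB ...
+ *		STEP_OQ_ENTRY(hwp, qnum, ci, 1);
+ *	}
+ *	SYNC_OQ_ENTRY(hwp, qnum, ci, pi);
+ */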
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _PMCS_IOMB_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_mpi.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_mpi.h
new file mode 100644
index 0000000000..b0c875e871
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_mpi.h
@@ -0,0 +1,222 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * PMC 8x6G Message Passing Interface Definitions
+ */
+#ifndef _PMCS_MPI_H
+#define _PMCS_MPI_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PMCS_DWRD(x) (x << 2)
+
+/*
+ * MPI Configuration Table Offsets
+ */
+#define PMCS_MPI_AS PMCS_DWRD(0) /* ASCII Signature */
+#define PMCS_SIGNATURE 0x53434D50
+
+#define PMCS_MPI_IR PMCS_DWRD(1) /* Interface Revision */
+#define PMCS_MPI_REVISION1 1
+
+#define PMCS_MPI_FW PMCS_DWRD(2) /* Firmware Version */
+#define PMCS_FW_TYPE(hwp) (hwp->fw & 0xf)
+#define PMCS_FW_TYPE_RELEASED 0
+#define PMCS_FW_TYPE_DEVELOPMENT 1
+#define PMCS_FW_TYPE_ALPHA 2
+#define PMCS_FW_TYPE_BETA 3
+#define PMCS_FW_VARIANT(hwp) ((hwp->fw >> 4) & 0xf)
+#define PMCS_FW_MAJOR(hwp) ((hwp->fw >> 24) & 0xff)
+#define PMCS_FW_MINOR(hwp) ((hwp->fw >> 16) & 0xff)
+#define PMCS_FW_MICRO(hwp) ((hwp->fw >> 8) & 0xff)
+#define PMCS_FW_REV(hwp) ((hwp->fw >> 8) & 0xffffff)
+#define PMCS_FW_VERSION(maj, min, mic) ((maj << 16)|(min << 8)|mic)
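+
+/*
+ * For example (illustrative only; the revision numbers are arbitrary), a
+ * check that the running firmware is at least 1.7.0 could be written as
+ *
+ *	if (PMCS_FW_REV(hwp) < PMCS_FW_VERSION(1, 7, 0)) {
+ *		... firmware is older than 1.7.0 ...
+ *	}
+ *
+ * since both expressions reduce to (major << 16) | (minor << 8) | micro.
+ */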
+
+#define	PMCS_MPI_MOIO	PMCS_DWRD(3)	/* Maximum # of outstanding I/Os */
+#define PMCS_MPI_INFO0 PMCS_DWRD(4) /* Maximum S/G Elem, Max Dev Handle */
+#define PMCS_MSGL(x) (x & 0xffff)
+#define PMCS_MD(x) ((x >> 16) & 0xffff)
+
+#define PMCS_MPI_INFO1 PMCS_DWRD(5) /* Info #0 */
+
+#define PMCS_MNIQ(x) (x & 0xff) /* Max # of Inbound Queues */
+#define PMCS_MNOQ(x) ((x >> 8) & 0xff) /* Max # of Outbound Queues */
+#define PMCS_HPIQ(x) ((x >> 16) & 0x1) /* High Pri Queue Supported */
+#define PMCS_ICS(x) ((x >> 18) & 0x1) /* Interrupt Coalescing */
+#define	PMCS_NPHY(x)	((x >> 19) & 0x3f)	/* Number of PHYs */
+#define PMCS_SASREV(x) ((x >> 25) & 0x7) /* SAS Revision Specification */
+
+#define PMCS_MPI_GSTO PMCS_DWRD(6) /* General Status Table Offset */
+#define PMCS_MPI_IQCTO PMCS_DWRD(7) /* Inbound Queue Config Table Offset */
+#define PMCS_MPI_OQCTO PMCS_DWRD(8) /* Outbound Queue Config Table Offset */
+
+#define PMCS_MPI_INFO2 PMCS_DWRD(9) /* Info #1 */
+
+#define IQ_NORMAL_PRI_DEPTH_SHIFT 0
+#define IQ_NORMAL_PRI_DEPTH_MASK 0xff
+#define IQ_HIPRI_PRI_DEPTH_SHIFT 8
+#define IQ_HIPRI_PRI_DEPTH_MASK 0xff00
+#define GENERAL_EVENT_OQ_SHIFT 16
+#define GENERAL_EVENT_OQ_MASK 0xff0000
+#define DEVICE_HANDLE_REMOVED_SHIFT 24
+#define DEVICE_HANDLE_REMOVED_MASK 0xff000000ul
+
+#define PMCS_MPI_EVQS PMCS_DWRD(0xA) /* SAS Event Queues */
+#define PMCS_MPI_EVQSET(pwp, oq, phy) { \
+ uint32_t woff = phy / 4; \
+ uint32_t shf = (phy % 4) * 8; \
+ uint32_t tmp = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_EVQS + (woff << 2)); \
+ tmp &= ~(0xff << shf); \
+ tmp |= ((oq & 0xff) << shf); \
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_EVQS + (woff << 2), tmp); \
+}
+
+#define PMCS_MPI_SNCQ PMCS_DWRD(0xC) /* Sata NCQ Notification Queues */
+#define PMCS_MPI_NCQSET(pwp, oq, phy) { \
+ uint32_t woff = phy / 4; \
+ uint32_t shf = (phy % 4) * 8; \
+ uint32_t tmp = pmcs_rd_mpi_tbl(pwp, PMCS_MPI_SNCQ + (woff << 2)); \
+ tmp &= ~(0xff << shf); \
+ tmp |= ((oq & 0xff) << shf); \
+ pmcs_wr_mpi_tbl(pwp, PMCS_MPI_SNCQ + (woff << 2), tmp); \
+}
+
+/*
+ * I_T Nexus Target Event Notification Queue
+ */
+#define PMCS_MPI_IT_NTENQ PMCS_DWRD(0xE)
+
+/*
+ * SSP Target Event Notification Queue
+ */
+#define PMCS_MPI_SSP_TENQ PMCS_DWRD(0x10)
+
+/*
+ * SMP Target Event Notification Queue
+ */
+#define PMCS_MPI_SMP_TENQ PMCS_DWRD(0x12)
+
+/*
+ * This specifies a log buffer in host memory for the MSGU.
+ */
+#define PMCS_MPI_MELBAH PMCS_DWRD(0x14) /* MSGU Log Buffer high 32 bits */
+#define PMCS_MPI_MELBAL PMCS_DWRD(0x15) /* MSGU Log Buffer low 32 bits */
+#define PMCS_MPI_MELBS PMCS_DWRD(0x16) /* size in bytes of MSGU log buffer */
+#define PMCS_MPI_MELSEV PMCS_DWRD(0x17) /* Log Severity */
+
+/*
+ * This specifies a log buffer in host memory for the IOP.
+ */
+#define PMCS_MPI_IELBAH PMCS_DWRD(0x18) /* IOP Log Buffer high 32 bits */
+#define PMCS_MPI_IELBAL PMCS_DWRD(0x19) /* IOP Log Buffer low 32 bits */
+#define PMCS_MPI_IELBS PMCS_DWRD(0x1A) /* size in bytes of IOP log buffer */
+#define PMCS_MPI_IELSEV PMCS_DWRD(0x1B) /* Log Severity */
+
+/*
+ * Fatal Error Handling
+ */
+#define PMCS_MPI_FERR PMCS_DWRD(0x1C)
+#define PMCS_FERRIE 0x1 /* Fatal Err Interrupt Enable */
+#define PMCS_FERIV_MASK 0xff00 /* Fatal Err Interrupt Mask */
+#define PMCS_FERIV_SHIFT 8 /* Fatal Err Interrupt Shift */
+
+#define PMCS_MPI_IRAE 0x20000 /* Interrupt Reassertion Enable */
+#define PMCS_MPI_IRAU 0x40000 /* Interrupt Reassertion Unit */
+#define PMCS_MPI_IRAD_MASK 0xfff80000 /* Reassertion Delay Mask */
+
+#define PMCS_FERDOMSGU PMCS_DWRD(0x1D)
+#define PMCS_FERDLMSGU PMCS_DWRD(0x1E)
+#define PMCS_FERDOIOP PMCS_DWRD(0x1F)
+#define PMCS_FERDLIOP PMCS_DWRD(0x20)
+
+/*
+ * MPI GST Table Offsets
+ */
+
+#define PMCS_GST_BASE 0
+#define PMCS_GST_IQFRZ0 (PMCS_GST_BASE + PMCS_DWRD(1))
+#define PMCS_GST_IQFRZ1 (PMCS_GST_BASE + PMCS_DWRD(2))
+#define PMCS_GST_MSGU_TICK (PMCS_GST_BASE + PMCS_DWRD(3))
+#define PMCS_GST_IOP_TICK (PMCS_GST_BASE + PMCS_DWRD(4))
+#define PMCS_GST_PHY_INFO(x) (PMCS_GST_BASE + PMCS_DWRD(0x6) + PMCS_DWRD(x))
+#define PMCS_GST_RERR_BASE (PMCS_GST_BASE + PMCS_DWRD(0x11))
+#define PMCS_GST_RERR_INFO(x) (PMCS_GST_RERR_BASE + PMCS_DWRD(x))
+
+#define PMCS_MPI_S(x) ((x) & 0x7)
+#define PMCS_QF(x) (((x) >> 3) & 0x1)
+#define PMCS_GSTLEN(x) (((x) >> 4) & 0x3fff)
+#define PMCS_HMI_ERR(x) (((x) >> 16) & 0xffff)
+
+#define PMCS_MPI_STATE_NIL 0
+#define PMCS_MPI_STATE_INIT 1
+#define PMCS_MPI_STATE_DEINIT 2
+#define PMCS_MPI_STATE_ERR 3
+
+/*
+ * MPI Inbound Queue Configuration Table Offsets
+ *
+ * Each Inbound Queue configuration area consumes 8 DWORDS (32 bit words),
+ * or 32 bytes.
+ */
+#define PMCS_IQC_PARMX(x) ((x) << 5)
+#define PMCS_IQBAHX(x) (((x) << 5) + 4)
+#define PMCS_IQBALX(x) (((x) << 5) + 8)
+#define PMCS_IQCIBAHX(x) (((x) << 5) + 12)
+#define PMCS_IQCIBALX(x) (((x) << 5) + 16)
+#define PMCS_IQPIBARX(x) (((x) << 5) + 20)
+#define PMCS_IQPIOFFX(x) (((x) << 5) + 24)
+#define PMCS_IQDX(x) ((x) & 0xffff)
+#define PMCS_IQESX(x) (((x) >> 16) & 0x3fff)
+#define PMCS_IQPX(x) (((x) >> 30) & 0x3)
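+
+/*
+ * For example (illustrative only), the configuration area for inbound
+ * queue 3 starts at byte offset PMCS_IQC_PARMX(3) == 3 << 5 == 96 within
+ * the Inbound Queue Configuration Table, and its base address low DWORD
+ * is at PMCS_IQBALX(3) == 96 + 8 == 104.
+ */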
+
+/*
+ * MPI Outbound Queue Configuration Table Offsets
+ *
+ * Each Outbound Queue configuration area consumes 9 DWORDS (32 bit words),
+ * or 36 bytes.
+ */
+#define PMCS_OQC_PARMX(x) (x * 36)
+#define PMCS_OQBAHX(x) ((x * 36) + 4)
+#define PMCS_OQBALX(x) ((x * 36) + 8)
+#define PMCS_OQPIBAHX(x) ((x * 36) + 12)
+#define PMCS_OQPIBALX(x) ((x * 36) + 16)
+#define PMCS_OQCIBARX(x) ((x * 36) + 20)
+#define PMCS_OQCIOFFX(x) ((x * 36) + 24)
+#define PMCS_OQIPARM(x) ((x * 36) + 28)
+#define PMCS_OQDICX(x) ((x * 36) + 32)
+
+#define PMCS_OQDX(x) ((x) & 0xffff)
+#define PMCS_OQESX(x) (((x) >> 16) & 0x3fff)
+#define PMCS_OQICT(x) ((x) & 0xffff)
+#define PMCS_OQICC(x) (((x) >> 16) & 0xff)
+#define PMCS_OQIV(x) (((x) >> 24) & 0xff)
+
+#define OQIEX (1 << 30)
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _PMCS_MPI_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_param.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_param.h
new file mode 100644
index 0000000000..0522ce6e88
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_param.h
@@ -0,0 +1,138 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * PMC Compile Time Tunable Parameters
+ */
+#ifndef _PMCS_PARAM_H
+#define _PMCS_PARAM_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Maximum number of microseconds we will try to configure a PHY
+ */
+#define PMCS_MAX_CONFIG_TIME (60 * 1000000)
+
+#define PMCS_MAX_OQ 64 /* maximum number of OutBound Queues */
+#define PMCS_MAX_IQ 64 /* maximum number of InBound Queues */
+
+#define PMCS_MAX_PORTS 16 /* maximum port contexts */
+
+#define PMCS_MAX_XPND 16 /* 16 levels of expansion */
+
+#define PMCS_INDICES_SIZE 512
+
+#define PMCS_MIN_CHUNK_PAGES 512
+#define PMCS_ADDTL_CHUNK_PAGES 8
+
+/*
+ * Scratch area has to hold Max SMP Request and Max SMP Response,
+ * plus some slop.
+ */
+#define PMCS_SCRATCH_SIZE 2304
+#define	PMCS_INITIAL_DMA_OFF	(PMCS_INDICES_SIZE + PMCS_SCRATCH_SIZE)
+#define PMCS_CONTROL_SIZE ptob(1)
+
+/*
+ * 2M bytes was allocated to firmware log and split between two logs
+ */
+#define PMCS_FWLOG_SIZE (2 << 20)
+#define PMCS_FWLOG_MAX 5 /* maximum logging level */
+#define SATLSIZE 1024
+
+/*
+ * PMCS_NQENTRY is tunable by setting pmcs-num-io-qentries
+ */
+#define PMCS_NQENTRY 512 /* 512 entries per queue */
+#define PMCS_MIN_NQENTRY 32 /* No less than 32 entries per queue */
+#define PMCS_QENTRY_SIZE 64 /* 64 bytes per entry */
+#define PMCS_MSG_SIZE (PMCS_QENTRY_SIZE >> 2)
+
+/*
+ * Watchdog interval, in usecs.
+ * NB: Needs to be evenly divisible by 10
+ */
+#define PMCS_WATCH_INTERVAL 250000 /* watchdog interval in us */
+
+/*
+ * Inbound Queue definitions
+ */
+#define PMCS_NIQ 9 /* 9 Inbound Queues */
+#define PMCS_IO_IQ_MASK 7 /* IO queues are 0..7 */
+#define PMCS_IQ_OTHER 8 /* "Other" queue is 8 (HiPri) */
+#define PMCS_NON_HIPRI_QUEUES PMCS_IO_IQ_MASK
+
+/*
+ * Outbound Queue definitions
+ *
+ * Note that the OQ definitions map to bits set in
+ * the Outbound Doorbell register to indicate service
+ * is needed on one of these queues.
+ */
+#define PMCS_NOQ 3 /* 3 Outbound Queues */
+
+#define PMCS_OQ_IODONE 0 /* I/O completion Outbound Queue */
+#define PMCS_OQ_GENERAL 1 /* General Outbound Queue */
+#define PMCS_OQ_EVENTS 2 /* Event Outbound Queue */
+
+
+/*
+ * External Scatter/Gather lists come in chunks, each this many entries deep.
+ */
+#define PMCS_SGL_NCHUNKS 16 /* S/G List Chunk Size */
+#define PMCS_MAX_CHUNKS 32 /* max chunks per command */
+
+/*
+ * MSI/MSI-X related definitions.
+ *
+ * These are the maximum numbers of interrupt vectors we could use.
+ */
+#define PMCS_MAX_MSIX (PMCS_NOQ + 1)
+#define PMCS_MAX_MSI PMCS_MAX_MSIX
+#define PMCS_MAX_FIXED 1
+
+#define PMCS_MSIX_IODONE PMCS_OQ_IODONE /* I/O Interrupt vector */
+#define PMCS_MSIX_GENERAL PMCS_OQ_GENERAL /* General Interrupt vector */
+#define PMCS_MSIX_EVENTS PMCS_OQ_EVENTS /* Events Interrupt vector */
+#define PMCS_MSIX_FATAL (PMCS_MAX_MSIX-1) /* Fatal Int vector */
+
+#define PMCS_FATAL_INTERRUPT 15 /* fatal interrupt OBDB bit */
+
+/*
+ * Blessed firmware version
+ */
+#define PMCS_FIRMWARE_CODE_NAME "firmware"
+#define PMCS_FIRMWARE_ILA_NAME "ila"
+#define PMCS_FIRMWARE_SPCBOOT_NAME "SPCBoot"
+#define PMCS_FIRMWARE_START_SUF ".bin_start"
+#define PMCS_FIRMWARE_END_SUF ".bin_end"
+#define PMCS_FIRMWARE_FILENAME "misc/pmcs/pmcs8001fw"
+#define PMCS_FIRMWARE_VERSION_NAME "pmcs8001_fwversion"
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _PMCS_PARAM_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_proto.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_proto.h
new file mode 100644
index 0000000000..d28b0a56ef
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_proto.h
@@ -0,0 +1,353 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file provides function prototypes.
+ */
+#ifndef _PMCS_PROTO_H
+#define _PMCS_PROTO_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+typedef enum {
+ PMCS_PRT_DEBUG = 0,
+ PMCS_PRT_DEBUG1,
+ PMCS_PRT_DEBUG2,
+ PMCS_PRT_DEBUG3,
+ PMCS_PRT_DEBUG_CONFIG,
+ PMCS_PRT_DEBUG_IPORT,
+ PMCS_PRT_DEBUG_MAP,
+ PMCS_PRT_DEBUG_UNDERFLOW,
+ PMCS_PRT_DEBUG_SCSI_STATUS,
+ PMCS_PRT_DEBUG_PHY_LOCKING,
+ PMCS_PRT_DEBUG_DEV_STATE,
+ PMCS_PRT_DEBUG_DEVEL,
+ PMCS_PRT_INFO,
+ PMCS_PRT_WARN,
+ PMCS_PRT_ERR
+} pmcs_prt_level_t;
+
+#define pmcs_prt(pwp, level, fmt...) { \
+ int lvl = level; \
+ if (((pwp->debug_mask & (1 << lvl)) != 0) || \
+ (lvl > PMCS_PRT_DEBUG_DEVEL)) { \
+ pmcs_prt_impl(pwp, lvl, fmt); \
+ } \
+}
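+
+/*
+ * Example usage (illustrative): debug-level messages are emitted only when
+ * the corresponding bit is set in pwp->debug_mask; levels above
+ * PMCS_PRT_DEBUG_DEVEL (INFO, WARN, ERR) are always emitted.
+ *
+ *	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "phy %d: config started", phynum);
+ */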
+
+/*PRINTFLIKE3*/
+void
+pmcs_prt_impl(pmcs_hw_t *, pmcs_prt_level_t, const char *, ...)
+ __KPRINTFLIKE(3);
+
+boolean_t pmcs_assign_device(pmcs_hw_t *, pmcs_xscsi_t *);
+void pmcs_remove_device(pmcs_hw_t *, pmcs_phy_t *);
+void pmcs_handle_dead_phys(pmcs_hw_t *);
+
+int pmcs_acquire_scratch(pmcs_hw_t *, boolean_t);
+void pmcs_release_scratch(pmcs_hw_t *);
+
+/* get a work structure */
+pmcwork_t *pmcs_gwork(pmcs_hw_t *, uint32_t, pmcs_phy_t *);
+
+/* put a work structure */
+void pmcs_pwork(pmcs_hw_t *, struct pmcwork *);
+
+/* given a tag, find a work structure */
+pmcwork_t *pmcs_tag2wp(pmcs_hw_t *, uint32_t);
+
+/*
+ * Abort function
+ */
+int pmcs_abort(pmcs_hw_t *, pmcs_phy_t *, uint32_t, int, int);
+
+/*
+ * SSP Task Management Function
+ */
+int pmcs_ssp_tmf(pmcs_hw_t *, pmcs_phy_t *, uint8_t, uint32_t, uint64_t,
+ uint32_t *);
+
+/*
+ * Abort NCQ function
+ */
+int pmcs_sata_abort_ncq(pmcs_hw_t *, pmcs_phy_t *);
+
+/*
+ * Interrupt Functions
+ */
+void pmcs_general_intr(pmcs_hw_t *);
+void pmcs_iodone_intr(pmcs_hw_t *);
+void pmcs_event_intr(pmcs_hw_t *);
+void pmcs_timed_out(pmcs_hw_t *, uint32_t, const char *);
+
+/*
+ * Abort handler
+ */
+int pmcs_abort_handler(pmcs_hw_t *);
+
+/*
+ * Deregister all expander connected devices
+ */
+void pmcs_deregister_devices(pmcs_hw_t *, pmcs_phy_t *);
+int pmcs_register_device(pmcs_hw_t *, pmcs_phy_t *);
+void pmcs_deregister_device(pmcs_hw_t *, pmcs_phy_t *);
+
+/*
+ * endian transform a data structure
+ */
+void pmcs_endian_transform(pmcs_hw_t *, void *, void *, const uint8_t *);
+
+/* get the connection rate string */
+const char *pmcs_get_rate(unsigned int);
+
+/* get the device type string */
+const char *pmcs_get_typename(pmcs_dtype_t pmcs_dtype);
+
+/* get the SAS Task Management function name */
+const char *pmcs_tmf2str(int);
+
+/* get the PMC status string */
+const char *pmcs_status_str(uint32_t);
+
+/*
+ * WWN to Byte Array and vice versa conversion
+ */
+uint64_t pmcs_barray2wwn(uint8_t[8]);
+void pmcs_wwn2barray(uint64_t, uint8_t[8]);
+
+/*
+ * Print f/w version
+ */
+void pmcs_report_fwversion(pmcs_hw_t *);
+
+/*
+ * Build a device name.
+ */
+void pmcs_phy_name(pmcs_hw_t *, pmcs_phy_t *, char *, size_t);
+
+/*
+ * Find a PHY by device_id
+ */
+pmcs_phy_t *pmcs_find_phy_by_devid(pmcs_hw_t *, uint32_t);
+
+/*
+ * Find a PHY by wwn
+ */
+pmcs_phy_t *pmcs_find_phy_by_wwn(pmcs_hw_t *, uint64_t);
+
+/*
+ * Find a PHY by sas_address
+ */
+pmcs_phy_t *pmcs_find_phy_by_sas_address(pmcs_hw_t *, pmcs_iport_t *,
+ pmcs_phy_t *, char *);
+
+/*
+ * Print out a FIS
+ */
+void pmcs_fis_dump(pmcs_hw_t *, fis_t);
+
+/*
+ * Print an IOMB
+ */
+void pmcs_print_entry(pmcs_hw_t *, int, char *, void *);
+
+void pmcs_spinup_release(pmcs_hw_t *, pmcs_phy_t *phyp);
+
+/*
+ * Handler for events - can be called from interrupt level or from worker thread
+ */
+void pmcs_ack_events(pmcs_hw_t *);
+
+/*
+ * This function does some initial setup and hardware validation
+ */
+int pmcs_setup(pmcs_hw_t *);
+
+/*
+ * These functions start and stop the MPI (message passing interface)
+ */
+int pmcs_start_mpi(pmcs_hw_t *);
+int pmcs_stop_mpi(pmcs_hw_t *);
+
+/*
+ * This function checks firmware revisions against required revisions
+ * and attempts to flash new firmware (if possible).
+ */
+int pmcs_firmware_update(pmcs_hw_t *);
+
+/*
+ * This function runs ECHO commands to test both interrupts and queues
+ */
+int pmcs_echo_test(pmcs_hw_t *);
+
+/*
+ * These functions start, reset, and stop the physical chip PHYs
+ */
+int pmcs_start_phy(pmcs_hw_t *, int, int, int);
+int pmcs_start_phys(pmcs_hw_t *);
+void pmcs_stop_phy(pmcs_hw_t *, int);
+void pmcs_stop_phys(pmcs_hw_t *);
+
+/*
+ * These functions setup/teardown iport tgtmap
+ */
+int pmcs_iport_tgtmap_create(pmcs_iport_t *);
+int pmcs_iport_tgtmap_destroy(pmcs_iport_t *);
+
+/*
+ * Utility and wrapper functions for SAS_DIAG_EXECUTE
+ */
+int pmcs_sas_diag_execute(pmcs_hw_t *, uint32_t, uint32_t, uint8_t);
+int pmcs_get_diag_report(pmcs_hw_t *, uint32_t, uint8_t);
+int pmcs_clear_diag_counters(pmcs_hw_t *, uint8_t);
+
+/*
+ * Get current firmware timestamp
+ */
+int pmcs_get_time_stamp(pmcs_hw_t *, uint64_t *);
+
+/*
+ * Register Dump (including "internal" registers)
+ */
+void pmcs_register_dump(pmcs_hw_t *);
+void pmcs_iqp_trace(pmcs_hw_t *, uint32_t);
+void pmcs_register_dump_int(pmcs_hw_t *);
+int pmcs_dump_binary(pmcs_hw_t *, uint32_t *, uint32_t,
+ uint32_t, caddr_t, uint32_t);
+int pmcs_dump_feregs(pmcs_hw_t *, uint32_t *, uint8_t,
+ caddr_t, uint32_t);
+
+/*
+ * This function performs a soft reset.
+ * Hard reset is platform-specific.
+ */
+int pmcs_soft_reset(pmcs_hw_t *, boolean_t);
+
+/*
+ * Some more reset functions
+ */
+int pmcs_reset_dev(pmcs_hw_t *, pmcs_phy_t *, uint64_t);
+int pmcs_reset_phy(pmcs_hw_t *, pmcs_phy_t *, uint8_t);
+
+/*
+ * These functions do topology configuration changes
+ */
+void pmcs_discover(pmcs_hw_t *);
+void pmcs_set_changed(pmcs_hw_t *, pmcs_phy_t *, boolean_t, int);
+void pmcs_kill_changed(pmcs_hw_t *, pmcs_phy_t *, int);
+void pmcs_clear_phy(pmcs_hw_t *, pmcs_phy_t *);
+int pmcs_kill_device(pmcs_hw_t *, pmcs_phy_t *);
+
+/*
+ * Firmware flash function
+ */
+int pmcs_fw_flash(pmcs_hw_t *, pmcs_fw_hdr_t *, uint32_t);
+
+/*
+ * Set a new value for the interrupt coalescing timer. If it's being set
+ * to zero (disabling), then re-enable auto clear if necessary. If it's
+ * being changed from zero, turn off auto clear if it was on.
+ */
+typedef enum {
+ DECREASE_TIMER = 0,
+ INCREASE_TIMER
+} pmcs_coal_timer_adj_t;
+
+void pmcs_check_intr_coal(void *arg);
+void pmcs_set_intr_coal_timer(pmcs_hw_t *pwp, pmcs_coal_timer_adj_t adj);
+
+/*
+ * Misc supporting routines
+ */
+void pmcs_check_iomb_status(pmcs_hw_t *pwp, uint32_t *iomb);
+void pmcs_clear_xp(pmcs_hw_t *, pmcs_xscsi_t *);
+
+int pmcs_run_sata_cmd(pmcs_hw_t *, pmcs_phy_t *, fis_t, uint32_t,
+ uint32_t, uint32_t);
+int pmcs_sata_identify(pmcs_hw_t *, pmcs_phy_t *);
+void pmcs_sata_work(pmcs_hw_t *);
+boolean_t pmcs_dma_setup(pmcs_hw_t *pwp, ddi_dma_attr_t *dma_attr,
+ ddi_acc_handle_t *acch, ddi_dma_handle_t *dmah, size_t length,
+ caddr_t *kvp, uint64_t *dma_addr);
+void pmcs_fm_ereport(pmcs_hw_t *pwp, char *detail);
+int pmcs_check_dma_handle(ddi_dma_handle_t handle);
+int pmcs_check_acc_handle(ddi_acc_handle_t handle);
+int pmcs_check_acc_dma_handle(pmcs_hw_t *pwp);
+int pmcs_get_nvmd(pmcs_hw_t *pwp, pmcs_nvmd_type_t nvmd_type, uint8_t nvmd,
+ uint32_t offset, char *buf, uint32_t size_left);
+boolean_t pmcs_set_nvmd(pmcs_hw_t *pwp, pmcs_nvmd_type_t nvmd_type,
+ uint8_t *buf, size_t len);
+void pmcs_complete_work_impl(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *iomb,
+ size_t amt);
+void pmcs_flush_target_queues(pmcs_hw_t *, pmcs_xscsi_t *, uint8_t);
+boolean_t pmcs_iport_has_targets(pmcs_hw_t *, pmcs_iport_t *);
+void pmcs_free_dma_chunklist(pmcs_hw_t *);
+int pmcs_get_dev_state(pmcs_hw_t *, pmcs_xscsi_t *, uint8_t *);
+int pmcs_set_dev_state(pmcs_hw_t *, pmcs_xscsi_t *, uint8_t);
+void pmcs_dev_state_recovery(pmcs_hw_t *, pmcs_phy_t *);
+int pmcs_send_err_recovery_cmd(pmcs_hw_t *, uint8_t, pmcs_xscsi_t *);
+void pmcs_start_ssp_event_recovery(pmcs_hw_t *pwp, pmcwork_t *pwrk,
+ uint32_t *iomb, size_t amt);
+void pmcs_ssp_event_recovery(pmcs_hw_t *);
+
+pmcs_iport_t *pmcs_get_iport_by_phy(pmcs_hw_t *pwp, pmcs_phy_t *pptr);
+void pmcs_rele_iport(pmcs_iport_t *iport);
+int pmcs_iport_configure_phys(pmcs_iport_t *iport);
+
+void pmcs_lock_phy(pmcs_phy_t *);
+void pmcs_unlock_phy(pmcs_phy_t *);
+
+void pmcs_destroy_target(pmcs_xscsi_t *);
+void pmcs_phymap_activate(void *, char *, void **);
+void pmcs_phymap_deactivate(void *, char *, void *);
+void pmcs_add_phy_to_iport(pmcs_iport_t *, pmcs_phy_t *);
+void pmcs_remove_phy_from_iport(pmcs_iport_t *, pmcs_phy_t *);
+void pmcs_free_all_phys(pmcs_hw_t *, pmcs_phy_t *);
+void pmcs_free_phys(pmcs_hw_t *, pmcs_phy_t *);
+
+int pmcs_phy_constructor(void *, void *, int);
+void pmcs_phy_destructor(void *, void *);
+
+void pmcs_inc_phy_ref_count(pmcs_phy_t *);
+void pmcs_dec_phy_ref_count(pmcs_phy_t *);
+
+/* Worker thread */
+void pmcs_worker(void *);
+
+pmcs_phy_t *pmcs_get_root_phy(pmcs_phy_t *);
+pmcs_xscsi_t *pmcs_get_target(pmcs_iport_t *, char *);
+
+void pmcs_fatal_handler(pmcs_hw_t *);
+
+/*
+ * Schedule device state recovery for this device immediately
+ */
+void pmcs_start_dev_state_recovery(pmcs_xscsi_t *, pmcs_phy_t *);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _PMCS_PROTO_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_reg.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_reg.h
new file mode 100644
index 0000000000..dd85718f7e
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_reg.h
@@ -0,0 +1,378 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * PMC 8x6G register definitions
+ */
+#ifndef _PMCS_REG_H
+#define _PMCS_REG_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PCI Constants
+ */
+#define PMCS_VENDOR_ID 0x11F8
+#define PMCS_DEVICE_ID 0x8001
+
+#define PMCS_PM8001_REV_A 0
+#define PMCS_PM8001_REV_B 1
+#define PMCS_PM8001_REV_C 2
+
+#define PMCS_REGSET_0 1
+#define PMCS_REGSET_1 2
+#define PMCS_REGSET_2 3
+#define PMCS_REGSET_3 4
+
+
+/*
+ * PCIe BARs - 4 64KB memory regions
+ *
+ * BAR0-1 64KiB
+ * BAR2-3 64KiB
+ * BAR4 64KiB
+ * BAR5 64KiB
+ */
+
+/*
+ * The PMC 8x6G registers are defined by BARs in PCIe space.
+ *
+ * Four memory region BARs are used.
+ *
+ * The first is for the Messaging Unit.
+ *
+ * The second 64KiB region contains the PCS/PMA registers and some of the
+ * Top-Level registers.
+ *
+ * The third 64KiB region is a 64KiB window on the rest of the chip registers
+ * which can be shifted by writing a register in the second region.
+ *
+ * The fourth 64KiB region is for the message passing area.
+ */
+
+/*
+ * Messaging Unit Register Offsets
+ */
+#define PMCS_MSGU_IBDB 0x04 /* Inbound Doorbell */
+#define PMCS_MSGU_IBDB_CLEAR 0x20 /* InBound Doorbell Clear */
+#define PMCS_MSGU_OBDB 0x3c /* OutBound Doorbell */
+#define PMCS_MSGU_OBDB_CLEAR 0x40 /* OutBound Doorbell Clear */
+#define PMCS_MSGU_SCRATCH0 0x44 /* Scratchpad 0 */
+#define PMCS_MSGU_SCRATCH1 0x48 /* Scratchpad 1 */
+#define PMCS_MSGU_SCRATCH2 0x4C /* Scratchpad 2 */
+#define PMCS_MSGU_SCRATCH3 0x50 /* Scratchpad 3 */
+#define PMCS_MSGU_HOST_SCRATCH0 0x54 /* Host Scratchpad 0 */
+#define PMCS_MSGU_HOST_SCRATCH1 0x58 /* Host Scratchpad 1 */
+#define PMCS_MSGU_HOST_SCRATCH2 0x5C /* Host Scratchpad 2 */
+#define PMCS_MSGU_HOST_SCRATCH3 0x60 /* Host Scratchpad 3 */
+#define PMCS_MSGU_HOST_SCRATCH4 0x64 /* Host Scratchpad 4 */
+#define PMCS_MSGU_HOST_SCRATCH5 0x68 /* Host Scratchpad 5 */
+#define PMCS_MSGU_HOST_SCRATCH6 0x6C /* Host Scratchpad 6 */
+#define PMCS_MSGU_HOST_SCRATCH7 0x70 /* Host Scratchpad 7 */
+#define PMCS_MSGU_OBDB_MASK 0x74 /* Outbound Doorbell Mask */
+
+/*
+ * Inbound Doorbell and Doorbell Clear Definitions
+ * NB: The Doorbell Clear register is only used on RevA/8000 parts.
+ */
+#define PMCS_MSGU_IBDB_MPIIU 0x08 /* Initiate Unfreeze */
+#define PMCS_MSGU_IBDB_MPIIF 0x04 /* Initiate Freeze */
+#define PMCS_MSGU_IBDB_MPICTU 0x02 /* Initiate MPI Termination */
+#define PMCS_MSGU_IBDB_MPIINI 0x01 /* Initiate MPI */
+
+/*
+ * Outbound Doorbell and Doorbell Clear Register
+ *
+ * The Doorbell Clear register is only used on RevA/8000 parts.
+ *
+ * Each bit of the ODR is mapped 1-to-1 to an MSI or MSI-X vector
+ * table entry. There are 32 MSI and 16 MSI-X entries. The top
+ * 16 bits are mapped to the low 16 bits for MSI-X. For legacy
+ * INT-X, any bit will generate a host interrupt.
+ *
+ * Each bit in the Outbound Doorbell Clear is used to clear the
+ * corresponding bit in the ODR. For INT-X it also then deasserts
+ * any interrupt condition.
+ */
+#define PMCS_MSI_INTS 32
+#define PMCS_MSIX_INTS 16
+
+/*
+ * Scratchpad 0 Definitions
+ *
+ * When the AAP is in the ready state (see Scratchpad 1), bits 31:26 give the
+ * offset within PCIe space of another BAR that, when mapped, points to a
+ * region containing the MPI Configuration Table (the offset of which, within
+ * that region, is in bits 25:0 of this register).
+ *
+ * When the AAP is in the error state, this register contains additional
+ * error information.
+ */
+#define PMCS_MSGU_MPI_BAR_SHIFT 26
+#define PMCS_MSGU_MPI_OFFSET_MASK ((1 << PMCS_MSGU_MPI_BAR_SHIFT) - 1)
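+
+/*
+ * Illustrative decode (a sketch): with 's0' holding Scratchpad 0 while the
+ * AAP is in the ready state,
+ *
+ *	uint32_t bar = s0 >> PMCS_MSGU_MPI_BAR_SHIFT;
+ *	uint32_t off = s0 & PMCS_MSGU_MPI_OFFSET_MASK;
+ *
+ * 'bar' selects the PCIe memory region to map and 'off' is the byte offset
+ * of the MPI Configuration Table within that region.
+ */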
+
+/*
+ * Scratchpad 1 Definitions
+ *
+ * The bottom two bits are the AAP state of the 8x6G.
+ *
+ * When the AAP is in the error state, bits 31:10 contain the error indicator.
+ *
+ */
+#define PMCS_MSGU_AAP_STATE_MASK 0x03
+#define PMCS_MSGU_AAP_STATE_POR 0
+#define PMCS_MSGU_AAP_STATE_SOFT_RESET 1
+#define PMCS_MSGU_AAP_STATE_ERROR 2
+#define PMCS_MSGU_AAP_STATE_READY 3
+#define PMCS_MSGU_AAP_SFR_PROGRESS 0x04
+#define PMCS_MSGU_AAP_ERROR_MASK 0xfffffc00
+
+/*
+ * Scratchpad 2 Definitions
+ *
+ * Bits 31:10 contain error information if the IOP is in the error state.
+ */
+#define PMCS_MSGU_IOP_STATE_MASK 0x03
+#define PMCS_MSGU_IOP_STATE_POR 0
+#define PMCS_MSGU_IOP_STATE_SOFT_RESET 1
+#define PMCS_MSGU_IOP_STATE_ERROR 2
+#define PMCS_MSGU_IOP_STATE_READY 3
+
+#define PMCS_MSGU_HOST_SOFT_RESET_READY 0x04
+#define PMCS_MSGU_CPU_SOFT_RESET_READY 0x08
+
+/*
+ * Scratchpad 3 Definitions
+ *
+ * Contains additional error information if the IOP is in the error state
+ * (see Scratchpad 2)
+ */
+
+/*
+ * Host Scratchpad 0
+ * Soft Reset Signature
+ */
+#define HST_SFT_RESET_SIG 0x252ACBCD
+
+/*
+ * Host Scratchpad 1
+ *
+ * This is a bit mask for freeze or unfreeze operations for IQs 0..31
+ */
+
+/*
+ * Host Scratchpad 2
+ *
+ * This is a bit mask for freeze or unfreeze operations for IQs 32..63
+ */
+
+/*
+ * Outbound Doorbell Mask Register
+ *
+ * Each bit set here masks the corresponding bit in the ODR and suppresses
+ * interrupt assertion for that bit (and its vector).
+ */
+
+/*
+ * GSM Registers
+ */
+#define GSM_BASE_MASK 0x00ffff
+#define NMI_EN_VPE0_IOP 0x60418
+#define NMI_EN_VPE0_AAP1 0x70418
+#define RB6_ACCESS 0x6A80C0
+#define GSM_CFG_AND_RESET 0x700000
+#define RAM_ECC_DOUBLE_ERROR_INDICATOR 0x700018
+#define READ_ADR_PARITY_CHK_EN 0x700038
+#define WRITE_ADR_PARITY_CHK_EN 0x700040
+#define WRITE_DATA_PARITY_CHK_EN 0x700048
+#define READ_ADR_PARITY_ERROR_INDICATOR 0x700058
+#define WRITE_ADR_PARITY_ERROR_INDICATOR 0x700060
+#define WRITE_DATA_PARITY_ERROR_INDICATOR 0x700068
+
+/*
+ * GSM Shared Memory, IO Status Table and Ring Buffer
+ */
+#define GSM_SM_BLKSZ 0x10000
+#define GSM_SM_BASE 0x400000
+#define IO_STATUS_TABLE_BASE 0x640000
+#define RING_BUF_STORAGE_0 0x680000
+#define RING_BUF_STORAGE_1 0x690000
+#define RING_BUF_PTR_ACC_BASE 0x6A0000
+
+#define IO_STATUS_TABLE_BLKNM 0x4
+#define GSM_SM_BLKNM 0x10
+#define RING_BUF_PTR_OFF 0x1000
+#define RING_BUF_PTR_SIZE 0xFF8
+#define RING_BUF_ACC_OFF 0x8000
+#define RING_BUF_ACC_SIZE 0xFF8
+
+/*
+ * GSM Configuration and Reset Bits
+ */
+#define MST_XCBI_SW_RSTB (1 << 14)
+#define COM_SLV_SW_RSTB (1 << 13)
+#define QSSP_SW_RSTB (1 << 12)
+#define RAAE_SW_RSTB (1 << 11)
+#define RB_1_SW_RSTB (1 << 9)
+#define SM_SW_RSTB (1 << 8)
+
+#define COHERENCY_GAP_SHIFT 4
+#define COHERENCY_GAP_MASK 0xf0
+#define COHERENCY_GAP_DEFAULT (8 << COHERENCY_GAP_SHIFT)
+
+#define COHERENCY_MODE (1 << 3)
+#define RB_WSTRB_ERRCHK_EN (1 << 2)
+#define RAAE_PORT2_EN (1 << 1)
+#define GSM_WCI_MODE (1 << 0)
+#define PMCS_SOFT_RESET_BITS \
+ (COM_SLV_SW_RSTB|QSSP_SW_RSTB|RAAE_SW_RSTB|RB_1_SW_RSTB|SM_SW_RSTB)
+
+#define RB6_NMI_SIGNATURE 0x00001234
+
+/*
+ * PMCS PCI Configuration Registers
+ */
+#define PMCS_PCI_PMC 0x40
+#define PMCS_PCI_PMCSR 0x44
+#define PMCS_PCI_MSI 0x50
+#define PMCS_PCI_MAL 0x54
+#define PMCS_PCI_MAU 0x58
+#define PMCS_PCI_MD 0x5C
+#define PMCS_PCI_PCIE 0x70
+#define PMCS_PCI_DEV_CAP 0x74
+#define PMCS_PCI_DEV_CTRL 0x78
+#define PMCS_PCI_LINK_CAP 0x7C
+#define PMCS_PCI_LINK_CTRL 0x80
+#define PMCS_PCI_MSIX_CAP 0xAC
+#define PMCS_PCI_TBL_OFFSET 0xB0
+#define PMCS_PCI_PBA_OFFSET 0xB4
+#define PMCS_PCI_PCIE_CAP_HD 0x100
+#define PMCS_PCI_UE_STAT 0x104
+#define PMCS_PCI_UE_MASK 0x108
+#define PMCS_PCI_UE_SEV 0x10C
+#define PMCS_PCI_CE_STAT 0x110
+#define PMCS_PCI_CE_MASK 0x114
+#define PMCS_PCI_ADV_ERR_CTRL 0x118
+#define PMCS_PCI_HD_LOG_DW 0x11C
+
+/*
+ * Top Level Registers
+ */
+/* these registers are in MEMBASE-III */
+#define PMCS_SPC_RESET 0x0
+#define PMCS_SPC_BOOT_STRAP 0x8
+#define PMCS_SPC_DEVICE_ID 0x20
+#define PMCS_DEVICE_REVISION 0x24
+/* these registers are in MEMBASE-II */
+#define PMCS_EVENT_INT_ENABLE 0x3040
+#define PMCS_EVENT_INT_STAT 0x3044
+#define PMCS_ERROR_INT_ENABLE 0x3048
+#define PMCS_ERROR_INT_STAT 0x304C
+#define PMCS_AXI_TRANS 0x3258
+#define PMCS_OBDB_AUTO_CLR 0x335C
+#define PMCS_INT_COALESCING_TIMER 0x33C0
+#define PMCS_INT_COALESCING_CONTROL 0x33C4
+
+
+/*
+ * Chip Reset Register Bits (PMCS_SPC_RESET)
+ *
+ * NB: all bits are inverted. That is, the normal state is '1'.
+ * When a bit is cleared to '0', the corresponding action is taken.
+ */
+#define PMCS_SPC_HARD_RESET 0x00
+#define PMCS_SPC_HARD_RESET_CLR 0xffffffff
+
+
+#define SW_DEVICE_RSTB (1 << 31)
+#define PCIE_PC_SXCBI_ARESETN (1 << 26)
+#define PMIC_CORE_RSTB (1 << 25)
+#define PMIC_SXCBI_ARESETN (1 << 24)
+#define LMS_SXCBI_ARESETN (1 << 23)
+#define PCS_SXCBI_ARESETN (1 << 22)
+#define PCIE_SFT_RSTB (1 << 21)
+#define PCIE_PWR_RSTB (1 << 20)
+#define PCIE_AL_SXCBI_ARESETN (1 << 19)
+#define BDMA_SXCBI_ARESETN (1 << 18)
+#define BDMA_CORE_RSTB (1 << 17)
+#define DDR2_RSTB (1 << 16)
+#define GSM_RSTB (1 << 8)
+#define PCS_RSTB (1 << 7)
+#define PCS_LM_RSTB (1 << 6)
+#define PCS_AAP2_SS_RSTB (1 << 5)
+#define PCS_AAP1_SS_RSTB (1 << 4)
+#define PCS_IOP_SS_RSTB (1 << 3)
+#define PCS_SPBC_RSTB (1 << 2)
+#define RAAE_RSTB (1 << 1)
+#define OSSP_RSTB (1 << 0)
+
+
+/*
+ * Timer Enables Register
+ */
+#define PMCS_TENABLE_WINDOW_OFFSET 0x30000
+#define PMCS_TENABLE_BASE 0x0209C
+#define PMCS_TENABLE_MULTIPLIER 0x04000
+
+/*
+ * Special register (MEMBASE-III) for Step 5.5 in soft reset sequence to set
+ * GPIO into tri-state mode (temporary workaround for 1.07.xx beta firmware)
+ */
+#define PMCS_GPIO_TRISTATE_MODE_ADDR 0x9010C
+#define PMCS_GPIO_TSMODE_BIT0 (1 << 0)
+#define PMCS_GPIO_TSMODE_BIT1 (1 << 1)
+
+
+/*
+ * Register Access Inline Functions
+ */
+uint32_t pmcs_rd_msgunit(pmcs_hw_t *, uint32_t);
+uint32_t pmcs_rd_gsm_reg(pmcs_hw_t *, uint32_t);
+uint32_t pmcs_rd_topunit(pmcs_hw_t *, uint32_t);
+uint32_t pmcs_rd_mpi_tbl(pmcs_hw_t *, uint32_t);
+uint32_t pmcs_rd_gst_tbl(pmcs_hw_t *, uint32_t);
+uint32_t pmcs_rd_iqc_tbl(pmcs_hw_t *, uint32_t);
+uint32_t pmcs_rd_oqc_tbl(pmcs_hw_t *, uint32_t);
+uint32_t pmcs_rd_iqci(pmcs_hw_t *, uint32_t);
+uint32_t pmcs_rd_iqpi(pmcs_hw_t *, uint32_t);
+uint32_t pmcs_rd_oqci(pmcs_hw_t *, uint32_t);
+uint32_t pmcs_rd_oqpi(pmcs_hw_t *, uint32_t);
+
+void pmcs_wr_msgunit(pmcs_hw_t *, uint32_t, uint32_t);
+void pmcs_wr_gsm_reg(pmcs_hw_t *, uint32_t, uint32_t);
+void pmcs_wr_topunit(pmcs_hw_t *, uint32_t, uint32_t);
+void pmcs_wr_mpi_tbl(pmcs_hw_t *, uint32_t, uint32_t);
+void pmcs_wr_gst_tbl(pmcs_hw_t *, uint32_t, uint32_t);
+void pmcs_wr_iqc_tbl(pmcs_hw_t *, uint32_t, uint32_t);
+void pmcs_wr_oqc_tbl(pmcs_hw_t *, uint32_t, uint32_t);
+void pmcs_wr_iqci(pmcs_hw_t *, uint32_t, uint32_t);
+void pmcs_wr_iqpi(pmcs_hw_t *, uint32_t, uint32_t);
+void pmcs_wr_oqci(pmcs_hw_t *, uint32_t, uint32_t);
+void pmcs_wr_oqpi(pmcs_hw_t *, uint32_t, uint32_t);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _PMCS_REG_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_scsa.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_scsa.h
new file mode 100644
index 0000000000..19db949654
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_scsa.h
@@ -0,0 +1,92 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * SCSI (SCSA) midlayer interface for PMC driver.
+ */
+#ifndef _PMCS_SCSA_H
+#define _PMCS_SCSA_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/scsi/scsi_types.h>
+
+#define ADDR2TRAN(ap) ((ap)->a_hba_tran)
+#define ADDR2PMC(ap) (ITRAN2PMC(ADDR2TRAN(ap)))
+
+#define CMD2TRAN(cmd) (CMD2PKT(cmd)->pkt_address.a_hba_tran)
+#define CMD2PMC(cmd) (ITRAN2PMC(CMD2TRAN(cmd)))
+
+#define PKT2ADDR(pkt) (&((pkt)->pkt_address))
+#define PKT2CMD(pkt) ((pmcs_cmd_t *)(pkt->pkt_ha_private))
+#define CMD2PKT(sp) (sp->cmd_pkt)
+#define PMCS_STATUS_LEN 264
+
+#define TRAN2PMC(tran) ((pmcs_hw_t *)(tran)->tran_hba_private)
+#define ITRAN2PMC(tran) \
+ (((pmcs_iport_t *)(tran)->tran_hba_private)->pwp)
+#define ITRAN2IPORT(tran) \
+ ((pmcs_iport_t *)(tran)->tran_hba_private)
+
+/*
+ * Wrapper around scsi_pkt.
+ */
+struct pmcs_cmd {
+ struct scsi_pkt *cmd_pkt; /* actual SCSI Packet */
+ STAILQ_ENTRY(pmcs_cmd) cmd_next; /* linked list */
+ pmcs_dmachunk_t *cmd_clist; /* list of dma chunks */
+ pmcs_xscsi_t *cmd_target; /* Pointer to target */
+ pmcs_lun_t *cmd_lun; /* Pointer to LU */
+ uint32_t cmd_tag; /* PMC htag */
+ uint8_t cmd_satltag; /* SATL tag */
+};
+
+#define SCSA_CDBLEN(sp) sp->cmd_pkt->pkt_cdblen
+#define SCSA_STSLEN(sp) sp->cmd_pkt->pkt_scblen
+#define SCSA_TGTLEN(sp) sp->cmd_pkt->pkt_tgtlen
+
+#define PMCS_WQ_RUN_SUCCESS 0
+#define PMCS_WQ_RUN_FAIL_RES 1 /* Failed to alloc rsrcs (e.g. DMA chunks) */
+#define PMCS_WQ_RUN_FAIL_OTHER 2 /* Any other failure */
+
+int pmcs_scsa_init(pmcs_hw_t *, const ddi_dma_attr_t *);
+
+void pmcs_latch_status(pmcs_hw_t *, pmcs_cmd_t *, uint8_t, uint8_t *,
+ size_t, char *);
+size_t pmcs_set_resid(struct scsi_pkt *, size_t, uint32_t);
+boolean_t pmcs_scsa_wq_run_one(pmcs_hw_t *, pmcs_xscsi_t *);
+void pmcs_scsa_wq_run(pmcs_hw_t *);
+void pmcs_scsa_cq_run(void *);
+
+int pmcs_config_one(pmcs_hw_t *, uint64_t, int, long, dev_info_t **);
+
+dev_info_t *pmcs_find_child_smp(pmcs_hw_t *, char *);
+int pmcs_config_one_smp(pmcs_hw_t *, uint64_t, dev_info_t **);
+
+int pmcs_run_sata_special(pmcs_hw_t *, pmcs_xscsi_t *);
+#ifdef __cplusplus
+}
+#endif
+#endif /* _PMCS_SCSA_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_sgl.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_sgl.h
new file mode 100644
index 0000000000..e604ed751c
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_sgl.h
@@ -0,0 +1,86 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+#ifndef _PMCS_SGL_H
+#define _PMCS_SGL_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This is the strict physical representation of an external
+ * S/G list entry that the PMCS hardware uses. We manage them
+ * in chunks.
+ */
+typedef struct {
+ uint32_t sglal; /* Low 32 bit DMA address */
+ uint32_t sglah; /* High 32 bit DMA address */
+ uint32_t sglen; /* Length */
+ uint32_t flags;
+} pmcs_dmasgl_t;
+
+/*
+ * If this bit is set in flags, then the address
+ * described by this structure is an array of SGLs,
+ * the last of which may contain *another* flag
+ * to continue the list.
+ */
+#define PMCS_DMASGL_EXTENSION (1U << 31)
+
+#define PMCS_SGL_CHUNKSZ (PMCS_SGL_NCHUNKS * (sizeof (pmcs_dmasgl_t)))
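+
+/*
+ * Example (illustrative sketch only; "sgl" and "next_chunk_dma_addr" are
+ * hypothetical locals, the use of PMCS_SGL_CHUNKSZ as the extension length
+ * is an assumption, and any endian conversion the hardware requires is
+ * omitted): a list longer than one chunk is chained by pointing the final
+ * entry of a chunk at the DMA address of the next chunk and setting
+ * PMCS_DMASGL_EXTENSION in its flags:
+ *
+ *     pmcs_dmasgl_t *sgl;                  (one chunk of entries)
+ *     uint64_t next_chunk_dma_addr;        (DMA address of the next chunk)
+ *
+ *     sgl[PMCS_SGL_NCHUNKS - 1].sglal = (uint32_t)next_chunk_dma_addr;
+ *     sgl[PMCS_SGL_NCHUNKS - 1].sglah = (uint32_t)(next_chunk_dma_addr >> 32);
+ *     sgl[PMCS_SGL_NCHUNKS - 1].sglen = PMCS_SGL_CHUNKSZ;
+ *     sgl[PMCS_SGL_NCHUNKS - 1].flags = PMCS_DMASGL_EXTENSION;
+ */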
+
+/*
+ * This is how we keep track of chunks: we have a linked list of
+ * chunk pointers that are either on the free list or are tagged
+ * off of a SCSA command. We used to maintain offset indices
+ * within the sglen area of the last element of a chunk, but this
+ * field is marked reserved and may not be reliably used in future
+ * firmware revisions.
+ */
+typedef struct pmcs_dmachunk pmcs_dmachunk_t;
+struct pmcs_dmachunk {
+ pmcs_dmachunk_t *nxt;
+ pmcs_dmasgl_t *chunks;
+ unsigned long addr;
+ ddi_acc_handle_t acc_handle;
+ ddi_dma_handle_t dma_handle;
+};
+
+/*
+ * DMA related functions
+ */
+int pmcs_dma_load(pmcs_hw_t *, pmcs_cmd_t *, uint32_t *);
+void pmcs_dma_unload(pmcs_hw_t *, pmcs_cmd_t *);
+
+/*
+ * After allocating some DMA chunks, insert them
+ * into the free list and set them up for use.
+ */
+void pmcs_idma_chunks(pmcs_hw_t *, pmcs_dmachunk_t *,
+ pmcs_chunk_t *, unsigned long);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _PMCS_SGL_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_smhba.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_smhba.h
new file mode 100644
index 0000000000..f952a34060
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_smhba.h
@@ -0,0 +1,72 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * SM-HBA interfaces/definitions for PMC-S driver.
+ */
+#ifndef _PMCS_SMHBA_H
+#define _PMCS_SMHBA_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Leverage definition of data_type_t in nvpair.h */
+#include <sys/nvpair.h>
+
+#define PMCS_NUM_PHYS "num-phys"
+#define PMCS_NUM_PHYS_HBA "num-phys-hba"
+#define PMCS_SMHBA_SUPPORTED "sm-hba-supported"
+#define PMCS_DRV_VERSION "driver-version"
+#define PMCS_HWARE_VERSION "hardware-version"
+#define PMCS_FWARE_VERSION "firmware-version"
+#define PMCS_SUPPORTED_PROTOCOL "supported-protocol"
+
+#define PMCS_MANUFACTURER "Manufacturer"
+#define PMCS_SERIAL_NUMBER "SerialNumber"
+#define PMCS_MODEL_NAME "ModelName"
+
+/*
+ * Interfaces to add properties required for SM-HBA
+ *
+ * _add_xxx_prop() interfaces add only 1 prop that is specified in the args.
+ * _set_xxx_props() interfaces add more than 1 prop for a set of phys/devices.
+ */
+void pmcs_smhba_add_hba_prop(pmcs_hw_t *, data_type_t, char *, void *);
+void pmcs_smhba_add_iport_prop(pmcs_iport_t *, data_type_t, char *, void *);
+void pmcs_smhba_add_tgt_prop(pmcs_xscsi_t *, data_type_t, char *, void *);
+
+void pmcs_smhba_set_scsi_device_props(pmcs_hw_t *, pmcs_phy_t *,
+ struct scsi_device *);
+void pmcs_smhba_set_phy_props(pmcs_iport_t *);
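+
+/*
+ * Example (illustrative only; "pwp" is a hypothetical pmcs_hw_t pointer):
+ * an _add_xxx_prop() call registers a single named property, e.g.
+ *
+ *     char vers[] = "1.0";
+ *
+ *     pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_DRV_VERSION, vers);
+ */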
+
+/*
+ * Misc routines supporting SM-HBA
+ */
+void pmcs_smhba_log_sysevent(pmcs_hw_t *, char *, char *, pmcs_phy_t *);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _PMCS_SMHBA_H */
diff --git a/usr/src/uts/common/sys/scsi/adapters/pmcs/smp_defs.h b/usr/src/uts/common/sys/scsi/adapters/pmcs/smp_defs.h
new file mode 100644
index 0000000000..508d316bf2
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/smp_defs.h
@@ -0,0 +1,1100 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This header file is used only by the pmcs driver, temporarily.
+ * It will be deleted after smp_frames.h is updated.
+ */
+
+#ifndef _SYS_SCSI_GENERIC_SMP_FRAMES_H
+#define _SYS_SCSI_GENERIC_SMP_FRAMES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/sysmacros.h>
+
+/*
+ * The definitions of smp frame types and functions conforming to SAS-1.1 and
+ * SAS-2. Consumers are expected to determine protocol support by examining
+ * the response to the REPORT GENERAL function.
+ */
+
+/* Redefinition conflict with smp_frames.h. */
+
+/*
+ * typedef enum smp_frame_type {
+ * SMP_FRAME_TYPE_REQUEST = 0x40,
+ * SMP_FRAME_TYPE_RESPONSE = 0x41
+ * } smp_frame_type_t;
+ */
+
+typedef enum smp_function {
+ SMP_FUNC_REPORT_GENERAL = 0x00,
+ SMP_FUNC_REPORT_MANUFACTURER_INFO = 0x01,
+ SMP_FUNC_READ_GPIO_REGISTER = 0x02,
+ SMP_FUNC_REPORT_SELF_CONFIG_STATUS = 0x03,
+ SMP_FUNC_REPORT_ZONE_PERM_TABLE = 0x04,
+ SMP_FUNC_REPORT_ZONE_MANAGER_PASSWORD = 0x05,
+ SMP_FUNC_REPORT_BROADCAST = 0x06,
+ SMP_FUNC_DISCOVER = 0x10,
+ SMP_FUNC_REPORT_PHY_ERROR_LOG = 0x11,
+ SMP_FUNC_REPORT_PHY_SATA = 0x12,
+ SMP_FUNC_REPORT_ROUTE_INFO = 0x13,
+ SMP_FUNC_REPORT_PHY_EVENT = 0x14,
+ SMP_FUNC_DISCOVER_LIST = 0x20,
+ SMP_FUNC_REPORT_PHY_EVENT_LIST = 0x21,
+ SMP_FUNC_REPORT_EXP_ROUTE_TABLE_LIST = 0x22,
+ SMP_FUNC_CONFIG_GENERAL = 0x80,
+ SMP_FUNC_ENABLE_DISABLE_ZONING = 0x81,
+ SMP_FUNC_WRITE_GPIO_REGISTER = 0x82,
+ SMP_FUNC_ZONED_BROADCAST = 0x85,
+ SMP_FUNC_ZONE_LOCK = 0x86,
+ SMP_FUNC_ZONE_ACTIVATE = 0x87,
+ SMP_FUNC_ZONE_UNLOCK = 0x88,
+ SMP_FUNC_CONFIG_ZONE_MANAGER_PASSWORD = 0x89,
+ SMP_FUNC_CONFIG_ZONE_PHY_INFO = 0x8A,
+ SMP_FUNC_CONFIG_ZONE_PERM_TABLE = 0x8B,
+ SMP_FUNC_CONFIG_ROUTE_INFO = 0x90,
+ SMP_FUNC_PHY_CONTROL = 0x91,
+ SMP_FUNC_PHY_TEST_FUNCTION = 0x92,
+ SMP_FUNC_CONFIG_PHY_EVENT = 0x93
+} smp_function_t;
+
+typedef enum smp_result {
+ SMP_RES_FUNCTION_ACCEPTED = 0x00,
+ SMP_RES_UNKNOWN_FUNCTION = 0x01,
+ SMP_RES_FUNCTION_FAILED = 0x02,
+ SMP_RES_INVALID_REQUEST_FRAME_LENGTH = 0x03,
+ SMP_RES_INVALID_EXPANDER_CHANGE_COUNT = 0x04,
+ SMP_RES_BUSY = 0x05,
+ SMP_RES_INCOMPLETE_DESCRIPTOR_LIST = 0x06,
+ SMP_RES_PHY_DOES_NOT_EXIST = 0x10,
+ SMP_RES_INDEX_DOES_NOT_EXIST = 0x11,
+ SMP_RES_PHY_DOES_NOT_SUPPORT_SATA = 0x12,
+ SMP_RES_UNKNOWN_PHY_OPERATION = 0x13,
+ SMP_RES_UNKNOWN_PHY_TEST_FUNCTION = 0x14,
+ SMP_RES_PHY_TEST_IN_PROGRESS = 0x15,
+ SMP_RES_PHY_VACANT = 0x16,
+ SMP_RES_UNKNOWN_PHY_EVENT_SOURCE = 0x17,
+ SMP_RES_UNKNOWN_DESCRIPTOR_TYPE = 0x18,
+ SMP_RES_UNKNOWN_PHY_FILTER = 0x19,
+ SMP_RES_AFFILIATION_VIOLATION = 0x1A,
+ SMP_RES_ZONE_VIOLATION = 0x20,
+ SMP_RES_NO_MANAGEMENT_ACCESS_RIGHTS = 0x21,
+ SMP_RES_UNKNOWN_ENABLE_DISABLE_ZONING = 0x22,
+ SMP_RES_ZONE_LOCK_VIOLATION = 0x23,
+ SMP_RES_NOT_ACTIVATED = 0x24,
+ SMP_RES_ZONE_GROUP_OUT_OF_RANGE = 0x25,
+ SMP_RES_NO_PHYSICAL_PRESENCE = 0x26,
+ SMP_RES_SAVING_NOT_SUPPORTED = 0x27,
+ SMP_RES_SOURCE_ZONE_GROUP_DNE = 0x28,
+ SMP_RES_NONE = -1
+} smp_result_t;
+
+/*
+ * SAS-2 10.4.3.2 request frame format
+ */
+typedef struct smp_request_frame {
+ uint8_t srf_frame_type;
+ uint8_t srf_function;
+ uint8_t srf_allocated_response_len; /* reserved in SAS-1 */
+ uint8_t srf_request_len;
+ uint8_t srf_data[1];
+} smp_request_frame_t;
+
+/*
+ * SAS-2 10.4.3.3 response frame format
+ */
+typedef struct smp_response_frame {
+ uint8_t srf_frame_type;
+ uint8_t srf_function;
+ uint8_t srf_result;
+ uint8_t srf_response_len; /* reserved in SAS-1 */
+ uint8_t srf_data[1];
+} smp_response_frame_t;
+
+typedef uint8_t smp_crc_t[4];
+
+#ifdef offsetof
+#define SMP_REQ_MINLEN \
+ (offsetof(smp_request_frame_t, srf_data[0]) + sizeof (smp_crc_t))
+#define SMP_RESP_MINLEN \
+ (offsetof(smp_response_frame_t, srf_data[0]) + sizeof (smp_crc_t))
+#endif /* offsetof */
+
+#pragma pack(1)
+
+/*
+ * SAS-2 10.4.3.4 REPORT GENERAL (no additional request bytes)
+ */
+typedef struct smp_report_general_resp {
+ uint16_t srgr_exp_change_count;
+ uint16_t srgr_exp_route_indexes;
+ DECL_BITFIELD2(
+ _reserved1 :7,
+ srgr_long_response :1);
+ uint8_t srgr_number_of_phys;
+ DECL_BITFIELD7(
+ srgr_externally_configurable_route_table :1,
+ srgr_configuring :1,
+ srgr_configures_others :1,
+ srgr_open_reject_retry_supported :1,
+ srgr_stp_continue_awt :1,
+ _reserved2 :2,
+ srgr_table_to_table_supported :1);
+ uint8_t _reserved3;
+ uint64_t srgr_enclosure_logical_identifier;
+ uint8_t _reserved4[8];
+ uint8_t _reserved5[2];
+ uint16_t srgr_stp_bus_inactivity_time_limit;
+ uint16_t srgr_stp_maximum_connect_time_limit;
+ uint16_t srgr_stp_smp_nexus_loss_time;
+ DECL_BITFIELD7(
+ srgr_zoning_enabled :1,
+ srgr_zoning_supported :1,
+ srgr_physical_presence_asserted :1,
+ srgr_physical_presence_supported :1,
+ srgr_zone_locked :1,
+ _reserved6 :1,
+ srgr_number_of_zone_grps :2);
+ DECL_BITFIELD6(
+ srgr_saving_zoning_enabled_supported :1,
+ srgr_saving_zone_perm_table_supported :1,
+ srgr_saving_zone_phy_info_supported :1,
+ srgr_saving_zone_mgr_password_supported :1,
+ srgr_saving :1,
+ _reserved7 :3);
+ uint16_t srgr_max_routed_sas_addrs;
+ uint64_t srgr_active_zm_sas_addr;
+ uint16_t srgr_zone_lock_inactivity_limit;
+ uint8_t _reserved8[2];
+ uint8_t _reserved9;
+ uint8_t srgr_first_encl_conn_elem_idx;
+ uint8_t srgr_number_encl_conn_elem_idxs;
+ uint8_t _reserved10;
+ DECL_BITFIELD2(
+ _reserved11 :7,
+ srgr_reduced_functionality :1);
+ uint8_t srgr_time_to_reduced_functionality;
+ uint8_t srgr_initial_time_to_reduced_functionality;
+ uint8_t srgr_max_reduced_functionality_time;
+ uint16_t srgr_last_self_conf_status_descr_idx;
+ uint16_t srgr_max_stored_self_config_status_descrs;
+ uint16_t srgr_last_phy_event_list_descr_idx;
+ uint16_t srgr_max_stored_phy_event_list_descrs;
+ uint16_t srgr_stp_reject_to_open_limit;
+ uint8_t _reserved12[2];
+} smp_report_general_resp_t;
+
+typedef enum smp_n_zone_grps {
+ SMP_ZONE_GROUPS_128 = 0x0,
+ SMP_ZONE_GROUPS_256 = 0x1
+} smp_n_zone_grps_t;
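+
+/*
+ * Example (illustrative sketch only; "req" is a hypothetical local, and
+ * SMP_FRAME_TYPE_REQUEST is taken from smp_frames.h per the note above):
+ * a consumer might build a REPORT GENERAL request, which carries no
+ * additional request bytes, so its total length is just SMP_REQ_MINLEN
+ * and the length fields may be left zero (both are reserved in SAS-1).
+ * The reply's srgr_long_response field then indicates whether the
+ * expander supports the SAS-2 (long response) format:
+ *
+ *     smp_request_frame_t req;
+ *
+ *     bzero(&req, sizeof (req));
+ *     req.srf_frame_type = SMP_FRAME_TYPE_REQUEST;
+ *     req.srf_function = SMP_FUNC_REPORT_GENERAL;
+ */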
+
+/*
+ * SAS-2 10.4.3.5 REPORT MANUFACTURER INFORMATION (no additional request bytes)
+ */
+typedef struct smp_report_manufacturer_info_resp {
+ uint16_t srmir_exp_change_count;
+ uint8_t _reserved1[2];
+ DECL_BITFIELD2(
+ srmir_sas_1_1_format :1,
+ _reserved2 :7);
+ uint8_t _reserved3[3];
+ char srmir_vendor_identification[8];
+ char srmir_product_identification[16];
+ char srmir_product_revision_level[4];
+ char srmir_component_vendor_identification[8];
+ uint16_t srmir_component_id;
+ uint8_t srmir_component_revision_level;
+ uint8_t _reserved4;
+ uint8_t srmir_vs_52[8];
+} smp_report_manufacturer_info_resp_t;
+
+/*
+ * SAS-2 10.4.3.6 REPORT SELF_CONFIGURATION STATUS
+ */
+typedef struct smp_report_self_config_status_req {
+ uint8_t _reserved1[2];
+ uint16_t srscsr_starting_self_config_status_descr_idx;
+} smp_report_self_config_status_req_t;
+
+typedef struct smp_report_self_config_status_resp {
+ uint16_t srscsr_exp_change_count;
+ uint16_t srscsr_starting_self_config_status_descr_idx;
+ uint16_t srscsr_number_self_config_status_descrs;
+ uint16_t srscsr_last_self_config_status_descr_idx;
+ uint8_t srscsr_self_config_status_descr_len;
+ uint8_t _reserved1[3];
+ uint8_t srscsr_descrs[1];
+} smp_report_self_config_status_resp_t;
+
+typedef struct smp_self_config_status_descr {
+ uint8_t sscsd_status_type;
+ DECL_BITFIELD2(
+ sscsd_final :1,
+ _reserved1 :7);
+ uint8_t _reserved2;
+ uint8_t sscsd_phy_identifier;
+ uint8_t _reserved3[4];
+ uint64_t sscsd_sas_addr;
+} smp_self_config_status_descr_t;
+
+typedef enum smp_self_config_status_type {
+ SMP_SCST_NONSPECIFIC_ERROR = 0x01,
+ SMP_SCST_CONNECTION = 0x02,
+ SMP_SCST_ROUTE_TABLE_FULL = 0x03,
+ SMP_SCST_NOMEM = 0x04,
+ SMP_SCST_PHY_LAYER_ERROR = 0x20,
+ SMP_SCST_LOST_SYNC = 0x21,
+ SMP_SCST_LINK_LAYER_ERROR = 0x40,
+ SMP_SCST_OPEN_TIMEOUT = 0x41,
+ SMP_SCST_ABANDON_OPEN_REJECT = 0x42,
+ SMP_SCST_RETRY_OPEN_REJECTS = 0x43,
+ SMP_SCST_NEXUS_LOSS = 0x44,
+ SMP_SCST_BREAK = 0x45,
+ SMP_SCST_CRC_ERROR = 0x46,
+ SMP_SCST_PORT_LAYER_ERROR = 0x60,
+ SMP_SCST_RESPONSE_TIMEOUT = 0x61,
+ SMP_SCST_TRANSPORT_LAYER_ERROR = 0x80,
+ SMP_SCST_APP_LAYER_ERROR = 0xA0,
+ SMP_SCST_RESPONSE_TOO_SHORT = 0xA1,
+ SMP_SCST_UNSUPPORTED_VALUES = 0xA2,
+ SMP_SCST_INCONSISTENT = 0xA3,
+ SMP_SCST_CONFIGURING = 0xA4
+} smp_self_config_status_type_t;
+
+/*
+ * SAS-2 10.4.3.7 REPORT ZONE PERMISSION TABLE
+ */
+typedef struct smp_report_zone_perm_table_req {
+ DECL_BITFIELD2(
+ srzptr_report_type :2,
+ _reserved1 :6);
+ uint8_t _reserved2;
+ uint8_t srzptr_starting_src_zone_grp;
+ uint8_t srzptr_max_zone_perm_descrs;
+} smp_report_zone_perm_table_req_t;
+
+typedef enum smp_zone_perm_table_report_type {
+ SMP_ZPTRT_CURRENT = 0x0,
+ SMP_ZPTRT_SHADOW = 0x1,
+ SMP_ZPTRT_SAVED = 0x2,
+ SMP_ZPTRT_DEFAULT = 0x3
+} smp_zone_perm_table_report_type_t;
+
+typedef struct smp_report_zone_perm_table_resp {
+ uint16_t srzptr_exp_change_count;
+ DECL_BITFIELD3(
+ srzptr_report_type :2,
+ _reserved1 :5,
+ srzptr_zone_locked :1);
+ DECL_BITFIELD2(
+ _reserved2 :6,
+ srzptr_number_zone_grps :2);
+ uint8_t _reserved3[6];
+ uint8_t srzptr_starting_src_zone_grp;
+ uint8_t srzptr_number_zone_perm_descrs;
+ uint8_t srzptr_descrs[1];
+} smp_report_zone_perm_table_resp_t;
+
+typedef uint8_t smp_zone_perm_descr128_t[16];
+typedef uint8_t smp_zone_perm_descr256_t[32];
+
+#define SMP_ZONE_PERM_BIT128(__d, __z) \
+ ((__d)[15 - ((__z) >> 3)] & (1 << ((__z) & 7)))
+
+#define SMP_ZONE_PERM_SET128(__d, __z) \
+ ((__d)[15 - ((__z) >> 3)] |= (1 << ((__z) & 7)))
+
+#define SMP_ZONE_PERM_CLR128(__d, __z) \
+ ((__d)[15 - ((__z) >> 3)] &= ~(1 << ((__z) & 7)))
+
+#define SMP_ZONE_PERM_BIT256(__d, __z) \
+ ((__d)[31 - ((__z) >> 3)] & (1 << ((__z) & 7)))
+
+#define SMP_ZONE_PERM_SET256(__d, __z) \
+ ((__d)[31 - ((__z) >> 3)] |= (1 << ((__z) & 7)))
+
+#define SMP_ZONE_PERM_CLR256(__d, __z) \
+ ((__d)[31 - ((__z) >> 3)] &= ~(1 << ((__z) & 7)))
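+
+/*
+ * Example (illustrative): zone group 0 lives in the low-order bit of the
+ * last byte of a descriptor, which is what the index arithmetic above
+ * encodes.  Granting and testing access for zone group 5 in a 128-group
+ * descriptor "d" might look like:
+ *
+ *     smp_zone_perm_descr128_t d;
+ *
+ *     bzero(d, sizeof (d));
+ *     SMP_ZONE_PERM_SET128(d, 5);
+ *     if (SMP_ZONE_PERM_BIT128(d, 5) != 0) {
+ *             (zone group 5 is now permitted)
+ *     }
+ */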
+
+/*
+ * SAS-2 10.4.3.8 REPORT ZONE MANAGER PASSWORD (no additional request bytes)
+ */
+typedef struct smp_report_zone_mgr_password_resp {
+ uint16_t srzmpr_exp_change_count;
+ uint8_t _reserved1[2];
+ uint8_t srzmpr_zone_mgr_password[32];
+} smp_report_zone_mgr_password_resp_t;
+
+/*
+ * SAS-2 10.4.3.9 REPORT BROADCAST
+ */
+typedef struct smp_report_broadcast_req {
+ DECL_BITFIELD2(
+ srbr_broadcast_type :4,
+ _reserved1 :4);
+ uint8_t _reserved2[3];
+} smp_report_broadcast_req_t;
+
+typedef enum smp_broadcast_type {
+ SMP_BROADCAST_CHANGE = 0x0,
+ SMP_BROADCAST_RESERVED_CHANGE_0 = 0x1,
+ SMP_BROADCAST_RESERVED_CHANGE_1 = 0x2,
+ SMP_BROADCAST_SES = 0x3,
+ SMP_BROADCAST_EXPANDER = 0x4,
+ SMP_BROADCAST_ASYNC_EVENT = 0x5,
+ SMP_BROADCAST_RESERVED_3 = 0x6,
+ SMP_BROADCAST_RESERVED_4 = 0x7,
+ SMP_BROADCAST_ZONE_ACTIVATE = 0x8
+} smp_broadcast_type_t;
+
+typedef struct smp_broadcast_descr {
+ DECL_BITFIELD2(
+ sbd_broadcast_type :4,
+ _reserved1 :4);
+ uint8_t sbd_phy_identifier;
+ DECL_BITFIELD2(
+ sbd_broadcast_reason :4,
+ _reserved2 :4);
+ uint16_t sbd_broadcast_count;
+ uint8_t _reserved3[10];
+} smp_broadcast_descr_t;
+
+typedef struct smp_report_broadcast_resp {
+ uint16_t srbr_exp_change_count;
+ DECL_BITFIELD2(
+ srbr_broadcast_type :4,
+ _reserved1 :4);
+ uint8_t srbr_number_broadcast_descrs;
+ smp_broadcast_descr_t srbr_descrs[1];
+} smp_report_broadcast_resp_t;
+
+/*
+ * SAS-2 10.4.3.10 DISCOVER
+ */
+typedef struct smp_discover_req {
+ uint8_t _reserved1[4];
+ DECL_BITFIELD2(
+ sdr_ignore_zone_grp :1,
+ _reserved2 :7);
+ uint8_t sdr_phy_identifier;
+ uint8_t _reserved3[2];
+} smp_discover_req_t;
+
+typedef struct smp_snw3_phy_cap {
+ DECL_BITFIELD4(
+ sspc_requested_logical_link_rate :4, /* smp_link_rate_t */
+ _reserved1 :2,
+ sspc_tx_ssc_type :1,
+ sspc_start :1);
+ DECL_BITFIELD7(
+ _reserved2 :2,
+ sspc_g3_ssc :1,
+ sspc_g3_no_ssc :1,
+ sspc_g2_ssc :1,
+ sspc_g2_no_ssc :1,
+ sspc_g1_ssc :1,
+ sspc_g1_no_ssc :1);
+ uint8_t _reserved3;
+ DECL_BITFIELD2(
+ sspc_parity :1,
+ _reserved4 :7);
+} smp_snw3_phy_cap_t;
+
+typedef struct smp_discover_resp {
+ uint16_t sdr_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t sdr_phy_identifier;
+ uint8_t _reserved2[2];
+ DECL_BITFIELD3(
+ sdr_attached_reason :4,
+ sdr_attached_device_type :3,
+ _reserved3 :1);
+ DECL_BITFIELD2(
+ sdr_negotiated_logical_link_rate :4, /* smp_link_rate_t */
+ _reserved4 :4);
+ DECL_BITFIELD5(
+ sdr_attached_sata_host :1,
+ sdr_attached_smp_initiator :1,
+ sdr_attached_stp_initiator :1,
+ sdr_attached_ssp_initiator :1,
+ _reserved5 :4);
+ DECL_BITFIELD6(
+ sdr_attached_sata_device :1,
+ sdr_attached_smp_target :1,
+ sdr_attached_stp_target :1,
+ sdr_attached_ssp_target :1,
+ _reserved6 :3,
+ sdr_attached_sata_port_selector :1);
+ uint64_t sdr_sas_addr;
+ uint64_t sdr_attached_sas_addr;
+ uint8_t sdr_attached_phy_identifier;
+ DECL_BITFIELD4(
+ sdr_attached_break_reply_capable :1,
+ sdr_attached_requested_inside_zpsds :1,
+ sdr_attached_inside_zpsds_persistent :1,
+ _reserved7 :5);
+ uint8_t _reserved8[6];
+ DECL_BITFIELD2(
+ sdr_hw_min_phys_link_rate :4, /* smp_link_rate_t */
+ sdr_prog_min_phys_link_rate :4); /* smp_link_rate_t */
+ DECL_BITFIELD2(
+ sdr_hw_max_phys_link_rate :4, /* smp_link_rate_t */
+ sdr_prog_max_phys_link_rate :4); /* smp_link_rate_t */
+ uint8_t sdr_phy_change_count;
+ DECL_BITFIELD3(
+ sdr_partial_pwy_timeout :4,
+ _reserved9 :3,
+ sdr_virtual_phy :1);
+ DECL_BITFIELD2(
+ sdr_routing_attr :4, /* smp_routing_attr_t */
+ _reserved10 :4);
+ DECL_BITFIELD2(
+ sdr_connector_type :7,
+ _reserved11 :1);
+ uint8_t sdr_connector_element_index;
+ uint8_t sdr_connector_physical_link;
+ uint8_t _reserved12[2];
+ uint8_t sdr_vendor[2];
+ uint64_t sdr_attached_device_name;
+ DECL_BITFIELD8(
+ sdr_zoning_enabled :1,
+ sdr_inside_zpsds :1,
+ sdr_zone_group_persistent :1,
+ _reserved13 :1,
+ sdr_requested_inside_zpsds :1,
+ sdr_inside_zpsds_persistent :1,
+ sdr_requested_inside_zpsds_changed_by_exp :1,
+ _reserved14 :1);
+ uint8_t _reserved15[2];
+ uint8_t sdr_zone_group;
+ uint8_t sdr_self_config_status;
+ uint8_t sdr_self_config_levels_completed;
+ uint8_t _reserved16[2];
+ uint64_t sdr_self_config_sas_addr;
+ smp_snw3_phy_cap_t sdr_prog_phy_cap;
+ smp_snw3_phy_cap_t sdr_current_phy_cap;
+ smp_snw3_phy_cap_t sdr_attached_phy_cap;
+ uint8_t _reserved17[6];
+ DECL_BITFIELD2(
+ sdr_negotiated_phys_link_rate :4, /* smp_link_rate_t */
+ sdr_reason :4);
+ DECL_BITFIELD3(
+ sdr_hw_muxing_supported :1,
+ sdr_negotiated_ssc :1,
+ _reserved18 :6);
+ DECL_BITFIELD7(
+ sdr_default_zoning_enabled :1,
+ _reserved19 :1,
+ sdr_default_zone_group_persistent :1,
+ _reserved20 :1,
+ sdr_default_requested_inside_zpsds :1,
+ sdr_default_inside_zpsds_persistent :1,
+ _reserved21 :2);
+ uint8_t _reserved22[2];
+ uint8_t sdr_default_zone_group;
+ DECL_BITFIELD7(
+ sdr_saved_zoning_enabled :1,
+ _reserved23 :1,
+ sdr_saved_zone_group_persistent :1,
+ _reserved24 :1,
+ sdr_saved_requested_inside_zpsds :1,
+ sdr_saved_inside_zpsds_persistent :1,
+ _reserved25 :2);
+ uint8_t _reserved26[2];
+ uint8_t saved_zone_group;
+ DECL_BITFIELD6(
+ _reserved27 :2,
+ sdr_shadow_zone_group_persistent :1,
+ _reserved28 :1,
+ sdr_shadow_requested_inside_zpsds :1,
+ sdr_shadow_inside_zpsds_persistent :1,
+ _reserved29 :2);
+ uint8_t _reserved30[2];
+ uint8_t sdr_shadow_zone_group;
+} smp_discover_resp_t;
+
+typedef enum smp_link_rate {
+ SMP_LINK_RATE_NO_CHANGE = 0x0,
+ SMP_LINK_RATE_DISABLED = 0x1,
+ SMP_LINK_RATE_RESET_PROBLEM = 0x2,
+ SMP_LINK_RATE_SPINUP_HOLD = 0x3,
+ SMP_LINK_RATE_PORT_SELECTOR = 0x4,
+ SMP_LINK_RATE_RESET = 0x5,
+ SMP_LINK_RATE_UNSUPPORTED = 0x6,
+ SMP_LINK_RATE_1_5 = 0x8,
+ SMP_LINK_RATE_3 = 0x9,
+ SMP_LINK_RATE_6 = 0xA
+} smp_link_rate_t;
+
+typedef enum smp_device_type {
+ SMP_DEV_NONE = 0x0,
+ SMP_DEV_SAS_SATA = 0x1,
+ SMP_DEV_EXPANDER = 0x2,
+ SMP_DEV_EXPANDER_OLD = 0x3
+} smp_device_type_t;
+
+typedef enum smp_routing_attr {
+ SMP_ROUTING_DIRECT = 0x0,
+ SMP_ROUTING_SUBTRACTIVE = 0x1,
+ SMP_ROUTING_TABLE = 0x2
+} smp_routing_attr_t;
+
+/*
+ * SAS-2 10.4.3.11 REPORT PHY ERROR LOG
+ */
+typedef struct smp_report_phy_error_log_req {
+ uint8_t _reserved1[5];
+ uint8_t srpelr_phy_identifier;
+ uint8_t _reserved2[2];
+} smp_report_phy_error_log_req_t;
+
+typedef struct smp_report_phy_error_log_resp {
+ uint16_t srpelr_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t srpelr_phy_identifier;
+ uint8_t _reserved2[2];
+ uint32_t srpelr_invalid_dword_count;
+ uint32_t srpelr_running_disparity_error_count;
+ uint32_t srpelr_loss_dword_sync_count;
+ uint32_t srpelr_phy_reset_problem_count;
+} smp_report_phy_error_log_resp_t;
+
+/*
+ * SAS-2 10.4.3.12 REPORT PHY SATA
+ */
+typedef struct smp_report_phy_sata_req {
+ uint8_t _reserved1[5];
+ uint8_t srpsr_phy_identifier;
+ uint8_t srpsr_affiliation_context;
+ uint8_t _reserved2;
+} smp_report_phy_sata_req_t;
+
+typedef struct smp_report_phy_sata_resp {
+ uint16_t srpsr_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t srpsr_phy_identifier;
+ uint8_t _reserved2;
+ DECL_BITFIELD4(
+ srpsr_affiliation_valid :1,
+ srpsr_affiliations_supported :1,
+ srpsr_stp_nexus_loss :1,
+ _reserved3 :5);
+ uint8_t _reserved4[4];
+ uint64_t srpsr_stp_sas_addr;
+ uint8_t srpsr_register_device_host_fis[20];
+ uint8_t _reserved5[4];
+ uint64_t srpsr_affiliated_stp_init_sas_addr;
+ uint64_t srpsr_stp_nexus_loss_sas_addr;
+ uint8_t _reserved6;
+ uint8_t srpsr_affiliation_context;
+ uint8_t srpsr_current_affiliation_contexts;
+ uint8_t srpsr_max_affiliation_contexts;
+} smp_report_phy_sata_resp_t;
+
+/*
+ * SAS-2 10.4.3.13 REPORT ROUTE INFORMATION
+ */
+typedef struct smp_report_route_info_req {
+ uint8_t _reserved1[2];
+ uint16_t srrir_exp_route_index;
+ uint8_t _reserved2;
+ uint8_t srrir_phy_identifier;
+ uint8_t _reserved3[2];
+} smp_report_route_info_req_t;
+
+typedef struct smp_report_route_info_resp {
+ uint16_t srrir_exp_change_count;
+ uint16_t srrir_exp_route_index;
+ uint8_t _reserved1;
+ uint8_t srrir_phy_identifier;
+ uint8_t _reserved2[2];
+ DECL_BITFIELD2(
+ _reserved3 :7,
+ srrir_exp_route_entry_disabled :1);
+ uint8_t _reserved4[3];
+ uint64_t srrir_routed_sas_addr;
+ uint8_t _reserved5[16];
+} smp_report_route_info_resp_t;
+
+/*
+ * SAS-2 10.4.3.14 SAS-2 REPORT PHY EVENT
+ */
+typedef struct smp_report_phy_event_req {
+ uint8_t _reserved1;
+ uint8_t _reserved2[4];
+ uint8_t srper_phy_identifier;
+ uint8_t _reserved3[2];
+} smp_report_phy_event_req_t;
+
+typedef struct smp_phy_event_report_descr {
+ uint8_t _reserved1[3];
+ uint8_t sped_phy_event_source;
+ uint32_t sped_phy_event;
+ uint32_t sped_peak_detector_threshold;
+} smp_phy_event_report_descr_t;
+
+typedef struct smp_report_phy_event_resp {
+ uint16_t srper_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t srper_phy_identifier;
+ uint8_t _reserved2[5];
+ uint8_t srper_n_phy_event_descrs;
+ smp_phy_event_report_descr_t srper_phy_event_descrs[1];
+} smp_report_phy_event_resp_t;
+
+/*
+ * SAS-2 10.4.3.15 SAS-2 DISCOVER LIST
+ */
+typedef struct smp_discover_list_req {
+ uint8_t _reserved1[4];
+ uint8_t sdlr_starting_phy_identifier;
+ uint8_t sdlr_max_descrs;
+ DECL_BITFIELD3(
+ sdlr_phy_filter :4,
+ _reserved2 :3,
+ sdlr_ignore_zone_group :1);
+ DECL_BITFIELD2(
+ sdlr_descr_type :4,
+ _reserved3 :4);
+ uint8_t _reserved4[4];
+ uint8_t sdlr_vendor[12];
+} smp_discover_list_req_t;
+
+typedef struct smp_discover_short_descr {
+ uint8_t sdsd_phy_identifier;
+ uint8_t sdsd_function_result;
+ DECL_BITFIELD3(
+ sdsd_attached_reason :4,
+ sdsd_attached_device_type :3,
+ _restricted1 :1);
+ DECL_BITFIELD2(
+ sdsd_negotiated_logical_link_rate :4, /* smp_link_rate_t */
+ _restricted2 :4);
+ DECL_BITFIELD5(
+ sdsd_attached_sata_host :1,
+ sdsd_attached_smp_initiator :1,
+ sdsd_attached_stp_initiator :1,
+ sdsd_attached_ssp_initiator :1,
+ _restricted3 :4);
+ DECL_BITFIELD6(
+ sdsd_attached_sata_device :1,
+ sdsd_attached_smp_target :1,
+ sdsd_attached_stp_target :1,
+ sdsd_attached_ssp_target :1,
+ _restricted4 :3,
+ sdsd_attached_sata_port_selector :1);
+ DECL_BITFIELD3(
+ sdsd_routing_attribute :4, /* smp_routing_attr_t */
+ _reserved1 :3,
+ sdsd_virtual_phy :1);
+ DECL_BITFIELD2(
+ _reserved2 :4,
+ sdsd_reason :4);
+ uint8_t sdsd_zone_group;
+ DECL_BITFIELD7(
+ _reserved3 :1,
+ sdsd_inside_zpsds :1,
+ sdsd_zone_group_persistent :1,
+ _reserved4 :1,
+ sdsd_requested_insize_zpsds :1,
+ sdsd_inside_zpsds_persistent :1,
+ _restricted5 :2);
+ uint8_t sdsd_attached_phy_identifier;
+ uint8_t sdsd_phy_change_count;
+ uint64_t sdsd_attached_sas_addr;
+ uint8_t _reserved5[4];
+} smp_discover_short_descr_t;
+
+typedef struct smp_discover_long_descr {
+ uint8_t _reserved1[2];
+ uint8_t sdld_function_result;
+ uint8_t _reserved2[1];
+ smp_discover_resp_t sdld_response;
+} smp_discover_long_descr_t;
+
+#define SMP_DISCOVER_RESP(_ld) \
+ (((smp_discover_long_descr_t *)(_ld))->sdld_function_result == \
+ SMP_FUNCTION_ACCEPTED ? \
+ &((smp_discover_long_descr_t *)(_ld))->sdld_response : \
+ NULL)
+
+typedef struct smp_discover_list_resp {
+ uint16_t sdlr_exp_change_count;
+ uint8_t _reserved1[2];
+ uint8_t sdlr_starting_phy_identifier;
+ uint8_t sdlr_n_descrs;
+ DECL_BITFIELD2(
+ sdlr_phy_filter :4,
+ _reserved2 :4);
+ DECL_BITFIELD2(
+ sdlr_descr_type :4,
+ _reserved3 :4);
+ uint8_t sdlr_descr_length;
+ uint8_t _reserved4[3];
+ DECL_BITFIELD5(
+ sdlr_externally_configurable_route_table :1,
+ sdlr_configuring :1,
+ _reserved5 :4,
+ sdlr_zoning_enabled :1,
+ sdlr_zoning_supported :1);
+ uint8_t _reserved6;
+ uint16_t sdlr_last_sc_status_descr_index;
+ uint16_t sdlr_last_phy_event_list_descr_index;
+ uint8_t _reserved7[10];
+ uint8_t sdlr_vendor[16];
+ uint8_t sdlr_descrs[1]; /* short or long format */
+} smp_discover_list_resp_t;
+
+/*
+ * SAS-2 10.4.3.16 REPORT PHY EVENT LIST
+ */
+typedef struct smp_report_phy_event_list_req {
+ uint8_t _reserved1[2];
+ uint16_t srpelr_starting_descr_index;
+} smp_report_phy_event_list_req_t;
+
+typedef struct smp_phy_event_list_descr {
+ uint8_t _reserved1[2];
+ uint8_t speld_phy_identifier;
+ uint8_t speld_phy_event_source;
+ uint32_t speld_phy_event;
+ uint32_t speld_peak_detector_threshold;
+} smp_phy_event_list_descr_t;
+
+typedef struct smp_report_phy_event_list_resp {
+ uint16_t srpelr_exp_change_count;
+ uint16_t srpelr_starting_descr_index;
+ uint16_t srpelr_last_descr_index;
+ uint8_t srpelr_phy_event_list_descr_length;
+ uint8_t _reserved1[3];
+ uint8_t srpelr_n_descrs;
+ smp_phy_event_list_descr_t srpelr_descrs[1];
+} smp_report_phy_event_list_resp_t;
+
+/*
+ * SAS-2 10.4.3.17 REPORT EXPANDER ROUTE TABLE LIST
+ */
+typedef struct smp_report_exp_route_table_list_req {
+ uint8_t _reserved1[4];
+ uint16_t srertlr_max_descrs;
+ uint16_t srertlr_starting_routed_sas_addr_index;
+ uint8_t _reserved2[7];
+ uint8_t srertlr_starting_phy_identifier;
+ uint8_t _reserved3[8];
+} smp_report_exp_route_table_list_req_t;
+
+typedef struct smp_route_table_descr {
+ uint64_t srtd_routed_sas_addr;
+ uint8_t srtd_phy_bitmap[6];
+ DECL_BITFIELD2(
+ _reserved1 :7,
+ srtd_zone_group_valid :1);
+ uint8_t srtd_zone_group;
+} smp_route_table_descr_t;
+
+#define SMP_ROUTE_PHY(_d, _s, _i) \
+ ((_d)->srtd_phy_bitmap[(48 - (_i) + (_s)) >> 3] & \
+ (1 << ((48 - (_i) + (_s)) & 7)))
+
+typedef struct smp_report_exp_route_table_list_resp {
+ uint16_t srertlr_exp_change_count;
+ uint16_t srertlr_route_table_change_count;
+ DECL_BITFIELD3(
+ _reserved1 :1,
+ srertlr_configuring :1,
+ _reserved2 :6);
+ uint8_t _reserved3;
+ uint16_t srertlr_n_descrs;
+ uint16_t srertlr_first_routed_sas_addr_index;
+ uint16_t srertlr_last_routed_sas_addr_index;
+ uint8_t _reserved4[3];
+ uint8_t srertlr_starting_phy_identifier;
+ uint8_t _reserved5[12];
+ smp_route_table_descr_t srertlr_descrs[1];
+} smp_report_exp_route_table_list_resp_t;
+
+/*
+ * SAS-2 10.4.3.18 CONFIGURE GENERAL (no additional response)
+ */
+typedef struct smp_config_general_req {
+ uint16_t scgr_expected_exp_change_count;
+ uint8_t _reserved1[2];
+ DECL_BITFIELD6(
+ scgr_update_stp_bus_inactivity :1,
+ scgr_update_stp_max_conn :1,
+ scgr_update_stp_smp_nexus_loss :1,
+ scgr_update_initial_time_to_reduced_functionality :1,
+ scgr_update_stp_reject_to_open :1,
+ _reserved2 :3);
+ uint8_t _reserved3;
+ uint16_t scgr_stp_bus_inactivity;
+ uint16_t scgr_stp_max_conn;
+ uint16_t scgr_stp_smp_nexus_loss;
+ uint8_t scgr_initial_time_to_reduced_functionality;
+ uint8_t _reserved4;
+ uint16_t scgr_stp_reject_to_open;
+} smp_config_general_req_t;
+
+/*
+ * SAS-2 10.4.3.19 ENABLE DISABLE ZONING (no additional response)
+ */
+typedef struct smp_enable_disable_zoning_req {
+ uint16_t sedzr_expected_exp_change_count;
+ DECL_BITFIELD2(
+ sedzr_save :2, /* smp_zoning_save_t */
+ _reserved1 :6);
+ uint8_t _reserved2;
+ DECL_BITFIELD2(
+ sedzr_enable_disable_zoning :2,
+ _reserved3 :6);
+ uint8_t _reserved4[3];
+} smp_enable_disable_zoning_req_t;
+
+typedef enum smp_zoning_save {
+ SMP_ZONING_SAVE_CURRENT = 0x0,
+ SMP_ZONING_SAVE_SAVED = 0x1,
+ SMP_ZONING_SAVE_BOTH_IF_SUPP = 0x2,
+ SMP_ZONING_SAVE_BOTH = 0x3
+} smp_zoning_save_t;
+
+typedef enum smp_zoning_enable_op {
+ SMP_ZONING_ENABLE_OP_NONE = 0x0,
+ SMP_ZONING_ENABLE_OP_ENABLE = 0x1,
+ SMP_ZONING_ENABLE_OP_DISABLE = 0x2
+} smp_zoning_enable_op_t;
+
+/*
+ * SAS-2 10.4.3.20 ZONED BROADCAST (no additional response)
+ */
+typedef struct smp_zoned_broadcast_req {
+ uint8_t _restricted1[2];
+ DECL_BITFIELD2(
+ szbr_broadcast_type :4,
+ _reserved :4);
+ uint8_t szbr_n_broadcast_source_zone_groups;
+ uint8_t szbr_broadcast_source_zone_groups[1];
+} smp_zoned_broadcast_req_t;
+
+/*
+ * SAS-2 10.4.3.21 ZONE LOCK
+ */
+typedef struct smp_zone_lock_req {
+ uint16_t szlr_expected_exp_change_count;
+ uint16_t szlr_zone_lock_inactivity_timeout;
+ uint8_t szlr_zone_manager_password[32];
+} smp_zone_lock_req_t;
+
+typedef struct smp_zone_lock_resp {
+ uint8_t _reserved1[4];
+ uint64_t szlr_active_zone_manager_sas_addr;
+} smp_zone_lock_resp_t;
+
+/*
+ * SAS-2 10.4.3.22 ZONE ACTIVATE (no additional response)
+ */
+typedef struct smp_zone_activate_req {
+ uint16_t szar_expected_exp_change_count;
+ uint8_t _reserved1[2];
+} smp_zone_activate_req_t;
+
+/*
+ * SAS-2 10.4.3.23 ZONE UNLOCK (no additional response)
+ */
+typedef struct smp_zone_unlock_req {
+ uint8_t _restricted1[2];
+ DECL_BITFIELD2(
+ szur_activate_required :1,
+ _reserved1 :7);
+ uint8_t _reserved2;
+} smp_zone_unlock_req_t;
+
+/*
+ * SAS-2 10.4.3.24 CONFIGURE ZONE MANAGER PASSWORD (no additional response)
+ */
+typedef struct smp_config_zone_manager_password_req {
+ uint16_t sczmpr_expected_exp_change_count;
+ DECL_BITFIELD2(
+ sczmpr_save :2, /* smp_zoning_save_t */
+ _reserved1 :6);
+ uint8_t _reserved2;
+ uint8_t sczmpr_zone_manager_password[32];
+ uint8_t sczmpr_new_zone_manager_password[32];
+} smp_config_zone_manager_password_req_t;
+
+/*
+ * SAS-2 10.4.3.25 CONFIGURE ZONE PHY INFORMATION (no additional response)
+ */
+typedef struct smp_zone_phy_config_descr {
+ uint8_t szpcd_phy_identifier;
+ DECL_BITFIELD6(
+ _reserved1 :2,
+ szpcd_zone_group_persistent :1,
+ _reserved2 :1,
+ szpcd_requested_inside_zpsds :1,
+ szpcd_inside_zpsds_persistent :1,
+ _reserved3 :2);
+ uint8_t _reserved4;
+ uint8_t szpcd_zone_group;
+} smp_zone_phy_config_descr_t;
+
+typedef struct smp_config_zone_phy_info_req {
+ uint16_t sczpir_expected_exp_change_count;
+ DECL_BITFIELD2(
+ sczpir_save :2, /* smp_zoning_save_t */
+ _reserved1 :6);
+ uint8_t sczpir_n_descrs;
+ smp_zone_phy_config_descr_t sczpir_descrs[1];
+} smp_config_zone_phy_info_req_t;
+
+/*
+ * SAS-2 10.4.3.26 CONFIGURE ZONE PERMISSION TABLE (no additional response)
+ */
+typedef struct smp_config_zone_perm_table_req {
+ uint16_t sczptr_expected_exp_change_count;
+ uint8_t sczptr_starting_source_zone_group;
+ uint8_t sczptr_n_descrs;
+ DECL_BITFIELD3(
+ sczptr_save :2, /* smp_zoning_save_t */
+ _reserved1 :4,
+ sczptr_n_zone_groups :2); /* smp_n_zone_grps_t */
+ uint8_t _reserved2[7];
+ uint8_t sczptr_descrs[1]; /* smp_zone_perm_descrXXX_t */
+} smp_config_zone_perm_table_req_t;
+
+/*
+ * SAS-2 10.4.3.27 CONFIGURE ROUTE INFORMATION (no additional response)
+ */
+typedef struct smp_config_route_info_req {
+ uint16_t scrir_expected_exp_change_count;
+ uint16_t scrir_exp_route_index;
+ uint8_t _reserved1;
+ uint8_t scrir_phy_identifier;
+ uint8_t _reserved2[2];
+ DECL_BITFIELD2(
+ _reserved3 :7,
+ scrir_disable_exp_route_entry :1);
+ uint8_t _reserved4[3];
+ uint64_t scrir_routed_sas_addr;
+ uint8_t _reserved5[16];
+} smp_config_route_info_req_t;
+
+/*
+ * SAS-2 10.4.3.28 PHY CONTROL (no additional response)
+ */
+typedef struct smp_phy_control_req {
+ uint16_t spcr_expected_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t spcr_phy_identifier;
+ uint8_t spcr_phy_operation;
+ DECL_BITFIELD2(
+ spcr_update_partial_pwy_timeout :1,
+ _reserved2 :7);
+ uint8_t _reserved3[12];
+ uint64_t spcr_attached_device_name;
+ DECL_BITFIELD2(
+ _reserved4 :4,
+ spcr_prog_min_phys_link_rate :4); /* smp_link_rate_t */
+ DECL_BITFIELD2(
+ _reserved5 :4,
+ spcr_prog_max_phys_link_rate :4); /* smp_link_rate_t */
+ uint8_t _reserved6[2];
+ DECL_BITFIELD2(
+ spcr_partial_pwy_timeout :4,
+ _reserved7 :4);
+ uint8_t _reserved8[3];
+} smp_phy_control_req_t;
+
+typedef enum smp_phy_op {
+ SMP_PHY_OP_NOP = 0x00,
+ SMP_PHY_OP_LINK_RESET = 0x01,
+ SMP_PHY_OP_HARD_RESET = 0x02,
+ SMP_PHY_OP_DISABLE = 0x03,
+ SMP_PHY_OP_CLEAR_ERROR_LOG = 0x05,
+ SMP_PHY_OP_CLEAR_AFFILIATION = 0x06,
+ SMP_PHY_OP_TRANSMIT_SATA_PORT_SELECTION_SIGNAL = 0x07,
+ SMP_PHY_OP_CLEAR_STP_NEXUS_LOSS = 0x08,
+ SMP_PHY_OP_SET_ATTACHED_DEVICE_NAME = 0x09
+} smp_phy_op_t;
+
+/*
+ * SAS-2 10.4.3.29 PHY TEST FUNCTION (no additional response)
+ */
+typedef struct smp_phy_test_function_req {
+ uint16_t sptfr_expected_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t sptfr_phy_identifier;
+ uint8_t sptfr_phy_test_function;
+ uint8_t sptfr_phy_test_pattern; /* smp_phy_test_function_t */
+ uint8_t _reserved2[3];
+ DECL_BITFIELD4(
+ sptfr_test_pattern_phys_link_rate :4, /* smp_link_rate_t */
+ sptfr_test_pattern_ssc :2,
+ sptfr_test_pattern_sata :1,
+ _reserved3 :1);
+ uint8_t _reserved4[3];
+ uint8_t sptfr_phy_test_pattern_dwords_ctl;
+ uint8_t sptfr_phy_test_pattern_dwords[8];
+ uint8_t _reserved5[12];
+} smp_phy_test_function_req_t;
+
+typedef enum smp_phy_test_function {
+ SMP_PHY_TEST_FN_STOP = 0x00,
+ SMP_PHY_TEST_FN_TRANSMIT_PATTERN = 0x01
+} smp_phy_test_function_t;
+
+/*
+ * SAS-2 10.4.3.30 CONFIGURE PHY EVENT (no additional response)
+ */
+typedef struct smp_phy_event_config_descr {
+ uint8_t _reserved1[3];
+ uint8_t specd_phy_event_source;
+ uint32_t specd_peak_value_detector_threshold;
+} smp_phy_event_config_descr_t;
+
+typedef struct smp_config_phy_event_req {
+ uint16_t scper_expected_exp_change_count;
+ DECL_BITFIELD2(
+ scper_clear_peaks :1,
+ _reserved1 :7);
+ uint8_t _reserved2[2];
+ uint8_t scper_phy_identifier;
+ uint8_t _reserved3;
+ uint8_t scper_n_descrs;
+ smp_phy_event_config_descr_t scper_descrs[1];
+} smp_config_phy_event_req_t;
+
+#pragma pack()
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SCSI_GENERIC_SMP_FRAMES_H */
diff --git a/usr/src/uts/common/sys/scsi/conf/autoconf.h b/usr/src/uts/common/sys/scsi/conf/autoconf.h
index 757732cb09..8d0903dee5 100644
--- a/usr/src/uts/common/sys/scsi/conf/autoconf.h
+++ b/usr/src/uts/common/sys/scsi/conf/autoconf.h
@@ -19,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_SCSI_CONF_AUTOCONF_H
#define _SYS_SCSI_CONF_AUTOCONF_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -75,6 +73,9 @@ extern "C" {
#define SCSI_OPTIONS_NLUNS_8 0x20000
#define SCSI_OPTIONS_NLUNS_16 0x30000
#define SCSI_OPTIONS_NLUNS_32 0x40000
+#define SCSI_OPTIONS_NLUNS_64 0x50000
+#define SCSI_OPTIONS_NLUNS_128 0x60000
+#define SCSI_OPTIONS_NLUNS_256 0x70000
#define SCSI_OPTIONS_NLUNS(n) ((n) & SCSI_OPTIONS_NLUNS_MASK)
@@ -153,14 +154,41 @@ extern "C" {
#define SCSI_DEFAULT_SELECTION_TIMEOUT 250
/*
- * Kernel references
+ * SCSI subsystem scsi_enumeration options.
+ *
+ * Knob for SPI (SCSI Parallel Interconnect) enumeration. Unless an HBA
+ * defines its own tran_bus_config, SPI enumeration is used. The
+ * "scsi_enumeration" knob determines how SPI enumeration is performed.
+ *
+ * The global variable "scsi_enumeration" is used as the default value of the
+ * "scsi-enumeration" property. In addition to enabling/disabling enumeration
+ * (bit 0), target and lun threading can be specified. Running multi-threaded
+ * does not guarantee reduced configuration time; however, when the bus is
+ * marginal, multi-threading can substantially reduce configuration time
+ * because targets negotiate to stable transfer speeds in parallel - so all
+ * targets have stabilized by the time the sequential attach(9E) operations
+ * begin. Running multi-threaded also helps verification of framework and HBA
+ * locking: a BUS_CONFIG_ALL is equivalent to every target and lun combination
+ * getting a BUS_CONFIG_ONE from a separate thread at the same time. A disable
+ * mechanism is provided to accommodate buggy HBAs (set scsi-enumeration=7 in
+ * driver.conf). Values are:
+ *
+ * 0 driver.conf enumeration
+ * 1 dynamic enumeration with target and lun multi-threading.
+ * 3 dynamic enumeration with lun multi-threading disabled.
+ * 5 dynamic enumeration with target multi-threading disabled.
+ * 7 dynamic enumeration with target/lun multi-threading disabled.
*/
+#define SCSI_ENUMERATION_ENABLE 0x1
+#define SCSI_ENUMERATION_MT_LUN_DISABLE 0x2
+#define SCSI_ENUMERATION_MT_TARGET_DISABLE 0x4
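+
+/*
+ * Example (illustrative): the values listed above are bit-ORs of these
+ * flags, e.g. 7 == SCSI_ENUMERATION_ENABLE | SCSI_ENUMERATION_MT_LUN_DISABLE |
+ * SCSI_ENUMERATION_MT_TARGET_DISABLE, which a driver.conf file could request
+ * with a line such as:
+ *
+ *     scsi-enumeration=7;
+ */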
#ifdef _KERNEL
/*
* Global SCSI config variables / options
*/
extern int scsi_options;
+extern int scsi_enumeration;
extern unsigned int scsi_reset_delay; /* specified in milli seconds */
extern int scsi_tag_age_limit;
extern int scsi_watchdog_tick;
diff --git a/usr/src/uts/common/sys/scsi/conf/device.h b/usr/src/uts/common/sys/scsi/conf/device.h
index 305e595807..9fce4669d8 100644
--- a/usr/src/uts/common/sys/scsi/conf/device.h
+++ b/usr/src/uts/common/sys/scsi/conf/device.h
@@ -141,6 +141,13 @@ struct scsi_device {
void *sd_pathinfo;
/*
+ * Counter that prevents demotion of a DS_INITIALIZED node (especially
+ * loss of devi_addr) by causing DDI_CTLOPS_UNINITCHILD failure; devi_ref
+ * alone will not protect against demotion of a DS_INITIALIZED node.
+ */
+ int sd_uninit_prevent;
+
+ /*
* The 'sd_tran_safe' field is a grotty hack that allows direct-access
* (non-scsa) drivers (like chs, ata, and mlx - which all make cmdk
* children) to *illegally* put their own vector in the scsi_address(9S)
diff --git a/usr/src/uts/common/sys/scsi/generic/sas.h b/usr/src/uts/common/sys/scsi/generic/sas.h
new file mode 100644
index 0000000000..61ba710e19
--- /dev/null
+++ b/usr/src/uts/common/sys/scsi/generic/sas.h
@@ -0,0 +1,199 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * SAS Common Structures and Definitions
+ * sas2r14, simplified/reduced
+ */
+#ifndef _SAS_H
+#define _SAS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/sysmacros.h>
+/*
+ * SAS Address Frames
+ * Trailing 4 byte CRC not included.
+ */
+typedef struct {
+ DECL_BITFIELD3(
+ address_frame_type :4,
+ device_type :3,
+ reserved0 :1);
+ DECL_BITFIELD2(
+ reason :4,
+ reserved1 :4);
+ DECL_BITFIELD5(
+ restricted0 :1,
+ smp_ini_port :1,
+ stp_ini_port :1,
+ ssp_ini_port :1,
+ reserved2 :4);
+ DECL_BITFIELD5(
+ restricted1 :1,
+ smp_tgt_port :1,
+ stp_tgt_port :1,
+ ssp_tgt_port :1,
+ reserved3 :4);
+ uint8_t device_name[8];
+ uint8_t sas_address[8];
+ uint8_t phy_identifier;
+ DECL_BITFIELD4(
+ break_reply_capable :1,
+ requested_inside_zpsds :1,
+ inside_zpsds_persistent :1,
+ reserved4 :5);
+ uint8_t reserved5[6];
+} sas_identify_af_t;
+
+typedef struct {
+ DECL_BITFIELD3(
+ address_frame_type :4,
+ protocol :3,
+ ini_port :1);
+ DECL_BITFIELD2(
+ connection_rate :4,
+ features :4);
+ uint16_t itag; /* initiator connection tag */
+ uint8_t sas_dst[8]; /* destination sas address */
+ uint8_t sas_src[8]; /* source sas address */
+ uint8_t src_zone_group; /* source zone group */
+ uint8_t path_block_count; /* pathway blocked count */
+ uint16_t arb_wait_time; /* arbitration wait time */
+ uint8_t compat[4]; /* 'more' compatible features */
+} sas_open_af_t;
+
+#define SAS_AF_IDENTIFY 0
+#define SAS_AF_OPEN 1
+
+#define SAS_IF_DTYPE_ENDPOINT 1
+#define SAS_IF_DTYPE_EDGE 2
+#define SAS_IF_DTYPE_FANOUT 3 /* obsolete */
+
+#define SAS_OF_PROTO_SMP 0
+#define SAS_OF_PROTO_SSP 1
+#define SAS_OF_PROTO_STP 2
+
+#define SAS_SSP_SUPPORT 0x8
+#define SAS_STP_SUPPORT 0x4
+#define SAS_SMP_SUPPORT 0x2
+
+
+#define SAS_CONNRATE_1_5_GBPS 0x8
+#define SAS_CONNRATE_3_0_GBPS 0x9
+#define SAS_CONNRATE_6_0_GBPS 0xA
+
+#define SAS_SATA_SUPPORT 0x1
+#define SAS_ATTACHED_NAME_OFFSET 52 /* SAS-2 only */
+
+/*
+ * SSP Definitions
+ */
+typedef struct {
+ uint8_t lun[8];
+ uint8_t reserved0;
+ DECL_BITFIELD3(
+ task_attribute :3,
+ command_priority :4,
+ enable_first_burst :1);
+ uint8_t reserved1;
+ DECL_BITFIELD2(
+ reserved2 :2,
+ addi_cdb_len :6);
+ uint8_t cdb[16];
+ /* additional cdb bytes go here, followed by 4 byte CRC */
+} sas_ssp_cmd_iu_t;
+
+#define SAS_CMD_TASK_ATTR_SIMPLE 0x00
+#define SAS_CMD_TASK_ATTR_HEAD 0x01
+#define SAS_CMD_TASK_ATTR_ORDERED 0x02
+#define SAS_CMD_TASK_ATTR_ACA 0x04
+
+typedef struct {
+ uint8_t reserved0[8];
+ uint16_t status_qualifier;
+ DECL_BITFIELD2(
+ datapres :2,
+ reserved1 :6);
+ uint8_t status;
+ uint8_t reserved2[4];
+ uint32_t sense_data_length;
+ uint32_t response_data_length;
+ uint8_t rspd[];
+ /* response data followed by sense data goes here */
+} sas_ssp_rsp_iu_t;
+
+/* length of bytes up to response data */
+#define SAS_RSP_HDR_SIZE 24
+
+#define SAS_RSP_DATAPRES_NO_DATA 0x00
+#define SAS_RSP_DATAPRES_RESPONSE_DATA 0x01
+#define SAS_RSP_DATAPRES_SENSE_DATA 0x02
+
+/*
+ * When the RSP IU is type RESPONSE_DATA,
+ * the first 4 bytes of response data should
+ * be a Big Endian representation of one of
+ * these codes.
+ */
+#define SAS_RSP_TMF_COMPLETE 0x00
+#define SAS_RSP_INVALID_FRAME 0x02
+#define SAS_RSP_TMF_NOT_SUPPORTED 0x04
+#define SAS_RSP_TMF_FAILED 0x05
+#define SAS_RSP_TMF_SUCCEEDED 0x08
+#define SAS_RSP_TMF_INCORRECT_LUN 0x09
+#define SAS_RSP_OVERLAPPED_OIPTTA 0x0A
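+
+/*
+ * Example (illustrative sketch; "rsp" is a hypothetical pointer to a
+ * received response IU): when datapres indicates response data, the code
+ * is taken from the first four bytes of rspd, big endian:
+ *
+ *     sas_ssp_rsp_iu_t *rsp;
+ *     uint32_t code;
+ *
+ *     if (rsp->datapres == SAS_RSP_DATAPRES_RESPONSE_DATA) {
+ *             code = (rsp->rspd[0] << 24) | (rsp->rspd[1] << 16) |
+ *                 (rsp->rspd[2] << 8) | rsp->rspd[3];
+ *             (code is, e.g., SAS_RSP_TMF_COMPLETE on success)
+ *     }
+ */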
+
+/*
+ * Task Management Functions- should be in a SAM definition file
+ */
+#define SAS_ABORT_TASK 0x01
+#define SAS_ABORT_TASK_SET 0x02
+#define SAS_CLEAR_TASK_SET 0x04
+#define SAS_LOGICAL_UNIT_RESET 0x08
+#define SAS_I_T_NEXUS_RESET 0x10
+#define SAS_CLEAR_ACA 0x40
+#define SAS_QUERY_TASK 0x80
+#define SAS_QUERY_TASK_SET 0x81
+#define SAS_QUERY_UNIT_ATTENTION 0x82
+
+/*
+ * The PHY number range changed from SAS-1.1 to SAS-2.
+ */
+#define SAS_PHYNUM_MAX 127
+#define SAS_PHYNUM_MASK 0x7f
+
+#define SAS2_PHYNUM_MAX 254
+#define SAS2_PHYNUM_MASK 0xff
+
+
+/*
+ * Maximum SMP payload size, including CRC
+ */
+#define SAS_SMP_MAX_PAYLOAD 1032
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SAS_H */
diff --git a/usr/src/uts/common/sys/scsi/generic/smp_frames.h b/usr/src/uts/common/sys/scsi/generic/smp_frames.h
index 5c2fa17d9f..dd79a19b33 100644
--- a/usr/src/uts/common/sys/scsi/generic/smp_frames.h
+++ b/usr/src/uts/common/sys/scsi/generic/smp_frames.h
@@ -20,14 +20,12 @@
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#ifndef _SYS_SCSI_IMPL_SMP_FRAME_H
-#define _SYS_SCSI_IMPL_SMP_FRAME_H
-
-#pragma ident "%Z%%M% %I% %E% SMI"
+#ifndef _SYS_SCSI_GENERIC_SMP_FRAMES_H
+#define _SYS_SCSI_GENERIC_SMP_FRAMES_H
#ifdef __cplusplus
extern "C" {
@@ -36,86 +34,1058 @@ extern "C" {
#include <sys/sysmacros.h>
/*
- * The definitions of smp frame types and functions conforming to SAS v1.1.
- * The SAS v2.0 will be supported in the future when it is publicly released.
+ * The definitions of smp frame types and functions conforming to SAS-1.1 and
+ * SAS-2. Consumers are expected to determine protocol support by examining
+ * the response to the REPORT GENERAL function.
*/
-typedef enum {
+typedef enum smp_frame_type {
SMP_FRAME_TYPE_REQUEST = 0x40,
SMP_FRAME_TYPE_RESPONSE = 0x41
-} smp_frame_types;
+} smp_frame_type_t;
+
+typedef enum smp_function {
+ SMP_FUNC_REPORT_GENERAL = 0x00,
+ SMP_FUNC_REPORT_MANUFACTURER_INFO = 0x01,
+ SMP_FUNC_READ_GPIO_REGISTER = 0x02,
+ SMP_FUNC_REPORT_SELF_CONFIG_STATUS = 0x03,
+ SMP_FUNC_REPORT_ZONE_PERM_TABLE = 0x04,
+ SMP_FUNC_REPORT_ZONE_MANAGER_PASSWORD = 0x05,
+ SMP_FUNC_REPORT_BROADCAST = 0x06,
+ SMP_FUNC_DISCOVER = 0x10,
+ SMP_FUNC_REPORT_PHY_ERROR_LOG = 0x11,
+ SMP_FUNC_REPORT_PHY_SATA = 0x12,
+ SMP_FUNC_REPORT_ROUTE_INFO = 0x13,
+ SMP_FUNC_REPORT_PHY_EVENT = 0x14,
+ SMP_FUNC_DISCOVER_LIST = 0x20,
+ SMP_FUNC_REPORT_PHY_EVENT_LIST = 0x21,
+ SMP_FUNC_REPORT_EXP_ROUTE_TABLE_LIST = 0x22,
+ SMP_FUNC_CONFIG_GENERAL = 0x80,
+ SMP_FUNC_ENABLE_DISABLE_ZONING = 0x81,
+ SMP_FUNC_WRITE_GPIO_REGISTER = 0x82,
+ SMP_FUNC_ZONED_BROADCAST = 0x85,
+ SMP_FUNC_ZONE_LOCK = 0x86,
+ SMP_FUNC_ZONE_ACTIVATE = 0x87,
+ SMP_FUNC_ZONE_UNLOCK = 0x88,
+ SMP_FUNC_CONFIG_ZONE_MANAGER_PASSWORD = 0x89,
+ SMP_FUNC_CONFIG_ZONE_PHY_INFO = 0x8A,
+ SMP_FUNC_CONFIG_ZONE_PERM_TABLE = 0x8B,
+ SMP_FUNC_CONFIG_ROUTE_INFO = 0x90,
+ SMP_FUNC_PHY_CONTROL = 0x91,
+ SMP_FUNC_PHY_TEST_FUNCTION = 0x92,
+ SMP_FUNC_CONFIG_PHY_EVENT = 0x93
+} smp_function_t;
-typedef enum {
- SMP_REPORT_GENERAL = 0x00,
- SMP_REPORT_MANUFACTURER_INFO = 0x01,
- SMP_DISCOVER = 0x10,
- SMP_REPORT_PHY_ERROR_LOG = 0x11,
- SMP_PHY_SATA = 0x12,
- SMP_REPORT_ROUTE_INFORMATION = 0x13,
- SMP_CONFIG_ROUTE_INFORMATION = 0x90,
- SMP_PHY_CONTROL = 0x91,
- SMP_PHY_TEST_FUNCTION = 0x92
-} smp_func_types;
+typedef enum smp_result {
+ SMP_RES_FUNCTION_ACCEPTED = 0x00,
+ SMP_RES_UNKNOWN_FUNCTION = 0x01,
+ SMP_RES_FUNCTION_FAILED = 0x02,
+ SMP_RES_INVALID_REQUEST_FRAME_LENGTH = 0x03,
+ SMP_RES_INVALID_EXPANDER_CHANGE_COUNT = 0x04,
+ SMP_RES_BUSY = 0x05,
+ SMP_RES_INCOMPLETE_DESCRIPTOR_LIST = 0x06,
+ SMP_RES_PHY_DOES_NOT_EXIST = 0x10,
+ SMP_RES_INDEX_DOES_NOT_EXIST = 0x11,
+ SMP_RES_PHY_DOES_NOT_SUPPORT_SATA = 0x12,
+ SMP_RES_UNKNOWN_PHY_OPERATION = 0x13,
+ SMP_RES_UNKNOWN_PHY_TEST_FUNCTION = 0x14,
+ SMP_RES_PHY_TEST_IN_PROGRESS = 0x15,
+ SMP_RES_PHY_VACANT = 0x16,
+ SMP_RES_UNKNOWN_PHY_EVENT_SOURCE = 0x17,
+ SMP_RES_UNKNOWN_DESCRIPTOR_TYPE = 0x18,
+ SMP_RES_UNKNOWN_PHY_FILTER = 0x19,
+ SMP_RES_AFFILIATION_VIOLATION = 0x1A,
+ SMP_RES_ZONE_VIOLATION = 0x20,
+ SMP_RES_NO_MANAGEMENT_ACCESS_RIGHTS = 0x21,
+ SMP_RES_UNKNOWN_ENABLE_DISABLE_ZONING = 0x22,
+ SMP_RES_ZONE_LOCK_VIOLATION = 0x23,
+ SMP_RES_NOT_ACTIVATED = 0x24,
+ SMP_RES_ZONE_GROUP_OUT_OF_RANGE = 0x25,
+ SMP_RES_NO_PHYSICAL_PRESENCE = 0x26,
+ SMP_RES_SAVING_NOT_SUPPORTED = 0x27,
+ SMP_RES_SOURCE_ZONE_GROUP_DNE = 0x28,
+ SMP_RES_NONE = -1
+} smp_result_t;
+
+#pragma pack(1)
/*
- * The reqsize and rspsize in usmp_req and usmp_rsp are reserved in
- * SAS v1.1, and the fields should be zero if target device is SAS v1.1
- * compliant.
+ * SAS-2 10.4.3.2 request frame format
*/
+typedef struct smp_request_frame {
+ uint8_t srf_frame_type;
+ uint8_t srf_function;
+ uint8_t srf_allocated_response_len; /* reserved in SAS-1 */
+ uint8_t srf_request_len;
+ uint8_t srf_data[1];
+} smp_request_frame_t;
-#pragma pack(1)
-typedef struct usmp_req {
- uint8_t smpo_frametype;
- uint8_t smpo_function;
- uint8_t smpo_reserved;
- uint8_t smpo_reqsize;
- uint8_t smpo_msgframe[1];
-} usmp_req_t;
-
-typedef struct usmp_rsp {
- uint8_t smpi_frametype;
- uint8_t smpi_function;
- uint8_t smpi_result;
- uint8_t smpi_rspsize;
- uint8_t smpi_msgframe[1];
-} usmp_rsp_t;
-
-struct smp_crc {
- uint8_t code[4];
-};
-
-struct smp_report_general_req {
- uint8_t frametype;
- uint8_t function;
- uint8_t reserved_byte2;
- uint8_t reqsize;
- struct smp_crc crc;
-};
-
-struct smp_report_general_rsp {
- uint8_t frametype;
- uint8_t function;
- uint8_t result;
- uint8_t rspsize;
- uint8_t expand_change_count1;
- uint8_t expand_change_count0;
- uint8_t expand_route_index1;
- uint8_t expand_route_index0;
- uint8_t reserved_byte8;
- uint8_t num_of_phy;
+/*
+ * SAS-2 10.4.3.3 response frame format
+ */
+typedef struct smp_response_frame {
+ uint8_t srf_frame_type;
+ uint8_t srf_function;
+ uint8_t srf_result;
+ uint8_t srf_response_len; /* reserved in SAS-1 */
+ uint8_t srf_data[1];
+} smp_response_frame_t;
+
+typedef uint8_t smp_crc_t[4];
+
+#ifdef offsetof
+#define SMP_REQ_MINLEN \
+ (offsetof(smp_request_frame_t, srf_data[0]) + sizeof (smp_crc_t))
+#define SMP_RESP_MINLEN \
+ (offsetof(smp_response_frame_t, srf_data[0]) + sizeof (smp_crc_t))
+#endif /* offsetof */
+
+/*
+ * SAS-2 10.4.3.4 REPORT GENERAL (no additional request bytes)
+ */
+typedef struct smp_report_general_resp {
+ uint16_t srgr_exp_change_count;
+ uint16_t srgr_exp_route_indexes;
+ DECL_BITFIELD2(
+ _reserved1 :7,
+ srgr_long_response :1);
+ uint8_t srgr_number_of_phys;
+ DECL_BITFIELD7(
+ srgr_externally_configurable_route_table :1,
+ srgr_configuring :1,
+ srgr_configures_others :1,
+ srgr_open_reject_retry_supported :1,
+ srgr_stp_continue_awt :1,
+ _reserved2 :2,
+ srgr_table_to_table_supported :1);
+ uint8_t _reserved3;
+ uint64_t srgr_enclosure_logical_identifier;
+ uint8_t _reserved4[8];
+ uint8_t _reserved5[2];
+ uint16_t srgr_stp_bus_inactivity_time_limit;
+ uint16_t srgr_stp_maximum_connect_time_limit;
+ uint16_t srgr_stp_smp_nexus_loss_time;
+ DECL_BITFIELD7(
+ srgr_zoning_enabled :1,
+ srgr_zoning_supported :1,
+ srgr_physical_presence_asserted :1,
+ srgr_physical_presence_supported :1,
+ srgr_zone_locked :1,
+ _reserved6 :1,
+ srgr_number_of_zone_grps :2);
+ DECL_BITFIELD6(
+ srgr_saving_zoning_enabled_supported :1,
+ srgr_saving_zone_perm_table_supported :1,
+ srgr_saving_zone_phy_info_supported :1,
+ srgr_saving_zone_mgr_password_supported :1,
+ srgr_saving :1,
+ _reserved7 :3);
+ uint16_t srgr_max_routed_sas_addrs;
+ uint64_t srgr_active_zm_sas_addr;
+ uint16_t srgr_zone_lock_inactivity_limit;
+ uint8_t _reserved8[2];
+ uint8_t _reserved9;
+ uint8_t srgr_first_encl_conn_elem_idx;
+ uint8_t srgr_number_encl_conn_elem_idxs;
+ uint8_t _reserved10;
+ DECL_BITFIELD2(
+ _reserved11 :7,
+ srgr_reduced_functionality :1);
+ uint8_t srgr_time_to_reduced_functionality;
+ uint8_t srgr_initial_time_to_reduced_functionality;
+ uint8_t srgr_max_reduced_functionality_time;
+ uint16_t srgr_last_self_conf_status_descr_idx;
+ uint16_t srgr_max_stored_self_config_status_descrs;
+ uint16_t srgr_last_phy_event_list_descr_idx;
+ uint16_t srgr_max_stored_phy_event_list_descrs;
+ uint16_t srgr_stp_reject_to_open_limit;
+ uint8_t _reserved12[2];
+} smp_report_general_resp_t;
+
+typedef enum smp_n_zone_grps {
+ SMP_ZONE_GROUPS_128 = 0x0,
+ SMP_ZONE_GROUPS_256 = 0x1
+} smp_n_zone_grps_t;
+
+/*
+ * SAS-2 10.4.3.5 REPORT MANUFACTURER INFORMATION (no additional request bytes)
+ */
+typedef struct smp_report_manufacturer_info_resp {
+ uint16_t srmir_exp_change_count;
+ uint8_t _reserved1[2];
+ DECL_BITFIELD2(
+ srmir_sas_1_1_format :1,
+ _reserved2 :7);
+ uint8_t _reserved3[3];
+ char srmir_vendor_identification[8];
+ char srmir_product_identification[16];
+ char srmir_product_revision_level[4];
+ char srmir_component_vendor_identification[8];
+ uint16_t srmir_component_id;
+ uint8_t srmir_component_revision_level;
+ uint8_t _reserved4;
+ uint8_t srmir_vs_52[8];
+} smp_report_manufacturer_info_resp_t;
+
+/*
+ * SAS-2 10.4.3.6 REPORT SELF_CONFIGURATION STATUS
+ */
+typedef struct smp_report_self_config_status_req {
+ uint8_t _reserved1[2];
+ uint16_t srscsr_starting_self_config_status_descr_idx;
+} smp_report_self_config_status_req_t;
+
+typedef struct smp_report_self_config_status_resp {
+ uint16_t srscsr_exp_change_count;
+ uint16_t srscsr_starting_self_config_status_descr_idx;
+ uint16_t srscsr_number_self_config_status_descrs;
+ uint16_t srscsr_last_self_config_status_descr_idx;
+ uint8_t srscsr_self_config_status_descr_len;
+ uint8_t _reserved1[3];
+ uint8_t srscsr_descrs[1];
+} smp_report_self_config_status_resp_t;
+
+typedef struct smp_self_config_status_descr {
+ uint8_t sscsd_status_type;
+ DECL_BITFIELD2(
+ sscsd_final :1,
+ _reserved1 :7);
+ uint8_t _reserved2;
+ uint8_t sscsd_phy_identifier;
+ uint8_t _reserved3[4];
+ uint64_t sscsd_sas_addr;
+} smp_self_config_status_descr_t;
+
+typedef enum smp_self_config_status_type {
+ SMP_SCST_NONSPECIFIC_ERROR = 0x01,
+ SMP_SCST_CONNECTION = 0x02,
+ SMP_SCST_ROUTE_TABLE_FULL = 0x03,
+ SMP_SCST_NOMEM = 0x04,
+ SMP_SCST_PHY_LAYER_ERROR = 0x20,
+ SMP_SCST_LOST_SYNC = 0x21,
+ SMP_SCST_LINK_LAYER_ERROR = 0x40,
+ SMP_SCST_OPEN_TIMEOUT = 0x41,
+ SMP_SCST_ABANDON_OPEN_REJECT = 0x42,
+ SMP_SCST_RETRY_OPEN_REJECTS = 0x43,
+ SMP_SCST_NEXUS_LOSS = 0x44,
+ SMP_SCST_BREAK = 0x45,
+ SMP_SCST_CRC_ERROR = 0x46,
+ SMP_SCST_PORT_LAYER_ERROR = 0x60,
+ SMP_SCST_RESPONSE_TIMEOUT = 0x61,
+ SMP_SCST_TRANSPORT_LAYER_ERROR = 0x80,
+ SMP_SCST_APP_LAYER_ERROR = 0xA0,
+ SMP_SCST_RESPONSE_TOO_SHORT = 0xA1,
+ SMP_SCST_UNSUPPORTED_VALUES = 0xA2,
+ SMP_SCST_INCONSISTENT = 0xA3,
+ SMP_SCST_CONFIGURING = 0xA4
+} smp_self_config_status_type_t;
+
+/*
+ * SAS-2 10.4.3.7 REPORT ZONE PERMISSION TABLE
+ */
+typedef struct smp_report_zone_perm_table_req {
+ DECL_BITFIELD2(
+ srzptr_report_type :2,
+ _reserved1 :6);
+ uint8_t _reserved2;
+ uint8_t srzptr_starting_src_zone_grp;
+ uint8_t srzptr_max_zone_perm_descrs;
+} smp_report_zone_perm_table_req_t;
+
+typedef enum smp_zone_perm_table_report_type {
+ SMP_ZPTRT_CURRENT = 0x0,
+ SMP_ZPTRT_SHADOW = 0x1,
+ SMP_ZPTRT_SAVED = 0x2,
+ SMP_ZPTRT_DEFAULT = 0x3
+} smp_zone_perm_table_report_type_t;
+
+typedef struct smp_report_zone_perm_table_resp {
+ uint16_t srzptr_exp_change_count;
DECL_BITFIELD3(
- crt :1,
- configuring :1,
- reserved_byte10 :6);
- uint8_t reserved_byte11;
- uint64_t identifier;
- uint8_t reserved_byte20[8];
- struct smp_crc crc;
-};
+ srzptr_report_type :2,
+ _reserved1 :5,
+ srzptr_zone_locked :1);
+ DECL_BITFIELD2(
+ _reserved2 :6,
+ srzptr_number_zone_grps :2);
+ uint8_t _reserved3[6];
+ uint8_t srzptr_starting_src_zone_grp;
+ uint8_t srzptr_number_zone_perm_descrs;
+ uint8_t srzptr_descrs[1];
+} smp_report_zone_perm_table_resp_t;
+
+typedef uint8_t smp_zone_perm_descr128_t[16];
+typedef uint8_t smp_zone_perm_descr256_t[32];
+
+#define SMP_ZONE_PERM_BIT128(__d, __z) \
+ ((__d)[15 - ((__z) >> 3)] & (1 << ((__z) & 7)))
+
+#define SMP_ZONE_PERM_SET128(__d, __z) \
+ ((__d)[15 - ((__z) >> 3)] |= (1 << ((__z) & 7)))
+
+#define SMP_ZONE_PERM_CLR128(__d, __z) \
+ ((__d)[15 - ((__z) >> 3)] &= ~(1 << ((__z) & 7)))
+
+#define SMP_ZONE_PERM_BIT256(__d, __z) \
+ ((__d)[31 - ((__z) >> 3)] & (1 << ((__z) & 7)))
+
+#define SMP_ZONE_PERM_SET256(__d, __z) \
+ ((__d)[31 - ((__z) >> 3)] |= (1 << ((__z) & 7)))
+
+#define SMP_ZONE_PERM_CLR256(__d, __z) \
+ ((__d)[31 - ((__z) >> 3)] &= ~(1 << ((__z) & 7)))
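A zone permission descriptor is simply a bitmap of source zone groups stored most-significant byte first, so the accessors above reduce to byte and bit arithmetic. A minimal sketch of their intended use (the function name is invented for illustration):

static void
example_zone_perm_update(void)
{
	smp_zone_perm_descr128_t d = { 0 };

	SMP_ZONE_PERM_SET128(d, 5);		/* permit zone group 5 */
	if (SMP_ZONE_PERM_BIT128(d, 5) != 0)
		SMP_ZONE_PERM_CLR128(d, 5);	/* ... and revoke it again */
}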
+
+/*
+ * SAS-2 10.4.3.8 REPORT ZONE MANAGER PASSWORD (no additional request bytes)
+ */
+typedef struct smp_report_zone_mgr_password_resp {
+ uint16_t srzmpr_exp_change_count;
+ uint8_t _reserved1[2];
+ uint8_t srzmpr_zone_mgr_password[32];
+} smp_report_zone_mgr_password_resp_t;
+
+/*
+ * SAS-2 10.4.3.9 REPORT BROADCAST
+ */
+typedef struct smp_report_broadcast_req {
+ DECL_BITFIELD2(
+ srbr_broadcast_type :4,
+ _reserved1 :4);
+ uint8_t _reserved2[3];
+} smp_report_broadcast_req_t;
+
+typedef enum smp_broadcast_type {
+ SMP_BROADCAST_CHANGE = 0x0,
+ SMP_BROADCAST_RESERVED_CHANGE_0 = 0x1,
+ SMP_BROADCAST_RESERVED_CHANGE_1 = 0x2,
+ SMP_BROADCAST_SES = 0x3,
+ SMP_BROADCAST_EXPANDER = 0x4,
+ SMP_BROADCAST_ASYNC_EVENT = 0x5,
+ SMP_BROADCAST_RESERVED_3 = 0x6,
+ SMP_BROADCAST_RESERVED_4 = 0x7,
+ SMP_BROADCAST_ZONE_ACTIVATE = 0x8
+} smp_broadcast_type_t;
+
+typedef struct smp_broadcast_descr {
+ DECL_BITFIELD2(
+ sbd_broadcast_type :4,
+ _reserved1 :4);
+ uint8_t sbd_phy_identifier;
+ DECL_BITFIELD2(
+ sbd_broadcast_reason :4,
+ _reserved2 :4);
+ uint16_t sbd_broadcast_count;
+ uint8_t _reserved3[10];
+} smp_broadcast_descr_t;
+
+typedef struct smp_report_broadcast_resp {
+ uint16_t srbr_exp_change_count;
+ DECL_BITFIELD2(
+ srbr_broadcast_type :4,
+ _reserved1 :4);
+ uint8_t srbr_number_broadcast_descrs;
+ smp_broadcast_descr_t srbr_descrs[1];
+} smp_report_broadcast_resp_t;
+
+/*
+ * SAS-2 10.4.3.10 DISCOVER
+ */
+typedef struct smp_discover_req {
+ uint8_t _reserved1[4];
+ DECL_BITFIELD2(
+ sdr_ignore_zone_grp :1,
+ _reserved2 :7);
+ uint8_t sdr_phy_identifier;
+ uint8_t _reserved3[2];
+} smp_discover_req_t;
+
+typedef struct smp_snw3_phy_cap {
+ DECL_BITFIELD4(
+ sspc_requested_logical_link_rate :4, /* smp_link_rate_t */
+ _reserved1 :2,
+ sspc_tx_ssc_type :1,
+ sspc_start :1);
+ DECL_BITFIELD7(
+ _reserved2 :2,
+ sspc_g3_ssc :1,
+ sspc_g3_no_ssc :1,
+ sspc_g2_ssc :1,
+ sspc_g2_no_ssc :1,
+ sspc_g1_ssc :1,
+ sspc_g1_no_ssc :1);
+ uint8_t _reserved3;
+ DECL_BITFIELD2(
+ sspc_parity :1,
+ _reserved4 :7);
+} smp_snw3_phy_cap_t;
+
+typedef struct smp_discover_resp {
+ uint16_t sdr_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t sdr_phy_identifier;
+ uint8_t _reserved2[2];
+ DECL_BITFIELD3(
+ sdr_attached_reason :4,
+ sdr_attached_device_type :3,
+ _reserved3 :1);
+ DECL_BITFIELD2(
+ sdr_negotiated_logical_link_rate :4, /* smp_link_rate_t */
+ _reserved4 :4);
+ DECL_BITFIELD5(
+ sdr_attached_sata_host :1,
+ sdr_attached_smp_initiator :1,
+ sdr_attached_stp_initiator :1,
+ sdr_attached_ssp_initiator :1,
+ _reserved5 :4);
+ DECL_BITFIELD6(
+ sdr_attached_sata_device :1,
+ sdr_attached_smp_target :1,
+ sdr_attached_stp_target :1,
+ sdr_attached_ssp_target :1,
+ _reserved6 :3,
+ sdr_attached_sata_port_selector :1);
+ uint64_t sdr_sas_addr;
+ uint64_t sdr_attached_sas_addr;
+ uint8_t sdr_attached_phy_identifier;
+ DECL_BITFIELD4(
+ sdr_attached_break_reply_capable :1,
+ sdr_attached_requested_inside_zpsds :1,
+ sdr_attached_inside_zpsds_persistent :1,
+ _reserved7 :5);
+ uint8_t _reserved8[6];
+ DECL_BITFIELD2(
+ sdr_hw_min_phys_link_rate :4, /* smp_link_rate_t */
+ sdr_prog_min_phys_link_rate :4); /* smp_link_rate_t */
+ DECL_BITFIELD2(
+ sdr_hw_max_phys_link_rate :4, /* smp_link_rate_t */
+ sdr_prog_max_phys_link_rate :4); /* smp_link_rate_t */
+ uint8_t sdr_phy_change_count;
+ DECL_BITFIELD3(
+ sdr_partial_pwy_timeout :4,
+ _reserved9 :3,
+ sdr_virtual_phy :1);
+ DECL_BITFIELD2(
+ sdr_routing_attr :4, /* smp_routing_attr_t */
+ _reserved10 :4);
+ DECL_BITFIELD2(
+ sdr_connector_type :7,
+ _reserved11 :1);
+ uint8_t sdr_connector_element_index;
+ uint8_t sdr_connector_physical_link;
+ uint8_t _reserved12[2];
+ uint8_t sdr_vendor[2];
+ uint64_t sdr_attached_device_name;
+ DECL_BITFIELD8(
+ sdr_zoning_enabled :1,
+ sdr_inside_zpsds :1,
+ sdr_zone_group_persistent :1,
+ _reserved13 :1,
+ sdr_requested_inside_zpsds :1,
+ sdr_inside_zpsds_persistent :1,
+ sdr_requested_inside_zpsds_changed_by_exp :1,
+ _reserved14 :1);
+ uint8_t _reserved15[2];
+ uint8_t sdr_zone_group;
+ uint8_t sdr_self_config_status;
+ uint8_t sdr_self_config_levels_completed;
+ uint8_t _reserved16[2];
+ uint64_t sdr_self_config_sas_addr;
+ smp_snw3_phy_cap_t sdr_prog_phy_cap;
+ smp_snw3_phy_cap_t sdr_current_phy_cap;
+ smp_snw3_phy_cap_t sdr_attached_phy_cap;
+ uint8_t _reserved17[6];
+ DECL_BITFIELD2(
+ sdr_negotiated_phys_link_rate :4, /* smp_link_rate_t */
+ sdr_reason :4);
+ DECL_BITFIELD3(
+ sdr_hw_muxing_supported :1,
+ sdr_negotiated_ssc :1,
+ _reserved18 :6);
+ DECL_BITFIELD7(
+ sdr_default_zoning_enabled :1,
+ _reserved19 :1,
+ sdr_default_zone_group_persistent :1,
+ _reserved20 :1,
+ sdr_default_requested_inside_zpsds :1,
+ sdr_default_inside_zpsds_persistent :1,
+ _reserved21 :2);
+ uint8_t _reserved22[2];
+ uint8_t sdr_default_zone_group;
+ DECL_BITFIELD7(
+ sdr_saved_zoning_enabled :1,
+ _reserved23 :1,
+ sdr_saved_zone_group_persistent :1,
+ _reserved24 :1,
+ sdr_saved_requested_inside_zpsds :1,
+ sdr_saved_inside_zpsds_persistent :1,
+ _reserved25 :2);
+ uint8_t _reserved26[2];
+ uint8_t sdr_saved_zone_group;
+ DECL_BITFIELD6(
+ _reserved27 :2,
+ sdr_shadow_zone_group_persistent :1,
+ _reserved28 :1,
+ sdr_shadow_requested_inside_zpsds :1,
+ sdr_shadow_inside_zpsds_persistent :1,
+ _reserved29 :2);
+ uint8_t _reserved30[2];
+ uint8_t sdr_shadow_zone_group;
+} smp_discover_resp_t;
+
+typedef enum smp_link_rate {
+ SMP_LINK_RATE_NO_CHANGE = 0x0,
+ SMP_LINK_RATE_DISABLED = 0x1,
+ SMP_LINK_RATE_RESET_PROBLEM = 0x2,
+ SMP_LINK_RATE_SPINUP_HOLD = 0x3,
+ SMP_LINK_RATE_PORT_SELECTOR = 0x4,
+ SMP_LINK_RATE_RESET = 0x5,
+ SMP_LINK_RATE_UNSUPPORTED = 0x6,
+ SMP_LINK_RATE_1_5 = 0x8,
+ SMP_LINK_RATE_3 = 0x9,
+ SMP_LINK_RATE_6 = 0xA
+} smp_link_rate_t;
+
+typedef enum smp_device_type {
+ SMP_DEV_NONE = 0x0,
+ SMP_DEV_SAS_SATA = 0x1,
+ SMP_DEV_EXPANDER = 0x2,
+ SMP_DEV_EXPANDER_OLD = 0x3
+} smp_device_type_t;
+
+typedef enum smp_routing_attr {
+ SMP_ROUTING_DIRECT = 0x0,
+ SMP_ROUTING_SUBTRACTIVE = 0x1,
+ SMP_ROUTING_TABLE = 0x2
+} smp_routing_attr_t;
+
+/*
+ * SAS-2 10.4.3.11 REPORT PHY ERROR LOG
+ */
+typedef struct smp_report_phy_error_log_req {
+ uint8_t _reserved1[5];
+ uint8_t srpelr_phy_identifier;
+ uint8_t _reserved2[2];
+} smp_report_phy_error_log_req_t;
+
+typedef struct smp_report_phy_error_log_resp {
+ uint16_t srpelr_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t srpelr_phy_identifier;
+ uint8_t _reserved2[2];
+ uint32_t srpelr_invalid_dword_count;
+ uint32_t srpelr_running_disparity_error_count;
+ uint32_t srpelr_loss_dword_sync_count;
+ uint32_t srpelr_phy_reset_problem_count;
+} smp_report_phy_error_log_resp_t;
+
+/*
+ * SAS-2 10.4.3.12 REPORT PHY SATA
+ */
+typedef struct smp_report_phy_sata_req {
+ uint8_t _reserved1[5];
+ uint8_t srpsr_phy_identifier;
+ uint8_t srpsr_affiliation_context;
+ uint8_t _reserved2;
+} smp_report_phy_sata_req_t;
+
+typedef struct smp_report_phy_sata_resp {
+ uint16_t srpsr_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t srpsr_phy_identifier;
+ uint8_t _reserved2;
+ DECL_BITFIELD4(
+ srpsr_affiliation_valid :1,
+ srpsr_affiliations_supported :1,
+ srpsr_stp_nexus_loss :1,
+ _reserved3 :5);
+ uint8_t _reserved4[4];
+ uint64_t srpsr_stp_sas_addr;
+ uint8_t srpsr_register_device_host_fis[20];
+ uint8_t _reserved5[4];
+ uint64_t srpsr_affiliated_stp_init_sas_addr;
+ uint64_t srpsr_stp_nexus_loss_sas_addr;
+ uint8_t _reserved6;
+ uint8_t srpsr_affiliation_context;
+ uint8_t srpsr_current_affiliation_contexts;
+ uint8_t srpsr_max_affiliation_contexts;
+} smp_report_phy_sata_resp_t;
+
+/*
+ * SAS-2 10.4.3.13 REPORT ROUTE INFORMATION
+ */
+typedef struct smp_report_route_info_req {
+ uint8_t _reserved1[2];
+ uint16_t srrir_exp_route_index;
+ uint8_t _reserved2;
+ uint8_t srrir_phy_identifier;
+ uint8_t _reserved3[2];
+} smp_report_route_info_req_t;
+
+typedef struct smp_report_route_info_resp {
+ uint16_t srrir_exp_change_count;
+ uint16_t srrir_exp_route_index;
+ uint8_t _reserved1;
+ uint8_t srrir_phy_identifier;
+ uint8_t _reserved2[2];
+ DECL_BITFIELD2(
+ _reserved3 :7,
+ srrir_exp_route_entry_disabled :1);
+ uint8_t _reserved4[3];
+ uint64_t srrir_routed_sas_addr;
+ uint8_t _reserved5[16];
+} smp_report_route_info_resp_t;
+
+/*
+ * SAS-2 10.4.3.14 REPORT PHY EVENT
+ */
+typedef struct smp_report_phy_event_req {
+ uint8_t _reserved1;
+ uint8_t _reserved2[4];
+ uint8_t srper_phy_identifier;
+ uint8_t _reserved3[2];
+} smp_report_phy_event_req_t;
+
+typedef struct smp_phy_event_report_descr {
+ uint8_t _reserved1[3];
+ uint8_t sped_phy_event_source;
+ uint32_t sped_phy_event;
+ uint32_t sped_peak_detector_threshold;
+} smp_phy_event_report_descr_t;
+
+typedef struct smp_report_phy_event_resp {
+ uint16_t srper_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t srper_phy_identifier;
+ uint8_t _reserved2[5];
+ uint8_t srper_n_phy_event_descrs;
+ smp_phy_event_report_descr_t srper_phy_event_descrs[1];
+} smp_report_phy_event_resp_t;
+
+/*
+ * SAS-2 10.4.3.15 DISCOVER LIST
+ */
+typedef struct smp_discover_list_req {
+ uint8_t _reserved1[4];
+ uint8_t sdlr_starting_phy_identifier;
+ uint8_t sdlr_max_descrs;
+ DECL_BITFIELD3(
+ sdlr_phy_filter :4,
+ _reserved2 :3,
+ sdlr_ignore_zone_group :1);
+ DECL_BITFIELD2(
+ sdlr_descr_type :4,
+ _reserved3 :4);
+ uint8_t _reserved4[4];
+ uint8_t sdlr_vendor[12];
+} smp_discover_list_req_t;
+
+typedef struct smp_discover_short_descr {
+ uint8_t sdsd_phy_identifier;
+ uint8_t sdsd_function_result;
+ DECL_BITFIELD3(
+ sdsd_attached_reason :4,
+ sdsd_attached_device_type :3,
+ _restricted1 :1);
+ DECL_BITFIELD2(
+ sdsd_negotiated_logical_link_rate :4, /* smp_link_rate_t */
+ _restricted2 :4);
+ DECL_BITFIELD5(
+ sdsd_attached_sata_host :1,
+ sdsd_attached_smp_initiator :1,
+ sdsd_attached_stp_initiator :1,
+ sdsd_attached_ssp_initiator :1,
+ _restricted3 :4);
+ DECL_BITFIELD6(
+ sdsd_attached_sata_device :1,
+ sdsd_attached_smp_target :1,
+ sdsd_attached_stp_target :1,
+ sdsd_attached_ssp_target :1,
+ _restricted4 :3,
+ sdsd_attached_sata_port_selector :1);
+ DECL_BITFIELD3(
+ sdsd_routing_attribute :4, /* smp_routing_attr_t */
+ _reserved1 :3,
+ sdsd_virtual_phy :1);
+ DECL_BITFIELD2(
+ _reserved2 :4,
+ sdsd_reason :4);
+ uint8_t sdsd_zone_group;
+ DECL_BITFIELD7(
+ _reserved3 :1,
+ sdsd_inside_zpsds :1,
+ sdsd_zone_group_persistent :1,
+ _reserved4 :1,
+ sdsd_requested_inside_zpsds :1,
+ sdsd_inside_zpsds_persistent :1,
+ _restricted5 :2);
+ uint8_t sdsd_attached_phy_identifier;
+ uint8_t sdsd_phy_change_count;
+ uint64_t sdsd_attached_sas_addr;
+ uint8_t _reserved5[4];
+} smp_discover_short_descr_t;
+
+typedef struct smp_discover_long_descr {
+ uint8_t _reserved1[2];
+ uint8_t sdld_function_result;
+ uint8_t _reserved2[1];
+ smp_discover_resp_t sdld_response;
+} smp_discover_long_descr_t;
+
+#define SMP_DISCOVER_RESP(_ld) \
+ (((smp_discover_long_descr_t *)(_ld))->sdld_function_result == \
+ SMP_FUNCTION_ACCEPTED ? \
+ &((smp_discover_long_descr_t *)(_ld))->sdld_response : \
+ NULL)
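SMP_DISCOVER_RESP() hides the function-result check when walking long-format DISCOVER LIST descriptors. A hedged sketch of a walker (real code must step through sdlr_descrs using sdlr_descr_length rather than plain array indexing; the function below is illustrative only):

static void
example_scan_long_descrs(smp_discover_long_descr_t *ld, int ndescrs)
{
	int i;

	for (i = 0; i < ndescrs; i++) {
		smp_discover_resp_t *dr = SMP_DISCOVER_RESP(&ld[i]);

		if (dr == NULL)
			continue;	/* this phy's DISCOVER was not accepted */
		/* e.g. inspect dr->sdr_attached_sas_addr, dr->sdr_phy_identifier */
	}
}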
+
+typedef struct smp_discover_list_resp {
+ uint16_t sdlr_exp_change_count;
+ uint8_t _reserved1[2];
+ uint8_t sdlr_starting_phy_identifier;
+ uint8_t sdlr_n_descrs;
+ DECL_BITFIELD2(
+ sdlr_phy_filter :4,
+ _reserved2 :4);
+ DECL_BITFIELD2(
+ sdlr_descr_type :4,
+ _reserved3 :4);
+ uint8_t sdlr_descr_length;
+ uint8_t _reserved4[3];
+ DECL_BITFIELD5(
+ sdlr_externally_configurable_route_table :1,
+ sdlr_configuring :1,
+ _reserved5 :4,
+ sdlr_zoning_enabled :1,
+ sdlr_zoning_supported :1);
+ uint8_t _reserved6;
+ uint16_t sdlr_last_sc_status_descr_index;
+ uint16_t sdlr_last_phy_event_list_descr_index;
+ uint8_t _reserved7[10];
+ uint8_t sdlr_vendor[16];
+ uint8_t sdlr_descrs[1]; /* short or long format */
+} smp_discover_list_resp_t;
+
+/*
+ * SAS-2 10.4.3.16 REPORT PHY EVENT LIST
+ */
+typedef struct smp_report_phy_event_list_req {
+ uint8_t _reserved1[2];
+ uint16_t srpelr_starting_descr_index;
+} smp_report_phy_event_list_req_t;
+
+typedef struct smp_phy_event_list_descr {
+ uint8_t _reserved1[2];
+ uint8_t speld_phy_identifier;
+ uint8_t speld_phy_event_source;
+ uint32_t speld_phy_event;
+ uint32_t speld_peak_detector_threshold;
+} smp_phy_event_list_descr_t;
+
+typedef struct smp_report_phy_event_list_resp {
+ uint16_t srpelr_exp_change_count;
+ uint16_t srpelr_starting_descr_index;
+ uint16_t srpelr_last_descr_index;
+ uint8_t srpelr_phy_event_list_descr_length;
+ uint8_t _reserved1[3];
+ uint8_t srpelr_n_descrs;
+ smp_phy_event_list_descr_t srpelr_descrs[1];
+} smp_report_phy_event_list_resp_t;
+
+/*
+ * SAS-2 10.4.3.17 REPORT EXPANDER ROUTE TABLE LIST
+ */
+typedef struct smp_report_exp_route_table_list_req {
+ uint8_t _reserved1[4];
+ uint16_t srertlr_max_descrs;
+ uint16_t srertlr_starting_routed_sas_addr_index;
+ uint8_t _reserved2[7];
+ uint8_t srertlr_starting_phy_identifier;
+ uint8_t _reserved3[8];
+} smp_report_exp_route_table_list_req_t;
+
+typedef struct smp_route_table_descr {
+ uint64_t srtd_routed_sas_addr;
+ uint8_t srtd_phy_bitmap[6];
+ DECL_BITFIELD2(
+ _reserved1 :7,
+ srtd_zone_group_valid :1);
+ uint8_t srtd_zone_group;
+} smp_route_table_descr_t;
+
+#define SMP_ROUTE_PHY(_d, _s, _i) \
+ ((_d)->srtd_phy_bitmap[(48 - (_i) + (_s)) >> 3] & \
+ (1 << ((48 - (_i) + (_s)) & 7)))
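SMP_ROUTE_PHY() indexes the 48-bit srtd_phy_bitmap relative to the starting phy identifier from the surrounding response. An illustrative helper (name invented):

static boolean_t
example_phy_in_route(smp_route_table_descr_t *d, int start_phy, int phy)
{
	/* nonzero means 'phy' routes to srtd_routed_sas_addr */
	return (SMP_ROUTE_PHY(d, start_phy, phy) ? B_TRUE : B_FALSE);
}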
+
+typedef struct smp_report_exp_route_table_list_resp {
+ uint16_t srertlr_exp_change_count;
+ uint16_t srertlr_route_table_change_count;
+ DECL_BITFIELD3(
+ _reserved1 :1,
+ srertlr_configuring :1,
+ _reserved2 :6);
+ uint8_t _reserved3;
+ uint16_t srertlr_n_descrs;
+ uint16_t srertlr_first_routed_sas_addr_index;
+ uint16_t srertlr_last_routed_sas_addr_index;
+ uint8_t _reserved4[3];
+ uint8_t srertlr_starting_phy_identifier;
+ uint8_t _reserved5[12];
+ smp_route_table_descr_t srertlr_descrs[1];
+} smp_report_exp_route_table_list_resp_t;
+
+/*
+ * SAS-2 10.4.3.18 CONFIGURE GENERAL (no additional response)
+ */
+typedef struct smp_config_general_req {
+ uint16_t scgr_expected_exp_change_count;
+ uint8_t _reserved1[2];
+ DECL_BITFIELD6(
+ scgr_update_stp_bus_inactivity :1,
+ scgr_update_stp_max_conn :1,
+ scgr_update_stp_smp_nexus_loss :1,
+ scgr_update_initial_time_to_reduced_functionality :1,
+ scgr_update_stp_reject_to_open :1,
+ _reserved2 :3);
+ uint8_t _reserved3;
+ uint16_t scgr_stp_bus_inactivity;
+ uint16_t scgr_stp_max_conn;
+ uint16_t scgr_stp_smp_nexus_loss;
+ uint8_t scgr_initial_time_to_reduced_functionality;
+ uint8_t _reserved4;
+ uint16_t scgr_stp_reject_to_open;
+} smp_config_general_req_t;
+
+/*
+ * SAS-2 10.4.3.19 ENABLE DISABLE ZONING (no additional response)
+ */
+typedef struct smp_enable_disable_zoning_req {
+ uint16_t sedzr_expected_exp_change_count;
+ DECL_BITFIELD2(
+ sedzr_save :2, /* smp_zoning_save_t */
+ _reserved1 :6);
+ uint8_t _reserved2;
+ DECL_BITFIELD2(
+ sedzr_enable_disable_zoning :2,
+ _reserved3 :6);
+ uint8_t _reserved4[3];
+} smp_enable_disable_zoning_req_t;
+
+typedef enum smp_zoning_save {
+ SMP_ZONING_SAVE_CURRENT = 0x0,
+ SMP_ZONING_SAVE_SAVED = 0x1,
+ SMP_ZONING_SAVE_BOTH_IF_SUPP = 0x2,
+ SMP_ZONING_SAVE_BOTH = 0x3
+} smp_zoning_save_t;
+
+typedef enum smp_zoning_enable_op {
+ SMP_ZONING_ENABLE_OP_NONE = 0x0,
+ SMP_ZONING_ENABLE_OP_ENABLE = 0x1,
+ SMP_ZONING_ENABLE_OP_DISABLE = 0x2
+} smp_zoning_enable_op_t;
+
+/*
+ * SAS-2 10.4.3.20 ZONED BROADCAST (no additional response)
+ */
+typedef struct smp_zoned_broadcast_req {
+ uint8_t _restricted1[2];
+ DECL_BITFIELD2(
+ szbr_broadcast_type :4,
+ _reserved :4);
+ uint8_t szbr_n_broadcast_source_zone_groups;
+ uint8_t szbr_broadcast_source_zone_groups[1];
+} smp_zoned_broadcast_req_t;
+
+/*
+ * SAS-2 10.4.3.21 ZONE LOCK
+ */
+typedef struct smp_zone_lock_req {
+ uint16_t szlr_expected_exp_change_count;
+ uint16_t szlr_zone_lock_inactivity_timeout;
+ uint8_t szlr_zone_manager_password[32];
+} smp_zone_lock_req_t;
+
+typedef struct smp_zone_lock_resp {
+ uint8_t _reserved1[4];
+ uint64_t szlr_active_zone_manager_sas_addr;
+} smp_zone_lock_resp_t;
+
+/*
+ * SAS-2 10.4.3.22 ZONE ACTIVATE (no additional response)
+ */
+typedef struct smp_zone_activate_req {
+ uint16_t szar_expected_exp_change_count;
+ uint8_t _reserved1[2];
+} smp_zone_activate_req_t;
+
+/*
+ * SAS-2 10.4.3.23 ZONE UNLOCK (no additional response)
+ */
+typedef struct smp_zone_unlock_req {
+ uint8_t _restricted1[2];
+ DECL_BITFIELD2(
+ szur_activate_required :1,
+ _reserved1 :7);
+ uint8_t _reserved2;
+} smp_zone_unlock_req_t;
+
+/*
+ * SAS-2 10.4.3.24 CONFIGURE ZONE MANAGER PASSWORD (no additional response)
+ */
+typedef struct smp_config_zone_manager_password_req {
+ uint16_t sczmpr_expected_exp_change_count;
+ DECL_BITFIELD2(
+ sczmpr_save :2, /* smp_zoning_save_t */
+ _reserved1 :6);
+ uint8_t _reserved2;
+ uint8_t sczmpr_zone_manager_password[32];
+ uint8_t sczmpr_new_zone_manager_password[32];
+} smp_config_zone_manager_password_req_t;
+
+/*
+ * SAS-2 10.4.3.25 CONFIGURE ZONE PHY INFORMATION (no additional response)
+ */
+typedef struct smp_zone_phy_config_descr {
+ uint8_t szpcd_phy_identifier;
+ DECL_BITFIELD6(
+ _reserved1 :2,
+ szpcd_zone_group_persistent :1,
+ _reserved2 :1,
+ szpcd_requested_inside_zpsds :1,
+ szpcd_inside_zpsds_persistent :1,
+ _reserved3 :2);
+ uint8_t _reserved4;
+ uint8_t szpcd_zone_group;
+} smp_zone_phy_config_descr_t;
+
+typedef struct smp_config_zone_phy_info_req {
+ uint16_t sczpir_expected_exp_change_count;
+ DECL_BITFIELD2(
+ sczpir_save :2, /* smp_zoning_save_t */
+ _reserved1 :6);
+ uint8_t sczpir_n_descrs;
+ smp_zone_phy_config_descr_t sczpir_descrs[1];
+} smp_config_zone_phy_info_req_t;
+
+/*
+ * SAS-2 10.4.3.26 CONFIGURE ZONE PERMISSION TABLE (no additional response)
+ */
+typedef struct smp_config_zone_perm_table_req {
+ uint16_t sczptr_expected_exp_change_count;
+ uint8_t sczptr_starting_source_zone_group;
+ uint8_t sczptr_n_descrs;
+ DECL_BITFIELD3(
+ sczptr_save :2, /* smp_zoning_save_t */
+ _reserved1 :4,
+ sczptr_n_zone_groups :2); /* smp_n_zone_grps_t */
+ uint8_t _reserved2[7];
+ uint8_t sczptr_descrs[1]; /* smp_zone_perm_descrXXX_t */
+} smp_config_zone_perm_table_req_t;
+
+/*
+ * SAS-2 10.4.3.27 CONFIGURE ROUTE INFORMATION (no additional response)
+ */
+typedef struct smp_config_route_info_req {
+ uint16_t scrir_expected_exp_change_count;
+ uint16_t scrir_exp_route_index;
+ uint8_t _reserved1;
+ uint8_t scrir_phy_identifier;
+ uint8_t _reserved2[2];
+ DECL_BITFIELD2(
+ _reserved3 :7,
+ scrir_disable_exp_route_entry :1);
+ uint8_t _reserved4[3];
+ uint64_t scrir_routed_sas_addr;
+ uint8_t _reserved5[16];
+} smp_config_route_info_req_t;
+
+/*
+ * SAS-2 10.4.3.28 PHY CONTROL (no additional response)
+ */
+typedef struct smp_phy_control_req {
+ uint16_t spcr_expected_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t spcr_phy_identifier;
+ uint8_t spcr_phy_operation;
+ DECL_BITFIELD2(
+ spcr_update_partial_pwy_timeout :1,
+ _reserved2 :7);
+ uint8_t _reserved3[12];
+ uint64_t spcr_attached_device_name;
+ DECL_BITFIELD2(
+ _reserved4 :4,
+ spcr_prog_min_phys_link_rate :4); /* smp_link_rate_t */
+ DECL_BITFIELD2(
+ _reserved5 :4,
+ spcr_prog_max_phys_link_rate :4); /* smp_link_rate_t */
+ uint8_t _reserved6[2];
+ DECL_BITFIELD2(
+ spcr_partial_pwy_timeout :4,
+ _reserved7 :4);
+ uint8_t _reserved8[3];
+} smp_phy_control_req_t;
+
+typedef enum smp_phy_op {
+ SMP_PHY_OP_NOP = 0x00,
+ SMP_PHY_OP_LINK_RESET = 0x01,
+ SMP_PHY_OP_HARD_RESET = 0x02,
+ SMP_PHY_OP_DISABLE = 0x03,
+ SMP_PHY_OP_CLEAR_ERROR_LOG = 0x05,
+ SMP_PHY_OP_CLEAR_AFFILIATION = 0x06,
+ SMP_PHY_OP_TRANSMIT_SATA_PORT_SELECTION_SIGNAL = 0x07,
+ SMP_PHY_OP_CLEAR_STP_NEXUS_LOSS = 0x08,
+ SMP_PHY_OP_SET_ATTACHED_DEVICE_NAME = 0x09
+} smp_phy_op_t;
+
+/*
+ * SAS-2 10.4.3.29 PHY TEST FUNCTION (no additional response)
+ */
+typedef struct smp_phy_test_function_req {
+ uint16_t sptfr_expected_exp_change_count;
+ uint8_t _reserved1[3];
+ uint8_t sptfr_phy_identifier;
+ uint8_t sptfr_phy_test_function; /* smp_phy_test_function_t */
+ uint8_t sptfr_phy_test_pattern;
+ uint8_t _reserved2[3];
+ DECL_BITFIELD4(
+ sptfr_test_pattern_phys_link_rate :4, /* smp_link_rate_t */
+ sptfr_test_pattern_ssc :2,
+ sptfr_test_pattern_sata :1,
+ _reserved3 :1);
+ uint8_t _reserved4[3];
+ uint8_t sptfr_phy_test_pattern_dwords_ctl;
+ uint8_t sptfr_phy_test_pattern_dwords[8];
+ uint8_t _reserved5[12];
+} smp_phy_test_function_req_t;
+
+typedef enum smp_phy_test_function {
+ SMP_PHY_TEST_FN_STOP = 0x00,
+ SMP_PHY_TEST_FN_TRANSMIT_PATTERN = 0x01
+} smp_phy_test_function_t;
+
+/*
+ * SAS-2 10.4.3.30 CONFIGURE PHY EVENT (no additional response)
+ */
+typedef struct smp_phy_event_config_descr {
+ uint8_t _reserved1[3];
+ uint8_t specd_phy_event_source;
+ uint32_t specd_peak_value_detector_threshold;
+} smp_phy_event_config_descr_t;
+
+typedef struct smp_config_phy_event_req {
+ uint16_t scper_expected_exp_change_count;
+ DECL_BITFIELD2(
+ scper_clear_peaks :1,
+ _reserved1 :7);
+ uint8_t _reserved2[2];
+ uint8_t scper_phy_identifier;
+ uint8_t _reserved3;
+ uint8_t scper_n_descrs;
+ smp_phy_event_config_descr_t scper_descrs[1];
+} smp_config_phy_event_req_t;
+
#pragma pack()
#ifdef __cplusplus
}
#endif
-#endif /* _SYS_SCSI_IMPL_SMP_FRAME_H */
+#endif /* _SYS_SCSI_GENERIC_SMP_FRAMES_H */
diff --git a/usr/src/uts/common/sys/scsi/impl/sas_transport.h b/usr/src/uts/common/sys/scsi/impl/sas_transport.h
index a592fe6308..43d55c28ad 100644
--- a/usr/src/uts/common/sys/scsi/impl/sas_transport.h
+++ b/usr/src/uts/common/sys/scsi/impl/sas_transport.h
@@ -67,7 +67,8 @@ typedef struct smp_pkt {
size_t pkt_reqsize;
size_t pkt_rspsize;
int pkt_timeout;
- uchar_t pkt_reason; /* code from errno.h */
+ uchar_t pkt_reason; /* code from errno.h */
+ uchar_t pkt_will_retry; /* will retry on EAGAIN */
sas_addr_t *pkt_address;
} smp_pkt_t;
@@ -87,22 +88,234 @@ struct sas_hba_tran {
int (*tran_smp_start)(
struct smp_pkt *pkt);
+ int (*tran_smp_init)(
+ dev_info_t *self,
+ dev_info_t *child,
+ sas_hba_tran_t *tran,
+ smp_device_t *smp);
+
+ void (*tran_smp_free)(
+ dev_info_t *self,
+ dev_info_t *child,
+ sas_hba_tran_t *tran,
+ smp_device_t *smp);
};
-extern sas_hba_tran_t *sas_hba_tran_alloc(dev_info_t *dip, int flags);
+extern sas_hba_tran_t *sas_hba_tran_alloc(dev_info_t *dip);
extern int sas_hba_attach_setup(dev_info_t *dip, sas_hba_tran_t *smp);
extern int sas_hba_detach(dev_info_t *self);
extern void sas_hba_tran_free(sas_hba_tran_t *smp);
+extern int smp_hba_bus_config(dev_info_t *, char *, dev_info_t **);
+extern int smp_hba_bus_config_taddr(dev_info_t *, char *);
+
+
extern int sas_smp_transport(struct smp_pkt *pkt);
extern int sas_ifgetcap(struct sas_addr *ap, char *cap);
extern int sas_hba_probe_smp(struct smp_device *smp_devp);
extern int sas_hba_lookup_capstr(char *capstr);
+/*
+ * Phymap support
+ */
+typedef struct __sas_phymap sas_phymap_t;
+typedef enum { PHYMAP_MODE_SIMPLE } sas_phymap_mode_t;
+typedef void (*sas_phymap_activate_cb_t)
+ (void *phymap_priv, char *ua, void **ua_privp);
+typedef void (*sas_phymap_deactivate_cb_t)
+ (void *phymap_priv, char *ua, void *ua_priv);
+
+extern int sas_phymap_create(dev_info_t *hba_dip,
+ clock_t settle_us,
+ sas_phymap_mode_t mode,
+ void *mode_argument,
+ void *phymap_priv,
+ sas_phymap_activate_cb_t activate_cb,
+ sas_phymap_deactivate_cb_t deactivate_cb,
+ sas_phymap_t **phymapp);
+
+void sas_phymap_destroy(sas_phymap_t *phymap);
+
+extern int sas_phymap_phy_add(sas_phymap_t *phymap,
+ int phy,
+ uint64_t local_sas_address,
+ uint64_t remote_sas_address);
+
+extern int sas_phymap_phy_rem(sas_phymap_t *phymap,
+ int phy);
+
+extern char *sas_phymap_lookup_ua(sas_phymap_t *phymap,
+ uint64_t local_sas_address,
+ uint64_t remote_sas_address);
+
+extern void *sas_phymap_lookup_uapriv(sas_phymap_t *phymap,
+ char *ua);
+
+extern int sas_phymap_uahasphys(sas_phymap_t *phymap,
+ char *ua);
+
+typedef struct __sas_phymap_phys sas_phymap_phys_t;
+extern sas_phymap_phys_t *sas_phymap_ua2phys(sas_phymap_t *, char *ua);
+extern int sas_phymap_phys_next(sas_phymap_phys_t *);
+void sas_phymap_phys_free(sas_phymap_phys_t *);
+
+extern char *sas_phymap_phy2ua(sas_phymap_t *, int);
+void sas_phymap_ua_free(char *);
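Taken together, the phymap interfaces let an HBA driver report raw per-phy link-up and link-down events and have the framework coalesce them into wide-port unit addresses. A minimal sketch, assuming DDI_SUCCESS-style return values; the xx_ names, settle time, and SAS addresses are invented for illustration:

static void
xx_ua_activate(void *priv, char *ua, void **ua_privp)
{
	/* first phy of the port at unit-address 'ua' came up */
}

static void
xx_ua_deactivate(void *priv, char *ua, void *ua_priv)
{
	/* last phy of the port at unit-address 'ua' went down */
}

static int
xx_phymap_setup(dev_info_t *hba_dip, sas_phymap_t **mapp)
{
	if (sas_phymap_create(hba_dip, 250000 /* settle, usec (assumed) */,
	    PHYMAP_MODE_SIMPLE, NULL, NULL,
	    xx_ua_activate, xx_ua_deactivate, mapp) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* report one phy; local/remote addresses come from the link-up event */
	return (sas_phymap_phy_add(*mapp, 0,
	    0x5000c50000d756aaULL, 0x5000c50000d756abULL));
}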
+
+
#endif /* defined(_KERNEL) */
+#define KSTAT_SAS_PHY_CLASS "SAS_phy_stat"
+
+/*
+ * Format of the ks_name field for SAS Phy Stat
+ *
+ * driver_name.initiator_port_SAS_address.initiator_port_instance_number.phyid
+ * Example: pmcs.5000c50000d756aa.2.0
+ *
+ * driver_name:
+ * driver name from di_driver_name() on SAS initiator port devinfo node.
+ * initiator_port_SAS_address:
+ * SAS address of the initiator port that phy stat is reported for.
+ * initiator_port_instance_number:
+ * instance number of the initiator port that phy stat is reported for.
+ * phyid:
+ * prop phyIdentifier under initiator port node.
+ */
+
+/* Port Protocol - kstat structure definition */
+
+typedef struct sas_port_protocol_stats {
+ kstat_named_t seconds_since_last_reset;
+ kstat_named_t input_requests;
+ kstat_named_t output_requests;
+ kstat_named_t control_requests;
+ kstat_named_t input_megabytes;
+ kstat_named_t output_megabytes;
+} sas_port_protocol_stats_t;
+
+/* Port - kstat structure definition */
+
+typedef struct sas_port_stats {
+ kstat_named_t seconds_since_last_reset;
+ kstat_named_t tx_frames;
+ kstat_named_t tx_words;
+ kstat_named_t rx_frames;
+ kstat_named_t rx_words;
+} sas_port_stats_t;
+
+/* PHY - kstat structure definition */
+
+typedef struct sas_phy_stats {
+ kstat_named_t seconds_since_last_reset;
+ kstat_named_t tx_frames;
+ kstat_named_t tx_words;
+ kstat_named_t rx_frames;
+ kstat_named_t rx_words;
+ kstat_named_t invalid_dword_count;
+ kstat_named_t running_disparity_error_count;
+ kstat_named_t loss_of_dword_sync_count;
+ kstat_named_t phy_reset_problem_count;
+} sas_phy_stats_t;
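A sketch of how a driver might publish the per-phy kstat under the class and ks_name format documented above; xx_create_phy_kstat and its arguments are invented, and error handling is trimmed:

static kstat_t *
xx_create_phy_kstat(dev_info_t *iport_dip, uint64_t port_wwn, int phyid)
{
	char name[KSTAT_STRLEN];
	kstat_t *ks;
	sas_phy_stats_t *ps;

	/* driver_name.initiator_port_SAS_address.instance.phyid */
	(void) snprintf(name, sizeof (name), "%s.%016llx.%d.%d",
	    ddi_driver_name(iport_dip), (unsigned long long)port_wwn,
	    ddi_get_instance(iport_dip), phyid);

	ks = kstat_create(ddi_driver_name(iport_dip),
	    ddi_get_instance(iport_dip), name, KSTAT_SAS_PHY_CLASS,
	    KSTAT_TYPE_NAMED,
	    sizeof (sas_phy_stats_t) / sizeof (kstat_named_t), 0);
	if (ks == NULL)
		return (NULL);

	ps = (sas_phy_stats_t *)ks->ks_data;
	kstat_named_init(&ps->seconds_since_last_reset,
	    "seconds_since_last_reset", KSTAT_DATA_UINT64);
	/* ... initialize the remaining counters the same way ... */
	kstat_install(ks);
	return (ks);
}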
+
+
+/*
+ * Supported Protocol property
+ */
+#define SAS_PROTOCOL_SSP 0x00000001
+#define SAS_PROTOCOL_STP 0x00000010
+#define SAS_PROTOCOL_SMP 0x00000100
+#define SAS_PROTOCOL_SATA 0x00001000
+
+
+/*
+ * Definition - Negotiated Physical Link Rate
+ * Based on Table 288 (Section 10.4.3.10) of the spec (SAS-2 r-15), these
+ * constants represent "Negotiated physical link rate"
+ * (and implicitly the State of the phy).
+ */
+
+#define SAS_LINK_RATE_UNKNOWN 0x0 /* Phy is enabled. */
+ /* Speed is unknown */
+#define SAS_LINK_RATE_DISABLED 0x1 /* Phy is disabled. */
+ /* Speed is undefined */
+#define SAS_LINK_RATE_FAILED 0x2 /* Phy is enabled. */
+ /* Failed speed negotiation. */
+#define SAS_LINK_RATE_SATASPINUP 0x3 /* Phy is enabled. */
+ /* Detected a SATA device and */
+ /* entered the SATA Spinup hold */
+ /* state */
+#define SAS_LINK_RATE_SATAPORTSEL 0x4 /* Phy enabled. */
+ /* The phy is attached to a */
+ /* Port Selector (SATA-2.6). */
+#define SAS_LINK_RATE_RESET_IN_PROGRESS 0x5 /* Phy is enabled. */
+ /* Expander is performing SMP */
+ /* PHY CONTROL Link/Hard Reset */
+#define SAS_LINK_RATE_PHY_UNSUPPORTED 0x6 /* Phy is enabled. */
+ /* Unsupported phy settings */
+#define SAS_LINK_RATE_RESERVED 0x7 /* Undefined. Reserved. */
+#define SAS_LINK_RATE_1_5GBIT 0x8 /* Phy enabled at 1.5 GBit/sec */
+#define SAS_LINK_RATE_3GBIT 0x9 /* Phy enabled at 3 GBit/sec */
+#define SAS_LINK_RATE_6GBIT 0xA /* Phy enabled at 6 GBit/sec. */
+
+
+/*
+ * Definition - "phy-info" property
+ *
+ * The property is an nvlist_array that represents an array of the
+ * nvlists on a per HBA basis. The individual elements of the array
+ * (the nvlists) represent the following properties for each phy of the HBA
+ */
+#define SAS_PHY_INFO "phy-info" /* Phy property name */
+#define SAS_PHY_INFO_NVL "phy-info-nvl" /* NVL array name */
+
+#define SAS_PHY_ID "PhyIdentifier" /* DATA_TYPE_UINT8 */
+#define SAS_NEG_LINK_RATE "NegotiatedLinkRate" /* DATA_TYPE_INT8 */
+#define SAS_PROG_MIN_LINK_RATE "ProgrammedMinLinkRate" /* DATA_TYPE_INT8 */
+#define SAS_HW_MIN_LINK_RATE "HardwareMinLinkRate" /* DATA_TYPE_INT8 */
+#define SAS_PROG_MAX_LINK_RATE "ProgrammedMaxLinkRate" /* DATA_TYPE_INT8 */
+#define SAS_HW_MAX_LINK_RATE "HardwareMaxLinkRate" /* DATA_TYPE_INT8 */
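For illustration, one element of the "phy-info" nvlist array might be assembled as below; xx_phy_info_nvl is an invented helper, and how the finished array is attached to the HBA node is driver-specific and not shown:

static nvlist_t *
xx_phy_info_nvl(uint8_t phyid, int8_t neg_rate)
{
	nvlist_t *nvl = NULL;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) != 0)
		return (NULL);
	(void) nvlist_add_uint8(nvl, SAS_PHY_ID, phyid);
	(void) nvlist_add_int8(nvl, SAS_NEG_LINK_RATE, neg_rate);
	(void) nvlist_add_int8(nvl, SAS_PROG_MIN_LINK_RATE, SAS_LINK_RATE_1_5GBIT);
	(void) nvlist_add_int8(nvl, SAS_HW_MIN_LINK_RATE, SAS_LINK_RATE_1_5GBIT);
	(void) nvlist_add_int8(nvl, SAS_PROG_MAX_LINK_RATE, SAS_LINK_RATE_6GBIT);
	(void) nvlist_add_int8(nvl, SAS_HW_MAX_LINK_RATE, SAS_LINK_RATE_6GBIT);
	return (nvl);
}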
+
+
+/*
+ * Event definitions
+ */
+/* Event Class */
+#define EC_HBA "EC_hba"
+
+/* Event Sub-Class */
+#define ESC_SAS_HBA_PORT_BROADCAST "ESC_sas_hba_port_broadcast"
+
+/* Event Types for above Subclass */
+#define SAS_PORT_BROADCAST_CHANGE "port_broadcast_change"
+#define SAS_PORT_BROADCAST_SES "port_broadcast_ses"
+#define SAS_PORT_BROADCAST_D24_0 "port_broadcast_d24_0"
+#define SAS_PORT_BROADCAST_D27_4 "port_broadcast_d27_4"
+#define SAS_PORT_BROADCAST_D01_4 "port_broadcast_d01_4"
+#define SAS_PORT_BROADCAST_D04_7 "port_broadcast_d04_7"
+#define SAS_PORT_BROADCAST_D16_7 "port_broadcast_d16_7"
+#define SAS_PORT_BROADCAST_D29_7 "port_broadcast_d29_7"
+
+/* Event Sub-Class */
+#define ESC_SAS_PHY_EVENT "ESC_sas_phy_event"
+
+/* Event Types for above Subclass */
+#define SAS_PHY_ONLINE "port_online"
+#define SAS_PHY_OFFLINE "port_offline"
+#define SAS_PHY_REMOVE "port_remove"
+
+/* Event Payload Names */
+#define SAS_DRV_INST "driver_instance"
+#define SAS_PORT_ADDR "port_address"
+#define SAS_DEVFS_PATH "devfs_path"
+#define SAS_EVENT_TYPE "event_type"
+#define SAS_LINK_RATE "link_rate"
+/* SAS_PHY_ID - Defined Above */
+
+
+
#ifdef __cplusplus
}
#endif
diff --git a/usr/src/uts/common/sys/scsi/impl/transport.h b/usr/src/uts/common/sys/scsi/impl/transport.h
index 77820c9f43..7a7bb66590 100644
--- a/usr/src/uts/common/sys/scsi/impl/transport.h
+++ b/usr/src/uts/common/sys/scsi/impl/transport.h
@@ -39,6 +39,12 @@ extern "C" {
#ifdef _KERNEL
/*
+ * Opaque handles to address maps
+ */
+typedef struct __scsi_iportmap scsi_hba_iportmap_t;
+typedef struct __scsi_tgtmap scsi_hba_tgtmap_t;
+
+/*
* SCSI transport structures
*
* As each Host Adapter makes itself known to the system,
@@ -54,7 +60,9 @@ typedef struct scsi_hba_tran scsi_hba_tran_t;
struct scsi_hba_tran {
/*
- * Ptr to the device info structure for this particular HBA.
+ * Ptr to the device info structure for this particular HBA. If a SCSA
+ * HBA driver separates initiator port function from HBA function,
+ * this field still refers to the HBA and is used to manage DMA.
*/
dev_info_t *tran_hba_dip;
@@ -64,8 +72,8 @@ struct scsi_hba_tran {
void *tran_hba_private; /* HBA softstate */
/*
- * The following two fields are only used in the SCSI_HBA_TRAN_CLONE
- * case. Consider using SCSI_HBA_ADDR_COMPLEX instead.
+ * The following two fields are only used in the deprecated
+ * SCSI_HBA_TRAN_CLONE case. Use SCSI_HBA_ADDR_COMPLEX instead.
*/
void *tran_tgt_private;
struct scsi_device *tran_sd;
@@ -215,8 +223,7 @@ struct scsi_hba_tran {
* open_flag: bit field indicating which minor nodes are open.
* 0 = closed, 1 = shared open, all bits 1 = excl open.
*
- * XXX Unused if hba driver chooses to implement own
- * xxopen(9e) entry point
+ * NOTE: Unused if HBA driver implements its own open(9e) entry point.
*/
kmutex_t tran_open_lock;
uint64_t tran_open_flag;
@@ -246,7 +253,7 @@ struct scsi_hba_tran {
void *result);
/*
- * Inter-Connect type of trasnport as defined in
+ * Inter-Connect type of transport as defined in
* usr/src/uts/common/sys/scsi/impl/services.h
*/
int tran_interconnect_type;
@@ -286,6 +293,16 @@ struct scsi_hba_tran {
*/
dev_info_t *tran_iport_dip;
+ /*
+ * map of initiator ports below HBA
+ */
+ scsi_hba_iportmap_t *tran_iportmap;
+
+ /*
+ * map of targets below initiator
+ */
+ scsi_hba_tgtmap_t *tran_tgtmap;
+
#ifdef SCSI_SIZE_CLEAN_VERIFY
/*
* Must be last: Building a driver with-and-without
@@ -306,7 +323,7 @@ _NOTE(SCHEME_PROTECTS_DATA("stable data",
scsi_hba_tran::tran_open_flag
scsi_hba_tran::tran_pkt_cache_ptr))
/*
- * we only modify the dma atributes (like dma_attr_granular) upon
+ * we only modify the dma attributes (like dma_attr_granular) upon
* attach and in response to a setcap. It is also up to the target
* driver to not have any outstanding I/Os when it is changing the
* capabilities of the transport.
@@ -373,7 +390,22 @@ int scsi_hba_probe(
struct scsi_device *sd,
int (*callback)(void));
-char *scsi_get_device_type_string(
+int scsi_hba_probe_pi(
+ struct scsi_device *sd,
+ int (*callback)(void),
+ int pi);
+
+int scsi_hba_ua_get_reportdev(
+ struct scsi_device *sd,
+ char *ba,
+ int len);
+
+int scsi_hba_ua_get(
+ struct scsi_device *sd,
+ char *ua,
+ int len);
+
+char *scsi_get_device_type_string(
char *prop_name,
dev_info_t *hba_dip,
struct scsi_device *sd);
@@ -443,8 +475,7 @@ void scsi_hba_nodename_compatible_free(
char *nodename,
char **compatible);
-
-int scsi_hba_prop_update_inqstring(
+int scsi_device_prop_update_inqstring(
struct scsi_device *sd,
char *name,
char *data,
@@ -453,8 +484,12 @@ int scsi_hba_prop_update_inqstring(
void scsi_hba_pkt_comp(
struct scsi_pkt *pkt);
+int scsi_device_identity(
+ struct scsi_device *sd,
+ int (*callback)(void));
+
char *scsi_hba_iport_unit_address(
- dev_info_t *self);
+ dev_info_t *dip);
int scsi_hba_iport_register(
dev_info_t *dip,
@@ -466,6 +501,8 @@ int scsi_hba_iport_exist(
dev_info_t *scsi_hba_iport_find(
dev_info_t *pdip,
char *portnm);
+
+
/*
* Flags for scsi_hba_attach
*
@@ -496,8 +533,8 @@ dev_info_t *scsi_hba_iport_find(
* same driver. The driver can distinguish context
* by calling scsi_hba_iport_unit_address().
*
- * SCSI_HBA_TRAN_CLONE Consider using SCSI_HBA_ADDR_COMPLEX instead.
- * SCSI_HBA_TRAN_CLONE is a KLUDGE to address
+ * SCSI_HBA_TRAN_CLONE Deprecated: use SCSI_HBA_ADDR_COMPLEX instead.
+ * SCSI_HBA_TRAN_CLONE was a KLUDGE to address
* limitations of the scsi_address(9S) structure
* via duplication of scsi_hba_tran(9S) and
* use of tran_tgt_private.
@@ -507,10 +544,10 @@ dev_info_t *scsi_hba_iport_find(
#define SCSI_HBA_TRAN_PHCI 0x02 /* treat HBA as mpxio 'pHCI' */
#define SCSI_HBA_TRAN_CDB 0x04 /* allocate cdb */
#define SCSI_HBA_TRAN_SCB 0x08 /* allocate sense */
+#define SCSI_HBA_HBA 0x10 /* all HBA children are iports */
#define SCSI_HBA_ADDR_SPI 0x20 /* scsi_address in SPI form */
#define SCSI_HBA_ADDR_COMPLEX 0x40 /* scsi_address is COMPLEX */
-#define SCSI_HBA_HBA 0x80 /* all HBA children are iport */
/* upper bits used to record SCSA configuration state */
#define SCSI_HBA_SCSA_PHCI 0x10000 /* need mdi_phci_unregister */
@@ -526,8 +563,8 @@ dev_info_t *scsi_hba_iport_find(
* Support extra flavors for SCSA children
*/
#define SCSA_FLAVOR_SCSI_DEVICE NDI_FLAVOR_VANILLA
-#define SCSA_FLAVOR_IPORT 1
-#define SCSA_FLAVOR_SMP 2
+#define SCSA_FLAVOR_SMP 1
+#define SCSA_FLAVOR_IPORT 2
#define SCSA_NFLAVORS 3
/*
@@ -536,6 +573,80 @@ dev_info_t *scsi_hba_iport_find(
#define SCSI_HBA_MAX_IPORTS 32
/*
+ * SCSI iport map interfaces
+ */
+int scsi_hba_iportmap_create(
+ dev_info_t *hba_dip,
+ clock_t stable_ms,
+ int n_entries,
+ scsi_hba_iportmap_t **iportmapp);
+
+int scsi_hba_iportmap_iport_add(
+ scsi_hba_iportmap_t *iportmap,
+ char *iport_addr,
+ void *iport_priv);
+
+int scsi_hba_iportmap_iport_remove(
+ scsi_hba_iportmap_t *iportmap,
+ char *iport_addr);
+
+void scsi_hba_iportmap_destroy(scsi_hba_iportmap_t *iportmap);
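An HBA attaching with SCSI_HBA_HBA would typically create an iportmap and then report its initiator ports by unit address; a hedged sketch follows (names, timing, and the unit-address string are illustrative, and DDI_SUCCESS-style returns are assumed):

static int
xx_iportmap_setup(dev_info_t *hba_dip, scsi_hba_iportmap_t **mapp)
{
	if (scsi_hba_iportmap_create(hba_dip, 2000 /* stable ms (assumed) */,
	    SCSI_HBA_MAX_IPORTS, mapp) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* one wide port; the framework enumerates an iport child for it */
	return (scsi_hba_iportmap_iport_add(*mapp, "w5000c50000d756aa", NULL));
}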
+
+/*
+ * SCSI target map interfaces
+ */
+typedef enum { SCSI_TM_FULLSET = 0, SCSI_TM_PERADDR } scsi_tgtmap_mode_t;
+typedef enum {
+ SCSI_TGT_SCSI_DEVICE = 0, SCSI_TGT_SMP_DEVICE, SCSI_TGT_NTYPES
+} scsi_tgtmap_tgt_type_t;
+
+typedef void (*scsi_tgt_activate_cb_t)(
+ void *tgtmap_priv,
+ char *tgt_addr,
+ scsi_tgtmap_tgt_type_t tgt_type,
+ void **tgt_privp);
+typedef void (*scsi_tgt_deactivate_cb_t)(
+ void *tgtmap_priv,
+ char *tgt_addr,
+ scsi_tgtmap_tgt_type_t tgt_type,
+ void *tgt_priv);
+int scsi_hba_tgtmap_create(
+ dev_info_t *iport_dip,
+ scsi_tgtmap_mode_t rpt_mode,
+ clock_t stable_ms,
+ int n_entries,
+ void *tgtmap_priv,
+ scsi_tgt_activate_cb_t activate_cb,
+ scsi_tgt_deactivate_cb_t deactivate_cb,
+ scsi_hba_tgtmap_t **tgtmapp);
+
+int scsi_hba_tgtmap_set_begin(scsi_hba_tgtmap_t *tgtmap);
+
+int scsi_hba_tgtmap_set_add(
+ scsi_hba_tgtmap_t *tgtmap,
+ scsi_tgtmap_tgt_type_t tgt_type,
+ char *tgt_addr,
+ void *tgt_priv);
+
+int scsi_hba_tgtmap_set_end(
+ scsi_hba_tgtmap_t *tgtmap,
+ uint_t flags);
+
+int scsi_hba_tgtmap_tgt_add(
+ scsi_hba_tgtmap_t *tgtmap,
+ scsi_tgtmap_tgt_type_t tgt_type,
+ char *tgt_addr,
+ void *tgt_priv);
+
+int scsi_hba_tgtmap_tgt_remove(
+ scsi_hba_tgtmap_t *tgtmap,
+ scsi_tgtmap_tgt_type_t tgt_type,
+ char *tgt_addr);
+
+void scsi_hba_tgtmap_destroy(scsi_hba_tgtmap_t *tgt_map);
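With SCSI_TM_FULLSET reporting, an iport periodically hands the framework the complete set of targets it currently sees and lets the map compute arrivals and departures. A minimal sketch, with invented addresses and names and DDI_SUCCESS-style returns assumed:

static int
xx_report_targets(scsi_hba_tgtmap_t *tgtmap)
{
	if (scsi_hba_tgtmap_set_begin(tgtmap) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* one SSP disk and one expander (SMP) device */
	(void) scsi_hba_tgtmap_set_add(tgtmap, SCSI_TGT_SCSI_DEVICE,
	    "w5000c50000d756ab", NULL);
	(void) scsi_hba_tgtmap_set_add(tgtmap, SCSI_TGT_SMP_DEVICE,
	    "w500e004aaaaaaa3f", NULL);

	/* anything not re-reported above is deactivated by the framework */
	return (scsi_hba_tgtmap_set_end(tgtmap, 0));
}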
+
+
+/*
* For minor nodes created by the SCSA framework, minor numbers are
* formed by left-shifting instance by INST_MINOR_SHIFT and OR in a
* number less than 64.
diff --git a/usr/src/uts/common/sys/scsi/scsi_address.h b/usr/src/uts/common/sys/scsi/scsi_address.h
index ec70e72b78..36b5543dff 100644
--- a/usr/src/uts/common/sys/scsi/scsi_address.h
+++ b/usr/src/uts/common/sys/scsi/scsi_address.h
@@ -114,6 +114,10 @@ struct scsi_address {
#define SCSI_ADDR_PROP_LUN64 "lun64" /* int64 */
#define SCSI_ADDR_PROP_SFUNC "sfunc" /* int */
+#define SCSI_ADDR_PROP_IPORTUA "scsi-iport" /* string */
+
+#define SCSI_ADDR_PROP_SATA_PHY "sata-phy" /* int */
+
/*
* Addressing property names, values are in string form compatible
* with the SCSI_ADDR_PROP_TARGET_PORT part of the related
@@ -165,6 +169,7 @@ int scsi_wwnstr_to_wwn(const char *wwnstr, uint64_t *wwnp);
char *scsi_wwn_to_wwnstr(uint64_t wwn,
int unit_address_form, char *wwnstr);
void scsi_wwnstr_hexcase(char *wwnstr, int lower_case);
+const char *scsi_wwnstr_skip_ua_prefix(const char *wwnstr);
void scsi_free_wwnstr(char *wwnstr);
#endif /* _KERNEL */
diff --git a/usr/src/uts/common/sys/scsi/scsi_ctl.h b/usr/src/uts/common/sys/scsi/scsi_ctl.h
index 0498c30d7c..32b6f54029 100644
--- a/usr/src/uts/common/sys/scsi/scsi_ctl.h
+++ b/usr/src/uts/common/sys/scsi/scsi_ctl.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_SCSI_SCSI_CTL_H
#define _SYS_SCSI_SCSI_CTL_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/scsi/scsi_types.h>
#ifdef __cplusplus
@@ -56,12 +53,6 @@ extern "C" {
#define SCSI_RESET_CANCEL 0x02 /* cancel the reset notification */
/*
- * Define for scsi_get_bus_addr/scsi_get_name first argument.
- */
-#define SCSI_GET_INITIATOR_ID ((struct scsi_device *)NULL)
- /* return initiator-id */
-
-/*
* Define for scsi_get_name string length.
* This is needed because MAXNAMELEN is not part of DDI.
*/
@@ -98,9 +89,8 @@ int scsi_terminate_task(struct scsi_address *ap, struct scsi_pkt *pkt);
* Other functions
*/
int scsi_clear_aca(struct scsi_address *ap);
-int scsi_get_bus_addr(struct scsi_device *devp, char *name, int len);
-int scsi_get_name(struct scsi_device *devp, char *name, int len);
-
+int scsi_ua_get_reportdev(struct scsi_device *sd, char *ba, int len);
+int scsi_ua_get(struct scsi_device *sd, char *ua, int len);
#endif /* _KERNEL */
#ifdef __cplusplus
diff --git a/usr/src/uts/common/sys/scsi/scsi_pkt.h b/usr/src/uts/common/sys/scsi/scsi_pkt.h
index f7890fc548..88f1b98602 100644
--- a/usr/src/uts/common/sys/scsi/scsi_pkt.h
+++ b/usr/src/uts/common/sys/scsi/scsi_pkt.h
@@ -19,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_SCSI_SCSI_PKT_H
#define _SYS_SCSI_SCSI_PKT_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/scsi/scsi_types.h>
#ifdef __cplusplus
@@ -106,6 +104,9 @@ struct scsi_pkt {
/* private: iff scsi_pkt_allocated_correctly() */
int pkt_path_instance; /* pHCI transport path */
+ /* stage-temporary: iff scsi_pkt_allocated_correctly() */
+ void *pkt_stmp; /* temporary for current pkt stage */
+
#ifdef SCSI_SIZE_CLEAN_VERIFY
/*
* Must be last: Building a driver with-and-without
diff --git a/usr/src/uts/common/sys/scsi/scsi_types.h b/usr/src/uts/common/sys/scsi/scsi_types.h
index 82328afbd4..6aeb2bc0c5 100644
--- a/usr/src/uts/common/sys/scsi/scsi_types.h
+++ b/usr/src/uts/common/sys/scsi/scsi_types.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -57,6 +57,11 @@ typedef void *opaque_t;
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/devops.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stat.h>
+#include <sys/sunndi.h>
+#include <sys/devctl.h>
#endif /* _KERNEL */
/*
@@ -87,22 +92,9 @@ typedef void *opaque_t;
#include <sys/scsi/generic/message.h>
#include <sys/scsi/generic/mode.h>
-
/*
* Sun SCSI type definitions
*/
#include <sys/scsi/impl/types.h>
-/*
- * For drivers which do not include these - must be last
- */
-#ifdef _KERNEL
-#include <sys/ddi.h>
-#include <sys/sunddi.h>
-#include <sys/stat.h>
-#include <sys/sunndi.h>
-#include <sys/devctl.h>
-#include <sys/ddifm.h>
-#endif /* _KERNEL */
-
#endif /* _SYS_SCSI_SCSI_TYPES_H */
diff --git a/usr/src/uts/common/sys/scsi/targets/smp.h b/usr/src/uts/common/sys/scsi/targets/smp.h
index 9412662fd6..7ccfb7803f 100644
--- a/usr/src/uts/common/sys/scsi/targets/smp.h
+++ b/usr/src/uts/common/sys/scsi/targets/smp.h
@@ -20,15 +20,13 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_SCSI_TARGETS_SMP_H
#define _SYS_SCSI_TARGETS_SMP_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/scsi/scsi.h>
@@ -47,17 +45,19 @@ extern "C" {
#define SMP_EXOPENED 2
typedef struct smp_state {
- struct smp_device *smp_dev; /* pointer to smp_device */
- kmutex_t smp_mutex; /* mutex */
- uint32_t smp_open_flag; /* open flag */
+ struct smp_device *smp_dev; /* pointer to smp_device */
+ kmutex_t smp_mutex; /* mutex */
+ uint32_t smp_open_flag; /* open flag */
+ kcondvar_t smp_cv; /* condition variable */
+ uint32_t smp_busy; /* busy */
} smp_state_t;
#define SMP_ESTIMATED_NUM_DEVS 4 /* for soft-state allocation */
-#define SMP_DEFAULT_RETRY_TIMES 3
+#define SMP_DEFAULT_RETRY_TIMES 5
#define SMP_FLAG_REQBUF 0x1
#define SMP_FLAG_RSPBUF 0x2
-#define SMP_FLAG_XFER 0x4
+#define SMP_FLAG_XFER 0x4
#endif /* defined(_KERNEL) */
diff --git a/usr/src/uts/common/sys/sunddi.h b/usr/src/uts/common/sys/sunddi.h
index 5a390b4180..601601eaa6 100644
--- a/usr/src/uts/common/sys/sunddi.h
+++ b/usr/src/uts/common/sys/sunddi.h
@@ -116,6 +116,8 @@ extern "C" {
#define DEVI_PSEUDO_NODEID ((int)-1)
#define DEVI_SID_NODEID ((int)-2)
#define DEVI_SID_HIDDEN_NODEID ((int)-3)
+#define DEVI_SID_HP_NODEID ((int)-4)
+#define DEVI_SID_HP_HIDDEN_NODEID ((int)-5)
#define DEVI_PSEUDO_NEXNAME "pseudo"
#define DEVI_ISA_NEXNAME "isa"
@@ -1556,43 +1558,84 @@ int
ddi_fls(long mask);
/*
- * The next five routines comprise generic storage management utilities
- * for driver soft state structures.
+ * The ddi_soft_state* routines comprise generic storage management utilities
+ * for driver soft state structures. Two types of soft_state indexes are
+ * supported: 'integer index', and 'string index'.
*/
+typedef struct __ddi_soft_state_bystr ddi_soft_state_bystr;
/*
- * Allocate a set of pointers to 'n_items' objects of size 'size'
- * bytes. Each pointer is initialized to nil. 'n_items' is a hint i.e.
- * zero is allowed.
+ * Initialize a soft_state set, establishing the 'size' of soft state objects
+ * in the set.
+ *
+ * For an 'integer indexed' soft_state set, the initial set will accommodate
+ * 'n_items' objects - 'n_items' is a hint (i.e. zero is allowed), allocations
+ * that exceed 'n_items' have additional overhead.
+ *
+ * For a 'string indexed' soft_state set, 'n_items' should be the typical
+ * number of soft state objects in the set - 'n_items' is a hint, there may
+ * be additional overhead if the hint is too small (and wasted memory if the
+ * hint is too big).
*/
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items);
+int
+ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p,
+ size_t size, int n_items);
/*
- * Allocate a state structure of size 'size' to be associated
- * with item 'item'.
+ * Allocate a soft state object associated with either 'integer index' or
+ * 'string index' from a soft_state set.
*/
int
ddi_soft_state_zalloc(void *state, int item);
+int
+ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str);
/*
- * Fetch a pointer to the allocated soft state structure
- * corresponding to 'item.'
+ * Get the pointer to the allocated soft state object associated with
+ * either 'integer index' or 'string index'.
*/
void *
ddi_get_soft_state(void *state, int item);
+void *
+ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str);
/*
- * Free the state structure corresponding to 'item.'
+ * Free the soft state object associated with either 'integer index'
+ * or 'string index'.
*/
void
ddi_soft_state_free(void *state, int item);
+void
+ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str);
/*
- * Free the handle, and any associated soft state structures.
+ * Free the soft state set and any associated soft state objects.
*/
void
ddi_soft_state_fini(void **state_p);
+void
+ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p);
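A brief sketch of the string-indexed soft state routines, keyed here by unit address; xx_soft_t, xx_bystr, and the callers are invented, and DDI_SUCCESS-style returns are assumed:

typedef struct xx_soft {
	int xx_busy;
} xx_soft_t;

static ddi_soft_state_bystr *xx_bystr;

static int
xx_soft_init(void)
{
	return (ddi_soft_state_bystr_init(&xx_bystr, sizeof (xx_soft_t), 8));
}

static xx_soft_t *
xx_soft_hold(const char *ua)
{
	if (ddi_soft_state_bystr_zalloc(xx_bystr, ua) != DDI_SUCCESS)
		return (NULL);
	return (ddi_soft_state_bystr_get(xx_bystr, ua));
}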
+
+/*
+ * The ddi_strid_* routines provide string-to-index management utilities.
+ */
+typedef struct __ddi_strid ddi_strid;
+int
+ddi_strid_init(ddi_strid **strid_p, int n_items);
+id_t
+ddi_strid_alloc(ddi_strid *strid, char *str);
+id_t
+ddi_strid_fixed_alloc(ddi_strid *strid, char *str);
+id_t
+ddi_strid_str2id(ddi_strid *strid, char *str);
+char *
+ddi_strid_id2str(ddi_strid *strid, id_t id);
+void
+ddi_strid_free(ddi_strid *strid, id_t id);
+void
+ddi_strid_fini(ddi_strid **strid_p);
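And a sketch of the companion string-to-index utilities, which hand back a stable small integer for a string key; the xx_ names are invented:

static ddi_strid *xx_strid;

static void
xx_strid_example(char *ua)
{
	id_t id;

	(void) ddi_strid_init(&xx_strid, 8);
	id = ddi_strid_alloc(xx_strid, ua);	/* bind an id to 'ua' */
	ASSERT(ddi_strid_str2id(xx_strid, ua) == id);
	ASSERT(strcmp(ddi_strid_id2str(xx_strid, id), ua) == 0);
	ddi_strid_free(xx_strid, id);
	ddi_strid_fini(&xx_strid);
}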
/*
* Set the addr field of the name in dip to name
diff --git a/usr/src/uts/common/sys/sunmdi.h b/usr/src/uts/common/sys/sunmdi.h
index 4aa687efa8..a670d0b2b8 100644
--- a/usr/src/uts/common/sys/sunmdi.h
+++ b/usr/src/uts/common/sys/sunmdi.h
@@ -87,7 +87,11 @@ typedef enum {
#define MDI_COMPONENT_CLIENT 0x4
/*
- * mdi_pathinfo node state utility definitions
+ * mdi_pathinfo node state utility definitions (bits in mdi_pathinfo_state_t)
+ *
+ * NOTE: having mdi_pathinfo_state_t contain both state and flags is error
+ * prone. For new flags, please consider using MDI_PATHINFO_FLAG_ (and
+ * moving existing EXT_STATE_MASK flags over would be good too).
*/
#define MDI_PATHINFO_STATE_TRANSIENT 0x00010000
#define MDI_PATHINFO_STATE_USER_DISABLE 0x00100000
@@ -96,6 +100,12 @@ typedef enum {
#define MDI_PATHINFO_STATE_MASK 0x0000FFFF
#define MDI_PATHINFO_EXT_STATE_MASK 0xFFF00000
+/*
+ * mdi_pathinfo flags definitions
+ */
+#define MDI_PATHINFO_FLAGS_HIDDEN 0x00000001
+#define MDI_PATHINFO_FLAGS_DEVICE_REMOVED 0x00000002
+
#define USER_DISABLE 1
#define DRIVER_DISABLE 2
#define DRIVER_DISABLE_TRANSIENT 3
@@ -191,6 +201,12 @@ int mdi_pi_enable(dev_info_t *, dev_info_t *, int);
int mdi_pi_disable_path(mdi_pathinfo_t *, int);
int mdi_pi_enable_path(mdi_pathinfo_t *, int);
+int mdi_pi_ishidden(mdi_pathinfo_t *);
+
+int mdi_pi_device_isremoved(mdi_pathinfo_t *);
+int mdi_pi_device_remove(mdi_pathinfo_t *);
+int mdi_pi_device_insert(mdi_pathinfo_t *);
+
/*
* MPxIO-PM stuff
*/
@@ -228,11 +244,14 @@ dev_info_t *mdi_pi_get_phci(mdi_pathinfo_t *);
char *mdi_pi_get_node_name(mdi_pathinfo_t *);
char *mdi_pi_get_addr(mdi_pathinfo_t *);
mdi_pathinfo_state_t mdi_pi_get_state(mdi_pathinfo_t *);
+uint_t mdi_pi_get_flags(mdi_pathinfo_t *);
int mdi_pi_get_path_instance(mdi_pathinfo_t *);
-char *mdi_pi_pathname_by_instance(int path_instance);
+char *mdi_pi_pathname_by_instance(int);
char *mdi_pi_pathname(mdi_pathinfo_t *);
char *mdi_pi_pathname_obp(mdi_pathinfo_t *, char *);
int mdi_pi_pathname_obp_set(mdi_pathinfo_t *, char *);
+char *mdi_pi_spathname_by_instance(int);
+char *mdi_pi_spathname(mdi_pathinfo_t *);
/*
* mdi_pathinfo Property handling functions
diff --git a/usr/src/uts/common/sys/sunndi.h b/usr/src/uts/common/sys/sunndi.h
index 80bbdca329..22e61db408 100644
--- a/usr/src/uts/common/sys/sunndi.h
+++ b/usr/src/uts/common/sys/sunndi.h
@@ -318,6 +318,7 @@ ndi_irm_destroy(ddi_irm_pool_t *poolp);
#define NDI_DRV_CONF_REPROBE 0x04000000 /* reprobe conf-enum'd nodes only */
#define NDI_DETACH_DRIVER 0x08000000 /* performing driver_detach */
#define NDI_MTC_OFF 0x10000000 /* disable multi-threading */
+#define NDI_USER_REQ 0x20000000 /* user requested operation */
/* ndi interface flag values */
#define NDI_SLEEP 0x000000
@@ -342,6 +343,26 @@ dev_info_t *
ndi_devi_findchild(dev_info_t *p, char *devname);
/*
+ * Find the child dev_info node of parent nexus 'p' whose name
+ * matches "dname"@"ua". If a child doesn't have a "ua"
+ * value, it calls the function "make_ua" to create it.
+ */
+dev_info_t *
+ndi_devi_findchild_by_callback(dev_info_t *p, char *dname, char *ua,
+ int (*make_ua)(dev_info_t *, char *, int));
+
+/*
+ * Maintain DEVI_DEVICE_REMOVED hotplug devi_state for remove/reinsert hotplug
+ * of open devices.
+ */
+int
+ndi_devi_device_isremoved(dev_info_t *dip);
+int
+ndi_devi_device_remove(dev_info_t *dip);
+int
+ndi_devi_device_insert(dev_info_t *dip);
+
+/*
* generate debug msg via NDI_DEVI_DEBUG flag
*/
#define NDI_DEBUG(flags, args) \
@@ -462,10 +483,10 @@ typedef struct ndi_event_definition {
} ndi_event_definition_t;
typedef struct ndi_event_cookie {
- ndi_event_definition_t *definition; /* Event Description */
+ ndi_event_definition_t *definition; /* Event Description */
dev_info_t *ddip; /* Devi defining this event */
ndi_event_callbacks_t *callback_list; /* Cb's reg'd to w/ this evt */
- struct ndi_event_cookie *next_cookie; /* Next cookie def'd in hdl */
+ struct ndi_event_cookie *next_cookie; /* Next cookie def'd in hdl */
} ndi_event_cookie_t;
@@ -522,7 +543,7 @@ ndi_event_unbind_set(ndi_event_hdl_t handle,
* get an event cookie
*/
int
-ndi_event_retrieve_cookie(ndi_event_hdl_t handle,
+ndi_event_retrieve_cookie(ndi_event_hdl_t handle,
dev_info_t *child_dip,
char *eventname,
ddi_eventcookie_t *cookiep,
@@ -532,7 +553,7 @@ ndi_event_retrieve_cookie(ndi_event_hdl_t handle,
* add an event callback info to the ndi event handle
*/
int
-ndi_event_add_callback(ndi_event_hdl_t handle,
+ndi_event_add_callback(ndi_event_hdl_t handle,
dev_info_t *child_dip,
ddi_eventcookie_t cookie,
void (*event_callback)
@@ -591,7 +612,7 @@ ndi_event_cookie_to_name(ndi_event_hdl_t handle,
* name given an event_tag
*/
char *
-ndi_event_tag_to_name(ndi_event_hdl_t handle, int event_tag);
+ndi_event_tag_to_name(ndi_event_hdl_t handle, int event_tag);
dev_info_t *
ndi_devi_config_vhci(char *, int);
@@ -635,9 +656,9 @@ typedef struct ndi_ra_request {
/* restricted to */
uint64_t ra_boundlen; /* Length of the area, starting */
- /* from ra_boundbase, for the */
+ /* from ra_boundbase, for the */
/* allocated resource to be */
- /* restricted to. */
+ /* restricted to. */
uint64_t ra_align_mask; /* Alignment mask used for */
/* allocated base address */
@@ -728,6 +749,11 @@ int ndi_dev_is_pseudo_node(dev_info_t *);
int ndi_dev_is_persistent_node(dev_info_t *);
/*
+ * ndi_dev_is_hotplug_node: Return non-zero if the node was created by hotplug.
+ */
+int ndi_dev_is_hotplug_node(dev_info_t *);
+
+/*
* ndi_dev_is_hidden_node: Return non-zero if the node is hidden.
*/
int ndi_dev_is_hidden_node(dev_info_t *);
@@ -761,8 +787,8 @@ void ndi_set_dma_fault(ddi_dma_handle_t dh);
void ndi_clr_dma_fault(ddi_dma_handle_t dh);
/* Driver.conf property merging */
-int ndi_merge_node(dev_info_t *, int (*)(dev_info_t *, char *, int));
-void ndi_merge_wildcard_node(dev_info_t *);
+int ndi_merge_node(dev_info_t *, int (*)(dev_info_t *, char *, int));
+void ndi_merge_wildcard_node(dev_info_t *);
/*
* Ndi 'flavor' support: These interfaces are to support a nexus driver
diff --git a/usr/src/uts/common/sys/systm.h b/usr/src/uts/common/sys/systm.h
index 5963460441..4e529b9cce 100644
--- a/usr/src/uts/common/sys/systm.h
+++ b/usr/src/uts/common/sys/systm.h
@@ -175,6 +175,7 @@ hrtime_t untimeout_generic(callout_id_t, int);
clock_t untimeout_default(callout_id_t, int);
void delay(clock_t);
int delay_sig(clock_t);
+void delay_random(clock_t);
int nodev();
int nulldev();
major_t getudev(void);
diff --git a/usr/src/uts/intel/Makefile.intel.shared b/usr/src/uts/intel/Makefile.intel.shared
index 8f18039cc3..41a523adfd 100644
--- a/usr/src/uts/intel/Makefile.intel.shared
+++ b/usr/src/uts/intel/Makefile.intel.shared
@@ -434,6 +434,7 @@ DRV_KMODS += emul64
#
DRV_KMODS += options
DRV_KMODS += scsi_vhci
+DRV_KMODS += pmcs
DRV_KMODS += arcmsr
DRV_KMODS += fcp
DRV_KMODS += fcip
diff --git a/usr/src/uts/intel/os/driver_aliases b/usr/src/uts/intel/os/driver_aliases
index a3d57fa436..dbedff44ab 100644
--- a/usr/src/uts/intel/os/driver_aliases
+++ b/usr/src/uts/intel/os/driver_aliases
@@ -54,3 +54,5 @@ amd_iommu "pci1002,5a23"
isa "pciclass,060100"
hme "pci108e,1001"
acpinex "acpivirtnex"
+nulldriver "scsa,probe"
+nulldriver "scsa,nodev"
diff --git a/usr/src/uts/intel/pmcs/Makefile b/usr/src/uts/intel/pmcs/Makefile
new file mode 100644
index 0000000000..24d111188b
--- /dev/null
+++ b/usr/src/uts/intel/pmcs/Makefile
@@ -0,0 +1,113 @@
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#
+# This makefile drives the production of the pmcs driver kernel module.
+#
+# intel architecture dependent
+#
+
+#
+# Paths to the base of the uts directory trees
+#
+UTSBASE = ../../../../src/uts
+
+#
+# Define the module and object file sets.
+#
+MODULE = pmcs
+OBJECTS = $(PMCS_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(PMCS_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/common/io/scsi/adapters/pmcs
+WARLOCK_OUT = $(PMCS_OBJS:%.o=%.ll)
+WARLOCK_OK = $(MODULE).ok
+WLCMD_DIR = $(UTSBASE)/common/io/warlock
+BLDREVHDR = ../../common/sys/scsi/adapters/pmcs/pmcs_bldrev.h
+BLDREVHDR_T = ../../common/sys/scsi/adapters/pmcs/pmcs_bldrev.h.tmp
+
+#
+# Kernel Module Dependencies
+#
+LDFLAGS += -dy -Nmisc/scsi
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY) $(CONFMOD)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+include $(CONF_SRCDIR)/pmcs8001fw.version
+
+#
+# Add additional flags
+#
+PMCS_DRV_FLGS = -DMODNAME=\"${MODULE}\"
+CPPFLAGS += $(PMCS_DRV_FLGS) \
+ -DPMCS_FIRMWARE_VERSION=${PMCS_FW_VERSION} \
+ -DPMCS_FIRMWARE_VERSION_STRING=\"${PMCS_FW_VERSION_STRING}\"
+
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+all: ADDBUILDREV $(ALL_DEPS)
+
+ADDBUILDREV:
+ @$(ECHO) '#define PMCS_BUILD_VERSION "'`date '+v20%y%m%d'`\"> \
+ $(BLDREVHDR_T); \
+ if [ -f $(BLDREVHDR) ]; then \
+ RD=`$(DIFF) $(BLDREVHDR) $(BLDREVHDR_T) || $(TRUE)`; \
+ if [ ! -z "$$RD" ]; then \
+ $(MV) $(BLDREVHDR_T) $(BLDREVHDR); \
+ fi; \
+ else \
+ $(MV) $(BLDREVHDR_T) $(BLDREVHDR); \
+ fi; \
+ $(RM) $(BLDREVHDR_T)
+
+def: $(DEF_DEPS)
+
+clean: $(CLEAN_DEPS)
+ -$(RM) $(BLDREVHDR)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: ADDBUILDREV $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ
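
Annotation (not part of the patch): in the intel Makefile above, the ADDBUILDREV target writes a datestamped PMCS_BUILD_VERSION string into pmcs_bldrev.h and only replaces the header when its contents actually change, so an unchanged datestamp does not force needless recompiles; the CPPFLAGS block feeds PMCS_FIRMWARE_VERSION and PMCS_FIRMWARE_VERSION_STRING in from pmcs8001fw.version. How pmcs.c itself consumes these macros is not shown in this hunk; a driver built this way could fold them into an attach-time banner roughly as sketched below. The function name and message text are illustrative only.

/*
 * Hedged sketch: reporting the generated build datestamp and the
 * firmware version string passed on the compile line.
 */
#include <sys/cmn_err.h>
#include <sys/scsi/adapters/pmcs/pmcs_bldrev.h>

static void
report_build_info(void)
{
	/* "?" prefix: printed to the console only when booted verbose */
	cmn_err(CE_CONT, "?pmcs: build %s, firmware %s\n",
	    PMCS_BUILD_VERSION, PMCS_FIRMWARE_VERSION_STRING);
}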
diff --git a/usr/src/uts/sparc/Makefile.sparc.shared b/usr/src/uts/sparc/Makefile.sparc.shared
index 4ff198219e..53cf38cbaf 100644
--- a/usr/src/uts/sparc/Makefile.sparc.shared
+++ b/usr/src/uts/sparc/Makefile.sparc.shared
@@ -320,6 +320,7 @@ DRV_KMODS += ii rdc rdcsrv rdcstub
DRV_KMODS += iscsi
DRV_KMODS += emlxs
DRV_KMODS += srpt
+DRV_KMODS += pmcs
$(CLOSED_BUILD)CLOSED_DRV_KMODS += ifp
$(CLOSED_BUILD)CLOSED_DRV_KMODS += uata
diff --git a/usr/src/uts/sparc/os/driver_aliases b/usr/src/uts/sparc/os/driver_aliases
index 8add121d44..11975a0ee8 100644
--- a/usr/src/uts/sparc/os/driver_aliases
+++ b/usr/src/uts/sparc/os/driver_aliases
@@ -168,3 +168,5 @@ ncp "SUNW,n2-mau"
ncp "SUNW,vf-mau"
n2rng "SUNW,n2-rng"
n2rng "SUNW,vf-rng"
+nulldriver "scsa,probe"
+nulldriver "scsa,nodev"
diff --git a/usr/src/uts/sparc/pmcs/Makefile b/usr/src/uts/sparc/pmcs/Makefile
new file mode 100644
index 0000000000..42f80ac060
--- /dev/null
+++ b/usr/src/uts/sparc/pmcs/Makefile
@@ -0,0 +1,113 @@
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#
+# This makefile drives the production of the pmcs driver kernel module.
+#
+# SPARC architecture dependent
+#
+
+#
+# Paths to the base of the uts directory trees
+#
+UTSBASE = ../../../../src/uts
+
+#
+# Define the module and object file sets.
+#
+MODULE = pmcs
+OBJECTS = $(PMCS_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(PMCS_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/common/io/scsi/adapters/pmcs
+WARLOCK_OUT = $(PMCS_OBJS:%.o=%.ll)
+WARLOCK_OK = $(MODULE).ok
+WLCMD_DIR = $(UTSBASE)/common/io/warlock
+BLDREVHDR = ../../common/sys/scsi/adapters/pmcs/pmcs_bldrev.h
+BLDREVHDR_T = ../../common/sys/scsi/adapters/pmcs/pmcs_bldrev.h.tmp
+
+#
+# Kernel Module Dependencies
+#
+LDFLAGS += -dy -Nmisc/scsi
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY) $(CONFMOD)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sparc/Makefile.sparc
+include $(CONF_SRCDIR)/pmcs8001fw.version
+
+#
+# Add additional flags
+#
+PMCS_DRV_FLGS = -DMODNAME=\"${MODULE}\" -DDISABLE_MSIX
+CPPFLAGS += $(PMCS_DRV_FLGS) \
+ -DPMCS_FIRMWARE_VERSION=${PMCS_FW_VERSION} \
+ -DPMCS_FIRMWARE_VERSION_STRING=\"${PMCS_FW_VERSION_STRING}\"
+
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+all: ADDBUILDREV $(ALL_DEPS)
+
+ADDBUILDREV:
+ @$(ECHO) '#define PMCS_BUILD_VERSION "'`date '+v20%y%m%d'`\"> \
+ $(BLDREVHDR_T); \
+ if [ -f $(BLDREVHDR) ]; then \
+ RD=`$(DIFF) $(BLDREVHDR) $(BLDREVHDR_T) || $(TRUE)`; \
+ if [ ! -z "$$RD" ]; then \
+ $(MV) $(BLDREVHDR_T) $(BLDREVHDR); \
+ fi; \
+ else \
+ $(MV) $(BLDREVHDR_T) $(BLDREVHDR); \
+ fi; \
+ $(RM) $(BLDREVHDR_T)
+
+def: $(DEF_DEPS)
+
+clean: $(CLEAN_DEPS)
+ -$(RM) $(BLDREVHDR)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: ADDBUILDREV $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sparc/Makefile.targ
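
Annotation (not part of the patch): the SPARC Makefile above is nearly identical to the intel one; the notable difference is that PMCS_DRV_FLGS adds -DDISABLE_MSIX on SPARC. How pmcs consumes that flag is not part of this hunk; a common pattern for such a build-time switch is to mask MSI-X out of the interrupt types the driver is willing to register, along the lines of the sketch below. The function is illustrative only and not taken from the pmcs source.

/*
 * Hedged sketch: compile-time opt-out of MSI-X via -DDISABLE_MSIX,
 * layered on the standard DDI interrupt-type query.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_intr.h>

static int
pick_intr_type(dev_info_t *dip)
{
	int types = 0;

	if (ddi_intr_get_supported_types(dip, &types) != DDI_SUCCESS)
		return (DDI_INTR_TYPE_FIXED);
#ifdef DISABLE_MSIX
	types &= ~DDI_INTR_TYPE_MSIX;	/* build-time opt-out of MSI-X */
#endif
	if (types & DDI_INTR_TYPE_MSIX)
		return (DDI_INTR_TYPE_MSIX);
	if (types & DDI_INTR_TYPE_MSI)
		return (DDI_INTR_TYPE_MSI);
	return (DDI_INTR_TYPE_FIXED);
}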