authormlf <none@none>2006-03-29 16:19:30 -0800
committermlf <none@none>2006-03-29 16:19:30 -0800
commit507c32411f3f101e90ca2120f042b5ee698ba1d5 (patch)
treed6c3d69e04d180c1fa52a92cb4d897fffb3c3ec0 /usr/src
parent03831d35f7499c87d51205817c93e9a8d42c4bae (diff)
downloadillumos-joyent-507c32411f3f101e90ca2120f042b5ee698ba1d5.tar.gz
6392614 x86: ata driver should be open sourced
--HG--
rename : usr/src/uts/common/io/dktp/dcdev/dadk.c => usr/src/uts/intel/io/dktp/dcdev/dadk.c
rename : usr/src/uts/common/io/dktp/dcdev/gda.c => usr/src/uts/intel/io/dktp/dcdev/gda.c
rename : usr/src/uts/common/io/dktp/disk/cmdk.c => usr/src/uts/intel/io/dktp/disk/cmdk.c
rename : usr/src/uts/common/io/dktp/drvobj/strategy.c => usr/src/uts/intel/io/dktp/drvobj/strategy.c
rename : usr/src/uts/common/io/dktp/hba/ghd/ghd.h => usr/src/uts/intel/io/dktp/hba/ghd/ghd.h
rename : usr/src/uts/common/io/dktp/hba/ghd/ghd_debug.h => usr/src/uts/intel/io/dktp/hba/ghd/ghd_debug.h
rename : usr/src/uts/common/io/dktp/hba/ghd/ghd_dma.h => usr/src/uts/intel/io/dktp/hba/ghd/ghd_dma.h
rename : usr/src/uts/common/io/dktp/hba/ghd/ghd_queue.h => usr/src/uts/intel/io/dktp/hba/ghd/ghd_queue.h
rename : usr/src/uts/common/io/dktp/hba/ghd/ghd_scsa.h => usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsa.h
rename : usr/src/uts/common/io/dktp/hba/ghd/ghd_scsi.h => usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsi.h
rename : usr/src/uts/common/io/dktp/hba/ghd/ghd_waitq.h => usr/src/uts/intel/io/dktp/hba/ghd/ghd_waitq.h
Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/tools/findunref/exception_list  1
-rw-r--r--  usr/src/uts/common/Makefile.files  10
-rw-r--r--  usr/src/uts/common/Makefile.rules  21
-rw-r--r--  usr/src/uts/i86pc/Makefile.files  7
-rw-r--r--  usr/src/uts/i86pc/Makefile.i86pc.shared  2
-rw-r--r--  usr/src/uts/i86pc/ata/Makefile  92
-rw-r--r--  usr/src/uts/intel/Makefile.files  10
-rw-r--r--  usr/src/uts/intel/Makefile.rules  37
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata.conf  61
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata_blacklist.c  86
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata_blacklist.h  90
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata_cmd.h  89
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata_common.c  3377
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata_common.h  697
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata_debug.c  120
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata_debug.h  88
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata_disk.c  2953
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata_disk.h  97
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata_dma.c  383
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/ata_fsm.h  159
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/atapi.c  1174
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/atapi.h  132
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/atapi_fsm.c  884
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/capacity.notes.txt  178
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/fsm.txt  74
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/pciide.h  105
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/sil3xxx.c  152
-rw-r--r--  usr/src/uts/intel/io/dktp/controller/ata/sil3xxx.h  98
-rw-r--r--  usr/src/uts/intel/io/dktp/dcdev/dadk.c (renamed from usr/src/uts/common/io/dktp/dcdev/dadk.c)  0
-rw-r--r--  usr/src/uts/intel/io/dktp/dcdev/gda.c (renamed from usr/src/uts/common/io/dktp/dcdev/gda.c)  0
-rw-r--r--  usr/src/uts/intel/io/dktp/disk/cmdk.c (renamed from usr/src/uts/common/io/dktp/disk/cmdk.c)  0
-rw-r--r--  usr/src/uts/intel/io/dktp/drvobj/strategy.c (renamed from usr/src/uts/common/io/dktp/drvobj/strategy.c)  0
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd.c  945
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd.h (renamed from usr/src/uts/common/io/dktp/hba/ghd/ghd.h)  6
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_debug.c  104
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_debug.h (renamed from usr/src/uts/common/io/dktp/hba/ghd/ghd_debug.h)  6
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_dma.c  245
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_dma.h (renamed from usr/src/uts/common/io/dktp/hba/ghd/ghd_dma.h)  0
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_gcmd.c  102
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_queue.c  206
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_queue.h (renamed from usr/src/uts/common/io/dktp/hba/ghd/ghd_queue.h)  12
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsa.c  261
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsa.h (renamed from usr/src/uts/common/io/dktp/hba/ghd/ghd_scsa.h)  0
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsi.c  73
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsi.h (renamed from usr/src/uts/common/io/dktp/hba/ghd/ghd_scsi.h)  12
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_timer.c  898
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_waitq.c  429
-rw-r--r--  usr/src/uts/intel/io/dktp/hba/ghd/ghd_waitq.h (renamed from usr/src/uts/common/io/dktp/hba/ghd/ghd_waitq.h)  12
48 files changed, 14430 insertions, 58 deletions
diff --git a/usr/src/tools/findunref/exception_list b/usr/src/tools/findunref/exception_list
index 11c071c7cd..bb24cb9562 100644
--- a/usr/src/tools/findunref/exception_list
+++ b/usr/src/tools/findunref/exception_list
@@ -48,7 +48,6 @@
./src/cmd/oawk/EXPLAIN
./src/cmd/rpcsvc/nis/rpc.nisd/resolv_server/DNS_FWD
./src/cmd/vi/port/ex.news
-./closed/uts/common/io/dktp/controller/ata/capacity.notes
./src/cmd/ssh/doc/*
#
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index 4a9f3d19b8..b025f1d7c6 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -499,7 +499,7 @@ CLONE_OBJS += clone.o
CN_OBJS += cons.o
DLD_OBJS += dld_drv.o dld_proto.o dld_str.o
-
+
DLS_OBJS += dls.o dls_link.o dls_mod.o dls_stat.o dls_vlan.o dls_soft_ring.o
GLD_OBJS += gld.o gldutil.o
@@ -674,14 +674,6 @@ VUIDPS2_OBJS += vuidmice.o vuidps2.o
SYSINIT_OBJS += sysinit.o sysinit_ddi.o
-DADK_OBJS += dadk.o
-
-GDA_OBJS += gda.o
-
-STRATEGY_OBJS += strategy.o
-
-CMDK_OBJS += cmdk.o
-
HPCSVC_OBJS += hpcsvc.o
PCIHPNEXUS_OBJS += pcihp.o
diff --git a/usr/src/uts/common/Makefile.rules b/usr/src/uts/common/Makefile.rules
index 24ab2e0f46..d224eabeb8 100644
--- a/usr/src/uts/common/Makefile.rules
+++ b/usr/src/uts/common/Makefile.rules
@@ -444,18 +444,6 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/common/io/bge/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
-$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/dktp/dcdev/%.c
- $(COMPILE.c) -o $@ $<
- $(CTFCONVERT_O)
-
-$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/dktp/disk/%.c
- $(COMPILE.c) -o $@ $<
- $(CTFCONVERT_O)
-
-$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/dktp/drvobj/%.c
- $(COMPILE.c) -o $@ $<
- $(CTFCONVERT_O)
-
$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/dld/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
@@ -1180,15 +1168,6 @@ $(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/audio/sada/drv/audiots/%.c
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/bge/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
-$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/dktp/dcdev/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/dktp/disk/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/dktp/drvobj/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/dld/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
diff --git a/usr/src/uts/i86pc/Makefile.files b/usr/src/uts/i86pc/Makefile.files
index 730f9e34ad..d86d50e539 100644
--- a/usr/src/uts/i86pc/Makefile.files
+++ b/usr/src/uts/i86pc/Makefile.files
@@ -129,6 +129,13 @@ AGPGART_OBJS += agpgart.o \
AGPTARGET_OBJS += agptarget.o
AMD64GART_OBJS += amd64_gart.o
+GHD_OBJS += ghd.o ghd_debug.o ghd_dma.o ghd_queue.o ghd_scsa.o \
+ ghd_scsi.o ghd_timer.o ghd_waitq.o ghd_gcmd.o
+
+ATA_OBJS += $(GHD_OBJS) ata_blacklist.o ata_common.o ata_disk.o \
+ ata_dma.o atapi.o atapi_fsm.o ata_debug.o \
+ sil3xxx.o
+
include $(SRC)/common/mc/mc-amd/Makefile.mcamd
MCAMD_OBJS += \
$(MCAMD_CMN_OBJS) \
diff --git a/usr/src/uts/i86pc/Makefile.i86pc.shared b/usr/src/uts/i86pc/Makefile.i86pc.shared
index 2f8605084d..74b267b9e3 100644
--- a/usr/src/uts/i86pc/Makefile.i86pc.shared
+++ b/usr/src/uts/i86pc/Makefile.i86pc.shared
@@ -245,6 +245,7 @@ DRV_KMODS += pci
DRV_KMODS += pcie_pci
DRV_KMODS += npe
+DRV_KMODS += ata
DRV_KMODS += fd
DRV_KMODS += fdc
DRV_KMODS += kb8042
@@ -264,7 +265,6 @@ DRV_KMODS += cpc
DRV_KMODS += mc-amd
DRV_KMODS += power
-$(CLOSED_BUILD)CLOSED_DRV_KMODS += ata
$(CLOSED_BUILD)CLOSED_DRV_KMODS += audiovia823x
$(CLOSED_BUILD)CLOSED_DRV_KMODS += audioens
$(CLOSED_BUILD)CLOSED_DRV_KMODS += audioixp
diff --git a/usr/src/uts/i86pc/ata/Makefile b/usr/src/uts/i86pc/ata/Makefile
new file mode 100644
index 0000000000..e371b5bc50
--- /dev/null
+++ b/usr/src/uts/i86pc/ata/Makefile
@@ -0,0 +1,92 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# uts/i86pc/ata/Makefile
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#pragma ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the ata "drv"
+# kernel module.
+#
+# i86pc implementation architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = ata
+OBJECTS = $(ATA_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(ATA_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_PSM_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/intel/io/dktp/controller/ata
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/i86pc/Makefile.i86pc
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY) $(CONFMOD)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# Overrides.
+#
+#DEBUG_FLGS = -DATA_DEBUG -DGHD_DEBUG -DDEBUG
+DEBUG_FLGS =
+DEBUG_DEFS += $(DEBUG_FLGS)
+INC_PATH += -I$(UTSBASE)/intel/io/dktp/hba/ghd
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/i86pc/Makefile.targ
diff --git a/usr/src/uts/intel/Makefile.files b/usr/src/uts/intel/Makefile.files
index e4eda3b320..87d14b8eb7 100644
--- a/usr/src/uts/intel/Makefile.files
+++ b/usr/src/uts/intel/Makefile.files
@@ -111,10 +111,18 @@ XMEMFS_OBJS += \
#
# Driver modules
#
-SD_OBJS += sd.o sd_xbuf.o
+CMDK_OBJS += cmdk.o
CMLB_OBJS += cmlb.o
+DADK_OBJS += dadk.o
+
+GDA_OBJS += gda.o
+
+SD_OBJS += sd.o sd_xbuf.o
+
+STRATEGY_OBJS += strategy.o
+
VGATEXT_OBJS += vgatext.o vgasubr.o
#
diff --git a/usr/src/uts/intel/Makefile.rules b/usr/src/uts/intel/Makefile.rules
index 8ea1d6a4b1..467289ca7f 100644
--- a/usr/src/uts/intel/Makefile.rules
+++ b/usr/src/uts/intel/Makefile.rules
@@ -20,7 +20,7 @@
# CDDL HEADER END
#
#
-# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# ident "%Z%%M% %I% %E% SMI"
@@ -89,6 +89,26 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/amr/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
+$(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/dktp/controller/ata/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/dktp/dcdev/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/dktp/disk/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/dktp/drvobj/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/dktp/hba/ghd/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
$(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/scsi/targets/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
@@ -163,6 +183,21 @@ $(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/aac/%.c
$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/amr/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
+$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/dktp/controller/ata/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/dktp/dcdev/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/dktp/disk/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/dktp/drvobj/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/dktp/hba/ghd/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/scsi/targets/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata.conf b/usr/src/uts/intel/io/dktp/controller/ata/ata.conf
new file mode 100644
index 0000000000..c92fce214a
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata.conf
@@ -0,0 +1,61 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#ident "%Z%%M% %I% %E% SMI"
+
+#
+# don't change these
+#
+device_type="ide";
+flow_control="dmult";
+queue="qfifo";
+max_transfer=0x100;
+
+# Enable dma
+ata-options=0x1;
+
+#
+# for PIO performance upgrade - set block factor to 0x10
+#
+drive0_block_factor=0x1;
+drive1_block_factor=0x1;
+
+#
+# some laptop systems require setting this flag
+#
+timing_flags=0x0;
+
+#
+# To cause the driver to initialize the drives to automatically
+# enter standby mode, the following property sets the drives'
+# standby timer. The units are seconds, rounded up to the drive's
+# timer resolution.
+#
+# standby=-1 don't modify the drive's current setting
+# standby=0 disable standby timer
+# standby=n n == number of seconds to set the timer to
+#
+
+#standby=900;
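
The properties above are consumed at attach time through ddi_prop_get_int(9F); the excerpt of ata_init_controller() later in this change reads "standby", "timing_flags" and "max_transfer" in exactly this way. A minimal sketch of the pattern (the my_standby_secs name is illustrative only, and dip is the controller's dev_info pointer as passed to attach):

	/*
	 * Sketch only: fetch the optional "standby" property from
	 * ata.conf, defaulting to -1 ("leave the drive's current
	 * setting alone") when the property is absent.
	 */
	int	my_standby_secs;

	my_standby_secs = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "standby", -1);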
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata_blacklist.c b/usr/src/uts/intel/io/dktp/controller/ata/ata_blacklist.c
new file mode 100644
index 0000000000..7a0e65ffc8
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata_blacklist.c
@@ -0,0 +1,86 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/debug.h>
+#include <sys/pci.h>
+
+#include "ata_blacklist.h"
+
+pcibl_t ata_pciide_blacklist[] = {
+ /*
+ * The Nat SEMI PC87415 doesn't handle data and status byte
+ * synchronization correctly if an I/O error occurs that
+ * stops the request before the last sector. I think it can
+ * cause lockups. See section 7.4.5.3 of the PC87415 spec.
+ * It's also rumored to be a "single fifo" type chip that can't
+ * DMA on both channels correctly.
+ */
+ { 0x100b, 0xffff, 0x2, 0xffff, ATA_BL_BOGUS},
+
+ /*
+ * The CMD chip 0x646 does not support the use of interrupt bit
+ * in the busmaster ide status register when PIO is used.
+ * DMA is explicitly disabled for this legacy chip
+ */
+ { 0x1095, 0xffff, 0x0646, 0xffff, ATA_BL_BMSTATREG_PIO_BROKEN |
+ ATA_BL_NODMA},
+
+ /*
+ * Ditto for Serverworks CSB5 and CSB6 chips, but we can
+ * handle DMA. Also, when emulating OSB4 mode, the simplex
+ * bit lies!
+ */
+ { 0x1166, 0xffff, 0x0212, 0xffff, ATA_BL_BMSTATREG_PIO_BROKEN|
+ ATA_BL_NO_SIMPLEX},
+ { 0x1166, 0xffff, 0x0213, 0xffff, ATA_BL_BMSTATREG_PIO_BROKEN},
+
+ { 0, 0, 0, 0, 0 }
+};
+
+/*
+ * add drives that have DMA or other problems to this list
+ */
+
+atabl_t ata_drive_blacklist[] = {
+ { "NEC CD-ROM DRIVE:260", ATA_BL_1SECTOR },
+ { "NEC CD-ROM DRIVE:272", ATA_BL_1SECTOR },
+ { "NEC CD-ROM DRIVE:273", ATA_BL_1SECTOR },
+
+ { /* Mitsumi */ "FX001DE", ATA_BL_1SECTOR },
+
+ { "fubar",
+ (ATA_BL_NODMA |
+ ATA_BL_1SECTOR |
+ ATA_BL_NORVRT |
+ ATA_BL_BOGUS |
+ ATA_BL_BMSTATREG_PIO_BROKEN)
+ },
+ NULL
+};
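
The pcibl_t masks above imply a masked compare: a controller is blacklisted when its PCI vendor and device IDs, ANDed with b_vmask and b_dmask, match the table entry, and the all-zero row terminates the scan. The driver's real lookup is ata_check_pciide_blacklist() in ata_common.c (declared later in this change but not shown in this excerpt); the routine below is only a hypothetical sketch of that convention:

	/*
	 * Sketch only: return the blacklist flags for a PCI-IDE
	 * controller, or 0 if the vendor/device pair is not listed.
	 * The all-zero sentinel row ends the table.
	 */
	static uint_t
	my_pciide_blacklist_flags(uint_t venid, uint_t devid)
	{
		pcibl_t	*blp;

		for (blp = ata_pciide_blacklist; blp->b_flags != 0; blp++) {
			if ((venid & blp->b_vmask) == blp->b_vendorid &&
			    (devid & blp->b_dmask) == blp->b_deviceid)
				return (blp->b_flags);
		}
		return (0);
	}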
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata_blacklist.h b/usr/src/uts/intel/io/dktp/controller/ata/ata_blacklist.h
new file mode 100644
index 0000000000..57f6d110b8
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata_blacklist.h
@@ -0,0 +1,90 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _ATA_BLACKLIST_H
+#define _ATA_BLACKLIST_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This is the PCI-IDE chip blacklist
+ */
+typedef struct {
+ uint_t b_vendorid;
+ uint_t b_vmask;
+ uint_t b_deviceid;
+ uint_t b_dmask;
+ uint_t b_flags;
+} pcibl_t;
+
+extern pcibl_t ata_pciide_blacklist[];
+
+/*
+ * This is the drive blacklist
+ */
+typedef struct {
+ char *b_model;
+ uint_t b_flags;
+} atabl_t;
+
+extern atabl_t ata_drive_blacklist[];
+
+/*
+ * use the same flags for both lists
+ */
+#define ATA_BL_BOGUS 0x1 /* only use in compatibility mode */
+#define ATA_BL_NODMA 0x2 /* don't use DMA on this one */
+#define ATA_BL_1SECTOR 0x4 /* limit PIO transfers to 1 sector */
+#define ATA_BL_BMSTATREG_PIO_BROKEN 0x8
+
+ /*
+ * do not use bus master ide status register
+ * if not doing dma, or if it does not work
+ * properly when doing DMA (for example, on
+ * some lx50's!)
+ */
+
+
+#define ATA_BL_NORVRT 0x10
+ /*
+ * Don't enable revert to power-on
+ * defaults before rebooting
+ */
+
+#define ATA_BL_NO_SIMPLEX 0x20
+ /*
+ * Ignore simplex bit on this device
+ * if set
+ */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ATA_BLACKLIST_H */
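
The drive table is keyed on the model string returned by IDENTIFY DEVICE; ata_init_drive() in ata_common.c below asks ata_check_drive_blacklist() whether a given flag (for example ATA_BL_1SECTOR) applies to the attached drive. A hypothetical sketch of that style of lookup, ignoring the blank-padded model-name comparison the real driver performs via ata_strncmp():

	/*
	 * Sketch only: report whether the IDENTIFY DEVICE model string
	 * matches a blacklisted drive carrying the given flag.  The
	 * NULL entry terminates the table; plain strncmp() is used
	 * here purely for illustration.
	 */
	static int
	my_drive_blacklisted(const char *model, uint_t flag)
	{
		atabl_t	*blp;

		for (blp = ata_drive_blacklist; blp->b_model != NULL; blp++) {
			if (strncmp(model, blp->b_model,
			    strlen(blp->b_model)) == 0)
				return ((blp->b_flags & flag) != 0);
		}
		return (0);
	}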
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata_cmd.h b/usr/src/uts/intel/io/dktp/controller/ata/ata_cmd.h
new file mode 100644
index 0000000000..9483f1e9b0
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata_cmd.h
@@ -0,0 +1,89 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 1996 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _ATA_CMD_H
+#define _ATA_CMD_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Common ATA commands.
+ */
+#define ATC_DIAG 0x90 /* diagnose command */
+#define ATC_RECAL 0x10 /* restore cmd, bottom 4 bits step rate */
+#define ATC_FORMAT 0x50 /* format track command */
+#define ATC_SET_FEAT 0xef /* set features */
+#define ATC_IDLE_IMMED 0xe1 /* idle immediate */
+#define ATC_STANDBY_IM 0xe0 /* standby immediate */
+#define ATC_DOOR_LOCK 0xde /* door lock */
+#define ATC_DOOR_UNLOCK 0xdf /* door unlock */
+#define ATC_IDLE 0xe3 /* idle */
+
+/*
+ * ATA/ATAPI-4 disk commands.
+ */
+#define ATC_DEVICE_RESET 0x08 /* ATAPI device reset */
+#define ATC_EJECT 0xed /* media eject */
+#define ATC_FLUSH_CACHE 0xe7 /* flush write-cache */
+#define ATC_ID_DEVICE 0xec /* IDENTIFY DEVICE */
+#define ATC_ID_PACKET_DEVICE 0xa1 /* ATAPI identify packet device */
+#define ATC_INIT_DEVPARMS 0x91 /* initialize device parameters */
+#define ATC_PACKET 0xa0 /* ATAPI packet */
+#define ATC_RDMULT 0xc4 /* read multiple */
+#define ATC_RDSEC 0x20 /* read sector */
+#define ATC_RDVER 0x40 /* read verify */
+#define ATC_READ_DMA 0xc8 /* read (multiple) w/DMA */
+#define ATC_SEEK 0x70 /* seek */
+#define ATC_SERVICE 0xa2 /* queued/overlap service */
+#define ATC_SETMULT 0xc6 /* set multiple mode */
+#define ATC_WRITE_DMA 0xca /* write (multiple) w/DMA */
+#define ATC_WRMULT 0xc5 /* write multiple */
+#define ATC_WRSEC 0x30 /* write sector */
+
+/*
+ * Low bits for Read/Write commands...
+ */
+#define ATCM_ECCRETRY 0x01 /* Enable ECC and RETRY by controller */
+ /* enabled if bit is CLEARED!!! */
+#define ATCM_LONGMODE 0x02 /* Use Long Mode (get/send data & ECC) */
+
+
+/*
+ * Obsolete ATA commands.
+ */
+
+#define ATC_RDLONG 0x23 /* read long without retry */
+#define ATC_ACK_MC 0xdb /* acknowledge media change */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ATA_CMD_H */
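
Each of these opcodes is issued by selecting a drive and then writing the command byte to the task-file command register; ata_common.c below does this with ddi_put8() on the register handles set up in ata_init_controller(). A rough sketch of a synchronous FLUSH CACHE, assuming ata_ctlp and ata_drvp as established during attach, with ATS_DRDY/ATS_BSY coming from ata_common.h (part of this change, not excerpted here). The driver's actual path goes through ata_flush_cache() and the GHD/FSM machinery with fuller error handling:

	/*
	 * Sketch only: select the drive, issue ATC_FLUSH_CACHE, then
	 * poll the alternate status register until BSY clears and
	 * DRDY asserts, or the (long) flush timeout expires.
	 */
	ddi_put8(ata_ctlp->ac_iohandle1, ata_ctlp->ac_drvhd,
	    ata_drvp->ad_drive_bits);
	ATA_DELAY_400NSEC(ata_ctlp->ac_iohandle2, ata_ctlp->ac_ioaddr2);

	ddi_put8(ata_ctlp->ac_iohandle1, ata_ctlp->ac_cmd, ATC_FLUSH_CACHE);

	if (!ata_wait(ata_ctlp->ac_iohandle2, ata_ctlp->ac_ioaddr2,
	    ATS_DRDY, ATS_BSY, ata_flush_cache_wait)) {
		/* the drive never came ready; treat the flush as failed */
		return (FALSE);
	}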
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata_common.c b/usr/src/uts/intel/io/dktp/controller/ata/ata_common.c
new file mode 100644
index 0000000000..c6a89ed795
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata_common.c
@@ -0,0 +1,3377 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/modctl.h>
+#include <sys/debug.h>
+#include <sys/promif.h>
+#include <sys/pci.h>
+#include <sys/errno.h>
+#include <sys/open.h>
+#include <sys/uio.h>
+#include <sys/cred.h>
+
+#include "ata_common.h"
+#include "ata_disk.h"
+#include "atapi.h"
+#include "ata_blacklist.h"
+#include "sil3xxx.h"
+
+/*
+ * Solaris Entry Points.
+ */
+
+static int ata_probe(dev_info_t *dip);
+static int ata_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
+static int ata_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
+static int ata_bus_ctl(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t o,
+ void *a, void *v);
+static uint_t ata_intr(caddr_t arg);
+
+/*
+ * GHD Entry points
+ */
+
+static int ata_get_status(void *hba_handle, void *intr_status);
+static void ata_process_intr(void *hba_handle, void *intr_status);
+static int ata_hba_start(void *handle, gcmd_t *gcmdp);
+static void ata_hba_complete(void *handle, gcmd_t *gcmdp, int do_callback);
+static int ata_timeout_func(void *hba_handle, gcmd_t *gcmdp,
+ gtgt_t *gtgtp, gact_t action, int calltype);
+
+/*
+ * Local Function Prototypes
+ */
+static int ata_prop_lookup_int(dev_t match_dev, dev_info_t *dip,
+ uint_t flags, char *name, int defvalue);
+static int ata_ctlr_fsm(uchar_t fsm_func, ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp, ata_pkt_t *ata_pktp,
+ int *DoneFlgp);
+static void ata_destroy_controller(dev_info_t *dip);
+static int ata_drive_type(uchar_t drvhd,
+ ddi_acc_handle_t io_hdl1, caddr_t ioaddr1,
+ ddi_acc_handle_t io_hdl2, caddr_t ioaddr2,
+ struct ata_id *ata_id_bufp);
+static ata_ctl_t *ata_init_controller(dev_info_t *dip);
+static ata_drv_t *ata_init_drive(ata_ctl_t *ata_ctlp,
+ uchar_t targ, uchar_t lun);
+static int ata_init_drive_pcidma(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ dev_info_t *tdip);
+static int ata_flush_cache(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp);
+static void ata_init_pciide(dev_info_t *dip, ata_ctl_t *ata_ctlp);
+static int ata_reset_bus(ata_ctl_t *ata_ctlp);
+static int ata_setup_ioaddr(dev_info_t *dip,
+ ddi_acc_handle_t *iohandle1, caddr_t *ioaddr1p,
+ ddi_acc_handle_t *iohandle2, caddr_t *ioaddr2p,
+ ddi_acc_handle_t *bm_hdlp, caddr_t *bm_addrp);
+static int ata_software_reset(ata_ctl_t *ata_ctlp);
+static int ata_start_arq(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_strncmp(char *p1, char *p2, int cnt);
+static void ata_uninit_drive(ata_drv_t *ata_drvp);
+
+static int ata_check_pciide_blacklist(dev_info_t *dip, uint_t flags);
+static int ata_check_revert_to_defaults(ata_drv_t *ata_drvp);
+static void ata_show_transfer_mode(ata_ctl_t *, ata_drv_t *);
+static int ata_spec_init_controller(dev_info_t *dip);
+
+
+/*
+ * Local static data
+ */
+static void *ata_state;
+
+static tmr_t ata_timer_conf; /* single timeout list for all instances */
+static int ata_watchdog_usec = 100000; /* check timeouts every 100 ms */
+
+int ata_hba_start_watchdog = 1000;
+int ata_process_intr_watchdog = 1000;
+int ata_reset_bus_watchdog = 1000;
+
+
+/*
+ * number of seconds to wait during various operations
+ */
+int ata_flush_delay = 5 * 1000000;
+uint_t ata_set_feature_wait = 4 * 1000000;
+uint_t ata_flush_cache_wait = 60 * 1000000; /* may take a long time */
+
+/*
+ * Change this for SFF-8070i support. Currently SFF-8070i is
+ * using a field in the IDENTIFY PACKET DEVICE response which
+ * already seems to be in use by some vendors' drives. I suspect
+ * SFF will either move their lastlun field or provide a reliable
+ * way to validate it.
+ */
+int ata_enable_atapi_luns = FALSE;
+
+/*
+ * set this to disable all DMA requests
+ */
+int ata_dma_disabled = FALSE;
+
+/*
+ * set this to TRUE to enable storing the IDENTIFY DEVICE result in the
+ * "ata" or "atapi" property.
+ */
+int ata_id_debug = FALSE;
+
+/*
+ * set this to TRUE to enable logging device-capability data
+ */
+int ata_capability_data = FALSE;
+
+#define ATAPRT(fmt) ghd_err fmt
+
+/*
+ * DMA selection message pointers
+ */
+char *ata_cntrl_DMA_sel_msg;
+char *ata_dev_DMA_sel_msg;
+
+/*
+ * bus nexus operations
+ */
+static struct bus_ops ata_bus_ops;
+static struct bus_ops *scsa_bus_ops_p;
+
+/* ARGSUSED */
+static int
+ata_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
+{
+ if (ddi_get_soft_state(ata_state, getminor(*devp)) == NULL)
+ return (ENXIO);
+
+ return (0);
+}
+
+/*
+ * The purpose of this function is to pass the ioaddress of the controller
+ * to the caller, specifically used for upgrade from pre-pciide
+ * to pciide nodes
+ */
+/* ARGSUSED */
+static int
+ata_read(dev_t dev, struct uio *uio_p, cred_t *cred_p)
+{
+ ata_ctl_t *ata_ctlp;
+ char buf[18];
+ long len;
+
+ ata_ctlp = ddi_get_soft_state(ata_state, getminor(dev));
+
+ if (ata_ctlp == NULL)
+ return (ENXIO);
+
+ (void) sprintf(buf, "%p\n", (void *) ata_ctlp->ac_ioaddr1);
+
+ len = strlen(buf) - uio_p->uio_offset;
+ len = min(uio_p->uio_resid, len);
+ if (len <= 0)
+ return (0);
+
+ return (uiomove((caddr_t)(buf + uio_p->uio_offset), len,
+ UIO_READ, uio_p));
+}
+
+int
+ata_devo_reset(
+ dev_info_t *dip,
+ ddi_reset_cmd_t cmd)
+{
+ ata_ctl_t *ata_ctlp;
+ ata_drv_t *ata_drvp;
+ int instance;
+ int i;
+ int rc;
+ int flush_okay;
+
+ if (cmd != DDI_RESET_FORCE)
+ return (0);
+
+ instance = ddi_get_instance(dip);
+ ata_ctlp = ddi_get_soft_state(ata_state, instance);
+
+ if (!ata_ctlp)
+ return (0);
+
+ /*
+ * reset ATA drives and flush the write cache of any drives
+ */
+ flush_okay = TRUE;
+ for (i = 0; i < ATA_MAXTARG; i++) {
+ if ((ata_drvp = CTL2DRV(ata_ctlp, i, 0)) == 0)
+ continue;
+ /* Don't revert to defaults for certain IBM drives */
+ if ((ata_drvp->ad_flags & AD_DISK) != 0 &&
+ ((ata_drvp->ad_flags & AD_NORVRT) == 0)) {
+ /* Enable revert to defaults when reset */
+ (void) ata_set_feature(ata_ctlp, ata_drvp, 0xCC, 0);
+ }
+
+ /*
+ * skip flush cache if device type is cdrom
+ *
+ * notes: the structure definitions for ata_drvp->ad_id are
+ * defined for the ATA IDENTIFY_DEVICE, but if AD_ATAPI is set
+ * the struct holds data for the ATAPI IDENTIFY_PACKET_DEVICE
+ */
+ if (!IS_CDROM(ata_drvp)) {
+
+ /*
+ * Try the ATA/ATAPI flush write cache command
+ */
+ rc = ata_flush_cache(ata_ctlp, ata_drvp);
+ ADBG_WARN(("ata_flush_cache %s\n",
+ rc ? "okay" : "failed"));
+
+ if (!rc)
+ flush_okay = FALSE;
+ }
+
+
+ /*
+ * do something else if flush cache not supported
+ */
+ }
+
+ /*
+ * just busy wait if any drive doesn't support FLUSH CACHE
+ */
+ if (!flush_okay)
+ drv_usecwait(ata_flush_delay);
+ return (0);
+}
+
+
+static struct cb_ops ata_cb_ops = {
+ ata_open, /* open */
+ nulldev, /* close */
+ nodev, /* strategy */
+ nodev, /* print */
+ nodev, /* dump */
+ ata_read, /* read */
+ nodev, /* write */
+ nodev, /* ioctl */
+ nodev, /* devmap */
+ nodev, /* mmap */
+ nodev, /* segmap */
+ nochpoll, /* chpoll */
+ ddi_prop_op, /* prop_op */
+ NULL, /* stream info */
+ D_MP, /* driver compatibility flag */
+ CB_REV, /* cb_ops revision */
+ nodev, /* aread */
+ nodev /* awrite */
+};
+
+static struct dev_ops ata_ops = {
+ DEVO_REV, /* devo_rev, */
+ 0, /* refcnt */
+ ddi_getinfo_1to1, /* info */
+ nulldev, /* identify */
+ ata_probe, /* probe */
+ ata_attach, /* attach */
+ ata_detach, /* detach */
+ ata_devo_reset, /* reset */
+ &ata_cb_ops, /* driver operations */
+ NULL /* bus operations */
+};
+
+/* driver loadable module wrapper */
+static struct modldrv modldrv = {
+ &mod_driverops, /* Type of module. This one is a driver */
+ "ATA AT-bus attachment disk controller Driver", /* module name */
+ &ata_ops, /* driver ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *)&modldrv, NULL
+};
+
+#ifdef ATA_DEBUG
+int ata_debug_init = FALSE;
+int ata_debug_probe = FALSE;
+int ata_debug_attach = FALSE;
+
+int ata_debug = ADBG_FLAG_ERROR
+ /* | ADBG_FLAG_ARQ */
+ /* | ADBG_FLAG_INIT */
+ /* | ADBG_FLAG_TRACE */
+ /* | ADBG_FLAG_TRANSPORT */
+ /* | ADBG_FLAG_WARN */
+ ;
+#endif
+
+int
+_init(void)
+{
+ int err;
+
+#ifdef ATA_DEBUG
+ if (ata_debug_init)
+ debug_enter("\nATA _INIT\n");
+#endif
+
+ if ((err = ddi_soft_state_init(&ata_state, sizeof (ata_ctl_t), 0)) != 0)
+ return (err);
+
+ if ((err = scsi_hba_init(&modlinkage)) != 0) {
+ ddi_soft_state_fini(&ata_state);
+ return (err);
+ }
+
+ /* save pointer to SCSA provided bus_ops struct */
+ scsa_bus_ops_p = ata_ops.devo_bus_ops;
+
+ /* make a copy of SCSA bus_ops */
+ ata_bus_ops = *(ata_ops.devo_bus_ops);
+
+ /*
+ * Modify our bus_ops to call our routines. Our implementation
+ * will determine if the device is ATA or ATAPI/SCSA and react
+ * accordingly.
+ */
+ ata_bus_ops.bus_ctl = ata_bus_ctl;
+
+ /* patch our bus_ops into the dev_ops struct */
+ ata_ops.devo_bus_ops = &ata_bus_ops;
+
+ if ((err = mod_install(&modlinkage)) != 0) {
+ scsi_hba_fini(&modlinkage);
+ ddi_soft_state_fini(&ata_state);
+ }
+
+ /*
+ * Initialize the per driver timer info.
+ */
+
+ ghd_timer_init(&ata_timer_conf, drv_usectohz(ata_watchdog_usec));
+
+ return (err);
+}
+
+int
+_fini(void)
+{
+ int err;
+
+ if ((err = mod_remove(&modlinkage)) == 0) {
+ ghd_timer_fini(&ata_timer_conf);
+ scsi_hba_fini(&modlinkage);
+ ddi_soft_state_fini(&ata_state);
+ }
+
+ return (err);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+
+/* driver probe entry point */
+
+static int
+ata_probe(
+ dev_info_t *dip)
+{
+ ddi_acc_handle_t io_hdl1 = NULL;
+ ddi_acc_handle_t io_hdl2 = NULL;
+ ddi_acc_handle_t bm_hdl = NULL;
+ caddr_t ioaddr1;
+ caddr_t ioaddr2;
+ caddr_t bm_addr;
+ int drive;
+ struct ata_id *ata_id_bufp;
+ int rc = DDI_PROBE_FAILURE;
+
+ ADBG_TRACE(("ata_probe entered\n"));
+#ifdef ATA_DEBUG
+ if (ata_debug_probe)
+ debug_enter("\nATA_PROBE\n");
+#endif
+
+ if (!ata_setup_ioaddr(dip, &io_hdl1, &ioaddr1, &io_hdl2, &ioaddr2,
+ &bm_hdl, &bm_addr))
+ return (rc);
+
+ ata_id_bufp = kmem_zalloc(sizeof (*ata_id_bufp), KM_SLEEP);
+
+ for (drive = 0; drive < ATA_MAXTARG; drive++) {
+ uchar_t drvhd;
+
+ /* set up drv/hd and feature registers */
+
+ drvhd = (drive == 0 ? ATDH_DRIVE0 : ATDH_DRIVE1);
+
+
+ if (ata_drive_type(drvhd, io_hdl1, ioaddr1, io_hdl2, ioaddr2,
+ ata_id_bufp) != ATA_DEV_NONE) {
+ rc = (DDI_PROBE_SUCCESS);
+ break;
+ }
+ }
+
+ /* always leave the controller set to drive 0 */
+ if (drive != 0) {
+ ddi_put8(io_hdl1, (uchar_t *)ioaddr1 + AT_DRVHD, ATDH_DRIVE0);
+ ATA_DELAY_400NSEC(io_hdl2, ioaddr2);
+ }
+
+out2:
+ kmem_free(ata_id_bufp, sizeof (*ata_id_bufp));
+
+ if (io_hdl1)
+ ddi_regs_map_free(&io_hdl1);
+ if (io_hdl2)
+ ddi_regs_map_free(&io_hdl2);
+ if (bm_hdl)
+ ddi_regs_map_free(&bm_hdl);
+ return (rc);
+}
+
+/*
+ *
+ * driver attach entry point
+ *
+ */
+
+static int
+ata_attach(
+ dev_info_t *dip,
+ ddi_attach_cmd_t cmd)
+{
+ ata_ctl_t *ata_ctlp;
+ ata_drv_t *ata_drvp;
+ ata_drv_t *first_drvp = NULL;
+ uchar_t targ;
+ uchar_t lun;
+ uchar_t lastlun;
+ int atapi_count = 0;
+ int disk_count = 0;
+
+ ADBG_TRACE(("ata_attach entered\n"));
+#ifdef ATA_DEBUG
+ if (ata_debug_attach)
+ debug_enter("\nATA_ATTACH\n\n");
+#endif
+
+ if (cmd != DDI_ATTACH)
+ return (DDI_FAILURE);
+
+ /* initialize controller */
+ ata_ctlp = ata_init_controller(dip);
+
+ if (ata_ctlp == NULL)
+ goto errout;
+
+ mutex_enter(&ata_ctlp->ac_ccc.ccc_hba_mutex);
+
+ /* initialize drives */
+
+ for (targ = 0; targ < ATA_MAXTARG; targ++) {
+
+ ata_drvp = ata_init_drive(ata_ctlp, targ, 0);
+ if (ata_drvp == NULL)
+ continue;
+
+ if (first_drvp == NULL)
+ first_drvp = ata_drvp;
+
+ if (ATAPIDRV(ata_drvp)) {
+ atapi_count++;
+ lastlun = ata_drvp->ad_id.ai_lastlun;
+ } else {
+ disk_count++;
+ lastlun = 0;
+ }
+
+ /*
+ * LUN support is currently disabled. Check with SFF-8070i
+ * before enabling.
+ */
+ if (!ata_enable_atapi_luns)
+ lastlun = 0;
+
+ /* Initialize higher LUNs, if there are any */
+ for (lun = 1; lun <= lastlun && lun < ATA_MAXLUN; lun++) {
+ if ((ata_drvp =
+ ata_init_drive(ata_ctlp, targ, lun)) != NULL) {
+ ata_show_transfer_mode(ata_ctlp, ata_drvp);
+ }
+ }
+ }
+
+ if ((atapi_count == 0) && (disk_count == 0)) {
+ ADBG_WARN(("ata_attach: no drives detected\n"));
+ goto errout1;
+ }
+
+ /*
+ * Always make certain that a valid drive is selected so
+ * that routines which poll the status register don't get
+ * confused by non-existent drives.
+ */
+ ddi_put8(ata_ctlp->ac_iohandle1, ata_ctlp->ac_drvhd,
+ first_drvp->ad_drive_bits);
+ ATA_DELAY_400NSEC(ata_ctlp->ac_iohandle2, ata_ctlp->ac_ioaddr2);
+
+ /*
+ * make certain the drive selected
+ */
+ if (!ata_wait(ata_ctlp->ac_iohandle2, ata_ctlp->ac_ioaddr2,
+ 0, ATS_BSY, 5000000)) {
+ ADBG_ERROR(("ata_attach: select failed\n"));
+ }
+
+ /*
+ * initialize atapi/ata_dsk modules if we have at least
+ * one drive of that type.
+ */
+
+ if (atapi_count) {
+ if (!atapi_attach(ata_ctlp))
+ goto errout1;
+ ata_ctlp->ac_flags |= AC_ATAPI_INIT;
+ }
+
+ if (disk_count) {
+ if (!ata_disk_attach(ata_ctlp))
+ goto errout1;
+ ata_ctlp->ac_flags |= AC_DISK_INIT;
+ }
+
+ /*
+ * make certain the interrupt and error latches are clear
+ */
+ if (ata_ctlp->ac_pciide) {
+
+ int instance = ddi_get_instance(dip);
+ if (ddi_create_minor_node(dip, "control", S_IFCHR, instance,
+ DDI_PSEUDO, 0) != DDI_SUCCESS) {
+ goto errout1;
+ }
+
+ (void) ata_pciide_status_clear(ata_ctlp);
+
+ }
+
+ /*
+ * enable the interrupt handler and drop the mutex
+ */
+ ata_ctlp->ac_flags |= AC_ATTACHED;
+ mutex_exit(&ata_ctlp->ac_ccc.ccc_hba_mutex);
+
+ ddi_report_dev(dip);
+ return (DDI_SUCCESS);
+
+errout1:
+ mutex_exit(&ata_ctlp->ac_ccc.ccc_hba_mutex);
+errout:
+ (void) ata_detach(dip, DDI_DETACH);
+ return (DDI_FAILURE);
+}
+
+/* driver detach entry point */
+
+static int
+ata_detach(
+ dev_info_t *dip,
+ ddi_detach_cmd_t cmd)
+{
+ ata_ctl_t *ata_ctlp;
+ ata_drv_t *ata_drvp;
+ int instance;
+ int i;
+ int j;
+
+ ADBG_TRACE(("ata_detach entered\n"));
+
+ if (cmd != DDI_DETACH)
+ return (DDI_FAILURE);
+
+ instance = ddi_get_instance(dip);
+ ata_ctlp = ddi_get_soft_state(ata_state, instance);
+
+ if (!ata_ctlp)
+ return (DDI_SUCCESS);
+
+ ata_ctlp->ac_flags &= ~AC_ATTACHED;
+
+ /* destroy ata module */
+ if (ata_ctlp->ac_flags & AC_DISK_INIT)
+ ata_disk_detach(ata_ctlp);
+
+ /* destroy atapi module */
+ if (ata_ctlp->ac_flags & AC_ATAPI_INIT)
+ atapi_detach(ata_ctlp);
+
+ ddi_remove_minor_node(dip, NULL);
+
+ /* destroy drives */
+ for (i = 0; i < ATA_MAXTARG; i++) {
+ for (j = 0; j < ATA_MAXLUN; j++) {
+ ata_drvp = CTL2DRV(ata_ctlp, i, j);
+ if (ata_drvp != NULL)
+ ata_uninit_drive(ata_drvp);
+ }
+ }
+
+ if (ata_ctlp->ac_iohandle1)
+ ddi_regs_map_free(&ata_ctlp->ac_iohandle1);
+ if (ata_ctlp->ac_iohandle2)
+ ddi_regs_map_free(&ata_ctlp->ac_iohandle2);
+ if (ata_ctlp->ac_bmhandle)
+ ddi_regs_map_free(&ata_ctlp->ac_bmhandle);
+
+ ddi_prop_remove_all(dip);
+
+ /* destroy controller */
+ ata_destroy_controller(dip);
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Nexus driver bus_ctl entry point
+ */
+/*ARGSUSED*/
+static int
+ata_bus_ctl(
+ dev_info_t *d,
+ dev_info_t *r,
+ ddi_ctl_enum_t o,
+ void *a,
+ void *v)
+{
+ dev_info_t *tdip;
+ int target_type;
+ int rc;
+ char *bufp;
+
+ ADBG_TRACE(("ata_bus_ctl entered\n"));
+
+ switch (o) {
+
+ case DDI_CTLOPS_SIDDEV:
+ return (DDI_FAILURE);
+
+ case DDI_CTLOPS_IOMIN:
+
+ /*
+ * Since we use PIO, we return a minimum I/O size of
+ * one byte. This will need to be updated when we
+ * implement DMA support
+ */
+
+ *((int *)v) = 1;
+ return (DDI_SUCCESS);
+
+ case DDI_CTLOPS_DMAPMAPC:
+ case DDI_CTLOPS_REPORTINT:
+ case DDI_CTLOPS_REGSIZE:
+ case DDI_CTLOPS_NREGS:
+ case DDI_CTLOPS_SLAVEONLY:
+ case DDI_CTLOPS_AFFINITY:
+ case DDI_CTLOPS_POKE:
+ case DDI_CTLOPS_PEEK:
+
+ /* These ops shouldn't be called by a target driver */
+ ADBG_ERROR(("ata_bus_ctl: %s%d: invalid op (%d) from %s%d\n",
+ ddi_driver_name(d), ddi_get_instance(d), o,
+ ddi_driver_name(r), ddi_get_instance(r)));
+
+ return (DDI_FAILURE);
+
+ case DDI_CTLOPS_REPORTDEV:
+ case DDI_CTLOPS_INITCHILD:
+ case DDI_CTLOPS_UNINITCHILD:
+
+ /* these require special handling below */
+ break;
+
+ default:
+ return (ddi_ctlops(d, r, o, a, v));
+ }
+
+ /* get targets dip */
+
+ if (o == DDI_CTLOPS_INITCHILD || o == DDI_CTLOPS_UNINITCHILD)
+ tdip = (dev_info_t *)a;
+ else
+ tdip = r;
+
+ /*
+ * XXX - Get class of target
+ * Before the "class" entry in a conf file becomes
+ * a real property, we use an additional property
+ * tentatively called "class_prop". We will require that
+ * new classes (ie. direct) export "class_prop".
+ * SCSA target drivers will not have this property, so
+ * no property implies SCSA.
+ */
+ if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, tdip, DDI_PROP_DONTPASS,
+ "class", &bufp) == DDI_PROP_SUCCESS) ||
+ (ddi_prop_lookup_string(DDI_DEV_T_ANY, tdip, DDI_PROP_DONTPASS,
+ "class_prop", &bufp) == DDI_PROP_SUCCESS)) {
+ if (strcmp(bufp, "dada") == 0)
+ target_type = ATA_DEV_DISK;
+ else if (strcmp(bufp, "scsi") == 0)
+ target_type = ATA_DEV_ATAPI;
+ else {
+ ADBG_WARN(("ata_bus_ctl: invalid target class %s\n",
+ bufp));
+ ddi_prop_free(bufp);
+ return (DDI_FAILURE);
+ }
+ ddi_prop_free(bufp);
+ } else {
+ target_type = ATA_DEV_ATAPI; /* no class prop, assume SCSI */
+ }
+
+ if (o == DDI_CTLOPS_INITCHILD) {
+ int instance = ddi_get_instance(d);
+ ata_ctl_t *ata_ctlp = ddi_get_soft_state(ata_state, instance);
+ ata_drv_t *ata_drvp;
+ int targ;
+ int lun;
+ int drive_type;
+ char *disk_prop;
+ char *class_prop;
+
+ if (ata_ctlp == NULL) {
+ ADBG_WARN(("ata_bus_ctl: failed to find ctl struct\n"));
+ return (DDI_FAILURE);
+ }
+
+ /* get (target,lun) of child device */
+
+ targ = ddi_prop_get_int(DDI_DEV_T_ANY, tdip, DDI_PROP_DONTPASS,
+ "target", -1);
+ if (targ == -1) {
+ ADBG_WARN(("ata_bus_ctl: failed to get targ num\n"));
+ return (DDI_FAILURE);
+ }
+
+ lun = ddi_prop_get_int(DDI_DEV_T_ANY, tdip, DDI_PROP_DONTPASS,
+ "lun", 0);
+
+ if ((targ < 0) || (targ >= ATA_MAXTARG) ||
+ (lun < 0) || (lun >= ATA_MAXLUN)) {
+ return (DDI_FAILURE);
+ }
+
+ ata_drvp = CTL2DRV(ata_ctlp, targ, lun);
+
+ if (ata_drvp == NULL)
+ return (DDI_FAILURE); /* no drive */
+
+ /* get type of device */
+
+ if (ATAPIDRV(ata_drvp))
+ drive_type = ATA_DEV_ATAPI;
+ else
+ drive_type = ATA_DEV_DISK;
+
+ /*
+ * Check for special handling when child driver is
+ * cmdk (which morphs to the correct interface)
+ */
+ if (strcmp(ddi_get_name(tdip), "cmdk") == 0) {
+
+ if ((target_type == ATA_DEV_DISK) &&
+ (target_type != drive_type))
+ return (DDI_FAILURE);
+
+ target_type = drive_type;
+
+ if (drive_type == ATA_DEV_ATAPI) {
+ class_prop = "scsi";
+ } else {
+ disk_prop = "dadk";
+ class_prop = "dada";
+
+ if (ndi_prop_update_string(DDI_DEV_T_NONE, tdip,
+ "disk", disk_prop) != DDI_PROP_SUCCESS) {
+ ADBG_WARN(("ata_bus_ctl: failed to "
+ "create disk prop\n"));
+ return (DDI_FAILURE);
+ }
+ }
+
+ if (ndi_prop_update_string(DDI_DEV_T_NONE, tdip,
+ "class_prop", class_prop) != DDI_PROP_SUCCESS) {
+ ADBG_WARN(("ata_bus_ctl: failed to "
+ "create class prop\n"));
+ return (DDI_FAILURE);
+ }
+ }
+
+ /* Check that target class matches the device */
+
+ if (target_type != drive_type)
+ return (DDI_FAILURE);
+
+ /* save pointer to drive struct for ata_disk_bus_ctl */
+ ddi_set_driver_private(tdip, ata_drvp);
+
+ /*
+ * Determine whether to enable DMA support for this drive. This
+ * check is deferred to this point so that the various dma
+ * properties could reside on the devinfo node should finer
+ * grained dma control be required.
+ */
+ ata_drvp->ad_pciide_dma = ata_init_drive_pcidma(ata_ctlp,
+ ata_drvp, tdip);
+ ata_show_transfer_mode(ata_ctlp, ata_drvp);
+ }
+
+ if (target_type == ATA_DEV_ATAPI) {
+ rc = scsa_bus_ops_p->bus_ctl(d, r, o, a, v);
+ } else {
+ rc = ata_disk_bus_ctl(d, r, o, a, v);
+ }
+
+ return (rc);
+}
+
+/*
+ *
+ * GHD ccc_hba_complete callback
+ *
+ */
+
+/* ARGSUSED */
+static void
+ata_hba_complete(
+ void *hba_handle,
+ gcmd_t *gcmdp,
+ int do_callback)
+{
+ ata_drv_t *ata_drvp;
+ ata_pkt_t *ata_pktp;
+
+ ADBG_TRACE(("ata_hba_complete entered\n"));
+
+ ata_drvp = GCMD2DRV(gcmdp);
+ ata_pktp = GCMD2APKT(gcmdp);
+ if (ata_pktp->ap_complete)
+ (*ata_pktp->ap_complete)(ata_drvp, ata_pktp,
+ do_callback);
+}
+
+/* GHD ccc_timeout_func callback */
+
+/* ARGSUSED */
+static int
+ata_timeout_func(
+ void *hba_handle,
+ gcmd_t *gcmdp,
+ gtgt_t *gtgtp,
+ gact_t action,
+ int calltype)
+{
+ ata_ctl_t *ata_ctlp;
+ ata_pkt_t *ata_pktp;
+
+ ADBG_TRACE(("ata_timeout_func entered\n"));
+
+ ata_ctlp = (ata_ctl_t *)hba_handle;
+
+ if (gcmdp != NULL)
+ ata_pktp = GCMD2APKT(gcmdp);
+ else
+ ata_pktp = NULL;
+
+ switch (action) {
+ case GACTION_EARLY_ABORT:
+ /* abort before request was started */
+ if (ata_pktp != NULL) {
+ ata_pktp->ap_flags |= AP_ABORT;
+ }
+ ghd_complete(&ata_ctlp->ac_ccc, gcmdp);
+ return (TRUE);
+
+ case GACTION_EARLY_TIMEOUT:
+ /* timeout before request was started */
+ if (ata_pktp != NULL) {
+ ata_pktp->ap_flags |= AP_TIMEOUT;
+ }
+ ghd_complete(&ata_ctlp->ac_ccc, gcmdp);
+ return (TRUE);
+
+ case GACTION_RESET_TARGET:
+ /*
+ * Resetting a single device is not supported. An ATA
+ * device can't be reset individually at all, and if
+ * you send a RESET to an ATAPI device you have to
+ * reset the whole bus to make certain both devices
+ * on the bus stay in sync regarding which device is
+ * the currently selected one.
+ */
+ return (FALSE);
+
+ case GACTION_RESET_BUS:
+ /*
+ * Issue bus reset and reinitialize both drives.
+ * But only if this is a timed-out request. Target
+ * driver reset requests are ignored because ATA
+ * and ATAPI devices shouldn't be gratuitously reset.
+ */
+ if (gcmdp == NULL)
+ break;
+ return (ata_reset_bus(ata_ctlp));
+ default:
+ break;
+ }
+ return (FALSE);
+}
+
+/*
+ *
+ * Initialize controller's soft-state structure
+ *
+ */
+
+static ata_ctl_t *
+ata_init_controller(
+ dev_info_t *dip)
+{
+ ata_ctl_t *ata_ctlp;
+ int instance;
+ caddr_t ioaddr1;
+ caddr_t ioaddr2;
+
+ ADBG_TRACE(("ata_init_controller entered\n"));
+
+ instance = ddi_get_instance(dip);
+
+ /* allocate controller structure */
+ if (ddi_soft_state_zalloc(ata_state, instance) != DDI_SUCCESS) {
+ ADBG_WARN(("ata_init_controller: soft_state_zalloc failed\n"));
+ return (NULL);
+ }
+
+ ata_ctlp = ddi_get_soft_state(ata_state, instance);
+
+ if (ata_ctlp == NULL) {
+ ADBG_WARN(("ata_init_controller: failed to find "
+ "controller struct\n"));
+ return (NULL);
+ }
+
+ /*
+ * initialize per-controller data
+ */
+ ata_ctlp->ac_dip = dip;
+ ata_ctlp->ac_arq_pktp = kmem_zalloc(sizeof (ata_pkt_t), KM_SLEEP);
+
+ /*
+ * map the device registers
+ */
+ if (!ata_setup_ioaddr(dip, &ata_ctlp->ac_iohandle1, &ioaddr1,
+ &ata_ctlp->ac_iohandle2, &ioaddr2,
+ &ata_ctlp->ac_bmhandle, &ata_ctlp->ac_bmaddr)) {
+ (void) ata_detach(dip, DDI_DETACH);
+ return (NULL);
+ }
+
+ ADBG_INIT(("ata_init_controller: ioaddr1 = 0x%p, ioaddr2 = 0x%p\n",
+ ioaddr1, ioaddr2));
+
+ /*
+ * Do ARQ setup
+ */
+ atapi_init_arq(ata_ctlp);
+
+ /*
+ * Do PCI-IDE setup
+ */
+ ata_init_pciide(dip, ata_ctlp);
+
+ /*
+ * port addresses associated with ioaddr1
+ */
+ ata_ctlp->ac_ioaddr1 = ioaddr1;
+ ata_ctlp->ac_data = (ushort_t *)ioaddr1 + AT_DATA;
+ ata_ctlp->ac_error = (uchar_t *)ioaddr1 + AT_ERROR;
+ ata_ctlp->ac_feature = (uchar_t *)ioaddr1 + AT_FEATURE;
+ ata_ctlp->ac_count = (uchar_t *)ioaddr1 + AT_COUNT;
+ ata_ctlp->ac_sect = (uchar_t *)ioaddr1 + AT_SECT;
+ ata_ctlp->ac_lcyl = (uchar_t *)ioaddr1 + AT_LCYL;
+ ata_ctlp->ac_hcyl = (uchar_t *)ioaddr1 + AT_HCYL;
+ ata_ctlp->ac_drvhd = (uchar_t *)ioaddr1 + AT_DRVHD;
+ ata_ctlp->ac_status = (uchar_t *)ioaddr1 + AT_STATUS;
+ ata_ctlp->ac_cmd = (uchar_t *)ioaddr1 + AT_CMD;
+
+ /*
+ * port addresses associated with ioaddr2
+ */
+ ata_ctlp->ac_ioaddr2 = ioaddr2;
+ ata_ctlp->ac_altstatus = (uchar_t *)ioaddr2 + AT_ALTSTATUS;
+ ata_ctlp->ac_devctl = (uchar_t *)ioaddr2 + AT_DEVCTL;
+
+ /*
+ * AC_BSY_WAIT needs to be set for laptops that do
+ * suspend/resume but do not correctly wait for the busy bit to
+ * drop after a resume.
+ */
+ ata_ctlp->ac_timing_flags = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dip, DDI_PROP_DONTPASS, "timing_flags", 0);
+ /*
+ * get max transfer size, default to 256 sectors
+ */
+ ata_ctlp->ac_max_transfer = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dip, DDI_PROP_DONTPASS, "max_transfer", 0x100);
+ if (ata_ctlp->ac_max_transfer < 1)
+ ata_ctlp->ac_max_transfer = 1;
+ if (ata_ctlp->ac_max_transfer > 0x100)
+ ata_ctlp->ac_max_transfer = 0x100;
+
+ /*
+ * Get the standby timer value
+ */
+ ata_ctlp->ac_standby_time = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dip, DDI_PROP_DONTPASS, "standby", -1);
+
+ /*
+ * If this is a /pci/pci-ide instance check to see if
+ * it's supposed to be attached as an /isa/ata
+ */
+ if (ata_ctlp->ac_pciide) {
+ static char prop_buf[] = "SUNW-ata-ffff-isa";
+ int addr1 = (intptr_t)ioaddr1;
+
+
+ if (addr1 < 0 || addr1 > 0xffff) {
+ (void) ata_detach(dip, DDI_DETACH);
+ return (NULL);
+ }
+ (void) sprintf(prop_buf, "SUNW-ata-%04x-isa",
+ addr1);
+ if (ddi_prop_exists(DDI_DEV_T_ANY, ddi_root_node(),
+ DDI_PROP_DONTPASS, prop_buf)) {
+ (void) ata_detach(dip, DDI_DETACH);
+ return (NULL);
+ }
+ }
+
+ /* Init controller specific stuff */
+ (void) ata_spec_init_controller(dip);
+
+ /*
+ * initialize GHD
+ */
+
+ GHD_WAITQ_INIT(&ata_ctlp->ac_ccc.ccc_waitq, NULL, 1);
+
+ if (!ghd_register("ata", &ata_ctlp->ac_ccc, dip, 0, ata_ctlp,
+ atapi_ccballoc, atapi_ccbfree,
+ ata_pciide_dma_sg_func, ata_hba_start,
+ ata_hba_complete, ata_intr,
+ ata_get_status, ata_process_intr, ata_timeout_func,
+ &ata_timer_conf, NULL)) {
+ (void) ata_detach(dip, DDI_DETACH);
+ return (NULL);
+ }
+
+ ata_ctlp->ac_flags |= AC_GHD_INIT;
+ return (ata_ctlp);
+}
+
+/* destroy a controller */
+
+static void
+ata_destroy_controller(
+ dev_info_t *dip)
+{
+ ata_ctl_t *ata_ctlp;
+ int instance;
+
+ ADBG_TRACE(("ata_destroy_controller entered\n"));
+
+ instance = ddi_get_instance(dip);
+ ata_ctlp = ddi_get_soft_state(ata_state, instance);
+
+ if (ata_ctlp == NULL)
+ return;
+
+ /* destroy ghd */
+ if (ata_ctlp->ac_flags & AC_GHD_INIT)
+ ghd_unregister(&ata_ctlp->ac_ccc);
+
+ /* free the pciide buffer (if any) */
+ ata_pciide_free(ata_ctlp);
+
+ /* destroy controller struct */
+ kmem_free(ata_ctlp->ac_arq_pktp, sizeof (ata_pkt_t));
+ ddi_soft_state_free(ata_state, instance);
+
+}
+
+
+/*
+ *
+ * initialize a drive
+ *
+ */
+
+static ata_drv_t *
+ata_init_drive(
+ ata_ctl_t *ata_ctlp,
+ uchar_t targ,
+ uchar_t lun)
+{
+ static char nec_260[] = "NEC CD-ROM DRIVE";
+ ata_drv_t *ata_drvp;
+ struct ata_id *aidp;
+ char buf[80];
+ int drive_type;
+ int i;
+ int valid_version = 0;
+
+ ADBG_TRACE(("ata_init_drive entered, targ = %d, lun = %d\n",
+ targ, lun));
+
+ /* check if device already exists */
+
+ ata_drvp = CTL2DRV(ata_ctlp, targ, lun);
+
+ if (ata_drvp != NULL)
+ return (ata_drvp);
+
+ /* allocate new device structure */
+
+ ata_drvp = kmem_zalloc(sizeof (ata_drv_t), KM_SLEEP);
+ aidp = &ata_drvp->ad_id;
+
+ /*
+ * set up drive struct
+ */
+ ata_drvp->ad_ctlp = ata_ctlp;
+ ata_drvp->ad_targ = targ;
+ ata_drvp->ad_drive_bits =
+ (ata_drvp->ad_targ == 0 ? ATDH_DRIVE0 : ATDH_DRIVE1);
+ /*
+ * Add the LUN for SFF-8070i support
+ */
+ ata_drvp->ad_lun = lun;
+ ata_drvp->ad_drive_bits |= ata_drvp->ad_lun;
+
+ /*
+ * get drive type, side effect is to collect
+ * IDENTIFY DRIVE data
+ */
+
+ drive_type = ata_drive_type(ata_drvp->ad_drive_bits,
+ ata_ctlp->ac_iohandle1,
+ ata_ctlp->ac_ioaddr1,
+ ata_ctlp->ac_iohandle2,
+ ata_ctlp->ac_ioaddr2,
+ aidp);
+
+ switch (drive_type) {
+ case ATA_DEV_NONE:
+ /* no drive found */
+ goto errout;
+ case ATA_DEV_ATAPI:
+ ata_drvp->ad_flags |= AD_ATAPI;
+ break;
+ case ATA_DEV_DISK:
+ ata_drvp->ad_flags |= AD_DISK;
+ break;
+ }
+
+ /*
+ * swap bytes of all text fields
+ */
+ if (!ata_strncmp(nec_260, aidp->ai_model, sizeof (aidp->ai_model))) {
+ swab(aidp->ai_drvser, aidp->ai_drvser,
+ sizeof (aidp->ai_drvser));
+ swab(aidp->ai_fw, aidp->ai_fw,
+ sizeof (aidp->ai_fw));
+ swab(aidp->ai_model, aidp->ai_model,
+ sizeof (aidp->ai_model));
+ }
+
+ /*
+ * Check if this drive has the Single Sector bug
+ */
+
+ if (ata_check_drive_blacklist(&ata_drvp->ad_id, ATA_BL_1SECTOR))
+ ata_drvp->ad_flags |= AD_1SECTOR;
+ else
+ ata_drvp->ad_flags &= ~AD_1SECTOR;
+
+ /* Check if this drive has the "revert to defaults" bug */
+ if (!ata_check_revert_to_defaults(ata_drvp))
+ ata_drvp->ad_flags |= AD_NORVRT;
+
+ /* Dump the drive info */
+ (void) strncpy(buf, aidp->ai_model, sizeof (aidp->ai_model));
+ buf[sizeof (aidp->ai_model)-1] = '\0';
+ for (i = sizeof (aidp->ai_model) - 2; buf[i] == ' '; i--)
+ buf[i] = '\0';
+
+ ATAPRT(("?\t%s device at targ %d, lun %d lastlun 0x%x\n",
+ (ATAPIDRV(ata_drvp) ? "ATAPI":"IDE"),
+ ata_drvp->ad_targ, ata_drvp->ad_lun, aidp->ai_lastlun));
+
+ ATAPRT(("?\tmodel %s\n", buf));
+
+ if (aidp->ai_majorversion != 0 && aidp->ai_majorversion != 0xffff) {
+ for (i = 14; i >= 2; i--) {
+ if (aidp->ai_majorversion & (1 << i)) {
+ valid_version = i;
+ break;
+ }
+ }
+ ATAPRT((
+ "?\tATA/ATAPI-%d supported, majver 0x%x minver 0x%x\n",
+ valid_version,
+ aidp->ai_majorversion,
+ aidp->ai_minorversion));
+ }
+
+ if (ata_capability_data) {
+
+ ATAPRT(("?\t\tstat %x, err %x\n",
+ ddi_get8(ata_ctlp->ac_iohandle2,
+ ata_ctlp->ac_altstatus),
+ ddi_get8(ata_ctlp->ac_iohandle1, ata_ctlp->ac_error)));
+
+ ATAPRT(("?\t\tcfg 0x%x, cap 0x%x\n",
+ aidp->ai_config,
+ aidp->ai_cap));
+
+ /*
+ * Be aware that ATA-6 and later drives may not provide valid
+ * geometry information and other obsolete info.
+ * Select what is printed based on supported ATA model (skip
+ * anything below ATA/ATAPI-3)
+ */
+
+ if (valid_version == 0 || aidp->ai_majorversion <
+ ATAC_MAJVER_6) {
+ /*
+ * Supported version less than ATA-6
+ */
+ ATAPRT(("?\t\tcyl %d, hd %d, sec/trk %d\n",
+ aidp->ai_fixcyls,
+ aidp->ai_heads,
+ aidp->ai_sectors));
+ }
+ ATAPRT(("?\t\tmult1 0x%x, mult2 0x%x\n",
+ aidp->ai_mult1,
+ aidp->ai_mult2));
+ if (valid_version && aidp->ai_majorversion < ATAC_MAJVER_4) {
+ ATAPRT((
+ "?\t\tpiomode 0x%x, dmamode 0x%x, advpiomode 0x%x\n",
+ aidp->ai_piomode,
+ aidp->ai_dmamode,
+ aidp->ai_advpiomode));
+ } else {
+ ATAPRT(("?\t\tadvpiomode 0x%x\n",
+ aidp->ai_advpiomode));
+ }
+ ATAPRT(("?\t\tminpio %d, minpioflow %d\n",
+ aidp->ai_minpio,
+ aidp->ai_minpioflow));
+ if (valid_version && aidp->ai_majorversion >= ATAC_MAJVER_4 &&
+ (aidp->ai_validinfo & ATAC_VALIDINFO_83)) {
+ ATAPRT(("?\t\tdwdma 0x%x, ultradma 0x%x\n",
+ aidp->ai_dworddma,
+ aidp->ai_ultradma));
+ } else {
+ ATAPRT(("?\t\tdwdma 0x%x\n",
+ aidp->ai_dworddma));
+ }
+ }
+
+ if (ATAPIDRV(ata_drvp)) {
+ if (!atapi_init_drive(ata_drvp))
+ goto errout;
+ } else {
+ if (!ata_disk_init_drive(ata_drvp))
+ goto errout;
+ }
+
+ /*
+ * store pointer in controller struct
+ */
+ CTL2DRV(ata_ctlp, targ, lun) = ata_drvp;
+
+ /*
+ * lock the drive's current settings in case I have to
+ * reset the drive due to some sort of error
+ */
+ (void) ata_set_feature(ata_ctlp, ata_drvp, 0x66, 0);
+
+ return (ata_drvp);
+
+errout:
+ ata_uninit_drive(ata_drvp);
+ return (NULL);
+}
+
+/* destroy a drive */
+
+static void
+ata_uninit_drive(
+ ata_drv_t *ata_drvp)
+{
+#if 0
+ ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
+#endif
+
+ ADBG_TRACE(("ata_uninit_drive entered\n"));
+
+#if 0
+ /*
+ * DON'T DO THIS. disabling interrupts floats the IRQ line
+ * which generates spurious interrupts
+ */
+
+ /*
+ * Select the correct drive
+ */
+ ddi_put8(ata_ctlp->ac_iohandle1, ata_ctlp->ac_drvhd,
+ ata_drvp->ad_drive_bits);
+ ATA_DELAY_400NSEC(ata_ctlp->ac_iohandle2, ata_ctlp->ac_ioaddr2);
+
+ /*
+ * Disable interrupts from the drive
+ */
+ ddi_put8(ata_ctlp->ac_iohandle2, ata_ctlp->ac_devctl,
+ (ATDC_D3 | ATDC_NIEN));
+#endif
+
+ /* interface specific clean-ups */
+
+ if (ata_drvp->ad_flags & AD_ATAPI)
+ atapi_uninit_drive(ata_drvp);
+ else if (ata_drvp->ad_flags & AD_DISK)
+ ata_disk_uninit_drive(ata_drvp);
+
+ /* free drive struct */
+
+ kmem_free(ata_drvp, sizeof (ata_drv_t));
+}
+
+
+/*
+ * ata_drive_type()
+ *
+ * The timeout values and exact sequence of checking are critical,
+ * especially for atapi device detection, and should not be changed lightly.
+ *
+ */
+static int
+ata_drive_type(
+ uchar_t drvhd,
+ ddi_acc_handle_t io_hdl1,
+ caddr_t ioaddr1,
+ ddi_acc_handle_t io_hdl2,
+ caddr_t ioaddr2,
+ struct ata_id *ata_id_bufp)
+{
+ uchar_t status;
+
+ ADBG_TRACE(("ata_drive_type entered\n"));
+
+ /*
+ * select the appropriate drive and LUN
+ */
+ ddi_put8(io_hdl1, (uchar_t *)ioaddr1 + AT_DRVHD, drvhd);
+ ATA_DELAY_400NSEC(io_hdl2, ioaddr2);
+
+ /*
+ * make certain the drive is selected, and wait for not busy
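+	 * (the 0x7f failure masks below make ata_wait3() bail out early
+	 * when the status reads 0x7f or 0xff, i.e. a floating bus with
+	 * no device present)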
+ */
+ (void) ata_wait3(io_hdl2, ioaddr2, 0, ATS_BSY, 0x7f, 0, 0x7f, 0,
+ 5 * 1000000);
+
+ status = ddi_get8(io_hdl2, (uchar_t *)ioaddr2 + AT_ALTSTATUS);
+
+ if (status & ATS_BSY) {
+ ADBG_TRACE(("ata_drive_type BUSY 0x%p 0x%x\n",
+ ioaddr1, status));
+ return (ATA_DEV_NONE);
+ }
+
+ if (ata_disk_id(io_hdl1, ioaddr1, io_hdl2, ioaddr2, ata_id_bufp))
+ return (ATA_DEV_DISK);
+
+ /*
+ * No disk, check for atapi unit.
+ */
+ if (!atapi_signature(io_hdl1, ioaddr1)) {
+#ifndef ATA_DISABLE_ATAPI_1_7
+ /*
+ * Check for old (but prevalent) atapi 1.7B
+		 * spec device; the only known example is the
+ * NEC CDR-260 (not 260R which is (mostly) ATAPI 1.2
+ * compliant). This device has no signature
+ * and requires conversion from hex to BCD
+ * for some scsi audio commands.
+ */
+ if (atapi_id(io_hdl1, ioaddr1, io_hdl2, ioaddr2, ata_id_bufp)) {
+ return (ATA_DEV_ATAPI);
+ }
+#endif
+ return (ATA_DEV_NONE);
+ }
+
+ if (atapi_id(io_hdl1, ioaddr1, io_hdl2, ioaddr2, ata_id_bufp)) {
+ return (ATA_DEV_ATAPI);
+ }
+
+ return (ATA_DEV_NONE);
+
+}
+
+/*
+ * Wait for a register of a controller to achieve a specific state.
+ * To return normally, all the bits in the first sub-mask must be ON,
+ * all the bits in the second sub-mask must be OFF.
+ * If the register reaches the desired bit configuration we return TRUE;
+ * if timeout_usec microseconds pass without that happening, we return FALSE.
+ */
+
+int ata_usec_delay = 10;
+
+int
+ata_wait(
+ ddi_acc_handle_t io_hdl,
+ caddr_t ioaddr,
+ uchar_t onbits,
+ uchar_t offbits,
+ uint_t timeout_usec)
+{
+ ushort_t val;
+
+ do {
+ val = ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_ALTSTATUS);
+ if ((val & onbits) == onbits && (val & offbits) == 0)
+ return (TRUE);
+ drv_usecwait(ata_usec_delay);
+ timeout_usec -= ata_usec_delay;
+ } while (timeout_usec > 0);
+
+ return (FALSE);
+}
+
+
+
+/*
+ *
+ * This is a slightly more complicated version that checks
+ * for error conditions and bails-out rather than looping
+ * until the timeout expires
+ */
+int
+ata_wait3(
+ ddi_acc_handle_t io_hdl,
+ caddr_t ioaddr,
+ uchar_t onbits1,
+ uchar_t offbits1,
+ uchar_t failure_onbits2,
+ uchar_t failure_offbits2,
+ uchar_t failure_onbits3,
+ uchar_t failure_offbits3,
+ uint_t timeout_usec)
+{
+ ushort_t val;
+
+ do {
+ val = ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_ALTSTATUS);
+
+ /*
+ * check for expected condition
+ */
+ if ((val & onbits1) == onbits1 && (val & offbits1) == 0)
+ return (TRUE);
+
+ /*
+ * check for error conditions
+ */
+ if ((val & failure_onbits2) == failure_onbits2 &&
+ (val & failure_offbits2) == 0) {
+ return (FALSE);
+ }
+
+ if ((val & failure_onbits3) == failure_onbits3 &&
+ (val & failure_offbits3) == 0) {
+ return (FALSE);
+ }
+
+ drv_usecwait(ata_usec_delay);
+ timeout_usec -= ata_usec_delay;
+ } while (timeout_usec > 0);
+
+ return (FALSE);
+}
+
+
+/*
+ *
+ * low level routine for ata_disk_id() and atapi_id()
+ *
+ */
+
+int
+ata_id_common(
+ uchar_t id_cmd,
+ int expect_drdy,
+ ddi_acc_handle_t io_hdl1,
+ caddr_t ioaddr1,
+ ddi_acc_handle_t io_hdl2,
+ caddr_t ioaddr2,
+ struct ata_id *aidp)
+{
+ uchar_t status;
+
+ ADBG_TRACE(("ata_id_common entered\n"));
+
+ bzero(aidp, sizeof (struct ata_id));
+
+ /*
+ * clear the features register
+ */
+ ddi_put8(io_hdl1, (uchar_t *)ioaddr1 + AT_FEATURE, 0);
+
+ /*
+ * enable interrupts from the device
+ */
+ ddi_put8(io_hdl2, (uchar_t *)ioaddr2 + AT_DEVCTL, ATDC_D3);
+
+ /*
+ * issue IDENTIFY DEVICE or IDENTIFY PACKET DEVICE command
+ */
+ ddi_put8(io_hdl1, (uchar_t *)ioaddr1 + AT_CMD, id_cmd);
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ioaddr2);
+
+ /*
+ * According to the ATA specification, some drives may have
+ * to read the media to complete this command. We need to
+ * make sure we give them enough time to respond.
+ */
+
+ (void) ata_wait3(io_hdl2, ioaddr2, 0, ATS_BSY,
+ ATS_ERR, ATS_BSY, 0x7f, 0, 5 * 1000000);
+
+ /*
+ * read the status byte and clear the pending interrupt
+ */
+	status = ddi_get8(io_hdl1, (uchar_t *)ioaddr1 + AT_STATUS);
+
+ /*
+ * this happens if there's no drive present
+ */
+ if (status == 0xff || status == 0x7f) {
+ /* invalid status, can't be an ATA or ATAPI device */
+ return (FALSE);
+ }
+
+ if (status & ATS_BSY) {
+ ADBG_ERROR(("ata_id_common: BUSY status 0x%x error 0x%x\n",
+ ddi_get8(io_hdl2, (uchar_t *)ioaddr2 +AT_ALTSTATUS),
+ ddi_get8(io_hdl1, (uchar_t *)ioaddr1 + AT_ERROR)));
+ return (FALSE);
+ }
+
+ if (!(status & ATS_DRQ)) {
+ if (status & (ATS_ERR | ATS_DF)) {
+ return (FALSE);
+ }
+ /*
+ * Give the drive another second to assert DRQ. Some older
+ * drives de-assert BSY before asserting DRQ.
+ */
+ if (!ata_wait(io_hdl2, ioaddr2, ATS_DRQ, ATS_BSY, 1000000)) {
+ ADBG_WARN(("ata_id_common: !DRQ status 0x%x error 0x%x\n",
+ ddi_get8(io_hdl2, (uchar_t *)ioaddr2 +AT_ALTSTATUS),
+ ddi_get8(io_hdl1, (uchar_t *)ioaddr1 + AT_ERROR)));
+ return (FALSE);
+ }
+ }
+
+ /*
+ * transfer the data
+ */
+ ddi_rep_get16(io_hdl1, (ushort_t *)aidp, (ushort_t *)ioaddr1 + AT_DATA,
+ NBPSCTR >> 1, DDI_DEV_NO_AUTOINCR);
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ioaddr2);
+
+
+ /*
+ * Wait for the drive to recognize I've read all the data.
+ * Some drives have been observed to take as much as 3msec to
+ * deassert DRQ after reading the data; allow 10 msec just in case.
+ *
+ * Note: some non-compliant ATAPI drives (e.g., NEC Multispin 6V,
+ * CDR-1350A) don't assert DRDY. If we've made it this far we can
+ * safely ignore the DRDY bit since the ATAPI Packet command
+ * actually doesn't require it to ever be asserted.
+ *
+ */
+ if (!ata_wait(io_hdl2, ioaddr2, (uchar_t)(expect_drdy ? ATS_DRDY : 0),
+ (ATS_BSY | ATS_DRQ), 1000000)) {
+ ADBG_WARN(("ata_id_common: bad status 0x%x error 0x%x\n",
+ ddi_get8(io_hdl2, (uchar_t *)ioaddr2 + AT_ALTSTATUS),
+ ddi_get8(io_hdl1, (uchar_t *)ioaddr1 + AT_ERROR)));
+ return (FALSE);
+ }
+
+ /*
+ * Check to see if the command aborted. This happens if
+ * an IDENTIFY DEVICE command is issued to an ATAPI PACKET device,
+ * or if an IDENTIFY PACKET DEVICE command is issued to an ATA
+ * (non-PACKET) device.
+ */
+ if (status & (ATS_DF | ATS_ERR)) {
+ ADBG_WARN(("ata_id_common: status 0x%x error 0x%x \n",
+ ddi_get8(io_hdl2, (uchar_t *)ioaddr2 + AT_ALTSTATUS),
+ ddi_get8(io_hdl1, (uchar_t *)ioaddr1 + AT_ERROR)));
+ return (FALSE);
+ }
+ return (TRUE);
+}
+
+
+/*
+ * Low level routine to issue a non-data command and busy wait for
+ * the completion status.
+ */
+
+int
+ata_command(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ int expect_drdy,
+ int silent,
+ uint_t busy_wait,
+ uchar_t cmd,
+ uchar_t feature,
+ uchar_t count,
+ uchar_t sector,
+ uchar_t head,
+ uchar_t cyl_low,
+ uchar_t cyl_hi)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ uchar_t status;
+
+ /* select the drive */
+ ddi_put8(io_hdl1, ata_ctlp->ac_drvhd, ata_drvp->ad_drive_bits);
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ /* make certain the drive selected */
+ if (!ata_wait(io_hdl2, ata_ctlp->ac_ioaddr2,
+ (uchar_t)(expect_drdy ? ATS_DRDY : 0),
+ ATS_BSY, busy_wait)) {
+ ADBG_ERROR(("ata_command: select failed "
+ "DRDY 0x%x CMD 0x%x F 0x%x N 0x%x "
+ "S 0x%x H 0x%x CL 0x%x CH 0x%x\n",
+ expect_drdy, cmd, feature, count,
+ sector, head, cyl_low, cyl_hi));
+ return (FALSE);
+ }
+
+ /*
+ * set all the regs
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_drvhd, (head | ata_drvp->ad_drive_bits));
+ ddi_put8(io_hdl1, ata_ctlp->ac_sect, sector);
+ ddi_put8(io_hdl1, ata_ctlp->ac_count, count);
+ ddi_put8(io_hdl1, ata_ctlp->ac_lcyl, cyl_low);
+ ddi_put8(io_hdl1, ata_ctlp->ac_hcyl, cyl_hi);
+ ddi_put8(io_hdl1, ata_ctlp->ac_feature, feature);
+
+ /* send the command */
+ ddi_put8(io_hdl1, ata_ctlp->ac_cmd, cmd);
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ /* wait for not busy */
+ if (!ata_wait(io_hdl2, ata_ctlp->ac_ioaddr2, 0, ATS_BSY, busy_wait)) {
+		ADBG_ERROR(("ata_command: BSY too long! "
+ "DRDY 0x%x CMD 0x%x F 0x%x N 0x%x "
+ "S 0x%x H 0x%x CL 0x%x CH 0x%x\n",
+ expect_drdy, cmd, feature, count,
+ sector, head, cyl_low, cyl_hi));
+ return (FALSE);
+ }
+
+ /*
+ * wait for DRDY before continuing
+ */
+ (void) ata_wait3(io_hdl2, ata_ctlp->ac_ioaddr2,
+ ATS_DRDY, ATS_BSY, /* okay */
+ ATS_ERR, ATS_BSY, /* cmd failed */
+ ATS_DF, ATS_BSY, /* drive failed */
+ busy_wait);
+
+ /* read status to clear IRQ, and check for error */
+ status = ddi_get8(io_hdl1, ata_ctlp->ac_status);
+
+ if ((status & (ATS_BSY | ATS_DF | ATS_ERR)) == 0)
+ return (TRUE);
+
+ if (!silent) {
+ ADBG_ERROR(("ata_command status 0x%x error 0x%x "
+ "DRDY 0x%x CMD 0x%x F 0x%x N 0x%x "
+ "S 0x%x H 0x%x CL 0x%x CH 0x%x\n",
+ ddi_get8(io_hdl1, ata_ctlp->ac_status),
+ ddi_get8(io_hdl1, ata_ctlp->ac_error),
+ expect_drdy, cmd, feature, count,
+ sector, head, cyl_low, cyl_hi));
+ }
+ return (FALSE);
+}
+
+
+
+/*
+ *
+ * Issue a SET FEATURES command
+ *
+ */
+
+int
+ata_set_feature(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ uchar_t feature,
+ uchar_t value)
+{
+ int rc;
+
+ rc = ata_command(ata_ctlp, ata_drvp, TRUE, TRUE, ata_set_feature_wait,
+ ATC_SET_FEAT, feature, value, 0, 0, 0, 0);
+ /* feature, count, sector, head, cyl_low, cyl_hi */
+
+ if (rc) {
+ return (TRUE);
+ }
+
+ ADBG_ERROR(("?ata_set_feature: (0x%x,0x%x) failed\n", feature, value));
+ return (FALSE);
+}
+
+
+
+/*
+ *
+ * Issue a FLUSH CACHE command
+ *
+ */
+
+static int
+ata_flush_cache(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp)
+{
+ /* this command is optional so fail silently */
+ return (ata_command(ata_ctlp, ata_drvp, TRUE, TRUE,
+ ata_flush_cache_wait,
+ ATC_FLUSH_CACHE, 0, 0, 0, 0, 0, 0));
+}
+
+/*
+ * ata_setup_ioaddr()
+ *
+ * Map the device registers and return the handles.
+ *
+ * If this is an ISA-ATA controller, only two handles are
+ * initialized and returned.
+ *
+ * If this is a PCI-IDE controller, a third handle (for the
+ * PCI-IDE Bus Mastering registers) is also initialized and returned.
+ *
+ */
+
+static int
+ata_setup_ioaddr(
+ dev_info_t *dip,
+ ddi_acc_handle_t *handle1p,
+ caddr_t *addr1p,
+ ddi_acc_handle_t *handle2p,
+ caddr_t *addr2p,
+ ddi_acc_handle_t *bm_hdlp,
+ caddr_t *bm_addrp)
+{
+ ddi_device_acc_attr_t dev_attr;
+ char *bufp;
+ int rnumber;
+ int rc;
+ off_t regsize;
+
+ /*
+ * Make certain the controller is enabled and its regs are map-able
+ *
+ */
+ rc = ddi_dev_regsize(dip, 0, &regsize);
+ if (rc != DDI_SUCCESS || regsize <= AT_CMD) {
+ ADBG_INIT(("ata_setup_ioaddr(1): rc %d regsize %lld\n",
+ rc, (long long)regsize));
+ return (FALSE);
+ }
+
+ rc = ddi_dev_regsize(dip, 1, &regsize);
+ if (rc != DDI_SUCCESS || regsize <= AT_ALTSTATUS) {
+ ADBG_INIT(("ata_setup_ioaddr(2): rc %d regsize %lld\n",
+ rc, (long long)regsize));
+ return (FALSE);
+ }
+
+ /*
+ * setup the device attribute structure for little-endian,
+ * strict ordering access.
+ */
+ dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
+ dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
+ dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
+
+ *handle1p = NULL;
+ *handle2p = NULL;
+ *bm_hdlp = NULL;
+
+ /*
+ * Determine whether this is a ISA, PNP-ISA, or PCI-IDE device
+ */
+ if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "pnp-csn")) {
+ /* it's PNP-ISA, skip over the extra reg tuple */
+ rnumber = 1;
+ goto not_pciide;
+ }
+
+ /* else, it's ISA or PCI-IDE, check further */
+ rnumber = 0;
+
+ rc = ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_get_parent(dip),
+ DDI_PROP_DONTPASS, "device_type", &bufp);
+ if (rc != DDI_PROP_SUCCESS) {
+ ADBG_ERROR(("ata_setup_ioaddr !device_type\n"));
+ goto not_pciide;
+ }
+
+ if (strcmp(bufp, "pci-ide") != 0) {
+ /*
+ * If it's not a PCI-IDE, there are only two reg tuples
+ * and the first one contains the I/O base (170 or 1f0)
+ * rather than the controller instance number.
+ */
+ ADBG_TRACE(("ata_setup_ioaddr !pci-ide\n"));
+ ddi_prop_free(bufp);
+ goto not_pciide;
+ }
+ ddi_prop_free(bufp);
+
+
+ /*
+ * Map the correct half of the PCI-IDE Bus Master registers.
+	 * There's a single BAR that maps these registers for both
+	 * controllers in a dual-controller chip, and it's up to my
+	 * parent nexus, pciide, to adjust which half this call maps
+	 * (based on my instance number).
+ */
+ rc = ddi_dev_regsize(dip, 2, &regsize);
+ if (rc != DDI_SUCCESS || regsize < 8) {
+ ADBG_INIT(("ata_setup_ioaddr(3): rc %d regsize %lld\n",
+ rc, (long long)regsize));
+ goto not_pciide;
+ }
+
+ rc = ddi_regs_map_setup(dip, 2, bm_addrp, 0, 0, &dev_attr, bm_hdlp);
+
+ if (rc != DDI_SUCCESS) {
+ /* map failed, try to use in non-pci-ide mode */
+ ADBG_WARN(("ata_setup_ioaddr bus master map failed, rc=0x%x\n",
+ rc));
+ *bm_hdlp = NULL;
+ }
+
+not_pciide:
+ /*
+ * map the lower command block registers
+ */
+
+ rc = ddi_regs_map_setup(dip, rnumber, addr1p, 0, 0, &dev_attr,
+ handle1p);
+
+ if (rc != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "ata: reg tuple 0 map failed, rc=0x%x\n", rc);
+ goto out1;
+ }
+
+ /*
+ * If the controller is being used in compatibility mode
+ * via /devices/isa/ata@1,{1f0,1f0}/..., the reg property
+ * will specify zeros for the I/O ports for the PCI
+ * instance.
+ */
+ if (*addr1p == 0) {
+ ADBG_TRACE(("ata_setup_ioaddr ioaddr1 0\n"));
+ goto out2;
+ }
+
+ /*
+ * map the upper control block registers
+ */
+ rc = ddi_regs_map_setup(dip, rnumber + 1, addr2p, 0, 0, &dev_attr,
+ handle2p);
+ if (rc == DDI_SUCCESS)
+ return (TRUE);
+
+ cmn_err(CE_WARN, "ata: reg tuple 1 map failed, rc=0x%x", rc);
+
+out2:
+ if (*handle1p != NULL) {
+ ddi_regs_map_free(handle1p);
+ *handle1p = NULL;
+ }
+
+out1:
+ if (*bm_hdlp != NULL) {
+ ddi_regs_map_free(bm_hdlp);
+ *bm_hdlp = NULL;
+ }
+ return (FALSE);
+
+}
+
+/*
+ *
+ * Currently, the only supported controllers are ones which
+ * support the SFF-8038 Bus Mastering spec.
+ *
+ * Check the parent node's IEEE 1275 class-code property to
+ * determine if it's a PCI-IDE instance which supports SFF-8038
+ * Bus Mastering. It's perfectly valid to have a PCI-IDE controller
+ * that doesn't do Bus Mastering. In that case, my interrupt handler
+ * only uses the interrupt latch bit in the PCI-IDE status register.
+ * The assumption is that the programming interface byte of the
+ * class-code property reflects the bus master DMA capability of
+ * the controller.
+ *
+ * Whether the drive supports the DMA option still needs
+ * to be checked later. Each individual request also has to be
+ * checked for alignment and size to decide whether to use the
+ * DMA transfer mode.
+ */
+
+static void
+ata_init_pciide(
+ dev_info_t *dip,
+ ata_ctl_t *ata_ctlp)
+{
+ uint_t class_code;
+ uchar_t status;
+
+ ata_cntrl_DMA_sel_msg = NULL;
+
+ if (ata_ctlp->ac_bmhandle == NULL) {
+ ata_ctlp->ac_pciide = FALSE;
+ ata_ctlp->ac_pciide_bm = FALSE;
+ ata_cntrl_DMA_sel_msg = "cntrl not Bus Master DMA capable";
+ return;
+ }
+
+ /*
+ * check if it's a known bogus PCI-IDE chip
+ */
+ if (ata_check_pciide_blacklist(dip, ATA_BL_BOGUS)) {
+ ADBG_WARN(("ata_setup_ioaddr pci-ide blacklist\n"));
+ ata_ctlp->ac_pciide = FALSE;
+ ata_ctlp->ac_pciide_bm = FALSE;
+ ata_cntrl_DMA_sel_msg = "cntrl blacklisted";
+ return;
+ }
+ ata_ctlp->ac_pciide = TRUE;
+
+ if (ata_check_pciide_blacklist(dip, ATA_BL_BMSTATREG_PIO_BROKEN)) {
+ ata_ctlp->ac_flags |= AC_BMSTATREG_PIO_BROKEN;
+ }
+
+ /*
+ * check for a PCI-IDE chip with a broken DMA engine
+ */
+ if (ata_check_pciide_blacklist(dip, ATA_BL_NODMA)) {
+ ata_ctlp->ac_pciide_bm = FALSE;
+ ata_cntrl_DMA_sel_msg =
+ "cntrl blacklisted/DMA engine broken";
+ return;
+ }
+
+ /*
+ * Check the Programming Interface register to determine
+ * if this device supports PCI-IDE Bus Mastering. Some PCI-IDE
+ * devices don't support Bus Mastering or DMA.
+	 * Since we are dealing with a pre-qualified pci-ide controller,
+	 * check the programming interface byte only.
+ */
+
+ class_code = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_get_parent(dip),
+ DDI_PROP_DONTPASS, "class-code", 0);
+ if ((class_code & PCIIDE_BM_CAP_MASK) != PCIIDE_BM_CAP_MASK) {
+ ata_ctlp->ac_pciide_bm = FALSE;
+ ata_cntrl_DMA_sel_msg =
+ "cntrl not Bus Master DMA capable";
+ return;
+ }
+
+ /*
+ * Avoid doing DMA on "simplex" chips which share hardware
+ * between channels
+ */
+ status = ddi_get8(ata_ctlp->ac_bmhandle,
+ (uchar_t *)ata_ctlp->ac_bmaddr + PCIIDE_BMISX_REG);
+ /*
+ * Some motherboards have CSB5's that are wired "to emulate CSB4 mode".
+ * In such a mode, the simplex bit is asserted, but in fact testing
+ * on such a motherboard has shown that the devices are not simplex
+ * -- DMA can be used on both channels concurrently with no special
+ * considerations. For chips like this, we have the ATA_BL_NO_SIMPLEX
+ * flag set to indicate that the value of the simplex bit can be
+ * ignored.
+ */
+
+ if (status & PCIIDE_BMISX_SIMPLEX) {
+ if (ata_check_pciide_blacklist(dip, ATA_BL_NO_SIMPLEX)) {
+			cmn_err(CE_WARN, "Ignoring false simplex bit\n");
+ } else {
+ ata_ctlp->ac_pciide_bm = FALSE;
+ ata_cntrl_DMA_sel_msg =
+ "cntrl sharing DMA engine between channels";
+ return;
+ }
+ }
+
+ /*
+ * It's a compatible PCI-IDE Bus Mastering controller,
+ * allocate and map the DMA Scatter/Gather list (PRDE table).
+ */
+ if (ata_pciide_alloc(dip, ata_ctlp))
+ ata_ctlp->ac_pciide_bm = TRUE;
+ else {
+ ata_ctlp->ac_pciide_bm = FALSE;
+ ata_cntrl_DMA_sel_msg = "unable to init DMA S/G list";
+ }
+}
+
+/*
+ *
+ * Determine whether to enable DMA support for this drive.
+ * The controller and the drive both have to support DMA.
+ * The controller's capabilities were already checked in
+ * ata_init_pciide(), now just check the drive's capabilities.
+ *
+ */
+
+static int
+ata_init_drive_pcidma(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ dev_info_t *tdip)
+{
+ boolean_t dma;
+ boolean_t cd_dma;
+ boolean_t disk_dma;
+ boolean_t atapi_dma;
+ int ata_options;
+
+ ata_dev_DMA_sel_msg = NULL;
+
+ if (ata_ctlp->ac_pciide_bm != TRUE) {
+ ata_dev_DMA_sel_msg =
+ "controller is not Bus Master capable";
+
+ return (ATA_DMA_OFF);
+ }
+
+ ata_options = ddi_prop_get_int(DDI_DEV_T_ANY, ata_ctlp->ac_dip,
+ 0, "ata-options", 0);
+
+ if (!(ata_options & ATA_OPTIONS_DMA)) {
+ /*
+ * Either the ata-options property was not found or
+ * DMA is not enabled by this property
+ */
+ ata_dev_DMA_sel_msg =
+ "disabled by \"ata-options\" property";
+
+ return (ATA_DMA_OFF);
+ }
+
+ if (ata_check_drive_blacklist(&ata_drvp->ad_id, ATA_BL_NODMA)) {
+ ata_dev_DMA_sel_msg = "device not DMA capable; blacklisted";
+
+ return (ATA_DMA_OFF);
+ }
+
+ /*
+ * DMA mode is mandatory on ATA-3 (or newer) drives but is
+ * optional on ATA-2 (or older) drives.
+ *
+ * On ATA-2 drives the ai_majorversion word will probably
+ * be 0xffff or 0x0000, check the (now obsolete) DMA bit in
+ * the capabilities word instead. The order of these tests
+ * is important since an ATA-3 drive doesn't have to set
+ * the DMA bit in the capabilities word.
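+	 * Bit n of ai_majorversion being set indicates support for
+	 * ATA/ATAPI-n, so the (1 << 2) test below checks for ATA-3
+	 * or newer.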
+ *
+ */
+
+ if (!((ata_drvp->ad_id.ai_majorversion & 0x8000) == 0 &&
+ ata_drvp->ad_id.ai_majorversion >= (1 << 2)) &&
+ !(ata_drvp->ad_id.ai_cap & ATAC_DMA_SUPPORT)) {
+ ata_dev_DMA_sel_msg = "device not DMA capable";
+
+ return (ATA_DMA_OFF);
+ }
+
+ dma = ata_prop_lookup_int(DDI_DEV_T_ANY, tdip,
+ 0, "ata-dma-enabled", TRUE);
+ disk_dma = ata_prop_lookup_int(DDI_DEV_T_ANY, tdip,
+ 0, "ata-disk-dma-enabled", TRUE);
+ cd_dma = ata_prop_lookup_int(DDI_DEV_T_ANY, tdip,
+ 0, "atapi-cd-dma-enabled", FALSE);
+ atapi_dma = ata_prop_lookup_int(DDI_DEV_T_ANY, tdip,
+ 0, "atapi-other-dma-enabled", TRUE);
+
+ if (dma == FALSE) {
+ cmn_err(CE_CONT, "?ata_init_drive_pcidma: "
+ "DMA disabled by \"ata-dma-enabled\" property");
+ ata_dev_DMA_sel_msg = "disabled by prop ata-dma-enabled";
+
+ return (ATA_DMA_OFF);
+ }
+
+ if (IS_CDROM(ata_drvp) == TRUE) {
+ if (cd_dma == FALSE) {
+ ata_dev_DMA_sel_msg =
+ "disabled. Control with \"atapi-cd-dma-enabled\""
+ " property";
+
+ return (ATA_DMA_OFF);
+ }
+
+ } else if (ATAPIDRV(ata_drvp) == FALSE) {
+ if (disk_dma == FALSE) {
+ ata_dev_DMA_sel_msg =
+ "disabled by \"ata-disk-dma-enabled\" property";
+
+ return (ATA_DMA_OFF);
+ }
+
+ } else if (atapi_dma == FALSE) {
+ ata_dev_DMA_sel_msg =
+ "disabled by \"atapi-other-dma-enabled\" property";
+
+ return (ATA_DMA_OFF);
+ }
+
+ return (ATA_DMA_ON);
+}
+
+
+
+/*
+ * this compare routine squeezes out extra blanks and
+ * returns TRUE if p1 matches the leftmost substring of p2
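+ * (for example, a blacklist entry of "NEC CD-ROM DRIVE:260" would match
+ * an ai_model field of "NEC     CD-ROM DRIVE:260" padded with blanks;
+ * hypothetical strings for illustration)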
+ */
+
+static int
+ata_strncmp(
+ char *p1,
+ char *p2,
+ int cnt)
+{
+
+ for (;;) {
+ /*
+ * skip over any extra blanks in both strings
+ */
+ while (*p1 != '\0' && *p1 == ' ')
+ p1++;
+
+ while (cnt != 0 && *p2 == ' ') {
+ p2++;
+ cnt--;
+ }
+
+ /*
+ * compare the two strings
+ */
+
+ if (cnt == 0 || *p1 != *p2)
+ break;
+
+ while (cnt > 0 && *p1 == *p2) {
+ p1++;
+ p2++;
+ cnt--;
+ }
+
+ }
+
+ /* return TRUE if both strings ended at same point */
+ return ((*p1 == '\0') ? TRUE : FALSE);
+}
+
+/*
+ * Per PSARC/1997/281 create variant="atapi" property (if necessary)
+ * on the target's dev_info node. Currently, the sd target driver
+ * is the only driver which refers to this property.
+ *
+ * If the flag ata_id_debug is set, also create the
+ * "ata" or "atapi" property on the target's dev_info node.
+ *
+ */
+
+int
+ata_prop_create(
+ dev_info_t *tgt_dip,
+ ata_drv_t *ata_drvp,
+ char *name)
+{
+ int rc;
+
+ ADBG_TRACE(("ata_prop_create 0x%p 0x%p %s\n", tgt_dip, ata_drvp, name));
+
+ if (strcmp("atapi", name) == 0) {
+ rc = ndi_prop_update_string(DDI_DEV_T_NONE, tgt_dip,
+ "variant", name);
+ if (rc != DDI_PROP_SUCCESS)
+ return (FALSE);
+ }
+
+ if (!ata_id_debug)
+ return (TRUE);
+
+ rc = ndi_prop_update_byte_array(DDI_DEV_T_NONE, tgt_dip, name,
+ (uchar_t *)&ata_drvp->ad_id, sizeof (ata_drvp->ad_id));
+ if (rc != DDI_PROP_SUCCESS) {
+ ADBG_ERROR(("ata_prop_create failed, rc=%d\n", rc));
+ }
+ return (TRUE);
+}
+
+
+/* *********************************************************************** */
+/* *********************************************************************** */
+/* *********************************************************************** */
+
+/*
+ * This state machine doesn't implement the ATAPI Optional Overlap
+ * feature. You need that feature to efficiently support ATAPI
+ * tape drives. See the 1394-ATA Tailgate spec (D97107), Figure 24,
+ * for an example of how to add the necessary additional NextActions
+ * and NextStates to this FSM and the atapi_fsm, in order to support
+ * the Overlap Feature.
+ */
+
+
+uchar_t ata_ctlr_fsm_NextAction[ATA_CTLR_NSTATES][ATA_CTLR_NFUNCS] = {
+/* --------------------- next action --------------------- | - current - */
+/* start0 --- start1 ---- intr ------ fini --- reset --- */
+{ AC_START, AC_START, AC_NADA, AC_NADA, AC_RESET_I }, /* idle */
+{ AC_BUSY, AC_BUSY, AC_INTR, AC_FINI, AC_RESET_A }, /* active0 */
+{ AC_BUSY, AC_BUSY, AC_INTR, AC_FINI, AC_RESET_A }, /* active1 */
+};
+
+uchar_t ata_ctlr_fsm_NextState[ATA_CTLR_NSTATES][ATA_CTLR_NFUNCS] = {
+
+/* --------------------- next state --------------------- | - current - */
+/* start0 --- start1 ---- intr ------ fini --- reset --- */
+{ AS_ACTIVE0, AS_ACTIVE1, AS_IDLE, AS_IDLE, AS_IDLE }, /* idle */
+{ AS_ACTIVE0, AS_ACTIVE0, AS_ACTIVE0, AS_IDLE, AS_ACTIVE0 }, /* active0 */
+{ AS_ACTIVE1, AS_ACTIVE1, AS_ACTIVE1, AS_IDLE, AS_ACTIVE1 }, /* active1 */
+};
+
+
+static int
+ata_ctlr_fsm(
+ uchar_t fsm_func,
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp,
+ int *DoneFlgp)
+{
+ uchar_t action;
+ uchar_t current_state;
+ uchar_t next_state;
+ int rc;
+
+ current_state = ata_ctlp->ac_state;
+ action = ata_ctlr_fsm_NextAction[current_state][fsm_func];
+ next_state = ata_ctlr_fsm_NextState[current_state][fsm_func];
+
+ /*
+ * Set the controller's new state
+ */
+ ata_ctlp->ac_state = next_state;
+ switch (action) {
+
+ case AC_BUSY:
+ return (ATA_FSM_RC_BUSY);
+
+ case AC_NADA:
+ return (ATA_FSM_RC_OKAY);
+
+ case AC_START:
+ ASSERT(ata_ctlp->ac_active_pktp == NULL);
+ ASSERT(ata_ctlp->ac_active_drvp == NULL);
+
+ ata_ctlp->ac_active_pktp = ata_pktp;
+ ata_ctlp->ac_active_drvp = ata_drvp;
+
+ rc = (*ata_pktp->ap_start)(ata_ctlp, ata_drvp, ata_pktp);
+
+ if (rc == ATA_FSM_RC_BUSY) {
+ /* the request didn't start, GHD will requeue it */
+ ata_ctlp->ac_state = AS_IDLE;
+ ata_ctlp->ac_active_pktp = NULL;
+ ata_ctlp->ac_active_drvp = NULL;
+ }
+ return (rc);
+
+ case AC_INTR:
+ ASSERT(ata_ctlp->ac_active_pktp != NULL);
+ ASSERT(ata_ctlp->ac_active_drvp != NULL);
+
+ ata_drvp = ata_ctlp->ac_active_drvp;
+ ata_pktp = ata_ctlp->ac_active_pktp;
+ return ((*ata_pktp->ap_intr)(ata_ctlp, ata_drvp, ata_pktp));
+
+ case AC_RESET_A: /* Reset, controller active */
+ ASSERT(ata_ctlp->ac_active_pktp != NULL);
+ ASSERT(ata_ctlp->ac_active_drvp != NULL);
+
+ /* clean up the active request */
+ ata_pktp = ata_ctlp->ac_active_pktp;
+ ata_pktp->ap_flags |= AP_DEV_RESET | AP_BUS_RESET;
+
+ /* halt the DMA engine */
+ if (ata_pktp->ap_pciide_dma) {
+ ata_pciide_dma_stop(ata_ctlp);
+ (void) ata_pciide_status_clear(ata_ctlp);
+ }
+
+ /* Do a Software Reset to unwedge the bus */
+ if (!ata_software_reset(ata_ctlp)) {
+ return (ATA_FSM_RC_BUSY);
+ }
+
+ /* Then send a DEVICE RESET cmd to each ATAPI device */
+ atapi_fsm_reset(ata_ctlp);
+ return (ATA_FSM_RC_FINI);
+
+ case AC_RESET_I: /* Reset, controller idle */
+ /* Do a Software Reset to unwedge the bus */
+ if (!ata_software_reset(ata_ctlp)) {
+ return (ATA_FSM_RC_BUSY);
+ }
+
+ /* Then send a DEVICE RESET cmd to each ATAPI device */
+ atapi_fsm_reset(ata_ctlp);
+ return (ATA_FSM_RC_OKAY);
+
+ case AC_FINI:
+ break;
+ }
+
+ /*
+	 * AC_FINI: check whether ARQ needs to be started or finished
+ */
+
+ ASSERT(action == AC_FINI);
+ ASSERT(ata_ctlp->ac_active_pktp != NULL);
+ ASSERT(ata_ctlp->ac_active_drvp != NULL);
+
+ /*
+ * The active request is done now.
+ * Disconnect the request from the controller and
+ * add it to the done queue.
+ */
+ ata_drvp = ata_ctlp->ac_active_drvp;
+ ata_pktp = ata_ctlp->ac_active_pktp;
+
+ /*
+ * If ARQ pkt is done, get ptr to original pkt and wrap it up.
+ */
+ if (ata_pktp == ata_ctlp->ac_arq_pktp) {
+ ata_pkt_t *arq_pktp;
+
+ ADBG_ARQ(("ata_ctlr_fsm 0x%p ARQ done\n", ata_ctlp));
+
+ arq_pktp = ata_pktp;
+ ata_pktp = ata_ctlp->ac_fault_pktp;
+ ata_ctlp->ac_fault_pktp = NULL;
+ if (arq_pktp->ap_flags & (AP_ERROR | AP_BUS_RESET))
+ ata_pktp->ap_flags |= AP_ARQ_ERROR;
+ else
+ ata_pktp->ap_flags |= AP_ARQ_OKAY;
+ goto all_done;
+ }
+
+
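+/*
+ * ARQ is needed when auto-request-sense is enabled for the pkt, device
+ * status has been captured, and the command completed with an error.
+ */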
+#define AP_ARQ_NEEDED (AP_ARQ_ON_ERROR | AP_GOT_STATUS | AP_ERROR)
+
+ /*
+ * Start ARQ pkt if necessary
+ */
+ if ((ata_pktp->ap_flags & AP_ARQ_NEEDED) == AP_ARQ_NEEDED &&
+ (ata_pktp->ap_status & ATS_ERR)) {
+
+ /* set controller state back to active */
+ ata_ctlp->ac_state = current_state;
+
+ /* try to start the ARQ pkt */
+ rc = ata_start_arq(ata_ctlp, ata_drvp, ata_pktp);
+
+ if (rc == ATA_FSM_RC_BUSY) {
+ ADBG_ARQ(("ata_ctlr_fsm 0x%p ARQ BUSY\n", ata_ctlp));
+ /* let the target driver handle the problem */
+ ata_ctlp->ac_state = AS_IDLE;
+ ata_ctlp->ac_active_pktp = NULL;
+ ata_ctlp->ac_active_drvp = NULL;
+ ata_ctlp->ac_fault_pktp = NULL;
+ goto all_done;
+ }
+
+ ADBG_ARQ(("ata_ctlr_fsm 0x%p ARQ started\n", ata_ctlp));
+ return (rc);
+ }
+
+ /*
+ * Normal completion, no error status, and not an ARQ pkt,
+ * just fall through.
+ */
+
+all_done:
+
+ /*
+ * wrap everything up and tie a ribbon around it
+ */
+ ata_ctlp->ac_active_pktp = NULL;
+ ata_ctlp->ac_active_drvp = NULL;
+ if (APKT2GCMD(ata_pktp) != (gcmd_t *)0) {
+ ghd_complete(&ata_ctlp->ac_ccc, APKT2GCMD(ata_pktp));
+ if (DoneFlgp)
+ *DoneFlgp = TRUE;
+ }
+
+ return (ATA_FSM_RC_OKAY);
+}
+
+
+static int
+ata_start_arq(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ata_pkt_t *arq_pktp;
+ int bytes;
+ uint_t senselen;
+
+ ADBG_ARQ(("ata_start_arq 0x%p ARQ needed\n", ata_ctlp));
+
+ /*
+ * Determine just the size of the Request Sense Data buffer within
+ * the scsi_arq_status structure.
+ */
+#define SIZEOF_ARQ_HEADER (sizeof (struct scsi_arq_status) \
+ - sizeof (struct scsi_extended_sense))
+ senselen = ata_pktp->ap_statuslen - SIZEOF_ARQ_HEADER;
+ ASSERT(senselen > 0);
+
+
+ /* save ptr to original pkt */
+ ata_ctlp->ac_fault_pktp = ata_pktp;
+
+ /* switch the controller's active pkt to the ARQ pkt */
+ arq_pktp = ata_ctlp->ac_arq_pktp;
+ ata_ctlp->ac_active_pktp = arq_pktp;
+
+ /* finish initializing the ARQ CDB */
+ ata_ctlp->ac_arq_cdb[1] = ata_drvp->ad_lun << 4;
+ ata_ctlp->ac_arq_cdb[4] = senselen;
+
+ /* finish initializing the ARQ pkt */
+ arq_pktp->ap_v_addr = (caddr_t)&ata_pktp->ap_scbp->sts_sensedata;
+
+ arq_pktp->ap_resid = senselen;
+ arq_pktp->ap_flags = AP_ATAPI | AP_READ;
+ arq_pktp->ap_cdb_pad =
+ ((unsigned)(ata_drvp->ad_cdb_len - arq_pktp->ap_cdb_len)) >> 1;
+
+ bytes = min(senselen, ATAPI_MAX_BYTES_PER_DRQ);
+ arq_pktp->ap_hicyl = (uchar_t)(bytes >> 8);
+ arq_pktp->ap_lwcyl = (uchar_t)bytes;
+
+ /*
+ * This packet is shared by all drives on this controller
+ * therefore we need to init the drive number on every ARQ.
+ */
+ arq_pktp->ap_hd = ata_drvp->ad_drive_bits;
+
+ /* start it up */
+ return ((*arq_pktp->ap_start)(ata_ctlp, ata_drvp, arq_pktp));
+}
+
+/*
+ *
+ * reset the bus
+ *
+ */
+
+static int
+ata_reset_bus(
+ ata_ctl_t *ata_ctlp)
+{
+ int watchdog;
+ uchar_t drive;
+ int rc = FALSE;
+ uchar_t fsm_func;
+ int DoneFlg = FALSE;
+
+ /*
+ * Do a Software Reset to unwedge the bus, and send
+ * ATAPI DEVICE RESET to each ATAPI drive.
+ */
+ fsm_func = ATA_FSM_RESET;
+ for (watchdog = ata_reset_bus_watchdog; watchdog > 0; watchdog--) {
+ switch (ata_ctlr_fsm(fsm_func, ata_ctlp, NULL, NULL,
+ &DoneFlg)) {
+ case ATA_FSM_RC_OKAY:
+ rc = TRUE;
+ goto fsm_done;
+
+ case ATA_FSM_RC_BUSY:
+ return (FALSE);
+
+ case ATA_FSM_RC_INTR:
+ fsm_func = ATA_FSM_INTR;
+ rc = TRUE;
+ continue;
+
+ case ATA_FSM_RC_FINI:
+ fsm_func = ATA_FSM_FINI;
+ rc = TRUE;
+ continue;
+ }
+ }
+ ADBG_WARN(("ata_reset_bus: watchdog\n"));
+
+fsm_done:
+
+ /*
+ * Reinitialize the ATA drives
+ */
+ for (drive = 0; drive < ATA_MAXTARG; drive++) {
+ ata_drv_t *ata_drvp;
+
+ if ((ata_drvp = CTL2DRV(ata_ctlp, drive, 0)) == NULL)
+ continue;
+
+ if (ATAPIDRV(ata_drvp))
+ continue;
+
+ /*
+ * Reprogram the Read/Write Multiple block factor
+ * and current geometry into the drive.
+ */
+ if (!ata_disk_setup_parms(ata_ctlp, ata_drvp))
+ rc = FALSE;
+ }
+
+	/*
+	 * If DoneFlg is TRUE, the ghd_complete() function has already been
+	 * called. In that case ignore any errors and return TRUE to the
+	 * caller; otherwise return the value of rc to the caller.
+	 */
+ if (DoneFlg)
+ return (TRUE);
+ else
+ return (rc);
+}
+
+
+/*
+ *
+ * Low level routine to toggle the Software Reset bit
+ *
+ */
+
+static int
+ata_software_reset(
+ ata_ctl_t *ata_ctlp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int time_left;
+
+	ADBG_TRACE(("ata_software_reset entered\n"));
+
+ /* disable interrupts and turn the software reset bit on */
+ ddi_put8(io_hdl2, ata_ctlp->ac_devctl, (ATDC_D3 | ATDC_SRST));
+
+	/* why 30 milliseconds? the ATA/ATAPI-4 spec says 5 usec is enough */
+ drv_usecwait(30000);
+
+ /* turn the software reset bit back off */
+ ddi_put8(io_hdl2, ata_ctlp->ac_devctl, ATDC_D3);
+
+ /*
+ * Wait for the controller to assert BUSY status.
+	 * I don't think 300 msecs is correct. The ATA/ATAPI-4
+	 * spec says 400 nsecs (and 2 msecs if the device
+	 * was in sleep mode, but we don't put drives to sleep
+	 * so it probably doesn't matter).
+ */
+ drv_usecwait(300000);
+
+ /*
+ * If drive 0 exists the test for completion is simple
+ */
+ time_left = 31 * 1000000;
+ if (CTL2DRV(ata_ctlp, 0, 0)) {
+ goto wait_for_not_busy;
+ }
+
+ ASSERT(CTL2DRV(ata_ctlp, 1, 0) != NULL);
+
+ /*
+ * This must be a single device configuration, with drive 1
+ * only. This complicates the test for completion because
+ * issuing the software reset just caused drive 1 to
+ * deselect. With drive 1 deselected, if I just read the
+ * status register to test the BSY bit I get garbage, but
+ * I can't re-select drive 1 until I'm certain the BSY bit
+ * is de-asserted. Catch-22.
+ *
+ * In ATA/ATAPI-4, rev 15, section 9.16.2, it says to handle
+ * this situation like this:
+ */
+
+ /* give up if the drive doesn't settle within 31 seconds */
+ while (time_left > 0) {
+ /*
+ * delay 10msec each time around the loop
+ */
+ drv_usecwait(10000);
+ time_left -= 10000;
+
+ /*
+ * try to select drive 1
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_drvhd, ATDH_DRIVE1);
+
+ ddi_put8(io_hdl1, ata_ctlp->ac_sect, 0x55);
+ ddi_put8(io_hdl1, ata_ctlp->ac_sect, 0xaa);
+ if (ddi_get8(io_hdl1, ata_ctlp->ac_sect) != 0xaa)
+ continue;
+
+ ddi_put8(io_hdl1, ata_ctlp->ac_count, 0x55);
+ ddi_put8(io_hdl1, ata_ctlp->ac_count, 0xaa);
+ if (ddi_get8(io_hdl1, ata_ctlp->ac_count) != 0xaa)
+ continue;
+
+ goto wait_for_not_busy;
+ }
+ return (FALSE);
+
+wait_for_not_busy:
+
+ /*
+	 * Now wait up to 31 seconds for BUSY to clear.
+ */
+ (void) ata_wait3(io_hdl2, ata_ctlp->ac_ioaddr2, 0, ATS_BSY,
+ ATS_ERR, ATS_BSY, ATS_DF, ATS_BSY, time_left);
+
+ return (TRUE);
+}
+
+/*
+ *
+ * DDI interrupt handler
+ *
+ */
+
+static uint_t
+ata_intr(
+ caddr_t arg)
+{
+ ata_ctl_t *ata_ctlp;
+ int one_shot = 1;
+
+ ata_ctlp = (ata_ctl_t *)arg;
+
+ return (ghd_intr(&ata_ctlp->ac_ccc, (void *)&one_shot));
+}
+
+
+/*
+ *
+ * GHD ccc_get_status callback
+ *
+ */
+
+static int
+ata_get_status(
+ void *hba_handle,
+ void *intr_status)
+{
+ ata_ctl_t *ata_ctlp = (ata_ctl_t *)hba_handle;
+ uchar_t status;
+
+ ADBG_TRACE(("ata_get_status entered\n"));
+
+ /*
+ * ignore interrupts before ata_attach completes
+ */
+ if (!(ata_ctlp->ac_flags & AC_ATTACHED))
+ return (FALSE);
+
+ /*
+ * can't be interrupt pending if nothing active
+ */
+ switch (ata_ctlp->ac_state) {
+ case AS_IDLE:
+ return (FALSE);
+ case AS_ACTIVE0:
+ case AS_ACTIVE1:
+ ASSERT(ata_ctlp->ac_active_drvp != NULL);
+ ASSERT(ata_ctlp->ac_active_pktp != NULL);
+ break;
+ }
+
+ /*
+ * If this is a PCI-IDE controller, check the PCI-IDE controller's
+ * interrupt status latch. But don't clear it yet.
+ *
+	 * The AC_BMSTATREG_PIO_BROKEN flag is currently used for
+	 * CMD chips with device id 0x646. Since the interrupt bit in the
+	 * Bus Master IDE status register is not usable when in PIO mode,
+	 * such a chip is treated as a legacy device for interrupt
+	 * indication. The following code for CMD
+	 * chips may need to be revisited when we enable support for DMA.
+	 *
+	 * CHANGE: DMA is not disabled for these devices. The BM interrupt
+	 * bit is checked only if DMA was used or the BM interrupt bit is
+	 * usable in PIO mode; otherwise the chip is treated as before, as
+	 * a legacy device.
+ */
+
+ if ((ata_ctlp->ac_pciide) &&
+ ((ata_ctlp->ac_pciide_bm != FALSE) &&
+ ((ata_ctlp->ac_active_pktp->ap_pciide_dma == TRUE) ||
+ !(ata_ctlp->ac_flags & AC_BMSTATREG_PIO_BROKEN)))) {
+
+ if (!ata_pciide_status_pending(ata_ctlp))
+ return (FALSE);
+ } else {
+ /*
+ * Interrupts from legacy ATA/IDE controllers are
+ * edge-triggered but the dumb legacy ATA/IDE controllers
+ * and drives don't have an interrupt status bit.
+ *
+ * Use a one_shot variable to make sure we only return
+ * one status per interrupt.
+ */
+ if (intr_status != NULL) {
+ int *one_shot = (int *)intr_status;
+
+ if (*one_shot == 1)
+ *one_shot = 0;
+ else
+ return (FALSE);
+ }
+ }
+
+ /* check if device is still busy */
+
+ status = ddi_get8(ata_ctlp->ac_iohandle2, ata_ctlp->ac_altstatus);
+ if (status & ATS_BSY)
+ return (FALSE);
+ return (TRUE);
+}
+
+
+/*
+ *
+ * get the current status and clear the IRQ
+ *
+ */
+
+int
+ata_get_status_clear_intr(
+ ata_ctl_t *ata_ctlp,
+ ata_pkt_t *ata_pktp)
+{
+ uchar_t status;
+
+ /*
+ * Here's where we clear the PCI-IDE interrupt latch. If this
+ * request used DMA mode then we also have to check and clear
+ * the DMA error latch at the same time.
+ */
+
+ if (ata_pktp->ap_pciide_dma) {
+ if (ata_pciide_status_dmacheck_clear(ata_ctlp))
+ ata_pktp->ap_flags |= AP_ERROR | AP_TRAN_ERROR;
+ } else if ((ata_ctlp->ac_pciide) &&
+ !(ata_ctlp->ac_flags & AC_BMSTATREG_PIO_BROKEN)) {
+ /*
+ * Some requests don't use DMA mode and therefore won't
+ * set the DMA error latch, but we still have to clear
+ * the interrupt latch.
+ * Controllers with broken BM intr in PIO mode do not go
+ * through this path.
+ */
+ (void) ata_pciide_status_clear(ata_ctlp);
+ }
+
+ /*
+ * this clears the drive's interrupt
+ */
+ status = ddi_get8(ata_ctlp->ac_iohandle1, ata_ctlp->ac_status);
+ ADBG_TRACE(("ata_get_status_clear_intr: 0x%x\n", status));
+ return (status);
+}
+
+
+
+/*
+ *
+ * GHD interrupt handler
+ *
+ */
+
+/* ARGSUSED */
+static void
+ata_process_intr(
+ void *hba_handle,
+ void *intr_status)
+{
+ ata_ctl_t *ata_ctlp = (ata_ctl_t *)hba_handle;
+ int watchdog;
+ uchar_t fsm_func;
+ int rc;
+
+ ADBG_TRACE(("ata_process_intr entered\n"));
+
+ /*
+ * process the ATA or ATAPI interrupt
+ */
+
+ fsm_func = ATA_FSM_INTR;
+ for (watchdog = ata_process_intr_watchdog; watchdog > 0; watchdog--) {
+ rc = ata_ctlr_fsm(fsm_func, ata_ctlp, NULL, NULL, NULL);
+
+ switch (rc) {
+ case ATA_FSM_RC_OKAY:
+ return;
+
+ case ATA_FSM_RC_BUSY: /* wait for the next interrupt */
+ return;
+
+ case ATA_FSM_RC_INTR: /* re-invoke the FSM */
+ fsm_func = ATA_FSM_INTR;
+ break;
+
+ case ATA_FSM_RC_FINI: /* move a request to done Q */
+ fsm_func = ATA_FSM_FINI;
+ break;
+ }
+ }
+ ADBG_WARN(("ata_process_intr: watchdog\n"));
+}
+
+
+
+/*
+ *
+ * GHD ccc_hba_start callback
+ *
+ */
+
+static int
+ata_hba_start(
+ void *hba_handle,
+ gcmd_t *gcmdp)
+{
+ ata_ctl_t *ata_ctlp;
+ ata_drv_t *ata_drvp;
+ ata_pkt_t *ata_pktp;
+ uchar_t fsm_func;
+ int request_started;
+ int watchdog;
+
+ ADBG_TRACE(("ata_hba_start entered\n"));
+
+ ata_ctlp = (ata_ctl_t *)hba_handle;
+
+ if (ata_ctlp->ac_active_drvp != NULL) {
+ ADBG_WARN(("ata_hba_start drvp not null\n"));
+ return (FALSE);
+ }
+ if (ata_ctlp->ac_active_pktp != NULL) {
+ ADBG_WARN(("ata_hba_start pktp not null\n"));
+ return (FALSE);
+ }
+
+ ata_pktp = GCMD2APKT(gcmdp);
+ ata_drvp = GCMD2DRV(gcmdp);
+
+ /*
+ * which drive?
+ */
+ if (ata_drvp->ad_targ == 0)
+ fsm_func = ATA_FSM_START0;
+ else
+ fsm_func = ATA_FSM_START1;
+
+ /*
+ * start the request
+ */
+ request_started = FALSE;
+ for (watchdog = ata_hba_start_watchdog; watchdog > 0; watchdog--) {
+ switch (ata_ctlr_fsm(fsm_func, ata_ctlp, ata_drvp, ata_pktp,
+ NULL)) {
+ case ATA_FSM_RC_OKAY:
+ request_started = TRUE;
+ goto fsm_done;
+
+ case ATA_FSM_RC_BUSY:
+ /* if first time, tell GHD to requeue the request */
+ goto fsm_done;
+
+ case ATA_FSM_RC_INTR:
+ /*
+ * The start function polled for the next
+ * bus phase, now fake an interrupt to process
+ * the next action.
+ */
+ request_started = TRUE;
+ fsm_func = ATA_FSM_INTR;
+ ata_drvp = NULL;
+ ata_pktp = NULL;
+ break;
+
+ case ATA_FSM_RC_FINI: /* move request to the done queue */
+ request_started = TRUE;
+ fsm_func = ATA_FSM_FINI;
+ ata_drvp = NULL;
+ ata_pktp = NULL;
+ break;
+ }
+ }
+ ADBG_WARN(("ata_hba_start: watchdog\n"));
+
+fsm_done:
+ return (request_started);
+
+}
+
+static int
+ata_check_pciide_blacklist(
+ dev_info_t *dip,
+ uint_t flags)
+{
+ ushort_t vendorid;
+ ushort_t deviceid;
+ pcibl_t *blp;
+ int *propp;
+ uint_t count;
+ int rc;
+
+
+ vendorid = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_get_parent(dip),
+ DDI_PROP_DONTPASS, "vendor-id", 0);
+ deviceid = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_get_parent(dip),
+ DDI_PROP_DONTPASS, "device-id", 0);
+
+ /*
+ * first check for a match in the "pci-ide-blacklist" property
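+	 * (the property value is an array of integers that is overlaid
+	 * onto an array of pcibl_t entries below)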
+ */
+ rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
+ "pci-ide-blacklist", &propp, &count);
+
+ if (rc == DDI_PROP_SUCCESS) {
+ count = (count * sizeof (uint_t)) / sizeof (pcibl_t);
+ blp = (pcibl_t *)propp;
+ while (count--) {
+ /* check for matching ID */
+ if ((vendorid & blp->b_vmask)
+ != (blp->b_vendorid & blp->b_vmask)) {
+ blp++;
+ continue;
+ }
+ if ((deviceid & blp->b_dmask)
+ != (blp->b_deviceid & blp->b_dmask)) {
+ blp++;
+ continue;
+ }
+
+ /* got a match */
+ if (blp->b_flags & flags) {
+ ddi_prop_free(propp);
+ return (TRUE);
+ } else {
+ ddi_prop_free(propp);
+ return (FALSE);
+ }
+ }
+ ddi_prop_free(propp);
+ }
+
+ /*
+ * then check the built-in blacklist
+ */
+ for (blp = ata_pciide_blacklist; blp->b_vendorid; blp++) {
+ if ((vendorid & blp->b_vmask) != blp->b_vendorid)
+ continue;
+ if ((deviceid & blp->b_dmask) != blp->b_deviceid)
+ continue;
+ if (!(blp->b_flags & flags))
+ continue;
+ return (TRUE);
+ }
+ return (FALSE);
+}
+
+int
+ata_check_drive_blacklist(
+ struct ata_id *aidp,
+ uint_t flags)
+{
+ atabl_t *blp;
+
+ for (blp = ata_drive_blacklist; blp->b_model; blp++) {
+ if (!ata_strncmp(blp->b_model, aidp->ai_model,
+ sizeof (aidp->ai_model)))
+ continue;
+ if (blp->b_flags & flags)
+ return (TRUE);
+ return (FALSE);
+ }
+ return (FALSE);
+}
+
+/*
+ * Queue a request to perform some sort of internally
+ * generated command. When this request packet reaches
+ * the front of the queue (*func)() is invoked.
+ *
+ */
+
+int
+ata_queue_cmd(
+ int (*func)(ata_ctl_t *, ata_drv_t *, ata_pkt_t *),
+ void *arg,
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ gtgt_t *gtgtp)
+{
+ ata_pkt_t *ata_pktp;
+ gcmd_t *gcmdp;
+ int rc;
+
+ if (!(gcmdp = ghd_gcmd_alloc(gtgtp, sizeof (*ata_pktp), TRUE))) {
+		ADBG_ERROR(("ata_queue_cmd: alloc failed\n"));
+ return (FALSE);
+ }
+
+
+ /* set the back ptr from the ata_pkt to the gcmd_t */
+ ata_pktp = GCMD2APKT(gcmdp);
+ ata_pktp->ap_gcmdp = gcmdp;
+ ata_pktp->ap_hd = ata_drvp->ad_drive_bits;
+ ata_pktp->ap_bytes_per_block = ata_drvp->ad_bytes_per_block;
+
+ /*
+ * over-ride the default start function
+ */
+ ata_pktp = GCMD2APKT(gcmdp);
+ ata_pktp->ap_start = func;
+ ata_pktp->ap_complete = NULL;
+ ata_pktp->ap_v_addr = (caddr_t)arg;
+
+ /*
+ * add it to the queue, when it gets to the front the
+ * ap_start function is called.
+ */
+ rc = ghd_transport(&ata_ctlp->ac_ccc, gcmdp, gcmdp->cmd_gtgtp,
+ 0, TRUE, NULL);
+
+ if (rc != TRAN_ACCEPT) {
+ /* this should never, ever happen */
+ return (FALSE);
+ }
+
+ if (ata_pktp->ap_flags & AP_ERROR)
+ return (FALSE);
+ return (TRUE);
+}
+
+/*
+ * Check if this drive has the "revert to defaults" bug
+ * PSARC 2001/500 and 2001/xxx - check for the properties
+ * ata-revert-to-defaults and revert-<diskmodel> before
+ * examining the blacklist.
+ * <diskmodel> is made from the model number reported by Identify Drive
+ * with uppercase letters converted to lowercase and all characters
+ * except letters, digits, ".", "_", and "-" deleted.
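+ * For example, a drive reporting a model of "ST3120026A" would
+ * (hypothetically) be matched by a property named "revert-st3120026a".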
+ * Return value:
+ * TRUE: enable revert to defaults
+ * FALSE: disable revert to defaults
+ *
+ * NOTE: reverting to power-on defaults, which includes reverting to MDMA
+ * mode, is allowed by the ATA-6 and ATA-7 specs.
+ * Therefore drives exhibiting this behaviour are not violating the spec.
+ * Furthermore, the spec explicitly says that after a soft reset the
+ * host should check the current setting of the device features.
+ * A correctly working BIOS would therefore reprogram the drive
+ * and/or the host controller to match transfer modes.
+ * Devices with the ATA_BL_NORVRT flag will be removed from
+ * the ata_blacklist.
+ * The default behaviour is no revert to power-on defaults
+ * for any device. The property is retained in case the user
+ * explicitly requests revert-to-defaults before reboot.
+ */
+
+#define ATA_REVERT_PROP_PREFIX "revert-"
+#define ATA_REVERT_PROP_GLOBAL "ata-revert-to-defaults"
+/* room for prefix + model number + terminating NUL character */
+#define PROP_BUF_SIZE (sizeof (ATA_REVERT_PROP_PREFIX) + \
+ sizeof (aidp->ai_model) + 1)
+#define PROP_LEN_MAX (31)
+
+static int
+ata_check_revert_to_defaults(
+ ata_drv_t *ata_drvp)
+{
+ struct ata_id *aidp = &ata_drvp->ad_id;
+ ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
+ char prop_buf[PROP_BUF_SIZE];
+ int i, j;
+ int propval;
+
+ /* put prefix into the buffer */
+ (void) strcpy(prop_buf, ATA_REVERT_PROP_PREFIX);
+ j = strlen(prop_buf);
+
+ /* append the model number, leaving out invalid characters */
+ for (i = 0; i < sizeof (aidp->ai_model); ++i) {
+ char c = aidp->ai_model[i];
+ if (c >= 'A' && c <= 'Z') /* uppercase -> lower */
+ c = c - 'A' + 'a';
+ if (c >= 'a' && c <= 'z' || c >= '0' && c <= '9' ||
+ c == '.' || c == '_' || c == '-')
+ prop_buf[j++] = c;
+ if (c == '\0')
+ break;
+ }
+
+ /* make sure there's a terminating NUL character */
+ if (j >= PROP_LEN_MAX)
+ j = PROP_LEN_MAX;
+ prop_buf[j] = '\0';
+
+	/* look for a disk-specific "revert" property */
+ propval = ddi_getprop(DDI_DEV_T_ANY, ata_ctlp->ac_dip,
+ DDI_PROP_DONTPASS, prop_buf, -1);
+ if (propval == 0)
+ return (FALSE);
+ else if (propval != -1)
+ return (TRUE);
+
+	/* look for a global "revert" property */
+ propval = ddi_getprop(DDI_DEV_T_ANY, ata_ctlp->ac_dip,
+ 0, ATA_REVERT_PROP_GLOBAL, -1);
+ if (propval == 0)
+ return (FALSE);
+ else if (propval != -1)
+ return (TRUE);
+
+ return (FALSE);
+}
+
+void
+ata_show_transfer_mode(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp)
+{
+ int i;
+
+ if (ata_ctlp->ac_pciide_bm == FALSE ||
+ ata_drvp->ad_pciide_dma != ATA_DMA_ON) {
+ if (ata_cntrl_DMA_sel_msg) {
+ ATAPRT((
+ "?\tATA DMA off: %s\n", ata_cntrl_DMA_sel_msg));
+ } else if (ata_dev_DMA_sel_msg) {
+ ATAPRT(("?\tATA DMA off: %s\n", ata_dev_DMA_sel_msg));
+ }
+ ATAPRT(("?\tPIO mode %d selected\n",
+ (ata_drvp->ad_id.ai_advpiomode & ATAC_ADVPIO_4_SUP) ==
+ ATAC_ADVPIO_4_SUP ? 4 : 3));
+ } else {
+ /* Using DMA */
+ if (ata_drvp->ad_id.ai_dworddma & ATAC_MDMA_SEL_MASK) {
+ /*
+ * Rely on the fact that either dwdma or udma is
+ * selected, not both.
+ */
+ ATAPRT(("?\tMultiwordDMA mode %d selected\n",
+ (ata_drvp->ad_id.ai_dworddma & ATAC_MDMA_2_SEL) ==
+ ATAC_MDMA_2_SEL ? 2 :
+ (ata_drvp->ad_id.ai_dworddma & ATAC_MDMA_1_SEL) ==
+ ATAC_MDMA_1_SEL ? 1 : 0));
+ } else {
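+			/*
+			 * Bits 8-14 of identify word 88 (ai_ultradma)
+			 * indicate the currently selected Ultra DMA mode.
+			 */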
+ for (i = 0; i <= 6; i++) {
+ if (ata_drvp->ad_id.ai_ultradma &
+ (1 << (i + 8))) {
+ ATAPRT((
+ "?\tUltraDMA mode %d selected\n",
+ i));
+ break;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Controller-specific operation pointers.
+ * Should be extended as needed - init only for now
+ */
+struct ata_ctl_spec_ops {
+ uint_t (*cs_init)(dev_info_t *, ushort_t, ushort_t); /* ctlr init */
+};
+
+
+struct ata_ctl_spec {
+ ushort_t cs_vendor_id;
+ ushort_t cs_device_id;
+ struct ata_ctl_spec_ops *cs_ops;
+};
+
+/* Sil3XXX-specific functions (init only for now) */
+struct ata_ctl_spec_ops sil3xxx_ops = {
+ &sil3xxx_init_controller /* Sil3XXX cntrl initialization */
+};
+
+
+struct ata_ctl_spec ata_cntrls_spec[] = {
+ {0x1095, 0x3114, &sil3xxx_ops},
+ {0x1095, 0x3512, &sil3xxx_ops},
+ {0x1095, 0x3112, &sil3xxx_ops},
+ {0, 0, NULL} /* List must end with cs_ops set to NULL */
+};
+
+/*
+ * Do controller specific initialization if necessary.
+ * Pick-up controller specific functions.
+ */
+
+int
+ata_spec_init_controller(dev_info_t *dip)
+{
+ ushort_t vendor_id;
+ ushort_t device_id;
+ struct ata_ctl_spec *ctlsp;
+
+ vendor_id = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_get_parent(dip),
+ DDI_PROP_DONTPASS, "vendor-id", 0);
+ device_id = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_get_parent(dip),
+ DDI_PROP_DONTPASS, "device-id", 0);
+
+ /* Locate controller specific ops, if they exist */
+ ctlsp = ata_cntrls_spec;
+ while (ctlsp->cs_ops != NULL) {
+ if (ctlsp->cs_vendor_id == vendor_id &&
+ ctlsp->cs_device_id == device_id)
+ break;
+ ctlsp++;
+ }
+
+ if (ctlsp->cs_ops != NULL) {
+ if (ctlsp->cs_ops->cs_init != NULL) {
+ /* Initialize controller */
+ if ((*(ctlsp->cs_ops->cs_init))
+ (dip, vendor_id, device_id) != TRUE) {
+ cmn_err(CE_WARN,
+ "pci%4x,%4x cntrl specific "
+ "initialization failed",
+ vendor_id, device_id);
+ return (FALSE);
+ }
+ }
+ }
+ return (TRUE);
+}
+
+/*
+ * This routine works like ddi_prop_get_int, except that it first looks
+ * for a string property containing the ASCII representation of an
+ * integer, and falls back to an integer-encoded property.
+ * If the property is not found, the default value is returned.
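+ *
+ * For example, ata-dma-enabled="1" (string) and ata-dma-enabled=1
+ * (integer) are both interpreted as the value 1.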
+ */
+static int
+ata_prop_lookup_int(dev_t match_dev, dev_info_t *dip,
+ uint_t flags, char *name, int defvalue)
+{
+
+ char *bufp, *cp;
+ int rc = defvalue;
+ int proprc;
+
+ proprc = ddi_prop_lookup_string(match_dev, dip,
+ flags, name, &bufp);
+
+ if (proprc == DDI_PROP_SUCCESS) {
+ cp = bufp;
+ rc = stoi(&cp);
+ ddi_prop_free(bufp);
+ } else {
+ /*
+ * see if property is encoded as an int instead of string.
+ */
+ rc = ddi_prop_get_int(match_dev, dip, flags, name, defvalue);
+ }
+
+ return (rc);
+}
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata_common.h b/usr/src/uts/intel/io/dktp/controller/ata/ata_common.h
new file mode 100644
index 0000000000..aca985bae9
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata_common.h
@@ -0,0 +1,697 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _ATA_COMMON_H
+#define _ATA_COMMON_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/varargs.h>
+
+#include <sys/scsi/scsi.h>
+#include <sys/dktp/dadkio.h>
+#include <sys/dktp/dadev.h>
+#include <sys/dkio.h>
+#include <sys/dktp/tgdk.h>
+
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include "ghd.h"
+
+#include "pciide.h"
+#include "ata_cmd.h"
+#include "ata_fsm.h"
+#include "ata_debug.h"
+
+
+/*
+ * device types
+ */
+#define ATA_DEV_NONE 0
+#define ATA_DEV_DISK 1
+#define ATA_DEV_ATAPI 2
+
+/*
+ * Largest sector allowed in 28 bit mode
+ */
+#define MAX_28BIT_CAPACITY 0xfffffff
+
+
+
+/*
+ * ata-options property configuration bits
+ */
+
+#define ATA_OPTIONS_DMA 0x01
+
+
+
+/* ad_flags (per-drive) */
+
+#define AD_ATAPI 0x01 /* is an ATAPI drive */
+#define AD_DISK 0x02
+#define AD_MUTEX_INIT 0x04
+#define AD_NO_CDB_INTR 0x20
+#define AD_1SECTOR 0x40
+#define AD_INT13LBA 0x80 /* supports LBA at Int13 interface */
+#define AD_NORVRT 0x100 /* block revert-to-defaults */
+#define AD_EXT48 0x200 /* 48 bit (extended) LBA */
+#define ATAPIDRV(X) ((X)->ad_flags & AD_ATAPI)
+
+
+/* max targets and luns */
+
+#define ATA_MAXTARG 2
+#define ATA_MAXLUN 16
+
+/*
+ * PCI-IDE Bus Mastering Scatter/Gather list size
+ */
+#define ATA_DMA_NSEGS 17 /* enough for at least 64K */
+
+/*
+ * Controller port address defaults
+ */
+#define ATA_BASE0 0x1f0
+#define ATA_BASE1 0x170
+
+/*
+ * port offsets from base address ioaddr1
+ */
+#define AT_DATA 0x00 /* data register */
+#define AT_ERROR 0x01 /* error register (read) */
+#define AT_FEATURE 0x01 /* features (write) */
+#define AT_COUNT 0x02 /* sector count */
+#define AT_SECT 0x03 /* sector number */
+#define AT_LCYL 0x04 /* cylinder low byte */
+#define AT_HCYL 0x05 /* cylinder high byte */
+#define AT_DRVHD 0x06 /* drive/head register */
+#define AT_STATUS 0x07 /* status/command register */
+#define AT_CMD 0x07 /* status/command register */
+
+/*
+ * port offsets from base address ioaddr2
+ */
+#define AT_ALTSTATUS 0x00 /* alternate status (read) */
+#define AT_DEVCTL 0x00 /* device control (write) */
+
+/* Device control register */
+#define ATDC_NIEN 0x02 /* disable interrupts */
+#define ATDC_SRST 0x04 /* controller reset */
+#define ATDC_D3 0x08 /* Mysterious bit, must be set */
+/*
+ * ATA-6 spec
+ * In 48-bit addressing, reading the LBA location and count
+ * registers when the high-order bit is set reads the "previous
+ * content" (LBA bits 47:24, count bits 15:8) instead of the
+ * "most recent" values (LBA bits 23:0, count bits 7:0).
+ */
+#define ATDC_HOB 0x80 /* High order bit */
+
+/*
+ * Status bits from AT_STATUS register
+ */
+#define ATS_BSY 0x80 /* controller busy */
+#define ATS_DRDY 0x40 /* drive ready */
+#define ATS_DF 0x20 /* device fault */
+#define ATS_DSC 0x10 /* seek operation complete */
+#define ATS_DRQ 0x08 /* data request */
+#define ATS_CORR 0x04 /* ECC correction applied */
+#define ATS_IDX 0x02 /* disk revolution index */
+#define ATS_ERR 0x01 /* error flag */
+
+/*
+ * Status bits from AT_ERROR register
+ */
+#define ATE_BBK_ICRC 0x80 /* bad block detected in ATA-1 */
+ /* ICRC error in ATA-4 and newer */
+#define ATE_UNC 0x40 /* uncorrectable data error */
+#define ATE_MC 0x20 /* Media change */
+#define ATE_IDNF 0x10 /* ID not found */
+#define ATE_MCR 0x08 /* media change request */
+#define ATE_ABORT 0x04 /* aborted command */
+#define ATE_TKONF 0x02 /* track 0 not found */
+#define ATE_AMNF 0x01 /* address mark not found */
+
+#define ATE_NM 0x02 /* no media */
+
+/*
+ * Drive selectors for AT_DRVHD register
+ */
+#define ATDH_LBA 0x40 /* addressing in LBA mode not chs */
+#define ATDH_DRIVE0 0xa0 /* or into AT_DRVHD to select drive 0 */
+#define ATDH_DRIVE1 0xb0 /* or into AT_DRVHD to select drive 1 */
+
+/*
+ * Feature register bits
+ */
+#define ATF_ATAPI_DMA 0x01 /* ATAPI DMA enable bit */
+
+/*
+ * common bits and options for set features (ATC_SET_FEAT)
+ */
+#define FC_WRITE_CACHE_ON 0x02
+#define FC_WRITE_CACHE_OFF 0x82
+
+/* Test which version of ATA is supported */
+#define IS_ATA_VERSION_SUPPORTED(idp, n) \
+ ((idp->ai_majorversion != 0xffff) && \
+ (idp->ai_majorversion & (1<<n)))
+
+/* Test if supported version >= ATA-n */
+#define IS_ATA_VERSION_GE(idp, n) \
+ ((idp->ai_majorversion != 0xffff) && \
+ (idp->ai_majorversion != 0) && \
+ (idp->ai_majorversion >= (1<<n)))
+
+/* Test whether a device is a CD drive */
+#define IS_CDROM(dp) \
+ ((dp->ad_flags & AD_ATAPI) && \
+ ((dp->ad_id.ai_config >> 8) & DTYPE_MASK) == \
+ DTYPE_RODIRECT)
+
+/* macros from old common hba code */
+
+#define ATA_INTPROP(devi, pname, pval, plen) \
+ (ddi_prop_op(DDI_DEV_T_ANY, (devi), PROP_LEN_AND_VAL_BUF, \
+ DDI_PROP_DONTPASS, (pname), (caddr_t)(pval), (plen)))
+
+#define ATA_LONGPROP(devi, pname, pval, plen) \
+ (ddi_getlongprop(DDI_DEV_T_ANY, (devi), DDI_PROP_DONTPASS, \
+ (pname), (caddr_t)(pval), (plen)))
+
+/*
+ *
+ * per-controller soft-state data structure
+ *
+ */
+
+#define CTL2DRV(cp, t, l) (cp->ac_drvp[t][l])
+
+typedef struct ata_ctl {
+
+ dev_info_t *ac_dip;
+ uint_t ac_flags;
+ uint_t ac_timing_flags;
+ struct ata_drv *ac_drvp[ATA_MAXTARG][ATA_MAXLUN];
+ int ac_max_transfer; /* max transfer in sectors */
+ uint_t ac_standby_time; /* timer value seconds */
+
+ ccc_t ac_ccc; /* for GHD module */
+ struct ata_drv *ac_active_drvp; /* active drive, if any */
+ struct ata_pkt *ac_active_pktp; /* active packet, if any */
+ uchar_t ac_state;
+
+ scsi_hba_tran_t *ac_atapi_tran; /* for atapi module */
+
+ /*
+ * port addresses associated with ioaddr1
+ */
+ ddi_acc_handle_t ac_iohandle1; /* DDI I/O handle */
+ caddr_t ac_ioaddr1;
+ ushort_t *ac_data; /* data register */
+ uchar_t *ac_error; /* error register (read) */
+ uchar_t *ac_feature; /* features (write) */
+ uchar_t *ac_count; /* sector count */
+ uchar_t *ac_sect; /* sector number */
+ uchar_t *ac_lcyl; /* cylinder low byte */
+ uchar_t *ac_hcyl; /* cylinder high byte */
+ uchar_t *ac_drvhd; /* drive/head register */
+ uchar_t *ac_status; /* status/command register */
+ uchar_t *ac_cmd; /* status/command register */
+
+ /*
+ * port addresses associated with ioaddr2
+ */
+ ddi_acc_handle_t ac_iohandle2; /* DDI I/O handle */
+ caddr_t ac_ioaddr2;
+ uchar_t *ac_altstatus; /* alternate status (read) */
+ uchar_t *ac_devctl; /* device control (write) */
+
+ /*
+	 * handle and port address for the PCI-IDE Bus Master controller
+ */
+ ddi_acc_handle_t ac_bmhandle; /* DDI I/O handle */
+ caddr_t ac_bmaddr; /* base addr of Bus Master Regs */
+ uchar_t ac_pciide; /* PCI-IDE device */
+ uchar_t ac_pciide_bm; /* Bus Mastering PCI-IDE device */
+
+ /*
+ * Scatter/Gather list for PCI-IDE Bus Mastering controllers
+ */
+ caddr_t ac_sg_list; /* virtual addr of S/G list */
+ paddr_t ac_sg_paddr; /* phys addr of S/G list */
+ ddi_acc_handle_t ac_sg_acc_handle;
+ ddi_dma_handle_t ac_sg_handle;
+
+ /*
+ * data for managing ARQ on ATAPI devices
+ */
+ struct ata_pkt *ac_arq_pktp; /* pkt for performing ATAPI ARQ */
+ struct ata_pkt *ac_fault_pktp; /* pkt that caused ARQ */
+ uchar_t ac_arq_cdb[6];
+} ata_ctl_t;
+
+/* ac_flags (per-controller) */
+
+#define AC_GHD_INIT 0x02
+#define AC_ATAPI_INIT 0x04
+#define AC_DISK_INIT 0x08
+#define AC_ATTACHED 0x10
+#define AC_SCSI_HBA_TRAN_ALLOC 0x1000
+#define AC_SCSI_HBA_ATTACH 0x2000
+
+#define AC_BMSTATREG_PIO_BROKEN 0x80000000
+
+/*
+ * Bug 1256489:
+ *
+ * AC_BSY_WAIT needs to be set for laptops that do
+ * suspend/resume but do not correctly wait for the busy bit to
+ * drop after a resume.
+ */
+
+/* ac_timing_flags (per-controller) */
+#define AC_BSY_WAIT 0x1 /* tweak timing in ata_start & atapi_start */
+
+
+
+/* Identify drive data */
+struct ata_id {
+/* WORD */
+/* OFFSET COMMENT */
+ ushort_t ai_config; /* 0 general configuration bits */
+ ushort_t ai_fixcyls; /* 1 # of fixed cylinders */
+ ushort_t ai_resv0; /* 2 # reserved */
+ ushort_t ai_heads; /* 3 # of heads */
+ ushort_t ai_trksiz; /* 4 # of unformatted bytes/track */
+ ushort_t ai_secsiz; /* 5 # of unformatted bytes/sector */
+ ushort_t ai_sectors; /* 6 # of sectors/track */
+ ushort_t ai_resv1[3]; /* 7 "Vendor Unique" */
+ char ai_drvser[20]; /* 10 Serial number */
+ ushort_t ai_buftype; /* 20 Buffer type */
+ ushort_t ai_bufsz; /* 21 Buffer size in 512 byte incr */
+ ushort_t ai_ecc; /* 22 # of ecc bytes avail on rd/wr */
+ char ai_fw[8]; /* 23 Firmware revision */
+ char ai_model[40]; /* 27 Model # */
+ ushort_t ai_mult1; /* 47 Multiple command flags */
+ ushort_t ai_dwcap; /* 48 Doubleword capabilities */
+ ushort_t ai_cap; /* 49 Capabilities */
+ ushort_t ai_resv2; /* 50 Reserved */
+ ushort_t ai_piomode; /* 51 PIO timing mode */
+ ushort_t ai_dmamode; /* 52 DMA timing mode */
+ ushort_t ai_validinfo; /* 53 bit0: wds 54-58, bit1: 64-70 */
+ ushort_t ai_curcyls; /* 54 # of current cylinders */
+ ushort_t ai_curheads; /* 55 # of current heads */
+ ushort_t ai_cursectrk; /* 56 # of current sectors/track */
+ ushort_t ai_cursccp[2]; /* 57 current sectors capacity */
+ ushort_t ai_mult2; /* 59 multiple sectors info */
+ ushort_t ai_addrsec[2]; /* 60 LBA only: no of addr secs */
+ ushort_t ai_sworddma; /* 62 single word dma modes */
+	ushort_t ai_dworddma;	/* 63 multiword dma modes */
+ ushort_t ai_advpiomode; /* 64 advanced PIO modes supported */
+ ushort_t ai_minmwdma; /* 65 min multi-word dma cycle info */
+ ushort_t ai_recmwdma; /* 66 rec multi-word dma cycle info */
+ ushort_t ai_minpio; /* 67 min PIO cycle info */
+ ushort_t ai_minpioflow; /* 68 min PIO cycle info w/flow ctl */
+ ushort_t ai_resv3[2]; /* 69,70 reserved */
+ ushort_t ai_resv4[4]; /* 71-74 reserved */
+ ushort_t ai_qdepth; /* 75 queue depth */
+ ushort_t ai_resv5[4]; /* 76-79 reserved */
+ ushort_t ai_majorversion; /* 80 major versions supported */
+ ushort_t ai_minorversion; /* 81 minor version number supported */
+ ushort_t ai_cmdset82; /* 82 command set supported */
+ ushort_t ai_cmdset83; /* 83 more command sets supported */
+ ushort_t ai_cmdset84; /* 84 more command sets supported */
+ ushort_t ai_features85; /* 85 enabled features */
+ ushort_t ai_features86; /* 86 enabled features */
+ ushort_t ai_features87; /* 87 enabled features */
+ ushort_t ai_ultradma; /* 88 Ultra DMA mode */
+ ushort_t ai_erasetime; /* 89 security erase time */
+ ushort_t ai_erasetimex; /* 90 enhanced security erase time */
+ ushort_t ai_padding1[9]; /* pad through 99 */
+ ushort_t ai_addrsecxt[4]; /* 100 extended max LBA sector */
+ ushort_t ai_padding2[22]; /* pad to 126 */
+ ushort_t ai_lastlun; /* 126 last LUN, as per SFF-8070i */
+ ushort_t ai_resv6; /* 127 reserved */
+ ushort_t ai_securestatus; /* 128 security status */
+ ushort_t ai_vendor[31]; /* 129-159 vendor specific */
+ ushort_t ai_padding3[16]; /* 160 pad to 176 */
+ ushort_t ai_curmedser[30]; /* 176-205 current media serial number */
+ ushort_t ai_padding4[49]; /* 206 pad to 255 */
+ ushort_t ai_integrity; /* 255 integrity word */
+};
+
+/* Identify Drive: general config bits - word 0 */
+
+#define ATA_ID_REM_DRV 0x80
+#define ATA_ID_COMPACT_FLASH 0x848a
+#define ATA_ID_CF_TO_ATA 0x040a
+
+/* Identify Drive: common capability bits - word 49 */
+
+#define ATAC_DMA_SUPPORT 0x0100
+#define ATAC_LBA_SUPPORT 0x0200
+#define ATAC_IORDY_DISABLE 0x0400
+#define ATAC_IORDY_SUPPORT 0x0800
+#define ATAC_RESERVED_IDPKT 0x1000 /* rsrvd for identify pkt dev */
+#define ATAC_STANDBYTIMER 0x2000
+#define ATAC_ATA_TYPE_MASK 0x8001
+#define ATAC_ATA_TYPE 0x0000
+#define ATAC_ATAPI_TYPE_MASK 0xc000
+#define ATAC_ATAPI_TYPE 0x8000
+
+/* Identify Drive: ai_validinfo (word 53) */
+
+#define ATAC_VALIDINFO_83 0x0004 /* word 83 supported fields valid */
+#define ATAC_VALIDINFO_70_64 0x0002 /* word 70:64 sup. fields valid */
+
+/* Identify Drive: ai_dworddma (word 63) */
+
+#define ATAC_MDMA_SEL_MASK 0x0700 /* Multiword DMA selected */
+#define ATAC_MDMA_2_SEL 0x0400 /* Multiword DMA mode 2 selected */
+#define ATAC_MDMA_1_SEL 0x0200 /* Multiword DMA mode 1 selected */
+#define ATAC_MDMA_0_SEL 0x0100 /* Multiword DMA mode 0 selected */
+#define ATAC_MDMA_2_SUP 0x0004 /* Multiword DMA mode 2 supported */
+#define ATAC_MDMA_1_SUP 0x0002 /* Multiword DMA mode 1 supported */
+#define ATAC_MDMA_0_SUP 0x0001 /* Multiword DMA mode 0 supported */
+
+/* Identify Drive: ai_advpiomode (word 64) */
+
+#define ATAC_ADVPIO_4_SUP 0x0002 /* PIO mode 4 supported */
+#define ATAC_ADVPIO_3_SUP 0x0001 /* PIO mode 3 supported */
+#define ATAC_ADVPIO_SERIAL 0x0003 /* Serial interface */
+
+/* Identify Drive: ai_majorversion (word 80) */
+
+#define ATAC_MAJVER_6 0x0040 /* ATA/ATAPI-6 version supported */
+#define ATAC_MAJVER_4 0x0010 /* ATA/ATAPI-4 version supported */
+
+/* Identify Drive: command set supported/enabled bits - words 83 and 86 */
+
+#define ATACS_EXT48 0x0400 /* 48 bit address feature */
+
+/* Identify Drive: ai_features85 (word 85) */
+#define ATAC_FEATURES85_WCE 0x0020 /* write cache enabled */
+
+/* per-drive data struct */
+
+typedef struct ata_drv {
+ ata_ctl_t *ad_ctlp; /* pointer back to ctlr */
+ struct ata_id ad_id; /* IDENTIFY DRIVE data */
+
+ uint_t ad_flags;
+ uchar_t ad_pciide_dma; /* PCIIDE DMA supported */
+ uchar_t ad_targ; /* target */
+ uchar_t ad_lun; /* lun */
+ uchar_t ad_drive_bits;
+
+ /* Used by atapi side only */
+
+ uchar_t ad_state; /* state of ATAPI FSM */
+ uchar_t ad_cdb_len; /* Size of ATAPI CDBs */
+
+ uchar_t ad_bogus_drq;
+ uchar_t ad_nec_bad_status;
+
+ /* Used by disk side only */
+
+ struct scsi_device ad_device;
+ struct scsi_inquiry ad_inquiry;
+ struct ctl_obj ad_ctl_obj;
+ uchar_t ad_rd_cmd;
+ uchar_t ad_wr_cmd;
+ ushort_t ad_acyl;
+
+ /*
+ * Geometry note: The following three values are the geometry
+ * that the driver will use. They may differ from the
+ * geometry reported by the controller and/or BIOS. See note
+ * on ata_fix_large_disk_geometry in ata_disk.c for more
+ * details.
+ */
+ uint32_t ad_drvrcyl; /* number of cyls */
+ uint32_t ad_drvrhd; /* number of heads */
+ uint32_t ad_drvrsec; /* number of sectors */
+ ushort_t ad_phhd; /* number of phys heads */
+ ushort_t ad_phsec; /* number of phys sectors */
+ short ad_block_factor;
+ short ad_bytes_per_block;
+
+ /*
+ * Support for 48-bit LBA (ATA-6)
+ */
+ uint64_t ad_capacity; /* Total sectors on disk */
+} ata_drv_t;
+
+typedef struct ata_tgt {
+ ata_drv_t *at_drvp;
+ int at_arq;
+ ulong_t at_total_sectors;
+ ddi_dma_attr_t at_dma_attr;
+} ata_tgt_t;
+
+/* values for ad_pciide_dma */
+#define ATA_DMA_OFF 0x0
+#define ATA_DMA_ON 0x1
+
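+/*
+ * Conversion macros between the GHD framework objects (gcmd_t, gtgt_t,
+ * hba_tran) and this driver's private structures (ata_pkt_t, ata_ctl_t,
+ * ata_tgt_t, ata_drv_t).
+ */
+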
+/*
+ * (ata_pkt_t *) to (gcmd_t *)
+ */
+#define APKT2GCMD(apktp) (apktp->ap_gcmdp)
+
+/*
+ * (gcmd_t *) to (ata_pkt_t *)
+ */
+#define GCMD2APKT(gcmdp) ((ata_pkt_t *)gcmdp->cmd_private)
+
+/*
+ * (gtgt_t *) to (ata_ctl_t *)
+ */
+#define GTGTP2ATAP(gtgtp) ((ata_ctl_t *)GTGTP2HBA(gtgtp))
+
+/*
+ * (gtgt_t *) to (ata_tgt_t *)
+ */
+#define GTGTP2ATATGTP(gtgtp) ((ata_tgt_t *)GTGTP2TARGET(gtgtp))
+
+/*
+ * (gtgt_t *) to (ata_drv_t *)
+ */
+#define GTGTP2ATADRVP(gtgtp) (GTGTP2ATATGTP(gtgtp)->at_drvp)
+
+/*
+ * (gcmd_t *) to (ata_tgt_t *)
+ */
+#define GCMD2TGT(gcmdp) GTGTP2ATATGTP(GCMDP2GTGTP(gcmdp))
+
+/*
+ * (gcmd_t *) to (ata_drv_t *)
+ */
+#define GCMD2DRV(gcmdp) GTGTP2ATADRVP(GCMDP2GTGTP(gcmdp))
+
+/*
+ * (ata_pkt_t *) to (ata_drv_t *)
+ */
+#define APKT2DRV(apktp) GCMD2DRV(APKT2GCMD(apktp))
+
+
+/*
+ * (struct hba_tran *) to (ata_ctl_t *)
+ */
+#define TRAN2ATAP(tranp) ((ata_ctl_t *)TRAN2HBA(tranp))
+
+
+/*
+ * ata common packet structure
+ */
+typedef struct ata_pkt {
+
+ gcmd_t *ap_gcmdp; /* GHD command struct */
+
+ uint_t ap_flags; /* packet flags */
+
+ caddr_t ap_baddr; /* I/O buffer base address */
+ size_t ap_boffset; /* current offset into I/O buffer */
+ size_t ap_bcount; /* # bytes in this request */
+
+ caddr_t ap_v_addr; /* I/O buffer address */
+ size_t ap_resid; /* # bytes left to read/write */
+
+ uchar_t ap_pciide_dma; /* This pkt uses DMA transfer mode */
+ prde_t ap_sg_list[ATA_DMA_NSEGS]; /* Scatter/Gather list */
+ int ap_sg_cnt; /* number of entries in S/G list */
+
+ /* command, starting sector number, sector count */
+
+ daddr_t ap_startsec; /* starting sector number */
+ ushort_t ap_count; /* sector count */
+ uchar_t ap_sec;
+ uchar_t ap_lwcyl;
+ uchar_t ap_hicyl;
+ uchar_t ap_hd;
+ uchar_t ap_cmd;
+
+ /* saved status and error registers for error case */
+
+ uchar_t ap_status;
+ uchar_t ap_error;
+
+ /* disk/atapi callback routines */
+
+ int (*ap_start)(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ struct ata_pkt *ata_pktp);
+ int (*ap_intr)(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ struct ata_pkt *ata_pktp);
+ void (*ap_complete)(ata_drv_t *ata_drvp,
+ struct ata_pkt *ata_pktp, int do_callback);
+
+ /* Used by disk side */
+
+ char ap_cdb; /* disk command */
+ char ap_scb; /* status after disk cmd */
+ uint_t ap_bytes_per_block; /* blk mode factor */
+ uint_t ap_wrt_count; /* size of last write */
+ caddr_t ap_v_addr_sav; /* Original I/O buffer address. */
+ size_t ap_resid_sav; /* Original # of bytes */
+ /* left to read/write. */
+
+ /* Used by atapi side */
+
+ uchar_t *ap_cdbp; /* ptr to SCSI CDB */
+ uchar_t ap_cdb_len; /* length of SCSI CDB (in bytes) */
+ uchar_t ap_cdb_pad; /* padding after SCSI CDB (in shorts) */
+
+ struct scsi_arq_status *ap_scbp; /* ptr to SCSI status block */
+ uchar_t ap_statuslen; /* length of SCSI status block */
+} ata_pkt_t;
+
+
+/*
+ * defines for ap_flags
+ */
+#define AP_ATAPI 0x0001 /* device is atapi */
+#define AP_ERROR 0x0002 /* normal error */
+#define AP_TRAN_ERROR 0x0004 /* transport error */
+#define AP_READ 0x0008 /* read data */
+#define AP_WRITE 0x0010 /* write data */
+#define AP_ABORT 0x0020 /* packet aborted */
+#define AP_TIMEOUT 0x0040 /* packet timed out */
+#define AP_BUS_RESET 0x0080 /* bus reset */
+#define AP_DEV_RESET 0x0100 /* device reset */
+
+#define AP_SENT_CMD 0x0200 /* atapi: cdb sent */
+#define AP_XFERRED_DATA 0x0400 /* atapi: data transferred */
+#define AP_GOT_STATUS 0x0800 /* atapi: status received */
+#define AP_ARQ_ON_ERROR 0x1000 /* atapi: do ARQ on error */
+#define AP_ARQ_OKAY 0x2000
+#define AP_ARQ_ERROR 0x4000
+
+#define AP_FREE 0x80000000u /* packet is free! */
+
+
+/*
+ * public function prototypes
+ */
+
+int ata_check_drive_blacklist(struct ata_id *aidp, uint_t flags);
+int ata_command(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp, int expect_drdy,
+ int silent, uint_t busy_wait, uchar_t cmd, uchar_t feature,
+ uchar_t count, uchar_t sector, uchar_t head, uchar_t cyl_low,
+ uchar_t cyl_hi);
+int ata_get_status_clear_intr(ata_ctl_t *ata_ctlp, ata_pkt_t *ata_pktp);
+int ata_id_common(uchar_t id_cmd, int drdy_expected,
+ ddi_acc_handle_t io_hdl1, caddr_t ioaddr1,
+ ddi_acc_handle_t io_hdl2, caddr_t ioaddr2,
+ struct ata_id *ata_idp);
+int ata_prop_create(dev_info_t *tgt_dip, ata_drv_t *ata_drvp, char *name);
+int ata_queue_cmd(int (*func)(ata_ctl_t *, ata_drv_t *, ata_pkt_t *),
+ void *arg, ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ gtgt_t *gtgtp);
+int ata_set_feature(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ uchar_t feature, uchar_t value);
+int ata_wait(ddi_acc_handle_t io_hdl, caddr_t ioaddr, uchar_t onbits,
+ uchar_t offbits, uint_t timeout_usec);
+int ata_wait3(ddi_acc_handle_t io_hdl, caddr_t ioaddr, uchar_t onbits1,
+ uchar_t offbits1, uchar_t failure_onbits2,
+ uchar_t failure_offbits2, uchar_t failure_onbits3,
+ uchar_t failure_offbits3, uint_t timeout_usec);
+int ata_test_lba_support(struct ata_id *aidp);
+
+/*
+ * It's not clear which of the two following delay mechanisms is
+ * better.
+ *
+ * We really need something better than drv_usecwait(). The
+ * granularity of drv_usecwait() currently is 10 usec. This means that
+ * the ATA_DELAY_400NSEC macro delays 25 times longer than necessary.
+ *
+ * Doing 4 inb()'s from the alternate status register is guaranteed
+ * to take at least 400 nsecs (it may take as long as 4 usecs).
+ * The problem with inb() is that on an x86 platform it also causes
+ * a CPU synchronization, CPU write buffer flush, cache flush, and
+ * flushes posted writes in any PCI bridge devices between the CPU
+ * and the ATA controller.
+ */
+#if 1
+#define ATA_DELAY_400NSEC(H, A) \
+ ((void) ddi_get8((H), (uint8_t *)(A) + AT_ALTSTATUS), \
+ (void) ddi_get8((H), (uint8_t *)(A) + AT_ALTSTATUS), \
+ (void) ddi_get8((H), (uint8_t *)(A) + AT_ALTSTATUS), \
+ (void) ddi_get8((H), (uint8_t *)(A) + AT_ALTSTATUS))
+#else
+#define ATA_DELAY_400NSEC(H, A) ((void) drv_usecwait(1))
+#endif
+
+
+/*
+ * PCIIDE DMA (Bus Mastering) functions and data in ata_dma.c
+ */
+extern ddi_dma_attr_t ata_pciide_dma_attr;
+extern int ata_dma_disabled;
+
+int ata_pciide_alloc(dev_info_t *dip, ata_ctl_t *ata_ctlp);
+void ata_pciide_free(ata_ctl_t *ata_ctlp);
+
+void ata_pciide_dma_sg_func(gcmd_t *gcmdp, ddi_dma_cookie_t *dmackp,
+ int single_segment, int seg_index);
+void ata_pciide_dma_setup(ata_ctl_t *ata_ctlp, prde_t *srcp, int sg_cnt);
+void ata_pciide_dma_start(ata_ctl_t *ata_ctlp, uchar_t direction);
+void ata_pciide_dma_stop(ata_ctl_t *ata_ctlp);
+int ata_pciide_status_clear(ata_ctl_t *ata_ctlp);
+int ata_pciide_status_dmacheck_clear(ata_ctl_t *ata_ctlp);
+int ata_pciide_status_pending(ata_ctl_t *ata_ctlp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ATA_COMMON_H */
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata_debug.c b/usr/src/uts/intel/io/dktp/controller/ata/ata_debug.c
new file mode 100644
index 0000000000..930d919c2d
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata_debug.c
@@ -0,0 +1,120 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/debug.h>
+
+#include "ata_common.h"
+#include "ata_disk.h"
+#include "atapi.h"
+#include "pciide.h"
+
+
+#ifdef ATA_DEBUG
+
+void
+dump_ata_ctl(ata_ctl_t *P)
+{
+ ghd_err("dip 0x%p flags 0x%x timing 0x%x\n",
+ P->ac_dip, P->ac_flags, P->ac_timing_flags);
+ ghd_err("drvp[0][0..7] 0x%p 0x%p 0x%p 0x%p 0x%p 0x%p 0x%p 0x%p\n",
+ P->ac_drvp[0][0], P->ac_drvp[0][1], P->ac_drvp[0][2],
+ P->ac_drvp[0][3], P->ac_drvp[0][4], P->ac_drvp[0][5],
+ P->ac_drvp[0][6], P->ac_drvp[0][7]);
+ ghd_err("drvp[1][0..7] 0x%p 0x%p 0x%p 0x%p 0x%p 0x%p 0x%p 0x%p\n",
+ P->ac_drvp[1][0], P->ac_drvp[1][1], P->ac_drvp[1][2],
+ P->ac_drvp[1][3], P->ac_drvp[1][4], P->ac_drvp[1][5],
+ P->ac_drvp[1][6], P->ac_drvp[1][7]);
+ ghd_err("max tran 0x%x &ccc_t 0x%p actv drvp 0x%p actv pktp 0x%p\n",
+ P->ac_max_transfer, &P->ac_ccc,
+ P->ac_active_drvp, P->ac_active_pktp);
+ ghd_err("state %d hba tranp 0x%p\n", P->ac_state, P->ac_atapi_tran);
+ ghd_err("iohdl1 0x%p 0x%p D 0x%p E 0x%p F 0x%p C 0x%p S 0x%p LC 0x%p "
+ "HC 0x%p HD 0x%p ST 0x%p CMD 0x%p\n",
+ P->ac_iohandle1, P->ac_ioaddr1, P->ac_data, P->ac_error,
+ P->ac_feature, P->ac_count, P->ac_sect, P->ac_lcyl,
+ P->ac_hcyl, P->ac_drvhd, P->ac_status, P->ac_cmd);
+ ghd_err("iohdl2 0x%p 0x%p AST 0x%p DC 0x%p\n",
+ P->ac_iohandle2, P->ac_ioaddr2, P->ac_altstatus, P->ac_devctl);
+ ghd_err("bm hdl 0x%p 0x%p pciide %d BM %d sg_list 0x%p paddr 0x%llx "
+ "acc hdl 0x%p sg hdl 0x%p\n",
+ P->ac_bmhandle, P->ac_bmaddr, P->ac_pciide, P->ac_pciide_bm,
+ P->ac_sg_list, (unsigned long long) P->ac_sg_paddr,
+ P->ac_sg_acc_handle, P->ac_sg_handle);
+ ghd_err("arq pktp 0x%p flt pktp 0x%p &cdb 0x%p\n",
+ P->ac_arq_pktp, P->ac_fault_pktp, &P->ac_arq_cdb);
+}
+
+void
+dump_ata_drv(ata_drv_t *P)
+{
+
+ ghd_err("ctlp 0x%p &ata_id 0x%p flags 0x%x pciide dma 0x%x\n",
+ P->ad_ctlp, &P->ad_id, P->ad_flags, P->ad_pciide_dma);
+
+ ghd_err("targ %d lun %d driv 0x%x state %d cdb len %d "
+ "bogus %d nec %d\n", P->ad_targ, P->ad_lun, P->ad_drive_bits,
+ P->ad_state, P->ad_cdb_len, P->ad_bogus_drq,
+ P->ad_nec_bad_status);
+
+ ghd_err("ata &scsi_dev 0x%p &scsi_inquiry 0x%p &ctl_obj 0x%p\n",
+ &P->ad_device, &P->ad_inquiry, &P->ad_ctl_obj);
+
+ ghd_err("ata rd cmd 0x%x wr cmd 0x%x acyl 0x%x\n",
+ P->ad_rd_cmd, P->ad_wr_cmd, P->ad_acyl);
+
+ ghd_err("ata bios cyl %d hd %d sec %d phs hd %d sec %d\n",
+ P->ad_drvrcyl, P->ad_drvrhd, P->ad_drvrsec, P->ad_phhd,
+ P->ad_phsec);
+
+ ghd_err("block factor %d bpb %d\n",
+ P->ad_block_factor, P->ad_bytes_per_block);
+}
+
+void
+dump_ata_pkt(ata_pkt_t *P)
+{
+ ghd_err("gcmdp 0x%p flags 0x%x v_addr 0x%p dma %d\n",
+ P->ap_gcmdp, P->ap_flags, P->ap_v_addr, P->ap_pciide_dma);
+ ghd_err("&sg_list 0x%p sg cnt 0x%x resid 0x%lx bcnt 0x%lx\n",
+ P->ap_sg_list, P->ap_sg_cnt, P->ap_resid, P->ap_bcount);
+ ghd_err("sec 0x%x cnt 0x%x lc 0x%x hc 0x%x hd 0x%x cmd 0x%x\n",
+ P->ap_sec, P->ap_count, P->ap_lwcyl, P->ap_hicyl,
+ P->ap_hd, P->ap_cmd);
+ ghd_err("status 0x%x error 0x%x\n", P->ap_status, P->ap_error);
+ ghd_err("start 0x%p intr 0x%p complete 0x%p\n",
+ P->ap_start, P->ap_intr, P->ap_complete);
+ ghd_err("ata cdb 0x%x scb 0x%x bpb 0x%x wrt cnt 0x%x\n",
+ P->ap_cdb, P->ap_scb, P->ap_bytes_per_block, P->ap_wrt_count);
+ ghd_err("atapi cdbp 0x%p cdb len %d cdb pad %d\n",
+ P->ap_cdbp, P->ap_cdb_len, P->ap_cdb_pad);
+ ghd_err("scbp 0x%p statuslen 0x%x\n", P->ap_scbp, P->ap_statuslen);
+}
+
+#endif
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata_debug.h b/usr/src/uts/intel/io/dktp/controller/ata/ata_debug.h
new file mode 100644
index 0000000000..3f28a2d2e8
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata_debug.h
@@ -0,0 +1,88 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 1997 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _ATA_DEBUG_H
+#define _ATA_DEBUG_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * debugging options
+ */
+
+/*
+ * Always print "real" error messages on non-debugging kernels
+ */
+
+#ifdef ATA_DEBUG
+#define ADBG_ERROR(fmt) ADBG_FLAG_CHK(ADBG_FLAG_ERROR, fmt)
+#else
+#define ADBG_ERROR(fmt) ghd_err fmt
+#endif
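+
+/*
+ * The fmt argument to these macros is a complete, parenthesized
+ * printf-style argument list, so callers use double parentheses, e.g.
+ *
+ *	ADBG_ERROR(("bad status 0x%x\n", status));
+ */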
+
+/*
+ * ... everything else is conditional on the ATA_DEBUG preprocessor symbol
+ */
+
+#define ADBG_WARN(fmt) ADBG_FLAG_CHK(ADBG_FLAG_WARN, fmt)
+#define ADBG_TRACE(fmt) ADBG_FLAG_CHK(ADBG_FLAG_TRACE, fmt)
+#define ADBG_INIT(fmt) ADBG_FLAG_CHK(ADBG_FLAG_INIT, fmt)
+#define ADBG_TRANSPORT(fmt) ADBG_FLAG_CHK(ADBG_FLAG_TRANSPORT, fmt)
+#define ADBG_DMA(fmt) ADBG_FLAG_CHK(ADBG_FLAG_DMA, fmt)
+#define ADBG_ARQ(fmt) ADBG_FLAG_CHK(ADBG_FLAG_ARQ, fmt)
+
+
+
+
+extern int ata_debug;
+
+#define ADBG_FLAG_ERROR 0x0001
+#define ADBG_FLAG_WARN 0x0002
+#define ADBG_FLAG_TRACE 0x0004
+#define ADBG_FLAG_INIT 0x0008
+#define ADBG_FLAG_TRANSPORT 0x0010
+#define ADBG_FLAG_DMA 0x0020
+#define ADBG_FLAG_ARQ 0x0040
+
+
+
+#ifdef ATA_DEBUG
+#define ADBG_FLAG_CHK(flag, fmt) if (ata_debug & (flag)) GDBG_PRF(fmt)
+#else
+#define ADBG_FLAG_CHK(flag, fmt)
+#endif
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ATA_DEBUG_H */
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata_disk.c b/usr/src/uts/intel/io/dktp/controller/ata/ata_disk.c
new file mode 100644
index 0000000000..6694ecc191
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata_disk.c
@@ -0,0 +1,2953 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/dkio.h>
+#include <sys/cdio.h>
+#include <sys/file.h>
+
+#include "ata_common.h"
+#include "ata_disk.h"
+
+/*
+ * this typedef really should be in dktp/cmpkt.h
+ */
+typedef struct cmpkt cmpkt_t;
+
+
+/*
+ * DADA entry points
+ */
+
+static int ata_disk_abort(opaque_t ctl_data, cmpkt_t *pktp);
+static int ata_disk_reset(opaque_t ctl_data, int level);
+static int ata_disk_ioctl(opaque_t ctl_data, int cmd, intptr_t a, int flag);
+static cmpkt_t *ata_disk_pktalloc(opaque_t ctl_data, int (*callback)(caddr_t),
+ caddr_t arg);
+static void ata_disk_pktfree(opaque_t ctl_data, cmpkt_t *pktp);
+static cmpkt_t *ata_disk_memsetup(opaque_t ctl_data, cmpkt_t *pktp,
+ struct buf *bp, int (*callback)(caddr_t), caddr_t arg);
+static void ata_disk_memfree(opaque_t ctl_data, cmpkt_t *pktp);
+static cmpkt_t *ata_disk_iosetup(opaque_t ctl_data, cmpkt_t *pktp);
+static int ata_disk_transport(opaque_t ctl_data, cmpkt_t *pktp);
+
+/*
+ * DADA packet callbacks
+ */
+
+static void ata_disk_complete(ata_drv_t *ata_drvp, ata_pkt_t *ata_pktp,
+ int do_callback);
+static int ata_disk_intr(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_intr_dma(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_intr_pio_in(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_intr_pio_out(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_start(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_start_dma_in(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_start_dma_out(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_start_pio_in(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_start_pio_out(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+
+/*
+ * Local Function prototypes
+ */
+
+static int ata_disk_eject(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static void ata_disk_fake_inquiry(ata_drv_t *ata_drvp);
+static void ata_disk_get_resid(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_initialize_device_parameters(ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp);
+static int ata_disk_lock(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_set_multiple(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp);
+static void ata_disk_pio_xfer_data_in(ata_ctl_t *ata_ctlp, ata_pkt_t *ata_pktp);
+static void ata_disk_pio_xfer_data_out(ata_ctl_t *ata_ctlp,
+ ata_pkt_t *ata_pktp);
+static void ata_disk_set_standby_timer(ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp);
+static int ata_disk_recalibrate(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_standby(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_start_common(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_state(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_disk_unlock(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static int ata_get_capacity(ata_drv_t *ata_drvp, uint64_t *capacity);
+static void ata_fix_large_disk_geometry(ata_drv_t *ata_drvp);
+static uint64_t ata_calculate_28bits_capacity(ata_drv_t *ata_drvp);
+static uint64_t ata_calculate_48bits_capacity(ata_drv_t *ata_drvp);
+static int ata_copy_dk_ioc_string(intptr_t arg, char *source, int length,
+ int flag);
+static void ata_set_write_cache(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp);
+
+
+/*
+ * Local static data
+ */
+
+uint_t ata_disk_init_dev_parm_wait = 4 * 1000000;
+uint_t ata_disk_set_mult_wait = 4 * 1000000;
+int ata_disk_do_standby_timer = TRUE;
+
+/*
+ * ata_write_cache == 1 force write cache on.
+ * ata_write_cache == 0 do not modify write cache. firmware defaults kept.
+ * ata_write_cache == -1 force write cache off.
+ */
+int ata_write_cache = 1;
+
+
+static struct ctl_objops ata_disk_objops = {
+ ata_disk_pktalloc,
+ ata_disk_pktfree,
+ ata_disk_memsetup,
+ ata_disk_memfree,
+ ata_disk_iosetup,
+ ata_disk_transport,
+ ata_disk_reset,
+ ata_disk_abort,
+ nulldev,
+ nulldev,
+ ata_disk_ioctl,
+ 0, 0
+};
+
+
+
+/*
+ *
+ * initialize the ata_disk sub-system
+ *
+ */
+
+/*ARGSUSED*/
+int
+ata_disk_attach(
+ ata_ctl_t *ata_ctlp)
+{
+	ADBG_TRACE(("ata_disk_attach entered\n"));
+ return (TRUE);
+}
+
+
+
+/*
+ *
+ * destroy the ata_disk sub-system
+ *
+ */
+
+/*ARGSUSED*/
+void
+ata_disk_detach(
+ ata_ctl_t *ata_ctlp)
+{
+	ADBG_TRACE(("ata_disk_detach entered\n"));
+}
+
+
+/*
+ * Test whether the disk can support Logical Block Addressing
+ */
+
+int
+ata_test_lba_support(struct ata_id *aidp)
+{
+#ifdef __old_version__
+ /*
+ * determine if the drive supports LBA mode
+ */
+ if (aidp->ai_cap & ATAC_LBA_SUPPORT)
+ return (TRUE);
+#else
+ /*
+ * Determine if the drive supports LBA mode
+ * LBA mode is mandatory on ATA-3 (or newer) drives but is
+ * optional on ATA-2 (or older) drives. On ATA-2 drives
+ * the ai_majorversion word should be 0xffff or 0x0000
+ * (version not reported).
+ */
+ if (aidp->ai_majorversion != 0xffff &&
+ aidp->ai_majorversion >= (1 << 3)) {
+ /* ATA-3 or better */
+ return (TRUE);
+ } else if (aidp->ai_cap & ATAC_LBA_SUPPORT) {
+ /* ATA-2 LBA capability bit set */
+ return (TRUE);
+ } else {
+ return (FALSE);
+ }
+#endif
+}
+
+/*
+ * ATA-6 drives do not provide geometry information, so words
+ * ai_heads, ai_sectors and ai_fixcyls may not be valid
+ */
+static void
+ata_fixup_ata6_geometry(struct ata_id *aidp)
+{
+ /* check cylinders, heads, and sectors for valid values */
+ if (aidp->ai_heads != 0 && aidp->ai_heads != 0xffff &&
+ aidp->ai_sectors != 0 && aidp->ai_sectors != 0xffff &&
+ aidp->ai_fixcyls != 0)
+ return; /* assume valid geometry - do nothing */
+
+ /*
+ * Pre-set standard geometry values - they are not necessarily
+ * optimal for a given capacity
+ */
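+	/*
+	 * 16 heads and 63 sectors/track is the conventional translated
+	 * geometry for large ATA drives.
+	 */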
+ aidp->ai_heads = 0x10;
+ aidp->ai_sectors = 0x3f;
+ aidp->ai_fixcyls = 1;
+ /*
+ * The fixcyls value will get fixed up later in
+ * ata_fix_large_disk_geometry.
+ */
+}
+
+/*
+ *
+ * initialize the soft-structure for an ATA (non-PACKET) drive and
+ * then configure the drive with the correct modes and options.
+ *
+ */
+
+int
+ata_disk_init_drive(
+ ata_drv_t *ata_drvp)
+{
+ ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
+ struct ata_id *aidp = &ata_drvp->ad_id;
+ struct ctl_obj *ctlobjp;
+ struct scsi_device *devp;
+ int len;
+ int val;
+ int mode;
+ short *chs;
+ char buf[80];
+
+ ADBG_TRACE(("ata_disk_init_drive entered\n"));
+
+ /* ATA disks don't support LUNs */
+
+ if (ata_drvp->ad_lun != 0)
+ return (FALSE);
+
+ /*
+ * set up drive structure
+ * ATA-6 drives do not provide geometry information, so words
+ * ai_heads, ai_sectors and ai_fixcyls may not be valid - they
+ * will be fixed later
+ */
+
+ ata_drvp->ad_phhd = aidp->ai_heads;
+ ata_drvp->ad_phsec = aidp->ai_sectors;
+ ata_drvp->ad_drvrhd = aidp->ai_heads;
+ ata_drvp->ad_drvrsec = aidp->ai_sectors;
+ ata_drvp->ad_drvrcyl = aidp->ai_fixcyls;
+ ata_drvp->ad_acyl = 0;
+
+ if (ata_test_lba_support(&ata_drvp->ad_id))
+ ata_drvp->ad_drive_bits |= ATDH_LBA;
+
+ /* Get capacity and check for 48-bit mode */
+ mode = ata_get_capacity(ata_drvp, &ata_drvp->ad_capacity);
+ if (mode == AD_EXT48) {
+ ata_drvp->ad_flags |= AD_EXT48;
+ }
+
+ /* straighten out the geometry */
+ (void) sprintf(buf, "SUNW-ata-%p-d%d-chs", (void *) ata_ctlp->ac_data,
+ ata_drvp->ad_targ+1);
+ if (ddi_getlongprop(DDI_DEV_T_ANY, ddi_root_node(), 0,
+ buf, (caddr_t)&chs, &len) == DDI_PROP_SUCCESS) {
+ /*
+		 * If the number of sectors and heads in the BIOS matches the
+		 * physical geometry, then so should the number of cylinders;
+		 * this is to prevent the 1023-cylinder limit in older BIOSes
+		 * from causing a loss of space.
+ */
+ if (chs[1] == (ata_drvp->ad_drvrhd - 1) &&
+ chs[2] == ata_drvp->ad_drvrsec)
+ /* Set chs[0] to zero-based number of cylinders. */
+ chs[0] = aidp->ai_fixcyls - 1;
+ else if (!(ata_drvp->ad_drive_bits & ATDH_LBA)) {
+ /*
+			 * If the sectors/heads do not match those of the
+			 * BIOS and the drive does not support LBA, we go ahead
+			 * and advertise the BIOS geometry but use the physical
+			 * geometry for sector translation.
+ */
+ cmn_err(CE_WARN, "!Disk 0x%p,%d: BIOS geometry "
+ "different from physical, and no LBA support.",
+ (void *)ata_ctlp->ac_data, ata_drvp->ad_targ);
+ }
+
+ /*
+ * chs[0,1] are zero-based; make them one-based.
+ */
+ ata_drvp->ad_drvrcyl = chs[0] + 1;
+ ata_drvp->ad_drvrhd = chs[1] + 1;
+ ata_drvp->ad_drvrsec = chs[2];
+ kmem_free(chs, len);
+ } else {
+ /*
+ * Property not present; this means that boot.bin has
+ * determined that the drive supports Int13 LBA. Note
+ * this, but just return a geometry with a large
+ * cylinder count; this will be the signal for dadk to
+ * fail DKIOCG_VIRTGEOM.
+ * ad_drvr* are already set; just recalculate ad_drvrcyl
+ * from capacity.
+ */
+
+ ata_drvp->ad_flags |= AD_INT13LBA;
+ if (ata_drvp->ad_capacity != 0) {
+ ata_drvp->ad_drvrcyl = ata_drvp->ad_capacity /
+ (ata_drvp->ad_drvrhd * ata_drvp->ad_drvrsec);
+ } else {
+ /*
+ * Something's wrong; return something sure to
+ * fail the "cyls < 1024" test. This will
+ * never make it out of the DKIOCG_VIRTGEOM
+ * call, so its total bogosity won't matter.
+ */
+ ata_drvp->ad_drvrcyl = 1025;
+ ata_drvp->ad_drvrhd = 1;
+ ata_drvp->ad_drvrsec = 1;
+ }
+ }
+
+ /* fix geometry for disks > 31GB, if needed */
+ ata_fix_large_disk_geometry(ata_drvp);
+
+ /*
+ * set up the scsi_device and ctl_obj structures
+ */
+ devp = &ata_drvp->ad_device;
+ ctlobjp = &ata_drvp->ad_ctl_obj;
+
+ devp->sd_inq = &ata_drvp->ad_inquiry;
+ devp->sd_address.a_hba_tran = (scsi_hba_tran_t *)ctlobjp;
+ devp->sd_address.a_target = (ushort_t)ata_drvp->ad_targ;
+ devp->sd_address.a_lun = (uchar_t)ata_drvp->ad_lun;
+ mutex_init(&devp->sd_mutex, NULL, MUTEX_DRIVER, NULL);
+ ata_drvp->ad_flags |= AD_MUTEX_INIT;
+
+ /*
+ * DADA ops vectors and cookie
+ */
+ ctlobjp->c_ops = (struct ctl_objops *)&ata_disk_objops;
+
+ /*
+ * this is filled in with gtgtp by ata_disk_bus_ctl(INITCHILD)
+ */
+ ctlobjp->c_data = NULL;
+
+ ctlobjp->c_ext = &(ctlobjp->c_extblk);
+ ctlobjp->c_extblk.c_ctldip = ata_ctlp->ac_dip;
+ ctlobjp->c_extblk.c_targ = ata_drvp->ad_targ;
+ ctlobjp->c_extblk.c_blksz = NBPSCTR;
+
+ /*
+ * Get highest block factor supported by the drive.
+ * Some drives report 0 if read/write multiple not supported,
+ * adjust their blocking factor to 1.
+ */
+ ata_drvp->ad_block_factor = aidp->ai_mult1 & 0xff;
+
+ /*
+ * If a block factor property exists, use the smaller of the
+ * property value and the highest value the drive can support.
+ */
+ (void) sprintf(buf, "drive%d_block_factor", ata_drvp->ad_targ);
+ val = ddi_prop_get_int(DDI_DEV_T_ANY, ata_ctlp->ac_dip, 0, buf,
+ ata_drvp->ad_block_factor);
+
+ ata_drvp->ad_block_factor = (short)min(val, ata_drvp->ad_block_factor);
+
+ if (ata_drvp->ad_block_factor == 0)
+ ata_drvp->ad_block_factor = 1;
+
+ if (!ata_disk_setup_parms(ata_ctlp, ata_drvp))
+ return (FALSE);
+
+ ata_disk_fake_inquiry(ata_drvp);
+
+ return (TRUE);
+}
+
+/*
+ * Test if a disk supports 48-bit (extended mode) addressing and
+ * get disk capacity.
+ * Return value:
+ * AD_EXT48 if 48-bit mode is available, 0 otherwise,
+ * capacity in sectors.
+ * There are several indicators for 48-bit addressing. If any of
+ * them is missing, assume 28-bit (non-extended) addressing.
+ */
+
+static int
+ata_get_capacity(ata_drv_t *ata_drvp, uint64_t *capacity)
+{
+ struct ata_id *aidp = &ata_drvp->ad_id;
+ uint64_t cap28; /* capacity in 28-bit mode */
+ uint64_t cap48; /* capacity in 48-bit mode */
+
+ /*
+ * First compute capacity in 28-bit mode, using 28-bit capacity
+ * words in IDENTIFY DEVICE response words
+ */
+ cap28 = ata_calculate_28bits_capacity(ata_drvp);
+ *capacity = cap28;
+
+ /* No 48-bit mode before ATA 6 */
+ if (!IS_ATA_VERSION_SUPPORTED(aidp, 6))
+ return (0);
+
+ /* Check that 48 bit addressing is supported & enabled */
+ /* words 83 and 86 */
+ if (!(aidp->ai_cmdset83 & ATACS_EXT48))
+ return (0);
+ if (!(aidp->ai_features86 & ATACS_EXT48))
+ return (0);
+
+ /*
+ * Drive supports ATA-6. Since ATA-6 drives may not provide
+ * geometry info, pre-set standard geometry values
+ */
+ ata_fixup_ata6_geometry(aidp);
+
+ /* Compute 48-bit capacity */
+ cap48 = ata_calculate_48bits_capacity(ata_drvp);
+
+ /*
+	 * If the capacity is smaller than the maximum capacity addressable
+	 * in 28-bit mode, just use the 28-bit capacity value.
+	 * We will use 28-bit addressing read/write commands.
+ */
+ if (cap48 <= MAX_28BIT_CAPACITY)
+ return (0);
+
+ /*
+ * Capacity is too big for 28-bits addressing. But, to make
+ * sure that the drive implements ATA-6 correctly, the
+ * final check: cap28 should be MAX for 28-bit addressing.
+ * If it's not, we shouldn't use 48-bit mode, so return
+ * the capacity reported in 28-bit capacity words.
+ */
+ if (cap28 != MAX_28BIT_CAPACITY)
+ return (0); /* not max, use 28-bit value */
+
+ /*
+ * All is well so return 48-bit capacity indicator
+ */
+ ADBG_INIT(("ATA: using 48-bit mode for capacity %llx blocks\n",
+ (unsigned long long)cap48));
+
+ *capacity = cap48;
+ return (AD_EXT48);
+}
+
+/*
+ * With the advent of disks that hold more than 31 GB, we run into a
+ * limitation in the sizes of the fields that describe the geometry.
+ * The cylinders, heads, and sectors-per-track are each described by a
+ * 16-bit number -- both in the structure returned from IDENTIFY
+ * DEVICE and in the structure returned from the DIOCTL_GETGEOM or
+ * DIOCTL_GETPHYGEOM ioctl.
+ *
+ * The typical disk has 16 heads per cylinder and 63 sectors per
+ * track. A 16-bit field can contain up to 65535. So the largest
+ * disk that can be described in these fields is 65535 * 16 * 63 * 512
+ * (bytes/sector), or about 31.5 GB. The cylinder count gets truncated
+ * when stored in a narrow field, so a 40GB disk appears to have only
+ * 8 GB!
+ *
+ * The solution (for the time being at least) is to lie about the
+ * geometry. If the number of cylinders is too large to fit in 16
+ * bits, we will halve the cylinders and double the heads, repeating
+ * until we can fit the geometry into 3 shorts.
+ * FUTURE ENHANCEMENT: If this ever isn't enough, we could
+ * add another step to double sectors/track as well.
+ */
+
+static void
+ata_fix_large_disk_geometry(
+ ata_drv_t *ata_drvp)
+{
+ struct ata_id *aidp = &ata_drvp->ad_id;
+
+ /* no hope for large disks if LBA not supported */
+ if (!(ata_drvp->ad_drive_bits & ATDH_LBA))
+ return;
+
+ /*
+ * Fix up the geometry to be returned by DIOCTL_GETGEOM.
+ * If number of cylinders > USHRT_MAX, double heads and
+ * halve cylinders until everything fits.
+ */
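+	/*
+	 * For example, a drive reporting 77504 cylinders and 16 heads
+	 * would be presented as 38752 cylinders and 32 heads.
+	 */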
+ while (ata_drvp->ad_drvrcyl > USHRT_MAX) {
+ int tempheads;
+
+ /* is there room in 16 bits to double the heads? */
+ tempheads = 2 * ata_drvp->ad_drvrhd;
+ if (tempheads > USHRT_MAX) {
+ /*
+ * No room to double the heads.
+ * I give up, there's no way to represent this.
+ * Limit disk size.
+ */
+ cmn_err(CE_WARN,
+ "Disk is too large: "
+ "Model %s, Serial# %s "
+ "Approximating...\n",
+ aidp->ai_model, aidp->ai_drvser);
+ ata_drvp->ad_drvrcyl = USHRT_MAX;
+ break;
+ }
+
+ /* OK, so double the heads and halve the cylinders */
+ ata_drvp->ad_drvrcyl /= 2;
+ ata_drvp->ad_drvrhd *= 2;
+ }
+}
+
+/*
+ * Calculate capacity using 28-bit capacity words from IDENTIFY DEVICE
+ * return words
+ */
+uint64_t
+ata_calculate_28bits_capacity(ata_drv_t *ata_drvp)
+{
+ /*
+ * Asked x3t13 for advice; this implements Hale Landis'
+ * response, minus the "use ATA_INIT_DEVPARMS".
+ * See "capacity.notes".
+ */
+
+ /* some local shorthand/renaming to clarify the meaning */
+
+ ushort_t curcyls_w54, curhds_w55, cursect_w56;
+ uint32_t curcap_w57_58;
+
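+	/*
+	 * For LBA drives, words 60-61 (ai_addrsec) give the total number
+	 * of user addressable sectors directly.
+	 */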
+ if ((ata_drvp->ad_drive_bits & ATDH_LBA) != 0) {
+ return ((uint64_t)(ata_drvp->ad_id.ai_addrsec[0] +
+ ata_drvp->ad_id.ai_addrsec[1] * 0x10000));
+ }
+
+ /*
+ * If we're not LBA, then first try to validate "current" values.
+ */
+
+ curcyls_w54 = ata_drvp->ad_id.ai_curcyls;
+ curhds_w55 = ata_drvp->ad_id.ai_curheads;
+ cursect_w56 = ata_drvp->ad_id.ai_cursectrk;
+ curcap_w57_58 = ata_drvp->ad_id.ai_cursccp[0] +
+ ata_drvp->ad_id.ai_cursccp[1] * 0x10000;
+
+ if (((ata_drvp->ad_id.ai_validinfo & 1) == 1) &&
+ (curhds_w55 >= 1) && (curhds_w55 <= 16) &&
+ (cursect_w56 >= 1) && (cursect_w56 <= 63) &&
+ (curcap_w57_58 == curcyls_w54 * curhds_w55 * cursect_w56)) {
+ return ((uint64_t)curcap_w57_58);
+ }
+
+ /*
+ * At this point, Hale recommends ATA_INIT_DEVPARMS.
+ * I don't want to do that, so simply use 1/3/6 as
+ * a final fallback, and continue to assume the BIOS
+ * has done whatever INIT_DEVPARMS are necessary.
+ */
+
+ return ((uint64_t)(ata_drvp->ad_id.ai_fixcyls *
+ ata_drvp->ad_id.ai_heads * ata_drvp->ad_id.ai_sectors));
+}
+
+/*
+ * Calculate capacity using 48-bits capacity words from IDENTIFY DEVICE
+ * return words
+ */
+uint64_t
+ata_calculate_48bits_capacity(ata_drv_t *ata_drvp)
+{
+ uint64_t cap48 = 0;
+ int i;
+
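+	/*
+	 * Words 100-103 (ai_addrsecxt[0..3]) hold the 48-bit maximum LBA,
+	 * least significant word first; assemble them starting with the
+	 * most significant word.
+	 */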
+ for (i = 3; i >= 0; --i) {
+ cap48 <<= 16;
+ cap48 += ata_drvp->ad_id.ai_addrsecxt[i];
+ }
+ return (cap48);
+}
+
+
+/*
+ *
+ * Setup the drives Read/Write Multiple Blocking factor and the
+ * current translation geometry. Necessary during attach and after
+ * Software Resets.
+ *
+ */
+
+int
+ata_disk_setup_parms(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp)
+{
+
+ /*
+ * program geometry info back to the drive
+ */
+ if (!ata_disk_initialize_device_parameters(ata_ctlp, ata_drvp)) {
+ return (FALSE);
+ }
+
+ /*
+ * Determine the blocking factor
+ */
+ if (ata_drvp->ad_block_factor > 1) {
+ /*
+ * Program the block factor into the drive. If this
+ * fails, then go back to using a block size of 1.
+ */
+ if (!ata_disk_set_multiple(ata_ctlp, ata_drvp))
+ ata_drvp->ad_block_factor = 1;
+ }
+
+
+ if (ata_drvp->ad_block_factor > 1) {
+ ata_drvp->ad_rd_cmd = ATC_RDMULT;
+ ata_drvp->ad_wr_cmd = ATC_WRMULT;
+ } else {
+ ata_drvp->ad_rd_cmd = ATC_RDSEC;
+ ata_drvp->ad_wr_cmd = ATC_WRSEC;
+ }
+
+ ata_drvp->ad_bytes_per_block = ata_drvp->ad_block_factor << SCTRSHFT;
+
+ ADBG_INIT(("set block factor for drive %d to %d\n",
+ ata_drvp->ad_targ, ata_drvp->ad_block_factor));
+
+ if (ata_disk_do_standby_timer)
+ ata_disk_set_standby_timer(ata_ctlp, ata_drvp);
+
+ ata_set_write_cache(ata_ctlp, ata_drvp);
+
+ return (TRUE);
+}
+
+
+/*
+ * Take the timeout value specified in the "standby" property
+ * and convert from seconds to the magic parm expected by
+ * the drive. Then issue the IDLE command to set the drive's
+ * internal standby timer.
+ */
+
+static void
+ata_disk_set_standby_timer(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp)
+{
+ uchar_t parm;
+ int timeout = ata_ctlp->ac_standby_time;
+
+ /*
+	 * take the timeout value, specified in seconds, and
+ * encode it into the proper command parm
+ */
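+	/*
+	 * For example, a 600 second timeout encodes as parm 120
+	 * (120 periods of 5 seconds), and a 7200 second (2 hour) timeout
+	 * encodes as parm 244 (240 + 4 periods of 30 minutes).
+	 */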
+
+ /*
+ * don't change it if no property specified or if
+ * the specified value is out of range
+ */
+ if (timeout < 0 || timeout > (12 * 60 * 60))
+ return;
+
+ /* 1 to 1200 seconds (20 minutes) == N * 5 seconds */
+ if (timeout <= (240 * 5))
+ parm = (timeout + 4) / 5;
+
+ /* 20 to 21 minutes == 21 minutes */
+ else if (timeout <= (21 * 60))
+ parm = 252;
+
+ /* 21 minutes to 21 minutes 15 seconds == 21:15 */
+ else if (timeout <= ((21 * 60) + 15))
+ parm = 255;
+
+ /* 21:15 to 330 minutes == N * 30 minutes */
+ else if (timeout <= (11 * 30 * 60))
+ parm = 240 + ((timeout + (30 * 60) - 1)/ (30 * 60));
+
+ /* > 330 minutes == 8 to 12 hours */
+ else
+ parm = 253;
+
+ (void) ata_command(ata_ctlp, ata_drvp, TRUE, FALSE, 5 * 1000000,
+ ATC_IDLE, 0, parm, 0, 0, 0, 0);
+}
+
+
+
+/*
+ *
+ * destroy an ata disk drive
+ *
+ */
+
+void
+ata_disk_uninit_drive(
+ ata_drv_t *ata_drvp)
+{
+ struct scsi_device *devp = &ata_drvp->ad_device;
+
+ ADBG_TRACE(("ata_disk_uninit_drive entered\n"));
+
+ if (ata_drvp->ad_flags & AD_MUTEX_INIT)
+ mutex_destroy(&devp->sd_mutex);
+}
+
+
+
+
+/*
+ *
+ * DADA compliant bus_ctl entry point
+ *
+ */
+
+/*ARGSUSED*/
+int
+ata_disk_bus_ctl(
+ dev_info_t *d,
+ dev_info_t *r,
+ ddi_ctl_enum_t o,
+ void *a,
+ void *v)
+{
+ ADBG_TRACE(("ata_disk_bus_ctl entered\n"));
+
+ switch (o) {
+
+ case DDI_CTLOPS_REPORTDEV:
+ {
+ int targ;
+
+ targ = ddi_prop_get_int(DDI_DEV_T_ANY, r, DDI_PROP_DONTPASS,
+ "target", 0);
+ cmn_err(CE_CONT, "?%s%d at %s%d target %d lun %d\n",
+ ddi_driver_name(r), ddi_get_instance(r),
+ ddi_driver_name(d), ddi_get_instance(d), targ, 0);
+ return (DDI_SUCCESS);
+ }
+ case DDI_CTLOPS_INITCHILD:
+ {
+ dev_info_t *cdip = (dev_info_t *)a;
+ ata_drv_t *ata_drvp;
+ ata_ctl_t *ata_ctlp;
+ ata_tgt_t *ata_tgtp;
+ struct scsi_device *devp;
+ struct ctl_obj *ctlobjp;
+ gtgt_t *gtgtp;
+ char name[MAXNAMELEN];
+
+ /*
+ * save time by picking up ptr to drive struct left
+ * by ata_bus_ctl - isn't that convenient.
+ */
+ ata_drvp = ddi_get_driver_private(cdip);
+ ata_ctlp = ata_drvp->ad_ctlp;
+
+ /* set up pointers to child dip */
+
+ devp = &ata_drvp->ad_device;
+ /*
+		 * If sd_dev is set, it means that the target has already
+		 * been initialized. The cdip is a duplicate node from
+ * reexpansion of driver.conf. Fail INITCHILD here.
+ */
+ if (devp->sd_dev != NULL) {
+ return (DDI_FAILURE);
+ }
+ devp->sd_dev = cdip;
+
+ ctlobjp = &ata_drvp->ad_ctl_obj;
+ ctlobjp->c_extblk.c_devdip = cdip;
+
+ /*
+ * Create the "ata" property for use by the target driver
+ */
+ if (!ata_prop_create(cdip, ata_drvp, "ata")) {
+ return (DDI_FAILURE);
+ }
+
+ gtgtp = ghd_target_init(d, cdip, &ata_ctlp->ac_ccc,
+ sizeof (ata_tgt_t), ata_ctlp,
+ ata_drvp->ad_targ,
+ ata_drvp->ad_lun);
+
+ /* gt_tgt_private points to ata_tgt_t */
+ ata_tgtp = GTGTP2ATATGTP(gtgtp);
+ ata_tgtp->at_drvp = ata_drvp;
+ ata_tgtp->at_dma_attr = ata_pciide_dma_attr;
+ ata_tgtp->at_dma_attr.dma_attr_maxxfer =
+ ata_ctlp->ac_max_transfer << SCTRSHFT;
+
+ /* gtgtp is the opaque arg to all my entry points */
+ ctlobjp->c_data = gtgtp;
+
+ /* create device name */
+
+ (void) sprintf(name, "%x,%x", ata_drvp->ad_targ,
+ ata_drvp->ad_lun);
+ ddi_set_name_addr(cdip, name);
+ ddi_set_driver_private(cdip, devp);
+
+ return (DDI_SUCCESS);
+ }
+
+ case DDI_CTLOPS_UNINITCHILD:
+ {
+ dev_info_t *cdip = (dev_info_t *)a;
+ struct scsi_device *devp;
+ struct ctl_obj *ctlobjp;
+ gtgt_t *gtgtp;
+
+ devp = ddi_get_driver_private(cdip);
+ ctlobjp = (struct ctl_obj *)devp->sd_address.a_hba_tran;
+ gtgtp = ctlobjp->c_data;
+
+ ghd_target_free(d, cdip, &GTGTP2ATAP(gtgtp)->ac_ccc, gtgtp);
+
+ ddi_set_driver_private(cdip, NULL);
+ ddi_set_name_addr(cdip, NULL);
+ return (DDI_SUCCESS);
+ }
+
+ default:
+ return (DDI_FAILURE);
+ }
+}
+
+
+/*
+ *
+ * DADA abort entry point - not currently used by dadk
+ *
+ */
+
+/* ARGSUSED */
+static int
+ata_disk_abort(opaque_t ctl_data, cmpkt_t *pktp)
+{
+ ADBG_TRACE(("ata_disk_abort entered\n"));
+
+ /* XXX - Note that this interface is currently not used by dadk */
+
+ /*
+ * GHD abort functions take a pointer to a scsi_address
+ * and so they're unusable here. The ata driver used to
+ * return DDI_SUCCESS here without doing anything. Its
+	 * return DDI_SUCCESS here without doing anything. It
+	 * seems that DDI_FAILURE is more appropriate.
+
+ return (DDI_FAILURE);
+}
+
+
+
+/*
+ *
+ * DADA reset entry point - not currently used by dadk
+ * (except in debug versions of driver)
+ *
+ */
+
+/* ARGSUSED */
+static int
+ata_disk_reset(opaque_t ctl_data, int level)
+{
+ gtgt_t *gtgtp = (gtgt_t *)ctl_data;
+ ata_drv_t *ata_drvp = GTGTP2ATADRVP(gtgtp);
+ int rc;
+
+ ADBG_TRACE(("ata_disk_reset entered\n"));
+
+ /* XXX - Note that this interface is currently not used by dadk */
+
+ if (level == RESET_TARGET) {
+ rc = ghd_tran_reset_target(&ata_drvp->ad_ctlp->ac_ccc, gtgtp,
+ NULL);
+ } else if (level == RESET_ALL) {
+ rc = ghd_tran_reset_bus(&ata_drvp->ad_ctlp->ac_ccc, gtgtp,
+ NULL);
+ }
+
+ return (rc ? DDI_SUCCESS : DDI_FAILURE);
+}
+
+
+
+/*
+ *
+ * DADA ioctl entry point
+ *
+ */
+
+/* ARGSUSED */
+static int
+ata_disk_ioctl(opaque_t ctl_data, int cmd, intptr_t arg, int flag)
+{
+ gtgt_t *gtgtp = (gtgt_t *)ctl_data;
+ ata_ctl_t *ata_ctlp = GTGTP2ATAP(gtgtp);
+ ata_drv_t *ata_drvp = GTGTP2ATADRVP(gtgtp);
+ int rc;
+ struct tgdk_geom *tg;
+ struct ata_id *aidp = &ata_drvp->ad_id;
+
+ ADBG_TRACE(("ata_disk_ioctl entered, cmd = %d\n", cmd));
+
+ switch (cmd) {
+
+ case DIOCTL_GETGEOM:
+ case DIOCTL_GETPHYGEOM:
+ tg = (struct tgdk_geom *)arg;
+ tg->g_cyl = ata_drvp->ad_drvrcyl;
+ tg->g_head = ata_drvp->ad_drvrhd;
+ tg->g_sec = ata_drvp->ad_drvrsec;
+ tg->g_acyl = ata_drvp->ad_acyl;
+ tg->g_secsiz = 512;
+ tg->g_cap = tg->g_cyl * tg->g_head * tg->g_sec;
+ return (0);
+
+ case DCMD_UPDATE_GEOM:
+/* ??? fix this to issue IDENTIFY DEVICE ??? */
+/* might not be necessary since I don't know of any ATA/IDE that */
+/* can change its geometry. On the other hand, ATAPI devices like the */
+/* LS-120 or PD/CD can change their geometry when new media is inserted */
+ return (0);
+
+ /* copy the model number into the caller's buffer */
+ case DIOCTL_GETMODEL:
+ rc = ata_copy_dk_ioc_string(arg, aidp->ai_model,
+ sizeof (aidp->ai_model), flag);
+ return (rc);
+
+	/* copy the serial number into the caller's buffer */
+ case DIOCTL_GETSERIAL:
+ rc = ata_copy_dk_ioc_string(arg, aidp->ai_drvser,
+ sizeof (aidp->ai_drvser),
+ flag);
+ return (rc);
+
+ case DIOCTL_GETWCE:
+ /*
+		 * WCE is only supported in ATA/ATAPI-4 or higher; for
+		 * lower rev devices, we must assume the write cache is
+		 * enabled.
+ * NOTE: Since there is currently no Solaris mechanism
+ * to change the state of the Write Cache Enable feature,
+ * this code just checks the value of the WCE bit
+ * obtained at device init time. If a mechanism
+ * is added to the driver to change WCE, this code
+ * must be updated appropriately.
+ */
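+		/*
+		 * The expression below is true if the drive does not
+		 * report a version, is pre ATA/ATAPI-4, or reports WCE
+		 * enabled in word 85.
+		 */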
+ *(int *)arg = (aidp->ai_majorversion == 0xffff) ||
+ ((aidp->ai_majorversion & ATAC_MAJVER_4) == 0) ||
+ (aidp->ai_features85 & ATAC_FEATURES85_WCE) != 0;
+ return (0);
+
+ case DCMD_GET_STATE:
+ rc = ata_queue_cmd(ata_disk_state, NULL, ata_ctlp, ata_drvp,
+ gtgtp);
+ break;
+
+ case DCMD_LOCK:
+ case DKIOCLOCK:
+ rc = ata_queue_cmd(ata_disk_lock, NULL, ata_ctlp, ata_drvp,
+ gtgtp);
+ break;
+
+ case DCMD_UNLOCK:
+ case DKIOCUNLOCK:
+ rc = ata_queue_cmd(ata_disk_unlock, NULL, ata_ctlp, ata_drvp,
+ gtgtp);
+ break;
+
+ case DCMD_START_MOTOR:
+ case CDROMSTART:
+ rc = ata_queue_cmd(ata_disk_recalibrate, NULL, ata_ctlp,
+ ata_drvp, gtgtp);
+ break;
+
+ case DCMD_STOP_MOTOR:
+ case CDROMSTOP:
+ rc = ata_queue_cmd(ata_disk_standby, NULL, ata_ctlp, ata_drvp,
+ gtgtp);
+ break;
+
+ case DKIOCEJECT:
+ case CDROMEJECT:
+ rc = ata_queue_cmd(ata_disk_eject, NULL, ata_ctlp, ata_drvp,
+ gtgtp);
+ break;
+
+ default:
+ ADBG_WARN(("ata_disk_ioctl: unsupported cmd 0x%x\n", cmd));
+ return (ENOTTY);
+ }
+
+ if (rc)
+ return (0);
+ return (ENXIO);
+
+}
+
+
+#ifdef ___not___used___
+/*
+ * Issue an ATA command to the drive using the packet already
+ * allocated by the target driver
+ */
+
+int
+ata_disk_do_ioctl(
+ int (*func)(ata_ctl_t *, ata_drv_t *, ata_pkt_t *),
+ void *arg,
+ ata_ctl_t *ata_ctlp,
+ gtgt_t *gtgtp,
+ cmpkt_t *pktp)
+{
+ gcmd_t *gcmdp = CPKT2GCMD(pktp);
+ ata_pkt_t *ata_pktp = GCMD2APKT(gcmdp);
+ int rc;
+
+ ata_pktp->ap_start = func;
+ ata_pktp->ap_intr = NULL;
+ ata_pktp->ap_complete = NULL;
+ ata_pktp->ap_v_addr = (caddr_t)arg;
+
+ /*
+ * add it to the queue, when it gets to the front the
+ * ap_start function is called.
+ */
+ rc = ghd_transport(&ata_ctlp->ac_ccc, gcmdp, gcmdp->cmd_gtgtp,
+ 0, TRUE, NULL);
+
+ if (rc != TRAN_ACCEPT) {
+ /* this should never, ever happen */
+ return (ENXIO);
+ }
+
+ if (ata_pktp->ap_flags & AP_ERROR)
+ return (ENXIO);
+ return (0);
+}
+#endif
+
+
+
+/*
+ *
+ * DADA pktalloc entry point
+ *
+ */
+
+/* ARGSUSED */
+static cmpkt_t *
+ata_disk_pktalloc(opaque_t ctl_data, int (*callback)(caddr_t), caddr_t arg)
+{
+ gtgt_t *gtgtp = (gtgt_t *)ctl_data;
+ ata_drv_t *ata_drvp = GTGTP2ATADRVP(gtgtp);
+ cmpkt_t *pktp;
+ ata_pkt_t *ata_pktp;
+ gcmd_t *gcmdp;
+
+ ADBG_TRACE(("ata_disk_pktalloc entered\n"));
+
+ /*
+ * Allocate and init the GHD gcmd_t structure and the
+ * DADA cmpkt and the ata_pkt
+ */
+ if ((gcmdp = ghd_gcmd_alloc(gtgtp,
+ (sizeof (cmpkt_t) + sizeof (ata_pkt_t)),
+ (callback == DDI_DMA_SLEEP))) == NULL) {
+ return ((cmpkt_t *)NULL);
+ }
+ ASSERT(gcmdp != NULL);
+
+ ata_pktp = GCMD2APKT(gcmdp);
+ ASSERT(ata_pktp != NULL);
+
+ pktp = (cmpkt_t *)(ata_pktp + 1);
+
+ pktp->cp_ctl_private = (void *)gcmdp;
+ ata_pktp->ap_gcmdp = gcmdp;
+ gcmdp->cmd_pktp = (void *)pktp;
+
+ /*
+ * At this point the structures are linked like this:
+ *
+ * (struct cmpkt) <--> (struct gcmd) <--> (struct ata_pkt)
+ */
+
+ /* callback functions */
+
+ ata_pktp->ap_start = ata_disk_start;
+ ata_pktp->ap_intr = ata_disk_intr;
+ ata_pktp->ap_complete = ata_disk_complete;
+
+ /* other ata_pkt setup */
+
+ ata_pktp->ap_bytes_per_block = ata_drvp->ad_bytes_per_block;
+
+ /* cmpkt setup */
+
+ pktp->cp_cdblen = 1;
+ pktp->cp_cdbp = (opaque_t)&ata_pktp->ap_cdb;
+ pktp->cp_scbp = (opaque_t)&ata_pktp->ap_scb;
+ pktp->cp_scblen = 1;
+
+ return (pktp);
+}
+
+
+
+/*
+ *
+ * DADA pktfree entry point
+ *
+ */
+
+/* ARGSUSED */
+static void
+ata_disk_pktfree(opaque_t ctl_data, cmpkt_t *pktp)
+{
+ ata_pkt_t *ata_pktp = CPKT2APKT(pktp);
+
+ ADBG_TRACE(("ata_disk_pktfree entered\n"));
+
+ /* check not free already */
+
+ ASSERT(!(ata_pktp->ap_flags & AP_FREE));
+ ata_pktp->ap_flags = AP_FREE;
+
+ ghd_gcmd_free(CPKT2GCMD(pktp));
+}
+
+
+/*
+ *
+ * DADA memsetup entry point
+ *
+ */
+
+/* ARGSUSED */
+static cmpkt_t *
+ata_disk_memsetup(
+ opaque_t ctl_data,
+ cmpkt_t *pktp,
+ struct buf *bp,
+ int (*callback)(caddr_t),
+ caddr_t arg)
+{
+ gtgt_t *gtgtp = (gtgt_t *)ctl_data;
+ ata_pkt_t *ata_pktp = CPKT2APKT(pktp);
+ gcmd_t *gcmdp = APKT2GCMD(ata_pktp);
+ int flags;
+
+ ADBG_TRACE(("ata_disk_memsetup entered\n"));
+
+ ata_pktp->ap_sg_cnt = 0;
+
+ if (bp->b_bcount == 0) {
+ ata_pktp->ap_v_addr = NULL;
+ return (pktp);
+ }
+
+ if (GTGTP2ATADRVP(gtgtp)->ad_pciide_dma != ATA_DMA_ON)
+ goto skip_dma_setup;
+
+ if (ata_dma_disabled)
+ goto skip_dma_setup;
+
+ /*
+ * The PCI-IDE DMA engine is brain-damaged and can't
+ * DMA non-aligned buffers.
+ */
+ if (!(bp->b_flags & B_PAGEIO) &&
+ ((uintptr_t)bp->b_un.b_addr) & PCIIDE_PRDE_ADDR_MASK) {
+ goto skip_dma_setup;
+ }
+
+ /*
+ * It also insists that the byte count must be even.
+ */
+ if (bp->b_bcount & 1)
+ goto skip_dma_setup;
+
+ /* check direction for data transfer */
+ if (bp->b_flags & B_READ) {
+ flags = DDI_DMA_READ | DDI_DMA_PARTIAL;
+ } else {
+ flags = DDI_DMA_WRITE | DDI_DMA_PARTIAL;
+ }
+
+ /*
+ * Bind the DMA handle to the buf
+ */
+ if (ghd_dma_buf_bind_attr(&GTGTP2ATAP(gtgtp)->ac_ccc, gcmdp, bp, flags,
+ callback, arg, &GTGTP2ATATGTP(gtgtp)->at_dma_attr)) {
+ ata_pktp->ap_v_addr = 0;
+ return (pktp);
+ }
+
+skip_dma_setup:
+ bp_mapin(bp);
+ ata_pktp->ap_v_addr = bp->b_un.b_addr;
+ return (pktp);
+}
+
+
+
+/*
+ *
+ * DADA memfree entry point
+ *
+ */
+
+/*
+ * 1157317 sez that drivers shouldn't call bp_mapout(), as either
+ * biodone() or biowait() will end up doing it, but after they
+ * call bp->b_iodone(), which is a necessary sequence for
+ * Online Disk Suite. However, the DDI group wants to rethink
+ * bp_mapin()/bp_mapout() and how they should behave in the
+ * presence of layered drivers, etc. For the moment, fix
+ * the OLDS problem by removing the bp_mapout() call.
+ */
+
+#define BUG_1157317
+
+/* ARGSUSED */
+static void
+ata_disk_memfree(opaque_t ctl_data, cmpkt_t *pktp)
+{
+ gcmd_t *gcmdp = CPKT2GCMD(pktp);
+
+ ADBG_TRACE(("ata_disk_memfree entered\n"));
+
+ if (gcmdp->cmd_dma_handle)
+ ghd_dmafree_attr(gcmdp);
+#if !defined(BUG_1157317)
+ else
+ bp_mapout(pktp->cp_bp);
+#endif
+}
+
+
+
+/*
+ *
+ * DADA iosetup entry point
+ *
+ */
+
+static cmpkt_t *
+ata_disk_iosetup(opaque_t ctl_data, cmpkt_t *pktp)
+{
+ gtgt_t *gtgtp = (gtgt_t *)ctl_data;
+ ata_drv_t *ata_drvp = GTGTP2ATADRVP(gtgtp);
+ ata_pkt_t *ata_pktp = CPKT2APKT(pktp);
+ gcmd_t *gcmdp = APKT2GCMD(ata_pktp);
+ uint_t sec_count;
+ daddr_t start_sec;
+ uint_t byte_count;
+
+ ADBG_TRACE(("ata_disk_iosetup entered\n"));
+
+ /*
+ * Check for DCMD_FLUSH_CACHE (which does no I/O) and
+ * just do basic setup.
+ */
+ if (pktp->cp_passthru == NULL &&
+ ata_pktp->ap_cdb == DCMD_FLUSH_CACHE) {
+ ata_pktp->ap_cmd = ATC_FLUSH_CACHE;
+ ata_pktp->ap_flags = 0;
+ ata_pktp->ap_count = 0;
+ ata_pktp->ap_startsec = 0;
+ ata_pktp->ap_sg_cnt = 0;
+ ata_pktp->ap_pciide_dma = FALSE;
+ return (pktp);
+ }
+
+ /* check for error retry */
+ if (ata_pktp->ap_flags & AP_ERROR) {
+ /*
+		 * This is a temporary work-around for dadk calling
+		 * iosetup for a retry. The correct solution is to change
+		 * dadk not to call iosetup for a retry.
+		 * We do not apply the work-around for pio mode since
+		 * a pio-mode retry does not involve moving dma windows,
+		 * and reducing the sector count works for a pio-mode
+		 * retry for now.
+ */
+ if (gcmdp->cmd_dma_handle != NULL) {
+ ata_pktp->ap_flags = 0;
+ return (NULL);
+ }
+
+ ata_pktp->ap_bytes_per_block = NBPSCTR;
+ sec_count = 1;
+
+ /*
+ * Since we are retrying the last read or write operation,
+ * restore the old values of the ap_v_addr and ap_resid.
+ * This assumes CTL_IOSETUP is called again on retry; if not,
+ * this needs to be done in CTL_TRANSPORT.
+ */
+ if (ata_pktp->ap_flags & (AP_READ | AP_WRITE)) {
+ ata_pktp->ap_v_addr = ata_pktp->ap_v_addr_sav;
+ ata_pktp->ap_resid = ata_pktp->ap_resid_sav;
+ }
+ } else {
+ /*
+ * Limit request to ac_max_transfer sectors.
+ * The value is specified by the user in the
+ * max_transfer property. It must be in the range 1 to 256.
+		 * When max_transfer is 0x100 it does not fit in 8 bits,
+		 * but the spec says 0 represents 256, so truncating is OK.
+ */
+ sec_count = min((pktp->cp_bytexfer >> SCTRSHFT),
+ ata_drvp->ad_ctlp->ac_max_transfer);
+ /*
+ * Save the current values of ap_v_addr and ap_resid
+ * in case a retry operation happens. During a retry
+ * operation we need to restore these values.
+ */
+ ata_pktp->ap_v_addr_sav = ata_pktp->ap_v_addr;
+ ata_pktp->ap_resid_sav = ata_pktp->ap_resid;
+ }
+
+ /* reset flags */
+ ata_pktp->ap_flags = 0;
+
+#ifdef DADKIO_RWCMD_READ
+ start_sec = pktp->cp_passthru ? RWCMDP(pktp)->blkaddr : pktp->cp_srtsec;
+#else
+ start_sec = pktp->cp_srtsec;
+#endif
+
+ /*
+ * Setup the PCIDE Bus Master Scatter/Gather list
+ */
+ ata_pktp->ap_sg_cnt = 0;
+ ata_pktp->ap_pciide_dma = FALSE;
+ if (gcmdp->cmd_dma_handle != NULL && sec_count != 0) {
+ byte_count = sec_count << SCTRSHFT;
+ if ((ghd_dmaget_attr(&GTGTP2ATAP(gtgtp)->ac_ccc, gcmdp,
+ byte_count, ATA_DMA_NSEGS, &byte_count) == FALSE) ||
+ (byte_count == 0)) {
+ ADBG_ERROR(("ata_disk_iosetup: byte count zero\n"));
+ return (NULL);
+ }
+ sec_count = byte_count >> SCTRSHFT;
+ }
+
+ /*
+	 * In the non-48-bit addressing modes (CHS and LBA28) the sector
+	 * count is an 8-bit value, and a sector count of 0 represents
+	 * 256 sectors.
+	 * In extended addressing (LBA48) the sector count is a 16-bit
+	 * value, so a max_transfer of 0x100 must not be truncated to
+	 * 8 bits because that would represent a zero sector count.
+ */
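+	/*
+	 * For example (illustrative values): a 256-sector request on a
+	 * non-LBA48 drive ends up with ap_count truncated to 0x00 below,
+	 * which the drive interprets as 256 sectors; on an LBA48 drive
+	 * ap_count remains 0x100.
+	 */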
+ ata_pktp->ap_count = sec_count;
+ if (!(ata_drvp->ad_flags & AD_EXT48)) {
+ ata_pktp->ap_count &= 0xff;
+ }
+ ata_pktp->ap_startsec = start_sec;
+
+#ifdef DADKIO_RWCMD_READ
+ if (pktp->cp_passthru) {
+ switch (RWCMDP(pktp)->cmd) {
+ case DADKIO_RWCMD_READ:
+ if (ata_pktp->ap_sg_cnt) {
+ ata_pktp->ap_cmd = ATC_READ_DMA;
+ ata_pktp->ap_pciide_dma = TRUE;
+ ata_pktp->ap_start = ata_disk_start_dma_in;
+ ata_pktp->ap_intr = ata_disk_intr_dma;
+ } else {
+ ata_pktp->ap_cmd = ATC_RDSEC;
+ ata_pktp->ap_start = ata_disk_start_pio_in;
+ ata_pktp->ap_intr = ata_disk_intr_pio_in;
+ }
+ ata_pktp->ap_flags |= AP_READ;
+ break;
+ case DADKIO_RWCMD_WRITE:
+ if (ata_pktp->ap_sg_cnt) {
+ ata_pktp->ap_cmd = ATC_WRITE_DMA;
+ ata_pktp->ap_pciide_dma = TRUE;
+ ata_pktp->ap_start = ata_disk_start_dma_out;
+ ata_pktp->ap_intr = ata_disk_intr_dma;
+ } else {
+ ata_pktp->ap_cmd = ATC_WRSEC;
+ ata_pktp->ap_start = ata_disk_start_pio_out;
+ ata_pktp->ap_intr = ata_disk_intr_pio_out;
+ }
+ ata_pktp->ap_flags |= AP_WRITE;
+ break;
+ }
+
+ byte_count = RWCMDP(pktp)->buflen;
+ pktp->cp_bytexfer = byte_count;
+ pktp->cp_resid = byte_count;
+ ata_pktp->ap_resid = byte_count;
+
+ /*
+		 * Since we're not using READ/WRITE MULTIPLE, we should
+		 * set bytes_per_block to one sector.
+ * XXX- why wasn't this in the old driver??
+ */
+ ata_pktp->ap_bytes_per_block = NBPSCTR;
+ } else
+#endif
+ {
+ byte_count = sec_count << SCTRSHFT;
+ pktp->cp_bytexfer = byte_count;
+ pktp->cp_resid = byte_count;
+ ata_pktp->ap_resid = byte_count;
+
+ /* setup the task file registers */
+
+ switch (ata_pktp->ap_cdb) {
+ case DCMD_READ:
+ if (ata_pktp->ap_sg_cnt) {
+ ata_pktp->ap_cmd = ATC_READ_DMA;
+ ata_pktp->ap_pciide_dma = TRUE;
+ ata_pktp->ap_start = ata_disk_start_dma_in;
+ ata_pktp->ap_intr = ata_disk_intr_dma;
+ } else {
+ ata_pktp->ap_cmd = ata_drvp->ad_rd_cmd;
+ ata_pktp->ap_start = ata_disk_start_pio_in;
+ ata_pktp->ap_intr = ata_disk_intr_pio_in;
+ }
+ ata_pktp->ap_flags |= AP_READ;
+ break;
+
+ case DCMD_WRITE:
+ if (ata_pktp->ap_sg_cnt) {
+ ata_pktp->ap_cmd = ATC_WRITE_DMA;
+ ata_pktp->ap_pciide_dma = TRUE;
+ ata_pktp->ap_start = ata_disk_start_dma_out;
+ ata_pktp->ap_intr = ata_disk_intr_dma;
+ } else {
+ ata_pktp->ap_cmd = ata_drvp->ad_wr_cmd;
+ ata_pktp->ap_start = ata_disk_start_pio_out;
+ ata_pktp->ap_intr = ata_disk_intr_pio_out;
+ }
+ ata_pktp->ap_flags |= AP_WRITE;
+ break;
+
+ default:
+ ADBG_WARN(("ata_disk_iosetup: unknown command 0x%x\n",
+ ata_pktp->ap_cdb));
+ pktp = NULL;
+ break;
+ }
+ }
+
+ /* If 48-bit mode is used, convert command to 48-bit mode cmd */
+ if (pktp != NULL && ata_drvp->ad_flags & AD_EXT48) {
+ switch (ata_pktp->ap_cmd) {
+ case ATC_RDSEC:
+ ata_pktp->ap_cmd = ATC_RDSEC_EXT;
+ break;
+ case ATC_WRSEC:
+ ata_pktp->ap_cmd = ATC_WRSEC_EXT;
+ break;
+ case ATC_RDMULT:
+ ata_pktp->ap_cmd = ATC_RDMULT_EXT;
+ break;
+ case ATC_WRMULT:
+ ata_pktp->ap_cmd = ATC_WRMULT_EXT;
+ break;
+ case ATC_READ_DMA:
+ ata_pktp->ap_cmd = ATC_RDDMA_EXT;
+ break;
+ case ATC_WRITE_DMA:
+ ata_pktp->ap_cmd = ATC_WRDMA_EXT;
+ break;
+ }
+ }
+
+ return (pktp);
+}
+
+
+
+/*
+ *
+ * DADA transport entry point
+ *
+ */
+
+static int
+ata_disk_transport(opaque_t ctl_data, cmpkt_t *pktp)
+{
+ gtgt_t *gtgtp = (gtgt_t *)ctl_data;
+ ata_drv_t *ata_drvp = GTGTP2ATADRVP(gtgtp);
+ ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
+ ata_pkt_t *ata_pktp = CPKT2APKT(pktp);
+ int rc;
+ int polled = FALSE;
+
+ ADBG_TRACE(("ata_disk_transport entered\n"));
+
+ /* check for polling pkt */
+
+ if (pktp->cp_flags & CPF_NOINTR) {
+ polled = TRUE;
+ }
+
+ /* call ghd transport routine */
+
+ rc = ghd_transport(&ata_ctlp->ac_ccc, APKT2GCMD(ata_pktp),
+ gtgtp, pktp->cp_time, polled, NULL);
+
+ /* see if pkt was not accepted */
+
+ if (rc == TRAN_BUSY)
+ return (CTL_SEND_BUSY);
+
+ if (rc == TRAN_ACCEPT)
+ return (CTL_SEND_SUCCESS);
+
+ return (CTL_SEND_FAILURE);
+}
+
+
+/*
+ *
+ * routines to load the cylinder/head/sector/count
+ * task file registers.
+ *
+ */
+static void
+ata_disk_load_regs_lba28(ata_pkt_t *ata_pktp, ata_drv_t *ata_drvp)
+{
+ ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ uint_t lba; /* LBA of first sector */
+
+ lba = ata_pktp->ap_startsec;
+
+ ddi_put8(io_hdl1, ata_ctlp->ac_count,
+ ata_pktp->ap_count);
+ ddi_put8(io_hdl1, ata_ctlp->ac_sect, lba);
+ lba >>= 8;
+ ddi_put8(io_hdl1, ata_ctlp->ac_lcyl, lba);
+ lba >>= 8;
+ ddi_put8(io_hdl1, ata_ctlp->ac_hcyl, lba);
+ lba >>= 8;
+ /*
+	 * The dev/head register can hold only 4 bits of the LBA and
+	 * must also include the drive selector.
+ */
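+	/*
+	 * For example (illustrative value): with ap_startsec == 0x12345678
+	 * the shifts above leave lba == 0x12; the masking below then puts
+	 * 0x2 in the low nibble of the dev/head register, OR'ed with the
+	 * drive-select bits.
+	 */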
+ lba = (lba & 0xf) | ata_drvp->ad_drive_bits;
+ ddi_put8(io_hdl1, ata_ctlp->ac_drvhd, lba);
+}
+
+/*
+ * In 48-bit extended mode, the sector count is 16 bits wide, and the
+ * LBA is 48 bits wide, as follows:
+ * register most recent previous
+ * name value value
+ * -------- ---------- ---------
+ * sector cnt count(7:0) count(15:8)
+ * sector num lba(7:0) lba(31:24)
+ * cyl low lba(15:8) lba(39:32)
+ * cyl hi lba(23:16) lba(47:40)
+ * device/head 111D0000 N/A
+ * ^ ^
+ * | |
+ * | +-- drive number
+ * |
+ * +-- indicates LBA
+ * The other two 1 bits are historical and are not used in 48-bit
+ * extended mode.
+ */
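+/*
+ * Worked example (values chosen purely for illustration): for a request
+ * with ap_startsec == 0x12345678 and ap_count == 0x0100, the routine
+ * below writes the sector count as 0x01 then 0x00, the high-order half
+ * of the LBA as 0x12/0x00/0x00 (sect/lcyl/hcyl), and then the low-order
+ * half as 0x78/0x56/0x34.
+ */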
+/*
+ * WARNING:
+ * The dada framework passes the starting sector as a daddr_t, which
+ * limits the reachable disk space on the 32-bit x86 architecture to
+ * 1 terabyte. Therefore the high 16 bits of the 48-bit address can
+ * be, and currently are, ignored.
+ */
+static void
+ata_disk_load_regs_lba48(ata_pkt_t *ata_pktp, ata_drv_t *ata_drvp)
+{
+ ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ uint16_t seccnt; /* 16-bit sector count */
+ uint_t lbalow; /* low-order 24 bits of LBA */
+ uint_t lbahi; /* high-order 24 bits of LBA */
+
+ seccnt = ata_pktp->ap_count;
+ /* high-order 8 bits of lbalow never get used */
+ lbalow = ata_pktp->ap_startsec;
+ lbahi = ata_pktp->ap_startsec >> 24;
+
+ ddi_put8(io_hdl1, ata_ctlp->ac_count, seccnt >> 8);
+ ddi_put8(io_hdl1, ata_ctlp->ac_count, seccnt);
+ /* Send the high-order half first */
+ ddi_put8(io_hdl1, ata_ctlp->ac_sect, lbahi);
+ lbahi >>= 8;
+ ddi_put8(io_hdl1, ata_ctlp->ac_lcyl, lbahi);
+ lbahi >>= 8;
+ ddi_put8(io_hdl1, ata_ctlp->ac_hcyl, lbahi);
+ /* Send the low-order half */
+ ddi_put8(io_hdl1, ata_ctlp->ac_sect, lbalow);
+ lbalow >>= 8;
+ ddi_put8(io_hdl1, ata_ctlp->ac_lcyl, lbalow);
+ lbalow >>= 8;
+ ddi_put8(io_hdl1, ata_ctlp->ac_hcyl, lbalow);
+ ddi_put8(io_hdl1, ata_ctlp->ac_drvhd,
+ ata_drvp->ad_drive_bits);
+}
+
+static void
+ata_disk_load_regs_chs(ata_pkt_t *ata_pktp, ata_drv_t *ata_drvp)
+{
+ ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ uint_t resid;
+ uint_t cyl;
+ uchar_t head;
+ uchar_t drvheads;
+ uchar_t drvsectors;
+
+ drvheads = ata_drvp->ad_phhd;
+ drvsectors = ata_drvp->ad_phsec;
+
+ resid = ata_pktp->ap_startsec / drvsectors;
+ head = (resid % drvheads) & 0xf;
+ cyl = resid / drvheads;
+ /* automatically truncate to char */
+ ddi_put8(io_hdl1, ata_ctlp->ac_sect,
+ (ata_pktp->ap_startsec % drvsectors) + 1);
+ ddi_put8(io_hdl1, ata_ctlp->ac_count, ata_pktp->ap_count);
+ ddi_put8(io_hdl1, ata_ctlp->ac_hcyl, (cyl >> 8));
+ /* lcyl gets truncated to 8 bits */
+ ddi_put8(io_hdl1, ata_ctlp->ac_lcyl, cyl);
+ ddi_put8(io_hdl1, ata_ctlp->ac_drvhd,
+ ata_drvp->ad_drive_bits | head);
+}
+
+
+/*
+ *
+ * packet start callback routines
+ *
+ */
+
+/* ARGSUSED */
+static int
+ata_disk_start_common(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+
+ ADBG_TRACE(("ata_disk_start_common entered\n"));
+
+ ADBG_TRANSPORT(("ata_disk_start:\tpkt = 0x%p, pkt flags = 0x%x\n",
+ ata_pktp, ata_pktp->ap_flags));
+ ADBG_TRANSPORT(("\tcommand=0x%x, sect=0x%lx\n",
+ ata_pktp->ap_cmd, ata_pktp->ap_startsec));
+ ADBG_TRANSPORT(("\tcount=0x%x, drvhd = 0x%x\n",
+ ata_pktp->ap_count, ata_drvp->ad_drive_bits));
+
+ /*
+	 * If AC_BSY_WAIT is set, wait for the controller to become
+	 * not-busy before issuing a command; if it is not set, skip
+	 * the wait. The wait is important for laptops that do
+	 * suspend/resume but do not correctly wait for the busy bit
+	 * to drop after a resume.
+ *
+ * NOTE: this test for ATS_BSY is also needed if/when we
+ * implement the overlapped/queued command protocols. Currently,
+ * the overlap/queued feature is not supported so the test is
+ * conditional.
+ */
+ if (ata_ctlp->ac_timing_flags & AC_BSY_WAIT) {
+ if (!ata_wait(io_hdl2, ata_ctlp->ac_ioaddr2,
+ 0, ATS_BSY, 5000000)) {
+ ADBG_ERROR(("ata_disk_start: BUSY\n"));
+ return (FALSE);
+ }
+ }
+
+ ddi_put8(io_hdl1, ata_ctlp->ac_drvhd, ata_drvp->ad_drive_bits);
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ /*
+	 * make certain the drive is selected
+ */
+ if (!ata_wait(io_hdl2, ata_ctlp->ac_ioaddr2,
+ ATS_DRDY, ATS_BSY, 5 * 1000000)) {
+ ADBG_ERROR(("ata_disk_start: select failed\n"));
+ return (FALSE);
+ }
+
+ /*
+ * We use different methods for loading the task file
+ * registers, depending on whether the disk
+ * uses LBA or CHS addressing and whether 48-bit
+ * extended addressing is to be used.
+ */
+ if (!(ata_drvp->ad_drive_bits & ATDH_LBA))
+ ata_disk_load_regs_chs(ata_pktp, ata_drvp);
+ else if (ata_drvp->ad_flags & AD_EXT48)
+ ata_disk_load_regs_lba48(ata_pktp, ata_drvp);
+ else
+ ata_disk_load_regs_lba28(ata_pktp, ata_drvp);
+ ddi_put8(io_hdl1, ata_ctlp->ac_feature, 0);
+
+ /*
+ * Always make certain interrupts are enabled. It's been reported
+ * (but not confirmed) that some notebook computers don't
+ * clear the interrupt disable bit after being resumed. The
+ * easiest way to fix this is to always clear the disable bit
+ * before every command.
+ */
+ ddi_put8(io_hdl2, ata_ctlp->ac_devctl, ATDC_D3);
+ return (TRUE);
+}
+
+
+/*
+ *
+ * Start a non-data ATA command (not DMA and not PIO):
+ *
+ */
+
+static int
+ata_disk_start(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int rc;
+
+ rc = ata_disk_start_common(ata_ctlp, ata_drvp, ata_pktp);
+
+ if (!rc)
+ return (ATA_FSM_RC_BUSY);
+
+ /*
+ * This next one sets the controller in motion
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_cmd, ata_pktp->ap_cmd);
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ return (ATA_FSM_RC_OKAY);
+}
+
+
+
+static int
+ata_disk_start_dma_in(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int rc;
+
+ rc = ata_disk_start_common(ata_ctlp, ata_drvp, ata_pktp);
+
+ if (!rc)
+ return (ATA_FSM_RC_BUSY);
+
+ /*
+ * Copy the Scatter/Gather list to the controller's
+ * Physical Region Descriptor Table
+ */
+ ata_pciide_dma_setup(ata_ctlp, ata_pktp->ap_sg_list,
+ ata_pktp->ap_sg_cnt);
+
+ /*
+ * reset the PCIIDE Controller's interrupt and error status bits
+ */
+ (void) ata_pciide_status_clear(ata_ctlp);
+
+ /*
+ * This next one sets the drive in motion
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_cmd, ata_pktp->ap_cmd);
+
+ /* wait for the drive's busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ ata_pciide_dma_start(ata_ctlp, PCIIDE_BMICX_RWCON_WRITE_TO_MEMORY);
+
+ return (ATA_FSM_RC_OKAY);
+}
+
+
+
+static int
+ata_disk_start_dma_out(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int rc;
+
+ rc = ata_disk_start_common(ata_ctlp, ata_drvp, ata_pktp);
+
+ if (!rc)
+ return (ATA_FSM_RC_BUSY);
+
+ /*
+ * Copy the Scatter/Gather list to the controller's
+ * Physical Region Descriptor Table
+ */
+ ata_pciide_dma_setup(ata_ctlp, ata_pktp->ap_sg_list,
+ ata_pktp->ap_sg_cnt);
+
+ /*
+ * reset the PCIIDE Controller's interrupt and error status bits
+ */
+ (void) ata_pciide_status_clear(ata_ctlp);
+
+ /*
+ * This next one sets the drive in motion
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_cmd, ata_pktp->ap_cmd);
+
+ /* wait for the drive's busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ ata_pciide_dma_start(ata_ctlp, PCIIDE_BMICX_RWCON_READ_FROM_MEMORY);
+
+ return (ATA_FSM_RC_OKAY);
+}
+
+
+
+
+
+/*
+ *
+ * Start a PIO data-in ATA command:
+ *
+ */
+
+static int
+ata_disk_start_pio_in(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int rc;
+
+ rc = ata_disk_start_common(ata_ctlp, ata_drvp, ata_pktp);
+
+ if (!rc)
+ return (ATA_FSM_RC_BUSY);
+ /*
+ * This next one sets the controller in motion
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_cmd, ata_pktp->ap_cmd);
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ return (ATA_FSM_RC_OKAY);
+}
+
+
+
+
+/*
+ *
+ * Start a PIO data-out ATA command:
+ *
+ */
+
+static int
+ata_disk_start_pio_out(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int rc;
+
+ ata_pktp->ap_wrt_count = 0;
+
+ rc = ata_disk_start_common(ata_ctlp, ata_drvp, ata_pktp);
+
+ if (!rc)
+ return (ATA_FSM_RC_BUSY);
+ /*
+ * This next one sets the controller in motion
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_cmd, ata_pktp->ap_cmd);
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ /*
+ * Wait for the drive to assert DRQ to send the first chunk
+ * of data. Have to busy wait because there's no interrupt for
+	 * the first chunk. This wastes a lot of cycles if the
+ * drive responds too slowly or if the wait loop granularity
+ * is too large. It's really bad if the drive is defective and
+ * the loop times out.
+ */
+
+ if (!ata_wait3(io_hdl2, ata_ctlp->ac_ioaddr2,
+ ATS_DRQ, ATS_BSY, /* okay */
+ ATS_ERR, ATS_BSY, /* cmd failed */
+ ATS_DF, ATS_BSY, /* drive failed */
+ 4000000)) {
+ ADBG_WARN(("ata_disk_start_pio_out: no DRQ\n"));
+ ata_pktp->ap_flags |= AP_ERROR;
+ return (ATA_FSM_RC_INTR);
+ }
+
+ /*
+ * Tell the upper layer to fake a hardware interrupt which
+ * actually causes the first segment to be written to the drive.
+ */
+ return (ATA_FSM_RC_INTR);
+}
+
+
+
+/*
+ *
+ * packet complete callback routine
+ *
+ */
+
+static void
+ata_disk_complete(
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp,
+ int do_callback)
+{
+ struct ata_id *aidp = &ata_drvp->ad_id;
+ cmpkt_t *pktp;
+
+ ADBG_TRACE(("ata_disk_complete entered\n"));
+ ADBG_TRANSPORT(("ata_disk_complete: pkt = 0x%p\n", ata_pktp));
+
+ pktp = APKT2CPKT(ata_pktp);
+
+ /* update resid */
+
+ pktp->cp_resid = ata_pktp->ap_resid;
+
+ if (ata_pktp->ap_flags & AP_ERROR) {
+
+ pktp->cp_reason = CPS_CHKERR;
+
+ if (ata_pktp->ap_error & ATE_BBK_ICRC) {
+ if (IS_ATA_VERSION_GE(aidp, 4))
+ ata_pktp->ap_scb = DERR_ICRC;
+ else
+ ata_pktp->ap_scb = DERR_BBK;
+ } else if (ata_pktp->ap_error & ATE_UNC)
+ ata_pktp->ap_scb = DERR_UNC;
+ else if (ata_pktp->ap_error & ATE_IDNF)
+ ata_pktp->ap_scb = DERR_IDNF;
+ else if (ata_pktp->ap_error & ATE_TKONF)
+ ata_pktp->ap_scb = DERR_TKONF;
+ else if (ata_pktp->ap_error & ATE_AMNF)
+ ata_pktp->ap_scb = DERR_AMNF;
+ else if (ata_pktp->ap_status & ATS_BSY)
+ ata_pktp->ap_scb = DERR_BUSY;
+ else if (ata_pktp->ap_status & ATS_DF)
+ ata_pktp->ap_scb = DERR_DWF;
+ else /* any unknown error */
+ ata_pktp->ap_scb = DERR_ABORT;
+ } else if (ata_pktp->ap_flags &
+ (AP_ABORT|AP_TIMEOUT|AP_BUS_RESET)) {
+
+ pktp->cp_reason = CPS_CHKERR;
+ ata_pktp->ap_scb = DERR_ABORT;
+ } else {
+ pktp->cp_reason = CPS_SUCCESS;
+ ata_pktp->ap_scb = DERR_SUCCESS;
+ }
+
+ /* callback */
+ if (do_callback)
+ (*pktp->cp_callback)(pktp);
+}
+
+
+/*
+ *
+ * Interrupt callbacks
+ *
+ */
+
+
+/*
+ *
+ * ATA command, no data
+ *
+ */
+
+/* ARGSUSED */
+static int
+ata_disk_intr(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ uchar_t status;
+
+ ADBG_TRACE(("ata_disk_intr entered\n"));
+ ADBG_TRANSPORT(("ata_disk_intr: pkt = 0x%p\n", ata_pktp));
+
+ status = ata_get_status_clear_intr(ata_ctlp, ata_pktp);
+
+ ASSERT((status & (ATS_BSY | ATS_DRQ)) == 0);
+
+ /*
+ * check for errors
+ */
+
+ if (status & (ATS_DF | ATS_ERR)) {
+ ADBG_WARN(("ata_disk_intr: status 0x%x error 0x%x\n", status,
+ ddi_get8(ata_ctlp->ac_iohandle1, ata_ctlp->ac_error)));
+ ata_pktp->ap_flags |= AP_ERROR;
+ }
+
+ if (ata_pktp->ap_flags & AP_ERROR) {
+ ata_pktp->ap_status = ddi_get8(ata_ctlp->ac_iohandle2,
+ ata_ctlp->ac_altstatus);
+ ata_pktp->ap_error = ddi_get8(ata_ctlp->ac_iohandle1,
+ ata_ctlp->ac_error);
+ }
+
+ /* tell the upper layer this request is complete */
+ return (ATA_FSM_RC_FINI);
+}
+
+
+/*
+ *
+ * ATA command, PIO data in
+ *
+ */
+
+/* ARGSUSED */
+static int
+ata_disk_intr_pio_in(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ uchar_t status;
+
+ ADBG_TRACE(("ata_disk_pio_in entered\n"));
+ ADBG_TRANSPORT(("ata_disk_pio_in: pkt = 0x%p\n", ata_pktp));
+
+ /*
+ * first make certain DRQ is asserted (and no errors)
+ */
+ (void) ata_wait3(io_hdl2, ata_ctlp->ac_ioaddr2,
+ ATS_DRQ, ATS_BSY, ATS_ERR, ATS_BSY, ATS_DF, ATS_BSY,
+ 4000000);
+
+ status = ata_get_status_clear_intr(ata_ctlp, ata_pktp);
+
+ if (status & ATS_BSY) {
+ ADBG_WARN(("ata_disk_pio_in: BUSY\n"));
+ ata_pktp->ap_flags |= AP_ERROR;
+ ata_pktp->ap_status = ddi_get8(io_hdl2, ata_ctlp->ac_altstatus);
+ ata_pktp->ap_error = ddi_get8(io_hdl1, ata_ctlp->ac_error);
+ return (ATA_FSM_RC_BUSY);
+ }
+
+ /*
+ * record any errors
+ */
+ if ((status & (ATS_DRQ | ATS_DF | ATS_ERR)) != ATS_DRQ) {
+ ADBG_WARN(("ata_disk_pio_in: status 0x%x error 0x%x\n",
+ status, ddi_get8(io_hdl1, ata_ctlp->ac_error)));
+ ata_pktp->ap_flags |= AP_ERROR;
+ ata_pktp->ap_status = ddi_get8(io_hdl2, ata_ctlp->ac_altstatus);
+ ata_pktp->ap_error = ddi_get8(io_hdl1, ata_ctlp->ac_error);
+ }
+
+ /*
+ * read the next chunk of data (if any)
+ */
+ if (status & ATS_DRQ) {
+ ata_disk_pio_xfer_data_in(ata_ctlp, ata_pktp);
+ }
+
+ /*
+ * If that was the last chunk, wait for the device to clear DRQ
+ */
+ if (ata_pktp->ap_resid == 0) {
+ if (ata_wait(io_hdl2, ata_ctlp->ac_ioaddr2,
+ 0, (ATS_DRQ | ATS_BSY), 4000000)) {
+ /* tell the upper layer this request is complete */
+ return (ATA_FSM_RC_FINI);
+ }
+
+ ADBG_WARN(("ata_disk_pio_in: DRQ stuck\n"));
+ ata_pktp->ap_flags |= AP_ERROR;
+ ata_pktp->ap_status = ddi_get8(io_hdl2, ata_ctlp->ac_altstatus);
+ ata_pktp->ap_error = ddi_get8(io_hdl1, ata_ctlp->ac_error);
+ }
+
+ /*
+ * check for errors
+ */
+ if (ata_pktp->ap_flags & AP_ERROR) {
+ return (ATA_FSM_RC_FINI);
+ }
+
+ /*
+ * If the read command isn't done yet,
+ * wait for the next interrupt.
+ */
+ ADBG_TRACE(("ata_disk_pio_in: partial\n"));
+ return (ATA_FSM_RC_OKAY);
+}
+
+
+
+/*
+ *
+ * ATA command, PIO data out
+ *
+ */
+
+/* ARGSUSED */
+static int
+ata_disk_intr_pio_out(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int tmp_count = ata_pktp->ap_wrt_count;
+ uchar_t status;
+
+ /*
+ * clear the IRQ
+ */
+ status = ata_get_status_clear_intr(ata_ctlp, ata_pktp);
+
+ ADBG_TRACE(("ata_disk_intr_pio_out entered\n"));
+ ADBG_TRANSPORT(("ata_disk_intr_pio_out: pkt = 0x%p\n", ata_pktp));
+
+ ASSERT(!(status & ATS_BSY));
+
+
+ /*
+ * check for errors
+ */
+
+ if (status & (ATS_DF | ATS_ERR)) {
+ ADBG_WARN(("ata_disk_intr_pio_out: status 0x%x error 0x%x\n",
+ status, ddi_get8(io_hdl1, ata_ctlp->ac_error)));
+ ata_pktp->ap_flags |= AP_ERROR;
+ ata_pktp->ap_status = ddi_get8(io_hdl2, ata_ctlp->ac_altstatus);
+ ata_pktp->ap_error = ddi_get8(io_hdl1, ata_ctlp->ac_error);
+ /* tell the upper layer this request is complete */
+ return (ATA_FSM_RC_FINI);
+ }
+
+
+ /*
+ * last write was okay, bump the ptr and
+ * decr the resid count
+ */
+ ata_pktp->ap_v_addr += tmp_count;
+ ata_pktp->ap_resid -= tmp_count;
+
+ /*
+ * check for final interrupt on write command
+ */
+ if (ata_pktp->ap_resid <= 0) {
+ /* tell the upper layer this request is complete */
+ return (ATA_FSM_RC_FINI);
+ }
+
+ /*
+ * Perform the next data transfer
+ *
+ * First make certain DRQ is asserted and no error status.
+ * (I'm not certain but I think some drives might deassert BSY
+ * before asserting DRQ. This extra ata_wait3() will
+ * compensate for such drives).
+ *
+ */
+ (void) ata_wait3(io_hdl2, ata_ctlp->ac_ioaddr2,
+ ATS_DRQ, ATS_BSY, ATS_ERR, ATS_BSY, ATS_DF, ATS_BSY, 4000000);
+
+ status = ddi_get8(io_hdl2, ata_ctlp->ac_altstatus);
+
+ if (status & ATS_BSY) {
+ /* this should never happen */
+ ADBG_WARN(("ata_disk_intr_pio_out: BUSY\n"));
+ ata_pktp->ap_flags |= AP_ERROR;
+ ata_pktp->ap_status = ddi_get8(io_hdl2, ata_ctlp->ac_altstatus);
+ ata_pktp->ap_error = ddi_get8(io_hdl1, ata_ctlp->ac_error);
+ return (ATA_FSM_RC_BUSY);
+ }
+
+ /*
+	 * bail out if there were any errors
+ */
+ if ((status & (ATS_DRQ | ATS_DF | ATS_ERR)) != ATS_DRQ) {
+ ADBG_WARN(("ata_disk_pio_out: status 0x%x error 0x%x\n",
+ status, ddi_get8(io_hdl1, ata_ctlp->ac_error)));
+ ata_pktp->ap_flags |= AP_ERROR;
+ ata_pktp->ap_status = ddi_get8(io_hdl2, ata_ctlp->ac_altstatus);
+ ata_pktp->ap_error = ddi_get8(io_hdl1, ata_ctlp->ac_error);
+ return (ATA_FSM_RC_FINI);
+ }
+
+ /*
+ * write the next chunk of data
+ */
+ ADBG_TRACE(("ata_disk_intr_pio_out: write xfer\n"));
+ ata_disk_pio_xfer_data_out(ata_ctlp, ata_pktp);
+
+ /*
+ * Wait for the next interrupt before checking the transfer
+ * status and adjusting the transfer count.
+ *
+ */
+ return (ATA_FSM_RC_OKAY);
+}
+
+
+/*
+ *
+ * ATA command, DMA data in/out
+ *
+ */
+
+static int
+ata_disk_intr_dma(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ uchar_t status;
+
+ ADBG_TRACE(("ata_disk_intr_dma entered\n"));
+ ADBG_TRANSPORT(("ata_disk_intr_dma: pkt = 0x%p\n", ata_pktp));
+
+ /*
+ * halt the DMA engine
+ */
+ ata_pciide_dma_stop(ata_ctlp);
+
+ /*
+ * wait for the device to clear DRQ
+ */
+ if (!ata_wait(io_hdl2, ata_ctlp->ac_ioaddr2,
+ 0, (ATS_DRQ | ATS_BSY), 4000000)) {
+ ADBG_WARN(("ata_disk_intr_dma: DRQ stuck\n"));
+ ata_pktp->ap_flags |= AP_ERROR;
+ ata_pktp->ap_status = ddi_get8(io_hdl2, ata_ctlp->ac_altstatus);
+ ata_pktp->ap_error = ddi_get8(io_hdl1, ata_ctlp->ac_error);
+ return (ATA_FSM_RC_BUSY);
+ }
+
+ /*
+ * get the status and clear the IRQ, and check for DMA error
+ */
+ status = ata_get_status_clear_intr(ata_ctlp, ata_pktp);
+
+ /*
+ * check for drive errors
+ */
+
+ if (status & (ATS_DF | ATS_ERR)) {
+ ADBG_WARN(("ata_disk_intr_dma: status 0x%x error 0x%x\n",
+ status, ddi_get8(io_hdl1, ata_ctlp->ac_error)));
+ ata_pktp->ap_flags |= AP_ERROR;
+ ata_pktp->ap_status = ddi_get8(io_hdl2, ata_ctlp->ac_altstatus);
+ ata_pktp->ap_error = ddi_get8(io_hdl1, ata_ctlp->ac_error);
+ }
+
+ /*
+ * If there was a drive or DMA error, compute a resid count
+ */
+ if (ata_pktp->ap_flags & AP_ERROR) {
+ /*
+ * grab the last sector address from the drive regs
+ * and use that to compute the resid
+ */
+ ata_disk_get_resid(ata_ctlp, ata_drvp, ata_pktp);
+ } else {
+ ata_pktp->ap_resid = 0;
+ }
+
+ /* tell the upper layer this request is complete */
+ return (ATA_FSM_RC_FINI);
+}
+
+
+/*
+ *
+ * Low level PIO routine that transfers data from the drive
+ *
+ */
+
+static void
+ata_disk_pio_xfer_data_in(
+ ata_ctl_t *ata_ctlp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int count;
+
+ count = min(ata_pktp->ap_resid,
+ ata_pktp->ap_bytes_per_block);
+
+ ADBG_TRANSPORT(("ata_disk_pio_xfer_data_in: 0x%x bytes, addr = 0x%p\n",
+ count, ata_pktp->ap_v_addr));
+
+ /*
+ * read count bytes
+ */
+
+ ASSERT(count != 0);
+
+ ddi_rep_get16(io_hdl1, (ushort_t *)ata_pktp->ap_v_addr,
+ ata_ctlp->ac_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ /*
+ * this read command completed okay, bump the ptr and
+ * decr the resid count now.
+ */
+ ata_pktp->ap_v_addr += count;
+ ata_pktp->ap_resid -= count;
+}
+
+
+/*
+ *
+ * Low level PIO routine that transfers data to the drive
+ *
+ */
+
+static void
+ata_disk_pio_xfer_data_out(
+ ata_ctl_t *ata_ctlp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int count;
+
+ count = min(ata_pktp->ap_resid,
+ ata_pktp->ap_bytes_per_block);
+
+ ADBG_TRANSPORT(("ata_disk_pio_xfer_data_out: 0x%x bytes, addr = 0x%p\n",
+ count, ata_pktp->ap_v_addr));
+
+ /*
+	 * write count bytes
+ */
+
+ ASSERT(count != 0);
+
+ ddi_rep_put16(io_hdl1, (ushort_t *)ata_pktp->ap_v_addr,
+ ata_ctlp->ac_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ /*
+ * save the count here so I can correctly adjust
+ * the ap_v_addr and ap_resid values at the next
+ * interrupt.
+ */
+ ata_pktp->ap_wrt_count = count;
+}
+
+
+/*
+ *
+ * ATA Initialize Device Parameters (aka Set Params) command
+ *
+ * If the drive was put in some sort of CHS extended/logical geometry
+ * mode by the BIOS, this function will reset it to its "native"
+ * CHS geometry. This ensures that we don't run into any sort of
+ * 1024 cylinder (or 65535 cylinder) limitation that may have been
+ * created by a BIOS (or a user) choosing a bogus translated geometry.
+ */
+
+static int
+ata_disk_initialize_device_parameters(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp)
+{
+ int rc;
+
+#ifdef _SIMULATOR_SUPPORT
+ extern int simulator_run; /* running under simulator ? */
+#endif /* _SIMULATOR_SUPPORT */
+
+ rc = ata_command(ata_ctlp, ata_drvp, FALSE, FALSE,
+ ata_disk_init_dev_parm_wait,
+ ATC_SETPARAM,
+ 0, /* feature n/a */
+ ata_drvp->ad_phsec, /* max sector (1-based) */
+ 0, /* sector n/a */
+ (ata_drvp->ad_phhd -1), /* max head (0-based) */
+ 0, /* cyl_low n/a */
+ 0); /* cyl_hi n/a */
+
+#ifdef _SIMULATOR_SUPPORT
+ if (rc || simulator_run) {
+ return (TRUE);
+ }
+#else
+ if (rc) {
+ return (TRUE);
+ }
+#endif /* _SIMULATOR_SUPPORT */
+
+ ADBG_ERROR(("ata_init_dev_parms: failed\n"));
+ return (FALSE);
+}
+
+
+
+/*
+ *
+ * create fake inquiry data for DADA interface
+ *
+ */
+
+static void
+ata_disk_fake_inquiry(
+ ata_drv_t *ata_drvp)
+{
+ struct ata_id *ata_idp = &ata_drvp->ad_id;
+ struct scsi_inquiry *inqp = &ata_drvp->ad_inquiry;
+
+ ADBG_TRACE(("ata_disk_fake_inquiry entered\n"));
+
+ if (ata_idp->ai_config & ATA_ID_REM_DRV) /* ide removable bit */
+ inqp->inq_rmb = 1; /* scsi removable bit */
+
+ (void) strncpy(inqp->inq_vid, "Gen-ATA ", sizeof (inqp->inq_vid));
+ inqp->inq_dtype = DTYPE_DIRECT;
+ inqp->inq_qual = DPQ_POSSIBLE;
+
+ (void) strncpy(inqp->inq_pid, ata_idp->ai_model,
+ sizeof (inqp->inq_pid));
+ (void) strncpy(inqp->inq_revision, ata_idp->ai_fw,
+ sizeof (inqp->inq_revision));
+}
+
+#define LOOP_COUNT 10000
+
+
+/*
+ *
+ * ATA Set Multiple Mode
+ *
+ */
+
+static int
+ata_disk_set_multiple(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp)
+{
+ int rc;
+
+ rc = ata_command(ata_ctlp, ata_drvp, TRUE, FALSE,
+ ata_disk_set_mult_wait,
+ ATC_SETMULT,
+ 0, /* feature n/a */
+ ata_drvp->ad_block_factor, /* count */
+ 0, /* sector n/a */
+ 0, /* head n/a */
+ 0, /* cyl_low n/a */
+ 0); /* cyl_hi n/a */
+
+ if (rc) {
+ return (TRUE);
+ }
+
+ ADBG_ERROR(("ata_disk_set_multiple: failed\n"));
+ return (FALSE);
+}
+
+
+/*
+ *
+ * ATA Identify Device command
+ *
+ */
+
+int
+ata_disk_id(
+ ddi_acc_handle_t io_hdl1,
+ caddr_t ioaddr1,
+ ddi_acc_handle_t io_hdl2,
+ caddr_t ioaddr2,
+ struct ata_id *ata_idp)
+{
+ int rc;
+
+ ADBG_TRACE(("ata_disk_id entered\n"));
+
+ rc = ata_id_common(ATC_ID_DEVICE, TRUE, io_hdl1, ioaddr1, io_hdl2,
+ ioaddr2, ata_idp);
+
+ if (!rc)
+ return (FALSE);
+
+ /*
+ * If the disk is a CF/Microdrive that works under ATA mode
+ * through CF<->ATA adapters, identify it as an ATA device
+	 * and as non-removable media.
+ */
+ if (ata_idp->ai_config == ATA_ID_COMPACT_FLASH) {
+ ata_idp->ai_config = ATA_ID_CF_TO_ATA;
+ }
+
+ if ((ata_idp->ai_config & ATAC_ATA_TYPE_MASK) != ATAC_ATA_TYPE)
+ return (FALSE);
+
+ if (ata_idp->ai_heads == 0 || ata_idp->ai_sectors == 0) {
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
+static daddr_t
+ata_last_block_xferred_chs(ata_drv_t *ata_drvp)
+{
+ ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ uchar_t drvheads = ata_drvp->ad_phhd;
+ uchar_t drvsectors = ata_drvp->ad_phsec;
+ uchar_t sector;
+ uchar_t head;
+ uchar_t low_cyl;
+ uchar_t hi_cyl;
+ daddr_t lbastop;
+
+ sector = ddi_get8(io_hdl1, ata_ctlp->ac_sect);
+ head = ddi_get8(io_hdl1, ata_ctlp->ac_drvhd) & 0xf;
+ low_cyl = ddi_get8(io_hdl1, ata_ctlp->ac_lcyl);
+ hi_cyl = ddi_get8(io_hdl1, ata_ctlp->ac_hcyl);
+
+ lbastop = low_cyl;
+ lbastop |= (uint_t)hi_cyl << 8;
+ lbastop *= (uint_t)drvheads;
+ lbastop += (uint_t)head;
+ lbastop *= (uint_t)drvsectors;
+ lbastop += (uint_t)sector - 1;
+ return (lbastop);
+}
+
+static daddr_t
+ata_last_block_xferred_lba28(ata_ctl_t *ata_ctlp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ daddr_t lbastop;
+
+ lbastop = ddi_get8(io_hdl1, ata_ctlp->ac_drvhd) & 0xf;
+ lbastop <<= 8;
+ lbastop += ddi_get8(io_hdl1, ata_ctlp->ac_hcyl);
+ lbastop <<= 8;
+ lbastop += ddi_get8(io_hdl1, ata_ctlp->ac_lcyl);
+ lbastop <<= 8;
+ lbastop += ddi_get8(io_hdl1, ata_ctlp->ac_sect);
+ return (lbastop);
+}
+
+static daddr_t
+ata_last_block_xferred_lba48(ata_ctl_t *ata_ctlp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ daddr_t lbastop;
+
+ /* turn on HOB and read the high-order 24 bits */
+ ddi_put8(io_hdl2, ata_ctlp->ac_devctl, (ATDC_D3 | ATDC_HOB));
+ lbastop = ddi_get8(io_hdl1, ata_ctlp->ac_hcyl);
+ lbastop <<= 8;
+ lbastop += ddi_get8(io_hdl1, ata_ctlp->ac_lcyl);
+ lbastop <<= 8;
+ lbastop += ddi_get8(io_hdl1, ata_ctlp->ac_sect);
+ lbastop <<= 8;
+
+ /* Turn off HOB and read the low-order 24-bits */
+ ddi_put8(io_hdl2, ata_ctlp->ac_devctl, (ATDC_D3));
+ lbastop += ddi_get8(io_hdl1, ata_ctlp->ac_hcyl);
+ lbastop <<= 8;
+ lbastop += ddi_get8(io_hdl1, ata_ctlp->ac_lcyl);
+ lbastop <<= 8;
+ lbastop += ddi_get8(io_hdl1, ata_ctlp->ac_sect);
+ return (lbastop);
+}
+
+
+/*
+ *
+ * Need to compute a value for ap_resid so that cp_resid can
+ * be set by ata_disk_complete(). The cp_resid field is actually
+ * misnamed: it is the offset of the block in which the error
+ * occurred, not the number of bytes transferred to the device.
+ * At least that's how dadk uses cp_resid when reporting
+ * an error. In other words, the sector that had the error and the
+ * number of bytes transferred don't always indicate the same offset.
+ * On top of that, when doing DMA transfers there's actually no
+ * way to determine how many bytes have been transferred by the DMA
+ * engine. On the other hand, the drive will report which sector
+ * it faulted on. Using that address, this routine computes the
+ * number of residual bytes beyond that point which probably weren't
+ * written to the drive (the drive is allowed to re-order sector
+ * writes, but on an ATA disk there's no way to deal with that
+ * complication; in other words, the resid value calculated by
+ * this routine is as good as we can manage).
+ */
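+/*
+ * For example (illustrative numbers only): if a transfer started at
+ * sector 100 with a count of 8 sectors and the drive reports that it
+ * faulted on sector 103, then resid_sectors is 100 + 8 - 103 = 5 and
+ * ap_resid is set to those 5 sectors' worth of bytes (5 << SCTRSHFT).
+ */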
+
+static void
+ata_disk_get_resid(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ uint_t lba_start;
+ uint_t lba_stop;
+ uint_t resid_bytes;
+ uint_t resid_sectors;
+
+ lba_start = ata_pktp->ap_startsec;
+
+ if (ata_drvp->ad_flags & AD_EXT48)
+ lba_stop = ata_last_block_xferred_lba48(ata_ctlp);
+ else if (ata_drvp->ad_drive_bits & ATDH_LBA)
+ lba_stop = ata_last_block_xferred_lba28(ata_ctlp);
+ else /* CHS mode */
+ lba_stop = ata_last_block_xferred_chs(ata_drvp);
+
+ resid_sectors = lba_start + ata_pktp->ap_count - lba_stop;
+ resid_bytes = resid_sectors << SCTRSHFT;
+
+ ADBG_TRACE(("ata_disk_get_resid start 0x%x cnt 0x%x stop 0x%x\n",
+ lba_start, ata_pktp->ap_count, lba_stop));
+ ata_pktp->ap_resid = resid_bytes;
+}
+
+
+
+/*
+ * Removable media commands
+ */
+
+
+
+/*
+ * get the media status
+ *
+ * NOTE: the error handling case probably isn't correct but it
+ * will have to do until someone gives me a drive to test this on.
+ */
+static int
+ata_disk_state(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ int *statep = (int *)ata_pktp->ap_v_addr;
+ uchar_t err;
+
+ ADBG_TRACE(("ata_disk_state\n"));
+ if (ata_command(ata_ctlp, ata_drvp, TRUE, TRUE, 5 * 1000000,
+ ATC_DOOR_LOCK, 0, 0, 0, 0, 0, 0)) {
+ *statep = DKIO_INSERTED;
+ return (ATA_FSM_RC_FINI);
+ }
+
+ err = ddi_get8(ata_ctlp->ac_iohandle1, ata_ctlp->ac_error);
+ if (err & ATE_NM)
+ *statep = DKIO_EJECTED;
+ else
+ *statep = DKIO_NONE;
+
+ return (ATA_FSM_RC_FINI);
+}
+
+/*
+ * eject the media
+ */
+
+static int
+ata_disk_eject(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ADBG_TRACE(("ata_disk_eject\n"));
+ if (ata_command(ata_ctlp, ata_drvp, TRUE, TRUE, 5 * 1000000,
+ ATC_EJECT, 0, 0, 0, 0, 0, 0)) {
+ return (ATA_FSM_RC_FINI);
+ }
+ ata_pktp->ap_flags |= AP_ERROR;
+ return (ATA_FSM_RC_FINI);
+}
+
+/*
+ * lock the drive
+ *
+ */
+static int
+ata_disk_lock(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ADBG_TRACE(("ata_disk_lock\n"));
+ if (ata_command(ata_ctlp, ata_drvp, TRUE, TRUE, 5 * 1000000,
+ ATC_DOOR_LOCK, 0, 0, 0, 0, 0, 0)) {
+ return (ATA_FSM_RC_FINI);
+ }
+ ata_pktp->ap_flags |= AP_ERROR;
+ return (ATA_FSM_RC_FINI);
+}
+
+
+/*
+ * unlock the drive
+ *
+ */
+static int
+ata_disk_unlock(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ADBG_TRACE(("ata_disk_unlock\n"));
+ if (ata_command(ata_ctlp, ata_drvp, TRUE, TRUE, 5 * 1000000,
+ ATC_DOOR_UNLOCK, 0, 0, 0, 0, 0, 0)) {
+ return (ATA_FSM_RC_FINI);
+ }
+ ata_pktp->ap_flags |= AP_ERROR;
+ return (ATA_FSM_RC_FINI);
+}
+
+
+/*
+ * put the drive into standby mode
+ */
+static int
+ata_disk_standby(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ADBG_TRACE(("ata_disk_standby\n"));
+ if (ata_command(ata_ctlp, ata_drvp, TRUE, TRUE, 5 * 1000000,
+ ATC_STANDBY_IM, 0, 0, 0, 0, 0, 0)) {
+ return (ATA_FSM_RC_FINI);
+ }
+ ata_pktp->ap_flags |= AP_ERROR;
+ return (ATA_FSM_RC_FINI);
+}
+
+
+/*
+ * Recalibrate
+ *
+ * Note the extra long timeout value. This is necessary in case
+ * the drive was in standby mode and needs to spin up the media.
+ *
+ */
+static int
+ata_disk_recalibrate(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ADBG_TRACE(("ata_disk_recalibrate\n"));
+ if (ata_command(ata_ctlp, ata_drvp, TRUE, TRUE, 31 * 1000000,
+ ATC_RECAL, 0, 0, 0, 0, 0, 0)) {
+ return (ATA_FSM_RC_FINI);
+ }
+ ata_pktp->ap_flags |= AP_ERROR;
+ return (ATA_FSM_RC_FINI);
+}
+
+/*
+ * Copy a string of bytes that were obtained by Identify Device into a
+ * string buffer provided by the caller.
+ *
+ * 1. Determine the amount to copy. This is the lesser of the
+ * length of the source string or the space available in the user's
+ * buffer.
+ * 2. The true length of the source string is always returned to the
+ * caller in the size field of the argument.
+ * 3. Copy the string, add a terminating NUL character at the end.
+ */
+
+static int
+ata_copy_dk_ioc_string(intptr_t arg, char *source, int length, int flag)
+{
+ STRUCT_DECL(dadk_ioc_string, ds_arg);
+ int destsize;
+ char nulchar;
+ caddr_t outp;
+
+ /*
+ * The ioctls that use this routine are only available to
+ * the kernel.
+ */
+ if ((flag & FKIOCTL) == 0)
+ return (EFAULT);
+
+ STRUCT_INIT(ds_arg, flag & FMODELS);
+
+ /* 1. determine size of user's buffer */
+ if (ddi_copyin((caddr_t)arg, STRUCT_BUF(ds_arg), STRUCT_SIZE(ds_arg),
+ flag))
+ return (EFAULT);
+ destsize = STRUCT_FGET(ds_arg, is_size);
+ if (destsize > length + 1)
+ destsize = length + 1;
+
+ /*
+ * 2. Return the copied length to the caller. Note: for
+ * convenience, we actually copy the entire structure back out, not
+ * just the length. We don't change the is_buf field, so this
+ * shouldn't break anything.
+ */
+ STRUCT_FSET(ds_arg, is_size, length);
+ if (ddi_copyout(STRUCT_BUF(ds_arg), (caddr_t)arg, STRUCT_SIZE(ds_arg),
+ flag))
+ return (EFAULT);
+
+	/* 3. copy the string and add a NUL terminator */
+ outp = STRUCT_FGETP(ds_arg, is_buf);
+ if (ddi_copyout(source, outp, destsize - 1, flag))
+ return (EFAULT);
+ nulchar = '\0';
+ if (ddi_copyout(&nulchar, outp + (destsize - 1), 1, flag))
+ return (EFAULT);
+ return (0);
+}
+
+/*
+ * Sun-branded drives are shipped with the write cache disabled. The
+ * default is to force write caching on.
+ */
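+/*
+ * (As implemented below: ata_write_cache == 1 forces the write cache on,
+ * ata_write_cache == -1 forces it off, and any other value leaves the
+ * drive's current setting unchanged.)
+ */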
+static void
+ata_set_write_cache(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp)
+{
+ char *path;
+
+ if (ata_write_cache == 1) {
+ if (ata_set_feature(ata_ctlp, ata_drvp, FC_WRITE_CACHE_ON, 0)
+ == FALSE) {
+ path = kmem_alloc(MAXPATHLEN + 1, KM_NOSLEEP);
+ if (path != NULL) {
+ cmn_err(CE_WARN,
+ "%s unable to enable write cache targ=%d",
+ ddi_pathname(ata_ctlp->ac_dip, path),
+ ata_drvp->ad_targ);
+ kmem_free(path, MAXPATHLEN + 1);
+ }
+ }
+ } else if (ata_write_cache == -1) {
+ if (ata_set_feature(ata_ctlp, ata_drvp, FC_WRITE_CACHE_OFF, 0)
+ == FALSE) {
+ path = kmem_alloc(MAXPATHLEN + 1, KM_NOSLEEP);
+ if (path != NULL) {
+ cmn_err(CE_WARN,
+ "%s unable to disable write cache targ=%d",
+ ddi_pathname(ata_ctlp->ac_dip, path),
+ ata_drvp->ad_targ);
+ kmem_free(path, MAXPATHLEN + 1);
+ }
+ }
+ }
+}
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata_disk.h b/usr/src/uts/intel/io/dktp/controller/ata/ata_disk.h
new file mode 100644
index 0000000000..8bdba91cde
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata_disk.h
@@ -0,0 +1,97 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _ATA_DISK_H
+#define _ATA_DISK_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * ATA disk commands.
+ */
+
+#define ATC_SEEK 0x70 /* seek cmd, bottom 4 bits step rate */
+#define ATC_RDVER 0x40 /* read verify cmd */
+#define ATC_RDSEC 0x20 /* read sector cmd */
+#define ATC_RDLONG 0x23 /* read long without retry */
+#define ATC_WRSEC 0x30 /* write sector cmd */
+#define ATC_SETMULT 0xc6 /* set multiple mode */
+#define ATC_RDMULT 0xc4 /* read multiple */
+#define ATC_WRMULT 0xc5 /* write multiple */
+#define ATC_READ_DMA 0xc8 /* read (multiple) w/DMA */
+#define ATC_WRITE_DMA 0xca /* write (multiple) w/DMA */
+#define ATC_SETPARAM 0x91 /* set parameters command */
+#define ATC_ID_DEVICE 0xec /* IDENTIFY DEVICE command */
+#define ATC_ACK_MC 0xdb /* acknowledge media change */
+ /* ATA extended (48 bit) disk commands */
+#define ATC_RDSEC_EXT 0x24 /* read sector */
+#define ATC_RDMULT_EXT 0x29 /* read multiple */
+#define ATC_RDDMA_EXT 0x25 /* read DMA */
+#define ATC_WRSEC_EXT 0x34 /* write sector */
+#define ATC_WRMULT_EXT 0x39 /* write multiple */
+#define ATC_WRDMA_EXT 0x35 /* write DMA */
+
+/*
+ * Low bits for Read/Write commands...
+ */
+#define ATCM_ECCRETRY 0x01 /* Enable ECC and RETRY by controller */
+ /* enabled if bit is CLEARED!!! */
+#define ATCM_LONGMODE 0x02 /* Use Long Mode (get/send data & ECC) */
+
+#ifdef DADKIO_RWCMD_READ
+#define RWCMDP(pktp) ((struct dadkio_rwcmd *)((pktp)->cp_bp->b_back))
+#endif
+
+/* useful macros */
+
+#define CPKT2GCMD(cpkt) ((gcmd_t *)(cpkt)->cp_ctl_private)
+#define CPKT2APKT(cpkt) (GCMD2APKT(CPKT2GCMD(cpkt)))
+
+#define GCMD2CPKT(cmdp) ((struct cmpkt *)((cmdp)->cmd_pktp))
+#define APKT2CPKT(apkt) (GCMD2CPKT(APKT2GCMD(apkt)))
+
+/* public function prototypes */
+
+int ata_disk_attach(ata_ctl_t *ata_ctlp);
+void ata_disk_detach(ata_ctl_t *ata_ctlp);
+int ata_disk_init_drive(ata_drv_t *ata_drvp);
+void ata_disk_uninit_drive(ata_drv_t *ata_drvp);
+int ata_disk_id(ddi_acc_handle_t io_hdl1, caddr_t ioaddr1,
+ ddi_acc_handle_t io_hdl2, caddr_t ioaddr2,
+ struct ata_id *ata_idp);
+int ata_disk_bus_ctl(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t o,
+ void *a, void *v);
+int ata_disk_setup_parms(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ATA_DISK_H */
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata_dma.c b/usr/src/uts/intel/io/dktp/controller/ata/ata_dma.c
new file mode 100644
index 0000000000..5b7ed19407
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata_dma.c
@@ -0,0 +1,383 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/debug.h>
+
+#include "ata_common.h"
+#include "ata_disk.h"
+#include "atapi.h"
+#include "pciide.h"
+
+/*
+ * grab the PCI-IDE status byte
+ */
+#define PCIIDE_STATUS_GET(hdl, addr) \
+ ddi_get8((hdl), ((uchar_t *)(addr) + PCIIDE_BMISX_REG))
+
+/*
+ * DMA attributes for device I/O
+ */
+
+ddi_dma_attr_t ata_pciide_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffU, /* dma_attr_addr_hi */
+ 0xffff, /* dma_attr_count_max */
+ sizeof (int), /* dma_attr_align */
+ 1, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0x100 << SCTRSHFT, /* dma_attr_maxxfer */
+ /* note that this value can change */
+ /* based on max_transfer property */
+ 0xffff, /* dma_attr_seg */
+ ATA_DMA_NSEGS, /* dma_attr_sgllen */
+ 512, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+/*
+ * DMA attributes for the Bus Mastering PRD table
+ *
+ * The PRD table must not cross a 4K boundary.
+ *
+ * NOTE: the SFF-8038i spec says don't cross a 64K boundary, but some
+ * chip specs (e.g. Intel 82371AB, section 5.2.3) seem to think the
+ * spec says a 4K boundary. I don't know whether the 4K restriction is
+ * real or just a typo, so I've specified 4K just to be safe.
+ * The same Intel spec says the buffer must be 64K aligned; I don't
+ * believe that and have specified 4-byte alignment.
+ *
+ */
+
+#define PCIIDE_BOUNDARY (0x1000)
+
+ddi_dma_attr_t ata_prd_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0, /* dma_attr_addr_lo */
+ 0xffffffffU, /* dma_attr_addr_hi */
+ PCIIDE_BOUNDARY - 1, /* dma_attr_count_max */
+ sizeof (int), /* dma_attr_align */
+ 1, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ PCIIDE_BOUNDARY, /* dma_attr_maxxfer */
+ PCIIDE_BOUNDARY - 1, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+
+
+size_t prd_size = sizeof (prde_t) * ATA_DMA_NSEGS;
+
+int
+ata_pciide_alloc(
+ dev_info_t *dip,
+ ata_ctl_t *ata_ctlp)
+{
+ ddi_device_acc_attr_t dev_attr;
+ ddi_dma_cookie_t cookie;
+ size_t buf_size;
+ uint_t count;
+ int rc;
+
+ dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
+ dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
+ dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
+
+
+ rc = ddi_dma_alloc_handle(dip, &ata_prd_dma_attr, DDI_DMA_SLEEP, NULL,
+ &ata_ctlp->ac_sg_handle);
+ if (rc != DDI_SUCCESS) {
+ ADBG_ERROR(("ata_pciide_alloc 0x%p handle %d\n",
+ (void *)ata_ctlp, rc));
+ goto err3;
+ }
+
+ rc = ddi_dma_mem_alloc(ata_ctlp->ac_sg_handle, prd_size, &dev_attr,
+ DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
+ &ata_ctlp->ac_sg_list, &buf_size, &ata_ctlp->ac_sg_acc_handle);
+ if (rc != DDI_SUCCESS) {
+ ADBG_ERROR(("ata_pciide_alloc 0x%p mem %d\n",
+ (void *)ata_ctlp, rc));
+ goto err2;
+ }
+
+ rc = ddi_dma_addr_bind_handle(ata_ctlp->ac_sg_handle, NULL,
+ ata_ctlp->ac_sg_list, buf_size,
+ DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
+ DDI_DMA_SLEEP, NULL, &cookie, &count);
+ if (rc != DDI_DMA_MAPPED) {
+ ADBG_ERROR(("ata_pciide_alloc 0x%p bind %d\n",
+ (void *)ata_ctlp, rc));
+ goto err1;
+ }
+
+ ASSERT(count == 1);
+ ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
+#define Mask4K 0xfffff000
+ ASSERT((cookie.dmac_address & Mask4K)
+ == ((cookie.dmac_address + cookie.dmac_size - 1) & Mask4K));
+
+ ata_ctlp->ac_sg_paddr = cookie.dmac_address;
+ return (TRUE);
+err1:
+ ddi_dma_mem_free(&ata_ctlp->ac_sg_acc_handle);
+ ata_ctlp->ac_sg_acc_handle = NULL;
+err2:
+ ddi_dma_free_handle(&ata_ctlp->ac_sg_handle);
+ ata_ctlp->ac_sg_handle = NULL;
+err3:
+ return (FALSE);
+}
+
+
+void
+ata_pciide_free(ata_ctl_t *ata_ctlp)
+{
+ if (ata_ctlp->ac_sg_handle == NULL)
+ return;
+
+ (void) ddi_dma_unbind_handle(ata_ctlp->ac_sg_handle);
+ ddi_dma_mem_free(&ata_ctlp->ac_sg_acc_handle);
+ ddi_dma_free_handle(&ata_ctlp->ac_sg_handle);
+ ata_ctlp->ac_sg_handle = NULL;
+ ata_ctlp->ac_sg_acc_handle = NULL;
+}
+
+
+
+void
+ata_pciide_dma_setup(
+ ata_ctl_t *ata_ctlp,
+ prde_t *srcp,
+ int sg_cnt)
+{
+ ddi_acc_handle_t bmhandle = ata_ctlp->ac_bmhandle;
+ caddr_t bmaddr = ata_ctlp->ac_bmaddr;
+ ddi_acc_handle_t sg_acc_handle = ata_ctlp->ac_sg_acc_handle;
+ uint_t *dstp = (uint_t *)ata_ctlp->ac_sg_list;
+ int idx;
+
+ ASSERT(dstp != 0);
+ ASSERT(sg_cnt != 0);
+
+ ADBG_DMA(("ata dma_setup 0x%p 0x%p %d\n", ata_ctlp, srcp, sg_cnt));
+ /*
+	 * Copy the PRD list to the controller's phys buffer.
+ * Copying to a fixed location avoids having to check
+ * every ata_pkt for alignment and page boundaries.
+ */
+ for (idx = 0; idx < sg_cnt - 1; idx++, srcp++) {
+ ddi_put32(sg_acc_handle, dstp++, srcp->p_address);
+ ddi_put32(sg_acc_handle, dstp++, srcp->p_count);
+ }
+
+ /*
+ * set the end of table flag in the last entry
+ */
+ srcp->p_count |= PCIIDE_PRDE_EOT;
+ ddi_put32(sg_acc_handle, dstp++, srcp->p_address);
+ ddi_put32(sg_acc_handle, dstp++, srcp->p_count);
+
+ /*
+ * give the pciide chip the physical address of the PRDE table
+ */
+ ddi_put32(bmhandle, (uint_t *)(bmaddr + PCIIDE_BMIDTPX_REG),
+ ata_ctlp->ac_sg_paddr);
+
+ ADBG_DMA(("ata dma_setup 0x%p 0x%llx\n",
+ bmaddr, (unsigned long long)ata_ctlp->ac_sg_paddr));
+}
+
+
+
+void
+ata_pciide_dma_start(
+ ata_ctl_t *ata_ctlp,
+ uchar_t direction)
+{
+ ddi_acc_handle_t bmhandle = ata_ctlp->ac_bmhandle;
+ caddr_t bmaddr = ata_ctlp->ac_bmaddr;
+ uchar_t tmp;
+
+ ASSERT((ata_ctlp->ac_sg_paddr & PCIIDE_BMIDTPX_MASK) == 0);
+ ASSERT((direction == PCIIDE_BMICX_RWCON_WRITE_TO_MEMORY) ||
+ (direction == PCIIDE_BMICX_RWCON_READ_FROM_MEMORY));
+
+ /*
+ * Set the direction control and start the PCIIDE DMA controller
+ */
+ tmp = ddi_get8(bmhandle, (uchar_t *)bmaddr + PCIIDE_BMICX_REG);
+ tmp &= PCIIDE_BMICX_MASK;
+ ddi_put8(bmhandle, (uchar_t *)bmaddr + PCIIDE_BMICX_REG,
+ (tmp | direction));
+
+ ddi_put8(bmhandle, (uchar_t *)bmaddr + PCIIDE_BMICX_REG,
+ (tmp | PCIIDE_BMICX_SSBM_E | direction));
+
+ return;
+
+}
+
+
+void
+ata_pciide_dma_stop(
+ ata_ctl_t *ata_ctlp)
+{
+ ddi_acc_handle_t bmhandle = ata_ctlp->ac_bmhandle;
+ caddr_t bmaddr = ata_ctlp->ac_bmaddr;
+ uchar_t tmp;
+
+ /*
+ * Stop the PCIIDE DMA controller
+ */
+ tmp = ddi_get8(bmhandle, (uchar_t *)bmaddr + PCIIDE_BMICX_REG);
+ tmp &= (PCIIDE_BMICX_MASK & (~PCIIDE_BMICX_SSBM));
+
+ ADBG_DMA(("ata_pciide_dma_stop 0x%p 0x%x\n", bmaddr, tmp));
+
+ ddi_put8(bmhandle, (uchar_t *)bmaddr + PCIIDE_BMICX_REG, tmp);
+}
+
+/* ARGSUSED */
+void
+ata_pciide_dma_sg_func(
+ gcmd_t *gcmdp,
+ ddi_dma_cookie_t *dmackp,
+ int single_segment,
+ int seg_index)
+{
+ ata_pkt_t *ata_pktp = GCMD2APKT(gcmdp);
+ prde_t *dmap;
+
+ ASSERT(seg_index < ATA_DMA_NSEGS);
+ ASSERT(((uint_t)dmackp->dmac_address & PCIIDE_PRDE_ADDR_MASK) == 0);
+ ASSERT((dmackp->dmac_size & PCIIDE_PRDE_CNT_MASK) == 0);
+ ASSERT(dmackp->dmac_size <= PCIIDE_PRDE_CNT_MAX);
+
+ ADBG_TRACE(("adp_dma_sg_func: gcmdp 0x%p dmackp 0x%p s %d idx %d\n",
+ gcmdp, dmackp, single_segment, seg_index));
+
+ /* set address of current entry in scatter/gather list */
+ dmap = ata_pktp->ap_sg_list + seg_index;
+
+ /* store the phys addr and count from the cookie */
+ dmap->p_address = (uint_t)dmackp->dmac_address;
+ dmap->p_count = (uint_t)dmackp->dmac_size;
+
+ /* save the count of scatter/gather segments */
+ ata_pktp->ap_sg_cnt = seg_index + 1;
+
+ /* compute the total bytes in this request */
+ if (seg_index == 0)
+ ata_pktp->ap_bcount = 0;
+ ata_pktp->ap_bcount += dmackp->dmac_size;
+}
+
+
+
+int
+ata_pciide_status_clear(
+ ata_ctl_t *ata_ctlp)
+{
+ ddi_acc_handle_t bmhandle = ata_ctlp->ac_bmhandle;
+ caddr_t bmaddr = ata_ctlp->ac_bmaddr;
+ uchar_t status;
+ uchar_t tmp;
+
+ /*
+ * Get the current PCIIDE status
+ */
+ status = PCIIDE_STATUS_GET(ata_ctlp->ac_bmhandle, ata_ctlp->ac_bmaddr);
+ tmp = status & PCIIDE_BMISX_MASK;
+ tmp |= (PCIIDE_BMISX_IDERR | PCIIDE_BMISX_IDEINTS);
+
+ ADBG_DMA(("ata_pciide_status_clear 0x%p 0x%x\n",
+ bmaddr, status));
+
+ /*
+ * Clear the latches (and preserve the other bits)
+ */
+ ddi_put8(bmhandle, (uchar_t *)bmaddr + PCIIDE_BMISX_REG, tmp);
+
+#ifdef NAT_SEMI_PC87415_BUG
+ /* ??? chip errata ??? */
+ if (ata_ctlp->ac_nat_semi_bug) {
+ tmp = ddi_get8(bmhandle, bmaddr + PCIIDE_BMICX_REG);
+ tmp &= PCIIDE_BMICX_MASK;
+ ddi_put8(bmhandle, bmaddr + PCIIDE_BMICX_REG,
+ (tmp | PCIIDE_BMISX_IDERR | PCIIDE_BMISX_IDEINTS));
+ }
+#endif
+ return (status);
+}
+
+int
+ata_pciide_status_dmacheck_clear(
+ ata_ctl_t *ata_ctlp)
+{
+ uchar_t status;
+
+ /*
+ * Get the PCIIDE DMA controller's current status
+ */
+ status = ata_pciide_status_clear(ata_ctlp);
+
+ ADBG_DMA(("ata_pciide_status_dmacheck_clear 0x%p 0x%x\n",
+ ata_ctlp->ac_bmaddr, status));
+ /*
+ * check for errors
+ */
+ if (status & PCIIDE_BMISX_IDERR) {
+ ADBG_WARN(("ata_pciide_status: 0x%x\n", status));
+ return (TRUE);
+ }
+ return (FALSE);
+}
+
+
+
+/*
+ * Check for a pending PCI-IDE interrupt
+ */
+
+int
+ata_pciide_status_pending(
+ ata_ctl_t *ata_ctlp)
+{
+ uchar_t status;
+
+ status = PCIIDE_STATUS_GET(ata_ctlp->ac_bmhandle, ata_ctlp->ac_bmaddr);
+ ADBG_DMA(("ata_pciide_status_pending 0x%p 0x%x\n",
+ ata_ctlp->ac_bmaddr, status));
+ if (status & PCIIDE_BMISX_IDEINTS)
+ return (TRUE);
+ return (FALSE);
+}
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/ata_fsm.h b/usr/src/uts/intel/io/dktp/controller/ata/ata_fsm.h
new file mode 100644
index 0000000000..4d4bbef7ca
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/ata_fsm.h
@@ -0,0 +1,159 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 1997 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _ATA_FSM_H
+#define _ATA_FSM_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ *
+ * The interrupt reason can be interpreted from other bits as follows:
+ *
+ * IO CoD DRQ
+ * -- --- ---
+ * 0 0 1 == 1 Data to device
+ * 0 1 0 == 2 Idle
+ * 0 1 1 == 3 Send ATAPI CDB to device
+ * 1 0 1 == 5 Data from device
+ * 1 1 0 == 6 Status ready
+ * 1 1 1 == 7 Future use
+ *
+ */
+
+/*
+ * This macro encodes the interrupt reason into a one byte
+ * event code which is used to index the FSM tables
+ */
+#define ATAPI_EVENT(drq, intr) \
+ (((unsigned char)((drq) & ATS_DRQ) >> 3) \
+ | (((intr) & (ATI_IO | ATI_COD)) << 1))
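+
+/*
+ * For example, per the table above, a data-in phase (DRQ set, I/O set,
+ * C/D clear) encodes to event code 5 ("Data from device"); that value
+ * is given the name ATAPI_EVENT_PIO_IN below.
+ */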
+
+/*
+ * These are the names for the encoded ATAPI events
+ */
+#define ATAPI_EVENT_0 0
+#define ATAPI_EVENT_IDLE ATAPI_EVENT(0, ATI_COD)
+#define ATAPI_EVENT_2 2
+#define ATAPI_EVENT_STATUS ATAPI_EVENT(0, ATI_IO | ATI_COD)
+#define ATAPI_EVENT_PIO_OUT ATAPI_EVENT(ATS_DRQ, 0)
+#define ATAPI_EVENT_CDB ATAPI_EVENT(ATS_DRQ, ATI_COD)
+#define ATAPI_EVENT_PIO_IN ATAPI_EVENT(ATS_DRQ, ATI_IO)
+#define ATAPI_EVENT_UNKNOWN ATAPI_EVENT(ATS_DRQ, (ATI_IO | ATI_COD))
+
+#define ATAPI_NEVENTS 8
+
+/*
+ * Actions for the ATAPI PIO FSM
+ *
+ */
+
+enum {
+ A_UNK, /* invalid event detected */
+ A_NADA, /* do nothing */
+ A_CDB, /* send the CDB */
+	A_IN,	/* transfer data in from the device */
+	A_OUT,	/* transfer data out to the device */
+ A_IDLE, /* unexpected idle phase */
+ A_RE, /* read the error code register */
+ A_REX /* alternate read the error code register */
+};
+
+/*
+ * States for the ATAPI PIO FSM
+ */
+
+enum {
+ S_IDLE, /* idle or fatal error state */
+ S_CMD, /* command byte sent */
+ S_CDB, /* CDB sent */
+ S_IN, /* transferring data in from device */
+ S_OUT, /* transferring data out to device */
+ S_DMA, /* dma transfer active */
+
+ ATAPI_NSTATES
+};
+
+#define S_X S_IDLE /* alias for idle */
+
+/*
+ * controller and device functions
+ */
+enum {
+ ATA_FSM_START0,
+ ATA_FSM_START1,
+ ATA_FSM_INTR,
+ ATA_FSM_FINI,
+ ATA_FSM_RESET,
+
+ ATA_CTLR_NFUNCS
+};
+
+
+/*
+ * FSM return codes
+ */
+enum {
+ ATA_FSM_RC_OKAY,
+ ATA_FSM_RC_BUSY,
+ ATA_FSM_RC_INTR,
+ ATA_FSM_RC_FINI
+};
+
+/*
+ * states for the controller FSM
+ */
+enum {
+ AS_IDLE,
+ AS_ACTIVE0,
+ AS_ACTIVE1,
+
+ ATA_CTLR_NSTATES
+};
+
+/*
+ * actions for the controller FSM
+ */
+enum {
+ AC_NADA,
+ AC_START,
+ AC_INTR,
+ AC_FINI,
+ AC_BUSY,
+ AC_RESET_I,
+ AC_RESET_A
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ATA_FSM_H */
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/atapi.c b/usr/src/uts/intel/io/dktp/controller/ata/atapi.c
new file mode 100644
index 0000000000..0a0f01350a
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/atapi.c
@@ -0,0 +1,1174 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+
+#include "ata_common.h"
+#include "atapi.h"
+
+/* SCSA entry points */
+
+static int atapi_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
+ scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
+static int atapi_tran_tgt_probe(struct scsi_device *sd, int (*callback)(void));
+static void atapi_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
+ scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
+static int atapi_tran_abort(struct scsi_address *ap, struct scsi_pkt *spktp);
+static int atapi_tran_reset(struct scsi_address *ap, int level);
+static int atapi_tran_getcap(struct scsi_address *ap, char *capstr, int whom);
+static int atapi_tran_setcap(struct scsi_address *ap, char *capstr,
+ int value, int whom);
+static struct scsi_pkt *atapi_tran_init_pkt(struct scsi_address *ap,
+ struct scsi_pkt *spktp, struct buf *bp, int cmdlen, int statuslen,
+ int tgtlen, int flags, int (*callback)(caddr_t), caddr_t arg);
+static void atapi_tran_destroy_pkt(struct scsi_address *ap,
+ struct scsi_pkt *spktp);
+static void atapi_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *spktp);
+static void atapi_tran_sync_pkt(struct scsi_address *ap,
+ struct scsi_pkt *spktp);
+static int atapi_tran_start(struct scsi_address *ap, struct scsi_pkt *spktp);
+
+/*
+ * packet callbacks
+ */
+static void atapi_complete(ata_drv_t *ata_drvp, ata_pkt_t *ata_pktp,
+ int do_callback);
+static int atapi_id_update(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+
+
+/* external dependencies */
+
+char _depends_on[] = "misc/scsi";
+
+/*
+ * Local static data
+ */
+
+#if 0
+static ddi_dma_lim_t atapi_dma_limits = {
+ 0, /* address low */
+ 0xffffffffU, /* address high */
+ 0, /* counter max */
+ 1, /* burstsize */
+ DMA_UNIT_8, /* minimum xfer */
+ 0, /* dma speed */
+ (uint_t)DMALIM_VER0, /* version */
+ 0xffffffffU, /* address register */
+ 0xffffffffU, /* counter register */
+ 1, /* granular */
+ 1, /* scatter/gather list length */
+ 0xffffffffU /* request size */
+};
+#endif
+
+static int atapi_use_static_geometry = TRUE;
+static int atapi_arq_enable = TRUE;
+
+
+/*
+ *
+ * Call SCSA init to initialize the ATAPI half of the driver
+ *
+ */
+
+int
+atapi_attach(ata_ctl_t *ata_ctlp)
+{
+ dev_info_t *dip = ata_ctlp->ac_dip;
+ scsi_hba_tran_t *tran;
+
+ ADBG_TRACE(("atapi_init entered\n"));
+
+ /* allocate transport structure */
+
+ tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
+
+ if (tran == NULL) {
+ ADBG_WARN(("atapi_init: scsi_hba_tran_alloc failed\n"));
+ goto errout;
+ }
+
+ ata_ctlp->ac_atapi_tran = tran;
+ ata_ctlp->ac_flags |= AC_SCSI_HBA_TRAN_ALLOC;
+
+ /* initialize transport structure */
+
+ tran->tran_hba_private = ata_ctlp;
+ tran->tran_tgt_private = NULL;
+
+ tran->tran_tgt_init = atapi_tran_tgt_init;
+ tran->tran_tgt_probe = atapi_tran_tgt_probe;
+ tran->tran_tgt_free = atapi_tran_tgt_free;
+ tran->tran_start = atapi_tran_start;
+ tran->tran_reset = atapi_tran_reset;
+ tran->tran_abort = atapi_tran_abort;
+ tran->tran_getcap = atapi_tran_getcap;
+ tran->tran_setcap = atapi_tran_setcap;
+ tran->tran_init_pkt = atapi_tran_init_pkt;
+ tran->tran_destroy_pkt = atapi_tran_destroy_pkt;
+ tran->tran_dmafree = atapi_tran_dmafree;
+ tran->tran_sync_pkt = atapi_tran_sync_pkt;
+
+ if (scsi_hba_attach_setup(ata_ctlp->ac_dip, &ata_pciide_dma_attr, tran,
+ SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
+ ADBG_WARN(("atapi_init: scsi_hba_attach_setup failed\n"));
+ goto errout;
+ }
+
+ ata_ctlp->ac_flags |= AC_SCSI_HBA_ATTACH;
+
+ return (TRUE);
+
+errout:
+ atapi_detach(ata_ctlp);
+ return (FALSE);
+}
+
+
+/*
+ *
+ * destroy the atapi sub-system
+ *
+ */
+
+void
+atapi_detach(
+ ata_ctl_t *ata_ctlp)
+{
+ ADBG_TRACE(("atapi_detach entered\n"));
+
+ if (ata_ctlp->ac_flags & AC_SCSI_HBA_ATTACH)
+ scsi_hba_detach(ata_ctlp->ac_dip);
+
+ if (ata_ctlp->ac_flags & AC_SCSI_HBA_TRAN_ALLOC)
+ scsi_hba_tran_free(ata_ctlp->ac_atapi_tran);
+}
+
+
+
+/*
+ *
+ * initialize the ATAPI drive's soft-state based on the
+ * response to IDENTIFY PACKET DEVICE command
+ *
+ */
+
+int
+atapi_init_drive(
+ ata_drv_t *ata_drvp)
+{
+ ADBG_TRACE(("atapi_init_drive entered\n"));
+
+ /* Determine ATAPI CDB size */
+
+ switch (ata_drvp->ad_id.ai_config & ATAPI_ID_CFG_PKT_SZ) {
+
+ case ATAPI_ID_CFG_PKT_12B:
+ ata_drvp->ad_cdb_len = 12;
+ break;
+ case ATAPI_ID_CFG_PKT_16B:
+ ata_drvp->ad_cdb_len = 16;
+ break;
+ default:
+ ADBG_WARN(("atapi_init_drive: bad pkt size support\n"));
+ return (FALSE);
+ }
+
+ /* determine if drive gives an intr when it wants the CDB */
+
+ if ((ata_drvp->ad_id.ai_config & ATAPI_ID_CFG_DRQ_TYPE) !=
+ ATAPI_ID_CFG_DRQ_INTR)
+ ata_drvp->ad_flags |= AD_NO_CDB_INTR;
+
+ return (TRUE);
+}
+
+
+/*
+ *
+ * destroy an atapi drive
+ *
+ */
+
+/* ARGSUSED */
+void
+atapi_uninit_drive(
+ ata_drv_t *ata_drvp)
+{
+ ADBG_TRACE(("atapi_uninit_drive entered\n"));
+}
+
+/*
+ *
+ * Issue an IDENTIFY PACKET (ATAPI) DEVICE command
+ *
+ */
+
+int
+atapi_id(
+ ddi_acc_handle_t io_hdl1,
+ caddr_t ioaddr1,
+ ddi_acc_handle_t io_hdl2,
+ caddr_t ioaddr2,
+ struct ata_id *ata_idp)
+{
+ int rc;
+
+ ADBG_TRACE(("atapi_id entered\n"));
+
+ rc = ata_id_common(ATC_ID_PACKET_DEVICE, FALSE, io_hdl1, ioaddr1,
+ io_hdl2, ioaddr2, ata_idp);
+
+ if (!rc)
+ return (FALSE);
+
+ if ((ata_idp->ai_config & ATAC_ATAPI_TYPE_MASK) != ATAC_ATAPI_TYPE)
+ return (FALSE);
+
+ return (TRUE);
+}
+
+
+/*
+ *
+ * Check the device's register block for the ATAPI signature.
+ *
+ * Although the spec says the sector count, sector number and device/head
+ * registers are also part of the signature, for some unknown reason, this
+ * routine only checks the cyl hi and cyl low registers. I'm just
+ * guessing, but it might be because ATA and ATAPI devices return
+ * identical values in those registers and we actually rely on the
+ * IDENTIFY DEVICE and IDENTIFY PACKET DEVICE commands to recognize the
+ * device type.
+ *
+ */
+
+int
+atapi_signature(
+ ddi_acc_handle_t io_hdl,
+ caddr_t ioaddr)
+{
+ int rc = FALSE;
+ ADBG_TRACE(("atapi_signature entered\n"));
+
+ if (ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_HCYL) == ATAPI_SIG_HI &&
+ ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_LCYL) != ATAPI_SIG_LO)
+ rc = TRUE;
+
+ /*
+ * The following is a little bit of bullet proofing.
+ *
+ * When some drives are configured on a master-only bus they
+ * "shadow" their registers for the not-present slave drive.
+ * This is bogus and if you're not careful it may cause a
+ * master-only drive to be mistakenly recognized as both
+ * master and slave. By clearing the signature registers here
+ * I can make certain that when ata_drive_type() switches from
+ * the master to slave drive that I'll read back non-signature
+ * values regardless of whether the master-only drive does
+ * the "shadow" register trick. This prevents issuing a bogus
+ * IDENTIFY PACKET DEVICE command for which a really bogus
+ * master-only drive would return "shadow" data.
+ */
+ ddi_put8(io_hdl, (uchar_t *)ioaddr + AT_HCYL, 0);
+ ddi_put8(io_hdl, (uchar_t *)ioaddr + AT_LCYL, 0);
+
+ return (rc);
+}
+
+
+/*
+ *
+ * SCSA tran_tgt_init entry point
+ *
+ */
+
+/* ARGSUSED */
+static int
+atapi_tran_tgt_init(
+ dev_info_t *hba_dip,
+ dev_info_t *tgt_dip,
+ scsi_hba_tran_t *hba_tran,
+ struct scsi_device *sd)
+{
+ gtgt_t *gtgtp; /* GHD's per-target-instance structure */
+ ata_ctl_t *ata_ctlp;
+ ata_tgt_t *ata_tgtp;
+ ata_drv_t *ata_drvp;
+ struct scsi_address *ap;
+ int rc = DDI_SUCCESS;
+
+ ADBG_TRACE(("atapi_tran_tgt_init entered\n"));
+
+ /*
+ * Qualification of targ, lun, and ATAPI device presence
+ * have already been taken care of by ata_bus_ctl
+ */
+
+ /* store pointer to drive struct in cloned tran struct */
+
+ ata_ctlp = TRAN2CTL(hba_tran);
+ ap = &sd->sd_address;
+
+ ata_drvp = CTL2DRV(ata_ctlp, ap->a_target, ap->a_lun);
+
+ /*
+ * Create the "atapi" property so the target driver knows
+ * to use the correct set of SCSI commands
+ */
+ if (!ata_prop_create(tgt_dip, ata_drvp, "atapi")) {
+ return (DDI_FAILURE);
+ }
+
+ gtgtp = ghd_target_init(hba_dip, tgt_dip, &ata_ctlp->ac_ccc,
+ sizeof (ata_tgt_t), ata_ctlp,
+ ap->a_target, ap->a_lun);
+
+ /* tran_tgt_private points to gtgt_t */
+ hba_tran->tran_tgt_private = gtgtp;
+
+ /* gt_tgt_private points to ata_tgt_t */
+ ata_tgtp = GTGTP2ATATGTP(gtgtp);
+
+ /* initialize the per-target-instance data */
+ ata_tgtp->at_drvp = ata_drvp;
+ ata_tgtp->at_dma_attr = ata_pciide_dma_attr;
+ ata_tgtp->at_dma_attr.dma_attr_maxxfer =
+ ata_ctlp->ac_max_transfer << SCTRSHFT;
+
+ return (rc);
+}
+
+
+/*
+ *
+ * SCSA tran_tgt_probe entry point
+ *
+ */
+
+static int
+atapi_tran_tgt_probe(struct scsi_device *sd, int (*callback)(void))
+{
+ ADBG_TRACE(("atapi_tran_tgt_probe entered\n"));
+
+ return (scsi_hba_probe(sd, callback));
+}
+
+
+/*
+ *
+ * SCSA tran_tgt_free entry point
+ *
+ */
+
+/* ARGSUSED */
+static void
+atapi_tran_tgt_free(
+ dev_info_t *hba_dip,
+ dev_info_t *tgt_dip,
+ scsi_hba_tran_t *hba_tran,
+ struct scsi_device *sd)
+{
+ ADBG_TRACE(("atapi_tran_tgt_free entered\n"));
+
+ ghd_target_free(hba_dip, tgt_dip, &TRAN2ATAP(hba_tran)->ac_ccc,
+ TRAN2GTGTP(hba_tran));
+ hba_tran->tran_tgt_private = NULL;
+}
+
+
+
+/*
+ *
+ * SCSA tran_abort entry point
+ *
+ */
+
+/* ARGSUSED */
+static int
+atapi_tran_abort(
+ struct scsi_address *ap,
+ struct scsi_pkt *spktp)
+{
+ ADBG_TRACE(("atapi_tran_abort entered\n"));
+
+ if (spktp) {
+ return (ghd_tran_abort(&ADDR2CTL(ap)->ac_ccc, PKTP2GCMDP(spktp),
+ ADDR2GTGTP(ap), NULL));
+ }
+
+ return (ghd_tran_abort_lun(&ADDR2CTL(ap)->ac_ccc, ADDR2GTGTP(ap),
+ NULL));
+}
+
+
+/*
+ *
+ * SCSA tran_reset entry point
+ *
+ */
+
+/* ARGSUSED */
+static int
+atapi_tran_reset(
+ struct scsi_address *ap,
+ int level)
+{
+ ADBG_TRACE(("atapi_tran_reset entered\n"));
+
+ if (level == RESET_TARGET)
+ return (ghd_tran_reset_target(&ADDR2CTL(ap)->ac_ccc,
+ ADDR2GTGTP(ap), NULL));
+ if (level == RESET_ALL)
+ return (ghd_tran_reset_bus(&ADDR2CTL(ap)->ac_ccc,
+ ADDR2GTGTP(ap), NULL));
+ return (FALSE);
+
+}
+
+
+/*
+ *
+ * SCSA tran_setcap entry point
+ *
+ */
+
+static int
+atapi_tran_setcap(
+ struct scsi_address *ap,
+ char *capstr,
+ int value,
+ int whom)
+{
+ gtgt_t *gtgtp = ADDR2GTGTP(ap);
+ ata_tgt_t *tgtp = GTGTP2ATATGTP(gtgtp);
+
+ ADBG_TRACE(("atapi_tran_setcap entered\n"));
+
+ switch (scsi_hba_lookup_capstr(capstr)) {
+ case SCSI_CAP_SECTOR_SIZE:
+ tgtp->at_dma_attr.dma_attr_granular = (uint_t)value;
+ return (TRUE);
+
+ case SCSI_CAP_ARQ:
+ if (whom) {
+ tgtp->at_arq = value;
+ return (TRUE);
+ }
+ break;
+
+ case SCSI_CAP_TOTAL_SECTORS:
+ tgtp->at_total_sectors = value;
+ return (TRUE);
+ }
+ return (FALSE);
+}
+
+
+/*
+ *
+ * SCSA tran_getcap entry point
+ *
+ */
+
+static int
+atapi_tran_getcap(
+ struct scsi_address *ap,
+ char *capstr,
+ int whom)
+{
+ struct ata_id ata_id;
+ struct ata_id *ata_idp;
+ ata_ctl_t *ata_ctlp;
+ ata_drv_t *ata_drvp;
+ gtgt_t *gtgtp;
+ int rval = -1;
+
+ ADBG_TRACE(("atapi_tran_getcap entered\n"));
+
+ if (capstr == NULL || whom == 0)
+ return (-1);
+
+ ata_ctlp = ADDR2CTL(ap);
+
+ switch (scsi_hba_lookup_capstr(capstr)) {
+ case SCSI_CAP_ARQ:
+ rval = TRUE;
+ break;
+
+ case SCSI_CAP_INITIATOR_ID:
+ rval = 7;
+ break;
+
+ case SCSI_CAP_DMA_MAX:
+ /* XXX - what should the real limit be?? */
+ /* limit to 64K ??? */
+ rval = 4096 * (ATA_DMA_NSEGS - 1);
+ break;
+
+ case SCSI_CAP_GEOMETRY:
+ /* Default geometry */
+ if (atapi_use_static_geometry) {
+ rval = ATAPI_HEADS << 16 | ATAPI_SECTORS_PER_TRK;
+ break;
+ }
+
+ /* this code is currently not used */
+
+ ata_drvp = CTL2DRV(ata_ctlp, ap->a_target, ap->a_lun);
+ gtgtp = ADDR2GTGTP(ap);
+
+ /*
+ * retrieve the current IDENTIFY PACKET DEVICE info
+ */
+ if (!ata_queue_cmd(atapi_id_update, &ata_id, ata_ctlp,
+ ata_drvp, gtgtp)) {
+ ADBG_TRACE(("atapi_tran_getcap geometry failed"));
+ return (0);
+ }
+
+ /*
+ * save the new response data
+ */
+ ata_idp = &ata_drvp->ad_id;
+ *ata_idp = ata_id;
+
+ switch ((ata_idp->ai_config >> 8) & 0xf) {
+ case DTYPE_RODIRECT:
+ rval = ATAPI_HEADS << 16 | ATAPI_SECTORS_PER_TRK;
+ break;
+ case DTYPE_DIRECT:
+ case DTYPE_OPTICAL:
+ rval = (ata_idp->ai_curheads << 16) |
+ ata_idp->ai_cursectrk;
+ break;
+ default:
+ rval = 0;
+ }
+ break;
+ }
+
+ return (rval);
+}
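A sketch of the consumer side of the SCSI_CAP_GEOMETRY value assembled above (illustrative only; scsi_ifgetcap() is the standard SCSA accessor, and the cylinder count is left to whatever capacity source the target driver uses):

/*
 * Illustrative sketch, not part of this change: decoding the packed
 * geometry on the target-driver side (64 heads, 32 sectors/track in
 * the static case).
 */
int geom = scsi_ifgetcap(ap, "geometry", 1);
int heads = (geom >> 16) & 0xffff;	/* 64 */
int sectors = geom & 0xffff;		/* 32 */
/* cylinders are then derived as total capacity / (heads * sectors) */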
+
+
+
+/*
+ *
+ * SCSA tran_init_pkt entry point
+ *
+ */
+
+static struct scsi_pkt *
+atapi_tran_init_pkt(
+ struct scsi_address *ap,
+ struct scsi_pkt *spktp,
+ struct buf *bp,
+ int cmdlen,
+ int statuslen,
+ int tgtlen,
+ int flags,
+ int (*callback)(caddr_t),
+ caddr_t arg)
+{
+ gtgt_t *gtgtp = ADDR2GTGTP(ap);
+ ata_tgt_t *ata_tgtp = GTGTP2ATATGTP(gtgtp);
+ ata_ctl_t *ata_ctlp = ADDR2CTL(ap);
+ ata_pkt_t *ata_pktp;
+ struct scsi_pkt *new_spktp;
+ ddi_dma_attr_t *sg_attrp;
+ int bytes;
+
+ ADBG_TRACE(("atapi_tran_init_pkt entered\n"));
+
+
+ /*
+ * Determine whether to do PCI-IDE DMA setup, start out by
+ * assuming we're not.
+ */
+ sg_attrp = NULL;
+
+ if (bp == NULL) {
+ /* no data to transfer */
+ goto skip_dma_setup;
+ }
+
+ if (bp->b_bcount == 0) {
+ /* no data to transfer */
+ goto skip_dma_setup;
+ }
+
+ if ((GTGTP2ATADRVP(ADDR2GTGTP(ap))->ad_pciide_dma == ATA_DMA_OFF)) {
+ goto skip_dma_setup;
+ }
+
+ if (ata_dma_disabled)
+ goto skip_dma_setup;
+
+
+ /*
+ * The PCI-IDE DMA engine is brain-damaged and can't
+ * DMA non-aligned buffers.
+ */
+ if (((bp->b_flags & B_PAGEIO) == 0) &&
+ ((uintptr_t)bp->b_un.b_addr) & PCIIDE_PRDE_ADDR_MASK) {
+ /*
+ * if the virtual address isn't aligned, then the
+ * physical address also isn't aligned.
+ */
+ goto skip_dma_setup;
+ }
+
+ /*
+ * It also insists that the byte count must be even.
+ */
+ if (bp->b_bcount & 1) {
+ /* something odd here */
+ goto skip_dma_setup;
+ }
+
+ /*
+ * Huzza! We're really going to do it
+ */
+ sg_attrp = &ata_tgtp->at_dma_attr;
+
+
+skip_dma_setup:
+
+ /*
+ * Call GHD packet init function
+ */
+
+ new_spktp = ghd_tran_init_pkt_attr(&ata_ctlp->ac_ccc, ap, spktp, bp,
+ cmdlen, statuslen, tgtlen, flags,
+ callback, arg, sizeof (ata_pkt_t), sg_attrp);
+
+ if (new_spktp == NULL)
+ return (NULL);
+
+ ata_pktp = SPKT2APKT(new_spktp);
+ ata_pktp->ap_cdbp = new_spktp->pkt_cdbp;
+ ata_pktp->ap_statuslen = (uchar_t)statuslen;
+
+ /* reset data direction flags */
+ if (spktp)
+ ata_pktp->ap_flags &= ~(AP_READ | AP_WRITE);
+
+ /*
+ * check for ARQ mode
+ */
+ if (atapi_arq_enable == TRUE &&
+ ata_tgtp->at_arq == TRUE &&
+ ata_pktp->ap_statuslen >= sizeof (struct scsi_arq_status)) {
+ ADBG_TRACE(("atapi_tran_init_pkt ARQ\n"));
+ ata_pktp->ap_scbp =
+ (struct scsi_arq_status *)new_spktp->pkt_scbp;
+ ata_pktp->ap_flags |= AP_ARQ_ON_ERROR;
+ }
+
+ /*
+ * fill these with zeros for ATA/ATAPI-4 compatibility
+ */
+ ata_pktp->ap_sec = 0;
+ ata_pktp->ap_count = 0;
+
+ if (ata_pktp->ap_sg_cnt) {
+ ASSERT(bp != NULL);
+ /* determine direction to program the DMA engine later */
+ if (bp->b_flags & B_READ) {
+ ata_pktp->ap_flags |= AP_READ;
+ } else {
+ ata_pktp->ap_flags |= AP_WRITE;
+ }
+ ata_pktp->ap_pciide_dma = TRUE;
+ ata_pktp->ap_hicyl = 0;
+ ata_pktp->ap_lwcyl = 0;
+ return (new_spktp);
+ }
+
+ /*
+ * Since we're not using DMA, we need to map the buffer into
+ * kernel address space
+ */
+
+ ata_pktp->ap_pciide_dma = FALSE;
+ if (bp && bp->b_bcount) {
+ /*
+ * If this is a fresh request map the buffer and
+ * reset the ap_baddr pointer and the current offset
+ * and byte count.
+ *
+ * The ap_boffset is used to set the ap_v_addr ptr at
+ * the start of each I/O request.
+ *
+ * The ap_bcount is used to update ap_boffset when the
+ * target driver requests the next segment.
+ *
+ */
+ if (cmdlen) {
+ bp_mapin(bp);
+ ata_pktp->ap_baddr = bp->b_un.b_addr;
+ ata_pktp->ap_bcount = 0;
+ ata_pktp->ap_boffset = 0;
+ }
+ ASSERT(ata_pktp->ap_baddr != NULL);
+
+ /* determine direction for the PIO FSM */
+ if (bp->b_flags & B_READ) {
+ ata_pktp->ap_flags |= AP_READ;
+ } else {
+ ata_pktp->ap_flags |= AP_WRITE;
+ }
+
+ /*
+ * If the drive has the Single Sector bug, limit
+ * the transfer to a single sector. This assumes
+ * ATAPI CD drives always use 2k sectors.
+ */
+ if (GTGTP2ATADRVP(ADDR2GTGTP(ap))->ad_flags & AD_1SECTOR) {
+ size_t resid;
+ size_t tmp;
+
+ /* adjust offset based on prior request */
+ ata_pktp->ap_boffset += ata_pktp->ap_bcount;
+
+ /* compute number of bytes left to transfer */
+ resid = bp->b_bcount - ata_pktp->ap_boffset;
+
+ /* limit the transfer to 2k */
+ tmp = MIN(2048, resid);
+ ata_pktp->ap_bcount = tmp;
+
+ /* tell target driver how much is left for next time */
+ new_spktp->pkt_resid = resid - tmp;
+ } else {
+ /* do the whole request in one swell foop */
+ ata_pktp->ap_bcount = bp->b_bcount;
+ new_spktp->pkt_resid = 0;
+ }
+
+ } else {
+ ata_pktp->ap_baddr = NULL;
+ ata_pktp->ap_bcount = 0;
+ ata_pktp->ap_boffset = 0;
+ }
+
+ /*
+ * determine the size of each partial data transfer
+ * to/from the drive
+ */
+ bytes = min(ata_pktp->ap_bcount, ATAPI_MAX_BYTES_PER_DRQ);
+ ata_pktp->ap_hicyl = (uchar_t)(bytes >> 8);
+ ata_pktp->ap_lwcyl = (uchar_t)bytes;
+ return (new_spktp);
+}
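A worked example of the AD_1SECTOR splitting above may help; it assumes the target driver keeps re-invoking scsi_init_pkt() with the same pkt until pkt_resid reaches zero, which is how SCSA partial transfers are driven.

/*
 * Worked example (illustrative only): a 6144-byte bp on a drive with
 * the Single Sector bug is carved into 2048-byte pieces:
 *
 *	call 1:  ap_boffset = 0     ap_bcount = 2048  pkt_resid = 4096
 *	call 2:  ap_boffset = 2048  ap_bcount = 2048  pkt_resid = 2048
 *	call 3:  ap_boffset = 4096  ap_bcount = 2048  pkt_resid = 0
 */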
+
+
+/*
+ * GHD ccballoc callback
+ *
+ * Initialize the ata_pkt and return the ptr to the gcmd_t to GHD.
+ *
+ */
+
+/* ARGSUSED */
+int
+atapi_ccballoc(
+ gtgt_t *gtgtp,
+ gcmd_t *gcmdp,
+ int cmdlen,
+ int statuslen,
+ int tgtlen,
+ int ccblen)
+
+{
+ ata_drv_t *ata_drvp = GTGTP2ATADRVP(gtgtp);
+ ata_pkt_t *ata_pktp = GCMD2APKT(gcmdp);
+
+ ADBG_TRACE(("atapi_ccballoc entered\n"));
+
+ /* set the back ptr from the ata_pkt to the gcmd_t */
+ ata_pktp->ap_gcmdp = gcmdp;
+
+ /* check length of SCSI CDB is not larger than drive expects */
+
+ if (cmdlen > ata_drvp->ad_cdb_len) {
+ ADBG_WARN(("atapi_ccballoc: SCSI CDB too large!\n"));
+ return (FALSE);
+ }
+
+ /*
+ * save the length of the SCSI CDB, and calculate the CDB padding;
+ * note that for convenience, the padding is expressed in shorts.
+ */
+
+ ata_pktp->ap_cdb_len = (uchar_t)cmdlen;
+ ata_pktp->ap_cdb_pad =
+ ((unsigned)(ata_drvp->ad_cdb_len - cmdlen)) >> 1;
+
+ /* set up callback functions */
+
+ ata_pktp->ap_start = atapi_fsm_start;
+ ata_pktp->ap_intr = atapi_fsm_intr;
+ ata_pktp->ap_complete = atapi_complete;
+
+ /* set-up for start */
+
+ ata_pktp->ap_flags = AP_ATAPI;
+ ata_pktp->ap_hd = ata_drvp->ad_drive_bits;
+ ata_pktp->ap_cmd = ATC_PACKET;
+
+ return (TRUE);
+}
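To make the padding arithmetic above concrete (a worked example, not new driver logic):

/*
 * Worked example (illustrative only): a 12-byte SCSI CDB sent to a
 * drive whose packet size is 16 bytes gives
 *
 *	ap_cdb_len = 12
 *	ap_cdb_pad = (16 - 12) >> 1 = 2 shorts
 *
 * so atapi_send_cdb() writes the six CDB words followed by two zero
 * words to fill out the 16-byte packet.
 */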
+
+
+
+/*
+ *
+ * SCSA tran_destroy_pkt entry point
+ *
+ */
+
+static void
+atapi_tran_destroy_pkt(
+ struct scsi_address *ap,
+ struct scsi_pkt *spktp)
+{
+ gcmd_t *gcmdp = PKTP2GCMDP(spktp);
+
+ ADBG_TRACE(("atapi_tran_destroy_pkt entered\n"));
+
+ if (gcmdp->cmd_dma_handle != NULL) {
+ ghd_dmafree_attr(gcmdp);
+ }
+
+ ghd_pktfree(&ADDR2CTL(ap)->ac_ccc, ap, spktp);
+}
+
+
+
+/*
+ *
+ * GHD ccbfree callback function
+ *
+ */
+
+/* ARGSUSED */
+void
+atapi_ccbfree(
+ gcmd_t *gcmdp)
+{
+ ADBG_TRACE(("atapi_ccbfree entered\n"));
+
+ /* nothing to do */
+}
+
+
+/*
+ *
+ * SCSA tran_dmafree entry point
+ *
+ */
+
+/*ARGSUSED*/
+static void
+atapi_tran_dmafree(
+ struct scsi_address *ap,
+ struct scsi_pkt *spktp)
+{
+ gcmd_t *gcmdp = PKTP2GCMDP(spktp);
+
+ ADBG_TRACE(("atapi_tran_dmafree entered\n"));
+
+ if (gcmdp->cmd_dma_handle != NULL) {
+ ghd_dmafree_attr(gcmdp);
+ }
+}
+
+
+
+/*
+ *
+ * SCSA tran_sync_pkt entry point
+ *
+ */
+
+/*ARGSUSED*/
+static void
+atapi_tran_sync_pkt(
+ struct scsi_address *ap,
+ struct scsi_pkt *spktp)
+{
+
+ ADBG_TRACE(("atapi_tran_sync_pkt entered\n"));
+
+ if (PKTP2GCMDP(spktp)->cmd_dma_handle != NULL) {
+ ghd_tran_sync_pkt(ap, spktp);
+ }
+}
+
+
+
+/*
+ *
+ * SCSA tran_start entry point
+ *
+ */
+
+/* ARGSUSED */
+static int
+atapi_tran_start(
+ struct scsi_address *ap,
+ struct scsi_pkt *spktp)
+{
+ ata_pkt_t *ata_pktp = SPKT2APKT(spktp);
+ ata_drv_t *ata_drvp = APKT2DRV(ata_pktp);
+ ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
+ gcmd_t *gcmdp = APKT2GCMD(ata_pktp);
+ int polled = FALSE;
+ int rc;
+
+ ADBG_TRACE(("atapi_tran_start entered\n"));
+
+ /*
+ * Basic initialization performed each and every time a
+ * scsi_pkt is submitted. A single scsi_pkt may be submitted
+ * multiple times so this routine has to be idempotent. One-time
+ * initializations don't belong here.
+ */
+
+ /*
+ * The ap_v_addr pointer is incremented by the PIO data
+ * transfer routine as each word is transferred. Therefore, need
+ * to reset ap_v_addr here (rather than atapi_tran_init_pkt())
+ * in case the target resubmits the same pkt multiple times
+ * (which is permitted by SCSA).
+ */
+ ata_pktp->ap_v_addr = ata_pktp->ap_baddr + ata_pktp->ap_boffset;
+
+ /* ap_resid is decremented as the data transfer progresses */
+ ata_pktp->ap_resid = ata_pktp->ap_bcount;
+
+ /* clear error flags */
+ ata_pktp->ap_flags &= (AP_ATAPI | AP_READ | AP_WRITE | AP_ARQ_ON_ERROR);
+ spktp->pkt_reason = 0;
+ spktp->pkt_state = 0;
+ spktp->pkt_statistics = 0;
+
+ /*
+ * check for polling pkt
+ */
+ if (spktp->pkt_flags & FLAG_NOINTR) {
+ polled = TRUE;
+ }
+
+#ifdef ___just_ignore_unsupported_flags___
+ /* driver cannot accept tagged commands */
+
+ if (spktp->pkt_flags & (FLAG_HTAG|FLAG_OTAG|FLAG_STAG)) {
+ spktp->pkt_reason = CMD_TRAN_ERR;
+ return (TRAN_BADPKT);
+ }
+#endif
+
+ /* call common transport routine */
+
+ rc = ghd_transport(&ata_ctlp->ac_ccc, gcmdp, gcmdp->cmd_gtgtp,
+ spktp->pkt_time, polled, NULL);
+
+ /* see if pkt was not accepted */
+
+ if (rc != TRAN_ACCEPT)
+ return (rc);
+
+ return (rc);
+}
+
+
+/*
+ *
+ * GHD packet complete callback
+ *
+ */
+/* ARGSUSED */
+static void
+atapi_complete(
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp,
+ int do_callback)
+{
+ struct scsi_pkt *spktp = APKT2SPKT(ata_pktp);
+ struct scsi_status *scsi_stat = (struct scsi_status *)spktp->pkt_scbp;
+
+ ADBG_TRACE(("atapi_complete entered\n"));
+ ADBG_TRANSPORT(("atapi_complete: pkt = 0x%p\n", ata_pktp));
+
+ /* update resid */
+
+ spktp->pkt_resid = ata_pktp->ap_resid;
+
+ if (ata_pktp->ap_flags & AP_SENT_CMD) {
+ spktp->pkt_state |=
+ STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
+ }
+ if (ata_pktp->ap_flags & AP_XFERRED_DATA) {
+ spktp->pkt_state |= STATE_XFERRED_DATA;
+ }
+
+ if (ata_pktp->ap_flags & AP_GOT_STATUS) {
+ spktp->pkt_state |= STATE_GOT_STATUS;
+ }
+
+ /* check for fatal errors */
+
+ if (ata_pktp->ap_flags & AP_TRAN_ERROR) {
+ spktp->pkt_reason = CMD_TRAN_ERR;
+ } else if (ata_pktp->ap_flags & AP_BUS_RESET) {
+ spktp->pkt_reason = CMD_RESET;
+ spktp->pkt_statistics |= STAT_BUS_RESET;
+ } else if (ata_pktp->ap_flags & AP_DEV_RESET) {
+ spktp->pkt_reason = CMD_RESET;
+ spktp->pkt_statistics |= STAT_DEV_RESET;
+ } else if (ata_pktp->ap_flags & AP_ABORT) {
+ spktp->pkt_reason = CMD_ABORTED;
+ spktp->pkt_statistics |= STAT_ABORTED;
+ } else if (ata_pktp->ap_flags & AP_TIMEOUT) {
+ spktp->pkt_reason = CMD_TIMEOUT;
+ spktp->pkt_statistics |= STAT_TIMEOUT;
+ } else {
+ spktp->pkt_reason = CMD_CMPLT;
+ }
+
+ /* non-fatal errors */
+
+ if (ata_pktp->ap_flags & AP_ERROR)
+ scsi_stat->sts_chk = 1;
+ else
+ scsi_stat->sts_chk = 0;
+
+ if (ata_pktp->ap_flags & AP_ARQ_ERROR) {
+ ADBG_ARQ(("atapi_complete ARQ error 0x%p\n", ata_pktp));
+ spktp->pkt_reason = CMD_TRAN_ERR;
+
+ } else if (ata_pktp->ap_flags & AP_ARQ_OKAY) {
+ static struct scsi_status zero_scsi_status = { 0 };
+ struct scsi_arq_status *arqp;
+
+ ADBG_ARQ(("atapi_complete ARQ okay 0x%p\n", ata_pktp));
+ spktp->pkt_state |= STATE_ARQ_DONE;
+ arqp = ata_pktp->ap_scbp;
+ arqp->sts_rqpkt_reason = CMD_CMPLT;
+ arqp->sts_rqpkt_state = STATE_XFERRED_DATA;
+ arqp->sts_rqpkt_status = zero_scsi_status;
+ arqp->sts_rqpkt_resid = 0;
+ arqp->sts_rqpkt_statistics = 0;
+
+ }
+
+ ADBG_TRANSPORT(("atapi_complete: reason = 0x%x stats = 0x%x "
+ "sts_chk = %d\n", spktp->pkt_reason, spktp->pkt_statistics,
+ scsi_stat->sts_chk));
+
+ if (do_callback && (spktp->pkt_comp))
+ (*spktp->pkt_comp)(spktp);
+}
+
+
+
+/*
+ * Update the IDENTIFY PACKET DEVICE info
+ */
+
+static int
+atapi_id_update(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ caddr_t ioaddr1 = ata_ctlp->ac_ioaddr1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ caddr_t ioaddr2 = ata_ctlp->ac_ioaddr2;
+ int rc;
+
+ /*
+ * select the appropriate drive and LUN
+ */
+ ddi_put8(io_hdl1, (uchar_t *)ioaddr1 + AT_DRVHD,
+ ata_drvp->ad_drive_bits);
+ ATA_DELAY_400NSEC(io_hdl2, ioaddr2);
+
+ /*
+ * make certain the drive is selected, and wait for not busy
+ */
+ if (!ata_wait(io_hdl2, ioaddr2, ATS_DRDY, ATS_BSY, 5 * 1000000)) {
+ ADBG_ERROR(("atapi_id_update: select failed\n"));
+ ata_pktp->ap_flags |= AP_ERROR;
+ return (ATA_FSM_RC_FINI);
+ }
+
+ rc = atapi_id(ata_ctlp->ac_iohandle1, ata_ctlp->ac_ioaddr1,
+ ata_ctlp->ac_iohandle2, ata_ctlp->ac_ioaddr2,
+ (struct ata_id *)ata_pktp->ap_v_addr);
+
+ if (!rc) {
+ ata_pktp->ap_flags |= AP_ERROR;
+ } else {
+ ata_pktp->ap_flags |= AP_XFERRED_DATA;
+ }
+ return (ATA_FSM_RC_FINI);
+}
+
+
+
+/*
+ * Both drives on the controller share a common pkt to do
+ * ARQ processing. Therefore the pkt is only partially
+ * initialized here. The rest of initialization occurs
+ * just before starting the ARQ pkt when an error is
+ * detected.
+ */
+
+void
+atapi_init_arq(
+ ata_ctl_t *ata_ctlp)
+{
+ ata_pkt_t *arq_pktp = ata_ctlp->ac_arq_pktp;
+
+ arq_pktp->ap_cdbp = ata_ctlp->ac_arq_cdb;
+ arq_pktp->ap_cdb_len = sizeof (ata_ctlp->ac_arq_cdb);
+ arq_pktp->ap_start = atapi_fsm_start;
+ arq_pktp->ap_intr = atapi_fsm_intr;
+ arq_pktp->ap_complete = atapi_complete;
+ arq_pktp->ap_flags = AP_ATAPI;
+ arq_pktp->ap_cmd = ATC_PACKET;
+
+ ata_ctlp->ac_arq_cdb[0] = SCMD_REQUEST_SENSE;
+}
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/atapi.h b/usr/src/uts/intel/io/dktp/controller/ata/atapi.h
new file mode 100644
index 0000000000..d3be006682
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/atapi.h
@@ -0,0 +1,132 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 1997 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _ATAPI_H
+#define _ATAPI_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Additional atapi status bits (redefinitions)
+ */
+#define ATE_ILI 0x01 /* Illegal length indication */
+#define ATE_EOM 0x02 /* End of media detected */
+#define ATE_MCR 0x08 /* Media change requested */
+#define ATS_SERVICE 0x10 /* overlap operation needs service */
+#define ATS_SENSE_KEY 0xf0 /* 4 bit sense key -see ata_sense_table */
+
+#define ATS_SENSE_KEY_SHIFT 4 /* shift to get to ATS_SENSE_KEY */
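For illustration, the sense key is recovered from an error-register value as shown below; err_reg is just a placeholder name for a value previously read from the drive's error register (e.g. with ddi_get8() on ac_error, as atapi_status() in atapi_fsm.c does).

/* Illustrative sketch, not part of this change; err_reg is hypothetical. */
uchar_t sense_key = (err_reg & ATS_SENSE_KEY) >> ATS_SENSE_KEY_SHIFT;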
+
+/*
+ * Status bits from ATAPI Interrupt reason register (AT_COUNT) register
+ */
+#define ATI_COD 0x01 /* Command or Data */
+#define ATI_IO 0x02 /* IO direction */
+#define ATI_RELEASE 0x04 /* Release for ATAPI overlap */
+
+/* ATAPI feature reg definitions */
+
+#define ATF_OVERLAP 0x02
+
+/*
+ * ATAPI IDENTIFY_DRIVE configuration word
+ */
+
+#define ATAPI_ID_CFG_PKT_SZ 0x3
+#define ATAPI_ID_CFG_PKT_12B 0x0
+#define ATAPI_ID_CFG_PKT_16B 0x1
+#define ATAPI_ID_CFG_DRQ_TYPE 0x60
+#define ATAPI_ID_CFG_DRQ_INTR 0x20
+#define ATAPI_ID_CFG_DEV_TYPE 0x0f00
+#define ATAPI_ID_CFG_DEV_SHFT 8
+
+/*
+ * ATAPI IDENTIFY_DRIVE capabilities word
+ */
+
+#define ATAPI_ID_CAP_DMA 0x0100
+#define ATAPI_ID_CAP_OVERLAP 0x2000
+
+/* ATAPI SET FEATURE commands */
+
+#define ATAPI_FEAT_RELEASE_INTR 0x5d
+#define ATAPI_FEAT_SERVICE_INTR 0x5e
+
+/*
+ * ATAPI bits
+ */
+#define ATAPI_SIG_HI 0xeb /* in high cylinder register */
+#define ATAPI_SIG_LO 0x14 /* in low cylinder register */
+
+
+#define ATAPI_SECTOR_SIZE 2048
+#define ATAPI_MAX_BYTES_PER_DRQ 0xf800 /* 64KB - 2KB, i.e. 62KB */
+#define ATAPI_HEADS 64
+#define ATAPI_SECTORS_PER_TRK 32
+
+/* Useful macros */
+
+#define TRAN2CTL(tran) ((ata_ctl_t *)((tran)->tran_hba_private))
+#define ADDR2CTL(ap) (TRAN2CTL(ADDR2TRAN(ap)))
+
+#define SPKT2APKT(spkt) (GCMD2APKT(PKTP2GCMDP(spkt)))
+#define APKT2SPKT(apkt) (GCMDP2PKTP(APKT2GCMD(apkt)))
+
+/* public function prototypes */
+
+int atapi_attach(ata_ctl_t *ata_ctlp);
+void atapi_detach(ata_ctl_t *ata_ctlp);
+void atapi_init_arq(ata_ctl_t *ata_ctlp);
+int atapi_init_drive(ata_drv_t *ata_drvp);
+void atapi_uninit_drive(ata_drv_t *ata_drvp);
+
+int atapi_id(ddi_acc_handle_t io_hdl1, caddr_t ioaddr1,
+ ddi_acc_handle_t io_hdl2, caddr_t ioaddr2, struct ata_id *buf);
+int atapi_signature(ddi_acc_handle_t io_hdl, caddr_t ioaddr);
+
+int atapi_ccballoc(gtgt_t *gtgtp, gcmd_t *gcmdp, int cmdlen,
+ int statuslen, int tgtlen, int ccblen);
+void atapi_ccbfree(gcmd_t *gcmdp);
+
+
+int atapi_fsm_intr(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+int atapi_fsm_start(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+void atapi_fsm_reset(ata_ctl_t *ata_ctlp);
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ATAPI_H */
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/atapi_fsm.c b/usr/src/uts/intel/io/dktp/controller/ata/atapi_fsm.c
new file mode 100644
index 0000000000..fc4f8355b6
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/atapi_fsm.c
@@ -0,0 +1,884 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Finite State Machines for ATA controller and ATAPI devices
+ */
+
+#include <sys/types.h>
+
+#include "ata_common.h"
+#include "atapi.h"
+
+/*
+ * Local functions
+ */
+static int atapi_start_cmd(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static void atapi_send_cdb(ata_ctl_t *ata_ctlp, ata_pkt_t *ata_pktp);
+static void atapi_start_dma(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp);
+static void atapi_pio_data_in(ata_ctl_t *ata_ctlp, ata_pkt_t *ata_pktp);
+static void atapi_pio_data_out(ata_ctl_t *ata_ctlp, ata_pkt_t *ata_pktp);
+static void atapi_status(ata_ctl_t *ata_ctlp, ata_pkt_t *ata_pktp,
+ uchar_t status, int dma_complete);
+static void atapi_fsm_error(ata_ctl_t *ata_ctlp, uchar_t state,
+ uchar_t event);
+
+
+
+
+static void
+atapi_fsm_error(
+ ata_ctl_t *ata_ctlp,
+ uchar_t state,
+ uchar_t event)
+{
+ ADBG_ERROR(("atapi protocol error: 0x%p 0x%x 0x%x\n",
+ ata_ctlp->ac_data, state, event));
+}
+
+
+/*
+ *
+ * IO CoD DRQ
+ * -- --- ---
+ * 0 0 0 == 0 invalid
+ * 0 0 1 == 1 Data to device
+ * 0 1 0 == 2 Idle
+ * 0 1 1 == 3 Send ATAPI CDB to device
+ * 1 0 0 == 4 invalid
+ * 1 0 1 == 5 Data from device
+ * 1 1 0 == 6 Status ready
+ * 1 1 1 == 7 Future use
+ *
+ */
+
+/*
+ * Given the current state and the current event this
+ * table determines what action to take. Note, in the actual
+ * table I've left room for the unused event codes: 0, 4, and 7.
+ *
+ * +-----------------------------------------------------
+ * | Current Event
+ * |
+ * State | dataout idle cdb datain status
+ * | 1 2 3 5 6
+ * |-----------------------------------------------------
+ * idle | sendcmd sendcmd sendcmd sendcmd sendcmd
+ * cmd | * * sendcdb * read-err-code
+ * cdb | xfer-out nada nada xfer-in read-err-code
+ * datain | * * * xfer-in read-err-code
+ * dataout | xfer-out * * * read-err-code
+ * DMA | * * * * read-err-code
+ *
+ */
+
+uchar_t atapi_PioAction[ATAPI_NSTATES][ATAPI_NEVENTS] = {
+/* invalid dataout idle cdb invalid datain status future */
+{ A_NADA, A_NADA, A_NADA, A_NADA, A_NADA, A_NADA, A_NADA, A_NADA }, /* Idle */
+{ A_NADA, A_NADA, A_NADA, A_CDB, A_NADA, A_NADA, A_RE, A_NADA }, /* Cmd */
+{ A_REX, A_OUT, A_NADA, A_NADA, A_IDLE, A_IN, A_RE, A_UNK }, /* Cdb */
+{ A_REX, A_UNK, A_IDLE, A_UNK, A_IDLE, A_IN, A_RE, A_UNK }, /* DtaIn */
+{ A_REX, A_OUT, A_IDLE, A_UNK, A_IDLE, A_UNK, A_RE, A_UNK }, /* DtaOut */
+{ A_REX, A_UNK, A_UNK, A_UNK, A_UNK, A_UNK, A_RE, A_UNK } /* DmaAct */
+};
+
+/*
+ *
+ * Given the current state and the current event, this table
+ * determines the new state of the device.
+ *
+ * +----------------------------------------------
+ * | Current Event
+ * |
+ * State | dataout idle cdb datain status
+ * |----------------------------------------------
+ * idle | cmd cmd cmd cmd cmd
+ * cmd | * * cdb * *
+ * cdb | dataout cdb cdb datain (idle)
+ * datain | * * * datain (idle)
+ * dataout | dataout * * * (idle)
+ * DMA | DMA DMA DMA DMA (idle)
+ *
+ *
+ * Note: the states enclosed in parens "(state)", are the accept states
+ * for this FSM. A separate table is used to encode the done
+ * states rather than extra state codes.
+ *
+ */
+
+uchar_t atapi_PioNextState[ATAPI_NSTATES][ATAPI_NEVENTS] = {
+/* invalid dataout idle cdb invalid datain status future */
+{ S_IDLE, S_IDLE, S_IDLE, S_IDLE, S_IDLE, S_IDLE, S_IDLE, S_IDLE}, /* idle */
+{ S_CDB, S_CDB, S_CDB, S_CDB, S_CDB, S_CDB, S_IDLE, S_X }, /* cmd */
+{ S_IDLE, S_OUT, S_CDB, S_CDB, S_CDB, S_IN, S_IDLE, S_X }, /* cdb */
+{ S_IDLE, S_X, S_IN, S_X, S_IN, S_IN, S_IDLE, S_X }, /* datain */
+{ S_IDLE, S_OUT, S_OUT, S_X, S_OUT, S_X, S_IDLE, S_X }, /* dataout */
+{ S_IDLE, S_DMA, S_DMA, S_DMA, S_DMA, S_DMA, S_IDLE, S_DMA } /* dmaActv */
+};
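The dispatch performed by atapi_fsm_intr() further below boils down to two lookups in these tables. A hedged sketch, with one concrete entry checked against the tables above:

/*
 * Illustrative sketch, not part of this change.
 */
uchar_t action = atapi_PioAction[state][event];		/* what to do now */
uchar_t next_state = atapi_PioNextState[state][event];	/* where to go next */

/*
 * e.g. a "data from device" event while the CDB has been sent:
 *
 *	atapi_PioAction[S_CDB][ATAPI_EVENT_PIO_IN]    == A_IN
 *	atapi_PioNextState[S_CDB][ATAPI_EVENT_PIO_IN] == S_IN
 */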
+
+
+static int
+atapi_start_cmd(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+
+ /*
+ * Bug 1256489:
+ *
+ * If AC_BSY_WAIT is set, wait for the controller to be not busy
+ * before issuing a command. If AC_BSY_WAIT is not set,
+ * skip the wait. This is important for laptops that do
+ * suspend/resume but do not correctly wait for the busy bit to
+ * drop after a resume.
+ */
+
+ if (ata_ctlp->ac_timing_flags & AC_BSY_WAIT) {
+ if (!ata_wait(io_hdl2, ata_ctlp->ac_ioaddr2,
+ 0, ATS_BSY, 5000000)) {
+ ADBG_WARN(("atapi_start: BSY too long!\n"));
+ ata_pktp->ap_flags |= AP_ERROR;
+ return (ATA_FSM_RC_BUSY);
+ }
+ }
+
+ /*
+ * Select the drive
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_drvhd, ata_pktp->ap_hd);
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ /*
+ * make certain the drive selected
+ */
+ if (!ata_wait(io_hdl2, ata_ctlp->ac_ioaddr2, 0, ATS_BSY, 5000000)) {
+ ADBG_ERROR(("atapi_start_cmd: drive select failed\n"));
+ return (ATA_FSM_RC_BUSY);
+ }
+
+ /*
+ * Always make certain interrupts are enabled. It's been reported
+ * (but not confirmed) that some notebook computers don't
+ * clear the interrupt disable bit after being resumed. The
+ * easiest way to fix this is to always clear the disable bit
+ * before every command.
+ */
+ ddi_put8(io_hdl2, ata_ctlp->ac_devctl, ATDC_D3);
+
+ ddi_put8(io_hdl1, ata_ctlp->ac_lcyl, ata_pktp->ap_lwcyl);
+ ddi_put8(io_hdl1, ata_ctlp->ac_hcyl, ata_pktp->ap_hicyl);
+ ddi_put8(io_hdl1, ata_ctlp->ac_sect, ata_pktp->ap_sec);
+ ddi_put8(io_hdl1, ata_ctlp->ac_count, ata_pktp->ap_count);
+
+ if (ata_pktp->ap_pciide_dma) {
+
+ ASSERT((ata_pktp->ap_flags & (AP_READ | AP_WRITE)) != 0);
+
+ /*
+ * DMA but no Overlap
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_feature, ATF_ATAPI_DMA);
+
+ /*
+ * copy the Scatter/Gather list to the controller's
+ * Physical Region Descriptor Table
+ */
+ ata_pciide_dma_setup(ata_ctlp, ata_pktp->ap_sg_list,
+ ata_pktp->ap_sg_cnt);
+ } else {
+ /*
+ * no DMA and no Overlap
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_feature, 0);
+ }
+
+ /*
+ * This next one sets the device in motion
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_cmd, ata_pktp->ap_cmd);
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ if (!(ata_drvp->ad_flags & AD_NO_CDB_INTR)) {
+ /*
+ * the device will send me an interrupt when it's
+ * ready for the packet
+ */
+ return (ATA_FSM_RC_OKAY);
+ }
+
+ /* else */
+
+ /*
+ * If we don't receive an interrupt requesting the scsi CDB,
+ * we must poll for DRQ, and then send out the CDB.
+ */
+
+ /*
+ * Wait for DRQ before sending the CDB. Bailout early
+ * if an error occurs.
+ *
+ * I'm not certain what the correct timeout should be.
+ */
+ if (ata_wait3(io_hdl2, ata_ctlp->ac_ioaddr2,
+ ATS_DRQ, ATS_BSY, /* okay */
+ ATS_ERR, ATS_BSY, /* cmd failed */
+ ATS_DF, ATS_BSY, /* cmd failed */
+ 4000000)) {
+ /* got good status */
+ return (ATA_FSM_RC_INTR);
+ }
+
+ ADBG_WARN(("atapi_start_cmd: 0x%x status 0x%x error 0x%x\n",
+ ata_pktp->ap_cmd,
+ ddi_get8(io_hdl2, ata_ctlp->ac_altstatus),
+ ddi_get8(io_hdl1, ata_ctlp->ac_error)));
+
+ return (ATA_FSM_RC_INTR);
+}
+
+
+/*
+ *
+ * Send the SCSI CDB to the ATAPI device
+ *
+ */
+
+static void
+atapi_send_cdb(
+ ata_ctl_t *ata_ctlp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int padding;
+
+ ADBG_TRACE(("atapi_send_cdb entered\n"));
+
+ /*
+ * send the CDB to the drive
+ */
+ ddi_rep_put16(io_hdl1, (ushort_t *)ata_pktp->ap_cdbp, ata_ctlp->ac_data,
+ ata_pktp->ap_cdb_len >> 1, DDI_DEV_NO_AUTOINCR);
+
+ /*
+ * pad to ad_cdb_len bytes
+ */
+
+ padding = ata_pktp->ap_cdb_pad;
+
+ while (padding) {
+ ddi_put16(io_hdl1, ata_ctlp->ac_data, 0);
+ padding--;
+ }
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+#ifdef ATA_DEBUG_XXX
+ {
+ uchar_t *cp = ata_pktp->ap_cdbp;
+
+ ADBG_TRANSPORT(("\tatapi scsi cmd (%d bytes):\n ",
+ ata_pktp->ap_cdb_len));
+ ADBG_TRANSPORT(("\t\t 0x%x 0x%x 0x%x 0x%x\n",
+ cp[0], cp[1], cp[2], cp[3]));
+ ADBG_TRANSPORT(("\t\t 0x%x 0x%x 0x%x 0x%x\n",
+ cp[4], cp[5], cp[6], cp[7]));
+ ADBG_TRANSPORT(("\t\t 0x%x 0x%x 0x%x 0x%x\n",
+ cp[8], cp[9], cp[10], cp[11]));
+ }
+#endif
+
+ ata_pktp->ap_flags |= AP_SENT_CMD;
+}
+
+
+
+/*
+ * Start the DMA engine
+ */
+
+/* ARGSUSED */
+static void
+atapi_start_dma(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ uchar_t rd_wr;
+
+ /*
+ * Determine the direction. This may look backwards
+ * but the command bit programmed into the DMA engine
+ * specifies the type of operation the engine performs
+ * on the PCI bus (not the ATA bus). Therefore when
+ * transferring data from the device to system memory, the
+ * DMA engine performs PCI Write operations.
+ */
+ if (ata_pktp->ap_flags & AP_READ)
+ rd_wr = PCIIDE_BMICX_RWCON_WRITE_TO_MEMORY;
+ else
+ rd_wr = PCIIDE_BMICX_RWCON_READ_FROM_MEMORY;
+
+ /*
+ * Start the DMA engine
+ */
+ ata_pciide_dma_start(ata_ctlp, rd_wr);
+}
+
+
+
+/*
+ * Transfer the data from the device
+ *
+ * Note: the atapi_pio_data_in() and atapi_pio_data_out() functions
+ * are complicated a lot by the requirement to handle an odd byte count.
+ * The only device we've seen which does this is the Hitachi CDR-7730.
+ * See bug ID 1214595. It's my understanding that Dell stopped shipping
+ * that drive after discovering all the problems it caused, so it may
+ * be impossible to find one for any sort of regression test.
+ *
+ * In the future, ATAPI tape drives will also probably support odd byte
+ * counts so this code will be exercised more often.
+ *
+ */
+
+static void
+atapi_pio_data_in(
+ ata_ctl_t *ata_ctlp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int drive_bytes;
+ int xfer_bytes;
+ int xfer_words;
+
+ ata_pktp->ap_flags |= AP_XFERRED_DATA;
+
+ /*
+ * Get the device's byte count for this transfer
+ */
+ drive_bytes = ((int)ddi_get8(io_hdl1, ata_ctlp->ac_hcyl) << 8)
+ + ddi_get8(io_hdl1, ata_ctlp->ac_lcyl);
+
+ /*
+ * Determine the actual number of bytes I'm going to transfer. My
+ * buffer might have fewer bytes than what the device
+ * expects or handles on each interrupt.
+ */
+ xfer_bytes = min(ata_pktp->ap_resid, drive_bytes);
+
+ ASSERT(xfer_bytes >= 0);
+
+ /*
+ * Round down my transfer count to whole words so that
+ * if the transfer count is odd it's still handled correctly.
+ */
+ xfer_words = xfer_bytes / 2;
+
+ if (xfer_words) {
+ int byte_count = xfer_words * 2;
+
+ ddi_rep_get16(io_hdl1, (ushort_t *)ata_pktp->ap_v_addr,
+ ata_ctlp->ac_data, xfer_words, DDI_DEV_NO_AUTOINCR);
+
+ ata_pktp->ap_v_addr += byte_count;
+ drive_bytes -= byte_count;
+ }
+
+ /*
+ * Handle possible odd byte at end. Read a 16-bit
+ * word but discard the high-order byte.
+ */
+ if (xfer_bytes & 1) {
+ ushort_t tmp_word;
+
+ tmp_word = ddi_get16(io_hdl1, ata_ctlp->ac_data);
+ *ata_pktp->ap_v_addr++ = tmp_word & 0xff;
+ drive_bytes -= 2;
+ }
+
+ ata_pktp->ap_resid -= xfer_bytes;
+
+ ADBG_TRANSPORT(("atapi_pio_data_in: read 0x%x bytes\n", xfer_bytes));
+
+ /*
+ * Discard any unwanted data.
+ */
+ if (drive_bytes > 0) {
+ ADBG_TRANSPORT(("atapi_pio_data_in: dump 0x%x bytes\n",
+ drive_bytes));
+
+ /* rounded up if the drive_bytes count is odd */
+ for (; drive_bytes > 0; drive_bytes -= 2)
+ (void) ddi_get16(io_hdl1, ata_ctlp->ac_data);
+ }
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+}
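A worked example of the residual handling above (illustrative numbers only):

/*
 * Worked example (illustrative only): the drive announces
 * drive_bytes = 2048 but only ap_resid = 515 bytes remain in the
 * caller's buffer.  Then xfer_bytes = 515 and xfer_words = 257, so
 * 514 bytes are moved by ddi_rep_get16(), one more word is read for
 * the odd final byte, and the remaining 1532 bytes offered by the
 * drive are read and discarded two at a time so the data phase
 * completes cleanly.
 */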
+
+
+/*
+ * Transfer the data to the device
+ */
+
+static void
+atapi_pio_data_out(
+ ata_ctl_t *ata_ctlp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+ int drive_bytes;
+ int xfer_bytes;
+ int xfer_words;
+
+ ata_pktp->ap_flags |= AP_XFERRED_DATA;
+
+ /*
+ * Get the device's byte count for this transfer
+ */
+ drive_bytes = ((int)ddi_get8(io_hdl1, ata_ctlp->ac_hcyl) << 8)
+ + ddi_get8(io_hdl1, ata_ctlp->ac_lcyl);
+
+ /*
+ * Determine the actual number of bytes I'm going to transfer. My
+ * buffer might have fewer bytes than what the device
+ * expects or handles on each interrupt.
+ */
+ xfer_bytes = min(ata_pktp->ap_resid, drive_bytes);
+
+ /*
+ * Round down my transfer count to whole words so that
+ * if the transfer count is odd it's handled correctly.
+ */
+ xfer_words = xfer_bytes / 2;
+
+ if (xfer_words) {
+ int byte_count = xfer_words * 2;
+
+ ddi_rep_put16(io_hdl1, (ushort_t *)ata_pktp->ap_v_addr,
+ ata_ctlp->ac_data, xfer_words, DDI_DEV_NO_AUTOINCR);
+ ata_pktp->ap_v_addr += byte_count;
+ }
+
+ /*
+ * If odd byte count, transfer the last
+ * byte. Use a tmp so that I don't run off
+ * the end of the buffer and possibly page
+ * fault.
+ */
+ if (xfer_bytes & 1) {
+ ushort_t tmp_word;
+
+ /* grab the last unsigned byte and widen it to 16-bits */
+ tmp_word = *ata_pktp->ap_v_addr++;
+ ddi_put16(io_hdl1, ata_ctlp->ac_data, tmp_word);
+ }
+
+ ata_pktp->ap_resid -= xfer_bytes;
+
+ ADBG_TRANSPORT(("atapi_pio_data_out: wrote 0x%x bytes\n", xfer_bytes));
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+}
+
+
+/*
+ *
+ * check status of completed command
+ *
+ */
+static void
+atapi_status(
+ ata_ctl_t *ata_ctlp,
+ ata_pkt_t *ata_pktp,
+ uchar_t status,
+ int dma_completion)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+
+ ata_pktp->ap_flags |= AP_GOT_STATUS;
+
+ if (status & (ATS_DF | ATS_ERR)) {
+ ata_pktp->ap_flags |= AP_ERROR;
+ }
+
+ if (ata_pktp->ap_flags & AP_ERROR) {
+ ata_pktp->ap_status = status;
+ ata_pktp->ap_error = ddi_get8(io_hdl1, ata_ctlp->ac_error);
+ }
+
+
+ /*
+ * If the DMA transfer failed leave the resid set to
+ * the original byte count. The target driver has
+ * to do a REQUEST SENSE to get the true residual
+ * byte count. Otherwise, it all transferred so update
+ * the flags and residual byte count.
+ */
+ if (dma_completion && !(ata_pktp->ap_flags & AP_TRAN_ERROR)) {
+ ata_pktp->ap_flags |= AP_XFERRED_DATA;
+ ata_pktp->ap_resid = 0;
+ }
+}
+
+
+static void
+atapi_device_reset(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
+
+ /* select the drive */
+ ddi_put8(io_hdl1, ata_ctlp->ac_drvhd, ata_drvp->ad_drive_bits);
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ /* issue atapi DEVICE RESET */
+ ddi_put8(io_hdl1, ata_ctlp->ac_cmd, ATC_DEVICE_RESET);
+
+ /* wait for the busy bit to settle */
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ /*
+ * Re-select the drive (this is probably only necessary
+ * when resetting drive 1).
+ */
+ ddi_put8(io_hdl1, ata_ctlp->ac_drvhd, ata_drvp->ad_drive_bits);
+ ATA_DELAY_400NSEC(io_hdl2, ata_ctlp->ac_ioaddr2);
+
+ /* allow the drive the full 6 seconds to respond */
+ /* LINTED */
+ if (!ata_wait(io_hdl2, ata_ctlp->ac_ioaddr2, 0, ATS_BSY, 6 * 1000000)) {
+ ADBG_WARN(("atapi_device_reset: still busy\n"));
+ /*
+ * It's not clear to me what to do at this point,
+ * the drive might be dead or might eventually
+ * recover. For now just ignore it and continue
+ * to attempt to use the drive.
+ */
+ }
+}
+
+
+
+void
+atapi_fsm_reset(ata_ctl_t *ata_ctlp)
+{
+ ata_drv_t *ata_drvp;
+ int drive;
+
+ /*
+ * reset drive 0 and drive 1
+ */
+ for (drive = 0; drive <= 1; drive++) {
+ ata_drvp = CTL2DRV(ata_ctlp, drive, 0);
+ if (ata_drvp && ATAPIDRV(ata_drvp)) {
+ ata_drvp->ad_state = S_IDLE;
+ atapi_device_reset(ata_ctlp, ata_drvp);
+ }
+ }
+}
+
+
+int
+atapi_fsm_start(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ int rc;
+
+ ADBG_TRACE(("atapi_start entered\n"));
+ ADBG_TRANSPORT(("atapi_start: pkt = 0x%p\n", ata_pktp));
+
+ /*
+ * check for valid state
+ */
+ if (ata_drvp->ad_state != S_IDLE) {
+ ADBG_ERROR(("atapi_fsm_start not idle 0x%x\n",
+ ata_drvp->ad_state));
+ return (ATA_FSM_RC_BUSY);
+ } else {
+ ata_drvp->ad_state = S_CMD;
+ }
+
+ rc = atapi_start_cmd(ata_ctlp, ata_drvp, ata_pktp);
+
+ switch (rc) {
+ case ATA_FSM_RC_OKAY:
+ /*
+ * The command started okay. Just return.
+ */
+ break;
+ case ATA_FSM_RC_INTR:
+ /*
+ * Got Command Phase. The upper layer will send
+ * the cdb by faking an interrupt.
+ */
+ break;
+ case ATA_FSM_RC_FINI:
+ /*
+ * command completed immediately, stick on done q
+ */
+ break;
+ case ATA_FSM_RC_BUSY:
+ /*
+ * The command wouldn't start, tell the upper layer to
+ * stick this request on the done queue.
+ */
+ ata_drvp->ad_state = S_IDLE;
+ return (ATA_FSM_RC_BUSY);
+ }
+ return (rc);
+}
+
+/*
+ *
+ * All interrupts on an ATAPI device come through here.
+ * This function determines what to do next, based on
+ * the current state of the request and the drive's current
+ * status bits. See the FSM tables at the top of this file.
+ *
+ */
+
+int
+atapi_fsm_intr(
+ ata_ctl_t *ata_ctlp,
+ ata_drv_t *ata_drvp,
+ ata_pkt_t *ata_pktp)
+{
+ ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
+ uchar_t status;
+ uchar_t intr_reason;
+ uchar_t state;
+ uchar_t event;
+ uchar_t action;
+
+
+ /*
+ * get the prior state
+ */
+ state = ata_drvp->ad_state;
+
+ /*
+ * If doing DMA, then:
+ *
+ * 1. halt the DMA engine
+ * 2. reset the interrupt and error latches
+ * 3. reset the drive's IRQ.
+ *
+ * I think the order of these operations must be
+ * exactly as listed. Otherwise the PCI-IDE
+ * controller can hang or we can miss the next interrupt
+ * edge.
+ *
+ */
+ switch (state) {
+ case S_DMA:
+ ASSERT(ata_pktp->ap_pciide_dma == TRUE);
+ /*
+ * Halt the DMA engine. When we reach this point
+ * we already know for certain that the device has
+ * an interrupt pending since the ata_get_status()
+ * function already checked the PCI-IDE interrupt
+ * status bit.
+ */
+ ata_pciide_dma_stop(ata_ctlp);
+ /*FALLTHRU*/
+ case S_IDLE:
+ case S_CMD:
+ case S_CDB:
+ case S_IN:
+ case S_OUT:
+ break;
+ }
+
+
+ /*
+ * Clear the PCI-IDE latches and the drive's IRQ
+ */
+ status = ata_get_status_clear_intr(ata_ctlp, ata_pktp);
+
+ /*
+ * some non-compliant (e.g., NEC) drives don't
+ * set ATS_BSY within 400 nsec. and/or don't keep
+ * it asserted until they're actually non-busy.
+ * There's a small window between reading the alt_status
+ * and status registers where the drive might "bounce"
+ * the ATS_BSY bit.
+ */
+ if (status & ATS_BSY)
+ return (ATA_FSM_RC_BUSY);
+
+ /*
+ * get the interrupt reason code
+ */
+ intr_reason = ddi_get8(io_hdl1, ata_ctlp->ac_count);
+
+ /*
+ * encode the status and interrupt reason bits
+ * into an event code which is used to index the
+ * FSM tables
+ */
+ event = ATAPI_EVENT(status, intr_reason);
+
+ /*
+ * determine the action for this event
+ */
+ action = atapi_PioAction[state][event];
+
+ /*
+ * determine the new state
+ */
+ ata_drvp->ad_state = atapi_PioNextState[state][event];
+
+ switch (action) {
+ default:
+ case A_UNK:
+ /*
+ * invalid state
+ */
+/*
+ * ??? this shouldn't happen. ???
+ * if there's an active command on
+ * this device, the pkt timer should eventually clear the
+ * device. I might try sending a DEVICE-RESET here to speed
+ * up the error recovery except that DEVICE-RESET is kind of
+ * complicated to implement correctly because if I send a
+ * DEVICE-RESET to drive 1 it deselects itself.
+ */
+ ADBG_WARN(("atapi_fsm_intr: Unsupported intr\n"));
+ break;
+
+ case A_NADA:
+ drv_usecwait(100);
+ break;
+
+ case A_CDB:
+ /*
+ * send out atapi pkt
+ */
+ atapi_send_cdb(ata_ctlp, ata_pktp);
+
+ /*
+ * start the DMA engine if necessary and change
+ * the state variable to reflect not doing PIO
+ */
+ if (ata_pktp->ap_pciide_dma) {
+ atapi_start_dma(ata_ctlp, ata_drvp, ata_pktp);
+ ata_drvp->ad_state = S_DMA;
+ }
+ break;
+
+ case A_IN:
+ if (!(ata_pktp->ap_flags & AP_READ)) {
+ /*
+ * maybe this was a spurious interrupt, just
+ * spin for a bit and see if the drive
+ * recovers
+ */
+ atapi_fsm_error(ata_ctlp, state, event);
+ drv_usecwait(100);
+ break;
+ }
+ /*
+ * read in the data
+ */
+ if (!ata_pktp->ap_pciide_dma) {
+ atapi_pio_data_in(ata_ctlp, ata_pktp);
+ }
+ break;
+
+ case A_OUT:
+ if (!(ata_pktp->ap_flags & AP_WRITE)) {
+ /* spin for a bit and see if the drive recovers */
+ atapi_fsm_error(ata_ctlp, state, event);
+ drv_usecwait(100);
+ break;
+ }
+ /*
+ * send out data
+ */
+ if (!ata_pktp->ap_pciide_dma) {
+ atapi_pio_data_out(ata_ctlp, ata_pktp);
+ }
+ break;
+
+ case A_IDLE:
+ /*
+ * The DRQ bit deasserted before or between the data
+ * transfer phases.
+ */
+ if (!ata_drvp->ad_bogus_drq) {
+ ata_drvp->ad_bogus_drq = TRUE;
+ atapi_fsm_error(ata_ctlp, state, event);
+ }
+ drv_usecwait(100);
+ break;
+
+ case A_RE:
+ /*
+ * If we get here, a command has completed!
+ *
+ * check status of completed command
+ */
+ atapi_status(ata_ctlp, ata_pktp, status,
+ (state == S_DMA) ? TRUE : FALSE);
+
+ return (ATA_FSM_RC_FINI);
+
+ case A_REX:
+ /*
+ * some NEC drives don't report the right interrupt
+ * reason code for the status phase
+ */
+ if (!ata_drvp->ad_nec_bad_status) {
+ ata_drvp->ad_nec_bad_status = TRUE;
+ atapi_fsm_error(ata_ctlp, state, event);
+ drv_usecwait(100);
+ }
+ atapi_status(ata_ctlp, ata_pktp, status,
+ (state == S_DMA) ? TRUE : FALSE);
+ return (ATA_FSM_RC_FINI);
+
+ }
+ return (ATA_FSM_RC_OKAY);
+}
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/capacity.notes.txt b/usr/src/uts/intel/io/dktp/controller/ata/capacity.notes.txt
new file mode 100644
index 0000000000..1b720071d4
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/capacity.notes.txt
@@ -0,0 +1,178 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+#
+# Copyright 1999 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+
+Dan Mick, 2/16/1999
+
+I had to come up with some sort of synthetic device geometry in the
+case that a drive supports LBA access and therefore the BIOS's geometry
+may be wrong or too small.
+
+In despair at reading the specs, I asked the x3t13 reflector
+how one is supposed to calculate capacity:
+
+==
+X-Authentication-Warning: mage.dt.wdc.com: majordom set sender to owner-t13@dt.wdc.com using -f
+Date: Thu, 11 Feb 1999 19:16:39 -0800 (PST)
+From: Dan Mick <dan.mick@West>
+Subject: Capacity?
+To: t13@dt.wdc.com
+
+So, I'm sure I'm being naive in expecting there to be a way to
+reliably calculate the capacity of an ATA drive, but I can't make
+sense of the IDENTIFY DEVICE results, words
+
+1,3,6,53,54-58,60-61
+
+Is the right algorithm for making sense of all this written down
+somewhere? I *have* searched the specs and Hale's HIW docs and
+the "ATA FAQ" from Wehman and den Hahn, and I still don't understand
+how this can be so nondeterministic.
+
+Even assertions in the specs seem to be ignored; I have a drive for
+which words 57-58 do *not* represent the product of words 54, 55, and
+56, for instance...
+==
+
+Several responses came; one from curtis_stevens@phoenix.com said "just
+use LBA", which of course doesn't answer the question about non-LBA
+drives. David_S_Thompson@notes.seagate.com said "read section
+6.2.1 of ATA-4, rev 17 or above", which does help a bit. But
+the best pragmatic answer came from Hale Landis. I've tried to
+implement this algorithm in deriving the capacity and geometry
+for ata, without using "Init Drive Parameters", since the driver
+hasn't done that in recent incarnations, and I'm loath to mess
+with what the BIOS and the drive have figured out unless it
+becomes absolutely necessary.
+
+
+From: "Hale Landis" <hlandis@ibm.net>
+To: "T13 Reflector" <t13@dt.wdc.com>, "Dan Mick" <dan.mick@West>
+Date: Thu, 11 Feb 1999 23:46:59 -0700
+Subject: Re: Capacity?
+
+Dan Mick said...
+>So, I'm sure I'm being naive in expecting there to be a way to
+>reliably calculate the capacity of an ATA drive, but I can't make
+>sense of the IDENTIFY DEVICE results, words
+>
+>1,3,6,53,54-58,60-61
+>
+>Is the right algorithm for making sense of all this written down
+>somewhere? I *have* searched the specs and Hale's HIW docs and
+>the "ATA FAQ" from Wehman and den Hahn, and I still don't understand
+>how this can be so nondeterministic.
+>
+>Even assertions in the specs seem to be ignored; I have a drive for
+>which words 57-58 do *not* represent the product of words 54, 55, and
+>56, for instance...
+
+If the words [54]*[55]*[56] don't match [57:58] then the drive is
+"broken". Warning: some older drives have words 57:58 in big endian
+format (that is easy to verify!).
+
+Of course Read/Set Max do alter the drive's apparent capacity but assuming
+this feature is not being used or it is being used and implemented
+correctly...
+
+If you have no need to use CHS mode, then just ignore words 1, 3, 6 and
+53:58. Words 60:61 are the drive capacity. But even if you must use CHS
+mode, words 60:61 are still the true drive capacity but words 57:58 are
+the capacity that the current CHS geometry can address and [57:58] must be
+<= [60:61]. Oh yea, if you find that 57:58 are big endian then 60:61 are
+probably big endian too.
+
+An algorithm??? (I hope there aren't any typo's here)...
+
+1) If you are LBA only (don't use CHS) then words 60:61 are all you need,
+you are done.
+
+2) If you must use CHS then I suggest the following:
+
+2a) Check words 53:58...
+ does 53 indicate "valid",
+ is 1 <= [55] <= 16,
+ is 1 <= [56] <= 63,
+ and does [54]*[55]*[56] == [57:58]?
+
+ - Yes, you know that current CHS geometry of the drive, you are done.
+ If you don't like this geometry then issue an Init Drv Params with
+ a different heads and sectors and repeat this step.
+
+ - No, then go to 2b).
+
+2b) Does the drive support LBA and is [1]*[3]*[6] <= [60:61]?
+
+ - Yes, assume 60:61 are correct, and go to 2c)
+
+ - No, go to 2d)
+
+2c) Issue a Init Drv Params and set your favorite heads and sectors.
+ Compute the number of cylinders:
+
+ num-cyl = [60:61] / ((favorite heads) * (favorite sectors))
+
+ The drive capacity is (num-cyl)*(favorite heads)*(favorite sectors).
+ And this value should be in 57:58 now. You are done.
+
+2d) Now you got a problem... 60:61 are no good, 53:58 are no good.
+ You don't have much choice but to assume that [1]*[3]*[6] is the
+ drive capacity. Issue an Init Drv Params to set the default geometry
+ from [3] and [6] -or- issue an Init Drv Params with your favorite
+ heads and sectors. Compute the number of cylinders:
+
+ num-cyl = ([1]*[3]*[6]) / ((num heads) * (num sectors))
+
+ The drive capacity is (num-cyl)*(num-head)*(num-sectors).
+
+ You are done.
+
+And one final thing... If you used Init Drv Params you must now verify
+that it worked. Issue a read command and make sure you can read what you
+think is the last sector on the drive. If this read fails with ABRT or
+IDNF, you are in *BIG* trouble.
+
+All we did here was find a CHS geometry and a drive capacity that should
+work. If the drive has a Master Boot Record then this geometry may not
+have a CHS translation that matches the CHS translation that was used in
+that Master Boot Record. But I'll not go into that here (I would probably
+have to say bad things about the documents published by some of my friends
+a few years ago!).
+
+I'll say "sorry" now to all you hardware folks that read these reflector
+messages but I'm sure this will begin a long series of messages on the
+reflector that will just bore you to near death!
+
+
++---------------+---------------------------+
+| Hale Landis | hlandis@ibm.net |
+| Niwot, CO USA | hlandis@sugs.talisman.com |
++---------------+---------------------------+
+| !! Coming soon: www.talisman.com/sugs !! |
++-------------------------------------------+
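+
+A minimal C sketch of the CHS/capacity checks in steps 2a, 2b and 2d
+above (my summary, not code from the driver; it assumes the 256-word
+IDENTIFY DEVICE data is in a host-endian array id[], approximates the
+"supports LBA" test by a non-zero word 60:61, and skips the Init Drv
+Params and verify-read steps):
+
+	/* illustrative only; id[] is IDENTIFY DEVICE data */
+	static unsigned long
+	ata_capacity(const unsigned short id[256])
+	{
+		unsigned long lba_cap = ((unsigned long)id[61] << 16) | id[60];
+		unsigned long cur_cap = ((unsigned long)id[58] << 16) | id[57];
+		unsigned long dflt_cap = (unsigned long)id[1] * id[3] * id[6];
+
+		/* 2a) current CHS translation valid and self-consistent? */
+		if ((id[53] & 1) &&
+		    id[55] >= 1 && id[55] <= 16 &&
+		    id[56] >= 1 && id[56] <= 63 &&
+		    (unsigned long)id[54] * id[55] * id[56] == cur_cap)
+			return (cur_cap);
+
+		/* 2b) LBA usable and default geometry fits in 60:61? */
+		if (lba_cap != 0 && dflt_cap <= lba_cap)
+			return (lba_cap);
+
+		/* 2d) fall back to the default geometry in words 1, 3, 6 */
+		return (dflt_cap);
+	}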
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/fsm.txt b/usr/src/uts/intel/io/dktp/controller/ata/fsm.txt
new file mode 100644
index 0000000000..ec4619c432
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/fsm.txt
@@ -0,0 +1,74 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2000 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ident "%Z%%M% %I% %E% SMI"
+
+
+ /*
+ * The interrupt reason can be interpreted
+ * from other bits as follows:
+ *
+ * DRQ IO CoD
+ * --- -- ---
+ * 0 0 1 Idle
+ * 1 0 1 Send ATAPI CDB to device
+ * 1 1 0 Data from device
+ * 1 0 0 Data to device
+ * 1 1 1 Future use
+ * 0 1 1 Status ready
+ *
+ */
+
+ ACTION
+
+ ATAPI Status Bits
+Current |
+State | idle cdb datain dataout status
+ |-----------------------------------------------------------------
+idle | cmd cmd cmd cmd cmd
+command | * sendcdb * * rd-intr-reason
+cdb | * * start-xfer start-xfer rd-intr-reason
+datain | * * continue-xfer * rd-intr-reason
+dataout | * * * continue-xfer rd-intr-reason
+
+
+
+ NEXT-STATE
+
+ ATAPI Status Bits
+Current |
+State | idle cdb datain dataout status
+ |-----------------------------------------------------------------
+idle | command command command command command
+command | * cdb * * *
+cdb | * * datain dataout (idle)
+datain | * * datain * (idle)
+dataout | * * * dataout (idle)
+
+
+
+States marked '*' should be invalid but some non-compliant drives
+don't transition correctly between states.
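+
+
+A small illustrative decode (not taken from the driver) of the three
+interrupt-reason bits into the column labels used in the tables above:
+
+	static const char *
+	atapi_intr_reason(int drq, int io, int cod)
+	{
+		if (!drq)
+			return (cod ? (io ? "status" : "idle") : "invalid");
+		if (cod)
+			return (io ? "invalid" : "cdb"); /* IO=1 is future use */
+		return (io ? "datain" : "dataout");
+	}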
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/pciide.h b/usr/src/uts/intel/io/dktp/controller/ata/pciide.h
new file mode 100644
index 0000000000..047478bdd0
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/pciide.h
@@ -0,0 +1,105 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _PCIIDE_H
+#define _PCIIDE_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Bus Mastering devices have a PCI class-code of 0x010180 to 0x0101ff
+ */
+#define PCIIDE_BM_CAP_MASK 0x80
+#define PCIIDE_BM_CLASS ((PCI_CLASS_MASS << 16) | (PCI_MASS_IDE << 8) | 0x80)
+#define PCIIDE_BM_CLASS_MASK 0xffffff80
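+/*
+ * e.g. a controller is bus-master capable when
+ * (class_code & PCIIDE_BM_CLASS_MASK) == PCIIDE_BM_CLASS
+ */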
+
+
+#define PCIIDE_BMICX_REG 0 /* Bus Master IDE Command Register */
+
+#define PCIIDE_BMICX_SSBM 0x01 /* Start/Stop Bus Master */
+#define PCIIDE_BMICX_SSBM_E 0x01 /* 1=Start (Enable) */
+ /* 0=Stop (Disable) */
+
+/*
+ * NOTE: "read" and "write" are the actions of the DMA
+ * engine on the PCI bus, not the DMA engine's actions on the ATA
+ * bus.  Therefore, for an ATA READ command, program the DMA engine to
+ * "write to memory" mode (and vice versa).
+ */
+#define PCIIDE_BMICX_RWCON 0x08 /* Read/Write Control */
+#define PCIIDE_BMICX_RWCON_WRITE_TO_MEMORY 0x08 /* 1=Write (dev to host) */
+#define PCIIDE_BMICX_RWCON_READ_FROM_MEMORY 0x00 /* 0=Read (host to dev) */
+
+/* preserve these bits during updates */
+#define PCIIDE_BMICX_MASK (~(PCIIDE_BMICX_SSBM | PCIIDE_BMICX_RWCON))
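+
+/*
+ * Illustrative example (not part of this interface): for an ATA READ
+ * the DMA engine moves data from the device into memory, so RWCON is
+ * set to "write to memory" while the PCIIDE_BMICX_MASK bits are
+ * preserved.  "bmicx_addr" is a hypothetical pointer to the BMICX
+ * register mapped with ddi_regs_map_setup():
+ *
+ *	uint8_t bmicx = ddi_get8(handle, bmicx_addr);
+ *	bmicx = (bmicx & PCIIDE_BMICX_MASK) |
+ *	    PCIIDE_BMICX_RWCON_WRITE_TO_MEMORY | PCIIDE_BMICX_SSBM_E;
+ *	ddi_put8(handle, bmicx_addr, bmicx);
+ */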
+
+
+
+#define PCIIDE_BMISX_REG 2 /* Bus Master IDE Status Register */
+
+#define PCIIDE_BMISX_BMIDEA 0x01 /* Bus Master IDE Active */
+#define PCIIDE_BMISX_IDERR 0x02 /* IDE DMA Error */
+#define PCIIDE_BMISX_IDEINTS 0x04 /* IDE Interrupt Status */
+#define PCIIDE_BMISX_DMA0CAP 0x20 /* Drive 0 DMA Capable */
+#define PCIIDE_BMISX_DMA1CAP 0x40 /* Drive 1 DMA Capable */
+#define PCIIDE_BMISX_SIMPLEX 0x80 /* Simplex only */
+
+/* preserve these bits during updates */
+#define PCIIDE_BMISX_MASK 0xf8
+
+#define PCIIDE_BMIDTPX_REG 4 /* Bus Master IDE Desc. Table Ptr */
+#define PCIIDE_BMIDTPX_MASK 0x00000003 /* must be zeros */
+
+
+typedef struct PhysicalRegionDescriptorTableEntry {
+ uint_t p_address; /* physical address */
+ uint_t p_count; /* byte count, EOT in high order bit */
+} prde_t;
+
+/*
+ * Some specs say that p_address must be 32-bit aligned, and some claim
+ * 16-bit alignment.  Use 32-bit alignment just to be safe.
+ */
+#ifdef __not_yet__
+#define PCIIDE_PRDE_ADDR_MASK ((uint_t)(sizeof (short) -1))
+#else
+#define PCIIDE_PRDE_ADDR_MASK ((uint_t)(sizeof (int) -1))
+#endif
+
+#define PCIIDE_PRDE_CNT_MASK ((uint_t)0x0001) /* must be even */
+#define PCIIDE_PRDE_CNT_MAX ((uint_t)0x10000) /* 0 == 64k */
+#define PCIIDE_PRDE_EOT ((uint_t)0x80000000)
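+
+/*
+ * Illustrative example (not part of this interface): filling the final
+ * PRD table entry from a single DMA cookie; "prdp" and "cookie" are
+ * hypothetical locals.  A byte count of 0 means 64k, and the EOT bit
+ * marks the last entry:
+ *
+ *	prdp->p_address = cookie.dmac_address;
+ *	prdp->p_count = (cookie.dmac_size & (PCIIDE_PRDE_CNT_MAX - 1)) |
+ *	    PCIIDE_PRDE_EOT;
+ *	ASSERT((prdp->p_address & PCIIDE_PRDE_ADDR_MASK) == 0);
+ *	ASSERT((cookie.dmac_size & PCIIDE_PRDE_CNT_MASK) == 0);
+ */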
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PCIIDE_H */
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/sil3xxx.c b/usr/src/uts/intel/io/dktp/controller/ata/sil3xxx.c
new file mode 100644
index 0000000000..5a5cd19f8b
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/sil3xxx.c
@@ -0,0 +1,152 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * Silicon Image 3XXX controller specific processing
+ *
+ * This file may be expanded to take advantage of Silicon Image
+ * additional features (if applicable to specific controller model):
+ * 1. Virtual DMA operation
+ * 2. Concurrent all-channel DMA
+ * 3. Large Block Transfers
+ * 4. Watchdog Timer
+ * 5. Power Management
+ * 6. Hot Plug Support
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ata_common.h"
+#include "sil3xxx.h"
+#include <sys/pci.h>
+
+int fifocntctl[] = {FIFO_CNTCTL_0, FIFO_CNTCTL_1, FIFO_CNTCTL_2, FIFO_CNTCTL_3};
+int sfiscfg[] = {SFISCFG_0, SFISCFG_1, SFISCFG_2, SFISCFG_3};
+
+/*
+ * Controller specific initialization
+ */
+uint_t
+sil3xxx_init_controller(dev_info_t *dip,
+ /* LINTED */
+ ushort_t vendor_id, ushort_t device_id)
+{
+ ddi_acc_handle_t pci_conf_handle; /* pci config space handle */
+ uint8_t cache_lnsz, frrc = 0;
+ uint32_t fifo_cnt_ctl;
+ int ports, i;
+
+#ifdef DEBUG
+ /* LINTED */
+ ushort_t sfiscfg_val;
+#endif
+
+ /*
+ * Sil3114, Sil3512, Sil3112
+ * We want to perform this initialization only once per entire
+ * pciide controller (all channels)
+ */
+ if (ddi_prop_exists(DDI_DEV_T_ANY, ddi_get_parent(dip),
+ DDI_PROP_DONTPASS, "sil3xxx-initialized")) {
+ return (TRUE);
+ }
+
+ if (pci_config_setup(ddi_get_parent(dip), &pci_conf_handle) !=
+ DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "sil3xxx_init_controller: Can't do pci_config_setup\n");
+ return (FALSE);
+ }
+
+ /*
+ * Sil3114/3512/3112 incorrectly switch between MR and MRM
+ * within the same transaction, which violates the PCI spec and can
+ * lead to incorrect data reads.  The workaround
+ * is to set bits 2:0 in the FIFO count and control register so
+ * that their value, a multiple of 32 bytes starting at 32, not 0,
+ * is greater than or equal to the cacheline size, a multiple of 4
+ * bytes.  This prevents any reads until the FIFO free space
+ * is greater than a cacheline size, ensuring only MRM is issued.
+ */
+
+ cache_lnsz = pci_config_get8(pci_conf_handle, PCI_CONF_CACHE_LINESZ);
+
+ /*
+ * The cache line is specified in 32-bit words, so multiply by 4
+ * to get bytes. Then divide by 32 bytes, the granularity of the
+ * FIFO control bits 2:0. Add 1 if there is any remainder to
+ * account for a partial 32-byte block, then subtract 1 since for
+ * FIFO controls bits 2:0, 0 corresponds to 32, 1 corresponds to
+ * 64, and so on. The calculation is expanded for clarity.
+ */
+ if (cache_lnsz != 0) {
+ frrc = (cache_lnsz * 4 / 32) +
+ (((cache_lnsz * 4) % 32) ? 1 : 0) - 1;
+ }
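+
+ /*
+ * For example, a cache line size of 16 dwords (64 bytes) gives
+ * frrc = (64 / 32) + 0 - 1 = 1, which corresponds to a 64-byte
+ * FIFO threshold.
+ */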
+
+ if (device_id == SIL3114_DEVICE_ID) {
+ ports = 4;
+ } else {
+ ports = 2;
+ }
+
+ /*
+ * The following BAR5 registers are accessed via an indirect register
+ * in the PCI configuration space rather than mapping BAR5.
+ */
+ for (i = 0; i < ports; i++) {
+ GET_BAR5_INDIRECT(pci_conf_handle, fifocntctl[i],
+ fifo_cnt_ctl);
+ fifo_cnt_ctl = (fifo_cnt_ctl & ~0x7) | (frrc & 0x7);
+ PUT_BAR5_INDIRECT(pci_conf_handle, fifocntctl[i],
+ fifo_cnt_ctl);
+ /*
+ * Correct default setting for FIS0cfg
+ */
+#ifdef DEBUG
+ GET_BAR5_INDIRECT(pci_conf_handle, sfiscfg[i],
+ sfiscfg_val);
+ ADBG_WARN(("sil3xxx_init_controller: old val SFISCfg "
+ "ch%d: %x\n", i, sfiscfg_val));
+#endif
+ PUT_BAR5_INDIRECT(pci_conf_handle, sfiscfg[i],
+ SFISCFG_ERRATA);
+#ifdef DEBUG
+ GET_BAR5_INDIRECT(pci_conf_handle, sfiscfg[i],
+ sfiscfg_val);
+ ADBG_WARN(("sil3xxx_init_controller: new val SFISCfg "
+ "ch%d: %x\n", i, sfiscfg_val));
+#endif
+ }
+
+ /* Now tear down the pci config setup */
+ pci_config_teardown(&pci_conf_handle);
+
+ /* Create property indicating that initialization was done */
+ (void) ddi_prop_update_int(DDI_DEV_T_NONE, ddi_get_parent(dip),
+ "sil3xxx-initialized", 1);
+
+ return (TRUE);
+}
diff --git a/usr/src/uts/intel/io/dktp/controller/ata/sil3xxx.h b/usr/src/uts/intel/io/dktp/controller/ata/sil3xxx.h
new file mode 100644
index 0000000000..f3ea4b0aaf
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/controller/ata/sil3xxx.h
@@ -0,0 +1,98 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SIL3XXX_H
+#define _SIL3XXX_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PCI IDs
+ */
+#define SILICON_IMAGE_VENDOR_ID 0x1095
+#define SIL3112_DEVICE_ID 0x3112
+#define SIL3114_DEVICE_ID 0x3114
+#define SIL3512_DEVICE_ID 0x3512
+
+/* Base Register 5 Indirect Address Offset */
+
+#define PCI_CONF_BA5_IND_ADDRESS 0xc0
+#define PCI_CONF_BA5_IND_ACCESS 0xc4
+
+/*
+ * FIS Configuration channel offsets
+ * Sil3114 has 4 channels
+ * Sil3112 has 2 channels
+ * Sil3512 has 2 channels
+ */
+#define SFISCFG_0 0x14c /* SFISCfg Channel 0 */
+#define SFISCFG_1 0x1cc /* SFISCfg Channel 1 */
+#define SFISCFG_2 0x34c /* SFISCfg Channel 2 */
+#define SFISCFG_3 0x3cc /* SFISCfg Channel 3 */
+
+/*
+ * FIFO count and control offsets for channels 0-3
+ */
+#define FIFO_CNTCTL_0 0x40
+#define FIFO_CNTCTL_1 0x44
+#define FIFO_CNTCTL_2 0x240
+#define FIFO_CNTCTL_3 0x244
+
+/*
+ * Errata Sil-AN-0028-C (Sil3512 Rev 0.3)
+ * Errata Sil-AN-0109-B2 (Sil3114 Rev 0.3)
+ * To prevent ERR being set erroneously for queued DMA transfers
+ * greater than 8k, FIS reception for FIS0cfg needs to be set
+ * to Accept FIS without Interlock.
+ * The default SFISCfg value of 0x10401555 in the channel SFISCfg
+ * register needs to be changed to 0x10401554.
+ */
+#define SFISCFG_ERRATA 0x10401554
+
+
+#define PUT_BAR5_INDIRECT(handle, address, value) \
+{\
+ pci_config_put32(handle, PCI_CONF_BA5_IND_ADDRESS, address); \
+ pci_config_put32(handle, PCI_CONF_BA5_IND_ACCESS, value); \
+}
+
+#define GET_BAR5_INDIRECT(handle, address, rval) \
+{\
+ pci_config_put32(handle, PCI_CONF_BA5_IND_ADDRESS, address); \
+ rval = pci_config_get32(handle, PCI_CONF_BA5_IND_ACCESS); \
+}
+
+uint_t sil3xxx_init_controller(dev_info_t *, ushort_t, ushort_t);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SIL3XXX_H */
diff --git a/usr/src/uts/common/io/dktp/dcdev/dadk.c b/usr/src/uts/intel/io/dktp/dcdev/dadk.c
index 02b22ef9db..02b22ef9db 100644
--- a/usr/src/uts/common/io/dktp/dcdev/dadk.c
+++ b/usr/src/uts/intel/io/dktp/dcdev/dadk.c
diff --git a/usr/src/uts/common/io/dktp/dcdev/gda.c b/usr/src/uts/intel/io/dktp/dcdev/gda.c
index ca40757c9f..ca40757c9f 100644
--- a/usr/src/uts/common/io/dktp/dcdev/gda.c
+++ b/usr/src/uts/intel/io/dktp/dcdev/gda.c
diff --git a/usr/src/uts/common/io/dktp/disk/cmdk.c b/usr/src/uts/intel/io/dktp/disk/cmdk.c
index 5308314aca..5308314aca 100644
--- a/usr/src/uts/common/io/dktp/disk/cmdk.c
+++ b/usr/src/uts/intel/io/dktp/disk/cmdk.c
diff --git a/usr/src/uts/common/io/dktp/drvobj/strategy.c b/usr/src/uts/intel/io/dktp/drvobj/strategy.c
index fb802e26ca..fb802e26ca 100644
--- a/usr/src/uts/common/io/dktp/drvobj/strategy.c
+++ b/usr/src/uts/intel/io/dktp/drvobj/strategy.c
diff --git a/usr/src/uts/intel/io/dktp/hba/ghd/ghd.c b/usr/src/uts/intel/io/dktp/hba/ghd/ghd.c
new file mode 100644
index 0000000000..80d28d842f
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd.c
@@ -0,0 +1,945 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/debug.h>
+#include <sys/scsi/scsi.h>
+
+#include "ghd.h"
+
+/* ghd_poll() function codes: */
+typedef enum {
+ GHD_POLL_REQUEST, /* wait for a specific request */
+ GHD_POLL_DEVICE, /* wait for a specific device to idle */
+ GHD_POLL_ALL /* wait for the whole bus to idle */
+} gpoll_t;
+
+/*
+ * Local functions:
+ */
+static gcmd_t *ghd_doneq_get(ccc_t *cccp);
+static void ghd_doneq_pollmode_enter(ccc_t *cccp);
+static void ghd_doneq_pollmode_exit(ccc_t *cccp);
+static uint_t ghd_doneq_process(caddr_t arg);
+static void ghd_do_reset_notify_callbacks(ccc_t *cccp);
+
+static uint_t ghd_dummy_intr(caddr_t arg);
+static int ghd_poll(ccc_t *cccp, gpoll_t polltype, ulong_t polltime,
+ gcmd_t *poll_gcmdp, gtgt_t *gtgtp, void *intr_status);
+
+
+/*
+ * Local configuration variables
+ */
+
+ulong_t ghd_tran_abort_timeout = 5;
+ulong_t ghd_tran_abort_lun_timeout = 5;
+ulong_t ghd_tran_reset_target_timeout = 5;
+ulong_t ghd_tran_reset_bus_timeout = 5;
+
+static int
+ghd_doneq_init(ccc_t *cccp)
+{
+ ddi_iblock_cookie_t iblock;
+
+ L2_INIT(&cccp->ccc_doneq);
+ cccp->ccc_hba_pollmode = TRUE;
+
+ if (ddi_add_softintr(cccp->ccc_hba_dip, DDI_SOFTINT_LOW,
+ &cccp->ccc_doneq_softid, &iblock, NULL,
+ ghd_doneq_process, (caddr_t)cccp) != DDI_SUCCESS) {
+ GDBG_ERROR(("ghd_doneq_init: add softintr failed cccp 0x%p\n",
+ (void *)cccp));
+ return (FALSE);
+ }
+
+ mutex_init(&cccp->ccc_doneq_mutex, NULL, MUTEX_DRIVER, iblock);
+ ghd_doneq_pollmode_exit(cccp);
+ return (TRUE);
+}
+
+/*
+ * ghd_complete():
+ *
+ * The HBA driver calls this entry point when it's completely
+ * done processing a request.
+ *
+ * See the GHD_COMPLETE_INLINE() macro in ghd.h for the actual code.
+ */
+
+void
+ghd_complete(ccc_t *cccp, gcmd_t *gcmdp)
+{
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+ GHD_COMPLETE_INLINE(cccp, gcmdp);
+}
+
+
+/*
+ * ghd_doneq_put_head():
+ *
+ * Mark the request done and prepend it to the doneq.
+ * See the GHD_DONEQ_PUT_HEAD_INLINE() macros in ghd.h for
+ * the actual code.
+ */
+void
+ghd_doneq_put_head(ccc_t *cccp, gcmd_t *gcmdp)
+{
+ GHD_DONEQ_PUT_HEAD_INLINE(cccp, gcmdp)
+}
+
+/*
+ * ghd_doneq_put_tail():
+ *
+ * Mark the request done and append it to the doneq.
+ * See the GHD_DONEQ_PUT_TAIL_INLINE() macros in ghd.h for
+ * the actual code.
+ */
+void
+ghd_doneq_put_tail(ccc_t *cccp, gcmd_t *gcmdp)
+{
+ GHD_DONEQ_PUT_TAIL_INLINE(cccp, gcmdp)
+}
+
+static gcmd_t *
+ghd_doneq_get(ccc_t *cccp)
+{
+ kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;
+ gcmd_t *gcmdp;
+
+ mutex_enter(doneq_mutexp);
+ if ((gcmdp = L2_next(&cccp->ccc_doneq)) != NULL)
+ L2_delete(&gcmdp->cmd_q);
+ mutex_exit(doneq_mutexp);
+ return (gcmdp);
+}
+
+
+static void
+ghd_doneq_pollmode_enter(ccc_t *cccp)
+{
+ kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;
+
+ mutex_enter(doneq_mutexp);
+ cccp->ccc_hba_pollmode = TRUE;
+ mutex_exit(doneq_mutexp);
+}
+
+
+static void
+ghd_doneq_pollmode_exit(ccc_t *cccp)
+{
+ kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;
+
+ mutex_enter(doneq_mutexp);
+ cccp->ccc_hba_pollmode = FALSE;
+ mutex_exit(doneq_mutexp);
+
+ /* trigger software interrupt for the completion callbacks */
+ if (!L2_EMPTY(&cccp->ccc_doneq))
+ ddi_trigger_softintr(cccp->ccc_doneq_softid);
+}
+
+
+/* ***************************************************************** */
+
+/*
+ *
+ * ghd_doneq_process()
+ *
+ * This function is called directly from the software interrupt
+ * handler.
+ *
+ * The doneq is protected by a mutex separate from the
+ * HBA mutex in order to avoid mutex contention on MP systems.
+ *
+ */
+
+static uint_t
+ghd_doneq_process(caddr_t arg)
+{
+ ccc_t *cccp = (ccc_t *)arg;
+ kmutex_t *doneq_mutexp;
+ gcmd_t *gcmdp;
+ int rc = DDI_INTR_UNCLAIMED;
+
+ doneq_mutexp = &cccp->ccc_doneq_mutex;
+
+ for (;;) {
+ mutex_enter(doneq_mutexp);
+ /* skip if FLAG_NOINTR request in progress */
+ if (cccp->ccc_hba_pollmode)
+ break;
+ /* pop the first one from the done Q */
+ if ((gcmdp = L2_next(&cccp->ccc_doneq)) == NULL)
+ break;
+ L2_delete(&gcmdp->cmd_q);
+
+ if (gcmdp->cmd_flags & GCMDFLG_RESET_NOTIFY) {
+ /* special request; processed here and discarded */
+ ghd_do_reset_notify_callbacks(cccp);
+ ghd_gcmd_free(gcmdp);
+ mutex_exit(doneq_mutexp);
+ continue;
+ }
+
+ /*
+ * drop the mutex since completion
+ * function can re-enter the top half via
+ * ghd_transport()
+ */
+ mutex_exit(doneq_mutexp);
+ gcmdp->cmd_state = GCMD_STATE_IDLE;
+ (*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, TRUE);
+#ifdef notyet
+ /* I don't think this is ever necessary */
+ rc = DDI_INTR_CLAIMED;
+#endif
+ }
+ mutex_exit(doneq_mutexp);
+ return (rc);
+}
+
+static void
+ghd_do_reset_notify_callbacks(ccc_t *cccp)
+{
+ ghd_reset_notify_list_t *rnp;
+ L2el_t *rnl = &cccp->ccc_reset_notify_list;
+
+ ASSERT(mutex_owned(&cccp->ccc_doneq_mutex));
+
+ /* lock the reset notify list while we operate on it */
+ mutex_enter(&cccp->ccc_reset_notify_mutex);
+
+ for (rnp = (ghd_reset_notify_list_t *)L2_next(rnl);
+ rnp != NULL;
+ rnp = (ghd_reset_notify_list_t *)L2_next(&rnp->l2_link)) {
+
+ /* don't call if HBA driver didn't set it */
+ if (cccp->ccc_hba_reset_notify_callback) {
+ (*cccp->ccc_hba_reset_notify_callback)(rnp->gtgtp,
+ rnp->callback, rnp->arg);
+ }
+ }
+ mutex_exit(&cccp->ccc_reset_notify_mutex);
+}
+
+
+/* ***************************************************************** */
+
+
+
+/*
+ * Autovector Interrupt Entry Point
+ *
+ * Dummy handler to be used before the mutexes have been initialized,
+ * to guard against interrupts from drivers sharing the same irq line.
+ */
+
+/*ARGSUSED*/
+static uint_t
+ghd_dummy_intr(caddr_t arg)
+{
+ return (DDI_INTR_UNCLAIMED);
+}
+
+
+/*
+ * ghd_register()
+ *
+ * Do the usual interrupt handler setup stuff.
+ *
+ * Also, set up three mutexes: the wait queue mutex, the HBA
+ * mutex, and the done queue mutex. The permitted locking
+ * orders are:
+ *
+ * 1. enter(waitq)
+ * 2. enter(activel)
+ * 3. enter(doneq)
+ * 4. enter(HBA) then enter(activel)
+ * 5. enter(HBA) then enter(doneq)
+ * 6. enter(HBA) then enter(waitq)
+ * 7. enter(waitq) then tryenter(HBA)
+ *
+ * Note: cases 6 and 7 won't deadlock because case 7 is always a
+ * mutex_tryenter() call.
+ *
+ */
+
+
+int
+ghd_register(char *labelp,
+ ccc_t *cccp,
+ dev_info_t *dip,
+ int inumber,
+ void *hba_handle,
+ int (*ccballoc)(gtgt_t *, gcmd_t *, int, int, int, int),
+ void (*ccbfree)(gcmd_t *),
+ void (*sg_func)(gcmd_t *, ddi_dma_cookie_t *, int, int),
+ int (*hba_start)(void *, gcmd_t *),
+ void (*hba_complete)(void *, gcmd_t *, int),
+ uint_t (*int_handler)(caddr_t),
+ int (*get_status)(void *, void *),
+ void (*process_intr)(void *, void *),
+ int (*timeout_func)(void *, gcmd_t *, gtgt_t *, gact_t, int),
+ tmr_t *tmrp,
+ void (*hba_reset_notify_callback)(gtgt_t *,
+ void (*)(caddr_t), caddr_t))
+{
+
+ cccp->ccc_label = labelp;
+ cccp->ccc_hba_dip = dip;
+ cccp->ccc_ccballoc = ccballoc;
+ cccp->ccc_ccbfree = ccbfree;
+ cccp->ccc_sg_func = sg_func;
+ cccp->ccc_hba_start = hba_start;
+ cccp->ccc_hba_complete = hba_complete;
+ cccp->ccc_process_intr = process_intr;
+ cccp->ccc_get_status = get_status;
+ cccp->ccc_hba_handle = hba_handle;
+ cccp->ccc_hba_reset_notify_callback = hba_reset_notify_callback;
+
+ /* initialize the HBA's list headers */
+ CCCP_INIT(cccp);
+
+ /*
+ * Establish initial dummy interrupt handler
+ * get iblock cookie to initialize mutexes used in the
+ * real interrupt handler
+ */
+ if (ddi_add_intr(dip, inumber, &cccp->ccc_iblock, NULL,
+ ghd_dummy_intr, hba_handle) != DDI_SUCCESS) {
+ return (FALSE);
+ }
+ mutex_init(&cccp->ccc_hba_mutex, NULL, MUTEX_DRIVER, cccp->ccc_iblock);
+ ddi_remove_intr(dip, inumber, cccp->ccc_iblock);
+
+ /* Establish real interrupt handler */
+ if (ddi_add_intr(dip, inumber, &cccp->ccc_iblock, NULL,
+ int_handler, (caddr_t)hba_handle) != DDI_SUCCESS) {
+ mutex_destroy(&cccp->ccc_hba_mutex);
+ return (FALSE);
+ }
+
+ mutex_init(&cccp->ccc_waitq_mutex, NULL,
+ MUTEX_DRIVER, cccp->ccc_iblock);
+
+ mutex_init(&cccp->ccc_reset_notify_mutex, NULL,
+ MUTEX_DRIVER, cccp->ccc_iblock);
+
+ if (ghd_timer_attach(cccp, tmrp, timeout_func) == FALSE) {
+ ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
+ mutex_destroy(&cccp->ccc_hba_mutex);
+ mutex_destroy(&cccp->ccc_waitq_mutex);
+ return (FALSE);
+ }
+
+ if (ghd_doneq_init(cccp)) {
+ return (TRUE);
+ }
+
+ ghd_timer_detach(cccp);
+ ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
+ mutex_destroy(&cccp->ccc_hba_mutex);
+ mutex_destroy(&cccp->ccc_waitq_mutex);
+ return (FALSE);
+
+}
+
+
+void
+ghd_unregister(ccc_t *cccp)
+{
+ ghd_timer_detach(cccp);
+ ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
+ ddi_remove_softintr(cccp->ccc_doneq_softid);
+ mutex_destroy(&cccp->ccc_hba_mutex);
+ mutex_destroy(&cccp->ccc_waitq_mutex);
+ mutex_destroy(&cccp->ccc_doneq_mutex);
+}
+
+
+
+int
+ghd_intr(ccc_t *cccp, void *intr_status)
+{
+ int (*statfunc)(void *, void *) = cccp->ccc_get_status;
+ void (*processfunc)(void *, void *) = cccp->ccc_process_intr;
+ kmutex_t *waitq_mutexp = &cccp->ccc_waitq_mutex;
+ kmutex_t *hba_mutexp = &cccp->ccc_hba_mutex;
+ void *handle = cccp->ccc_hba_handle;
+ int rc = DDI_INTR_UNCLAIMED;
+ int more;
+
+
+ mutex_enter(hba_mutexp);
+
+ GDBG_INTR(("ghd_intr(): cccp=0x%p status=0x%p\n",
+ cccp, intr_status));
+
+ for (;;) {
+ more = FALSE;
+
+ /* process the interrupt status */
+ while ((*statfunc)(handle, intr_status)) {
+ (*processfunc)(handle, intr_status);
+ rc = DDI_INTR_CLAIMED;
+ more = TRUE;
+ }
+ mutex_enter(waitq_mutexp);
+ if (ghd_waitq_process_and_mutex_hold(cccp)) {
+ ASSERT(mutex_owned(hba_mutexp));
+ mutex_exit(waitq_mutexp);
+ continue;
+ }
+ if (more) {
+ mutex_exit(waitq_mutexp);
+ continue;
+ }
+ GDBG_INTR(("ghd_intr(): done cccp=0x%p status=0x%p rc %d\n",
+ cccp, intr_status, rc));
+ /*
+ * Release the mutexes in the opposite order that they
+ * were acquired to prevent requests queued by
+ * ghd_transport() from getting hung up in the wait queue.
+ */
+ mutex_exit(hba_mutexp);
+ mutex_exit(waitq_mutexp);
+ return (rc);
+ }
+}
+
+static int
+ghd_poll(ccc_t *cccp,
+ gpoll_t polltype,
+ ulong_t polltime,
+ gcmd_t *poll_gcmdp,
+ gtgt_t *gtgtp,
+ void *intr_status)
+{
+ gcmd_t *gcmdp;
+ L2el_t gcmd_hold_queue;
+ int got_it = FALSE;
+ clock_t start_lbolt;
+ clock_t current_lbolt;
+
+
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+ L2_INIT(&gcmd_hold_queue);
+
+ /* What time is it? */
+ start_lbolt = ddi_get_lbolt();
+
+ /* unqueue and save all CMD/CCBs until I find the right one */
+ while (!got_it) {
+
+ /* Give up yet? */
+ current_lbolt = ddi_get_lbolt();
+ if (polltime && (current_lbolt - start_lbolt >= polltime))
+ break;
+
+ /*
+ * delay 1 msec each time around the loop (this is an
+ * arbitrary delay value; any non-zero value should work)
+ * because some devices don't like being polled too fast
+ * and polling with no delay saturates the bus on an MP system.
+ */
+ drv_usecwait(1000);
+
+ /*
+ * check for any new device status
+ */
+ if ((*cccp->ccc_get_status)(cccp->ccc_hba_handle, intr_status))
+ (*cccp->ccc_process_intr)(cccp->ccc_hba_handle,
+ intr_status);
+
+ /*
+ * If something completed then try to start the
+ * next request from the wait queue. Don't release
+ * the HBA mutex because I don't know whether my
+ * request(s) is/are on the done queue yet.
+ */
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ (void) ghd_waitq_process_and_mutex_hold(cccp);
+ mutex_exit(&cccp->ccc_waitq_mutex);
+
+ /*
+ * Process the first of any timed-out requests.
+ */
+ ghd_timer_poll(cccp, GHD_TIMER_POLL_ONE);
+
+ /*
+ * Unqueue all the completed requests, look for mine
+ */
+ while (gcmdp = ghd_doneq_get(cccp)) {
+ /*
+ * If we got one and it's my request, then
+ * we're done.
+ */
+ if (gcmdp == poll_gcmdp) {
+ poll_gcmdp->cmd_state = GCMD_STATE_IDLE;
+ got_it = TRUE;
+ continue;
+ }
+ /* fifo queue the other cmds on my local list */
+ L2_add(&gcmd_hold_queue, &gcmdp->cmd_q, gcmdp);
+ }
+
+
+ /*
+ * Check whether we're done yet.
+ */
+ switch (polltype) {
+ case GHD_POLL_DEVICE:
+ /*
+ * wait for everything queued on a specific device
+ */
+ if (GDEV_NACTIVE(gtgtp->gt_gdevp) == 0)
+ got_it = TRUE;
+ break;
+
+ case GHD_POLL_ALL:
+ /*
+ * if waiting for all outstanding requests and
+ * if active list is now empty then exit
+ */
+ if (GHBA_NACTIVE(cccp) == 0)
+ got_it = TRUE;
+ break;
+
+ case GHD_POLL_REQUEST:
+ break;
+
+ }
+ }
+
+ if (L2_EMPTY(&gcmd_hold_queue)) {
+ ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+ return (got_it);
+ }
+
+ /*
+ * copy the local gcmd_hold_queue back to the doneq so
+ * that the order of completion callbacks is preserved
+ */
+ while (gcmdp = L2_next(&gcmd_hold_queue)) {
+ L2_delete(&gcmdp->cmd_q);
+ GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
+ }
+
+ ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+ return (got_it);
+}
+
+
+/*
+ * ghd_tran_abort()
+ *
+ * Abort specific command on a target.
+ *
+ */
+
+int
+ghd_tran_abort(ccc_t *cccp, gcmd_t *gcmdp, gtgt_t *gtgtp, void *intr_status)
+{
+ gact_t action;
+ int rc;
+
+ /*
+ * call the driver's abort_cmd function
+ */
+
+ mutex_enter(&cccp->ccc_hba_mutex);
+ ghd_doneq_pollmode_enter(cccp);
+
+ switch (gcmdp->cmd_state) {
+ case GCMD_STATE_WAITQ:
+ /* not yet started */
+ action = GACTION_EARLY_ABORT;
+ break;
+
+ case GCMD_STATE_ACTIVE:
+ /* in progress */
+ action = GACTION_ABORT_CMD;
+ break;
+
+ default:
+ /* everything else, probably already being aborted */
+ rc = FALSE;
+ goto exit;
+ }
+
+ /* stop the timer and remove it from the active list */
+ GHD_TIMER_STOP(cccp, gcmdp);
+
+ /* start a new timer and send out the abort command */
+ ghd_timer_newstate(cccp, gcmdp, gtgtp, action, GHD_TGTREQ);
+
+ /* wait for the abort to complete */
+ if (rc = ghd_poll(cccp, GHD_POLL_REQUEST, ghd_tran_abort_timeout,
+ gcmdp, gtgtp, intr_status)) {
+ gcmdp->cmd_state = GCMD_STATE_DONEQ;
+ GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
+ }
+
+exit:
+ ghd_doneq_pollmode_exit(cccp);
+
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ ghd_waitq_process_and_mutex_exit(cccp);
+
+ return (rc);
+}
+
+
+/*
+ * ghd_tran_abort_lun()
+ *
+ * Abort all commands on a specific target.
+ *
+ */
+
+int
+ghd_tran_abort_lun(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
+{
+ int rc;
+
+ /*
+ * call the HBA driver's abort_device function
+ */
+
+ mutex_enter(&cccp->ccc_hba_mutex);
+ ghd_doneq_pollmode_enter(cccp);
+
+ /* send out the abort device request */
+ ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_ABORT_DEV, GHD_TGTREQ);
+
+ /* wait for the device to go idle */
+ rc = ghd_poll(cccp, GHD_POLL_DEVICE, ghd_tran_abort_lun_timeout,
+ NULL, gtgtp, intr_status);
+
+ ghd_doneq_pollmode_exit(cccp);
+
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ ghd_waitq_process_and_mutex_exit(cccp);
+
+ return (rc);
+}
+
+
+
+/*
+ * ghd_tran_reset_target()
+ *
+ * reset the target device
+ *
+ *
+ */
+
+int
+ghd_tran_reset_target(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
+{
+ int rc = TRUE;
+
+
+ mutex_enter(&cccp->ccc_hba_mutex);
+ ghd_doneq_pollmode_enter(cccp);
+
+ /* send out the device reset request */
+ ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_RESET_TARGET, GHD_TGTREQ);
+
+ /* wait for the device to reset */
+ rc = ghd_poll(cccp, GHD_POLL_DEVICE, ghd_tran_reset_target_timeout,
+ NULL, gtgtp, intr_status);
+
+ ghd_doneq_pollmode_exit(cccp);
+
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ ghd_waitq_process_and_mutex_exit(cccp);
+
+ return (rc);
+}
+
+
+
+/*
+ * ghd_tran_reset_bus()
+ *
+ * reset the scsi bus
+ *
+ */
+
+int
+ghd_tran_reset_bus(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
+{
+ int rc;
+
+ mutex_enter(&cccp->ccc_hba_mutex);
+ ghd_doneq_pollmode_enter(cccp);
+
+ /* send out the bus reset request */
+ ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_RESET_BUS, GHD_TGTREQ);
+
+ /*
+ * Wait for all active requests on this HBA to complete
+ */
+ rc = ghd_poll(cccp, GHD_POLL_ALL, ghd_tran_reset_bus_timeout,
+ NULL, NULL, intr_status);
+
+
+ ghd_doneq_pollmode_exit(cccp);
+
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ ghd_waitq_process_and_mutex_exit(cccp);
+
+ return (rc);
+}
+
+
+int
+ghd_transport(ccc_t *cccp,
+ gcmd_t *gcmdp,
+ gtgt_t *gtgtp,
+ ulong_t timeout,
+ int polled,
+ void *intr_status)
+{
+ gdev_t *gdevp = gtgtp->gt_gdevp;
+
+ ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
+ ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
+
+ if (polled) {
+ /*
+ * Grab the HBA mutex so no other requests are started
+ * until after this one completes.
+ */
+ mutex_enter(&cccp->ccc_hba_mutex);
+
+ GDBG_START(("ghd_transport: polled"
+ " cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
+ cccp, gdevp, gtgtp, gcmdp));
+
+ /*
+ * Lock the doneq so no other thread flushes the Q.
+ */
+ ghd_doneq_pollmode_enter(cccp);
+ }
+#if defined(GHD_DEBUG) || defined(__lint)
+ else {
+ GDBG_START(("ghd_transport: non-polled"
+ " cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
+ cccp, gdevp, gtgtp, gcmdp));
+ }
+#endif
+ /*
+ * add this request to the tail of the waitq
+ */
+ gcmdp->cmd_waitq_level = 1;
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ L2_add(&GDEV_QHEAD(gdevp), &gcmdp->cmd_q, gcmdp);
+
+ /*
+ * Add this request to the packet timer active list and start its
+ * abort timer.
+ */
+ gcmdp->cmd_state = GCMD_STATE_WAITQ;
+ ghd_timer_start(cccp, gcmdp, timeout);
+
+
+ /*
+ * Check the device wait queue throttle and perhaps move
+ * some requests to the end of the HBA wait queue.
+ */
+ ghd_waitq_shuffle_up(cccp, gdevp);
+
+ if (!polled) {
+ /*
+ * See if the HBA mutex is available but use the
+ * tryenter so I don't deadlock.
+ */
+ if (!mutex_tryenter(&cccp->ccc_hba_mutex)) {
+ /* The HBA mutex isn't available */
+ GDBG_START(("ghd_transport: !mutex cccp 0x%p\n", cccp));
+ mutex_exit(&cccp->ccc_waitq_mutex);
+ return (TRAN_ACCEPT);
+ }
+ GDBG_START(("ghd_transport: got mutex cccp 0x%p\n", cccp));
+
+ /*
+ * start as many requests as possible from the head
+ * of the HBA wait queue
+ */
+
+ ghd_waitq_process_and_mutex_exit(cccp);
+
+ ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
+ ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
+
+ return (TRAN_ACCEPT);
+ }
+
+
+ /*
+ * If polled mode (FLAG_NOINTR specified in scsi_pkt flags),
+ * then ghd_poll() waits until the request completes or times out
+ * before returning.
+ */
+
+ mutex_exit(&cccp->ccc_waitq_mutex);
+ (void) ghd_poll(cccp, GHD_POLL_REQUEST, 0, gcmdp, gtgtp, intr_status);
+ ghd_doneq_pollmode_exit(cccp);
+
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ ghd_waitq_process_and_mutex_exit(cccp);
+
+ /* call HBA's completion function but don't do callback to target */
+ (*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, FALSE);
+
+ GDBG_START(("ghd_transport: polled done cccp 0x%p\n", cccp));
+ return (TRAN_ACCEPT);
+}
+
+int ghd_reset_notify(ccc_t *cccp,
+ gtgt_t *gtgtp,
+ int flag,
+ void (*callback)(caddr_t),
+ caddr_t arg)
+{
+ ghd_reset_notify_list_t *rnp;
+ int rc = FALSE;
+
+ switch (flag) {
+
+ case SCSI_RESET_NOTIFY:
+
+ rnp = (ghd_reset_notify_list_t *)kmem_zalloc(sizeof (*rnp),
+ KM_SLEEP);
+ rnp->gtgtp = gtgtp;
+ rnp->callback = callback;
+ rnp->arg = arg;
+
+ mutex_enter(&cccp->ccc_reset_notify_mutex);
+ L2_add(&cccp->ccc_reset_notify_list, &rnp->l2_link,
+ (void *)rnp);
+ mutex_exit(&cccp->ccc_reset_notify_mutex);
+
+ rc = TRUE;
+
+ break;
+
+ case SCSI_RESET_CANCEL:
+
+ mutex_enter(&cccp->ccc_reset_notify_mutex);
+ for (rnp = (ghd_reset_notify_list_t *)
+ L2_next(&cccp->ccc_reset_notify_list);
+ rnp != NULL;
+ rnp = (ghd_reset_notify_list_t *)L2_next(&rnp->l2_link)) {
+ if (rnp->gtgtp == gtgtp &&
+ rnp->callback == callback &&
+ rnp->arg == arg) {
+ L2_delete(&rnp->l2_link);
+ kmem_free(rnp, sizeof (*rnp));
+ rc = TRUE;
+ }
+ }
+ mutex_exit(&cccp->ccc_reset_notify_mutex);
+ break;
+
+ default:
+ rc = FALSE;
+ break;
+ }
+
+ return (rc);
+}
+
+/*
+ * freeze the HBA waitq output (see ghd_waitq_process_and_mutex_hold),
+ * presumably because of a SCSI reset, for delay milliseconds.
+ */
+
+void
+ghd_freeze_waitq(ccc_t *cccp, int delay)
+{
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+
+ /* freeze the waitq for delay milliseconds */
+
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ cccp->ccc_waitq_freezetime = ddi_get_lbolt();
+ cccp->ccc_waitq_freezedelay = delay;
+ cccp->ccc_waitq_frozen = 1;
+ mutex_exit(&cccp->ccc_waitq_mutex);
+}
+
+void
+ghd_queue_hold(ccc_t *cccp)
+{
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ cccp->ccc_waitq_held = 1;
+ mutex_exit(&cccp->ccc_waitq_mutex);
+}
+
+void
+ghd_queue_unhold(ccc_t *cccp)
+{
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ cccp->ccc_waitq_held = 0;
+ mutex_exit(&cccp->ccc_waitq_mutex);
+}
+
+
+
+/*
+ * Trigger previously-registered reset notifications
+ */
+
+void
+ghd_trigger_reset_notify(ccc_t *cccp)
+{
+ gcmd_t *gcmdp;
+
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+
+ /* create magic doneq entry */
+
+ gcmdp = ghd_gcmd_alloc((gtgt_t *)NULL, 0, TRUE);
+ gcmdp->cmd_flags = GCMDFLG_RESET_NOTIFY;
+
+ /* put at head of doneq so it's processed ASAP */
+
+ GHD_DONEQ_PUT_HEAD(cccp, gcmdp);
+}
diff --git a/usr/src/uts/common/io/dktp/hba/ghd/ghd.h b/usr/src/uts/intel/io/dktp/hba/ghd/ghd.h
index ab7fd4f72b..3c8b45ed3e 100644
--- a/usr/src/uts/common/io/dktp/hba/ghd/ghd.h
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -19,6 +18,7 @@
*
* CDDL HEADER END
*/
+
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
diff --git a/usr/src/uts/intel/io/dktp/hba/ghd/ghd_debug.c b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_debug.c
new file mode 100644
index 0000000000..068dbd9f76
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_debug.c
@@ -0,0 +1,104 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ghd.h"
+#include "ghd_debug.h"
+
+#if !(defined(GHD_DEBUG) || defined(__lint))
+ulong_t ghd_debug_flags = 0;
+#else
+ulong_t ghd_debug_flags = GDBG_FLAG_ERROR
+ /* | GDBG_FLAG_WAITQ */
+ /* | GDBG_FLAG_INTR */
+ /* | GDBG_FLAG_START */
+ /* | GDBG_FLAG_WARN */
+ /* | GDBG_FLAG_DMA */
+ /* | GDBG_FLAG_PEND_INTR */
+ /* | GDBG_FLAG_START */
+ /* | GDBG_FLAG_PKT */
+ /* | GDBG_FLAG_INIT */
+ ;
+#endif
+
+void
+ghd_err(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vcmn_err(CE_CONT, fmt, ap);
+ va_end(ap);
+}
+
+#if defined(GHD_DEBUG)
+#define PRF prom_printf
+
+static void
+ghd_dump_ccc(ccc_t *P)
+{
+ PRF("nextp 0x%p tmrp 0x%p label 0x%p &mutex 0x%p\n",
+ P->ccc_nextp, P->ccc_tmrp, P->ccc_label, &P->ccc_activel_mutex);
+ PRF("&activel 0x%p dip 0x%p iblock 0x%p\n",
+ &P->ccc_activel, P->ccc_hba_dip, P->ccc_iblock);
+ PRF("softid 0x%p &hba_mutex 0x%p\n poll 0x%p\n",
+ P->ccc_soft_id, &P->ccc_hba_mutex, &P->ccc_hba_pollmode);
+ PRF("&devs 0x%p &waitq_mutex 0x%p &waitq 0x%p\n",
+ &P->ccc_devs, &P->ccc_waitq_mutex, &P->ccc_waitq);
+ PRF("waitq_freezetime 0x%p waitq_freezedelay %p\n",
+ &P->ccc_waitq_freezetime, &P->ccc_waitq_freezedelay);
+ PRF("dq softid 0x%p &dq_mutex 0x%p &doneq 0x%p\n",
+ P->ccc_doneq_softid, &P->ccc_doneq_mutex, &P->ccc_doneq);
+ PRF("handle 0x%p &ccballoc 0x%p\n",
+ P->ccc_hba_handle, &P->ccc_ccballoc);
+ PRF("hba_reset_notify_callback 0x%p notify_list 0x%p mutex 0x%p\n",
+ P->ccc_hba_reset_notify_callback, &P->ccc_reset_notify_list,
+ &P->ccc_reset_notify_mutex);
+}
+
+
+static void
+ghd_dump_gcmd(gcmd_t *P)
+{
+ PRF("cmd_q nextp 0x%p prevp 0x%p private 0x%p\n",
+ P->cmd_q.l2_nextp, P->cmd_q.l2_prevp, P->cmd_q.l2_private);
+ PRF("state %ul wq lev %ld flags 0x%x\n",
+ P->cmd_state, P->cmd_waitq_level, P->cmd_flags);
+ PRF("timer Q nextp 0x%p prevp 0x%p private 0x%p\n",
+ P->cmd_timer_link.l2_nextp, P->cmd_timer_link.l2_prevp,
+ P->cmd_timer_link.l2_private);
+
+ PRF("start time 0x%lx timeout 0x%lx hba private 0x%p pktp 0x%p\n",
+ P->cmd_start_time, P->cmd_timeout, P->cmd_private, P->cmd_pktp);
+ PRF("gtgtp 0x%p dma_flags 0x%x dma_handle 0x%p dmawin 0x%p "
+ "dmaseg 0x%p\n", P->cmd_gtgtp, P->cmd_dma_flags,
+ P->cmd_dma_handle, P->cmd_dmawin, P->cmd_dmaseg);
+ PRF("wcount %d windex %d ccount %d cindex %d\n",
+ P->cmd_wcount, P->cmd_windex, P->cmd_ccount, P->cmd_cindex);
+ PRF("totxfer %ld\n", P->cmd_totxfer);
+}
+#endif
diff --git a/usr/src/uts/common/io/dktp/hba/ghd/ghd_debug.h b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_debug.h
index 2c7087419a..cc30fadfb4 100644
--- a/usr/src/uts/common/io/dktp/hba/ghd/ghd_debug.h
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_debug.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -19,6 +18,7 @@
*
* CDDL HEADER END
*/
+
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
diff --git a/usr/src/uts/intel/io/dktp/hba/ghd/ghd_dma.c b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_dma.c
new file mode 100644
index 0000000000..45333efb48
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_dma.c
@@ -0,0 +1,245 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ghd.h"
+
+void
+ghd_dmafree_attr(gcmd_t *gcmdp)
+{
+ GDBG_DMA(("ghd_dma_attr_free: gcmdp 0x%p\n", gcmdp));
+
+ if (gcmdp->cmd_dma_handle != NULL) {
+ if (ddi_dma_unbind_handle(gcmdp->cmd_dma_handle) !=
+ DDI_SUCCESS)
+ cmn_err(CE_WARN, "ghd dma free attr: "
+ "unbind handle failed");
+ ddi_dma_free_handle(&gcmdp->cmd_dma_handle);
+ GDBG_DMA(("ghd_dma_attr_free: ddi_dma_free 0x%p\n", gcmdp));
+ gcmdp->cmd_dma_handle = NULL;
+ gcmdp->cmd_ccount = 0;
+ gcmdp->cmd_totxfer = 0;
+ }
+}
+
+
+int
+ghd_dma_buf_bind_attr(ccc_t *cccp,
+ gcmd_t *gcmdp,
+ struct buf *bp,
+ int dma_flags,
+ int (*callback)(),
+ caddr_t arg,
+ ddi_dma_attr_t *sg_attrp)
+{
+ int status;
+
+ GDBG_DMA(("ghd_dma_attr_get: start: gcmdp 0x%p sg_attrp 0x%p\n",
+ gcmdp, sg_attrp));
+
+
+ /*
+ * First time, need to establish the handle.
+ */
+
+ ASSERT(gcmdp->cmd_dma_handle == NULL);
+
+ status = ddi_dma_alloc_handle(cccp->ccc_hba_dip, sg_attrp, callback,
+ arg, &gcmdp->cmd_dma_handle);
+
+ if (status != DDI_SUCCESS) {
+ bp->b_error = 0;
+ return (FALSE);
+ }
+
+ status = ddi_dma_buf_bind_handle(gcmdp->cmd_dma_handle, bp, dma_flags,
+ callback, arg, &gcmdp->cmd_first_cookie,
+ &gcmdp->cmd_ccount);
+
+ GDBG_DMA(("ghd_dma_attr_get: setup: gcmdp 0x%p status %d h 0x%p "
+ "c 0x%d\n", gcmdp, status, gcmdp->cmd_dma_handle,
+ gcmdp->cmd_ccount));
+
+ switch (status) {
+ case DDI_DMA_MAPPED:
+ /* enable first (and only) call to ddi_dma_getwin */
+ gcmdp->cmd_wcount = 1;
+ break;
+
+ case DDI_DMA_PARTIAL_MAP:
+ /* enable first call to ddi_dma_getwin */
+ if (ddi_dma_numwin(gcmdp->cmd_dma_handle, &gcmdp->cmd_wcount) !=
+ DDI_SUCCESS) {
+ bp->b_error = 0;
+ ddi_dma_free_handle(&gcmdp->cmd_dma_handle);
+ gcmdp->cmd_dma_handle = NULL;
+ return (FALSE);
+ }
+ break;
+
+ case DDI_DMA_NORESOURCES:
+ bp->b_error = 0;
+ ddi_dma_free_handle(&gcmdp->cmd_dma_handle);
+ gcmdp->cmd_dma_handle = NULL;
+ return (FALSE);
+
+ case DDI_DMA_TOOBIG:
+ bioerror(bp, EINVAL);
+ ddi_dma_free_handle(&gcmdp->cmd_dma_handle);
+ gcmdp->cmd_dma_handle = NULL;
+ return (FALSE);
+
+ case DDI_DMA_NOMAPPING:
+ case DDI_DMA_INUSE:
+ default:
+ bioerror(bp, EFAULT);
+ ddi_dma_free_handle(&gcmdp->cmd_dma_handle);
+ gcmdp->cmd_dma_handle = NULL;
+ return (FALSE);
+ }
+
+ /* initialize the loop controls for ghd_dmaget_next_attr() */
+ gcmdp->cmd_windex = 0;
+ gcmdp->cmd_cindex = 0;
+ gcmdp->cmd_totxfer = 0;
+ gcmdp->cmd_dma_flags = dma_flags;
+ gcmdp->use_first = 1;
+ return (TRUE);
+}
+
+
+uint_t
+ghd_dmaget_next_attr(ccc_t *cccp, gcmd_t *gcmdp, long max_transfer_cnt,
+ int sg_size, ddi_dma_cookie_t cookie)
+{
+ ulong_t toxfer = 0;
+ int num_segs = 0;
+ int single_seg;
+
+ GDBG_DMA(("ghd_dma_attr_get: start: gcmdp 0x%p h 0x%p c 0x%x\n",
+ gcmdp, gcmdp->cmd_dma_handle, gcmdp->cmd_ccount));
+
+ /*
+ * Disable the single-segment Scatter/Gather option
+ * if this transfer can't be done in a single segment.
+ */
+ if (gcmdp->cmd_cindex + 1 < gcmdp->cmd_ccount) {
+ single_seg = FALSE;
+ } else {
+ single_seg = TRUE;
+ }
+
+
+ for (;;) {
+ /*
+ * call the controller specific S/G function
+ */
+ (*cccp->ccc_sg_func)(gcmdp, &cookie, single_seg, num_segs);
+
+ /* take care of the loop-bookkeeping */
+ toxfer += cookie.dmac_size;
+ num_segs++;
+ gcmdp->cmd_cindex++;
+
+ /*
+ * if this was the last cookie in the current window,
+ * set the loop controls to start the next window and
+ * exit so the HBA can do this partial transfer
+ */
+ if (gcmdp->cmd_cindex >= gcmdp->cmd_ccount) {
+ gcmdp->cmd_windex++;
+ gcmdp->cmd_cindex = 0;
+ break;
+ }
+ ASSERT(single_seg == FALSE);
+
+ if (toxfer >= max_transfer_cnt)
+ break;
+
+ if (num_segs >= sg_size)
+ break;
+
+ ddi_dma_nextcookie(gcmdp->cmd_dma_handle, &cookie);
+ }
+
+ gcmdp->cmd_totxfer += toxfer;
+
+ return (toxfer);
+}
+
+
+
+int
+ghd_dmaget_attr(ccc_t *cccp,
+ gcmd_t *gcmdp,
+ long count,
+ int sg_size,
+ uint_t *xfer)
+{
+ int status;
+ ddi_dma_cookie_t cookie;
+
+ *xfer = 0;
+
+
+ if (gcmdp->use_first == 1) {
+ cookie = gcmdp->cmd_first_cookie;
+ gcmdp->use_first = 0;
+ } else if (gcmdp->cmd_windex >= gcmdp->cmd_wcount) {
+ /*
+ * reached the end of the buffer. This should not happen.
+ */
+ ASSERT(gcmdp->cmd_windex < gcmdp->cmd_wcount);
+ return (FALSE);
+
+ } else if (gcmdp->cmd_cindex == 0) {
+ off_t offset;
+ size_t length;
+
+ /*
+ * start the next window, and get its first cookie
+ */
+ status = ddi_dma_getwin(gcmdp->cmd_dma_handle,
+ gcmdp->cmd_windex, &offset, &length,
+ &cookie, &gcmdp->cmd_ccount);
+ if (status != DDI_SUCCESS)
+ return (FALSE);
+
+ } else {
+ /*
+ * get the next cookie in the current window
+ */
+ ddi_dma_nextcookie(gcmdp->cmd_dma_handle, &cookie);
+ }
+
+ /*
+ * start the Scatter/Gather loop passing in the first
+ * cookie obtained above
+ */
+ *xfer = ghd_dmaget_next_attr(cccp, gcmdp, count, sg_size, cookie);
+ return (TRUE);
+}
diff --git a/usr/src/uts/common/io/dktp/hba/ghd/ghd_dma.h b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_dma.h
index ad785d1272..ad785d1272 100644
--- a/usr/src/uts/common/io/dktp/hba/ghd/ghd_dma.h
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_dma.h
diff --git a/usr/src/uts/intel/io/dktp/hba/ghd/ghd_gcmd.c b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_gcmd.c
new file mode 100644
index 0000000000..26c1042505
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_gcmd.c
@@ -0,0 +1,102 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 1999 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ghd.h"
+
+/*
+ * Round up all allocations so that we can guarantee
+ * long-long alignment. This is the same alignment
+ * provided by kmem_alloc().
+ */
+#define ROUNDUP(x) (((x) + 0x07) & ~0x07)
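+/* e.g. ROUNDUP(13) == 16 and ROUNDUP(16) == 16 */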
+
+/*
+ * Private wrapper for gcmd_t
+ */
+typedef struct gw_gcmd_and_length {
+ gcmd_t gcmd; /* this must be first */
+ int glen; /* length includes HBA private area */
+} gw_t;
+
+/*
+ * round up the size so the HBA private area is on an 8 byte boundary
+ */
+#define GW_PADDED_LENGTH ROUNDUP(sizeof (gw_t))
+
+typedef struct gcmd_padded_wrapper {
+ union {
+ gw_t gw;
+ char gw_pad[GW_PADDED_LENGTH];
+
+ } gwrap;
+} gwrap_t;
+
+/*
+ * Allocate a gcmd_t wrapper and HBA private area
+ */
+
+gcmd_t *
+ghd_gcmd_alloc(gtgt_t *gtgtp,
+ int ccblen,
+ int sleep)
+{
+ gwrap_t *gwp;
+ gcmd_t *gcmdp;
+ int gwrap_len;
+
+ ccblen = ROUNDUP(ccblen);
+ gwrap_len = sizeof (gwrap_t) + ccblen;
+ gwp = kmem_zalloc(gwrap_len, (sleep ? KM_SLEEP : KM_NOSLEEP));
+ if (gwp == NULL) {
+ ASSERT(sleep == FALSE);
+ return (NULL);
+ }
+
+ /* save the total length for the free function */
+ gwp->gwrap.gw.glen = gwrap_len;
+
+ /*
+ * save the ptr to the HBA private area and initialize all
+ * the gcmd_t members
+ */
+ gcmdp = &gwp->gwrap.gw.gcmd;
+ GHD_GCMD_INIT(gcmdp, (void *)(gwp + 1), gtgtp);
+ return (gcmdp);
+}
+
+
+
+/*
+ * Free the gcmd_t wrapper and HBA private area
+ */
+
+void
+ghd_gcmd_free(gcmd_t *gcmdp)
+{
+ kmem_free(gcmdp, ((gw_t *)gcmdp)->glen);
+}
diff --git a/usr/src/uts/intel/io/dktp/hba/ghd/ghd_queue.c b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_queue.c
new file mode 100644
index 0000000000..5529cfa637
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_queue.c
@@ -0,0 +1,206 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 1999 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/debug.h>
+#include "ghd_queue.h"
+
+
+
+void
+L1_add(L1_t *lp, L1el_t *lep, void *datap)
+{
+ /* init the list element */
+ lep->le_nextp = NULL;
+ lep->le_datap = datap;
+
+ if (!lp->l1_tailp) {
+ /* list is empty */
+ lp->l1_headp = lep;
+ } else {
+ /* add it to the tailend */
+ lp->l1_tailp->le_nextp = lep;
+ }
+
+ lp->l1_tailp = lep;
+}
+
+
+/*
+ * L1_delete()
+ *
+ * Remove a specific entry from a singly-linked list.
+ *
+ */
+
+void
+L1_delete(L1_t *lp, L1el_t *lep)
+{
+ L1el_t *prevp;
+
+ if (lp->l1_headp == lep) {
+ /* it's the first entry in the list */
+ if ((lp->l1_headp = lep->le_nextp) == NULL) {
+ /* the list is now empty */
+ lp->l1_tailp = NULL;
+ }
+ return;
+ }
+
+ for (prevp = lp->l1_headp; prevp != NULL; prevp = prevp->le_nextp) {
+ if (prevp->le_nextp == lep) {
+ if ((prevp->le_nextp = lep->le_nextp) == NULL)
+ lp->l1_tailp = prevp;
+ return;
+ }
+ }
+ /* it's not on this list */
+}
+
+
+/*
+ * L1_remove()
+ *
+ * Remove the entry at the head of the list (if any).
+ *
+ */
+
+void *
+L1_remove(L1_t *lp)
+{
+ L1el_t *lep;
+
+ /* pop the first one off the list head */
+ if ((lep = lp->l1_headp) == NULL) {
+ return (NULL);
+ }
+
+ /* if the list is now empty fix the tail pointer */
+ if ((lp->l1_headp = lep->le_nextp) == NULL)
+ lp->l1_tailp = NULL;
+
+ lep->le_nextp = NULL;
+
+ return (lep->le_datap);
+}
+
+
+void
+L2_add(L2el_t *headp, L2el_t *elementp, void *private)
+{
+
+ ASSERT(headp != NULL && elementp != NULL);
+ ASSERT(headp->l2_nextp != NULL);
+ ASSERT(headp->l2_prevp != NULL);
+
+ elementp->l2_private = private;
+
+ elementp->l2_nextp = headp;
+ elementp->l2_prevp = headp->l2_prevp;
+ headp->l2_prevp->l2_nextp = elementp;
+ headp->l2_prevp = elementp;
+}
+
+void
+L2_delete(L2el_t *elementp)
+{
+
+ ASSERT(elementp != NULL);
+ ASSERT(elementp->l2_nextp != NULL);
+ ASSERT(elementp->l2_prevp != NULL);
+ ASSERT(elementp->l2_nextp->l2_prevp == elementp);
+ ASSERT(elementp->l2_prevp->l2_nextp == elementp);
+
+ elementp->l2_prevp->l2_nextp = elementp->l2_nextp;
+ elementp->l2_nextp->l2_prevp = elementp->l2_prevp;
+
+ /* link it to itself in case someone does a double delete */
+ elementp->l2_nextp = elementp;
+ elementp->l2_prevp = elementp;
+}
+
+
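+/*
+ * L2_add_head()
+ *
+ * Insert an element at the head of the list, immediately after the
+ * dummy header element.
+ */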
+void
+L2_add_head(L2el_t *headp, L2el_t *elementp, void *private)
+{
+
+ ASSERT(headp != NULL && elementp != NULL);
+ ASSERT(headp->l2_nextp != NULL);
+ ASSERT(headp->l2_prevp != NULL);
+
+ elementp->l2_private = private;
+
+ elementp->l2_prevp = headp;
+ elementp->l2_nextp = headp->l2_nextp;
+ headp->l2_nextp->l2_prevp = elementp;
+ headp->l2_nextp = elementp;
+}
+
+
+
+/*
+ * L2_remove_head()
+ *
+ * Remove the entry from the head of the list (if any).
+ *
+ */
+
+void *
+L2_remove_head(L2el_t *headp)
+{
+ L2el_t *elementp;
+
+ ASSERT(headp != NULL);
+
+ if (L2_EMPTY(headp))
+ return (NULL);
+
+ elementp = headp->l2_nextp;
+
+ headp->l2_nextp = elementp->l2_nextp;
+ elementp->l2_nextp->l2_prevp = headp;
+
+ /* link it to itself in case someone does a double delete */
+ elementp->l2_nextp = elementp;
+ elementp->l2_prevp = elementp;
+
+ return (elementp->l2_private);
+}
+
+void *
+L2_next(L2el_t *elementp)
+{
+
+ ASSERT(elementp != NULL);
+
+ if (L2_EMPTY(elementp))
+ return (NULL);
+ return (elementp->l2_nextp->l2_private);
+}
diff --git a/usr/src/uts/common/io/dktp/hba/ghd/ghd_queue.h b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_queue.h
index b76f6c5240..472ba1eb44 100644
--- a/usr/src/uts/common/io/dktp/hba/ghd/ghd_queue.h
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_queue.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -19,15 +18,16 @@
*
* CDDL HEADER END
*/
+
/*
- * Copyright (c) 1999, by Sun Microsystems, Inc.
- * All rights reserved.
+ * Copyright 1999 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
*/
#ifndef _GHD_QUEUE_H
#define _GHD_QUEUE_H
-#pragma ident "%Z%%M% %I% %E% SMI"
+#pragma ident "%Z%%M% %I% %E% SMI"
#ifdef __cplusplus
extern "C" {
diff --git a/usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsa.c b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsa.c
new file mode 100644
index 0000000000..ff479c2eab
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsa.c
@@ -0,0 +1,261 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "ghd.h"
+
+
+/*
+ * Local Function Prototypes
+ */
+
+static struct scsi_pkt *ghd_pktalloc(ccc_t *cccp, struct scsi_address *ap,
+ int cmdlen, int statuslen, int tgtlen,
+ int (*callback)(), caddr_t arg, int ccblen);
+
+/*
+ * Round up all allocations so that we can guarantee
+ * long-long alignment. This is the same alignment
+ * provided by kmem_alloc().
+ */
+#define ROUNDUP(x) (((x) + 0x07) & ~0x07)
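+/*
+ * e.g. ROUNDUP(13) == 16 and ROUNDUP(16) == 16; adding 0x07 and then
+ * masking off the low three bits rounds a size up to a multiple of 8.
+ */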
+
+/*
+ * Private wrapper for gcmd_t
+ */
+
+/*
+ * round up the size so the HBA private area is on a 8 byte boundary
+ */
+#define GW_PADDED_LENGTH ROUNDUP(sizeof (gcmd_t))
+
+typedef struct gcmd_padded_wrapper {
+ union {
+ gcmd_t gw_gcmd;
+ char gw_pad[GW_PADDED_LENGTH];
+
+ } gwrap;
+} gwrap_t;
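+/*
+ * The HBA's private CCB area is allocated immediately after the
+ * gwrap_t, so (gwp + 1) in ghd_pktalloc() below is the start of
+ * that area.
+ */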
+
+
+
+
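+/*
+ * ghd_tran_sync_pkt()
+ *
+ * Sync the command's DMA buffer: for the CPU on a read, or for the
+ * device on a write.
+ */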
+/*ARGSUSED*/
+void
+ghd_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pktp)
+{
+ gcmd_t *gcmdp = PKTP2GCMDP(pktp);
+ int status;
+
+ if (gcmdp->cmd_dma_handle) {
+ status = ddi_dma_sync(gcmdp->cmd_dma_handle, 0, 0,
+ (gcmdp->cmd_dma_flags & DDI_DMA_READ) ?
+ DDI_DMA_SYNC_FORCPU : DDI_DMA_SYNC_FORDEV);
+ if (status != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "ghd_tran_sync_pkt() fail\n");
+ }
+ }
+}
+
+
+static struct scsi_pkt *
+ghd_pktalloc(ccc_t *cccp,
+ struct scsi_address *ap,
+ int cmdlen,
+ int statuslen,
+ int tgtlen,
+ int (*callback)(),
+ caddr_t arg,
+ int ccblen)
+{
+ gtgt_t *gtgtp = ADDR2GTGTP(ap);
+ struct scsi_pkt *pktp;
+ gcmd_t *gcmdp;
+ gwrap_t *gwp;
+ int gwrap_len;
+
+ gwrap_len = sizeof (gwrap_t) + ROUNDUP(ccblen);
+
+ /* allocate everything from kmem pool */
+ pktp = scsi_hba_pkt_alloc(cccp->ccc_hba_dip, ap, cmdlen, statuslen,
+ tgtlen, gwrap_len, callback, arg);
+ if (pktp == NULL) {
+ return (NULL);
+ }
+
+ /* get the ptr to the HBA specific buffer */
+ gwp = (gwrap_t *)(pktp->pkt_ha_private);
+
+ /* get the ptr to the GHD specific buffer */
+ gcmdp = &gwp->gwrap.gw_gcmd;
+
+ ASSERT((caddr_t)gwp == (caddr_t)gcmdp);
+
+ /*
+ * save the ptr to HBA private area and initialize the rest
+ * of the gcmd_t members
+ */
+ GHD_GCMD_INIT(gcmdp, (void *)(gwp + 1), gtgtp);
+
+ /*
+	 * save the scsi_pkt ptr in gcmd_t.
+ */
+ gcmdp->cmd_pktp = pktp;
+
+ /*
+	 * callback to the HBA driver so it can initialize its
+ * buffer and return the ptr to my cmd_t structure which is
+ * probably embedded in its buffer.
+ */
+
+ if (!(*cccp->ccc_ccballoc)(gtgtp, gcmdp, cmdlen, statuslen, tgtlen,
+ ccblen)) {
+ scsi_hba_pkt_free(ap, pktp);
+ return (NULL);
+ }
+
+ return (pktp);
+}
+
+
+
+/*
+ * packet free
+ */
+/*ARGSUSED*/
+void
+ghd_pktfree(ccc_t *cccp,
+ struct scsi_address *ap,
+ struct scsi_pkt *pktp)
+{
+ GDBG_PKT(("ghd_pktfree: cccp 0x%p ap 0x%p pktp 0x%p\n",
+ cccp, ap, pktp));
+
+ /* free any extra resources allocated by the HBA */
+ (*cccp->ccc_ccbfree)(PKTP2GCMDP(pktp));
+
+ /* free the scsi_pkt and the GHD and HBA private areas */
+ scsi_hba_pkt_free(ap, pktp);
+}
+
+
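+/*
+ * ghd_tran_init_pkt_attr()
+ *
+ * Common tran_init_pkt() support: allocate a new scsi_pkt if the
+ * target driver didn't pass one in, bind DMA resources to the buffer
+ * on first use, map the buffer and/or build the scatter/gather list,
+ * and leave pkt_resid set to the byte count not yet transferred.
+ */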
+struct scsi_pkt *
+ghd_tran_init_pkt_attr(ccc_t *cccp,
+ struct scsi_address *ap,
+ struct scsi_pkt *pktp,
+ struct buf *bp,
+ int cmdlen,
+ int statuslen,
+ int tgtlen,
+ int flags,
+ int (*callback)(),
+ caddr_t arg,
+ int ccblen,
+ ddi_dma_attr_t *sg_attrp)
+{
+ gcmd_t *gcmdp;
+ int new_pkt;
+ uint_t xfercount;
+
+ ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
+
+ /*
+ * Allocate a pkt
+ */
+ if (pktp == NULL) {
+ pktp = ghd_pktalloc(cccp, ap, cmdlen, statuslen, tgtlen,
+ callback, arg, ccblen);
+ if (pktp == NULL)
+ return (NULL);
+ new_pkt = TRUE;
+
+ } else {
+ new_pkt = FALSE;
+
+ }
+
+ gcmdp = PKTP2GCMDP(pktp);
+
+ GDBG_PKT(("ghd_tran_init_pkt_attr: gcmdp 0x%p dma_handle 0x%p\n",
+ gcmdp, gcmdp->cmd_dma_handle));
+
+ /*
+ * free stale DMA window if necessary.
+ */
+
+ if (cmdlen && gcmdp->cmd_dma_handle) {
+ /* release the old DMA resources */
+ ghd_dmafree_attr(gcmdp);
+ }
+
+ /*
+ * Set up dma info if there's any data and
+ * if the device supports DMA.
+ */
+
+ GDBG_PKT(("ghd_tran_init_pkt: gcmdp 0x%p bp 0x%p limp 0x%p\n",
+ gcmdp, bp, sg_attrp));
+
+ if (bp && bp->b_bcount && sg_attrp) {
+ int dma_flags;
+
+ /* check direction for data transfer */
+ if (bp->b_flags & B_READ)
+ dma_flags = DDI_DMA_READ;
+ else
+ dma_flags = DDI_DMA_WRITE;
+
+ /* check dma option flags */
+ if (flags & PKT_CONSISTENT)
+ dma_flags |= DDI_DMA_CONSISTENT;
+ if (flags & PKT_DMA_PARTIAL)
+ dma_flags |= DDI_DMA_PARTIAL;
+
+ if (gcmdp->cmd_dma_handle == NULL) {
+ if (!ghd_dma_buf_bind_attr(cccp, gcmdp, bp, dma_flags,
+ callback, arg, sg_attrp)) {
+ if (new_pkt)
+ ghd_pktfree(cccp, ap, pktp);
+ return (NULL);
+ }
+ }
+
+ /* map the buffer and/or create the scatter/gather list */
+ if (!ghd_dmaget_attr(cccp, gcmdp,
+ bp->b_bcount - gcmdp->cmd_totxfer,
+ sg_attrp->dma_attr_sgllen, &xfercount)) {
+ if (new_pkt)
+ ghd_pktfree(cccp, ap, pktp);
+ return (NULL);
+ }
+ pktp->pkt_resid = bp->b_bcount - gcmdp->cmd_totxfer;
+ } else {
+ pktp->pkt_resid = 0;
+ }
+
+ return (pktp);
+}
diff --git a/usr/src/uts/common/io/dktp/hba/ghd/ghd_scsa.h b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsa.h
index 2e622df39f..2e622df39f 100644
--- a/usr/src/uts/common/io/dktp/hba/ghd/ghd_scsa.h
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsa.h
diff --git a/usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsi.c b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsi.c
new file mode 100644
index 0000000000..2c40084f21
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsi.c
@@ -0,0 +1,73 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 1999 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/byteorder.h>
+
+
+/*
+ * functions to convert between host format and scsi format
+ */
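+/*
+ * SCSI multi-byte fields are big-endian.  These helpers convert
+ * between that format and host byte order; the 3-byte routines
+ * assume a little-endian (x86) host, which is all this module is
+ * built for.
+ */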
+void
+scsi_htos_3byte(unchar *ap, ulong nav)
+{
+ *(ushort *)ap = (ushort)(((nav & 0xff0000) >> 16) | (nav & 0xff00));
+ ap[2] = (unchar)nav;
+}
+
+void
+scsi_htos_long(unchar *ap, ulong niv)
+{
+ *(ulong *)ap = htonl(niv);
+}
+
+void
+scsi_htos_short(unchar *ap, ushort nsv)
+{
+ *(ushort *)ap = htons(nsv);
+}
+
+ulong
+scsi_stoh_3byte(unchar *ap)
+{
+ register ulong av = *(ulong *)ap;
+
+ return (((av & 0xff) << 16) | (av & 0xff00) | ((av & 0xff0000) >> 16));
+}
+
+ulong
+scsi_stoh_long(ulong ai)
+{
+ return (ntohl(ai));
+}
+
+ushort
+scsi_stoh_short(ushort as)
+{
+ return (ntohs(as));
+}
diff --git a/usr/src/uts/common/io/dktp/hba/ghd/ghd_scsi.h b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsi.h
index 5c41ed6215..40e6aa42f2 100644
--- a/usr/src/uts/common/io/dktp/hba/ghd/ghd_scsi.h
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_scsi.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -21,14 +20,15 @@
*/
/*
- * Copyright (c) 1996, Sun Microsystems, Inc.
- * All Rights Reserved.
+ * Copyright 1996 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
*/
+
#ifndef _GHD_SCSI_H
#define _GHD_SCSI_H
-#pragma ident "%Z%%M% %I% %E% SMI"
+#pragma ident "%Z%%M% %I% %E% SMI"
#ifdef __cplusplus
extern "C" {
diff --git a/usr/src/uts/intel/io/dktp/hba/ghd/ghd_timer.c b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_timer.c
new file mode 100644
index 0000000000..5ab711f6d0
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_timer.c
@@ -0,0 +1,898 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/ksynch.h>
+#include <sys/scsi/conf/autoconf.h>
+#include <sys/reboot.h>
+
+#include "ghd.h"
+
+/*
+ * Local functions
+ */
+
+static gcmd_t *ghd_timeout_get(ccc_t *cccp);
+static int ghd_timeout_loop(ccc_t *cccp);
+static uint_t ghd_timeout_softintr(caddr_t arg);
+static void ghd_timeout(void *arg);
+static void ghd_timeout_disable(tmr_t *tmrp);
+static void ghd_timeout_enable(tmr_t *tmrp);
+
+/*
+ * Local data
+ */
+long ghd_HZ;
+static kmutex_t tglobal_mutex;
+
+/* table of timeouts for abort processing steps */
+cmdstate_t ghd_timeout_table[GCMD_NSTATES];
+
+/* This table indirectly initializes the ghd_timeout_table */
+struct {
+ int valid;
+ cmdstate_t state;
+ long value;
+} ghd_time_inits[] = {
+ { TRUE, GCMD_STATE_ABORTING_CMD, 3 },
+ { TRUE, GCMD_STATE_ABORTING_DEV, 3 },
+ { TRUE, GCMD_STATE_RESETTING_DEV, 5 },
+ { TRUE, GCMD_STATE_RESETTING_BUS, 10 },
+ { TRUE, GCMD_STATE_HUNG, 60},
+ { FALSE, 0, 0 }, /* spare entry */
+ { FALSE, 0, 0 }, /* spare entry */
+ { FALSE, 0, 0 }, /* spare entry */
+ { FALSE, 0, 0 }, /* spare entry */
+ { FALSE, 0, 0 } /* spare entry */
+};
+int ghd_ntime_inits = sizeof (ghd_time_inits)
+ / sizeof (ghd_time_inits[0]);
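+/*
+ * ghd_timer_init() below copies each valid entry from this table
+ * into ghd_timeout_table[], indexed by its state value, so the
+ * timeouts can be tuned without depending on the cmdstate_t enum
+ * ordering.
+ */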
+
+/*
+ * Locally-used macros
+ */
+
+/*
+ * Compare two gcmd_t's to see if they're for the same device (same gdev_t)
+ */
+#define GCMD_SAME_DEV(gcmdp1, gcmdp2) \
+ (GCMDP2GDEVP(gcmdp1) == GCMDP2GDEVP(gcmdp2))
+
+/*
+ * Compare two gcmd_t's to see if they're for the same bus (same HBA inst)
+ */
+#define GCMD_SAME_BUS(gcmdp1, gcmdp2) \
+ (GCMDP2CCCP(gcmdp1) == GCMDP2CCCP(gcmdp2))
+
+
+/*
+ * Update state of gcmdp (in one direction, increasing state number, only)
+ */
+#define GCMD_UPDATE_STATE(gcmdp, newstate) \
+{ \
+ if ((gcmdp)->cmd_state < (newstate)) { \
+ ((gcmdp)->cmd_state = (newstate)); \
+ } \
+}
+
+#ifdef ___notyet___
+
+#include <sys/modctl.h>
+extern struct mod_ops mod_miscops;
+static struct modlmisc modlmisc = {
+ &mod_miscops, /* Type of module */
+ "CCB Timeout Utility Routines"
+};
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *)&modlmisc, NULL
+};
+
+/*
+ * If this is a loadable module then there's a single CCB timer configure
+ * structure for all HBA drivers (rather than one per HBA driver).
+ */
+static tmr_t tmr_conf;
+
+int
+_init()
+{
+ int err;
+
+ ghd_timer_init(&tmr_conf, 0);
+	if ((err = mod_install(&modlinkage)) != 0)
+		ghd_timer_fini(&tmr_conf);
+ return (err);
+}
+
+int
+_fini()
+{
+ int err;
+
+ if ((err = mod_remove(&modlinkage)) == 0)
+ ghd_timer_fini(&tmr_conf);
+ return (err);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+#endif /* ___notyet___ */
+
+
+
+/*
+ *
+ * ghd_timeout_loop()
+ *
+ * Check the CCB timer value for every active CCB for this
+ * HBA driver instance.
+ *
+ * This function is called both by the ghd_timeout() interrupt
+ * handler when called via the timer callout, and by ghd_timer_poll()
+ * while processing "polled" (FLAG_NOINTR) requests.
+ *
+ * The ccc_activel_mutex is held while a CCB list is being scanned.
+ * This prevents the HBA driver's transport or interrupt functions
+ * from changing the active CCB list. But we wake up very infrequently
+ * and do as little as possible so it shouldn't affect performance.
+ *
+ */
+
+static int
+ghd_timeout_loop(ccc_t *cccp)
+{
+ int got_any = FALSE;
+ gcmd_t *gcmdp;
+ ulong_t lbolt;
+
+ mutex_enter(&cccp->ccc_activel_mutex);
+ lbolt = ddi_get_lbolt();
+ gcmdp = (gcmd_t *)L2_next(&cccp->ccc_activel);
+ while (gcmdp) {
+ /*
+ * check to see if this one has timed out
+ */
+ if ((gcmdp->cmd_timeout > 0) &&
+ (lbolt - gcmdp->cmd_start_time >= gcmdp->cmd_timeout)) {
+ got_any = TRUE;
+ }
+ gcmdp = (gcmd_t *)L2_next(&gcmdp->cmd_timer_link);
+ }
+ mutex_exit(&cccp->ccc_activel_mutex);
+ return (got_any);
+}
+
+/*
+ *
+ * ghd_timeout()
+ *
+ * Called every t_ticks ticks to scan the CCB timer lists
+ *
+ * The t_mutex mutex is held the entire time this routine is active.
+ * It protects the list of ccc_t's.
+ *
+ * The list of cmd_t's is protected by the ccc_activel_mutex mutex
+ * in the ghd_timeout_loop() routine.
+ *
+ * We also check to see if the waitq is frozen, and if so,
+ * adjust our timeout to call back sooner if necessary (to
+ * unfreeze the waitq as soon as possible).
+ *
+ *
+ * +------------+
+ * | tmr_t |----+
+ * +------------+ |
+ * |
+ * V
+ * +---------+
+ * | ccc_t |----+
+ * +---------+ |
+ * | V
+ * | +--------+ +--------+
+ * | | gcmd_t |-->| gcmd_t |--> ...
+ * | +--------+ +--------+
+ * V
+ * +---------+
+ * | ccc_t |----+
+ * +---------+ |
+ * | V
+ * | +--------+
+ * | | gcmd_t |
+ * V +--------+
+ * ...
+ *
+ *
+ *
+ */
+
+static void
+ghd_timeout(void *arg)
+{
+ tmr_t *tmrp = (tmr_t *)arg;
+ ccc_t *cccp;
+ clock_t ufdelay_curr;
+ clock_t lbolt, delay_in_hz;
+ clock_t resched = (clock_t)0x7FFFFFFF;
+
+ /*
+ * Each HBA driver instance has a separate CCB timer list. Skip
+ * timeout processing if there are no more active timeout lists
+ * to process. (There are no lists only if there are no attached
+ * HBA instances; the list still exists if there are no outstanding
+ * active commands.)
+ */
+ mutex_enter(&tmrp->t_mutex);
+ if ((cccp = tmrp->t_ccc_listp) == NULL) {
+ mutex_exit(&tmrp->t_mutex);
+ return;
+ }
+
+ lbolt = ddi_get_lbolt();
+
+ do {
+ /*
+ * If any active CCBs on this HBA have timed out
+ * then kick off the HBA driver's softintr
+ * handler to do the timeout processing
+ */
+ if (ghd_timeout_loop(cccp)) {
+ cccp->ccc_timeout_pending = 1;
+ ddi_trigger_softintr(cccp->ccc_soft_id);
+ }
+
+ /* Record closest unfreeze time for use in next timeout */
+
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ if (cccp->ccc_waitq_frozen) {
+
+ delay_in_hz =
+ drv_usectohz(cccp->ccc_waitq_freezedelay * 1000);
+ ufdelay_curr = delay_in_hz -
+ (lbolt - cccp->ccc_waitq_freezetime);
+
+ if (ufdelay_curr < resched)
+ resched = ufdelay_curr;
+
+ /* frozen; trigger softintr to maybe unfreeze */
+ ddi_trigger_softintr(cccp->ccc_soft_id);
+ }
+ mutex_exit(&cccp->ccc_waitq_mutex);
+
+ } while ((cccp = cccp->ccc_nextp) != NULL);
+
+ /* don't allow any unfreeze delays to increase the timeout delay */
+ if (resched > tmrp->t_ticks)
+ resched = tmrp->t_ticks;
+
+ /* re-establish the timeout callback */
+ tmrp->t_timeout_id = timeout(ghd_timeout, (void *)tmrp, resched);
+
+ mutex_exit(&tmrp->t_mutex);
+}
+
+
+/*
+ *
+ * ghd_timer_newstate()
+ *
+ * The HBA mutex is held by my caller.
+ *
+ */
+
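+/*
+ * Escalates error recovery for a request: abort the command, then
+ * abort the device, reset the target, reset the bus, and finally
+ * mark the command HUNG.  Each step restarts the CCB timer using
+ * the value from ghd_timeout_table[] and calls the HBA's
+ * timeout-action function; if that function reports failure the
+ * next, more drastic, step is tried immediately.
+ */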
+void
+ghd_timer_newstate(ccc_t *cccp, gcmd_t *gcmdp, gtgt_t *gtgtp,
+ gact_t action, int calltype)
+{
+ gact_t next_action;
+ cmdstate_t next_state;
+ char *msgp;
+ long new_timeout;
+ int (*func)(void *, gcmd_t *, gtgt_t *, gact_t, int);
+ void *hba_handle;
+ gcmd_t gsav;
+ int gsav_used = 0;
+ gcmd_t *gcmdp_scan;
+
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+
+#ifdef DEBUG
+ /* it shouldn't be on the timer active list */
+ if (gcmdp != NULL) {
+ L2el_t *lp = &gcmdp->cmd_timer_link;
+ ASSERT(lp->l2_nextp == lp);
+ ASSERT(lp->l2_prevp == lp);
+ }
+#endif
+
+ func = cccp->ccc_timeout_func;
+ hba_handle = cccp->ccc_hba_handle;
+
+ for (;;) {
+ switch (action) {
+ case GACTION_EARLY_ABORT:
+ /* done before it started */
+ ASSERT(gcmdp != NULL);
+ msgp = "early abort";
+ next_state = GCMD_STATE_DONEQ;
+ next_action = GACTION_ABORT_CMD;
+ break;
+
+ case GACTION_EARLY_TIMEOUT:
+ /* done before it started */
+ ASSERT(gcmdp != NULL);
+ msgp = "early timeout";
+ next_state = GCMD_STATE_DONEQ;
+ next_action = GACTION_ABORT_CMD;
+ break;
+
+ case GACTION_ABORT_CMD:
+ msgp = "abort request";
+ ASSERT(gcmdp != NULL);
+ next_state = GCMD_STATE_ABORTING_CMD;
+ next_action = GACTION_ABORT_DEV;
+ break;
+
+ case GACTION_ABORT_DEV:
+ msgp = "abort device";
+ next_state = GCMD_STATE_ABORTING_DEV;
+ next_action = GACTION_RESET_TARGET;
+ break;
+
+ case GACTION_RESET_TARGET:
+ msgp = "reset target";
+ next_state = GCMD_STATE_RESETTING_DEV;
+ next_action = GACTION_RESET_BUS;
+ break;
+
+ case GACTION_RESET_BUS:
+ msgp = "reset bus";
+ next_state = GCMD_STATE_RESETTING_BUS;
+ next_action = GACTION_INCOMPLETE;
+ break;
+
+ case GACTION_INCOMPLETE:
+ default:
+ /* be verbose about HBA resets */
+ GDBG_ERROR(("?ghd_timer_newstate: HBA reset failed "
+ "hba 0x%p gcmdp 0x%p gtgtp 0x%p\n",
+ (void *)hba_handle, (void *)gcmdp, (void *)gtgtp));
+ /*
+ * When all else fails, punt.
+ *
+ * We're in big trouble if we get to this point.
+ * Maybe we should try to re-initialize the HBA.
+ */
+ msgp = "HBA reset";
+ next_state = GCMD_STATE_HUNG;
+ next_action = GACTION_INCOMPLETE;
+ break;
+ }
+
+ /*
+ * I want to see target requests only if verbose, but
+ * scsi_log() only prints the device pathname if level
+ * is CE_WARN or CE_PANIC...so I guess we can't use
+ * scsi_log for TGTREQ messages, or they must come to
+ * the console. How silly. Looking for "verbose boot"
+ * is non-DDI-compliant, but let's do it anyway.
+ */
+
+ if (calltype == GHD_TGTREQ) {
+ if ((boothowto & RB_VERBOSE)) {
+ scsi_log(cccp->ccc_hba_dip, cccp->ccc_label,
+ CE_WARN,
+ "target request: %s, target=%d lun=%d",
+ msgp, gtgtp->gt_target, gtgtp->gt_lun);
+ }
+ } else {
+ scsi_log(cccp->ccc_hba_dip, cccp->ccc_label, CE_WARN,
+ "timeout: %s, target=%d lun=%d", msgp,
+ gtgtp->gt_target, gtgtp->gt_lun);
+ }
+
+ /*
+ * Before firing off the HBA action, restart the timer
+ * using the timeout value from ghd_timeout_table[].
+ *
+ * The table entries should never restart the timer
+		 * for the GCMD_STATE_IDLE and GCMD_STATE_DONEQ states.
+ *
+ */
+ if (gcmdp) {
+ gcmdp->cmd_state = next_state;
+ new_timeout = ghd_timeout_table[gcmdp->cmd_state];
+ if (new_timeout != 0)
+ ghd_timer_start(cccp, gcmdp, new_timeout);
+
+ /* save a copy in case action function frees it */
+ gsav = *gcmdp;
+ gsav_used = 1;
+ }
+
+ if (action == GACTION_RESET_BUS && cccp->ccc_waitq_frozen) {
+ GDBG_WARN(("avoiding bus reset while waitq frozen\n"));
+ break;
+ }
+
+ /* invoke the HBA's action function */
+ if ((*func)(hba_handle, gcmdp, gtgtp, action, calltype)) {
+			/* the HBA took the action; wait for an interrupt or timeout */
+ break;
+ }
+ /*
+ * if the HBA reset fails leave the retry
+ * timer running and just exit.
+ */
+ if (action == GACTION_INCOMPLETE)
+ return;
+
+ /* all other failures cause transition to next action */
+ if (gcmdp != NULL && new_timeout != 0) {
+ /*
+ * But stop the old timer prior to
+ * restarting a new timer because each step may
+ * have a different timeout value.
+ */
+ GHD_TIMER_STOP(cccp, gcmdp);
+ }
+ action = next_action;
+ }
+
+ /*
+ * HBA action function is done with gsav (if used)
+ * or gtgtp/cccp (if gsav not used). We need to mark other
+ * outstanding requests if they were affected by this action
+ * (say, a device reset which also cancels all outstanding
+ * requests on this device) to prevent multiple timeouts/HBA
+ * actions for the same device or bus condition. Scan the timer
+ * list (all active requests) and update states as necessary.
+ * Hold the activel_mutex while scanning the active list. Check
+ * for either same dev/bus as gsav (if used) or for same
+ * dev/bus as gtgtp or cccp (if gsav is not used).
+ */
+
+ mutex_enter(&cccp->ccc_activel_mutex);
+
+ for (gcmdp_scan = (gcmd_t *)L2_next(&cccp->ccc_activel);
+ gcmdp_scan != NULL;
+ gcmdp_scan = (gcmd_t *)L2_next(&gcmdp_scan->cmd_timer_link)) {
+
+ /* skip idle or waitq commands */
+ if (gcmdp_scan->cmd_state <= GCMD_STATE_WAITQ)
+ continue;
+
+ switch (action) {
+
+ case GACTION_ABORT_DEV:
+ if ((gsav_used && GCMD_SAME_DEV(&gsav, gcmdp_scan)) ||
+ (GCMDP2GDEVP(gcmdp_scan) == GTGTP2GDEVP(gtgtp))) {
+ GCMD_UPDATE_STATE(gcmdp_scan,
+ GCMD_STATE_ABORTING_DEV);
+ }
+ break;
+
+ case GACTION_RESET_TARGET:
+ if ((gsav_used && GCMD_SAME_DEV(&gsav, gcmdp_scan)) ||
+ (GCMDP2GDEVP(gcmdp_scan) == GTGTP2GDEVP(gtgtp))) {
+ GCMD_UPDATE_STATE(gcmdp_scan,
+ GCMD_STATE_RESETTING_DEV);
+ }
+ break;
+
+ case GACTION_RESET_BUS:
+ if ((gsav_used && GCMD_SAME_BUS(&gsav, gcmdp_scan)) ||
+ (GCMDP2CCCP(gcmdp_scan) == cccp)) {
+ GCMD_UPDATE_STATE(gcmdp_scan,
+ GCMD_STATE_RESETTING_BUS);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ mutex_exit(&cccp->ccc_activel_mutex);
+}
+
+
+/*
+ *
+ * ghd_timeout_softintr()
+ *
+ * This interrupt is scheduled if a particular HBA instance's
+ * CCB timer list has a timed out CCB, or if the waitq is in a
+ * frozen state.
+ *
+ * Find the timed out CCB and then call the HBA driver's timeout
+ * function.
+ *
+ * In order to avoid race conditions all processing must be done
+ * while holding the HBA instance's mutex. If the mutex wasn't
+ * held the HBA driver's hardware interrupt routine could be
+ * triggered and it might try to remove a CCB from the list at
+ * the same time as we're trying to abort it.
+ *
+ * For frozen-waitq processing, just call ghd_waitq_process...
+ * it takes care of the time calculations.
+ *
+ */
+
+static uint_t
+ghd_timeout_softintr(caddr_t arg)
+{
+ ccc_t *cccp = (ccc_t *)arg;
+
+ if (cccp->ccc_timeout_pending) {
+
+ /* grab this HBA instance's mutex */
+ mutex_enter(&cccp->ccc_hba_mutex);
+
+ /*
+ * The claim is we could reset "pending" outside the mutex, but
+ * since we have to acquire the mutex anyway, it doesn't hurt
+ */
+ cccp->ccc_timeout_pending = 0;
+
+ /* timeout each expired CCB */
+ ghd_timer_poll(cccp, GHD_TIMER_POLL_ALL);
+
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ ghd_waitq_process_and_mutex_exit(cccp);
+
+ } else if (cccp->ccc_waitq_frozen) {
+ mutex_enter(&cccp->ccc_hba_mutex);
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ ghd_waitq_process_and_mutex_exit(cccp);
+ }
+
+ return (DDI_INTR_UNCLAIMED);
+}
+
+
+/*
+ * ghd_timer_poll()
+ *
+ * This function steps a packet to the next action in the recovery
+ * procedure.
+ *
+ * The caller must be already holding the HBA mutex and take care of
+ * running the pkt completion functions.
+ *
+ */
+
+void
+ghd_timer_poll(ccc_t *cccp, gtimer_poll_t calltype)
+{
+ gcmd_t *gcmdp;
+ gact_t action;
+
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+
+ /* abort each expired CCB */
+ while (gcmdp = ghd_timeout_get(cccp)) {
+
+ GDBG_INTR(("?ghd_timer_poll: cccp=0x%p gcmdp=0x%p\n",
+ cccp, gcmdp));
+
+ switch (gcmdp->cmd_state) {
+ case GCMD_STATE_IDLE:
+ case GCMD_STATE_DONEQ:
+ default:
+ /* not supposed to happen */
+ GDBG_ERROR(("ghd_timer_poll: invalid state %d\n",
+ gcmdp->cmd_state));
+ return;
+
+ case GCMD_STATE_WAITQ:
+ action = GACTION_EARLY_TIMEOUT;
+ break;
+
+ case GCMD_STATE_ACTIVE:
+ action = GACTION_ABORT_CMD;
+ break;
+
+ case GCMD_STATE_ABORTING_CMD:
+ action = GACTION_ABORT_DEV;
+ break;
+
+ case GCMD_STATE_ABORTING_DEV:
+ action = GACTION_RESET_TARGET;
+ break;
+
+ case GCMD_STATE_RESETTING_DEV:
+ action = GACTION_RESET_BUS;
+ break;
+
+ case GCMD_STATE_RESETTING_BUS:
+ action = GACTION_INCOMPLETE;
+ break;
+
+ case GCMD_STATE_HUNG:
+ action = GACTION_INCOMPLETE;
+ break;
+ }
+
+ ghd_timer_newstate(cccp, gcmdp, gcmdp->cmd_gtgtp, action,
+ GHD_TIMEOUT);
+
+ /* return after processing first cmd if requested */
+
+ if (calltype == GHD_TIMER_POLL_ONE)
+ return;
+ }
+}
+
+
+
+
+/*
+ *
+ * ghd_timeout_get()
+ *
+ * Remove the first expired CCB from a particular timer list.
+ *
+ */
+
+static gcmd_t *
+ghd_timeout_get(ccc_t *cccp)
+{
+ gcmd_t *gcmdp;
+ ulong_t lbolt;
+
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+
+ mutex_enter(&cccp->ccc_activel_mutex);
+ lbolt = ddi_get_lbolt();
+ gcmdp = (gcmd_t *)L2_next(&cccp->ccc_activel);
+ while (gcmdp != NULL) {
+ if ((gcmdp->cmd_timeout > 0) &&
+ (lbolt - gcmdp->cmd_start_time >= gcmdp->cmd_timeout))
+ goto expired;
+ gcmdp = (gcmd_t *)L2_next(&gcmdp->cmd_timer_link);
+ }
+ mutex_exit(&cccp->ccc_activel_mutex);
+ return (NULL);
+
+expired:
+	/* unlink it from the CCB timer list */
+ L2_delete(&gcmdp->cmd_timer_link);
+ mutex_exit(&cccp->ccc_activel_mutex);
+ return (gcmdp);
+}
+
+
+/*
+ *
+ * ghd_timeout_enable()
+ *
+ * Only start a single timeout callback for each HBA driver
+ * regardless of the number of boards it supports.
+ *
+ */
+
+static void
+ghd_timeout_enable(tmr_t *tmrp)
+{
+ mutex_enter(&tglobal_mutex);
+ if (tmrp->t_refs++ == 0) {
+ /* establish the timeout callback */
+ tmrp->t_timeout_id = timeout(ghd_timeout, (void *)tmrp,
+ tmrp->t_ticks);
+ }
+ mutex_exit(&tglobal_mutex);
+}
+
+static void
+ghd_timeout_disable(tmr_t *tmrp)
+{
+ ASSERT(tmrp != NULL);
+ ASSERT(tmrp->t_ccc_listp == NULL);
+
+ mutex_enter(&tglobal_mutex);
+ if (tmrp->t_refs-- <= 1)
+ (void) untimeout(tmrp->t_timeout_id);
+ mutex_exit(&tglobal_mutex);
+}
+
+/* ************************************************************************ */
+
+ /* these are the externally callable routines */
+
+
+void
+ghd_timer_init(tmr_t *tmrp, long ticks)
+{
+ int indx;
+
+ mutex_init(&tglobal_mutex, NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&tmrp->t_mutex, NULL, MUTEX_DRIVER, NULL);
+
+ /*
+ * determine default timeout value
+ */
+ ghd_HZ = drv_usectohz(1000000);
+ if (ticks == 0)
+ ticks = scsi_watchdog_tick * ghd_HZ;
+ tmrp->t_ticks = ticks;
+
+
+ /*
+ * Initialize the table of abort timer values using an
+	 * indirect lookup table so that this code isn't dependent
+ * on the cmdstate_t enum values or order.
+ */
+ for (indx = 0; indx < ghd_ntime_inits; indx++) {
+ int state;
+ ulong_t value;
+
+ if (!ghd_time_inits[indx].valid)
+ continue;
+ state = ghd_time_inits[indx].state;
+ value = ghd_time_inits[indx].value;
+ ghd_timeout_table[state] = value;
+ }
+}
+
+void
+ghd_timer_fini(tmr_t *tmrp)
+{
+ mutex_destroy(&tmrp->t_mutex);
+ mutex_destroy(&tglobal_mutex);
+}
+
+int
+ghd_timer_attach(ccc_t *cccp, tmr_t *tmrp,
+ int (*timeout_func)(void *, gcmd_t *, gtgt_t *, gact_t, int))
+{
+ ddi_iblock_cookie_t iblock;
+
+ if (ddi_add_softintr(cccp->ccc_hba_dip, DDI_SOFTINT_LOW,
+ &cccp->ccc_soft_id, &iblock, NULL,
+ ghd_timeout_softintr, (caddr_t)cccp) != DDI_SUCCESS) {
+ GDBG_ERROR((
+ "ghd_timer_attach: add softintr failed cccp 0x%p\n",
+ (void *)cccp));
+ return (FALSE);
+ }
+
+ /* init the per HBA-instance control fields */
+ mutex_init(&cccp->ccc_activel_mutex, NULL, MUTEX_DRIVER, iblock);
+ L2_INIT(&cccp->ccc_activel);
+ cccp->ccc_timeout_func = timeout_func;
+
+ /* stick this HBA's control structure on the master list */
+ mutex_enter(&tmrp->t_mutex);
+
+ cccp->ccc_nextp = tmrp->t_ccc_listp;
+ tmrp->t_ccc_listp = cccp;
+ cccp->ccc_tmrp = tmrp;
+ mutex_exit(&tmrp->t_mutex);
+
+ /*
+ * The enable and disable routines use a separate mutex than
+ * t_mutex which is used by the timeout callback function.
+ * This is to avoid a deadlock when calling untimeout() from
+ * the disable routine.
+ */
+ ghd_timeout_enable(tmrp);
+
+ return (TRUE);
+}
+
+
+/*
+ *
+ * ghd_timer_detach()
+ *
+ * clean up for a detaching HBA instance
+ *
+ */
+
+void
+ghd_timer_detach(ccc_t *cccp)
+{
+ tmr_t *tmrp = cccp->ccc_tmrp;
+ ccc_t **prevpp;
+
+ /* make certain the CCB list is empty */
+ ASSERT(cccp->ccc_activel.l2_nextp == &cccp->ccc_activel);
+ ASSERT(cccp->ccc_activel.l2_nextp == cccp->ccc_activel.l2_prevp);
+
+ mutex_enter(&tmrp->t_mutex);
+
+ prevpp = &tmrp->t_ccc_listp;
+ ASSERT(*prevpp != NULL);
+
+	/* run down the linked list to find the entry that precedes this one */
+ do {
+ if (*prevpp == cccp)
+ goto remove_it;
+ prevpp = &(*prevpp)->ccc_nextp;
+ } while (*prevpp != NULL);
+
+ /* fell off the end of the list */
+ GDBG_ERROR(("ghd_timer_detach: corrupt list, cccp=0x%p\n",
+ (void *)cccp));
+
+remove_it:
+ *prevpp = cccp->ccc_nextp;
+ mutex_exit(&tmrp->t_mutex);
+ mutex_destroy(&cccp->ccc_activel_mutex);
+
+ ddi_remove_softintr(cccp->ccc_soft_id);
+
+ ghd_timeout_disable(tmrp);
+}
+
+/*
+ *
+ * ghd_timer_start()
+ *
+ * Add a CCB to the CCB timer list.
+ */
+
+void
+ghd_timer_start(ccc_t *cccp, gcmd_t *gcmdp, long cmd_timeout)
+{
+ ulong_t lbolt;
+
+ mutex_enter(&cccp->ccc_activel_mutex);
+ lbolt = ddi_get_lbolt();
+
+ /* initialize this CCB's timer */
+ gcmdp->cmd_start_time = lbolt;
+ gcmdp->cmd_timeout = (cmd_timeout * ghd_HZ);
+
+ /* add it to the list */
+ L2_add(&cccp->ccc_activel, &gcmdp->cmd_timer_link, gcmdp);
+ mutex_exit(&cccp->ccc_activel_mutex);
+}
+
+
+/*
+ *
+ * ghd_timer_stop()
+ *
+ * Remove a completed CCB from the CCB timer list.
+ *
+ * See the GHD_TIMER_STOP_INLINE() macro in ghd.h for
+ * the actual code.
+ */
+
+void
+ghd_timer_stop(ccc_t *cccp, gcmd_t *gcmdp)
+{
+ GHD_TIMER_STOP_INLINE(cccp, gcmdp);
+}
diff --git a/usr/src/uts/intel/io/dktp/hba/ghd/ghd_waitq.c b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_waitq.c
new file mode 100644
index 0000000000..af06ea69a2
--- /dev/null
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_waitq.c
@@ -0,0 +1,429 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/note.h>
+
+#include "ghd.h"
+
+
+
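+/*
+ * ghd_target_init()
+ *
+ * Allocate and initialize the per-target-instance (gtgt_t) structure
+ * and link it to the shared per-device (gdev_t) structure, creating
+ * the gdev_t the first time this target/lun is seen.
+ */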
+/*ARGSUSED*/
+gtgt_t *
+ghd_target_init(dev_info_t *hba_dip,
+ dev_info_t *tgt_dip,
+ ccc_t *cccp,
+ size_t tgt_private_size,
+ void *hba_private,
+ ushort_t target,
+ uchar_t lun)
+{
+ _NOTE(ARGUNUSED(hba_dip))
+ gtgt_t *gtgtp;
+ size_t size = sizeof (*gtgtp) + tgt_private_size;
+ gdev_t *gdevp;
+ ulong_t maxactive;
+
+ gtgtp = kmem_zalloc(size, KM_SLEEP);
+
+ /*
+ * initialize the per instance structure
+ */
+
+ gtgtp->gt_tgt_private = (void *)(gtgtp + 1);
+ gtgtp->gt_size = size;
+ gtgtp->gt_hba_private = hba_private;
+ gtgtp->gt_target = target;
+ gtgtp->gt_lun = lun;
+ gtgtp->gt_ccc = cccp;
+
+ /*
+ * set the queue's maxactive to 1 if
+ * property not specified on target or hba devinfo node
+ */
+ maxactive = ddi_getprop(DDI_DEV_T_ANY, tgt_dip, 0, "ghd-maxactive", 1);
+ gtgtp->gt_maxactive = maxactive;
+
+ /* initialize the linked list pointers */
+ GTGT_INIT(gtgtp);
+
+ /*
+ * grab both mutexes so the queue structures
+ * stay stable while adding this instance to the linked lists
+ */
+ mutex_enter(&cccp->ccc_hba_mutex);
+ mutex_enter(&cccp->ccc_waitq_mutex);
+
+ /*
+ * Search the HBA's linked list of device structures.
+ *
+ * If this device is already attached then link this instance
+ * to the existing per-device-structure on the ccc_devs list.
+ *
+ */
+ gdevp = CCCP2GDEVP(cccp);
+ while (gdevp != NULL) {
+ if (gdevp->gd_target == target && gdevp->gd_lun == lun) {
+ GDBG_WAITQ(("ghd_target_init(%d,%d) found gdevp 0x%p"
+ " gtgtp 0x%p max %lu\n",
+ target, lun, gdevp, gtgtp, maxactive));
+
+ goto foundit;
+ }
+ gdevp = GDEV_NEXTP(gdevp);
+ }
+
+ /*
+ * Not found. This is the first instance for this device.
+ */
+
+
+ /* allocate the per-device-structure */
+
+ gdevp = kmem_zalloc(sizeof (*gdevp), KM_SLEEP);
+ gdevp->gd_target = target;
+ gdevp->gd_lun = lun;
+
+ /*
+ * link this second level queue to the HBA's first
+ * level queue
+ */
+ GDEV_QATTACH(gdevp, cccp, maxactive);
+
+ GDBG_WAITQ(("ghd_target_init(%d,%d) new gdevp 0x%p gtgtp 0x%p"
+ " max %lu\n", target, lun, gdevp, gtgtp, maxactive));
+
+foundit:
+
+ /* save the ptr to the per device structure */
+ gtgtp->gt_gdevp = gdevp;
+
+ /* Add the per instance structure to the per device list */
+ GTGT_ATTACH(gtgtp, gdevp);
+
+ ghd_waitq_process_and_mutex_exit(cccp);
+
+ return (gtgtp);
+}
+
+/*ARGSUSED*/
+void
+ghd_target_free(dev_info_t *hba_dip,
+ dev_info_t *tgt_dip,
+ ccc_t *cccp,
+ gtgt_t *gtgtp)
+{
+ _NOTE(ARGUNUSED(hba_dip,tgt_dip))
+
+ gdev_t *gdevp = gtgtp->gt_gdevp;
+
+ GDBG_WAITQ(("ghd_target_free(%d,%d) gdevp-0x%p gtgtp 0x%p\n",
+ gtgtp->gt_target, gtgtp->gt_lun, gdevp, gtgtp));
+
+ /*
+ * grab both mutexes so the queue structures
+ * stay stable while deleting this instance
+ */
+ mutex_enter(&cccp->ccc_hba_mutex);
+ mutex_enter(&cccp->ccc_waitq_mutex);
+
+ ASSERT(gdevp->gd_ninstances > 0);
+
+ /*
+ * remove this per-instance structure from the device list and
+ * free the memory
+ */
+ GTGT_DEATTACH(gtgtp, gdevp);
+ kmem_free((caddr_t)gtgtp, gtgtp->gt_size);
+
+ if (gdevp->gd_ninstances == 1) {
+ GDBG_WAITQ(("ghd_target_free: N=1 gdevp 0x%p\n", gdevp));
+ /*
+ * If there's now just one instance left attached to this
+ * device then reset the queue's max active value
+ * from that instance's saved value.
+ */
+ gtgtp = GDEVP2GTGTP(gdevp);
+ GDEV_MAXACTIVE(gdevp) = gtgtp->gt_maxactive;
+
+ } else if (gdevp->gd_ninstances == 0) {
+ /* else no instances left */
+ GDBG_WAITQ(("ghd_target_free: N=0 gdevp 0x%p\n", gdevp));
+
+ /* detach this per-dev-structure from the HBA's dev list */
+ GDEV_QDETACH(gdevp, cccp);
+ kmem_free(gdevp, sizeof (*gdevp));
+
+ }
+#if defined(GHD_DEBUG) || defined(__lint)
+ else {
+ /* leave maxactive set to 1 */
+ GDBG_WAITQ(("ghd_target_free: N>1 gdevp 0x%p\n", gdevp));
+ }
+#endif
+
+ ghd_waitq_process_and_mutex_exit(cccp);
+}
+
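+/*
+ * ghd_waitq_shuffle_up()
+ *
+ * Promote requests from a device's wait queue to the HBA wait queue
+ * until the device's maxactive throttle is reached (or, with multiple
+ * instances attached, until one request is active).
+ */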
+void
+ghd_waitq_shuffle_up(ccc_t *cccp, gdev_t *gdevp)
+{
+ gcmd_t *gcmdp;
+
+ ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));
+
+ GDBG_WAITQ(("ghd_waitq_shuffle_up: cccp 0x%p gdevp 0x%p N %ld "
+ "max %ld\n", cccp, gdevp, GDEV_NACTIVE(gdevp),
+ GDEV_MAXACTIVE(gdevp)));
+ for (;;) {
+ /*
+ * Now check the device wait queue throttle to see if I can
+ * shuffle up a request to the HBA wait queue.
+ */
+ if (GDEV_NACTIVE(gdevp) >= GDEV_MAXACTIVE(gdevp)) {
+ GDBG_WAITQ(("ghd_waitq_shuffle_up: N>MAX gdevp 0x%p\n",
+ gdevp));
+ return;
+ }
+
+ /*
+		 * Single-thread requests while multiple target driver
+		 * instances are attached, because the different instances
+		 * might have conflicting maxactive throttles.
+ */
+ if (gdevp->gd_ninstances > 1 && GDEV_NACTIVE(gdevp) > 0) {
+ GDBG_WAITQ(("ghd_waitq_shuffle_up: multi gdevp 0x%p\n",
+ gdevp));
+ return;
+ }
+
+ /*
+ * promote the topmost request from the device queue to
+ * the HBA queue.
+ */
+ if ((gcmdp = L2_remove_head(&GDEV_QHEAD(gdevp))) == NULL) {
+ /* the device is empty so we're done */
+ GDBG_WAITQ(("ghd_waitq_shuffle_up: MT gdevp 0x%p\n",
+ gdevp));
+ return;
+ }
+ L2_add(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
+ GDEV_NACTIVE(gdevp)++;
+ gcmdp->cmd_waitq_level++;
+ GDBG_WAITQ(("ghd_waitq_shuffle_up: gdevp 0x%p gcmdp 0x%p\n",
+ gdevp, gcmdp));
+ }
+}
+
+
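+/*
+ * ghd_waitq_delete()
+ *
+ * Remove a request from whichever wait queue it is still on and fix
+ * up the per-device and per-HBA active counts according to how far
+ * up the queues it had progressed (cmd_waitq_level).
+ */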
+void
+ghd_waitq_delete(ccc_t *cccp, gcmd_t *gcmdp)
+{
+ gtgt_t *gtgtp = GCMDP2GTGTP(gcmdp);
+ gdev_t *gdevp = gtgtp->gt_gdevp;
+#if defined(GHD_DEBUG) || defined(__lint)
+ Q_t *qp = &gdevp->gd_waitq;
+#endif
+
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+ mutex_enter(&cccp->ccc_waitq_mutex);
+
+ /*
+ * Adjust all queue counters. If this request is being aborted
+ * it might only have made it to the target queue. Otherwise,
+ * both the target and hba queue have to be adjusted when a
+ * request is completed normally. The cmd_waitq_level value
+ * indicates which queue counters need to be adjusted. It's
+ * incremented as the request progresses up the queues.
+ */
+ switch (gcmdp->cmd_waitq_level) {
+ case 0:
+ break;
+ case 1:
+ /*
+ * If this is an early-timeout, or early-abort, the request
+ * is still linked onto a waitq. Remove it now. If it's
+ * an active request and no longer on the waitq then calling
+ * L2_delete a second time does no harm.
+ */
+ L2_delete(&gcmdp->cmd_q);
+ break;
+
+ case 2:
+ L2_delete(&gcmdp->cmd_q);
+#if defined(GHD_DEBUG) || defined(__lint)
+ if (GDEV_NACTIVE(gdevp) == 0)
+ debug_enter("\n\nGHD WAITQ DELETE\n\n");
+#endif
+ GDEV_NACTIVE(gdevp)--;
+ break;
+
+ case 3:
+ /* it's an active or completed command */
+#if defined(GHD_DEBUG) || defined(__lint)
+ if (GDEV_NACTIVE(gdevp) == 0 || GHBA_NACTIVE(cccp) == 0)
+ debug_enter("\n\nGHD WAITQ DELETE\n\n");
+#endif
+ GDEV_NACTIVE(gdevp)--;
+ GHBA_NACTIVE(cccp)--;
+ break;
+
+ default:
+ /* this shouldn't happen */
+#if defined(GHD_DEBUG) || defined(__lint)
+ debug_enter("\n\nGHD WAITQ LEVEL > 3\n\n");
+#endif
+ break;
+ }
+
+ GDBG_WAITQ(("ghd_waitq_delete: gcmdp 0x%p qp 0x%p level %ld\n",
+ gcmdp, qp, gcmdp->cmd_waitq_level));
+
+
+ /*
+ * There's probably now more room in the HBA queue. Move
+ * up as many requests as possible.
+ */
+ ghd_waitq_shuffle_up(cccp, gdevp);
+
+ mutex_exit(&cccp->ccc_waitq_mutex);
+}
+
+
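+/*
+ * ghd_waitq_process_and_mutex_hold()
+ *
+ * Start as many requests from the HBA wait queue as the HBA's
+ * maxactive throttle allows, unless the queue is held or frozen.
+ * Returns TRUE if at least one request was started.  Called, and
+ * returns, with both the HBA and waitq mutexes held.
+ */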
+int
+ghd_waitq_process_and_mutex_hold(ccc_t *cccp)
+{
+ gcmd_t *gcmdp;
+ int rc = FALSE;
+
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+ ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));
+
+ for (;;) {
+ if (L2_EMPTY(&GHBA_QHEAD(cccp))) {
+ /* return if the list is empty */
+ GDBG_WAITQ(("ghd_waitq_proc: MT cccp 0x%p qp 0x%p\n",
+ cccp, &cccp->ccc_waitq));
+ break;
+ }
+ if (GHBA_NACTIVE(cccp) >= GHBA_MAXACTIVE(cccp)) {
+ /* return if the HBA is too active */
+ GDBG_WAITQ(("ghd_waitq_proc: N>M cccp 0x%p qp 0x%p"
+ " N %ld max %ld\n", cccp, &cccp->ccc_waitq,
+ GHBA_NACTIVE(cccp),
+ GHBA_MAXACTIVE(cccp)));
+ break;
+ }
+
+ /*
+ * bail out if the wait queue has been
+ * "held" by the HBA driver
+ */
+ if (cccp->ccc_waitq_held) {
+ GDBG_WAITQ(("ghd_waitq_proc: held"));
+ return (rc);
+ }
+
+ if (cccp->ccc_waitq_frozen) {
+
+ clock_t lbolt, delay_in_hz, time_to_wait;
+
+ delay_in_hz =
+ drv_usectohz(cccp->ccc_waitq_freezedelay * 1000);
+
+ lbolt = ddi_get_lbolt();
+ time_to_wait = delay_in_hz -
+ (lbolt - cccp->ccc_waitq_freezetime);
+
+ if (time_to_wait > 0) {
+ /*
+ * stay frozen; we'll be called again
+ * by ghd_timeout_softintr()
+ */
+ GDBG_WAITQ(("ghd_waitq_proc: frozen"));
+ return (rc);
+ } else {
+ /* unfreeze and continue */
+ GDBG_WAITQ(("ghd_waitq_proc: unfreezing"));
+ cccp->ccc_waitq_freezetime = 0;
+ cccp->ccc_waitq_freezedelay = 0;
+ cccp->ccc_waitq_frozen = 0;
+ }
+ }
+
+ gcmdp = (gcmd_t *)L2_remove_head(&GHBA_QHEAD(cccp));
+ GHBA_NACTIVE(cccp)++;
+ gcmdp->cmd_waitq_level++;
+ mutex_exit(&cccp->ccc_waitq_mutex);
+
+ /*
+ * Start up the next I/O request
+ */
+ ASSERT(gcmdp != NULL);
+ gcmdp->cmd_state = GCMD_STATE_ACTIVE;
+ if (!(*cccp->ccc_hba_start)(cccp->ccc_hba_handle, gcmdp)) {
+ /* if the HBA rejected the request, requeue it */
+ gcmdp->cmd_state = GCMD_STATE_WAITQ;
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ GHBA_NACTIVE(cccp)--;
+ gcmdp->cmd_waitq_level--;
+ L2_add_head(&GHBA_QHEAD(cccp), &gcmdp->cmd_q, gcmdp);
+ GDBG_WAITQ(("ghd_waitq_proc: busy cccp 0x%p gcmdp 0x%p"
+ " handle 0x%p\n", cccp, gcmdp,
+ cccp->ccc_hba_handle));
+ break;
+ }
+ rc = TRUE;
+ mutex_enter(&cccp->ccc_waitq_mutex);
+ GDBG_WAITQ(("ghd_waitq_proc: ++ cccp 0x%p gcmdp 0x%p N %ld\n",
+ cccp, gcmdp, GHBA_NACTIVE(cccp)));
+ }
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+ ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));
+ return (rc);
+}
+
+void
+ghd_waitq_process_and_mutex_exit(ccc_t *cccp)
+{
+ ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
+ ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));
+
+ GDBG_WAITQ(("ghd_waitq_process_and_mutex_exit: cccp 0x%p\n", cccp));
+
+ (void) ghd_waitq_process_and_mutex_hold(cccp);
+
+ /*
+ * Release the mutexes in the opposite order that they
+ * were acquired to prevent requests queued by
+ * ghd_transport() from getting hung up in the wait queue.
+ */
+ mutex_exit(&cccp->ccc_hba_mutex);
+ mutex_exit(&cccp->ccc_waitq_mutex);
+}
diff --git a/usr/src/uts/common/io/dktp/hba/ghd/ghd_waitq.h b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_waitq.h
index 143d2472e9..67a23fa37a 100644
--- a/usr/src/uts/common/io/dktp/hba/ghd/ghd_waitq.h
+++ b/usr/src/uts/intel/io/dktp/hba/ghd/ghd_waitq.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -19,15 +18,16 @@
*
* CDDL HEADER END
*/
+
/*
- * Copyright (c) 1999, by Sun Microsystems, Inc.
- * All rights reserved.
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
*/
#ifndef _GHD_WAITQ_H
#define _GHD_WAITQ_H
-#pragma ident "%Z%%M% %I% %E% SMI"
+#pragma ident "%Z%%M% %I% %E% SMI"
#ifdef __cplusplus
extern "C" {