path: root/usr/src/uts/common/io/1394
author    stevel@tonic-gate <none@none>  2005-06-14 00:00:00 -0700
committer stevel@tonic-gate <none@none>  2005-06-14 00:00:00 -0700
commit    7c478bd95313f5f23a4c958a745db2134aa03244 (patch)
tree      c871e58545497667cbb4b0a4f2daf204743e1fe7 /usr/src/uts/common/io/1394
download  illumos-gate-7c478bd95313f5f23a4c958a745db2134aa03244.tar.gz
OpenSolaris Launch
Diffstat (limited to 'usr/src/uts/common/io/1394')
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394.c  199
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394.conf  47
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_async.c  3850
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_attach.c  918
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_buf.c  190
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_csr.c  576
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_detach.c  193
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_extern.c  90
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_ioctl.c  653
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_isoch.c  895
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_isr.c  915
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_ixl_comp.c  2848
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_ixl_isr.c  1125
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_ixl_misc.c  508
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_ixl_update.c  2231
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_misc.c  223
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_ohci.c  3338
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_q.c  1764
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_s1394if.c  1256
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_tlabel.c  519
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_tlist.c  510
-rw-r--r--  usr/src/uts/common/io/1394/adapters/hci1394_vendor.c  451
-rw-r--r--  usr/src/uts/common/io/1394/h1394.c  1791
-rw-r--r--  usr/src/uts/common/io/1394/inc.flg  82
-rw-r--r--  usr/src/uts/common/io/1394/nx1394.c  566
-rw-r--r--  usr/src/uts/common/io/1394/s1394.c  159
-rw-r--r--  usr/src/uts/common/io/1394/s1394_addr.c  1704
-rw-r--r--  usr/src/uts/common/io/1394/s1394_asynch.c  2340
-rw-r--r--  usr/src/uts/common/io/1394/s1394_bus_reset.c  1566
-rw-r--r--  usr/src/uts/common/io/1394/s1394_cmp.c  438
-rw-r--r--  usr/src/uts/common/io/1394/s1394_csr.c  2198
-rw-r--r--  usr/src/uts/common/io/1394/s1394_dev_disc.c  3421
-rw-r--r--  usr/src/uts/common/io/1394/s1394_fa.c  268
-rw-r--r--  usr/src/uts/common/io/1394/s1394_fcp.c  333
-rw-r--r--  usr/src/uts/common/io/1394/s1394_hotplug.c  1204
-rw-r--r--  usr/src/uts/common/io/1394/s1394_isoch.c  1257
-rw-r--r--  usr/src/uts/common/io/1394/s1394_misc.c  983
-rw-r--r--  usr/src/uts/common/io/1394/t1394.c  3779
-rw-r--r--  usr/src/uts/common/io/1394/t1394_errmsg.c  199
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394.c  695
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394_as.c  63
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394_async.c  554
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394_cfgrom.c  560
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394_cmp.c  792
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394_fcp.c  542
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394_isoch.c  773
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_chan.c  813
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_recv.c  850
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_xmit.c  1249
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394_list.c  129
-rw-r--r--  usr/src/uts/common/io/1394/targets/av1394/av1394_queue.c  188
-rw-r--r--  usr/src/uts/common/io/1394/targets/scsa1394/hba.c  2498
-rw-r--r--  usr/src/uts/common/io/1394/targets/scsa1394/sbp2_bus.c  536
-rw-r--r--  usr/src/uts/common/io/1394/targets/scsa1394/sbp2_driver.c  996
54 files changed, 56825 insertions, 0 deletions
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394.c b/usr/src/uts/common/io/1394/adapters/hci1394.c
new file mode 100644
index 0000000000..9254b78a9e
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394.c
@@ -0,0 +1,199 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394.c
+ * 1394 (firewire) OpenHCI 1.0 HBA driver. This file contains the driver's
+ * _init(), _info(), and _fini().
+ */
+
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/1394/ieee1394.h>
+#include <sys/1394/h1394.h>
+
+#include <sys/1394/adapters/hci1394.h>
+
+
+/* HAL State Pointer */
+void *hci1394_statep;
+
+/* Character/Block Operations */
+static struct cb_ops hci1394_cb_ops = {
+ hci1394_open, /* open */
+ hci1394_close, /* close */
+ nodev, /* strategy (block) */
+ nodev, /* print (block) */
+ nodev, /* dump (block) */
+ nodev, /* read */
+ nodev, /* write */
+ hci1394_ioctl, /* ioctl */
+ nodev, /* devmap */
+ nodev, /* mmap */
+ nodev, /* segmap */
+ nochpoll, /* chpoll */
+ ddi_prop_op, /* prop_op */
+ NULL, /* streams */
+ D_NEW | D_MP |
+ D_64BIT | D_HOTPLUG, /* flags */
+ CB_REV /* rev */
+};
+
+/* Driver Operations */
+static struct dev_ops hci1394_ops = {
+ DEVO_REV, /* struct rev */
+ 0, /* refcnt */
+ hci1394_getinfo, /* getinfo */
+ nulldev, /* identify */
+ nulldev, /* probe */
+ hci1394_attach, /* attach */
+ hci1394_detach, /* detach */
+ nodev, /* reset */
+ &hci1394_cb_ops, /* cb_ops */
+ NULL, /* bus_ops */
+ NULL /* power */
+};
+
+/* Module Driver Info */
+static struct modldrv hci1394_modldrv = {
+ &mod_driverops,
+ "1394 OpenHCI HBA driver v1.0",
+ &hci1394_ops
+};
+
+/* Module Linkage */
+static struct modlinkage hci1394_modlinkage = {
+ MODREV_1,
+ &hci1394_modldrv,
+ NULL
+};
+
+#ifndef NPROBE
+extern int tnf_mod_load(void);
+extern int tnf_mod_unload(struct modlinkage *mlp);
+#endif
+
+int
+_init()
+{
+ int status;
+
+
+#ifndef NPROBE
+ (void) tnf_mod_load();
+#endif
+ TNF_PROBE_0_DEBUG(hci1394_init_enter, HCI1394_TNF_HAL_STACK, "");
+
+ status = ddi_soft_state_init(&hci1394_statep, sizeof (hci1394_state_t),
+ (size_t)HCI1394_INITIAL_STATES);
+ if (status != 0) {
+ TNF_PROBE_2(hci1394_init_ssi_fail, HCI1394_TNF_HAL_ERROR, "",
+ tnf_string, errmsg, "failed in ddi_soft_state_init",
+ tnf_int, error, status);
+ TNF_PROBE_0_DEBUG(hci1394_init_exit, HCI1394_TNF_HAL_STACK, "");
+#ifndef NPROBE
+ (void) tnf_mod_unload(&hci1394_modlinkage);
+#endif
+ return (status);
+ }
+
+ /* Call into services layer to init bus-ops */
+ status = h1394_init(&hci1394_modlinkage);
+ if (status != 0) {
+ TNF_PROBE_2(hci1394_init_h1394_fail, HCI1394_TNF_HAL_ERROR, "",
+ tnf_string, errmsg, "failed in h1394_init",
+ tnf_int, error, status);
+ TNF_PROBE_0_DEBUG(hci1394_init_exit, HCI1394_TNF_HAL_STACK, "");
+#ifndef NPROBE
+ (void) tnf_mod_unload(&hci1394_modlinkage);
+#endif
+ return (status);
+ }
+
+ status = mod_install(&hci1394_modlinkage);
+ if (status != 0) {
+ TNF_PROBE_2(hci1394_init_modi_fail, HCI1394_TNF_HAL_ERROR, "",
+ tnf_string, errmsg, "failed in mod_install",
+ tnf_int, error, status);
+ ddi_soft_state_fini(&hci1394_statep);
+#ifndef NPROBE
+ (void) tnf_mod_unload(&hci1394_modlinkage);
+#endif
+ return (status);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_init_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (status);
+}
+
+
+int
+_info(struct modinfo *modinfop)
+{
+ int status;
+
+ TNF_PROBE_0_DEBUG(hci1394_info_enter, HCI1394_TNF_HAL_STACK, "");
+ status = mod_info(&hci1394_modlinkage, modinfop);
+ TNF_PROBE_0_DEBUG(hci1394_info_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (status);
+}
+
+
+int
+_fini()
+{
+ int status;
+
+ TNF_PROBE_0_DEBUG(hci1394_fini_enter, HCI1394_TNF_HAL_STACK, "");
+
+ status = mod_remove(&hci1394_modlinkage);
+ if (status != 0) {
+ TNF_PROBE_2(hci1394_fini_modr_fail, HCI1394_TNF_HAL_ERROR, "",
+ tnf_string, errmsg, "failed in mod_remove",
+ tnf_int, error, status);
+ TNF_PROBE_0_DEBUG(hci1394_fini_exit, HCI1394_TNF_HAL_STACK, "");
+ return (status);
+ }
+
+ /* Call into the services layer to notify it about _fini */
+ h1394_fini(&hci1394_modlinkage);
+ ddi_soft_state_fini(&hci1394_statep);
+
+ TNF_PROBE_0_DEBUG(hci1394_fini_exit, HCI1394_TNF_HAL_STACK, "");
+
+#ifndef NPROBE
+ (void) tnf_mod_unload(&hci1394_modlinkage);
+#endif
+
+ return (status);
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394.conf b/usr/src/uts/common/io/1394/adapters/hci1394.conf
new file mode 100644
index 0000000000..56b57a7531
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394.conf
@@ -0,0 +1,47 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+
+reserved-addresses= 0x0000ffff,0xf0000B00,0x200,
+ 0x0000ffff,0xf0000D00,0x200,
+ 0x0000ffff,0xf0000234,0x4;
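+#
+# Note on the format (added for clarity; inferred from the 48-bit 1394
+# addressing used by this driver): each reserved-addresses entry above is
+# a triplet of address-hi (upper 16 bits of the 48-bit address),
+# address-lo (lower 32 bits), and the byte length of the reserved range.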
+#
+# force attach this driver to support hotplug activities
+ddi-forceattach=1;
+
+#
+# disable context flushing
+#
+active-dma-flush=1;
+
+#
+# By default, FCP (Function Control Protocol) address space is permanently
+# owned by the 1394 Framework. If this property exists, the FCP addresses
+# are only allocated before being used and released when not in use.
+#
+# h1394-fcp-claim-on-demand;
+
+interrupt-priorities=0x9;
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_async.c b/usr/src/uts/common/io/1394/adapters/hci1394_async.c
new file mode 100644
index 0000000000..7450dd5035
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_async.c
@@ -0,0 +1,3850 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_async.c
+ * These routines manipulate the 1394 asynchronous dma engines. This
+ * includes incoming and outgoing reads, writes, and locks and their
+ * associated responses.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+#include <sys/types.h>
+#include <sys/note.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/adapters/hci1394.h>
+
+
+/*
+ * ASYNC_ARRESP_ACK_ERROR is or'd into the error status when we get an ACK error
+ * on an ARRESP. Since the 1394 response code overlaps with the OpenHCI ACK/EVT
+ * errors, we use this to distinguish between the errors in process_arresp().
+ */
+#define ASYNC_ARRESP_ACK_ERROR 0x8000
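+
+/*
+ * For example (an illustrative note, derived from the code later in this
+ * file): an OHCI_ACK_DATA_ERROR received on an ARRESP is stored in the
+ * command status as (OHCI_ACK_DATA_ERROR | ASYNC_ARRESP_ACK_ERROR), which
+ * keeps it distinct from the 1394 response code IEEE1394_RESP_DATA_ERROR
+ * when the status is decoded in hci1394_async_arresp_process().
+ */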
+
+/* Macros to help extract a 48-bit 1394 address into a uint64_t */
+#define HCI1394_TO_ADDR_HI(data) (((uint64_t)((data) & 0xFFFF)) << 32)
+#define HCI1394_TO_ADDR_LO(data) ((uint64_t)((data) & 0xFFFFFFFF))
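+
+/*
+ * A minimal usage sketch (hypothetical variable names, not from the
+ * original source): the 48-bit destination offset arrives split across
+ * two header quadlets and can be reassembled as:
+ *
+ *	offset = HCI1394_TO_ADDR_HI(hdr_hi) | HCI1394_TO_ADDR_LO(hdr_lo);
+ */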
+
+/*
+ * Macro to convert a byte stream into a big endian quadlet or octlet or back
+ * the other way. 1394 arithmetic lock operations are done on big endian
+ * quadlets or octlets; compare-swaps and bit masks are done on byte streams.
+ * All data is treated as byte streams over the bus. These macros will convert
+ * the data to a big endian "integer" on x86 platforms if the operation is an
+ * arithmetic lock operation. They do nothing on non-x86 platforms or if the
+ * operation is not an arithmetic lock operation.
+ */
+#ifdef _LITTLE_ENDIAN
+#define HCI1394_ARITH_LOCK_SWAP32(tcode, data) \
+ (((tcode) == CMD1394_LOCK_FETCH_ADD) || \
+ ((tcode) == CMD1394_LOCK_BOUNDED_ADD) || \
+ ((tcode) == CMD1394_LOCK_WRAP_ADD)) ? \
+ (ddi_swap32(data)) : (data)
+#define HCI1394_ARITH_LOCK_SWAP64(tcode, data) \
+ (((tcode) == CMD1394_LOCK_FETCH_ADD) || \
+ ((tcode) == CMD1394_LOCK_BOUNDED_ADD) || \
+ ((tcode) == CMD1394_LOCK_WRAP_ADD)) ? \
+ (ddi_swap64(data)) : (data)
+#else
+#define HCI1394_ARITH_LOCK_SWAP32(tcode, data) (data)
+#define HCI1394_ARITH_LOCK_SWAP64(tcode, data) (data)
+#endif
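+
+/*
+ * A minimal sketch of the intended use (hypothetical names, assuming a
+ * CMD1394_LOCK_FETCH_ADD operation): the byte-stream value is converted
+ * for the arithmetic and converted back before returning to the stream:
+ *
+ *	old_data = HCI1394_ARITH_LOCK_SWAP32(tcode, stream_data);
+ *	new_data = old_data + addend;
+ *	stream_data = HCI1394_ARITH_LOCK_SWAP32(tcode, new_data);
+ */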
+
+
+
+static int hci1394_async_arresp_read(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, uint_t *tcode, hci1394_async_cmd_t **hcicmd,
+ uint_t *size);
+static int hci1394_async_arresp_size_get(uint_t tcode, hci1394_q_handle_t q,
+ uint32_t *addr, uint_t *size);
+
+static int hci1394_async_arreq_read(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, uint_t *tcode, hci1394_async_cmd_t **hcicmd,
+ uint_t *size);
+static int hci1394_async_arreq_read_qrd(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size);
+static int hci1394_async_arreq_read_qwr(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size);
+static int hci1394_async_arreq_read_brd(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size);
+static int hci1394_async_arreq_read_bwr(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size);
+static int hci1394_async_arreq_read_lck(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size);
+static int hci1394_async_arreq_read_phy(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size,
+ boolean_t *bus_reset_token);
+
+static void hci1394_async_hcicmd_init(hci1394_async_handle_t async_handle,
+ cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv,
+ hci1394_async_cmd_t **hcicmd);
+
+static void hci1394_async_atreq_start(void *async, uint32_t command_ptr);
+static void hci1394_async_arresp_start(void *async, uint32_t command_ptr);
+static void hci1394_async_arreq_start(void *async, uint32_t command_ptr);
+static void hci1394_async_atresp_start(void *async, uint32_t command_ptr);
+
+static void hci1394_async_atreq_wake(void *async);
+static void hci1394_async_arresp_wake(void *async);
+static void hci1394_async_arreq_wake(void *async);
+static void hci1394_async_atresp_wake(void *async);
+
+static void hci1394_async_atreq_flush(hci1394_async_handle_t async_handle);
+static void hci1394_async_arresp_flush(hci1394_async_handle_t async_handle);
+static void hci1394_async_arreq_flush(hci1394_async_handle_t async_handle);
+static void hci1394_async_atresp_flush(hci1394_async_handle_t async_handle);
+static void hci1394_async_pending_list_flush(hci1394_async_handle_t
+ async_handle);
+
+static void hci1394_async_pending_timeout(hci1394_tlist_node_t *node,
+ void *arg);
+static uint_t hci1394_async_timeout_calc(hci1394_async_handle_t async_handle,
+ uint_t current_time);
+
+_NOTE(SCHEME_PROTECTS_DATA("unique", msgb))
+
+/*
+ * hci1394_async_init()
+ * Initialize the async DMA engines and state. We init the tlabels; ATREQ
+ * pending Q; and ATREQ, ARRESP, ARREQ, and ATRESP Q's. init() returns a
+ * handle to be used in rest of the functions.
+ */
+int
+hci1394_async_init(hci1394_drvinfo_t *drvinfo,
+ hci1394_ohci_handle_t ohci_handle, hci1394_csr_handle_t csr_handle,
+ hci1394_async_handle_t *async_handle)
+{
+ hci1394_tlist_timer_t timer_info;
+ hci1394_q_info_t qinfo;
+ hci1394_async_t *async;
+ int status;
+
+
+ ASSERT(drvinfo != NULL);
+ ASSERT(ohci_handle != NULL);
+ ASSERT(csr_handle != NULL);
+ ASSERT(async_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_init_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* alloc the space to keep track of the list */
+ async = kmem_alloc(sizeof (hci1394_async_t), KM_SLEEP);
+
+ /* copy in parms to our local state */
+ async->as_drvinfo = drvinfo;
+ async->as_ohci = ohci_handle;
+ async->as_csr = csr_handle;
+ async->as_flushing_arreq = B_FALSE;
+ async->as_phy_reset = 0xFFFFFFFF;
+ mutex_init(&async->as_atomic_lookup, NULL, MUTEX_DRIVER,
+ drvinfo->di_iblock_cookie);
+
+ /*
+ * Initialize the tlabels. Reclaim a bad tlabel after the split timeout
+ * has gone by. This time is measured from the point at which the
+ * transaction is marked as bad, so the tlabel is effectively reclaimed
+ * at twice the split_timeout. (e.g. if the split timeout was set to
+ * 100mS and the transaction has timed out, 100mS has already gone by;
+ * we only need to wait 100mS more before reusing the tlabel. Therefore,
+ * the reclaim time passed here is split_timeout, not split_timeout * 2.)
+ * The split timeout is stored as a number of bus cycles. We need to
+ * convert this to nS since the reclaim time is passed as nS.
+ */
+ hci1394_tlabel_init(drvinfo, OHCI_BUS_CYCLE_TO_nS(
+ hci1394_csr_split_timeout_get(csr_handle)), &async->as_tlabel);
+
+ /*
+ * Initialize ATREQ pending list. A pended ATREQ will be timed out after
+ * "split_timeout" has gone by. split timeout is in bus cycles so we
+ * need to convert that to nS for the tlist timer info. We will set the
+ * timer resolution to 1/2 of the timeout so that we will have a worst
+ * case timeout of split timeout + (1/2 * split timeout). See
+ * hci1394_tlist.h for more information about this.
+ */
+ timer_info.tlt_timeout =
+ OHCI_BUS_CYCLE_TO_nS(hci1394_csr_split_timeout_get(csr_handle));
+ timer_info.tlt_timer_resolution = timer_info.tlt_timeout / 2;
+ timer_info.tlt_callback = hci1394_async_pending_timeout;
+ timer_info.tlt_callback_arg = async;
+ hci1394_tlist_init(drvinfo, &timer_info, &async->as_pending_list);
+
+ /* Initialize ATREQ Q */
+ qinfo.qi_desc_size = ASYNC_ATREQ_DESC_SIZE;
+ qinfo.qi_data_size = ASYNC_ATREQ_DATA_SIZE;
+ qinfo.qi_mode = HCI1394_ATQ;
+ qinfo.qi_start = hci1394_async_atreq_start;
+ qinfo.qi_wake = hci1394_async_atreq_wake;
+ qinfo.qi_callback_arg = async;
+ status = hci1394_q_init(drvinfo, async->as_ohci, &qinfo,
+ &async->as_atreq_q);
+ if (status != DDI_SUCCESS) {
+ mutex_destroy(&async->as_atomic_lookup);
+ hci1394_tlist_fini(&async->as_pending_list);
+ hci1394_tlabel_fini(&async->as_tlabel);
+ kmem_free(async, sizeof (hci1394_async_t));
+ *async_handle = NULL;
+ TNF_PROBE_0(hci1394_async_q_init_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_async_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize ARRESP Q */
+ qinfo.qi_desc_size = ASYNC_ARRESP_DESC_SIZE;
+ qinfo.qi_data_size = ASYNC_ARRESP_DATA_SIZE;
+ qinfo.qi_mode = HCI1394_ARQ;
+ qinfo.qi_start = hci1394_async_arresp_start;
+ qinfo.qi_wake = hci1394_async_arresp_wake;
+ qinfo.qi_callback_arg = async;
+ status = hci1394_q_init(drvinfo, async->as_ohci, &qinfo,
+ &async->as_arresp_q);
+ if (status != DDI_SUCCESS) {
+ mutex_destroy(&async->as_atomic_lookup);
+ hci1394_tlist_fini(&async->as_pending_list);
+ hci1394_tlabel_fini(&async->as_tlabel);
+ hci1394_q_fini(&async->as_atreq_q);
+ kmem_free(async, sizeof (hci1394_async_t));
+ *async_handle = NULL;
+ TNF_PROBE_0(hci1394_async_q_init_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_async_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize ARREQ Q */
+ qinfo.qi_desc_size = ASYNC_ARREQ_DESC_SIZE;
+ qinfo.qi_data_size = ASYNC_ARREQ_DATA_SIZE;
+ qinfo.qi_mode = HCI1394_ARQ;
+ qinfo.qi_start = hci1394_async_arreq_start;
+ qinfo.qi_wake = hci1394_async_arreq_wake;
+ qinfo.qi_callback_arg = async;
+ status = hci1394_q_init(drvinfo, async->as_ohci, &qinfo,
+ &async->as_arreq_q);
+ if (status != DDI_SUCCESS) {
+ mutex_destroy(&async->as_atomic_lookup);
+ hci1394_tlist_fini(&async->as_pending_list);
+ hci1394_tlabel_fini(&async->as_tlabel);
+ hci1394_q_fini(&async->as_atreq_q);
+ hci1394_q_fini(&async->as_arresp_q);
+ kmem_free(async, sizeof (hci1394_async_t));
+ *async_handle = NULL;
+ TNF_PROBE_0(hci1394_async_q_init_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_async_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize ATRESP Q */
+ qinfo.qi_desc_size = ASYNC_ATRESP_DESC_SIZE;
+ qinfo.qi_data_size = ASYNC_ATRESP_DATA_SIZE;
+ qinfo.qi_mode = HCI1394_ATQ;
+ qinfo.qi_start = hci1394_async_atresp_start;
+ qinfo.qi_wake = hci1394_async_atresp_wake;
+ qinfo.qi_callback_arg = async;
+ status = hci1394_q_init(drvinfo, async->as_ohci, &qinfo,
+ &async->as_atresp_q);
+ if (status != DDI_SUCCESS) {
+ mutex_destroy(&async->as_atomic_lookup);
+ hci1394_tlist_fini(&async->as_pending_list);
+ hci1394_tlabel_fini(&async->as_tlabel);
+ hci1394_q_fini(&async->as_atreq_q);
+ hci1394_q_fini(&async->as_arresp_q);
+ hci1394_q_fini(&async->as_arreq_q);
+ kmem_free(async, sizeof (hci1394_async_t));
+ *async_handle = NULL;
+ TNF_PROBE_0(hci1394_async_q_init_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_async_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ *async_handle = async;
+
+ TNF_PROBE_0_DEBUG(hci1394_async_init_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_async_fini()
+ * Frees up the space allocated in init(). Notice that a pointer to the
+ * handle is used for the parameter. fini() will set your handle to NULL
+ * before returning.
+ */
+void
+hci1394_async_fini(hci1394_async_handle_t *async_handle)
+{
+ hci1394_async_t *async;
+
+
+ ASSERT(async_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_fini_enter, HCI1394_TNF_HAL_STACK, "");
+
+ async = (hci1394_async_t *)*async_handle;
+
+ mutex_destroy(&async->as_atomic_lookup);
+ hci1394_tlabel_fini(&async->as_tlabel);
+ hci1394_tlist_fini(&async->as_pending_list);
+ hci1394_q_fini(&async->as_atreq_q);
+ hci1394_q_fini(&async->as_atresp_q);
+ hci1394_q_fini(&async->as_arreq_q);
+ hci1394_q_fini(&async->as_arresp_q);
+
+ kmem_free(async, sizeof (hci1394_async_t));
+
+ /* set handle to null. This helps catch bugs. */
+ *async_handle = NULL;
+
+ TNF_PROBE_0_DEBUG(hci1394_async_fini_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_suspend()
+ * The system is getting ready to be suspended. Make sure that all of
+ * the Q's are clean and that there are no scheduled timeouts in the
+ * pending Q.
+ */
+void
+hci1394_async_suspend(hci1394_async_handle_t async_handle)
+{
+ ASSERT(async_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_suspend_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* Flush out async DMA Q's */
+ hci1394_async_flush(async_handle);
+
+ /* Cancel any scheduled pending timeouts */
+ hci1394_tlist_timeout_cancel(async_handle->as_pending_list);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_suspend_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_resume()
+ * Re-setup the DMA Q's during a resume after a successful suspend. The
+ * tlabels will be re-initialized during the bus reset and the pending Q will
+ * be flushed during the suspend.
+ */
+int
+hci1394_async_resume(hci1394_async_handle_t async_handle)
+{
+ ASSERT(async_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_resume_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ hci1394_q_resume(async_handle->as_atreq_q);
+ hci1394_q_resume(async_handle->as_atresp_q);
+ hci1394_q_resume(async_handle->as_arreq_q);
+ hci1394_q_resume(async_handle->as_arresp_q);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_async_cmd_overhead()
+ * Return the size of the HAL private area to attach to every alloced 1394
+ * framework command. This allows us to track command state without having
+ * to alloc memory every time a command comes down the pipe.
+ */
+uint_t
+hci1394_async_cmd_overhead()
+{
+ return (sizeof (hci1394_async_cmd_t));
+}
+
+
+/*
+ * hci1394_async_flush()
+ * Flush out the Async Q's and the ATREQ pending list. This is called every
+ * bus reset so that we're sync'd up with the HW and when shutting down or
+ * suspending to make sure we clean up after all commands.
+ */
+void
+hci1394_async_flush(hci1394_async_handle_t async_handle)
+{
+ ASSERT(async_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_flush_enter, HCI1394_TNF_HAL_STACK, "");
+
+ hci1394_async_atreq_flush(async_handle);
+ hci1394_async_arresp_flush(async_handle);
+ hci1394_async_pending_list_flush(async_handle);
+ hci1394_async_arreq_flush(async_handle);
+ hci1394_async_atresp_flush(async_handle);
+ hci1394_tlabel_reset(async_handle->as_tlabel);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_flush_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_pending_timeout_update()
+ * Update the timeout for the pending list. This updates both the pending
+ * list timeout and time we wait to reclaim bad tlabels. timeout is the
+ * time in nS so we do not have to do any conversions. This routine will be
+ * called when the CSR split timeout registers are updated.
+ */
+void
+hci1394_async_pending_timeout_update(hci1394_async_handle_t async_handle,
+ hrtime_t timeout)
+{
+ ASSERT(async_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_pending_timeout_update_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ hci1394_tlist_timeout_update(async_handle->as_pending_list, timeout);
+ hci1394_tlabel_set_reclaim_time(async_handle->as_tlabel, timeout);
+ TNF_PROBE_0_DEBUG(hci1394_async_pending_timeout_update_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_atreq_process()
+ * Process an atreq, if one has completed. This is called during interrupt
+ * processing and will process a completed atreq. It returns status if an
+ * atreq was processed so that the ISR knows that it needs to be called
+ * again to see if another ATREQ has completed. flush_q set to B_TRUE tells
+ * this routine to process all commands regardless of their completion
+ * status. This is used during bus reset processing to remove all commands
+ * from the Q.
+ *
+ * There are a few race conditions that we have to watch for in atreq/arresp.
+ * They all have to do with pended responses so they are not applicable in
+ * the ARREQ/ATRESP engine (since ATRESP's can't be pended).
+ *
+ * Since the race conditions only exist for pended responses, we will only
+ * talk about that sequence here. We're also going to simplify the discussion
+ * so what the code does, so it won't exactly match what we say (e.g. we
+ * don't always setup a timeout for every single command, etc.)
+ *
+ * After Q'ing up an ATREQ, we will process the result of that command in
+ * one of a couple different paths. A normal condition would be that we Q up
+ * a command, we get an ATREQ complete interrupt and look at the ATREQ
+ * result. In the case it has been pended, we setup a timeout to wait for the
+ * response. If we receive the response before the timeout, the command is
+ * done and we send the response up the chain, if we do not, the command is
+ * done and we send a timeout notification up the chain.
+ *
+ * The first race condition is when we get the timeout at the same time as
+ * the response. At first glance a mutex around the command state would
+ * solve this problem. But on a multi-processor machine, we may have the
+ * ARRESP interrupt handler (ISR) running on one processor and the timeout on
+ * another, so the command state could change between two
+ * reads while in the ISR. We therefore need slightly more complex
+ * logic around changing the command state and have to be careful how and
+ * when we do this.
+ *
+ * The second race condition is that we could see the ARRESP before we
+ * process the ATREQ. We could be processing a few ARRESP from previous
+ * ATREQ's when the ATREQ completes and then the ARRESP comes in. Since we
+ * already are in the interrupt handler, the ATREQ complete will not preempt
+ * us.
+ *
+ * We will never see a race condition between the ATREQ interrupt for a
+ * command and the pending timeout since the command is not being timed until
+ * this routine is run for that command.
+ */
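+/*
+ * Reading aid (a summary derived from the code below, not part of the
+ * original comment): the per-command ac_state moves through
+ *
+ *	HCI1394_CMD_STATE_IN_PROGRESS
+ *	    --(ACK pending seen in atreq_process())-->
+ *	HCI1394_CMD_STATE_PENDING
+ *	    --(ARRESP seen in arresp_process(), or pending timeout)-->
+ *	HCI1394_CMD_STATE_COMPLETED
+ *
+ * Any ACK other than ACK pending moves the command straight from
+ * IN_PROGRESS to COMPLETED. If the ARRESP is processed before the ATREQ
+ * ACK (the second race above), arresp_process() marks the command
+ * COMPLETED and saves the status in ac_status; atreq_process() then
+ * completes the command using that saved status.
+ */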
+int
+hci1394_async_atreq_process(hci1394_async_handle_t async_handle,
+ boolean_t flush_q, boolean_t *request_available)
+{
+ hci1394_async_cmd_t *hcicmd;
+ hci1394_q_cmd_t *qcmd;
+ int cmd_status;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(request_available != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_process_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Get the next ATREQ that has completed (if one has). Space is free'd
+ * up in atreq_q and atreq_data_q as part of this function call.
+ */
+ hci1394_q_at_next(async_handle->as_atreq_q, flush_q, &qcmd);
+
+ /*
+ * See if there were any more requests on the ATREQ Q. A NULL means there
+ * were no completed commands left on the Q
+ */
+ if (qcmd == NULL) {
+ *request_available = B_FALSE;
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /* There is a completed ATREQ, setup the HAL command pointer */
+ *request_available = B_TRUE;
+ hcicmd = (hci1394_async_cmd_t *)qcmd->qc_arg;
+
+ TNF_PROBE_1_DEBUG(hci1394_atreq_ack, HCI1394_TNF_HAL, "", tnf_uint,
+ atreq_ack, qcmd->qc_status);
+
+ /* save away the command completed timestamp for the services layer */
+ hcicmd->ac_priv->ack_tstamp = qcmd->qc_timestamp;
+
+ /*
+ * Make sure this command has not already been processed. This command
+ * may have already received a response. If the ACK was not an ACK
+ * pending, we have a HW error (i.e. the target HW sent a response to a
+ * non-pended request). There is a race condition where the software
+ * will see and complete a response before processing its ACK pending.
+ * This can only happen for ACK pendings. We have seen both this race
+ * condition and a response to a non-pended request during real-world
+ * testing :-)
+ */
+ if (hcicmd->ac_state != HCI1394_CMD_STATE_IN_PROGRESS) {
+ /*
+ * we already processed the ARRESP in arresp_process(), it
+ * better have been ACK pended. Otherwise the target device
+ * performed an illegal action.
+ */
+ if (qcmd->qc_status == OHCI_ACK_PENDING) {
+ /*
+ * Tell source that their command has completed. We're
+ * done with this command.
+ * NOTE: We use ac_status which was set in
+ * process_arresp()
+ */
+ h1394_cmd_is_complete(
+ async_handle->as_drvinfo->di_sl_private,
+ hcicmd->ac_cmd, H1394_AT_REQ,
+ hcicmd->ac_status);
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ /*
+ * This is a HW error. Process the ACK like we never saw the
+ * response. We will do this below.
+ */
+ } else {
+ TNF_PROBE_1(hci1394_async_ack_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "response sent to non-pended ack");
+ }
+ }
+
+ /*
+ * if we got an ack pending, add it to the pending list and leave. We
+ * will either get an ARRESP or the pending list will timeout the
+ * response.
+ */
+ if (qcmd->qc_status == OHCI_ACK_PENDING) {
+ hcicmd->ac_state = HCI1394_CMD_STATE_PENDING;
+ /* Add this command to the pending list */
+ hcicmd->ac_plist_node.tln_addr = hcicmd;
+ hci1394_tlist_add(async_handle->as_pending_list,
+ &hcicmd->ac_plist_node);
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * setup our return command status based on the ACK from the HW. See the
+ * OpenHCI 1.0 spec (table 3.2 on pg. 18) for more information about
+ * these ACK/EVT's.
+ */
+ switch (qcmd->qc_status) {
+ case OHCI_ACK_COMPLETE:
+ cmd_status = H1394_CMD_SUCCESS;
+ break;
+
+ /*
+ * we can get a nostatus during a bus reset (i.e. we shutdown the AT
+ * engine before it flushed all the commands)
+ */
+ case OHCI_EVT_FLUSHED:
+ case OHCI_EVT_NO_STATUS:
+ cmd_status = H1394_CMD_EBUSRESET;
+ break;
+
+ case OHCI_EVT_MISSING_ACK:
+ case OHCI_EVT_TIMEOUT:
+ TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, nodeid,
+ IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
+ tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
+ tnf_uint, atreq_ack, qcmd->qc_status);
+ cmd_status = H1394_CMD_ETIMEOUT;
+ break;
+
+ case OHCI_ACK_BUSY_X:
+ case OHCI_ACK_BUSY_A:
+ case OHCI_ACK_BUSY_B:
+ cmd_status = H1394_CMD_EDEVICE_BUSY;
+ TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, nodeid,
+ IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
+ tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
+ tnf_uint, atreq_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_ACK_TARDY:
+ cmd_status = H1394_CMD_EDEVICE_POWERUP;
+ TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, nodeid,
+ IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
+ tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
+ tnf_uint, atreq_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_ACK_DATA_ERROR:
+ cmd_status = H1394_CMD_EDATA_ERROR;
+ TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, nodeid,
+ IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
+ tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
+ tnf_uint, atreq_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_ACK_TYPE_ERROR:
+ cmd_status = H1394_CMD_ETYPE_ERROR;
+ TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, nodeid,
+ IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
+ tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
+ tnf_uint, atreq_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_ACK_CONFLICT_ERROR:
+ cmd_status = H1394_CMD_ERSRC_CONFLICT;
+ TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, nodeid,
+ IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
+ tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
+ tnf_uint, atreq_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_ACK_ADDRESS_ERROR:
+ cmd_status = H1394_CMD_EADDR_ERROR;
+ TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, nodeid,
+ IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
+ tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
+ tnf_uint, atreq_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_EVT_UNDERRUN:
+ case OHCI_EVT_DATA_READ:
+ case OHCI_EVT_TCODE_ERR:
+ case OHCI_EVT_DESCRIPTOR_READ:
+ case OHCI_EVT_UNKNOWN:
+ default:
+ cmd_status = H1394_CMD_EUNKNOWN_ERROR;
+ TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, nodeid,
+ IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
+ tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
+ tnf_uint, atreq_ack, qcmd->qc_status);
+ break;
+ }
+
+ /*
+ * Free the tlabel that was used for this transfer. We will not try and
+ * free the tlabel in the case that we already received a response or if
+ * we did not allocate one (PHY packet). If we already received a
+ * response, the tlabel would have been free'd in
+ * hci1394_async_arresp_process().
+ */
+ if ((hcicmd->ac_state == HCI1394_CMD_STATE_IN_PROGRESS) &&
+ (hcicmd->ac_tlabel_alloc == B_TRUE)) {
+ hci1394_tlabel_free(async_handle->as_tlabel,
+ &hcicmd->ac_tlabel);
+ }
+
+ /*
+ * if we got anything other than an ACK pending, we are done with this
+ * transaction.
+ */
+ hcicmd->ac_state = HCI1394_CMD_STATE_COMPLETED;
+
+ /* tell the services layer that the command has completed */
+ h1394_cmd_is_complete(async_handle->as_drvinfo->di_sl_private,
+ hcicmd->ac_cmd, H1394_AT_REQ, cmd_status);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_async_arresp_process()
+ * Process an arresp, if one has completed. This is called during interrupt
+ * processing and will process a completed arresp. It returns status if an
+ * arresp was processed so that the ISR knows that it needs to be called
+ * again to see if another ARRESP has completed.
+ */
+int
+hci1394_async_arresp_process(hci1394_async_handle_t async_handle,
+ boolean_t *response_available)
+{
+ hci1394_async_cmd_t *hcicmd;
+ uint32_t *addr;
+ int cmd_status;
+ uint_t tcode;
+ uint_t size;
+ int status;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(response_available != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * See if there were any responses on ARRESP Q. A NULL means there
+ * were no responses on the Q. This call does NOT free up space. We
+ * need to do that later after we figure out how much space the
+ * response takes up.
+ */
+ hci1394_q_ar_next(async_handle->as_arresp_q, &addr);
+ if (addr == NULL) {
+ *response_available = B_FALSE;
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * We got a response. Lock out pending timeout callback from marking
+ * tlabel bad.
+ */
+ *response_available = B_TRUE;
+ mutex_enter(&async_handle->as_atomic_lookup);
+
+ /*
+ * Read in the response into the 1394 framework command. We could get a
+ * NULL for a command if we got a response with an error (i.e. tlabel
+ * that didn't match a request). This would be a successful read but with
+ * a NULL hcicmd returned. If we ever get a DDI_FAILURE, we will
+ * shutdown.
+ */
+ status = hci1394_async_arresp_read(async_handle,
+ (hci1394_basic_pkt_t *)addr, &tcode, &hcicmd, &size);
+ if (status != DDI_SUCCESS) {
+ mutex_exit(&async_handle->as_atomic_lookup);
+ h1394_error_detected(async_handle->as_drvinfo->di_sl_private,
+ H1394_SELF_INITIATED_SHUTDOWN, NULL);
+ cmn_err(CE_WARN, "hci1394(%d): driver shutdown: "
+ "unrecoverable error interrupt detected",
+ async_handle->as_drvinfo->di_instance);
+ hci1394_shutdown(async_handle->as_drvinfo->di_dip);
+ TNF_PROBE_0(hci1394_async_arresp_read_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Free up the arresp Q space, we are done with the data */
+ hci1394_q_ar_free(async_handle->as_arresp_q, size);
+
+ /*
+ * if we did not get a valid command response (i.e. we got a bad tlabel
+ * or something like that) we don't have anything else to do. We will
+ * say that we processed a response and will return successfully. We
+ * still may have other responses on the Q.
+ */
+ if (hcicmd == NULL) {
+ mutex_exit(&async_handle->as_atomic_lookup);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ TNF_PROBE_1_DEBUG(hci1394_arresp_resp, HCI1394_TNF_HAL, "", tnf_uint,
+ atresp_resp, hcicmd->ac_status);
+
+ /*
+ * Make sure this is in the pending list. There is a small chance that
+ * we will see the response before we see the ACK PENDING. In the
+ * expected case, it is in the pending list. We will remove it since
+ * we are done with the command.
+ *
+ * NOTE: there is a race condition here with the pending timeout. Look
+ * at the comments before hci1394_async_atreq_process() for more info.
+ */
+ if (hcicmd->ac_state == HCI1394_CMD_STATE_PENDING) {
+ /* remove this transfer from the pending list */
+ status = hci1394_tlist_delete(async_handle->as_pending_list,
+ &hcicmd->ac_plist_node);
+ if (status != DDI_SUCCESS) {
+ mutex_exit(&async_handle->as_atomic_lookup);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ }
+
+ /* allow pending timeout callback to mark tlabel as bad */
+ mutex_exit(&async_handle->as_atomic_lookup);
+
+ /*
+ * We got a valid response that we were able to read in. Free the tlabel
+ * that was used for this transfer.
+ */
+ hci1394_tlabel_free(async_handle->as_tlabel, &hcicmd->ac_tlabel);
+
+ /*
+ * Setup our return command status based on the RESP or ACK or SW error.
+ * See the IEEE1394-1995 spec (6.2.4.10 on pg. 159) for more information
+ * on response codes. See the OpenHCI 1.0 spec (table 3.2 on pg. 18) for
+ * more information about ACK/EVT's. ac_status could have an IEEE1394
+ * response in it, a 1394 EVT/ACK, or a special cmd1394 error for a
+ * device error caught in SW (e.g. for a block read request that got a
+ * quadlet read response). We use a special mask to separate the
+ * ACK/EVT's from the responses (ASYNC_ARRESP_ACK_ERROR).
+ */
+ switch (hcicmd->ac_status) {
+ case IEEE1394_RESP_COMPLETE:
+ cmd_status = H1394_CMD_SUCCESS;
+ break;
+ case IEEE1394_RESP_DATA_ERROR:
+ cmd_status = H1394_CMD_EDATA_ERROR;
+ break;
+ case IEEE1394_RESP_TYPE_ERROR:
+ cmd_status = H1394_CMD_ETYPE_ERROR;
+ break;
+ case IEEE1394_RESP_CONFLICT_ERROR:
+ cmd_status = H1394_CMD_ERSRC_CONFLICT;
+ break;
+ case IEEE1394_RESP_ADDRESS_ERROR:
+ cmd_status = H1394_CMD_EADDR_ERROR;
+ break;
+ case H1394_CMD_EDEVICE_ERROR:
+ cmd_status = H1394_CMD_EDEVICE_ERROR;
+ break;
+ case OHCI_ACK_DATA_ERROR | ASYNC_ARRESP_ACK_ERROR:
+ cmd_status = H1394_CMD_EDATA_ERROR;
+ break;
+ case OHCI_ACK_TYPE_ERROR | ASYNC_ARRESP_ACK_ERROR:
+ cmd_status = H1394_CMD_ETYPE_ERROR;
+ break;
+ case OHCI_EVT_UNDERRUN | ASYNC_ARRESP_ACK_ERROR:
+ case OHCI_EVT_DATA_READ | ASYNC_ARRESP_ACK_ERROR:
+ case OHCI_EVT_TCODE_ERR | ASYNC_ARRESP_ACK_ERROR:
+ cmd_status = H1394_CMD_EUNKNOWN_ERROR;
+ break;
+ default:
+ cmd_status = H1394_CMD_EUNKNOWN_ERROR;
+ TNF_PROBE_1(hci1394_async_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, arresp_resp, hcicmd->ac_status);
+ break;
+ }
+
+ /*
+ * if we have already processed the atreq and put it on the pending Q
+ * (normal case), tell the services layer it completed.
+ */
+ if (hcicmd->ac_state == HCI1394_CMD_STATE_PENDING) {
+ /* Set state indicating that we are done with this cmd */
+ hcicmd->ac_state = HCI1394_CMD_STATE_COMPLETED;
+
+ /* tell the services layer the command has completed */
+ h1394_cmd_is_complete(async_handle->as_drvinfo->di_sl_private,
+ hcicmd->ac_cmd, H1394_AT_REQ, cmd_status);
+
+ /*
+ * We have not seen the atreq status yet. We will call
+ * h1394_cmd_is_complete() in atreq_process() in case we did not get
+ * an ack pending (target HW error -> this is based on real world
+ * experience :-))
+ */
+ } else {
+ /* Set state indicating that we are done with this cmd */
+ hcicmd->ac_state = HCI1394_CMD_STATE_COMPLETED;
+
+ /* save away the status for atreq_process() */
+ hcicmd->ac_status = cmd_status;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_async_arreq_process()
+ * Process an arreq, if one has arrived. This is called during interrupt
+ * processing and will process an arreq that has arrived. It returns status
+ * if an arreq was processed so that the ISR knows that it needs to be
+ * called again to see if another ARREQ has arrived.
+ */
+int
+hci1394_async_arreq_process(hci1394_async_handle_t async_handle,
+ boolean_t *request_available)
+{
+ hci1394_async_cmd_t *hcicmd;
+ uint32_t *addr;
+ uint_t tcode;
+ uint_t size;
+ int status;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(request_available != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * See if there were any requests on ARREQ Q. A NULL means there
+ * were no requests on the Q. This call does NOT free up space. We
+ * need to do that later after we figure out how much space the
+ * request takes up.
+ */
+ hci1394_q_ar_next(async_handle->as_arreq_q, &addr);
+ if (addr == NULL) {
+ *request_available = B_FALSE;
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * We got a request. Read the request into a 1394 framework command.
+ * We could get a NULL for a command if we got a request with an error
+ * (i.e. ARREQ ACK was not ack pending or ack complete). This would be a
+ * successful read but with a NULL hcicmd returned. If we ever get a
+ * DDI_FAILURE, we will shutdown.
+ */
+ *request_available = B_TRUE;
+ status = hci1394_async_arreq_read(async_handle,
+ (hci1394_basic_pkt_t *)addr, &tcode, &hcicmd, &size);
+ if (status != DDI_SUCCESS) {
+ h1394_error_detected(async_handle->as_drvinfo->di_sl_private,
+ H1394_SELF_INITIATED_SHUTDOWN, NULL);
+ cmn_err(CE_WARN, "hci1394(%d): driver shutdown: "
+ "unrecoverable error interrupt detected",
+ async_handle->as_drvinfo->di_instance);
+ hci1394_shutdown(async_handle->as_drvinfo->di_dip);
+ TNF_PROBE_0(hci1394_async_arreq_read_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Free up the arreq Q space, we are done with the data */
+ hci1394_q_ar_free(async_handle->as_arreq_q, size);
+
+ /*
+ * if we did not get a valid request (i.e. the ARREQ had a bad ACK
+ * or something like that) we don't have anything else to do. We will
+ * say that we processed a request and will return successfully. We
+ * still may have other requests on the Q.
+ */
+ if (hcicmd == NULL) {
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * If as_flushing_arreq is set, we do not want to send any requests up
+ * to the Services Layer. We are flushing the ARREQ until we see a bus
+ * reset token that matches the current bus generation. Free up the
+ * alloc'd command and return success.
+ */
+ if (async_handle->as_flushing_arreq == B_TRUE) {
+ hci1394_async_response_complete(async_handle, hcicmd->ac_cmd,
+ hcicmd->ac_priv);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ TNF_PROBE_1_DEBUG(hci1394_arreq_ack, HCI1394_TNF_HAL, "", tnf_uint,
+ arreq_ack, hcicmd->ac_status);
+
+ /*
+ * We got a valid request that we were able to read in. Call into the
+ * services layer based on the type of request.
+ */
+ switch (tcode) {
+ case IEEE1394_TCODE_READ_QUADLET:
+ case IEEE1394_TCODE_READ_BLOCK:
+ h1394_read_request(async_handle->as_drvinfo->di_sl_private,
+ hcicmd->ac_cmd);
+ break;
+ case IEEE1394_TCODE_WRITE_QUADLET:
+ case IEEE1394_TCODE_WRITE_BLOCK:
+ h1394_write_request(async_handle->as_drvinfo->di_sl_private,
+ hcicmd->ac_cmd);
+ break;
+ case IEEE1394_TCODE_LOCK:
+ h1394_lock_request(async_handle->as_drvinfo->di_sl_private,
+ hcicmd->ac_cmd);
+ break;
+ case IEEE1394_TCODE_PHY:
+ /*
+ * OpenHCI only handles 1 PHY quadlet at a time. If a selfid
+ * packet was received with multiple quadlets, we will treat
+ * each quadlet as a separate call. We do not notify the
+ * services layer through the normal command interface, we will
+ * treat it like a command internally and then free up the
+ * command ourselves when we are done with it.
+ */
+ h1394_phy_packet(async_handle->as_drvinfo->di_sl_private,
+ &hcicmd->ac_cmd->cmd_u.q.quadlet_data, 1,
+ hcicmd->ac_priv->recv_tstamp);
+ /* free alloc'd command */
+ hci1394_async_response_complete(async_handle, hcicmd->ac_cmd,
+ hcicmd->ac_priv);
+ break;
+ default:
+ /* free alloc'd command */
+ hci1394_async_response_complete(async_handle, hcicmd->ac_cmd,
+ hcicmd->ac_priv);
+ TNF_PROBE_1(hci1394_async_arreq_tcode_err,
+ HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_tcode, tcode);
+ break;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_async_atresp_process()
+ * Process an atresp, if one has completed. This is called during interrupt
+ * processing and will process a completed atresp. It returns status if an
+ * atresp was processed so that the ISR knows that it needs to be called
+ * again to see if another ATRESP has completed. flush_q set to B_TRUE tells
+ * this routine to process all commands regardless of their completion
+ * status. This is used during bus reset processing to remove all commands
+ * from the Q.
+ */
+int
+hci1394_async_atresp_process(hci1394_async_handle_t async_handle,
+ boolean_t flush_q, boolean_t *response_available)
+{
+ hci1394_async_cmd_t *hcicmd;
+ hci1394_q_cmd_t *qcmd;
+ int cmd_status;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(response_available != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_atresp_process_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Get the next ATRESP that has completed (if one has). Space is free'd
+ * up in atresp_q and atresp_data_q as part of this function call.
+ */
+ hci1394_q_at_next(async_handle->as_atresp_q, flush_q, &qcmd);
+
+ /*
+ * See if there were any more requests on the ATRESP Q. A NULL means there
+ * were no completed commands left on the Q.
+ */
+ if (qcmd == NULL) {
+ *response_available = B_FALSE;
+ TNF_PROBE_0_DEBUG(hci1394_async_atresp_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /* There is a completed ATRESP, setup the HAL command pointer */
+ *response_available = B_TRUE;
+ hcicmd = (hci1394_async_cmd_t *)qcmd->qc_arg;
+
+ TNF_PROBE_1_DEBUG(hci1394_atresp_ack, HCI1394_TNF_HAL, "", tnf_uint,
+ atresp_ack, qcmd->qc_status);
+
+ /* save away the command completed timestamp for the services layer */
+ hcicmd->ac_priv->ack_tstamp = qcmd->qc_timestamp;
+
+ /*
+ * setup our return command status based on the ACK from the HW. See the
+ * OpenHCI 1.0 spec (table 3.2 on pg. 18) for more information about
+ * these ACK/EVT's.
+ */
+ switch (qcmd->qc_status) {
+ case OHCI_ACK_COMPLETE:
+ cmd_status = H1394_CMD_SUCCESS;
+ break;
+
+ /*
+ * we can get a nostatus during a bus reset (i.e. we shutdown the AT
+ * engine before it flushed all the commands)
+ */
+ case OHCI_EVT_FLUSHED:
+ case OHCI_EVT_NO_STATUS:
+ cmd_status = H1394_CMD_EBUSRESET;
+ break;
+
+ case OHCI_EVT_MISSING_ACK:
+ case OHCI_EVT_TIMEOUT:
+ cmd_status = H1394_CMD_ETIMEOUT;
+ TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, atresp_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_ACK_BUSY_X:
+ case OHCI_ACK_BUSY_A:
+ case OHCI_ACK_BUSY_B:
+ cmd_status = H1394_CMD_EDEVICE_BUSY;
+ TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, atresp_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_ACK_TARDY:
+ cmd_status = H1394_CMD_EDEVICE_POWERUP;
+ TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, atresp_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_ACK_DATA_ERROR:
+ cmd_status = H1394_CMD_EDATA_ERROR;
+ TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, atresp_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_ACK_TYPE_ERROR:
+ cmd_status = H1394_CMD_ETYPE_ERROR;
+ TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, atresp_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_ACK_CONFLICT_ERROR:
+ cmd_status = H1394_CMD_ERSRC_CONFLICT;
+ TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, atresp_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_ACK_ADDRESS_ERROR:
+ cmd_status = H1394_CMD_EADDR_ERROR;
+ TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, atresp_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_EVT_UNKNOWN:
+ cmd_status = H1394_CMD_EUNKNOWN_ERROR;
+ TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, atresp_ack, qcmd->qc_status);
+ break;
+
+ case OHCI_EVT_UNDERRUN:
+ case OHCI_EVT_DATA_READ:
+ case OHCI_EVT_TCODE_ERR:
+ case OHCI_EVT_DESCRIPTOR_READ:
+ default:
+ cmd_status = H1394_CMD_EUNKNOWN_ERROR;
+ TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, atresp_ack, qcmd->qc_status);
+ break;
+ }
+
+ /* tell the services layer that the command has completed */
+ h1394_cmd_is_complete(async_handle->as_drvinfo->di_sl_private,
+ hcicmd->ac_cmd, H1394_AT_RESP, cmd_status);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_atresp_process_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_async_arresp_read()
+ * Read ARRESP in from memory into 1394 Framework command. We read the tcode
+ * which tells us which kind of arresp the packet is, get the size of the
+ * response, read in the sender, tlabel, and response code, and then
+ * lookup the command based on the sender and tlabel. Once we get the command
+ * (corresponding to the ATREQ), we will copy the rest of the response into
+ * that command.
+ *
+ * The only time this routine should return DDI_FAILURE is if it was unable
+ * to maintain a good state in the ARRESP Q (i.e. an unknown response was
+ * received and we cannot clean up after it). If we detect a recoverable
+ * error, and it doesn't make sense to pass the response up to the Services
+ * Layer, we should return DDI_SUCCESS with hcicmd = NULL.
+ */
+static int
+hci1394_async_arresp_read(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, uint_t *tcode, hci1394_async_cmd_t **hcicmd,
+ uint_t *size)
+{
+ hci1394_tlabel_info_t ac_tlabel;
+ h1394_cmd_priv_t *cmd_priv;
+ cmd1394_cmd_t *cmd;
+ uint32_t *status_addr;
+ uint_t data_length;
+ uint32_t quadlet;
+ void *command;
+ uint_t rcode;
+ uint_t ack;
+ int status;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(pkt != NULL);
+ ASSERT(tcode != NULL);
+ ASSERT(hcicmd != NULL);
+ ASSERT(size != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* read in the arresp tcode */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q, &pkt->q1);
+ *tcode = HCI1394_DESC_TCODE_GET(quadlet);
+
+ /* Get the size of the arresp */
+ status = hci1394_async_arresp_size_get(*tcode,
+ async_handle->as_arresp_q, &pkt->q1, size);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_arresp_read_size_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Read in the tlabel, destination, and rcode (response code) */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q, &pkt->q1);
+ ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet);
+ quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q, &pkt->q2);
+ ac_tlabel.tbi_destination = HCI1394_DESC_DESTID_GET(quadlet);
+ rcode = HCI1394_DESC_RCODE_GET(quadlet);
+
+ /* Lookup the ATREQ framework command this response goes with */
+ hci1394_tlabel_lookup(async_handle->as_tlabel, &ac_tlabel, &command);
+
+ /*
+ * If there is not a corresponding ATREQ command, this is an error. We
+ * will ignore this response but still return success so we clean up
+ * after it and go on with other arresp's. This could happen if a
+ * response was sent after the command has timed out or if the target
+ * device is misbehaving. (we have seen both cases)
+ */
+ *hcicmd = (hci1394_async_cmd_t *)command;
+ if ((*hcicmd) == NULL) {
+ TNF_PROBE_2(hci1394_invalid_tlabel, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, nodeid,
+ IEEE1394_NODE_NUM(ac_tlabel.tbi_destination), tnf_uint,
+ rx_tlabel, ac_tlabel.tbi_tlabel);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * Copy the response code into the HAL private command space. Set up
+ * shortcuts to the 1394 framework command (cmd) and the HAL/SL private
+ * area (cmd_priv). A command is made up of 4 parts. There is the public
+ * part which is accessible to the target driver, there is the Services
+ * Layer private part which is only accessible to the services layer,
+ * there is the SL/HAL private area which is where the SL and HAL share
+ * information about a particular command, and there is the HAL private
+ * area where we keep track of our command specific state information.
+ */
+ (*hcicmd)->ac_status = rcode;
+ cmd = (*hcicmd)->ac_cmd;
+ cmd_priv = (*hcicmd)->ac_priv;
+
+ /*
+	 * Calculate the address where the status and timestamp of the ARRESP
+	 * are kept. It is the last quadlet in the response. Save away the
+	 * timestamp.
+ */
+ status_addr = (uint32_t *)((uintptr_t)pkt + (uintptr_t)*size -
+ (uintptr_t)IEEE1394_QUADLET);
+ quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q, status_addr);
+ cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet);
+
+ /*
+ * if we did not get an ACK_COMPLETE, we will use the ack error instead
+	 * of the response in the packet for our status. We use a special mask
+	 * to separate the responses from the ACKs (ASYNC_ARRESP_ACK_ERROR). We
+	 * will
+ * return success with hcicmd set to the command so that this error gets
+ * sent up to the Services Layer.
+ */
+ ack = HCI1394_DESC_EVT_GET(quadlet);
+ if (ack != OHCI_ACK_COMPLETE) {
+ /* use the ack error instead of rcode for the command status */
+ (*hcicmd)->ac_status = ack | ASYNC_ARRESP_ACK_ERROR;
+ TNF_PROBE_1(hci1394_arresp_bad_ack, HCI1394_TNF_HAL_ERROR,
+ "", tnf_uint, arresp_ack, ack);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+	TNF_PROBE_1_DEBUG(hci1394_arresp_resp, HCI1394_TNF_HAL, "", tnf_uint,
+ arresp_resp, rcode);
+
+ /*
+ * If we get to this point we have gotten a valid ACK on the response
+ * and have matched up the response with an ATREQ. Now we check the
+ * response code. If it is not resp_complete, we do not have anything
+ * left to look at in the response. Return successfully.
+ */
+ if (rcode != IEEE1394_RESP_COMPLETE) {
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * Read the rest of the response (based on which kind of response it is)
+ * into the 1394 framework command. In all of the different responses,
+ * we check to make sure the response matches the original request. We
+ * originally did not have this check but found a device or two which
+ * did not behave very well and would cause us to corrupt our commands.
+ * Now we check :-) We will return success when we get this error since
+ * we can recover from it.
+ */
+ switch (*tcode) {
+ case IEEE1394_TCODE_WRITE_RESP:
+ /*
+ * make sure the ATREQ was a quadlet/block write. The same
+		 * response is sent back for those two types of ATREQs.
+ */
+ if ((cmd->cmd_type != CMD1394_ASYNCH_WR_QUAD) &&
+ (cmd->cmd_type != CMD1394_ASYNCH_WR_BLOCK)) {
+ (*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
+			TNF_PROBE_2(hci1394_async_arresp_wrresp_fail,
+ HCI1394_TNF_HAL_STACK, "", tnf_string, errmsg,
+ "Invalid response sent for write request", tnf_uint,
+ arresp_tcode, *tcode);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ break;
+
+ case IEEE1394_TCODE_READ_QUADLET_RESP:
+ /* make sure the ATREQ was a quadlet read */
+ if (cmd->cmd_type != CMD1394_ASYNCH_RD_QUAD) {
+ (*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
+			TNF_PROBE_2(hci1394_async_arresp_qrdresp_fail,
+ HCI1394_TNF_HAL_STACK, "", tnf_string, errmsg,
+ "Invalid response sent for qrd request", tnf_uint,
+ arresp_tcode, *tcode);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * read the quadlet read response in. Data is treated as a byte
+ * stream.
+ */
+ hci1394_q_ar_rep_get8(async_handle->as_arresp_q,
+ (uint8_t *)&cmd->cmd_u.q.quadlet_data,
+ (uint8_t *)&pkt->q4, IEEE1394_QUADLET);
+ break;
+
+ case IEEE1394_TCODE_READ_BLOCK_RESP:
+ /* make sure the ATREQ was a block read */
+ if (cmd->cmd_type != CMD1394_ASYNCH_RD_BLOCK) {
+ (*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
+			TNF_PROBE_2(hci1394_async_arresp_brdresp_fail,
+ HCI1394_TNF_HAL_STACK, "", tnf_string, errmsg,
+ "Invalid response sent for brd request", tnf_uint,
+ arresp_tcode, *tcode);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * read in the data length. Make sure the data length is the
+ * same size as the read block request size that went out.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q,
+ &pkt->q4);
+ data_length = HCI1394_DESC_DATALEN_GET(quadlet);
+ if (data_length != cmd_priv->mblk.length) {
+ (*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
+ TNF_PROBE_3(hci1394_async_arresp_brdsz_fail,
+ HCI1394_TNF_HAL_STACK, "", tnf_string,
+ errmsg, "Block read response size is bad",
+ tnf_uint, requested_size, cmd_priv->mblk.length,
+ tnf_uint, response_size, data_length);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /* Copy the read block data into the command mblk */
+ hci1394_q_ar_copy_to_mblk(async_handle->as_arresp_q,
+ (uint8_t *)&pkt->q5, &cmd_priv->mblk);
+ break;
+
+ case IEEE1394_TCODE_LOCK_RESP:
+ /* read in the data length */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q,
+ &pkt->q4);
+ data_length = HCI1394_DESC_DATALEN_GET(quadlet);
+
+ if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) {
+ /*
+			 * the data length was read in above. Make sure it
+			 * is valid for a lock32 response (1 quadlet)
+ */
+ if (data_length != IEEE1394_QUADLET) {
+ (*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
+ TNF_PROBE_2(hci1394_async_arresp_l32sz_fail,
+ HCI1394_TNF_HAL_STACK, "", tnf_string,
+ errmsg, "Invalid size for lock32 response",
+ tnf_uint, data_size, data_length);
+ TNF_PROBE_0_DEBUG(
+ hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * read the lock32 response in. Data is treated as a
+ * byte stream unless it is an arithmetic lock
+ * operation. In that case we treat data like a 32-bit
+ * word.
+ */
+ hci1394_q_ar_rep_get8(async_handle->as_arresp_q,
+ (uint8_t *)&cmd->cmd_u.l32.old_value,
+ (uint8_t *)&pkt->q5, IEEE1394_QUADLET);
+ cmd->cmd_u.l32.old_value = HCI1394_ARITH_LOCK_SWAP32(
+ cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.old_value);
+
+ } else if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_64) {
+ /*
+			 * the data length was read in above. Make sure it
+			 * is valid for a lock64 response (1 octlet)
+ */
+ if (data_length != IEEE1394_OCTLET) {
+ (*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
+ TNF_PROBE_2(hci1394_async_arresp_l64sz_fail,
+ HCI1394_TNF_HAL_STACK, "", tnf_string,
+ errmsg, "Invalid size for lock64 response",
+ tnf_uint, data_size, data_length);
+ TNF_PROBE_0_DEBUG(
+ hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * read the lock64 response in. Data is treated as a
+ * byte stream unless it is an arithmetic lock
+ * operation. In that case we treat data like a 64-bit
+ * word.
+ */
+ hci1394_q_ar_rep_get8(async_handle->as_arresp_q,
+ (uint8_t *)&cmd->cmd_u.l64.old_value,
+ (uint8_t *)&pkt->q5, IEEE1394_OCTLET);
+ cmd->cmd_u.l64.old_value = HCI1394_ARITH_LOCK_SWAP64(
+ cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.old_value);
+
+ /*
+ * we sent out a request that was NOT a lock request and got
+ * back a lock response.
+ */
+ } else {
+ (*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
+ TNF_PROBE_2(hci1394_async_arresp_lockresp_fail,
+ HCI1394_TNF_HAL_STACK, "", tnf_string, errmsg,
+ "Invalid response sent for lock request", tnf_uint,
+ arresp_tcode, *tcode);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ break;
+
+ default:
+ /* we got a tcode that we don't know about. Return error */
+ TNF_PROBE_2(hci1394_async_arresp_tcode_err,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "unknown ARRESP received", tnf_uint, arresp_tcode, *tcode);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
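+
+/*
+ * Illustration only (editor's sketch, not driver code): ac_status above
+ * holds either a 1394 rcode or an OpenHCI ack/evt code. The flag bit and
+ * helpers below are assumptions standing in for ASYNC_ARRESP_ACK_ERROR;
+ * the real definition lives in the hci1394 async header.
+ */
+#define	EX_ARRESP_ACK_ERROR	0x20	/* assumed flag bit, example only */
+
+static uint_t
+ex_status_is_ack_error(uint_t ac_status)
+{
+	/* set when ac_status carries an ack/evt, clear for an rcode */
+	return ((ac_status & EX_ARRESP_ACK_ERROR) != 0);
+}
+
+static uint_t
+ex_status_raw(uint_t ac_status)
+{
+	/* strip the flag to recover the raw ack/evt or rcode value */
+	return (ac_status & ~EX_ARRESP_ACK_ERROR);
+}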
+
+
+/*
+ * hci1394_async_arreq_read()
+ * Read ARREQ in from memory into a 1394 Framework command. Allocate a 1394
+ * framework command, read in the ARREQ, and before passing it up to the
+ * services layer, see if it was a valid broadcast request.
+ *
+ * The only time this routine should return DDI_FAILURE is if it was unable
+ * to maintain a good state in the ARREQ Q (i.e. an unknown request was
+ * received and we cannot clean up after it). If we detect a recoverable
+ * error, we should return DDI_SUCCESS with hcicmd = NULL.
+ */
+static int
+hci1394_async_arreq_read(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, uint_t *tcode, hci1394_async_cmd_t **hcicmd,
+ uint_t *size)
+{
+ h1394_cmd_priv_t *cmd_priv;
+ boolean_t is_reset_token;
+ cmd1394_cmd_t *cmd;
+ uint32_t quadlet;
+ int status;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(pkt != NULL);
+ ASSERT(tcode != NULL);
+ ASSERT(hcicmd != NULL);
+ ASSERT(size != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+	/* read in the arreq tcode */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1);
+ *tcode = HCI1394_DESC_TCODE_GET(quadlet);
+
+ /*
+	 * Allocate a 1394 framework command. The Services Layer takes care of
+	 * caching commands. This is called during interrupt processing so we
+ * do not want to sleep.
+ */
+ status = h1394_alloc_cmd(async_handle->as_drvinfo->di_sl_private,
+ H1394_ALLOC_CMD_NOSLEEP, &cmd, &cmd_priv);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_arreq_read_cmdalloc_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize the HAL private command info */
+ hci1394_async_hcicmd_init(async_handle, cmd, cmd_priv, hcicmd);
+
+ /*
+ * There are two generations in the command structure, one in the public
+ * space and one in the HAL/SL private shared space. We need to fill in
+ * both. We only use the private one internally.
+ */
+ cmd_priv->bus_generation = async_handle->as_drvinfo->di_gencnt;
+ cmd->bus_generation = async_handle->as_drvinfo->di_gencnt;
+
+ /*
+ * Read the request (based on which kind of request it is) into the 1394
+ * framework command.
+ */
+ switch (*tcode) {
+ case IEEE1394_TCODE_READ_QUADLET:
+ /*
+		 * We got an ARREQ quadlet read request. Read in the packet.
+ * If there is a problem with the packet (i.e. we don't get
+ * DDI_SUCCESS), we will free up the command and return NULL in
+ * hcicmd to indicate that we did not get a valid ARREQ to
+ * process.
+ */
+ status = hci1394_async_arreq_read_qrd(async_handle, pkt,
+ *hcicmd, size);
+ if (status != DDI_SUCCESS) {
+ hci1394_async_response_complete(async_handle, cmd,
+ cmd_priv);
+ *hcicmd = NULL;
+ TNF_PROBE_0(hci1394_async_arreq_read_qrd_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ break;
+
+ case IEEE1394_TCODE_WRITE_QUADLET:
+ /*
+		 * We got an ARREQ quadlet write request. Read in the packet.
+ * If there is a problem with the packet (i.e. we don't get
+ * DDI_SUCCESS), we will free up the command and return NULL in
+ * hcicmd to indicate that we did not get a valid ARREQ to
+ * process.
+ */
+ status = hci1394_async_arreq_read_qwr(async_handle, pkt,
+ *hcicmd, size);
+ if (status != DDI_SUCCESS) {
+ hci1394_async_response_complete(async_handle, cmd,
+ cmd_priv);
+ *hcicmd = NULL;
+ TNF_PROBE_0(hci1394_async_arreq_read_qwr_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ break;
+
+ case IEEE1394_TCODE_READ_BLOCK:
+ /*
+		 * We got an ARREQ block read request. Read in the packet.
+ * If there is a problem with the packet (i.e. we don't get
+ * DDI_SUCCESS), we will free up the command and return NULL in
+ * hcicmd to indicate that we did not get a valid ARREQ to
+ * process.
+ */
+ status = hci1394_async_arreq_read_brd(async_handle, pkt,
+ *hcicmd, size);
+ if (status != DDI_SUCCESS) {
+ hci1394_async_response_complete(async_handle, cmd,
+ cmd_priv);
+ *hcicmd = NULL;
+ TNF_PROBE_0(hci1394_async_arreq_read_brd_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ break;
+
+ case IEEE1394_TCODE_WRITE_BLOCK:
+ /*
+		 * We got an ARREQ block write request. Read in the packet.
+ * If there is a problem with the packet (i.e. we don't get
+ * DDI_SUCCESS), we will free up the command and return NULL in
+ * hcicmd to indicate that we did not get a valid ARREQ to
+ * process.
+ */
+ status = hci1394_async_arreq_read_bwr(async_handle, pkt,
+ *hcicmd, size);
+ if (status != DDI_SUCCESS) {
+ hci1394_async_response_complete(async_handle, cmd,
+ cmd_priv);
+ *hcicmd = NULL;
+ TNF_PROBE_0(hci1394_async_arreq_read_bwr_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ break;
+
+ case IEEE1394_TCODE_LOCK:
+ /*
+		 * We got an ARREQ lock request. Read in the packet.
+ * If there is a problem with the packet (i.e. we don't get
+ * DDI_SUCCESS), we will free up the command and return NULL in
+ * hcicmd to indicate that we did not get a valid ARREQ to
+ * process.
+ */
+ status = hci1394_async_arreq_read_lck(async_handle, pkt,
+ *hcicmd, size);
+ if (status != DDI_SUCCESS) {
+ hci1394_async_response_complete(async_handle, cmd,
+ cmd_priv);
+ *hcicmd = NULL;
+ TNF_PROBE_0(hci1394_async_arreq_read_lck_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ break;
+
+ case IEEE1394_TCODE_PHY:
+ /*
+ * We got a PHY packet in the ARREQ buffer. Read in the packet.
+ * If there is a problem with the packet (i.e. we don't get
+ * DDI_SUCCESS), we will free up the command and return NULL in
+ * hcicmd to indicate that we did not get a valid ARREQ to
+ * process.
+ */
+ status = hci1394_async_arreq_read_phy(async_handle, pkt,
+ *hcicmd, size, &is_reset_token);
+ if (status != DDI_SUCCESS) {
+ hci1394_async_response_complete(async_handle, cmd,
+ cmd_priv);
+ *hcicmd = NULL;
+ TNF_PROBE_0(hci1394_async_arreq_read_phy_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * If we got a bus reset token, free up the command and return
+ * NULL in hcicmd to indicate that we did not get a valid ARREQ
+ * to process.
+ */
+ if (is_reset_token == B_TRUE) {
+ hci1394_async_response_complete(async_handle, cmd,
+ cmd_priv);
+ *hcicmd = NULL;
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ break;
+
+ default:
+ /* we got a tcode that we don't know about. Return error */
+ TNF_PROBE_2(hci1394_async_arreq_tcode_err,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "unknown ARREQ received", tnf_uint, arreq_tcode, *tcode);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * If this command was broadcast and it was not a write, drop the
+ * command since it's an invalid request. We will free up the command
+ * and return NULL in hcicmd to indicate that we did not get a valid
+ * ARREQ to process.
+ */
+ if ((((*hcicmd)->ac_dest & IEEE1394_NODE_NUM_MASK) ==
+ IEEE1394_BROADCAST_NODEID) && ((*tcode !=
+ IEEE1394_TCODE_WRITE_QUADLET) && (*tcode !=
+ IEEE1394_TCODE_WRITE_BLOCK))) {
+ hci1394_async_response_complete(async_handle, cmd, cmd_priv);
+ *hcicmd = NULL;
+ TNF_PROBE_0(hci1394_async_arreq_read_bcast_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+
+ /*
+ * It is a valid broadcast command, set that field in the public
+ * command structure.
+ */
+ } else if ((((*hcicmd)->ac_dest & IEEE1394_NODE_NUM_MASK) ==
+ IEEE1394_BROADCAST_NODEID)) {
+ cmd->broadcast = 1;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
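+
+/*
+ * Illustration only (editor's sketch, not driver code): the broadcast
+ * screen at the end of hci1394_async_arreq_read() reduces to the
+ * predicate below. The masks and tcodes are the real names used above;
+ * the helper itself is hypothetical.
+ */
+static int
+ex_arreq_broadcast_ok(uint_t dest, uint_t tcode)
+{
+	if ((dest & IEEE1394_NODE_NUM_MASK) != IEEE1394_BROADCAST_NODEID) {
+		/* not a broadcast request, nothing to screen */
+		return (1);
+	}
+	/* broadcast is only legal for quadlet and block writes */
+	return ((tcode == IEEE1394_TCODE_WRITE_QUADLET) ||
+	    (tcode == IEEE1394_TCODE_WRITE_BLOCK));
+}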
+
+
+/*
+ * hci1394_async_arreq_read_qrd()
+ * Read ARREQ quadlet read into the 1394 Framework command. This routine will
+ * return DDI_FAILURE if it was not able to read the request successfully.
+ */
+static int
+hci1394_async_arreq_read_qrd(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size)
+{
+ h1394_cmd_priv_t *cmd_priv;
+ cmd1394_cmd_t *cmd;
+ uint32_t quadlet;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(pkt != NULL);
+ ASSERT(hcicmd != NULL);
+ ASSERT(size != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qrd_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+	/* Set up shortcuts, command type, and size of request */
+ cmd = hcicmd->ac_cmd;
+ cmd_priv = hcicmd->ac_priv;
+ cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
+ *size = DESC_SZ_AR_READQUAD_REQ;
+
+ /*
+ * read in the ARREQ ACK/EVT, the speed, the time we received it, and
+ * calculate the ATRESP timeout for when we send it.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q4);
+ hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet);
+ cmd_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet);
+ cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet);
+ hcicmd->ac_qcmd.qc_timestamp = hci1394_async_timeout_calc(async_handle,
+ cmd_priv->recv_tstamp);
+
+ /*
+ * if the ARREQ ACK was bad, we were unable to successfully read in this
+ * request. Return failure.
+ */
+ if ((hcicmd->ac_status != OHCI_ACK_COMPLETE) &&
+ (hcicmd->ac_status != OHCI_ACK_PENDING)) {
+ TNF_PROBE_1(hci1394_async_arreq_qrd_ack_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_ack,
+ hcicmd->ac_status);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qrd_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Read in the tlabel and destination. We don't use an mblk for this
+ * request.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1);
+ hcicmd->ac_dest = HCI1394_DESC_DESTID_GET(quadlet);
+ hcicmd->ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet);
+ hcicmd->ac_mblk_alloc = B_FALSE;
+
+ /*
+ * Read in the sender so we know who to send the ATRESP to and read in
+ * the 1394 48-bit address for this request.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2);
+ cmd->nodeID = HCI1394_DESC_SRCID_GET(quadlet);
+ cmd->cmd_addr = HCI1394_TO_ADDR_HI(quadlet);
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3);
+ cmd->cmd_addr |= HCI1394_TO_ADDR_LO(quadlet);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qrd_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
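+
+/*
+ * Illustration only (editor's sketch, not driver code): how the 48-bit
+ * 1394 address is rebuilt from the two header quadlets read above. The
+ * 16/32 split is an assumption standing in for HCI1394_TO_ADDR_HI/LO;
+ * the real macros live in the hci1394 headers.
+ */
+static uint64_t
+ex_addr_from_quadlets(uint32_t q2, uint32_t q3)
+{
+	/* q2 low 16 bits are addr hi, q3 is addr lo (assumed layout) */
+	return (((uint64_t)(q2 & 0xFFFF) << 32) | (uint64_t)q3);
+}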
+
+
+/*
+ * hci1394_async_arreq_read_qwr()
+ * Read ARREQ quadlet write into the 1394 Framework command. This routine
+ * will return DDI_FAILURE if it was not able to read the request
+ * successfully.
+ */
+static int
+hci1394_async_arreq_read_qwr(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size)
+{
+ h1394_cmd_priv_t *cmd_priv;
+ cmd1394_cmd_t *cmd;
+ uint32_t quadlet;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(pkt != NULL);
+ ASSERT(hcicmd != NULL);
+ ASSERT(size != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qwr_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+	/* Set up shortcuts, command type, and size of request */
+ cmd = hcicmd->ac_cmd;
+ cmd_priv = hcicmd->ac_priv;
+ cmd->cmd_type = CMD1394_ASYNCH_WR_QUAD;
+ *size = DESC_SZ_AR_WRITEQUAD_REQ;
+
+ /*
+ * read in the ARREQ ACK/EVT, the speed, the time we received it, and
+ * calculate the ATRESP timeout for when we send it.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q5);
+ hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet);
+ cmd_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet);
+ cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet);
+ hcicmd->ac_qcmd.qc_timestamp = hci1394_async_timeout_calc(async_handle,
+ cmd_priv->recv_tstamp);
+
+ /*
+ * if the ARREQ ACK was bad, we were unable to successfully read in this
+ * request. Return failure.
+ */
+ if ((hcicmd->ac_status != OHCI_ACK_COMPLETE) &&
+ (hcicmd->ac_status != OHCI_ACK_PENDING)) {
+ TNF_PROBE_1(hci1394_async_arreq_qwr_ack_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_ack,
+ hcicmd->ac_status);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qwr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Read in the tlabel and destination. We don't use an mblk for this
+ * request.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1);
+ hcicmd->ac_dest = HCI1394_DESC_DESTID_GET(quadlet);
+ hcicmd->ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet);
+ hcicmd->ac_mblk_alloc = B_FALSE;
+
+ /*
+ * Read in the sender so we know who to send the ATRESP to. Read in
+ * the 1394 48-bit address for this request. Copy the data quadlet into
+ * the command. The data quadlet is treated like a byte stream.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2);
+ cmd->nodeID = HCI1394_DESC_SRCID_GET(quadlet);
+ cmd->cmd_addr = HCI1394_TO_ADDR_HI(quadlet);
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3);
+ cmd->cmd_addr |= HCI1394_TO_ADDR_LO(quadlet);
+ hci1394_q_ar_rep_get8(async_handle->as_arreq_q,
+ (uint8_t *)&cmd->cmd_u.q.quadlet_data, (uint8_t *)&pkt->q4,
+ IEEE1394_QUADLET);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qwr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_async_arreq_read_brd()
+ * Read ARREQ block read into the 1394 Framework command. This routine will
+ * return DDI_FAILURE if it was not able to read the request successfully.
+ */
+static int
+hci1394_async_arreq_read_brd(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size)
+{
+ h1394_cmd_priv_t *cmd_priv;
+ cmd1394_cmd_t *cmd;
+ uint32_t quadlet;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(pkt != NULL);
+ ASSERT(hcicmd != NULL);
+ ASSERT(size != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_brd_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+	/* Set up shortcuts, command type, and size of request */
+ cmd = hcicmd->ac_cmd;
+ cmd_priv = hcicmd->ac_priv;
+ cmd->cmd_type = CMD1394_ASYNCH_RD_BLOCK;
+ *size = DESC_SZ_AR_READBLOCK_REQ;
+
+ /*
+ * read in the ARREQ ACK/EVT, the speed, the time we received it, and
+ * calculate the ATRESP timeout for when we send it.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q5);
+ hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet);
+ cmd_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet);
+ cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet);
+ hcicmd->ac_qcmd.qc_timestamp = hci1394_async_timeout_calc(async_handle,
+ cmd_priv->recv_tstamp);
+
+ /*
+ * if the ARREQ ACK was bad, we were unable to successfully read in this
+ * request. Return failure.
+ */
+ if ((hcicmd->ac_status != OHCI_ACK_COMPLETE) &&
+ (hcicmd->ac_status != OHCI_ACK_PENDING)) {
+ TNF_PROBE_1(hci1394_async_arreq_brd_ack_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_ack,
+ hcicmd->ac_status);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_brd_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Read in the tlabel and destination */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1);
+ hcicmd->ac_dest = HCI1394_DESC_DESTID_GET(quadlet);
+ hcicmd->ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet);
+
+ /*
+ * Read in the sender so we know who to send the ATRESP to. Read in
+ * the 1394 48-bit address for this request. Read in the block data size
+ * and allocate an mblk of that size.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2);
+ cmd->nodeID = HCI1394_DESC_SRCID_GET(quadlet);
+ cmd->cmd_addr = HCI1394_TO_ADDR_HI(quadlet);
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3);
+ cmd->cmd_addr |= HCI1394_TO_ADDR_LO(quadlet);
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q4);
+ cmd->cmd_u.b.blk_length = HCI1394_DESC_DATALEN_GET(quadlet);
+ cmd->cmd_u.b.data_block = allocb(cmd->cmd_u.b.blk_length, 0);
+ if (cmd->cmd_u.b.data_block == NULL) {
+ TNF_PROBE_0(hci1394_async_arreq_brd_mblk_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_brd_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ hcicmd->ac_mblk_alloc = B_TRUE;
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_brd_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
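+
+/*
+ * Illustration only (editor's sketch, not driver code): the allocb()/
+ * b_wptr pattern used for ARREQ block data. allocb() hands back an mblk
+ * with b_rptr == b_wptr; bytes copied in are only claimed once b_wptr is
+ * advanced, as the block write path below does. The helper is
+ * hypothetical.
+ */
+static mblk_t *
+ex_fill_mblk(uint8_t *src, size_t len)
+{
+	mblk_t *mp;
+
+	if ((mp = allocb(len, 0)) == NULL) {
+		return (NULL);	/* caller treats this as a failed read */
+	}
+	bcopy(src, mp->b_wptr, len);	/* copy the payload in */
+	mp->b_wptr += len;		/* claim the copied bytes */
+	return (mp);
+}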
+
+
+/*
+ * hci1394_async_arreq_read_bwr()
+ * Read ARREQ block write into the 1394 Framework command. This routine will
+ * return DDI_FAILURE if it was not able to read the request successfully.
+ */
+static int
+hci1394_async_arreq_read_bwr(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size)
+{
+ h1394_cmd_priv_t *cmd_priv;
+ uint32_t *local_addr;
+ cmd1394_cmd_t *cmd;
+ uint32_t quadlet;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(pkt != NULL);
+ ASSERT(hcicmd != NULL);
+ ASSERT(size != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_bwr_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+	 * Set up shortcuts, command type, and size of request. The size of the
+	 * request is rounded up to quadlets, therefore we need to include
+	 * the padding when figuring out the size (i.e. data may be in bytes
+	 * but the HW always pads to quadlets).
+ */
+ cmd = hcicmd->ac_cmd;
+ cmd_priv = hcicmd->ac_priv;
+ cmd->cmd_type = CMD1394_ASYNCH_WR_BLOCK;
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q4);
+ cmd->cmd_u.b.blk_length = HCI1394_DESC_DATALEN_GET(quadlet);
+ *size = DESC_SZ_AR_WRITEBLOCK_REQ +
+ HCI1394_ALIGN_QUAD(cmd->cmd_u.b.blk_length);
+
+ /*
+ * read in the ARREQ ACK/EVT, the speed, the time we received it, and
+ * calculate the ATRESP timeout for when we send it. The status word is
+ * the last quadlet in the packet.
+ */
+ local_addr = (uint32_t *)(((uintptr_t)(&pkt->q5)) +
+ ((uintptr_t)HCI1394_ALIGN_QUAD(cmd->cmd_u.b.blk_length)));
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, local_addr);
+ hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet);
+ cmd_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet);
+ cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet);
+ hcicmd->ac_qcmd.qc_timestamp = hci1394_async_timeout_calc(async_handle,
+ cmd_priv->recv_tstamp);
+
+ /*
+ * if the ARREQ ACK was bad, we were unable to successfully read in this
+ * request. Return failure.
+ */
+ if ((hcicmd->ac_status != OHCI_ACK_COMPLETE) &&
+ (hcicmd->ac_status != OHCI_ACK_PENDING)) {
+ TNF_PROBE_1(hci1394_async_arreq_bwr_ack_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_ack,
+ hcicmd->ac_status);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_bwr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Read in the tlabel and destination */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1);
+ hcicmd->ac_dest = HCI1394_DESC_DESTID_GET(quadlet);
+ hcicmd->ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet);
+
+ /*
+ * Read in the sender so we know who to send the ATRESP to. Read in
+ * the 1394 48-bit address for this request. Read in the block data size
+ * and allocate an mblk of that size.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2);
+ cmd->nodeID = HCI1394_DESC_SRCID_GET(quadlet);
+ cmd->cmd_addr = HCI1394_TO_ADDR_HI(quadlet);
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3);
+ cmd->cmd_addr |= HCI1394_TO_ADDR_LO(quadlet);
+ cmd->cmd_u.b.data_block = allocb(cmd->cmd_u.b.blk_length, 0);
+ if (cmd->cmd_u.b.data_block == NULL) {
+ TNF_PROBE_0(hci1394_async_arreq_bwr_mblk_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_bwr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ hcicmd->ac_mblk_alloc = B_TRUE;
+
+ /* Copy ARREQ write data into mblk_t */
+ hci1394_q_ar_rep_get8(async_handle->as_arreq_q,
+ (uint8_t *)cmd->cmd_u.b.data_block->b_wptr,
+ (uint8_t *)&pkt->q5, cmd->cmd_u.b.blk_length);
+
+ /* Update mblk_t wptr */
+ cmd->cmd_u.b.data_block->b_wptr += cmd->cmd_u.b.blk_length;
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_bwr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
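+
+/*
+ * Illustration only (editor's sketch, not driver code): the quadlet
+ * padding applied to the byte count when sizing the ARREQ above. This
+ * assumed definition mirrors what HCI1394_ALIGN_QUAD is used for: round
+ * a byte length up to the next quadlet boundary.
+ */
+#define	EX_ALIGN_QUAD(len)	(((len) + 3) & ~(uint_t)3)	/* assumed */
+
+/* e.g. a 5 byte payload occupies EX_ALIGN_QUAD(5) == 8 bytes on the Q */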
+
+
+/*
+ * hci1394_async_arreq_read_lck()
+ * Read ARREQ lock request into the 1394 Framework command. This routine will
+ * return DDI_FAILURE if it was not able to read the request successfully.
+ */
+static int
+hci1394_async_arreq_read_lck(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size)
+{
+ h1394_cmd_priv_t *cmd_priv;
+ uint32_t *local_addr;
+ cmd1394_cmd_t *cmd;
+ uint8_t *data_addr;
+ uint32_t quadlet;
+ uint32_t length;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(pkt != NULL);
+ ASSERT(hcicmd != NULL);
+ ASSERT(size != NULL);
+	TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_lck_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+	 * Set up shortcuts, command type, and size of request. The size of the
+	 * request is rounded up to quadlets, therefore we need to include
+	 * the padding when figuring out the size (i.e. data may be in bytes
+	 * but the HW always pads to quadlets).
+ */
+ cmd = hcicmd->ac_cmd;
+ cmd_priv = hcicmd->ac_priv;
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q4);
+ length = HCI1394_DESC_DATALEN_GET(quadlet);
+ *size = DESC_SZ_AR_LOCK_REQ + HCI1394_ALIGN_QUAD(length);
+
+ /* make sure the length is a valid lock request length */
+ if (length == DESC_TWO_QUADS) {
+ cmd->cmd_type = CMD1394_ASYNCH_LOCK_32;
+ cmd->cmd_u.l32.lock_type = HCI1394_DESC_EXTTCODE_GET(quadlet);
+ } else if (length == DESC_TWO_OCTLETS) {
+ cmd->cmd_type = CMD1394_ASYNCH_LOCK_64;
+ cmd->cmd_u.l64.lock_type = HCI1394_DESC_EXTTCODE_GET(quadlet);
+ } else {
+ TNF_PROBE_2(hci1394_async_arreq_lck_sz_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "unexpected length received", tnf_uint, locklen, length);
+		TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_lck_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * read in the ARREQ ACK/EVT, the speed, the time we received it, and
+ * calculate the ATRESP timeout for when we send it. The status word is
+ * the last quadlet in the packet.
+ */
+ local_addr = (uint32_t *)(((uintptr_t)(&pkt->q5)) +
+ ((uintptr_t)HCI1394_ALIGN_QUAD(length)));
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, local_addr);
+ hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet);
+ cmd_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet);
+ cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet);
+ hcicmd->ac_qcmd.qc_timestamp = hci1394_async_timeout_calc(async_handle,
+ cmd_priv->recv_tstamp);
+
+ /*
+ * if the ARREQ ACK was bad, we were unable to successfully read in this
+ * request. Return failure.
+ */
+ if ((hcicmd->ac_status != OHCI_ACK_COMPLETE) &&
+ (hcicmd->ac_status != OHCI_ACK_PENDING)) {
+		TNF_PROBE_1(hci1394_async_arreq_lck_ack_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_ack,
+ hcicmd->ac_status);
+		TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_lck_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Read in the tlabel and destination */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1);
+ hcicmd->ac_dest = HCI1394_DESC_DESTID_GET(quadlet);
+ hcicmd->ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet);
+ hcicmd->ac_mblk_alloc = B_FALSE;
+
+ /*
+ * Read in the sender so we know who to send the ATRESP to. Read in
+ * the 1394 48-bit address for this request.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2);
+ cmd->nodeID = HCI1394_DESC_SRCID_GET(quadlet);
+ cmd->cmd_addr = HCI1394_TO_ADDR_HI(quadlet);
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3);
+ cmd->cmd_addr |= HCI1394_TO_ADDR_LO(quadlet);
+
+ /* Copy ARREQ lock data into 1394 framework command */
+ if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) {
+ data_addr = (uint8_t *)&pkt->q5;
+ hci1394_q_ar_rep_get8(async_handle->as_arreq_q,
+ (uint8_t *)&cmd->cmd_u.l32.arg_value, data_addr,
+ IEEE1394_QUADLET);
+ data_addr = (uint8_t *)((uintptr_t)data_addr +
+ (uintptr_t)IEEE1394_QUADLET);
+ hci1394_q_ar_rep_get8(async_handle->as_arreq_q,
+ (uint8_t *)&cmd->cmd_u.l32.data_value, data_addr,
+ IEEE1394_QUADLET);
+ /*
+		 * swap these into the byte order of our architecture if we
+		 * are doing arithmetic lock operations
+ */
+ cmd->cmd_u.l32.arg_value = HCI1394_ARITH_LOCK_SWAP32(
+ cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.arg_value);
+ cmd->cmd_u.l32.data_value = HCI1394_ARITH_LOCK_SWAP32(
+ cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.data_value);
+ } else if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_64) {
+ data_addr = (uint8_t *)&pkt->q5;
+ hci1394_q_ar_rep_get8(async_handle->as_arreq_q,
+ (uint8_t *)&cmd->cmd_u.l64.arg_value, data_addr,
+ IEEE1394_OCTLET);
+ data_addr = (uint8_t *)((uintptr_t)data_addr +
+ (uintptr_t)IEEE1394_OCTLET);
+ hci1394_q_ar_rep_get8(async_handle->as_arreq_q,
+ (uint8_t *)&cmd->cmd_u.l64.data_value, data_addr,
+ IEEE1394_OCTLET);
+
+ /*
+		 * swap these into the byte order of our architecture if we
+		 * are doing arithmetic lock operations
+ */
+ cmd->cmd_u.l64.arg_value = HCI1394_ARITH_LOCK_SWAP64(
+ cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.arg_value);
+ cmd->cmd_u.l64.data_value = HCI1394_ARITH_LOCK_SWAP64(
+ cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.data_value);
+ }
+
+	TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_lck_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
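+
+/*
+ * Illustration only (editor's sketch, not driver code): why arithmetic
+ * locks get byte swapped. 1394 payloads are big endian byte streams;
+ * compare-swap style locks can treat them as opaque bytes, but an add
+ * must be done on a native integer. The helper below is a hypothetical
+ * little endian sketch of what HCI1394_ARITH_LOCK_SWAP32 accomplishes;
+ * the lock_type test is an assumption for the example.
+ */
+static uint32_t
+ex_arith_swap32(uint_t lock_type, uint32_t v)
+{
+	if (lock_type != CMD1394_LOCK_FETCH_ADD) {
+		return (v);	/* non-arithmetic locks stay a byte stream */
+	}
+	/* byte swap so the CPU sees the big endian wire value */
+	return (((v & 0xFF000000) >> 24) | ((v & 0x00FF0000) >> 8) |
+	    ((v & 0x0000FF00) << 8) | ((v & 0x000000FF) << 24));
+}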
+
+
+/*
+ * hci1394_async_arreq_read_phy()
+ * Read ARREQ PHY quadlet into the 1394 Framework command. This routine will
+ * return DDI_FAILURE if it was not able to read the request successfully.
+ */
+static int
+hci1394_async_arreq_read_phy(hci1394_async_handle_t async_handle,
+ hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size,
+ boolean_t *bus_reset_token)
+{
+ cmd1394_cmd_t *cmd;
+ uint32_t quadlet;
+ uint32_t data1;
+ uint32_t data2;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(pkt != NULL);
+ ASSERT(hcicmd != NULL);
+ ASSERT(size != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_phy_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+	/* Set up shortcuts, command type, and size of request */
+ cmd = hcicmd->ac_cmd;
+ cmd->cmd_type = CMD1394_ASYNCH_WR_QUAD;
+ *size = DESC_SZ_AR_PHY;
+
+ /*
+ * read in the ARREQ ACK/EVT, the speed, the time we received it, and
+	 * note that we do not use an mblk for this request.
+ */
+ quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q4);
+ hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet);
+ hcicmd->ac_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet);
+ hcicmd->ac_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet);
+ hcicmd->ac_mblk_alloc = B_FALSE;
+
+ /* Read in the PHY packet quadlet and its check quadlet */
+ data1 = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2);
+ data2 = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3);
+
+ /*
+ * if this is a bus reset token, save away the generation. If the bus
+ * reset token is for the current generation, we do not need to flush
+ * the ARREQ Q anymore.
+ */
+ if (hcicmd->ac_status == OHCI_EVT_BUS_RESET) {
+ *bus_reset_token = B_TRUE;
+ async_handle->as_phy_reset = HCI1394_DESC_PHYGEN_GET(data2);
+ if (async_handle->as_phy_reset == hci1394_ohci_current_busgen(
+ async_handle->as_ohci)) {
+ async_handle->as_flushing_arreq = B_FALSE;
+ }
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ *bus_reset_token = B_FALSE;
+
+ /* if there is a data error in the PHY packet, return failure */
+ if (data1 != ~data2) {
+ TNF_PROBE_2(hci1394_async_arreq_phy_xor_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_opaque, first_quadlet,
+ data1, tnf_opaque, second_quadlet, data2);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Copy the PHY quadlet to the command */
+ cmd->cmd_u.q.quadlet_data = data1;
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
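+
+/*
+ * Illustration only (editor's sketch, not driver code): a PHY packet
+ * carries its own integrity check, the second quadlet being the one's
+ * complement of the first. This hypothetical helper is the same test
+ * applied to data1/data2 above.
+ */
+static int
+ex_phy_pkt_ok(uint32_t q_data, uint32_t q_check)
+{
+	/* valid iff the check quadlet is the bitwise NOT of the data */
+	return (q_data == ~q_check);
+}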
+
+
+/*
+ * hci1394_async_phy()
+ * Queue up an ATREQ PHY packet.
+ */
+int
+hci1394_async_phy(hci1394_async_handle_t async_handle, cmd1394_cmd_t *cmd,
+ h1394_cmd_priv_t *cmd_priv, int *result)
+{
+ hci1394_basic_pkt_t header;
+ hci1394_async_cmd_t *hcicmd;
+ int status;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(cmd_priv != NULL);
+ ASSERT(result != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_phy_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * make sure this call is during the current bus generation (i.e. no
+	 * bus resets have occurred since this request was made).
+ */
+ if (cmd_priv->bus_generation != hci1394_ohci_current_busgen(
+ async_handle->as_ohci)) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ TNF_PROBE_0_DEBUG(hci1394_async_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize the private HAL command structure */
+ hci1394_async_hcicmd_init(async_handle, cmd, cmd_priv, &hcicmd);
+
+ /* We do not allocate a tlabel for a PHY packet */
+ hcicmd->ac_tlabel_alloc = B_FALSE;
+
+ /*
+	 * Set up the packet header information for an ATREQ PHY packet. Add
+	 * in the tcode, PHY quadlet, and its 1's complement.
+ */
+ header.q1 = DESC_ATREQ_Q1_PHY;
+ header.q2 = cmd->cmd_u.q.quadlet_data;
+ header.q3 = ~header.q2;
+
+ /* Write request into the ATREQ Q. If we fail, we're out of space */
+ status = hci1394_q_at(async_handle->as_atreq_q, &hcicmd->ac_qcmd,
+ &header, DESC_PKT_HDRLEN_AT_PHY, result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_phy_q_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_async_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_phy_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
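+
+/*
+ * Illustration only (editor's sketch, not driver code): every AT
+ * queueing routine in this file opens with the same bus generation
+ * guard. Distilled into a hypothetical predicate:
+ */
+static int
+ex_busgen_current(hci1394_async_handle_t async_handle,
+    h1394_cmd_priv_t *cmd_priv)
+{
+	/* stale commands from before the last bus reset are rejected */
+	return (cmd_priv->bus_generation ==
+	    hci1394_ohci_current_busgen(async_handle->as_ohci));
+}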
+
+
+/*
+ * hci1394_async_write()
+ * Queue up an ATREQ write. This could be either a block write or a quadlet
+ * write.
+ */
+int
+hci1394_async_write(hci1394_async_handle_t async_handle, cmd1394_cmd_t *cmd,
+ h1394_cmd_priv_t *cmd_priv, int *result)
+{
+ hci1394_async_cmd_t *hcicmd;
+ hci1394_basic_pkt_t header;
+ int status;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(cmd_priv != NULL);
+ ASSERT(result != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_write_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * make sure this call is during the current bus generation (i.e. no
+	 * bus resets have occurred since this request was made).
+ */
+ if (cmd_priv->bus_generation != hci1394_ohci_current_busgen(
+ async_handle->as_ohci)) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ TNF_PROBE_0_DEBUG(hci1394_async_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize the private HAL command structure */
+ hci1394_async_hcicmd_init(async_handle, cmd, cmd_priv, &hcicmd);
+ hcicmd->ac_dest = (uint_t)(cmd->cmd_addr >> IEEE1394_ADDR_PHY_ID_SHIFT);
+
+ /* allocate a tlabel for this request */
+ status = hci1394_tlabel_alloc(async_handle->as_tlabel, hcicmd->ac_dest,
+ &hcicmd->ac_tlabel);
+ if (status != DDI_SUCCESS) {
+ *result = H1394_STATUS_EMPTY_TLABEL;
+ TNF_PROBE_0(hci1394_async_write_tlb_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+	 * Set up the packet header information for an ATREQ write packet. We
+ * will set the tcode later on since this could be a block write or
+ * a quadlet write. Set SRCBusId if this write is not a local bus
+ * access. Copy in the speed, tlabel, and destination address.
+ */
+ header.q1 = 0;
+ if ((hcicmd->ac_dest & IEEE1394_BUS_NUM_MASK) !=
+ IEEE1394_BUS_NUM_MASK) {
+ header.q1 |= DESC_AT_SRCBUSID;
+ }
+ header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) |
+ HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel);
+ header.q2 = (uint32_t)(cmd->cmd_addr >> 32);
+ header.q3 = (uint32_t)(cmd->cmd_addr & DESC_PKT_DESTOFFLO_MASK);
+
+ /* Register this command w/ its tlabel */
+ hci1394_tlabel_register(async_handle->as_tlabel, &hcicmd->ac_tlabel,
+ hcicmd);
+
+ /* If this is a quadlet write ATREQ */
+ if (cmd->cmd_type == CMD1394_ASYNCH_WR_QUAD) {
+ /*
+ * setup the tcode for a quadlet write request and copy in
+ * the quadlet data. Endian issues will be taken care of in
+ * hci1394_q_at().
+ */
+ header.q1 |= DESC_ATREQ_Q1_QWR;
+ header.q4 = cmd->cmd_u.q.quadlet_data;
+
+ /*
+ * Write the request into the ATREQ Q. If we fail, we are out
+ * of space.
+ */
+ status = hci1394_q_at(async_handle->as_atreq_q,
+ &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_WRITEQUAD,
+ result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_write_q_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* This is a block write ATREQ */
+ } else {
+ /* setup the tcode and the length of the block write */
+ header.q1 |= DESC_ATREQ_Q1_BWR;
+ header.q4 = HCI1394_DESC_DATALEN_SET(cmd_priv->mblk.length);
+
+ /*
+ * Write the request into the ATREQ Q. If we fail, we are out
+		 * of space. The data is in one or more mblks. We use a special
+ * interface in the HAL/SL private command block to handle
+ * partial transfers out of the mblk due to packet size
+ * restrictions.
+ */
+ status = hci1394_q_at_with_mblk(async_handle->as_atreq_q,
+ &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_WRITEBLOCK,
+ &cmd_priv->mblk, result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_write_qmblk_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_write_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
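+
+/*
+ * Illustration only (editor's sketch, not driver code): the "local bus"
+ * test used when deciding whether to set SRCBusId in the AT headers
+ * above. A destination whose bus number field is all ones addresses the
+ * local bus and needs no source bus id.
+ */
+static int
+ex_is_local_bus(uint_t dest)
+{
+	/* IEEE1394_BUS_NUM_MASK is the mask used in the header setup */
+	return ((dest & IEEE1394_BUS_NUM_MASK) == IEEE1394_BUS_NUM_MASK);
+}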
+
+
+/*
+ * hci1394_async_read()
+ * Queue up an ATREQ read. This could be either a block read or a quadlet
+ * read.
+ */
+int
+hci1394_async_read(hci1394_async_handle_t async_handle, cmd1394_cmd_t *cmd,
+ h1394_cmd_priv_t *cmd_priv, int *result)
+{
+ hci1394_basic_pkt_t header;
+ int status;
+ hci1394_async_cmd_t *hcicmd;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(cmd_priv != NULL);
+ ASSERT(result != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_read_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * make sure this call is during the current bus generation (i.e. no
+	 * bus resets have occurred since this request was made).
+ */
+ if (cmd_priv->bus_generation != hci1394_ohci_current_busgen(
+ async_handle->as_ohci)) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ TNF_PROBE_0_DEBUG(hci1394_async_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize the private HAL command structure */
+ hci1394_async_hcicmd_init(async_handle, cmd, cmd_priv, &hcicmd);
+ hcicmd->ac_dest = (uint_t)(cmd->cmd_addr >> IEEE1394_ADDR_PHY_ID_SHIFT);
+
+ /* allocate a tlabel for this request */
+ status = hci1394_tlabel_alloc(async_handle->as_tlabel, hcicmd->ac_dest,
+ &hcicmd->ac_tlabel);
+ if (status != DDI_SUCCESS) {
+ *result = H1394_STATUS_EMPTY_TLABEL;
+ TNF_PROBE_0(hci1394_async_read_tlb_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+	 * Set up the packet header information for an ATREQ read packet. We
+ * will set the tcode later on since this could be a block read or
+ * a quadlet read. Set SRCBusId if this read is not a local bus
+ * access. Copy in the speed, tlabel, and destination address.
+ */
+ header.q1 = 0;
+ if ((hcicmd->ac_dest & IEEE1394_BUS_NUM_MASK) !=
+ IEEE1394_BUS_NUM_MASK) {
+ header.q1 |= DESC_AT_SRCBUSID;
+ }
+ header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) |
+ HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel);
+ header.q2 = (uint32_t)(cmd->cmd_addr >> 32);
+ header.q3 = (uint32_t)(cmd->cmd_addr & DESC_PKT_DESTOFFLO_MASK);
+
+ /* Register this command w/ its tlabel */
+ hci1394_tlabel_register(async_handle->as_tlabel, &hcicmd->ac_tlabel,
+ hcicmd);
+
+ /* If this is a quadlet read ATREQ */
+ if (cmd->cmd_type == CMD1394_ASYNCH_RD_QUAD) {
+ /* setup the tcode for a quadlet read request */
+ header.q1 |= DESC_ATREQ_Q1_QRD;
+ header.q4 = 0;
+
+ /*
+ * Write the request into the ATREQ Q. If we fail, we are out
+ * of space.
+ */
+ status = hci1394_q_at(async_handle->as_atreq_q,
+ &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_READQUAD,
+ result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_read_q_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ } else {
+ /* setup the tcode and the length of the block read */
+ header.q1 |= DESC_ATREQ_Q1_BRD;
+ header.q4 = HCI1394_DESC_DATALEN_SET(cmd_priv->mblk.length);
+
+ /*
+ * Write the request into the ATREQ Q. If we fail, we are out
+ * of space.
+ */
+ status = hci1394_q_at(async_handle->as_atreq_q,
+ &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_READBLOCK,
+ result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_read_qb_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_read_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_async_lock()
+ * Queue up an ATREQ lock. This could be either a 32-bit or 64-bit lock
+ * request.
+ */
+int
+hci1394_async_lock(hci1394_async_handle_t async_handle, cmd1394_cmd_t *cmd,
+ h1394_cmd_priv_t *cmd_priv, int *result)
+{
+ hci1394_basic_pkt_t header;
+ hci1394_async_cmd_t *hcicmd;
+ uint32_t data32[2];
+ uint64_t data64[2];
+ uint8_t *datap;
+ uint_t size;
+ int status;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(cmd_priv != NULL);
+ ASSERT(result != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * make sure this call is during the current bus generation (i.e. no
+	 * bus resets have occurred since this request was made).
+ */
+ if (cmd_priv->bus_generation != hci1394_ohci_current_busgen(
+ async_handle->as_ohci)) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize the private HAL command structure */
+ hci1394_async_hcicmd_init(async_handle, cmd, cmd_priv, &hcicmd);
+ hcicmd->ac_dest = (uint_t)(cmd->cmd_addr >> IEEE1394_ADDR_PHY_ID_SHIFT);
+
+ /* allocate a tlabel for this request */
+ status = hci1394_tlabel_alloc(async_handle->as_tlabel, hcicmd->ac_dest,
+ &hcicmd->ac_tlabel);
+ if (status != DDI_SUCCESS) {
+ *result = H1394_STATUS_EMPTY_TLABEL;
+ TNF_PROBE_0(hci1394_async_lock_tlb_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Register this command w/ its tlabel */
+ hci1394_tlabel_register(async_handle->as_tlabel, &hcicmd->ac_tlabel,
+ hcicmd);
+
+ /*
+	 * Set up the packet header information for an ATREQ lock packet. Set
+ * the tcode up as a lock request. Set SRCBusId if this lock is not a
+ * local bus access. Copy in the speed, tlabel, and destination
+ * address.
+ */
+ header.q1 = DESC_ATREQ_Q1_LCK;
+ if ((hcicmd->ac_dest & IEEE1394_BUS_NUM_MASK) !=
+ IEEE1394_BUS_NUM_MASK) {
+ header.q1 |= DESC_AT_SRCBUSID;
+ }
+ header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) |
+ HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel);
+ header.q2 = (uint32_t)(cmd->cmd_addr >> 32);
+ header.q3 = (uint32_t)(cmd->cmd_addr & DESC_PKT_DESTOFFLO_MASK);
+
+ /*
+	 * Set up the lock length based on what size lock operation we are
+ * performing. If it isn't a lock32 or lock64, we have encountered an
+ * internal error. Copy the lock data into a local data buffer. Perform
+ * a byte swap if it is an arithmetic lock operation and we are on a
+ * little endian machine.
+ */
+ if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) {
+ size = DESC_TWO_QUADS;
+ header.q4 = HCI1394_DESC_DATALEN_SET(size) |
+ HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l32.lock_type);
+ data32[0] = HCI1394_ARITH_LOCK_SWAP32(
+ cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.arg_value);
+ data32[1] = HCI1394_ARITH_LOCK_SWAP32(
+ cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.data_value);
+ datap = (uint8_t *)data32;
+ } else if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_64) {
+ size = DESC_TWO_OCTLETS;
+ header.q4 = HCI1394_DESC_DATALEN_SET(size) |
+ HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l64.lock_type);
+ data64[0] = HCI1394_ARITH_LOCK_SWAP64(
+ cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.arg_value);
+ data64[1] = HCI1394_ARITH_LOCK_SWAP64(
+ cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.data_value);
+ datap = (uint8_t *)data64;
+ } else {
+ *result = H1394_STATUS_INTERNAL_ERROR;
+ TNF_PROBE_0(hci1394_lock_length_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Write request into the ATREQ Q. If we fail, we're out of space */
+ status = hci1394_q_at_with_data(async_handle->as_atreq_q,
+ &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_LOCK, datap, size,
+ result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_lock_q_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
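+
+/*
+ * Illustration only (editor's sketch, not driver code): the q4 header
+ * word for a lock request packs the payload length and the extended
+ * tcode. The 16/16 split below is an assumption standing in for
+ * HCI1394_DESC_DATALEN_SET/HCI1394_DESC_EXTTCODE_SET.
+ */
+static uint32_t
+ex_lock_q4(uint_t datalen, uint_t exttcode)
+{
+	/* assumed layout: datalen in the high 16 bits, exttcode in the low */
+	return (((uint32_t)datalen << 16) | ((uint32_t)exttcode & 0xFFFF));
+}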
+
+
+/*
+ * hci1394_async_write_response()
+ * Send a write ATRESP. This routine should be called from the Services
+ * layer to send a response to a received write request (ARREQ). The same
+ * response is sent for quadlet and block write requests.
+ */
+int
+hci1394_async_write_response(hci1394_async_handle_t async_handle,
+ cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv, int *result)
+{
+ hci1394_basic_pkt_t header;
+ int status;
+ hci1394_async_cmd_t *hcicmd;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(cmd_priv != NULL);
+ ASSERT(result != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_write_response_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * make sure this call is during the current bus generation (i.e. no
+	 * bus resets have occurred since this request was made).
+ */
+ if (cmd_priv->bus_generation != hci1394_ohci_current_busgen(
+ async_handle->as_ohci)) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ TNF_PROBE_0_DEBUG(hci1394_async_write_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+	 * set up a shortcut to the HAL private command area. Copy the
+	 * generation to the Q area so that we can check the generation when
+	 * the AT Q is locked. This prevents us from losing commands due to
+	 * race conditions.
+ */
+ hcicmd = (hci1394_async_cmd_t *)cmd_priv->hal_overhead;
+ hcicmd->ac_qcmd.qc_generation = cmd_priv->bus_generation;
+
+ /*
+	 * Set up the packet header information for an ATRESP write packet. Set
+ * the tcode for a write response. Set SRCBusId if the addr is not a
+ * local bus address. Copy in the speed, tlabel, and response code.
+ */
+ header.q1 = DESC_ATRESP_Q1_WR;
+ if ((cmd->nodeID & IEEE1394_BUS_NUM_MASK) != IEEE1394_BUS_NUM_MASK) {
+ header.q1 |= DESC_AT_SRCBUSID;
+ }
+ header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) |
+ HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel);
+ header.q2 = (HCI1394_DESC_DESTID_SET(cmd->nodeID) |
+ HCI1394_DESC_RCODE_SET(cmd->cmd_result));
+ header.q3 = 0;
+
+ /* Write response into the ATRESP Q. If we fail, we're out of space */
+ status = hci1394_q_at(async_handle->as_atresp_q, &hcicmd->ac_qcmd,
+ &header, DESC_PKT_HDRLEN_AT_WRITE_RESP, result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_write_response_q_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_write_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_write_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_async_read_response()
+ * Send a read ATRESP. This routine should be called from the Services
+ * layer to send a response to a received read request (ARREQ). The
+ * response will differ between quadlet/block read requests.
+ */
+int
+hci1394_async_read_response(hci1394_async_handle_t async_handle,
+ cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv, int *result)
+{
+ hci1394_basic_pkt_t header;
+ int status;
+ hci1394_async_cmd_t *hcicmd;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(cmd_priv != NULL);
+ ASSERT(result != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_read_response_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * make sure this call is during the current bus generation (i.e. no
+	 * bus resets have occurred since this request was made).
+ */
+ if (cmd_priv->bus_generation != hci1394_ohci_current_busgen(
+ async_handle->as_ohci)) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ TNF_PROBE_0_DEBUG(hci1394_async_read_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+	 * set up a shortcut to the HAL private command area. Copy the
+	 * generation to the Q area so that we can check the generation when
+	 * the AT Q is locked. This prevents us from losing commands due to
+	 * race conditions.
+ */
+ hcicmd = (hci1394_async_cmd_t *)cmd_priv->hal_overhead;
+ hcicmd->ac_qcmd.qc_generation = cmd_priv->bus_generation;
+
+ /*
+	 * Set up the packet header information for an ATRESP read packet. We
+	 * will set the tcode later based on the type of read response. Set
+ * SRCBusId if the addr is not a local bus address. Copy in the
+ * speed, tlabel, and response code.
+ */
+ header.q1 = 0;
+ if ((cmd->nodeID & IEEE1394_BUS_NUM_MASK) != IEEE1394_BUS_NUM_MASK) {
+ header.q1 |= DESC_AT_SRCBUSID;
+ }
+ header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) |
+ HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel);
+ header.q2 = (uint32_t)(HCI1394_DESC_DESTID_SET(cmd->nodeID) |
+ HCI1394_DESC_RCODE_SET(cmd->cmd_result));
+ header.q3 = 0;
+
+ /* if the response is a read quadlet response */
+ if (cmd->cmd_type == CMD1394_ASYNCH_RD_QUAD) {
+ /*
+		 * set up the tcode for a quadlet read response. If the
+		 * response code is not resp complete, send zero data.
+ */
+ header.q1 |= DESC_ATRESP_Q1_QRD;
+ if (cmd->cmd_result == IEEE1394_RESP_COMPLETE) {
+ header.q4 = cmd->cmd_u.q.quadlet_data;
+ } else {
+ header.q4 = 0x0;
+ }
+
+ /*
+ * Write response into the ATRESP Q. If we fail, we're out of
+ * space.
+ */
+ status = hci1394_q_at(async_handle->as_atresp_q,
+ &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_READQUAD_RESP,
+ result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_read_response_q_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_read_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+	 * the response is a block read response. If the result is not
+ * resp complete, we are not going to send any data back.
+ */
+ } else if ((cmd->cmd_type == CMD1394_ASYNCH_RD_BLOCK) &&
+ (cmd->cmd_result != IEEE1394_RESP_COMPLETE)) {
+ /*
+		 * Set up the tcode for a block read response; set the data
+ * length to zero since we had an error.
+ */
+ header.q1 |= DESC_ATRESP_Q1_BRD;
+ header.q4 = 0x0;
+
+ /*
+ * Write response into the ATRESP Q. If we fail, we're out of
+ * space.
+ */
+ status = hci1394_q_at(async_handle->as_atresp_q,
+ &hcicmd->ac_qcmd, &header,
+ DESC_PKT_HDRLEN_AT_READBLOCK_RESP, result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_read_response_qbf_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_read_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+	 * the response is a block read response with resp complete as the
+ * response code. Send back the read data.
+ */
+ } else {
+ /*
+		 * Set up the tcode for a block read response; set up the data
+ * length.
+ */
+ header.q1 |= DESC_ATRESP_Q1_BRD;
+ header.q4 = HCI1394_DESC_DATALEN_SET(cmd->cmd_u.b.blk_length);
+
+ /*
+ * Write response into the ATRESP Q. If we fail, we're out of
+ * space. Use the data in the mblk.
+ */
+ status = hci1394_q_at_with_mblk(async_handle->as_atresp_q,
+ &hcicmd->ac_qcmd, &header,
+ DESC_PKT_HDRLEN_AT_READBLOCK_RESP, &cmd_priv->mblk, result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_read_response_qb_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_read_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_read_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_async_lock_response()
+ * Send a lock ATRESP. This routine should be called from the Services
+ * layer to send a response to a received lock request (ARREQ). The
+ * response will differ between 32-bit/64-bit lock requests.
+ */
+int
+hci1394_async_lock_response(hci1394_async_handle_t async_handle,
+ cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv, int *result)
+{
+ hci1394_basic_pkt_t header;
+ hci1394_async_cmd_t *hcicmd;
+ uint32_t data32;
+ uint64_t data64;
+ uint8_t *datap;
+ uint_t size;
+ int status;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(cmd_priv != NULL);
+ ASSERT(result != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_response_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * make sure this call is during the current bus generation (i.e. no
+	 * bus resets have occurred since this request was made).
+ */
+ if (cmd_priv->bus_generation != hci1394_ohci_current_busgen(
+ async_handle->as_ohci)) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * setup a shortcut to the hal private command area. Copy the generation
+ * to the Q area so that we can check the generation when the AT Q is
+	 * locked. This prevents us from losing commands due to race
+ * conditions.
+ */
+ hcicmd = (hci1394_async_cmd_t *)cmd_priv->hal_overhead;
+ hcicmd->ac_qcmd.qc_generation = cmd_priv->bus_generation;
+
+ /*
+	 * Setup the packet header information for an ATRESP lock packet. Set
+ * the tcode for a lock response. Set SRCBusId if the addr is not a
+ * local bus address. Copy in the speed, tlabel, and response code.
+ */
+ header.q1 = DESC_ATRESP_Q1_LCK;
+ if ((cmd->nodeID & IEEE1394_BUS_NUM_MASK) != IEEE1394_BUS_NUM_MASK) {
+ header.q1 |= DESC_AT_SRCBUSID;
+ }
+ header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) |
+ HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel);
+ header.q2 = (uint32_t)(HCI1394_DESC_DESTID_SET(cmd->nodeID) |
+ HCI1394_DESC_RCODE_SET(cmd->cmd_result));
+ header.q3 = 0;
+
+ /*
+ * If the lock result is not a resp complete, we are not going to send
+	 * any data back with the response.
+ */
+ if (cmd->cmd_result != IEEE1394_RESP_COMPLETE) {
+ /* set response size to 0 for error. Set the extended tcode */
+ size = 0;
+ if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) {
+ header.q4 = HCI1394_DESC_DATALEN_SET(size) |
+ HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l32.lock_type);
+ } else {
+ header.q4 = HCI1394_DESC_DATALEN_SET(size) |
+ HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l64.lock_type);
+ }
+
+ /*
+ * Write response into the ATRESP Q. If we fail, we're out of
+ * space.
+ */
+ status = hci1394_q_at(async_handle->as_atresp_q,
+ &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_LOCK_RESP,
+ result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_q_alloc_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * if the lock result is resp complete, setup the size of the response
+ * depending on the lock size and copy the lock response data into a
+ * local buffer. If the lock response is an arithmetic operation, swap
+ * the data on little endian machines. If we don't know what type of
+ * lock operation it is, someone has corrupted the command since we
+ * had received the ARREQ.
+ */
+ if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) {
+ size = IEEE1394_QUADLET;
+ header.q4 = HCI1394_DESC_DATALEN_SET(size) |
+ HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l32.lock_type);
+ data32 = HCI1394_ARITH_LOCK_SWAP32(
+ cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.old_value);
+ datap = (uint8_t *)&data32;
+ } else if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_64) {
+ size = IEEE1394_OCTLET;
+ header.q4 = HCI1394_DESC_DATALEN_SET(size) |
+ HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l64.lock_type);
+ data64 = HCI1394_ARITH_LOCK_SWAP64(
+ cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.old_value);
+ datap = (uint8_t *)&data64;
+ } else {
+ *result = H1394_STATUS_INTERNAL_ERROR;
+ TNF_PROBE_0(hci1394_lock_type_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Write response into the ATRESP Q. If we fail, we're out of space.
+ * Use the local data buffer that we copied the data to above.
+ */
+ status = hci1394_q_at_with_data(async_handle->as_atresp_q,
+ &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_LOCK_RESP, datap,
+ size, result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_q_alloc_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
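+
+
+/*
+ * Illustrative sketch of the arithmetic-lock byte swap rule used above
+ * (not part of the driver). Arithmetic lock data is interpreted
+ * numerically, so a little endian host must swap it into bus (big endian)
+ * order before transmission, while bit-wise lock types can go out as-is.
+ * This hypothetical macro only approximates, for one lock type, what the
+ * real HCI1394_ARITH_LOCK_SWAP32() does.
+ */
+#if 0	/* example only, not compiled */
+#ifdef _LITTLE_ENDIAN
+#define	EXAMPLE_ARITH_SWAP32(type, data) \
+	(((type) == CMD1394_LOCK_FETCH_ADD) ? ddi_swap32(data) : (data))
+#else
+#define	EXAMPLE_ARITH_SWAP32(type, data)	(data)
+#endif
+#endif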
+
+
+/*
+ * hci1394_async_response_complete()
+ * Free up space allocated during an ARREQ. This is called when the target
+ * driver and Services Layer are done with a command which was allocated by
+ * the HAL during ARREQ processing. This routine will also free up any
+ * allocated mblks.
+ *
+ * NOTE: a target driver can hold on to a block write ARREQ mblk by setting
+ * the mblk pointer to NULL. This ONLY applies to block write ARREQs. The
+ * HAL will no longer track the mblk for this case.
+ */
+void
+hci1394_async_response_complete(hci1394_async_handle_t async_handle,
+ cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv)
+{
+ hci1394_async_cmd_t *hcicmd;
+
+
+ ASSERT(async_handle != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(cmd_priv != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_response_complete_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ hcicmd = (hci1394_async_cmd_t *)cmd_priv->hal_overhead;
+
+ /* If we allocated an mblk for this command */
+ if (hcicmd->ac_mblk_alloc == B_TRUE) {
+ /*
+ * Don't free mblk if it is set to NULL. This allows a target
+ * driver to hold on to it in the case of a block write ARREQ.
+ */
+ if (cmd->cmd_u.b.data_block != NULL) {
+ freeb(cmd->cmd_u.b.data_block);
+ }
+ }
+
+ /* free up the 1394 framework command */
+ (void) h1394_free_cmd((void *)async_handle->as_drvinfo->di_sl_private,
+ &cmd);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_response_complete_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
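+
+
+/*
+ * Example (hypothetical target driver fragment, not HAL code) of the NOTE
+ * above: a target keeps the data from a block write ARREQ by taking the
+ * mblk pointer and setting it to NULL before completing the command, so
+ * the HAL will not freeb() it.
+ */
+#if 0	/* example only, not compiled */
+	mblk_t *mp;
+
+	mp = cmd->cmd_u.b.data_block;	/* take ownership of the data */
+	cmd->cmd_u.b.data_block = NULL;	/* HAL will no longer track it */
+#endif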
+
+
+/*
+ * hci1394_async_pending_timeout()
+ * This is the ARREQ Pending timeout callback routine. It is called from
+ * the tlist code. There is a race condition with the ARRESP interrupt
+ * handler (hci1394_async_arresp_process) which requires a mutex held
+ * around marking the tlabel bad.
+ *
+ * Once we enter this routine, the command has timed out. If the command is
+ * in both the ARRESP handler and here, we will consider it to have timed
+ * out. That code path handles the race condition more easily.
+ */
+static void
+hci1394_async_pending_timeout(hci1394_tlist_node_t *node, void *arg)
+{
+ hci1394_async_handle_t async_handle;
+ hci1394_async_cmd_t *hcicmd;
+
+
+ async_handle = (hci1394_async_handle_t)arg;
+ ASSERT(async_handle != NULL);
+ ASSERT(node != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_pending_timeout_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ hcicmd = (hci1394_async_cmd_t *)node->tln_addr;
+
+ /*
+ * We do NOT want to set the command state here. That should only be
+ * done in the ISR. The state does nothing for us here.
+ */
+
+ /*
+ * We want a lock around tlabel_lookup/reading data into the cmd in the
+ * ARRESP ISR processing and a lock around the tlabel_bad in this
+ * routine. This ensures that we will not be touching the command
+ * structure after we pass it up to the Services Layer. If we mark it as
+ * bad first, the lookup will fail. If we get to the lookup first, the
+ * pending list delete will fail in arresp_process() which will tell
+ * that guy that we are in the middle of doing the timeout processing
+ * for this command. The ARRESP logic will just drop the response and
+ * continue on.
+ */
+ mutex_enter(&hcicmd->ac_async->as_atomic_lookup);
+ hci1394_tlabel_bad(async_handle->as_tlabel, &hcicmd->ac_tlabel);
+ mutex_exit(&hcicmd->ac_async->as_atomic_lookup);
+
+ /* Tell the Services Layer that the command has timed out */
+ h1394_cmd_is_complete(async_handle->as_drvinfo->di_sl_private,
+ hcicmd->ac_cmd, H1394_AT_REQ, H1394_CMD_ETIMEOUT);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_pending_timeout_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_timeout_calc()
+ * Calculate the timeout for an ATRESP. When an ARREQ is received, this
+ * routine is called with the time the ARREQ was received. It returns the
+ * time when the ATRESP is considered to have timed out. We timeout after
+ * split_timeout has gone by. Split timeout and the returned value are in bus
+ * cycles.
+ */
+static uint_t
+hci1394_async_timeout_calc(hci1394_async_handle_t async_handle,
+ uint_t current_time)
+{
+ uint_t split_timeout;
+ uint_t temp;
+ uint_t carry;
+ uint_t z;
+
+
+ TNF_PROBE_0_DEBUG(hci1394_async_timeout_calc_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* Get the current split timeout */
+ split_timeout = hci1394_csr_split_timeout_get(async_handle->as_csr);
+
+ /*
+ * The cycle count is broken up into two sections, the 3-bit seconds
+ * field and the 13-bit cycle count. The cycle count is in 125uS
+ * increments. The maximum value of cycle count is 7999 (8000 is one
+ * second). With 13-bits, we could store up to 8191. Therefore, we don't
+ * have a simple 16-bit addition. Hence, the code we see below.
+ */
+
+ /*
+ * calculate the new cycle count based on the cycle count from current
+ * time and the split timeout. If this new value is not greater than the
+ * maximum cycle count, we don't have a carry. Go to the next step.
+ */
+ temp = (current_time & OHCI_CYCLE_CNT_MASK) + (split_timeout &
+ OHCI_CYCLE_CNT_MASK);
+ if (temp < OHCI_MAX_CYCLE_CNT) {
+ carry = 0;
+
+ /*
+ * the new cycle count adds up to more than the maximum cycle count,
+ * set the carry state and adjust the total accordingly.
+ */
+ } else {
+ temp = temp - OHCI_MAX_CYCLE_CNT;
+ carry = 1;
+ }
+
+ /*
+ * The timeout time equals the seconds added with the carry (1 or 0
+ * seconds), added with the adjusted (if necessary) cycle count.
+ * Mask the final value to get rid of any second rollovers.
+ */
+ z = (current_time & OHCI_CYCLE_SEC_MASK) + (split_timeout &
+ OHCI_CYCLE_SEC_MASK) + (carry << OHCI_CYCLE_SEC_SHIFT) + temp;
+ z = z & OHCI_TIMESTAMP_MASK;
+
+ TNF_PROBE_0_DEBUG(hci1394_async_timeout_calc_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (z);
+}
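+
+
+/*
+ * Worked example of the carry arithmetic in hci1394_async_timeout_calc(),
+ * assuming the usual OHCI 16-bit timestamp layout (3-bit seconds above a
+ * 13-bit cycle count, OHCI_MAX_CYCLE_CNT == 8000):
+ *
+ *	current_time cycle count = 7900, split_timeout = 800 cycles
+ *	temp = 7900 + 800 = 8700, which is >= 8000, so temp = 700, carry = 1
+ *	timeout = (seconds field + 1) in the seconds bits, cycle count 700
+ *
+ * i.e. the deadline lands 700 cycles (87.5mS) into the next second.
+ */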
+
+
+/*
+ * hci1394_async_arresp_size_get()
+ * Return the size of the arresp that was received in q_handle at addr.
+ */
+static int
+hci1394_async_arresp_size_get(uint_t tcode, hci1394_q_handle_t q_handle,
+ uint32_t *addr, uint_t *size)
+{
+ uint_t data_length;
+ uint32_t quadlet;
+
+
+ ASSERT(q_handle != NULL);
+ ASSERT(addr != NULL);
+ ASSERT(size != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_get_arresp_size_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ if (tcode == IEEE1394_TCODE_WRITE_RESP) {
+ *size = DESC_PKT_HDRLEN_AT_WRITE_RESP + IEEE1394_QUADLET;
+ } else if (tcode == IEEE1394_TCODE_READ_QUADLET_RESP) {
+ *size = DESC_PKT_HDRLEN_AT_READQUAD_RESP + IEEE1394_QUADLET;
+ } else if (tcode == IEEE1394_TCODE_READ_BLOCK_RESP) {
+ quadlet = hci1394_q_ar_get32(q_handle, &addr[3]);
+ data_length = HCI1394_DESC_DATALEN_GET(quadlet);
+ /*
+ * response size is in quadlets, therefore we need to
+		 * account for the padding when figuring out
+		 * the size used up by this response
+ */
+ *size = DESC_PKT_HDRLEN_AT_READBLOCK_RESP +
+ HCI1394_ALIGN_QUAD(data_length) + IEEE1394_QUADLET;
+ } else if (tcode == IEEE1394_TCODE_LOCK_RESP) {
+ quadlet = hci1394_q_ar_get32(q_handle, &addr[3]);
+ data_length = HCI1394_DESC_DATALEN_GET(quadlet);
+ /*
+ * response size is in quadlets, therefore we need to
+		 * account for the padding when figuring out
+		 * the size used up by this response
+ */
+ *size = DESC_PKT_HDRLEN_AT_LOCK_RESP +
+ HCI1394_ALIGN_QUAD(data_length) + IEEE1394_QUADLET;
+ } else {
+ TNF_PROBE_1(hci1394_async_arresp_size_tcode_err,
+ HCI1394_TNF_HAL_ERROR,
+ "unknown ARRESP received", tnf_uint, arresp_tcode, tcode);
+ TNF_PROBE_0_DEBUG(hci1394_get_arresp_size_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_get_arresp_size_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
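+
+
+/*
+ * Example of the padding accounted for above, assuming HCI1394_ALIGN_QUAD()
+ * rounds up to the next quadlet (4-byte) boundary: a block read response
+ * carrying data_length = 5 occupies 8 bytes of data in the Q, so the space
+ * consumed is DESC_PKT_HDRLEN_AT_READBLOCK_RESP + 8 plus one more quadlet
+ * (presumably the trailer the HW appends to each received packet).
+ */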
+
+
+/*
+ * hci1394_async_pending_list_flush()
+ * Flush out the ATREQ pending list. All commands still on the ATREQ pending
+ * list are considered to be completed due to a bus reset. The ATREQ and
+ * ARRESP Q's should be flushed before the pending Q is flushed. The ATREQ
+ * could have more ACK pendings and the ARRESP could have valid responses to
+ * pended requests.
+ */
+void
+hci1394_async_pending_list_flush(hci1394_async_handle_t async_handle)
+{
+ hci1394_tlist_node_t *node;
+ hci1394_async_cmd_t *hcicmd;
+
+
+ ASSERT(async_handle != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_pending_list_flush_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ do {
+ /*
+ * get the first node on the pending list. This routine also
+ * removes the node from the list.
+ */
+ hci1394_tlist_get(async_handle->as_pending_list, &node);
+ if (node != NULL) {
+ /* set the command state to completed */
+ hcicmd = (hci1394_async_cmd_t *)node->tln_addr;
+ hcicmd->ac_state = HCI1394_CMD_STATE_COMPLETED;
+
+ /*
+ * Send the command up to the Services Layer with
+ * completed due to the bus reset for status.
+ */
+ h1394_cmd_is_complete(
+ async_handle->as_drvinfo->di_sl_private,
+ hcicmd->ac_cmd, H1394_AT_REQ,
+ H1394_CMD_EBUSRESET);
+ }
+ } while (node != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_pending_list_flush_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_atreq_start()
+ * Setup the command pointer for the first descriptor to be fetched and
+ * then set the run bit. This routine will be called the first time
+ * a descriptor is added to the Q.
+ */
+static void
+hci1394_async_atreq_start(void *async, uint32_t command_ptr)
+{
+ hci1394_async_handle_t async_handle;
+ ASSERT(async != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_start_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ async_handle = (hci1394_async_handle_t)async;
+ hci1394_ohci_atreq_start(async_handle->as_ohci, command_ptr);
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_start_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_atreq_wake()
+ * Set the wake bit for the ATREQ DMA engine. This routine will be called
+ * from the Q logic after placing a descriptor on the Q.
+ */
+static void
+hci1394_async_atreq_wake(void *async)
+{
+ hci1394_async_handle_t async_handle;
+ ASSERT(async != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_wake_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ async_handle = (hci1394_async_handle_t)async;
+ hci1394_ohci_atreq_wake(async_handle->as_ohci);
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_wake_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_atreq_reset()
+ * Reset the atreq Q. The AT DMA engines must be stopped every bus reset.
+ * They will restart when the next descriptor is added to the Q. We will stop
+ * the DMA engine and then notify the Q logic that it has been stopped so it
+ * knows to do a start next time it puts a descriptor on the Q.
+ */
+void
+hci1394_async_atreq_reset(hci1394_async_handle_t async_handle)
+{
+ ASSERT(async_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_reset_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ hci1394_ohci_atreq_stop(async_handle->as_ohci);
+ hci1394_q_stop(async_handle->as_atreq_q);
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_reset_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_atreq_flush()
+ * Flush out the atreq Q. This routine is called during bus reset processing.
+ * It should be called before arresp_flush() and pending_list_flush().
+ */
+static void
+hci1394_async_atreq_flush(hci1394_async_handle_t async_handle)
+{
+ boolean_t request_available;
+ int status;
+
+ ASSERT(async_handle != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_flush_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* Clear reqTxComplete interrupt */
+ hci1394_ohci_intr_clear(async_handle->as_ohci, OHCI_INTR_REQ_TX_CMPLT);
+
+ /*
+	 * Process all Q'd AT requests. If a request is pended, it is
+	 * considered complete relative to the atreq engine.
+	 * pending_list_flush() will finish up the required processing for
+ * pended requests.
+ */
+ do {
+ /* Flush the atreq Q. Process all Q'd commands */
+ status = hci1394_async_atreq_process(async_handle,
+ B_TRUE, &request_available);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_atreq_process_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ }
+ } while (request_available == B_TRUE);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_atreq_flush_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_arresp_start()
+ * Setup the command pointer for the first descriptor to be fetched and
+ * then set the run bit. This routine will be called the first time
+ * a descriptor is added to the Q.
+ */
+static void
+hci1394_async_arresp_start(void *async, uint32_t command_ptr)
+{
+ hci1394_async_handle_t async_handle;
+ ASSERT(async != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_start_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ async_handle = (hci1394_async_handle_t)async;
+ hci1394_ohci_arresp_start(async_handle->as_ohci, command_ptr);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_start_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_arresp_wake()
+ * Set the wake bit for the ARRESP DMA engine. This routine will be called
+ * from the Q logic after placing a descriptor on the Q.
+ */
+static void
+hci1394_async_arresp_wake(void *async)
+{
+ hci1394_async_handle_t async_handle;
+ ASSERT(async != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_wake_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ async_handle = (hci1394_async_handle_t)async;
+ hci1394_ohci_arresp_wake(async_handle->as_ohci);
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_wake_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_arresp_flush()
+ * Flush out the arresp Q. This routine is called during bus reset
+ * processing. This should be called before pending_list_flush(). All
+ * receive responses will be processed normally. The tlabels should
+ * not be reset until after the ARRESP Q has been flushed. Otherwise
+ * we would reject valid responses.
+ */
+static void
+hci1394_async_arresp_flush(hci1394_async_handle_t async_handle)
+{
+ boolean_t response_available;
+ int status;
+
+
+ ASSERT(async_handle != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arresp_flush_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+	/* Clear the RSPkt (received response) interrupt */
+ hci1394_ohci_intr_clear(async_handle->as_ohci, OHCI_INTR_RSPKT);
+
+ do {
+ /* Flush the arresp Q. Process all received commands */
+ status = hci1394_async_arresp_process(async_handle,
+ &response_available);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_arresp_process_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ }
+ } while (response_available == B_TRUE);
+
+	TNF_PROBE_0_DEBUG(hci1394_async_arresp_flush_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_arreq_start()
+ * Setup the command pointer for the first descriptor to be fetched and
+ * then set the run bit. This routine will be called the first time
+ * a descriptor is added to the Q.
+ */
+static void
+hci1394_async_arreq_start(void *async, uint32_t command_ptr)
+{
+ hci1394_async_handle_t async_handle;
+ ASSERT(async != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_start_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ async_handle = (hci1394_async_handle_t)async;
+ hci1394_ohci_arreq_start(async_handle->as_ohci, command_ptr);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_start_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_arreq_wake()
+ * Set the wake bit for the ARREQ DMA engine. This routine will be called
+ * from the Q logic after placing a descriptor on the Q.
+ */
+static void
+hci1394_async_arreq_wake(void *async)
+{
+ hci1394_async_handle_t async_handle;
+ ASSERT(async != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_wake_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ async_handle = (hci1394_async_handle_t)async;
+ hci1394_ohci_arreq_wake(async_handle->as_ohci);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_wake_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_arreq_flush()
+ * Flush the ARREQ Q. This will flush up to the bus reset token in the
+ * ARREQ. There is no order dependency for when this routine should get
+ * called (relative to the other Q flushing routines).
+ */
+static void
+hci1394_async_arreq_flush(hci1394_async_handle_t async_handle)
+{
+ boolean_t request_available;
+ int status;
+
+
+ ASSERT(async_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_flush_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * If the last bus reset token we have seen in
+ * hci1394_async_arreq_read_phy() matches the current generation, the
+ * ARREQ is already flushed. We have nothing further to do here so
+ * return. This can happen if we are processing ARREQ's and a bus reset
+ * occurs. Since we are already in the ISR, we will see the token before
+ * the bus reset handler gets to run.
+ */
+ if (async_handle->as_phy_reset == hci1394_ohci_current_busgen(
+ async_handle->as_ohci)) {
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_flush_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return;
+ }
+
+ /*
+ * set flag to tell hci1394_async_arreq_process() that we should not
+ * pass ARREQ's up to the Services Layer. This will be set to B_FALSE
+ * in hci1394_async_arreq_read_phy() when a bus reset token matching
+ * the current generation is found.
+ */
+ async_handle->as_flushing_arreq = B_TRUE;
+
+ /*
+ * Process all requests that have been received or until we find the
+ * correct bus reset token.
+ */
+ do {
+ status = hci1394_async_arreq_process(async_handle,
+ &request_available);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_isr_arreq_pr_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ }
+ } while ((request_available == B_TRUE) &&
+ (async_handle->as_flushing_arreq == B_TRUE));
+
+ /*
+ * Clear the asserted interrupt if there are no more ARREQ's to process.
+ * We could have ARREQ's in the Q after the bus reset token since we
+ * will set as_flushing_arreq to FALSE when we see the correct bus reset
+ * token in hci1394_async_arreq_read_phy(). If there are more ARREQ's,
+	 * we will process them later after finishing the rest of bus reset
+ * processing. That is why we will leave the interrupt asserted.
+ */
+ if (request_available == B_FALSE) {
+ hci1394_ohci_intr_clear(async_handle->as_ohci, OHCI_INTR_RQPKT);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_async_arreq_flush_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_atresp_start()
+ * Setup the command pointer for the first descriptor to be fetched and
+ * then set the run bit. This routine will be called the first time
+ * a descriptor is added to the Q.
+ */
+static void
+hci1394_async_atresp_start(void *async, uint32_t command_ptr)
+{
+ hci1394_async_handle_t async_handle;
+ ASSERT(async != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_atresp_start_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ async_handle = (hci1394_async_handle_t)async;
+ hci1394_ohci_atresp_start(async_handle->as_ohci, command_ptr);
+ TNF_PROBE_0_DEBUG(hci1394_async_atresp_start_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_atresp_wake()
+ * Set the wake bit for the ATRESP DMA engine. This routine will be called
+ * from the Q logic after placing a descriptor on the Q.
+ */
+static void
+hci1394_async_atresp_wake(void *async)
+{
+ hci1394_async_handle_t async_handle;
+ ASSERT(async != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_atresp_wake_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ async_handle = (hci1394_async_handle_t)async;
+ hci1394_ohci_atresp_wake(async_handle->as_ohci);
+ TNF_PROBE_0_DEBUG(hci1394_async_atresp_wake_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_atresp_reset()
+ * Reset the atresp Q. The AT DMA engines must be stopped every bus reset.
+ * They will restart when the next descriptor is added to the Q. We will stop
+ * the DMA engine and then notify the Q logic that it has been stopped so it
+ * knows to do a start next time it puts a descriptor on the Q.
+ */
+void
+hci1394_async_atresp_reset(hci1394_async_handle_t async_handle)
+{
+ ASSERT(async_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_async_atresp_reset_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ hci1394_ohci_atresp_stop(async_handle->as_ohci);
+ hci1394_q_stop(async_handle->as_atresp_q);
+ TNF_PROBE_0_DEBUG(hci1394_async_atresp_reset_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_async_atresp_flush()
+ * Flush all commands out of the atresp Q. This routine will be called
+ * during bus reset processing. There is no order dependency for when this
+ * routine should get called (relative to the other Q flushing routines).
+ */
+static void
+hci1394_async_atresp_flush(hci1394_async_handle_t async_handle)
+{
+ boolean_t response_available;
+ int status;
+
+ ASSERT(async_handle != NULL);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_atresp_flush_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* Clear respTxComplete interrupt */
+ hci1394_ohci_intr_clear(async_handle->as_ohci, OHCI_INTR_RESP_TX_CMPLT);
+
+ /* Processes all AT responses */
+ do {
+ /* Flush the atresp Q. Process all Q'd commands */
+ status = hci1394_async_atresp_process(async_handle,
+ B_TRUE, &response_available);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_atresp_process_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ }
+ } while (response_available == B_TRUE);
+
+ TNF_PROBE_0_DEBUG(hci1394_async_atresp_flush_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+/*
+ * hci1394_async_hcicmd_init()
+ * Initialize the private HAL command structure. This should be called from
+ * ATREQ and ARREQ routines.
+ */
+static void
+hci1394_async_hcicmd_init(hci1394_async_handle_t async_handle,
+ cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv,
+ hci1394_async_cmd_t **hcicmd)
+{
+ *hcicmd = (hci1394_async_cmd_t *)cmd_priv->hal_overhead;
+ (*hcicmd)->ac_cmd = cmd;
+ (*hcicmd)->ac_priv = cmd_priv;
+ (*hcicmd)->ac_async = async_handle;
+ (*hcicmd)->ac_state = HCI1394_CMD_STATE_IN_PROGRESS;
+ (*hcicmd)->ac_dest = 0;
+ (*hcicmd)->ac_tlabel_alloc = B_TRUE;
+ (*hcicmd)->ac_tlabel.tbi_tlabel = 0;
+ (*hcicmd)->ac_tlabel.tbi_destination = 0;
+ (*hcicmd)->ac_status = 0;
+ (*hcicmd)->ac_qcmd.qc_timestamp = 0;
+ (*hcicmd)->ac_qcmd.qc_arg = *hcicmd;
+ (*hcicmd)->ac_qcmd.qc_generation = cmd_priv->bus_generation;
+ (*hcicmd)->ac_mblk_alloc = B_FALSE;
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_attach.c b/usr/src/uts/common/io/1394/adapters/hci1394_attach.c
new file mode 100644
index 0000000000..f47c37e6b5
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_attach.c
@@ -0,0 +1,918 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_attach.c
+ * HBA attach() routine with associated functions.
+ */
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/pci.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/adapters/hci1394.h>
+#include <sys/1394/adapters/hci1394_extern.h>
+
+
+/*
+ * Attach State Information. These states are used to track the status of the
+ * attach. They are bit offsets.
+ */
+#define STATE_ZALLOC 0
+#define STATE_ISR_INIT 1
+#define STATE_MINOR_NODE 2
+#define STATE_HW_INIT 3
+#define STATE_PHASE2 4
+#define STATE_POWER_INIT 5
+#define STATE_H1394_ATTACH 6
+#define STATE_ISR_HANDLER 7
+#define STATE_STARTUP 8
+
+static void hci1394_statebit_set(uint64_t *state, uint_t statebit);
+static boolean_t hci1394_statebit_tst(uint64_t state, uint_t statebit);
+
+static void hci1394_cleanup(hci1394_state_t *soft_state, uint64_t attach_state);
+
+static int hci1394_hardware_init(hci1394_state_t *soft_state);
+static int hci1394_hardware_resume(hci1394_state_t *soft_state);
+
+static int hci1394_pci_init(hci1394_state_t *soft_state);
+static void hci1394_pci_resume(hci1394_state_t *soft_state);
+
+static void hci1394_soft_state_phase1_init(hci1394_state_t *soft_state,
+ dev_info_t *dip, int instance);
+static void hci1394_soft_state_phase2_init(hci1394_state_t *soft_state);
+
+static int hci1394_resmap_get(hci1394_state_t *soft_state);
+static void hci1394_resmap_free(hci1394_state_t *soft_state);
+
+
+
+int
+hci1394_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ hci1394_state_t *soft_state;
+ uint64_t attach_state = 0;
+ int instance;
+ int status;
+
+
+ TNF_PROBE_0_DEBUG(hci1394_attach_enter, HCI1394_TNF_HAL_STACK, "");
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ instance = ddi_get_instance(dip);
+ status = ddi_soft_state_zalloc(hci1394_statep, instance);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_attach_ssz_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "ddi_soft_state_zalloc() failed");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ soft_state = ddi_get_soft_state(hci1394_statep, instance);
+ if (soft_state == NULL) {
+ ddi_soft_state_free(hci1394_statep, instance);
+ TNF_PROBE_1(hci1394_attach_gss_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "ddi_get_soft_state() failed");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ hci1394_statebit_set(&attach_state, STATE_ZALLOC);
+
+ hci1394_soft_state_phase1_init(soft_state, dip, instance);
+
+ /* get iblock cookie, other interrupt init stuff */
+ status = hci1394_isr_init(soft_state);
+ if (status != DDI_SUCCESS) {
+ hci1394_cleanup(soft_state, attach_state);
+ TNF_PROBE_0(hci1394_attach_isr_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ hci1394_statebit_set(&attach_state, STATE_ISR_INIT);
+
+ status = ddi_create_minor_node(dip, "devctl", S_IFCHR,
+ instance, DDI_NT_NEXUS, 0);
+ if (status != DDI_SUCCESS) {
+ hci1394_cleanup(soft_state, attach_state);
+ TNF_PROBE_0(hci1394_attach_cmn_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ hci1394_statebit_set(&attach_state, STATE_MINOR_NODE);
+
+ status = hci1394_hardware_init(soft_state);
+ if (status != DDI_SUCCESS) {
+ hci1394_cleanup(soft_state, attach_state);
+ TNF_PROBE_0(hci1394_attach_hwi_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ hci1394_statebit_set(&attach_state, STATE_HW_INIT);
+
+ hci1394_soft_state_phase2_init(soft_state);
+ hci1394_statebit_set(&attach_state, STATE_PHASE2);
+
+ /* build up the reserved addresses map */
+ status = hci1394_resmap_get(soft_state);
+ if (status != DDI_SUCCESS) {
+ hci1394_cleanup(soft_state, attach_state);
+ TNF_PROBE_0(hci1394_attach_rmg_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* "attach" to the Services Layer */
+ status = h1394_attach(&soft_state->halinfo, DDI_ATTACH,
+ &soft_state->drvinfo.di_sl_private);
+ if (status != DDI_SUCCESS) {
+ hci1394_resmap_free(soft_state);
+ hci1394_cleanup(soft_state, attach_state);
+ TNF_PROBE_0(hci1394_attach_ha_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ /* free the reserved addresses map */
+ hci1394_resmap_free(soft_state);
+ hci1394_statebit_set(&attach_state, STATE_H1394_ATTACH);
+
+ status = hci1394_isr_handler_init(soft_state);
+ if (status != DDI_SUCCESS) {
+ hci1394_cleanup(soft_state, attach_state);
+ TNF_PROBE_0(hci1394_attach_ih_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ hci1394_statebit_set(&attach_state, STATE_ISR_HANDLER);
+
+ /* Report that driver was loaded */
+ ddi_report_dev(dip);
+
+ /*
+ * Turn on link, Reset Bus, enable interrupts. Should be the
+		 * last routine called in attach. The statebit for startup must
+ * be set before startup is called since startup enables
+ * interrupts.
+ */
+ hci1394_statebit_set(&attach_state, STATE_STARTUP);
+ status = hci1394_ohci_startup(soft_state->ohci);
+ if (status != DDI_SUCCESS) {
+ hci1394_cleanup(soft_state, attach_state);
+ TNF_PROBE_0(hci1394_attach_str_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (DDI_SUCCESS);
+
+ case DDI_RESUME:
+ instance = ddi_get_instance(dip);
+ soft_state = ddi_get_soft_state(hci1394_statep, instance);
+ if (soft_state == NULL) {
+ TNF_PROBE_1(hci1394_attach_resgss_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "ddi_get_soft_state() failed");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_hardware_resume(soft_state);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_attach_res_hwr_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "hardware failed to resume");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* tell the Services Layer that we are resuming */
+ status = h1394_attach(&soft_state->halinfo, DDI_RESUME,
+ &soft_state->drvinfo.di_sl_private);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_attach_res_ha_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+		 * set our state back to initial. The next bus reset we're
+ * about to generate will set us in motion.
+ */
+ soft_state->drvinfo.di_drvstate.ds_state = HCI1394_INITIAL;
+
+ /* turn on the link, enable interrupts, reset the bus */
+ status = hci1394_ohci_startup(soft_state->ohci);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_attach_res_str_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "hci1394_ohci_startup() failed");
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_SUCCESS);
+
+ default:
+ TNF_PROBE_0(h1394_attach_default_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ break;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_attach_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_FAILURE);
+}
+
+
+/*
+ * hci1394_soft_state_phase1_init()
+ * First part of soft_state initialization. This should be called before any
+ * other initialization routines are called. Anything that requires cleanup
+ * on detach or after an attach failure should be setup in phase2 init (e.g.
+ * mutexes, cvs, etc.)
+ */
+static void
+hci1394_soft_state_phase1_init(hci1394_state_t *soft_state, dev_info_t *dip,
+ int instance)
+{
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_soft_state_phase1_init_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state->drvinfo.di_dip = dip;
+ soft_state->drvinfo.di_instance = instance;
+
+ /* current bus generation */
+ soft_state->drvinfo.di_gencnt = 0;
+
+ soft_state->drvinfo.di_sl_private = NULL;
+
+ /* initialize statistics */
+ soft_state->drvinfo.di_stats.st_bus_reset_count = 0;
+ soft_state->drvinfo.di_stats.st_selfid_count = 0;
+ soft_state->drvinfo.di_stats.st_phy_isr = 0;
+ soft_state->drvinfo.di_stats.st_phy_loop_err = 0;
+ soft_state->drvinfo.di_stats.st_phy_pwrfail_err = 0;
+ soft_state->drvinfo.di_stats.st_phy_timeout_err = 0;
+ soft_state->drvinfo.di_stats.st_phy_portevt_err = 0;
+
+ soft_state->swap_data = B_FALSE;
+ soft_state->sl_selfid_buf = NULL;
+
+ /* halinfo is what is passed up to the Services Layer */
+ soft_state->halinfo.hal_private = soft_state;
+ soft_state->halinfo.dip = soft_state->drvinfo.di_dip;
+ soft_state->halinfo.hal_events = hci1394_evts;
+ soft_state->halinfo.max_generation = OHCI_BUSGEN_MAX;
+ soft_state->halinfo.addr_map_num_entries = HCI1394_ADDR_MAP_SIZE;
+ soft_state->halinfo.addr_map = hci1394_addr_map;
+ hci1394_buf_attr_get(&soft_state->halinfo.dma_attr);
+
+ TNF_PROBE_0_DEBUG(hci1394_soft_state_phase1_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_soft_state_phase2_init()
+ * Second part of soft_state initialization. This should be called after a
+ * successful hardware_init() and before the call to h1394_attach().
+ */
+static void
+hci1394_soft_state_phase2_init(hci1394_state_t *soft_state)
+{
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_soft_state_phase2_init_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Setup our initial driver state. This requires the HW iblock
+ * cookie so this must be setup in phase2_init()
+ */
+ soft_state->drvinfo.di_drvstate.ds_state = HCI1394_INITIAL;
+ mutex_init(&soft_state->drvinfo.di_drvstate.ds_mutex, NULL,
+ MUTEX_DRIVER, soft_state->drvinfo.di_iblock_cookie);
+
+ /*
+ * halinfo.acc_attr tells the services layer what our buffer access
+	 * attributes are. drvinfo.di_buf_attr is initialized in pci_init so
+ * this must be setup in phase2_init()
+ */
+ soft_state->halinfo.acc_attr = soft_state->drvinfo.di_buf_attr;
+
+ /*
+ * halinfo.hw_interrupt tells the services layer what our
+ * iblock_cookie is. drvinfo.di_iblock_cookie is setup in isr_init so
+ * this must be setup in phase2_init()
+ */
+ soft_state->halinfo.hw_interrupt = soft_state->drvinfo.di_iblock_cookie;
+
+ /*
+ * Read in our node capabilities. Since we are calling into csr
+ * we must have first called hardware_init(). Therefore, this must
+ * be in phase2_init().
+ */
+ hci1394_csr_node_capabilities(soft_state->csr,
+ &soft_state->halinfo.node_capabilities);
+
+ /*
+ * Read in our bus capabilities. Since we are calling into ohci
+ * we must have first called hardware_init(). Therefore, this must
+ * be in phase2_init().
+ */
+ hci1394_ohci_bus_capabilities(soft_state->ohci,
+ &soft_state->halinfo.bus_capabilities);
+
+ /*
+ * Setup our async command overhead. When a target driver or the ARREQ
+ * engine allocates a command, the services layer will tack on space
+ * for itself and the HAL so we do not have to manage memory for every
+ * command. hal_overhead is how much memory the hal requires to track
+ * an async command. Since we are calling into async we must have first
+ * called hardware_init(). Therefore, this must be in phase2_init().
+ */
+ soft_state->halinfo.hal_overhead = hci1394_async_cmd_overhead();
+
+ TNF_PROBE_0_DEBUG(hci1394_soft_state_phase2_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_hardware_init()
+ * Initialize the adapter hardware. This should be called during
+ * the initial attach().
+ */
+static int
+hci1394_hardware_init(hci1394_state_t *soft_state)
+{
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_hardware_init_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ /* Initialize PCI config registers */
+ status = hci1394_pci_init(soft_state);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_hardware_init_pci_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_hardware_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize the OpenHCI Hardware */
+ status = hci1394_ohci_init(soft_state, &soft_state->drvinfo,
+ &soft_state->ohci);
+ if (status != DDI_SUCCESS) {
+ hci1394_pci_fini(soft_state);
+ TNF_PROBE_0(hci1394_hardware_init_ohci_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_hardware_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize SW based CSR registers */
+ hci1394_csr_init(&soft_state->drvinfo, soft_state->ohci,
+ &soft_state->csr);
+
+ /* Initialize the Asynchronous Q's */
+ status = hci1394_async_init(&soft_state->drvinfo, soft_state->ohci,
+ soft_state->csr, &soft_state->async);
+ if (status != DDI_SUCCESS) {
+ hci1394_csr_fini(&soft_state->csr);
+ hci1394_ohci_fini(&soft_state->ohci);
+ hci1394_pci_fini(soft_state);
+ TNF_PROBE_0(hci1394_hardware_init_asyn_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_hardware_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize the Isochronous logic */
+ hci1394_isoch_init(&soft_state->drvinfo, soft_state->ohci,
+ &soft_state->isoch);
+
+ /* Initialize any Vendor Specific Registers */
+ status = hci1394_vendor_init(&soft_state->drvinfo, soft_state->ohci,
+ &soft_state->vendor_info, &soft_state->vendor);
+ if (status != DDI_SUCCESS) {
+ hci1394_isoch_fini(&soft_state->isoch);
+ hci1394_async_fini(&soft_state->async);
+ hci1394_csr_fini(&soft_state->csr);
+ hci1394_ohci_fini(&soft_state->ohci);
+ hci1394_pci_fini(soft_state);
+ TNF_PROBE_0(hci1394_hardware_init_vend_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_hardware_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_hardware_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_hardware_resume()
+ * Resume the adapter HW. This routine will be called during resume after
+ * a successful system suspend. All memory should be in the state it was
+ * before the suspend. All we have to do is re-setup the HW.
+ */
+static int
+hci1394_hardware_resume(hci1394_state_t *soft_state)
+{
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_hardware_resume_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ /* re-enable global byte swap (if we using it) */
+ hci1394_pci_resume(soft_state);
+
+ /* Re-init the OpenHCI HW */
+ status = hci1394_ohci_resume(soft_state->ohci);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_hardware_resume_ohci_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_hardware_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* re-setup our SW based CSR registers */
+ hci1394_csr_resume(soft_state->csr);
+
+ /* Re-setup the Async Q's */
+ status = hci1394_async_resume(soft_state->async);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_hardware_resume_asyn_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_hardware_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Re-setup any Vendor Specific Registers */
+ status = hci1394_vendor_resume(soft_state->vendor);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_hardware_resume_vend_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_hardware_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_hardware_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_pci_init()
+ * Map in PCI config space and initialize PCI config space registers.
+ */
+static int
+hci1394_pci_init(hci1394_state_t *soft_state)
+{
+ int status;
+#ifndef _LITTLE_ENDIAN
+ uint32_t global_swap;
+#endif
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_pci_init_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* Setup PCI configuration space */
+ status = pci_config_setup(soft_state->drvinfo.di_dip,
+ &soft_state->pci_config);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_pci_init_cfg_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_pci_init_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+
+#ifdef _LITTLE_ENDIAN
+ /* Start of little endian specific code */
+ soft_state->drvinfo.di_reg_attr.devacc_attr_version =
+ DDI_DEVICE_ATTR_V0;
+ soft_state->drvinfo.di_reg_attr.devacc_attr_endian_flags =
+ DDI_STRUCTURE_LE_ACC;
+ soft_state->drvinfo.di_reg_attr.devacc_attr_dataorder =
+ DDI_STRICTORDER_ACC;
+ soft_state->drvinfo.di_buf_attr.devacc_attr_version =
+ DDI_DEVICE_ATTR_V0;
+ soft_state->drvinfo.di_buf_attr.devacc_attr_endian_flags =
+ DDI_STRUCTURE_LE_ACC;
+ soft_state->drvinfo.di_buf_attr.devacc_attr_dataorder =
+ DDI_STRICTORDER_ACC;
+ soft_state->swap_data = B_TRUE;
+ /* End of little endian specific code */
+#else
+ /* Start of big endian specific code */
+ /* If PCI_Global_Swap bit is not set, try to set it */
+ global_swap = pci_config_get32(soft_state->pci_config,
+ OHCI_PCI_HCI_CONTROL_REG);
+
+ /* Lets see if the global byte swap feature is supported */
+ if ((global_swap & OHCI_PCI_GLOBAL_SWAP) == 0) {
+ global_swap = global_swap | OHCI_PCI_GLOBAL_SWAP;
+ pci_config_put32(soft_state->pci_config,
+ OHCI_PCI_HCI_CONTROL_REG, global_swap);
+ }
+
+ global_swap = pci_config_get32(soft_state->pci_config,
+ OHCI_PCI_HCI_CONTROL_REG);
+
+ /* If PCI_Global_Swap bit is not set, it is unsupported */
+ if ((global_swap & OHCI_PCI_GLOBAL_SWAP) == 0) {
+ TNF_PROBE_0_DEBUG(hci1394_pci_gbs_npresent,
+ HCI1394_TNF_HAL_INFO, "global swap not present");
+ soft_state->drvinfo.di_reg_attr.devacc_attr_version =
+ DDI_DEVICE_ATTR_V0;
+ soft_state->drvinfo.di_reg_attr.devacc_attr_endian_flags =
+ DDI_STRUCTURE_LE_ACC;
+ soft_state->drvinfo.di_reg_attr.devacc_attr_dataorder =
+ DDI_STRICTORDER_ACC;
+ soft_state->drvinfo.di_buf_attr.devacc_attr_version =
+ DDI_DEVICE_ATTR_V0;
+ soft_state->drvinfo.di_buf_attr.devacc_attr_endian_flags =
+ DDI_STRUCTURE_LE_ACC;
+ soft_state->drvinfo.di_buf_attr.devacc_attr_dataorder =
+ DDI_STRICTORDER_ACC;
+ soft_state->swap_data = B_TRUE;
+ /*
+ * global byte swap is supported. This should be the case
+ * for almost all of the adapters.
+ */
+ } else {
+ TNF_PROBE_0_DEBUG(hci1394_pci_gbs_present,
+ HCI1394_TNF_HAL_INFO, "global swap present");
+ soft_state->drvinfo.di_reg_attr.devacc_attr_version =
+ DDI_DEVICE_ATTR_V0;
+ soft_state->drvinfo.di_reg_attr.devacc_attr_endian_flags =
+ DDI_STRUCTURE_BE_ACC;
+ soft_state->drvinfo.di_reg_attr.devacc_attr_dataorder =
+ DDI_STRICTORDER_ACC;
+ soft_state->drvinfo.di_buf_attr.devacc_attr_version =
+ DDI_DEVICE_ATTR_V0;
+ soft_state->drvinfo.di_buf_attr.devacc_attr_endian_flags =
+ DDI_STRUCTURE_BE_ACC;
+ soft_state->drvinfo.di_buf_attr.devacc_attr_dataorder =
+ DDI_STRICTORDER_ACC;
+ soft_state->swap_data = B_FALSE;
+ }
+ /* End of big endian specific code */
+#endif
+
+ /* read in vendor Information */
+ soft_state->vendor_info.vendor_id =
+ (uint_t)pci_config_get16(soft_state->pci_config, PCI_CONF_VENID);
+ soft_state->vendor_info.device_id =
+ (uint_t)pci_config_get16(soft_state->pci_config, PCI_CONF_DEVID);
+ soft_state->vendor_info.revision_id =
+ (uint_t)pci_config_get8(soft_state->pci_config, PCI_CONF_REVID);
+
+ TNF_PROBE_0_DEBUG(hci1394_pci_init_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_pci_resume()
+ * Re-Initialize PCI config space registers during a resume.
+ */
+/* ARGSUSED */
+static void
+hci1394_pci_resume(hci1394_state_t *soft_state)
+{
+#ifndef _LITTLE_ENDIAN
+ uint32_t global_swap;
+#endif
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_pci_resume_enter, HCI1394_TNF_HAL_STACK, "");
+
+#ifdef _LITTLE_ENDIAN
+ /* Start of little endian specific code */
+ /* nothing to do here yet. Maybe later?? */
+ /* End of little endian specific code */
+#else
+ /* Start of big endian specific code */
+ /* If PCI_Global_Swap bit is not set, try to set it */
+ global_swap = pci_config_get32(soft_state->pci_config,
+ OHCI_PCI_HCI_CONTROL_REG);
+ /* Try and set GlobalByteSwap */
+ if ((global_swap & OHCI_PCI_GLOBAL_SWAP) == 0) {
+ global_swap = global_swap | OHCI_PCI_GLOBAL_SWAP;
+ pci_config_put32(soft_state->pci_config,
+ OHCI_PCI_HCI_CONTROL_REG, global_swap);
+ }
+ /* End of big endian specific code */
+#endif
+ TNF_PROBE_0_DEBUG(hci1394_pci_resume_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_resmap_get()
+ * Look for adapter property "reserved-addresses". This property is used to
+ * reserve 1394 address space so that it will not randomly be given to a
+ * target driver during a 1394 address space alloc. Some protocols hard
+ * code addresses which make us do this. The target driver must specifically
+ * ask for these addresses. This routine should be called before the
+ * call to h1394_attach().
+ */
+static int
+hci1394_resmap_get(hci1394_state_t *soft_state)
+{
+ h1394_addr_map_t *resv_map;
+ int resv_num;
+ int status;
+ int reslen;
+ uint32_t *resptr;
+ int rescnt;
+ int mapcnt;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_resmap_get_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * See if the "reserved-addresses" property is defined. The format
+ * should be:
+ *
+ * reserved-addresses= 0x0000ffff,0xf0000B00,0x200,
+ * 0x0000ffff,0xf0000D00,0x200,
+ * 0x0000ffff,0xf0000234,0x4;
+ * You can have multiple reserved addresses. Each reserved address
+ * takes up 3 integers.
+ * MSWofAddr,LSWofAddr,ByteCount
+ */
+ status = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
+ soft_state->drvinfo.di_dip, DDI_PROP_DONTPASS, "reserved-addresses",
+ (int **)&resptr, (uint_t *)&reslen);
+ if (status != DDI_PROP_SUCCESS) {
+ /* the property is not defined, 0 reserved addresses */
+ soft_state->halinfo.resv_map_num_entries = 0;
+ soft_state->halinfo.resv_map = NULL;
+ TNF_PROBE_0_DEBUG(hci1394_resmap_get_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ } else if ((reslen < 3) || ((reslen % 3) != 0)) {
+ /*
+ * the property is defined but the correct number of integers
+ * is not present.
+ */
+ resv_num = 0;
+ resv_map = NULL;
+ cmn_err(CE_NOTE, "!%s(%d): Invalid reserved-addresses property."
+ " Property ignored", ddi_node_name(
+ soft_state->drvinfo.di_dip), ddi_get_instance(
+ soft_state->drvinfo.di_dip));
+ } else {
+ /* the property is defined. Alloc space to copy data into */
+ resv_num = reslen / 3;
+ resv_map = kmem_alloc((sizeof (h1394_addr_map_t) * (resv_num)),
+ KM_SLEEP);
+
+ /* read in the address, length, and set the type to reserved */
+ rescnt = 0;
+ mapcnt = 0;
+ while (rescnt < reslen) {
+ resv_map[mapcnt].address =
+ (uint64_t)resptr[rescnt] << 32;
+ rescnt++;
+ resv_map[mapcnt].address |= (uint64_t)resptr[rescnt];
+ rescnt++;
+ resv_map[mapcnt].length = (uint64_t)resptr[rescnt];
+ rescnt++;
+ resv_map[mapcnt].addr_type = H1394_ADDR_RESERVED;
+ mapcnt++;
+ }
+ }
+
+ ddi_prop_free(resptr);
+
+ /*
+ * copy the number of reserved address ranges and a pointer to the map
+ * into halinfo so we can tell the services layer about them in
+ * h1394_attach()
+ */
+ soft_state->halinfo.resv_map_num_entries = resv_num;
+ soft_state->halinfo.resv_map = resv_map;
+
+ TNF_PROBE_0_DEBUG(hci1394_resmap_get_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
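+
+
+/*
+ * Worked example of the parsing above: the first integer group from the
+ * sample property, 0x0000ffff,0xf0000B00,0x200, becomes
+ *
+ *	resv_map[0].address   = ((uint64_t)0x0000ffff << 32) | 0xf0000B00
+ *	                      = 0x0000FFFFF0000B00
+ *	resv_map[0].length    = 0x200
+ *	resv_map[0].addr_type = H1394_ADDR_RESERVED
+ *
+ * so those 0x200 bytes of 1394 address space are withheld from random
+ * allocation until a target specifically asks for them by address.
+ */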
+
+
+/*
+ * hci1394_resmap_free()
+ * Free up the space alloced in hci1394_resmap_get(). This routine should
+ * be called after h1394_attach(). The HAL does not need this information
+ * and the services layer only uses it for a calculation during attach and
+ * should not refer to the pointer after it returns from h1394_attach().
+ */
+static void
+hci1394_resmap_free(hci1394_state_t *soft_state)
+{
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_resmap_free_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * if we have one or more reserved map entries, free up the space that
+ * was allocated to store them
+ */
+ if (soft_state->halinfo.resv_map_num_entries > 0) {
+ ASSERT(soft_state->halinfo.resv_map != NULL);
+ kmem_free(soft_state->halinfo.resv_map,
+ (sizeof (h1394_addr_map_t) *
+ soft_state->halinfo.resv_map_num_entries));
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_resmap_free_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_statebit_set()
+ * Set bit "statebit" in "state"
+ */
+static void
+hci1394_statebit_set(uint64_t *state, uint_t statebit)
+{
+ ASSERT(state != NULL);
+ ASSERT(statebit < 64);
+ *state |= (uint64_t)0x1 << statebit;
+}
+
+
+/*
+ * hci1394_statebit_tst()
+ * Return status of bit "statebit". Is it set or not?
+ */
+static boolean_t
+hci1394_statebit_tst(uint64_t state, uint_t statebit)
+{
+ uint64_t bitset;
+ int status;
+
+
+ ASSERT(statebit < 64);
+ bitset = state & ((uint64_t)0x1 << statebit);
+ if (bitset == 0) {
+ status = B_FALSE;
+ } else {
+ status = B_TRUE;
+ }
+ return (status);
+}
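+
+
+/*
+ * Example of how the two helpers above are used by attach()/cleanup():
+ * each completed attach step sets its bit, and cleanup tears down only
+ * the steps whose bits are set.
+ */
+#if 0	/* example only, not compiled */
+	uint64_t attach_state = 0;
+
+	hci1394_statebit_set(&attach_state, STATE_ZALLOC);
+	if (hci1394_statebit_tst(attach_state, STATE_ZALLOC) == B_TRUE) {
+		/* the soft state was allocated; free it during cleanup */
+	}
+#endif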
+
+
+/*
+ * hci1394_cleanup()
+ * Cleanup after a failed attach
+ */
+static void
+hci1394_cleanup(hci1394_state_t *soft_state, uint64_t attach_state)
+{
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_cleanup_enter, HCI1394_TNF_HAL_STACK, "");
+
+
+ status = hci1394_statebit_tst(attach_state, STATE_STARTUP);
+ if (status == B_TRUE) {
+ /* Don't allow the HW to generate any more interrupts */
+ hci1394_ohci_intr_master_disable(soft_state->ohci);
+
+ /* don't accept anymore commands from services layer */
+ (void) hci1394_state_set(&soft_state->drvinfo,
+ HCI1394_SHUTDOWN);
+
+ /* Reset the chip */
+ (void) hci1394_ohci_soft_reset(soft_state->ohci);
+
+ /* Flush out async DMA Q's (cancels pendingQ timeouts too) */
+ hci1394_async_flush(soft_state->async);
+ }
+
+ status = hci1394_statebit_tst(attach_state, STATE_ISR_HANDLER);
+ if (status == B_TRUE) {
+ hci1394_isr_handler_fini(soft_state);
+ }
+
+ status = hci1394_statebit_tst(attach_state, STATE_H1394_ATTACH);
+ if (status == B_TRUE) {
+ (void) h1394_detach(&soft_state->drvinfo.di_sl_private,
+ DDI_DETACH);
+ }
+
+ status = hci1394_statebit_tst(attach_state, STATE_HW_INIT);
+ if (status == B_TRUE) {
+ hci1394_detach_hardware(soft_state);
+ }
+
+ status = hci1394_statebit_tst(attach_state, STATE_MINOR_NODE);
+ if (status == B_TRUE) {
+ ddi_remove_minor_node(soft_state->drvinfo.di_dip, "devctl");
+ }
+
+ status = hci1394_statebit_tst(attach_state, STATE_ISR_INIT);
+ if (status == B_TRUE) {
+ hci1394_isr_fini(soft_state);
+ }
+
+ status = hci1394_statebit_tst(attach_state, STATE_PHASE2);
+ if (status == B_TRUE) {
+ hci1394_soft_state_fini(soft_state);
+ }
+
+ status = hci1394_statebit_tst(attach_state, STATE_ZALLOC);
+ if (status == B_TRUE) {
+ ddi_soft_state_free(hci1394_statep,
+ soft_state->drvinfo.di_instance);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_cleanup_exit, HCI1394_TNF_HAL_STACK, "");
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_buf.c b/usr/src/uts/common/io/1394/adapters/hci1394_buf.c
new file mode 100644
index 0000000000..3b9e3465f7
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_buf.c
@@ -0,0 +1,190 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_buf.c
+ * These routines handle IO mapped memory. They include routines to alloc and
+ * free IO mapped memory and a routine to get the adapter's default DMA
+ * attributes. These routines are meant to be called from the base context.
+ * They should not be called from an interrupt handler.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/sunddi.h>
+#include <sys/kmem.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/adapters/hci1394.h>
+
+
+/*
+ * hci1394_buf_attr_get()
+ * returns (in dma_attr) the default DMA attributes for this adapter.
+ */
+void
+hci1394_buf_attr_get(ddi_dma_attr_t *dma_attr)
+{
+ dma_attr->dma_attr_version = DMA_ATTR_V0;
+ dma_attr->dma_attr_addr_lo = (uint64_t)0x00000000;
+ dma_attr->dma_attr_addr_hi = (uint64_t)0xFFFFFFFF;
+ dma_attr->dma_attr_count_max = (uint64_t)0xFFFFFFFF;
+ dma_attr->dma_attr_align = 64;
+ dma_attr->dma_attr_burstsizes = 0x3FF;
+ dma_attr->dma_attr_minxfer = 1;
+ dma_attr->dma_attr_maxxfer = (uint64_t)0xFFFFFFFF;
+ dma_attr->dma_attr_seg = (uint64_t)0xFFFFFFFF;
+ dma_attr->dma_attr_sgllen = 0x7FFFFFFF;
+ dma_attr->dma_attr_granular = 4;
+ dma_attr->dma_attr_flags = 0;
+
+#if defined(__i386) || defined(__amd64)
+ /* XXX - Not sure why x86 wants the dma_attr_seg to be 0x7FFF?? */
+ dma_attr->dma_attr_seg = (uint64_t)0x7FFF;
+#endif
+}
+
+
+/*
+ * hci1394_buf_alloc()
+ * Allocate an IO mapped buffer. drvinfo is passed in and contains generic
+ * driver info, like dip, instance, buf_attr, etc. parms is passed in and
+ * contains the input parameters for the alloc: how much memory to alloc,
+ * how many cookies we can handle, and alignment requirements. info is
+ * returned with all the info about the mapped buffer. handle is returned;
+ * it should be used when calling hci1394_buf_free().
+ */
+int
+hci1394_buf_alloc(hci1394_drvinfo_t *drvinfo, hci1394_buf_parms_t *parms,
+ hci1394_buf_info_t *info, hci1394_buf_handle_t *handle)
+{
+ ddi_dma_attr_t dma_attr;
+ hci1394_buf_t *buf;
+ int status;
+
+
+ ASSERT(drvinfo != NULL);
+ ASSERT(parms != NULL);
+ ASSERT(info != NULL);
+ ASSERT(handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_buf_alloc_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* alloc the space to keep track of the buffer */
+ buf = kmem_alloc(sizeof (hci1394_buf_t), KM_SLEEP);
+
+ /* setup the return parameter */
+ *handle = buf;
+
+ /* save away pointer to general info */
+ buf->bu_drvinfo = drvinfo;
+
+ /* Get the default DMA attributes and override sgllen and alignment */
+
+ _NOTE(SCHEME_PROTECTS_DATA("unique (on stack)", ddi_dma_attr_t))
+ hci1394_buf_attr_get(&dma_attr);
+ dma_attr.dma_attr_sgllen = parms->bp_max_cookies;
+ dma_attr.dma_attr_align = parms->bp_alignment;
+
+ status = ddi_dma_alloc_handle(drvinfo->di_dip, &dma_attr,
+ DDI_DMA_SLEEP, NULL, &buf->bu_dma_handle);
+ if (status != DDI_SUCCESS) {
+ kmem_free(buf, sizeof (hci1394_buf_t));
+ TNF_PROBE_0(hci1394_buf_alloc_dah_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_buf_alloc_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ status = ddi_dma_mem_alloc(buf->bu_dma_handle, parms->bp_length,
+ &drvinfo->di_buf_attr, DDI_DMA_STREAMING, DDI_DMA_SLEEP,
+ NULL, &info->bi_kaddr, &info->bi_real_length, &buf->bu_handle);
+ if (status != DDI_SUCCESS) {
+ ddi_dma_free_handle(&buf->bu_dma_handle);
+ kmem_free(buf, sizeof (hci1394_buf_t));
+ TNF_PROBE_0(hci1394_buf_alloc_dam_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_buf_alloc_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ status = ddi_dma_addr_bind_handle(buf->bu_dma_handle, NULL,
+ info->bi_kaddr, info->bi_real_length, DDI_DMA_RDWR |
+ DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &info->bi_cookie,
+ &info->bi_cookie_count);
+ if (status != DDI_SUCCESS) {
+ ddi_dma_mem_free(&buf->bu_handle);
+ ddi_dma_free_handle(&buf->bu_dma_handle);
+ kmem_free(buf, sizeof (hci1394_buf_t));
+ TNF_PROBE_0(hci1394_buf_alloc_dbh_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_buf_alloc_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /* setup rest of buffer info returned to caller */
+ info->bi_handle = buf->bu_handle;
+ info->bi_dma_handle = buf->bu_dma_handle;
+ info->bi_length = parms->bp_length;
+
+ TNF_PROBE_0_DEBUG(hci1394_buf_alloc_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_buf_free()
+ * Free IO mapped buffer. Notice that a pointer to the handle is used for
+ * the parameter. free() will set your handle to NULL before returning.
+ */
+void
+hci1394_buf_free(hci1394_buf_handle_t *handle)
+{
+ hci1394_buf_t *buf;
+
+ ASSERT(handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_buf_free_enter, HCI1394_TNF_HAL_STACK, "");
+
+ buf = *handle;
+ (void) ddi_dma_unbind_handle(buf->bu_dma_handle);
+ ddi_dma_mem_free(&buf->bu_handle);
+ ddi_dma_free_handle(&buf->bu_dma_handle);
+
+ /* free the space to keep track of the buffer */
+ kmem_free(buf, sizeof (hci1394_buf_t));
+
+ /* set the handle to NULL to help catch bugs */
+ *handle = NULL;
+
+ TNF_PROBE_0_DEBUG(hci1394_buf_free_exit, HCI1394_TNF_HAL_STACK, "");
+}
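+
+
+/*
+ * A minimal usage sketch (hypothetical caller, not part of this file):
+ * hci1394_buf_alloc() and hci1394_buf_free() are paired roughly like this,
+ * where drvinfo is the HAL's hci1394_drvinfo_t:
+ *
+ *	hci1394_buf_parms_t parms;
+ *	hci1394_buf_info_t info;
+ *	hci1394_buf_handle_t handle;
+ *
+ *	parms.bp_length = 4096;
+ *	parms.bp_max_cookies = 1;
+ *	parms.bp_alignment = 64;
+ *	if (hci1394_buf_alloc(drvinfo, &parms, &info, &handle) !=
+ *	    DDI_SUCCESS)
+ *		return (DDI_FAILURE);
+ *	(use info.bi_kaddr and info.bi_cookie to program the DMA engine)
+ *	hci1394_buf_free(&handle);	(handle is set to NULL on return)
+ */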
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_csr.c b/usr/src/uts/common/io/1394/adapters/hci1394_csr.c
new file mode 100644
index 0000000000..0bbc70f98a
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_csr.c
@@ -0,0 +1,576 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_csr.c
+ * This file contains the code for the CSR registers handled by the HAL in
+ * SW. The HW implemented CSR registers are in hci1394_ohci.c.
+ *
+ * For more information on CSR registers, see
+ * IEEE 1212
+ * IEEE 1394-1995
+ * section 8.3.2
+ * IEEE P1394A Draft 3.0
+ * sections 10.32, 10.33
+ *
+ * NOTE: A read/write to a CSR SW based register will first go to the Services
+ * Layer which will do some filtering and then come through the s1394if. Look
+ * in hci1394_s1394if.c to see which registers are implemented in HW and
+ * which are implemented in SW.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+#include <sys/types.h>
+
+#include <sys/1394/adapters/hci1394.h>
+#include <sys/1394/adapters/hci1394_extern.h>
+
+
+/*
+ * The split_timeout_lo register cannot be set below 800 or above 7999. The
+ * split_timeout_hi register cannot be set above 7.
+ */
+#define CSR_MIN_SPLIT_TIMEOUT_LO 800
+#define CSR_MAX_SPLIT_TIMEOUT_LO 7999
+#define CSR_MAX_SPLIT_TIMEOUT_HI 7
+
+/*
+ * We will convert the split_timeout_lo to return the data in the most
+ * significant 13 bits on the fly.
+ */
+#define CSR_SPLIT_TIMEOUT_LO_SHIFT 19
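+
+/*
+ * For example, an internal split_timeout_lo of 800 bus ticks is returned
+ * over the bus as (800 << 19) = 0x19000000, which places the value in the
+ * most significant 13 bits of the register. (The legal values 800 through
+ * 7999 all fit in 13 bits.)
+ */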
+
+/*
+ * This is what we report to the services layer as our node capabilities.
+ * See IEEE 1212-1994, section 8.4.11
+ *
+ * Split Timeout Registers are implemented (bit 15)
+ * This node uses 64-bit addressing (bit 9)
+ * This node uses fixed addressing scheme (bit 8)
+ * STATE_BITS.lost is implemented
+ * STATE_BITS.dreq is implemented
+ */
+#define CSR_INITIAL_NODE_CAPABILITIES 0x000083C0
+
+/*
+ * macro to calculate split_timeout based on split_timeout_lo and
+ * split_timeout_hi
+ */
+#define CSR_SPLIT_TIMEOUT(split_hi, split_lo) \
+ ((split_hi * IEEE1394_BUS_CYCLES_PER_SEC) + split_lo)
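+
+/*
+ * As a worked example (assuming IEEE1394_BUS_CYCLES_PER_SEC is 8000, i.e.
+ * one bus cycle per 125uS): split_timeout_hi = 0 and split_timeout_lo = 800
+ * yield (0 * 8000) + 800 = 800 bus ticks, i.e. a 100mS split timeout.
+ */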
+
+
+static void hci1394_csr_state_init(hci1394_csr_t *csr);
+
+
+/*
+ * hci1394_csr_init()
+ * Initialize CSR state and CSR SW based registers.
+ */
+void
+hci1394_csr_init(hci1394_drvinfo_t *drvinfo, hci1394_ohci_handle_t ohci,
+ hci1394_csr_handle_t *csr_handle)
+{
+ hci1394_csr_t *csr;
+
+
+ ASSERT(drvinfo != NULL);
+ ASSERT(ohci != NULL);
+ ASSERT(csr_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_init_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* alloc the space to keep track of the csr registers */
+ csr = kmem_alloc(sizeof (hci1394_csr_t), KM_SLEEP);
+
+ /* setup the return parameter */
+ *csr_handle = csr;
+
+ /* Initialize the csr structure */
+ csr->csr_drvinfo = drvinfo;
+ csr->csr_ohci = ohci;
+ mutex_init(&csr->csr_mutex, NULL, MUTEX_DRIVER,
+ drvinfo->di_iblock_cookie);
+ hci1394_csr_state_init(csr);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_init_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_csr_fini()
+ * Free up any space allocated and any mutexes used.
+ */
+void
+hci1394_csr_fini(hci1394_csr_handle_t *csr_handle)
+{
+ hci1394_csr_t *csr;
+
+
+ ASSERT(csr_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_fini_enter, HCI1394_TNF_HAL_STACK, "");
+
+ csr = (hci1394_csr_t *)*csr_handle;
+ mutex_destroy(&csr->csr_mutex);
+ kmem_free(csr, sizeof (hci1394_csr_t));
+ *csr_handle = NULL;
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_fini_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_csr_resume()
+ * When resuming power on a workstation, re-setup our CSR registers.
+ */
+void
+hci1394_csr_resume(hci1394_csr_handle_t csr_handle)
+{
+ ASSERT(csr_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_resume_enter, HCI1394_TNF_HAL_STACK, "");
+ hci1394_csr_state_init(csr_handle);
+ TNF_PROBE_0_DEBUG(hci1394_csr_resume_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_csr_node_capabilities()
+ * Return the CSR node capabilities.
+ */
+void
+hci1394_csr_node_capabilities(hci1394_csr_handle_t csr_handle,
+ uint32_t *capabilities)
+{
+ ASSERT(csr_handle != NULL);
+ ASSERT(capabilities != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_node_capabilities_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&csr_handle->csr_mutex);
+ *capabilities = csr_handle->csr_capabilities;
+ mutex_exit(&csr_handle->csr_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_node_capabilities_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_csr_state_get()
+ * Read the CSR state register. Currently we only support the dreq, cmstr,
+ * and abdicate bits in the CSR state register. See the specs mentioned
+ * above for the behavior of these bits.
+ */
+void
+hci1394_csr_state_get(hci1394_csr_handle_t csr_handle, uint32_t *state)
+{
+ ASSERT(csr_handle != NULL);
+ ASSERT(state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_state_get_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ mutex_enter(&csr_handle->csr_mutex);
+ *state = csr_handle->csr_state;
+ mutex_exit(&csr_handle->csr_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_state_get_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_csr_state_bset()
+ * Perform a bit set on the CSR state register. The value of state will be
+ * or'd with the CSR state register. Currently we only support the dreq,
+ * cmstr, and abdicate bits in the CSR state register. See the specs
+ * mentioned above for the behavior of these bits.
+ */
+void
+hci1394_csr_state_bset(hci1394_csr_handle_t csr_handle, uint32_t state)
+{
+ uint32_t supported_state;
+
+
+ ASSERT(csr_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_state_bset_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ mutex_enter(&csr_handle->csr_mutex);
+
+ /* only support dreq, cmstr, and abdicate bits */
+ supported_state = state & (IEEE1394_CSR_STATE_ABDICATE |
+ IEEE1394_CSR_STATE_CMSTR | IEEE1394_CSR_STATE_DREQ);
+
+ /*
+ * If we are setting the Cycle Master bit and we are the root node,
+ * enable Cycle Start Packets.
+ */
+ if ((supported_state & IEEE1394_CSR_STATE_CMSTR) &&
+ (hci1394_ohci_root_check(csr_handle->csr_ohci))) {
+ hci1394_ohci_cycle_master_enable(csr_handle->csr_ohci);
+ }
+
+ /* set the supported bits in csr_state */
+ csr_handle->csr_state |= supported_state;
+
+ mutex_exit(&csr_handle->csr_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_state_bset_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_csr_state_bclr()
+ * Perform a bit clear on the CSR state register. The inverted value of
+ * state will be and'd with the CSR state register. Currently we only support
+ * the dreq, cmstr, and abdicate bits in the CSR state register. See the
+ * specs mentioned above for the behavior of these bits.
+ */
+void
+hci1394_csr_state_bclr(hci1394_csr_handle_t csr_handle, uint32_t state)
+{
+ uint32_t supported_state;
+
+
+ ASSERT(csr_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_state_bclr_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ mutex_enter(&csr_handle->csr_mutex);
+
+ /* only support dreq, cmstr, and abdicate bits */
+ supported_state = state & (IEEE1394_CSR_STATE_ABDICATE |
+ IEEE1394_CSR_STATE_CMSTR | IEEE1394_CSR_STATE_DREQ);
+
+ /*
+ * If we are clearing the Cycle Master bit and we are the root node,
+ * disable Cycle Start Packets.
+ */
+ if ((supported_state & IEEE1394_CSR_STATE_CMSTR) &&
+ (hci1394_ohci_root_check(csr_handle->csr_ohci))) {
+ hci1394_ohci_cycle_master_disable(csr_handle->csr_ohci);
+ }
+
+ /* Clear the supported bits in csr_state */
+	csr_handle->csr_state &= ~supported_state;
+
+ mutex_exit(&csr_handle->csr_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_state_bclr_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_csr_split_timeout_hi_get()
+ * Read the CSR split_timeout_hi register.
+ */
+void
+hci1394_csr_split_timeout_hi_get(hci1394_csr_handle_t csr_handle,
+ uint32_t *split_timeout_hi)
+{
+ ASSERT(csr_handle != NULL);
+ ASSERT(split_timeout_hi != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_split_timeout_hi_get_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&csr_handle->csr_mutex);
+ *split_timeout_hi = csr_handle->csr_split_timeout_hi;
+ mutex_exit(&csr_handle->csr_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_split_timeout_hi_get_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+}
+
+
+/*
+ * hci1394_csr_split_timeout_lo_get()
+ * Read the CSR split_timeout_lo register.
+ */
+void
+hci1394_csr_split_timeout_lo_get(hci1394_csr_handle_t csr_handle,
+ uint32_t *split_timeout_lo)
+{
+ ASSERT(csr_handle != NULL);
+ ASSERT(split_timeout_lo != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_split_timeout_lo_get_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&csr_handle->csr_mutex);
+
+ /*
+ * Read the split_timeout_lo CSR register. Convert split_timeout_lo to
+ * use the data in most significant 13 bits on the fly.
+ */
+ *split_timeout_lo = csr_handle->csr_split_timeout_lo <<
+ CSR_SPLIT_TIMEOUT_LO_SHIFT;
+
+ mutex_exit(&csr_handle->csr_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_split_timeout_lo_get_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+}
+
+
+/*
+ * hci1394_csr_split_timeout_hi_set()
+ * Write the CSR split_timeout_hi register. This routine will also
+ * re-calculate the "split_timeout" which is used internally in the HAL
+ * driver. The only accesses to split_timeout_hi and split_timeout_lo
+ * should be over the 1394 bus. Only the least significant 3 bits are
+ * relevant in the split_timeout_hi register.
+ */
+void
+hci1394_csr_split_timeout_hi_set(hci1394_csr_handle_t csr_handle,
+ uint32_t split_timeout_hi)
+{
+ ASSERT(csr_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_split_timeout_hi_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&csr_handle->csr_mutex);
+
+ /*
+	 * Update the split_timeout_hi CSR register. Only look at the 3 LSBits.
+ * Update our internal split_timeout value.
+ */
+ csr_handle->csr_split_timeout_hi = split_timeout_hi &
+ CSR_MAX_SPLIT_TIMEOUT_HI;
+ csr_handle->csr_split_timeout = CSR_SPLIT_TIMEOUT(
+ csr_handle->csr_split_timeout_hi, csr_handle->csr_split_timeout_lo);
+
+ mutex_exit(&csr_handle->csr_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_split_timeout_hi_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_csr_split_timeout_lo_set()
+ * Write the CSR split_timeout_lo register. This routine will also
+ * re-calculate the "split_timeout" which is used internally in the HAL
+ * driver. The only accesses to split_timeout_hi and split_timeout_lo
+ * should be over the 1394 bus. Only the most significant 13 bits are
+ * relevant in the split_timeout_lo register.
+ */
+void
+hci1394_csr_split_timeout_lo_set(hci1394_csr_handle_t csr_handle,
+ uint32_t split_timeout_lo)
+{
+ ASSERT(csr_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_split_timeout_lo_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&csr_handle->csr_mutex);
+
+ /*
+	 * Update the split_timeout_lo CSR register. Only look at the 13
+	 * MSBits. Convert the split_timeout_lo to use the data in the most
+	 * significant 13 bits on the fly.
+ */
+ csr_handle->csr_split_timeout_lo = split_timeout_lo >>
+ CSR_SPLIT_TIMEOUT_LO_SHIFT;
+
+ /* threshold the split_timeout_lo value */
+ if (csr_handle->csr_split_timeout_lo < CSR_MIN_SPLIT_TIMEOUT_LO) {
+ csr_handle->csr_split_timeout_lo = CSR_MIN_SPLIT_TIMEOUT_LO;
+ } else if (csr_handle->csr_split_timeout_lo >
+ CSR_MAX_SPLIT_TIMEOUT_LO) {
+ csr_handle->csr_split_timeout_lo = CSR_MAX_SPLIT_TIMEOUT_LO;
+ }
+
+ /* Update our internal split_timeout value */
+ csr_handle->csr_split_timeout = CSR_SPLIT_TIMEOUT(
+ csr_handle->csr_split_timeout_hi, csr_handle->csr_split_timeout_lo);
+
+ mutex_exit(&csr_handle->csr_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_split_timeout_lo_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_csr_split_timeout_get()
+ * Return the current value of split_timeout. This is the only routine
+ * which should be used to get the split timeout for use in a calculation
+ * (e.g. for calculating ACK pending timeout).
+ */
+uint_t
+hci1394_csr_split_timeout_get(hci1394_csr_handle_t csr_handle)
+{
+ uint_t split_timeout;
+
+
+ ASSERT(csr_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_split_timeout_get_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&csr_handle->csr_mutex);
+
+ /* read our internal split_timeout value */
+ split_timeout = csr_handle->csr_split_timeout;
+
+ mutex_exit(&csr_handle->csr_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_split_timeout_get_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (split_timeout);
+}
+
+
+/*
+ * hci1394_csr_bus_reset()
+ * Perform required bus reset processing on CSR registers. This includes
+ * clearing the abdicate bit, and setting/clearing the Cycle Master bit.
+ * See sections 10.32 and 10.33 in the IEEE P1394A Draft 3.0 spec. See
+ * section 8.3.2.2.1 in the IEEE 1394-1995 spec. This routine should be
+ * called every bus reset.
+ */
+void
+hci1394_csr_bus_reset(hci1394_csr_handle_t csr_handle)
+{
+ ASSERT(csr_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_bus_reset_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ mutex_enter(&csr_handle->csr_mutex);
+
+ /* Clear the abdicate bit. Always do this. */
+ csr_handle->csr_state &= ~IEEE1394_CSR_STATE_ABDICATE;
+
+ /* if we are NOT currently the root node on the bus */
+ if (hci1394_ohci_root_check(csr_handle->csr_ohci) == B_FALSE) {
+ /*
+ * Set the was_root state. This is needed for the Cycle Master
+ * state machine below.
+ */
+ csr_handle->csr_was_root = B_FALSE;
+
+ /*
+ * Clear the Cycle Master bit. We do not have to shut off cycle
+ * master in OpenHCI. The HW will automatically stop generating
+ * Cycle Start packets when it is not the root node.
+ */
+ csr_handle->csr_state &= ~IEEE1394_CSR_STATE_CMSTR;
+
+ /*
+ * if we are currently the root node on the bus and we were NOT
+ * the root before the reset.
+ */
+ } else if (csr_handle->csr_was_root == B_FALSE) {
+
+ /* set the was_root state to TRUE */
+ csr_handle->csr_was_root = B_TRUE;
+
+ /*
+ * if we are cycle master capable, set the Cycle Master bit and
+ * start Cycle Start packets. We should always be Cycle Master
+ * capable.
+ */
+ if (hci1394_ohci_cmc_check(csr_handle->csr_ohci)) {
+ csr_handle->csr_state |= IEEE1394_CSR_STATE_CMSTR;
+ hci1394_ohci_cycle_master_enable(csr_handle->csr_ohci);
+
+ /*
+ * if we are NOT cycle master capable, clear the Cycle Master
+ * bit and stop Cycle Start packets. We should never see this
+ * in OpenHCI. I think? :-)
+ */
+ } else {
+ csr_handle->csr_state &= ~IEEE1394_CSR_STATE_CMSTR;
+ hci1394_ohci_cycle_master_disable(csr_handle->csr_ohci);
+ }
+ }
+ /*
+ * else {}
+ * else we are root now. We were root before, keep cmstr the same.
+ * Nothing to do.
+ */
+
+ mutex_exit(&csr_handle->csr_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_bus_reset_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_csr_state_init()
+ * Set the CSR SW registers and state variables to their initial settings.
+ */
+static void
+hci1394_csr_state_init(hci1394_csr_t *csr)
+{
+ ASSERT(csr != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_csr_state_init_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ mutex_enter(&csr->csr_mutex);
+
+ /*
+ * Initialize the split timeout to be 0 seconds (split_timeout_hi) and
+ * use a patchable variable for the initial split_timeout_lo. This
+ * variable must be patched before the driver attaches. It is never
+ * looked at again after this code is run.
+ *
+ * Calculate the split_timeout which we will use in the driver based on
+ * split_timeout_lo and split_timeout_hi.
+ */
+ csr->csr_split_timeout_hi = 0;
+ csr->csr_split_timeout_lo = hci1394_split_timeout;
+ csr->csr_split_timeout = CSR_SPLIT_TIMEOUT(
+ csr->csr_split_timeout_hi, csr->csr_split_timeout_lo);
+
+ /* Set the initial CSR State register to 0 */
+ csr->csr_state = 0;
+
+ /*
+ * was_root is an internal state variable which tracks if we were root
+ * last bus reset. This is needed for the required state register bus
+ * reset processing.
+ */
+ csr->csr_was_root = B_FALSE;
+
+ /* setup our initial capabilities setting */
+ csr->csr_capabilities = CSR_INITIAL_NODE_CAPABILITIES;
+
+ mutex_exit(&csr->csr_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_csr_state_init_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_detach.c b/usr/src/uts/common/io/1394/adapters/hci1394_detach.c
new file mode 100644
index 0000000000..cddc6f610e
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_detach.c
@@ -0,0 +1,193 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_detach.c
+ * HBA detach() routine with associated functions.
+ */
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/sunddi.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/adapters/hci1394.h>
+#include <sys/1394/adapters/hci1394_extern.h>
+
+
+
+int
+hci1394_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ hci1394_state_t *soft_state;
+
+
+ TNF_PROBE_0_DEBUG(hci1394_detach_enter, HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = ddi_get_soft_state(hci1394_statep, ddi_get_instance(dip));
+ if (soft_state == NULL) {
+ TNF_PROBE_1(hci1394_detach_ssn_fail, HCI1394_TNF_HAL_ERROR, "",
+ tnf_string, errmsg, "soft_state = NULL");
+ TNF_PROBE_0_DEBUG(hci1394_detach_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ switch (cmd) {
+ case DDI_DETACH:
+ /* Don't allow the HW to generate any more interrupts */
+ hci1394_ohci_intr_master_disable(soft_state->ohci);
+
+		/* don't accept any more commands from the services layer */
+ (void) hci1394_state_set(&soft_state->drvinfo,
+ HCI1394_SHUTDOWN);
+
+ /* Reset the OHCI HW */
+ (void) hci1394_ohci_soft_reset(soft_state->ohci);
+
+ /* Flush out async DMA Q's (cancels pendingQ timeouts too) */
+ hci1394_async_flush(soft_state->async);
+
+ (void) h1394_detach(&soft_state->drvinfo.di_sl_private,
+ DDI_DETACH);
+
+ /* remove the minor node */
+ ddi_remove_minor_node(dip, "devctl");
+
+ /* cleanup */
+ hci1394_detach_hardware(soft_state);
+
+ /* cleanup Solaris interrupt stuff */
+ hci1394_isr_fini(soft_state);
+
+ /* cleanup soft state stuff */
+ hci1394_soft_state_fini(soft_state);
+
+ /* free soft state */
+ ddi_soft_state_free(hci1394_statep,
+ soft_state->drvinfo.di_instance);
+
+ TNF_PROBE_0_DEBUG(hci1394_detach_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_SUCCESS);
+
+ case DDI_SUSPEND:
+ /* Don't allow the HW to generate any more interrupts */
+ hci1394_ohci_intr_master_disable(soft_state->ohci);
+
+		/* don't accept any more commands from the services layer */
+ (void) hci1394_state_set(&soft_state->drvinfo,
+ HCI1394_SHUTDOWN);
+
+ /* Reset the OHCI HW */
+ (void) hci1394_ohci_soft_reset(soft_state->ohci);
+
+ /* Make sure async engine is ready to suspend */
+ hci1394_async_suspend(soft_state->async);
+
+ (void) h1394_detach(&soft_state->drvinfo.di_sl_private,
+ DDI_SUSPEND);
+
+ TNF_PROBE_0_DEBUG(hci1394_detach_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_SUCCESS);
+
+ default:
+ TNF_PROBE_1(hci1394_detach_fail, HCI1394_TNF_HAL_ERROR, "",
+ tnf_string, errmsg, "in detach default");
+ break;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_detach_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_FAILURE);
+}
+
+
+void
+hci1394_detach_hardware(hci1394_state_t *soft_state)
+{
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_detach_hardware_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ /* free up vendor specific registers */
+ hci1394_vendor_fini(&soft_state->vendor);
+
+ /* cleanup isoch layer */
+ hci1394_isoch_fini(&soft_state->isoch);
+
+ /* cleanup async layer */
+ hci1394_async_fini(&soft_state->async);
+
+ /* Free up csr register space */
+ hci1394_csr_fini(&soft_state->csr);
+
+ /* free up OpenHCI registers */
+ hci1394_ohci_fini(&soft_state->ohci);
+
+ /* free up PCI config space */
+ hci1394_pci_fini(soft_state);
+
+ TNF_PROBE_0_DEBUG(hci1394_detach_hardware_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_pci_fini()
+ * Cleanup after a PCI init.
+ */
+void
+hci1394_pci_fini(hci1394_state_t *soft_state)
+{
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_pci_fini_enter, HCI1394_TNF_HAL_STACK, "");
+ pci_config_teardown(&soft_state->pci_config);
+ TNF_PROBE_0_DEBUG(hci1394_pci_fini_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_soft_state_fini()
+ * Clean up any mutexes, etc. in soft_state.
+ */
+void
+hci1394_soft_state_fini(hci1394_state_t *soft_state)
+{
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_soft_state_fini_enter, HCI1394_TNF_HAL_STACK,
+ "");
+ mutex_destroy(&soft_state->drvinfo.di_drvstate.ds_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_soft_state_fini_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_extern.c b/usr/src/uts/common/io/1394/adapters/hci1394_extern.c
new file mode 100644
index 0000000000..1da7580b6b
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_extern.c
@@ -0,0 +1,90 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_extern.c
+ * Central location for externs. There are two exceptions to this:
+ * hci1394_statep (located in hci1394.c) and hci1394_evts (located in
+ * hci1394_s1394if.c).
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+
+#include <sys/1394/h1394.h>
+
+#include <sys/1394/adapters/hci1394.h>
+
+
+
+/*
+ * The 1394 bus ticks are in 125uS increments. split_timeout is represented in
+ * 1394 bus ticks. 800 bus ticks is 100mS.
+ */
+uint32_t hci1394_split_timeout = 800;
+
+
+/*
+ * 1394 address map for OpenHCI adapters.
+ *
+ * This is what is reported to the services layer. The hci1394 driver does not
+ * modify the HW to reflect this. This should reflect what the OpenHCI 1.0 HW
+ * is set to. The comments below give the actual address ranges; the structure
+ * entries have the format: start address, size, type.
+ *
+ * physical => 0x0000000000000000 - 0x00000000FFFFFFFF
+ * posted write => 0x0000000100000000 - 0x0000FFFEFFFFFFFF
+ * normal => 0x0000FFFF00000000 - 0x0000FFFFEFFFFFFF
+ * csr => 0x0000FFFFF0000000 - 0x0000FFFFFFFFFFFF
+ */
+h1394_addr_map_t hci1394_addr_map[HCI1394_ADDR_MAP_SIZE] = {
+ {0x0000000000000000, 0x0000000100000000, H1394_ADDR_PHYSICAL},
+ {0x0000000100000000, 0x0000FFFE00000000, H1394_ADDR_POSTED_WRITE},
+ {0x0000FFFF00000000, 0x00000000F0000000, H1394_ADDR_NORMAL},
+ {0x0000FFFFF0000000, 0x0000000010000000, H1394_ADDR_CSR}
+};
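+
+/*
+ * As an illustration: a request to 1394 address 0x0000FFFFF0000400 (CSR
+ * space, offset 0x400) matches the last entry above, since it falls within
+ * [0x0000FFFFF0000000, 0x0000FFFFF0000000 + 0x0000000010000000), and is
+ * therefore reported to the services layer as H1394_ADDR_CSR.
+ */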
+
+
+/* Max number of uS to wait for phy reads & writes to finish */
+uint_t hci1394_phy_delay_uS = 10;
+
+/*
+ * Time to wait for the PHY's SCLK to be stable. There does not seem to be a
+ * standard time for how long to wait for the PHY to come up. The problem is
+ * that the PHY provides a clock to the link layer and if that is not stable,
+ * we could get a PCI timeout error when reading/writing a phy register (and
+ * maybe an OpenHCI register?). This used to be set to 10mS which works for
+ * just about every adapter we tested on. We got a new TI adapter which would
+ * crash the system once in a while if nothing (i.e. no 1394 device) was
+ * plugged into the adapter. Changing this delay to 50mS made that problem
+ * go away.
+ *
+ * NOTE: Do not change this delay unless you know what you're doing!!!!
+ */
+uint_t hci1394_phy_stabilization_delay_uS = 50000;
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_ioctl.c b/usr/src/uts/common/io/1394/adapters/hci1394_ioctl.c
new file mode 100644
index 0000000000..162a23a8e5
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_ioctl.c
@@ -0,0 +1,653 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_ioctl.c
+ * Test ioctl's to support test/debug of the 1394 HW. hci1394_ioctl_enum_t is
+ * passed in cmd and a pointer to the appropriate structure (e.g.
+ * hci1394_ioctl_wrreg_t) is passed in arg.
+ */
+
+#include <sys/conf.h>
+#include <sys/modctl.h>
+#include <sys/mkdev.h>
+#include <sys/cred.h>
+#include <sys/file.h>
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/adapters/hci1394.h>
+#include <sys/1394/adapters/hci1394_extern.h>
+#include <sys/1394/adapters/hci1394_ioctl.h>
+
+
+/* HCI1394_IOCTL_READ_SELFID for 32-bit apps in 64-bit kernel */
+typedef struct hci1394_ioctl_readselfid32_s {
+ uint32_t buf;
+ uint_t count;
+} hci1394_ioctl_readselfid32_t;
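+
+/*
+ * In the structure above, buf holds the 32-bit application's buffer address;
+ * hci1394_ioctl_read_selfid32() widens it via (uintptr_t) before handing it
+ * to ddi_copyout(), since user pointers are 32 bits wide in this model while
+ * kernel pointers are 64 bits.
+ */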
+
+
+static int hci1394_ioctl_wrreg(hci1394_state_t *soft_state, void *arg,
+ int mode);
+static int hci1394_ioctl_rdreg(hci1394_state_t *soft_state, void *arg,
+ int mode);
+static int hci1394_ioctl_wrvreg(hci1394_state_t *soft_state, void *arg,
+ int mode);
+static int hci1394_ioctl_rdvreg(hci1394_state_t *soft_state, void *arg,
+ int mode);
+static int hci1394_ioctl_selfid_cnt(hci1394_state_t *soft_state, void *arg,
+ int mode);
+static int hci1394_ioctl_busgen_cnt(hci1394_state_t *soft_state, void *arg,
+ int mode);
+static int hci1394_ioctl_wrphy(hci1394_state_t *soft_state, void *arg,
+ int mode);
+static int hci1394_ioctl_rdphy(hci1394_state_t *soft_state, void *arg,
+ int mode);
+static int hci1394_ioctl_hbainfo(hci1394_state_t *soft_state, void *arg,
+ int mode);
+static int hci1394_ioctl_read_selfid(hci1394_state_t *soft_state, void *arg,
+ int mode);
+#ifdef _MULTI_DATAMODEL
+static int hci1394_ioctl_read_selfid32(hci1394_state_t *soft_state,
+ hci1394_ioctl_readselfid32_t *read_selfid, int mode);
+#endif
+
+
+/* ARGSUSED */
+int
+hci1394_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
+ int *rvalp)
+{
+ hci1394_state_t *soft_state;
+ int instance;
+ int status;
+
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_enter, HCI1394_TNF_HAL_STACK, "");
+
+ instance = getminor(dev);
+ if (instance == -1) {
+ TNF_PROBE_0(hci1394_ioctl_gm_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (EBADF);
+ }
+
+ soft_state = ddi_get_soft_state(hci1394_statep, instance);
+ if (soft_state == NULL) {
+ TNF_PROBE_0(hci1394_ioctl_gss_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (EBADF);
+ }
+
+ status = 0;
+
+ switch (cmd) {
+ case HCI1394_IOCTL_WRITE_REG:
+ status = hci1394_ioctl_wrreg(soft_state, (void *)arg, mode);
+ break;
+ case HCI1394_IOCTL_READ_REG:
+ status = hci1394_ioctl_rdreg(soft_state, (void *)arg, mode);
+ break;
+ case HCI1394_IOCTL_READ_VREG:
+ status = hci1394_ioctl_rdvreg(soft_state, (void *)arg, mode);
+ break;
+ case HCI1394_IOCTL_WRITE_VREG:
+ status = hci1394_ioctl_wrvreg(soft_state, (void *)arg, mode);
+ break;
+ case HCI1394_IOCTL_RESET_BUS:
+ status = hci1394_ohci_bus_reset(soft_state->ohci);
+ break;
+ case HCI1394_IOCTL_SELFID_CNT:
+ status = hci1394_ioctl_selfid_cnt(soft_state, (void *)arg,
+ mode);
+ break;
+ case HCI1394_IOCTL_BUSGEN_CNT:
+ status = hci1394_ioctl_busgen_cnt(soft_state, (void *)arg,
+ mode);
+ break;
+ case HCI1394_IOCTL_READ_SELFID:
+ status = hci1394_ioctl_read_selfid(soft_state, (void *)arg,
+ mode);
+ break;
+ case HCI1394_IOCTL_READ_PHY:
+ status = hci1394_ioctl_rdphy(soft_state, (void *)arg, mode);
+ break;
+ case HCI1394_IOCTL_WRITE_PHY:
+ status = hci1394_ioctl_wrphy(soft_state, (void *)arg, mode);
+ break;
+ case HCI1394_IOCTL_HBA_INFO:
+ status = hci1394_ioctl_hbainfo(soft_state, (void *)arg, mode);
+ break;
+ default:
+ /*
+ * if we don't know what the ioctl is, forward it on to the
+ * services layer. The services layer will handle the devctl
+ * ioctl's along with any services layer private ioctls that
+ * it has defined.
+ */
+ status = h1394_ioctl(soft_state->drvinfo.di_sl_private, cmd,
+ arg, mode, credp, rvalp);
+ break;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (status);
+}
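+
+
+/*
+ * A hypothetical user-land sketch (the device path and register offset are
+ * illustrative only): reading an OpenHCI register through this interface
+ * looks roughly like
+ *
+ *	hci1394_ioctl_rdreg_t rdreg;
+ *	int fd = open("/devices/.../devctl", O_RDWR);
+ *	rdreg.addr = 0x0;	(e.g. the OHCI Version register)
+ *	if (ioctl(fd, HCI1394_IOCTL_READ_REG, &rdreg) == 0)
+ *		(void) printf("reg = 0x%x\n", rdreg.data);
+ */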
+
+
+static int
+hci1394_ioctl_wrreg(hci1394_state_t *soft_state, void *arg, int mode)
+{
+ hci1394_ioctl_wrreg_t wrreg;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ ASSERT(arg != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_wrreg_enter, HCI1394_TNF_HAL_STACK, "");
+
+ status = ddi_copyin(arg, &wrreg, sizeof (hci1394_ioctl_wrreg_t), mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_wrreg_ci_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_wrreg_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ hci1394_ohci_reg_write(soft_state->ohci, wrreg.addr, wrreg.data);
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_wrreg_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+
+
+static int
+hci1394_ioctl_rdreg(hci1394_state_t *soft_state, void *arg, int mode)
+{
+ hci1394_ioctl_rdreg_t rdreg;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ ASSERT(arg != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdreg_enter, HCI1394_TNF_HAL_STACK, "");
+
+ status = ddi_copyin(arg, &rdreg, sizeof (hci1394_ioctl_rdreg_t), mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_rdreg_ci_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdreg_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ hci1394_ohci_reg_read(soft_state->ohci, rdreg.addr, &rdreg.data);
+
+ status = ddi_copyout(&rdreg, arg, sizeof (hci1394_ioctl_rdreg_t), mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_rdreg_c0_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdreg_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdreg_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+
+
+static int
+hci1394_ioctl_wrvreg(hci1394_state_t *soft_state, void *arg, int mode)
+{
+ hci1394_ioctl_wrvreg_t wrvreg;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ ASSERT(arg != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_wrvreg_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ status = ddi_copyin(arg, &wrvreg, sizeof (hci1394_ioctl_wrvreg_t),
+ mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_wrvreg_ci_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_wrvreg_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ status = hci1394_vendor_reg_write(soft_state->vendor,
+ wrvreg.regset, wrvreg.addr, wrvreg.data);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ioctl_wrvreg_vrw_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_wrvreg_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EINVAL);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_wrvreg_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+
+
+static int
+hci1394_ioctl_rdvreg(hci1394_state_t *soft_state, void *arg, int mode)
+{
+ hci1394_ioctl_rdvreg_t rdvreg;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ ASSERT(arg != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ status = ddi_copyin(arg, &rdvreg, sizeof (hci1394_ioctl_rdvreg_t),
+ mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_rdvreg_ci_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ status = hci1394_vendor_reg_read(soft_state->vendor,
+ rdvreg.regset, rdvreg.addr, &rdvreg.data);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ioctl_rdvreg_vrr_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EINVAL);
+ }
+
+ status = ddi_copyout(&rdvreg, arg, sizeof (hci1394_ioctl_rdvreg_t),
+ mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_rdvreg_co_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdvreg_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+
+
+static int
+hci1394_ioctl_selfid_cnt(hci1394_state_t *soft_state, void *arg, int mode)
+{
+ hci1394_ioctl_selfid_cnt_t selfid_cnt;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ ASSERT(arg != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_selfid_cnt_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ selfid_cnt.count = soft_state->drvinfo.di_stats.st_selfid_count;
+
+ status = ddi_copyout(&selfid_cnt, arg,
+ sizeof (hci1394_ioctl_selfid_cnt_t), mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_selfid_cnt_co_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_selfid_cnt_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_selfid_cnt_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+
+
+static int
+hci1394_ioctl_busgen_cnt(hci1394_state_t *soft_state, void *arg, int mode)
+{
+ hci1394_ioctl_busgen_cnt_t busgen_cnt;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ ASSERT(arg != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_busgen_cnt_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ busgen_cnt.count = hci1394_ohci_current_busgen(soft_state->ohci);
+
+ status = ddi_copyout(&busgen_cnt, arg,
+ sizeof (hci1394_ioctl_busgen_cnt_t), mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_busgen_cnt_co_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_busgen_cnt_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_busgen_cnt_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+
+
+static int
+hci1394_ioctl_wrphy(hci1394_state_t *soft_state, void *arg, int mode)
+{
+ hci1394_ioctl_wrphy_t wrphy;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ ASSERT(arg != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_wrphy_enter, HCI1394_TNF_HAL_STACK, "");
+
+ status = ddi_copyin(arg, &wrphy, sizeof (hci1394_ioctl_wrphy_t), mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_wrphy_ci_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_wrphy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ status = hci1394_ohci_phy_write(soft_state->ohci, wrphy.addr,
+ wrphy.data);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ioctl_wrphy_pw_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_wrphy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EINVAL);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_wrphy_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+
+
+static int
+hci1394_ioctl_rdphy(hci1394_state_t *soft_state, void *arg, int mode)
+{
+ hci1394_ioctl_rdphy_t rdphy;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ ASSERT(arg != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdphy_enter, HCI1394_TNF_HAL_STACK, "");
+
+ status = ddi_copyin(arg, &rdphy, sizeof (hci1394_ioctl_rdphy_t), mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_rdphy_ci_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdphy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ status = hci1394_ohci_phy_read(soft_state->ohci, rdphy.addr,
+ &rdphy.data);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ioctl_rdphy_pr_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdphy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EINVAL);
+ }
+
+ status = ddi_copyout(&rdphy, arg, sizeof (hci1394_ioctl_rdphy_t), mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_rdphy_co_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdphy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_rdphy_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+
+
+static int
+hci1394_ioctl_hbainfo(hci1394_state_t *soft_state, void *arg, int mode)
+{
+ hci1394_ioctl_hbainfo_t hbainfo;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ ASSERT(arg != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_hbainfo_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ hbainfo.pci_vendor_id = soft_state->vendor_info.vendor_id;
+ hbainfo.pci_device_id = soft_state->vendor_info.device_id;
+ hbainfo.pci_revision_id = soft_state->vendor_info.revision_id;
+ hbainfo.ohci_version = soft_state->vendor_info.ohci_version;
+ hbainfo.ohci_vendor_id = soft_state->vendor_info.ohci_vendor_id;
+ hbainfo.ohci_vregset_cnt = soft_state->vendor_info.vendor_reg_count;
+
+ status = ddi_copyout(&hbainfo, arg, sizeof (hci1394_ioctl_hbainfo_t),
+ mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_hbainfo_co_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_hbainfo_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_hbainfo_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+
+
+static int
+hci1394_ioctl_read_selfid(hci1394_state_t *soft_state, void *arg, int mode)
+{
+ hci1394_ioctl_read_selfid_t read_selfid;
+ int status;
+ uint_t offset;
+ uint32_t data;
+#ifdef _MULTI_DATAMODEL
+ hci1394_ioctl_readselfid32_t read_selfid32;
+#endif
+
+
+ ASSERT(soft_state != NULL);
+ ASSERT(arg != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+#ifdef _MULTI_DATAMODEL
+ switch (ddi_model_convert_from(mode & FMODELS)) {
+
+ /* 32-bit app in 64-bit kernel */
+ case DDI_MODEL_ILP32:
+ /* copy in the 32-bit version of the args */
+ status = ddi_copyin(arg, &read_selfid32,
+ sizeof (hci1394_ioctl_readselfid32_t), mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_read_selfid_ci_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ /*
+ * Use a special function to process the 32-bit user address
+ * pointer embedded in the structure we pass in arg.
+ */
+ status = hci1394_ioctl_read_selfid32(soft_state,
+ &read_selfid32, mode);
+ return (status);
+ default:
+ break;
+ }
+#endif
+
+ /*
+	 * if we got here, we are either a 64-bit app in a 64-bit kernel or a
+ * 32-bit app in a 32-bit kernel
+ */
+
+ /* copy in the args. We don't need to do any special conversions */
+ status = ddi_copyin(arg, &read_selfid,
+ sizeof (hci1394_ioctl_read_selfid_t), mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_read_selfid_ci_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+
+ /*
+ * make sure we are not trying to copy more data than the selfid buffer
+ * can hold. count is in quadlets and max_selfid_size is in bytes.
+ */
+ if ((read_selfid.count * 4) > OHCI_MAX_SELFID_SIZE) {
+ TNF_PROBE_0(hci1394_ioctl_read_selfid_cnt_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+		TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EINVAL);
+ }
+
+ /*
+ * copy the selfid buffer one word at a time into the user buffer. The
+	 * combination of having to do ddi_get32's (for endian reasons) and a
+	 * ddi_copyout() makes it easier to do it one word at a time.
+ */
+ for (offset = 0; offset < read_selfid.count; offset++) {
+ /* read word from selfid buffer */
+ hci1394_ohci_selfid_read(soft_state->ohci, offset, &data);
+
+ /* copy the selfid word into the user buffer */
+ status = ddi_copyout(&data, &read_selfid.buf[offset], 4, mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_read_selfid_co_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+
+
+#ifdef _MULTI_DATAMODEL
+static int
+hci1394_ioctl_read_selfid32(hci1394_state_t *soft_state,
+ hci1394_ioctl_readselfid32_t *read_selfid, int mode)
+{
+ int status;
+ uint_t offset;
+ uint32_t data;
+
+
+ ASSERT(soft_state != NULL);
+ ASSERT(read_selfid != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid32_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * make sure we are not trying to copy more data than the selfid buffer
+ * can hold. count is in quadlets and max_selfid_size is in bytes.
+ */
+ if ((read_selfid->count * 4) > OHCI_MAX_SELFID_SIZE) {
+ TNF_PROBE_0(hci1394_ioctl_read_selfid32_cnt_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid32_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EINVAL);
+ }
+
+ /*
+ * copy the selfid buffer one word at a time into the user buffer. The
+	 * combination of having to do ddi_get32's (for endian reasons) and a
+	 * ddi_copyout() makes it easier to do it one word at a time.
+ */
+ for (offset = 0; offset < read_selfid->count; offset++) {
+ /* read word from selfid buffer */
+ hci1394_ohci_selfid_read(soft_state->ohci, offset, &data);
+ /* copy the selfid word into the user buffer */
+ status = ddi_copyout(&data,
+ (void *)(uintptr_t)(read_selfid->buf + (offset * 4)),
+ 4, mode);
+ if (status != 0) {
+ TNF_PROBE_0(hci1394_ioctl_read_selfid32_co_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid32_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (EFAULT);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid32_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+#endif
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_isoch.c b/usr/src/uts/common/io/1394/adapters/hci1394_isoch.c
new file mode 100644
index 0000000000..eea2e0b6c7
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_isoch.c
@@ -0,0 +1,895 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2001-2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_isoch.c
+ * HCI HAL isochronous interface routines. Contains routines used
+ * internally within the HAL to manage isochronous contexts, and
+ * also routines called from the Services Layer to manage an isochronous
+ * DMA resource.
+ */
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/adapters/hci1394.h>
+
+/*
+ * Patchable variable used to indicate the number of microseconds to wait
+ * for an isoch ctxt to stop ("active" goes low) after clearing the "run"
+ * bit
+ */
+uint_t hci1394_iso_ctxt_stop_delay_uS = 1000;
+
+/*
+ * Number of microseconds to wait in hci1394_do_stop() for an isoch ctxt
+ * interrupt handler to complete. Experiments showed that in some cases
+ * the timeout needed was as long as 2 seconds. This is probably due to
+ * significant interrupt processing overhead for certain IXL chains.
+ */
+uint_t hci1394_iso_ctxt_stop_intr_timeout_uS = 5 * 1000000;
+
+/*
+ * hci1394_isoch_init()
+ * Initialize the isochronous dma soft state.
+ */
+void
+hci1394_isoch_init(hci1394_drvinfo_t *drvinfo, hci1394_ohci_handle_t ohci,
+ hci1394_isoch_handle_t *isoch_hdl)
+{
+ hci1394_isoch_t *isochp;
+ int i;
+
+ ASSERT(drvinfo != NULL);
+ ASSERT(isoch_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isoch_init_enter, HCI1394_TNF_HAL_STACK_ISOCH,
+ "");
+
+ isochp = kmem_alloc(sizeof (hci1394_isoch_t), KM_SLEEP);
+
+ /* initialize contexts */
+ for (i = 0; i < HCI1394_MAX_ISOCH_CONTEXTS; i++) {
+ isochp->ctxt_xmit[i].ctxt_index = i;
+
+ /* init context flags to 0 */
+ isochp->ctxt_xmit[i].ctxt_flags = 0;
+
+ mutex_init(&isochp->ctxt_xmit[i].intrprocmutex, NULL,
+ MUTEX_DRIVER, drvinfo->di_iblock_cookie);
+ cv_init(&isochp->ctxt_xmit[i].intr_cv, NULL,
+ CV_DRIVER, NULL);
+
+ isochp->ctxt_recv[i].ctxt_index = i;
+ isochp->ctxt_recv[i].ctxt_flags = HCI1394_ISO_CTXT_RECV;
+ mutex_init(&isochp->ctxt_recv[i].intrprocmutex, NULL,
+ MUTEX_DRIVER, drvinfo->di_iblock_cookie);
+ cv_init(&isochp->ctxt_recv[i].intr_cv, NULL,
+ CV_DRIVER, NULL);
+ }
+
+ /* initialize the count for allocated isoch dma */
+ isochp->isoch_dma_alloc_cnt = 0;
+
+ /* initialize the cycle_lost_thresh struct */
+ isochp->cycle_lost_thresh.last_intr_time = 0;
+ isochp->cycle_lost_thresh.delta_t_counter = 0;
+ isochp->cycle_lost_thresh.delta_t_thresh = HCI1394_CYC_LOST_DELTA;
+ isochp->cycle_lost_thresh.counter_thresh = HCI1394_CYC_LOST_COUNT;
+
+ /* initialize the cycle_incon_thresh struct */
+ isochp->cycle_incon_thresh.last_intr_time = 0;
+ isochp->cycle_incon_thresh.delta_t_counter = 0;
+ isochp->cycle_incon_thresh.delta_t_thresh = HCI1394_CYC_INCON_DELTA;
+ isochp->cycle_incon_thresh.counter_thresh = HCI1394_CYC_INCON_COUNT;
+
+ /* determine number of contexts supported */
+ isochp->ctxt_xmit_count = hci1394_ohci_it_ctxt_count_get(ohci);
+ isochp->ctxt_recv_count = hci1394_ohci_ir_ctxt_count_get(ohci);
+
+ /* the isochronous context mutex is used during some error interrupts */
+ mutex_init(&isochp->ctxt_list_mutex, NULL, MUTEX_DRIVER,
+ drvinfo->di_iblock_cookie);
+
+ *isoch_hdl = isochp;
+
+ TNF_PROBE_0_DEBUG(hci1394_isoch_init_exit, HCI1394_TNF_HAL_STACK_ISOCH,
+ "");
+}
+
+/*
+ * hci1394_isoch_fini()
+ * Cleanup after hci1394_isoch_init. This should be called during detach.
+ */
+void
+hci1394_isoch_fini(hci1394_isoch_handle_t *isoch_hdl)
+{
+ hci1394_isoch_t *isochp;
+ int i;
+
+ ASSERT(isoch_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isoch_fini_enter, HCI1394_TNF_HAL_STACK_ISOCH,
+ "");
+
+ isochp = *isoch_hdl;
+
+ for (i = 0; i < HCI1394_MAX_ISOCH_CONTEXTS; i++) {
+ mutex_destroy(&isochp->ctxt_xmit[i].intrprocmutex);
+ mutex_destroy(&isochp->ctxt_recv[i].intrprocmutex);
+ cv_destroy(&isochp->ctxt_xmit[i].intr_cv);
+ cv_destroy(&isochp->ctxt_recv[i].intr_cv);
+ }
+
+ mutex_destroy(&isochp->ctxt_list_mutex);
+ kmem_free(isochp, sizeof (hci1394_isoch_t));
+ *isoch_hdl = NULL;
+
+ TNF_PROBE_0_DEBUG(hci1394_isoch_fini_exit, HCI1394_TNF_HAL_STACK_ISOCH,
+ "");
+}
+
+
+/*
+ * hci1394_isoch_resume()
+ * There is currently nothing to do for resume. This is a placeholder.
+ */
+/* ARGSUSED */
+int
+hci1394_isoch_resume(hci1394_state_t *soft_state)
+{
+ TNF_PROBE_0_DEBUG(hci1394_isoch_resume_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ TNF_PROBE_0_DEBUG(hci1394_isoch_resume_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * hci1394_alloc_isoch_dma()
+ * Called by the Services Layer. Used to allocate a local Isoch DMA context.
+ * Goes through the appropriate context list (either transmit or receive)
+ * looking for an unused context and fails if none is found. It then
+ * compiles the provided IXL program.
+ */
+int
+hci1394_alloc_isoch_dma(void *hal_private, id1394_isoch_dmainfo_t *idi,
+ void **hal_idma_handlep, int *resultp)
+{
+ int i;
+ int err;
+ hci1394_state_t *soft_statep = (hci1394_state_t *)hal_private;
+ hci1394_isoch_t *isochp;
+ hci1394_iso_ctxt_t *ctxtp;
+
+
+ ASSERT(soft_statep != NULL);
+ ASSERT(hal_idma_handlep != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_alloc_isoch_dma_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ isochp = soft_statep->isoch;
+ *hal_idma_handlep = NULL;
+
+ /*
+	 * find a context to use based on whether we are talking (send) or
+	 * listening (recv)
+ */
+ mutex_enter(&isochp->ctxt_list_mutex);
+ if ((idi->idma_options & ID1394_TALK) != 0) {
+ /* TRANSMIT */
+
+ TNF_PROBE_1_DEBUG(hci1394_alloc_isoch_dma_transmit,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_string, msg,
+ "Allocating isoch transmit context");
+
+ /*
+		 * search through the list of hardware supported contexts for
+		 * one that's not in use
+ */
+ for (i = 0; i < isochp->ctxt_xmit_count; i++) {
+ if ((isochp->ctxt_xmit[i].ctxt_flags &
+ HCI1394_ISO_CTXT_INUSE) == 0) {
+ break;
+ }
+ }
+
+ /* if there aren't any left, return an error */
+ if (i >= isochp->ctxt_xmit_count) {
+ TNF_PROBE_1(hci1394_alloc_isoch_dma_xmit_rsrc_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "Out of isoch transmit resources");
+ TNF_PROBE_0_DEBUG(hci1394_alloc_isoch_dma_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ mutex_exit(&isochp->ctxt_list_mutex);
+ *resultp = IXL1394_ENO_DMA_RESRCS;
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_1_DEBUG(t1394_alloc_isoch_dma_it_ctxtnum,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_int, it_ctxt_num, i);
+
+ /* mark inuse and set up handle to context */
+ isochp->ctxt_xmit[i].ctxt_flags |= HCI1394_ISO_CTXT_INUSE;
+ ctxtp = &isochp->ctxt_xmit[i];
+ isochp->ctxt_xmit[i].ctxt_regsp =
+ &soft_statep->ohci->ohci_regs->it[i];
+ } else {
+ /* RECEIVE */
+
+ TNF_PROBE_1_DEBUG(hci1394_alloc_isoch_dma_receive,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_string, msg,
+ "Allocating isoch receive context");
+
+ /* search thru implemented contexts for one that's available */
+ for (i = 0; i < isochp->ctxt_recv_count; i++) {
+ if ((isochp->ctxt_recv[i].ctxt_flags &
+ HCI1394_ISO_CTXT_INUSE) == 0) {
+ break;
+ }
+ }
+
+ /* if there aren't any left, return an error */
+ /* XXX support for multi-chan could go here */
+ if (i >= isochp->ctxt_recv_count) {
+
+ TNF_PROBE_1(t1394_alloc_isoch_dma_ir_rsrc_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "Out of isoch receive resources");
+ TNF_PROBE_0_DEBUG(hci1394_alloc_isoch_dma_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ mutex_exit(&isochp->ctxt_list_mutex);
+ *resultp = IXL1394_ENO_DMA_RESRCS;
+ return (DDI_FAILURE);
+ }
+
+ /* set up receive mode flags */
+ if ((idi->idma_options & ID1394_LISTEN_BUF_MODE) != 0) {
+ isochp->ctxt_recv[i].ctxt_flags |=
+ HCI1394_ISO_CTXT_BFFILL;
+ }
+ if ((idi->idma_options & ID1394_RECV_HEADERS) != 0) {
+ isochp->ctxt_recv[i].ctxt_flags |=
+ HCI1394_ISO_CTXT_RHDRS;
+ }
+
+ TNF_PROBE_1_DEBUG(hci1394_alloc_isoch_dma_recv_ctxtnum,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_int, recv_ctxt_num, i);
+
+ /* mark inuse and set up handle to context */
+ isochp->ctxt_recv[i].ctxt_flags |= HCI1394_ISO_CTXT_INUSE;
+ ctxtp = &isochp->ctxt_recv[i];
+
+ isochp->ctxt_recv[i].ctxt_regsp = (hci1394_ctxt_regs_t *)
+ &soft_statep->ohci->ohci_regs->ir[i];
+ }
+ mutex_exit(&isochp->ctxt_list_mutex);
+
+ /* before compiling, set up some default context values */
+ ctxtp->isochan = idi->channel_num;
+ ctxtp->default_tag = idi->default_tag;
+ ctxtp->default_sync = idi->default_sync;
+ ctxtp->global_callback_arg = idi->global_callback_arg;
+ ctxtp->isoch_dma_stopped = idi->isoch_dma_stopped;
+ ctxtp->idma_evt_arg = idi->idma_evt_arg;
+ ctxtp->isospd = idi->it_speed;
+ ctxtp->default_skipmode = idi->it_default_skip;
+ ctxtp->default_skiplabelp = idi->it_default_skiplabel;
+
+ err = hci1394_compile_ixl(soft_statep, ctxtp, idi->ixlp, resultp);
+
+
+ /*
+	 * If the compile failed, clear the appropriate flags.
+	 * Note that the context mutex is needed to eliminate a race condition
+	 * with cycle_inconsistent and other error intrs.
+ */
+ if (err != DDI_SUCCESS) {
+
+ mutex_enter(&isochp->ctxt_list_mutex);
+ if ((ctxtp->ctxt_flags & HCI1394_ISO_CTXT_RECV) != 0) {
+ /* undo the set up of receive mode flags */
+ isochp->ctxt_recv[i].ctxt_flags &=
+ ~HCI1394_ISO_CTXT_BFFILL;
+ isochp->ctxt_recv[i].ctxt_flags &=
+ ~HCI1394_ISO_CTXT_RHDRS;
+ }
+ ctxtp->ctxt_flags &= ~HCI1394_ISO_CTXT_INUSE;
+ mutex_exit(&isochp->ctxt_list_mutex);
+
+ TNF_PROBE_2(t1394_alloc_isoch_dma_compile_fail,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL compilation error", tnf_int, ixl_error, *resultp);
+ TNF_PROBE_0_DEBUG(hci1394_alloc_isoch_dma_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Update count of allocated isoch dma (and enable interrupts
+ * if necessary)
+ */
+ mutex_enter(&isochp->ctxt_list_mutex);
+ if (isochp->isoch_dma_alloc_cnt == 0) {
+ hci1394_ohci_intr_clear(soft_statep->ohci,
+ OHCI_INTR_CYC_LOST | OHCI_INTR_CYC_INCONSISTENT);
+ hci1394_ohci_intr_enable(soft_statep->ohci,
+ OHCI_INTR_CYC_LOST | OHCI_INTR_CYC_INCONSISTENT);
+ }
+ isochp->isoch_dma_alloc_cnt++;
+ mutex_exit(&isochp->ctxt_list_mutex);
+
+	/* No errors, so all set to go; initialize interrupt/execution flags */
+ ctxtp->intr_flags = 0;
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_isoch_dma_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ *hal_idma_handlep = ctxtp;
+ return (DDI_SUCCESS);
+}
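+
+
+/*
+ * Example (editorial sketch, not part of the original driver): one
+ * plausible calling sequence for the four isoch DMA entry points in this
+ * file. Only the entry points and types already visible here are assumed;
+ * all structure-field setup is elided and the function name is
+ * hypothetical. Guarded out so it is never compiled.
+ */
+#ifdef HCI1394_EXAMPLE_NOTES
+static int
+example_isoch_lifecycle(hci1394_state_t *soft_statep,
+    id1394_isoch_dmainfo_t *idi, id1394_isoch_dma_ctrlinfo_t *ctrlinfop)
+{
+	void *idma_hdl;
+	int result;
+
+	/* reserve a free IT/IR context and compile the IXL program */
+	if (hci1394_alloc_isoch_dma((void *)soft_statep, idi, &idma_hdl,
+	    &result) != DDI_SUCCESS)
+		return (DDI_FAILURE);
+
+	/* point the context at its first descriptor and set the RUN bit */
+	if (hci1394_start_isoch_dma((void *)soft_statep, idma_hdl, ctrlinfop,
+	    0, &result) != DDI_SUCCESS) {
+		hci1394_free_isoch_dma((void *)soft_statep, idma_hdl);
+		return (DDI_FAILURE);
+	}
+
+	/* ... DMA runs; IXL callbacks fire from interrupt context ... */
+
+	/* clear RUN (no isoch_dma_stopped() callback), then release ctxt */
+	hci1394_stop_isoch_dma((void *)soft_statep, idma_hdl, &result);
+	hci1394_free_isoch_dma((void *)soft_statep, idma_hdl);
+
+	return (DDI_SUCCESS);
+}
+#endif	/* HCI1394_EXAMPLE_NOTES */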
+
+
+/*
+ * hci1394_start_isoch_dma()
+ * Used to start an allocated isochronous dma resource.
+ * Sets the context's command ptr to start at the first IXL,
+ * sets up IR match register (if IR), and enables the context_control
+ * register RUN bit.
+ */
+/* ARGSUSED */
+int
+hci1394_start_isoch_dma(void *hal_private, void *hal_isoch_dma_handle,
+ id1394_isoch_dma_ctrlinfo_t *idma_ctrlinfop, uint_t flags, int *result)
+{
+ hci1394_state_t *soft_statep = (hci1394_state_t *)hal_private;
+ hci1394_iso_ctxt_t *ctxtp;
+ int tag0, tag1, tag2, tag3;
+
+ TNF_PROBE_0_DEBUG(hci1394_start_isoch_dma_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* pick up the context pointer from the private idma data */
+ ctxtp = (hci1394_iso_ctxt_t *)hal_isoch_dma_handle;
+
+ ASSERT(hal_private != NULL);
+ ASSERT(ctxtp != NULL);
+ ASSERT(idma_ctrlinfop != NULL);
+ TNF_PROBE_4_DEBUG(hci1394_start_isoch_dma_ctxt_info,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_opaque, ctxt_ptr, ctxtp,
+ tnf_int, ctxt_index, ctxtp->ctxt_index, tnf_opaque, ctxt_flags,
+ ctxtp->ctxt_flags, tnf_opaque, first_ixl, ctxtp->ixl_firstp);
+
+
+ /* if the context is already running, just exit. else set running */
+ mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
+ if ((ctxtp->ctxt_flags & HCI1394_ISO_CTXT_RUNNING) != 0) {
+
+ mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
+
+ TNF_PROBE_1_DEBUG(hci1394_start_isoch_dma_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "context already running");
+ return (DDI_SUCCESS);
+ }
+ ctxtp->ctxt_flags |= HCI1394_ISO_CTXT_RUNNING;
+ mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
+
+ ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_STOP;
+
+ /* initialize context values */
+ ctxtp->ixl_execp = ctxtp->ixl_firstp; /* start of ixl chain */
+ ctxtp->ixl_exec_depth = 0;
+ ctxtp->dma_last_time = 0;
+ ctxtp->rem_noadv_intrs = ctxtp->max_noadv_intrs;
+
+ /*
+	 * clear out hci DMA descriptor status to start with a clean slate.
+	 * Note that statuses could be set if the context was previously
+	 * started and then stopped.
+ */
+ hci1394_ixl_reset_status(ctxtp);
+
+ /* set up registers, and start isoch */
+ if (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_RECV) {
+
+ /* set context's command ptr to the first descriptor */
+ hci1394_ohci_ir_cmd_ptr_set(soft_statep->ohci,
+ ctxtp->ctxt_index, ctxtp->dma_mem_execp);
+
+ TNF_PROBE_2_DEBUG(hci1394_start_isoch_dma_index_info,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_string, msg,
+ "starting IR ctxt", tnf_int, ctxt_num, ctxtp->ctxt_index);
+
+ /*
+ * determine correct tag values. map target's requested 2-bit
+ * tag into one of the 4 openHCI tag bits.
+ * XXX for now the t1394 api only supports a single tag setting,
+ * whereas openhci supports a set of (non-mutually exclusive)
+ * valid tags. if the api changes to support multiple
+ * simultaneous tags, then this code must be changed.
+ */
+ tag0 = 0;
+		tag1 = 0;
+		tag2 = 0;
+		tag3 = 0;
+ if (ctxtp->default_tag == 0x0)
+ tag0 = 1;
+ else if (ctxtp->default_tag == 0x1)
+ tag1 = 1;
+ else if (ctxtp->default_tag == 0x2)
+ tag2 = 1;
+ else if (ctxtp->default_tag == 0x3)
+ tag3 = 1;
+
+ /* set match register as desired */
+ HCI1394_IRCTXT_MATCH_WRITE(soft_statep, ctxtp->ctxt_index, tag3,
+ tag2, tag1, tag0,
+ idma_ctrlinfop->start_cycle /* cycleMatch */,
+ ctxtp->default_sync /* sync */, 0 /* tag1sync */,
+ ctxtp->isochan /* chan */);
+
+ /* clear all bits in context ctrl reg to init to known state */
+ HCI1394_IRCTXT_CTRL_CLR(soft_statep, ctxtp->ctxt_index,
+ (uint32_t)1, 1, 1, 1, 1);
+
+ /* set desired values in context control register */
+ HCI1394_IRCTXT_CTRL_SET(soft_statep, ctxtp->ctxt_index,
+ (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) != 0 /* bf */,
+ (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_RHDRS) != 0 /* hdr */,
+ (flags & ID1394_START_ON_CYCLE) != 0 /* match enbl */,
+ 0 /* multi-chan mode */, 1 /* run */, 0 /* wake */);
+
+ /*
+		 * before enabling interrupts, make sure any vestigial interrupt
+ * event (from a previous use) is cleared.
+ */
+ hci1394_ohci_ir_intr_clear(soft_statep->ohci,
+ (uint32_t)(0x1 << ctxtp->ctxt_index));
+
+ /* enable interrupts for this IR context */
+ hci1394_ohci_ir_intr_enable(soft_statep->ohci,
+ (uint32_t)(0x1 << ctxtp->ctxt_index));
+
+ } else {
+ /* TRANSMIT */
+
+ /* set context's command ptr to the first descriptor */
+ hci1394_ohci_it_cmd_ptr_set(soft_statep->ohci,
+ ctxtp->ctxt_index, ctxtp->dma_mem_execp);
+
+ TNF_PROBE_2_DEBUG(hci1394_start_isoch_dma_index_info,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_string, msg,
+ "starting IT ctxt", tnf_int, ctxt_num, ctxtp->ctxt_index);
+
+ /* set desired values in context control register */
+ HCI1394_ITCTXT_CTRL_SET(soft_statep, ctxtp->ctxt_index,
+ ((flags & ID1394_START_ON_CYCLE) != 0) /* match enable */,
+ idma_ctrlinfop->start_cycle /* cycle Match */,
+ 1 /* run */, 0 /* wake */);
+
+ /*
+		 * before enabling interrupts, make sure any vestigial interrupt
+ * event (from a previous use) is cleared.
+ */
+ hci1394_ohci_it_intr_clear(soft_statep->ohci,
+ (uint32_t)(0x1 << ctxtp->ctxt_index));
+
+ /* enable interrupts for this IT context */
+ hci1394_ohci_it_intr_enable(soft_statep->ohci,
+ (uint32_t)(0x1 << ctxtp->ctxt_index));
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_start_isoch_dma_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_SUCCESS);
+}
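+
+/*
+ * Example (editorial sketch, not part of the original driver): the tag
+ * mapping above expressed as a shift. The four tagN flags form a one-hot
+ * mask, so for a single 2-bit tag value the result is (1 << default_tag);
+ * hci1394_start_isoch_dma() keeps the explicit if/else chain because a
+ * future t1394 api could request several non-mutually-exclusive tags.
+ */
+#ifdef HCI1394_EXAMPLE_NOTES
+static uint32_t
+example_ir_tag_mask(uint_t default_tag)
+{
+	/* default_tag is 0..3; exactly one of the low 4 bits is set */
+	return ((uint32_t)(0x1 << (default_tag & 0x3)));
+}
+#endif	/* HCI1394_EXAMPLE_NOTES */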
+
+/*
+ * hci1394_update_isoch_dma()
+ *
+ * Returns DDI_SUCCESS, or DDI_FAILURE. If DDI_FAILURE, then resultp
+ * contains the error code.
+ */
+/* ARGSUSED */
+int
+hci1394_update_isoch_dma(void *hal_private, void *hal_isoch_dma_handle,
+ id1394_isoch_dma_updateinfo_t *idma_updateinfop, uint_t flags, int *resultp)
+{
+ hci1394_state_t *soft_statep = (hci1394_state_t *)hal_private;
+ hci1394_iso_ctxt_t *ctxtp;
+ ixl1394_command_t *cur_new_ixlp;
+ ixl1394_command_t *cur_orig_ixlp;
+ int ii;
+ int err = DDI_SUCCESS;
+
+
+ TNF_PROBE_0_DEBUG(hci1394_update_isoch_dma_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* pick up the context pointer from the private idma data */
+ ctxtp = (hci1394_iso_ctxt_t *)hal_isoch_dma_handle;
+
+ ASSERT(hal_private != NULL);
+ ASSERT(ctxtp != NULL);
+ ASSERT(idma_updateinfop != NULL);
+
+ /*
+ * regardless of the type of context (IR or IT), loop through each
+ * command pair (one from new, one from orig), updating the relevant
+ * fields of orig with those from new.
+ */
+ cur_new_ixlp = idma_updateinfop->temp_ixlp;
+ cur_orig_ixlp = idma_updateinfop->orig_ixlp;
+
+ ASSERT(cur_new_ixlp != NULL);
+ ASSERT(cur_orig_ixlp != NULL);
+
+ /* lots of debug trace info */
+ TNF_PROBE_4_DEBUG(hci1394_update_isoch_dma_ctxt_info,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_opaque, ctxt_ptr, ctxtp,
+ tnf_int, ixlcount, idma_updateinfop->ixl_count,
+ tnf_opaque, new_ixl, cur_new_ixlp, tnf_opaque, orig_ixl,
+ cur_orig_ixlp);
+
+ for (ii = 0; (ii < idma_updateinfop->ixl_count) && (err == DDI_SUCCESS);
+ ii++) {
+
+ /* error if hit a null ixl command too soon */
+ if ((cur_new_ixlp == NULL) || (cur_orig_ixlp == NULL)) {
+ *resultp = IXL1394_ECOUNT_MISMATCH;
+ err = DDI_FAILURE;
+
+ TNF_PROBE_3_DEBUG(hci1394_update_isoch_dma_mismatch,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_opaque, new,
+ cur_new_ixlp, tnf_opaque, orig, cur_orig_ixlp,
+ tnf_int, iteration, ii);
+ break;
+ }
+
+ /* proceed with the update */
+ err = hci1394_ixl_update(soft_statep, ctxtp, cur_new_ixlp,
+ cur_orig_ixlp, 0, resultp);
+
+ /* advance new and orig chains */
+ cur_new_ixlp = cur_new_ixlp->next_ixlp;
+ cur_orig_ixlp = cur_orig_ixlp->next_ixlp;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_update_isoch_dma_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (err);
+}
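+
+
+/*
+ * Example (editorial sketch, not part of the original driver): how a
+ * caller pairs a temporary IXL chain with the live one for the update
+ * entry point above. Only the three updateinfo fields referenced in
+ * hci1394_update_isoch_dma() are assumed; everything else is elided and
+ * the function name is hypothetical.
+ */
+#ifdef HCI1394_EXAMPLE_NOTES
+static int
+example_update_one_cmd(void *hal_private, void *idma_hdl,
+    ixl1394_command_t *new_ixlp, ixl1394_command_t *live_ixlp, int *resultp)
+{
+	id1394_isoch_dma_updateinfo_t updateinfo;
+
+	/* one new command updates one original command, in lockstep */
+	updateinfo.temp_ixlp = new_ixlp;	/* source of new field values */
+	updateinfo.orig_ixlp = live_ixlp;	/* live command to update */
+	updateinfo.ixl_count = 1;
+
+	return (hci1394_update_isoch_dma(hal_private, idma_hdl, &updateinfo,
+	    0, resultp));
+}
+#endif	/* HCI1394_EXAMPLE_NOTES */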
+
+
+/*
+ * hci1394_stop_isoch_dma()
+ * Used to stop a "running" isochronous dma resource.
+ * This is a wrapper which calls hci1394_do_stop() to do the actual work,
+ * but NOT to invoke the target's isoch_dma_stopped().
+ */
+/* ARGSUSED */
+void
+hci1394_stop_isoch_dma(void *hal_private, void *hal_isoch_dma_handle,
+ int *result)
+{
+ hci1394_state_t *soft_statep = (hci1394_state_t *)hal_private;
+ hci1394_iso_ctxt_t *ctxtp;
+
+ TNF_PROBE_0_DEBUG(hci1394_stop_isoch_dma_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* pick up the context pointer from the private idma data */
+ ctxtp = (hci1394_iso_ctxt_t *)hal_isoch_dma_handle;
+
+ ASSERT(hal_private != NULL);
+ ASSERT(ctxtp != NULL);
+
+ /* stop the context, do not invoke target's stop callback */
+ hci1394_do_stop(soft_statep, ctxtp, B_FALSE, 0);
+
+ /*
+ * call interrupt processing functions to bring callbacks and
+	 * store_timestamps up to date. Don't care about errors.
+ */
+ hci1394_ixl_interrupt(soft_statep, ctxtp, B_TRUE);
+
+ TNF_PROBE_0_DEBUG(hci1394_stop_isoch_dma_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_do_stop()
+ * Used to stop a "running" isochronous dma resource.
+ * Disables interrupts for the context, clears the context_control register's
+ * RUN bit, and makes sure the ixl is up-to-date with where the hardware is
+ * in the DMA chain.
+ * If do_callback is B_TRUE, the target's isoch_dma_stopped() callback is
+ * invoked. Caller must not hold mutex(es) if calling with
+ * do_callback==B_TRUE, otherwise mutex(es) will be held during callback.
+ * If do_callback is B_FALSE, the isoch_dma_stopped() callback is NOT
+ * invoked and stop_args is ignored.
+ */
+void
+hci1394_do_stop(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
+ boolean_t do_callback, id1394_isoch_dma_stopped_t stop_args)
+{
+ int count;
+ clock_t upto;
+
+ TNF_PROBE_0_DEBUG(hci1394_do_stop_enter, HCI1394_TNF_HAL_STACK_ISOCH,
+ "");
+
+ TNF_PROBE_4_DEBUG(hci1394_do_stop_info,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_opaque, ctxt_ptr, ctxtp,
+ tnf_int, ctxt_index, ctxtp->ctxt_index, tnf_opaque, ctxt_flags,
+ ctxtp->ctxt_flags, tnf_string, reason,
+ (stop_args == ID1394_DONE) ? "DONE":"FAIL");
+
+ /* already stopped? if yes, done, else set state to not-running */
+ mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
+ if ((ctxtp->ctxt_flags & HCI1394_ISO_CTXT_RUNNING) == 0) {
+ mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
+
+ TNF_PROBE_1_DEBUG(hci1394_do_stop_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "context already stopped");
+ return;
+ }
+ ctxtp->ctxt_flags &= ~HCI1394_ISO_CTXT_RUNNING;
+ mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
+
+ /* turn off context control register's run bit */
+ if (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_RECV) {
+ /* RECEIVE */
+
+ /* disable interrupts for this IR context */
+ hci1394_ohci_ir_intr_disable(soft_statep->ohci,
+ (uint32_t)(0x1 << ctxtp->ctxt_index));
+
+ /* turn off run bit */
+ HCI1394_IRCTXT_CTRL_CLR(soft_statep, ctxtp->ctxt_index,
+ 0 /* bffill */, 0 /* iso hdrs */, 0 /* match enbl */,
+ 0 /* multi-chan mode (not implemented) */, 1 /* run */);
+ } else {
+ /* TRANSMIT */
+
+ /* disable interrupts for this IT context */
+ hci1394_ohci_it_intr_disable(soft_statep->ohci,
+ (uint32_t)(0x1 << ctxtp->ctxt_index));
+
+		/* turn off run bit */
+ HCI1394_ITCTXT_CTRL_CLR(soft_statep, ctxtp->ctxt_index,
+ 0 /* match enbl */, 0 /* match */, 1 /* run */);
+ }
+
+ /*
+ * If interrupt is already in progress, wait until it's over.
+ * Otherwise, set flag to prevent the new interrupt.
+ */
+ mutex_enter(&ctxtp->intrprocmutex);
+ ctxtp->intr_flags |= HCI1394_ISO_CTXT_STOP;
+ if (ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
+ upto = ddi_get_lbolt() +
+ drv_usectohz(hci1394_iso_ctxt_stop_intr_timeout_uS);
+ while (ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
+ if (cv_timedwait(&ctxtp->intr_cv, &ctxtp->intrprocmutex,
+ upto) <= 0) {
+ break;
+ }
+ }
+
+ if (ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
+ TNF_PROBE_1(hci1394_do_stop_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "",
+ tnf_string, msg, "intr completion timeout");
+ }
+ }
+ mutex_exit(&ctxtp->intrprocmutex);
+
+ /* Wait until "active" bit is cleared before continuing */
+ count = 0;
+ while (count < hci1394_iso_ctxt_stop_delay_uS) {
+ /* Has the "active" bit gone low yet? */
+ if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0)
+ break;
+
+ /*
+ * The context did not stop yet. Wait 1us, increment the
+ * count and try again.
+ */
+ drv_usecwait(1);
+ count++;
+ }
+
+ /* Check to see if we timed out or not */
+ if (count >= hci1394_iso_ctxt_stop_delay_uS) {
+ h1394_error_detected(soft_statep->drvinfo.di_sl_private,
+ H1394_SELF_INITIATED_SHUTDOWN, NULL);
+ cmn_err(CE_WARN, "hci1394(%d): driver shutdown: "
+ "unable to stop isoch context",
+ soft_statep->drvinfo.di_instance);
+ hci1394_shutdown(soft_statep->drvinfo.di_dip);
+
+ TNF_PROBE_1_DEBUG(hci1394_do_stop_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "context timed out trying to stop");
+ return;
+ }
+
+ /*
+ * invoke callback as directed. Note that the CTXT_INCALL flag is NOT
+ * needed here. That flag is only used when we have to drop a mutex
+ * that we want to grab back again. We're not doing that here.
+ */
+ if (do_callback == B_TRUE) {
+ if (ctxtp->isoch_dma_stopped != NULL) {
+ ctxtp->isoch_dma_stopped(
+ (struct isoch_dma_handle *)ctxtp,
+ ctxtp->idma_evt_arg, stop_args);
+ }
+ }
+ TNF_PROBE_0_DEBUG(hci1394_do_stop_exit, HCI1394_TNF_HAL_STACK_ISOCH,
+ "");
+}
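+
+/*
+ * Example (editorial sketch, not part of the original driver): the
+ * bounded-wait idiom used in hci1394_do_stop() above, in generic form.
+ * A deadline is computed once via ddi_get_lbolt()/drv_usectohz() and
+ * cv_timedwait() is retried while the condition still holds, since it
+ * can wake up without the condition changing; -1 means the deadline
+ * passed. The mutex/cv/flag parameter names are hypothetical.
+ */
+#ifdef HCI1394_EXAMPLE_NOTES
+static boolean_t
+example_wait_flag_clear(kmutex_t *mp, kcondvar_t *cvp, uint_t *flagsp,
+    uint_t flag, clock_t timeout_us)
+{
+	clock_t upto = ddi_get_lbolt() + drv_usectohz(timeout_us);
+
+	ASSERT(MUTEX_HELD(mp));
+	while ((*flagsp & flag) != 0) {
+		if (cv_timedwait(cvp, mp, upto) == -1) {
+			break;	/* timed out; caller re-checks the flag */
+		}
+	}
+
+	return ((*flagsp & flag) == 0);
+}
+#endif	/* HCI1394_EXAMPLE_NOTES */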
+
+/*
+ * hci1394_free_isoch_dma()
+ * Used to free up usage of an isochronous context and any other
+ * system resources acquired during IXL compilation.
+ * This does NOT free up the IXL program and its data buffers; that is
+ * the target driver's responsibility.
+ */
+void
+hci1394_free_isoch_dma(void *hal_private, void *hal_isoch_dma_handle)
+{
+ hci1394_state_t *soft_statep = (hci1394_state_t *)hal_private;
+ hci1394_iso_ctxt_t *ctxtp;
+ hci1394_isoch_t *isochp;
+
+ TNF_PROBE_0_DEBUG(hci1394_free_isoch_dma_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* pick up the context pointer from the private idma data */
+ ctxtp = (hci1394_iso_ctxt_t *)hal_isoch_dma_handle;
+
+ ASSERT(soft_statep);
+ ASSERT(ctxtp);
+
+ isochp = soft_statep->isoch;
+
+ /* lots of debug trace info */
+ TNF_PROBE_4_DEBUG(hci1394_free_isoch_dma_ctxt_info,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_opaque, ctxt_ptr, ctxtp,
+ tnf_int, ctxt_index, ctxtp->ctxt_index, tnf_opaque, ctxt_flags,
+ ctxtp->ctxt_flags, tnf_opaque, first_ixl, ctxtp->ixl_firstp);
+
+ mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
+
+ /* delete xfer_ctl structs and pages of allocated hci_desc memory */
+ hci1394_ixl_cleanup(soft_statep, ctxtp);
+
+ /*
+ * free context. no need to determine if xmit or recv. clearing of recv
+ * flags is harmless for xmit.
+ */
+ ctxtp->ctxt_flags &= ~(HCI1394_ISO_CTXT_INUSE |
+ HCI1394_ISO_CTXT_BFFILL | HCI1394_ISO_CTXT_RHDRS);
+
+ /*
+ * Update count of allocated isoch dma (and disable interrupts
+ * if necessary)
+ */
+ ASSERT(isochp->isoch_dma_alloc_cnt > 0);
+ isochp->isoch_dma_alloc_cnt--;
+ if (isochp->isoch_dma_alloc_cnt == 0) {
+ hci1394_ohci_intr_disable(soft_statep->ohci,
+ OHCI_INTR_CYC_LOST | OHCI_INTR_CYC_INCONSISTENT);
+ }
+
+ mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_free_isoch_dma_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_isoch_recv_count_get()
+ * returns the number of supported isoch receive contexts.
+ */
+int
+hci1394_isoch_recv_count_get(hci1394_isoch_handle_t isoch_hdl)
+{
+ ASSERT(isoch_hdl != NULL);
+ return (isoch_hdl->ctxt_recv_count);
+}
+
+/*
+ * hci1394_isoch_recv_ctxt_get()
+ * given a context index, returns its isoch receive context struct
+ */
+hci1394_iso_ctxt_t *
+hci1394_isoch_recv_ctxt_get(hci1394_isoch_handle_t isoch_hdl, int num)
+{
+ ASSERT(isoch_hdl != NULL);
+ return (&isoch_hdl->ctxt_recv[num]);
+}
+
+/*
+ * hci1394_isoch_xmit_count_get()
+ * returns the number of supported isoch transmit contexts.
+ */
+int
+hci1394_isoch_xmit_count_get(hci1394_isoch_handle_t isoch_hdl)
+{
+ ASSERT(isoch_hdl != NULL);
+ return (isoch_hdl->ctxt_xmit_count);
+}
+
+/*
+ * hci1394_isoch_xmit_ctxt_get()
+ * given a context index, returns its isoch transmit context struct
+ */
+hci1394_iso_ctxt_t *
+hci1394_isoch_xmit_ctxt_get(hci1394_isoch_handle_t isoch_hdl, int num)
+{
+ ASSERT(isoch_hdl != NULL);
+ return (&isoch_hdl->ctxt_xmit[num]);
+}
+
+/*
+ * hci1394_isoch_error_ints_enable()
+ * after bus reset, reenable CYCLE_LOST and CYCLE_INCONSISTENT
+ * interrupts (if necessary).
+ */
+void
+hci1394_isoch_error_ints_enable(hci1394_state_t *soft_statep)
+{
+ ASSERT(soft_statep);
+
+ TNF_PROBE_0_DEBUG(hci1394_isoch_error_ints_enable_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
+
+ if (soft_statep->isoch->isoch_dma_alloc_cnt != 0) {
+ soft_statep->isoch->cycle_lost_thresh.delta_t_counter = 0;
+ soft_statep->isoch->cycle_incon_thresh.delta_t_counter = 0;
+ hci1394_ohci_intr_clear(soft_statep->ohci,
+ OHCI_INTR_CYC_LOST | OHCI_INTR_CYC_INCONSISTENT);
+ hci1394_ohci_intr_enable(soft_statep->ohci,
+ OHCI_INTR_CYC_LOST | OHCI_INTR_CYC_INCONSISTENT);
+ }
+ mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_isoch_error_ints_enable_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_isr.c b/usr/src/uts/common/io/1394/adapters/hci1394_isr.c
new file mode 100644
index 0000000000..23001f1eed
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_isr.c
@@ -0,0 +1,915 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 1999-2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_isr.c
+ * Contains the core interrupt handling logic for the hci1394 driver.
+ * It also contains the routine which sets up the initial interrupt
+ * mask during HW init.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/adapters/hci1394.h>
+
+
+static uint_t hci1394_isr(caddr_t parm);
+static void hci1394_isr_bus_reset(hci1394_state_t *soft_state);
+static void hci1394_isr_self_id(hci1394_state_t *soft_state);
+static void hci1394_isr_isoch_ir(hci1394_state_t *soft_state);
+static void hci1394_isr_isoch_it(hci1394_state_t *soft_state);
+static void hci1394_isr_atreq_complete(hci1394_state_t *soft_state);
+static void hci1394_isr_arresp(hci1394_state_t *soft_state);
+static void hci1394_isr_arreq(hci1394_state_t *soft_state);
+static void hci1394_isr_atresp_complete(hci1394_state_t *soft_state);
+
+
+/*
+ * hci1394_isr_init()
+ *    Get the iblock_cookie and make sure we are not using a high level
+ *    interrupt; the handler itself is registered in
+ *    hci1394_isr_handler_init().
+ */
+int
+hci1394_isr_init(hci1394_state_t *soft_state)
+{
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_init_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* This driver does not support running at a high level interrupt */
+ status = ddi_intr_hilevel(soft_state->drvinfo.di_dip, 0);
+ if (status != 0) {
+ TNF_PROBE_1(hci1394_isr_init_hli_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "High Level interrupts not supported");
+ TNF_PROBE_0_DEBUG(hci1394_isr_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* There should only be 1 1394 interrupt for an OpenHCI adapter */
+ status = ddi_get_iblock_cookie(soft_state->drvinfo.di_dip, 0,
+ &soft_state->drvinfo.di_iblock_cookie);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_isr_init_gic_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_isr_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_isr_init_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_isr_fini()
+ * un-register our interrupt service routine.
+ */
+/* ARGSUSED */
+void
+hci1394_isr_fini(hci1394_state_t *soft_state)
+{
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_fini_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* nothing to do right now */
+
+ TNF_PROBE_0_DEBUG(hci1394_isr_fini_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_isr_handler_init()
+ * register our interrupt service routine.
+ */
+int
+hci1394_isr_handler_init(hci1394_state_t *soft_state)
+{
+ int status;
+
+ ASSERT(soft_state != NULL);
+
+ /* Initialize interrupt handler */
+ status = ddi_add_intr(soft_state->drvinfo.di_dip, 0, NULL, NULL,
+ hci1394_isr, (caddr_t)soft_state);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_isr_handler_init_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_isr_handler_fini()
+ * un-register our interrupt service routine.
+ */
+void
+hci1394_isr_handler_fini(hci1394_state_t *soft_state)
+{
+ ASSERT(soft_state != NULL);
+
+ /* Remove interrupt handler */
+ ddi_remove_intr(soft_state->drvinfo.di_dip, 0,
+ soft_state->drvinfo.di_iblock_cookie);
+}
+
+
+/*
+ * hci1394_isr_mask_setup()
+ * Setup the initial interrupt mask for OpenHCI. These are the interrupts
+ * that our interrupt handler is expected to handle.
+ */
+void
+hci1394_isr_mask_setup(hci1394_state_t *soft_state)
+{
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_mask_setup_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ /* start off with all interrupts cleared/disabled */
+ hci1394_ohci_ir_intr_disable(soft_state->ohci, 0xFFFFFFFF);
+ hci1394_ohci_ir_intr_clear(soft_state->ohci, 0xFFFFFFFF);
+ hci1394_ohci_it_intr_disable(soft_state->ohci, 0xFFFFFFFF);
+ hci1394_ohci_it_intr_clear(soft_state->ohci, 0xFFFFFFFF);
+ hci1394_ohci_intr_disable(soft_state->ohci, 0xFFFFFFFF);
+ hci1394_ohci_intr_clear(soft_state->ohci, 0xFFFFFFFF);
+
+ /* Setup Interrupt Mask Register */
+ hci1394_ohci_intr_enable(soft_state->ohci,
+ (OHCI_INTR_UNRECOVERABLE_ERR | OHCI_INTR_CYC_TOO_LONG |
+ OHCI_INTR_BUS_RESET | OHCI_INTR_SELFID_CMPLT |
+ OHCI_INTR_REQ_TX_CMPLT | OHCI_INTR_RESP_TX_CMPLT |
+ OHCI_INTR_RQPKT | OHCI_INTR_RSPKT | OHCI_INTR_ISOCH_TX |
+ OHCI_INTR_ISOCH_RX | OHCI_INTR_POST_WR_ERR | OHCI_INTR_PHY |
+ OHCI_INTR_LOCK_RESP_ERR));
+
+ TNF_PROBE_0_DEBUG(hci1394_isr_mask_setup_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_isr()
+ * Core interrupt handler. Every interrupt enabled in
+ * hci1394_isr_mask_setup() should be covered here. There may be other
+ * interrupts supported in here even if they are not initially enabled
+ *    (like OHCI_INTR_CYC_64_SECS) since they may be enabled later (e.g. due
+ *    to a CSR register write).
+ */
+static uint_t
+hci1394_isr(caddr_t parm)
+{
+ hci1394_state_t *soft_state;
+ h1394_posted_wr_err_t posted_wr_err;
+ uint32_t interrupt_event;
+ uint_t status;
+
+
+ status = DDI_INTR_UNCLAIMED;
+ soft_state = (hci1394_state_t *)parm;
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Get all of the enabled 1394 interrupts which are currently
+ * asserted.
+ */
+ interrupt_event = hci1394_ohci_intr_asserted(soft_state->ohci);
+ do {
+ /* handle the asserted interrupts */
+ if (interrupt_event & OHCI_INTR_BUS_RESET) {
+ hci1394_isr_bus_reset(soft_state);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_SELFID_CMPLT) {
+ hci1394_isr_self_id(soft_state);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_ISOCH_TX) {
+ hci1394_isr_isoch_it(soft_state);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_ISOCH_RX) {
+ hci1394_isr_isoch_ir(soft_state);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_REQ_TX_CMPLT) {
+ hci1394_isr_atreq_complete(soft_state);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_RSPKT) {
+ hci1394_isr_arresp(soft_state);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_RQPKT) {
+ hci1394_isr_arreq(soft_state);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_RESP_TX_CMPLT) {
+ hci1394_isr_atresp_complete(soft_state);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_CYC_64_SECS) {
+ hci1394_ohci_isr_cycle64seconds(soft_state->ohci);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_UNRECOVERABLE_ERR) {
+ h1394_error_detected(soft_state->drvinfo.di_sl_private,
+ H1394_SELF_INITIATED_SHUTDOWN, NULL);
+ cmn_err(CE_WARN, "hci1394(%d): driver shutdown: "
+ "unrecoverable error interrupt detected",
+ soft_state->drvinfo.di_instance);
+ hci1394_shutdown(soft_state->drvinfo.di_dip);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_CYC_LOST) {
+ hci1394_isoch_cycle_lost(soft_state);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_CYC_INCONSISTENT) {
+ hci1394_isoch_cycle_inconsistent(soft_state);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_CYC_TOO_LONG) {
+ hci1394_ohci_intr_clear(soft_state->ohci,
+ OHCI_INTR_CYC_TOO_LONG);
+ /* clear cycle master bit in csr state register */
+ hci1394_csr_state_bclr(soft_state->csr,
+ IEEE1394_CSR_STATE_CMSTR);
+ h1394_error_detected(soft_state->drvinfo.di_sl_private,
+ H1394_CYCLE_TOO_LONG, NULL);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_POST_WR_ERR) {
+ hci1394_ohci_postwr_addr(soft_state->ohci,
+ &posted_wr_err.addr);
+ h1394_error_detected(soft_state->drvinfo.di_sl_private,
+ H1394_POSTED_WR_ERR, &posted_wr_err);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_PHY) {
+ hci1394_ohci_isr_phy(soft_state->ohci);
+ status = DDI_INTR_CLAIMED;
+ }
+ if (interrupt_event & OHCI_INTR_LOCK_RESP_ERR) {
+ hci1394_ohci_intr_clear(soft_state->ohci,
+ OHCI_INTR_LOCK_RESP_ERR);
+ h1394_error_detected(soft_state->drvinfo.di_sl_private,
+ H1394_LOCK_RESP_ERR, NULL);
+ status = DDI_INTR_CLAIMED;
+ }
+
+ /*
+		 * Check for the self-id-complete interrupt disappearing.
+		 * There is a chance in OpenHCI that it will assert the selfid
+		 * interrupt and then take it away. We will look for this case
+		 * and claim it just in case. We could possibly claim an
+		 * interrupt that's not ours; we would have to be in the
+		 * middle of a bus reset and a bunch of other weird stuff
+		 * would have to align. It should not hurt anything if we do.
+		 *
+		 * This will very rarely happen, if ever. We still have
+		 * to handle the case, just in case. OpenHCI 1.1 should fix
+		 * this problem.
+ */
+ if ((status == DDI_INTR_UNCLAIMED) &&
+ (hci1394_state(&soft_state->drvinfo) ==
+ HCI1394_BUS_RESET)) {
+ if (soft_state->drvinfo.di_gencnt !=
+ hci1394_ohci_current_busgen(soft_state->ohci)) {
+ TNF_PROBE_0(hci1394_isr_busgen_claim,
+ HCI1394_TNF_HAL, "");
+ status = DDI_INTR_CLAIMED;
+ }
+ }
+
+ /*
+ * See if any of the enabled 1394 interrupts have been asserted
+ * since we first checked.
+ */
+ interrupt_event = hci1394_ohci_intr_asserted(
+ soft_state->ohci);
+ } while (interrupt_event != 0);
+
+ TNF_PROBE_0_DEBUG(hci1394_isr_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (status);
+}
+
+
+/*
+ * hci1394_isr_bus_reset()
+ * Process a 1394 bus reset. This signifies that a bus reset has started.
+ * A bus reset will not be complete until a selfid complete interrupt
+ * comes in.
+ */
+static void
+hci1394_isr_bus_reset(hci1394_state_t *soft_state)
+{
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_bus_reset_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+	 * Set the driver state to reset. If we cannot, we have been shut
+	 * down. The only way we can get in this code is if we have a
+	 * multi-processor machine and the HAL is shut down by one processor
+	 * running in base context while this interrupt handler runs on
+	 * another processor.
+ * We will disable all interrupts and just return. We shouldn't have
+ * to disable the interrupts, but we will just in case.
+ */
+ status = hci1394_state_set(&soft_state->drvinfo, HCI1394_BUS_RESET);
+ if (status != DDI_SUCCESS) {
+ hci1394_ohci_intr_master_disable(soft_state->ohci);
+ return;
+ }
+
+ /*
+	 * Save away the reset generation count so we can detect a
+	 * self-id-complete interrupt which disappears from the event
+	 * register. This is discussed in more detail in hci1394_isr().
+ */
+ soft_state->drvinfo.di_gencnt =
+ hci1394_ohci_current_busgen(soft_state->ohci);
+
+ soft_state->drvinfo.di_stats.st_bus_reset_count++;
+
+ /*
+ * Mask off busReset until SelfIdComplete comes in. The bus reset
+ * interrupt will be asserted until the SelfIdComplete interrupt
+ * comes in (i.e. you cannot clear the interrupt until a SelfIdComplete
+	 * interrupt arrives). Therefore, we disable the interrupt via its mask
+	 * so we don't get stuck in the ISR indefinitely.
+ */
+ hci1394_ohci_intr_disable(soft_state->ohci, OHCI_INTR_BUS_RESET);
+
+ /* Reset the ATREQ and ATRESP Q's */
+ hci1394_async_atreq_reset(soft_state->async);
+ hci1394_async_atresp_reset(soft_state->async);
+
+ /* Inform Services Layer about Bus Reset */
+ h1394_bus_reset(soft_state->drvinfo.di_sl_private,
+ (void **)&soft_state->sl_selfid_buf);
+
+ TNF_PROBE_0_DEBUG(hci1394_isr_bus_reset_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_isr_self_id()
+ * Process the selfid complete interrupt. The bus reset has completed
+ *    and the 1394 HW has finished its bus enumeration. The SW needs to
+ * see what's changed and handle any hotplug conditions.
+ */
+static void
+hci1394_isr_self_id(hci1394_state_t *soft_state)
+{
+ int status;
+ uint_t node_id;
+ uint_t selfid_size;
+ uint_t quadlet_count;
+ uint_t index;
+ uint32_t *selfid_buf_p;
+ boolean_t selfid_error;
+ boolean_t nodeid_error;
+ boolean_t saw_error = B_FALSE;
+ uint_t phy_status;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_self_id_enter, HCI1394_TNF_HAL_STACK, "");
+
+ soft_state->drvinfo.di_stats.st_selfid_count++;
+
+ /*
+ * check for the bizarre case that we got both a bus reset and self id
+ * complete after checking for a bus reset
+ */
+ if (hci1394_state(&soft_state->drvinfo) != HCI1394_BUS_RESET) {
+ hci1394_isr_bus_reset(soft_state);
+ }
+
+ /*
+	 * Clear any PHY error status bits that are set. The PHY status bits
+	 * may always be set (e.g. we removed cable power) so we do not want
+	 * to clear them when we handle the interrupt. We will clear them on
+	 * every selfid complete interrupt, so in the worst case we will get
+	 * one PHY event interrupt per bus reset.
+ */
+ status = hci1394_ohci_phy_read(soft_state->ohci, 5, &phy_status);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_isr_self_id_pr_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ } else {
+ phy_status |= OHCI_PHY_LOOP_ERR | OHCI_PHY_PWRFAIL_ERR |
+ OHCI_PHY_TIMEOUT_ERR | OHCI_PHY_PORTEVT_ERR;
+ status = hci1394_ohci_phy_write(soft_state->ohci, 5,
+ phy_status);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_isr_self_id_pw_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ } else {
+ /*
+ * Re-enable PHY interrupt. We disable the PHY interrupt
+ * when we get one so that we do not get stuck in the
+ * ISR.
+ */
+ hci1394_ohci_intr_enable(soft_state->ohci,
+ OHCI_INTR_PHY);
+ }
+ }
+
+ /* See if either AT active bit is set */
+ if (hci1394_ohci_at_active(soft_state->ohci) == B_TRUE) {
+ TNF_PROBE_1(hci1394_isr_self_id_as_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "AT ACTIVE still set");
+ saw_error = B_TRUE;
+ }
+
+ /* Clear busReset and selfIdComplete interrupts */
+ hci1394_ohci_intr_clear(soft_state->ohci, (OHCI_INTR_BUS_RESET |
+ OHCI_INTR_SELFID_CMPLT));
+
+ /* Read node info and test for Invalid Node ID */
+ hci1394_ohci_nodeid_info(soft_state->ohci, &node_id, &nodeid_error);
+ if (nodeid_error == B_TRUE) {
+ TNF_PROBE_1(hci1394_isr_self_id_ni_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "saw invalid NodeID");
+ saw_error = B_TRUE;
+ }
+
+ /* Sync Selfid Buffer */
+ hci1394_ohci_selfid_sync(soft_state->ohci);
+
+ /* store away selfid info */
+ hci1394_ohci_selfid_info(soft_state->ohci,
+ &soft_state->drvinfo.di_gencnt, &selfid_size, &selfid_error);
+
+ /* Test for selfid error */
+ if (selfid_error == B_TRUE) {
+ TNF_PROBE_1(hci1394_isr_self_id_si_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "saw invalid SelfID");
+ saw_error = B_TRUE;
+ }
+
+ /*
+ * selfid size could be 0 if a bus reset has occurred. If this occurs,
+ * we should have another selfid int coming later.
+ */
+ if ((saw_error == B_FALSE) && (selfid_size == 0)) {
+ TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return;
+ }
+
+ /*
+ * make sure generation count in buffer matches generation
+ * count in register.
+ */
+ if (hci1394_ohci_selfid_buf_current(soft_state->ohci) == B_FALSE) {
+ TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return;
+ }
+
+ /*
+	 * Skip over the first quadlet in the selfid buffer; this is OpenHCI
+	 * specific data.
+ */
+ selfid_size = selfid_size - IEEE1394_QUADLET;
+ quadlet_count = selfid_size >> 2;
+
+ /* Copy selfid buffer to Services Layer buffer */
+ for (index = 0; index < quadlet_count; index++) {
+ hci1394_ohci_selfid_read(soft_state->ohci, index + 1,
+ &soft_state->sl_selfid_buf[index]);
+ }
+
+ /*
+ * Put our selfID info into the Services Layer's selfid buffer if we
+ * have a 1394-1995 PHY.
+ */
+ if (soft_state->halinfo.phy == H1394_PHY_1995) {
+ selfid_buf_p = (uint32_t *)(
+ (uintptr_t)soft_state->sl_selfid_buf +
+ (uintptr_t)selfid_size);
+ status = hci1394_ohci_phy_info(soft_state->ohci,
+ &selfid_buf_p[0]);
+ if (status != DDI_SUCCESS) {
+ /*
+ * If we fail reading from PHY, put invalid data into
+ * the selfid buffer so the SL will reset the bus again.
+ */
+ TNF_PROBE_0(hci1394_isr_self_id_pi_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ selfid_buf_p[0] = 0xFFFFFFFF;
+ selfid_buf_p[1] = 0xFFFFFFFF;
+ } else {
+ selfid_buf_p[1] = ~selfid_buf_p[0];
+ }
+ selfid_size = selfid_size + 8;
+ }
+
+ /* Flush out async DMA Q's */
+ hci1394_async_flush(soft_state->async);
+
+ /*
+ * Make sure generation count is still valid. i.e. we have not gotten
+ * another bus reset since the last time we checked. If we have gotten
+ * another bus reset, we should have another selfid interrupt coming.
+ */
+ if (soft_state->drvinfo.di_gencnt !=
+ hci1394_ohci_current_busgen(soft_state->ohci)) {
+ TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return;
+ }
+
+ /*
+	 * do whatever CSR register processing needs to be done.
+ */
+ hci1394_csr_bus_reset(soft_state->csr);
+
+ /*
+ * do whatever management may be necessary for the CYCLE_LOST and
+ * CYCLE_INCONSISTENT interrupts.
+ */
+ hci1394_isoch_error_ints_enable(soft_state);
+
+ /*
+ * See if we saw an error. If we did, tell the services layer that we
+ * finished selfid processing and give them an illegal selfid buffer
+ * size of 0. The Services Layer will try to reset the bus again to
+ * see if we can recover from this problem. It will threshold after
+ * a finite number of errors.
+ */
+ if (saw_error == B_TRUE) {
+ h1394_self_ids(soft_state->drvinfo.di_sl_private,
+ soft_state->sl_selfid_buf, 0, node_id,
+ soft_state->drvinfo.di_gencnt);
+
+ /*
+ * Take ourself out of Bus Reset processing mode
+ *
+ * Set the driver state to normal. If we cannot, we have been
+ * shutdown. The only way we can get in this code is if we have
+ * a multi-processor machine and the HAL is shutdown by one
+ * processor running in base context while this interrupt
+ * handler runs in another processor. We will disable all
+ * interrupts and just return. We shouldn't have to disable
+ * the interrupts, but we will just in case.
+ */
+ status = hci1394_state_set(&soft_state->drvinfo,
+ HCI1394_NORMAL);
+ if (status != DDI_SUCCESS) {
+ hci1394_ohci_intr_master_disable(soft_state->ohci);
+ return;
+ }
+ } else if (IEEE1394_NODE_NUM(node_id) != 63) {
+ /*
+ * Notify services layer about self-id-complete. Don't notify
+ * the services layer if there are too many devices on the bus.
+ */
+ h1394_self_ids(soft_state->drvinfo.di_sl_private,
+ soft_state->sl_selfid_buf, selfid_size,
+ node_id, soft_state->drvinfo.di_gencnt);
+
+ /*
+ * Take ourself out of Bus Reset processing mode
+ *
+ * Set the driver state to normal. If we cannot, we have been
+ * shutdown. The only way we can get in this code is if we have
+ * a multi-processor machine and the HAL is shutdown by one
+ * processor running in base context while this interrupt
+ * handler runs in another processor. We will disable all
+ * interrupts and just return. We shouldn't have to disable
+ * the interrupts, but we will just in case.
+ */
+ status = hci1394_state_set(&soft_state->drvinfo,
+ HCI1394_NORMAL);
+ if (status != DDI_SUCCESS) {
+ hci1394_ohci_intr_master_disable(soft_state->ohci);
+ return;
+ }
+ } else {
+ cmn_err(CE_NOTE, "hci1394(%d): Too many devices on the 1394 "
+ "bus", soft_state->drvinfo.di_instance);
+ }
+
+ /* enable bus reset interrupt */
+ hci1394_ohci_intr_enable(soft_state->ohci, OHCI_INTR_BUS_RESET);
+
+ TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_isr_isoch_ir()
+ * Process each isoch recv context which has its interrupt asserted. The
+ * interrupt will be asserted when an isoch recv descriptor with the
+ *    interrupt bits enabled has finished being processed.
+ */
+static void
+hci1394_isr_isoch_ir(hci1394_state_t *soft_state)
+{
+ uint32_t i;
+ uint32_t mask = 0x00000001;
+ uint32_t ev;
+ int num_ir_contexts;
+ hci1394_iso_ctxt_t *ctxtp;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_isoch_ir_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ num_ir_contexts = hci1394_isoch_recv_count_get(soft_state->isoch);
+
+ /*
+	 * The main isochRx int is not clearable; it is automatically
+	 * cleared by the hw when the ir_intr_event is cleared.
+ */
+ /* loop until no more IR events */
+ while ((ev = hci1394_ohci_ir_intr_asserted(soft_state->ohci)) != 0) {
+
+ /* clear the events we just learned about */
+ hci1394_ohci_ir_intr_clear(soft_state->ohci, ev);
+
+ /* for each interrupting IR context, process the interrupt */
+ for (i = 0; i < num_ir_contexts; i++) {
+ /*
+ * if the intr bit is on for a context,
+ * call xmit/recv common processing code
+ */
+ if (ev & mask) {
+ ctxtp = hci1394_isoch_recv_ctxt_get(
+ soft_state->isoch, i);
+ hci1394_ixl_interrupt(soft_state, ctxtp,
+ B_FALSE);
+ }
+ mask <<= 1;
+ }
+ }
+ TNF_PROBE_0_DEBUG(hci1394_isr_isoch_ir_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_isr_isoch_it()
+ * Process each isoch transmit context which has its interrupt asserted. The
+ * interrupt will be asserted when an isoch transmit descriptor with the
+ *    interrupt bit set has finished being processed.
+ */
+static void
+hci1394_isr_isoch_it(hci1394_state_t *soft_state)
+{
+ uint32_t i;
+ uint32_t mask = 0x00000001;
+ uint32_t ev;
+ int num_it_contexts;
+ hci1394_iso_ctxt_t *ctxtp;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_isoch_it_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ num_it_contexts = hci1394_isoch_xmit_count_get(soft_state->isoch);
+
+ /*
+	 * The main isochTx int is not clearable; it is automatically
+	 * cleared by the hw when the it_intr_event is cleared.
+ */
+
+ /* loop until no more IT events */
+ while ((ev = hci1394_ohci_it_intr_asserted(soft_state->ohci)) != 0) {
+
+ /* clear the events we just learned about */
+ hci1394_ohci_it_intr_clear(soft_state->ohci, ev);
+
+		/* for each interrupting IT context, process the interrupt */
+ for (i = 0; i < num_it_contexts; i++) {
+ /*
+ * if the intr bit is on for a context,
+ * call xmit/recv common processing code
+ */
+ if (ev & mask) {
+ ctxtp = hci1394_isoch_xmit_ctxt_get(
+ soft_state->isoch, i);
+ hci1394_ixl_interrupt(soft_state, ctxtp,
+ B_FALSE);
+ }
+ mask <<= 1;
+ }
+ }
+ TNF_PROBE_0_DEBUG(hci1394_isr_isoch_it_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_isr_atreq_complete()
+ * Process all completed requests that we have sent out (i.e. HW gave us
+ * an ack).
+ */
+static void
+hci1394_isr_atreq_complete(hci1394_state_t *soft_state)
+{
+ boolean_t request_available;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_atreq_complete_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ hci1394_ohci_intr_clear(soft_state->ohci, OHCI_INTR_REQ_TX_CMPLT);
+
+ /*
+ * Processes all ack'd AT requests. If the request is pended, it is
+	 * considered complete relative to the atreq engine. AR response
+ * processing will make sure we track the response.
+ */
+ do {
+ /*
+ * Process a single request. Do not flush Q. That is only
+ * done during bus reset processing.
+ */
+ status = hci1394_async_atreq_process(soft_state->async, B_FALSE,
+ &request_available);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_isr_atreq_complete_pr_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ }
+ } while (request_available == B_TRUE);
+
+ TNF_PROBE_0_DEBUG(hci1394_isr_atreq_complete_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_isr_arresp()
+ *    Process all responses that have come in off the bus and send them up to
+ * the services layer. We send out a request on the bus (atreq) and some time
+ * later a response comes in. We send this response up to the services
+ * layer.
+ */
+static void
+hci1394_isr_arresp(hci1394_state_t *soft_state)
+{
+ boolean_t response_available;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_arresp_enter, HCI1394_TNF_HAL_STACK, "");
+
+ hci1394_ohci_intr_clear(soft_state->ohci, OHCI_INTR_RSPKT);
+
+ /*
+	 * Process all responses that have been received. If more responses
+	 * come in we will stay in the interrupt handler and re-run this
+	 * routine. It is possible that we will call
+	 * hci1394_async_arresp_process() even though there are no more AR
+	 * responses to process. This would be because we have processed
+	 * them earlier on (i.e. we cleared the interrupt, then got another
+	 * response and processed it; the interrupt would still be pending).
+ */
+ do {
+ status = hci1394_async_arresp_process(soft_state->async,
+ &response_available);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_isr_arresp_pr_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ }
+ } while (response_available == B_TRUE);
+
+ TNF_PROBE_0_DEBUG(hci1394_isr_arresp_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_isr_arreq()
+ *    Process all requests that have come in off the bus and send them up to
+ * the services layer.
+ */
+static void
+hci1394_isr_arreq(hci1394_state_t *soft_state)
+{
+ boolean_t request_available;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_arreq_enter, HCI1394_TNF_HAL_STACK, "");
+
+ hci1394_ohci_intr_clear(soft_state->ohci, OHCI_INTR_RQPKT);
+
+ /*
+ * Process all requests that have been received. It is possible that we
+ * will call hci1394_async_arreq_process() even though there are no
+	 * more requests to process. This would be because we have processed
+	 * them earlier on (i.e. we cleared the interrupt, got another request
+	 * and processed it; the interrupt would still be pending).
+ */
+ do {
+ status = hci1394_async_arreq_process(soft_state->async,
+ &request_available);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_isr_arreq_pr_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ }
+ } while (request_available == B_TRUE);
+
+ TNF_PROBE_0_DEBUG(hci1394_isr_arreq_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_isr_atresp_complete()
+ * Process all completed responses that we have sent out (i.e. HW gave us
+ * an ack). We get in a request off the bus (arreq) and send it up to the
+ *    services layer; they send down a response to that request some time
+ * later. This interrupt signifies that the HW is done with the response.
+ * (i.e. it sent it out or failed it)
+ */
+static void
+hci1394_isr_atresp_complete(hci1394_state_t *soft_state)
+{
+ boolean_t response_available;
+ int status;
+
+
+ ASSERT(soft_state != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_isr_atresp_complete_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ hci1394_ohci_intr_clear(soft_state->ohci, OHCI_INTR_RESP_TX_CMPLT);
+
+ /*
+	 * Processes all ack'd AT responses. It is possible that we will call
+	 * hci1394_async_atresp_process() even though there are no more
+	 * responses to process. This would be because we have processed
+	 * them earlier on (i.e. we cleared the interrupt, then got another
+	 * response and processed it; the interrupt would still be pending).
+ */
+ do {
+ /*
+ * Process a single response. Do not flush Q. That is only
+ * done during bus reset processing.
+ */
+ status = hci1394_async_atresp_process(soft_state->async,
+ B_FALSE, &response_available);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_isr_atresp_complete_pr_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ }
+ } while (response_available == B_TRUE);
+
+ TNF_PROBE_0_DEBUG(hci1394_isr_atresp_complete_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_ixl_comp.c b/usr/src/uts/common/io/1394/adapters/hci1394_ixl_comp.c
new file mode 100644
index 0000000000..130fd439f8
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_ixl_comp.c
@@ -0,0 +1,2848 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_ixl_comp.c
+ * Isochronous IXL Compiler.
+ * The compiler converts the general hardware independent IXL command
+ * blocks into OpenHCI DMA descriptors.
+ */
+
+#include <sys/kmem.h>
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/ixl1394.h>
+#include <sys/1394/adapters/hci1394.h>
+
+/* compiler allocation size for DMA descriptors. 8000 is 500 descriptors */
+#define HCI1394_IXL_PAGESIZE 8000
+
+/* invalid opcode */
+#define IXL1394_OP_INVALID (0 | IXL1394_OPTY_OTHER)
+
+/*
+ * maximum number of interrupts permitted for a single context in which
+ * the context does not advance to the next DMA descriptor. Interrupts are
+ * triggered by 1) hardware completing a DMA descriptor block which has the
+ * interrupt (i) bits set, 2) a cycle_inconsistent interrupt, or 3) a cycle_lost
+ * interrupt. Once the max is reached, the HCI1394_IXL_INTR_NOADV error is
+ * returned.
+ */
+int hci1394_ixl_max_noadv_intrs = 8;
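+
+/*
+ * Editorial note (not part of the original source): as a global patchable,
+ * this limit can be tuned without rebuilding the driver, e.g. through the
+ * standard /etc/system mechanism (the value below is only an example):
+ *
+ *	set hci1394:hci1394_ixl_max_noadv_intrs = 16
+ *
+ * hci1394_compile_ixl_init() copies the value into each isoch context at
+ * compile time, so a change applies to contexts compiled afterwards.
+ */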
+
+
+static void hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
+ hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
+ ixl1394_command_t *ixlp);
+static void hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp);
+static void hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp,
+ ixl1394_command_t *ixlp);
+static void hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
+static void hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
+static void hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
+static void hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp);
+static void hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp);
+static void hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
+static void hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp);
+static void hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp);
+static int hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp,
+ caddr_t *dma_descpp, uint32_t *dma_desc_bound);
+static void hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp);
+static void hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp);
+static void hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp);
+static int hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp,
+ uint32_t bufp, uint16_t size);
+static int hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp,
+ uint32_t count);
+static int hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp);
+static uint32_t hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp);
+static hci1394_xfer_ctl_t *hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp,
+ uint32_t dmacnt);
+static void *hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp,
+ uint32_t size, uint32_t *dma_bound);
+static boolean_t hci1394_is_opcode_valid(uint16_t ixlopcode);
+
+
+/*
+ * FULL LIST OF ACCEPTED IXL COMMAND OPCODES:
+ * Receive Only: Transmit Only:
+ * IXL1394_OP_RECV_PKT_ST IXL1394_OP_SEND_PKT_WHDR_ST
+ * IXL1394_OP_RECV_PKT IXL1394_OP_SEND_PKT_ST
+ * IXL1394_OP_RECV_BUF IXL1394_OP_SEND_PKT
+ * IXL1394_OP_SET_SYNCWAIT IXL1394_OP_SEND_BUF
+ * IXL1394_OP_SEND_HDR_ONLY
+ * Receive or Transmit: IXL1394_OP_SEND_NO_PKT
+ * IXL1394_OP_CALLBACK IXL1394_OP_SET_TAGSYNC
+ * IXL1394_OP_LABEL IXL1394_OP_SET_SKIPMODE
+ * IXL1394_OP_JUMP IXL1394_OP_STORE_TIMESTAMP
+ */
+
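+/*
+ * Example (editorial sketch, not part of the original source): the shape
+ * of a minimal IXL receive program this compiler accepts -- a label, a
+ * data transfer, and a jump back to the label, yielding an endlessly
+ * repeating receive loop. Buffer address/size setup is elided; only the
+ * opcodes listed above and the chaining/label pointers from the ixl1394
+ * structures are assumed, and the function name is hypothetical.
+ */
+#ifdef HCI1394_EXAMPLE_NOTES
+static void
+example_build_recv_loop(ixl1394_label_t *lblp, ixl1394_xfer_buf_t *bufp,
+    ixl1394_jump_t *jmpp)
+{
+	lblp->ixl_opcode = IXL1394_OP_LABEL;
+	lblp->next_ixlp = (ixl1394_command_t *)bufp;
+
+	bufp->ixl_opcode = IXL1394_OP_RECV_BUF;
+	/* ... ixl_buf / size / pkt_size setup for the data buffer ... */
+	bufp->next_ixlp = (ixl1394_command_t *)jmpp;
+
+	jmpp->ixl_opcode = IXL1394_OP_JUMP;
+	jmpp->label = (ixl1394_command_t *)lblp;	/* loop forever */
+	jmpp->next_ixlp = NULL;
+}
+#endif	/* HCI1394_EXAMPLE_NOTES */
+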
+/*
+ * hci1394_compile_ixl()
+ * Top level ixl compiler entry point. Scans ixl and builds openHCI 1.0
+ * descriptor blocks in dma memory.
+ */
+int
+hci1394_compile_ixl(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
+ ixl1394_command_t *ixlp, int *resultp)
+{
+ hci1394_comp_ixl_vars_t wv; /* working variables used throughout */
+
+ ASSERT(soft_statep != NULL);
+ ASSERT(ctxtp != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_compile_ixl_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* Initialize compiler working variables */
+ hci1394_compile_ixl_init(&wv, soft_statep, ctxtp, ixlp);
+
+ /*
+ * First pass:
+ * Parse ixl commands, building desc blocks, until end of IXL
+ * linked list.
+ */
+ hci1394_parse_ixl(&wv, ixlp);
+
+ /*
+ * Second pass:
+ * Resolve all generated descriptor block jump and skip addresses.
+ * Set interrupt enable in descriptor blocks which have callback
+	 *    operations in their execution scope. (Previously store_timestamp
+ * operations were counted also.) Set interrupt enable in descriptor
+ * blocks which were introduced by an ixl label command.
+ */
+ if (wv.dma_bld_error == 0) {
+ hci1394_finalize_all_xfer_desc(&wv);
+ }
+
+ /* Endup: finalize and cleanup ixl compile, return result */
+ hci1394_compile_ixl_endup(&wv);
+
+ *resultp = wv.dma_bld_error;
+ if (*resultp != 0) {
+ TNF_PROBE_0_DEBUG(hci1394_compile_ixl_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ } else {
+ TNF_PROBE_0_DEBUG(hci1394_compile_ixl_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_SUCCESS);
+ }
+}
+
+/*
+ * hci1394_compile_ixl_init()
+ * Initialize the isoch context structure associated with the IXL
+ * program, and initialize the temporary working variables structure.
+ */
+static void
+hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
+ hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
+ ixl1394_command_t *ixlp)
+{
+ TNF_PROBE_0_DEBUG(hci1394_compile_ixl_init_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* initialize common recv/xmit compile values */
+ wvp->soft_statep = soft_statep;
+ wvp->ctxtp = ctxtp;
+
+ /* init/clear ctxtp values */
+ ctxtp->dma_mem_execp = NULL;
+ ctxtp->dma_firstp = NULL;
+ ctxtp->dma_last_time = 0;
+ ctxtp->xcs_firstp = NULL;
+ ctxtp->ixl_exec_depth = 0;
+ ctxtp->ixl_execp = NULL;
+ ctxtp->ixl_firstp = ixlp;
+ ctxtp->default_skipxferp = NULL;
+
+ /*
+ * the context's max_noadv_intrs is set here instead of in isoch init
+ * because the default is patchable and would only be picked up this way
+ */
+ ctxtp->max_noadv_intrs = hci1394_ixl_max_noadv_intrs;
+
+ /* init working variables */
+ wvp->xcs_firstp = NULL;
+ wvp->xcs_currentp = NULL;
+
+ wvp->dma_firstp = NULL;
+ wvp->dma_currentp = NULL;
+ wvp->dma_bld_error = 0;
+
+ wvp->ixl_io_mode = ctxtp->ctxt_flags;
+ wvp->ixl_cur_cmdp = NULL;
+ wvp->ixl_cur_xfer_stp = NULL;
+ wvp->ixl_cur_labelp = NULL;
+
+ wvp->ixl_xfer_st_cnt = 0; /* count of xfer start commands found */
+ wvp->xfer_state = XFER_NONE; /* none, pkt, buf, skip, hdronly */
+ wvp->xfer_hci_flush = 0; /* updateable - xfer, jump, set */
+ wvp->xfer_pktlen = 0;
+ wvp->xfer_bufcnt = 0;
+ wvp->descriptors = 0;
+
+ /* START RECV ONLY SECTION */
+ wvp->ixl_setsyncwait_cnt = 0;
+
+ /* START XMIT ONLY SECTION */
+ wvp->ixl_settagsync_cmdp = NULL;
+ wvp->ixl_setskipmode_cmdp = NULL;
+ wvp->default_skipmode = ctxtp->default_skipmode; /* nxt,self,stop,jmp */
+ wvp->default_skiplabelp = ctxtp->default_skiplabelp;
+ wvp->default_skipxferp = NULL;
+ wvp->skipmode = ctxtp->default_skipmode;
+ wvp->skiplabelp = NULL;
+ wvp->skipxferp = NULL;
+ wvp->default_tag = ctxtp->default_tag;
+ wvp->default_sync = ctxtp->default_sync;
+ wvp->storevalue_bufp = hci1394_alloc_storevalue_dma_mem(wvp);
+ wvp->storevalue_data = 0;
+ wvp->xmit_pkthdr1 = 0;
+ wvp->xmit_pkthdr2 = 0;
+ /* END XMIT ONLY SECTION */
+
+ TNF_PROBE_0_DEBUG(hci1394_compile_ixl_init_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_compile_ixl_endup()
+ * This routine is called just before the main hci1394_compile_ixl() exits.
+ * It checks for errors and performs the appropriate cleanup, or it rolls
+ * any relevant info from the working variables struct into the context
+ * structure.
+ */
+static void
+hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp)
+{
+ ixl1394_command_t *ixl_exec_stp;
+ hci1394_idma_desc_mem_t *dma_nextp;
+ int err;
+
+ TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* error if no descriptor blocks were found in ixl & built in dma memory */
+ if ((wvp->dma_bld_error == 0) && (wvp->ixl_xfer_st_cnt == 0)) {
+ TNF_PROBE_1(hci1394_compile_ixl_endup_nodata_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_ENO_DATA_PKTS: prog has no data packets");
+
+ wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
+ }
+
+ /* if no errors yet, find the first IXL command that's a transfer cmd */
+ if (wvp->dma_bld_error == 0) {
+ err = hci1394_ixl_find_next_exec_xfer(wvp->ctxtp->ixl_firstp,
+ NULL, &ixl_exec_stp);
+
+ /* error if a label<->jump loop, or no xfer */
+ if ((err == DDI_FAILURE) || (ixl_exec_stp == NULL)) {
+ TNF_PROBE_1(hci1394_compile_ixl_endup_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_ENO_DATA_PKTS: loop or no xfer detected");
+
+ wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
+ }
+ }
+
+ /* Sync all the DMA descriptor buffers */
+ dma_nextp = wvp->ctxtp->dma_firstp;
+ while (dma_nextp != NULL) {
+ err = ddi_dma_sync(dma_nextp->mem.bi_dma_handle,
+ (off_t)dma_nextp->mem.bi_kaddr, dma_nextp->mem.bi_length,
+ DDI_DMA_SYNC_FORDEV);
+ if (err != DDI_SUCCESS) {
+ wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_1(hci1394_compile_ixl_endup_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_INTERNAL_ERROR: dma_sync() failed");
+ break;
+ }
+
+ /* advance to next dma memory descriptor */
+ dma_nextp = dma_nextp->dma_nextp;
+ }
+
+ /*
+ * If error, clean up and return: delete all allocated xfer_ctl structs
+ * and all dma descriptor page memory and its dma memory blocks too.
+ */
+ if (wvp->dma_bld_error != 0) {
+ wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
+ wvp->ctxtp->dma_firstp = wvp->dma_firstp;
+ hci1394_ixl_cleanup(wvp->soft_statep, wvp->ctxtp);
+
+ TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+
+ /* can only get here if the first ixl transfer command was found */
+
+ /* set required processing vars into ctxtp struct */
+ wvp->ctxtp->default_skipxferp = wvp->default_skipxferp;
+ wvp->ctxtp->dma_mem_execp = 0;
+
+ /*
+ * the transfer command's compiler private xfer_ctl structure has the
+ * appropriate bound address
+ */
+ wvp->ctxtp->dma_mem_execp = (uint32_t)((hci1394_xfer_ctl_t *)
+ ixl_exec_stp->compiler_privatep)->dma[0].dma_bound;
+ wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
+ wvp->ctxtp->dma_firstp = wvp->dma_firstp;
+ wvp->ctxtp->dma_last_time = 0;
+ wvp->ctxtp->ixl_exec_depth = 0;
+ wvp->ctxtp->ixl_execp = NULL;
+
+ /* compile done */
+ TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_parse_ixl()
+ * Scan IXL program and build ohci DMA descriptor blocks in dma memory.
+ *
+ * Parse/process succeeding ixl commands until end of IXL linked list is
+ * reached. Evaluate ixl syntax and build (xmit or recv) descriptor
+ * blocks. To aid execution time evaluation of current location, enable
+ * status recording on each descriptor block built.
+ * On xmit, set sync & tag bits. On recv, optionally set wait for sync bit.
+ */
+static void
+hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp, ixl1394_command_t *ixlp)
+{
+ ixl1394_command_t *ixlnextp = ixlp; /* addr of next ixl cmd */
+ ixl1394_command_t *ixlcurp = NULL; /* addr of current ixl cmd */
+ uint16_t ixlopcode = 0; /* opcode of current ixl cmd */
+
+ uint32_t pktsize;
+ uint32_t pktcnt;
+
+ TNF_PROBE_0_DEBUG(hci1394_parse_ixl_enter, HCI1394_TNF_HAL_STACK_ISOCH,
+ "");
+
+ /* follow ixl links until reach end or find error */
+ while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
+
+ /* set this command as the current ixl command */
+ wvp->ixl_cur_cmdp = ixlcurp = ixlnextp;
+ ixlnextp = ixlcurp->next_ixlp;
+
+ ixlopcode = ixlcurp->ixl_opcode;
+
+ /* init compiler controlled values in current ixl command */
+ ixlcurp->compiler_privatep = NULL;
+ ixlcurp->compiler_resv = 0;
+
+ /* error if xmit/recv mode not appropriate for current cmd */
+ if ((((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) &&
+ ((ixlopcode & IXL1394_OPF_ONRECV) == 0)) ||
+ (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
+ ((ixlopcode & IXL1394_OPF_ONXMIT) == 0))) {
+
+ /* check if command op failed because it was invalid */
+ if (hci1394_is_opcode_valid(ixlopcode) != B_TRUE) {
+ TNF_PROBE_3(hci1394_parse_ixl_bad_opcode_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_BAD_IXL_OPCODE",
+ tnf_opaque, ixl_commandp, ixlcurp,
+ tnf_opaque, ixl_opcode, ixlopcode);
+
+ wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
+ } else {
+ TNF_PROBE_3(hci1394_parse_ixl_mode_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
+ "invalid ixlop in mode", tnf_uint, io_mode,
+ wvp->ixl_io_mode, tnf_opaque, ixl_opcode,
+ ixlopcode);
+
+ wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
+ }
+ continue;
+ }
+
+ /*
+ * if ends xfer flag set, finalize current xfer descriptor
+ * block build
+ */
+ if ((ixlopcode & IXL1394_OPF_ENDSXFER) != 0) {
+ /* finalize any descriptor block build in progress */
+ hci1394_finalize_cur_xfer_desc(wvp);
+
+ if (wvp->dma_bld_error != 0) {
+ continue;
+ }
+ }
+
+ /*
+ * now process based on specific opcode value
+ */
+ switch (ixlopcode) {
+
+ case IXL1394_OP_RECV_BUF:
+ case IXL1394_OP_RECV_BUF_U: {
+ ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
+
+ cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
+
+ /*
+ * In packet-per-buffer mode:
+ * This ixl command builds a collection of xfer
+ * descriptor blocks (size/pkt_size of them) each to
+ * recv a packet whose buffer size is pkt_size and
+ * whose buffer ptr is (pktcur*pkt_size + bufp)
+ *
+ * In buffer fill mode:
+ * This ixl command builds a single xfer descriptor
+ * block to recv as many packets or parts of packets
+ * as can fit into the buffer size specified
+ * (pkt_size is not used).
+ */
+
+ /* set xfer_state for new descriptor block build */
+ wvp->xfer_state = XFER_BUF;
+
+ /* set this ixl command as current xferstart command */
+ wvp->ixl_cur_xfer_stp = ixlcurp;
+
+ /*
+ * perform packet-per-buffer checks
+ * (no checks needed when in buffer fill mode)
+ */
+ if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) == 0) {
+
+ /* the packets must use the buffer exactly */
+ pktsize = cur_xfer_buf_ixlp->pkt_size;
+ pktcnt = 0;
+ if (pktsize != 0) {
+ pktcnt = cur_xfer_buf_ixlp->size /
+ pktsize;
+ }
+ if ((pktcnt == 0) || ((pktsize * pktcnt) !=
+ cur_xfer_buf_ixlp->size)) {
+
+ TNF_PROBE_3(hci1394_parse_ixl_rat_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "",
+ tnf_string, errmsg,
+ "IXL1394_EPKTSIZE_RATIO", tnf_int,
+ buf_size, cur_xfer_buf_ixlp->size,
+ tnf_int, pkt_size, pktsize);
+
+ wvp->dma_bld_error =
+ IXL1394_EPKTSIZE_RATIO;
+ continue;
+ }
+ }
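+ /*
+ * worked example (editor's illustration): size 4096 with
+ * pkt_size 1024 gives pktcnt 4, and 4 * 1024 == 4096, so the
+ * check passes; size 4000 with pkt_size 1024 gives pktcnt 3,
+ * and 3 * 1024 != 4000, failing with IXL1394_EPKTSIZE_RATIO
+ */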
+
+ /*
+ * set buffer pointer & size into first xfer_bufp
+ * and xfer_size
+ */
+ if (hci1394_set_next_xfer_buf(wvp,
+ cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
+ cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
+
+ /* wvp->dma_bld_error is set by above call */
+ continue;
+ }
+ break;
+ }
+
+ case IXL1394_OP_RECV_PKT_ST:
+ case IXL1394_OP_RECV_PKT_ST_U: {
+ ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
+
+ cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
+
+ /* error if in buffer fill mode */
+ if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
+ TNF_PROBE_1(hci1394_parse_ixl_mode_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
+ "RECV_PKT_ST used in BFFILL mode");
+
+ wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
+ continue;
+ }
+
+ /* set xfer_state for new descriptor block build */
+ /* set this ixl command as current xferstart command */
+ wvp->xfer_state = XFER_PKT;
+ wvp->ixl_cur_xfer_stp = ixlcurp;
+
+ /*
+ * set buffer pointer & size into first xfer_bufp
+ * and xfer_size
+ */
+ if (hci1394_set_next_xfer_buf(wvp,
+ cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
+ cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
+
+ /* wvp->dma_bld_error is set by above call */
+ continue;
+ }
+ break;
+ }
+
+ case IXL1394_OP_RECV_PKT:
+ case IXL1394_OP_RECV_PKT_U: {
+ ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
+
+ cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
+
+ /* error if in buffer fill mode */
+ if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
+ TNF_PROBE_1(hci1394_parse_ixl_mode_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
+ "RECV_PKT_ST used in BFFILL mode");
+
+ wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
+ continue;
+ }
+
+ /* error if xfer_state not xfer pkt */
+ if (wvp->xfer_state != XFER_PKT) {
+ TNF_PROBE_1(hci1394_parse_ixl_misplacercv_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_EMISPLACED_RECV: "
+ "RECV_PKT without RECV_PKT_ST");
+
+ wvp->dma_bld_error = IXL1394_EMISPLACED_RECV;
+ continue;
+ }
+
+ /*
+ * save xfer start cmd ixl ptr in compiler_privatep
+ * field of this cmd
+ */
+ ixlcurp->compiler_privatep = (void *)
+ wvp->ixl_cur_xfer_stp;
+
+ /*
+ * save pkt index [1-n] in compiler_resv field of
+ * this cmd
+ */
+ ixlcurp->compiler_resv = wvp->xfer_bufcnt;
+
+ /*
+ * set buffer pointer & size into next xfer_bufp
+ * and xfer_size
+ */
+ if (hci1394_set_next_xfer_buf(wvp,
+ cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
+ cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
+
+ /* wvp->dma_bld_error is set by above call */
+ continue;
+ }
+
+ /*
+ * set updateable xfer cache flush eval flag if
+ * updateable opcode
+ */
+ if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
+ wvp->xfer_hci_flush |= UPDATEABLE_XFER;
+ }
+ break;
+ }
+
+ case IXL1394_OP_SEND_BUF:
+ case IXL1394_OP_SEND_BUF_U: {
+ ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
+
+ cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
+
+ /*
+ * These send_buf commands build a collection of xmit
+ * descriptor blocks (size/pkt_size of them) each to
+ * xfer a packet whose buffer size is pkt_size and whose
+ * buffer ptr is (pktcur*pkt_size + bufp). (ptr and size
+ * are adjusted if the ixl cmd has the header form)
+ */
+
+ /* set xfer_state for new descriptor block build */
+ wvp->xfer_state = XFER_BUF;
+
+ /* set this ixl command as current xferstart command */
+ wvp->ixl_cur_xfer_stp = ixlcurp;
+
+ /* the packets must use the buffer exactly, else error */
+ pktsize = cur_xfer_buf_ixlp->pkt_size;
+ pktcnt = 0;
+ if (pktsize != 0) {
+ pktcnt = cur_xfer_buf_ixlp->size / pktsize;
+ }
+ if ((pktcnt == 0) || ((pktsize * pktcnt) !=
+ cur_xfer_buf_ixlp->size)) {
+
+ TNF_PROBE_3(hci1394_parse_ixl_rat_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_EPKTSIZE_RATIO", tnf_int,
+ buf_size, cur_xfer_buf_ixlp->size, tnf_int,
+ pkt_size, pktsize);
+
+ wvp->dma_bld_error = IXL1394_EPKTSIZE_RATIO;
+ continue;
+ }
+
+ /* set buf ptr & size into 1st xfer_bufp & xfer_size */
+ if (hci1394_set_next_xfer_buf(wvp,
+ cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
+ cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
+
+ /* wvp->dma_bld_error is set by above call */
+ continue;
+ }
+ break;
+ }
+
+ case IXL1394_OP_SEND_PKT_ST:
+ case IXL1394_OP_SEND_PKT_ST_U: {
+ ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
+
+ cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
+
+ /* set xfer_state for new descriptor block build */
+ /* set this ixl command as current xferstart command */
+ wvp->xfer_state = XFER_PKT;
+ wvp->ixl_cur_xfer_stp = ixlcurp;
+
+ /*
+ * set buffer pointer & size into first xfer_bufp and
+ * xfer_size
+ */
+ if (hci1394_set_next_xfer_buf(wvp,
+ cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
+ cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
+
+ /* wvp->dma_bld_error is set by above call */
+ continue;
+ }
+ break;
+ }
+
+ case IXL1394_OP_SEND_PKT_WHDR_ST:
+ case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
+ ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
+
+ cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
+
+ /* set xfer_state for new descriptor block build */
+ /* set this ixl command as current xferstart command */
+ wvp->xfer_state = XFER_PKT;
+ wvp->ixl_cur_xfer_stp = ixlcurp;
+
+ /*
+ * buffer size must be at least 4 (must include header),
+ * else error
+ */
+ if (cur_xfer_pkt_ixlp->size < 4) {
+ TNF_PROBE_2(hci1394_parse_ixl_hdr_missing_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_EPKT_HDR_MISSING", tnf_int,
+ pkt_size, cur_xfer_pkt_ixlp->size);
+
+ wvp->dma_bld_error = IXL1394_EPKT_HDR_MISSING;
+ continue;
+ }
+
+ /*
+ * set buffer and size(excluding header) into first
+ * xfer_bufp and xfer_size
+ */
+ if (hci1394_set_next_xfer_buf(wvp,
+ cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr + 4,
+ cur_xfer_pkt_ixlp->size - 4) != DDI_SUCCESS) {
+
+ /* wvp->dma_bld_error is set by above call */
+ continue;
+ }
+ break;
+ }
+
+ case IXL1394_OP_SEND_PKT:
+ case IXL1394_OP_SEND_PKT_U: {
+ ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
+
+ cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
+
+ /* error if xfer_state not xfer pkt */
+ if (wvp->xfer_state != XFER_PKT) {
+ TNF_PROBE_1(hci1394_parse_ixl_misplacesnd_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_EMISPLACED_SEND: SEND_PKT "
+ "without SEND_PKT_ST");
+
+ wvp->dma_bld_error = IXL1394_EMISPLACED_SEND;
+ continue;
+ }
+
+ /*
+ * save xfer start cmd ixl ptr in compiler_privatep
+ * field of this cmd
+ */
+ ixlcurp->compiler_privatep = (void *)
+ wvp->ixl_cur_xfer_stp;
+
+ /*
+ * save pkt index [1-n] in compiler_resv field of this
+ * cmd
+ */
+ ixlcurp->compiler_resv = wvp->xfer_bufcnt;
+
+ /*
+ * set buffer pointer & size into next xfer_bufp
+ * and xfer_size
+ */
+ if (hci1394_set_next_xfer_buf(wvp,
+ cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
+ cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
+
+ /* wvp->dma_bld_error is set by above call */
+ continue;
+ }
+
+ /*
+ * set updateable xfer cache flush eval flag if
+ * updateable opcode
+ */
+ if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
+ wvp->xfer_hci_flush |= UPDATEABLE_XFER;
+ }
+ break;
+ }
+
+ case IXL1394_OP_SEND_HDR_ONLY:
+ /* set xfer_state for new descriptor block build */
+ wvp->xfer_state = XMIT_HDRONLY;
+
+ /* set this ixl command as current xferstart command */
+ wvp->ixl_cur_xfer_stp = ixlcurp;
+ break;
+
+ case IXL1394_OP_SEND_NO_PKT:
+ /* set xfer_state for new descriptor block build */
+ wvp->xfer_state = XMIT_NOPKT;
+
+ /* set this ixl command as current xferstart command */
+ wvp->ixl_cur_xfer_stp = ixlcurp;
+ break;
+
+ case IXL1394_OP_JUMP:
+ case IXL1394_OP_JUMP_U: {
+ ixl1394_jump_t *cur_jump_ixlp;
+
+ cur_jump_ixlp = (ixl1394_jump_t *)ixlcurp;
+
+ /*
+ * verify label indicated by IXL1394_OP_JUMP is
+ * actually an IXL1394_OP_LABEL or NULL
+ */
+ if ((cur_jump_ixlp->label != NULL) &&
+ (cur_jump_ixlp->label->ixl_opcode !=
+ IXL1394_OP_LABEL)) {
+ TNF_PROBE_3(hci1394_parse_ixl_jumplabel_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_EJUMP_NOT_TO_LABEL",
+ tnf_opaque, jumpixl_commandp, ixlcurp,
+ tnf_opaque, jumpto_ixl,
+ cur_jump_ixlp->label);
+
+ wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
+ continue;
+ }
+ break;
+ }
+
+ case IXL1394_OP_LABEL:
+ /*
+ * save current ixl label command for xfer cmd
+ * finalize processing
+ */
+ wvp->ixl_cur_labelp = ixlcurp;
+
+ /* set initiating label flag to cause cache flush */
+ wvp->xfer_hci_flush |= INITIATING_LBL;
+ break;
+
+ case IXL1394_OP_CALLBACK:
+ case IXL1394_OP_CALLBACK_U:
+ case IXL1394_OP_STORE_TIMESTAMP:
+ /*
+ * these commands are accepted during compile,
+ * processed during execution (interrupt handling)
+ * No further processing is needed here.
+ */
+ break;
+
+ case IXL1394_OP_SET_SKIPMODE:
+ case IXL1394_OP_SET_SKIPMODE_U:
+ /*
+ * Error if already have a set skipmode cmd for
+ * this xfer
+ */
+ if (wvp->ixl_setskipmode_cmdp != NULL) {
+ TNF_PROBE_2(hci1394_parse_ixl_dup_set_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_EDUPLICATE_SET_CMD:"
+ " duplicate set skipmode", tnf_opaque,
+ ixl_commandp, ixlcurp);
+
+ wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
+ continue;
+ }
+
+ /* save skip mode ixl command and verify skipmode */
+ wvp->ixl_setskipmode_cmdp = (ixl1394_set_skipmode_t *)
+ ixlcurp;
+
+ if ((wvp->ixl_setskipmode_cmdp->skipmode !=
+ IXL1394_SKIP_TO_NEXT) &&
+ (wvp->ixl_setskipmode_cmdp->skipmode !=
+ IXL1394_SKIP_TO_SELF) &&
+ (wvp->ixl_setskipmode_cmdp->skipmode !=
+ IXL1394_SKIP_TO_STOP) &&
+ (wvp->ixl_setskipmode_cmdp->skipmode !=
+ IXL1394_SKIP_TO_LABEL)) {
+
+ TNF_PROBE_3(hci1394_parse_ixl_dup_set_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL EBAD_SKIPMODE", tnf_opaque,
+ ixl_commandp, ixlcurp, tnf_int, skip,
+ wvp->ixl_setskipmode_cmdp->skipmode);
+
+ wvp->dma_bld_error = IXL1394_EBAD_SKIPMODE;
+ continue;
+ }
+
+ /*
+ * if mode is IXL1394_SKIP_TO_LABEL, verify label
+ * references an IXL1394_OP_LABEL
+ */
+ if ((wvp->ixl_setskipmode_cmdp->skipmode ==
+ IXL1394_SKIP_TO_LABEL) &&
+ ((wvp->ixl_setskipmode_cmdp->label == NULL) ||
+ (wvp->ixl_setskipmode_cmdp->label->ixl_opcode !=
+ IXL1394_OP_LABEL))) {
+
+ TNF_PROBE_3(hci1394_parse_ixl_jump_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_EJUMP_NOT_TO_LABEL",
+ tnf_opaque, jumpixl_commandp, ixlcurp,
+ tnf_opaque, jumpto_ixl,
+ wvp->ixl_setskipmode_cmdp->label);
+
+ wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
+ continue;
+ }
+ /*
+ * set updateable set cmd cache flush eval flag if
+ * updateable opcode
+ */
+ if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
+ wvp->xfer_hci_flush |= UPDATEABLE_SET;
+ }
+ break;
+
+ case IXL1394_OP_SET_TAGSYNC:
+ case IXL1394_OP_SET_TAGSYNC_U:
+ /*
+ * error if we already have a set tag and sync cmd
+ * for this xfer
+ */
+ if (wvp->ixl_settagsync_cmdp != NULL) {
+ TNF_PROBE_2(hci1394_parse_ixl_dup_set_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_EDUPLICATE_SET_CMD:"
+ " duplicate set tagsync", tnf_opaque,
+ ixl_commandp, ixlcurp);
+
+ wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
+ continue;
+ }
+
+ /* save ixl command containing tag and sync values */
+ wvp->ixl_settagsync_cmdp =
+ (ixl1394_set_tagsync_t *)ixlcurp;
+
+ /*
+ * set updateable set cmd cache flush eval flag if
+ * updateable opcode
+ */
+ if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
+ wvp->xfer_hci_flush |= UPDATEABLE_SET;
+ }
+ break;
+
+ case IXL1394_OP_SET_SYNCWAIT:
+ /*
+ * count ixl wait-for-sync commands since the last
+ * finalize; ignore multiple occurrences for the same
+ * xfer command
+ */
+ wvp->ixl_setsyncwait_cnt++;
+ break;
+
+ default:
+ /* error - unknown/unimplemented ixl command */
+ TNF_PROBE_3(hci1394_parse_ixl_bad_opcode_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_BAD_IXL_OPCODE", tnf_opaque, ixl_commandp,
+ ixlcurp, tnf_opaque, ixl_opcode, ixlopcode);
+
+ wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
+ continue;
+ }
+ } /* while */
+
+ /* finalize any last descriptor block build */
+ wvp->ixl_cur_cmdp = NULL;
+ if (wvp->dma_bld_error == 0) {
+ hci1394_finalize_cur_xfer_desc(wvp);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_parse_ixl_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
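+
+/*
+ * Editor's sketch of a minimal xmit IXL program that the parser above
+ * accepts (pseudo-IXL; list construction and field layout are assumed,
+ * this is not a driver API):
+ *
+ *	LABEL	L1
+ *	SET_TAGSYNC  (tag, sync)	applied to the next packet built
+ *	SEND_PKT_ST  (bufA, sizeA)	starts a packet descriptor block
+ *	SEND_PKT     (bufB, sizeB)	appends a fragment to same packet
+ *	JUMP	L1			loops execution back to the label
+ *
+ * Pass 1 (above) builds one descriptor block when the SEND_PKT chain is
+ * finalized by the JUMP; pass 2 resolves the JUMP back to L1 into that
+ * block's branch address.
+ */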
+
+/*
+ * hci1394_finalize_all_xfer_desc()
+ * Pass 2: Scan IXL resolving all dma descriptor jump and skip addresses.
+ *
+ * Set interrupt enable on first descriptor block associated with current
+ * xfer IXL command if current IXL xfer was introduced by an IXL label cmnd.
+ *
+ * Set interrupt enable on last descriptor block associated with current xfer
+ * IXL command if any callback ixl commands are found on the execution path
+ * between the current and the next xfer ixl command. (Previously, this
+ * applied to store timestamp ixl commands, as well.)
+ */
+static void
+hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
+{
+ ixl1394_command_t *ixlcurp; /* current ixl command */
+ ixl1394_command_t *ixlnextp; /* next ixl command */
+ ixl1394_command_t *ixlexecnext;
+ hci1394_xfer_ctl_t *xferctl_curp;
+ hci1394_xfer_ctl_t *xferctl_nxtp;
+ hci1394_desc_t *hcidescp;
+ ddi_acc_handle_t acc_hdl;
+ uint32_t temp;
+ uint32_t dma_execnext_addr;
+ uint32_t dma_skiplabel_addr;
+ uint32_t dma_skip_addr;
+ uint32_t callback_cnt;
+ uint16_t repcnt;
+ uint16_t ixlopcode;
+ int ii;
+ int err;
+
+ TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /*
+ * If xmit mode and if default skipmode is skip to label -
+ * follow exec path starting at default skipmode label until
+ * find the first ixl xfer command which is to be executed.
+ * Set its address into default_skipxferp.
+ */
+ if (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
+ (wvp->ctxtp->default_skipmode == IXL1394_SKIP_TO_LABEL)) {
+
+ err = hci1394_ixl_find_next_exec_xfer(wvp->default_skiplabelp,
+ NULL, &wvp->default_skipxferp);
+ if (err == DDI_FAILURE) {
+ TNF_PROBE_2(hci1394_finalize_all_xfer_desc_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_ENO_DATA_PKTS: label<->jump loop detected "
+ "for skiplabel default w/no xfers", tnf_opaque,
+ skipixl_cmdp, wvp->default_skiplabelp);
+ TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
+ return;
+ }
+ }
+
+ /* set first ixl cmd */
+ ixlnextp = wvp->ctxtp->ixl_firstp;
+
+ /* follow ixl links until reach end or find error */
+ while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
+
+ /* set this command as the current ixl command */
+ ixlcurp = ixlnextp;
+ ixlnextp = ixlcurp->next_ixlp;
+
+ /* get command opcode removing unneeded update flag */
+ ixlopcode = ixlcurp->ixl_opcode & ~IXL1394_OPF_UPDATE;
+
+ /*
+ * Scan for next ixl xfer start command (including this one),
+ * along ixl link path. Once xfer command found, find next IXL
+ * xfer cmd along execution path and fill in branch address of
+ * current xfer command. If it is a composite ixl xfer command,
+ * first forward-link the branch dma addresses of each descriptor
+ * block in the composite; on reaching the final one, set its
+ * branch address to the next execution path xfer found. Next,
+ * determine the skip mode and fill in skip address(es)
+ * appropriately.
+ */
+ /* skip to next if not xfer start ixl command */
+ if (((ixlopcode & IXL1394_OPF_ISXFER) == 0) ||
+ ((ixlopcode & IXL1394_OPTY_MASK) == 0)) {
+ continue;
+ }
+
+ /*
+ * get xfer_ctl structure and composite repeat count for current
+ * IXL xfer cmd
+ */
+ xferctl_curp = (hci1394_xfer_ctl_t *)ixlcurp->compiler_privatep;
+ repcnt = xferctl_curp->cnt;
+
+ /*
+ * if initiated by an IXL label command, set interrupt enable
+ * flag into last component of first descriptor block of
+ * current IXL xfer cmd
+ */
+ if ((xferctl_curp->ctl_flags & XCTL_LABELLED) != 0) {
+ hcidescp = (hci1394_desc_t *)
+ xferctl_curp->dma[0].dma_descp;
+ acc_hdl = xferctl_curp->dma[0].dma_buf->bi_handle;
+ temp = ddi_get32(acc_hdl, &hcidescp->hdr);
+ temp |= DESC_INTR_ENBL;
+ ddi_put32(acc_hdl, &hcidescp->hdr, temp);
+ }
+
+ /* find next xfer IXL cmd by following execution path */
+ err = hci1394_ixl_find_next_exec_xfer(ixlcurp->next_ixlp,
+ &callback_cnt, &ixlexecnext);
+
+ /* if label<->jump loop detected, return error */
+ if (err == DDI_FAILURE) {
+ wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
+
+ TNF_PROBE_2(hci1394_finalize_all_xfer_desc_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_ENO_DATA_PKTS: label<->jump loop detected "
+ "w/no xfers", tnf_opaque, ixl_cmdp,
+ ixlcurp->next_ixlp);
+ continue;
+ }
+
+ /* link current IXL's xfer_ctl to next xfer IXL on exec path */
+ xferctl_curp->execp = ixlexecnext;
+
+ /*
+ * if callbacks have been seen during execution path scan,
+ * set interrupt enable flag into last descriptor of last
+ * descriptor block of current IXL xfer cmd
+ */
+ if (callback_cnt != 0) {
+ hcidescp = (hci1394_desc_t *)
+ xferctl_curp->dma[repcnt - 1].dma_descp;
+ acc_hdl =
+ xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
+ temp = ddi_get32(acc_hdl, &hcidescp->hdr);
+ temp |= DESC_INTR_ENBL;
+ ddi_put32(acc_hdl, &hcidescp->hdr, temp);
+ }
+
+ /*
+ * obtain dma bound addr of next exec path IXL xfer command,
+ * if any
+ */
+ dma_execnext_addr = 0;
+
+ if (ixlexecnext != NULL) {
+ xferctl_nxtp = (hci1394_xfer_ctl_t *)
+ ixlexecnext->compiler_privatep;
+ dma_execnext_addr = xferctl_nxtp->dma[0].dma_bound;
+ } else {
+ /*
+ * If this is last descriptor (next == NULL), then
+ * make sure the interrupt bit is enabled. This
+ * way we can ensure that we are notified when the
+ * descriptor chain processing has come to an end.
+ */
+ hcidescp = (hci1394_desc_t *)
+ xferctl_curp->dma[repcnt - 1].dma_descp;
+ acc_hdl =
+ xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
+ temp = ddi_get32(acc_hdl, &hcidescp->hdr);
+ temp |= DESC_INTR_ENBL;
+ ddi_put32(acc_hdl, &hcidescp->hdr, temp);
+ }
+
+ /*
+ * set jump address of final cur IXL xfer cmd to addr next
+ * IXL xfer cmd
+ */
+ hcidescp = (hci1394_desc_t *)
+ xferctl_curp->dma[repcnt - 1].dma_descp;
+ acc_hdl = xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
+ ddi_put32(acc_hdl, &hcidescp->branch, dma_execnext_addr);
+
+ /*
+ * if a composite object, forward link initial jump
+ * dma addresses
+ */
+ for (ii = 0; ii < repcnt - 1; ii++) {
+ hcidescp = (hci1394_desc_t *)
+ xferctl_curp->dma[ii].dma_descp;
+ acc_hdl = xferctl_curp->dma[ii].dma_buf->bi_handle;
+ ddi_put32(acc_hdl, &hcidescp->branch,
+ xferctl_curp->dma[ii + 1].dma_bound);
+ }
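+ /*
+ * e.g. (editor's illustration), with repcnt == 3 the loop above
+ * sets dma[0].branch -> dma[1].dma_bound and dma[1].branch ->
+ * dma[2].dma_bound; dma[2].branch was already set to
+ * dma_execnext_addr just before the loop
+ */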
+
+ /*
+ * fill in skip address(es) for all descriptor blocks belonging
+ * to the current IXL xfer command; note: skip addresses apply
+ * only to xmit mode commands
+ */
+ if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {
+
+ /* first obtain and set skip mode information */
+ wvp->ixl_setskipmode_cmdp = xferctl_curp->skipmodep;
+ hci1394_set_xmit_skip_mode(wvp);
+
+ /*
+ * if skip to label, init dma bound addr to be the
+ * 1st xfer cmd after the label
+ */
+ dma_skiplabel_addr = 0;
+ if ((wvp->skipmode == IXL1394_SKIP_TO_LABEL) &&
+ (wvp->skipxferp != NULL)) {
+ xferctl_nxtp = (hci1394_xfer_ctl_t *)
+ wvp->skipxferp->compiler_privatep;
+ dma_skiplabel_addr =
+ xferctl_nxtp->dma[0].dma_bound;
+ }
+
+ /*
+ * set skip addrs for each descriptor blk at this
+ * xfer start IXL cmd
+ */
+ for (ii = 0; ii < repcnt; ii++) {
+ switch (wvp->skipmode) {
+
+ case IXL1394_SKIP_TO_LABEL:
+ /* set dma bound address - label */
+ dma_skip_addr = dma_skiplabel_addr;
+ break;
+
+ case IXL1394_SKIP_TO_NEXT:
+ /* set dma bound address - next */
+ if (ii < repcnt - 1) {
+ dma_skip_addr = xferctl_curp->
+ dma[ii + 1].dma_bound;
+ } else {
+ dma_skip_addr =
+ dma_execnext_addr;
+ }
+ break;
+
+ case IXL1394_SKIP_TO_SELF:
+ /* set dma bound address - self */
+ dma_skip_addr =
+ xferctl_curp->dma[ii].dma_bound;
+ break;
+
+ case IXL1394_SKIP_TO_STOP:
+ default:
+ /* set dma bound address - stop */
+ dma_skip_addr = 0;
+ break;
+ }
+
+ /*
+ * determine address of first descriptor of
+ * current descriptor block by adjusting addr of
+ * last descriptor of current descriptor block
+ */
+ hcidescp = ((hci1394_desc_t *)
+ xferctl_curp->dma[ii].dma_descp);
+ acc_hdl =
+ xferctl_curp->dma[ii].dma_buf->bi_handle;
+
+ /*
+ * adjust back by the count of descriptors in this
+ * desc block, not including the last one
+ */
+ hcidescp -= ((xferctl_curp->dma[ii].dma_bound &
+ DESC_Z_MASK) - 1);
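+ /*
+ * e.g. (editor's illustration), a dma_bound value with
+ * Z == 3 denotes a 3-descriptor block, so backing
+ * hcidescp up by Z - 1 == 2 descriptors moves from the
+ * last descriptor to the first
+ */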
+
+ /*
+ * adjust further if the last descriptor is
+ * double sized
+ */
+ if (ixlopcode == IXL1394_OP_SEND_HDR_ONLY) {
+ hcidescp++;
+ }
+ /*
+ * now set skip address into first descriptor
+ * of descriptor block
+ */
+ ddi_put32(acc_hdl, &hcidescp->branch,
+ dma_skip_addr);
+ } /* for */
+ } /* if */
+ } /* while */
+
+ TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_finalize_cur_xfer_desc()
+ * Build the openHCI descriptor for a packet or buffer based on info
+ * currently collected into the working vars struct (wvp). After some
+ * checks, this routine dispatches to the appropriate descriptor block
+ * build (bld) routine for the packet or buf type.
+ */
+static void
+hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
+{
+ uint16_t ixlopcode;
+ uint16_t ixlopraw;
+
+ TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* extract opcode from current IXL cmd (if any) */
+ if (wvp->ixl_cur_cmdp != NULL) {
+ ixlopcode = wvp->ixl_cur_cmdp->ixl_opcode;
+ ixlopraw = ixlopcode & ~IXL1394_OPF_UPDATE;
+ } else {
+ ixlopcode = ixlopraw = IXL1394_OP_INVALID;
+ }
+
+ /*
+ * if no xfer descriptor block being built, perform validity checks
+ */
+ if (wvp->xfer_state == XFER_NONE) {
+ /*
+ * error if being finalized by IXL1394_OP_LABEL or
+ * IXL1394_OP_JUMP or if at end, and have an unapplied
+ * IXL1394_OP_SET_TAGSYNC, IXL1394_OP_SET_SKIPMODE or
+ * IXL1394_OP_SET_SYNCWAIT
+ */
+ if ((ixlopraw == IXL1394_OP_JUMP) ||
+ (ixlopraw == IXL1394_OP_LABEL) ||
+ (wvp->ixl_cur_cmdp == NULL) ||
+ (wvp->ixl_cur_cmdp->next_ixlp == NULL)) {
+ if ((wvp->ixl_settagsync_cmdp != NULL) ||
+ (wvp->ixl_setskipmode_cmdp != NULL) ||
+ (wvp->ixl_setsyncwait_cnt != 0)) {
+
+ wvp->dma_bld_error = IXL1394_EUNAPPLIED_SET_CMD;
+
+ TNF_PROBE_2(
+ hci1394_finalize_cur_xfer_desc_set_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_UNAPPLIED_SET_CMD: "
+ "orphaned set (no associated packet)",
+ tnf_opaque, ixl_commandp,
+ wvp->ixl_cur_cmdp);
+ TNF_PROBE_0_DEBUG(
+ hci1394_finalize_cur_xfer_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+ }
+
+ /* error if finalize is due to updateable jump cmd */
+ if (ixlopcode == IXL1394_OP_JUMP_U) {
+
+ wvp->dma_bld_error = IXL1394_EUPDATE_DISALLOWED;
+
+ TNF_PROBE_2(hci1394_finalize_cur_xfer_desc_upd_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EUPDATE_DISALLOWED: jumpU w/out pkt",
+ tnf_opaque, ixl_commandp, wvp->ixl_cur_cmdp);
+ TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* no error, no xfer */
+ return;
+ }
+
+ /*
+ * finalize current xfer descriptor block being built
+ */
+
+ /* count IXL xfer start command for descriptor block being built */
+ wvp->ixl_xfer_st_cnt++;
+
+ /*
+ * complete setting of cache flush evaluation flags; flags will already
+ * have been set by updateable set cmds and non-start xfer pkt cmds
+ */
+ /* now set cache flush flag if current xfer start cmnd is updateable */
+ if ((wvp->ixl_cur_xfer_stp->ixl_opcode & IXL1394_OPF_UPDATE) != 0) {
+ wvp->xfer_hci_flush |= UPDATEABLE_XFER;
+ }
+ /*
+ * also set cache flush flag if xfer being finalized by
+ * updateable jump cmd
+ */
+ if (ixlopcode == IXL1394_OP_JUMP_U) {
+ wvp->xfer_hci_flush |= UPDATEABLE_JUMP;
+ }
+
+ /*
+ * Determine if cache flush required before building next descriptor
+ * block. If xfer pkt command and any cache flush flags are set,
+ * hci flush needed.
+ * If buffer or special xfer command and xfer command is updateable or
+ * an associated set command is updateable, hci flush is required now.
+ * If a single-xfer buffer or special xfer command is finalized by
+ * updateable jump command, hci flush is required now.
+ * Note: a cache flush will be required later, before the last
+ * descriptor block of a multi-xfer set of descriptor blocks is built,
+ * if this (non-pkt) xfer is finalized by an updateable jump command.
+ */
+ if (wvp->xfer_hci_flush != 0) {
+ if (((wvp->ixl_cur_xfer_stp->ixl_opcode &
+ IXL1394_OPTY_XFER_PKT_ST) != 0) || ((wvp->xfer_hci_flush &
+ (UPDATEABLE_XFER | UPDATEABLE_SET | INITIATING_LBL)) !=
+ 0)) {
+
+ if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(
+ hci1394_finalize_cur_xfer_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return;
+ }
+ }
+ }
+
+ /*
+ * determine which kind of descriptor block to build based on
+ * xfer state - hdr only, skip cycle, pkt or buf.
+ */
+ switch (wvp->xfer_state) {
+
+ case XFER_PKT:
+ if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
+ hci1394_bld_recv_pkt_desc(wvp);
+ } else {
+ hci1394_bld_xmit_pkt_desc(wvp);
+ }
+ break;
+
+ case XFER_BUF:
+ if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
+ if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
+ hci1394_bld_recv_buf_fill_desc(wvp);
+ } else {
+ hci1394_bld_recv_buf_ppb_desc(wvp);
+ }
+ } else {
+ hci1394_bld_xmit_buf_desc(wvp);
+ }
+ break;
+
+ case XMIT_HDRONLY:
+ case XMIT_NOPKT:
+ hci1394_bld_xmit_hdronly_nopkt_desc(wvp);
+ break;
+
+ default:
+ /* internal compiler error */
+ TNF_PROBE_2(hci1394_finalize_cur_xfer_desc_internal_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_INTERNAL_ERROR: invalid state", tnf_opaque,
+ ixl_commandp, wvp->ixl_cur_cmdp);
+ wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
+ }
+
+ /* return if error */
+ if (wvp->dma_bld_error != 0) {
+ TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return;
+ }
+
+ /*
+ * if was finalizing IXL jump cmd, set compiler_privatep to
+ * cur xfer IXL cmd
+ */
+ if (ixlopraw == IXL1394_OP_JUMP) {
+ wvp->ixl_cur_cmdp->compiler_privatep =
+ (void *)wvp->ixl_cur_xfer_stp;
+ }
+
+ /* if cur xfer IXL initiated by IXL label cmd, set flag in xfer_ctl */
+ if (wvp->ixl_cur_labelp != NULL) {
+ ((hci1394_xfer_ctl_t *)
+ (wvp->ixl_cur_xfer_stp->compiler_privatep))->ctl_flags |=
+ XCTL_LABELLED;
+ wvp->ixl_cur_labelp = NULL;
+ }
+
+ /*
+ * set any associated IXL set skipmode cmd into xfer_ctl of
+ * cur xfer IXL cmd
+ */
+ if (wvp->ixl_setskipmode_cmdp != NULL) {
+ ((hci1394_xfer_ctl_t *)
+ (wvp->ixl_cur_xfer_stp->compiler_privatep))->skipmodep =
+ wvp->ixl_setskipmode_cmdp;
+ }
+
+ /* set no current xfer start cmd */
+ wvp->ixl_cur_xfer_stp = NULL;
+
+ /* set no current set tag&sync, set skipmode or set syncwait commands */
+ wvp->ixl_settagsync_cmdp = NULL;
+ wvp->ixl_setskipmode_cmdp = NULL;
+ wvp->ixl_setsyncwait_cnt = 0;
+
+ /* set no currently active descriptor blocks */
+ wvp->descriptors = 0;
+
+ /* reset total packet length and buffers count */
+ wvp->xfer_pktlen = 0;
+ wvp->xfer_bufcnt = 0;
+
+ /* reset flush cache evaluation flags */
+ wvp->xfer_hci_flush = 0;
+
+ /* set no xmit descriptor block being built */
+ wvp->xfer_state = XFER_NONE;
+
+ TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_bld_recv_pkt_desc()
+ * Used to create the openHCI dma descriptor block(s) for a receive packet.
+ */
+static void
+hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
+{
+ hci1394_xfer_ctl_t *xctlp;
+ caddr_t dma_descp;
+ uint32_t dma_desc_bound;
+ uint32_t wait_for_sync;
+ uint32_t ii;
+ hci1394_desc_t *wv_descp; /* shorthand to local descriptor */
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /*
+ * it is an error if the number of descriptors to be built exceeds
+ * the maximum allowed in a descriptor block.
+ */
+ if ((wvp->descriptors + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
+
+ wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
+
+ TNF_PROBE_3(hci1394_bld_recv_pkt_desc_fragment_oflo_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EFRAGMENT_OFLO", tnf_opaque, ixl_commandp,
+ wvp->ixl_cur_xfer_stp, tnf_int, frag_count,
+ wvp->descriptors + wvp->xfer_bufcnt);
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+
+ /* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
+ if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
+
+ wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
+
+ TNF_PROBE_2(hci1394_bld_recv_pkt_desc_mem_alloc_fail,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
+ ixl_commandp, wvp->ixl_cur_xfer_stp);
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+
+ /*
+ * save xfer_ctl struct addr in compiler_privatep of
+ * current IXL xfer cmd
+ */
+ wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
+
+ /*
+ * if enabled, set wait for sync flag in first descriptor of
+ * descriptor block
+ */
+ if (wvp->ixl_setsyncwait_cnt > 0) {
+ wvp->ixl_setsyncwait_cnt = 1;
+ wait_for_sync = DESC_W_ENBL;
+ } else {
+ wait_for_sync = DESC_W_DSABL;
+ }
+
+ /* create descriptor block for this recv packet (xfer status enabled) */
+ for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
+ wv_descp = &wvp->descriptor_block[wvp->descriptors];
+
+ if (ii == (wvp->xfer_bufcnt - 1)) {
+ HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL,
+ DESC_INTR_DSABL, wait_for_sync, wvp->xfer_size[ii]);
+ } else {
+ HCI1394_INIT_IR_PPB_IMORE(wv_descp, wait_for_sync,
+ wvp->xfer_size[ii]);
+ }
+ wv_descp->data_addr = wvp->xfer_bufp[ii];
+ wv_descp->branch = 0;
+ wv_descp->status = (wvp->xfer_size[ii] <<
+ DESC_ST_RESCOUNT_SHIFT) & DESC_ST_RESCOUNT_MASK;
+ wvp->descriptors++;
+ }
+
+ /* allocate and copy descriptor block to dma memory */
+ if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
+ DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above function call */
+ return;
+ }
+
+ /*
+ * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
+ * is last component)
+ */
+ xctlp->dma[0].dma_bound = dma_desc_bound;
+ xctlp->dma[0].dma_descp =
+ dma_descp + (wvp->xfer_bufcnt - 1) * sizeof (hci1394_desc_t);
+ xctlp->dma[0].dma_buf = &wvp->dma_currentp->mem;
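+ /*
+ * editor's note: e.g., for a 3-fragment packet, dma_descp above
+ * points at descriptor index 2, the ILAST descriptor whose xfer
+ * status/rescount field was enabled and initialized earlier
+ */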
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_bld_recv_buf_ppb_desc()
+ * Used to create the openHCI dma descriptor block(s) for a receive buf
+ * in packet per buffer mode.
+ */
+static void
+hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp)
+{
+ hci1394_xfer_ctl_t *xctlp;
+ ixl1394_xfer_buf_t *local_ixl_cur_xfer_stp;
+ caddr_t dma_descp;
+ uint32_t dma_desc_bound;
+ uint32_t pktsize;
+ uint32_t pktcnt;
+ uint32_t wait_for_sync;
+ uint32_t ii;
+ hci1394_desc_t *wv_descp; /* shorthand to local descriptor */
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
+
+ /* determine number and size of pkt desc blocks to create */
+ pktsize = local_ixl_cur_xfer_stp->pkt_size;
+ pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
+
+ /* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
+ if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
+
+ wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
+
+ TNF_PROBE_2(hci1394_bld_recv_buf_ppb_desc_mem_alloc_fail,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
+ ixl_commandp, wvp->ixl_cur_xfer_stp);
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+
+ /*
+ * save xfer_ctl struct addr in compiler_privatep of
+ * current IXL xfer cmd
+ */
+ local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
+
+ /*
+ * if enabled, set wait for sync flag in first descriptor in
+ * descriptor block
+ */
+ if (wvp->ixl_setsyncwait_cnt > 0) {
+ wvp->ixl_setsyncwait_cnt = 1;
+ wait_for_sync = DESC_W_ENBL;
+ } else {
+ wait_for_sync = DESC_W_DSABL;
+ }
+
+ /* create first descriptor block for this recv packet */
+ /* consists of one descriptor and xfer status is enabled */
+ wv_descp = &wvp->descriptor_block[wvp->descriptors];
+ HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
+ wait_for_sync, pktsize);
+ wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
+ wv_descp->branch = 0;
+ wv_descp->status = (pktsize << DESC_ST_RESCOUNT_SHIFT) &
+ DESC_ST_RESCOUNT_MASK;
+ wvp->descriptors++;
+
+ /* useful debug trace info - IXL command, and packet count and size */
+ TNF_PROBE_3_DEBUG(hci1394_bld_recv_buf_ppb_desc_recv_buf_info,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_opaque, ixl_commandp,
+ wvp->ixl_cur_xfer_stp, tnf_int, pkt_count, pktcnt, tnf_int,
+ pkt_size, pktsize);
+
+ /*
+ * generate as many contiguous descriptor blocks as there are
+ * recv pkts
+ */
+ for (ii = 0; ii < pktcnt; ii++) {
+
+ /* if about to create last descriptor block */
+ if (ii == (pktcnt - 1)) {
+ /* check and perform any required hci cache flush */
+ if (hci1394_flush_end_desc_check(wvp, ii) !=
+ DDI_SUCCESS) {
+ TNF_PROBE_1_DEBUG(
+ hci1394_bld_recv_buf_ppb_desc_fl_error,
+ HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_int,
+ for_ii, ii);
+ TNF_PROBE_0_DEBUG(
+ hci1394_bld_recv_buf_ppb_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return;
+ }
+ }
+
+ /* allocate and copy descriptor block to dma memory */
+ if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
+ &dma_desc_bound) != DDI_SUCCESS) {
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return;
+ }
+
+ /*
+ * set dma addrs into xfer_ctl struct (unbound addr (kernel
+ * virtual) is last component (descriptor))
+ */
+ xctlp->dma[ii].dma_bound = dma_desc_bound;
+ xctlp->dma[ii].dma_descp = dma_descp;
+ xctlp->dma[ii].dma_buf = &wvp->dma_currentp->mem;
+
+ /* advance buffer ptr by pktsize in descriptor block */
+ wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
+ pktsize;
+ }
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_bld_recv_buf_fill_desc()
+ * Used to create the openHCI dma descriptor block(s) for a receive buf
+ * in buffer fill mode.
+ */
+static void
+hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp)
+{
+ hci1394_xfer_ctl_t *xctlp;
+ caddr_t dma_descp;
+ uint32_t dma_desc_bound;
+ uint32_t wait_for_sync;
+ ixl1394_xfer_buf_t *local_ixl_cur_xfer_stp;
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
+
+ /* allocate an xfer_ctl struct including 1 xfer_ctl_dma struct */
+ if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
+
+ wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
+
+ TNF_PROBE_2(hci1394_bld_recv_buf_fill_desc_mem_alloc_fail,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EMEM_ALLOC_FAIL: xfer_ctl", tnf_opaque,
+ ixl_commandp, wvp->ixl_cur_xfer_stp);
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+
+ /*
+ * save xfer_ctl struct addr in compiler_privatep of
+ * current IXL xfer cmd
+ */
+ local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
+
+ /*
+ * if enabled, set wait for sync flag in first descriptor of
+ * descriptor block
+ */
+ if (wvp->ixl_setsyncwait_cnt > 0) {
+ wvp->ixl_setsyncwait_cnt = 1;
+ wait_for_sync = DESC_W_ENBL;
+ } else {
+ wait_for_sync = DESC_W_DSABL;
+ }
+
+ /*
+ * create descriptor block for this buffer fill mode recv command which
+ * consists of one descriptor with xfer status enabled
+ */
+ HCI1394_INIT_IR_BF_IMORE(&wvp->descriptor_block[wvp->descriptors],
+ DESC_INTR_DSABL, wait_for_sync, local_ixl_cur_xfer_stp->size);
+
+ wvp->descriptor_block[wvp->descriptors].data_addr =
+ local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
+ wvp->descriptor_block[wvp->descriptors].branch = 0;
+ wvp->descriptor_block[wvp->descriptors].status =
+ (local_ixl_cur_xfer_stp->size << DESC_ST_RESCOUNT_SHIFT) &
+ DESC_ST_RESCOUNT_MASK;
+ wvp->descriptors++;
+
+ /* check and perform any required hci cache flush */
+ if (hci1394_flush_end_desc_check(wvp, 0) != DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return;
+ }
+
+ /* allocate and copy descriptor block to dma memory */
+ if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound)
+ != DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return;
+ }
+
+ /*
+ * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
+ * is last component).
+ */
+ xctlp->dma[0].dma_bound = dma_desc_bound;
+ xctlp->dma[0].dma_descp = dma_descp;
+ xctlp->dma[0].dma_buf = &wvp->dma_currentp->mem;
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_bld_xmit_pkt_desc()
+ * Used to create the openHCI dma descriptor block(s) for a transmit packet.
+ */
+static void
+hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
+{
+ hci1394_xfer_ctl_t *xctlp;
+ hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descriptor */
+ hci1394_desc_t *wv_descp; /* shorthand to local descriptor */
+ caddr_t dma_descp; /* dma bound memory for descriptor */
+ uint32_t dma_desc_bound;
+ uint32_t ii;
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /*
+ * it is an error if the number of descriptors to be built exceeds
+ * the maximum allowed in a descriptor block. Add 2 for the overhead
+ * of the OMORE-Immediate.
+ */
+ if ((wvp->descriptors + 2 + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
+
+ wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
+
+ TNF_PROBE_3(hci1394_bld_xmit_pkt_desc_fragment_oflo_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EFRAGMENT_OFLO", tnf_opaque, ixl_commandp,
+ wvp->ixl_cur_xfer_stp, tnf_int, frag_count,
+ wvp->descriptors + 2 + wvp->xfer_bufcnt);
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+
+ /* it is an error if the total packet length exceeds 0xFFFF */
+ if (wvp->xfer_pktlen > 0xFFFF) {
+
+ wvp->dma_bld_error = IXL1394_EPKTSIZE_MAX_OFLO;
+
+ TNF_PROBE_3(hci1394_bld_xmit_pkt_desc_packet_oflo_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EPKTSIZE_MAX_OFLO", tnf_opaque, ixl_commandp,
+ wvp->ixl_cur_xfer_stp, tnf_int, total_pktlen,
+ wvp->xfer_pktlen);
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+
+ /* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
+ if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
+
+ wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
+
+ TNF_PROBE_2(hci1394_bld_xmit_pkt_desc_mem_alloc_fail,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
+ ixl_commandp, wvp->ixl_cur_cmdp);
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+
+ /*
+ * save xfer_ctl struct addr in compiler_privatep of
+ * current IXL xfer cmd
+ */
+ wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
+
+ /* generate values for the xmit pkt hdrs */
+ hci1394_set_xmit_pkt_hdr(wvp);
+
+ /*
+ * xmit pkt starts with an output more immediate,
+ * a double sized hci1394_desc
+ */
+ wv_omi_descp = (hci1394_output_more_imm_t *)
+ (&wvp->descriptor_block[wvp->descriptors]);
+ HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
+
+ wv_omi_descp->data_addr = 0;
+ wv_omi_descp->branch = 0;
+ wv_omi_descp->status = 0;
+ wv_omi_descp->q1 = wvp->xmit_pkthdr1;
+ wv_omi_descp->q2 = wvp->xmit_pkthdr2;
+ wv_omi_descp->q3 = 0;
+ wv_omi_descp->q4 = 0;
+
+ wvp->descriptors += 2;
+
+ /*
+ * create the required output more hci1394_desc descriptors, then create
+ * an output last hci1394_desc descriptor with xfer status enabled
+ */
+ for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
+ wv_descp = &wvp->descriptor_block[wvp->descriptors];
+
+ if (ii == (wvp->xfer_bufcnt - 1)) {
+ HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL,
+ DESC_INTR_DSABL, wvp->xfer_size[ii]);
+ } else {
+ HCI1394_INIT_IT_OMORE(wv_descp, wvp->xfer_size[ii]);
+ }
+ wv_descp->data_addr = wvp->xfer_bufp[ii];
+ wv_descp->branch = 0;
+ wv_descp->status = 0;
+ wvp->descriptors++;
+ }
+
+ /* allocate and copy descriptor block to dma memory */
+ if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
+ DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return;
+ }
+
+ /*
+ * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
+ * is last component (descriptor))
+ */
+ xctlp->dma[0].dma_bound = dma_desc_bound;
+ xctlp->dma[0].dma_descp =
+ dma_descp + (wvp->xfer_bufcnt + 1) * sizeof (hci1394_desc_t);
+ xctlp->dma[0].dma_buf = &wvp->dma_currentp->mem;
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_bld_xmit_buf_desc()
+ * Used to create the openHCI dma descriptor blocks for a transmit buffer.
+ */
+static void
+hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp)
+{
+ hci1394_xfer_ctl_t *xctlp;
+ ixl1394_xfer_buf_t *local_ixl_cur_xfer_stp;
+ hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descriptor */
+ hci1394_desc_t *wv_descp; /* shorthand to local descriptor */
+ caddr_t dma_descp;
+ uint32_t dma_desc_bound;
+ uint32_t pktsize;
+ uint32_t pktcnt;
+ uint32_t ii;
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
+
+ /* determine number and size of pkt desc blocks to create */
+ pktsize = local_ixl_cur_xfer_stp->pkt_size;
+ pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
+
+ /* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
+ if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
+
+ wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
+
+ TNF_PROBE_2(hci1394_bld_xmit_buf_desc_mem_alloc_fail,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
+ ixl_commandp, wvp->ixl_cur_cmdp);
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+
+ /*
+ * save xfer_ctl struct addr in compiler_privatep of
+ * current IXL xfer cmd
+ */
+ local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
+
+ /* generate values for the xmit pkt hdrs */
+ wvp->xfer_pktlen = pktsize;
+ hci1394_set_xmit_pkt_hdr(wvp);
+
+ /*
+ * xmit pkt starts with an output more immediate,
+ * a double sized hci1394_desc
+ */
+ wv_omi_descp = (hci1394_output_more_imm_t *)
+ &wvp->descriptor_block[wvp->descriptors];
+
+ HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
+
+ wv_omi_descp->data_addr = 0;
+ wv_omi_descp->branch = 0;
+ wv_omi_descp->status = 0;
+ wv_omi_descp->q1 = wvp->xmit_pkthdr1;
+ wv_omi_descp->q2 = wvp->xmit_pkthdr2;
+ wv_omi_descp->q3 = 0;
+ wv_omi_descp->q4 = 0;
+
+ wvp->descriptors += 2;
+
+ /* follow with a single output last descriptor w/status enabled */
+ wv_descp = &wvp->descriptor_block[wvp->descriptors];
+ HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
+ pktsize);
+ wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
+ wv_descp->branch = 0;
+ wv_descp->status = 0;
+ wvp->descriptors++;
+
+ /*
+ * generate as many contiguous descriptor blocks as there are
+ * xmit packets
+ */
+ for (ii = 0; ii < pktcnt; ii++) {
+
+ /* if about to create last descriptor block */
+ if (ii == (pktcnt - 1)) {
+ /* check and perform any required hci cache flush */
+ if (hci1394_flush_end_desc_check(wvp, ii) !=
+ DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(
+ hci1394_bld_xmit_buf_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return;
+ }
+ }
+
+ /* allocate and copy descriptor block to dma memory */
+ if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
+ &dma_desc_bound) != DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return;
+ }
+
+ /*
+ * set dma addrs into xfer_ctl structure (unbound addr
+ * (kernel virtual) is last component (descriptor))
+ */
+ xctlp->dma[ii].dma_bound = dma_desc_bound;
+ xctlp->dma[ii].dma_descp = dma_descp + 2 *
+ sizeof (hci1394_desc_t);
+ xctlp->dma[ii].dma_buf = &wvp->dma_currentp->mem;
+
+ /* advance buffer ptr by pktsize in descriptor block */
+ wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
+ pktsize;
+ }
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_bld_xmit_hdronly_nopkt_desc()
+ * Used to create the openHCI dma descriptor blocks for transmitting
+ * a packet consisting of an isochronous header with no data payload,
+ * or for not sending a packet at all for a cycle.
+ *
+ * A Store_Value openhci descriptor is built at the start of each
+ * IXL1394_OP_SEND_HDR_ONLY and IXL1394_OP_SEND_NO_PKT command's dma
+ * descriptor block (to allow for skip cycle specification and set skipmode
+ * processing for these commands).
+ */
+static void
+hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp)
+{
+ hci1394_xfer_ctl_t *xctlp;
+ hci1394_output_last_t *wv_ol_descp; /* shorthand to local descriptor */
+ hci1394_output_last_imm_t *wv_oli_descp; /* shorthand to local descriptor */
+ caddr_t dma_descp;
+ uint32_t dma_desc_bound;
+ uint32_t repcnt;
+ uint32_t ii;
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* determine # of instances of output hdronly/nopkt to generate */
+ repcnt = ((ixl1394_xmit_special_t *)wvp->ixl_cur_xfer_stp)->count;
+
+ /*
+ * allocate an xfer_ctl structure which includes repcnt
+ * xfer_ctl_dma structs
+ */
+ if ((xctlp = hci1394_alloc_xfer_ctl(wvp, repcnt)) == NULL) {
+
+ wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
+
+ TNF_PROBE_2(hci1394_bld_xmit_hdronly_nopkt_desc_mem_alloc_fail,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
+ ixl_commandp, wvp->ixl_cur_cmdp);
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return;
+ }
+
+ /*
+ * save xfer_ctl struct addr in compiler_privatep of
+ * current IXL xfer command
+ */
+ wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
+
+ /*
+ * create a storevalue descriptor
+ * (will be used for skip vs jump processing)
+ */
+ hci1394_set_xmit_storevalue_desc(wvp);
+
+ /*
+ * processing now based on opcode:
+ * IXL1394_OP_SEND_HDR_ONLY or IXL1394_OP_SEND_NO_PKT
+ */
+ if ((wvp->ixl_cur_xfer_stp->ixl_opcode & ~IXL1394_OPF_UPDATE) ==
+ IXL1394_OP_SEND_HDR_ONLY) {
+
+ /* for header only, generate values for the xmit pkt hdrs */
+ hci1394_set_xmit_pkt_hdr(wvp);
+
+ /*
+ * create an output last immediate (double sized) descriptor
+ * xfer status enabled
+ */
+ wv_oli_descp = (hci1394_output_last_imm_t *)
+ &wvp->descriptor_block[wvp->descriptors];
+
+ HCI1394_INIT_IT_OLAST_IMM(wv_oli_descp, DESC_HDR_STAT_ENBL,
+ DESC_INTR_DSABL);
+
+ wv_oli_descp->data_addr = 0;
+ wv_oli_descp->branch = 0;
+ wv_oli_descp->status = 0;
+ wv_oli_descp->q1 = wvp->xmit_pkthdr1;
+ wv_oli_descp->q2 = wvp->xmit_pkthdr2;
+ wv_oli_descp->q3 = 0;
+ wv_oli_descp->q4 = 0;
+ wvp->descriptors += 2;
+ } else {
+ /*
+ * for skip cycle, create a single output last descriptor
+ * with xfer status enabled
+ */
+ wv_ol_descp = &wvp->descriptor_block[wvp->descriptors];
+ HCI1394_INIT_IT_OLAST(wv_ol_descp, DESC_HDR_STAT_ENBL,
+ DESC_INTR_DSABL, 0);
+ wv_ol_descp->data_addr = 0;
+ wv_ol_descp->branch = 0;
+ wv_ol_descp->status = 0;
+ wvp->descriptors++;
+ }
+
+ /*
+ * generate as many contiguous descriptor blocks as repeat count
+ * indicates
+ */
+ for (ii = 0; ii < repcnt; ii++) {
+
+ /* if about to create last descriptor block */
+ if (ii == (repcnt - 1)) {
+ /* check and perform any required hci cache flush */
+ if (hci1394_flush_end_desc_check(wvp, ii) !=
+ DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(
+ hci1394_bld_xmit_hdronly_nopkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return;
+ }
+ }
+
+ /* allocate and copy descriptor block to dma memory */
+ if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
+ &dma_desc_bound) != DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(
+ hci1394_bld_xmit_hdronly_nopkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return;
+ }
+
+ /*
+ * set dma addrs into xfer_ctl structure (unbound addr
+ * (kernel virtual) is last component (descriptor))
+ */
+ xctlp->dma[ii].dma_bound = dma_desc_bound;
+ xctlp->dma[ii].dma_descp = dma_descp + sizeof (hci1394_desc_t);
+ xctlp->dma[ii].dma_buf = &wvp->dma_currentp->mem;
+ }
+ TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_bld_dma_mem_desc_blk()
+ * Used to put a given OpenHCI descriptor block into dma bound memory.
+ */
+static int
+hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp, caddr_t *dma_descpp,
+ uint32_t *dma_desc_bound)
+{
+ uint32_t dma_bound;
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* set internal error if no descriptor blocks to build */
+ if (wvp->descriptors == 0) {
+
+ wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_1(hci1394_bld_dma_mem_desc_blk_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_INTERNAL_ERROR: no descriptors to build");
+ TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /* allocate dma memory and move this descriptor block to it */
+ *dma_descpp = (caddr_t)hci1394_alloc_dma_mem(wvp, wvp->descriptors *
+ sizeof (hci1394_desc_t), &dma_bound);
+
+ if (*dma_descpp == NULL) {
+
+ wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
+
+ TNF_PROBE_1(hci1394_bld_dma_mem_desc_blk_fail,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EMEM_ALLOC_FAIL: for descriptors");
+ TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+#ifdef _KERNEL
+ ddi_rep_put32(wvp->dma_currentp->mem.bi_handle,
+ (uint_t *)wvp->descriptor_block, (uint_t *)*dma_descpp,
+ wvp->descriptors * (sizeof (hci1394_desc_t) >> 2),
+ DDI_DEV_AUTOINCR);
+#else
+ bcopy(wvp->descriptor_block, *dma_descpp,
+ wvp->descriptors * sizeof (hci1394_desc_t));
+#endif
+ /*
+ * convert allocated block's memory address to bus address space
+ * and include properly set Z bits (descriptor count).
+ */
+ *dma_desc_bound = (dma_bound & ~DESC_Z_MASK) | wvp->descriptors;
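+
+ /*
+ * Illustrative example (not from the original source): descriptor
+ * blocks are 16-byte aligned, so the low bits of the bound address
+ * are free to carry the Z count.  Assuming DESC_Z_MASK covers the
+ * low 4 bits, a 3-descriptor block bound at 0x20000 yields
+ * *dma_desc_bound == 0x20003.
+ */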
+
+ TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * hci1394_set_xmit_pkt_hdr()
+ * Compose the 2 quadlets for the xmit packet header.
+ */
+static void
+hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp)
+{
+ uint16_t tag;
+ uint16_t sync;
+
+ TNF_PROBE_0_DEBUG(hci1394_set_xmit_pkt_hdr_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /*
+ * choose tag and sync bits for header either from default values or
+ * from currently active set tag and sync IXL command
+ * (clear command after use)
+ */
+ if (wvp->ixl_settagsync_cmdp == NULL) {
+ tag = wvp->default_tag;
+ sync = wvp->default_sync;
+ } else {
+ tag = wvp->ixl_settagsync_cmdp->tag;
+ sync = wvp->ixl_settagsync_cmdp->sync;
+ wvp->ixl_settagsync_cmdp = NULL;
+ }
+ tag &= (DESC_PKT_TAG_MASK >> DESC_PKT_TAG_SHIFT);
+ sync &= (DESC_PKT_SY_MASK >> DESC_PKT_SY_SHIFT);
+
+ /*
+ * build xmit pkt header -
+ * hdr1 has speed, tag, channel number and sync bits
+ * hdr2 has the packet length.
+ */
+ wvp->xmit_pkthdr1 = (wvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
+ (tag << DESC_PKT_TAG_SHIFT) | (wvp->ctxtp->isochan <<
+ DESC_PKT_CHAN_SHIFT) | (IEEE1394_TCODE_ISOCH <<
+ DESC_PKT_TCODE_SHIFT) | (sync << DESC_PKT_SY_SHIFT);
+
+ wvp->xmit_pkthdr2 = wvp->xfer_pktlen << DESC_PKT_DATALEN_SHIFT;
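+
+ /*
+ * Worked example (illustrative values, assuming the standard OpenHCI
+ * IT header layout: spd in bits 18:16, tag in 15:14, chanNum in 13:8,
+ * tcode in 7:4, sy in 3:0, and DESC_PKT_DATALEN_SHIFT == 16):
+ * isospd=2 (S400), tag=1, isochan=5, sync=0 and a 512-byte packet
+ * give
+ *     xmit_pkthdr1 = (2<<16)|(1<<14)|(5<<8)|(0xA<<4)|0 = 0x000245a0
+ *     xmit_pkthdr2 = 512<<16 = 0x02000000
+ */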
+
+ TNF_PROBE_0_DEBUG(hci1394_set_xmit_pkt_hdr_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_set_xmit_skip_mode()
+ * Set current skip mode from default or from currently active command.
+ * If non-default skip mode command's skip mode is skip to label, find
+ * and set xfer start IXL command which follows skip to label into
+ * compiler_privatep of set skipmode IXL command.
+ */
+static void
+hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp)
+{
+ int err;
+
+ TNF_PROBE_0_DEBUG(hci1394_set_xmit_skip_mode_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ if (wvp->ixl_setskipmode_cmdp == NULL) {
+ wvp->skipmode = wvp->default_skipmode;
+ wvp->skiplabelp = wvp->default_skiplabelp;
+ wvp->skipxferp = wvp->default_skipxferp;
+ } else {
+ wvp->skipmode = wvp->ixl_setskipmode_cmdp->skipmode;
+ wvp->skiplabelp = wvp->ixl_setskipmode_cmdp->label;
+ wvp->skipxferp = NULL;
+ if (wvp->skipmode == IXL1394_SKIP_TO_LABEL) {
+ err = hci1394_ixl_find_next_exec_xfer(wvp->skiplabelp,
+ NULL, &wvp->skipxferp);
+ if (err == DDI_FAILURE) {
+ TNF_PROBE_2(hci1394_set_xmit_skip_mode_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "IXL1394_ENO_DATA_PKTS: "
+ "label<->jump loop detected for skiplabel "
+ "w/no xfers", tnf_opaque, setskip_cmdp,
+ wvp->ixl_setskipmode_cmdp);
+ wvp->skipxferp = NULL;
+ wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
+ }
+ }
+ wvp->ixl_setskipmode_cmdp->compiler_privatep =
+ (void *)wvp->skipxferp;
+ }
+ TNF_PROBE_0_DEBUG(hci1394_set_xmit_skip_mode_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_set_xmit_storevalue_desc()
+ * Set up store_value DMA descriptor.
+ * XMIT_HDRONLY or XMIT_NOPKT xfer states use a store value as first
+ * descriptor in the descriptor block (to handle skip mode processing)
+ */
+static void
+hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp)
+{
+ TNF_PROBE_0_DEBUG(hci1394_set_xmit_storevalue_desc_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ wvp->descriptors++;
+
+ HCI1394_INIT_IT_STORE(&wvp->descriptor_block[wvp->descriptors - 1],
+ wvp->storevalue_data);
+ wvp->descriptor_block[wvp->descriptors - 1].data_addr =
+ wvp->storevalue_bufp;
+ wvp->descriptor_block[wvp->descriptors - 1].branch = 0;
+ wvp->descriptor_block[wvp->descriptors - 1].status = 0;
+
+ TNF_PROBE_0_DEBUG(hci1394_set_xmit_storevalue_desc_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_set_next_xfer_buf()
+ * This routine adds the data buffer to the current wvp list.
+ * Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
+ * contains the error code.
+ */
+static int
+hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp, uint32_t bufp,
+ uint16_t size)
+{
+ TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* error if buffer pointer is null (size may be 0) */
+ if (bufp == NULL) {
+
+ wvp->dma_bld_error = IXL1394_ENULL_BUFFER_ADDR;
+
+ TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /* count new xfer buffer */
+ wvp->xfer_bufcnt++;
+
+ /* error if exceeds maximum xfer buffer components allowed */
+ if (wvp->xfer_bufcnt > HCI1394_DESC_MAX_Z) {
+
+ wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
+
+ TNF_PROBE_2(hci1394_set_next_xfer_buf_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EFRAGMENT_OFLO", tnf_int, frag_count,
+ wvp->xfer_bufcnt);
+ TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /* save xmit buffer and size */
+ wvp->xfer_bufp[wvp->xfer_bufcnt - 1] = bufp;
+ wvp->xfer_size[wvp->xfer_bufcnt - 1] = size;
+
+ /* accumulate total packet length */
+ wvp->xfer_pktlen += size;
+
+ TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_SUCCESS);
+}
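+
+/*
+ * Usage sketch (illustrative, not from the original source): a packet whose
+ * payload spans two memory fragments arrives as two consecutive calls, e.g.
+ *     (void) hci1394_set_next_xfer_buf(wvp, frag0_ioaddr, 3000);
+ *     (void) hci1394_set_next_xfer_buf(wvp, frag1_ioaddr, 1096);
+ * (frag0_ioaddr/frag1_ioaddr are hypothetical names), after which
+ * xfer_bufcnt == 2 and xfer_pktlen == 4096.  The fragment count is capped
+ * at HCI1394_DESC_MAX_Z because each fragment consumes one OpenHCI
+ * descriptor in the block and Z encodes the block's descriptor count.
+ */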
+
+/*
+ * hci1394_flush_end_desc_check()
+ * Check if a flush is required before the last descriptor block of a
+ * non-unary set generated by an xfer buf or xmit special command, or
+ * before a unary set, provided no other flush has already been done.
+ *
+ * hci flush is required if xfer is finalized by an updateable
+ * jump command.
+ *
+ * Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
+ * will contain the error code.
+ */
+static int
+hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp, uint32_t count)
+{
+ TNF_PROBE_0_DEBUG(hci1394_flush_end_desc_check_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ if ((count != 0) ||
+ ((wvp->xfer_hci_flush & (UPDATEABLE_XFER | UPDATEABLE_SET |
+ INITIATING_LBL)) == 0)) {
+
+ if (wvp->xfer_hci_flush & UPDATEABLE_JUMP) {
+ if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
+
+ TNF_PROBE_0_DEBUG(
+ hci1394_flush_end_desc_check_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* wvp->dma_bld_error is set by above call */
+ return (DDI_FAILURE);
+ }
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_flush_end_desc_check_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * hci1394_flush_hci_cache()
+ * Sun hci controller (RIO) implementation specific processing!
+ *
+ * Allocate dma memory for 1 hci descriptor block which will be left unused.
+ * During execution this will cause a break in the contiguous address space
+ * processing required by Sun's RIO implementation of the ohci controller and
+ * will require the controller to refetch the next descriptor block from
+ * host memory.
+ *
+ * General rules for cache flush preceding a descriptor block in dma memory:
+ * 1. Current IXL Xfer Command Updateable Rule:
+ * Cache flush of IXL xfer command is required if it, or any of the
+ * non-start IXL packet xfer commands associated with it, is flagged
+ * updateable.
+ * 2. Next IXL Xfer Command Indeterminate Rule:
+ * Cache flush of IXL xfer command is required if an IXL jump command
+ * which is flagged updateable has finalized the current IXL xfer
+ * command.
+ * 3. Updateable IXL Set Command Rule:
+ * Cache flush of an IXL xfer command is required if any of the IXL
+ * "Set" commands (IXL1394_OP_SET_*) associated with the IXL xfer
+ * command (i.e. immediately preceding it) is flagged updateable.
+ * 4. Label Initiating Xfer Command Rule:
+ * Cache flush of IXL xfer command is required if it is initiated by a
+ * label IXL command. (This is to allow both a flush of the cache and
+ * an interrupt to be generated easily and in close proximity to each
+ * other. This can make possible a simpler, more successful reset of
+ * descriptor statuses, especially under circumstances where the cycle
+ * of hci commands is short and/or there are no callbacks distributed
+ * through the span of xfers, etc... This is especially important for
+ * input, where statuses must be reset before execution cycles back
+ * again.)
+ *
+ * Application of above rules:
+ * Packet mode IXL xfer commands:
+ * If any of the above flush rules apply, flush cache should be done
+ * immediately preceding the generation of the dma descriptor block
+ * for the packet xfer.
+ * Non-packet mode IXL xfer commands (including IXL1394_OP_*BUF*,
+ * SEND_HDR_ONLY, and SEND_NO_PKT):
+ * If Rule #1, #3, or #4 applies, a flush cache should be done
+ * immediately before the first generated dma descriptor block of the
+ * non-packet xfer.
+ * If Rule #2 applies, a flush cache should be done immediately before
+ * the last generated dma descriptor block of the non-packet xfer.
+ *
+ * Note: The flush cache should be done at most once in each location that is
+ * required to be flushed no matter how many rules apply (i.e. only once
+ * before the first descriptor block and/or only once before the last
+ * descriptor block generated). If more than one place requires a flush,
+ * then both flush operations must be performed. This is determined by
+ * taking all rules that apply into account.
+ *
+ * Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
+ * will contain the error code.
+ */
+static int
+hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp)
+{
+ uint32_t dma_bound;
+
+ TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t), &dma_bound) ==
+ NULL) {
+
+ wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
+
+ TNF_PROBE_1(hci1394_flush_hci_cache_fail,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EMEM_ALLOC_FAIL: for flush_hci_cache");
+ TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_SUCCESS);
+}
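+
+/*
+ * Example of the flush rules from the comment above (hypothetical IXL
+ * program, illustrative only):
+ *
+ *     LABEL(loop)            Rule #4: xfer is initiated by a label
+ *     SEND_BUF_U(bufcmd)     Rule #1: the xfer itself is updateable
+ *     JUMP_U(loop)           Rule #2: an updateable jump finalizes it
+ *
+ * Rules #1 and #4 call for a flush before the first descriptor block
+ * generated for the SEND_BUF_U, and Rule #2 for another flush before its
+ * last descriptor block; both are performed, but each location is flushed
+ * only once.
+ */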
+
+/*
+ * hci1394_alloc_storevalue_dma_mem()
+ * Allocate dma memory for a single hci descriptor block which will
+ * be used as the dma memory location that ixl compiler generated
+ * storevalue descriptor commands will specify as the location to
+ * store their data value.
+ *
+ * Returns 32-bit bound address of allocated mem, or NULL.
+ */
+static uint32_t
+hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp)
+{
+ uint32_t dma_bound;
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t),
+ &dma_bound) == NULL) {
+
+ wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
+
+ TNF_PROBE_2(hci1394_alloc_storevalue_dma_mem_fail,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "IXL1394_EMEM_ALLOC_FAIL: for storevalue dma",
+ tnf_opaque, ixl_commandp, wvp->ixl_cur_cmdp);
+ TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (NULL);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* return bound address of allocated memory */
+ return (dma_bound);
+}
+
+
+/*
+ * hci1394_alloc_xfer_ctl()
+ * Allocate an xfer_ctl structure.
+ */
+static hci1394_xfer_ctl_t *
+hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp, uint32_t dmacnt)
+{
+ hci1394_xfer_ctl_t *xcsp;
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /*
+ * allocate an xfer_ctl struct which includes dmacnt of
+ * xfer_ctl_dma structs
+ */
+#ifdef _KERNEL
+ if ((xcsp = (hci1394_xfer_ctl_t *)kmem_zalloc(
+ (sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
+ sizeof (hci1394_xfer_ctl_dma_t)), KM_NOSLEEP)) == NULL) {
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (NULL);
+ }
+#else
+ /*
+ * This section makes it possible to easily run and test the compiler in
+ * user mode.
+ */
+ if ((xcsp = (hci1394_xfer_ctl_t *)calloc(1,
+ sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
+ sizeof (hci1394_xfer_ctl_dma_t))) == NULL) {
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (NULL);
+ }
+#endif
+ /*
+ * set dma structure count into allocated xfer_ctl struct for
+ * later deletion.
+ */
+ xcsp->cnt = dmacnt;
+
+ /* link it to previously allocated xfer_ctl structs or set as first */
+ if (wvp->xcs_firstp == NULL) {
+ wvp->xcs_firstp = wvp->xcs_currentp = xcsp;
+ } else {
+ wvp->xcs_currentp->ctl_nextp = xcsp;
+ wvp->xcs_currentp = xcsp;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* return allocated xfer_ctl structure */
+ return (xcsp);
+}
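+
+/*
+ * Memory layout sketch for the allocation above, assuming the trailing
+ * array "struct hack" (hci1394_xfer_ctl_t ends in a 1-element dma[] array
+ * that is over-allocated to hold dmacnt entries):
+ *
+ *     +----------------------------------------+
+ *     | hci1394_xfer_ctl_t (includes dma[0])    |
+ *     +----------------------------------------+
+ *     | hci1394_xfer_ctl_dma_t dma[1]           |
+ *     | ...                                     |
+ *     | hci1394_xfer_ctl_dma_t dma[dmacnt - 1]  |
+ *     +----------------------------------------+
+ *
+ * hence the (dmacnt - 1) term in the size computation.
+ */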
+
+/*
+ * hci1394_alloc_dma_mem()
+ * Allocates and binds memory for openHCI DMA descriptors as needed.
+ */
+static void *
+hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp, uint32_t size,
+ uint32_t *dma_bound)
+{
+ hci1394_idma_desc_mem_t *dma_new;
+ hci1394_buf_parms_t parms;
+ hci1394_buf_info_t *memp;
+ void *dma_mem_ret;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /*
+ * if no dma has been allocated or current request exceeds
+ * remaining memory
+ */
+ if ((wvp->dma_currentp == NULL) ||
+ (size > (wvp->dma_currentp->mem.bi_cookie.dmac_size -
+ wvp->dma_currentp->used))) {
+#ifdef _KERNEL
+ /* kernel-mode memory allocation for driver */
+
+ /* allocate struct to track more dma descriptor memory */
+ if ((dma_new = (hci1394_idma_desc_mem_t *)
+ kmem_zalloc(sizeof (hci1394_idma_desc_mem_t),
+ KM_NOSLEEP)) == NULL) {
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (NULL);
+ }
+
+ /*
+ * if more cookies available from the current mem, try to find
+ * one of suitable size. Cookies that are too small will be
+ * skipped and unused. Given that cookie size is always at least
+ * 1 page long and HCI1394_DESC_MAX_Z is much smaller than that,
+ * it's a small price to pay for code simplicity.
+ */
+ if (wvp->dma_currentp != NULL) {
+ /* new struct is derived from current */
+ memp = &wvp->dma_currentp->mem;
+ dma_new->mem = *memp;
+ dma_new->offset = wvp->dma_currentp->offset +
+ memp->bi_cookie.dmac_size;
+
+ for (; memp->bi_cookie_count > 1;
+ memp->bi_cookie_count--) {
+ ddi_dma_nextcookie(memp->bi_dma_handle,
+ &dma_new->mem.bi_cookie);
+
+ if (dma_new->mem.bi_cookie.dmac_size >= size) {
+ dma_new->mem_handle =
+ wvp->dma_currentp->mem_handle;
+ wvp->dma_currentp->mem_handle = NULL;
+ dma_new->mem.bi_cookie_count--;
+ break;
+ }
+ dma_new->offset +=
+ dma_new->mem.bi_cookie.dmac_size;
+ }
+ }
+
+ /* if no luck with current buffer, allocate a new one */
+ if (dma_new->mem_handle == NULL) {
+ parms.bp_length = HCI1394_IXL_PAGESIZE;
+ parms.bp_max_cookies = OHCI_MAX_COOKIE;
+ parms.bp_alignment = 16;
+ ret = hci1394_buf_alloc(&wvp->soft_statep->drvinfo,
+ &parms, &dma_new->mem, &dma_new->mem_handle);
+ if (ret != DDI_SUCCESS) {
+ kmem_free(dma_new,
+ sizeof (hci1394_idma_desc_mem_t));
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (NULL);
+ }
+
+ /* paranoia: this is not supposed to happen */
+ if (dma_new->mem.bi_cookie.dmac_size < size) {
+ hci1394_buf_free(&dma_new->mem_handle);
+ kmem_free(dma_new,
+ sizeof (hci1394_idma_desc_mem_t));
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (NULL);
+ }
+ dma_new->offset = 0;
+ }
+#else
+ /* user-mode memory allocation for user mode compiler tests */
+ /* allocate another dma_desc_mem struct */
+ if ((dma_new = (hci1394_idma_desc_mem_t *)
+ calloc(1, sizeof (hci1394_idma_desc_mem_t))) == NULL) {
+ TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (NULL);
+ }
+ dma_new->mem.bi_dma_handle = NULL;
+ dma_new->mem.bi_handle = NULL;
+ if ((dma_new->mem.bi_kaddr = (caddr_t)calloc(1,
+ HCI1394_IXL_PAGESIZE)) == NULL) {
+ TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (NULL);
+ }
+ dma_new->mem.bi_cookie.dmac_address =
+ (unsigned long)dma_new->mem.bi_kaddr;
+ dma_new->mem.bi_real_length = HCI1394_IXL_PAGESIZE;
+ dma_new->mem.bi_cookie_count = 1;
+#endif
+
+ /* if this is not first dma_desc_mem, link last one to it */
+ if (wvp->dma_currentp != NULL) {
+ wvp->dma_currentp->dma_nextp = dma_new;
+ wvp->dma_currentp = dma_new;
+ } else {
+ /* else set it as first one */
+ wvp->dma_currentp = wvp->dma_firstp = dma_new;
+ }
+ }
+
+ /* now allocate requested memory from current block */
+ dma_mem_ret = wvp->dma_currentp->mem.bi_kaddr +
+ wvp->dma_currentp->offset + wvp->dma_currentp->used;
+ *dma_bound = wvp->dma_currentp->mem.bi_cookie.dmac_address +
+ wvp->dma_currentp->used;
+ wvp->dma_currentp->used += size;
+
+ TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (dma_mem_ret);
+}
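+
+/*
+ * Suballocation sketch (illustrative, not from the original source): with a
+ * freshly allocated HCI1394_IXL_PAGESIZE block, successive calls carve it
+ * out linearly, e.g.
+ *     p1 = hci1394_alloc_dma_mem(wvp, 64, &b1);    used: 0  -> 64
+ *     p2 = hci1394_alloc_dma_mem(wvp, 32, &b2);    used: 64 -> 96
+ * so p2 == p1 + 64 and b2 == b1 + 64.  A request that no longer fits
+ * triggers the cookie-walk/new-buffer path above.
+ */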
+
+
+/*
+ * hci1394_is_opcode_valid()
+ * Given an ixl opcode, this routine returns B_TRUE if it is a
+ * recognized opcode and B_FALSE if it is not.
+ * Note that the FULL 16 bits of the opcode are checked, which includes
+ * various flags, not just the low-order 8 bits of unique code.
+ */
+static boolean_t
+hci1394_is_opcode_valid(uint16_t ixlopcode)
+{
+ TNF_PROBE_0_DEBUG(hci1394_is_opcode_valid_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* if it's not one we know about, then it's bad */
+ switch (ixlopcode) {
+ case IXL1394_OP_LABEL:
+ case IXL1394_OP_JUMP:
+ case IXL1394_OP_CALLBACK:
+ case IXL1394_OP_RECV_PKT:
+ case IXL1394_OP_RECV_PKT_ST:
+ case IXL1394_OP_RECV_BUF:
+ case IXL1394_OP_SEND_PKT:
+ case IXL1394_OP_SEND_PKT_ST:
+ case IXL1394_OP_SEND_PKT_WHDR_ST:
+ case IXL1394_OP_SEND_BUF:
+ case IXL1394_OP_SEND_HDR_ONLY:
+ case IXL1394_OP_SEND_NO_PKT:
+ case IXL1394_OP_STORE_TIMESTAMP:
+ case IXL1394_OP_SET_TAGSYNC:
+ case IXL1394_OP_SET_SKIPMODE:
+ case IXL1394_OP_SET_SYNCWAIT:
+ case IXL1394_OP_JUMP_U:
+ case IXL1394_OP_CALLBACK_U:
+ case IXL1394_OP_RECV_PKT_U:
+ case IXL1394_OP_RECV_PKT_ST_U:
+ case IXL1394_OP_RECV_BUF_U:
+ case IXL1394_OP_SEND_PKT_U:
+ case IXL1394_OP_SEND_PKT_ST_U:
+ case IXL1394_OP_SEND_PKT_WHDR_ST_U:
+ case IXL1394_OP_SEND_BUF_U:
+ case IXL1394_OP_SET_TAGSYNC_U:
+ case IXL1394_OP_SET_SKIPMODE_U:
+ TNF_PROBE_1_DEBUG(hci1394_is_opcode_valid,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "ixl opcode is valid");
+ TNF_PROBE_0_DEBUG(hci1394_is_opcode_valid_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (B_TRUE);
+ default:
+ TNF_PROBE_2(hci1394_is_opcode_valid_error,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "ixl opcode is NOT valid", tnf_opaque, ixl_opcode,
+ ixlopcode);
+ TNF_PROBE_0_DEBUG(hci1394_is_opcode_valid_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (B_FALSE);
+ }
+}
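+
+/*
+ * Note on the _U variants accepted above: as the masking with
+ * ~IXL1394_OPF_UPDATE elsewhere in this file suggests, an updateable opcode
+ * is the base opcode with the update flag set, e.g. (illustrative)
+ *     IXL1394_OP_JUMP_U == (IXL1394_OP_JUMP | IXL1394_OPF_UPDATE)
+ * which is why the full 16 bits, flags included, must be matched here.
+ */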
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_ixl_isr.c b/usr/src/uts/common/io/1394/adapters/hci1394_ixl_isr.c
new file mode 100644
index 0000000000..886b8565f8
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_ixl_isr.c
@@ -0,0 +1,1125 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 1999-2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_ixl_isr.c
+ * Isochronous IXL Interrupt Service Routines.
+ * The interrupt handler determines which OpenHCI DMA descriptors
+ * have been executed by the hardware, tracks the path in the
+ * corresponding IXL program, issues callbacks as needed, and resets
+ * the OpenHCI DMA descriptors.
+ */
+
+#include <sys/types.h>
+#include <sys/conf.h>
+
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/ixl1394.h>
+#include <sys/1394/adapters/hci1394.h>
+
+
+/* Return values for local hci1394_ixl_intr_check_done() */
+#define IXL_CHECK_LOST (-1) /* ixl cmd intr processing lost */
+#define IXL_CHECK_DONE 0 /* ixl cmd intr processing done */
+#define IXL_CHECK_SKIP 1 /* ixl cmd intr processing context skipped */
+#define IXL_CHECK_STOP 2 /* ixl cmd intr processing context stopped */
+
+static boolean_t hci1394_ixl_intr_check_xfer(hci1394_state_t *soft_statep,
+ hci1394_iso_ctxt_t *ctxtp, ixl1394_command_t *ixlp,
+ ixl1394_command_t **ixlnextpp, uint16_t *timestampp, int *donecodep);
+static int hci1394_ixl_intr_check_done(hci1394_state_t *soft_statep,
+ hci1394_iso_ctxt_t *ctxtp);
+
+/*
+ * hci1394_ixl_interrupt
+ * main entry point (front-end) into interrupt processing.
+ * acquires mutex, checks if update in progress, sets flags accordingly,
+ * and calls to do real interrupt processing.
+ */
+void
+hci1394_ixl_interrupt(hci1394_state_t *soft_statep,
+ hci1394_iso_ctxt_t *ctxtp, boolean_t in_stop)
+{
+ uint_t status;
+ int retcode;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_interrupt_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ status = 1;
+
+ /* acquire the interrupt processing context mutex */
+ mutex_enter(&ctxtp->intrprocmutex);
+
+ /* set flag to indicate that interrupt processing is required */
+ ctxtp->intr_flags |= HCI1394_ISO_CTXT_INTRSET;
+
+ /* if update proc already in progress, let it handle intr processing */
+ if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
+ retcode = HCI1394_IXL_INTR_INUPDATE;
+ status = 0;
+ TNF_PROBE_1_DEBUG(hci1394_ixl_interrupt_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "HCI1394_IXL_INTR_INUPDATE");
+
+ } else if (ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
+ /* else fatal error if intr processing already in progress */
+ retcode = HCI1394_IXL_INTR_ININTR;
+ status = 0;
+ TNF_PROBE_1(hci1394_ixl_interrupt_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "HCI1394_IXL_INTR_ININTR");
+
+ } else if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INCALL) {
+ /* else fatal error if callback in progress flag is set */
+ retcode = HCI1394_IXL_INTR_INCALL;
+ status = 0;
+ TNF_PROBE_1_DEBUG(hci1394_ixl_interrupt_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "HCI1394_IXL_INTR_INCALL");
+ } else if (!in_stop && (ctxtp->intr_flags & HCI1394_ISO_CTXT_STOP)) {
+ /* context is being stopped */
+ retcode = HCI1394_IXL_INTR_STOP;
+ status = 0;
+ TNF_PROBE_1_DEBUG(hci1394_ixl_interrupt_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "HCI1394_IXL_INTR_STOP");
+ }
+
+ /*
+ * if context is available, reserve it, do interrupt processing
+ * and free it
+ */
+ if (status) {
+ ctxtp->intr_flags |= HCI1394_ISO_CTXT_ININTR;
+ ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
+ mutex_exit(&ctxtp->intrprocmutex);
+
+ retcode = hci1394_ixl_dma_sync(soft_statep, ctxtp);
+
+ mutex_enter(&ctxtp->intrprocmutex);
+ ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_ININTR;
+
+ /* notify stop thread that the interrupt is finished */
+ if ((ctxtp->intr_flags & HCI1394_ISO_CTXT_STOP) && !in_stop) {
+ cv_signal(&ctxtp->intr_cv);
+ }
+ }
+
+ /* free the intr processing context mutex before error checks */
+ mutex_exit(&ctxtp->intrprocmutex);
+
+ /* if context stopped, invoke callback */
+ if (retcode == HCI1394_IXL_INTR_DMASTOP) {
+ hci1394_do_stop(soft_statep, ctxtp, B_TRUE, ID1394_DONE);
+ }
+ /* if error, stop and invoke callback */
+ if (retcode == HCI1394_IXL_INTR_DMALOST) {
+ hci1394_do_stop(soft_statep, ctxtp, B_TRUE, ID1394_FAIL);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_interrupt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
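+
+/*
+ * Flag-handshake sketch for the routine above (a summary, not new protocol):
+ * INTRSET marks "interrupt work pending"; ININTR, INUPDATE and INCALL mark
+ * who currently owns IXL processing.  An uncontended interrupt runs
+ *     set INTRSET -> set ININTR, clear INTRSET (mutex held)
+ *     -> hci1394_ixl_dma_sync() -> clear ININTR, cv_signal() any stopper.
+ * If an update or callback already owns the context, the interrupt leaves
+ * INTRSET set and returns, letting the owner pick up the pending work.
+ */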
+
+/*
+ * hci1394_ixl_dma_sync()
+ * the heart of interrupt processing, this routine correlates where the
+ * hardware is for the specified context with the IXL program. Invokes
+ * callbacks as needed. Also called by "update" to make sure ixl is
+ * sync'ed up with where the hardware is.
+ * Returns one of the defined HCI1394_IXL_INTR_* return codes:
+ * DMALOST, DMASTOP, NOADV or NOERROR.
+ */
+int
+hci1394_ixl_dma_sync(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp)
+{
+ ixl1394_command_t *ixlp = NULL; /* current ixl command */
+ ixl1394_command_t *ixlnextp; /* next ixl command */
+ uint16_t ixlopcode;
+ uint16_t timestamp;
+ int donecode;
+ boolean_t isdone;
+
+ void (*callback)(opaque_t, struct ixl1394_callback *);
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_dma_sync_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ ASSERT(MUTEX_NOT_HELD(&ctxtp->intrprocmutex));
+
+ /* start from the xfer start ixl cmd where we last left off */
+ ixlnextp = ctxtp->ixl_execp;
+
+ /* last completed descriptor block's timestamp */
+ timestamp = ctxtp->dma_last_time;
+
+ /*
+ * follow execution path in IXL, until find dma descriptor in IXL
+ * xfer command whose status isn't set or until run out of IXL cmds
+ */
+ while (ixlnextp != NULL) {
+ ixlp = ixlnextp;
+ ixlnextp = ixlp->next_ixlp;
+ ixlopcode = ixlp->ixl_opcode & ~IXL1394_OPF_UPDATE;
+
+ /*
+ * process IXL commands: xfer start, callback, store timestamp
+ * and jump and ignore the others
+ */
+
+ /* determine if this is an xfer start IXL command */
+ if (((ixlopcode & IXL1394_OPF_ISXFER) != 0) &&
+ ((ixlopcode & IXL1394_OPTY_MASK) != 0)) {
+
+ /* process xfer cmd to see if HW has been here */
+ isdone = hci1394_ixl_intr_check_xfer(soft_statep, ctxtp,
+ ixlp, &ixlnextp, &timestamp, &donecode);
+
+ if (isdone == B_TRUE) {
+ TNF_PROBE_0_DEBUG(hci1394_ixl_dma_sync_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (donecode);
+ }
+
+ /* continue to process next IXL command */
+ continue;
+ }
+
+ /* else check if IXL cmd - jump, callback or store timestamp */
+ switch (ixlopcode) {
+ case IXL1394_OP_JUMP:
+ /*
+ * set next IXL cmd to label ptr in current IXL jump cmd
+ */
+ ixlnextp = ((ixl1394_jump_t *)ixlp)->label;
+ break;
+
+ case IXL1394_OP_STORE_TIMESTAMP:
+ /*
+ * set last timestamp value recorded into current IXL
+ * cmd
+ */
+ ((ixl1394_store_timestamp_t *)ixlp)->timestamp =
+ timestamp;
+ break;
+
+ case IXL1394_OP_CALLBACK:
+ /*
+ * if callback function is specified, call it with IXL
+ * cmd addr. Make sure to grab the lock before setting
+ * the "in callback" flag in intr_flags.
+ */
+ mutex_enter(&ctxtp->intrprocmutex);
+ ctxtp->intr_flags |= HCI1394_ISO_CTXT_INCALL;
+ mutex_exit(&ctxtp->intrprocmutex);
+
+ callback = ((ixl1394_callback_t *)ixlp)->callback;
+ if (callback != NULL) {
+ callback(ctxtp->global_callback_arg,
+ (ixl1394_callback_t *)ixlp);
+ }
+
+ /*
+ * And grab the lock again before clearing
+ * the "in callback" flag.
+ */
+ mutex_enter(&ctxtp->intrprocmutex);
+ ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INCALL;
+ mutex_exit(&ctxtp->intrprocmutex);
+ break;
+ }
+ }
+
+ /*
+ * If we jumped to NULL because of an updateable JUMP, set ixl_execp
+ * back to ixlp. The destination label might get updated to a
+ * non-NULL value.
+ */
+ if ((ixlp != NULL) && (ixlp->ixl_opcode == IXL1394_OP_JUMP_U)) {
+ ctxtp->ixl_execp = ixlp;
+ TNF_PROBE_1_DEBUG(hci1394_ixl_dma_sync_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "INTR_NOERROR");
+ return (HCI1394_IXL_INTR_NOERROR);
+ }
+
+ /* save null IXL cmd and depth and last timestamp */
+ ctxtp->ixl_execp = NULL;
+ ctxtp->ixl_exec_depth = 0;
+ ctxtp->dma_last_time = timestamp;
+
+ ctxtp->rem_noadv_intrs = 0;
+
+
+ /* return stopped status if at end of IXL cmds & context stopped */
+ if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
+ TNF_PROBE_1_DEBUG(hci1394_ixl_dma_sync_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "INTR_DMASTOP");
+ return (HCI1394_IXL_INTR_DMASTOP);
+ }
+
+ /* else interrupt processing is lost */
+ TNF_PROBE_1_DEBUG(hci1394_ixl_dma_sync_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg, "INTR_DMALOST");
+ return (HCI1394_IXL_INTR_DMALOST);
+}
+
+/*
+ * hci1394_ixl_intr_check_xfer()
+ * Process the given IXL xfer cmd, checking the status of each of its dma
+ * descriptor blocks until one is found whose status isn't set, until full
+ * depth is reached at the current IXL command, or until a hardware skip
+ * is found to have occurred.
+ *
+ * Returns B_TRUE if processing should terminate (either have stopped
+ * or encountered an error), and B_FALSE if it should continue looking.
+ * If B_TRUE, donecodep contains the reason: HCI1394_IXL_INTR_DMALOST,
+ * HCI1394_IXL_INTR_DMASTOP, HCI1394_IXL_INTR_NOADV, or
+ * HCI1394_IXL_INTR_NOERROR.  NOERROR means that the current location
+ * has been determined and no further looking is needed.
+ */
+static boolean_t
+hci1394_ixl_intr_check_xfer(hci1394_state_t *soft_statep,
+ hci1394_iso_ctxt_t *ctxtp, ixl1394_command_t *ixlp,
+ ixl1394_command_t **ixlnextpp, uint16_t *timestampp, int *donecodep)
+{
+ uint_t dma_advances;
+ int intrstatus;
+ uint_t skipped;
+ hci1394_xfer_ctl_t *xferctlp;
+ uint16_t ixldepth;
+ uint16_t ixlopcode;
+
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_intr_check_xfer_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ *donecodep = 0;
+ dma_advances = 0;
+ ixldepth = ctxtp->ixl_exec_depth;
+ ixlopcode = ixlp->ixl_opcode & ~IXL1394_OPF_UPDATE;
+
+ /* get control struct for this xfer start IXL command */
+ xferctlp = (hci1394_xfer_ctl_t *)ixlp->compiler_privatep;
+
+ skipped = 0;
+ while ((skipped == 0) && (ixldepth < xferctlp->cnt)) {
+ /*
+ * check if status is set in dma descriptor
+ * block at cur depth in cur xfer start IXL cmd
+ */
+ if (hci1394_ixl_check_status(&xferctlp->dma[ixldepth],
+ ixlopcode, timestampp, B_TRUE) != 0) {
+
+ /* advance depth to next desc block in cur IXL cmd */
+ ixldepth++;
+
+ /*
+ * count dma desc blks whose status was set
+ * (i.e. advanced to next dma desc)
+ */
+ dma_advances++;
+ continue;
+ }
+
+ /* if get to here, status is not set */
+
+ /*
+ * cur IXL cmd dma desc status not set. save IXL cur cmd
+ * and depth and last timestamp for next time.
+ */
+ ctxtp->ixl_execp = ixlp;
+ ctxtp->ixl_exec_depth = ixldepth;
+ ctxtp->dma_last_time = *timestampp;
+
+ /*
+ * check whether the dma descriptor processing location is
+ * indeterminate (lost), or the context has stopped, is done,
+ * or has skipped
+ */
+ intrstatus = hci1394_ixl_intr_check_done(soft_statep, ctxtp);
+ if (intrstatus == IXL_CHECK_LOST) {
+ /*
+ * location indeterminate, try once more to determine
+ * current state. First, recheck if status has become
+ * set in cur dma descriptor block. (don't reset status
+ * here if is set)
+ */
+ if (hci1394_ixl_check_status(&xferctlp->dma[ixldepth],
+ ixlopcode, timestampp, 1) != B_TRUE) {
+ /* resume from where we left off */
+ skipped = 0;
+ continue;
+ }
+
+ /*
+ * status not set, check intr processing
+ * completion status again
+ */
+ if ((intrstatus = hci1394_ixl_intr_check_done(
+ soft_statep, ctxtp)) == IXL_CHECK_LOST) {
+ /*
+ * location still indeterminate,
+ * processing is lost
+ */
+ *donecodep = HCI1394_IXL_INTR_DMALOST;
+
+ TNF_PROBE_1_DEBUG(
+ hci1394_ixl_intr_check_xfer_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "",
+ tnf_string, msg, "INTR_DMALOST");
+ return (B_TRUE);
+ }
+ }
+
+ /*
+ * if dma processing stopped. current location has been
+ * determined.
+ */
+ if (intrstatus == IXL_CHECK_STOP) {
+ /*
+ * save timestamp, clear currently executing IXL
+ * command and depth. return stopped.
+ */
+ ctxtp->ixl_execp = NULL;
+ ctxtp->ixl_exec_depth = 0;
+ ctxtp->dma_last_time = *timestampp;
+ ctxtp->rem_noadv_intrs = 0;
+
+ *donecodep = HCI1394_IXL_INTR_DMASTOP;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_xfer_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "INTR_DMASTOP");
+ return (B_TRUE);
+ }
+
+ /*
+ * dma processing done for now. current location has
+ * been determined
+ */
+ if (intrstatus == IXL_CHECK_DONE) {
+ /*
+ * if in update processing call:
+ * clear update processing flag & return ok.
+ * if dma advances happened, reset to max allowed.
+ * however, if none have, don't reduce remaining
+ * amount - that's for real interrupt call to adjust.
+ */
+ if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
+
+ if (dma_advances > 0) {
+ ctxtp->rem_noadv_intrs =
+ ctxtp->max_noadv_intrs;
+ }
+
+ *donecodep = HCI1394_IXL_INTR_NOERROR;
+
+ TNF_PROBE_1_DEBUG(
+ hci1394_ixl_intr_check_xfer_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "",
+ tnf_string, msg, "INTR_NOERROR");
+ return (B_TRUE);
+ }
+
+ /*
+ * else, not in update call processing, are in normal
+ * intr call. if no dma statuses were found set
+ * (i.e. no dma advances), reduce remaining count of
+ * interrupts allowed with no I/O completions
+ */
+ if (dma_advances == 0) {
+ ctxtp->rem_noadv_intrs--;
+ } else {
+ /*
+ * else some dma statuses were found set.
+ * reinit remaining count of interrupts allowed
+ * with no I/O completions
+ */
+ ctxtp->rem_noadv_intrs = ctxtp->max_noadv_intrs;
+ }
+
+ /*
+ * if no remaining count of interrupts allowed with no
+ * I/O completions, return failure (no dma advance after
+ * max retries), else return ok
+ */
+ if (ctxtp->rem_noadv_intrs == 0) {
+ *donecodep = HCI1394_IXL_INTR_NOADV;
+
+ TNF_PROBE_1_DEBUG(
+ hci1394_ixl_intr_check_xfer_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "",
+ tnf_string, msg, "INTR_NOADV");
+ return (B_TRUE);
+ }
+
+ *donecodep = HCI1394_IXL_INTR_NOERROR;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_xfer_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "INTR_NOERROR2");
+ return (B_TRUE);
+ }
+
+ /*
+ * else (intrstatus == IXL_CHECK_SKIP) indicating skip has
+ * occurred, retrieve current IXL cmd, depth, and timestamp and
+ * continue interrupt processing
+ */
+ skipped = 1;
+ *ixlnextpp = ctxtp->ixl_execp;
+ ixldepth = ctxtp->ixl_exec_depth;
+ *timestampp = ctxtp->dma_last_time;
+
+ /*
+ * also count intervening skips to the next posted
+ * dma descriptor as one advance.
+ */
+ dma_advances++;
+ }
+
+ /*
+ * if full depth reached at current IXL cmd, set back to start for next
+ * IXL xfer command that will be processed
+ */
+ if ((skipped == 0) && (ixldepth >= xferctlp->cnt)) {
+ ctxtp->ixl_exec_depth = 0;
+ }
+
+ /*
+ * make sure rem_noadv_intrs is reset to max if we advanced.
+ */
+ if (dma_advances > 0) {
+ ctxtp->rem_noadv_intrs = ctxtp->max_noadv_intrs;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_intr_check_xfer_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* continue to process next IXL command */
+ return (B_FALSE);
+}
+
+/*
+ * hci1394_ixl_intr_check_done()
+ * checks if context has stopped, or if able to match hardware location
+ * with an expected IXL program location.
+ */
+static int
+hci1394_ixl_intr_check_done(hci1394_state_t *soft_statep,
+ hci1394_iso_ctxt_t *ctxtp)
+{
+ ixl1394_command_t *ixlp;
+ hci1394_xfer_ctl_t *xferctlp;
+ uint_t ixldepth;
+ hci1394_xfer_ctl_dma_t *dma;
+ ddi_acc_handle_t acc_hdl;
+ ddi_dma_handle_t dma_hdl;
+ uint32_t desc_status;
+ hci1394_desc_t *hcidescp;
+ off_t hcidesc_off;
+ int err;
+ uint32_t dma_cmd_cur_loc;
+ uint32_t dma_cmd_last_loc;
+ uint32_t dma_loc_check_enabled;
+ uint32_t dmastartp;
+ uint32_t dmaendp;
+
+ uint_t rem_dma_skips;
+ uint16_t skipmode;
+ uint16_t skipdepth;
+ ixl1394_command_t *skipdestp;
+ ixl1394_command_t *skipxferp;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_intr_check_done_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /*
+ * start looking through the IXL list from the xfer start command where
+ * we last left off (for composite opcodes, need to start from the
+ * appropriate depth).
+ */
+
+ ixlp = ctxtp->ixl_execp;
+ ixldepth = ctxtp->ixl_exec_depth;
+
+ /* control struct for xfer start IXL command */
+ xferctlp = (hci1394_xfer_ctl_t *)ixlp->compiler_privatep;
+ dma = &xferctlp->dma[ixldepth];
+
+ /* determine if dma location checking is enabled */
+ if ((dma_loc_check_enabled =
+ (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_CMDREG)) != 0) {
+
+ /* if so, get current dma command location */
+ dma_cmd_last_loc = 0xFFFFFFFF;
+
+ while ((dma_cmd_cur_loc = HCI1394_ISOCH_CTXT_CMD_PTR(
+ soft_statep, ctxtp)) != dma_cmd_last_loc) {
+
+ /* retry get until location register stabilizes */
+ dma_cmd_last_loc = dma_cmd_cur_loc;
+ }
+ }
+
+ /*
+ * compare the (bound) address of the DMA descriptor corresponding to
+ * the current xfer IXL command against the current value in the
+ * DMA location register. If exists and if matches, then
+ * if context stopped, return stopped, else return done.
+ *
+ * The dma start address is the first address of the descriptor block.
+ * Since "Z" is a count of 16-byte descriptors in the block, calculate
+ * the end address by adding Z*16 to the start addr.
+ */
+ dmastartp = dma->dma_bound & ~DESC_Z_MASK;
+ dmaendp = dmastartp + ((dma->dma_bound & DESC_Z_MASK) << 4);
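+
+ /*
+ * Illustrative example: dma_bound == 0x20003 encodes a block of
+ * Z == 3 descriptors starting at 0x20000, so dmastartp == 0x20000
+ * and dmaendp == 0x20000 + 3*16 == 0x20030 (assuming DESC_Z_MASK
+ * covers the low 4 bits of the bound address).
+ */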
+
+ if (dma_loc_check_enabled &&
+ ((dma_cmd_cur_loc >= dmastartp) && (dma_cmd_cur_loc < dmaendp))) {
+
+ if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_STOP");
+ return (IXL_CHECK_STOP);
+ }
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_DONE");
+ return (IXL_CHECK_DONE);
+ }
+
+ /*
+ * if receive mode:
+ */
+ if ((ixlp->ixl_opcode & IXL1394_OPF_ONXMIT) == 0) {
+ /*
+ * if context stopped, return stopped, else,
+ * if there is no current dma location reg, return done
+ * else return location indeterminate
+ */
+ if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_STOP");
+ return (IXL_CHECK_STOP);
+ }
+ if (!dma_loc_check_enabled) {
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_DONE");
+ return (IXL_CHECK_DONE);
+ }
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_LOST");
+ return (IXL_CHECK_LOST);
+ }
+
+ /*
+ * else is xmit mode:
+ * check status of current xfer IXL command's dma descriptor
+ */
+ acc_hdl = dma->dma_buf->bi_handle;
+ dma_hdl = dma->dma_buf->bi_dma_handle;
+ hcidescp = (hci1394_desc_t *)dma->dma_descp;
+ hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;
+
+ /* Sync the descriptor before we get the status */
+ err = ddi_dma_sync(dma_hdl, hcidesc_off, sizeof (hci1394_desc_t),
+ DDI_DMA_SYNC_FORCPU);
+ if (err != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_ixl_intr_check_done_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "dma_sync() failed");
+ }
+ desc_status = ddi_get32(acc_hdl, &hcidescp->status);
+
+ if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
+
+ /*
+ * if status is now set here, return skipped, to cause calling
+ * function to continue, even though location hasn't changed
+ */
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_SKIP");
+ return (IXL_CHECK_SKIP);
+ }
+
+ /*
+ * At this point, we have gotten to a DMA descriptor with an empty
+ * status. This is not enough information however to determine that
+ * we've found all processed DMA descriptors because during cycle-lost
+ * conditions, the HW will skip over some descriptors without writing
+ * status. So we have to look ahead until we're convinced that the HW
+ * hasn't jumped ahead.
+ *
+ * Follow the IXL skip-to links until find one whose status is set
+ * or until dma location register (if any) matches an xfer IXL
+ * command's dma location or until have examined max_dma_skips
+ * IXL commands.
+ */
+ rem_dma_skips = ctxtp->max_dma_skips;
+
+ while (rem_dma_skips-- > 0) {
+
+ /*
+ * get either IXL command specific or
+ * system default skipmode info
+ */
+ skipdepth = 0;
+ if (xferctlp->skipmodep != NULL) {
+ skipmode = xferctlp->skipmodep->skipmode;
+ skipdestp = xferctlp->skipmodep->label;
+ skipxferp = (ixl1394_command_t *)
+ xferctlp->skipmodep->compiler_privatep;
+ } else {
+ skipmode = ctxtp->default_skipmode;
+ skipdestp = ctxtp->default_skiplabelp;
+ skipxferp = ctxtp->default_skipxferp;
+ }
+
+ switch (skipmode) {
+
+ case IXL1394_SKIP_TO_SELF:
+ /*
+ * mode is skip to self:
+ * if context is stopped, return stopped, else
+ * if dma location reg not enabled, return done
+ * else, return location indeterminate
+ */
+ if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
+ 0) {
+ TNF_PROBE_1_DEBUG(
+ hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "",
+ tnf_string, msg, "CHECK_STOP");
+ return (IXL_CHECK_STOP);
+ }
+
+ if (!dma_loc_check_enabled) {
+ TNF_PROBE_1_DEBUG(
+ hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "",
+ tnf_string, msg, "CHECK_DONE");
+ return (IXL_CHECK_DONE);
+ }
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_LOST");
+ return (IXL_CHECK_LOST);
+
+ case IXL1394_SKIP_TO_NEXT:
+ /*
+ * mode is skip to next:
+ * set potential skip target to current command at
+ * next depth
+ */
+ skipdestp = ixlp;
+ skipxferp = ixlp;
+ skipdepth = ixldepth + 1;
+
+ /*
+ * if now at max depth for the current cmd, adjust to the
+ * next IXL command.
+ *
+ * (NOTE: next means next IXL command along execution
+ * path, whatever IXL command it might be. e.g. store
+ * timestamp or callback or label or jump or send... )
+ */
+ if (skipdepth >= xferctlp->cnt) {
+ skipdepth = 0;
+ skipdestp = ixlp->next_ixlp;
+ skipxferp = xferctlp->execp;
+ }
+
+ /* evaluate skip to status further, below */
+ break;
+
+
+ case IXL1394_SKIP_TO_LABEL:
+ /*
+ * mode is skip to label:
+ * set skip destination depth to 0 (should be
+ * redundant)
+ */
+ skipdepth = 0;
+
+ /* evaluate skip to status further, below */
+ break;
+
+ case IXL1394_SKIP_TO_STOP:
+ /*
+ * mode is skip to stop:
+ * set all xfer and destination skip to locations to
+ * null
+ */
+ skipxferp = NULL;
+ skipdestp = NULL;
+ skipdepth = 0;
+
+ /* evaluate skip to status further, below */
+ break;
+
+ } /* end switch */
+
+ /*
+ * if no xfer IXL command follows at or after current skip-to
+ * location
+ */
+ if (skipxferp == NULL) {
+ /*
+ * if context is stopped, return stopped, else
+ * if dma location reg not enabled, return done
+ * else, return location indeterminate
+ */
+ if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
+ 0) {
+ TNF_PROBE_1_DEBUG(
+ hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "",
+ tnf_string, msg, "CHECK_STOP");
+ return (IXL_CHECK_STOP);
+ }
+
+ if (!dma_loc_check_enabled) {
+ TNF_PROBE_1_DEBUG(
+ hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "",
+ tnf_string, msg, "CHECK_DONE");
+ return (IXL_CHECK_DONE);
+ }
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_LOST");
+ return (IXL_CHECK_LOST);
+ }
+
+ /*
+ * if the skip to xfer IXL dma descriptor's status is set,
+ * then execution did skip
+ */
+ xferctlp = (hci1394_xfer_ctl_t *)skipxferp->compiler_privatep;
+ dma = &xferctlp->dma[skipdepth];
+ acc_hdl = dma->dma_buf->bi_handle;
+ dma_hdl = dma->dma_buf->bi_dma_handle;
+ hcidescp = (hci1394_desc_t *)dma->dma_descp;
+ hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;
+
+ /* Sync the descriptor before we get the status */
+ err = ddi_dma_sync(dma_hdl, hcidesc_off,
+ sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);
+ if (err != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_ixl_intr_check_done_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "dma_sync() failed");
+ }
+ desc_status = ddi_get32(acc_hdl, &hcidescp->status);
+
+ if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
+
+ /*
+ * adjust to continue from skip to IXL command and
+ * return skipped, to have calling func continue.
+ * (Note: next IXL command may be any allowed IXL
+ * command)
+ */
+ ctxtp->ixl_execp = skipdestp;
+ ctxtp->ixl_exec_depth = skipdepth;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_SKIP");
+ return (IXL_CHECK_SKIP);
+ }
+
+ /*
+ * if dma location command register checking is enabled,
+ * and the skip to xfer IXL dma location matches current
+ * dma location register value, execution did skip
+ */
+ dmastartp = dma->dma_bound & ~DESC_Z_MASK;
+ dmaendp = dmastartp + ((dma->dma_bound & DESC_Z_MASK) << 4);
+
+ if (dma_loc_check_enabled && ((dma_cmd_cur_loc >= dmastartp) &&
+ (dma_cmd_cur_loc < dmaendp))) {
+
+ /* if the context is stopped, return stopped */
+ if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
+ 0) {
+ TNF_PROBE_1_DEBUG(
+ hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "",
+ tnf_string, msg, "CHECK STOP");
+ return (IXL_CHECK_STOP);
+ }
+ /*
+ * adjust to continue from skip to IXL command and
+ * return skipped, to have calling func continue
+ * (Note: next IXL command may be any allowed IXL cmd)
+ */
+ ctxtp->ixl_execp = skipdestp;
+ ctxtp->ixl_exec_depth = skipdepth;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_SKIP");
+ return (IXL_CHECK_SKIP);
+ }
+
+ /*
+ * else, advance working current locn to skipxferp and
+ * skipdepth and continue skip evaluation loop processing
+ */
+ ixlp = skipxferp;
+ ixldepth = skipdepth;
+
+ } /* end while */
+
+ /*
+ * didn't find dma status set, nor location reg match, along skip path
+ *
+ * if context is stopped, return stopped,
+ *
+ * else if no current location reg active don't change context values,
+ * just return done (no skip)
+ *
+ * else, return location indeterminate
+ */
+
+ if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_STOP");
+ return (IXL_CHECK_STOP);
+ }
+ if (!dma_loc_check_enabled) {
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "CHECK_DONE");
+ return (IXL_CHECK_DONE);
+ }
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg, "CHECK_LOST");
+ return (IXL_CHECK_LOST);
+}
+
+/*
+ * hci1394_isoch_cycle_inconsistent()
+ * Called during interrupt notification to indicate that the cycle time
+ * has changed unexpectedly. We need to take this opportunity to
+ * update our tracking of each running transmit context's execution.
+ * cycle_inconsistent only affects transmit, so recv contexts are left alone.
+ */
+void
+hci1394_isoch_cycle_inconsistent(hci1394_state_t *soft_statep)
+{
+ int i, cnt_thresh;
+ boolean_t note;
+ hrtime_t current_time, last_time, delta, delta_thresh;
+ hci1394_iso_ctxt_t *ctxtp; /* current context */
+
+ ASSERT(soft_statep);
+ TNF_PROBE_0_DEBUG(hci1394_isoch_cycle_inconsistent_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ hci1394_ohci_intr_clear(soft_statep->ohci, OHCI_INTR_CYC_INCONSISTENT);
+
+ /* grab the mutex before checking each context's INUSE and RUNNING */
+ mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
+
+ /* check for transmit contexts which are inuse and running */
+ for (i = 0; i < soft_statep->isoch->ctxt_xmit_count; i++) {
+ ctxtp = &soft_statep->isoch->ctxt_xmit[i];
+
+ if ((ctxtp->ctxt_flags &
+ (HCI1394_ISO_CTXT_INUSE | HCI1394_ISO_CTXT_RUNNING)) != 0) {
+
+ mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
+ hci1394_ixl_interrupt(soft_statep, ctxtp, B_FALSE);
+ mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
+ }
+ }
+
+ /*
+ * get the current time and calculate the delta between now and
+ * when the last interrupt was processed. (NOTE: if the time
+ * returned by gethrtime() rolls-over while we are counting these
+ * interrupts, we will incorrectly restart the counting process.
+ * However, because the probability of this happening is small and
+ * not catching the roll-over will AT MOST double the time it takes
+ * us to discover and correct this condition, we can safely
+ * ignore it.)
+ */
+ current_time = gethrtime();
+ last_time = soft_statep->isoch->cycle_incon_thresh.last_intr_time;
+ delta = current_time - last_time;
+
+ /*
+ * compare the calculated delta to the delta T threshold. If it
+ * is less than the threshold, then increment the counter. If it
+ * is not then reset the counter.
+ */
+ delta_thresh = soft_statep->isoch->cycle_incon_thresh.delta_t_thresh;
+ if (delta < delta_thresh)
+ soft_statep->isoch->cycle_incon_thresh.delta_t_counter++;
+ else
+ soft_statep->isoch->cycle_incon_thresh.delta_t_counter = 0;
+
+ /*
+ * compare the counter to the counter threshold. If it is greater,
+ * then disable the cycle inconsistent interrupt.
+ */
+ cnt_thresh = soft_statep->isoch->cycle_incon_thresh.counter_thresh;
+ note = B_FALSE;
+ if (soft_statep->isoch->cycle_incon_thresh.delta_t_counter >
+ cnt_thresh) {
+ hci1394_ohci_intr_disable(soft_statep->ohci,
+ OHCI_INTR_CYC_INCONSISTENT);
+ note = B_TRUE;
+ }
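+
+ /*
+ * Illustrative throttle arithmetic (threshold values are tunables
+ * set elsewhere; these numbers are assumptions): with
+ * delta_t_thresh == 10ms and counter_thresh == 10, the interrupt is
+ * disabled only after more than 10 consecutive interrupts each
+ * arrive within 10ms of the previous one; a single longer gap
+ * resets the count.
+ */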
+
+ /* save away the current time into the last_intr_time field */
+ soft_statep->isoch->cycle_incon_thresh.last_intr_time = current_time;
+
+ mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
+
+ if (note == B_TRUE) {
+ cmn_err(CE_NOTE, "!hci1394(%d): cycle_inconsistent interrupt "
+ "disabled until next bus reset",
+ soft_statep->drvinfo.di_instance);
+ TNF_PROBE_1(hci1394_isoch_cycle_inconsistent_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, msg,
+ "CYCLE_INCONSISTENT intr disabled until next bus reset");
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_isoch_cycle_inconsistent_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+
+/*
+ * hci1394_isoch_cycle_lost()
+ * Interrupt indicates an expected cycle_start packet (and therefore our
+ * opportunity to transmit) did not show up. Update our tracking of each
+ * running transmit context.
+ */
+void
+hci1394_isoch_cycle_lost(hci1394_state_t *soft_statep)
+{
+ int i, cnt_thresh;
+ boolean_t note;
+ hrtime_t current_time, last_time, delta, delta_thresh;
+ hci1394_iso_ctxt_t *ctxtp; /* current context */
+
+ ASSERT(soft_statep);
+ TNF_PROBE_0_DEBUG(hci1394_isoch_cycle_lost_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ hci1394_ohci_intr_clear(soft_statep->ohci, OHCI_INTR_CYC_LOST);
+
+ /* grab the mutex before checking each context's INUSE and RUNNING */
+ mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
+
+ /* check for transmit contexts which are inuse and running */
+ for (i = 0; i < soft_statep->isoch->ctxt_xmit_count; i++) {
+ ctxtp = &soft_statep->isoch->ctxt_xmit[i];
+
+ if ((ctxtp->ctxt_flags &
+ (HCI1394_ISO_CTXT_INUSE | HCI1394_ISO_CTXT_RUNNING)) != 0) {
+
+ mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
+ hci1394_ixl_interrupt(soft_statep, ctxtp, B_FALSE);
+ mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
+ }
+ }
+
+ /*
+ * get the current time and calculate the delta between now and
+ * when the last interrupt was processed. (NOTE: if the time
+ * returned by gethrtime() rolls-over while we are counting these
+ * interrupts, we will incorrectly restart the counting process.
+ * However, because the probability of this happening is small and
+ * not catching the roll-over will AT MOST double the time it takes
+ * us to discover and correct this condition, we can safely
+ * ignore it.)
+ */
+ current_time = gethrtime();
+ last_time = soft_statep->isoch->cycle_lost_thresh.last_intr_time;
+ delta = current_time - last_time;
+
+ /*
+ * compare the calculated delta to the delta T threshold. If it
+ * is less than the threshold, then increment the counter. If it
+ * is not then reset the counter.
+ */
+ delta_thresh = soft_statep->isoch->cycle_lost_thresh.delta_t_thresh;
+ if (delta < delta_thresh)
+ soft_statep->isoch->cycle_lost_thresh.delta_t_counter++;
+ else
+ soft_statep->isoch->cycle_lost_thresh.delta_t_counter = 0;
+
+ /*
+ * compare the counter to the counter threshold. If it is greater,
+ * then disable the cycle lost interrupt.
+ */
+ cnt_thresh = soft_statep->isoch->cycle_lost_thresh.counter_thresh;
+ note = B_FALSE;
+ if (soft_statep->isoch->cycle_lost_thresh.delta_t_counter >
+ cnt_thresh) {
+ hci1394_ohci_intr_disable(soft_statep->ohci,
+ OHCI_INTR_CYC_LOST);
+ note = B_TRUE;
+ }
+
+ /* save away the current time into the last_intr_time field */
+ soft_statep->isoch->cycle_lost_thresh.last_intr_time = current_time;
+
+ mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
+
+ if (note == B_TRUE) {
+ cmn_err(CE_NOTE, "!hci1394(%d): cycle_lost interrupt "
+ "disabled until next bus reset",
+ soft_statep->drvinfo.di_instance);
+ TNF_PROBE_1(hci1394_isoch_cycle_lost_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, msg,
+ "CYCLE_LOST intr disabled until next bus reset");
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_isoch_cycle_lost_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_ixl_misc.c b/usr/src/uts/common/io/1394/adapters/hci1394_ixl_misc.c
new file mode 100644
index 0000000000..845f901898
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_ixl_misc.c
@@ -0,0 +1,508 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 1999-2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_ixl_misc.c
+ * Isochronous IXL miscellaneous routines.
+ * Contains common routines used by the ixl compiler, interrupt handler and
+ * dynamic update.
+ */
+
+#include <sys/kmem.h>
+#include <sys/types.h>
+#include <sys/conf.h>
+
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/ixl1394.h>
+#include <sys/1394/adapters/hci1394.h>
+
+
+/* local routines */
+static void hci1394_delete_dma_desc_mem(hci1394_state_t *soft_statep,
+ hci1394_idma_desc_mem_t *);
+static void hci1394_delete_xfer_ctl(hci1394_xfer_ctl_t *);
+
+
+/*
+ * hci1394_ixl_set_start()
+ * Set up the context structure with the first ixl command to process
+ * and the first hci descriptor to execute.
+ *
+ * This function assumes the current context is stopped!
+ *
+ * If ixlstp IS NOT null AND is not the first compiled ixl command and
+ * is not an ixl label command, returns an error.
+ * If ixlstp IS null, uses the first compiled ixl command (ixl_firstp)
+ * in place of ixlstp.
+ *
+ * If no executable xfer is found along the exec path from ixlstp, returns error.
+ */
+int
+hci1394_ixl_set_start(hci1394_iso_ctxt_t *ctxtp, ixl1394_command_t *ixlstp)
+{
+ ixl1394_command_t *ixl_exec_startp;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_set_start_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* if ixl start command is null, use first compiled ixl command */
+ if (ixlstp == NULL) {
+ ixlstp = ctxtp->ixl_firstp;
+ }
+
+ /*
+ * if ixl start command is not first ixl compiled and is not a label,
+ * error
+ */
+ if ((ixlstp != ctxtp->ixl_firstp) && (ixlstp->ixl_opcode !=
+ IXL1394_OP_LABEL)) {
+ TNF_PROBE_0_DEBUG(hci1394_ixl_set_start_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (-1);
+ }
+
+ /* follow exec path to find first ixl command that's an xfer command */
+ (void) hci1394_ixl_find_next_exec_xfer(ixlstp, NULL, &ixl_exec_startp);
+
+ /*
+	 * if there was one, then the hci1394_xfer_ctl structure in its
+	 * compiler_privatep has the appropriate bound address
+ */
+ if (ixl_exec_startp != NULL) {
+
+ /* set up for start of context and return done */
+ ctxtp->dma_mem_execp = (uint32_t)((hci1394_xfer_ctl_t *)
+ ixl_exec_startp->compiler_privatep)->dma[0].dma_bound;
+
+ ctxtp->dma_last_time = 0;
+ ctxtp->ixl_exec_depth = 0;
+ ctxtp->ixl_execp = ixlstp;
+ ctxtp->rem_noadv_intrs = ctxtp->max_noadv_intrs;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_set_start_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (0);
+ }
+
+	/* else no executable xfer command found, return error */
+ TNF_PROBE_0_DEBUG(hci1394_ixl_set_start_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (1);
+}
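+
+/*
+ * A minimal usage sketch (the context is assumed stopped, as required
+ * above): restart a context at the top of its compiled IXL program by
+ * passing a NULL start command.
+ *
+ *	if (hci1394_ixl_set_start(ctxtp, NULL) != 0) {
+ *		(bad start command, or no executable xfer on the
+ *		exec path - do not start the context)
+ *	}
+ */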
+#ifdef _KERNEL
+/*
+ * hci1394_ixl_reset_status()
+ * Reset all statuses in all hci descriptor blocks associated with the
+ * current linked list of compiled ixl commands.
+ *
+ * This function assumes the current context is stopped!
+ */
+void
+hci1394_ixl_reset_status(hci1394_iso_ctxt_t *ctxtp)
+{
+ ixl1394_command_t *ixlcur;
+ ixl1394_command_t *ixlnext;
+ hci1394_xfer_ctl_t *xferctlp;
+ uint_t ixldepth;
+ uint16_t timestamp;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_reset_status_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ ixlnext = ctxtp->ixl_firstp;
+
+ /*
+ * Scan for next ixl xfer start command along ixl link path.
+ * Once xfer command found, clear its hci descriptor block's
+	 * status. If it is a composite ixl xfer command, clear statuses
+ * in each of its hci descriptor blocks.
+ */
+ while (ixlnext != NULL) {
+
+ /* set current and next ixl command */
+ ixlcur = ixlnext;
+ ixlnext = ixlcur->next_ixlp;
+
+ /* skip to examine next if this is not xfer start ixl command */
+ if (((ixlcur->ixl_opcode & IXL1394_OPF_ISXFER) == 0) ||
+ ((ixlcur->ixl_opcode & IXL1394_OPTY_MASK) == 0)) {
+ continue;
+ }
+
+ /* get control struct for this xfer start ixl command */
+ xferctlp = (hci1394_xfer_ctl_t *)ixlcur->compiler_privatep;
+
+ /* clear status in each hci descriptor block for this ixl cmd */
+ ixldepth = 0;
+ while (ixldepth < xferctlp->cnt) {
+ (void) hci1394_ixl_check_status(
+ &xferctlp->dma[ixldepth], ixlcur->ixl_opcode,
+ &timestamp, B_TRUE);
+ ixldepth++;
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_reset_status_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+#endif
+/*
+ * hci1394_ixl_find_next_exec_xfer()
+ * Follows the execution path of the ixl linked list until it finds the next
+ * xfer start IXL command (which may be the current IXL command) or reaches
+ * the end of the IXL linked list. Counts callback commands found along the
+ * way. (Previously, counted
+ * store timestamp commands, as well.)
+ *
+ * To detect an infinite loop of label<->jump without an intervening xfer,
+ * a tolerance level of HCI1394_IXL_MAX_SEQ_JUMPS is used. Once this
+ * number of jumps is traversed, the IXL prog is assumed to have a loop.
+ *
+ * Returns DDI_SUCCESS or DDI_FAILURE. DDI_FAILURE indicates an infinite
+ * loop of labels & jumps was detected without any intervening xfers.
+ * DDI_SUCCESS indicates the next_exec_ixlpp contains the next xfer ixlp
+ * address, or NULL indicating the end of the list was reached. Note that
+ * DDI_FAILURE can only be returned during the IXL compilation phase, and
+ * not during ixl_update processing.
+ */
+int
+hci1394_ixl_find_next_exec_xfer(ixl1394_command_t *ixl_start,
+ uint_t *callback_cnt, ixl1394_command_t **next_exec_ixlpp)
+{
+ uint16_t ixlopcode;
+ boolean_t xferfound;
+ ixl1394_command_t *ixlp;
+ int ii;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_find_next_exec_xfer_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ ixlp = ixl_start;
+ xferfound = B_FALSE;
+ ii = HCI1394_IXL_MAX_SEQ_JUMPS;
+ if (callback_cnt != NULL) {
+ *callback_cnt = 0;
+ }
+
+ /* continue until xfer start ixl cmd or end of ixl list found */
+ while ((xferfound == B_FALSE) && (ixlp != NULL) && (ii > 0)) {
+
+ /* get current ixl cmd opcode without update flag */
+ ixlopcode = ixlp->ixl_opcode & ~IXL1394_OPF_UPDATE;
+
+ /* if found an xfer start ixl command, are done */
+ if (((ixlopcode & IXL1394_OPF_ISXFER) != 0) &&
+ ((ixlopcode & IXL1394_OPTY_MASK) != 0)) {
+ xferfound = B_TRUE;
+ continue;
+ }
+
+ /* if found jump command, adjust to follow its path */
+ if (ixlopcode == IXL1394_OP_JUMP) {
+ ixlp = (ixl1394_command_t *)
+ ((ixl1394_jump_t *)ixlp)->label;
+ ii--;
+
+ /* if exceeded tolerance, give up */
+ if (ii == 0) {
+ TNF_PROBE_1(
+ hci1394_ixl_find_next_exec_xfer_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "Infinite loop w/no xfers");
+ TNF_PROBE_0_DEBUG(
+ hci1394_ixl_find_next_exec_xfer_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+ continue;
+ }
+
+ /* if current ixl command is a callback, count it */
+ if ((ixlopcode == IXL1394_OP_CALLBACK) &&
+ (callback_cnt != NULL)) {
+ (*callback_cnt)++;
+ }
+
+ /* advance to next linked ixl command */
+ ixlp = ixlp->next_ixlp;
+ }
+
+ /* return ixl xfer start command found, if any */
+ *next_exec_ixlpp = ixlp;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_find_next_exec_xfer_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_SUCCESS);
+}
+#ifdef _KERNEL
+/*
+ * hci1394_ixl_check_status()
+ * Read the descriptor status and hdrs, clear as appropriate.
+ */
+int32_t
+hci1394_ixl_check_status(hci1394_xfer_ctl_dma_t *dma, uint16_t ixlopcode,
+ uint16_t *timestamp, boolean_t do_status_reset)
+{
+ uint16_t bufsiz;
+ uint16_t hcicnt;
+ uint16_t hcirecvcnt;
+ hci1394_desc_t *hcidescp;
+ off_t hcidesc_off;
+ ddi_acc_handle_t acc_hdl;
+ ddi_dma_handle_t dma_hdl;
+ uint32_t desc_status;
+ uint32_t desc_hdr;
+ int err;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* last dma descriptor in descriptor block from dma structure */
+ hcidescp = (hci1394_desc_t *)(dma->dma_descp);
+ hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;
+ acc_hdl = dma->dma_buf->bi_handle;
+ dma_hdl = dma->dma_buf->bi_dma_handle;
+
+ /* if current ixl command opcode is xmit */
+ if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {
+
+ /* Sync the descriptor before we get the status */
+ err = ddi_dma_sync(dma_hdl, hcidesc_off,
+ sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);
+ if (err != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_ixl_check_status_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "dma_sync() failed");
+ }
+ desc_status = ddi_get32(acc_hdl, &hcidescp->status);
+
+ /* check if status is set in last dma descriptor in block */
+ if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
+ /*
+ * dma descriptor status set - I/O done.
+ * if not to reset status, just return; else extract
+ * timestamp, reset desc status and return dma
+ * descriptor block status set
+ */
+ if (do_status_reset == B_FALSE) {
+ return (1);
+ }
+ *timestamp = (uint16_t)
+ ((desc_status & DESC_ST_TIMESTAMP_MASK) >>
+ DESC_ST_TIMESTAMP_SHIFT);
+ ddi_put32(acc_hdl, &hcidescp->status, 0);
+
+ /* Sync descriptor for device (status was cleared) */
+ err = ddi_dma_sync(dma_hdl, hcidesc_off,
+ sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORDEV);
+ if (err != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_ixl_check_status_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "dma_sync() failed");
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (1);
+ }
+ /* else, return dma descriptor block status not set */
+ TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (0);
+ }
+
+ /* else current ixl opcode is recv */
+ hcirecvcnt = 0;
+
+ /* get count of descriptors in current dma descriptor block */
+ hcicnt = dma->dma_bound & DESC_Z_MASK;
+ hcidescp -= (hcicnt - 1);
+ hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;
+
+ /* iterate fwd through hci descriptors until end or find status set */
+ while (hcicnt-- != 0) {
+
+ /* Sync the descriptor before we get the status */
+ err = ddi_dma_sync(dma_hdl, hcidesc_off,
+ hcicnt * sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);
+ if (err != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_ixl_check_status_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
+ "dma_sync() failed");
+ }
+
+ desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
+
+		/* get cur buffer size & accumulate potential buffer usage */
+ bufsiz = (desc_hdr & DESC_HDR_REQCOUNT_MASK) >>
+ DESC_HDR_REQCOUNT_SHIFT;
+ hcirecvcnt += bufsiz;
+
+ desc_status = ddi_get32(acc_hdl, &hcidescp->status);
+
+ /* check if status set on this descriptor block descriptor */
+ if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
+ /*
+ * dma descriptor status set - I/O done.
+ * if not to reset status, just return; else extract
+ * buffer space used, reset desc status and return dma
+ * descriptor block status set
+ */
+ if (do_status_reset == B_FALSE) {
+ TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (1);
+ }
+
+ hcirecvcnt -= (desc_status & DESC_ST_RESCOUNT_MASK) >>
+ DESC_ST_RESCOUNT_SHIFT;
+ *timestamp = hcirecvcnt;
+ desc_status = (bufsiz << DESC_ST_RESCOUNT_SHIFT) &
+ DESC_ST_RESCOUNT_MASK;
+ ddi_put32(acc_hdl, &hcidescp->status, desc_status);
+
+ /* Sync descriptor for device (status was cleared) */
+ err = ddi_dma_sync(dma_hdl, hcidesc_off,
+ sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORDEV);
+ if (err != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_ixl_check_status_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
+ errmsg, "dma_sync() failed");
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (1);
+ } else {
+ /* else, set to evaluate next descriptor. */
+ hcidescp++;
+ hcidesc_off = (off_t)hcidescp -
+ (off_t)dma->dma_buf->bi_kaddr;
+ }
+ }
+
+ /* return input not complete status */
+ TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (0);
+}
+#endif
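+
+/*
+ * On the receive path above, the byte count actually landed in a descriptor
+ * is its request count minus its residual count.  A minimal sketch of that
+ * accounting, using the masks and shifts from the loop above:
+ *
+ *	reqcnt = (hdr & DESC_HDR_REQCOUNT_MASK) >> DESC_HDR_REQCOUNT_SHIFT;
+ *	rescnt = (status & DESC_ST_RESCOUNT_MASK) >> DESC_ST_RESCOUNT_SHIFT;
+ *	received += reqcnt - rescnt;
+ */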
+/*
+ * hci1394_ixl_cleanup()
+ * Delete all memory that has earlier been allocated for a context's IXL prog
+ */
+void
+hci1394_ixl_cleanup(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp)
+{
+ TNF_PROBE_0_DEBUG(hci1394_ixl_cleanup_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ hci1394_delete_xfer_ctl((hci1394_xfer_ctl_t *)ctxtp->xcs_firstp);
+ hci1394_delete_dma_desc_mem(soft_statep, ctxtp->dma_firstp);
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_cleanup_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_delete_dma_desc_mem()
+ * Iterate through linked list of dma memory descriptors, deleting
+ * allocated dma memory blocks, then deleting the dma memory
+ * descriptor after advancing to next one
+ */
+static void
+/* ARGSUSED */
+hci1394_delete_dma_desc_mem(hci1394_state_t *soft_statep,
+ hci1394_idma_desc_mem_t *dma_firstp)
+{
+ hci1394_idma_desc_mem_t *dma_next;
+
+ TNF_PROBE_0_DEBUG(hci1394_delete_dma_desc_mem_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ while (dma_firstp != NULL) {
+ dma_next = dma_firstp->dma_nextp;
+#ifdef _KERNEL
+ /*
+ * if this dma descriptor memory block has the handles, then
+ * free the memory. (Note that valid handles are kept only with
+ * the most recently acquired cookie, and that each cookie is in
+		 * its own idma_desc_mem_t struct.)
+ */
+ if (dma_firstp->mem_handle != NULL) {
+ hci1394_buf_free(&dma_firstp->mem_handle);
+ }
+
+ /* free current dma memory descriptor */
+ kmem_free(dma_firstp, sizeof (hci1394_idma_desc_mem_t));
+#else
+ /* user mode free */
+ /* free dma memory block and current dma mem descriptor */
+ free(dma_firstp->mem.bi_kaddr);
+ free(dma_firstp);
+#endif
+ /* advance to next dma memory descriptor */
+ dma_firstp = dma_next;
+ }
+ TNF_PROBE_0_DEBUG(hci1394_delete_dma_desc_mem_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
+
+/*
+ * hci1394_delete_xfer_ctl()
+ *	Iterate through linked list of xfer_ctl structs, deleting allocated memory.
+ */
+void
+hci1394_delete_xfer_ctl(hci1394_xfer_ctl_t *xcsp)
+{
+ hci1394_xfer_ctl_t *delp;
+
+ TNF_PROBE_0_DEBUG(hci1394_delete_xfer_ctl_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ while ((delp = xcsp) != NULL) {
+ /* advance ptr to next xfer_ctl struct */
+ xcsp = xcsp->ctl_nextp;
+
+ /*
+ * delete current xfer_ctl struct and included
+ * xfer_ctl_dma structs
+ */
+#ifdef _KERNEL
+ kmem_free(delp,
+ sizeof (hci1394_xfer_ctl_t) +
+ sizeof (hci1394_xfer_ctl_dma_t) * (delp->cnt - 1));
+#else
+ free(delp);
+#endif
+ }
+ TNF_PROBE_0_DEBUG(hci1394_delete_xfer_ctl_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_ixl_update.c b/usr/src/uts/common/io/1394/adapters/hci1394_ixl_update.c
new file mode 100644
index 0000000000..e77247e9ea
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_ixl_update.c
@@ -0,0 +1,2231 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 1999-2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_ixl_update.c
+ * Isochronous IXL update routines.
+ * Routines used to dynamically update a compiled and presumably running
+ * IXL program.
+ */
+
+#include <sys/kmem.h>
+#include <sys/types.h>
+#include <sys/conf.h>
+
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/ixl1394.h> /* IXL opcodes & data structs */
+
+#include <sys/1394/adapters/hci1394.h>
+
+
+/* local defines for hci1394_ixl_update_prepare return codes */
+#define IXL_PREP_READY 1
+#define IXL_PREP_SUCCESS 0
+#define IXL_PREP_FAILURE (-1)
+
+/*
+ * variable used to indicate the number of times update will wait for
+ * interrupt routine to complete.
+ */
+int hci1394_upd_retries_before_fail = 50;
+
+/* IXL runtime update static functions */
+static int hci1394_ixl_update_prepare(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_prep_jump(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_prep_set_skipmode(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_prep_set_tagsync(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_prep_recv_pkt(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_prep_recv_buf(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_prep_send_pkt(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_prep_send_buf(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_perform(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_evaluate(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_analysis(hci1394_ixl_update_vars_t *uvp);
+static void hci1394_ixl_update_set_locn_info(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_enable(hci1394_ixl_update_vars_t *uvp);
+static int hci1394_ixl_update_endup(hci1394_ixl_update_vars_t *uvp);
+
+/*
+ * IXL commands and included fields which can be updated
+ * IXL1394_OP_CALLBACK: callback(), callback_data
+ * IXL1394_OP_JUMP: label
+ * IXL1394_OP_RECV_PKT ixl_buf, size, mem_bufp
+ * IXL1394_OP_RECV_PKT_ST ixl_buf, size, mem_bufp
+ * IXL1394_OP_RECV_BUF(ppb) ixl_buf, size, pkt_size, mem_bufp, buf_offset
+ * IXL1394_OP_RECV_BUF(fill) ixl_buf, size, pkt_size, mem_bufp, buf_offset
+ * IXL1394_OP_SEND_PKT ixl_buf, size, mem_bufp
+ * IXL1394_OP_SEND_PKT_ST ixl_buf, size, mem_bufp
+ * IXL1394_OP_SEND_PKT_WHDR_ST ixl_buf, size, mem_bufp
+ * IXL1394_OP_SEND_BUF ixl_buf, size, pkt_size, mem_bufp, buf_offset
+ * IXL1394_OP_SET_TAGSYNC tag, sync
+ * IXL1394_OP_SET_SKIPMODE skipmode, label
+ *
+ * IXL commands which can not be updated
+ * IXL1394_OP_LABEL
+ * IXL1394_OP_SEND_HDR_ONLY
+ * IXL1394_OP_SEND_NO_PKT
+ * IXL1394_OP_STORE_VALUE
+ * IXL1394_OP_STORE_TIMESTAMP
+ * IXL1394_OP_SET_SYNCWAIT
+ */
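+
+/*
+ * A minimal caller sketch (assuming a compiled, running context "ctxtp"
+ * with soft state "ssp"; the variable names are illustrative only):
+ * retarget an updatable jump.  The old command must be part of the compiled
+ * program and the old and new opcodes must match.
+ *
+ *	ixl1394_jump_t new_jump;
+ *	int result;
+ *
+ *	bzero(&new_jump, sizeof (new_jump));
+ *	new_jump.ixl_opcode = IXL1394_OP_JUMP_U;
+ *	new_jump.label = new_label_ixlp;	(an IXL1394_OP_LABEL cmd)
+ *
+ *	if (hci1394_ixl_update(ssp, ctxtp, (ixl1394_command_t *)&new_jump,
+ *	    (ixl1394_command_t *)old_jump_ixlp, 0, &result) != DDI_SUCCESS) {
+ *		(result holds the IXL1394_E* update status)
+ *	}
+ */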
+
+/*
+ * hci1394_ixl_update
+ * main entrypoint into dynamic update code: initializes temporary
+ * update variables, evaluates request, coordinates with potentially
+ * simultaneous run of interrupt stack, evaluates likelihood of success,
+ * performs the update, checks if completed, performs cleanup
+ * resulting from coordination with interrupt stack.
+ */
+int
+hci1394_ixl_update(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
+ ixl1394_command_t *ixlnewp, ixl1394_command_t *ixloldp,
+ uint_t riskoverride, int *resultp)
+{
+ hci1394_ixl_update_vars_t uv; /* update work variables structure */
+ int prepstatus;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* save caller specified values in update work variables structure */
+ uv.soft_statep = soft_statep;
+ uv.ctxtp = ctxtp;
+ uv.ixlnewp = ixlnewp;
+ uv.ixloldp = ixloldp;
+ uv.risklevel = riskoverride;
+
+ /* initialize remainder of update work variables */
+ uv.ixlxferp = NULL;
+ uv.skipxferp = NULL;
+ uv.skipmode = 0;
+ uv.skipaddr = 0;
+ uv.jumpaddr = 0;
+ uv.pkthdr1 = 0;
+ uv.pkthdr2 = 0;
+ uv.bufaddr = 0;
+ uv.bufsize = 0;
+ uv.ixl_opcode = uv.ixlnewp->ixl_opcode;
+ uv.hcihdr = 0;
+ uv.hcistatus = 0;
+ uv.hci_offset = 0;
+ uv.hdr_offset = 0;
+
+ /* set done ok return status */
+ uv.upd_status = 0;
+
+ /* evaluate request and prepare to perform update */
+ prepstatus = hci1394_ixl_update_prepare(&uv);
+ if (prepstatus != IXL_PREP_READY) {
+ /*
+ * if either done or nothing to do or an evaluation error,
+ * return update status
+ */
+ *resultp = uv.upd_status;
+
+ /* if prep evaluation error, return failure */
+ if (prepstatus != IXL_PREP_SUCCESS) {
+ TNF_PROBE_1_DEBUG(hci1394_ixl_update_error,
+ HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, msg,
+ "IXL_PREP_FAILURE");
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+ /* if no action or update done, return update successful */
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_SUCCESS);
+ }
+
+ /* perform update processing reservation of interrupt context */
+ ret = hci1394_ixl_update_enable(&uv);
+ if (ret != DDI_SUCCESS) {
+
+ /* error acquiring control of context - return */
+ *resultp = uv.upd_status;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /* perform update risk analysis */
+ if (hci1394_ixl_update_analysis(&uv) != DDI_SUCCESS) {
+ /*
+ * return, if excessive risk or dma execution processing lost
+ * (note: caller risk override not yet implemented)
+ */
+
+ /* attempt intr processing cleanup, unless err is dmalost */
+ if (uv.upd_status != IXL1394_EPRE_UPD_DMALOST) {
+ (void) hci1394_ixl_update_endup(&uv);
+ } else {
+ /*
+ * error is dmalost, just release interrupt context.
+ * take the lock here to ensure an atomic read, modify,
+ * write of the "intr_flags" field while we try to
+ * clear the "in update" flag. protects from the
+ * interrupt routine.
+ */
+ mutex_enter(&ctxtp->intrprocmutex);
+ ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
+ mutex_exit(&ctxtp->intrprocmutex);
+ }
+ *resultp = uv.upd_status;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /* perform requested update */
+ if (hci1394_ixl_update_perform(&uv) != DDI_SUCCESS) {
+ /*
+ * if non-completion condition, return update status
+ * attempt interrupt processing cleanup first
+ */
+ (void) hci1394_ixl_update_endup(&uv);
+
+ *resultp = uv.upd_status;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /* evaluate update completion, setting completion status */
+ if (hci1394_ixl_update_evaluate(&uv) != DDI_SUCCESS) {
+ /*
+		 * update failed - just release the interrupt context.
+		 * take the lock here too (just like above) to ensure an
+ * atomic read, modify, write of the "intr_flags" field
+ * while we try to clear the "in update" flag. protects
+ * from the interrupt routine.
+ */
+ mutex_enter(&ctxtp->intrprocmutex);
+ ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
+ mutex_exit(&ctxtp->intrprocmutex);
+
+ /* if DMA stopped or lost, formally stop context */
+ if (uv.upd_status == HCI1394_IXL_INTR_DMASTOP) {
+ hci1394_do_stop(soft_statep, ctxtp, B_TRUE,
+ ID1394_DONE);
+ } else if (uv.upd_status == HCI1394_IXL_INTR_DMALOST) {
+ hci1394_do_stop(soft_statep, ctxtp, B_TRUE,
+ ID1394_FAIL);
+ }
+
+ *resultp = uv.upd_status;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /* perform interrupt processing cleanup */
+ uv.upd_status = hci1394_ixl_update_endup(&uv);
+
+ /* return update completion status */
+ *resultp = uv.upd_status;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit, HCI1394_TNF_HAL_STACK_ISOCH,
+ "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * hci1394_ixl_update_enable
+ * Used to coordinate dynamic update activities with simultaneous
+ * interrupt handler processing, while holding the context mutex
+ * for as short a time as possible.
+ */
+static int
+hci1394_ixl_update_enable(hci1394_ixl_update_vars_t *uvp)
+{
+ int status;
+ boolean_t retry;
+ uint_t remretries;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_enable_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ retry = B_TRUE;
+ /* set arbitrary number of retries before giving up */
+ remretries = hci1394_upd_retries_before_fail;
+ status = DDI_SUCCESS;
+
+ /*
+	 * if we waited for completion of an interrupt-processing-generated
+	 * callback, retry here
+ */
+ ASSERT(MUTEX_NOT_HELD(&uvp->ctxtp->intrprocmutex));
+ mutex_enter(&uvp->ctxtp->intrprocmutex);
+
+ while (retry == B_TRUE) {
+ retry = B_FALSE;
+ remretries--;
+
+ /* failure if update processing is already in progress */
+ if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
+ uvp->upd_status = IXL1394_EUPDATE_DISALLOWED;
+ status = DDI_FAILURE;
+ } else if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
+ /*
+			 * if we have retried the max number of times or if
+			 * this update
+ * request is on the interrupt stack, which means that
+ * the callback function of the target driver initiated
+ * the update, set update failure.
+ */
+ if ((remretries <= 0) ||
+			    (curthread->t_flag & T_INTR_THREAD)) {
+ uvp->upd_status = IXL1394_EUPDATE_DISALLOWED;
+ status = DDI_FAILURE;
+ } else {
+ /*
+ * if not on interrupt stack and retries not
+ * exhausted, free mutex, wait a short time
+ * and then retry.
+ */
+ retry = B_TRUE;
+ mutex_exit(&uvp->ctxtp->intrprocmutex);
+ drv_usecwait(1);
+ mutex_enter(&uvp->ctxtp->intrprocmutex);
+ continue;
+ }
+ } else if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_INCALL) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+ status = DDI_FAILURE;
+ }
+ }
+
+ /* if context is available, reserve it for this update request */
+ if (status == DDI_SUCCESS) {
+ uvp->ctxtp->intr_flags |= HCI1394_ISO_CTXT_INUPDATE;
+ }
+
+ ASSERT(MUTEX_HELD(&uvp->ctxtp->intrprocmutex));
+ mutex_exit(&uvp->ctxtp->intrprocmutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_enable_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (status);
+}
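+
+/*
+ * The reservation protocol above, reduced to its core (a sketch, with the
+ * retry bookkeeping elided): INUPDATE is the update path's claim on the
+ * context, ININTR the interrupt path's, and INTRSET the interrupt's
+ * deferred-work note that update_endup() processes later.
+ *
+ *	mutex_enter(&ctxtp->intrprocmutex);
+ *	while ((ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) &&
+ *	    (--retries > 0)) {
+ *		mutex_exit(&ctxtp->intrprocmutex);
+ *		drv_usecwait(1);
+ *		mutex_enter(&ctxtp->intrprocmutex);
+ *	}
+ *	if (!(ctxtp->intr_flags & (HCI1394_ISO_CTXT_ININTR |
+ *	    HCI1394_ISO_CTXT_INUPDATE)))
+ *		ctxtp->intr_flags |= HCI1394_ISO_CTXT_INUPDATE;
+ *	mutex_exit(&ctxtp->intrprocmutex);
+ */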
+
+/*
+ * hci1394_ixl_update_endup()
+ * The ending stage of coordinating with simultaneously running interrupts.
+ * Perform interrupt processing sync tasks if we (update) had blocked the
+ * interrupt out when it wanted a turn.
+ */
+static int
+hci1394_ixl_update_endup(hci1394_ixl_update_vars_t *uvp)
+{
+ uint_t status;
+ hci1394_iso_ctxt_t *ctxtp;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_endup_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ status = HCI1394_IXL_INTR_NOERROR;
+ ctxtp = uvp->ctxtp;
+
+ while (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
+
+ if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INTRSET) {
+ /*
+ * We don't need to grab the lock here because
+ * the "intr_flags" field is only modified in two
+ * ways - one in UPDATE and one in INTR routine. Since
+			 * we know that it can't be modified simultaneously
+			 * in another UPDATE thread - that is assured by the
+ * checks in "update_enable" - we would only be trying
+ * to protect against the INTR thread. And since we
+ * are going to clear a bit here (and check it again
+ * at the top of the loop) we are not really concerned
+ * about missing its being set by the INTR routine.
+ */
+ ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
+
+ status = hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp);
+ if (status == HCI1394_IXL_INTR_DMALOST) {
+ /*
+ * Unlike above, we do care here as we are
+ * trying to clear the "in update" flag, and
+ * we don't want that lost because the INTR
+ * routine is trying to set its flag.
+ */
+ mutex_enter(&uvp->ctxtp->intrprocmutex);
+ ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
+ mutex_exit(&uvp->ctxtp->intrprocmutex);
+ continue;
+ }
+ }
+
+ ASSERT(MUTEX_NOT_HELD(&uvp->ctxtp->intrprocmutex));
+ mutex_enter(&uvp->ctxtp->intrprocmutex);
+ if (!(ctxtp->intr_flags & HCI1394_ISO_CTXT_INTRSET)) {
+ ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
+ }
+ mutex_exit(&uvp->ctxtp->intrprocmutex);
+ }
+
+ /* if DMA stopped or lost, formally stop context */
+ if (status == HCI1394_IXL_INTR_DMASTOP) {
+ hci1394_do_stop(uvp->soft_statep, ctxtp, B_TRUE, ID1394_DONE);
+ } else if (status == HCI1394_IXL_INTR_DMALOST) {
+ hci1394_do_stop(uvp->soft_statep, ctxtp, B_TRUE, ID1394_FAIL);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_endup_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (status);
+}
+
+/*
+ * hci1394_ixl_update_prepare()
+ * Preparation for the actual update (using temp uvp struct)
+ */
+static int
+hci1394_ixl_update_prepare(hci1394_ixl_update_vars_t *uvp)
+{
+ int ret;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+	/* the new and old ixl commands must have the same opcode */
+ if (uvp->ixlnewp->ixl_opcode != uvp->ixloldp->ixl_opcode) {
+
+ uvp->upd_status = IXL1394_EOPCODE_MISMATCH;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_update_prepare_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
+ "EOPCODE_MISMATCH");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /*
+ * perform evaluation and prepare update based on specific
+ * IXL command type
+ */
+ switch (uvp->ixl_opcode) {
+
+ case IXL1394_OP_CALLBACK_U: {
+ ixl1394_callback_t *old_callback_ixlp;
+ ixl1394_callback_t *new_callback_ixlp;
+
+ old_callback_ixlp = (ixl1394_callback_t *)uvp->ixloldp;
+ new_callback_ixlp = (ixl1394_callback_t *)uvp->ixlnewp;
+
+ /* perform update now without further evaluation */
+ old_callback_ixlp->callback_arg =
+ new_callback_ixlp->callback_arg;
+ old_callback_ixlp->callback = new_callback_ixlp->callback;
+
+ /* nothing else to do, return with done ok status */
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_SUCCESS);
+ }
+
+ case IXL1394_OP_JUMP_U:
+ ret = hci1394_ixl_update_prep_jump(uvp);
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (ret);
+
+ case IXL1394_OP_SET_SKIPMODE_U:
+ ret = hci1394_ixl_update_prep_set_skipmode(uvp);
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (ret);
+
+ case IXL1394_OP_SET_TAGSYNC_U:
+ ret = hci1394_ixl_update_prep_set_tagsync(uvp);
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (ret);
+
+ case IXL1394_OP_RECV_PKT_U:
+ case IXL1394_OP_RECV_PKT_ST_U:
+ ret = hci1394_ixl_update_prep_recv_pkt(uvp);
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (ret);
+
+ case IXL1394_OP_RECV_BUF_U:
+ ret = hci1394_ixl_update_prep_recv_buf(uvp);
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (ret);
+
+ case IXL1394_OP_SEND_PKT_U:
+ case IXL1394_OP_SEND_PKT_ST_U:
+ case IXL1394_OP_SEND_PKT_WHDR_ST_U:
+ ret = hci1394_ixl_update_prep_send_pkt(uvp);
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (ret);
+
+ case IXL1394_OP_SEND_BUF_U:
+ ret = hci1394_ixl_update_prep_send_buf(uvp);
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (ret);
+
+ default:
+ /* ixl command being updated must be one of above, else error */
+ uvp->upd_status = IXL1394_EOPCODE_DISALLOWED;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+}
+
+/*
+ * hci1394_ixl_update_prep_jump()
+ * Preparation for update of an IXL1394_OP_JUMP_U command.
+ */
+static int
+hci1394_ixl_update_prep_jump(hci1394_ixl_update_vars_t *uvp)
+{
+ ixl1394_jump_t *old_jump_ixlp;
+ ixl1394_jump_t *new_jump_ixlp;
+ ixl1394_command_t *ixlp;
+ hci1394_xfer_ctl_t *xferctlp;
+ hci1394_desc_t *hcidescp;
+ uint_t cbcnt;
+ ddi_acc_handle_t acc_hdl;
+ ddi_dma_handle_t dma_hdl;
+ uint32_t desc_hdr;
+ int err;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_jump_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ old_jump_ixlp = (ixl1394_jump_t *)uvp->ixloldp;
+ new_jump_ixlp = (ixl1394_jump_t *)uvp->ixlnewp;
+
+ /* check if any change between new and old ixl jump command */
+ if (new_jump_ixlp->label == old_jump_ixlp->label) {
+
+ /* if none, return with done ok status */
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_jump_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_SUCCESS);
+ }
+
+ /* new ixl jump command label must be ptr to valid ixl label or NULL */
+ if ((new_jump_ixlp->label != NULL) &&
+ (new_jump_ixlp->label->ixl_opcode != IXL1394_OP_LABEL)) {
+
+ /* if not jumping to label, return an error */
+ uvp->upd_status = IXL1394_EJUMP_NOT_TO_LABEL;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_update_prepare_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
+ "EJUMP_NOT_TO_LABEL");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /*
+ * follow exec path from new ixl jump command label to determine new
+ * jump destination ixl xfer command
+ */
+ (void) hci1394_ixl_find_next_exec_xfer(new_jump_ixlp->label, &cbcnt,
+ &ixlp);
+ if (ixlp != NULL) {
+ /*
+ * get the bound address of the first descriptor block reached
+ * by the jump destination. (This descriptor is the first
+ * transfer command following the jumped-to label.) Set the
+ * descriptor's address (with Z bits) into jumpaddr.
+ */
+ uvp->jumpaddr = ((hci1394_xfer_ctl_t *)
+ ixlp->compiler_privatep)->dma[0].dma_bound;
+ }
+
+ /*
+ * get associated xfer IXL command from compiler_privatep of old
+ * jump command
+ */
+ if ((uvp->ixlxferp = (ixl1394_command_t *)
+ old_jump_ixlp->compiler_privatep) == NULL) {
+
+ /* if none, return an error */
+ uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_jump_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
+ "EORIG_IXL_CORRUPTED");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /*
+	 * get the last descriptor of the associated IXL xfer command's last
+	 * dma descriptor block, then get hcihdr from its hdr field, removing
+	 * the interrupt enable bits
+ */
+ xferctlp = (hci1394_xfer_ctl_t *)uvp->ixlxferp->compiler_privatep;
+ hcidescp = (hci1394_desc_t *)xferctlp->dma[xferctlp->cnt - 1].dma_descp;
+ acc_hdl = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_handle;
+ dma_hdl = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_dma_handle;
+
+ /* Sync the descriptor before we grab the header(s) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp, sizeof (hci1394_desc_t),
+ DDI_DMA_SYNC_FORCPU);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_jump_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
+ "EINTERNAL_ERROR: dma_sync() failed");
+ return (IXL_PREP_FAILURE);
+ }
+
+ desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
+ uvp->hcihdr = desc_hdr & ~DESC_INTR_ENBL;
+
+ /* set depth to last dma descriptor block & update count to 1 */
+ uvp->ixldepth = xferctlp->cnt - 1;
+ uvp->ixlcount = 1;
+
+ /*
+	 * if there is only one dma descriptor block and the IXL xfer command
+	 * was initiated by a label, or callbacks were found along the exec
+	 * path to the new destination IXL xfer command, enable the interrupt
+	 * in the hcihdr value
+ */
+ if (((xferctlp->cnt == 1) &&
+ ((xferctlp->ctl_flags & XCTL_LABELLED) != 0)) || (cbcnt != 0)) {
+
+ uvp->hcihdr |= DESC_INTR_ENBL;
+ }
+
+ /* If either old or new destination was/is NULL, enable interrupt */
+ if ((new_jump_ixlp->label == NULL) || (old_jump_ixlp->label == NULL)) {
+ uvp->hcihdr |= DESC_INTR_ENBL;
+ }
+
+ /*
+	 * if the xfer type is xmit and the skip mode for this xfer command is
+	 * IXL1394_SKIP_TO_NEXT, then set uvp->skipmode to IXL1394_SKIP_TO_NEXT,
+	 * set uvp->skipaddr to uvp->jumpaddr, and set uvp->hci_offset to the
+	 * offset from the last dma descriptor to the first dma descriptor
+ * (where skipaddr goes).
+ *
+ * update perform processing will have to set skip branch address to
+ * same location as jump destination in this case.
+ */
+ uvp->skipmode = IXL1394_SKIP_TO_STOP;
+ if ((uvp->ixlxferp->ixl_opcode & IXL1394_OPF_ONXMIT) != 0) {
+
+ if ((xferctlp->skipmodep && (((ixl1394_set_skipmode_t *)
+ xferctlp->skipmodep)->skipmode == IXL1394_SKIP_TO_NEXT)) ||
+		    (uvp->ctxtp->default_skipmode == IXL1394_SKIP_TO_NEXT)) {
+
+ uvp->skipmode = IXL1394_SKIP_TO_NEXT;
+ uvp->skipaddr = uvp->jumpaddr;
+
+ /*
+ * calc hci_offset to first descriptor (where skipaddr
+ * goes) of dma descriptor block from current (last)
+ * descriptor of the descriptor block (accessed in
+ * xfer_ctl dma_descp of IXL xfer command)
+ */
+ if (uvp->ixlxferp->ixl_opcode ==
+ IXL1394_OP_SEND_HDR_ONLY) {
+ /*
+ * send header only is (Z bits - 2)
+ * descriptor components back from last one
+ */
+ uvp->hci_offset -= 2;
+ } else {
+ /*
+ * all others are (Z bits - 1) descriptor
+ * components back from last component
+ */
+ uvp->hci_offset -= 1;
+ }
+ }
+ }
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_jump_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_READY);
+}
+
+/*
+ * hci1394_ixl_update_prep_set_skipmode()
+ * Preparation for update of an IXL1394_OP_SET_SKIPMODE_U command.
+ */
+static int
+hci1394_ixl_update_prep_set_skipmode(hci1394_ixl_update_vars_t *uvp)
+{
+ ixl1394_set_skipmode_t *old_set_skipmode_ixlp;
+ ixl1394_set_skipmode_t *new_set_skipmode_ixlp;
+ ixl1394_command_t *ixlp;
+ hci1394_xfer_ctl_t *xferctlp;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_skipmode_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ old_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixloldp;
+ new_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixlnewp;
+
+	/* check if the new set skipmode is a change from the old one */
+ if (new_set_skipmode_ixlp->skipmode ==
+ old_set_skipmode_ixlp->skipmode) {
+
+ if ((new_set_skipmode_ixlp->skipmode !=
+ IXL1394_SKIP_TO_LABEL) ||
+ (old_set_skipmode_ixlp->label ==
+ new_set_skipmode_ixlp->label)) {
+
+ TNF_PROBE_0_DEBUG(
+ hci1394_ixl_update_prep_set_skipmode_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* No change, return with done ok status */
+ return (IXL_PREP_SUCCESS);
+ }
+ }
+
+	/* find associated ixl xfer command by following old ixl links */
+ uvp->ixlxferp = uvp->ixloldp->next_ixlp;
+ while ((uvp->ixlxferp != NULL) && (((uvp->ixlxferp->ixl_opcode &
+ IXL1394_OPF_ISXFER) == 0) ||
+ ((uvp->ixlxferp->ixl_opcode & IXL1394_OPTY_MASK) != 0))) {
+
+ uvp->ixlxferp = uvp->ixlxferp->next_ixlp;
+ }
+
+ /* return an error if no ixl xfer command found */
+ if (uvp->ixlxferp == NULL) {
+
+ uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string,
+ errmsg, "EORIG_IXL_CORRUPTED");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /*
+ * get Z bits (number of descriptor components in descriptor block)
+ * from a dma bound addr in the xfer_ctl struct of the IXL xfer command
+ */
+ if ((xferctlp = (hci1394_xfer_ctl_t *)
+ uvp->ixlxferp->compiler_privatep) == NULL) {
+
+ uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
+ "EORIG_IXL_CORRUPTED");
+ return (IXL_PREP_FAILURE);
+ }
+ uvp->hci_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;
+
+ /*
+ * determine hci_offset to first component (where skipaddr goes) of
+ * dma descriptor block from current (last) descriptor component of
+	 * descriptor block (accessed in xfer_ctl dma_descp of IXL xfer command)
+ */
+ if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_HDR_ONLY) {
+ /*
+ * "send header only" is (Z bits - 2) descriptors back
+ * from last one
+ */
+ uvp->hci_offset -= 2;
+ } else {
+ /*
+		 * all others are (Z bits - 1) descriptors back from
+ * last descriptor.
+ */
+ uvp->hci_offset -= 1;
+ }
+
+ /* set depth to zero and count to update all dma descriptors */
+ uvp->ixldepth = 0;
+ uvp->ixlcount = xferctlp->cnt;
+
+ /* set new skipmode and validate */
+ uvp->skipmode = new_set_skipmode_ixlp->skipmode;
+
+ if ((uvp->skipmode != IXL1394_SKIP_TO_NEXT) &&
+ (uvp->skipmode != IXL1394_SKIP_TO_SELF) &&
+ (uvp->skipmode != IXL1394_SKIP_TO_STOP) &&
+ (uvp->skipmode != IXL1394_SKIP_TO_LABEL)) {
+
+ /* return an error if invalid mode */
+ uvp->upd_status = IXL1394_EBAD_SKIPMODE;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string,
+ errmsg, "EBAD_SKIPMODE");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /* if mode is skip to label */
+ if (uvp->skipmode == IXL1394_SKIP_TO_LABEL) {
+
+ /* verify label field is valid ixl label cmd */
+ if ((new_set_skipmode_ixlp->label == NULL) ||
+ (new_set_skipmode_ixlp->label->ixl_opcode !=
+ IXL1394_OP_LABEL)) {
+
+ /* Error - not skipping to valid label */
+ uvp->upd_status = IXL1394_EBAD_SKIP_LABEL;
+
+ TNF_PROBE_0_DEBUG(
+ hci1394_ixl_update_prep_set_skipmode_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /*
+ * follow new skip exec path after label to next xfer
+ * IXL command
+ */
+ (void) hci1394_ixl_find_next_exec_xfer(
+ new_set_skipmode_ixlp->label, NULL, &ixlp);
+
+ /*
+		 * set the skip destination IXL xfer command (after the update
+		 * it is set into the old set skipmode IXL command's
+		 * compiler_privatep)
+ */
+ if ((uvp->skipxferp = ixlp) != NULL) {
+ /*
+ * set skipaddr to be the first dma descriptor block's
+ * dma bound address w/Z bits
+ */
+ xferctlp = (hci1394_xfer_ctl_t *)
+ ixlp->compiler_privatep;
+ uvp->skipaddr = xferctlp->dma[0].dma_bound;
+ }
+ }
+
+ /*
+ * if mode is skip to next, get skipaddr for last dma descriptor block
+ */
+ if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
+ /* follow normal exec path to next xfer ixl command */
+ (void) hci1394_ixl_find_next_exec_xfer(uvp->ixlxferp->next_ixlp,
+ NULL, &ixlp);
+
+ /*
+ * get skip_next destination IXL xfer command
+ * (for last iteration)
+ */
+ if (ixlp != NULL) {
+ /*
+ * set skipaddr to first dma descriptor block's
+ * dma bound address w/Z bits
+ */
+ xferctlp = (hci1394_xfer_ctl_t *)
+ ixlp->compiler_privatep;
+ uvp->skipaddr = xferctlp->dma[0].dma_bound;
+ }
+ }
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_READY);
+}
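+
+/*
+ * The "Z bits" arithmetic used above, in isolation: the low nibble of a
+ * bound descriptor-block address carries the number of descriptor
+ * components in the block, so the first component (which holds the
+ * skip/branch address) sits (Z - 1) components before the last one, or
+ * (Z - 2) for send-header-only blocks.  A minimal sketch:
+ *
+ *	uint32_t z = dma_bound & DESC_Z_MASK;
+ *	hci1394_desc_t *firstp = lastp - (z - 1);
+ */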
+
+/*
+ * hci1394_ixl_update_prep_set_tagsync()
+ * Preparation for update of an IXL1394_OP_SET_TAGSYNC_U command.
+ */
+static int
+hci1394_ixl_update_prep_set_tagsync(hci1394_ixl_update_vars_t *uvp)
+{
+ ixl1394_set_tagsync_t *old_set_tagsync_ixlp;
+ ixl1394_set_tagsync_t *new_set_tagsync_ixlp;
+ hci1394_xfer_ctl_t *xferctlp;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ old_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixloldp;
+ new_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixlnewp;
+
+	/* check if the new set tagsync is a change from the old one */
+ if ((new_set_tagsync_ixlp->tag == old_set_tagsync_ixlp->tag) &&
+ (new_set_tagsync_ixlp->sync == old_set_tagsync_ixlp->sync)) {
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* no change, return with done ok status */
+ return (IXL_PREP_SUCCESS);
+ }
+
+	/* find associated IXL xfer command by following old ixl links */
+ uvp->ixlxferp = uvp->ixloldp->next_ixlp;
+ while ((uvp->ixlxferp != NULL) && (((uvp->ixlxferp->ixl_opcode &
+ IXL1394_OPF_ISXFER) == 0) ||
+ ((uvp->ixlxferp->ixl_opcode & IXL1394_OPTY_MASK) != 0))) {
+
+ uvp->ixlxferp = uvp->ixlxferp->next_ixlp;
+ }
+
+ /* return an error if no IXL xfer command found */
+ if (uvp->ixlxferp == NULL) {
+
+ uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /* is IXL xfer command an IXL1394_OP_SEND_NO_PKT? */
+ if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_NO_PKT) {
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* no update needed, return done ok status */
+ return (IXL_PREP_SUCCESS);
+ }
+
+ /* build new pkthdr1 from new IXL tag/sync bits */
+ uvp->pkthdr1 = (uvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
+ (new_set_tagsync_ixlp->tag << DESC_PKT_TAG_SHIFT) |
+ (uvp->ctxtp->isochan << DESC_PKT_CHAN_SHIFT) |
+ (new_set_tagsync_ixlp->sync << DESC_PKT_SY_SHIFT);
+
+ /*
+ * get Z bits (# of descriptor components in descriptor block) from
+ * any dma bound address in the xfer_ctl struct of the IXL xfer cmd
+ */
+ if ((xferctlp = (hci1394_xfer_ctl_t *)
+ uvp->ixlxferp->compiler_privatep) == NULL) {
+
+ uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+ uvp->hdr_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;
+
+ /*
+ * determine hdr_offset from the current(last) descriptor of the
+ * DMA descriptor block to the descriptor where pkthdr1 goes
+ * by examining IXL xfer command
+ */
+ if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_HDR_ONLY) {
+ /*
+ * if IXL send header only, the current (last)
+ * descriptor is the one
+ */
+ uvp->hdr_offset = 0;
+ } else {
+ /*
+ * all others are the first descriptor (Z bits - 1)
+ * back from the last
+ */
+ uvp->hdr_offset -= 1;
+ }
+
+ /* set depth to zero and count to update all dma descriptors */
+ uvp->ixldepth = 0;
+ uvp->ixlcount = xferctlp->cnt;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_READY);
+}
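+
+/*
+ * The isochronous packet header quadlet built above packs speed, tag,
+ * channel, and sy into a single word.  A minimal sketch of the assembly,
+ * using the shift names from the code above:
+ *
+ *	hdr1 = (spd << DESC_PKT_SPD_SHIFT) |
+ *	    (tag << DESC_PKT_TAG_SHIFT) |
+ *	    (chan << DESC_PKT_CHAN_SHIFT) |
+ *	    (sy << DESC_PKT_SY_SHIFT);
+ */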
+
+/*
+ * hci1394_ixl_update_prep_recv_pkt()
+ * Preparation for update of an IXL1394_OP_RECV_PKT_U or
+ * IXL1394_OP_RECV_PKT_ST_U command.
+ */
+static int
+hci1394_ixl_update_prep_recv_pkt(hci1394_ixl_update_vars_t *uvp)
+{
+ ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
+ ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
+ hci1394_xfer_ctl_t *xferctlp;
+ hci1394_desc_t *hcidescp;
+ ddi_acc_handle_t acc_hdl;
+ ddi_dma_handle_t dma_hdl;
+ uint32_t desc_hdr;
+ int err;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
+ new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
+
+ /* check if any change between new and old IXL xfer commands */
+ if ((new_xfer_pkt_ixlp->size == old_xfer_pkt_ixlp->size) &&
+ (new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr ==
+ old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr) &&
+ (new_xfer_pkt_ixlp->mem_bufp == old_xfer_pkt_ixlp->mem_bufp)) {
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* no change. return with done ok status */
+ return (IXL_PREP_SUCCESS);
+ }
+
+ /* if new IXL buffer addrs are null, return error */
+ if ((new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr == NULL) ||
+ (new_xfer_pkt_ixlp->mem_bufp == NULL)) {
+
+ uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /* if IXL xfer command is not xfer start command */
+ if (uvp->ixl_opcode == IXL1394_OP_RECV_PKT_U) {
+ /*
+ * find IXL xfer start command in the compiler_privatep of the
+ * old IXL xfer command
+ */
+ uvp->ixlxferp = (ixl1394_command_t *)
+ uvp->ixloldp->compiler_privatep;
+
+ if (uvp->ixlxferp == NULL) {
+
+ /* Error - no IXL xfer start command found */
+ uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+ } else {
+ /* IXL xfer command is the IXL xfer start command */
+ uvp->ixlxferp = uvp->ixloldp;
+ }
+
+ /* check that xfer_ctl is present in the IXL xfer start command */
+ if ((xferctlp = (hci1394_xfer_ctl_t *)
+ uvp->ixlxferp->compiler_privatep) == NULL) {
+
+ /* Error - no xfer_ctl struct found */
+ uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /* set depth to zero and count to 1 to update dma descriptor */
+ uvp->ixldepth = 0;
+ uvp->ixlcount = 1;
+
+ /*
+ * get Z bits (number of descriptors in descriptor block) from the DMA
+	 * bound address in the xfer_ctl struct of the IXL xfer start command.
+ */
+ uvp->hci_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;
+
+ /*
+ * set offset from the current(last) descriptor to the descriptor for
+ * this packet command
+ */
+ uvp->hci_offset -= (1 + uvp->ixloldp->compiler_resv);
+
+ /*
+ * set bufsize to the new IXL xfer size, and bufaddr to the new
+ * IXL xfer bufp
+ */
+ uvp->bufsize = ((ixl1394_xfer_pkt_t *)uvp->ixlnewp)->size;
+ uvp->bufaddr = ((ixl1394_xfer_pkt_t *)
+ uvp->ixlnewp)->ixl_buf.ixldmac_addr;
+
+ /*
+ * update old hcihdr w/new bufsize, set hcistatus rescnt to
+ * new bufsize
+ */
+ hcidescp = (hci1394_desc_t *)xferctlp->dma[0].dma_descp -
+ uvp->hci_offset;
+ acc_hdl = xferctlp->dma[0].dma_buf->bi_handle;
+ dma_hdl = xferctlp->dma[0].dma_buf->bi_dma_handle;
+
+ /* Sync the descriptor before we grab the header(s) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp, sizeof (hci1394_desc_t),
+ DDI_DMA_SYNC_FORCPU);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
+ "EINTERNAL_ERROR: dma_sync() failed");
+ return (IXL_PREP_FAILURE);
+ }
+
+ desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
+ uvp->hcihdr = desc_hdr;
+ uvp->hcihdr &= ~DESC_HDR_REQCOUNT_MASK;
+ uvp->hcihdr |= (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
+ DESC_HDR_REQCOUNT_MASK;
+ uvp->hcistatus = (uvp->bufsize << DESC_ST_RESCOUNT_SHIFT) &
+ DESC_ST_RESCOUNT_MASK;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_READY);
+}
+
+/*
+ * hci1394_ixl_update_prep_recv_buf()
+ * Preparation for update of an IXL1394_OP_RECV_BUF_U command.
+ */
+static int
+hci1394_ixl_update_prep_recv_buf(hci1394_ixl_update_vars_t *uvp)
+{
+ ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
+ ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
+ hci1394_xfer_ctl_t *xferctlp;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
+ new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
+
+ /* check if any change between new and old IXL xfer commands */
+ if ((new_xfer_buf_ixlp->size == old_xfer_buf_ixlp->size) &&
+ (new_xfer_buf_ixlp->ixl_buf.ixldmac_addr ==
+ old_xfer_buf_ixlp->ixl_buf.ixldmac_addr) &&
+	    (new_xfer_buf_ixlp->mem_bufp == old_xfer_buf_ixlp->mem_bufp)) {
+
+ if (((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) != 0) ||
+ (new_xfer_buf_ixlp->pkt_size ==
+ old_xfer_buf_ixlp->pkt_size)) {
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* no change. return with done ok status */
+ return (IXL_PREP_SUCCESS);
+ }
+ }
+
+ /* if new IXL buffer addrs are null, return error */
+ if ((new_xfer_buf_ixlp->ixl_buf.ixldmac_addr == NULL) ||
+ (new_xfer_buf_ixlp->mem_bufp == NULL)) {
+
+ uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /*
+ * if not buffer fill mode, check that the new pkt_size > 0 and
+ * new size/pkt_size doesn't change the count of dma descriptor
+ * blocks required
+ */
+ if ((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) == 0) {
+ if ((new_xfer_buf_ixlp->pkt_size == 0) ||
+ ((new_xfer_buf_ixlp->size / new_xfer_buf_ixlp->pkt_size) !=
+ (old_xfer_buf_ixlp->size / old_xfer_buf_ixlp->pkt_size))) {
+
+ /* count changes. return an error */
+ uvp->upd_status = IXL1394_EXFER_BUF_CNT_DIFF;
+
+ TNF_PROBE_0_DEBUG(
+ hci1394_ixl_update_prep_recv_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+ }
+
+ /* set old IXL xfer command as the current IXL xfer command */
+ uvp->ixlxferp = uvp->ixloldp;
+
+ /* check that the xfer_ctl struct is present in IXL xfer command */
+ if ((xferctlp = (hci1394_xfer_ctl_t *)uvp->ixlxferp->compiler_privatep)
+ == NULL) {
+
+ /* return an error if no xfer_ctl struct is found for command */
+ uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /* set depth to zero and count to update all dma descriptors */
+ uvp->ixldepth = 0;
+ uvp->ixlcount = xferctlp->cnt;
+
+ /* set bufsize to new pkt_size (or to new size if buffer fill mode) */
+ if ((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) == 0) {
+ uvp->bufsize = new_xfer_buf_ixlp->pkt_size;
+ } else {
+ uvp->bufsize = new_xfer_buf_ixlp->size;
+ }
+
+ /* set bufaddr to new ixl_buf */
+ uvp->bufaddr = new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
+
+ /* set hcihdr reqcnt and hcistatus rescnt to new bufsize */
+ uvp->hci_offset = 0;
+ uvp->hcihdr = (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
+ DESC_HDR_REQCOUNT_MASK;
+ uvp->hcistatus = (uvp->bufsize << DESC_ST_RESCOUNT_SHIFT) &
+ DESC_ST_RESCOUNT_MASK;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_READY);
+}
+
+/*
+ * hci1394_ixl_update_prep_send_pkt()
+ * Preparation for update of an IXL1394_OP_SEND_PKT_U command,
+ * IXL1394_OP_SEND_PKT_ST_U command and IXL1394_OP_SEND_PKT_WHDR_ST_U
+ * command.
+ */
+static int
+hci1394_ixl_update_prep_send_pkt(hci1394_ixl_update_vars_t *uvp)
+{
+ ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
+ ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
+ hci1394_xfer_ctl_t *xferctlp;
+ hci1394_desc_imm_t *hcidescp;
+ ddi_acc_handle_t acc_hdl;
+ ddi_dma_handle_t dma_hdl;
+ uint32_t desc_hdr, desc_hdr2;
+ int err;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
+ new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
+
+ /* check if any change between new and old IXL xfer commands */
+ if ((new_xfer_pkt_ixlp->size == old_xfer_pkt_ixlp->size) &&
+ (new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr ==
+ old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr) &&
+ (new_xfer_pkt_ixlp->mem_bufp == old_xfer_pkt_ixlp->mem_bufp)) {
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* if none, return with done ok status */
+ return (IXL_PREP_SUCCESS);
+ }
+
+ /* if new ixl buffer addrs are null, return error */
+ if ((new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr == NULL) ||
+ (new_xfer_pkt_ixlp->mem_bufp == NULL)) {
+
+ uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /* error if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode and size < 4 */
+ if ((uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) &&
+ (new_xfer_pkt_ixlp->size < 4)) {
+
+ uvp->upd_status = IXL1394_EPKT_HDR_MISSING;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /* if IXL xfer command is not an IXL xfer start command */
+ if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_U) {
+ /*
+ * find IXL xfer start command in the compiler_privatep of the
+ * old IXL xfer command
+ */
+ uvp->ixlxferp = (ixl1394_command_t *)
+ old_xfer_pkt_ixlp->compiler_privatep;
+
+ if (uvp->ixlxferp == NULL) {
+ /* error if no IXL xfer start command found */
+ uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+ } else {
+ /* IXL xfer command is the IXL xfer start command */
+ uvp->ixlxferp = uvp->ixloldp;
+ }
+
+ /*
+ * get Z bits (number of descriptor components in the descriptor block)
+ * from a dma bound address in the xfer_ctl structure of the IXL
+ * xfer start command
+ */
+ if ((xferctlp = (hci1394_xfer_ctl_t *)
+ uvp->ixlxferp->compiler_privatep) == NULL) {
+
+ uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /* set depth to zero and count to 1 to update dma descriptor */
+ uvp->ixldepth = 0;
+ uvp->ixlcount = 1;
+
+ /*
+ * set offset to the header(first) descriptor from the
+ * current(last) descriptor
+ */
+	uvp->hdr_offset = (xferctlp->dma[0].dma_bound & DESC_Z_MASK) - 1;
+
+ /*
+ * set offset from the current(last) descriptor to the descriptor for
+ * this packet command
+ */
+ uvp->hci_offset = uvp->hdr_offset - 2 - uvp->ixloldp->compiler_resv;
+
+	/* set bufsize to new pkt buffer size, set bufaddr to new bufp */
+ uvp->bufsize = new_xfer_pkt_ixlp->size;
+ uvp->bufaddr = new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
+
+ /*
+ * if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode, adjust size & buff,
+ * step over hdr
+ */
+ if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) {
+ uvp->bufsize -= 4;
+ uvp->bufaddr += 4;
+ }
+
+ /* update old hcihdr w/new bufsize */
+ hcidescp = (hci1394_desc_imm_t *)xferctlp->dma[0].dma_descp -
+ uvp->hci_offset;
+ acc_hdl = xferctlp->dma[0].dma_buf->bi_handle;
+ dma_hdl = xferctlp->dma[0].dma_buf->bi_dma_handle;
+
+ /* Sync the descriptor before we grab the header(s) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
+ sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
+ "EINTERNAL_ERROR: dma_sync() failed");
+ return (IXL_PREP_FAILURE);
+ }
+
+ desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
+ uvp->hcihdr = desc_hdr;
+ uvp->hcihdr &= ~DESC_HDR_REQCOUNT_MASK;
+ uvp->hcihdr |= (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
+ DESC_HDR_REQCOUNT_MASK;
+
+	/* update old pkthdr2 w/new bufsize. error if it exceeds 16-bit max */
+ desc_hdr2 = ddi_get32(acc_hdl, &hcidescp->q2);
+ uvp->pkthdr2 = desc_hdr2;
+ uvp->pkthdr2 = (uvp->pkthdr2 & DESC_PKT_DATALEN_MASK) >>
+ DESC_PKT_DATALEN_SHIFT;
+ uvp->pkthdr2 -= old_xfer_pkt_ixlp->size;
+ uvp->pkthdr2 += uvp->bufsize;
+
+ if (uvp->pkthdr2 > 0xFFFF) {
+ uvp->upd_status = IXL1394_EPKTSIZE_MAX_OFLO;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+ uvp->pkthdr2 = (uvp->pkthdr2 << DESC_PKT_DATALEN_SHIFT) &
+ DESC_PKT_DATALEN_MASK;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_READY);
+}
+
+/*
+ * hci1394_ixl_update_prep_send_buf()
+ * Preparation for update of an IXL1394_OP_SEND_BUF_U command.
+ */
+static int
+hci1394_ixl_update_prep_send_buf(hci1394_ixl_update_vars_t *uvp)
+{
+ ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
+ ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
+ hci1394_xfer_ctl_t *xferctlp;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
+ new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
+
+ /* check if any change between new and old IXL xfer commands */
+ if ((new_xfer_buf_ixlp->size == old_xfer_buf_ixlp->size) &&
+ (new_xfer_buf_ixlp->pkt_size == old_xfer_buf_ixlp->pkt_size) &&
+ (new_xfer_buf_ixlp->ixl_buf.ixldmac_addr ==
+ old_xfer_buf_ixlp->ixl_buf.ixldmac_addr) &&
+ (new_xfer_buf_ixlp->mem_bufp == old_xfer_buf_ixlp->mem_bufp)) {
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /* no change, return with done ok status */
+ return (IXL_PREP_SUCCESS);
+ }
+
+ /* if new IXL buffer addresses are null, return error */
+ if ((new_xfer_buf_ixlp->ixl_buf.ixldmac_addr == NULL) ||
+ (new_xfer_buf_ixlp->mem_bufp == NULL)) {
+
+ uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /*
+ * check that the new pkt_size > 0 and the new size/pkt_size
+ * doesn't change the count of DMA descriptor blocks required
+ */
+ if ((new_xfer_buf_ixlp->pkt_size == 0) ||
+ ((new_xfer_buf_ixlp->size / new_xfer_buf_ixlp->pkt_size) !=
+ (old_xfer_buf_ixlp->size / old_xfer_buf_ixlp->pkt_size))) {
+
+ /* Error - new has different pkt count than old */
+ uvp->upd_status = IXL1394_EXFER_BUF_CNT_DIFF;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
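+ /*
+ * e.g. (hypothetical sizes): an old buffer of size 4096 with
+ * pkt_size 512 compiled to 8 descriptor blocks; a new 2048/256
+ * also yields 8 and is accepted, while 4096/256 (16 blocks) is
+ * rejected.
+ */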
+
+ /* set the old IXL xfer command as the current IXL xfer command */
+ uvp->ixlxferp = uvp->ixloldp;
+
+ /*
+ * get Z bits (number of descriptor components in descriptor block)
+ * from a DMA bound address in the xfer_ctl struct of the
+ * IXL xfer command
+ */
+ if ((xferctlp = (hci1394_xfer_ctl_t *)
+ uvp->ixlxferp->compiler_privatep) == NULL) {
+
+ uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_FAILURE);
+ }
+
+ /* set depth to zero and count to update all dma descriptors */
+ uvp->ixldepth = 0;
+ uvp->ixlcount = xferctlp->cnt;
+
+ /*
+ * set offset to the header (first) descriptor from the current (last)
+ * descriptor.
+ */
+ uvp->hdr_offset = (xferctlp->dma[0].dma_bound & DESC_Z_MASK) - 1;
+
+ /* set offset to the only(last) xfer descriptor */
+ uvp->hci_offset = 0;
+
+ /* set bufsize to the new pkt_size, set bufaddr to the new bufp */
+ uvp->bufsize = new_xfer_buf_ixlp->pkt_size;
+ uvp->bufaddr = new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
+
+ /*
+ * if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode, adjust size & buffer to
+ * step over the header (a quadlet)
+ */
+ if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) {
+ uvp->bufsize -= 4;
+ uvp->bufaddr += 4;
+ }
+
+ /* set hcihdr to new bufsize */
+ uvp->hcihdr = (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
+ DESC_HDR_REQCOUNT_MASK;
+
+ /* set pkthdr2 to new bufsize */
+ uvp->pkthdr2 = (uvp->bufsize << DESC_PKT_DATALEN_SHIFT) &
+ DESC_PKT_DATALEN_MASK;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (IXL_PREP_READY);
+}
+
+/*
+ * hci1394_ixl_update_perform()
+ * performs the actual update into DMA memory.
+ */
+static int
+hci1394_ixl_update_perform(hci1394_ixl_update_vars_t *uvp)
+{
+ int ii;
+ uint_t skipaddrlast;
+ hci1394_xfer_ctl_t *xferctlp;
+ hci1394_desc_imm_t *hcidescp;
+ hci1394_iso_ctxt_t *ctxtp;
+ ddi_acc_handle_t acc_hdl;
+ ddi_dma_handle_t dma_hdl;
+ int err;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ ctxtp = uvp->ctxtp;
+
+ /*
+ * if no target ixl xfer command to be updated or it has
+ * no xfer_ctl struct, then internal error.
+ */
+ if ((uvp->ixlxferp == NULL) ||
+ ((xferctlp = (hci1394_xfer_ctl_t *)
+ uvp->ixlxferp->compiler_privatep) == NULL)) {
+
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ return (DDI_FAILURE);
+ }
+
+ /* perform update based on specific ixl command type */
+ switch (uvp->ixl_opcode) {
+
+ case IXL1394_OP_JUMP_U: {
+ ixl1394_jump_t *old_jump_ixlp;
+ ixl1394_jump_t *new_jump_ixlp;
+
+ old_jump_ixlp = (ixl1394_jump_t *)uvp->ixloldp;
+ new_jump_ixlp = (ixl1394_jump_t *)uvp->ixlnewp;
+
+ /*
+ * set new hdr and new branch fields into last component of last
+ * dma descriptor block of ixl xfer cmd associated with
+ * ixl jump cmd
+ */
+ hcidescp = (hci1394_desc_imm_t *)
+ xferctlp->dma[xferctlp->cnt - 1].dma_descp;
+ acc_hdl = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_handle;
+ dma_hdl =
+ xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_dma_handle;
+
+ ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
+ ddi_put32(acc_hdl, &hcidescp->branch, uvp->jumpaddr);
+
+ /*
+ * if the xfer type is send and the skip mode is
+ * IXL1394_SKIP_TO_NEXT, also set the branch location into the
+ * branch field of the first component (the skip-to address) of
+ * the last dma descriptor block
+ */
+ if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
+ hcidescp -= uvp->hci_offset;
+ ddi_put32(acc_hdl, &hcidescp->branch, uvp->skipaddr);
+ }
+
+ /* Sync descriptor for device (desc was modified) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
+ sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /* set old ixl jump cmd label from new ixl jump cmd label */
+ old_jump_ixlp->label = new_jump_ixlp->label;
+ break;
+ }
+ case IXL1394_OP_SET_SKIPMODE_U: {
+ ixl1394_set_skipmode_t *old_set_skipmode_ixlp;
+ ixl1394_set_skipmode_t *new_set_skipmode_ixlp;
+
+ old_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixloldp;
+ new_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixlnewp;
+
+ /*
+ * if skip to next mode, save skip addr for last iteration
+ * thru dma descriptor blocks for associated ixl xfer command
+ */
+ if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
+ skipaddrlast = uvp->skipaddr;
+ }
+
+ /*
+ * iterate through the set of dma descriptor blocks for the
+ * associated ixl xfer start cmd and set the new skip address
+ * into the first hci descriptor of each; if skip-to-next or
+ * skip-to-self, first determine the address in each iteration
+ */
+ for (ii = 0; ii < xferctlp->cnt; ii++) {
+ hcidescp = (hci1394_desc_imm_t *)
+ xferctlp->dma[ii].dma_descp - uvp->hci_offset;
+ acc_hdl = xferctlp->dma[ii].dma_buf->bi_handle;
+ dma_hdl = xferctlp->dma[ii].dma_buf->bi_dma_handle;
+
+ if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
+ if (ii < (xferctlp->cnt - 1)) {
+ uvp->skipaddr =
+ xferctlp->dma[ii + 1].dma_bound;
+ } else {
+ uvp->skipaddr = skipaddrlast;
+ }
+ } else if (uvp->skipmode == IXL1394_SKIP_TO_SELF) {
+ uvp->skipaddr = xferctlp->dma[ii].dma_bound;
+ }
+
+ ddi_put32(acc_hdl, &hcidescp->branch, uvp->skipaddr);
+
+ /* Sync descriptor for device (desc was modified) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
+ sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(
+ hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ /*
+ * set old ixl set skip mode cmd mode and label from the new ixl
+ * cmd, and set old ixl set skip mode cmd compiler_privatep to
+ * uvp->skipxferp
+ */
+ old_set_skipmode_ixlp->skipmode = uvp->skipmode;
+ old_set_skipmode_ixlp->label = new_set_skipmode_ixlp->label;
+ old_set_skipmode_ixlp->compiler_privatep =
+ (ixl1394_priv_t)uvp->skipxferp;
+ break;
+ }
+ case IXL1394_OP_SET_TAGSYNC_U: {
+ ixl1394_set_tagsync_t *old_set_tagsync_ixlp;
+ ixl1394_set_tagsync_t *new_set_tagsync_ixlp;
+
+ old_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixloldp;
+ new_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixlnewp;
+
+ /*
+ * iterate through set of descriptor blocks for associated IXL
+ * xfer command and set new pkthdr1 value into output more/last
+ * immediate hci descriptor (first/last hci descriptor of each
+ * descriptor block)
+ */
+ for (ii = 0; ii < xferctlp->cnt; ii++) {
+ hcidescp = (hci1394_desc_imm_t *)
+ xferctlp->dma[ii].dma_descp - uvp->hdr_offset;
+ acc_hdl = xferctlp->dma[ii].dma_buf->bi_handle;
+ dma_hdl = xferctlp->dma[ii].dma_buf->bi_dma_handle;
+ ddi_put32(acc_hdl, &hcidescp->q1, uvp->pkthdr1);
+
+ /* Sync descriptor for device (desc was modified) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
+ sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(
+ hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ /*
+ * set old ixl set tagsync cmd tag & sync from new ixl set
+ * tagsync cmd
+ */
+ old_set_tagsync_ixlp->tag = new_set_tagsync_ixlp->tag;
+ old_set_tagsync_ixlp->sync = new_set_tagsync_ixlp->sync;
+ break;
+ }
+ case IXL1394_OP_RECV_PKT_U:
+ case IXL1394_OP_RECV_PKT_ST_U: {
+ ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
+ ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
+ uint32_t desc_status;
+
+ old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
+ new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
+
+ /*
+ * alter buffer address, count and rescount in ixl recv pkt cmd
+ * related hci component in dma descriptor block
+ */
+ hcidescp = (hci1394_desc_imm_t *)
+ xferctlp->dma[0].dma_descp - uvp->hci_offset;
+ acc_hdl = xferctlp->dma[0].dma_buf->bi_handle;
+ dma_hdl = xferctlp->dma[0].dma_buf->bi_dma_handle;
+ ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
+ ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
+
+ /* Sync the descriptor before we grab the status */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
+ sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /* change only the low half-word and leave the status bits unchanged */
+ desc_status = ddi_get32(acc_hdl, &hcidescp->status);
+ desc_status = (desc_status & ~DESC_ST_RESCOUNT_MASK) |
+ uvp->hcistatus;
+ ddi_put32(acc_hdl, &hcidescp->status, desc_status);
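+ /*
+ * e.g. (hypothetical values): with the rescount mask covering the
+ * low half-word, status 0xA0001234 and a new hcistatus of 0x0800
+ * combine to 0xA0000800: xferStatus bits kept, resCount replaced.
+ */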
+
+ /* Sync descriptor for device (desc was modified) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
+ sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * set old ixl recv pkt size and buffers from new
+ * ixl recv pkt command
+ */
+ old_xfer_pkt_ixlp->size = new_xfer_pkt_ixlp->size;
+ old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr =
+ new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
+ old_xfer_pkt_ixlp->mem_bufp = new_xfer_pkt_ixlp->mem_bufp;
+ break;
+ }
+ case IXL1394_OP_RECV_BUF_U: {
+ ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
+ ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
+ uint32_t desc_hdr;
+ uint32_t desc_status;
+
+ old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
+ new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
+
+ /*
+ * iterate through the set of descriptor blocks for this IXL xfer
+ * command, altering buffer, count and rescount in each
+ * input more/last (the only) hci descriptor block descriptor.
+ */
+ for (ii = 0; ii < xferctlp->cnt; ii++) {
+
+ hcidescp = (hci1394_desc_imm_t *)
+ xferctlp->dma[ii].dma_descp - uvp->hci_offset;
+ acc_hdl = xferctlp->dma[ii].dma_buf->bi_handle;
+ dma_hdl = xferctlp->dma[ii].dma_buf->bi_dma_handle;
+
+ ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
+
+ /* advance to the next buffer segment */
+ uvp->bufaddr += uvp->bufsize;
+
+ /* Sync the descriptor before we grab the header(s) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
+ sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(
+ hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * this preserves interrupt enable bits, et al. in each
+ * descriptor block header.
+ */
+ desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
+ desc_hdr = (desc_hdr & ~DESC_HDR_REQCOUNT_MASK) |
+ uvp->hcihdr;
+ ddi_put32(acc_hdl, &hcidescp->hdr, desc_hdr);
+
+ /*
+ * change only the low half-word, leaving the status bits
+ * unchanged
+ */
+ desc_status = ddi_get32(acc_hdl, &hcidescp->status);
+ desc_status = (desc_status & ~DESC_ST_RESCOUNT_MASK) |
+ uvp->hcistatus;
+ ddi_put32(acc_hdl, &hcidescp->status, desc_status);
+
+ /* Sync descriptor for device (desc was modified) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
+ sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(
+ hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ /*
+ * set old ixl recv buf sizes and buffers from
+ * new ixl recv buf cmd
+ */
+ old_xfer_buf_ixlp->pkt_size = new_xfer_buf_ixlp->pkt_size;
+ old_xfer_buf_ixlp->size = new_xfer_buf_ixlp->size;
+ old_xfer_buf_ixlp->ixl_buf.ixldmac_addr =
+ new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
+ old_xfer_buf_ixlp->mem_bufp = new_xfer_buf_ixlp->mem_bufp;
+ break;
+ }
+ case IXL1394_OP_SEND_PKT_U:
+ case IXL1394_OP_SEND_PKT_ST_U:
+ case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
+ ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
+ ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
+
+ old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
+ new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
+
+ /*
+ * replace pkthdr2 in output more immediate (the first) hci
+ * descriptor in block, then alter buffer address and count in
+ * IXL send pkt command related output more/last hci descriptor.
+ */
+ hcidescp = (hci1394_desc_imm_t *)xferctlp->dma[0].dma_descp -
+ uvp->hdr_offset;
+ acc_hdl = xferctlp->dma[0].dma_buf->bi_handle;
+ dma_hdl = xferctlp->dma[0].dma_buf->bi_dma_handle;
+
+ ddi_put32(acc_hdl, &hcidescp->q2, uvp->pkthdr2);
+ ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
+ ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
+
+ /* Sync descriptor for device (desc was modified) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
+ sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * set old ixl send pkt size and buffers from
+ * new ixl send pkt cmd
+ */
+ old_xfer_pkt_ixlp->size = new_xfer_pkt_ixlp->size;
+ old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr =
+ new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
+ old_xfer_pkt_ixlp->mem_bufp = new_xfer_pkt_ixlp->mem_bufp;
+ break;
+ }
+ case IXL1394_OP_SEND_BUF_U: {
+ ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
+ ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
+ uint32_t desc_hdr;
+
+ old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
+ new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
+
+ /*
+ * iterate through set of descriptor blocks for this IXL xfer
+ * command replacing pkthdr2 in output more immediate
+ * (the first) hci descriptor block descriptor, then altering
+ * buffer address and count in each output last (the only other)
+ * hci descriptor block descriptor.
+ */
+ for (ii = 0; ii < xferctlp->cnt; ii++) {
+ hcidescp = (hci1394_desc_imm_t *)
+ xferctlp->dma[ii].dma_descp - uvp->hdr_offset;
+ acc_hdl = xferctlp->dma[ii].dma_buf->bi_handle;
+ dma_hdl = xferctlp->dma[ii].dma_buf->bi_dma_handle;
+
+ ddi_put32(acc_hdl, &hcidescp->q2, uvp->pkthdr2);
+ ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
+
+ /* advance to next buffer segment */
+ uvp->bufaddr += uvp->bufsize;
+
+ /* Sync the descriptor before we grab the header(s) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
+ sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(
+ hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * this preserves interrupt enable bits, et al
+ * in each desc block hdr
+ */
+ desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
+ desc_hdr = (desc_hdr & ~DESC_HDR_REQCOUNT_MASK) |
+ uvp->hcihdr;
+ ddi_put32(acc_hdl, &hcidescp->hdr, desc_hdr);
+
+ /* Sync descriptor for device (desc was modified) */
+ err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
+ sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
+ if (err != DDI_SUCCESS) {
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(
+ hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ /*
+ * set old ixl send buf sizes and buffers from
+ * new ixl send buf cmd
+ */
+ old_xfer_buf_ixlp->pkt_size = new_xfer_buf_ixlp->pkt_size;
+ old_xfer_buf_ixlp->size = new_xfer_buf_ixlp->size;
+ old_xfer_buf_ixlp->ixl_buf.ixldmac_addr =
+ new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
+ old_xfer_buf_ixlp->mem_bufp = new_xfer_buf_ixlp->mem_bufp;
+ break;
+ }
+ default:
+ /* ixl command being updated must be one of above, else error */
+ uvp->upd_status = IXL1394_EINTERNAL_ERROR;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+
+ /* hit the WAKE bit in the context control register */
+ if (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_RECV) {
+ HCI1394_IRCTXT_CTRL_SET(uvp->soft_statep, ctxtp->ctxt_index,
+ 0, 0, 0, 0, 0, 1 /* wake */);
+ } else {
+ HCI1394_ITCTXT_CTRL_SET(uvp->soft_statep, ctxtp->ctxt_index,
+ 0, 0, 0, 1 /* wake */);
+ }
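+ /*
+ * (setting wake tells the hardware that software has modified the
+ * descriptor list; a context stalled on a branch address with Z = 0
+ * re-fetches its current descriptor and continues, so the changes
+ * above take effect without stopping the context)
+ */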
+
+ /* perform update completed successfully */
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * hci1394_ixl_update_evaluate()
+ * Evaluate where the hardware is in running through the DMA descriptor
+ * blocks.
+ */
+static int
+hci1394_ixl_update_evaluate(hci1394_ixl_update_vars_t *uvp)
+{
+ hci1394_iso_ctxt_t *ctxtp;
+ ixl1394_command_t *ixlp;
+ int ixldepth;
+ int ii;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ ctxtp = uvp->ctxtp;
+
+ ixlp = NULL;
+ ixldepth = 0xFFFFFFFF;
+
+ /*
+ * repeat until the IXL execution status evaluation function returns
+ * an error or until the pointer to the currently executing IXL
+ * command and its depth stabilize
+ */
+ while ((ixlp != ctxtp->ixl_execp) ||
+ (ixldepth != ctxtp->ixl_exec_depth)) {
+
+ ixlp = ctxtp->ixl_execp;
+ ixldepth = ctxtp->ixl_exec_depth;
+
+ /*
+ * call IXL execution status evaluation (ixl_dma_sync)
+ * function returning if error (HCI1394_IXL_INTR_DMALOST is
+ * only error condition).
+ *
+ * Note: interrupt processing function can only return one of
+ * the following statuses here:
+ * HCI1394_IXL_INTR_NOERROR, HCI1394_IXL_INTR_DMASTOP,
+ * HCI1394_IXL_INTR_DMALOST
+ *
+ * it cannot return the following status here:
+ * HCI1394_IXL_INTR_NOADV
+ *
+ * Don't need to grab the lock here... for the same reason
+ * explained in hci1394_ixl_update_endup() above.
+ */
+ ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
+ if (hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp) ==
+ HCI1394_IXL_INTR_DMALOST) {
+
+ /* return post-perform update failed status */
+ uvp->upd_status = IXL1394_EPOST_UPD_DMALOST;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ /*
+ * if the currently executing IXL command is one of the IXL_MAX_LOCN
+ * locations saved before update was performed, return update
+ * successful status.
+ */
+ for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
+ if ((uvp->locn_info[ii].ixlp == ixlp) &&
+ (uvp->locn_info[ii].ixldepth == ixldepth)) {
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_SUCCESS);
+ }
+ }
+
+ /*
+ * else return post-perform update failed status.
+ * note: later can make more sophisticated evaluations about where
+ * execution processing went, and if update has really failed.
+ */
+ uvp->upd_status = IXL1394_EPOST_UPD_DMALOST;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+}
+
+/*
+ * hci1394_ixl_update_analysis()
+ * Determine if the hardware is within the range we expected it to be.
+ * If so the update succeeded.
+ */
+static int
+hci1394_ixl_update_analysis(hci1394_ixl_update_vars_t *uvp)
+{
+ hci1394_iso_ctxt_t *ctxtp;
+ ixl1394_command_t *ixlp;
+ int ixldepth;
+ int ii;
+ int status;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ ctxtp = uvp->ctxtp;
+
+ ixlp = NULL;
+ ixldepth = 0xFFFFFFFF;
+
+ /*
+ * repeat until the ixl execution status evaluation function returns
+ * an error or until the pointer to the currently executing ixl
+ * command and its depth stabilize.
+ */
+ while ((ixlp != ctxtp->ixl_execp) ||
+ (ixldepth != ctxtp->ixl_exec_depth)) {
+
+ ixlp = ctxtp->ixl_execp;
+ ixldepth = ctxtp->ixl_exec_depth;
+
+ /*
+ * call ixl execution status evaluation (interrupt processing).
+ * set IXL1394_EIDU_PRE_UPD_DMALOST if status INTR_DMALOST and
+ * return.
+ *
+ * Note: interrupt processing function can only return one of
+ * the following statuses here:
+ * HCI1394_IXL_INTR_NOERROR, HCI1394_IXL_INTR_DMASTOP or
+ * HCI1394_IXL_INTR_DMALOST
+ *
+ * it cannot return the following status here:
+ * HCI1394_IXL_INTR_NOADV
+ *
+ * Don't need to grab the lock here... for the same reason
+ * explained in hci1394_ixl_update_endup() above.
+ */
+ ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
+
+ status = hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp);
+ if (status == HCI1394_IXL_INTR_DMALOST) {
+ /*
+ * set pre-update dma processing lost status and
+ * return error
+ */
+ uvp->upd_status = IXL1394_EPRE_UPD_DMALOST;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ /*
+ * save locations of currently executing ixl command and the
+ * 3 following it.
+ */
+ hci1394_ixl_update_set_locn_info(uvp);
+
+ /*
+ * if the xfer_ixl_cmd associated with the IXL command being updated
+ * is one of the saved (currently executing) IXL commands, the risk
+ * is too great to perform the update now; set the
+ * IXL1394_ERISK_PROHIBITS_UPD status and return an error.
+ *
+ * Note: later can implement more sophisticated risk override
+ * evaluations and processing.
+ */
+ for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
+
+ if ((uvp->locn_info[ii].ixlp == uvp->ixlxferp) &&
+ (uvp->locn_info[ii].ixldepth >= uvp->ixldepth) &&
+ (uvp->locn_info[ii].ixldepth <
+ (uvp->ixldepth + uvp->ixlcount))) {
+
+ uvp->upd_status = IXL1394_ERISK_PROHIBITS_UPD;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ /* it is safe for the update to be performed, return ok status */
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * hci1394_ixl_update_set_locn_info()
+ * set up the local list of the IXL_MAX_LOCN next commandPtr locations we
+ * expect the hardware to get to in the next 125 microseconds.
+ */
+static void
+hci1394_ixl_update_set_locn_info(hci1394_ixl_update_vars_t *uvp)
+{
+ hci1394_iso_ctxt_t *ctxtp;
+ ixl1394_command_t *ixlp;
+ int ixldepth;
+ int ii;
+
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_set_locn_info_enter,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+
+ /*
+ * find next xfer start ixl command, starting with current ixl command
+ * where execution last left off
+ */
+ ctxtp = uvp->ctxtp;
+
+ ixldepth = ctxtp->ixl_exec_depth;
+ (void) hci1394_ixl_find_next_exec_xfer(ctxtp->ixl_execp, NULL, &ixlp);
+
+ /*
+ * if the current IXL command wasn't an xfer start command, then reset
+ * the depth to 0 for the xfer command found
+ */
+ if (ixlp != ctxtp->ixl_execp)
+ ixldepth = 0;
+
+ /*
+ * save xfer start IXL command & its depth and also save location and
+ * depth of the next IXL_MAX_LOCN-1 xfer start IXL commands following
+ * it (if any)
+ */
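+ /*
+ * Illustration (hypothetical counts): with IXL_MAX_LOCN of 4 and
+ * execution at xfer command A, depth 1 of cnt 3, the loop below
+ * fills locn_info with (A,1), (A,2), (B,0), (B,1), where B is the
+ * next xfer start command and is assumed to span at least two
+ * descriptor blocks.
+ */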
+ for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
+ uvp->locn_info[ii].ixlp = ixlp;
+ uvp->locn_info[ii].ixldepth = ixldepth;
+
+ if (ixlp) {
+ /*
+ * if more dma commands generated by this xfer command
+ * still follow, use them. else, find the next xfer
+ * start IXL command and set its depth to 0.
+ */
+ if (++ixldepth >= ((hci1394_xfer_ctl_t *)
+ ixlp->compiler_privatep)->cnt) {
+
+ (void) hci1394_ixl_find_next_exec_xfer(
+ ixlp->next_ixlp, NULL, &ixlp);
+ ixldepth = 0;
+ }
+ }
+ }
+ TNF_PROBE_0_DEBUG(hci1394_ixl_update_set_locn_info_exit,
+ HCI1394_TNF_HAL_STACK_ISOCH, "");
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_misc.c b/usr/src/uts/common/io/1394/adapters/hci1394_misc.c
new file mode 100644
index 0000000000..3bdefa1169
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_misc.c
@@ -0,0 +1,223 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_misc.c
+ * Misc. HBA functions. These include getinfo, open, close, shutdown, and
+ * overall driver state control functions.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/sunddi.h>
+#include <sys/types.h>
+#include <sys/mkdev.h>
+
+#include <sys/1394/adapters/hci1394.h>
+#include <sys/1394/adapters/hci1394_extern.h>
+
+
+
+/* ARGSUSED */
+int
+hci1394_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
+{
+ dev_t dev;
+ hci1394_state_t *soft_state;
+ minor_t instance;
+ int status;
+
+
+ TNF_PROBE_0_DEBUG(hci1394_getinfo_enter, HCI1394_TNF_HAL_STACK, "");
+
+ switch (cmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ dev = (dev_t)arg;
+ instance = getminor(dev);
+ soft_state = ddi_get_soft_state(hci1394_statep, instance);
+ if (soft_state == NULL) {
+ TNF_PROBE_1(hci1394_getinfo_gss_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "ddi_get_soft_state() failed");
+ TNF_PROBE_0_DEBUG(hci1394_getinfo_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ *result = (void *)soft_state->drvinfo.di_dip;
+ status = DDI_SUCCESS;
+ break;
+
+ case DDI_INFO_DEVT2INSTANCE:
+ dev = (dev_t)arg;
+ instance = getminor(dev);
+ *result = (void *)(uintptr_t)instance;
+ status = DDI_SUCCESS;
+ break;
+
+ default:
+ TNF_PROBE_1(hci1394_getinfo_def_fail, HCI1394_TNF_HAL_ERROR, "",
+ tnf_string, errmsg, "reached default in switch");
+ status = DDI_FAILURE;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_getinfo_exit, HCI1394_TNF_HAL_STACK, "");
+ return (status);
+}
+
+
+/* ARGSUSED */
+int
+hci1394_open(dev_t *devp, int flag, int otyp, cred_t *credp)
+{
+ hci1394_state_t *soft_state;
+
+
+ TNF_PROBE_0_DEBUG(hci1394_open_enter, HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = ddi_get_soft_state(hci1394_statep, getminor(*devp));
+ if (soft_state == NULL) {
+ TNF_PROBE_1(hci1394_open_gss_fail, HCI1394_TNF_HAL_ERROR, "",
+ tnf_string, errmsg, "ddi_get_soft_state() failed");
+ TNF_PROBE_0_DEBUG(hci1394_open_exit, HCI1394_TNF_HAL_STACK, "");
+ return (ENXIO);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_open_exit, HCI1394_TNF_HAL_STACK, "");
+ return (0);
+}
+
+
+/* ARGSUSED */
+int
+hci1394_close(dev_t dev, int flag, int otyp, cred_t *credp)
+{
+ TNF_PROBE_0_DEBUG(hci1394_close_enter, HCI1394_TNF_HAL_STACK, "");
+
+ TNF_PROBE_0_DEBUG(hci1394_close_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (0);
+}
+
+
+/*
+ * hci1394_shutdown()
+ * Shutdown the HW. Something bad that we cannot recover from happened.
+ */
+void
+hci1394_shutdown(dev_info_t *dip)
+{
+ hci1394_state_t *soft_state;
+
+
+ /*
+ * In the debug version of the driver, we want to do an assert here so
+ * that we don't reset the hardware and can look and see what happened
+ * to cause the shutdown.
+ */
+#ifndef TEST_SHUTDOWN
+ ASSERT(0);
+#endif
+
+ TNF_PROBE_0_DEBUG(hci1394_shutdown_enter, HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = ddi_get_soft_state(hci1394_statep, ddi_get_instance(dip));
+ if (soft_state == NULL) {
+ TNF_PROBE_1(hci1394_shutdown_gss_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "ddi_get_soft_state() failed");
+ TNF_PROBE_0_DEBUG(hci1394_shutdown_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return;
+ }
+
+ /*
+ * Don't allow the HW to generate any more interrupts. Make sure we
+ * disable interrupts before setting the driver state to shutdown.
+ */
+ hci1394_ohci_intr_master_disable(soft_state->ohci);
+
+ /* don't accept any more commands from the services layer */
+ (void) hci1394_state_set(&soft_state->drvinfo, HCI1394_SHUTDOWN);
+
+ /* Reset the OHCI HW */
+ (void) hci1394_ohci_soft_reset(soft_state->ohci);
+
+ /* Flush out async DMA Q's (cancels pendingQ timeouts too) */
+ hci1394_async_flush(soft_state->async);
+
+ TNF_PROBE_0_DEBUG(hci1394_shutdown_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_state()
+ * returns the current state of the driver
+ */
+hci1394_statevar_t
+hci1394_state(hci1394_drvinfo_t *drvinfo)
+{
+ hci1394_statevar_t hal_state;
+
+ TNF_PROBE_0_DEBUG(hci1394_state_enter, HCI1394_TNF_HAL_STACK, "");
+ mutex_enter(&drvinfo->di_drvstate.ds_mutex);
+ hal_state = drvinfo->di_drvstate.ds_state;
+ mutex_exit(&drvinfo->di_drvstate.ds_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_state_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (hal_state);
+}
+
+
+/*
+ * hci1394_state_set()
+ * Set the current state of the driver. This routine will return failure
+ * if the driver state is currently set to HCI1394_SHUTDOWN. We do not
+ * allow a transition out of shutdown.
+ */
+int
+hci1394_state_set(hci1394_drvinfo_t *drvinfo, hci1394_statevar_t state)
+{
+ TNF_PROBE_0_DEBUG(hci1394_state_set_enter, HCI1394_TNF_HAL_STACK, "");
+ mutex_enter(&drvinfo->di_drvstate.ds_mutex);
+
+ /* Do not allow a transition out of shutdown */
+ if (drvinfo->di_drvstate.ds_state == HCI1394_SHUTDOWN) {
+ mutex_exit(&drvinfo->di_drvstate.ds_mutex);
+ TNF_PROBE_1(hci1394_state_set_fail, HCI1394_TNF_HAL_STACK, "",
+ tnf_string, errmsg, "driver shutdown");
+ TNF_PROBE_0_DEBUG(hci1394_state_set_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ drvinfo->di_drvstate.ds_state = state;
+ mutex_exit(&drvinfo->di_drvstate.ds_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_state_set_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
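+
+/*
+ * Typical usage (hypothetical sketch): callers check the driver state
+ * before touching hardware, e.g.
+ *
+ *	if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN)
+ *		return (DDI_FAILURE);
+ *
+ * so that no new work is started once hci1394_shutdown() has run.
+ */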
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_ohci.c b/usr/src/uts/common/io/1394/adapters/hci1394_ohci.c
new file mode 100644
index 0000000000..87babb1ba5
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_ohci.c
@@ -0,0 +1,3338 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_ohci.c
+ * Provides access routines to the OpenHCI HW.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/sunddi.h>
+#include <sys/types.h>
+#include <sys/mkdev.h>
+#include <sys/kmem.h>
+#include <sys/pci.h>
+
+#include <sys/1394/adapters/hci1394.h>
+#include <sys/1394/adapters/hci1394_extern.h>
+
+
+/*
+ * Data swap macros used to swap config rom data that is going to be placed
+ * in OpenHCI registers. The config rom is treated like a byte stream. When
+ * the services layer calls into us to update the config rom, they pass us a
+ * byte stream of data. This works well except for the fact that the
+ * hardware uses its internal registers for the first 5 quadlets. We have to
+ * copy the cfgrom header and bus options into their corresponding OpenHCI
+ * registers. On an x86 machine, this means we have to byte swap them first.
+ */
+#ifdef _LITTLE_ENDIAN
+#define OHCI_SWAP32(DATA) (ddi_swap32(DATA))
+#else
+#define OHCI_SWAP32(DATA) (DATA)
+#endif
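+
+/*
+ * For example (illustrative bytes): the config rom byte stream
+ * 0x04 0x16 0x00 0x22, read as a uint32_t on x86, is 0x22001604;
+ * OHCI_SWAP32() restores the big-endian quadlet 0x04160022 that the
+ * config rom header register expects.
+ */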
+
+
+static int hci1394_ohci_selfid_init(hci1394_ohci_handle_t ohci_hdl);
+static int hci1394_ohci_cfgrom_init(hci1394_ohci_handle_t ohci_hdl);
+static int hci1394_ohci_chip_init(hci1394_ohci_handle_t ohci_hdl);
+static int hci1394_ohci_phy_resume(hci1394_ohci_handle_t ohci_hdl);
+static int hci1394_ohci_1394a_init(hci1394_ohci_handle_t ohci_hdl);
+static int hci1394_ohci_1394a_resume(hci1394_ohci_handle_t ohci_hdl);
+static int hci1394_ohci_phy_read_no_lock(hci1394_ohci_handle_t ohci_hdl,
+ uint_t address, uint_t *data);
+static int hci1394_ohci_phy_write_no_lock(hci1394_ohci_handle_t ohci_hdl,
+ uint_t address, uint_t data);
+
+
+/*
+ * hci1394_ohci_init()
+ * Initialize the OpenHCI hardware.
+ */
+int
+hci1394_ohci_init(hci1394_state_t *soft_state, hci1394_drvinfo_t *drvinfo,
+ hci1394_ohci_handle_t *ohci_hdl)
+{
+ int status;
+ uint32_t version;
+ hci1394_ohci_t *ohci;
+#if defined(__x86)
+ uint16_t cmdreg;
+#endif
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_init_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* alloc the space for ohci */
+ ohci = kmem_alloc(sizeof (hci1394_ohci_t), KM_SLEEP);
+ *ohci_hdl = ohci;
+
+ /*
+ * Start with the cycle timer rollover interrupt disabled. When it is
+ * enabled, we will get an interrupt every 64 seconds, even if we have
+ * nothing plugged into the bus. This interrupt is used to keep track
+ * of the bus time. We will enable the interrupt when the bus manager
+ * writes to the bus_time CSR register (Currently there are not known
+ * implementations that write to the bus_time register)
+ */
+ ohci->ohci_bustime_enabled = B_FALSE;
+ ohci->ohci_bustime_count = 0;
+
+ ohci->ohci_set_root_holdoff = B_FALSE;
+ ohci->ohci_set_gap_count = B_FALSE;
+ ohci->ohci_gap_count = 0;
+
+ mutex_init(&ohci->ohci_mutex, NULL, MUTEX_DRIVER,
+ drvinfo->di_iblock_cookie);
+
+ /* Map OpenHCI Registers */
+ status = ddi_regs_map_setup(drvinfo->di_dip, OHCI_REG_SET,
+ (caddr_t *)&ohci->ohci_regs, 0, 0, &drvinfo->di_reg_attr,
+ &ohci->ohci_reg_handle);
+ if (status != DDI_SUCCESS) {
+ mutex_destroy(&ohci->ohci_mutex);
+ kmem_free(ohci, sizeof (hci1394_ohci_t));
+ *ohci_hdl = NULL;
+ TNF_PROBE_0(ddi_regs_map_setup_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_init_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ ohci->soft_state = soft_state;
+ ohci->ohci_drvinfo = drvinfo;
+
+ /*
+ * make sure PCI Master and PCI Memory Access are enabled on x86
+ * platforms. This may not be the case if plug and play OS is
+ * set in the BIOS
+ */
+#if defined(__x86)
+ cmdreg = pci_config_get16(soft_state->pci_config, PCI_CONF_COMM);
+ if ((cmdreg & (PCI_COMM_MAE | PCI_COMM_ME)) != (PCI_COMM_MAE |
+ PCI_COMM_ME)) {
+ cmdreg |= PCI_COMM_MAE | PCI_COMM_ME;
+ pci_config_put16(soft_state->pci_config, PCI_CONF_COMM, cmdreg);
+ }
+#endif
+
+ /*
+ * Initialize the openHCI chip. This is broken out because we need to
+ * do this when resuming too.
+ */
+ status = hci1394_ohci_chip_init(ohci);
+ if (status != DDI_SUCCESS) {
+ ddi_regs_map_free(&ohci->ohci_reg_handle);
+ mutex_destroy(&ohci->ohci_mutex);
+ kmem_free(ohci, sizeof (hci1394_ohci_t));
+ *ohci_hdl = NULL;
+ TNF_PROBE_0(hci1394_ohci_chip_init_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Init the 1394 PHY */
+ status = hci1394_ohci_phy_init(ohci);
+ if (status != DDI_SUCCESS) {
+ (void) hci1394_ohci_soft_reset(ohci);
+ ddi_regs_map_free(&ohci->ohci_reg_handle);
+ mutex_destroy(&ohci->ohci_mutex);
+ kmem_free(ohci, sizeof (hci1394_ohci_t));
+ *ohci_hdl = NULL;
+ TNF_PROBE_0(hci1394_ohci_phy_init_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Init 1394a features if present */
+ if (ohci->ohci_phy == H1394_PHY_1394A) {
+ status = hci1394_ohci_1394a_init(ohci);
+ if (status != DDI_SUCCESS) {
+ (void) hci1394_ohci_soft_reset(ohci);
+ ddi_regs_map_free(&ohci->ohci_reg_handle);
+ mutex_destroy(&ohci->ohci_mutex);
+ kmem_free(ohci, sizeof (hci1394_ohci_t));
+ *ohci_hdl = NULL;
+ TNF_PROBE_0(hci1394_ohci_1394a_init_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ /* save away guid, phy type, and vendor info */
+ soft_state->halinfo.guid = hci1394_ohci_guid(ohci);
+ soft_state->halinfo.phy = ohci->ohci_phy;
+ soft_state->vendor_info.ohci_vendor_id =
+ ddi_get32(ohci->ohci_reg_handle, &ohci->ohci_regs->vendor_id);
+ version = ddi_get32(ohci->ohci_reg_handle, &ohci->ohci_regs->version);
+ soft_state->vendor_info.ohci_version = version;
+
+ /* We do not support version < 1.0 */
+ if (OHCI_VERSION(version) == 0) {
+ cmn_err(CE_NOTE,
+ "hci1394(%d): OpenHCI version %x.%x is not supported",
+ drvinfo->di_instance, OHCI_VERSION(version),
+ OHCI_REVISION(version));
+ (void) hci1394_ohci_soft_reset(ohci);
+ ddi_regs_map_free(&ohci->ohci_reg_handle);
+ mutex_destroy(&ohci->ohci_mutex);
+ kmem_free(ohci, sizeof (hci1394_ohci_t));
+ *ohci_hdl = NULL;
+ TNF_PROBE_0(hci1394_ohci_version_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize the selfid buffer */
+ status = hci1394_ohci_selfid_init(ohci);
+ if (status != DDI_SUCCESS) {
+ (void) hci1394_ohci_soft_reset(ohci);
+ ddi_regs_map_free(&ohci->ohci_reg_handle);
+ mutex_destroy(&ohci->ohci_mutex);
+ kmem_free(ohci, sizeof (hci1394_ohci_t));
+ *ohci_hdl = NULL;
+ TNF_PROBE_0(hci1394_ohci_selfid_init_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize the config rom buffer */
+ status = hci1394_ohci_cfgrom_init(ohci);
+ if (status != DDI_SUCCESS) {
+ (void) hci1394_ohci_soft_reset(ohci);
+ hci1394_buf_free(&ohci->ohci_selfid_handle);
+ ddi_regs_map_free(&ohci->ohci_reg_handle);
+ mutex_destroy(&ohci->ohci_mutex);
+ kmem_free(ohci, sizeof (hci1394_ohci_t));
+ *ohci_hdl = NULL;
+ TNF_PROBE_0(hci1394_ohci_cfgrom_init_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_init_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_fini()
+ * Cleanup after OpenHCI init. This should be called during detach.
+ */
+void
+hci1394_ohci_fini(hci1394_ohci_handle_t *ohci_hdl)
+{
+ hci1394_ohci_t *ohci;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_fini_enter, HCI1394_TNF_HAL_STACK, "");
+
+ ohci = *ohci_hdl;
+
+ /* reset chip */
+ (void) hci1394_ohci_soft_reset(ohci);
+
+ /* Free config rom space */
+ hci1394_buf_free(&ohci->ohci_cfgrom_handle);
+
+ /* Free selfid buffer space */
+ hci1394_buf_free(&ohci->ohci_selfid_handle);
+
+ /* Free up the OpenHCI registers */
+ ddi_regs_map_free(&ohci->ohci_reg_handle);
+
+ mutex_destroy(&ohci->ohci_mutex);
+
+ /* Free the OpenHCI state space */
+ kmem_free(ohci, sizeof (hci1394_ohci_t));
+ *ohci_hdl = NULL;
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_fini_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_chip_init()
+ * Initialize the OpenHCI registers. This contains the bulk of the initial
+ * register setup.
+ */
+static int
+hci1394_ohci_chip_init(hci1394_ohci_handle_t ohci_hdl)
+{
+ int status;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_chip_init_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ /* Reset 1394 OHCI HW */
+ status = hci1394_ohci_soft_reset(ohci_hdl);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_soft_reset_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Setup the Host Control Register. The software reset does not put
+ * all registers in a known state; the Host Control Register is one of
+ * these registers. First make sure noByteSwapData and
+ * postedWriteEnable are cleared.
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->hc_ctrl_clr, OHCI_HC_NO_BSWAP |
+ OHCI_HC_POSTWR_ENBL);
+
+ /*
+ * the determination of whether we should swap data is made during
+ * PCI initialization.
+ */
+ if (ohci_hdl->soft_state->swap_data == B_FALSE) {
+ /*
+ * most HBAs don't swap data; it will be swapped in the
+ * global swap for SPARC. Enable Link Power (LPS) and
+ * Posted Writes
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->hc_ctrl_set, OHCI_HC_NO_BSWAP |
+ OHCI_HC_LPS | OHCI_HC_POSTWR_ENBL);
+ } else {
+ /*
+ * Swap Data. Enable Link Power(LPS). Enable Posted Writes
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->hc_ctrl_set, OHCI_HC_LPS |
+ OHCI_HC_POSTWR_ENBL);
+ }
+
+ /*
+ * Wait for the PHY to come up. There does not seem to be a standard
+ * time for how long to wait for the PHY to come up. The problem is
+ * that the PHY provides a clock to the link layer and, if that is not
+ * stable, we could get a PCI timeout error when reading/writing a phy
+ * register (and maybe an OpenHCI register?). This used to be set to
+ * 10mS, which works for just about every adapter we tested on. We got
+ * a new TI adapter which would crash the system once in a while if
+ * nothing (no 1394 device) was plugged into the adapter. Changing
+ * this delay to 50mS made that problem go away. This value is set via
+ * a patchable variable located in hci1394_extern.c
+ */
+ delay(drv_usectohz(hci1394_phy_stabilization_delay_uS));
+
+ /* Clear isochronous receive multi-chan mode registers */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ir_multi_maskhi_clr, 0xFFFFFFFF);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ir_multi_masklo_clr, 0xFFFFFFFF);
+
+ /*
+ * Setup async retry on busy or ack_data_error
+ * secondlimit = 0 <= bits 31-29
+ * cycleLimit = 0 <= bits 28-16
+ * maxPhysRespRetries = 0 <= bits 11-8
+ * maxARRespRetries = 0 <= bits 7-4
+ * maxATReqRetries = 2 <= bits 3-0
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_retries, 0x00000002);
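+ /*
+ * i.e. the 0x00000002 written above decodes, per the field layout
+ * described here, to maxATReqRetries = 2 with all other retry
+ * fields zero.
+ */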
+
+ /*
+ * Setup Link Control
+ * Enable cycleMaster, cycleTimerEnable, and rcvPhyPkt.
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->link_ctrl_clr, 0xFFFFFFFF);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->link_ctrl_set, OHCI_LC_CYC_MAST |
+ OHCI_LC_CTIME_ENBL | OHCI_LC_RCV_PHY);
+
+ /*
+ * Set the Physical address map boundary to 0x0000FFFFFFFF. The
+ * phys_upper_bound is the upper 32-bits of the 48-bit 1394 address. The
+ * lower 16 bits are assumed to be 0xFFFF.
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->phys_upper_bound, (uint32_t)0x0000FFFF);
+
+ /*
+ * Enable all async requests.
+ * The asyncReqResourceAll bit (0x80000000) does not get cleared during
+ * a bus reset. If this code is changed to selectively allow nodes to
+ * perform ARREQ's, the ARREQ filter bits will need to be updated after
+ * every bus reset.
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ar_req_filterhi_set, (uint32_t)0x80000000);
+
+ /*
+ * clear the isochronous interrupt event and mask registers;
+ * clearing the mask registers disables all isoc tx & rx interrupts
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->it_intr_event_clr, (uint32_t)0xFFFFFFFF);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->it_intr_mask_clr, (uint32_t)0xFFFFFFFF);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ir_intr_event_clr, (uint32_t)0xFFFFFFFF);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ir_intr_mask_clr, (uint32_t)0xFFFFFFFF);
+
+ /* Clear interrupt event/mask register */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->intr_event_clr, (uint32_t)0xFFFFFFFF);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->intr_mask_clr, (uint32_t)0xFFFFFFFF);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_chip_init_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_soft_reset()
+ * Reset OpenHCI HW.
+ */
+int
+hci1394_ohci_soft_reset(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t resetStatus;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_soft_reset_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* Reset 1394 HW - Reset is bit 16 in HCControl */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->hc_ctrl_set, OHCI_HC_SOFT_RESET);
+
+ /* Wait for reset to complete */
+ drv_usecwait(OHCI_CHIP_RESET_TIME_IN_uSEC);
+
+ /* Verify reset is complete */
+ resetStatus = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->hc_ctrl_set);
+ resetStatus = resetStatus & OHCI_HC_SOFT_RESET;
+ if (resetStatus != 0) {
+ TNF_PROBE_0(hci1394_ohci_reset_not_complete_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_soft_reset_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_soft_reset_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_reg_read()
+ * Read OpenHCI register. This is called from the test ioctl interface
+ * through devctl.
+ */
+void
+hci1394_ohci_reg_read(hci1394_ohci_handle_t ohci_hdl,
+ uint_t offset, uint32_t *data)
+{
+ uint32_t *addr;
+
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(data != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_reg_read_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ addr = (uint32_t *)((uintptr_t)ohci_hdl->ohci_regs +
+ (uintptr_t)(offset & OHCI_REG_ADDR_MASK));
+ *data = ddi_get32(ohci_hdl->ohci_reg_handle, addr);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_reg_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_reg_write()
+ * Write OpenHCI register. This is called from the test ioctl interface
+ * through devctl.
+ */
+void
+hci1394_ohci_reg_write(hci1394_ohci_handle_t ohci_hdl,
+ uint_t offset, uint32_t data)
+{
+ uint32_t *addr;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_reg_write_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ addr = (uint32_t *)((uintptr_t)ohci_hdl->ohci_regs +
+ (uintptr_t)(offset & OHCI_REG_ADDR_MASK));
+ ddi_put32(ohci_hdl->ohci_reg_handle, addr, data);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_reg_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_intr_master_enable()
+ * Enable interrupts to be passed on from OpenHCI. This is a global mask.
+ * Individual interrupts still need to be enabled for interrupts to be
+ * generated.
+ */
+void
+hci1394_ohci_intr_master_enable(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_master_enable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->intr_mask_set, OHCI_INTR_MASTER_INTR_ENBL);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_master_enable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_intr_master_disable()
+ * Disable all OpenHCI interrupts from being passed on. This does not affect
+ * the individual interrupt mask settings. When interrupts are enabled
+ * again, the same individual interrupts will still be enabled.
+ */
+void
+hci1394_ohci_intr_master_disable(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_master_disable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->intr_mask_clr, OHCI_INTR_MASTER_INTR_ENBL);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_master_disable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_intr_asserted()
+ * Return which ENABLED interrupts are asserted. If an interrupt is disabled
+ * via its mask bit, it will not be returned from here.
+ *
+ * NOTE: we may want to make this a macro at some point.
+ */
+uint32_t
+hci1394_ohci_intr_asserted(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t interrupts_asserted;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_asserted_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Only look at interrupts which are enabled by reading the
+ * intr_event_clr register.
+ */
+ interrupts_asserted = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->intr_event_clr);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_asserted_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (interrupts_asserted);
+}
+
+
+/*
+ * hci1394_ohci_intr_enable()
+ * Enable an individual interrupt or set of interrupts. This does not affect
+ * the global interrupt mask.
+ */
+void
+hci1394_ohci_intr_enable(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t interrupt_mask)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_enable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->intr_mask_set, interrupt_mask);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_enable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_intr_disable()
+ * Disable an individual interrupt or set of interrupts. This does not affect
+ * the global interrupt mask.
+ */
+void
+hci1394_ohci_intr_disable(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t interrupt_mask)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_disable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->intr_mask_clr, interrupt_mask);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_disable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_intr_clear()
+ * Clear a set of interrupts so that they are not asserted anymore.
+ *
+ * NOTE: we may want to make this a macro at some point.
+ */
+void
+hci1394_ohci_intr_clear(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t interrupt_mask)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_clear_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->intr_event_clr, interrupt_mask);
+ TNF_PROBE_1_DEBUG(hci1394_ohci_intr_clear, HCI1394_TNF_HAL, "",
+ tnf_uint, intr_mask, interrupt_mask);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_intr_clear_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_it_intr_asserted()
+ * Return which ENABLED isoch TX interrupts are asserted. If an interrupt is
+ * disabled via its mask bit, it will not be returned from here.
+ *
+ * NOTE: we may want to make this a macro at some point.
+ */
+uint32_t
+hci1394_ohci_it_intr_asserted(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t interrupts_asserted;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_intr_asserted_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* Only look at interrupts which are enabled */
+ interrupts_asserted = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->it_intr_event_clr);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_intr_asserted_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (interrupts_asserted);
+}
+
+
+/*
+ * hci1394_ohci_it_intr_enable()
+ * Enable an individual isoch TX interrupt. This does not affect the general
+ * isoch interrupt mask in the OpenHCI Mask register. That is enabled/
+ * disabled via hci1394_ohci_intr_enable/hci1394_ohci_intr_disable.
+ */
+void
+hci1394_ohci_it_intr_enable(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t interrupt_mask)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_intr_enable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->it_intr_mask_set, interrupt_mask);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_intr_enable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_it_intr_disable()
+ * Disable an individual isoch TX interrupt. This does not affect the general
+ * isoch interrupt mask in the OpenHCI Mask register. That is enabled/
+ * disabled via hci1394_ohci_intr_enable/hci1394_ohci_intr_disable.
+ */
+void
+hci1394_ohci_it_intr_disable(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t interrupt_mask)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_intr_disable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->it_intr_mask_clr, interrupt_mask);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_intr_disable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_it_intr_clear()
+ * Clear an individual isoch TX interrupt so that it is not asserted anymore.
+ *
+ * NOTE: we may want to make this a macro at some point.
+ */
+void
+hci1394_ohci_it_intr_clear(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t interrupt_mask)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_intr_clear_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->it_intr_event_clr, interrupt_mask);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_intr_clear_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_it_ctxt_count_get()
+ * Determine the number of supported isochronous transmit contexts.
+ */
+int
+hci1394_ohci_it_ctxt_count_get(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t channel_mask;
+ int count;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_ctxt_count_get_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * hw is required to support contexts 0 to N, where N <= 31.
+ * The interrupt mask bits are wired to ground for unsupported
+ * contexts. Write 1's to all it mask bits, then read the mask.
+ * Implemented contexts will read (sequentially) as 1.
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->it_intr_mask_set, 0xFFFFFFFF);
+ channel_mask = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->it_intr_mask_set);
+ count = 0;
+ while (channel_mask != 0) {
+ channel_mask = channel_mask >> 1;
+ count++;
+ }
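+ /*
+ * e.g. (hypothetical readback): a mask of 0x000000FF counts up to
+ * 8 supported contexts.
+ */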
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_ctxt_count_get_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (count);
+}
+
+
+/*
+ * hci1394_ohci_it_cmd_ptr_set()
+ * Set the context pointer for a given isoch TX context. This is the IO
+ * address for the HW to fetch the first descriptor. The context should
+ * not be running when this routine is called.
+ */
+void
+hci1394_ohci_it_cmd_ptr_set(hci1394_ohci_handle_t ohci_hdl,
+ uint_t context_number, uint32_t io_addr)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_cmd_ptr_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->it[context_number].cmd_ptrlo,
+ io_addr);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_it_cmd_ptr_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_ir_intr_asserted()
+ * Return which ENABLED isoch RX interrupts are asserted. If an interrupt is
+ * disabled via its mask bit, it will not be returned from here.
+ *
+ * NOTE: we may want to make this a macro at some point.
+ */
+uint32_t
+hci1394_ohci_ir_intr_asserted(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t interrupts_asserted;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_intr_asserted_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* Only look at interrupts which are enabled */
+ interrupts_asserted = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ir_intr_event_clr);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_intr_asserted_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (interrupts_asserted);
+}
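+
+/*
+ * A note on the read above: OpenHCI defines a read of an event register
+ * through its *clear* address to return the events ANDed with the
+ * corresponding mask, which is why only enabled interrupts are reported
+ * here. Reading through the *set* address would return the raw, unmasked
+ * events instead.
+ */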
+
+
+/*
+ * hci1394_ohci_ir_intr_enable()
+ * Enable an individual isoch RX interrupt. This does not affect the isoch
+ * interrupt mask in the OpenHCI Mask register. That is enabled/disabled
+ * via hci1394_ohci_intr_enable/hci1394_ohci_intr_disable.
+ */
+void
+hci1394_ohci_ir_intr_enable(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t interrupt_mask)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_intr_enable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ir_intr_mask_set, interrupt_mask);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_intr_enable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_ir_intr_disable()
+ * Disable an individual isoch RX interrupt. This does not affect the isoch
+ * interrupt mask in the OpenHCI Mask register. That is enabled/disabled
+ * via hci1394_ohci_intr_enable/hci1394_ohci_intr_disable.
+ */
+void
+hci1394_ohci_ir_intr_disable(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t interrupt_mask)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_intr_disable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ir_intr_mask_clr, interrupt_mask);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_intr_disable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_ir_intr_clear()
+ * Clear an individual isoch RX interrupt so that it is not asserted anymore.
+ *
+ * NOTE: we may want to make this a macro at some point.
+ */
+void
+hci1394_ohci_ir_intr_clear(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t interrupt_mask)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_intr_clear_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ir_intr_event_clr, interrupt_mask);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_intr_clear_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_ir_ctxt_count_get()
+ * Determine the number of supported isochronous receive contexts.
+ */
+int
+hci1394_ohci_ir_ctxt_count_get(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t channel_mask;
+ int count;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_ctxt_count_get_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+	/*
+	 * The hardware is required to support contexts 0 through N, where
+	 * N <= 31. The interrupt mask bits are wired to ground for
+	 * unsupported contexts. Write 1's to all IR mask bits, then read
+	 * back the mask; implemented contexts will read (sequentially) as 1.
+	 */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ir_intr_mask_set, 0xFFFFFFFF);
+ channel_mask = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ir_intr_mask_set);
+ count = 0;
+ while (channel_mask != 0) {
+ channel_mask = channel_mask >> 1;
+ count++;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_ctxt_count_get_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (count);
+}
+
+
+/*
+ * hci1394_ohci_ir_cmd_ptr_set()
+ * Set the context pointer for a given isoch RX context. This is the IO
+ * address for the HW to fetch the first descriptor. The context should
+ * not be running when this routine is called.
+ */
+void
+hci1394_ohci_ir_cmd_ptr_set(hci1394_ohci_handle_t ohci_hdl,
+ uint_t context_number, uint32_t io_addr)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_cmd_ptr_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ir[context_number].cmd_ptrlo,
+ io_addr);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_ir_cmd_ptr_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_link_enable()
+ * Enable the 1394 link layer. When the link is enabled, the PHY will pass
+ * up any 1394 bus transactions which would normally come up to the link.
+ */
+void
+hci1394_ohci_link_enable(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_link_enable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->hc_ctrl_set, OHCI_HC_LINK_ENBL);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_link_enable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_link_disable()
+ * Disable the 1394 link layer. When the link is disabled, the PHY will NOT
+ * pass up any 1394 bus transactions which would normally come up to the
+ * link. This "logically" disconnects us from the 1394 bus.
+ */
+void
+hci1394_ohci_link_disable(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_link_disable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->hc_ctrl_clr, OHCI_HC_LINK_ENBL);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_link_disable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_bus_reset()
+ * Reset the 1394 bus. This performs a "long" bus reset and can be called
+ * when the adapter has either a 1394-1995 or 1394A PHY.
+ */
+int
+hci1394_ohci_bus_reset(hci1394_ohci_handle_t ohci_hdl)
+{
+ int status;
+ uint_t reg;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bus_reset_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * We want to reset the bus. We also handle the root_holdoff and gap
+	 * count caching explained at the top of this file.
+ */
+ reg = OHCI_PHY_IBR;
+ if (ohci_hdl->ohci_set_root_holdoff == B_TRUE) {
+ reg = reg | OHCI_PHY_RHB;
+ }
+ if (ohci_hdl->ohci_set_gap_count == B_TRUE) {
+ reg = reg | ohci_hdl->ohci_gap_count;
+ } else {
+ reg = reg | OHCI_PHY_MAX_GAP;
+ }
+
+ /*
+ * Reset the bus. We intentionally do NOT do a PHY read here. A PHY
+ * read could introduce race conditions and would be more likely to fail
+ * due to a timeout.
+ */
+ status = hci1394_ohci_phy_write(ohci_hdl, 0x1, reg);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_phy_write_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bus_reset_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* clear the root holdoff and gap count state bits */
+ ohci_hdl->ohci_set_root_holdoff = B_FALSE;
+ ohci_hdl->ohci_set_gap_count = B_FALSE;
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bus_reset_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
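+
+/*
+ * A worked example of the PHY register 1 value built above, assuming the
+ * usual 1394 layout (RHB in bit 7, IBR in bit 6, gap count in bits 5-0):
+ * a reset with root holdoff and a cached gap count of 0x2A writes
+ * 0x40 | 0x80 | 0x2A = 0xEA, while a reset with no cached gap count uses
+ * the max gap (0x3F), giving 0x7F without root holdoff.
+ */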
+
+
+/*
+ * hci1394_ohci_phy_init()
+ * Setup the PHY. This should be called during attach and performs any PHY
+ * initialization required including figuring out what kind of PHY we have.
+ */
+int
+hci1394_ohci_phy_init(hci1394_ohci_handle_t ohci_hdl)
+{
+ int status;
+ uint_t phy_reg;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_init_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+	/*
+	 * If the PHY's extended field is set to 7, the PHY is not a 1394-1995
+	 * PHY. It could be a 1394A PHY or beyond. The PHY type can be found
+	 * in PHY register page 1 in the compliance_level register.
+	 *
+	 * Since there are no current standards beyond 1394A, we are going
+	 * to consider the PHY to be a 1394A PHY if the extended field is set.
+	 *
+	 * PHY registers are byte wide registers and are addressed as 0, 1, 2,
+	 * 3, ... PHY register 0 may not be read or written.
+	 *
+	 * PHY register 0x2 (bit 0 MSB, 7 LSB)
+	 *    Extended    - bits 0 - 2
+	 *    Total Ports - bits 4 - 7
+	 */
+ status = hci1394_ohci_phy_read(ohci_hdl, 2, &phy_reg);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_phy_read_failed,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if ((phy_reg & OHCI_PHY_EXTND_MASK) != OHCI_PHY_EXTND) {
+ /*
+ * if the extended bit is not set, we have to be a 1394-1995
+ * PHY
+ */
+ ohci_hdl->ohci_phy = H1394_PHY_1995;
+ } else {
+		/* Treat all other PHYs as 1394A PHYs */
+ ohci_hdl->ohci_phy = H1394_PHY_1394A;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_phy_resume()
+ * re-initialize the PHY. This routine should be called during a resume after
+ * a successful suspend has been done.
+ */
+/* ARGSUSED */
+static int
+hci1394_ohci_phy_resume(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_resume_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* There is currently nothing to re-initialize here */
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_phy_set()
+ * Perform bitset operation on PHY register.
+ */
+int
+hci1394_ohci_phy_set(hci1394_ohci_handle_t ohci_hdl, uint_t address,
+ uint_t bits)
+{
+ int status;
+ uint_t reg;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&ohci_hdl->ohci_mutex);
+
+ /* read the PHY register */
+ status = hci1394_ohci_phy_read_no_lock(ohci_hdl, address, &reg);
+ if (status != DDI_SUCCESS) {
+ mutex_exit(&ohci_hdl->ohci_mutex);
+ TNF_PROBE_0(hci1394_ohci_phy_read_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Set the bits and write the result back */
+ reg = reg | bits;
+ status = hci1394_ohci_phy_write_no_lock(ohci_hdl, address, reg);
+ if (status != DDI_SUCCESS) {
+ mutex_exit(&ohci_hdl->ohci_mutex);
+ TNF_PROBE_0(hci1394_ohci_phy_write_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ mutex_exit(&ohci_hdl->ohci_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_set_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_phy_clr()
+ * Perform bitclr operation on PHY register.
+ */
+int
+hci1394_ohci_phy_clr(hci1394_ohci_handle_t ohci_hdl, uint_t address,
+ uint_t bits)
+{
+ int status;
+ uint_t reg;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_clr_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&ohci_hdl->ohci_mutex);
+
+ /* read the PHY register */
+ status = hci1394_ohci_phy_read_no_lock(ohci_hdl, address, &reg);
+ if (status != DDI_SUCCESS) {
+ mutex_exit(&ohci_hdl->ohci_mutex);
+ TNF_PROBE_0(hci1394_ohci_phy_read_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_clr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Set the bits and write the result back */
+ reg = reg & ~bits;
+ status = hci1394_ohci_phy_write_no_lock(ohci_hdl, address, reg);
+ if (status != DDI_SUCCESS) {
+ mutex_exit(&ohci_hdl->ohci_mutex);
+ TNF_PROBE_0(hci1394_ohci_phy_write_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_clr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ mutex_exit(&ohci_hdl->ohci_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_clr_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_phy_read()
+ * Atomic PHY register read
+ */
+int
+hci1394_ohci_phy_read(hci1394_ohci_handle_t ohci_hdl, uint_t address,
+ uint_t *data)
+{
+ int status;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_read_enter, HCI1394_TNF_HAL_STACK,
+ "");
+ mutex_enter(&ohci_hdl->ohci_mutex);
+ status = hci1394_ohci_phy_read_no_lock(ohci_hdl, address, data);
+ mutex_exit(&ohci_hdl->ohci_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_read_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (status);
+}
+
+
+/*
+ * hci1394_ohci_phy_write()
+ * Atomic PHY register write
+ */
+int
+hci1394_ohci_phy_write(hci1394_ohci_handle_t ohci_hdl, uint_t address,
+ uint_t data)
+{
+ int status;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_write_enter, HCI1394_TNF_HAL_STACK,
+ "");
+ mutex_enter(&ohci_hdl->ohci_mutex);
+ status = hci1394_ohci_phy_write_no_lock(ohci_hdl, address, data);
+ mutex_exit(&ohci_hdl->ohci_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_write_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (status);
+}
+
+
+/*
+ * hci1394_ohci_phy_read_no_lock()
+ *    This routine actually performs the PHY register read. It is separated
+ *    out from phy_read so that phy_set and phy_clr can perform an atomic
+ *    PHY register read-modify-write. It assumes the OpenHCI mutex is held.
+ */
+static int
+hci1394_ohci_phy_read_no_lock(hci1394_ohci_handle_t ohci_hdl, uint_t address,
+ uint_t *data)
+{
+ uint32_t ohci_reg;
+ int count;
+
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(data != NULL);
+ ASSERT(MUTEX_HELD(&ohci_hdl->ohci_mutex));
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_read_no_lock_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* You can't read or write PHY register #0 */
+ if (address == 0) {
+ TNF_PROBE_1(hci1394_ohci_phy_addr_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "can't rd/wr PHY reg #0");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_read_no_lock_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Verify phy access not in progress */
+ ohci_reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->phy_ctrl);
+ if ((ohci_reg & (OHCI_PHYC_RDREG | OHCI_PHYC_WRREG)) != 0) {
+ TNF_PROBE_1(hci1394_ohci_phy_xfer_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "transfer already in progress?");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_read_no_lock_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Start the PHY register read */
+ ohci_reg = OHCI_PHYC_RDREG | ((address & 0xF) <<
+ OHCI_PHYC_REGADDR_SHIFT);
+ ddi_put32(ohci_hdl->ohci_reg_handle, &ohci_hdl->ohci_regs->phy_ctrl,
+ ohci_reg);
+
+ /*
+ * The PHY read usually takes less than 1uS. It is not worth having
+ * this be interrupt driven. Having this be interrupt driven would also
+ * make the bus reset and self id processing much more complex for
+ * 1995 PHY's. We will wait up to hci1394_phy_delay_uS for the read
+ * to complete (this was initially set to 10). I have yet to see
+ * count > 1. The delay is a patchable variable.
+ */
+ count = 0;
+ while (count < hci1394_phy_delay_uS) {
+ /* See if the read is done yet */
+ ohci_reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->phy_ctrl);
+ if ((ohci_reg & OHCI_PHYC_RDDONE) != 0) {
+ /*
+ * The read is done. clear the phyRegRecv interrupt. We
+ * do not have this interrupt enabled but this keeps
+ * things clean in case someone in the future does.
+ * Break out of the loop, we are done.
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->intr_event_clr,
+ OHCI_INTR_PHY_REG_RCVD);
+ break;
+ }
+
+ /*
+ * the phy read did not yet complete, wait 1uS, increment the
+ * count and try again.
+ */
+ drv_usecwait(1);
+ count++;
+ }
+
+ /* Check to see if we timed out */
+ if (count >= hci1394_phy_delay_uS) {
+ /* we timed out, return failure */
+ *data = 0;
+ TNF_PROBE_0(hci1394_ohci_phy_rd_timeout_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_read_no_lock_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* setup the PHY read data to be returned */
+ *data = (ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->phy_ctrl) & OHCI_PHYC_RDDATA_MASK) >>
+ OHCI_PHYC_RDDATA_SHIFT;
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_read_no_lock_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_phy_write_no_lock()
+ * This routine actually performs the PHY register write. It is separated
+ * out from phy_write so set & clr lock can perform an atomic PHY register
+ * operation. It assumes the OpenHCI mutex is held.
+ */
+static int
+hci1394_ohci_phy_write_no_lock(hci1394_ohci_handle_t ohci_hdl, uint_t address,
+ uint_t data)
+{
+ uint32_t ohci_reg;
+ int count;
+
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(MUTEX_HELD(&ohci_hdl->ohci_mutex));
+	TNF_PROBE_0_DEBUG(hci1394_ohci_phy_write_no_lock_enter,
+	    HCI1394_TNF_HAL_STACK, "");
+
+ /* You can't read or write PHY register #0 */
+ if (address == 0) {
+ TNF_PROBE_1(hci1394_ohci_phy_addr_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "can't rd/wr PHY reg #0");
+		TNF_PROBE_0_DEBUG(hci1394_ohci_phy_write_no_lock_exit,
+		    HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Verify phy access not in progress */
+ ohci_reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->phy_ctrl);
+ if ((ohci_reg & (OHCI_PHYC_RDREG | OHCI_PHYC_WRREG)) != 0) {
+ TNF_PROBE_1(hci1394_ohci_phy_xfer_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "transfer already in progress?");
+		TNF_PROBE_0_DEBUG(hci1394_ohci_phy_write_no_lock_exit,
+		    HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Start the PHY register write */
+ ohci_reg = OHCI_PHYC_WRREG | ((address & 0xF) <<
+ OHCI_PHYC_REGADDR_SHIFT) | (data & OHCI_PHYC_WRDATA_MASK);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->phy_ctrl, ohci_reg);
+
+ /*
+ * The PHY write usually takes less than 1uS. It is not worth having
+ * this be interrupt driven. Having this be interrupt driven would also
+ * make the bus reset and self id processing much more complex. We will
+ * wait up to hci1394_phy_delay_uS for the write to complete (this was
+ * initially set to 10). I have yet to see count > 0. The delay is a
+ * patchable variable.
+ */
+ count = 0;
+ while (count < hci1394_phy_delay_uS) {
+ /* See if the write is done yet */
+ ohci_reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->phy_ctrl);
+ if ((ohci_reg & OHCI_PHYC_WRREG) == 0) {
+ /*
+ * The write completed. Break out of the loop, we are
+ * done.
+ */
+ break;
+ }
+
+ /*
+ * the phy write did not yet complete, wait 1uS, increment the
+ * count and try again.
+ */
+ drv_usecwait(1);
+ count++;
+ }
+
+ /* Check to see if we timed out */
+ if (count >= hci1394_phy_delay_uS) {
+ /* we timed out, return failure */
+ TNF_PROBE_0(hci1394_ohci_phy_wr_timeout_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+		TNF_PROBE_0_DEBUG(hci1394_ohci_phy_write_no_lock_exit,
+		    HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+	TNF_PROBE_0_DEBUG(hci1394_ohci_phy_write_no_lock_exit,
+	    HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
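+
+/*
+ * A worked example of the phy_ctrl encoding used above, assuming the
+ * OpenHCI PhyControl layout (wrReg in bit 14, regAddr in bits 11-8,
+ * wrData in bits 7-0): writing 0xEA to PHY register 1 builds
+ * 0x4000 | (0x1 << 8) | 0xEA = 0x41EA. The completion poll then spins
+ * until the hardware clears the wrReg bit.
+ */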
+
+
+/*
+ * hci1394_ohci_phy_info()
+ *    Return the selfid word for our PHY. This routine should ONLY be called
+ *    for adapters with a 1394-1995 PHY. These PHYs do not embed their own
+ *    selfid information in the selfid buffer, so we need to build it for
+ *    them in the selfid complete interrupt handler. This routine only
+ *    supports building selfid info for a PHY with up to 3 ports. Since we
+ *    will probably never see a 1394-1995 PHY in a production system, and if
+ *    we do it will have 3 ports or fewer, this is a safe assumption.
+ */
+int
+hci1394_ohci_phy_info(hci1394_ohci_handle_t ohci_hdl, uint32_t *info)
+{
+ int status;
+ uint32_t phy_info;
+ uint32_t reg;
+ int index;
+ int num_ports;
+ int count;
+ uint32_t port_status;
+
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(info != NULL);
+ ASSERT(ohci_hdl->ohci_phy == H1394_PHY_1995);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_info_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Set Link on. We are using power class 0 since we have no idea what
+ * our real power class is.
+ */
+ phy_info = 0x80400000;
+
+ /* Add in Physical ID */
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->node_id);
+ phy_info = phy_info | ((reg << IEEE1394_SELFID_PHYID_SHIFT) &
+ IEEE1394_SELFID_PHYID_MASK);
+
+ /* Add in Gap Count */
+ status = hci1394_ohci_phy_read(ohci_hdl, 1, &reg);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_phy_read_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_info_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ phy_info = phy_info | ((reg << IEEE1394_SELFID_GAP_CNT_SHIFT) &
+ IEEE1394_SELFID_GAP_CNT_MASK);
+
+ /* Add in speed & ports */
+ status = hci1394_ohci_phy_read(ohci_hdl, 2, &reg);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_phy_read_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_info_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ phy_info = phy_info | ((reg & 0xC0) << 8);
+ num_ports = reg & 0x1F;
+
+ /* PHY reports that it has 0 ports?? */
+ if (num_ports == 0) {
+ TNF_PROBE_1(hci1394_ohci_phy_zero_ports_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "1995 phy has zero ports?");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_info_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Build up the port information for each port in the PHY */
+ count = 0;
+ for (index = 0; index < 3; index++) {
+ if (num_ports > 0) {
+ status = hci1394_ohci_phy_read(ohci_hdl,
+ count + 3, &reg);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_phy_read_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_info_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ /* if port is not connected */
+ if ((reg & 0x04) == 0) {
+ port_status =
+ IEEE1394_SELFID_PORT_NOT_CONNECTED;
+
+ /* else if port is connected to parent */
+ } else if ((reg & 0x08) == 0) {
+ port_status = IEEE1394_SELFID_PORT_TO_PARENT;
+
+ /* else port is connected to child */
+ } else {
+ port_status = IEEE1394_SELFID_PORT_TO_CHILD;
+ }
+
+ num_ports--;
+ } else {
+ port_status = IEEE1394_SELFID_PORT_NO_PORT;
+ }
+
+ /* add in the port information */
+ phy_info = phy_info | (port_status << (6 - (index * 2)));
+ count++;
+ }
+
+ /* Copy the PHY selfid info to the return parameter */
+ *info = phy_info;
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_info_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
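+
+/*
+ * A worked example of the port packing above, assuming the standard selfid
+ * port encodings (00 = no port, 01 = not connected, 10 = to parent,
+ * 11 = to child): for a 3 port PHY with port 0 connected to our parent,
+ * port 1 connected to a child, and port 2 unconnected, the loop ORs
+ * (2 << 6) | (3 << 4) | (1 << 2) = 0xB4 into the selfid word on top of the
+ * link-on/power class, physical ID, gap count, and speed fields gathered
+ * earlier.
+ */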
+
+
+/*
+ * hci1394_ohci_current_busgen()
+ * return the current bus generation.
+ */
+uint_t
+hci1394_ohci_current_busgen(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t reg;
+ uint_t generation_count;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_current_busgen_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->self_id_count);
+ generation_count = (reg & OHCI_SLFC_GEN_MASK) >> OHCI_SLFC_GEN_SHIFT;
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_current_busgen_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (generation_count);
+}
+
+
+/*
+ * hci1394_ohci_startup()
+ *    Start up the 1394 nexus driver. This is called after all of the HW has
+ * been initialized (in both attach and resume) and we are ready to
+ * participate on the bus.
+ */
+int
+hci1394_ohci_startup(hci1394_ohci_handle_t ohci_hdl)
+{
+ int status;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_startup_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Turn on 1394 link. This allows us to receive 1394 traffic off the
+ * bus
+ */
+ hci1394_ohci_link_enable(ohci_hdl);
+
+ /*
+ * Reset the 1394 Bus.
+ * Need to do this so that the link layer can collect all of the self-id
+ * packets. The Interrupt routine will cause further initialization
+ * after the bus reset has completed
+ */
+ status = hci1394_ohci_bus_reset(ohci_hdl);
+ if (status != DDI_SUCCESS) {
+		TNF_PROBE_1(hci1394_ohci_startup_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "failed to reset bus");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_startup_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+	/* set up our initial interrupt mask and enable interrupts */
+ hci1394_isr_mask_setup(ohci_hdl->soft_state);
+ hci1394_ohci_intr_master_enable(ohci_hdl);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_startup_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_postwr_addr()
+ * Read the Posted Write Address registers. This should be read when a
+ * posted write error is detected to find out what transaction had an error.
+ */
+void
+hci1394_ohci_postwr_addr(hci1394_ohci_handle_t ohci_hdl, uint64_t *addr)
+{
+ uint32_t reg;
+
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(addr != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_postwr_addr_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* read in the errored address */
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->posted_write_addrhi);
+ *addr = ((uint64_t)reg) << 32;
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->posted_write_addrlo);
+ *addr = *addr | (uint64_t)reg;
+
+ /*
+ * Interrupt should be cleared after reading the posted write address.
+ * See 13.2.8.1 in OpenHCI spec v1.0.
+ */
+ hci1394_ohci_intr_clear(ohci_hdl, OHCI_INTR_POST_WR_ERR);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_postwr_addr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_guid()
+ * Return the adapter's GUID
+ */
+uint64_t
+hci1394_ohci_guid(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t reg;
+ uint64_t guid;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_guid_enter, HCI1394_TNF_HAL_STACK, "");
+
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->guid_hi);
+ guid = ((uint64_t)reg) << 32;
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->guid_lo);
+ guid = guid | (uint64_t)reg;
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_guid_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (guid);
+}
+
+
+/*
+ * hci1394_ohci_csr_read()
+ * Read one of the HW implemented CSR registers. These include
+ * bus_manager_id, bandwidth_available, channels_available_hi, and
+ * channels_available_lo. Offset should be set to
+ * OHCI_CSR_SEL_BUS_MGR_ID, OHCI_CSR_SEL_BANDWIDTH_AVAIL
+ * OHCI_CSR_SEL_CHANS_AVAIL_HI, or OHCI_CSR_SEL_CHANS_AVAIL_LO.
+ */
+int
+hci1394_ohci_csr_read(hci1394_ohci_handle_t ohci_hdl, uint_t offset,
+ uint32_t *data)
+{
+ uint_t generation;
+ int status;
+
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(data != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_csr_read_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ /*
+ * read the CSR register by doing a cswap with the same compare and
+ * swap value.
+ */
+ generation = hci1394_ohci_current_busgen(ohci_hdl);
+ status = hci1394_ohci_csr_cswap(ohci_hdl, generation, offset, 0, 0,
+ data);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_csr_read_csw_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_csr_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_csr_read_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (DDI_SUCCESS);
+}
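+
+/*
+ * A worked example of the read-through-cswap trick above: reading
+ * BANDWIDTH_AVAILABLE amounts to calling hci1394_ohci_csr_cswap() with
+ * OHCI_CSR_SEL_BANDWIDTH_AVAIL and compare == swap == 0. If the register
+ * is nonzero the compare fails and nothing is stored; if it is zero the
+ * swap writes the same zero back. Either way the register is unchanged
+ * and the returned old value is the current contents.
+ */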
+
+
+/*
+ * hci1394_ohci_csr_cswap()
+ * Perform a compare/swap on one of the HW implemented CSR registers. These
+ * include bus_manager_id, bandwidth_available, channels_available_hi, and
+ * channels_available_lo. Offset should be set to
+ * OHCI_CSR_SEL_BUS_MGR_ID, OHCI_CSR_SEL_BANDWIDTH_AVAIL
+ * OHCI_CSR_SEL_CHANS_AVAIL_HI, or OHCI_CSR_SEL_CHANS_AVAIL_LO.
+ */
+int
+hci1394_ohci_csr_cswap(hci1394_ohci_handle_t ohci_hdl, uint_t generation,
+ uint_t offset, uint32_t compare, uint32_t swap, uint32_t *old)
+{
+ int count;
+ uint32_t ohci_reg;
+
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(old != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_csr_cswap_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Make sure we have not gotten a bus reset since this action was
+ * started.
+ */
+ if (generation != hci1394_ohci_current_busgen(ohci_hdl)) {
+ TNF_PROBE_1(hci1394_ohci_invbusgen_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "Invalid Bus Generation");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_csr_cswap_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ mutex_enter(&ohci_hdl->ohci_mutex);
+
+ /* init csrData and csrCompare */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->csr_data, swap);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->csr_compare_data, compare);
+
+ /* start the compare swap */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->csr_ctrl, offset & OHCI_CSR_SELECT);
+
+ /*
+	 * The CSR access should be immediate. Nothing officially states this,
+	 * so we will wait up to 2uS just in case before timing out. Note that
+	 * a caller can perform a read by passing the same value for both
+	 * compare and swap; the old value returned is, in essence, the read.
+ */
+ count = 0;
+ while (count < 2) {
+ /* See if the compare swap is done */
+ ohci_reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->csr_ctrl);
+ if ((ohci_reg & OHCI_CSR_DONE) != 0) {
+ /* The compare swap is done, break out of the loop */
+ break;
+ }
+ /*
+ * The compare swap has not completed yet, wait 1uS, increment
+ * the count and try again
+ */
+ drv_usecwait(1);
+ count++;
+ }
+
+ /* If we timed out, return an error */
+ if (count >= 2) {
+ *old = 0;
+ mutex_exit(&ohci_hdl->ohci_mutex);
+ TNF_PROBE_0(hci1394_ohci_phy_csr_timeout_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_csr_cswap_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Copy the old data into the return parameter */
+ *old = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->csr_data);
+
+ mutex_exit(&ohci_hdl->ohci_mutex);
+
+ /*
+ * There is a race condition in the OpenHCI design here. After checking
+ * the generation and before performing the cswap, we could get a bus
+ * reset and incorrectly set something like the bus manager. This would
+ * put us into a condition where we would not have a bus manager and
+ * we would think there was one. If it is possible that this race
+	 * condition occurred, we will reset the bus to clean things up. We only
+ * care about this if the compare swap was successful.
+ */
+ if (generation != hci1394_ohci_current_busgen(ohci_hdl)) {
+ if (*old == compare) {
+ (void) hci1394_ohci_bus_reset(ohci_hdl);
+ TNF_PROBE_1(hci1394_ohci_invbusgen_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "Invalid Bus Generation");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_csr_cswap_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_csr_cswap_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (DDI_SUCCESS);
+}
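+
+/*
+ * Illustrative sketch, not part of the driver (hypothetical and guarded
+ * out, so it is never compiled): a typical use of the compare/swap above
+ * is contending for the bus manager role. The BUS_MANAGER_ID CSR reads as
+ * 0x3F while the bus is unmanaged, so a candidate swaps in its own node
+ * number and wins only if the old value it gets back is still 0x3F.
+ */
+#ifdef HCI1394_EXAMPLES
+static boolean_t
+hci1394_example_claim_bus_mgr(hci1394_ohci_handle_t ohci_hdl,
+    uint_t generation, uint32_t node_num)
+{
+	uint32_t old;
+
+	if (hci1394_ohci_csr_cswap(ohci_hdl, generation,
+	    OHCI_CSR_SEL_BUS_MGR_ID, 0x3F, node_num, &old) != DDI_SUCCESS) {
+		return (B_FALSE);
+	}
+
+	/* we are bus manager only if nobody claimed the role before us */
+	return ((old == 0x3F) ? B_TRUE : B_FALSE);
+}
+#endif	/* HCI1394_EXAMPLES */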
+
+
+/*
+ * hci1394_ohci_contender_enable()
+ * Set the contender bit in the PHY. This routine should only be called
+ * if our PHY is 1394A compliant. (i.e. this routine should not be called
+ * for a 1394-1995 PHY).
+ */
+int
+hci1394_ohci_contender_enable(hci1394_ohci_handle_t ohci_hdl)
+{
+ int status;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_contender_enable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+	 * Make sure the PHY is not a 1394-1995 PHY. Those PHYs do not have a
+	 * contender bit to set.
+ */
+ if (ohci_hdl->ohci_phy == H1394_PHY_1995) {
+ TNF_PROBE_0(hci1394_ohci_phy_type_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_contender_enable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Set the Contender Bit */
+ status = hci1394_ohci_phy_set(ohci_hdl, 0x4, OHCI_PHY_CNTDR);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_phy_set_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_contender_enable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_contender_enable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_root_holdoff_enable()
+ * Set the root holdoff bit in the PHY. Since there are race conditions when
+ * writing to PHY register 1 (which can get updated from a PHY packet off the
+ * bus), we cache this state until a "long" bus reset is issued.
+ */
+int
+hci1394_ohci_root_holdoff_enable(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_root_holdoff_enable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ohci_hdl->ohci_set_root_holdoff = B_TRUE;
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_root_holdoff_enable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_gap_count_set()
+ * Set the gap count in the PHY. Since there are race conditions when writing
+ * to PHY register 1 (which can get updated from a PHY packet off the bus),
+ * we cache this gap count until a "long" bus reset is issued.
+ */
+int
+hci1394_ohci_gap_count_set(hci1394_ohci_handle_t ohci_hdl, uint_t gap_count)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_gap_count_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ohci_hdl->ohci_set_gap_count = B_TRUE;
+ ohci_hdl->ohci_gap_count = gap_count & OHCI_PHY_MAX_GAP;
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_gap_count_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_phy_filter_set()
+ * Enable a node (or nodes) to perform transactions to our physical
+ * memory. OpenHCI allows you to disable/enable physical requests on a node
+ * per node basis. A physical request is basically a read/write to 1394
+ * address space 0x0 - 0xFFFFFFFF. This address goes out to the IO MMU (in
+ * the case of a SPARC machine). The HAL starts with all nodes unable to
+ * read/write physical memory. The Services Layer will call down and enable
+ * nodes via setting a physical filter bit for that given node. Since node
+ * numbers change every bus reset, the services layer has to call down after
+ *    every bus reset to re-enable physical accesses. (NOTE: the hardware
+ *    automatically clears these bits on a bus reset.)
+ */
+int
+hci1394_ohci_phy_filter_set(hci1394_ohci_handle_t ohci_hdl, uint64_t mask,
+ uint_t generation)
+{
+ uint32_t data;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_filter_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Make sure we have not gotten a bus reset since this action was
+ * started.
+ */
+ if (generation != hci1394_ohci_current_busgen(ohci_hdl)) {
+ TNF_PROBE_1(hci1394_ohci_invbusgen_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "Invalid Bus Generation");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_filter_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ data = (uint32_t)((mask >> 32) & 0xFFFFFFFF);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->phys_req_filterhi_set, data);
+ data = (uint32_t)(mask & 0xFFFFFFFF);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->phys_req_filterlo_set, data);
+
+ /*
+ * There is a race condition in the OpenHCI design here. After checking
+ * the generation and before setting the physical filter bits, we could
+ * get a bus reset and incorrectly set the physical filter bits. If it
+	 * is possible that this race condition occurred, we will reset the bus
+ * to clean things up.
+ */
+ if (generation != hci1394_ohci_current_busgen(ohci_hdl)) {
+ (void) hci1394_ohci_bus_reset(ohci_hdl);
+ TNF_PROBE_1(hci1394_ohci_filterrace_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "Invalid Bus Generation");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_filter_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_filter_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
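+
+/*
+ * A worked example of the mask split above, assuming bit n of the 64-bit
+ * mask corresponds to node n: enabling physical requests from node 35
+ * passes mask = ((uint64_t)1 << 35), so 0x00000008 is written to
+ * phys_req_filterhi_set and 0 to phys_req_filterlo_set.
+ */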
+
+
+/*
+ * hci1394_ohci_phy_filter_clr()
+ * Disable a node (or nodes) from performing transactions to our physical
+ * memory. See hci1394_ohci_phy_filter_set() above for more info.
+ */
+int
+hci1394_ohci_phy_filter_clr(hci1394_ohci_handle_t ohci_hdl,
+ uint64_t mask, uint_t generation)
+{
+ uint32_t data;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_filter_clr_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Make sure we have not gotten a bus reset since this action was
+ * started.
+ */
+ if (generation != hci1394_ohci_current_busgen(ohci_hdl)) {
+ TNF_PROBE_1(hci1394_ohci_invbusgen_fail, HCI1394_TNF_HAL_ERROR,
+ "", tnf_string, errmsg, "Invalid Bus Generation");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_filter_clr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ data = (uint32_t)((mask >> 32) & 0xFFFFFFFF);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->phys_req_filterhi_clr, data);
+ data = (uint32_t)(mask & 0xFFFFFFFF);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->phys_req_filterlo_clr, data);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_phy_filter_clr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_bus_reset_short()
+ * Perform a 1394A short bus reset. This function should only be called
+ * on an adapter with a 1394A PHY (or later).
+ */
+int
+hci1394_ohci_bus_reset_short(hci1394_ohci_handle_t ohci_hdl)
+{
+ int status;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bus_reset_short_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+	 * Make sure the PHY is not a 1394-1995 PHY. Those PHYs do not support
+	 * the 1394A short bus reset.
+ */
+ if (ohci_hdl->ohci_phy == H1394_PHY_1995) {
+ TNF_PROBE_0(hci1394_ohci_brs_phy_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bus_reset_short_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initiate the short bus reset */
+ status = hci1394_ohci_phy_set(ohci_hdl, 0x5, OHCI_PHY_ISBR);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_phy_set_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bus_reset_short_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bus_reset_short_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (status);
+}
+
+
+/*
+ * hci1394_ohci_cfgrom_update()
+ *    Update the config rom with the provided contents. The config rom is
+ *    provided as a byte stream whose length is a multiple of 4 bytes. The
+ *    size is passed as a quadlet (4 byte) count. The entire contents
+ *    of the config rom is updated at once; we do not provide a partial
+ *    update interface.
+ */
+void
+hci1394_ohci_cfgrom_update(hci1394_ohci_handle_t ohci_hdl, void *local_buf,
+ uint_t quadlet_count)
+{
+ uint32_t *data;
+
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(local_buf != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cfgrom_update_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ data = (uint32_t *)local_buf;
+
+ /* zero out the config ROM header to start */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->config_rom_hdr, 0);
+
+ /* copy Services Layer buffer into config rom buffer */
+ ddi_rep_put8(ohci_hdl->ohci_cfgrom.bi_handle, local_buf,
+ (uint8_t *)ohci_hdl->ohci_cfgrom.bi_kaddr, quadlet_count << 2,
+ DDI_DEV_AUTOINCR);
+
+ (void) ddi_dma_sync(ohci_hdl->ohci_cfgrom.bi_dma_handle, 0,
+ quadlet_count << 2, DDI_DMA_SYNC_FORDEV);
+
+ /*
+ * setup OHCI bus options and config rom hdr registers. We need to swap
+ * the config rom header and bus options on an X86 machine since the
+ * data is provided to us as a byte stream and the OHCI registers expect
+ * a big endian 32-bit number.
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->bus_options, OHCI_SWAP32(data[2]));
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->config_rom_hdr, OHCI_SWAP32(data[0]));
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cfgrom_update_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
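+
+/*
+ * A note on the quadlet indexing above: in a 1394 config ROM the first
+ * quadlet (data[0]) is the ROM header, the second is the bus name
+ * ("1394"), and the third (data[2]) is the bus options quadlet; that is
+ * why exactly those two are byte swapped and written back to the
+ * config_rom_hdr and bus_options registers.
+ */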
+
+
+/*
+ * hci1394_ohci_nodeid_get()
+ * Return our current nodeid (bus #/Node #)
+ */
+void
+hci1394_ohci_nodeid_get(hci1394_ohci_handle_t ohci_hdl, uint_t *nodeid)
+{
+ uint32_t reg;
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(nodeid != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_nodeid_get_enter, HCI1394_TNF_HAL_STACK,
+ "");
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->node_id);
+ *nodeid = (reg & 0xFFFF) << 16;
+ TNF_PROBE_0_DEBUG(hci1394_ohci_nodeid_get_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_ohci_nodeid_set()
+ * Set our current nodeid (bus #/Node #). This actually sets our bus number.
+ *    Our node number cannot be set by software. This is usually triggered via
+ * a write to the CSR NODEIDS register.
+ */
+void
+hci1394_ohci_nodeid_set(hci1394_ohci_handle_t ohci_hdl, uint_t nodeid)
+{
+ uint32_t reg;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_nodeid_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ reg = ((nodeid & 0xFFC00000) >> 16);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->node_id, reg);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_nodeid_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
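+
+/*
+ * A worked example of the shift above: the nodeid argument carries the
+ * 10 bit bus number in its top 10 bits. Setting the local bus number
+ * (0x3FF) passes nodeid = 0xFFC00000, so reg = 0xFFC0 and the bus number
+ * lands in the busNumber field (bits 15-6) of the OHCI NodeID register;
+ * the low 6 node number bits remain owned by the hardware.
+ */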
+
+
+/*
+ * hci1394_ohci_nodeid_info()
+ * Return our current nodeid (bus #/Node #). This also returns whether or
+ * not our nodeid error bit is set. This is useful in determining if the
+ * bus reset completed without errors in the selfid complete interrupt
+ * processing.
+ */
+void
+hci1394_ohci_nodeid_info(hci1394_ohci_handle_t ohci_hdl, uint_t *nodeid,
+ boolean_t *error)
+{
+ uint32_t reg;
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(nodeid != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_nodeid_info_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->node_id);
+ *nodeid = reg & 0xFFFF;
+ if ((reg & OHCI_NDID_IDVALID) == 0) {
+ *error = B_TRUE;
+ } else {
+ *error = B_FALSE;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_nodeid_info_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_cycletime_get()
+ * Return the current cycle time
+ */
+void
+hci1394_ohci_cycletime_get(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t *cycle_time)
+{
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(cycle_time != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cycletime_get_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ *cycle_time = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->isoch_cycle_timer);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cycletime_get_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_cycletime_set()
+ * Set the cycle time
+ */
+void
+hci1394_ohci_cycletime_set(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t cycle_time)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cycletime_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->isoch_cycle_timer, cycle_time);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cycletime_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_bustime_get()
+ * Return the current bus time.
+ */
+void
+hci1394_ohci_bustime_get(hci1394_ohci_handle_t ohci_hdl, uint32_t *bus_time)
+{
+ uint32_t bus_time1;
+ uint32_t bus_time2;
+ uint32_t cycle_time;
+
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(bus_time != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bustime_get_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * The bus time is composed of a portion of the cycle time and the
+ * cycle time rollover count (ohci_bustime_count). There is a race
+ * condition where we read the rollover count and then the cycle
+ * timer rolls over. This is the reason for the double read of the
+ * rollover count.
+ */
+ do {
+ bus_time1 = ohci_hdl->ohci_bustime_count;
+ cycle_time = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->isoch_cycle_timer);
+ bus_time2 = ohci_hdl->ohci_bustime_count;
+ } while (bus_time1 != bus_time2);
+
+ *bus_time = (bus_time2 << 7) | (cycle_time >> 25);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bustime_get_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
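+
+/*
+ * A worked example of the composition above: cycleSeconds lives in bits
+ * 31-25 of the cycle timer, so (cycle_time >> 25) yields 0-127 seconds and
+ * ohci_bustime_count counts full 128 second wraps. With a rollover count
+ * of 2 and cycleSeconds at 5, the bus time is (2 << 7) | 5 = 261 seconds.
+ */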
+
+
+/*
+ * hci1394_ohci_bustime_set()
+ * Set the cycle timer rollover portion of the bus time.
+ */
+void
+hci1394_ohci_bustime_set(hci1394_ohci_handle_t ohci_hdl, uint32_t bus_time)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bustime_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * we will start with the cycle 64 seconds interrupt disabled. If this
+ * is the first write to bus time, enable the interrupt.
+ */
+ if (ohci_hdl->ohci_bustime_enabled == B_FALSE) {
+ ohci_hdl->ohci_bustime_enabled = B_TRUE;
+ /* Clear the cycle64Seconds interrupt then enable it */
+ hci1394_ohci_intr_clear(ohci_hdl, OHCI_INTR_CYC_64_SECS);
+ hci1394_ohci_intr_enable(ohci_hdl, OHCI_INTR_CYC_64_SECS);
+ }
+ ohci_hdl->ohci_bustime_count = (bus_time >> 7);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bustime_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_atreq_retries_get()
+ * Get the number of atreq retries we will perform.
+ */
+void
+hci1394_ohci_atreq_retries_get(hci1394_ohci_handle_t ohci_hdl,
+ uint_t *atreq_retries)
+{
+ uint32_t reg;
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(atreq_retries != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atreq_retries_get_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_retries);
+ *atreq_retries = reg & OHCI_RET_MAX_ATREQ_MASK;
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atreq_retries_get_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_atreq_retries_set()
+ * Set the number of atreq retries we will perform.
+ */
+void
+hci1394_ohci_atreq_retries_set(hci1394_ohci_handle_t ohci_hdl,
+ uint_t atreq_retries)
+{
+ uint32_t reg;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atreq_retries_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&ohci_hdl->ohci_mutex);
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_retries);
+ reg = reg & ~OHCI_RET_MAX_ATREQ_MASK;
+ reg = reg | (atreq_retries & OHCI_RET_MAX_ATREQ_MASK);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_retries, reg);
+ mutex_exit(&ohci_hdl->ohci_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atreq_retries_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_isr_cycle64seconds()
+ * Interrupt handler for the cycle64seconds interrupt.
+ */
+void
+hci1394_ohci_isr_cycle64seconds(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t cycle_time;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_isr_cycle64seconds_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ hci1394_ohci_intr_clear(ohci_hdl, OHCI_INTR_CYC_64_SECS);
+ cycle_time = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->isoch_cycle_timer);
+
+ /*
+	 * The cycle64Seconds interrupt fires when the MSBit in the cycle timer
+	 * changes state. We only care about rollover, so we increment the
+	 * count only when the MSBit has just changed to 0.
+ */
+ if ((cycle_time & 0x80000000) == 0) {
+ ohci_hdl->ohci_bustime_count++;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_isr_cycle64seconds_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
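+
+/*
+ * A worked example of the test above: bit 31 of the cycle timer toggles
+ * every 64 seconds, so this interrupt fires at 64, 128, 192, ... seconds.
+ * The rollover count is bumped only on the 1 -> 0 transitions (128, 256,
+ * ...), i.e. once per full wrap of the 7 bit cycleSeconds field, matching
+ * the << 7 used when composing the bus time in hci1394_ohci_bustime_get().
+ */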
+
+
+/*
+ * hci1394_ohci_isr_phy()
+ * Interrupt handler for a PHY event
+ */
+void
+hci1394_ohci_isr_phy(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint_t phy_status;
+ int status;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_isr_phy_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* clear the interrupt */
+ hci1394_ohci_intr_clear(ohci_hdl, OHCI_INTR_PHY);
+
+ /* increment the statistics count */
+ ohci_hdl->ohci_drvinfo->di_stats.st_phy_isr++;
+
+ /*
+ * If the PHY is a 1995 phy, just return since there are no status bits
+ * to read.
+ */
+ if (ohci_hdl->ohci_phy == H1394_PHY_1995) {
+ TNF_PROBE_0(hci1394_ohci_phy_isr_1995,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_isr_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return;
+ }
+
+ /* See why we got this interrupt */
+ status = hci1394_ohci_phy_read(ohci_hdl, 5, &phy_status);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_phy_read_failed,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_isr_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return;
+ }
+
+ if (phy_status & OHCI_PHY_LOOP_ERR) {
+ ohci_hdl->ohci_drvinfo->di_stats.st_phy_loop_err++;
+ cmn_err(CE_NOTE, "hci1394(%d): ERROR - bus loop detected",
+ ohci_hdl->ohci_drvinfo->di_instance);
+ TNF_PROBE_0(hci1394_ohci_phy_isr_loop, HCI1394_TNF_HAL, "");
+ }
+ if (phy_status & OHCI_PHY_PWRFAIL_ERR) {
+ ohci_hdl->ohci_drvinfo->di_stats.st_phy_pwrfail_err++;
+ TNF_PROBE_0(hci1394_ohci_phy_isr_pwr, HCI1394_TNF_HAL, "");
+ }
+ if (phy_status & OHCI_PHY_TIMEOUT_ERR) {
+ ohci_hdl->ohci_drvinfo->di_stats.st_phy_timeout_err++;
+ TNF_PROBE_0(hci1394_ohci_phy_isr_tmout, HCI1394_TNF_HAL, "");
+ }
+ if (phy_status & OHCI_PHY_PORTEVT_ERR) {
+ ohci_hdl->ohci_drvinfo->di_stats.st_phy_portevt_err++;
+ TNF_PROBE_0(hci1394_ohci_phy_isr_pevt, HCI1394_TNF_HAL, "");
+ }
+
+ /* clear any set status bits */
+ status = hci1394_ohci_phy_write(ohci_hdl, 5, phy_status);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_phy_write_failed,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_isr_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return;
+ }
+
+ /*
+ * Disable the PHY interrupt. We are getting stuck in this ISR in
+ * certain PHY implementations so we will disable the interrupt until
+ * we see a selfid complete.
+ */
+ hci1394_ohci_intr_disable(ohci_hdl, OHCI_INTR_PHY);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_isr_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_root_check()
+ *    Return whether we are currently the root node on the 1394 bus.
+ *    Returns B_TRUE if we are the root and B_FALSE if we are not.
+ */
+boolean_t
+hci1394_ohci_root_check(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t reg;
+ int status;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_root_check_enter, HCI1394_TNF_HAL_STACK,
+ "");
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->node_id);
+ if ((reg & OHCI_REG_NODEID_ROOT) && (reg & OHCI_NDID_IDVALID)) {
+ status = B_TRUE;
+ } else {
+ status = B_FALSE;
+ }
+ TNF_PROBE_0_DEBUG(hci1394_ohci_root_check_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (status);
+}
+
+
+/*
+ * hci1394_ohci_cmc_check()
+ *    Return whether we are cycle master capable. Returns B_TRUE if we are
+ *    cycle master capable and B_FALSE if we are not.
+ */
+boolean_t
+hci1394_ohci_cmc_check(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t reg;
+ int status;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cmc_check_enter, HCI1394_TNF_HAL_STACK,
+ "");
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->bus_options);
+ if (reg & OHCI_REG_BUSOPTIONS_CMC) {
+ status = B_TRUE;
+ } else {
+ status = B_FALSE;
+ }
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cmc_check_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (status);
+}
+
+
+/*
+ * hci1394_ohci_cycle_master_enable()
+ * Enables us to be cycle master. If we are root, we will start generating
+ * cycle start packets.
+ */
+void
+hci1394_ohci_cycle_master_enable(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cycle_master_enable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* First make sure that cycleTooLong is clear */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->intr_event_clr, OHCI_INTR_CYC_TOO_LONG);
+
+ /* Enable Cycle Master */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->link_ctrl_set, OHCI_LC_CYC_MAST);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cycle_master_enable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_cycle_master_disable()
+ *    Disables us from being cycle master. If we are root, we will stop
+ * generating cycle start packets.
+ */
+void
+hci1394_ohci_cycle_master_disable(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cycle_master_disable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* disable cycle master */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->link_ctrl_clr, OHCI_LC_CYC_MAST);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cycle_master_disable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_resume()
+ * Re-initialize the openHCI HW during a resume. (after a power suspend)
+ */
+int
+hci1394_ohci_resume(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t quadlet;
+ int status;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_resume_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* Re-initialize the OpenHCI chip */
+ status = hci1394_ohci_chip_init(ohci_hdl);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_chip_init_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Re-initialize the PHY */
+ status = hci1394_ohci_phy_resume(ohci_hdl);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_phy_resume_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Re-initialize any 1394A features we are using */
+ status = hci1394_ohci_1394a_resume(ohci_hdl);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_1394a_resume_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Tell OpenHCI where the Config ROM buffer is */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->config_rom_maplo,
+ (uint32_t)ohci_hdl->ohci_cfgrom.bi_cookie.dmac_address);
+
+ /* Tell OpenHCI where the SelfId buffer is */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->self_id_buflo,
+ (uint32_t)ohci_hdl->ohci_selfid.bi_cookie.dmac_address);
+
+ /* Enable selfid DMA engine */
+ hci1394_ohci_selfid_enable(ohci_hdl);
+
+ /*
+ * re-setup OHCI bus options and config rom hdr registers. We need to
+ * read from the config rom using ddi_rep_get8 since it is stored as
+	 * a byte stream. We need to swap the config rom header and bus options
+ * on an X86 machine since the data is a byte stream and the OHCI
+ * registers expect a big endian 32-bit number.
+ */
+ ddi_rep_get8(ohci_hdl->ohci_cfgrom.bi_handle, (uint8_t *)&quadlet,
+ &((uint8_t *)ohci_hdl->ohci_cfgrom.bi_kaddr)[8], 4,
+ DDI_DEV_AUTOINCR);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->bus_options, OHCI_SWAP32(quadlet));
+ ddi_rep_get8(ohci_hdl->ohci_cfgrom.bi_handle, (uint8_t *)&quadlet,
+ &((uint8_t *)ohci_hdl->ohci_cfgrom.bi_kaddr)[0], 4,
+ DDI_DEV_AUTOINCR);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->config_rom_hdr, OHCI_SWAP32(quadlet));
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_selfid_init()
+ * Initialize the selfid buffer
+ */
+static int
+hci1394_ohci_selfid_init(hci1394_ohci_handle_t ohci_hdl)
+{
+ hci1394_buf_parms_t parms;
+ int status;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_init_selfid_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /*
+	 * Set up a 2K buffer, aligned on a 2K byte address boundary. Make sure
+ * that the buffer is not broken up into multiple cookies. OpenHCI can
+ * only handle one address for the selfid buffer location.
+ */
+ parms.bp_length = 2048;
+ parms.bp_max_cookies = 1;
+ parms.bp_alignment = 2048;
+ status = hci1394_buf_alloc(ohci_hdl->ohci_drvinfo, &parms,
+ &ohci_hdl->ohci_selfid, &ohci_hdl->ohci_selfid_handle);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_buf_alloc_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_init_selfid_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Tell OpenHCI where the buffer is */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->self_id_buflo,
+ (uint32_t)ohci_hdl->ohci_selfid.bi_cookie.dmac_address);
+
+ /* Enable selfid DMA engine */
+ hci1394_ohci_selfid_enable(ohci_hdl);
+
+ TNF_PROBE_0_DEBUG(hci1394_init_selfid_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_selfid_enable()
+ * Allow selfid packets to be placed into the selfid buffer. This should be
+ * called after the selfid buffer address has been setup in the HW.
+ */
+void
+hci1394_ohci_selfid_enable(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_selfid_enable_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Allow selfid packets to be received. This should be called during
+ * driver attach after the selfid buffer address has been initialized.
+ *
+ * Link Control Register
+ * rscSelfId = 1 <= bit 9
+ */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->link_ctrl_set, OHCI_LC_RCV_SELF);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_selfid_enable_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_selfid_read()
+ * Read a word out of the selfid buffer.
+ */
+void
+hci1394_ohci_selfid_read(hci1394_ohci_handle_t ohci_hdl, uint_t offset,
+ uint32_t *data)
+{
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(data != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_selfid_read_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ *data = ddi_get32(ohci_hdl->ohci_selfid.bi_handle,
+ &((uint32_t *)ohci_hdl->ohci_selfid.bi_kaddr)[offset]);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_selfid_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_selfid_info()
+ * Return the current bus generation, the number of bytes currently in the
+ * selfid buffer, and if we have seen any selfid errors.
+ */
+void
+hci1394_ohci_selfid_info(hci1394_ohci_handle_t ohci_hdl, uint_t *busgen,
+ uint_t *size, boolean_t *error)
+{
+ uint32_t reg;
+
+
+ ASSERT(ohci_hdl != NULL);
+ ASSERT(busgen != NULL);
+ ASSERT(size != NULL);
+ ASSERT(error != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_selfid_info_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->self_id_count);
+ *busgen = (reg & OHCI_SLFC_GEN_MASK) >> OHCI_SLFC_GEN_SHIFT;
+ *size = reg & OHCI_SLFC_NUM_QUADS_MASK;
+ if ((reg & OHCI_SLFC_ERROR) == 0) {
+ *error = B_FALSE;
+ } else {
+ *error = B_TRUE;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_selfid_info_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_selfid_buf_current()
+ *    Test if the selfid buffer is current. Returns B_TRUE if it is current
+ *    and B_FALSE if it is not.
+ */
+boolean_t
+hci1394_ohci_selfid_buf_current(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t reg;
+ int status;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_selfid_buf_current_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * if the generation stored in the selfid buffer is not equal to the
+ * generation we have previously stored, the selfid buffer is not
+	 * current. (It may be older or it may be newer.)
+ */
+ reg = ddi_get32(ohci_hdl->ohci_selfid.bi_handle,
+ &((uint32_t *)ohci_hdl->ohci_selfid.bi_kaddr)[0]);
+ if (ohci_hdl->ohci_drvinfo->di_gencnt != ((reg & OHCI_SLFC_GEN_MASK) >>
+ OHCI_SLFC_GEN_SHIFT)) {
+ status = B_FALSE;
+ } else {
+ status = B_TRUE;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_selfid_buf_current_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (status);
+}
+
+
+/*
+ * hci1394_ohci_selfid_sync()
+ * Perform a ddi_dma_sync on the selfid buffer
+ */
+void
+hci1394_ohci_selfid_sync(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_selfid_sync_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ (void) ddi_dma_sync(ohci_hdl->ohci_selfid.bi_dma_handle, 0,
+ ohci_hdl->ohci_selfid.bi_length, DDI_DMA_SYNC_FORKERNEL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_selfid_sync_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_ohci_cfgrom_init()
+ * Initialize the configuration ROM buffer
+ */
+static int
+hci1394_ohci_cfgrom_init(hci1394_ohci_handle_t ohci_hdl)
+{
+ hci1394_buf_parms_t parms;
+ int status;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cfgrom_init_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+	 * Set up a 1K buffer, aligned on a 1K address boundary, and allow no
+	 * less than 4-byte data transfers. Create the buffer. Make sure that
+ * the buffer is not broken up into multiple cookies. OpenHCI can only
+ * handle one address for the config ROM buffer location.
+ */
+ parms.bp_length = 1024;
+ parms.bp_max_cookies = 1;
+ parms.bp_alignment = 1024;
+ status = hci1394_buf_alloc(ohci_hdl->ohci_drvinfo, &parms,
+ &ohci_hdl->ohci_cfgrom, &ohci_hdl->ohci_cfgrom_handle);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_buf_alloc_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cfgrom_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Tell OpenHCI where the buffer is */
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->config_rom_maplo,
+ (uint32_t)ohci_hdl->ohci_cfgrom.bi_cookie.dmac_address);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_cfgrom_init_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_bus_capabilities()
+ * Return our current bus capabilities
+ */
+void
+hci1394_ohci_bus_capabilities(hci1394_ohci_handle_t ohci_hdl,
+ uint32_t *bus_capabilities)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bus_capabilities_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ /*
+	 * Read in the bus options register. Set bits saying that we are isoch
+	 * resource manager capable, cycle master capable, and isoch capable.
+ */
+ *bus_capabilities = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->bus_options) | (OHCI_BOPT_IRMC |
+ OHCI_BOPT_CMC | OHCI_BOPT_ISC);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_bus_capabilities_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_at_active()
+ *    Test whether either of the AT engines is active. If either AT engine is
+ *    active, we return B_TRUE. If both AT engines are inactive, we return
+ *    B_FALSE.
+ */
+boolean_t
+hci1394_ohci_at_active(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t reg;
+
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_at_active_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* see if atreq active bit set */
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_req.ctxt_ctrl_set);
+ if (reg & OHCI_CC_ACTIVE_MASK) {
+ /* atreq engine is still active */
+ TNF_PROBE_0(hci1394_ohci_atreq_active_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_at_active_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (B_TRUE);
+ }
+
+ /* see if atresp active bit set */
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_resp.ctxt_ctrl_set);
+ if (reg & OHCI_CC_ACTIVE_MASK) {
+ /* atresp engine is still active */
+ TNF_PROBE_0(hci1394_ohci_atresp_active_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_at_active_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (B_TRUE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_at_active_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* both atreq and atresp active bits are cleared */
+ return (B_FALSE);
+}
+
+
+/*
+ * hci1394_ohci_atreq_start()
+ *    Start the atreq dma engine. cmdptr is the address of the first
+ *    descriptor the engine will read.
+ */
+void
+hci1394_ohci_atreq_start(hci1394_ohci_handle_t ohci_hdl, uint32_t cmdptr)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atreq_start_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_req.cmd_ptrlo, cmdptr);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_req.ctxt_ctrl_set, OHCI_CC_RUN_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atreq_start_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_atreq_wake()
+ * Wake up the atreq dma engine. This should be called when a new descriptor
+ *    is added to the Q and the dma engine has already been started. It is OK
+ *    to call this when the DMA engine is active.
+ */
+void
+hci1394_ohci_atreq_wake(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atreq_wake_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_req.ctxt_ctrl_set, OHCI_CC_WAKE_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atreq_wake_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_atreq_stop()
+ * Stop the atreq dma engine. No further descriptors will be read until
+ *    the dma engine is started again.
+ */
+void
+hci1394_ohci_atreq_stop(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atreq_stop_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_req.ctxt_ctrl_clr, OHCI_CC_RUN_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atreq_stop_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_arresp_start()
+ *    Start the arresp dma engine. cmdptr is the address of the first
+ *    descriptor the engine will read.
+ */
+void
+hci1394_ohci_arresp_start(hci1394_ohci_handle_t ohci_hdl, uint32_t cmdptr)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arresp_start_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ar_resp.cmd_ptrlo, cmdptr);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ar_resp.ctxt_ctrl_set, OHCI_CC_RUN_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arresp_start_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_arresp_wake()
+ * Wake up the arresp dma engine. This should be called when a new
+ *    descriptor is added to the Q and the dma engine has already been started.
+ * It is OK to call this when the DMA engine is active.
+ */
+void
+hci1394_ohci_arresp_wake(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arresp_wake_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ar_resp.ctxt_ctrl_set, OHCI_CC_WAKE_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arresp_wake_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_arresp_stop()
+ * Stop the arresp dma engine. No further data will be received after any
+ * current packets being received have finished.
+ */
+void
+hci1394_ohci_arresp_stop(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arresp_stop_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ar_resp.ctxt_ctrl_clr, OHCI_CC_RUN_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arresp_stop_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_arreq_start()
+ *    Start the arreq dma engine. cmdptr is the address of the first
+ *    descriptor the engine will read.
+ */
+void
+hci1394_ohci_arreq_start(hci1394_ohci_handle_t ohci_hdl, uint32_t cmdptr)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arreq_start_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ar_req.cmd_ptrlo, cmdptr);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ar_req.ctxt_ctrl_set, OHCI_CC_RUN_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arreq_start_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_arreq_wake()
+ * Wake up the arreq dma engine. This should be called when a new descriptor
+ *    is added to the Q and the dma engine has already been started. It is OK
+ *    to call this when the DMA engine is active.
+ * call this when the DMA engine is active.
+ */
+void
+hci1394_ohci_arreq_wake(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arreq_wake_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ar_req.ctxt_ctrl_set, OHCI_CC_WAKE_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arreq_wake_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_arreq_stop()
+ * Stop the arreq dma engine. No further data will be received after any
+ * current packets being received have finished.
+ */
+void
+hci1394_ohci_arreq_stop(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arreq_stop_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->ar_req.ctxt_ctrl_clr, OHCI_CC_RUN_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_arreq_stop_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_atresp_start()
+ * Start the atresp dma engine. Set the address of the first descriptor
+ * to read in equal to cmdptr.
+ */
+void
+hci1394_ohci_atresp_start(hci1394_ohci_handle_t ohci_hdl, uint32_t cmdptr)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atresp_start_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_resp.cmd_ptrlo, cmdptr);
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_resp.ctxt_ctrl_set, OHCI_CC_RUN_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atresp_start_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_atresp_wake()
+ * Wake up the atresp dma engine. This should be called when a new
+ *    descriptor is added to the Q and the dma engine has already been started.
+ * It is OK to call this when the DMA engine is active.
+ */
+void
+hci1394_ohci_atresp_wake(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atresp_wake_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_resp.ctxt_ctrl_set, OHCI_CC_WAKE_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atresp_wake_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_atresp_stop()
+ * Stop the atresp dma engine. No further descriptors will be read until
+ *    the dma engine is started again.
+ */
+void
+hci1394_ohci_atresp_stop(hci1394_ohci_handle_t ohci_hdl)
+{
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atresp_stop_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->at_resp.ctxt_ctrl_clr, OHCI_CC_RUN_MASK);
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_atresp_stop_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_ohci_1394a_init()
+ * Initialize any 1394a features that we are using.
+ */
+/* ARGSUSED */
+int
+hci1394_ohci_1394a_init(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t reg;
+ int status;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_1394a_init_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->hc_ctrl_set);
+ if (reg & OHCI_HC_PROG_PHY_ENBL) {
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->hc_ctrl_set, OHCI_HC_APHY_ENBL);
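+		/*
+		 * Enable the 1394a enhancements in the PHY: accelerated
+		 * arbitration and multi-speed packet concatenation (the
+		 * OHCI_PHY_ENBL_ACCEL and OHCI_PHY_ENBL_MULTI bits in PHY
+		 * register 5).
+		 */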
+ status = hci1394_ohci_phy_set(ohci_hdl, 5,
+ (OHCI_PHY_ENBL_ACCEL | OHCI_PHY_ENBL_MULTI));
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_1394a_init_phy_fail,
+ HCI1394_TNF_HAL_STACK, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_1394a_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_1394a_init_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_ohci_1394a_resume()
+ * Re-initialize any 1394a features that we are using.
+ */
+/* ARGSUSED */
+int
+hci1394_ohci_1394a_resume(hci1394_ohci_handle_t ohci_hdl)
+{
+ uint32_t reg;
+ int status;
+
+ ASSERT(ohci_hdl != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_ohci_1394a_resume_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ reg = ddi_get32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->hc_ctrl_set);
+ if (reg & OHCI_HC_PROG_PHY_ENBL) {
+ ddi_put32(ohci_hdl->ohci_reg_handle,
+ &ohci_hdl->ohci_regs->hc_ctrl_set, OHCI_HC_APHY_ENBL);
+ status = hci1394_ohci_phy_set(ohci_hdl, 5,
+ (OHCI_PHY_ENBL_ACCEL | OHCI_PHY_ENBL_MULTI));
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_ohci_1394a_resume_phy_fail,
+ HCI1394_TNF_HAL_STACK, "");
+ TNF_PROBE_0_DEBUG(hci1394_ohci_1394a_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_ohci_1394a_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_q.c b/usr/src/uts/common/io/1394/adapters/hci1394_q.c
new file mode 100644
index 0000000000..1296c62460
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_q.c
@@ -0,0 +1,1764 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_q.c
+ * This code decouples some of the OpenHCI async descriptor logic/structures
+ * from the async processing. The goal was to combine as much of the
+ *    duplicate code as possible for the different types of async transfers
+ *    without going overboard.
+ *
+ * There are two parts to the Q, the descriptor buffer and the data buffer.
+ * For the most part, data to be transmitted and data which is received go
+ *    in the data buffers. The information about where to get the data and
+ *    where to put it resides in the descriptor buffers. There are exceptions
+ *    to this.
+ */
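+
+/*
+ * A rough picture of an AT Q (a sketch only; the details live in the code
+ * below). The descriptor buffer holds the OpenHCI descriptor blocks, which
+ * the HW chains together through their branch pointers. Descriptors that
+ * carry a data pointer (e.g. an OL) point into the data buffer for the
+ * payload:
+ *
+ *    descriptor buffer                     data buffer
+ *   +--------------------+               +-------------+
+ *   | OMI  (1394 header) |               |             |
+ *   | OL   (data ptr) ---+-------------->|   payload   |
+ *   |      (branch) --+  |               |             |
+ *   +-----------------|--+               +-------------+
+ *                     v
+ *              next descriptor block
+ */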
+
+
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+#include <sys/note.h>
+
+#include <sys/1394/adapters/hci1394.h>
+
+
+static int hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size,
+ uint32_t *io_addr);
+static void hci1394_q_unreserve(hci1394_q_buf_t *qbuf);
+static void hci1394_q_buf_setup(hci1394_q_buf_t *qbuf);
+static void hci1394_q_reset(hci1394_q_handle_t q_handle);
+static void hci1394_q_next_buf(hci1394_q_buf_t *qbuf);
+
+static void hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle,
+ hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
+ uint_t hdrsize);
+static void hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle,
+ hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
+ uint_t hdrsize);
+static void hci1394_q_at_write_OL(hci1394_q_handle_t q_handle,
+ hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, uint32_t io_addr,
+ uint_t datasize);
+static void hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
+ uint8_t *data, uint_t datasize);
+static void hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf,
+ hci1394_q_cmd_t *cmd, h1394_mblk_t *mblk);
+
+static void hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle,
+ hci1394_q_buf_t *qbuf, uint32_t io_addr, uint_t datasize);
+
+_NOTE(SCHEME_PROTECTS_DATA("unique", msgb))
+
+/*
+ * hci1394_q_init()
+ * Initialize a Q. A Q consists of a descriptor buffer and a data buffer and
+ * can be either an AT or AR Q. hci1394_q_init() returns a handle which
+ *    should be used for the rest of the hci1394_q_* calls.
+ */
+int
+hci1394_q_init(hci1394_drvinfo_t *drvinfo,
+ hci1394_ohci_handle_t ohci_handle, hci1394_q_info_t *qinfo,
+ hci1394_q_handle_t *q_handle)
+{
+ hci1394_q_buf_t *desc;
+ hci1394_q_buf_t *data;
+ hci1394_buf_parms_t parms;
+ hci1394_q_t *q;
+ int status;
+ int index;
+
+
+ ASSERT(drvinfo != NULL);
+ ASSERT(qinfo != NULL);
+ ASSERT(q_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_init_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * allocate the memory to track this Q. Initialize the internal Q
+ * structure.
+ */
+ q = kmem_alloc(sizeof (hci1394_q_t), KM_SLEEP);
+ q->q_drvinfo = drvinfo;
+ q->q_info = *qinfo;
+ q->q_ohci = ohci_handle;
+ mutex_init(&q->q_mutex, NULL, MUTEX_DRIVER, drvinfo->di_iblock_cookie);
+ desc = &q->q_desc;
+ data = &q->q_data;
+
+ /*
+ * Allocate the Descriptor buffer.
+ *
+ * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
+ * after we have tested the multiple cookie code on x86.
+ */
+ parms.bp_length = qinfo->qi_desc_size;
+ parms.bp_max_cookies = 1;
+ parms.bp_alignment = 16;
+ status = hci1394_buf_alloc(drvinfo, &parms, &desc->qb_buf,
+ &desc->qb_buf_handle);
+ if (status != DDI_SUCCESS) {
+ mutex_destroy(&q->q_mutex);
+ kmem_free(q, sizeof (hci1394_q_t));
+ *q_handle = NULL;
+ TNF_PROBE_0(hci1394_q_init_bae_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /* Copy in buffer cookies into our local cookie array */
+ desc->qb_cookie[0] = desc->qb_buf.bi_cookie;
+ for (index = 1; index < desc->qb_buf.bi_cookie_count; index++) {
+ ddi_dma_nextcookie(desc->qb_buf.bi_dma_handle,
+ &desc->qb_buf.bi_cookie);
+ desc->qb_cookie[index] = desc->qb_buf.bi_cookie;
+ }
+
+ /*
+ * Allocate the Data buffer.
+ *
+ * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
+ * after we have tested the multiple cookie code on x86.
+ */
+ parms.bp_length = qinfo->qi_data_size;
+ parms.bp_max_cookies = 1;
+ parms.bp_alignment = 16;
+ status = hci1394_buf_alloc(drvinfo, &parms, &data->qb_buf,
+ &data->qb_buf_handle);
+ if (status != DDI_SUCCESS) {
+		/* cleanup the descriptor buffer alloc'd above */
+		hci1394_buf_free(&desc->qb_buf_handle);
+ mutex_destroy(&q->q_mutex);
+ kmem_free(q, sizeof (hci1394_q_t));
+ *q_handle = NULL;
+ TNF_PROBE_0(hci1394_q_init_baa_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+	 * We must have at least 2 ARQ data buffers. If we only have one, we
+ * will artificially create 2. We must have 2 so that we always have a
+ * descriptor with free data space to write AR data to. When one is
+ * empty, it will take us a bit to get a new descriptor back into the
+ * chain.
+ */
+ if ((qinfo->qi_mode == HCI1394_ARQ) &&
+ (data->qb_buf.bi_cookie_count == 1)) {
+ data->qb_buf.bi_cookie_count = 2;
+ data->qb_cookie[0] = data->qb_buf.bi_cookie;
+ data->qb_cookie[0].dmac_size /= 2;
+ data->qb_cookie[1] = data->qb_cookie[0];
+ data->qb_cookie[1].dmac_laddress =
+ data->qb_cookie[0].dmac_laddress +
+ data->qb_cookie[0].dmac_size;
+ data->qb_cookie[1].dmac_address =
+ data->qb_cookie[0].dmac_address +
+ data->qb_cookie[0].dmac_size;
+
+ /* We have more than 1 cookie or we are an AT Q */
+ } else {
+ /* Copy in buffer cookies into our local cookie array */
+ data->qb_cookie[0] = data->qb_buf.bi_cookie;
+ for (index = 1; index < data->qb_buf.bi_cookie_count; index++) {
+ ddi_dma_nextcookie(data->qb_buf.bi_dma_handle,
+ &data->qb_buf.bi_cookie);
+ data->qb_cookie[index] = data->qb_buf.bi_cookie;
+ }
+ }
+
+ /* The top and bottom of the Q are only set once */
+ desc->qb_ptrs.qp_top = desc->qb_buf.bi_kaddr;
+ desc->qb_ptrs.qp_bottom = desc->qb_buf.bi_kaddr +
+ desc->qb_buf.bi_real_length - 1;
+ data->qb_ptrs.qp_top = data->qb_buf.bi_kaddr;
+ data->qb_ptrs.qp_bottom = data->qb_buf.bi_kaddr +
+ data->qb_buf.bi_real_length - 1;
+
+ /*
+ * reset the Q pointers to their original settings. Setup IM
+ * descriptors if this is an AR Q.
+ */
+ hci1394_q_reset(q);
+
+ /* if this is an AT Q, create a queued list for the AT descriptors */
+ if (qinfo->qi_mode == HCI1394_ATQ) {
+ hci1394_tlist_init(drvinfo, NULL, &q->q_queued_list);
+ }
+
+ *q_handle = q;
+
+ TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
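+
+/*
+ * Typical usage from the async setup code, as a sketch only (the qinfo
+ * setup is omitted and the variable names are illustrative):
+ *
+ *    hci1394_q_handle_t q;
+ *
+ *    if (hci1394_q_init(drvinfo, ohci_hdl, &qinfo, &q) != DDI_SUCCESS)
+ *        return (DDI_FAILURE);
+ *    ...
+ *    hci1394_q_fini(&q);    (q is set back to NULL)
+ */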
+
+
+/*
+ * hci1394_q_fini()
+ * Cleanup after a successful hci1394_q_init(). Notice that a pointer to the
+ * handle is used for the parameter. fini() will set your handle to NULL
+ * before returning.
+ */
+void
+hci1394_q_fini(hci1394_q_handle_t *q_handle)
+{
+ hci1394_q_t *q;
+
+ ASSERT(q_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_fini_enter, HCI1394_TNF_HAL_STACK, "");
+
+ q = *q_handle;
+ if (q->q_info.qi_mode == HCI1394_ATQ) {
+ hci1394_tlist_fini(&q->q_queued_list);
+ }
+ mutex_destroy(&q->q_mutex);
+ hci1394_buf_free(&q->q_desc.qb_buf_handle);
+ hci1394_buf_free(&q->q_data.qb_buf_handle);
+ kmem_free(q, sizeof (hci1394_q_t));
+ *q_handle = NULL;
+
+ TNF_PROBE_0_DEBUG(hci1394_q_fini_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_q_buf_setup()
+ * Initialization of buffer pointers which are present in both the descriptor
+ *    buffer and data buffer (no reason to duplicate the code).
+ */
+static void
+hci1394_q_buf_setup(hci1394_q_buf_t *qbuf)
+{
+ ASSERT(qbuf != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_buf_setup_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* start with the first cookie */
+ qbuf->qb_ptrs.qp_current_buf = 0;
+ qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
+ qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
+ qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
+ qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
+ qbuf->qb_ptrs.qp_offset = 0;
+
+ /*
+	 * The free_buf and free pointers will change every time an ACK (of some
+ * type) is processed. Free is the last byte in the last cookie.
+ */
+ qbuf->qb_ptrs.qp_free_buf = qbuf->qb_buf.bi_cookie_count - 1;
+ qbuf->qb_ptrs.qp_free = qbuf->qb_ptrs.qp_bottom;
+
+ /*
+ * Start with no space to write descriptors. We first need to call
+ * hci1394_q_reserve() before calling hci1394_q_at_write_O*().
+ */
+ qbuf->qb_ptrs.qp_resv_size = 0;
+
+ TNF_PROBE_0_DEBUG(hci1394_q_buf_setup_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_q_reset()
+ * Resets the buffers to an initial state. This should be called during
+ * attach and resume.
+ */
+static void
+hci1394_q_reset(hci1394_q_handle_t q_handle)
+{
+ hci1394_q_buf_t *desc;
+ hci1394_q_buf_t *data;
+ int index;
+
+ ASSERT(q_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_reset_enter, HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&q_handle->q_mutex);
+ desc = &q_handle->q_desc;
+ data = &q_handle->q_data;
+
+ hci1394_q_buf_setup(desc);
+ hci1394_q_buf_setup(data);
+
+ /* DMA starts off stopped, no previous descriptor to link from */
+ q_handle->q_dma_running = B_FALSE;
+ q_handle->q_block_cnt = 0;
+ q_handle->q_previous = NULL;
+
+ /* If this is an AR Q, setup IM's for the data buffers that we have */
+ if (q_handle->q_info.qi_mode == HCI1394_ARQ) {
+ /*
+ * This points to where to find the first IM descriptor. Since
+ * we just reset the pointers in hci1394_q_buf_setup(), the
+ * first IM we write below will be found at the top of the Q.
+ */
+ q_handle->q_head = desc->qb_ptrs.qp_top;
+
+ for (index = 0; index < data->qb_buf.bi_cookie_count; index++) {
+ hci1394_q_ar_write_IM(q_handle, desc,
+ data->qb_cookie[index].dmac_address,
+ data->qb_cookie[index].dmac_size);
+ }
+
+ /*
+ * The space left in the current IM is the size of the buffer.
+ * The current buffer is the first buffer added to the AR Q.
+ */
+ q_handle->q_space_left = data->qb_cookie[0].dmac_size;
+ }
+
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_q_reset_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_q_resume()
+ * This is called during a resume (after a successful suspend). Currently
+ * we only call reset. Since this is not a time critical function, we will
+ * leave this as a separate function to increase readability.
+ */
+void
+hci1394_q_resume(hci1394_q_handle_t q_handle)
+{
+ ASSERT(q_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_resume_enter, HCI1394_TNF_HAL_STACK, "");
+ hci1394_q_reset(q_handle);
+ TNF_PROBE_0_DEBUG(hci1394_q_resume_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_q_stop()
+ * This call informs us that a DMA engine has been stopped. It does not
+ * perform the actual stop. We need to know this so that when we add a
+ * new descriptor, we do a start instead of a wake.
+ */
+void
+hci1394_q_stop(hci1394_q_handle_t q_handle)
+{
+ ASSERT(q_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_stop_enter, HCI1394_TNF_HAL_STACK, "");
+ mutex_enter(&q_handle->q_mutex);
+ q_handle->q_dma_running = B_FALSE;
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_q_stop_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_q_reserve()
+ * Reserve space in the AT descriptor or data buffer. This ensures that we
+ * can get a contiguous buffer. Descriptors have to be in a contiguous
+ * buffer. Data does not have to be in a contiguous buffer but we do this to
+ * reduce complexity. For systems with small page sizes (e.g. x86), this
+ * could result in inefficient use of the data buffers when sending large
+ * data blocks (this only applies to non-physical block write ATREQs and
+ * block read ATRESP). Since it looks like most protocols that use large data
+ *    blocks (like SBP-2) use physical transfers to do this (due to their
+ * efficiency), this will probably not be a real world problem. If it turns
+ * out to be a problem, the options are to force a single cookie for the data
+ * buffer, allow multiple cookies and have a larger data space, or change the
+ *    data code to use an OMI, OM, OL descriptor sequence (instead of OMI, OL).
+ */
+static int
+hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size, uint32_t *io_addr)
+{
+ uint_t aligned_size;
+
+
+ ASSERT(qbuf != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_reserve_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* Save backup of pointers in case we have to unreserve */
+ qbuf->qb_backup_ptrs = qbuf->qb_ptrs;
+
+ /*
+	 * Make sure all allocs are quadlet aligned. The data doesn't have to
+	 * be, so we will force it to be (see the alignment sketch after this
+	 * function).
+ */
+ aligned_size = HCI1394_ALIGN_QUAD(size);
+
+ /*
+ * if the free pointer is in the current buffer and the free pointer
+ * is below the current pointer (i.e. has not wrapped around)
+ */
+ if ((qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) &&
+ (qbuf->qb_ptrs.qp_free >= qbuf->qb_ptrs.qp_current)) {
+ /*
+ * The free pointer is in this buffer below the current pointer.
+ * Check to see if we have enough free space left.
+ */
+ if ((qbuf->qb_ptrs.qp_current + aligned_size) <=
+ qbuf->qb_ptrs.qp_free) {
+			/* Set up our reserved size, return the IO address */
+ qbuf->qb_ptrs.qp_resv_size = aligned_size;
+ *io_addr = (uint32_t)(qbuf->qb_cookie[
+ qbuf->qb_ptrs.qp_current_buf].dmac_address +
+ qbuf->qb_ptrs.qp_offset);
+
+ /*
+ * The free pointer is in this buffer below the current pointer.
+ * We do not have enough free space for the alloc. Return
+ * failure.
+ */
+ } else {
+ qbuf->qb_ptrs.qp_resv_size = 0;
+ TNF_PROBE_0(hci1394_q_reserve_ns_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * If there is not enough room to fit in the current buffer (not
+ * including wrap around), we will go to the next buffer and check
+ * there. If we only have one buffer (i.e. one cookie), we will end up
+ * staying at the current buffer and wrapping the address back to the
+ * top.
+ */
+ } else if ((qbuf->qb_ptrs.qp_current + aligned_size) >
+ qbuf->qb_ptrs.qp_end) {
+ /* Go to the next buffer (or the top of ours for one cookie) */
+ hci1394_q_next_buf(qbuf);
+
+ /* If the free pointer is in the new current buffer */
+ if (qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) {
+ /*
+ * The free pointer is in this buffer. If we do not have
+			 * enough free space for the alloc, return failure.
+ */
+ if ((qbuf->qb_ptrs.qp_current + aligned_size) >
+ qbuf->qb_ptrs.qp_free) {
+ qbuf->qb_ptrs.qp_resv_size = 0;
+ TNF_PROBE_0(hci1394_q_reserve_ns_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ /*
+ * The free pointer is in this buffer. We have enough
+ * free space left.
+ */
+ } else {
+ /*
+				 * Set up our reserved size, return the IO
+ * address
+ */
+ qbuf->qb_ptrs.qp_resv_size = aligned_size;
+ *io_addr = (uint32_t)(qbuf->qb_cookie[
+ qbuf->qb_ptrs.qp_current_buf].dmac_address +
+ qbuf->qb_ptrs.qp_offset);
+ }
+
+ /*
+ * We switched buffers and the free pointer is still in another
+ * buffer. We have sufficient space in this buffer for the alloc
+ * after changing buffers.
+ */
+ } else {
+			/* Set up our reserved size, return the IO address */
+ qbuf->qb_ptrs.qp_resv_size = aligned_size;
+ *io_addr = (uint32_t)(qbuf->qb_cookie[
+ qbuf->qb_ptrs.qp_current_buf].dmac_address +
+ qbuf->qb_ptrs.qp_offset);
+ }
+ /*
+ * The free pointer is in another buffer. We have sufficient space in
+ * this buffer for the alloc.
+ */
+ } else {
+		/* Set up our reserved size, return the IO address */
+ qbuf->qb_ptrs.qp_resv_size = aligned_size;
+ *io_addr = (uint32_t)(qbuf->qb_cookie[
+ qbuf->qb_ptrs.qp_current_buf].dmac_address +
+ qbuf->qb_ptrs.qp_offset);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
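+/*
+ * For reference, the quadlet alignment macro used above rounds a byte count
+ * up to the next multiple of 4. A minimal sketch of it (the real
+ * HCI1394_ALIGN_QUAD() definition lives in the hci1394 headers):
+ *
+ *    #define HCI1394_ALIGN_QUAD(size)    (((size) + 3) & ~3)
+ */
+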
+/*
+ * hci1394_q_unreserve()
+ *    Set the buffer pointers to what they were before hci1394_q_reserve().
+ *    This will be called when we encounter errors during hci1394_q_at*().
+ */
+static void
+hci1394_q_unreserve(hci1394_q_buf_t *qbuf)
+{
+ ASSERT(qbuf != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_unreserve_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* Go back to pointer setting before the reserve */
+ qbuf->qb_ptrs = qbuf->qb_backup_ptrs;
+
+ TNF_PROBE_0_DEBUG(hci1394_q_unreserve_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_q_next_buf()
+ * Set our current buffer to the next cookie. If we only have one cookie, we
+ * will go back to the top of our buffer.
+ */
+static void
+hci1394_q_next_buf(hci1394_q_buf_t *qbuf)
+{
+ ASSERT(qbuf != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_next_buf_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /*
+	 * Go to the next cookie. If we are >= the cookie count, go back to
+	 * the first cookie.
+ */
+ qbuf->qb_ptrs.qp_current_buf++;
+ if (qbuf->qb_ptrs.qp_current_buf >= qbuf->qb_buf.bi_cookie_count) {
+ qbuf->qb_ptrs.qp_current_buf = 0;
+ }
+
+ /* adjust the begin, end, current, and offset pointers */
+ qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_end + 1;
+ if (qbuf->qb_ptrs.qp_begin > qbuf->qb_ptrs.qp_bottom) {
+ qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
+ }
+ qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
+ qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
+ qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
+ qbuf->qb_ptrs.qp_offset = 0;
+
+ TNF_PROBE_0_DEBUG(hci1394_q_next_buf_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_q_at()
+ * Place an AT command that does NOT need the data buffer into the DMA chain.
+ * Some examples of this are quadlet read/write, PHY packets, ATREQ Block
+ * Read, and ATRESP block write. result is only valid on failure.
+ */
+int
+hci1394_q_at(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
+ hci1394_basic_pkt_t *hdr, uint_t hdrsize, int *result)
+{
+ int status;
+ uint32_t ioaddr;
+
+
+ ASSERT(q_handle != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(hdr != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_enter, HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&q_handle->q_mutex);
+
+ /*
+ * Check the HAL state and generation when the AT Q is locked. This
+ * will make sure that we get all the commands when we flush the Q's
+ * during a reset or shutdown.
+ */
+ if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
+ (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
+ cmd->qc_generation)) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0(hci1394_q_at_st_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /* save away the argument to pass up when this command completes */
+ cmd->qc_node.tln_addr = cmd;
+
+ /* we have not written any 16 byte blocks to the descriptor yet */
+ q_handle->q_block_cnt = 0;
+
+ /* Reserve space for an OLI in the descriptor buffer */
+ status = hci1394_q_reserve(&q_handle->q_desc,
+ sizeof (hci1394_desc_imm_t), &ioaddr);
+ if (status != DDI_SUCCESS) {
+ *result = H1394_STATUS_NOMORE_SPACE;
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0(hci1394_q_at_qre_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /* write the OLI to the descriptor buffer */
+ hci1394_q_at_write_OLI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
+
+ /* Add the AT command to the queued list */
+ hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
+
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * XXX - NOTE: POSSIBLE FUTURE OPTIMIZATION
+ * ATREQ Block read and write's that go through software are not very
+ * efficient (one of the reasons to use physical space). A copy is forced
+ * on all block reads due to the design of OpenHCI. Writes do not have this
+ * same restriction. This design forces a copy for writes too (we always
+ * copy into a data buffer before sending). There are many reasons for this
+ * including complexity reduction. There is a data size threshold where a
+ * copy is more expensive than mapping the data buffer address (or, worst
+ * case, a big enough difference that it pays to do it). However, we move
+ * block data around in mblks which means that our data may be scattered
+ * over many buffers. This adds to the complexity of mapping and setting
+ * up the OpenHCI descriptors.
+ *
+ * If someone really needs a speedup on block write ATREQs, my recommendation
+ * would be to add an additional command type at the target interface for a
+ * fast block write. The target driver would pass a mapped io addr to use.
+ * A function like "hci1394_q_at_with_ioaddr()" could be created which would
+ * be almost an exact copy of hci1394_q_at_with_data() without the
+ * hci1394_q_reserve() and hci1394_q_at_rep_put8() for the data buffer.
+ */
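+
+/*
+ * A minimal sketch of what such a routine could look like (hypothetical; it
+ * is not part of this driver). It mirrors hci1394_q_at_with_data() minus the
+ * data buffer reserve and copy; the caller passes in an already mapped IO
+ * address for the payload.
+ */
+#ifdef notdef
+int
+hci1394_q_at_with_ioaddr(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
+    hci1394_basic_pkt_t *hdr, uint_t hdrsize, uint32_t data_ioaddr,
+    uint_t datasize, int *result)
+{
+	uint32_t desc_ioaddr;
+	int status;
+
+	mutex_enter(&q_handle->q_mutex);
+
+	/* same HAL state and generation check as hci1394_q_at_with_data() */
+	if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
+	    (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
+	    cmd->qc_generation)) {
+		*result = H1394_STATUS_INVALID_BUSGEN;
+		mutex_exit(&q_handle->q_mutex);
+		return (DDI_FAILURE);
+	}
+
+	cmd->qc_node.tln_addr = cmd;
+	q_handle->q_block_cnt = 0;
+
+	/* the caller owns the data buffer; nothing to free in at_next() */
+	cmd->qc_data_used = B_FALSE;
+
+	/* only descriptor space is reserved; there is no data buffer copy */
+	status = hci1394_q_reserve(&q_handle->q_desc,
+	    (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
+	    &desc_ioaddr);
+	if (status != DDI_SUCCESS) {
+		*result = H1394_STATUS_NOMORE_SPACE;
+		mutex_exit(&q_handle->q_mutex);
+		return (DDI_FAILURE);
+	}
+
+	hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
+	hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
+	    datasize);
+	hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
+
+	mutex_exit(&q_handle->q_mutex);
+	return (DDI_SUCCESS);
+}
+#endif	/* notdef */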
+
+
+/*
+ * hci1394_q_at_with_data()
+ * Place an AT command that does need the data buffer into the DMA chain.
+ * The data is passed as a pointer to a kernel virtual address. An example of
+ * this is the lock operations. result is only valid on failure.
+ */
+int
+hci1394_q_at_with_data(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
+ hci1394_basic_pkt_t *hdr, uint_t hdrsize, uint8_t *data, uint_t datasize,
+ int *result)
+{
+ uint32_t desc_ioaddr;
+ uint32_t data_ioaddr;
+ int status;
+
+
+ ASSERT(q_handle != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(hdr != NULL);
+ ASSERT(data != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ mutex_enter(&q_handle->q_mutex);
+
+ /*
+ * Check the HAL state and generation when the AT Q is locked. This
+ * will make sure that we get all the commands when we flush the Q's
+ * during a reset or shutdown.
+ */
+ if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
+ (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
+ cmd->qc_generation)) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_wd_st_fail,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* save away the argument to pass up when this command completes */
+ cmd->qc_node.tln_addr = cmd;
+
+ /* we have not written any 16 byte blocks to the descriptor yet */
+ q_handle->q_block_cnt = 0;
+
+ /* Reserve space for an OMI and OL in the descriptor buffer */
+ status = hci1394_q_reserve(&q_handle->q_desc,
+ (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
+ &desc_ioaddr);
+ if (status != DDI_SUCCESS) {
+ *result = H1394_STATUS_NOMORE_SPACE;
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0(hci1394_q_at_wd_qre_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* allocate space for data in the data buffer */
+ status = hci1394_q_reserve(&q_handle->q_data, datasize, &data_ioaddr);
+ if (status != DDI_SUCCESS) {
+ *result = H1394_STATUS_NOMORE_SPACE;
+ hci1394_q_unreserve(&q_handle->q_desc);
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0(hci1394_q_at_wd_qra_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Copy data into data buffer */
+ hci1394_q_at_rep_put8(&q_handle->q_data, cmd, data, datasize);
+
+ /* write the OMI to the descriptor buffer */
+ hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
+
+ /* write the OL to the descriptor buffer */
+ hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
+ datasize);
+
+ /* Add the AT command to the queued list */
+ hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
+
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_q_at_with_mblk()
+ * Place an AT command that does need the data buffer into the DMA chain.
+ * The data is passed in mblk_t(s). Examples of this are a block write
+ * ATREQ and a block read ATRESP. The services layer and the hal use a
+ * private structure (h1394_mblk_t) to keep track of how much of the mblk
+ * to send since we may have to break the transfer up into smaller blocks.
+ *    (i.e. a 1MByte block write would go out in 2KByte chunks). result is only
+ * valid on failure.
+ */
+int
+hci1394_q_at_with_mblk(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
+ hci1394_basic_pkt_t *hdr, uint_t hdrsize, h1394_mblk_t *mblk, int *result)
+{
+ uint32_t desc_ioaddr;
+ uint32_t data_ioaddr;
+ int status;
+
+
+ ASSERT(q_handle != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(hdr != NULL);
+ ASSERT(mblk != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ mutex_enter(&q_handle->q_mutex);
+
+ /*
+ * Check the HAL state and generation when the AT Q is locked. This
+ * will make sure that we get all the commands when we flush the Q's
+ * during a reset or shutdown.
+ */
+ if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
+ (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
+ cmd->qc_generation)) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_wm_st_fail,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* save away the argument to pass up when this command completes */
+ cmd->qc_node.tln_addr = cmd;
+
+ /* we have not written any 16 byte blocks to the descriptor yet */
+ q_handle->q_block_cnt = 0;
+
+ /* Reserve space for an OMI and OL in the descriptor buffer */
+ status = hci1394_q_reserve(&q_handle->q_desc,
+ (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
+ &desc_ioaddr);
+ if (status != DDI_SUCCESS) {
+ *result = H1394_STATUS_NOMORE_SPACE;
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0(hci1394_q_at_wm_qre_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Reserve space for data in the data buffer */
+ status = hci1394_q_reserve(&q_handle->q_data, mblk->length,
+ &data_ioaddr);
+ if (status != DDI_SUCCESS) {
+ *result = H1394_STATUS_NOMORE_SPACE;
+ hci1394_q_unreserve(&q_handle->q_desc);
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0(hci1394_q_at_wm_qra_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Copy mblk data into data buffer */
+ hci1394_q_at_copy_from_mblk(&q_handle->q_data, cmd, mblk);
+
+ /* write the OMI to the descriptor buffer */
+ hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
+
+ /* write the OL to the descriptor buffer */
+ hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
+ mblk->length);
+
+ /* Add the AT command to the queued list */
+ hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
+
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_q_at_next()
+ * Return the next completed AT command in cmd. If flush_q is true, we will
+ * return the command regardless if it finished or not. We will flush
+ * during bus reset processing, shutdown, and detach.
+ */
+void
+hci1394_q_at_next(hci1394_q_handle_t q_handle, boolean_t flush_q,
+ hci1394_q_cmd_t **cmd)
+{
+ hci1394_q_buf_t *desc;
+ hci1394_q_buf_t *data;
+ hci1394_tlist_node_t *node;
+ uint32_t cmd_status;
+
+
+ ASSERT(q_handle != NULL);
+ ASSERT(cmd != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_next_enter, HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&q_handle->q_mutex);
+
+ desc = &q_handle->q_desc;
+ data = &q_handle->q_data;
+
+ /* Sync descriptor buffer */
+ (void) ddi_dma_sync(desc->qb_buf.bi_dma_handle, 0,
+ desc->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
+
+ /* Look at the top cmd on the queued list (without removing it) */
+ hci1394_tlist_peek(q_handle->q_queued_list, &node);
+ if (node == NULL) {
+ /* There are no more commands left on the queued list */
+ *cmd = NULL;
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return;
+ }
+
+ /*
+ * There is a command on the list, read its status and timestamp when
+ * it was sent
+ */
+ *cmd = (hci1394_q_cmd_t *)node->tln_addr;
+ cmd_status = ddi_get32(desc->qb_buf.bi_handle, (*cmd)->qc_status_addr);
+ (*cmd)->qc_timestamp = cmd_status & DESC_ST_TIMESTAMP_MASK;
+ cmd_status = HCI1394_DESC_EVT_GET(cmd_status);
+
+ /*
+ * If we are flushing the q (e.g. due to a bus reset), we will return
+ * the command regardless of its completion status. If we are not
+ * flushing the Q and we do not have status on the command (e.g. status
+ * = 0), we are done with this Q for now.
+ */
+ if (flush_q == B_FALSE) {
+ if (cmd_status == 0) {
+ *cmd = NULL;
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return;
+ }
+ }
+
+ /*
+	 * The command completed, remove it from the queued list. There is no
+	 * race condition deleting the node from the list here. This is the
+ * only place the node will be deleted so we do not need to check the
+ * return status.
+ */
+ (void) hci1394_tlist_delete(q_handle->q_queued_list, node);
+
+ /*
+ * Free the space used by the command in the descriptor and data
+ * buffers.
+ */
+ desc->qb_ptrs.qp_free_buf = (*cmd)->qc_descriptor_buf;
+ desc->qb_ptrs.qp_free = (*cmd)->qc_descriptor_end;
+ if ((*cmd)->qc_data_used == B_TRUE) {
+ data->qb_ptrs.qp_free_buf = (*cmd)->qc_data_buf;
+ data->qb_ptrs.qp_free = (*cmd)->qc_data_end;
+ }
+
+ /* return command status */
+ (*cmd)->qc_status = cmd_status;
+
+ mutex_exit(&q_handle->q_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit, HCI1394_TNF_HAL_STACK, "");
+}
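+
+/*
+ * For reference, a typical caller loop (sketch only): the AT completion
+ * handler drains finished commands until it sees NULL.
+ *
+ *    hci1394_q_cmd_t *cmd;
+ *
+ *    for (;;) {
+ *        hci1394_q_at_next(q_handle, B_FALSE, &cmd);
+ *        if (cmd == NULL)
+ *            break;
+ *        (complete the command; cmd->qc_status holds the event code)
+ *    }
+ */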
+
+
+/*
+ * hci1394_q_at_write_OMI()
+ * Write an OMI descriptor into the AT descriptor buffer passed in as qbuf.
+ * Buffer state information is stored in cmd. Use the hdr and hdr size for
+ * the additional information attached to an immediate descriptor.
+ */
+static void
+hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
+ hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
+{
+ hci1394_desc_imm_t *desc;
+ uint32_t data;
+
+
+ ASSERT(qbuf != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(hdr != NULL);
+ ASSERT(MUTEX_HELD(&q_handle->q_mutex));
+ TNF_PROBE_0_DEBUG(hci1394_q_at_write_OMI_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ /* The only valid "header" sizes for an OMI are 8 bytes or 16 bytes */
+ ASSERT((hdrsize == 8) || (hdrsize == 16));
+
+ /* Make sure enough room for OMI */
+ ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));
+
+ /* Store the offset of the top of this descriptor block */
+ qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
+ qbuf->qb_ptrs.qp_begin);
+
+ /* Setup OpenHCI OMI Header */
+ desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
+ data = DESC_AT_OMI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);
+
+ /*
+ * Copy in 1394 header. Size is in bytes, convert it to a 32-bit word
+ * count.
+ */
+ ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
+ hdrsize >> 2, DDI_DEV_AUTOINCR);
+
+ /*
+	 * We wrote two 16-byte blocks in the descriptor buffer, update the count
+ * accordingly. Update the reserved size and current pointer.
+ */
+ q_handle->q_block_cnt += 2;
+ qbuf->qb_ptrs.qp_resv_size -= sizeof (hci1394_desc_imm_t);
+ qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
+
+ TNF_PROBE_0_DEBUG(hci1394_q_at_write_OMI_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_q_at_write_OLI()
+ * Write an OLI descriptor into the AT descriptor buffer passed in as qbuf.
+ * Buffer state information is stored in cmd. Use the hdr and hdr size for
+ * the additional information attached to an immediate descriptor.
+ */
+static void
+hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
+ hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
+{
+ hci1394_desc_imm_t *desc;
+ uint32_t data;
+ uint32_t command_ptr;
+ uint32_t tcode;
+
+
+ ASSERT(qbuf != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(hdr != NULL);
+ ASSERT(MUTEX_HELD(&q_handle->q_mutex));
+ TNF_PROBE_0_DEBUG(hci1394_q_at_write_OLI_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ /* The only valid "header" sizes for an OLI are 8, 12, 16 bytes */
+ ASSERT((hdrsize == 8) || (hdrsize == 12) || (hdrsize == 16));
+
+ /* make sure enough room for 1 OLI */
+ ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));
+
+ /* Store the offset of the top of this descriptor block */
+ qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
+ qbuf->qb_ptrs.qp_begin);
+
+ /* Setup OpenHCI OLI Header */
+ desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
+ data = DESC_AT_OLI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);
+
+ /* Setup 1394 Header */
+ tcode = (hdr->q1 & DESC_PKT_TCODE_MASK) >> DESC_PKT_TCODE_SHIFT;
+ if ((tcode == IEEE1394_TCODE_WRITE_QUADLET) ||
+ (tcode == IEEE1394_TCODE_READ_QUADLET_RESP)) {
+ /*
+		 * if the tcode is a quadlet write or a quadlet read response,
+		 * move the last quadlet as 8-bit data. All data is treated as
+		 * 8-bit data (even quadlet reads and writes). Therefore,
+		 * target drivers MUST take that into consideration when
+		 * accessing device registers.
+ */
+ ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1, 3,
+ DDI_DEV_AUTOINCR);
+ ddi_rep_put8(qbuf->qb_buf.bi_handle, (uint8_t *)&hdr->q4,
+ (uint8_t *)&desc->q4, 4, DDI_DEV_AUTOINCR);
+ } else {
+ ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
+ hdrsize >> 2, DDI_DEV_AUTOINCR);
+ }
+
+ /*
+	 * We wrote two 16-byte blocks in the descriptor buffer, update the count
+ * accordingly.
+ */
+ q_handle->q_block_cnt += 2;
+
+ /*
+ * Sync buffer in case DMA engine currently running. This must be done
+ * before writing the command pointer in the previous descriptor.
+ */
+ (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
+ qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
+
+ /* save away the status address for quick access in at_next() */
+ cmd->qc_status_addr = &desc->status;
+
+ /*
+	 * Set up the command pointer. This tells the HW where to get the
+	 * descriptor we just set up. This includes the IO address along with
+	 * a 4-bit count of 16-byte blocks.
+ */
+ command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
+ ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
+ DESC_Z_MASK));
+
+ /*
+ * if we previously setup a descriptor, add this new descriptor into
+ * the previous descriptor's "next" pointer.
+ */
+ if (q_handle->q_previous != NULL) {
+ ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
+ command_ptr);
+ /* Sync buffer again, this gets the command pointer */
+ (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
+ qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
+ }
+
+ /*
+ * this is now the previous descriptor. Update the current pointer,
+ * clear the block count and reserved size since this is the end of
+ * this command.
+ */
+ q_handle->q_previous = (hci1394_desc_t *)desc;
+ qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
+ q_handle->q_block_cnt = 0;
+ qbuf->qb_ptrs.qp_resv_size = 0;
+
+ /* save away cleanup info when we are done with the command */
+ cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
+ cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;
+
+ /* If the DMA is not running, start it */
+ if (q_handle->q_dma_running == B_FALSE) {
+ q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
+ command_ptr);
+ q_handle->q_dma_running = B_TRUE;
+ /* the DMA is running, wake it up */
+ } else {
+ q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_q_at_write_OLI_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
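+/*
+ * Command pointer encoding, as a worked example (the addresses are
+ * hypothetical): descriptor blocks are 16-byte aligned, so the low 4 bits of
+ * a block's IO address are free to carry Z, the number of 16-byte blocks
+ * chained at that address. An OLI by itself is 32 bytes, i.e. Z = 2; if the
+ * OLI sits at IO address 0x12340, the command pointer written to the
+ * previous descriptor's branch (or handed to the start routine) is
+ * 0x12340 | 2 = 0x12342.
+ */
+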
+
+/*
+ * hci1394_q_at_write_OL()
+ * Write an OL descriptor into the AT descriptor buffer passed in as qbuf.
+ * Buffer state information is stored in cmd. The IO address of the data
+ * buffer is passed in io_addr. Size is the size of the data to be
+ * transferred.
+ */
+static void
+hci1394_q_at_write_OL(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
+ hci1394_q_cmd_t *cmd, uint32_t io_addr, uint_t size)
+{
+ hci1394_desc_t *desc;
+ uint32_t data;
+ uint32_t command_ptr;
+
+
+ ASSERT(q_handle != NULL);
+ ASSERT(qbuf != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(MUTEX_HELD(&q_handle->q_mutex));
+ TNF_PROBE_0_DEBUG(hci1394_q_at_write_OL_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ /* make sure enough room for OL */
+ ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_t));
+
+ /* Setup OpenHCI OL Header */
+ desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
+ data = DESC_AT_OL | (size & DESC_HDR_REQCOUNT_MASK);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, 0);
+
+ /*
+	 * We wrote one 16-byte block in the descriptor buffer, update the count
+ * accordingly.
+ */
+ q_handle->q_block_cnt++;
+
+ /*
+ * Sync buffer in case DMA engine currently running. This must be done
+ * before writing the command pointer in the previous descriptor.
+ */
+ (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
+ qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
+
+ /* save away the status address for quick access in at_next() */
+ cmd->qc_status_addr = &desc->status;
+
+ /*
+	 * Set up the command pointer. This tells the HW where to get the
+	 * descriptor we just set up. This includes the IO address along with
+	 * a 4-bit count of 16-byte blocks.
+ */
+ command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
+ ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
+ DESC_Z_MASK));
+
+ /*
+ * if we previously setup a descriptor, add this new descriptor into
+ * the previous descriptor's "next" pointer.
+ */
+ if (q_handle->q_previous != NULL) {
+ ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
+ command_ptr);
+ /* Sync buffer again, this gets the command pointer */
+ (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
+ qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
+ }
+
+ /*
+ * this is now the previous descriptor. Update the current pointer,
+ * clear the block count and reserved size since this is the end of
+ * this command.
+ */
+ q_handle->q_previous = desc;
+ qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
+ q_handle->q_block_cnt = 0;
+ qbuf->qb_ptrs.qp_resv_size = 0;
+
+ /* save away cleanup info when we are done with the command */
+ cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
+ cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;
+
+ /* If the DMA is not running, start it */
+ if (q_handle->q_dma_running == B_FALSE) {
+ q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
+ command_ptr);
+ q_handle->q_dma_running = B_TRUE;
+ /* the DMA is running, wake it up */
+ } else {
+ q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_q_at_write_OL_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_q_at_rep_put8()
+ *    Copy a byte stream from a kernel virtual address (data) to an IO mapped
+ * data buffer (qbuf). Copy datasize bytes. State information for the
+ * data buffer is kept in cmd.
+ */
+static void
+hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
+ uint8_t *data, uint_t datasize)
+{
+ ASSERT(qbuf != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(data != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_rep_put8_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ /* Make sure enough room for data */
+ ASSERT(qbuf->qb_ptrs.qp_resv_size >= datasize);
+
+ /* Copy in data into the data buffer */
+ ddi_rep_put8(qbuf->qb_buf.bi_handle, data,
+ (uint8_t *)qbuf->qb_ptrs.qp_current, datasize, DDI_DEV_AUTOINCR);
+
+ /* Update the current pointer, offset, and reserved size */
+ qbuf->qb_ptrs.qp_current += datasize;
+ qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
+ qbuf->qb_ptrs.qp_begin);
+ qbuf->qb_ptrs.qp_resv_size -= datasize;
+
+ /* save away cleanup info when we are done with the command */
+ cmd->qc_data_used = B_TRUE;
+ cmd->qc_data_buf = qbuf->qb_ptrs.qp_current_buf;
+ cmd->qc_data_end = qbuf->qb_ptrs.qp_current - 1;
+
+ /* Sync data buffer */
+ (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
+ qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
+
+ TNF_PROBE_0_DEBUG(hci1394_q_at_rep_put8_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_q_at_copy_from_mblk()
+ *    Copy a byte stream from mblk(s) to an IO mapped data buffer (qbuf).
+ * Copy mblk->length bytes. The services layer and the hal use a private
+ * structure (h1394_mblk_t) to keep track of how much of the mblk to send
+ * since we may have to break the transfer up into smaller blocks. (i.e. a
+ * 1MByte block write would go out in 2KByte chunks. State information for
+ * the data buffer is kept in cmd.
+ */
+static void
+hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
+ h1394_mblk_t *mblk)
+{
+ uint_t bytes_left;
+ uint_t length;
+
+
+ ASSERT(qbuf != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(mblk != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_at_copy_from_mblk_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* We return these variables to the Services Layer when we are done */
+ mblk->next_offset = mblk->curr_offset;
+ mblk->next_mblk = mblk->curr_mblk;
+ bytes_left = mblk->length;
+
+ /* do while there are bytes left to copy */
+ do {
+ /*
+ * If the entire data portion of the current block transfer is
+ * contained within a single mblk.
+ */
+ if ((mblk->next_offset + bytes_left) <=
+ (mblk->next_mblk->b_wptr)) {
+ /* Copy the data into the data Q */
+ hci1394_q_at_rep_put8(qbuf, cmd,
+ (uint8_t *)mblk->next_offset, bytes_left);
+
+ /* increment the mblk offset */
+ mblk->next_offset += bytes_left;
+
+ /* we have no more bytes to put into the buffer */
+ bytes_left = 0;
+
+ /*
+ * If our offset is at the end of data in this mblk, go
+ * to the next mblk.
+ */
+ if (mblk->next_offset >= mblk->next_mblk->b_wptr) {
+ mblk->next_mblk = mblk->next_mblk->b_cont;
+ if (mblk->next_mblk != NULL) {
+ mblk->next_offset =
+ mblk->next_mblk->b_rptr;
+ }
+ }
+
+ /*
+ * The data portion of the current block transfer is spread
+ * across two or more mblk's
+ */
+ } else {
+ /*
+ * Figure out how much data is in this mblk.
+ */
+ length = mblk->next_mblk->b_wptr - mblk->next_offset;
+
+ /* Copy the data into the atreq data Q */
+ hci1394_q_at_rep_put8(qbuf, cmd,
+ (uint8_t *)mblk->next_offset, length);
+
+ /* update the bytes left count, go to the next mblk */
+ bytes_left = bytes_left - length;
+ mblk->next_mblk = mblk->next_mblk->b_cont;
+ ASSERT(mblk->next_mblk != NULL);
+ mblk->next_offset = mblk->next_mblk->b_rptr;
+ }
+ } while (bytes_left > 0);
+
+ TNF_PROBE_0_DEBUG(hci1394_q_at_copy_from_mblk_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
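+
+
+/*
+ * Editorial sketch (hypothetical helper, not part of the driver): the
+ * chunking walk above in miniature. Using only the standard STREAMS mblk
+ * fields (b_rptr, b_wptr, b_cont from <sys/stream.h>) and MIN from
+ * <sys/sysmacros.h>, this copies up to "chunk" bytes from an mblk chain
+ * into a flat buffer, resuming from the caller-maintained (mp, rptr)
+ * cursor much like h1394_mblk_t's curr_mblk/curr_offset pair does above.
+ */
+static uint_t
+mblk_chain_copy(mblk_t **mp, uchar_t **rptr, uchar_t *buf, uint_t chunk)
+{
+	uint_t copied = 0;
+	uint_t avail;
+	uint_t len;
+
+	while ((*mp != NULL) && (copied < chunk)) {
+		/* bytes still unread in the current mblk */
+		avail = (uint_t)((*mp)->b_wptr - *rptr);
+		len = MIN(avail, chunk - copied);
+
+		bcopy(*rptr, &buf[copied], len);
+		copied += len;
+		*rptr += len;
+
+		/* exhausted this mblk; advance the cursor to the next one */
+		if (*rptr >= (*mp)->b_wptr) {
+			*mp = (*mp)->b_cont;
+			if (*mp != NULL) {
+				*rptr = (*mp)->b_rptr;
+			}
+		}
+	}
+	return (copied);
+}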
+
+
+/*
+ * hci1394_q_ar_next()
+ * Return the address of the next received AR packet. If there are no more
+ * AR packets in the buffer, q_addr will be set to NULL.
+ */
+void
+hci1394_q_ar_next(hci1394_q_handle_t q_handle, uint32_t **q_addr)
+{
+ hci1394_desc_t *desc;
+ hci1394_q_buf_t *descb;
+ hci1394_q_buf_t *datab;
+ uint32_t residual_count;
+
+
+ ASSERT(q_handle != NULL);
+ ASSERT(q_addr != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_ar_next_enter, HCI1394_TNF_HAL_STACK, "");
+
+ descb = &q_handle->q_desc;
+ datab = &q_handle->q_data;
+
+ /* Sync Descriptor buffer */
+ (void) ddi_dma_sync(descb->qb_buf.bi_dma_handle, 0,
+ descb->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
+
+ /*
+ * Check residual in current IM count vs q_space_left to see if we have
+ * received any more responses
+ */
+ desc = (hci1394_desc_t *)q_handle->q_head;
+ residual_count = ddi_get32(descb->qb_buf.bi_handle, &desc->status);
+ residual_count &= DESC_ST_RESCOUNT_MASK;
+ if (residual_count >= q_handle->q_space_left) {
+ /* No new packets received */
+ *q_addr = NULL;
+ TNF_PROBE_0_DEBUG(hci1394_q_ar_next_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return;
+ }
+
+ /* Sync Data Q */
+ (void) ddi_dma_sync(datab->qb_buf.bi_dma_handle, 0,
+ datab->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
+
+ /*
+ * We have a new packet, return the address of the start of the
+ * packet.
+ */
+ *q_addr = (uint32_t *)datab->qb_ptrs.qp_current;
+
+ TNF_PROBE_0_DEBUG(hci1394_q_ar_next_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_q_ar_free()
+ * Free the space used by the AR packet at the top of the data buffer. AR
+ * packets are processed in the order that they are received. This will
+ * free the oldest received packet which has not yet been freed. size is
+ * how much space the packet takes up.
+ */
+void
+hci1394_q_ar_free(hci1394_q_handle_t q_handle, uint_t size)
+{
+ hci1394_q_buf_t *descb;
+ hci1394_q_buf_t *datab;
+
+
+ ASSERT(q_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_ar_free_enter, HCI1394_TNF_HAL_STACK, "");
+
+ descb = &q_handle->q_desc;
+ datab = &q_handle->q_data;
+
+ /*
+	 * The packet spans multiple buffers. Theoretically a packet could be
+	 * split across more than two buffers for an ARRESP, but since the
+	 * buffers are allocated in at least 4K increments and the max packet
+	 * size is 2KBytes, that will not happen.
+ */
+ if ((datab->qb_ptrs.qp_current + size) > datab->qb_ptrs.qp_end) {
+ /* Add IM descriptor for used buffer back into Q */
+ hci1394_q_ar_write_IM(q_handle, descb,
+ datab->qb_cookie[datab->qb_ptrs.qp_current_buf
+ ].dmac_address,
+ datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size);
+
+ /* Go to the next buffer */
+ hci1394_q_next_buf(datab);
+
+		/* Update the next buffer's pointers for the partial packet */
+ size -= q_handle->q_space_left;
+ datab->qb_ptrs.qp_current += size;
+ q_handle->q_space_left =
+ datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size -
+ size;
+
+ /* Change the head pointer to the next IM descriptor */
+ q_handle->q_head += sizeof (hci1394_desc_t);
+ if ((q_handle->q_head + sizeof (hci1394_desc_t)) >
+ (descb->qb_ptrs.qp_bottom + 1)) {
+ q_handle->q_head = descb->qb_ptrs.qp_top;
+ }
+
+ /* Packet is only in one buffer */
+ } else {
+ q_handle->q_space_left -= size;
+ datab->qb_ptrs.qp_current += size;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_q_ar_free_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_q_ar_get32()
+ * Read a quadlet of data regardless of whether it is in the current buffer
+ * or has wrapped to the top buffer. If the address passed to this routine
+ * is past the bottom of the data buffer, this routine will automatically
+ * wrap back to the top of the Q and look in the correct offset from the
+ * top. Copy the data into the kernel virtual address provided.
+ */
+uint32_t
+hci1394_q_ar_get32(hci1394_q_handle_t q_handle, uint32_t *addr)
+{
+ hci1394_q_buf_t *data;
+ uintptr_t new_addr;
+ uint32_t data32;
+
+
+ ASSERT(q_handle != NULL);
+ ASSERT(addr != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_get32_enter, HCI1394_TNF_HAL_STACK, "");
+
+ data = &q_handle->q_data;
+
+ /*
+ * if the data has wrapped to the top of the buffer, adjust the address.
+ */
+ if ((uintptr_t)addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
+ new_addr = (uintptr_t)data->qb_ptrs.qp_top + ((uintptr_t)addr -
+ ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1));
+ data32 = ddi_get32(data->qb_buf.bi_handle,
+ (uint32_t *)new_addr);
+
+ /* data is before end of buffer */
+ } else {
+ data32 = ddi_get32(data->qb_buf.bi_handle, addr);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_q_get32_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (data32);
+}
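+
+
+/*
+ * Editorial sketch (hypothetical helper, not part of the driver): the
+ * wrap arithmetic used above, isolated. "bottom" is the last valid byte
+ * of the buffer, so an address d bytes past (bottom + 1) maps to
+ * top + d; in particular, bottom + 1 itself maps back to top.
+ */
+static uintptr_t
+q_wrap_addr(uintptr_t addr, uintptr_t top, uintptr_t bottom)
+{
+	if (addr > bottom) {
+		/* wrapped: continue from the top of the buffer */
+		return (top + (addr - (bottom + (uintptr_t)1)));
+	}
+	/* not wrapped: the address is usable as-is */
+	return (addr);
+}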
+
+
+/*
+ * hci1394_q_ar_rep_get8()
+ * Read a byte stream of data regardless of whether it is contiguous or has
+ * partially or fully wrapped to the top buffer. If the address passed to
+ * this routine is past the bottom of the data buffer, or address + size is
+ * past the bottom of the data buffer, this routine will automatically wrap
+ * back to the top of the Q and look in the correct offset from the top.
+ * Copy the data into the kernel virtual address provided.
+ */
+void
+hci1394_q_ar_rep_get8(hci1394_q_handle_t q_handle, uint8_t *dest,
+ uint8_t *q_addr, uint_t size)
+{
+ hci1394_q_buf_t *data;
+ uintptr_t new_addr;
+ uint_t new_size;
+ uintptr_t new_dest;
+
+
+ ASSERT(q_handle != NULL);
+ ASSERT(dest != NULL);
+ ASSERT(q_addr != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_ar_rep_get8_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ data = &q_handle->q_data;
+
+ /*
+ * There are three cases:
+ * 1) All of the data has wrapped.
+ * 2) Some of the data has not wrapped and some has wrapped.
+ * 3) None of the data has wrapped.
+ */
+
+ /* All of the data has wrapped, just adjust the starting address */
+ if ((uintptr_t)q_addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
+ new_addr = (uintptr_t)data->qb_ptrs.qp_top +
+ ((uintptr_t)q_addr - ((uintptr_t)data->qb_ptrs.qp_bottom +
+ (uintptr_t)1));
+ ddi_rep_get8(data->qb_buf.bi_handle, dest, (uint8_t *)new_addr,
+ size, DDI_DEV_AUTOINCR);
+
+ /*
+ * Some of the data has wrapped. Copy the data that hasn't wrapped,
+ * adjust the address, then copy the rest.
+ */
+ } else if (((uintptr_t)q_addr + (uintptr_t)size) >
+ ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1)) {
+ /* Copy first half */
+ new_size = (uint_t)(((uintptr_t)data->qb_ptrs.qp_bottom +
+ (uintptr_t)1) - (uintptr_t)q_addr);
+ ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, new_size,
+ DDI_DEV_AUTOINCR);
+
+ /* copy second half */
+ new_dest = (uintptr_t)dest + (uintptr_t)new_size;
+ new_size = size - new_size;
+ new_addr = (uintptr_t)data->qb_ptrs.qp_top;
+ ddi_rep_get8(data->qb_buf.bi_handle, (uint8_t *)new_dest,
+ (uint8_t *)new_addr, new_size, DDI_DEV_AUTOINCR);
+
+ /* None of the data has wrapped */
+ } else {
+ ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, size,
+ DDI_DEV_AUTOINCR);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_q_ar_rep_get8_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
+
+
+/*
+ * hci1394_q_ar_copy_to_mblk()
+ * Read a byte stream of data regardless of whether it is contiguous or has
+ * partially or fully wrapped to the top buffer. If the address passed to
+ * this routine is past the bottom of the data buffer, or address + size is
+ * past the bottom of the data buffer, this routine will automatically wrap
+ * back to the top of the Q and look in the correct offset from the top.
+ * Copy the data into the mblk provided. The services layer and the HAL use
+ * a private structure (h1394_mblk_t) to keep track of how much of the mblk
+ * to receive into, since we may have to break the transfer up into smaller
+ * blocks (e.g. a 1MByte block read would go out in 2KByte requests).
+ */
+void
+hci1394_q_ar_copy_to_mblk(hci1394_q_handle_t q_handle, uint8_t *addr,
+ h1394_mblk_t *mblk)
+{
+ uint8_t *new_addr;
+ uint_t bytes_left;
+ uint_t length;
+
+
+ ASSERT(q_handle != NULL);
+ ASSERT(addr != NULL);
+ ASSERT(mblk != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_copy_to_mblk_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* We return these variables to the Services Layer when we are done */
+ mblk->next_offset = mblk->curr_offset;
+ mblk->next_mblk = mblk->curr_mblk;
+ bytes_left = mblk->length;
+
+ /* the address we copy from will change as we change mblks */
+ new_addr = addr;
+
+ /* do while there are bytes left to copy */
+ do {
+ /*
+ * If the entire data portion of the current block transfer is
+ * contained within a single mblk.
+ */
+ if ((mblk->next_offset + bytes_left) <=
+ (mblk->next_mblk->b_datap->db_lim)) {
+ /* Copy the data into the mblk */
+ hci1394_q_ar_rep_get8(q_handle,
+ (uint8_t *)mblk->next_offset, new_addr, bytes_left);
+
+ /* increment the offset */
+ mblk->next_offset += bytes_left;
+ mblk->next_mblk->b_wptr = mblk->next_offset;
+
+ /* we have no more bytes to put into the buffer */
+ bytes_left = 0;
+
+ /*
+ * If our offset is at the end of data in this mblk, go
+ * to the next mblk.
+ */
+ if (mblk->next_offset >=
+ mblk->next_mblk->b_datap->db_lim) {
+ mblk->next_mblk = mblk->next_mblk->b_cont;
+ if (mblk->next_mblk != NULL) {
+ mblk->next_offset =
+ mblk->next_mblk->b_wptr;
+ }
+ }
+
+ /*
+ * The data portion of the current block transfer is spread
+ * across two or more mblk's
+ */
+ } else {
+ /* Figure out how much data is in this mblk */
+ length = mblk->next_mblk->b_datap->db_lim -
+ mblk->next_offset;
+
+ /* Copy the data into the mblk */
+ hci1394_q_ar_rep_get8(q_handle,
+ (uint8_t *)mblk->next_offset, new_addr, length);
+ mblk->next_mblk->b_wptr =
+ mblk->next_mblk->b_datap->db_lim;
+
+ /*
+ * update the bytes left and address to copy from, go
+ * to the next mblk.
+ */
+ bytes_left = bytes_left - length;
+ new_addr = (uint8_t *)((uintptr_t)new_addr +
+ (uintptr_t)length);
+ mblk->next_mblk = mblk->next_mblk->b_cont;
+ ASSERT(mblk->next_mblk != NULL);
+ mblk->next_offset = mblk->next_mblk->b_wptr;
+ }
+ } while (bytes_left > 0);
+
+ TNF_PROBE_0_DEBUG(hci1394_q_copy_to_mblk_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_q_ar_write_IM()
+ * Write an IM descriptor into the AR descriptor buffer passed in as qbuf.
+ * The IO address of the data buffer is passed in io_addr. datasize is the
+ * size of the data buffer to receive into.
+ */
+void
+hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
+ uint32_t io_addr, uint_t datasize)
+{
+ hci1394_desc_t *desc;
+ uint32_t data;
+ uint32_t command_ptr;
+
+
+ ASSERT(q_handle != NULL);
+ ASSERT(qbuf != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_q_ar_write_IM_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ /* Make sure enough room for IM */
+ if ((qbuf->qb_ptrs.qp_current + sizeof (hci1394_desc_t)) >
+ (qbuf->qb_ptrs.qp_bottom + 1)) {
+ hci1394_q_next_buf(qbuf);
+ } else {
+ /* Store the offset of the top of this descriptor block */
+ qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
+ qbuf->qb_ptrs.qp_begin);
+ }
+
+ /* Setup OpenHCI IM Header */
+ desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
+ data = DESC_AR_IM | (datasize & DESC_HDR_REQCOUNT_MASK);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
+ ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, datasize &
+ DESC_ST_RESCOUNT_MASK);
+
+ /*
+	 * Sync buffer in case the DMA engine is currently running. This must
+	 * be done before writing the command pointer in the previous
+	 * descriptor.
+ (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
+ qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
+
+ /*
+ * Setup the command pointer. This tells the HW where to get the
+	 * descriptor we just set up. This includes the IO address along with
+	 * a 4-bit count of 16-byte blocks. We only wrote one 16-byte block.
+ */
+ command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
+ ].dmac_address + qbuf->qb_ptrs.qp_offset) | 1);
+
+ /*
+	 * if we previously set up a descriptor, add this new descriptor into
+ * the previous descriptor's "next" pointer.
+ */
+ if (q_handle->q_previous != NULL) {
+ ddi_put32(qbuf->qb_buf.bi_handle,
+ &q_handle->q_previous->branch, command_ptr);
+ /* Sync buffer again, this gets the command pointer */
+ (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
+ qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
+ }
+
+ /* this is the new previous descriptor. Update the current pointer */
+ q_handle->q_previous = desc;
+ qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
+
+ /* If the DMA is not running, start it */
+ if (q_handle->q_dma_running == B_FALSE) {
+ q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
+ command_ptr);
+ q_handle->q_dma_running = B_TRUE;
+ /* the DMA is running, wake it up */
+ } else {
+ q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_q_ar_write_IM_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
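+
+
+/*
+ * Editorial sketch: the CommandPtr encoding used by both the AT and AR
+ * paths above. Per the OpenHCI spec, the low 4 bits of the command
+ * pointer hold Z, the number of 16-byte descriptor blocks at that
+ * address, and the remaining bits hold the 16-byte-aligned IO address.
+ * The names below are hypothetical, not from the driver.
+ */
+#define	EX_CMD_PTR_Z_MASK	0xF
+
+static uint32_t
+cmd_ptr_encode(uint32_t io_addr, uint32_t z)
+{
+	/* a 16-byte-aligned address leaves the low 4 bits free for Z */
+	ASSERT((io_addr & EX_CMD_PTR_Z_MASK) == 0);
+	return (io_addr | (z & EX_CMD_PTR_Z_MASK));
+}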
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_s1394if.c b/usr/src/uts/common/io/1394/adapters/hci1394_s1394if.c
new file mode 100644
index 0000000000..80948d8d9e
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_s1394if.c
@@ -0,0 +1,1256 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_s1394if.c
+ * The interface into the HAL from the services layer.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+
+#include <sys/1394/h1394.h>
+#include <sys/1394/ixl1394.h>
+#include <sys/1394/adapters/hci1394.h>
+
+
+static void hci1394_s1394if_shutdown(void *hal_private);
+static int hci1394_s1394if_phy(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private, int *result);
+static int hci1394_s1394if_write(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private, int *result);
+static int hci1394_s1394if_read(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private, int *result);
+static int hci1394_s1394if_lock(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private, int *result);
+static int hci1394_s1394if_write_response(void *hal_private,
+ cmd1394_cmd_t *cmd_id, h1394_cmd_priv_t *cmd_private, int *result);
+static int hci1394_s1394if_read_response(void *hal_private,
+ cmd1394_cmd_t *cmd_id, h1394_cmd_priv_t *cmd_private, int *result);
+static int hci1394_s1394if_lock_response(void *hal_private,
+ cmd1394_cmd_t *cmd_id, h1394_cmd_priv_t *cmd_private, int *result);
+static void hci1394_s1394if_response_complete(void *hal_private,
+ cmd1394_cmd_t *cmd_id, h1394_cmd_priv_t *cmd_private);
+static int hci1394_s1394if_reset_bus(void *hal_private);
+static int hci1394_s1394if_set_contender_bit(void *hal_private);
+static int hci1394_s1394if_set_root_holdoff_bit(void *hal_private);
+static int hci1394_s1394if_set_gap_count(void *hal_private, uint_t gap_count);
+static int hci1394_s1394if_update_config_rom(void *hal_private,
+ void *local_buf, uint_t quadlet_count);
+static int hci1394_s1394if_phy_filter_set(void *hal_private,
+ uint64_t mask, uint_t generation);
+static int hci1394_s1394if_phy_filter_clr(void *hal_private,
+ uint64_t mask, uint_t generation);
+static int hci1394_s1394if_short_bus_reset(void *hal_private);
+static int hci1394_s1394if_csr_read(void *hal_private,
+ uint_t offset, uint32_t *data);
+static int hci1394_s1394if_csr_write(void *hal_private,
+ uint_t offset, uint32_t data);
+static int hci1394_s1394if_csr_cswap32(void *hal_private, uint_t generation,
+ uint_t offset, uint32_t compare, uint32_t swap, uint32_t *old);
+static void hci1394_s1394if_power_state_change(void *hal_private,
+ h1394_node_pwr_flags_t nodeflags);
+
+
+/* entry points into HAL from Services Layer */
+h1394_evts_t hci1394_evts = {
+ H1394_EVTS_V1, /* hal_version */
+ 0, /* reserved */
+ hci1394_s1394if_shutdown, /* shutdown */
+ hci1394_s1394if_phy, /* send_phy_config_pkt */
+ hci1394_s1394if_read, /* read */
+ hci1394_s1394if_read_response, /* read_response */
+ hci1394_s1394if_write, /* write */
+ hci1394_s1394if_write_response, /* write_response */
+ hci1394_s1394if_response_complete, /* response_complete */
+ hci1394_s1394if_lock, /* lock */
+ hci1394_s1394if_lock_response, /* lock_response */
+ hci1394_alloc_isoch_dma, /* allocate_isoch_dma */
+ hci1394_free_isoch_dma, /* free_isoch_dma */
+ hci1394_start_isoch_dma, /* start_isoch_dma */
+ hci1394_stop_isoch_dma, /* stop_isoch_dma */
+ hci1394_update_isoch_dma, /* update_isoch_dma */
+ hci1394_s1394if_update_config_rom, /* update_config_rom */
+ hci1394_s1394if_reset_bus, /* bus_reset */
+ hci1394_s1394if_short_bus_reset, /* short_bus_reset */
+ hci1394_s1394if_set_contender_bit, /* set_contender_bit */
+ hci1394_s1394if_set_root_holdoff_bit, /* set_root_holdoff_bit */
+ hci1394_s1394if_set_gap_count, /* set_gap_count */
+ hci1394_s1394if_csr_read, /* csr_read */
+ hci1394_s1394if_csr_write, /* csr_write */
+ hci1394_s1394if_csr_cswap32, /* csr_cswap32 */
+ hci1394_s1394if_phy_filter_set, /* phys_arreq_enable_set */
+ hci1394_s1394if_phy_filter_clr, /* phys_arreq_enable_clr */
+ hci1394_s1394if_power_state_change /* node_power_state_change */
+};
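+
+
+/*
+ * Editorial sketch of how this jump table reaches the services layer.
+ * The actual registration lives in hci1394_attach.c; the field and
+ * argument names below are assumptions based on h1394_attach(9F) and
+ * may not match the driver exactly:
+ *
+ *	soft_state->halinfo.hal_events = hci1394_evts;
+ *	if (h1394_attach(&soft_state->halinfo, DDI_ATTACH,
+ *	    &sl_private) != DDI_SUCCESS)
+ *		(fail the attach)
+ */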
+
+
+/*
+ * hci1394_s1394if_shutdown()
+ * Shutdown the HAL. This is called when a critical error has been detected.
+ * This routine should shut down the HAL so that it will no longer send or
+ * receive information to/from the 1394 bus. The purpose of this function is
+ * to try to keep the machine from crashing.
+ */
+static void
+hci1394_s1394if_shutdown(void *hal_private)
+{
+ hci1394_state_t *soft_state;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_shutdown_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+ hci1394_shutdown(soft_state->drvinfo.di_dip);
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_shutdown_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_s1394if_phy()
+ * Write a phy packet out to the 1394 bus. A phy packet consists of one
+ * quadlet of data.
+ */
+static int
+hci1394_s1394if_phy(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private, int *result)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not in a bus reset or shutdown */
+ if (hci1394_state(&soft_state->drvinfo) != HCI1394_NORMAL) {
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_BUS_RESET) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ } else {
+ *result = H1394_STATUS_INTERNAL_ERROR;
+ }
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_async_phy(soft_state->async, cmd_id, cmd_private,
+ result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_phy_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
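+
+
+/*
+ * Editorial note: the bus-generation/shutdown check above is repeated
+ * verbatim in each request entry point that follows. A hypothetical
+ * helper (not in the driver) capturing the pattern:
+ */
+static int
+hci1394_s1394if_check_state(hci1394_state_t *soft_state, int *result)
+{
+	/* reject requests while in bus reset or after shutdown */
+	if (hci1394_state(&soft_state->drvinfo) != HCI1394_NORMAL) {
+		if (hci1394_state(&soft_state->drvinfo) ==
+		    HCI1394_BUS_RESET) {
+			*result = H1394_STATUS_INVALID_BUSGEN;
+		} else {
+			*result = H1394_STATUS_INTERNAL_ERROR;
+		}
+		return (DDI_FAILURE);
+	}
+	return (DDI_SUCCESS);
+}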
+
+
+/*
+ * hci1394_s1394if_write()
+ * Perform a 1394 write operation. This can be either a quadlet or block
+ * write.
+ */
+static int
+hci1394_s1394if_write(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private, int *result)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_write_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not in a bus reset or shutdown */
+ if (hci1394_state(&soft_state->drvinfo) != HCI1394_NORMAL) {
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_BUS_RESET) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ } else {
+ *result = H1394_STATUS_INTERNAL_ERROR;
+ }
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_async_write(soft_state->async, cmd_id, cmd_private,
+ result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_write_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_read()
+ * Perform a 1394 read operation. This can be either a quadlet or block
+ * read.
+ */
+static int
+hci1394_s1394if_read(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private, int *result)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_read_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not in a bus reset or shutdown */
+ if (hci1394_state(&soft_state->drvinfo) != HCI1394_NORMAL) {
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_BUS_RESET) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ } else {
+ *result = H1394_STATUS_INTERNAL_ERROR;
+ }
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_async_read(soft_state->async, cmd_id, cmd_private,
+ result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_read_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_lock()
+ * Perform a 1394/1212 lock operation. This can be one of the following lock
+ * operations: (CMD1394_LOCK_MASK_SWAP, CMD1394_LOCK_COMPARE_SWAP,
+ * CMD1394_LOCK_FETCH_ADD, CMD1394_LOCK_LITTLE_ADD, CMD1394_LOCK_BOUNDED_ADD,
+ * CMD1394_LOCK_WRAP_ADD)
+ */
+static int
+hci1394_s1394if_lock(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private, int *result)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_lock_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not in a bus reset or shutdown */
+ if (hci1394_state(&soft_state->drvinfo) != HCI1394_NORMAL) {
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_BUS_RESET) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ } else {
+ *result = H1394_STATUS_INTERNAL_ERROR;
+ }
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_lock_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_async_lock(soft_state->async, cmd_id, cmd_private,
+ result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_lock_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_lock_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_lock_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_write_response()
+ * Send a response to a write request received off of the 1394 bus. This
+ * could have been with a quadlet or block write request.
+ */
+static int
+hci1394_s1394if_write_response(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private, int *result)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_write_response_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not in a bus reset or shutdown */
+ if (hci1394_state(&soft_state->drvinfo) != HCI1394_NORMAL) {
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_BUS_RESET) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ } else {
+ *result = H1394_STATUS_INTERNAL_ERROR;
+ }
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_write_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_async_write_response(soft_state->async, cmd_id,
+ cmd_private, result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_write_response_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_write_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_write_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_read_response()
+ * Send a response to a read request received off of the 1394 bus. This
+ * could have been with a quadlet or block read request.
+ */
+static int
+hci1394_s1394if_read_response(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private, int *result)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_read_response_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not in a bus reset or shutdown */
+ if (hci1394_state(&soft_state->drvinfo) != HCI1394_NORMAL) {
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_BUS_RESET) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ } else {
+ *result = H1394_STATUS_INTERNAL_ERROR;
+ }
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_read_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_async_read_response(soft_state->async, cmd_id,
+ cmd_private, result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_read_response_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_read_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_read_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_lock_response()
+ * Send a response to a lock request received off of the 1394 bus. This
+ * could have been one of the following lock operations:
+ * (CMD1394_LOCK_MASK_SWAP, CMD1394_LOCK_COMPARE_SWAP, CMD1394_LOCK_FETCH_ADD,
+ * CMD1394_LOCK_LITTLE_ADD, CMD1394_LOCK_BOUNDED_ADD, CMD1394_LOCK_WRAP_ADD)
+ */
+static int
+hci1394_s1394if_lock_response(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private, int *result)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_lock_response_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not in a bus reset or shutdown */
+ if (hci1394_state(&soft_state->drvinfo) != HCI1394_NORMAL) {
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_BUS_RESET) {
+ *result = H1394_STATUS_INVALID_BUSGEN;
+ } else {
+ *result = H1394_STATUS_INTERNAL_ERROR;
+ }
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_lock_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_async_lock_response(soft_state->async, cmd_id,
+ cmd_private, result);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_async_lock_response_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_lock_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_lock_response_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_response_complete()
+ * This notifies the HAL that the services layer and target driver are done
+ * with a command that was received off of the 1394 bus. This will usually
+ * be called after the response to the command has been command_complete'd.
+ * The HAL is free to re-use the command or free up the memory from this
+ * command after this call has returned. This should only be called for
+ * ARREQs.
+ */
+static void
+hci1394_s1394if_response_complete(void *hal_private, cmd1394_cmd_t *cmd_id,
+ h1394_cmd_priv_t *cmd_private)
+{
+ hci1394_state_t *soft_state;
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_response_complete_enter,
+ HCI1394_TNF_HAL_STACK, "");
+ soft_state = (hci1394_state_t *)hal_private;
+ hci1394_async_response_complete(soft_state->async, cmd_id, cmd_private);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_response_complete_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_s1394if_reset_bus()
+ * This routine resets the 1394 bus. It performs a "long" bus reset. It
+ * should work on all OpenHCI adapters.
+ */
+static int
+hci1394_s1394if_reset_bus(void *hal_private)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_reset_bus_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not shutdown */
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN) {
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_reset_bus_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_ohci_bus_reset(soft_state->ohci);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_s1394if_reset_bus_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_reset_bus_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_reset_bus_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_set_contender_bit()
+ * This routine sets up the PHY so that the selfid contender bit will be set
+ * on subsequent bus resets. This routine will fail when we have a 1394-1995
+ * PHY since this PHY does not have a SW controllable contender bit.
+ */
+static int
+hci1394_s1394if_set_contender_bit(void *hal_private)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_contender_bit_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not shutdown */
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN) {
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_contender_bit_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if (soft_state->halinfo.phy == H1394_PHY_1995) {
+ TNF_PROBE_0(hci1394_s1394if_set_contender_bit_phyver_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+		TNF_PROBE_0_DEBUG(hci1394_s1394if_set_contender_bit_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_ohci_contender_enable(soft_state->ohci);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_s1394if_set_contender_bit_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_contender_bit_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_contender_bit_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_set_root_holdoff_bit()
+ * This routine will set the root holdoff bit in the PHY. The Services Layer
+ * should send out a PHY configuration packet first to tell everyone which
+ * node to set the root holdoff bit on. If it is our root holdoff bit we
+ * are setting, the PHY will automatically set it unless we have an old
+ * (1394-1995) PHY. If we have a 1394-1995 PHY, the SL needs to call this
+ * routine after sending the PHY configuration packet. The SL also needs to
+ * call this if it wants to perform a long bus reset and have the root
+ * holdoff bit set. We do this so that we do not have to do a read before
+ * the write. A PHY register write has less of a chance of failing.
+ */
+static int
+hci1394_s1394if_set_root_holdoff_bit(void *hal_private)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_root_holdoff_bit_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not shutdown */
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN) {
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_root_holdoff_bit_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_ohci_root_holdoff_enable(soft_state->ohci);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_s1394if_set_root_holdoff_bit_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_root_holdoff_bit_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_root_holdoff_bit_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_set_gap_count()
+ * This routine will set the gap count bit in the PHY. The Services Layer
+ * should send out a PHY configuration packet first to tell everyone what
+ * gap count to use. Our PHY will automatically set the gap count unless we
+ * have an old (1394-1995) PHY. If we have a 1394-1995 PHY, the SL needs to
+ * call this routine after sending the PHY configuration packet and before
+ * generating a bus reset. The SL also needs to call before the they call to
+ * perform a long bus reset. We do this so that we do not have to do a PHY
+ * read before the write. A PHY register write has less of a chance of
+ * failing.
+ */
+static int
+hci1394_s1394if_set_gap_count(void *hal_private, uint_t gap_count)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_gap_count_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not shutdown */
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN) {
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_gap_count_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_ohci_gap_count_set(soft_state->ohci, gap_count);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_s1394if_set_gap_count_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_gap_count_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_set_gap_count_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_phy_filter_set()
+ * Reads/writes to physically mapped memory from devices on the bus are
+ * disabled by default. They can be enabled on a node by node basis. All
+ * physical accesses are disabled every bus reset so they must be re-enabled
+ * every bus reset (this is due to the fact that the node ids change every
+ * bus reset). A 64-bit mask is passed in to enable nodes to be able to
+ * rd/wr physically mapped memory over the 1394 bus. A bit set to 1 enables
+ * that node's physical accesses; a bit set to 0 does nothing (i.e. a
+ * bitwise or is performed). The LSB of the mask (bit 0) maps to node #0,
+ * bit #62 maps to node 62. The MSB (#63) is not used since there can only
+ * be 63 nodes (0 - 62) on the bus.
+ *
+ * hci1394_s1394if_phy_filter_clr() is used to disable access to physical
+ * memory. This is only required if the node had previously been enabled.
+ *
+ * generation is used to verify that we have not gotten a bus reset since
+ * the mask was built.
+ */
+static int
+hci1394_s1394if_phy_filter_set(void *hal_private,
+ uint64_t mask, uint_t generation)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_filter_set_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not shutdown */
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN) {
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_filter_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_ohci_phy_filter_set(soft_state->ohci, mask,
+ generation);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_s1394if_phy_filter_set_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_filter_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_filter_set_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
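+
+
+/*
+ * Editorial sketch (hypothetical, not in the driver): building the
+ * 64-bit node mask described above. The node numbers here are purely
+ * illustrative.
+ */
+static int
+phy_filter_enable_nodes_example(void *hal_private, uint_t generation)
+{
+	/* enable physical access for nodes 2 and 5 of this generation */
+	uint64_t mask = ((uint64_t)1 << 2) | ((uint64_t)1 << 5);
+
+	return (hci1394_s1394if_phy_filter_set(hal_private, mask,
+	    generation));
+}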
+
+
+/*
+ * hci1394_s1394if_phy_filter_clr()
+ * Reads/writes to physically mapped memory from devices on the bus are
+ * disabled by default. They can be enabled/disabled on a node by node basis.
+ * All physical accesses are disabled every bus reset so they must be
+ * re-enabled every bus reset (this is due to the fact that the node ids
+ * change every bus reset). Only nodes which have been enabled and no longer
+ * need access to physical memory need to be disabled.
+ *
+ * A 64-bit mask is passed in to disable nodes from being able to rd/wr
+ * physically mapped memory over the 1394 bus. A bit set to 1 disables that
+ * node's physical accesses; a bit set to 0 does nothing (i.e. a bitwise or
+ * is performed). The LSB of the mask (bit 0) maps to node #0, bit #62 maps
+ * to node 62. The MSB (#63) is not used since there can only be 63 nodes
+ * (0 - 62) on the bus.
+ *
+ * hci1394_s1394if_phy_filter_set() is used to enable access to physical
+ * memory.
+ *
+ * generation is used to verify that we have not gotten a bus reset since
+ * the mask was built.
+ */
+static int
+hci1394_s1394if_phy_filter_clr(void *hal_private,
+ uint64_t mask, uint_t generation)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_filter_clr_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not shutdown */
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN) {
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_filter_clr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_ohci_phy_filter_clr(soft_state->ohci, mask,
+ generation);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_s1394if_phy_filter_clr_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_filter_clr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_phy_filter_clr_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_short_bus_reset()
+ * This routine resets the 1394 bus. It performs a "short" bus reset. It
+ * will only work on adapters with a 1394A or later PHY. Calling this routine
+ * when we have a 1394-1995 PHY is an error.
+ */
+static int
+hci1394_s1394if_short_bus_reset(void *hal_private)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_short_bus_reset_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not shutdown */
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN) {
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_short_bus_reset_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if (soft_state->halinfo.phy == H1394_PHY_1995) {
+ TNF_PROBE_0(hci1394_s1394if_short_bus_reset_phyver_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+		TNF_PROBE_0_DEBUG(hci1394_s1394if_short_bus_reset_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ status = hci1394_ohci_bus_reset_short(soft_state->ohci);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_s1394if_short_bus_reset_fail,
+ HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_short_bus_reset_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_short_bus_reset_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_update_config_rom()
+ * This routine updates the configuration ROM. It copies "quadlet_count"
+ * 32-bit words from "local_buf" to the config ROM starting at the first
+ * location in config ROM. This routine is meant to update the entire config
+ * ROM and not meant for a partial update.
+ */
+static int
+hci1394_s1394if_update_config_rom(void *hal_private,
+ void *local_buf, uint_t quadlet_count)
+{
+ hci1394_state_t *soft_state;
+
+
+ ASSERT(hal_private != NULL);
+ ASSERT(local_buf != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_update_config_rom_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not shutdown */
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN) {
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_update_config_rom_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ hci1394_ohci_cfgrom_update(soft_state->ohci, local_buf, quadlet_count);
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_update_config_rom_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_s1394if_csr_read()
+ * CSR register read interface
+ * For more information on CSR registers, see
+ * IEEE 1212
+ * IEEE 1394-1995
+ * section 8.3.2
+ * IEEE P1394A Draft 3.0
+ * sections 10.32,10.33
+ */
+static int
+hci1394_s1394if_csr_read(void *hal_private, uint_t offset, uint32_t *data)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ ASSERT(data != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_csr_read_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not shutdown */
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN) {
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_csr_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ switch (offset) {
+ case CSR_STATE_CLEAR:
+ hci1394_csr_state_get(soft_state->csr, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_STATE_SET:
+ /* Write Only Register */
+ status = DDI_FAILURE;
+ break;
+ case CSR_NODE_IDS:
+ hci1394_ohci_nodeid_get(soft_state->ohci, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_RESET_START:
+ /* Not supported */
+ status = DDI_FAILURE;
+ break;
+ case CSR_SPLIT_TIMEOUT_HI:
+ hci1394_csr_split_timeout_hi_get(soft_state->csr, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_SPLIT_TIMEOUT_LO:
+ hci1394_csr_split_timeout_lo_get(soft_state->csr, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_CYCLE_TIME:
+ /* CYCLE_TIME is implemented in HW */
+ hci1394_ohci_cycletime_get(soft_state->ohci, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_BUS_TIME:
+ /* BUS_TIME is implemented in the hci1394_ohci_* SW */
+ hci1394_ohci_bustime_get(soft_state->ohci, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_BUSY_TIMEOUT:
+ hci1394_ohci_atreq_retries_get(soft_state->ohci, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_BUS_MANAGER_ID:
+ /* BUS_MANAGER_ID is implemented in HW */
+ status = hci1394_ohci_csr_read(soft_state->ohci, 0, data);
+ break;
+ case CSR_BANDWIDTH_AVAILABLE:
+ /* BANDWIDTH_AVAILABLE is implemented in HW */
+ status = hci1394_ohci_csr_read(soft_state->ohci, 1, data);
+ break;
+ case CSR_CHANNELS_AVAILABLE_HI:
+ /* CHANNELS_AVAILABLE_HI is implemented in HW */
+ status = hci1394_ohci_csr_read(soft_state->ohci, 2, data);
+ break;
+ case CSR_CHANNELS_AVAILABLE_LO:
+ /* CHANNELS_AVAILABLE_LO is implemented in HW */
+ status = hci1394_ohci_csr_read(soft_state->ohci, 3, data);
+ break;
+ default:
+ status = DDI_FAILURE;
+ break;
+ }
+
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_s1394if_csr_read_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_uint, csr_address, offset);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_csr_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (status);
+}
+
+
+/*
+ * hci1394_s1394if_csr_write()
+ * CSR register write interface
+ * For more information on CSR registers, see
+ * IEEE 1212
+ * IEEE 1394-1995
+ * section 8.3.2
+ * IEEE P1394A Draft 3.0
+ * sections 10.32,10.33
+ */
+static int
+hci1394_s1394if_csr_write(void *hal_private, uint_t offset, uint32_t data)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_csr_write_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not shutdown */
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN) {
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_csr_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ switch (offset) {
+ case CSR_STATE_CLEAR:
+ hci1394_csr_state_bclr(soft_state->csr, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_STATE_SET:
+ hci1394_csr_state_bset(soft_state->csr, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_NODE_IDS:
+ hci1394_ohci_nodeid_set(soft_state->ohci, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_RESET_START:
+ /* Not supported */
+ status = DDI_FAILURE;
+ break;
+
+ /*
+	 * There is a race condition when updating the split timeout
+	 * due to the nature of the interface (i.e. having separate
+	 * hi and lo registers).
+ */
+ case CSR_SPLIT_TIMEOUT_HI:
+ hci1394_csr_split_timeout_hi_set(soft_state->csr, data);
+ /*
+ * update the pending list timeout value. The split timeout
+ * is stored in 1394 bus cycles and the timeout is specified in
+ * nS. Therefore, we need to convert the split timeout into nS.
+ */
+ hci1394_async_pending_timeout_update(soft_state->async,
+ OHCI_BUS_CYCLE_TO_nS(hci1394_csr_split_timeout_get(
+ soft_state->csr)));
+ status = DDI_SUCCESS;
+ break;
+ case CSR_SPLIT_TIMEOUT_LO:
+ hci1394_csr_split_timeout_lo_set(soft_state->csr, data);
+ /*
+ * update the pending list timeout value. The split timeout
+ * is stored in 1394 bus cycles and the timeout is specified in
+ * nS. Therefore, we need to convert the split timeout into nS.
+ */
+ hci1394_async_pending_timeout_update(soft_state->async,
+ OHCI_BUS_CYCLE_TO_nS(hci1394_csr_split_timeout_get(
+ soft_state->csr)));
+ status = DDI_SUCCESS;
+ break;
+
+ case CSR_CYCLE_TIME:
+ hci1394_ohci_cycletime_set(soft_state->ohci, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_BUS_TIME:
+ hci1394_ohci_bustime_set(soft_state->ohci, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_BUSY_TIMEOUT:
+ hci1394_ohci_atreq_retries_set(soft_state->ohci, data);
+ status = DDI_SUCCESS;
+ break;
+ case CSR_BUS_MANAGER_ID:
+ /* Invalid access, only read/cswap32 allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_BANDWIDTH_AVAILABLE:
+ /* Invalid access, only read/cswap32 allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_CHANNELS_AVAILABLE_HI:
+ /* Invalid access, only read/cswap32 allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_CHANNELS_AVAILABLE_LO:
+ /* Invalid access, only read/cswap32 allowed */
+ status = DDI_FAILURE;
+ break;
+ default:
+ status = DDI_FAILURE;
+ break;
+ }
+
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_s1394if_csr_write_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_uint, csr_address, offset);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_csr_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (status);
+}
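+
+
+/*
+ * Editorial sketch (hypothetical, not in the driver): updating the
+ * split timeout through this interface. Because SPLIT_TIMEOUT is
+ * exposed as separate hi and lo registers, a reader can observe the
+ * intermediate state between the two writes; writing both halves back
+ * to back keeps that window small.
+ */
+static int
+split_timeout_update_example(void *hal_private, uint32_t hi, uint32_t lo)
+{
+	if (hci1394_s1394if_csr_write(hal_private, CSR_SPLIT_TIMEOUT_HI,
+	    hi) != DDI_SUCCESS) {
+		return (DDI_FAILURE);
+	}
+	return (hci1394_s1394if_csr_write(hal_private, CSR_SPLIT_TIMEOUT_LO,
+	    lo));
+}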
+
+
+/*
+ * hci1394_s1394if_csr_cswap32()
+ * CSR register cswap32 interface
+ * For more information on CSR registers, see
+ * IEEE 1212
+ * IEEE 1394-1995
+ * section 8.3.2
+ * IEEE P1394A Draft 3.0
+ * sections 10.32,10.33
+ */
+static int
+hci1394_s1394if_csr_cswap32(void *hal_private, uint_t generation, uint_t offset,
+ uint32_t compare, uint32_t swap, uint32_t *old)
+{
+ hci1394_state_t *soft_state;
+ int status;
+
+
+ ASSERT(hal_private != NULL);
+ ASSERT(old != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_csr_cswap32_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ soft_state = (hci1394_state_t *)hal_private;
+
+ /* make sure we are not shutdown */
+ if (hci1394_state(&soft_state->drvinfo) == HCI1394_SHUTDOWN) {
+ TNF_PROBE_0(hci1394_state_fail, HCI1394_TNF_HAL_ERROR, "");
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_csr_cswap32_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ switch (offset) {
+ case CSR_STATE_CLEAR:
+ /* Invalid access, only read/write allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_STATE_SET:
+ /* Invalid access, only write allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_NODE_IDS:
+ /* Invalid access, only read/write allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_RESET_START:
+ /* Invalid access, only read/write allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_SPLIT_TIMEOUT_HI:
+ /* Invalid access, only read/write allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_SPLIT_TIMEOUT_LO:
+ /* Invalid access, only read/write allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_CYCLE_TIME:
+ /* Invalid access, only read/write allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_BUS_TIME:
+ /* Invalid access, only read/write allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_BUSY_TIMEOUT:
+ /* Invalid access, only read/write allowed */
+ status = DDI_FAILURE;
+ break;
+ case CSR_BUS_MANAGER_ID:
+ /* BUS_MANAGER_ID is implemented in HW */
+ status = hci1394_ohci_csr_cswap(soft_state->ohci, generation,
+ OHCI_CSR_SEL_BUS_MGR_ID, compare, swap, old);
+ break;
+ case CSR_BANDWIDTH_AVAILABLE:
+ /* BANDWIDTH_AVAILABLE is implemented in HW */
+ status = hci1394_ohci_csr_cswap(soft_state->ohci, generation,
+ OHCI_CSR_SEL_BANDWIDTH_AVAIL, compare, swap, old);
+ break;
+ case CSR_CHANNELS_AVAILABLE_HI:
+ /* CHANNELS_AVAILABLE_HI is implemented in HW */
+ status = hci1394_ohci_csr_cswap(soft_state->ohci, generation,
+ OHCI_CSR_SEL_CHANS_AVAIL_HI, compare, swap, old);
+ break;
+ case CSR_CHANNELS_AVAILABLE_LO:
+ /* CHANNELS_AVAILABLE_LO is implemented in HW */
+ status = hci1394_ohci_csr_cswap(soft_state->ohci, generation,
+ OHCI_CSR_SEL_CHANS_AVAIL_LO, compare, swap, old);
+ break;
+ default:
+ status = DDI_FAILURE;
+ break;
+ }
+
+ if (status != DDI_SUCCESS) {
+		TNF_PROBE_1(hci1394_s1394if_csr_cswap32_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_uint, csr_address, offset);
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_s1394if_csr_cswap32_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (status);
+}
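+
+
+/*
+ * Editorial sketch (hypothetical, not in the driver): the classic
+ * compare-swap allocation loop that csr_cswap32 supports, here taking
+ * "units" of bandwidth from BANDWIDTH_AVAILABLE. When the cswap misses
+ * (another node updated the register first), old holds the fresh value
+ * and the loop retries with it.
+ */
+static int
+bandwidth_alloc_example(void *hal_private, uint_t generation, uint32_t units)
+{
+	uint32_t avail;
+	uint32_t old;
+
+	if (hci1394_s1394if_csr_read(hal_private, CSR_BANDWIDTH_AVAILABLE,
+	    &avail) != DDI_SUCCESS) {
+		return (DDI_FAILURE);
+	}
+	for (;;) {
+		if (avail < units) {
+			/* not enough bandwidth left to satisfy the request */
+			return (DDI_FAILURE);
+		}
+		if (hci1394_s1394if_csr_cswap32(hal_private, generation,
+		    CSR_BANDWIDTH_AVAILABLE, avail, avail - units,
+		    &old) != DDI_SUCCESS) {
+			/* e.g. shutdown or stale bus generation */
+			return (DDI_FAILURE);
+		}
+		if (old == avail) {
+			/* the swap took effect */
+			return (DDI_SUCCESS);
+		}
+		/* lost the race; retry with the value we actually saw */
+		avail = old;
+	}
+}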
+
+
+/*
+ * hci1394_s1394if_power_state_change()
+ * Signals that a change in the bus topology has taken place which may affect
+ * power management.
+ */
+/*ARGSUSED*/
+static void
+hci1394_s1394if_power_state_change(void *hal_private,
+ h1394_node_pwr_flags_t nodeflags)
+{
+ /* not implemented */
+}
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_tlabel.c b/usr/src/uts/common/io/1394/adapters/hci1394_tlabel.c
new file mode 100644
index 0000000000..f96a48f66d
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_tlabel.c
@@ -0,0 +1,519 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_tlabel.c
+ * These routines track the tlabel usage for a 1394 adapter.
+ */
+
+#include <sys/kmem.h>
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/1394/ieee1394.h>
+#include <sys/1394/adapters/hci1394.h>
+
+
+/*
+ * hci1394_tlabel_init()
+ * Initialize the tlabel structures. These structures will be protected
+ * by a mutex at the iblock_cookie passed in. Bad tlabels become usable
+ * again once more than reclaim_time_nS nanoseconds have gone by. init()
+ * returns a handle to be used for the rest of the tlabel functions.
+ */
+void
+hci1394_tlabel_init(hci1394_drvinfo_t *drvinfo, hrtime_t reclaim_time_nS,
+ hci1394_tlabel_handle_t *tlabel_handle)
+{
+ hci1394_tlabel_t *tstruct;
+
+
+ ASSERT(tlabel_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_init_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* alloc space for tlabel data */
+ tstruct = kmem_alloc(sizeof (hci1394_tlabel_t), KM_SLEEP);
+
+ /* setup handle which is returned from this function */
+ *tlabel_handle = tstruct;
+
+ /*
+ * Initialize tlabel structure. We start with max node set to the
+ * maxiumum node we could have so that we make sure the arrays are
+ * initialized correctly in hci1394_tlabel_reset().
+ */
+ tstruct->tb_drvinfo = drvinfo;
+ tstruct->tb_reclaim_time = reclaim_time_nS;
+ tstruct->tb_max_node = TLABEL_RANGE - 1;
+ tstruct->tb_bcast_sent = B_FALSE;
+
+ mutex_init(&tstruct->tb_mutex, NULL, MUTEX_DRIVER,
+ drvinfo->di_iblock_cookie);
+
+ /*
+ * The mutex must be initialized before tlabel_reset()
+ * is called. This is because tlabel_reset is also
+ * used in normal tlabel processing (i.e. not just during
+ * initialization)
+ */
+ hci1394_tlabel_reset(tstruct);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_init_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlabel_fini()
+ * Frees up the space allocated in init(). Notice that a pointer to the
+ * handle is used for the parameter. fini() will set your handle to NULL
+ * before returning.
+ */
+void
+hci1394_tlabel_fini(hci1394_tlabel_handle_t *tlabel_handle)
+{
+ hci1394_tlabel_t *tstruct;
+
+
+ ASSERT(tlabel_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_fini_enter, HCI1394_TNF_HAL_STACK, "");
+
+ tstruct = (hci1394_tlabel_t *)*tlabel_handle;
+
+ mutex_destroy(&tstruct->tb_mutex);
+ kmem_free(tstruct, sizeof (hci1394_tlabel_t));
+
+ /* set handle to null. This helps catch bugs. */
+ *tlabel_handle = NULL;
+
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_fini_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlabel_alloc()
+ * alloc a tlabel based on the node id. If alloc fails, we are out of
+ * tlabels for that node. See comments before set_reclaim_time() on when
+ *    bad tlabels are free to be used again.
+ */
+int
+hci1394_tlabel_alloc(hci1394_tlabel_handle_t tlabel_handle, uint_t destination,
+ hci1394_tlabel_info_t *tlabel_info)
+{
+ uint_t node_number;
+ uint_t index;
+ uint64_t bad;
+ uint64_t free;
+ hrtime_t time;
+ uint8_t last;
+
+
+ ASSERT(tlabel_handle != NULL);
+ ASSERT(tlabel_info != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_alloc_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* copy destination into tlabel_info */
+ tlabel_info->tbi_destination = destination;
+
+ /* figure out what node we are going to */
+ node_number = IEEE1394_NODE_NUM(destination);
+
+ mutex_enter(&tlabel_handle->tb_mutex);
+
+	/*
+	 * Keep track of whether we have sent out a broadcast request and of
+	 * the maximum node number we have sent to; both are used to optimize
+	 * bus reset processing.
+	 */
+ if (node_number == IEEE1394_BROADCAST_NODEID) {
+ tlabel_handle->tb_bcast_sent = B_TRUE;
+ } else if (node_number > tlabel_handle->tb_max_node) {
+ tlabel_handle->tb_max_node = node_number;
+ }
+
+	/* make local copies so the expressions below stay short :-) */
+ bad = tlabel_handle->tb_bad[node_number];
+ free = tlabel_handle->tb_free[node_number];
+ time = tlabel_handle->tb_bad_timestamp[node_number];
+ last = tlabel_handle->tb_last[node_number];
+
+ /*
+ * If there are any bad tlabels, see if the last bad tlabel recorded for
+ * this nodeid is now good to use. If so, add all bad tlabels for that
+ * node id back into the free list
+ *
+ * NOTE: This assumes that bad tlabels are infrequent.
+ */
+ if (bad != 0) {
+ if (gethrtime() > time) {
+
+ /* add the bad tlabels back into the free list */
+ free |= bad;
+
+ /* clear the bad list */
+ bad = 0;
+
+ TNF_PROBE_1(hci1394_tlabel_free_bad,
+ HCI1394_TNF_HAL_ERROR, "", tnf_uint, nodeid,
+ node_number);
+ }
+ }
+
+ /*
+ * Find a free tlabel. This will break out of the loop once it finds a
+ * tlabel. There are a total of TLABEL_RANGE tlabels. The alloc
+ * rotates the check so that we don't always use the same tlabel. It
+ * stores the last tlabel used in last.
+ */
+ for (index = 0; index < TLABEL_RANGE; index++) {
+
+ /* if the next tlabel to check is free */
+ if ((free & ((uint64_t)1 << last)) != 0) {
+ /* we are using this tlabel */
+ tlabel_info->tbi_tlabel = last;
+
+ TNF_PROBE_2_DEBUG(hci1394_tlabel_alloc,
+ HCI1394_TNF_HAL_TLABEL, "", tnf_uint, nodeid,
+ node_number, tnf_uint, alloced_tlabel,
+ tlabel_info->tbi_tlabel);
+
+ /* take it out of the free list */
+ free = free & ~((uint64_t)1 << last);
+
+ /*
+ * increment the last count so we start checking on the
+ * next tlabel next alloc(). Note the rollover at
+ * TLABEL_RANGE since we only have TLABEL_RANGE tlabels.
+ */
+ (last)++;
+ if (last >= TLABEL_RANGE) {
+ last = 0;
+ }
+
+ /* Copy the copies back */
+ tlabel_handle->tb_bad[node_number] = bad;
+ tlabel_handle->tb_free[node_number] = free;
+ tlabel_handle->tb_bad_timestamp[node_number] = time;
+ tlabel_handle->tb_last[node_number] = last;
+
+ /* unlock the tlabel structure */
+ mutex_exit(&tlabel_handle->tb_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_alloc_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * This tlabel is not free, lets go to the next one. Note the
+ * rollover at TLABEL_RANGE since we only have TLABEL_RANGE
+ * tlabels.
+ */
+ (last)++;
+ if (last >= TLABEL_RANGE) {
+ last = 0;
+ }
+ }
+
+ /* Copy the copies back */
+ tlabel_handle->tb_bad[node_number] = bad;
+ tlabel_handle->tb_free[node_number] = free;
+ tlabel_handle->tb_bad_timestamp[node_number] = time;
+ tlabel_handle->tb_last[node_number] = last;
+
+ mutex_exit(&tlabel_handle->tb_mutex);
+
+ TNF_PROBE_1(hci1394_tlabel_alloc_empty, HCI1394_TNF_HAL_ERROR, "",
+ tnf_string, errmsg, "No more tlabels left to alloc");
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_alloc_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_FAILURE);
+}
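+
+/*
+ * Illustrative sketch, not part of the driver: the allocator above is a
+ * rotating scan of a 64-bit free mask. The hypothetical helper below shows
+ * the same find/clear/advance logic that hci1394_tlabel_alloc() performs
+ * while holding tb_mutex. It returns 0 on success and -1 when no bit in
+ * the mask is free.
+ */
+static int
+tlabel_bitmask_alloc_sketch(uint64_t *free_mask, uint8_t *last, uint8_t *out)
+{
+	uint_t index;
+
+	for (index = 0; index < TLABEL_RANGE; index++) {
+		/* if the next tlabel to check is free, claim it */
+		if ((*free_mask & ((uint64_t)1 << *last)) != 0) {
+			*out = *last;
+
+			/* take it out of the free mask */
+			*free_mask &= ~((uint64_t)1 << *last);
+
+			/* start the next scan at the following tlabel */
+			*last = (*last + 1) % TLABEL_RANGE;
+
+			return (0);
+		}
+
+		/* not free; advance with rollover at TLABEL_RANGE */
+		*last = (*last + 1) % TLABEL_RANGE;
+	}
+
+	return (-1);
+}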
+
+
+/*
+ * hci1394_tlabel_free()
+ * free the previously alloc()'d tlabel. Once a tlabel has been free'd, it
+ * can be used again when alloc() is called.
+ */
+void
+hci1394_tlabel_free(hci1394_tlabel_handle_t tlabel_handle,
+ hci1394_tlabel_info_t *tlabel_info)
+{
+ uint_t node_number;
+ uint_t tlabel;
+
+
+ ASSERT(tlabel_handle != NULL);
+ ASSERT(tlabel_info != NULL);
+ ASSERT(tlabel_info->tbi_tlabel <= TLABEL_MASK);
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_free_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* figure out what node and tlabel we are using */
+ node_number = IEEE1394_NODE_NUM(tlabel_info->tbi_destination);
+ tlabel = tlabel_info->tbi_tlabel;
+
+ TNF_PROBE_2_DEBUG(hci1394_tlabel_free,
+ HCI1394_TNF_HAL_TLABEL, "", tnf_uint, nodeid, node_number,
+ tnf_uint, freed_tlabel, tlabel_info->tbi_tlabel);
+
+ mutex_enter(&tlabel_handle->tb_mutex);
+
+ /*
+ * Put the tlabel back in the free list and NULL out the (void *) in the
+ * lookup structure. You wouldn't expect to have to null out the lookup
+ * structure, but we know first hand that bad HW will send invalid
+ * tlabels which could really mess things up if you didn't :-)
+ */
+ tlabel_handle->tb_lookup[node_number][tlabel] = NULL;
+ tlabel_handle->tb_free[node_number] |= ((uint64_t)1 << tlabel);
+
+ mutex_exit(&tlabel_handle->tb_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_free_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlabel_register()
+ *    Register an opaque command with an alloc()'d tlabel. Each nodeID has
+ *    its own tlabel list.
+ */
+void
+hci1394_tlabel_register(hci1394_tlabel_handle_t tlabel_handle,
+ hci1394_tlabel_info_t *tlabel_info, void *cmd)
+{
+ uint_t node_number;
+ uint_t tlabel;
+
+
+ ASSERT(tlabel_handle != NULL);
+ ASSERT(tlabel_info != NULL);
+ ASSERT(tlabel_info->tbi_tlabel <= TLABEL_MASK);
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_register_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* figure out what node and tlabel we are using */
+ node_number = IEEE1394_NODE_NUM(tlabel_info->tbi_destination);
+ tlabel = tlabel_info->tbi_tlabel;
+
+ mutex_enter(&tlabel_handle->tb_mutex);
+
+ /* enter the (void *) into the lookup table */
+ tlabel_handle->tb_lookup[node_number][tlabel] = cmd;
+
+ mutex_exit(&tlabel_handle->tb_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_register_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlabel_lookup()
+ *    returns (in cmd) the opaque command which was registered with the
+ *    specified tlabel from alloc(). If no command was registered for the
+ *    tlabel, cmd is set to NULL.
+ */
+void
+hci1394_tlabel_lookup(hci1394_tlabel_handle_t tlabel_handle,
+ hci1394_tlabel_info_t *tlabel_info, void **cmd)
+{
+ uint_t node_number;
+ uint_t tlabel;
+
+
+ ASSERT(tlabel_handle != NULL);
+ ASSERT(tlabel_info != NULL);
+ ASSERT(cmd != NULL);
+ ASSERT(tlabel_info->tbi_tlabel <= TLABEL_MASK);
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_lookup_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* figure out what node and tlabel we are using */
+ node_number = IEEE1394_NODE_NUM(tlabel_info->tbi_destination);
+ tlabel = tlabel_info->tbi_tlabel;
+
+ mutex_enter(&tlabel_handle->tb_mutex);
+
+ /*
+ * fetch the (void *) from the lookup table. The case where the pointer
+ * equals NULL will be handled by the layer above.
+ */
+ *cmd = tlabel_handle->tb_lookup[node_number][tlabel];
+
+ mutex_exit(&tlabel_handle->tb_mutex);
+
+ TNF_PROBE_2_DEBUG(hci1394_tlabel_lookup,
+ HCI1394_TNF_HAL_TLABEL, "", tnf_uint, nodeid, node_number,
+ tnf_uint, lookup_tlabel, tlabel_info->tbi_tlabel);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_lookup_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlabel_bad()
+ * Register the specified tlabel as bad. tlabel_lookup() will no longer
+ * return a registered opaque command and this tlabel will not be returned
+ * from alloc() until > reclaim_time has passed. See set_reclaim_time() for
+ * more info.
+ */
+void
+hci1394_tlabel_bad(hci1394_tlabel_handle_t tlabel_handle,
+ hci1394_tlabel_info_t *tlabel_info)
+{
+ uint_t node_number;
+ uint_t tlabel;
+
+
+ ASSERT(tlabel_handle != NULL);
+ ASSERT(tlabel_info != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_bad_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* figure out what node and tlabel we are using */
+ node_number = IEEE1394_NODE_NUM(tlabel_info->tbi_destination);
+ tlabel = tlabel_info->tbi_tlabel & TLABEL_MASK;
+
+ mutex_enter(&tlabel_handle->tb_mutex);
+
+ TNF_PROBE_2(hci1394_tlabel_timeout, HCI1394_TNF_HAL_ERROR, "", tnf_uint,
+ nodeid, node_number, tnf_uint, bad_tlabel, tlabel_info->tbi_tlabel);
+
+ /*
+ * Put the tlabel in the bad list and NULL out the (void *) in the
+ * lookup structure. We may see this tlabel shortly if the device is
+ * late in responding. We want to make sure to drop the message if we
+ * do. Set the bad timestamp to the current time plus the reclaim time.
+ * This is the "new" time when all of the bad tlabels for this node will
+ * be free'd.
+ */
+ tlabel_handle->tb_bad_timestamp[node_number] = gethrtime() +
+ tlabel_handle->tb_reclaim_time;
+ tlabel_handle->tb_bad[node_number] |= ((uint64_t)1 << tlabel);
+ tlabel_handle->tb_lookup[node_number][tlabel] = NULL;
+
+ mutex_exit(&tlabel_handle->tb_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_bad_exit, HCI1394_TNF_HAL_STACK, "");
+}
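+
+/*
+ * Worked example (illustrative numbers): with a reclaim_time_nS of
+ * 2000000000 (2 seconds), a tlabel marked bad when gethrtime() reads
+ * 5000000000 sets tb_bad_timestamp to 7000000000. The first
+ * hci1394_tlabel_alloc() call for this node after gethrtime() exceeds
+ * 7000000000 moves the entire bad mask back into the free mask.
+ */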
+
+
+/*
+ * hci1394_tlabel_reset()
+ * resets the tlabel tracking structures to an initial state where no
+ * tlabels are outstanding and all tlabels are registered as good. This
+ * routine should be called every bus reset.
+ */
+void
+hci1394_tlabel_reset(hci1394_tlabel_handle_t tlabel_handle)
+{
+ int index;
+ int index2;
+
+
+ ASSERT(tlabel_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_reset_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&tlabel_handle->tb_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_reset, HCI1394_TNF_HAL_TLABEL, "");
+
+	/* Bus reset optimization; handle broadcast writes separately */
+ if (tlabel_handle->tb_bcast_sent == B_TRUE) {
+ tlabel_handle->tb_free[IEEE1394_BROADCAST_NODEID] =
+ (uint64_t)0xFFFFFFFFFFFFFFFF;
+ tlabel_handle->tb_bad[IEEE1394_BROADCAST_NODEID] =
+ (uint64_t)0;
+ tlabel_handle->tb_bad_timestamp[IEEE1394_BROADCAST_NODEID] =
+ (hrtime_t)0;
+ tlabel_handle->tb_last[IEEE1394_BROADCAST_NODEID] = 0;
+ for (index2 = 0; index2 < TLABEL_RANGE; index2++) {
+			tlabel_handle->tb_lookup[
+			    IEEE1394_BROADCAST_NODEID][index2] = NULL;
+ }
+ }
+
+	/*
+	 * Mark all tlabels as free, with no bad tlabels. Start the first
+	 * tlabel alloc at 0 and clean out the lookup table. As an
+	 * optimization, this is only done up to the maximum node we have
+	 * seen on the bus.
+	 */
+ for (index = 0; index <= tlabel_handle->tb_max_node; index++) {
+ tlabel_handle->tb_free[index] = (uint64_t)0xFFFFFFFFFFFFFFFF;
+ tlabel_handle->tb_bad[index] = (uint64_t)0;
+ tlabel_handle->tb_bad_timestamp[index] = (hrtime_t)0;
+ tlabel_handle->tb_last[index] = 0;
+ for (index2 = 0; index2 < TLABEL_RANGE; index2++) {
+ tlabel_handle->tb_lookup[index][index2] = NULL;
+ }
+ }
+
+ tlabel_handle->tb_max_node = 0;
+ tlabel_handle->tb_bcast_sent = B_FALSE;
+
+ mutex_exit(&tlabel_handle->tb_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_reset_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlabel_set_reclaim_time()
+ * This function should be called if a change to the reclaim_time is
+ * required after the initial call to init(). It is not necessary to call
+ * this function if the reclaim time never changes.
+ *
+ * Currently, bad tlabels are reclaimed in tlabel_alloc().
+ * It looks like the following for a given node:
+ *
+ * if bad tlabels exist
+ *     if (current time > (time of last bad tlabel + reclaim time))
+ *         free all bad tlabels for the node.
+ */
+void
+hci1394_tlabel_set_reclaim_time(hci1394_tlabel_handle_t tlabel_handle,
+ hrtime_t reclaim_time_nS)
+{
+ ASSERT(tlabel_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_set_reclaim_time_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+	 * We do not need to lock the tlabel structure here because we are
+ * doing a single write to reclaim_time. If this changes in the future,
+ * we may need to add calls to lock() and unlock().
+ */
+ tlabel_handle->tb_reclaim_time = reclaim_time_nS;
+
+ TNF_PROBE_0_DEBUG(hci1394_tlabel_set_reclaim_time_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
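+
+/*
+ * Typical calling sequence (illustrative only; hdl, destination, and cmd
+ * are hypothetical caller variables). A tlabel is allocated for a
+ * destination, the command is registered against it, and the tlabel is
+ * freed when the response arrives:
+ *
+ *	hci1394_tlabel_info_t info;
+ *
+ *	if (hci1394_tlabel_alloc(hdl, destination, &info) == DDI_SUCCESS) {
+ *		hci1394_tlabel_register(hdl, &info, cmd);
+ *
+ *		... later, when the response arrives ...
+ *		hci1394_tlabel_lookup(hdl, &info, &cmd);
+ *		hci1394_tlabel_free(hdl, &info);
+ *	}
+ *
+ * On a response timeout, hci1394_tlabel_bad(hdl, &info) is called instead
+ * of free(), which quarantines the tlabel for reclaim_time_nS.
+ */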
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_tlist.c b/usr/src/uts/common/io/1394/adapters/hci1394_tlist.c
new file mode 100644
index 0000000000..e32ab382c0
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_tlist.c
@@ -0,0 +1,510 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_tlist.c
+ *    This implements a timed, doubly linked list.
+ * This list supports:
+ * - addition of node to the end of the list
+ * - atomic deletion of node anywhere in list
+ * - get and remove node from head of list
+ * - enable/disable of timeout feature
+ * - timeout feature, if enabled, will remove each node on the list which
+ * has been on the list for > timeout. The callback provided will be
+ * called for each node removed. The worst case time is around
+ * timer_resolution after the timeout has occurred (i.e. if you set the
+ * timer resolution to 50uS and the timeout to 100uS, you could get the
+ *	  callback anywhere from 100uS to 150uS from when you added the node
+ *	  to the list). This is a general statement and ignores things like
+ * interrupt latency, context switching, etc. So if you see a time
+ * around 155uS, don't complain :-)
+ * - The timer is only used when something is on the list
+ */
+
+#include <sys/kmem.h>
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/1394/adapters/hci1394.h>
+
+
+static clock_t hci1394_tlist_nsectohz(hrtime_t nS);
+static void hci1394_tlist_remove(hci1394_tlist_t *list,
+ hci1394_tlist_node_t *node);
+static void hci1394_tlist_callback(void *tlist_handle);
+
+
+/*
+ * hci1394_tlist_init()
+ * Initialize the tlist. The list will be protected by a mutex at the
+ * iblock_cookie passed in. init() returns a handle to be used for the rest
+ * of the functions. If you do not wish to use the timeout feature, set
+ * (hci1394_timer_t *) to null.
+ */
+void
+hci1394_tlist_init(hci1394_drvinfo_t *drvinfo, hci1394_tlist_timer_t *timer,
+ hci1394_tlist_handle_t *tlist_handle)
+{
+ hci1394_tlist_t *list;
+
+
+ ASSERT(tlist_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlist_init_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* try to alloc the space to keep track of the list */
+ list = kmem_alloc(sizeof (hci1394_tlist_t), KM_SLEEP);
+
+ /* setup the return parameter */
+ *tlist_handle = list;
+
+ /* initialize the list structure */
+ list->tl_drvinfo = drvinfo;
+ list->tl_state = HCI1394_TLIST_TIMEOUT_OFF;
+ list->tl_head = NULL;
+ list->tl_tail = NULL;
+ if (timer == NULL) {
+ list->tl_timer_enabled = B_FALSE;
+ } else {
+ ASSERT(timer->tlt_callback != NULL);
+ list->tl_timer_enabled = B_TRUE;
+ list->tl_timer_info = *timer;
+ }
+ mutex_init(&list->tl_mutex, NULL, MUTEX_DRIVER,
+ drvinfo->di_iblock_cookie);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlist_init_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlist_fini()
+ * Frees up the space allocated in init(). Notice that a pointer to the
+ * handle is used for the parameter. fini() will set your handle to NULL
+ * before returning. Make sure that any pending timeouts are canceled.
+ */
+void
+hci1394_tlist_fini(hci1394_tlist_handle_t *tlist_handle)
+{
+ hci1394_tlist_t *list;
+
+
+ ASSERT(tlist_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlist_fini_enter, HCI1394_TNF_HAL_STACK, "");
+
+ list = (hci1394_tlist_t *)*tlist_handle;
+ hci1394_tlist_timeout_cancel(list);
+ mutex_destroy(&list->tl_mutex);
+ kmem_free(list, sizeof (hci1394_tlist_t));
+
+ /* set handle to null. This helps catch bugs. */
+ *tlist_handle = NULL;
+
+ TNF_PROBE_0_DEBUG(hci1394_tlist_fini_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlist_add()
+ * Add the node to the tail of the linked list. The list is protected by a
+ * mutex at the iblock_cookie passed in during init.
+ */
+void
+hci1394_tlist_add(hci1394_tlist_handle_t tlist_handle,
+ hci1394_tlist_node_t *node)
+{
+ ASSERT(tlist_handle != NULL);
+ ASSERT(node != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlist_add_enter, HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&tlist_handle->tl_mutex);
+
+ /* add's always go at the end of the list */
+ node->tln_next = NULL;
+
+ /* Set state that this node is currently on the tlist */
+ node->tln_on_list = B_TRUE;
+
+	/* enter in the expire time (in nS) */
+ if (tlist_handle->tl_timer_enabled == B_TRUE) {
+ node->tln_expire_time = gethrtime() +
+ tlist_handle->tl_timer_info.tlt_timeout;
+ }
+
+ /* if there is nothing in the list */
+ if (tlist_handle->tl_tail == NULL) {
+ tlist_handle->tl_head = node;
+ tlist_handle->tl_tail = node;
+ node->tln_prev = NULL;
+
+ if ((tlist_handle->tl_timer_enabled == B_TRUE) &&
+ (tlist_handle->tl_state == HCI1394_TLIST_TIMEOUT_OFF)) {
+ /* turn the timer on */
+ tlist_handle->tl_timeout_id = timeout(
+ hci1394_tlist_callback, tlist_handle,
+			    hci1394_tlist_nsectohz(
+ tlist_handle->tl_timer_info.tlt_timer_resolution));
+ tlist_handle->tl_state = HCI1394_TLIST_TIMEOUT_ON;
+ }
+ } else {
+ /* put the node on the end of the list */
+ tlist_handle->tl_tail->tln_next = node;
+ node->tln_prev = tlist_handle->tl_tail;
+ tlist_handle->tl_tail = node;
+ /*
+ * if timeouts are enabled, we don't have to call
+ * timeout() because the timer is already on.
+ */
+ }
+
+ mutex_exit(&tlist_handle->tl_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlist_add_exit, HCI1394_TNF_HAL_STACK, "");
+}
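+
+/*
+ * Typical setup of a tlist with the timeout feature (illustrative only;
+ * my_timeout_handler, my_state, and node are hypothetical):
+ *
+ *	hci1394_tlist_timer_t timer;
+ *	hci1394_tlist_handle_t hdl;
+ *
+ *	timer.tlt_timeout = 100000;		(100uS, expressed in nS)
+ *	timer.tlt_timer_resolution = 50000;	(50uS, expressed in nS)
+ *	timer.tlt_callback = my_timeout_handler;
+ *	timer.tlt_callback_arg = my_state;
+ *
+ *	hci1394_tlist_init(drvinfo, &timer, &hdl);
+ *	hci1394_tlist_add(hdl, &node);
+ *
+ * The first add() on an empty list arms the timer; it stays armed only
+ * while the list is non-empty.
+ */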
+
+
+/*
+ * hci1394_tlist_delete()
+ * Remove the node from the list. The node can be anywhere in the list. Make
+ *    sure that the node is only removed once since different threads may be
+ * trying to delete the same node at the same time.
+ */
+int
+hci1394_tlist_delete(hci1394_tlist_handle_t tlist_handle,
+ hci1394_tlist_node_t *node)
+{
+ ASSERT(tlist_handle != NULL);
+ ASSERT(node != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlist_delete_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&tlist_handle->tl_mutex);
+
+ /*
+ * check for race condition. Someone else may have already removed this
+ * node from the list. hci1394_tlist_delete() supports two threads
+ * trying to delete the node at the same time. The "losing" thread will
+ * have DDI_FAILURE returned.
+ */
+ if (node->tln_on_list == B_FALSE) {
+ mutex_exit(&tlist_handle->tl_mutex);
+ TNF_PROBE_0_DEBUG(hci1394_tlist_delete_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ hci1394_tlist_remove(tlist_handle, node);
+ mutex_exit(&tlist_handle->tl_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlist_delete_exit, HCI1394_TNF_HAL_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_tlist_get()
+ * get the node at the head of the linked list. This function also removes
+ * the node from the list.
+ */
+void
+hci1394_tlist_get(hci1394_tlist_handle_t tlist_handle,
+ hci1394_tlist_node_t **node)
+{
+ ASSERT(tlist_handle != NULL);
+ ASSERT(node != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlist_get_enter, HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&tlist_handle->tl_mutex);
+
+ /* set the return parameter */
+ *node = tlist_handle->tl_head;
+
+ /* remove the node from the tlist */
+ if (*node != NULL) {
+ hci1394_tlist_remove(tlist_handle, *node);
+ }
+
+ mutex_exit(&tlist_handle->tl_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlist_get_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlist_peek()
+ * get the node at the head of the linked list. This function does not
+ * remove the node from the list.
+ */
+void
+hci1394_tlist_peek(hci1394_tlist_handle_t tlist_handle,
+ hci1394_tlist_node_t **node)
+{
+ ASSERT(tlist_handle != NULL);
+ ASSERT(node != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlist_peek_enter, HCI1394_TNF_HAL_STACK, "");
+
+ mutex_enter(&tlist_handle->tl_mutex);
+ *node = tlist_handle->tl_head;
+ mutex_exit(&tlist_handle->tl_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlist_peek_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlist_timeout_update()
+ *    update the timeout to a different value. timeout is in nS. The update
+ *    does not happen immediately. The new timeout will not take effect until
+ *    all of the nodes currently present in the list are gone. It only makes
+ * sense to call this function when you have the timeout feature enabled.
+ */
+void
+hci1394_tlist_timeout_update(hci1394_tlist_handle_t tlist_handle,
+ hrtime_t timeout)
+{
+ ASSERT(tlist_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlist_update_timeout_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* set timeout to the new timeout */
+ tlist_handle->tl_timer_info.tlt_timeout = timeout;
+
+ TNF_PROBE_0_DEBUG(hci1394_tlist_update_timeout_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlist_timeout_cancel()
+ * cancel any scheduled timeouts. This should be called after the list is
+ * empty and there is no chance for any other nodes to be placed on the list.
+ * This function is meant to be called during a suspend or detach.
+ */
+void
+hci1394_tlist_timeout_cancel(hci1394_tlist_handle_t tlist_handle)
+{
+ ASSERT(tlist_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlist_timeout_cancel_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * Cancel the timeout. Do NOT use the tlist mutex here. It could cause a
+ * deadlock.
+ */
+ if (tlist_handle->tl_state == HCI1394_TLIST_TIMEOUT_ON) {
+ (void) untimeout(tlist_handle->tl_timeout_id);
+ tlist_handle->tl_state = HCI1394_TLIST_TIMEOUT_OFF;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_tlist_timeout_cancel_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlist_callback()
+ * The callback we use for the timeout() function. See if there are any nodes
+ * on the list which have timed out. If so, call the registered callback for
+ * each timed out node. We always start looking at the top of the list since
+ * the list is time sorted (oldest at the top).
+ */
+static void
+hci1394_tlist_callback(void *tlist_handle)
+{
+ hci1394_tlist_t *list;
+ hci1394_tlist_node_t *node;
+ hrtime_t current_time;
+
+
+ ASSERT(tlist_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_tlist_callback_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ list = (hci1394_tlist_t *)tlist_handle;
+
+ mutex_enter(&list->tl_mutex);
+
+ /*
+ * if there is something on the list, check to see if the oldest has
+ * expired. If there is nothing on the list, there is no reason to
+ * renew the timeout.
+ */
+ node = list->tl_head;
+ current_time = gethrtime();
+ while (node != NULL) {
+ /*
+ * if current time is greater than the time the command expires,
+ * AND, the expire time has not rolled over, then the command
+ * has timed out.
+ */
+ if (((uint64_t)current_time >=
+ (uint64_t)node->tln_expire_time) &&
+ (((uint64_t)node->tln_expire_time -
+ (uint64_t)list->tl_timer_info.tlt_timeout) <
+ (uint64_t)node->tln_expire_time)) {
+ /* remove the node from the tlist */
+ hci1394_tlist_remove(list, node);
+
+ /*
+			 * Call the timeout callback. We unlock the mutex
+ * around the callback so that other transactions will
+ * not be blocked while the callback is running. This
+ * is OK to do here because we have already removed this
+ * entry from our list. This code should not reference
+ * "node" again after the callback! After the callback
+ * returns, we need to resync node to the head of the
+ * list since we released/acquired the list mutex around
+ * the callback.
+ */
+ mutex_exit(&list->tl_mutex);
+ list->tl_timer_info.tlt_callback(node,
+ list->tl_timer_info.tlt_callback_arg);
+ mutex_enter(&list->tl_mutex);
+ node = list->tl_head;
+
+ /*
+ * else, if current time is greater than the time the command
+ * expires, AND, current_time is not about to rollover. (this
+ * works since it is in the else and we periodically sample
+ * well below the rollover time)
+ */
+		} else if (((uint64_t)current_time >=
+ (uint64_t)node->tln_expire_time) &&
+ (((uint64_t)current_time +
+ (uint64_t)list->tl_timer_info.tlt_timeout) >
+ (uint64_t)current_time)) {
+ /* remove the node from the tlist */
+ hci1394_tlist_remove(list, node);
+
+ /*
+			 * Call the timeout callback. We unlock the mutex
+ * around the callback so that other transactions will
+ * not be blocked while the callback is running. This
+ * is OK to do here because we have already removed this
+ * entry from our list. This code should not reference
+ * "node" again after the callback! After the callback
+ * returns, we need to resync node to the head of the
+ * list since we released/acquired the list mutex around
+ * the callback.
+ */
+ mutex_exit(&list->tl_mutex);
+ list->tl_timer_info.tlt_callback(node,
+ list->tl_timer_info.tlt_callback_arg);
+ mutex_enter(&list->tl_mutex);
+ node = list->tl_head;
+
+ } else {
+ /*
+ * this command has not timed out.
+ * Since this list is time sorted, we are
+ * done looking for nodes that have expired
+ */
+ break;
+ }
+ }
+
+ /*
+ * if there are nodes still on the pending list, kick
+ * off the timer again.
+ */
+ if (node != NULL) {
+ list->tl_timeout_id = timeout(hci1394_tlist_callback, list,
+		    hci1394_tlist_nsectohz(
+ list->tl_timer_info.tlt_timer_resolution));
+ list->tl_state = HCI1394_TLIST_TIMEOUT_ON;
+ } else {
+ list->tl_state = HCI1394_TLIST_TIMEOUT_OFF;
+ }
+
+ mutex_exit(&list->tl_mutex);
+
+ TNF_PROBE_0_DEBUG(hci1394_tlist_callback_exit,
+ HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlist_remove()
+ * This is an internal function which removes the given node from the list.
+ * The list MUST be locked before calling this function.
+ */
+static void
+hci1394_tlist_remove(hci1394_tlist_t *list, hci1394_tlist_node_t *node)
+{
+ ASSERT(list != NULL);
+ ASSERT(node != NULL);
+ ASSERT(node->tln_on_list == B_TRUE);
+ ASSERT(MUTEX_HELD(&list->tl_mutex));
+ TNF_PROBE_0_DEBUG(hci1394_tlist_remove_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ /* if this is the only node on the list */
+ if ((list->tl_head == node) &&
+ (list->tl_tail == node)) {
+ list->tl_head = NULL;
+ list->tl_tail = NULL;
+
+ /* if the node is at the head of the list */
+ } else if (list->tl_head == node) {
+ list->tl_head = node->tln_next;
+ node->tln_next->tln_prev = NULL;
+
+ /* if the node is at the tail of the list */
+ } else if (list->tl_tail == node) {
+ list->tl_tail = node->tln_prev;
+ node->tln_prev->tln_next = NULL;
+
+ /* if the node is in the middle of the list */
+ } else {
+ node->tln_prev->tln_next = node->tln_next;
+ node->tln_next->tln_prev = node->tln_prev;
+ }
+
+ /* Set state that this node has been removed from the list */
+ node->tln_on_list = B_FALSE;
+
+ /* cleanup the node's link pointers */
+ node->tln_prev = NULL;
+ node->tln_next = NULL;
+
+ TNF_PROBE_0_DEBUG(hci1394_tlist_remove_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_tlist_nsectohz()
+ * Convert nS to hz. This allows us to call timeout() but keep our time
+ * reference in nS.
+ */
+#define	HCI1394_TLIST_nS_TO_uS(nS)	((clock_t)((nS) / 1000))
+static clock_t
+hci1394_tlist_nsectohz(hrtime_t nS)
+{
+ return (drv_usectohz(HCI1394_TLIST_nS_TO_uS(nS)));
+}
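+
+/*
+ * Worked example (illustrative): on a system with a 100Hz clock (10mS per
+ * tick), a 50uS (50000nS) timer resolution converts to drv_usectohz(50),
+ * which rounds up to 1 tick. The effective callback resolution is
+ * therefore bounded below by the system clock rate, not by the nS value
+ * requested here.
+ */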
diff --git a/usr/src/uts/common/io/1394/adapters/hci1394_vendor.c b/usr/src/uts/common/io/1394/adapters/hci1394_vendor.c
new file mode 100644
index 0000000000..cba68f5629
--- /dev/null
+++ b/usr/src/uts/common/io/1394/adapters/hci1394_vendor.c
@@ -0,0 +1,451 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * hci1394_vendor.c
+ * These routines provide initialization, cleanup, and general access to
+ * vendor specific features on the OpenHCI adapter.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+
+#include <sys/1394/adapters/hci1394.h>
+
+
+/*
+ * Macro which makes sure the vendor register offset is not greater than
+ * 0x7FC and that it is quadlet aligned.
+ */
+#define	VENDOR_ALIGN_ADDR(addr)	((addr) & 0x7FC)
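+
+/*
+ * Worked examples (illustrative): VENDOR_ALIGN_ADDR(0x123) == 0x120 and
+ * VENDOR_ALIGN_ADDR(0x805) == 0x004. The low two bits are cleared to force
+ * quadlet alignment; bit 11 and above are cleared to keep the offset
+ * within the 0x000-0x7FC window.
+ */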
+
+
+/*
+ * Patchable variable to have the driver set the GUID on a Sun RIO chip.
+ * Normally this will be done by the firmware, but for PPX cards and OBP images
+ * without 1394 support, we need to do this. This is only used for RIO. Other
+ * vendor cards are not affected.
+ * 0 - don't set GUID (default)
+ * non zero - set GUID on RIO
+ */
+int hci1394_set_rio_guid = 0;
+
+
+static int hci1394_rio_init(hci1394_vendor_t *vendor);
+static void hci1394_rio_guid_init(hci1394_vendor_t *vendor);
+static int hci1394_rio_resume(hci1394_vendor_t *vendor);
+
+
+/*
+ * hci1394_vendor_init()
+ * Initialize the Vendor Specific portions of the OpenHCI chipset. This is
+ * not required according to the OpenHCI spec, but may be needed for
+ *    performance optimizations, etc. drvinfo, ohci, and vendor_info are
+ *    inputs; vendor_handle and vendor_info->vendor_reg_count are outputs.
+ *    vendor_reg_count is the number of register sets (or mappings) present
+ *    for this device. This will usually be 0 or 1. vendor_handle is an
+ *    opaque handle used in the rest of the vendor routines.
+ */
+int
+hci1394_vendor_init(hci1394_drvinfo_t *drvinfo, hci1394_ohci_handle_t ohci,
+ hci1394_vendor_info_t *vendor_info, hci1394_vendor_handle_t *vendor_handle)
+{
+ int status;
+ hci1394_vendor_t *vendor;
+
+
+ ASSERT(drvinfo != NULL);
+ ASSERT(vendor_info != NULL);
+ ASSERT(vendor_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_vendor_init_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /*
+ * alloc the space to keep track of the vendor registers.
+ */
+ vendor = kmem_alloc(sizeof (hci1394_vendor_t), KM_SLEEP);
+ vendor->ve_info = *vendor_info;
+ vendor->ve_drvinfo = drvinfo;
+ vendor->ve_ohci = ohci;
+
+ /* setup the vendor_handle return parameter */
+ *vendor_handle = vendor;
+
+ /* call vendor specific initialization routine */
+ switch (vendor_info->vendor_id) {
+
+ /* Sun Microsystems 1394 Device */
+ case VENDOR_VID_SUN_MICROSYSTEMS:
+ switch (vendor_info->device_id) {
+
+ /* RIO base chip. Call the RIO specific init routine */
+ case VENDOR_DID_RIO_1394:
+ status = hci1394_rio_init(vendor);
+ if (status != DDI_SUCCESS) {
+ kmem_free(vendor,
+ sizeof (hci1394_vendor_t));
+ *vendor_handle = NULL;
+ TNF_PROBE_1(hci1394_vendor_init_rio_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string,
+ errmsg, "hci1394_rio_init() failed");
+ TNF_PROBE_0_DEBUG(hci1394_vendor_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ break;
+ /* VENDOR_DID_RIO_1394 */
+
+ /* unrecognized device - don't map any registers */
+ default:
+ vendor->ve_reg_count = 0;
+ break;
+ }
+ break;
+ /* VENDOR_VID_SUN_MICROSYSTEMS */
+
+ /* unrecognized vendor - don't map any registers */
+ default:
+ vendor->ve_reg_count = 0;
+ break;
+ }
+
+ vendor_info->vendor_reg_count = vendor->ve_reg_count;
+
+ TNF_PROBE_0_DEBUG(hci1394_vendor_init_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_vendor_fini()
+ * Cleanup after Vendor Specific init. This includes freeing any allocated
+ * kernel memory and freeing any mapped registers.
+ *
+ * NOTE: This routine must be called after a successful vendor_init even if
+ *	 the register count was 0 during init. This routine is normally
+ *	 called during the detach process.
+ *
+ * NOTE: A pointer to the handle is used for the parameter. fini() will set
+ * your handle to NULL before returning.
+ */
+void
+hci1394_vendor_fini(hci1394_vendor_handle_t *vendor_handle)
+{
+ uint_t index;
+
+
+ ASSERT(vendor_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_vendor_fini_enter, HCI1394_TNF_HAL_STACK, "");
+
+ for (index = 0; index < (*vendor_handle)->ve_reg_count; index++) {
+ ddi_regs_map_free(&(*vendor_handle)->
+ ve_reg_array[index]->vr_reg_handle);
+ }
+ kmem_free(*vendor_handle, sizeof (hci1394_vendor_t));
+
+ /* Set the vendor_handle to NULL to help catch bugs */
+ *vendor_handle = NULL;
+
+ TNF_PROBE_0_DEBUG(hci1394_vendor_fini_exit, HCI1394_TNF_HAL_STACK, "");
+}
+
+
+/*
+ * hci1394_vendor_resume()
+ * Vendor Specific init for a power resume (DDI_RESUME). This includes
+ * re-setting up any vendor specific registers.
+ */
+int
+hci1394_vendor_resume(hci1394_vendor_handle_t vendor_handle)
+{
+ int status;
+ hci1394_vendor_info_t *vendor_info;
+
+
+ ASSERT(vendor_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_vendor_resume_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ vendor_info = &vendor_handle->ve_info;
+
+ /* call vendor specific initialization routine */
+ switch (vendor_info->vendor_id) {
+
+ /* Sun Microsystems 1394 Device */
+ case VENDOR_VID_SUN_MICROSYSTEMS:
+ switch (vendor_info->device_id) {
+
+ /* RIO base chip. Call the RIO specific resume routine */
+ case VENDOR_DID_RIO_1394:
+ status = hci1394_rio_resume(vendor_handle);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_1(hci1394_vendor_resume_rio_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string,
+ errmsg, "hci1394_rio_resume() failed");
+ TNF_PROBE_0_DEBUG(hci1394_vendor_resume_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ break;
+ /* VENDOR_DID_RIO_1394 */
+
+ /* unrecognized device - don't map any registers */
+ default:
+ break;
+ }
+ break;
+ /* VENDOR_VID_SUN_MICROSYSTEMS */
+
+ /* unrecognized vendor - don't map any registers */
+ default:
+ break;
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_vendor_resume_exit, HCI1394_TNF_HAL_STACK,
+ "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_vendor_reg_write()
+ * Write vendor specific register. reg_set is the register set to write. The
+ * first register set would be reg_set = 0, the second reg_set = 1, etc.
+ * offset is the offset into the vendor specific register space. An offset of
+ * 0 would be the first vendor register for that register set. data is the
+ * data to write to the vendor register.
+ */
+int
+hci1394_vendor_reg_write(hci1394_vendor_handle_t vendor_handle,
+ uint_t reg_set, uint_t offset, uint32_t data)
+{
+ hci1394_vendor_reg_t *venreg;
+ uint32_t *regaddr;
+
+
+ ASSERT(vendor_handle != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_vendor_reg_write_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ if (vendor_handle->ve_reg_count < (reg_set + 1)) {
+ TNF_PROBE_1(hci1394_vendor_reg_write_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "reg_set not present");
+		TNF_PROBE_0_DEBUG(hci1394_vendor_reg_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ venreg = vendor_handle->ve_reg_array[reg_set];
+ regaddr = (uint32_t *)((uintptr_t)venreg->vr_reg_addr +
+ (uintptr_t)VENDOR_ALIGN_ADDR(offset));
+
+ ddi_put32(venreg->vr_reg_handle, regaddr, data);
+
+ TNF_PROBE_0_DEBUG(hci1394_vendor_reg_write_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
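+
+/*
+ * Illustrative caller sketch (the register set and offset of 0 are
+ * hypothetical examples): a read-modify-write of the first register in
+ * register set 0 would look like:
+ *
+ *	uint32_t val;
+ *
+ *	if (hci1394_vendor_reg_read(vendor_handle, 0, 0, &val) ==
+ *	    DDI_SUCCESS) {
+ *		val |= 0x1;
+ *		(void) hci1394_vendor_reg_write(vendor_handle, 0, 0, val);
+ *	}
+ */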
+
+
+/*
+ * hci1394_vendor_reg_read()
+ *    Read vendor specific register. reg_set is the register set to read. The
+ * first register set would be reg_set = 0, the second reg_set = 1, etc.
+ * offset is the offset into the vendor specific register space. An offset
+ * of 0 would be the first vendor register for that register set. data is
+ *    the address where the value read is returned.
+ */
+int
+hci1394_vendor_reg_read(hci1394_vendor_handle_t vendor_handle, uint_t reg_set,
+ uint_t offset, uint32_t *data)
+{
+ hci1394_vendor_reg_t *venreg;
+ uint32_t *regaddr;
+
+
+ ASSERT(vendor_handle != NULL);
+ ASSERT(data != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_vendor_reg_read_enter,
+ HCI1394_TNF_HAL_STACK, "");
+
+ if (vendor_handle->ve_reg_count < (reg_set + 1)) {
+ TNF_PROBE_1(hci1394_vendor_reg_read_fail,
+ HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
+ "reg_set not present");
+ TNF_PROBE_0_DEBUG(hci1394_vendor_reg_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ venreg = vendor_handle->ve_reg_array[reg_set];
+ regaddr = (uint32_t *)((uintptr_t)venreg->vr_reg_addr +
+ (uintptr_t)VENDOR_ALIGN_ADDR(offset));
+
+ *data = ddi_get32(venreg->vr_reg_handle, regaddr);
+
+ TNF_PROBE_0_DEBUG(hci1394_vendor_reg_read_exit,
+ HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * hci1394_rio_init()
+ * Initialize SUNW RIO vendor specific registers.
+ */
+static int
+hci1394_rio_init(hci1394_vendor_t *vendor)
+{
+ int status;
+
+
+ ASSERT(vendor != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_rio_init_enter, HCI1394_TNF_HAL_STACK, "");
+
+ vendor->ve_reg_count = 1;
+ vendor->ve_reg_array[0] = kmem_alloc(sizeof (hci1394_vendor_reg_t),
+ KM_SLEEP);
+
+ status = ddi_regs_map_setup(vendor->ve_drvinfo->di_dip, RIOREG_REG_BASE,
+ &vendor->ve_reg_array[0]->vr_reg_addr, RIOREG_OFFSET, RIOREG_LENGTH,
+ &vendor->ve_drvinfo->di_reg_attr,
+ &vendor->ve_reg_array[0]->vr_reg_handle);
+ if (status != DDI_SUCCESS) {
+ vendor->ve_reg_count = 0;
+ kmem_free(vendor->ve_reg_array[0],
+ sizeof (hci1394_vendor_reg_t));
+ TNF_PROBE_0(hci1394_rio_init_rms_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_rio_init_exit, HCI1394_TNF_HAL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /* Setup RIO Host Control Register */
+ status = hci1394_vendor_reg_write(vendor, 0, RIOREG_HOST_CONTROL,
+ RIOREG_HOST_CONTROL_SETTING);
+ if (status != DDI_SUCCESS) {
+ ddi_regs_map_free(&vendor->ve_reg_array[0]->vr_reg_handle);
+ vendor->ve_reg_count = 0;
+ kmem_free(vendor->ve_reg_array[0],
+ sizeof (hci1394_vendor_reg_t));
+ vendor->ve_reg_array[0] = NULL;
+ TNF_PROBE_0(hci1394_rio_init_vrw_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+ TNF_PROBE_0_DEBUG(hci1394_rio_init_exit,
+ HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Setup GUID on RIO without firmware support */
+ hci1394_rio_guid_init(vendor);
+
+ TNF_PROBE_0_DEBUG(hci1394_rio_init_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_rio_resume()
+ * Re-initialize RIO. This routine should be called during a resume.
+ */
+static int
+hci1394_rio_resume(hci1394_vendor_t *vendor)
+{
+ int status;
+
+
+ ASSERT(vendor != NULL);
+	TNF_PROBE_0_DEBUG(hci1394_rio_resume_enter, HCI1394_TNF_HAL_STACK, "");
+
+ /* Setup RIO Host Control Register */
+ status = hci1394_vendor_reg_write(vendor, 0, RIOREG_HOST_CONTROL,
+ RIOREG_HOST_CONTROL_SETTING);
+ if (status != DDI_SUCCESS) {
+ TNF_PROBE_0(hci1394_rio_resume_vrw_fail, HCI1394_TNF_HAL_ERROR,
+ "");
+		TNF_PROBE_0_DEBUG(hci1394_rio_resume_exit,
+		    HCI1394_TNF_HAL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Setup GUID on RIO PPX */
+ hci1394_rio_guid_init(vendor);
+
+	TNF_PROBE_0_DEBUG(hci1394_rio_resume_exit, HCI1394_TNF_HAL_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * hci1394_rio_guid_init()
+ * Setup a GUID in the RIO. Normally firmware would do this for the
+ * motherboard version. This will not hurt a RIO on the motherboard since we
+ * won't be able to write the GUID. We should not get to this code anyway in
+ * production systems. Use a timestamp for the lower 40 bits of the GUID.
+ */
+static void
+hci1394_rio_guid_init(hci1394_vendor_t *vendor)
+{
+ hrtime_t guid_timestamp;
+
+ ASSERT(vendor != NULL);
+ TNF_PROBE_0_DEBUG(hci1394_rio_guid_init_enter, HCI1394_TNF_HAL_STACK,
+ "");
+
+ if (hci1394_set_rio_guid != 0) {
+ guid_timestamp = gethrtime();
+
+ /* mask out the vendor field of the GUID */
+ guid_timestamp = guid_timestamp & RIOREG_GUID_MASK;
+
+ /* fill in Sun Microsystems */
+ guid_timestamp = guid_timestamp | RIOREG_GUID_SUN_MICROSYSTEMS;
+
+ /* write this to the GUID registers */
+ ddi_put32(vendor->ve_ohci->ohci_reg_handle,
+ &vendor->ve_ohci->ohci_regs->guid_hi,
+ (uint32_t)(guid_timestamp >> 32));
+ ddi_put32(vendor->ve_ohci->ohci_reg_handle,
+ &vendor->ve_ohci->ohci_regs->guid_lo,
+ (uint32_t)(guid_timestamp & 0xFFFFFFFF));
+ }
+
+ TNF_PROBE_0_DEBUG(hci1394_rio_guid_init_exit, HCI1394_TNF_HAL_STACK,
+ "");
+}
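+
+/*
+ * Worked example of the GUID composition above (the mask and vendor
+ * constant values are assumptions for illustration, not quoted from the
+ * header): if RIOREG_GUID_MASK keeps the low 40 bits and
+ * RIOREG_GUID_SUN_MICROSYSTEMS places the 24-bit company id in the top
+ * bits, a timestamp of 0x000012ABCDEF0123 would combine as
+ *
+ *	(0x000012ABCDEF0123 & 0x000000FFFFFFFFFF) | 0x0800200000000000
+ *	    == 0x080020ABCDEF0123
+ *
+ * so guid_hi is written with 0x080020AB and guid_lo with 0xCDEF0123.
+ */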
diff --git a/usr/src/uts/common/io/1394/h1394.c b/usr/src/uts/common/io/1394/h1394.c
new file mode 100644
index 0000000000..09b888ee45
--- /dev/null
+++ b/usr/src/uts/common/io/1394/h1394.c
@@ -0,0 +1,1791 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * h1394.c
+ * 1394 Services Layer HAL Interface
+ * Contains all of the routines that define the HAL to Services Layer
+ * interface
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/modctl.h>
+#include <sys/sunndi.h>
+#include <sys/cmn_err.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/thread.h>
+#include <sys/proc.h>
+#include <sys/disp.h>
+#include <sys/time.h>
+#include <sys/devctl.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+#include <sys/1394/ieee1394.h>
+
+
+extern struct bus_ops nx1394_busops;
+extern int nx1394_define_events(s1394_hal_t *hal);
+extern void nx1394_undefine_events(s1394_hal_t *hal);
+extern int s1394_ignore_invalid_gap_cnt;
+
+/*
+ * Function: h1394_init()
+ * Input(s): modlp The structure containing all of the
+ * HAL's relevant information
+ *
+ * Output(s):
+ *
+ * Description: h1394_init() is called by the HAL's _init function and is
+ * used to set up the nexus bus ops.
+ */
+int
+h1394_init(struct modlinkage *modlp)
+{
+ struct dev_ops *devops;
+
+ TNF_PROBE_0_DEBUG(h1394_init_enter, S1394_TNF_SL_STACK, "");
+
+ devops = ((struct modldrv *)(modlp->ml_linkage[0]))->drv_dev_ops;
+ devops->devo_bus_ops = &nx1394_busops;
+
+ TNF_PROBE_0_DEBUG(h1394_init_exit, S1394_TNF_SL_STACK, "");
+ return (0);
+}
+
+/*
+ * Function: h1394_fini()
+ * Input(s): modlp The structure containing all of the
+ * HAL's relevant information
+ *
+ * Output(s):
+ *
+ * Description: h1394_fini() is called by the HAL's _fini function and is
+ * used to NULL out the nexus bus ops.
+ */
+void
+h1394_fini(struct modlinkage *modlp)
+{
+ struct dev_ops *devops;
+
+ TNF_PROBE_0_DEBUG(h1394_fini_enter, S1394_TNF_SL_STACK, "");
+
+ devops = ((struct modldrv *)(modlp->ml_linkage[0]))->drv_dev_ops;
+ devops->devo_bus_ops = NULL;
+
+	TNF_PROBE_0_DEBUG(h1394_fini_exit, S1394_TNF_SL_STACK, "");
+}
+
+/*
+ * Function: h1394_attach()
+ * Input(s): halinfo The structure containing all of the
+ * HAL's relevant information
+ * cmd The ddi_attach_cmd_t that tells us
+ * if this is a RESUME or a regular
+ * attach() call
+ *
+ * Output(s): sl_private The HAL "handle" to be used for
+ * all subsequent calls into the
+ * 1394 Software Framework
+ *
+ * Description: h1394_attach() registers the HAL with the 1394 Software
+ * Framework. It returns a HAL "handle" to be used for
+ * all subsequent calls into the 1394 Software Framework.
+ */
+int
+h1394_attach(h1394_halinfo_t *halinfo, ddi_attach_cmd_t cmd, void **sl_private)
+{
+ s1394_hal_t *hal;
+ int ret;
+ char buf[32];
+ uint_t cmd_size;
+
+ TNF_PROBE_0_DEBUG(h1394_attach_enter, S1394_TNF_SL_STACK, "");
+
+ ASSERT(sl_private != NULL);
+
+ /* If this is a DDI_RESUME, return success */
+ if (cmd == DDI_RESUME) {
+ hal = (s1394_hal_t *)(*sl_private);
+ /* If we have a 1394A PHY, then reset the "contender bit" */
+ if (hal->halinfo.phy == H1394_PHY_1394A)
+ (void) HAL_CALL(hal).set_contender_bit(
+ hal->halinfo.hal_private);
+ TNF_PROBE_0_DEBUG(h1394_attach_exit, S1394_TNF_SL_STACK,
+ "");
+ return (DDI_SUCCESS);
+ } else if (cmd != DDI_ATTACH) {
+ TNF_PROBE_2(h1394_attach_error, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "Invalid ddi_attach_cmd received",
+ tnf_uint, attach_cmd, (uint_t)cmd);
+ TNF_PROBE_0_DEBUG(h1394_attach_exit, S1394_TNF_SL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /* Allocate space for s1394_hal_t */
+ hal = kmem_zalloc(sizeof (s1394_hal_t), KM_SLEEP);
+
+ /* Setup HAL state */
+ hal->hal_state = S1394_HAL_INIT;
+
+ /* Copy in the halinfo struct */
+ hal->halinfo = *halinfo;
+
+ /* Create the topology tree mutex */
+ mutex_init(&hal->topology_tree_mutex, NULL, MUTEX_DRIVER,
+ hal->halinfo.hw_interrupt);
+
+	/* Create the Cycle Master timer mutex */
+ mutex_init(&hal->cm_timer_mutex, NULL, MUTEX_DRIVER,
+ hal->halinfo.hw_interrupt);
+
+ /* Initialize the Isoch CEC list */
+ hal->isoch_cec_list_head = NULL;
+ hal->isoch_cec_list_tail = NULL;
+ mutex_init(&hal->isoch_cec_list_mutex, NULL, MUTEX_DRIVER,
+ hal->halinfo.hw_interrupt);
+
+ /* Initialize the Bus Manager node ID mutex and cv */
+ mutex_init(&hal->bus_mgr_node_mutex, NULL, MUTEX_DRIVER,
+ hal->halinfo.hw_interrupt);
+ cv_init(&hal->bus_mgr_node_cv, NULL, CV_DRIVER,
+ hal->halinfo.hw_interrupt);
+
+ /* Initialize the Bus Manager node ID - "-1" means undetermined */
+ hal->bus_mgr_node = -1;
+ hal->incumbent_bus_mgr = B_FALSE;
+
+ /* Initialize the Target list */
+ hal->target_head = NULL;
+ hal->target_tail = NULL;
+ rw_init(&hal->target_list_rwlock, NULL, RW_DRIVER,
+ hal->halinfo.hw_interrupt);
+
+ /* Setup Request Q's */
+ hal->outstanding_q_head = NULL;
+ hal->outstanding_q_tail = NULL;
+ mutex_init(&hal->outstanding_q_mutex, NULL, MUTEX_DRIVER,
+ hal->halinfo.hw_interrupt);
+ hal->pending_q_head = NULL;
+ hal->pending_q_tail = NULL;
+ mutex_init(&hal->pending_q_mutex, NULL, MUTEX_DRIVER,
+ hal->halinfo.hw_interrupt);
+
+ /* Create the kmem_cache for command allocations */
+ (void) sprintf(buf, "hal%d_cache", ddi_get_instance(hal->halinfo.dip));
+ cmd_size = sizeof (cmd1394_cmd_t) + sizeof (s1394_cmd_priv_t) +
+ hal->halinfo.hal_overhead;
+
+ hal->hal_kmem_cachep = kmem_cache_create(buf, cmd_size, 8, NULL, NULL,
+ NULL, NULL, NULL, 0);
+
+ /* Setup the event stuff */
+ ret = nx1394_define_events(hal);
+ if (ret != DDI_SUCCESS) {
+ /* Clean up before leaving */
+ s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL0);
+
+ TNF_PROBE_1(h1394_attach_error, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "Unable to define attach events");
+ TNF_PROBE_0_DEBUG(h1394_attach_exit, S1394_TNF_SL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize the mutexes and cv's used by the bus reset thread */
+ mutex_init(&hal->br_thread_mutex, NULL, MUTEX_DRIVER,
+ hal->halinfo.hw_interrupt);
+ cv_init(&hal->br_thread_cv, NULL, CV_DRIVER, hal->halinfo.hw_interrupt);
+ mutex_init(&hal->br_cmplq_mutex, NULL, MUTEX_DRIVER,
+ hal->halinfo.hw_interrupt);
+ cv_init(&hal->br_cmplq_cv, NULL, CV_DRIVER, hal->halinfo.hw_interrupt);
+
+ /*
+ * Create a bus reset thread to handle the device discovery.
+ * It should take the default stack sizes, it should run
+ * the s1394_br_thread() routine at the start, passing the
+ * HAL pointer as its argument. The thread should be put
+ * on processor p0, its state should be set to runnable,
+ * but not yet on a processor, and its scheduling priority
+ * should be the minimum level of any system class.
+ */
+ hal->br_thread = thread_create((caddr_t)NULL, 0, s1394_br_thread,
+ hal, 0, &p0, TS_RUN, minclsyspri);
+
+ /* Until we see a bus reset this HAL has no nodes */
+ hal->number_of_nodes = 0;
+ hal->num_bus_reset_till_fail = NUM_BR_FAIL;
+
+ /* Initialize the SelfID Info */
+ hal->current_buffer = 0;
+ hal->selfid_buf0 = kmem_zalloc(S1394_SELFID_BUF_SIZE, KM_SLEEP);
+ hal->selfid_buf1 = kmem_zalloc(S1394_SELFID_BUF_SIZE, KM_SLEEP);
+
+ /* Initialize kstat structures */
+ ret = s1394_kstat_init(hal);
+ if (ret != DDI_SUCCESS) {
+ /* Clean up before leaving */
+ s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL3);
+
+ TNF_PROBE_1(h1394_attach_error, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "Failure in s1394_kstat_init");
+ TNF_PROBE_0_DEBUG(h1394_attach_exit, S1394_TNF_SL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+ hal->hal_kstats->guid = hal->halinfo.guid;
+
+ /* Setup the node tree pointers */
+ hal->old_tree = &hal->last_valid_tree[0];
+ hal->topology_tree = &hal->current_tree[0];
+
+ /* Initialize the local Config ROM entry */
+ ret = s1394_init_local_config_rom(hal);
+ if (ret != DDI_SUCCESS) {
+ /* Clean up before leaving */
+ s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL4);
+
+ TNF_PROBE_1(h1394_attach_error, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "Failure in s1394_init_local_config_rom");
+ TNF_PROBE_0_DEBUG(h1394_attach_exit, S1394_TNF_SL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize 1394 Address Space */
+ ret = s1394_init_addr_space(hal);
+ if (ret != DDI_SUCCESS) {
+ /* Clean up before leaving */
+ s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL5);
+
+ TNF_PROBE_1(h1394_attach_error, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "Invalid 1394 address space");
+ TNF_PROBE_0_DEBUG(h1394_attach_exit, S1394_TNF_SL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize FCP subsystem */
+ ret = s1394_fcp_hal_init(hal);
+ if (ret != DDI_SUCCESS) {
+ /* Clean up before leaving */
+ s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL6);
+
+ TNF_PROBE_1(h1394_attach_error, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "FCP initialization failure");
+ TNF_PROBE_0_DEBUG(h1394_attach_exit, S1394_TNF_SL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize the IRM node ID - "-1" means invalid, undetermined */
+ hal->IRM_node = -1;
+
+ /* If we have a 1394A PHY, then set the "contender bit" */
+ if (hal->halinfo.phy == H1394_PHY_1394A)
+ (void) HAL_CALL(hal).set_contender_bit(
+ hal->halinfo.hal_private);
+
+ /* Add into linked list */
+ mutex_enter(&s1394_statep->hal_list_mutex);
+ if ((s1394_statep->hal_head == NULL) &&
+ (s1394_statep->hal_tail == NULL)) {
+ s1394_statep->hal_head = hal;
+ s1394_statep->hal_tail = hal;
+ } else {
+ s1394_statep->hal_tail->hal_next = hal;
+ hal->hal_prev = s1394_statep->hal_tail;
+ s1394_statep->hal_tail = hal;
+ }
+ mutex_exit(&s1394_statep->hal_list_mutex);
+
+ /* Fill in services layer private info */
+ *sl_private = (void *)hal;
+
+ TNF_PROBE_0_DEBUG(h1394_attach_exit, S1394_TNF_SL_STACK, "");
+ return (DDI_SUCCESS);
+}
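+
+/*
+ * Illustrative HAL-side sketch (the halinfo setup is elided and the flow
+ * simplified): a HAL's attach(9E) routine registers with the framework
+ * roughly as follows:
+ *
+ *	h1394_halinfo_t halinfo;
+ *	void *sl_handle;
+ *
+ *	... fill in halinfo (dip, guid, phy type, callbacks, etc.) ...
+ *
+ *	if (h1394_attach(&halinfo, cmd, &sl_handle) != DDI_SUCCESS)
+ *		return (DDI_FAILURE);
+ *
+ * The returned sl_handle is then passed to all subsequent h1394_*() calls,
+ * e.g. h1394_alloc_cmd(), h1394_bus_reset(), and h1394_detach().
+ */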
+
+/*
+ * Function: h1394_detach()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ * cmd The ddi_detach_cmd_t that tells us
+ * if this is a SUSPEND or a regular
+ * detach() call
+ *
+ * Output(s): DDI_SUCCESS HAL successfully detached
+ * DDI_FAILURE HAL failed to detach
+ *
+ * Description: h1394_detach() unregisters the HAL from the 1394 Software
+ * Framework. It can be called during a SUSPEND operation or
+ * for a real detach() event.
+ */
+int
+h1394_detach(void **sl_private, ddi_detach_cmd_t cmd)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(h1394_detach_enter, S1394_TNF_SL_STACK, "");
+
+ hal = (s1394_hal_t *)(*sl_private);
+
+ switch (cmd) {
+ case DDI_DETACH:
+ /* Clean up before leaving */
+ s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL7);
+ /* NULL out the HAL "handle" */
+ *sl_private = NULL;
+ break;
+
+ case DDI_SUSPEND:
+ /* Turn off any timers that might be set */
+ s1394_destroy_timers(hal);
+ /* Set the hal_was_suspended bit */
+ hal->hal_was_suspended = B_TRUE;
+ break;
+
+ default:
+		TNF_PROBE_2(h1394_detach_error, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "Invalid ddi_detach_cmd_t type specified",
+ tnf_uint, detach_cmd, (uint_t)cmd);
+ TNF_PROBE_0_DEBUG(h1394_detach_exit, S1394_TNF_SL_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(h1394_detach_exit, S1394_TNF_SL_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: h1394_alloc_cmd()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ * flags The flags parameter is described below
+ *
+ * Output(s): cmdp Pointer to the newly allocated command
+ * hal_priv_ptr Offset into the command, points to
+ * the HAL's private area
+ *
+ * Description: h1394_alloc_cmd() allocates a command for use with the
+ * h1394_read_request(), h1394_write_request(), or
+ * h1394_lock_request() interfaces of the 1394 Software Framework.
+ * By default, h1394_alloc_cmd() may sleep while allocating
+ * memory for the command structure. If this is undesirable,
+ * the HAL may set the H1394_ALLOC_CMD_NOSLEEP bit in the flags
+ * parameter.
+ */
+int
+h1394_alloc_cmd(void *sl_private, uint_t flags, cmd1394_cmd_t **cmdp,
+ h1394_cmd_priv_t **hal_priv_ptr)
+{
+ s1394_hal_t *hal;
+ s1394_cmd_priv_t *s_priv;
+
+ TNF_PROBE_0_DEBUG(h1394_alloc_cmd_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ hal = (s1394_hal_t *)sl_private;
+
+ if (s1394_alloc_cmd(hal, flags, cmdp) != DDI_SUCCESS) {
+ TNF_PROBE_1(h1394_alloc_cmd_error, S1394_TNF_SL_ARREQ_ERROR, "",
+ tnf_string, msg, "Failed to allocate command structure");
+ TNF_PROBE_0_DEBUG(h1394_alloc_cmd_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(*cmdp);
+
+ *hal_priv_ptr = &s_priv->hal_cmd_private;
+
+ TNF_PROBE_0_DEBUG(h1394_alloc_cmd_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: h1394_free_cmd()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ * cmdp Pointer to the command to be freed
+ *
+ * Output(s): DDI_SUCCESS HAL successfully freed command
+ * DDI_FAILURE HAL failed to free command
+ *
+ * Description: h1394_free_cmd() attempts to free a command that has previously
+ * been allocated by the HAL. It is possible for h1394_free_cmd()
+ * to fail because the command is currently in-use by the 1394
+ * Software Framework.
+ */
+int
+h1394_free_cmd(void *sl_private, cmd1394_cmd_t **cmdp)
+{
+ s1394_hal_t *hal;
+ s1394_cmd_priv_t *s_priv;
+
+ TNF_PROBE_0_DEBUG(h1394_free_cmd_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ hal = (s1394_hal_t *)sl_private;
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(*cmdp);
+
+ /* Check that command isn't in use */
+ if (s_priv->cmd_in_use == B_TRUE) {
+ TNF_PROBE_1(h1394_free_cmd_error, S1394_TNF_SL_ARREQ_ERROR, "",
+ tnf_string, msg, "Attempted to free an in-use command");
+ TNF_PROBE_0_DEBUG(h1394_free_cmd_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+ ASSERT(s_priv->cmd_in_use == B_FALSE);
+ return (DDI_FAILURE);
+ }
+
+ kmem_cache_free(hal->hal_kmem_cachep, *cmdp);
+
+ /* Command pointer is set to NULL before returning */
+ *cmdp = NULL;
+
+ /* kstats - number of cmds freed */
+ hal->hal_kstats->cmd_free++;
+
+ TNF_PROBE_0_DEBUG(h1394_free_cmd_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+ return (DDI_SUCCESS);
+}
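+
+/*
+ * Illustrative HAL-side sketch: allocating and then freeing a command,
+ * where sl_handle is the handle returned by h1394_attach():
+ *
+ *	cmd1394_cmd_t *cmdp;
+ *	h1394_cmd_priv_t *hal_priv;
+ *
+ *	if (h1394_alloc_cmd(sl_handle, H1394_ALLOC_CMD_NOSLEEP, &cmdp,
+ *	    &hal_priv) == DDI_SUCCESS) {
+ *		... use the command ...
+ *		(void) h1394_free_cmd(sl_handle, &cmdp);
+ *	}
+ */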
+
+/*
+ * Function: h1394_cmd_is_complete()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ * command_id Pointer to the command that has
+ * just completed
+ *		cmd_type		H1394_AT_RESP => AT response,
+ *					H1394_AT_REQ => AT request
+ * status Command's completion status
+ *
+ * Output(s): None
+ *
+ * Description: h1394_cmd_is_complete() is called by the HAL whenever an
+ * outstanding command has completed (successfully or otherwise).
+ *		After determining whether it was an AT request or an AT
+ *		response, the command is dispatched to
+ * the appropriate handler in the 1394 Software Framework.
+ */
+void
+h1394_cmd_is_complete(void *sl_private, cmd1394_cmd_t *command_id,
+ uint32_t cmd_type, int status)
+{
+ s1394_hal_t *hal;
+ dev_info_t *dip;
+
+ TNF_PROBE_0_DEBUG(h1394_cmd_is_complete_enter,
+ S1394_TNF_SL_ATREQ_ATRESP_STACK, "");
+
+ hal = (s1394_hal_t *)sl_private;
+
+ /* Is it AT_RESP or AT_REQ? */
+ switch (cmd_type) {
+ case H1394_AT_REQ:
+ s1394_atreq_cmd_complete(hal, command_id, status);
+ break;
+
+ case H1394_AT_RESP:
+ s1394_atresp_cmd_complete(hal, command_id, status);
+ break;
+
+ default:
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_1(h1394_cmd_is_complete_error,
+ S1394_TNF_SL_ATREQ_ATRESP_ERROR, "",
+ tnf_string, msg, "Invalid command type specified");
+ break;
+ }
+
+ TNF_PROBE_0_DEBUG(h1394_cmd_is_complete_exit,
+ S1394_TNF_SL_ATREQ_ATRESP_STACK, "");
+}
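+
+/*
+ * Example (sketch): a HAL's AT-request completion path might notify the
+ * framework as below, where "status" is one of the completion codes the
+ * framework defines for this interface (h1394.h) and "done_cmd" is the
+ * command that just finished.
+ *
+ *	h1394_cmd_is_complete(hal_handle, done_cmd, H1394_AT_REQ, status);
+ */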
+
+/*
+ * Function: h1394_bus_reset()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ *
+ * Output(s): selfid_buf_addr The pointer to a buffer into which
+ * any Self ID packets should be put
+ *
+ * Description: h1394_bus_reset() is called whenever a 1394 bus reset event
+ * is detected by the HAL. This routine simply prepares for
+ * the subsequent Self ID packets.
+ */
+void
+h1394_bus_reset(void *sl_private, void **selfid_buf_addr)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(h1394_bus_reset_enter, S1394_TNF_SL_BR_STACK, "");
+
+ hal = (s1394_hal_t *)sl_private;
+
+ mutex_enter(&hal->topology_tree_mutex);
+
+ /* Update the HAL's state */
+ if (hal->hal_state != S1394_HAL_SHUTDOWN) {
+ hal->hal_state = S1394_HAL_RESET;
+ } else {
+ mutex_exit(&hal->topology_tree_mutex);
+ return;
+ }
+
+ if (hal->initiated_bus_reset == B_TRUE) {
+ hal->initiated_bus_reset = B_FALSE;
+ if (hal->num_bus_reset_till_fail > 0) {
+ hal->num_bus_reset_till_fail--;
+ } else {
+ TNF_PROBE_2(h1394_bus_reset_error,
+ S1394_TNF_SL_BR_ERROR, "",
+ tnf_string, msg, "Bus reset fail (too many resets)",
+ tnf_uint, br_type, hal->initiated_br_reason);
+ }
+ } else {
+ hal->num_bus_reset_till_fail = NUM_BR_FAIL;
+ }
+
+ /* Reset the IRM node ID */
+ hal->IRM_node = -1;
+
+ /* Slowest node defaults to IEEE1394_S400 */
+ hal->slowest_node_speed = IEEE1394_S400;
+
+ /* Pick a SelfID buffer to give */
+ if (hal->current_buffer == 0) {
+ *selfid_buf_addr = (void *)hal->selfid_buf1;
+ hal->current_buffer = 1;
+ } else {
+ *selfid_buf_addr = (void *)hal->selfid_buf0;
+ hal->current_buffer = 0;
+ }
+
+ /* Disable the CSR topology_map (temporarily) */
+ s1394_CSR_topology_map_disable(hal);
+
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Reset the Bus Manager node ID */
+ mutex_enter(&hal->bus_mgr_node_mutex);
+ hal->bus_mgr_node = -1;
+ mutex_exit(&hal->bus_mgr_node_mutex);
+
+ TNF_PROBE_0_DEBUG(h1394_bus_reset_exit, S1394_TNF_SL_BR_STACK, "");
+}
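+
+/*
+ * Example (sketch): on a bus-reset interrupt the HAL asks the framework
+ * where to deposit the incoming Self ID packets, then programs its
+ * controller to DMA them into that buffer:
+ *
+ *	void	*selfid_buf;
+ *
+ *	h1394_bus_reset(hal_handle, &selfid_buf);
+ */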
+
+/*
+ * Function: h1394_self_ids()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ * selfid_buf_addr Pointer to the Self ID buffer
+ * selfid_size The size of the filled part of the
+ * Self ID buffer
+ * node_id The local (host) node ID for the
+ * current generation
+ * generation_count The current generation number
+ *
+ * Output(s): None
+ *
+ * Description:	h1394_self_ids() does a lot of the work at bus reset.  It
+ *		takes the Self ID packets and parses them, builds a topology
+ *		tree representation of them, calculates the gap count, IRM,
+ *		and speed map, does any node matching that's possible, and
+ *		then wakes up the br_thread.
+ */
+void
+h1394_self_ids(void *sl_private, void *selfid_buf_addr, uint32_t selfid_size,
+ uint32_t node_id, uint32_t generation_count)
+{
+ s1394_hal_t *hal;
+ int diameter;
+ uint_t gen_diff, gen_rollover;
+ boolean_t tree_copied = B_FALSE;
+ ushort_t saved_number_of_nodes;
+
+ /*
+ * NOTE: current topology tree is referred to as topology_tree
+ * and the old topology tree is referred to as old_tree.
+ * tree_valid indicates selfID buffer checked out OK and we were
+ * able to build the topology tree.
+ * tree_processed indicates we read the config ROMs as needed.
+ */
+ TNF_PROBE_1_DEBUG(h1394_self_ids_enter, S1394_TNF_SL_BR_STACK, "",
+ tnf_uint, hal_generation, generation_count);
+
+ hal = (s1394_hal_t *)sl_private;
+
+ /* Lock the topology tree */
+ mutex_enter(&hal->topology_tree_mutex);
+ if (hal->hal_state == S1394_HAL_SHUTDOWN) {
+ mutex_exit(&hal->topology_tree_mutex);
+ TNF_PROBE_0_DEBUG(h1394_self_ids_exit, S1394_TNF_SL_BR_STACK,
+ "");
+ return;
+ }
+
+ /* kstats - number of selfid completes */
+ hal->hal_kstats->selfid_complete++;
+
+ if (generation_count > hal->generation_count) {
+ gen_diff = generation_count - hal->generation_count;
+ hal->hal_kstats->bus_reset += gen_diff;
+ } else {
+ gen_diff = hal->generation_count - generation_count;
+ /* Use max_generation to determine how many bus resets */
+ hal->hal_kstats->bus_reset +=
+ (hal->halinfo.max_generation - gen_diff);
+ }
+
+ /*
+ * If the current tree has a valid topology tree (selfids
+ * checked out OK etc) and config roms read as needed,
+ * then make it the old tree before building a new one.
+ */
+ if ((hal->topology_tree_valid == B_TRUE) &&
+ (hal->topology_tree_processed == B_TRUE)) {
+ TNF_PROBE_0_DEBUG(h1394_self_ids_tree_copy,
+ S1394_TNF_SL_BR_STACK, "");
+ /* Trees are switched after the copy completes */
+ s1394_copy_old_tree(hal);
+ tree_copied = B_TRUE;
+ }
+
+ /* Set the new generation and node id */
+ hal->node_id = node_id;
+ hal->generation_count = generation_count;
+
+ /* Invalidate the current topology tree */
+ hal->topology_tree_valid = B_FALSE;
+ hal->topology_tree_processed = B_FALSE;
+ hal->cfgroms_being_read = 0;
+
+ TNF_PROBE_0_DEBUG(h1394_self_ids_parse_selfid, S1394_TNF_SL_BR_STACK,
+ "");
+
+ /*
+ * Save the number of nodes prior to parsing the self id buffer.
+ * We need this saved value while initializing the topology tree
+ * (for non-copy case).
+ */
+ saved_number_of_nodes = hal->number_of_nodes;
+
+ /* Parse the SelfID buffer */
+ if (s1394_parse_selfid_buffer(hal, selfid_buf_addr, selfid_size) !=
+ DDI_SUCCESS) {
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+ TNF_PROBE_1(h1394_self_ids_error, S1394_TNF_SL_BR_ERROR, "",
+ tnf_string, msg, "Unable to parse selfID buffer");
+ TNF_PROBE_0_DEBUG(h1394_self_ids_exit, S1394_TNF_SL_BR_STACK,
+ "");
+
+ /* kstats - SelfID buffer error */
+ hal->hal_kstats->selfid_buffer_error++;
+ return; /* Error parsing SelfIDs */
+ }
+
+ /* Sort the SelfID packets by node number (if it's a 1995 PHY) */
+ if (hal->halinfo.phy == H1394_PHY_1995) {
+ TNF_PROBE_0_DEBUG(h1394_self_ids_sort, S1394_TNF_SL_BR_STACK,
+ "");
+ s1394_sort_selfids(hal);
+ }
+
+ /*
+ * Update the cycle master timer - if the timer is set and
+ * we were the root but we are not anymore, then disable it.
+ */
+ mutex_enter(&hal->cm_timer_mutex);
+ if ((hal->cm_timer_set == B_TRUE) &&
+ ((hal->old_number_of_nodes - 1) ==
+ IEEE1394_NODE_NUM(hal->old_node_id)) &&
+ ((hal->number_of_nodes - 1) !=
+ IEEE1394_NODE_NUM(hal->node_id))) {
+ mutex_exit(&hal->cm_timer_mutex);
+ (void) untimeout(hal->cm_timer);
+ } else {
+ mutex_exit(&hal->cm_timer_mutex);
+ }
+
+ TNF_PROBE_0_DEBUG(h1394_self_ids_init_topology, S1394_TNF_SL_BR_STACK,
+ "");
+ s1394_init_topology_tree(hal, tree_copied, saved_number_of_nodes);
+
+ /* Determine the 1394 bus gap count */
+ hal->gap_count = s1394_get_current_gap_count(hal);
+ /* If gap counts are inconsistent, reset */
+ if (hal->gap_count == -1) {
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+ TNF_PROBE_1(h1394_self_ids_error, S1394_TNF_SL_BR_ERROR, "",
+ tnf_string, msg, "Invalid gap counts in SelfID pkts");
+ TNF_PROBE_0_DEBUG(h1394_self_ids_exit, S1394_TNF_SL_BR_STACK,
+ "");
+
+ /* kstats - SelfID buffer error (invalid gap counts) */
+ hal->hal_kstats->selfid_buffer_error++;
+
+ if (s1394_ignore_invalid_gap_cnt == 1) {
+ /* Lock the topology tree again */
+ mutex_enter(&hal->topology_tree_mutex);
+ hal->gap_count = 0x3F;
+ } else {
+ return; /* Invalid gap counts in SelfID buffer */
+ }
+ }
+
+ TNF_PROBE_1_DEBUG(h1394_self_ids_get_gap_count, S1394_TNF_SL_BR_STACK,
+ "", tnf_uint, gap_count, hal->gap_count);
+
+ /* Determine the Isoch Resource Manager */
+ hal->IRM_node = s1394_get_isoch_rsrc_mgr(hal);
+
+ TNF_PROBE_1_DEBUG(h1394_self_ids_IRM_node, S1394_TNF_SL_BR_STACK, "",
+ tnf_int, IRM_node, hal->IRM_node);
+
+ TNF_PROBE_0_DEBUG(h1394_self_ids_build_topology_tree,
+ S1394_TNF_SL_BR_STACK, "");
+
+ /* Build the topology tree */
+ if (s1394_topology_tree_build(hal) != DDI_SUCCESS) {
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+ TNF_PROBE_1(h1394_self_ids_error, S1394_TNF_SL_BR_ERROR, "",
+ tnf_string, msg, "Error building the topology tree");
+ TNF_PROBE_0_DEBUG(h1394_self_ids_exit, S1394_TNF_SL_BR_STACK,
+ "");
+
+ /* kstats - SelfID buffer error (Invalid topology tree) */
+ hal->hal_kstats->selfid_buffer_error++;
+ return; /* Error building topology tree from SelfIDs */
+ }
+
+ TNF_PROBE_0_DEBUG(h1394_self_ids_topology_CSRs, S1394_TNF_SL_BR_STACK,
+ "");
+
+ /* Update the CSR topology_map */
+ s1394_CSR_topology_map_update(hal);
+
+ /* Calculate the diameter */
+ diameter = s1394_topology_tree_calculate_diameter(hal);
+
+ /* Determine the optimum gap count */
+ hal->optimum_gap_count = s1394_gap_count_optimize(diameter);
+
+ TNF_PROBE_1_DEBUG(h1394_self_ids_diameter_and_gap_count,
+ S1394_TNF_SL_BR_STACK, "",
+ tnf_uint, optimum_gap, hal->optimum_gap_count);
+
+ TNF_PROBE_0_DEBUG(h1394_self_ids_speed_map, S1394_TNF_SL_BR_STACK, "");
+
+ /* Fill in the speed map */
+ s1394_speed_map_fill(hal);
+
+ /* Initialize the two trees (for tree walking) */
+ s1394_topology_tree_mark_all_unvisited(hal);
+ s1394_old_tree_mark_all_unvisited(hal);
+ s1394_old_tree_mark_all_unmatched(hal);
+
+ /* Are both trees (old and new) valid? */
+ if ((hal->old_tree_valid == B_TRUE) &&
+ (hal->topology_tree_valid == B_TRUE)) {
+ /* If HAL was in a suspended state, then do no matching */
+ if (hal->hal_was_suspended == B_TRUE) {
+ hal->hal_was_suspended = B_FALSE;
+ } else {
+ gen_rollover = hal->halinfo.max_generation + 1;
+ /* If only one bus reset occurred, match the trees */
+ if (((hal->old_generation_count + 1) % gen_rollover) ==
+ generation_count) {
+ TNF_PROBE_0_DEBUG(h1394_self_ids_tree_matching,
+ S1394_TNF_SL_BR_STACK, "");
+ s1394_match_tree_nodes(hal);
+ }
+ }
+ }
+
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Wake up the bus reset processing thread */
+ s1394_tickle_bus_reset_thread(hal);
+
+ TNF_PROBE_0_DEBUG(h1394_self_ids_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
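+
+/*
+ * Example (sketch): once Self ID reception completes, the HAL hands the
+ * filled buffer to the framework; all variable names are illustrative.
+ *
+ *	h1394_self_ids(hal_handle, selfid_buf, selfid_size,
+ *	    local_node_id, current_generation);
+ */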
+
+/*
+ * Function: h1394_read_request()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ * req The incoming AR request
+ *
+ * Output(s): None
+ *
+ * Description: h1394_read_request() receives incoming AR requests. These
+ * asynchronous read requests are dispatched to the appropriate
+ * target (if one has registered) or are handled by the 1394
+ * Software Framework, which will send out an appropriate
+ * response.
+ */
+void
+h1394_read_request(void *sl_private, cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+ s1394_cmd_priv_t *s_priv;
+ s1394_addr_space_blk_t *addr_blk;
+ dev_info_t *dip;
+ uint64_t end_of_request;
+ uint32_t offset;
+ size_t cmd_length;
+ uchar_t *bufp_addr;
+ uchar_t *begin_ptr;
+ uchar_t *end_ptr;
+ uchar_t *tmp_ptr;
+ void (*recv_read_req)(cmd1394_cmd_t *);
+
+ TNF_PROBE_0_DEBUG(h1394_read_request_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ hal = (s1394_hal_t *)sl_private;
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(req);
+
+ s_priv->cmd_priv_xfer_type = S1394_CMD_READ;
+
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ cmd_length = IEEE1394_QUADLET;
+ hal->hal_kstats->arreq_quad_rd++;
+ break;
+
+ case CMD1394_ASYNCH_RD_BLOCK:
+ cmd_length = req->cmd_u.b.blk_length;
+ hal->hal_kstats->arreq_blk_rd++;
+ break;
+
+ default:
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_1(h1394_read_request_error,
+ S1394_TNF_SL_ARREQ_ERROR, "",
+ tnf_string, msg, "Invalid command type specified");
+ TNF_PROBE_0_DEBUG(h1394_read_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ /* Lock the "used" tree */
+ mutex_enter(&hal->addr_space_used_mutex);
+
+ /* Has the 1394 address been allocated? */
+ addr_blk = s1394_used_tree_search(hal, req->cmd_addr);
+
+ TNF_PROBE_0_DEBUG(h1394_read_request_addr_search,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ /* If it wasn't found, it isn't owned... */
+ if (addr_blk == NULL) {
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(h1394_read_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ /* Does the WHOLE request fit in the allocated block? */
+ end_of_request = (req->cmd_addr + cmd_length) - 1;
+ if (end_of_request > addr_blk->addr_hi) {
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(h1394_read_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ /* Is a read request valid for this address space? */
+ if (!(addr_blk->addr_enable & T1394_ADDR_RDENBL)) {
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(h1394_read_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ /* Make sure quadlet requests are quadlet-aligned */
+ offset = req->cmd_addr - addr_blk->addr_lo;
+ if ((req->cmd_type == CMD1394_ASYNCH_RD_QUAD) &&
+ ((offset & 0x3) != 0)) {
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(h1394_read_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ /* Fill in the backing store if necessary */
+ if (addr_blk->kmem_bufp != NULL) {
+ offset = req->cmd_addr - addr_blk->addr_lo;
+ bufp_addr = (uchar_t *)addr_blk->kmem_bufp + offset;
+
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ bcopy((void *)bufp_addr,
+ (void *)&(req->cmd_u.q.quadlet_data), cmd_length);
+ break;
+
+ case CMD1394_ASYNCH_RD_BLOCK:
+ begin_ptr = req->cmd_u.b.data_block->b_wptr;
+ end_ptr = begin_ptr + cmd_length;
+ tmp_ptr = req->cmd_u.b.data_block->b_datap->db_lim;
+ if (end_ptr <= tmp_ptr) {
+ bcopy((void *)bufp_addr, (void *)begin_ptr,
+ cmd_length);
+				/* Update b_wptr to reflect the new data */
+ req->cmd_u.b.data_block->b_wptr = end_ptr;
+ } else {
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_1(h1394_read_request_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string,
+ msg, "Error - mblk too small for request");
+ TNF_PROBE_0_DEBUG(h1394_read_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+ break;
+
+ default:
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_1(h1394_read_request_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Invalid command type specified");
+ TNF_PROBE_0_DEBUG(h1394_read_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+ }
+
+ /* Fill in the rest of the info in the request */
+ s_priv->arreq_valid_addr = B_TRUE;
+ req->cmd_callback_arg = addr_blk->addr_arg;
+ recv_read_req = addr_blk->addr_events.recv_read_request;
+
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+
+ /*
+ * Add no code that modifies the command after the target
+ * callback is called or after the response is sent to the
+ * HAL.
+ */
+ if (recv_read_req != NULL) {
+ TNF_PROBE_0_DEBUG(h1394_read_request_do_callback,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ recv_read_req(req);
+ } else {
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(h1394_read_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ TNF_PROBE_0_DEBUG(h1394_read_request_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+}
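+
+/*
+ * Example (sketch): a target that registered recv_read_request receives
+ * the request in its callback, fills in the data and result, and
+ * completes it through the target interface.  The use of
+ * t1394_recv_request_done() and the quadlet value are illustrative and
+ * should be checked against t1394.h.
+ *
+ *	static void
+ *	xxx_recv_read_request(cmd1394_cmd_t *req)
+ *	{
+ *		req->cmd_u.q.quadlet_data = 0x12345678;
+ *		req->cmd_result = IEEE1394_RESP_COMPLETE;
+ *		(void) t1394_recv_request_done(xxx_t1394_hdl, req, 0);
+ *	}
+ */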
+
+/*
+ * Function: h1394_write_request()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ * req The incoming AR request
+ *
+ * Output(s): None
+ *
+ * Description: h1394_write_request() receives incoming AR requests. These
+ * asynchronous write requests are dispatched to the appropriate
+ * target (if one has registered) or are handled by the 1394
+ * Software Framework, which will send out an appropriate
+ * response.
+ */
+void
+h1394_write_request(void *sl_private, cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+ s1394_cmd_priv_t *s_priv;
+ h1394_cmd_priv_t *h_priv;
+ s1394_addr_space_blk_t *addr_blk;
+ dev_info_t *dip;
+ uint32_t offset;
+ size_t cmd_length;
+ uchar_t *bufp_addr;
+ uchar_t *begin_ptr;
+ uchar_t *end_ptr;
+ uchar_t *tmp_ptr;
+ uint64_t end_of_request;
+ boolean_t posted_write = B_FALSE;
+ boolean_t write_error = B_FALSE;
+ void (*recv_write_req)(cmd1394_cmd_t *);
+
+ TNF_PROBE_0_DEBUG(h1394_write_request_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ hal = (s1394_hal_t *)sl_private;
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(req);
+
+ s_priv->cmd_priv_xfer_type = S1394_CMD_WRITE;
+
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_WR_QUAD:
+ cmd_length = IEEE1394_QUADLET;
+ hal->hal_kstats->arreq_quad_wr++;
+ break;
+
+ case CMD1394_ASYNCH_WR_BLOCK:
+ cmd_length = req->cmd_u.b.blk_length;
+ hal->hal_kstats->arreq_blk_wr++;
+ hal->hal_kstats->arreq_blk_wr_size += cmd_length;
+ break;
+
+ default:
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_1(h1394_write_request_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Invalid command type specified");
+ TNF_PROBE_0_DEBUG(h1394_write_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ /* Lock the "used" tree */
+ mutex_enter(&hal->addr_space_used_mutex);
+
+ /* Has the 1394 address been allocated? */
+ addr_blk = s1394_used_tree_search(hal, req->cmd_addr);
+
+ TNF_PROBE_0_DEBUG(h1394_write_request_addr_search,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ /* Is this a posted write request? */
+ posted_write = s1394_is_posted_write(hal, req->cmd_addr);
+
+ /* If it wasn't found, it isn't owned... */
+ if (addr_blk == NULL) {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ write_error = B_TRUE;
+ goto write_error_check;
+ }
+
+ /* Does the WHOLE request fit in the allocated block? */
+ end_of_request = (req->cmd_addr + cmd_length) - 1;
+ if (end_of_request > addr_blk->addr_hi) {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ write_error = B_TRUE;
+ goto write_error_check;
+ }
+
+ /* Is a write request valid for this address space? */
+ if (!(addr_blk->addr_enable & T1394_ADDR_WRENBL)) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ write_error = B_TRUE;
+ goto write_error_check;
+ }
+
+ /* Make sure quadlet request is quadlet aligned */
+ offset = req->cmd_addr - addr_blk->addr_lo;
+ if ((req->cmd_type == CMD1394_ASYNCH_WR_QUAD) &&
+ ((offset & 0x3) != 0)) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ write_error = B_TRUE;
+ goto write_error_check;
+ }
+
+write_error_check:
+ /* Check if posted-write when sending error responses */
+ if (write_error == B_TRUE) {
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+
+ if (posted_write == B_TRUE) {
+ /* Get a pointer to the HAL private struct */
+ h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
+ hal->hal_kstats->arreq_posted_write_error++;
+ /* Free the command - Pass it back to the HAL */
+ HAL_CALL(hal).response_complete(
+ hal->halinfo.hal_private, req, h_priv);
+ TNF_PROBE_0_DEBUG(h1394_write_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ } else {
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(h1394_write_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+ }
+
+ /* Fill in the backing store if necessary */
+ if (addr_blk->kmem_bufp != NULL) {
+ offset = req->cmd_addr - addr_blk->addr_lo;
+ bufp_addr = (uchar_t *)addr_blk->kmem_bufp + offset;
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_WR_QUAD:
+ bcopy((void *)&(req->cmd_u.q.quadlet_data),
+ (void *)bufp_addr, cmd_length);
+ break;
+
+ case CMD1394_ASYNCH_WR_BLOCK:
+ begin_ptr = req->cmd_u.b.data_block->b_rptr;
+ end_ptr = begin_ptr + cmd_length;
+ tmp_ptr = req->cmd_u.b.data_block->b_wptr;
+ if (end_ptr <= tmp_ptr) {
+ bcopy((void *)begin_ptr, (void *)bufp_addr,
+ cmd_length);
+ } else {
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_1(h1394_write_request_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string,
+ msg, "Error - mblk too small for request");
+ TNF_PROBE_0_DEBUG(h1394_write_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+ break;
+
+ default:
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_1(h1394_write_request_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Invalid command type specified");
+ TNF_PROBE_0_DEBUG(h1394_write_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+ }
+
+ /* Fill in the rest of the info in the request */
+ if (addr_blk->addr_type == T1394_ADDR_POSTED_WRITE)
+ s_priv->posted_write = B_TRUE;
+
+ s_priv->arreq_valid_addr = B_TRUE;
+ req->cmd_callback_arg = addr_blk->addr_arg;
+ recv_write_req = addr_blk->addr_events.recv_write_request;
+
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+
+ /*
+ * Add no code that modifies the command after the target
+ * callback is called or after the response is sent to the
+ * HAL.
+ */
+ if (recv_write_req != NULL) {
+ TNF_PROBE_0_DEBUG(h1394_write_request_do_callback,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ recv_write_req(req);
+ } else {
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(h1394_write_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ TNF_PROBE_0_DEBUG(h1394_write_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+}
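+
+/*
+ * Example (sketch): whether a write is "posted" is a property of the
+ * address block the target allocated.  A target might request a
+ * posted-write block roughly as below; the t1394_alloc_addr_t field
+ * names are assumptions based on t1394_alloc_addr(9F) and should be
+ * verified against that interface.
+ *
+ *	t1394_alloc_addr_t	aa;
+ *	int			result;
+ *
+ *	bzero(&aa, sizeof (aa));
+ *	aa.aa_type = T1394_ADDR_POSTED_WRITE;
+ *	aa.aa_length = bufsize;
+ *	aa.aa_enable = T1394_ADDR_WRENBL;
+ *	aa.aa_evts.recv_write_request = xxx_recv_write_request;
+ *	(void) t1394_alloc_addr(xxx_t1394_hdl, &aa, 0, &result);
+ */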
+
+/*
+ * Function: h1394_lock_request()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ * req The incoming AR request
+ *
+ * Output(s): None
+ *
+ * Description: h1394_lock_request() receives incoming AR requests. These
+ * asynchronous lock requests are dispatched to the appropriate
+ * target (if one has registered) or are handled by the 1394
+ * Software Framework, which will send out an appropriate
+ * response.
+ */
+void
+h1394_lock_request(void *sl_private, cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+ s1394_cmd_priv_t *s_priv;
+ s1394_addr_space_blk_t *addr_blk;
+ dev_info_t *dip;
+ uint64_t end_of_request;
+ uint32_t offset;
+ uchar_t *bufp_addr;
+ cmd1394_lock_type_t lock_type;
+ void (*recv_lock_req)(cmd1394_cmd_t *);
+
+ TNF_PROBE_0_DEBUG(h1394_lock_request_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ hal = (s1394_hal_t *)sl_private;
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(req);
+
+ s_priv->cmd_priv_xfer_type = S1394_CMD_LOCK;
+
+ /* Lock the "used" tree */
+ mutex_enter(&hal->addr_space_used_mutex);
+
+ /* Has the 1394 address been allocated? */
+ addr_blk = s1394_used_tree_search(hal, req->cmd_addr);
+
+ /* If it wasn't found, it isn't owned... */
+ if (addr_blk == NULL) {
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(h1394_lock_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ /* Does the WHOLE request fit in the allocated block? */
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_LOCK_32:
+ end_of_request = (req->cmd_addr + IEEE1394_QUADLET) - 1;
+ /* kstats - 32-bit lock request */
+ hal->hal_kstats->arreq_lock32++;
+ break;
+
+ case CMD1394_ASYNCH_LOCK_64:
+ end_of_request = (req->cmd_addr + IEEE1394_OCTLET) - 1;
+ /* kstats - 64-bit lock request */
+ hal->hal_kstats->arreq_lock64++;
+ break;
+
+ default:
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_1(h1394_lock_request_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Invalid command type specified");
+ TNF_PROBE_0_DEBUG(h1394_lock_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ if (end_of_request > addr_blk->addr_hi) {
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(h1394_lock_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ /* Is a lock request valid for this address space? */
+ if (!(addr_blk->addr_enable & T1394_ADDR_LKENBL)) {
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(h1394_lock_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ /* Fill in the backing store if necessary */
+ if (addr_blk->kmem_bufp != NULL) {
+ offset = req->cmd_addr - addr_blk->addr_lo;
+ bufp_addr = (uchar_t *)addr_blk->kmem_bufp + offset;
+
+ if (req->cmd_type == CMD1394_ASYNCH_LOCK_32) {
+ uint32_t old_value;
+ uint32_t arg_value;
+ uint32_t data_value;
+ uint32_t new_value;
+
+ arg_value = req->cmd_u.l32.arg_value;
+ data_value = req->cmd_u.l32.data_value;
+ lock_type = req->cmd_u.l32.lock_type;
+ bcopy((void *)bufp_addr, (void *)&old_value,
+ IEEE1394_QUADLET);
+
+ switch (lock_type) {
+ case CMD1394_LOCK_MASK_SWAP:
+ /* Mask-Swap (see P1394A - Table 1.7) */
+ new_value = (data_value & arg_value) |
+ (old_value & ~arg_value);
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value, (void *)bufp_addr,
+ IEEE1394_QUADLET);
+ req->cmd_u.l32.old_value = old_value;
+ break;
+
+ case CMD1394_LOCK_COMPARE_SWAP:
+ /* Compare-Swap */
+ if (old_value == arg_value) {
+ new_value = data_value;
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value,
+ (void *)bufp_addr,
+ IEEE1394_QUADLET);
+ }
+ req->cmd_u.l32.old_value = old_value;
+ break;
+
+ case CMD1394_LOCK_FETCH_ADD:
+ /* Fetch-Add (see P1394A - Table 1.7) */
+ old_value = T1394_DATA32(old_value);
+ new_value = old_value + data_value;
+ new_value = T1394_DATA32(new_value);
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value, (void *)bufp_addr,
+ IEEE1394_QUADLET);
+ req->cmd_u.l32.old_value = old_value;
+ break;
+
+ case CMD1394_LOCK_LITTLE_ADD:
+ /* Little-Add (see P1394A - Table 1.7) */
+ old_value = T1394_DATA32(old_value);
+ new_value = old_value + data_value;
+ new_value = T1394_DATA32(new_value);
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value, (void *)bufp_addr,
+ IEEE1394_QUADLET);
+ req->cmd_u.l32.old_value = old_value;
+ break;
+
+ case CMD1394_LOCK_BOUNDED_ADD:
+ /* Bounded-Add (see P1394A - Table 1.7) */
+ old_value = T1394_DATA32(old_value);
+ if (old_value != arg_value) {
+ new_value = old_value + data_value;
+ new_value = T1394_DATA32(new_value);
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value,
+ (void *)bufp_addr,
+ IEEE1394_QUADLET);
+ }
+ req->cmd_u.l32.old_value = old_value;
+ break;
+
+ case CMD1394_LOCK_WRAP_ADD:
+ /* Wrap-Add (see P1394A - Table 1.7) */
+ old_value = T1394_DATA32(old_value);
+ if (old_value != arg_value) {
+ new_value = old_value + data_value;
+ } else {
+ new_value = data_value;
+ }
+ new_value = T1394_DATA32(new_value);
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value, (void *)bufp_addr,
+ IEEE1394_QUADLET);
+ req->cmd_u.l32.old_value = old_value;
+ break;
+
+ default:
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_1(h1394_lock_request_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string,
+ msg, "Invalid lock_type");
+ TNF_PROBE_0_DEBUG(h1394_lock_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+ } else {
+ /* Handling for the 8-byte (64-bit) lock requests */
+ uint64_t old_value;
+ uint64_t arg_value;
+ uint64_t data_value;
+ uint64_t new_value;
+
+ arg_value = req->cmd_u.l64.arg_value;
+ data_value = req->cmd_u.l64.data_value;
+ lock_type = req->cmd_u.l64.lock_type;
+ bcopy((void *)bufp_addr, (void *)&old_value,
+ IEEE1394_OCTLET);
+
+ switch (lock_type) {
+ case CMD1394_LOCK_MASK_SWAP:
+ /* Mask-Swap (see P1394A - Table 1.7) */
+ new_value = (data_value & arg_value) |
+ (old_value & ~arg_value);
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value, (void *)bufp_addr,
+ IEEE1394_OCTLET);
+ req->cmd_u.l64.old_value = old_value;
+ break;
+
+ case CMD1394_LOCK_COMPARE_SWAP:
+ /* Compare-Swap */
+ if (old_value == arg_value) {
+ new_value = data_value;
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value,
+ (void *)bufp_addr,
+ IEEE1394_OCTLET);
+ }
+ req->cmd_u.l64.old_value = old_value;
+ break;
+
+ case CMD1394_LOCK_FETCH_ADD:
+ /* Fetch-Add (see P1394A - Table 1.7) */
+ old_value = T1394_DATA64(old_value);
+ new_value = old_value + data_value;
+ new_value = T1394_DATA64(new_value);
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value, (void *)bufp_addr,
+ IEEE1394_OCTLET);
+ req->cmd_u.l64.old_value = old_value;
+ break;
+
+ case CMD1394_LOCK_LITTLE_ADD:
+ /* Little-Add (see P1394A - Table 1.7) */
+ old_value = T1394_DATA64(old_value);
+ new_value = old_value + data_value;
+ new_value = T1394_DATA64(new_value);
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value, (void *)bufp_addr,
+ IEEE1394_OCTLET);
+ req->cmd_u.l64.old_value = old_value;
+ break;
+
+ case CMD1394_LOCK_BOUNDED_ADD:
+ /* Bounded-Add (see P1394A - Table 1.7) */
+ old_value = T1394_DATA64(old_value);
+ if (old_value != arg_value) {
+ new_value = old_value + data_value;
+ new_value = T1394_DATA64(new_value);
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value,
+ (void *)bufp_addr,
+ IEEE1394_OCTLET);
+ }
+ req->cmd_u.l64.old_value = old_value;
+ break;
+
+ case CMD1394_LOCK_WRAP_ADD:
+ /* Wrap-Add (see P1394A - Table 1.7) */
+ old_value = T1394_DATA64(old_value);
+ if (old_value != arg_value) {
+ new_value = old_value + data_value;
+ } else {
+ new_value = data_value;
+ }
+ new_value = T1394_DATA64(new_value);
+ /* Copy new_value into backing store */
+ bcopy((void *)&new_value, (void *)bufp_addr,
+ IEEE1394_OCTLET);
+ req->cmd_u.l64.old_value = old_value;
+ break;
+
+ default:
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_1(h1394_lock_request_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string,
+ msg, "Invalid lock_type");
+ TNF_PROBE_0_DEBUG(h1394_lock_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+ }
+ }
+
+ /* Fill in the rest of the info in the request */
+ s_priv->arreq_valid_addr = B_TRUE;
+ req->cmd_callback_arg = addr_blk->addr_arg;
+ recv_lock_req = addr_blk->addr_events.recv_lock_request;
+
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+
+ /*
+ * Add no code that modifies the command after the target
+ * callback is called or after the response is sent to the
+ * HAL.
+ */
+ if (recv_lock_req != NULL) {
+ TNF_PROBE_0_DEBUG(h1394_lock_request_do_callback,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ recv_lock_req(req);
+ } else {
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(h1394_lock_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return;
+ }
+
+ TNF_PROBE_0_DEBUG(h1394_lock_request_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+}
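+
+/*
+ * Worked example of the mask-swap arithmetic above (values are
+ * illustrative): with old_value 0xAABBCCDD, arg_value 0x0000FFFF and
+ * data_value 0x00001234,
+ *
+ *	new_value = (0x00001234 & 0x0000FFFF) | (0xAABBCCDD & 0xFFFF0000)
+ *		  = 0x00001234 | 0xAABB0000
+ *		  = 0xAABB1234
+ *
+ * i.e. only the bits selected by arg_value are replaced, and the
+ * requester gets the prior contents back in old_value.
+ */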
+
+/*
+ * Function: h1394_ioctl()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ * cmd ioctl cmd
+ * arg argument for the ioctl cmd
+ * mode mode bits (see ioctl(9e))
+ * cred_p cred structure pointer
+ * rval_p pointer to return value (see ioctl(9e))
+ *
+ * Output(s): EINVAL if not a DEVCTL ioctl, else return value from s1394_ioctl
+ *
+ * Description: h1394_ioctl() implements non-HAL specific ioctls. Currently,
+ * DEVCTL ioctls are the only generic ioctls supported.
+ */
+int
+h1394_ioctl(void *sl_private, int cmd, intptr_t arg, int mode, cred_t *cred_p,
+ int *rval_p)
+{
+ int status;
+
+ TNF_PROBE_0_DEBUG(h1394_ioctl_enter, S1394_TNF_SL_IOCTL_STACK, "");
+
+ if ((cmd & DEVCTL_IOC) != DEVCTL_IOC)
+ return (EINVAL);
+
+ status = s1394_ioctl((s1394_hal_t *)sl_private, cmd, arg, mode,
+ cred_p, rval_p);
+
+ TNF_PROBE_1_DEBUG(h1394_ioctl_exit, S1394_TNF_SL_IOCTL_STACK, "",
+ tnf_int, status, status);
+ return (status);
+}
+
+/*
+ * Function: h1394_phy_packet()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ * packet_data Pointer to a buffer of packet data
+ * quadlet_count Length of the buffer
+ * timestamp Timestamp indicating time of arrival
+ *
+ * Output(s): None
+ *
+ * Description:	h1394_phy_packet() is not currently implemented, but it would
+ *		be used to process the responses to PHY ping packets in P1394A.
+ *		When a ping is sent out, a timestamp is recorded indicating its
+ *		time of departure.  Comparing that old timestamp with this
+ *		new (arrival) timestamp gives the time of flight, which can
+ *		be used to optimize the gap count.
+ */
+/* ARGSUSED */
+void
+h1394_phy_packet(void *sl_private, uint32_t *packet_data, uint_t quadlet_count,
+ uint_t timestamp)
+{
+ TNF_PROBE_0_DEBUG(h1394_phy_packet_enter, S1394_TNF_SL_STACK, "");
+
+ /* This interface is not yet implemented */
+ TNF_PROBE_1_DEBUG(h1394_phy_packet, S1394_TNF_SL_STACK, "",
+ tnf_string, msg, "h1394_phy_packet: Received");
+
+ TNF_PROBE_0_DEBUG(h1394_phy_packet_exit, S1394_TNF_SL_STACK, "");
+}
+
+/*
+ * Function: h1394_error_detected()
+ * Input(s): sl_private The HAL "handle" returned by
+ * h1394_attach()
+ * type The type of error the HAL detected
+ * arg Pointer to any extra information
+ *
+ * Output(s): None
+ *
+ * Description: h1394_error_detected() is used by the HAL to report errors
+ * to the 1394 Software Framework.
+ */
+void
+h1394_error_detected(void *sl_private, h1394_error_t type, void *arg)
+{
+ s1394_hal_t *hal;
+ uint_t hal_node_num;
+ uint_t IRM_node_num;
+
+ TNF_PROBE_0_DEBUG(h1394_error_detected_enter, S1394_TNF_SL_STACK, "");
+
+ hal = (s1394_hal_t *)sl_private;
+
+ switch (type) {
+ case H1394_LOCK_RESP_ERR:
+ TNF_PROBE_1(h1394_error_detected, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "Lock response error");
+ /* If we are the IRM, then initiate a bus reset */
+ mutex_enter(&hal->topology_tree_mutex);
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ IRM_node_num = hal->IRM_node;
+ mutex_exit(&hal->topology_tree_mutex);
+ if (IRM_node_num == hal_node_num)
+ s1394_initiate_hal_reset(hal, NON_CRITICAL);
+ break;
+
+ case H1394_POSTED_WR_ERR:
+ TNF_PROBE_2(h1394_error_detected, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "Posted write error detected",
+ tnf_opaque, addr, ((h1394_posted_wr_err_t *)arg)->addr);
+ break;
+
+ case H1394_SELF_INITIATED_SHUTDOWN:
+ TNF_PROBE_1(h1394_error_detected, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "HAL self-initiated shutdown");
+ s1394_hal_shutdown(hal, B_FALSE);
+ break;
+
+ case H1394_CYCLE_TOO_LONG:
+ /* Set a timer to become cycle master after 1 second */
+ mutex_enter(&hal->cm_timer_mutex);
+ hal->cm_timer_set = B_TRUE;
+ mutex_exit(&hal->cm_timer_mutex);
+ hal->cm_timer = timeout(s1394_cycle_too_long_callback, hal,
+ drv_usectohz(CYCLE_MASTER_TIMER * 1000));
+
+ TNF_PROBE_1(h1394_error_detected, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "Isochronous cycle too long error");
+ break;
+
+ default:
+ TNF_PROBE_2(h1394_error_detected, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "Unknown error type received",
+ tnf_uint, type, type);
+ break;
+ }
+
+ TNF_PROBE_0_DEBUG(h1394_error_detected_exit, S1394_TNF_SL_STACK, "");
+}
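+
+/*
+ * Example (sketch): a HAL reporting a failed posted write passes the
+ * faulting 1394 address in an h1394_posted_wr_err_t, matching what the
+ * H1394_POSTED_WR_ERR case above reads out of "arg":
+ *
+ *	h1394_posted_wr_err_t	wr_err;
+ *
+ *	wr_err.addr = failing_addr;
+ *	h1394_error_detected(hal_handle, H1394_POSTED_WR_ERR, &wr_err);
+ */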
diff --git a/usr/src/uts/common/io/1394/inc.flg b/usr/src/uts/common/io/1394/inc.flg
new file mode 100644
index 0000000000..bc5b654fc7
--- /dev/null
+++ b/usr/src/uts/common/io/1394/inc.flg
@@ -0,0 +1,82 @@
+#!/bin/sh
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2003 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#ident "%Z%%M% %I% %E% SMI"
+
+# This file brings down all that is needed to build just
+# 1394 drivers.
+#
+
+find_files "s.*.h" \
+ usr/src/uts/common/sys/1394
+
+find_files "s.*.h" \
+ usr/src/uts/common/sys
+
+find_files "s.*.h" \
+ usr/src/uts/sparc/sys
+
+find_files "s.*.h" \
+ usr/src/uts/sparc/v7/sys
+
+find_files "s.*.h" \
+ usr/src/uts/sparc/v9/sys
+
+find_files "s.*.h" \
+ usr/src/uts/sun/sys
+
+find_files "s.*.h" \
+ usr/src/uts/sun4u/sys
+
+find_files "s.*.h" \
+ usr/src/uts/common/vm
+
+find_files "s.*" \
+ usr/src/uts/sparc/hci1394 \
+ usr/src/uts/sparc/s1394
+
+echo_file usr/src/Makefile.master
+echo_file usr/src/Makefile.master.64
+echo_file usr/src/req.flg
+echo_file usr/src/Makefile.psm
+echo_file usr/src/Makefile.psm.targ
+echo_file usr/src/uts/Makefile
+echo_file usr/src/uts/Makefile.targ
+echo_file usr/src/uts/Makefile.uts
+echo_file usr/src/uts/sun/Makefile.files
+echo_file usr/src/uts/sun/Makefile.rules
+echo_file usr/src/uts/common/Makefile.files
+echo_file usr/src/uts/common/Makefile.rules
+echo_file usr/src/uts/common/sys/Makefile
+echo_file usr/src/uts/sparc/Makefile
+echo_file usr/src/uts/sparc/Makefile.files
+echo_file usr/src/uts/sparc/Makefile.rules
+echo_file usr/src/uts/sparc/Makefile.sparc
+echo_file usr/src/uts/sparc/Makefile.targ
+echo_file usr/src/uts/sparc/ml/sparc.il
+echo_file usr/src/uts/sparc/req.flg
+
diff --git a/usr/src/uts/common/io/1394/nx1394.c b/usr/src/uts/common/io/1394/nx1394.c
new file mode 100644
index 0000000000..ddc89240e3
--- /dev/null
+++ b/usr/src/uts/common/io/1394/nx1394.c
@@ -0,0 +1,566 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * nx1394.c
+ * 1394 Services Layer Nexus Support Routines
+ * Routines in this file implement nexus bus_ops.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/modctl.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/types.h>
+#include <sys/ddi_impldefs.h>
+
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+
+static int nx1394_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
+ ddi_dma_attr_t *attr, int (*waitfnp)(caddr_t), caddr_t arg,
+ ddi_dma_handle_t *handlep);
+
+static int nx1394_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
+ void *arg, void *result);
+
+static int nx1394_get_event_cookie(dev_info_t *dip, dev_info_t *rdip,
+ char *name, ddi_eventcookie_t *event_cookiep);
+
+static int nx1394_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
+ ddi_eventcookie_t eventhdl, void (*callback)(), void *arg,
+ ddi_callback_id_t *cb_id);
+
+static int nx1394_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id);
+
+static int nx1394_post_event(dev_info_t *dip, dev_info_t *rdip,
+ ddi_eventcookie_t eventhdl, void *impl_data);
+
+struct bus_ops nx1394_busops = {
+ BUSO_REV,
+ nullbusmap, /* bus_map */
+ NULL, /* bus_get_intrspec */
+ NULL, /* bus_add_intrspec */
+ NULL, /* bus_remove_intrspec */
+ i_ddi_map_fault, /* XXXX bus_map_fault */
+ ddi_dma_map, /* bus_dma_map */
+ nx1394_dma_allochdl,
+ ddi_dma_freehdl,
+ ddi_dma_bindhdl,
+ ddi_dma_unbindhdl,
+ ddi_dma_flush,
+ ddi_dma_win,
+ ddi_dma_mctl, /* bus_dma_ctl */
+ nx1394_bus_ctl, /* bus_ctl */
+ ddi_bus_prop_op, /* bus_prop_op */
+	nx1394_get_event_cookie,	/* (*bus_get_eventcookie)(); */
+ nx1394_add_eventcall, /* (*bus_add_eventcall)(); */
+ nx1394_remove_eventcall, /* (*bus_remove_eventcall)(); */
+ nx1394_post_event, /* (*bus_post_event)(); */
+ 0, /* (*interrupt control)(); */
+ 0, /* (*bus_config)(); */
+ 0, /* (*bus_unconfig)(); */
+ 0, /* (*bus_fm_init)(); */
+ 0, /* (*bus_fm_fini)(); */
+ 0, /* (*bus_fm_access_enter)(); */
+ 0, /* (*bus_fm_access_exit)(); */
+ 0, /* (*bus_power)(); */
+ i_ddi_intr_ops /* (*bus_intr_op)(); */
+};
+
+/*
+ * removal/insertion/reset events
+ */
+#define NX1394_EVENT_TAG_HOT_REMOVAL 0
+#define NX1394_EVENT_TAG_HOT_INSERTION 1
+#define NX1394_EVENT_TAG_BUS_RESET 2
+
+static ndi_event_definition_t nx1394_event_defs[] = {
+ {NX1394_EVENT_TAG_HOT_REMOVAL, DDI_DEVI_REMOVE_EVENT, EPL_KERNEL,
+ NDI_EVENT_POST_TO_TGT},
+ {NX1394_EVENT_TAG_HOT_INSERTION, DDI_DEVI_INSERT_EVENT, EPL_KERNEL,
+ NDI_EVENT_POST_TO_TGT},
+ {NX1394_EVENT_TAG_BUS_RESET, DDI_DEVI_BUS_RESET_EVENT, EPL_KERNEL,
+ NDI_EVENT_POST_TO_ALL},
+};
+
+#define NX1394_N_EVENTS \
+ (sizeof (nx1394_event_defs) / sizeof (ndi_event_definition_t))
+
+static ndi_event_set_t nx1394_events = {
+ NDI_EVENTS_REV1, NX1394_N_EVENTS, nx1394_event_defs
+};
+
+/*
+ * nx1394_bus_ctl()
+ * This routine implements nexus bus ctl operations. Of importance are
+ * DDI_CTLOPS_REPORTDEV, DDI_CTLOPS_INITCHILD, DDI_CTLOPS_UNINITCHILD
+ *	and DDI_CTLOPS_POWER. For DDI_CTLOPS_INITCHILD, it tries to look up
+ *	the "reg" property on the child node and builds and sets the name
+ * (name is of the form GGGGGGGGGGGGGGGG[,AAAAAAAAAAAA], where
+ * GGGGGGGGGGGGGGGG is the GUID and AAAAAAAAAAAA is the optional unit
+ * address).
+ */
+static int
+nx1394_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg,
+ void *result)
+{
+ int status;
+
+ TNF_PROBE_0_DEBUG(nx1394_bus_ctl_enter, S1394_TNF_SL_NEXUS_STACK, "");
+
+ switch (op) {
+ case DDI_CTLOPS_REPORTDEV: {
+ dev_info_t *pdip = ddi_get_parent(rdip);
+ cmn_err(CE_CONT, "?%s%d at %s%d",
+ ddi_node_name(rdip), ddi_get_instance(rdip),
+ ddi_node_name(pdip), ddi_get_instance(pdip));
+ TNF_PROBE_0_DEBUG(nx1394_bus_ctl_exit, S1394_TNF_SL_NEXUS_STACK,
+ "");
+ return (DDI_SUCCESS);
+ }
+
+ case DDI_CTLOPS_INITCHILD: {
+ dev_info_t *ocdip, *cdip = (dev_info_t *)arg;
+ dev_info_t *pdip = ddi_get_parent(cdip);
+ int reglen, i;
+ uint32_t *regptr;
+ char addr[MAXNAMELEN];
+
+ TNF_PROBE_1(nx1394_bus_ctl_init_child,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_opaque, dip, cdip);
+
+ i = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
+ DDI_PROP_DONTPASS, "reg", (int **)&regptr,
+ (uint_t *)&reglen);
+
+ if (i != DDI_PROP_SUCCESS) {
+ cmn_err(CE_NOTE, "!%s(%d): \"reg\" property not found",
+ ddi_node_name(cdip), ddi_get_instance(cdip));
+ TNF_PROBE_2(nx1394_bus_ctl,
+ S1394_TNF_SL_NEXUS_ERROR, "", tnf_string, msg,
+ "Reg property not found", tnf_int, reason, i);
+ TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit,
+ S1394_TNF_SL_NEXUS_STACK, "", tnf_string, op,
+ "initchild");
+ return (DDI_NOT_WELL_FORMED);
+ }
+
+ ASSERT(reglen != 0);
+
+ /*
+ * addr is of the format GGGGGGGGGGGGGGGG[,AAAAAAAAAAAA]
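+		 * e.g. GUID quadlets 0x0800461e/0x00123456 with a zero
+		 * unit address yield "0800461e00123456", while unit
+		 * address quadlets 0x0001/0x02000000 would append
+		 * ",000102000000" (all values hypothetical).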
+ */
+ if (regptr[2] || regptr[3]) {
+ (void) sprintf(addr, "%08x%08x,%04x%08x", regptr[0],
+ regptr[1], regptr[2], regptr[3]);
+ } else {
+ (void) sprintf(addr, "%08x%08x", regptr[0], regptr[1]);
+ }
+ ddi_prop_free(regptr);
+ ddi_set_name_addr(cdip, addr);
+
+ /*
+ * Check for a node with the same name & addr as the current
+ * node. If such a node exists, return failure.
+ */
+ if ((ocdip = ndi_devi_find(pdip, ddi_node_name(cdip), addr)) !=
+ NULL && ocdip != cdip) {
+ cmn_err(CE_NOTE,
+ "!%s(%d): Duplicate dev_info node found %s@%s",
+ ddi_node_name(cdip), ddi_get_instance(cdip),
+ ddi_node_name(ocdip), addr);
+ TNF_PROBE_1(nx1394_bus_ctl,
+ S1394_TNF_SL_NEXUS_ERROR, "", tnf_string, msg,
+ "Duplicate nodes");
+ TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit,
+ S1394_TNF_SL_NEXUS_STACK, "", tnf_string, op,
+ "initchild");
+ ddi_set_name_addr(cdip, NULL);
+ return (DDI_NOT_WELL_FORMED);
+ }
+
+ /*
+ * If HAL (parent dip) has "active-dma-flush" property, then
+ * add property to child as well. Workaround for active
+ * context flushing bug in Schizo rev 2.1 and 2.2.
+ */
+ if (ddi_prop_exists(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
+ "active-dma-flush") != 0) {
+ status = ndi_prop_update_int(DDI_DEV_T_NONE, cdip,
+ "active-dma-flush", 1);
+ if (status != NDI_SUCCESS) {
+ cmn_err(CE_NOTE, "!%s(%d): Unable to add "
+ "\"active-dma-flush\" property",
+ ddi_node_name(cdip),
+ ddi_get_instance(cdip));
+ TNF_PROBE_1(nx1394_bus_ctl,
+ S1394_TNF_SL_NEXUS_ERROR, "", tnf_string,
+ msg, "Unable to add \"active-dma-flush\" "
+ "property");
+ TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit,
+ S1394_TNF_SL_NEXUS_STACK, "", tnf_string,
+ op, "initchild");
+ ddi_set_name_addr(cdip, NULL);
+ return (DDI_NOT_WELL_FORMED);
+ }
+ }
+
+ TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit,
+ S1394_TNF_SL_NEXUS_STACK, "", tnf_string, op, "initchild");
+ return (DDI_SUCCESS);
+ }
+
+ case DDI_CTLOPS_UNINITCHILD: {
+ ddi_prop_remove_all((dev_info_t *)arg);
+ ddi_set_name_addr((dev_info_t *)arg, NULL);
+ TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit, S1394_TNF_SL_NEXUS_STACK,
+ "", tnf_string, op, "uninitchild");
+ return (DDI_SUCCESS);
+ }
+
+ case DDI_CTLOPS_IOMIN: {
+ status = ddi_ctlops(dip, rdip, op, arg, result);
+ TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit, S1394_TNF_SL_NEXUS_STACK,
+ "", tnf_string, op, "iomin");
+ return (status);
+ }
+
+ case DDI_CTLOPS_POWER: {
+ return (DDI_SUCCESS);
+ }
+
+ /*
+ * These ops correspond to functions that "shouldn't" be called
+ * by a 1394 client driver.
+ */
+ case DDI_CTLOPS_DMAPMAPC:
+ case DDI_CTLOPS_REPORTINT:
+ case DDI_CTLOPS_REGSIZE:
+ case DDI_CTLOPS_NREGS:
+ case DDI_CTLOPS_NINTRS:
+ case DDI_CTLOPS_SIDDEV:
+ case DDI_CTLOPS_SLAVEONLY:
+ case DDI_CTLOPS_AFFINITY:
+ case DDI_CTLOPS_INTR_HILEVEL:
+ case DDI_CTLOPS_XLATE_INTRS:
+ case DDI_CTLOPS_POKE:
+ case DDI_CTLOPS_PEEK: {
+ cmn_err(CE_CONT, "!%s(%d): invalid op (%d) from %s(%d)",
+ ddi_node_name(dip), ddi_get_instance(dip),
+ op, ddi_node_name(rdip), ddi_get_instance(rdip));
+ TNF_PROBE_2(nx1394_bus_ctl, S1394_TNF_SL_NEXUS_ERROR, "",
+ tnf_string, msg, "invalid op", tnf_int, op, op);
+ TNF_PROBE_0_DEBUG(nx1394_bus_ctl_exit, S1394_TNF_SL_NEXUS_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Everything else (e.g. PTOB/BTOP/BTOPR requests) we pass up
+ */
+ default: {
+ status = ddi_ctlops(dip, rdip, op, arg, result);
+ TNF_PROBE_0_DEBUG(nx1394_bus_ctl_exit, S1394_TNF_SL_NEXUS_STACK,
+ "");
+ return (status);
+ }
+ }
+}
+
+/*
+ * nx1394_dma_allochdl()
+ * Merges the ddi_dma_attr_t passed in by the target (using
+ * ddi_dma_alloc_handle() call) with that of the hal and passes the alloc
+ * handle request up the device by calling ddi_dma_allochdl().
+ */
+static int
+nx1394_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
+ int (*waitfnp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
+{
+ s1394_hal_t *hal;
+ ddi_dma_attr_t *hal_attr;
+ int status;
+
+ _NOTE(SCHEME_PROTECTS_DATA("unique (per thread)", ddi_dma_attr_t))
+
+ TNF_PROBE_0_DEBUG(nx1394_dma_allochdl_enter, S1394_TNF_SL_NEXUS_STACK,
+ "");
+
+ /*
+ * If hal calls ddi_dma_alloc_handle, dip == rdip == hal dip.
+	 * Unfortunately, we cannot verify this (by looking up the hal
+	 * dip) here because h1394_attach() may happen much later.
+ */
+ if (dip != rdip) {
+ hal = s1394_dip_to_hal(ddi_get_parent(rdip));
+ ASSERT(hal);
+ hal_attr = &hal->halinfo.dma_attr;
+ ASSERT(hal_attr);
+ ddi_dma_attr_merge(attr, hal_attr);
+ }
+ status = ddi_dma_allochdl(dip, rdip, attr, waitfnp, arg, handlep);
+ TNF_PROBE_1_DEBUG(nx1394_dma_allochdl_exit, S1394_TNF_SL_NEXUS_STACK,
+ "", tnf_int, status, status);
+ return (status);
+}
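+
+/*
+ * Example: ddi_dma_attr_merge() resolves each constraint toward the
+ * more restrictive value, so if a target requests 64-bit addressing
+ * but the HAL's halinfo.dma_attr only allows 32-bit addresses, the
+ * merged attributes (and thus the allocated handle) are limited to
+ * 32 bits.
+ */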
+
+/*
+ * nx1394_get_event_cookie()
+ * Called when a child node calls ddi_get_eventcookie().
+ * Returns event cookie corresponding to event "name".
+ */
+static int
+nx1394_get_event_cookie(dev_info_t *dip, dev_info_t *rdip, char *name,
+ ddi_eventcookie_t *event_cookiep)
+{
+ int ret;
+ s1394_hal_t *hal;
+
+ TNF_PROBE_1_DEBUG(nx1394_get_event_cookie_enter,
+ S1394_TNF_SL_NEXUS_STACK, "", tnf_string, name, name);
+
+ hal = s1394_dip_to_hal(dip);
+ ASSERT(hal);
+
+ ret = ndi_event_retrieve_cookie(hal->hal_ndi_event_hdl,
+ rdip, name, event_cookiep, 0);
+
+ TNF_PROBE_4_DEBUG(nx1394_get_event_cookie_exit,
+ S1394_TNF_SL_NEXUS_STACK, "", tnf_opaque, parent_dip, (void *)dip,
+ tnf_opaque, requestor_dip, (void *)rdip, tnf_string, event_name,
+ name, tnf_int, request_status, ret);
+
+ return (ret);
+
+}
+
+/*
+ * nx1394_add_eventcall()
+ * This gets called when a child node calls ddi_add_eventcall(). Registers
+ * the specified callback for the requested event cookie with the ndi
+ * event framework.
+ * dip is the hal dip. This routine calls ndi_event_add_callback(),
+ * allowing requests for events we don't generate to pass up the tree.
+ */
+static int
+nx1394_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
+ ddi_eventcookie_t cookie, void (*callback)(), void *arg,
+ ddi_callback_id_t *cb_id)
+{
+ int ret;
+ s1394_hal_t *hal;
+#if defined(DEBUG)
+ char *event_name = NULL;
+#endif
+
+ hal = s1394_dip_to_hal(dip);
+ ASSERT(hal);
+
+ TNF_PROBE_0_DEBUG(nx1394_add_eventcall_enter, S1394_TNF_SL_NEXUS_STACK,
+ "");
+
+ ret = ndi_event_add_callback(hal->hal_ndi_event_hdl, rdip, cookie,
+ callback, arg, NDI_NOSLEEP, cb_id);
+#if defined(DEBUG)
+ event_name = ndi_event_cookie_to_name(hal->hal_ndi_event_hdl, cookie);
+ if (event_name == NULL)
+ event_name = "";
+#endif
+ TNF_PROBE_4_DEBUG(nx1394_add_eventcall_exit, S1394_TNF_SL_NEXUS_STACK,
+ "", tnf_opaque, parent_dip, (void *)dip, tnf_opaque, requestor_dip,
+ (void *)rdip, tnf_string, event_name, event_name, tnf_int,
+ request_status, ret);
+
+ return (ret);
+}
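+
+/*
+ * Example (sketch): a child target typically pairs the two calls
+ * above, first fetching the cookie and then registering a handler;
+ * error handling omitted, names illustrative.
+ *
+ *	ddi_eventcookie_t	br_cookie;
+ *	ddi_callback_id_t	cb_id;
+ *
+ *	(void) ddi_get_eventcookie(dip, DDI_DEVI_BUS_RESET_EVENT,
+ *	    &br_cookie);
+ *	(void) ddi_add_eventcall(dip, br_cookie, xxx_bus_reset_cb,
+ *	    xxx_state, &cb_id);
+ */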
+
+/*
+ * nx1394_remove_eventcall()
+ * Called as a result of a child node calling ddi_remove_eventcall().
+ * Unregisters the callback corresponding to the callback id passed in.
+ */
+static int
+nx1394_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
+{
+ int ret;
+ s1394_hal_t *hal;
+ ddi_eventcookie_t cookie;
+#if defined(DEBUG)
+ char *event_name = NULL;
+#endif
+
+ ASSERT(cb_id);
+ cookie = ((ndi_event_callbacks_t *)cb_id)->ndi_evtcb_cookie;
+
+ hal = s1394_dip_to_hal(dip);
+ ASSERT(hal);
+
+ TNF_PROBE_0_DEBUG(nx1394_remove_eventcall_enter,
+ S1394_TNF_SL_NEXUS_STACK, "");
+
+ ret = ndi_event_remove_callback(hal->hal_ndi_event_hdl, cb_id);
+
+#if defined(DEBUG)
+ event_name = ndi_event_cookie_to_name(hal->hal_ndi_event_hdl, cookie);
+ if (event_name == NULL)
+ event_name = "";
+
+ TNF_PROBE_4_DEBUG(nx1394_remove_eventcall_exit,
+ S1394_TNF_SL_NEXUS_STACK, "", tnf_opaque, parent_dip, (void *)dip,
+ tnf_opaque, callback_id, (void *)cb_id, tnf_string, event_name,
+ event_name, tnf_int, request_status, ret);
+#endif
+
+ return (ret);
+}
+
+/*
+ * nx1394_post_event()
+ * Called when a child node calls ddi_post_event. If the event is one of
+ * the events supported by us (bus reset/insert/remove, for now), builds
+ * a t1394_localinfo_t structure and calls ndi_event_run_callbacks(). This
+ * will result in all registered callbacks being invoked with
+ * t1394_localinfo_t as the impl_data. (see ddi_add_eventcall for callback
+ * arguments.) If the event is not defined by us, the request is
+ * propagated up the device tree by calling ndi_post_event().
+ */
+static int
+nx1394_post_event(dev_info_t *dip, dev_info_t *rdip, ddi_eventcookie_t cookie,
+ void *impl_data)
+{
+ int ret;
+ char *name;
+ s1394_hal_t *hal;
+ t1394_localinfo_t localinfo;
+
+ hal = s1394_dip_to_hal(dip);
+ ASSERT(hal);
+
+ TNF_PROBE_0_DEBUG(nx1394_post_event_enter, S1394_TNF_SL_NEXUS_STACK,
+ "");
+
+ name = ndi_event_cookie_to_name(hal->hal_ndi_event_hdl, cookie);
+ /* name is NULL if we don't generate the event */
+ if (name != NULL) {
+
+ mutex_enter(&hal->topology_tree_mutex);
+ localinfo.bus_generation = hal->generation_count;
+ localinfo.local_nodeID = hal->node_id;
+ mutex_exit(&hal->topology_tree_mutex);
+ impl_data = &localinfo;
+
+ ret = ndi_event_run_callbacks(hal->hal_ndi_event_hdl,
+ rdip, cookie, impl_data);
+
+ TNF_PROBE_4_DEBUG(nx1394_post_event_exit,
+ S1394_TNF_SL_NEXUS_STACK, "", tnf_opaque, parent_dip,
+ (void *)dip, tnf_opaque, requestor_dip, (void *)rdip,
+ tnf_string, event_name, name, tnf_int, request_status, ret);
+ return (ret);
+
+ } else {
+ ret = ndi_post_event(ddi_get_parent(dip), rdip, cookie,
+ impl_data);
+ TNF_PROBE_2_DEBUG(nx1394_post_event_exit,
+ S1394_TNF_SL_NEXUS_STACK, "", tnf_string, msg,
+ "Not our event", tnf_int, ret, ret);
+ return (ret);
+ }
+}
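+
+/*
+ * Example (sketch): the callbacks run above use the standard NDI event
+ * handler signature, with the t1394_localinfo_t arriving as impl_data:
+ *
+ *	static void
+ *	xxx_bus_reset_cb(dev_info_t *dip, ddi_eventcookie_t cookie,
+ *	    void *arg, void *impl_data)
+ *	{
+ *		t1394_localinfo_t *li = impl_data;
+ *
+ *		(use li->bus_generation and li->local_nodeID here)
+ *	}
+ */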
+
+/*
+ * nx1394_define_events()
+ * Allocates event handle for the hal dip and binds event set to it.
+ */
+int
+nx1394_define_events(s1394_hal_t *hal)
+{
+ int ret;
+
+ TNF_PROBE_0_DEBUG(nx1394_define_events_enter, S1394_TNF_SL_NEXUS_STACK,
+ "");
+
+ /* get event handle */
+ ret = ndi_event_alloc_hdl(hal->halinfo.dip, hal->halinfo.hw_interrupt,
+ &hal->hal_ndi_event_hdl, NDI_SLEEP);
+ if (ret != NDI_SUCCESS) {
+ TNF_PROBE_1(nx1394_define_events_alloc_fail,
+ S1394_TNF_SL_NEXUS_ERROR, "", tnf_int, ret, ret);
+ } else {
+ /* and bind to it */
+ ret = ndi_event_bind_set(hal->hal_ndi_event_hdl, &nx1394_events,
+ NDI_SLEEP);
+ if (ret != NDI_SUCCESS) {
+ TNF_PROBE_1(nx1394_define_events_bind_fail,
+ S1394_TNF_SL_NEXUS_ERROR, "", tnf_int, ret, ret);
+ (void) ndi_event_free_hdl(hal->hal_ndi_event_hdl);
+ TNF_PROBE_0_DEBUG(nx1394_define_events_exit,
+ S1394_TNF_SL_NEXUS_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(nx1394_define_events_exit, S1394_TNF_SL_NEXUS_STACK,
+ "");
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * nx1394_undefine_events()
+ * Unbinds event set bound to the hal and frees the event handle.
+ */
+void
+nx1394_undefine_events(s1394_hal_t *hal)
+{
+ int ret;
+
+ TNF_PROBE_0_DEBUG(nx1394_undefine_events_enter,
+ S1394_TNF_SL_NEXUS_STACK, "");
+
+ ret = ndi_event_unbind_set(hal->hal_ndi_event_hdl, &nx1394_events,
+ NDI_SLEEP);
+ if (ret != NDI_SUCCESS) {
+ TNF_PROBE_1(nx1394_undefine_events_unbind_fail,
+ S1394_TNF_SL_NEXUS_ERROR, "", tnf_int, ret, ret);
+ } else {
+ ret = ndi_event_free_hdl(hal->hal_ndi_event_hdl);
+ if (ret != NDI_SUCCESS) {
+ TNF_PROBE_1(nx1394_undefine_events_free_hdl_fail,
+ S1394_TNF_SL_NEXUS_ERROR, "", tnf_int, ret, ret);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(nx1394_undefine_events_exit,
+ S1394_TNF_SL_NEXUS_STACK, "");
+}
diff --git a/usr/src/uts/common/io/1394/s1394.c b/usr/src/uts/common/io/1394/s1394.c
new file mode 100644
index 0000000000..a3dcd4b5be
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394.c
@@ -0,0 +1,159 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394.c
+ * 1394 Services Layer Initialization and Cleanup Routines
+ * The routines do all initialization and cleanup for the Services Layer
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+
+/* Driver State Pointer */
+s1394_state_t *s1394_statep;
+
+/* Module Driver Info */
+static struct modlmisc s1394_modlmisc = {
+ &mod_miscops,
+ "IEEE 1394 Services Library 1.0"
+};
+
+/* Module Linkage */
+static struct modlinkage s1394_modlinkage = {
+ MODREV_1,
+ &s1394_modlmisc,
+ NULL
+};
+
+static int s1394_init(void);
+static void s1394_fini(void);
+
+#ifndef NPROBE
+extern int tnf_mod_load(void);
+extern int tnf_mod_unload(struct modlinkage *mlp);
+#endif
+
+int
+_init()
+{
+ int status;
+
+#ifndef NPROBE
+ (void) tnf_mod_load();
+#endif
+ status = s1394_init();
+ if (status != 0) {
+ TNF_PROBE_1(_init_error, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "s1394: failed in s1394_init");
+#ifndef NPROBE
+ (void) tnf_mod_unload(&s1394_modlinkage);
+#endif
+ return (status);
+ }
+
+ status = mod_install(&s1394_modlinkage);
+ if (status != 0) {
+ TNF_PROBE_1(_init_error, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "s1394: failed in mod_install");
+#ifndef NPROBE
+ (void) tnf_mod_unload(&s1394_modlinkage);
+#endif
+ }
+ return (status);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&s1394_modlinkage, modinfop));
+}
+
+int
+_fini()
+{
+ int status;
+
+ status = mod_remove(&s1394_modlinkage);
+ if (status != 0) {
+ TNF_PROBE_1(_fini_error, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "s1394: failed in mod_remove");
+ return (status);
+ }
+
+ s1394_fini();
+#ifndef NPROBE
+ (void) tnf_mod_unload(&s1394_modlinkage);
+#endif
+ return (status);
+}
+
+/*
+ * s1394_init()
+ * initializes the 1394 Software Framework's structures, i.e. the HAL list
+ * and associated mutex.
+ */
+static int
+s1394_init(void)
+{
+ TNF_PROBE_0_DEBUG(s1394_init_enter, S1394_TNF_SL_STACK, "");
+
+ s1394_statep = kmem_zalloc(sizeof (s1394_state_t), KM_SLEEP);
+
+ s1394_statep->hal_head = NULL;
+ s1394_statep->hal_tail = NULL;
+ mutex_init(&s1394_statep->hal_list_mutex, NULL, MUTEX_DRIVER, NULL);
+
+ TNF_PROBE_0_DEBUG(s1394_init_exit, S1394_TNF_SL_STACK, "");
+ return (0);
+}
+
+/*
+ * s1394_fini()
+ * cleans up the 1394 Software Framework's structures that were allocated
+ * in s1394_init().
+ */
+static void
+s1394_fini(void)
+{
+ TNF_PROBE_0_DEBUG(s1394_fini_enter, S1394_TNF_SL_STACK, "");
+
+ mutex_destroy(&s1394_statep->hal_list_mutex);
+
+ kmem_free(s1394_statep, sizeof (s1394_state_t));
+
+ TNF_PROBE_0_DEBUG(s1394_fini_exit, S1394_TNF_SL_STACK, "");
+}
diff --git a/usr/src/uts/common/io/1394/s1394_addr.c b/usr/src/uts/common/io/1394/s1394_addr.c
new file mode 100644
index 0000000000..4ff928bb14
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394_addr.c
@@ -0,0 +1,1704 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 1999-2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394_addr.c
+ * 1394 Address Space Routines
+ * Implements all the routines necessary for alloc/free and lookup
+ * of the 1394 address space
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+#include <sys/1394/ieee1394.h>
+
+static s1394_addr_space_blk_t *s1394_free_list_search(s1394_hal_t *hal,
+ uint64_t addr);
+
+static s1394_addr_space_blk_t *s1394_free_list_find(s1394_hal_t *hal,
+ uint32_t type, uint32_t length);
+
+static s1394_addr_space_blk_t *s1394_free_list_delete(s1394_hal_t *hal,
+ s1394_addr_space_blk_t *del_blk);
+
+static void s1394_used_tree_insert(s1394_hal_t *hal, s1394_addr_space_blk_t *x);
+
+static void s1394_tree_insert(s1394_addr_space_blk_t **root,
+ s1394_addr_space_blk_t *z);
+
+static s1394_addr_space_blk_t *s1394_tree_search(s1394_addr_space_blk_t *x,
+ uint64_t address);
+
+static void s1394_used_tree_delete_fixup(s1394_addr_space_blk_t **root,
+ s1394_addr_space_blk_t *p, s1394_addr_space_blk_t *x,
+ s1394_addr_space_blk_t *w, int side_of_x);
+
+static void s1394_left_rotate(s1394_addr_space_blk_t **root,
+ s1394_addr_space_blk_t *x);
+
+static void s1394_right_rotate(s1394_addr_space_blk_t **root,
+ s1394_addr_space_blk_t *x);
+
+static s1394_addr_space_blk_t *s1394_tree_minimum(s1394_addr_space_blk_t *x);
+
+static s1394_addr_space_blk_t *s1394_tree_successor(s1394_addr_space_blk_t *x);
+
+/*
+ * s1394_request_addr_blk()
+ * is called when a target driver is requesting a block of 1394 Address
+ * Space of a particular type without regard for its exact location. It
+ * searches the free list for a block that's big enough and of the specified
+ * type, and inserts it into the used tree.
+ */
+int
+s1394_request_addr_blk(s1394_hal_t *hal, t1394_alloc_addr_t *addr_allocp)
+{
+ s1394_addr_space_blk_t *curr_blk;
+ s1394_addr_space_blk_t *new_blk;
+ uint64_t amount_free;
+
+ TNF_PROBE_0_DEBUG(s1394_request_addr_blk_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ ASSERT(hal != NULL);
+
+ /* Lock the address space "free" list */
+ mutex_enter(&hal->addr_space_free_mutex);
+
+ curr_blk = s1394_free_list_find(hal, addr_allocp->aa_type,
+ addr_allocp->aa_length);
+ if (curr_blk == NULL) {
+ /* Unlock the address space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_1(s1394_request_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "1394 address space - no more memory");
+ TNF_PROBE_0_DEBUG(s1394_request_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ amount_free = (curr_blk->addr_hi - curr_blk->addr_lo) + 1;
+ /* Does it fit exactly? */
+ if (amount_free == addr_allocp->aa_length) {
+ /* Take it out of the "free" list */
+ curr_blk = s1394_free_list_delete(hal, curr_blk);
+
+ /* Unlock the address space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ curr_blk->addr_enable = addr_allocp->aa_enable;
+ curr_blk->kmem_bufp = addr_allocp->aa_kmem_bufp;
+ curr_blk->addr_arg = addr_allocp->aa_arg;
+ curr_blk->addr_events = addr_allocp->aa_evts;
+
+ addr_allocp->aa_address = curr_blk->addr_lo;
+ addr_allocp->aa_hdl = (t1394_addr_handle_t)curr_blk;
+
+ /* Put it into the "used" tree */
+ s1394_used_tree_insert(hal, curr_blk);
+
+ s1394_addr_alloc_kstat(hal, addr_allocp->aa_address);
+
+ TNF_PROBE_0_DEBUG(s1394_request_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_SUCCESS);
+
+ } else {
+ /* Needs to be broken up */
+ new_blk = (s1394_addr_space_blk_t *)
+ kmem_zalloc(sizeof (s1394_addr_space_blk_t), KM_NOSLEEP);
+ if (new_blk == NULL) {
+ /* Unlock the address space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ TNF_PROBE_0(s1394_request_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG(s1394_request_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ new_blk->addr_lo = curr_blk->addr_lo;
+ new_blk->addr_hi = curr_blk->addr_lo +
+ (addr_allocp->aa_length - 1);
+ new_blk->addr_type = curr_blk->addr_type;
+ new_blk->addr_enable = addr_allocp->aa_enable;
+ new_blk->kmem_bufp = addr_allocp->aa_kmem_bufp;
+ new_blk->addr_arg = addr_allocp->aa_arg;
+ new_blk->addr_events = addr_allocp->aa_evts;
+
+ curr_blk->addr_lo = new_blk->addr_hi + 1;
+
+ addr_allocp->aa_address = new_blk->addr_lo;
+ addr_allocp->aa_hdl = (t1394_addr_handle_t)new_blk;
+
+ /* Unlock the address space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ /* Put it into the "used" tree */
+ s1394_used_tree_insert(hal, new_blk);
+
+ s1394_addr_alloc_kstat(hal, addr_allocp->aa_address);
+
+ TNF_PROBE_0_DEBUG(s1394_request_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_SUCCESS);
+ }
+}
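+
+/*
+ * Editorial sketch (not part of the original source): what a caller of
+ * s1394_request_addr_blk() typically fills in. Target drivers normally
+ * reach this routine through t1394_alloc_addr(9F); the enable flag names
+ * below are assumed from t1394.h, and the length is arbitrary.
+ */
+#if 0
+static int
+xx1394_get_normal_block(s1394_hal_t *hal, t1394_addr_handle_t *hdl)
+{
+	t1394_alloc_addr_t aa;
+
+	bzero(&aa, sizeof (aa));
+	aa.aa_type = T1394_ADDR_NORMAL;	/* any location of this type */
+	aa.aa_length = 512;
+	aa.aa_enable = T1394_ADDR_RDENBL | T1394_ADDR_WRENBL;	/* assumed */
+	aa.aa_kmem_bufp = NULL;		/* no backing store supplied */
+
+	if (s1394_request_addr_blk(hal, &aa) != DDI_SUCCESS)
+		return (DDI_FAILURE);
+
+	/* aa.aa_address is the assigned 1394 address */
+	*hdl = aa.aa_hdl;
+	return (DDI_SUCCESS);
+}
+#endif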
+
+/*
+ * s1394_claim_addr_blk()
+ * is called when a target driver is requesting a block of 1394 Address
+ * Space with a specific address. If the block containing that address
+ * is not in the free list, or if the block is too small, then
+ * s1394_claim_addr_blk() returns failure. If the block is found,
+ * however, it is inserted into the used tree.
+ */
+int
+s1394_claim_addr_blk(s1394_hal_t *hal, t1394_alloc_addr_t *addr_allocp)
+{
+ s1394_addr_space_blk_t *curr_blk;
+ s1394_addr_space_blk_t *new_blk;
+ s1394_addr_space_blk_t *middle_blk;
+ uint64_t upper_bound;
+
+ TNF_PROBE_0_DEBUG(s1394_claim_addr_blk_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ ASSERT(hal != NULL);
+
+ /* Lock the address space "free" list */
+ mutex_enter(&hal->addr_space_free_mutex);
+
+ /* Find the block in the "free" list */
+ curr_blk = s1394_free_list_search(hal, addr_allocp->aa_address);
+
+ /* If it wasn't found, it isn't free... */
+ if (curr_blk == NULL) {
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_1(s1394_claim_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "1394 address space - address unavailable");
+ TNF_PROBE_0_DEBUG(s1394_claim_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Does the request fit in the block? */
+ upper_bound = (addr_allocp->aa_address + addr_allocp->aa_length) - 1;
+ if ((upper_bound >= curr_blk->addr_lo) &&
+ (upper_bound <= curr_blk->addr_hi)) {
+
+ /* How does the requested range fit in the current range? */
+ if (addr_allocp->aa_address == curr_blk->addr_lo) {
+ if (upper_bound == curr_blk->addr_hi) {
+ /* Exact fit */
+
+ /* Take it out of the "free" list */
+ curr_blk = s1394_free_list_delete(hal,
+ curr_blk);
+
+ /* Unlock the address space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ curr_blk->addr_enable = addr_allocp->aa_enable;
+ curr_blk->kmem_bufp = addr_allocp->aa_kmem_bufp;
+ curr_blk->addr_arg = addr_allocp->aa_arg;
+ curr_blk->addr_events = addr_allocp->aa_evts;
+
+ addr_allocp->aa_hdl =
+ (t1394_addr_handle_t)curr_blk;
+
+ /* Put it into the "used" tree */
+ s1394_used_tree_insert(hal, curr_blk);
+
+ s1394_addr_alloc_kstat(hal,
+ addr_allocp->aa_address);
+
+ TNF_PROBE_0_DEBUG(s1394_claim_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_SUCCESS);
+
+ } else {
+ /* If space is reserved, must claim it all */
+ if (curr_blk->addr_reserved == ADDR_RESERVED) {
+ goto claim_error;
+ }
+
+ /* Front part of range */
+ new_blk = (s1394_addr_space_blk_t *)
+ kmem_zalloc(sizeof (s1394_addr_space_blk_t),
+ KM_NOSLEEP);
+ if (new_blk == NULL) {
+ /* Unlock the addr space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ TNF_PROBE_0(s1394_claim_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG(
+ s1394_claim_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ new_blk->addr_lo = curr_blk->addr_lo;
+ new_blk->addr_hi = upper_bound;
+ new_blk->addr_type = curr_blk->addr_type;
+ new_blk->addr_enable = addr_allocp->aa_enable;
+ new_blk->kmem_bufp = addr_allocp->aa_kmem_bufp;
+ new_blk->addr_arg = addr_allocp->aa_arg;
+ new_blk->addr_events = addr_allocp->aa_evts;
+
+ curr_blk->addr_lo = new_blk->addr_hi + 1;
+
+ addr_allocp->aa_hdl =
+ (t1394_addr_handle_t)new_blk;
+
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ /* Put it into the "used" tree */
+ s1394_used_tree_insert(hal, new_blk);
+
+ s1394_addr_alloc_kstat(hal,
+ addr_allocp->aa_address);
+
+ TNF_PROBE_0_DEBUG(s1394_claim_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ } else {
+ if (upper_bound == curr_blk->addr_hi) {
+ /* If space is reserved, must claim it all */
+ if (curr_blk->addr_reserved == ADDR_RESERVED) {
+ goto claim_error;
+ }
+
+ /* End part of range */
+ new_blk = (s1394_addr_space_blk_t *)
+ kmem_zalloc(sizeof (s1394_addr_space_blk_t),
+ KM_NOSLEEP);
+ if (new_blk == NULL) {
+ /* Unlock the addr space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ TNF_PROBE_0(s1394_claim_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG
+ (s1394_claim_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ new_blk->addr_lo = addr_allocp->aa_address;
+ new_blk->addr_hi = upper_bound;
+ new_blk->addr_type = curr_blk->addr_type;
+ new_blk->addr_enable = addr_allocp->aa_enable;
+ new_blk->kmem_bufp = addr_allocp->aa_kmem_bufp;
+ new_blk->addr_arg = addr_allocp->aa_arg;
+ new_blk->addr_events = addr_allocp->aa_evts;
+
+ curr_blk->addr_hi = addr_allocp->aa_address - 1;
+
+ addr_allocp->aa_hdl =
+ (t1394_addr_handle_t)new_blk;
+
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ /* Put it into the "used" tree */
+ s1394_used_tree_insert(hal, new_blk);
+
+ s1394_addr_alloc_kstat(hal,
+ addr_allocp->aa_address);
+
+ TNF_PROBE_0_DEBUG(s1394_claim_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_SUCCESS);
+
+ } else {
+ /* If space is reserved, must claim it all */
+ if (curr_blk->addr_reserved == ADDR_RESERVED) {
+ goto claim_error;
+ }
+
+ /* Middle part of range */
+ new_blk = (s1394_addr_space_blk_t *)
+ kmem_zalloc(sizeof (s1394_addr_space_blk_t),
+ KM_NOSLEEP);
+ if (new_blk == NULL) {
+ /* Unlock the addr space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ TNF_PROBE_0(s1394_claim_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG(
+ s1394_claim_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ middle_blk = (s1394_addr_space_blk_t *)
+ kmem_zalloc(sizeof (s1394_addr_space_blk_t),
+ KM_NOSLEEP);
+ if (middle_blk == NULL) {
+ /* Unlock the addr space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ kmem_free(new_blk,
+ sizeof (s1394_addr_space_blk_t));
+ TNF_PROBE_0(s1394_claim_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG
+ (s1394_claim_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ middle_blk->addr_lo = addr_allocp->aa_address;
+ middle_blk->addr_hi = upper_bound;
+ new_blk->addr_lo = upper_bound + 1;
+ new_blk->addr_hi = curr_blk->addr_hi;
+
+ new_blk->addr_type = curr_blk->addr_type;
+
+ middle_blk->addr_type = curr_blk->addr_type;
+ middle_blk->addr_enable =
+ addr_allocp->aa_enable;
+ middle_blk->kmem_bufp =
+ addr_allocp->aa_kmem_bufp;
+ middle_blk->addr_arg = addr_allocp->aa_arg;
+ middle_blk->addr_events = addr_allocp->aa_evts;
+
+ curr_blk->addr_hi = addr_allocp->aa_address - 1;
+
+ addr_allocp->aa_hdl =
+ (t1394_addr_handle_t)middle_blk;
+
+ /* Put part back into the "free" tree */
+ s1394_free_list_insert(hal, new_blk);
+
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ /* Put it into the "used" tree */
+ s1394_used_tree_insert(hal, middle_blk);
+
+ s1394_addr_alloc_kstat(hal,
+ addr_allocp->aa_address);
+
+ TNF_PROBE_0_DEBUG(s1394_claim_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ }
+ }
+
+claim_error:
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_claim_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+}
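+
+/*
+ * Editorial sketch (not part of the original source): claiming a block
+ * at a fixed 1394 address. Unlike s1394_request_addr_blk(), the caller
+ * sets aa_address; note from the claim_error paths above that a block
+ * marked "reserved" can only be claimed in its entirety. The address,
+ * length, and enable flag name are illustrative.
+ */
+#if 0
+static int
+xx1394_claim_fixed(s1394_hal_t *hal)
+{
+	t1394_alloc_addr_t aa;
+
+	bzero(&aa, sizeof (aa));
+	aa.aa_address = 0x0000FFFFF0020000ULL;	/* hypothetical offset */
+	aa.aa_length = 0x400;
+	aa.aa_enable = T1394_ADDR_RDENBL;	/* flag name assumed */
+
+	return (s1394_claim_addr_blk(hal, &aa));
+}
+#endif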
+
+/*
+ * s1394_free_addr_blk()
+ * The opposite of s1394_claim_addr_blk(): it takes the address block
+ * out of the "used" tree and puts it into the "free" tree.
+ */
+int
+s1394_free_addr_blk(s1394_hal_t *hal, s1394_addr_space_blk_t *blk)
+{
+ TNF_PROBE_0_DEBUG(s1394_free_addr_blk_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ /* Lock the address space "free" list */
+ mutex_enter(&hal->addr_space_free_mutex);
+
+ /* Take it out of the "used" tree */
+ blk = s1394_used_tree_delete(hal, blk);
+
+ if (blk == NULL) {
+ /* Unlock the address space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ TNF_PROBE_1(s1394_free_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Can't free block not found in used list");
+ TNF_PROBE_0_DEBUG(s1394_free_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Put it into the "free" tree */
+ s1394_free_list_insert(hal, blk);
+
+ /* Unlock the address space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_free_addr_blk_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_reserve_addr_blk()
+ * is similar to s1394_claim_addr_blk(), with the difference being that
+ * after the address block is found, it is marked as "reserved" rather
+ * than inserted into the used tree. Blocks that are marked "reserved"
+ * cannot be unintentionally allocated by a target; they must be
+ * requested explicitly, with the exact address and size of the
+ * "reserved" block.
+ */
+int
+s1394_reserve_addr_blk(s1394_hal_t *hal, t1394_alloc_addr_t *addr_allocp)
+{
+ s1394_addr_space_blk_t *curr_blk;
+ s1394_addr_space_blk_t *new_blk;
+ s1394_addr_space_blk_t *middle_blk;
+ uint64_t upper_bound;
+
+ TNF_PROBE_0_DEBUG(s1394_reserve_addr_blk_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ ASSERT(hal != NULL);
+
+ /* Lock the address space "free" list */
+ mutex_enter(&hal->addr_space_free_mutex);
+
+ /* Find the block in the "free" list */
+ curr_blk = s1394_free_list_search(hal, addr_allocp->aa_address);
+ /* If it wasn't found, it isn't free... */
+ if (curr_blk == NULL) {
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_1(s1394_reserve_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "1394 address space - address unavailable");
+ TNF_PROBE_0_DEBUG(s1394_reserve_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Is this block already reserved? */
+ if (curr_blk->addr_reserved == ADDR_RESERVED) {
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_reserve_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Does the request fit in the block? */
+ upper_bound = (addr_allocp->aa_address + addr_allocp->aa_length) - 1;
+ if ((upper_bound >= curr_blk->addr_lo) &&
+ (upper_bound <= curr_blk->addr_hi)) {
+
+ /* How does the requested range fit in the current range? */
+ if (addr_allocp->aa_address == curr_blk->addr_lo) {
+ if (upper_bound == curr_blk->addr_hi) {
+ /* Exact fit */
+ curr_blk->addr_reserved = ADDR_RESERVED;
+
+ /* Unlock the address space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_reserve_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_SUCCESS);
+
+ } else {
+ /* Front part of range */
+ new_blk = (s1394_addr_space_blk_t *)
+ kmem_zalloc(sizeof (s1394_addr_space_blk_t),
+ KM_NOSLEEP);
+ if (new_blk == NULL) {
+ /* Unlock the addr space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ TNF_PROBE_0(
+ s1394_reserve_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG(
+ s1394_reserve_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ new_blk->addr_lo = curr_blk->addr_lo;
+ new_blk->addr_hi = upper_bound;
+ new_blk->addr_type = curr_blk->addr_type;
+ new_blk->addr_reserved = ADDR_RESERVED;
+
+ curr_blk->addr_lo = new_blk->addr_hi + 1;
+
+ /* Put it back into the "free" list */
+ s1394_free_list_insert(hal, new_blk);
+
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_reserve_addr_blk_exit,
+ "stacktrace 1394 s1394 arreq", "");
+ return (DDI_SUCCESS);
+ }
+
+ } else {
+ if (upper_bound == curr_blk->addr_hi) {
+ /* End part of range */
+ new_blk = (s1394_addr_space_blk_t *)
+ kmem_zalloc(sizeof (s1394_addr_space_blk_t),
+ KM_NOSLEEP);
+ if (new_blk == NULL) {
+ /* Unlock the addr space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ TNF_PROBE_0(
+ s1394_reserve_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG(
+ s1394_reserve_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ new_blk->addr_lo = addr_allocp->aa_address;
+ new_blk->addr_hi = upper_bound;
+ new_blk->addr_type = curr_blk->addr_type;
+ new_blk->addr_reserved = ADDR_RESERVED;
+
+ curr_blk->addr_hi = addr_allocp->aa_address - 1;
+
+ /* Put it back into the "free" list */
+ s1394_free_list_insert(hal, new_blk);
+
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_reserve_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_SUCCESS);
+
+ } else {
+ /* Middle part of range */
+ new_blk = (s1394_addr_space_blk_t *)
+ kmem_zalloc(sizeof (s1394_addr_space_blk_t),
+ KM_NOSLEEP);
+ if (new_blk == NULL) {
+ /* Unlock the addr space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ TNF_PROBE_0(
+ s1394_reserve_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG(
+ s1394_reserve_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ middle_blk = (s1394_addr_space_blk_t *)
+ kmem_zalloc(sizeof (s1394_addr_space_blk_t),
+ KM_NOSLEEP);
+ if (middle_blk == NULL) {
+ /* Unlock the addr space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ kmem_free(new_blk,
+ sizeof (s1394_addr_space_blk_t));
+ TNF_PROBE_0(
+ s1394_reserve_addr_blk_error,
+ S1394_TNF_SL_ARREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG(
+ s1394_reserve_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ middle_blk->addr_lo = addr_allocp->aa_address;
+ middle_blk->addr_hi = upper_bound;
+ new_blk->addr_lo = upper_bound + 1;
+ new_blk->addr_hi = curr_blk->addr_hi;
+
+ new_blk->addr_type = curr_blk->addr_type;
+
+ middle_blk->addr_type = curr_blk->addr_type;
+ middle_blk->addr_reserved = ADDR_RESERVED;
+
+ curr_blk->addr_hi = addr_allocp->aa_address - 1;
+
+ /* Put pieces back into the "free" list */
+ s1394_free_list_insert(hal, middle_blk);
+ s1394_free_list_insert(hal, new_blk);
+
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_reserve_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ }
+ }
+
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_reserve_addr_blk_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+}
+
+/*
+ * s1394_init_addr_space()
+ * is called in the HAL attach routine - h1394_attach() - to setup the
+ * initial address space with the appropriate ranges, etc. At attach,
+ * the HAL specifies not only the type and bounds for each kind of 1394
+ * address space, but also a list of the blocks that are to be marked
+ * ¨reserved". Prior to marking the "reserved" ranges the local hosts
+ * CSR registers are allocated/setup in s1394_setup_CSR_space().
+ */
+int
+s1394_init_addr_space(s1394_hal_t *hal)
+{
+ s1394_addr_space_blk_t *addr_blk;
+ t1394_alloc_addr_t addr_alloc;
+ h1394_addr_map_t *addr_map;
+ h1394_addr_map_t *resv_map;
+ uint_t num_blks;
+ uint64_t lo;
+ uint64_t hi;
+ int i;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_init_addr_space_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ /* Setup Address Space */
+ mutex_init(&hal->addr_space_free_mutex,
+ NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&hal->addr_space_used_mutex,
+ NULL, MUTEX_DRIVER, hal->halinfo.hw_interrupt);
+
+ /* Set address space to NULL (empty) */
+ hal->addr_space_free_list = NULL;
+ hal->addr_space_used_tree = NULL;
+
+ /* Initialize the 1394 Address Space from HAL's description */
+ num_blks = hal->halinfo.addr_map_num_entries;
+ addr_map = hal->halinfo.addr_map;
+
+ /* Lock the address space free list */
+ mutex_enter(&hal->addr_space_free_mutex);
+
+ /* Default to NO posted write space */
+ hal->posted_write_addr_lo = ADDR_LO_INVALID;
+ hal->posted_write_addr_hi = ADDR_HI_INVALID;
+
+ /* Default to NO physical space */
+ hal->physical_addr_lo = ADDR_LO_INVALID;
+ hal->physical_addr_hi = ADDR_HI_INVALID;
+
+ /* Default to NO CSR space */
+ hal->csr_addr_lo = ADDR_LO_INVALID;
+ hal->csr_addr_hi = ADDR_HI_INVALID;
+
+ /* Default to NO normal space */
+ hal->normal_addr_lo = ADDR_LO_INVALID;
+ hal->normal_addr_hi = ADDR_HI_INVALID;
+
+ for (i = 0; i < num_blks; i++) {
+ if (addr_map[i].length == 0)
+ continue;
+ addr_blk = kmem_zalloc(sizeof (s1394_addr_space_blk_t),
+ KM_SLEEP);
+ addr_blk->addr_lo = addr_map[i].address;
+ addr_blk->addr_hi =
+ (addr_blk->addr_lo + addr_map[i].length) - 1;
+
+ switch (addr_map[i].addr_type) {
+ case H1394_ADDR_POSTED_WRITE:
+ addr_blk->addr_type = T1394_ADDR_POSTED_WRITE;
+ hal->posted_write_addr_lo = addr_blk->addr_lo;
+ hal->posted_write_addr_hi = addr_blk->addr_hi;
+ break;
+
+ case H1394_ADDR_NORMAL:
+ addr_blk->addr_type = T1394_ADDR_NORMAL;
+ hal->normal_addr_lo = addr_blk->addr_lo;
+ hal->normal_addr_hi = addr_blk->addr_hi;
+ break;
+
+ case H1394_ADDR_CSR:
+ addr_blk->addr_type = T1394_ADDR_CSR;
+ hal->csr_addr_lo = addr_blk->addr_lo;
+ hal->csr_addr_hi = addr_blk->addr_hi;
+ break;
+
+ case H1394_ADDR_PHYSICAL:
+ addr_blk->addr_type = T1394_ADDR_FIXED;
+ hal->physical_addr_lo = addr_blk->addr_lo;
+ hal->physical_addr_hi = addr_blk->addr_hi;
+ break;
+
+ default:
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ s1394_destroy_addr_space(hal);
+ TNF_PROBE_1(s1394_init_addr_space_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Invalid addr_type specified");
+ TNF_PROBE_0_DEBUG(s1394_init_addr_space_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ s1394_free_list_insert(hal, addr_blk);
+ }
+
+ /* Unlock the address space free list */
+ mutex_exit(&hal->addr_space_free_mutex);
+
+ /* Setup the necessary CSR space */
+ if (s1394_setup_CSR_space(hal) != DDI_SUCCESS) {
+ s1394_destroy_addr_space(hal);
+ TNF_PROBE_1(s1394_init_addr_space_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Failed in s1394_setup_CSR_space()");
+ TNF_PROBE_0_DEBUG(s1394_init_addr_space_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Handle all the HAL's reserved spaces */
+ num_blks = hal->halinfo.resv_map_num_entries;
+ resv_map = hal->halinfo.resv_map;
+
+ for (i = 0; i < num_blks; i++) {
+ /* Can't reserve physical addresses */
+ lo = resv_map[i].address;
+ hi = (lo + resv_map[i].length) - 1;
+ if ((lo >= hal->physical_addr_lo) &&
+ (hi <= hal->physical_addr_hi)) {
+ s1394_destroy_addr_space(hal);
+ TNF_PROBE_1(s1394_init_addr_space_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Attempted to reserve physical memory");
+ TNF_PROBE_0_DEBUG(s1394_init_addr_space_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ addr_alloc.aa_address = resv_map[i].address;
+ addr_alloc.aa_length = resv_map[i].length;
+ ret = s1394_reserve_addr_blk(hal, &addr_alloc);
+ if (ret != DDI_SUCCESS) {
+ s1394_destroy_addr_space(hal);
+ TNF_PROBE_1(s1394_init_addr_space_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Unable to reserve 1394 address");
+ TNF_PROBE_0_DEBUG(s1394_init_addr_space_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_init_addr_space_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+ return (DDI_SUCCESS);
+}
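+
+/*
+ * Editorial sketch (not part of the original source): the shape of the
+ * h1394_addr_map_t array a HAL would pass in halinfo for the loop above
+ * to walk. The ranges are illustrative (a contiguous carve-up of the
+ * 48-bit offset space), not an accurate OHCI layout.
+ */
+#if 0
+static void
+xxhal_fill_addr_map(h1394_addr_map_t map[4])
+{
+	/* "physical" space: backed directly by memory/DMA */
+	map[0].address = 0x0000000000000000ULL;
+	map[0].length = 0x0000000100000000ULL;
+	map[0].addr_type = H1394_ADDR_PHYSICAL;
+
+	/* posted write space */
+	map[1].address = 0x0000000100000000ULL;
+	map[1].length = 0x0000000100000000ULL;
+	map[1].addr_type = H1394_ADDR_POSTED_WRITE;
+
+	/* "normal" space for target drivers */
+	map[2].address = 0x0000000200000000ULL;
+	map[2].length = 0x0000FFFD00000000ULL;
+	map[2].addr_type = H1394_ADDR_NORMAL;
+
+	/* CSR space at the top of the 48-bit offset range */
+	map[3].address = 0x0000FFFF00000000ULL;
+	map[3].length = 0x0000000100000000ULL;
+	map[3].addr_type = H1394_ADDR_CSR;
+}
+#endif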
+
+/*
+ * s1394_destroy_addr_space()
+ * is necessary for h1394_detach(). It undoes all the work that
+ * s1394_init_addr_space() had setup and more. By pulling everything out
+ * of the used tree and free list and then freeing the structures,
+ * mutexes, and (if necessary) any backing store memory, the 1394 address
+ * space is completely dismantled.
+ */
+void
+s1394_destroy_addr_space(s1394_hal_t *hal)
+{
+ s1394_addr_space_blk_t *addr_blk;
+ s1394_addr_space_blk_t *next_blk;
+ uint64_t lo;
+ uint64_t hi;
+ uint_t length;
+
+ TNF_PROBE_0_DEBUG(s1394_destroy_addr_space_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ /* Lock the address space "used" tree */
+ mutex_enter(&hal->addr_space_used_mutex);
+
+ addr_blk = hal->addr_space_used_tree;
+
+ while (addr_blk != NULL) {
+ if (addr_blk->asb_left != NULL) {
+ addr_blk = addr_blk->asb_left;
+ } else if (addr_blk->asb_right != NULL) {
+ addr_blk = addr_blk->asb_right;
+ } else {
+ /* Free any of our own backing store (if necessary) */
+ if ((addr_blk->free_kmem_bufp == B_TRUE) &&
+ (addr_blk->kmem_bufp != NULL)) {
+ lo = addr_blk->addr_lo;
+ hi = addr_blk->addr_hi;
+ length = (uint_t)((hi - lo) + 1);
+ kmem_free((void *)addr_blk->kmem_bufp, length);
+ }
+
+ next_blk = addr_blk->asb_parent;
+
+ /* Free the s1394_addr_space_blk_t structure */
+ kmem_free((void *)addr_blk,
+ sizeof (s1394_addr_space_blk_t));
+
+ if (next_blk != NULL) {
+ if (next_blk->asb_left != NULL)
+ next_blk->asb_left = NULL;
+ else
+ next_blk->asb_right = NULL;
+ }
+
+ addr_blk = next_blk;
+ }
+ }
+
+ /* Unlock and destroy the address space "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+ mutex_destroy(&hal->addr_space_used_mutex);
+
+ /* Lock the address space "free" list */
+ mutex_enter(&hal->addr_space_free_mutex);
+
+ addr_blk = hal->addr_space_free_list;
+
+ while (addr_blk != NULL) {
+ next_blk = addr_blk->asb_right;
+
+ /* Free the s1394_addr_space_blk_t structure */
+ kmem_free((void *)addr_blk, sizeof (s1394_addr_space_blk_t));
+ addr_blk = next_blk;
+ }
+
+ /* Unlock & destroy the address space "free" list */
+ mutex_exit(&hal->addr_space_free_mutex);
+ mutex_destroy(&hal->addr_space_free_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_destroy_addr_space_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+}
+
+/*
+ * s1394_free_list_insert()
+ * takes an s1394_addr_space_blk_t and inserts it into the free list in the
+ * appropriate place. It will concatenate into a single structure on the
+ * list any two neighboring blocks that can be joined (same type,
+ * consecutive addresses, neither is "reserved", etc.)
+ */
+void
+s1394_free_list_insert(s1394_hal_t *hal, s1394_addr_space_blk_t *new_blk)
+{
+ s1394_addr_space_blk_t *curr_blk;
+ s1394_addr_space_blk_t *left_blk;
+ s1394_addr_space_blk_t *right_blk;
+
+ TNF_PROBE_0_DEBUG(s1394_free_list_insert_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->addr_space_free_mutex));
+
+ /* Start at the head of the "free" list */
+ curr_blk = hal->addr_space_free_list;
+
+ if (curr_blk != NULL)
+ left_blk = curr_blk->asb_left;
+ else
+ left_blk = NULL;
+
+ while (curr_blk != NULL) {
+ if (new_blk->addr_lo < curr_blk->addr_lo)
+ break;
+ /* Go to the next element in the list */
+ left_blk = curr_blk;
+ curr_blk = curr_blk->asb_right;
+ }
+
+ new_blk->asb_left = left_blk;
+ new_blk->asb_right = curr_blk;
+
+ if (left_blk != NULL)
+ left_blk->asb_right = new_blk;
+ else
+ hal->addr_space_free_list = new_blk;
+
+ if (curr_blk != NULL)
+ curr_blk->asb_left = new_blk;
+
+ right_blk = new_blk->asb_right;
+ left_blk = new_blk->asb_left;
+
+ /* Can we merge with block to the left? */
+ if ((left_blk != NULL) &&
+ (new_blk->addr_type == left_blk->addr_type) &&
+ (new_blk->addr_reserved != ADDR_RESERVED) &&
+ (left_blk->addr_reserved != ADDR_RESERVED) &&
+ (new_blk->addr_lo == left_blk->addr_hi + 1)) {
+
+ new_blk->addr_lo = left_blk->addr_lo;
+ new_blk->asb_left = left_blk->asb_left;
+
+ if (left_blk->asb_left != NULL)
+ left_blk->asb_left->asb_right = new_blk;
+ if (hal->addr_space_free_list == left_blk)
+ hal->addr_space_free_list = new_blk;
+ kmem_free((void *)left_blk, sizeof (s1394_addr_space_blk_t));
+ }
+
+ /* Can we merge with block to the right? */
+ if ((right_blk != NULL) &&
+ (new_blk->addr_type == right_blk->addr_type) &&
+ (new_blk->addr_reserved != ADDR_RESERVED) &&
+ (right_blk->addr_reserved != ADDR_RESERVED) &&
+ (new_blk->addr_hi + 1 == right_blk->addr_lo)) {
+
+ new_blk->addr_hi = right_blk->addr_hi;
+ new_blk->asb_right = right_blk->asb_right;
+
+ if (right_blk->asb_right != NULL)
+ right_blk->asb_right->asb_left = new_blk;
+ kmem_free((void *)right_blk, sizeof (s1394_addr_space_blk_t));
+ }
+
+ new_blk->addr_enable = 0;
+ new_blk->kmem_bufp = NULL;
+ new_blk->addr_arg = NULL;
+
+ TNF_PROBE_0_DEBUG(s1394_free_list_insert_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+}
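+
+/*
+ * Editorial worked example (not part of the original source): inserting
+ * a freed block [0x2000, 0x2FFF] between free neighbors [0x1000, 0x1FFF]
+ * and [0x3000, 0x3FFF] of the same type, with none of the three marked
+ * "reserved", leaves a single free entry [0x1000, 0x3FFF]; both neighbor
+ * structures are kmem_free'd by the merge logic above.
+ */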
+
+/*
+ * s1394_free_list_search()
+ * attempts to find a block in the free list that contains the address
+ * specified. If none is found, it returns NULL.
+ */
+static s1394_addr_space_blk_t *
+s1394_free_list_search(s1394_hal_t *hal, uint64_t addr)
+{
+ s1394_addr_space_blk_t *curr_blk;
+
+ TNF_PROBE_0_DEBUG(s1394_free_list_search_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->addr_space_free_mutex));
+
+ /* Start at the head of the list */
+ curr_blk = hal->addr_space_free_list;
+ while (curr_blk != NULL) {
+ if ((addr >= curr_blk->addr_lo) && (addr <= curr_blk->addr_hi))
+ break;
+ else
+ curr_blk = curr_blk->asb_right;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_free_list_search_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (curr_blk);
+}
+
+/*
+ * s1394_free_list_find()
+ * attempts to find a block in the free list that is of the specified
+ * type and at least the specified size. It ignores any blocks marked
+ * "reserved".
+ */
+static s1394_addr_space_blk_t *
+s1394_free_list_find(s1394_hal_t *hal, uint32_t type, uint32_t length)
+{
+ s1394_addr_space_blk_t *curr_blk;
+ uint64_t size;
+
+ TNF_PROBE_0_DEBUG(s1394_free_list_find_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ ASSERT(MUTEX_HELD(&hal->addr_space_free_mutex));
+
+ /* Start at the head of the list */
+ curr_blk = hal->addr_space_free_list;
+
+ while (curr_blk != NULL) {
+ /* Find block of right "type" - that isn't "reserved" */
+ if ((curr_blk->addr_type == type) &&
+ (curr_blk->addr_reserved != ADDR_RESERVED)) {
+
+ /* CSR allocs above IEEE1394_UCSR_RESERVED_BOUNDARY */
+ if ((type == T1394_ADDR_CSR) &&
+ (curr_blk->addr_lo <
+ IEEE1394_UCSR_RESERVED_BOUNDARY)) {
+ curr_blk = curr_blk->asb_right;
+ continue;
+ }
+
+ size = (curr_blk->addr_hi - curr_blk->addr_lo) + 1;
+ if (size >= (uint64_t)length)
+ break;
+ }
+ curr_blk = curr_blk->asb_right;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_free_list_find_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+ return (curr_blk);
+}
+
+/*
+ * s1394_free_list_delete()
+ * will remove the block pointed to by del_blk from the free list.
+ * Typically, this is done so that it may be inserted into the used tree.
+ */
+static s1394_addr_space_blk_t *
+s1394_free_list_delete(s1394_hal_t *hal, s1394_addr_space_blk_t *del_blk)
+{
+ s1394_addr_space_blk_t *left_blk;
+ s1394_addr_space_blk_t *right_blk;
+
+ TNF_PROBE_0_DEBUG(s1394_free_list_delete_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->addr_space_free_mutex));
+
+ left_blk = del_blk->asb_left;
+ right_blk = del_blk->asb_right;
+
+ del_blk->asb_left = NULL;
+ del_blk->asb_right = NULL;
+
+ if (left_blk != NULL)
+ left_blk->asb_right = right_blk;
+ else
+ hal->addr_space_free_list = right_blk;
+
+ if (right_blk != NULL)
+ right_blk->asb_left = left_blk;
+
+ TNF_PROBE_0_DEBUG(s1394_free_list_delete_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (del_blk);
+}
+
+/*
+ * s1394_used_tree_insert()
+ * is used to insert a 1394 address block that has been removed from the
+ * free list into the used tree. In the used tree it will be possible
+ * to search for a given address when an AR request arrives. Since the
+ * used tree is implemented as a red-black tree, the insertion is done
+ * with s1394_tree_insert(), which does a simple binary tree insertion.
+ * It is then followed by cleanup of links and red-black coloring. This
+ * particular implementation of the red-black tree is modified from code
+ * included in "Introduction to Algorithms" - Cormen, Leiserson, and Rivest,
+ * pp. 263 - 277.
+ */
+static void
+s1394_used_tree_insert(s1394_hal_t *hal, s1394_addr_space_blk_t *x)
+{
+ s1394_addr_space_blk_t *y;
+ s1394_addr_space_blk_t **root;
+
+ TNF_PROBE_0_DEBUG(s1394_used_tree_insert_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ /* Lock the "used" tree */
+ mutex_enter(&hal->addr_space_used_mutex);
+
+ /* Get the head of the "used" tree */
+ root = &hal->addr_space_used_tree;
+
+ s1394_tree_insert(root, x);
+
+ x->asb_color = RED;
+ while ((x != *root) && (x->asb_parent->asb_color == RED)) {
+ /* Is x's parent the "left-child" or the "right-child"? */
+ if (x->asb_parent == x->asb_parent->asb_parent->asb_left) {
+ /* Left-child, set y to the sibling */
+ y = x->asb_parent->asb_parent->asb_right;
+ if ((y != NULL) && (y->asb_color == RED)) {
+ x->asb_parent->asb_color = BLACK;
+ y->asb_color = BLACK;
+ x->asb_parent->asb_parent->asb_color = RED;
+ x = x->asb_parent->asb_parent;
+
+ } else {
+ if (x == x->asb_parent->asb_right) {
+ x = x->asb_parent;
+ s1394_left_rotate(root, x);
+ }
+ x->asb_parent->asb_color = BLACK;
+ x->asb_parent->asb_parent->asb_color = RED;
+ s1394_right_rotate(root,
+ x->asb_parent->asb_parent);
+ }
+
+ } else {
+ /* Right-child, set y to the sibling */
+ y = x->asb_parent->asb_parent->asb_left;
+ if ((y != NULL) && (y->asb_color == RED)) {
+ x->asb_parent->asb_color = BLACK;
+ y->asb_color = BLACK;
+ x->asb_parent->asb_parent->asb_color = RED;
+ x = x->asb_parent->asb_parent;
+
+ } else {
+ if (x == x->asb_parent->asb_left) {
+ x = x->asb_parent;
+ s1394_right_rotate(root, x);
+ }
+ x->asb_parent->asb_color = BLACK;
+ x->asb_parent->asb_parent->asb_color = RED;
+ s1394_left_rotate(root,
+ x->asb_parent->asb_parent);
+ }
+ }
+ }
+
+ (*root)->asb_color = BLACK;
+
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_used_tree_insert_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+}
+
+/*
+ * s1394_tree_insert()
+ * is a "helper" function for s1394_used_tree_insert(). It inserts an
+ * address block into a binary tree (red-black tree), and
+ * s1394_used_tree_insert() then cleans up the links and colorings, etc.
+ */
+static void
+s1394_tree_insert(s1394_addr_space_blk_t **root, s1394_addr_space_blk_t *z)
+{
+ s1394_addr_space_blk_t *y = NULL;
+ s1394_addr_space_blk_t *x = *root;
+
+ TNF_PROBE_0_DEBUG(s1394_tree_insert_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ while (x != NULL) {
+ y = x;
+ if (z->addr_lo < x->addr_lo)
+ x = x->asb_left;
+ else
+ x = x->asb_right;
+ }
+
+ z->asb_parent = y;
+ z->asb_right = NULL;
+ z->asb_left = NULL;
+
+ if (y == NULL)
+ *root = z;
+ else if (z->addr_lo < y->addr_lo)
+ y->asb_left = z;
+ else
+ y->asb_right = z;
+
+ TNF_PROBE_0_DEBUG(s1394_tree_insert_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+}
+
+/*
+ * s1394_used_tree_search()
+ * is called when an AR request arrives. By calling s1394_tree_search()
+ * with the destination address, it can quickly find a block for that
+ * address (if one exists in the used tree) and return a pointer to it.
+ */
+s1394_addr_space_blk_t *
+s1394_used_tree_search(s1394_hal_t *hal, uint64_t addr)
+{
+ s1394_addr_space_blk_t *curr_blk;
+
+ TNF_PROBE_0_DEBUG(s1394_used_tree_search_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->addr_space_used_mutex));
+
+ /* Search the HAL's "used" tree for this address */
+ curr_blk = s1394_tree_search(hal->addr_space_used_tree, addr);
+
+ TNF_PROBE_0_DEBUG(s1394_used_tree_search_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (curr_blk);
+}
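+
+/*
+ * Editorial sketch (not part of the original source): how the AR path
+ * might match an incoming request's destination offset to a registered
+ * block. The lookup must hold addr_space_used_mutex (see the ASSERT
+ * above); the delivery step is elided.
+ */
+#if 0
+static void
+xx1394_match_ar_req(s1394_hal_t *hal, uint64_t destination_addr)
+{
+	s1394_addr_space_blk_t *blk;
+
+	mutex_enter(&hal->addr_space_used_mutex);
+	blk = s1394_used_tree_search(hal, destination_addr);
+	if ((blk != NULL) && (blk->addr_enable != 0)) {
+		/* deliver via blk->addr_events callbacks, blk->addr_arg */
+	}
+	mutex_exit(&hal->addr_space_used_mutex);
+}
+#endif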
+
+/*
+ * s1394_tree_search()
+ * is a "helper" function for s1394_used_tree_search(). It implements a
+ * typical binary tree search with the address as the search key.
+ */
+static s1394_addr_space_blk_t *
+s1394_tree_search(s1394_addr_space_blk_t *x, uint64_t address)
+{
+ TNF_PROBE_0_DEBUG(s1394_tree_search_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ while (x != NULL) {
+ if (x->addr_lo > address)
+ x = x->asb_left;
+ else if (x->addr_hi < address)
+ x = x->asb_right;
+ else
+ break;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_tree_search_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+ return (x);
+}
+
+/*
+ * s1394_used_tree_delete()
+ * is used to remove an address block from the used tree. This is
+ * necessary when address spaces are freed. The removal is accomplished
+ * in two steps, the removal done by this function and the cleanup done
+ * by s1394_used_tree_delete_fixup().
+ */
+s1394_addr_space_blk_t *
+s1394_used_tree_delete(s1394_hal_t *hal, s1394_addr_space_blk_t *z)
+{
+ s1394_addr_space_blk_t *y;
+ s1394_addr_space_blk_t *x;
+ s1394_addr_space_blk_t *w;
+ s1394_addr_space_blk_t *p;
+ s1394_addr_space_blk_t **root;
+ int old_color;
+ int side_of_x;
+
+ TNF_PROBE_0_DEBUG(s1394_used_tree_delete_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ /* Lock the "used" tree */
+ mutex_enter(&hal->addr_space_used_mutex);
+
+ /* Get the head of the "used" tree */
+ root = &hal->addr_space_used_tree;
+
+ if ((z->asb_left == NULL) || (z->asb_right == NULL))
+ y = z;
+ else
+ y = s1394_tree_successor(z);
+
+ if (y->asb_parent == z)
+ p = y;
+ else
+ p = y->asb_parent;
+
+ if (y->asb_left != NULL) {
+ x = y->asb_left;
+ if ((y != *root) && (y == y->asb_parent->asb_left)) {
+ w = y->asb_parent->asb_right;
+ side_of_x = LEFT;
+ }
+
+ if ((y != *root) && (y == y->asb_parent->asb_right)) {
+ w = y->asb_parent->asb_left;
+ side_of_x = RIGHT;
+ }
+
+ } else {
+ x = y->asb_right;
+ if ((y != *root) && (y == y->asb_parent->asb_left)) {
+ w = y->asb_parent->asb_right;
+ side_of_x = LEFT;
+ }
+
+ if ((y != *root) && (y == y->asb_parent->asb_right)) {
+ w = y->asb_parent->asb_left;
+ side_of_x = RIGHT;
+ }
+
+ }
+
+ if (x != NULL)
+ x->asb_parent = y->asb_parent;
+
+ if (y->asb_parent == NULL)
+ *root = x;
+ else if (y == y->asb_parent->asb_left)
+ y->asb_parent->asb_left = x;
+ else
+ y->asb_parent->asb_right = x;
+
+ old_color = y->asb_color;
+
+ /* Substitute the y-node for the z-node (deleted) */
+ if (y != z) {
+ y->asb_color = z->asb_color;
+ y->asb_parent = z->asb_parent;
+ if (z->asb_parent != NULL) {
+ if (z->asb_parent->asb_left == z)
+ z->asb_parent->asb_left = y;
+ if (z->asb_parent->asb_right == z)
+ z->asb_parent->asb_right = y;
+ }
+
+ y->asb_left = z->asb_left;
+ if (z->asb_left != NULL)
+ z->asb_left->asb_parent = y;
+ y->asb_right = z->asb_right;
+ if (z->asb_right != NULL)
+ z->asb_right->asb_parent = y;
+
+ if (z == *root)
+ *root = y;
+ }
+
+ z->asb_parent = NULL;
+ z->asb_right = NULL;
+ z->asb_left = NULL;
+
+ if (old_color == BLACK)
+ s1394_used_tree_delete_fixup(root, p, x, w, side_of_x);
+
+ /* Unlock the "used" tree */
+ mutex_exit(&hal->addr_space_used_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_used_tree_delete_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (z);
+}
+
+/*
+ * s1394_used_tree_delete_fixup()
+ * is the "helper" function for s1394_used_tree_delete(). It is used to
+ * cleanup/enforce the red-black coloring in the tree.
+ */
+static void
+s1394_used_tree_delete_fixup(s1394_addr_space_blk_t **root,
+ s1394_addr_space_blk_t *p, s1394_addr_space_blk_t *x,
+ s1394_addr_space_blk_t *w, int side_of_x)
+{
+ boolean_t first_time;
+
+ TNF_PROBE_0_DEBUG(s1394_used_tree_delete_fixup_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ first_time = B_TRUE;
+ while ((x != *root) && ((x == NULL) || (x->asb_color == BLACK))) {
+ if (((first_time == B_TRUE) && (side_of_x == LEFT)) ||
+ ((first_time == B_FALSE) && (x == p->asb_left))) {
+
+ if (first_time != B_TRUE)
+ w = p->asb_right;
+
+ if ((w != NULL) && (w->asb_color == RED)) {
+ w->asb_color = BLACK;
+ p->asb_color = RED;
+ s1394_left_rotate(root, p);
+ w = p->asb_right;
+ }
+
+ if (w == NULL) {
+ x = p;
+ p = p->asb_parent;
+ first_time = B_FALSE;
+
+ } else if (((w->asb_left == NULL) ||
+ (w->asb_left->asb_color == BLACK)) &&
+ ((w->asb_right == NULL) ||
+ (w->asb_right->asb_color == BLACK))) {
+ w->asb_color = RED;
+ x = p;
+ p = p->asb_parent;
+ first_time = B_FALSE;
+
+ } else {
+ if ((w->asb_right == NULL) ||
+ (w->asb_right->asb_color == BLACK)) {
+ w->asb_left->asb_color = BLACK;
+ w->asb_color = RED;
+ s1394_right_rotate(root, w);
+ w = p->asb_right;
+ }
+
+ w->asb_color = p->asb_color;
+ p->asb_color = BLACK;
+ if (w->asb_right != NULL)
+ w->asb_right->asb_color = BLACK;
+ s1394_left_rotate(root, p);
+ x = *root;
+ first_time = B_FALSE;
+ }
+
+ } else {
+ if (first_time == B_FALSE)
+ w = p->asb_left;
+
+ if ((w != NULL) && (w->asb_color == RED)) {
+ w->asb_color = BLACK;
+ p->asb_color = RED;
+ s1394_right_rotate(root, p);
+ w = p->asb_left;
+ }
+
+ if (w == NULL) {
+ x = p;
+ p = p->asb_parent;
+ first_time = B_FALSE;
+
+ } else if (((w->asb_left == NULL) ||
+ (w->asb_left->asb_color == BLACK)) &&
+ ((w->asb_right == NULL) ||
+ (w->asb_right->asb_color == BLACK))) {
+ w->asb_color = RED;
+ x = p;
+ p = p->asb_parent;
+ first_time = B_FALSE;
+
+ } else {
+ if ((w->asb_left == NULL) ||
+ (w->asb_left->asb_color == BLACK)) {
+
+ w->asb_right->asb_color = BLACK;
+ w->asb_color = RED;
+ s1394_left_rotate(root, w);
+ w = p->asb_left;
+ }
+
+ w->asb_color = p->asb_color;
+ p->asb_color = BLACK;
+ if (w->asb_left != NULL)
+ w->asb_left->asb_color = BLACK;
+ s1394_right_rotate(root, p);
+ x = *root;
+ first_time = B_FALSE;
+ }
+ }
+ }
+ if (x != NULL)
+ x->asb_color = BLACK;
+
+ TNF_PROBE_0_DEBUG(s1394_used_tree_delete_fixup_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+}
+
+/*
+ * s1394_left_rotate()
+ * is necessary with a red-black tree to help maintain the coloring in the
+ * tree as items are inserted and removed. Its operation, the opposite of
+ * s1394_right_rotate(), is a fundamental operation on the red-black tree.
+ */
+static void
+s1394_left_rotate(s1394_addr_space_blk_t **root, s1394_addr_space_blk_t *x)
+{
+ s1394_addr_space_blk_t *y;
+
+ TNF_PROBE_0_DEBUG(s1394_left_rotate_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ y = x->asb_right;
+ x->asb_right = y->asb_left;
+
+ if (y->asb_left != NULL)
+ y->asb_left->asb_parent = x;
+
+ y->asb_parent = x->asb_parent;
+ if (x->asb_parent == NULL)
+ *root = y;
+ else if (x == x->asb_parent->asb_left)
+ x->asb_parent->asb_left = y;
+ else
+ x->asb_parent->asb_right = y;
+
+ y->asb_left = x;
+ x->asb_parent = y;
+
+ TNF_PROBE_0_DEBUG(s1394_left_rotate_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+}
+
+/*
+ * s1394_right_rotate()
+ * is necessary with a red-black tree to help maintain the coloring in the
+ * tree as items are inserted and removed. Its operation, the opposite of
+ * s1394_left_rotate(), is a fundamental operation on the red-black tree.
+ */
+static void
+s1394_right_rotate(s1394_addr_space_blk_t **root, s1394_addr_space_blk_t *x)
+{
+ s1394_addr_space_blk_t *y;
+
+ TNF_PROBE_0_DEBUG(s1394_right_rotate_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ y = x->asb_left;
+ x->asb_left = y->asb_right;
+
+ if (y->asb_right != NULL)
+ y->asb_right->asb_parent = x;
+
+ y->asb_parent = x->asb_parent;
+ if (x->asb_parent == NULL)
+ *root = y;
+ else if (x == x->asb_parent->asb_right)
+ x->asb_parent->asb_right = y;
+ else
+ x->asb_parent->asb_left = y;
+
+ y->asb_right = x;
+ x->asb_parent = y;
+
+ TNF_PROBE_0_DEBUG(s1394_right_rotate_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+}
+
+/*
+ * s1394_tree_minimum()
+ * is used to find the smallest key in a binary tree.
+ */
+static s1394_addr_space_blk_t *
+s1394_tree_minimum(s1394_addr_space_blk_t *x)
+{
+ TNF_PROBE_0_DEBUG(s1394_tree_minimum_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ while (x->asb_left != NULL)
+ x = x->asb_left;
+
+ TNF_PROBE_0_DEBUG(s1394_tree_minimum_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+ return (x);
+}
+
+/*
+ * s1394_tree_successor()
+ * is used to find the next largest key in a binary tree, given a starting
+ * point.
+ */
+static s1394_addr_space_blk_t *
+s1394_tree_successor(s1394_addr_space_blk_t *x)
+{
+ s1394_addr_space_blk_t *y;
+
+ TNF_PROBE_0_DEBUG(s1394_tree_successor_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ if (x->asb_right != NULL) {
+ y = s1394_tree_minimum(x->asb_right);
+
+ TNF_PROBE_0_DEBUG(s1394_tree_successor_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (y);
+ }
+
+ y = x->asb_parent;
+ while ((y != NULL) && (x == y->asb_right)) {
+ x = y;
+ y = y->asb_parent;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_tree_successor_exit, S1394_TNF_SL_ARREQ_STACK,
+ "");
+ return (y);
+}
+
+/*
+ * s1394_is_posted_write()
+ * returns a B_TRUE if the given address is in the "posted write" range
+ * of the given HAL's 1394 address space and B_FALSE if it isn't.
+ */
+boolean_t
+s1394_is_posted_write(s1394_hal_t *hal, uint64_t addr)
+{
+ addr = addr & IEEE1394_ADDR_OFFSET_MASK;
+
+ if ((addr >= hal->posted_write_addr_lo) &&
+ (addr <= hal->posted_write_addr_hi))
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+/*
+ * s1394_is_physical_addr()
+ * returns a B_TRUE if the given address is in the "physical" range of
+ * the given HAL's 1394 address space and B_FALSE if it isn't.
+ */
+boolean_t
+s1394_is_physical_addr(s1394_hal_t *hal, uint64_t addr)
+{
+ addr = addr & IEEE1394_ADDR_OFFSET_MASK;
+
+ if ((addr >= hal->physical_addr_lo) &&
+ (addr <= hal->physical_addr_hi))
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+/*
+ * s1394_is_csr_addr()
+ * returns a B_TRUE if the given address is in the "CSR" range of the
+ * given HAL's 1394 address space and B_FALSE if it isn't.
+ */
+boolean_t
+s1394_is_csr_addr(s1394_hal_t *hal, uint64_t addr)
+{
+ addr = addr & IEEE1394_ADDR_OFFSET_MASK;
+
+ if ((addr >= hal->csr_addr_lo) &&
+ (addr <= hal->csr_addr_hi))
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+/*
+ * s1394_is_normal_addr()
+ * returns a B_TRUE if the given address is in the "normal" range of
+ * the given HAL's 1394 address space and B_FALSE if it isn't.
+ */
+boolean_t
+s1394_is_normal_addr(s1394_hal_t *hal, uint64_t addr)
+{
+ addr = addr & IEEE1394_ADDR_OFFSET_MASK;
+
+ if ((addr >= hal->normal_addr_lo) &&
+ (addr <= hal->normal_addr_hi))
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
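+
+/*
+ * Editorial sketch (not part of the original source): the four predicates
+ * above let the AR path classify a destination offset before any used
+ * tree lookup. The comments describe the intended handling; the routing
+ * itself lives elsewhere in the services layer.
+ */
+#if 0
+static void
+xx1394_classify_addr(s1394_hal_t *hal, uint64_t addr)
+{
+	if (s1394_is_physical_addr(hal, addr)) {
+		/* backed directly by memory; handled by the HAL */
+	} else if (s1394_is_posted_write(hal, addr)) {
+		/* write is acked before the target processes it */
+	} else if (s1394_is_csr_addr(hal, addr)) {
+		/* core CSRs set up in s1394_setup_CSR_space() */
+	} else if (s1394_is_normal_addr(hal, addr)) {
+		/* ordinary target-registered space; see the used tree */
+	}
+}
+#endif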
diff --git a/usr/src/uts/common/io/1394/s1394_asynch.c b/usr/src/uts/common/io/1394/s1394_asynch.c
new file mode 100644
index 0000000000..32bb6a5083
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394_asynch.c
@@ -0,0 +1,2340 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394_asynch.c
+ * 1394 Services Layer Asynchronous Communications Routines
+ * These routines handle all of the tasks relating to asynch commands
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+#include <sys/1394/ieee1394.h>
+#include <sys/1394/ieee1212.h>
+
+static void s1394_handle_lock(cmd1394_cmd_t *cmd);
+
+static cmd1394_cmd_t *s1394_pending_q_remove(s1394_hal_t *hal);
+
+static boolean_t s1394_process_pending_q(s1394_hal_t *hal);
+
+static boolean_t s1394_pending_q_helper(s1394_hal_t *hal, cmd1394_cmd_t *cmd);
+
+static int s1394_process_split_lock(cmd1394_cmd_t *cmd,
+ cmd1394_cmd_t *target_cmd);
+
+static int s1394_finish_split_lock(cmd1394_cmd_t *cmd,
+ cmd1394_cmd_t *target_cmd);
+
+/*
+ * s1394_alloc_cmd()
+ * is used to allocate a command for a target or for a HAL.
+ */
+int
+s1394_alloc_cmd(s1394_hal_t *hal, uint_t flags, cmd1394_cmd_t **cmdp)
+{
+ s1394_cmd_priv_t *s_priv;
+ void *hal_overhead;
+ uint_t cmd_size;
+ int alloc_sleep;
+
+ TNF_PROBE_0_DEBUG(s1394_alloc_cmd_enter, S1394_TNF_SL_STACK, "");
+
+ alloc_sleep = (flags & T1394_ALLOC_CMD_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
+
+ if ((alloc_sleep == KM_SLEEP) &&
+ (curthread->t_flag == T_INTR_THREAD)) {
+ TNF_PROBE_1(s1394_alloc_cmd_error, S1394_TNF_SL_ATREQ_ERROR,
+ "", tnf_string, msg, "Tried to sleep in intr context");
+ TNF_PROBE_0_DEBUG(s1394_alloc_cmd_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ ASSERT(alloc_sleep != KM_SLEEP); /* fail */
+ return (DDI_FAILURE);
+ }
+
+ /* either FCP command or response, but not both */
+ if ((flags &
+ (T1394_ALLOC_CMD_FCP_COMMAND | T1394_ALLOC_CMD_FCP_RESPONSE)) ==
+ (T1394_ALLOC_CMD_FCP_COMMAND | T1394_ALLOC_CMD_FCP_RESPONSE)) {
+ TNF_PROBE_1(s1394_alloc_cmd_error, S1394_TNF_SL_ATREQ_ERROR,
+ "", tnf_string, msg, "Both FCP cmd and resp flags");
+ TNF_PROBE_0_DEBUG(s1394_alloc_cmd_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ *cmdp = kmem_cache_alloc(hal->hal_kmem_cachep, alloc_sleep);
+ if (*cmdp == NULL) {
+ TNF_PROBE_0_DEBUG(s1394_alloc_cmd_exit,
+ S1394_TNF_SL_STACK, "");
+ return (DDI_FAILURE);
+ }
+ cmd_size = sizeof (cmd1394_cmd_t) +
+ sizeof (s1394_cmd_priv_t) + hal->halinfo.hal_overhead;
+ bzero((void *)*cmdp, cmd_size);
+
+ (*cmdp)->cmd_version = T1394_VERSION_V1;
+ (*cmdp)->cmd_result = CMD1394_NOSTATUS;
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(*cmdp);
+
+ /* Set extension type */
+ if (flags & T1394_ALLOC_CMD_FCP_COMMAND) {
+ s1394_fa_init_cmd(s_priv, S1394_FA_TYPE_FCP_CTL);
+ } else if (flags & T1394_ALLOC_CMD_FCP_RESPONSE) {
+ s1394_fa_init_cmd(s_priv, S1394_FA_TYPE_FCP_TGT);
+ }
+
+ /* Set up the hal_overhead ptr in the hal_cmd_private */
+ hal_overhead = (uchar_t *)s_priv + sizeof (s1394_cmd_priv_t);
+ s_priv->hal_cmd_private.hal_overhead = (void *)hal_overhead;
+
+ TNF_PROBE_1_DEBUG(s1394_alloc_cmd, S1394_TNF_SL_STACK, "",
+ tnf_opaque, cmd, *cmdp);
+
+ /* kstats - number of cmd allocs */
+ hal->hal_kstats->cmd_alloc++;
+
+ TNF_PROBE_0_DEBUG(s1394_alloc_cmd_exit, S1394_TNF_SL_STACK, "");
+ return (DDI_SUCCESS);
+}
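+
+/*
+ * Editorial sketch (not part of the original source): target drivers
+ * normally reach this routine through t1394_alloc_cmd(9F) and
+ * t1394_free_cmd(9F); target_hdl is a hypothetical target handle
+ * obtained from t1394_attach(9F).
+ */
+#if 0
+static int
+xx1394_cmd_example(t1394_handle_t target_hdl)
+{
+	cmd1394_cmd_t *cmd;
+
+	if (t1394_alloc_cmd(target_hdl, T1394_ALLOC_CMD_NOSLEEP,
+	    &cmd) != DDI_SUCCESS)
+		return (DDI_FAILURE);
+
+	/* set cmd->cmd_type, cmd->cmd_addr, etc., then issue the command */
+
+	(void) t1394_free_cmd(target_hdl, 0, &cmd);
+	return (DDI_SUCCESS);
+}
+#endif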
+
+/*
+ * s1394_free_cmd()
+ * is used to free a command that had been previously allocated by
+ * s1394_alloc_cmd().
+ */
+int
+s1394_free_cmd(s1394_hal_t *hal, cmd1394_cmd_t **cmdp)
+{
+ s1394_cmd_priv_t *s_priv;
+
+ TNF_PROBE_0_DEBUG(s1394_free_cmd_enter, S1394_TNF_SL_STACK, "");
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(*cmdp);
+
+ /* Check that command isn't in use */
+ if (s_priv->cmd_in_use == B_TRUE) {
+ TNF_PROBE_1(s1394_free_cmd_error, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "Attempted to free an in-use command");
+ TNF_PROBE_0_DEBUG(s1394_free_cmd_exit, S1394_TNF_SL_STACK, "");
+ ASSERT(s_priv->cmd_in_use == B_FALSE);
+ return (DDI_FAILURE);
+ }
+ TNF_PROBE_1_DEBUG(s1394_free_cmd, S1394_TNF_SL_STACK, "",
+ tnf_opaque, cmd, *cmdp);
+
+ /* Free the command back to the kmem cache */
+ kmem_cache_free(hal->hal_kmem_cachep, *cmdp);
+
+ /* Command pointer is set to NULL before returning */
+ *cmdp = NULL;
+
+ /* kstats - number of cmd frees */
+ hal->hal_kstats->cmd_free++;
+
+ TNF_PROBE_0_DEBUG(s1394_free_cmd_exit, S1394_TNF_SL_STACK, "");
+ return (DDI_SUCCESS);
+}
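+
+/*
+ * Usage sketch (hypothetical caller, for illustration only): the pair of
+ * routines above is expected to be used symmetrically, with the command
+ * pointer NULLed by s1394_free_cmd() on success:
+ *
+ *	cmd1394_cmd_t *cmd;
+ *
+ *	if (s1394_alloc_cmd(hal, T1394_ALLOC_CMD_NOSLEEP, &cmd) !=
+ *	    DDI_SUCCESS)
+ *		return (DDI_FAILURE);
+ *	... initialize cmd_type, cmd_addr, cmd_options, etc. ...
+ *	(void) s1394_free_cmd(hal, &cmd);	... cmd is now NULL ...
+ */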
+
+/*
+ * s1394_xfer_asynch_command()
+ * is used to send an asynch command down to the HAL. Based upon the type
+ * of command that is being sent, the appropriate HAL function is called.
+ * Command failures are handled by returning an error and/or shutting down
+ * the HAL, depending on the severity of the error.
+ */
+int
+s1394_xfer_asynch_command(s1394_hal_t *hal, cmd1394_cmd_t *cmd, int *err)
+{
+ s1394_cmd_priv_t *s_priv;
+ h1394_cmd_priv_t *h_priv;
+ s1394_hal_state_t state;
+ dev_info_t *dip;
+ int result_from_hal;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_xfer_asynch_command_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ mutex_enter(&hal->topology_tree_mutex);
+ state = hal->hal_state;
+ if (((state != S1394_HAL_NORMAL) && (state != S1394_HAL_RESET)) ||
+ (hal->disable_requests_bit == 1)) {
+ *err = s1394_HAL_asynch_error(hal, cmd, state);
+ mutex_exit(&hal->topology_tree_mutex);
+ TNF_PROBE_0_DEBUG(s1394_xfer_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Get a pointer to the HAL private struct */
+ h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
+
+ /* kstats - number of AT requests sent */
+ switch (cmd->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ hal->hal_kstats->atreq_quad_rd++;
+ break;
+
+ case CMD1394_ASYNCH_RD_BLOCK:
+ hal->hal_kstats->atreq_blk_rd++;
+ break;
+
+ case CMD1394_ASYNCH_WR_QUAD:
+ hal->hal_kstats->atreq_quad_wr++;
+ break;
+
+ case CMD1394_ASYNCH_WR_BLOCK:
+ hal->hal_kstats->atreq_blk_wr++;
+ hal->hal_kstats->atreq_blk_wr_size += h_priv->mblk.length;
+ break;
+
+ case CMD1394_ASYNCH_LOCK_32:
+ hal->hal_kstats->atreq_lock32++;
+ break;
+
+ case CMD1394_ASYNCH_LOCK_64:
+ hal->hal_kstats->atreq_lock64++;
+ break;
+ }
+
+ switch (s_priv->cmd_priv_xfer_type) {
+ /* Call the HAL's read entry point */
+ case S1394_CMD_READ:
+ ret = HAL_CALL(hal).read(hal->halinfo.hal_private,
+ (cmd1394_cmd_t *)cmd,
+ (h1394_cmd_priv_t *)&s_priv->hal_cmd_private,
+ &result_from_hal);
+ break;
+
+ /* Call the HAL's write entry point */
+ case S1394_CMD_WRITE:
+ ret = HAL_CALL(hal).write(hal->halinfo.hal_private,
+ (cmd1394_cmd_t *)cmd,
+ (h1394_cmd_priv_t *)&s_priv->hal_cmd_private,
+ &result_from_hal);
+ break;
+
+ /* Call the HAL's lock entry point */
+ case S1394_CMD_LOCK:
+ ret = HAL_CALL(hal).lock(hal->halinfo.hal_private,
+ (cmd1394_cmd_t *)cmd,
+ (h1394_cmd_priv_t *)&s_priv->hal_cmd_private,
+ &result_from_hal);
+ break;
+
+ default:
+ *err = CMD1394_EUNKNOWN_ERROR;
+
+ TNF_PROBE_1(s1394_xfer_asynch_command_error,
+ S1394_TNF_SL_ATREQ_ERROR, "", tnf_string, msg,
+ "Invalid command type specified");
+ TNF_PROBE_0_DEBUG(s1394_xfer_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_xfer_asynch_command_return_from_HAL,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ if (ret == DDI_FAILURE) {
+ switch (result_from_hal) {
+ case H1394_STATUS_EMPTY_TLABEL:
+ /* Out of TLABELs - Unable to send AT req */
+ *err = CMD1394_ENO_ATREQ;
+ break;
+
+ case H1394_STATUS_INVALID_BUSGEN:
+ /* Stale bus generation - Unable to send AT req */
+ *err = CMD1394_ESTALE_GENERATION;
+ break;
+
+ case H1394_STATUS_NOMORE_SPACE:
+ /* No more space on HAL's HW queue */
+ *err = CMD1394_ENO_ATREQ;
+ break;
+
+ case H1394_STATUS_INTERNAL_ERROR:
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ *err = CMD1394_EFATAL_ERROR;
+ break;
+
+ default:
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ *err = CMD1394_EFATAL_ERROR;
+ break;
+ }
+
+ TNF_PROBE_2_DEBUG(s1394_xfer_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "",
+ tnf_int, result_from_hal, result_from_hal,
+ tnf_int, err, *err);
+ return (DDI_FAILURE);
+ }
+
+ /* No errors, return success */
+ *err = CMD1394_NOSTATUS;
+
+ TNF_PROBE_0_DEBUG(s1394_xfer_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+}
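+
+/*
+ * Error-handling sketch (mirrors the callers later in this file): on
+ * DDI_FAILURE the caller inspects *err.  CMD1394_ESTALE_GENERATION is
+ * the only retryable error, and is handled by moving the command from
+ * the outstanding Q back onto the pending Q:
+ *
+ *	if (s1394_xfer_asynch_command(hal, cmd, &err) != DDI_SUCCESS) {
+ *		if (err == CMD1394_ESTALE_GENERATION) {
+ *			s1394_remove_q_asynch_cmd(hal, cmd);
+ *			s1394_pending_q_insert(hal, cmd,
+ *			    S1394_PENDING_Q_FRONT);
+ *		}
+ *	}
+ *
+ * All other errors are final; CMD1394_EFATAL_ERROR additionally implies
+ * that the HAL has been shut down.
+ */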
+
+/*
+ * s1394_setup_asynch_command()
+ * is used to setup an asynch command to be sent down to the HAL and out
+ * onto the bus. This function handles setting up the destination address
+ * (if necessary), speed, max_payload, putting the command onto the
+ * outstanding Q list, and any other things that must be done prior to
+ * calling the HAL.
+ */
+int
+s1394_setup_asynch_command(s1394_hal_t *hal, s1394_target_t *target,
+ cmd1394_cmd_t *cmd, uint32_t xfer_type, int *err)
+{
+ s1394_cmd_priv_t *s_priv;
+ h1394_cmd_priv_t *h_priv;
+ uint64_t node;
+ uint32_t from_node;
+ uint32_t to_node;
+ uint32_t bus_capabilities;
+ uint_t current_max_payload;
+ uint_t max_rec;
+ uint_t max_blk;
+
+ TNF_PROBE_0_DEBUG(s1394_setup_asynch_command_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ switch (cmd->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ case CMD1394_ASYNCH_WR_QUAD:
+ case CMD1394_ASYNCH_RD_BLOCK:
+ case CMD1394_ASYNCH_WR_BLOCK:
+ case CMD1394_ASYNCH_LOCK_32:
+ case CMD1394_ASYNCH_LOCK_64:
+ break;
+
+ default:
+ *err = CMD1394_EINVALID_COMMAND;
+ TNF_PROBE_1(s1394_setup_asynch_command_error,
+ S1394_TNF_SL_ATREQ_ERROR, "", tnf_string, msg,
+ "Invalid command type specified");
+ TNF_PROBE_0_DEBUG(s1394_setup_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Check for potential address roll-over */
+ if (s1394_address_rollover(cmd) != B_FALSE) {
+ *err = CMD1394_EADDRESS_ERROR;
+ TNF_PROBE_0_DEBUG(s1394_setup_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Set up who sent command on which hal */
+ s_priv->sent_by_target = (s1394_target_t *)target;
+ s_priv->sent_on_hal = (s1394_hal_t *)hal;
+
+ /* Set up command transfer type */
+ s_priv->cmd_priv_xfer_type = xfer_type;
+
+ if (cmd->cmd_options & CMD1394_OVERRIDE_ADDR) {
+ /* Compare the current generation from the HAL struct */
+ /* to the one given by the target */
+
+ /* Speed is to be filled in from speed map */
+ from_node = IEEE1394_NODE_NUM(hal->node_id);
+ to_node = IEEE1394_ADDR_PHY_ID(cmd->cmd_addr);
+
+ if (cmd->bus_generation != hal->generation_count) {
+ *err = CMD1394_ESTALE_GENERATION;
+ TNF_PROBE_0_DEBUG(s1394_setup_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ } else {
+ /* Set the generation */
+ cmd->bus_generation = hal->generation_count;
+
+ /* If not OVERRIDE_ADDR, then target must not be NULL */
+ ASSERT(target != NULL);
+
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+
+ if ((target->target_state & S1394_TARG_GONE) != 0 ||
+ target->on_node == NULL) {
+ rw_exit(&hal->target_list_rwlock);
+ *err = CMD1394_EDEVICE_REMOVED;
+ TNF_PROBE_0_DEBUG(s1394_setup_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ ASSERT((target->target_state & S1394_TARG_GONE) == 0);
+ node = target->on_node->node_num;
+ rw_exit(&hal->target_list_rwlock);
+
+ /* Mask in the top 16-bits */
+ cmd->cmd_addr = (cmd->cmd_addr & IEEE1394_ADDR_OFFSET_MASK);
+ cmd->cmd_addr = (cmd->cmd_addr |
+ (node << IEEE1394_ADDR_PHY_ID_SHIFT));
+ cmd->cmd_addr = (cmd->cmd_addr | IEEE1394_ADDR_BUS_ID_MASK);
+
+ /* Speed is to be filled in from speed map */
+ from_node = IEEE1394_NODE_NUM(hal->node_id);
+ to_node = (uint32_t)node;
+ }
+
+ /* Get a pointer to the HAL private struct */
+ h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
+
+ /* Copy the generation into the HAL's private field */
+ h_priv->bus_generation = cmd->bus_generation;
+
+ /* Fill in the nodeID */
+ cmd->nodeID = (cmd->cmd_addr & IEEE1394_ADDR_NODE_ID_MASK) >>
+ IEEE1394_ADDR_NODE_ID_SHIFT;
+
+ if (cmd->cmd_options & CMD1394_OVERRIDE_SPEED) {
+ if (cmd->cmd_speed > IEEE1394_S400) {
+ *err = CMD1394_EINVALID_COMMAND;
+ TNF_PROBE_0_DEBUG(s1394_setup_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+
+ } else {
+ s_priv->hal_cmd_private.speed = (int)cmd->cmd_speed;
+ }
+
+ } else {
+ /* Speed is to be filled in from speed map */
+ s_priv->hal_cmd_private.speed = (int)s1394_speed_map_get(hal,
+ from_node, to_node);
+ }
+
+ /* Is it a block request? */
+ if ((cmd->cmd_type == CMD1394_ASYNCH_RD_BLOCK) ||
+ (cmd->cmd_type == CMD1394_ASYNCH_WR_BLOCK)) {
+
+ if (cmd->cmd_u.b.data_block == NULL) {
+ *err = CMD1394_ENULL_MBLK;
+ TNF_PROBE_0_DEBUG(s1394_setup_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Also need to check for MBLK_TOO_SMALL */
+ if (s1394_mblk_too_small(cmd) != B_FALSE) {
+ *err = CMD1394_EMBLK_TOO_SMALL;
+ TNF_PROBE_0_DEBUG(s1394_setup_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Initialize bytes_transferred to zero */
+ cmd->cmd_u.b.bytes_transferred = 0;
+
+ /* Handle the MAX_PAYLOAD size */
+ if (cmd->cmd_options & CMD1394_OVERRIDE_ADDR) {
+
+ current_max_payload = 512 <<
+ (s_priv->hal_cmd_private.speed);
+ if (hal->topology_tree[to_node].cfgrom) {
+ bus_capabilities =
+ hal->topology_tree[to_node].cfgrom[
+ IEEE1212_NODE_CAP_QUAD];
+ max_rec = (bus_capabilities &
+ IEEE1394_BIB_MAXREC_MASK) >>
+ IEEE1394_BIB_MAXREC_SHIFT;
+ } else {
+ max_rec = 0;
+ }
+
+ if ((max_rec > 0) && (max_rec < 14)) {
+ max_blk = 1 << (max_rec + 1);
+
+ } else {
+ /* These are either unspecified or reserved */
+ max_blk = 4;
+ }
+ if (max_blk < current_max_payload)
+ current_max_payload = max_blk;
+
+ } else {
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+ current_max_payload = target->current_max_payload;
+ rw_exit(&hal->target_list_rwlock);
+ }
+
+ if (cmd->cmd_options & CMD1394_OVERRIDE_MAX_PAYLOAD) {
+ if (current_max_payload > cmd->cmd_u.b.max_payload)
+ current_max_payload = cmd->cmd_u.b.max_payload;
+ }
+
+ h_priv->mblk.curr_mblk = cmd->cmd_u.b.data_block;
+
+ if (cmd->cmd_type == CMD1394_ASYNCH_WR_BLOCK) {
+ h_priv->mblk.curr_offset =
+ cmd->cmd_u.b.data_block->b_rptr;
+ } else {
+ h_priv->mblk.curr_offset =
+ cmd->cmd_u.b.data_block->b_wptr;
+ }
+
+ if (cmd->cmd_u.b.blk_length > current_max_payload) {
+ h_priv->mblk.length = current_max_payload;
+ s_priv->data_remaining = cmd->cmd_u.b.blk_length;
+
+ } else {
+ h_priv->mblk.length = cmd->cmd_u.b.blk_length;
+ s_priv->data_remaining = cmd->cmd_u.b.blk_length;
+ }
+ }
+
+ /* Mark command as being used */
+ s_priv->cmd_in_use = B_TRUE;
+
+ /* Put command on the HAL's outstanding request Q */
+ s1394_insert_q_asynch_cmd(hal, cmd);
+
+ TNF_PROBE_0_DEBUG(s1394_setup_asynch_command_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+}
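+
+/*
+ * Worked example for the max_payload logic above (assuming the usual
+ * speed encoding S100=0, S200=1, S400=2): at S400 the speed-based
+ * ceiling is 512 << 2 = 2048 bytes.  If the remote node's Config ROM
+ * reports max_rec = 9, the device ceiling is 1 << (9 + 1) = 1024 bytes,
+ * so current_max_payload drops to 1024.  A max_rec of 0 (unspecified)
+ * or >= 14 (reserved) forces the minimum of 4 bytes.
+ */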
+
+/*
+ * s1394_insert_q_asynch_cmd()
+ * is used to insert a given command structure onto a HAL's outstanding
+ * asynch queue.
+ */
+void
+s1394_insert_q_asynch_cmd(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
+{
+ s1394_cmd_priv_t *s_priv;
+ s1394_cmd_priv_t *c_priv;
+ cmd1394_cmd_t *temp_cmd;
+
+ TNF_PROBE_0_DEBUG(s1394_insert_q_asynch_cmd_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ mutex_enter(&hal->outstanding_q_mutex);
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Is the outstanding request queue empty? */
+ if ((hal->outstanding_q_head == NULL) &&
+ (hal->outstanding_q_tail == NULL)) {
+
+ hal->outstanding_q_head = (cmd1394_cmd_t *)cmd;
+ hal->outstanding_q_tail = (cmd1394_cmd_t *)cmd;
+ s_priv->cmd_priv_next = (cmd1394_cmd_t *)NULL;
+ s_priv->cmd_priv_prev = (cmd1394_cmd_t *)NULL;
+
+ } else {
+ s_priv->cmd_priv_next = hal->outstanding_q_head;
+ s_priv->cmd_priv_prev = (cmd1394_cmd_t *)NULL;
+
+ temp_cmd = (cmd1394_cmd_t *)hal->outstanding_q_head;
+ c_priv = (s1394_cmd_priv_t *)((uchar_t *)temp_cmd +
+ sizeof (cmd1394_cmd_t));
+ c_priv->cmd_priv_prev = (cmd1394_cmd_t *)cmd;
+
+ hal->outstanding_q_head = (cmd1394_cmd_t *)cmd;
+ }
+
+ mutex_exit(&hal->outstanding_q_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_insert_q_asynch_cmd_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+}
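+
+/*
+ * Note on the list handling above: the outstanding Q is a doubly-linked
+ * list threaded through each command's Services Layer private area
+ * (cmd_priv_next/cmd_priv_prev), located at a fixed offset of
+ * sizeof (cmd1394_cmd_t) from the command -- presumably the same
+ * computation S1394_GET_CMD_PRIV() performs.  Insertion is always at
+ * the head; removal (below) may happen anywhere in the list.
+ */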
+
+/*
+ * s1394_remove_q_asynch_cmd()
+ * is used to remove a given command structure from a HAL's outstanding
+ * asynch queue.
+ */
+void
+s1394_remove_q_asynch_cmd(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
+{
+ s1394_cmd_priv_t *s_priv;
+ s1394_cmd_priv_t *c_priv;
+ cmd1394_cmd_t *prev_cmd;
+ cmd1394_cmd_t *next_cmd;
+
+ TNF_PROBE_0_DEBUG(s1394_remove_q_asynch_cmd_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ mutex_enter(&hal->outstanding_q_mutex);
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ prev_cmd = (cmd1394_cmd_t *)s_priv->cmd_priv_prev;
+ next_cmd = (cmd1394_cmd_t *)s_priv->cmd_priv_next;
+
+ s_priv->cmd_priv_prev = (cmd1394_cmd_t *)NULL;
+ s_priv->cmd_priv_next = (cmd1394_cmd_t *)NULL;
+
+ if (prev_cmd != NULL) {
+ c_priv = (s1394_cmd_priv_t *)((uchar_t *)prev_cmd +
+ sizeof (cmd1394_cmd_t));
+ c_priv->cmd_priv_next = (cmd1394_cmd_t *)next_cmd;
+
+ } else {
+ if (hal->outstanding_q_head == (cmd1394_cmd_t *)cmd)
+ hal->outstanding_q_head = (cmd1394_cmd_t *)next_cmd;
+ }
+
+ if (next_cmd != NULL) {
+ c_priv = (s1394_cmd_priv_t *)((uchar_t *)next_cmd +
+ sizeof (cmd1394_cmd_t));
+ c_priv->cmd_priv_prev = (cmd1394_cmd_t *)prev_cmd;
+
+ } else {
+ if (hal->outstanding_q_tail == (cmd1394_cmd_t *)cmd)
+ hal->outstanding_q_tail = (cmd1394_cmd_t *)prev_cmd;
+ }
+
+ mutex_exit(&hal->outstanding_q_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_remove_q_asynch_cmd_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+}
+
+/*
+ * s1394_atreq_cmd_complete()
+ * is called by h1394_cmd_is_complete() when an AT request has completed.
+ * Based upon a command's completion status, s1394_atreq_cmd_complete()
+ * determines whether to call the target (or unblock), put the command onto
+ * the pending Q to be sent out later, or to resend the command
+ * (multi-part command).
+ */
+void
+s1394_atreq_cmd_complete(s1394_hal_t *hal, cmd1394_cmd_t *req, int status)
+{
+ s1394_cmd_priv_t *s_priv;
+ h1394_cmd_priv_t *h_priv;
+ dev_info_t *dip;
+ int ret;
+ int cmd_result;
+ int err;
+
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(req);
+
+ /* If not an ack_complete... */
+ if (status != H1394_CMD_SUCCESS) {
+ /* kstats - number of failure AT responses */
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ hal->hal_kstats->atresp_quad_rd_fail++;
+ break;
+
+ case CMD1394_ASYNCH_RD_BLOCK:
+ hal->hal_kstats->atresp_blk_rd_fail++;
+ break;
+
+ case CMD1394_ASYNCH_WR_QUAD:
+ hal->hal_kstats->atresp_quad_wr_fail++;
+ break;
+
+ case CMD1394_ASYNCH_WR_BLOCK:
+ hal->hal_kstats->atresp_blk_wr_fail++;
+ break;
+
+ case CMD1394_ASYNCH_LOCK_32:
+ hal->hal_kstats->atresp_lock32_fail++;
+ break;
+
+ case CMD1394_ASYNCH_LOCK_64:
+ hal->hal_kstats->atresp_lock64_fail++;
+ break;
+ }
+
+ switch (status) {
+ /* evt_missing_ack */
+ case H1394_CMD_ETIMEOUT:
+ cmd_result = CMD1394_ETIMEOUT;
+ break;
+
+ /* evt_flushed */
+ case H1394_CMD_EBUSRESET:
+ /* Move request to pending Q if cancel on */
+ /* reset is not set */
+ if (req->cmd_options & CMD1394_CANCEL_ON_BUS_RESET) {
+ cmd_result = CMD1394_EBUSRESET;
+ break;
+ }
+ s1394_remove_q_asynch_cmd(hal, req);
+ s1394_pending_q_insert(hal, req, S1394_PENDING_Q_REAR);
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return;
+
+ /* ack_busy_X */
+ /* ack_busy_A */
+ /* ack_busy_B */
+ case H1394_CMD_EDEVICE_BUSY:
+ cmd_result = CMD1394_EDEVICE_BUSY;
+ break;
+
+ /* ack_data_error */
+ case H1394_CMD_EDATA_ERROR:
+ cmd_result = CMD1394_EDATA_ERROR;
+ break;
+
+ /* ack_type_error */
+ case H1394_CMD_ETYPE_ERROR:
+ cmd_result = CMD1394_ETYPE_ERROR;
+ break;
+
+ /* resp_address_error */
+ /* ack_address_error */
+ case H1394_CMD_EADDR_ERROR:
+ cmd_result = CMD1394_EADDRESS_ERROR;
+ break;
+
+ /* resp_conflict_error */
+ /* ack_conflict_error */
+ case H1394_CMD_ERSRC_CONFLICT:
+ cmd_result = CMD1394_ERSRC_CONFLICT;
+ break;
+
+ /* ack_tardy */
+ case H1394_CMD_EDEVICE_POWERUP:
+ cmd_result = CMD1394_EDEVICE_BUSY;
+ break;
+
+ /* device errors (bad tcodes, ACKs, etc...) */
+ case H1394_CMD_EDEVICE_ERROR:
+ cmd_result = CMD1394_EDEVICE_ERROR;
+ break;
+
+ /* Unknown error type */
+ case H1394_CMD_EUNKNOWN_ERROR:
+ cmd_result = CMD1394_EUNKNOWN_ERROR;
+ break;
+
+ /* Unrecognized error */
+ default:
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_2(s1394_atreq_cmd_complete_error,
+ S1394_TNF_SL_ATREQ_ERROR, "", tnf_string, msg,
+ "Unrecognized cmd status code",
+ tnf_int, status, status);
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return;
+ }
+
+ /* Remove command from the HAL's outstanding request Q */
+ s1394_remove_q_asynch_cmd(hal, req);
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ req->cmd_result = cmd_result;
+
+ /* Is this a blocking command? */
+ if (req->cmd_options & CMD1394_BLOCKING) {
+ /* Unblock the waiting command */
+ mutex_enter(&s_priv->blocking_mutex);
+ s_priv->blocking_flag = B_TRUE;
+ cv_signal(&s_priv->blocking_cv);
+ mutex_exit(&s_priv->blocking_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return;
+ }
+
+ /* Call the target's completion_callback() */
+ if (req->completion_callback != NULL) {
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_do_callback,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ req->completion_callback(req);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return;
+ }
+
+ /* Successful unless otherwise modified */
+ err = CMD1394_CMDSUCCESS;
+
+ if ((req->cmd_type == CMD1394_ASYNCH_RD_BLOCK) ||
+ (req->cmd_type == CMD1394_ASYNCH_WR_BLOCK)) {
+
+ /* Get a pointer to the HAL private struct */
+ h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
+
+ /* Update data_remaining */
+ s_priv->data_remaining -= h_priv->mblk.length;
+
+ /* Increment bytes_transferred */
+ req->cmd_u.b.bytes_transferred += h_priv->mblk.length;
+
+ if (req->cmd_type == CMD1394_ASYNCH_RD_BLOCK)
+ hal->hal_kstats->atreq_blk_rd_size +=
+ h_priv->mblk.length;
+
+ /* Is there still more to send? */
+ if (s_priv->data_remaining > 0) {
+
+ /* Setup the new mblk and offset */
+ h_priv->mblk.curr_mblk = h_priv->mblk.next_mblk;
+ h_priv->mblk.curr_offset = h_priv->mblk.next_offset;
+
+ /* Update destination address */
+ if (!(req->cmd_options &
+ CMD1394_DISABLE_ADDR_INCREMENT)) {
+ req->cmd_addr += h_priv->mblk.length;
+ }
+
+ /*
+ * Use the current MAX_PAYLOAD size. This value
+ * doesn't need to be recalculated because we must
+ * be in the same generation on the bus, else we
+ * would have seen a bus reset error.
+ */
+ if (s_priv->data_remaining < h_priv->mblk.length) {
+ h_priv->mblk.length = s_priv->data_remaining;
+ }
+
+ /* Send command out again */
+ ret = s1394_xfer_asynch_command(hal, req, &err);
+
+ if (ret == DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return;
+
+ } else if (err == CMD1394_ESTALE_GENERATION) {
+ /* Remove cmd from outstanding request Q */
+ s1394_remove_q_asynch_cmd(hal, req);
+ s1394_pending_q_insert(hal, req,
+ S1394_PENDING_Q_REAR);
+
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return;
+ }
+ }
+ }
+
+ /* Remove command from the HAL's outstanding request Q */
+ s1394_remove_q_asynch_cmd(hal, req);
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ /* Set status */
+ req->cmd_result = err;
+
+ /* Is this a blocking command? */
+ if (req->cmd_options & CMD1394_BLOCKING) {
+ /* Unblock the waiting command */
+ mutex_enter(&s_priv->blocking_mutex);
+ s_priv->blocking_flag = B_TRUE;
+ cv_signal(&s_priv->blocking_cv);
+ mutex_exit(&s_priv->blocking_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return;
+ }
+
+ /* Set status and call completion_callback() */
+ if (req->completion_callback != NULL) {
+
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_do_callback,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ req->completion_callback(req);
+
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_atreq_cmd_complete_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+}
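+
+/*
+ * Multi-part example (illustrative numbers): a 4096-byte block write
+ * with current_max_payload == 1024 goes out as four transactions.
+ * After each ack_complete the code above decrements data_remaining by
+ * mblk.length (1024), advances cmd_addr by the same amount (unless
+ * CMD1394_DISABLE_ADDR_INCREMENT is set), and re-sends the command;
+ * only when data_remaining reaches zero is the target called back (or
+ * unblocked) once for the whole transfer.
+ */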
+
+/*
+ * s1394_atresp_cmd_complete()
+ * is similar to s1394_atreq_cmd_complete(). It is also called by
+ * h1394_cmd_is_complete(), but when an AT response has completed.
+ * Again, based upon the command's completion status,
+ * s1394_atresp_cmd_complete() determines whether to call the target or
+ * to simply clean up the command and return.
+ */
+void
+s1394_atresp_cmd_complete(s1394_hal_t *hal, cmd1394_cmd_t *resp, int status)
+{
+ s1394_cmd_priv_t *s_priv;
+ h1394_cmd_priv_t *h_priv;
+ dev_info_t *dip;
+ boolean_t valid_addr_blk;
+ int target_status;
+
+ TNF_PROBE_0_DEBUG(s1394_atresp_cmd_complete_enter,
+ S1394_TNF_SL_ATRESP_STACK, "");
+
+ target_status = CMD1394_CMDSUCCESS;
+
+ /* If not an ack_complete */
+ if (status != H1394_CMD_SUCCESS) {
+ switch (status) {
+ /* evt_missing_ack */
+ case H1394_CMD_ETIMEOUT:
+ target_status = CMD1394_ETIMEOUT;
+ break;
+
+ /* evt_flushed */
+ case H1394_CMD_EBUSRESET:
+ target_status = CMD1394_EBUSRESET;
+ break;
+
+ /* ack_busy_X */
+ /* ack_busy_A */
+ /* ack_busy_B */
+ case H1394_CMD_EDEVICE_BUSY:
+ target_status = CMD1394_EDEVICE_BUSY;
+ break;
+
+ /* ack_data_error */
+ case H1394_CMD_EDATA_ERROR:
+ target_status = CMD1394_EDATA_ERROR;
+ break;
+
+ /* ack_type_error */
+ case H1394_CMD_ETYPE_ERROR:
+ target_status = CMD1394_ETYPE_ERROR;
+ break;
+
+ /* ack_address_error */
+ case H1394_CMD_EADDR_ERROR:
+ target_status = CMD1394_EADDRESS_ERROR;
+ break;
+
+ /* ack_conflict_error */
+ case H1394_CMD_ERSRC_CONFLICT:
+ target_status = CMD1394_ERSRC_CONFLICT;
+ break;
+
+ /* ack_tardy */
+ case H1394_CMD_EDEVICE_POWERUP:
+ target_status = CMD1394_EDEVICE_BUSY;
+ break;
+
+ /* device errors (bad tcodes, ACKs, etc...) */
+ case H1394_CMD_EDEVICE_ERROR:
+ target_status = CMD1394_EDEVICE_ERROR;
+ break;
+
+ /* Unknown error type */
+ case H1394_CMD_EUNKNOWN_ERROR:
+ target_status = CMD1394_EUNKNOWN_ERROR;
+ break;
+
+ /* Unrecognized error */
+ default:
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_2(s1394_atresp_cmd_complete_error,
+ S1394_TNF_SL_ATRESP_ERROR, "", tnf_string, msg,
+ "Unrecognized cmd status code",
+ tnf_int, status, status);
+ TNF_PROBE_0_DEBUG(s1394_atresp_cmd_complete_exit,
+ S1394_TNF_SL_ATRESP_STACK, "");
+ return;
+ }
+ }
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(resp);
+
+ /* Get a pointer to the HAL private struct */
+ h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
+
+ valid_addr_blk = s_priv->arreq_valid_addr;
+
+ if (valid_addr_blk == B_TRUE) {
+ /* Set the command status */
+ resp->cmd_result = target_status;
+
+ switch (s_priv->cmd_priv_xfer_type) {
+ case S1394_CMD_READ:
+ case S1394_CMD_WRITE:
+ case S1394_CMD_LOCK:
+ if (resp->completion_callback != NULL) {
+ TNF_PROBE_0_DEBUG(
+ s1394_atresp_cmd_complete_do_callback,
+ S1394_TNF_SL_ATRESP_STACK, "");
+ resp->completion_callback(resp);
+ }
+ break;
+
+ default:
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_1(s1394_atresp_cmd_complete_error,
+ S1394_TNF_SL_ATRESP_ERROR, "", tnf_string, msg,
+ "Unrecognized transfer type");
+ TNF_PROBE_0_DEBUG(s1394_atresp_cmd_complete_exit,
+ S1394_TNF_SL_ATRESP_STACK, "");
+ return;
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_atresp_cmd_complete_call_hal_cmplt,
+ S1394_TNF_SL_ATRESP_STACK, "");
+
+ /* Free the command - Pass it back to the HAL */
+ HAL_CALL(hal).response_complete(hal->halinfo.hal_private, resp, h_priv);
+
+ TNF_PROBE_1_DEBUG(s1394_atresp_cmd_complete_exit,
+ S1394_TNF_SL_ATRESP_STACK, "", tnf_int, status, target_status);
+}
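+
+/*
+ * Note on the flow above: the target's completion_callback() is invoked
+ * only when arreq_valid_addr indicates the original AR request hit a
+ * registered address block; otherwise the response command is simply
+ * handed back to the HAL via response_complete() without notification.
+ */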
+
+/*
+ * s1394_send_response()
+ * is used to send a response to an AR request. Depending on whether the
+ * request was a broadcast request, a write to posted write address space,
+ * or some other request, either a response packet is sent, or the command
+ * is returned to the HAL. A return value of DDI_SUCCESS means that the
+ * command has been handled correctly. It was either successfully sent to
+ * the HAL, or, if it was posted_write of broadcast, it was freed up. A
+ * return value of DDI_FAILURE indicates either a serious error, in which
+ * case the HAL is shutdown, or a failure returned by the HAL, in which
+ * case the command is freed up and notice of the failure is returned.
+ */
+int
+s1394_send_response(s1394_hal_t *hal, cmd1394_cmd_t *resp)
+{
+ s1394_cmd_priv_t *s_priv;
+ h1394_cmd_priv_t *h_priv;
+ dev_info_t *dip;
+ int ret;
+ int result;
+
+ TNF_PROBE_0_DEBUG(s1394_send_response_enter, S1394_TNF_SL_ATRESP_STACK,
+ "");
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(resp);
+
+ /* Get a pointer to the HAL private struct */
+ h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
+
+ /*
+ * If request was broadcast or a write request to a posted write
+ * address, don't send a response
+ */
+ if ((resp->broadcast == 1) || ((s_priv->posted_write == B_TRUE) &&
+ ((resp->cmd_type == CMD1394_ASYNCH_WR_QUAD) ||
+ (resp->cmd_type == CMD1394_ASYNCH_WR_BLOCK)))) {
+
+ TNF_PROBE_0_DEBUG(s1394_send_response_call_hal_cmplt,
+ S1394_TNF_SL_ATRESP_STACK, "");
+
+ /* Free the command - Pass it back to the HAL */
+ HAL_CALL(hal).response_complete(hal->halinfo.hal_private,
+ resp, h_priv);
+
+ TNF_PROBE_0_DEBUG(s1394_send_response_exit,
+ S1394_TNF_SL_ATRESP_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_send_response_call_hal_resp,
+ S1394_TNF_SL_ATRESP_STACK, "");
+
+ /* kstats - number of failure responses sent */
+ if (resp->cmd_result != IEEE1394_RESP_COMPLETE) {
+ switch (resp->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ hal->hal_kstats->arresp_quad_rd_fail++;
+ break;
+
+ case CMD1394_ASYNCH_RD_BLOCK:
+ hal->hal_kstats->arresp_blk_rd_fail++;
+ break;
+
+ case CMD1394_ASYNCH_WR_QUAD:
+ hal->hal_kstats->arresp_quad_wr_fail++;
+ break;
+
+ case CMD1394_ASYNCH_WR_BLOCK:
+ hal->hal_kstats->arresp_blk_wr_fail++;
+ break;
+
+ case CMD1394_ASYNCH_LOCK_32:
+ hal->hal_kstats->arresp_lock32_fail++;
+ break;
+
+ case CMD1394_ASYNCH_LOCK_64:
+ hal->hal_kstats->arresp_lock64_fail++;
+ break;
+ }
+ } else {
+ if (resp->cmd_type == CMD1394_ASYNCH_RD_BLOCK)
+ hal->hal_kstats->arreq_blk_rd_size +=
+ resp->cmd_u.b.blk_length;
+ }
+
+ if (resp->cmd_type == CMD1394_ASYNCH_RD_BLOCK) {
+ h_priv->mblk.curr_mblk = resp->cmd_u.b.data_block;
+ h_priv->mblk.curr_offset = resp->cmd_u.b.data_block->b_rptr;
+ h_priv->mblk.length = resp->cmd_u.b.blk_length;
+ }
+
+ switch (s_priv->cmd_priv_xfer_type) {
+ case S1394_CMD_READ:
+ ret = HAL_CALL(hal).read_response(hal->halinfo.hal_private,
+ resp, h_priv, &result);
+ break;
+
+ case S1394_CMD_WRITE:
+ ret = HAL_CALL(hal).write_response(hal->halinfo.hal_private,
+ resp, h_priv, &result);
+ break;
+
+ case S1394_CMD_LOCK:
+ ret = HAL_CALL(hal).lock_response(hal->halinfo.hal_private,
+ resp, h_priv, &result);
+ break;
+
+ default:
+ dip = hal->halinfo.dip;
+
+ /* An unexpected error in the HAL */
+ cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
+ ddi_node_name(dip), ddi_get_instance(dip));
+
+ /* Disable the HAL */
+ s1394_hal_shutdown(hal, B_TRUE);
+
+ TNF_PROBE_1(s1394_send_response_error,
+ S1394_TNF_SL_ATRESP_ERROR, "", tnf_string, msg,
+ "Unrecognized transfer type");
+ TNF_PROBE_0_DEBUG(s1394_send_response_exit,
+ S1394_TNF_SL_ATRESP_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Unable to send a response */
+ if (ret != DDI_SUCCESS) {
+ if (result == H1394_STATUS_INVALID_BUSGEN) {
+ TNF_PROBE_1(s1394_send_response_error,
+ S1394_TNF_SL_ATRESP_ERROR, "", tnf_string, msg,
+ "Invalid generation in response");
+ } else if (result == H1394_STATUS_NOMORE_SPACE) {
+ TNF_PROBE_1(s1394_send_response_error,
+ S1394_TNF_SL_ATRESP_ERROR, "", tnf_string, msg,
+ "No more space on AT response queue");
+ } else {
+ TNF_PROBE_1(s1394_send_response_error,
+ S1394_TNF_SL_ATRESP_ERROR, "", tnf_string, msg,
+ "Unknown problem in s1394_send_response");
+ }
+ TNF_PROBE_0_DEBUG(s1394_send_response_call_hal_cmplt,
+ S1394_TNF_SL_ATRESP_STACK, "");
+
+ /* Free the command - Pass it back to the HAL */
+ HAL_CALL(hal).response_complete(hal->halinfo.hal_private,
+ resp, h_priv);
+
+ TNF_PROBE_0_DEBUG(s1394_send_response_exit,
+ S1394_TNF_SL_ATRESP_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_send_response_exit, S1394_TNF_SL_ATRESP_STACK,
+ "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_compare_swap()
+ * is used by t1394_lock() to send a lock request. Any of the lock
+ * requests specified explicitly by the 1394 spec will pass through here,
+ * i.e. compare-swap, mask-swap, etc.
+ */
+int
+s1394_compare_swap(s1394_hal_t *hal, s1394_target_t *target, cmd1394_cmd_t *cmd)
+{
+ s1394_cmd_priv_t *s_priv;
+ s1394_hal_state_t state;
+ int err;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_compare_swap_enter, S1394_TNF_SL_ATREQ_STACK,
+ "");
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ /* Lock the topology tree - protect from bus reset */
+ mutex_enter(&hal->topology_tree_mutex);
+
+ ret = s1394_setup_asynch_command(hal, target, cmd, S1394_CMD_LOCK,
+ &err);
+
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Command has now been put onto the queue! */
+ if (ret != DDI_SUCCESS) {
+ /* Copy error code into result */
+ cmd->cmd_result = err;
+
+ TNF_PROBE_0(s1394_compare_swap_error_in_setup_asynch,
+ S1394_TNF_SL_ATREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG(s1394_compare_swap_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ mutex_enter(&hal->topology_tree_mutex);
+ state = hal->hal_state;
+ /* If this command was sent during a bus reset, */
+ /* then put it onto the pending Q. */
+ if (state == S1394_HAL_RESET) {
+ /* Remove cmd from outstanding request Q */
+ s1394_remove_q_asynch_cmd(hal, cmd);
+
+ /* Are we on the bus reset event stack? */
+ if (s1394_on_br_thread(hal) == B_TRUE) {
+ /* Blocking commands are not allowed */
+ if (cmd->cmd_options & CMD1394_BLOCKING) {
+ mutex_exit(&hal->topology_tree_mutex);
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ cmd->cmd_result = CMD1394_EINVALID_CONTEXT;
+
+ TNF_PROBE_1(s1394_compare_swap_error,
+ S1394_TNF_SL_ATREQ_ERROR, "", tnf_string,
+ msg, "CMD1394_BLOCKING in bus reset ctxt");
+ TNF_PROBE_0_DEBUG(s1394_compare_swap_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ s1394_pending_q_insert(hal, cmd, S1394_PENDING_Q_FRONT);
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Block (if necessary) */
+ s1394_block_on_asynch_cmd(cmd);
+
+ TNF_PROBE_0_DEBUG(s1394_compare_swap_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ return (DDI_SUCCESS);
+ }
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Send the command out */
+ ret = s1394_xfer_asynch_command(hal, cmd, &err);
+
+ if (ret != DDI_SUCCESS) {
+ if (err == CMD1394_ESTALE_GENERATION) {
+ /* Remove cmd from outstanding request Q */
+ s1394_remove_q_asynch_cmd(hal, cmd);
+ s1394_pending_q_insert(hal, cmd, S1394_PENDING_Q_FRONT);
+
+ /* Block (if necessary) */
+ s1394_block_on_asynch_cmd(cmd);
+
+ TNF_PROBE_0_DEBUG(s1394_compare_swap_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+
+ } else {
+ /* Remove cmd from outstanding request Q */
+ s1394_remove_q_asynch_cmd(hal, cmd);
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ /* Copy error code into result */
+ cmd->cmd_result = err;
+
+ TNF_PROBE_0(s1394_compare_swap_error_in_xfer,
+ S1394_TNF_SL_ATREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG(s1394_compare_swap_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ } else {
+ /* Block (if necessary) */
+ s1394_block_on_asynch_cmd(cmd);
+
+ TNF_PROBE_0_DEBUG(s1394_compare_swap_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+ }
+}
+
+/*
+ * s1394_split_lock_req()
+ * is also used by t1394_lock() to send a lock request. The difference
+ * is that s1394_split_lock_req() is used to send the software-supported
+ * lock types, i.e. bit_and, bit_or, etc. These lock requests require
+ * more than one transaction, typically compare-swaps.
+ */
+int
+s1394_split_lock_req(s1394_hal_t *hal, s1394_target_t *target,
+ cmd1394_cmd_t *cmd)
+{
+ s1394_cmd_priv_t *s_priv;
+ cmd1394_cmd_t *tmp_cmd;
+
+ TNF_PROBE_0_DEBUG(s1394_split_lock_req_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ /* Allocate a temporary command */
+ if (s1394_alloc_cmd(hal, T1394_ALLOC_CMD_NOSLEEP, &tmp_cmd) !=
+ DDI_SUCCESS) {
+ cmd->cmd_result = CMD1394_EUNKNOWN_ERROR;
+
+ TNF_PROBE_0(s1394_split_lock_req_error_alloc_cmd,
+ S1394_TNF_SL_ATREQ_ERROR, "");
+ TNF_PROBE_0_DEBUG(s1394_split_lock_req_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(tmp_cmd);
+
+ tmp_cmd->completion_callback = s1394_handle_lock;
+ tmp_cmd->cmd_callback_arg = (opaque_t)cmd;
+ tmp_cmd->cmd_type = cmd->cmd_type;
+ tmp_cmd->cmd_addr = cmd->cmd_addr;
+ tmp_cmd->cmd_options = cmd->cmd_options;
+ tmp_cmd->bus_generation = cmd->bus_generation;
+
+ /* The temporary command can not block */
+ tmp_cmd->cmd_options = tmp_cmd->cmd_options & ~CMD1394_BLOCKING;
+
+ /* Setup compare-swap with data_value == arg_value (read) */
+ if (tmp_cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) {
+ tmp_cmd->cmd_u.l32.data_value = 0;
+ tmp_cmd->cmd_u.l32.arg_value = 0;
+ tmp_cmd->cmd_u.l32.lock_type = CMD1394_LOCK_COMPARE_SWAP;
+ s_priv->temp_num_retries = cmd->cmd_u.l32.num_retries;
+ } else {
+ tmp_cmd->cmd_u.l64.data_value = 0;
+ tmp_cmd->cmd_u.l64.arg_value = 0;
+ tmp_cmd->cmd_u.l64.lock_type = CMD1394_LOCK_COMPARE_SWAP;
+ s_priv->temp_num_retries = cmd->cmd_u.l64.num_retries;
+ }
+
+ /* Initialize lock_req_step */
+ s_priv->lock_req_step = 0;
+
+ /* Get the Services Layer private area for the target cmd */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ s_priv->cmd_in_use = B_TRUE;
+
+ /* Send the request */
+ if (s1394_compare_swap(hal, target, tmp_cmd) != DDI_SUCCESS) {
+ s_priv->cmd_in_use = B_FALSE;
+
+ /* Free the temporary command */
+ if (s1394_free_cmd(hal, &tmp_cmd) != DDI_SUCCESS)
+ cmd->cmd_result = CMD1394_EUNKNOWN_ERROR;
+
+ TNF_PROBE_0_DEBUG(s1394_split_lock_req_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Block (if necessary) */
+ s1394_block_on_asynch_cmd(cmd);
+
+ TNF_PROBE_0_DEBUG(s1394_split_lock_req_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+}
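+
+/*
+ * Sketch of the split-lock sequence driven by the code above and by
+ * s1394_handle_lock() below (two bus transactions per attempt):
+ *
+ *	step 0: compare-swap with arg_value == data_value == 0, which
+ *		acts as an atomic read and returns old_value;
+ *	step 1: compute new_value from old_value per the lock_type
+ *		(bit_and, bit_or, add, etc.) and issue
+ *		compare-swap(arg_value = old_value, data_value = new_value).
+ *
+ * If another node modified the location between the two steps, the
+ * compare in step 1 fails and the whole sequence is retried, up to
+ * num_retries times.
+ */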
+
+/*
+ * s1394_handle_lock()
+ * is the callback for s1394_split_lock_req(). It does all of the real
+ * work. Based on the specific lock type, all necessary manipulation is
+ * performed and another compare-swap is sent out. If the transaction
+ * is unsuccessful, it is retried.
+ */
+static void
+s1394_handle_lock(cmd1394_cmd_t *cmd)
+{
+ s1394_hal_t *to_hal;
+ s1394_target_t *target;
+ s1394_cmd_priv_t *s_priv;
+ cmd1394_cmd_t *target_cmd;
+ uint32_t lock_req_step;
+ int tcmd_result;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_handle_lock_enter, S1394_TNF_SL_ATREQ_STACK,
+ "");
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ lock_req_step = s_priv->lock_req_step;
+
+ /* Get the target's command */
+ target_cmd = (cmd1394_cmd_t *)cmd->cmd_callback_arg;
+
+ /* Get the destination of the command */
+ to_hal = s_priv->sent_on_hal;
+
+lock_req_step_0:
+ /* Is this step 0 completing? */
+ if (lock_req_step == 0) {
+ /* Was the request successful? */
+ if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
+ /* Do any math, bit ops, or byte-swapping necessary */
+ ret = s1394_process_split_lock(cmd, target_cmd);
+
+ if (ret != DDI_SUCCESS) {
+ tcmd_result = target_cmd->cmd_result;
+ goto lock_req_done;
+ }
+
+ s_priv->lock_req_step = 1;
+
+ target = s_priv->sent_by_target;
+
+ if (s1394_compare_swap(to_hal, target, cmd) !=
+ DDI_SUCCESS) {
+ tcmd_result = cmd->cmd_result;
+ goto lock_req_done;
+ } else {
+ TNF_PROBE_0_DEBUG(s1394_handle_lock_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return;
+ }
+ } else {
+ /* Command failed for some reason */
+ tcmd_result = cmd->cmd_result;
+ goto lock_req_done;
+ }
+ } else { /* lock_req_step == 1 */
+ /* Was the request successful? */
+ if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
+ /* Do whatever's necessary to finish up the lock */
+ ret = s1394_finish_split_lock(cmd, target_cmd);
+
+ if (ret != DDI_SUCCESS) {
+ lock_req_step = 0;
+ goto lock_req_step_0;
+ } else {
+ tcmd_result = cmd->cmd_result;
+ goto lock_req_done;
+ }
+ } else {
+ /* Command failed for some reason */
+ tcmd_result = cmd->cmd_result;
+ goto lock_req_done;
+ }
+ }
+
+lock_req_done:
+ if (s1394_free_cmd(to_hal, &cmd) != DDI_SUCCESS) {
+ TNF_PROBE_0(s1394_handle_lock_error_in_freecmd,
+ S1394_TNF_SL_ATREQ_ERROR, "");
+ }
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(target_cmd);
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ target_cmd->cmd_result = tcmd_result;
+
+ /* Is this a blocking command? */
+ if (target_cmd->cmd_options & CMD1394_BLOCKING) {
+ /* Unblock the waiting command */
+ mutex_enter(&s_priv->blocking_mutex);
+ s_priv->blocking_flag = B_TRUE;
+ cv_signal(&s_priv->blocking_cv);
+ mutex_exit(&s_priv->blocking_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_handle_lock_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return;
+ }
+
+ /* Call the target's completion_callback() */
+ if (target_cmd->completion_callback != NULL)
+ target_cmd->completion_callback(target_cmd);
+
+ TNF_PROBE_0_DEBUG(s1394_handle_lock_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+}
+
+/*
+ * s1394_pending_q_insert()
+ * is used to insert a given command structure onto a HAL's pending queue
+ * for later processing (after the bus reset). All commands returned by
+ * the HAL are inserted onto the rear of the list (first priority), and
+ * all other commands (from targets during bus reset) are put onto the front.
+ */
+void
+s1394_pending_q_insert(s1394_hal_t *hal, cmd1394_cmd_t *cmd, uint_t flags)
+{
+ cmd1394_cmd_t *temp_cmd;
+ s1394_cmd_priv_t *s_priv;
+ s1394_cmd_priv_t *c_priv;
+
+ TNF_PROBE_0_DEBUG(s1394_pending_q_insert_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ mutex_enter(&hal->pending_q_mutex);
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Is the pending queue empty? */
+ if ((hal->pending_q_head == NULL) && (hal->pending_q_tail == NULL)) {
+
+ hal->pending_q_head = (cmd1394_cmd_t *)cmd;
+ hal->pending_q_tail = (cmd1394_cmd_t *)cmd;
+ s_priv->cmd_priv_next = (cmd1394_cmd_t *)NULL;
+ s_priv->cmd_priv_prev = (cmd1394_cmd_t *)NULL;
+
+ } else if (flags == S1394_PENDING_Q_FRONT) {
+ s_priv->cmd_priv_next = hal->pending_q_head;
+ s_priv->cmd_priv_prev = (cmd1394_cmd_t *)NULL;
+
+ temp_cmd = (cmd1394_cmd_t *)hal->pending_q_head;
+ c_priv = (s1394_cmd_priv_t *)((uchar_t *)temp_cmd +
+ sizeof (cmd1394_cmd_t));
+ c_priv->cmd_priv_prev = (cmd1394_cmd_t *)cmd;
+
+ hal->pending_q_head = (cmd1394_cmd_t *)cmd;
+
+ } else {
+ s_priv->cmd_priv_prev = hal->pending_q_tail;
+ s_priv->cmd_priv_next = (cmd1394_cmd_t *)NULL;
+
+ temp_cmd = (cmd1394_cmd_t *)hal->pending_q_tail;
+ c_priv = (s1394_cmd_priv_t *)((uchar_t *)temp_cmd +
+ sizeof (cmd1394_cmd_t));
+ c_priv->cmd_priv_next = (cmd1394_cmd_t *)cmd;
+
+ hal->pending_q_tail = (cmd1394_cmd_t *)cmd;
+ }
+
+ mutex_exit(&hal->pending_q_mutex);
+
+ /* kstats - number of pending Q insertions */
+ hal->hal_kstats->pending_q_insert++;
+
+ TNF_PROBE_0_DEBUG(s1394_pending_q_insert_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+}
+
+/*
+ * s1394_pending_q_remove()
+ * is used to remove a command structure from a HAL's pending queue for
+ * processing.
+ */
+static cmd1394_cmd_t *
+s1394_pending_q_remove(s1394_hal_t *hal)
+{
+ s1394_cmd_priv_t *s_priv;
+ s1394_cmd_priv_t *c_priv;
+ cmd1394_cmd_t *cmd;
+ cmd1394_cmd_t *prev_cmd;
+
+ TNF_PROBE_0_DEBUG(s1394_pending_q_remove_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ mutex_enter(&hal->pending_q_mutex);
+
+ cmd = (cmd1394_cmd_t *)hal->pending_q_tail;
+ if (cmd == NULL) {
+ mutex_exit(&hal->pending_q_mutex);
+ TNF_PROBE_0_DEBUG(s1394_pending_q_remove_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (NULL);
+ }
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ prev_cmd = (cmd1394_cmd_t *)s_priv->cmd_priv_prev;
+
+ s_priv->cmd_priv_prev = (cmd1394_cmd_t *)NULL;
+ s_priv->cmd_priv_next = (cmd1394_cmd_t *)NULL;
+
+ if (prev_cmd != NULL) {
+ c_priv = (s1394_cmd_priv_t *)((uchar_t *)prev_cmd +
+ sizeof (cmd1394_cmd_t));
+ c_priv->cmd_priv_next = (cmd1394_cmd_t *)NULL;
+
+ } else {
+ hal->pending_q_head = (cmd1394_cmd_t *)NULL;
+ }
+ hal->pending_q_tail = (cmd1394_cmd_t *)prev_cmd;
+
+ mutex_exit(&hal->pending_q_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_pending_q_remove_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (cmd);
+}
+
+/*
+ * s1394_resend_pending_cmds()
+ * is called when the pending queue is to be flushed. After most of the
+ * bus reset processing is completed, the pending commands are sent/resent.
+ */
+void
+s1394_resend_pending_cmds(s1394_hal_t *hal)
+{
+ int done;
+
+ TNF_PROBE_0_DEBUG(s1394_resend_pending_cmds_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ do {
+ done = s1394_process_pending_q(hal);
+ } while (done == B_FALSE);
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_resend_pending_cmds_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_process_pending_q()
+ * is called to send/resend the commands on the pending queue. All command
+ * handling can be done here, including notifying the target of failed
+ * commands, etc. If it is necessary to recompute the address, speed,
+ * or max_payload for a command, that can be done here too. It returns
+ * B_FALSE if processing should continue with the next command on the
+ * pending queue, and B_TRUE if processing is done or must stop.
+ */
+static boolean_t
+s1394_process_pending_q(s1394_hal_t *hal)
+{
+ s1394_cmd_priv_t *s_priv;
+ h1394_cmd_priv_t *h_priv;
+ s1394_target_t *target;
+ cmd1394_cmd_t *cmd;
+ uint64_t node;
+ uint32_t from_node;
+ uint32_t to_node;
+ uint_t current_max_payload;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_process_pending_q_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ /* Pull a command from the Pending Q */
+ cmd = s1394_pending_q_remove(hal);
+
+ if (cmd == NULL) {
+ TNF_PROBE_0_DEBUG(s1394_process_pending_q_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (B_TRUE);
+ }
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Get a pointer to the HAL private struct */
+ h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
+
+ if ((cmd->cmd_options & CMD1394_OVERRIDE_ADDR) ||
+ (cmd->cmd_options & CMD1394_CANCEL_ON_BUS_RESET)) {
+ if (h_priv->bus_generation == hal->generation_count) {
+ ret = s1394_pending_q_helper(hal, cmd);
+ return (ret);
+ } else {
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ cmd->cmd_result = CMD1394_EBUSRESET;
+
+ /* Is this a blocking command? */
+ if (cmd->cmd_options & CMD1394_BLOCKING) {
+ /* Unblock the waiting command */
+ mutex_enter(&s_priv->blocking_mutex);
+ s_priv->blocking_flag = B_TRUE;
+ cv_signal(&s_priv->blocking_cv);
+ mutex_exit(&s_priv->blocking_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_process_pending_q_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (B_FALSE);
+ }
+
+ /* Call the target's completion_callback() */
+ if (cmd->completion_callback != NULL) {
+ cmd->completion_callback(cmd);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_process_pending_q_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (B_FALSE);
+ }
+ } else {
+ if (h_priv->bus_generation == hal->generation_count) {
+ ret = s1394_pending_q_helper(hal, cmd);
+ return (ret);
+ } else {
+ /* Make sure we can get the topology_tree_mutex */
+ if (s1394_lock_tree(hal) != DDI_SUCCESS)
+ return (B_TRUE);
+
+ /* Set the generation */
+ cmd->bus_generation = hal->generation_count;
+
+ /* Copy the generation into the HAL's private field */
+ h_priv->bus_generation = cmd->bus_generation;
+
+ target = s_priv->sent_by_target;
+
+ /* If not OVERRIDE_ADDR, then target must not be NULL */
+ ASSERT(target != NULL);
+
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+
+ if (((target->target_state & S1394_TARG_GONE) == 0) &&
+ (target->on_node != NULL)) {
+ node = target->on_node->node_num;
+ rw_exit(&hal->target_list_rwlock);
+ } else {
+ rw_exit(&hal->target_list_rwlock);
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ cmd->cmd_result = CMD1394_EDEVICE_REMOVED;
+
+ /* Is this a blocking command? */
+ if (cmd->cmd_options & CMD1394_BLOCKING) {
+ s1394_unlock_tree(hal);
+
+ /* Unblock the waiting command */
+ mutex_enter(&s_priv->blocking_mutex);
+ s_priv->blocking_flag = B_TRUE;
+ cv_signal(&s_priv->blocking_cv);
+ mutex_exit(&s_priv->blocking_mutex);
+
+ TNF_PROBE_0_DEBUG(
+ s1394_process_pending_q_exit,
+ S1394_TNF_SL_BR_STACK,
+ "");
+ return (B_FALSE);
+ }
+
+ /* Call the target's completion_callback() */
+ if (cmd->completion_callback != NULL) {
+ s1394_unlock_tree(hal);
+ cmd->completion_callback(cmd);
+ TNF_PROBE_0_DEBUG(
+ s1394_process_pending_q_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (B_FALSE);
+ } else {
+ s1394_unlock_tree(hal);
+ TNF_PROBE_0_DEBUG(
+ s1394_process_pending_q_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (B_FALSE);
+ }
+ }
+
+ /* Mask in the top 16-bits */
+ cmd->cmd_addr = cmd->cmd_addr &
+ IEEE1394_ADDR_OFFSET_MASK;
+ cmd->cmd_addr = cmd->cmd_addr |
+ (node << IEEE1394_ADDR_PHY_ID_SHIFT);
+ cmd->cmd_addr = cmd->cmd_addr |
+ IEEE1394_ADDR_BUS_ID_MASK;
+
+ /* Speed is to be filled in from speed map */
+ from_node = IEEE1394_NODE_NUM(hal->node_id);
+ to_node = (uint32_t)node;
+
+ /* Fill in the nodeID */
+ cmd->nodeID =
+ (cmd->cmd_addr & IEEE1394_ADDR_NODE_ID_MASK) >>
+ IEEE1394_ADDR_NODE_ID_SHIFT;
+
+ if (cmd->cmd_options & CMD1394_OVERRIDE_SPEED) {
+ s_priv->hal_cmd_private.speed =
+ (int)cmd->cmd_speed;
+ } else {
+ /* Speed is to be filled in from speed map */
+ s_priv->hal_cmd_private.speed =
+ (int)s1394_speed_map_get(hal, from_node,
+ to_node);
+ }
+
+ /* Is it a block request? */
+ if ((cmd->cmd_type == CMD1394_ASYNCH_RD_BLOCK) ||
+ (cmd->cmd_type == CMD1394_ASYNCH_WR_BLOCK)) {
+
+ /* Get a pointer to the HAL private struct */
+ h_priv = (h1394_cmd_priv_t *)&s_priv->
+ hal_cmd_private;
+
+ /* Handle the MAX_PAYLOAD size */
+ if (s_priv->sent_by_target != NULL) {
+ current_max_payload =
+ s_priv->sent_by_target->
+ current_max_payload;
+ } else {
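+ /* No target info available; assume the minimum payload */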
+ current_max_payload = 4;
+ }
+ if (cmd->cmd_options &
+ CMD1394_OVERRIDE_MAX_PAYLOAD) {
+ if (current_max_payload >
+ cmd->cmd_u.b.max_payload)
+ current_max_payload =
+ cmd->cmd_u.b.max_payload;
+ }
+ if (s_priv->data_remaining <
+ current_max_payload) {
+ h_priv->mblk.length =
+ s_priv->data_remaining;
+ } else {
+ h_priv->mblk.length =
+ current_max_payload;
+ }
+ }
+ s1394_unlock_tree(hal);
+ ret = s1394_pending_q_helper(hal, cmd);
+ TNF_PROBE_0_DEBUG(s1394_process_pending_q_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (ret);
+ }
+ }
+}
+
+/*
+ * s1394_pending_q_helper()
+ * is a "helper" function for s1394_process_pending_q(). It attempts to
+ * resend commands, handling error conditions whenever necessary.
+ */
+static boolean_t
+s1394_pending_q_helper(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
+{
+ s1394_cmd_priv_t *s_priv;
+ int err;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_pending_q_helper_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Put cmd on outstanding request Q */
+ s1394_insert_q_asynch_cmd(hal, cmd);
+
+ /* Send command out again */
+ ret = s1394_xfer_asynch_command(hal, cmd, &err);
+
+ if (ret != DDI_SUCCESS) {
+ if (err == CMD1394_ESTALE_GENERATION) {
+ /* Remove cmd outstanding req Q */
+ s1394_remove_q_asynch_cmd(hal, cmd);
+ s1394_pending_q_insert(hal, cmd, S1394_PENDING_Q_FRONT);
+
+ TNF_PROBE_0_DEBUG(s1394_pending_q_helper_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (B_TRUE);
+ } else {
+ /* Remove cmd from outstanding request Q */
+ s1394_remove_q_asynch_cmd(hal, cmd);
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ cmd->cmd_result = err;
+
+ /* Is this a blocking command? */
+ if (cmd->cmd_options & CMD1394_BLOCKING) {
+ /* Unblock waiting command */
+ mutex_enter(&s_priv->blocking_mutex);
+ s_priv->blocking_flag = B_TRUE;
+ cv_signal(&s_priv->blocking_cv);
+ mutex_exit(&s_priv->blocking_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_pending_q_helper_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (B_FALSE);
+ }
+
+ /* Call target completion_callback() */
+ if (cmd->completion_callback != NULL) {
+ cmd->completion_callback(cmd);
+ TNF_PROBE_0_DEBUG(s1394_pending_q_helper_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (B_FALSE);
+ } else {
+ TNF_PROBE_0_DEBUG(s1394_pending_q_helper_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (B_FALSE);
+ }
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_pending_q_helper_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (B_FALSE);
+}
+
+/*
+ * s1394_process_split_lock()
+ * is a "helper" function for the s1394_handle_lock() callback. Its
+ * job is to perform whatever manipulation is required for the given
+ * request.
+ */
+static int
+s1394_process_split_lock(cmd1394_cmd_t *cmd, cmd1394_cmd_t *target_cmd)
+{
+ uint64_t new_value64;
+ uint64_t data_value64;
+ uint64_t arg_value64;
+ uint64_t old_value64;
+ uint64_t temp_value64;
+ uint32_t new_value32;
+ uint32_t data_value32;
+ uint32_t arg_value32;
+ uint32_t old_value32;
+ uint32_t temp_value32;
+
+ TNF_PROBE_0_DEBUG(s1394_process_split_lock_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) {
+ old_value32 = cmd->cmd_u.l32.old_value;
+ data_value32 = target_cmd->cmd_u.l32.data_value;
+ arg_value32 = target_cmd->cmd_u.l32.arg_value;
+
+ /* Lock type specific */
+ switch (target_cmd->cmd_u.l32.lock_type) {
+ case CMD1394_LOCK_BIT_AND:
+ new_value32 = old_value32 & data_value32;
+ break;
+
+ case CMD1394_LOCK_BIT_OR:
+ new_value32 = old_value32 | data_value32;
+ break;
+
+ case CMD1394_LOCK_BIT_XOR:
+ new_value32 = old_value32 ^ data_value32;
+ break;
+
+ case CMD1394_LOCK_INCREMENT:
+ old_value32 = T1394_DATA32(old_value32);
+ new_value32 = old_value32 + 1;
+ new_value32 = T1394_DATA32(new_value32);
+ old_value32 = T1394_DATA32(old_value32);
+ break;
+
+ case CMD1394_LOCK_DECREMENT:
+ old_value32 = T1394_DATA32(old_value32);
+ new_value32 = old_value32 - 1;
+ new_value32 = T1394_DATA32(new_value32);
+ old_value32 = T1394_DATA32(old_value32);
+ break;
+
+ case CMD1394_LOCK_ADD:
+ old_value32 = T1394_DATA32(old_value32);
+ new_value32 = old_value32 + data_value32;
+ new_value32 = T1394_DATA32(new_value32);
+ old_value32 = T1394_DATA32(old_value32);
+ break;
+
+ case CMD1394_LOCK_SUBTRACT:
+ old_value32 = T1394_DATA32(old_value32);
+ new_value32 = old_value32 - data_value32;
+ new_value32 = T1394_DATA32(new_value32);
+ old_value32 = T1394_DATA32(old_value32);
+ break;
+
+ case CMD1394_LOCK_THRESH_ADD:
+ old_value32 = T1394_DATA32(old_value32);
+ temp_value32 = (old_value32 + data_value32);
+ if ((temp_value32 >= old_value32) &&
+ (temp_value32 <= arg_value32)) {
+ new_value32 = T1394_DATA32(temp_value32);
+ old_value32 = T1394_DATA32(old_value32);
+ } else {
+ /* Failed threshold add */
+ target_cmd->cmd_u.l32.old_value =
+ T1394_DATA32(cmd->cmd_u.l32.old_value);
+ target_cmd->cmd_result = CMD1394_CMDSUCCESS;
+ TNF_PROBE_0_DEBUG(
+ s1394_process_split_lock_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ break;
+
+ case CMD1394_LOCK_THRESH_SUBTRACT:
+ old_value32 = T1394_DATA32(old_value32);
+ temp_value32 = (old_value32 - data_value32);
+ if ((old_value32 >= data_value32) &&
+ (temp_value32 >= arg_value32)) {
+ new_value32 = T1394_DATA32(temp_value32);
+ old_value32 = T1394_DATA32(old_value32);
+ } else {
+ /* Failed threshold subtract */
+ target_cmd->cmd_u.l32.old_value =
+ T1394_DATA32(cmd->cmd_u.l32.old_value);
+ target_cmd->cmd_result = CMD1394_CMDSUCCESS;
+ TNF_PROBE_0_DEBUG(
+ s1394_process_split_lock_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ break;
+
+ case CMD1394_LOCK_CLIP_ADD:
+ old_value32 = T1394_DATA32(old_value32);
+ temp_value32 = (old_value32 + data_value32);
+ if ((temp_value32 < old_value32) ||
+ (temp_value32 > arg_value32))
+ new_value32 = T1394_DATA32(arg_value32);
+ else
+ new_value32 = T1394_DATA32(temp_value32);
+ old_value32 = T1394_DATA32(old_value32);
+ break;
+
+ case CMD1394_LOCK_CLIP_SUBTRACT:
+ old_value32 = T1394_DATA32(old_value32);
+ temp_value32 = (old_value32 - data_value32);
+ if ((data_value32 > old_value32) ||
+ (temp_value32 < arg_value32))
+ new_value32 = T1394_DATA32(arg_value32);
+ else
+ new_value32 = T1394_DATA32(temp_value32);
+ old_value32 = T1394_DATA32(old_value32);
+ break;
+ }
+
+ /* Send compare-swap lock request */
+ cmd->cmd_u.l32.arg_value = old_value32;
+ cmd->cmd_u.l32.data_value = new_value32;
+ } else {
+ old_value64 = cmd->cmd_u.l64.old_value;
+ data_value64 = target_cmd->cmd_u.l64.data_value;
+ arg_value64 = target_cmd->cmd_u.l64.arg_value;
+
+ /* Lock type specific */
+ switch (target_cmd->cmd_u.l64.lock_type) {
+ case CMD1394_LOCK_BIT_AND:
+ new_value64 = old_value64 & data_value64;
+ break;
+
+ case CMD1394_LOCK_BIT_OR:
+ new_value64 = old_value64 | data_value64;
+ break;
+
+ case CMD1394_LOCK_BIT_XOR:
+ new_value64 = old_value64 ^ data_value64;
+ break;
+
+ case CMD1394_LOCK_INCREMENT:
+ old_value64 = T1394_DATA64(old_value64);
+ new_value64 = old_value64 + 1;
+ new_value64 = T1394_DATA64(new_value64);
+ old_value64 = T1394_DATA64(old_value64);
+ break;
+
+ case CMD1394_LOCK_DECREMENT:
+ old_value64 = T1394_DATA64(old_value64);
+ new_value64 = old_value64 - 1;
+ new_value64 = T1394_DATA64(new_value64);
+ old_value64 = T1394_DATA64(old_value64);
+ break;
+
+ case CMD1394_LOCK_ADD:
+ old_value64 = T1394_DATA64(old_value64);
+ new_value64 = old_value64 + data_value64;
+ new_value64 = T1394_DATA64(new_value64);
+ old_value64 = T1394_DATA64(old_value64);
+ break;
+
+ case CMD1394_LOCK_SUBTRACT:
+ old_value64 = T1394_DATA64(old_value64);
+ new_value64 = old_value64 - data_value64;
+ new_value64 = T1394_DATA64(new_value64);
+ old_value64 = T1394_DATA64(old_value64);
+ break;
+
+ case CMD1394_LOCK_THRESH_ADD:
+ old_value64 = T1394_DATA64(old_value64);
+ temp_value64 = (old_value64 + data_value64);
+ if ((temp_value64 >= old_value64) &&
+ (temp_value64 <= arg_value64)) {
+ new_value64 = T1394_DATA64(temp_value64);
+ old_value64 = T1394_DATA64(old_value64);
+ } else {
+ /* Failed threshold add */
+ target_cmd->cmd_u.l64.old_value =
+ T1394_DATA64(cmd->cmd_u.l64.old_value);
+ target_cmd->cmd_result = CMD1394_CMDSUCCESS;
+ TNF_PROBE_0_DEBUG(
+ s1394_process_split_lock_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ break;
+
+ case CMD1394_LOCK_THRESH_SUBTRACT:
+ old_value64 = T1394_DATA64(old_value64);
+ temp_value64 = (old_value64 - data_value64);
+ if ((old_value64 >= data_value64) &&
+ (temp_value64 >= arg_value64)) {
+ new_value64 = T1394_DATA64(temp_value64);
+ old_value64 = T1394_DATA64(old_value64);
+ } else {
+ /* Failed threshold subtract */
+ target_cmd->cmd_u.l64.old_value =
+ T1394_DATA64(cmd->cmd_u.l64.old_value);
+ target_cmd->cmd_result = CMD1394_CMDSUCCESS;
+ TNF_PROBE_0_DEBUG(
+ s1394_process_split_lock_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ break;
+
+ case CMD1394_LOCK_CLIP_ADD:
+ old_value64 = T1394_DATA64(old_value64);
+ temp_value64 = (old_value64 + data_value64);
+ if ((temp_value64 < old_value64) ||
+ (temp_value64 > arg_value64))
+ new_value64 = T1394_DATA64(arg_value64);
+ else
+ new_value64 = T1394_DATA64(temp_value64);
+ old_value64 = T1394_DATA64(old_value64);
+ break;
+
+ case CMD1394_LOCK_CLIP_SUBTRACT:
+ old_value64 = T1394_DATA64(old_value64);
+ temp_value64 = (old_value64 - data_value64);
+ if ((data_value64 > old_value64) ||
+ (temp_value64 < arg_value64))
+ new_value64 = T1394_DATA64(arg_value64);
+ else
+ new_value64 = T1394_DATA64(temp_value64);
+ old_value64 = T1394_DATA64(old_value64);
+ break;
+ }
+
+ /* Send compare-swap lock request */
+ cmd->cmd_u.l64.arg_value = old_value64;
+ cmd->cmd_u.l64.data_value = new_value64;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_process_split_lock_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_finish_split_lock()
+ * is another "helper" function for the s1394_handle_lock() callback.
+ *    Its job is to finish up whatever lock request processing is necessary.
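+ *    The step-two compare-swap succeeded if and only if the returned
+ *    old_value matches the arg_value that was sent; on a mismatch the
+ *    whole sequence is restarted at step 0, up to temp_num_retries times.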
+ */
+static int
+s1394_finish_split_lock(cmd1394_cmd_t *cmd, cmd1394_cmd_t *target_cmd)
+{
+ s1394_cmd_priv_t *s_priv;
+ uint64_t tmp_value64;
+ uint32_t tmp_value32;
+
+ TNF_PROBE_0_DEBUG(s1394_finish_split_lock_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ if (((cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) &&
+ (cmd->cmd_u.l32.old_value == cmd->cmd_u.l32.arg_value)) ||
+ ((cmd->cmd_type == CMD1394_ASYNCH_LOCK_64) &&
+ (cmd->cmd_u.l64.old_value == cmd->cmd_u.l64.arg_value))) {
+
+ if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) {
+ switch (cmd->cmd_u.l32.lock_type) {
+ case CMD1394_LOCK_INCREMENT:
+ case CMD1394_LOCK_DECREMENT:
+ case CMD1394_LOCK_ADD:
+ case CMD1394_LOCK_SUBTRACT:
+ case CMD1394_LOCK_THRESH_ADD:
+ case CMD1394_LOCK_THRESH_SUBTRACT:
+ case CMD1394_LOCK_CLIP_ADD:
+ case CMD1394_LOCK_CLIP_SUBTRACT:
+ tmp_value32 = cmd->cmd_u.l32.old_value;
+ tmp_value32 = T1394_DATA32(tmp_value32);
+ target_cmd->cmd_u.l32.old_value = tmp_value32;
+ break;
+ default:
+ tmp_value32 = cmd->cmd_u.l32.old_value;
+ target_cmd->cmd_u.l32.old_value = tmp_value32;
+ break;
+ }
+ } else {
+ switch (cmd->cmd_u.l64.lock_type) {
+ case CMD1394_LOCK_INCREMENT:
+ case CMD1394_LOCK_DECREMENT:
+ case CMD1394_LOCK_ADD:
+ case CMD1394_LOCK_SUBTRACT:
+ case CMD1394_LOCK_THRESH_ADD:
+ case CMD1394_LOCK_THRESH_SUBTRACT:
+ case CMD1394_LOCK_CLIP_ADD:
+ case CMD1394_LOCK_CLIP_SUBTRACT:
+ tmp_value64 = cmd->cmd_u.l64.old_value;
+ tmp_value64 = T1394_DATA64(tmp_value64);
+ target_cmd->cmd_u.l64.old_value = tmp_value64;
+ break;
+ default:
+ tmp_value64 = cmd->cmd_u.l64.old_value;
+ target_cmd->cmd_u.l64.old_value = tmp_value64;
+ break;
+ }
+ }
+ /* Set status */
+ target_cmd->cmd_result = CMD1394_CMDSUCCESS;
+ TNF_PROBE_0_DEBUG(s1394_finish_split_lock_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+ } else {
+ if (s_priv->temp_num_retries > 0) {
+ /* Decrement retry count */
+ s_priv->temp_num_retries--;
+
+ /* Reset lock_req_step */
+ s_priv->lock_req_step = 0;
+
+ TNF_PROBE_0_DEBUG(s1394_finish_split_lock_start_over,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ /* Resend... start at step 0 again */
+ TNF_PROBE_0_DEBUG(s1394_finish_split_lock_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ } else {
+ /* Failed... RETRIES_EXCEEDED */
+ target_cmd->cmd_result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_0_DEBUG(s1394_finish_split_lock_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ }
+}
diff --git a/usr/src/uts/common/io/1394/s1394_bus_reset.c b/usr/src/uts/common/io/1394/s1394_bus_reset.c
new file mode 100644
index 0000000000..70d95f8031
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394_bus_reset.c
@@ -0,0 +1,1566 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394_bus_reset.c
+ * 1394 Services Layer Bus Reset Routines
+ * These routines handle all of the tasks relating to 1394 bus resets
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+#include <sys/1394/ieee1394.h>
+#include <sys/1394/ieee1212.h>
+
+static uint8_t selfid_speed(s1394_selfid_pkt_t *s);
+
+static int selfid_num_ports(s1394_selfid_pkt_t *s);
+
+static int selfid_port_type(s1394_selfid_pkt_t *s, int port);
+
+static void s1394_hal_stack_push(s1394_hal_t *hal, void *o);
+
+static void *s1394_hal_stack_pop(s1394_hal_t *hal);
+
+static void s1394_hal_queue_insert(s1394_hal_t *hal, void *o);
+
+static void *s1394_hal_queue_remove(s1394_hal_t *hal);
+
+static void s1394_node_number_list_add(s1394_hal_t *hal, int node_num);
+
+static void s1394_speed_map_fill_speed_N(s1394_hal_t *hal, int min_spd);
+
+static void s1394_speed_map_initialize(s1394_hal_t *hal);
+
+int s1394_ignore_invalid_gap_cnt = 0; /* patch for invalid gap_cnts */
+
+/*
+ * Gap_count look-up table (See IEEE P1394a Table C-2) - Draft 3.0
+ * (modified from original table IEEE 1394-1995 8.4.6.2)
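+ * Indexed by the bus diameter in hops; for example, a bus whose
+ * longest path spans three hops uses gap_count[3] == 8.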
+ */
+static int gap_count[MAX_HOPS + 1] = {
+ 0, 5, 7, 8, 10, 13, 16, 18, 21,
+ 24, 26, 29, 32, 35, 37, 40, 43,
+ 46, 48, 51, 54, 57, 59, 62
+};
+
+/*
+ * s1394_parse_selfid_buffer()
+ * takes the SelfID data buffer and parses it, testing whether each packet
+ * is valid (has a correct inverse packet) and setting the pointers in
+ * selfid_ptrs[] to the appropriate offsets within the buffer.
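+ *    Each SelfID packet occupies two quadlets in the buffer (the packet
+ *    data followed by its bitwise inverse), which is what the validity
+ *    check and the stride of two quadlets below rely on.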
+ */
+int
+s1394_parse_selfid_buffer(s1394_hal_t *hal, void *selfid_buf_addr,
+ uint32_t selfid_size)
+{
+ s1394_selfid_pkt_t *s;
+ uint32_t *data;
+ uint_t i = 0;
+ uint_t j = 0;
+ boolean_t error = B_FALSE;
+ int valid_pkt_id;
+
+ TNF_PROBE_0_DEBUG(s1394_parse_selfid_buffer_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ data = (uint32_t *)selfid_buf_addr;
+
+ if (selfid_size == 0) {
+ TNF_PROBE_1(s1394_parse_selfid_buffer_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "SelfID buffer error - zero size");
+
+ /* Initiate a bus reset */
+ s1394_initiate_hal_reset(hal, CRITICAL);
+
+ /* Set error status */
+ error = B_TRUE;
+
+ /* Release HAL lock and return */
+ goto parse_buffer_done;
+ }
+
+ /* Convert bytes to quadlets */
+ selfid_size = selfid_size >> 2;
+
+ while (j < selfid_size) {
+ valid_pkt_id = ((data[j] & IEEE1394_SELFID_PCKT_ID_MASK) >>
+ IEEE1394_SELFID_PCKT_ID_SHIFT);
+
+ s = (s1394_selfid_pkt_t *)(&data[j]);
+
+ /* Test if packet has valid inverse quadlet */
+ if (IEEE1394_SELFID_ISVALID(s) &&
+ (valid_pkt_id == IEEE1394_SELFID_PCKT_ID_VALID)) {
+
+ hal->selfid_ptrs[i] = s;
+
+ /* While this packet contains multiple quadlets */
+ j += 2;
+
+ while (IEEE1394_SELFID_ISMORE(s)) {
+ valid_pkt_id =
+ ((data[j] & IEEE1394_SELFID_PCKT_ID_MASK) >>
+ IEEE1394_SELFID_PCKT_ID_SHIFT);
+
+ s = (s1394_selfid_pkt_t *)(&data[j]);
+
+ /* Test if packet has valid inverse quadlet */
+ if (IEEE1394_SELFID_ISVALID(s) &&
+ (valid_pkt_id ==
+ IEEE1394_SELFID_PCKT_ID_VALID)) {
+ j += 2;
+ } else {
+ TNF_PROBE_1(
+ s1394_parse_selfid_buffer_error,
+ S1394_TNF_SL_BR_ERROR, "",
+ tnf_string, msg, "SelfID packet "
+ "error - invalid inverse");
+
+ /* Initiate a bus reset */
+ s1394_initiate_hal_reset(hal, CRITICAL);
+
+ /* Set error status */
+ error = B_TRUE;
+
+ /* Release HAL lock and return */
+ goto parse_buffer_done;
+ }
+ }
+ i++;
+ } else {
+ TNF_PROBE_1(s1394_parse_selfid_buffer_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "SelfID packet error - invalid inverse");
+
+ /* Initiate a bus reset */
+ s1394_initiate_hal_reset(hal, CRITICAL);
+
+ /* Set error status */
+ error = B_TRUE;
+
+ /* Release HAL lock and return */
+ goto parse_buffer_done;
+ }
+ }
+
+ hal->number_of_nodes = i;
+
+parse_buffer_done:
+ TNF_PROBE_0_DEBUG(s1394_parse_selfid_buffer_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ if (error == B_TRUE)
+ return (DDI_FAILURE);
+ else
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_sort_selfids()
+ * takes the selfid_ptrs[] in the HAL struct and sorts them by node number,
+ * using a heapsort.
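+ *    The first loop sifts each entry up to build a heap keyed on the
+ *    PHY ID in each packet; the second loop repeatedly swaps the largest
+ *    remaining entry to the end of the array and restores the heap.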
+ */
+void
+s1394_sort_selfids(s1394_hal_t *hal)
+{
+ s1394_selfid_pkt_t *current;
+ uint_t number_of_nodes;
+ int i;
+ int j;
+
+ TNF_PROBE_0_DEBUG(s1394_sort_selfids_enter, S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ number_of_nodes = hal->number_of_nodes;
+
+ /* We start at one because the root has no parent to check */
+ for (i = 1; i < number_of_nodes; i++) {
+ current = hal->selfid_ptrs[i];
+ j = i;
+ while ((j > 0) && (IEEE1394_SELFID_PHYID(current) >
+ IEEE1394_SELFID_PHYID(hal->selfid_ptrs[j / 2]))) {
+ hal->selfid_ptrs[j] = hal->selfid_ptrs[j / 2];
+ hal->selfid_ptrs[j / 2] = current;
+ j = j / 2;
+ }
+ }
+
+ for (i = number_of_nodes - 1; i > 0; i--) {
+ current = hal->selfid_ptrs[i];
+ hal->selfid_ptrs[i] = hal->selfid_ptrs[0];
+ hal->selfid_ptrs[0] = current;
+ j = 0;
+ while (2 * j + 1 < i) {
+ if (2 * j + 2 >= i) {
+ if (IEEE1394_SELFID_PHYID(current) <
+ IEEE1394_SELFID_PHYID(
+ hal->selfid_ptrs[2 * j + 1])) {
+ hal->selfid_ptrs[j] =
+ hal->selfid_ptrs[2 * j + 1];
+ hal->selfid_ptrs[2 * j + 1] = current;
+ j = 2 * j + 1;
+ }
+ break;
+ }
+
+ if (IEEE1394_SELFID_PHYID(hal->selfid_ptrs[2 * j + 1]) >
+ IEEE1394_SELFID_PHYID(
+ hal->selfid_ptrs[2 * j + 2])) {
+ if (IEEE1394_SELFID_PHYID(current) <
+ IEEE1394_SELFID_PHYID(
+ hal->selfid_ptrs[2 * j + 1])) {
+ hal->selfid_ptrs[j] =
+ hal->selfid_ptrs[2 * j + 1];
+ hal->selfid_ptrs[2 * j + 1] = current;
+ j = 2 * j + 1;
+ } else {
+ break;
+ }
+ } else {
+ if (IEEE1394_SELFID_PHYID(current) <
+ IEEE1394_SELFID_PHYID(
+ hal->selfid_ptrs[2 * j + 2])) {
+ hal->selfid_ptrs[j] =
+ hal->selfid_ptrs[2 * j + 2];
+ hal->selfid_ptrs[2 * j + 2] = current;
+ j = 2 * j + 2;
+ } else {
+ break;
+ }
+ }
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_sort_selfids_exit, S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * selfid_speed()
+ * examines the "sp" bits for a given packet (see IEEE 1394-1995 4.3.4.1)
+ * and returns the node's speed capabilities.
+ */
+static uint8_t
+selfid_speed(s1394_selfid_pkt_t *s)
+{
+ uint32_t sp;
+
+ sp = ((s->spkt_data & IEEE1394_SELFID_SP_MASK) >>
+ IEEE1394_SELFID_SP_SHIFT);
+
+ switch (sp) {
+ case IEEE1394_S100:
+ case IEEE1394_S200:
+ case IEEE1394_S400:
+ return (sp);
+
+ /*
+ * To verify higher speeds we should look at PHY register #3
+ * on this node. This will need to be done to support P1394b
+ */
+ default:
+ return (IEEE1394_S400);
+ }
+}
+
+/*
+ * selfid_num_ports()
+ * determines whether a packet is multi-part or single, and from this it
+ * calculates the number of ports which have been specified.
+ * (See IEEE 1394-1995 4.3.4.1)
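+ *    A single (non-concatenated) packet describes up to three ports;
+ *    each continuation packet describes up to eight more, so, for
+ *    example, a node reporting 11 ports uses one continuation packet.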
+ */
+static int
+selfid_num_ports(s1394_selfid_pkt_t *s)
+{
+ int p = 3;
+
+ while (IEEE1394_SELFID_ISMORE(s)) {
+ p += 8;
+ s++;
+ }
+
+ /* Threshold the number of ports at the P1394A defined maximum */
+ /* (see P1394A Draft 3.0 - Section 8.5.1) */
+ if (p > IEEE1394_MAX_NUM_PORTS)
+ p = IEEE1394_MAX_NUM_PORTS;
+
+ return (p);
+}
+
+/*
+ * selfid_port_type()
+ * determines what type of node the specified port connects to.
+ * (See IEEE 1394-1995 4.3.4.1)
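+ *    Each port is a two-bit field: ports 0-2 live in the first quadlet
+ *    and every continuation quadlet holds up to eight more ports.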
+ */
+static int
+selfid_port_type(s1394_selfid_pkt_t *s, int port)
+{
+ int block;
+ int offset = IEEE1394_SELFID_PORT_OFFSET_FIRST;
+
+ if (port > selfid_num_ports(s)) {
+ TNF_PROBE_1(selfid_port_type_error,
+ "1394 s1394 error",
+ "Invalid port number requested for node",
+ tnf_uint, node_num, IEEE1394_SELFID_PHYID(s));
+ }
+
+ if (port > 2) {
+ /* Calculate which quadlet and bits for this port */
+ port -= 3;
+ block = (port >> 3) + 1;
+ port = port % 8;
+ /* Move to the correct quadlet */
+ s += block;
+ offset = IEEE1394_SELFID_PORT_OFFSET_OTHERS;
+ }
+
+ /* Shift by appropriate number of bits and mask */
+ return ((s->spkt_data >> (offset - 2 * port)) & 0x00000003);
+}
+
+/*
+ * s1394_init_topology_tree()
+ *    frees any config ROMs allocated in the topology tree before zapping it.
+ *    If a bus reset arrives before the tree is marked processed, there will
+ *    be memory allocated for config ROMs still being read. If there is no
+ *    tree copy, the topology tree is still the one from the previous
+ *    generation, and if we simply bzero'd it we would leak that memory. To
+ *    avoid the leak, walk through the tree and free the config ROM of any
+ *    node that is NOT matched. (For matched nodes, we ensure that the nodes
+ *    in the old and topology trees point to the same area of memory.)
+ */
+void
+s1394_init_topology_tree(s1394_hal_t *hal, boolean_t copied,
+ ushort_t number_of_nodes)
+{
+ s1394_node_t *node;
+ uint32_t *config_rom;
+ uint_t tree_size;
+ int i;
+
+ TNF_PROBE_0_DEBUG(s1394_init_topology_tree_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ /*
+	 * If copied is false, we want to free any cfgrom memory that is
+	 * not referenced in both the topology and old trees. However, we
+	 * don't use hal->number_of_nodes as the number of nodes to look at,
+	 * because we could be seeing the bus reset before the state is
+	 * appropriate for a tree copy (which requires topology_tree_processed
+	 * to be true); some nodes might have departed in this generation, and
+	 * hal->number_of_nodes reflects only the current generation. Instead,
+	 * use the number_of_nodes argument passed into this routine as the
+	 * actual number of nodes to look at.
+ */
+ if (copied == B_FALSE) {
+ /* Free any cfgrom alloced and zap the node */
+ for (i = 0; i < number_of_nodes; i++) {
+ node = &hal->topology_tree[i];
+ config_rom = node->cfgrom;
+ if (config_rom != NULL) {
+ if (CFGROM_NEW_ALLOC(node) == B_TRUE) {
+ TNF_PROBE_2_DEBUG(
+ s1394_init_top_tree_free_cfgrom,
+ S1394_TNF_SL_BR_STACK,
+ "cfgrom free", tnf_int, node_num, i,
+ tnf_opaque, cfgrom, config_rom);
+ kmem_free((void *)config_rom,
+ IEEE1394_CONFIG_ROM_SZ);
+ } else {
+ TNF_PROBE_2_DEBUG(s1394_init_top_tree,
+ S1394_TNF_SL_BR_STACK, "",
+ tnf_int, node_num, i,
+ tnf_opaque, cfgrom, config_rom);
+ }
+ }
+ }
+ }
+
+ tree_size = hal->number_of_nodes * sizeof (s1394_node_t);
+ bzero((void *)hal->topology_tree, tree_size);
+
+ TNF_PROBE_0_DEBUG(s1394_init_topology_tree_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_topology_tree_build()
+ * takes the selfid_ptrs[] and builds the topology_tree[] by examining
+ * the node numbers (the order in which the nodes responded to SelfID).
+ * It sets the port pointers, leaf label, parent port, and
+ * s1394_selfid_packet_t pointer in each node.
+ */
+int
+s1394_topology_tree_build(s1394_hal_t *hal)
+{
+ s1394_node_t *tmp;
+ uint32_t number_of_nodes;
+ boolean_t push_to_orphan_stack = B_FALSE;
+ boolean_t found_parent = B_FALSE;
+ boolean_t found_connection = B_FALSE;
+ int i;
+ int j;
+
+ /*
+ * The method for building the tree is described in IEEE 1394-1995
+ * (Annex E.3.4). We use an "Orphan" stack to keep track of Child
+ * nodes which have yet to find their Parent node.
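+	 * Because SelfID packets arrive in ascending node-number order, every
+	 * child is seen before its parent; a node that reports a parent
+	 * connection is pushed as an "orphan", and when a later node reports
+	 * a child connection the most recent orphan is popped and linked in.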
+ */
+
+ TNF_PROBE_0_DEBUG(s1394_topology_tree_build_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ number_of_nodes = hal->number_of_nodes;
+
+ /* Flush the Stack */
+ hal->hal_stack_depth = -1;
+
+ /* For each node on the bus initialize its topology_tree entry */
+ for (i = 0; i < number_of_nodes; i++) {
+ /* Make sure that node numbers are correct */
+ if (i != IEEE1394_SELFID_PHYID(hal->selfid_ptrs[i])) {
+ TNF_PROBE_1(s1394_topology_tree_build_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "SelfIDs - Invalid node numbering");
+
+ /* Initiate a bus reset */
+ s1394_initiate_hal_reset(hal, CRITICAL);
+
+ TNF_PROBE_0_DEBUG(s1394_topology_tree_build_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ hal->topology_tree[i].selfid_packet = hal->selfid_ptrs[i];
+ hal->topology_tree[i].parent_port = (char)NO_PARENT;
+ hal->topology_tree[i].is_a_leaf = 1;
+ hal->topology_tree[i].node_num = (uchar_t)i;
+ }
+
+ for (i = 0; i < number_of_nodes; i++) {
+ /* Current node has no parent yet */
+ found_parent = B_FALSE;
+
+ /* Current node has no connections yet */
+ found_connection = B_FALSE;
+
+ /* Initialize all ports on this node */
+ for (j = 0; j < IEEE1394_MAX_NUM_PORTS; j++)
+ hal->topology_tree[i].phy_port[j] = NULL;
+
+ /* For each port on the node - highest to lowest */
+ for (j = selfid_num_ports(hal->selfid_ptrs[i]) - 1;
+ j >= 0; j--) {
+ if (selfid_port_type(hal->selfid_ptrs[i], j) ==
+ IEEE1394_SELFID_PORT_TO_PARENT) {
+
+ found_connection = B_TRUE;
+ if (found_parent == B_FALSE) {
+ push_to_orphan_stack = B_TRUE;
+ hal->topology_tree[i].parent_port =
+ (char)j;
+ found_parent = B_TRUE;
+
+ } else {
+ TNF_PROBE_1(
+ s1394_topology_tree_build_error,
+ S1394_TNF_SL_BR_ERROR, "",
+ tnf_string, msg, "SelfID packet - "
+ "Has multiple parents");
+
+ /* Initiate a bus reset */
+ s1394_initiate_hal_reset(hal, CRITICAL);
+
+ TNF_PROBE_0_DEBUG(
+ s1394_topology_tree_build_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (DDI_FAILURE);
+ }
+ } else if (selfid_port_type(hal->selfid_ptrs[i], j) ==
+ IEEE1394_SELFID_PORT_TO_CHILD) {
+
+ found_connection = B_TRUE;
+ tmp = (s1394_node_t *)s1394_hal_stack_pop(hal);
+ if (tmp == NULL) {
+ TNF_PROBE_1(
+ s1394_topology_tree_build_error,
+ S1394_TNF_SL_BR_ERROR, "",
+ tnf_string, msg, "Topology Tree "
+ "invalid - Tree build failed");
+
+ /* Initiate a bus reset */
+ s1394_initiate_hal_reset(hal, CRITICAL);
+
+ TNF_PROBE_0_DEBUG(
+ s1394_topology_tree_build_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ hal->topology_tree[i].phy_port[j] = tmp;
+ hal->topology_tree[i].is_a_leaf = 0;
+ tmp->phy_port[tmp->parent_port] =
+ &hal->topology_tree[i];
+ }
+ }
+
+ /* If current node has no parents or children - Invalid */
+ if ((found_connection == B_FALSE) && (number_of_nodes > 1)) {
+ TNF_PROBE_1(s1394_topology_tree_build_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "SelfID packet - Has no connections");
+
+ /* Initiate a bus reset */
+ s1394_initiate_hal_reset(hal, CRITICAL);
+
+ TNF_PROBE_0_DEBUG(s1394_topology_tree_build_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Push it on the "Orphan" stack if it has no parent yet */
+ if (push_to_orphan_stack == B_TRUE) {
+ push_to_orphan_stack = B_FALSE;
+ s1394_hal_stack_push(hal, &hal->topology_tree[i]);
+ }
+ }
+
+ /* If the stack is not empty, then something has gone seriously wrong */
+ if (hal->hal_stack_depth != -1) {
+ TNF_PROBE_1(s1394_topology_tree_build_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "Topology Tree invalid - Tree build failed");
+
+ /* Initiate a bus reset */
+ s1394_initiate_hal_reset(hal, CRITICAL);
+
+ TNF_PROBE_0_DEBUG(s1394_topology_tree_build_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* New topology tree is now valid */
+ hal->topology_tree_valid = B_TRUE;
+
+ TNF_PROBE_0_DEBUG(s1394_topology_tree_build_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_hal_stack_push()
+ * checks that the stack is not full, and puts the pointer on top of the
+ * HAL's stack if it isn't. This routine is used only by the
+ * h1394_self_ids() interrupt.
+ */
+static void
+s1394_hal_stack_push(s1394_hal_t *hal, void *obj)
+{
+ TNF_PROBE_0_DEBUG(s1394_hal_stack_push_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ if (hal->hal_stack_depth < IEEE1394_MAX_NODES - 1) {
+ hal->hal_stack_depth++;
+ hal->hal_stack[hal->hal_stack_depth] = obj;
+ } else {
+ TNF_PROBE_1(s1394_hal_stack_push_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "HAL stack - Overflow");
+ TNF_PROBE_0_DEBUG(s1394_hal_stack_push_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_hal_stack_push_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_hal_stack_pop()
+ * checks that the stack is not empty, and pops and returns the pointer
+ * from the top of the HAL's stack if it isn't. This routine is used
+ * only by the h1394_self_ids() interrupt.
+ */
+static void *
+s1394_hal_stack_pop(s1394_hal_t *hal)
+{
+ TNF_PROBE_0_DEBUG(s1394_hal_stack_pop_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ if (hal->hal_stack_depth > -1) {
+ hal->hal_stack_depth--;
+ TNF_PROBE_0_DEBUG(s1394_hal_stack_pop_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (hal->hal_stack[hal->hal_stack_depth + 1]);
+
+ } else {
+ TNF_PROBE_1(s1394_hal_stack_pop_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "HAL stack - Underflow");
+ TNF_PROBE_0_DEBUG(s1394_hal_stack_pop_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (NULL);
+ }
+}
+
+/*
+ * s1394_hal_queue_insert()
+ * checks that the queue is not full, and puts the object in the front
+ * of the HAL's queue if it isn't. This routine is used only by the
+ * h1394_self_ids() interrupt.
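+ *    The queue is a circular array of IEEE1394_MAX_NODES entries and is
+ *    considered full when advancing the front index would make it equal
+ *    to the back index.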
+ */
+static void
+s1394_hal_queue_insert(s1394_hal_t *hal, void *obj)
+{
+ TNF_PROBE_0_DEBUG(s1394_hal_queue_insert_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ if (((hal->hal_queue_front + 1) % IEEE1394_MAX_NODES) ==
+ hal->hal_queue_back) {
+ TNF_PROBE_1(s1394_hal_queue_insert_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "HAL Queue - Overflow");
+ TNF_PROBE_0_DEBUG(s1394_hal_queue_insert_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return;
+
+ } else {
+ hal->hal_queue[hal->hal_queue_front] = obj;
+ hal->hal_queue_front = (hal->hal_queue_front + 1) %
+ IEEE1394_MAX_NODES;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_hal_queue_insert_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+
+/*
+ * s1394_hal_queue_remove()
+ * checks that the queue is not empty, and pulls the object off the back
+ * of the HAL's queue (and returns it) if it isn't. This routine is used
+ * only by the h1394_self_ids() interrupt.
+ */
+static void *
+s1394_hal_queue_remove(s1394_hal_t *hal)
+{
+ void *tmp;
+
+ TNF_PROBE_0_DEBUG(s1394_hal_queue_remove_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ if (hal->hal_queue_back == hal->hal_queue_front) {
+ TNF_PROBE_1(s1394_hal_queue_remove_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "HAL Queue - Underflow");
+ TNF_PROBE_0_DEBUG(s1394_hal_queue_remove_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (NULL);
+
+ } else {
+ tmp = hal->hal_queue[hal->hal_queue_back];
+ hal->hal_queue_back = (hal->hal_queue_back + 1) %
+ IEEE1394_MAX_NODES;
+ TNF_PROBE_0_DEBUG(s1394_hal_queue_remove_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (tmp);
+ }
+}
+
+
+/*
+ * s1394_node_number_list_add()
+ * checks that the node_number_list is not full and puts the node number
+ * in the list. The function is used primarily by s1394_speed_map_fill()
+ * to keep track of which connections need to be set in the speed_map[].
+ * This routine is used only by the h1394_self_ids() interrupt.
+ */
+static void
+s1394_node_number_list_add(s1394_hal_t *hal, int node_num)
+{
+ TNF_PROBE_1_DEBUG(s1394_node_number_list_add_enter,
+ S1394_TNF_SL_BR_STACK, "", tnf_int, node_num, node_num);
+
+ if (hal->hal_node_number_list_size >= IEEE1394_MAX_NODES - 1) {
+ TNF_PROBE_1(s1394_node_number_list_add_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "Node Number List - Overflow");
+ TNF_PROBE_0_DEBUG(s1394_node_number_list_add_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return;
+ }
+
+ hal->hal_node_number_list[hal->hal_node_number_list_size] = node_num;
+ hal->hal_node_number_list_size++;
+
+ TNF_PROBE_0_DEBUG(s1394_node_number_list_add_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_topology_tree_mark_all_unvisited()
+ * is used to initialize the topology_tree[] prior to tree traversals.
+ * It resets the "visited" flag for each node in the tree.
+ */
+void
+s1394_topology_tree_mark_all_unvisited(s1394_hal_t *hal)
+{
+ uint_t number_of_nodes;
+ int i;
+
+ TNF_PROBE_0_DEBUG(s1394_topology_tree_mark_all_unvisited_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ number_of_nodes = hal->number_of_nodes;
+ for (i = 0; i < number_of_nodes; i++)
+ CLEAR_NODE_VISITED(&hal->topology_tree[i]);
+
+ TNF_PROBE_0_DEBUG(s1394_topology_tree_mark_all_unvisited_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_old_tree_mark_all_unvisited()
+ * is used to initialize the old_tree[] prior to tree traversals. It
+ * resets the "visited" flag for each node in the tree.
+ */
+void
+s1394_old_tree_mark_all_unvisited(s1394_hal_t *hal)
+{
+ uint_t number_of_nodes;
+ int i;
+
+ TNF_PROBE_0_DEBUG(s1394_old_tree_mark_all_unvisited_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ number_of_nodes = hal->old_number_of_nodes;
+ for (i = 0; i < number_of_nodes; i++)
+ CLEAR_NODE_VISITED(&hal->old_tree[i]);
+
+ TNF_PROBE_0_DEBUG(s1394_old_tree_mark_all_unvisited_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_old_tree_mark_all_unmatched()
+ * is used to initialize the old_tree[] prior to tree traversals. It
+ * resets the "matched" flag for each node in the tree.
+ */
+void
+s1394_old_tree_mark_all_unmatched(s1394_hal_t *hal)
+{
+ uint_t number_of_nodes;
+ int i;
+
+ TNF_PROBE_0_DEBUG(s1394_old_tree_mark_all_unmatched_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ number_of_nodes = hal->old_number_of_nodes;
+
+ for (i = 0; i < number_of_nodes; i++)
+ CLEAR_NODE_MATCHED(&hal->old_tree[i]);
+
+ TNF_PROBE_0_DEBUG(s1394_old_tree_mark_all_unmatched_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_copy_old_tree()
+ * switches the pointers for old_tree[] and topology_tree[].
+ */
+void
+s1394_copy_old_tree(s1394_hal_t *hal)
+{
+ s1394_node_t *temp;
+
+ TNF_PROBE_0_DEBUG(s1394_copy_old_tree_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ temp = hal->old_tree;
+ hal->old_tree = hal->topology_tree;
+ hal->topology_tree = temp;
+
+ hal->old_number_of_nodes = hal->number_of_nodes;
+ hal->old_node_id = hal->node_id;
+ hal->old_generation_count = hal->generation_count;
+
+	/* The old tree is now valid and filled in as well */
+ hal->old_tree_valid = B_TRUE;
+
+ TNF_PROBE_0_DEBUG(s1394_copy_old_tree_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+
+/*
+ * s1394_match_tree_nodes()
+ * uses the information contained in the SelfID packets of the nodes in
+ * both the old_tree[] and the topology_tree[] to determine which new
+ * nodes correspond to old nodes. Starting with the local node, we
+ * compare both old and new node's ports. Assuming that only one bus
+ * reset has occurred, any node that was connected to another in the old
+ * bus and is still connected to another in the new bus must be connected
+ * (physically) to the same node. Using this information, we can rebuild
+ * and match the old nodes to new ones. Any nodes which aren't matched
+ * are either departing or arriving nodes and must be handled appropriately.
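+ *    The walk below is a breadth-first traversal seeded with the local
+ *    node, which trivially matches itself across generations.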
+ */
+void
+s1394_match_tree_nodes(s1394_hal_t *hal)
+{
+ s1394_node_t *tmp;
+ uint_t hal_node_num;
+ uint_t hal_node_num_old;
+ int i;
+ int port_type;
+
+ TNF_PROBE_0_DEBUG(s1394_match_tree_nodes_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ /* To ensure that the queue is empty */
+ hal->hal_queue_front = hal->hal_queue_back = 0;
+
+ /* Set up the first matched nodes (which are our own local nodes) */
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ hal_node_num_old = IEEE1394_NODE_NUM(hal->old_node_id);
+ hal->topology_tree[hal_node_num].old_node =
+ &hal->old_tree[hal_node_num_old];
+ hal->old_tree[hal_node_num_old].cur_node =
+ &hal->topology_tree[hal_node_num];
+
+ /* Put the node on the queue */
+ s1394_hal_queue_insert(hal, &hal->topology_tree[hal_node_num]);
+
+ /* While the queue is not empty, remove a node */
+ while (hal->hal_queue_front != hal->hal_queue_back) {
+ tmp = (s1394_node_t *)s1394_hal_queue_remove(hal);
+
+ /* Mark both old and new nodes as "visited" */
+ SET_NODE_VISITED(tmp);
+ SET_NODE_VISITED(tmp->old_node);
+ tmp->old_node->cur_node = tmp;
+
+ /* Mark old and new nodes as "matched" */
+ SET_NODE_MATCHED(tmp);
+ SET_NODE_MATCHED(tmp->old_node);
+ s1394_copy_cfgrom(tmp, tmp->old_node);
+
+ /* s1394_copy_cfgrom() clears "matched" for some cases... */
+ if ((tmp->cfgrom != NULL && CONFIG_ROM_GEN(tmp->cfgrom) <= 1) ||
+ NODE_MATCHED(tmp) == B_TRUE) {
+			/*
+			 * Move the target list over to the new node and
+			 * update the node info.
+			 */
+ s1394_target_t *t;
+
+ rw_enter(&hal->target_list_rwlock, RW_WRITER);
+ t = tmp->target_list = tmp->old_node->target_list;
+ while (t != NULL) {
+ t->on_node = tmp;
+ t = t->target_sibling;
+ }
+ rw_exit(&hal->target_list_rwlock);
+ }
+
+ for (i = 0; i < selfid_num_ports(tmp->selfid_packet); i++) {
+ port_type = selfid_port_type(tmp->selfid_packet, i);
+
+ /* Is the new port connected? */
+ if ((port_type == IEEE1394_SELFID_PORT_TO_CHILD) ||
+ (port_type == IEEE1394_SELFID_PORT_TO_PARENT)) {
+ port_type = selfid_port_type(
+ tmp->old_node->selfid_packet, i);
+
+ /* Is the old port connected? */
+ if ((port_type ==
+ IEEE1394_SELFID_PORT_TO_CHILD) ||
+ (port_type ==
+ IEEE1394_SELFID_PORT_TO_PARENT)) {
+ /* Found a match, check if */
+ /* we've already visited it */
+ if (!NODE_VISITED(tmp->phy_port[i])) {
+ tmp->phy_port[i]->old_node =
+ tmp->old_node->phy_port[i];
+ s1394_hal_queue_insert(hal,
+ tmp->phy_port[i]);
+ }
+ }
+ }
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_match_tree_nodes_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_topology_tree_calculate_diameter()
+ *    does a depth-first tree traversal, tracking at each branch the first
+ *    and second deepest paths through that branch's children. The diameter
+ *    is given by the maximum of these over all branch nodes.
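+ *    (At each branch, max_1st and max_2nd hold the two deepest subtree
+ *    depths, and their sum is a candidate diameter.)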
+ */
+int
+s1394_topology_tree_calculate_diameter(s1394_hal_t *hal)
+{
+ s1394_node_t *current;
+ uint_t number_of_nodes;
+ int i;
+ int start;
+ int end;
+ boolean_t done;
+ boolean_t found_a_child;
+ int distance = 0;
+ int diameter = 0;
+ int local_diameter = 0;
+
+ TNF_PROBE_0_DEBUG(s1394_topology_tree_calculate_diameter_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ number_of_nodes = hal->number_of_nodes;
+
+ /* Initialize topology tree */
+ for (i = 0; i < number_of_nodes; i++) {
+ hal->topology_tree[i].max_1st = 0;
+ hal->topology_tree[i].max_2nd = 0;
+ hal->topology_tree[i].last_port_checked = 0;
+ }
+
+ /* Start at the root node */
+ current = s1394_topology_tree_get_root_node(hal);
+
+ /* Flush the stack before we start */
+ hal->hal_stack_depth = -1;
+
+ do {
+ done = B_FALSE;
+ found_a_child = B_FALSE;
+ start = current->last_port_checked;
+ end = selfid_num_ports(current->selfid_packet);
+
+ /* Check every previously unchecked port for children */
+ for (i = start; i < end; i++) {
+ current->last_port_checked++;
+ /* If there is a child push it on the stack */
+ if (selfid_port_type(current->selfid_packet, i) ==
+ IEEE1394_SELFID_PORT_TO_CHILD) {
+ found_a_child = B_TRUE;
+ s1394_hal_stack_push(hal, current);
+ current = current->phy_port[i];
+ break;
+ }
+ }
+
+ /* If we reach here and the stack is empty, we're done */
+ if (hal->hal_stack_depth == -1) {
+ done = B_TRUE;
+ continue;
+ }
+
+ /* If no children were found, we're at a leaf */
+ if (found_a_child == B_FALSE) {
+ distance = current->max_1st + 1;
+ /* Pop the child and set the appropriate fields */
+ current = s1394_hal_stack_pop(hal);
+ if (distance > current->max_1st) {
+ current->max_2nd = current->max_1st;
+ current->max_1st = (uchar_t)distance;
+
+ } else if (distance > current->max_2nd) {
+ current->max_2nd = (uchar_t)distance;
+ }
+
+ /* Update maximum distance (diameter), if necessary */
+ local_diameter = current->max_1st + current->max_2nd;
+ if (local_diameter > diameter)
+ diameter = local_diameter;
+ }
+ } while (done == B_FALSE);
+
+ TNF_PROBE_0_DEBUG(s1394_topology_tree_calculate_diameter_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (diameter);
+}
+
+/*
+ * s1394_gap_count_optimize()
+ * looks in a table to find the appropriate gap_count for a given diameter.
+ * (See above - gap_count[])
+ */
+int
+s1394_gap_count_optimize(int diameter)
+{
+ if ((diameter >= 0) && (diameter <= MAX_HOPS)) {
+ return (gap_count[diameter]);
+ } else {
+ cmn_err(CE_NOTE, "Too may point-to-point links on the 1394"
+ " bus - If new devices have recently been added, remove"
+ " them.");
+ return (gap_count[MAX_HOPS]);
+ }
+}
+
+/*
+ * s1394_get_current_gap_count()
+ * looks at all the SelfID packets to determine the current gap_count on
+ * the 1394 bus. If the gap_counts differ from node to node, it initiates
+ * a bus reset and returns -1.
+ */
+int
+s1394_get_current_gap_count(s1394_hal_t *hal)
+{
+ int i;
+ int gap_count = -1;
+
+ TNF_PROBE_0_DEBUG(s1394_get_current_gap_count_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ /* Grab the first gap_count in the SelfID packets */
+ gap_count = IEEE1394_SELFID_GAP_CNT(hal->selfid_ptrs[0]);
+
+	/* Compare it to all the rest */
+ for (i = 1; i < hal->number_of_nodes; i++) {
+ if (gap_count !=
+ IEEE1394_SELFID_GAP_CNT(hal->selfid_ptrs[i])) {
+
+ /* Inconsistent gap counts */
+ TNF_PROBE_1(s1394_get_current_gap_count_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "Inconsistent gap count");
+
+ if (s1394_ignore_invalid_gap_cnt == 0) {
+ /* Initiate a bus reset */
+ s1394_initiate_hal_reset(hal, CRITICAL);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_get_current_gap_count_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (-1);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_get_current_gap_count_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (gap_count);
+}
+
+/*
+ * s1394_speed_map_fill()
+ * determines, for each pair of nodes, the maximum speed at which those
+ * nodes can communicate. The speed of each node as well as the speed of
+ * any intermediate nodes on a given path must be accounted for, as the
+ * minimum speed on a given edge determines the maximum speed for all
+ * communications across that edge.
+ * In the method we implement below, a current minimum speed is selected.
+ * With this minimum speed in mind, we create subgraphs of the original
+ * bus which contain only edges that connect two nodes whose speeds are
+ * equal to or greater than the current minimum speed. Then, for each of
+ * the subgraphs, we visit every node, keeping a list of the nodes we've
+ * visited. When this list is completed, we can fill in the entries in
+ *    the speed map which correspond to pairs of these nodes. Doing this
+ * for each subgraph and then for each speed we progressively fill in the
+ * parts of the speed map which weren't previously filled in.
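+ *    For example, two S400 nodes connected through an S100 node fall in
+ *    different S400 subgraphs, so their entry keeps the IEEE1394_S100
+ *    value set at initialization.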
+ */
+void
+s1394_speed_map_fill(s1394_hal_t *hal)
+{
+ uint_t number_of_nodes;
+ int i;
+ int j;
+ int node_num;
+
+ TNF_PROBE_0_DEBUG(s1394_speed_map_fill_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ number_of_nodes = hal->number_of_nodes;
+ s1394_speed_map_initialize(hal);
+
+ /* Mark all speed = IEEE1394_S100 nodes in the Speed Map */
+ for (i = 0; i < number_of_nodes; i++) {
+ if (selfid_speed(hal->topology_tree[i].selfid_packet) ==
+ IEEE1394_S100) {
+ hal->slowest_node_speed = IEEE1394_S100;
+ node_num = IEEE1394_SELFID_PHYID(
+ hal->topology_tree[i].selfid_packet);
+ for (j = 0; j < number_of_nodes; j++) {
+ if (j != node_num) {
+ hal->speed_map[node_num][j] =
+ IEEE1394_S100;
+ hal->speed_map[j][node_num] =
+ IEEE1394_S100;
+ }
+ }
+ }
+ }
+
+ s1394_speed_map_fill_speed_N(hal, IEEE1394_S200);
+ s1394_speed_map_fill_speed_N(hal, IEEE1394_S400);
+
+ /* Fill in the diagonal */
+ for (i = 0; i < number_of_nodes; i++) {
+ hal->speed_map[i][i] =
+ selfid_speed(hal->topology_tree[i].selfid_packet);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_speed_map_fill_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_speed_map_fill_speed_N(),
+ * given a minimum link speed, creates subgraphs of the original bus which
+ * contain only the necessary edges (see speed_map_fill() above). For each
+ * of the subgraphs, it visits and fills in the entries in the speed map
+ * which correspond to a pair of these nodes.
+ */
+static void
+s1394_speed_map_fill_speed_N(s1394_hal_t *hal, int min_spd)
+{
+ s1394_node_t *tmp;
+ uint_t number_of_nodes;
+ int i;
+ int j;
+ int k;
+ int size;
+ int ix_a, ix_b;
+
+ TNF_PROBE_0_DEBUG(s1394_speed_map_fill_speed_N_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ number_of_nodes = hal->number_of_nodes;
+
+ /* Prepare the topology tree */
+ s1394_topology_tree_mark_all_unvisited(hal);
+
+ /* To ensure that the queue is empty */
+ hal->hal_queue_front = hal->hal_queue_back = 0;
+
+ for (i = 0; i < number_of_nodes; i++) {
+ /* If the node's speed == min_spd and it hasn't been visited */
+ if (!NODE_VISITED(&hal->topology_tree[i]) &&
+ (selfid_speed(hal->topology_tree[i].selfid_packet) ==
+ min_spd)) {
+
+ if (min_spd < hal->slowest_node_speed)
+ hal->slowest_node_speed = (uint8_t)min_spd;
+
+ SET_NODE_VISITED(&hal->topology_tree[i]);
+ s1394_hal_queue_insert(hal, &hal->topology_tree[i]);
+
+ while (hal->hal_queue_front != hal->hal_queue_back) {
+ tmp = (s1394_node_t *)s1394_hal_queue_remove(
+ hal);
+ /* Add node number to the list */
+ s1394_node_number_list_add(hal,
+ IEEE1394_SELFID_PHYID(tmp->selfid_packet));
+
+ for (j = 0; j < IEEE1394_MAX_NUM_PORTS; j++) {
+ if ((tmp->phy_port[j] != NULL) &&
+ (!NODE_VISITED(tmp->phy_port[j]))) {
+ if (selfid_speed(
+ tmp->phy_port[j]->
+ selfid_packet) >= min_spd) {
+ SET_NODE_VISITED(
+ tmp->phy_port[j]);
+ s1394_hal_queue_insert(
+ hal,
+ tmp->phy_port[j]);
+ }
+ }
+ }
+ }
+
+ /* For each pair, mark speed_map as min_spd */
+ size = hal->hal_node_number_list_size;
+ for (j = 0; j < size; j++) {
+ for (k = 0; k < size; k++) {
+ if (j != k) {
+ ix_a = hal->
+ hal_node_number_list[j];
+ ix_b = hal->
+ hal_node_number_list[k];
+ hal->speed_map[ix_a][ix_b] =
+ (uint8_t)min_spd;
+ }
+ }
+ }
+
+ /* Flush the Node Number List */
+ hal->hal_node_number_list_size = 0;
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_speed_map_fill_speed_N_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_speed_map_initialize()
+ * fills in the speed_map with IEEE1394_S100's and SPEED_MAP_INVALID's in
+ * the appropriate places. These will be overwritten by
+ * s1394_speed_map_fill().
+ */
+static void
+s1394_speed_map_initialize(s1394_hal_t *hal)
+{
+ uint_t number_of_nodes;
+ int i, j;
+
+ TNF_PROBE_0_DEBUG(s1394_speed_map_initialize_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ number_of_nodes = hal->number_of_nodes;
+ for (i = 0; i < number_of_nodes; i++) {
+ for (j = 0; j < number_of_nodes; j++) {
+ if (i != j)
+ hal->speed_map[i][j] = IEEE1394_S100;
+ else
+ hal->speed_map[i][j] = SPEED_MAP_INVALID;
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_speed_map_initialize_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_speed_map_get()
+ * queries the speed_map[] for a given pair of nodes.
+ */
+uint8_t
+s1394_speed_map_get(s1394_hal_t *hal, uint_t from_node, uint_t to_node)
+{
+ /* If it's not a valid node, then return slowest_node_speed */
+ if (to_node >= hal->number_of_nodes) {
+		/* Send at the fastest speed everyone will see */
+ return (hal->slowest_node_speed);
+ }
+ /* else return the correct maximum speed */
+ return (hal->speed_map[from_node][to_node]);
+}
+
+/*
+ * s1394_update_speed_map_link_speeds()
+ * takes into account information from Config ROM queries. Any P1394A
+ * device can have a link with a different speed than its PHY. In this
+ * case, the slower speed must be accounted for in order for communication
+ * with the remote node to work.
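+ *    For example, a node whose PHY reports S400 in its SelfID packet but
+ *    whose Bus_Info_Block advertises only an S200-capable link must be
+ *    limited to S200 in the speed map.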
+ */
+void
+s1394_update_speed_map_link_speeds(s1394_hal_t *hal)
+{
+ uint32_t bus_capabilities;
+ uint8_t link_speed;
+ uint_t number_of_nodes;
+ int i, j;
+
+ TNF_PROBE_0_DEBUG(s1394_update_speed_map_link_speeds_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ number_of_nodes = hal->number_of_nodes;
+
+ for (i = 0; i < number_of_nodes; i++) {
+
+ /* Skip invalid config ROMs */
+ if (CFGROM_VALID(&hal->topology_tree[i])) {
+
+ ASSERT(hal->topology_tree[i].cfgrom);
+
+ bus_capabilities = hal->topology_tree[i].
+ cfgrom[IEEE1212_NODE_CAP_QUAD];
+
+ /* Skip if Bus_Info_Block generation is 0 */
+ /* because it isn't a P1394a device */
+ if ((bus_capabilities & IEEE1394_BIB_GEN_MASK) != 0) {
+ link_speed = (bus_capabilities &
+ IEEE1394_BIB_LNK_SPD_MASK);
+
+ for (j = 0; j < number_of_nodes; j++) {
+ /* Update if link_speed is slower */
+ if (hal->speed_map[i][j] > link_speed) {
+ hal->speed_map[i][j] =
+ link_speed;
+ hal->speed_map[j][i] =
+ link_speed;
+ }
+
+ if (link_speed <
+ hal->slowest_node_speed)
+ hal->slowest_node_speed =
+ link_speed;
+ }
+ }
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_update_speed_map_link_speeds_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_get_isoch_rsrc_mgr()
+ * looks at the SelfID packets to determine the Isochronous Resource
+ * Manager's node ID. The IRM is the highest numbered node with both
+ * the "L"-bit and the "C"-bit in its SelfID packets turned on. If no
+ * IRM is found on the bus, then -1 is returned.
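+ *    ("L" indicates that the node's link layer is active; "C" that the
+ *    node is a contender for the resource manager role.)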
+ */
+int
+s1394_get_isoch_rsrc_mgr(s1394_hal_t *hal)
+{
+ int i;
+
+ TNF_PROBE_0_DEBUG(s1394_get_isoch_rsrc_mgr_enter, S1394_TNF_SL_BR_STACK,
+ "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ for (i = hal->number_of_nodes - 1; i >= 0; i--) {
+ /* Highest numbered node with L=1 and C=1 */
+ if ((IEEE1394_SELFID_ISLINKON(hal->selfid_ptrs[i])) &&
+ (IEEE1394_SELFID_ISCONTENDER(hal->selfid_ptrs[i]))) {
+
+ TNF_PROBE_0_DEBUG(s1394_get_isoch_rsrc_mgr_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (i);
+ }
+ }
+
+ /* No Isochronous Resource Manager */
+ TNF_PROBE_0_DEBUG(s1394_get_isoch_rsrc_mgr_exit, S1394_TNF_SL_BR_STACK,
+ "");
+ return (-1);
+}
+
+/*
+ * s1394_physical_arreq_setup_all()
+ * is used to enable the physical filters for the link. If a target has
+ * registered physical space allocations, then the corresponding node's
+ * bit is set. This is done for all targets on a HAL (usually after bus
+ * reset).
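+ *    The mask passed to the HAL is a 64-bit vector in which bit N
+ *    represents node number N.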
+ */
+void
+s1394_physical_arreq_setup_all(s1394_hal_t *hal)
+{
+ s1394_target_t *curr_target;
+ uint64_t mask = 0;
+ uint32_t node_num;
+ uint_t generation;
+
+ TNF_PROBE_0_DEBUG(s1394_physical_arreq_setup_all_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ mutex_enter(&hal->topology_tree_mutex);
+ generation = hal->generation_count;
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+
+ curr_target = hal->target_head;
+ while (curr_target != NULL) {
+ if ((curr_target->on_node != NULL) &&
+ (curr_target->physical_arreq_enabled != 0)) {
+ node_num = curr_target->on_node->node_num;
+ mask = mask | (1 << node_num);
+ }
+ curr_target = curr_target->target_next;
+ }
+ rw_exit(&hal->target_list_rwlock);
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /*
+ * Since it is cleared to 0 on bus reset, set the bits for all
+ * nodes. This call returns DDI_FAILURE if the generation passed
+ * is invalid or if the HAL is shutdown. In either case, it is
+ * acceptable to simply ignore the result and return.
+ */
+ (void) HAL_CALL(hal).physical_arreq_enable_set(
+ hal->halinfo.hal_private, mask, generation);
+
+ TNF_PROBE_0_DEBUG(s1394_physical_arreq_setup_all_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_physical_arreq_set_one()
+ * is used to enable the physical filters for the link. If a target has
+ * registered physical space allocations, then the corresponding node's
+ * bit is set. This is done for one target.
+ */
+void
+s1394_physical_arreq_set_one(s1394_target_t *target)
+{
+ s1394_hal_t *hal;
+ uint64_t mask = 0;
+ uint32_t node_num;
+ uint_t generation;
+
+ TNF_PROBE_0_DEBUG(s1394_physical_arreq_set_one_enter,
+ S1394_TNF_SL_STACK, "");
+
+ /* Find the HAL this target resides on */
+ hal = target->on_hal;
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ mutex_enter(&hal->topology_tree_mutex);
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+
+ if ((target->on_node != NULL) &&
+ (target->physical_arreq_enabled != 0)) {
+ node_num = target->on_node->node_num;
+ mask = mask | (1 << node_num);
+
+ generation = hal->generation_count;
+
+ rw_exit(&hal->target_list_rwlock);
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /*
+ * Set the bit corresponding to this node. This call
+ * returns DDI_FAILURE if the generation passed
+ * is invalid or if the HAL is shutdown. In either case,
+ * it is acceptable to simply ignore the result and return.
+ */
+ (void) HAL_CALL(hal).physical_arreq_enable_set(
+ hal->halinfo.hal_private, mask, generation);
+ } else {
+ rw_exit(&hal->target_list_rwlock);
+ mutex_exit(&hal->topology_tree_mutex);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_physical_arreq_set_one_exit,
+ S1394_TNF_SL_STACK, "");
+}
+
+/*
+ * s1394_physical_arreq_clear_one()
+ * is used to disable the physical filters for OpenHCI. If a target frees
+ * up the last of its registered physical space, then the corresponding
+ * node's bit is cleared. This is done for one target.
+ */
+void
+s1394_physical_arreq_clear_one(s1394_target_t *target)
+{
+ s1394_hal_t *hal;
+ uint64_t mask = 0;
+ uint32_t node_num;
+ uint_t generation;
+
+ TNF_PROBE_0_DEBUG(s1394_physical_arreq_clear_one_enter,
+ S1394_TNF_SL_STACK, "");
+
+ /* Find the HAL this target resides on */
+ hal = target->on_hal;
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ mutex_enter(&hal->topology_tree_mutex);
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+
+ if ((target->on_node != NULL) &&
+ (target->physical_arreq_enabled == 0)) {
+ node_num = target->on_node->node_num;
+ mask = mask | (1 << node_num);
+
+ generation = hal->generation_count;
+
+ rw_exit(&hal->target_list_rwlock);
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /*
+ * Set the bit corresponding to this node. This call
+ * returns DDI_FAILURE if the generation passed
+ * is invalid or if the HAL is shutdown. In either case,
+ * it is acceptable to simply ignore the result and return.
+ */
+ (void) HAL_CALL(hal).physical_arreq_enable_clr(
+ hal->halinfo.hal_private, mask, generation);
+ } else {
+ rw_exit(&hal->target_list_rwlock);
+ mutex_exit(&hal->topology_tree_mutex);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_physical_arreq_clear_one_exit,
+ S1394_TNF_SL_STACK, "");
+}
+
+/*
+ * s1394_topology_tree_get_root_node()
+ * returns the last entry in topology_tree[] as this must always be the
+ * root node.
+ */
+s1394_node_t *
+s1394_topology_tree_get_root_node(s1394_hal_t *hal)
+{
+ TNF_PROBE_0_DEBUG(s1394_topology_tree_get_root_node_enter,
+ S1394_TNF_SL_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_topology_tree_get_root_node_exit,
+ S1394_TNF_SL_STACK, "");
+
+ return (&hal->topology_tree[hal->number_of_nodes - 1]);
+}
diff --git a/usr/src/uts/common/io/1394/s1394_cmp.c b/usr/src/uts/common/io/1394/s1394_cmp.c
new file mode 100644
index 0000000000..05944d7379
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394_cmp.c
@@ -0,0 +1,438 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394_cmp.c
+ * 1394 Services Layer Connection Management Procedures Support Routines
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+
+static void s1394_cmp_init(s1394_hal_t *hal);
+static void s1394_cmp_fini(s1394_hal_t *hal);
+static void s1394_cmp_ompr_recv_read_request(cmd1394_cmd_t *req);
+static void s1394_cmp_impr_recv_read_request(cmd1394_cmd_t *req);
+static void s1394_cmp_ompr_recv_lock_request(cmd1394_cmd_t *req);
+static void s1394_cmp_impr_recv_lock_request(cmd1394_cmd_t *req);
+static void s1394_cmp_notify_reg_change(s1394_hal_t *hal, t1394_cmp_reg_t reg,
+ s1394_target_t *self);
+
+
+/*
+ * number of times to retry notifying registered targets if the target
+ * list changes while the list rwlock is dropped for the duration of
+ * the callback
+ */
+uint_t s1394_cmp_notify_retry_cnt = 3;
+
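+/*
+ * Fixed-address descriptors for the oMPR and iMPR: each describes a
+ * 4-byte region at the IEC 61883 register address, enabled for read
+ * and lock access, with the read/lock handlers below (no write handler).
+ */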
+s1394_fa_descr_t s1394_cmp_ompr_descr = {
+ IEC61883_CMP_OMPR_ADDR,
+ 4,
+ T1394_ADDR_RDENBL | T1394_ADDR_LKENBL,
+ {
+ s1394_cmp_ompr_recv_read_request,
+ NULL,
+ s1394_cmp_ompr_recv_lock_request
+ },
+ 0
+};
+
+s1394_fa_descr_t s1394_cmp_impr_descr = {
+ IEC61883_CMP_IMPR_ADDR,
+ 4,
+ T1394_ADDR_RDENBL | T1394_ADDR_LKENBL,
+ {
+ s1394_cmp_impr_recv_read_request,
+ NULL,
+ s1394_cmp_impr_recv_lock_request
+ },
+ 0
+};
+
+
+int
+s1394_cmp_register(s1394_target_t *target, t1394_cmp_evts_t *evts)
+{
+ s1394_hal_t *hal = target->on_hal;
+ static t1394_cmp_evts_t default_evts = { NULL, NULL };
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_register_enter, S1394_TNF_SL_CMP_STACK, "");
+
+ rw_enter(&hal->target_list_rwlock, RW_WRITER);
+ /*
+ * if registering the first target, claim and initialize addresses
+ */
+ if (s1394_fa_list_is_empty(hal, S1394_FA_TYPE_CMP)) {
+ if (s1394_fa_claim_addr(hal, S1394_FA_TYPE_CMP_OMPR,
+ &s1394_cmp_ompr_descr) != DDI_SUCCESS) {
+ rw_exit(&hal->target_list_rwlock);
+ return (DDI_FAILURE);
+ }
+
+ if (s1394_fa_claim_addr(hal, S1394_FA_TYPE_CMP_IMPR,
+ &s1394_cmp_impr_descr) != DDI_SUCCESS) {
+ s1394_fa_free_addr(hal, S1394_FA_TYPE_CMP_OMPR);
+ rw_exit(&hal->target_list_rwlock);
+ return (DDI_FAILURE);
+ }
+
+ s1394_cmp_init(hal);
+ }
+
+	/* Add to the target list (we only use one list) */
+ s1394_fa_list_add(hal, target, S1394_FA_TYPE_CMP);
+
+ if (evts == NULL) {
+ evts = &default_evts;
+ }
+ target->target_fa[S1394_FA_TYPE_CMP].fat_u.cmp.cm_evts = *evts;
+
+ rw_exit(&hal->target_list_rwlock);
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_register_exit, S1394_TNF_SL_CMP_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+int
+s1394_cmp_unregister(s1394_target_t *target)
+{
+ s1394_hal_t *hal = target->on_hal;
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_unregister_enter, S1394_TNF_SL_CMP_STACK,
+ "");
+
+ rw_enter(&hal->target_list_rwlock, RW_WRITER);
+
+ if (s1394_fa_list_remove(hal, target,
+ S1394_FA_TYPE_CMP) == DDI_SUCCESS) {
+ if (s1394_fa_list_is_empty(hal, S1394_FA_TYPE_CMP)) {
+ s1394_fa_free_addr(hal, S1394_FA_TYPE_CMP_OMPR);
+ s1394_fa_free_addr(hal, S1394_FA_TYPE_CMP_IMPR);
+ s1394_cmp_fini(hal);
+ }
+ } else {
+ TNF_PROBE_0(s1394_cmp_unregister_common_error_list,
+ S1394_TNF_SL_CMP_ERROR, "");
+ }
+
+ rw_exit(&hal->target_list_rwlock);
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_unregister_exit, S1394_TNF_SL_CMP_STACK,
+ "");
+ return (DDI_SUCCESS);
+}
+
+int
+s1394_cmp_read(s1394_target_t *target, t1394_cmp_reg_t reg, uint32_t *valp)
+{
+ s1394_hal_t *hal = target->on_hal;
+ s1394_cmp_hal_t *cmp = &hal->hal_cmp;
+ int ret = DDI_FAILURE;
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_read_enter, S1394_TNF_SL_CMP_STACK, "");
+
+ if (reg == T1394_CMP_OMPR) {
+ rw_enter(&cmp->cmp_ompr_rwlock, RW_READER);
+ *valp = cmp->cmp_ompr_val;
+ rw_exit(&cmp->cmp_ompr_rwlock);
+ ret = DDI_SUCCESS;
+ } else if (reg == T1394_CMP_IMPR) {
+ rw_enter(&cmp->cmp_impr_rwlock, RW_READER);
+ *valp = cmp->cmp_impr_val;
+ rw_exit(&cmp->cmp_impr_rwlock);
+ ret = DDI_SUCCESS;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_read_exit, S1394_TNF_SL_CMP_STACK, "");
+ return (ret);
+}
+
+int
+s1394_cmp_cas(s1394_target_t *target, t1394_cmp_reg_t reg, uint32_t arg_val,
+ uint32_t new_val, uint32_t *old_valp)
+{
+ s1394_hal_t *hal = target->on_hal;
+ s1394_cmp_hal_t *cmp = &hal->hal_cmp;
+ int ret = DDI_SUCCESS;
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_cas_enter, S1394_TNF_SL_CMP_STACK, "");
+
+ if (reg == T1394_CMP_OMPR) {
+ rw_enter(&cmp->cmp_ompr_rwlock, RW_WRITER);
+ *old_valp = cmp->cmp_ompr_val;
+ if (cmp->cmp_ompr_val == arg_val) {
+ cmp->cmp_ompr_val = new_val;
+ }
+ rw_exit(&cmp->cmp_ompr_rwlock);
+ } else if (reg == T1394_CMP_IMPR) {
+ rw_enter(&cmp->cmp_impr_rwlock, RW_WRITER);
+ *old_valp = cmp->cmp_impr_val;
+ if (cmp->cmp_impr_val == arg_val) {
+ cmp->cmp_impr_val = new_val;
+ }
+ rw_exit(&cmp->cmp_impr_rwlock);
+ } else {
+ ret = DDI_FAILURE;
+ }
+
+ /* notify other targets */
+ if (ret == DDI_SUCCESS) {
+ s1394_cmp_notify_reg_change(hal, reg, target);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_cas_exit, S1394_TNF_SL_CMP_STACK, "");
+ return (ret);
+}
+
+static void
+s1394_cmp_init(s1394_hal_t *hal)
+{
+ s1394_cmp_hal_t *cmp = &hal->hal_cmp;
+
+ rw_init(&cmp->cmp_ompr_rwlock, NULL, RW_DRIVER, NULL);
+ rw_init(&cmp->cmp_impr_rwlock, NULL, RW_DRIVER, NULL);
+
+ cmp->cmp_ompr_val = IEC61883_CMP_OMPR_INIT_VAL;
+ cmp->cmp_impr_val = IEC61883_CMP_IMPR_INIT_VAL;
+}
+
+static void
+s1394_cmp_fini(s1394_hal_t *hal)
+{
+ s1394_cmp_hal_t *cmp = &hal->hal_cmp;
+
+ rw_destroy(&cmp->cmp_ompr_rwlock);
+ rw_destroy(&cmp->cmp_impr_rwlock);
+}
+
+/*
+ * iMPR/oMPR read/lock requests
+ */
+static void
+s1394_cmp_ompr_recv_read_request(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal = req->cmd_callback_arg;
+ s1394_cmp_hal_t *cmp = &hal->hal_cmp;
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_ompr_recv_read_request_enter,
+ S1394_TNF_SL_CMP_STACK, "");
+
+ if (req->cmd_type != CMD1394_ASYNCH_RD_QUAD) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ } else {
+ rw_enter(&cmp->cmp_ompr_rwlock, RW_READER);
+ req->cmd_u.q.quadlet_data = cmp->cmp_ompr_val;
+ rw_exit(&cmp->cmp_ompr_rwlock);
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ }
+
+ (void) s1394_send_response(hal, req);
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_ompr_recv_read_request_exit,
+ S1394_TNF_SL_CMP_STACK, "");
+}
+
+static void
+s1394_cmp_impr_recv_read_request(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal = req->cmd_callback_arg;
+ s1394_cmp_hal_t *cmp = &hal->hal_cmp;
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_read_request_enter,
+ S1394_TNF_SL_CMP_STACK, "");
+
+ if (req->cmd_type != CMD1394_ASYNCH_RD_QUAD) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ } else {
+ rw_enter(&cmp->cmp_impr_rwlock, RW_READER);
+ req->cmd_u.q.quadlet_data = cmp->cmp_impr_val;
+ rw_exit(&cmp->cmp_impr_rwlock);
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ }
+
+ (void) s1394_send_response(hal, req);
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_read_request_exit,
+ S1394_TNF_SL_CMP_STACK, "");
+}
+
+static void
+s1394_cmp_ompr_recv_lock_request(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal = req->cmd_callback_arg;
+ s1394_cmp_hal_t *cmp = &hal->hal_cmp;
+ boolean_t notify = B_TRUE;
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_ompr_recv_lock_request_enter,
+ S1394_TNF_SL_CMP_STACK, "");
+
+ if ((req->cmd_type != CMD1394_ASYNCH_LOCK_32) ||
+ (req->cmd_u.l32.lock_type != CMD1394_LOCK_COMPARE_SWAP)) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ notify = B_FALSE;
+ } else {
+ rw_enter(&cmp->cmp_ompr_rwlock, RW_WRITER);
+ req->cmd_u.l32.old_value = cmp->cmp_ompr_val;
+ if (cmp->cmp_ompr_val == req->cmd_u.l32.arg_value) {
+ /* write only allowed bits */
+ cmp->cmp_ompr_val = (req->cmd_u.l32.data_value &
+ IEC61883_CMP_OMPR_LOCK_MASK) |
+ (cmp->cmp_ompr_val & ~IEC61883_CMP_OMPR_LOCK_MASK);
+ }
+ rw_exit(&cmp->cmp_ompr_rwlock);
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ }
+
+ (void) s1394_send_response(hal, req);
+
+ /* notify all targets */
+ if (notify) {
+ s1394_cmp_notify_reg_change(hal, T1394_CMP_OMPR, NULL);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_ompr_recv_lock_request_exit,
+ S1394_TNF_SL_CMP_STACK, "");
+}
+
+static void
+s1394_cmp_impr_recv_lock_request(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal = req->cmd_callback_arg;
+ s1394_cmp_hal_t *cmp = &hal->hal_cmp;
+ boolean_t notify = B_TRUE;
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_lock_request_enter,
+ S1394_TNF_SL_CMP_STACK, "");
+
+ if ((req->cmd_type != CMD1394_ASYNCH_LOCK_32) ||
+ (req->cmd_u.l32.lock_type != CMD1394_LOCK_COMPARE_SWAP)) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ notify = B_FALSE;
+ } else {
+ rw_enter(&cmp->cmp_impr_rwlock, RW_WRITER);
+ req->cmd_u.l32.old_value = cmp->cmp_impr_val;
+ if (cmp->cmp_impr_val == req->cmd_u.l32.arg_value) {
+ /* write only allowed bits */
+ cmp->cmp_impr_val = (req->cmd_u.l32.data_value &
+ IEC61883_CMP_IMPR_LOCK_MASK) |
+ (cmp->cmp_impr_val & ~IEC61883_CMP_IMPR_LOCK_MASK);
+ }
+ rw_exit(&cmp->cmp_impr_rwlock);
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ }
+
+ (void) s1394_send_response(hal, req);
+
+ /* notify all targets */
+ if (notify) {
+ s1394_cmp_notify_reg_change(hal, T1394_CMP_IMPR, NULL);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_lock_request_exit,
+ S1394_TNF_SL_CMP_STACK, "");
+}
+
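+/*
+ * A note on the two lock handlers above: the masked update is what
+ * keeps a compare-swap from disturbing the read-only fields of the
+ * oMPR/iMPR. With illustrative values (the real masks are the
+ * IEC61883_CMP_*_LOCK_MASK definitions),
+ *
+ *	cur  = 0xBFFFFF00	current register contents
+ *	mask = 0x00FFFFFF	lock mask for the register
+ *	data = 0x12345678	requester's data_value
+ *
+ *	new  = (data & mask) | (cur & ~mask) = 0xBF345678
+ *
+ * so only the bits under the mask take the requester's value. The
+ * requester learns whether it won the race by comparing the returned
+ * old_value with its arg_value.
+ */
+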
+/*
+ * Notify registered targets except 'self' about register value change
+ */
+static void
+s1394_cmp_notify_reg_change(s1394_hal_t *hal, t1394_cmp_reg_t reg,
+ s1394_target_t *self)
+{
+ s1394_target_t *target;
+ s1394_fa_target_t *fat;
+ uint_t saved_gen;
+ int num_retries = 0;
+ void (*cb)(opaque_t, t1394_cmp_reg_t);
+ opaque_t arg;
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_notify_reg_change_enter,
+ S1394_TNF_SL_CMP_STACK, "");
+
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+
+start:
+ target = hal->hal_fa[S1394_FA_TYPE_CMP].fal_head;
+
+ for (; target; target = fat->fat_next) {
+ fat = &target->target_fa[S1394_FA_TYPE_CMP];
+
+ /*
+		 * Even if the target list changes while the lock is dropped,
+		 * comparing with self is safe because a target may not
+		 * unregister until all of its CMP operations are completed.
+ */
+ if (target == self) {
+ continue;
+ }
+
+ cb = fat->fat_u.cmp.cm_evts.cmp_reg_change;
+ if (cb == NULL) {
+ continue;
+ }
+ arg = fat->fat_u.cmp.cm_evts.cmp_arg;
+
+ saved_gen = s1394_fa_list_gen(hal, S1394_FA_TYPE_CMP);
+
+ rw_exit(&hal->target_list_rwlock);
+ cb(arg, reg);
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+
+ /*
+		 * The list could have changed while we dropped the lock.
+		 * In that case, start all over again: missing a register
+		 * change can have more serious consequences for a target
+		 * than receiving the same notification more than once.
+ */
+ if (saved_gen != s1394_fa_list_gen(hal, S1394_FA_TYPE_CMP)) {
+ TNF_PROBE_2(s1394_cmp_notify_reg_change_error,
+ S1394_TNF_SL_CMP_ERROR, "",
+ tnf_string, msg, "list gen changed",
+ tnf_opaque, num_retries, num_retries);
+ if (++num_retries <= s1394_cmp_notify_retry_cnt) {
+ goto start;
+ } else {
+ break;
+ }
+ }
+ }
+
+ rw_exit(&hal->target_list_rwlock);
+
+ TNF_PROBE_0_DEBUG(s1394_cmp_notify_reg_change_exit,
+ S1394_TNF_SL_CMP_STACK, "");
+}
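+
+/*
+ * A note on the walk above: the list generation is sampled while the
+ * target list lock is held, each callback runs with the lock dropped
+ * (a callback may block or call back into the framework), and a
+ * generation mismatch after re-acquiring the lock restarts the walk
+ * from the head. The number of restarts is bounded by
+ * s1394_cmp_notify_retry_cnt, trading possible duplicate notifications
+ * for the guarantee that no target silently misses an update.
+ */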
diff --git a/usr/src/uts/common/io/1394/s1394_csr.c b/usr/src/uts/common/io/1394/s1394_csr.c
new file mode 100644
index 0000000000..306a800886
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394_csr.c
@@ -0,0 +1,2198 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394_csr.c
+ * 1394 Services Layer CSR and Config ROM Routines
+ * Contains all of the CSR callback routines for various required
+ * CSR registers. Also contains routines for their initialization
+ * and destruction, as well as routines to handle the processing
+ * of Config ROM update requests.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+#include <sys/1394/ieee1394.h>
+#include <sys/1394/ieee1212.h>
+
+static void s1394_CSR_state_clear(cmd1394_cmd_t *req);
+
+static void s1394_CSR_state_set(cmd1394_cmd_t *req);
+
+static void s1394_CSR_node_ids(cmd1394_cmd_t *req);
+
+static void s1394_CSR_reset_start(cmd1394_cmd_t *req);
+
+static void s1394_CSR_split_timeout(cmd1394_cmd_t *req);
+
+static void s1394_CSR_argument_regs(cmd1394_cmd_t *req);
+
+static void s1394_CSR_test_regs(cmd1394_cmd_t *req);
+
+static void s1394_CSR_interrupt_regs(cmd1394_cmd_t *req);
+
+static void s1394_CSR_clock_regs(cmd1394_cmd_t *req);
+
+static void s1394_CSR_message_regs(cmd1394_cmd_t *req);
+
+static void s1394_CSR_cycle_time(cmd1394_cmd_t *req);
+
+static void s1394_CSR_bus_time(cmd1394_cmd_t *req);
+
+static void s1394_CSR_busy_timeout(cmd1394_cmd_t *req);
+
+static void s1394_CSR_IRM_regs(cmd1394_cmd_t *req);
+
+static void s1394_CSR_topology_map(cmd1394_cmd_t *req);
+
+static void s1394_common_CSR_routine(s1394_hal_t *hal, cmd1394_cmd_t *req);
+
+static int s1394_init_config_rom_structures(s1394_hal_t *hal);
+
+static int s1394_destroy_config_rom_structures(s1394_hal_t *hal);
+
+/*
+ * s1394_setup_CSR_space()
+ *    sets up the local host's CSR registers and callback routines.
+ */
+int
+s1394_setup_CSR_space(s1394_hal_t *hal)
+{
+ s1394_addr_space_blk_t *curr_blk;
+ t1394_alloc_addr_t addr;
+ t1394_addr_enable_t rw_flags;
+ int result;
+
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_enter, S1394_TNF_SL_CSR_STACK,
+ "");
+
+ /*
+	 * The claims made below are not freed up in this routine. If
+	 * one of the s1394_claim_addr_blk() calls fails, all of the
+	 * previously successful claims will be freed up in
+	 * s1394_destroy_addr_space() after this routine returns
+	 * DDI_FAILURE.
+ */
+
+ rw_flags = T1394_ADDR_RDENBL | T1394_ADDR_WRENBL;
+
+ /*
+ * STATE_CLEAR
+ * see IEEE 1394-1995, Section 8.3.2.2.1 or
+ * IEEE 1212-1994, Section 7.4.1
+ */
+ addr.aa_address = IEEE1394_CSR_STATE_CLEAR;
+ addr.aa_length = IEEE1394_QUADLET;
+ addr.aa_enable = rw_flags;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_state_clear;
+ addr.aa_evts.recv_write_request = s1394_CSR_state_clear;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "STATE_CLEAR: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * STATE_SET
+ * see IEEE 1394-1995, Section 8.3.2.2.2 or
+ * IEEE 1212-1994, Section 7.4.2
+ */
+ addr.aa_address = IEEE1394_CSR_STATE_SET;
+ addr.aa_length = IEEE1394_QUADLET;
+ addr.aa_enable = T1394_ADDR_WRENBL;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = NULL;
+ addr.aa_evts.recv_write_request = s1394_CSR_state_set;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "STATE_SET: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * NODE_IDS
+ * see IEEE 1394-1995, Section 8.3.2.2.3 or
+ * IEEE 1212-1994, Section 7.4.3
+ */
+ addr.aa_address = IEEE1394_CSR_NODE_IDS;
+ addr.aa_length = IEEE1394_QUADLET;
+ addr.aa_enable = rw_flags;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_node_ids;
+ addr.aa_evts.recv_write_request = s1394_CSR_node_ids;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "NODE_IDS: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * RESET_START
+ * see IEEE 1394-1995, Section 8.3.2.2.4 or
+ * IEEE 1212-1994, Section 7.4.4
+ */
+ addr.aa_address = IEEE1394_CSR_RESET_START;
+ addr.aa_length = IEEE1394_QUADLET;
+ addr.aa_enable = T1394_ADDR_WRENBL;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = NULL;
+ addr.aa_evts.recv_write_request = s1394_CSR_reset_start;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "RESET_START: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * SPLIT_TIMEOUT
+ * see IEEE 1394-1995, Section 8.3.2.2.6 or
+ * IEEE 1212-1994, Section 7.4.7
+ */
+ addr.aa_address = IEEE1394_CSR_SPLIT_TIMEOUT_HI;
+ addr.aa_length = IEEE1394_OCTLET;
+ addr.aa_enable = rw_flags;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_split_timeout;
+ addr.aa_evts.recv_write_request = s1394_CSR_split_timeout;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "SPLIT_TIMEOUT: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * ARGUMENT_HI and ARGUMENT_LO
+ * see IEEE 1394-1995, Section 8.3.2.2.7 or
+ * IEEE 1212-1994, Section 7.4.8
+ */
+ addr.aa_address = IEEE1394_CSR_ARG_HI;
+ addr.aa_length = 2 * (IEEE1394_QUADLET);
+ addr.aa_enable = rw_flags;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_argument_regs;
+ addr.aa_evts.recv_write_request = s1394_CSR_argument_regs;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "ARGUMENT registers: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * TEST_START and TEST_STATUS
+ * see IEEE 1394-1995, Section 8.3.2.2.7 or
+ * IEEE 1212-1994, Section 7.4.9 - 7.4.10
+ */
+ addr.aa_address = IEEE1394_CSR_TEST_START;
+ addr.aa_length = 2 * (IEEE1394_QUADLET);
+ addr.aa_enable = rw_flags;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_test_regs;
+ addr.aa_evts.recv_write_request = s1394_CSR_test_regs;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "TEST registers: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * INTERRUPT_TARGET and INTERRUPT_MASK
+ * see IEEE 1394-1995, Section 8.3.2.2.9 or
+ * IEEE 1212-1994, Section 7.4.15 - 7.4.16
+ */
+ addr.aa_address = IEEE1394_CSR_INTERRUPT_TARGET;
+ addr.aa_length = 2 * (IEEE1394_QUADLET);
+ addr.aa_enable = rw_flags;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_interrupt_regs;
+ addr.aa_evts.recv_write_request = s1394_CSR_interrupt_regs;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "INTERRUPT registers: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * CLOCK_VALUE, CLOCK_TICK_PERIOD, CLOCK_INFO, etc.
+ * see IEEE 1394-1995, Section 8.3.2.2.10 or
+ * IEEE 1212-1994, Section 7.4.17 - 7.4.20
+ */
+ addr.aa_address = IEEE1394_CSR_CLOCK_VALUE;
+ addr.aa_length = IEEE1394_CSR_CLOCK_VALUE_SZ;
+ addr.aa_enable = rw_flags;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_clock_regs;
+ addr.aa_evts.recv_write_request = s1394_CSR_clock_regs;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "CLOCK registers: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * MESSAGE_REQUEST and MESSAGE_RESPONSE
+ * see IEEE 1394-1995, Section 8.3.2.2.11 or
+ * IEEE 1212-1994, Section 7.4.21
+ */
+ addr.aa_address = IEEE1394_CSR_MESSAGE_REQUEST;
+ addr.aa_length = IEEE1394_CSR_MESSAGE_REQUEST_SZ;
+ addr.aa_enable = rw_flags;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_message_regs;
+ addr.aa_evts.recv_write_request = s1394_CSR_message_regs;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "MESSAGE registers: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * CYCLE_TIME
+ * see IEEE 1394-1995, Section 8.3.2.3.1
+ */
+ addr.aa_address = IEEE1394_SCSR_CYCLE_TIME;
+ addr.aa_length = IEEE1394_QUADLET;
+ addr.aa_enable = rw_flags;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_cycle_time;
+ addr.aa_evts.recv_write_request = s1394_CSR_cycle_time;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "CYCLE_TIME: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * BUS_TIME
+ * see IEEE 1394-1995, Section 8.3.2.3.2
+ */
+ addr.aa_address = IEEE1394_SCSR_BUS_TIME;
+ addr.aa_length = IEEE1394_QUADLET;
+ addr.aa_enable = rw_flags;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_bus_time;
+ addr.aa_evts.recv_write_request = s1394_CSR_bus_time;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "BUS_TIME: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * BUSY_TIMEOUT
+ * see IEEE 1394-1995, Section 8.3.2.3.5
+ */
+ addr.aa_address = IEEE1394_SCSR_BUSY_TIMEOUT;
+ addr.aa_length = IEEE1394_QUADLET;
+ addr.aa_enable = rw_flags;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_busy_timeout;
+ addr.aa_evts.recv_write_request = s1394_CSR_busy_timeout;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "BUSY_TIMEOUT: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * BUS_MANAGER_ID
+ * BANDWIDTH_AVAILABLE
+ * CHANNELS_AVAILABLE
+ * see IEEE 1394-1995, Section 8.3.2.3.6 - 8.3.2.3.8
+ */
+ addr.aa_address = IEEE1394_SCSR_BUSMGR_ID;
+ addr.aa_length = 3 * (IEEE1394_QUADLET);
+ addr.aa_enable = T1394_ADDR_RDENBL | T1394_ADDR_LKENBL;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_IRM_regs;
+ addr.aa_evts.recv_write_request = NULL;
+ addr.aa_evts.recv_lock_request = s1394_CSR_IRM_regs;
+ addr.aa_kmem_bufp = NULL;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "IRM registers: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Reserved for Configuration ROM
+ * see IEEE 1394-1995, Section 8.3.2.5.3
+ */
+ addr.aa_address = IEEE1394_CONFIG_ROM_ADDR;
+ addr.aa_length = IEEE1394_CONFIG_ROM_SZ;
+ result = s1394_reserve_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "Unable to reserve Config ROM");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * TOPOLOGY_MAP
+ * see IEEE 1394-1995, Section 8.3.2.4.1
+ */
+ hal->CSR_topology_map = kmem_zalloc(IEEE1394_UCSR_TOPOLOGY_MAP_SZ,
+ KM_SLEEP);
+ addr.aa_address = IEEE1394_UCSR_TOPOLOGY_MAP;
+ addr.aa_length = IEEE1394_UCSR_TOPOLOGY_MAP_SZ;
+ addr.aa_enable = T1394_ADDR_RDENBL;
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_evts.recv_read_request = s1394_CSR_topology_map;
+ addr.aa_evts.recv_write_request = NULL;
+ addr.aa_evts.recv_lock_request = NULL;
+ addr.aa_kmem_bufp = (caddr_t)hal->CSR_topology_map;
+ addr.aa_arg = hal;
+ result = s1394_claim_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ kmem_free((void *)hal->CSR_topology_map,
+ IEEE1394_UCSR_TOPOLOGY_MAP_SZ);
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "TOPOLOGY_MAP: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+ curr_blk = (s1394_addr_space_blk_t *)(addr.aa_hdl);
+ /* Set up the block so that we free kmem_bufp at detach */
+ curr_blk->free_kmem_bufp = B_TRUE;
+
+ /*
+ * Reserve the SPEED_MAP
+ * see IEEE 1394-1995, Section 8.3.2.4.1
+ * (obsoleted in P1394A)
+ */
+ addr.aa_address = IEEE1394_UCSR_SPEED_MAP;
+ addr.aa_length = IEEE1394_UCSR_SPEED_MAP_SZ;
+ result = s1394_reserve_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "SPEED_MAP: CSR setup failed");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Reserved - Boundary between reserved Serial Bus
+ * dependent registers and other CSR register space.
+ * See IEEE 1394-1995, Table 8-4 for this address.
+ *
+ * This quadlet is reserved as a way of preventing
+	 * the inadvertent allocation of a part of CSR space
+	 * that is likely to be used by future specifications.
+ */
+ addr.aa_address = IEEE1394_UCSR_RESERVED_BOUNDARY;
+ addr.aa_length = IEEE1394_QUADLET;
+ result = s1394_reserve_addr_blk(hal, &addr);
+ if (result != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_setup_CSR_space_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "Unable to reserve boundary quadlet");
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit,
+		    S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_setup_CSR_space_exit, S1394_TNF_SL_CSR_STACK,
+ "");
+ return (DDI_SUCCESS);
+}
+
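+/*
+ * To summarize the routine above: registers that the Services Layer
+ * must respond to are claimed with s1394_claim_addr_blk() and given
+ * read/write/lock callbacks, while ranges that must merely never be
+ * handed out to targets (the Config ROM area, the obsolete SPEED_MAP,
+ * and the boundary quadlet) are only reserved with
+ * s1394_reserve_addr_blk() and have no callbacks attached.
+ */
+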
+/*
+ * s1394_CSR_state_clear()
+ *    handles all requests to the STATE_CLEAR CSR register. It enforces
+ *    that certain bits can be twiddled only by a given node (the IRM or
+ *    Bus Manager).
+ */
+static void
+s1394_CSR_state_clear(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+ uint32_t data;
+ uint_t offset;
+ uint_t is_from;
+ uint_t should_be_from;
+ int result;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_state_clear_enter, S1394_TNF_SL_CSR_STACK,
+ "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ /* Register offset */
+ offset = req->cmd_addr & IEEE1394_CSR_OFFSET_MASK;
+
+ /* Verify that request is quadlet aligned */
+ if ((offset & 0x3) != 0) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_state_clear_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return;
+ }
+
+ /* Only writes from IRM or Bus Mgr allowed (in some cases) */
+ mutex_enter(&hal->topology_tree_mutex);
+ is_from = IEEE1394_NODE_NUM(req->nodeID);
+ if (hal->bus_mgr_node != -1)
+ should_be_from = IEEE1394_NODE_NUM(hal->bus_mgr_node);
+ else if (hal->IRM_node != -1)
+ should_be_from = IEEE1394_NODE_NUM(hal->IRM_node);
+ else
+ should_be_from = S1394_INVALID_NODE_NUM;
+ mutex_exit(&hal->topology_tree_mutex);
+
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ /*
+ * The csr_read() call can return DDI_FAILURE if the HAL
+ * is shutdown or if the register at "offset" is
+ * unimplemented. But although the STATE_CLEAR register
+ * is required to be implemented and readable, we will
+ * return IEEE1394_RESP_ADDRESS_ERROR in the response if
+ * we ever see this error.
+ */
+ result = HAL_CALL(hal).csr_read(hal->halinfo.hal_private,
+ offset, &data);
+ if (result == DDI_SUCCESS) {
+ req->cmd_u.q.quadlet_data = data;
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ } else {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ }
+ break;
+
+ case CMD1394_ASYNCH_WR_QUAD:
+ data = req->cmd_u.q.quadlet_data;
+
+ /* CMSTR bit - request must be from bus_mgr/IRM */
+ if (is_from != should_be_from) {
+ data = data & ~IEEE1394_CSR_STATE_CMSTR;
+ }
+
+ mutex_enter(&hal->topology_tree_mutex);
+ /* DREQ bit - disabling DREQ can come from anyone */
+ if (data & IEEE1394_CSR_STATE_DREQ) {
+ hal->disable_requests_bit = 0;
+ if (hal->hal_state == S1394_HAL_DREQ)
+ hal->hal_state = S1394_HAL_NORMAL;
+ }
+
+ /* ABDICATE bit */
+ if (data & IEEE1394_CSR_STATE_ABDICATE) {
+ hal->abdicate_bus_mgr_bit = 0;
+ }
+ mutex_exit(&hal->topology_tree_mutex);
+ /*
+ * The csr_write() call can return DDI_FAILURE if the HAL
+ * is shutdown or if the register at "offset" is
+ * unimplemented. But although the STATE_CLEAR register
+ * is required to be implemented and writeable, we will
+ * return IEEE1394_RESP_ADDRESS_ERROR in the response if
+ * we ever see this error.
+ */
+ result = HAL_CALL(hal).csr_write(hal->halinfo.hal_private,
+ offset, data);
+ if (result == DDI_SUCCESS) {
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ } else {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ }
+ break;
+
+ default:
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ }
+
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_state_clear_exit, S1394_TNF_SL_CSR_STACK,
+ "");
+}
+
+/*
+ * s1394_CSR_state_set()
+ *    handles all requests to the STATE_SET CSR register. It enforces
+ *    that certain bits can be twiddled only by a given node (the IRM or
+ *    Bus Manager).
+ */
+static void
+s1394_CSR_state_set(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+ uint32_t data;
+ uint_t offset;
+ uint_t is_from;
+ uint_t should_be_from;
+ uint_t hal_node_num;
+ uint_t hal_number_of_nodes;
+ int result;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_state_set_enter, S1394_TNF_SL_CSR_STACK,
+ "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ /* Register offset */
+ offset = req->cmd_addr & IEEE1394_CSR_OFFSET_MASK;
+
+ /* Verify that request is quadlet aligned */
+ if ((offset & 0x3) != 0) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_state_set_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return;
+ }
+
+ /* Only writes from IRM or Bus Mgr allowed (in some cases) */
+ mutex_enter(&hal->topology_tree_mutex);
+ is_from = IEEE1394_NODE_NUM(req->nodeID);
+ if (hal->bus_mgr_node != -1)
+ should_be_from = IEEE1394_NODE_NUM(hal->bus_mgr_node);
+ else if (hal->IRM_node != -1)
+ should_be_from = IEEE1394_NODE_NUM(hal->IRM_node);
+ else
+ should_be_from = S1394_INVALID_NODE_NUM;
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ hal_number_of_nodes = hal->number_of_nodes;
+ mutex_exit(&hal->topology_tree_mutex);
+
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_WR_QUAD:
+ data = req->cmd_u.q.quadlet_data;
+
+ /* CMSTR bit - request must be from bus_mgr/IRM */
+ /* & must be root to have bit set */
+ if ((is_from != should_be_from) ||
+ (hal_node_num != (hal_number_of_nodes - 1))) {
+ data = data & ~IEEE1394_CSR_STATE_CMSTR;
+ }
+
+ mutex_enter(&hal->topology_tree_mutex);
+ /* DREQ bit - only bus_mgr/IRM can set this bit */
+ if (is_from != should_be_from) {
+ data = data & ~IEEE1394_CSR_STATE_DREQ;
+
+ } else if (data & IEEE1394_CSR_STATE_DREQ) {
+ hal->disable_requests_bit = 1;
+ if (hal->hal_state == S1394_HAL_NORMAL)
+ hal->hal_state = S1394_HAL_DREQ;
+ }
+ /* ABDICATE bit */
+ if (data & IEEE1394_CSR_STATE_ABDICATE) {
+ hal->abdicate_bus_mgr_bit = 1;
+ }
+ mutex_exit(&hal->topology_tree_mutex);
+ /*
+ * The csr_write() call can return DDI_FAILURE if the HAL
+ * is shutdown or if the register at "offset" is
+ * unimplemented. But although the STATE_SET register
+ * is required to be implemented and writeable, we will
+ * return IEEE1394_RESP_ADDRESS_ERROR in the response if
+ * we ever see this error.
+ */
+ result = HAL_CALL(hal).csr_write(hal->halinfo.hal_private,
+ offset, data);
+ if (result == DDI_SUCCESS) {
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ } else {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ }
+ break;
+
+ default:
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ }
+
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_state_set_exit, S1394_TNF_SL_CSR_STACK,
+ "");
+}
+
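+/*
+ * The two handlers above implement the usual CSR set/clear pairing:
+ * STATE_SET and STATE_CLEAR are two views of a single state word.
+ * Writing a one to a bit position sets (via STATE_SET) or clears (via
+ * STATE_CLEAR) that state bit; zero bits leave the state untouched.
+ * As a sketch (quadlet_write() is illustrative, not a real interface),
+ * a bus manager lifting a "disable requests" condition would issue
+ *
+ *	quadlet_write(IEEE1394_CSR_STATE_CLEAR, IEEE1394_CSR_STATE_DREQ);
+ *
+ * which arrives here as a CMD1394_ASYNCH_WR_QUAD and clears
+ * hal->disable_requests_bit in s1394_CSR_state_clear() above.
+ */
+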
+/*
+ * s1394_CSR_node_ids()
+ * handles all requests to the NODE_IDS CSR register. It passes all
+ * requests to the common routine - s1394_common_CSR_routine().
+ */
+static void
+s1394_CSR_node_ids(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_node_ids_enter, S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ s1394_common_CSR_routine(hal, req);
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_node_ids_exit, S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_CSR_reset_start()
+ * handles all requests to the RESET_START CSR register. Only write
+ * requests are legal, everything else gets a type_error response.
+ */
+static void
+s1394_CSR_reset_start(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+ uint32_t data;
+ uint_t offset;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_reset_start_enter, S1394_TNF_SL_CSR_STACK,
+ "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ /* RESET_START register offset */
+ offset = req->cmd_addr & IEEE1394_CSR_OFFSET_MASK;
+
+ /* Verify that request is quadlet aligned */
+ if ((offset & 0x3) != 0) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_reset_start_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return;
+ }
+
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_WR_QUAD:
+ data = req->cmd_u.q.quadlet_data;
+ /*
+ * The csr_write() call can return DDI_FAILURE if the HAL
+ * is shutdown or if the register at "offset" is
+		 * unimplemented. Because we don't do anything with
+		 * the RESET_START register, we will ignore failures and
+ * return IEEE1394_RESP_COMPLETE regardless.
+ */
+ (void) HAL_CALL(hal).csr_write(hal->halinfo.hal_private,
+ offset, data);
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ break;
+
+ default:
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ }
+
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_reset_start_exit, S1394_TNF_SL_CSR_STACK,
+ "");
+}
+
+/*
+ * s1394_CSR_split_timeout()
+ * handles all requests to the SPLIT_TIMEOUT CSR register. It passes all
+ * requests to the common routine - s1394_common_CSR_routine().
+ */
+static void
+s1394_CSR_split_timeout(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_split_timeout_enter,
+ S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ s1394_common_CSR_routine(hal, req);
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_split_timeout_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_CSR_argument_regs()
+ * handles all requests to the ARGUMENT CSR registers. It passes all
+ * requests to the common routine - s1394_common_CSR_routine().
+ */
+static void
+s1394_CSR_argument_regs(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_argument_regs_enter,
+ S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ s1394_common_CSR_routine(hal, req);
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_argument_regs_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_CSR_test_regs()
+ * handles all requests to the TEST CSR registers. It passes all requests
+ * to the common routine - s1394_common_CSR_routine().
+ */
+static void
+s1394_CSR_test_regs(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+ uint_t offset;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_test_regs_enter,
+ S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ /* TEST register offset */
+ offset = req->cmd_addr & IEEE1394_CSR_OFFSET_MASK;
+
+ /* TEST_STATUS is Read-Only */
+ if ((offset == (IEEE1394_CSR_TEST_STATUS & IEEE1394_CSR_OFFSET_MASK)) &&
+ (req->cmd_type == CMD1394_ASYNCH_WR_QUAD)) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ } else {
+ s1394_common_CSR_routine(hal, req);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_test_regs_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_CSR_interrupt_regs()
+ * handles all requests to the INTERRUPT CSR registers. It passes all
+ * requests to the common routine - s1394_common_CSR_routine().
+ */
+static void
+s1394_CSR_interrupt_regs(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_interrupt_regs_enter,
+ S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ s1394_common_CSR_routine(hal, req);
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_interrupt_regs_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_CSR_clock_regs()
+ * handles all requests to the CLOCK CSR registers. It passes all
+ * requests to the common routine - s1394_common_CSR_routine().
+ */
+static void
+s1394_CSR_clock_regs(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_clock_regs_enter,
+ S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ s1394_common_CSR_routine(hal, req);
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_clock_regs_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_CSR_message_regs()
+ * handles all requests to the MESSAGE CSR registers. It passes all
+ * requests to the common routine - s1394_common_CSR_routine().
+ */
+static void
+s1394_CSR_message_regs(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_message_regs_enter,
+ S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ s1394_common_CSR_routine(hal, req);
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_message_regs_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_CSR_cycle_time()
+ * handles all requests to the CYCLE_TIME CSR register.
+ */
+static void
+s1394_CSR_cycle_time(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+ uint32_t data;
+ uint_t offset;
+ int result;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_cycle_time_enter,
+ S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ /* CYCLE_TIME register offset */
+ offset = req->cmd_addr & IEEE1394_CSR_OFFSET_MASK;
+
+ /* Verify that request is quadlet aligned */
+ if ((offset & 0x3) != 0) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_cycle_time_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return;
+ }
+
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ /*
+ * The csr_read() call can return DDI_FAILURE if the HAL
+ * is shutdown or if the register at "offset" is
+ * unimplemented. But although the CYCLE_TIME register
+ * is required to be implemented on devices capable of
+ * providing isochronous services (like us), we will
+ * return IEEE1394_RESP_ADDRESS_ERROR in the response
+ * if we ever see this error.
+ */
+ result = HAL_CALL(hal).csr_read(hal->halinfo.hal_private,
+ offset, &data);
+ if (result == DDI_SUCCESS) {
+ req->cmd_u.q.quadlet_data = data;
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ } else {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ }
+ break;
+
+ case CMD1394_ASYNCH_WR_QUAD:
+ data = req->cmd_u.q.quadlet_data;
+ /*
+ * The csr_write() call can return DDI_FAILURE if the HAL
+ * is shutdown or if the register at "offset" is
+ * unimplemented. But although the CYCLE_TIME register
+ * is required to be implemented on devices capable of
+ * providing isochronous services (like us), the effects
+ * of a write are "node-dependent" so we will return
+ * IEEE1394_RESP_ADDRESS_ERROR in the response if we
+ * ever see this error.
+ */
+ result = HAL_CALL(hal).csr_write(hal->halinfo.hal_private,
+ offset, data);
+ if (result == DDI_SUCCESS) {
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ } else {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ }
+ break;
+
+ default:
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ }
+
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_cycle_time_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_CSR_bus_time()
+ * handles all requests to the BUS_TIME CSR register. It enforces that
+ * only a broadcast write request from the IRM or Bus Manager can change
+ * its value.
+ */
+static void
+s1394_CSR_bus_time(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+ uint32_t data;
+ uint_t offset;
+ uint_t is_from;
+ uint_t should_be_from;
+ int result;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_bus_time_enter, S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ /* BUS_TIME register offset */
+ offset = req->cmd_addr & IEEE1394_CSR_OFFSET_MASK;
+
+ /* Verify that request is quadlet aligned */
+ if ((offset & 0x3) != 0) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_bus_time_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return;
+ }
+
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ /*
+ * The csr_read() call can return DDI_FAILURE if the HAL
+ * is shutdown or if the register at "offset" is
+ * unimplemented. But although the BUS_TIME register
+ * is required to be implemented by devices capable of
+ * being cycle master (like us), we will return
+ * IEEE1394_RESP_ADDRESS_ERROR in the response if we
+ * ever see this error.
+ */
+ result = HAL_CALL(hal).csr_read(hal->halinfo.hal_private,
+ offset, &data);
+ if (result == DDI_SUCCESS) {
+ req->cmd_u.q.quadlet_data = data;
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ } else {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ }
+ break;
+
+ case CMD1394_ASYNCH_WR_QUAD:
+ /* Only broadcast writes from IRM or Bus Mgr allowed */
+ mutex_enter(&hal->topology_tree_mutex);
+ is_from = IEEE1394_NODE_NUM(req->nodeID);
+ if (hal->bus_mgr_node != -1)
+ should_be_from = IEEE1394_NODE_NUM(hal->bus_mgr_node);
+ else if (hal->IRM_node != -1)
+ should_be_from = IEEE1394_NODE_NUM(hal->IRM_node);
+ else
+ should_be_from = S1394_INVALID_NODE_NUM;
+ mutex_exit(&hal->topology_tree_mutex);
+
+ if ((req->broadcast != 1) || (is_from != should_be_from)) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ break;
+ }
+
+ data = req->cmd_u.q.quadlet_data;
+ /*
+ * The csr_write() call can return DDI_FAILURE if the HAL
+ * is shutdown or if the register at "offset" is
+ * unimplemented. But although the BUS_TIME register
+ * is required to be implemented on devices capable of
+ * being cycle master (like us), we will return
+ * IEEE1394_RESP_ADDRESS_ERROR in the response if we
+ * ever see this error.
+ */
+ result = HAL_CALL(hal).csr_write(hal->halinfo.hal_private,
+ offset, data);
+ if (result == DDI_SUCCESS) {
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ } else {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ }
+ break;
+
+ default:
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ }
+
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_bus_time_exit, S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_CSR_busy_timeout()
+ * handles all requests to the BUSY_TIMEOUT CSR register. It passes all
+ * requests to the common routine - s1394_common_CSR_routine().
+ */
+static void
+s1394_CSR_busy_timeout(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_busy_timeout_enter,
+ S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ s1394_common_CSR_routine(hal, req);
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_busy_timeout_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_CSR_IRM_regs()
+ * handles all requests to the IRM registers, including BANDWIDTH_AVAILABLE,
+ * CHANNELS_AVAILABLE, and the BUS_MANAGER_ID. Only quadlet read and lock
+ * requests are allowed.
+ */
+static void
+s1394_CSR_IRM_regs(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+ uint32_t generation;
+ uint32_t data;
+ uint32_t compare;
+ uint32_t swap;
+ uint32_t old;
+ uint_t offset;
+ int result;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_IRM_regs_enter, S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ /* IRM register offset */
+ offset = (req->cmd_addr & IEEE1394_CSR_OFFSET_MASK);
+
+ /* Verify that request is quadlet aligned */
+ if ((offset & 0x3) != 0) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_IRM_regs_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return;
+ }
+
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ /*
+ * The csr_read() call can return DDI_FAILURE if the HAL
+ * is shutdown or if the register at "offset" is
+ * unimplemented. In many cases these registers will
+ * have been implemented in HW. We are not likely to ever
+ * receive this callback. If we do, though, we will
+ * return IEEE1394_RESP_ADDRESS_ERROR when we get an error
+ * and IEEE1394_RESP_COMPLETE for success.
+ */
+ result = HAL_CALL(hal).csr_read(hal->halinfo.hal_private,
+ offset, &data);
+ if (result == DDI_SUCCESS) {
+ req->cmd_u.q.quadlet_data = data;
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ } else {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ }
+ break;
+
+ case CMD1394_ASYNCH_LOCK_32:
+ mutex_enter(&hal->topology_tree_mutex);
+ generation = hal->generation_count;
+ mutex_exit(&hal->topology_tree_mutex);
+ if (req->cmd_u.l32.lock_type == CMD1394_LOCK_COMPARE_SWAP) {
+ compare = req->cmd_u.l32.arg_value;
+ swap = req->cmd_u.l32.data_value;
+ /*
+ * The csr_cswap32() call can return DDI_FAILURE if
+ * the HAL is shutdown, if the register at "offset"
+ * is unimplemented, or if the generation has changed.
+ * In the last case, it shouldn't matter because the
+ * call to s1394_send_response will fail on a bad
+ * generation and the command will be freed.
+ */
+ result = HAL_CALL(hal).csr_cswap32(
+ hal->halinfo.hal_private, generation,
+ offset, compare, swap, &old);
+ if (result == DDI_SUCCESS) {
+ req->cmd_u.l32.old_value = old;
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ } else {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ }
+ break;
+ } else {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ }
+
+ break;
+
+ default:
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ }
+
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_CSR_IRM_regs_exit, S1394_TNF_SL_CSR_STACK, "");
+}
+
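+/*
+ * For context, the lock case above is the responder half of the usual
+ * IRM allocation protocol. A sketch of the requester half (the
+ * quadlet_read()/cswap32() names are illustrative; real requesters
+ * issue cmd1394 lock requests):
+ *
+ *	old = quadlet_read(BANDWIDTH_AVAILABLE);
+ *	for (;;) {
+ *		if (old < needed)
+ *			break;			(nothing left to allocate)
+ *		prev = cswap32(BANDWIDTH_AVAILABLE, old, old - needed);
+ *		if (prev == old)
+ *			break;			(allocation succeeded)
+ *		old = prev;			(lost the race; retry)
+ *	}
+ *
+ * csr_cswap32() performs that atomic compare-and-swap on our behalf,
+ * and its generation argument ensures a request queued across a bus
+ * reset cannot complete against stale resource state.
+ */
+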
+/*
+ * s1394_CSR_topology_map()
+ *    handles all requests for the TOPOLOGY_MAP[]. Since it is implemented
+ * with backing store, there isn't much to do besides return success or
+ * failure.
+ */
+static void
+s1394_CSR_topology_map(cmd1394_cmd_t *req)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_topology_map_enter,
+ S1394_TNF_SL_CSR_STACK, "");
+
+ hal = (s1394_hal_t *)req->cmd_callback_arg;
+
+ /* Make sure it's a quadlet read request */
+ if (req->cmd_type == CMD1394_ASYNCH_RD_QUAD)
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ else
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+
+ (void) s1394_send_response(hal, req);
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_topology_map_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_CSR_topology_map_update()
+ * is used to update the local host's TOPOLOGY_MAP[] buffer. It copies in
+ * the SelfID packets, updates the generation and other fields, and
+ * computes the necessary CRC values before returning.
+ * Callers must be holding the topology_tree_mutex.
+ */
+void
+s1394_CSR_topology_map_update(s1394_hal_t *hal)
+{
+ s1394_selfid_pkt_t *selfid_packet;
+ uint32_t *tm_ptr;
+ uint32_t *data_ptr;
+ uint32_t node_count;
+ uint32_t self_id_count;
+ uint_t CRC;
+ uint32_t length;
+ int i, j, c;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_topology_map_update_enter,
+ S1394_TNF_SL_BR_CSR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ tm_ptr = (uint32_t *)hal->CSR_topology_map;
+ data_ptr = (uint32_t *)&(tm_ptr[3]);
+
+ c = 0;
+ for (i = 0; i < hal->number_of_nodes; i++) {
+ j = -1;
+ selfid_packet = hal->selfid_ptrs[i];
+
+ do {
+ j++;
+ data_ptr[c++] = selfid_packet[j].spkt_data;
+		} while (IEEE1394_SELFID_ISMORE(&selfid_packet[j]));
+ }
+
+ /* Update Topology Map Generation */
+ tm_ptr[1] = tm_ptr[1] + 1;
+
+ /* Update Node_Count and Self_Id_Count */
+ node_count = (i & IEEE1394_TOP_MAP_LEN_MASK);
+ self_id_count = (c & IEEE1394_TOP_MAP_LEN_MASK);
+ tm_ptr[2] = (node_count << IEEE1394_TOP_MAP_LEN_SHIFT) |
+ (self_id_count);
+
+ /* Calculate CRC-16 */
+ length = self_id_count + 2;
+ CRC = s1394_CRC16(&(tm_ptr[1]), length);
+ tm_ptr[0] = (length << IEEE1394_TOP_MAP_LEN_SHIFT) | CRC;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_topology_map_update_exit,
+ S1394_TNF_SL_BR_CSR_STACK, "");
+}
+
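+/*
+ * For reference, the buffer filled in above has the following layout
+ * (see IEEE 1394-1995, Section 8.3.2.4.1):
+ *
+ *	tm_ptr[0]	length (self_id_count + 2) | CRC-16
+ *	tm_ptr[1]	topology map generation
+ *	tm_ptr[2]	node_count | self_id_count
+ *	tm_ptr[3..]	concatenated self-ID packets, in node order
+ *
+ * The CRC is computed over "length" quadlets starting at tm_ptr[1], so
+ * it covers the generation, the counts, and every self-ID quadlet.
+ */
+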
+/*
+ * s1394_CSR_topology_map_disable()
+ * is used to disable the local host's TOPOLOGY_MAP[] buffer (during bus
+ * reset processing). It sets the topology map's length to zero to
+ * indicate that it is invalid.
+ */
+void
+s1394_CSR_topology_map_disable(s1394_hal_t *hal)
+{
+ uint32_t *tm_ptr;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_topology_map_disable_enter,
+ S1394_TNF_SL_BR_CSR_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ tm_ptr = (uint32_t *)hal->CSR_topology_map;
+
+ /* Set length = 0 */
+ tm_ptr[0] = tm_ptr[0] & IEEE1394_TOP_MAP_LEN_MASK;
+
+ TNF_PROBE_0_DEBUG(s1394_CSR_topology_map_disable_exit,
+ S1394_TNF_SL_BR_CSR_STACK, "");
+}
+
+/*
+ * s1394_common_CSR_routine()
+ * is used to handle most of the CSR register requests. They are passed
+ * to the appropriate HAL entry point for further processing. Then they
+ * are filled in with an appropriate response code, and the response is sent.
+ */
+static void
+s1394_common_CSR_routine(s1394_hal_t *hal, cmd1394_cmd_t *req)
+{
+ uint32_t data;
+ uint_t offset;
+ int result;
+
+ TNF_PROBE_0_DEBUG(s1394_common_CSR_routine_enter,
+ S1394_TNF_SL_CSR_STACK, "");
+
+ /* Register offset */
+ offset = (req->cmd_addr & IEEE1394_CSR_OFFSET_MASK);
+
+ /* Verify that request is quadlet aligned */
+	if ((offset & 0x3) != 0) {
+		req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+		(void) s1394_send_response(hal, req);
+		TNF_PROBE_0_DEBUG(s1394_common_CSR_routine_exit,
+		    S1394_TNF_SL_CSR_STACK, "");
+		return;
+	}
+
+ switch (req->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ /*
+ * The csr_read() call can return DDI_FAILURE if the HAL
+ * is shutdown or if the register at "offset" is
+ * unimplemented. We will return IEEE1394_RESP_ADDRESS_ERROR
+ * in the response if we see this error.
+ */
+ result = HAL_CALL(hal).csr_read(hal->halinfo.hal_private,
+ offset, &data);
+ if (result == DDI_SUCCESS) {
+ req->cmd_u.q.quadlet_data = data;
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ } else {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ }
+ break;
+
+ case CMD1394_ASYNCH_WR_QUAD:
+ data = req->cmd_u.q.quadlet_data;
+ /*
+		 * The csr_write() call can return DDI_FAILURE if the HAL
+ * is shutdown or if the register at "offset" is
+ * unimplemented. We will return IEEE1394_RESP_ADDRESS_ERROR
+ * in the response if we see this error.
+ */
+ result = HAL_CALL(hal).csr_write(hal->halinfo.hal_private,
+ offset, data);
+ if (result == DDI_SUCCESS) {
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ } else {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ }
+ break;
+
+ default:
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ }
+
+ (void) s1394_send_response(hal, req);
+ TNF_PROBE_0_DEBUG(s1394_common_CSR_routine_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+}
+
+/*
+ * s1394_init_local_config_rom()
+ * is called in the HAL attach routine - h1394_attach() - to setup the
+ * initial Config ROM entries on the local host, including the
+ * bus_info_block and the root and unit directories.
+ */
+int
+s1394_init_local_config_rom(s1394_hal_t *hal)
+{
+ uint32_t *config_rom;
+ uint32_t *node_unique_id_leaf;
+ uint32_t *unit_dir;
+ uint32_t *text_leaf;
+ void *n_handle;
+ uint64_t guid;
+ uint32_t guid_hi, guid_lo;
+ uint32_t bus_capabilities;
+ uint32_t irmc, g;
+ uint32_t module_vendor_id;
+ uint32_t node_capabilities;
+ uint32_t root_dir_len;
+ uint32_t CRC;
+ int status, i, ret;
+
+ TNF_PROBE_0_DEBUG(s1394_init_local_config_rom_enter,
+ S1394_TNF_SL_CFGROM_STACK, "");
+
+ /* Setup Config ROM mutex */
+ mutex_init(&hal->local_config_rom_mutex,
+ NULL, MUTEX_DRIVER, hal->halinfo.hw_interrupt);
+
+ /* Allocate 1K for the Config ROM buffer */
+ hal->local_config_rom = (uint32_t *)kmem_zalloc(IEEE1394_CONFIG_ROM_SZ,
+ KM_SLEEP);
+
+ /* Allocate 1K for the temporary buffer */
+ hal->temp_config_rom_buf = (uint32_t *)kmem_zalloc(
+ IEEE1394_CONFIG_ROM_SZ, KM_SLEEP);
+
+ config_rom = hal->local_config_rom;
+
+ /* Lock the Config ROM buffer */
+ mutex_enter(&hal->local_config_rom_mutex);
+
+ /* Build the config ROM structures */
+ ret = s1394_init_config_rom_structures(hal);
+ if (ret != DDI_SUCCESS) {
+ /* Unlock the Config ROM buffer */
+ mutex_exit(&hal->local_config_rom_mutex);
+ kmem_free((void *)hal->temp_config_rom_buf,
+ IEEE1394_CONFIG_ROM_SZ);
+ kmem_free((void *)hal->local_config_rom,
+ IEEE1394_CONFIG_ROM_SZ);
+ mutex_destroy(&hal->local_config_rom_mutex);
+ TNF_PROBE_1(s1394_init_local_config_rom_error,
+ S1394_TNF_SL_CFGROM_ERROR, "", tnf_string, msg,
+ "Failed in s1394_init_config_rom_structures()");
+ TNF_PROBE_0_DEBUG(s1394_init_local_config_rom_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_FAILURE);
+ }
+ /* Build the Bus_Info_Block - see IEEE 1394-1995, Section 8.3.2.5.4 */
+ bus_capabilities = hal->halinfo.bus_capabilities;
+
+ /*
+ * If we are Isoch Resource Manager capable then we are
+ * Bus Manager capable too.
+ */
+ irmc = (bus_capabilities & IEEE1394_BIB_IRMC_MASK) >>
+ IEEE1394_BIB_IRMC_SHIFT;
+ if (irmc)
+ bus_capabilities = bus_capabilities | IEEE1394_BIB_BMC_MASK;
+
+ /*
+	 * Set generation to P1394a valid (but changeable).
+ * Even if we have a 1995 PHY, we will still provide
+ * certain P1394A functionality (especially with respect
+ * to Config ROM updates). So we must publish this
+ * information.
+ */
+ g = 2 << IEEE1394_BIB_GEN_SHIFT;
+ bus_capabilities = bus_capabilities | g;
+
+ /* Get the GUID */
+ guid = hal->halinfo.guid;
+ guid_hi = (uint32_t)(guid >> 32);
+ guid_lo = (uint32_t)(guid & 0x00000000FFFFFFFF);
+
+ config_rom[1] = 0x31333934; /* "1394" */
+ config_rom[2] = bus_capabilities;
+ config_rom[3] = guid_hi;
+ config_rom[4] = guid_lo;
+
+ /* The CRC covers only our Bus_Info_Block */
+ CRC = s1394_CRC16(&config_rom[1], 4);
+ config_rom[0] = (0x04040000) | CRC;
+
+ /* Do byte-swapping if necessary (x86) */
+ for (i = 0; i < IEEE1394_BIB_QUAD_SZ; i++)
+ config_rom[i] = T1394_DATA32(config_rom[i]);
+
+ /* Build the Root_Directory - see IEEE 1394-1995, Section 8.3.2.5.5 */
+
+ /* MODULE_VENDOR_ID - see IEEE 1394-1995, Section 8.3.2.5.5.1 */
+ module_vendor_id = S1394_SUNW_OUI;
+
+ /* NODE_CAPABILITIES - see IEEE 1394-1995, Section 8.3.2.5.5.2 */
+ node_capabilities = hal->halinfo.node_capabilities &
+ IEEE1212_NODE_CAPABILITIES_MASK;
+ root_dir_len = 2;
+
+ config_rom[6] = (IEEE1212_MODULE_VENDOR_ID <<
+ IEEE1212_KEY_VALUE_SHIFT) | module_vendor_id;
+ config_rom[7] = (IEEE1212_NODE_CAPABILITIES <<
+ IEEE1212_KEY_VALUE_SHIFT) | node_capabilities;
+
+ CRC = s1394_CRC16(&config_rom[6], root_dir_len);
+ config_rom[IEEE1394_BIB_QUAD_SZ] =
+ (root_dir_len << IEEE1394_CFG_ROM_LEN_SHIFT) | CRC;
+
+ /* Do byte-swapping if necessary (x86) */
+ for (i = IEEE1394_BIB_QUAD_SZ; i < 8; i++)
+ config_rom[i] = T1394_DATA32(config_rom[i]);
+
+ /* Build the Root Text leaf - see IEEE 1394-1995, Section 8.3.2.5.7 */
+ text_leaf = (uint32_t *)kmem_zalloc(S1394_ROOT_TEXT_LEAF_SZ, KM_SLEEP);
+ text_leaf[1] = 0x00000000;
+ text_leaf[2] = 0x00000000;
+ text_leaf[3] = 0x53756e20; /* "Sun " */
+ text_leaf[4] = 0x4d696372; /* "Micr" */
+ text_leaf[5] = 0x6f737973; /* "osys" */
+ text_leaf[6] = 0x74656d73; /* "tems" */
+ text_leaf[7] = 0x2c20496e; /* ", In" */
+ text_leaf[8] = 0x632e0000; /* "c." */
+ CRC = s1394_CRC16(&text_leaf[1], S1394_ROOT_TEXT_LEAF_QUAD_SZ - 1);
+ text_leaf[0] = (0x00080000) | CRC;
+
+ /* Do byte-swapping if necessary (x86) */
+ for (i = 0; i < 9; i++)
+ text_leaf[i] = T1394_DATA32(text_leaf[i]);
+
+ ret = s1394_add_config_rom_entry(hal, S1394_ROOT_TEXT_KEY, text_leaf,
+ S1394_ROOT_TEXT_LEAF_QUAD_SZ, &n_handle, &status);
+ if (ret != DDI_SUCCESS) {
+ kmem_free((void *)text_leaf, S1394_ROOT_TEXT_LEAF_SZ);
+ /* Destroy the config_rom structures */
+ (void) s1394_destroy_config_rom_structures(hal);
+ /* Unlock the Config ROM buffer */
+ mutex_exit(&hal->local_config_rom_mutex);
+ kmem_free((void *)hal->temp_config_rom_buf,
+ IEEE1394_CONFIG_ROM_SZ);
+ kmem_free((void *)hal->local_config_rom,
+ IEEE1394_CONFIG_ROM_SZ);
+ mutex_destroy(&hal->local_config_rom_mutex);
+ TNF_PROBE_1(s1394_init_local_config_rom_error,
+ S1394_TNF_SL_CFGROM_ERROR, "", tnf_string, msg,
+		    "Failed in s1394_add_config_rom_entry()");
+ TNF_PROBE_0_DEBUG(s1394_init_local_config_rom_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_FAILURE);
+ }
+ kmem_free((void *)text_leaf, S1394_ROOT_TEXT_LEAF_SZ);
+
+ /* Build the Node_Unique_Id leaf - IEEE 1394-1995, Sect. 8.3.2.5.7.1 */
+ node_unique_id_leaf = (uint32_t *)kmem_zalloc(S1394_NODE_UNIQUE_ID_SZ,
+ KM_SLEEP);
+ node_unique_id_leaf[1] = guid_hi;
+ node_unique_id_leaf[2] = guid_lo;
+ CRC = s1394_CRC16(&node_unique_id_leaf[1],
+ S1394_NODE_UNIQUE_ID_QUAD_SZ - 1);
+ node_unique_id_leaf[0] = (0x00020000) | CRC;
+
+ /* Do byte-swapping if necessary (x86) */
+ for (i = 0; i < S1394_NODE_UNIQUE_ID_QUAD_SZ; i++)
+ node_unique_id_leaf[i] = T1394_DATA32(node_unique_id_leaf[i]);
+
+ ret = s1394_add_config_rom_entry(hal, S1394_NODE_UNIQUE_ID_KEY,
+ node_unique_id_leaf, S1394_NODE_UNIQUE_ID_QUAD_SZ, &n_handle,
+ &status);
+ if (ret != DDI_SUCCESS) {
+ kmem_free((void *)node_unique_id_leaf,
+ S1394_NODE_UNIQUE_ID_SZ);
+ /* Destroy the config_rom structures */
+ (void) s1394_destroy_config_rom_structures(hal);
+ /* Unlock the Config ROM buffer */
+ mutex_exit(&hal->local_config_rom_mutex);
+ kmem_free((void *)hal->temp_config_rom_buf,
+ IEEE1394_CONFIG_ROM_SZ);
+ kmem_free((void *)hal->local_config_rom,
+ IEEE1394_CONFIG_ROM_SZ);
+ mutex_destroy(&hal->local_config_rom_mutex);
+ TNF_PROBE_1(s1394_init_local_config_rom_error,
+ S1394_TNF_SL_CFGROM_ERROR, "", tnf_string, msg,
+		    "Failed in s1394_add_config_rom_entry()");
+ TNF_PROBE_0_DEBUG(s1394_init_local_config_rom_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_FAILURE);
+ }
+ kmem_free((void *)node_unique_id_leaf, S1394_NODE_UNIQUE_ID_SZ);
+
+ /* Build the Unit_Directory for 1394 Framework */
+ unit_dir = (uint32_t *)kmem_zalloc(S1394_UNIT_DIR_SZ, KM_SLEEP);
+ unit_dir[1] = 0x12080020; /* Sun Microsystems */
+ unit_dir[2] = 0x13000001; /* Version 1 */
+ unit_dir[3] = 0x81000001; /* offset to the text leaf */
+ CRC = s1394_CRC16(&unit_dir[1], 3);
+ unit_dir[0] = (0x00030000) | CRC;
+
+ /* Do byte-swapping if necessary (x86) */
+ for (i = 0; i < 4; i++)
+ unit_dir[i] = T1394_DATA32(unit_dir[i]);
+
+ /* Build the Unit Directory text leaf */
+ unit_dir[5] = 0x00000000;
+ unit_dir[6] = 0x00000000;
+ unit_dir[7] = 0x536f6c61; /* "Sola" */
+ unit_dir[8] = 0x72697320; /* "ris " */
+ unit_dir[9] = 0x31333934; /* "1394" */
+ unit_dir[10] = 0x20535720; /* " SW " */
+ unit_dir[11] = 0x4672616d; /* "Fram" */
+ unit_dir[12] = 0x65576f72; /* "ewor" */
+ unit_dir[13] = 0x6b000000; /* "k" */
+ CRC = s1394_CRC16(&unit_dir[5], 9);
+ unit_dir[4] = (0x00090000) | CRC;
+
+ /* Do byte-swapping if necessary (x86) */
+ for (i = 4; i < S1394_UNIT_DIR_QUAD_SZ; i++)
+ unit_dir[i] = T1394_DATA32(unit_dir[i]);
+
+ ret = s1394_add_config_rom_entry(hal, S1394_UNIT_DIR_KEY, unit_dir,
+ S1394_UNIT_DIR_QUAD_SZ, &n_handle, &status);
+ if (ret != DDI_SUCCESS) {
+ kmem_free((void *)unit_dir, S1394_UNIT_DIR_SZ);
+ /* Destroy the config_rom structures */
+ (void) s1394_destroy_config_rom_structures(hal);
+ /* Unlock the Config ROM buffer */
+ mutex_exit(&hal->local_config_rom_mutex);
+ kmem_free((void *)hal->temp_config_rom_buf,
+ IEEE1394_CONFIG_ROM_SZ);
+ /* Free the 1K for the Config ROM buffer */
+ kmem_free((void *)hal->local_config_rom,
+ IEEE1394_CONFIG_ROM_SZ);
+ mutex_destroy(&hal->local_config_rom_mutex);
+ TNF_PROBE_1(s1394_init_local_config_rom_error,
+ S1394_TNF_SL_CFGROM_ERROR, "", tnf_string, msg,
+		    "Failed in s1394_add_config_rom_entry()");
+ TNF_PROBE_0_DEBUG(s1394_init_local_config_rom_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_FAILURE);
+ }
+ kmem_free((void *)unit_dir, S1394_UNIT_DIR_SZ);
+
+ hal->config_rom_update_amount = (IEEE1394_CONFIG_ROM_QUAD_SZ -
+ hal->free_space);
+
+ /* Unlock the Config ROM buffer */
+ mutex_exit(&hal->local_config_rom_mutex);
+
+ /*
+ * The update_config_rom() call can return DDI_FAILURE if the
+ * HAL is shutdown.
+ */
+ (void) HAL_CALL(hal).update_config_rom(hal->halinfo.hal_private,
+ config_rom, IEEE1394_CONFIG_ROM_QUAD_SZ);
+
+ TNF_PROBE_0_DEBUG(s1394_init_local_config_rom_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_SUCCESS);
+}
+
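+/*
+ * For reference, the first five quadlets built above form the
+ * Bus_Info_Block (IEEE 1394-1995, Section 8.3.2.5.4):
+ *
+ *	config_rom[0]	info_length (4) | crc_length (4) | CRC-16
+ *	config_rom[1]	0x31333934, the bus name "1394"
+ *	config_rom[2]	bus capabilities (irmc, bmc, generation, ...)
+ *	config_rom[3]	GUID, most significant quadlet
+ *	config_rom[4]	GUID, least significant quadlet
+ *
+ * The constant 0x04040000 is info_length and crc_length (both 4)
+ * already shifted into place, and the CRC covers quadlets 1 through 4.
+ */
+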
+/*
+ * s1394_destroy_local_config_rom()
+ *    is necessary for h1394_detach(). It undoes all the work that
+ *    s1394_init_local_config_rom() set up, and more. By pulling
+ *    everything out of the config ROM structures and freeing them and
+ *    their associated mutexes, the Config ROM is completely cleaned up.
+ */
+void
+s1394_destroy_local_config_rom(s1394_hal_t *hal)
+{
+ TNF_PROBE_0_DEBUG(s1394_destroy_local_config_rom_enter,
+ S1394_TNF_SL_CFGROM_STACK, "");
+
+ /* Lock the Config ROM buffer */
+ mutex_enter(&hal->local_config_rom_mutex);
+
+ /* Destroy the config_rom structures */
+ (void) s1394_destroy_config_rom_structures(hal);
+
+ /* Unlock the Config ROM buffer */
+ mutex_exit(&hal->local_config_rom_mutex);
+
+ /* Free the 1K for the temporary buffer */
+ kmem_free((void *)hal->temp_config_rom_buf, IEEE1394_CONFIG_ROM_SZ);
+ /* Free the 1K for the Config ROM buffer */
+ kmem_free((void *)hal->local_config_rom, IEEE1394_CONFIG_ROM_SZ);
+
+	/* Destroy the Config ROM mutex */
+ mutex_destroy(&hal->local_config_rom_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_destroy_local_config_rom_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+}
+
+/*
+ * s1394_init_config_rom_structures()
+ * initializes the structures that are used to maintain the local Config ROM.
+ * Callers must be holding the local_config_rom_mutex.
+ */
+static int
+s1394_init_config_rom_structures(s1394_hal_t *hal)
+{
+ s1394_config_rom_t *root_directory;
+ s1394_config_rom_t *rest_of_config_rom;
+
+ TNF_PROBE_0_DEBUG(s1394_init_config_rom_structures_enter,
+ S1394_TNF_SL_CFGROM_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->local_config_rom_mutex));
+
+ root_directory = (s1394_config_rom_t *)kmem_zalloc(
+ sizeof (s1394_config_rom_t), KM_SLEEP);
+
+ root_directory->cfgrom_used = B_TRUE;
+ root_directory->cfgrom_addr_lo = IEEE1394_BIB_QUAD_SZ;
+ root_directory->cfgrom_addr_hi = IEEE1394_BIB_QUAD_SZ + 2;
+
+ rest_of_config_rom = (s1394_config_rom_t *)kmem_zalloc(
+ sizeof (s1394_config_rom_t), KM_SLEEP);
+
+ rest_of_config_rom->cfgrom_used = B_FALSE;
+ rest_of_config_rom->cfgrom_addr_lo = root_directory->cfgrom_addr_hi + 1;
+ rest_of_config_rom->cfgrom_addr_hi = IEEE1394_CONFIG_ROM_QUAD_SZ - 1;
+
+ root_directory->cfgrom_next = rest_of_config_rom;
+ root_directory->cfgrom_prev = NULL;
+ rest_of_config_rom->cfgrom_next = NULL;
+ rest_of_config_rom->cfgrom_prev = root_directory;
+
+ hal->root_directory = root_directory;
+ hal->free_space = IEEE1394_CONFIG_ROM_QUAD_SZ -
+ (rest_of_config_rom->cfgrom_addr_lo);
+
+ TNF_PROBE_0_DEBUG(s1394_init_config_rom_structures_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_SUCCESS);
+}
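+
+/*
+ * A sketch of the layout this sets up (assuming the usual values
+ * IEEE1394_BIB_QUAD_SZ == 5 and IEEE1394_CONFIG_ROM_QUAD_SZ == 256,
+ * i.e. a 1K Config ROM):
+ *
+ *	quads 0 - 4	Bus_Info_Block (not tracked by this list)
+ *	quads 5 - 7	root_directory (cfgrom_used == B_TRUE)
+ *	quads 8 - 255	rest_of_config_rom (cfgrom_used == B_FALSE)
+ *
+ * which leaves hal->free_space == 256 - 8 == 248 quadlets.
+ */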
+
+/*
+ * s1394_destroy_config_rom_structures()
+ * is used to destroy the structures that maintain the local Config ROM.
+ * Callers must be holding the local_config_rom_mutex.
+ */
+static int
+s1394_destroy_config_rom_structures(s1394_hal_t *hal)
+{
+ s1394_config_rom_t *curr_blk;
+ s1394_config_rom_t *next_blk;
+
+ TNF_PROBE_0_DEBUG(s1394_destroy_config_rom_structures_enter,
+ S1394_TNF_SL_CFGROM_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->local_config_rom_mutex));
+
+ curr_blk = hal->root_directory;
+
+ while (curr_blk != NULL) {
+ next_blk = curr_blk->cfgrom_next;
+ kmem_free(curr_blk, sizeof (s1394_config_rom_t));
+ curr_blk = next_blk;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_destroy_config_rom_structures_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_add_config_rom_entry()
+ * is used to add a new entry to the local host's config ROM. By
+ * specifying a key and a buffer, it is possible to update the Root
+ * Directory to point to the new entry (in buffer). All of the
+ * relevant CRCs, lengths, and generations are updated as well.
+ * By returning a Config ROM "handle", we can allow targets to remove
+ * the corresponding entry.
+ * Callers must be holding the local_config_rom_mutex.
+ */
+int
+s1394_add_config_rom_entry(s1394_hal_t *hal, uint8_t key, uint32_t *buffer,
+ uint_t size, void **handle, int *status)
+{
+ s1394_config_rom_t *curr_blk;
+ s1394_config_rom_t *new_blk;
+ uint32_t *config_rom;
+ uint32_t *temp_buf;
+ uint32_t CRC;
+ uint_t tmp_offset;
+ uint_t tmp_size, temp;
+ uint_t last_entry_offset;
+ int i;
+
+ TNF_PROBE_0_DEBUG(s1394_add_config_rom_entry_enter,
+ "stacktrace 1394 s1394", "");
+
+ ASSERT(MUTEX_HELD(&hal->local_config_rom_mutex));
+
+ if (size > hal->free_space) {
+ /* Out of space */
+ *status = CMD1394_ERSRC_CONFLICT;
+ TNF_PROBE_0_DEBUG(s1394_add_config_rom_entry_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ config_rom = hal->local_config_rom;
+ temp_buf = hal->temp_config_rom_buf;
+
+ /* Copy the Bus_Info_Block */
+ bcopy(&config_rom[0], &temp_buf[0], IEEE1394_BIB_SZ);
+
+ /* Copy and add to the Root_Directory */
+ tmp_offset = hal->root_directory->cfgrom_addr_lo;
+ tmp_size = (hal->root_directory->cfgrom_addr_hi - tmp_offset) + 1;
+ tmp_size = tmp_size + 1; /* For the new entry */
+ bcopy(&config_rom[tmp_offset], &temp_buf[tmp_offset], tmp_size << 2);
+ last_entry_offset = hal->root_directory->cfgrom_addr_hi + 1;
+
+ curr_blk = hal->root_directory;
+ curr_blk->cfgrom_addr_hi = curr_blk->cfgrom_addr_hi + 1;
+ while (curr_blk->cfgrom_next != NULL) {
+ if (curr_blk->cfgrom_next->cfgrom_used == B_TRUE) {
+ tmp_offset = curr_blk->cfgrom_next->cfgrom_addr_lo;
+ tmp_size = (curr_blk->cfgrom_next->cfgrom_addr_hi -
+ tmp_offset) + 1;
+
+ bcopy(&config_rom[tmp_offset],
+ &temp_buf[tmp_offset + 1], tmp_size << 2);
+ curr_blk->cfgrom_next->cfgrom_addr_lo++;
+ curr_blk->cfgrom_next->cfgrom_addr_hi++;
+ last_entry_offset =
+ curr_blk->cfgrom_next->cfgrom_addr_hi;
+
+ tmp_offset = curr_blk->cfgrom_next->root_dir_offset;
+
+ /* Swap... add one... then unswap */
+ temp = T1394_DATA32(temp_buf[tmp_offset]);
+ temp++;
+ temp_buf[tmp_offset] = T1394_DATA32(temp);
+ } else {
+ curr_blk->cfgrom_next->cfgrom_addr_lo++;
+ hal->free_space--;
+ break;
+ }
+
+ curr_blk = curr_blk->cfgrom_next;
+ }
+
+ /* Get the pointer to the "free" space */
+ curr_blk = curr_blk->cfgrom_next;
+
+ /* Is it an exact fit? */
+ if (hal->free_space == size) {
+ curr_blk->cfgrom_used = B_TRUE;
+
+ } else { /* Must break this piece */
+ /* KM_SLEEP allocations sleep until they succeed; no NULL check */
+ new_blk = (s1394_config_rom_t *)kmem_zalloc(
+ sizeof (s1394_config_rom_t), KM_SLEEP);
+
+ new_blk->cfgrom_addr_hi = curr_blk->cfgrom_addr_hi;
+ new_blk->cfgrom_addr_lo = curr_blk->cfgrom_addr_lo + size;
+ curr_blk->cfgrom_addr_hi = new_blk->cfgrom_addr_lo - 1;
+ new_blk->cfgrom_next = curr_blk->cfgrom_next;
+ curr_blk->cfgrom_next = new_blk;
+ new_blk->cfgrom_prev = curr_blk;
+ curr_blk->cfgrom_used = B_TRUE;
+ last_entry_offset = curr_blk->cfgrom_addr_hi;
+ }
+ hal->free_space = hal->free_space - size;
+
+ /* Copy in the new entry */
+ tmp_offset = curr_blk->cfgrom_addr_lo;
+ bcopy(buffer, &temp_buf[tmp_offset], size << 2);
+
+ /* Update root directory */
+ tmp_offset = hal->root_directory->cfgrom_addr_hi;
+ tmp_size = tmp_offset - hal->root_directory->cfgrom_addr_lo;
+ curr_blk->root_dir_offset = tmp_offset;
+ tmp_offset = curr_blk->cfgrom_addr_lo - tmp_offset;
+
+ temp_buf[hal->root_directory->cfgrom_addr_hi] =
+ T1394_DATA32((((uint32_t)key) << IEEE1212_KEY_VALUE_SHIFT) |
+ tmp_offset);
+ tmp_offset = hal->root_directory->cfgrom_addr_lo;
+
+ /* Do byte-swapping if necessary (x86) */
+ for (i = (tmp_offset + 1); i <= hal->root_directory->cfgrom_addr_hi;
+ i++)
+ temp_buf[i] = T1394_DATA32(temp_buf[i]);
+
+ CRC = s1394_CRC16(&temp_buf[tmp_offset + 1], tmp_size);
+ temp_buf[tmp_offset] = (tmp_size << IEEE1394_CFG_ROM_LEN_SHIFT) | CRC;
+
+ /* Redo byte-swapping if necessary (x86) */
+ for (i = tmp_offset; i <= hal->root_directory->cfgrom_addr_hi; i++)
+ temp_buf[i] = T1394_DATA32(temp_buf[i]);
+
+ /* Copy it back to config_rom buffer */
+ last_entry_offset++;
+ bcopy(&temp_buf[0], &config_rom[0], last_entry_offset << 2);
+
+ /* Return a handle to this block */
+ *handle = curr_blk;
+
+ *status = T1394_NOERROR;
+
+ TNF_PROBE_0_DEBUG(s1394_add_config_rom_entry_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_SUCCESS);
+}
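+
+/*
+ * A worked example of the encoding above (a sketch, assuming the usual
+ * values IEEE1212_KEY_VALUE_SHIFT == 24 and
+ * IEEE1394_CFG_ROM_LEN_SHIFT == 16): adding a Unit_Directory with key
+ * 0xD1 whose body begins 6 quadlets past its root directory entry
+ * produces the entry quadlet
+ *
+ *	(0xD1 << 24) | 6 == 0xD1000006
+ *
+ * after which the root directory header is rebuilt as
+ *
+ *	(length << 16) | s1394_CRC16(quads after the header, length)
+ */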
+
+/*
+ * s1394_remove_config_rom_entry()
+ * is used to remove an entry from the local host's config ROM. By
+ * specifying the Config ROM "handle" that was given in the allocation,
+ * it is possible to remove the entry. Subsequently, the Config ROM is
+ * updated again.
+ * Callers must be holding the local_config_rom_mutex.
+ */
+int
+s1394_remove_config_rom_entry(s1394_hal_t *hal, void **handle, int *status)
+{
+ s1394_config_rom_t *del_blk;
+ s1394_config_rom_t *curr_blk;
+ s1394_config_rom_t *last_blk;
+ s1394_config_rom_t *free_blk;
+ uint32_t *config_rom;
+ uint32_t *temp_buf;
+ uint32_t entry;
+ uint_t CRC;
+ uint_t root_offset;
+ uint_t del_offset;
+ uint_t tmp_offset;
+ uint_t tmp_size;
+ int i;
+
+ TNF_PROBE_0_DEBUG(s1394_remove_config_rom_entry_enter,
+ S1394_TNF_SL_CFGROM_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->local_config_rom_mutex));
+
+ del_blk = (s1394_config_rom_t *)(*handle);
+
+ config_rom = hal->local_config_rom;
+ temp_buf = hal->temp_config_rom_buf;
+
+ /* Copy the Bus_Info_Block */
+ bcopy(&config_rom[0], &temp_buf[0], IEEE1394_BIB_SZ);
+
+ root_offset = hal->root_directory->cfgrom_addr_lo;
+ del_offset = del_blk->root_dir_offset;
+
+ /* Update Root_Directory entries before the deleted one */
+ for (i = root_offset; i < del_offset; i++) {
+ entry = T1394_DATA32(config_rom[i]);
+
+ /* If entry is an offset address - update it */
+ if (entry & 0x80000000)
+ temp_buf[i] = T1394_DATA32(entry - 1);
+ else
+ temp_buf[i] = T1394_DATA32(entry);
+ }
+
+ /* Move all Unit_Directories prior to the deleted one */
+ curr_blk = hal->root_directory->cfgrom_next;
+
+ while (curr_blk != del_blk) {
+ tmp_offset = curr_blk->cfgrom_addr_lo;
+ tmp_size = (curr_blk->cfgrom_addr_hi - tmp_offset) + 1;
+
+ bcopy(&config_rom[tmp_offset], &temp_buf[tmp_offset - 1],
+ tmp_size << 2);
+ curr_blk->cfgrom_addr_lo--;
+ curr_blk->cfgrom_addr_hi--;
+ curr_blk = curr_blk->cfgrom_next;
+ }
+
+ /* Move all Unit_Directories after the deleted one */
+ curr_blk = del_blk->cfgrom_next;
+ last_blk = del_blk->cfgrom_prev;
+
+ del_offset = (del_blk->cfgrom_addr_hi - del_blk->cfgrom_addr_lo) + 1;
+
+ while ((curr_blk != NULL) && (curr_blk->cfgrom_used == B_TRUE)) {
+ tmp_offset = curr_blk->cfgrom_addr_lo;
+ tmp_size = (curr_blk->cfgrom_addr_hi - tmp_offset) + 1;
+
+ bcopy(&config_rom[tmp_offset],
+ &temp_buf[tmp_offset - (del_offset + 1)], tmp_size << 2);
+
+ root_offset = curr_blk->root_dir_offset;
+ temp_buf[root_offset - 1] =
+ config_rom[root_offset] - del_offset;
+ curr_blk->root_dir_offset--;
+ curr_blk->cfgrom_addr_lo = curr_blk->cfgrom_addr_lo -
+ (del_offset + 1);
+ curr_blk->cfgrom_addr_hi = curr_blk->cfgrom_addr_hi -
+ (del_offset + 1);
+
+ last_blk = curr_blk;
+ curr_blk = curr_blk->cfgrom_next;
+ }
+
+ /* Remove del_blk from the list */
+ if (del_blk->cfgrom_prev != NULL)
+ del_blk->cfgrom_prev->cfgrom_next = del_blk->cfgrom_next;
+
+ if (del_blk->cfgrom_next != NULL)
+ del_blk->cfgrom_next->cfgrom_prev = del_blk->cfgrom_prev;
+
+ del_blk->cfgrom_prev = NULL;
+ del_blk->cfgrom_next = NULL;
+ kmem_free((void *)del_blk, sizeof (s1394_config_rom_t));
+
+ /* Update and zero out the "free" block */
+ if (curr_blk != NULL) {
+ curr_blk->cfgrom_addr_lo = curr_blk->cfgrom_addr_lo -
+ (del_offset + 1);
+
+ } else {
+ /* KM_SLEEP allocations sleep until they succeed; no NULL check */
+ free_blk = (s1394_config_rom_t *)kmem_zalloc(
+ sizeof (s1394_config_rom_t), KM_SLEEP);
+
+ free_blk->cfgrom_used = B_FALSE;
+ free_blk->cfgrom_addr_lo = (IEEE1394_CONFIG_ROM_QUAD_SZ - 1) -
+ (del_offset + 1);
+ free_blk->cfgrom_addr_hi = (IEEE1394_CONFIG_ROM_QUAD_SZ - 1);
+
+ free_blk->cfgrom_prev = last_blk;
+ free_blk->cfgrom_next = NULL;
+ curr_blk = free_blk;
+ }
+ hal->free_space = hal->free_space + (del_offset + 1);
+ tmp_offset = curr_blk->cfgrom_addr_lo;
+ tmp_size = (curr_blk->cfgrom_addr_hi - tmp_offset) + 1;
+ bzero(&temp_buf[tmp_offset], tmp_size << 2);
+
+
+ /* Update root directory */
+ hal->root_directory->cfgrom_addr_hi--;
+ tmp_offset = hal->root_directory->cfgrom_addr_lo;
+ tmp_size = hal->root_directory->cfgrom_addr_hi - tmp_offset;
+
+ /* Do byte-swapping if necessary (x86) */
+ for (i = (tmp_offset + 1); i <= hal->root_directory->cfgrom_addr_hi;
+ i++)
+ temp_buf[i] = T1394_DATA32(temp_buf[i]);
+
+ CRC = s1394_CRC16(&temp_buf[tmp_offset + 1], tmp_size);
+ temp_buf[tmp_offset] = (tmp_size << IEEE1394_CFG_ROM_LEN_SHIFT) | CRC;
+
+ /* Redo byte-swapping if necessary (x86), including the header quad */
+ for (i = tmp_offset; i <= hal->root_directory->cfgrom_addr_hi; i++)
+ temp_buf[i] = T1394_DATA32(temp_buf[i]);
+
+ /* Copy it back to config_rom buffer */
+ tmp_size = IEEE1394_CONFIG_ROM_SZ - (hal->free_space << 2);
+ bcopy(&temp_buf[0], &config_rom[0], tmp_size);
+
+ /* Return a handle to this block */
+ *handle = NULL;
+
+ *status = T1394_NOERROR;
+
+ TNF_PROBE_0_DEBUG(s1394_remove_config_rom_entry_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_SUCCESS);
+}
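+
+/*
+ * A removal sketch based on the logic above: deleting a 5-quadlet entry
+ * (del_offset == 5) slides each later Unit_Directory down by 6 quadlets
+ * (the entry plus its one-quadlet root directory pointer), decrements
+ * the offset stored in each later root directory entry by 5, shrinks
+ * the root directory by one quadlet, and returns 6 quadlets to
+ * hal->free_space.
+ */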
+
+/*
+ * s1394_update_config_rom_callback()
+ * is the callback used by t1394_add_cfgrom_entry() and
+ * t1394_rem_cfgrom_entry(). After a target updates the Config ROM, a
+ * timer is set with this as its callback function. This is to reduce
+ * the number of bus resets that would be necessary if many targets
+ * wished to update the Config ROM simultaneously.
+ */
+void
+s1394_update_config_rom_callback(void *arg)
+{
+ s1394_hal_t *hal;
+ uint32_t *config_rom;
+ uint32_t bus_capabilities;
+ uint32_t g;
+ uint_t CRC;
+ uint_t last_entry_offset;
+ int i, ret;
+
+ TNF_PROBE_0_DEBUG(s1394_update_config_rom_callback_enter,
+ S1394_TNF_SL_CFGROM_STACK, "");
+
+ hal = (s1394_hal_t *)arg;
+
+ /* Lock the Config ROM buffer */
+ mutex_enter(&hal->local_config_rom_mutex);
+
+ config_rom = hal->local_config_rom;
+
+ /* Update Generation and CRC for Bus_Info_Block */
+
+ /* Do byte-swapping if necessary (x86) */
+ for (i = 0; i < IEEE1394_BIB_QUAD_SZ; i++)
+ config_rom[i] = T1394_DATA32(config_rom[i]);
+
+ bus_capabilities = config_rom[IEEE1212_NODE_CAP_QUAD];
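+ /*
+ * Bump the 4-bit generation field. Generation values 0 and 1 have
+ * reserved meanings in the Bus_Info_Block, so a live generation
+ * count wraps from 15 back to 2 (e.g. ... -> 14 -> 15 -> 2 -> ...).
+ */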
+ g = ((bus_capabilities & IEEE1394_BIB_GEN_MASK) >>
+ IEEE1394_BIB_GEN_SHIFT) + 1;
+ if (g > 15)
+ g = 2;
+ g = g << IEEE1394_BIB_GEN_SHIFT;
+
+ bus_capabilities = (bus_capabilities & (~IEEE1394_BIB_GEN_MASK)) | g;
+ config_rom[IEEE1212_NODE_CAP_QUAD] = bus_capabilities;
+
+ CRC = s1394_CRC16(&config_rom[1], IEEE1394_BIB_QUAD_SZ - 1);
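+ /* info_length == 4 and crc_length == 4 quadlets in the first quad */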
+ config_rom[0] = (0x04040000) | CRC;
+
+ /* Do byte-swapping if necessary (x86) */
+ for (i = 0; i < IEEE1394_BIB_QUAD_SZ; i++)
+ config_rom[i] = T1394_DATA32(config_rom[i]);
+
+ /* Make sure we update only what is necessary */
+ last_entry_offset = (IEEE1394_CONFIG_ROM_QUAD_SZ - hal->free_space);
+ if (last_entry_offset < hal->config_rom_update_amount)
+ last_entry_offset = hal->config_rom_update_amount;
+
+ hal->config_rom_update_amount = (IEEE1394_CONFIG_ROM_QUAD_SZ -
+ hal->free_space);
+
+ /* Clear the timer flag */
+ hal->config_rom_timer_set = B_FALSE;
+
+ /* Unlock the Config ROM buffer */
+ mutex_exit(&hal->local_config_rom_mutex);
+
+ /*
+ * The update_config_rom() call can return DDI_FAILURE if the
+ * HAL is shutdown.
+ */
+ (void) HAL_CALL(hal).update_config_rom(hal->halinfo.hal_private,
+ config_rom, last_entry_offset);
+
+ /* Initiate a bus reset */
+ ret = HAL_CALL(hal).bus_reset(hal->halinfo.hal_private);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_update_config_rom_callback_error,
+ S1394_TNF_SL_ERROR, "", tnf_string, msg,
+ "Error initiating bus reset");
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_update_config_rom_callback_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+}
diff --git a/usr/src/uts/common/io/1394/s1394_dev_disc.c b/usr/src/uts/common/io/1394/s1394_dev_disc.c
new file mode 100644
index 0000000000..ec5c77d48b
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394_dev_disc.c
@@ -0,0 +1,3421 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394_dev_disc.c
+ * 1394 Services Layer Device Discovery Routines
+ * This file contains the bus reset thread code, bus manager routines and
+ * various routines that are used to implement remote Config ROM reading.
+ *
+ * FUTURE:
+ * Rescan the bus if invalid nodes are seen.
+ * Investigate using a taskq for phase2 config rom reads.
+ * If we are reading the entire bus info blk, we should attempt
+ * a block read and fallback to quad reads if this fails.
+ */
+
+#include <sys/conf.h>
+#include <sys/sysmacros.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/sunndi.h>
+#include <sys/modctl.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/kstat.h>
+#include <sys/varargs.h>
+
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+#include <sys/1394/ieee1394.h>
+#include <sys/1394/ieee1212.h>
+
+/* hcmd_ret_t */
+typedef enum {
+ S1394_HCMD_INVALID,
+ S1394_HCMD_NODE_DONE,
+ S1394_HCMD_NODE_EXPECT_MORE,
+ S1394_HCMD_LOCK_FAILED
+} hcmd_ret_t;
+
+#define QUAD_TO_CFGROM_ADDR(b, n, q, addr) { \
+ uint64_t bl = (b); \
+ uint64_t nl = (n); \
+ addr = ((bl) << IEEE1394_ADDR_BUS_ID_SHIFT) | \
+ ((nl) << IEEE1394_ADDR_PHY_ID_SHIFT); \
+ addr += IEEE1394_CONFIG_ROM_ADDR + ((q) << 2); \
+}
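+
+/*
+ * For example (a sketch, assuming the standard CSR values
+ * IEEE1394_LOCAL_BUS == 0x3FF and
+ * IEEE1394_CONFIG_ROM_ADDR == 0xFFFFF0000400): node 2 on the local bus,
+ * quadlet 4, yields
+ *
+ *	addr = (0x3FF << 54) | (2 << 48) | (0xFFFFF0000400 + (4 << 2))
+ *	     = 0xFFC2FFFFF0000410
+ */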
+
+#define CFGROM_READ_PAUSE(d) \
+ ((s1394_cfgrom_read_delay_ms == 0) ? (void) 0 : \
+ delay(drv_usectohz((d) * 1000)))
+
+#define BUMP_CFGROM_READ_DELAY(n) \
+ (n)->cfgrom_read_delay += s1394_cfgrom_read_delay_incr
+
+#define CFGROM_GET_READ_DELAY(n, d) \
+ ((d) = (n)->cfgrom_read_delay)
+
+#define SETUP_QUAD_READ(n, reset_fails, quadlet, cnt) \
+{ \
+ int i = (reset_fails); \
+ if (i != 0) { \
+ (n)->cfgrom_read_fails = 0; \
+ (n)->cfgrom_read_delay = (uchar_t)s1394_cfgrom_read_delay_ms; \
+ } \
+ (n)->cfgrom_quad_to_read = (quadlet); \
+ (n)->cfgrom_quad_read_cnt = (cnt); \
+}
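+
+/*
+ * The retry pacing these macros implement, as used by the phase 1/2
+ * completion handlers below (a sketch): each failed read bumps the
+ * per-node delay by s1394_cfgrom_read_delay_incr, starting from
+ * s1394_cfgrom_read_delay_ms, before the retry is issued:
+ *
+ *	node->cfgrom_read_fails++;
+ *	BUMP_CFGROM_READ_DELAY(node);
+ *	CFGROM_GET_READ_DELAY(node, readdelay);
+ *	s1394_unlock_tree(hal);
+ *	CFGROM_READ_PAUSE(readdelay);
+ */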
+
+static void s1394_wait_for_events(s1394_hal_t *hal, int firsttime);
+
+static int s1394_wait_for_cfgrom_callbacks(s1394_hal_t *hal, uint_t wait_gen,
+ hcmd_ret_t(*handle_cmd_fn)(s1394_hal_t *hal, cmd1394_cmd_t *cmd));
+
+static void s1394_flush_cmplq(s1394_hal_t *hal);
+
+static void s1394_br_thread_exit(s1394_hal_t *hal);
+
+static void s1394_target_bus_reset_notifies(s1394_hal_t *hal,
+ t1394_localinfo_t *localinfo);
+
+static int s1394_alloc_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
+ s1394_status_t *status);
+
+static int s1394_cfgrom_scan_phase1(s1394_hal_t *hal);
+
+static hcmd_ret_t s1394_br_thread_handle_cmd_phase1(s1394_hal_t *hal,
+ cmd1394_cmd_t *cmd);
+
+static int s1394_cfgrom_scan_phase2(s1394_hal_t *hal);
+
+static hcmd_ret_t s1394_br_thread_handle_cmd_phase2(s1394_hal_t *hal,
+ cmd1394_cmd_t *cmd);
+
+static int s1394_read_config_quadlet(s1394_hal_t *hal, cmd1394_cmd_t *cmd,
+ s1394_status_t *status);
+
+static void s1394_cfgrom_read_callback(cmd1394_cmd_t *cmd);
+
+static void s1394_get_quad_info(cmd1394_cmd_t *cmd, uint32_t *node_num,
+ uint32_t *quadlet, uint32_t *data);
+
+static int s1394_match_GUID(s1394_hal_t *hal, s1394_node_t *nnode);
+
+static int s1394_match_all_GUIDs(s1394_hal_t *hal);
+
+static void s1394_become_bus_mgr(void *arg);
+
+static void s1394_become_bus_mgr_callback(cmd1394_cmd_t *cmd);
+
+static int s1394_bus_mgr_processing(s1394_hal_t *hal);
+
+static int s1394_do_bus_mgr_processing(s1394_hal_t *hal);
+
+static void s1394_bus_mgr_timers_stop(s1394_hal_t *hal,
+ timeout_id_t *bus_mgr_query_tid, timeout_id_t *bus_mgr_tid);
+
+static void s1394_bus_mgr_timers_start(s1394_hal_t *hal,
+ timeout_id_t *bus_mgr_query_tid, timeout_id_t *bus_mgr_tid);
+
+static int s1394_cycle_master_capable(s1394_hal_t *hal);
+
+static int s1394_do_phy_config_pkt(s1394_hal_t *hal, int new_root,
+ int new_gap_cnt, uint32_t IRM_flags);
+
+static void s1394_phy_config_callback(cmd1394_cmd_t *cmd);
+
+static int s1394_calc_next_quad(s1394_hal_t *hal, s1394_node_t *node,
+ uint32_t quadlet, uint32_t *nextquadp);
+
+static int s1394_cfgrom_read_retry_cnt = 3; /* 1 + 3 retries */
+static int s1394_cfgrom_read_delay_ms = 20; /* start with 20ms */
+static int s1394_cfgrom_read_delay_incr = 10; /* 10ms increments */
+static int s1394_enable_crc_validation = 0;
+static int s1394_turn_off_dir_stack = 0;
+static int s1394_crcsz_is_cfgsz = 0;
+static int s1394_enable_rio_pass1_workarounds = 0;
+
+/*
+ * s1394_br_thread()
+ * is the bus reset thread. Its sole purpose is to read/reread config roms
+ * as appropriate and to do the bus-reset-time work (bus manager
+ * processing, isoch resource reallocation, etc.).
+ */
+void
+s1394_br_thread(s1394_hal_t *hal)
+{
+ TNF_PROBE_0_DEBUG(s1394_br_thread_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ /* Initialize the Bus Mgr timers */
+ hal->bus_mgr_timeout_id = 0;
+ hal->bus_mgr_query_timeout_id = 0;
+
+ /* Initialize the completion Q */
+ mutex_enter(&hal->br_cmplq_mutex);
+ hal->br_cmplq_head = hal->br_cmplq_tail = NULL;
+ mutex_exit(&hal->br_cmplq_mutex);
+
+ s1394_wait_for_events(hal, 1);
+
+ for (;;) {
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_br_thread_wait,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ s1394_wait_for_events(hal, 0);
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_1_DEBUG(s1394_br_thread_restart,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_int, hal_instance, ddi_get_instance(hal->halinfo.dip));
+
+ /* stop bus manager timeouts, if needed */
+ s1394_bus_mgr_timers_stop(hal, &hal->bus_mgr_query_timeout_id,
+ &hal->bus_mgr_timeout_id);
+
+ s1394_flush_cmplq(hal);
+
+ /* start timers for checking bus manager, if needed */
+ s1394_bus_mgr_timers_start(hal, &hal->bus_mgr_query_timeout_id,
+ &hal->bus_mgr_timeout_id);
+
+ /* Try to reallocate all isoch resources */
+ s1394_isoch_rsrc_realloc(hal);
+
+ if (s1394_cfgrom_scan_phase1(hal) != DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(br_thread_phase1_restart,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ continue;
+ }
+
+ TNF_PROBE_1_DEBUG(s1394_br_thread_phase1_done,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_int, hal_instance, ddi_get_instance(hal->halinfo.dip));
+
+ if (s1394_bus_mgr_processing(hal) != DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(br_thread_bus_mgr_restart,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ continue;
+ }
+
+ TNF_PROBE_1_DEBUG(s1394_br_thread_bus_mgr_proc_done,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_int, hal_instance, ddi_get_instance(hal->halinfo.dip));
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ if (s1394_cfgrom_scan_phase2(hal) != DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(br_thread_phase2_restart,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ continue;
+ }
+
+ TNF_PROBE_1_DEBUG(s1394_br_thread_done,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_int, hal_instance, ddi_get_instance(hal->halinfo.dip));
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+ }
+}
+
+/*
+ * s1394_wait_for_events()
+ * blocks waiting for a cv_signal on the bus reset condition variable.
+ * Used by the bus reset thread for synchronizing with the bus reset/
+ * self id interrupt callback from the hal. Does CPR initialization the
+ * first time it is called. If the services layer sees a valid self id
+ * buffer, it builds the topology tree and signals the bus reset thread
+ * to read the config roms as appropriate (indicated by BR_THR_CFGROM_SCAN).
+ * If the services layer wishes to kill the bus reset thread, it does so
+ * by signaling a BR_THR_GO_AWAY event.
+ */
+static void
+s1394_wait_for_events(s1394_hal_t *hal, int firsttime)
+{
+ uint_t event;
+
+ TNF_PROBE_0_DEBUG(s1394_wait_for_events_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ ASSERT(MUTEX_NOT_HELD(&hal->br_thread_mutex));
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ if (firsttime)
+ CALLB_CPR_INIT(&hal->hal_cprinfo, &hal->br_thread_mutex,
+ callb_generic_cpr, "s1394_br_thread");
+
+ /* Check and wait for a BUS RESET */
+ mutex_enter(&hal->br_thread_mutex);
+ while ((event = hal->br_thread_ev_type) == 0) {
+ CALLB_CPR_SAFE_BEGIN(&hal->hal_cprinfo);
+ cv_wait(&hal->br_thread_cv, &hal->br_thread_mutex);
+ CALLB_CPR_SAFE_END(&hal->hal_cprinfo, &hal->br_thread_mutex);
+ }
+
+ if (event & BR_THR_GO_AWAY) {
+ TNF_PROBE_1(s1394_wait_for_events, S1394_TNF_SL_HOTPLUG_STACK,
+ "", tnf_string, msg, "Go away set");
+ s1394_br_thread_exit(hal);
+ TNF_PROBE_0_DEBUG(s1394_wait_for_events_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ /*NOTREACHED*/
+ return;
+ }
+
+ if (firsttime) {
+ TNF_PROBE_0_DEBUG(s1394_wait_for_events_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ mutex_exit(&hal->br_thread_mutex);
+ return;
+ }
+
+ mutex_enter(&hal->topology_tree_mutex);
+ if (event & BR_THR_CFGROM_SCAN) {
+ TNF_PROBE_2_DEBUG(s1394_wait_for_events_scan,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_int, br_thread_gen, hal->br_cfgrom_read_gen,
+ tnf_int, hal_generation, hal->generation_count);
+ }
+ hal->br_cfgrom_read_gen = hal->generation_count;
+
+ hal->br_thread_ev_type &= ~BR_THR_CFGROM_SCAN;
+ mutex_exit(&hal->topology_tree_mutex);
+ mutex_exit(&hal->br_thread_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_wait_for_events_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
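+
+/*
+ * For reference, the signaling side of this handshake (the bus reset/
+ * self id callback in the services layer) is expected to follow the
+ * usual condition variable pattern -- a sketch, not a quote of that
+ * code:
+ *
+ *	mutex_enter(&hal->br_thread_mutex);
+ *	hal->br_thread_ev_type |= BR_THR_CFGROM_SCAN;
+ *	cv_signal(&hal->br_thread_cv);
+ *	mutex_exit(&hal->br_thread_mutex);
+ */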
+
+/*
+ * s1394_wait_for_cfgrom_callbacks()
+ * Waits for completed config rom reads. Takes each completion off the
+ * completion queue and passes it to the "completion handler" function
+ * that was passed in as an argument. Further processing of the completion
+ * queue depends on the return status of the completion handler. If there
+ * is a bus reset while waiting for completions or if the services layer
+ * signals BR_THR_GO_AWAY, quits waiting for completions and returns
+ * non-zero. Also returns non-zero if completion handler returns
+ * S1394_HCMD_LOCK_FAILED. Returns 0 if config roms for all nodes have
+ * been dealt with.
+ */
+static int
+s1394_wait_for_cfgrom_callbacks(s1394_hal_t *hal, uint_t wait_gen,
+ hcmd_ret_t(*handle_cmd_fn)(s1394_hal_t *hal, cmd1394_cmd_t *cmd))
+{
+ cmd1394_cmd_t *cmd;
+ s1394_cmd_priv_t *s_priv;
+ int ret, done = 0;
+ hcmd_ret_t cmdret;
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_1_DEBUG(s1394_wait_for_cfgrom_callbacks_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_uint, wait_gen, wait_gen);
+
+ ret = DDI_SUCCESS;
+
+ while (!done) {
+ mutex_enter(&hal->br_cmplq_mutex);
+ mutex_enter(&hal->topology_tree_mutex);
+ while (wait_gen == hal->generation_count &&
+ (hal->br_thread_ev_type & BR_THR_GO_AWAY) == 0 &&
+ hal->br_cmplq_head == NULL) {
+ mutex_exit(&hal->topology_tree_mutex);
+ cv_wait(&hal->br_cmplq_cv, &hal->br_cmplq_mutex);
+ mutex_enter(&hal->topology_tree_mutex);
+ }
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ if (wait_gen != hal->generation_count ||
+ (hal->br_thread_ev_type & BR_THR_GO_AWAY) != 0) {
+
+#if !defined(NPROBE) && defined(TNF_DEBUG)
+ int hal_gen = hal->generation_count;
+#endif
+
+ mutex_exit(&hal->topology_tree_mutex);
+ mutex_exit(&hal->br_cmplq_mutex);
+ s1394_flush_cmplq(hal);
+ TNF_PROBE_1_DEBUG(s1394_wait_for_cfgrom_callbacks_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int, hal_gen,
+ hal_gen);
+ return (DDI_FAILURE);
+ }
+ mutex_exit(&hal->topology_tree_mutex);
+
+ if ((cmd = hal->br_cmplq_head) != NULL) {
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ hal->br_cmplq_head = s_priv->cmd_priv_next;
+ }
+ if (cmd == hal->br_cmplq_tail)
+ hal->br_cmplq_tail = NULL;
+ mutex_exit(&hal->br_cmplq_mutex);
+
+ if (cmd != NULL) {
+ if (cmd->bus_generation != wait_gen) {
+ TNF_PROBE_3(
+ s1394_wait_for_cfgrom_callbacks,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_string, msg, "command gen != wait_gen",
+ tnf_uint, cmd_gen, cmd->bus_generation,
+ tnf_uint, wait_gen, wait_gen);
+ (void) s1394_free_cmd(hal, &cmd);
+ continue;
+ }
+ cmdret = (*handle_cmd_fn)(hal, cmd);
+ TNF_PROBE_2_DEBUG(s1394_wait_for_cfgrom_callbacks,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_opaque, cmd, cmd, tnf_int, cmdret, cmdret);
+ ASSERT(cmdret != S1394_HCMD_INVALID);
+ if (cmdret == S1394_HCMD_LOCK_FAILED) {
+ /* flush completion queue */
+ ret = DDI_FAILURE;
+ s1394_flush_cmplq(hal);
+ break;
+ } else if (cmdret == S1394_HCMD_NODE_DONE) {
+ if (--hal->cfgroms_being_read == 0) {
+ /* All done */
+ break;
+ }
+ } else {
+ ASSERT(cmdret == S1394_HCMD_NODE_EXPECT_MORE);
+ done = 0;
+ }
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_wait_for_cfgrom_callbacks_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ return (ret);
+}
+
+/*
+ * s1394_flush_cmplq()
+ * Frees all cmds on the completion queue.
+ */
+static void
+s1394_flush_cmplq(s1394_hal_t *hal)
+{
+ s1394_cmd_priv_t *s_priv;
+ cmd1394_cmd_t *cmd, *tcmd;
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_flush_cmplq_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+
+ cmd = NULL;
+
+ do {
+ mutex_enter(&hal->br_cmplq_mutex);
+ cmd = hal->br_cmplq_head;
+ hal->br_cmplq_head = hal->br_cmplq_tail = NULL;
+ mutex_exit(&hal->br_cmplq_mutex);
+
+ while (cmd != NULL) {
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ tcmd = s_priv->cmd_priv_next;
+ TNF_PROBE_2_DEBUG(s1394_flush_cmplq,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_opaque, cmd,
+ cmd, tnf_uint, cmd_gen, cmd->bus_generation);
+ (void) s1394_free_cmd(hal, &cmd);
+ cmd = tcmd;
+ }
+
+ mutex_enter(&hal->br_cmplq_mutex);
+ cmd = hal->br_cmplq_head;
+ mutex_exit(&hal->br_cmplq_mutex);
+
+ } while (cmd != NULL);
+
+ TNF_PROBE_0_DEBUG(s1394_flush_cmplq_exit, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+
+}
+
+/*
+ * s1394_br_thread_exit()
+ * Flushes the completion queue and calls thread_exit() (which effectively
+ * kills the bus reset thread).
+ */
+static void
+s1394_br_thread_exit(s1394_hal_t *hal)
+{
+ ASSERT(MUTEX_HELD(&hal->br_thread_mutex));
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+ TNF_PROBE_0(s1394_br_thread_exit_enter, S1394_TNF_SL_HOTPLUG_STACK, "");
+ s1394_flush_cmplq(hal);
+#ifndef __lock_lint
+ CALLB_CPR_EXIT(&hal->hal_cprinfo);
+#endif
+ hal->br_thread_ev_type &= ~BR_THR_GO_AWAY;
+ thread_exit();
+ /*NOTREACHED*/
+ TNF_PROBE_0(s1394_br_thread_exit_exit, S1394_TNF_SL_HOTPLUG_STACK, "");
+}
+
+/*
+ * s1394_target_bus_reset_notifies()
+ * tells the ndi event framework to invoke any callbacks registered for
+ * "bus reset event".
+ */
+static void
+s1394_target_bus_reset_notifies(s1394_hal_t *hal, t1394_localinfo_t *localinfo)
+{
+ ddi_eventcookie_t cookie;
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_2_DEBUG(s1394_target_bus_reset_notifies_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_uint, bus_gen,
+ localinfo->bus_generation, tnf_uint, node_id,
+ localinfo->local_nodeID);
+
+ if (ndi_event_retrieve_cookie(hal->hal_ndi_event_hdl, NULL,
+ DDI_DEVI_BUS_RESET_EVENT, &cookie, NDI_EVENT_NOPASS) ==
+ NDI_SUCCESS) {
+ (void) ndi_event_run_callbacks(hal->hal_ndi_event_hdl, NULL,
+ cookie, localinfo);
+ }
+ TNF_PROBE_0_DEBUG(s1394_target_bus_reset_notifies_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
+
+/*
+ * s1394_alloc_cfgrom()
+ * Allocates config rom for the node. Sets CFGROM_NEW_ALLOC bit in the
+ * node cfgrom state. Drops topology_tree_mutex around the call to
+ * kmem_zalloc(). If re-locking fails, returns DDI_FAILURE, else returns
+ * DDI_SUCCESS.
+ */
+static int
+s1394_alloc_cfgrom(s1394_hal_t *hal, s1394_node_t *node, s1394_status_t *status)
+{
+ uint32_t *cfgrom;
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_alloc_cfgrom_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+
+ *status = S1394_NOSTATUS;
+
+ /*
+ * if cfgrom is non-NULL, this has to be generation changed
+ * case (where we allocate cfgrom again to reread the cfgrom)
+ */
+ ASSERT(node->cfgrom == NULL || (node->cfgrom != NULL &&
+ CFGROM_GEN_CHANGED(node) == B_TRUE));
+
+ /*
+ * if node matched, either cfgrom has to be NULL or link should be
+ * off in the last matched node or config rom generations changed.
+ */
+ ASSERT(NODE_MATCHED(node) == B_FALSE || (NODE_MATCHED(node) == B_TRUE &&
+ (node->cfgrom == NULL || LINK_ACTIVE(node->old_node) == B_FALSE) ||
+ CFGROM_GEN_CHANGED(node) == B_TRUE));
+
+ s1394_unlock_tree(hal);
+ cfgrom = (uint32_t *)kmem_zalloc(IEEE1394_CONFIG_ROM_SZ, KM_SLEEP);
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ kmem_free(cfgrom, IEEE1394_CONFIG_ROM_SZ);
+ *status |= S1394_LOCK_FAILED;
+ TNF_PROBE_1(s1394_alloc_cfgrom, S1394_TNF_SL_HOTPLUG_ERROR,
+ "", tnf_string, msg, "cannot relock the tree");
+ TNF_PROBE_0_DEBUG(s1394_alloc_cfgrom_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+ node->cfgrom = cfgrom;
+ node->cfgrom_size = IEEE1394_CONFIG_ROM_QUAD_SZ;
+ SET_CFGROM_NEW_ALLOC(node);
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ TNF_PROBE_3(s1394_alloc_cfgrom_exit, S1394_TNF_SL_HOTPLUG_STACK,
+ "cfgrom alloc", tnf_uint, hal_gen, hal->generation_count, tnf_uint,
+ node_num, node->node_num, tnf_opaque, cfgrom, cfgrom);
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_free_cfgrom()
+ * Marks the config rom invalid and frees it up based on the options.
+ */
+void
+s1394_free_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
+ s1394_free_cfgrom_t options)
+{
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ ASSERT(node->cfgrom != NULL);
+
+ TNF_PROBE_0_DEBUG(s1394_free_cfgrom_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+
+ if (options == S1394_FREE_CFGROM_BOTH) {
+ /*
+ * free in both old and new trees; will be called with
+ * new node.
+ */
+ s1394_node_t *onode = node->old_node;
+
+ ASSERT(onode != NULL);
+ if (NODE_MATCHED(node) == B_TRUE && onode->cfgrom != NULL)
+ ASSERT(onode->cfgrom == node->cfgrom);
+
+ TNF_PROBE_4(s1394_free_cfgrom_both,
+ S1394_TNF_SL_HOTPLUG_STACK, "cfgrom free", tnf_uint,
+ hal_gen, hal->generation_count, tnf_int, node_num,
+ node->node_num, tnf_opaque, old_cfgrom, onode->cfgrom,
+ tnf_opaque, cfgrom, node->cfgrom);
+
+ if (onode != NULL && onode->cfgrom != NULL && onode->cfgrom !=
+ node->cfgrom)
+ kmem_free(onode->cfgrom, IEEE1394_CONFIG_ROM_SZ);
+
+ kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
+ onode->cfgrom = NULL;
+ node->cfgrom = NULL;
+
+ CLEAR_CFGROM_STATE(onode);
+ CLEAR_CFGROM_STATE(node);
+
+ } else if (options == S1394_FREE_CFGROM_NEW) {
+
+ TNF_PROBE_2(s1394_free_cfgrom_new,
+ S1394_TNF_SL_HOTPLUG_STACK, "cfgrom free",
+ tnf_int, node_num, node->node_num,
+ tnf_opaque, cfgrom, node->cfgrom);
+
+ ASSERT(CFGROM_NEW_ALLOC(node) == B_TRUE);
+ kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
+ CLEAR_CFGROM_NEW_ALLOC(node);
+ node->cfgrom = NULL;
+ CLEAR_CFGROM_STATE(node);
+
+ } else if (options == S1394_FREE_CFGROM_OLD) {
+
+ /* freeing in old tree */
+ TNF_PROBE_2_DEBUG(s1394_free_cfgrom_old,
+ S1394_TNF_SL_HOTPLUG_STACK, "cfgrom free",
+ tnf_int, node_num, node->node_num,
+ tnf_opaque, cfgrom, node->cfgrom);
+ kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
+ node->cfgrom = NULL;
+ CLEAR_CFGROM_STATE(node);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_free_cfgrom_exit, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+}
+
+/*
+ * s1394_copy_cfgrom()
+ * Copies config rom info from "from" node to "to" node. Clears
+ * CFGROM_NEW_ALLOC bit in cfgrom state in both nodes. (CFGROM_NEW_ALLOC
+ * acts as a reference count. If set, only the node in the current tree
+ * has a pointer to it; if clear, both the node in the current tree as
+ * well as the corresponding node in the old tree point to the same memory).
+ */
+void
+s1394_copy_cfgrom(s1394_node_t *to, s1394_node_t *from)
+{
+ TNF_PROBE_3_DEBUG(s1394_copy_cfgrom_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "", tnf_int, to_node, to->node_num, tnf_int,
+ from_node, from->node_num, tnf_opaque, from_cfgrom, from->cfgrom);
+
+ ASSERT(to->cfgrom == NULL);
+
+ to->cfgrom = from->cfgrom;
+ to->cfgrom_state = from->cfgrom_state;
+ to->cfgrom_valid_size = from->cfgrom_valid_size;
+ to->cfgrom_size = from->cfgrom_size;
+ to->node_state = from->node_state;
+
+ bcopy(from->dir_stack, to->dir_stack,
+ offsetof(s1394_node_t, cfgrom_quad_to_read) -
+ offsetof(s1394_node_t, dir_stack));
+
+ to->cfgrom_quad_to_read = from->cfgrom_quad_to_read;
+
+ CLEAR_CFGROM_NEW_ALLOC(to);
+ CLEAR_CFGROM_NEW_ALLOC(from);
+
+ /*
+ * old link off, new link on => handled in s1394_cfgrom_scan_phase1
+ * old link on, new link off => handled in s1394_process_old_tree
+ */
+ if (LINK_ACTIVE(from) == B_FALSE) {
+ /*
+ * if last time around, link was off, there wouldn't
+ * have been config rom allocated.
+ */
+ ASSERT(from->cfgrom == NULL);
+ TNF_PROBE_0_DEBUG(s1394_copy_cfgrom_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return;
+ } else {
+ s1394_selfid_pkt_t *selfid_pkt = to->selfid_packet;
+
+ if (IEEE1394_SELFID_ISLINKON(selfid_pkt))
+ SET_LINK_ACTIVE(to);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_copy_cfgrom_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
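+
+/*
+ * Ownership sketch: after s1394_alloc_cfgrom() only the new-tree node
+ * points at the buffer (CFGROM_NEW_ALLOC set); after s1394_copy_cfgrom()
+ * both trees share one buffer (bit clear), so a shared buffer must be
+ * released through the S1394_FREE_CFGROM_BOTH option above to avoid a
+ * double free or a leak.
+ */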
+
+/*
+ * s1394_read_bus_info_blk()
+ * Attempts to kick off reading IEEE1212_NODE_CAP_QUAD quad or quad 0.
+ * Increments cfgroms_being_read by 1. Returns DDI_SUCCESS if the
+ * command was issued, else sets status to the failure reason and
+ * returns DDI_FAILURE.
+ */
+static int
+s1394_read_bus_info_blk(s1394_hal_t *hal, s1394_node_t *node,
+ s1394_status_t *status)
+{
+ uint32_t quadlet;
+ cmd1394_cmd_t *cmd;
+ uchar_t node_num;
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ ASSERT(LINK_ACTIVE(node) == B_TRUE);
+
+ node_num = node->node_num;
+
+ TNF_PROBE_2_DEBUG(s1394_read_bus_info_blk_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_uint, hal_gen,
+ hal->generation_count, tnf_int, node_num, node_num);
+
+ /*
+ * drop the topology lock around command allocation. Return failure
+ * if either command allocation fails or cannot reacquire the lock
+ */
+ s1394_unlock_tree(hal);
+ *status = S1394_NOSTATUS;
+
+ if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_read_bus_info_blk, S1394_TNF_SL_HOTPLUG_ERROR,
+ "", tnf_string, msg, "command allocation failed");
+ *status |= S1394_CMD_ALLOC_FAILED;
+ }
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ *status |= S1394_LOCK_FAILED;
+ TNF_PROBE_1(s1394_read_bus_info_blk, S1394_TNF_SL_HOTPLUG_ERROR,
+ "", tnf_string, msg, "unable to relock the tree");
+ /* free the cmd if it was allocated above */
+ if (((*status) & S1394_CMD_ALLOC_FAILED) == 0)
+ (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
+ }
+ if (((*status) & (S1394_CMD_ALLOC_FAILED | S1394_LOCK_FAILED)) != 0) {
+ TNF_PROBE_0_DEBUG(s1394_read_bus_info_blk_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* allocate cfgrom if needed */
+ if (node->cfgrom == NULL && s1394_alloc_cfgrom(hal, node, status) !=
+ DDI_SUCCESS) {
+ ASSERT(((*status) & S1394_LOCK_FAILED) != 0);
+ (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+ TNF_PROBE_1(s1394_read_bus_info_blk, S1394_TNF_SL_HOTPLUG_ERROR,
+ "", tnf_string, msg, "config rom allocation failed");
+ TNF_PROBE_0_DEBUG(s1394_read_bus_info_blk_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * if this is a matched node, read quad 2 (node capabilities) to
+ * see if the generation count changed.
+ */
+ quadlet = CFGROM_BIB_READ(node) ? IEEE1212_NODE_CAP_QUAD : 0;
+
+ /*
+ * read bus info block at 100Mbit. This will help us with the cases
+ * where LINK is slower than PHY; s1394 uses PHY speed till speed map
+ * is updated.
+ */
+ cmd->completion_callback = s1394_cfgrom_read_callback;
+ cmd->bus_generation = hal->generation_count;
+ cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
+ CMD1394_OVERRIDE_ADDR | CMD1394_OVERRIDE_SPEED);
+ cmd->cmd_speed = IEEE1394_S100;
+ cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
+
+ QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num,
+ quadlet, cmd->cmd_addr);
+
+ TNF_PROBE_3_DEBUG(s1394_read_bus_info_blk,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_uint, hal_gen,
+ hal->generation_count, tnf_int, node_num, node_num, tnf_uint,
+ quadlet, quadlet);
+
+ TNF_PROBE_5_DEBUG(s1394_read_bus_info_blk,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int,
+ node_num, node_num, tnf_int, parsed, CFGROM_PARSED(node), tnf_int,
+ matched, NODE_MATCHED(node), tnf_int, visited,
+ NODE_VISITED(node), tnf_int, generation_changed,
+ CFGROM_GEN_CHANGED(node));
+
+ SETUP_QUAD_READ(node, 1, quadlet, 1);
+ if (s1394_read_config_quadlet(hal, cmd, status) != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_read_bus_info_blk, S1394_TNF_SL_HOTPLUG_ERROR,
+ "", tnf_string, msg, "Unable to start read");
+ /* free the command if it wasn't handed over to the HAL */
+ if (((*status) & S1394_CMD_INFLIGHT) == 0) {
+ (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
+ }
+ if (((*status) & S1394_LOCK_FAILED) != 0) {
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+ }
+ TNF_PROBE_0_DEBUG(s1394_read_bus_info_blk_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ hal->cfgroms_being_read++;
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_1_DEBUG(s1394_read_bus_info_blk_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int, cfgrom_read_cnt,
+ hal->cfgroms_being_read);
+
+ return (DDI_SUCCESS);
+}
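+
+/*
+ * For reference, the bus info block quadlets fetched above (assuming
+ * IEEE1394_BIB_QUAD_SZ == 5, per IEEE 1394-1995):
+ *
+ *	quad 0		info_length / crc_length / rom_crc_value
+ *	quad 1		the ASCII signature "1394"
+ *	quad 2		bus capabilities, including the config rom
+ *			generation (IEEE1212_NODE_CAP_QUAD)
+ *	quads 3 - 4	the node's 64-bit GUID
+ */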
+
+/*
+ * s1394_read_rest_of_cfgrom()
+ * Attempts to start reading node->cfgrom_quad_to_read quadlet. Increments
+ * cfgroms_being_read by 1 and returns DDI_SUCCESS if the command was
+ * issued, else sets status to the failure reason and returns DDI_FAILURE.
+ */
+int
+s1394_read_rest_of_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
+ s1394_status_t *status)
+{
+ cmd1394_cmd_t *cmd;
+ uchar_t node_num = node->node_num;
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ ASSERT(LINK_ACTIVE(node) == B_TRUE);
+
+ TNF_PROBE_2_DEBUG(s1394_read_rest_of_cfgrom_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_uint, hal_gen,
+ hal->generation_count, tnf_int, node_num, node_num);
+
+ /*
+ * drop the topology lock around command allocation. Return failure
+ * if either command allocation fails or cannot reacquire the lock
+ */
+ s1394_unlock_tree(hal);
+ *status = S1394_NOSTATUS;
+
+ if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
+ *status |= S1394_CMD_ALLOC_FAILED;
+ TNF_PROBE_1(s1394_read_rest_of_cfgrom,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "command allocation failed");
+ }
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ *status |= S1394_LOCK_FAILED;
+ /* free if we allocated a cmd above */
+ if (((*status) & S1394_CMD_ALLOC_FAILED) == 0)
+ (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
+ TNF_PROBE_1(s1394_read_rest_of_cfgrom,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "unable to relock the tree");
+ }
+ if (((*status) & (S1394_CMD_ALLOC_FAILED | S1394_LOCK_FAILED)) != 0) {
+ TNF_PROBE_0_DEBUG(s1394_read_rest_of_cfgrom_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ cmd->completion_callback = s1394_cfgrom_read_callback;
+ cmd->bus_generation = hal->generation_count;
+ cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
+ CMD1394_OVERRIDE_ADDR);
+ cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
+
+ TNF_PROBE_2_DEBUG(s1394_read_rest_of_cfgrom, S1394_TNF_SL_HOTPLUG_STACK,
+ "", tnf_uint, hal_gen, hal->generation_count, tnf_int, node_num,
+ node->node_num);
+
+ QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num,
+ node->cfgrom_quad_to_read, cmd->cmd_addr);
+ SETUP_QUAD_READ(node, 1, node->cfgrom_quad_to_read, 1);
+ if (s1394_read_config_quadlet(hal, cmd, status) != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_read_rest_of_cfgrom_exit,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "unable to start read");
+ /* free the command if it wasn't handed over to the HAL */
+ if (((*status) & S1394_CMD_INFLIGHT) == 0) {
+ (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
+ }
+ if (((*status) & S1394_LOCK_FAILED) != 0) {
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+ }
+ TNF_PROBE_0_DEBUG(s1394_read_rest_of_cfgrom_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ hal->cfgroms_being_read++;
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_1_DEBUG(s1394_read_rest_of_cfgrom_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int, cfgrom_read_cnt,
+ hal->cfgroms_being_read);
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_cfgrom_scan_phase1()
+ * Attempts to read bus info blocks for nodes as needed. Returns DDI_FAILURE
+ * if bus reset generations changed (as indicated by s1394_lock_tree()
+ * return status) or if any of the callees return failure, else returns
+ * DDI_SUCCESS.
+ */
+static int
+s1394_cfgrom_scan_phase1(s1394_hal_t *hal)
+{
+ uint32_t number_of_nodes;
+ int ret;
+ int node;
+ int wait_in_gen;
+ int wait_for_cbs;
+ uint_t hal_node_num;
+ uint_t hal_node_num_old;
+ s1394_node_t *nnode, *onode;
+ s1394_selfid_pkt_t *selfid_pkt;
+ s1394_status_t status;
+
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_scan_phase1_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_scan_phase1_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+ wait_for_cbs = 0;
+ number_of_nodes = hal->number_of_nodes;
+ hal->cfgroms_being_read = 0;
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ hal_node_num_old = IEEE1394_NODE_NUM(hal->old_node_id);
+ s1394_unlock_tree(hal);
+
+ ret = DDI_SUCCESS;
+
+ /* Send requests for all new node config ROM 0 */
+ for (node = 0; node < number_of_nodes; node++) {
+
+ status = S1394_UNKNOWN;
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ status = S1394_LOCK_FAILED;
+ break;
+ }
+
+ nnode = &hal->topology_tree[node];
+ onode = nnode->old_node;
+ /* if node matched, onode should be non NULL */
+ ASSERT(NODE_MATCHED(nnode) == B_FALSE || (NODE_MATCHED(nnode) ==
+ B_TRUE && onode != NULL));
+
+ /*
+ * Read bus info block if it is a brand new node (MATCHED is 0)
+ * or if matched but link was off in previous generations
+ * or if matched but had an invalid cfgrom in the last generation
+ * or if matched but config rom generation > 1 (this is to
+ * check if config rom generation changed between bus resets).
+ */
+ if ((node != hal_node_num) &&
+ ((NODE_MATCHED(nnode) == B_FALSE) ||
+ (NODE_MATCHED(nnode) == B_TRUE && LINK_ACTIVE(onode) ==
+ B_FALSE) || (NODE_MATCHED(nnode) == B_TRUE &&
+ (onode->cfgrom == NULL || CFGROM_VALID(onode) ==
+ B_FALSE)) || (NODE_MATCHED(nnode) == B_TRUE &&
+ nnode->cfgrom != NULL && CONFIG_ROM_GEN(nnode->cfgrom) >
+ 1))) {
+
+ SET_NODE_VISITED(nnode);
+ selfid_pkt = nnode->selfid_packet;
+ if (IEEE1394_SELFID_ISLINKON(selfid_pkt)) {
+
+ SET_LINK_ACTIVE(nnode);
+
+ status = S1394_UNKNOWN;
+
+ if (s1394_read_bus_info_blk(hal, nnode,
+ &status) != DDI_SUCCESS) {
+ if ((status & S1394_LOCK_FAILED) != 0)
+ break;
+ } else {
+ wait_for_cbs++;
+ wait_in_gen = hal->br_cfgrom_read_gen;
+ }
+ } else {
+ /*
+ * Special case: if link was active last
+ * time around, this should be treated as
+ * node going away.
+ */
+ CLEAR_LINK_ACTIVE(nnode);
+ if (NODE_MATCHED(nnode) == B_TRUE &&
+ LINK_ACTIVE(onode) == B_TRUE) {
+ CLEAR_CFGROM_STATE(nnode);
+ TNF_PROBE_3(s1394_cfgrom_scan_phase1,
+ S1394_TNF_SL_HOTPLUG_ERROR,
+ "", tnf_string, msg,
+ "link lost power", tnf_int, node,
+ node, tnf_int, onode,
+ onode->node_num);
+ }
+ }
+ } else {
+ if (node == hal_node_num) {
+ onode = &hal->old_tree[hal_node_num_old];
+ /* Set up the local matched nodes */
+ if (onode) {
+ nnode->old_node = onode;
+ SET_NODE_MATCHED(nnode);
+ SET_NODE_MATCHED(onode);
+ s1394_copy_cfgrom(nnode, onode);
+ }
+ }
+ }
+ s1394_unlock_tree(hal);
+ }
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ if ((status & S1394_LOCK_FAILED) != 0) {
+ TNF_PROBE_1(s1394_cfrom_scan_phase1_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_string, msg, "Generations changed");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * If we started any reads, wait for completion callbacks
+ */
+ if (wait_for_cbs != 0) {
+ ret = s1394_wait_for_cfgrom_callbacks(hal, wait_in_gen,
+ s1394_br_thread_handle_cmd_phase1);
+ }
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_cfrom_scan_phase1_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ return (ret);
+}
+
+/*
+ * s1394_br_thread_handle_cmd_phase1()
+ * Process the cmd completion for phase 1 config rom reads. If we
+ * successfully read IEEE1212_NODE_CAP_QUAD quadlet and config rom gen
+ * did not change, move targets hanging off the old node to the current
+ * node. If config rom generations change, alloc new config rom and start
+ * re-reading the new config rom. If all of the bus info block is read
+ * (as required), mark the node as CFGROM_BIB_READ. If a config rom read
+ * fails, retry unless there have been too many failures. The topology
+ * tree mutex is dropped and
+ * reacquired in this routine. If reacquiring fails, returns
+ * S1394_HCMD_LOCK_FAILED. If the entire bus info block is read, returns
+ * S1394_HCMD_NODE_DONE, else returns S1394_HCMD_NODE_EXPECT_MORE (to
+ * indicate not done with the node yet).
+ *
+ * If we cannot read any of the quadlets in the bus info block, cfgrom
+ * is marked invalid in this generation (a side effect of calling
+ * s1394_free_cfgrom()). We free cfgrom in this routine only if the failure
+ * is not due to bus generations changing.
+ */
+static hcmd_ret_t
+s1394_br_thread_handle_cmd_phase1(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
+{
+ s1394_target_t *t;
+ s1394_node_t *node, *onode;
+ uint32_t node_num, quadlet, data;
+ int freecmd, done, locked;
+ hcmd_ret_t cmdret;
+ uchar_t readdelay;
+ s1394_status_t status;
+
+ s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
+ ASSERT(quadlet == 0 || quadlet < IEEE1394_BIB_QUAD_SZ);
+
+ TNF_PROBE_0_DEBUG(s1394_br_thread_handle_cmd_phase1_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ cmdret = S1394_HCMD_NODE_EXPECT_MORE;
+
+ locked = 1;
+ freecmd = 1;
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_br_thread_handle_cmd_phase1,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "unable to lock tree");
+ locked = 0;
+ goto bail;
+ }
+
+ node = &hal->topology_tree[node_num];
+
+ if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
+
+ int reread = 0;
+
+ done = 0;
+
+ if (quadlet == IEEE1212_NODE_CAP_QUAD &&
+ CFGROM_BIB_READ(node)) {
+
+ int cur_gen = ((data & IEEE1394_BIB_GEN_MASK) >>
+ IEEE1394_BIB_GEN_SHIFT);
+
+ /*
+ * node->old_node can be NULL if this is a new node &
+ * we are doing a rescan
+ */
+ onode = node->old_node;
+ if (CONFIG_ROM_GEN(node->cfgrom) == cur_gen) {
+
+ if (CFGROM_PARSED(node) == B_TRUE) {
+ rw_enter(&hal->target_list_rwlock,
+ RW_WRITER);
+ /* Update the target list, if any */
+ if (onode != NULL &&
+ (t = onode->target_list) != NULL) {
+ node->target_list = t;
+ while (t != NULL) {
+ t->on_node = node;
+ t = t->target_sibling;
+ }
+ }
+ rw_exit(&hal->target_list_rwlock);
+ }
+ SET_NODE_MATCHED(node);
+ if (onode)
+ SET_NODE_MATCHED(onode);
+ node->cfgrom_quad_to_read =
+ IEEE1394_BIB_QUAD_SZ;
+ done++;
+ } else {
+
+ TNF_PROBE_4(s1394_br_thread_handle_cmd_phase1,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string,
+ msg, "config rom generation changed",
+ tnf_int, node_num, node_num,
+ tnf_int, cur_gen, cur_gen, tnf_int, old_gen,
+ CONFIG_ROM_GEN(node->cfgrom));
+
+ SET_CFGROM_GEN_CHANGED(node);
+ if (onode != NULL)
+ SET_CFGROM_GEN_CHANGED(onode);
+ /*
+ * Reset BIB_READ flag and start reading entire
+ * config rom.
+ */
+ CLEAR_CFGROM_BIB_READ(node);
+ reread = 1;
+
+ /*
+ * if generations changed, allocate cfgrom for
+ * the new generation. s1394_match_GUID() will
+ * free up the cfgrom from the old generation.
+ */
+ if (s1394_alloc_cfgrom(hal, node, &status) !=
+ DDI_SUCCESS) {
+ ASSERT((status & S1394_LOCK_FAILED) !=
+ 0);
+ ASSERT(MUTEX_NOT_HELD(&hal->
+ topology_tree_mutex));
+ locked = 0;
+ /* we failed to relock the tree */
+ goto bail;
+ }
+ }
+ }
+
+ /*
+ * we end up here if we don't have bus_info_blk for this
+ * node or if config rom generation changed.
+ */
+
+ /*
+ * Pass1 Rio bug workaround. Due to this bug, if we read
+ * past quadlet 5 of the config rom, the PCI bus gets wedged.
+ * Avoid the hang by not reading past quadlet 5.
+ * We identify a remote Rio by the node vendor id part of
+ * quad 3 (which is == SUNW == S1394_SUNW_OUI (0x80020)).
+ */
+ if (s1394_enable_rio_pass1_workarounds != 0) {
+ if ((quadlet == 3) && ((data >> 8) == S1394_SUNW_OUI)) {
+ node->cfgrom_size = IEEE1394_BIB_QUAD_SZ;
+ node->cfgrom_valid_size = IEEE1394_BIB_QUAD_SZ;
+ }
+ }
+
+ if (!done) {
+
+ if (reread)
+ quadlet = 0;
+ else
+ node->cfgrom[quadlet++] = data;
+
+ /* if we don't have the entire bus_info_blk... */
+ if (quadlet < IEEE1394_BIB_QUAD_SZ) {
+
+ CFGROM_GET_READ_DELAY(node, readdelay);
+ SETUP_QUAD_READ(node, 1, quadlet, 1);
+ s1394_unlock_tree(hal);
+ CFGROM_READ_PAUSE(readdelay);
+ /* get next quadlet */
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_3(
+ s1394_br_thread_handle_cmd_phase1,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_string, msg,
+ "unable to relock tree", tnf_uint,
+ node_num, node_num, tnf_int,
+ quad_to_read, quadlet);
+ locked = 0;
+ } else if (s1394_read_config_quadlet(hal, cmd,
+ &status) != DDI_SUCCESS) {
+ /*
+ * Failed to get going. If command was
+ * successfully handed over to the HAL,
+ * don't free it (it will get freed
+ * later in the callback).
+ */
+ TNF_PROBE_3(
+ s1394_br_thread_handle_cmd_phase1,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_string, msg,
+ "unable to read", tnf_uint,
+ node_num, node_num, tnf_int,
+ quad_to_read, quadlet);
+ if ((status & S1394_CMD_INFLIGHT) !=
+ 0) {
+ freecmd = 0;
+ }
+ if ((status & S1394_LOCK_FAILED) != 0) {
+ locked = 0;
+ } else {
+ if (CFGROM_NEW_ALLOC(node) ==
+ B_TRUE) {
+ s1394_free_cfgrom(hal,
+ node,
+ S1394_FREE_CFGROM_NEW);
+ } else {
+ CLEAR_CFGROM_STATE(
+ node);
+ }
+ }
+ done++;
+ } else {
+ freecmd = 0;
+ }
+ } else {
+ /* got all of bus_info_blk */
+ SET_CFGROM_BIB_READ(node);
+ if (node->cfgrom_size == IEEE1394_BIB_QUAD_SZ)
+ SET_CFGROM_ALL_READ(node);
+ node->cfgrom_quad_to_read = quadlet;
+ done++;
+ TNF_PROBE_3_DEBUG(
+ s1394_br_thread_handle_cmd_phase1,
+ S1394_TNF_SL_HOTPLUG_STACK,
+ "", tnf_string, msg, "read bus info blk",
+ tnf_int, node_num, node->node_num,
+ tnf_opaque, cfgrom, node->cfgrom);
+ }
+ }
+ } else {
+ done = 1;
+ node->cfgrom_read_fails++;
+ BUMP_CFGROM_READ_DELAY(node);
+
+ /* retry if not too many failures */
+ if (node->cfgrom_read_fails < s1394_cfgrom_read_retry_cnt) {
+ CFGROM_GET_READ_DELAY(node, readdelay);
+ SETUP_QUAD_READ(node, 0, quadlet, 1);
+ s1394_unlock_tree(hal);
+ CFGROM_READ_PAUSE(readdelay);
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_3(
+ s1394_br_thread_handle_cmd_phase1,
+ S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_string, msg,
+ "unable to relock tree", tnf_uint,
+ node_num, node_num, tnf_int,
+ quad_to_read, quadlet);
+ locked = 0;
+ } else if (s1394_read_config_quadlet(hal, cmd,
+ &status) != DDI_SUCCESS) {
+ /*
+ * Failed to get going. If command was
+ * successfully handed over to the HAL,
+ * don't free it (it will get freed
+ * later in the callback).
+ */
+ TNF_PROBE_3(
+ s1394_br_thread_handle_cmd_phase1,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string,
+ msg, "unable to re-read", tnf_uint,
+ node_num, node_num, tnf_int, quad_to_read,
+ quadlet);
+ if ((status & S1394_CMD_INFLIGHT) != 0) {
+ freecmd = 0;
+ }
+ if ((status & S1394_LOCK_FAILED) != 0) {
+ locked = 0;
+ } else {
+ if (CFGROM_NEW_ALLOC(node) == B_TRUE) {
+ s1394_free_cfgrom(hal, node,
+ S1394_FREE_CFGROM_NEW);
+ } else {
+ CLEAR_CFGROM_STATE(node);
+ }
+ }
+ } else {
+ done = 0;
+ freecmd = 0;
+ }
+ } else {
+ TNF_PROBE_4(s1394_br_thread_handle_cmd_phase1,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "retries exceeded", tnf_int, node_num, node_num,
+ tnf_int, quadlet, quadlet, tnf_opaque, cfgrom,
+ node->cfgrom);
+ if (CFGROM_NEW_ALLOC(node) == B_TRUE) {
+ s1394_free_cfgrom(hal, node,
+ S1394_FREE_CFGROM_NEW);
+ } else {
+ CLEAR_CFGROM_STATE(node);
+ }
+ }
+ }
+bail:
+ if (freecmd) {
+ (void) s1394_free_cmd(hal, &cmd);
+ }
+
+ if (done) {
+ cmdret = S1394_HCMD_NODE_DONE;
+ TNF_PROBE_2_DEBUG(s1394_br_thread_handle_cmd_phase1,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "done with node", tnf_int, node_num, node_num);
+ }
+
+ /* if we are bailing out because locking failed, locked == 0 */
+ if (locked == 0)
+ cmdret = S1394_HCMD_LOCK_FAILED;
+ else
+ s1394_unlock_tree(hal);
+
+ TNF_PROBE_0_DEBUG(s1394_br_thread_handle_cmd_phase1_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ return (cmdret);
+}
+
+/*
+ * s1394_cfgrom_scan_phase2()
+ * Handles phase 2 of bus reset processing. Matches GUIDs between old
+ * and new topology trees to identify which node moved where. Processes
+ * the old topology tree (involves offlining any nodes that got unplugged
+ * between the last generation and the current generation). Updates speed
+ * map, sets up physical AR request filer and does isoch resource
+ * realloc failure notification and bus reset notifications. Then resends
+ * any commands that were issued by targets while the reset was being
+ * processed. Finally, the current topology tree is processed. This involves
+ * reading config rom past the bus info block for new nodes and parsing
+ * the config rom, creating a devinfo for each unit directory found in the
+ * config rom.
+ * Returns DDI_FAILURE if there was bus reset during any of the function
+ * calls (as indicated by lock failures) or if any of the routine's
+ * callees return failure, else returns DDI_SUCCESS.
+ */
+static int
+s1394_cfgrom_scan_phase2(s1394_hal_t *hal)
+{
+ int ret;
+ uint_t wait_gen;
+ int wait_for_cbs = 0;
+ t1394_localinfo_t localinfo;
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_scan_phase2_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_scan_phase2_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if (s1394_match_all_GUIDs(hal) == DDI_SUCCESS) {
+ s1394_unlock_tree(hal);
+ }
+
+ if (s1394_process_old_tree(hal) != DDI_SUCCESS) {
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+ TNF_PROBE_1(s1394_cfgrom_scan_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "non-success return from process_old_tree");
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_scan_phase2_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_cfgrom_scan_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "unable to relock the tree");
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_scan_phase2_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ s1394_update_speed_map_link_speeds(hal);
+ s1394_unlock_tree(hal);
+
+ /* Setup physical AR request filters */
+ s1394_physical_arreq_setup_all(hal);
+
+ /* Notify targets of isoch resource realloc failures */
+ s1394_isoch_rsrc_realloc_notify(hal);
+
+ /* Notify targets of the end of bus reset processing */
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_cfgrom_scan_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "unable to relock the tree after isoch notify");
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_scan_phase2_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ localinfo.bus_generation = hal->generation_count;
+ localinfo.local_nodeID = hal->node_id;
+
+ s1394_unlock_tree(hal);
+ s1394_target_bus_reset_notifies(hal, &localinfo);
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_cfgrom_scan_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "unable to relock the tree after reset notify");
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_scan_phase2_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Set HAL state to normal */
+ if (hal->disable_requests_bit == 0)
+ hal->hal_state = S1394_HAL_NORMAL;
+ else
+ hal->hal_state = S1394_HAL_DREQ;
+
+ s1394_unlock_tree(hal);
+
+ /* Flush the pending Q */
+ s1394_resend_pending_cmds(hal);
+
+ if (s1394_process_topology_tree(hal, &wait_for_cbs, &wait_gen)) {
+ TNF_PROBE_1(s1394_cfgrom_scan_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "non-success return from process_topology_tree");
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_scan_phase2_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+ return (DDI_FAILURE);
+ }
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_cfgrom_scan_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "unable to relock after processing topology tree");
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_scan_phase2_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ s1394_print_node_info(hal);
+
+ s1394_unlock_tree(hal);
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ ret = DDI_SUCCESS;
+
+ /*
+ * If we started any reads, wait for completion callbacks
+ */
+ if (wait_for_cbs != 0) {
+ ret = s1394_wait_for_cfgrom_callbacks(hal, wait_gen,
+ s1394_br_thread_handle_cmd_phase2);
+
+ TNF_PROBE_2_DEBUG(s1394_cfgrom_scan_phase2,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "returned from waiting for cfgrom callbacks", tnf_int, ret,
+ ret);
+ }
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_scan_phase2_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ return (ret);
+}
+
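+/*
+ * Illustrative sketch (editor's addition, not part of the driver): the
+ * phase-2 code above repeats one idiom -- lock the tree, do a short piece
+ * of work, unlock, and treat a lock failure as "a bus reset intervened,
+ * abort and let the caller restart".  The helper name and callback type
+ * below are invented for illustration only.
+ */
+static int
+s1394_with_tree_locked(s1394_hal_t *hal, void (*work)(s1394_hal_t *))
+{
+	if (s1394_lock_tree(hal) != DDI_SUCCESS)
+		return (DDI_FAILURE);	/* generation changed; caller restarts */
+	work(hal);			/* must be short and non-blocking */
+	s1394_unlock_tree(hal);
+	return (DDI_SUCCESS);
+}
+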
+/*
+ * s1394_br_thread_handle_cmd_phase2()
+ * Processes the command completion for phase 2 config rom reads. If all
+ * the needed quads have been read, validates the config rom; if the
+ * config rom is invalid (crc failures), frees the config rom, else marks
+ * the config rom valid and calls s1394_update_devinfo_tree() to parse it.
+ * If more quadlets are needed, attempts to kick off the next read and
+ * returns S1394_HCMD_NODE_EXPECT_MORE if the read was successfully
+ * started. If a bus reset is seen while in this routine, returns
+ * S1394_HCMD_LOCK_FAILED. If done with the node (with or without crc
+ * errors), returns S1394_HCMD_NODE_DONE, else returns
+ * S1394_HCMD_NODE_EXPECT_MORE (to indicate not done with the node yet).
+ */
+static hcmd_ret_t
+s1394_br_thread_handle_cmd_phase2(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
+{
+ s1394_node_t *node;
+ uint32_t node_num, quadlet, data;
+ int update_devinfo, locked, freecmd, done;
+ hcmd_ret_t cmdret;
+ uchar_t readdelay;
+ s1394_status_t status;
+
+ TNF_PROBE_0_DEBUG(s1394_br_thread_handle_cmd_phase2_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ /*
+ * we end up here if this is a brand new node or if it is a known node
+ * but the config ROM changed (and triggered a re-read).
+ */
+ s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
+ ASSERT(quadlet == IEEE1394_BIB_QUAD_SZ || quadlet <
+ IEEE1394_CONFIG_ROM_QUAD_SZ);
+
+ locked = freecmd = done = 1;
+ cmdret = S1394_HCMD_NODE_EXPECT_MORE;
+
+ update_devinfo = 0;
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_3(s1394_br_thread_handle_cmd_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "unable to lock tree", tnf_int, node_num, node_num,
+ tnf_int, quadlet, quadlet);
+ locked = 0;
+ goto bail;
+ }
+
+ node = &hal->topology_tree[node_num];
+
+ if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
+
+ ASSERT(CFGROM_BIB_READ(node) == B_TRUE);
+
+ node->cfgrom[quadlet] = data;
+
+ if (s1394_calc_next_quad(hal, node, quadlet, &quadlet) != 0) {
+ /*
+ * Done with this node. Mark config rom valid and
+ * update the devinfo tree for this node.
+ */
+ TNF_PROBE_4_DEBUG(s1394_br_thread_handle_cmd_phase2,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "all read", tnf_int, node_num, node->node_num,
+ tnf_opaque, cfgrom, node->cfgrom, tnf_int, quadlet,
+ quadlet);
+
+ node->cfgrom_valid_size = quadlet + 1;
+ if (s1394_valid_cfgrom(hal, node) == B_TRUE) {
+ SET_CFGROM_ALL_READ(node);
+ update_devinfo++;
+ } else {
+ s1394_free_cfgrom(hal, node,
+ S1394_FREE_CFGROM_BOTH);
+ }
+ } else {
+ CFGROM_GET_READ_DELAY(node, readdelay);
+ SETUP_QUAD_READ(node, 1, quadlet, 1);
+ s1394_unlock_tree(hal);
+ CFGROM_READ_PAUSE(readdelay);
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ locked = 0;
+ TNF_PROBE_3(s1394_br_thread_handle_cmd_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string,
+ msg, "unable to relock the tree",
+ tnf_int, node_num, node->node_num,
+ tnf_int, quadlet, quadlet);
+ } else if (s1394_read_config_quadlet(hal, cmd,
+ &status) != DDI_SUCCESS) {
+				/* give up on this node */
+ TNF_PROBE_3(s1394_br_thread_handle_cmd_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string,
+ msg, "cannot start quadlet read", tnf_int,
+ node_num, node_num, tnf_int, quadlet,
+ quadlet);
+
+ if ((status & S1394_CMD_INFLIGHT) != 0) {
+ freecmd = 0;
+ }
+ if ((status & S1394_LOCK_FAILED) != 0) {
+ locked = 0;
+ } else {
+ node->cfgrom_valid_size = quadlet;
+ if (s1394_valid_cfgrom(hal, node) ==
+ B_TRUE) {
+ SET_CFGROM_ALL_READ(node);
+ update_devinfo++;
+ } else {
+ s1394_free_cfgrom(hal, node,
+ S1394_FREE_CFGROM_BOTH);
+ }
+ }
+ } else {
+ /* successfully started next read */
+ done = 0;
+ freecmd = 0;
+ }
+ }
+ } else {
+ node->cfgrom_read_fails++;
+ BUMP_CFGROM_READ_DELAY(node);
+
+ /* retry if not too many failures */
+ if (node->cfgrom_read_fails < s1394_cfgrom_read_retry_cnt) {
+ CFGROM_GET_READ_DELAY(node, readdelay);
+ s1394_unlock_tree(hal);
+ SETUP_QUAD_READ(node, 0, quadlet, 1);
+ CFGROM_READ_PAUSE(readdelay);
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ locked = 0;
+ TNF_PROBE_3(s1394_br_thread_handle_cmd_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string,
+ msg, "unable to relock for reread",
+ tnf_int, node_num, node->node_num,
+ tnf_int, quadlet, quadlet);
+ } else if (s1394_read_config_quadlet(hal, cmd,
+ &status) != DDI_SUCCESS) {
+ if ((status & S1394_CMD_INFLIGHT) != 0) {
+ freecmd = 0;
+ }
+ if ((status & S1394_LOCK_FAILED) != 0) {
+ locked = 0;
+ } else {
+ /* stop further reads */
+ TNF_PROBE_4(
+ s1394_br_thread_handle_cmd_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_string, msg, "unable to retry",
+ tnf_int, node_num, node->node_num,
+ tnf_int, quadlet, quadlet,
+ tnf_opaque, cfgrom, node->cfgrom);
+ node->cfgrom_valid_size = quadlet + 1;
+ if (s1394_valid_cfgrom(hal, node) ==
+ B_TRUE) {
+ SET_CFGROM_ALL_READ(node);
+ update_devinfo++;
+ } else {
+ s1394_free_cfgrom(hal, node,
+ S1394_FREE_CFGROM_BOTH);
+ }
+ }
+ } else {
+ /* successfully started next read */
+ done = 0;
+ freecmd = 0;
+ }
+ } else {
+
+ TNF_PROBE_4(s1394_br_thread_handle_cmd_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "retries exceeded", tnf_int, node_num, node_num,
+ tnf_int, quadlet, quadlet, tnf_opaque, cfgrom,
+ node->cfgrom);
+
+ node->cfgrom_valid_size = quadlet + 1;
+ if (s1394_valid_cfgrom(hal, node) == B_TRUE) {
+ SET_CFGROM_ALL_READ(node);
+ update_devinfo++;
+ } else {
+ s1394_free_cfgrom(hal, node,
+ S1394_FREE_CFGROM_BOTH);
+ }
+ }
+ }
+bail:
+ if (freecmd) {
+ (void) s1394_free_cmd(hal, &cmd);
+ }
+
+ if (done) {
+ cmdret = S1394_HCMD_NODE_DONE;
+ TNF_PROBE_2_DEBUG(s1394_br_thread_handle_cmd_phase2,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "done with a node", tnf_int, node_num, node_num);
+ }
+
+ if (update_devinfo) {
+ ASSERT(locked);
+		/*
+		 * s1394_update_devinfo_tree() drops and reacquires the
+		 * topology_tree_mutex. If the tree lock fails, it returns
+		 * DDI_FAILURE. Set locked to 0 in this case so that we
+		 * will return S1394_HCMD_LOCK_FAILED below.
+		 */
+ if (s1394_update_devinfo_tree(hal, node) != DDI_SUCCESS) {
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+ locked = 0;
+ TNF_PROBE_2(s1394_br_thread_handle_cmd_phase2,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "update devinfo returned failure", tnf_int,
+ node_num, node_num);
+ }
+ }
+
+ /* if we are bailing out because locking failed, locked == 0 */
+ if (locked == 0)
+ cmdret = S1394_HCMD_LOCK_FAILED;
+ else
+ s1394_unlock_tree(hal);
+
+ TNF_PROBE_1_DEBUG(s1394_br_thread_handle_cmd_phase2_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int, cmdret, (int)cmdret);
+
+ return (cmdret);
+}
+
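+/*
+ * Illustrative sketch (editor's addition): both phase handlers above fold
+ * their locked/done state flags into a return value the same way; a
+ * hypothetical helper makes the mapping explicit.
+ */
+static hcmd_ret_t
+s1394_flags_to_cmdret(int locked, int done)
+{
+	if (locked == 0)
+		return (S1394_HCMD_LOCK_FAILED);	/* bus reset seen */
+	return (done ? S1394_HCMD_NODE_DONE : S1394_HCMD_NODE_EXPECT_MORE);
+}
+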
+/*
+ * s1394_read_config_quadlet()
+ * Starts the read of a config rom quadlet (the target address is
+ * computed into cmd_addr). Returns DDI_SUCCESS if the read was started
+ * with no errors, else returns DDI_FAILURE, with status indicating the
+ * reason for the failure(s).
+ */
+static int
+s1394_read_config_quadlet(s1394_hal_t *hal, cmd1394_cmd_t *cmd,
+ s1394_status_t *status)
+{
+ s1394_node_t *node;
+ int ret, err, node_num, quadlet;
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ node_num = IEEE1394_ADDR_PHY_ID(cmd->cmd_addr);
+ node = &hal->topology_tree[node_num];
+ quadlet = node->cfgrom_quad_to_read;
+
+ TNF_PROBE_2_DEBUG(s1394_read_config_quadlet_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_uint, node_num, node_num,
+ tnf_uint, quadlet, quadlet);
+
+ /* Calculate the 64-bit address */
+ QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num, quadlet,
+ cmd->cmd_addr);
+
+ *status = S1394_NOSTATUS;
+
+ ret = s1394_setup_asynch_command(hal, NULL, cmd, S1394_CMD_READ, &err);
+
+ if (ret != DDI_SUCCESS) {
+ *status |= S1394_UNKNOWN;
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ TNF_PROBE_3(s1394_read_config_quadlet,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "failure from setup asynch command", tnf_uint, node_num,
+ node_num, tnf_uint, quadlet, quadlet);
+ TNF_PROBE_0_DEBUG(s1394_read_config_quadlet_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ s1394_unlock_tree(hal);
+ ret = DDI_SUCCESS;
+ /* Send the command out */
+ if (s1394_xfer_asynch_command(hal, cmd, &err) == DDI_SUCCESS) {
+ /* Callers can expect a callback now */
+ *status |= S1394_CMD_INFLIGHT;
+ } else {
+
+ s1394_cmd_priv_t *s_priv;
+
+ TNF_PROBE_3(s1394_read_config_quadlet,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "failure from xfer asynch command",
+ tnf_int, quadlet, quadlet, tnf_int, node_num, node_num);
+
+ /* Remove from queue */
+ s1394_remove_q_asynch_cmd(hal, cmd);
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ *status |= S1394_XFER_FAILED;
+ ret = DDI_FAILURE;
+ }
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ *status |= S1394_LOCK_FAILED;
+ ret = DDI_FAILURE;
+ TNF_PROBE_1(s1394_read_config_quadlet,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "unable to relock the tree");
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_read_config_quadlet_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ return (ret);
+}
+
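+/*
+ * Illustrative sketch (editor's addition): what a macro such as
+ * QUAD_TO_CFGROM_ADDR conceptually computes.  In a 64-bit 1394 address
+ * the top 10 bits are the bus ID, the next 6 the physical ID, and config
+ * ROM quadlet 'q' lives at register-space offset 0xFFFFF0000400 + 4*q.
+ * The helper name is invented; the constants restate the architected
+ * layout rather than the driver's own macros.
+ */
+static uint64_t
+cfgrom_quad_to_addr(uint32_t bus, uint32_t node, uint32_t q)
+{
+	return (((uint64_t)(bus & 0x3FF) << 54) |
+	    ((uint64_t)(node & 0x3F) << 48) |
+	    (0xFFFFF0000400ULL + ((uint64_t)q << 2)));
+}
+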
+/*
+ * s1394_cfgrom_read_callback()
+ * Callback routine for config rom reads. Frees the command if it failed
+ * due to a bus reset; otherwise appends the command to the completion
+ * queue and signals the completion queue cv.
+ */
+static void
+s1394_cfgrom_read_callback(cmd1394_cmd_t *cmd)
+{
+ cmd1394_cmd_t *tcmd;
+ s1394_cmd_priv_t *s_priv;
+ s1394_hal_t *hal;
+
+#if defined(DEBUG)
+ uint32_t node_num, quadlet, data;
+#endif
+
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_read_callback_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ hal = (s1394_hal_t *)s_priv->sent_on_hal;
+
+#if defined(DEBUG)
+
+ s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
+
+ TNF_PROBE_5_DEBUG(s1394_cfgrom_read_callback,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_uint, gen, cmd->bus_generation,
+ tnf_int, quadlet, quadlet,
+ tnf_int, node_num, node_num,
+ tnf_int, data, data, tnf_int, result, cmd->cmd_result);
+#endif
+
+ if (cmd->cmd_result == CMD1394_EBUSRESET) {
+ (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
+ } else {
+ mutex_enter(&hal->br_cmplq_mutex);
+
+ /* Put the command on completion queue */
+ s_priv->cmd_priv_next = NULL;
+ if ((tcmd = hal->br_cmplq_tail) != NULL) {
+ s_priv = S1394_GET_CMD_PRIV(tcmd);
+
+ s_priv->cmd_priv_next = cmd;
+ }
+
+ hal->br_cmplq_tail = cmd;
+
+ if (hal->br_cmplq_head == NULL)
+ hal->br_cmplq_head = cmd;
+
+ cv_signal(&hal->br_cmplq_cv);
+ mutex_exit(&hal->br_cmplq_mutex);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_read_callback_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
+
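+/*
+ * Illustrative sketch (editor's addition): the completion queue fed by
+ * the callback above is an ordinary head/tail singly linked list; the
+ * append-and-signal below is the generic producer side of that pattern
+ * (simplified, invented types).
+ */
+typedef struct ex_item { struct ex_item *next; } ex_item_t;
+typedef struct ex_q {
+	kmutex_t mu;
+	kcondvar_t cv;
+	ex_item_t *head, *tail;
+} ex_q_t;
+
+static void
+ex_q_append(ex_q_t *q, ex_item_t *it)
+{
+	mutex_enter(&q->mu);
+	it->next = NULL;
+	if (q->tail != NULL)
+		q->tail->next = it;	/* link after current tail */
+	else
+		q->head = it;		/* queue was empty */
+	q->tail = it;
+	cv_signal(&q->cv);		/* wake the waiting consumer */
+	mutex_exit(&q->mu);
+}
+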
+/*
+ * s1394_cfgrom_parse_unit_dir()
+ * Parses the unit directory passed in and returns reg[2...5] of the reg
+ * property (see the 1275 binding for the reg property definition).
+ * Currently returns 0 for all the values, since none of the existing
+ * devices implement this and future devices, per P1212r, will need a
+ * binding change.
+/* ARGSUSED */
+void
+s1394_cfgrom_parse_unit_dir(uint32_t *unit_dir, uint32_t *addr_hi,
+ uint32_t *addr_lo, uint32_t *size_hi, uint32_t *size_lo)
+{
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_parse_unit_dir_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ *addr_hi = *addr_lo = *size_hi = *size_lo = 0;
+ TNF_PROBE_0_DEBUG(s1394_cfgrom_parse_unit_dir_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
+
+/*
+ * s1394_get_quad_info()
+ * Helper routine that picks apart a config rom read command: the node
+ * number and quadlet index encoded in the 1394 address, plus the
+ * returned quadlet data.
+ */
+static void
+s1394_get_quad_info(cmd1394_cmd_t *cmd, uint32_t *node_num, uint32_t *quadlet,
+ uint32_t *data)
+{
+ uint64_t addr;
+
+ TNF_PROBE_0_DEBUG(s1394_get_quad_info_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ addr = cmd->cmd_addr;
+ *node_num = IEEE1394_ADDR_PHY_ID(addr);
+ *quadlet = ((addr & IEEE1394_ADDR_OFFSET_MASK) -
+ IEEE1394_CONFIG_ROM_ADDR);
+ *quadlet = (*quadlet >> 2);
+ *data = T1394_DATA32(cmd->cmd_u.q.quadlet_data);
+
+ TNF_PROBE_0_DEBUG(s1394_get_quad_info_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
+
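+/*
+ * Illustrative sketch (editor's addition): the inverse of the address
+ * composition -- recover the physical ID and config ROM quadlet index
+ * from a 64-bit address.  The helper name is invented; the masks restate
+ * the architected layout.
+ */
+static void
+cfgrom_addr_to_quad(uint64_t addr, uint32_t *node, uint32_t *q)
+{
+	*node = (uint32_t)((addr >> 48) & 0x3F);	/* 6-bit phy ID */
+	*q = (uint32_t)(((addr & 0xFFFFFFFFFFFFULL) -
+	    0xFFFFF0000400ULL) >> 2);		/* offset from ROM base */
+}
+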
+/*
+ * s1394_match_GUID()
+ * attempts to match nnode (which is in the current topology tree) with
+ * a node in the old topology tree by comparing GUIDs. If a match is
+ * found, the old_node field of the current node and the cur_node field
+ * of the old node are set to point to each other. Also, this routine
+ * makes both nodes point at the same config rom. If unable to relock
+ * the tree, returns DDI_FAILURE, else returns DDI_SUCCESS.
+ */
+static int
+s1394_match_GUID(s1394_hal_t *hal, s1394_node_t *nnode)
+{
+ int old_node;
+ int gen_changed;
+ uint32_t old_a, old_b;
+ uint32_t new_a, new_b;
+ s1394_node_t *onode;
+ s1394_target_t *t;
+ int ret = DDI_SUCCESS;
+
+ TNF_PROBE_0_DEBUG(s1394_match_GUID_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+
+ ASSERT(nnode->cfgrom != NULL);
+ ASSERT(CFGROM_BIB_READ(nnode));
+
+ new_a = nnode->node_guid_hi;
+ new_b = nnode->node_guid_lo;
+
+ for (old_node = 0; old_node < hal->old_number_of_nodes; old_node++) {
+
+ onode = &hal->old_tree[old_node];
+ if (onode->cfgrom == NULL || CFGROM_BIB_READ(onode) == B_FALSE)
+ continue;
+
+ old_a = onode->node_guid_hi;
+ old_b = onode->node_guid_lo;
+
+ if ((old_a == new_a) && (old_b == new_b)) {
+
+ if (NODE_MATCHED(onode) == B_TRUE) {
+ TNF_PROBE_4(s1394_match_GUID_duplicate,
+ S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_uint, guid_hi, old_a,
+ tnf_uint, guid_lo, old_b,
+ tnf_uint, old_node_num, old_node,
+ tnf_uint, node_num, nnode->node_num);
+ cmn_err(CE_NOTE, "!Duplicate GUIDs: %08x%08x",
+ old_a, old_b);
+ /* offline the new node that last matched */
+ ret = s1394_offline_node(hal, onode->cur_node);
+ /* and make the current new node invalid */
+ ASSERT(CFGROM_NEW_ALLOC(nnode) == B_TRUE);
+ s1394_free_cfgrom(hal, nnode,
+ S1394_FREE_CFGROM_NEW);
+ break;
+ }
+
+ /*
+ * If there is indeed a cfgrom gen change,
+ * CFGROM_GEN_CHANGED() will be set iff we are matching
+ * tree nodes. Otherwise, CONFIG_ROM_GEN(old) !=
+ * CONFIG_ROM_GEN(new).
+ */
+ if (CFGROM_GEN_CHANGED(nnode) == B_TRUE ||
+ (CONFIG_ROM_GEN(onode->cfgrom) !=
+ CONFIG_ROM_GEN(nnode->cfgrom))) {
+ gen_changed = 1;
+ TNF_PROBE_4_DEBUG(s1394_match_GUID_gen_change,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_opaque, old_cfgrom, onode->cfgrom,
+ tnf_int, old_gen,
+ CONFIG_ROM_GEN(onode->cfgrom), tnf_opaque,
+ cfgrom, nnode->cfgrom, tnf_int, new_gen,
+ CONFIG_ROM_GEN(nnode->cfgrom));
+ } else {
+ gen_changed = 0;
+ }
+
+ onode->cur_node = nnode;
+ nnode->old_node = onode;
+ nnode->node_state = onode->node_state;
+ SET_NODE_VISITED(onode);
+ SET_NODE_MATCHED(onode);
+ SET_NODE_MATCHED(nnode);
+ /*
+ * If generations changed, need to offline any targets
+ * hanging off the old node, prior to freeing up old
+ * cfgrom. If the generations didn't change, we can
+ * free up the new config rom and copy all info from
+ * the old node (this helps in picking up further
+ * reads from where the last generation left off).
+ */
+ if (gen_changed == 1) {
+ if (s1394_offline_node(hal, onode)) {
+ ret = DDI_FAILURE;
+ break;
+ }
+ TNF_PROBE_2(s1394_match_GUID_gen_freecfg,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_opaque, old_cfgrom, onode->cfgrom,
+ tnf_opaque, new_cfgrom, nnode->cfgrom);
+ s1394_free_cfgrom(hal, onode,
+ S1394_FREE_CFGROM_OLD);
+ CLEAR_CFGROM_PARSED(nnode);
+ CLEAR_CFGROM_NEW_ALLOC(nnode);
+ CLEAR_CFGROM_NEW_ALLOC(onode);
+ onode->cfgrom = nnode->cfgrom;
+ /* done */
+ break;
+ }
+
+ /*
+ * Free up cfgrom memory in the new_node and
+ * point it at the same config rom as the old one.
+ */
+ if (onode->cfgrom != nnode->cfgrom) {
+
+ TNF_PROBE_5_DEBUG(s1394_match_GUID,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_int, node_num, nnode->node_num,
+ tnf_opaque, cfgrom, nnode->cfgrom,
+ tnf_int, old_node_num, old_node,
+ tnf_opaque, old_cfgrom, onode->cfgrom,
+ tnf_uint, cfgrom_state,
+ nnode->cfgrom_state);
+
+ ASSERT(CFGROM_NEW_ALLOC(nnode) == B_TRUE);
+ s1394_free_cfgrom(hal, nnode,
+ S1394_FREE_CFGROM_NEW);
+ }
+ nnode->cfgrom = onode->cfgrom;
+ nnode->cfgrom_state = onode->cfgrom_state;
+ nnode->cfgrom_valid_size = onode->cfgrom_valid_size;
+ nnode->cfgrom_size = onode->cfgrom_size;
+ nnode->cfgrom_quad_to_read = onode->cfgrom_quad_to_read;
+ bcopy(onode->dir_stack, nnode->dir_stack,
+ offsetof(s1394_node_t, cfgrom_quad_to_read) -
+ offsetof(s1394_node_t, dir_stack));
+ CLEAR_CFGROM_NEW_ALLOC(nnode);
+ CLEAR_CFGROM_NEW_ALLOC(onode);
+
+ if (CFGROM_PARSED(nnode) == B_TRUE) {
+ rw_enter(&hal->target_list_rwlock, RW_WRITER);
+ /* Update the target list */
+ if ((t = onode->target_list) != NULL) {
+ nnode->target_list = t;
+ while (t != NULL) {
+ t->on_node = nnode;
+ t = t->target_sibling;
+ }
+ }
+ rw_exit(&hal->target_list_rwlock);
+ }
+ break;
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_match_GUID_exit, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+
+ return (ret);
+}
+
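+/*
+ * Illustrative sketch (editor's addition): a 1394 GUID is 64 bits wide
+ * but is carried in the bus info block as two quadlets, so the match
+ * loop above compares the halves separately.  Equivalent packed form
+ * (helper invented):
+ */
+static boolean_t
+ex_guid_equal(uint32_t a_hi, uint32_t a_lo, uint32_t b_hi, uint32_t b_lo)
+{
+	uint64_t a = ((uint64_t)a_hi << 32) | a_lo;
+	uint64_t b = ((uint64_t)b_hi << 32) | b_lo;
+
+	return ((a == b) ? B_TRUE : B_FALSE);
+}
+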
+/*
+ * s1394_match_all_GUIDs()
+ * attempts to match each node in the current topology tree with a
+ * node in the old topology tree. If unable to relock the tree, returns
+ * DDI_FAILURE, else returns DDI_SUCCESS.
+ */
+static int
+s1394_match_all_GUIDs(s1394_hal_t *hal)
+{
+ int node;
+ int ret = DDI_SUCCESS;
+ s1394_node_t *nnode;
+
+ TNF_PROBE_0_DEBUG(s1394_match_all_GUIDs_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ for (node = 0; node < hal->number_of_nodes; node++) {
+ nnode = &hal->topology_tree[node];
+ if (LINK_ACTIVE(nnode) == B_FALSE || CFGROM_BIB_READ(nnode) ==
+ B_FALSE)
+ continue;
+ if (NODE_MATCHED(nnode)) {
+ /*
+ * Skip if node matched. If config rom generations
+ * changed, we want to call s1394_match_GUID() even
+ * if the nodes matched.
+ */
+ int gen_changed;
+ s1394_node_t *onode = nnode->old_node;
+
+ gen_changed = (onode && onode->cfgrom &&
+ CONFIG_ROM_GEN(onode->cfgrom) != CONFIG_ROM_GEN(
+ nnode->cfgrom)) ? 1 : 0;
+
+ if (CFGROM_GEN_CHANGED(nnode) == 0 && gen_changed == 0)
+ continue;
+ }
+
+ if (s1394_match_GUID(hal, nnode) == DDI_FAILURE) {
+ ret = DDI_FAILURE;
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_match_all_GUIDs_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ return (ret);
+}
+
+/*
+ * s1394_valid_cfgrom()
+ * Performs a CRC check on the config rom. Returns B_TRUE if the config
+ * rom has a good CRC, else returns B_FALSE.
+ */
+/* ARGSUSED */
+boolean_t
+s1394_valid_cfgrom(s1394_hal_t *hal, s1394_node_t *node)
+{
+ uint32_t crc_len, crc_value, CRC, CRC_old, quad0;
+
+ TNF_PROBE_0_DEBUG(s1394_valid_cfgrom_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ ASSERT(node->cfgrom);
+
+ if (s1394_enable_crc_validation == 0) {
+ TNF_PROBE_1_DEBUG(s1394_valid_cfgrom_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "validation turned off");
+ return (B_TRUE);
+ }
+
+ quad0 = node->cfgrom[0];
+ crc_len = (quad0 >> IEEE1394_CFG_ROM_CRC_LEN_SHIFT) &
+ IEEE1394_CFG_ROM_CRC_LEN_MASK;
+ crc_value = quad0 & IEEE1394_CFG_ROM_CRC_VALUE_MASK;
+
+ if (node->cfgrom_valid_size < crc_len + 1) {
+ TNF_PROBE_4(s1394_valid_cfgrom_not_enough,
+ S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_uint, node_guid_hi, node->node_guid_hi,
+ tnf_uint, node_guid_lo, node->node_guid_lo,
+ tnf_uint, crc_len, crc_len,
+ tnf_uint, valid_size, node->cfgrom_valid_size);
+ TNF_PROBE_0_DEBUG(s1394_valid_cfgrom_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (B_FALSE);
+ }
+
+ CRC = s1394_CRC16(&node->cfgrom[1], crc_len);
+
+ if (CRC != crc_value) {
+ CRC_old = s1394_CRC16_old(&node->cfgrom[1], crc_len);
+ if (CRC_old == crc_value) {
+ TNF_PROBE_4_DEBUG(s1394_valid_cfgrom_busted_crc,
+ S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_uint, node_guid_hi, node->node_guid_hi,
+ tnf_uint, node_guid_lo, node->node_guid_lo,
+ tnf_uint, node_num, node->node_num,
+ tnf_uint, crc_len, crc_len);
+ TNF_PROBE_0_DEBUG(s1394_valid_cfgrom_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (B_TRUE);
+ }
+
+ cmn_err(CE_NOTE,
+ "!Bad CRC in config rom (node's GUID %08x%08x)",
+ node->node_guid_hi, node->node_guid_lo);
+
+ TNF_PROBE_5(s1394_valid_cfgrom_bad_crc,
+ S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_uint, node_guid_hi, node->node_guid_hi,
+ tnf_uint, node_guid_lo, node->node_guid_lo,
+ tnf_uint, crc_len, crc_len,
+ tnf_uint, crc, crc_value, tnf_uint, crc_computed, CRC);
+ TNF_PROBE_0_DEBUG(s1394_valid_cfgrom_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (B_FALSE);
+ }
+
+ TNF_PROBE_3_DEBUG(s1394_valid_cfgrom_exit, S1394_TNF_SL_HOTPLUG_STACK,
+ "", tnf_uint, node_num, node->node_num, tnf_uint, crc_len, crc_len,
+ tnf_uint, crc, crc_value);
+
+ return (B_TRUE);
+}
+
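+/*
+ * Illustrative sketch (editor's addition): the ROM CRC used here is the
+ * IEEE 1212 16-bit CRC, computed 4 bits at a time over each quadlet.
+ * This is the textbook form from the standard; s1394_CRC16() is expected
+ * to be equivalent, with s1394_CRC16_old() covering a legacy variant
+ * seen on some early devices.
+ */
+static uint16_t
+ex_ieee1212_crc16(const uint32_t *quads, uint_t len)
+{
+	uint32_t crc = 0;
+	uint_t i;
+	int shift;
+
+	for (i = 0; i < len; i++) {
+		for (shift = 28; shift >= 0; shift -= 4) {
+			uint32_t sum = ((crc >> 12) ^
+			    (quads[i] >> shift)) & 0xF;
+			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
+		}
+		crc &= 0xFFFF;
+	}
+	return ((uint16_t)crc);
+}
+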
+/*
+ * s1394_valid_dir()
+ * Performs a CRC check on a directory. Returns B_TRUE if the directory
+ * has a good CRC, else returns B_FALSE.
+ */
+/*ARGSUSED*/
+boolean_t
+s1394_valid_dir(s1394_hal_t *hal, s1394_node_t *node,
+ uint32_t key, uint32_t *dir)
+{
+ uint32_t dir_len, crc_value, CRC, CRC_old, quad0;
+
+ TNF_PROBE_0_DEBUG(s1394_valid_dir_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+
+ /*
+ * Ideally, we would like to do crc validations for the entire cfgrom
+ * as well as the individual directories. However, we have seen devices
+ * that have valid directories but busted cfgrom crc and devices that
+ * have bad crcs in directories as well as for the entire cfgrom. This
+ * is sad, but unfortunately, real world!
+ */
+ if (s1394_enable_crc_validation == 0) {
+ TNF_PROBE_1_DEBUG(s1394_valid_dir_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "validation turned off");
+ return (B_TRUE);
+ }
+
+ quad0 = dir[0];
+
+ dir_len = IEEE1212_DIR_LEN(quad0);
+ crc_value = IEEE1212_DIR_CRC(quad0);
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ CRC = s1394_CRC16(&dir[1], dir_len);
+
+ if (CRC != crc_value) {
+ CRC_old = s1394_CRC16_old(&dir[1], dir_len);
+ if (CRC_old == crc_value) {
+ TNF_PROBE_5_DEBUG(s1394_valid_dir_crc_old,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_uint, node_guid_hi, node->node_guid_hi,
+ tnf_uint, node_guid_lo, node->node_guid_lo,
+ tnf_uint, node_num, node->node_num,
+ tnf_uint, key, key, tnf_uint, dir_len, dir_len);
+ TNF_PROBE_0_DEBUG(s1394_valid_dir_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (B_TRUE);
+ }
+
+ TNF_PROBE_5(s1394_valid_dir_bad_crc,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_uint, node_guid_hi, node->node_guid_hi,
+ tnf_uint, node_guid_lo, node->node_guid_lo,
+ tnf_uint, node_num, node->node_num,
+ tnf_uint, key, key, tnf_uint, dir_len, dir_len);
+
+ TNF_PROBE_0_DEBUG(s1394_valid_dir_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (B_FALSE);
+ }
+
+ TNF_PROBE_4_DEBUG(s1394_valid_dir,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_uint, node_guid_hi, node->node_guid_hi,
+ tnf_uint, node_guid_lo, node->node_guid_lo,
+ tnf_uint, node_num, node->node_num, tnf_uint, key, key);
+
+ return (B_TRUE);
+}
+
+/*
+ * s1394_become_bus_mgr()
+ * is a callback from a timeout() set up by the main br_thread. After
+ * a bus reset, depending on the Bus Manager's incumbency and the state
+ * of its abdicate bit, a timer of a certain length is set. After this
+ * time expires, the local host may attempt to become the Bus Manager.
+ * This is done by sending a request to the current IRM on the bus. The
+ * IRM holds the BUS_MANAGER_ID register. Depending on whether or not
+ * the local host is already the IRM, we will send a request onto the
+ * 1394 bus or call into the HAL.
+ */
+static void
+s1394_become_bus_mgr(void *arg)
+{
+ s1394_hal_t *hal;
+ s1394_cmd_priv_t *s_priv;
+ cmd1394_cmd_t *cmd;
+ uint64_t Bus_Mgr_ID_addr;
+ uint32_t hal_node_num;
+ uint32_t old_value;
+ uint32_t generation;
+ uint_t curr_bus_mgr;
+ uint_t bm_node;
+ uint_t IRM_node;
+ int err;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_become_bus_mgr_enter, S1394_TNF_SL_BR_STACK,
+ "");
+
+ hal = (s1394_hal_t *)arg;
+
+ /* Lock the topology tree */
+ mutex_enter(&hal->topology_tree_mutex);
+
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ generation = hal->generation_count;
+ IRM_node = hal->IRM_node;
+
+ mutex_enter(&hal->bus_mgr_node_mutex);
+ bm_node = hal->bus_mgr_node;
+ mutex_exit(&hal->bus_mgr_node_mutex);
+
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Make sure we aren't already the Bus Manager */
+ if (bm_node != -1) {
+ TNF_PROBE_0_DEBUG(s1394_become_bus_mgr_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return;
+ }
+
+ /* Send compare-swap to BUS_MANAGER_ID */
+ /* register on the Isoch Rsrc Mgr */
+ if (IRM_node == hal_node_num) {
+ /* Local */
+ ret = HAL_CALL(hal).csr_cswap32(hal->halinfo.hal_private,
+ generation, (IEEE1394_SCSR_BUSMGR_ID &
+ IEEE1394_CSR_OFFSET_MASK), S1394_INVALID_NODE_NUM,
+ hal_node_num, &old_value);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_become_bus_mgr_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "Error in cswap32");
+ TNF_PROBE_0_DEBUG(s1394_become_bus_mgr_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return;
+ }
+ curr_bus_mgr = IEEE1394_NODE_NUM(old_value);
+
+ mutex_enter(&hal->bus_mgr_node_mutex);
+ if ((curr_bus_mgr == S1394_INVALID_NODE_NUM) ||
+ (curr_bus_mgr == hal_node_num)) {
+ hal->bus_mgr_node = hal_node_num;
+ hal->incumbent_bus_mgr = B_TRUE;
+ } else {
+ hal->bus_mgr_node = curr_bus_mgr;
+ hal->incumbent_bus_mgr = B_FALSE;
+ }
+ cv_signal(&hal->bus_mgr_node_cv);
+ mutex_exit(&hal->bus_mgr_node_mutex);
+
+ } else {
+ /* Remote */
+ if (s1394_alloc_cmd(hal, T1394_ALLOC_CMD_NOSLEEP, &cmd) !=
+ DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_become_bus_mgr_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "Error in s1394_alloc_cmd()");
+ TNF_PROBE_0_DEBUG(s1394_become_bus_mgr_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return;
+ }
+
+ cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
+ CMD1394_OVERRIDE_ADDR);
+ cmd->cmd_type = CMD1394_ASYNCH_LOCK_32;
+ cmd->completion_callback = s1394_become_bus_mgr_callback;
+ Bus_Mgr_ID_addr = (IEEE1394_ADDR_BUS_ID_MASK |
+ IEEE1394_SCSR_BUSMGR_ID) |
+ (((uint64_t)hal->IRM_node) << IEEE1394_ADDR_PHY_ID_SHIFT);
+ cmd->cmd_addr = Bus_Mgr_ID_addr;
+ cmd->bus_generation = generation;
+ cmd->cmd_u.l32.arg_value = T1394_DATA32(
+ S1394_INVALID_NODE_NUM);
+ cmd->cmd_u.l32.data_value = T1394_DATA32(hal_node_num);
+ cmd->cmd_u.l32.num_retries = 0;
+ cmd->cmd_u.l32.lock_type = CMD1394_LOCK_COMPARE_SWAP;
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Lock the topology tree */
+ mutex_enter(&hal->topology_tree_mutex);
+
+ ret = s1394_setup_asynch_command(hal, NULL, cmd,
+ S1394_CMD_LOCK, &err);
+
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Command has now been put onto the queue! */
+ if (ret != DDI_SUCCESS) {
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
+ TNF_PROBE_1(s1394_become_bus_mgr_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "Error in s1394_setup_asynch_command()");
+ TNF_PROBE_0_DEBUG(s1394_become_bus_mgr_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return;
+ }
+
+ /* Send the command out */
+ ret = s1394_xfer_asynch_command(hal, cmd, &err);
+
+ if (ret != DDI_SUCCESS) {
+ /* Remove cmd outstanding request Q */
+ s1394_remove_q_asynch_cmd(hal, cmd);
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ mutex_enter(&hal->bus_mgr_node_mutex);
+
+ /* Don't know who the bus_mgr is */
+ hal->bus_mgr_node = S1394_INVALID_NODE_NUM;
+ hal->incumbent_bus_mgr = B_FALSE;
+
+ cv_signal(&hal->bus_mgr_node_cv);
+ mutex_exit(&hal->bus_mgr_node_mutex);
+
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_become_bus_mgr_exit, S1394_TNF_SL_BR_STACK,
+ "");
+}
+
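+/*
+ * Illustrative sketch (editor's addition): becoming Bus Manager is a
+ * compare-and-swap on the IRM's BUS_MANAGER_ID register -- "if it still
+ * holds the invalid node number, store mine".  A single-threaded model
+ * of the register's behavior (the bus serializes real lock requests):
+ */
+static uint32_t
+ex_cswap32(uint32_t *reg, uint32_t compare, uint32_t swap)
+{
+	uint32_t old = *reg;
+
+	if (old == compare)
+		*reg = swap;
+	return (old);	/* caller wins iff old was invalid (or itself) */
+}
+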
+/*
+ * s1394_become_bus_mgr_callback()
+ * is the callback used by s1394_become_bus_mgr() when it is necessary
+ * to send the Bus Manager request to a remote IRM. After the completion
+ * of the compare-swap request, this routine looks at the "old_value"
+ * in the request to determine whether or not it has become the Bus
+ * Manager for the current generation. It sets the bus_mgr_node and
+ * incumbent_bus_mgr fields to their appropriate values.
+ */
+static void
+s1394_become_bus_mgr_callback(cmd1394_cmd_t *cmd)
+{
+ s1394_cmd_priv_t *s_priv;
+ s1394_hal_t *hal;
+ uint32_t hal_node_num;
+ uint32_t temp;
+ uint_t curr_bus_mgr;
+
+ TNF_PROBE_0_DEBUG(s1394_become_bus_mgr_callback_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ hal = (s1394_hal_t *)s_priv->sent_on_hal;
+
+ /* Lock the topology tree */
+ mutex_enter(&hal->topology_tree_mutex);
+
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+
+ /* Was the command successful? */
+ if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
+ temp = T1394_DATA32(cmd->cmd_u.l32.old_value);
+ curr_bus_mgr = IEEE1394_NODE_NUM(temp);
+ mutex_enter(&hal->bus_mgr_node_mutex);
+ if ((curr_bus_mgr == S1394_INVALID_NODE_NUM) ||
+ (curr_bus_mgr == hal_node_num)) {
+
+ hal->bus_mgr_node = hal_node_num;
+ hal->incumbent_bus_mgr = B_TRUE;
+
+ } else {
+ hal->bus_mgr_node = curr_bus_mgr;
+ hal->incumbent_bus_mgr = B_FALSE;
+ }
+ cv_signal(&hal->bus_mgr_node_cv);
+ mutex_exit(&hal->bus_mgr_node_mutex);
+
+ } else {
+ TNF_PROBE_2(s1394_become_bus_mgr_callback_error,
+ S1394_TNF_SL_BR_ERROR, "", tnf_string, msg,
+ "Error while attempting to become bus manager",
+ tnf_uint, status, cmd->cmd_result);
+
+ mutex_enter(&hal->bus_mgr_node_mutex);
+
+ /* Don't know who the bus_mgr is */
+ hal->bus_mgr_node = S1394_INVALID_NODE_NUM;
+ hal->incumbent_bus_mgr = B_FALSE;
+
+ cv_signal(&hal->bus_mgr_node_cv);
+ mutex_exit(&hal->bus_mgr_node_mutex);
+ }
+
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
+
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_become_bus_mgr_callback_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_bus_mgr_processing()
+ * is called following "phase1" completion of reading Bus_Info_Blocks.
+ * Its purpose is to determine whether the local node is capable of
+ * becoming the Bus Manager (has the IRMC bit set) and if so to call
+ * the s1394_do_bus_mgr_processing() routine.
+ * NOTE: we overload DDI_FAILURE return value to mean jump back to
+ * the start of bus reset processing.
+ */
+static int
+s1394_bus_mgr_processing(s1394_hal_t *hal)
+{
+ int ret;
+ int IRM_node_num;
+
+ TNF_PROBE_0_DEBUG(s1394_bus_mgr_processing_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ IRM_node_num = hal->IRM_node;
+ s1394_unlock_tree(hal);
+
+ ret = DDI_SUCCESS;
+
+ /* If we are IRM capable, then do bus_mgr stuff... */
+ if (hal->halinfo.bus_capabilities & IEEE1394_BIB_IRMC_MASK) {
+ /* If there is an IRM, then do bus_mgr stuff */
+ if (IRM_node_num != -1) {
+ if (s1394_do_bus_mgr_processing(hal))
+ ret = DDI_FAILURE;
+ }
+ }
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_bus_mgr_processing_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (ret);
+}
+
+/*
+ * s1394_do_bus_mgr_processing()
+ * is used to perform those operations expected of the Bus Manager.
+ * After being called, s1394_do_bus_mgr_processing() looks at the value
+ * in bus_mgr_node and waits if it is -1 (Bus Manager has not been
+ * chosen yet). Then, if there is more than one node on the 1394 bus,
+ * and we are either the Bus Manager or (if there is no Bus Manager)
+ * the IRM, it optimizes the gap_count and/or sets the cycle master's
+ * root holdoff bit (to ensure that the cycle master is/stays root).
+ *
+ * NOTE: we overload DDI_FAILURE return value to mean jump back to
+ * the start of bus reset processing.
+ */
+static int
+s1394_do_bus_mgr_processing(s1394_hal_t *hal)
+{
+ int ret;
+ int IRM_flags, hal_bus_mgr_node;
+ int IRM_node_num;
+ uint_t hal_node_num, number_of_nodes;
+ int new_root, new_gap_cnt;
+
+ TNF_PROBE_0_DEBUG(s1394_do_bus_mgr_processing_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ /* Wait for Bus Manager to be determined */
+ /* or a Bus Reset to happen */
+ mutex_enter(&hal->bus_mgr_node_mutex);
+ if (hal->bus_mgr_node == -1)
+ cv_wait(&hal->bus_mgr_node_cv, &hal->bus_mgr_node_mutex);
+
+ /* Check if a BUS RESET has come while we've been waiting */
+ mutex_enter(&hal->br_thread_mutex);
+ if (hal->br_thread_ev_type & (BR_THR_CFGROM_SCAN | BR_THR_GO_AWAY)) {
+
+ mutex_exit(&hal->br_thread_mutex);
+ mutex_exit(&hal->bus_mgr_node_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_do_bus_mgr_processing_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (1);
+ }
+ mutex_exit(&hal->br_thread_mutex);
+
+ hal_bus_mgr_node = hal->bus_mgr_node;
+ mutex_exit(&hal->bus_mgr_node_mutex);
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ return (1);
+ }
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ IRM_node_num = hal->IRM_node;
+ number_of_nodes = hal->number_of_nodes;
+
+ ret = 0;
+
+	/* If there is more than one node on the bus, and we are */
+	/* either the bus_mgr or (if there is no bus_mgr) the IRM */
+ if ((number_of_nodes > 1) &&
+ ((hal_bus_mgr_node == (int)hal_node_num) ||
+ ((hal_bus_mgr_node == S1394_INVALID_NODE_NUM) &&
+ (IRM_node_num == (int)hal_node_num)))) {
+
+ IRM_flags = 0;
+
+ /* Make sure the root node is cycle master capable */
+ if (!s1394_cycle_master_capable(hal)) {
+ /* Make the local node root */
+ new_root = hal_node_num;
+ IRM_flags = IRM_flags | ROOT_HOLDOFF;
+
+ /* If setting root, then optimize gap_count */
+ new_gap_cnt = hal->optimum_gap_count;
+ IRM_flags = IRM_flags | GAP_COUNT;
+
+ } else {
+ /* Make sure root's ROOT_HOLDOFF bit is set */
+ new_root = (number_of_nodes - 1);
+ IRM_flags = IRM_flags | ROOT_HOLDOFF;
+ }
+ if (hal->gap_count > hal->optimum_gap_count) {
+ /* Set the gap_count to optimum */
+ new_gap_cnt = hal->optimum_gap_count;
+ IRM_flags = IRM_flags | GAP_COUNT;
+
+ }
+
+ s1394_unlock_tree(hal);
+
+ if (IRM_flags) {
+ ret = s1394_do_phy_config_pkt(hal, new_root,
+ new_gap_cnt, IRM_flags);
+ }
+ TNF_PROBE_0_DEBUG(s1394_do_bus_mgr_processing_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (ret);
+ }
+
+ s1394_unlock_tree(hal);
+
+ TNF_PROBE_0_DEBUG(s1394_do_bus_mgr_processing_exit,
+ S1394_TNF_SL_BR_STACK, "");
+ return (ret);
+}
+
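+/*
+ * Illustrative sketch (editor's addition): the root-holdoff/gap-count
+ * decision above, restated as a pure function (invented helper; the real
+ * code also picks new_root and new_gap_cnt alongside the flags).
+ */
+static uint32_t
+ex_bus_mgr_flags(int root_is_cmc, uint_t gap_cnt, uint_t optimum_gap_cnt)
+{
+	uint32_t flags = ROOT_HOLDOFF;	/* always (re)assert a root */
+
+	if (!root_is_cmc)
+		flags |= GAP_COUNT;	/* re-rooting: also optimize gap */
+	if (gap_cnt > optimum_gap_cnt)
+		flags |= GAP_COUNT;	/* gap count larger than needed */
+	return (flags);
+}
+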
+/*
+ * s1394_bus_mgr_timers_stop()
+ * Cancels bus manager timeouts
+ */
+/*ARGSUSED*/
+static void
+s1394_bus_mgr_timers_stop(s1394_hal_t *hal, timeout_id_t *bus_mgr_query_tid,
+ timeout_id_t *bus_mgr_tid)
+{
+ TNF_PROBE_0_DEBUG(s1394_bus_mgr_timers_stop_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ /* Cancel the Bus Mgr timeouts (if necessary) */
+ if (*bus_mgr_tid != 0) {
+ (void) untimeout(*bus_mgr_tid);
+ *bus_mgr_tid = 0;
+ }
+ if (*bus_mgr_query_tid != 0) {
+ (void) untimeout(*bus_mgr_query_tid);
+ *bus_mgr_query_tid = 0;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_bus_mgr_timers_stop_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_bus_mgr_timers_start()
+ * Starts bus manager timeouts if the hal is IRM capable.
+ */
+static void
+s1394_bus_mgr_timers_start(s1394_hal_t *hal, timeout_id_t *bus_mgr_query_tid,
+ timeout_id_t *bus_mgr_tid)
+{
+	boolean_t	incumbent;
+ uint_t hal_node_num;
+ int IRM_node_num;
+
+ TNF_PROBE_0_DEBUG(s1394_bus_mgr_timers_start_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ mutex_enter(&hal->topology_tree_mutex);
+
+ IRM_node_num = hal->IRM_node;
+ hal_node_num = hal->node_id;
+
+ mutex_enter(&hal->bus_mgr_node_mutex);
+	incumbent = hal->incumbent_bus_mgr;
+ mutex_exit(&hal->bus_mgr_node_mutex);
+
+ /* If we are IRM capable, then do bus_mgr stuff... */
+ if (hal->halinfo.bus_capabilities & IEEE1394_BIB_IRMC_MASK) {
+ /*
+ * If we are the IRM, then wait 625ms
+ * before checking BUS_MANAGER_ID register
+ */
+ if (IRM_node_num == IEEE1394_NODE_NUM(hal_node_num)) {
+
+ TNF_PROBE_0_DEBUG(s1394_bus_mgr_timers_625ms,
+ S1394_TNF_SL_BR_STACK, "");
+
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Wait 625ms, then check bus manager */
+ *bus_mgr_query_tid = timeout(s1394_become_bus_mgr,
+ hal, drv_usectohz(IEEE1394_BM_IRM_TIMEOUT));
+
+ mutex_enter(&hal->topology_tree_mutex);
+ }
+
+ /* If there is an IRM on the bus */
+ if (IRM_node_num != -1) {
+		if ((incumbent == B_TRUE) &&
+ (hal->abdicate_bus_mgr_bit == 0)) {
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Try to become bus manager */
+ s1394_become_bus_mgr(hal);
+
+ mutex_enter(&hal->topology_tree_mutex);
+ } else {
+ hal->abdicate_bus_mgr_bit = 0;
+
+ TNF_PROBE_0_DEBUG(s1394_bus_mgr_timers_125ms,
+ S1394_TNF_SL_BR_STACK, "");
+
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Wait 125ms, then try to become bus manager */
+ *bus_mgr_tid = timeout(s1394_become_bus_mgr,
+ hal, drv_usectohz(
+ IEEE1394_BM_INCUMBENT_TIMEOUT));
+
+ mutex_enter(&hal->topology_tree_mutex);
+ }
+ } else {
+ mutex_enter(&hal->bus_mgr_node_mutex);
+ hal->incumbent_bus_mgr = B_FALSE;
+ mutex_exit(&hal->bus_mgr_node_mutex);
+ }
+ }
+
+ mutex_exit(&hal->topology_tree_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_bus_mgr_timers_start_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * s1394_get_maxpayload()
+ * is used to determine a device's maximum payload size. That is to
+ * say, the largest packet that can be transmitted or received by the
+ * target device given the current topological (speed) constraints
+ * and the constraints specified in the local host's and remote device's
+ * Config ROM (max_rec). Caller must hold the topology_tree_mutex and
+ * the target_list_rwlock as an RW_READER (at least).
+ */
+/*ARGSUSED*/
+void
+s1394_get_maxpayload(s1394_target_t *target, uint_t *dev_max_payload,
+ uint_t *current_max_payload)
+{
+ s1394_hal_t *hal;
+ uint32_t bus_capabilities;
+ uint32_t from_node;
+ uint32_t to_node;
+ uint_t local_max_rec;
+ uint_t local_max_blk;
+ uint_t max_rec;
+ uint_t max_blk;
+ uint_t curr_speed;
+ uint_t speed_max_blk;
+ uint_t temp;
+
+ TNF_PROBE_0_DEBUG(s1394_get_maxpayload_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ /* Find the HAL this target resides on */
+ hal = target->on_hal;
+
+ /* Make sure we're holding the topology_tree_mutex */
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ /* Set dev_max_payload to local (HAL's) size */
+ bus_capabilities = target->on_hal->halinfo.bus_capabilities;
+ local_max_rec = (bus_capabilities & IEEE1394_BIB_MAXREC_MASK) >>
+ IEEE1394_BIB_MAXREC_SHIFT;
+ if ((local_max_rec > 0) && (local_max_rec < 14)) {
+ local_max_blk = 1 << (local_max_rec + 1);
+ } else {
+ /* These are either unspecified or reserved */
+ local_max_blk = 4;
+ }
+
+ /* Is this target on a node? */
+ if ((target->target_state & S1394_TARG_GONE) == 0 &&
+ (target->on_node != NULL)) {
+ ASSERT(target->on_node->cfgrom != NULL);
+
+ bus_capabilities =
+ target->on_node->cfgrom[IEEE1212_NODE_CAP_QUAD];
+ max_rec = (bus_capabilities & IEEE1394_BIB_MAXREC_MASK) >>
+ IEEE1394_BIB_MAXREC_SHIFT;
+
+ if ((max_rec > 0) && (max_rec < 14)) {
+ max_blk = 1 << (max_rec + 1);
+ } else {
+ /* These are either unspecified or reserved */
+ max_blk = 4;
+ }
+ (*dev_max_payload) = max_blk;
+
+ from_node = IEEE1394_NODE_NUM(target->on_hal->node_id);
+ to_node = (target->on_node->node_num);
+
+ /* Speed is to be filled in from speed map */
+ curr_speed = (uint_t)s1394_speed_map_get(target->on_hal,
+ from_node, to_node);
+ speed_max_blk = 512 << curr_speed;
+ temp = (local_max_blk < max_blk) ? local_max_blk : max_blk;
+ (*current_max_payload) = (temp < speed_max_blk) ? temp :
+ speed_max_blk;
+ } else {
+ /* Set dev_max_payload to local (HAL's) size */
+ (*dev_max_payload) = local_max_blk;
+ (*current_max_payload) = local_max_blk;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_get_maxpayload_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
+
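+/*
+ * Illustrative sketch (editor's addition): the arithmetic above in one
+ * place, with a worked example.  max_rec = 8 advertises 2^(8+1) = 512
+ * byte blocks; at S200 the speed cap is 512 << 1 = 1024 bytes; the
+ * usable payload is the minimum of the local block size, the remote
+ * block size and the speed cap.  Helper invented for illustration.
+ */
+static uint_t
+ex_current_max_payload(uint_t local_max_rec, uint_t remote_max_rec,
+    uint_t speed)
+{
+	uint_t lblk = (local_max_rec > 0 && local_max_rec < 14) ?
+	    (1U << (local_max_rec + 1)) : 4;	/* reserved -> 4 bytes */
+	uint_t rblk = (remote_max_rec > 0 && remote_max_rec < 14) ?
+	    (1U << (remote_max_rec + 1)) : 4;
+	uint_t sblk = 512U << speed;		/* S100=512, S200=1024, ... */
+	uint_t m = (lblk < rblk) ? lblk : rblk;
+
+	return ((m < sblk) ? m : sblk);
+}
+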
+/*
+ * s1394_cycle_master_capable()
+ * is used to determine whether or not the current root node on the
+ * 1394 bus has its CMC-bit set in its Config ROM. If not, then it
+ * is not capable of being cycle master and a new root node must be
+ * selected.
+ */
+static int
+s1394_cycle_master_capable(s1394_hal_t *hal)
+{
+ s1394_node_t *root;
+ int cycle_master_capable;
+ uint_t hal_node_num;
+
+ TNF_PROBE_0_DEBUG(s1394_cycle_master_capable_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+
+ /* Get a pointer to the root node */
+ root = s1394_topology_tree_get_root_node(hal);
+
+ /* Ignore, if we are already root */
+ if (root == &hal->topology_tree[hal_node_num]) {
+ TNF_PROBE_2_DEBUG(s1394_cmstr_capable_hal,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int,
+ node_num, hal_node_num, tnf_int, ret, 1);
+ return (1);
+ }
+
+ /*
+ * We want to pick a new root if link is off or we don't have
+ * valid config rom
+ */
+ if (LINK_ACTIVE(root) == B_FALSE || root->cfgrom == NULL ||
+ CFGROM_BIB_READ(root) == 0) {
+
+ TNF_PROBE_4_DEBUG(s1394_cmstr_capable_not_hal,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int,
+ root, root->node_num, tnf_int, link_active,
+ LINK_ACTIVE(root), tnf_opaque, cfgrom, root->cfgrom,
+ tnf_int, bib, CFGROM_BIB_READ(root));
+
+ return (0);
+ }
+
+ /* Check the Cycle Master bit in the Bus Info Block */
+ cycle_master_capable = root->cfgrom[IEEE1212_NODE_CAP_QUAD] &
+ IEEE1394_BIB_CMC_MASK;
+
+ if (cycle_master_capable) {
+ TNF_PROBE_1_DEBUG(s1394_cmstr_capable_root,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int,
+ root, root->node_num);
+ return (1);
+ } else {
+ TNF_PROBE_1(s1394_cmstr_not_capable_root,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int,
+ root, root->node_num);
+ return (0);
+ }
+}
+
+/*
+ * s1394_do_phy_config_pkt()
+ * is called by s1394_do_bus_mgr_processing() to setup and send out
+ * a PHY configuration packet onto the 1394 bus. Depending on the
+ * values in IRM_flags, the gap_count and root_holdoff bits on the
+ * bus will be affected by this packet.
+ *
+ * NOTE: we overload DDI_FAILURE return value to mean jump back to
+ * the start of bus reset processing.
+ */
+static int
+s1394_do_phy_config_pkt(s1394_hal_t *hal, int new_root, int new_gap_cnt,
+ uint32_t IRM_flags)
+{
+ cmd1394_cmd_t *cmd;
+ s1394_cmd_priv_t *s_priv;
+ h1394_cmd_priv_t *h_priv;
+ uint32_t pkt_data = 0;
+ uint32_t gap_cnt = 0;
+ uint32_t root = 0;
+ int ret, result;
+ uint_t flags = 0;
+
+ TNF_PROBE_0_DEBUG(s1394_do_phy_config_pkt_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ /* Gap count needs to be optimized */
+ if (IRM_flags & GAP_COUNT) {
+
+ pkt_data = pkt_data | IEEE1394_PHY_CONFIG_T_BIT_MASK;
+ gap_cnt = ((uint32_t)new_gap_cnt) <<
+ IEEE1394_PHY_CONFIG_GAP_CNT_SHIFT;
+ gap_cnt = gap_cnt & IEEE1394_PHY_CONFIG_GAP_CNT_MASK;
+ pkt_data = pkt_data | gap_cnt;
+
+ (void) HAL_CALL(hal).set_gap_count(hal->halinfo.hal_private,
+ (uint_t)new_gap_cnt);
+ }
+
+ /* Root node needs to be changed */
+ if (IRM_flags & ROOT_HOLDOFF) {
+
+ pkt_data = pkt_data | IEEE1394_PHY_CONFIG_R_BIT_MASK;
+ root = ((uint32_t)new_root) <<
+ IEEE1394_PHY_CONFIG_ROOT_HOLD_SHIFT;
+ root = root & IEEE1394_PHY_CONFIG_ROOT_HOLD_MASK;
+ pkt_data = pkt_data | root;
+
+ (void) HAL_CALL(hal).set_root_holdoff_bit(
+ hal->halinfo.hal_private);
+ }
+
+
+ if (IRM_flags) {
+ if (s1394_alloc_cmd(hal, flags, &cmd) != DDI_SUCCESS) {
+ TNF_PROBE_1_DEBUG(s1394_do_phy_config_pkt_error,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "Unable to allocate PHY config packet");
+ TNF_PROBE_0_DEBUG(s1394_do_phy_config_pkt_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (0);
+ }
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ /* lock tree failure indicates a bus gen change */
+ (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
+ TNF_PROBE_0_DEBUG(s1394_do_phy_config_pkt_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (1);
+ }
+
+ /* Setup the callback routine */
+ cmd->completion_callback = s1394_phy_config_callback;
+ cmd->cmd_callback_arg = (void *)(uintptr_t)IRM_flags;
+ cmd->bus_generation = hal->generation_count;
+ cmd->cmd_options = CMD1394_OVERRIDE_ADDR;
+ cmd->cmd_type = CMD1394_ASYNCH_WR_QUAD;
+ cmd->cmd_u.q.quadlet_data = pkt_data;
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Get a pointer to the HAL private struct */
+ h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
+
+ s_priv->sent_by_target = (s1394_target_t *)NULL;
+ s_priv->sent_on_hal = (s1394_hal_t *)hal;
+
+ h_priv->bus_generation = cmd->bus_generation;
+
+ /* Speed must be IEEE1394_S100 on PHY config packets */
+ s_priv->hal_cmd_private.speed = IEEE1394_S100;
+
+ /* Mark command as being used */
+ s_priv->cmd_in_use = B_TRUE;
+
+ s1394_unlock_tree(hal);
+
+ /* Put command on the HAL's outstanding request Q */
+ s1394_insert_q_asynch_cmd(hal, cmd);
+
+ ret = HAL_CALL(hal).send_phy_configuration_packet(
+ hal->halinfo.hal_private, (cmd1394_cmd_t *)cmd,
+ (h1394_cmd_priv_t *)&s_priv->hal_cmd_private, &result);
+
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_2_DEBUG(s1394_do_phy_config_pkt_error,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "Unable to send PHY config packet",
+ tnf_int, result, result);
+
+ (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
+
+ TNF_PROBE_0_DEBUG(s1394_do_phy_config_pkt_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (0);
+
+ } else {
+ /*
+ * There will be a bus reset only if GAP_COUNT changed
+ */
+ if (IRM_flags & GAP_COUNT) {
+ return (1);
+ }
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_do_phy_config_pkt_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (0);
+}
+
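+/*
+ * Illustrative sketch (editor's addition): the PHY configuration packet
+ * payload built above is one quadlet.  Per IEEE 1394-1995 the root ID
+ * occupies bits 29..24, R (force root) bit 23, T (gap count valid) bit
+ * 22 and the gap count bits 21..16; the raw shifts below restate that
+ * layout instead of using the driver's masks.  Helper invented.
+ */
+static uint32_t
+ex_phy_config_quadlet(uint_t root, uint_t gap_cnt, uint32_t irm_flags)
+{
+	uint32_t q = 0;
+
+	if (irm_flags & ROOT_HOLDOFF)
+		q |= (1U << 23) | ((root & 0x3F) << 24);
+	if (irm_flags & GAP_COUNT)
+		q |= (1U << 22) | ((gap_cnt & 0x3F) << 16);
+	return (q);
+}
+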
+/*
+ * s1394_phy_config_callback()
+ * is the callback called after the PHY configuration packet has been
+ * sent out onto the 1394 bus. Depending on the values in IRM_flags,
+ * (specifically if the gap_count has been changed) this routine may
+ * initiate a bus reset.
+ */
+static void
+s1394_phy_config_callback(cmd1394_cmd_t *cmd)
+{
+ s1394_cmd_priv_t *s_priv;
+ s1394_hal_t *hal;
+ uint32_t IRM_flags;
+
+ TNF_PROBE_0_DEBUG(s1394_phy_config_callback_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ hal = (s1394_hal_t *)s_priv->sent_on_hal;
+
+ IRM_flags = (uint32_t)(uintptr_t)cmd->cmd_callback_arg;
+
+ if (cmd->cmd_result != CMD1394_CMDSUCCESS) {
+ TNF_PROBE_2_DEBUG(s1394_do_phy_config_pkt_error,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "Error sending PHY config packet",
+ tnf_int, result, cmd->cmd_result);
+ (void) s1394_free_cmd(hal, &cmd);
+ } else {
+ (void) s1394_free_cmd(hal, &cmd);
+
+ /* Only need a bus reset if we changed GAP_COUNT */
+ if (IRM_flags & GAP_COUNT) {
+ s1394_initiate_hal_reset(hal, NON_CRITICAL);
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_phy_config_callback_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
+
+/*
+ * s1394_lock_tree()
+ * Attempts to lock the topology tree. Returns DDI_FAILURE if generations
+ * changed or if the services layer signals the bus reset thread to go
+ * away. Otherwise, returns DDI_SUCCESS.
+ */
+int
+s1394_lock_tree(s1394_hal_t *hal)
+{
+ int circular;
+
+ ASSERT(MUTEX_NOT_HELD(&hal->br_thread_mutex));
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_lock_tree_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+
+ mutex_enter(&hal->br_thread_mutex);
+ ndi_devi_enter(hal->halinfo.dip, &circular);
+ mutex_enter(&hal->topology_tree_mutex);
+
+ if ((hal->br_thread_ev_type & BR_THR_GO_AWAY) != 0) {
+ TNF_PROBE_2(s1394_lock_tree_go_away,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_int, hal_generation, hal->generation_count,
+ tnf_int, br_thread_gen, hal->br_cfgrom_read_gen);
+ TNF_PROBE_0_DEBUG(s1394_lock_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ mutex_exit(&hal->br_thread_mutex);
+ mutex_exit(&hal->topology_tree_mutex);
+ ndi_devi_exit(hal->halinfo.dip, circular);
+ return (DDI_FAILURE);
+ } else if (hal->br_cfgrom_read_gen != hal->generation_count) {
+ TNF_PROBE_2(s1394_lock_tree_gen_changed,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_int, hal_generation, hal->generation_count,
+ tnf_int, br_thread_gen, hal->br_cfgrom_read_gen);
+
+ TNF_PROBE_0_DEBUG(s1394_lock_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ mutex_exit(&hal->br_thread_mutex);
+ mutex_exit(&hal->topology_tree_mutex);
+ ndi_devi_exit(hal->halinfo.dip, circular);
+ return (DDI_FAILURE);
+ }
+
+ mutex_exit(&hal->br_thread_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_lock_tree_exit, S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_unlock_tree()
+ * Unlocks the topology tree
+ */
+void
+s1394_unlock_tree(s1394_hal_t *hal)
+{
+ TNF_PROBE_0_DEBUG(s1394_unlock_tree_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ mutex_exit(&hal->topology_tree_mutex);
+ ndi_devi_exit(hal->halinfo.dip, 0);
+
+ TNF_PROBE_0_DEBUG(s1394_unlock_tree_exit, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+}
+
+/*
+ * s1394_calc_next_quad()
+ * figures out the next quadlet to read. This maintains a stack of
+ * directories in the node. When the first quad of a directory (the
+ * first directory would be the root directory) is read, it is pushed
+ * onto this stack. When the directory is all read, it scans the
+ * directory looking for indirect entries. If any indirect directory
+ * entry is found, it is pushed on the stack and that directory is read.
+ * If we are done dealing with all entries in the current dir, the
+ * directory is popped off the stack. If the stack is empty, we are back
+ * at the root directory level and have essentially read the entire
+ * directory hierarchy.
+ * Returns 0 if there are more quads to read, else returns non-zero.
+ */
+static int
+s1394_calc_next_quad(s1394_hal_t *hal, s1394_node_t *node, uint32_t quadlet,
+ uint32_t *nextquadp)
+{
+ uint32_t data, type, key, value, *ptr;
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_4_DEBUG(s1394_calc_next_quad_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_uint, node_num, node->node_num,
+ tnf_uint, quadlet, quadlet, tnf_int, cfgrom_size, node->cfgrom_size,
+ tnf_uint, hal_gen, hal->generation_count);
+
+ if (((quadlet + 1) >= node->cfgrom_size) ||
+ (CFGROM_SIZE_IS_CRCSIZE(node) == B_TRUE && (quadlet + 1) >=
+ node->cfgrom_valid_size)) {
+ TNF_PROBE_0_DEBUG(s1394_calc_next_quad_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (1);
+ }
+
+ if (s1394_turn_off_dir_stack != 0 || CFGROM_DIR_STACK_OFF(node) ==
+ B_TRUE) {
+ quadlet++;
+ *nextquadp = quadlet;
+ TNF_PROBE_3_DEBUG(s1394_calc_next_quad_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "dir stack turned off", tnf_uint, quadlet, quadlet,
+ tnf_opaque, cfgrom, node->cfgrom);
+ return (0);
+ }
+
+ data = node->cfgrom[quadlet];
+
+ if (quadlet == IEEE1212_ROOT_DIR_QUAD) {
+ node->dir_stack_top = -1;
+ node->expected_dir_quad = quadlet;
+ node->expected_type = IEEE1212_IMMEDIATE_TYPE;
+ }
+
+ CFGROM_TYPE_KEY_VALUE(data, type, key, value);
+
+ /*
+ * check to make sure we are looking at a dir. If the config rom
+ * is broken, then revert to normal scanning of the config rom
+ */
+ if (node->expected_dir_quad == quadlet) {
+ if (type != 0 || key != 0) {
+ TNF_PROBE_3_DEBUG(s1394_calc_next_quad,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "bad directory turning off stack", tnf_uint,
+ quadlet, quadlet, tnf_uint, data, data);
+ SET_CFGROM_DIR_STACK_OFF(node);
+ quadlet = IEEE1212_ROOT_DIR_QUAD;
+ } else {
+ node->cur_dir_start = quadlet;
+ node->cur_dir_size = IEEE1212_DIR_LEN(data);
+ node->expected_dir_quad = 0;
+ /* get the next quad */
+ quadlet++;
+ }
+ } else {
+ /*
+ * If we read all quads in cur dir and the cur dir is not
+ * a leaf, scan for offsets (if the directory's CRC checks
+ * out OK). If we have a directory or a leaf, we save the
+ * current location on the stack and start reading that
+ * directory. So, we will end up with a depth first read of
+ * the entire config rom. If we are done with the current
+ * directory, pop it off the stack and continue the scanning
+ * as appropriate.
+ */
+ if (quadlet == node->cur_dir_start + node->cur_dir_size) {
+
+ int i, top;
+ boolean_t done_with_cur_dir = B_FALSE;
+
+ if (node->expected_type == IEEE1212_LEAF_TYPE) {
+ node->expected_type = IEEE1212_IMMEDIATE_TYPE;
+ done_with_cur_dir = B_TRUE;
+ TNF_PROBE_2_DEBUG(s1394_calc_next_quad,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_string, msg, "done with a leaf",
+ tnf_uint, quadlet, quadlet);
+ goto donewithcurdir;
+ }
+
+ ptr = &node->cfgrom[node->cur_dir_start];
+ CFGROM_TYPE_KEY_VALUE(*ptr, type, key, value);
+
+ /*
+ * If CRC for this directory is invalid, turn off
+ * dir stack and start re-reading from root dir.
+ * This wastes the work done thus far, but CRC
+ * errors in directories should be rather rare.
+ * if s1394_crcsz_is_cfgsz is set, then set
+			 * cfgrom_valid_size to the len specified as crc len
+ * in quadlet 0.
+ */
+ if (s1394_valid_dir(hal, node, key, ptr) == B_FALSE) {
+ SET_CFGROM_DIR_STACK_OFF(node);
+ if (s1394_crcsz_is_cfgsz != 0) {
+ SET_CFGROM_SIZE_IS_CRCSIZE(node);
+ node->cfgrom_valid_size =
+ ((node->cfgrom[0] >>
+ IEEE1394_CFG_ROM_CRC_LEN_SHIFT) &
+ IEEE1394_CFG_ROM_CRC_LEN_MASK);
+ TNF_PROBE_2(s1394_calc_next_quad,
+ S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_string, msg, "crc sz is cfg sz",
+ tnf_uint, size,
+ node->cfgrom_valid_size);
+ }
+ TNF_PROBE_2_DEBUG(s1394_calc_next_quad_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string,
+ msg, "crc error", tnf_uint, quadlet,
+ quadlet);
+ *nextquadp = IEEE1212_ROOT_DIR_QUAD;
+ return (0);
+ }
+ i = node->cur_dir_start + 1;
+ rescan:
+ for (done_with_cur_dir = B_FALSE; i <=
+ node->cur_dir_start + node->cur_dir_size; i++) {
+ data = node->cfgrom[i];
+ CFGROM_TYPE_KEY_VALUE(data, type, key, value);
+ /* read leaf type and directory types only */
+ if (type == IEEE1212_LEAF_TYPE || type ==
+ IEEE1212_DIRECTORY_TYPE) {
+
+ /*
+ * push current dir on stack; if the
+ * stack is overflowing, ie, too many
+ * directory level nestings, turn off
+ * dir stack and fall back to serial
+ * scanning, starting at root dir. This
+ * wastes all the work we have done
+ * thus far, but more than 16 levels
+ * of directories is rather odd...
+ */
+ top = ++node->dir_stack_top;
+ if (top == S1394_DIR_STACK_SIZE) {
+
+ TNF_PROBE_2_DEBUG(
+ s1394_calc_next_quad_exit,
+ S1394_TNF_SL_HOTPLUG_STACK,
+ "", tnf_string, msg,
+ "dir stack overflow",
+ tnf_uint, quadlet, quadlet);
+ SET_CFGROM_DIR_STACK_OFF(node);
+ *nextquadp =
+ IEEE1212_ROOT_DIR_QUAD;
+ return (0);
+ }
+
+ TNF_PROBE_3_DEBUG(
+ s1394_calc_next_quad,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_string, msg, "push dir stack",
+ tnf_uint, quadlet, quadlet,
+ tnf_int, top, top);
+
+ node->dir_stack[top].dir_start =
+ node->cur_dir_start;
+ node->dir_stack[top].dir_size =
+ node->cur_dir_size;
+ node->dir_stack[top].dir_next_quad =
+ i + 1;
+ /* and set the next quadlet to read */
+ quadlet = i + value;
+ node->expected_dir_quad = quadlet;
+ node->expected_type = type;
+ break;
+ }
+ }
+
+ donewithcurdir:
+
+ if ((i > node->cur_dir_start + node->cur_dir_size) ||
+ done_with_cur_dir == B_TRUE) {
+
+ /*
+ * all done with cur dir; pop it off the stack
+ */
+ if (node->dir_stack_top >= 0) {
+ TNF_PROBE_3_DEBUG(
+ s1394_calc_next_quad_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_string, msg, "pop dir stack",
+ tnf_uint, quadlet, quadlet,
+ tnf_int, top, node->dir_stack_top);
+ top = node->dir_stack_top--;
+ node->cur_dir_start =
+ node->dir_stack[top].dir_start;
+ node->cur_dir_size =
+ node->dir_stack[top].dir_size;
+ i = node->dir_stack[top].dir_next_quad;
+ goto rescan;
+ } else {
+ /*
+ * if empty stack, we are at the top
+ * level; declare done.
+ */
+ TNF_PROBE_1_DEBUG(
+ s1394_calc_next_quad_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_string, msg, "all done");
+ return (1);
+ }
+ }
+ } else {
+ /* get the next quadlet */
+ quadlet++;
+ }
+ }
+ *nextquadp = quadlet;
+
+ TNF_PROBE_1_DEBUG(s1394_calc_next_quad_exit, S1394_TNF_SL_HOTPLUG_STACK,
+ "", tnf_uint, next_quad, quadlet);
+
+ return (0);
+}
diff --git a/usr/src/uts/common/io/1394/s1394_fa.c b/usr/src/uts/common/io/1394/s1394_fa.c
new file mode 100644
index 0000000000..04a6f4ec7f
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394_fa.c
@@ -0,0 +1,268 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394_fa.c
+ * 1394 Services Layer Fixed Address Support Routines
+ * Currently used for FCP support.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+
+static void s1394_fa_completion_cb(cmd1394_cmd_t *cmd);
+
+/*
+ * s1394_fa_claim_addr()
+ * Claim fixed address block.
+ */
+int
+s1394_fa_claim_addr(s1394_hal_t *hal, s1394_fa_type_t type,
+ s1394_fa_descr_t *descr)
+{
+ t1394_alloc_addr_t addr;
+ s1394_fa_hal_t *falp = &hal->hal_fa[type];
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_fa_claim_addr_enter, S1394_TNF_SL_FA_STACK, "");
+
+ /* Might have been claimed already */
+ if (falp->fal_addr_blk != NULL) {
+ TNF_PROBE_0_DEBUG(s1394_fa_claim_addr_exit,
+ S1394_TNF_SL_FA_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ falp->fal_descr = descr;
+
+ bzero(&addr, sizeof (addr));
+ addr.aa_type = T1394_ADDR_FIXED;
+ addr.aa_address = descr->fd_addr;
+ addr.aa_length = descr->fd_size;
+ addr.aa_enable = descr->fd_enable;
+ addr.aa_evts = descr->fd_evts;
+ addr.aa_arg = hal;
+
+ ret = s1394_claim_addr_blk(hal, &addr);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_2(s1394_fa_claim_addr_error, S1394_TNF_SL_FA_ERROR,
+ "", tnf_int, type, type, tnf_int, ret, ret);
+ } else {
+ falp->fal_addr_blk = (s1394_addr_space_blk_t *)addr.aa_hdl;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_fa_claim_addr_exit, S1394_TNF_SL_FA_STACK, "");
+ return (ret);
+}
+
+/*
+ * s1394_fa_free_addr()
+ * Free fixed address block.
+ */
+void
+s1394_fa_free_addr(s1394_hal_t *hal, s1394_fa_type_t type)
+{
+ s1394_fa_hal_t *falp = &hal->hal_fa[type];
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_fa_free_addr_enter, S1394_TNF_SL_FA_STACK, "");
+
+ /* Might have been freed already */
+ if (falp->fal_addr_blk != NULL) {
+ ret = s1394_free_addr_blk(hal, falp->fal_addr_blk);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_fa_free_addr_error,
+ S1394_TNF_SL_FA_STACK, "", tnf_int, ret, ret);
+ }
+ falp->fal_addr_blk = NULL;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_fa_free_addr_exit, S1394_TNF_SL_FA_STACK, "");
+}
+
+/*
+ * s1394_fa_list_add()
+ * Add target to the list of FA clients.
+ * target_list_rwlock should be writer-held.
+ */
+void
+s1394_fa_list_add(s1394_hal_t *hal, s1394_target_t *target,
+ s1394_fa_type_t type)
+{
+ s1394_fa_hal_t *fal = &hal->hal_fa[type];
+
+ if (fal->fal_head == NULL) {
+ ASSERT(fal->fal_tail == NULL);
+ fal->fal_head = fal->fal_tail = target;
+ } else {
+ fal->fal_tail->target_fa[type].fat_next = target;
+ fal->fal_tail = target;
+ }
+ fal->fal_gen++;
+}
+
+/*
+ * s1394_fa_list_remove()
+ * Remove target from the list of FA clients.
+ * target_list_rwlock should be writer-held.
+ */
+int
+s1394_fa_list_remove(s1394_hal_t *hal, s1394_target_t *target,
+ s1394_fa_type_t type)
+{
+ s1394_fa_hal_t *fal = &hal->hal_fa[type];
+ s1394_target_t *curp, **nextp, *prevp = NULL;
+
+ for (nextp = &fal->fal_head; (curp = *nextp) != NULL; ) {
+ if (curp == target) {
+ *nextp = target->target_fa[type].fat_next;
+ if (target == fal->fal_tail) {
+ fal->fal_tail = prevp;
+ }
+ fal->fal_gen++;
+ return (DDI_SUCCESS);
+ }
+ nextp = &curp->target_fa[type].fat_next;
+ prevp = curp;
+ }
+ return (DDI_FAILURE);
+}
+
+/*
+ * s1394_fa_list_is_empty()
+ * Returns B_TRUE if the target list is empty
+ * target_list_rwlock should be at least reader-held.
+ */
+boolean_t
+s1394_fa_list_is_empty(s1394_hal_t *hal, s1394_fa_type_t type)
+{
+ s1394_fa_hal_t *fal = &hal->hal_fa[type];
+
+ return (fal->fal_head == NULL);
+}
+
+/*
+ * s1394_fa_list_gen()
+ * Returns list generation number.
+ * target_list_rwlock should be at least reader-held.
+ */
+uint_t
+s1394_fa_list_gen(s1394_hal_t *hal, s1394_fa_type_t type)
+{
+ s1394_fa_hal_t *fal = &hal->hal_fa[type];
+
+ return (fal->fal_gen);
+}
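+
+/*
+ * A note on fal_gen: callers snapshot the generation, drop
+ * target_list_rwlock around a callback, and restart their walk if the
+ * list changed underneath them. A sketch (mirroring the real use in
+ * s1394_fcp_recv_write_request()):
+ *
+ *	saved_gen = s1394_fa_list_gen(hal, type);
+ *	rw_exit(&hal->target_list_rwlock);
+ *	ret = cb(req);
+ *	rw_enter(&hal->target_list_rwlock, RW_READER);
+ *	if (saved_gen != s1394_fa_list_gen(hal, type))
+ *		...restart the walk...
+ */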
+
+/*
+ * s1394_fa_init_cmd()
+ * initialize the FA specific part of the command
+ */
+void
+s1394_fa_init_cmd(s1394_cmd_priv_t *s_priv, s1394_fa_type_t type)
+{
+ s_priv->cmd_ext_type = S1394_CMD_EXT_FA;
+ s_priv->cmd_ext.fa.type = type;
+}
+
+/*
+ * s1394_fa_convert_cmd()
+ * convert an FA command (with a relative address) to a regular 1394 command
+ */
+void
+s1394_fa_convert_cmd(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
+{
+ s1394_fa_cmd_priv_t *fa_priv = S1394_GET_FA_CMD_PRIV(cmd);
+
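+	/*
+	 * Illustration: for a controller (S1394_FA_TYPE_FCP_CTL),
+	 * fd_conv_base is the FCP command register base, so an
+	 * FCP-relative cmd_addr of 0 becomes the absolute address of
+	 * the target's FCP command register.
+	 */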
+ cmd->cmd_addr += hal->hal_fa[fa_priv->type].fal_descr->fd_conv_base;
+ fa_priv->completion_callback = cmd->completion_callback;
+ fa_priv->callback_arg = cmd->cmd_callback_arg;
+ cmd->completion_callback = s1394_fa_completion_cb;
+ cmd->cmd_callback_arg = hal;
+}
+
+/*
+ * s1394_fa_restore_cmd()
+ * opposite of s1394_fa_convert_cmd(): regular 1394 command to FA command
+ */
+void
+s1394_fa_restore_cmd(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
+{
+ s1394_fa_cmd_priv_t *fa_priv = S1394_GET_FA_CMD_PRIV(cmd);
+
+ ASSERT(fa_priv->type < S1394_FA_NTYPES);
+
+ cmd->cmd_addr -= hal->hal_fa[fa_priv->type].fal_descr->fd_conv_base;
+ cmd->completion_callback = fa_priv->completion_callback;
+ cmd->cmd_callback_arg = fa_priv->callback_arg;
+}
+
+/*
+ * s1394_fa_check_restore_cmd()
+ * if a command has FA extension, do s1394_fa_restore_cmd()
+ */
+void
+s1394_fa_check_restore_cmd(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
+{
+ s1394_cmd_priv_t *s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ if (s_priv->cmd_ext_type == S1394_CMD_EXT_FA) {
+ s1394_fa_restore_cmd(hal, cmd);
+ }
+}
+
+/*
+ * s1394_fa_completion_cb()
+ * FA completion callback: restore command and call original callback
+ */
+static void
+s1394_fa_completion_cb(cmd1394_cmd_t *cmd)
+{
+ s1394_hal_t *hal = cmd->cmd_callback_arg;
+
+ TNF_PROBE_0_DEBUG(s1394_fa_completion_cb_enter,
+ S1394_TNF_SL_FA_STACK, "");
+
+ s1394_fa_restore_cmd(hal, cmd);
+
+ if (cmd->completion_callback) {
+ cmd->completion_callback(cmd);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_fa_completion_cb_exit,
+ S1394_TNF_SL_FA_STACK, "");
+}
diff --git a/usr/src/uts/common/io/1394/s1394_fcp.c b/usr/src/uts/common/io/1394/s1394_fcp.c
new file mode 100644
index 0000000000..51617043db
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394_fcp.c
@@ -0,0 +1,333 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394_fcp.c
+ * 1394 Services Layer FCP Support Routines
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+
+static int s1394_fcp_register_common(s1394_target_t *target,
+ t1394_fcp_evts_t *evts, s1394_fa_type_t type, s1394_fa_descr_t *descr);
+static int s1394_fcp_unregister_common(s1394_target_t *target,
+ s1394_fa_type_t type);
+static void s1394_fcp_resp_recv_write_request(cmd1394_cmd_t *req);
+static void s1394_fcp_cmd_recv_write_request(cmd1394_cmd_t *req);
+static void s1394_fcp_recv_write_request(cmd1394_cmd_t *req,
+ s1394_fa_type_t type);
+static void s1394_fcp_recv_write_unclaimed(s1394_hal_t *hal,
+ cmd1394_cmd_t *req);
+
+
+/*
+ * Number of retries when notifying registered targets, in case the target
+ * list changes while the list rwlock is dropped for the duration of a callback
+ */
+uint_t s1394_fcp_notify_retry_cnt = 3;
+
+s1394_fa_descr_t s1394_fcp_ctl_descr = {
+ IEC61883_FCP_RESP_ADDR,
+ IEC61883_FCP_RESP_SIZE,
+ T1394_ADDR_WRENBL,
+ { NULL, s1394_fcp_resp_recv_write_request, NULL },
+ IEC61883_FCP_CMD_ADDR
+};
+
+s1394_fa_descr_t s1394_fcp_tgt_descr = {
+ IEC61883_FCP_CMD_ADDR,
+ IEC61883_FCP_CMD_SIZE,
+ T1394_ADDR_WRENBL,
+ { NULL, s1394_fcp_cmd_recv_write_request, NULL },
+ IEC61883_FCP_RESP_ADDR
+};
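+
+/*
+ * The initializers above map, in order, onto the s1394_fa_descr_t fields
+ * fd_addr, fd_size, fd_enable, fd_evts and fd_conv_base: a controller
+ * claims the FCP response register and has its outgoing requests rebased
+ * onto the FCP command register; a target is the mirror image.
+ */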
+
+
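+/*
+ * s1394_fcp_hal_init()
+ *    Claim both FCP address ranges at attach time, unless the
+ *    "h1394-fcp-claim-on-demand" property defers claiming until the
+ *    first controller/target registers.
+ */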
+int
+s1394_fcp_hal_init(s1394_hal_t *hal)
+{
+ int ret = DDI_SUCCESS;
+
+ TNF_PROBE_0_DEBUG(s1394_fcp_hal_init_enter, S1394_TNF_SL_FCP_STACK, "");
+
+ if ((ddi_prop_exists(DDI_DEV_T_ANY, hal->halinfo.dip, DDI_PROP_DONTPASS,
+ "h1394-fcp-claim-on-demand")) == 0) {
+ /* if not on-demand, claim addresses now */
+ ret = s1394_fa_claim_addr(hal, S1394_FA_TYPE_FCP_CTL,
+ &s1394_fcp_ctl_descr);
+ if (ret == DDI_SUCCESS) {
+ ret = s1394_fa_claim_addr(hal, S1394_FA_TYPE_FCP_TGT,
+ &s1394_fcp_tgt_descr);
+ if (ret != DDI_SUCCESS) {
+ s1394_fa_free_addr(hal, S1394_FA_TYPE_FCP_CTL);
+ }
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_fcp_hal_init_exit, S1394_TNF_SL_FCP_STACK, "");
+ return (ret);
+}
+
+int
+s1394_fcp_register_ctl(s1394_target_t *target, t1394_fcp_evts_t *evts)
+{
+ return (s1394_fcp_register_common(target, evts, S1394_FA_TYPE_FCP_CTL,
+ &s1394_fcp_ctl_descr));
+}
+
+int
+s1394_fcp_register_tgt(s1394_target_t *target, t1394_fcp_evts_t *evts)
+{
+ return (s1394_fcp_register_common(target, evts, S1394_FA_TYPE_FCP_TGT,
+ &s1394_fcp_tgt_descr));
+}
+
+int
+s1394_fcp_unregister_ctl(s1394_target_t *target)
+{
+ return (s1394_fcp_unregister_common(target, S1394_FA_TYPE_FCP_CTL));
+}
+
+int
+s1394_fcp_unregister_tgt(s1394_target_t *target)
+{
+ return (s1394_fcp_unregister_common(target, S1394_FA_TYPE_FCP_TGT));
+}
+
+
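+/*
+ * s1394_fcp_register_common()
+ *    Add the target to the FA client list for the given type; the first
+ *    registration of a type claims the corresponding FCP address range
+ *    (a no-op if it was already claimed in s1394_fcp_hal_init()).
+ */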
+static int
+s1394_fcp_register_common(s1394_target_t *target, t1394_fcp_evts_t *evts,
+ s1394_fa_type_t type, s1394_fa_descr_t *descr)
+{
+ s1394_hal_t *hal = target->on_hal;
+ s1394_fcp_target_t *fcp;
+
+ rw_enter(&hal->target_list_rwlock, RW_WRITER);
+
+ if (s1394_fa_list_is_empty(hal, type)) {
+ if (s1394_fa_claim_addr(hal, type, descr) != DDI_SUCCESS) {
+ rw_exit(&hal->target_list_rwlock);
+ return (DDI_FAILURE);
+ }
+ }
+
+ /* Add on the target list */
+ s1394_fa_list_add(hal, target, type);
+
+ fcp = &target->target_fa[type].fat_u.fcp;
+ fcp->fc_evts = *evts;
+
+ rw_exit(&hal->target_list_rwlock);
+
+ return (DDI_SUCCESS);
+}
+
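+/*
+ * s1394_fcp_unregister_common()
+ *    Remove the target from the FA client list for the given type; when
+ *    the last client of that type unregisters, free the FCP address range.
+ */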
+static int
+s1394_fcp_unregister_common(s1394_target_t *target, s1394_fa_type_t type)
+{
+ s1394_hal_t *hal = target->on_hal;
+ int result;
+
+ rw_enter(&hal->target_list_rwlock, RW_WRITER);
+
+ result = s1394_fa_list_remove(hal, target, type);
+ if (result == DDI_SUCCESS) {
+ if (s1394_fa_list_is_empty(hal, type)) {
+ s1394_fa_free_addr(hal, type);
+ }
+ } else {
+ TNF_PROBE_0(s1394_fcp_unregister_common_error_list,
+ S1394_TNF_SL_FCP_ERROR, "");
+ }
+
+ rw_exit(&hal->target_list_rwlock);
+
+ return (result);
+}
+
+/*
+ * s1394_fcp_write_check_cmd()
+ * Check if an FCP command is formed correctly;
+ * set cmd_result and return DDI_FAILURE if not.
+ */
+int
+s1394_fcp_write_check_cmd(cmd1394_cmd_t *cmd)
+{
+ int len;
+
+	/* a 4-byte write must be a quadlet write, not a block write */
+ if (cmd->cmd_type == CMD1394_ASYNCH_WR_BLOCK) {
+ len = cmd->cmd_u.b.blk_length;
+ if (len == 4) {
+ cmd->cmd_result = CMD1394_ETYPE_ERROR;
+ TNF_PROBE_0(t1394_write_error_type,
+ S1394_TNF_SL_FCP_ERROR, "");
+ return (DDI_FAILURE);
+ }
+ } else {
+ len = 4;
+ }
+
+	/*
+	 * The request must lie within the FCP register range. Since the
+	 * command and response registers are the same size, a single
+	 * check against IEC61883_FCP_CMD_SIZE suffices for both.
+	 */
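+	/*
+	 * (Illustration: a block write whose FCP offset plus length runs
+	 * past the end of the register region fails here with
+	 * CMD1394_EADDRESS_ERROR.)
+	 */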
+ if ((cmd->cmd_addr & IEEE1394_ADDR_OFFSET_MASK) + len >
+ IEC61883_FCP_CMD_SIZE) {
+ cmd->cmd_result = CMD1394_EADDRESS_ERROR;
+ TNF_PROBE_0(t1394_write_error_addr, S1394_TNF_SL_FCP_ERROR, "");
+ return (DDI_FAILURE);
+ }
+
+ /* some options don't make sense for FCP commands */
+ if (cmd->cmd_options & CMD1394_OVERRIDE_ADDR) {
+ cmd->cmd_result = CMD1394_EINVALID_COMMAND;
+ TNF_PROBE_0(t1394_write_error_opt, S1394_TNF_SL_FCP_ERROR, "");
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static void
+s1394_fcp_resp_recv_write_request(cmd1394_cmd_t *req)
+{
+ s1394_fcp_recv_write_request(req, S1394_FA_TYPE_FCP_CTL);
+}
+
+static void
+s1394_fcp_cmd_recv_write_request(cmd1394_cmd_t *req)
+{
+ s1394_fcp_recv_write_request(req, S1394_FA_TYPE_FCP_TGT);
+}
+
+/*
+ * s1394_fcp_recv_write_request()
+ * Common write request handler
+ */
+static void
+s1394_fcp_recv_write_request(cmd1394_cmd_t *req, s1394_fa_type_t type)
+{
+ s1394_hal_t *hal = (s1394_hal_t *)req->cmd_callback_arg;
+ s1394_target_t *target;
+ s1394_fa_target_t *fat;
+ uint_t saved_gen;
+ int num_retries = 0;
+ int (*cb)(cmd1394_cmd_t *req);
+ boolean_t restored = B_FALSE;
+ int ret = T1394_REQ_UNCLAIMED;
+
+ TNF_PROBE_0_DEBUG(s1394_fcp_recv_write_request_enter,
+ S1394_TNF_SL_FCP_STACK, "");
+
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+
+start:
+ target = hal->hal_fa[type].fal_head;
+
+ if (target) {
+ s1394_fa_restore_cmd(hal, req);
+ restored = B_TRUE;
+
+ /* Find a target that claims the request */
+ do {
+ fat = &target->target_fa[type];
+
+ cb = fat->fat_u.fcp.fc_evts.fcp_write_request;
+ if (cb == NULL) {
+ continue;
+ }
+ req->cmd_callback_arg = fat->fat_u.fcp.fc_evts.fcp_arg;
+
+ saved_gen = s1394_fa_list_gen(hal, type);
+
+ rw_exit(&hal->target_list_rwlock);
+ ret = cb(req);
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+
+ if (ret == T1394_REQ_CLAIMED) {
+ break;
+ }
+
+			/*
+			 * The list could change while we dropped the lock.
+			 * In that case, start all over again, because
+			 * missing a write request can have more serious
+			 * consequences for a target than receiving the
+			 * same request more than once.
+			 */
+ if (saved_gen != s1394_fa_list_gen(hal, type)) {
+ TNF_PROBE_2(s1394_fcp_recv_write_request_error,
+ S1394_TNF_SL_FCP_ERROR, "",
+ tnf_string, msg, "list gen changed",
+ tnf_opaque, num_retries, num_retries);
+ num_retries++;
+ if (num_retries <= s1394_fcp_notify_retry_cnt) {
+ goto start;
+ } else {
+ break;
+ }
+ }
+
+ target = fat->fat_next;
+ } while (target != NULL);
+ }
+
+ rw_exit(&hal->target_list_rwlock);
+
+ if (ret != T1394_REQ_CLAIMED) {
+ TNF_PROBE_0(s1394_fcp_recv_write_request_error_unclaimed,
+ S1394_TNF_SL_FCP_ERROR, "");
+ if (restored) {
+ s1394_fa_convert_cmd(hal, req);
+ }
+ s1394_fcp_recv_write_unclaimed(hal, req);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_fcp_recv_write_request_exit,
+ S1394_TNF_SL_FCP_STACK, "");
+}
+
+/*
+ * none of the targets claimed the request - send an appropriate response
+ */
+static void
+s1394_fcp_recv_write_unclaimed(s1394_hal_t *hal, cmd1394_cmd_t *req)
+{
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ (void) s1394_send_response(hal, req);
+}
diff --git a/usr/src/uts/common/io/1394/s1394_hotplug.c b/usr/src/uts/common/io/1394/s1394_hotplug.c
new file mode 100644
index 0000000000..9aa9850d48
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394_hotplug.c
@@ -0,0 +1,1204 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394_hotplug.c
+ * 1394 Services Layer Hotplug Routines
+ *    This file contains routines that walk the old and new topology
+ *    trees at bus reset time, creating devinfos for new nodes and
+ *    offlining nodes that have been removed.
+ */
+
+#include <sys/conf.h>
+#include <sys/sysmacros.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/modctl.h>
+#include <sys/sunddi.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/types.h>
+
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+#include <sys/1394/ieee1394.h>
+
+static void s1394_send_remove_event(s1394_hal_t *hal, dev_info_t *dip,
+ t1394_localinfo_t *localinfo);
+static void s1394_send_insert_event(s1394_hal_t *hal, dev_info_t *dip,
+ t1394_localinfo_t *localinfo);
+static dev_info_t *s1394_create_devinfo(s1394_hal_t *hal, s1394_node_t *node,
+ uint32_t *unit_dir, int nunit);
+static void s1394_update_unit_dir_location(s1394_hal_t *hal, dev_info_t *tdip,
+ uint_t offset);
+
+/*
+ * s1394_send_remove_event()
+ * Invokes any "remove event" callback registered for dip. Passes
+ * t1394_localinfo_t as impl_data for the callback.
+ */
+static void
+s1394_send_remove_event(s1394_hal_t *hal, dev_info_t *dip,
+ t1394_localinfo_t *localinfo)
+{
+ char name[128];
+ ddi_eventcookie_t cookie;
+
+ (void) sprintf(name, "%s%d", ddi_driver_name(dip),
+ ddi_get_instance(dip));
+
+ TNF_PROBE_1_DEBUG(s1394_send_remove_event_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, device,
+ name);
+
+ if (ndi_event_retrieve_cookie(hal->hal_ndi_event_hdl, dip,
+ DDI_DEVI_REMOVE_EVENT, &cookie, NDI_EVENT_NOPASS)
+ == NDI_SUCCESS) {
+ (void) ndi_event_run_callbacks(hal->hal_ndi_event_hdl, dip,
+ cookie, localinfo);
+ }
+ TNF_PROBE_0_DEBUG(s1394_send_remove_event_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
+
+/*
+ * s1394_send_insert_event()
+ * Invokes any "insert event" callback registered for dip. Passes
+ * t1394_localinfo_t as impl_data for the callback.
+ */
+static void
+s1394_send_insert_event(s1394_hal_t *hal, dev_info_t *dip,
+ t1394_localinfo_t *localinfo)
+{
+ char name[128];
+ ddi_eventcookie_t cookie;
+
+ (void) sprintf(name, "%s%d", ddi_driver_name(dip),
+ ddi_get_instance(dip));
+
+ TNF_PROBE_1_DEBUG(s1394_send_insert_event_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, device,
+ name);
+
+ if (ndi_event_retrieve_cookie(hal->hal_ndi_event_hdl, dip,
+ DDI_DEVI_INSERT_EVENT, &cookie, NDI_EVENT_NOPASS) ==
+ NDI_SUCCESS)
+ (void) ndi_event_run_callbacks(hal->hal_ndi_event_hdl, dip,
+ cookie, localinfo);
+
+ TNF_PROBE_0_DEBUG(s1394_send_insert_event_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
+
+/*
+ * s1394_create_devinfo()
+ * This routine creates a devinfo corresponding to the unit_dir passed in.
+ * It adds "hp-node", "reg", "compatible" properties to the devinfo
+ * (formats for "reg" and "compatible" properties are specified by 1275
+ *    binding for IEEE1394). If unable to create the devinfo and/or add
+ *    the properties, returns NULL; otherwise, returns the devinfo created.
+ *
+ * NOTE: All ndi_* routines are interrupt callable (and thus won't sleep).
+ * So, we don't drop topology_mutex across ndi calls.
+ */
+static dev_info_t *
+s1394_create_devinfo(s1394_hal_t *hal, s1394_node_t *node, uint32_t *unit_dir,
+ int nunit)
+{
+ dev_info_t *hal_dip;
+ uint32_t *root_dir;
+ dev_info_t *target_dip;
+
+ int root_dir_len;
+ int result, i, j, spec_id, sw_version;
+ int mod_ven, mod_hw, mod_spec, mod_sw;
+ int node_ven, node_hw, node_spec, node_sw;
+
+ /*LINTED type is unused*/
+ uint32_t type, key, value;
+ uint32_t unit_spec_id, unit_sw_version;
+ uint32_t node_spec_id, node_sw_version;
+ uint32_t node_vendor_id, node_hw_version;
+ uint32_t module_spec_id, module_sw_version;
+ uint32_t module_vendor_id, module_hw_version;
+
+ char *fmt = "firewire%06x,%06x";
+
+ char *buf[5], data[5][24];
+ uint32_t reg[6];
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_2_DEBUG(s1394_create_devinfo_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_uint, guid_hi,
+ node->node_guid_hi, tnf_uint, guid_lo, node->node_guid_lo);
+
+ hal_dip = hal->halinfo.dip;
+
+ /* Allocate and init a new device node instance. */
+ result = ndi_devi_alloc(hal_dip, "unit", (dnode_t)DEVI_SID_NODEID,
+ &target_dip);
+
+ if (result != NDI_SUCCESS) {
+ cmn_err(CE_NOTE, "!Unable to create devinfo"
+ " (node's GUID %08x%08x)", node->node_guid_hi,
+ node->node_guid_lo);
+ TNF_PROBE_2(s1394_create_devinfo_fail_alloc,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_uint, guid_hi,
+ node->node_guid_hi, tnf_uint, guid_lo, node->node_guid_lo);
+ TNF_PROBE_0_DEBUG(s1394_create_devinfo_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (NULL);
+ }
+
+ /* Add "hp-node" property */
+ result = ndi_prop_update_int(DDI_DEV_T_NONE, target_dip, "hp-node", 0);
+ if (result != NDI_SUCCESS) {
+ cmn_err(CE_NOTE, "!Unable to add \"hp-node\" property"
+ " (node's GUID %08x%08x)", node->node_guid_hi,
+ node->node_guid_lo);
+#if defined(DEBUG)
+ cmn_err(CE_CONT, "!Error code %d", result);
+#endif
+ TNF_PROBE_3(s1394_create_devinfo_hp_node,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_uint, guid_hi,
+ node->node_guid_hi, tnf_uint, guid_lo, node->node_guid_lo,
+ tnf_int, error, result);
+ ndi_prop_remove_all(target_dip);
+ (void) ndi_devi_free(target_dip);
+ TNF_PROBE_0_DEBUG(s1394_create_devinfo_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (NULL);
+ }
+
+ spec_id = sw_version = mod_ven = mod_hw = mod_spec = mod_sw =
+ node_ven = node_hw = node_spec = node_sw = 0;
+ unit_sw_version = node_sw_version = node_hw_version =
+ module_sw_version = module_hw_version = 0;
+
+
+ root_dir = CFGROM_ROOT_DIR(node->cfgrom);
+ root_dir_len = CFGROM_DIR_LEN(root_dir);
+
+ for (i = 0; i < root_dir_len; i++) {
+
+ CFGROM_TYPE_KEY_VALUE(root_dir[i + 1], type, key, value);
+ switch (key) {
+
+ case IEEE1212_MODULE_VENDOR_ID:
+ module_vendor_id = value;
+ mod_ven++;
+ break;
+ case IEEE1212_MODULE_HW_VERSION:
+ module_hw_version = value;
+ mod_hw++;
+ break;
+ case IEEE1212_MODULE_SPEC_ID:
+ module_spec_id = value;
+ mod_spec++;
+ break;
+ case IEEE1212_MODULE_SW_VERSION:
+ module_sw_version = value;
+ mod_sw++;
+ break;
+ case IEEE1212_NODE_VENDOR_ID:
+ node_vendor_id = value;
+ node_ven++;
+ break;
+ case IEEE1212_NODE_UNIQUE_ID: {
+ uint32_t *node_unique_leaf =
+ &root_dir[i + 1] + value;
+ node_vendor_id = (node_unique_leaf[1] >> 8);
+ node_ven++;
+ }
+ break;
+ case IEEE1212_NODE_HW_VERSION:
+ node_hw_version = value;
+ node_hw++;
+ break;
+ case IEEE1212_NODE_SPEC_ID:
+ node_spec_id = value;
+ node_spec++;
+ break;
+ case IEEE1212_NODE_SW_VERSION:
+ node_sw_version = value;
+ node_sw++;
+ break;
+ }
+
+ if (mod_ven && mod_hw && mod_spec && mod_sw && node_ven &&
+ node_hw && node_spec && node_sw) {
+ break;
+ }
+ }
+
+ /*
+ * Search for unit spec and version
+ */
+ for (i = 0; i < CFGROM_DIR_LEN(unit_dir); i++) {
+
+ CFGROM_TYPE_KEY_VALUE(unit_dir[i + 1], type, key, value);
+ if (key == IEEE1212_UNIT_SPEC_ID) {
+
+ unit_spec_id = value;
+ spec_id++;
+ } else if (key == IEEE1212_UNIT_SW_VERSION) {
+
+ unit_sw_version = value;
+ sw_version++;
+ }
+ if (spec_id && sw_version)
+ break;
+ }
+
+ /*
+ * Refer to IEEE1212 (pages 90-92) for information regarding various
+ * id's. Module_Vendor_Id is required. Node_Vendor_Id is optional and
+ * if not implemented, its assumed value is Module_Vendor_Id.
+ * Module_Spec_Id is optional and if not implemented, its assumed value
+ * is Module_Vendor_Id. Node_Spec_Id is optional, and if not
+ * implemented, its assumed value is Node_Vendor_Id. Unit_Spec_Id is
+ * optional, and if not implemented, its assumed value is
+ * Node_Vendor_Id.
+ */
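+	/*
+	 * Example (illustrative): a minimal config rom that implements
+	 * only Module_Vendor_Id ends up with node_vendor_id,
+	 * module_spec_id and unit_spec_id all defaulted to
+	 * Module_Vendor_Id by the fallbacks below.
+	 */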
+ if (node_ven == 0) {
+ node_vendor_id = module_vendor_id;
+ node_ven++;
+ }
+
+ if (node_spec == 0) {
+ node_spec_id = node_vendor_id;
+ node_spec++;
+ }
+
+ if (mod_spec == 0) {
+ module_spec_id = module_vendor_id;
+ mod_spec++;
+ }
+
+ if (spec_id == 0) {
+ unit_spec_id = node_vendor_id;
+ spec_id++;
+ }
+
+ i = 0;
+ if (sw_version != 0) {
+ buf[i] = data[i];
+ (void) sprintf(data[i++], fmt, unit_spec_id, unit_sw_version);
+ }
+ if (node_sw != 0) {
+ buf[i] = data[i];
+ (void) sprintf(data[i++], fmt, node_spec_id, node_sw_version);
+ }
+ if (node_hw != 0) {
+ buf[i] = data[i];
+ (void) sprintf(data[i++], fmt, node_vendor_id, node_hw_version);
+ }
+ if (mod_sw != 0) {
+ buf[i] = data[i];
+ (void) sprintf(data[i++], fmt, module_spec_id,
+ module_sw_version);
+ }
+ if (mod_hw != 0) {
+ buf[i] = data[i];
+ (void) sprintf(data[i++], fmt, module_vendor_id,
+ module_hw_version);
+ }
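+	/*
+	 * Each compatible entry formatted above looks like, e.g.,
+	 * "firewire00a0b8,010001" (hypothetical id and version values).
+	 */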
+
+ result = ndi_prop_update_string_array(DDI_DEV_T_NONE, target_dip,
+ "compatible", (char **)&buf, i);
+ if (result != NDI_SUCCESS) {
+ cmn_err(CE_NOTE, "!Unable to add \"compatible\" property"
+ " (node's GUID %08x%08x)", node->node_guid_hi,
+ node->node_guid_lo);
+#if defined(DEBUG)
+ cmn_err(CE_CONT, "!Error code %d; nelements %d", result, i);
+ for (j = 0; j < i; j++) {
+ cmn_err(CE_CONT, "!buf[%d]: %s", j, buf[j]);
+ }
+#endif
+ ndi_prop_remove_all(target_dip);
+ (void) ndi_devi_free(target_dip);
+ TNF_PROBE_4(s1394_create_devinfo_fail_compat,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_uint, guid_hi,
+ node->node_guid_hi, tnf_uint, guid_lo, node->node_guid_lo,
+ tnf_int, error, result, tnf_int, nelements, i);
+ TNF_PROBE_0_DEBUG(s1394_create_devinfo_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (NULL);
+ }
+
+ for (j = 0; j < i; j++) {
+ TNF_PROBE_2_DEBUG(s1394_create_devinfo_props,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_int, compat_index, j,
+ tnf_string, compat_prop, buf[j]);
+ }
+
+ /* GUID,ADDR */
+ reg[0] = node->node_guid_hi;
+ reg[1] = node->node_guid_lo;
+ s1394_cfgrom_parse_unit_dir(unit_dir, &reg[2], &reg[3], &reg[4],
+ &reg[5]);
+
+ reg[3] = nunit;
+
+ result = ndi_prop_update_int_array(DDI_DEV_T_NONE, target_dip, "reg",
+ (int *)reg, 6);
+ if (result != NDI_SUCCESS) {
+ cmn_err(CE_NOTE, "!Unable to add \"reg\" property");
+#if defined(DEBUG)
+ cmn_err(CE_CONT, "!Error code %d", result);
+ for (j = 0; j < 6; j++) {
+ cmn_err(CE_CONT, "!reg[%d]: 0x%08x", j, reg[j]);
+ }
+#endif
+ ndi_prop_remove_all(target_dip);
+ (void) ndi_devi_free(target_dip);
+ TNF_PROBE_3(s1394_create_devinfo_fail_reg,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_uint, guid_hi,
+ node->node_guid_hi, tnf_uint, guid_lo, node->node_guid_lo,
+ tnf_int, error, result);
+ TNF_PROBE_0_DEBUG(s1394_create_devinfo_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (NULL);
+ }
+
+ TNF_PROBE_1_DEBUG(s1394_create_devinfo_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_opaque, target_dip, target_dip);
+
+ return (target_dip);
+}
+
+/*
+ * s1394_devi_find()
+ * Searches all children of pdip for a match of name@caddr. Builds the
+ * name and address of each child node by looking up the reg property on
+ * the node and compares the built name@addr with the name@addr passed in.
+ * Returns the child dip if a match is found, otherwise, returns NULL.
+ * NOTE:
+ * This routine is decidedly non-ddi. We had to use this one since
+ *    ndi_devi_find() can find only nodes that have a valid addr field
+ *    set, and that won't happen unless the node goes through INITCHILD
+ *    (at which time nx1394.c calls ddi_set_name_addr()). If, in the
+ *    future, ndi_devi_find() provides a way of looking up nodes using
+ *    criteria other than addr, we can get rid of this routine.
+ */
+/*ARGSUSED*/
+dev_info_t *
+s1394_devi_find(dev_info_t *pdip, char *name, char *caddr)
+{
+ int i, reglen;
+ char addr[32];
+ uint32_t *regptr;
+ dev_info_t *cdip = NULL;
+
+ ASSERT((name != NULL) && (caddr != NULL));
+
+ TNF_PROBE_1_DEBUG(s1394_devi_find_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "", tnf_string, addr, caddr);
+
+ /*
+ * for each child of this parent, find name and addr and match with
+ * name and caddr passed in.
+ */
+ for (cdip = (dev_info_t *)DEVI(pdip)->devi_child; cdip != NULL;
+ cdip = (dev_info_t *)DEVI(cdip)->devi_sibling) {
+
+ i = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
+ DDI_PROP_DONTPASS, "reg", (int **)&regptr,
+ (uint_t *)&reglen);
+
+ if (i != DDI_PROP_SUCCESS)
+ continue;
+
+ /*
+ * Construct addr from the reg property (addr is of the format
+ * GGGGGGGGGGGGGGGG[,AAAAAAAAAAAA], where GGGGGGGGGGGGGGGG is
+		 * the GUID and AAAAAAAAAAAA is the optional unit address)
+ */
+		if (regptr[2] != 0 || regptr[3] != 0) {
+ (void) sprintf(addr, "%08x%08x,%04x%08x", regptr[0],
+ regptr[1], regptr[2], regptr[3]);
+ } else {
+ (void) sprintf(addr, "%08x%08x", regptr[0], regptr[1]);
+ }
+ ddi_prop_free(regptr);
+
+ if (strcmp(caddr, addr) == 0) {
+ ASSERT(strcmp(ddi_node_name(cdip), name) == 0);
+ break;
+ }
+ }
+
+ if (cdip == NULL) {
+ TNF_PROBE_1(s1394_devi_find_no_match,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, addr, caddr);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_devi_find_exit, S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ return (cdip);
+}
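+
+/*
+ * For illustration (hypothetical values): a unit with GUID
+ * 0800460102030405 and unit address hi/lo 0x0001/0x0000000a is looked
+ * up with caddr "0800460102030405,00010000000a", as built by
+ * s1394_update_devinfo_tree() and s1394_offline_node().
+ */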
+
+/*
+ * s1394_update_devinfo_tree()
+ *    Parses the config rom for the passed-in node and creates/updates devinfos
+ * for each unit directory found. If the devinfo corresponding to a unit
+ * already exists, any insert event callbacks registered for that devinfo
+ * are called (topology tree is unlocked and relocked around these
+ * callbacks). Returns DDI_SUCCESS if everything went fine and DDI_FAILURE
+ * if unable to reacquire the lock after callbacks (relock fails because of
+ * an intervening bus reset or if the services layer kills the bus reset
+ * thread). The node is marked as parsed before returning.
+ */
+int
+s1394_update_devinfo_tree(s1394_hal_t *hal, s1394_node_t *node)
+{
+ dev_info_t *tdip;
+ int j, units, d, lockfail = 0;
+ s1394_target_t *target, *t;
+ uint32_t hi, lo, size_hi, size_lo, type, key, value;
+ uint32_t *ptr, *root_dir, dir_len;
+ t1394_localinfo_t linfo;
+
+ uint32_t *unit_dir_ptrs[32];
+ dev_info_t *devinfo_ptrs[32];
+ uint32_t new_devinfo = 0; /* to keep track of new allocations */
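+	/* (code below assumes at most 32 unit dirs, matching mask width) */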
+
+ char caddr[32];
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ ASSERT(CFGROM_PARSED(node) == B_FALSE);
+ ASSERT(node->cfgrom != NULL);
+
+ TNF_PROBE_2_DEBUG(s1394_update_devinfo_tree_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int, node_num,
+ node->node_num, tnf_opaque, cfgrom, node->cfgrom);
+
+ /* scan through config rom looking for unit dirs */
+ root_dir = CFGROM_ROOT_DIR(node->cfgrom);
+
+ if (node->cfgrom_valid_size < CFGROM_DIR_LEN(root_dir))
+ dir_len = node->cfgrom_valid_size;
+ else
+ dir_len = CFGROM_DIR_LEN(root_dir);
+
+ CFGROM_TYPE_KEY_VALUE(root_dir[0], type, key, value);
+ if (s1394_valid_dir(hal, node, key, root_dir) == B_FALSE) {
+ cmn_err(CE_NOTE,
+ "!Bad root directory in config rom (node's GUID %08x%08x)",
+ node->node_guid_hi, node->node_guid_lo);
+
+ TNF_PROBE_1_DEBUG(s1394_update_devinfo_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_string, msg,
+ "bad directory");
+
+ SET_CFGROM_PARSED(node);
+ CLEAR_CFGROM_GEN_CHANGED(node); /* if set */
+ CLEAR_CFGROM_NEW_ALLOC(node);
+
+ return (DDI_SUCCESS);
+ }
+
+ for (units = 0, j = 1; j <= dir_len; j++) {
+ CFGROM_TYPE_KEY_VALUE(root_dir[j], type, key, value);
+ if (key == IEEE1212_UNIT_DIRECTORY && type ==
+ IEEE1212_DIRECTORY_TYPE) {
+ ptr = &root_dir[j] + value;
+ if (s1394_valid_dir(hal, node, key, ptr) == B_TRUE) {
+ unit_dir_ptrs[units++] = ptr;
+ } else {
+ cmn_err(CE_NOTE, "!Bad unit directory in config"
+ " rom (node's GUID %08x%08x)",
+ node->node_guid_hi, node->node_guid_lo);
+ TNF_PROBE_2(s1394_update_devinfo_tree_bad_dir,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_uint,
+ guid_hi, node->node_guid_hi, tnf_uint,
+ guid_lo, node->node_guid_lo);
+ }
+ }
+ }
+
+ for (d = 0, j = 0; j < units; j++) {
+
+ s1394_cfgrom_parse_unit_dir(unit_dir_ptrs[j],
+ &hi, &lo, &size_hi, &size_lo);
+
+ lo = j;
+
+ if (hi || lo) {
+ (void) sprintf(caddr, "%08x%08x,%04x%08x",
+ node->node_guid_hi, node->node_guid_lo, hi, lo);
+ } else {
+ (void) sprintf(caddr, "%08x%08x",
+ node->node_guid_hi, node->node_guid_lo);
+ }
+
+ tdip = s1394_devi_find(hal->halinfo.dip, "unit", caddr);
+ if (tdip != NULL) {
+
+ rw_enter(&hal->target_list_rwlock, RW_WRITER);
+ target = s1394_target_from_dip_locked(hal, tdip);
+ if (target != NULL) {
+ target->target_sibling = NULL;
+ target->on_node = node;
+ target->target_state &= ~S1394_TARG_GONE;
+ target->unit_dir = unit_dir_ptrs[j] - root_dir;
+
+ if ((t = node->target_list) != NULL) {
+ ASSERT(t != target);
+ while (t->target_sibling != NULL) {
+ t = t->target_sibling;
+ ASSERT(t != target);
+ }
+ t->target_sibling = target;
+ } else {
+ node->target_list = target;
+ }
+
+ target->target_list = node->target_list;
+ }
+ rw_exit(&hal->target_list_rwlock);
+
+ s1394_update_unit_dir_location(hal, tdip,
+ unit_dir_ptrs[j] - root_dir);
+
+ } else {
+ /* create devinfo for unit@caddr */
+ tdip = s1394_create_devinfo(hal, node,
+ unit_dir_ptrs[j], j);
+ if (tdip != NULL) {
+ new_devinfo |= (1 << d);
+ s1394_update_unit_dir_location(hal, tdip,
+ unit_dir_ptrs[j] - root_dir);
+ }
+ }
+ if (tdip != NULL)
+ devinfo_ptrs[d++] = tdip;
+ }
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ /* Online all valid units */
+ for (j = 0; j < d; j++) {
+ if ((new_devinfo & (1 << j)) == 0) {
+ linfo.bus_generation = hal->generation_count;
+ linfo.local_nodeID = hal->node_id;
+ }
+ /* don't need to drop topology_tree_mutex across ndi calls */
+ (void) ndi_devi_online_async(devinfo_ptrs[j], 0);
+ if ((new_devinfo & (1 << j)) == 0) {
+ /*
+ * send an insert event if this an existing devinfo.
+ * drop and reacquire topology_tree_mutex across
+ * the event calls
+ */
+ s1394_unlock_tree(hal);
+ s1394_send_insert_event(hal, devinfo_ptrs[j], &linfo);
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_4(s1394_update_devinfo_tree_lock_fail,
+ S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_int, node_num, node->node_num,
+ tnf_opaque, cfgrom, node->cfgrom,
+ tnf_int, unit, j,
+ tnf_opaque, devinfo, devinfo_ptrs[j]);
+ lockfail = 1;
+ break;
+ }
+ }
+ }
+
+ if (lockfail) {
+ TNF_PROBE_0_DEBUG(s1394_update_devinfo_tree_exit,
+ S1394_TNF_SL_HOTPLUG_ERROR, "");
+ return (DDI_FAILURE);
+ }
+
+ SET_CFGROM_PARSED(node);
+ CLEAR_CFGROM_GEN_CHANGED(node); /* if set */
+ CLEAR_CFGROM_NEW_ALLOC(node);
+
+ TNF_PROBE_0_DEBUG(s1394_update_devinfo_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_offline_node()
+ * Offlines a node. This involves marking all targets attached to the
+ * node as gone, invoking any remove event callbacks and calling
+ * ndi_devi_offline to mark the devinfo as OFFLINE (for each unit
+ * directory on the node). The tree is unlocked and relocked around
+ *    the callbacks. If unable to relock the tree, returns DDI_FAILURE;
+ *    otherwise returns DDI_SUCCESS.
+ */
+int
+s1394_offline_node(s1394_hal_t *hal, s1394_node_t *node)
+{
+ s1394_target_t *t;
+ dev_info_t *tdip;
+ int j, d, units;
+ uint32_t *unit_dir_ptrs[32];
+ dev_info_t *devinfo_ptrs[32];
+ t1394_localinfo_t linfo;
+ uint_t node_num;
+ uint32_t *ptr, *root_dir, dir_len;
+ uint32_t hi, lo, size_hi, size_lo, type, key, value;
+ char caddr[32];
+
+ node_num = node->node_num;
+
+ TNF_PROBE_1_DEBUG(s1394_offline_node_enter, S1394_TNF_SL_HOTPLUG_STACK,
+ "", tnf_uint, node_num, node_num);
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ d = 0;
+ rw_enter(&hal->target_list_rwlock, RW_WRITER);
+ t = node->target_list;
+ while (t != NULL) {
+ TNF_PROBE_2(s1394_process_old_tree_mark,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int, node_num, node_num,
+ tnf_opaque, target, t);
+ t->target_state |= S1394_TARG_GONE;
+ t->on_node = NULL;
+ t = t->target_sibling;
+ }
+ rw_exit(&hal->target_list_rwlock);
+
+ /* scan through config rom looking for unit dirs */
+ root_dir = CFGROM_ROOT_DIR(node->cfgrom);
+
+ if (node->cfgrom_valid_size < CFGROM_DIR_LEN(root_dir))
+ dir_len = node->cfgrom_valid_size;
+ else
+ dir_len = CFGROM_DIR_LEN(root_dir);
+
+ CFGROM_TYPE_KEY_VALUE(root_dir[0], type, key, value);
+
+ for (units = 0, j = 1; j <= dir_len; j++) {
+ CFGROM_TYPE_KEY_VALUE(root_dir[j], type, key, value);
+ if (key == IEEE1212_UNIT_DIRECTORY && type ==
+ IEEE1212_DIRECTORY_TYPE) {
+ ptr = &root_dir[j] + value;
+ if (s1394_valid_dir(hal, node, key, ptr) == B_TRUE) {
+ unit_dir_ptrs[units++] = ptr;
+ }
+ }
+ }
+
+ for (d = 0, j = 0; j < units; j++) {
+
+ s1394_cfgrom_parse_unit_dir(unit_dir_ptrs[j],
+ &hi, &lo, &size_hi, &size_lo);
+
+ lo = j;
+
+ if (hi || lo) {
+ (void) sprintf(caddr, "%08x%08x,%04x%08x",
+ node->node_guid_hi, node->node_guid_lo, hi, lo);
+ } else {
+ (void) sprintf(caddr, "%08x%08x",
+ node->node_guid_hi, node->node_guid_lo);
+ }
+
+ if ((tdip = s1394_devi_find(hal->halinfo.dip, "unit", caddr)) !=
+ NULL)
+ devinfo_ptrs[d++] = tdip;
+ }
+
+ node->old_node = NULL;
+
+ linfo.bus_generation = hal->generation_count;
+ linfo.local_nodeID = hal->node_id;
+
+ for (j = 0; j < d; j++) {
+ s1394_unlock_tree(hal);
+
+ TNF_PROBE_2(s1394_offline_node,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int, node_num, node_num,
+ tnf_opaque, devinfo, devinfo_ptrs[j]);
+
+ s1394_send_remove_event(hal, devinfo_ptrs[j], &linfo);
+ (void) ndi_devi_offline(devinfo_ptrs[j], NDI_DEVI_REMOVE);
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_2(s1394_offline_node,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "unlock to relock tree", tnf_uint, node_num,
+ node_num);
+ TNF_PROBE_0_DEBUG(s1394_offline_node_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ TNF_PROBE_0_DEBUG(s1394_offline_node_exit, S1394_TNF_SL_HOTPLUG_STACK,
+ "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_process_topology_tree()
+ * Walks the topology tree, processing each node. If node that has
+ * already been parsed, updates the generation property on all devinfos
+ * for the node. Also, if the node exists in both old & new trees, ASSERTS
+ * that both point to the same config rom. If the node has valid config
+ * rom but hasn't been parsed yet, calls s1394_update_devinfo_tree()
+ * to parse and create devinfos for the node. Kicks off further config
+ * rom reading if only the bus info block for the node is read.
+ * Returns DDI_SUCCESS if everything went fine, else returns DDI_FAILURE
+ *    (e.g. unable to reacquire the tree lock). The wait_for_cbs argument
+ *    tells the caller whether completions can be expected; wait_gen gives
+ *    the generation at which the commands were issued.
+ */
+int
+s1394_process_topology_tree(s1394_hal_t *hal, int *wait_for_cbs,
+ uint_t *wait_gen)
+{
+ int i;
+ uint_t hal_node_num, number_of_nodes;
+ s1394_node_t *node, *onode;
+ s1394_status_t status;
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_process_topology_tree_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_0(s1394_process_topology_tree_lock_failed,
+ S1394_TNF_SL_HOTPLUG_ERROR, "");
+ TNF_PROBE_0_DEBUG(s1394_process_topology_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ hal->cfgroms_being_read = 0;
+ number_of_nodes = hal->number_of_nodes;
+ s1394_unlock_tree(hal);
+
+ for (i = 0; i < number_of_nodes; i++) {
+
+ if (i == hal_node_num)
+ continue;
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ node = &hal->topology_tree[i];
+
+ TNF_PROBE_4_DEBUG(s1394_process_topology_tree,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_int, node_num, i,
+ tnf_int, parsed, CFGROM_PARSED(node),
+ tnf_int, matched, NODE_MATCHED(node),
+ tnf_int, visited, NODE_VISITED(node));
+
+ if (LINK_ACTIVE(node) == B_FALSE) {
+ s1394_unlock_tree(hal);
+ continue;
+ }
+ if (node->cfgrom == NULL) {
+ s1394_unlock_tree(hal);
+ continue;
+ }
+
+ onode = node->old_node;
+
+ if (onode != NULL && onode->cfgrom != NULL && node->cfgrom !=
+ NULL) {
+ /*
+ * onode->cfgrom != node->cfgrom should have been
+ * handled by s1394_match_GUID()!!!
+ */
+ if (onode->cfgrom != node->cfgrom)
+ TNF_PROBE_5(s1394_process_topology_tree_err,
+ S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_int, node_num, i, tnf_int, gen_changed,
+ CFGROM_GEN_CHANGED(node), tnf_int, parsed,
+ CFGROM_PARSED(node), tnf_opaque, old_cfgrom,
+ onode->cfgrom, tnf_opaque, new_cfgrom,
+ node->cfgrom);
+ ASSERT(onode->cfgrom == node->cfgrom);
+ }
+
+ if (CFGROM_PARSED(node) == B_FALSE && CFGROM_ALL_READ(node) ==
+ B_TRUE) {
+ ASSERT((node->cfgrom_size <
+ IEEE1394_CONFIG_ROM_QUAD_SZ) ||
+ NODE_MATCHED(node) == B_TRUE);
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+ ASSERT(node->target_list == NULL);
+ rw_exit(&hal->target_list_rwlock);
+ if (s1394_update_devinfo_tree(hal, node) ==
+ DDI_FAILURE) {
+ ASSERT(MUTEX_NOT_HELD(
+ &hal->topology_tree_mutex));
+ TNF_PROBE_1(s1394_process_topology_tree,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string,
+ msg, "failure from update devinfo");
+ TNF_PROBE_0_DEBUG(
+ s1394_process_topology_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+ } else if (CFGROM_PARSED(node) == B_FALSE && CFGROM_BIB_READ(
+ node) == B_TRUE) {
+ if (s1394_read_rest_of_cfgrom(hal, node, &status) !=
+ DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_process_topology_tree,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string,
+ msg, "failure reading rest of cfgrom");
+ if ((status & S1394_LOCK_FAILED) == 0) {
+ ASSERT(MUTEX_HELD(&hal->
+ topology_tree_mutex));
+ *wait_for_cbs = 0;
+ s1394_unlock_tree(hal);
+ }
+ TNF_PROBE_0_DEBUG(
+ s1394_process_topology_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ } else {
+ *wait_for_cbs = 1;
+ *wait_gen = hal->br_cfgrom_read_gen;
+ }
+ }
+
+ s1394_unlock_tree(hal);
+ }
+
+ /*
+ * flag the tree as processed; if a single bus reset happens after
+ * this, we will use tree matching.
+ */
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_process_topology_tree,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string,
+ msg, "relock failed while marking tree processed");
+ TNF_PROBE_0_DEBUG(s1394_process_topology_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+ hal->topology_tree_processed = B_TRUE;
+ s1394_unlock_tree(hal);
+
+ TNF_PROBE_1_DEBUG(s1394_process_topology_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int, hal_instance,
+ ddi_get_instance(hal->halinfo.dip));
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_process_old_tree()
+ * Walks through the old tree and offlines nodes that are removed. Nodes
+ * with an active link in the old tree but link powered off in the current
+ *    generation are also offlined, as well as nodes with an invalid config
+ *    rom in the current generation.
+ * The topology tree is locked/unlocked while walking through all the nodes;
+ * if the locking fails at any stage, stops further walking and returns
+ * DDI_FAILURE. Returns DDI_SUCCESS if everything went fine.
+ */
+int
+s1394_process_old_tree(s1394_hal_t *hal)
+{
+ int i;
+ uint_t hal_node_num_old, old_number_of_nodes;
+ s1394_node_t *onode;
+
+ TNF_PROBE_0_DEBUG(s1394_process_old_tree_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ /*
+ * NODE_MATCHED(onode) == 0 indicates this node doesn't exist
+ * any more.
+ */
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_0(s1394_process_old_tree_lock_failed,
+ S1394_TNF_SL_HOTPLUG_ERROR, "");
+ TNF_PROBE_0_DEBUG(s1394_process_old_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+ hal_node_num_old = IEEE1394_NODE_NUM(hal->old_node_id);
+ old_number_of_nodes = hal->old_number_of_nodes;
+ s1394_unlock_tree(hal);
+
+ for (i = 0; i < old_number_of_nodes; i++) {
+
+ if (i == hal_node_num_old)
+ continue;
+ if (s1394_lock_tree(hal) != DDI_SUCCESS) {
+ TNF_PROBE_2(s1394_process_old_tree,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "lock failed while processing node", tnf_uint,
+ node_num, i);
+ TNF_PROBE_0_DEBUG(s1394_process_old_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ onode = &hal->old_tree[i];
+
+ if (onode->cfgrom == NULL) {
+ CLEAR_CFGROM_STATE(onode);
+ s1394_unlock_tree(hal);
+ continue;
+ }
+
+ TNF_PROBE_1_DEBUG(s1394_process_old_tree,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_opaque,
+ cfgrom, onode->cfgrom);
+
+ TNF_PROBE_5_DEBUG(s1394_process_old_tree,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_int,
+ node_num, i, tnf_int, parsed, CFGROM_PARSED(onode), tnf_int,
+ matched, NODE_MATCHED(onode), tnf_int, visited,
+ NODE_VISITED(onode), tnf_int, generation_changed,
+ CFGROM_GEN_CHANGED(onode));
+
+ /*
+ * onode->cur_node == NULL iff we couldn't read cfgrom in the
+ * current generation in non-tree matching case (and thus
+ * match_GUIDs couldn't set cur_node).
+ */
+ if (NODE_MATCHED(onode) == B_FALSE || (onode->cur_node ==
+ NULL || ((CFGROM_VALID(onode) == B_TRUE &&
+ CFGROM_VALID(onode->cur_node) == B_FALSE) ||
+ (LINK_ACTIVE(onode) == B_TRUE && LINK_ACTIVE(onode->
+ cur_node) == B_FALSE)))) {
+
+ if (onode->cur_node != NULL && CFGROM_VALID(onode) ==
+ B_TRUE && CFGROM_VALID(onode->cur_node) == B_FALSE)
+ TNF_PROBE_1_DEBUG
+ (s1394_process_old_tree_invalid_cfgrom,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_int, node_num, i);
+ if (onode->cur_node != NULL && LINK_ACTIVE(onode) ==
+ B_TRUE && LINK_ACTIVE(onode->cur_node) == B_FALSE)
+ TNF_PROBE_1_DEBUG
+ (s1394_process_old_tree_link_off,
+ S1394_TNF_SL_HOTPLUG_STACK,
+ "", tnf_int, node_num, i);
+ if (s1394_offline_node(hal, onode) != DDI_SUCCESS) {
+ TNF_PROBE_2(s1394_process_old_tree,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string,
+ msg, "failure from offline node", tnf_uint,
+ node_num, i);
+ TNF_PROBE_0_DEBUG(s1394_process_old_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+ s1394_free_cfgrom(hal, onode, S1394_FREE_CFGROM_OLD);
+ }
+
+ s1394_unlock_tree(hal);
+ }
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ TNF_PROBE_0_DEBUG(s1394_process_old_tree_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_update_unit_dir_location()
+ * Updates the unit-dir-offset property on the devinfo.
+ * NOTE: ndi_prop_update_int() is interrupt callable (and thus won't block);
+ * so, the caller doesn't drop topology_tree_mutex when calling this routine.
+ */
+/*ARGSUSED*/
+static void
+s1394_update_unit_dir_location(s1394_hal_t *hal, dev_info_t *tdip,
+ uint_t offset)
+{
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+ ASSERT(tdip != NULL);
+
+ TNF_PROBE_1_DEBUG(s1394_update_unit_dir_location_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "", tnf_uint, offset, offset);
+ (void) ndi_prop_update_int(DDI_DEV_T_NONE, tdip, "unit-dir-offset",
+ offset);
+ TNF_PROBE_0_DEBUG(s1394_update_unit_dir_location_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
+
+/*
+ * s1394_add_target_to_node()
+ *    Adds the target to the list of targets hanging off the node. Figures
+ *    out the node by searching the topology tree for the GUID corresponding
+ *    to the target. Points the on_node field of the target structure at
+ *    the node.
+ */
+void
+s1394_add_target_to_node(s1394_target_t *target)
+{
+ s1394_target_t *t;
+ s1394_hal_t *hal;
+ uint32_t guid_hi;
+ uint32_t guid_lo;
+ int i;
+ char name[MAXNAMELEN];
+ char *ptr;
+
+ TNF_PROBE_0_DEBUG(s1394_add_target_to_node_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ hal = target->on_hal;
+ ASSERT(hal != NULL);
+
+ /* Topology tree must be locked when it gets here! */
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ /* target_list_rwlock should be held in write mode */
+ ASSERT(rw_read_locked(&target->on_hal->target_list_rwlock) == 0);
+
+ if ((ptr = ddi_get_name_addr(target->target_dip)) == NULL) {
+ TNF_PROBE_0_DEBUG(s1394_add_target_to_node_exit_no_name,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return;
+ }
+
+	(void) sprintf(name, "%s", ptr);
+ /* Drop the ,<ADDR> part, if present */
+ if ((ptr = strchr(name, ',')) != NULL)
+ *ptr = '\0';
+
+ ptr = name;
+ guid_hi = s1394_stoi(ptr, 8, 16);
+ guid_lo = s1394_stoi(ptr + 8, 8, 16);
+
+ /* Search the HAL's node list for this GUID */
+ for (i = 0; i < hal->number_of_nodes; i++) {
+ if (CFGROM_VALID(&hal->topology_tree[i]) == B_TRUE) {
+ ASSERT(hal->topology_tree[i].cfgrom != NULL);
+
+ if ((hal->topology_tree[i].node_guid_hi == guid_hi) &&
+ (hal->topology_tree[i].node_guid_lo == guid_lo)) {
+ target->on_node = &hal->topology_tree[i];
+ if ((t = hal->topology_tree[i].target_list) !=
+ NULL) {
+ ASSERT(t != target);
+ while (t->target_sibling != NULL) {
+ t = t->target_sibling;
+ ASSERT(t != target);
+ }
+ t->target_sibling = target;
+ } else {
+ hal->topology_tree[i].target_list =
+ target;
+ }
+
+ /*
+ * update target_list in all targets on the
+ * node
+ */
+ t = hal->topology_tree[i].target_list;
+ while (t != NULL) {
+ t->target_list =
+ hal->topology_tree[i].target_list;
+ t = t->target_sibling;
+ }
+ break;
+ }
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_add_target_to_node_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
+
+/*
+ * s1394_remove_target_from_node()
+ * Removes target from the corresponding node's target_list.
+ */
+void
+s1394_remove_target_from_node(s1394_target_t *target)
+{
+ s1394_target_t *t, *t1;
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(s1394_remove_target_from_node_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ hal = target->on_hal;
+ ASSERT(hal != NULL);
+
+ /* Topology tree must be locked when it gets here! */
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ /* target_list_rwlock should be held in write mode */
+ ASSERT(rw_read_locked(&target->on_hal->target_list_rwlock) == 0);
+
+ if (target->on_node == NULL) {
+ TNF_PROBE_1_DEBUG(s1394_remove_target_from_node_NULL,
+ S1394_TNF_SL_HOTPLUG_STACK, "",
+ tnf_uint, target_state, target->target_state);
+ }
+
+ t = target->target_list;
+ t1 = NULL;
+ while (t != NULL) {
+ if (t == target) {
+ if (t1 == NULL) {
+ target->target_list = t->target_sibling;
+ } else {
+ t1->target_sibling = t->target_sibling;
+ }
+ break;
+ }
+ t1 = t;
+ t = t->target_sibling;
+ }
+ /* Update the target_list pointer in all the targets */
+ if (target->on_node != NULL)
+ target->on_node->target_list = target->target_list;
+
+ t = t1 = target->target_list;
+ while (t != NULL) {
+ t->target_list = t1;
+ t = t->target_sibling;
+ }
+
+ target->on_node = NULL;
+ target->target_sibling = NULL;
+
+ TNF_PROBE_0_DEBUG(s1394_remove_target_from_node_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+}
diff --git a/usr/src/uts/common/io/1394/s1394_isoch.c b/usr/src/uts/common/io/1394/s1394_isoch.c
new file mode 100644
index 0000000000..2df89157ca
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394_isoch.c
@@ -0,0 +1,1257 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394_isoch.c
+ * 1394 Services Layer Isochronous Communication Routines
+ * This file contains routines for managing isochronous bandwidth
+ * and channel needs for registered targets (through the target
+ * isoch interfaces).
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/types.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+#include <sys/1394/ieee1394.h>
+
+/*
+ * s1394_isoch_rsrc_realloc()
+ * is called during bus reset processing to reallocate any isochronous
+ * resources that were previously allocated.
+ */
+void
+s1394_isoch_rsrc_realloc(s1394_hal_t *hal)
+{
+ s1394_isoch_cec_t *cec_curr;
+ uint32_t chnl_mask;
+ uint32_t old_chnl_mask;
+ uint_t bw_alloc_units;
+ uint_t generation;
+ uint_t chnl_num;
+ int err;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_isoch_rsrc_realloc_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ /*
+ * Get the current generation number - don't need the
+ * topology tree mutex here because it is read-only, and
+ * there is a race condition with or without it.
+ */
+ generation = hal->generation_count;
+
+ /* Lock the Isoch CEC list */
+ mutex_enter(&hal->isoch_cec_list_mutex);
+
+ cec_curr = hal->isoch_cec_list_head;
+ while (cec_curr != NULL) {
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* Are we supposed to reallocate resources? */
+ if (!(cec_curr->cec_options & T1394_NO_IRM_ALLOC) &&
+ (cec_curr->realloc_valid == B_TRUE) &&
+ (cec_curr->realloc_failed == B_FALSE)) {
+
+ /* Reallocate some bandwidth */
+ bw_alloc_units = s1394_compute_bw_alloc_units(hal,
+ cec_curr->bandwidth, cec_curr->realloc_speed);
+
+ /* Check that the generation has not changed */
+ if (generation != hal->generation_count) {
+ /* Try the next Isoch CEC */
+ goto next_isoch_cec;
+ }
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ /*
+ * We can unlock the Isoch CEC list here
+ * because we know this Isoch CEC can not
+ * go away (we are trying to realloc its
+ * resources so it can't be in a state that
+ * will allow a free).
+ */
+ mutex_exit(&hal->isoch_cec_list_mutex);
+
+ /* Try to reallocate bandwidth */
+ ret = s1394_bandwidth_alloc(hal, bw_alloc_units,
+ generation, &err);
+
+ /* Lock the Isoch CEC list */
+ mutex_enter(&hal->isoch_cec_list_mutex);
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* If we failed because we couldn't get bandwidth */
+ if (ret == DDI_FAILURE) {
+ cec_curr->realloc_failed = B_TRUE;
+ cec_curr->realloc_fail_reason =
+ T1394_RSRC_BANDWIDTH;
+ }
+ }
+
+ /* Are we supposed to reallocate resources? */
+ if (!(cec_curr->cec_options & T1394_NO_IRM_ALLOC) &&
+ (cec_curr->realloc_valid == B_TRUE) &&
+ (cec_curr->realloc_failed == B_FALSE)) {
+
+ /* Reallocate the channel */
+ chnl_num = cec_curr->realloc_chnl_num;
+ chnl_mask = (1 << ((63 - chnl_num) % 32));
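+			/*
+			 * Channel numbering follows the IRM
+			 * CHANNELS_AVAILABLE registers: channel 0 maps to
+			 * the MSB of the HI set (chnl_num 0 -> bit 31,
+			 * chnl_num 31 -> bit 0, chnl_num 32 -> bit 31 of
+			 * the LO set chosen below).
+			 */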
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ /*
+ * We can unlock the Isoch CEC list here
+ * because we know this Isoch CEC can not
+ * go away (we are trying to realloc its
+ * resources so it can't be in a state that
+ * will allow a free).
+ */
+ mutex_exit(&hal->isoch_cec_list_mutex);
+
+ if (chnl_num < 32) {
+ ret = s1394_channel_alloc(hal, chnl_mask,
+ generation, S1394_CHANNEL_ALLOC_HI,
+ &old_chnl_mask, &err);
+ } else {
+ ret = s1394_channel_alloc(hal, chnl_mask,
+ generation, S1394_CHANNEL_ALLOC_LO,
+ &old_chnl_mask, &err);
+ }
+
+ /* Lock the Isoch CEC list */
+ mutex_enter(&hal->isoch_cec_list_mutex);
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ if (ret == DDI_FAILURE) {
+ if (err != CMD1394_EBUSRESET) {
+ /*
+ * If we successfully reallocate
+ * bandwidth, and then fail getting
+ * the channel, we need to free up
+ * the bandwidth
+ */
+
+ /* Try to free up the bandwidth */
+ ret = s1394_bandwidth_free(hal,
+ bw_alloc_units, generation, &err);
+ if ((ret == DDI_FAILURE) &&
+ (err != CMD1394_EBUSRESET)) {
+ TNF_PROBE_1(
+ s1394_isoch_rsrc_realloc_error,
+ S1394_TNF_SL_ISOCH_ERROR,
+ "", tnf_string, msg,
+ "Unable to free bandwidth");
+ }
+ /* Try the next Isoch CEC */
+ goto next_isoch_cec;
+ }
+ cec_curr->realloc_failed = B_TRUE;
+ cec_curr->realloc_fail_reason =
+ T1394_RSRC_CHANNEL;
+ }
+ }
+next_isoch_cec:
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ cec_curr = cec_curr->cec_next;
+ }
+
+ /* Unlock the Isoch CEC list */
+ mutex_exit(&hal->isoch_cec_list_mutex);
+ TNF_PROBE_0_DEBUG(s1394_isoch_rsrc_realloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+}
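+
+/*
+ * Illustrative note (not part of the driver): the chnl_mask computation
+ * above maps an IEEE 1394 channel number to a bit in one of the two
+ * CHANNELS_AVAILABLE registers, with channel 0 in the most significant
+ * bit of CHANNELS_AVAILABLE_HI and channel 63 in the least significant
+ * bit of CHANNELS_AVAILABLE_LO:
+ *
+ *	chnl_num  0 -> (1 << ((63 -  0) % 32)) == (1 << 31)  (HI register)
+ *	chnl_num 31 -> (1 << ((63 - 31) % 32)) == (1 <<  0)  (HI register)
+ *	chnl_num 32 -> (1 << ((63 - 32) % 32)) == (1 << 31)  (LO register)
+ *	chnl_num 63 -> (1 << ((63 - 63) % 32)) == (1 <<  0)  (LO register)
+ */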
+
+/*
+ * s1394_isoch_rsrc_realloc_notify()
+ *    is called during bus reset processing to notify all targets whose
+ *    isochronous resources could not be reallocated.
+ */
+void
+s1394_isoch_rsrc_realloc_notify(s1394_hal_t *hal)
+{
+ s1394_isoch_cec_t *cec_curr;
+ s1394_isoch_cec_member_t *member_curr;
+ t1394_isoch_rsrc_error_t fail_arg;
+ opaque_t evts_arg;
+ void (*rsrc_fail_callback)(t1394_isoch_cec_handle_t, opaque_t,
+ t1394_isoch_rsrc_error_t);
+
+ TNF_PROBE_0_DEBUG(s1394_isoch_rsrc_realloc_notify_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ /* Lock the Isoch CEC list */
+ mutex_enter(&hal->isoch_cec_list_mutex);
+
+ /* Notify all targets that failed realloc */
+ cec_curr = hal->isoch_cec_list_head;
+ while (cec_curr != NULL) {
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* Do we notify of realloc failure? */
+ if (!(cec_curr->cec_options & T1394_NO_IRM_ALLOC) &&
+ (cec_curr->realloc_valid == B_TRUE) &&
+ (cec_curr->realloc_failed == B_TRUE)) {
+
+ /* Reason for realloc failure */
+ fail_arg = cec_curr->realloc_fail_reason;
+
+ /* Now we are going into the callbacks */
+ cec_curr->in_fail_callbacks = B_TRUE;
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ /*
+ * We can unlock the Isoch CEC list here
+ * because we have the in_fail_callbacks
+ * field set to B_TRUE. And free will fail
+ * if we are in fail callbacks.
+ */
+ mutex_exit(&hal->isoch_cec_list_mutex);
+
+ /* Call all of the rsrc_fail_target() callbacks */
+ /* Start at the head (talker first) and */
+ /* go toward the tail (listeners last) */
+ member_curr = cec_curr->cec_member_list_head;
+ while (member_curr != NULL) {
+ rsrc_fail_callback = member_curr->
+ isoch_cec_evts.rsrc_fail_target;
+ evts_arg = member_curr->isoch_cec_evts_arg;
+				/* Identical call for either CEC type */
+				if (rsrc_fail_callback != NULL) {
+					rsrc_fail_callback(
+					    (t1394_isoch_cec_handle_t)
+					    cec_curr, evts_arg,
+					    fail_arg);
+				}
+ member_curr = member_curr->cec_mem_next;
+ }
+
+ /* Lock the Isoch CEC list */
+ mutex_enter(&hal->isoch_cec_list_mutex);
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* We are finished with the callbacks */
+ cec_curr->in_fail_callbacks = B_FALSE;
+ if (cec_curr->cec_want_wakeup == B_TRUE) {
+ cec_curr->cec_want_wakeup = B_FALSE;
+ cv_broadcast(&cec_curr->in_callbacks_cv);
+ }
+
+ /* Set flags back to original state */
+ cec_curr->realloc_valid = B_FALSE;
+ cec_curr->realloc_failed = B_FALSE;
+ }
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ cec_curr = cec_curr->cec_next;
+ }
+
+ /* Unlock the Isoch CEC list */
+ mutex_exit(&hal->isoch_cec_list_mutex);
+ TNF_PROBE_0_DEBUG(s1394_isoch_rsrc_realloc_notify_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+}
+
+/*
+ * s1394_channel_alloc()
+ * is used to allocate an isochronous channel. A channel mask and
+ * generation are passed. A request is sent to whichever node is the
+ * IRM for the appropriate channels. If it fails because of a bus
+ * reset it can be retried. If it fails for another reason the
+ *    channel(s) may not be available or there may be no IRM.
+ */
+int
+s1394_channel_alloc(s1394_hal_t *hal, uint32_t channel_mask, uint_t generation,
+ uint_t flags, uint32_t *old_channels, int *result)
+{
+ cmd1394_cmd_t *cmd;
+ uint64_t IRM_ID_addr;
+ uint32_t compare;
+ uint32_t swap;
+ uint32_t old_value;
+ uint_t hal_node_num;
+ uint_t IRM_node;
+ uint_t offset;
+ int ret;
+ int i;
+ int num_retries = S1394_ISOCH_ALLOC_RETRIES;
+
+ TNF_PROBE_0_DEBUG(s1394_channel_alloc_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ /* Lock the topology tree */
+ mutex_enter(&hal->topology_tree_mutex);
+
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ IRM_node = hal->IRM_node;
+
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Make sure there is a valid IRM on the bus */
+ if (IRM_node == -1) {
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_channel_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "No IRM on the 1394 bus");
+ TNF_PROBE_0_DEBUG(s1394_channel_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if (flags & S1394_CHANNEL_ALLOC_HI) {
+ offset =
+ (IEEE1394_SCSR_CHANS_AVAIL_HI & IEEE1394_CSR_OFFSET_MASK);
+ } else {
+ offset =
+ (IEEE1394_SCSR_CHANS_AVAIL_LO & IEEE1394_CSR_OFFSET_MASK);
+ }
+
+ /* Send compare-swap to CHANNELS_AVAILABLE */
+ /* register on the Isoch Rsrc Mgr */
+ if (IRM_node == hal_node_num) {
+ /* Local */
+ i = num_retries;
+ do {
+ (void) HAL_CALL(hal).csr_read(hal->halinfo.hal_private,
+ offset, &old_value);
+
+ /* Check that the generation has not changed */
+ if (generation != hal->generation_count) {
+ *result = CMD1394_EBUSRESET;
+ TNF_PROBE_0_DEBUG(s1394_channel_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ compare = old_value;
+ swap = old_value & (~channel_mask);
+
+ ret = HAL_CALL(hal).csr_cswap32(
+ hal->halinfo.hal_private, generation,
+ offset, compare, swap, &old_value);
+ if (ret != DDI_SUCCESS) {
+ *result = CMD1394_EBUSRESET;
+ TNF_PROBE_1(s1394_channel_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+ msg, "Error in cswap32");
+ TNF_PROBE_0_DEBUG(s1394_channel_alloc_exit,
+				    S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if ((~old_value & channel_mask) != 0) {
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_channel_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+ msg, "Channels already taken");
+ TNF_PROBE_0_DEBUG(s1394_channel_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if (old_value == compare) {
+ *result = CMD1394_CMDSUCCESS;
+ *old_channels = old_value;
+
+ TNF_PROBE_0_DEBUG(s1394_channel_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ } while (i--);
+
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_channel_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Retries exceeded");
+ TNF_PROBE_0_DEBUG(s1394_channel_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+
+ } else {
+ /* Remote */
+ if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
+ *result = CMD1394_EUNKNOWN_ERROR;
+ TNF_PROBE_1(s1394_channel_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unable to allocate command");
+ TNF_PROBE_0_DEBUG(s1394_channel_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
+ CMD1394_OVERRIDE_ADDR | CMD1394_BLOCKING);
+ cmd->cmd_type = CMD1394_ASYNCH_LOCK_32;
+
+ if (flags & S1394_CHANNEL_ALLOC_HI) {
+ IRM_ID_addr = (IEEE1394_ADDR_BUS_ID_MASK |
+ IEEE1394_SCSR_CHANS_AVAIL_HI) |
+ (((uint64_t)IRM_node) <<
+ IEEE1394_ADDR_PHY_ID_SHIFT);
+ } else {
+ IRM_ID_addr = (IEEE1394_ADDR_BUS_ID_MASK |
+ IEEE1394_SCSR_CHANS_AVAIL_LO) |
+ (((uint64_t)IRM_node) <<
+ IEEE1394_ADDR_PHY_ID_SHIFT);
+ }
+
+ cmd->cmd_addr = IRM_ID_addr;
+ cmd->bus_generation = generation;
+ cmd->cmd_u.l32.data_value = T1394_DATA32(~channel_mask);
+ cmd->cmd_u.l32.num_retries = num_retries;
+ cmd->cmd_u.l32.lock_type = CMD1394_LOCK_BIT_AND;
+
+ ret = s1394_split_lock_req(hal, NULL, cmd);
+
+ if (ret == DDI_SUCCESS) {
+ if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
+ *old_channels = T1394_DATA32(
+ cmd->cmd_u.l32.old_value);
+
+ if ((~(*old_channels) & channel_mask) != 0) {
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_channel_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "",
+ tnf_string, msg,
+ "Channels already taken");
+ TNF_PROBE_0_DEBUG(
+ s1394_channel_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ ret = DDI_FAILURE;
+ } else {
+ *result = cmd->cmd_result;
+ }
+
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_0_DEBUG(s1394_channel_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (ret);
+
+ } else {
+ *result = cmd->cmd_result;
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_1(s1394_channel_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+ msg, "Error allocating isoch channel");
+ TNF_PROBE_0_DEBUG(s1394_channel_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+ } else {
+ *result = cmd->cmd_result;
+
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_1(s1394_channel_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Error allocating isoch channel");
+ TNF_PROBE_0_DEBUG(s1394_channel_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+}
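+
+/*
+ * Illustrative usage sketch (not part of the driver; "gen" and the error
+ * handling are hypothetical): a caller wanting channel 35 builds the mask
+ * the same way s1394_isoch_rsrc_realloc() does, selects the LO register
+ * because 35 >= 32, and retries only on a bus reset failure:
+ *
+ *	uint32_t mask = (1 << ((63 - 35) % 32));
+ *	uint32_t old_chnls;
+ *	int err;
+ *
+ *	if (s1394_channel_alloc(hal, mask, gen, S1394_CHANNEL_ALLOC_LO,
+ *	    &old_chnls, &err) != DDI_SUCCESS) {
+ *		if (err == CMD1394_EBUSRESET)
+ *			... re-read the generation and retry ...
+ *	}
+ */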
+
+/*
+ * s1394_channel_free()
+ * is used to free up an isochronous channel. A channel mask and
+ * generation are passed. A request is sent to whichever node is the
+ * IRM for the appropriate channels. If it fails because of a bus
+ * reset it can be retried. If it fails for another reason the
+ * channel(s) may already be free or there may be no IRM.
+ */
+int
+s1394_channel_free(s1394_hal_t *hal, uint32_t channel_mask, uint_t generation,
+ uint_t flags, uint32_t *old_channels, int *result)
+{
+ cmd1394_cmd_t *cmd;
+ uint64_t IRM_ID_addr;
+ uint32_t compare;
+ uint32_t swap;
+ uint32_t old_value;
+ uint_t hal_node_num;
+ uint_t IRM_node;
+ uint_t offset;
+ int ret;
+ int i;
+ int num_retries = S1394_ISOCH_ALLOC_RETRIES;
+
+ TNF_PROBE_0_DEBUG(s1394_channel_free_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ /* Lock the topology tree */
+ mutex_enter(&hal->topology_tree_mutex);
+
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ IRM_node = hal->IRM_node;
+
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Make sure there is a valid IRM on the bus */
+ if (IRM_node == -1) {
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_channel_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "No IRM on the 1394 bus");
+ TNF_PROBE_0_DEBUG(s1394_channel_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if (flags & S1394_CHANNEL_ALLOC_HI) {
+ offset =
+ (IEEE1394_SCSR_CHANS_AVAIL_HI & IEEE1394_CSR_OFFSET_MASK);
+ } else {
+ offset =
+ (IEEE1394_SCSR_CHANS_AVAIL_LO & IEEE1394_CSR_OFFSET_MASK);
+ }
+
+ /* Send compare-swap to CHANNELS_AVAILABLE */
+ /* register on the Isoch Rsrc Mgr */
+	if (IRM_node == hal_node_num) {
+ /* Local */
+ i = num_retries;
+ do {
+ (void) HAL_CALL(hal).csr_read(hal->halinfo.hal_private,
+ offset, &old_value);
+
+ /* Check that the generation has not changed */
+ if (generation != hal->generation_count) {
+ *result = CMD1394_EBUSRESET;
+ TNF_PROBE_0_DEBUG(s1394_channel_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ compare = old_value;
+ swap = old_value | channel_mask;
+
+ ret = HAL_CALL(hal).csr_cswap32(
+			    hal->halinfo.hal_private, generation,
+ offset, compare, swap, &old_value);
+ if (ret != DDI_SUCCESS) {
+ *result = CMD1394_EBUSRESET;
+ TNF_PROBE_1(s1394_channel_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+ msg, "Error in cswap32");
+ TNF_PROBE_0_DEBUG(s1394_channel_free_exit,
+				    S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if (old_value == compare) {
+ *result = CMD1394_CMDSUCCESS;
+ *old_channels = old_value;
+ TNF_PROBE_0_DEBUG(s1394_channel_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ } while (i--);
+
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_channel_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Retries exceeded");
+ TNF_PROBE_0_DEBUG(s1394_channel_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+
+ } else {
+ /* Remote */
+ if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
+ *result = CMD1394_EUNKNOWN_ERROR;
+ TNF_PROBE_1(s1394_channel_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unable to allocate command");
+ TNF_PROBE_0_DEBUG(s1394_channel_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
+ CMD1394_OVERRIDE_ADDR | CMD1394_BLOCKING);
+ cmd->cmd_type = CMD1394_ASYNCH_LOCK_32;
+
+ if (flags & S1394_CHANNEL_ALLOC_HI) {
+ IRM_ID_addr = (IEEE1394_ADDR_BUS_ID_MASK |
+ IEEE1394_SCSR_CHANS_AVAIL_HI) |
+ (((uint64_t)IRM_node) <<
+ IEEE1394_ADDR_PHY_ID_SHIFT);
+ } else {
+ IRM_ID_addr = (IEEE1394_ADDR_BUS_ID_MASK |
+ IEEE1394_SCSR_CHANS_AVAIL_LO) |
+ (((uint64_t)IRM_node) <<
+ IEEE1394_ADDR_PHY_ID_SHIFT);
+ }
+
+ cmd->cmd_addr = IRM_ID_addr;
+ cmd->bus_generation = generation;
+ cmd->cmd_u.l32.data_value = T1394_DATA32(channel_mask);
+ cmd->cmd_u.l32.num_retries = num_retries;
+ cmd->cmd_u.l32.lock_type = CMD1394_LOCK_BIT_OR;
+
+ ret = s1394_split_lock_req(hal, NULL, cmd);
+
+ if (ret == DDI_SUCCESS) {
+ if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
+
+ *old_channels = T1394_DATA32(
+ cmd->cmd_u.l32.old_value);
+ *result = cmd->cmd_result;
+
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_0_DEBUG(s1394_channel_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+
+ } else {
+ *result = cmd->cmd_result;
+
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_1(s1394_channel_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+ msg, "Error freeing isoch channel");
+ TNF_PROBE_0_DEBUG(s1394_channel_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+ } else {
+ *result = cmd->cmd_result;
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_1(s1394_channel_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Error freeing isoch channel");
+ TNF_PROBE_0_DEBUG(s1394_channel_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+}
+
+/*
+ * s1394_bandwidth_alloc()
+ * is used to allocate isochronous bandwidth. A number of bandwidth
+ * allocation units and a generation are passed. The request is sent
+ * to whichever node is the IRM for this amount of bandwidth. If it
+ * fails because of a bus reset it can be retried. If it fails for
+ * another reason the bandwidth may not be available or there may be
+ * no IRM.
+ */
+int
+s1394_bandwidth_alloc(s1394_hal_t *hal, uint32_t bw_alloc_units,
+ uint_t generation, int *result)
+{
+ cmd1394_cmd_t *cmd;
+ uint64_t IRM_ID_addr;
+ uint32_t compare;
+ uint32_t swap;
+ uint32_t old_value;
+ uint_t hal_node_num;
+ uint_t IRM_node;
+ int temp_value;
+ int ret;
+ int i;
+ int num_retries = S1394_ISOCH_ALLOC_RETRIES;
+
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_alloc_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ /* Lock the topology tree */
+ mutex_enter(&hal->topology_tree_mutex);
+
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ IRM_node = hal->IRM_node;
+
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Make sure there is a valid IRM on the bus */
+ if (IRM_node == -1) {
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_bandwidth_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "No IRM on the 1394 bus");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Send compare-swap to BANDWIDTH_AVAILABLE */
+ /* register on the Isoch Rsrc Mgr */
+ if (IRM_node == hal_node_num) {
+ /* Local */
+ i = num_retries;
+ do {
+ (void) HAL_CALL(hal).csr_read(hal->halinfo.hal_private,
+ (IEEE1394_SCSR_BANDWIDTH_AVAIL &
+ IEEE1394_CSR_OFFSET_MASK), &old_value);
+ /*
+ * Check that the generation has not changed -
+ * don't need the lock (read-only)
+ */
+ if (generation != hal->generation_count) {
+ *result = CMD1394_EBUSRESET;
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ temp_value = (old_value - bw_alloc_units);
+ if ((old_value >= bw_alloc_units) &&
+ (temp_value >= IEEE1394_BANDWIDTH_MIN)) {
+ compare = old_value;
+ swap = (uint32_t)temp_value;
+ } else {
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_bandwidth_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+				    msg, "Insufficient bandwidth available");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ ret = HAL_CALL(hal).csr_cswap32(
+ hal->halinfo.hal_private, generation,
+ (IEEE1394_SCSR_BANDWIDTH_AVAIL &
+ IEEE1394_CSR_OFFSET_MASK), compare, swap,
+ &old_value);
+ if (ret != DDI_SUCCESS) {
+ *result = CMD1394_EBUSRESET;
+ TNF_PROBE_1(s1394_bandwidth_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+ msg, "Error in cswap32");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if (old_value == compare) {
+ *result = CMD1394_CMDSUCCESS;
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ } while (i--);
+
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_bandwidth_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Too many retries");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+
+ } else {
+ /* Remote */
+ if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
+ *result = CMD1394_EUNKNOWN_ERROR;
+ TNF_PROBE_1(s1394_bandwidth_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unable to allocate command");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
+ CMD1394_OVERRIDE_ADDR | CMD1394_BLOCKING);
+ cmd->cmd_type = CMD1394_ASYNCH_LOCK_32;
+ IRM_ID_addr = (IEEE1394_ADDR_BUS_ID_MASK |
+ IEEE1394_SCSR_BANDWIDTH_AVAIL) | (((uint64_t)IRM_node) <<
+ IEEE1394_ADDR_PHY_ID_SHIFT);
+ cmd->cmd_addr = IRM_ID_addr;
+ cmd->bus_generation = generation;
+ cmd->cmd_u.l32.arg_value = 0;
+ cmd->cmd_u.l32.data_value = bw_alloc_units;
+ cmd->cmd_u.l32.num_retries = num_retries;
+ cmd->cmd_u.l32.lock_type = CMD1394_LOCK_THRESH_SUBTRACT;
+
+ ret = s1394_split_lock_req(hal, NULL, cmd);
+
+ if (ret == DDI_SUCCESS) {
+ if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
+ *result = cmd->cmd_result;
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+
+ } else {
+ *result = cmd->cmd_result;
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_1(s1394_bandwidth_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+ msg, "Error allocating isoch bandwidth");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+ } else {
+ *result = cmd->cmd_result;
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_1(s1394_bandwidth_alloc_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Error allocating isoch bandwidth");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_alloc_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+}
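+
+/*
+ * Illustrative note (not part of the driver; the values are hypothetical):
+ * the local case above is a compare-and-swap retry loop on the IRM's
+ * BANDWIDTH_AVAILABLE register. With 4915 allocation units available and
+ * a request for 1000 units, one successful iteration looks like:
+ *
+ *	csr_read()                  -> old_value = 4915
+ *	compare = 4915, swap = 4915 - 1000 = 3915
+ *	csr_cswap32(compare, swap)  -> old_value = 4915
+ *	old_value == compare        -> 1000 units reserved
+ *
+ * If another node updated the register first, csr_cswap32() returns a
+ * different old_value and the loop retries with the fresh value.
+ */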
+
+/*
+ * s1394_compute_bw_alloc_units()
+ * is used to compute the number of "bandwidth allocation units" that
+ * are necessary for a given bit rate. It calculates the overhead
+ * necessary for isoch packet headers, bus arbitration, etc. (See
+ * IEEE 1394-1995 Section 8.3.2.3.7 for an explanation of what a
+ *    "bandwidth allocation unit" is.)
+ */
+uint_t
+s1394_compute_bw_alloc_units(s1394_hal_t *hal, uint_t bandwidth, uint_t speed)
+{
+ uint_t total_quads;
+ uint_t speed_factor;
+ uint_t bau;
+ int max_hops;
+
+ /* Lock the topology tree */
+ mutex_enter(&hal->topology_tree_mutex);
+
+ /* Calculate the 1394 bus diameter */
+ max_hops = s1394_topology_tree_calculate_diameter(hal);
+
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Calculate the total bandwidth (including overhead) */
+ total_quads = (bandwidth >> 2) + IEEE1394_ISOCH_HDR_QUAD_SZ;
+ switch (speed) {
+ case IEEE1394_S400:
+ speed_factor = ISOCH_SPEED_FACTOR_S400;
+ break;
+ case IEEE1394_S200:
+ speed_factor = ISOCH_SPEED_FACTOR_S200;
+ break;
+	case IEEE1394_S100:
+		speed_factor = ISOCH_SPEED_FACTOR_S100;
+		break;
+	default:
+		/* Unknown speed - assume S100 so we never under-allocate */
+		speed_factor = ISOCH_SPEED_FACTOR_S100;
+		break;
+	}
+ /* See IEC 61883-1 pp. 26-29 for this formula */
+ bau = (32 * max_hops) + (total_quads * speed_factor);
+
+ return (bau);
+}
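+
+/*
+ * Illustrative worked example (not part of the driver), assuming the
+ * conventional speed factors of 16/8/4 allocation units per quadlet at
+ * S100/S200/S400 (one unit is the time to send one quadlet at S1600)
+ * and a 2-quadlet isochronous header: for a 1024-byte payload at S400
+ * on a bus whose diameter is 5 hops,
+ *
+ *	total_quads = (1024 >> 2) + 2 = 258
+ *	bau = (32 * 5) + (258 * 4) = 160 + 1032 = 1192
+ */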
+
+/*
+ * s1394_bandwidth_free()
+ * is used to free up isochronous bandwidth. A number of bandwidth
+ * allocation units and a generation are passed. The request is sent
+ * to whichever node is the IRM for this amount of bandwidth. If it
+ * fails because of a bus reset it can be retried. If it fails for
+ * another reason the bandwidth may already be freed or there may
+ * be no IRM.
+ */
+int
+s1394_bandwidth_free(s1394_hal_t *hal, uint32_t bw_alloc_units,
+ uint_t generation, int *result)
+{
+ cmd1394_cmd_t *cmd;
+ uint64_t IRM_ID_addr;
+ uint32_t compare;
+ uint32_t swap;
+ uint32_t old_value;
+ uint32_t temp_value;
+ uint_t hal_node_num;
+ uint_t IRM_node;
+ int ret;
+ int i;
+ int num_retries = S1394_ISOCH_ALLOC_RETRIES;
+
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_free_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ /* Lock the topology tree */
+ mutex_enter(&hal->topology_tree_mutex);
+
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ IRM_node = hal->IRM_node;
+
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* Make sure there is a valid IRM on the bus */
+ if (IRM_node == -1) {
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_bandwidth_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "No IRM on the 1394 bus");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Send compare-swap to BANDWIDTH_AVAILABLE */
+ /* register on the Isoch Rsrc Mgr */
+ if (IRM_node == hal_node_num) {
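+		/* Local */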
+ i = num_retries;
+ do {
+ (void) HAL_CALL(hal).csr_read(hal->halinfo.hal_private,
+ (IEEE1394_SCSR_BANDWIDTH_AVAIL &
+ IEEE1394_CSR_OFFSET_MASK), &old_value);
+
+ /* Check that the generation has not changed */
+ if (generation != hal->generation_count) {
+ *result = CMD1394_EBUSRESET;
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ temp_value = (old_value + bw_alloc_units);
+ if ((temp_value >= old_value) &&
+ (temp_value <= IEEE1394_BANDWIDTH_MAX)) {
+ compare = old_value;
+ swap = temp_value;
+ } else {
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_bandwidth_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+				    msg, "Would exceed BANDWIDTH_AVAILABLE max");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ ret = HAL_CALL(hal).csr_cswap32(
+ hal->halinfo.hal_private, generation,
+ (IEEE1394_SCSR_BANDWIDTH_AVAIL &
+ IEEE1394_CSR_OFFSET_MASK), compare, swap,
+ &old_value);
+ if (ret != DDI_SUCCESS) {
+ *result = CMD1394_EBUSRESET;
+ TNF_PROBE_1(s1394_bandwidth_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+ msg, "Error in cswap32");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ if (old_value == compare) {
+ *result = CMD1394_CMDSUCCESS;
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+ }
+ } while (i--);
+
+ *result = CMD1394_ERETRIES_EXCEEDED;
+ TNF_PROBE_1(s1394_bandwidth_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Retries exceeded");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+
+ } else {
+ /* Remote */
+ if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
+ *result = CMD1394_EUNKNOWN_ERROR;
+ TNF_PROBE_1(s1394_bandwidth_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unable to allocate command");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
+ CMD1394_OVERRIDE_ADDR | CMD1394_BLOCKING);
+ cmd->cmd_type = CMD1394_ASYNCH_LOCK_32;
+ IRM_ID_addr = (IEEE1394_ADDR_BUS_ID_MASK |
+ IEEE1394_SCSR_BANDWIDTH_AVAIL) |
+		    (((uint64_t)IRM_node) << IEEE1394_ADDR_PHY_ID_SHIFT);
+ cmd->cmd_addr = IRM_ID_addr;
+ cmd->bus_generation = generation;
+ cmd->cmd_u.l32.arg_value = IEEE1394_BANDWIDTH_MAX;
+ cmd->cmd_u.l32.data_value = bw_alloc_units;
+ cmd->cmd_u.l32.num_retries = num_retries;
+ cmd->cmd_u.l32.lock_type = CMD1394_LOCK_THRESH_ADD;
+
+ ret = s1394_split_lock_req(hal, NULL, cmd);
+
+ if (ret == DDI_SUCCESS) {
+ if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
+ *result = cmd->cmd_result;
+
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+
+ } else {
+ *result = cmd->cmd_result;
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_1(s1394_bandwidth_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+ msg, "Error freeing isoch bandwidth");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+ } else {
+ *result = cmd->cmd_result;
+ /* Need to free the command */
+ (void) s1394_free_cmd(hal, &cmd);
+
+ TNF_PROBE_1(s1394_bandwidth_free_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Error freeing isoch bandwidth");
+ TNF_PROBE_0_DEBUG(s1394_bandwidth_free_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+}
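+
+/*
+ * Illustrative note (not part of the driver): in the remote case above,
+ * CMD1394_LOCK_THRESH_ADD is assumed to act as a bounded add - the IRM
+ * adds data_value to BANDWIDTH_AVAILABLE only while the result stays at
+ * or below arg_value (IEEE1394_BANDWIDTH_MAX here), mirroring the
+ * overflow check done in the local compare-swap loop.
+ */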
+
+/*
+ * s1394_isoch_cec_list_insert()
+ * is used to insert an Isoch CEC into a given HAL's list of Isoch CECs.
+ */
+void
+s1394_isoch_cec_list_insert(s1394_hal_t *hal, s1394_isoch_cec_t *cec)
+{
+ s1394_isoch_cec_t *cec_temp;
+
+ TNF_PROBE_0_DEBUG(s1394_isoch_cec_list_insert_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->isoch_cec_list_mutex));
+
+ /* Is the Isoch CEC list empty? */
+ if ((hal->isoch_cec_list_head == NULL) &&
+ (hal->isoch_cec_list_tail == NULL)) {
+
+ hal->isoch_cec_list_head = cec;
+ hal->isoch_cec_list_tail = cec;
+
+ cec->cec_next = NULL;
+ cec->cec_prev = NULL;
+
+ } else {
+ cec->cec_next = hal->isoch_cec_list_head;
+ cec->cec_prev = NULL;
+ cec_temp = hal->isoch_cec_list_head;
+ cec_temp->cec_prev = cec;
+
+ hal->isoch_cec_list_head = cec;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_isoch_cec_list_insert_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+}
+
+/*
+ * s1394_isoch_cec_list_remove()
+ * is used to remove an Isoch CEC from a given HAL's list of Isoch CECs.
+ */
+void
+s1394_isoch_cec_list_remove(s1394_hal_t *hal, s1394_isoch_cec_t *cec)
+{
+ s1394_isoch_cec_t *prev_cec;
+ s1394_isoch_cec_t *next_cec;
+
+ TNF_PROBE_0_DEBUG(s1394_isoch_cec_list_remove_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(MUTEX_HELD(&hal->isoch_cec_list_mutex));
+
+ prev_cec = cec->cec_prev;
+ next_cec = cec->cec_next;
+ cec->cec_prev = NULL;
+ cec->cec_next = NULL;
+
+ if (prev_cec != NULL) {
+ prev_cec->cec_next = next_cec;
+
+ } else {
+ if (hal->isoch_cec_list_head == cec)
+ hal->isoch_cec_list_head = next_cec;
+ }
+
+ if (next_cec != NULL) {
+ next_cec->cec_prev = prev_cec;
+
+ } else {
+ if (hal->isoch_cec_list_tail == cec)
+ hal->isoch_cec_list_tail = prev_cec;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_isoch_cec_list_remove_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+}
+
+/*
+ * s1394_isoch_cec_member_list_insert()
+ * is used to insert a new member (target) into the list of members for
+ * a given Isoch CEC.
+ */
+/* ARGSUSED */
+void
+s1394_isoch_cec_member_list_insert(s1394_hal_t *hal, s1394_isoch_cec_t *cec,
+ s1394_isoch_cec_member_t *member)
+{
+ s1394_isoch_cec_member_t *member_temp;
+
+ TNF_PROBE_0_DEBUG(s1394_isoch_cec_member_list_insert_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(MUTEX_HELD(&cec->isoch_cec_mutex));
+
+ /* Is the Isoch CEC member list empty? */
+ if ((cec->cec_member_list_head == NULL) &&
+ (cec->cec_member_list_tail == NULL)) {
+
+ cec->cec_member_list_head = member;
+ cec->cec_member_list_tail = member;
+ member->cec_mem_next = NULL;
+ member->cec_mem_prev = NULL;
+
+ } else if (member->cec_mem_options & T1394_TALKER) {
+ /* Put talker at the head of the list */
+ member->cec_mem_next = cec->cec_member_list_head;
+ member->cec_mem_prev = NULL;
+ member_temp = cec->cec_member_list_head;
+ member_temp->cec_mem_prev = member;
+ cec->cec_member_list_head = member;
+
+ } else {
+ /* Put listeners at the tail of the list */
+ member->cec_mem_prev = cec->cec_member_list_tail;
+ member->cec_mem_next = NULL;
+ member_temp = cec->cec_member_list_tail;
+ member_temp->cec_mem_next = member;
+ cec->cec_member_list_tail = member;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_isoch_cec_member_list_insert_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+}
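+
+/*
+ * Illustrative note (not part of the driver): the insertion policy above
+ * keeps the talker (if any) at the head and appends listeners at the
+ * tail, so inserting talker T and then listeners L1 and L2 yields:
+ *
+ *	head -> T -> L1 -> L2 <- tail
+ *
+ * This ordering is what lets s1394_isoch_rsrc_realloc_notify() walk the
+ * list head-to-tail to call the talker's rsrc_fail_target() callback
+ * before the listeners' callbacks.
+ */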
+
+/*
+ * s1394_isoch_cec_member_list_remove()
+ * is used to remove a member (target) from the list of members for
+ * a given Isoch CEC.
+ */
+/* ARGSUSED */
+void
+s1394_isoch_cec_member_list_remove(s1394_hal_t *hal, s1394_isoch_cec_t *cec,
+ s1394_isoch_cec_member_t *member)
+{
+ s1394_isoch_cec_member_t *prev_member;
+ s1394_isoch_cec_member_t *next_member;
+
+ TNF_PROBE_0_DEBUG(s1394_isoch_cec_member_list_remove_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(MUTEX_HELD(&cec->isoch_cec_mutex));
+
+ prev_member = member->cec_mem_prev;
+ next_member = member->cec_mem_next;
+
+ member->cec_mem_prev = NULL;
+ member->cec_mem_next = NULL;
+
+ if (prev_member != NULL) {
+ prev_member->cec_mem_next = next_member;
+
+ } else {
+ if (cec->cec_member_list_head == member)
+ cec->cec_member_list_head = next_member;
+ }
+
+ if (next_member != NULL) {
+ next_member->cec_mem_prev = prev_member;
+
+ } else {
+ if (cec->cec_member_list_tail == member)
+ cec->cec_member_list_tail = prev_member;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_isoch_cec_member_list_remove_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+}
diff --git a/usr/src/uts/common/io/1394/s1394_misc.c b/usr/src/uts/common/io/1394/s1394_misc.c
new file mode 100644
index 0000000000..685d775f3a
--- /dev/null
+++ b/usr/src/uts/common/io/1394/s1394_misc.c
@@ -0,0 +1,983 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * s1394_misc.c
+ * 1394 Services Layer Miscellaneous Routines
+ * This file contains miscellaneous routines used as "helper" functions
+ * by various other files in the Services Layer.
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/kstat.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+#include <sys/1394/ieee1394.h>
+
+int s1394_print_guids = 0; /* patchable: set non-zero to print GUIDs */
+
+extern void nx1394_undefine_events(s1394_hal_t *hal);
+static void s1394_cleanup_node_cfgrom(s1394_hal_t *hal);
+
+/*
+ * s1394_cleanup_for_detach()
+ * is used to do all of the necessary cleanup to handle a detach or a
+ * failure in h1394_attach(). The cleanup_level specifies how far we
+ * got in h1394_attach() before failure.
+ */
+void
+s1394_cleanup_for_detach(s1394_hal_t *hal, uint_t cleanup_level)
+{
+
+ TNF_PROBE_0_DEBUG(s1394_cleanup_for_detach_enter, S1394_TNF_SL_STACK,
+ "");
+
+ switch (cleanup_level) {
+ case H1394_CLEANUP_LEVEL7:
+ /* remove HAL from the global HAL list */
+ mutex_enter(&s1394_statep->hal_list_mutex);
+ if ((s1394_statep->hal_head == hal) &&
+ (s1394_statep->hal_tail == hal)) {
+ s1394_statep->hal_head = NULL;
+ s1394_statep->hal_tail = NULL;
+ } else {
+ if (hal->hal_prev)
+ hal->hal_prev->hal_next = hal->hal_next;
+ if (hal->hal_next)
+ hal->hal_next->hal_prev = hal->hal_prev;
+ if (s1394_statep->hal_head == hal)
+ s1394_statep->hal_head = hal->hal_next;
+ if (s1394_statep->hal_tail == hal)
+ s1394_statep->hal_tail = hal->hal_prev;
+ }
+ mutex_exit(&s1394_statep->hal_list_mutex);
+ /*
+ * No FCP cleanup needed at this time -- the following call
+ * to s1394_destroy_addr_space() takes care of everything.
+ */
+ /* FALLTHROUGH */
+
+ case H1394_CLEANUP_LEVEL6:
+ s1394_destroy_addr_space(hal);
+ /* FALLTHROUGH */
+
+ case H1394_CLEANUP_LEVEL5:
+ s1394_destroy_local_config_rom(hal);
+ /* FALLTHROUGH */
+
+ case H1394_CLEANUP_LEVEL4:
+ /* Undo all the kstat stuff */
+ (void) s1394_kstat_delete(hal);
+ /* FALLTHROUGH */
+
+ case H1394_CLEANUP_LEVEL3:
+ /* Free up the memory for selfID buffer #1 */
+ kmem_free(hal->selfid_buf1, S1394_SELFID_BUF_SIZE);
+ /* Free up the memory for selfID buffer #0 */
+ kmem_free(hal->selfid_buf0, S1394_SELFID_BUF_SIZE);
+ /* Turn off any timers that might be set */
+ s1394_destroy_timers(hal);
+ /* Destroy the bus_reset thread */
+ s1394_destroy_br_thread(hal);
+ /* Cleanup the Config ROM buffers in the topology_tree */
+ s1394_cleanup_node_cfgrom(hal);
+ /* FALLTHROUGH */
+
+ case H1394_CLEANUP_LEVEL2:
+ /* Destroy the br_cmplq_cv and br_cmplq_mutex */
+ cv_destroy(&hal->br_cmplq_cv);
+ mutex_destroy(&hal->br_cmplq_mutex);
+ /* Destroy the br_thread_cv and br_thread_mutex */
+ cv_destroy(&hal->br_thread_cv);
+ mutex_destroy(&hal->br_thread_mutex);
+ /* FALLTHROUGH */
+
+ case H1394_CLEANUP_LEVEL1:
+ (void) ddi_prop_remove_all(hal->halinfo.dip);
+ nx1394_undefine_events(hal);
+ /* FALLTHROUGH */
+
+ case H1394_CLEANUP_LEVEL0:
+ kmem_cache_destroy(hal->hal_kmem_cachep);
+ /* Destroy pending_q_mutex and outstanding_q_mutex */
+ mutex_destroy(&hal->pending_q_mutex);
+ mutex_destroy(&hal->outstanding_q_mutex);
+ /* Destroy target_list_rwlock */
+ rw_destroy(&hal->target_list_rwlock);
+ /* Destroy bus_mgr_node_mutex and bus_mgr_node_cv */
+ cv_destroy(&hal->bus_mgr_node_cv);
+ mutex_destroy(&hal->bus_mgr_node_mutex);
+ /* Destroy isoch_cec_list_mutex */
+ mutex_destroy(&hal->isoch_cec_list_mutex);
+ /* Destroy the Cycle Master timer mutex */
+ mutex_destroy(&hal->cm_timer_mutex);
+ /* Destroy topology_tree_mutex */
+ mutex_destroy(&hal->topology_tree_mutex);
+ /* Free the hal structure */
+ kmem_free(hal, sizeof (s1394_hal_t));
+ break;
+
+ default:
+ /* Error */
+ TNF_PROBE_1(s1394_cleanup_for_detach_error,
+ S1394_TNF_SL_ERROR, "", tnf_string, msg,
+ "Invalid cleanup_level");
+ break;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_cleanup_for_detach_exit, S1394_TNF_SL_STACK,
+ "");
+}
+
+/*
+ * s1394_hal_shutdown()
+ * is used to shutdown the HAL. If the HAL indicates that an error
+ * condition (hardware or software) has occurred, it is shutdown. This
+ * routine is also called when HAL informs the services layer of a shutdown
+ * (due an internal shutdown, for eg). disable_hal indicates whether the
+ * caller intends to inform the hal of the (services layer) shutdown or not.
+ */
+void
+s1394_hal_shutdown(s1394_hal_t *hal, boolean_t disable_hal)
+{
+ ddi_eventcookie_t cookie;
+ t1394_localinfo_t localinfo;
+
+ TNF_PROBE_0_DEBUG(s1394_hal_shutdown_enter, S1394_TNF_SL_STACK, "");
+
+ mutex_enter(&hal->topology_tree_mutex);
+
+ if (hal->hal_state == S1394_HAL_SHUTDOWN) {
+ mutex_exit(&hal->topology_tree_mutex);
+ if (disable_hal == B_TRUE)
+ HAL_CALL(hal).shutdown(hal->halinfo.hal_private);
+
+ TNF_PROBE_0_DEBUG(s1394_hal_shutdown_exit_already,
+ S1394_TNF_SL_STACK, "");
+ return;
+ }
+
+ hal->hal_state = S1394_HAL_SHUTDOWN;
+ mutex_exit(&hal->topology_tree_mutex);
+ /* Disable the HAL */
+ if (disable_hal == B_TRUE)
+ HAL_CALL(hal).shutdown(hal->halinfo.hal_private);
+
+ /*
+ * Send a remove event to all interested parties
+ */
+ mutex_enter(&hal->topology_tree_mutex);
+ localinfo.bus_generation = hal->generation_count;
+ localinfo.local_nodeID = hal->node_id;
+ mutex_exit(&hal->topology_tree_mutex);
+
+ if (ndi_event_retrieve_cookie(hal->hal_ndi_event_hdl, NULL,
+ DDI_DEVI_REMOVE_EVENT, &cookie, NDI_EVENT_NOPASS) ==
+ NDI_SUCCESS)
+ (void) ndi_event_run_callbacks(hal->hal_ndi_event_hdl, NULL,
+ cookie, &localinfo);
+
+ TNF_PROBE_0_DEBUG(s1394_hal_shutdown_exit, S1394_TNF_SL_STACK, "");
+}
+
+/*
+ * s1394_initiate_hal_reset()
+ * sets up the HAL structure to indicate a self-initiated bus reset and
+ * calls the appropriate HAL entry point. If too many bus resets have
+ * happened, a message is printed out and the call is ignored.
+ */
+void
+s1394_initiate_hal_reset(s1394_hal_t *hal, int reason)
+{
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_initiate_hal_reset_enter, S1394_TNF_SL_BR_STACK,
+ "");
+
+ if (hal->num_bus_reset_till_fail > 0) {
+ hal->initiated_bus_reset = B_TRUE;
+ hal->initiated_br_reason = reason;
+
+ /* Reset the bus */
+ ret = HAL_CALL(hal).bus_reset(hal->halinfo.hal_private);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_initiate_hal_reset_error,
+ S1394_TNF_SL_ERROR, "", tnf_string, msg,
+ "Error initiating bus reset");
+ }
+ } else {
+ cmn_err(CE_NOTE, "Unable to reenumerate the 1394 bus - If new"
+ " devices have recently been added, remove them.");
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_initiate_hal_reset_exit, S1394_TNF_SL_BR_STACK,
+ "");
+}
+
+/*
+ * s1394_on_br_thread()
+ * is used to determine if the current thread of execution is the same
+ * as the bus reset thread. This is useful during bus reset callbacks
+ * to determine whether or not a target may block.
+ */
+boolean_t
+s1394_on_br_thread(s1394_hal_t *hal)
+{
+ if (hal->br_thread == curthread)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+/*
+ * s1394_destroy_br_thread()
+ * is used in h1394_detach() to signal the bus reset thread to go away.
+ */
+void
+s1394_destroy_br_thread(s1394_hal_t *hal)
+{
+ TNF_PROBE_0_DEBUG(s1394_destroy_br_thread_enter, S1394_TNF_SL_STACK,
+ "");
+
+ /* Send the signal to the reset thread to go away */
+ mutex_enter(&hal->br_thread_mutex);
+ hal->br_thread_ev_type |= BR_THR_GO_AWAY;
+ cv_signal(&hal->br_thread_cv);
+ mutex_exit(&hal->br_thread_mutex);
+
+ /* Wakeup the bus_reset thread if waiting for bus_mgr timer */
+ mutex_enter(&hal->bus_mgr_node_mutex);
+ hal->bus_mgr_node = S1394_INVALID_NODE_NUM;
+ cv_signal(&hal->bus_mgr_node_cv);
+ mutex_exit(&hal->bus_mgr_node_mutex);
+
+ mutex_enter(&hal->br_cmplq_mutex);
+ cv_signal(&hal->br_cmplq_cv);
+ mutex_exit(&hal->br_cmplq_mutex);
+
+ /* Wait for the br_thread to be done */
+ while (hal->br_thread_ev_type & BR_THR_GO_AWAY)
+ delay(drv_usectohz(10));
+
+ TNF_PROBE_0_DEBUG(s1394_destroy_br_thread_exit, S1394_TNF_SL_STACK,
+ "");
+}
+
+/*
+ * s1394_tickle_bus_reset_thread()
+ * is used to wakeup the bus reset thread after the interrupt routine
+ * has completed its bus reset processing.
+ */
+void
+s1394_tickle_bus_reset_thread(s1394_hal_t *hal)
+{
+ if (hal->topology_tree_processed != B_TRUE) {
+ /* Send the signal to the reset thread */
+ mutex_enter(&hal->br_thread_mutex);
+ hal->br_thread_ev_type |= BR_THR_CFGROM_SCAN;
+ cv_signal(&hal->br_thread_cv);
+ mutex_exit(&hal->br_thread_mutex);
+
+ /* Signal the msgq wait, too (just in case) */
+ mutex_enter(&hal->br_cmplq_mutex);
+ cv_signal(&hal->br_cmplq_cv);
+ mutex_exit(&hal->br_cmplq_mutex);
+
+ /* Signal the bus_mgr wait, too (just in case) */
+ mutex_enter(&hal->bus_mgr_node_mutex);
+ cv_signal(&hal->bus_mgr_node_cv);
+ mutex_exit(&hal->bus_mgr_node_mutex);
+ }
+}
+
+/*
+ * s1394_block_on_asynch_cmd()
+ * is used by many of the asynch routines to block (if necessary)
+ * while waiting for command completion.
+ */
+void
+s1394_block_on_asynch_cmd(cmd1394_cmd_t *cmd)
+{
+ s1394_cmd_priv_t *s_priv;
+
+ TNF_PROBE_0_DEBUG(s1394_block_on_asynch_cmd_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Is this a blocking command? */
+ if (cmd->cmd_options & CMD1394_BLOCKING) {
+ /* Block until command completes */
+ mutex_enter(&s_priv->blocking_mutex);
+ while (s_priv->blocking_flag != B_TRUE)
+ cv_wait(&s_priv->blocking_cv, &s_priv->blocking_mutex);
+ s_priv->blocking_flag = B_FALSE;
+ mutex_exit(&s_priv->blocking_mutex);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_block_on_asynch_cmd_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+}
+
+/*
+ * s1394_HAL_asynch_error()
+ * is used by many of the asynch routines to determine what error
+ * code is expected in a given situation (based on HAL state).
+ */
+/* ARGSUSED */
+int
+s1394_HAL_asynch_error(s1394_hal_t *hal, cmd1394_cmd_t *cmd,
+ s1394_hal_state_t state)
+{
+
+ ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
+
+ switch (state) {
+ case S1394_HAL_RESET:
+		/* Is the "dreq" bit set? (CSR) */
+ if (hal->disable_requests_bit == 1)
+ return (CMD1394_ENO_ATREQ);
+ else
+ return (CMD1394_CMDSUCCESS);
+
+ case S1394_HAL_DREQ:
+ /* "dreq" bit is set (CSR) */
+ return (CMD1394_ENO_ATREQ);
+
+ case S1394_HAL_SHUTDOWN:
+ return (CMD1394_EFATAL_ERROR);
+
+ default:
+ return (CMD1394_CMDSUCCESS);
+ }
+}
+
+/*
+ * s1394_mblk_too_small()
+ *    is used to determine if the mblk_t structure(s) given in an asynch
+ * block request are sufficient to hold the amount of data requested.
+ */
+boolean_t
+s1394_mblk_too_small(cmd1394_cmd_t *cmd)
+{
+ mblk_t *curr_blk;
+ boolean_t flag;
+ size_t msgb_len;
+ size_t size;
+
+ TNF_PROBE_0_DEBUG(s1394_mblk_too_small_enter, S1394_TNF_SL_ATREQ_STACK,
+ "");
+
+ curr_blk = cmd->cmd_u.b.data_block;
+ msgb_len = 0;
+ flag = B_TRUE;
+ size = cmd->cmd_u.b.blk_length;
+
+ while (curr_blk != NULL) {
+ if (cmd->cmd_type == CMD1394_ASYNCH_WR_BLOCK) {
+ msgb_len += (curr_blk->b_wptr - curr_blk->b_rptr);
+ } else {
+ msgb_len +=
+ (curr_blk->b_datap->db_lim - curr_blk->b_wptr);
+ }
+
+ if (msgb_len >= size) {
+ flag = B_FALSE;
+ break;
+ }
+
+ curr_blk = curr_blk->b_cont;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_mblk_too_small_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ return (flag);
+}
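+
+/*
+ * Illustrative note (not part of the driver): for a block write the
+ * usable size of each mblk is the data it already holds
+ * (b_wptr - b_rptr); for a block read it is the space left to fill
+ * (db_lim - b_wptr). For example, a chain of two 512-byte mblks, each
+ * half full, offers 512 bytes of data for a write and 512 bytes of
+ * remaining capacity for a read, so a 600-byte request would make this
+ * routine return B_TRUE in either case.
+ */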
+
+/*
+ * s1394_address_rollover()
+ * is used to determine if the address given will rollover the 48-bit
+ * address space.
+ */
+boolean_t
+s1394_address_rollover(cmd1394_cmd_t *cmd)
+{
+ uint64_t addr_before;
+ uint64_t addr_after;
+ size_t length;
+
+ TNF_PROBE_0_DEBUG(s1394_address_rollover_enter,
+ S1394_TNF_SL_ATREQ_STACK, "");
+
+ switch (cmd->cmd_type) {
+ case CMD1394_ASYNCH_RD_QUAD:
+ case CMD1394_ASYNCH_WR_QUAD:
+ case CMD1394_ASYNCH_LOCK_32:
+ length = IEEE1394_QUADLET;
+ break;
+
+ case CMD1394_ASYNCH_LOCK_64:
+ length = IEEE1394_OCTLET;
+ break;
+
+ case CMD1394_ASYNCH_RD_BLOCK:
+ case CMD1394_ASYNCH_WR_BLOCK:
+ length = cmd->cmd_u.b.blk_length;
+		break;
+
+	default:
+		/* Unknown command type - a zero length can never roll over */
+		length = 0;
+		break;
+	}
+
+ addr_before = cmd->cmd_addr & IEEE1394_ADDR_OFFSET_MASK;
+ addr_after = (addr_before + length) & IEEE1394_ADDR_OFFSET_MASK;
+
+ if (addr_after < addr_before) {
+ TNF_PROBE_0_DEBUG(s1394_address_rollover_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (B_TRUE);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_address_rollover_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (B_FALSE);
+}
+
+/*
+ * s1394_stoi()
+ * returns the integer value of the string of hex/dec/oct numeric characters
+ * beginning at *p. Does no overflow checking.
+ */
+uint_t
+s1394_stoi(char *p, int len, int base)
+{
+ int n;
+ int c;
+
+ if (len == 0)
+ return (0);
+
+ for (n = 0; len && (c = *p); p++, len--) {
+ if (c >= '0' && c <= '9')
+ c = c - '0';
+ else if (c >= 'a' && c <= 'f')
+ c = c - 'a' + 10;
+ else if (c >= 'A' && c <= 'F')
+			c = c - 'A' + 10;
+ n = (n * base) + c;
+ }
+
+ return (n);
+}
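+
+/*
+ * Illustrative examples (not part of the driver):
+ *
+ *	s1394_stoi("123", 3, 10) == 123
+ *	s1394_stoi("1a",  2, 16) == 26
+ *	s1394_stoi("17",  2,  8) == 15
+ *
+ * The caller must pass only digits valid for "base"; out-of-range
+ * characters and overflow are not detected.
+ */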
+
+/*
+ * s1394_CRC16()
+ * implements ISO/IEC 13213:1994, ANSI/IEEE Std 1212, 1994 - 8.1.5
+ */
+uint_t
+s1394_CRC16(uint_t *d, uint_t crc_length)
+{
+ uint_t CRC = 0;
+ uint_t data;
+ uint_t next;
+ uint_t sum;
+ int shift;
+ int i;
+
+ TNF_PROBE_0_DEBUG(s1394_CRC16_enter, S1394_TNF_SL_STACK, "");
+
+ for (i = 0; i < crc_length; i++) {
+ data = d[i];
+
+		/* On a CRC mismatch, a second pass using "shift > 0" */
+		/* (s1394_CRC16_old()) supports devices that coded it wrong. */
+ for (next = CRC, shift = 28; shift >= 0; shift -= 4) {
+ sum = ((next >> 12) ^ (data >> shift)) & 0xF;
+ next = (next << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
+ }
+ CRC = next & IEEE1394_CRC16_MASK;
+ }
+
+ TNF_PROBE_1_DEBUG(s1394_CRC16_exit, S1394_TNF_SL_STACK, "",
+ tnf_uint, crc, CRC);
+ return (CRC);
+}
+
+/*
+ * s1394_CRC16_old()
+ * implements a slightly modified version of ISO/IEC 13213:1994,
+ * ANSI/IEEE Std 1212, 1994 - 8.1.5. In the original IEEE 1212-1994
+ * specification the C code example was incorrect and some devices
+ * were manufactured using this incorrect CRC. On CRC16 failures
+ * this CRC is tried in case it is a legacy device.
+ */
+uint_t
+s1394_CRC16_old(uint_t *d, uint_t crc_length)
+{
+ uint_t CRC = 0;
+ uint_t data;
+ uint_t next;
+ uint_t sum;
+ int shift;
+ int i;
+
+ TNF_PROBE_0_DEBUG(s1394_CRC16_old_enter, S1394_TNF_SL_STACK, "");
+
+ for (i = 0; i < crc_length; i++) {
+ data = d[i];
+ for (next = CRC, shift = 28; shift > 0; shift -= 4) {
+ sum = ((next >> 12) ^ (data >> shift)) & 0xF;
+ next = (next << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
+ }
+ CRC = next & IEEE1394_CRC16_MASK;
+ }
+
+ TNF_PROBE_1_DEBUG(s1394_CRC16_old_exit, S1394_TNF_SL_STACK, "",
+ tnf_uint, crc, CRC);
+ return (CRC);
+}
+
+/*
+ * s1394_ioctl()
+ * implements generic ioctls (eg. devctl support) and any non-HAL ioctls.
+ * Only ioctls required for devctl support are implemented at present.
+ */
+/* ARGSUSED */
+int
+s1394_ioctl(s1394_hal_t *hal, int cmd, intptr_t arg, int mode, cred_t *cred_p,
+ int *rval_p)
+{
+ struct devctl_iocdata *dcp;
+ dev_info_t *self;
+ int rv = 0;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(s1394_ioctl_enter, S1394_TNF_SL_IOCTL_STACK, "");
+
+ self = hal->halinfo.dip;
+
+ /*
+ * We can use the generic implementation for these ioctls
+ */
+ switch (cmd) {
+ case DEVCTL_DEVICE_GETSTATE:
+ case DEVCTL_DEVICE_ONLINE:
+ case DEVCTL_DEVICE_OFFLINE:
+ case DEVCTL_DEVICE_REMOVE:
+ case DEVCTL_BUS_GETSTATE:
+ return (ndi_devctl_ioctl(self, cmd, arg, mode, 0));
+ }
+
+ /* Read devctl ioctl data */
+ if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(s1394_ioctl_exit, S1394_TNF_SL_IOCTL_STACK,
+ "");
+ return (EFAULT);
+ }
+
+ switch (cmd) {
+
+	case DEVCTL_DEVICE_RESET:
+		/* DEVCTL_DEVICE_REMOVE is handled by ndi_devctl_ioctl() above */
+		rv = ENOTSUP;
+		break;
+
+ case DEVCTL_BUS_CONFIGURE:
+ case DEVCTL_BUS_UNCONFIGURE:
+ rv = ENOTSUP;
+ break;
+
+ case DEVCTL_BUS_QUIESCE:
+ case DEVCTL_BUS_UNQUIESCE:
+ rv = ENOTSUP; /* Or call up the tree? */
+ break;
+
+ case DEVCTL_BUS_RESET:
+ case DEVCTL_BUS_RESETALL:
+ if (hal->halinfo.phy == H1394_PHY_1394A) {
+ ret = HAL_CALL(hal).short_bus_reset(
+ hal->halinfo.hal_private);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(s1394_ioctl_error,
+ S1394_TNF_SL_ERROR, "", tnf_string, msg,
+ "Error initiating short bus reset");
+ }
+ } else {
+ ret = HAL_CALL(hal).bus_reset(hal->halinfo.hal_private);
+ if (ret != DDI_SUCCESS) {
+				TNF_PROBE_1(s1394_ioctl_error,
+ S1394_TNF_SL_ERROR, "", tnf_string, msg,
+ "Error initiating bus reset");
+ }
+ }
+ break;
+
+ default:
+ rv = ENOTTY;
+ }
+
+ ndi_dc_freehdl(dcp);
+
+ TNF_PROBE_0_DEBUG(s1394_ioctl_exit, S1394_TNF_SL_IOCTL_STACK, "");
+ return (rv);
+}
+
+/*
+ * s1394_kstat_init()
+ *    is used to initialize the Services Layer's kernel statistics.
+ */
+int
+s1394_kstat_init(s1394_hal_t *hal)
+{
+ int instance;
+
+ TNF_PROBE_0_DEBUG(s1394_kstat_init_enter, S1394_TNF_SL_STACK, "");
+
+ hal->hal_kstats = (s1394_kstat_t *)kmem_zalloc(sizeof (s1394_kstat_t),
+ KM_SLEEP);
+
+ instance = ddi_get_instance(hal->halinfo.dip);
+
+ hal->hal_ksp = kstat_create("s1394", instance, "stats", "misc",
+ KSTAT_TYPE_RAW, sizeof (s1394_kstat_t), KSTAT_FLAG_VIRTUAL);
+ if (hal->hal_ksp != NULL) {
+ hal->hal_ksp->ks_private = (void *)hal;
+ hal->hal_ksp->ks_update = s1394_kstat_update;
+ kstat_install(hal->hal_ksp);
+
+ TNF_PROBE_0_DEBUG(s1394_kstat_init_exit, S1394_TNF_SL_STACK,
+ "");
+ return (DDI_SUCCESS);
+ } else {
+ kmem_free((void *)hal->hal_kstats, sizeof (s1394_kstat_t));
+ TNF_PROBE_0_DEBUG(s1394_kstat_init_exit, S1394_TNF_SL_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+}
+
+/*
+ * s1394_kstat_delete()
+ *    is used (in h1394_detach()) to clean up and free the Services Layer's
+ * kernel statistics.
+ */
+int
+s1394_kstat_delete(s1394_hal_t *hal)
+{
+ TNF_PROBE_0_DEBUG(s1394_kstat_delete_enter, S1394_TNF_SL_STACK, "");
+
+ kstat_delete(hal->hal_ksp);
+ kmem_free((void *)hal->hal_kstats, sizeof (s1394_kstat_t));
+
+ TNF_PROBE_0_DEBUG(s1394_kstat_delete_exit, S1394_TNF_SL_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * s1394_kstat_update()
+ * is a callback that is called whenever a request to read the kernel
+ * statistics is made.
+ */
+int
+s1394_kstat_update(kstat_t *ksp, int rw)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(s1394_kstat_update_enter, S1394_TNF_SL_STACK, "");
+
+ hal = ksp->ks_private;
+
+ if (rw == KSTAT_WRITE) {
+ TNF_PROBE_0_DEBUG(s1394_kstat_update_exit, S1394_TNF_SL_STACK,
+ "");
+ return (EACCES);
+ } else {
+ ksp->ks_data = hal->hal_kstats;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_kstat_update_exit, S1394_TNF_SL_STACK, "");
+ return (0);
+}
+
+/*
+ * s1394_addr_alloc_kstat()
+ * is used by the kernel statistics to update the count for each type of
+ * address allocation.
+ */
+void
+s1394_addr_alloc_kstat(s1394_hal_t *hal, uint64_t addr)
+{
+ /* kstats - number of addr allocs */
+ if (s1394_is_posted_write(hal, addr) == B_TRUE)
+ hal->hal_kstats->addr_posted_alloc++;
+ else if (s1394_is_normal_addr(hal, addr) == B_TRUE)
+ hal->hal_kstats->addr_normal_alloc++;
+ else if (s1394_is_csr_addr(hal, addr) == B_TRUE)
+ hal->hal_kstats->addr_csr_alloc++;
+ else if (s1394_is_physical_addr(hal, addr) == B_TRUE)
+ hal->hal_kstats->addr_phys_alloc++;
+}
+
+/*
+ * s1394_print_node_info()
+ * is used to print speed map and GUID information on the console.
+ */
+void
+s1394_print_node_info(s1394_hal_t *hal)
+{
+ int i, j;
+ uint_t hal_node_num;
+ char str[200], tmp[200];
+
+ /* These are in common/os/logsubr.c */
+ extern void log_enter(void);
+ extern void log_exit(void);
+
+ if (s1394_print_guids == 0)
+ return;
+
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+
+ log_enter();
+
+ cmn_err(CE_CONT, "Speed Map (%d):\n",
+ ddi_get_instance(hal->halinfo.dip));
+
+ (void) strcpy(str, " |");
+ for (i = 0; i < hal->number_of_nodes; i++) {
+ (void) sprintf(tmp, " %2d ", i);
+ (void) strcat(str, tmp);
+ }
+ (void) strcat(str, " | GUID\n");
+	cmn_err(CE_CONT, "%s", str);
+
+ (void) strcpy(str, "----|");
+ for (i = 0; i < hal->number_of_nodes; i++) {
+ (void) sprintf(tmp, "----");
+ (void) strcat(str, tmp);
+ }
+ (void) strcat(str, "--|------------------\n");
+	cmn_err(CE_CONT, "%s", str);
+
+ for (i = 0; i < hal->number_of_nodes; i++) {
+
+ (void) sprintf(str, " %2d |", i);
+
+ for (j = 0; j < hal->number_of_nodes; j++) {
+ (void) sprintf(tmp, " %3d", hal->speed_map[i][j]);
+ (void) strcat(str, tmp);
+ }
+
+ if (i == hal_node_num) {
+
+ (void) strcat(str, " | Local OHCI Card\n");
+
+ } else if (CFGROM_BIB_READ(&hal->topology_tree[i])) {
+
+ (void) sprintf(tmp, " | %08x%08x\n",
+ hal->topology_tree[i].node_guid_hi,
+ hal->topology_tree[i].node_guid_lo);
+ (void) strcat(str, tmp);
+
+ } else if (hal->topology_tree[i].link_active == 0) {
+
+ (void) strcat(str, " | Link off\n");
+
+ } else {
+
+ (void) strcat(str, " | ????????????????\n");
+ }
+		cmn_err(CE_CONT, "%s", str);
+ }
+ cmn_err(CE_CONT, "\n");
+
+ log_exit();
+}
+
+/*
+ * s1394_dip_to_hal()
+ * is used to lookup a HAL's structure pointer by its dip.
+ */
+s1394_hal_t *
+s1394_dip_to_hal(dev_info_t *hal_dip)
+{
+ s1394_hal_t *current_hal = NULL;
+
+ TNF_PROBE_0_DEBUG(s1394_dip_to_hal_enter,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ mutex_enter(&s1394_statep->hal_list_mutex);
+
+ /* Search the HAL list for this dip */
+ current_hal = s1394_statep->hal_head;
+ while (current_hal != NULL) {
+ if (current_hal->halinfo.dip == hal_dip) {
+ break;
+ }
+ current_hal = current_hal->hal_next;
+ }
+
+ mutex_exit(&s1394_statep->hal_list_mutex);
+
+ TNF_PROBE_0_DEBUG(s1394_dip_to_hal_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (current_hal);
+}
+
+/*
+ * s1394_target_from_dip_locked()
+ * searches target_list on the HAL for target corresponding to tdip;
+ * if found, target is returned, else returns NULL. This routine assumes
+ * target_list_rwlock is locked.
+ * NOTE: the callers may have the list locked in either write mode or read
+ * mode. Currently, there is no ddi-compliant way we can assert on the lock
+ * being held in write mode.
+ */
+s1394_target_t *
+s1394_target_from_dip_locked(s1394_hal_t *hal, dev_info_t *tdip)
+{
+ s1394_target_t *temp;
+
+ TNF_PROBE_0_DEBUG(s1394_target_from_dip_locked_enter,
+ S1394_TNF_SL_STACK, "");
+
+ temp = hal->target_head;
+ while (temp != NULL) {
+ if (temp->target_dip == tdip) {
+ return (temp);
+ }
+ temp = temp->target_next;
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_target_from_dip_locked_exit,
+ S1394_TNF_SL_STACK, "");
+ return (NULL);
+}
+
+/*
+ * s1394_target_from_dip()
+ * searches target_list on the HAL for target corresponding to tdip;
+ * if found, target is returned locked.
+ */
+s1394_target_t *
+s1394_target_from_dip(s1394_hal_t *hal, dev_info_t *tdip)
+{
+ s1394_target_t *target;
+
+ TNF_PROBE_0_DEBUG(s1394_target_from_dip_enter, S1394_TNF_SL_STACK, "");
+
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+ target = s1394_target_from_dip_locked(hal, tdip);
+ rw_exit(&hal->target_list_rwlock);
+
+ TNF_PROBE_0_DEBUG(s1394_target_from_dip_exit, S1394_TNF_SL_STACK, "");
+ return (target);
+}
+
+/*
+ * s1394_destroy_timers()
+ * turns off any outstanding timers in preparation for detach or suspend.
+ */
+void
+s1394_destroy_timers(s1394_hal_t *hal)
+{
+ /* Destroy both of the Bus Mgr timers */
+ (void) untimeout(hal->bus_mgr_timeout_id);
+ (void) untimeout(hal->bus_mgr_query_timeout_id);
+
+ /* Destroy the Cycle Master timer */
+ (void) untimeout(hal->cm_timer);
+
+ /* Wait for the Config ROM timer (if necessary) */
+ while (hal->config_rom_timer_set == B_TRUE) {
+ delay(drv_usectohz(10));
+ }
+}
+
+
+/*
+ * s1394_cleanup_node_cfgrom()
+ * frees up all of the Config ROM in use by nodes in the topology_tree
+ */
+static void
+s1394_cleanup_node_cfgrom(s1394_hal_t *hal)
+{
+ uint32_t *cfgrom;
+ int i;
+
+ for (i = 0; i < IEEE1394_MAX_NODES; i++) {
+ if ((cfgrom = hal->topology_tree[i].cfgrom) != NULL)
+ kmem_free(cfgrom, IEEE1394_CONFIG_ROM_SZ);
+ }
+}
+
+/*
+ * s1394_cycle_too_long_callback()
+ * turns on the cycle master bit of the root node (current Cycle Master)
+ */
+void
+s1394_cycle_too_long_callback(void *arg)
+{
+ s1394_hal_t *hal;
+ ushort_t root_node_num;
+ ushort_t hal_node_num;
+ uint32_t data;
+ uint_t offset;
+
+ TNF_PROBE_0_DEBUG(s1394_cycle_too_long_callback_enter,
+ S1394_TNF_SL_STACK, "");
+
+ hal = (s1394_hal_t *)arg;
+
+	/* Clear the cm_timer_set flag */
+ mutex_enter(&hal->topology_tree_mutex);
+ mutex_enter(&hal->cm_timer_mutex);
+ hal->cm_timer_set = B_FALSE;
+ mutex_exit(&hal->cm_timer_mutex);
+
+ /* Get the root node and host node numbers */
+ root_node_num = hal->number_of_nodes - 1;
+ hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
+ mutex_exit(&hal->topology_tree_mutex);
+
+ /* If we are the root node, set the cycle master bit */
+ if (hal_node_num == root_node_num) {
+ data = IEEE1394_CSR_STATE_CMSTR;
+ offset = (IEEE1394_CSR_STATE_SET & IEEE1394_CSR_OFFSET_MASK);
+ (void) HAL_CALL(hal).csr_write(hal->halinfo.hal_private,
+ offset, data);
+ }
+
+ TNF_PROBE_0_DEBUG(s1394_cycle_too_long_callback_exit,
+ S1394_TNF_SL_STACK, "");
+}
diff --git a/usr/src/uts/common/io/1394/t1394.c b/usr/src/uts/common/io/1394/t1394.c
new file mode 100644
index 0000000000..0b95c8187c
--- /dev/null
+++ b/usr/src/uts/common/io/1394/t1394.c
@@ -0,0 +1,3779 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * t1394.c
+ * 1394 Target Driver Interface
+ * This file contains all of the 1394 Software Framework routines called
+ * by target drivers
+ */
+
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/h1394.h>
+#include <sys/1394/ieee1394.h>
+
+static int s1394_allow_detach = 0;
+
+/*
+ * Function: t1394_attach()
+ * Input(s): dip The dip given to the target driver
+ * in its attach() routine
+ * version The version of the target driver -
+ * T1394_VERSION_V1
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): attachinfo Used to pass info back to target,
+ * including bus generation, local
+ * node ID, dma attribute, etc.
+ * t1394_hdl The target "handle" to be used for
+ * all subsequent calls into the
+ * 1394 Software Framework
+ *
+ * Description: t1394_attach() registers the target (based on its dip) with
+ * the 1394 Software Framework. It returns the bus_generation,
+ * local_nodeID, iblock_cookie and other useful information to
+ * the target, as well as a handle (t1394_hdl) that will be used
+ * in all subsequent calls into this framework.
+ */
+/* ARGSUSED */
+int
+t1394_attach(dev_info_t *dip, int version, uint_t flags,
+ t1394_attachinfo_t *attachinfo, t1394_handle_t *t1394_hdl)
+{
+ s1394_hal_t *hal;
+ s1394_target_t *target;
+ uint_t dev;
+ uint_t curr;
+ uint_t unit_dir;
+ int hp_node = 0;
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(attachinfo != NULL);
+
+ TNF_PROBE_0_DEBUG(t1394_attach_enter, S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ *t1394_hdl = NULL;
+
+ if (version != T1394_VERSION_V1) {
+ TNF_PROBE_1(t1394_attach_error, S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_string, msg, "Invalid version");
+ TNF_PROBE_0_DEBUG(t1394_attach_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ hal = s1394_dip_to_hal(ddi_get_parent(dip));
+ if (hal == NULL) {
+ TNF_PROBE_1(t1394_attach_error, S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_string, msg, "No parent dip found for target");
+ TNF_PROBE_0_DEBUG(t1394_attach_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
+
+ hp_node = ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+ "hp-node");
+
+ /* Allocate space for s1394_target_t */
+ target = kmem_zalloc(sizeof (s1394_target_t), KM_SLEEP);
+
+ mutex_enter(&hal->topology_tree_mutex);
+
+ target->target_version = version;
+
+ /* Copy in the params */
+ target->target_dip = dip;
+ target->on_hal = hal;
+
+ /* Place the target on the appropriate node */
+ target->on_node = NULL;
+
+ rw_enter(&target->on_hal->target_list_rwlock, RW_WRITER);
+ if (hp_node != 0) {
+ s1394_add_target_to_node(target);
+ /*
+ * on_node can be NULL if the node got unplugged
+ * while the target driver is in its attach routine.
+ */
+ if (target->on_node == NULL) {
+ s1394_remove_target_from_node(target);
+ rw_exit(&target->on_hal->target_list_rwlock);
+ mutex_exit(&hal->topology_tree_mutex);
+ kmem_free(target, sizeof (s1394_target_t));
+ TNF_PROBE_1(t1394_attach_error,
+ S1394_TNF_SL_HOTPLUG_ERROR, "", tnf_string, msg,
+ "on_node == NULL");
+ TNF_PROBE_0_DEBUG(t1394_attach_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ target->target_state = S1394_TARG_HP_NODE;
+ if (S1394_NODE_BUS_PWR_CONSUMER(target->on_node) == B_TRUE)
+ target->target_state |= S1394_TARG_BUS_PWR_CONSUMER;
+ }
+
+ /* Return the current generation */
+ attachinfo->localinfo.bus_generation = target->on_hal->generation_count;
+
+ /* Fill in hal node id */
+ attachinfo->localinfo.local_nodeID = target->on_hal->node_id;
+
+ /* Give the target driver the iblock_cookie */
+ attachinfo->iblock_cookie = target->on_hal->halinfo.hw_interrupt;
+
+ /* Give the target driver the attributes */
+ attachinfo->acc_attr = target->on_hal->halinfo.acc_attr;
+ attachinfo->dma_attr = target->on_hal->halinfo.dma_attr;
+
+ unit_dir = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "unit-dir-offset", 0);
+ target->unit_dir = unit_dir;
+
+ /* By default, disable all physical AR requests */
+ target->physical_arreq_enabled = 0;
+
+ /* Get dev_max_payload & current_max_payload */
+ s1394_get_maxpayload(target, &dev, &curr);
+ target->dev_max_payload = dev;
+ target->current_max_payload = curr;
+
+ /* Add into linked list */
+ if ((target->on_hal->target_head == NULL) &&
+ (target->on_hal->target_tail == NULL)) {
+ target->on_hal->target_head = target;
+ target->on_hal->target_tail = target;
+ } else {
+ target->on_hal->target_tail->target_next = target;
+ target->target_prev = target->on_hal->target_tail;
+ target->on_hal->target_tail = target;
+ }
+ rw_exit(&target->on_hal->target_list_rwlock);
+
+ /* Fill in services layer private info */
+ *t1394_hdl = (t1394_handle_t)target;
+
+ mutex_exit(&hal->topology_tree_mutex);
+
+ TNF_PROBE_0_DEBUG(t1394_attach_exit, S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_SUCCESS);
+}
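+
+/*
+ * Usage sketch: a target driver would typically call t1394_attach()
+ * from its own attach(9E) entry point and save the returned handle
+ * and attach info in its soft state. Illustrative only - the soft
+ * state pointer "sp" and its fields are hypothetical.
+ *
+ * t1394_attachinfo_t attachinfo;
+ *
+ * if (t1394_attach(dip, T1394_VERSION_V1, 0, &attachinfo,
+ *     &sp->t1394_hdl) != DDI_SUCCESS)
+ * return (DDI_FAILURE);
+ * sp->bus_generation = attachinfo.localinfo.bus_generation;
+ * sp->local_nodeID = attachinfo.localinfo.local_nodeID;
+ */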
+
+/*
+ * Function: t1394_detach()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): DDI_SUCCESS Target successfully detached
+ * DDI_FAILURE Target failed to detach
+ *
+ * Description: t1394_detach() unregisters the target from the 1394 Software
+ * Framework. t1394_detach() can fail if the target has any
+ * allocated commands that haven't been freed.
+ */
+/* ARGSUSED */
+int
+t1394_detach(t1394_handle_t *t1394_hdl, uint_t flags)
+{
+ s1394_target_t *target;
+ uint_t num_cmds;
+
+ TNF_PROBE_0_DEBUG(t1394_detach_enter, S1394_TNF_SL_HOTPLUG_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ target = (s1394_target_t *)(*t1394_hdl);
+
+ ASSERT(target->on_hal);
+
+ mutex_enter(&target->on_hal->topology_tree_mutex);
+ rw_enter(&target->on_hal->target_list_rwlock, RW_WRITER);
+
+ /* How many cmds has this target allocated? */
+ num_cmds = target->target_num_cmds;
+
+ if (num_cmds != 0) {
+ rw_exit(&target->on_hal->target_list_rwlock);
+ mutex_exit(&target->on_hal->topology_tree_mutex);
+ TNF_PROBE_1(t1394_detach_error, S1394_TNF_SL_HOTPLUG_ERROR, "",
+ tnf_string, msg, "Must free all commands before detach()");
+ TNF_PROBE_0_DEBUG(t1394_detach_exit,
+ S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Remove from linked lists. Topology tree is already locked
+ * so that the node won't go away while we are looking at it.
+ */
+ if ((target->on_hal->target_head == target) &&
+ (target->on_hal->target_tail == target)) {
+ target->on_hal->target_head = NULL;
+ target->on_hal->target_tail = NULL;
+ } else {
+ if (target->target_prev)
+ target->target_prev->target_next = target->target_next;
+ if (target->target_next)
+ target->target_next->target_prev = target->target_prev;
+ if (target->on_hal->target_head == target)
+ target->on_hal->target_head = target->target_next;
+ if (target->on_hal->target_tail == target)
+ target->on_hal->target_tail = target->target_prev;
+ }
+
+ s1394_remove_target_from_node(target);
+ rw_exit(&target->on_hal->target_list_rwlock);
+
+ mutex_exit(&target->on_hal->topology_tree_mutex);
+
+ /* Free memory */
+ kmem_free(target, sizeof (s1394_target_t));
+
+ *t1394_hdl = NULL;
+
+ TNF_PROBE_0_DEBUG(t1394_detach_exit, S1394_TNF_SL_HOTPLUG_STACK, "");
+ return (DDI_SUCCESS);
+}
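+
+/*
+ * Usage sketch: teardown from the target's detach(9E) routine;
+ * every command allocated with t1394_alloc_cmd() must have been
+ * freed first or this call fails. "sp" is hypothetical.
+ *
+ * if (t1394_detach(&sp->t1394_hdl, 0) != DDI_SUCCESS)
+ * return (DDI_FAILURE);
+ */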
+
+/*
+ * Function: t1394_alloc_cmd()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * flags The flags parameter is described below
+ *
+ * Output(s): cmdp Pointer to the newly allocated command
+ *
+ * Description: t1394_alloc_cmd() allocates a command for use with the
+ * t1394_read(), t1394_write(), or t1394_lock() interfaces
+ * of the 1394 Software Framework. By default, t1394_alloc_cmd()
+ * may sleep while allocating memory for the command structure.
+ * If this is undesirable, the target may set the
+ * T1394_ALLOC_CMD_NOSLEEP bit in the flags parameter. Also,
+ * this call may fail because a target driver has already
+ * allocated MAX_NUMBER_ALLOC_CMDS commands.
+ */
+int
+t1394_alloc_cmd(t1394_handle_t t1394_hdl, uint_t flags, cmd1394_cmd_t **cmdp)
+{
+ s1394_hal_t *hal;
+ s1394_target_t *target;
+ s1394_cmd_priv_t *s_priv;
+ uint_t num_cmds;
+
+ TNF_PROBE_0_DEBUG(t1394_alloc_cmd_enter, S1394_TNF_SL_ATREQ_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ target = (s1394_target_t *)t1394_hdl;
+
+ /* Find the HAL this target resides on */
+ hal = target->on_hal;
+
+ rw_enter(&hal->target_list_rwlock, RW_WRITER);
+
+ /* How many cmds has this target allocated? */
+ num_cmds = target->target_num_cmds;
+
+ if (num_cmds >= MAX_NUMBER_ALLOC_CMDS) {
+ rw_exit(&hal->target_list_rwlock);
+ TNF_PROBE_1(t1394_alloc_cmd_error, S1394_TNF_SL_ATREQ_ERROR,
+ "", tnf_string, msg, "Attempted to alloc > "
+ "MAX_NUMBER_ALLOC_CMDS");
+ TNF_PROBE_0_DEBUG(t1394_alloc_cmd_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ /* kstats - cmd alloc failures */
+ hal->hal_kstats->cmd_alloc_fail++;
+ return (DDI_FAILURE);
+ }
+
+ /* Increment the number of cmds this target has allocated */
+ target->target_num_cmds = num_cmds + 1;
+
+ if (s1394_alloc_cmd(hal, flags, cmdp) != DDI_SUCCESS) {
+ target->target_num_cmds = num_cmds; /* Undo increment */
+ rw_exit(&hal->target_list_rwlock);
+ TNF_PROBE_1(t1394_alloc_cmd_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Failed to allocate command structure");
+ TNF_PROBE_0_DEBUG(t1394_alloc_cmd_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ /* kstats - cmd alloc failures */
+ hal->hal_kstats->cmd_alloc_fail++;
+ return (DDI_FAILURE);
+ }
+
+ rw_exit(&hal->target_list_rwlock);
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(*cmdp);
+
+ /* Initialize the command's blocking mutex */
+ mutex_init(&s_priv->blocking_mutex, NULL, MUTEX_DRIVER,
+ hal->halinfo.hw_interrupt);
+
+ /* Initialize the command's blocking condition variable */
+ cv_init(&s_priv->blocking_cv, NULL, CV_DRIVER, NULL);
+
+ TNF_PROBE_0_DEBUG(t1394_alloc_cmd_exit, S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: t1394_free_cmd()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * flags The flags parameter is unused (for now)
+ * cmdp Pointer to the command to be freed
+ *
+ * Output(s): DDI_SUCCESS Target successfully freed command
+ * DDI_FAILURE Target failed to free command
+ *
+ * Description: t1394_free_cmd() attempts to free a command that has previously
+ * been allocated by the target driver. It is possible for
+ * t1394_free_cmd() to fail because the command is currently
+ * in-use by the 1394 Software Framework.
+ */
+/* ARGSUSED */
+int
+t1394_free_cmd(t1394_handle_t t1394_hdl, uint_t flags, cmd1394_cmd_t **cmdp)
+{
+ s1394_hal_t *hal;
+ s1394_target_t *target;
+ s1394_cmd_priv_t *s_priv;
+ uint_t num_cmds;
+
+ TNF_PROBE_0_DEBUG(t1394_free_cmd_enter, S1394_TNF_SL_ATREQ_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ target = (s1394_target_t *)t1394_hdl;
+
+ /* Find the HAL this target resides on */
+ hal = target->on_hal;
+
+ rw_enter(&hal->target_list_rwlock, RW_WRITER);
+
+ /* How many cmds has this target allocated? */
+ num_cmds = target->target_num_cmds;
+
+ if (num_cmds == 0) {
+ rw_exit(&hal->target_list_rwlock);
+ TNF_PROBE_2(t1394_free_cmd_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "No commands left to be freed "
+ "(num_cmds <= 0)", tnf_uint, num_cmds, num_cmds);
+ TNF_PROBE_0_DEBUG(t1394_free_cmd_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ ASSERT(num_cmds != 0);
+ return (DDI_FAILURE);
+ }
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(*cmdp);
+
+ /* Check that command isn't in use */
+ if (s_priv->cmd_in_use == B_TRUE) {
+ rw_exit(&hal->target_list_rwlock);
+ TNF_PROBE_1(t1394_free_cmd_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Attempted to free an in-use command");
+ TNF_PROBE_0_DEBUG(t1394_free_cmd_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ ASSERT(s_priv->cmd_in_use == B_FALSE);
+ return (DDI_FAILURE);
+ }
+
+ /* Decrement the number of cmds this target has allocated */
+ target->target_num_cmds--;
+
+ rw_exit(&hal->target_list_rwlock);
+
+ /* Destroy the command's blocking condition variable */
+ cv_destroy(&s_priv->blocking_cv);
+
+ /* Destroy the command's blocking mutex */
+ mutex_destroy(&s_priv->blocking_mutex);
+
+ kmem_cache_free(hal->hal_kmem_cachep, *cmdp);
+
+ /* Command pointer is set to NULL before returning */
+ *cmdp = NULL;
+
+ /* kstats - number of cmd frees */
+ hal->hal_kstats->cmd_free++;
+
+ TNF_PROBE_0_DEBUG(t1394_free_cmd_exit, S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+}
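+
+/*
+ * Usage sketch: a typical allocate/use/free cycle. A target that
+ * cannot block passes T1394_ALLOC_CMD_NOSLEEP in flags; otherwise
+ * t1394_alloc_cmd() may sleep.
+ *
+ * cmd1394_cmd_t *cmd;
+ *
+ * if (t1394_alloc_cmd(t1394_hdl, 0, &cmd) != DDI_SUCCESS)
+ * return (DDI_FAILURE);
+ * (use the command with t1394_read()/t1394_write()/t1394_lock())
+ * (void) t1394_free_cmd(t1394_hdl, 0, &cmd);
+ */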
+
+/*
+ * Function: t1394_read()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * cmd Pointer to the command to send
+ *
+ * Output(s): DDI_SUCCESS Target successfully sent the command
+ * DDI_FAILURE Target failed to send command
+ *
+ * Description: t1394_read() attempts to send an asynchronous read request
+ * onto the 1394 bus.
+ */
+int
+t1394_read(t1394_handle_t t1394_hdl, cmd1394_cmd_t *cmd)
+{
+ s1394_hal_t *to_hal;
+ s1394_target_t *target;
+ s1394_cmd_priv_t *s_priv;
+ s1394_hal_state_t state;
+ int ret;
+ int err;
+
+ TNF_PROBE_0_DEBUG(t1394_read_enter, S1394_TNF_SL_ATREQ_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(cmd != NULL);
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Is this command currently in use? */
+ if (s_priv->cmd_in_use == B_TRUE) {
+ TNF_PROBE_1(t1394_read_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Attempted to resend an in-use command");
+ TNF_PROBE_0_DEBUG(t1394_read_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ ASSERT(s_priv->cmd_in_use == B_FALSE);
+ return (DDI_FAILURE);
+ }
+
+ target = (s1394_target_t *)t1394_hdl;
+
+ /* Set-up the destination of the command */
+ to_hal = target->on_hal;
+
+ /* No status (default) */
+ cmd->cmd_result = CMD1394_NOSTATUS;
+
+ /* Check for proper command type */
+ if ((cmd->cmd_type != CMD1394_ASYNCH_RD_QUAD) &&
+ (cmd->cmd_type != CMD1394_ASYNCH_RD_BLOCK)) {
+ cmd->cmd_result = CMD1394_EINVALID_COMMAND;
+ TNF_PROBE_1(t1394_read_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Invalid command type specified");
+ TNF_PROBE_0_DEBUG(t1394_read_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Is this a blocking command on interrupt stack? */
+ if ((cmd->cmd_options & CMD1394_BLOCKING) &&
+ (curthread->t_flag & T_INTR_THREAD)) {
+ cmd->cmd_result = CMD1394_EINVALID_CONTEXT;
+ TNF_PROBE_1(t1394_read_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Tried to use CMD1394_BLOCKING in "
+ "intr context");
+ TNF_PROBE_0_DEBUG(t1394_read_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ mutex_enter(&to_hal->topology_tree_mutex);
+ state = to_hal->hal_state;
+ if (state != S1394_HAL_NORMAL) {
+ ret = s1394_HAL_asynch_error(to_hal, cmd, state);
+ if (ret != CMD1394_CMDSUCCESS) {
+ cmd->cmd_result = ret;
+ mutex_exit(&to_hal->topology_tree_mutex);
+ return (DDI_FAILURE);
+ }
+ }
+
+ ret = s1394_setup_asynch_command(to_hal, target, cmd,
+ S1394_CMD_READ, &err);
+
+ /* Command has now been put onto the queue! */
+ if (ret != DDI_SUCCESS) {
+ /* Copy error code into result */
+ cmd->cmd_result = err;
+ mutex_exit(&to_hal->topology_tree_mutex);
+ TNF_PROBE_1(t1394_read_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Failed in s1394_setup_asynch_command()");
+ TNF_PROBE_0_DEBUG(t1394_read_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * If this command was sent during a bus reset,
+ * then put it onto the pending Q.
+ */
+ if (state == S1394_HAL_RESET) {
+ /* Remove cmd from outstanding request Q */
+ s1394_remove_q_asynch_cmd(to_hal, cmd);
+ /* Are we on the bus reset event stack? */
+ if (s1394_on_br_thread(to_hal) == B_TRUE) {
+ /* Blocking commands are not allowed */
+ if (cmd->cmd_options & CMD1394_BLOCKING) {
+ mutex_exit(&to_hal->topology_tree_mutex);
+ s_priv->cmd_in_use = B_FALSE;
+ cmd->cmd_result = CMD1394_EINVALID_CONTEXT;
+ TNF_PROBE_1(t1394_read_error,
+ S1394_TNF_SL_ATREQ_ERROR, "", tnf_string,
+ msg, "CMD1394_BLOCKING in bus reset "
+ "context");
+ TNF_PROBE_0_DEBUG(t1394_read_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ s1394_pending_q_insert(to_hal, cmd, S1394_PENDING_Q_FRONT);
+ mutex_exit(&to_hal->topology_tree_mutex);
+
+ /* Block (if necessary) */
+ goto block_on_asynch_cmd;
+ }
+ mutex_exit(&to_hal->topology_tree_mutex);
+
+ /* Send the command out */
+ ret = s1394_xfer_asynch_command(to_hal, cmd, &err);
+
+ if (ret != DDI_SUCCESS) {
+ if (err == CMD1394_ESTALE_GENERATION) {
+ /* Remove cmd from outstanding request Q */
+ s1394_remove_q_asynch_cmd(to_hal, cmd);
+ s1394_pending_q_insert(to_hal, cmd,
+ S1394_PENDING_Q_FRONT);
+
+ /* Block (if necessary) */
+ goto block_on_asynch_cmd;
+
+ } else {
+ /* Remove cmd from outstanding request Q */
+ s1394_remove_q_asynch_cmd(to_hal, cmd);
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ /* Copy error code into result */
+ cmd->cmd_result = err;
+
+ TNF_PROBE_1(t1394_read_error, S1394_TNF_SL_ATREQ_ERROR,
+ "", tnf_string, msg, "Failed in "
+ "s1394_xfer_asynch_command()");
+ TNF_PROBE_0_DEBUG(t1394_read_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+block_on_asynch_cmd:
+ s1394_block_on_asynch_cmd(cmd);
+
+ TNF_PROBE_0_DEBUG(t1394_read_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+}
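+
+/*
+ * Usage sketch: a blocking quadlet read. The cmd_addr and
+ * cmd_u.q.quadlet_data fields are assumed from the cmd1394(9S)
+ * command layout; "addr" is a caller-supplied 64-bit 1394 address.
+ *
+ * cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
+ * cmd->cmd_options = CMD1394_BLOCKING;
+ * cmd->cmd_addr = addr;
+ * if ((t1394_read(t1394_hdl, cmd) != DDI_SUCCESS) ||
+ *     (cmd->cmd_result != CMD1394_CMDSUCCESS))
+ * return (DDI_FAILURE);
+ * data = cmd->cmd_u.q.quadlet_data;
+ */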
+
+/*
+ * Function: t1394_write()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * cmd Pointer to the command to send
+ *
+ * Output(s): DDI_SUCCESS Target successfully sent the command
+ * DDI_FAILURE Target failed to send command
+ *
+ * Description: t1394_write() attempts to send an asynchronous write request
+ * onto the 1394 bus.
+ */
+int
+t1394_write(t1394_handle_t t1394_hdl, cmd1394_cmd_t *cmd)
+{
+ s1394_hal_t *to_hal;
+ s1394_target_t *target;
+ s1394_cmd_priv_t *s_priv;
+ s1394_hal_state_t state;
+ int ret;
+ int err;
+
+ TNF_PROBE_0_DEBUG(t1394_write_enter, S1394_TNF_SL_ATREQ_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(cmd != NULL);
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Is this command currently in use? */
+ if (s_priv->cmd_in_use == B_TRUE) {
+ TNF_PROBE_1(t1394_write_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Attempted to resend an in-use command");
+ TNF_PROBE_0_DEBUG(t1394_write_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ ASSERT(s_priv->cmd_in_use == B_FALSE);
+ return (DDI_FAILURE);
+ }
+
+ target = (s1394_target_t *)t1394_hdl;
+
+ /* Set-up the destination of the command */
+ to_hal = target->on_hal;
+
+ /* Is this an FA request? */
+ if (s_priv->cmd_ext_type == S1394_CMD_EXT_FA) {
+ if (S1394_IS_CMD_FCP(s_priv) &&
+ (s1394_fcp_write_check_cmd(cmd) != DDI_SUCCESS)) {
+ TNF_PROBE_0_DEBUG(t1394_write_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ s1394_fa_convert_cmd(to_hal, cmd);
+ }
+
+ /* No status (default) */
+ cmd->cmd_result = CMD1394_NOSTATUS;
+
+ /* Check for proper command type */
+ if ((cmd->cmd_type != CMD1394_ASYNCH_WR_QUAD) &&
+ (cmd->cmd_type != CMD1394_ASYNCH_WR_BLOCK)) {
+ cmd->cmd_result = CMD1394_EINVALID_COMMAND;
+ s1394_fa_check_restore_cmd(to_hal, cmd);
+ TNF_PROBE_1(t1394_write_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Invalid command type specified");
+ TNF_PROBE_0_DEBUG(t1394_write_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /* Is this a blocking command on interrupt stack? */
+ if ((cmd->cmd_options & CMD1394_BLOCKING) &&
+ (curthread->t_flag & T_INTR_THREAD)) {
+ cmd->cmd_result = CMD1394_EINVALID_CONTEXT;
+ s1394_fa_check_restore_cmd(to_hal, cmd);
+ TNF_PROBE_1(t1394_write_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Tried to use CMD1394_BLOCKING in intr "
+ "context");
+ TNF_PROBE_0_DEBUG(t1394_write_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ mutex_enter(&to_hal->topology_tree_mutex);
+ state = to_hal->hal_state;
+ if (state != S1394_HAL_NORMAL) {
+ ret = s1394_HAL_asynch_error(to_hal, cmd, state);
+ if (ret != CMD1394_CMDSUCCESS) {
+ cmd->cmd_result = ret;
+ mutex_exit(&to_hal->topology_tree_mutex);
+ s1394_fa_check_restore_cmd(to_hal, cmd);
+ return (DDI_FAILURE);
+ }
+ }
+
+ ret = s1394_setup_asynch_command(to_hal, target, cmd,
+ S1394_CMD_WRITE, &err);
+
+ /* Command has now been put onto the queue! */
+ if (ret != DDI_SUCCESS) {
+ /* Copy error code into result */
+ cmd->cmd_result = err;
+ mutex_exit(&to_hal->topology_tree_mutex);
+ s1394_fa_check_restore_cmd(to_hal, cmd);
+ TNF_PROBE_1(t1394_write_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Failed in s1394_setup_asynch_command()");
+ TNF_PROBE_0_DEBUG(t1394_write_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * If this command was sent during a bus reset,
+ * then put it onto the pending Q.
+ */
+ if (state == S1394_HAL_RESET) {
+ /* Remove cmd from outstanding request Q */
+ s1394_remove_q_asynch_cmd(to_hal, cmd);
+ /* Are we on the bus reset event stack? */
+ if (s1394_on_br_thread(to_hal) == B_TRUE) {
+ /* Blocking commands are not allowed */
+ if (cmd->cmd_options & CMD1394_BLOCKING) {
+ mutex_exit(&to_hal->topology_tree_mutex);
+ s_priv->cmd_in_use = B_FALSE;
+ cmd->cmd_result = CMD1394_EINVALID_CONTEXT;
+ s1394_fa_check_restore_cmd(to_hal, cmd);
+ TNF_PROBE_1(t1394_write_error,
+ S1394_TNF_SL_ATREQ_ERROR, "", tnf_string,
+ msg, "CMD1394_BLOCKING in bus reset cntxt");
+ TNF_PROBE_0_DEBUG(t1394_write_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ s1394_pending_q_insert(to_hal, cmd, S1394_PENDING_Q_FRONT);
+ mutex_exit(&to_hal->topology_tree_mutex);
+
+ /* Block (if necessary) */
+ s1394_block_on_asynch_cmd(cmd);
+
+ TNF_PROBE_0_DEBUG(t1394_write_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ return (DDI_SUCCESS);
+ }
+ mutex_exit(&to_hal->topology_tree_mutex);
+
+ /* Send the command out */
+ ret = s1394_xfer_asynch_command(to_hal, cmd, &err);
+
+ if (ret != DDI_SUCCESS) {
+ if (err == CMD1394_ESTALE_GENERATION) {
+ /* Remove cmd from outstanding request Q */
+ s1394_remove_q_asynch_cmd(to_hal, cmd);
+ s1394_pending_q_insert(to_hal, cmd,
+ S1394_PENDING_Q_FRONT);
+
+ /* Block (if necessary) */
+ s1394_block_on_asynch_cmd(cmd);
+
+ TNF_PROBE_0_DEBUG(t1394_write_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_SUCCESS);
+ } else {
+ /* Remove cmd from outstanding request Q */
+ s1394_remove_q_asynch_cmd(to_hal, cmd);
+
+ s_priv->cmd_in_use = B_FALSE;
+
+ /* Copy error code into result */
+ cmd->cmd_result = err;
+
+ s1394_fa_check_restore_cmd(to_hal, cmd);
+ TNF_PROBE_1(t1394_write_error,
+ S1394_TNF_SL_ATREQ_ERROR, "", tnf_string, msg,
+ "Failed in s1394_xfer_asynch_command()");
+ TNF_PROBE_0_DEBUG(t1394_write_exit,
+ S1394_TNF_SL_ATREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+ } else {
+ /* Block (if necessary) */
+ s1394_block_on_asynch_cmd(cmd);
+
+ TNF_PROBE_0_DEBUG(t1394_write_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ return (DDI_SUCCESS);
+ }
+}
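+
+/*
+ * Usage sketch: a block write carries its payload in an mblk_t
+ * chain (the same cmd_u.b.data_block/blk_length fields that
+ * t1394_recv_request_done() inspects below); allocb(9F) supplies
+ * the message block.
+ *
+ * if ((mp = allocb(len, BPRI_HI)) == NULL)
+ * return (DDI_FAILURE);
+ * bcopy(buf, mp->b_wptr, len);
+ * mp->b_wptr += len;
+ * cmd->cmd_type = CMD1394_ASYNCH_WR_BLOCK;
+ * cmd->cmd_addr = addr;
+ * cmd->cmd_u.b.data_block = mp;
+ * cmd->cmd_u.b.blk_length = len;
+ * (void) t1394_write(t1394_hdl, cmd);
+ */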
+
+/*
+ * Function: t1394_lock()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * cmd Pointer to the command to send
+ *
+ * Output(s): DDI_SUCCESS Target successfully sent the command
+ * DDI_FAILURE Target failed to send command
+ *
+ * Description: t1394_lock() attempts to send an asynchronous lock request
+ * onto the 1394 bus.
+ */
+int
+t1394_lock(t1394_handle_t t1394_hdl, cmd1394_cmd_t *cmd)
+{
+ s1394_hal_t *to_hal;
+ s1394_target_t *target;
+ s1394_cmd_priv_t *s_priv;
+ s1394_hal_state_t state;
+ cmd1394_lock_type_t lock_type;
+ uint_t num_retries;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(t1394_lock_enter, S1394_TNF_SL_ATREQ_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(cmd != NULL);
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(cmd);
+
+ /* Is this command currently in use? */
+ if (s_priv->cmd_in_use == B_TRUE) {
+ TNF_PROBE_1(t1394_lock_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Attempted to resend an in-use command");
+ TNF_PROBE_0_DEBUG(t1394_lock_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ ASSERT(s_priv->cmd_in_use == B_FALSE);
+ return (DDI_FAILURE);
+ }
+
+ target = (s1394_target_t *)t1394_hdl;
+
+ /* Set-up the destination of the command */
+ to_hal = target->on_hal;
+
+ mutex_enter(&to_hal->topology_tree_mutex);
+ state = to_hal->hal_state;
+ if (state != S1394_HAL_NORMAL) {
+ ret = s1394_HAL_asynch_error(to_hal, cmd, state);
+ if (ret != CMD1394_CMDSUCCESS) {
+ cmd->cmd_result = ret;
+ mutex_exit(&to_hal->topology_tree_mutex);
+ return (DDI_FAILURE);
+ }
+ }
+ mutex_exit(&to_hal->topology_tree_mutex);
+
+ /* Check for proper command type */
+ if ((cmd->cmd_type != CMD1394_ASYNCH_LOCK_32) &&
+ (cmd->cmd_type != CMD1394_ASYNCH_LOCK_64)) {
+ cmd->cmd_result = CMD1394_EINVALID_COMMAND;
+ TNF_PROBE_1(t1394_lock_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Invalid command type sent to "
+ "t1394_lock()");
+ TNF_PROBE_0_DEBUG(t1394_lock_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ /* No status (default) */
+ cmd->cmd_result = CMD1394_NOSTATUS;
+
+ /* Is this a blocking command on interrupt stack? */
+ if ((cmd->cmd_options & CMD1394_BLOCKING) &&
+ (curthread->t_flag & T_INTR_THREAD)) {
+ cmd->cmd_result = CMD1394_EINVALID_CONTEXT;
+ TNF_PROBE_1(t1394_lock_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Tried to use CMD1394_BLOCKING in intr "
+ "context");
+ TNF_PROBE_0_DEBUG(t1394_lock_exit, S1394_TNF_SL_ATREQ_STACK,
+ "");
+ return (DDI_FAILURE);
+ }
+
+ if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) {
+ lock_type = cmd->cmd_u.l32.lock_type;
+ num_retries = cmd->cmd_u.l32.num_retries;
+ } else { /* (cmd->cmd_type == CMD1394_ASYNCH_LOCK_64) */
+ lock_type = cmd->cmd_u.l64.lock_type;
+ num_retries = cmd->cmd_u.l64.num_retries;
+ }
+
+ /* Make sure num_retries is reasonable */
+ ASSERT(num_retries <= MAX_NUMBER_OF_LOCK_RETRIES);
+
+ switch (lock_type) {
+ case CMD1394_LOCK_MASK_SWAP:
+ case CMD1394_LOCK_FETCH_ADD:
+ case CMD1394_LOCK_LITTLE_ADD:
+ case CMD1394_LOCK_BOUNDED_ADD:
+ case CMD1394_LOCK_WRAP_ADD:
+ case CMD1394_LOCK_COMPARE_SWAP:
+ ret = s1394_compare_swap(to_hal, target, cmd);
+ break;
+
+ case CMD1394_LOCK_BIT_AND:
+ case CMD1394_LOCK_BIT_OR:
+ case CMD1394_LOCK_BIT_XOR:
+ case CMD1394_LOCK_INCREMENT:
+ case CMD1394_LOCK_DECREMENT:
+ case CMD1394_LOCK_ADD:
+ case CMD1394_LOCK_SUBTRACT:
+ case CMD1394_LOCK_THRESH_ADD:
+ case CMD1394_LOCK_THRESH_SUBTRACT:
+ case CMD1394_LOCK_CLIP_ADD:
+ case CMD1394_LOCK_CLIP_SUBTRACT:
+ ret = s1394_split_lock_req(to_hal, target, cmd);
+ break;
+
+ default:
+ TNF_PROBE_1(t1394_lock_error, S1394_TNF_SL_ATREQ_ERROR, "",
+ tnf_string, msg, "Invalid lock_type in command");
+ cmd->cmd_result = CMD1394_EINVALID_COMMAND;
+ ret = DDI_FAILURE;
+ break;
+ }
+
+ TNF_PROBE_0_DEBUG(t1394_lock_exit, S1394_TNF_SL_ATREQ_STACK, "");
+ return (ret);
+}
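+
+/*
+ * Usage sketch: a 32-bit compare-swap. The l32 argument/value
+ * fields (arg_value, data_value, old_value) are assumed from the
+ * cmd1394(9S) layout. On success, the swap took effect only if the
+ * returned old_value equals the compare argument.
+ *
+ * cmd->cmd_type = CMD1394_ASYNCH_LOCK_32;
+ * cmd->cmd_options = CMD1394_BLOCKING;
+ * cmd->cmd_addr = addr;
+ * cmd->cmd_u.l32.lock_type = CMD1394_LOCK_COMPARE_SWAP;
+ * cmd->cmd_u.l32.arg_value = old_val;
+ * cmd->cmd_u.l32.data_value = new_val;
+ * cmd->cmd_u.l32.num_retries = 0;
+ * if ((t1394_lock(t1394_hdl, cmd) != DDI_SUCCESS) ||
+ *     (cmd->cmd_result != CMD1394_CMDSUCCESS))
+ * return (DDI_FAILURE);
+ */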
+
+/*
+ * Function: t1394_alloc_addr()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * addr_allocp The structure used to specify the type,
+ * size, permissions, and callbacks
+ * (if any) for the requested block
+ * of 1394 address space
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): result Used to pass more specific info back
+ * to target
+ *
+ * Description: t1394_alloc_addr() requests that part of the 1394 Address Space
+ * on the local node be set aside for this target driver, and
+ * associated with this address space should be some permissions
+ * and callbacks. If the request is unable to be fulfilled,
+ * t1394_alloc_addr() will return DDI_FAILURE and result will
+ * indicate the reason. T1394_EINVALID_PARAM indicates that the
+ * combination of flags given is invalid, and T1394_EALLOC_ADDR
+ * indicates that the requested type of address space is
+ * unavailable.
+ */
+/* ARGSUSED */
+int
+t1394_alloc_addr(t1394_handle_t t1394_hdl, t1394_alloc_addr_t *addr_allocp,
+ uint_t flags, int *result)
+{
+ s1394_hal_t *hal;
+ s1394_target_t *target;
+ uint64_t addr_lo;
+ uint64_t addr_hi;
+ int err;
+
+ TNF_PROBE_0_DEBUG(t1394_alloc_addr_enter, S1394_TNF_SL_ARREQ_STACK,
+ "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(addr_allocp != NULL);
+
+ target = (s1394_target_t *)t1394_hdl;
+
+ /* Find the HAL this target resides on */
+ hal = target->on_hal;
+
+ /* Get the bounds of the request */
+ addr_lo = addr_allocp->aa_address;
+ addr_hi = addr_lo + addr_allocp->aa_length;
+
+ /* Check combination of flags */
+ if ((addr_allocp->aa_enable & T1394_ADDR_RDENBL) &&
+ (addr_allocp->aa_evts.recv_read_request == NULL) &&
+ (addr_allocp->aa_kmem_bufp == NULL)) {
+ if ((addr_allocp->aa_type != T1394_ADDR_FIXED) ||
+ (addr_lo < hal->physical_addr_lo) ||
+ (addr_hi > hal->physical_addr_hi)) {
+
+ /*
+ * Reads are enabled, but target doesn't want to
+ * be notified and hasn't given backing store
+ */
+ *result = T1394_EINVALID_PARAM;
+
+ TNF_PROBE_1(t1394_alloc_addr_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Invalid flags "
+ "(RDs on, notify off, no backing store)");
+ TNF_PROBE_0_DEBUG(t1394_alloc_addr_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ /* kstats - addr alloc failures */
+ hal->hal_kstats->addr_alloc_fail++;
+ return (DDI_FAILURE);
+ } else {
+ addr_allocp->aa_enable &= ~T1394_ADDR_RDENBL;
+ }
+ }
+
+ if ((addr_allocp->aa_enable & T1394_ADDR_WRENBL) &&
+ (addr_allocp->aa_evts.recv_write_request == NULL) &&
+ (addr_allocp->aa_kmem_bufp == NULL)) {
+ if ((addr_allocp->aa_type != T1394_ADDR_FIXED) ||
+ (addr_lo < hal->physical_addr_lo) ||
+ (addr_hi > hal->physical_addr_hi)) {
+
+ /*
+ * Writes are enabled, but target doesn't want to
+ * be notified and hasn't given backing store
+ */
+ *result = T1394_EINVALID_PARAM;
+
+ TNF_PROBE_1(t1394_alloc_addr_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Invalid flags "
+ "(WRs on, notify off, no backing store)");
+ TNF_PROBE_0_DEBUG(t1394_alloc_addr_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ /* kstats - addr alloc failures */
+ hal->hal_kstats->addr_alloc_fail++;
+ return (DDI_FAILURE);
+ } else {
+ addr_allocp->aa_enable &= ~T1394_ADDR_WRENBL;
+ }
+ }
+
+ if ((addr_allocp->aa_enable & T1394_ADDR_LKENBL) &&
+ (addr_allocp->aa_evts.recv_lock_request == NULL) &&
+ (addr_allocp->aa_kmem_bufp == NULL)) {
+ if ((addr_allocp->aa_type != T1394_ADDR_FIXED) ||
+ (addr_lo < hal->physical_addr_lo) ||
+ (addr_hi > hal->physical_addr_hi)) {
+
+ /*
+ * Locks are enabled, but target doesn't want to
+ * be notified and hasn't given backing store
+ */
+ *result = T1394_EINVALID_PARAM;
+
+ TNF_PROBE_1(t1394_alloc_addr_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Invalid flags "
+ "(LKs on, notify off, no backing store)");
+ TNF_PROBE_0_DEBUG(t1394_alloc_addr_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ /* kstats - addr alloc failures */
+ hal->hal_kstats->addr_alloc_fail++;
+ return (DDI_FAILURE);
+ } else {
+ addr_allocp->aa_enable &= ~T1394_ADDR_LKENBL;
+ }
+ }
+
+ /* If not T1394_ADDR_FIXED, then allocate a block */
+ if (addr_allocp->aa_type != T1394_ADDR_FIXED) {
+ err = s1394_request_addr_blk((s1394_hal_t *)target->on_hal,
+ addr_allocp);
+ if (err != DDI_SUCCESS) {
+ *result = T1394_EALLOC_ADDR;
+ /* kstats - addr alloc failures */
+ hal->hal_kstats->addr_alloc_fail++;
+ } else {
+ *result = T1394_NOERROR;
+ }
+ TNF_PROBE_0_DEBUG(t1394_alloc_addr_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (err);
+ } else {
+ err = s1394_claim_addr_blk((s1394_hal_t *)target->on_hal,
+ addr_allocp);
+ if (err != DDI_SUCCESS) {
+ *result = T1394_EALLOC_ADDR;
+ /* kstats - addr alloc failures */
+ hal->hal_kstats->addr_alloc_fail++;
+ } else {
+ *result = T1394_NOERROR;
+ /* If physical, update the AR request counter */
+ if ((addr_lo >= hal->physical_addr_lo) &&
+ (addr_hi <= hal->physical_addr_hi)) {
+ rw_enter(&hal->target_list_rwlock, RW_WRITER);
+ target->physical_arreq_enabled++;
+ rw_exit(&hal->target_list_rwlock);
+
+ s1394_physical_arreq_set_one(target);
+ }
+ }
+ TNF_PROBE_0_DEBUG(t1394_alloc_addr_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (err);
+ }
+}
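+
+/*
+ * Usage sketch: requesting a normal address block with write
+ * notification. T1394_ADDR_NORMAL and the aa_hdl output field are
+ * assumed from the t1394_alloc_addr(9F) interface; my_recv_write()
+ * is a hypothetical target callback.
+ *
+ * t1394_alloc_addr_t aa;
+ * int result;
+ *
+ * bzero(&aa, sizeof (aa));
+ * aa.aa_type = T1394_ADDR_NORMAL;
+ * aa.aa_length = len;
+ * aa.aa_enable = T1394_ADDR_WRENBL;
+ * aa.aa_evts.recv_write_request = my_recv_write;
+ * if (t1394_alloc_addr(t1394_hdl, &aa, 0, &result) != DDI_SUCCESS)
+ * return (DDI_FAILURE);
+ * addr_hdl = aa.aa_hdl;
+ */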
+
+/*
+ * Function: t1394_free_addr()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * addr_hdl The address "handle" returned by the
+ * t1394_alloc_addr() routine
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): DDI_SUCCESS Target successfully freed memory
+ * DDI_FAILURE Target failed to free the memory block
+ *
+ * Description: t1394_free_addr() attempts to free up memory that has been
+ * allocated by the target using t1394_alloc_addr().
+ */
+/* ARGSUSED */
+int
+t1394_free_addr(t1394_handle_t t1394_hdl, t1394_addr_handle_t *addr_hdl,
+ uint_t flags)
+{
+ s1394_addr_space_blk_t *curr_blk;
+ s1394_hal_t *hal;
+ s1394_target_t *target;
+
+ TNF_PROBE_0_DEBUG(t1394_free_addr_enter, S1394_TNF_SL_ARREQ_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(addr_hdl != NULL);
+
+ target = (s1394_target_t *)t1394_hdl;
+
+ /* Find the HAL this target resides on */
+ hal = target->on_hal;
+
+ curr_blk = (s1394_addr_space_blk_t *)(*addr_hdl);
+
+ if (s1394_free_addr_blk(hal, curr_blk) != DDI_SUCCESS) {
+ TNF_PROBE_0_DEBUG(t1394_free_addr_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* If physical, update the AR request counter */
+ if (curr_blk->addr_type == T1394_ADDR_FIXED) {
+ target->physical_arreq_enabled--;
+ s1394_physical_arreq_clear_one(target);
+ }
+
+ *addr_hdl = NULL;
+
+ /* kstats - number of addr frees */
+ hal->hal_kstats->addr_space_free++;
+
+ TNF_PROBE_0_DEBUG(t1394_free_addr_exit, S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: t1394_recv_request_done()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * resp Pointer to the command which the
+ * target received in its callback
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): DDI_SUCCESS Target successfully returned command
+ * to the 1394 Software Framework,
+ * and, if necessary, sent response
+ * DDI_FAILURE Target failed to return the command to
+ * the 1394 Software Framework
+ *
+ * Description: t1394_recv_request_done() takes the command that is given and
+ * determines whether that command requires a response to be
+ * sent on the 1394 bus. If it is necessary and its response
+ * code (cmd_result) has been set appropriately, then a response
+ * will be sent. If no response is necessary (broadcast or
+ * posted write), then the command resources are reclaimed.
+ */
+/* ARGSUSED */
+int
+t1394_recv_request_done(t1394_handle_t t1394_hdl, cmd1394_cmd_t *resp,
+ uint_t flags)
+{
+ s1394_hal_t *hal;
+ s1394_cmd_priv_t *s_priv;
+ h1394_cmd_priv_t *h_priv;
+ mblk_t *curr_blk;
+ size_t msgb_len;
+ size_t size;
+ int ret;
+ boolean_t response = B_TRUE;
+ boolean_t posted_write = B_FALSE;
+ boolean_t write_cmd = B_FALSE;
+ boolean_t mblk_too_small;
+
+ TNF_PROBE_0_DEBUG(t1394_recv_request_done_enter,
+ S1394_TNF_SL_ARREQ_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(resp != NULL);
+
+ /* Find the HAL this target resides on */
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Get the Services Layer private area */
+ s_priv = S1394_GET_CMD_PRIV(resp);
+
+ /* Get a pointer to the HAL private struct */
+ h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
+
+ /* Is this an FA request? */
+ if (s_priv->cmd_ext_type == S1394_CMD_EXT_FA) {
+ s1394_fa_convert_cmd(hal, resp);
+ }
+
+ /* Is this a write request? */
+ if ((resp->cmd_type == CMD1394_ASYNCH_WR_QUAD) ||
+ (resp->cmd_type == CMD1394_ASYNCH_WR_BLOCK)) {
+ write_cmd = B_TRUE;
+ /* Is this a posted write request? */
+ posted_write = s_priv->posted_write;
+ }
+
+ /* If broadcast or posted write cmd, don't send response */
+ if ((resp->broadcast == 1) ||
+ ((write_cmd == B_TRUE) && (posted_write == B_TRUE)))
+ response = B_FALSE;
+
+ if (response == B_FALSE) {
+ if ((write_cmd == B_TRUE) && (posted_write == B_TRUE)) {
+ /* kstats - Posted Write error */
+ hal->hal_kstats->arreq_posted_write_error++;
+ }
+
+ /* Free the command - Pass it back to the HAL */
+ HAL_CALL(hal).response_complete(hal->halinfo.hal_private, resp,
+ h_priv);
+ TNF_PROBE_0_DEBUG(t1394_recv_request_done_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_SUCCESS);
+ }
+
+ ASSERT(response == B_TRUE);
+
+ /* Verify valid response code */
+ switch (resp->cmd_result) {
+ case IEEE1394_RESP_COMPLETE:
+ /* Is the mblk_t too small? */
+ if (resp->cmd_type == CMD1394_ASYNCH_RD_BLOCK) {
+ curr_blk = resp->cmd_u.b.data_block;
+ size = resp->cmd_u.b.blk_length;
+ msgb_len = 0;
+ mblk_too_small = B_TRUE;
+
+ if (curr_blk == NULL) {
+ TNF_PROBE_1(t1394_recv_request_done_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string,
+ msg, "mblk_t is NULL in response");
+ TNF_PROBE_0_DEBUG(t1394_recv_request_done_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ /*
+ * Free the command - Pass it back
+ * to the HAL
+ */
+ HAL_CALL(hal).response_complete(
+ hal->halinfo.hal_private, resp, h_priv);
+ ASSERT(curr_blk != NULL);
+ return (DDI_FAILURE);
+ }
+
+ while (curr_blk != NULL) {
+ msgb_len +=
+ (curr_blk->b_wptr - curr_blk->b_rptr);
+
+ if (msgb_len >= size) {
+ mblk_too_small = B_FALSE;
+ break;
+ }
+ curr_blk = curr_blk->b_cont;
+ }
+
+ if (mblk_too_small == B_TRUE) {
+ TNF_PROBE_1(t1394_recv_request_done_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string,
+ msg, "mblk_t too small in response");
+ TNF_PROBE_0_DEBUG(t1394_recv_request_done_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ /*
+ * Free the command - Pass it back
+ * to the HAL
+ */
+ HAL_CALL(hal).response_complete(
+ hal->halinfo.hal_private, resp, h_priv);
+ ASSERT(mblk_too_small != B_TRUE);
+ return (DDI_FAILURE);
+ }
+ }
+ /* FALLTHROUGH */
+ case IEEE1394_RESP_CONFLICT_ERROR:
+ case IEEE1394_RESP_DATA_ERROR:
+ case IEEE1394_RESP_TYPE_ERROR:
+ case IEEE1394_RESP_ADDRESS_ERROR:
+ ret = s1394_send_response(hal, resp);
+ TNF_PROBE_0_DEBUG(t1394_recv_request_done_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (ret);
+
+ default:
+ TNF_PROBE_1(t1394_recv_request_done_error,
+ S1394_TNF_SL_ARREQ_ERROR, "", tnf_string, msg,
+ "Invalid response code");
+ TNF_PROBE_0_DEBUG(t1394_recv_request_done_exit,
+ S1394_TNF_SL_ARREQ_STACK, "");
+ return (DDI_FAILURE);
+ }
+}
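+
+/*
+ * Usage sketch: completing a quadlet read from within a target's
+ * recv_read_request callback; the framework sends the response
+ * (or reclaims the command) based on cmd_result.
+ *
+ * req->cmd_u.q.quadlet_data = value;
+ * req->cmd_result = IEEE1394_RESP_COMPLETE;
+ * (void) t1394_recv_request_done(t1394_hdl, req, 0);
+ */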
+
+/*
+ * Function: t1394_fcp_register_controller()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * evts The structure in which the target
+ * specifies its callback routines
+ *
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): DDI_SUCCESS Successfully registered.
+ *
+ * DDI_FAILURE Not registered due to failure.
+ *
+ * Description: Used to register the target within the Framework as an FCP
+ * controller.
+ */
+/* ARGSUSED */
+int
+t1394_fcp_register_controller(t1394_handle_t t1394_hdl, t1394_fcp_evts_t *evts,
+ uint_t flags)
+{
+ int result;
+
+ TNF_PROBE_0_DEBUG(t1394_fcp_register_controller_enter,
+ S1394_TNF_SL_FCP_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ result = s1394_fcp_register_ctl((s1394_target_t *)t1394_hdl, evts);
+
+ TNF_PROBE_0_DEBUG(t1394_fcp_register_controller_exit,
+ S1394_TNF_SL_FCP_STACK, "");
+ return (result);
+}
+
+/*
+ * Function: t1394_fcp_unregister_controller()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ *
+ * Output(s): DDI_SUCCESS Successfully unregistered.
+ *
+ * DDI_FAILURE Not unregistered due to failure.
+ *
+ * Description: Used to unregister the target within the Framework as an FCP
+ * controller.
+ */
+int
+t1394_fcp_unregister_controller(t1394_handle_t t1394_hdl)
+{
+ int result;
+
+ TNF_PROBE_0_DEBUG(t1394_fcp_unregister_controller_enter,
+ S1394_TNF_SL_FCP_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ result = s1394_fcp_unregister_ctl((s1394_target_t *)t1394_hdl);
+
+ TNF_PROBE_0_DEBUG(t1394_fcp_unregister_controller_exit,
+ S1394_TNF_SL_FCP_STACK, "");
+ return (result);
+}
+
+/*
+ * Function: t1394_fcp_register_target()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * evts The structure in which the target
+ * specifies its callback routines
+ *
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): DDI_SUCCESS Successfully registered.
+ *
+ * DDI_FAILURE Not registered due to failure.
+ *
+ * Description: Used to register the target within the Framework as an FCP
+ * target.
+ */
+/* ARGSUSED */
+int
+t1394_fcp_register_target(t1394_handle_t t1394_hdl, t1394_fcp_evts_t *evts,
+ uint_t flags)
+{
+ int result;
+
+ TNF_PROBE_0_DEBUG(t1394_fcp_register_target_enter,
+ S1394_TNF_SL_FCP_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ result = s1394_fcp_register_tgt((s1394_target_t *)t1394_hdl, evts);
+
+ TNF_PROBE_0_DEBUG(t1394_fcp_register_target_exit,
+ S1394_TNF_SL_FCP_STACK, "");
+ return (result);
+}
+
+/*
+ * Function: t1394_fcp_unregister_target()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ *
+ * Output(s): DDI_SUCCESS Successfully unregistered.
+ *
+ * DDI_FAILURE Not unregistered due to failure.
+ *
+ * Description: Used to unregister the target within the Framework as an FCP
+ * target.
+ */
+int
+t1394_fcp_unregister_target(t1394_handle_t t1394_hdl)
+{
+ int result;
+
+ TNF_PROBE_0_DEBUG(t1394_fcp_unregister_target_enter,
+ S1394_TNF_SL_FCP_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ result = s1394_fcp_unregister_tgt((s1394_target_t *)t1394_hdl);
+
+ TNF_PROBE_0_DEBUG(t1394_fcp_unregister_target_exit,
+ S1394_TNF_SL_FCP_STACK, "");
+ return (result);
+}
+
+/*
+ * Function: t1394_cmp_register()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * evts The structure in which the target
+ * specifies its callback routines
+ *
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): DDI_SUCCESS Successfully registered.
+ *
+ * DDI_FAILURE Not registered due to failure.
+ *
+ * Description: Used to register the target within the Framework as a CMP
+ * device.
+ */
+/* ARGSUSED */
+int
+t1394_cmp_register(t1394_handle_t t1394_hdl, t1394_cmp_evts_t *evts,
+ uint_t flags)
+{
+ int result;
+
+ TNF_PROBE_0_DEBUG(t1394_cmp_register_enter, S1394_TNF_SL_CMP_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ result = s1394_cmp_register((s1394_target_t *)t1394_hdl, evts);
+
+ TNF_PROBE_0_DEBUG(t1394_cmp_register_exit, S1394_TNF_SL_CMP_STACK, "");
+ return (result);
+}
+
+/*
+ * Function: t1394_cmp_unregister()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ *
+ * Output(s): DDI_SUCCESS Successfully unregistered.
+ *
+ * DDI_FAILURE Not unregistered due to failure.
+ *
+ * Description: Used to unregister the target within the Framework as a CMP
+ * device.
+ */
+int
+t1394_cmp_unregister(t1394_handle_t t1394_hdl)
+{
+ int result;
+
+ TNF_PROBE_0_DEBUG(t1394_cmp_unregister_enter, S1394_TNF_SL_CMP_STACK,
+ "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ result = s1394_cmp_unregister((s1394_target_t *)t1394_hdl);
+
+ TNF_PROBE_0_DEBUG(t1394_cmp_unregister_exit, S1394_TNF_SL_CMP_STACK,
+ "");
+ return (result);
+}
+
+/*
+ * Function: t1394_cmp_read()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * reg Register type.
+ * valp Returned register value.
+ *
+ * Output(s): DDI_SUCCESS Register successfully read.
+ *
+ * DDI_FAILURE Register not read due to failure.
+ *
+ * Description: Used to read a CMP register value.
+ */
+int
+t1394_cmp_read(t1394_handle_t t1394_hdl, t1394_cmp_reg_t reg, uint32_t *valp)
+{
+ int result;
+
+ TNF_PROBE_0_DEBUG(t1394_cmp_read_enter, S1394_TNF_SL_CMP_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ result = s1394_cmp_read((s1394_target_t *)t1394_hdl, reg, valp);
+
+ TNF_PROBE_0_DEBUG(t1394_cmp_read_exit, S1394_TNF_SL_CMP_STACK, "");
+ return (result);
+}
+
+/*
+ * Function: t1394_cmp_cas()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * reg Register type.
+ * arg_val Compare argument.
+ * new_val New register value.
+ * old_valp Returned original register value.
+ *
+ * Output(s): DDI_SUCCESS Compare-swap successfully performed.
+ *
+ * DDI_FAILURE Compare-swap not performed due to failure.
+ *
+ * Description: Used to compare-swap a CMP register value.
+ */
+int
+t1394_cmp_cas(t1394_handle_t t1394_hdl, t1394_cmp_reg_t reg, uint32_t arg_val,
+ uint32_t new_val, uint32_t *old_valp)
+{
+ int result;
+
+ TNF_PROBE_0_DEBUG(t1394_cmp_cas_enter, S1394_TNF_SL_CMP_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ result = s1394_cmp_cas((s1394_target_t *)t1394_hdl, reg, arg_val,
+ new_val, old_valp);
+
+ TNF_PROBE_0_DEBUG(t1394_cmp_cas_exit, S1394_TNF_SL_CMP_STACK, "");
+ return (result);
+}
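+
+/*
+ * Usage sketch: read a plug register, then update it atomically.
+ * T1394_CMP_OMPR (the output master plug register selector) is
+ * assumed from the t1394_cmp_reg_t type. The update took effect
+ * only if the returned old value matches the compare argument.
+ *
+ * uint32_t val, old;
+ *
+ * if (t1394_cmp_read(t1394_hdl, T1394_CMP_OMPR, &val) != DDI_SUCCESS)
+ * return (DDI_FAILURE);
+ * if ((t1394_cmp_cas(t1394_hdl, T1394_CMP_OMPR, val, val | bits,
+ *     &old) != DDI_SUCCESS) || (old != val))
+ * return (DDI_FAILURE);
+ */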
+
+/*
+ * Function: t1394_alloc_isoch_single()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * sii The structure used to set up the
+ * overall characteristics of the
+ * isochronous stream
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): output_args Contains the channel number that was
+ * allocated
+ * t1394_single_hdl This is the isoch "handle" used in
+ * t1394_free_isoch_single()
+ * result Used to pass more specific info back
+ * to target
+ *
+ * Description: t1394_alloc_isoch_single() is used to direct the 1394 Software
+ * Framework to allocate an isochronous channel and bandwidth
+ * from the Isochronous Resource Manager (IRM). If a bus reset
+ * occurs, the 1394 Software Framework attempts to reallocate the
+ * same resources, calling the rsrc_fail_target() callback if
+ * it is unsuccessful.
+ */
+/* ARGSUSED */
+int
+t1394_alloc_isoch_single(t1394_handle_t t1394_hdl,
+ t1394_isoch_singleinfo_t *sii, uint_t flags,
+ t1394_isoch_single_out_t *output_args,
+ t1394_isoch_single_handle_t *t1394_single_hdl, int *result)
+{
+ s1394_hal_t *hal;
+ s1394_isoch_cec_t *cec_new;
+ t1394_join_isochinfo_t jii;
+ int ret;
+ int err;
+
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_single_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_single_hdl != NULL);
+ ASSERT(sii != NULL);
+
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Check for invalid channel_mask */
+ if (sii->si_channel_mask == 0) {
+ TNF_PROBE_1(t1394_alloc_isoch_single_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Invalid channel mask");
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_single_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Check for invalid bandwidth */
+ if ((sii->si_bandwidth <= IEEE1394_BANDWIDTH_MIN) ||
+ (sii->si_bandwidth > IEEE1394_BANDWIDTH_MAX)) {
+ TNF_PROBE_1(t1394_alloc_isoch_single_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Invalid bandwidth requirements");
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_single_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Verify that rsrc_fail_target() callback is non-NULL */
+ if (sii->rsrc_fail_target == NULL) {
+ TNF_PROBE_1(t1394_alloc_isoch_single_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Invalid callback specified");
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_single_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Allocate an Isoch CEC of type S1394_SINGLE
+ */
+
+ /* Allocate the Isoch CEC structure */
+ cec_new = kmem_zalloc(sizeof (s1394_isoch_cec_t), KM_SLEEP);
+
+ /* Initialize the structure type */
+ cec_new->cec_type = S1394_SINGLE;
+
+ /* Create the mutex and "in_callbacks" cv */
+ mutex_init(&cec_new->isoch_cec_mutex, NULL, MUTEX_DRIVER,
+ hal->halinfo.hw_interrupt);
+ cv_init(&cec_new->in_callbacks_cv, NULL, CV_DRIVER,
+ hal->halinfo.hw_interrupt);
+
+ /* Initialize the Isoch CEC's member list */
+ cec_new->cec_member_list_head = NULL;
+ cec_new->cec_member_list_tail = NULL;
+
+ /* Initialize the filters */
+ cec_new->filter_min_speed = sii->si_speed;
+ cec_new->filter_max_speed = sii->si_speed;
+ cec_new->filter_current_speed = cec_new->filter_max_speed;
+ cec_new->filter_channel_mask = sii->si_channel_mask;
+ cec_new->bandwidth = sii->si_bandwidth;
+ cec_new->state_transitions = ISOCH_CEC_FREE | ISOCH_CEC_JOIN |
+ ISOCH_CEC_SETUP;
+
+ mutex_enter(&hal->isoch_cec_list_mutex);
+
+ /* Insert Isoch CEC into the HAL's list */
+ s1394_isoch_cec_list_insert(hal, cec_new);
+
+ mutex_exit(&hal->isoch_cec_list_mutex);
+
+ /*
+ * Join the newly created Isoch CEC
+ */
+ jii.req_channel_mask = sii->si_channel_mask;
+ jii.req_max_speed = sii->si_speed;
+ jii.jii_options = T1394_TALKER;
+ jii.isoch_cec_evts_arg = sii->single_evt_arg;
+
+ /* All events are NULL except rsrc_fail_target() */
+ jii.isoch_cec_evts.setup_target = NULL;
+ jii.isoch_cec_evts.start_target = NULL;
+ jii.isoch_cec_evts.stop_target = NULL;
+ jii.isoch_cec_evts.teardown_target = NULL;
+ jii.isoch_cec_evts.rsrc_fail_target = sii->rsrc_fail_target;
+
+ ret = t1394_join_isoch_cec(t1394_hdl,
+ (t1394_isoch_cec_handle_t)cec_new, 0, &jii);
+
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(t1394_alloc_isoch_single_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unexpected error from t1394_join_isoch_cec()");
+
+ ret = t1394_free_isoch_cec(t1394_hdl, flags,
+ (t1394_isoch_cec_handle_t *)&cec_new);
+ if (ret != DDI_SUCCESS) {
+ /* Unable to free the Isoch CEC */
+ TNF_PROBE_1(t1394_alloc_isoch_single_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unexpected error from t1394_free_isoch_cec()");
+ ASSERT(0);
+ }
+
+ /* Handle is nulled out before returning */
+ *t1394_single_hdl = NULL;
+
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_single_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Setup the isoch resources, etc.
+ */
+ ret = t1394_setup_isoch_cec(t1394_hdl,
+ (t1394_isoch_cec_handle_t)cec_new, 0, &err);
+
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(t1394_alloc_isoch_single_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unexpected error from t1394_setup_isoch_cec()");
+
+ *result = err;
+
+ /* Leave the Isoch CEC */
+ ret = t1394_leave_isoch_cec(t1394_hdl,
+ (t1394_isoch_cec_handle_t)cec_new, 0);
+ if (ret != DDI_SUCCESS) {
+ /* Unable to leave the Isoch CEC */
+ TNF_PROBE_1(t1394_alloc_isoch_single_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unexpected error from t1394_leave_isoch_cec()");
+ ASSERT(0);
+ }
+
+ /* Free up the Isoch CEC */
+ ret = t1394_free_isoch_cec(t1394_hdl, flags,
+ (t1394_isoch_cec_handle_t *)&cec_new);
+ if (ret != DDI_SUCCESS) {
+ /* Unable to free the Isoch CEC */
+ TNF_PROBE_1(t1394_alloc_isoch_single_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unexpected error from t1394_free_isoch_cec()");
+ ASSERT(0);
+ }
+
+ /* Handle is nulled out before returning */
+ *t1394_single_hdl = NULL;
+
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_single_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Return the output args - the allocated channel number */
+ mutex_enter(&cec_new->isoch_cec_mutex);
+ output_args->channel_num = cec_new->realloc_chnl_num;
+ mutex_exit(&cec_new->isoch_cec_mutex);
+
+ /* Update the handle */
+ *t1394_single_hdl = (t1394_isoch_single_handle_t)cec_new;
+
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_single_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+}
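+
+/*
+ * Usage sketch: allocating a single isoch channel. The si_* fields
+ * are the ones consumed above; my_rsrc_fail() is a hypothetical
+ * target callback and IEEE1394_S400 a speed code from ieee1394.h.
+ *
+ * t1394_isoch_singleinfo_t sii;
+ * t1394_isoch_single_out_t out;
+ * t1394_isoch_single_handle_t single_hdl;
+ * int result;
+ *
+ * sii.si_channel_mask = mask;
+ * sii.si_bandwidth = bw;
+ * sii.si_speed = IEEE1394_S400;
+ * sii.rsrc_fail_target = my_rsrc_fail;
+ * sii.single_evt_arg = sp;
+ * if (t1394_alloc_isoch_single(t1394_hdl, &sii, 0, &out,
+ *     &single_hdl, &result) != DDI_SUCCESS)
+ * return (DDI_FAILURE);
+ * channel = out.channel_num;
+ */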
+
+/*
+ * Function: t1394_free_isoch_single()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * t1394_single_hdl The isoch "handle" return by
+ * t1394_alloc_isoch_single()
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): None
+ *
+ * Description: t1394_free_isoch_single() frees the isochronous resources
+ * and the handle that were allocated during the call to
+ * t1394_alloc_isoch_single().
+ */
+/* ARGSUSED */
+void
+t1394_free_isoch_single(t1394_handle_t t1394_hdl,
+ t1394_isoch_single_handle_t *t1394_single_hdl, uint_t flags)
+{
+ s1394_isoch_cec_t *cec_curr;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(t1394_free_isoch_single_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_single_hdl != NULL);
+
+ /* Convert the handle to an Isoch CEC pointer */
+ cec_curr = (s1394_isoch_cec_t *)(*t1394_single_hdl);
+
+ /*
+ * Teardown the isoch resources, etc.
+ */
+ ret = t1394_teardown_isoch_cec(t1394_hdl,
+ (t1394_isoch_cec_handle_t)cec_curr, 0);
+ if (ret != DDI_SUCCESS) {
+ /* Unable to teardown the Isoch CEC */
+ TNF_PROBE_1(t1394_free_isoch_single_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unexpected error from t1394_teardown_isoch_cec()");
+ ASSERT(0);
+ }
+
+ /*
+ * Leave the Isoch CEC
+ */
+ ret = t1394_leave_isoch_cec(t1394_hdl,
+ (t1394_isoch_cec_handle_t)cec_curr, 0);
+ if (ret != DDI_SUCCESS) {
+ /* Unable to leave the Isoch CEC */
+ TNF_PROBE_1(t1394_free_isoch_single_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unexpected error from t1394_leave_isoch_cec()");
+ ASSERT(0);
+ }
+
+ /*
+ * Free the Isoch CEC
+ */
+ ret = t1394_free_isoch_cec(t1394_hdl, flags,
+ (t1394_isoch_cec_handle_t *)&cec_curr);
+ if (ret != DDI_SUCCESS) {
+ /* Unable to free the Isoch CEC */
+ TNF_PROBE_1(t1394_free_isoch_single_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unexpected error from t1394_free_isoch_cec()");
+ ASSERT(0);
+ }
+
+ /* Handle is nulled out before returning */
+ *t1394_single_hdl = NULL;
+
+ TNF_PROBE_0_DEBUG(t1394_free_isoch_single_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+}
+
+/*
+ * Function: t1394_alloc_isoch_cec()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * props The structure used to set up the
+ * overall characteristics of
+ * the Isoch CEC.
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): t1394_isoch_cec_hdl The Isoch CEC "handle" used in all
+ * subsequent isoch_cec() calls
+ *
+ * Description: t1394_alloc_isoch_cec() allocates and initializes an
+ * isochronous channel event coordinator (Isoch CEC) for use
+ * in managing and coordinating activity for an isoch channel.
+ */
+/* ARGSUSED */
+int
+t1394_alloc_isoch_cec(t1394_handle_t t1394_hdl, t1394_isoch_cec_props_t *props,
+ uint_t flags, t1394_isoch_cec_handle_t *t1394_isoch_cec_hdl)
+{
+ s1394_hal_t *hal;
+ s1394_isoch_cec_t *cec_new;
+ uint64_t temp;
+
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_cec_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_isoch_cec_hdl != NULL);
+ ASSERT(props != NULL);
+
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Check for invalid channel_mask */
+ if (props->cec_channel_mask == 0) {
+ TNF_PROBE_1(t1394_alloc_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Invalid channel mask");
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Test conditions specific to T1394_NO_IRM_ALLOC */
+ temp = props->cec_channel_mask;
+ if (props->cec_options & T1394_NO_IRM_ALLOC) {
+ /* If T1394_NO_IRM_ALLOC, then only one bit should be set */
+ if ((temp & (temp - 1)) != 0) {
+ TNF_PROBE_1(t1394_alloc_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Invalid channel mask");
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* If T1394_NO_IRM_ALLOC, then speeds should be equal */
+ if (props->cec_min_speed != props->cec_max_speed) {
+ TNF_PROBE_1(t1394_alloc_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Invalid speeds (min != max)");
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ /* Check for invalid bandwidth */
+ if ((props->cec_bandwidth <= IEEE1394_BANDWIDTH_MIN) ||
+ (props->cec_bandwidth > IEEE1394_BANDWIDTH_MAX)) {
+ TNF_PROBE_1(t1394_alloc_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Invalid bandwidth requirements");
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Allocate the Isoch CEC structure */
+ cec_new = kmem_zalloc(sizeof (s1394_isoch_cec_t), KM_SLEEP);
+
+ /* Initialize the structure type */
+ cec_new->cec_type = S1394_PEER_TO_PEER;
+
+ /* Create the mutex and "in_callbacks" cv */
+ mutex_init(&cec_new->isoch_cec_mutex, NULL, MUTEX_DRIVER,
+ hal->halinfo.hw_interrupt);
+ cv_init(&cec_new->in_callbacks_cv, NULL, CV_DRIVER,
+ hal->halinfo.hw_interrupt);
+
+ /* Initialize the Isoch CEC's member list */
+ cec_new->cec_member_list_head = NULL;
+ cec_new->cec_member_list_tail = NULL;
+
+ /* Initialize the filters */
+ cec_new->filter_min_speed = props->cec_min_speed;
+ cec_new->filter_max_speed = props->cec_max_speed;
+ cec_new->filter_current_speed = cec_new->filter_max_speed;
+ cec_new->filter_channel_mask = props->cec_channel_mask;
+ cec_new->bandwidth = props->cec_bandwidth;
+ cec_new->cec_options = props->cec_options;
+ /* Save the original allocation props - t1394_leave_isoch_cec() */
+ /* uses them to recompute the filters when a member leaves */
+ cec_new->cec_alloc_props = *props;
+ cec_new->state_transitions = ISOCH_CEC_FREE | ISOCH_CEC_JOIN |
+ ISOCH_CEC_SETUP;
+
+ mutex_enter(&hal->isoch_cec_list_mutex);
+
+ /* Insert Isoch CEC into the HAL's list */
+ s1394_isoch_cec_list_insert(hal, cec_new);
+
+ mutex_exit(&hal->isoch_cec_list_mutex);
+
+ /* Update the handle and return */
+ *t1394_isoch_cec_hdl = (t1394_isoch_cec_handle_t)cec_new;
+
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: t1394_free_isoch_cec()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * flags The flags parameter is unused (for now)
+ * t1394_isoch_cec_hdl The Isoch CEC "handle" returned by
+ * t1394_alloc_isoch_cec()
+ *
+ * Output(s): DDI_SUCCESS Target successfully freed the Isoch CEC
+ * DDI_FAILURE Target failed to free the Isoch CEC
+ *
+ * Description: t1394_free_isoch_cec() attempts to free the Isoch CEC
+ * structure. It will fail (DDI_FAILURE) if there are any
+ * remaining members who have not yet left.
+ */
+/* ARGSUSED */
+int
+t1394_free_isoch_cec(t1394_handle_t t1394_hdl, uint_t flags,
+ t1394_isoch_cec_handle_t *t1394_isoch_cec_hdl)
+{
+ s1394_hal_t *hal;
+ s1394_isoch_cec_t *cec_curr;
+
+ TNF_PROBE_0_DEBUG(t1394_free_isoch_cec_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_isoch_cec_hdl != NULL);
+
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Convert the handle to an Isoch CEC pointer */
+ cec_curr = (s1394_isoch_cec_t *)(*t1394_isoch_cec_hdl);
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* Are we in any callbacks? */
+ if (CEC_IN_ANY_CALLBACKS(cec_curr)) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_free_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to free Isoch CEC (in callbacks)");
+ TNF_PROBE_0_DEBUG(t1394_free_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Is "free" a legal state transition? */
+ if (CEC_TRANSITION_LEGAL(cec_curr, ISOCH_CEC_FREE) == 0) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_free_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to free Isoch CEC");
+ TNF_PROBE_0_DEBUG(t1394_free_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ mutex_enter(&hal->isoch_cec_list_mutex);
+
+ /* Remove Isoch CEC from HAL's list */
+ s1394_isoch_cec_list_remove(hal, cec_curr);
+
+ mutex_exit(&hal->isoch_cec_list_mutex);
+
+ /* Destroy the Isoch CEC's mutex and cv */
+ cv_destroy(&cec_curr->in_callbacks_cv);
+ mutex_destroy(&cec_curr->isoch_cec_mutex);
+
+ /* Free up the memory for the Isoch CEC struct */
+ kmem_free(cec_curr, sizeof (s1394_isoch_cec_t));
+
+ /* Update the handle and return */
+ *t1394_isoch_cec_hdl = NULL;
+
+ TNF_PROBE_0_DEBUG(t1394_free_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: t1394_join_isoch_cec()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * t1394_isoch_cec_hdl The Isoch CEC "handle" returned by
+ * t1394_alloc_isoch_cec()
+ * flags The flags parameter is unused (for now)
+ * join_isoch_info This structure provides information
+ * about a target that wishes to join
+ * the given Isoch CEC. It gives
+ * max_speed, channel_mask, etc.
+ *
+ * Output(s): DDI_SUCCESS Target successfully joined the
+ * Isoch CEC
+ * DDI_FAILURE Target failed to join the Isoch CEC
+ *
+ * Description: t1394_join_isoch_cec() determines, based on the information
+ * given in the join_isoch_info structure, if the target may
+ * join the Isoch CEC. If it is determined that the target may
+ * join, the specified callback routines are stored away for
+ * later use in the coordination tasks.
+ */
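+/*
+ * Illustrative usage (a sketch): joining as the talker with a full set
+ * of event callbacks. The my_*() callbacks and my_state argument are
+ * hypothetical names; the field names come from the checks below.
+ *
+ *     t1394_join_isochinfo_t jii;
+ *
+ *     jii.req_channel_mask = 0xFFFF000000000000ULL;
+ *     jii.req_max_speed = IEEE1394_S400;
+ *     jii.jii_options = T1394_TALKER;
+ *     jii.isoch_cec_evts_arg = my_state;
+ *     jii.isoch_cec_evts.setup_target = my_setup_target;
+ *     jii.isoch_cec_evts.start_target = my_start_target;
+ *     jii.isoch_cec_evts.stop_target = my_stop_target;
+ *     jii.isoch_cec_evts.rsrc_fail_target = my_rsrc_fail_target;
+ *     jii.isoch_cec_evts.teardown_target = my_teardown_target;
+ *     if (t1394_join_isoch_cec(t1394_hdl, cec_hdl, 0, &jii) !=
+ *         DDI_SUCCESS)
+ *             mask, speed, or talker conflict - see checks below
+ */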
+/* ARGSUSED */
+int
+t1394_join_isoch_cec(t1394_handle_t t1394_hdl,
+ t1394_isoch_cec_handle_t t1394_isoch_cec_hdl, uint_t flags,
+ t1394_join_isochinfo_t *join_isoch_info)
+{
+ s1394_hal_t *hal;
+ s1394_isoch_cec_t *cec_curr;
+ s1394_isoch_cec_member_t *member_new;
+ uint64_t check_mask;
+ uint_t curr_max_speed;
+
+ TNF_PROBE_0_DEBUG(t1394_join_isoch_cec_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_isoch_cec_hdl != NULL);
+
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Convert the handle to an Isoch CEC pointer */
+ cec_curr = (s1394_isoch_cec_t *)t1394_isoch_cec_hdl;
+
+ /* Allocate a new Isoch CEC member structure */
+ member_new = kmem_zalloc(sizeof (s1394_isoch_cec_member_t), KM_SLEEP);
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* Are we in any callbacks? (Wait for them to finish) */
+ while (CEC_IN_ANY_CALLBACKS(cec_curr)) {
+ cec_curr->cec_want_wakeup = B_TRUE;
+ cv_wait(&cec_curr->in_callbacks_cv,
+ &cec_curr->isoch_cec_mutex);
+ }
+
+ /* Is "join" a legal state transition? */
+ if (CEC_TRANSITION_LEGAL(cec_curr, ISOCH_CEC_JOIN) == 0) {
+ kmem_free(member_new, sizeof (s1394_isoch_cec_member_t));
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_join_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to join Isoch CEC");
+ TNF_PROBE_0_DEBUG(t1394_join_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Check the channel mask for consistency */
+ check_mask = join_isoch_info->req_channel_mask &
+ cec_curr->filter_channel_mask;
+ if (check_mask == 0) {
+ kmem_free(member_new, sizeof (s1394_isoch_cec_member_t));
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_join_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Inconsistent channel mask specified");
+ TNF_PROBE_0_DEBUG(t1394_join_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Check for consistent speeds */
+ if (join_isoch_info->req_max_speed < cec_curr->filter_min_speed) {
+ kmem_free(member_new, sizeof (s1394_isoch_cec_member_t));
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_join_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Inconsistent speed specified");
+ TNF_PROBE_0_DEBUG(t1394_join_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ } else if (join_isoch_info->req_max_speed <
+ cec_curr->filter_current_speed) {
+ curr_max_speed = join_isoch_info->req_max_speed;
+ } else {
+ curr_max_speed = cec_curr->filter_current_speed;
+ }
+
+ /* Check for no more than one talker */
+ if ((join_isoch_info->jii_options & T1394_TALKER) &&
+ (cec_curr->cec_member_talker != NULL)) {
+ kmem_free(member_new, sizeof (s1394_isoch_cec_member_t));
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_join_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Multiple talkers specified");
+ TNF_PROBE_0_DEBUG(t1394_join_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Verify that all callbacks are non-NULL (for PEER_TO_PEER) */
+ if ((cec_curr->cec_type == S1394_PEER_TO_PEER) &&
+ ((join_isoch_info->isoch_cec_evts.setup_target == NULL) ||
+ (join_isoch_info->isoch_cec_evts.start_target == NULL) ||
+ (join_isoch_info->isoch_cec_evts.stop_target == NULL) ||
+ (join_isoch_info->isoch_cec_evts.rsrc_fail_target == NULL) ||
+ (join_isoch_info->isoch_cec_evts.teardown_target == NULL))) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_join_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Invalid callbacks specified");
+ TNF_PROBE_0_DEBUG(t1394_join_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Copy the events information into the struct */
+ member_new->isoch_cec_evts = join_isoch_info->isoch_cec_evts;
+ member_new->isoch_cec_evts_arg = join_isoch_info->isoch_cec_evts_arg;
+ member_new->cec_mem_options = join_isoch_info->jii_options;
+ member_new->cec_mem_target = (s1394_target_t *)t1394_hdl;
+
+ /* Insert new member into Isoch CEC's member list */
+ s1394_isoch_cec_member_list_insert(hal, cec_curr, member_new);
+
+ /* Update the channel mask filter */
+ cec_curr->filter_channel_mask = check_mask;
+
+ /* Update the speed filter */
+ cec_curr->filter_current_speed = curr_max_speed;
+
+ /* Update the talker pointer (if necessary) */
+ if (join_isoch_info->jii_options & T1394_TALKER)
+ cec_curr->cec_member_talker = cec_curr->cec_member_list_head;
+
+ /*
+ * Now "leave" is a legal state transition
+ * and "free" is an illegal state transition
+ */
+ CEC_SET_LEGAL(cec_curr, ISOCH_CEC_LEAVE);
+ CEC_SET_ILLEGAL(cec_curr, ISOCH_CEC_FREE);
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ TNF_PROBE_0_DEBUG(t1394_join_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: t1394_leave_isoch_cec()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * t1394_isoch_cec_hdl The Isoch CEC "handle" returned by
+ * t1394_alloc_isoch_cec()
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): DDI_SUCCESS Target successfully left the
+ * Isoch CEC
+ * DDI_FAILURE Target failed to leave the Isoch CEC
+ *
+ * Description: t1394_leave_isoch_cec() is used by a target driver to remove
+ * itself from the Isoch CEC's member list. It is possible
+ * for this call to fail because the target is not found in
+ * the current member list, or because it is not an appropriate
+ * time for a target to leave.
+ */
+/* ARGSUSED */
+int
+t1394_leave_isoch_cec(t1394_handle_t t1394_hdl,
+ t1394_isoch_cec_handle_t t1394_isoch_cec_hdl, uint_t flags)
+{
+ s1394_hal_t *hal;
+ s1394_isoch_cec_t *cec_curr;
+ s1394_isoch_cec_member_t *member_curr;
+ s1394_isoch_cec_member_t *member_temp;
+ boolean_t found;
+ uint64_t temp_channel_mask;
+ uint_t temp_max_speed;
+
+ TNF_PROBE_0_DEBUG(t1394_leave_isoch_cec_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_isoch_cec_hdl != NULL);
+
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Convert the handle to an Isoch CEC pointer */
+ cec_curr = (s1394_isoch_cec_t *)t1394_isoch_cec_hdl;
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* Are we in any callbacks? (Wait for them to finish) */
+ while (CEC_IN_ANY_CALLBACKS(cec_curr)) {
+ cec_curr->cec_want_wakeup = B_TRUE;
+ cv_wait(&cec_curr->in_callbacks_cv,
+ &cec_curr->isoch_cec_mutex);
+ }
+
+ /* Is "leave" a legal state transition? */
+ if (CEC_TRANSITION_LEGAL(cec_curr, ISOCH_CEC_LEAVE) == 0) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_leave_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to leave Isoch CEC");
+ TNF_PROBE_0_DEBUG(t1394_leave_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Find the Target on the CEC's member list */
+ found = B_FALSE;
+ temp_channel_mask = cec_curr->cec_alloc_props.cec_channel_mask;
+ temp_max_speed = cec_curr->cec_alloc_props.cec_max_speed;
+ member_curr = cec_curr->cec_member_list_head;
+ while (member_curr != NULL) {
+ if (member_curr->cec_mem_target ==
+ (s1394_target_t *)t1394_hdl) {
+ member_temp = member_curr;
+ found = B_TRUE;
+ } else {
+ /* Keep track of channel mask and max speed info */
+ temp_channel_mask &= member_curr->req_channel_mask;
+ if (member_curr->req_max_speed < temp_max_speed)
+ temp_max_speed = member_curr->req_max_speed;
+ }
+ member_curr = member_curr->cec_mem_next;
+ }
+
+ /* Target not found on this Isoch CEC */
+ if (found == B_FALSE) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_leave_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Target not found in Isoch CEC member list");
+ TNF_PROBE_0_DEBUG(t1394_leave_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ } else {
+ /* This member's departure may change filter constraints */
+ cec_curr->filter_current_speed = temp_max_speed;
+ cec_curr->filter_channel_mask = temp_channel_mask;
+ }
+
+ /* Remove member from Isoch CEC's member list */
+ s1394_isoch_cec_member_list_remove(hal, cec_curr, member_temp);
+
+ /* If we are removing the talker, then update the pointer */
+ if (cec_curr->cec_member_talker == member_temp)
+ cec_curr->cec_member_talker = NULL;
+
+ /* Is the Isoch CEC's member list empty? */
+ if ((cec_curr->cec_member_list_head == NULL) &&
+ (cec_curr->cec_member_list_tail == NULL)) {
+ /*
+ * Now "free" _might_ be a legal state transition
+ * if we aren't in setup or start phases and "leave"
+ * is definitely an illegal state transition
+ */
+ if (CEC_TRANSITION_LEGAL(cec_curr, ISOCH_CEC_JOIN) != 0)
+ CEC_SET_LEGAL(cec_curr, ISOCH_CEC_FREE);
+ CEC_SET_ILLEGAL(cec_curr, ISOCH_CEC_LEAVE);
+ }
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ /* Free the Isoch CEC member structure */
+ kmem_free(member_temp, sizeof (s1394_isoch_cec_member_t));
+
+ TNF_PROBE_0_DEBUG(t1394_leave_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: t1394_setup_isoch_cec()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * t1394_isoch_cec_hdl The Isoch CEC "handle" returned by
+ * t1394_alloc_isoch_cec()
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): result Used to pass more specific info back
+ * to target
+ *
+ * Description: t1394_setup_isoch_cec() directs the 1394 Software Framework
+ * to allocate isochronous resources and invoke the setup_target()
+ * callback for each member of the Isoch CEC. This call may
+ * fail because bandwidth was unavailable (T1394_ENO_BANDWIDTH),
+ * channels were unavailable (T1394_ENO_CHANNEL), or one of the
+ * member targets returned failure from its setup_target()
+ * callback.
+ */
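+/*
+ * Illustrative usage (a sketch): result disambiguates the failure.
+ *
+ *     int result;
+ *
+ *     if (t1394_setup_isoch_cec(t1394_hdl, cec_hdl, 0, &result) !=
+ *         DDI_SUCCESS) {
+ *             switch (result) {
+ *             case T1394_ENO_BANDWIDTH:   IRM bandwidth exhausted
+ *             case T1394_ENO_CHANNEL:     no acceptable channel free
+ *             case T1394_ETARGET:         a setup_target() failed
+ *             }
+ *     }
+ */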
+/* ARGSUSED */
+int
+t1394_setup_isoch_cec(t1394_handle_t t1394_hdl,
+ t1394_isoch_cec_handle_t t1394_isoch_cec_hdl, uint_t flags, int *result)
+{
+ s1394_hal_t *hal;
+ s1394_isoch_cec_t *cec_curr;
+ s1394_isoch_cec_member_t *member_curr;
+ t1394_setup_target_args_t target_args;
+ uint64_t temp_chnl_mask;
+ uint32_t old_chnl;
+ uint32_t try_chnl;
+ uint_t bw_alloc_units;
+ uint_t generation;
+ int chnl_num;
+ int err;
+ int ret;
+ int j;
+ int (*setup_callback)(t1394_isoch_cec_handle_t, opaque_t,
+ t1394_setup_target_args_t *);
+
+ TNF_PROBE_0_DEBUG(t1394_setup_isoch_cec_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_isoch_cec_hdl != NULL);
+
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Convert the handle to an Isoch CEC pointer */
+ cec_curr = (s1394_isoch_cec_t *)t1394_isoch_cec_hdl;
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* Are we in any callbacks? */
+ if (CEC_IN_ANY_CALLBACKS(cec_curr)) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_setup_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to setup Isoch CEC (in callbacks)");
+ TNF_PROBE_0_DEBUG(t1394_setup_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Is "setup" a legal state transition? */
+ if (CEC_TRANSITION_LEGAL(cec_curr, ISOCH_CEC_SETUP) == 0) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_setup_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to setup Isoch CEC");
+ TNF_PROBE_0_DEBUG(t1394_setup_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* If T1394_NO_IRM_ALLOC is set then don't allocate... do callbacks */
+ if (cec_curr->cec_options & T1394_NO_IRM_ALLOC) {
+ goto setup_do_callbacks;
+ }
+
+ /* Allocate bandwidth and channels */
+ for (j = 0; j < S1394_ISOCH_ALLOC_RETRIES; j++) {
+ /*
+ * Get the current generation number - don't
+ * need the lock because we are read only here
+ */
+ generation = hal->generation_count;
+
+ /* Compute how much bandwidth is needed */
+ bw_alloc_units = s1394_compute_bw_alloc_units(hal,
+ cec_curr->bandwidth, cec_curr->filter_current_speed);
+
+ /* Check that the generation has not changed - */
+ /* don't need the lock (read only) */
+ if (generation != hal->generation_count)
+ continue;
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ /* Try to allocate the bandwidth */
+ ret = s1394_bandwidth_alloc(hal, bw_alloc_units, generation,
+ &err);
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* If there was a bus reset, start over */
+ if (ret == DDI_FAILURE) {
+ if (err == CMD1394_EBUSRESET) {
+ continue; /* start over and try again */
+ } else {
+ *result = T1394_ENO_BANDWIDTH;
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_setup_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string,
+ msg, "Unable to allocate isoch bandwidth");
+ TNF_PROBE_0_DEBUG(t1394_setup_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ /* Check that the generation has not changed - */
+ /* don't need the lock (read only) */
+ if (generation != hal->generation_count)
+ continue;
+
+ /*
+ * Allocate a channel
+ * From IEEE 1394-1995, Section 8.3.2.3.8: "Bits
+ * allocated in the CHANNELS_AVAILABLE_HI field of
+ * this register shall start at bit zero (channel
+ * number zero), and additional channel numbers shall
+ * be represented in a monotonically increasing sequence
+ * of bit numbers up to a maximum of bit 31 (channel
+ * number 31). Bits allocated in the CHANNELS_AVAILABLE_LO
+ * field of this register shall start at bit zero
+ * (channel number 32), and additional channel numbers
+ * shall be represented in a monotonically increasing
+ * sequence of bit numbers up to a maximum of bit 31
+ * (channel number 63)."
+ */
+ temp_chnl_mask = cec_curr->filter_channel_mask;
+ for (chnl_num = 63; chnl_num >= 0; chnl_num--) {
+ if ((temp_chnl_mask & 1) == 1) {
+ try_chnl = (1 << ((63 - chnl_num) % 32));
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ if (chnl_num < 32) {
+ ret = s1394_channel_alloc(hal,
+ try_chnl, generation,
+ S1394_CHANNEL_ALLOC_HI, &old_chnl,
+ &err);
+ } else {
+ ret = s1394_channel_alloc(hal,
+ try_chnl, generation,
+ S1394_CHANNEL_ALLOC_LO, &old_chnl,
+ &err);
+ }
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* Did we get a channel? (or a bus reset) */
+ if ((ret == DDI_SUCCESS) ||
+ (err == CMD1394_EBUSRESET))
+ break;
+ }
+ temp_chnl_mask = temp_chnl_mask >> 1;
+ }
+
+ /* If we've tried all the possible channels, then fail */
+ if (chnl_num == -1) {
+ *result = T1394_ENO_CHANNEL;
+ /*
+ * If we successfully allocated bandwidth and
+ * then failed to get a channel, we need to
+ * free up the bandwidth
+ */
+
+ /* Check that the generation has not changed */
+ /* lock not needed here (read only) */
+ if (generation != hal->generation_count)
+ continue;
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ /* Try to free up the bandwidth */
+ ret = s1394_bandwidth_free(hal, bw_alloc_units,
+ generation, &err);
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ if (ret == DDI_FAILURE) {
+ if (err == CMD1394_EBUSRESET) {
+ continue;
+ } else {
+ TNF_PROBE_1(t1394_setup_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "",
+ tnf_string, msg,
+ "Unable to free isoch bandwidth");
+ }
+ }
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_setup_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unable to allocate isoch channel");
+ TNF_PROBE_0_DEBUG(t1394_setup_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* If we got a channel, we're done (else start over) */
+ if (ret == DDI_SUCCESS)
+ break;
+ else if (err == CMD1394_EBUSRESET)
+ continue;
+ }
+
+ /* Have we gotten too many bus resets? */
+ if (j == S1394_ISOCH_ALLOC_RETRIES) {
+ *result = T1394_ENO_BANDWIDTH;
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_setup_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unable to allocate isoch channel");
+ TNF_PROBE_0_DEBUG(t1394_setup_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ cec_curr->realloc_valid = B_TRUE;
+ cec_curr->realloc_chnl_num = chnl_num;
+ cec_curr->realloc_bandwidth = cec_curr->bandwidth;
+ cec_curr->realloc_speed = cec_curr->filter_current_speed;
+
+setup_do_callbacks:
+ /* Call all of the setup_target() callbacks */
+ target_args.channel_num = chnl_num;
+ target_args.channel_speed = cec_curr->filter_current_speed;
+
+ /* Now we are going into the callbacks */
+ cec_curr->in_callbacks = B_TRUE;
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ member_curr = cec_curr->cec_member_list_head;
+ *result = 0;
+ while (member_curr != NULL) {
+ if (member_curr->isoch_cec_evts.setup_target != NULL) {
+ setup_callback =
+ member_curr->isoch_cec_evts.setup_target;
+ ret = setup_callback(t1394_isoch_cec_hdl,
+ member_curr->isoch_cec_evts_arg, &target_args);
+ if (ret != DDI_SUCCESS)
+ *result = T1394_ETARGET;
+ }
+ member_curr = member_curr->cec_mem_next;
+ }
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* We are finished with the callbacks */
+ cec_curr->in_callbacks = B_FALSE;
+ if (cec_curr->cec_want_wakeup == B_TRUE) {
+ cec_curr->cec_want_wakeup = B_FALSE;
+ cv_broadcast(&cec_curr->in_callbacks_cv);
+ }
+
+ /*
+ * Now "start" and "teardown" are legal state transitions
+ * and "join", "free", and "setup" are illegal state transitions
+ */
+ CEC_SET_LEGAL(cec_curr, (ISOCH_CEC_START | ISOCH_CEC_TEARDOWN));
+ CEC_SET_ILLEGAL(cec_curr, (ISOCH_CEC_JOIN | ISOCH_CEC_FREE |
+ ISOCH_CEC_SETUP));
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ /* Return DDI_FAILURE if any targets failed setup */
+ if (*result != 0) {
+ TNF_PROBE_1(t1394_setup_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Target returned error in setup_target()");
+ TNF_PROBE_0_DEBUG(t1394_setup_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(t1394_setup_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: t1394_start_isoch_cec()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * t1394_isoch_cec_hdl The Isoch CEC "handle" returned by
+ * t1394_alloc_isoch_cec()
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): DDI_SUCCESS All start_target() callbacks returned
+ * successfully
+ * DDI_FAILURE One or more start_target() callbacks
+ * returned failure
+ *
+ * Description: t1394_start_isoch_cec() directs the 1394 Software Framework
+ * to invoke each of the start_target() callbacks, first for
+ * each listener, then for the talker.
+ */
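+/*
+ * Illustrative usage (a sketch): start is only legal after a
+ * successful setup; listeners are started before the talker, as
+ * described above.
+ *
+ *     if (t1394_start_isoch_cec(t1394_hdl, cec_hdl, 0) != DDI_SUCCESS)
+ *             one or more start_target() callbacks failed
+ */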
+/* ARGSUSED */
+int
+t1394_start_isoch_cec(t1394_handle_t t1394_hdl,
+ t1394_isoch_cec_handle_t t1394_isoch_cec_hdl, uint_t flags)
+{
+ s1394_isoch_cec_t *cec_curr;
+ s1394_isoch_cec_member_t *member_curr;
+ int ret;
+ boolean_t err;
+ int (*start_callback)(t1394_isoch_cec_handle_t, opaque_t);
+
+ TNF_PROBE_0_DEBUG(t1394_start_isoch_cec_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_isoch_cec_hdl != NULL);
+
+ /* Convert the handle to an Isoch CEC pointer */
+ cec_curr = (s1394_isoch_cec_t *)t1394_isoch_cec_hdl;
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* Are we in any callbacks? */
+ if (CEC_IN_ANY_CALLBACKS(cec_curr)) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_start_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to start Isoch CEC (in callbacks)");
+ TNF_PROBE_0_DEBUG(t1394_start_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Is "start" a legal state transition? */
+ if (CEC_TRANSITION_LEGAL(cec_curr, ISOCH_CEC_START) == 0) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_start_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to start Isoch CEC");
+ TNF_PROBE_0_DEBUG(t1394_start_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Now we are going into the callbacks */
+ cec_curr->in_callbacks = B_TRUE;
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ /*
+ * Call all of the start_target() callbacks
+ * Start at the tail (listeners first) and
+ * go toward the head (talker last)
+ */
+ member_curr = cec_curr->cec_member_list_tail;
+ err = B_FALSE;
+ while (member_curr != NULL) {
+ if (member_curr->isoch_cec_evts.start_target != NULL) {
+ start_callback =
+ member_curr->isoch_cec_evts.start_target;
+ ret = start_callback(t1394_isoch_cec_hdl,
+ member_curr->isoch_cec_evts_arg);
+ if (ret != DDI_SUCCESS)
+ err = B_TRUE;
+ }
+ member_curr = member_curr->cec_mem_prev;
+ }
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* We are finished with the callbacks */
+ cec_curr->in_callbacks = B_FALSE;
+ if (cec_curr->cec_want_wakeup == B_TRUE) {
+ cec_curr->cec_want_wakeup = B_FALSE;
+ cv_broadcast(&cec_curr->in_callbacks_cv);
+ }
+
+ /*
+ * Now "stop" is a legal state transitions
+ * and "start" and "teardown" are illegal state transitions
+ */
+ CEC_SET_LEGAL(cec_curr, ISOCH_CEC_STOP);
+ CEC_SET_ILLEGAL(cec_curr, (ISOCH_CEC_START | ISOCH_CEC_TEARDOWN));
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ /* Return DDI_FAILURE if any targets failed start */
+ if (err == B_TRUE) {
+ TNF_PROBE_1(t1394_start_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Target returned error in start_target()");
+ TNF_PROBE_0_DEBUG(t1394_start_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ TNF_PROBE_0_DEBUG(t1394_start_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: t1394_stop_isoch_cec()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * t1394_isoch_cec_hdl The Isoch CEC "handle" returned by
+ * t1394_alloc_isoch_cec()
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): DDI_SUCCESS Target successfully stopped the
+ * Isoch CEC
+ * DDI_FAILURE Target failed to stop the Isoch CEC
+ *
+ * Description: t1394_stop_isoch_cec() directs the 1394 Software Framework
+ * to invoke each of the stop_target() callbacks, first for
+ * the talker, then for each listener.
+ * (This call will fail if it is called at an
+ * inappropriate time, i.e. before the t1394_start_isoch_cec()
+ * call, etc.)
+ */
+/* ARGSUSED */
+int
+t1394_stop_isoch_cec(t1394_handle_t t1394_hdl,
+ t1394_isoch_cec_handle_t t1394_isoch_cec_hdl, uint_t flags)
+{
+ s1394_isoch_cec_t *cec_curr;
+ s1394_isoch_cec_member_t *member_curr;
+ void (*stop_callback)(t1394_isoch_cec_handle_t, opaque_t);
+
+ TNF_PROBE_0_DEBUG(t1394_stop_isoch_cec_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_isoch_cec_hdl != NULL);
+
+ /* Convert the handle to an Isoch CEC pointer */
+ cec_curr = (s1394_isoch_cec_t *)t1394_isoch_cec_hdl;
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* Are we in any callbacks? */
+ if (CEC_IN_ANY_CALLBACKS(cec_curr)) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_stop_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to stop Isoch CEC (in callbacks)");
+ TNF_PROBE_0_DEBUG(t1394_stop_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Is "stop" a legal state transition? */
+ if (CEC_TRANSITION_LEGAL(cec_curr, ISOCH_CEC_STOP) == 0) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_stop_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to stop Isoch CEC");
+ TNF_PROBE_0_DEBUG(t1394_stop_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Now we are going into the callbacks */
+ cec_curr->in_callbacks = B_TRUE;
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ /*
+ * Call all of the stop_target() callbacks
+ * Start at the head (talker first) and
+ * go toward the tail (listeners last)
+ */
+ member_curr = cec_curr->cec_member_list_head;
+ while (member_curr != NULL) {
+ if (member_curr->isoch_cec_evts.stop_target != NULL) {
+ stop_callback =
+ member_curr->isoch_cec_evts.stop_target;
+ stop_callback(t1394_isoch_cec_hdl,
+ member_curr->isoch_cec_evts_arg);
+ }
+ member_curr = member_curr->cec_mem_next;
+ }
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* We are finished with the callbacks */
+ cec_curr->in_callbacks = B_FALSE;
+ if (cec_curr->cec_want_wakeup == B_TRUE) {
+ cec_curr->cec_want_wakeup = B_FALSE;
+ cv_broadcast(&cec_curr->in_callbacks_cv);
+ }
+
+ /*
+ * Now "start" and "teardown" are legal state transitions
+ * and "stop" is an illegal state transitions
+ */
+ CEC_SET_LEGAL(cec_curr, (ISOCH_CEC_START | ISOCH_CEC_TEARDOWN));
+ CEC_SET_ILLEGAL(cec_curr, ISOCH_CEC_STOP);
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ TNF_PROBE_0_DEBUG(t1394_stop_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: t1394_teardown_isoch_cec()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * t1394_isoch_cec_hdl The Isoch CEC "handle" returned by
+ * t1394_alloc_isoch_cec()
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): DDI_SUCCESS Target successfully tore down the
+ * Isoch CEC
+ * DDI_FAILURE Target failed to tear down the
+ * Isoch CEC
+ *
+ * Description: t1394_teardown_isoch_cec() directs the 1394 Software Framework
+ * to free up any isochronous resources we might be holding and
+ * call all of the teardown_target() callbacks.
+ * (This call will fail if it is called at an
+ * inappropriate time, i.e. before the t1394_start_isoch_cec()
+ * call, before the t1394_stop_isoch_cec() call, etc.)
+ */
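+/*
+ * Illustrative usage (a sketch): the usual unwind sequence for a
+ * member that is finished with the channel (compare
+ * t1394_free_isoch_single() above, which performs the last three
+ * steps):
+ *
+ *     (void) t1394_stop_isoch_cec(t1394_hdl, cec_hdl, 0);
+ *     (void) t1394_teardown_isoch_cec(t1394_hdl, cec_hdl, 0);
+ *     (void) t1394_leave_isoch_cec(t1394_hdl, cec_hdl, 0);
+ *     (void) t1394_free_isoch_cec(t1394_hdl, 0, &cec_hdl);
+ */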
+/* ARGSUSED */
+int
+t1394_teardown_isoch_cec(t1394_handle_t t1394_hdl,
+ t1394_isoch_cec_handle_t t1394_isoch_cec_hdl, uint_t flags)
+{
+ s1394_hal_t *hal;
+ s1394_isoch_cec_t *cec_curr;
+ s1394_isoch_cec_member_t *member_curr;
+ uint32_t chnl_mask;
+ uint32_t old_chnl_mask;
+ uint_t bw_alloc_units;
+ uint_t generation;
+ int ret;
+ int err;
+ void (*teardown_callback)(t1394_isoch_cec_handle_t, opaque_t);
+
+ TNF_PROBE_0_DEBUG(t1394_teardown_isoch_cec_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_isoch_cec_hdl != NULL);
+
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Convert the handle to an Isoch CEC pointer */
+ cec_curr = (s1394_isoch_cec_t *)t1394_isoch_cec_hdl;
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* Are we in any callbacks? */
+ if (CEC_IN_ANY_CALLBACKS(cec_curr)) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_teardown_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to teardown Isoch CEC (in callbacks)");
+ TNF_PROBE_0_DEBUG(t1394_teardown_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Is "teardown" a legal state transition? */
+ if (CEC_TRANSITION_LEGAL(cec_curr, ISOCH_CEC_TEARDOWN) == 0) {
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_1(t1394_teardown_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Not allowed to teardown Isoch CEC");
+ TNF_PROBE_0_DEBUG(t1394_teardown_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* If T1394_NO_IRM_ALLOC is set then don't free... do callbacks */
+ if (cec_curr->cec_options & T1394_NO_IRM_ALLOC) {
+ goto teardown_do_callbacks;
+ }
+
+ /* If nothing has been allocated or we failed to */
+ /* reallocate, then we are done... call the callbacks */
+ if ((cec_curr->realloc_valid == B_FALSE) ||
+ (cec_curr->realloc_failed == B_TRUE)) {
+ goto teardown_do_callbacks;
+ }
+
+ /*
+ * Get the current generation number - don't need the
+ * topology tree mutex here because it is read-only, and
+ * there is a race condition with or without it.
+ */
+ generation = hal->generation_count;
+
+ /* Compute the amount of bandwidth to free */
+ bw_alloc_units = s1394_compute_bw_alloc_units(hal,
+ cec_curr->bandwidth, cec_curr->realloc_speed);
+
+ /* Check that the generation has not changed - */
+ /* don't need the lock (read only) */
+ if (generation != hal->generation_count)
+ goto teardown_do_callbacks;
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ /* Try to free up the bandwidth */
+ ret = s1394_bandwidth_free(hal, bw_alloc_units, generation, &err);
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ if (ret == DDI_FAILURE) {
+ if (err == CMD1394_EBUSRESET) {
+ goto teardown_do_callbacks;
+ } else {
+ TNF_PROBE_1(t1394_teardown_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unable to free allocated bandwidth");
+ }
+ }
+
+ /* Free the allocated channel */
+ chnl_mask = (1 << ((63 - cec_curr->realloc_chnl_num) % 32));
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ if (cec_curr->realloc_chnl_num < 32) {
+ ret = s1394_channel_free(hal, chnl_mask, generation,
+ S1394_CHANNEL_ALLOC_HI, &old_chnl_mask, &err);
+ } else {
+ ret = s1394_channel_free(hal, chnl_mask, generation,
+ S1394_CHANNEL_ALLOC_LO, &old_chnl_mask, &err);
+ }
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ if (ret == DDI_FAILURE) {
+ if (err == CMD1394_EBUSRESET) {
+ goto teardown_do_callbacks;
+ } else {
+ TNF_PROBE_1(t1394_teardown_isoch_cec_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "Unable to free allocated bandwidth");
+ }
+ }
+
+teardown_do_callbacks:
+ /* From here on reallocation is unnecessary */
+ cec_curr->realloc_valid = B_FALSE;
+ cec_curr->realloc_chnl_num = 0;
+ cec_curr->realloc_bandwidth = 0;
+
+ /* Now we are going into the callbacks */
+ cec_curr->in_callbacks = B_TRUE;
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+
+ /* Call all of the teardown_target() callbacks */
+ member_curr = cec_curr->cec_member_list_head;
+ while (member_curr != NULL) {
+ if (member_curr->isoch_cec_evts.teardown_target != NULL) {
+ teardown_callback =
+ member_curr->isoch_cec_evts.teardown_target;
+ teardown_callback(t1394_isoch_cec_hdl,
+ member_curr->isoch_cec_evts_arg);
+ }
+ member_curr = member_curr->cec_mem_next;
+ }
+
+ /* Lock the Isoch CEC member list */
+ mutex_enter(&cec_curr->isoch_cec_mutex);
+
+ /* We are finished with the callbacks */
+ cec_curr->in_callbacks = B_FALSE;
+ if (cec_curr->cec_want_wakeup == B_TRUE) {
+ cec_curr->cec_want_wakeup = B_FALSE;
+ cv_broadcast(&cec_curr->in_callbacks_cv);
+ }
+
+ /*
+ * Now "join" and "setup" are legal state transitions
+ * and "start" and "teardown" are illegal state transitions
+ */
+ CEC_SET_LEGAL(cec_curr, (ISOCH_CEC_JOIN | ISOCH_CEC_SETUP));
+ CEC_SET_ILLEGAL(cec_curr, (ISOCH_CEC_START | ISOCH_CEC_TEARDOWN));
+
+ /* And if the member list is empty, then "free" is legal too */
+ if ((cec_curr->cec_member_list_head == NULL) &&
+ (cec_curr->cec_member_list_tail == NULL)) {
+ CEC_SET_LEGAL(cec_curr, ISOCH_CEC_FREE);
+ }
+
+ /* Unlock the Isoch CEC member list */
+ mutex_exit(&cec_curr->isoch_cec_mutex);
+ TNF_PROBE_0_DEBUG(t1394_teardown_isoch_cec_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: t1394_alloc_isoch_dma()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * idi This structure contains information
+ * for configuring the data flow for
+ * isochronous DMA
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): t1394_idma_hdl The IDMA "handle" used in all
+ * subsequent isoch_dma() calls
+ * result Used to pass more specific info back
+ * to target
+ *
+ * Description: t1394_alloc_isoch_dma() allocates and initializes an
+ * isochronous DMA resource for transmitting or receiving
+ * isochronous data. If it fails, result may hold
+ * T1394_EIDMA_NO_RESRCS, indicating that no isoch DMA resources
+ * are available.
+ */
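+/*
+ * Illustrative usage (a sketch): receiving in packet mode. Only the
+ * idma_options checks are visible in this file; the IXL program,
+ * channel, and callback fields of the idi structure are elided.
+ *
+ *     id1394_isoch_dmainfo_t idi;
+ *     t1394_isoch_dma_handle_t idma_hdl;
+ *     int result;
+ *
+ *     bzero(&idi, sizeof (idi));
+ *     idi.idma_options = ID1394_LISTEN_PKT_MODE;
+ *     ... compiled IXL program, channel, etc. set here ...
+ *     if (t1394_alloc_isoch_dma(t1394_hdl, &idi, 0, &idma_hdl,
+ *         &result) != DDI_SUCCESS) {
+ *             if (result == T1394_EIDMA_NO_RESRCS)
+ *                     no isoch DMA contexts available
+ *     }
+ */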
+/* ARGSUSED */
+int
+t1394_alloc_isoch_dma(t1394_handle_t t1394_hdl,
+ id1394_isoch_dmainfo_t *idi, uint_t flags,
+ t1394_isoch_dma_handle_t *t1394_idma_hdl, int *result)
+{
+ s1394_hal_t *hal;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_dma_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(idi != NULL);
+ ASSERT(t1394_idma_hdl != NULL);
+
+ /* Find the HAL this target resides on */
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Sanity check dma options. If talk enabled, listen should be off */
+ if ((idi->idma_options & ID1394_TALK) &&
+ (idi->idma_options != ID1394_TALK)) {
+ TNF_PROBE_1(t1394_alloc_isoch_dma_talk_conflict_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "conflicting idma options; talker and listener");
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_dma_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ *result = T1394_EIDMA_CONFLICT;
+ return (DDI_FAILURE);
+ }
+
+ /* Only one listen mode allowed */
+ if ((idi->idma_options & ID1394_LISTEN_PKT_MODE) &&
+ (idi->idma_options & ID1394_LISTEN_BUF_MODE)) {
+ TNF_PROBE_1(t1394_alloc_isoch_dma_listen_conflict_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "conflicting idma options; both listener modes set");
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_dma_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ *result = T1394_EIDMA_CONFLICT;
+ return (DDI_FAILURE);
+ }
+
+ /* Have HAL alloc a resource and compile ixl */
+ ret = HAL_CALL(hal).alloc_isoch_dma(hal->halinfo.hal_private, idi,
+ (void **)t1394_idma_hdl, result);
+
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(t1394_alloc_isoch_dma_hal_error,
+ S1394_TNF_SL_ISOCH_ERROR, "", tnf_string, msg,
+ "HAL alloc_isoch_dma error, maybe IXL compilation");
+ if (*result == IXL1394_ENO_DMA_RESRCS) {
+ *result = T1394_EIDMA_NO_RESRCS;
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(t1394_alloc_isoch_dma_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (ret);
+}
+
+/*
+ * Function: t1394_free_isoch_dma()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * flags The flags parameter is unused (for now)
+ * t1394_idma_hdl The IDMA "handle" returned by
+ * t1394_alloc_isoch_dma()
+ *
+ * Output(s): None
+ *
+ * Description: t1394_free_isoch_dma() is used to free all DMA resources
+ * allocated for the isoch stream associated with t1394_idma_hdl.
+ */
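+/*
+ * Illustrative usage (a sketch): the handle is passed by reference
+ * and is nulled out on return.
+ *
+ *     t1394_free_isoch_dma(t1394_hdl, 0, &idma_hdl);
+ *     ASSERT(idma_hdl == NULL);
+ */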
+/* ARGSUSED */
+void
+t1394_free_isoch_dma(t1394_handle_t t1394_hdl, uint_t flags,
+ t1394_isoch_dma_handle_t *t1394_idma_hdl)
+{
+ s1394_hal_t *hal;
+
+ TNF_PROBE_0_DEBUG(t1394_free_isoch_dma_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(*t1394_idma_hdl != NULL);
+
+ /* Find the HAL this target resides on */
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Tell HAL to release local isoch dma resources */
+ HAL_CALL(hal).free_isoch_dma(hal->halinfo.hal_private, *t1394_idma_hdl);
+
+ /* Null out isoch handle */
+ *t1394_idma_hdl = NULL;
+
+ TNF_PROBE_0_DEBUG(t1394_free_isoch_dma_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+}
+
+/*
+ * Function: t1394_start_isoch_dma()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * t1394_idma_hdl The IDMA "handle" returned by
+ * t1394_alloc_isoch_dma()
+ * idma_ctrlinfo This structure contains control args
+ * used when starting isoch DMA for
+ * the allocated resource
+ * flags One flag defined - ID1394_START_ON_CYCLE
+ *
+ * Output(s): result Used to pass more specific info back
+ * to target
+ *
+ * Description: t1394_start_isoch_dma() is used to start DMA for the isoch
+ * stream associated with t1394_idma_hdl.
+ */
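+/*
+ * Illustrative usage (a sketch): the ctrlinfo fields (such as the
+ * start cycle) are elided here.
+ *
+ *     id1394_isoch_dma_ctrlinfo_t ctrl;
+ *     int result;
+ *
+ *     bzero(&ctrl, sizeof (ctrl));
+ *     ... start-cycle and related control fields set here ...
+ *     if (t1394_start_isoch_dma(t1394_hdl, idma_hdl, &ctrl,
+ *         ID1394_START_ON_CYCLE, &result) != DDI_SUCCESS)
+ *             inspect result
+ */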
+/* ARGSUSED */
+int
+t1394_start_isoch_dma(t1394_handle_t t1394_hdl,
+ t1394_isoch_dma_handle_t t1394_idma_hdl,
+ id1394_isoch_dma_ctrlinfo_t *idma_ctrlinfo, uint_t flags,
+ int *result)
+{
+ s1394_hal_t *hal;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(t1394_start_isoch_dma_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_idma_hdl != NULL);
+ ASSERT(idma_ctrlinfo != NULL);
+
+ /* Find the HAL this target resides on */
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ ret = HAL_CALL(hal).start_isoch_dma(hal->halinfo.hal_private,
+ (void *)t1394_idma_hdl, idma_ctrlinfo, flags, result);
+
+ TNF_PROBE_0_DEBUG(t1394_start_isoch_dma_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (ret);
+}
+
+/*
+ * Function: t1394_stop_isoch_dma()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * t1394_idma_hdl The IDMA "handle" returned by
+ * t1394_alloc_isoch_dma()
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): None
+ *
+ * Description: t1394_stop_isoch_dma() is used to stop DMA for the isoch
+ * stream associated with t1394_idma_hdl.
+ */
+/* ARGSUSED */
+void
+t1394_stop_isoch_dma(t1394_handle_t t1394_hdl,
+ t1394_isoch_dma_handle_t t1394_idma_hdl, uint_t flags)
+{
+ s1394_hal_t *hal;
+ int result;
+
+ TNF_PROBE_0_DEBUG(t1394_stop_isoch_dma_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_idma_hdl != NULL);
+
+ /* Find the HAL this target resides on */
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ HAL_CALL(hal).stop_isoch_dma(hal->halinfo.hal_private,
+ (void *)t1394_idma_hdl, &result);
+
+ TNF_PROBE_0_DEBUG(t1394_stop_isoch_dma_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+}
+
+/*
+ * Function: t1394_update_isoch_dma()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * t1394_idma_hdl The IDMA "handle" returned by
+ * t1394_alloc_isoch_dma()
+ * idma_updateinfo This structure contains ixl command args
+ * used when updating args in an
+ * existing list of ixl commands with
+ * args in a new list of ixl commands.
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): result Used to pass more specific info back
+ * to target
+ *
+ * Description: t1394_update_isoch_dma() is used to alter an IXL program that
+ * has already been built (compiled) by t1394_alloc_isoch_dma().
+ */
+/* ARGSUSED */
+int
+t1394_update_isoch_dma(t1394_handle_t t1394_hdl,
+ t1394_isoch_dma_handle_t t1394_idma_hdl,
+ id1394_isoch_dma_updateinfo_t *idma_updateinfo, uint_t flags,
+ int *result)
+{
+ s1394_hal_t *hal;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(t1394_update_isoch_dma_enter,
+ S1394_TNF_SL_ISOCH_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+ ASSERT(t1394_idma_hdl != NULL);
+ ASSERT(idma_updateinfo != NULL);
+
+ /* Find the HAL this target resides on */
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ ret = HAL_CALL(hal).update_isoch_dma(hal->halinfo.hal_private,
+ (void *)t1394_idma_hdl, idma_updateinfo, flags, result);
+
+ TNF_PROBE_0_DEBUG(t1394_update_isoch_dma_exit,
+ S1394_TNF_SL_ISOCH_STACK, "");
+ return (ret);
+}
+
+/*
+ * Function: t1394_initiate_bus_reset()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): None
+ *
+ * Description: t1394_initiate_bus_reset() determines whether the local
+ * device has a P1394A PHY and will support the arbitrated
+ * short bus reset. If not, it will initiate a normal bus reset.
+ */
+/* ARGSUSED */
+void
+t1394_initiate_bus_reset(t1394_handle_t t1394_hdl, uint_t flags)
+{
+ s1394_hal_t *hal;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(t1394_initiate_bus_reset_enter,
+ S1394_TNF_SL_BR_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ /* Find the HAL this target resides on */
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Reset the bus */
+ if (hal->halinfo.phy == H1394_PHY_1394A) {
+ ret = HAL_CALL(hal).short_bus_reset(hal->halinfo.hal_private);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(t1394_initiate_bus_reset_error,
+ S1394_TNF_SL_ERROR, "", tnf_string, msg,
+ "Error initiating short bus reset");
+ }
+ } else {
+ ret = HAL_CALL(hal).bus_reset(hal->halinfo.hal_private);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(t1394_initiate_bus_reset_error,
+ S1394_TNF_SL_ERROR, "", tnf_string, msg,
+ "Error initiating bus reset");
+ }
+ }
+
+ TNF_PROBE_0_DEBUG(t1394_initiate_bus_reset_exit,
+ S1394_TNF_SL_BR_STACK, "");
+}
+
+/*
+ * Function: t1394_get_topology_map()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * bus_generation The current generation
+ * tm_length The size of the tm_buffer given
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): tm_buffer Filled in by the 1394 Software Framework
+ * with the contents of the local
+ * TOPOLOGY_MAP
+ *
+ * Description: t1394_get_topology_map() returns the 1394 TOPOLOGY_MAP. See
+ * IEEE 1394-1995 Section 8.2.3.4.1 for format information. This
+ * call can fail if there is a generation mismatch or the
+ * tm_buffer is too small to hold the TOPOLOGY_MAP.
+ */
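+/*
+ * Illustrative usage (a sketch): the buffer size below is an
+ * assumption; the map's self-describing length is checked against
+ * tm_length.
+ *
+ *     uint32_t tm_buf[256];
+ *
+ *     if (t1394_get_topology_map(t1394_hdl, generation,
+ *         sizeof (tm_buf), 0, tm_buf) != DDI_SUCCESS)
+ *             generation mismatch or buffer too small
+ */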
+/* ARGSUSED */
+int
+t1394_get_topology_map(t1394_handle_t t1394_hdl, uint_t bus_generation,
+ size_t tm_length, uint_t flags, uint32_t *tm_buffer)
+{
+ s1394_hal_t *hal;
+ uint32_t *tm_ptr;
+ uint_t length;
+
+ TNF_PROBE_0_DEBUG(t1394_get_topology_map_enter, S1394_TNF_SL_CSR_STACK,
+ "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ /* Find the HAL this target resides on */
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ /* Lock the topology tree */
+ mutex_enter(&hal->topology_tree_mutex);
+
+ /* Check the bus_generation for the Topology Map */
+ if (bus_generation != hal->generation_count) {
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+ TNF_PROBE_1(t1394_get_topology_map_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "Generation mismatch");
+ TNF_PROBE_0_DEBUG(t1394_get_topology_map_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ tm_ptr = (uint32_t *)hal->CSR_topology_map;
+ length = tm_ptr[0] >> 16;
+ length = length * 4; /* Bytes instead of quadlets */
+ length = length + 4; /* don't forget the first quad */
+
+ /* Check that the buffer is big enough */
+ if (length > (uint_t)tm_length) {
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+ TNF_PROBE_1(t1394_get_topology_map_error,
+ S1394_TNF_SL_CSR_ERROR, "", tnf_string, msg,
+ "Buffer size too small");
+ TNF_PROBE_0_DEBUG(t1394_get_topology_map_exit,
+ S1394_TNF_SL_CSR_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Do the copy */
+ bcopy(tm_ptr, tm_buffer, length);
+
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+ TNF_PROBE_0_DEBUG(t1394_get_topology_map_exit, S1394_TNF_SL_CSR_STACK,
+ "");
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Function: t1394_CRC16()
+ * Input(s): d The data to compute the CRC-16 for
+ * crc_length The length into the data to compute for
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): CRC The CRC-16 computed for the length
+ * of data specified
+ *
+ * Description: t1394_CRC16() implements ISO/IEC 13213:1994, ANSI/IEEE Std
+ * 1212, 1994 - 8.1.5.
+ */
+/* ARGSUSED */
+uint_t
+t1394_CRC16(uint32_t *d, size_t crc_length, uint_t flags)
+{
+ /* Implements ISO/IEC 13213:1994, */
+ /* ANSI/IEEE Std 1212, 1994 - 8.1.5 */
+ uint_t ret;
+
+ TNF_PROBE_0_DEBUG(t1394_CRC16_enter, S1394_TNF_SL_STACK, "");
+
+ ret = s1394_CRC16((uint_t *)d, (uint_t)crc_length);
+
+ TNF_PROBE_0_DEBUG(t1394_CRC16_exit, S1394_TNF_SL_STACK, "");
+ return (ret);
+}
+
+/*
+ * Function: t1394_add_cfgrom_entry()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * cfgrom_entryinfo This structure holds the cfgrom key,
+ * buffer, and size
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): t1394_cfgrom_hdl The ConfigROM "handle" used in
+ * t1394_rem_cfgrom_entry()
+ * result Used to pass more specific info back
+ * to target
+ *
+ * Description: t1394_add_cfgrom_entry() adds an entry to the local Config ROM,
+ * updating the directory entries as necessary. This call could
+ * fail because there is no room for the new entry in Config ROM
+ * (T1394_ECFGROM_FULL), the key is invalid (T1394_EINVALID_PARAM),
+ * or it was called in interrupt context (T1394_EINVALID_CONTEXT).
+ */
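+/*
+ * Illustrative usage (a sketch): my_key, my_quadlets, and my_nquads
+ * are hypothetical; ce_key must carry a valid IEEE 1212 key type.
+ *
+ *     t1394_cfgrom_entryinfo_t info;
+ *     t1394_cfgrom_handle_t cfgrom_hdl;
+ *     int result;
+ *
+ *     info.ce_key = my_key;
+ *     info.ce_buffer = my_quadlets;   a uint32_t array
+ *     info.ce_size = my_nquads;
+ *     if (t1394_add_cfgrom_entry(t1394_hdl, &info, 0, &cfgrom_hdl,
+ *         &result) != DDI_SUCCESS)
+ *             check result for T1394_ECFGROM_FULL, etc.
+ */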
+/* ARGSUSED */
+int
+t1394_add_cfgrom_entry(t1394_handle_t t1394_hdl,
+ t1394_cfgrom_entryinfo_t *cfgrom_entryinfo, uint_t flags,
+ t1394_cfgrom_handle_t *t1394_cfgrom_hdl, int *result)
+{
+ s1394_hal_t *hal;
+ s1394_target_t *target;
+ int ret;
+ uint_t key;
+ uint_t size;
+ uint32_t *buffer;
+
+ TNF_PROBE_0_DEBUG(t1394_add_cfgrom_entry_enter,
+ S1394_TNF_SL_CFGROM_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ target = (s1394_target_t *)t1394_hdl;
+
+ key = cfgrom_entryinfo->ce_key;
+ buffer = cfgrom_entryinfo->ce_buffer;
+ size = (uint_t)cfgrom_entryinfo->ce_size;
+
+ /* Check for a valid size */
+ if (size == 0) {
+ *result = T1394_EINVALID_PARAM;
+ TNF_PROBE_1_DEBUG(t1394_add_cfgrom_entry_error,
+ S1394_TNF_SL_CFGROM_ERROR, "", tnf_string, msg,
+ "Invalid size of Config ROM buffer (== 0)");
+ TNF_PROBE_0_DEBUG(t1394_add_cfgrom_entry_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Check for a valid key type */
+ if (((key << IEEE1212_KEY_VALUE_SHIFT) & IEEE1212_KEY_TYPE_MASK) == 0) {
+ *result = T1394_EINVALID_PARAM;
+ TNF_PROBE_1_DEBUG(t1394_add_cfgrom_entry_error,
+ S1394_TNF_SL_CFGROM_ERROR, "", tnf_string, msg,
+ "Invalid key_type in Config ROM key");
+ TNF_PROBE_0_DEBUG(t1394_add_cfgrom_entry_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Find the HAL this target resides on */
+ hal = target->on_hal;
+
+ /* Is this on the interrupt stack? */
+ if (curthread->t_flag == T_INTR_THREAD) {
+ *result = T1394_EINVALID_CONTEXT;
+ TNF_PROBE_0_DEBUG(t1394_add_cfgrom_entry_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Lock the Config ROM buffer */
+ mutex_enter(&hal->local_config_rom_mutex);
+
+ ret = s1394_add_config_rom_entry(hal, key, buffer, size,
+ (void **)t1394_cfgrom_hdl, result);
+ if (ret != DDI_SUCCESS) {
+ if (*result == CMD1394_ERSRC_CONFLICT)
+ *result = T1394_ECFGROM_FULL;
+ mutex_exit(&hal->local_config_rom_mutex);
+
+ TNF_PROBE_1(t1394_add_cfgrom_entry_error,
+ S1394_TNF_SL_CFGROM_ERROR, "", tnf_string, msg,
+ "Failed in s1394_add_cfgrom_entry()");
+ TNF_PROBE_0_DEBUG(t1394_add_cfgrom_entry_exit,
+ "stacktrace 1394 s1394", "");
+ return (ret);
+ }
+
+ /* Setup the timeout function */
+ if (hal->config_rom_timer_set == B_FALSE) {
+ hal->config_rom_timer_set = B_TRUE;
+ mutex_exit(&hal->local_config_rom_mutex);
+ hal->config_rom_timer =
+ timeout(s1394_update_config_rom_callback, hal,
+ drv_usectohz(CONFIG_ROM_UPDATE_DELAY * 1000));
+ } else {
+ mutex_exit(&hal->local_config_rom_mutex);
+ }
+
+ TNF_PROBE_0_DEBUG(t1394_add_cfgrom_entry_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (ret);
+}
+
+/*
+ * Function: t1394_rem_cfgrom_entry()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * flags The flags parameter is unused (for now)
+ * t1394_cfgrom_hdl The ConfigROM "handle" returned by
+ * t1394_add_cfgrom_entry()
+ *
+ * Output(s): result Used to pass more specific info back
+ * to target
+ *
+ * Description: t1394_rem_cfgrom_entry() is used to remove a previously added
+ * Config ROM entry (indicated by t1394_cfgrom_hdl).
+ */
+/* ARGSUSED */
+int
+t1394_rem_cfgrom_entry(t1394_handle_t t1394_hdl, uint_t flags,
+ t1394_cfgrom_handle_t *t1394_cfgrom_hdl, int *result)
+{
+ s1394_hal_t *hal;
+ s1394_target_t *target;
+ int ret;
+
+ TNF_PROBE_0_DEBUG(t1394_rem_cfgrom_entry_enter,
+ S1394_TNF_SL_CFGROM_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ target = (s1394_target_t *)t1394_hdl;
+
+ /* Find the HAL this target resides on */
+ hal = target->on_hal;
+
+ /* Is this on the interrupt stack? */
+ if (curthread->t_flag == T_INTR_THREAD) {
+ *result = T1394_EINVALID_CONTEXT;
+ TNF_PROBE_0_DEBUG(t1394_rem_cfgrom_entry_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (DDI_FAILURE);
+ }
+
+ /* Lock the Config ROM buffer */
+ mutex_enter(&hal->local_config_rom_mutex);
+
+ ret = s1394_remove_config_rom_entry(hal, (void **)t1394_cfgrom_hdl,
+ result);
+ if (ret != DDI_SUCCESS) {
+ mutex_exit(&hal->local_config_rom_mutex);
+ TNF_PROBE_1(t1394_rem_cfgrom_entry_error,
+ S1394_TNF_SL_CFGROM_ERROR, "", tnf_string, msg,
+ "Failed in s1394_remove_cfgrom_entry()");
+ TNF_PROBE_0_DEBUG(t1394_rem_cfgrom_entry_exit,
+ "stacktrace 1394 s1394", "");
+ return (ret);
+ }
+
+ /* Setup the timeout function */
+ if (hal->config_rom_timer_set == B_FALSE) {
+ hal->config_rom_timer_set = B_TRUE;
+ mutex_exit(&hal->local_config_rom_mutex);
+ hal->config_rom_timer =
+ timeout(s1394_update_config_rom_callback, hal,
+ drv_usectohz(CONFIG_ROM_UPDATE_DELAY * 1000));
+ } else {
+ mutex_exit(&hal->local_config_rom_mutex);
+ }
+
+ TNF_PROBE_0_DEBUG(t1394_rem_cfgrom_entry_exit,
+ S1394_TNF_SL_CFGROM_STACK, "");
+ return (ret);
+}
+
+/*
+ * Function: t1394_get_targetinfo()
+ * Input(s): t1394_hdl The target "handle" returned by
+ * t1394_attach()
+ * bus_generation The current generation
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): targetinfo Structure containing max_payload,
+ * max_speed, and target node ID.
+ *
+ * Description: t1394_get_targetinfo() is used to retrieve information specific
+ * to a target device. It will fail if the generation given
+ * does not match the current generation.
+ */
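+/*
+ * Illustrative usage (a sketch): generation is typically obtained
+ * from a prior bus-reset notification.
+ *
+ *     t1394_targetinfo_t ti;
+ *
+ *     if (t1394_get_targetinfo(t1394_hdl, generation, 0, &ti) ==
+ *         DDI_SUCCESS) {
+ *             if (ti.target_nodeID != T1394_INVALID_NODEID)
+ *                     use ti.current_max_speed and
+ *                     ti.current_max_payload
+ *     }
+ */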
+/* ARGSUSED */
+int
+t1394_get_targetinfo(t1394_handle_t t1394_hdl, uint_t bus_generation,
+ uint_t flags, t1394_targetinfo_t *targetinfo)
+{
+ s1394_hal_t *hal;
+ s1394_target_t *target;
+ uint_t dev;
+ uint_t curr;
+ uint_t from_node;
+ uint_t to_node;
+
+ TNF_PROBE_0_DEBUG(t1394_get_targetinfo_enter, S1394_TNF_SL_STACK, "");
+
+ ASSERT(t1394_hdl != NULL);
+
+ /* Find the HAL this target resides on */
+ hal = ((s1394_target_t *)t1394_hdl)->on_hal;
+
+ target = (s1394_target_t *)t1394_hdl;
+
+ /* Lock the topology tree */
+ mutex_enter(&hal->topology_tree_mutex);
+
+ /* Check the bus_generation */
+ if (bus_generation != hal->generation_count) {
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+ TNF_PROBE_3(t1394_get_targetinfo_error, S1394_TNF_SL_STACK, "",
+ tnf_string, msg, "Generation mismatch",
+ tnf_uint, gen, bus_generation,
+ tnf_uint, current_gen, hal->generation_count);
+ return (DDI_FAILURE);
+ }
+
+ rw_enter(&hal->target_list_rwlock, RW_READER);
+ /*
+ * If there is no node, report T1394_INVALID_NODEID for target_nodeID;
+ * current_max_speed and current_max_payload are undefined for this
+ * case.
+ */
+ if (((target->target_state & S1394_TARG_GONE) != 0) ||
+ (target->on_node == NULL)) {
+ targetinfo->target_nodeID = T1394_INVALID_NODEID;
+ TNF_PROBE_1_DEBUG(t1394_get_targetinfo_exit,
+ S1394_TNF_SL_STACK, "", tnf_string, msg, "No device");
+ } else {
+ targetinfo->target_nodeID =
+ (target->on_hal->node_id & IEEE1394_BUS_NUM_MASK) |
+ target->on_node->node_num;
+
+ from_node = (target->on_hal->node_id) & IEEE1394_NODE_NUM_MASK;
+ to_node = target->on_node->node_num;
+
+ targetinfo->current_max_speed = (uint_t)s1394_speed_map_get(
+ hal, from_node, to_node);
+
+ /* Get current_max_payload */
+ s1394_get_maxpayload(target, &dev, &curr);
+ targetinfo->current_max_payload = curr;
+
+ TNF_PROBE_3_DEBUG(t1394_get_targetinfo_exit,
+ S1394_TNF_SL_STACK, "",
+ tnf_uint, payload, targetinfo->current_max_payload,
+ tnf_uint, speed, targetinfo->current_max_speed,
+ tnf_uint, nodeid, targetinfo->target_nodeID);
+ }
+
+ rw_exit(&hal->target_list_rwlock);
+ /* Unlock the topology tree */
+ mutex_exit(&hal->topology_tree_mutex);
+ return (DDI_SUCCESS);
+}
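+
+/*
+ * Illustrative sketch (not part of the driver): a target might refresh
+ * its view of a device like this after a bus reset; "hdl" and "li" are
+ * hypothetical names for the target handle and local bus info.
+ *
+ *	t1394_targetinfo_t ti;
+ *
+ *	if ((t1394_get_targetinfo(hdl, li.bus_generation, 0, &ti) ==
+ *	    DDI_SUCCESS) && (ti.target_nodeID != T1394_INVALID_NODEID)) {
+ *		device is reachable: use ti.current_max_payload and
+ *		ti.current_max_speed to size and pace requests
+ *	}
+ */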
diff --git a/usr/src/uts/common/io/1394/t1394_errmsg.c b/usr/src/uts/common/io/1394/t1394_errmsg.c
new file mode 100644
index 0000000000..1dc85807d8
--- /dev/null
+++ b/usr/src/uts/common/io/1394/t1394_errmsg.c
@@ -0,0 +1,199 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * t1394_errmsg.c
+ * Utility function that targets can use to convert an error code into a
+ * printable string.
+ */
+
+#include <sys/tnf_probe.h>
+
+#include <sys/1394/t1394.h>
+#include <sys/1394/s1394.h>
+#include <sys/1394/cmd1394.h>
+#include <sys/1394/ixl1394.h>
+
+static const char *error_string[] = {
+ "CMD1394_CMDSUCCESS: Command Success", /* 0 */
+ "", /* -1 */
+ "", /* -2 */
+ "", /* -3 */
+ "", /* -4 */
+ "", /* -5 */
+ "", /* -6 */
+ "", /* -7 */
+ "", /* -8 */
+ "", /* -9 */
+ "CMD1394_ENULL_MBLK: NULL mblk pointer", /* -10 */
+ "CMD1394_EMBLK_TOO_SMALL: Mblk too small", /* -11 */
+ "CMD1394_ESTALE_GENERATION: Stale generation", /* -12 */
+ "CMD1394_EDEVICE_REMOVED: Device removed", /* -13 */
+ "CMD1394_EINVALID_CONTEXT: Invalid context", /* -14 */
+ "CMD1394_EINVALID_COMMAND: Invalid command", /* -15 */
+ "CMD1394_EUNKNOWN_ERROR: Unknown error", /* -16 */
+ "CMD1394_NOSTATUS: No status", /* -17 */
+ "CMD1394_EFATAL_ERROR: Fatal error", /* -18 */
+ "CMD1394_ENO_ATREQ: Unable to send ATREQ", /* -19 */
+ "CMD1394_EDEVICE_ERROR: Device error", /* -20 */
+ "", /* -21 */
+ "", /* -22 */
+ "", /* -23 */
+ "", /* -24 */
+ "", /* -25 */
+ "", /* -26 */
+ "", /* -27 */
+ "", /* -28 */
+ "", /* -29 */
+ "CMD1394_EDEVICE_BUSY: Device busy", /* -30 */
+ "CMD1394_ERETRIES_EXCEEDED: Too many retries", /* -31 */
+ "CMD1394_ETYPE_ERROR: Resp/ack type error", /* -32 */
+ "CMD1394_EDATA_ERROR: Resp/ack data error", /* -33 */
+ "CMD1394_EBUSRESET: Bus reset", /* -34 */
+ "CMD1394_EADDRESS_ERROR: Address error", /* -35 */
+ "CMD1394_ETIMEOUT: Command timed out", /* -36 */
+ "CMD1394_ERSRC_CONFLICT: Resource conflict" /* -37 */
+};
+
+static const char *ixl_compilation_error_string[] = {
+ "IXL1394_EMEM_ALLOC_FAIL: Memory allocation failed", /* -301 */
+ "IXL1394_EBAD_IXL_OPCODE: Bad opcode", /* -302 */
+ "IXL1394_EFRAGMENT_OFLO: Descriptor block overflow", /* -303 */
+ "IXL1394_ENO_DATA_PKTS: No descriptor blocks", /* -304 */
+ "IXL1394_EMISPLACED_RECV: Misplaced receive", /* -305 */
+ "IXL1394_EMISPLACED_SEND: Misplaced send", /* -306 */
+ "IXL1394_EPKT_HDR_MISSING: Packet header missing", /* -307 */
+ "IXL1394_ENULL_BUFFER_ADDR: NULL buffer address", /* -308 */
+ "IXL1394_EPKTSIZE_MAX_OFLO: Packet > 0xFFFF bytes", /* -309 */
+ "IXL1394_EPKTSIZE_RATIO: Improper packet length/count", /* -310 */
+ "IXL1394_EUNAPPLIED_SET_CMD: Unused set command", /* -311 */
+ "IXL1394_EDUPLICATE_SET_CMD: Multiple set commands", /* -312 */
+ "IXL1394_EJUMP_NOT_TO_LABEL: Jump destination not a label", /* -313 */
+	"IXL1394_EUPDATE_DISALLOWED: Update not allowed",	/* -314 */
+ "IXL1394_EBAD_SKIPMODE: Invalid skip mode", /* -315 */
+	"IXL1394_EWRONG_XR_CMD_MODE: Inappropriate xmit/recv mode", /* -316 */
+ "IXL1394_EINTERNAL_ERROR: Internal error", /* -317 */
+ "IXL1394_ENOT_IMPLEMENTED: Unimplemented error", /* -318 */
+ "IXL1394_EOPCODE_MISMATCH: Opcode mismatch", /* -319 */
+ "IXL1394_EOPCODE_DISALLOWED: Opcode disallowed for update", /* -320 */
+ "IXL1394_EBAD_SKIP_LABEL: Skip label destination not a label",
+ "IXL1394_EXFER_BUF_MISSING: Missing buffer in transfer command",
+ "IXL1394_EXFER_BUF_CNT_DIFF: Packet count differs in new buffer",
+ "IXL1394_EORIG_IXL_CORRUPTED: Original IXL program corrupted",
+ "IXL1394_ECOUNT_MISMATCH: IXL command count difference", /* -325 */
+ "IXL1394_EPRE_UPD_DMALOST: DMA path lost before update", /* -326 */
+ "IXL1394_EPOST_UPD_DMALOST: DMA path lost after update", /* -327 */
+ "IXL1394_ERISK_PROHIBITS_UPD: Risk prohibits update" /* -328 */
+};
+
+static const char *addr_error_string[] = {
+ "T1394_EALLOC_ADDR: Unable to alloc 1394 address block", /* -400 */
+};
+
+static const char *cec_error_string[] = {
+ "T1394_ENO_BANDWIDTH: Bandwidth allocation failed", /* -500 */
+ "T1394_ENO_CHANNEL: Channel allocation failed", /* -501 */
+ "T1394_ETARGET: One or more callbacks failed in isoch setup" /* -502 */
+};
+
+static const char *idma_error_string[] = {
+ "T1394_EIDMA_NO_RESRCS: No DMA resources", /* -600 */
+ "T1394_EIDMA_CONFLICT: Conflicting arguments" /* -601 */
+};
+
+static const char *cfgrom_error_string[] = {
+ "T1394_ECFGROM_FULL: Config ROM is full", /* -700 */
+ "T1394_EINVALID_PARAM: Invalid parameter in call", /* -701 */
+ "T1394_EINVALID_CONTEXT: Invalid context for call", /* -702 */
+ "T1394_NOERROR: No error" /* -703 */
+};
+
+#define T1394_ERRMSG_EMPTY_STRING ""
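+
+/*
+ * The tables above are indexed by negating an error code and offsetting
+ * it by the first code of its range.  For example, assuming
+ * CMD1394_ERR_FIRST is 0 (CMD1394_CMDSUCCESS), CMD1394_EBUSRESET (-34)
+ * is negated to 34 and selects error_string[34]; likewise, assuming
+ * IXL1394_COMP_ERR_FIRST is -301, IXL1394_EBAD_IXL_OPCODE (-302) selects
+ * ixl_compilation_error_string[302 - 301].
+ */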
+
+/*
+ * Function: t1394_errmsg()
+ * Input(s): result Result code
+ * flags The flags parameter is unused (for now)
+ *
+ * Output(s): const string; empty string if invalid result code is passed in
+ *
+ * Description: t1394_errmsg() returns a string corresponding to the
+ *		error code
+ */
+/* ARGSUSED */
+const char *
+t1394_errmsg(int result, uint_t flags)
+{
+ int err;
+ const char *msg = T1394_ERRMSG_EMPTY_STRING;
+
+ TNF_PROBE_1_DEBUG(t1394_errmsg_enter, S1394_TNF_SL_STACK, "",
+ tnf_int, argument, result);
+
+ if (result > 0) {
+ TNF_PROBE_2(t1394_errmsg, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "invalid result argument",
+ tnf_int, result, result);
+ TNF_PROBE_0_DEBUG(t1394_errmsg_exit, S1394_TNF_SL_STACK, "");
+ return (T1394_ERRMSG_EMPTY_STRING);
+ }
+
+ result = -result;
+
+ if ((result >= -CMD1394_ERR_FIRST) && (result <= -CMD1394_ERR_LAST)) {
+ err = result - (-CMD1394_ERR_FIRST);
+ msg = error_string[err];
+ } else if ((result >= -IXL1394_COMP_ERR_FIRST) &&
+ (result <= -IXL1394_COMP_ERR_LAST)) {
+ err = result - (-IXL1394_COMP_ERR_FIRST);
+ msg = ixl_compilation_error_string[err];
+ } else if ((result >= -T1394_EADDR_FIRST) &&
+ (result <= -T1394_EADDR_LAST)) {
+ err = result - (-T1394_EADDR_FIRST);
+ msg = addr_error_string[err];
+ } else if ((result >= -T1394_CEC_ERR_FIRST) &&
+ (result <= -T1394_CEC_ERR_LAST)) {
+ err = result - (-T1394_CEC_ERR_FIRST);
+ msg = cec_error_string[err];
+ } else if ((result >= -T1394_IDMA_ERR_FIRST) &&
+ (result <= -T1394_IDMA_ERR_LAST)) {
+ err = result - (-T1394_IDMA_ERR_FIRST);
+ msg = idma_error_string[err];
+ } else if ((result >= -T1394_ECFG_FIRST) &&
+ (result <= -T1394_ECFG_LAST)) {
+ err = result - (-T1394_ECFG_FIRST);
+ msg = cfgrom_error_string[err];
+ } else {
+ TNF_PROBE_2(t1394_errmsg, S1394_TNF_SL_ERROR, "",
+ tnf_string, msg, "invalid result argument",
+ tnf_int, result, result);
+ }
+
+ TNF_PROBE_0_DEBUG(t1394_errmsg_exit, S1394_TNF_SL_STACK, "");
+
+ return (msg);
+}
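+
+/*
+ * Illustrative use (hypothetical caller; "cmd" is an allocated 1394
+ * command whose result is being reported):
+ *
+ *	if (cmd->cmd_result != CMD1394_CMDSUCCESS)
+ *		cmn_err(CE_CONT, "command failed: %s\n",
+ *		    t1394_errmsg(cmd->cmd_result, 0));
+ */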
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394.c b/usr/src/uts/common/io/1394/targets/av1394/av1394.c
new file mode 100644
index 0000000000..710ba9b488
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394.c
@@ -0,0 +1,695 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * av1394 driver
+ */
+
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/cred.h>
+#include <sys/conf.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+/* DDI/DKI entry points */
+static int av1394_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
+static int av1394_attach(dev_info_t *, ddi_attach_cmd_t);
+static int av1394_detach(dev_info_t *, ddi_detach_cmd_t);
+static int av1394_open(dev_t *, int, int, cred_t *);
+static int av1394_close(dev_t, int, int, cred_t *);
+static int av1394_read(dev_t, struct uio *, cred_t *);
+static int av1394_write(dev_t, struct uio *, cred_t *);
+static int av1394_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
+static int av1394_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
+ size_t *, uint_t);
+static int av1394_poll(dev_t, short, int, short *, struct pollhead **);
+
+/* configuration routines */
+static void av1394_cleanup(av1394_inst_t *, int);
+static int av1394_t1394_attach(av1394_inst_t *, dev_info_t *);
+static void av1394_t1394_detach(av1394_inst_t *);
+static int av1394_add_events(av1394_inst_t *);
+static void av1394_remove_events(av1394_inst_t *);
+
+/* CPR */
+static int av1394_cpr_suspend(av1394_inst_t *);
+static int av1394_cpr_resume(av1394_inst_t *);
+
+/* callbacks */
+static void av1394_bus_reset(dev_info_t *, ddi_eventcookie_t, void *,
+ void *);
+static void av1394_disconnect(dev_info_t *, ddi_eventcookie_t, void *,
+ void *);
+static void av1394_reconnect(dev_info_t *, ddi_eventcookie_t, void *,
+ void *);
+
+extern struct mod_ops mod_driverops;
+
+struct cb_ops av1394_cb_ops = {
+ av1394_open, /* open */
+ av1394_close, /* close */
+ nulldev, /* strategy */
+ nulldev, /* print */
+ nulldev, /* dump */
+ av1394_read, /* read */
+ av1394_write, /* write */
+ av1394_ioctl, /* ioctl */
+ av1394_devmap, /* devmap */
+ nulldev, /* mmap */
+ nulldev, /* segmap */
+ av1394_poll, /* poll */
+ ddi_prop_op, /* cb_prop_op */
+ NULL, /* streamtab */
+ D_MP | D_NEW | D_HOTPLUG | D_DEVMAP
+};
+
+static struct dev_ops av1394_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* refcnt */
+ av1394_getinfo, /* getinfo */
+ nulldev, /* identify */
+ nulldev, /* probe */
+ av1394_attach, /* attach */
+ av1394_detach, /* detach */
+ nodev, /* reset */
+ &av1394_cb_ops, /* driver operations */
+ NULL, /* bus operations */
+ NULL /* power */
+};
+
+static struct modldrv av1394_modldrv = {
+ &mod_driverops,
+ "IEEE 1394 AV driver %I%",
+ &av1394_ops
+};
+
+static struct modlinkage av1394_modlinkage = {
+ MODREV_1,
+ &av1394_modldrv,
+ NULL,
+};
+
+static void *av1394_statep;
+
+#ifndef NPROBE
+extern int tnf_mod_load(void);
+extern int tnf_mod_unload(struct modlinkage *mlp);
+#endif
+
+#define AV1394_INST2STATE(inst) (ddi_get_soft_state(av1394_statep, inst))
+#define AV1394_DEV2STATE(dev) \
+ (ddi_get_soft_state(av1394_statep, AV1394_DEV2INST(dev)))
+
+#define AV1394_TNF_ENTER(func) \
+ TNF_PROBE_0_DEBUG(func##_enter, AV1394_TNF_INST_STACK, "");
+
+#define AV1394_TNF_EXIT(func) \
+ TNF_PROBE_0_DEBUG(func##_exit, AV1394_TNF_INST_STACK, "");
+
+/*
+ *
+ * --- DDI/DKI entry points
+ *
+ */
+int
+_init(void)
+{
+ int error;
+
+#ifndef NPROBE
+ (void) tnf_mod_load();
+#endif
+ error = ddi_soft_state_init(&av1394_statep, sizeof (av1394_inst_t), 1);
+ if (error != 0) {
+#ifndef NPROBE
+ (void) tnf_mod_unload(&av1394_modlinkage);
+#endif
+ return (error);
+ }
+
+ if ((error = mod_install(&av1394_modlinkage)) != 0) {
+ ddi_soft_state_fini(&av1394_statep);
+#ifndef NPROBE
+ (void) tnf_mod_unload(&av1394_modlinkage);
+#endif
+ }
+
+ return (error);
+}
+
+int
+_fini(void)
+{
+ int error;
+
+ if ((error = mod_remove(&av1394_modlinkage)) == 0) {
+ ddi_soft_state_fini(&av1394_statep);
+#ifndef NPROBE
+ (void) tnf_mod_unload(&av1394_modlinkage);
+#endif
+ }
+
+ return (error);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&av1394_modlinkage, modinfop));
+}
+
+/*
+ * attach
+ */
+static int
+av1394_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int instance = ddi_get_instance(dip);
+ av1394_inst_t *avp;
+
+ AV1394_TNF_ENTER(av1394_attach);
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ break;
+ case DDI_RESUME:
+ if ((avp = AV1394_INST2STATE(instance)) == NULL) {
+ return (DDI_FAILURE);
+ }
+ return (av1394_cpr_resume(avp));
+ default:
+ AV1394_TNF_EXIT(av1394_attach);
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_soft_state_zalloc(av1394_statep, instance) != 0) {
+ TNF_PROBE_0(av1394_attach_error_soft_state_zalloc,
+ AV1394_TNF_INST_ERROR, "");
+ AV1394_TNF_EXIT(av1394_attach);
+ return (DDI_FAILURE);
+ }
+ avp = AV1394_INST2STATE(instance);
+
+ if (av1394_t1394_attach(avp, dip) != DDI_SUCCESS) {
+ av1394_cleanup(avp, 1);
+ AV1394_TNF_EXIT(av1394_attach);
+ return (DDI_FAILURE);
+ }
+
+ mutex_init(&avp->av_mutex, NULL, MUTEX_DRIVER,
+ avp->av_attachinfo.iblock_cookie);
+
+#ifndef __lock_lint
+ avp->av_dip = dip;
+ avp->av_instance = instance;
+#endif
+
+ if (av1394_add_events(avp) != DDI_SUCCESS) {
+ av1394_cleanup(avp, 2);
+ AV1394_TNF_EXIT(av1394_attach);
+ return (DDI_FAILURE);
+ }
+
+ if (av1394_isoch_attach(avp) != DDI_SUCCESS) {
+ av1394_cleanup(avp, 3);
+ AV1394_TNF_EXIT(av1394_attach);
+ return (DDI_FAILURE);
+ }
+
+ if (av1394_async_attach(avp) != DDI_SUCCESS) {
+ av1394_cleanup(avp, 4);
+ AV1394_TNF_EXIT(av1394_attach);
+ return (DDI_FAILURE);
+ }
+
+#ifndef __lock_lint
+ avp->av_dev_state = AV1394_DEV_ONLINE;
+#endif
+
+ ddi_report_dev(dip);
+
+ AV1394_TNF_EXIT(av1394_attach);
+ return (DDI_SUCCESS);
+}
+
+static int
+av1394_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int instance = ddi_get_instance(dip);
+ av1394_inst_t *avp;
+
+ AV1394_TNF_ENTER(av1394_detach);
+
+ if ((avp = AV1394_INST2STATE(instance)) == NULL) {
+ TNF_PROBE_0(av1394_detach_error_instance,
+ AV1394_TNF_INST_ERROR, "");
+ AV1394_TNF_EXIT(av1394_detach);
+ return (DDI_FAILURE);
+ }
+
+ switch (cmd) {
+ case DDI_DETACH:
+ av1394_cleanup(avp, AV1394_CLEANUP_LEVEL_MAX);
+ AV1394_TNF_EXIT(av1394_detach);
+ return (DDI_SUCCESS);
+ case DDI_SUSPEND:
+ return (av1394_cpr_suspend(avp));
+ default:
+ AV1394_TNF_EXIT(av1394_detach);
+ return (DDI_FAILURE);
+ }
+}
+
+/*ARGSUSED*/
+static int
+av1394_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
+ void **result)
+{
+ dev_t dev = (dev_t)arg;
+ av1394_inst_t *avp;
+ int rval = DDI_FAILURE;
+
+ switch (infocmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ if ((avp = AV1394_DEV2STATE(dev)) != NULL) {
+ *result = avp->av_dip;
+ rval = DDI_SUCCESS;
+ } else {
+ *result = NULL;
+ }
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ *result = (void *)(uintptr_t)AV1394_DEV2INST(dev);
+ rval = DDI_SUCCESS;
+ break;
+ }
+
+ return (rval);
+}
+
+/*ARGSUSED*/
+static int
+av1394_open(dev_t *dev, int flag, int otyp, cred_t *cr)
+{
+ av1394_inst_t *avp = AV1394_DEV2STATE(*dev);
+ int ret = ENXIO;
+
+ AV1394_TNF_ENTER(av1394_open);
+ if (avp != NULL) {
+ if (AV1394_DEV_IS_ISOCH(*dev)) {
+ ret = 0;
+ } else if (AV1394_DEV_IS_ASYNC(*dev)) {
+ ret = av1394_async_open(avp, flag);
+ }
+ }
+ AV1394_TNF_EXIT(av1394_open);
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int
+av1394_close(dev_t dev, int flag, int otyp, cred_t *cr)
+{
+ av1394_inst_t *avp = AV1394_DEV2STATE(dev);
+ int ret = ENXIO;
+
+ AV1394_TNF_ENTER(av1394_close);
+ if (avp != NULL) {
+ if (AV1394_DEV_IS_ISOCH(dev)) {
+ ret = av1394_isoch_close(avp, flag);
+ } else if (AV1394_DEV_IS_ASYNC(dev)) {
+ ret = av1394_async_close(avp, flag);
+ }
+ }
+ AV1394_TNF_EXIT(av1394_close);
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int
+av1394_read(dev_t dev, struct uio *uiop, cred_t *cr)
+{
+ av1394_inst_t *avp = AV1394_DEV2STATE(dev);
+ int ret = ENXIO;
+
+ AV1394_TNF_ENTER(av1394_read);
+ if (avp != NULL) {
+ if (AV1394_DEV_IS_ISOCH(dev)) {
+ ret = av1394_isoch_read(avp, uiop);
+ } else if (AV1394_DEV_IS_ASYNC(dev)) {
+ ret = av1394_async_read(avp, uiop);
+ }
+ }
+ AV1394_TNF_EXIT(av1394_read);
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int
+av1394_write(dev_t dev, struct uio *uiop, cred_t *cr)
+{
+ av1394_inst_t *avp = AV1394_DEV2STATE(dev);
+ int ret = ENXIO;
+
+ AV1394_TNF_ENTER(av1394_write);
+ if (avp != NULL) {
+ if (AV1394_DEV_IS_ISOCH(dev)) {
+ ret = av1394_isoch_write(avp, uiop);
+ } else if (AV1394_DEV_IS_ASYNC(dev)) {
+ ret = av1394_async_write(avp, uiop);
+ }
+ }
+ AV1394_TNF_EXIT(av1394_write);
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int
+av1394_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rvalp)
+{
+ av1394_inst_t *avp = AV1394_DEV2STATE(dev);
+ int ret = ENXIO;
+
+ AV1394_TNF_ENTER(av1394_ioctl);
+ if (avp != NULL) {
+ if (AV1394_DEV_IS_ISOCH(dev)) {
+ ret = av1394_isoch_ioctl(avp, cmd, arg, mode, rvalp);
+ } else if (AV1394_DEV_IS_ASYNC(dev)) {
+ ret = av1394_async_ioctl(avp, cmd, arg, mode, rvalp);
+ }
+ }
+ AV1394_TNF_EXIT(av1394_ioctl);
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int
+av1394_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
+ size_t *maplen, uint_t model)
+{
+ av1394_inst_t *avp = AV1394_DEV2STATE(dev);
+ int ret = ENXIO;
+
+ AV1394_TNF_ENTER(av1394_devmap);
+ if ((avp != NULL) && (AV1394_DEV_IS_ISOCH(dev))) {
+ ret = av1394_isoch_devmap(avp, dhp, off, len, maplen, model);
+ }
+ AV1394_TNF_EXIT(av1394_devmap);
+ return (ret);
+}
+
+static int
+av1394_poll(dev_t dev, short events, int anyyet, short *reventsp,
+ struct pollhead **phpp)
+{
+ av1394_inst_t *avp = AV1394_DEV2STATE(dev);
+ int ret = ENXIO;
+
+ AV1394_TNF_ENTER(av1394_poll);
+ if ((avp != NULL) && AV1394_DEV_IS_ASYNC(dev)) {
+ ret = av1394_async_poll(avp, events, anyyet, reventsp, phpp);
+ }
+ AV1394_TNF_EXIT(av1394_poll);
+ return (ret);
+}
+
+
+/*
+ *
+ * --- configuration routines
+ *
+ * av1394_cleanup()
+ * Cleanup after attach
+ */
+static void
+av1394_cleanup(av1394_inst_t *avp, int level)
+{
+ ASSERT((level > 0) && (level <= AV1394_CLEANUP_LEVEL_MAX));
+
+ AV1394_TNF_ENTER(av1394_cleanup);
+ switch (level) {
+ default:
+ av1394_async_detach(avp);
+ /* FALLTHRU */
+ case 4:
+ av1394_isoch_detach(avp);
+ /* FALLTHRU */
+ case 3:
+ av1394_remove_events(avp);
+ /* FALLTHRU */
+ case 2:
+ av1394_t1394_detach(avp);
+ mutex_destroy(&avp->av_mutex);
+ /* FALLTHRU */
+ case 1:
+ ddi_soft_state_free(av1394_statep, avp->av_instance);
+ }
+ AV1394_TNF_EXIT(av1394_cleanup);
+}
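+
+/*
+ * The cleanup level is the number of attach steps completed so far:
+ * 1 - soft state allocated, 2 - t1394 attached and mutex initialized,
+ * 3 - event handlers added, 4 - isoch attached,
+ * AV1394_CLEANUP_LEVEL_MAX - fully attached; the fall-through switch
+ * above undoes them in reverse order.
+ */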
+
+static int
+av1394_t1394_attach(av1394_inst_t *avp, dev_info_t *dip)
+{
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_t1394_attach);
+
+ ret = t1394_attach(dip, T1394_VERSION_V1, 0, &avp->av_attachinfo,
+ &avp->av_t1394_hdl);
+
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_t1394_attach_error, AV1394_TNF_INST_ERROR,
+ "", tnf_int, ret, ret);
+ }
+
+ AV1394_TNF_EXIT(av1394_t1394_attach);
+ return (ret);
+}
+
+static void
+av1394_t1394_detach(av1394_inst_t *avp)
+{
+ AV1394_TNF_ENTER(av1394_t1394_detach);
+
+ (void) t1394_detach(&avp->av_t1394_hdl, 0);
+
+ AV1394_TNF_EXIT(av1394_t1394_detach);
+}
+
+static int
+av1394_add_events(av1394_inst_t *avp)
+{
+ ddi_eventcookie_t br_evc, rem_evc, ins_evc;
+
+ if (ddi_get_eventcookie(avp->av_dip, DDI_DEVI_BUS_RESET_EVENT,
+ &br_evc) != DDI_SUCCESS) {
+ TNF_PROBE_0(av1394_add_events_error_bus_reset_cookie,
+ AV1394_TNF_INST_ERROR, "");
+ return (DDI_FAILURE);
+ }
+ if (ddi_add_event_handler(avp->av_dip, br_evc, av1394_bus_reset,
+ avp, &avp->av_reset_cb) != DDI_SUCCESS) {
+ TNF_PROBE_0(av1394_add_events_error_bus_reset_event,
+ AV1394_TNF_INST_ERROR, "");
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_get_eventcookie(avp->av_dip, DDI_DEVI_REMOVE_EVENT,
+ &rem_evc) != DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(avp->av_reset_cb);
+ TNF_PROBE_0(av1394_add_events_error_remove_cookie,
+ AV1394_TNF_INST_ERROR, "");
+ return (DDI_FAILURE);
+ }
+ if (ddi_add_event_handler(avp->av_dip, rem_evc, av1394_disconnect,
+ avp, &avp->av_remove_cb) != DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(avp->av_reset_cb);
+ TNF_PROBE_0(av1394_add_events_error_remove_event,
+ AV1394_TNF_INST_ERROR, "");
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_get_eventcookie(avp->av_dip, DDI_DEVI_INSERT_EVENT,
+ &ins_evc) != DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(avp->av_remove_cb);
+ (void) ddi_remove_event_handler(avp->av_reset_cb);
+ TNF_PROBE_0(av1394_add_events_error_insert_cookie,
+ AV1394_TNF_INST_ERROR, "");
+ return (DDI_FAILURE);
+ }
+ if (ddi_add_event_handler(avp->av_dip, ins_evc, av1394_reconnect,
+ avp, &avp->av_insert_cb) != DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(avp->av_remove_cb);
+ (void) ddi_remove_event_handler(avp->av_reset_cb);
+ TNF_PROBE_0(av1394_add_events_error_insert_event,
+ AV1394_TNF_INST_ERROR, "");
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
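+
+/*
+ * Note: each failure path above removes the handlers registered so far,
+ * so a failed av1394_add_events() leaves no event callbacks behind.
+ */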
+
+static void
+av1394_remove_events(av1394_inst_t *avp)
+{
+ ddi_eventcookie_t evc;
+
+ if (ddi_get_eventcookie(avp->av_dip, DDI_DEVI_INSERT_EVENT,
+ &evc) == DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(avp->av_insert_cb);
+ }
+
+ if (ddi_get_eventcookie(avp->av_dip, DDI_DEVI_REMOVE_EVENT,
+ &evc) == DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(avp->av_remove_cb);
+ }
+
+ if (ddi_get_eventcookie(avp->av_dip, DDI_DEVI_BUS_RESET_EVENT,
+ &evc) == DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(avp->av_reset_cb);
+ }
+}
+
+/*
+ *
+ * --- CPR
+ *
+ */
+static int
+av1394_cpr_suspend(av1394_inst_t *avp)
+{
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_cpr_suspend);
+
+ ret = av1394_isoch_cpr_suspend(avp);
+
+ if (ret == DDI_SUCCESS) {
+ mutex_enter(&avp->av_mutex);
+ avp->av_prev_dev_state = avp->av_dev_state;
+ avp->av_dev_state = AV1394_DEV_SUSPENDED;
+ mutex_exit(&avp->av_mutex);
+ }
+
+ AV1394_TNF_EXIT(av1394_cpr_suspend);
+ return (ret);
+}
+
+/*
+ * CPR resume should always succeed
+ */
+static int
+av1394_cpr_resume(av1394_inst_t *avp)
+{
+ AV1394_TNF_ENTER(av1394_cpr_resume);
+
+ mutex_enter(&avp->av_mutex);
+ avp->av_dev_state = avp->av_prev_dev_state;
+ mutex_exit(&avp->av_mutex);
+
+ (void) av1394_async_cpr_resume(avp);
+
+ AV1394_TNF_EXIT(av1394_cpr_resume);
+ return (DDI_SUCCESS);
+}
+
+/*
+ *
+ * --- callbacks
+ *
+ */
+/*ARGSUSED*/
+static void
+av1394_bus_reset(dev_info_t *dip, ddi_eventcookie_t evc, void *arg, void *data)
+{
+ av1394_inst_t *avp = arg;
+
+ AV1394_TNF_ENTER(av1394_bus_reset);
+
+ if (avp == NULL) {
+ AV1394_TNF_EXIT(av1394_bus_reset);
+ return;
+ }
+
+ mutex_enter(&avp->av_mutex);
+ avp->av_attachinfo.localinfo = *(t1394_localinfo_t *)data;
+ mutex_exit(&avp->av_mutex);
+
+ av1394_async_bus_reset(avp);
+ av1394_cmp_bus_reset(avp);
+
+ AV1394_TNF_EXIT(av1394_bus_reset);
+}
+
+/*ARGSUSED*/
+static void
+av1394_disconnect(dev_info_t *dip, ddi_eventcookie_t evc, void *arg, void *data)
+{
+ av1394_inst_t *avp = arg;
+
+ AV1394_TNF_ENTER(av1394_disconnect);
+
+ if (avp == NULL) {
+ AV1394_TNF_EXIT(av1394_disconnect);
+ return;
+ }
+
+ mutex_enter(&avp->av_mutex);
+ avp->av_dev_state = AV1394_DEV_DISCONNECTED;
+ mutex_exit(&avp->av_mutex);
+
+ AV1394_TNF_EXIT(av1394_disconnect);
+}
+
+/*ARGSUSED*/
+static void
+av1394_reconnect(dev_info_t *dip, ddi_eventcookie_t evc, void *arg, void *data)
+{
+ av1394_inst_t *avp = arg;
+
+	AV1394_TNF_ENTER(av1394_reconnect);
+
+ if (avp == NULL) {
+		AV1394_TNF_EXIT(av1394_reconnect);
+ return;
+ }
+
+ mutex_enter(&avp->av_mutex);
+ avp->av_dev_state = AV1394_DEV_ONLINE;
+ avp->av_attachinfo.localinfo = *(t1394_localinfo_t *)data;
+ mutex_exit(&avp->av_mutex);
+
+ av1394_async_reconnect(avp);
+
+	AV1394_TNF_EXIT(av1394_reconnect);
+}
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394_as.c b/usr/src/uts/common/io/1394/targets/av1394/av1394_as.c
new file mode 100644
index 0000000000..53f20ba6c0
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394_as.c
@@ -0,0 +1,63 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * av1394 address space for mmap(2)
+ */
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+void
+av1394_as_init(av1394_as_t *as)
+{
+ as->as_end = 0;
+}
+
+void
+av1394_as_fini(av1394_as_t *as)
+{
+ as->as_end = 0;
+}
+
+/*
+ * XXX implement a better allocation algorithm
+ */
+off_t
+av1394_as_alloc(av1394_as_t *as, size_t size)
+{
+ off_t addr;
+
+ addr = as->as_end;
+ as->as_end += size;
+ return (addr);
+}
+
+/*ARGSUSED*/
+void
+av1394_as_free(av1394_as_t *as, off_t addr)
+{
+}
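+
+/*
+ * This is a simple bump allocator: av1394_as_alloc() returns
+ * monotonically increasing offsets and av1394_as_free() is a no-op, so
+ * space is only reclaimed by av1394_as_fini().  E.g. two successive
+ * av1394_as_alloc(as, 4096) calls return offsets 0 and 4096.
+ */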
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394_async.c b/usr/src/uts/common/io/1394/targets/av1394/av1394_async.c
new file mode 100644
index 0000000000..94323582d6
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394_async.c
@@ -0,0 +1,554 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * av1394 asynchronous module
+ */
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+/* configuration routines */
+static void av1394_async_cleanup(av1394_inst_t *, int);
+static int av1394_async_create_minor_node(av1394_inst_t *);
+static void av1394_async_remove_minor_node(av1394_inst_t *);
+static int av1394_async_update_targetinfo(av1394_inst_t *);
+static int av1394_async_db2arq_type(int);
+static void av1394_async_putbq(av1394_queue_t *, mblk_t *);
+
+static int av1394_ioctl_arq_get_ibuf_size(av1394_inst_t *, void *, int);
+static int av1394_ioctl_arq_set_ibuf_size(av1394_inst_t *, void *, int);
+
+#define AV1394_TNF_ENTER(func) \
+ TNF_PROBE_0_DEBUG(func##_enter, AV1394_TNF_ASYNC_STACK, "");
+
+#define AV1394_TNF_EXIT(func) \
+ TNF_PROBE_0_DEBUG(func##_exit, AV1394_TNF_ASYNC_STACK, "");
+
+/* tunables */
+int av1394_ibuf_size_default = 64 * 1024; /* default ibuf size */
+int av1394_ibuf_size_max = 1024 * 1024; /* max ibuf size */
+
+/*
+ *
+ * --- configuration entry points
+ *
+ */
+int
+av1394_async_attach(av1394_inst_t *avp)
+{
+ av1394_async_t *ap = &avp->av_a;
+ ddi_iblock_cookie_t ibc = avp->av_attachinfo.iblock_cookie;
+
+ AV1394_TNF_ENTER(av1394_async_attach);
+
+ mutex_init(&ap->a_mutex, NULL, MUTEX_DRIVER, ibc);
+ av1394_initq(&ap->a_rq, ibc, av1394_ibuf_size_default);
+
+ if (av1394_fcp_attach(avp) != DDI_SUCCESS) {
+ av1394_async_cleanup(avp, 1);
+ AV1394_TNF_EXIT(av1394_async_attach);
+ return (DDI_FAILURE);
+ }
+
+ if (av1394_cfgrom_init(avp) != DDI_SUCCESS) {
+ av1394_async_cleanup(avp, 2);
+ AV1394_TNF_EXIT(av1394_async_attach);
+ return (DDI_FAILURE);
+ }
+
+ if (av1394_async_create_minor_node(avp) != DDI_SUCCESS) {
+ av1394_async_cleanup(avp, 3);
+ AV1394_TNF_EXIT(av1394_async_attach);
+ return (DDI_FAILURE);
+ }
+
+ if (av1394_async_update_targetinfo(avp) != DDI_SUCCESS) {
+ av1394_async_cleanup(avp, 4);
+ AV1394_TNF_EXIT(av1394_async_attach);
+ return (DDI_FAILURE);
+ }
+
+ AV1394_TNF_EXIT(av1394_async_attach);
+ return (DDI_SUCCESS);
+}
+
+void
+av1394_async_detach(av1394_inst_t *avp)
+{
+ AV1394_TNF_ENTER(av1394_async_detach);
+
+ av1394_async_cleanup(avp, AV1394_CLEANUP_LEVEL_MAX);
+
+ AV1394_TNF_EXIT(av1394_async_detach);
+}
+
+void
+av1394_async_bus_reset(av1394_inst_t *avp)
+{
+ av1394_async_t *ap = &avp->av_a;
+ mblk_t *bp;
+
+ AV1394_TNF_ENTER(av1394_async_bus_reset);
+
+ (void) av1394_async_update_targetinfo(avp);
+
+ mutex_enter(&ap->a_mutex);
+	if (ap->a_nopen == 0) {
+ mutex_exit(&ap->a_mutex);
+ return;
+ }
+ mutex_exit(&ap->a_mutex);
+
+ /* queue up a bus reset message */
+ if ((bp = allocb(1, BPRI_HI)) == NULL) {
+ TNF_PROBE_0(av1394_async_bus_reset_error_allocb,
+ AV1394_TNF_ASYNC_ERROR, "");
+ } else {
+ DB_TYPE(bp) = AV1394_M_BUS_RESET;
+ av1394_async_putq_rq(avp, bp);
+ }
+
+ AV1394_TNF_EXIT(av1394_async_bus_reset);
+}
+
+int
+av1394_async_cpr_resume(av1394_inst_t *avp)
+{
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_async_cpr_resume);
+
+ ret = av1394_async_update_targetinfo(avp);
+
+ AV1394_TNF_EXIT(av1394_async_cpr_resume);
+ return (ret);
+}
+
+void
+av1394_async_reconnect(av1394_inst_t *avp)
+{
+ AV1394_TNF_ENTER(av1394_async_reconnect);
+
+ (void) av1394_async_update_targetinfo(avp);
+
+ AV1394_TNF_EXIT(av1394_async_reconnect);
+}
+
+int
+av1394_async_open(av1394_inst_t *avp, int flag)
+{
+ av1394_async_t *ap = &avp->av_a;
+
+ AV1394_TNF_ENTER(av1394_async_open);
+
+ mutex_enter(&ap->a_mutex);
+ if (ap->a_nopen == 0) {
+ ap->a_pollevents = 0;
+ }
+ ap->a_nopen++;
+ ap->a_oflag = flag;
+ mutex_exit(&ap->a_mutex);
+
+ AV1394_TNF_EXIT(av1394_async_open);
+ return (0);
+}
+
+/*ARGSUSED*/
+int
+av1394_async_close(av1394_inst_t *avp, int flag)
+{
+ av1394_async_t *ap = &avp->av_a;
+
+ AV1394_TNF_ENTER(av1394_async_close);
+
+ av1394_cfgrom_close(avp);
+
+ av1394_flushq(&ap->a_rq);
+
+ mutex_enter(&ap->a_mutex);
+ ap->a_nopen = 0;
+ ap->a_pollevents = 0;
+ mutex_exit(&ap->a_mutex);
+
+ AV1394_TNF_EXIT(av1394_async_close);
+ return (0);
+}
+
+int
+av1394_async_read(av1394_inst_t *avp, struct uio *uiop)
+{
+ av1394_async_t *ap = &avp->av_a;
+ av1394_queue_t *q = &ap->a_rq;
+ iec61883_arq_t arq;
+ int ret = 0;
+ mblk_t *mp;
+ int dbtype;
+ int len;
+
+ AV1394_TNF_ENTER(av1394_async_read);
+
+ /* copyout as much as we can */
+ while ((uiop->uio_resid > 0) && (ret == 0)) {
+ /*
+		 * if data is available, copy it out; otherwise wait until
+		 * data arrives, unless opened with the non-blocking flag
+ */
+ if ((mp = av1394_getq(q)) == NULL) {
+ if (ap->a_oflag & FNDELAY) {
+ AV1394_TNF_EXIT(av1394_async_read);
+ return (EAGAIN);
+ }
+ if (av1394_qwait_sig(q) <= 0) {
+ ret = EINTR;
+ }
+ continue;
+ }
+ dbtype = AV1394_DBTYPE(mp);
+
+ /* generate and copyout ARQ header, if not already */
+ if (!AV1394_IS_NOHDR(mp)) {
+ /* headers cannot be partially read */
+ if (uiop->uio_resid < sizeof (arq)) {
+ av1394_async_putbq(q, mp);
+ ret = EINVAL;
+ break;
+ }
+
+ arq.arq_type = av1394_async_db2arq_type(dbtype);
+ arq.arq_len = MBLKL(mp);
+ arq.arq_data.octlet = 0;
+
+ /* copy ARQ-embedded data */
+ len = min(arq.arq_len, sizeof (arq.arq_data));
+ bcopy(mp->b_rptr, &arq.arq_data.buf[0], len);
+
+ /* copyout the ARQ */
+ ret = uiomove(&arq, sizeof (arq), UIO_READ, uiop);
+ if (ret != 0) {
+ av1394_async_putbq(q, mp);
+ break;
+ }
+ mp->b_rptr += len;
+ AV1394_MARK_NOHDR(mp);
+ }
+
+ /* any data left? */
+ if (MBLKL(mp) == 0) {
+ freemsg(mp);
+ continue;
+ }
+
+ /* now we have some data and some user buffer space to fill */
+ len = min(uiop->uio_resid, MBLKL(mp));
+ if (len > 0) {
+ ret = uiomove(mp->b_rptr, len, UIO_READ, uiop);
+ if (ret != 0) {
+ av1394_async_putbq(q, mp);
+ break;
+ }
+ mp->b_rptr += len;
+ }
+
+ /* save the rest of the data for later */
+ if (MBLKL(mp) > 0) {
+ av1394_async_putbq(q, mp);
+ }
+ }
+
+ AV1394_TNF_EXIT(av1394_async_read);
+	return (ret);
+}
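+
+/*
+ * Illustrative sketch (hypothetical user-level consumer): each message
+ * read from the async node is framed as an iec61883_arq_t header, with
+ * any payload beyond the header-embedded bytes following it:
+ *
+ *	iec61883_arq_t arq;
+ *
+ *	read(fd, &arq, sizeof (arq));
+ *	if (arq.arq_len > sizeof (arq.arq_data))
+ *		read(fd, buf, arq.arq_len - sizeof (arq.arq_data));
+ */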
+
+int
+av1394_async_write(av1394_inst_t *avp, struct uio *uiop)
+{
+ iec61883_arq_t arq;
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_async_write);
+
+ /* all data should arrive in ARQ format */
+ while (uiop->uio_resid >= sizeof (arq)) {
+ if ((ret = uiomove(&arq, sizeof (arq), UIO_WRITE, uiop)) != 0) {
+ break;
+ }
+
+ switch (arq.arq_type) {
+ case IEC61883_ARQ_FCP_CMD:
+ case IEC61883_ARQ_FCP_RESP:
+ ret = av1394_fcp_write(avp, &arq, uiop);
+ break;
+ default:
+ ret = EINVAL;
+ }
+ if (ret != 0) {
+ break;
+ }
+ }
+
+ AV1394_TNF_EXIT(av1394_async_write);
+ return (ret);
+}
+
+/*ARGSUSED*/
+int
+av1394_async_ioctl(av1394_inst_t *avp, int cmd, intptr_t arg, int mode,
+ int *rvalp)
+{
+ int ret = EINVAL;
+
+ AV1394_TNF_ENTER(av1394_async_ioctl);
+
+ switch (cmd) {
+ case IEC61883_ARQ_GET_IBUF_SIZE:
+ ret = av1394_ioctl_arq_get_ibuf_size(avp, (void *)arg, mode);
+ break;
+ case IEC61883_ARQ_SET_IBUF_SIZE:
+ ret = av1394_ioctl_arq_set_ibuf_size(avp, (void *)arg, mode);
+ break;
+ case IEC61883_NODE_GET_BUS_NAME:
+ ret = av1394_ioctl_node_get_bus_name(avp, (void *)arg, mode);
+ break;
+ case IEC61883_NODE_GET_UID:
+ ret = av1394_ioctl_node_get_uid(avp, (void *)arg, mode);
+ break;
+ case IEC61883_NODE_GET_TEXT_LEAF:
+ ret = av1394_ioctl_node_get_text_leaf(avp, (void *)arg, mode);
+ }
+
+ AV1394_TNF_EXIT(av1394_async_ioctl);
+ return (ret);
+}
+
+/*ARGSUSED*/
+int
+av1394_async_poll(av1394_inst_t *avp, short events, int anyyet, short *reventsp,
+ struct pollhead **phpp)
+{
+ av1394_async_t *ap = &avp->av_a;
+ av1394_queue_t *rq = &ap->a_rq;
+
+ AV1394_TNF_ENTER(av1394_async_poll);
+
+ if (events & POLLIN) {
+ if (av1394_peekq(rq)) {
+ *reventsp |= POLLIN;
+ } else if (!anyyet) {
+ mutex_enter(&ap->a_mutex);
+ ap->a_pollevents |= POLLIN;
+ *phpp = &ap->a_pollhead;
+ mutex_exit(&ap->a_mutex);
+ }
+ }
+
+ AV1394_TNF_EXIT(av1394_async_poll);
+ return (0);
+}
+
+
+/*
+ * put a message on the read queue, take care of polling
+ */
+void
+av1394_async_putq_rq(av1394_inst_t *avp, mblk_t *mp)
+{
+ av1394_async_t *ap = &avp->av_a;
+
+ if (!av1394_putq(&ap->a_rq, mp)) {
+ freemsg(mp);
+ TNF_PROBE_0(av1394_async_putq_rq_error_putq,
+ AV1394_TNF_ASYNC_ERROR, "");
+ } else {
+ mutex_enter(&ap->a_mutex);
+ if (ap->a_pollevents & POLLIN) {
+ ap->a_pollevents &= ~POLLIN;
+ mutex_exit(&ap->a_mutex);
+ pollwakeup(&ap->a_pollhead, POLLIN);
+ } else {
+ mutex_exit(&ap->a_mutex);
+ }
+ }
+}
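+
+/*
+ * Note the pattern above: POLLIN is cleared and a_mutex dropped before
+ * calling pollwakeup(), which may re-enter the driver's poll entry
+ * point and therefore must not be called with a_mutex held.
+ */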
+
+/*
+ *
+ * --- configuration routines
+ *
+ * av1394_async_cleanup()
+ * Cleanup after attach
+ */
+static void
+av1394_async_cleanup(av1394_inst_t *avp, int level)
+{
+ av1394_async_t *ap = &avp->av_a;
+
+ ASSERT((level > 0) && (level <= AV1394_CLEANUP_LEVEL_MAX));
+
+ switch (level) {
+ default:
+ av1394_async_remove_minor_node(avp);
+ /* FALLTHRU */
+ case 3:
+ av1394_cfgrom_fini(avp);
+ /* FALLTHRU */
+ case 2:
+ av1394_fcp_detach(avp);
+ /* FALLTHRU */
+ case 1:
+ av1394_destroyq(&ap->a_rq);
+ mutex_destroy(&ap->a_mutex);
+ }
+}
+
+/*
+ * av1394_async_create_minor_node()
+ * Create async minor node
+ */
+static int
+av1394_async_create_minor_node(av1394_inst_t *avp)
+{
+ int ret;
+
+ ret = ddi_create_minor_node(avp->av_dip, "async",
+ S_IFCHR, AV1394_ASYNC_INST2MINOR(avp->av_instance),
+ DDI_NT_AV_ASYNC, NULL);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_0(av1394_async_create_minor_node_error,
+ AV1394_TNF_ASYNC_ERROR, "");
+ }
+ return (ret);
+}
+
+/*
+ * av1394_async_remove_minor_node()
+ * Remove async minor node
+ */
+static void
+av1394_async_remove_minor_node(av1394_inst_t *avp)
+{
+ ddi_remove_minor_node(avp->av_dip, "async");
+}
+
+/*
+ * av1394_async_update_targetinfo()
+ * Retrieve target info and bus generation
+ */
+static int
+av1394_async_update_targetinfo(av1394_inst_t *avp)
+{
+ av1394_async_t *ap = &avp->av_a;
+ uint_t bg;
+ int ret;
+
+ mutex_enter(&avp->av_mutex);
+ bg = avp->av_attachinfo.localinfo.bus_generation;
+ mutex_exit(&avp->av_mutex);
+
+ mutex_enter(&ap->a_mutex);
+ ret = t1394_get_targetinfo(avp->av_t1394_hdl, bg, 0, &ap->a_targetinfo);
+ ap->a_bus_generation = bg;
+ mutex_exit(&ap->a_mutex);
+
+ return (ret);
+}
+
+static int
+av1394_async_db2arq_type(int dbtype)
+{
+ int arq_type;
+
+ switch (dbtype) {
+ case AV1394_M_FCP_RESP:
+ arq_type = IEC61883_ARQ_FCP_RESP;
+ break;
+ case AV1394_M_FCP_CMD:
+ arq_type = IEC61883_ARQ_FCP_CMD;
+ break;
+ case AV1394_M_BUS_RESET:
+ arq_type = IEC61883_ARQ_BUS_RESET;
+ break;
+ default:
+ ASSERT(0); /* cannot happen */
+ }
+ return (arq_type);
+}
+
+static void
+av1394_async_putbq(av1394_queue_t *q, mblk_t *mp)
+{
+ if (!av1394_putbq(q, mp)) {
+ freemsg(mp);
+ TNF_PROBE_0(av1394_async_putbq_error,
+ AV1394_TNF_ASYNC_ERROR, "");
+ }
+}
+
+/*ARGSUSED*/
+static int
+av1394_ioctl_arq_get_ibuf_size(av1394_inst_t *avp, void *arg, int mode)
+{
+ av1394_async_t *ap = &avp->av_a;
+ int sz;
+ int ret = 0;
+
+ AV1394_TNF_ENTER(av1394_ioctl_arq_get_ibuf_size);
+
+ sz = av1394_getmaxq(&ap->a_rq);
+
+ if (ddi_copyout(&sz, arg, sizeof (sz), mode) != 0) {
+ ret = EFAULT;
+ }
+
+ AV1394_TNF_EXIT(av1394_ioctl_arq_get_ibuf_size);
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int
+av1394_ioctl_arq_set_ibuf_size(av1394_inst_t *avp, void *arg, int mode)
+{
+ av1394_async_t *ap = &avp->av_a;
+ int sz;
+ int ret = 0;
+
+ AV1394_TNF_ENTER(av1394_ioctl_arq_set_ibuf_size);
+
+ sz = (int)(intptr_t)arg;
+
+ if ((sz < 0) || (sz > av1394_ibuf_size_max)) {
+ ret = EINVAL;
+ } else {
+ av1394_setmaxq(&ap->a_rq, sz);
+ }
+
+ AV1394_TNF_EXIT(av1394_ioctl_arq_set_ibuf_size);
+ return (ret);
+}
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394_cfgrom.c b/usr/src/uts/common/io/1394/targets/av1394/av1394_cfgrom.c
new file mode 100644
index 0000000000..e470d26b31
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394_cfgrom.c
@@ -0,0 +1,560 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * av1394 configuration ROM
+ */
+#include <sys/file.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+/* configROM parsing */
+static int av1394_cfgrom_parse_rom(av1394_inst_t *);
+static void av1394_cfgrom_unparse_rom(av1394_inst_t *);
+static int av1394_cfgrom_parse_dir(av1394_inst_t *, cmd1394_cmd_t *,
+ av1394_cfgrom_parse_arg_t *);
+static void av1394_cfgrom_add_text_leaf(av1394_inst_t *,
+ av1394_cfgrom_parsed_dir_t *, uint64_t, uint32_t);
+static int av1394_cfgrom_read_leaf(av1394_inst_t *, uint64_t, mblk_t **);
+static void av1394_cfgrom_grow_parsed_dir(av1394_cfgrom_parsed_dir_t *,
+ int);
+
+/* routines involving bus transactions */
+static int av1394_cfgrom_rq(av1394_inst_t *, cmd1394_cmd_t *,
+ uint64_t, uint32_t *);
+
+/* the following macros emulate throwing an exception when read fails */
+#define AV1394_CFGROM_RQ(avp, cmd, addr, valp) \
+ if ((ret = av1394_cfgrom_rq(avp, cmd, addr, valp)) != 0) { \
+ goto catch; \
+ }
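+
+/*
+ * A caller of AV1394_CFGROM_RQ must declare "int ret" and provide a
+ * "catch:" label where command cleanup takes place, as the functions
+ * below do.
+ */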
+
+#define AV1394_TNF_ENTER(func) \
+ TNF_PROBE_0_DEBUG(func##_enter, AV1394_TNF_ASYNC_STACK, "");
+
+#define AV1394_TNF_EXIT(func) \
+ TNF_PROBE_0_DEBUG(func##_exit, AV1394_TNF_ASYNC_STACK, "");
+
+int
+av1394_cfgrom_init(av1394_inst_t *avp)
+{
+ av1394_cfgrom_t *crp = &avp->av_a.a_cfgrom;
+ ddi_iblock_cookie_t ibc = avp->av_attachinfo.iblock_cookie;
+
+ AV1394_TNF_ENTER(av1394_cfgrom_init);
+
+ rw_init(&crp->cr_rwlock, NULL, RW_DRIVER, ibc);
+
+ AV1394_TNF_EXIT(av1394_cfgrom_init);
+ return (DDI_SUCCESS);
+}
+
+void
+av1394_cfgrom_fini(av1394_inst_t *avp)
+{
+ av1394_cfgrom_t *crp = &avp->av_a.a_cfgrom;
+
+ AV1394_TNF_ENTER(av1394_cfgrom_fini);
+
+ rw_destroy(&crp->cr_rwlock);
+
+ AV1394_TNF_EXIT(av1394_cfgrom_fini);
+}
+
+void
+av1394_cfgrom_close(av1394_inst_t *avp)
+{
+ av1394_cfgrom_t *crp = &avp->av_a.a_cfgrom;
+
+ AV1394_TNF_ENTER(av1394_cfgrom_close);
+
+ rw_enter(&crp->cr_rwlock, RW_WRITER);
+ if (crp->cr_parsed) {
+ av1394_cfgrom_unparse_rom(avp);
+ }
+ rw_exit(&crp->cr_rwlock);
+
+ AV1394_TNF_EXIT(av1394_cfgrom_close);
+}
+
+int
+av1394_ioctl_node_get_bus_name(av1394_inst_t *avp, void *arg, int mode)
+{
+ cmd1394_cmd_t *cmd;
+ uint32_t val;
+ int err;
+ int ret = 0;
+
+ AV1394_TNF_ENTER(av1394_ioctl_node_get_bus_name);
+
+ err = t1394_alloc_cmd(avp->av_t1394_hdl, 0, &cmd);
+ if (err != DDI_SUCCESS) {
+ AV1394_TNF_EXIT(av1394_ioctl_node_get_bus_name);
+ return (ENOMEM);
+ }
+
+ ret = av1394_cfgrom_rq(avp, cmd, AV1394_CFGROM_BUS_NAME_ADDR, &val);
+ if (ret == 0) {
+ if (ddi_copyout(&val, arg, sizeof (uint32_t), mode) != 0) {
+ ret = EFAULT;
+ }
+ }
+
+ err = t1394_free_cmd(avp->av_t1394_hdl, 0, &cmd);
+ ASSERT(err == DDI_SUCCESS);
+
+ AV1394_TNF_EXIT(av1394_ioctl_node_get_bus_name);
+ return (ret);
+}
+
+int
+av1394_ioctl_node_get_uid(av1394_inst_t *avp, void *arg, int mode)
+{
+ cmd1394_cmd_t *cmd;
+ uint64_t eui64;
+ uint32_t hi, lo;
+ int err;
+ int ret = 0;
+
+ AV1394_TNF_ENTER(av1394_ioctl_node_get_uid);
+
+ err = t1394_alloc_cmd(avp->av_t1394_hdl, 0, &cmd);
+ if (err != DDI_SUCCESS) {
+ AV1394_TNF_EXIT(av1394_ioctl_node_get_uid);
+ return (ENOMEM);
+ }
+
+ AV1394_CFGROM_RQ(avp, cmd, AV1394_CFGROM_EUI64_HI_ADDR, &hi);
+ AV1394_CFGROM_RQ(avp, cmd, AV1394_CFGROM_EUI64_LO_ADDR, &lo);
+
+ eui64 = ((uint64_t)hi << 32) | lo;
+ if (ddi_copyout(&eui64, arg, sizeof (uint64_t), mode) != 0) {
+ ret = EFAULT;
+ }
+
+catch:
+ err = t1394_free_cmd(avp->av_t1394_hdl, 0, &cmd);
+ ASSERT(err == DDI_SUCCESS);
+
+ AV1394_TNF_EXIT(av1394_ioctl_node_get_uid);
+ return (ret);
+}
+
+int
+av1394_ioctl_node_get_text_leaf(av1394_inst_t *avp, void *arg, int mode)
+{
+ av1394_cfgrom_t *crp = &avp->av_a.a_cfgrom;
+ iec61883_node_text_leaf_t tl;
+#ifdef _MULTI_DATAMODEL
+ iec61883_node_text_leaf32_t tl32;
+#endif
+ int n; /* text leaf number requested */
+ int parent; /* leaf parent */
+ mblk_t *bp = NULL;
+ av1394_cfgrom_parsed_dir_t *pd;
+ int leaf_len;
+ uint32_t spec, lang_id, desc_entry;
+ int ret = 0;
+
+ AV1394_TNF_ENTER(av1394_ioctl_node_get_text_leaf);
+
+ /* copyin arguments */
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ if (ddi_copyin(arg, &tl32, sizeof (tl32), mode) != 0) {
+ AV1394_TNF_EXIT(av1394_ioctl_node_get_text_leaf);
+ return (EFAULT);
+ }
+ n = tl32.tl_num;
+ parent = tl32.tl_parent;
+ } else {
+#endif
+ if (ddi_copyin(arg, &tl, sizeof (tl), mode) != 0) {
+ AV1394_TNF_EXIT(av1394_ioctl_node_get_text_leaf);
+ return (EFAULT);
+ }
+ n = tl.tl_num;
+ parent = tl.tl_parent;
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ /* verify arguments */
+ if (((parent != IEC61883_ROM_ROOT) && (parent != IEC61883_ROM_UNIT)) ||
+ (n < 0)) {
+ return (EINVAL);
+ }
+
+ /* parse ConfigROM if not already */
+ rw_enter(&crp->cr_rwlock, RW_WRITER);
+ if (!crp->cr_parsed) {
+ ret = av1394_cfgrom_parse_rom(avp);
+ if (ret != 0) {
+ rw_exit(&crp->cr_rwlock);
+ AV1394_TNF_EXIT(av1394_ioctl_node_get_text_leaf);
+ return (ret);
+ }
+ }
+ rw_downgrade(&crp->cr_rwlock);
+
+ /* get parsed leaf info */
+ if (parent == IEC61883_ROM_ROOT) {
+ pd = &crp->cr_root_dir;
+ } else {
+ pd = &crp->cr_unit_dir;
+ }
+
+ if (n < pd->pd_tl_next) {
+ /* read the leaf */
+ ret = av1394_cfgrom_read_leaf(avp, pd->pd_tl[n].tl_addr, &bp);
+ if (ret != 0) {
+ rw_exit(&crp->cr_rwlock);
+ AV1394_TNF_EXIT(av1394_ioctl_node_get_text_leaf);
+ return (ret);
+ }
+ leaf_len = MBLKL(bp) / 4 - 2;
+ ASSERT(leaf_len > 0);
+ spec = *(uint32_t *)bp->b_rptr;
+ bp->b_rptr += 4;
+ lang_id = *(uint32_t *)bp->b_rptr;
+ bp->b_rptr += 4;
+ desc_entry = pd->pd_tl[n].tl_desc_entry;
+ } else {
+ /* return success anyway, but with tl_cnt < tl_num */
+ spec = lang_id = desc_entry = 0;
+ leaf_len = 0;
+ }
+
+ /* copyout the results */
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ tl32.tl_cnt = pd->pd_tl_next;
+ tl32.tl_desc_entry = desc_entry;
+ tl32.tl_rlen = leaf_len;
+ tl32.tl_spec = spec;
+ tl32.tl_lang_id = lang_id;
+ if (ddi_copyout(&tl32, arg, sizeof (tl32), mode) != 0) {
+ ret = EFAULT;
+ } else if (bp && ddi_copyout(bp->b_rptr,
+ (void *)(uintptr_t)tl32.tl_data,
+ 4 * min(tl32.tl_len, tl32.tl_rlen), mode) != 0) {
+ ret = EFAULT;
+ }
+ } else {
+#endif
+ tl.tl_cnt = pd->pd_tl_next;
+ tl.tl_desc_entry = desc_entry;
+ tl.tl_rlen = leaf_len;
+ tl.tl_spec = spec;
+ tl.tl_lang_id = lang_id;
+ if (ddi_copyout(&tl, arg, sizeof (tl), mode) != 0) {
+ ret = EFAULT;
+ } else if (bp && ddi_copyout(bp->b_rptr, tl.tl_data,
+ 4 * min(tl.tl_len, tl.tl_rlen), mode) != 0) {
+ ret = EFAULT;
+ }
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ rw_exit(&crp->cr_rwlock);
+
+ freemsg(bp);
+
+ AV1394_TNF_EXIT(av1394_ioctl_node_get_text_leaf);
+ return (ret);
+}
+
+
+/*
+ *
+ * --- configROM parsing
+ *
+ * Parse entire configROM. Only extract information that interests us.
+ * ConfigROM integrity checks are only made to ensure correct parsing.
+ */
+static int
+av1394_cfgrom_parse_rom(av1394_inst_t *avp)
+{
+ av1394_cfgrom_t *crp = &avp->av_a.a_cfgrom;
+ cmd1394_cmd_t *cmd;
+ uint32_t val;
+ uint64_t root_addr; /* root dir address */
+ uint16_t root_len; /* root dir length */
+ av1394_cfgrom_parse_arg_t pa;
+ int err;
+ int ret;
+
+ ASSERT(crp->cr_parsed == B_FALSE);
+
+ err = t1394_alloc_cmd(avp->av_t1394_hdl, 0, &cmd);
+ if (err != DDI_SUCCESS) {
+ return (ENOMEM);
+ }
+
+ /* skip info_len quadlets to get root dir address and length */
+ AV1394_CFGROM_RQ(avp, cmd, AV1394_CFGROM_INFO_LEN_ADDR, &val);
+ root_addr = IEEE1394_CONFIG_ROM_ADDR + 4 + (val >> 24) * 4;
+ AV1394_CFGROM_RQ(avp, cmd, root_addr, &val);
+ root_len = IEEE1212_DIR_LEN(val);
+
+ /* parse root dir and everything underneath */
+ pa.pa_depth = 0;
+ pa.pa_desc_entry = 0;
+ pa.pa_parent_k = 0;
+ pa.pa_addr = root_addr + 4;
+ pa.pa_len = root_len;
+ pa.pa_dir = &crp->cr_root_dir;
+
+ ret = av1394_cfgrom_parse_dir(avp, cmd, &pa);
+
+catch:
+ if (ret == 0) {
+ crp->cr_parsed = B_TRUE;
+ } else {
+ av1394_cfgrom_unparse_rom(avp);
+ }
+ err = t1394_free_cmd(avp->av_t1394_hdl, 0, &cmd);
+ ASSERT(err == DDI_SUCCESS);
+
+ return (ret);
+}
+
+/*
+ * parse a directory
+ */
+static int
+av1394_cfgrom_parse_dir(av1394_inst_t *avp, cmd1394_cmd_t *cmd,
+ av1394_cfgrom_parse_arg_t *pa)
+{
+ av1394_cfgrom_t *crp = &avp->av_a.a_cfgrom;
+ int i;
+ uint64_t entry_addr;
+ uint32_t entry;
+ uint64_t leaf_addr;
+ uint64_t dir_addr;
+ uint16_t dir_len;
+ uint8_t t, k;
+ uint16_t v;
+ uint32_t val;
+ av1394_cfgrom_parse_arg_t this_pa;
+ int ret = 0;
+
+ /* safeguard against deep recursion */
+ if (pa->pa_depth > AV1394_CFGROM_PARSE_MAX_DEPTH) {
+ return (ENOMEM);
+ }
+
+ /* initialize parse arguments */
+ this_pa.pa_depth = pa->pa_depth + 1;
+ this_pa.pa_desc_entry = pa->pa_desc_entry;
+
+ /* walk dir entries */
+ entry_addr = pa->pa_addr;
+ for (i = 0; i < pa->pa_len; i++) {
+ AV1394_CFGROM_RQ(avp, cmd, entry_addr, &entry);
+
+ CFGROM_TYPE_KEY_VALUE(entry, t, k, v);
+ if ((t == IEEE1212_LEAF_TYPE) &&
+ (k == IEEE1212_TEXTUAL_DESCRIPTOR)) {
+ /* save this leaf */
+ leaf_addr = entry_addr + 4 * v;
+ av1394_cfgrom_add_text_leaf(avp, pa->pa_dir,
+ leaf_addr, this_pa.pa_desc_entry);
+ } else if (t == IEEE1212_DIRECTORY_TYPE) {
+ dir_addr = entry_addr + 4 * v;
+ AV1394_CFGROM_RQ(avp, cmd, dir_addr, &val);
+ dir_len = IEEE1212_DIR_LEN(val);
+
+ /* parse this dir */
+ this_pa.pa_parent_k = k;
+ this_pa.pa_addr = dir_addr + 4;
+ this_pa.pa_len = dir_len;
+ /* leaves will be added to either root or unit array */
+ if (k == IEEE1212_UNIT_DIRECTORY) {
+ this_pa.pa_dir = &crp->cr_unit_dir;
+ } else {
+ this_pa.pa_dir = pa->pa_dir;
+ }
+
+ ret = av1394_cfgrom_parse_dir(avp, cmd, &this_pa);
+ if (ret != 0) {
+ goto catch;
+ }
+ }
+
+		/*
+		 * if we're walking a Textual_Descriptor directory, the
+		 * described entry is the one preceding the directory's own
+		 * entry, so we need to preserve what was passed in
+		 * pa->pa_desc_entry
+		 */
+ if (pa->pa_parent_k != IEEE1212_TEXTUAL_DESCRIPTOR) {
+ this_pa.pa_desc_entry = entry;
+ }
+ entry_addr += 4;
+ }
+
+catch:
+ return (ret);
+}
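+
+/*
+ * Example of the desc_entry bookkeeping above: when a root directory
+ * entry N is immediately followed by a Textual_Descriptor leaf entry,
+ * the leaf is recorded with tl_desc_entry set to entry N, letting
+ * applications tell which entry a given text leaf describes.
+ */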
+
+/*ARGSUSED*/
+static void
+av1394_cfgrom_add_text_leaf(av1394_inst_t *avp, av1394_cfgrom_parsed_dir_t *pd,
+ uint64_t addr, uint32_t desc_entry)
+{
+	/* grow the array if needed */
+ if (pd->pd_tl_next >= pd->pd_tl_size) {
+ av1394_cfgrom_grow_parsed_dir(pd, 2);
+ }
+ pd->pd_tl[pd->pd_tl_next].tl_addr = addr;
+ pd->pd_tl[pd->pd_tl_next].tl_desc_entry = desc_entry;
+ pd->pd_tl_next++;
+}
+
+/*
+ * this routine cleans up after av1394_cfgrom_parse()
+ */
+static void
+av1394_cfgrom_unparse_rom(av1394_inst_t *avp)
+{
+ av1394_cfgrom_t *crp = &avp->av_a.a_cfgrom;
+ av1394_cfgrom_parsed_dir_t *pd;
+
+ pd = &crp->cr_root_dir;
+ if (pd->pd_tl) {
+ kmem_free(pd->pd_tl, pd->pd_tl_size * sizeof (*pd->pd_tl));
+ bzero(pd, sizeof (*pd));
+ }
+ pd = &crp->cr_unit_dir;
+ if (pd->pd_tl) {
+ kmem_free(pd->pd_tl, pd->pd_tl_size * sizeof (*pd->pd_tl));
+ bzero(pd, sizeof (*pd));
+ }
+ crp->cr_parsed = B_FALSE;
+}
+
+/*
+ * grow parsed dir leaf array by 'cnt' entries
+ */
+static void
+av1394_cfgrom_grow_parsed_dir(av1394_cfgrom_parsed_dir_t *pd, int cnt)
+{
+ int new_size;
+ void *new_tl;
+
+ ASSERT(cnt > 0);
+
+ new_size = (pd->pd_tl_size + cnt) * sizeof (av1394_cfgrom_text_leaf_t);
+ new_tl = kmem_zalloc(new_size, KM_SLEEP);
+ if (pd->pd_tl_size > 0) {
+ bcopy(pd->pd_tl, new_tl, pd->pd_tl_size * sizeof (*pd->pd_tl));
+ kmem_free(pd->pd_tl, pd->pd_tl_size * sizeof (*pd->pd_tl));
+ }
+ pd->pd_tl = new_tl;
+ pd->pd_tl_size += cnt;
+}
+
+static int
+av1394_cfgrom_read_leaf(av1394_inst_t *avp, uint64_t leaf_addr, mblk_t **bpp)
+{
+ cmd1394_cmd_t *cmd;
+ uint64_t addr;
+ uint32_t val;
+ int leaf_len; /* leaf length in quadlets */
+ mblk_t *bp = NULL;
+ int i;
+ int err;
+ int ret = 0;
+
+ err = t1394_alloc_cmd(avp->av_t1394_hdl, 0, &cmd);
+	if (err != DDI_SUCCESS) {
+		return (ENOMEM);
+	}
+
+ /* read leaf length */
+ AV1394_CFGROM_RQ(avp, cmd, leaf_addr, &val);
+ leaf_len = IEEE1212_DIR_LEN(val);
+
+ if (leaf_len < 3) {
+ ret = EIO;
+ goto catch;
+ }
+
+	if ((bp = allocb(leaf_len * 4, BPRI_HI)) == NULL) {
+		TNF_PROBE_0(av1394_cfgrom_read_leaf_error_allocb,
+		    AV1394_TNF_ASYNC_ERROR, "");
+		ret = ENOMEM;
+		goto catch;
+	}
+
+ /* read leaf value */
+ addr = leaf_addr + 4;
+ for (i = 0; i < leaf_len; i++) {
+ AV1394_CFGROM_RQ(avp, cmd, addr, (uint32_t *)bp->b_wptr);
+ bp->b_wptr += 4;
+ addr += 4;
+ }
+
+catch:
+ if (ret == 0) {
+ *bpp = bp;
+ } else {
+ freemsg(bp);
+ }
+ err = t1394_free_cmd(avp->av_t1394_hdl, 0, &cmd);
+ ASSERT(err == DDI_SUCCESS);
+
+ return (ret);
+}
+
+/*
+ *
+ * --- routines involving bus transactions
+ *
+ */
+static int
+av1394_cfgrom_rq(av1394_inst_t *avp, cmd1394_cmd_t *cmd, uint64_t addr,
+ uint32_t *rval)
+{
+ int err;
+
+ cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
+ cmd->cmd_options = CMD1394_BLOCKING;
+ cmd->cmd_addr = addr;
+
+ err = t1394_read(avp->av_t1394_hdl, cmd);
+ if ((err == DDI_SUCCESS) && (cmd->cmd_result == CMD1394_CMDSUCCESS)) {
+ *rval = cmd->cmd_u.q.quadlet_data;
+ return (0);
+ } else {
+ TNF_PROBE_2(av1394_cfgrom_rq_error,
+ AV1394_TNF_ASYNC_ERROR, "", tnf_int, err, err,
+ tnf_int, result, cmd->cmd_result);
+ return (EIO);
+ }
+}
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394_cmp.c b/usr/src/uts/common/io/1394/targets/av1394/av1394_cmp.c
new file mode 100644
index 0000000000..1e731893f5
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394_cmp.c
@@ -0,0 +1,792 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * av1394 CMP (Connection Management Procedures)
+ */
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+/* configuration routines */
+static void av1394_cmp_cleanup(av1394_inst_t *icp);
+
+/* ioctl routines */
+static int av1394_ioctl_plug_init_local(av1394_inst_t *,
+ iec61883_plug_init_t *);
+static int av1394_ioctl_plug_init_remote(av1394_inst_t *,
+ iec61883_plug_init_t *);
+
+/* local PCR routines */
+static int av1394_pcr_init(av1394_inst_t *, int, uint32_t);
+static void av1394_pcr_fini(av1394_inst_t *, int);
+static int av1394_pcr_alloc_addr(av1394_inst_t *, uint64_t,
+ t1394_addr_handle_t *);
+static void av1394_pcr_free_addr(av1394_inst_t *, t1394_addr_handle_t *);
+static int av1394_pcr_make_ph(int, int, int);
+static int av1394_pcr_ph2idx(int);
+static av1394_pcr_t *av1394_pcr_ph2pcr(av1394_cmp_t *, int);
+static uint64_t av1394_pcr_idx2addr(int);
+static int av1394_pcr_idx2num(int);
+static boolean_t av1394_pcr_idx_is_mpr(int);
+static boolean_t av1394_pcr_ph_is_mpr(int);
+static boolean_t av1394_pcr_ph_is_remote(int);
+
+/* callbacks */
+static void av1394_pcr_recv_read_request(cmd1394_cmd_t *);
+static void av1394_pcr_recv_lock_request(cmd1394_cmd_t *);
+
+/* remote PCR routines */
+static int av1394_pcr_remote_read(av1394_inst_t *, int, uint32_t *);
+static int av1394_pcr_remote_cas(av1394_inst_t *, int, uint32_t *,
+ uint32_t, uint32_t);
+
+#define AV1394_TNF_ENTER(func) \
+ TNF_PROBE_0_DEBUG(func##_enter, AV1394_TNF_CMP_STACK, "");
+
+#define AV1394_TNF_EXIT(func) \
+ TNF_PROBE_0_DEBUG(func##_exit, AV1394_TNF_CMP_STACK, "");
+
+
+int
+av1394_cmp_init(av1394_inst_t *avp)
+{
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+ ddi_iblock_cookie_t ibc = avp->av_attachinfo.iblock_cookie;
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_cmp_init);
+
+ ret = t1394_cmp_register(avp->av_t1394_hdl, NULL, 0);
+
+ if (ret == DDI_SUCCESS) {
+ rw_init(&cmp->cmp_pcr_rwlock, NULL, RW_DRIVER, ibc);
+ }
+
+ AV1394_TNF_EXIT(av1394_cmp_init);
+ return (ret);
+}
+
+void
+av1394_cmp_fini(av1394_inst_t *avp)
+{
+ AV1394_TNF_ENTER(av1394_cmp_fini);
+
+ av1394_cmp_cleanup(avp);
+
+ AV1394_TNF_EXIT(av1394_cmp_fini);
+}
+
+void
+av1394_cmp_bus_reset(av1394_inst_t *avp)
+{
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+ int i;
+
+ AV1394_TNF_ENTER(av1394_cmp_bus_reset);
+
+	/*
+	 * reset PCR values: on bus reset, IEC 61883-1 clears the
+	 * connection fields of every PCR but preserves the master
+	 * plug registers, so the MPR indices are skipped below
+	 */
+ rw_enter(&cmp->cmp_pcr_rwlock, RW_WRITER);
+ for (i = 0; i < NELEM(cmp->cmp_pcr); i++) {
+ if ((i == AV1394_OMPR_IDX) || (i == AV1394_IMPR_IDX)) {
+ continue;
+ }
+ if (cmp->cmp_pcr[i]) {
+ if (i < AV1394_IMPR_IDX) {
+ cmp->cmp_pcr[i]->pcr_val &=
+ ~AV1394_OPCR_BR_CLEAR_MASK;
+ } else {
+ cmp->cmp_pcr[i]->pcr_val &=
+ ~AV1394_IPCR_BR_CLEAR_MASK;
+ }
+ }
+ }
+ rw_exit(&cmp->cmp_pcr_rwlock);
+
+ AV1394_TNF_EXIT(av1394_cmp_bus_reset);
+}
+
+/*
+ * on close, free iPCRs and oPCRs not finalized by application
+ */
+void
+av1394_cmp_close(av1394_inst_t *avp)
+{
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+ int i;
+
+ rw_enter(&cmp->cmp_pcr_rwlock, RW_WRITER);
+ for (i = 0; i < NELEM(cmp->cmp_pcr); i++) {
+ if ((i == AV1394_OMPR_IDX) || (i == AV1394_IMPR_IDX)) {
+ continue;
+ }
+ if (cmp->cmp_pcr[i]) {
+ av1394_pcr_fini(avp, i);
+ }
+ }
+ rw_exit(&cmp->cmp_pcr_rwlock);
+}
+
+/*
+ *
+ * --- ioctls
+ *
+ * IEC61883_PLUG_INIT
+ */
+int
+av1394_ioctl_plug_init(av1394_inst_t *avp, void *arg, int mode)
+{
+ int ret = 0;
+ iec61883_plug_init_t pi;
+
+ if (ddi_copyin(arg, &pi, sizeof (pi), mode) != 0) {
+ return (EFAULT);
+ }
+
+ /* check arguments */
+ if (((pi.pi_type != IEC61883_PLUG_IN) &&
+ (pi.pi_type != IEC61883_PLUG_OUT) &&
+ (pi.pi_type != IEC61883_PLUG_MASTER_IN) &&
+ (pi.pi_type != IEC61883_PLUG_MASTER_OUT)) ||
+ (((pi.pi_num < 0) || (pi.pi_num >= AV1394_NPCR)) &&
+ (pi.pi_num != IEC61883_PLUG_ANY))) {
+ return (EINVAL);
+ }
+
+ if (pi.pi_loc == IEC61883_LOC_LOCAL) {
+ ret = av1394_ioctl_plug_init_local(avp, &pi);
+ } else if (pi.pi_loc == IEC61883_LOC_REMOTE) {
+ ret = av1394_ioctl_plug_init_remote(avp, &pi);
+ } else {
+ ret = EINVAL;
+ }
+
+ if (ret == 0) {
+ if (ddi_copyout(&pi, arg, sizeof (pi), mode) != 0) {
+ ret = EFAULT;
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * IEC61883_PLUG_FINI
+ */
+/*ARGSUSED*/
+int
+av1394_ioctl_plug_fini(av1394_inst_t *avp, void *arg, int mode)
+{
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+ int ret;
+ int ph;
+
+ ph = (int)(intptr_t)arg;
+
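+	/* remote plugs and master plugs have no local state to release */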
+ if (av1394_pcr_ph_is_remote(ph) || av1394_pcr_ph_is_mpr(ph)) {
+ return (0);
+ }
+
+ rw_enter(&cmp->cmp_pcr_rwlock, RW_WRITER);
+ if (av1394_pcr_ph2pcr(cmp, ph) != NULL) {
+ av1394_pcr_fini(avp, av1394_pcr_ph2idx(ph));
+ ret = 0;
+ } else {
+ ret = EINVAL;
+ }
+ rw_exit(&cmp->cmp_pcr_rwlock);
+
+ return (ret);
+}
+
+/*
+ * IEC61883_PLUG_REG_READ
+ */
+int
+av1394_ioctl_plug_reg_read(av1394_inst_t *avp, void *arg, int mode)
+{
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+ int ret = 0;
+ iec61883_plug_reg_val_t pr;
+ int ph;
+ av1394_pcr_t *pcr;
+
+ if (ddi_copyin(arg, &pr, sizeof (pr), mode) != 0) {
+ return (EFAULT);
+ }
+ ph = pr.pr_handle;
+
+ if (av1394_pcr_ph_is_remote(ph)) {
+ ret = av1394_pcr_remote_read(avp, ph, &pr.pr_val);
+ } else {
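+		/*
+		 * master plugs are managed by the 1394 framework;
+		 * local PCRs live in our own cmp_pcr array
+		 */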
+ switch (av1394_pcr_ph2idx(ph)) {
+ case AV1394_OMPR_IDX:
+ ret = t1394_cmp_read(avp->av_t1394_hdl, T1394_CMP_OMPR,
+ &pr.pr_val);
+ break;
+ case AV1394_IMPR_IDX:
+ ret = t1394_cmp_read(avp->av_t1394_hdl, T1394_CMP_IMPR,
+ &pr.pr_val);
+ break;
+ default:
+ rw_enter(&cmp->cmp_pcr_rwlock, RW_READER);
+ if ((pcr = av1394_pcr_ph2pcr(cmp, ph)) != NULL) {
+ pr.pr_val = pcr->pcr_val;
+ } else {
+ ret = EINVAL;
+ }
+ rw_exit(&cmp->cmp_pcr_rwlock);
+ }
+ }
+
+ if (ret == 0) {
+ if (ddi_copyout(&pr, arg, sizeof (pr), mode) != 0) {
+ ret = EFAULT;
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * IEC61883_PLUG_REG_CAS
+ */
+int
+av1394_ioctl_plug_reg_cas(av1394_inst_t *avp, void *arg, int mode)
+{
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+ int ret = 0;
+ iec61883_plug_reg_lock_t pl;
+ int ph;
+ av1394_pcr_t *pcr;
+
+ if (ddi_copyin(arg, &pl, sizeof (pl), mode) != 0) {
+ return (EFAULT);
+ }
+ ph = pl.pl_handle;
+
+ if (av1394_pcr_ph_is_remote(ph)) {
+ ret = av1394_pcr_remote_cas(avp, ph,
+ &pl.pl_old, pl.pl_data, pl.pl_arg);
+ } else {
+ switch (av1394_pcr_ph2idx(ph)) {
+ case AV1394_OMPR_IDX:
+ ret = t1394_cmp_cas(avp->av_t1394_hdl, T1394_CMP_OMPR,
+ pl.pl_arg, pl.pl_data, &pl.pl_old);
+ break;
+ case AV1394_IMPR_IDX:
+ ret = t1394_cmp_cas(avp->av_t1394_hdl, T1394_CMP_IMPR,
+ pl.pl_arg, pl.pl_data, &pl.pl_old);
+ break;
+ default:
+ rw_enter(&cmp->cmp_pcr_rwlock, RW_WRITER);
+ if ((pcr = av1394_pcr_ph2pcr(cmp, ph)) != NULL) {
+ /* compare_swap */
+ pl.pl_old = pcr->pcr_val;
+ if (pcr->pcr_val == pl.pl_arg) {
+ pcr->pcr_val = pl.pl_data;
+ }
+ } else {
+ ret = EINVAL;
+ }
+ rw_exit(&cmp->cmp_pcr_rwlock);
+ }
+ }
+
+ if (ret == 0) {
+ if (ddi_copyout(&pl, arg, sizeof (pl), mode) != 0) {
+ ret = EFAULT;
+ }
+ }
+
+ return (ret);
+}
+
+
+/*
+ *
+ * --- configuration routines
+ *
+ */
+static void
+av1394_cmp_cleanup(av1394_inst_t *avp)
+{
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+ int i;
+
+ rw_enter(&cmp->cmp_pcr_rwlock, RW_WRITER);
+ for (i = 0; i < NELEM(cmp->cmp_pcr); i++) {
+ if (cmp->cmp_pcr[i]) {
+ av1394_pcr_fini(avp, i);
+ }
+ }
+ rw_exit(&cmp->cmp_pcr_rwlock);
+ rw_destroy(&cmp->cmp_pcr_rwlock);
+ (void) t1394_cmp_unregister(avp->av_t1394_hdl);
+}
+
+
+/*
+ *
+ * --- ioctl routines
+ *
+ * IEC61883_PLUG_INIT for local plugs
+ */
+static int
+av1394_ioctl_plug_init_local(av1394_inst_t *avp, iec61883_plug_init_t *pip)
+{
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+	int		err = DDI_SUCCESS;
+ int ph; /* plug handle */
+ int idx, max_idx; /* plug index */
+
+	/* MPRs are a special case */
+ if ((pip->pi_type == IEC61883_PLUG_MASTER_IN) ||
+ (pip->pi_type == IEC61883_PLUG_MASTER_OUT)) {
+ pip->pi_handle = av1394_pcr_make_ph(pip->pi_loc,
+ pip->pi_type, 0);
+ return (0);
+ }
+
+ /* PCR */
+ rw_enter(&cmp->cmp_pcr_rwlock, RW_WRITER);
+ if (pip->pi_num == IEC61883_PLUG_ANY) {
+ if (pip->pi_type == IEC61883_PLUG_OUT) {
+ idx = AV1394_OPCR0_IDX;
+ max_idx = idx + AV1394_PCR_ADDR_NOPCR - 1;
+ } else {
+ ASSERT(pip->pi_type == IEC61883_PLUG_IN);
+ idx = AV1394_IPCR0_IDX;
+ max_idx = idx + AV1394_PCR_ADDR_NIPCR - 1;
+ }
+
+ /* find unused PCR */
+ for (; idx <= max_idx; idx++) {
+ if (cmp->cmp_pcr[idx] != NULL) {
+ continue;
+ }
+ err = av1394_pcr_init(avp, idx, AV1394_PCR_INIT_VAL);
+ if (err == DDI_SUCCESS) {
+ break;
+ }
+ }
+ } else {
+ ph = av1394_pcr_make_ph(pip->pi_loc, pip->pi_type, pip->pi_num);
+ idx = max_idx = av1394_pcr_ph2idx(ph);
+
+		/* create the PCR if it does not exist yet */
+ if (cmp->cmp_pcr[idx] == NULL) {
+ err = av1394_pcr_init(avp, idx, AV1394_PCR_INIT_VAL);
+ }
+ }
+
+ rw_exit(&cmp->cmp_pcr_rwlock);
+
+ if ((err != DDI_SUCCESS) || (idx > max_idx)) {
+ return (EBUSY);
+ }
+ pip->pi_rnum = av1394_pcr_idx2num(idx);
+ pip->pi_handle = av1394_pcr_make_ph(pip->pi_loc, pip->pi_type,
+ pip->pi_rnum);
+
+ return (0);
+}
+
+/*
+ * IEC61883_PLUG_INIT for remote plugs
+ */
+static int
+av1394_ioctl_plug_init_remote(av1394_inst_t *avp, iec61883_plug_init_t *pip)
+{
+ int ph;
+ uint32_t val;
+ int ret;
+
+ if (pip->pi_num == IEC61883_PLUG_ANY) {
+ return (EINVAL);
+ }
+
+ ph = av1394_pcr_make_ph(pip->pi_loc, pip->pi_type, pip->pi_num);
+
+	/* check PCR existence by attempting to read it */
+ if ((ret = av1394_pcr_remote_read(avp, ph, &val)) == 0) {
+ pip->pi_handle = ph;
+ pip->pi_rnum = pip->pi_num;
+ }
+
+ return (ret);
+}
+
+
+/*
+ *
+ * --- plug routines
+ *
+ * initialize a PCR
+ */
+static int
+av1394_pcr_init(av1394_inst_t *avp, int idx, uint32_t val)
+{
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+ av1394_pcr_t *pcr;
+ uint64_t addr;
+ int ret;
+
+ pcr = kmem_zalloc(sizeof (av1394_pcr_t), KM_SLEEP);
+ pcr->pcr_val = val;
+ cmp->cmp_pcr[idx] = pcr;
+
+ addr = av1394_pcr_idx2addr(idx);
+ ret = av1394_pcr_alloc_addr(avp, addr, &pcr->pcr_addr_hdl);
+ if (ret != DDI_SUCCESS) {
+ kmem_free(pcr, sizeof (av1394_pcr_t));
+ cmp->cmp_pcr[idx] = NULL;
+ }
+
+ return (ret);
+}
+
+/*
+ * finalize a PCR
+ */
+static void
+av1394_pcr_fini(av1394_inst_t *avp, int idx)
+{
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+
+ av1394_pcr_free_addr(avp, &cmp->cmp_pcr[idx]->pcr_addr_hdl);
+ kmem_free(cmp->cmp_pcr[idx], sizeof (av1394_pcr_t));
+ cmp->cmp_pcr[idx] = NULL;
+}
+
+/*
+ * allocate CSR address for a PCR
+ */
+static int
+av1394_pcr_alloc_addr(av1394_inst_t *avp, uint64_t addr,
+ t1394_addr_handle_t *hdlp)
+{
+ t1394_alloc_addr_t aa;
+ int ret;
+ int result;
+
+ AV1394_TNF_ENTER(av1394_pcr_addr_alloc);
+
+ bzero(&aa, sizeof (aa));
+ aa.aa_address = addr;
+ aa.aa_length = 4;
+ aa.aa_type = T1394_ADDR_FIXED;
+ aa.aa_enable = T1394_ADDR_RDENBL | T1394_ADDR_LKENBL;
+ aa.aa_evts.recv_read_request = av1394_pcr_recv_read_request;
+ aa.aa_evts.recv_lock_request = av1394_pcr_recv_lock_request;
+ aa.aa_arg = avp;
+
+ ret = t1394_alloc_addr(avp->av_t1394_hdl, &aa, 0, &result);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_2(av1394_pcr_alloc_addr_error, AV1394_TNF_CMP_ERROR,
+ "", tnf_int, ret, ret, tnf_int, result, result);
+ } else {
+ *hdlp = aa.aa_hdl;
+ }
+
+ AV1394_TNF_EXIT(av1394_pcr_addr_alloc);
+ return (ret);
+}
+
+/*
+ * free CSR address occupied by a PCR
+ */
+static void
+av1394_pcr_free_addr(av1394_inst_t *avp, t1394_addr_handle_t *hdlp)
+{
+ int ret;
+
+ ret = t1394_free_addr(avp->av_t1394_hdl, hdlp, 0);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_pcr_free_addr_error, AV1394_TNF_CMP_ERROR,
+ "", tnf_int, ret, ret);
+ }
+}
+
+/*
+ * make plug handle. range checking should be performed by caller
+ */
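+/*
+ * Handle layout (an implementation note; the index constants are
+ * assumed to follow the IEC 61883 CSR order implied by
+ * av1394_pcr_idx2num() below): the low bits hold the PCR index --
+ * oMPR, oPCR0..30, iMPR, iPCR0..30 -- and the AV1394_PCR_REMOTE bit
+ * marks a remote plug.
+ */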
+static int
+av1394_pcr_make_ph(int loc, int type, int num)
+{
+ int ph;
+
+ switch (type) {
+ case IEC61883_PLUG_IN:
+ ph = num + AV1394_IPCR0_IDX;
+ break;
+ case IEC61883_PLUG_OUT:
+ ph = num + AV1394_OPCR0_IDX;
+ break;
+ case IEC61883_PLUG_MASTER_IN:
+ ph = AV1394_IMPR_IDX;
+ break;
+ case IEC61883_PLUG_MASTER_OUT:
+ ph = AV1394_OMPR_IDX;
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ if (loc == IEC61883_LOC_REMOTE) {
+ ph |= AV1394_PCR_REMOTE;
+ }
+
+ return (ph);
+}
+
+/*
+ * convert plug handle to PCR index
+ */
+static int
+av1394_pcr_ph2idx(int ph)
+{
+ return (ph & ~AV1394_PCR_REMOTE);
+}
+
+/*
+ * convert plug handle to PCR pointer
+ */
+static av1394_pcr_t *
+av1394_pcr_ph2pcr(av1394_cmp_t *cmp, int ph)
+{
+ int idx = av1394_pcr_ph2idx(ph);
+
+ if ((idx >= 0) && (idx < NELEM(cmp->cmp_pcr))) {
+ return (cmp->cmp_pcr[idx]);
+ } else {
+ return (NULL);
+ }
+}
+
+/*
+ * convert PCR index to CSR address
+ */
+static uint64_t
+av1394_pcr_idx2addr(int idx)
+{
+ return (AV1394_PCR_ADDR_START + idx * 4);
+}
+
+/*
+ * convert PCR index to number
+ */
+static int
+av1394_pcr_idx2num(int idx)
+{
+ ASSERT(!av1394_pcr_idx_is_mpr(idx));
+
+ return ((idx - 1) % 32);
+}
+
+/*
+ * returns B_TRUE if a master plug
+ */
+static boolean_t
+av1394_pcr_idx_is_mpr(int idx)
+{
+ return (idx % 32 == 0);
+}
+
+static boolean_t
+av1394_pcr_ph_is_mpr(int ph)
+{
+ return (av1394_pcr_ph2idx(ph) % 32 == 0);
+}
+
+/*
+ * returns B_TRUE if a remote plug
+ */
+static boolean_t
+av1394_pcr_ph_is_remote(int ph)
+{
+ return ((ph & AV1394_PCR_REMOTE) != 0);
+}
+
+
+/*
+ *
+ * --- callbacks
+ *
+ */
+static void
+av1394_pcr_recv_read_request(cmd1394_cmd_t *req)
+{
+ av1394_inst_t *avp = req->cmd_callback_arg;
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+ int idx; /* PCR index */
+ av1394_pcr_t *pcr;
+ int err;
+
+ AV1394_TNF_ENTER(av1394_pcr_recv_read_request);
+
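+	/* PCRs are contiguous quadlets, so the CSR offset yields the index */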
+ idx = (req->cmd_addr - AV1394_PCR_ADDR_START) / 4;
+
+ if (req->cmd_type != CMD1394_ASYNCH_RD_QUAD) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+	} else if ((idx >= NELEM(cmp->cmp_pcr)) ||
+ ((pcr = cmp->cmp_pcr[idx]) == NULL)) {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ } else {
+ /* read */
+ rw_enter(&cmp->cmp_pcr_rwlock, RW_READER);
+ req->cmd_u.q.quadlet_data = pcr->pcr_val;
+ rw_exit(&cmp->cmp_pcr_rwlock);
+
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ }
+
+ err = t1394_recv_request_done(avp->av_t1394_hdl, req, 0);
+ if (err != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_pcr_recv_read_request_done_error,
+ AV1394_TNF_CMP_ERROR, "", tnf_int, err, err);
+ }
+
+ AV1394_TNF_EXIT(av1394_pcr_recv_read_request);
+}
+
+static void
+av1394_pcr_recv_lock_request(cmd1394_cmd_t *req)
+{
+ av1394_inst_t *avp = req->cmd_callback_arg;
+ av1394_cmp_t *cmp = &avp->av_i.i_cmp;
+ int idx; /* PCR index */
+ av1394_pcr_t *pcr;
+ int err;
+
+ AV1394_TNF_ENTER(av1394_pcr_recv_lock_request);
+
+ idx = (req->cmd_addr - AV1394_PCR_ADDR_START) / 4;
+
+ if ((req->cmd_type != CMD1394_ASYNCH_LOCK_32) ||
+ (req->cmd_u.l32.lock_type != CMD1394_LOCK_COMPARE_SWAP)) {
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+	} else if ((idx >= NELEM(cmp->cmp_pcr)) ||
+ ((pcr = cmp->cmp_pcr[idx]) == NULL)) {
+ req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
+ } else {
+ /* compare_swap */
+ rw_enter(&cmp->cmp_pcr_rwlock, RW_WRITER);
+ if (pcr->pcr_val == req->cmd_u.l32.arg_value) {
+ pcr->pcr_val = req->cmd_u.l32.data_value;
+ }
+ req->cmd_u.l32.old_value = pcr->pcr_val;
+ rw_exit(&cmp->cmp_pcr_rwlock);
+
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ }
+
+ err = t1394_recv_request_done(avp->av_t1394_hdl, req, 0);
+ if (err != DDI_SUCCESS) {
+ TNF_PROBE_2(av1394_pcr_recv_lock_request_done_error,
+ AV1394_TNF_CMP_ERROR, "", tnf_int, err, err,
+ tnf_int, result, req->cmd_result);
+ }
+
+ AV1394_TNF_EXIT(av1394_pcr_recv_lock_request);
+}
+
+
+/*
+ *
+ * --- remote PCR routines
+ *
+ * read specified PCR on the remote node
+ */
+static int
+av1394_pcr_remote_read(av1394_inst_t *avp, int ph, uint32_t *valp)
+{
+ cmd1394_cmd_t *cmd;
+ int ret = 0;
+ int err;
+
+ ret = t1394_alloc_cmd(avp->av_t1394_hdl, 0, &cmd);
+ if (ret != DDI_SUCCESS) {
+ return (ENOMEM);
+ }
+
+ cmd->cmd_addr = av1394_pcr_idx2addr(av1394_pcr_ph2idx(ph));
+ cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
+ cmd->cmd_options = CMD1394_BLOCKING;
+
+ if (((err = t1394_read(avp->av_t1394_hdl, cmd)) == DDI_SUCCESS) &&
+ (cmd->cmd_result == CMD1394_CMDSUCCESS)) {
+ *valp = cmd->cmd_u.q.quadlet_data;
+ } else {
+ TNF_PROBE_2(av1394_pcr_remote_read_error, AV1394_TNF_CMP_ERROR,
+ "", tnf_int, err, err, tnf_int, result, cmd->cmd_result);
+ ret = EIO;
+ }
+
+ err = t1394_free_cmd(avp->av_t1394_hdl, 0, &cmd);
+ ASSERT(err == DDI_SUCCESS);
+
+ return (ret);
+}
+
+/*
+ * compare_swap specified PCR on the remote node
+ */
+static int
+av1394_pcr_remote_cas(av1394_inst_t *avp, int ph, uint32_t *old_valuep,
+ uint32_t data_value, uint32_t arg_value)
+{
+ cmd1394_cmd_t *cmd;
+ int ret = 0;
+ int err;
+
+ ret = t1394_alloc_cmd(avp->av_t1394_hdl, 0, &cmd);
+ if (ret != DDI_SUCCESS) {
+ return (ENOMEM);
+ }
+
+ cmd->cmd_addr = av1394_pcr_idx2addr(av1394_pcr_ph2idx(ph));
+ cmd->cmd_type = CMD1394_ASYNCH_LOCK_32;
+ cmd->cmd_u.l32.lock_type = CMD1394_LOCK_COMPARE_SWAP;
+ cmd->cmd_u.l32.data_value = data_value;
+ cmd->cmd_u.l32.arg_value = arg_value;
+ cmd->cmd_u.l32.num_retries = 0;
+ cmd->cmd_options = CMD1394_BLOCKING;
+
+ if (((err = t1394_lock(avp->av_t1394_hdl, cmd)) == DDI_SUCCESS) &&
+ (cmd->cmd_result == CMD1394_CMDSUCCESS)) {
+ *old_valuep = cmd->cmd_u.l32.old_value;
+ } else {
+ TNF_PROBE_2(av1394_pcr_remote_cas_error, AV1394_TNF_CMP_ERROR,
+ "", tnf_int, err, err, tnf_int, result, cmd->cmd_result);
+ ret = EIO;
+ }
+
+ err = t1394_free_cmd(avp->av_t1394_hdl, 0, &cmd);
+ ASSERT(err == DDI_SUCCESS);
+
+ return (ret);
+}
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394_fcp.c b/usr/src/uts/common/io/1394/targets/av1394/av1394_fcp.c
new file mode 100644
index 0000000000..a551bad1db
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394_fcp.c
@@ -0,0 +1,542 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * av1394 FCP module
+ */
+#include <sys/stat.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+/* configuration routines */
+static int av1394_fcp_ctl_register(av1394_inst_t *);
+static int av1394_fcp_tgt_register(av1394_inst_t *);
+static int av1394_fcp_ctl_alloc_cmd(av1394_inst_t *);
+static void av1394_fcp_ctl_free_cmd(av1394_inst_t *);
+static int av1394_fcp_tgt_alloc_cmd(av1394_inst_t *);
+static void av1394_fcp_tgt_free_cmd(av1394_inst_t *);
+static void av1394_fcp_cleanup(av1394_inst_t *, int);
+
+/* FCP write and completion handling */
+static int av1394_fcp_cmd_write_sync(av1394_inst_t *, av1394_fcp_cmd_t *);
+static void av1394_fcp_cmd_completion_cb(struct cmd1394_cmd *);
+static int av1394_fcp_cmd_write_request_cb(cmd1394_cmd_t *);
+static int av1394_fcp_resp_write_request_cb(cmd1394_cmd_t *);
+static void av1394_fcp_common_write_request_cb(cmd1394_cmd_t *, int);
+
+/* misc routines */
+static int av1394_fcp_copyin_block(iec61883_arq_t *, mblk_t *,
+ struct uio *);
+
+#define AV1394_TNF_ENTER(func) \
+ TNF_PROBE_0_DEBUG(func##_enter, AV1394_TNF_FCP_STACK, "");
+
+#define AV1394_TNF_EXIT(func) \
+ TNF_PROBE_0_DEBUG(func##_exit, AV1394_TNF_FCP_STACK, "");
+
+/*
+ *
+ * --- configuration entry points
+ *
+ */
+int
+av1394_fcp_attach(av1394_inst_t *avp)
+{
+ av1394_fcp_t *fcp = &avp->av_a.a_fcp;
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_fcp_attach);
+
+ /* register FCP controller */
+ if ((ret = av1394_fcp_ctl_register(avp)) != DDI_SUCCESS) {
+ AV1394_TNF_EXIT(av1394_fcp_attach);
+ return (ret);
+ }
+
+ /* allocate FCP controller command */
+ if ((ret = av1394_fcp_ctl_alloc_cmd(avp)) != DDI_SUCCESS) {
+ av1394_fcp_cleanup(avp, 1);
+ AV1394_TNF_EXIT(av1394_fcp_attach);
+ return (ret);
+ }
+
+ /* register FCP target */
+ if ((ret = av1394_fcp_tgt_register(avp)) != DDI_SUCCESS) {
+ av1394_fcp_cleanup(avp, 2);
+ AV1394_TNF_EXIT(av1394_fcp_attach);
+ return (ret);
+ }
+
+ /* allocate FCP target command */
+ if ((ret = av1394_fcp_tgt_alloc_cmd(avp)) != DDI_SUCCESS) {
+ av1394_fcp_cleanup(avp, 3);
+ AV1394_TNF_EXIT(av1394_fcp_attach);
+ return (ret);
+ }
+
+ cv_init(&fcp->fcp_cmd.fc_xmit_cv, NULL, CV_DRIVER, NULL);
+ cv_init(&fcp->fcp_cmd.fc_busy_cv, NULL, CV_DRIVER, NULL);
+ cv_init(&fcp->fcp_resp.fc_xmit_cv, NULL, CV_DRIVER, NULL);
+ cv_init(&fcp->fcp_resp.fc_busy_cv, NULL, CV_DRIVER, NULL);
+
+ AV1394_TNF_EXIT(av1394_fcp_attach);
+ return (ret);
+}
+
+void
+av1394_fcp_detach(av1394_inst_t *avp)
+{
+ AV1394_TNF_ENTER(av1394_fcp_detach);
+
+ av1394_fcp_cleanup(avp, AV1394_CLEANUP_LEVEL_MAX);
+
+ AV1394_TNF_EXIT(av1394_fcp_detach);
+}
+
+int
+av1394_fcp_write(av1394_inst_t *avp, iec61883_arq_t *arq, struct uio *uiop)
+{
+ av1394_async_t *ap = &avp->av_a;
+ av1394_fcp_t *fcp = &ap->a_fcp;
+ int len = arq->arq_len;
+ av1394_fcp_cmd_t *fc;
+ cmd1394_cmd_t *cmd;
+ mblk_t *mp = NULL;
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_fcp_write);
+
+ ASSERT((arq->arq_type == IEC61883_ARQ_FCP_CMD) ||
+ (arq->arq_type == IEC61883_ARQ_FCP_RESP));
+
+ /* check arguments */
+ if ((len == 0) || (len > AV1394_FCP_ARQ_LEN_MAX) ||
+ (len % IEEE1394_QUADLET != 0)) {
+ TNF_PROBE_1(av1394_fcp_write_error,
+ AV1394_TNF_FCP_ERROR, "", tnf_int, len, len);
+ AV1394_TNF_EXIT(av1394_fcp_write);
+ return (EINVAL);
+ }
+
+ /* block write requires an mblk */
+ if (len > IEEE1394_QUADLET) {
+ if ((mp = allocb(len, BPRI_HI)) == NULL) {
+ AV1394_TNF_EXIT(av1394_fcp_write);
+ return (ENOMEM);
+ }
+ if ((ret = av1394_fcp_copyin_block(arq, mp, uiop)) != 0) {
+ freemsg(mp);
+ AV1394_TNF_EXIT(av1394_fcp_write);
+ return (ret);
+ }
+ }
+
+ /* either FCP command or response */
+ fc = (arq->arq_type == IEC61883_ARQ_FCP_CMD) ?
+ &fcp->fcp_cmd : &fcp->fcp_resp;
+
+ /* one command at a time */
+ mutex_enter(&ap->a_mutex);
+ while (fc->fc_busy) {
+ if (cv_wait_sig(&fc->fc_busy_cv, &ap->a_mutex) == 0) {
+ mutex_exit(&ap->a_mutex);
+ freemsg(mp);
+ AV1394_TNF_EXIT(av1394_fcp_write);
+ return (EINTR);
+ }
+ }
+ fc->fc_busy = B_TRUE;
+
+ /* prepare command */
+ cmd = fc->fc_cmd;
+ if (len == IEEE1394_QUADLET) {
+ cmd->cmd_type = CMD1394_ASYNCH_WR_QUAD;
+ cmd->cmd_u.q.quadlet_data = arq->arq_data.quadlet;
+ } else {
+ cmd->cmd_type = CMD1394_ASYNCH_WR_BLOCK;
+ cmd->cmd_u.b.data_block = mp;
+ cmd->cmd_u.b.blk_length = len;
+ }
+
+ /* do the write and wait for completion */
+ ret = av1394_fcp_cmd_write_sync(avp, fc);
+
+ /* not busy anymore */
+ fc->fc_busy = B_FALSE;
+ cv_broadcast(&fc->fc_busy_cv);
+ mutex_exit(&ap->a_mutex);
+
+ freemsg(mp);
+
+ AV1394_TNF_EXIT(av1394_fcp_write);
+ return (ret);
+}
+
+/*
+ *
+ * --- configuration routines
+ *
+ */
+static int
+av1394_fcp_ctl_register(av1394_inst_t *avp)
+{
+ t1394_fcp_evts_t evts;
+ int ret;
+
+ evts.fcp_write_request = av1394_fcp_resp_write_request_cb;
+ evts.fcp_arg = avp;
+
+ ret = t1394_fcp_register_controller(avp->av_t1394_hdl, &evts, 0);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_fcp_ctl_register_error,
+ AV1394_TNF_FCP_ERROR, "", tnf_int, ret, ret);
+ }
+ return (ret);
+}
+
+static int
+av1394_fcp_tgt_register(av1394_inst_t *avp)
+{
+ t1394_fcp_evts_t evts;
+ int ret;
+
+ evts.fcp_write_request = av1394_fcp_cmd_write_request_cb;
+ evts.fcp_arg = avp;
+
+ ret = t1394_fcp_register_target(avp->av_t1394_hdl, &evts, 0);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_fcp_tgt_register_error,
+ AV1394_TNF_FCP_ERROR, "", tnf_int, ret, ret);
+ }
+ return (ret);
+}
+
+static int
+av1394_fcp_ctl_alloc_cmd(av1394_inst_t *avp)
+{
+ av1394_fcp_cmd_t *fc = &avp->av_a.a_fcp.fcp_cmd;
+ int ret;
+
+ ret = t1394_alloc_cmd(avp->av_t1394_hdl, T1394_ALLOC_CMD_FCP_COMMAND,
+ &fc->fc_cmd);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_fcp_ctl_alloc_cmd_error,
+ AV1394_TNF_FCP_ERROR, "", tnf_int, ret, ret);
+ }
+ return (ret);
+}
+
+static void
+av1394_fcp_ctl_free_cmd(av1394_inst_t *avp)
+{
+ av1394_fcp_cmd_t *fc = &avp->av_a.a_fcp.fcp_cmd;
+ int ret;
+
+ ret = t1394_free_cmd(avp->av_t1394_hdl, 0, &fc->fc_cmd);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_fcp_ctl_free_cmd_error,
+ AV1394_TNF_FCP_ERROR, "", tnf_int, ret, ret);
+ }
+}
+
+static int
+av1394_fcp_tgt_alloc_cmd(av1394_inst_t *avp)
+{
+ av1394_fcp_cmd_t *fc = &avp->av_a.a_fcp.fcp_resp;
+ int ret;
+
+ ret = t1394_alloc_cmd(avp->av_t1394_hdl, T1394_ALLOC_CMD_FCP_RESPONSE,
+ &fc->fc_cmd);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_fcp_tgt_alloc_cmd_error,
+ AV1394_TNF_FCP_ERROR, "", tnf_int, ret, ret);
+ }
+ return (ret);
+}
+
+static void
+av1394_fcp_tgt_free_cmd(av1394_inst_t *avp)
+{
+ av1394_fcp_cmd_t *fc = &avp->av_a.a_fcp.fcp_resp;
+ int ret;
+
+ ret = t1394_free_cmd(avp->av_t1394_hdl, 0, &fc->fc_cmd);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_fcp_tgt_free_cmd_error,
+ AV1394_TNF_FCP_ERROR, "", tnf_int, ret, ret);
+ }
+}
+
+static void
+av1394_fcp_cleanup(av1394_inst_t *avp, int level)
+{
+ av1394_fcp_t *fcp = &avp->av_a.a_fcp;
+
+ ASSERT((level > 0) && (level <= AV1394_CLEANUP_LEVEL_MAX));
+
+ switch (level) {
+ default:
+ cv_destroy(&fcp->fcp_cmd.fc_xmit_cv);
+ cv_destroy(&fcp->fcp_cmd.fc_busy_cv);
+ cv_destroy(&fcp->fcp_resp.fc_xmit_cv);
+ cv_destroy(&fcp->fcp_resp.fc_busy_cv);
+
+ av1394_fcp_tgt_free_cmd(avp);
+ /* FALLTHRU */
+ case 3:
+ (void) t1394_fcp_unregister_target(avp->av_t1394_hdl);
+ /* FALLTHRU */
+ case 2:
+ av1394_fcp_ctl_free_cmd(avp);
+ /* FALLTHRU */
+ case 1:
+ (void) t1394_fcp_unregister_controller(avp->av_t1394_hdl);
+ }
+}
+
+/*
+ *
+ * --- FCP write and completion handling
+ *
+ */
+static int
+av1394_fcp_cmd_write_sync(av1394_inst_t *avp, av1394_fcp_cmd_t *fc)
+{
+ av1394_async_t *ap = &avp->av_a;
+ cmd1394_cmd_t *cmd = fc->fc_cmd;
+ int ret = 0;
+
+ cmd->completion_callback = av1394_fcp_cmd_completion_cb;
+ cmd->cmd_callback_arg = avp;
+
+ /* go */
+ ASSERT(!fc->fc_xmit);
+ fc->fc_xmit = B_TRUE;
+
+ mutex_exit(&ap->a_mutex);
+ ret = t1394_write(avp->av_t1394_hdl, cmd);
+ mutex_enter(&ap->a_mutex);
+
+ /* immediate error? */
+ if (ret != DDI_SUCCESS) {
+ fc->fc_xmit = B_FALSE;
+ TNF_PROBE_2(av1394_fcp_cmd_write_sync_error,
+ AV1394_TNF_FCP_ERROR, "", tnf_int, ret, ret,
+ tnf_int, cmd_result, cmd->cmd_result);
+ return (EIO);
+ }
+
+ /* wait for completion */
+ while (fc->fc_xmit) {
+ if (cv_wait_sig(&fc->fc_xmit_cv, &ap->a_mutex) == 0) {
+ return (EINTR);
+ }
+ }
+
+ if (cmd->cmd_result != CMD1394_CMDSUCCESS) {
+ TNF_PROBE_1(av1394_fcp_cmd_write_sync_error,
+ AV1394_TNF_FCP_ERROR, "",
+ tnf_int, cmd_result, cmd->cmd_result);
+ if (cmd->cmd_result == CMD1394_EDEVICE_BUSY) {
+ return (EBUSY);
+ } else {
+ return (EIO);
+ }
+ } else {
+ return (0);
+ }
+}
+
+static void
+av1394_fcp_cmd_completion_cb(struct cmd1394_cmd *cmd)
+{
+ av1394_inst_t *avp = cmd->cmd_callback_arg;
+ av1394_async_t *ap = &avp->av_a;
+ av1394_fcp_t *fcp = &ap->a_fcp;
+ av1394_fcp_cmd_t *fc;
+
+ AV1394_TNF_ENTER(av1394_fcp_cmd_completion_cb);
+
+ mutex_enter(&ap->a_mutex);
+	/* is this an FCP command or a response? */
+ if (cmd == fcp->fcp_cmd.fc_cmd) {
+ fc = &fcp->fcp_cmd;
+ } else {
+ ASSERT(cmd == fcp->fcp_resp.fc_cmd);
+ fc = &fcp->fcp_resp;
+ }
+
+ /* wake the waiter */
+ fc->fc_xmit = B_FALSE;
+ cv_signal(&fc->fc_xmit_cv);
+ mutex_exit(&ap->a_mutex);
+
+ AV1394_TNF_EXIT(av1394_fcp_cmd_completion_cb);
+}
+
+/*
+ * av1394_fcp_resp_write_request_cb()
+ * Incoming response request from an FCP target
+ */
+static int
+av1394_fcp_resp_write_request_cb(cmd1394_cmd_t *req)
+{
+ av1394_inst_t *avp = req->cmd_callback_arg;
+ av1394_async_t *ap = &avp->av_a;
+
+ AV1394_TNF_ENTER(av1394_fcp_resp_write_request_cb);
+
+ mutex_enter(&ap->a_mutex);
+ if ((ap->a_nopen == 0) ||
+ (req->bus_generation != ap->a_bus_generation) ||
+ (req->nodeID != ap->a_targetinfo.target_nodeID)) {
+ mutex_exit(&ap->a_mutex);
+
+ AV1394_TNF_EXIT(av1394_fcp_resp_write_request_cb);
+ return (T1394_REQ_UNCLAIMED);
+ }
+ mutex_exit(&ap->a_mutex);
+
+ av1394_fcp_common_write_request_cb(req, AV1394_M_FCP_RESP);
+
+ AV1394_TNF_EXIT(av1394_fcp_resp_write_request_cb);
+ return (T1394_REQ_CLAIMED);
+}
+
+/*
+ * av1394_fcp_cmd_write_request_cb()
+ * Incoming command request from an FCP controller
+ */
+static int
+av1394_fcp_cmd_write_request_cb(cmd1394_cmd_t *req)
+{
+ av1394_inst_t *avp = req->cmd_callback_arg;
+ av1394_async_t *ap = &avp->av_a;
+
+ AV1394_TNF_ENTER(av1394_fcp_cmd_write_request_cb);
+
+ mutex_enter(&ap->a_mutex);
+ if (ap->a_nopen == 0) {
+ mutex_exit(&ap->a_mutex);
+
+ AV1394_TNF_EXIT(av1394_fcp_cmd_write_request_cb);
+ return (T1394_REQ_UNCLAIMED);
+ }
+ mutex_exit(&ap->a_mutex);
+
+ av1394_fcp_common_write_request_cb(req, AV1394_M_FCP_CMD);
+
+ AV1394_TNF_EXIT(av1394_fcp_cmd_write_request_cb);
+ return (T1394_REQ_CLAIMED);
+}
+
+static void
+av1394_fcp_common_write_request_cb(cmd1394_cmd_t *req, int mtype)
+{
+ av1394_inst_t *avp = req->cmd_callback_arg;
+ mblk_t *mp;
+ uint32_t quadlet_data;
+ int err;
+
+ AV1394_TNF_ENTER(av1394_fcp_common_write_request_cb);
+
+ ASSERT((req->cmd_type == CMD1394_ASYNCH_WR_QUAD) ||
+ (req->cmd_type == CMD1394_ASYNCH_WR_BLOCK));
+
+ /* get the data */
+ if (req->cmd_type == CMD1394_ASYNCH_WR_QUAD) {
+ quadlet_data = req->cmd_u.q.quadlet_data;
+ } else {
+ mp = req->cmd_u.b.data_block;
+ req->cmd_u.b.data_block = NULL;
+ }
+
+ /* complete request */
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+
+ err = t1394_recv_request_done(avp->av_t1394_hdl, req, 0);
+ if (err != DDI_SUCCESS) {
+ TNF_PROBE_2(av1394_fcp_common_write_request_cb_done_error,
+ AV1394_TNF_FCP_ERROR, "", tnf_int, err, err,
+ tnf_int, result, req->cmd_result);
+ }
+
+ /* allocate mblk and copy quadlet into it */
+ if (req->cmd_type == CMD1394_ASYNCH_WR_QUAD) {
+ mp = allocb(IEEE1394_QUADLET, BPRI_HI);
+ if (mp == NULL) {
+ TNF_PROBE_0(
+ av1394_fcp_common_write_request_cb_allocb_error,
+ AV1394_TNF_FCP_ERROR, "");
+ AV1394_TNF_EXIT(av1394_fcp_common_write_request_cb);
+ return;
+ }
+ *(uint32_t *)mp->b_rptr = quadlet_data;
+ mp->b_wptr += IEEE1394_QUADLET;
+ }
+
+ /* queue up the data */
+ DB_TYPE(mp) = mtype;
+ av1394_async_putq_rq(avp, mp);
+
+ AV1394_TNF_EXIT(av1394_fcp_common_write_request_cb);
+}
+
+/*
+ *
+ * --- misc routines
+ *
+ */
+static int
+av1394_fcp_copyin_block(iec61883_arq_t *arq, mblk_t *mp, struct uio *uiop)
+{
+ int len = arq->arq_len;
+ int copylen;
+ int ret = 0;
+
+ ASSERT((len > 0) && (len % IEEE1394_QUADLET == 0));
+
+ /* first copy ARQ-embedded data */
+ copylen = min(len, sizeof (arq->arq_data));
+ bcopy(&arq->arq_data.buf[0], mp->b_wptr, copylen);
+ mp->b_wptr += copylen;
+
+ /* now copyin the rest of the data, if any */
+ copylen = len - copylen;
+ if (copylen > 0) {
+ ret = uiomove(mp->b_wptr, copylen, UIO_WRITE, uiop);
+ if (ret != 0) {
+ TNF_PROBE_1(av1394_fcp_copyin_block_error,
+ AV1394_TNF_FCP_ERROR, "", tnf_int, ret, ret);
+ return (ret);
+ }
+ mp->b_wptr += copylen;
+ }
+ return (ret);
+}
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch.c b/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch.c
new file mode 100644
index 0000000000..107f2f1220
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch.c
@@ -0,0 +1,773 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * av1394 isochronous module
+ */
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/av/iec61883.h>
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+/* configuration routines */
+static int av1394_isoch_create_minor_node(av1394_inst_t *);
+static void av1394_isoch_remove_minor_node(av1394_inst_t *);
+static void av1394_isoch_cleanup(av1394_inst_t *, int);
+av1394_isoch_seg_t *av1394_isoch_find_seg(av1394_inst_t *, offset_t, size_t);
+static int av1394_isoch_autorecv_init(av1394_inst_t *, av1394_ic_t **);
+static int av1394_isoch_autoxmit_init(av1394_inst_t *, av1394_ic_t **,
+ struct uio *);
+
+/* ioctls */
+static int av1394_ioctl_isoch_init(av1394_inst_t *, void *, int);
+static av1394_ic_t *av1394_ioctl_isoch_handle2ic(av1394_inst_t *, void *);
+static int av1394_ioctl_isoch_fini(av1394_inst_t *, void *, int);
+static int av1394_ioctl_start(av1394_inst_t *, void *, int);
+static int av1394_ioctl_stop(av1394_inst_t *, void *, int);
+static int av1394_ioctl_recv(av1394_inst_t *, void *, int);
+static int av1394_ioctl_xmit(av1394_inst_t *, void *, int);
+
+static uint_t av1394_isoch_softintr(caddr_t);
+
+static struct devmap_callback_ctl av1394_isoch_devmap_ops = {
+ DEVMAP_OPS_REV, /* rev */
+ NULL, /* map */
+ devmap_default_access, /* access */
+ NULL, /* dup */
+ NULL, /* unmap */
+};
+
+/* tunables */
+int av1394_rate_n_dv_ntsc = 246;
+int av1394_rate_d_dv_ntsc = 3840;
+int av1394_rate_n_dv_pal = 1;
+int av1394_rate_d_dv_pal = 16;
+
+int av1394_isoch_autorecv_nframes = 50;
+int av1394_isoch_autorecv_framesz = 250;
+int av1394_isoch_autoxmit_nframes = 50;
+int av1394_isoch_autoxmit_framesz = 250;
+
+#define AV1394_TNF_ENTER(func) \
+ TNF_PROBE_0_DEBUG(func##_enter, AV1394_TNF_ISOCH_STACK, "");
+
+#define AV1394_TNF_EXIT(func) \
+ TNF_PROBE_0_DEBUG(func##_exit, AV1394_TNF_ISOCH_STACK, "");
+
+int
+av1394_isoch_attach(av1394_inst_t *avp)
+{
+ av1394_isoch_t *ip = &avp->av_i;
+ ddi_iblock_cookie_t ibc = avp->av_attachinfo.iblock_cookie;
+
+ AV1394_TNF_ENTER(av1394_isoch_attach);
+
+ mutex_init(&ip->i_mutex, NULL, MUTEX_DRIVER, ibc);
+
+ mutex_enter(&ip->i_mutex);
+ if (av1394_isoch_create_minor_node(avp) != DDI_SUCCESS) {
+ mutex_exit(&ip->i_mutex);
+ av1394_isoch_cleanup(avp, 1);
+ AV1394_TNF_EXIT(av1394_isoch_attach);
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_add_softintr(avp->av_dip, DDI_SOFTINT_LOW, &ip->i_softintr_id,
+ 0, 0, av1394_isoch_softintr, (caddr_t)avp) != DDI_SUCCESS) {
+ mutex_exit(&ip->i_mutex);
+ av1394_isoch_cleanup(avp, 2);
+ AV1394_TNF_EXIT(av1394_isoch_attach);
+ return (DDI_FAILURE);
+ }
+
+ if (av1394_cmp_init(avp) != DDI_SUCCESS) {
+ mutex_exit(&ip->i_mutex);
+ av1394_isoch_cleanup(avp, 3);
+ AV1394_TNF_EXIT(av1394_isoch_attach);
+ return (DDI_FAILURE);
+ }
+
+ av1394_as_init(&ip->i_mmap_as);
+ mutex_exit(&ip->i_mutex);
+
+ AV1394_TNF_EXIT(av1394_isoch_attach);
+ return (DDI_SUCCESS);
+}
+
+void
+av1394_isoch_detach(av1394_inst_t *avp)
+{
+ AV1394_TNF_ENTER(av1394_isoch_detach);
+
+ av1394_isoch_cleanup(avp, AV1394_CLEANUP_LEVEL_MAX);
+
+ AV1394_TNF_EXIT(av1394_isoch_detach);
+}
+
+int
+av1394_isoch_cpr_suspend(av1394_inst_t *avp)
+{
+ av1394_isoch_t *ip = &avp->av_i;
+ av1394_ic_t *icp;
+ int i;
+ int ret = DDI_SUCCESS;
+
+ AV1394_TNF_ENTER(av1394_isoch_cpr_suspend);
+
+ /*
+ * suspend only if there are no active channels
+ */
+ mutex_enter(&ip->i_mutex);
+ for (i = 0; (i < NELEM(ip->i_ic)) && (ret == DDI_SUCCESS); i++) {
+ icp = ip->i_ic[i];
+ if (icp) {
+ mutex_enter(&icp->ic_mutex);
+ if (icp->ic_state != AV1394_IC_IDLE) {
+ ret = DDI_FAILURE;
+ }
+ mutex_exit(&icp->ic_mutex);
+ }
+ }
+ mutex_exit(&ip->i_mutex);
+
+ AV1394_TNF_EXIT(av1394_isoch_cpr_suspend);
+ return (ret);
+}
+
+/*ARGSUSED*/
+int
+av1394_isoch_close(av1394_inst_t *avp, int flag)
+{
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_isoch_close);
+
+ ret = av1394_ic_close(avp, flag);
+ av1394_cmp_close(avp);
+
+ AV1394_TNF_EXIT(av1394_isoch_close);
+ return (ret);
+}
+
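+/*
+ * read(2) on the isoch minor node streams from the broadcast channel
+ * (63), auto-initializing a default receive channel on first use
+ */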
+int
+av1394_isoch_read(av1394_inst_t *avp, struct uio *uiop)
+{
+ av1394_ic_t *icp;
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_isoch_read);
+
+ /* use broadcast channel */
+ icp = avp->av_i.i_ic[63];
+ if (icp == NULL) {
+ if ((ret = av1394_isoch_autorecv_init(avp, &icp)) != 0) {
+ AV1394_TNF_EXIT(av1394_isoch_read);
+ return (ret);
+ }
+ } else if (icp->ic_dir != AV1394_IR) {
+ /* channel already used for xmit */
+		AV1394_TNF_EXIT(av1394_isoch_read);
+		return (EBUSY);
+ }
+
+ if ((ret = av1394_ir_start(icp)) == 0) {
+ ret = av1394_ir_read(icp, uiop);
+ }
+
+ AV1394_TNF_EXIT(av1394_isoch_read);
+ return (ret);
+}
+
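+/*
+ * write(2) is the transmit counterpart of av1394_isoch_read(): it
+ * auto-initializes a broadcast transmit channel on first use
+ */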
+int
+av1394_isoch_write(av1394_inst_t *avp, struct uio *uiop)
+{
+ av1394_ic_t *icp;
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_isoch_write);
+
+ /* use broadcast channel */
+ icp = avp->av_i.i_ic[63];
+ if (icp == NULL) {
+ if ((ret = av1394_isoch_autoxmit_init(avp, &icp, uiop)) != 0) {
+ AV1394_TNF_EXIT(av1394_isoch_write);
+ return (ret);
+ }
+ } else if (icp->ic_dir != AV1394_IT) {
+ /* channel already used for recv */
+ AV1394_TNF_EXIT(av1394_isoch_write);
+ return (EBUSY);
+ }
+
+ ret = av1394_it_write(icp, uiop);
+
+ AV1394_TNF_EXIT(av1394_isoch_write);
+ return (ret);
+}
+
+/*ARGSUSED*/
+int
+av1394_isoch_ioctl(av1394_inst_t *avp, int cmd, intptr_t arg, int mode,
+ int *rvalp)
+{
+ int ret = EINVAL;
+
+ switch (cmd) {
+ case IEC61883_ISOCH_INIT:
+ ret = av1394_ioctl_isoch_init(avp, (void *)arg, mode);
+ break;
+ case IEC61883_ISOCH_FINI:
+ ret = av1394_ioctl_isoch_fini(avp, (void *)arg, mode);
+ break;
+ case IEC61883_START:
+ ret = av1394_ioctl_start(avp, (void *)arg, mode);
+ break;
+ case IEC61883_STOP:
+ ret = av1394_ioctl_stop(avp, (void *)arg, mode);
+ break;
+ case IEC61883_RECV:
+ ret = av1394_ioctl_recv(avp, (void *)arg, mode);
+ break;
+ case IEC61883_XMIT:
+ ret = av1394_ioctl_xmit(avp, (void *)arg, mode);
+ break;
+ case IEC61883_PLUG_INIT:
+ ret = av1394_ioctl_plug_init(avp, (void *)arg, mode);
+ break;
+ case IEC61883_PLUG_FINI:
+ ret = av1394_ioctl_plug_fini(avp, (void *)arg, mode);
+ break;
+ case IEC61883_PLUG_REG_READ:
+ ret = av1394_ioctl_plug_reg_read(avp, (void *)arg, mode);
+ break;
+ case IEC61883_PLUG_REG_CAS:
+ ret = av1394_ioctl_plug_reg_cas(avp, (void *)arg, mode);
+ break;
+ }
+
+ return (ret);
+}
+
+/*ARGSUSED*/
+int
+av1394_isoch_devmap(av1394_inst_t *avp, devmap_cookie_t dhp, offset_t off,
+ size_t len, size_t *maplen, uint_t model)
+{
+ av1394_isoch_seg_t *isp;
+
+ AV1394_TNF_ENTER(av1394_isoch_devmap);
+
+ *maplen = 0;
+
+ /* find segment */
+ isp = av1394_isoch_find_seg(avp, off, ptob(btopr(len)));
+ if (isp == NULL) {
+ AV1394_TNF_EXIT(av1394_isoch_devmap);
+ return (EINVAL);
+ }
+
+ /* map segment */
+ if (devmap_umem_setup(dhp, avp->av_dip, &av1394_isoch_devmap_ops,
+ isp->is_umem_cookie, 0, isp->is_umem_size, PROT_ALL, 0,
+ &avp->av_attachinfo.acc_attr) != 0) {
+ TNF_PROBE_0(av1394_isoch_devmap_error_umem_setup,
+ AV1394_TNF_ISOCH_ERROR, "");
+ AV1394_TNF_EXIT(av1394_isoch_devmap);
+ return (EINVAL);
+ }
+ *maplen = isp->is_umem_size;
+
+ AV1394_TNF_EXIT(av1394_isoch_devmap);
+ return (0);
+}
+
+/*
+ *
+ * --- configuration routines
+ *
+ * av1394_isoch_create_minor_node()
+ * Create isoch minor node
+ */
+static int
+av1394_isoch_create_minor_node(av1394_inst_t *avp)
+{
+ int ret;
+
+ ret = ddi_create_minor_node(avp->av_dip, "isoch",
+ S_IFCHR, AV1394_ISOCH_INST2MINOR(avp->av_instance),
+ DDI_NT_AV_ISOCH, NULL);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_0(av1394_isoch_create_minor_node_error,
+ AV1394_TNF_ISOCH_ERROR, "");
+ }
+ return (ret);
+}
+
+/*
+ * av1394_isoch_remove_minor_node()
+ * Remove isoch minor node
+ */
+static void
+av1394_isoch_remove_minor_node(av1394_inst_t *avp)
+{
+ ddi_remove_minor_node(avp->av_dip, "isoch");
+}
+
+/*
+ * av1394_isoch_cleanup()
+ * Cleanup after attach
+ */
+static void
+av1394_isoch_cleanup(av1394_inst_t *avp, int level)
+{
+ av1394_isoch_t *ip = &avp->av_i;
+
+ ASSERT((level > 0) && (level <= AV1394_CLEANUP_LEVEL_MAX));
+
+ switch (level) {
+ default:
+ mutex_enter(&ip->i_mutex);
+ av1394_as_fini(&ip->i_mmap_as);
+ av1394_cmp_fini(avp);
+ mutex_exit(&ip->i_mutex);
+ /* FALLTHRU */
+ case 3:
+ ddi_remove_softintr(ip->i_softintr_id);
+ /* FALLTHRU */
+ case 2:
+ av1394_isoch_remove_minor_node(avp);
+ /* FALLTHRU */
+ case 1:
+ mutex_destroy(&ip->i_mutex);
+ }
+}
+
+/*
+ * av1394_isoch_find_seg()
+ * Given an offset and size, find a matching av1394_isoch_seg_t structure.
+ */
+av1394_isoch_seg_t *
+av1394_isoch_find_seg(av1394_inst_t *avp, offset_t off, size_t len)
+{
+ av1394_isoch_t *ip = &avp->av_i;
+ av1394_ic_t *icp;
+ av1394_isoch_pool_t *pool;
+ av1394_isoch_seg_t *isp;
+ offset_t segoff;
+ int i;
+
+ /* find channel from within this range */
+ for (i = 0; i < NELEM(ip->i_ic); i++) {
+ icp = ip->i_ic[i];
+ if (icp == NULL) {
+ continue;
+ }
+ if ((off >= icp->ic_mmap_off) &&
+ (off + len <= icp->ic_mmap_off + icp->ic_mmap_sz)) {
+ off -= icp->ic_mmap_off; /* convert to base */
+ break;
+ }
+ icp = NULL;
+ }
+ if (icp == NULL) {
+ TNF_PROBE_0(av1394_isoch_find_seg_error_nochan,
+ AV1394_TNF_ISOCH_ERROR, "");
+ return (NULL);
+ }
+
+ /* find a segment */
+ pool = (icp->ic_dir == AV1394_IR) ?
+ &icp->ic_ir.ir_data_pool : &icp->ic_it.it_data_pool;
+ for (segoff = 0, i = 0; i < pool->ip_nsegs; i++) {
+ isp = &pool->ip_seg[i];
+ if (off == segoff) {
+ break;
+ }
+ segoff += isp->is_umem_size;
+ isp = NULL;
+ }
+ if (isp == NULL) {
+ TNF_PROBE_0(av1394_isoch_find_seg_error_noseg,
+ AV1394_TNF_ISOCH_ERROR, "");
+ return (NULL);
+ }
+
+ /* only whole segments can be mapped */
+ if (len != isp->is_umem_size) {
+ TNF_PROBE_0(av1394_isoch_devmap_error_whole,
+ AV1394_TNF_ISOCH_ERROR, "");
+ return (NULL);
+ }
+ return (isp);
+}
+
+/*
+ * initialize default channel for data receipt
+ */
+static int
+av1394_isoch_autorecv_init(av1394_inst_t *avp, av1394_ic_t **icpp)
+{
+ iec61883_isoch_init_t ii;
+ int ret = 0;
+
+ AV1394_TNF_ENTER(av1394_isoch_autorecv_init);
+
+ bzero(&ii, sizeof (ii));
+ ii.ii_version = IEC61883_V1_0;
+ ii.ii_pkt_size = 512;
+ ii.ii_frame_size = av1394_isoch_autorecv_framesz;
+ ii.ii_frame_cnt = av1394_isoch_autorecv_nframes;
+ ii.ii_direction = IEC61883_DIR_RECV;
+ ii.ii_bus_speed = IEC61883_S100;
+ ii.ii_channel = (1ULL << 63);
+
+ ret = av1394_ic_init(avp, &ii, icpp);
+
+ AV1394_TNF_EXIT(av1394_isoch_autorecv_init);
+ return (ret);
+}
+
+/*
+ * initialize default channel for data xmit
+ */
+static int
+av1394_isoch_autoxmit_init(av1394_inst_t *avp, av1394_ic_t **icpp,
+ struct uio *uiop)
+{
+ av1394_isoch_autoxmit_t *axp = &avp->av_i.i_autoxmit;
+ iec61883_isoch_init_t ii;
+ uint_t fmt, dbs, fn, f5060, stype; /* CIP fields */
+ int ret = 0;
+
+ AV1394_TNF_ENTER(av1394_isoch_autoxmit_init);
+
+ /* copyin the first CIP header */
+ axp->ax_copy_ciph = B_FALSE;
+ if (uiop->uio_resid < AV1394_CIPSZ) {
+ TNF_PROBE_0_DEBUG(av1394_isoch_autoxmit_init_error_cipsz,
+ AV1394_TNF_ISOCH_ERROR, "");
+ return (EINVAL);
+ }
+ ret = uiomove(axp->ax_ciph, AV1394_CIPSZ, UIO_WRITE, uiop);
+ if (ret != 0) {
+ return (ret);
+ }
+ axp->ax_copy_ciph = B_TRUE;
+
+	/*
+	 * parse the CIP header (IEC 61883-1): DBS is the data block
+	 * size in quadlets, FN the fraction number, FMT the format id,
+	 * and STYPE the signal type bits of the format-dependent field
+	 */
+ dbs = axp->ax_ciph[1];
+ fn = (axp->ax_ciph[2] >> 6) & 0x3;
+ fmt = axp->ax_ciph[4] & 0x3F;
+ stype = (axp->ax_ciph[5] >> 2) & 0x1F;
+
+ /* fill out the init structure */
+ bzero(&ii, sizeof (ii));
+ ii.ii_version = IEC61883_V1_0;
+ ii.ii_frame_cnt = av1394_isoch_autoxmit_nframes;
+ ii.ii_direction = IEC61883_DIR_XMIT;
+ ii.ii_bus_speed = IEC61883_S100;
+ ii.ii_channel = (1ULL << 63);
+ ii.ii_dbs = dbs;
+ ii.ii_fn = fn;
+
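+	/*
+	 * FMT 0 with DBS 0x78 (120 quadlets, 480 bytes per data block)
+	 * identifies SD DVCR per IEC 61883-2; the F50/60 bit of the FDF
+	 * distinguishes PAL from NTSC
+	 */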
+ if ((fmt == 0) && (dbs == 0x78) && (fn == 0) && (stype == 0)) {
+ /* either DV-NTSC or DV-PAL */
+ ii.ii_pkt_size = 488;
+ ii.ii_ts_mode = IEC61883_TS_SYT;
+ f5060 = axp->ax_ciph[5] & 0x80;
+ if (f5060 == 0) {
+ axp->ax_fmt = AV1394_ISOCH_AUTOXMIT_DV_NTSC;
+ ii.ii_frame_size = AV1394_DV_NTSC_FRAMESZ;
+ ii.ii_rate_n = av1394_rate_n_dv_ntsc;
+ ii.ii_rate_d = av1394_rate_d_dv_ntsc;
+ } else {
+ axp->ax_fmt = AV1394_ISOCH_AUTOXMIT_DV_PAL;
+ ii.ii_frame_size = AV1394_DV_PAL_FRAMESZ;
+ ii.ii_rate_n = av1394_rate_n_dv_pal;
+ ii.ii_rate_d = av1394_rate_d_dv_pal;
+ }
+ } else {
+ /* raw stream */
+ axp->ax_fmt = AV1394_ISOCH_AUTOXMIT_UNKNOWN;
+ ii.ii_pkt_size = 512;
+ ii.ii_frame_size = av1394_isoch_autoxmit_framesz;
+ ii.ii_ts_mode = IEC61883_TS_NONE;
+ }
+
+ ret = av1394_ic_init(avp, &ii, icpp);
+
+ AV1394_TNF_EXIT(av1394_isoch_autoxmit_init);
+ return (ret);
+}
+
+
+/*
+ *
+ * --- ioctls
+ * these routines are generally responsible for copyin/out of arguments
+ * and passing control to the actual implementation.
+ *
+ */
+static int
+av1394_ioctl_isoch_init(av1394_inst_t *avp, void *arg, int mode)
+{
+ iec61883_isoch_init_t ii;
+#ifdef _MULTI_DATAMODEL
+ iec61883_isoch_init32_t ii32;
+#endif
+ av1394_ic_t *icp;
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_ioctl_isoch_init);
+
+ if (ddi_copyin(arg, &ii, sizeof (ii), mode) != 0) {
+ AV1394_TNF_EXIT(av1394_ioctl_isoch_init);
+ return (EFAULT);
+ }
+
+ ret = av1394_ic_init(avp, &ii, &icp);
+
+ if (ret != 0) {
+ AV1394_TNF_EXIT(av1394_ioctl_isoch_init);
+ return (ret);
+ }
+
+#ifdef _MULTI_DATAMODEL
+ /* fixup 32-bit deviations */
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
+ bcopy(&ii, &ii32, sizeof (ii32));
+ ii32.ii_mmap_off = ii.ii_mmap_off;
+ ii32.ii_rchannel = ii.ii_rchannel;
+ ii32.ii_error = ii.ii_error;
+ ret = ddi_copyout(&ii32, arg, sizeof (ii32), mode);
+ } else
+#endif
+ ret = ddi_copyout(&ii, arg, sizeof (ii), mode);
+ if (ret != 0) {
+ AV1394_TNF_EXIT(av1394_ioctl_isoch_init);
+		return (EFAULT);
+ }
+
+ AV1394_TNF_EXIT(av1394_ioctl_isoch_init);
+ return (ret);
+}
+
+static av1394_ic_t *
+av1394_ioctl_isoch_handle2ic(av1394_inst_t *avp, void *arg)
+{
+ int num = (int)(intptr_t)arg;
+ av1394_isoch_t *ip = &avp->av_i;
+
+	if ((num < 0) || (num >= NELEM(ip->i_ic))) {
+ TNF_PROBE_0(av1394_ioctl_isoch_handle2ic_error_range,
+ AV1394_TNF_ISOCH_ERROR, "");
+ return (NULL);
+ }
+ if (ip->i_ic[num] == NULL) {
+ TNF_PROBE_0(av1394_ioctl_isoch_handle2ic_error_null,
+ AV1394_TNF_ISOCH_ERROR, "");
+ }
+ return (ip->i_ic[num]);
+}
+
+/*ARGSUSED*/
+static int
+av1394_ioctl_isoch_fini(av1394_inst_t *avp, void *arg, int mode)
+{
+ av1394_ic_t *icp;
+
+ AV1394_TNF_ENTER(av1394_ioctl_isoch_fini);
+
+ if ((icp = av1394_ioctl_isoch_handle2ic(avp, arg)) != NULL) {
+ av1394_ic_fini(icp);
+ }
+
+ AV1394_TNF_EXIT(av1394_ioctl_isoch_fini);
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+av1394_ioctl_start(av1394_inst_t *avp, void *arg, int mode)
+{
+ av1394_ic_t *icp;
+ int ret = EINVAL;
+
+ AV1394_TNF_ENTER(av1394_ioctl_start);
+
+ if ((icp = av1394_ioctl_isoch_handle2ic(avp, arg)) != NULL) {
+ ret = av1394_ic_start(icp);
+ }
+
+ AV1394_TNF_EXIT(av1394_ioctl_start);
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int
+av1394_ioctl_stop(av1394_inst_t *avp, void *arg, int mode)
+{
+ av1394_ic_t *icp;
+ int ret = EINVAL;
+
+ AV1394_TNF_ENTER(av1394_ioctl_stop);
+
+ if ((icp = av1394_ioctl_isoch_handle2ic(avp, arg)) != NULL) {
+ ret = av1394_ic_stop(icp);
+ }
+
+ AV1394_TNF_EXIT(av1394_ioctl_stop);
+ return (ret);
+}
+
+static int
+av1394_ioctl_recv(av1394_inst_t *avp, void *arg, int mode)
+{
+ av1394_isoch_t *ip = &avp->av_i;
+ av1394_ic_t *icp;
+ iec61883_recv_t recv;
+ int num;
+ int ret = EINVAL;
+
+ /* copyin the structure and get channel pointer */
+ if (ddi_copyin(arg, &recv, sizeof (recv), mode) != 0) {
+ return (EFAULT);
+ }
+ num = recv.rx_handle;
+	if ((num < 0) || (num >= NELEM(ip->i_ic))) {
+ TNF_PROBE_0(av1394_ioctl_recv_error_range,
+ AV1394_TNF_ISOCH_ERROR, "");
+ return (EINVAL);
+ }
+ icp = ip->i_ic[num];
+ if (icp == NULL) {
+ TNF_PROBE_0(av1394_ioctl_recv_error_null,
+ AV1394_TNF_ISOCH_ERROR, "");
+		return (EINVAL);
+	}
+
+ /* now call the actual handler */
+ if (icp->ic_dir != AV1394_IR) {
+ ret = EINVAL;
+ } else {
+ ret = av1394_ir_recv(icp, &recv);
+ }
+
+ /* copyout the result */
+ if (ret == 0) {
+ if (ddi_copyout(&recv, arg, sizeof (recv), mode) != 0) {
+ return (EFAULT);
+ }
+ }
+
+ return (ret);
+}
+
+static int
+av1394_ioctl_xmit(av1394_inst_t *avp, void *arg, int mode)
+{
+ av1394_isoch_t *ip = &avp->av_i;
+ av1394_ic_t *icp;
+ iec61883_xmit_t xmit;
+ int num;
+ int ret = EINVAL;
+
+ /* copyin the structure and get channel pointer */
+ if (ddi_copyin(arg, &xmit, sizeof (xmit), mode) != 0) {
+ return (EFAULT);
+ }
+ num = xmit.tx_handle;
+	if ((num < 0) || (num >= NELEM(ip->i_ic))) {
+ TNF_PROBE_0(av1394_ioctl_xmit_error_range,
+ AV1394_TNF_ISOCH_ERROR, "");
+ return (EINVAL);
+ }
+ icp = ip->i_ic[num];
+ if (icp == NULL) {
+ TNF_PROBE_0(av1394_ioctl_xmit_error_null,
+ AV1394_TNF_ISOCH_ERROR, "");
+		return (EINVAL);
+	}
+
+ /* now call the actual handler */
+ if (icp->ic_dir != AV1394_IT) {
+ ret = EINVAL;
+ } else {
+ ret = av1394_it_xmit(icp, &xmit);
+ }
+
+ /* copyout the result */
+ if (ret == 0) {
+ if (ddi_copyout(&xmit, arg, sizeof (xmit), mode) != 0) {
+ return (EFAULT);
+ }
+ }
+
+ return (ret);
+}
+
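+/*
+ * isoch soft interrupt handler: i_softintr_ch holds one bit per
+ * channel (bit N for channel N) with a pending deferred request,
+ * such as a receive overflow or transmit underrun
+ */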
+static uint_t
+av1394_isoch_softintr(caddr_t arg)
+{
+ av1394_inst_t *avp = (av1394_inst_t *)arg;
+ av1394_isoch_t *ip = &avp->av_i;
+ int i;
+ uint64_t ch;
+ av1394_ic_t *icp;
+
+ mutex_enter(&ip->i_mutex);
+ do {
+ for (i = 63, ch = (1ULL << 63);
+		    (i >= 0) && (ip->i_softintr_ch != 0);
+ i--, ch >>= 1) {
+ if ((ip->i_softintr_ch & ch) == 0) {
+ continue;
+ }
+ ip->i_softintr_ch &= ~ch;
+ icp = ip->i_ic[i];
+ if (icp == NULL) {
+ continue;
+ }
+
+ mutex_exit(&ip->i_mutex);
+ mutex_enter(&icp->ic_mutex);
+ if (icp->ic_preq & AV1394_PREQ_IR_OVERFLOW) {
+ icp->ic_preq &= ~AV1394_PREQ_IR_OVERFLOW;
+ av1394_ir_overflow(icp);
+ }
+ if (icp->ic_preq & AV1394_PREQ_IT_UNDERRUN) {
+ icp->ic_preq &= ~AV1394_PREQ_IT_UNDERRUN;
+ av1394_it_underrun(icp);
+ }
+ mutex_exit(&icp->ic_mutex);
+ mutex_enter(&ip->i_mutex);
+ }
+ } while (ip->i_softintr_ch != 0);
+ mutex_exit(&ip->i_mutex);
+
+ return (DDI_INTR_CLAIMED);
+}
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_chan.c b/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_chan.c
new file mode 100644
index 0000000000..ef8647e330
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_chan.c
@@ -0,0 +1,813 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * routines common to isoch receive and isoch transmit
+ */
+#include <sys/stat.h>
+#include <sys/systm.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/bitmap.h>
+#include <sys/av/iec61883.h>
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+/* configuration routines */
+static void av1394_ic_cleanup(av1394_ic_t *icp, int level);
+static int av1394_ic_validate_init_params(iec61883_isoch_init_t *ii);
+static void av1394_ic_set_params(av1394_inst_t *avp,
+ iec61883_isoch_init_t *ii, av1394_ic_t *icp, int num);
+static int av1394_ic_alloc_channel(av1394_ic_t *icp, uint64_t mask, int *);
+static void av1394_ic_free_channel(av1394_ic_t *icp);
+
+/* callbacks */
+static void av1394_ic_rsrc_fail(t1394_isoch_single_handle_t t1394_sii_hdl,
+ opaque_t arg, t1394_isoch_rsrc_error_t fail_args);
+
+uint64_t av1394_ic_bitreverse(uint64_t);
+boolean_t av1394_ic_onebit(uint64_t);
+
+#define AV1394_TNF_ENTER(func) \
+ TNF_PROBE_0_DEBUG(func##_enter, AV1394_TNF_ISOCH_STACK, "");
+
+#define AV1394_TNF_EXIT(func) \
+ TNF_PROBE_0_DEBUG(func##_exit, AV1394_TNF_ISOCH_STACK, "");
+
+/* tunables */
+extern int av1394_rate_n_dv_ntsc;
+extern int av1394_rate_d_dv_ntsc;
+extern int av1394_rate_n_dv_pal;
+extern int av1394_rate_d_dv_pal;
+
+/*ARGSUSED*/
+int
+av1394_ic_close(av1394_inst_t *avp, int flags)
+{
+ av1394_isoch_t *ip = &avp->av_i;
+ av1394_ic_t *icp;
+ int i;
+
+ AV1394_TNF_ENTER(av1394_ic_close);
+
+	/* clean up channels in case the application didn't */
+ for (i = 0; i < NELEM(ip->i_ic); i++) {
+ icp = ip->i_ic[i];
+ if (icp != NULL) {
+ (void) av1394_ic_stop(icp);
+ av1394_ic_fini(icp);
+ }
+ }
+
+ AV1394_TNF_EXIT(av1394_ic_close);
+ return (0);
+}
+
+/*
+ * av1394_ic_init()
+ * Channel allocation and initialization.
+ */
+int
+av1394_ic_init(av1394_inst_t *avp, iec61883_isoch_init_t *ii,
+ av1394_ic_t **icpp)
+{
+ av1394_isoch_t *ip = &avp->av_i;
+ av1394_ic_t *icp = NULL;
+ int num;
+ av1394_isoch_pool_t *pool;
+ uint64_t mask; /* channel mask */
+ int ret;
+ ddi_iblock_cookie_t ibc = avp->av_attachinfo.iblock_cookie;
+
+ AV1394_TNF_ENTER(av1394_ic_init);
+
+ ii->ii_frame_rcnt = 0;
+ ii->ii_rchannel = 0;
+ ii->ii_error = 0;
+
+ if ((ret = av1394_ic_validate_init_params(ii)) != 0) {
+ AV1394_TNF_EXIT(av1394_ic_init);
+ return (ret);
+ }
+
+ /* allocate channel structure */
+ icp = kmem_zalloc(sizeof (av1394_ic_t), KM_SLEEP);
+
+ mutex_init(&icp->ic_mutex, NULL, MUTEX_DRIVER, ibc);
+ cv_init(&icp->ic_xfer_cv, NULL, CV_DRIVER, NULL);
+
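+	/* the channel number is not known yet, hence -1 */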
+ av1394_ic_set_params(avp, ii, icp, -1);
+
+ /* allocate isoch channel and bandwidth, except for broadcast */
+ if (ii->ii_channel == (1ULL << 63)) {
+ num = 63;
+ } else if (ii->ii_flags & IEC61883_PRIV_ISOCH_NOALLOC) {
+ num = lowbit(ii->ii_channel) - 1;
+ } else {
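+		/*
+		 * ii_channel uses bit N for channel N; the framework
+		 * mask is in IEEE 1394 bit order (channel 0 is the MSB),
+		 * hence the bit reversal
+		 */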
+ mask = av1394_ic_bitreverse(ii->ii_channel);
+ ret = av1394_ic_alloc_channel(icp, mask, &num);
+ if (ret != DDI_SUCCESS) {
+ ii->ii_error = IEC61883_ERR_NOCHANNEL;
+ av1394_ic_cleanup(icp, 1);
+ AV1394_TNF_EXIT(av1394_ic_init);
+ return (EINVAL);
+ }
+ }
+ ASSERT((num >= 0) && (num < 64));
+
+ mutex_enter(&icp->ic_mutex);
+ icp->ic_num = num;
+ mutex_exit(&icp->ic_mutex);
+
+ mutex_enter(&ip->i_mutex);
+ if (ip->i_ic[num] != NULL) {
+ mutex_exit(&ip->i_mutex);
+ ii->ii_error = IEC61883_ERR_NOCHANNEL;
+ av1394_ic_cleanup(icp, 2);
+ TNF_PROBE_0(av1394_ic_init_error_chan_used,
+ AV1394_TNF_ISOCH_ERROR, "");
+ AV1394_TNF_EXIT(av1394_ic_init);
+ return (EINVAL);
+ }
+ ip->i_ic[num] = icp;
+ mutex_exit(&ip->i_mutex);
+
+ /* do direction specific initialization */
+ if (icp->ic_dir == AV1394_IR) {
+ ret = av1394_ir_init(icp, &ii->ii_error);
+ pool = &icp->ic_ir.ir_data_pool;
+ } else {
+ ret = av1394_it_init(icp, &ii->ii_error);
+ pool = &icp->ic_it.it_data_pool;
+ }
+
+ if (ret != 0) {
+ av1394_ic_cleanup(icp, 3);
+ AV1394_TNF_EXIT(av1394_ic_init);
+ return (ret);
+ }
+
+ /* allocate mmap space */
+ mutex_enter(&ip->i_mutex);
+ mutex_enter(&icp->ic_mutex);
+ icp->ic_mmap_sz = pool->ip_umem_size;
+ icp->ic_mmap_off = av1394_as_alloc(&ip->i_mmap_as, icp->ic_mmap_sz);
+
+ icp->ic_state = AV1394_IC_IDLE;
+
+ *icpp = icp;
+ ii->ii_handle = icp->ic_num;
+ ii->ii_frame_rcnt = icp->ic_nframes;
+ ii->ii_mmap_off = icp->ic_mmap_off;
+ ii->ii_rchannel = icp->ic_num;
+ mutex_exit(&icp->ic_mutex);
+ mutex_exit(&ip->i_mutex);
+
+ TNF_PROBE_2_DEBUG(av1394_ic_init, AV1394_TNF_ISOCH, "",
+ tnf_string, msg, "channel allocated", tnf_int, num, icp->ic_num);
+
+ AV1394_TNF_EXIT(av1394_ic_init);
+ return (0);
+}
+
+void
+av1394_ic_fini(av1394_ic_t *icp)
+{
+ AV1394_TNF_ENTER(av1394_ic_fini);
+
+ av1394_ic_cleanup(icp, AV1394_CLEANUP_LEVEL_MAX);
+
+ AV1394_TNF_EXIT(av1394_ic_fini);
+}
+
+/*
+ *
+ * --- configuration routines
+ *
+ */
+static void
+av1394_ic_cleanup(av1394_ic_t *icp, int level)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+ av1394_isoch_t *ip = &avp->av_i;
+
+ ASSERT((level > 0) && (level <= AV1394_CLEANUP_LEVEL_MAX));
+
+ switch (level) {
+ default:
+ if (icp->ic_dir == AV1394_IR) {
+ av1394_ir_fini(icp);
+ } else {
+ av1394_it_fini(icp);
+ }
+ /* FALLTHRU */
+ case 3:
+ mutex_enter(&ip->i_mutex);
+ av1394_as_free(&ip->i_mmap_as, icp->ic_mmap_off);
+ ip->i_ic[icp->ic_num] = NULL;
+ mutex_exit(&ip->i_mutex);
+ /* FALLTHRU */
+ case 2:
+ av1394_ic_free_channel(icp);
+ /* FALLTHRU */
+ case 1:
+ cv_destroy(&icp->ic_xfer_cv);
+ mutex_destroy(&icp->ic_mutex);
+ kmem_free(icp, sizeof (av1394_ic_t));
+ }
+}
+
+static int
+av1394_ic_validate_init_params(iec61883_isoch_init_t *ii)
+{
+ int framesz;
+
+ ii->ii_error = 0;
+ if ((IEC61883_IMPL_VER_MAJOR(ii->ii_version) !=
+ IEC61883_IMPL_VER_MAJOR(AV1394_IEC61883_VER)) ||
+ (IEC61883_IMPL_VER_MINOR(ii->ii_version) >
+ IEC61883_IMPL_VER_MINOR(AV1394_IEC61883_VER))) {
+ TNF_PROBE_0(av1394_ic_validate_init_params_ver_error,
+ AV1394_TNF_ISOCH_ERROR, "");
+ ii->ii_error = IEC61883_ERR_VERSION;
+ return (EINVAL);
+ }
+ if ((ii->ii_pkt_size % 4) || (ii->ii_pkt_size > 512)) {
+ TNF_PROBE_0(av1394_ic_validate_init_params_pktsz_error,
+ AV1394_TNF_ISOCH_ERROR, "");
+ ii->ii_error = IEC61883_ERR_PKT_SIZE;
+ return (EINVAL);
+ }
+ framesz = ii->ii_frame_size * ii->ii_pkt_size;
+ if ((framesz > AV1394_IC_FRAME_SIZE_MAX) ||
+ (framesz < AV1394_IXL_BUFSZ_MAX)) {
+ TNF_PROBE_0(av1394_ic_validate_init_params_frsz_error,
+ AV1394_TNF_ISOCH_ERROR, "");
+ ii->ii_error = IEC61883_ERR_NOMEM;
+ return (EINVAL);
+ }
+ if ((ii->ii_direction != IEC61883_DIR_RECV) &&
+ (ii->ii_direction != IEC61883_DIR_XMIT)) {
+ TNF_PROBE_0(av1394_ic_validate_init_params_dir_error,
+ AV1394_TNF_ISOCH_ERROR, "");
+ ii->ii_error = IEC61883_ERR_INVAL;
+ return (EINVAL);
+ }
+ if (((ii->ii_direction == IEC61883_DIR_RECV) &&
+ (ii->ii_frame_cnt < AV1394_IR_NFRAMES_MIN)) ||
+ ((ii->ii_direction == IEC61883_DIR_XMIT) &&
+ (ii->ii_frame_cnt < AV1394_IT_NFRAMES_MIN))) {
+ TNF_PROBE_0(av1394_ic_validate_init_params_frcnt_error,
+ AV1394_TNF_ISOCH_ERROR, "");
+ ii->ii_error = IEC61883_ERR_INVAL;
+ return (EINVAL);
+ }
+ if ((ii->ii_bus_speed != IEC61883_S100) &&
+ (ii->ii_bus_speed != IEC61883_S200) &&
+ (ii->ii_bus_speed != IEC61883_S400)) {
+ TNF_PROBE_0(av1394_ic_validate_init_params_speed_error,
+ AV1394_TNF_ISOCH_ERROR, "");
+ ii->ii_error = IEC61883_ERR_INVAL;
+ return (EINVAL);
+ }
+ if (ii->ii_channel == 0) {
+ TNF_PROBE_0(av1394_ic_validate_init_params_chan_error,
+ AV1394_TNF_ISOCH_ERROR, "");
+ ii->ii_error = IEC61883_ERR_INVAL;
+ return (EINVAL);
+ }
+ if ((ii->ii_flags & IEC61883_PRIV_ISOCH_NOALLOC) &&
+ !av1394_ic_onebit(ii->ii_channel)) {
+ TNF_PROBE_0(av1394_ic_validate_init_params_chan_onebit_error,
+ AV1394_TNF_ISOCH_ERROR, "");
+ ii->ii_error = IEC61883_ERR_INVAL;
+ return (EINVAL);
+ }
+ /* the rest are xmit only */
+ if (ii->ii_direction == IEC61883_DIR_RECV) {
+ return (0);
+ }
+ if (((ii->ii_rate_d != 0) ||
+ (ii->ii_rate_n != IEC61883_RATE_N_DV_NTSC) &&
+ (ii->ii_rate_n != IEC61883_RATE_N_DV_PAL)) &&
+ ((ii->ii_rate_d <= 0) || (ii->ii_rate_n < 0) ||
+ ((ii->ii_rate_n != 0) && (ii->ii_rate_d / ii->ii_rate_n < 2)))) {
+ TNF_PROBE_0(av1394_ic_validate_init_params_rate_error,
+ AV1394_TNF_ISOCH_ERROR, "");
+ ii->ii_error = IEC61883_ERR_INVAL;
+ return (EINVAL);
+ }
+ if (AV1394_TS_MODE_GET_OFF(ii->ii_ts_mode) +
+ AV1394_TS_MODE_GET_SIZE(ii->ii_ts_mode) > ii->ii_pkt_size) {
+ TNF_PROBE_0(av1394_ic_validate_init_params_ts_error,
+ AV1394_TNF_ISOCH_ERROR, "");
+ ii->ii_error = IEC61883_ERR_INVAL;
+ return (EINVAL);
+ }
+ return (0);
+}
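+
+/*
+ * Note on the rate check above: it accepts either ii_rate_d == 0 with
+ * ii_rate_n naming a predefined DV rate (NTSC or PAL), or an explicit
+ * fraction with ii_rate_d > 0, ii_rate_n >= 0 (n == 0 meaning no empty
+ * CIPs at all) and d/n >= 2 otherwise. The latter bound keeps n/d at or
+ * below 1/2, which is what guarantees at most one empty CIP in a row
+ * during transmit (see the assert in av1394_it_ixl_bld_data()).
+ */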
+
+static void
+av1394_ic_set_params(av1394_inst_t *avp, iec61883_isoch_init_t *ii,
+ av1394_ic_t *icp, int num)
+{
+ av1394_ic_param_t *cp = &icp->ic_param;
+
+ mutex_enter(&icp->ic_mutex);
+ icp->ic_avp = avp;
+ icp->ic_num = num;
+ icp->ic_dir = (ii->ii_direction == IEC61883_DIR_RECV) ?
+ AV1394_IR : AV1394_IT;
+ icp->ic_pktsz = ii->ii_pkt_size;
+ icp->ic_npkts = ii->ii_frame_size;
+ icp->ic_framesz = icp->ic_pktsz * icp->ic_npkts;
+ icp->ic_nframes = ii->ii_frame_cnt;
+ cp->cp_bus_speed = ii->ii_bus_speed;
+ cp->cp_dbs = ii->ii_dbs;
+ cp->cp_fn = ii->ii_fn;
+ if (icp->ic_dir == AV1394_IT) {
+ if (ii->ii_rate_d == 0) {
+ switch (ii->ii_rate_n) {
+ case IEC61883_RATE_N_DV_NTSC:
+ cp->cp_n = av1394_rate_n_dv_ntsc;
+ cp->cp_d = av1394_rate_d_dv_ntsc;
+ break;
+ case IEC61883_RATE_N_DV_PAL:
+ cp->cp_n = av1394_rate_n_dv_pal;
+ cp->cp_d = av1394_rate_d_dv_pal;
+ break;
+ default:
+ ASSERT(0); /* can't happen */
+ }
+ } else {
+ cp->cp_n = ii->ii_rate_n;
+ cp->cp_d = ii->ii_rate_d;
+ }
+ }
+ cp->cp_ts_mode = ii->ii_ts_mode;
+ mutex_exit(&icp->ic_mutex);
+}
+
+static int
+av1394_ic_alloc_channel(av1394_ic_t *icp, uint64_t mask, int *num)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+ int ret, result;
+ t1394_isoch_singleinfo_t sii;
+ t1394_isoch_single_out_t so;
+
+ /* allocate isoch channel */
+ sii.si_channel_mask = mask;
+ sii.si_bandwidth = icp->ic_pktsz;
+ sii.rsrc_fail_target = av1394_ic_rsrc_fail;
+ sii.single_evt_arg = icp;
+ sii.si_speed = icp->ic_param.cp_bus_speed;
+
+ ret = t1394_alloc_isoch_single(avp->av_t1394_hdl, &sii, 0, &so,
+ &icp->ic_sii_hdl, &result);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_ic_alloc_channel_error,
+ AV1394_TNF_ISOCH_ERROR, "", tnf_int, result, result);
+ } else {
+ *num = so.channel_num;
+ }
+ return (ret);
+}
+
+static void
+av1394_ic_free_channel(av1394_ic_t *icp)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+
+ if (icp->ic_sii_hdl != NULL) {
+ t1394_free_isoch_single(avp->av_t1394_hdl, &icp->ic_sii_hdl, 0);
+ }
+}
+
+/*
+ *
+ * --- memory allocation and mapping routines
+ *
+ * av1394_ic_alloc_pool()
+ * Allocate an isoch pool for at least 'mincnt' and about 'cnt' frames of
+ * 'framesz' bytes each. The strategy is to allocate segments of reasonably
+ * large size, to avoid fragmentation and use resources efficiently in case
+ * of a large number of very small frames.
+ *
+ * Another problem is that RECV/SEND_BUF IXL commands can address a limited
+ * amount of buffer space (AV1394_IXL_BUFSZ_MAX), and if segment size and
+ * buffer size are not aligned, it becomes much harder to build IXL chains.
+ * To simplify things, segments shall always contain full frames.
+ *
+ * The function returns the number of frames the resulting pool can hold.
+ */
+int
+av1394_ic_alloc_pool(av1394_isoch_pool_t *pool, size_t framesz, int cnt,
+ int mincnt)
+{
+ av1394_isoch_seg_t *seg;
+ int fps; /* frames per segment */
+ int nsegs;
+ size_t totalsz, segsz;
+ int i;
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_ic_alloc_pool);
+
+ totalsz = framesz * cnt;
+ ASSERT(totalsz > 0);
+
+ /* request should be reasonable */
+ if (btopr(totalsz) > physmem / AV1394_MEM_MAX_PERCENT) {
+ TNF_PROBE_0(av1394_ic_alloc_pool_error_physmem,
+ AV1394_TNF_ISOCH_ERROR, "");
+ AV1394_TNF_EXIT(av1394_ic_alloc_pool);
+ return (0);
+ }
+
+ /* calculate segment size and number of segments */
+ segsz = framesz;
+ nsegs = cnt;
+ if (framesz < AV1394_IXL_BUFSZ_MAX / 2) {
+ fps = AV1394_IXL_BUFSZ_MAX / framesz;
+ segsz *= fps;
+ nsegs = (cnt + fps - 1) / fps; /* round up to cover all 'cnt' frames */
+ }
+ ASSERT(segsz * nsegs >= totalsz);
+
+ /* allocate segment array */
+ pool->ip_alloc_size = nsegs * sizeof (av1394_isoch_seg_t);
+ pool->ip_seg = kmem_zalloc(pool->ip_alloc_size, KM_SLEEP);
+
+ /* allocate page-aligned user-mappable memory for each segment */
+ pool->ip_nsegs = 0;
+ pool->ip_size = 0;
+ pool->ip_umem_size = 0;
+ for (i = 0; i < nsegs; i++) {
+ seg = &pool->ip_seg[i];
+
+ seg->is_umem_size = ptob(btopr(segsz));
+ seg->is_kaddr = ddi_umem_alloc(seg->is_umem_size,
+ DDI_UMEM_SLEEP, &seg->is_umem_cookie);
+ if (seg->is_kaddr == NULL) {
+ TNF_PROBE_0(av1394_ic_alloc_pool_error_umem_alloc,
+ AV1394_TNF_ISOCH_ERROR, "");
+ break;
+ }
+ seg->is_size = segsz;
+
+ pool->ip_size += seg->is_size;
+ pool->ip_umem_size += seg->is_umem_size;
+ pool->ip_nsegs++;
+ }
+
+ /* number of frames the pool can hold */
+ ret = pool->ip_size / framesz;
+ if (ret < mincnt) {
+ TNF_PROBE_0(av1394_ic_alloc_pool_error_mincnt,
+ AV1394_TNF_ISOCH_ERROR, "");
+ av1394_ic_free_pool(pool);
+ ret = 0;
+ }
+
+ AV1394_TNF_EXIT(av1394_ic_alloc_pool);
+ return (ret);
+}
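+
+/*
+ * A worked example of the sizing above (hypothetical numbers, assuming
+ * AV1394_IXL_BUFSZ_MAX were 4096): for framesz = 1000 and cnt = 10,
+ * framesz is below half the buffer limit, so fps = 4096 / 1000 = 4
+ * frames share each segment, segsz = 4000 and nsegs = (10 + 3) / 4 = 3.
+ * ddi_umem_alloc() rounds each segment up to whole pages, and the pool
+ * reports 12000 / 1000 = 12 frames -- at least the 10 requested.
+ */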
+
+void
+av1394_ic_free_pool(av1394_isoch_pool_t *pool)
+{
+ int i;
+
+ AV1394_TNF_ENTER(av1394_ic_free_pool);
+
+ if (pool->ip_seg != NULL) {
+ for (i = 0; i < pool->ip_nsegs; i++) {
+ ddi_umem_free(pool->ip_seg[i].is_umem_cookie);
+ }
+ kmem_free(pool->ip_seg, pool->ip_alloc_size);
+ pool->ip_seg = NULL;
+ }
+
+ AV1394_TNF_EXIT(av1394_ic_free_pool);
+}
+
+int
+av1394_ic_dma_setup(av1394_ic_t *icp, av1394_isoch_pool_t *pool)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+ av1394_isoch_seg_t *isp;
+ uint_t dma_dir;
+ int ret;
+ int i;
+
+ AV1394_TNF_ENTER(av1394_ic_dma_setup);
+
+ dma_dir = (icp->ic_dir == AV1394_IR) ? DDI_DMA_READ : DDI_DMA_WRITE;
+ /*
+ * Alloc and bind a DMA handle for each segment.
+ * Note that we need packet size alignment, but since ddi_umem_alloc'ed
+ * memory is page-aligned and our packets are (so far) smaller than the
+ * page size, we don't need to do anything special here.
+ */
+ for (i = 0; i < pool->ip_nsegs; i++) {
+ isp = &pool->ip_seg[i];
+
+ ret = ddi_dma_alloc_handle(avp->av_dip,
+ &avp->av_attachinfo.dma_attr, DDI_DMA_DONTWAIT, NULL,
+ &isp->is_dma_hdl);
+ if (ret != DDI_SUCCESS) {
+ TNF_PROBE_0(av1394_ic_dma_setup_error_alloc_hdl,
+ AV1394_TNF_ISOCH_ERROR, "");
+ av1394_ic_dma_cleanup(icp, pool);
+ AV1394_TNF_EXIT(av1394_ic_dma_setup);
+ return (ret);
+ }
+
+ ret = ddi_dma_addr_bind_handle(isp->is_dma_hdl, NULL,
+ isp->is_kaddr, isp->is_size,
+ dma_dir | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
+ &isp->is_dma_cookie, &isp->is_dma_ncookies);
+
+ if (ret != DDI_DMA_MAPPED) {
+ TNF_PROBE_0(av1394_ic_dma_setup_error_bind_hdl,
+ AV1394_TNF_ISOCH_ERROR, "");
+ av1394_ic_dma_cleanup(icp, pool);
+ AV1394_TNF_EXIT(av1394_ic_dma_setup);
+ return (DDI_FAILURE);
+ }
+
+ /* multiple cookies not supported (yet) */
+ if (isp->is_dma_ncookies != 1) {
+ TNF_PROBE_0(av1394_ic_dma_setup_error_ncookies,
+ AV1394_TNF_ISOCH_ERROR, "");
+ av1394_ic_dma_cleanup(icp, pool);
+ AV1394_TNF_EXIT(av1394_ic_dma_setup);
+ return (DDI_FAILURE);
+ }
+ }
+
+ AV1394_TNF_EXIT(av1394_ic_dma_setup);
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+void
+av1394_ic_dma_cleanup(av1394_ic_t *icp, av1394_isoch_pool_t *pool)
+{
+ av1394_isoch_seg_t *seg;
+ int i;
+
+ AV1394_TNF_ENTER(av1394_ic_dma_cleanup);
+
+ for (i = 0; i < pool->ip_nsegs; i++) {
+ seg = &pool->ip_seg[i];
+ if (seg->is_dma_hdl != NULL) {
+ if (seg->is_dma_ncookies > 0) {
+ (void) ddi_dma_unbind_handle(seg->is_dma_hdl);
+ }
+ ddi_dma_free_handle(&seg->is_dma_hdl);
+ }
+ }
+
+ AV1394_TNF_EXIT(av1394_ic_dma_cleanup);
+}
+
+/*
+ * sync frames for CPU access
+ */
+void
+av1394_ic_dma_sync_frames(av1394_ic_t *icp, int idx, int cnt,
+ av1394_isoch_pool_t *pool, uint_t type)
+{
+ int i;
+ int j = idx;
+
+ for (i = cnt; i > 0; i--) {
+ (void) ddi_dma_sync(pool->ip_seg[j].is_dma_hdl, 0,
+ icp->ic_framesz, type);
+ j = (j + 1) % icp->ic_nframes;
+ }
+}
+
+/*
+ *
+ * --- transfer
+ *
+ */
+int
+av1394_ic_start(av1394_ic_t *icp)
+{
+ if (icp->ic_dir == AV1394_IR) {
+ return (av1394_ir_start(icp));
+ } else {
+ return (av1394_it_start(icp));
+ }
+}
+
+int
+av1394_ic_stop(av1394_ic_t *icp)
+{
+ if (icp->ic_dir == AV1394_IR) {
+ return (av1394_ir_stop(icp));
+ } else {
+ return (av1394_it_stop(icp));
+ }
+}
+
+/*
+ *
+ * --- callbacks
+ *
+ */
+/*ARGSUSED*/
+static void
+av1394_ic_rsrc_fail(t1394_isoch_single_handle_t t1394_sii_hdl, opaque_t arg,
+ t1394_isoch_rsrc_error_t fail_args)
+{
+ AV1394_TNF_ENTER(av1394_ic_rsrc_fail);
+
+ /* XXX this could be handled more gracefully */
+ cmn_err(CE_CONT, "av1394: can't reallocate isochronous resources"
+ " after bus reset\n");
+
+ AV1394_TNF_EXIT(av1394_ic_rsrc_fail);
+}
+
+/*
+ *
+ * --- misc
+ *
+ *
+ * av1394_ic_ixl_seg_decomp()
+ * Calculate the best decomposition of a segment into buffers.
+ * Return number of buffers, buffer and tail buffer sizes.
+ *
+ * We are looking to divide a segment evenly into equally-sized or almost
+ * equally-sized buffers. Maximum buffer size is AV1394_IXL_BUFSZ_MAX.
+ * Algorithm:
+ * 1. If segment size divides evenly by maximum size, terminate.
+ * 2. n = number of maximum-size buffers that fit into the segment.
+ * 3. Divide the segment by n+1, calculate buffer size and tail
+ * (remainder) size.
+ * 4. If the tail can be appended to the last buffer and the resulting
+ * buffer is still less than maximum size, terminate.
+ * 5. Repeat steps 3-4 for n+2, n+3, ... until the division becomes
+ *    too small.
+ *
+ * Since all sizes are packet-aligned, we scale them down (divide by
+ * packet size) in the beginning, do all calculations and scale them up
+ * in the end.
+ */
+int
+av1394_ic_ixl_seg_decomp(size_t segsz, size_t pktsz, size_t *bufszp,
+ size_t *tailszp)
+{
+ size_t nbufs, bufsz, tailsz;
+ size_t maxsz = AV1394_IXL_BUFSZ_MAX;
+
+ ASSERT(segsz >= maxsz);
+ ASSERT(segsz % pktsz == 0);
+
+ if (segsz % maxsz == 0) {
+ *tailszp = *bufszp = maxsz;
+ return (segsz / *bufszp - 1);
+ }
+
+ maxsz /= pktsz;
+ segsz /= pktsz;
+
+ nbufs = segsz / maxsz;
+ do {
+ nbufs++;
+ bufsz = segsz / nbufs;
+ tailsz = bufsz + (segsz - bufsz * nbufs);
+ } while ((tailsz > maxsz) && ((segsz / (nbufs + 1)) > 1));
+ nbufs--;
+
+ *bufszp = bufsz * pktsz;
+ *tailszp = tailsz * pktsz;
+ return (nbufs);
+}
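+
+/*
+ * A worked example of the decomposition above (hypothetical sizes):
+ * with pktsz = 512, segsz = 13 * 512 = 6656 and a maximum buffer of
+ * 8 packets (4096 bytes), the sizes scale down to segsz = 13 and
+ * maxsz = 8. Since 13 % 8 != 0, the loop tries nbufs = 2: bufsz = 6,
+ * tailsz = 6 + (13 - 12) = 7; 7 <= 8, so it stops and nbufs drops back
+ * to 1. The caller gets one 6-packet buffer plus a 7-packet tail,
+ * 6 + 7 = 13 packets total.
+ */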
+
+void
+av1394_ic_ixl_dump(ixl1394_command_t *cmd)
+{
+ ixl1394_callback_t *cb;
+ ixl1394_jump_t *jmp;
+ ixl1394_xfer_buf_t *buf;
+ ixl1394_xfer_pkt_t *pkt;
+
+ while (cmd) {
+ switch (cmd->ixl_opcode) {
+ case IXL1394_OP_LABEL:
+ cmn_err(CE_CONT, "%p: LABEL\n", (void *)cmd);
+ break;
+ case IXL1394_OP_RECV_BUF:
+ case IXL1394_OP_RECV_BUF_U:
+ buf = (ixl1394_xfer_buf_t *)cmd;
+ cmn_err(CE_CONT, "%p: RECV_BUF addr=%p size=%d "
+ "pkt_size=%d\n", (void *)cmd, (void *)buf->mem_bufp,
+ buf->size, buf->pkt_size);
+ break;
+ case IXL1394_OP_SEND_BUF:
+ case IXL1394_OP_SEND_BUF_U:
+ buf = (ixl1394_xfer_buf_t *)cmd;
+ cmn_err(CE_CONT, "%p: SEND_BUF addr=%p size=%d "
+ "pkt_size=%d\n", (void *)cmd, (void *)buf->mem_bufp,
+ buf->size, buf->pkt_size);
+ break;
+ case IXL1394_OP_SEND_PKT_ST:
+ pkt = (ixl1394_xfer_pkt_t *)cmd;
+ cmn_err(CE_CONT, "%p: SEND_PKT_ST addr=%p size=%d\n",
+ (void *)cmd, (void *)pkt->mem_bufp, pkt->size);
+ break;
+ case IXL1394_OP_CALLBACK:
+ case IXL1394_OP_CALLBACK_U:
+ cb = (ixl1394_callback_t *)cmd;
+ cmn_err(CE_CONT, "%p: CALLBACK %p\n", (void *)cmd,
+ (void *)cb->callback);
+ break;
+ case IXL1394_OP_JUMP:
+ jmp = (ixl1394_jump_t *)cmd;
+ cmn_err(CE_CONT, "%p: JUMP %p\n", (void *)cmd,
+ (void *)jmp->label);
+ break;
+ case IXL1394_OP_JUMP_U:
+ jmp = (ixl1394_jump_t *)cmd;
+ cmn_err(CE_CONT, "%p: JUMP_U %p\n", (void *)cmd,
+ (void *)jmp->label);
+ break;
+ case IXL1394_OP_STORE_TIMESTAMP:
+ cmn_err(CE_CONT, "%p: STORE_TIMESTAMP\n", (void *)cmd);
+ break;
+ default:
+ cmn_err(CE_CONT, "%p: other\n", (void *)cmd);
+ }
+ cmd = cmd->next_ixlp;
+ }
+}
+
+/*
+ * trigger a soft interrupt for a given channel and preq type,
+ * unless one is already pending
+ */
+void
+av1394_ic_trigger_softintr(av1394_ic_t *icp, int num, int preq)
+{
+ av1394_isoch_t *ip = &icp->ic_avp->av_i;
+ uint64_t chmask = (1UL << num);
+
+ if (((ip->i_softintr_ch & chmask) == 0) ||
+ ((icp->ic_preq & preq) == 0)) {
+ ip->i_softintr_ch |= chmask;
+ icp->ic_preq |= preq;
+ ddi_trigger_softintr(ip->i_softintr_id);
+ }
+}
+
+/*
+ * reverse bits in a 64-bit word
+ */
+uint64_t
+av1394_ic_bitreverse(uint64_t x)
+{
+ x = (((x >> 1) & 0x5555555555555555) | ((x & 0x5555555555555555) << 1));
+ x = (((x >> 2) & 0x3333333333333333) | ((x & 0x3333333333333333) << 2));
+ x = (((x >> 4) & 0x0f0f0f0f0f0f0f0f) | ((x & 0x0f0f0f0f0f0f0f0f) << 4));
+ x = (((x >> 8) & 0x00ff00ff00ff00ff) | ((x & 0x00ff00ff00ff00ff) << 8));
+ x = (((x >> 16) & 0x0000ffff0000ffff) |
+ ((x & 0x0000ffff0000ffff) << 16));
+ return ((x >> 32) | (x << 32));
+}
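+
+/*
+ * Each line above swaps progressively larger bit groups: single bits,
+ * pairs, nibbles, bytes and 16-bit halves, and the final line swaps
+ * the two 32-bit words. For example, av1394_ic_bitreverse(0x1) yields
+ * 0x8000000000000000, i.e. bit 0 maps to bit 63.
+ */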
+
+/*
+ * return B_TRUE if a 64-bit value has only one bit set to 1:
+ * for a power of two i, (~i + 1) is -i, and (-i | ~i) covers every bit
+ * position, i.e. equals all-ones. Note that i == 0 also passes this
+ * test, but a zero channel mask is rejected during argument validation.
+ */
+boolean_t
+av1394_ic_onebit(uint64_t i)
+{
+ return (((~i + 1) | ~i) == 0xFFFFFFFFFFFFFFFF);
+}
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_recv.c b/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_recv.c
new file mode 100644
index 0000000000..1644970803
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_recv.c
@@ -0,0 +1,850 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * av1394 isochronous receive module
+ */
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+/* configuration routines */
+static void av1394_ir_cleanup(av1394_ic_t *, int);
+static int av1394_ir_build_ixl(av1394_ic_t *);
+static void av1394_ir_ixl_label_init(av1394_ir_ixl_data_t *,
+ ixl1394_command_t *);
+static void av1394_ir_ixl_buf_init(av1394_ic_t *, ixl1394_xfer_buf_t *,
+ av1394_isoch_seg_t *, off_t, uint16_t, ixl1394_command_t *);
+static void av1394_ir_ixl_cb_init(av1394_ic_t *, av1394_ir_ixl_data_t *,
+ int);
+static void av1394_ir_ixl_jump_init(av1394_ic_t *, av1394_ir_ixl_data_t *,
+ int);
+static void av1394_ir_destroy_ixl(av1394_ic_t *);
+static int av1394_ir_alloc_isoch_dma(av1394_ic_t *);
+static void av1394_ir_free_isoch_dma(av1394_ic_t *);
+static void av1394_ir_dma_sync_frames(av1394_ic_t *, int, int);
+
+/* callbacks */
+static void av1394_ir_ixl_frame_cb(opaque_t, struct ixl1394_callback *);
+static void av1394_ir_overflow_resume(av1394_ic_t *icp);
+static void av1394_ir_dma_stopped_cb(t1394_isoch_dma_handle_t,
+ opaque_t, id1394_isoch_dma_stopped_t);
+
+/* data transfer routines */
+static int av1394_ir_add_frames(av1394_ic_t *, int, int);
+static int av1394_ir_wait_frames(av1394_ic_t *, int *, int *);
+static int av1394_ir_copyout(av1394_ic_t *, struct uio *, int *);
+static void av1394_ir_zero_pkts(av1394_ic_t *, int, int);
+
+/*
+ * hi & lo watermark tunables, expressed as a count subtracted from the
+ * total number of frames (see av1394_ir_init())
+ */
+int av1394_ir_hiwat_sub = 2;
+int av1394_ir_lowat_sub = 3;
+int av1394_ir_dump_ixl = 0;
+
+#define AV1394_TNF_ENTER(func) \
+ TNF_PROBE_0_DEBUG(func##_enter, AV1394_TNF_ISOCH_STACK, "");
+
+#define AV1394_TNF_EXIT(func) \
+ TNF_PROBE_0_DEBUG(func##_exit, AV1394_TNF_ISOCH_STACK, "");
+
+int
+av1394_ir_init(av1394_ic_t *icp, int *error)
+{
+ av1394_ir_t *irp = &icp->ic_ir;
+ av1394_isoch_pool_t *pool = &irp->ir_data_pool;
+ int nframes;
+
+ AV1394_TNF_ENTER(av1394_ir_init);
+
+ nframes = av1394_ic_alloc_pool(pool, icp->ic_framesz, icp->ic_nframes,
+ AV1394_IR_NFRAMES_MIN);
+ if (nframes == 0) {
+ *error = IEC61883_ERR_NOMEM;
+ AV1394_TNF_EXIT(av1394_ir_init);
+ return (EINVAL);
+ }
+ mutex_enter(&icp->ic_mutex);
+ icp->ic_nframes = nframes;
+ irp->ir_hiwat = nframes - av1394_ir_hiwat_sub;
+ irp->ir_lowat = nframes - av1394_ir_lowat_sub;
+
+ if (av1394_ic_dma_setup(icp, pool) != DDI_SUCCESS) {
+ mutex_exit(&icp->ic_mutex);
+ *error = IEC61883_ERR_NOMEM;
+ av1394_ir_cleanup(icp, 1);
+ AV1394_TNF_EXIT(av1394_ir_init);
+ return (EINVAL);
+ }
+
+ if (av1394_ir_build_ixl(icp) != DDI_SUCCESS) {
+ mutex_exit(&icp->ic_mutex);
+ *error = IEC61883_ERR_NOMEM;
+ av1394_ir_cleanup(icp, 2);
+ AV1394_TNF_EXIT(av1394_ir_init);
+ return (EINVAL);
+ }
+ mutex_exit(&icp->ic_mutex);
+
+ if (av1394_ir_alloc_isoch_dma(icp) != DDI_SUCCESS) {
+ *error = IEC61883_ERR_NOMEM;
+ av1394_ir_cleanup(icp, 3);
+ AV1394_TNF_EXIT(av1394_ir_init);
+ return (EINVAL);
+ }
+
+ AV1394_TNF_EXIT(av1394_ir_init);
+ return (0);
+}
+
+void
+av1394_ir_fini(av1394_ic_t *icp)
+{
+ AV1394_TNF_ENTER(av1394_ir_fini);
+
+ av1394_ir_cleanup(icp, AV1394_CLEANUP_LEVEL_MAX);
+
+ AV1394_TNF_EXIT(av1394_ir_fini);
+}
+
+int
+av1394_ir_start(av1394_ic_t *icp)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+ av1394_ir_t *irp = &icp->ic_ir;
+ id1394_isoch_dma_ctrlinfo_t idma_ctrlinfo = { 0 };
+ int result;
+ int err;
+ int ret = 0;
+
+ AV1394_TNF_ENTER(av1394_ir_start);
+
+ mutex_enter(&icp->ic_mutex);
+ if (icp->ic_state != AV1394_IC_IDLE) {
+ mutex_exit(&icp->ic_mutex);
+ return (0);
+ }
+
+ irp->ir_first_full = 0;
+ irp->ir_last_empty = icp->ic_nframes - 1;
+ irp->ir_nfull = 0;
+ irp->ir_nempty = icp->ic_nframes;
+ irp->ir_read_cnt = 0;
+ mutex_exit(&icp->ic_mutex);
+
+ err = t1394_start_isoch_dma(avp->av_t1394_hdl, icp->ic_isoch_hdl,
+ &idma_ctrlinfo, 0, &result);
+ if (err == DDI_SUCCESS) {
+ mutex_enter(&icp->ic_mutex);
+ icp->ic_state = AV1394_IC_DMA;
+ mutex_exit(&icp->ic_mutex);
+ } else {
+ TNF_PROBE_1(av1394_ir_start_error, AV1394_TNF_ISOCH_ERROR, "",
+ tnf_int, result, result);
+ ret = EIO;
+ }
+
+ AV1394_TNF_EXIT(av1394_ir_start);
+ return (ret);
+}
+
+int
+av1394_ir_stop(av1394_ic_t *icp)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+
+ AV1394_TNF_ENTER(av1394_ir_stop);
+
+ mutex_enter(&icp->ic_mutex);
+ if (icp->ic_state != AV1394_IC_IDLE) {
+ mutex_exit(&icp->ic_mutex);
+ t1394_stop_isoch_dma(avp->av_t1394_hdl, icp->ic_isoch_hdl, 0);
+ mutex_enter(&icp->ic_mutex);
+ icp->ic_state = AV1394_IC_IDLE;
+ }
+ mutex_exit(&icp->ic_mutex);
+
+ AV1394_TNF_EXIT(av1394_ir_stop);
+ return (0);
+}
+
+int
+av1394_ir_recv(av1394_ic_t *icp, iec61883_recv_t *recv)
+{
+ int ret = 0;
+ int idx, cnt;
+
+ idx = recv->rx_xfer.xf_empty_idx;
+ cnt = recv->rx_xfer.xf_empty_cnt;
+
+ /* check arguments */
+ if ((idx < 0) || (idx >= icp->ic_nframes) ||
+ (cnt < 0) || (cnt > icp->ic_nframes)) {
+ TNF_PROBE_2(av1394_ir_recv_error_args, AV1394_TNF_ISOCH_ERROR,
+ "", tnf_int, idx, idx, tnf_int, cnt, cnt);
+ return (EINVAL);
+ }
+
+ mutex_enter(&icp->ic_mutex);
+ if (cnt > 0) {
+ /* add empty frames to the pool */
+ if ((ret = av1394_ir_add_frames(icp, idx, cnt)) != 0) {
+ mutex_exit(&icp->ic_mutex);
+ return (ret);
+ }
+ }
+
+ /* wait for new frames to arrive */
+ ret = av1394_ir_wait_frames(icp,
+ &recv->rx_xfer.xf_full_idx, &recv->rx_xfer.xf_full_cnt);
+ mutex_exit(&icp->ic_mutex);
+
+ return (ret);
+}
+
+int
+av1394_ir_read(av1394_ic_t *icp, struct uio *uiop)
+{
+ av1394_ir_t *irp = &icp->ic_ir;
+ int ret = 0;
+ int empty_cnt;
+
+ AV1394_TNF_ENTER(av1394_ir_read);
+
+ mutex_enter(&icp->ic_mutex);
+ while (uiop->uio_resid) {
+ /* wait for full frames, if necessary */
+ if (irp->ir_read_cnt == 0) {
+ irp->ir_read_off = 0;
+ ret = av1394_ir_wait_frames(icp,
+ &irp->ir_read_idx, &irp->ir_read_cnt);
+ if (ret != 0) {
+ mutex_exit(&icp->ic_mutex);
+ AV1394_TNF_EXIT(av1394_ir_read);
+ return (ret);
+ }
+ }
+
+ /* copyout the data */
+ ret = av1394_ir_copyout(icp, uiop, &empty_cnt);
+
+ /* return freed frames to the pool */
+ if (empty_cnt > 0) {
+ av1394_ir_zero_pkts(icp, irp->ir_read_idx, empty_cnt);
+ ret = av1394_ir_add_frames(icp, irp->ir_read_idx,
+ empty_cnt);
+ irp->ir_read_idx += empty_cnt;
+ irp->ir_read_idx %= icp->ic_nframes;
+ irp->ir_read_cnt -= empty_cnt;
+ }
+ }
+ mutex_exit(&icp->ic_mutex);
+
+ AV1394_TNF_EXIT(av1394_ir_read);
+ return (ret);
+}
+
+/*
+ *
+ * --- configuration routines
+ *
+ */
+static void
+av1394_ir_cleanup(av1394_ic_t *icp, int level)
+{
+ av1394_isoch_pool_t *pool = &icp->ic_ir.ir_data_pool;
+
+ ASSERT((level > 0) && (level <= AV1394_CLEANUP_LEVEL_MAX));
+
+ switch (level) {
+ default:
+ av1394_ir_free_isoch_dma(icp);
+ /* FALLTHRU */
+ case 3:
+ av1394_ir_destroy_ixl(icp);
+ /* FALLTHRU */
+ case 2:
+ av1394_ic_dma_cleanup(icp, pool);
+ /* FALLTHRU */
+ case 1:
+ av1394_ic_free_pool(pool);
+ /* FALLTHRU */
+ }
+}
+
+/*
+ * av1394_ir_build_ixl()
+ * Build an IXL chain to receive CIP data. The smallest instance of data
+ * that can be received is a packet, typically 512 bytes. Frames consist
+ * of a number of packets, typically 250-300. Packet size, frame size and
+ * number of frames allocated are set by a user process. The received data
+ * made available to the user process in full frames, hence there an IXL
+ * callback at the end of each frame. A sequence of IXL commands that
+ * receives one frame is further referred to as an IXL data block.
+ *
+ * During normal operation, frames are in a circular list and IXL chain
+ * does not change. When the user process does not keep up with the
+ * data flow and there are too few empty frames left, the jump following
+ * last empty frame is dynamically updated to point to NULL -- otherwise
+ * the first full frame would be overwritten. When IXL execution reaches
+ * the nulled jump, it just waits until the driver updates it again or
+ * stops the transfer. Once a user process frees up enough frames, the
+ * jump is restored and transfer continues. User process will be able to
+ * detect dropped packets using continuity conters embedded in the data.
+ *
+ * Because RECV_BUF buffer size is limited to AV1394_IXL_BUFSZ_MAX, and due
+ * to isoch pool segmentaion, the number of RECV_BUF commands per IXL data
+ * block depends on frame size. Also, to simplify calculations, we consider
+ * a sequence of RECV_BUF commands to consist of two parts: zero or more
+ * equal-sized RECV_BUF commands followed by one "tail" REC_BUF command,
+ * whose size may not be equal to others.
+ *
+ * Schematically the IXL chain looks like this:
+ *
+ * ...
+ * LABEL N;
+ * RECV_BUF(buf)
+ * ...
+ * RECV_BUF(tail)
+ * CALLBACK(frame done);
+ * JUMP_U(LABEL (N+1)%nframes or NULL);
+ * ...
+ */
+static int
+av1394_ir_build_ixl(av1394_ic_t *icp)
+{
+ av1394_ir_t *irp = &icp->ic_ir;
+ av1394_isoch_pool_t *pool = &irp->ir_data_pool;
+ size_t segsz;
+ av1394_ir_ixl_data_t *dp;
+ int i; /* frame index */
+ int j; /* buffer index */
+ int k;
+ int spf; /* frames per segment */
+ int bpf; /* buffers per frame */
+
+ AV1394_TNF_ENTER(av1394_ir_build_ixl);
+
+ /* allocate space for IXL data blocks */
+ irp->ir_ixl_data = kmem_zalloc(icp->ic_nframes *
+ sizeof (av1394_ir_ixl_data_t), KM_SLEEP);
+
+ /*
+ * calculate and allocate space for buf commands
+ */
+ segsz = pool->ip_seg[0].is_size;
+ ASSERT(segsz * pool->ip_nsegs == pool->ip_size); /* equal-size segs */
+ ASSERT(segsz % icp->ic_pktsz == 0); /* packet-aligned */
+ ASSERT(segsz >= icp->ic_framesz); /* 1+ full frames per segment */
+
+ if (icp->ic_framesz <= AV1394_IXL_BUFSZ_MAX) {
+ /* RECV_BUF == frame, one or more frames per segment */
+ irp->ir_ixl_tailsz = irp->ir_ixl_bufsz = icp->ic_framesz;
+ irp->ir_ixl_bpf = 0;
+ } else {
+ /* segment == frame, several RECV_BUF's per segment */
+ ASSERT(segsz == icp->ic_framesz);
+ /* calculate the best decomposition of a segment into buffers */
+ irp->ir_ixl_bpf = av1394_ic_ixl_seg_decomp(segsz,
+ icp->ic_pktsz, &irp->ir_ixl_bufsz, &irp->ir_ixl_tailsz);
+ }
+ spf = segsz / icp->ic_framesz;
+ bpf = irp->ir_ixl_bpf + 1;
+
+ irp->ir_ixl_nbufs = bpf * icp->ic_nframes;
+ irp->ir_ixl_buf = kmem_zalloc(irp->ir_ixl_nbufs *
+ sizeof (ixl1394_xfer_buf_t), KM_SLEEP);
+
+ /* initialize data blocks and receive buffers */
+ for (i = 0; i < icp->ic_nframes; i++) {
+ dp = &irp->ir_ixl_data[i];
+
+ av1394_ir_ixl_label_init(dp,
+ (ixl1394_command_t *)&irp->ir_ixl_buf[i * bpf]);
+
+ /* regular buffers, if any */
+ for (j = 0; j < irp->ir_ixl_bpf; j++) {
+ k = j + i * bpf;
+ av1394_ir_ixl_buf_init(icp, &irp->ir_ixl_buf[k],
+ &pool->ip_seg[i], j * irp->ir_ixl_bufsz,
+ irp->ir_ixl_bufsz,
+ (ixl1394_command_t *)&irp->ir_ixl_buf[k + 1]);
+ }
+
+ /* tail buffer */
+ if (icp->ic_framesz <= AV1394_IXL_BUFSZ_MAX) {
+ av1394_ir_ixl_buf_init(icp, &irp->ir_ixl_buf[i],
+ &pool->ip_seg[i / spf],
+ (i % spf) * irp->ir_ixl_tailsz,
+ irp->ir_ixl_tailsz,
+ (ixl1394_command_t *)&dp->rd_cb);
+ } else {
+ k = irp->ir_ixl_bpf + i * bpf;
+ av1394_ir_ixl_buf_init(icp, &irp->ir_ixl_buf[k],
+ &pool->ip_seg[i],
+ irp->ir_ixl_bpf * irp->ir_ixl_bufsz,
+ irp->ir_ixl_tailsz,
+ (ixl1394_command_t *)&dp->rd_cb);
+ }
+
+ av1394_ir_ixl_cb_init(icp, dp, i);
+ av1394_ir_ixl_jump_init(icp, dp, i);
+ }
+
+ irp->ir_ixlp = (ixl1394_command_t *)irp->ir_ixl_data;
+
+ if (av1394_ir_dump_ixl) {
+ av1394_ic_ixl_dump(irp->ir_ixlp);
+ }
+
+ AV1394_TNF_EXIT(av1394_ir_build_ixl);
+ return (DDI_SUCCESS);
+}
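+
+/*
+ * To illustrate the two layouts above (hypothetical sizes, assuming
+ * AV1394_IXL_BUFSZ_MAX were 4096): with 1000-byte frames the first
+ * branch applies -- each frame is a single RECV_BUF (ir_ixl_bpf = 0,
+ * tail only) and spf = 4 such frames share one 4000-byte segment. With
+ * 12000-byte frames of 500-byte packets, each frame is its own segment
+ * (spf = 1) and av1394_ic_ixl_seg_decomp() splits it into three
+ * 3000-byte RECV_BUF commands plus a 3000-byte tail.
+ */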
+
+static void
+av1394_ir_ixl_label_init(av1394_ir_ixl_data_t *dp, ixl1394_command_t *nextp)
+{
+ dp->rd_label.ixl_opcode = IXL1394_OP_LABEL;
+ dp->rd_label.next_ixlp = nextp;
+}
+
+static void
+av1394_ir_ixl_buf_init(av1394_ic_t *icp, ixl1394_xfer_buf_t *buf,
+ av1394_isoch_seg_t *isp, off_t offset, uint16_t size,
+ ixl1394_command_t *nextp)
+{
+ buf->ixl_opcode = IXL1394_OP_RECV_BUF;
+ buf->size = size;
+ buf->pkt_size = icp->ic_pktsz;
+ buf->ixl_buf._dmac_ll = isp->is_dma_cookie.dmac_laddress + offset;
+ buf->mem_bufp = isp->is_kaddr + offset;
+ buf->next_ixlp = nextp;
+}
+
+/*ARGSUSED*/
+static void
+av1394_ir_ixl_cb_init(av1394_ic_t *icp, av1394_ir_ixl_data_t *dp, int i)
+{
+ dp->rd_cb.ixl_opcode = IXL1394_OP_CALLBACK;
+ dp->rd_cb.callback = av1394_ir_ixl_frame_cb;
+ dp->rd_cb.callback_arg = (void *)(intptr_t)i;
+ dp->rd_cb.next_ixlp = (ixl1394_command_t *)&dp->rd_jump;
+}
+
+static void
+av1394_ir_ixl_jump_init(av1394_ic_t *icp, av1394_ir_ixl_data_t *dp, int i)
+{
+ av1394_ir_t *irp = &icp->ic_ir;
+ int next_idx;
+ ixl1394_command_t *jump_cmd;
+
+ next_idx = (i + 1) % icp->ic_nframes;
+ jump_cmd = (ixl1394_command_t *)&irp->ir_ixl_data[next_idx];
+
+ dp->rd_jump.ixl_opcode = IXL1394_OP_JUMP_U;
+ dp->rd_jump.label = jump_cmd;
+ dp->rd_jump.next_ixlp = (next_idx != 0) ? jump_cmd : NULL;
+}
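+
+/*
+ * Note: the jump label always wraps to the next frame's data block, but
+ * next_ixlp is left NULL for the last frame so that a linear walk over
+ * next_ixlp (e.g. av1394_ic_ixl_dump()) terminates instead of looping
+ * around the circular chain forever.
+ */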
+
+static void
+av1394_ir_destroy_ixl(av1394_ic_t *icp)
+{
+ av1394_ir_t *irp = &icp->ic_ir;
+
+ AV1394_TNF_ENTER(av1394_ir_destroy_ixl);
+
+ mutex_enter(&icp->ic_mutex);
+ kmem_free(irp->ir_ixl_buf,
+ irp->ir_ixl_nbufs * sizeof (ixl1394_xfer_buf_t));
+ kmem_free(irp->ir_ixl_data,
+ icp->ic_nframes * sizeof (av1394_ir_ixl_data_t));
+
+ irp->ir_ixlp = NULL;
+ irp->ir_ixl_buf = NULL;
+ irp->ir_ixl_data = NULL;
+ mutex_exit(&icp->ic_mutex);
+
+ AV1394_TNF_EXIT(av1394_ir_destroy_ixl);
+}
+
+static int
+av1394_ir_alloc_isoch_dma(av1394_ic_t *icp)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+ av1394_ir_t *irp = &icp->ic_ir;
+ id1394_isoch_dmainfo_t di;
+ int result;
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_ir_alloc_isoch_dma);
+
+ di.ixlp = irp->ir_ixlp;
+ di.channel_num = icp->ic_num;
+ di.global_callback_arg = icp;
+ di.idma_options = ID1394_LISTEN_PKT_MODE;
+ di.isoch_dma_stopped = av1394_ir_dma_stopped_cb;
+ di.idma_evt_arg = icp;
+
+ if ((ret = t1394_alloc_isoch_dma(avp->av_t1394_hdl, &di, 0,
+ &icp->ic_isoch_hdl, &result)) != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_ir_alloc_isoch_dma_error,
+ AV1394_TNF_ISOCH_ERROR, "", tnf_int, result, result);
+ }
+
+ AV1394_TNF_EXIT(av1394_ir_alloc_isoch_dma);
+ return (ret);
+}
+
+static void
+av1394_ir_free_isoch_dma(av1394_ic_t *icp)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+
+ AV1394_TNF_ENTER(av1394_ir_free_isoch_rsrc);
+
+ t1394_free_isoch_dma(avp->av_t1394_hdl, 0, &icp->ic_isoch_hdl);
+
+ AV1394_TNF_EXIT(av1394_ir_free_isoch_rsrc);
+}
+
+static void
+av1394_ir_dma_sync_frames(av1394_ic_t *icp, int idx, int cnt)
+{
+ av1394_ic_dma_sync_frames(icp, idx, cnt,
+ &icp->ic_ir.ir_data_pool, DDI_DMA_SYNC_FORCPU);
+}
+
+/*
+ *
+ * --- callbacks
+ *
+ */
+/*ARGSUSED*/
+static void
+av1394_ir_ixl_frame_cb(opaque_t arg, struct ixl1394_callback *cb)
+{
+ av1394_ic_t *icp = arg;
+ av1394_isoch_t *ip = &icp->ic_avp->av_i;
+ av1394_ir_t *irp = &icp->ic_ir;
+
+ AV1394_TNF_ENTER(av1394_ir_ixl_frame_cb);
+
+ mutex_enter(&ip->i_mutex);
+ mutex_enter(&icp->ic_mutex);
+ if (irp->ir_nfull < icp->ic_nframes) {
+ irp->ir_nfull++;
+ irp->ir_nempty--;
+ cv_broadcast(&icp->ic_xfer_cv);
+
+ /*
+ * signal the overflow condition early, so we get enough
+ * time to handle it before old data is overwritten
+ */
+ if (irp->ir_nfull >= irp->ir_hiwat) {
+ av1394_ic_trigger_softintr(icp, icp->ic_num,
+ AV1394_PREQ_IR_OVERFLOW);
+ }
+ }
+ mutex_exit(&icp->ic_mutex);
+ mutex_exit(&ip->i_mutex);
+
+ AV1394_TNF_EXIT(av1394_ir_ixl_frame_cb);
+}
+
+/*
+ * received data overflow
+ */
+void
+av1394_ir_overflow(av1394_ic_t *icp)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+ av1394_ir_t *irp = &icp->ic_ir;
+ int idx;
+ ixl1394_jump_t *old_jmp;
+ ixl1394_jump_t new_jmp;
+ id1394_isoch_dma_updateinfo_t update_info;
+ int err;
+ int result;
+
+ AV1394_TNF_ENTER(av1394_ir_overflow);
+
+ /*
+ * in the circular IXL chain overflow means overwriting the least
+ * recent data. to avoid that, we suspend the transfer by NULL'ing
+ * the last IXL block until the user process frees up some frames.
+ */
+ idx = irp->ir_last_empty;
+
+ old_jmp = &irp->ir_ixl_data[idx].rd_jump;
+
+ new_jmp.ixl_opcode = IXL1394_OP_JUMP_U;
+ new_jmp.label = NULL;
+ new_jmp.next_ixlp = NULL;
+
+ update_info.orig_ixlp = (ixl1394_command_t *)old_jmp;
+ update_info.temp_ixlp = (ixl1394_command_t *)&new_jmp;
+ update_info.ixl_count = 1;
+
+ mutex_exit(&icp->ic_mutex);
+ err = t1394_update_isoch_dma(avp->av_t1394_hdl, icp->ic_isoch_hdl,
+ &update_info, 0, &result);
+ mutex_enter(&icp->ic_mutex);
+
+ if (err == DDI_SUCCESS) {
+ irp->ir_overflow_idx = idx;
+ icp->ic_state = AV1394_IC_SUSPENDED;
+ } else {
+ TNF_PROBE_2(av1394_ir_overflow_error_update,
+ AV1394_TNF_ISOCH_ERROR, "", tnf_int, err, err,
+ tnf_int, result, result);
+ }
+
+ AV1394_TNF_EXIT(av1394_ir_overflow);
+}
+
+/*
+ * restore from overflow condition
+ */
+static void
+av1394_ir_overflow_resume(av1394_ic_t *icp)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+ av1394_ir_t *irp = &icp->ic_ir;
+ int idx, next_idx;
+ ixl1394_jump_t *old_jmp;
+ ixl1394_jump_t new_jmp;
+ id1394_isoch_dma_updateinfo_t update_info;
+ int err;
+ int result;
+
+ AV1394_TNF_ENTER(av1394_ir_overflow_resume);
+
+ /*
+ * restore the jump command we NULL'ed in av1394_ir_overflow()
+ */
+ idx = irp->ir_overflow_idx;
+ next_idx = (idx + 1) % icp->ic_nframes;
+
+ old_jmp = &irp->ir_ixl_data[idx].rd_jump;
+
+ new_jmp.ixl_opcode = IXL1394_OP_JUMP_U;
+ new_jmp.label = (ixl1394_command_t *)&irp->ir_ixl_data[next_idx];
+ new_jmp.next_ixlp = NULL;
+
+ update_info.orig_ixlp = (ixl1394_command_t *)old_jmp;
+ update_info.temp_ixlp = (ixl1394_command_t *)&new_jmp;
+ update_info.ixl_count = 1;
+
+ mutex_exit(&icp->ic_mutex);
+ err = t1394_update_isoch_dma(avp->av_t1394_hdl,
+ icp->ic_isoch_hdl, &update_info, 0, &result);
+ mutex_enter(&icp->ic_mutex);
+
+ if (err == DDI_SUCCESS) {
+ icp->ic_state = AV1394_IC_DMA;
+ } else {
+ TNF_PROBE_2(av1394_ir_overflow_resume_error_update,
+ AV1394_TNF_ISOCH_ERROR, "", tnf_int, err, err,
+ tnf_int, result, result);
+ }
+
+ AV1394_TNF_EXIT(av1394_ir_overflow_resume);
+}
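+
+/*
+ * Putting the routines above together, the overflow lifecycle is:
+ * av1394_ir_ixl_frame_cb() triggers a soft interrupt once ir_nfull
+ * reaches ir_hiwat, which leads to av1394_ir_overflow() NULLing the
+ * jump after the last empty frame and marking the channel
+ * AV1394_IC_SUSPENDED; once the reader returns frames through
+ * av1394_ir_add_frames() and ir_nempty reaches ir_lowat,
+ * av1394_ir_overflow_resume() restores the jump and the channel goes
+ * back to AV1394_IC_DMA.
+ */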
+
+/*ARGSUSED*/
+static void
+av1394_ir_dma_stopped_cb(t1394_isoch_dma_handle_t t1394_idma_hdl,
+ opaque_t idma_evt_arg, id1394_isoch_dma_stopped_t status)
+{
+ av1394_ic_t *icp = idma_evt_arg;
+
+ AV1394_TNF_ENTER(av1394_ir_dma_stopped_cb);
+
+ mutex_enter(&icp->ic_mutex);
+ icp->ic_state = AV1394_IC_IDLE;
+ mutex_exit(&icp->ic_mutex);
+
+ AV1394_TNF_EXIT(av1394_ir_dma_stopped_cb);
+}
+
+
+/*
+ *
+ * --- data transfer routines
+ *
+ * av1394_ir_add_frames()
+ * Add empty frames to the pool.
+ */
+static int
+av1394_ir_add_frames(av1394_ic_t *icp, int idx, int cnt)
+{
+ av1394_ir_t *irp = &icp->ic_ir;
+
+ /* can only add to the tail */
+ if (idx != ((irp->ir_last_empty + 1) % icp->ic_nframes)) {
+ TNF_PROBE_1(av1394_ir_add_frames_error,
+ AV1394_TNF_ISOCH_ERROR, "", tnf_int, idx, idx);
+ return (EINVAL);
+ }
+
+ /* turn full frames into empty ones */
+ irp->ir_nfull -= cnt;
+ irp->ir_first_full = (irp->ir_first_full + cnt) % icp->ic_nframes;
+ irp->ir_nempty += cnt;
+ irp->ir_last_empty = (irp->ir_last_empty + cnt) % icp->ic_nframes;
+ ASSERT((irp->ir_nfull >= 0) && (irp->ir_nempty <= icp->ic_nframes));
+
+ /* if suspended due to overflow, check if we can resume */
+ if ((icp->ic_state == AV1394_IC_SUSPENDED) &&
+ (irp->ir_nempty >= irp->ir_lowat)) {
+ av1394_ir_overflow_resume(icp);
+ }
+
+ return (0);
+}
+
+static int
+av1394_ir_wait_frames(av1394_ic_t *icp, int *idx, int *cnt)
+{
+ av1394_ir_t *irp = &icp->ic_ir;
+ int ret = 0;
+
+ while (irp->ir_nfull == 0) {
+ if (cv_wait_sig(&icp->ic_xfer_cv, &icp->ic_mutex) <= 0) {
+ ret = EINTR;
+ break;
+ }
+ }
+ if (irp->ir_nfull > 0) {
+ *idx = irp->ir_first_full;
+ *cnt = irp->ir_nfull;
+ av1394_ir_dma_sync_frames(icp, *idx, *cnt);
+ ret = 0;
+ }
+ return (ret);
+}
+
+/*
+ * copy out the data, adjusting to the data format and skipping
+ * empty CIPs where possible
+ */
+static int
+av1394_ir_copyout(av1394_ic_t *icp, struct uio *uiop, int *empty_cnt)
+{
+ av1394_ir_t *irp = &icp->ic_ir;
+ av1394_isoch_seg_t *seg = irp->ir_data_pool.ip_seg;
+ int idx = irp->ir_read_idx;
+ int cnt = irp->ir_read_cnt;
+ int pktsz = icp->ic_pktsz;
+ int bs; /* data block size */
+ caddr_t kaddr_begin, kaddr;
+ int pkt_off; /* offset into current packet */
+ int len;
+ int frame_resid; /* bytes left in the current frame */
+ int ret = 0;
+
+ *empty_cnt = 0;
+
+ /*
+ * DBS -> block size: DBS, the second byte of the first CIP header
+ * quadlet, counts the data block length in quadlets; fall back to
+ * the full packet size if the value is implausible
+ */
+ bs = *(uchar_t *)(seg[idx].is_kaddr + 1) * 4 + AV1394_CIPSZ;
+ if ((bs > pktsz) || (bs < AV1394_CIPSZ + 8)) {
+ bs = pktsz;
+ }
+
+ while ((cnt > 0) && (uiop->uio_resid > 0) && (ret == 0)) {
+ kaddr = kaddr_begin = seg[idx].is_kaddr + irp->ir_read_off;
+ frame_resid = icp->ic_framesz - irp->ir_read_off;
+
+ mutex_exit(&icp->ic_mutex);
+ /* copyout data blocks, skipping empty CIPs */
+ while ((uiop->uio_resid > 0) && (frame_resid > 0)) {
+ pkt_off = (uintptr_t)kaddr % pktsz;
+ /*
+ * the quadlet following the CIP header can't be zero
+ * unless the packet is empty
+ */
+ if ((pkt_off == 0) &&
+ (*(uint32_t *)(kaddr + AV1394_CIPSZ) == 0)) {
+ kaddr += pktsz;
+ frame_resid -= pktsz;
+ continue;
+ }
+
+ len = bs - pkt_off;
+ if (len > uiop->uio_resid) {
+ len = uiop->uio_resid;
+ }
+ if (len > frame_resid) {
+ len = frame_resid;
+ }
+ if ((ret = uiomove(kaddr, len, UIO_READ, uiop)) != 0) {
+ break;
+ }
+
+ if (pkt_off + len == bs) {
+ kaddr += pktsz - pkt_off;
+ frame_resid -= pktsz - pkt_off;
+ } else {
+ kaddr += len;
+ frame_resid -= len;
+ }
+ }
+ mutex_enter(&icp->ic_mutex);
+
+ if (frame_resid > 0) {
+ irp->ir_read_off = kaddr - kaddr_begin;
+ } else {
+ irp->ir_read_off = 0;
+ idx = (idx + 1) % icp->ic_nframes;
+ cnt--;
+ (*empty_cnt)++;
+ }
+ }
+
+ return (ret);
+}
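+
+/*
+ * For example (hypothetical DV-like stream): with 512-byte packets and
+ * DBS = 120 quadlets, bs = 120 * 4 + AV1394_CIPSZ bytes, so each
+ * iteration above copies one CIP header plus its data block to the
+ * user buffer and then skips the padding up to the next packet
+ * boundary; packets whose first data quadlet is zero are skipped
+ * entirely.
+ */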
+
+/*
+ * zero a quadlet in each packet so we can recognize empty CIPs
+ */
+static void
+av1394_ir_zero_pkts(av1394_ic_t *icp, int idx, int cnt)
+{
+ av1394_ir_t *irp = &icp->ic_ir;
+ av1394_isoch_seg_t *seg = irp->ir_data_pool.ip_seg;
+ caddr_t kaddr, kaddr_end;
+ int pktsz = icp->ic_pktsz;
+ int i;
+
+ for (i = cnt; i > 0; i--) {
+ kaddr = seg[idx].is_kaddr + AV1394_CIPSZ;
+ kaddr_end = seg[idx].is_kaddr + icp->ic_framesz;
+ do {
+ *(uint32_t *)kaddr = 0;
+ kaddr += pktsz;
+ } while (kaddr < kaddr_end);
+
+ idx = (idx + 1) % icp->ic_nframes;
+ }
+}
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_xmit.c b/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_xmit.c
new file mode 100644
index 0000000000..153e8b5703
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394_isoch_xmit.c
@@ -0,0 +1,1249 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * av1394 isochronous transmit module
+ */
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+static int av1394_it_start_common(av1394_ic_t *);
+
+/* configuration routines */
+static void av1394_it_cleanup(av1394_ic_t *, int);
+static int av1394_it_bld_ixl(av1394_ic_t *);
+static void av1394_it_destroy_ixl(av1394_ic_t *);
+static int av1394_it_ixl_bld_data(av1394_ic_t *);
+static void av1394_it_ixl_destroy_data(av1394_ic_t *);
+static av1394_it_ixl_buf_t *av1394_it_ixl_bld_buf(av1394_ic_t *, int, int,
+ off_t, int, int);
+static void av1394_it_ixl_complete_buf(av1394_it_ixl_buf_t *,
+ av1394_it_ixl_empty_cip_t *);
+static void av1394_it_ixl_complete_buf2(av1394_it_ixl_buf_t *,
+ av1394_it_ixl_buf_t *);
+static av1394_it_ixl_empty_cip_t *av1394_it_ixl_bld_empty_cip(av1394_ic_t *,
+ int);
+static void av1394_it_ixl_complete_empty_cip(av1394_it_ixl_empty_cip_t *,
+ av1394_it_ixl_buf_t *);
+static void av1394_it_ixl_bld_begin(av1394_ic_t *);
+static void av1394_it_ixl_begin_update_pkts(av1394_ic_t *,
+ av1394_it_ixl_buf_t *);
+static int av1394_it_alloc_isoch_dma(av1394_ic_t *);
+static void av1394_it_free_isoch_dma(av1394_ic_t *);
+static void av1394_it_dma_sync_frames(av1394_ic_t *, int, int);
+
+/* callbacks */
+static void av1394_it_ixl_begin_cb(opaque_t, struct ixl1394_callback *);
+static void av1394_it_ixl_buf_cb(opaque_t, struct ixl1394_callback *);
+static void av1394_it_ixl_eof_cb(av1394_it_ixl_buf_t *bp);
+static int av1394_it_underrun_resume(av1394_ic_t *);
+static void av1394_it_dma_stopped_cb(t1394_isoch_dma_handle_t,
+ opaque_t, id1394_isoch_dma_stopped_t);
+
+/* data transfer routines */
+static int av1394_it_add_frames(av1394_ic_t *, int, int);
+static int av1394_it_wait_frames(av1394_ic_t *, int *, int *, int *);
+
+static void av1394_it_update_frame_syt(av1394_ic_t *, int, int, uint16_t);
+static uint16_t av1394_it_ts_cyc2syt(uint16_t);
+static uint16_t av1394_it_ts_syt_inc(uint16_t, uint16_t);
+
+static void av1394_it_kcopyin(av1394_ic_t *, void *, size_t);
+static int av1394_it_copyin(av1394_ic_t *, struct uio *, int *, int);
+static boolean_t av1394_it_is_dv_frame_start(caddr_t);
+static void av1394_it_reset_frame_syt(av1394_ic_t *, int);
+
+/* tunables */
+int av1394_it_hiwat_sub = 2;
+int av1394_it_lowat = 3;
+int av1394_it_start_thre = 3; /* xmit start threshold */
+int av1394_it_syt_off = 3; /* SYT offset in cycles */
+int av1394_it_dump_ixl = 0;
+
+#define AV1394_TNF_ENTER(func) \
+ TNF_PROBE_0_DEBUG(func##_enter, AV1394_TNF_ISOCH_STACK, "");
+
+#define AV1394_TNF_EXIT(func) \
+ TNF_PROBE_0_DEBUG(func##_exit, AV1394_TNF_ISOCH_STACK, "");
+
+int
+av1394_it_init(av1394_ic_t *icp, int *error)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_isoch_pool_t *pool = &itp->it_data_pool;
+ int nframes;
+
+ AV1394_TNF_ENTER(av1394_it_init);
+
+ nframes = av1394_ic_alloc_pool(pool, icp->ic_framesz, icp->ic_nframes,
+ AV1394_IT_NFRAMES_MIN);
+ if (nframes == 0) {
+ *error = IEC61883_ERR_NOMEM;
+ AV1394_TNF_EXIT(av1394_it_init);
+ return (EINVAL);
+ }
+ mutex_enter(&icp->ic_mutex);
+ icp->ic_nframes = nframes;
+ itp->it_hiwat = nframes - av1394_it_hiwat_sub;
+ itp->it_lowat = av1394_it_lowat;
+ itp->it_start_thre = av1394_it_start_thre;
+ itp->it_nempty = icp->ic_nframes;
+ itp->it_last_full = icp->ic_nframes - 1;
+
+ if (av1394_ic_dma_setup(icp, pool) != DDI_SUCCESS) {
+ mutex_exit(&icp->ic_mutex);
+ *error = IEC61883_ERR_NOMEM;
+ av1394_it_cleanup(icp, 1);
+ AV1394_TNF_EXIT(av1394_it_init);
+ return (EINVAL);
+ }
+
+ if (av1394_it_bld_ixl(icp) != DDI_SUCCESS) {
+ mutex_exit(&icp->ic_mutex);
+ *error = IEC61883_ERR_NOMEM;
+ av1394_it_cleanup(icp, 2);
+ AV1394_TNF_EXIT(av1394_it_init);
+ return (EINVAL);
+ }
+ mutex_exit(&icp->ic_mutex);
+
+ if (av1394_it_alloc_isoch_dma(icp) != DDI_SUCCESS) {
+ *error = IEC61883_ERR_NOMEM;
+ av1394_it_cleanup(icp, 3);
+ AV1394_TNF_EXIT(av1394_it_init);
+ return (EINVAL);
+ }
+
+ AV1394_TNF_EXIT(av1394_it_init);
+ return (0);
+}
+
+void
+av1394_it_fini(av1394_ic_t *icp)
+{
+ AV1394_TNF_ENTER(av1394_it_fini);
+
+ av1394_it_cleanup(icp, AV1394_CLEANUP_LEVEL_MAX);
+
+ AV1394_TNF_EXIT(av1394_it_fini);
+}
+
+int
+av1394_it_start(av1394_ic_t *icp)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ int ret = 0;
+
+ AV1394_TNF_ENTER(av1394_it_start);
+
+ mutex_enter(&icp->ic_mutex);
+ ASSERT(icp->ic_state == AV1394_IC_IDLE);
+
+ /* should be enough full frames to be able to start */
+ if (itp->it_nfull >= itp->it_start_thre) {
+ ret = av1394_it_start_common(icp);
+ }
+ mutex_exit(&icp->ic_mutex);
+
+ AV1394_TNF_EXIT(av1394_it_start);
+ return (ret);
+}
+
+static int
+av1394_it_start_common(av1394_ic_t *icp)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+ id1394_isoch_dma_ctrlinfo_t idma_ctrlinfo = { 0 };
+ int result;
+ int err;
+ int ret = 0;
+
+ ASSERT(icp->ic_state == AV1394_IC_IDLE);
+
+ err = t1394_start_isoch_dma(avp->av_t1394_hdl, icp->ic_isoch_hdl,
+ &idma_ctrlinfo, 0, &result);
+ if (err == DDI_SUCCESS) {
+ icp->ic_state = AV1394_IC_DMA;
+ } else {
+ TNF_PROBE_1(av1394_it_start_common_error,
+ AV1394_TNF_ISOCH_ERROR, "", tnf_int, result, result);
+ ret = EIO;
+ }
+
+ return (ret);
+}
+
+
+int
+av1394_it_stop(av1394_ic_t *icp)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+ av1394_it_t *itp = &icp->ic_it;
+
+ AV1394_TNF_ENTER(av1394_it_stop);
+
+ mutex_enter(&icp->ic_mutex);
+ if (icp->ic_state != AV1394_IC_IDLE) {
+ mutex_exit(&icp->ic_mutex);
+ t1394_stop_isoch_dma(avp->av_t1394_hdl, icp->ic_isoch_hdl, 0);
+ mutex_enter(&icp->ic_mutex);
+
+ icp->ic_state = AV1394_IC_IDLE;
+ itp->it_nempty = icp->ic_nframes;
+ itp->it_first_empty = 0;
+ itp->it_last_full = icp->ic_nframes - 1;
+ itp->it_nfull = 0;
+ }
+ mutex_exit(&icp->ic_mutex);
+
+ AV1394_TNF_EXIT(av1394_it_stop);
+ return (0);
+}
+
+int
+av1394_it_xmit(av1394_ic_t *icp, iec61883_xmit_t *xmit)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ int ret = 0;
+ int idx, cnt;
+
+ idx = xmit->tx_xfer.xf_full_idx;
+ cnt = xmit->tx_xfer.xf_full_cnt;
+
+ mutex_enter(&icp->ic_mutex);
+ /* check arguments */
+ if ((idx < 0) || (cnt < 0) || (cnt > itp->it_nempty)) {
+ mutex_exit(&icp->ic_mutex);
+ TNF_PROBE_2(av1394_it_xmit_error_args, AV1394_TNF_ISOCH_ERROR,
+ "", tnf_int, idx, idx, tnf_int, cnt, cnt);
+ return (EINVAL);
+ }
+
+ /* add full frames to the pool */
+ if (cnt > 0) {
+ if ((ret = av1394_it_add_frames(icp, idx, cnt)) != 0) {
+ mutex_exit(&icp->ic_mutex);
+ return (ret);
+ }
+ }
+
+ if ((icp->ic_state == AV1394_IC_IDLE) &&
+ (itp->it_nfull >= itp->it_start_thre)) {
+ if ((ret = av1394_it_start_common(icp)) != 0) {
+ mutex_exit(&icp->ic_mutex);
+ return (ret);
+ }
+ }
+
+ /* wait for new empty frames */
+ ret = av1394_it_wait_frames(icp, &xmit->tx_xfer.xf_empty_idx,
+ &xmit->tx_xfer.xf_empty_cnt, &xmit->tx_miss_cnt);
+ mutex_exit(&icp->ic_mutex);
+
+ return (ret);
+}
+
+int
+av1394_it_write(av1394_ic_t *icp, struct uio *uiop)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_isoch_autoxmit_t *axp = &avp->av_i.i_autoxmit;
+ int dv;
+ int ret = 0;
+ int full_cnt;
+ int miss_cnt;
+
+ AV1394_TNF_ENTER(av1394_it_write);
+
+ mutex_enter(&icp->ic_mutex);
+ dv = (axp->ax_fmt & AV1394_ISOCH_AUTOXMIT_DV);
+
+ while (uiop->uio_resid > 0) {
+ /* must have at least one empty frame */
+ if (itp->it_write_cnt == 0) {
+ ret = av1394_it_wait_frames(icp, &itp->it_write_idx,
+ &itp->it_write_cnt, &miss_cnt);
+ if (ret != 0) {
+ break;
+ }
+ }
+
+ /* copyin as much data as we can */
+ if (axp->ax_copy_ciph) {
+ ASSERT(itp->it_write_off == 0);
+ av1394_it_kcopyin(icp, axp->ax_ciph, AV1394_CIPSZ);
+ axp->ax_copy_ciph = B_FALSE;
+ }
+ if ((ret = av1394_it_copyin(icp, uiop, &full_cnt, dv)) != 0) {
+ break;
+ }
+
+ /* add full frames to the pool */
+ if (full_cnt > 0) {
+ ret = av1394_it_add_frames(icp,
+ itp->it_write_idx, full_cnt);
+ if (ret != 0) {
+ break;
+ }
+ itp->it_write_idx += full_cnt;
+ itp->it_write_idx %= icp->ic_nframes;
+ }
+
+ /* start xfer if not already */
+ if ((icp->ic_state == AV1394_IC_IDLE) &&
+ (itp->it_nfull >= itp->it_start_thre)) {
+ if ((ret = av1394_it_start_common(icp)) != 0) {
+ mutex_exit(&icp->ic_mutex);
+ AV1394_TNF_EXIT(av1394_it_write);
+ return (ret);
+ }
+ }
+ }
+ mutex_exit(&icp->ic_mutex);
+
+ AV1394_TNF_EXIT(av1394_it_write);
+ return (ret);
+}
+
+/*
+ *
+ * --- configuration routines
+ *
+ */
+static void
+av1394_it_cleanup(av1394_ic_t *icp, int level)
+{
+ av1394_isoch_pool_t *pool = &icp->ic_it.it_data_pool;
+
+ ASSERT((level > 0) && (level <= AV1394_CLEANUP_LEVEL_MAX));
+
+ switch (level) {
+ default:
+ av1394_it_free_isoch_dma(icp);
+ /* FALLTHRU */
+ case 3:
+ av1394_it_destroy_ixl(icp);
+ /* FALLTHRU */
+ case 2:
+ av1394_ic_dma_cleanup(icp, pool);
+ /* FALLTHRU */
+ case 1:
+ av1394_ic_free_pool(pool);
+ /* FALLTHRU */
+ }
+}
+
+/*
+ * av1394_it_bld_ixl()
+ * Build an IXL chain out of several blocks.
+ */
+static int
+av1394_it_bld_ixl(av1394_ic_t *icp)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_it_bld_ixl);
+
+ /* data block */
+ if ((ret = av1394_it_ixl_bld_data(icp)) != DDI_SUCCESS) {
+ AV1394_TNF_EXIT(av1394_it_bld_ixl);
+ return (ret);
+ }
+
+ /* begin block */
+ if (icp->ic_param.cp_ts_mode != IEC61883_TS_NONE) {
+ av1394_it_ixl_bld_begin(icp);
+
+ itp->it_ixlp = (ixl1394_command_t *)&itp->it_ixl_begin;
+ } else {
+ itp->it_ixlp = (ixl1394_command_t *)
+ &((av1394_it_ixl_buf_t *)itp->it_ixl_data)->tb_label;
+ }
+
+ if (av1394_it_dump_ixl) {
+ av1394_ic_ixl_dump(itp->it_ixlp);
+ }
+
+ AV1394_TNF_EXIT(av1394_it_bld_ixl);
+ return (ret);
+}
+
+static void
+av1394_it_destroy_ixl(av1394_ic_t *icp)
+{
+ av1394_it_t *itp = &icp->ic_it;
+
+ av1394_it_ixl_destroy_data(icp);
+ itp->it_ixlp = NULL;
+}
+
+/*
+ * build data transmit part of the IXL chain
+ */
+static int
+av1394_it_ixl_bld_data(av1394_ic_t *icp)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_isoch_pool_t *pool = &itp->it_data_pool;
+ int total = 0; /* # of pkts in the chain */
+ int nfull = 0; /* # of full CIPs in a series */
+ int framenum = -1; /* frame number */
+ int bufsz_max; /* max buffer size in pkts */
+ int segnum = 0; /* current segment number */
+ int segsz; /* segment size in pkts */
+ off_t segoff = 0; /* segment offset in pkts */
+ av1394_it_ixl_empty_cip_t *ep = NULL; /* last empty CIP */
+ av1394_it_ixl_buf_t *bp = NULL; /* last data buffer */
+ av1394_it_ixl_buf_t *prevbp = NULL;
+ int a, n, d; /* N/D algorithm variables */
+ int type, ptype; /* current and prev CIP type */
+ int tb_flags;
+
+ itp->it_frame_info = kmem_zalloc(icp->ic_nframes *
+ sizeof (av1394_it_frame_info_t), KM_SLEEP);
+
+ bufsz_max = AV1394_IXL_BUFSZ_MAX / icp->ic_pktsz;
+ n = icp->ic_param.cp_n;
+ d = icp->ic_param.cp_d;
+ /*
+ * the following assert guarantees no more than one empty CIP in a row,
+ * i.e. empty CIPs account for <=50% of all packets.
+ * this should be ensured by ioctl argument validation.
+ */
+ ASSERT((n == 0) || (d / n > 1));
+ /*
+ * build the chain. it is hard to precalculate the amount of memory
+ * needed for the entire chain, so we simply allocate as we go.
+ */
+ ptype = AV1394_CIP_EMPTY;
+ segsz = pool->ip_seg[0].is_size / icp->ic_pktsz;
+ a = n;
+ while (total < icp->ic_nframes * icp->ic_npkts) {
+ /* insert empty CIPs using N/D algorithm */
+ a += n;
+ if (a > d) {
+ a -= d;
+ type = AV1394_CIP_EMPTY;
+ } else {
+ type = AV1394_CIP_FULL;
+ nfull++;
+ }
+
+ /*
+ * merge series of full packets into single SEND_BUF commands.
+ * a series can be terminated by:
+ * - an empty CIP;
+ * - series buffer size reached maximum;
+ * - end of isoch segment;
+ * - end of frame (which is always at the end of segment);
+ */
+ if (((type == AV1394_CIP_EMPTY) || (segoff + nfull == segsz) ||
+ (nfull == bufsz_max)) && (nfull > 0)) {
+
+ /* build buffer block */
+ prevbp = bp;
+ tb_flags = 0;
+ if (type == AV1394_CIP_EMPTY) {
+ tb_flags |= AV1394_IT_IXL_BUF_NEXT_EMPTY;
+ }
+ if (total % icp->ic_npkts == 0) {
+ tb_flags |= AV1394_IT_IXL_BUF_SOF;
+ framenum++;
+ }
+ if ((total + nfull) % icp->ic_npkts == 0) {
+ tb_flags |= AV1394_IT_IXL_BUF_EOF;
+ }
+ bp = av1394_it_ixl_bld_buf(icp, nfull, segnum, segoff,
+ tb_flags, framenum);
+
+ if (itp->it_ixl_data == NULL) {
+ itp->it_ixl_data = &bp->tb_common;
+ }
+
+ /* complete previous empty CIP or a buffer */
+ if (ep) {
+ av1394_it_ixl_complete_empty_cip(ep, bp);
+ ep = NULL;
+ } else if (prevbp) {
+ av1394_it_ixl_complete_buf2(prevbp, bp);
+ }
+
+ /* if current segment is used up, pick next one */
+ segoff += nfull;
+ if (segoff == segsz) {
+ if (++segnum < pool->ip_nsegs) {
+ segsz = pool->ip_seg[segnum].is_size /
+ icp->ic_pktsz;
+ }
+ segoff = 0;
+ }
+
+ total += nfull;
+ nfull = 0;
+ }
+ /* insert an empty packet if needed */
+ if (type == AV1394_CIP_EMPTY) {
+ ep = av1394_it_ixl_bld_empty_cip(icp, framenum);
+ av1394_it_ixl_complete_buf(bp, ep);
+ }
+ ptype = type;
+ }
+ ASSERT(nfull == 0);
+
+ /* last packet must be an empty CIP, except when n == 0 */
+ if (n != 0) {
+ if (ptype != AV1394_CIP_EMPTY) {
+ ep = av1394_it_ixl_bld_empty_cip(icp, framenum);
+ av1394_it_ixl_complete_buf(bp, ep);
+ }
+ av1394_it_ixl_complete_empty_cip(ep,
+ (av1394_it_ixl_buf_t *)itp->it_ixl_data);
+ ep->te_jump.next_ixlp = NULL;
+ ep->te_common.tc_next = NULL;
+ } else {
+ bp->tb_jump.label = (ixl1394_command_t *)
+ &(((av1394_it_ixl_buf_t *)itp->it_ixl_data)->tb_label);
+ }
+
+ return (DDI_SUCCESS);
+}
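+
+/*
+ * A trace of the N/D cadence above for a hypothetical rate n = 1, d = 3
+ * (a starts at n):
+ *
+ *   a += n -> 2,  a > d? no   => full CIP
+ *   a += n -> 3,  a > d? no   => full CIP
+ *   a += n -> 4,  a > d? yes  => empty CIP, a -= d -> 1
+ *
+ * i.e. every third packet on the wire is an empty CIP, and the assert
+ * on d / n > 1 guarantees empties never occur back-to-back.
+ */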
+
+static void
+av1394_it_ixl_destroy_data(av1394_ic_t *icp)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_it_ixl_common_t *cmd, *cmd_next;
+
+ for (cmd = itp->it_ixl_data; cmd != NULL; cmd = cmd_next) {
+ cmd_next = cmd->tc_next;
+ kmem_free(cmd, cmd->tc_size);
+ }
+ itp->it_ixl_data = NULL;
+
+ kmem_free(itp->it_frame_info,
+ icp->ic_nframes * sizeof (av1394_it_frame_info_t));
+}
+
+static av1394_it_ixl_buf_t *
+av1394_it_ixl_bld_buf(av1394_ic_t *icp, int cnt, int segnum, off_t off,
+ int flags, int framenum)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_isoch_seg_t *isp = &itp->it_data_pool.ip_seg[segnum];
+ av1394_it_ixl_buf_t *bp;
+ int pktsz = icp->ic_pktsz;
+
+ bp = kmem_zalloc(sizeof (av1394_it_ixl_buf_t), KM_SLEEP);
+ bp->tb_common.tc_size = sizeof (av1394_it_ixl_buf_t);
+ /* tc_next later */
+ bp->tb_flags = flags;
+ bp->tb_framenum = framenum;
+ bp->tb_icp = icp;
+
+ bp->tb_label.ixl_opcode = IXL1394_OP_LABEL;
+ bp->tb_label.next_ixlp = (ixl1394_command_t *)&bp->tb_buf;
+
+ bp->tb_buf.ixl_opcode = IXL1394_OP_SEND_BUF;
+ bp->tb_buf.pkt_size = pktsz;
+ bp->tb_buf.size = cnt * pktsz;
+ bp->tb_buf.ixl_buf._dmac_ll =
+ isp->is_dma_cookie.dmac_laddress + off * pktsz;
+ bp->tb_buf.mem_bufp = isp->is_kaddr + off * pktsz;
+
+ if (flags & AV1394_IT_IXL_BUF_EOF) {
+ bp->tb_buf.next_ixlp = (ixl1394_command_t *)&bp->tb_store_ts;
+
+ bp->tb_store_ts.ixl_opcode = IXL1394_OP_STORE_TIMESTAMP;
+ bp->tb_store_ts.next_ixlp = (ixl1394_command_t *)&bp->tb_cb;
+
+ bp->tb_cb.ixl_opcode = IXL1394_OP_CALLBACK;
+ bp->tb_cb.callback = av1394_it_ixl_buf_cb;
+ bp->tb_cb.callback_arg = bp;
+ bp->tb_cb.next_ixlp = (ixl1394_command_t *)&bp->tb_jump;
+
+ bp->tb_jump.ixl_opcode = IXL1394_OP_JUMP_U;
+ } else {
+ bp->tb_buf.next_ixlp = (ixl1394_command_t *)&bp->tb_jump;
+
+ bp->tb_jump.ixl_opcode = IXL1394_OP_JUMP;
+ }
+ /*
+ * jump label and next_ixlp later.
+ * unset fields will be set in av1394_it_ixl_complete_buf()
+ *
+ * save additional frame info
+ */
+ if (flags & AV1394_IT_IXL_BUF_SOF) {
+ itp->it_frame_info[framenum].fi_first_buf = bp;
+ itp->it_frame_info[framenum].fi_ts_off = bp->tb_buf.mem_bufp +
+ AV1394_TS_MODE_GET_OFF(icp->ic_param.cp_ts_mode);
+ } else if (flags & AV1394_IT_IXL_BUF_EOF) {
+ itp->it_frame_info[framenum].fi_last_buf = bp;
+ }
+ itp->it_frame_info[framenum].fi_ncycs += cnt;
+
+ return (bp);
+}
+
+static void
+av1394_it_ixl_complete_buf(av1394_it_ixl_buf_t *bp,
+ av1394_it_ixl_empty_cip_t *ep)
+{
+ bp->tb_common.tc_next = &ep->te_common;
+ bp->tb_jump.label = bp->tb_jump.next_ixlp =
+ (ixl1394_command_t *)&ep->te_label;
+}
+
+static void
+av1394_it_ixl_complete_buf2(av1394_it_ixl_buf_t *bp,
+ av1394_it_ixl_buf_t *nextbp)
+{
+ bp->tb_common.tc_next = &nextbp->tb_common;
+ bp->tb_jump.label = bp->tb_jump.next_ixlp =
+ (ixl1394_command_t *)&nextbp->tb_label;
+}
+
+static av1394_it_ixl_empty_cip_t *
+av1394_it_ixl_bld_empty_cip(av1394_ic_t *icp, int framenum)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_it_ixl_empty_cip_t *ep;
+
+ ep = kmem_zalloc(sizeof (av1394_it_ixl_empty_cip_t), KM_SLEEP);
+ ep->te_common.tc_size = sizeof (av1394_it_ixl_empty_cip_t);
+ /* tc_next later */
+
+ ep->te_label.ixl_opcode = IXL1394_OP_LABEL;
+ ep->te_label.next_ixlp = (ixl1394_command_t *)&ep->te_pkt;
+
+ ep->te_pkt.ixl_opcode = IXL1394_OP_SEND_PKT_ST;
+ ep->te_pkt.size = AV1394_CIPSZ;
+ /* ixl_buf and mem_bufp later */
+ ep->te_pkt.next_ixlp = (ixl1394_command_t *)&ep->te_jump;
+
+ ep->te_jump.ixl_opcode = IXL1394_OP_JUMP;
+ /*
+ * label and next_ixlp later.
+ * unset fields will be set in av1394_it_ixl_complete_empty_cip()
+ */
+
+ itp->it_frame_info[framenum].fi_ncycs++;
+
+ return (ep);
+}
+
+/*
+ * empty CIP packet contains CIP header of the next packet,
+ * so we just point to the same address as the next packet's header
+ */
+static void
+av1394_it_ixl_complete_empty_cip(av1394_it_ixl_empty_cip_t *ep,
+ av1394_it_ixl_buf_t *bp)
+{
+ ep->te_common.tc_next = &bp->tb_common;
+
+ ep->te_pkt.ixl_buf._dmac_ll = bp->tb_buf.ixl_buf._dmac_ll;
+ ep->te_pkt.mem_bufp = bp->tb_buf.mem_bufp;
+
+ ep->te_jump.label = ep->te_jump.next_ixlp =
+ (ixl1394_command_t *)&bp->tb_label;
+}
+
+static void
+av1394_it_ixl_bld_begin(av1394_ic_t *icp)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_it_ixl_buf_t *bp = (av1394_it_ixl_buf_t *)itp->it_ixl_data;
+ av1394_it_ixl_begin_t *bep = &itp->it_ixl_begin;
+ int i;
+
+ bep->be_label.ixl_opcode = IXL1394_OP_LABEL;
+ bep->be_label.next_ixlp = (ixl1394_command_t *)&bep->be_empty_pre;
+
+ bep->be_empty_pre.ixl_opcode = IXL1394_OP_SEND_PKT_ST;
+ bep->be_empty_pre.size = AV1394_CIPSZ;
+ bep->be_empty_pre.ixl_buf._dmac_ll = bp->tb_buf.ixl_buf._dmac_ll;
+ bep->be_empty_pre.mem_bufp = bp->tb_buf.mem_bufp;
+ bep->be_empty_pre.next_ixlp = (ixl1394_command_t *)&bep->be_store_ts;
+
+ bep->be_store_ts.ixl_opcode = IXL1394_OP_STORE_TIMESTAMP;
+ bep->be_store_ts.next_ixlp = (ixl1394_command_t *)&bep->be_cb;
+
+ bep->be_cb.ixl_opcode = IXL1394_OP_CALLBACK;
+ bep->be_cb.callback = av1394_it_ixl_begin_cb;
+ bep->be_cb.callback_arg = &bep->be_store_ts.timestamp;
+ bep->be_cb.next_ixlp = (ixl1394_command_t *)&bep->be_empty_post[0];
+
+ for (i = 0; i < AV1394_IT_IXL_BEGIN_NPOST; i++) {
+ bep->be_empty_post[i].ixl_opcode = IXL1394_OP_SEND_PKT_ST;
+ bep->be_empty_post[i].size = AV1394_CIPSZ;
+ bep->be_empty_post[i].ixl_buf._dmac_ll =
+ bp->tb_buf.ixl_buf._dmac_ll;
+ bep->be_empty_post[i].mem_bufp = bp->tb_buf.mem_bufp;
+ bep->be_empty_post[i].next_ixlp =
+ (ixl1394_command_t *)&bep->be_empty_post[i + 1];
+ }
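+	/* terminate the chain: the last empty packet leads to the jump */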
+ bep->be_empty_post[AV1394_IT_IXL_BEGIN_NPOST - 1].next_ixlp =
+ (ixl1394_command_t *)&bep->be_jump;
+
+ bep->be_jump.ixl_opcode = IXL1394_OP_JUMP_U;
+ bep->be_jump.label = (ixl1394_command_t *)&bp->tb_label;
+ bep->be_jump.next_ixlp = (ixl1394_command_t *)&bp->tb_label;
+}
+
+static void
+av1394_it_ixl_begin_update_pkts(av1394_ic_t *icp, av1394_it_ixl_buf_t *bp)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_it_ixl_begin_t *bep = &itp->it_ixl_begin;
+ int i;
+
+ for (i = 0; i < AV1394_IT_IXL_BEGIN_NPOST; i++) {
+ bep->be_empty_post[i].ixl_buf._dmac_ll =
+ bp->tb_buf.ixl_buf._dmac_ll;
+ bep->be_empty_post[i].mem_bufp = bp->tb_buf.mem_bufp;
+ }
+}
+
+static int
+av1394_it_alloc_isoch_dma(av1394_ic_t *icp)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+ av1394_it_t *itp = &icp->ic_it;
+ id1394_isoch_dmainfo_t di;
+ int result;
+ int ret;
+
+ AV1394_TNF_ENTER(av1394_it_alloc_isoch_dma);
+
+ di.ixlp = itp->it_ixlp;
+ di.channel_num = icp->ic_num;
+ di.idma_options = ID1394_TALK;
+ di.it_speed = icp->ic_param.cp_bus_speed;
+ /*
+ * XXX this should really be IXL1394_SKIP_TO_NEXT,
+ * but it can't be used yet due to the Framework bug
+ */
+ di.it_default_skip = IXL1394_SKIP_TO_SELF;
+ di.default_tag = 1;
+ di.default_sync = 0;
+ di.global_callback_arg = icp;
+ di.isoch_dma_stopped = av1394_it_dma_stopped_cb;
+ di.idma_evt_arg = icp;
+
+ if ((ret = t1394_alloc_isoch_dma(avp->av_t1394_hdl, &di, 0,
+ &icp->ic_isoch_hdl, &result)) != DDI_SUCCESS) {
+ TNF_PROBE_1(av1394_it_alloc_isoch_dma_error,
+ AV1394_TNF_ISOCH_ERROR, "", tnf_int, result, result);
+ }
+
+ AV1394_TNF_EXIT(av1394_it_alloc_isoch_dma);
+ return (ret);
+}
+
+static void
+av1394_it_free_isoch_dma(av1394_ic_t *icp)
+{
+ av1394_inst_t *avp = icp->ic_avp;
+
+	AV1394_TNF_ENTER(av1394_it_free_isoch_dma);
+
+ t1394_free_isoch_dma(avp->av_t1394_hdl, 0, &icp->ic_isoch_hdl);
+
+	AV1394_TNF_EXIT(av1394_it_free_isoch_dma);
+}
+
+static void
+av1394_it_dma_sync_frames(av1394_ic_t *icp, int idx, int cnt)
+{
+ av1394_ic_dma_sync_frames(icp, idx, cnt,
+ &icp->ic_it.it_data_pool, DDI_DMA_SYNC_FORDEV);
+}
+
+/*
+ *
+ * --- callbacks
+ *
+ */
+static void
+av1394_it_ixl_begin_cb(opaque_t arg, struct ixl1394_callback *cb)
+{
+ av1394_ic_t *icp = arg;
+ av1394_it_t *itp = &icp->ic_it;
+ uint16_t *cycp = cb->callback_arg; /* cycle timestamp pointer */
+ uint16_t syt;
+ int first;
+
+ AV1394_TNF_ENTER(av1394_it_ixl_begin_cb);
+
+ mutex_enter(&icp->ic_mutex);
+ /* save initial timestamp value */
+ itp->it_ts_init.ts_syt = av1394_it_ts_cyc2syt(*cycp);
+
+ /*
+ * update frame timestamps if needed
+ */
+ if ((itp->it_nfull <= 0) ||
+ (AV1394_TS_MODE_GET_SIZE(icp->ic_param.cp_ts_mode) == 0)) {
+ mutex_exit(&icp->ic_mutex);
+ AV1394_TNF_EXIT(av1394_it_ixl_begin_cb);
+ return;
+ }
+ ASSERT(itp->it_nfull <= icp->ic_nframes);
+
+ syt = av1394_it_ts_syt_inc(itp->it_ts_init.ts_syt,
+ AV1394_IT_IXL_BEGIN_NPOST + av1394_it_syt_off);
+ first = (itp->it_last_full + icp->ic_nframes - itp->it_nfull + 1) %
+ icp->ic_nframes;
+ av1394_it_update_frame_syt(icp, first, itp->it_nfull, syt);
+ mutex_exit(&icp->ic_mutex);
+
+ AV1394_TNF_EXIT(av1394_it_ixl_begin_cb);
+}
+
+/*ARGSUSED*/
+static void
+av1394_it_ixl_buf_cb(opaque_t arg, struct ixl1394_callback *cb)
+{
+ av1394_it_ixl_buf_t *bp = cb->callback_arg;
+
+ AV1394_TNF_ENTER(av1394_it_ixl_buf_cb);
+ if (bp->tb_flags & AV1394_IT_IXL_BUF_EOF) {
+ av1394_it_ixl_eof_cb(bp);
+ }
+ AV1394_TNF_EXIT(av1394_it_ixl_buf_cb);
+}
+
+static void
+av1394_it_ixl_eof_cb(av1394_it_ixl_buf_t *bp)
+{
+ av1394_ic_t *icp = bp->tb_icp;
+ av1394_isoch_t *ip = &icp->ic_avp->av_i;
+ av1394_it_t *itp = &icp->ic_it;
+
+ AV1394_TNF_ENTER(av1394_it_ixl_eof_cb);
+
+ mutex_enter(&ip->i_mutex);
+ mutex_enter(&icp->ic_mutex);
+ if (itp->it_nempty < icp->ic_nframes) {
+ itp->it_nempty++;
+ itp->it_nfull--;
+ cv_signal(&icp->ic_xfer_cv);
+ }
+
+ if ((itp->it_nempty >= itp->it_hiwat) &&
+ (icp->ic_state == AV1394_IC_DMA)) {
+ av1394_ic_trigger_softintr(icp, icp->ic_num,
+ AV1394_PREQ_IT_UNDERRUN);
+ }
+ mutex_exit(&icp->ic_mutex);
+ mutex_exit(&ip->i_mutex);
+
+ AV1394_TNF_EXIT(av1394_it_ixl_eof_cb);
+}
+
+void
+av1394_it_underrun(av1394_ic_t *icp)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_inst_t *avp = icp->ic_avp;
+ int idx;
+ ixl1394_jump_t *old_jmp;
+ ixl1394_jump_t new_jmp;
+ id1394_isoch_dma_updateinfo_t update_info;
+ int err;
+ int result;
+
+ AV1394_TNF_ENTER(av1394_it_underrun);
+
+ /*
+ * update the last full frame's jump to NULL
+ */
+ idx = (itp->it_first_empty + icp->ic_nframes - 1) % icp->ic_nframes;
+
+ old_jmp = &itp->it_frame_info[idx].fi_last_buf->tb_jump;
+ itp->it_saved_label = old_jmp->label;
+
+ new_jmp.ixl_opcode = IXL1394_OP_JUMP_U;
+ new_jmp.label = NULL;
+ new_jmp.next_ixlp = NULL;
+
+ update_info.orig_ixlp = (ixl1394_command_t *)old_jmp;
+ update_info.temp_ixlp = (ixl1394_command_t *)&new_jmp;
+ update_info.ixl_count = 1;
+
+ mutex_exit(&icp->ic_mutex);
+ err = t1394_update_isoch_dma(avp->av_t1394_hdl,
+ icp->ic_isoch_hdl, &update_info, 0, &result);
+ mutex_enter(&icp->ic_mutex);
+
+ if (err == DDI_SUCCESS) {
+ itp->it_underrun_idx = idx;
+ icp->ic_state = AV1394_IC_SUSPENDED;
+ cv_signal(&icp->ic_xfer_cv);
+ } else {
+ TNF_PROBE_2(av1394_it_underrun_error_update,
+ AV1394_TNF_ISOCH_ERROR, "", tnf_int, err, err,
+ tnf_int, result, result);
+ }
+
+ AV1394_TNF_EXIT(av1394_it_underrun);
+}
+
+/*
+ * resume from the underrun condition
+ */
+static int
+av1394_it_underrun_resume(av1394_ic_t *icp)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_inst_t *avp = icp->ic_avp;
+ av1394_it_ixl_buf_t *bp;
+ int idx;
+ ixl1394_jump_t *old_jmp;
+ ixl1394_jump_t new_jmp;
+ id1394_isoch_dma_updateinfo_t update_info;
+ int err;
+ int result;
+
+ AV1394_TNF_ENTER(av1394_it_underrun_resume);
+
+ /*
+	 * resuming the transfer is a lot like starting the transfer:
+ * first the IXL begin block needs to be executed, then the rest
+ * of the IXL chain. The following dynamic updates are needed:
+ *
+ * 1. update the begin block to jump to the first empty frame;
+ * 2. restore the original jump label which we previously
+ * changed to jump to the underrun block;
+ *
+ * update #1
+ * start by updating the begin block with a new buffer address
+ */
+ idx = (itp->it_underrun_idx + 1) % icp->ic_nframes;
+ bp = itp->it_frame_info[idx].fi_first_buf;
+ av1394_it_ixl_begin_update_pkts(icp, bp);
+
+ old_jmp = &itp->it_ixl_begin.be_jump;
+
+ new_jmp.ixl_opcode = IXL1394_OP_JUMP_U;
+ new_jmp.label = (ixl1394_command_t *)&bp->tb_label;
+ new_jmp.next_ixlp = NULL;
+
+ update_info.orig_ixlp = (ixl1394_command_t *)old_jmp;
+ update_info.temp_ixlp = (ixl1394_command_t *)&new_jmp;
+ update_info.ixl_count = 1;
+
+ mutex_exit(&icp->ic_mutex);
+ err = t1394_update_isoch_dma(avp->av_t1394_hdl,
+ icp->ic_isoch_hdl, &update_info, 0, &result);
+ mutex_enter(&icp->ic_mutex);
+
+ if (err != DDI_SUCCESS) {
+ TNF_PROBE_2(av1394_it_underrun_resume_error_update1,
+ AV1394_TNF_ISOCH_ERROR, "", tnf_int, err, err,
+ tnf_int, result, result);
+ AV1394_TNF_EXIT(av1394_it_underrun_resume);
+ return (EIO);
+ }
+
+ /*
+ * update #2
+ */
+ bp = itp->it_frame_info[itp->it_underrun_idx].fi_last_buf;
+ old_jmp = &bp->tb_jump;
+
+ new_jmp.ixl_opcode = IXL1394_OP_JUMP_U;
+ new_jmp.label = itp->it_saved_label;
+ new_jmp.next_ixlp = NULL;
+
+ update_info.orig_ixlp = (ixl1394_command_t *)old_jmp;
+ update_info.temp_ixlp = (ixl1394_command_t *)&new_jmp;
+ update_info.ixl_count = 1;
+
+ mutex_exit(&icp->ic_mutex);
+ err = t1394_update_isoch_dma(avp->av_t1394_hdl,
+ icp->ic_isoch_hdl, &update_info, 0, &result);
+ mutex_enter(&icp->ic_mutex);
+
+ if (err != DDI_SUCCESS) {
+ TNF_PROBE_2(av1394_it_underrun_resume_error_update2,
+ AV1394_TNF_ISOCH_ERROR, "", tnf_int, err, err,
+ tnf_int, result, result);
+ AV1394_TNF_EXIT(av1394_it_underrun_resume);
+ return (EIO);
+ }
+
+ icp->ic_state = AV1394_IC_DMA;
+
+ AV1394_TNF_EXIT(av1394_it_underrun_resume);
+ return (0);
+}
+
+/*ARGSUSED*/
+static void
+av1394_it_dma_stopped_cb(t1394_isoch_dma_handle_t t1394_idma_hdl,
+ opaque_t idma_evt_arg, id1394_isoch_dma_stopped_t status)
+{
+ av1394_ic_t *icp = idma_evt_arg;
+
+ AV1394_TNF_ENTER(av1394_it_dma_stopped_cb);
+
+ mutex_enter(&icp->ic_mutex);
+ icp->ic_state = AV1394_IC_IDLE;
+ mutex_exit(&icp->ic_mutex);
+
+ AV1394_TNF_EXIT(av1394_it_dma_stopped_cb);
+}
+
+
+/*
+ *
+ * --- data transfer routines
+ *
+ * av1394_it_add_frames()
+ * Add full frames to the pool.
+ */
+static int
+av1394_it_add_frames(av1394_ic_t *icp, int idx, int cnt)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_it_frame_info_t *fip;
+ int prev_full = itp->it_last_full;
+ uint16_t syt;
+ int ret = 0;
+
+ /* can only add to tail */
+ if (idx != ((itp->it_last_full + 1) % icp->ic_nframes)) {
+ TNF_PROBE_1(av1394_it_add_frames_error,
+ AV1394_TNF_ISOCH_ERROR, "", tnf_int, idx, idx);
+ return (EINVAL);
+ }
+
+ /* turn empty frames into full ones */
+ itp->it_nempty -= cnt;
+ itp->it_first_empty = (itp->it_first_empty + cnt) % icp->ic_nframes;
+ itp->it_nfull += cnt;
+ itp->it_last_full = (itp->it_last_full + cnt) % icp->ic_nframes;
+ ASSERT((itp->it_nempty >= 0) && (itp->it_nfull <= icp->ic_nframes));
+
+ /*
+ * update frame timestamps if needed
+ */
+ if (AV1394_TS_MODE_GET_SIZE(icp->ic_param.cp_ts_mode) > 0) {
+ ASSERT(prev_full >= 0);
+ fip = &itp->it_frame_info[prev_full];
+ syt = *(uint16_t *)fip->fi_ts_off;
+ syt = av1394_it_ts_syt_inc(syt, fip->fi_ncycs);
+ av1394_it_update_frame_syt(icp, idx, cnt, syt);
+ }
+
+ av1394_it_dma_sync_frames(icp, idx, cnt);
+
+	/* if suspended due to underrun, check if we can resume */
+ if ((icp->ic_state == AV1394_IC_SUSPENDED) &&
+ (itp->it_nempty >= itp->it_lowat)) {
+ ret = av1394_it_underrun_resume(icp);
+ }
+
+ return (ret);
+}
+
+/*
+ * wait for empty frames
+ */
+static int
+av1394_it_wait_frames(av1394_ic_t *icp, int *idx, int *cnt, int *nlost)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ int ret = 0;
+
+ while ((itp->it_nempty == 0) && (icp->ic_state == AV1394_IC_DMA)) {
+ if (cv_wait_sig(&icp->ic_xfer_cv, &icp->ic_mutex) <= 0) {
+ ret = EINTR;
+ break;
+ }
+ }
+
+ if (itp->it_nempty > 0) {
+ *idx = itp->it_first_empty;
+ *cnt = itp->it_nempty;
+ *nlost = 0;
+ ret = 0;
+ }
+ return (ret);
+}
+
+/*
+ * update frame timestamps for a range of frames
+ */
+static void
+av1394_it_update_frame_syt(av1394_ic_t *icp, int first, int cnt, uint16_t syt)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ int i;
+ int j = first; /* frame number */
+
+ for (i = cnt; i > 0; i--) {
+ *(uint16_t *)itp->it_frame_info[j].fi_ts_off = syt;
+ syt = av1394_it_ts_syt_inc(syt, itp->it_frame_info[j].fi_ncycs);
+ j = (j + 1) % icp->ic_nframes;
+ }
+}
+
+/*
+ * convert cycle timestamp into SYT timestamp:
+ *
+ * Cycle timer: cycleSeconds cycleCount cycleOffset
+ * 31_30_29_28_27_26_25 24___15_14_13_12 11________0
+ * Cycle timestamp: |------------------------|
+ * SYT timestamp: |----------------------|
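+ *
+ * e.g. (illustrative value only): a cycle timestamp whose low four bits
+ * are 0x3 maps to SYT (0x3 << 12) + 0x800 = 0x3800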
+ */
+static uint16_t
+av1394_it_ts_cyc2syt(uint16_t cyc)
+{
+ return (((cyc & 0xF) << 12) + 0x800);
+}
+
+/*
+ * increment SYT by a number of cycles
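+ *
+ * e.g. (illustrative values only): incrementing SYT 0x3800 by 2 cycles
+ * yields 0x3800 + (2 << 12) = 0x5800; 16-bit wraparound keeps the
+ * cycle field in range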
+ */
+static uint16_t
+av1394_it_ts_syt_inc(uint16_t syt, uint16_t ncycs)
+{
+ return (syt + (ncycs << 12));
+}
+
+/*
+ * copyin from the kernel buffer
+ */
+static void
+av1394_it_kcopyin(av1394_ic_t *icp, void *buf, size_t len)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_isoch_seg_t *seg = itp->it_data_pool.ip_seg;
+
+ ASSERT(itp->it_write_off + len < icp->ic_framesz);
+
+ bcopy(buf, seg[itp->it_write_idx].is_kaddr + itp->it_write_off, len);
+ itp->it_write_off += len;
+}
+
+/*
+ * copyin from the user buffer
+ */
+static int
+av1394_it_copyin(av1394_ic_t *icp, struct uio *uiop, int *full_cnt, int dv)
+{
+ av1394_it_t *itp = &icp->ic_it;
+ av1394_isoch_seg_t *seg = itp->it_data_pool.ip_seg;
+ int idx = itp->it_write_idx;
+ int framesz = icp->ic_framesz;
+ size_t len, frame_resid, start_resid;
+ caddr_t kaddr, kaddr_end;
+ int ret = 0;
+
+ *full_cnt = 0;
+
+ while ((uiop->uio_resid > 0) && (itp->it_write_cnt > 0)) {
+ kaddr = seg[idx].is_kaddr + itp->it_write_off;
+ frame_resid = framesz - itp->it_write_off;
+ len = min(uiop->uio_resid, frame_resid);
+
+ mutex_exit(&icp->ic_mutex);
+ ret = uiomove(kaddr, len, UIO_WRITE, uiop);
+ mutex_enter(&icp->ic_mutex);
+ if (ret != 0) {
+ break;
+ }
+
+ itp->it_write_off += len;
+ if ((itp->it_write_off == framesz) && dv) {
+ /*
+ * for DV formats, make sure we got a frame start.
+ * this is to ensure correct timestamping
+ */
+ kaddr = seg[idx].is_kaddr;
+ kaddr_end = kaddr + framesz;
+ while (!av1394_it_is_dv_frame_start(kaddr)) {
+ kaddr += icp->ic_pktsz;
+ if (kaddr == kaddr_end) {
+ break;
+ }
+ }
+ start_resid = kaddr_end - kaddr;
+ if (start_resid != framesz) {
+ bcopy(kaddr, seg[idx].is_kaddr, start_resid);
+ itp->it_write_off = start_resid;
+ }
+ }
+ if (itp->it_write_off == framesz) {
+ /* for DV formats, reset frame's SYT fields */
+ if (dv) {
+ av1394_it_reset_frame_syt(icp, idx);
+ }
+ itp->it_write_off = 0;
+ itp->it_write_cnt--;
+ idx = (idx + 1) % icp->ic_nframes;
+ (*full_cnt)++;
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * check if a packet starts a new DV frame
+ */
+static boolean_t
+av1394_it_is_dv_frame_start(caddr_t kaddr)
+{
+ uint8_t *p = (uint8_t *)kaddr + 8;
+ /*
+	 * in the DIF block ID data, which immediately follows the CIP header,
+ * SCT, Dseq and DBN fields should be zero (Ref: IEC 61834-2, Fig. 66)
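+	 *
+	 * e.g. (illustrative bytes only): p[0] = 0x1F, p[1] = 0x07, p[2] = 0
+	 * passes the check, since 0x1F & 0xE0 == 0 and 0x07 & 0xF0 == 0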
+ */
+ return (((p[0] & 0xE0) == 0) && ((p[1] & 0xF0) == 0) && (p[2] == 0));
+}
+
+/*
+ * reset all of the frame's SYT fields
+ */
+static void
+av1394_it_reset_frame_syt(av1394_ic_t *icp, int idx)
+{
+ caddr_t kaddr = icp->ic_it.it_data_pool.ip_seg[idx].is_kaddr;
+ caddr_t kaddr_end = kaddr + icp->ic_framesz;
+
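+	/* the SYT field occupies bytes 6-7 of each packet's CIP header */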
+ kaddr += 6;
+ while (kaddr < kaddr_end) {
+ *(uint16_t *)kaddr = 0xFFFF;
+ kaddr += icp->ic_pktsz;
+ }
+}
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394_list.c b/usr/src/uts/common/io/1394/targets/av1394/av1394_list.c
new file mode 100644
index 0000000000..8e0552c50e
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394_list.c
@@ -0,0 +1,129 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Yet another list implementation
+ * This is a multipurpose doubly linked list. It requires that the first
+ * two structure members of each item are the 'next' and 'prev' pointers.
+ * This works for mblk's and other data types used by av1394.
+ *
+ * Locking is provided by the caller.
+ */
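+
+/*
+ * A minimal usage sketch (the item type below is hypothetical, not part
+ * of this driver); any structure whose first two members are the link
+ * pointers can be queued:
+ *
+ *	typedef struct my_item {
+ *		struct my_item	*i_next;	- must be first
+ *		struct my_item	*i_prev;	- must be second
+ *		int		i_data;
+ *	} my_item_t;
+ *
+ *	av1394_list_t	l;
+ *	my_item_t	item;
+ *
+ *	av1394_list_init(&l);
+ *	av1394_list_put_tail(&l, &item);
+ *	ASSERT(av1394_list_head(&l) == &item);
+ */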
+
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+#define ITEM(i) ((av1394_list_item_t *)(i))
+
+/*
+ * av1394_list_init()
+ * Initializes the list
+ */
+void
+av1394_list_init(av1394_list_t *lp)
+{
+ lp->l_head = lp->l_tail = NULL;
+ lp->l_cnt = 0;
+}
+
+/*
+ * av1394_list_head()
+ * Returns pointer to the first item in the list (but does not remove it)
+ */
+void *
+av1394_list_head(av1394_list_t *lp)
+{
+ return (lp->l_head);
+}
+
+
+/*
+ * av1394_list_put_tail()
+ * Adds item to the end of the list
+ */
+void
+av1394_list_put_tail(av1394_list_t *lp, void *item)
+{
+ ITEM(item)->i_next = NULL;
+ ITEM(item)->i_prev = lp->l_tail;
+ if (lp->l_tail == NULL) {
+		ASSERT(lp->l_head == NULL);
+ ASSERT(lp->l_cnt == 0);
+ lp->l_head = lp->l_tail = item;
+ } else {
+ lp->l_tail->i_next = item;
+ lp->l_tail = item;
+ }
+ lp->l_cnt++;
+}
+
+/*
+ * av1394_list_put_head()
+ *	Inserts item at the front of the list
+ */
+void
+av1394_list_put_head(av1394_list_t *lp, void *item)
+{
+ ITEM(item)->i_next = lp->l_head;
+ ITEM(item)->i_prev = NULL;
+ if (lp->l_head == NULL) {
+		ASSERT(lp->l_tail == NULL);
+ ASSERT(lp->l_cnt == 0);
+ lp->l_head = lp->l_tail = item;
+ } else {
+ lp->l_head->i_prev = item;
+ lp->l_head = item;
+ }
+ lp->l_cnt++;
+}
+
+/*
+ * av1394_list_get_head()
+ * Removes and returns an item from the front of the list
+ */
+void *
+av1394_list_get_head(av1394_list_t *lp)
+{
+ av1394_list_item_t *item;
+
+ item = lp->l_head;
+ if (item != NULL) {
+ lp->l_head = item->i_next;
+ if (item == lp->l_tail) {
+ ASSERT(lp->l_cnt == 1);
+ ASSERT(lp->l_head == NULL);
+ lp->l_tail = NULL;
+ lp->l_cnt = 0;
+ } else {
+ ASSERT(lp->l_cnt > 1);
+ item->i_next->i_prev = item->i_prev;
+ lp->l_cnt--;
+ }
+ item->i_next = item->i_prev = NULL;
+ }
+ return (item);
+}
diff --git a/usr/src/uts/common/io/1394/targets/av1394/av1394_queue.c b/usr/src/uts/common/io/1394/targets/av1394/av1394_queue.c
new file mode 100644
index 0000000000..02982886bf
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/av1394/av1394_queue.c
@@ -0,0 +1,188 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2002 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * av1394 queue
+ * Based on the av1394 list, with locking added; works only with mblk's,
+ * and counts and limits the amount of data on the queue.
+ */
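+
+/*
+ * A minimal usage sketch (sizes and priority are illustrative only):
+ *
+ *	av1394_queue_t	q;
+ *	mblk_t		*mp;
+ *
+ *	av1394_initq(&q, ibc, 4096);	- queue at most 4K bytes
+ *	mp = allocb(512, BPRI_MED);
+ *	if (av1394_putq(&q, mp) == 0)
+ *		freemsg(mp);		- queue was full, putq refused it
+ *	...
+ *	if ((mp = av1394_getq(&q)) != NULL)
+ *		freemsg(mp);
+ *	av1394_destroyq(&q);
+ */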
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/1394/targets/av1394/av1394_impl.h>
+
+typedef void (*putfunc_t)(av1394_list_t *, void *);
+
+static mblk_t *av1394_getq_locked(av1394_queue_t *);
+static int av1394_put_common(av1394_queue_t *, mblk_t *, putfunc_t);
+
+void
+av1394_initq(av1394_queue_t *q, ddi_iblock_cookie_t ibc, int max)
+{
+ bzero(q, sizeof (av1394_queue_t));
+
+ mutex_init(&q->q_mutex, NULL, MUTEX_DRIVER, ibc);
+ cv_init(&q->q_cv, NULL, CV_DRIVER, NULL);
+
+ AV1394_ENTERQ(q);
+ av1394_list_init(&q->q_list);
+ q->q_max = max;
+ AV1394_LEAVEQ(q);
+}
+
+void
+av1394_destroyq(av1394_queue_t *q)
+{
+ av1394_flushq(q);
+ mutex_destroy(&q->q_mutex);
+ cv_destroy(&q->q_cv);
+}
+
+void
+av1394_setmaxq(av1394_queue_t *q, int max)
+{
+ AV1394_ENTERQ(q);
+ q->q_max = max;
+ AV1394_LEAVEQ(q);
+}
+
+int
+av1394_getmaxq(av1394_queue_t *q)
+{
+ int max;
+
+ AV1394_ENTERQ(q);
+ max = q->q_max;
+ AV1394_LEAVEQ(q);
+ return (max);
+}
+
+void
+av1394_flushq(av1394_queue_t *q)
+{
+ mblk_t *bp;
+
+ AV1394_ENTERQ(q);
+ while ((bp = av1394_getq_locked(q)) != NULL) {
+ freemsg(bp);
+ }
+ ASSERT(q->q_size == 0);
+ AV1394_LEAVEQ(q);
+}
+
+int
+av1394_putq(av1394_queue_t *q, mblk_t *bp)
+{
+ return (av1394_put_common(q, bp, av1394_list_put_tail));
+}
+
+int
+av1394_putbq(av1394_queue_t *q, mblk_t *bp)
+{
+ return (av1394_put_common(q, bp, av1394_list_put_head));
+}
+
+mblk_t *
+av1394_getq(av1394_queue_t *q)
+{
+ mblk_t *bp;
+
+ AV1394_ENTERQ(q);
+ bp = av1394_getq_locked(q);
+ AV1394_LEAVEQ(q);
+
+ return (bp);
+}
+
+mblk_t *
+av1394_peekq(av1394_queue_t *q)
+{
+ mblk_t *mp;
+
+ AV1394_ENTERQ(q);
+ mp = av1394_peekq_locked(q);
+ AV1394_LEAVEQ(q);
+ return (mp);
+}
+
+mblk_t *
+av1394_peekq_locked(av1394_queue_t *q)
+{
+ ASSERT(mutex_owned(&q->q_mutex));
+ return (av1394_list_head(&q->q_list));
+}
+
+/*
+ * wait until queue is not empty or a signal arrives
+ */
+int
+av1394_qwait_sig(av1394_queue_t *q)
+{
+ int ret = 1;
+
+ AV1394_ENTERQ(q);
+ while (av1394_peekq_locked(q) == NULL) {
+ if ((ret = cv_wait_sig(&q->q_cv, &q->q_mutex)) <= 0) {
+ break;
+ }
+ }
+ AV1394_LEAVEQ(q);
+
+ return (ret);
+}
+
+static int
+av1394_put_common(av1394_queue_t *q, mblk_t *bp, putfunc_t put)
+{
+ int ret;
+ int len = MBLKL(bp);
+
+ AV1394_ENTERQ(q);
+ if (q->q_size + len > q->q_max) {
+ ret = 0;
+ } else {
+ put(&q->q_list, bp);
+ q->q_size += len;
+ cv_broadcast(&q->q_cv);
+ ret = 1;
+ }
+ AV1394_LEAVEQ(q);
+
+ return (ret);
+}
+
+static mblk_t *
+av1394_getq_locked(av1394_queue_t *q)
+{
+ mblk_t *bp;
+
+ if ((bp = av1394_list_get_head(&q->q_list)) != NULL) {
+ q->q_size -= MBLKL(bp);
+ ASSERT(q->q_size >= 0);
+ }
+ return (bp);
+}
diff --git a/usr/src/uts/common/io/1394/targets/scsa1394/hba.c b/usr/src/uts/common/io/1394/targets/scsa1394/hba.c
new file mode 100644
index 0000000000..4d0b7f17b7
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/scsa1394/hba.c
@@ -0,0 +1,2498 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * 1394 mass storage HBA driver
+ */
+
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/cred.h>
+#include <sys/conf.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/byteorder.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/1394/targets/scsa1394/impl.h>
+#include <sys/1394/targets/scsa1394/cmd.h>
+
+/* DDI/DKI entry points */
+static int scsa1394_attach(dev_info_t *, ddi_attach_cmd_t);
+static int scsa1394_detach(dev_info_t *, ddi_detach_cmd_t);
+static int scsa1394_power(dev_info_t *, int, int);
+
+/* configuration routines */
+static void scsa1394_cleanup(scsa1394_state_t *, int);
+static int scsa1394_attach_1394(scsa1394_state_t *);
+static void scsa1394_detach_1394(scsa1394_state_t *);
+static int scsa1394_attach_threads(scsa1394_state_t *);
+static void scsa1394_detach_threads(scsa1394_state_t *);
+static int scsa1394_attach_scsa(scsa1394_state_t *);
+static void scsa1394_detach_scsa(scsa1394_state_t *);
+static int scsa1394_create_cmd_cache(scsa1394_state_t *);
+static void scsa1394_destroy_cmd_cache(scsa1394_state_t *);
+static int scsa1394_add_events(scsa1394_state_t *);
+static void scsa1394_remove_events(scsa1394_state_t *);
+
+/* device configuration */
+static int scsa1394_scsi_bus_config(dev_info_t *, uint_t,
+ ddi_bus_config_op_t, void *, dev_info_t **);
+static int scsa1394_scsi_bus_unconfig(dev_info_t *, uint_t,
+ ddi_bus_config_op_t, void *);
+static void scsa1394_create_children(scsa1394_state_t *);
+static void scsa1394_bus_reset(dev_info_t *, ddi_eventcookie_t, void *,
+ void *);
+static void scsa1394_disconnect(dev_info_t *, ddi_eventcookie_t, void *,
+ void *);
+static void scsa1394_reconnect(dev_info_t *, ddi_eventcookie_t, void *,
+ void *);
+
+/* SCSA HBA entry points */
+static int scsa1394_scsi_tgt_init(dev_info_t *, dev_info_t *,
+ scsi_hba_tran_t *, struct scsi_device *);
+static void scsa1394_scsi_tgt_free(dev_info_t *, dev_info_t *,
+ scsi_hba_tran_t *, struct scsi_device *);
+static int scsa1394_scsi_tgt_probe(struct scsi_device *, int (*)());
+static int scsa1394_probe_g0_nodata(struct scsi_device *, int (*)(),
+ uchar_t, uint_t, uint_t);
+static int scsa1394_probe_tran(struct scsi_pkt *);
+static struct scsi_pkt *scsa1394_scsi_init_pkt(struct scsi_address *,
+ struct scsi_pkt *, struct buf *, int, int, int, int,
+ int (*)(), caddr_t arg);
+static void scsa1394_scsi_destroy_pkt(struct scsi_address *,
+ struct scsi_pkt *);
+static int scsa1394_scsi_start(struct scsi_address *, struct scsi_pkt *);
+static int scsa1394_scsi_abort(struct scsi_address *, struct scsi_pkt *);
+static int scsa1394_scsi_reset(struct scsi_address *, int);
+static int scsa1394_scsi_getcap(struct scsi_address *, char *, int);
+static int scsa1394_scsi_setcap(struct scsi_address *, char *, int, int);
+static void scsa1394_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
+static void scsa1394_scsi_sync_pkt(struct scsi_address *,
+ struct scsi_pkt *);
+
+/* pkt resource allocation routines */
+static int scsa1394_cmd_cache_constructor(void *, void *, int);
+static void scsa1394_cmd_cache_destructor(void *, void *);
+static int scsa1394_cmd_ext_alloc(scsa1394_state_t *, scsa1394_cmd_t *,
+ int);
+static void scsa1394_cmd_ext_free(scsa1394_state_t *, scsa1394_cmd_t *);
+static int scsa1394_cmd_cdb_dma_alloc(scsa1394_state_t *, scsa1394_cmd_t *,
+ int, int (*)(), caddr_t);
+static void scsa1394_cmd_cdb_dma_free(scsa1394_state_t *, scsa1394_cmd_t *);
+static int scsa1394_cmd_buf_dma_alloc(scsa1394_state_t *, scsa1394_cmd_t *,
+ int, int (*)(), caddr_t, struct buf *);
+static void scsa1394_cmd_buf_dma_free(scsa1394_state_t *, scsa1394_cmd_t *);
+static int scsa1394_cmd_dmac2seg(scsa1394_state_t *, scsa1394_cmd_t *,
+ ddi_dma_cookie_t *, uint_t, int);
+static void scsa1394_cmd_seg_free(scsa1394_state_t *, scsa1394_cmd_t *);
+static int scsa1394_cmd_pt_dma_alloc(scsa1394_state_t *, scsa1394_cmd_t *,
+ int (*)(), caddr_t, int);
+static void scsa1394_cmd_pt_dma_free(scsa1394_state_t *, scsa1394_cmd_t *);
+static int scsa1394_cmd_buf_addr_alloc(scsa1394_state_t *,
+ scsa1394_cmd_t *);
+static void scsa1394_cmd_buf_addr_free(scsa1394_state_t *,
+ scsa1394_cmd_t *);
+static int scsa1394_cmd_buf_dma_move(scsa1394_state_t *, scsa1394_cmd_t *);
+
+
+/* pkt and data transfer routines */
+static void scsa1394_prepare_pkt(scsa1394_state_t *, struct scsi_pkt *);
+static void scsa1394_cmd_fill_cdb(scsa1394_lun_t *, scsa1394_cmd_t *);
+static void scsa1394_cmd_fill_cdb_rbc(scsa1394_lun_t *, scsa1394_cmd_t *);
+static void scsa1394_cmd_fill_cdb_other(scsa1394_lun_t *, scsa1394_cmd_t *);
+static void scsa1394_cmd_fill_cdb_len(scsa1394_cmd_t *, int);
+static void scsa1394_cmd_fill_cdb_lba(scsa1394_cmd_t *, int);
+static void scsa1394_cmd_fill_12byte_cdb_len(scsa1394_cmd_t *, int);
+static void scsa1394_cmd_fill_read_cd_cdb_len(scsa1394_cmd_t *, int);
+static int scsa1394_cmd_read_cd_blk_size(uchar_t);
+static int scsa1394_cmd_fake_mode_sense(scsa1394_state_t *,
+ scsa1394_cmd_t *);
+static int scsa1394_cmd_fake_inquiry(scsa1394_state_t *, scsa1394_cmd_t *);
+static int scsa1394_cmd_fake_comp(scsa1394_state_t *, scsa1394_cmd_t *);
+static int scsa1394_cmd_setup_next_xfer(scsa1394_lun_t *,
+ scsa1394_cmd_t *);
+static void scsa1394_cmd_adjust_cdb(scsa1394_lun_t *, scsa1394_cmd_t *);
+static void scsa1394_cmd_status_wrka(scsa1394_lun_t *, scsa1394_cmd_t *);
+
+/* other routines */
+static boolean_t scsa1394_is_my_child(dev_info_t *);
+static void * scsa1394_kmem_realloc(void *, int, int, size_t, int);
+
+static void *scsa1394_statep;
+#define SCSA1394_INST2STATE(inst) (ddi_get_soft_state(scsa1394_statep, inst))
+
+static struct cb_ops scsa1394_cb_ops = {
+ nodev, /* open */
+ nodev, /* close */
+ nodev, /* strategy */
+ nodev, /* print */
+ nodev, /* dump */
+ nodev, /* read */
+ nodev, /* write */
+ NULL, /* ioctl */
+ nodev, /* devmap */
+ nodev, /* mmap */
+ nodev, /* segmap */
+ nochpoll, /* poll */
+ ddi_prop_op, /* prop_op */
+ NULL, /* stream */
+ D_MP, /* cb_flag */
+ CB_REV, /* rev */
+ nodev, /* aread */
+ nodev /* awrite */
+};
+
+static struct dev_ops scsa1394_ops = {
+ DEVO_REV, /* devo_rev, */
+ 0, /* refcnt */
+ ddi_no_info, /* info */
+ nulldev, /* identify */
+ nulldev, /* probe */
+ scsa1394_attach, /* attach */
+ scsa1394_detach, /* detach */
+ nodev, /* reset */
+ &scsa1394_cb_ops, /* driver operations */
+ NULL, /* bus operations */
+ scsa1394_power /* power */
+};
+
+static struct modldrv scsa1394_modldrv = {
+ &mod_driverops, /* module type */
+ "1394 Mass Storage HBA Driver %I%", /* name of the module */
+ &scsa1394_ops, /* driver ops */
+};
+
+static struct modlinkage scsa1394_modlinkage = {
+ MODREV_1, (void *)&scsa1394_modldrv, NULL
+};
+
+/* tunables */
+int scsa1394_bus_config_debug = 0;
+int scsa1394_start_stop_fail_max = SCSA1394_START_STOP_FAIL_MAX;
+int scsa1394_mode_sense_fail_max = SCSA1394_MODE_SENSE_FAIL_MAX;
+int scsa1394_start_stop_timeout_max = SCSA1394_START_STOP_TIMEOUT_MAX;
+
+/* workarounds */
+int scsa1394_wrka_rbc2direct = 1;
+int scsa1394_wrka_force_rmb = 1;
+int scsa1394_wrka_fake_prin = 1;
+
+int scsa1394_wrka_symbios = 1;
+int scsa1394_symbios_page_size = 4 * 1024; /* must be <= _pagesize */
+int scsa1394_symbios_size_max = 512 * 248; /* multiple of page size */
+
+/*
+ *
+ * --- DDI/DKI entry points
+ *
+ */
+int
+_init(void)
+{
+ int ret;
+
+ if (((ret = ddi_soft_state_init(&scsa1394_statep,
+ sizeof (scsa1394_state_t), 1)) != 0)) {
+ return (ret);
+ }
+
+ if ((ret = scsi_hba_init(&scsa1394_modlinkage)) != 0) {
+ ddi_soft_state_fini(&scsa1394_statep);
+ return (ret);
+ }
+
+ if ((ret = mod_install(&scsa1394_modlinkage)) != 0) {
+ scsi_hba_fini(&scsa1394_modlinkage);
+ ddi_soft_state_fini(&scsa1394_statep);
+ return (ret);
+ }
+
+ return (ret);
+}
+
+int
+_fini(void)
+{
+ int ret;
+
+ if ((ret = mod_remove(&scsa1394_modlinkage)) == 0) {
+ scsi_hba_fini(&scsa1394_modlinkage);
+ ddi_soft_state_fini(&scsa1394_statep);
+ }
+
+ return (ret);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&scsa1394_modlinkage, modinfop));
+}
+
+static int
+scsa1394_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int instance = ddi_get_instance(dip);
+ scsa1394_state_t *sp;
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ break;
+ case DDI_RESUME:
+ return (DDI_SUCCESS);
+ default:
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_soft_state_zalloc(scsa1394_statep, instance) != 0) {
+ return (DDI_FAILURE);
+ }
+ sp = SCSA1394_INST2STATE(instance);
+
+#ifndef __lock_lint
+ sp->s_dip = dip;
+ sp->s_instance = instance;
+#endif
+ mutex_init(&sp->s_mutex, NULL, MUTEX_DRIVER,
+ sp->s_attachinfo.iblock_cookie);
+ cv_init(&sp->s_event_cv, NULL, CV_DRIVER, NULL);
+
+ if (scsa1394_attach_1394(sp) != DDI_SUCCESS) {
+ scsa1394_cleanup(sp, 1);
+ return (DDI_FAILURE);
+ }
+
+ if (scsa1394_sbp2_attach(sp) != DDI_SUCCESS) {
+ scsa1394_cleanup(sp, 2);
+ return (DDI_FAILURE);
+ }
+
+ if (scsa1394_attach_threads(sp) != DDI_SUCCESS) {
+ scsa1394_cleanup(sp, 3);
+ return (DDI_FAILURE);
+ }
+
+ if (scsa1394_attach_scsa(sp) != DDI_SUCCESS) {
+ scsa1394_cleanup(sp, 4);
+ return (DDI_FAILURE);
+ }
+
+ if (scsa1394_create_cmd_cache(sp) != DDI_SUCCESS) {
+ scsa1394_cleanup(sp, 5);
+ return (DDI_FAILURE);
+ }
+
+ if (scsa1394_add_events(sp) != DDI_SUCCESS) {
+ scsa1394_cleanup(sp, 6);
+ return (DDI_FAILURE);
+ }
+
+#ifndef __lock_lint
+ sp->s_dev_state = SCSA1394_DEV_ONLINE;
+#endif
+
+ ddi_report_dev(dip);
+
+ return (DDI_SUCCESS);
+}
+
+static int
+scsa1394_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int instance = ddi_get_instance(dip);
+ scsa1394_state_t *sp;
+
+ if ((sp = SCSA1394_INST2STATE(instance)) == NULL) {
+ return (DDI_FAILURE);
+ }
+
+ switch (cmd) {
+ case DDI_DETACH:
+ scsa1394_cleanup(sp, SCSA1394_CLEANUP_LEVEL_MAX);
+ return (DDI_SUCCESS);
+ case DDI_SUSPEND:
+ return (DDI_FAILURE);
+ default:
+ return (DDI_FAILURE);
+ }
+}
+
+/*ARGSUSED*/
+static int
+scsa1394_power(dev_info_t *dip, int comp, int level)
+{
+ return (DDI_SUCCESS);
+}
+
+/*
+ *
+ * --- configuration routines
+ *
+ */
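+/*
+ * scsa1394_cleanup()
+ *	undoes the attach steps in reverse order: a failure at attach
+ *	step N calls scsa1394_cleanup(sp, N), and each case falls through
+ *	to undo all the steps that had already succeeded
+ */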
+static void
+scsa1394_cleanup(scsa1394_state_t *sp, int level)
+{
+ ASSERT((level > 0) && (level <= SCSA1394_CLEANUP_LEVEL_MAX));
+
+ switch (level) {
+ default:
+ scsa1394_remove_events(sp);
+ /* FALLTHRU */
+ case 6:
+ scsa1394_detach_scsa(sp);
+ /* FALLTHRU */
+ case 5:
+ scsa1394_destroy_cmd_cache(sp);
+ /* FALLTHRU */
+ case 4:
+ scsa1394_detach_threads(sp);
+ /* FALLTHRU */
+ case 3:
+ scsa1394_sbp2_detach(sp);
+ /* FALLTHRU */
+ case 2:
+ scsa1394_detach_1394(sp);
+ /* FALLTHRU */
+ case 1:
+ cv_destroy(&sp->s_event_cv);
+ mutex_destroy(&sp->s_mutex);
+ ddi_soft_state_free(scsa1394_statep, sp->s_instance);
+ }
+}
+
+static int
+scsa1394_attach_1394(scsa1394_state_t *sp)
+{
+ int ret;
+
+ if ((ret = t1394_attach(sp->s_dip, T1394_VERSION_V1, 0,
+ &sp->s_attachinfo, &sp->s_t1394_hdl)) != DDI_SUCCESS) {
+ return (ret);
+ }
+
+ /* DMA attributes for data buffers */
+ sp->s_buf_dma_attr = sp->s_attachinfo.dma_attr;
+
+ /* DMA attributes for page tables */
+ sp->s_pt_dma_attr = sp->s_attachinfo.dma_attr;
+ sp->s_pt_dma_attr.dma_attr_sgllen = 1; /* pt must be contiguous */
+
+ if ((ret = t1394_get_targetinfo(sp->s_t1394_hdl, SCSA1394_BUSGEN(sp), 0,
+ &sp->s_targetinfo)) != DDI_SUCCESS) {
+ (void) t1394_detach(&sp->s_t1394_hdl, 0);
+ return (ret);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static void
+scsa1394_detach_1394(scsa1394_state_t *sp)
+{
+ (void) t1394_detach(&sp->s_t1394_hdl, 0);
+}
+
+static int
+scsa1394_attach_threads(scsa1394_state_t *sp)
+{
+ char name[16];
+ int nthr;
+
+ nthr = sp->s_nluns;
+ (void) snprintf(name, sizeof (name), "scsa1394%d", sp->s_instance);
+ if ((sp->s_taskq = ddi_taskq_create(sp->s_dip, name, nthr,
+ TASKQ_DEFAULTPRI, 0)) == NULL) {
+ return (DDI_FAILURE);
+ }
+
+ if (scsa1394_sbp2_threads_init(sp) != DDI_SUCCESS) {
+ ddi_taskq_destroy(sp->s_taskq);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static void
+scsa1394_detach_threads(scsa1394_state_t *sp)
+{
+ scsa1394_sbp2_threads_fini(sp);
+ ddi_taskq_destroy(sp->s_taskq);
+}
+
+static int
+scsa1394_attach_scsa(scsa1394_state_t *sp)
+{
+ scsi_hba_tran_t *tran;
+ int ret;
+
+ sp->s_tran = tran = scsi_hba_tran_alloc(sp->s_dip, SCSI_HBA_CANSLEEP);
+
+ tran->tran_hba_private = sp;
+ tran->tran_tgt_private = NULL;
+ tran->tran_tgt_init = scsa1394_scsi_tgt_init;
+ tran->tran_tgt_probe = scsa1394_scsi_tgt_probe;
+ tran->tran_tgt_free = scsa1394_scsi_tgt_free;
+ tran->tran_start = scsa1394_scsi_start;
+ tran->tran_abort = scsa1394_scsi_abort;
+ tran->tran_reset = scsa1394_scsi_reset;
+ tran->tran_getcap = scsa1394_scsi_getcap;
+ tran->tran_setcap = scsa1394_scsi_setcap;
+ tran->tran_init_pkt = scsa1394_scsi_init_pkt;
+ tran->tran_destroy_pkt = scsa1394_scsi_destroy_pkt;
+ tran->tran_dmafree = scsa1394_scsi_dmafree;
+ tran->tran_sync_pkt = scsa1394_scsi_sync_pkt;
+ tran->tran_reset_notify = NULL;
+ tran->tran_get_bus_addr = NULL;
+ tran->tran_get_name = NULL;
+ tran->tran_bus_reset = NULL;
+ tran->tran_quiesce = NULL;
+ tran->tran_unquiesce = NULL;
+ tran->tran_get_eventcookie = NULL;
+ tran->tran_add_eventcall = NULL;
+ tran->tran_remove_eventcall = NULL;
+ tran->tran_post_event = NULL;
+ tran->tran_bus_config = scsa1394_scsi_bus_config;
+ tran->tran_bus_unconfig = scsa1394_scsi_bus_unconfig;
+
+ if ((ret = scsi_hba_attach_setup(sp->s_dip, &sp->s_attachinfo.dma_attr,
+ tran, 0)) != DDI_SUCCESS) {
+ scsi_hba_tran_free(tran);
+ return (ret);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static void
+scsa1394_detach_scsa(scsa1394_state_t *sp)
+{
+ int ret;
+
+ ret = scsi_hba_detach(sp->s_dip);
+ ASSERT(ret == DDI_SUCCESS);
+
+ scsi_hba_tran_free(sp->s_tran);
+}
+
+static int
+scsa1394_create_cmd_cache(scsa1394_state_t *sp)
+{
+ char name[64];
+
+ (void) sprintf(name, "scsa1394%d_cache", sp->s_instance);
+ sp->s_cmd_cache = kmem_cache_create(name,
+ sizeof (scsa1394_cmd_t), sizeof (void *),
+ scsa1394_cmd_cache_constructor, scsa1394_cmd_cache_destructor,
+ NULL, (void *)sp, NULL, 0);
+
+ return ((sp->s_cmd_cache == NULL) ? DDI_FAILURE : DDI_SUCCESS);
+}
+
+static void
+scsa1394_destroy_cmd_cache(scsa1394_state_t *sp)
+{
+ kmem_cache_destroy(sp->s_cmd_cache);
+}
+
+static int
+scsa1394_add_events(scsa1394_state_t *sp)
+{
+ ddi_eventcookie_t br_evc, rem_evc, ins_evc;
+
+ if (ddi_get_eventcookie(sp->s_dip, DDI_DEVI_BUS_RESET_EVENT,
+ &br_evc) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ if (ddi_add_event_handler(sp->s_dip, br_evc, scsa1394_bus_reset,
+ sp, &sp->s_reset_cb_id) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_get_eventcookie(sp->s_dip, DDI_DEVI_REMOVE_EVENT,
+ &rem_evc) != DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(sp->s_reset_cb_id);
+ return (DDI_FAILURE);
+ }
+ if (ddi_add_event_handler(sp->s_dip, rem_evc, scsa1394_disconnect,
+ sp, &sp->s_remove_cb_id) != DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(sp->s_reset_cb_id);
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_get_eventcookie(sp->s_dip, DDI_DEVI_INSERT_EVENT,
+ &ins_evc) != DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(sp->s_remove_cb_id);
+ (void) ddi_remove_event_handler(sp->s_reset_cb_id);
+ return (DDI_FAILURE);
+ }
+ if (ddi_add_event_handler(sp->s_dip, ins_evc, scsa1394_reconnect,
+ sp, &sp->s_insert_cb_id) != DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(sp->s_remove_cb_id);
+ (void) ddi_remove_event_handler(sp->s_reset_cb_id);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static void
+scsa1394_remove_events(scsa1394_state_t *sp)
+{
+ ddi_eventcookie_t evc;
+
+ if (ddi_get_eventcookie(sp->s_dip, DDI_DEVI_INSERT_EVENT,
+ &evc) == DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(sp->s_insert_cb_id);
+ }
+
+ if (ddi_get_eventcookie(sp->s_dip, DDI_DEVI_REMOVE_EVENT,
+ &evc) == DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(sp->s_remove_cb_id);
+ }
+
+ if (ddi_get_eventcookie(sp->s_dip, DDI_DEVI_BUS_RESET_EVENT,
+ &evc) == DDI_SUCCESS) {
+ (void) ddi_remove_event_handler(sp->s_reset_cb_id);
+ }
+}
+
+/*
+ *
+ * --- device configuration
+ *
+ */
+static int
+scsa1394_scsi_bus_config(dev_info_t *dip, uint_t flag, ddi_bus_config_op_t op,
+ void *arg, dev_info_t **child)
+{
+ scsa1394_state_t *sp = SCSA1394_INST2STATE(ddi_get_instance(dip));
+ int circ;
+ int ret;
+
+ if (scsa1394_bus_config_debug) {
+ flag |= NDI_DEVI_DEBUG;
+ }
+
+ ndi_devi_enter(dip, &circ);
+ if (DEVI(dip)->devi_child == NULL) {
+ scsa1394_create_children(sp);
+ }
+ ret = ndi_busop_bus_config(dip, flag, op, arg, child, 0);
+ ndi_devi_exit(dip, circ);
+
+ return (ret);
+}
+
+static int
+scsa1394_scsi_bus_unconfig(dev_info_t *dip, uint_t flag, ddi_bus_config_op_t op,
+ void *arg)
+{
+ scsa1394_state_t *sp = SCSA1394_INST2STATE(ddi_get_instance(dip));
+ int circ;
+ int ret;
+ uint_t saved_flag = flag;
+
+ if (scsa1394_bus_config_debug) {
+ flag |= NDI_DEVI_DEBUG;
+ }
+
+ /*
+	 * First take the children offline; remove them only if that succeeds.
+ */
+ if (op == BUS_UNCONFIG_ALL) {
+ flag &= ~(NDI_DEVI_REMOVE | NDI_UNCONFIG);
+ }
+
+ ndi_devi_enter(dip, &circ);
+
+ ret = ndi_busop_bus_unconfig(dip, flag, op, arg);
+
+ /*
+	 * If the previous step succeeded and we were not called from the
+	 * modunload daemon, attempt to remove the children.
+ */
+ if ((op == BUS_UNCONFIG_ALL) && (ret == NDI_SUCCESS) &&
+ ((flag & NDI_AUTODETACH) == 0)) {
+ flag |= NDI_DEVI_REMOVE;
+ ret = ndi_busop_bus_unconfig(dip, flag, op, arg);
+ }
+ ndi_devi_exit(dip, circ);
+
+ if ((ret != NDI_SUCCESS) && (op == BUS_UNCONFIG_ALL) &&
+ ((saved_flag & NDI_DEVI_REMOVE) != 0)) {
+ mutex_enter(&sp->s_mutex);
+ if (!sp->s_disconnect_warned) {
+			cmn_err(CE_WARN, "scsa1394(%d): "
+			    "Disconnected device was busy, please reconnect.",
+			    sp->s_instance);
+ sp->s_disconnect_warned = B_TRUE;
+ }
+ mutex_exit(&sp->s_mutex);
+ }
+
+ return (ret);
+}
+
+void
+scsa1394_dtype2name(int dtype, char **node_name, char **driver_name)
+{
+ static struct {
+ char *node_name;
+ char *driver_name;
+ } dtype2name[] = {
+ { "disk", "sd" }, /* DTYPE_DIRECT 0x00 */
+ { "tape", "st" }, /* DTYPE_SEQUENTIAL 0x01 */
+ { "printer", NULL }, /* DTYPE_PRINTER 0x02 */
+ { "processor", NULL }, /* DTYPE_PROCESSOR 0x03 */
+ { "worm", NULL }, /* DTYPE_WORM 0x04 */
+ { "disk", "sd" }, /* DTYPE_RODIRECT 0x05 */
+ { "scanner", NULL }, /* DTYPE_SCANNER 0x06 */
+ { "disk", "sd" }, /* DTYPE_OPTICAL 0x07 */
+ { "changer", NULL }, /* DTYPE_CHANGER 0x08 */
+ { "comm", NULL }, /* DTYPE_COMM 0x09 */
+ { "generic", NULL }, /* DTYPE_??? 0x0A */
+ { "generic", NULL }, /* DTYPE_??? 0x0B */
+ { "array_ctrl", NULL }, /* DTYPE_ARRAY_CTRL 0x0C */
+ { "esi", "ses" }, /* DTYPE_ESI 0x0D */
+ { "disk", "sd" } /* DTYPE_RBC 0x0E */
+ };
+
+ if (dtype < NELEM(dtype2name)) {
+ *node_name = dtype2name[dtype].node_name;
+ *driver_name = dtype2name[dtype].driver_name;
+ } else {
+ *node_name = "generic";
+ *driver_name = NULL;
+ }
+}
+
+static void
+scsa1394_create_children(scsa1394_state_t *sp)
+{
+ char name[SCSA1394_COMPAT_MAX][16];
+ char *compatible[SCSA1394_COMPAT_MAX];
+ dev_info_t *cdip;
+ int i;
+ int dtype;
+ char *node_name;
+ char *driver_name;
+ int ret;
+
+ bzero(name, sizeof (name));
+ (void) strcpy(name[0], "sd");
+ for (i = 0; i < SCSA1394_COMPAT_MAX; i++) {
+ compatible[i] = name[i];
+ }
+
+ for (i = 0; i < sp->s_nluns; i++) {
+ dtype = scsa1394_sbp2_get_lun_type(&sp->s_lun[i]);
+ scsa1394_dtype2name(dtype, &node_name, &driver_name);
+
+ ndi_devi_alloc_sleep(sp->s_dip, node_name,
+ (dnode_t)DEVI_SID_NODEID, &cdip);
+
+ ret = ndi_prop_update_int(DDI_DEV_T_NONE, cdip, "target", 0);
+ if (ret != DDI_PROP_SUCCESS) {
+ (void) ndi_devi_free(cdip);
+ continue;
+ }
+
+ ret = ndi_prop_update_int(DDI_DEV_T_NONE, cdip, "lun", i);
+ if (ret != DDI_PROP_SUCCESS) {
+ ddi_prop_remove_all(cdip);
+ (void) ndi_devi_free(cdip);
+ continue;
+ }
+
+ if (driver_name) {
+ compatible[0] = driver_name;
+ ret = ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
+ "compatible", (char **)compatible,
+ SCSA1394_COMPAT_MAX);
+ if (ret != DDI_PROP_SUCCESS) {
+ ddi_prop_remove_all(cdip);
+ (void) ndi_devi_free(cdip);
+ continue;
+ }
+ }
+
+ /*
+		 * add the "scsa1394" property to distinguish our children
+		 * from those of other drivers
+ */
+ ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, cdip, "scsa1394");
+ if (ret != DDI_PROP_SUCCESS) {
+ ddi_prop_remove_all(cdip);
+ (void) ndi_devi_free(cdip);
+ continue;
+ }
+
+ (void) ddi_initchild(sp->s_dip, cdip);
+ }
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_bus_reset(dev_info_t *dip, ddi_eventcookie_t evc, void *arg,
+ void *data)
+{
+ scsa1394_state_t *sp = arg;
+
+ if (sp != NULL) {
+ mutex_enter(&sp->s_mutex);
+ if (sp->s_dev_state == SCSA1394_DEV_DISCONNECTED) {
+ mutex_exit(&sp->s_mutex);
+ return;
+ }
+ sp->s_stat.stat_bus_reset_cnt++;
+ sp->s_dev_state = SCSA1394_DEV_BUS_RESET;
+ sp->s_attachinfo.localinfo = *(t1394_localinfo_t *)data;
+ mutex_exit(&sp->s_mutex);
+
+ scsa1394_sbp2_req(sp, 0, SCSA1394_THREQ_BUS_RESET);
+ }
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_disconnect(dev_info_t *dip, ddi_eventcookie_t evc, void *arg,
+ void *data)
+{
+ scsa1394_state_t *sp = arg;
+ int circ;
+ dev_info_t *cdip, *cdip_next;
+
+ if (sp == NULL) {
+ return;
+ }
+
+ mutex_enter(&sp->s_mutex);
+ sp->s_stat.stat_disconnect_cnt++;
+ sp->s_dev_state = SCSA1394_DEV_DISCONNECTED;
+ mutex_exit(&sp->s_mutex);
+
+ scsa1394_sbp2_disconnect(sp);
+
+ ndi_devi_enter(dip, &circ);
+ for (cdip = ddi_get_child(dip); cdip != NULL; cdip = cdip_next) {
+ cdip_next = ddi_get_next_sibling(cdip);
+
+ mutex_enter(&DEVI(cdip)->devi_lock);
+ DEVI_SET_DEVICE_REMOVED(cdip);
+ mutex_exit(&DEVI(cdip)->devi_lock);
+ }
+ ndi_devi_exit(dip, circ);
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_reconnect(dev_info_t *dip, ddi_eventcookie_t evc, void *arg,
+ void *data)
+{
+ scsa1394_state_t *sp = arg;
+ int circ;
+ dev_info_t *cdip, *cdip_next;
+
+ if (sp == NULL) {
+ return;
+ }
+
+ mutex_enter(&sp->s_mutex);
+ sp->s_stat.stat_reconnect_cnt++;
+ sp->s_attachinfo.localinfo = *(t1394_localinfo_t *)data;
+ sp->s_disconnect_warned = B_FALSE;
+ mutex_exit(&sp->s_mutex);
+
+ ndi_devi_enter(dip, &circ);
+ for (cdip = ddi_get_child(dip); cdip != NULL; cdip = cdip_next) {
+ cdip_next = ddi_get_next_sibling(cdip);
+
+ mutex_enter(&DEVI(cdip)->devi_lock);
+ DEVI_SET_DEVICE_REINSERTED(cdip);
+ mutex_exit(&DEVI(cdip)->devi_lock);
+ }
+ ndi_devi_exit(dip, circ);
+
+ scsa1394_sbp2_req(sp, 0, SCSA1394_THREQ_RECONNECT);
+}
+
+/*
+ *
+ * --- SCSA entry points
+ *
+ */
+/*ARGSUSED*/
+static int
+scsa1394_scsi_tgt_init(dev_info_t *dip, dev_info_t *cdip, scsi_hba_tran_t *tran,
+ struct scsi_device *sd)
+{
+ scsa1394_state_t *sp = (scsa1394_state_t *)tran->tran_hba_private;
+ int lun;
+ int plen = sizeof (int);
+ int ret = DDI_FAILURE;
+
+ if (ddi_prop_op(DDI_DEV_T_ANY, cdip, PROP_LEN_AND_VAL_BUF,
+ DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun", (caddr_t)&lun,
+ &plen) != DDI_PROP_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ if (!scsa1394_is_my_child(cdip)) {
+ /*
+		 * add the "scsa1394" property to distinguish our children
+		 * from those of other drivers
+ */
+ ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, cdip, "scsa1394");
+ if (ret != DDI_PROP_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ ret = scsa1394_sbp2_login(sp, lun);
+
+ return (ret);
+ }
+
+ if ((lun >= sp->s_nluns) || (sp->s_lun[lun].l_cdip != NULL)) {
+ return (DDI_FAILURE);
+ }
+
+ if ((ret = scsa1394_sbp2_login(sp, lun)) == DDI_SUCCESS) {
+ sp->s_lun[lun].l_cdip = cdip;
+ }
+ return (ret);
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_scsi_tgt_free(dev_info_t *dip, dev_info_t *cdip, scsi_hba_tran_t *tran,
+ struct scsi_device *sd)
+{
+ scsa1394_state_t *sp = (scsa1394_state_t *)tran->tran_hba_private;
+ int lun;
+ int plen = sizeof (int);
+
+ if (!scsa1394_is_my_child(cdip)) {
+ return;
+ }
+
+ if (ddi_prop_op(DDI_DEV_T_ANY, cdip, PROP_LEN_AND_VAL_BUF,
+ DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun", (caddr_t)&lun,
+ &plen) != DDI_PROP_SUCCESS) {
+ return;
+ }
+
+ if ((lun < sp->s_nluns) && (sp->s_lun[lun].l_cdip == cdip)) {
+ if (scsa1394_dev_is_online(sp)) {
+ scsa1394_sbp2_logout(sp, lun, B_TRUE);
+ }
+ sp->s_lun[lun].l_cdip = NULL;
+ }
+}
+
+static int
+scsa1394_scsi_tgt_probe(struct scsi_device *sd, int (*waitfunc)())
+{
+ dev_info_t *dip = ddi_get_parent(sd->sd_dev);
+ scsi_hba_tran_t *tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
+ scsa1394_state_t *sp = (scsa1394_state_t *)tran->tran_hba_private;
+ scsa1394_lun_t *lp;
+
+ if (!scsa1394_dev_is_online(sp)) {
+ return (SCSIPROBE_FAILURE);
+ }
+ lp = &sp->s_lun[sd->sd_address.a_lun];
+
+ if (scsa1394_probe_g0_nodata(sd, waitfunc,
+ SCMD_TEST_UNIT_READY, 0, 0) != SCSIPROBE_EXISTS) {
+ lp->l_nosup_tur = B_TRUE;
+ (void) scsa1394_sbp2_reset(lp, RESET_LUN, NULL);
+ }
+ if (scsa1394_probe_g0_nodata(sd, waitfunc,
+ SCMD_START_STOP, 0, 1) != SCSIPROBE_EXISTS) {
+ lp->l_nosup_start_stop = B_TRUE;
+ }
+
+ /* standard probe issues INQUIRY, which some devices may not support */
+ if (scsi_hba_probe(sd, waitfunc) != SCSIPROBE_EXISTS) {
+ lp->l_nosup_inquiry = B_TRUE;
+ scsa1394_sbp2_fake_inquiry(sp, &lp->l_fake_inq);
+ bcopy(&lp->l_fake_inq, sd->sd_inq, SUN_INQSIZE);
+#ifndef __lock_lint
+ lp->l_rmb_orig = 1;
+#endif
+ }
+
+	/* vold only handles devices with the removable bit set */
+ if (scsa1394_wrka_force_rmb) {
+ sd->sd_inq->inq_rmb = 1;
+ }
+
+ return (SCSIPROBE_EXISTS);
+}
+
+static int
+scsa1394_probe_g0_nodata(struct scsi_device *sd, int (*waitfunc)(),
+ uchar_t cmd, uint_t addr, uint_t cnt)
+{
+ struct scsi_pkt *pkt;
+ int ret = SCSIPROBE_EXISTS;
+
+ pkt = scsi_init_pkt(&sd->sd_address, NULL, NULL, CDB_GROUP0,
+ sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, waitfunc, NULL);
+
+ if (pkt == NULL) {
+ return (SCSIPROBE_NOMEM);
+ }
+
+ (void) scsi_setup_cdb((union scsi_cdb *)pkt->pkt_cdbp, cmd, addr, cnt,
+ 0);
+ ((union scsi_cdb *)(pkt)->pkt_cdbp)->scc_lun = sd->sd_address.a_lun;
+ pkt->pkt_flags = FLAG_NOINTR;
+
+ if (scsa1394_probe_tran(pkt) < 0) {
+ if (pkt->pkt_reason == CMD_INCOMPLETE) {
+ ret = SCSIPROBE_NORESP;
+ } else {
+ ret = SCSIPROBE_FAILURE;
+ }
+ }
+
+ scsi_destroy_pkt(pkt);
+
+ return (ret);
+}
+
+static int
+scsa1394_probe_tran(struct scsi_pkt *pkt)
+{
+ pkt->pkt_time = SCSA1394_PROBE_TIMEOUT;
+
+ if (scsi_transport(pkt) != TRAN_ACCEPT) {
+ return (-1);
+ } else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
+ (pkt->pkt_state == 0)) {
+ return (-1);
+ } else if (pkt->pkt_reason != CMD_CMPLT) {
+ return (-1);
+ } else if (((*pkt->pkt_scbp) & STATUS_MASK) == STATUS_BUSY) {
+ return (0);
+ }
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+scsa1394_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
+{
+ return (0);
+}
+
+static int
+scsa1394_scsi_reset(struct scsi_address *ap, int level)
+{
+ scsa1394_state_t *sp = ADDR2STATE(ap);
+ scsa1394_lun_t *lp;
+ int ret;
+
+ switch (level) {
+ case RESET_ALL:
+ case RESET_TARGET:
+ lp = &sp->s_lun[0];
+ break;
+ case RESET_LUN:
+ lp = &sp->s_lun[ap->a_lun];
+ break;
+ default:
+ return (DDI_FAILURE);
+ }
+
+ ret = scsa1394_sbp2_reset(lp, level, NULL);
+
+ return ((ret == SBP2_SUCCESS) ? 1 : 0);
+}
+
+/*ARGSUSED*/
+static int
+scsa1394_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
+{
+ scsa1394_state_t *sp = ADDR2STATE(ap);
+ size_t dev_bsize_cap;
+ int ret = -1;
+
+ if (!scsa1394_dev_is_online(sp)) {
+ return (-1);
+ }
+
+ if (cap == NULL) {
+ return (-1);
+ }
+
+ switch (scsi_hba_lookup_capstr(cap)) {
+ case SCSI_CAP_DMA_MAX:
+ ret = sp->s_attachinfo.dma_attr.dma_attr_maxxfer;
+ break;
+ case SCSI_CAP_SCSI_VERSION:
+ ret = SCSI_VERSION_2;
+ break;
+ case SCSI_CAP_ARQ:
+ ret = 1;
+ break;
+ case SCSI_CAP_UNTAGGED_QING:
+ ret = 1;
+ break;
+ case SCSI_CAP_GEOMETRY:
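+		/*
+		 * the returned value encodes heads in the upper 16 bits and
+		 * sectors per track in the lower 16; the target driver
+		 * derives the cylinder count from these and the capacity
+		 */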
+ dev_bsize_cap = sp->s_totalsec;
+
+ if (sp->s_secsz > DEV_BSIZE) {
+ dev_bsize_cap *= sp->s_secsz / DEV_BSIZE;
+ } else if (sp->s_secsz < DEV_BSIZE) {
+ dev_bsize_cap /= DEV_BSIZE / sp->s_secsz;
+ }
+
+ if (dev_bsize_cap < 65536 * 2 * 18) { /* < ~1GB */
+ /* unlabeled floppy, 18k per cylinder */
+ ret = ((2 << 16) | 18);
+ } else if (dev_bsize_cap < 65536 * 64 * 32) { /* < 64GB */
+ /* 1024k per cylinder */
+ ret = ((64 << 16) | 32);
+ } else if (dev_bsize_cap < 65536 * 255 * 63) { /* < ~500GB */
+ /* ~8m per cylinder */
+ ret = ((255 << 16) | 63);
+ } else { /* .. 8TB */
+ /* 64m per cylinder */
+ ret = ((512 << 16) | 256);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return (ret);
+}
+
+/*ARGSUSED*/
+static int
+scsa1394_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
+{
+ scsa1394_state_t *sp = ADDR2STATE(ap);
+ int ret = -1;
+
+ if (!scsa1394_dev_is_online(sp)) {
+ return (-1);
+ }
+
+ switch (scsi_hba_lookup_capstr(cap)) {
+ case SCSI_CAP_ARQ:
+ ret = 1;
+ break;
+ case SCSI_CAP_DMA_MAX:
+ case SCSI_CAP_SCSI_VERSION:
+ case SCSI_CAP_UNTAGGED_QING:
+ /* supported but not settable */
+ ret = 0;
+ break;
+ case SCSI_CAP_SECTOR_SIZE:
+ if (value) {
+ sp->s_secsz = value;
+ }
+ break;
+ case SCSI_CAP_TOTAL_SECTORS:
+ if (value) {
+ sp->s_totalsec = value;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return (ret);
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
+{
+ scsa1394_cmd_t *cmd = PKT2CMD(pkt);
+
+ if (cmd->sc_flags & SCSA1394_CMD_DMA_BUF_VALID) {
+ (void) ddi_dma_sync(cmd->sc_buf_dma_hdl, 0, 0,
+ (cmd->sc_flags & SCSA1394_CMD_READ) ?
+ DDI_DMA_SYNC_FORCPU : DDI_DMA_SYNC_FORDEV);
+ }
+}
+
+/*
+ *
+ * --- pkt resource allocation routines
+ *
+ */
+static struct scsi_pkt *
+scsa1394_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
+ struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
+ int (*callback)(), caddr_t arg)
+{
+ scsa1394_state_t *sp = ADDR2STATE(ap);
+ scsa1394_lun_t *lp;
+ scsa1394_cmd_t *cmd;
+ boolean_t is_new; /* new cmd is being allocated */
+ int kf = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
+
+ if (ap->a_lun >= sp->s_nluns) {
+ return (NULL);
+ }
+ lp = &sp->s_lun[ap->a_lun];
+
+ /*
+ * allocate cmd space
+ */
+ if (pkt == NULL) {
+ is_new = B_TRUE;
+ if ((cmd = kmem_cache_alloc(sp->s_cmd_cache, kf)) == NULL) {
+ return (NULL);
+ }
+
+ /* initialize cmd */
+ pkt = &cmd->sc_scsi_pkt;
+ pkt->pkt_ha_private = cmd;
+ pkt->pkt_address = *ap;
+ pkt->pkt_private = cmd->sc_priv;
+ pkt->pkt_scbp = (uchar_t *)&cmd->sc_scb;
+ pkt->pkt_cdbp = (uchar_t *)&cmd->sc_pkt_cdb;
+ pkt->pkt_resid = 0;
+
+ cmd->sc_lun = lp;
+ cmd->sc_pkt = pkt;
+ cmd->sc_cdb_len = cmdlen;
+ cmd->sc_scb_len = statuslen;
+ cmd->sc_priv_len = tgtlen;
+
+ /* need external space? */
+ if ((cmdlen > sizeof (cmd->sc_pkt_cdb)) ||
+ (statuslen > sizeof (cmd->sc_scb)) ||
+ (tgtlen > sizeof (cmd->sc_priv))) {
+ if (scsa1394_cmd_ext_alloc(sp, cmd, kf) !=
+ DDI_SUCCESS) {
+ kmem_cache_free(sp->s_cmd_cache, cmd);
+ lp->l_stat.stat_err_pkt_kmem_alloc++;
+ return (NULL);
+ }
+ }
+
+ /* allocate DMA resources for CDB */
+ if (scsa1394_cmd_cdb_dma_alloc(sp, cmd, flags, callback, arg) !=
+ DDI_SUCCESS) {
+ scsa1394_scsi_destroy_pkt(ap, pkt);
+ return (NULL);
+ }
+ } else {
+ is_new = B_FALSE;
+ cmd = PKT2CMD(pkt);
+ }
+
+ cmd->sc_flags &= ~SCSA1394_CMD_RDWR;
+
+ /* allocate/move DMA resources for data buffer */
+ if ((bp != NULL) && (bp->b_bcount > 0)) {
+ if ((cmd->sc_flags & SCSA1394_CMD_DMA_BUF_VALID) == 0) {
+ if (scsa1394_cmd_buf_dma_alloc(sp, cmd, flags, callback,
+ arg, bp) != DDI_SUCCESS) {
+ if (is_new) {
+ scsa1394_scsi_destroy_pkt(ap, pkt);
+ }
+ return (NULL);
+ }
+ } else {
+ if (scsa1394_cmd_buf_dma_move(sp, cmd) != DDI_SUCCESS) {
+ return (NULL);
+ }
+ }
+
+ ASSERT(cmd->sc_win_len > 0);
+ pkt->pkt_resid = bp->b_bcount - cmd->sc_win_len;
+ }
+
+ /*
+	 * a kernel virtual address may be required for certain workarounds;
+	 * in the B_PHYS or B_PAGEIO case, bp_mapin() will get one for us
+ */
+ if ((bp != NULL) && ((bp->b_flags & (B_PAGEIO | B_PHYS)) != 0) &&
+ (bp->b_bcount < SCSA1394_MAPIN_SIZE_MAX) &&
+ ((cmd->sc_flags & SCSA1394_CMD_DMA_BUF_MAPIN) == 0)) {
+ bp_mapin(bp);
+ cmd->sc_flags |= SCSA1394_CMD_DMA_BUF_MAPIN;
+ }
+
+ return (pkt);
+}
+
+static void
+scsa1394_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
+{
+ scsa1394_state_t *sp = ADDR2STATE(ap);
+ scsa1394_cmd_t *cmd = PKT2CMD(pkt);
+
+ if (cmd->sc_flags & SCSA1394_CMD_DMA_BUF_VALID) {
+ scsa1394_cmd_buf_dma_free(sp, cmd);
+ }
+ if (cmd->sc_flags & SCSA1394_CMD_DMA_CDB_VALID) {
+ scsa1394_cmd_cdb_dma_free(sp, cmd);
+ }
+ if (cmd->sc_flags & SCSA1394_CMD_DMA_BUF_MAPIN) {
+ bp_mapout(cmd->sc_bp);
+ cmd->sc_flags &= ~SCSA1394_CMD_DMA_BUF_MAPIN;
+ }
+ if (cmd->sc_flags & SCSA1394_CMD_EXT) {
+ scsa1394_cmd_ext_free(sp, cmd);
+ }
+
+ kmem_cache_free(sp->s_cmd_cache, cmd);
+}
+
+static void
+scsa1394_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
+{
+ scsa1394_state_t *sp = ADDR2STATE(ap);
+ scsa1394_cmd_t *cmd = PKT2CMD(pkt);
+
+ if (cmd->sc_flags & SCSA1394_CMD_DMA_BUF_VALID) {
+ scsa1394_cmd_buf_dma_free(sp, cmd);
+ }
+ if (cmd->sc_flags & SCSA1394_CMD_DMA_BUF_MAPIN) {
+ bp_mapout(cmd->sc_bp);
+ cmd->sc_flags &= ~SCSA1394_CMD_DMA_BUF_MAPIN;
+ }
+}
+
+/*ARGSUSED*/
+static int
+scsa1394_cmd_cache_constructor(void *buf, void *cdrarg, int kf)
+{
+ scsa1394_cmd_t *cmd = buf;
+
+ bzero(buf, sizeof (scsa1394_cmd_t));
+ cmd->sc_task.ts_drv_priv = cmd;
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_cmd_cache_destructor(void *buf, void *cdrarg)
+{
+}
+
+/*
+ * allocate and deallocate external cmd space (i.e. not part of
+ * scsa1394_cmd_t) for non-standard length cdb, pkt_private, and status areas
+ */
+static int
+scsa1394_cmd_ext_alloc(scsa1394_state_t *sp, scsa1394_cmd_t *cmd, int kf)
+{
+ struct scsi_pkt *pkt = cmd->sc_pkt;
+ void *buf;
+
+ if (cmd->sc_cdb_len > sizeof (cmd->sc_pkt_cdb)) {
+ if ((buf = kmem_zalloc(cmd->sc_cdb_len, kf)) == NULL) {
+ return (DDI_FAILURE);
+ }
+ pkt->pkt_cdbp = buf;
+ cmd->sc_flags |= SCSA1394_CMD_CDB_EXT;
+ }
+
+ if (cmd->sc_scb_len > sizeof (cmd->sc_scb)) {
+ if ((buf = kmem_zalloc(cmd->sc_scb_len, kf)) == NULL) {
+ scsa1394_cmd_ext_free(sp, cmd);
+ return (DDI_FAILURE);
+ }
+ pkt->pkt_scbp = buf;
+ cmd->sc_flags |= SCSA1394_CMD_SCB_EXT;
+ }
+
+ if (cmd->sc_priv_len > sizeof (cmd->sc_priv)) {
+ if ((buf = kmem_zalloc(cmd->sc_priv_len, kf)) == NULL) {
+ scsa1394_cmd_ext_free(sp, cmd);
+ return (DDI_FAILURE);
+ }
+ pkt->pkt_private = buf;
+ cmd->sc_flags |= SCSA1394_CMD_PRIV_EXT;
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_cmd_ext_free(scsa1394_state_t *sp, scsa1394_cmd_t *cmd)
+{
+ struct scsi_pkt *pkt = cmd->sc_pkt;
+
+ if (cmd->sc_flags & SCSA1394_CMD_CDB_EXT) {
+ kmem_free(pkt->pkt_cdbp, cmd->sc_cdb_len);
+ }
+ if (cmd->sc_flags & SCSA1394_CMD_SCB_EXT) {
+ kmem_free(pkt->pkt_scbp, cmd->sc_scb_len);
+ }
+ if (cmd->sc_flags & SCSA1394_CMD_PRIV_EXT) {
+ kmem_free(pkt->pkt_private, cmd->sc_priv_len);
+ }
+ cmd->sc_flags &= ~SCSA1394_CMD_EXT;
+}
+
+/*ARGSUSED*/
+static int
+scsa1394_cmd_cdb_dma_alloc(scsa1394_state_t *sp, scsa1394_cmd_t *cmd,
+ int flags, int (*callback)(), caddr_t arg)
+{
+ if (sbp2_task_orb_alloc(cmd->sc_lun->l_lun, &cmd->sc_task,
+ sizeof (scsa1394_cmd_orb_t)) != SBP2_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ cmd->sc_flags |= SCSA1394_CMD_DMA_CDB_VALID;
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_cmd_cdb_dma_free(scsa1394_state_t *sp, scsa1394_cmd_t *cmd)
+{
+ sbp2_task_orb_free(cmd->sc_lun->l_lun, &cmd->sc_task);
+ cmd->sc_flags &= ~SCSA1394_CMD_DMA_CDB_VALID;
+}
+
+/*
+ * buffer resources
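+ *
+ * allocation below proceeds in stages: a DMA handle is allocated and
+ * the buf(9S) is bound to it; the resulting cookies are either used
+ * directly (a single cookie that fits one SBP-2 segment) or broken
+ * into page table segments; 1394 addresses are then allocated so the
+ * target can reach each segment. failure at any stage unwinds the
+ * earlier ones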
+ */
+static int
+scsa1394_cmd_buf_dma_alloc(scsa1394_state_t *sp, scsa1394_cmd_t *cmd,
+ int flags, int (*callback)(), caddr_t arg, struct buf *bp)
+{
+ scsa1394_lun_t *lp = cmd->sc_lun;
+ int kf = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
+ int dma_flags;
+ ddi_dma_cookie_t dmac;
+ uint_t ccount;
+ int error;
+ int ret;
+
+ cmd->sc_bp = bp;
+
+ if ((ddi_dma_alloc_handle(sp->s_dip, &sp->s_buf_dma_attr, callback,
+ NULL, &cmd->sc_buf_dma_hdl)) != DDI_SUCCESS) {
+ bioerror(bp, 0);
+ return (DDI_FAILURE);
+ }
+
+ cmd->sc_flags &= ~SCSA1394_CMD_RDWR;
+ if (bp->b_flags & B_READ) {
+ dma_flags = DDI_DMA_READ;
+ cmd->sc_flags |= SCSA1394_CMD_READ;
+ } else {
+ dma_flags = DDI_DMA_WRITE;
+ cmd->sc_flags |= SCSA1394_CMD_WRITE;
+ }
+ if (flags & PKT_CONSISTENT) {
+ dma_flags |= DDI_DMA_CONSISTENT;
+ }
+ if (flags & PKT_DMA_PARTIAL) {
+ dma_flags |= DDI_DMA_PARTIAL;
+ }
+
+ ret = ddi_dma_buf_bind_handle(cmd->sc_buf_dma_hdl, bp, dma_flags,
+ callback, arg, &dmac, &ccount);
+
+ switch (ret) {
+ case DDI_DMA_MAPPED:
+ cmd->sc_nwin = 1;
+ cmd->sc_curwin = 0;
+ cmd->sc_win_offset = 0;
+ cmd->sc_win_len = bp->b_bcount;
+ break;
+
+ case DDI_DMA_PARTIAL_MAP:
+ /* retrieve number of windows and first window cookie */
+ cmd->sc_curwin = 0;
+ if ((ddi_dma_numwin(cmd->sc_buf_dma_hdl, &cmd->sc_nwin) !=
+ DDI_SUCCESS) ||
+ (ddi_dma_getwin(cmd->sc_buf_dma_hdl, cmd->sc_curwin,
+ &cmd->sc_win_offset, &cmd->sc_win_len, &dmac, &ccount) !=
+ DDI_SUCCESS)) {
+ (void) ddi_dma_unbind_handle(cmd->sc_buf_dma_hdl);
+ ddi_dma_free_handle(&cmd->sc_buf_dma_hdl);
+ return (DDI_FAILURE);
+ }
+ break;
+
+ case DDI_DMA_NORESOURCES:
+ error = 0;
+ goto map_error;
+
+ case DDI_DMA_BADATTR:
+ case DDI_DMA_NOMAPPING:
+ error = EFAULT;
+ goto map_error;
+
+ default:
+ error = EINVAL;
+
+ map_error:
+ bioerror(bp, error);
+ lp->l_stat.stat_err_cmd_buf_dbind++;
+ ddi_dma_free_handle(&cmd->sc_buf_dma_hdl);
+ return (DDI_FAILURE);
+ }
+ cmd->sc_flags |= SCSA1394_CMD_DMA_BUF_BIND_VALID;
+
+ /*
+ * setup page table if needed
+ */
+ if ((ccount == 1) && (dmac.dmac_size <= SBP2_PT_SEGSIZE_MAX) &&
+ (!sp->s_symbios ||
+ (dmac.dmac_size <= scsa1394_symbios_page_size))) {
+ cmd->sc_buf_nsegs = 1;
+ cmd->sc_buf_seg_mem.ss_len = dmac.dmac_size;
+ cmd->sc_buf_seg_mem.ss_daddr = dmac.dmac_address;
+ cmd->sc_buf_seg = &cmd->sc_buf_seg_mem;
+ } else {
+ /* break window into segments */
+ if (scsa1394_cmd_dmac2seg(sp, cmd, &dmac, ccount, kf) !=
+ DDI_SUCCESS) {
+ scsa1394_cmd_buf_dma_free(sp, cmd);
+ bioerror(bp, 0);
+ return (DDI_FAILURE);
+ }
+
+ /* allocate DMA resources for page table */
+ if (scsa1394_cmd_pt_dma_alloc(sp, cmd, callback, arg,
+ cmd->sc_buf_nsegs) != DDI_SUCCESS) {
+ scsa1394_cmd_buf_dma_free(sp, cmd);
+ bioerror(bp, 0);
+ return (DDI_FAILURE);
+ }
+ }
+
+ /* allocate 1394 addresses for segments */
+ if (scsa1394_cmd_buf_addr_alloc(sp, cmd) != DDI_SUCCESS) {
+ scsa1394_cmd_buf_dma_free(sp, cmd);
+ bioerror(bp, 0);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static void
+scsa1394_cmd_buf_dma_free(scsa1394_state_t *sp, scsa1394_cmd_t *cmd)
+{
+ scsa1394_cmd_buf_addr_free(sp, cmd);
+ if (cmd->sc_flags & SCSA1394_CMD_DMA_BUF_PT_VALID) {
+ scsa1394_cmd_pt_dma_free(sp, cmd);
+ }
+ scsa1394_cmd_seg_free(sp, cmd);
+ if (cmd->sc_flags & SCSA1394_CMD_DMA_BUF_BIND_VALID) {
+ (void) ddi_dma_unbind_handle(cmd->sc_buf_dma_hdl);
+ ddi_dma_free_handle(&cmd->sc_buf_dma_hdl);
+ }
+ cmd->sc_flags &= ~(SCSA1394_CMD_DMA_BUF_VALID | SCSA1394_CMD_RDWR);
+}
+
+/*
+ * Break a set DMA cookies into segments suitable for SBP-2 page table.
+ * This routine can reuse/reallocate segment array from previous calls.
+ */
+static int
+scsa1394_cmd_dmac2seg(scsa1394_state_t *sp, scsa1394_cmd_t *cmd,
+ ddi_dma_cookie_t *dmac, uint_t ccount, int kf)
+{
+ scsa1394_lun_t *lp = cmd->sc_lun;
+ int i;
+ int nsegs;
+ size_t segsize_max;
+ size_t dmac_resid;
+ uint32_t dmac_addr;
+ scsa1394_cmd_seg_t *seg;
+
+ if (!sp->s_symbios) {
+ /*
+ * Number of segments is unknown at this point. Start with
+ * a reasonable estimate and grow it later if needed.
+ */
+ nsegs = max(ccount, cmd->sc_win_len / SBP2_PT_SEGSIZE_MAX) * 2;
+ segsize_max = SBP2_PT_SEGSIZE_MAX;
+ } else {
+ /* For Symbios workaround we know exactly the number of pages */
+ nsegs = howmany(cmd->sc_win_len, scsa1394_symbios_page_size);
+ segsize_max = scsa1394_symbios_page_size;
+ }
+
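+ /*
+ * e.g. assuming SBP2_PT_SEGSIZE_MAX is 64KB-1, a 192KB window
+ * returned in 3 cookies starts with max(3, 196608/65535) * 2 = 6
+ * entries; the array is grown below if the estimate falls short
+ */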
+ if (nsegs > cmd->sc_buf_nsegs_alloc) {
+ if ((cmd->sc_buf_seg = scsa1394_kmem_realloc(cmd->sc_buf_seg,
+ cmd->sc_buf_nsegs_alloc, nsegs,
+ sizeof (scsa1394_cmd_seg_t), kf)) == NULL) {
+ cmd->sc_buf_nsegs_alloc = 0;
+ return (DDI_FAILURE);
+ }
+ cmd->sc_buf_nsegs_alloc = nsegs;
+ }
+
+ /* each cookie maps into one or more segments */
+ cmd->sc_buf_nsegs = 0;
+ i = ccount;
+ for (;;) {
+ dmac_resid = dmac->dmac_size;
+ dmac_addr = dmac->dmac_address;
+ while (dmac_resid > 0) {
+ /* grow array if needed */
+ if (cmd->sc_buf_nsegs >= cmd->sc_buf_nsegs_alloc) {
+ if ((cmd->sc_buf_seg = scsa1394_kmem_realloc(
+ cmd->sc_buf_seg,
+ cmd->sc_buf_nsegs_alloc,
+ cmd->sc_buf_nsegs_alloc + ccount,
+ sizeof (scsa1394_cmd_seg_t), kf)) == NULL) {
+ return (DDI_FAILURE);
+ }
+ cmd->sc_buf_nsegs_alloc += ccount;
+ }
+
+ seg = &cmd->sc_buf_seg[cmd->sc_buf_nsegs];
+ seg->ss_len = min(dmac_resid, segsize_max);
+ seg->ss_daddr = (uint64_t)dmac_addr;
+ dmac_addr += seg->ss_len;
+ dmac_resid -= seg->ss_len;
+ cmd->sc_buf_nsegs++;
+ }
+ ASSERT(dmac_resid == 0);
+
+ /* grab next cookie */
+ if (--i <= 0) {
+ break;
+ }
+ ddi_dma_nextcookie(cmd->sc_buf_dma_hdl, dmac);
+ }
+
+ if (cmd->sc_buf_nsegs > lp->l_stat.stat_cmd_buf_max_nsegs) {
+ lp->l_stat.stat_cmd_buf_max_nsegs = cmd->sc_buf_nsegs;
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_cmd_seg_free(scsa1394_state_t *sp, scsa1394_cmd_t *cmd)
+{
+ if (cmd->sc_buf_nsegs_alloc > 0) {
+ kmem_free(cmd->sc_buf_seg, cmd->sc_buf_nsegs_alloc *
+ sizeof (scsa1394_cmd_seg_t));
+ }
+ cmd->sc_buf_seg = NULL;
+ cmd->sc_buf_nsegs = 0;
+ cmd->sc_buf_nsegs_alloc = 0;
+}
+
+static int
+scsa1394_cmd_pt_dma_alloc(scsa1394_state_t *sp, scsa1394_cmd_t *cmd,
+ int (*callback)(), caddr_t arg, int cnt)
+{
+ scsa1394_lun_t *lp = cmd->sc_lun;
+ size_t len, rlen;
+ uint_t ccount;
+ t1394_alloc_addr_t aa;
+ int result;
+
+ /* allocate DMA memory for page table */
+ if ((ddi_dma_alloc_handle(sp->s_dip, &sp->s_pt_dma_attr,
+ callback, NULL, &cmd->sc_pt_dma_hdl)) != DDI_SUCCESS) {
+ lp->l_stat.stat_err_cmd_pt_dmem_alloc++;
+ return (DDI_FAILURE);
+ }
+
+ cmd->sc_pt_ent_alloc = cnt;
+ len = cmd->sc_pt_ent_alloc * SBP2_PT_ENT_SIZE;
+ if (ddi_dma_mem_alloc(cmd->sc_pt_dma_hdl, len,
+ &sp->s_attachinfo.acc_attr, DDI_DMA_CONSISTENT, callback, arg,
+ &cmd->sc_pt_kaddr, &rlen, &cmd->sc_pt_acc_hdl) != DDI_SUCCESS) {
+ ddi_dma_free_handle(&cmd->sc_pt_dma_hdl);
+ lp->l_stat.stat_err_cmd_pt_dmem_alloc++;
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_dma_addr_bind_handle(cmd->sc_pt_dma_hdl, NULL,
+ cmd->sc_pt_kaddr, len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
+ callback, arg, &cmd->sc_pt_dmac, &ccount) != DDI_DMA_MAPPED) {
+ ddi_dma_mem_free(&cmd->sc_pt_acc_hdl);
+ ddi_dma_free_handle(&cmd->sc_pt_dma_hdl);
+ lp->l_stat.stat_err_cmd_pt_dmem_alloc++;
+ return (DDI_FAILURE);
+ }
+ ASSERT(ccount == 1); /* because dma_attr_sgllen is 1 */
+
+ /* allocate 1394 address for page table */
+ aa.aa_type = T1394_ADDR_FIXED;
+ aa.aa_length = len;
+ aa.aa_address = cmd->sc_pt_dmac.dmac_address;
+ aa.aa_evts.recv_read_request = NULL;
+ aa.aa_evts.recv_write_request = NULL;
+ aa.aa_evts.recv_lock_request = NULL;
+ aa.aa_arg = NULL;
+ aa.aa_kmem_bufp = NULL;
+ aa.aa_enable = T1394_ADDR_RDENBL;
+ if (t1394_alloc_addr(sp->s_t1394_hdl, &aa, 0, &result) != DDI_SUCCESS) {
+ (void) ddi_dma_unbind_handle(cmd->sc_pt_dma_hdl);
+ ddi_dma_mem_free(&cmd->sc_pt_acc_hdl);
+ ddi_dma_free_handle(&cmd->sc_pt_dma_hdl);
+ lp->l_stat.stat_err_cmd_pt_addr_alloc++;
+ return (DDI_FAILURE);
+ }
+ ASSERT(aa.aa_address != 0);
+ cmd->sc_pt_baddr = aa.aa_address;
+ cmd->sc_pt_addr_hdl = aa.aa_hdl;
+
+ cmd->sc_flags |= SCSA1394_CMD_DMA_BUF_PT_VALID;
+
+ return (DDI_SUCCESS);
+}
+
+static void
+scsa1394_cmd_pt_dma_free(scsa1394_state_t *sp, scsa1394_cmd_t *cmd)
+{
+ (void) ddi_dma_unbind_handle(cmd->sc_pt_dma_hdl);
+ ddi_dma_mem_free(&cmd->sc_pt_acc_hdl);
+ ddi_dma_free_handle(&cmd->sc_pt_dma_hdl);
+ (void) t1394_free_addr(sp->s_t1394_hdl, &cmd->sc_pt_addr_hdl, 0);
+ cmd->sc_flags &= ~SCSA1394_CMD_DMA_BUF_PT_VALID;
+}
+
+/*
+ * allocate 1394 addresses for all buffer segments
+ */
+static int
+scsa1394_cmd_buf_addr_alloc(scsa1394_state_t *sp, scsa1394_cmd_t *cmd)
+{
+ scsa1394_lun_t *lp = cmd->sc_lun;
+ t1394_alloc_addr_t aa;
+ scsa1394_cmd_seg_t *seg;
+ int result;
+ int i;
+
+ aa.aa_type = T1394_ADDR_FIXED;
+ aa.aa_evts.recv_read_request = NULL;
+ aa.aa_evts.recv_write_request = NULL;
+ aa.aa_evts.recv_lock_request = NULL;
+ aa.aa_arg = NULL;
+ aa.aa_kmem_bufp = NULL;
+ if (cmd->sc_flags & SCSA1394_CMD_READ) {
+ aa.aa_enable = T1394_ADDR_RDENBL;
+ } else {
+ aa.aa_enable = T1394_ADDR_WRENBL;
+ }
+
+ for (i = 0; i < cmd->sc_buf_nsegs; i++) {
+ seg = &cmd->sc_buf_seg[i];
+
+ /* segment bus address */
+ aa.aa_length = seg->ss_len;
+ aa.aa_address = seg->ss_daddr;
+
+ if (t1394_alloc_addr(sp->s_t1394_hdl, &aa, 0, &result) !=
+ DDI_SUCCESS) {
+ lp->l_stat.stat_err_cmd_buf_addr_alloc++;
+ return (DDI_FAILURE);
+ }
+ ASSERT(aa.aa_address != 0);
+ seg->ss_baddr = aa.aa_address;
+ seg->ss_addr_hdl = aa.aa_hdl;
+ }
+
+ cmd->sc_flags |= SCSA1394_CMD_DMA_BUF_ADDR_VALID;
+
+ return (DDI_SUCCESS);
+}
+
+static void
+scsa1394_cmd_buf_addr_free(scsa1394_state_t *sp, scsa1394_cmd_t *cmd)
+{
+ int i;
+
+ for (i = 0; i < cmd->sc_buf_nsegs; i++) {
+ if (cmd->sc_buf_seg[i].ss_addr_hdl) {
+ (void) t1394_free_addr(sp->s_t1394_hdl,
+ &cmd->sc_buf_seg[i].ss_addr_hdl, 0);
+ }
+ }
+ cmd->sc_flags &= ~SCSA1394_CMD_DMA_BUF_ADDR_VALID;
+}
+
+/*
+ * move to next DMA window
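+ *
+ * with DDI_DMA_PARTIAL only part of the buffer is bound at a time;
+ * ddi_dma_getwin() activates the next window, after which per-window
+ * resources (segments, page table, 1394 addresses) are rebuilt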
+ */
+static int
+scsa1394_cmd_buf_dma_move(scsa1394_state_t *sp, scsa1394_cmd_t *cmd)
+{
+ /* scsa1394_lun_t *lp = cmd->sc_lun; */
+ ddi_dma_cookie_t dmac;
+ uint_t ccount;
+
+ /* for small pkts, leave things where they are (says WDD) */
+ if ((cmd->sc_curwin == cmd->sc_nwin) && (cmd->sc_nwin == 1)) {
+ return (DDI_SUCCESS);
+ }
+ if (++cmd->sc_curwin >= cmd->sc_nwin) {
+ return (DDI_FAILURE);
+ }
+ if (ddi_dma_getwin(cmd->sc_buf_dma_hdl, cmd->sc_curwin,
+ &cmd->sc_win_offset, &cmd->sc_win_len, &dmac, &ccount) !=
+ DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ scsa1394_cmd_buf_addr_free(sp, cmd);
+
+ /*
+ * setup page table if needed
+ */
+ if ((ccount == 1) && (dmac.dmac_size <= SBP2_PT_SEGSIZE_MAX)) {
+ /* but first, free old resources */
+ if (cmd->sc_flags & SCSA1394_CMD_DMA_BUF_PT_VALID) {
+ scsa1394_cmd_pt_dma_free(sp, cmd);
+ }
+ scsa1394_cmd_seg_free(sp, cmd);
+
+ cmd->sc_buf_nsegs = 1;
+ cmd->sc_buf_seg_mem.ss_len = dmac.dmac_size;
+ cmd->sc_buf_seg_mem.ss_daddr = dmac.dmac_address;
+ cmd->sc_buf_seg = &cmd->sc_buf_seg_mem;
+ } else {
+ /* break window into segments */
+ if (scsa1394_cmd_dmac2seg(sp, cmd, &dmac, ccount, KM_NOSLEEP) !=
+ DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ /* allocate DMA resources */
+ if (scsa1394_cmd_pt_dma_alloc(sp, cmd, NULL_FUNC, NULL,
+ cmd->sc_buf_nsegs) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ }
+
+ /* allocate 1394 addresses for segments */
+ if (scsa1394_cmd_buf_addr_alloc(sp, cmd) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ *
+ * --- pkt and data transfer routines
+ *
+ */
+static int
+scsa1394_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
+{
+ scsa1394_state_t *sp = ADDR2STATE(ap);
+ scsa1394_cmd_t *cmd = PKT2CMD(pkt);
+ scsa1394_lun_t *lp = cmd->sc_lun;
+ int ret;
+
+ mutex_enter(&sp->s_mutex);
+ if (sp->s_dev_state != SCSA1394_DEV_ONLINE) {
+ if (sp->s_dev_state == SCSA1394_DEV_BUS_RESET) {
+ /* this should prevent scary console messages */
+ mutex_exit(&sp->s_mutex);
+ return (TRAN_BUSY);
+ } else {
+ mutex_exit(&sp->s_mutex);
+ return (TRAN_FATAL_ERROR);
+ }
+ }
+ mutex_exit(&sp->s_mutex);
+
+ /*
+ * since we don't support polled I/O, just accept the packet
+ * so the rest of the file systems get synced properly
+ */
+ if (ddi_in_panic()) {
+ scsa1394_prepare_pkt(sp, pkt);
+ return (TRAN_ACCEPT);
+ }
+
+ /* polling not supported yet */
+ if (pkt->pkt_flags & FLAG_NOINTR) {
+ return (TRAN_BADPKT);
+ }
+
+ if ((ap->a_lun >= sp->s_nluns) ||
+ (ap->a_lun != pkt->pkt_address.a_lun)) {
+ return (TRAN_BADPKT);
+ }
+
+ scsa1394_prepare_pkt(sp, pkt);
+
+ /* some commands may require fake completion */
+ if ((ret = scsa1394_cmd_fake_comp(sp, cmd)) == DDI_SUCCESS) {
+ return (TRAN_ACCEPT);
+ }
+
+ scsa1394_cmd_fill_cdb(lp, cmd);
+
+ if (cmd->sc_flags & SCSA1394_CMD_DMA_BUF_PT_VALID) {
+ scsa1394_sbp2_seg2pt(lp, cmd);
+ }
+
+ scsa1394_sbp2_cmd2orb(lp, cmd); /* convert into ORB */
+
+ if ((ret = scsa1394_sbp2_start(lp, cmd)) != DDI_SUCCESS) {
+ scsa1394_sbp2_nudge(lp);
+ }
+
+ return (ret);
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_prepare_pkt(scsa1394_state_t *sp, struct scsi_pkt *pkt)
+{
+ scsa1394_cmd_t *cmd = PKT2CMD(pkt);
+
+ pkt->pkt_reason = CMD_CMPLT;
+ pkt->pkt_state = 0;
+ pkt->pkt_statistics = 0;
+ *(pkt->pkt_scbp) = STATUS_GOOD;
+
+ if (cmd) {
+ cmd->sc_timeout = pkt->pkt_time;
+
+ /* workarounds */
+ switch (pkt->pkt_cdbp[0]) {
+ /*
+ * sd does START_STOP_UNIT during attach with a 200 sec timeout.
+ * at this time devi_lock is held, prtconf will be stuck.
+ * reduce timeout for the time being.
+ */
+ case SCMD_START_STOP:
+ cmd->sc_timeout = min(cmd->sc_timeout,
+ scsa1394_start_stop_timeout_max);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+scsa1394_cmd_fill_cdb(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ cmd->sc_cdb_actual_len = cmd->sc_cdb_len;
+
+ mutex_enter(&lp->l_mutex);
+
+ switch (lp->l_dtype_orig) {
+ case DTYPE_DIRECT:
+ case DTYPE_RODIRECT:
+ case DTYPE_OPTICAL:
+ case SCSA1394_DTYPE_RBC:
+ scsa1394_cmd_fill_cdb_rbc(lp, cmd);
+ break;
+ default:
+ scsa1394_cmd_fill_cdb_other(lp, cmd);
+ break;
+ }
+
+ mutex_exit(&lp->l_mutex);
+}
+
+static void
+scsa1394_cmd_fill_cdb_rbc(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ scsa1394_state_t *sp = lp->l_sp;
+ struct scsi_pkt *pkt = CMD2PKT(cmd);
+ int lba, opcode;
+ struct buf *bp = cmd->sc_bp;
+ size_t len;
+ size_t blk_size;
+ int sz;
+
+ opcode = pkt->pkt_cdbp[0];
+ blk_size = lp->l_lba_size;
+
+ switch (opcode) {
+ case SCMD_READ:
+ /* RBC only supports 10-byte read/write */
+ lba = SCSA1394_LBA_6BYTE(pkt);
+ len = SCSA1394_LEN_6BYTE(pkt);
+ opcode = SCMD_READ_G1;
+ cmd->sc_cdb_actual_len = CDB_GROUP1;
+ break;
+ case SCMD_WRITE:
+ lba = SCSA1394_LBA_6BYTE(pkt);
+ len = SCSA1394_LEN_6BYTE(pkt);
+ opcode = SCMD_WRITE_G1;
+ cmd->sc_cdb_actual_len = CDB_GROUP1;
+ break;
+ case SCMD_READ_G1:
+ case SCMD_READ_LONG:
+ lba = SCSA1394_LBA_10BYTE(pkt);
+ len = SCSA1394_LEN_10BYTE(pkt);
+ break;
+ case SCMD_WRITE_G1:
+ case SCMD_WRITE_LONG:
+ lba = SCSA1394_LBA_10BYTE(pkt);
+ len = SCSA1394_LEN_10BYTE(pkt);
+ sz = SCSA1394_CDRW_BLKSZ(bp->b_bcount, len);
+ if (SCSA1394_VALID_CDRW_BLKSZ(sz)) {
+ blk_size = sz;
+ }
+ break;
+ case SCMD_READ_CD:
+ lba = SCSA1394_LBA_10BYTE(pkt);
+ len = SCSA1394_LEN_READ_CD(pkt);
+ blk_size = scsa1394_cmd_read_cd_blk_size(pkt->pkt_cdbp[1] >> 2);
+ break;
+ case SCMD_READ_G5:
+ lba = SCSA1394_LBA_12BYTE(pkt);
+ len = SCSA1394_LEN_12BYTE(pkt);
+ break;
+ case SCMD_WRITE_G5:
+ lba = SCSA1394_LBA_12BYTE(pkt);
+ len = SCSA1394_LEN_12BYTE(pkt);
+ break;
+ default:
+ /* no special mapping for other commands */
+ scsa1394_cmd_fill_cdb_other(lp, cmd);
+ return;
+ }
+
+ /* limit xfer length for Symbios workaround */
+ if (sp->s_symbios && (len * blk_size > scsa1394_symbios_size_max)) {
+ cmd->sc_flags |= SCSA1394_CMD_SYMBIOS_BREAKUP;
+
+ cmd->sc_blk_size = blk_size;
+ cmd->sc_total_blks = cmd->sc_resid_blks = len;
+
+ len = scsa1394_symbios_size_max / blk_size;
+ }
+ cmd->sc_xfer_blks = len;
+ cmd->sc_xfer_bytes = len * blk_size;
+
+ /* finalize new CDB */
+ cmd->sc_cdb[0] = (uchar_t)opcode;
+ scsa1394_cmd_fill_cdb_lba(cmd, lba);
+ switch (opcode) {
+ case SCMD_READ_CD:
+ scsa1394_cmd_fill_read_cd_cdb_len(cmd, len);
+ break;
+ case SCMD_WRITE_G5:
+ case SCMD_READ_G5:
+ scsa1394_cmd_fill_12byte_cdb_len(cmd, len);
+ break;
+ default:
+ scsa1394_cmd_fill_cdb_len(cmd, len);
+ break;
+ }
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_cmd_fill_cdb_other(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ struct scsi_pkt *pkt = CMD2PKT(cmd);
+
+ bcopy(pkt->pkt_cdbp, cmd->sc_cdb, cmd->sc_cdb_len);
+
+ cmd->sc_xfer_bytes = cmd->sc_win_len;
+}
+
+/*
+ * fill up parts of CDB
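+ *
+ * e.g. for a READ(10) of 8 blocks at lba 0x1234, fill_cdb_lba() stores
+ * 00 00 12 34 into CDB bytes 2-5 and fill_cdb_len() stores 00 08 into
+ * bytes 7-8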
+ */
+static void
+scsa1394_cmd_fill_cdb_len(scsa1394_cmd_t *cmd, int len)
+{
+ cmd->sc_cdb[7] = len >> 8;
+ cmd->sc_cdb[8] = (uchar_t)len;
+}
+
+static void
+scsa1394_cmd_fill_cdb_lba(scsa1394_cmd_t *cmd, int lba)
+{
+ cmd->sc_cdb[2] = lba >> 24;
+ cmd->sc_cdb[3] = lba >> 16;
+ cmd->sc_cdb[4] = lba >> 8;
+ cmd->sc_cdb[5] = (uchar_t)lba;
+ cmd->sc_lba = lba;
+}
+
+static void
+scsa1394_cmd_fill_12byte_cdb_len(scsa1394_cmd_t *cmd, int len)
+{
+ cmd->sc_cdb[6] = len >> 24;
+ cmd->sc_cdb[7] = len >> 16;
+ cmd->sc_cdb[8] = len >> 8;
+ cmd->sc_cdb[9] = (uchar_t)len;
+}
+
+static void
+scsa1394_cmd_fill_read_cd_cdb_len(scsa1394_cmd_t *cmd, int len)
+{
+ cmd->sc_cdb[6] = len >> 16;
+ cmd->sc_cdb[7] = len >> 8;
+ cmd->sc_cdb[8] = (uchar_t)len;
+}
+
+/*
+ * For SCMD_READ_CD, figure out the block size based on expected sector type.
+ * See MMC SCSI Specs section 6.1.15
+ */
+static int
+scsa1394_cmd_read_cd_blk_size(uchar_t expected_sector_type)
+{
+ int blk_size;
+
+ switch (expected_sector_type) {
+ case READ_CD_EST_CDDA:
+ blk_size = CDROM_BLK_2352;
+ break;
+ case READ_CD_EST_MODE2:
+ blk_size = CDROM_BLK_2336;
+ break;
+ case READ_CD_EST_MODE2FORM2:
+ blk_size = CDROM_BLK_2324;
+ break;
+ case READ_CD_EST_MODE2FORM1:
+ case READ_CD_EST_ALLTYPE:
+ case READ_CD_EST_MODE1:
+ default:
+ blk_size = CDROM_BLK_2048;
+ }
+
+ return (blk_size);
+}
+
+/*ARGSUSED*/
+static int
+scsa1394_cmd_fake_mode_sense(scsa1394_state_t *sp, scsa1394_cmd_t *cmd)
+{
+ struct scsi_pkt *pkt = CMD2PKT(cmd);
+ struct scsi_arq_status *arqp = (struct scsi_arq_status *)pkt->pkt_scbp;
+ struct scsi_extended_sense *esp = &arqp->sts_sensedata;
+
+ *(pkt->pkt_scbp) = STATUS_CHECK;
+ *(uint8_t *)&arqp->sts_rqpkt_status = STATUS_GOOD;
+ arqp->sts_rqpkt_reason = CMD_CMPLT;
+ arqp->sts_rqpkt_resid = 0;
+ arqp->sts_rqpkt_state |= STATE_XFERRED_DATA;
+ arqp->sts_rqpkt_statistics = 0;
+
+ bzero(esp, sizeof (struct scsi_extended_sense));
+
+ esp->es_class = CLASS_EXTENDED_SENSE;
+
+ esp->es_key = KEY_ILLEGAL_REQUEST;
+
+ pkt->pkt_reason = CMD_CMPLT;
+ pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
+ STATE_XFERRED_DATA | STATE_GOT_STATUS);
+
+ if (pkt->pkt_comp) {
+ (*pkt->pkt_comp)(pkt);
+ }
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+static int
+scsa1394_cmd_fake_inquiry(scsa1394_state_t *sp, scsa1394_cmd_t *cmd)
+{
+ scsa1394_lun_t *lp = cmd->sc_lun;
+ struct scsi_pkt *pkt = CMD2PKT(cmd);
+ struct scsi_inquiry *inq;
+
+ /* copy fabricated inquiry data */
+ inq = (struct scsi_inquiry *)cmd->sc_bp->b_un.b_addr;
+ bcopy(&lp->l_fake_inq, inq, sizeof (struct scsi_inquiry));
+
+ pkt->pkt_resid -= sizeof (struct scsi_inquiry);
+ pkt->pkt_reason = CMD_CMPLT;
+ pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
+ STATE_XFERRED_DATA | STATE_GOT_STATUS);
+
+ if (pkt->pkt_comp) {
+ (*pkt->pkt_comp)(pkt);
+ }
+ return (DDI_SUCCESS);
+}
+
+/*
+ * If command allows fake completion (without actually being transported),
+ * call completion callback and return DDI_SUCCESS.
+ * Otherwise return DDI_FAILURE.
+ */
+static int
+scsa1394_cmd_fake_comp(scsa1394_state_t *sp, scsa1394_cmd_t *cmd)
+{
+ struct scsi_pkt *pkt = CMD2PKT(cmd);
+ scsa1394_lun_t *lp = cmd->sc_lun;
+ int ret = DDI_SUCCESS;
+
+ mutex_enter(&lp->l_mutex);
+
+ switch (pkt->pkt_cdbp[0]) {
+ /*
+ * RBC support for PRIN/PROUT is optional
+ */
+ case SCMD_PRIN:
+ case SCMD_PROUT:
+ if (!scsa1394_wrka_fake_prin) {
+ ret = DDI_FAILURE;
+ }
+ break;
+ /*
+ * Some fixed disks don't like doorlock cmd. And they don't need it.
+ */
+ case SCMD_DOORLOCK:
+ if (lp->l_rmb_orig != 0) {
+ ret = DDI_FAILURE;
+ }
+ break;
+ case SCMD_TEST_UNIT_READY:
+ if (!lp->l_nosup_tur) {
+ ret = DDI_FAILURE;
+ }
+ break;
+ case SCMD_START_STOP:
+ if (!lp->l_nosup_start_stop) {
+ ret = DDI_FAILURE;
+ }
+ break;
+ case SCMD_INQUIRY:
+ if (!lp->l_nosup_inquiry) {
+ ret = DDI_FAILURE;
+ } else {
+ mutex_exit(&lp->l_mutex);
+ return (scsa1394_cmd_fake_inquiry(sp, cmd));
+ }
+ break;
+ case SCMD_MODE_SENSE:
+ if (!lp->l_mode_sense_fake) {
+ ret = DDI_FAILURE;
+ } else {
+ mutex_exit(&lp->l_mutex);
+ return (scsa1394_cmd_fake_mode_sense(sp, cmd));
+ }
+ default:
+ ret = DDI_FAILURE;
+ }
+
+ mutex_exit(&lp->l_mutex);
+
+ if (ret != DDI_SUCCESS) {
+ return (ret);
+ }
+
+ ASSERT(*(pkt->pkt_scbp) == STATUS_GOOD);
+ ASSERT(pkt->pkt_reason == CMD_CMPLT);
+ pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
+ STATE_XFERRED_DATA | STATE_GOT_STATUS);
+
+ if (pkt->pkt_comp) {
+ (*pkt->pkt_comp)(pkt);
+ }
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Returns DDI_SUCCESS if the next xfer was set up successfully,
+ * DDI_FAILURE otherwise.
+ */
+static int
+scsa1394_cmd_setup_next_xfer(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ struct scsi_pkt *pkt = CMD2PKT(cmd);
+
+ ASSERT(cmd->sc_flags & SCSA1394_CMD_SYMBIOS_BREAKUP);
+
+ cmd->sc_resid_blks -= cmd->sc_xfer_blks;
+ if (cmd->sc_resid_blks <= 0) {
+ pkt->pkt_resid = 0;
+ return (DDI_FAILURE);
+ }
+
+ scsa1394_cmd_adjust_cdb(lp, cmd);
+
+ scsa1394_sbp2_seg2pt(lp, cmd);
+
+ scsa1394_sbp2_cmd2orb(lp, cmd);
+
+ if (scsa1394_sbp2_start(lp, cmd) != TRAN_ACCEPT) {
+ pkt->pkt_resid = cmd->sc_resid_blks * cmd->sc_blk_size;
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * new lba = current lba + previous xfer len
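+ *
+ * e.g. if the previous iteration moved 64 blocks starting at lba 100,
+ * this one continues at lba 164 with the remaining count, re-capped
+ * to the Symbios limit below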
+ */
+/*ARGSUSED*/
+static void
+scsa1394_cmd_adjust_cdb(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ int len;
+
+ ASSERT(cmd->sc_flags & SCSA1394_CMD_SYMBIOS_BREAKUP);
+
+ cmd->sc_lba += cmd->sc_xfer_blks;
+ len = cmd->sc_resid_blks;
+
+ /* limit xfer length for Symbios workaround */
+ if (len * cmd->sc_blk_size > scsa1394_symbios_size_max) {
+ len = scsa1394_symbios_size_max / cmd->sc_blk_size;
+ }
+
+ switch (cmd->sc_cdb[0]) {
+ case SCMD_READ_CD:
+ scsa1394_cmd_fill_read_cd_cdb_len(cmd, len);
+ break;
+ case SCMD_WRITE_G5:
+ case SCMD_READ_G5:
+ scsa1394_cmd_fill_12byte_cdb_len(cmd, len);
+ break;
+ case SCMD_WRITE_G1:
+ case SCMD_WRITE_LONG:
+ default:
+ scsa1394_cmd_fill_cdb_len(cmd, len);
+ }
+
+ scsa1394_cmd_fill_cdb_lba(cmd, cmd->sc_lba);
+
+ cmd->sc_xfer_blks = len;
+ cmd->sc_xfer_bytes = len * cmd->sc_blk_size;
+}
+
+void
+scsa1394_cmd_status_proc(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ struct scsi_pkt *pkt = CMD2PKT(cmd);
+
+ /* next iteration of partial xfer? */
+ if ((pkt->pkt_reason == CMD_CMPLT) &&
+ (cmd->sc_flags & SCSA1394_CMD_SYMBIOS_BREAKUP)) {
+ if (scsa1394_cmd_setup_next_xfer(lp, cmd) == DDI_SUCCESS) {
+ return;
+ }
+ }
+ cmd->sc_flags &= ~SCSA1394_CMD_SYMBIOS_BREAKUP;
+
+ /* apply workarounds */
+ if (pkt->pkt_reason == CMD_CMPLT) {
+ scsa1394_cmd_status_wrka(lp, cmd);
+ }
+
+ mutex_enter(&lp->l_mutex);
+
+ /* mode sense workaround */
+ if (pkt->pkt_cdbp[0] == SCMD_MODE_SENSE) {
+ if (pkt->pkt_reason == CMD_CMPLT) {
+ lp->l_mode_sense_fail_cnt = 0;
+ } else if (++lp->l_mode_sense_fail_cnt >=
+ scsa1394_mode_sense_fail_max) {
+ lp->l_mode_sense_fake = B_TRUE;
+ }
+ } else {
+ lp->l_mode_sense_fail_cnt = 0;
+ }
+
+ mutex_exit(&lp->l_mutex);
+
+ if (pkt->pkt_comp) {
+ (*pkt->pkt_comp)(pkt);
+ }
+}
+
+static void
+scsa1394_cmd_status_wrka(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ struct scsi_pkt *pkt = CMD2PKT(cmd);
+
+ mutex_enter(&lp->l_mutex);
+
+ switch (pkt->pkt_cdbp[0]) {
+ case SCMD_INQUIRY: {
+ struct scsi_inquiry *inq;
+
+ inq = (struct scsi_inquiry *)cmd->sc_bp->b_un.b_addr;
+
+ /* change dtype RBC to DIRECT, sd doesn't support RBC */
+ lp->l_dtype_orig = inq->inq_dtype;
+ if ((inq->inq_dtype == SCSA1394_DTYPE_RBC) &&
+ scsa1394_wrka_rbc2direct) {
+ inq->inq_dtype = DTYPE_DIRECT;
+ }
+
+ /* force RMB to 1 */
+ lp->l_rmb_orig = inq->inq_rmb;
+ if (scsa1394_wrka_force_rmb) {
+ inq->inq_rmb = 1;
+ }
+ break;
+ }
+ case SCMD_READ_CAPACITY: {
+ uint32_t *capacity_buf;
+
+ capacity_buf = (uint32_t *)cmd->sc_bp->b_un.b_addr;
+
+ if (lp->l_dtype_orig != DTYPE_RODIRECT) {
+ lp->l_lba_size = min(BE_32(capacity_buf[1]), DEV_BSIZE);
+ if (lp->l_lba_size == 0) {
+ cmn_err(CE_WARN, "zero LBA size reported, "
+ "possibly broken device");
+ lp->l_lba_size = DEV_BSIZE;
+ }
+ } else {
+ lp->l_lba_size = 2048;
+ }
+ }
+ default:
+ break;
+ }
+
+ mutex_exit(&lp->l_mutex);
+}
+
+/*
+ * --- thread management
+ *
+ * dispatch a thread
+ */
+int
+scsa1394_thr_dispatch(scsa1394_thread_t *thr)
+{
+ scsa1394_lun_t *lp = thr->thr_lun;
+ scsa1394_state_t *sp = lp->l_sp;
+ int ret;
+
+ ASSERT(mutex_owned(&lp->l_mutex));
+ ASSERT(thr->thr_state == SCSA1394_THR_INIT);
+
+ thr->thr_state = SCSA1394_THR_RUN;
+
+ ret = ddi_taskq_dispatch(sp->s_taskq, thr->thr_func, thr->thr_arg,
+ KM_SLEEP);
+ return (ret);
+}
+
+/*
+ * cancel thread
+ */
+void
+scsa1394_thr_cancel(scsa1394_thread_t *thr)
+{
+ scsa1394_lun_t *lp = thr->thr_lun;
+
+ ASSERT(mutex_owned(&lp->l_mutex));
+
+ thr->thr_req |= SCSA1394_THREQ_EXIT;
+ cv_signal(&thr->thr_cv);
+
+ /* wait until the thread actually exits */
+ do {
+ if (cv_wait_sig(&thr->thr_cv, &lp->l_mutex) == 0) {
+ break;
+ }
+ } while (thr->thr_state != SCSA1394_THR_EXIT);
+}
+
+/*
+ * wake thread
+ */
+void
+scsa1394_thr_wake(scsa1394_thread_t *thr, int req)
+{
+ scsa1394_lun_t *lp = thr->thr_lun;
+
+ ASSERT(mutex_owned(&lp->l_mutex));
+
+ thr->thr_req |= req;
+ cv_signal(&thr->thr_cv);
+}
+
+void
+scsa1394_thr_clear_req(scsa1394_thread_t *thr, int mask)
+{
+ scsa1394_lun_t *lp = thr->thr_lun;
+
+ mutex_enter(&lp->l_mutex);
+ thr->thr_req &= ~mask;
+ mutex_exit(&lp->l_mutex);
+}
+
+/*
+ *
+ * --- other routines
+ *
+ */
+static boolean_t
+scsa1394_is_my_child(dev_info_t *dip)
+{
+ return ((dip != NULL) && (ddi_prop_exists(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "scsa1394") == 1));
+}
+
+boolean_t
+scsa1394_dev_is_online(scsa1394_state_t *sp)
+{
+ boolean_t ret;
+
+ mutex_enter(&sp->s_mutex);
+ ret = (sp->s_dev_state == SCSA1394_DEV_ONLINE);
+ mutex_exit(&sp->s_mutex);
+
+ return (ret);
+}
+
+static void *
+scsa1394_kmem_realloc(void *old_buf, int old_size, int new_size, size_t elsize,
+ int kf)
+{
+ void *new_buf;
+
+ new_buf = kmem_zalloc(new_size * elsize, kf);
+
+ if (old_size > 0) {
+ if (new_buf != NULL) {
+ bcopy(old_buf, new_buf, old_size * elsize);
+ }
+ kmem_free(old_buf, old_size * elsize);
+ }
+
+ return (new_buf);
+}
diff --git a/usr/src/uts/common/io/1394/targets/scsa1394/sbp2_bus.c b/usr/src/uts/common/io/1394/targets/scsa1394/sbp2_bus.c
new file mode 100644
index 0000000000..6c3d98f04e
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/scsa1394/sbp2_bus.c
@@ -0,0 +1,536 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * 1394 mass storage SBP-2 bus routines
+ */
+
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/cred.h>
+#include <sys/conf.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/sbp2/bus.h>
+#include <sys/1394/targets/scsa1394/impl.h>
+
+static ddi_iblock_cookie_t scsa1394_bus_get_iblock_cookie(void *);
+static uint_t scsa1394_bus_get_node_id(void *);
+static int scsa1394_bus_alloc_cmd(void *, void **, int);
+static void scsa1394_bus_free_cmd(void *, void *);
+static int scsa1394_bus_rq(void *, void *, uint64_t, uint32_t *, int *);
+static int scsa1394_bus_rb(void *, void *, uint64_t, mblk_t **, int,
+ int *);
+static int scsa1394_bus_wq(void *, void *, uint64_t, uint32_t, int *);
+static int scsa1394_bus_wb(void *, void *, uint64_t, mblk_t *, int,
+ int *);
+static int scsa1394_bus_alloc_buf(void *, sbp2_bus_buf_t *);
+static int scsa1394_bus_alloc_buf_phys(void *, sbp2_bus_buf_t *);
+static void scsa1394_bus_free_buf_phys(void *, sbp2_bus_buf_t *);
+static int scsa1394_bus_alloc_buf_normal(void *, sbp2_bus_buf_t *,
+ boolean_t);
+static void scsa1394_bus_free_buf_normal(void *, sbp2_bus_buf_t *);
+static void scsa1394_bus_free_buf(void *, sbp2_bus_buf_t *);
+static int scsa1394_bus_sync_buf(void *, sbp2_bus_buf_t *, off_t, size_t,
+ int);
+static void scsa1394_bus_buf_rw_done(void *, sbp2_bus_buf_t *, void *, int);
+
+/* callbacks */
+static void scsa1394_bus_recv_read_request(cmd1394_cmd_t *);
+static void scsa1394_bus_recv_write_request(cmd1394_cmd_t *);
+
+sbp2_bus_t scsa1394_sbp2_bus = {
+ SBP2_BUS_REV, /* rev */
+ 0xFFFFF0000000LL, /* csr_base */
+ IEEE1394_CONFIG_ROM_ADDR, /* cfgrom_addr */
+ scsa1394_bus_get_iblock_cookie, /* get_iblock_cookie */
+ scsa1394_bus_get_node_id, /* get_node_id */
+ scsa1394_bus_alloc_buf, /* alloc_buf */
+ scsa1394_bus_free_buf, /* free_buf */
+ scsa1394_bus_sync_buf, /* sync_buf */
+ scsa1394_bus_buf_rw_done, /* buf_rd_done */
+ scsa1394_bus_buf_rw_done, /* buf_wr_done */
+ scsa1394_bus_alloc_cmd, /* alloc_cmd */
+ scsa1394_bus_free_cmd, /* free_cmd */
+ scsa1394_bus_rq, /* rq */
+ scsa1394_bus_rb, /* rb */
+ scsa1394_bus_wq, /* wq */
+ scsa1394_bus_wb /* wb */
+};
+
+/*
+ * fault injector
+ *
+ * global on/off switch
+ */
+int scsa1394_bus_fi_on = 0;
+
+/* fault probabilities per operation, in tenths of percent, i.e. 10 is 1% */
+int scsa1394_bus_fi_prob_alloc_buf = 10;
+int scsa1394_bus_fi_prob_alloc_cmd = 10;
+int scsa1394_bus_fi_prob_rq = 10;
+int scsa1394_bus_fi_prob_rb = 10;
+int scsa1394_bus_fi_prob_wq = 10;
+int scsa1394_bus_fi_prob_wb = 10;
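+
+/*
+ * e.g. to enable injection at the default 1% rates, set in /etc/system:
+ * set scsa1394:scsa1394_bus_fi_on = 1
+ */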
+
+#define SCSA1394_BUS_FI_POSITIVE(p) (scsa1394_bus_fi_on && \
+ ((p) > 0) && ((gethrtime() % 1000) < (p)))
+
+/*
+ * translate command result to SBP2 error code
+ */
+static int
+scsa1394_bus_rw_result2code(int result)
+{
+ int code;
+
+ switch (result) {
+ case CMD1394_EDEVICE_BUSY:
+ code = SBP2_EBUSY;
+ break;
+ case CMD1394_EADDRESS_ERROR:
+ code = SBP2_EADDR;
+ break;
+ case CMD1394_ETIMEOUT:
+ case CMD1394_ERETRIES_EXCEEDED:
+ code = SBP2_ETIMEOUT;
+ break;
+ case CMD1394_EDEVICE_REMOVED:
+ code = SBP2_ENODEV;
+ break;
+ default:
+ code = SBP2_EIO;
+ break;
+ }
+ return (code);
+}
+
+static ddi_iblock_cookie_t
+scsa1394_bus_get_iblock_cookie(void *hdl)
+{
+ scsa1394_state_t *sp = hdl;
+
+ return (sp->s_attachinfo.iblock_cookie);
+}
+
+static uint_t
+scsa1394_bus_get_node_id(void *hdl)
+{
+ scsa1394_state_t *sp = hdl;
+
+ return (sp->s_attachinfo.localinfo.local_nodeID);
+}
+
+
+/*ARGSUSED*/
+static int
+scsa1394_bus_alloc_cmd(void *hdl, void **cmdp, int flags)
+{
+ scsa1394_state_t *sp = hdl;
+ cmd1394_cmd_t *cmd;
+
+ if (SCSA1394_BUS_FI_POSITIVE(scsa1394_bus_fi_prob_alloc_cmd)) {
+ return (SBP2_ENOMEM);
+ }
+
+ if (t1394_alloc_cmd(sp->s_t1394_hdl, 0, &cmd) != DDI_SUCCESS) {
+ return (SBP2_ENOMEM);
+ }
+ *cmdp = cmd;
+ return (SBP2_SUCCESS);
+}
+
+
+static void
+scsa1394_bus_free_cmd(void *hdl, void *argcmd)
+{
+ scsa1394_state_t *sp = hdl;
+ cmd1394_cmd_t *cmd = argcmd;
+
+ (void) t1394_free_cmd(sp->s_t1394_hdl, 0, &cmd);
+}
+
+
+/*ARGSUSED*/
+static int
+scsa1394_bus_rq(void *hdl, void *argcmd, uint64_t addr, uint32_t *q, int *berr)
+{
+ scsa1394_state_t *sp = hdl;
+ cmd1394_cmd_t *cmd = argcmd;
+
+ if (SCSA1394_BUS_FI_POSITIVE(scsa1394_bus_fi_prob_rq)) {
+ return (SBP2_EIO);
+ }
+
+ cmd->cmd_addr = addr;
+ cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
+ cmd->cmd_options = CMD1394_BLOCKING;
+
+ if ((t1394_read(sp->s_t1394_hdl, cmd) != DDI_SUCCESS) ||
+ (cmd->cmd_result != CMD1394_CMDSUCCESS)) {
+ *berr = cmd->cmd_result;
+ return (scsa1394_bus_rw_result2code(cmd->cmd_result));
+ }
+
+ *q = cmd->cmd_u.q.quadlet_data;
+ return (SBP2_SUCCESS);
+}
+
+
+/*ARGSUSED*/
+static int
+scsa1394_bus_rb(void *hdl, void *argcmd, uint64_t addr, mblk_t **bpp, int len,
+ int *berr)
+{
+ scsa1394_state_t *sp = hdl;
+ cmd1394_cmd_t *cmd = argcmd;
+ mblk_t *bp = *bpp;
+
+ /* caller wants us to allocate memory */
+ if ((bp == NULL) && ((bp = allocb(len, BPRI_HI)) == NULL)) {
+ return (SBP2_ENOMEM);
+ }
+
+ cmd->cmd_addr = addr;
+ cmd->cmd_type = CMD1394_ASYNCH_RD_BLOCK;
+ cmd->cmd_u.b.data_block = bp;
+ cmd->cmd_u.b.blk_length = len;
+ cmd->cmd_options = CMD1394_BLOCKING;
+
+ if ((t1394_read(sp->s_t1394_hdl, cmd) != DDI_SUCCESS) ||
+ (cmd->cmd_result != CMD1394_CMDSUCCESS)) {
+ freeb(bp);
+ *berr = cmd->cmd_result;
+ return (scsa1394_bus_rw_result2code(cmd->cmd_result));
+ }
+
+ *bpp = bp;
+ return (SBP2_SUCCESS);
+}
+
+
+/*ARGSUSED*/
+static int
+scsa1394_bus_wq(void *hdl, void *argcmd, uint64_t addr, uint32_t q, int *berr)
+{
+ scsa1394_state_t *sp = hdl;
+ cmd1394_cmd_t *cmd = argcmd;
+
+ cmd->cmd_addr = addr;
+ cmd->cmd_type = CMD1394_ASYNCH_WR_QUAD;
+ cmd->cmd_u.q.quadlet_data = q;
+ cmd->cmd_options = CMD1394_BLOCKING;
+
+ if ((t1394_write(sp->s_t1394_hdl, cmd) != DDI_SUCCESS) ||
+ (cmd->cmd_result != CMD1394_CMDSUCCESS)) {
+ *berr = cmd->cmd_result;
+ return (scsa1394_bus_rw_result2code(cmd->cmd_result));
+ }
+
+ return (SBP2_SUCCESS);
+}
+
+
+/*ARGSUSED*/
+static int
+scsa1394_bus_wb(void *hdl, void *argcmd, uint64_t addr, mblk_t *bp, int len,
+ int *berr)
+{
+ scsa1394_state_t *sp = hdl;
+ cmd1394_cmd_t *cmd = argcmd;
+
+ cmd->cmd_addr = addr;
+ cmd->cmd_type = CMD1394_ASYNCH_WR_BLOCK;
+ cmd->cmd_u.b.data_block = bp;
+ cmd->cmd_u.b.blk_length = len;
+ cmd->cmd_options = CMD1394_BLOCKING;
+
+ if ((t1394_write(sp->s_t1394_hdl, cmd) != DDI_SUCCESS) ||
+ (cmd->cmd_result != CMD1394_CMDSUCCESS)) {
+ *berr = cmd->cmd_result;
+ return (scsa1394_bus_rw_result2code(cmd->cmd_result));
+ }
+
+ return (SBP2_SUCCESS);
+}
+
+
+/*ARGSUSED*/
+static int
+scsa1394_bus_alloc_buf(void *hdl, sbp2_bus_buf_t *buf)
+{
+ if (SCSA1394_BUS_FI_POSITIVE(scsa1394_bus_fi_prob_alloc_buf)) {
+ return (SBP2_ENOMEM);
+ }
+
+ if (buf->bb_flags & SBP2_BUS_BUF_DMA) {
+ return (scsa1394_bus_alloc_buf_phys(hdl, buf));
+ } else {
+ return (scsa1394_bus_alloc_buf_normal(hdl, buf,
+ ((buf->bb_flags & SBP2_BUS_BUF_POSTED) != 0)));
+ }
+}
+
+
+static void
+scsa1394_bus_free_buf(void *hdl, sbp2_bus_buf_t *buf)
+{
+ if (buf->bb_flags & SBP2_BUS_BUF_DMA) {
+ scsa1394_bus_free_buf_phys(hdl, buf);
+ } else {
+ scsa1394_bus_free_buf_normal(hdl, buf);
+ }
+}
+
+
+static int
+scsa1394_bus_alloc_buf_phys(void *hdl, sbp2_bus_buf_t *buf)
+{
+ scsa1394_state_t *sp = hdl;
+ scsa1394_bus_buf_t *sbb; /* bus private structure */
+ size_t real_length; /* real allocated length */
+ ddi_dma_cookie_t cookie; /* cookies */
+ uint_t ccount; /* cookie count */
+ t1394_alloc_addr_t aa;
+ int result;
+
+ /* allocate bus private structure */
+ sbb = kmem_zalloc(sizeof (scsa1394_bus_buf_t), KM_SLEEP);
+ sbb->sbb_state = sp;
+
+ /* allocate DMA resources */
+ if (ddi_dma_alloc_handle(sp->s_dip, &sp->s_attachinfo.dma_attr,
+ DDI_DMA_SLEEP, NULL, &sbb->sbb_dma_hdl) != DDI_SUCCESS) {
+ kmem_free(sbb, sizeof (scsa1394_bus_buf_t));
+ return (SBP2_ENOMEM);
+ }
+
+ if (ddi_dma_mem_alloc(sbb->sbb_dma_hdl, buf->bb_len,
+ &sp->s_attachinfo.acc_attr,
+ buf->bb_flags & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT),
+ DDI_DMA_SLEEP, NULL, &buf->bb_kaddr, &real_length,
+ &sbb->sbb_acc_hdl) != DDI_SUCCESS) {
+ ddi_dma_free_handle(&sbb->sbb_dma_hdl);
+ kmem_free(sbb, sizeof (scsa1394_bus_buf_t));
+ return (SBP2_ENOMEM);
+ }
+
+ buf->bb_flags &= ~DDI_DMA_PARTIAL;
+ if (ddi_dma_addr_bind_handle(sbb->sbb_dma_hdl, NULL, buf->bb_kaddr,
+ buf->bb_len, buf->bb_flags, DDI_DMA_SLEEP, NULL,
+ &cookie, &ccount) != DDI_DMA_MAPPED) {
+ ddi_dma_mem_free(&sbb->sbb_acc_hdl);
+ ddi_dma_free_handle(&sbb->sbb_dma_hdl);
+ kmem_free(sbb, sizeof (scsa1394_bus_buf_t));
+ return (SBP2_ENOMEM);
+ }
+ ASSERT(ccount == 1);
+ buf->bb_paddr = cookie.dmac_address; /* 32-bit address */
+
+ /* allocate 1394 resources */
+ bzero(&aa, sizeof (aa));
+ aa.aa_type = T1394_ADDR_FIXED;
+ aa.aa_length = buf->bb_len;
+ if (buf->bb_flags & SBP2_BUS_BUF_RD) {
+ aa.aa_enable |= T1394_ADDR_RDENBL;
+ }
+ if (buf->bb_flags & SBP2_BUS_BUF_WR) {
+ aa.aa_enable |= T1394_ADDR_WRENBL;
+ }
+ aa.aa_address = buf->bb_paddr; /* PCI-1394 mapping is 1-1 */
+
+ if (t1394_alloc_addr(sp->s_t1394_hdl, &aa, 0, &result) != DDI_SUCCESS) {
+ (void) ddi_dma_unbind_handle(sbb->sbb_dma_hdl);
+ ddi_dma_mem_free(&sbb->sbb_acc_hdl);
+ ddi_dma_free_handle(&sbb->sbb_dma_hdl);
+ kmem_free(sbb, sizeof (scsa1394_bus_buf_t));
+ return (SBP2_ENOMEM);
+ }
+ sbb->sbb_addr_hdl = aa.aa_hdl;
+ buf->bb_baddr = aa.aa_address;
+
+ buf->bb_hdl = sbb;
+ return (SBP2_SUCCESS);
+}
+
+
+static void
+scsa1394_bus_free_buf_phys(void *hdl, sbp2_bus_buf_t *buf)
+{
+ scsa1394_state_t *sp = hdl;
+ scsa1394_bus_buf_t *sbb = buf->bb_hdl;
+
+ (void) t1394_free_addr(sp->s_t1394_hdl, &sbb->sbb_addr_hdl, 0);
+ (void) ddi_dma_unbind_handle(sbb->sbb_dma_hdl);
+ ddi_dma_mem_free(&sbb->sbb_acc_hdl);
+ ddi_dma_free_handle(&sbb->sbb_dma_hdl);
+ kmem_free(sbb, sizeof (scsa1394_bus_buf_t));
+ buf->bb_hdl = NULL;
+}
+
+
+static int
+scsa1394_bus_alloc_buf_normal(void *hdl, sbp2_bus_buf_t *buf, boolean_t posted)
+{
+ scsa1394_state_t *sp = hdl;
+ scsa1394_bus_buf_t *sbb; /* bus private structure */
+ t1394_alloc_addr_t aa;
+ int result;
+
+ /* allocate bus private structure */
+ sbb = kmem_zalloc(sizeof (scsa1394_bus_buf_t), KM_SLEEP);
+ sbb->sbb_state = sp;
+
+ /* allocate 1394 resources */
+ bzero(&aa, sizeof (aa));
+ aa.aa_type = posted ? T1394_ADDR_POSTED_WRITE : T1394_ADDR_NORMAL;
+ aa.aa_length = buf->bb_len;
+ if (buf->bb_flags & SBP2_BUS_BUF_RD) {
+ aa.aa_enable |= T1394_ADDR_RDENBL;
+ aa.aa_evts.recv_read_request = scsa1394_bus_recv_read_request;
+ }
+ if (buf->bb_flags & SBP2_BUS_BUF_WR) {
+ aa.aa_enable |= T1394_ADDR_WRENBL;
+ aa.aa_evts.recv_write_request = scsa1394_bus_recv_write_request;
+ }
+ aa.aa_arg = buf;
+
+ if (t1394_alloc_addr(sp->s_t1394_hdl, &aa, 0, &result) != DDI_SUCCESS) {
+ kmem_free(sbb, sizeof (scsa1394_bus_buf_t));
+ return (SBP2_ENOMEM);
+ }
+ sbb->sbb_addr_hdl = aa.aa_hdl;
+ buf->bb_baddr = aa.aa_address;
+
+ buf->bb_hdl = sbb;
+ return (SBP2_SUCCESS);
+}
+
+static void
+scsa1394_bus_free_buf_normal(void *hdl, sbp2_bus_buf_t *buf)
+{
+ scsa1394_state_t *sp = hdl;
+ scsa1394_bus_buf_t *sbb = buf->bb_hdl;
+
+ (void) t1394_free_addr(sp->s_t1394_hdl, &sbb->sbb_addr_hdl, 0);
+ kmem_free(sbb, sizeof (scsa1394_bus_buf_t));
+ buf->bb_hdl = NULL;
+}
+
+/*ARGSUSED*/
+static int
+scsa1394_bus_sync_buf(void *hdl, sbp2_bus_buf_t *buf, off_t offset,
+ size_t length, int type)
+{
+ scsa1394_bus_buf_t *sbb = buf->bb_hdl;
+
+ if (buf->bb_flags & SBP2_BUS_BUF_DMA) {
+ return (ddi_dma_sync(sbb->sbb_dma_hdl, offset, length, type));
+ } else {
+ return (SBP2_SUCCESS);
+ }
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_bus_buf_rw_done(void *hdl, sbp2_bus_buf_t *buf, void *reqh, int error)
+{
+ scsa1394_state_t *sp = hdl;
+ cmd1394_cmd_t *req = reqh;
+
+ /* complete request */
+ switch (error) {
+ case SBP2_BUS_BUF_SUCCESS:
+ req->cmd_result = IEEE1394_RESP_COMPLETE;
+ break;
+ case SBP2_BUS_BUF_ELENGTH:
+ req->cmd_result = IEEE1394_RESP_DATA_ERROR;
+ break;
+ case SBP2_BUS_BUF_EBUSY:
+ req->cmd_result = IEEE1394_RESP_CONFLICT_ERROR;
+ break;
+ default:
+ req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
+ }
+ (void) t1394_recv_request_done(sp->s_t1394_hdl, req, 0);
+}
+
+
+/*
+ *
+ * --- callbacks
+ *
+ */
+static void
+scsa1394_bus_recv_read_request(cmd1394_cmd_t *req)
+{
+ sbp2_bus_buf_t *buf = req->cmd_callback_arg;
+ scsa1394_bus_buf_t *sbb = buf->bb_hdl;
+ scsa1394_state_t *sp = sbb->sbb_state;
+
+ /* XXX sanity checks: addr, etc */
+ if (req->cmd_type == CMD1394_ASYNCH_RD_QUAD) {
+ if (buf->bb_rq_cb) {
+ buf->bb_rq_cb(buf, req, &req->cmd_u.q.quadlet_data);
+ return;
+ }
+ } else {
+ if (buf->bb_rb_cb) {
+ buf->bb_rb_cb(buf, req, &req->cmd_u.b.data_block,
+ req->cmd_u.b.blk_length);
+ return;
+ }
+ }
+ scsa1394_bus_buf_rw_done(sp, buf, req, SBP2_BUS_BUF_FAILURE);
+}
+
+
+static void
+scsa1394_bus_recv_write_request(cmd1394_cmd_t *req)
+{
+ sbp2_bus_buf_t *buf = req->cmd_callback_arg;
+ scsa1394_bus_buf_t *sbb = buf->bb_hdl;
+ scsa1394_state_t *sp = sbb->sbb_state;
+
+ /* XXX sanity checks: addr, etc */
+ if (req->cmd_type == CMD1394_ASYNCH_WR_QUAD) {
+ if (buf->bb_wq_cb) {
+ buf->bb_wq_cb(buf, req, req->cmd_u.q.quadlet_data);
+ return;
+ }
+ } else {
+ if (buf->bb_wb_cb) {
+ buf->bb_wb_cb(buf, req, &req->cmd_u.b.data_block);
+ return;
+ }
+ }
+ scsa1394_bus_buf_rw_done(sp, buf, req, SBP2_BUS_BUF_FAILURE);
+}
diff --git a/usr/src/uts/common/io/1394/targets/scsa1394/sbp2_driver.c b/usr/src/uts/common/io/1394/targets/scsa1394/sbp2_driver.c
new file mode 100644
index 0000000000..72f572f12a
--- /dev/null
+++ b/usr/src/uts/common/io/1394/targets/scsa1394/sbp2_driver.c
@@ -0,0 +1,996 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * 1394 mass storage SBP-2 driver routines
+ */
+
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/cred.h>
+#include <sys/conf.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/1394/targets/scsa1394/impl.h>
+#include <sys/1394/targets/scsa1394/cmd.h>
+#include <sys/sbp2/bus.h>
+#include <sys/sbp2/driver.h>
+
+static void scsa1394_sbp2_detect_symbios(scsa1394_state_t *);
+static void scsa1394_sbp2_worker_thread(void *);
+static void scsa1394_sbp2_status_cb(void *, sbp2_task_t *);
+static void scsa1394_sbp2_req_bus_reset(scsa1394_lun_t *);
+static void scsa1394_sbp2_req_reconnect(scsa1394_lun_t *);
+static void scsa1394_sbp2_seg2pt_default(scsa1394_lun_t *,
+ scsa1394_cmd_t *);
+static void scsa1394_sbp2_seg2pt_symbios(scsa1394_lun_t *,
+ scsa1394_cmd_t *);
+static void scsa1394_sbp2_req_status(scsa1394_lun_t *);
+static void scsa1394_sbp2_status_proc(scsa1394_lun_t *, scsa1394_cmd_t *,
+ scsa1394_status_t *);
+static int scsa1394_sbp2_conv_status(scsa1394_cmd_t *,
+ scsa1394_status_t *);
+static void scsa1394_sbp2_reset_proc(scsa1394_lun_t *, int,
+ scsa1394_cmd_t *);
+static boolean_t scsa1394_sbp2_logged_in(scsa1394_lun_t *);
+
+extern sbp2_bus_t scsa1394_sbp2_bus;
+
+/* tunables */
+uint_t scsa1394_sbp2_max_payload_sub = 2;
+extern int scsa1394_symbios_size_max;
+extern int scsa1394_symbios_page_size;
+extern int scsa1394_wrka_symbios;
+
+/* symbios workaround will be applied unless device is on this list */
+scsa1394_bw_list_t scsa1394_sbp2_symbios_whitelist[] = {
+ { SCSA1394_BW_ONE, 0x0a27 }, /* Apple */
+ { SCSA1394_BW_ONE, 0xd04b } /* LaCie */
+};
+
+/*
+ *
+ * --- SBP-2 routines
+ *
+ */
+int
+scsa1394_sbp2_attach(scsa1394_state_t *sp)
+{
+ sbp2_tgt_t *tp;
+ scsa1394_lun_t *lp;
+ int i;
+
+ /*
+ * target
+ */
+ if (sbp2_tgt_init(sp, &scsa1394_sbp2_bus, NLUNS_PER_TARGET,
+ &sp->s_tgt) != SBP2_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ tp = sp->s_tgt;
+
+ /*
+ * luns
+ */
+ sp->s_nluns = tp->t_nluns;
+ sp->s_lun = kmem_zalloc(sp->s_nluns * sizeof (scsa1394_lun_t),
+ KM_SLEEP);
+
+ for (i = 0; i < sp->s_nluns; i++) {
+ lp = &sp->s_lun[i];
+
+ mutex_init(&lp->l_mutex, NULL, MUTEX_DRIVER,
+ sp->s_attachinfo.iblock_cookie);
+
+ lp->l_rmb_orig = -1;
+ lp->l_lun = &tp->t_lun[i];
+ lp->l_sp = sp;
+ lp->l_lba_size = DEV_BSIZE;
+ }
+
+ scsa1394_sbp2_detect_symbios(sp);
+
+ return (DDI_SUCCESS);
+}
+
+void
+scsa1394_sbp2_detach(scsa1394_state_t *sp)
+{
+ int i;
+ scsa1394_lun_t *lp;
+
+ for (i = 0; i < sp->s_nluns; i++) {
+ lp = &sp->s_lun[i];
+ if (lp->l_sp != NULL) {
+ mutex_destroy(&lp->l_mutex);
+ }
+ }
+
+ kmem_free(sp->s_lun, sp->s_nluns * sizeof (scsa1394_lun_t));
+ sbp2_tgt_fini(sp->s_tgt);
+}
+
+static void
+scsa1394_sbp2_detect_symbios(scsa1394_state_t *sp)
+{
+ sbp2_cfgrom_ent_t *root = &sp->s_tgt->t_cfgrom.cr_root;
+ sbp2_cfgrom_ent_t *ent;
+ scsa1394_bw_list_t *wl;
+ int vid;
+ int i;
+
+
+ if (!scsa1394_wrka_symbios) {
+ sp->s_symbios = B_FALSE;
+ return;
+ } else {
+ sp->s_symbios = B_TRUE;
+ }
+
+ /* get device's vendor ID */
+ if ((ent = sbp2_cfgrom_ent_by_key(root, IEEE1212_IMMEDIATE_TYPE,
+ IEEE1212_MODULE_VENDOR_ID, 0)) == NULL) {
+ return;
+ }
+ vid = ent->ce_data.imm;
+
+ /* find a match in the whitelist */
+ for (i = 0; i < NELEM(scsa1394_sbp2_symbios_whitelist); i++) {
+ wl = &scsa1394_sbp2_symbios_whitelist[i];
+ if ((wl->vid_match == SCSA1394_BW_ONE) && (wl->vid == vid)) {
+ sp->s_symbios = B_FALSE;
+ break;
+ }
+ }
+}
+
+
+/*
+ * functional equivalent of ddi_rep_get32() with big endian access handle
+ */
+static void
+bcopy_swap32(uint32_t *from, uint32_t *to, int count)
+{
+ int i;
+ uint32_t data;
+
+ ASSERT((uintptr_t)to % 4 == 0);
+
+ for (i = 0; i < count; i++) {
+ data = *from++;
+ *to++ = SBP2_SWAP32(data);
+ }
+}
+
+/*
+ * Build an inquiry for a given device that doesn't like inquiry commands.
+ */
+void
+scsa1394_sbp2_fake_inquiry(scsa1394_state_t *sp, struct scsi_inquiry *inq)
+{
+ sbp2_cfgrom_ent_t *r = &sp->s_tgt->t_cfgrom.cr_root;
+ sbp2_cfgrom_ent_t *e, *eref, *evid = NULL;
+ int i, len;
+
+ bzero(inq, sizeof (struct scsi_inquiry));
+
+ inq->inq_dtype = DTYPE_DIRECT;
+ inq->inq_rmb = 1;
+ inq->inq_ansi = 2;
+ inq->inq_rdf = RDF_SCSI2;
+ inq->inq_len = sizeof (struct scsi_inquiry) - 4;
+
+ (void) memset(inq->inq_vid, ' ', sizeof (inq->inq_vid));
+ (void) memset(inq->inq_pid, ' ', sizeof (inq->inq_pid));
+ (void) memset(inq->inq_revision, ' ', sizeof (inq->inq_revision));
+
+ /*
+ * vid/pid/rev can be derived from Config ROM textual descriptors
+ */
+ for (i = 0; i < 256; i++) {
+ if ((e = sbp2_cfgrom_ent_by_key(r, IEEE1212_LEAF_TYPE,
+ IEEE1212_TEXTUAL_DESCRIPTOR, i)) == NULL) {
+ break;
+ }
+ eref = e->ce_ref;
+ if ((eref == NULL) || (e->ce_len < 3) ||
+ (eref->ce_kt != IEEE1212_IMMEDIATE_TYPE)) {
+ continue;
+ }
+
+ len = e->ce_len - 2;
+ if (eref->ce_kv == IEEE1212_MODULE_VENDOR_ID) {
+ evid = e;
+ bcopy_swap32(&e->ce_data.leaf[2],
+ (uint32_t *)inq->inq_vid,
+ min(sizeof (inq->inq_vid) / 4, len));
+ } else if (eref->ce_kv == 0x17) { /* IEEE 1212 Model_ID */
+ bcopy_swap32(&e->ce_data.leaf[2],
+ (uint32_t *)inq->inq_pid,
+ min(sizeof (inq->inq_pid) / 4, len));
+ } else if ((eref->ce_kv == IEEE1212_MODULE_HW_VERSION) ||
+ (eref == evid)) {
+ bcopy_swap32(&e->ce_data.leaf[2],
+ (uint32_t *)inq->inq_revision,
+ min(sizeof (inq->inq_revision) / 4, len));
+ }
+ }
+}
+
+int
+scsa1394_sbp2_threads_init(scsa1394_state_t *sp)
+{
+ scsa1394_lun_t *lp;
+ scsa1394_thread_t *thr;
+ int i;
+
+ for (i = 0; i < sp->s_nluns; i++) {
+ lp = &sp->s_lun[i];
+ thr = &lp->l_worker_thread;
+
+ thr->thr_func = scsa1394_sbp2_worker_thread;
+ thr->thr_arg = thr;
+ thr->thr_state = SCSA1394_THR_INIT;
+ cv_init(&thr->thr_cv, NULL, CV_DRIVER, NULL);
+ thr->thr_lun = lp;
+ thr->thr_req = 0;
+
+ mutex_enter(&lp->l_mutex);
+ if (scsa1394_thr_dispatch(thr) != DDI_SUCCESS) {
+ mutex_exit(&lp->l_mutex);
+ scsa1394_sbp2_threads_fini(sp);
+ return (DDI_FAILURE);
+ }
+ mutex_exit(&lp->l_mutex);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+void
+scsa1394_sbp2_threads_fini(scsa1394_state_t *sp)
+{
+ scsa1394_lun_t *lp;
+ scsa1394_thread_t *thr;
+ int i;
+
+ for (i = 0; i < sp->s_nluns; i++) {
+ lp = &sp->s_lun[i];
+ thr = &lp->l_worker_thread;
+
+ /* if thread wasn't initialized, thr_lun will be NULL */
+ if (thr->thr_lun == lp) {
+ mutex_enter(&lp->l_mutex);
+ scsa1394_thr_cancel(thr);
+ mutex_exit(&lp->l_mutex);
+ ASSERT(thr->thr_state != SCSA1394_THR_RUN);
+ cv_destroy(&thr->thr_cv);
+ }
+ }
+}
+
+int
+scsa1394_sbp2_get_lun_type(scsa1394_lun_t *lp)
+{
+ return (lp->l_lun->l_type);
+}
+
+int
+scsa1394_sbp2_login(scsa1394_state_t *sp, int lun)
+{
+ scsa1394_lun_t *lp = &sp->s_lun[lun];
+ int berr;
+
+ if (sbp2_lun_login(lp->l_lun, &lp->l_ses,
+ scsa1394_sbp2_status_cb, lp, &berr) != SBP2_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ ASSERT(lp->l_ses != NULL);
+ return (DDI_SUCCESS);
+}
+
+void
+scsa1394_sbp2_logout(scsa1394_state_t *sp, int lun, boolean_t phys)
+{
+ scsa1394_lun_t *lp = &sp->s_lun[lun];
+ int berr;
+
+ if (scsa1394_sbp2_logged_in(lp)) {
+ (void) sbp2_lun_logout(lp->l_lun, &lp->l_ses, &berr, phys);
+ }
+}
+
+void
+scsa1394_sbp2_req(scsa1394_state_t *sp, int lun, int req)
+{
+ scsa1394_lun_t *lp = &sp->s_lun[lun];
+
+ if (lp != NULL) {
+ mutex_enter(&lp->l_mutex);
+ scsa1394_thr_wake(&lp->l_worker_thread, req);
+ mutex_exit(&lp->l_mutex);
+ }
+}
+
+static void
+scsa1394_sbp2_req_bus_reset(scsa1394_lun_t *lp)
+{
+ scsa1394_state_t *sp = lp->l_sp;
+ int berr = 0;
+
+ if (t1394_get_targetinfo(sp->s_t1394_hdl, SCSA1394_BUSGEN(sp), 0,
+ &sp->s_targetinfo) != DDI_SUCCESS) {
+ goto disconnect;
+ }
+
+ if (sp->s_targetinfo.target_nodeID == T1394_INVALID_NODEID) {
+ goto disconnect;
+ }
+
+ if (!scsa1394_sbp2_logged_in(lp)) {
+ /* reconnect procedure is only for logged in hosts */
+ return;
+ }
+
+ /*
+ * Try SBP-2 RECONNECT procedure first. Note that we're passing
+ * local Node ID, which might have changed during bus reset.
+ * sbp2_ses_reconnect() will use it to update the ORBs.
+ */
+ if (sbp2_ses_reconnect(lp->l_ses, &berr,
+ SCSA1394_NODEID(sp)) == SBP2_SUCCESS) {
+ mutex_enter(&sp->s_mutex);
+ sp->s_dev_state = SCSA1394_DEV_ONLINE;
+ mutex_exit(&sp->s_mutex);
+
+ /* resume task processing */
+ scsa1394_sbp2_nudge(lp);
+
+ return;
+ }
+
+ if (berr == CMD1394_EDEVICE_REMOVED) {
+ goto disconnect;
+ }
+
+ /* reconnect failed, try to logout and login again */
+ scsa1394_sbp2_flush_cmds(lp, CMD_TRAN_ERR, 0, STAT_BUS_RESET);
+ (void) sbp2_lun_logout(lp->l_lun, &lp->l_ses, &berr, B_FALSE);
+
+ if (scsa1394_sbp2_login(sp, 0) != DDI_SUCCESS) {
+ goto disconnect;
+ }
+
+ mutex_enter(&sp->s_mutex);
+ sp->s_dev_state = SCSA1394_DEV_ONLINE;
+ mutex_exit(&sp->s_mutex);
+
+ return;
+
+disconnect:
+ mutex_enter(&sp->s_mutex);
+ sp->s_dev_state = SCSA1394_DEV_DISCONNECTED;
+ mutex_exit(&sp->s_mutex);
+ if (scsa1394_sbp2_logged_in(lp)) {
+ scsa1394_sbp2_flush_cmds(lp, CMD_DEV_GONE, 0, STAT_BUS_RESET);
+ (void) sbp2_lun_logout(lp->l_lun, &lp->l_ses, &berr, B_FALSE);
+ }
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_sbp2_req_reconnect(scsa1394_lun_t *lp)
+{
+ scsa1394_state_t *sp = lp->l_sp;
+
+ if (t1394_get_targetinfo(sp->s_t1394_hdl, SCSA1394_BUSGEN(sp), 0,
+ &sp->s_targetinfo) != DDI_SUCCESS) {
+ return;
+ }
+
+ mutex_enter(&sp->s_mutex);
+ sp->s_dev_state = SCSA1394_DEV_ONLINE;
+ mutex_exit(&sp->s_mutex);
+
+ if (sbp2_tgt_reconnect(sp->s_tgt) != SBP2_SUCCESS) {
+ goto disconnect;
+ }
+
+ if (scsa1394_sbp2_login(sp, 0) != DDI_SUCCESS) {
+ goto disconnect;
+ }
+
+ cmn_err(CE_WARN, "scsa1394(%d): "
+ "Reinserted device is accessible again.\n", sp->s_instance);
+
+ return;
+
+disconnect:
+ mutex_enter(&sp->s_mutex);
+ sp->s_dev_state = SCSA1394_DEV_DISCONNECTED;
+ mutex_exit(&sp->s_mutex);
+}
+
+void
+scsa1394_sbp2_disconnect(scsa1394_state_t *sp)
+{
+ scsa1394_lun_t *lp = &sp->s_lun[0];
+ int berr;
+
+ scsa1394_sbp2_flush_cmds(lp, CMD_DEV_GONE, 0, STAT_BUS_RESET);
+ (void) sbp2_lun_logout(lp->l_lun, &lp->l_ses, &berr, B_FALSE);
+ sbp2_tgt_disconnect(sp->s_tgt);
+}
+
+/*
+ * convert segment array into DMA-mapped SBP-2 page table
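+ *
+ * each segment becomes one 8-byte unrestricted page table element:
+ * a 16-bit segment length followed by a 48-bit segment base address
+ * (16-bit high part + 32-bit low quadlet), byte-swapped via the
+ * SBP2_SWAP macros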
+ */
+void
+scsa1394_sbp2_seg2pt(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ scsa1394_state_t *sp = lp->l_sp;
+
+ ASSERT(cmd->sc_flags & SCSA1394_CMD_DMA_BUF_PT_VALID);
+
+ if (sp->s_symbios) {
+ scsa1394_sbp2_seg2pt_symbios(lp, cmd);
+ } else {
+ scsa1394_sbp2_seg2pt_default(lp, cmd);
+ }
+}
+
+/*ARGSUSED*/
+static void
+scsa1394_sbp2_seg2pt_default(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ sbp2_pt_unrestricted_t *pt;
+ scsa1394_cmd_seg_t *seg;
+ int i;
+
+ pt = (sbp2_pt_unrestricted_t *)cmd->sc_pt_kaddr;
+ seg = &cmd->sc_buf_seg[0];
+ for (i = 0; i < cmd->sc_buf_nsegs; i++) {
+ pt->pt_seg_len = SBP2_SWAP16(seg->ss_len);
+ pt->pt_seg_base_hi = SBP2_SWAP16(seg->ss_baddr >> 32);
+ pt->pt_seg_base_lo = SBP2_SWAP32(seg->ss_baddr & 0xFFFFFFFF);
+
+ pt++;
+ seg++;
+ }
+ (void) ddi_dma_sync(cmd->sc_pt_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
+
+ cmd->sc_pt_cmd_size = cmd->sc_buf_nsegs;
+}
+
+/*
+ * fill page table for Symbios workaround
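+ *
+ * for a broken-up transfer the current chunk starts
+ * (sc_total_blks - sc_resid_blks) blocks into the buffer; dividing
+ * that byte offset by the workaround page size selects the first
+ * segment to map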
+ */
+/*ARGSUSED*/
+static void
+scsa1394_sbp2_seg2pt_symbios(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ size_t offset;
+ int start_page;
+ sbp2_pt_unrestricted_t *pt;
+ scsa1394_cmd_seg_t *seg;
+ size_t resid;
+ int nsegs;
+
+ /* calculate page table address and size */
+ if (cmd->sc_flags & SCSA1394_CMD_SYMBIOS_BREAKUP) {
+ offset = (cmd->sc_total_blks - cmd->sc_resid_blks) *
+ cmd->sc_blk_size;
+ } else {
+ offset = 0;
+ }
+ start_page = offset / scsa1394_symbios_page_size;
+
+ pt = (sbp2_pt_unrestricted_t *)cmd->sc_pt_kaddr;
+ seg = &cmd->sc_buf_seg[start_page];
+ resid = cmd->sc_xfer_bytes;
+ nsegs = 0;
+ while (resid > 0) {
+ ASSERT(seg->ss_len <= scsa1394_symbios_page_size);
+
+ pt->pt_seg_len = min(seg->ss_len, resid);
+ resid -= pt->pt_seg_len;
+ SBP2_SWAP16_1(pt->pt_seg_len);
+
+ pt->pt_seg_base_hi = SBP2_SWAP16(seg->ss_baddr >> 32);
+ pt->pt_seg_base_lo = SBP2_SWAP32(seg->ss_baddr & 0xFFFFFFFF);
+
+ nsegs++;
+ pt++;
+ seg++;
+ }
+ (void) ddi_dma_sync(cmd->sc_pt_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
+
+ cmd->sc_pt_cmd_size = nsegs;
+}
+
+/*
+ * convert command into DMA-mapped SBP-2 ORB
+ */
+void
+scsa1394_sbp2_cmd2orb(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ scsa1394_state_t *sp = lp->l_sp;
+ scsa1394_cmd_orb_t *orb = sbp2_task_orb_kaddr(&cmd->sc_task);
+
+ mutex_enter(&lp->l_mutex);
+
+ lp->l_stat.stat_cmd_cnt++;
+
+ bzero(orb->co_cdb, sizeof (orb->co_cdb));
+
+ /* CDB */
+ bcopy(cmd->sc_cdb, orb->co_cdb, cmd->sc_cdb_actual_len);
+
+ /*
+ * ORB parameters
+ *
+ * use max speed and max payload for this speed.
+ * max async data transfer for a given speed is 512<<speed
+ * SBP-2 defines (see 5.1.2) max data transfer as 2^(max_payload+2),
+ * hence max_payload = 7 + speed
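+ *
+ * e.g. at S400 (speed code 2) with the default
+ * scsa1394_sbp2_max_payload_sub of 2, max_payload = 7 + 2 - 2 = 7,
+ * i.e. 2^(7+2) = 512 bytes per packet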
+ */
+ orb->co_params = SBP2_ORB_NOTIFY | SBP2_ORB_RQ_FMT_SBP2 |
+ (sp->s_targetinfo.current_max_speed << SBP2_ORB_CMD_SPD_SHIFT) |
+ ((7 + sp->s_targetinfo.current_max_speed -
+ scsa1394_sbp2_max_payload_sub) << SBP2_ORB_CMD_MAX_PAYLOAD_SHIFT);
+
+ /* direction: initiator's read is target's write (and vice versa) */
+ if (cmd->sc_flags & SCSA1394_CMD_READ) {
+ orb->co_params |= SBP2_ORB_CMD_DIR;
+ }
+
+ /*
+ * data_size and data_descriptor
+ */
+ if (cmd->sc_buf_nsegs == 0) {
+ /* no data */
+ orb->co_data_size = 0;
+ SCSA1394_ADDR_SET(sp, orb->co_data_descr, 0);
+ } else if (cmd->sc_buf_nsegs == 1) {
+ /* contiguous buffer - use direct addressing */
+ ASSERT(cmd->sc_buf_seg[0].ss_len != 0);
+ orb->co_data_size = SBP2_SWAP16(cmd->sc_buf_seg[0].ss_len);
+ SCSA1394_ADDR_SET(sp, orb->co_data_descr,
+ cmd->sc_buf_seg[0].ss_baddr);
+ } else {
+ /* non-contiguous s/g list - page table */
+ ASSERT(cmd->sc_pt_cmd_size > 0);
+ orb->co_params |= SBP2_ORB_CMD_PT;
+ orb->co_data_size = SBP2_SWAP16(cmd->sc_pt_cmd_size);
+ SCSA1394_ADDR_SET(sp, orb->co_data_descr, cmd->sc_pt_baddr);
+ }
+
+ SBP2_SWAP16_1(orb->co_params);
+
+ SBP2_ORBP_SET(orb->co_next_orb, SBP2_ORBP_NULL);
+
+ mutex_exit(&lp->l_mutex);
+
+ sbp2_task_orb_sync(lp->l_lun, &cmd->sc_task, DDI_DMA_SYNC_FORDEV);
+}
+
+
+/*ARGSUSED*/
+int
+scsa1394_sbp2_start(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd)
+{
+ sbp2_task_t *task = CMD2TASK(cmd);
+ int ret;
+
+ ASSERT(lp->l_ses != NULL);
+
+ task->ts_timeout = cmd->sc_timeout;
+ task->ts_error = SBP2_TASK_ERR_NONE;
+ task->ts_bus_error = 0;
+ task->ts_state = SBP2_TASK_INIT;
+
+ ret = sbp2_ses_submit_task(lp->l_ses, task);
+
+ if (ret == SBP2_SUCCESS) {
+ return (TRAN_ACCEPT);
+ } else if (task->ts_error == SBP2_TASK_ERR_BUS) {
+ if (task->ts_bus_error == CMD1394_EDEVICE_BUSY) {
+ return (TRAN_BUSY);
+ } else {
+ return (TRAN_FATAL_ERROR);
+ }
+ } else {
+ return (TRAN_FATAL_ERROR);
+ }
+}
+
+/*
+ * This function is called by the SBP-2 layer when task status is received,
+ * typically from an interrupt handler. Just wake the worker thread to do
+ * the actual work.
+ */
+/*ARGSUSED*/
+static void
+scsa1394_sbp2_status_cb(void *arg, sbp2_task_t *task)
+{
+ scsa1394_lun_t *lp = (scsa1394_lun_t *)arg;
+
+ mutex_enter(&lp->l_mutex);
+ scsa1394_thr_wake(&lp->l_worker_thread, SCSA1394_THREQ_TASK_STATUS);
+ mutex_exit(&lp->l_mutex);
+}
+
+void
+scsa1394_sbp2_nudge(scsa1394_lun_t *lp)
+{
+ mutex_enter(&lp->l_mutex);
+ scsa1394_thr_wake(&lp->l_worker_thread, SCSA1394_THREQ_NUDGE);
+ mutex_exit(&lp->l_mutex);
+}
+
+/*
+ * worker thread
+ *
+ * Requests are posted as bits in thr_req while l_mutex is held; the thread
+ * clears each request bit, drops the mutex to service the request, then
+ * re-checks for more work.
+ */
+static void
+scsa1394_sbp2_worker_thread(void *arg)
+{
+ scsa1394_thread_t *thr = (scsa1394_thread_t *)arg;
+ scsa1394_lun_t *lp = thr->thr_lun;
+
+ mutex_enter(&lp->l_mutex);
+ for (;;) {
+ while (thr->thr_req == 0) {
+ cv_wait(&thr->thr_cv, &lp->l_mutex);
+ }
+ if (thr->thr_req & SCSA1394_THREQ_EXIT) {
+ break;
+ }
+ if (thr->thr_req & SCSA1394_THREQ_BUS_RESET) {
+ thr->thr_req &= ~SCSA1394_THREQ_BUS_RESET;
+ mutex_exit(&lp->l_mutex);
+ scsa1394_sbp2_req_bus_reset(lp);
+ mutex_enter(&lp->l_mutex);
+ continue;
+ }
+ if (thr->thr_req & SCSA1394_THREQ_RECONNECT) {
+ thr->thr_req &= ~SCSA1394_THREQ_RECONNECT;
+ mutex_exit(&lp->l_mutex);
+ scsa1394_sbp2_req_reconnect(lp);
+ mutex_enter(&lp->l_mutex);
+ continue;
+ }
+ if (thr->thr_req & SCSA1394_THREQ_TASK_STATUS) {
+ thr->thr_req &= ~SCSA1394_THREQ_TASK_STATUS;
+ mutex_exit(&lp->l_mutex);
+ scsa1394_sbp2_req_status(lp);
+ mutex_enter(&lp->l_mutex);
+ continue;
+ }
+ if (thr->thr_req & SCSA1394_THREQ_NUDGE) {
+ thr->thr_req &= ~SCSA1394_THREQ_NUDGE;
+ mutex_exit(&lp->l_mutex);
+ if (scsa1394_sbp2_logged_in(lp)) {
+ sbp2_ses_nudge(lp->l_ses);
+ }
+ mutex_enter(&lp->l_mutex);
+ continue;
+ }
+ }
+ thr->thr_state = SCSA1394_THR_EXIT;
+ cv_signal(&thr->thr_cv);
+ mutex_exit(&lp->l_mutex);
+}
+
+/*
+ * task status handler
+ */
+static void
+scsa1394_sbp2_req_status(scsa1394_lun_t *lp)
+{
+ sbp2_ses_t *sp = lp->l_ses;
+ sbp2_task_t *task;
+
+ if (sp == NULL) {
+ return;
+ }
+
+ /*
+ * Process all tasks that have received status. Removing one completed
+ * task at a time and processing it before removing the next preserves
+ * callback order.
+ */
+ while ((task = sbp2_ses_remove_first_task_state(sp, SBP2_TASK_COMP)) !=
+ NULL) {
+ sbp2_ses_nudge(sp);
+
+ ASSERT(task->ts_state == SBP2_TASK_COMP);
+ task->ts_state = SBP2_TASK_PROC;
+ scsa1394_sbp2_status_proc(lp, TASK2CMD(task),
+ (scsa1394_status_t *)&task->ts_status);
+ }
+ sbp2_ses_nudge(sp); /* submit next task */
+}
+
+static void
+scsa1394_sbp2_status_proc(scsa1394_lun_t *lp, scsa1394_cmd_t *cmd,
+ scsa1394_status_t *st)
+{
+ sbp2_task_t *task = CMD2TASK(cmd);
+ struct scsi_pkt *pkt = CMD2PKT(cmd);
+ uint64_t *p;
+
+ if (cmd->sc_flags & SCSA1394_CMD_READ) {
+ (void) ddi_dma_sync(cmd->sc_buf_dma_hdl, 0, 0,
+ DDI_DMA_SYNC_FORKERNEL);
+ }
+
+ if (task->ts_error != SBP2_TASK_ERR_NONE) {
+ pkt->pkt_state |= STATE_GOT_BUS;
+ switch (task->ts_error) {
+ case SBP2_TASK_ERR_ABORT:
+ pkt->pkt_state |= STATE_GOT_TARGET;
+ pkt->pkt_reason = CMD_ABORTED;
+ break;
+ case SBP2_TASK_ERR_LUN_RESET:
+ pkt->pkt_state |= STATE_GOT_TARGET;
+ pkt->pkt_reason = CMD_RESET;
+ pkt->pkt_statistics |= STAT_DEV_RESET;
+ break;
+ case SBP2_TASK_ERR_TGT_RESET:
+ pkt->pkt_state |= STATE_GOT_TARGET;
+ pkt->pkt_reason = CMD_RESET;
+ pkt->pkt_statistics |= STAT_DEV_RESET;
+ break;
+ case SBP2_TASK_ERR_TIMEOUT:
+ (void) scsa1394_sbp2_reset(lp, RESET_TARGET, cmd);
+ return;
+ case SBP2_TASK_ERR_DEAD:
+ case SBP2_TASK_ERR_BUS:
+ default:
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ break;
+ }
+ } else if ((st->st_param & SBP2_ST_RESP) == SBP2_ST_RESP_COMPLETE) {
+ /*
+ * An SBP-2 status block has been received; now look at sbp_status.
+ *
+ * Note: ANSI NCITS 325-1998 B.2 requires that when status is
+ * GOOD, the length must be one, but some devices do not comply.
+ */
+ if (st->st_sbp_status == SBP2_ST_SBP_DUMMY_ORB) {
+ pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
+ pkt->pkt_reason = CMD_ABORTED;
+ pkt->pkt_statistics |= STAT_DEV_RESET;
+ } else if ((st->st_status & SCSA1394_ST_STATUS) ==
+ STATUS_GOOD) {
+ /* request complete */
+ *(pkt->pkt_scbp) = STATUS_GOOD;
+ pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
+ STATE_SENT_CMD | STATE_XFERRED_DATA |
+ STATE_GOT_STATUS);
+ pkt->pkt_reason = CMD_CMPLT;
+ } else if (scsa1394_sbp2_conv_status(cmd, st) == DDI_SUCCESS) {
+ pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
+ STATE_SENT_CMD | STATE_XFERRED_DATA |
+ STATE_GOT_STATUS | STATE_ARQ_DONE);
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ } else {
+ pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
+ STATE_SENT_CMD | STATE_XFERRED_DATA |
+ STATE_GOT_STATUS);
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ lp->l_stat.stat_err_status_conv++;
+ }
+ } else {
+ /* transport or serial bus failure */
+ pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
+ pkt->pkt_reason = CMD_TRAN_ERR;
+ lp->l_stat.stat_err_status_resp++;
+ }
+
+ if (pkt->pkt_reason == CMD_TRAN_ERR) {
+ lp->l_stat.stat_err_status_tran_err++;
+
+ /*
+ * save the failed command: up to 16 CDB bytes in p[0]..p[1],
+ * followed by a timestamp in p[2]
+ */
+ p = &lp->l_stat.stat_cmd_last_fail[
+ lp->l_stat.stat_cmd_last_fail_idx][0];
+ bcopy(&pkt->pkt_cdbp[0], p, min(cmd->sc_cdb_len, 16));
+ *(clock_t *)&p[2] = ddi_get_lbolt();
+ lp->l_stat.stat_cmd_last_fail_idx =
+ (lp->l_stat.stat_cmd_last_fail_idx + 1) %
+ SCSA1394_STAT_NCMD_LAST;
+ }
+
+ /* generic HBA status processing */
+ scsa1394_cmd_status_proc(lp, cmd);
+}
+
+
+/*
+ * Convert SBP-2 status block into SCSA status.
+ *
+ * Note: (ref: B.2) "SBP-2 permits the return of a status block between two
+ * and eight quadlets in length. When a truncated status block is stored, the
+ * omitted quadlets shall be interpreted as if zero values were stored."
+ * We expect the sbp2 layer to do the zeroing for us.
+ */
+static int
+scsa1394_sbp2_conv_status(scsa1394_cmd_t *cmd, scsa1394_status_t *st)
+{
+ struct scsi_pkt *pkt = CMD2PKT(cmd);
+ uint8_t status = st->st_status;
+ uint8_t bits = st->st_sense_bits;
+ struct scsi_arq_status *arqp = (struct scsi_arq_status *)pkt->pkt_scbp;
+ struct scsi_extended_sense *esp = &arqp->sts_sensedata;
+
+ *(pkt->pkt_scbp) = (status & SCSA1394_ST_STATUS);
+ *(uint8_t *)&arqp->sts_rqpkt_status = STATUS_GOOD;
+ arqp->sts_rqpkt_reason = CMD_CMPLT;
+ arqp->sts_rqpkt_resid = 0;
+ arqp->sts_rqpkt_state |= STATE_XFERRED_DATA;
+ arqp->sts_rqpkt_statistics = 0;
+
+ esp->es_valid = (bits & SCSA1394_ST_VALID) >> SCSA1394_ST_VALID_SHIFT;
+ esp->es_class = CLASS_EXTENDED_SENSE;
+ esp->es_code = (status & SCSA1394_ST_SFMT) >> SCSA1394_ST_SFMT_SHIFT;
+
+ esp->es_segnum = 0;
+
+ esp->es_filmk = (bits & SCSA1394_ST_MARK) >> SCSA1394_ST_MARK_SHIFT;
+ esp->es_eom = (bits & SCSA1394_ST_EOM) >> SCSA1394_ST_EOM_SHIFT;
+ esp->es_ili = (bits & SCSA1394_ST_ILI) >> SCSA1394_ST_ILI_SHIFT;
+ esp->es_key = (bits & SCSA1394_ST_SENSE_KEY);
+
+ esp->es_info_1 = st->st_info[0];
+ esp->es_info_2 = st->st_info[1];
+ esp->es_info_3 = st->st_info[2];
+ esp->es_info_4 = st->st_info[3];
+ esp->es_add_len = 4;
+
+ esp->es_cmd_info[0] = st->st_cdb[0];
+ esp->es_cmd_info[1] = st->st_cdb[1];
+ esp->es_cmd_info[2] = st->st_cdb[2];
+ esp->es_cmd_info[3] = st->st_cdb[3];
+ esp->es_add_code = st->st_sense_code;
+ esp->es_qual_code = st->st_sense_qual;
+ esp->es_fru_code = st->st_fru;
+ esp->es_skey_specific[0] = st->st_sks[0];
+ esp->es_skey_specific[1] = st->st_sks[1];
+ esp->es_skey_specific[2] = st->st_sks[2];
+
+ esp->es_add_info[0] = esp->es_add_info[1] = 0;
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Sends the appropriate reset command to the target. LUN reset is optional,
+ * so it can fail, in which case the SCSA target driver will use
+ * RESET_TARGET/ALL. Target reset support is mandatory in SBP-2; if it fails,
+ * something is terribly wrong with the device - blow away any outstanding
+ * tasks in that case.
+ */
+int
+scsa1394_sbp2_reset(scsa1394_lun_t *lp, int level, scsa1394_cmd_t *cmd)
+{
+ scsa1394_state_t *sp = lp->l_sp;
+ sbp2_task_t *task;
+ int berr;
+ int ret = DDI_FAILURE;
+
+ if (scsa1394_dev_is_online(sp)) {
+ switch (level) {
+ case RESET_LUN:
+ ret = sbp2_lun_reset(lp->l_lun, &berr);
+ if (ret != SBP2_SUCCESS) {
+ return (ret);
+ }
+ break;
+ case RESET_TARGET:
+ case RESET_ALL:
+ ret = sbp2_tgt_reset(sp->s_tgt, &berr);
+ break;
+ }
+ }
+
+ if (cmd != NULL) {
+ scsa1394_sbp2_reset_proc(lp, level, cmd);
+ }
+ if (scsa1394_sbp2_logged_in(lp)) {
+ while ((task = sbp2_ses_cancel_first_task(lp->l_ses)) != NULL) {
+ ASSERT(task->ts_state < SBP2_TASK_PROC);
+ scsa1394_sbp2_reset_proc(lp, level, TASK2CMD(task));
+ }
+ }
+
+ return (ret);
+}
+
+static void
+scsa1394_sbp2_reset_proc(scsa1394_lun_t *lp, int level, scsa1394_cmd_t *cmd)
+{
+ sbp2_task_t *task = CMD2TASK(cmd);
+ struct scsi_pkt *pkt = CMD2PKT(cmd);
+ int ts_error;
+
+ pkt->pkt_reason = CMD_RESET;
+ if (level == RESET_LUN) {
+ if (task->ts_state == SBP2_TASK_PEND) {
+ pkt->pkt_statistics |= STAT_DEV_RESET;
+ } else {
+ pkt->pkt_statistics |= STAT_ABORTED;
+ }
+ ts_error = SBP2_TASK_ERR_LUN_RESET;
+ } else {
+ pkt->pkt_statistics |= STAT_BUS_RESET;
+ ts_error = SBP2_TASK_ERR_TGT_RESET;
+ }
+ task->ts_error = ts_error;
+ task->ts_state = SBP2_TASK_PROC;
+ scsa1394_cmd_status_proc(lp, cmd);
+}
+
+/*
+ * Cancel commands immediately.
+ *
+ * It is the caller's responsibility to set the device state such that no
+ * new tasks are added.
+ */
+void
+scsa1394_sbp2_flush_cmds(scsa1394_lun_t *lp, int reason, int state,
+ int statistics)
+{
+ scsa1394_cmd_t *cmd;
+ struct scsi_pkt *pkt;
+ sbp2_ses_t *sp = lp->l_ses;
+ sbp2_task_t *task;
+
+ if (sp == NULL) {
+ return;
+ }
+
+ while ((task = sbp2_ses_cancel_first_task(sp)) != NULL) {
+ ASSERT(task->ts_state < SBP2_TASK_PROC);
+ cmd = TASK2CMD(task);
+ pkt = CMD2PKT(cmd);
+
+ pkt->pkt_reason = reason;
+ pkt->pkt_state |= state;
+ pkt->pkt_statistics |= statistics;
+ task->ts_state = SBP2_TASK_PROC;
+ scsa1394_cmd_status_proc(lp, cmd);
+ }
+
+ scsa1394_thr_clear_req(&lp->l_worker_thread,
+ SCSA1394_THREQ_TASK_STATUS | SCSA1394_THREQ_NUDGE);
+}
+
+static boolean_t
+scsa1394_sbp2_logged_in(scsa1394_lun_t *lp)
+{
+ return (lp->l_ses != NULL);
+}