author		gm89044 <none@none>	2005-11-14 11:14:44 -0800
committer	gm89044 <none@none>	2005-11-14 11:14:44 -0800
commit		88f8b78a88cbdc6d8c1af5c3e54bc49d25095c98 (patch)
tree		147b8b9da77c2704bf0030e38f57d62112165c9d /usr/src/uts
parent		de122929e7c37df60cbea70616404e22d20e025b (diff)
download	illumos-joyent-88f8b78a88cbdc6d8c1af5c3e54bc49d25095c98.tar.gz
PSARC 2005/477 Broadcom 582[0125] support for dca crypto driver
6316121 Add DCA crypto driver to Solaris
Diffstat (limited to 'usr/src/uts')
-rw-r--r--	usr/src/uts/common/Makefile.files		|    3
-rw-r--r--	usr/src/uts/common/crypto/io/dca.c		| 4998
-rw-r--r--	usr/src/uts/common/crypto/io/dca.conf		|   30
-rw-r--r--	usr/src/uts/common/crypto/io/dca_3des.c		|  747
-rw-r--r--	usr/src/uts/common/crypto/io/dca_debug.c	|   92
-rw-r--r--	usr/src/uts/common/crypto/io/dca_dsa.c		|  582
-rw-r--r--	usr/src/uts/common/crypto/io/dca_kstat.c	|  208
-rw-r--r--	usr/src/uts/common/crypto/io/dca_rng.c		|  325
-rw-r--r--	usr/src/uts/common/crypto/io/dca_rsa.c		|  832
-rw-r--r--	usr/src/uts/common/sys/crypto/dca.h		|  928
-rw-r--r--	usr/src/uts/intel/Makefile			|    5
-rw-r--r--	usr/src/uts/intel/dca/Makefile			|   92
-rw-r--r--	usr/src/uts/sparc/Makefile			|    1
-rw-r--r--	usr/src/uts/sparc/dca/Makefile			|   95
14 files changed, 8938 insertions, 0 deletions
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index cf58171183..57b3f1968f 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -1034,6 +1034,9 @@ CRYPTO_OBJS += crypto.o
DPROV_OBJS += dprov.o
+DCA_OBJS += dca.o dca_3des.o dca_debug.o dca_dsa.o dca_kstat.o dca_rng.o \
+ dca_rsa.o
+
AESPROV_OBJS += aes.o aes_cbc_crypt.o aes_impl.o
ARCFOURPROV_OBJS += arcfour.o arcfour_crypt.o
diff --git a/usr/src/uts/common/crypto/io/dca.c b/usr/src/uts/common/crypto/io/dca.c
new file mode 100644
index 0000000000..528e9ac2ca
--- /dev/null
+++ b/usr/src/uts/common/crypto/io/dca.c
@@ -0,0 +1,4998 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Deimos - cryptographic acceleration based upon Broadcom 582x.
+ */
+
+#include <sys/types.h>
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/devops.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/varargs.h>
+#include <sys/file.h>
+#include <sys/stat.h>
+#include <sys/kmem.h>
+#include <sys/ioccom.h>
+#include <sys/open.h>
+#include <sys/cred.h>
+#include <sys/kstat.h>
+#include <sys/strsun.h>
+#include <sys/note.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/spi.h>
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/io/ddi.h>
+#include <sys/crypto/dca.h>
+
+/*
+ * Core Deimos driver.
+ */
+
+static void dca_enlist2(dca_listnode_t *, dca_listnode_t *,
+ kmutex_t *);
+static void dca_rmlist2(dca_listnode_t *node, kmutex_t *);
+static dca_listnode_t *dca_delist2(dca_listnode_t *q, kmutex_t *);
+static void dca_free_context_list(dca_t *dca);
+static int dca_free_context_low(crypto_ctx_t *ctx);
+static int dca_attach(dev_info_t *, ddi_attach_cmd_t);
+static int dca_detach(dev_info_t *, ddi_detach_cmd_t);
+static int dca_suspend(dca_t *);
+static int dca_resume(dca_t *);
+static int dca_init(dca_t *);
+static int dca_reset(dca_t *, int);
+static int dca_initworklist(dca_t *, dca_worklist_t *);
+static void dca_uninit(dca_t *);
+static void dca_initq(dca_listnode_t *);
+static void dca_enqueue(dca_listnode_t *, dca_listnode_t *);
+static dca_listnode_t *dca_dequeue(dca_listnode_t *);
+static dca_listnode_t *dca_unqueue(dca_listnode_t *);
+static dca_request_t *dca_newreq(dca_t *);
+static dca_work_t *dca_getwork(dca_t *, int);
+static void dca_freework(dca_work_t *);
+static dca_work_t *dca_newwork(dca_t *);
+static void dca_destroywork(dca_work_t *);
+static void dca_schedule(dca_t *, int);
+static void dca_reclaim(dca_t *, int);
+static uint_t dca_intr(char *);
+static void dca_failure(dca_t *, ddi_fault_location_t,
+ dca_fma_eclass_t index, uint64_t, int, char *, ...);
+static void dca_jobtimeout(void *);
+static int dca_drain(dca_t *);
+static void dca_undrain(dca_t *);
+static void dca_rejectjobs(dca_t *);
+
+#ifdef SCHEDDELAY
+static void dca_schedtimeout(void *);
+#endif
+
+/*
+ * We want these inlined for performance.
+ */
+#ifndef DEBUG
+#pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork)
+#pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done)
+#pragma inline(dca_reverse, dca_length)
+#endif
+
+/*
+ * Device operations.
+ */
+static struct dev_ops devops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ nodev, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ dca_attach, /* devo_attach */
+ dca_detach, /* devo_detach */
+ nodev, /* devo_reset */
+ NULL, /* devo_cb_ops */
+ NULL, /* devo_bus_ops */
+ ddi_power /* devo_power */
+};
+
+#define IDENT "PCI Crypto Accelerator 2.0"
+#define IDENT_SYM "Crypto Accel Sym 2.0"
+#define IDENT_ASYM "Crypto Accel Asym 2.0"
+
+/* Space-padded, will be filled in dynamically during registration */
+#define IDENT3 "PCI Crypto Accelerator Mod 2.0"
+
+#define VENDOR "Sun Microsystems, Inc."
+
+#define STALETIME (30 * SECOND)
+
+#define crypto_prov_notify crypto_provider_notification
+ /* A 28 char function name doesn't leave much line space */
+
+/*
+ * Module linkage.
+ */
+static struct modldrv modldrv = {
+ &mod_driverops, /* drv_modops */
+ IDENT, /* drv_linkinfo */
+ &devops, /* drv_dev_ops */
+};
+
+extern struct mod_ops mod_cryptoops;
+
+static struct modlcrypto modlcrypto = {
+ &mod_cryptoops,
+ IDENT3
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, /* ml_rev */
+ &modldrv, /* ml_linkage */
+ &modlcrypto,
+ NULL
+};
+
+/*
+ * CSPI information (entry points, provider info, etc.)
+ */
+
+/* Mechanisms for the symmetric cipher provider */
+static crypto_mech_info_t dca_mech_info_tab1[] = {
+ /* DES-CBC */
+ {SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
+ CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
+ DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+ /* 3DES-CBC */
+ {SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
+ CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
+ DES3_KEY_LEN, DES3_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
+};
+
+/* Mechanisms for the asymmetric cipher provider */
+static crypto_mech_info_t dca_mech_info_tab2[] = {
+ /* DSA */
+ {SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
+ CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
+ CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
+ DSA_MIN_KEY_LEN * 8, DSA_MAX_KEY_LEN * 8,
+ CRYPTO_KEYSIZE_UNIT_IN_BITS},
+
+ /* RSA */
+ {SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
+ CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
+ CRYPTO_FG_VERIFY_RECOVER |
+ CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
+ CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
+ CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
+ RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8,
+ CRYPTO_KEYSIZE_UNIT_IN_BITS},
+ {SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
+ CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
+ CRYPTO_FG_VERIFY_RECOVER |
+ CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
+ CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
+ CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
+ RSA_MIN_KEY_LEN * 8, RSA_MAX_KEY_LEN * 8,
+ CRYPTO_KEYSIZE_UNIT_IN_BITS}
+};
+
+static void dca_provider_status(crypto_provider_handle_t, uint_t *);
+
+static crypto_control_ops_t dca_control_ops = {
+ dca_provider_status
+};
+
+static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_data_t *, crypto_req_handle_t);
+static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_data_t *, crypto_req_handle_t);
+static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+static crypto_cipher_ops_t dca_cipher_ops = {
+ dca_encrypt_init,
+ dca_encrypt,
+ dca_encrypt_update,
+ dca_encrypt_final,
+ dca_encrypt_atomic,
+ dca_decrypt_init,
+ dca_decrypt,
+ dca_decrypt_update,
+ dca_decrypt_final,
+ dca_decrypt_atomic
+};
+
+static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int dca_sign_recover_atomic(crypto_provider_handle_t,
+ crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+static crypto_sign_ops_t dca_sign_ops = {
+ dca_sign_init,
+ dca_sign,
+ dca_sign_update,
+ dca_sign_final,
+ dca_sign_atomic,
+ dca_sign_recover_init,
+ dca_sign_recover,
+ dca_sign_recover_atomic
+};
+
+static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
+ crypto_req_handle_t);
+static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
+ crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
+ crypto_data_t *, crypto_req_handle_t);
+static int dca_verify_recover_atomic(crypto_provider_handle_t,
+ crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
+ crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
+
+static crypto_verify_ops_t dca_verify_ops = {
+ dca_verify_init,
+ dca_verify,
+ dca_verify_update,
+ dca_verify_final,
+ dca_verify_atomic,
+ dca_verify_recover_init,
+ dca_verify_recover,
+ dca_verify_recover_atomic
+};
+
+static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
+ uchar_t *, size_t, crypto_req_handle_t);
+
+static crypto_random_number_ops_t dca_random_number_ops = {
+ NULL,
+ dca_generate_random
+};
+
+static int ext_info_sym(crypto_provider_handle_t prov,
+ crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
+static int ext_info_asym(crypto_provider_handle_t prov,
+ crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
+static int ext_info_base(crypto_provider_handle_t prov,
+ crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);
+
+static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
+ ext_info_sym, /* ext_info */
+ NULL, /* init_token */
+ NULL, /* init_pin */
+ NULL /* set_pin */
+};
+
+static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
+ ext_info_asym, /* ext_info */
+ NULL, /* init_token */
+ NULL, /* init_pin */
+ NULL /* set_pin */
+};
+
+int dca_free_context(crypto_ctx_t *);
+
+static crypto_ctx_ops_t dca_ctx_ops = {
+ NULL,
+ dca_free_context
+};
+
+/* Operations for the symmetric cipher provider */
+static crypto_ops_t dca_crypto_ops1 = {
+ &dca_control_ops,
+ NULL, /* digest_ops */
+ &dca_cipher_ops,
+ NULL, /* mac_ops */
+ NULL, /* sign_ops */
+ NULL, /* verify_ops */
+ NULL, /* dual_ops */
+ NULL, /* cipher_mac_ops */
+ NULL, /* random_number_ops */
+ NULL, /* session_ops */
+ NULL, /* object_ops */
+ NULL, /* key_ops */
+ &dca_provmanage_ops_1, /* management_ops */
+ &dca_ctx_ops
+};
+
+/* Operations for the asymmetric cipher provider */
+static crypto_ops_t dca_crypto_ops2 = {
+ &dca_control_ops,
+ NULL, /* digest_ops */
+ &dca_cipher_ops,
+ NULL, /* mac_ops */
+ &dca_sign_ops,
+ &dca_verify_ops,
+ NULL, /* dual_ops */
+ NULL, /* cipher_mac_ops */
+ &dca_random_number_ops,
+ NULL, /* session_ops */
+ NULL, /* object_ops */
+ NULL, /* key_ops */
+ &dca_provmanage_ops_2, /* management_ops */
+ &dca_ctx_ops
+};
+
+/* Provider information for the symmetric cipher provider */
+static crypto_provider_info_t dca_prov_info1 = {
+ CRYPTO_SPI_VERSION_1,
+ NULL, /* pi_provider_description */
+ CRYPTO_HW_PROVIDER,
+ NULL, /* pi_provider_dev */
+ NULL, /* pi_provider_handle */
+ &dca_crypto_ops1,
+ sizeof (dca_mech_info_tab1)/sizeof (crypto_mech_info_t),
+ dca_mech_info_tab1,
+ 0, /* pi_logical_provider_count */
+ NULL /* pi_logical_providers */
+};
+
+/* Provider information for the asymmetric cipher provider */
+static crypto_provider_info_t dca_prov_info2 = {
+ CRYPTO_SPI_VERSION_1,
+ NULL, /* pi_provider_description */
+ CRYPTO_HW_PROVIDER,
+ NULL, /* pi_provider_dev */
+ NULL, /* pi_provider_handle */
+ &dca_crypto_ops2,
+ sizeof (dca_mech_info_tab2)/sizeof (crypto_mech_info_t),
+ dca_mech_info_tab2,
+ 0, /* pi_logical_provider_count */
+ NULL /* pi_logical_providers */
+};
+
+/* Convenience macros */
+/* Retrieve the softc and instance number from a SPI crypto context */
+#define DCA_SOFTC_FROM_CTX(ctx, softc, instance) { \
+ (softc) = (dca_t *)(ctx)->cc_provider; \
+ (instance) = ddi_get_instance((softc)->dca_dip); \
+}
+
+#define DCA_MECH_FROM_CTX(ctx) \
+ (((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)
+
+static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
+ caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
+ dca_chain_t *head, int *n_chain);
+static uint64_t dca_ena(uint64_t ena);
+static caddr_t dca_bufdaddr_out(crypto_data_t *data);
+static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
+static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
+ dca_fma_eclass_t eclass_index);
+
+static void dca_fma_init(dca_t *dca);
+static void dca_fma_fini(dca_t *dca);
+static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
+ const void *impl_data);
+
+
+static dca_device_t dca_devices[] = {
+ /* Broadcom vanilla variants */
+ { 0x14e4, 0x5820, "Broadcom 5820" },
+ { 0x14e4, 0x5821, "Broadcom 5821" },
+ { 0x14e4, 0x5822, "Broadcom 5822" },
+ { 0x14e4, 0x5825, "Broadcom 5825" },
+	/* Sun-specific OEM'd variants */
+ { 0x108e, 0x5454, "SCA" },
+ { 0x108e, 0x5455, "SCA 1000" },
+ { 0x108e, 0x5457, "SCA 500" },
+ /* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */
+ { 0x108e, 0x1, "SCA 500" },
+};
+
+/*
+ * Device attributes.
+ */
+static struct ddi_device_acc_attr dca_regsattr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_FLAGERR_ACC
+};
+
+static struct ddi_device_acc_attr dca_devattr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_FLAGERR_ACC
+};
+
+#if !defined(i386) && !defined(__i386)
+static struct ddi_device_acc_attr dca_bufattr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_FLAGERR_ACC
+};
+#endif
+
+static struct ddi_dma_attr dca_dmaattr = {
+ DMA_ATTR_V0, /* dma_attr_version */
+ 0x0, /* dma_attr_addr_lo */
+ 0xffffffffUL, /* dma_attr_addr_hi */
+ 0x00ffffffUL, /* dma_attr_count_max */
+ 0x40, /* dma_attr_align */
+ 0x40, /* dma_attr_burstsizes */
+ 0x1, /* dma_attr_minxfer */
+ 0x00ffffffUL, /* dma_attr_maxxfer */
+ 0xffffffffUL, /* dma_attr_seg */
+#if defined(i386) || defined(__i386) || defined(__amd64)
+ 512, /* dma_attr_sgllen */
+#else
+ 1, /* dma_attr_sgllen */
+#endif
+ 1, /* dma_attr_granular */
+ DDI_DMA_FLAGERR /* dma_attr_flags */
+};
+
+static void *dca_state = NULL;
+int dca_mindma = 2500;
+
+/*
+ * FMA eclass string definitions. Note that these string arrays must be
+ * consistent with the dca_fma_eclass_t enum.
+ */
+static char *dca_fma_eclass_sca1000[] = {
+ "sca1000.hw.device",
+ "sca1000.hw.timeout",
+ "sca1000.none"
+};
+
+static char *dca_fma_eclass_sca500[] = {
+ "sca500.hw.device",
+ "sca500.hw.timeout",
+ "sca500.none"
+};
+
+/*
+ * DDI entry points.
+ */
+int
+_init(void)
+{
+ int rv;
+
+ DBG(NULL, DMOD, "dca: in _init");
+
+ if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
+ /* this should *never* happen! */
+ return (rv);
+ }
+
+ if ((rv = mod_install(&modlinkage)) != 0) {
+ /* cleanup here */
+ ddi_soft_state_fini(&dca_state);
+ return (rv);
+ }
+
+ return (0);
+}
+
+int
+_fini(void)
+{
+ int rv;
+
+ DBG(NULL, DMOD, "dca: in _fini");
+
+ if ((rv = mod_remove(&modlinkage)) == 0) {
+ /* cleanup here */
+ ddi_soft_state_fini(&dca_state);
+ }
+ return (rv);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ DBG(NULL, DMOD, "dca: in _info");
+
+ return (mod_info(&modlinkage, modinfop));
+}
+
+int
+dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ ddi_acc_handle_t pci;
+ int instance;
+ ddi_iblock_cookie_t ibc;
+ int intr_added = 0;
+ dca_t *dca;
+ ushort_t venid;
+ ushort_t devid;
+ ushort_t revid;
+ ushort_t subsysid;
+ ushort_t subvenid;
+ int i;
+ int ret;
+ char ID[64];
+ static char *unknowndev = "Unknown device";
+
+#if DEBUG
+ /* these are only used for debugging */
+ ushort_t pcicomm;
+ ushort_t pcistat;
+ uchar_t cachelinesz;
+ uchar_t mingnt;
+ uchar_t maxlat;
+ uchar_t lattmr;
+#endif
+
+ instance = ddi_get_instance(dip);
+
+ DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);
+
+ switch (cmd) {
+ case DDI_RESUME:
+ if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
+			dca_diperror(dip, "no soft state in attach");
+ return (DDI_FAILURE);
+ }
+ /* assumption: we won't be DDI_DETACHed until we return */
+ return (dca_resume(dca));
+ case DDI_ATTACH:
+ break;
+ default:
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_slaveonly(dip) == DDI_SUCCESS) {
+ dca_diperror(dip, "slot does not support PCI bus-master");
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_intr_hilevel(dip, 0) != 0) {
+ dca_diperror(dip, "hilevel interrupts not supported");
+ return (DDI_FAILURE);
+ }
+
+ if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
+ dca_diperror(dip, "unable to setup PCI config handle");
+ return (DDI_FAILURE);
+ }
+
+ /* common PCI attributes */
+ venid = pci_config_get16(pci, PCI_VENID);
+ devid = pci_config_get16(pci, PCI_DEVID);
+ revid = pci_config_get8(pci, PCI_REVID);
+ subvenid = pci_config_get16(pci, PCI_SUBVENID);
+ subsysid = pci_config_get16(pci, PCI_SUBSYSID);
+
+ /*
+ * Broadcom-specific timings.
+ * We disable these timers/counters since they can cause
+ * incorrect false failures when the bus is just a little
+ * bit slow, or busy.
+ */
+ pci_config_put8(pci, PCI_TRDYTO, 0);
+ pci_config_put8(pci, PCI_RETRIES, 0);
+
+ /* initialize PCI access settings */
+ pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
+ PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);
+
+ /* set up our PCI latency timer */
+ pci_config_put8(pci, PCI_LATTMR, 0x40);
+
+#if DEBUG
+ /* read registers (for debugging) */
+ pcicomm = pci_config_get16(pci, PCI_COMM);
+ pcistat = pci_config_get16(pci, PCI_STATUS);
+ cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
+ mingnt = pci_config_get8(pci, PCI_MINGNT);
+ maxlat = pci_config_get8(pci, PCI_MAXLAT);
+ lattmr = pci_config_get8(pci, PCI_LATTMR);
+#endif
+
+ pci_config_teardown(&pci);
+
+ if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
+ dca_diperror(dip, "unable to get iblock cookie");
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
+ dca_diperror(dip, "unable to allocate soft state");
+ return (DDI_FAILURE);
+ }
+
+ dca = ddi_get_soft_state(dca_state, instance);
+ ASSERT(dca != NULL);
+ dca->dca_dip = dip;
+ WORKLIST(dca, MCR1)->dwl_prov = NULL;
+ WORKLIST(dca, MCR2)->dwl_prov = NULL;
+ /* figure pagesize */
+ dca->dca_pagesize = ddi_ptob(dip, 1);
+
+ /*
+ * Search for the device in our supported devices table. This
+ * is here for two reasons. First, we want to ensure that
+ * only Sun-qualified (and presumably Sun-labeled) devices can
+ * be used with this driver. Second, some devices have
+ * specific differences. E.g. the 5821 has support for a
+ * special mode of RC4, deeper queues, power management, and
+ * other changes. Also, the export versions of some of these
+ * chips don't support RC4 or 3DES, so we catch that here.
+ *
+ * Note that we only look at the upper nibble of the device
+ * id, which is used to distinguish export vs. domestic
+ * versions of the chip. (The lower nibble is used for
+ * stepping information.)
+ */
+ for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
+ /*
+ * Try to match the subsystem information first.
+ */
+ if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
+ subsysid && (subsysid == dca_devices[i].dd_device_id)) {
+ dca->dca_model = dca_devices[i].dd_model;
+ break;
+ }
+ /*
+ * Failing that, try the generic vendor and device id.
+ * Even if we find a match, we keep searching anyway,
+ * since we would prefer to find a match based on the
+ * subsystem ids.
+ */
+ if ((venid == dca_devices[i].dd_vendor_id) &&
+ (devid == dca_devices[i].dd_device_id)) {
+ dca->dca_model = dca_devices[i].dd_model;
+ }
+ }
+	/* try to handle an unrecognized device */
+ if (dca->dca_model == NULL) {
+ dca->dca_model = unknowndev;
+ dca_error(dca, "device not recognized, not supported");
+ DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
+ i, venid, devid, revid);
+ }
+
+ if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
+ dca->dca_model) != DDI_SUCCESS) {
+ dca_error(dca, "unable to create description property");
+ return (DDI_FAILURE);
+ }
+
+ DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
+ pcicomm, pcistat, cachelinesz);
+ DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
+ mingnt, maxlat, lattmr);
+
+ /*
+ * initialize locks, etc.
+ */
+ (void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);
+
+ /* use RNGSHA1 by default */
+ if (ddi_getprop(DDI_DEV_T_ANY, dip,
+ DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
+ dca->dca_flags |= DCA_RNGSHA1;
+ }
+
+ /* initialize FMA */
+ dca_fma_init(dca);
+
+ /* initialize some key data structures */
+ if (dca_init(dca) != DDI_SUCCESS) {
+ goto failed;
+ }
+
+ /* initialize kstats */
+ dca_ksinit(dca);
+
+ /* setup access to registers */
+ if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
+ 0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
+ dca_error(dca, "unable to map registers");
+ goto failed;
+ }
+
+ DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
+ DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
+ DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
+ DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
+ DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));
+
+ /* reset the chip */
+ if (dca_reset(dca, 0) < 0) {
+ goto failed;
+ }
+
+ /* initialize the chip */
+ PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
+ if (dca_check_acc_handle(dca, dca->dca_regs_handle,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ goto failed;
+ }
+
+ /* add the interrupt */
+ if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
+ (void *)dca) != DDI_SUCCESS) {
+ DBG(dca, DWARN, "ddi_add_intr failed");
+ goto failed;
+ } else {
+ intr_added = 1;
+ }
+
+ /* enable interrupts on the device */
+ /*
+ * XXX: Note, 5820A1 errata indicates that this may clobber
+ * bits 24 and 23, which affect the speed of the RNG. Since
+ * we always want to run in full-speed mode, this should be
+ * harmless.
+ */
+ SETBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
+ if (dca_check_acc_handle(dca, dca->dca_regs_handle,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ goto failed;
+ }
+
+ /* register MCR1 with the crypto framework */
+ /* Be careful not to exceed 32 chars */
+ (void) sprintf(ID, "%s/%d %s",
+ ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
+ dca_prov_info1.pi_provider_description = ID;
+ dca_prov_info1.pi_provider_dev.pd_hw = dip;
+ dca_prov_info1.pi_provider_handle = dca;
+ if ((ret = crypto_register_provider(&dca_prov_info1,
+ &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
+ cmn_err(CE_WARN,
+ "crypto_register_provider() failed (%d) for MCR1", ret);
+ goto failed;
+ }
+
+ /* register MCR2 with the crypto framework */
+ /* Be careful not to exceed 32 chars */
+ (void) sprintf(ID, "%s/%d %s",
+ ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
+ dca_prov_info2.pi_provider_description = ID;
+ dca_prov_info2.pi_provider_dev.pd_hw = dip;
+ dca_prov_info2.pi_provider_handle = dca;
+ if ((ret = crypto_register_provider(&dca_prov_info2,
+ &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
+ cmn_err(CE_WARN,
+ "crypto_register_provider() failed (%d) for MCR2", ret);
+ goto failed;
+ }
+
+ crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
+ CRYPTO_PROVIDER_READY);
+ crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
+ CRYPTO_PROVIDER_READY);
+
+ /* Initialize the local random number pool for this instance */
+ if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
+ goto failed;
+ }
+
+ mutex_enter(&dca->dca_intrlock);
+ dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
+ drv_usectohz(SECOND));
+ mutex_exit(&dca->dca_intrlock);
+
+ ddi_set_driver_private(dip, (caddr_t)dca);
+
+ ddi_report_dev(dip);
+
+ if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
+ ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
+ }
+
+ return (DDI_SUCCESS);
+
+failed:
+ /* unregister from the crypto framework */
+ if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
+ (void) crypto_unregister_provider(WORKLIST(dca, MCR1)->dwl_prov);
+ }
+ if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
+ (void) crypto_unregister_provider(WORKLIST(dca, MCR2)->dwl_prov);
+ }
+ if (intr_added) {
+ CLRBIT(dca, CSR_DMACTL,
+ DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
+ /* unregister intr handler */
+ ddi_remove_intr(dip, 0, dca->dca_icookie);
+ }
+ if (dca->dca_regs_handle) {
+ ddi_regs_map_free(&dca->dca_regs_handle);
+ }
+ if (dca->dca_intrstats) {
+ kstat_delete(dca->dca_intrstats);
+ }
+ if (dca->dca_ksp) {
+ kstat_delete(dca->dca_ksp);
+ }
+ dca_uninit(dca);
+
+ /* finalize FMA */
+ dca_fma_fini(dca);
+
+ mutex_destroy(&dca->dca_intrlock);
+ ddi_soft_state_free(dca_state, instance);
+ return (DDI_FAILURE);
+
+}
+
+int
+dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int instance;
+ dca_t *dca;
+ timeout_id_t tid;
+
+ instance = ddi_get_instance(dip);
+
+ DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);
+
+ switch (cmd) {
+ case DDI_SUSPEND:
+ if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
+ dca_diperror(dip, "no soft state in detach");
+ return (DDI_FAILURE);
+ }
+ /* assumption: we won't be DDI_DETACHed until we return */
+ return (dca_suspend(dca));
+
+ case DDI_DETACH:
+ break;
+ default:
+ return (DDI_FAILURE);
+ }
+
+ if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
+ dca_diperror(dip, "no soft state in detach");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Unregister from kCF.
+ * This needs to be done at the beginning of detach.
+ */
+ if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
+ if (crypto_unregister_provider(WORKLIST(dca, MCR1)->dwl_prov) !=
+ CRYPTO_SUCCESS) {
+ dca_error(dca, "unable to unregister MCR1 from kcf");
+ return (DDI_FAILURE);
+ }
+ }
+
+ if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
+ if (crypto_unregister_provider(WORKLIST(dca, MCR2)->dwl_prov) !=
+ CRYPTO_SUCCESS) {
+ dca_error(dca, "unable to unregister MCR2 from kcf");
+ return (DDI_FAILURE);
+ }
+ }
+
+ /*
+ * Cleanup the private context list. Once the
+ * crypto_unregister_provider returns, it is safe to do so.
+ */
+ dca_free_context_list(dca);
+
+ /* Cleanup the local random number pool */
+ dca_random_fini(dca);
+
+ /* send any jobs in the waitq back to kCF */
+ dca_rejectjobs(dca);
+
+ /* untimeout the timeouts */
+ mutex_enter(&dca->dca_intrlock);
+ tid = dca->dca_jobtid;
+ dca->dca_jobtid = 0;
+ mutex_exit(&dca->dca_intrlock);
+ if (tid) {
+ (void) untimeout(tid);
+ }
+
+ /* disable device interrupts */
+ CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
+
+ /* unregister interrupt handlers */
+ ddi_remove_intr(dip, 0, dca->dca_icookie);
+
+ /* release our regs handle */
+ ddi_regs_map_free(&dca->dca_regs_handle);
+
+ /* toss out kstats */
+ if (dca->dca_intrstats) {
+ kstat_delete(dca->dca_intrstats);
+ }
+ if (dca->dca_ksp) {
+ kstat_delete(dca->dca_ksp);
+ }
+
+ mutex_destroy(&dca->dca_intrlock);
+ dca_uninit(dca);
+
+ /* finalize FMA */
+ dca_fma_fini(dca);
+
+ ddi_soft_state_free(dca_state, instance);
+
+ return (DDI_SUCCESS);
+}
+
+int
+dca_resume(dca_t *dca)
+{
+ ddi_acc_handle_t pci;
+
+ if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
+ dca_error(dca, "unable to setup PCI config handle");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Reprogram registers in PCI configuration space.
+ */
+
+ /* Broadcom-specific timers -- we disable them. */
+ pci_config_put8(pci, PCI_TRDYTO, 0);
+ pci_config_put8(pci, PCI_RETRIES, 0);
+
+ /* initialize PCI access settings */
+ pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
+ PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);
+
+ /* set up our PCI latency timer */
+ pci_config_put8(pci, PCI_LATTMR, 0x40);
+
+ pci_config_teardown(&pci);
+
+ if (dca_reset(dca, 0) < 0) {
+ dca_error(dca, "unable to reset device during resume");
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Now restore the card-specific CSRs.
+ */
+
+ /* restore endianness settings */
+ PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
+ if (dca_check_acc_handle(dca, dca->dca_regs_handle,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
+ return (DDI_FAILURE);
+
+ /* restore interrupt enables */
+ SETBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
+ if (dca_check_acc_handle(dca, dca->dca_regs_handle,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
+ return (DDI_FAILURE);
+
+ /* resume scheduling jobs on the device */
+ dca_undrain(dca);
+
+ return (DDI_SUCCESS);
+}
+
+int
+dca_suspend(dca_t *dca)
+{
+ if ((dca_drain(dca)) != 0) {
+ return (DDI_FAILURE);
+ }
+ if (dca_reset(dca, 0) < 0) {
+ dca_error(dca, "unable to reset device during suspend");
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Hardware access stuff.
+ */
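+
+/*
+ * Soft-reset the device and poll for the reset bit to clear (up to
+ * roughly 10 ms); returns 0 on success and -1 on timeout or access
+ * failure.
+ */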
+int
+dca_reset(dca_t *dca, int failreset)
+{
+ int i;
+
+ if (dca->dca_regs_handle == NULL) {
+ return (-1);
+ }
+
+ PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
+ if (!failreset) {
+ if (dca_check_acc_handle(dca, dca->dca_regs_handle,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
+ return (-1);
+ }
+
+ /* now wait for a reset */
+ for (i = 1; i < 100; i++) {
+ uint32_t dmactl;
+ drv_usecwait(100);
+ dmactl = GETCSR(dca, CSR_DMACTL);
+ if (!failreset) {
+ if (dca_check_acc_handle(dca, dca->dca_regs_handle,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
+ return (-1);
+ }
+ if ((dmactl & DMACTL_RESET) == 0) {
+ DBG(dca, DCHATTY, "reset in %d usec", i * 100);
+ return (0);
+ }
+ }
+ if (!failreset) {
+ dca_failure(dca, DDI_DEVICE_FAULT,
+ DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
+ "timeout waiting for reset after %d usec", i * 100);
+ }
+ return (-1);
+}
+
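+/*
+ * Set up a worklist's locks, condition variable, and queues, and
+ * preallocate its work structures and requests; any allocation
+ * failure returns DDI_FAILURE.
+ */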
+int
+dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
+{
+ int i;
+ int reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);
+
+ /*
+ * Set up work queue.
+ */
+ mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
+ mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
+ dca->dca_icookie);
+ cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);
+
+ mutex_enter(&wlp->dwl_lock);
+
+ dca_initq(&wlp->dwl_freereqs);
+ dca_initq(&wlp->dwl_waitq);
+ dca_initq(&wlp->dwl_freework);
+ dca_initq(&wlp->dwl_runq);
+
+ for (i = 0; i < MAXWORK; i++) {
+ dca_work_t *workp;
+
+ if ((workp = dca_newwork(dca)) == NULL) {
+ dca_error(dca, "unable to allocate work");
+ mutex_exit(&wlp->dwl_lock);
+ return (DDI_FAILURE);
+ }
+ workp->dw_wlp = wlp;
+ dca_freework(workp);
+ }
+ mutex_exit(&wlp->dwl_lock);
+
+ for (i = 0; i < reqprealloc; i++) {
+ dca_request_t *reqp;
+
+ if ((reqp = dca_newreq(dca)) == NULL) {
+ dca_error(dca, "unable to allocate request");
+ return (DDI_FAILURE);
+ }
+ reqp->dr_dca = dca;
+ reqp->dr_wlp = wlp;
+ dca_freereq(reqp);
+ }
+ return (DDI_SUCCESS);
+}
+
+int
+dca_init(dca_t *dca)
+{
+ dca_worklist_t *wlp;
+
+ /* Initialize the private context list and the corresponding lock. */
+ mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
+ dca_initq(&dca->dca_ctx_list);
+
+ /*
+ * MCR1 algorithms.
+ */
+ wlp = WORKLIST(dca, MCR1);
+ (void) sprintf(wlp->dwl_name, "dca%d:mcr1",
+ ddi_get_instance(dca->dca_dip));
+ wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
+ dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
+ "mcr1_lowater", MCR1LOWATER);
+ wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
+ dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
+ "mcr1_hiwater", MCR1HIWATER);
+ wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
+ dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
+ "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
+ wlp->dwl_dca = dca;
+ wlp->dwl_mcr = MCR1;
+ if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * MCR2 algorithms.
+ */
+ wlp = WORKLIST(dca, MCR2);
+ (void) sprintf(wlp->dwl_name, "dca%d:mcr2",
+ ddi_get_instance(dca->dca_dip));
+ wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
+ dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
+ "mcr2_lowater", MCR2LOWATER);
+ wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
+ dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
+ "mcr2_hiwater", MCR2HIWATER);
+ wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
+ dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
+ "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
+ wlp->dwl_dca = dca;
+ wlp->dwl_mcr = MCR2;
+ if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Uninitialize worklists. This routine should only be called when no
+ * active jobs (hence DMA mappings) exist. One way to ensure this is
+ * to unregister from kCF before calling this routine. (This is done
+ * e.g. in detach(9e).)
+ */
+void
+dca_uninit(dca_t *dca)
+{
+ int mcr;
+
+ mutex_destroy(&dca->dca_ctx_list_lock);
+
+ for (mcr = MCR1; mcr <= MCR2; mcr++) {
+ dca_worklist_t *wlp = WORKLIST(dca, mcr);
+ dca_work_t *workp;
+ dca_request_t *reqp;
+
+ if (dca->dca_regs_handle == NULL) {
+ continue;
+ }
+
+ mutex_enter(&wlp->dwl_lock);
+ while ((workp = dca_getwork(dca, mcr)) != NULL) {
+ dca_destroywork(workp);
+ }
+ mutex_exit(&wlp->dwl_lock);
+ while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
+ dca_destroyreq(reqp);
+ }
+
+ mutex_destroy(&wlp->dwl_lock);
+ mutex_destroy(&wlp->dwl_freereqslock);
+ cv_destroy(&wlp->dwl_cv);
+ wlp->dwl_prov = NULL;
+ }
+}
+
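+/*
+ * List primitives that operate on the secondary linkage
+ * (dl_next2/dl_prev2), taking the supplied lock around the pointer
+ * manipulation; these are used for the per-instance private context
+ * list.
+ */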
+static void
+dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
+{
+ if (!q || !node)
+ return;
+
+ mutex_enter(lock);
+ node->dl_next2 = q;
+ node->dl_prev2 = q->dl_prev2;
+ node->dl_next2->dl_prev2 = node;
+ node->dl_prev2->dl_next2 = node;
+ mutex_exit(lock);
+}
+
+static void
+dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
+{
+ if (!node)
+ return;
+
+ mutex_enter(lock);
+ node->dl_next2->dl_prev2 = node->dl_prev2;
+ node->dl_prev2->dl_next2 = node->dl_next2;
+ node->dl_next2 = NULL;
+ node->dl_prev2 = NULL;
+ mutex_exit(lock);
+}
+
+static dca_listnode_t *
+dca_delist2(dca_listnode_t *q, kmutex_t *lock)
+{
+ dca_listnode_t *node;
+
+ mutex_enter(lock);
+ if ((node = q->dl_next2) == q) {
+ mutex_exit(lock);
+ return (NULL);
+ }
+
+ node->dl_next2->dl_prev2 = node->dl_prev2;
+ node->dl_prev2->dl_next2 = node->dl_next2;
+ node->dl_next2 = NULL;
+ node->dl_prev2 = NULL;
+ mutex_exit(lock);
+
+ return (node);
+}
+
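+/*
+ * Initialize a queue sentinel; an empty queue points back at itself
+ * on both the primary and secondary linkages.
+ */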
+void
+dca_initq(dca_listnode_t *q)
+{
+ q->dl_next = q;
+ q->dl_prev = q;
+ q->dl_next2 = q;
+ q->dl_prev2 = q;
+}
+
+void
+dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
+{
+ /*
+ * Enqueue submits at the "tail" of the list, i.e. just
+ * behind the sentinel.
+ */
+ node->dl_next = q;
+ node->dl_prev = q->dl_prev;
+ node->dl_next->dl_prev = node;
+ node->dl_prev->dl_next = node;
+}
+
+void
+dca_rmqueue(dca_listnode_t *node)
+{
+ node->dl_next->dl_prev = node->dl_prev;
+ node->dl_prev->dl_next = node->dl_next;
+ node->dl_next = NULL;
+ node->dl_prev = NULL;
+}
+
+dca_listnode_t *
+dca_dequeue(dca_listnode_t *q)
+{
+ dca_listnode_t *node;
+ /*
+ * Dequeue takes from the "head" of the list, i.e. just after
+ * the sentinel.
+ */
+ if ((node = q->dl_next) == q) {
+ /* queue is empty */
+ return (NULL);
+ }
+ dca_rmqueue(node);
+ return (node);
+}
+
+/* this is the opposite of dequeue, it takes things off in LIFO order */
+dca_listnode_t *
+dca_unqueue(dca_listnode_t *q)
+{
+ dca_listnode_t *node;
+ /*
+ * unqueue takes from the "tail" of the list, i.e. just before
+ * the sentinel.
+ */
+	if ((node = q->dl_prev) == q) {
+ /* queue is empty */
+ return (NULL);
+ }
+ dca_rmqueue(node);
+ return (node);
+}
+
+dca_listnode_t *
+dca_peekqueue(dca_listnode_t *q)
+{
+ dca_listnode_t *node;
+
+ if ((node = q->dl_next) == q) {
+ return (NULL);
+ } else {
+ return (node);
+ }
+}
+
+/*
+ * Interrupt service routine.
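+ * Acknowledges and claims device interrupts: completed MCRs are
+ * reclaimed and further work is scheduled, while DMA master access
+ * errors are routed through the FMA failure path.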
+ */
+uint_t
+dca_intr(char *arg)
+{
+ dca_t *dca = (dca_t *)arg;
+ uint32_t status;
+
+ mutex_enter(&dca->dca_intrlock);
+ status = GETCSR(dca, CSR_DMASTAT);
+ PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
+ if (dca_check_acc_handle(dca, dca->dca_regs_handle,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ mutex_exit(&dca->dca_intrlock);
+ return ((uint_t)DDI_FAILURE);
+ }
+
+ DBG(dca, DINTR, "interrupted, status = 0x%x!", status);
+
+ if ((status & DMASTAT_INTERRUPTS) == 0) {
+ /* increment spurious interrupt kstat */
+ if (dca->dca_intrstats) {
+ KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
+ }
+ mutex_exit(&dca->dca_intrlock);
+ return (DDI_INTR_UNCLAIMED);
+ }
+
+ if (dca->dca_intrstats) {
+ KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
+ }
+ if (status & DMASTAT_MCR1INT) {
+ DBG(dca, DINTR, "MCR1 interrupted");
+ mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
+ dca_schedule(dca, MCR1);
+ dca_reclaim(dca, MCR1);
+ mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
+ }
+
+ if (status & DMASTAT_MCR2INT) {
+ DBG(dca, DINTR, "MCR2 interrupted");
+ mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
+ dca_schedule(dca, MCR2);
+ dca_reclaim(dca, MCR2);
+ mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
+ }
+
+ if (status & DMASTAT_ERRINT) {
+ uint32_t erraddr;
+ erraddr = GETCSR(dca, CSR_DMAEA);
+ mutex_exit(&dca->dca_intrlock);
+
+ /*
+		 * the low bit of the error address indicates failure
+		 * during read if set, during write otherwise.
+ */
+ dca_failure(dca, DDI_DEVICE_FAULT,
+ DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
+ "DMA master access error %s address 0x%x",
+ erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
+ return (DDI_INTR_CLAIMED);
+ }
+
+ mutex_exit(&dca->dca_intrlock);
+
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * Reverse a string of bytes from s1 into s2. The reversal happens
+ * from the tail of s1. If len1 < len2, then null bytes are appended
+ * to the end of s2. If len2 < len1, then (presumably null) bytes are
+ * dropped from the start of s1.
+ *
+ * The rationale here is that when s1 (source) is shorter, then we
+ * are reversing from big-endian ordering, into device ordering, and
+ * want to add some extra nulls to the tail (MSB) side of the device.
+ *
+ * Similarly, when s2 (dest) is shorter, then we are truncating what
+ * are presumably null MSB bits from the device.
+ *
+ * There is an expectation when reversing from the device back into
+ * big-endian, that the number of bytes to reverse and the target size
+ * will match, and no truncation or padding occurs.
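+ *
+ * For example, reversing a 2-byte source { 0x01, 0x02 } into a 4-byte
+ * destination yields { 0x02, 0x01, 0x00, 0x00 }.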
+ */
+void
+dca_reverse(void *s1, void *s2, int len1, int len2)
+{
+ caddr_t src, dst;
+
+ if (len1 == 0) {
+ if (len2) {
+ bzero(s2, len2);
+ }
+ return;
+ }
+ src = (caddr_t)s1 + len1 - 1;
+ dst = s2;
+ while ((src >= (caddr_t)s1) && (len2)) {
+ *dst++ = *src--;
+ len2--;
+ }
+ while (len2 > 0) {
+ *dst++ = 0;
+ len2--;
+ }
+}
+
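+/*
+ * Round an operand length in bits up to the next size supported by
+ * the full-length datapath (512 to 2048 bits), returning the padded
+ * length in bytes, or 0 if the operand is too large.
+ */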
+uint16_t
+dca_padfull(int num)
+{
+ if (num <= 512) {
+ return (BITS2BYTES(512));
+ }
+ if (num <= 768) {
+ return (BITS2BYTES(768));
+ }
+ if (num <= 1024) {
+ return (BITS2BYTES(1024));
+ }
+ if (num <= 1536) {
+ return (BITS2BYTES(1536));
+ }
+ if (num <= 2048) {
+ return (BITS2BYTES(2048));
+ }
+ return (0);
+}
+
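+/*
+ * As above, but for the half-length datapath (256 to 1024 bits).
+ */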
+uint16_t
+dca_padhalf(int num)
+{
+ if (num <= 256) {
+ return (BITS2BYTES(256));
+ }
+ if (num <= 384) {
+ return (BITS2BYTES(384));
+ }
+ if (num <= 512) {
+ return (BITS2BYTES(512));
+ }
+ if (num <= 768) {
+ return (BITS2BYTES(768));
+ }
+ if (num <= 1024) {
+ return (BITS2BYTES(1024));
+ }
+ return (0);
+}
+
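+/*
+ * Allocate a work structure along with its DMA-bound MCR memory; on
+ * any failure the partial allocation is torn down and NULL is
+ * returned.
+ */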
+dca_work_t *
+dca_newwork(dca_t *dca)
+{
+ dca_work_t *workp;
+ size_t size;
+ ddi_dma_cookie_t c;
+ unsigned nc;
+ int rv;
+
+ workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);
+
+ rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
+ DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
+ if (rv != 0) {
+ dca_error(dca, "unable to alloc MCR DMA handle");
+ dca_destroywork(workp);
+ return (NULL);
+ }
+
+ rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
+ ROUNDUP(MCR_SIZE, dca->dca_pagesize),
+ &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
+ &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
+ if (rv != 0) {
+ dca_error(dca, "unable to alloc MCR DMA memory");
+ dca_destroywork(workp);
+ return (NULL);
+ }
+
+ rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
+ workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
+ DDI_DMA_SLEEP, NULL, &c, &nc);
+ if (rv != DDI_DMA_MAPPED) {
+ dca_error(dca, "unable to map MCR DMA memory");
+ dca_destroywork(workp);
+ return (NULL);
+ }
+
+ workp->dw_mcr_paddr = c.dmac_address;
+ return (workp);
+}
+
+void
+dca_destroywork(dca_work_t *workp)
+{
+ if (workp->dw_mcr_paddr) {
+ (void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
+ }
+ if (workp->dw_mcr_acch) {
+ ddi_dma_mem_free(&workp->dw_mcr_acch);
+ }
+ if (workp->dw_mcr_dmah) {
+ ddi_dma_free_handle(&workp->dw_mcr_dmah);
+ }
+ kmem_free(workp, sizeof (dca_work_t));
+}
+
+dca_request_t *
+dca_newreq(dca_t *dca)
+{
+ dca_request_t *reqp;
+ size_t size;
+ ddi_dma_cookie_t c;
+ unsigned nc;
+ int rv;
+ int n_chain = 0;
+
+ size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;
+
+ reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);
+
+ reqp->dr_dca = dca;
+
+ /*
+ * Setup the DMA region for the context and descriptors.
+ */
+ rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
+ NULL, &reqp->dr_ctx_dmah);
+ if (rv != DDI_SUCCESS) {
+ dca_error(dca, "failure allocating request DMA handle");
+ dca_destroyreq(reqp);
+ return (NULL);
+ }
+
+ /* for driver hardening, allocate in whole pages */
+ rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
+ ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
+ DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
+ &reqp->dr_ctx_acch);
+ if (rv != DDI_SUCCESS) {
+ dca_error(dca, "unable to alloc request DMA memory");
+ dca_destroyreq(reqp);
+ return (NULL);
+ }
+
+ rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
+ reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
+ DDI_DMA_SLEEP, 0, &c, &nc);
+ if (rv != DDI_DMA_MAPPED) {
+ dca_error(dca, "failed binding request DMA handle");
+ dca_destroyreq(reqp);
+ return (NULL);
+ }
+ reqp->dr_ctx_paddr = c.dmac_address;
+
+ reqp->dr_dma_size = size;
+
+ /*
+ * Set up the dma for our scratch/shared buffers.
+ */
+ rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
+ DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
+ if (rv != DDI_SUCCESS) {
+ dca_error(dca, "failure allocating ibuf DMA handle");
+ dca_destroyreq(reqp);
+ return (NULL);
+ }
+ rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
+ DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
+ if (rv != DDI_SUCCESS) {
+ dca_error(dca, "failure allocating obuf DMA handle");
+ dca_destroyreq(reqp);
+ return (NULL);
+ }
+
+ rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
+ DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
+ if (rv != DDI_SUCCESS) {
+ dca_error(dca, "failure allocating chain_in DMA handle");
+ dca_destroyreq(reqp);
+ return (NULL);
+ }
+
+ rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
+ DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
+ if (rv != DDI_SUCCESS) {
+ dca_error(dca, "failure allocating chain_out DMA handle");
+ dca_destroyreq(reqp);
+ return (NULL);
+ }
+
+ /*
+ * for driver hardening, allocate in whole pages.
+ */
+ size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
+#if defined(i386) || defined(__i386)
+ /*
+ * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter
+	 * may fail on x86 platforms if a physically contiguous memory chunk
+	 * cannot be found. From initial testing, we did not see the
+	 * performance degradation that was seen on SPARC.
+ */
+ if ((reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
+ dca_error(dca, "unable to alloc request ibuf memory");
+ dca_destroyreq(reqp);
+ return (NULL);
+ }
+ if ((reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
+ dca_error(dca, "unable to alloc request obuf memory");
+ dca_destroyreq(reqp);
+ return (NULL);
+ }
+#else
+ /*
+	 * We could use kmem_alloc for SPARC too. However, it gives worse
+	 * performance when transferring more than one page of data. For
+	 * example, using 4 threads, 12032 bytes of data, and 3DES on a
+	 * 900 MHz SPARC system, kmem_alloc uses 80% CPU while
+	 * ddi_dma_mem_alloc uses 50% CPU for the same throughput.
+ */
+ rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
+ size, &dca_bufattr,
+ DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
+ &size, &reqp->dr_ibuf_acch);
+ if (rv != DDI_SUCCESS) {
+ dca_error(dca, "unable to alloc request DMA memory");
+ dca_destroyreq(reqp);
+ return (NULL);
+ }
+
+ rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
+ size, &dca_bufattr,
+ DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
+ &size, &reqp->dr_obuf_acch);
+ if (rv != DDI_SUCCESS) {
+ dca_error(dca, "unable to alloc request DMA memory");
+ dca_destroyreq(reqp);
+ return (NULL);
+ }
+#endif
+
+ /* Skip the used portion in the context page */
+ reqp->dr_offset = CTX_MAXLENGTH;
+ if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
+ reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
+ DDI_DMA_WRITE | DDI_DMA_STREAMING,
+ &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
+ (void) dca_destroyreq(reqp);
+ return (NULL);
+ }
+ reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
+ /* Skip the space used by the input buffer */
+ reqp->dr_offset += DESC_SIZE * n_chain;
+
+ if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
+ reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
+ DDI_DMA_READ | DDI_DMA_STREAMING,
+ &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
+ (void) dca_destroyreq(reqp);
+ return (NULL);
+ }
+ reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
+ /* Skip the space used by the output buffer */
+ reqp->dr_offset += DESC_SIZE * n_chain;
+
+ DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
+ reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
+ return (reqp);
+}
+
+void
+dca_destroyreq(dca_request_t *reqp)
+{
+#if defined(i386) || defined(__i386)
+ dca_t *dca = reqp->dr_dca;
+ size_t size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
+#endif
+
+ /*
+ * Clean up DMA for the context structure.
+ */
+ if (reqp->dr_ctx_paddr) {
+ (void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
+ }
+
+ if (reqp->dr_ctx_acch) {
+ ddi_dma_mem_free(&reqp->dr_ctx_acch);
+ }
+
+ if (reqp->dr_ctx_dmah) {
+ ddi_dma_free_handle(&reqp->dr_ctx_dmah);
+ }
+
+ /*
+ * Clean up DMA for the scratch buffer.
+ */
+#if defined(i386) || defined(__i386)
+ if (reqp->dr_ibuf_dmah) {
+ (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
+ ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
+ }
+ if (reqp->dr_obuf_dmah) {
+ (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
+ ddi_dma_free_handle(&reqp->dr_obuf_dmah);
+ }
+
+ kmem_free(reqp->dr_ibuf_kaddr, size);
+ kmem_free(reqp->dr_obuf_kaddr, size);
+#else
+ if (reqp->dr_ibuf_paddr) {
+ (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
+ }
+ if (reqp->dr_obuf_paddr) {
+ (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
+ }
+
+ if (reqp->dr_ibuf_acch) {
+ ddi_dma_mem_free(&reqp->dr_ibuf_acch);
+ }
+ if (reqp->dr_obuf_acch) {
+ ddi_dma_mem_free(&reqp->dr_obuf_acch);
+ }
+
+ if (reqp->dr_ibuf_dmah) {
+ ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
+ }
+ if (reqp->dr_obuf_dmah) {
+ ddi_dma_free_handle(&reqp->dr_obuf_dmah);
+ }
+#endif
+ /*
+	 * These two DMA handles should already have been unbound in
+	 * dca_unbindchains().
+ */
+ if (reqp->dr_chain_in_dmah) {
+ ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
+ }
+ if (reqp->dr_chain_out_dmah) {
+ ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
+ }
+
+ kmem_free(reqp, sizeof (dca_request_t));
+}
+
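+/*
+ * Pull a preallocated work structure off the MCR's free list,
+ * clearing the MCR header and any stale request pointers; the caller
+ * must hold dwl_lock.
+ */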
+dca_work_t *
+dca_getwork(dca_t *dca, int mcr)
+{
+ dca_worklist_t *wlp = WORKLIST(dca, mcr);
+ dca_work_t *workp;
+
+ ASSERT(mutex_owned(&wlp->dwl_lock));
+ workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
+ if (workp) {
+ int nreqs;
+ bzero(workp->dw_mcr_kaddr, 8);
+
+ /* clear out old requests */
+ for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
+ workp->dw_reqs[nreqs] = NULL;
+ }
+ }
+ return (workp);
+}
+
+void
+dca_freework(dca_work_t *workp)
+{
+ ASSERT(mutex_owned(&workp->dw_wlp->dwl_lock));
+ dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
+}
+
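+/*
+ * Pull a request off the MCR's free list; if the list is empty and
+ * tryhard is set, fall back to allocating a fresh request.
+ */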
+dca_request_t *
+dca_getreq(dca_t *dca, int mcr, int tryhard)
+{
+ dca_worklist_t *wlp = WORKLIST(dca, mcr);
+ dca_request_t *reqp;
+
+ mutex_enter(&wlp->dwl_freereqslock);
+ reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
+ mutex_exit(&wlp->dwl_freereqslock);
+ if (reqp) {
+ reqp->dr_flags = 0;
+ reqp->dr_callback = NULL;
+ } else if (tryhard) {
+ /*
+ * failed to get a free one, try an allocation, the hard way.
+ * XXX: Kstat desired here.
+ */
+ if ((reqp = dca_newreq(dca)) != NULL) {
+ reqp->dr_wlp = wlp;
+ reqp->dr_dca = dca;
+ reqp->dr_flags = 0;
+ reqp->dr_callback = NULL;
+ }
+ }
+ return (reqp);
+}
+
+void
+dca_freereq(dca_request_t *reqp)
+{
+ reqp->dr_kcf_req = NULL;
+ if (!(reqp->dr_flags & DR_NOCACHE)) {
+ mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
+ dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
+ (dca_listnode_t *)reqp);
+ mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
+ }
+}
+
+/*
+ * Binds user buffers to DMA handles dynamically. On SPARC, a user buffer
+ * is mapped to a single physical address. On x86, a user buffer is mapped
+ * to multiple physical addresses. These physical addresses are chained
+ * using the method specified in the Broadcom BCM5820 specification.
+ */
+int
+dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
+{
+ int rv;
+ caddr_t kaddr;
+ uint_t flags;
+ int n_chain = 0;
+
+ if (reqp->dr_flags & DR_INPLACE) {
+ flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
+ } else {
+ flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
+ }
+
+ /* first the input */
+ if (incnt) {
+ if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
+ DBG(NULL, DWARN, "unrecognised crypto data format");
+ return (DDI_FAILURE);
+ }
+ if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
+ kaddr, reqp->dr_chain_in_dmah, flags,
+ &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
+ (void) dca_unbindchains(reqp);
+ return (rv);
+ }
+
+ /*
+ * The offset and length are altered by the calling routine
+ * reqp->dr_in->cd_offset += incnt;
+ * reqp->dr_in->cd_length -= incnt;
+ */
+ /* Save the first one in the chain for MCR */
+ reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
+ reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
+ reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
+ } else {
+ reqp->dr_in_paddr = NULL;
+ reqp->dr_in_next = 0;
+ reqp->dr_in_len = 0;
+ }
+
+ if (reqp->dr_flags & DR_INPLACE) {
+ reqp->dr_out_paddr = reqp->dr_in_paddr;
+ reqp->dr_out_len = reqp->dr_in_len;
+ reqp->dr_out_next = reqp->dr_in_next;
+ return (DDI_SUCCESS);
+ }
+
+ /* then the output */
+ if (outcnt) {
+ flags = DDI_DMA_READ | DDI_DMA_STREAMING;
+ if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
+ DBG(NULL, DWARN, "unrecognised crypto data format");
+ (void) dca_unbindchains(reqp);
+ return (DDI_FAILURE);
+ }
+ rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
+ n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
+ flags, &reqp->dr_chain_out_head, &n_chain);
+ if (rv != DDI_SUCCESS) {
+ (void) dca_unbindchains(reqp);
+ return (DDI_FAILURE);
+ }
+
+ /* Save the first one in the chain for MCR */
+ reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
+ reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
+ reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
+ } else {
+ reqp->dr_out_paddr = NULL;
+ reqp->dr_out_next = 0;
+ reqp->dr_out_len = 0;
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Unbind the user buffers from the DMA handles.
+ */
+int
+dca_unbindchains(dca_request_t *reqp)
+{
+ int rv = DDI_SUCCESS;
+ int rv1 = DDI_SUCCESS;
+
+ /* Clear the input chain */
+ if (reqp->dr_chain_in_head.dc_buffer_paddr != NULL) {
+ (void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
+ reqp->dr_chain_in_head.dc_buffer_paddr = 0;
+ }
+
+ /* Clear the output chain */
+ if (reqp->dr_chain_out_head.dc_buffer_paddr != NULL) {
+ (void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
+ reqp->dr_chain_out_head.dc_buffer_paddr = 0;
+ }
+
+ return ((rv != DDI_SUCCESS)? rv : rv1);
+}
+
+/*
+ * Build either the input chain or the output chain. The chain is a
+ * single item on SPARC, and possibly a multiple-item chain on x86.
+ */
+static int
+dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
+ caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
+ dca_chain_t *head, int *n_chain)
+{
+ ddi_dma_cookie_t c;
+ uint_t nc;
+ int rv;
+ caddr_t chain_kaddr_pre;
+ caddr_t chain_kaddr;
+ uint32_t chain_paddr;
+ int i;
+
+ /* Advance past the context structure to the starting address */
+ chain_paddr = reqp->dr_ctx_paddr + dr_offset;
+ chain_kaddr = reqp->dr_ctx_kaddr + dr_offset;
+
+ /*
+ * Bind the kernel address to the DMA handle. On x86, the actual
+ * buffer is mapped into multiple physical addresses. On Sparc,
+ * the actual buffer is mapped into a single address.
+ */
+ rv = ddi_dma_addr_bind_handle(handle,
+ NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc);
+ if (rv != DDI_DMA_MAPPED) {
+ return (DDI_FAILURE);
+ }
+
+ (void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV);
+ if ((rv = dca_check_dma_handle(reqp->dr_dca, handle,
+ DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) {
+ reqp->destroy = TRUE;
+ return (rv);
+ }
+
+ *n_chain = nc;
+
+ /* Setup the data buffer chain for DMA transfer */
+ chain_kaddr_pre = NULL;
+ head->dc_buffer_paddr = 0;
+ head->dc_next_paddr = 0;
+ head->dc_buffer_length = 0;
+ for (i = 0; i < nc; i++) {
+ /* PIO */
+ PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address);
+ PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0);
+ PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size);
+
+ /* Remember the head of the chain */
+ if (head->dc_buffer_paddr == 0) {
+ head->dc_buffer_paddr = c.dmac_address;
+ head->dc_buffer_length = c.dmac_size;
+ }
+
+ /* Link to the previous one if one exists */
+ if (chain_kaddr_pre) {
+ PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT,
+ chain_paddr);
+ if (head->dc_next_paddr == 0)
+ head->dc_next_paddr = chain_paddr;
+ }
+ chain_kaddr_pre = chain_kaddr;
+
+ /* Maintain pointers */
+ chain_paddr += DESC_SIZE;
+ chain_kaddr += DESC_SIZE;
+
+ /* Retrieve the next cookie if there is one */
+ if (i < nc-1)
+ ddi_dma_nextcookie(handle, &c);
+ }
+
+ /* Set the next pointer in the last entry to NULL */
+ PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0);
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Schedule some work.
+ */
+int
+dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched)
+{
+ dca_worklist_t *wlp = WORKLIST(dca, mcr);
+
+ mutex_enter(&wlp->dwl_lock);
+
+ DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p",
+ reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr,
+ reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr);
+ DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x",
+ reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr);
+ /* sync out the entire context and descriptor chains */
+ (void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
+ if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ reqp->destroy = TRUE;
+ mutex_exit(&wlp->dwl_lock);
+ return (CRYPTO_DEVICE_ERROR);
+ }
+
+ dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp);
+ wlp->dwl_count++;
+ wlp->dwl_lastsubmit = ddi_get_lbolt();
+ reqp->dr_wlp = wlp;
+
+ if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) {
+ /* we are fully loaded now, let kCF know */
+
+ wlp->dwl_flowctl++;
+ wlp->dwl_busy = 1;
+
+ crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY);
+ }
+
+ if (dosched) {
+#ifdef SCHEDDELAY
+ /* possibly wait for more work to arrive */
+ if (wlp->dwl_count >= wlp->dwl_reqspermcr) {
+ dca_schedule(dca, mcr);
+ } else if (!wlp->dwl_schedtid) {
+ /* wait 1 msec for more work before doing it */
+ wlp->dwl_schedtid = timeout(dca_schedtimeout,
+ (void *)wlp, drv_usectohz(MSEC));
+ }
+#else
+ dca_schedule(dca, mcr);
+#endif
+ }
+ mutex_exit(&wlp->dwl_lock);
+
+ return (CRYPTO_QUEUED);
+}
+
+void
+dca_schedule(dca_t *dca, int mcr)
+{
+ dca_worklist_t *wlp = WORKLIST(dca, mcr);
+ int csr;
+ int full;
+ uint32_t status;
+
+ ASSERT(mutex_owned(&wlp->dwl_lock));
+ /*
+ * If the card is draining or has an outstanding failure,
+ * don't schedule any more work on it right now
+ */
+ if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) {
+ return;
+ }
+
+ if (mcr == MCR2) {
+ csr = CSR_MCR2;
+ full = DMASTAT_MCR2FULL;
+ } else {
+ csr = CSR_MCR1;
+ full = DMASTAT_MCR1FULL;
+ }
+
+ for (;;) {
+ dca_work_t *workp;
+ uint32_t offset;
+ int nreqs;
+
+ status = GETCSR(dca, CSR_DMASTAT);
+ if (dca_check_acc_handle(dca, dca->dca_regs_handle,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
+ return;
+
+ if ((status & full) != 0)
+ break;
+
+#ifdef SCHEDDELAY
+ /* if there isn't enough to do, don't bother now */
+ if ((wlp->dwl_count < wlp->dwl_reqspermcr) &&
+ (ddi_get_lbolt() < (wlp->dwl_lastsubmit +
+ drv_usectohz(MSEC)))) {
+ /* wait a bit longer... */
+ if (wlp->dwl_schedtid == 0) {
+ wlp->dwl_schedtid = timeout(dca_schedtimeout,
+ (void *)wlp, drv_usectohz(MSEC));
+ }
+ return;
+ }
+#endif
+
+ /* grab a work structure */
+ workp = dca_getwork(dca, mcr);
+
+ if (workp == NULL) {
+ /*
+ * There must be work ready to be reclaimed
+ * in this case, since the chip can hold fewer
+ * outstanding jobs than exist in total.
+ */
+ dca_reclaim(dca, mcr);
+ continue;
+ }
+
+ nreqs = 0;
+ offset = MCR_CTXADDR;
+
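+		/*
+		 * A sketch of the slot layout implied by the stores
+		 * below: each request occupies a fixed-size MCR slot
+		 * holding, in order, the context paddr (4 bytes),
+		 * input buffer paddr (4), next input entry (4), input
+		 * length (2), reserved (2), reserved (2), total packet
+		 * length (2), output buffer paddr (4), next output
+		 * entry (4), output length (2) and reserved (2), i.e.
+		 * 32 bytes per request.
+		 */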
+ while (nreqs < wlp->dwl_reqspermcr) {
+ dca_request_t *reqp;
+
+ reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
+ if (reqp == NULL) {
+ /* nothing left to process */
+ break;
+ }
+ /*
+ * Update flow control.
+ */
+ wlp->dwl_count--;
+ if ((wlp->dwl_count == wlp->dwl_lowater) &&
+ (wlp->dwl_busy)) {
+ wlp->dwl_busy = 0;
+ crypto_prov_notify(wlp->dwl_prov,
+ CRYPTO_PROVIDER_READY);
+ }
+
+ /*
+ * Context address.
+ */
+ PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
+ offset += 4;
+
+ /*
+ * Input chain.
+ */
+ /* input buffer address */
+ PUTMCR32(workp, offset, reqp->dr_in_paddr);
+ offset += 4;
+ /* next input buffer entry */
+ PUTMCR32(workp, offset, reqp->dr_in_next);
+ offset += 4;
+ /* input buffer length */
+ PUTMCR16(workp, offset, reqp->dr_in_len);
+ offset += 2;
+ /* zero the reserved field */
+ PUTMCR16(workp, offset, 0);
+ offset += 2;
+
+ /*
+ * Overall length.
+ */
+ /* reserved field */
+ PUTMCR16(workp, offset, 0);
+ offset += 2;
+ /* total packet length */
+ PUTMCR16(workp, offset, reqp->dr_pkt_length);
+ offset += 2;
+
+ /*
+ * Output chain.
+ */
+ /* output buffer address */
+ PUTMCR32(workp, offset, reqp->dr_out_paddr);
+ offset += 4;
+ /* next output buffer entry */
+ PUTMCR32(workp, offset, reqp->dr_out_next);
+ offset += 4;
+ /* output buffer length */
+ PUTMCR16(workp, offset, reqp->dr_out_len);
+ offset += 2;
+ /* zero the reserved field */
+ PUTMCR16(workp, offset, 0);
+ offset += 2;
+
+ /*
+ * Note submission.
+ */
+ workp->dw_reqs[nreqs] = reqp;
+ nreqs++;
+ }
+
+ if (nreqs == 0) {
+ /* nothing in the queue! */
+ dca_freework(workp);
+ return;
+ }
+
+ wlp->dwl_submit++;
+
+ PUTMCR16(workp, MCR_FLAGS, 0);
+ PUTMCR16(workp, MCR_COUNT, nreqs);
+
+ DBG(dca, DCHATTY,
+ "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
+ workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
+ nreqs, mcr);
+
+ workp->dw_lbolt = ddi_get_lbolt();
+ /* Make sure MCR is synced out to device. */
+ (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
+ DDI_DMA_SYNC_FORDEV);
+ if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ dca_destroywork(workp);
+ return;
+ }
+
+ PUTCSR(dca, csr, workp->dw_mcr_paddr);
+ if (dca_check_acc_handle(dca, dca->dca_regs_handle,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ dca_destroywork(workp);
+ return;
+ } else {
+ dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
+ }
+
+ DBG(dca, DCHATTY, "posted");
+ }
+}
+
+/*
+ * Reclaim completed work, called in interrupt context.
+ */
+void
+dca_reclaim(dca_t *dca, int mcr)
+{
+ dca_worklist_t *wlp = WORKLIST(dca, mcr);
+ dca_work_t *workp;
+ ushort_t flags;
+ int nreclaimed = 0;
+ int i;
+
+ DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
+ ASSERT(mutex_owned(&wlp->dwl_lock));
+ /*
+ * For each MCR in the submitted queue (runq), we check to see
+ * if it has been processed. If so, then we note each individual
+ * job in the MCR, and do the completion processing for each
+ * such job.
+ */
+ for (;;) {
+
+ workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
+ if (workp == NULL) {
+ break;
+ }
+
+ /* only sync the MCR flags, since that's all we need */
+ (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4,
+ DDI_DMA_SYNC_FORKERNEL);
+ if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ dca_rmqueue((dca_listnode_t *)workp);
+ dca_destroywork(workp);
+ return;
+ }
+
+ flags = GETMCR16(workp, MCR_FLAGS);
+ if ((flags & MCRFLAG_FINISHED) == 0) {
+ /* chip is still working on it */
+ DBG(dca, DRECLAIM,
+ "chip still working on it (MCR%d)", mcr);
+ break;
+ }
+
+		/* it's really for us, so remove it from the queue */
+ dca_rmqueue((dca_listnode_t *)workp);
+
+ /* if we were draining, signal on the cv */
+ if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
+ cv_signal(&wlp->dwl_cv);
+ }
+
+ /* update statistics, done under the lock */
+ for (i = 0; i < wlp->dwl_reqspermcr; i++) {
+ dca_request_t *reqp = workp->dw_reqs[i];
+ if (reqp == NULL) {
+ continue;
+ }
+ if (reqp->dr_byte_stat >= 0) {
+ dca->dca_stats[reqp->dr_byte_stat] +=
+ reqp->dr_pkt_length;
+ }
+ if (reqp->dr_job_stat >= 0) {
+ dca->dca_stats[reqp->dr_job_stat]++;
+ }
+ }
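+		/*
+		 * Drop the lock while running the completion
+		 * callbacks, since dca_done() may call back into the
+		 * framework or trigger further scheduling.
+		 */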
+ mutex_exit(&wlp->dwl_lock);
+
+ for (i = 0; i < wlp->dwl_reqspermcr; i++) {
+ dca_request_t *reqp = workp->dw_reqs[i];
+
+ if (reqp == NULL) {
+ continue;
+ }
+
+ /* Do the callback. */
+ workp->dw_reqs[i] = NULL;
+ dca_done(reqp, CRYPTO_SUCCESS);
+
+ nreclaimed++;
+ }
+
+ mutex_enter(&wlp->dwl_lock);
+
+ /* now we can release the work */
+ dca_freework(workp);
+ }
+ DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed);
+}
+
+int
+dca_length(crypto_data_t *cdata)
+{
+ return (cdata->cd_length);
+}
+
+/*
+ * This is the callback function called from the interrupt when a kCF job
+ * completes. It does some driver-specific things, and then calls the
+ * kCF-provided callback. Finally, it cleans up the state for the work
+ * request and drops the reference count to allow for DR.
+ */
+void
+dca_done(dca_request_t *reqp, int err)
+{
+ uint64_t ena = 0;
+
+ /* unbind any chains we were using */
+ if (dca_unbindchains(reqp) != DDI_SUCCESS) {
+ /* DMA failure */
+ ena = dca_ena(ena);
+ dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT,
+ DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR,
+ "fault on buffer DMA handle");
+ if (err == CRYPTO_SUCCESS) {
+ err = CRYPTO_DEVICE_ERROR;
+ }
+ }
+
+ if (reqp->dr_callback != NULL) {
+ reqp->dr_callback(reqp, err);
+ } else {
+ dca_freereq(reqp);
+ }
+}
+
+/*
+ * Call this when a failure is detected. It will reset the chip,
+ * log a message, alert kCF, and mark jobs in the runq as failed.
+ */
+/* ARGSUSED */
+void
+dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index,
+ uint64_t ena, int errno, char *mess, ...)
+{
+ va_list ap;
+ char buf[256];
+ int mcr;
+ char *eclass;
+ int have_mutex;
+
+ va_start(ap, mess);
+	(void) vsnprintf(buf, sizeof (buf), mess, ap);
+ va_end(ap);
+
+ eclass = dca_fma_eclass_string(dca->dca_model, index);
+
+ if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) &&
+ index != DCA_FM_ECLASS_NONE) {
+ ddi_fm_ereport_post(dca->dca_dip, eclass, ena,
+ DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
+ FM_EREPORT_VERS0, NULL);
+
+ /* Report the impact of the failure to the DDI. */
+ ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST);
+ } else {
+ /* Just log the error string to the message log */
+ dca_error(dca, buf);
+ }
+
+ /*
+	 * Indicate a failure (keeps dca_schedule() from running).
+ */
+ dca->dca_flags |= DCA_FAILED;
+
+ /*
+	 * Reset the chip. As a side effect, this should also disable
+	 * all interrupts from the device.
+ */
+ (void) dca_reset(dca, 1);
+
+ /*
+ * Report the failure to kCF.
+ */
+ for (mcr = MCR1; mcr <= MCR2; mcr++) {
+ if (WORKLIST(dca, mcr)->dwl_prov) {
+ crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov,
+ CRYPTO_PROVIDER_FAILED);
+ }
+ }
+
+ /*
+ * Return jobs not sent to hardware back to kCF.
+ */
+ dca_rejectjobs(dca);
+
+ /*
+ * From this point on, no new work should be arriving, and the
+ * chip should not be doing any active DMA.
+ */
+
+ /*
+ * Now find all the work submitted to the device and fail
+ * them.
+ */
+ for (mcr = MCR1; mcr <= MCR2; mcr++) {
+ dca_worklist_t *wlp;
+ int i;
+
+ wlp = WORKLIST(dca, mcr);
+
+ if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
+ continue;
+ }
+ for (;;) {
+ dca_work_t *workp;
+
+ have_mutex = mutex_tryenter(&wlp->dwl_lock);
+ workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq);
+ if (workp == NULL) {
+ if (have_mutex)
+ mutex_exit(&wlp->dwl_lock);
+ break;
+ }
+ mutex_exit(&wlp->dwl_lock);
+
+ /*
+ * Free up requests
+ */
+ for (i = 0; i < wlp->dwl_reqspermcr; i++) {
+ dca_request_t *reqp = workp->dw_reqs[i];
+ if (reqp) {
+ if (reqp->dr_flags & DR_INPLACE) {
+ dca_done(reqp, errno);
+ } else {
+ /*
+ * cause it to get retried
+ * elsewhere (software)
+ */
+ dca_done(reqp, CRYPTO_FAILED);
+ }
+ workp->dw_reqs[i] = NULL;
+ }
+ }
+
+ mutex_enter(&wlp->dwl_lock);
+ /*
+ * If waiting to drain, signal on the waiter.
+ */
+ if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
+ cv_signal(&wlp->dwl_cv);
+ }
+
+ /*
+ * Return the work and request structures to
+ * the free pool.
+ */
+ dca_freework(workp);
+ if (have_mutex)
+ mutex_exit(&wlp->dwl_lock);
+ }
+ }
+}
+
+#ifdef SCHEDDELAY
+/*
+ * Reschedule worklist as needed.
+ */
+void
+dca_schedtimeout(void *arg)
+{
+ dca_worklist_t *wlp = (dca_worklist_t *)arg;
+ mutex_enter(&wlp->dwl_lock);
+ wlp->dwl_schedtid = 0;
+ dca_schedule(wlp->dwl_dca, wlp->dwl_mcr);
+ mutex_exit(&wlp->dwl_lock);
+}
+#endif
+
+/*
+ * Check for stalled jobs.
+ */
+void
+dca_jobtimeout(void *arg)
+{
+ int mcr;
+ dca_t *dca = (dca_t *)arg;
+ int hung = 0;
+
+ for (mcr = MCR1; mcr <= MCR2; mcr++) {
+ dca_worklist_t *wlp = WORKLIST(dca, mcr);
+ dca_work_t *workp;
+ clock_t when;
+
+ mutex_enter(&wlp->dwl_lock);
+ when = ddi_get_lbolt();
+
+ workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
+ if (workp == NULL) {
+ /* nothing sitting in the queue */
+ mutex_exit(&wlp->dwl_lock);
+ continue;
+ }
+
+ if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) {
+ /* request has been queued for less than STALETIME */
+ mutex_exit(&wlp->dwl_lock);
+ continue;
+ }
+
+ /* job has been sitting around for over 1 second, badness */
+ DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp,
+ mcr);
+
+ /* put it back in the queue, until we reset the chip */
+ hung++;
+ mutex_exit(&wlp->dwl_lock);
+ }
+
+ if (hung) {
+ dca_failure(dca, DDI_DEVICE_FAULT,
+ DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR,
+ "timeout processing job.)");
+ }
+
+ /* reschedule ourself */
+ mutex_enter(&dca->dca_intrlock);
+ if (dca->dca_jobtid == 0) {
+ /* timeout has been canceled, prior to DR */
+ mutex_exit(&dca->dca_intrlock);
+ return;
+ }
+
+ /* check again in 1 second */
+ dca->dca_jobtid = timeout(dca_jobtimeout, arg,
+ drv_usectohz(SECOND));
+ mutex_exit(&dca->dca_intrlock);
+}
+
+/*
+ * This returns all jobs back to kCF. It assumes that processing
+ * on the worklist has halted.
+ */
+void
+dca_rejectjobs(dca_t *dca)
+{
+ int mcr;
+ int have_mutex;
+ for (mcr = MCR1; mcr <= MCR2; mcr++) {
+ dca_worklist_t *wlp = WORKLIST(dca, mcr);
+ dca_request_t *reqp;
+
+ if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
+ continue;
+ }
+ have_mutex = mutex_tryenter(&wlp->dwl_lock);
+ for (;;) {
+ reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq);
+ if (reqp == NULL) {
+ break;
+ }
+ /* update flow control */
+ wlp->dwl_count--;
+ if ((wlp->dwl_count == wlp->dwl_lowater) &&
+ (wlp->dwl_busy)) {
+ wlp->dwl_busy = 0;
+ crypto_prov_notify(wlp->dwl_prov,
+ CRYPTO_PROVIDER_READY);
+ }
+ mutex_exit(&wlp->dwl_lock);
+
+ (void) dca_unbindchains(reqp);
+ reqp->dr_callback(reqp, EAGAIN);
+ mutex_enter(&wlp->dwl_lock);
+ }
+ if (have_mutex)
+ mutex_exit(&wlp->dwl_lock);
+ }
+}
+
+int
+dca_drain(dca_t *dca)
+{
+ int mcr;
+ for (mcr = MCR1; mcr <= MCR2; mcr++) {
+#ifdef SCHEDDELAY
+ timeout_id_t tid;
+#endif
+ dca_worklist_t *wlp = WORKLIST(dca, mcr);
+
+ mutex_enter(&wlp->dwl_lock);
+ wlp->dwl_drain = 1;
+
+ /* give it up to a second to drain from the chip */
+ if (!QEMPTY(&wlp->dwl_runq)) {
+			(void) cv_timedwait(&wlp->dwl_cv, &wlp->dwl_lock,
+			    ddi_get_lbolt() + drv_usectohz(STALETIME));
+
+ if (!QEMPTY(&wlp->dwl_runq)) {
+ dca_error(dca, "unable to drain device");
+ mutex_exit(&wlp->dwl_lock);
+ dca_undrain(dca);
+ return (EBUSY);
+ }
+ }
+
+#ifdef SCHEDDELAY
+ tid = wlp->dwl_schedtid;
+ mutex_exit(&wlp->dwl_lock);
+
+ /*
+ * untimeout outside the lock -- this is safe because we
+ * have set the drain flag, so dca_schedule() will not
+ * reschedule another timeout
+ */
+ if (tid) {
+ untimeout(tid);
+ }
+#else
+ mutex_exit(&wlp->dwl_lock);
+#endif
+ }
+ return (0);
+}
+
+void
+dca_undrain(dca_t *dca)
+{
+ int mcr;
+
+ for (mcr = MCR1; mcr <= MCR2; mcr++) {
+ dca_worklist_t *wlp = WORKLIST(dca, mcr);
+ mutex_enter(&wlp->dwl_lock);
+ wlp->dwl_drain = 0;
+ dca_schedule(dca, mcr);
+ mutex_exit(&wlp->dwl_lock);
+ }
+}
+
+/*
+ * Duplicate the crypto_data_t structure, but point to the original
+ * buffers.
+ */
+int
+dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput)
+{
+ ninput->cd_format = input->cd_format;
+ ninput->cd_offset = input->cd_offset;
+ ninput->cd_length = input->cd_length;
+ ninput->cd_miscdata = input->cd_miscdata;
+
+ switch (input->cd_format) {
+ case CRYPTO_DATA_RAW:
+ ninput->cd_raw.iov_base = input->cd_raw.iov_base;
+ ninput->cd_raw.iov_len = input->cd_raw.iov_len;
+ break;
+
+ case CRYPTO_DATA_UIO:
+ ninput->cd_uio = input->cd_uio;
+ break;
+
+ case CRYPTO_DATA_MBLK:
+ ninput->cd_mp = input->cd_mp;
+ break;
+
+ default:
+ DBG(NULL, DWARN,
+ "dca_dupcrypto: unrecognised crypto data format");
+ return (CRYPTO_FAILED);
+ }
+
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * Performs validation checks on the input and output data structures.
+ */
+int
+dca_verifyio(crypto_data_t *input, crypto_data_t *output)
+{
+ int rv = CRYPTO_SUCCESS;
+
+ switch (input->cd_format) {
+ case CRYPTO_DATA_RAW:
+ break;
+
+ case CRYPTO_DATA_UIO:
+		/* we support only kernel buffers */
+		if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
+			DBG(NULL, DWARN, "non-kernel input uio buffer");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ }
+ break;
+
+ case CRYPTO_DATA_MBLK:
+ break;
+
+ default:
+ DBG(NULL, DWARN, "unrecognised input crypto data format");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ switch (output->cd_format) {
+ case CRYPTO_DATA_RAW:
+ break;
+
+ case CRYPTO_DATA_UIO:
+		/* we support only kernel buffers */
+		if (output->cd_uio->uio_segflg != UIO_SYSSPACE) {
+			DBG(NULL, DWARN, "non-kernel output uio buffer");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ }
+ break;
+
+ case CRYPTO_DATA_MBLK:
+ break;
+
+ default:
+ DBG(NULL, DWARN, "unrecognised output crypto data format");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ }
+
+ return (rv);
+}
+
+/*
+ * data: source crypto_data_t struct
+ * off: offset into the source before commencing copy
+ * count: the amount of data to copy
+ * dest: destination buffer
+ */
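+/*
+ * For example (a hypothetical use, not from this driver): copying an
+ * 8-byte IV from the head of a request's input, regardless of whether
+ * it is RAW, UIO or MBLK based, would look like
+ *
+ *	uchar_t iv[8];
+ *
+ *	if (dca_getbufbytes(data, 0, 8, iv) != CRYPTO_SUCCESS)
+ *		return (CRYPTO_DATA_LEN_RANGE);
+ */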
+int
+dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest)
+{
+ int rv = CRYPTO_SUCCESS;
+ uio_t *uiop;
+ uint_t vec_idx;
+ size_t cur_len;
+ mblk_t *mp;
+
+ if (count == 0) {
+ /* We don't want anything so we're done. */
+ return (rv);
+ }
+
+ /*
+	 * Sanity check that we haven't specified a length greater than
+	 * the offset-adjusted size of the buffer.
+ */
+ if (count > (data->cd_length - off)) {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /* Add the internal crypto_data offset to the requested offset. */
+ off += data->cd_offset;
+
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ bcopy(data->cd_raw.iov_base + off, dest, count);
+ break;
+
+ case CRYPTO_DATA_UIO:
+ /*
+ * Jump to the first iovec containing data to be
+ * processed.
+ */
+ uiop = data->cd_uio;
+ for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
+ off >= uiop->uio_iov[vec_idx].iov_len;
+ off -= uiop->uio_iov[vec_idx++].iov_len);
+ if (vec_idx == uiop->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is larger than
+ * the total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * Now process the iovecs.
+ */
+ while (vec_idx < uiop->uio_iovcnt && count > 0) {
+ cur_len = min(uiop->uio_iov[vec_idx].iov_len -
+ off, count);
+ bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
+ cur_len);
+ count -= cur_len;
+ dest += cur_len;
+ vec_idx++;
+ off = 0;
+ }
+
+ if (vec_idx == uiop->uio_iovcnt && count > 0) {
+ /*
+			 * The end of the specified iovecs was reached but
+ * the length requested could not be processed
+ * (requested to digest more data than it provided).
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ break;
+
+ case CRYPTO_DATA_MBLK:
+ /*
+ * Jump to the first mblk_t containing data to be processed.
+ */
+ for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp);
+ off -= MBLKL(mp), mp = mp->b_cont);
+ if (mp == NULL) {
+ /*
+ * The caller specified an offset that is larger than
+ * the total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * Now do the processing on the mblk chain.
+ */
+ while (mp != NULL && count > 0) {
+ cur_len = min(MBLKL(mp) - off, count);
+ bcopy((char *)(mp->b_rptr + off), dest, cur_len);
+ count -= cur_len;
+ dest += cur_len;
+ mp = mp->b_cont;
+ off = 0;
+ }
+
+ if (mp == NULL && count > 0) {
+ /*
+			 * The end of the mblk chain was reached but the
+			 * length requested could not be processed
+			 * (requested to digest more data than it provided).
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ break;
+
+ default:
+ DBG(NULL, DWARN, "unrecognised crypto data format");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ }
+ return (rv);
+}
+
+
+/*
+ * Performs the input, output or hard scatter/gather checks on the specified
+ * crypto_data_t struct. Returns TRUE if the data is scatter/gather in
+ * nature, i.e. it fails the test.
+ */
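+/*
+ * The checks, per the cases below: DCA_SG_CONTIG requires a contiguous
+ * buffer; DCA_SG_WALIGN additionally requires 32-bit word alignment
+ * and a word-multiple size; DCA_SG_PALIGN requires page alignment and
+ * a page-multiple size.
+ */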
+int
+dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val)
+{
+ uio_t *uiop;
+ mblk_t *mp;
+ int rv = FALSE;
+
+ switch (val) {
+ case DCA_SG_CONTIG:
+ /*
+ * Check for a contiguous data buffer.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ /* Contiguous in nature */
+ break;
+
+ case CRYPTO_DATA_UIO:
+ if (data->cd_uio->uio_iovcnt > 1)
+ rv = TRUE;
+ break;
+
+ case CRYPTO_DATA_MBLK:
+ mp = data->cd_mp;
+ if (mp->b_cont != NULL)
+ rv = TRUE;
+ break;
+
+ default:
+ DBG(NULL, DWARN, "unrecognised crypto data format");
+ }
+ break;
+
+ case DCA_SG_WALIGN:
+ /*
+ * Check for a contiguous data buffer that is 32-bit word
+ * aligned and is of word multiples in size.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ if ((data->cd_raw.iov_len % sizeof (uint32_t)) ||
+ ((uintptr_t)data->cd_raw.iov_base %
+ sizeof (uint32_t))) {
+ rv = TRUE;
+ }
+ break;
+
+ case CRYPTO_DATA_UIO:
+ uiop = data->cd_uio;
+ if (uiop->uio_iovcnt > 1) {
+ return (TRUE);
+ }
+ /* So there is only one iovec */
+ if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) ||
+ ((uintptr_t)uiop->uio_iov[0].iov_base %
+ sizeof (uint32_t))) {
+ rv = TRUE;
+ }
+ break;
+
+ case CRYPTO_DATA_MBLK:
+ mp = data->cd_mp;
+ if (mp->b_cont != NULL) {
+ return (TRUE);
+ }
+ /* So there is only one mblk in the chain */
+ if ((MBLKL(mp) % sizeof (uint32_t)) ||
+ ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) {
+ rv = TRUE;
+ }
+ break;
+
+ default:
+ DBG(NULL, DWARN, "unrecognised crypto data format");
+ }
+ break;
+
+ case DCA_SG_PALIGN:
+ /*
+ * Check that the data buffer is page aligned and is of
+ * page multiples in size.
+ */
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ if ((data->cd_length % dca->dca_pagesize) ||
+ ((uintptr_t)data->cd_raw.iov_base %
+ dca->dca_pagesize)) {
+ rv = TRUE;
+ }
+ break;
+
+ case CRYPTO_DATA_UIO:
+ uiop = data->cd_uio;
+ if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) ||
+ ((uintptr_t)uiop->uio_iov[0].iov_base %
+ dca->dca_pagesize)) {
+ rv = TRUE;
+ }
+ break;
+
+ case CRYPTO_DATA_MBLK:
+ mp = data->cd_mp;
+ if ((MBLKL(mp) % dca->dca_pagesize) ||
+ ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) {
+ rv = TRUE;
+ }
+ break;
+
+ default:
+ DBG(NULL, DWARN, "unrecognised crypto data format");
+ }
+ break;
+
+ default:
+ DBG(NULL, DWARN, "unrecognised scatter/gather param type");
+ }
+
+ return (rv);
+}
+
+/*
+ * Increments the cd_offset and decrements the cd_length as the data is
+ * gathered from the crypto_data_t struct.
+ * The data is reverse-copied into the dest buffer if the flag is true.
+ */
+int
+dca_gather(crypto_data_t *in, char *dest, int count, int reverse)
+{
+ int rv = CRYPTO_SUCCESS;
+ uint_t vec_idx;
+ uio_t *uiop;
+ off_t off = in->cd_offset;
+ size_t cur_len;
+ mblk_t *mp;
+
+ switch (in->cd_format) {
+ case CRYPTO_DATA_RAW:
+ if (count > in->cd_length) {
+ /*
+ * The caller specified a length greater than the
+ * size of the buffer.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ if (reverse)
+ dca_reverse(in->cd_raw.iov_base + off, dest, count,
+ count);
+ else
+ bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
+ in->cd_offset += count;
+ in->cd_length -= count;
+ break;
+
+ case CRYPTO_DATA_UIO:
+ /*
+ * Jump to the first iovec containing data to be processed.
+ */
+ uiop = in->cd_uio;
+ for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
+ off >= uiop->uio_iov[vec_idx].iov_len;
+ off -= uiop->uio_iov[vec_idx++].iov_len);
+ if (vec_idx == uiop->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is larger than
+ * the total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * Now process the iovecs.
+ */
+ while (vec_idx < uiop->uio_iovcnt && count > 0) {
+ cur_len = min(uiop->uio_iov[vec_idx].iov_len -
+ off, count);
+ count -= cur_len;
+ if (reverse) {
+ /* Fill the dest buffer from the end */
+ dca_reverse(uiop->uio_iov[vec_idx].iov_base +
+ off, dest+count, cur_len, cur_len);
+ } else {
+ bcopy(uiop->uio_iov[vec_idx].iov_base + off,
+ dest, cur_len);
+ dest += cur_len;
+ }
+ in->cd_offset += cur_len;
+ in->cd_length -= cur_len;
+ vec_idx++;
+ off = 0;
+ }
+
+ if (vec_idx == uiop->uio_iovcnt && count > 0) {
+ /*
+			 * The end of the specified iovecs was reached but
+ * the length requested could not be processed
+ * (requested to digest more data than it provided).
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ break;
+
+ case CRYPTO_DATA_MBLK:
+ /*
+ * Jump to the first mblk_t containing data to be processed.
+ */
+ for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
+ off -= MBLKL(mp), mp = mp->b_cont);
+ if (mp == NULL) {
+ /*
+ * The caller specified an offset that is larger than
+ * the total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * Now do the processing on the mblk chain.
+ */
+ while (mp != NULL && count > 0) {
+ cur_len = min(MBLKL(mp) - off, count);
+ count -= cur_len;
+ if (reverse) {
+ /* Fill the dest buffer from the end */
+ dca_reverse((char *)(mp->b_rptr + off),
+ dest+count, cur_len, cur_len);
+ } else {
+ bcopy((char *)(mp->b_rptr + off), dest,
+ cur_len);
+ dest += cur_len;
+ }
+ in->cd_offset += cur_len;
+ in->cd_length -= cur_len;
+ mp = mp->b_cont;
+ off = 0;
+ }
+
+ if (mp == NULL && count > 0) {
+ /*
+			 * The end of the mblk chain was reached but the
+			 * length requested could not be processed
+			 * (requested to digest more data than it provided).
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ break;
+
+ default:
+ DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ }
+ return (rv);
+}
+
+/*
+ * Increments the cd_offset and decrements the cd_length as the data is
+ * gathered from the crypto_data_t struct. Any residual from a previous
+ * call is consumed first.
+ */
+int
+dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest,
+ int count)
+{
+ int rv = CRYPTO_SUCCESS;
+ caddr_t baddr;
+ uint_t vec_idx;
+ uio_t *uiop;
+ off_t off = in->cd_offset;
+ size_t cur_len;
+ mblk_t *mp;
+
+ /* Process the residual first */
+ if (*residlen > 0) {
+ uint_t num = min(count, *residlen);
+ bcopy(resid, dest, num);
+ *residlen -= num;
+ if (*residlen > 0) {
+ /*
+ * Requested amount 'count' is less than what's in
+ * the residual, so shuffle any remaining resid to
+ * the front.
+ */
+ baddr = resid + num;
+ bcopy(baddr, resid, *residlen);
+ }
+ dest += num;
+ count -= num;
+ }
+
+ /* Now process what's in the crypto_data_t structs */
+ switch (in->cd_format) {
+ case CRYPTO_DATA_RAW:
+ if (count > in->cd_length) {
+ /*
+ * The caller specified a length greater than the
+ * size of the buffer.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
+ in->cd_offset += count;
+ in->cd_length -= count;
+ break;
+
+ case CRYPTO_DATA_UIO:
+ /*
+ * Jump to the first iovec containing data to be processed.
+ */
+ uiop = in->cd_uio;
+ for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
+ off >= uiop->uio_iov[vec_idx].iov_len;
+ off -= uiop->uio_iov[vec_idx++].iov_len);
+ if (vec_idx == uiop->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is larger than
+ * the total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * Now process the iovecs.
+ */
+ while (vec_idx < uiop->uio_iovcnt && count > 0) {
+ cur_len = min(uiop->uio_iov[vec_idx].iov_len -
+ off, count);
+ bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
+ cur_len);
+ count -= cur_len;
+ dest += cur_len;
+ in->cd_offset += cur_len;
+ in->cd_length -= cur_len;
+ vec_idx++;
+ off = 0;
+ }
+
+ if (vec_idx == uiop->uio_iovcnt && count > 0) {
+ /*
+			 * The end of the specified iovecs was reached but
+ * the length requested could not be processed
+ * (requested to digest more data than it provided).
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ break;
+
+ case CRYPTO_DATA_MBLK:
+ /*
+ * Jump to the first mblk_t containing data to be processed.
+ */
+ for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
+ off -= MBLKL(mp), mp = mp->b_cont);
+ if (mp == NULL) {
+ /*
+ * The caller specified an offset that is larger than
+ * the total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * Now do the processing on the mblk chain.
+ */
+ while (mp != NULL && count > 0) {
+ cur_len = min(MBLKL(mp) - off, count);
+ bcopy((char *)(mp->b_rptr + off), dest, cur_len);
+ count -= cur_len;
+ dest += cur_len;
+ in->cd_offset += cur_len;
+ in->cd_length -= cur_len;
+ mp = mp->b_cont;
+ off = 0;
+ }
+
+ if (mp == NULL && count > 0) {
+ /*
+			 * The end of the mblk chain was reached but the
+			 * length requested could not be processed
+			 * (requested to digest more data than it provided).
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ break;
+
+ default:
+ DBG(NULL, DWARN,
+ "dca_resid_gather: unrecognised crypto data format");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ }
+ return (rv);
+}
+
+/*
+ * Appends the data to the crypto_data_t struct increasing cd_length.
+ * cd_offset is left unchanged.
+ * Data is reverse-copied if the flag is TRUE.
+ */
+int
+dca_scatter(const char *src, crypto_data_t *out, int count, int reverse)
+{
+ int rv = CRYPTO_SUCCESS;
+ off_t offset = out->cd_offset + out->cd_length;
+ uint_t vec_idx;
+ uio_t *uiop;
+ size_t cur_len;
+ mblk_t *mp;
+
+ switch (out->cd_format) {
+ case CRYPTO_DATA_RAW:
+ if (out->cd_raw.iov_len - offset < count) {
+ /* Trying to write out more than space available. */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ if (reverse)
+ dca_reverse((void*) src, out->cd_raw.iov_base + offset,
+ count, count);
+ else
+ bcopy(src, out->cd_raw.iov_base + offset, count);
+ out->cd_length += count;
+ break;
+
+ case CRYPTO_DATA_UIO:
+ /*
+ * Jump to the first iovec that can be written to.
+ */
+ uiop = out->cd_uio;
+ for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
+ offset >= uiop->uio_iov[vec_idx].iov_len;
+ offset -= uiop->uio_iov[vec_idx++].iov_len);
+ if (vec_idx == uiop->uio_iovcnt) {
+ /*
+ * The caller specified an offset that is larger than
+ * the total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * Now process the iovecs.
+ */
+ while (vec_idx < uiop->uio_iovcnt && count > 0) {
+ cur_len = min(uiop->uio_iov[vec_idx].iov_len -
+ offset, count);
+ count -= cur_len;
+ if (reverse) {
+ dca_reverse((void*) (src+count),
+ uiop->uio_iov[vec_idx].iov_base +
+ offset, cur_len, cur_len);
+ } else {
+ bcopy(src, uiop->uio_iov[vec_idx].iov_base +
+ offset, cur_len);
+ src += cur_len;
+ }
+ out->cd_length += cur_len;
+ vec_idx++;
+ offset = 0;
+ }
+
+ if (vec_idx == uiop->uio_iovcnt && count > 0) {
+ /*
+			 * The end of the specified iovecs was reached but
+ * the length requested could not be processed
+ * (requested to write more data than space provided).
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ break;
+
+ case CRYPTO_DATA_MBLK:
+ /*
+ * Jump to the first mblk_t that can be written to.
+ */
+ for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp);
+ offset -= MBLKL(mp), mp = mp->b_cont);
+ if (mp == NULL) {
+ /*
+ * The caller specified an offset that is larger than
+ * the total size of the buffers it provided.
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+
+ /*
+ * Now do the processing on the mblk chain.
+ */
+ while (mp != NULL && count > 0) {
+ cur_len = min(MBLKL(mp) - offset, count);
+ count -= cur_len;
+ if (reverse) {
+ dca_reverse((void*) (src+count),
+ (char *)(mp->b_rptr + offset), cur_len,
+ cur_len);
+ } else {
+ bcopy(src, (char *)(mp->b_rptr + offset),
+ cur_len);
+ src += cur_len;
+ }
+ out->cd_length += cur_len;
+ mp = mp->b_cont;
+ offset = 0;
+ }
+
+ if (mp == NULL && count > 0) {
+ /*
+			 * The end of the mblk chain was reached but the
+			 * length requested could not be processed
+			 * (requested to write more data than space provided).
+ */
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ break;
+
+ default:
+ DBG(NULL, DWARN, "unrecognised crypto data format");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ }
+ return (rv);
+}
+
+/*
+ * Compare two byte arrays in reverse order.
+ * Return 0 if they are identical, 1 otherwise.
+ */
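+/*
+ * For example, dca_bcmp_reverse("\1\2\3", "\3\2\1", 3) returns 0: the
+ * first buffer read backwards matches the second read forwards.
+ */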
+int
+dca_bcmp_reverse(const void *s1, const void *s2, size_t n)
+{
+ int i;
+ caddr_t src, dst;
+
+ if (!n)
+ return (0);
+
+ src = ((caddr_t)s1) + n - 1;
+ dst = (caddr_t)s2;
+ for (i = 0; i < n; i++) {
+ if (*src != *dst)
+ return (1);
+ src--;
+ dst++;
+ }
+
+ return (0);
+}
+
+
+/*
+ * This calculates the size of a bignum in bits, specifically not counting
+ * leading zero bits. This size calculation must be done *before* any
+ * endian reversal takes place (i.e. the numbers are in absolute big-endian
+ * order).
+ */
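+/*
+ * For example, the two-byte bignum { 0x03, 0xff } (i.e. 1023) yields
+ * i = 0 and j = 2 below, so dca_bitlen() returns
+ * (8 * (2 - 0 - 1)) + 2 = 10 bits.
+ */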
+int
+dca_bitlen(unsigned char *bignum, int bytelen)
+{
+ unsigned char msbyte;
+ int i, j;
+
+ for (i = 0; i < bytelen - 1; i++) {
+ if (bignum[i] != 0) {
+ break;
+ }
+ }
+ msbyte = bignum[i];
+ for (j = 8; j > 1; j--) {
+ if (msbyte & 0x80) {
+ break;
+ }
+ msbyte <<= 1;
+ }
+ return ((8 * (bytelen - i - 1)) + j);
+}
+
+/*
+ * This compares two bignums (in big-endian order). It ignores leading
+ * null bytes. The result semantics follow bcmp, memcmp, strcmp, etc.
+ */
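+/*
+ * For example, comparing { 0x00, 0x01 } (length 2) against { 0x02 }
+ * (length 1): the leading null byte is skipped, the effective lengths
+ * are then equal, and the final byte comparison returns 0x01 - 0x02,
+ * which is negative.
+ */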
+int
+dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len)
+{
+ while ((n1len > 1) && (*n1 == 0)) {
+ n1len--;
+ n1++;
+ }
+ while ((n2len > 1) && (*n2 == 0)) {
+ n2len--;
+ n2++;
+ }
+ if (n1len != n2len) {
+ return (n1len - n2len);
+ }
+ while ((n1len > 1) && (*n1 == *n2)) {
+ n1++;
+ n2++;
+ n1len--;
+ }
+ return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2));
+}
+
+/*
+ * Return array of key attributes.
+ */
+crypto_object_attribute_t *
+dca_get_key_attr(crypto_key_t *key)
+{
+ if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) ||
+ (key->ck_count == 0)) {
+ return (NULL);
+ }
+
+ return (key->ck_attrs);
+}
+
+/*
+ * If the attribute type exists, valp points to its 32-bit value.
+ */
+int
+dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
+ uint64_t atype, uint32_t *valp)
+{
+ crypto_object_attribute_t *bap;
+
+ bap = dca_find_attribute(attrp, atnum, atype);
+ if (bap == NULL) {
+ return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
+ }
+
+ *valp = *bap->oa_value;
+
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * If the attribute type exists, data contains the start address of the
+ * value, and numelems contains its length.
+ */
+int
+dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
+ uint64_t atype, void **data, unsigned int *numelems)
+{
+ crypto_object_attribute_t *bap;
+
+ bap = dca_find_attribute(attrp, atnum, atype);
+ if (bap == NULL) {
+ return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
+ }
+
+ *data = bap->oa_value;
+ *numelems = bap->oa_value_len;
+
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * Finds the entry of the specified attribute type. If it is not found,
+ * dca_find_attribute returns NULL.
+ */
+crypto_object_attribute_t *
+dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
+ uint64_t atype)
+{
+ while (atnum) {
+ if (attrp->oa_type == atype)
+ return (attrp);
+ atnum--;
+ attrp++;
+ }
+ return (NULL);
+}
+
+/*
+ * Return the address of the first data buffer. If the data format is
+ * unrecognised return NULL.
+ */
+caddr_t
+dca_bufdaddr(crypto_data_t *data)
+{
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ return (data->cd_raw.iov_base + data->cd_offset);
+ case CRYPTO_DATA_UIO:
+ return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset);
+ case CRYPTO_DATA_MBLK:
+ return ((char *)data->cd_mp->b_rptr + data->cd_offset);
+ default:
+ DBG(NULL, DWARN,
+ "dca_bufdaddr: unrecognised crypto data format");
+ return (NULL);
+ }
+}
+
+static caddr_t
+dca_bufdaddr_out(crypto_data_t *data)
+{
+ size_t offset = data->cd_offset + data->cd_length;
+
+ switch (data->cd_format) {
+ case CRYPTO_DATA_RAW:
+ return (data->cd_raw.iov_base + offset);
+ case CRYPTO_DATA_UIO:
+ return (data->cd_uio->uio_iov[0].iov_base + offset);
+ case CRYPTO_DATA_MBLK:
+ return ((char *)data->cd_mp->b_rptr + offset);
+ default:
+ DBG(NULL, DWARN,
+ "dca_bufdaddr_out: unrecognised crypto data format");
+ return (NULL);
+ }
+}
+
+/*
+ * Control entry points.
+ */
+
+/* ARGSUSED */
+static void
+dca_provider_status(crypto_provider_handle_t provider, uint_t *status)
+{
+ *status = CRYPTO_PROVIDER_READY;
+}
+
+/*
+ * Cipher (encrypt/decrypt) entry points.
+ */
+
+/* ARGSUSED */
+static int
+dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_encrypt_init: started");
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case DES_CBC_MECH_INFO_TYPE:
+ error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
+ DR_ENCRYPT);
+ break;
+ case DES3_CBC_MECH_INFO_TYPE:
+ error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
+ DR_ENCRYPT | DR_TRIPLE);
+ break;
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type "
+ "0x%llx\n", (unsigned long long)mechanism->cm_type);
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error);
+
+ if (error == CRYPTO_SUCCESS)
+ dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
+ &softc->dca_ctx_list_lock);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
+ crypto_data_t *ciphertext, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_encrypt: started");
+
+ /* check mechanism */
+ switch (DCA_MECH_FROM_CTX(ctx)) {
+ case DES_CBC_MECH_INFO_TYPE:
+ error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT);
+ break;
+ case DES3_CBC_MECH_INFO_TYPE:
+ error = dca_3des(ctx, plaintext, ciphertext, req,
+ DR_ENCRYPT | DR_TRIPLE);
+ break;
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsastart(ctx, plaintext, ciphertext, req,
+ DCA_RSA_ENC);
+ break;
+ default:
+ /* Should never reach here */
+ cmn_err(CE_WARN, "dca_encrypt: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
+ (error != CRYPTO_BUFFER_TOO_SMALL)) {
+ ciphertext->cd_length = 0;
+ }
+
+ DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
+ crypto_data_t *ciphertext, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_encrypt_update: started");
+
+ /* check mechanism */
+ switch (DCA_MECH_FROM_CTX(ctx)) {
+ case DES_CBC_MECH_INFO_TYPE:
+ error = dca_3desupdate(ctx, plaintext, ciphertext, req,
+ DR_ENCRYPT);
+ break;
+ case DES3_CBC_MECH_INFO_TYPE:
+ error = dca_3desupdate(ctx, plaintext, ciphertext, req,
+ DR_ENCRYPT | DR_TRIPLE);
+ break;
+ default:
+ /* Should never reach here */
+ cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_encrypt_final: started");
+
+ /* check mechanism */
+ switch (DCA_MECH_FROM_CTX(ctx)) {
+ case DES_CBC_MECH_INFO_TYPE:
+ error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT);
+ break;
+ case DES3_CBC_MECH_INFO_TYPE:
+ error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE);
+ break;
+ default:
+ /* Should never reach here */
+ cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_encrypt_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc = (dca_t *)provider;
+
+ DBG(softc, DENTRY, "dca_encrypt_atomic: started");
+
+ if (ctx_template != NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case DES_CBC_MECH_INFO_TYPE:
+ error = dca_3desatomic(provider, session_id, mechanism, key,
+ plaintext, ciphertext, KM_SLEEP, req,
+ DR_ENCRYPT | DR_ATOMIC);
+ break;
+ case DES3_CBC_MECH_INFO_TYPE:
+ error = dca_3desatomic(provider, session_id, mechanism, key,
+ plaintext, ciphertext, KM_SLEEP, req,
+ DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC);
+ break;
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsaatomic(provider, session_id, mechanism, key,
+ plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type "
+ "0x%llx\n", (unsigned long long)mechanism->cm_type);
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
+ ciphertext->cd_length = 0;
+ }
+
+ DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_decrypt_init: started");
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case DES_CBC_MECH_INFO_TYPE:
+ error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
+ DR_DECRYPT);
+ break;
+ case DES3_CBC_MECH_INFO_TYPE:
+ error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
+ DR_DECRYPT | DR_TRIPLE);
+ break;
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type "
+ "0x%llx\n", (unsigned long long)mechanism->cm_type);
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error);
+
+ if (error == CRYPTO_SUCCESS)
+ dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
+ &softc->dca_ctx_list_lock);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
+ crypto_data_t *plaintext, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_decrypt: started");
+
+ /* check mechanism */
+ switch (DCA_MECH_FROM_CTX(ctx)) {
+ case DES_CBC_MECH_INFO_TYPE:
+ error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT);
+ break;
+ case DES3_CBC_MECH_INFO_TYPE:
+ error = dca_3des(ctx, ciphertext, plaintext, req,
+ DR_DECRYPT | DR_TRIPLE);
+ break;
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsastart(ctx, ciphertext, plaintext, req,
+ DCA_RSA_DEC);
+ break;
+ default:
+ /* Should never reach here */
+ cmn_err(CE_WARN, "dca_decrypt: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
+ (error != CRYPTO_BUFFER_TOO_SMALL)) {
+ if (plaintext)
+ plaintext->cd_length = 0;
+ }
+
+ DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
+ crypto_data_t *plaintext, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_decrypt_update: started");
+
+ /* check mechanism */
+ switch (DCA_MECH_FROM_CTX(ctx)) {
+ case DES_CBC_MECH_INFO_TYPE:
+ error = dca_3desupdate(ctx, ciphertext, plaintext, req,
+ DR_DECRYPT);
+ break;
+ case DES3_CBC_MECH_INFO_TYPE:
+ error = dca_3desupdate(ctx, ciphertext, plaintext, req,
+ DR_DECRYPT | DR_TRIPLE);
+ break;
+ default:
+ /* Should never reach here */
+ cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_decrypt_final: started");
+
+ /* check mechanism */
+ switch (DCA_MECH_FROM_CTX(ctx)) {
+ case DES_CBC_MECH_INFO_TYPE:
+ error = dca_3desfinal(ctx, plaintext, DR_DECRYPT);
+ break;
+ case DES3_CBC_MECH_INFO_TYPE:
+ error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE);
+ break;
+ default:
+ /* Should never reach here */
+ cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_decrypt_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc = (dca_t *)provider;
+
+ DBG(softc, DENTRY, "dca_decrypt_atomic: started");
+
+ if (ctx_template != NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case DES_CBC_MECH_INFO_TYPE:
+ error = dca_3desatomic(provider, session_id, mechanism, key,
+ ciphertext, plaintext, KM_SLEEP, req,
+ DR_DECRYPT | DR_ATOMIC);
+ break;
+ case DES3_CBC_MECH_INFO_TYPE:
+ error = dca_3desatomic(provider, session_id, mechanism, key,
+ ciphertext, plaintext, KM_SLEEP, req,
+ DR_DECRYPT | DR_TRIPLE | DR_ATOMIC);
+ break;
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsaatomic(provider, session_id, mechanism, key,
+ ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type "
+ "0x%llx\n", (unsigned long long)mechanism->cm_type);
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
+ plaintext->cd_length = 0;
+ }
+
+ DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/*
+ * Sign entry points.
+ */
+
+/* ARGSUSED */
+static int
+dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_sign_init: started\n");
+
+ if (ctx_template != NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
+ break;
+ case DSA_MECH_INFO_TYPE:
+ error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
+ DCA_DSA_SIGN);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
+ "0x%llx\n", (unsigned long long)mechanism->cm_type);
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);
+
+ if (error == CRYPTO_SUCCESS)
+ dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
+ &softc->dca_ctx_list_lock);
+
+ return (error);
+}
+
+static int
+dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_data_t *signature, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_sign: started\n");
+
+ /* check mechanism */
+ switch (DCA_MECH_FROM_CTX(ctx)) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
+ break;
+ case DSA_MECH_INFO_TYPE:
+ error = dca_dsa_sign(ctx, data, signature, req);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_sign: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_MECHANISM_INVALID;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_sign_update: started\n");
+
+ cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+
+ DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_MECHANISM_INVALID;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_sign_final: started\n");
+
+ cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+
+ DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);
+
+ return (error);
+}
+
+static int
+dca_sign_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc = (dca_t *)provider;
+
+ DBG(softc, DENTRY, "dca_sign_atomic: started\n");
+
+ if (ctx_template != NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsaatomic(provider, session_id, mechanism, key,
+ data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
+ break;
+ case DSA_MECH_INFO_TYPE:
+ error = dca_dsaatomic(provider, session_id, mechanism, key,
+ data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
+ "0x%llx\n", (unsigned long long)mechanism->cm_type);
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_sign_recover_init: started\n");
+
+ if (ctx_template != NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
+ "0x%llx\n", (unsigned long long)mechanism->cm_type);
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);
+
+ if (error == CRYPTO_SUCCESS)
+ dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
+ &softc->dca_ctx_list_lock);
+
+ return (error);
+}
+
+static int
+dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_data_t *signature, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_sign_recover: started\n");
+
+ /* check mechanism */
+ switch (DCA_MECH_FROM_CTX(ctx)) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);
+
+ return (error);
+}
+
+static int
+dca_sign_recover_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc = (dca_t *)provider;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ instance = ddi_get_instance(softc->dca_dip);
+ DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");
+
+ if (ctx_template != NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsaatomic(provider, session_id, mechanism, key,
+ data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type"
+ " 0x%llx\n", (unsigned long long)mechanism->cm_type);
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/*
+ * Verify entry points.
+ */
+
+/* ARGSUSED */
+static int
+dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_verify_init: started\n");
+
+ if (ctx_template != NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
+ break;
+ case DSA_MECH_INFO_TYPE:
+ error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
+ DCA_DSA_VRFY);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
+ "0x%llx\n", (unsigned long long)mechanism->cm_type);
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);
+
+ if (error == CRYPTO_SUCCESS)
+ dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
+ &softc->dca_ctx_list_lock);
+
+ return (error);
+}
+
+static int
+dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_verify: started\n");
+
+ /* check mechanism */
+ switch (DCA_MECH_FROM_CTX(ctx)) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
+ break;
+ case DSA_MECH_INFO_TYPE:
+ error = dca_dsa_verify(ctx, data, signature, req);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_verify: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_MECHANISM_INVALID;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_verify_update: started\n");
+
+ cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+
+ DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_MECHANISM_INVALID;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_verify_final: started\n");
+
+ cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+
+ DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);
+
+ return (error);
+}
+
+static int
+dca_verify_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc = (dca_t *)provider;
+
+ DBG(softc, DENTRY, "dca_verify_atomic: started\n");
+
+ if (ctx_template != NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsaatomic(provider, session_id, mechanism, key,
+ signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
+ break;
+ case DSA_MECH_INFO_TYPE:
+ error = dca_dsaatomic(provider, session_id, mechanism, key,
+ data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
+ "0x%llx\n", (unsigned long long)mechanism->cm_type);
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/* ARGSUSED */
+static int
+dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
+ crypto_req_handle_t req)
+{
+ int error = CRYPTO_MECHANISM_INVALID;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_verify_recover_init: started\n");
+
+ if (ctx_template != NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type"
+ " 0x%llx\n", (unsigned long long)mechanism->cm_type);
+ }
+
+ DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);
+
+ if (error == CRYPTO_SUCCESS)
+ dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
+ &softc->dca_ctx_list_lock);
+
+ return (error);
+}
+
+static int
+dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
+ crypto_data_t *data, crypto_req_handle_t req)
+{
+ int error = CRYPTO_MECHANISM_INVALID;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
+ return (CRYPTO_OPERATION_NOT_INITIALIZED);
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_verify_recover: started\n");
+
+ /* check mechanism */
+ switch (DCA_MECH_FROM_CTX(ctx)) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+ }
+
+ DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);
+
+ return (error);
+}
+
+static int
+dca_verify_recover_atomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
+ crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
+{
+ int error = CRYPTO_MECHANISM_INVALID;
+ dca_t *softc = (dca_t *)provider;
+
+ DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");
+
+ if (ctx_template != NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ /* check mechanism */
+ switch (mechanism->cm_type) {
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ error = dca_rsaatomic(provider, session_id, mechanism, key,
+ signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
+ break;
+ default:
+ cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
+ "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ DBG(softc, DENTRY,
+ "dca_verify_recover_atomic: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/*
+ * Random number entry points.
+ */
+
+/* ARGSUSED */
+static int
+dca_generate_random(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id,
+ uchar_t *buf, size_t len, crypto_req_handle_t req)
+{
+ int error = CRYPTO_FAILED;
+ dca_t *softc = (dca_t *)provider;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ instance = ddi_get_instance(softc->dca_dip);
+ DBG(softc, DENTRY, "dca_generate_random: started");
+
+ error = dca_rng(softc, buf, len, req);
+
+ DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);
+
+ return (error);
+}
+
+/*
+ * Context management entry points.
+ */
+
+int
+dca_free_context(crypto_ctx_t *ctx)
+{
+ int error = CRYPTO_SUCCESS;
+ dca_t *softc;
+ /* LINTED E_FUNC_SET_NOT_USED */
+ int instance;
+
+ /* extract softc and instance number from context */
+ DCA_SOFTC_FROM_CTX(ctx, softc, instance);
+ DBG(softc, DENTRY, "dca_free_context: entered");
+
+ if (ctx->cc_provider_private == NULL)
+ return (error);
+
+ dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);
+
+ error = dca_free_context_low(ctx);
+
+ DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);
+
+ return (error);
+}
+
+static int
+dca_free_context_low(crypto_ctx_t *ctx)
+{
+ int error = CRYPTO_SUCCESS;
+
+ /* check mechanism */
+ switch (DCA_MECH_FROM_CTX(ctx)) {
+ case DES_CBC_MECH_INFO_TYPE:
+ case DES3_CBC_MECH_INFO_TYPE:
+ dca_3desctxfree(ctx);
+ break;
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ dca_rsactxfree(ctx);
+ break;
+ case DSA_MECH_INFO_TYPE:
+ dca_dsactxfree(ctx);
+ break;
+ default:
+ /* Should never reach here */
+ cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
+ "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
+ error = CRYPTO_MECHANISM_INVALID;
+ }
+
+ return (error);
+}
+
+
+/* Free any remaining private contexts; called from detach. */
+static void
+dca_free_context_list(dca_t *dca)
+{
+ dca_listnode_t *node;
+ crypto_ctx_t ctx;
+
+ (void) memset(&ctx, 0, sizeof (ctx));
+ ctx.cc_provider = dca;
+
+ while ((node = dca_delist2(&dca->dca_ctx_list,
+ &dca->dca_ctx_list_lock)) != NULL) {
+ ctx.cc_provider_private = node;
+ (void) dca_free_context_low(&ctx);
+ }
+}
+
+static int
+ext_info_sym(crypto_provider_handle_t prov,
+ crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
+{
+ return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
+}
+
+static int
+ext_info_asym(crypto_provider_handle_t prov,
+ crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
+{
+ int rv;
+
+ rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
+ /* The asymmetric cipher slot supports random */
+ ext_info->ei_flags |= CRYPTO_EXTF_RNG;
+
+ return (rv);
+}
+
+/* ARGSUSED */
+static int
+ext_info_base(crypto_provider_handle_t prov,
+ crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
+{
+ dca_t *dca = (dca_t *)prov;
+ int len;
+
+ /* Label */
+ (void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
+ ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
+ len = strlen((char *)ext_info->ei_label);
+ (void) memset(ext_info->ei_label + len, ' ',
+ CRYPTO_EXT_SIZE_LABEL - len);
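+	/*
+	 * Note: the ei_* string fields are fixed-width and space-padded
+	 * (PKCS#11 style), not NUL-terminated, hence the memsets with ' '
+	 * rather than a trailing '\0'.
+	 */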
+
+ /* Manufacturer ID */
+ (void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
+ DCA_MANUFACTURER_ID);
+ len = strlen((char *)ext_info->ei_manufacturerID);
+ (void) memset(ext_info->ei_manufacturerID + len, ' ',
+ CRYPTO_EXT_SIZE_MANUF - len);
+
+ /* Model */
+	(void) sprintf((char *)ext_info->ei_model, "%s", dca->dca_model);
+
+ DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);
+
+ len = strlen((char *)ext_info->ei_model);
+ (void) memset(ext_info->ei_model + len, ' ',
+ CRYPTO_EXT_SIZE_MODEL - len);
+
+ /* Serial Number. Blank for Deimos */
+ (void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);
+
+ ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;
+
+ ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
+ ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
+ ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
+ ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
+ ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
+ ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
+ ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
+ ext_info->ei_hardware_version.cv_major = 0;
+ ext_info->ei_hardware_version.cv_minor = 0;
+ ext_info->ei_firmware_version.cv_major = 0;
+ ext_info->ei_firmware_version.cv_minor = 0;
+
+	/* Time. Need not be supplied for a token without a clock */
+ ext_info->ei_time[0] = '\000';
+
+ return (CRYPTO_SUCCESS);
+}
+
+static void
+dca_fma_init(dca_t *dca)
+{
+ ddi_iblock_cookie_t fm_ibc;
+ int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
+ DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
+ DDI_FM_ERRCB_CAPABLE;
+
+ /* Read FMA capabilities from dca.conf file (if present) */
+ dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
+ DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
+ fm_capabilities);
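+	/*
+	 * For example, a (hypothetical) dca.conf entry of "fm-capable=0;"
+	 * would disable all FMA capabilities for this driver.
+	 */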
+
+ DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);
+
+ /* Only register with IO Fault Services if we have some capability */
+ if (dca->fm_capabilities) {
+ dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
+ dca_devattr.devacc_attr_access = DDI_FLAGERR_ACC;
+ dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;
+
+ /* Register capabilities with IO Fault Services */
+ ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
+ DBG(dca, DWARN, "fm_capable() = 0x%x",
+ ddi_fm_capable(dca->dca_dip));
+
+ /*
+ * Initialize pci ereport capabilities if ereport capable
+ */
+ if (DDI_FM_EREPORT_CAP(dca->fm_capabilities))
+ pci_ereport_setup(dca->dca_dip);
+
+ /*
+ * Initialize callback mutex and register error callback if
+ * error callback capable.
+ */
+ if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
+ ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
+ (void *)dca);
+ }
+ } else {
+ /*
+		 * These fields have to be reset to their non-FMA defaults
+		 * if there are no FMA capabilities at runtime.
+ */
+ dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
+ dca_devattr.devacc_attr_access = DDI_DEFAULT_ACC;
+ dca_dmaattr.dma_attr_flags = 0;
+ }
+}
+
+
+static void
+dca_fma_fini(dca_t *dca)
+{
+ /* Only unregister FMA capabilities if we registered some */
+ if (dca->fm_capabilities) {
+
+ /*
+ * Release any resources allocated by pci_ereport_setup()
+ */
+ if (DDI_FM_EREPORT_CAP(dca->fm_capabilities)) {
+ pci_ereport_teardown(dca->dca_dip);
+ }
+
+ /*
+ * Free callback mutex and un-register error callback if
+ * error callback capable.
+ */
+ if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
+ ddi_fm_handler_unregister(dca->dca_dip);
+ }
+
+ /* Unregister from IO Fault Services */
+ ddi_fm_fini(dca->dca_dip);
+ DBG(dca, DWARN, "fm_capable() = 0x%x",
+ ddi_fm_capable(dca->dca_dip));
+ }
+}
+
+
+/*
+ * The IO fault service error handling callback function
+ */
+static int
+dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
+{
+ int rv;
+ dca_t *dca = (dca_t *)impl_data;
+ uint16_t pci_status;
+ ddi_fm_error_t dca_err;
+
+ rv = err->fme_status = DDI_FM_OK;
+ if (err->fme_flag == DDI_FM_ERR_EXPECTED) {
+ /*
+		 * dca never performs DDI_ACC_CAUTIOUS-protected operations,
+		 * but if it did, we would handle it here
+ */
+ return (rv);
+ }
+
+ /*
+	 * See if there is a PCI error as well. The updated pci_ereport_post
+	 * function requires a reinitialized ddi_fm_error_t structure with a
+	 * zero ena field.
+ */
+ bzero(&dca_err, sizeof (ddi_fm_error_t));
+ dca_err.fme_version = DDI_FME_VERSION;
+ dca_err.fme_flag = DDI_FM_ERR_UNEXPECTED;
+ pci_ereport_post(dip, &dca_err, &pci_status);
+ if (pci_status != 0) {
+ dca_failure(dca, DDI_DATAPATH_FAULT,
+ DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
+ "fault PCI in FMA callback.");
+
+ rv = err->fme_status = DDI_FM_FATAL;
+ return (rv);
+ }
+
+ return (rv);
+}
+
+
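+/*
+ * Check an access handle for FMA-detected errors; on error, post a PCI
+ * ereport and flag a datapath fault against the device.
+ */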
+static int
+dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
+ dca_fma_eclass_t eclass_index)
+{
+ ddi_fm_error_t de;
+ int version = 0;
+ uint16_t pci_status;
+
+ if (DDI_FM_EREPORT_CAP(dca->fm_capabilities)) {
+ ddi_fm_acc_err_get(handle, &de, version);
+ if (de.fme_status != DDI_FM_OK) {
+ pci_ereport_post(dca->dca_dip, &de, &pci_status);
+ dca_failure(dca, DDI_DATAPATH_FAULT,
+ eclass_index, fm_ena_increment(de.fme_ena),
+ CRYPTO_DEVICE_ERROR, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ return (DDI_SUCCESS);
+}
+
+int
+dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
+ dca_fma_eclass_t eclass_index)
+{
+ ddi_fm_error_t de;
+ int version = 0;
+ uint16_t pci_status;
+
+ if (DDI_FM_EREPORT_CAP(dca->fm_capabilities)) {
+ ddi_fm_dma_err_get(handle, &de, version);
+ if (de.fme_status != DDI_FM_OK) {
+ pci_ereport_post(dca->dca_dip, &de, &pci_status);
+ dca_failure(dca, DDI_DATAPATH_FAULT,
+ eclass_index, fm_ena_increment(de.fme_ena),
+ CRYPTO_DEVICE_ERROR, "");
+ return (DDI_FAILURE);
+ }
+ }
+
+ return (DDI_SUCCESS);
+}
+
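+/*
+ * Generate a fresh ENA (Error Numeric Association) when none is supplied,
+ * or advance an existing one so that related ereports form a chain.
+ */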
+static uint64_t
+dca_ena(uint64_t ena)
+{
+ if (ena == 0)
+ ena = fm_ena_generate(0, FM_ENA_FMT1);
+ else
+ ena = fm_ena_increment(ena);
+ return (ena);
+}
+
+static char *
+dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
+{
+ if (strstr(model, "500"))
+ return (dca_fma_eclass_sca500[index]);
+ else
+ return (dca_fma_eclass_sca1000[index]);
+}
diff --git a/usr/src/uts/common/crypto/io/dca.conf b/usr/src/uts/common/crypto/io/dca.conf
new file mode 100644
index 0000000000..aae8e3b075
--- /dev/null
+++ b/usr/src/uts/common/crypto/io/dca.conf
@@ -0,0 +1,30 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+
+#
+#pragma ident "%Z%%M% %I% %E% SMI"
+#
+#
+ddi-forceattach=1;
diff --git a/usr/src/uts/common/crypto/io/dca_3des.c b/usr/src/uts/common/crypto/io/dca_3des.c
new file mode 100644
index 0000000000..33d5f121d8
--- /dev/null
+++ b/usr/src/uts/common/crypto/io/dca_3des.c
@@ -0,0 +1,747 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Deimos - cryptographic acceleration based upon Broadcom 582x.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/kmem.h>
+#include <sys/note.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/dca.h>
+
+/*
+ * 3DES implementation.
+ */
+
+static int dca_3desstart(dca_t *, uint32_t, dca_request_t *);
+static void dca_3desdone(dca_request_t *, int);
+
+
+int
+dca_3des(crypto_ctx_t *ctx, crypto_data_t *in,
+ crypto_data_t *out, crypto_req_handle_t req, int flags)
+{
+ int len;
+ int rv;
+ dca_request_t *reqp = ctx->cc_provider_private;
+ dca_request_t *des_ctx = ctx->cc_provider_private;
+ dca_t *dca = ctx->cc_provider;
+ crypto_data_t *nin = &reqp->dr_ctx.in_dup;
+
+ len = dca_length(in);
+ if (len % DESBLOCK) {
+ DBG(dca, DWARN, "input not an integral number of DES blocks");
+ (void) dca_free_context(ctx);
+ if (flags & DR_DECRYPT) {
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ } else {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ }
+
+ /*
+ * If cd_miscdata non-null then this contains the IV.
+ */
+ if (in->cd_miscdata != NULL) {
+ uchar_t *p = (uchar_t *)in->cd_miscdata;
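+		/*
+		 * Pack the 8-byte IV as two big-endian 32-bit words,
+		 * e.g. bytes 00 01 02 ... 07 become iv[0] = 0x00010203
+		 * and iv[1] = 0x04050607.
+		 */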
+ des_ctx->dr_ctx.iv[0] = p[0]<<24 | p[1]<<16 | p[2]<<8 | p[3];
+ des_ctx->dr_ctx.iv[1] = p[4]<<24 | p[5]<<16 | p[6]<<8 | p[7];
+ }
+
+ /*
+ * In-place operations (input == out) are indicated by having a
+ * NULL output. In this case set the output to point to the input.
+ */
+ if (out == NULL) {
+ out = in;
+ }
+ if (len > dca_length(out)) {
+ DBG(dca, DWARN, "inadequate output space (need %d, got %d)",
+ len, dca_length(out));
+ out->cd_length = len;
+ /* Do not free the context since the app will call again */
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ if ((rv = dca_verifyio(in, out)) != CRYPTO_SUCCESS) {
+ (void) dca_free_context(ctx);
+ return (rv);
+ }
+
+ /* special handling for null-sized input buffers */
+ if (len == 0) {
+ out->cd_length = 0;
+ (void) dca_free_context(ctx);
+ return (CRYPTO_SUCCESS);
+ }
+
+ /*
+ * Make a local copy of the input crypto_data_t structure. This
+ * allows it to be manipulated locally and for dealing with in-place
+ * data (ie in == out). Note that "nin" has been pre-allocated,
+ * and only fields are copied, not actual data.
+ */
+ if ((rv = dca_dupcrypto(in, nin)) != CRYPTO_SUCCESS) {
+ (void) dca_free_context(ctx);
+ return (rv);
+ }
+
+ /* Set output to zero ready to take the processed data */
+ out->cd_length = 0;
+
+ reqp->dr_kcf_req = req;
+ reqp->dr_in = nin;
+ reqp->dr_out = out;
+ reqp->dr_job_stat = DS_3DESJOBS;
+ reqp->dr_byte_stat = DS_3DESBYTES;
+
+ rv = dca_3desstart(dca, flags, reqp);
+
+ /* Context will be freed in the kCF callback function otherwise */
+ if (rv != CRYPTO_QUEUED && rv != CRYPTO_BUFFER_TOO_SMALL) {
+ (void) dca_free_context(ctx);
+ }
+ return (rv);
+}
+
+
+void
+dca_3desctxfree(void *arg)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)arg;
+ dca_request_t *des_ctx = ctx->cc_provider_private;
+
+ if (des_ctx == NULL)
+ return;
+
+ des_ctx->dr_ctx.atomic = 0;
+ des_ctx->dr_ctx.ctx_cm_type = 0;
+ ctx->cc_provider_private = NULL;
+
+ if (des_ctx->destroy)
+ dca_destroyreq(des_ctx);
+ else
+ /* Return it to the pool */
+ dca_freereq(des_ctx);
+}
+
+int
+dca_3desupdate(crypto_ctx_t *ctx, crypto_data_t *in,
+ crypto_data_t *out, crypto_req_handle_t req, int flags)
+{
+ int len;
+ int rawlen;
+ int rv;
+ dca_request_t *reqp = ctx->cc_provider_private;
+ dca_request_t *des_ctx = ctx->cc_provider_private;
+ dca_t *dca = ctx->cc_provider;
+ crypto_data_t *nin = &reqp->dr_ctx.in_dup;
+
+ rawlen = dca_length(in) + des_ctx->dr_ctx.residlen;
+
+ len = ROUNDDOWN(rawlen, DESBLOCK);
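+	/*
+	 * Only whole DES blocks are processed in this pass; e.g. with
+	 * rawlen = 20 and DESBLOCK = 8, len = 16 and the trailing 4
+	 * bytes are carried as residual for a later pass.
+	 */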
+ /*
+ * If cd_miscdata non-null then this contains the IV.
+ */
+ if (in->cd_miscdata != NULL) {
+ uchar_t *p = (uchar_t *)in->cd_miscdata;
+ des_ctx->dr_ctx.iv[0] = p[0]<<24 | p[1]<<16 | p[2]<<8 | p[3];
+ des_ctx->dr_ctx.iv[1] = p[4]<<24 | p[5]<<16 | p[6]<<8 | p[7];
+ }
+
+ /*
+ * In-place operations (in == out) are indicated by having a
+ * NULL output. In this case set the output to point to the input.
+ */
+ if (out == NULL) {
+ out = in;
+ }
+ if (len > dca_length(out)) {
+ DBG(dca, DWARN, "not enough output space (need %d, got %d)",
+ len, dca_length(out));
+ out->cd_length = len;
+ /* Do not free the context since the app will call again */
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ if ((rv = dca_verifyio(in, out)) != CRYPTO_SUCCESS) {
+ (void) dca_free_context(ctx);
+ return (rv);
+ }
+
+ reqp->dr_kcf_req = req;
+
+ /*
+ * From here on out, we are committed.
+ */
+
+ if (len == 0) {
+ /*
+ * No blocks being encrypted, so we just accumulate the
+ * input for the next pass and return.
+ */
+ if ((rv = dca_getbufbytes(in, 0,
+ (rawlen % DESBLOCK) - des_ctx->dr_ctx.residlen,
+ des_ctx->dr_ctx.resid + des_ctx->dr_ctx.residlen)) !=
+ CRYPTO_SUCCESS) {
+			DBG(dca, DWARN, "dca_3desupdate: dca_getbufbytes() "
+			    "failed for residual-only pass");
+ dca_freereq(reqp);
+ return (rv);
+ }
+ des_ctx->dr_ctx.residlen = rawlen % DESBLOCK;
+
+ out->cd_length = 0;
+ /*
+ * Do not free the context here since it will be done
+ * in the final function
+ */
+ return (CRYPTO_SUCCESS);
+ }
+
+ /*
+ * Set up rbuf for previous residual data.
+ */
+ if (des_ctx->dr_ctx.residlen) {
+ bcopy(des_ctx->dr_ctx.resid, des_ctx->dr_ctx.activeresid,
+ des_ctx->dr_ctx.residlen);
+ des_ctx->dr_ctx.activeresidlen = des_ctx->dr_ctx.residlen;
+ }
+
+ /*
+ * Locate and save residual data for next encrypt_update.
+ */
+ if ((rv = dca_getbufbytes(in, len - des_ctx->dr_ctx.residlen,
+ rawlen % DESBLOCK, des_ctx->dr_ctx.resid)) != CRYPTO_SUCCESS) {
+ DBG(dca, DWARN, "dca_3desupdate: dca_getbufbytes() failed");
+ (void) dca_free_context(ctx);
+ return (rv);
+ }
+
+ /* Calculate new residual length. */
+ des_ctx->dr_ctx.residlen = rawlen % DESBLOCK;
+
+ /*
+ * Make a local copy of the input crypto_data_t structure. This
+ * allows it to be manipulated locally and for dealing with in-place
+	 * data (i.e. in == out).
+ */
+ if ((rv = dca_dupcrypto(in, nin)) != CRYPTO_SUCCESS) {
+ (void) dca_free_context(ctx);
+ return (rv);
+ }
+
+ /* Set output to zero ready to take the processed data */
+ out->cd_length = 0;
+
+ reqp->dr_in = nin;
+ reqp->dr_out = out;
+ reqp->dr_job_stat = DS_3DESJOBS;
+ reqp->dr_byte_stat = DS_3DESBYTES;
+
+ rv = dca_3desstart(dca, flags, reqp);
+
+ /*
+ * As this is multi-part the context is cleared on success
+ * (CRYPTO_QUEUED) in dca_3desfinal().
+ */
+
+ if (rv != CRYPTO_QUEUED && rv != CRYPTO_BUFFER_TOO_SMALL) {
+ (void) dca_free_context(ctx);
+ }
+ return (rv);
+}
+
+int
+dca_3desfinal(crypto_ctx_t *ctx, crypto_data_t *out, int mode)
+{
+ dca_request_t *des_ctx = ctx->cc_provider_private;
+ dca_t *dca = ctx->cc_provider;
+ int rv = CRYPTO_SUCCESS;
+
+ ASSERT(ctx->cc_provider_private != NULL);
+ /*
+ * There must be no unprocessed ciphertext/plaintext.
+ * This happens if the length of the last data is
+ * not a multiple of the DES block length.
+ */
+ if (des_ctx->dr_ctx.residlen != 0) {
+ DBG(dca, DWARN, "dca_3desfinal: invalid nonzero residual");
+ if (mode & DR_DECRYPT) {
+ rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
+ } else {
+ rv = CRYPTO_DATA_LEN_RANGE;
+ }
+ }
+ (void) dca_free_context(ctx);
+ out->cd_length = 0;
+ return (rv);
+}
+
+int
+dca_3desatomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *input, crypto_data_t *output,
+ int kmflag, crypto_req_handle_t req, int mode)
+{
+ crypto_ctx_t ctx; /* on the stack */
+ int rv;
+
+ ctx.cc_provider = provider;
+ ctx.cc_session = session_id;
+
+ /*
+ * Input must be a multiple of the block size. This test only
+ * works for non-padded mechanisms when the blocksize is 2^N.
+ */
+ if ((dca_length(input) & (DESBLOCK - 1)) != 0) {
+ DBG(NULL, DWARN, "dca_3desatomic: input not multiple of BS");
+ if (mode & DR_DECRYPT) {
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ } else {
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
+ }
+
+ rv = dca_3desctxinit(&ctx, mechanism, key, kmflag, mode);
+ if (rv != CRYPTO_SUCCESS) {
+ DBG(NULL, DWARN, "dca_3desatomic: dca_3desctxinit() failed");
+ return (rv);
+ }
+
+ /*
+ * Set the atomic flag so that the hardware callback function
+ * will free the context.
+ */
+ ((dca_request_t *)ctx.cc_provider_private)->dr_ctx.atomic = 1;
+
+ rv = dca_3des(&ctx, input, output, req, mode);
+ if ((rv != CRYPTO_QUEUED) && (rv != CRYPTO_SUCCESS)) {
+ DBG(NULL, DWARN, "dca_3desatomic: dca_3des() failed");
+ output->cd_length = 0;
+ }
+
+ /*
+ * The features of dca_3desfinal() are implemented within
+ * dca_3desdone() due to the asynchronous nature of dca_3des().
+ */
+
+ /*
+ * The context will be freed in the hardware callback function if it
+ * is queued
+ */
+ if (rv != CRYPTO_QUEUED)
+ dca_3desctxfree(&ctx);
+
+ return (rv);
+}
+
+int
+dca_3desstart(dca_t *dca, uint32_t flags, dca_request_t *reqp)
+{
+ size_t len;
+ crypto_data_t *in = reqp->dr_in;
+ int rv;
+ dca_request_t *ctx = reqp;
+ uint32_t iv[2];
+
+ /*
+ * Preconditions:
+ * 1) in and out point to the "right" buffers.
+ * 2) in->b_bcount - in->b_resid == initial offset
+ * 3) likewise for out
+ * 4) there is enough space in the output
+ * 5) we perform a block for block encrypt
+ */
+ len = ctx->dr_ctx.activeresidlen + dca_length(in);
+ len = ROUNDDOWN(min(len, MAXPACKET), DESBLOCK);
+ reqp->dr_pkt_length = (uint16_t)len;
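+	/*
+	 * Each pass is capped at MAXPACKET bytes and block-aligned; if
+	 * input remains after this pass, dca_3desdone() reschedules
+	 * another one.
+	 */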
+
+ /* collect IVs for this pass */
+ iv[0] = ctx->dr_ctx.iv[0];
+ iv[1] = ctx->dr_ctx.iv[1];
+
+ /*
+ * And also, for decrypt, collect the IV for the next pass. For
+ * decrypt, the IV must be collected BEFORE decryption, or else
+ * we will lose it. (For encrypt, we grab the IV AFTER encryption,
+	 * in dca_3desdone.)
+ */
+ if (flags & DR_DECRYPT) {
+ uchar_t ivstore[DESBLOCK];
+ uchar_t *ivp = ivstore;
+
+ /* get last 8 bytes of ciphertext for IV of next op */
+ /*
+		 * If we're processing only a DESBLOCK's worth of data
+ * and there is active residual present then it will be
+ * needed for the IV also.
+ */
+ if ((len == DESBLOCK) && ctx->dr_ctx.activeresidlen) {
+ /* Bring the active residual into play */
+ bcopy(ctx->dr_ctx.activeresid, ivstore,
+ ctx->dr_ctx.activeresidlen);
+ rv = dca_getbufbytes(in, 0,
+ DESBLOCK - ctx->dr_ctx.activeresidlen,
+ ivstore + ctx->dr_ctx.activeresidlen);
+ } else {
+ rv = dca_getbufbytes(in,
+ len - DESBLOCK - ctx->dr_ctx.activeresidlen,
+ DESBLOCK, ivstore);
+ }
+
+ if (rv != CRYPTO_SUCCESS) {
+ DBG(dca, DWARN,
+ "dca_3desstart: dca_getbufbytes() failed");
+ return (rv);
+ }
+
+ /* store as a pair of native 32-bit values */
+ ctx->dr_ctx.iv[0] =
+ ivp[0]<<24 | ivp[1]<<16 | ivp[2]<<8 | ivp[3];
+ ctx->dr_ctx.iv[1] =
+ ivp[4]<<24 | ivp[5]<<16 | ivp[6]<<8 | ivp[7];
+ }
+
+ /* For now we force a pullup. Add direct DMA later. */
+ reqp->dr_flags &= ~(DR_SCATTER | DR_GATHER);
+ if ((len < dca_mindma) || (ctx->dr_ctx.activeresidlen > 0) ||
+ dca_sgcheck(dca, reqp->dr_in, DCA_SG_CONTIG) ||
+ dca_sgcheck(dca, reqp->dr_out, DCA_SG_WALIGN)) {
+ reqp->dr_flags |= DR_SCATTER | DR_GATHER;
+ }
+
+ /* Try to do direct DMA. */
+ if (!(reqp->dr_flags & (DR_SCATTER | DR_GATHER))) {
+ if (dca_bindchains(reqp, len, len) == DDI_SUCCESS) {
+ reqp->dr_in->cd_offset += len;
+ reqp->dr_in->cd_length -= len;
+ } else {
+ DBG(dca, DWARN,
+ "dca_3desstart: dca_bindchains() failed");
+ return (CRYPTO_DEVICE_ERROR);
+ }
+ }
+
+ /* gather the data into the device */
+ if (reqp->dr_flags & DR_GATHER) {
+ rv = dca_resid_gather(in, (char *)ctx->dr_ctx.activeresid,
+ &ctx->dr_ctx.activeresidlen, reqp->dr_ibuf_kaddr, len);
+ if (rv != CRYPTO_SUCCESS) {
+ DBG(dca, DWARN,
+ "dca_3desstart: dca_resid_gather() failed");
+ return (rv);
+ }
+		/*
+		 * Sync the gathered input for the device and verify the
+		 * DMA handle before programming the input chain.
+		 */
+ (void) ddi_dma_sync(reqp->dr_ibuf_dmah, 0, len,
+ DDI_DMA_SYNC_FORDEV);
+ if (dca_check_dma_handle(dca, reqp->dr_ibuf_dmah,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ reqp->destroy = TRUE;
+ return (CRYPTO_DEVICE_ERROR);
+ }
+
+ reqp->dr_in_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
+ reqp->dr_in_next = reqp->dr_ibuf_head.dc_next_paddr;
+ if (len > reqp->dr_ibuf_head.dc_buffer_length)
+ reqp->dr_in_len = reqp->dr_ibuf_head.dc_buffer_length;
+ else
+ reqp->dr_in_len = len;
+ }
+ /*
+ * Setup for scattering the result back out
+ * The output buffer is a multi-entry chain for x86 and
+ * a single entry chain for Sparc.
+ * Use the actual length if the first entry is sufficient.
+ */
+ if (reqp->dr_flags & DR_SCATTER) {
+ reqp->dr_out_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
+ reqp->dr_out_next = reqp->dr_obuf_head.dc_next_paddr;
+ if (len > reqp->dr_obuf_head.dc_buffer_length)
+ reqp->dr_out_len = reqp->dr_obuf_head.dc_buffer_length;
+ else
+ reqp->dr_out_len = len;
+ }
+
+ reqp->dr_flags |= flags;
+ reqp->dr_callback = dca_3desdone;
+
+ /* write out the context structure */
+ PUTCTX32(reqp, CTX_3DESIVHI, iv[0]);
+ PUTCTX32(reqp, CTX_3DESIVLO, iv[1]);
+
+ /* schedule the work by doing a submit */
+ return (dca_start(dca, reqp, MCR1, 1));
+}
+
+void
+dca_3desdone(dca_request_t *reqp, int errno)
+{
+ crypto_data_t *out = reqp->dr_out;
+ dca_request_t *ctx = reqp;
+ ASSERT(ctx != NULL);
+
+ if (errno == CRYPTO_SUCCESS) {
+ size_t off;
+ /*
+ * Save the offset: this has to be done *before* dca_scatter
+ * modifies the buffer. We take the initial offset into the
+ * first buf, and add that to the total packet size to find
+ * the end of the packet.
+ */
+ off = dca_length(out) + reqp->dr_pkt_length - DESBLOCK;
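+		/*
+		 * e.g. with an empty output so far and a 64-byte packet,
+		 * off = 56, the start of the final ciphertext block.
+		 */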
+
+ if (reqp->dr_flags & DR_SCATTER) {
+ (void) ddi_dma_sync(reqp->dr_obuf_dmah, 0,
+ reqp->dr_out_len, DDI_DMA_SYNC_FORKERNEL);
+ if (dca_check_dma_handle(reqp->dr_dca,
+ reqp->dr_obuf_dmah, DCA_FM_ECLASS_NONE) !=
+ DDI_SUCCESS) {
+ reqp->destroy = TRUE;
+ errno = CRYPTO_DEVICE_ERROR;
+ goto errout;
+ }
+
+ errno = dca_scatter(reqp->dr_obuf_kaddr,
+ reqp->dr_out, reqp->dr_out_len, 0);
+ if (errno != CRYPTO_SUCCESS) {
+ DBG(NULL, DWARN,
+ "dca_3desdone: dca_scatter() failed");
+ goto errout;
+ }
+
+ } else {
+ /* we've processed some more data */
+ out->cd_length += reqp->dr_pkt_length;
+ }
+
+
+ /*
+ * For encryption only, we have to grab the IV for the
+ * next pass AFTER encryption.
+ */
+ if (reqp->dr_flags & DR_ENCRYPT) {
+ uchar_t ivstore[DESBLOCK];
+ uchar_t *iv = ivstore;
+
+ /* get last 8 bytes for IV of next op */
+ errno = dca_getbufbytes(out, off, DESBLOCK, iv);
+ if (errno != CRYPTO_SUCCESS) {
+ DBG(NULL, DWARN,
+ "dca_3desdone: dca_getbufbytes() failed");
+ goto errout;
+ }
+ /* store as a pair of native 32-bit values */
+ ctx->dr_ctx.iv[0] =
+ iv[0]<<24 | iv[1]<<16 | iv[2]<<8 | iv[3];
+ ctx->dr_ctx.iv[1] =
+ iv[4]<<24 | iv[5]<<16 | iv[6]<<8 | iv[7];
+ }
+
+ /*
+ * If there is more to do, then reschedule another
+ * pass.
+ */
+ if (dca_length(reqp->dr_in) >= 8) {
+ errno = dca_3desstart(reqp->dr_dca, reqp->dr_flags,
+ reqp);
+ if (errno == CRYPTO_QUEUED) {
+ return;
+ }
+ }
+ }
+
+errout:
+
+ /*
+ * If this is an atomic operation perform the final function
+	 * tasks (equivalent to dca_3desfinal()).
+ */
+ if (reqp->dr_ctx.atomic) {
+ if ((errno == CRYPTO_SUCCESS) && (ctx->dr_ctx.residlen != 0)) {
+ DBG(NULL, DWARN,
+ "dca_3desdone: invalid nonzero residual");
+ if (reqp->dr_flags & DR_DECRYPT) {
+ errno = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
+ } else {
+ errno = CRYPTO_DATA_LEN_RANGE;
+ }
+ }
+ }
+
+ ASSERT(reqp->dr_kcf_req != NULL);
+ /* notify framework that request is completed */
+ crypto_op_notification(reqp->dr_kcf_req, errno);
+ DBG(NULL, DINTR,
+ "dca_3desdone: returning %d to the kef via crypto_op_notification",
+ errno);
+
+	/* This has to be done after notifying the framework */
+ if (reqp->dr_ctx.atomic) {
+ reqp->dr_context = NULL;
+ reqp->dr_ctx.atomic = 0;
+ reqp->dr_ctx.ctx_cm_type = 0;
+ if (reqp->destroy)
+ dca_destroyreq(reqp);
+ else
+ dca_freereq(reqp);
+ }
+}
+
+/* ARGSUSED */
+int
+dca_3desctxinit(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, int kmflag, int flags)
+{
+ dca_request_t *des_ctx;
+ dca_t *dca = ctx->cc_provider;
+ uchar_t *param;
+ uchar_t *value;
+ size_t paramsz;
+ unsigned len;
+ int i, j;
+
+ paramsz = mechanism->cm_param_len;
+ param = (uchar_t *)mechanism->cm_param;
+ if ((paramsz != 0) && (paramsz != DES_IV_LEN)) {
+ DBG(NULL, DWARN,
+ "dca_3desctxinit: parameter(IV) length not %d (%d)",
+ DES_IV_LEN, paramsz);
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+
+ if ((des_ctx = dca_getreq(dca, MCR1, 1)) == NULL) {
+ dca_error(dca, "unable to allocate request for 3DES");
+ return (CRYPTO_HOST_MEMORY);
+ }
+ /*
+ * Identify and store the IV as a pair of native 32-bit words.
+ *
+ * If cm_param == NULL then the IV comes from the cd_miscdata field
+ * in the crypto_data structure.
+ */
+ if (param != NULL) {
+ ASSERT(paramsz == DES_IV_LEN);
+ des_ctx->dr_ctx.iv[0] = param[0]<<24 | param[1]<<16 |
+ param[2]<<8 | param[3];
+ des_ctx->dr_ctx.iv[1] = param[4]<<24 | param[5]<<16 |
+ param[6]<<8 | param[7];
+ }
+ des_ctx->dr_ctx.residlen = 0;
+ des_ctx->dr_ctx.activeresidlen = 0;
+ des_ctx->dr_ctx.ctx_cm_type = mechanism->cm_type;
+ ctx->cc_provider_private = des_ctx;
+
+ if (key->ck_format != CRYPTO_KEY_RAW) {
+		DBG(NULL, DWARN, "dca_3desctxinit: only raw crypto key "
+		    "type is supported with DES/3DES");
+ dca_3desctxfree(ctx);
+ return (CRYPTO_KEY_TYPE_INCONSISTENT);
+ }
+
+ len = key->ck_length;
+ value = (uchar_t *)key->ck_data;
+
+ if (flags & DR_TRIPLE) {
+ /* 3DES */
+ switch (len) {
+ case 192:
+ for (i = 0; i < 6; i++) {
+ des_ctx->dr_ctx.key[i] = 0;
+ for (j = 0; j < 4; j++) {
+ des_ctx->dr_ctx.key[i] <<= 8;
+ des_ctx->dr_ctx.key[i] |= *value;
+ value++;
+ }
+ }
+ break;
+
+ case 128:
+ for (i = 0; i < 4; i++) {
+ des_ctx->dr_ctx.key[i] = 0;
+ for (j = 0; j < 4; j++) {
+ des_ctx->dr_ctx.key[i] <<= 8;
+ des_ctx->dr_ctx.key[i] |= *value;
+ value++;
+ }
+ }
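+			/*
+			 * Two-key 3DES: reuse the first DES key as the
+			 * third, i.e. EDE with (K1, K2, K1).
+			 */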
+ des_ctx->dr_ctx.key[4] = des_ctx->dr_ctx.key[0];
+ des_ctx->dr_ctx.key[5] = des_ctx->dr_ctx.key[1];
+ break;
+
+ default:
+ DBG(NULL, DWARN, "Incorrect 3DES keysize (%d)", len);
+ dca_3desctxfree(ctx);
+ return (CRYPTO_KEY_SIZE_RANGE);
+ }
+ } else {
+ /* single DES */
+ if (len != 64) {
+ DBG(NULL, DWARN, "Incorrect DES keysize (%d)", len);
+ dca_3desctxfree(ctx);
+ return (CRYPTO_KEY_SIZE_RANGE);
+ }
+ des_ctx->dr_ctx.key[0] =
+ value[0]<<24 | value[1]<<16 | value[2]<<8 | value[3];
+ des_ctx->dr_ctx.key[1] =
+ value[4]<<24 | value[5]<<16 | value[6]<<8 | value[7];
+ /* for single des just repeat des key */
+ des_ctx->dr_ctx.key[4] =
+ des_ctx->dr_ctx.key[2] = des_ctx->dr_ctx.key[0];
+ des_ctx->dr_ctx.key[5] =
+ des_ctx->dr_ctx.key[3] = des_ctx->dr_ctx.key[1];
+ }
+
+ /*
+	 * Set up the context here so that we do not need to set it up
+	 * for every update.
+ */
+ PUTCTX16(des_ctx, CTX_LENGTH, CTX_3DES_LENGTH);
+ PUTCTX16(des_ctx, CTX_CMD, CMD_3DES);
+ PUTCTX32(des_ctx, CTX_3DESDIRECTION,
+ flags & DR_ENCRYPT ? CTX_3DES_ENCRYPT : CTX_3DES_DECRYPT);
+ PUTCTX32(des_ctx, CTX_3DESKEY1HI, des_ctx->dr_ctx.key[0]);
+ PUTCTX32(des_ctx, CTX_3DESKEY1LO, des_ctx->dr_ctx.key[1]);
+ PUTCTX32(des_ctx, CTX_3DESKEY2HI, des_ctx->dr_ctx.key[2]);
+ PUTCTX32(des_ctx, CTX_3DESKEY2LO, des_ctx->dr_ctx.key[3]);
+ PUTCTX32(des_ctx, CTX_3DESKEY3HI, des_ctx->dr_ctx.key[4]);
+ PUTCTX32(des_ctx, CTX_3DESKEY3LO, des_ctx->dr_ctx.key[5]);
+
+ return (CRYPTO_SUCCESS);
+}
diff --git a/usr/src/uts/common/crypto/io/dca_debug.c b/usr/src/uts/common/crypto/io/dca_debug.c
new file mode 100644
index 0000000000..7e662630a2
--- /dev/null
+++ b/usr/src/uts/common/crypto/io/dca_debug.c
@@ -0,0 +1,92 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Deimos - cryptographic acceleration based upon Broadcom 582x.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/cmn_err.h>
+#include <sys/varargs.h>
+#include <sys/crypto/dca.h>
+
+/*
+ * Debugging and messaging.
+ */
+#if DEBUG
+static int dca_debug = 0;
+
+void
+dca_dprintf(dca_t *dca, int level, const char *fmt, ...)
+{
+ va_list ap;
+ char buf[256];
+
+ if (dca_debug & level) {
+ va_start(ap, fmt);
+ if (dca == NULL) {
+ (void) sprintf(buf, "%s\n", fmt);
+ } else {
+ (void) sprintf(buf, "%s/%d: %s\n",
+ ddi_driver_name(dca->dca_dip),
+ ddi_get_instance(dca->dca_dip), fmt);
+ }
+ vprintf(buf, ap);
+ va_end(ap);
+ }
+}
+#endif
+
+void
+dca_error(dca_t *dca, const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ dca_dipverror(dca->dca_dip, fmt, ap);
+ va_end(ap);
+}
+
+void
+dca_diperror(dev_info_t *dip, const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ dca_dipverror(dip, fmt, ap);
+ va_end(ap);
+}
+
+void
+dca_dipverror(dev_info_t *dip, const char *fmt, va_list ap)
+{
+ char buf[256];
+ (void) sprintf(buf, "%s%d: %s", ddi_driver_name(dip),
+ ddi_get_instance(dip), fmt);
+ vcmn_err(CE_WARN, buf, ap);
+}
diff --git a/usr/src/uts/common/crypto/io/dca_dsa.c b/usr/src/uts/common/crypto/io/dca_dsa.c
new file mode 100644
index 0000000000..8e16346940
--- /dev/null
+++ b/usr/src/uts/common/crypto/io/dca_dsa.c
@@ -0,0 +1,582 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Deimos - cryptographic acceleration based upon Broadcom 582x.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/kmem.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/ioctl.h>
+#include <sys/crypto/dca.h>
+
+/*
+ * DSA implementation.
+ */
+
+static void dca_dsa_sign_done(dca_request_t *, int);
+static void dca_dsa_verify_done(dca_request_t *, int);
+
+
+int dca_dsa_sign(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *sig,
+ crypto_req_handle_t req);
+int dca_dsa_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *sig,
+ crypto_req_handle_t req);
+int dca_dsainit(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, int kmflag, int mode);
+
+
+int
+dca_dsa_sign(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *sig,
+ crypto_req_handle_t req)
+{
+ dca_request_t *reqp = ctx->cc_provider_private;
+ dca_t *dca = ctx->cc_provider;
+ int err;
+ int rv = CRYPTO_QUEUED;
+ caddr_t kaddr;
+ size_t buflen;
+
+ buflen = dca_length(data);
+ if (buflen != SHA1LEN) {
+ DBG(dca, DWARN, "dca_dsa_sign: data length != %d", SHA1LEN);
+ rv = CRYPTO_DATA_LEN_RANGE;
+ goto errout;
+ }
+
+ /* Return length needed to store the output. */
+ if (dca_length(sig) < DSASIGLEN) {
+ DBG(dca, DWARN,
+ "dca_dsa_sign: output buffer too short (%d < %d)",
+ dca_length(sig), DSASIGLEN);
+ sig->cd_length = DSASIGLEN;
+ rv = CRYPTO_BUFFER_TOO_SMALL;
+ goto errout;
+ }
+
+ /*
+ * Don't change the data values of the data crypto_data_t structure
+ * yet. Only reset the sig cd_length to zero before writing to it.
+ */
+
+ reqp->dr_job_stat = DS_DSASIGN;
+ reqp->dr_byte_stat = -1;
+ reqp->dr_in = data;
+ reqp->dr_out = sig;
+ reqp->dr_callback = dca_dsa_sign_done;
+
+ reqp->dr_kcf_req = req;
+ /* dca_gather() increments cd_offset & dec. cd_length by SHA1LEN. */
+ err = dca_gather(data, reqp->dr_ibuf_kaddr, SHA1LEN, 1);
+ if (err != CRYPTO_SUCCESS) {
+ DBG(dca, DWARN, "dca_dsa_sign: dca_gather() failed");
+ rv = err;
+ goto errout;
+ }
+
+
+ /* sync the input buffer */
+ (void) ddi_dma_sync(reqp->dr_ibuf_dmah, 0, SHA1LEN,
+ DDI_DMA_SYNC_FORDEV);
+ if (dca_check_dma_handle(dca, reqp->dr_ibuf_dmah,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ reqp->destroy = TRUE;
+ rv = CRYPTO_DEVICE_ERROR;
+ goto errout;
+ }
+
+ reqp->dr_in_paddr = reqp->dr_ibuf_paddr;
+ reqp->dr_in_next = 0;
+ reqp->dr_in_len = SHA1LEN;
+ reqp->dr_pkt_length = buflen;
+
+ /*
+ * The output requires *two* buffers, r followed by s.
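+	 * Each is a DSAPARTLEN-byte bignum, so the complete signature
+	 * fills DSASIGLEN (2 * DSAPARTLEN) bytes.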
+ */
+ kaddr = reqp->dr_ctx_kaddr + reqp->dr_offset;
+
+ /* r */
+ reqp->dr_out_paddr = reqp->dr_obuf_paddr;
+ reqp->dr_out_len = DSAPARTLEN;
+ reqp->dr_out_next = reqp->dr_ctx_paddr + reqp->dr_offset;
+
+ /* s */
+ PUTDESC32(reqp, kaddr, DESC_BUFADDR,
+ reqp->dr_obuf_paddr + DSAPARTLEN);
+ PUTDESC32(reqp, kaddr, DESC_NEXT, 0);
+ PUTDESC16(reqp, kaddr, DESC_RSVD, 0);
+ PUTDESC16(reqp, kaddr, DESC_LENGTH, DSAPARTLEN);
+
+ /* schedule the work by doing a submit */
+ rv = dca_start(dca, reqp, MCR2, 1);
+
+errout:
+
+ if (rv != CRYPTO_QUEUED && rv != CRYPTO_BUFFER_TOO_SMALL)
+ (void) dca_free_context(ctx);
+
+ return (rv);
+}
+
+static void
+dca_dsa_sign_done(dca_request_t *reqp, int errno)
+{
+ if (errno == CRYPTO_SUCCESS) {
+ (void) ddi_dma_sync(reqp->dr_obuf_dmah, 0, DSASIGLEN,
+ DDI_DMA_SYNC_FORKERNEL);
+ if (dca_check_dma_handle(reqp->dr_dca, reqp->dr_obuf_dmah,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ reqp->destroy = TRUE;
+ errno = CRYPTO_DEVICE_ERROR;
+ goto errout;
+ }
+ /*
+ * Set the sig cd_length to zero so it's ready to take the
+ * signature. Have already confirmed its size is adequate.
+ */
+ reqp->dr_out->cd_length = 0;
+ errno = dca_scatter(reqp->dr_obuf_kaddr,
+ reqp->dr_out, DSAPARTLEN, 1);
+ if (errno != CRYPTO_SUCCESS) {
+ DBG(reqp->dr_dca, DWARN,
+ "dca_dsa_sign_done: dca_scatter() failed");
+ goto errout;
+ }
+ errno = dca_scatter(reqp->dr_obuf_kaddr+DSAPARTLEN,
+ reqp->dr_out, DSAPARTLEN, 1);
+ if (errno != CRYPTO_SUCCESS) {
+ DBG(reqp->dr_dca, DWARN,
+ "dca_dsa_sign_done: dca_scatter() failed");
+ }
+ }
+errout:
+ ASSERT(reqp->dr_kcf_req != NULL);
+
+ /* notify framework that request is completed */
+ crypto_op_notification(reqp->dr_kcf_req, errno);
+ DBG(reqp->dr_dca, DINTR,
+ "dca_dsa_sign_done: rtn 0x%x to kef via crypto_op_notification",
+ errno);
+
+ /*
+ * For non-atomic operations, reqp will be freed in the kCF
+ * callback function since it may be needed again if
+ * CRYPTO_BUFFER_TOO_SMALL is returned to kCF
+ */
+ if (reqp->dr_ctx.atomic) {
+ crypto_ctx_t ctx;
+ ctx.cc_provider_private = reqp;
+ dca_dsactxfree(&ctx);
+ }
+}
+
+int
+dca_dsa_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *sig,
+ crypto_req_handle_t req)
+{
+ dca_request_t *reqp = ctx->cc_provider_private;
+ dca_t *dca = ctx->cc_provider;
+ int err;
+ int rv = CRYPTO_QUEUED;
+ caddr_t kaddr;
+
+ /* Impossible for verify to be an in-place operation. */
+ if (sig == NULL) {
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+
+ if (dca_length(data) != SHA1LEN) {
+ DBG(dca, DWARN, "dca_dsa_verify: input length != %d", SHA1LEN);
+ rv = CRYPTO_DATA_LEN_RANGE;
+ goto errout;
+ }
+
+ if (dca_length(sig) != DSASIGLEN) {
+ DBG(dca, DWARN, "dca_dsa_verify: signature length != %d",
+ DSASIGLEN);
+ rv = CRYPTO_SIGNATURE_LEN_RANGE;
+ goto errout;
+ }
+
+ /* Don't change the data & sig values for verify. */
+
+ reqp->dr_job_stat = DS_DSAVERIFY;
+ reqp->dr_byte_stat = -1;
+
+ /*
+ * Grab h, r and s.
+ */
+ err = dca_gather(data, reqp->dr_ibuf_kaddr, SHA1LEN, 1);
+ if (err != CRYPTO_SUCCESS) {
+ DBG(dca, DWARN,
+ "dca_dsa_vrfy: dca_gather() failed for h");
+ rv = err;
+ goto errout;
+ }
+ err = dca_gather(sig, reqp->dr_ibuf_kaddr+SHA1LEN, DSAPARTLEN, 1);
+ if (err != CRYPTO_SUCCESS) {
+ DBG(dca, DWARN,
+ "dca_dsa_vrfy: dca_gather() failed for r");
+ rv = err;
+ goto errout;
+ }
+ err = dca_gather(sig, reqp->dr_ibuf_kaddr+SHA1LEN+DSAPARTLEN,
+ DSAPARTLEN, 1);
+ if (err != CRYPTO_SUCCESS) {
+ DBG(dca, DWARN,
+ "dca_dsa_vrfy: dca_gather() failed for s");
+ rv = err;
+ goto errout;
+ }
+ /*
+	 * dca_gather() increments cd_offset and decrements cd_length as
+	 * it copies the data, so rewind the values to make them ready
+	 * for the final compare.
+ */
+ sig->cd_offset -= (DSAPARTLEN * 2);
+ sig->cd_length += (DSAPARTLEN * 2);
+ /* sync the input buffer */
+ (void) ddi_dma_sync(reqp->dr_ibuf_dmah, 0, SHA1LEN + DSAPARTLEN,
+ DDI_DMA_SYNC_FORDEV);
+
+ if (dca_check_dma_handle(dca, reqp->dr_ibuf_dmah,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ reqp->destroy = TRUE;
+ rv = CRYPTO_DEVICE_ERROR;
+ goto errout;
+ }
+
+ reqp->dr_in = data;
+ reqp->dr_out = sig;
+ reqp->dr_kcf_req = req;
+ reqp->dr_flags |= DR_SCATTER | DR_GATHER;
+ reqp->dr_callback = dca_dsa_verify_done;
+
+ /*
+	 * Input requires three buffers: m, followed by r, followed by s.
+ * In order to deal with things cleanly, we reverse the signature
+ * into the buffer and then fix up the pointers.
+ */
+ reqp->dr_pkt_length = SHA1LEN;
+
+ reqp->dr_in_paddr = reqp->dr_ibuf_paddr;
+ reqp->dr_in_len = SHA1LEN;
+ reqp->dr_in_next = reqp->dr_ctx_paddr + reqp->dr_offset;
+
+ reqp->dr_out_paddr = reqp->dr_obuf_paddr;
+ reqp->dr_out_len = DSAPARTLEN;
+ reqp->dr_out_next = 0;
+
+ /* setup 1st chain for r */
+ kaddr = reqp->dr_ctx_kaddr + reqp->dr_offset;
+ PUTDESC32(reqp, kaddr, DESC_BUFADDR, reqp->dr_ibuf_paddr + SHA1LEN);
+ PUTDESC32(reqp, kaddr, DESC_NEXT,
+ reqp->dr_ctx_paddr + reqp->dr_offset + DESC_SIZE);
+ PUTDESC16(reqp, kaddr, DESC_RSVD, 0);
+ PUTDESC16(reqp, kaddr, DESC_LENGTH, DSAPARTLEN);
+
+ /* and 2nd chain for s */
+ kaddr = reqp->dr_ctx_kaddr + reqp->dr_offset + DESC_SIZE;
+ PUTDESC32(reqp, kaddr, DESC_BUFADDR, reqp->dr_ibuf_paddr +
+ SHA1LEN + DSAPARTLEN);
+ PUTDESC32(reqp, kaddr, DESC_NEXT, 0);
+ PUTDESC16(reqp, kaddr, DESC_RSVD, 0);
+ PUTDESC16(reqp, kaddr, DESC_LENGTH, DSAPARTLEN);
+
+ /* schedule the work by doing a submit */
+ rv = dca_start(dca, reqp, MCR2, 1);
+
+errout:
+ if (rv != CRYPTO_QUEUED && rv != CRYPTO_BUFFER_TOO_SMALL) {
+ (void) dca_free_context(ctx);
+ }
+ return (rv);
+}
+
+static void
+dca_dsa_verify_done(dca_request_t *reqp, int errno)
+{
+ if (errno == CRYPTO_SUCCESS) {
+ int count = DSAPARTLEN;
+ crypto_data_t *sig = reqp->dr_out;
+ caddr_t daddr;
+
+ (void) ddi_dma_sync(reqp->dr_obuf_dmah, 0, count,
+ DDI_DMA_SYNC_FORKERNEL);
+ if (dca_check_dma_handle(reqp->dr_dca, reqp->dr_obuf_dmah,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ reqp->destroy = TRUE;
+ errno = CRYPTO_DEVICE_ERROR;
+ goto errout;
+ }
+
+ /* Can only handle a contiguous data buffer currently. */
+ if (dca_sgcheck(reqp->dr_dca, sig, DCA_SG_CONTIG)) {
+ errno = CRYPTO_SIGNATURE_INVALID;
+ goto errout;
+ }
+
+ if ((daddr = dca_bufdaddr(sig)) == NULL) {
+ errno = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+
+ if (dca_bcmp_reverse(daddr, reqp->dr_obuf_kaddr,
+ DSAPARTLEN) != 0) {
+ /* VERIFY FAILED */
+ errno = CRYPTO_SIGNATURE_INVALID;
+ }
+ }
+errout:
+ ASSERT(reqp->dr_kcf_req != NULL);
+
+ /* notify framework that request is completed */
+
+ crypto_op_notification(reqp->dr_kcf_req, errno);
+ DBG(reqp->dr_dca, DINTR,
+ "dca_dsa_verify_done: rtn 0x%x to kef via crypto_op_notification",
+ errno);
+
+ /*
+ * For non-atomic operations, reqp will be freed in the kCF
+ * callback function since it may be needed again if
+ * CRYPTO_BUFFER_TOO_SMALL is returned to kCF
+ */
+ if (reqp->dr_ctx.atomic) {
+ crypto_ctx_t ctx;
+ ctx.cc_provider_private = reqp;
+ dca_dsactxfree(&ctx);
+ }
+}
+
+/* ARGSUSED */
+int
+dca_dsainit(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, int kmflag, int mode)
+{
+ crypto_object_attribute_t *attr;
+ unsigned plen = 0, qlen = 0, glen = 0, xlen = 0;
+ uchar_t *p, *q, *g, *x;
+ dca_request_t *reqp = NULL;
+ dca_t *dca = (dca_t *)ctx->cc_provider;
+ int rv = CRYPTO_SUCCESS;
+ unsigned pbits, padjlen;
+ uint16_t ctxlen;
+ caddr_t kaddr;
+
+ if ((reqp = dca_getreq(dca, MCR2, 1)) == NULL) {
+ dca_error(dca,
+ "dca_dsainit: unable to allocate request for DSA");
+ rv = CRYPTO_HOST_MEMORY;
+ goto errout;
+ }
+
+ ctx->cc_provider_private = reqp;
+ reqp->dr_ctx.ctx_cm_type = mechanism->cm_type;
+
+ if ((attr = dca_get_key_attr(key)) == NULL) {
+ DBG(NULL, DWARN, "dca_dsainit: key attributes missing");
+ rv = CRYPTO_KEY_TYPE_INCONSISTENT;
+ goto errout;
+ }
+
+ /* Prime */
+ if (dca_attr_lookup_uint8_array(attr, key->ck_count, CKA_PRIME,
+ (void *) &p, &plen)) {
+ DBG(NULL, DWARN, "dca_dsainit: prime key value not present");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+
+ /* Subprime */
+ if (dca_attr_lookup_uint8_array(attr, key->ck_count, CKA_SUBPRIME,
+ (void *) &q, &qlen)) {
+ DBG(NULL, DWARN, "dca_dsainit: subprime key value not present");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+
+ /* Base */
+ if (dca_attr_lookup_uint8_array(attr, key->ck_count, CKA_BASE,
+ (void *) &g, &glen)) {
+ DBG(NULL, DWARN, "dca_dsainit: base key value not present");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+
+ /* Value */
+ if (dca_attr_lookup_uint8_array(attr, key->ck_count, CKA_VALUE,
+ (void *) &x, &xlen)) {
+ DBG(NULL, DWARN, "dca_dsainit: value key not present");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+
+ if (plen == 0 || qlen == 0 || glen == 0 || xlen == 0) {
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+
+ if (plen > DSA_MAX_KEY_LEN) {
+ /* maximum 1Kbit key */
+ DBG(NULL, DWARN, "dca_dsainit: maximum 1Kbit key (%d)", plen);
+ rv = CRYPTO_KEY_SIZE_RANGE;
+ goto errout;
+ }
+
+ if (qlen > DSAPARTLEN) {
+ DBG(NULL, DWARN, "dca_dsainit: q is too long (%d)", qlen);
+ rv = CRYPTO_KEY_SIZE_RANGE;
+ goto errout;
+ }
+
+ if (mode == DCA_DSA_SIGN && xlen > DSAPARTLEN) {
+ DBG(NULL, DWARN,
+ "dca_dsainit: private key is too long (%d)", xlen);
+ rv = CRYPTO_KEY_SIZE_RANGE;
+ goto errout;
+ }
+
+ /*
+	 * Set up the key portion of the request.
+ */
+
+ pbits = dca_bitlen(p, plen);
+ padjlen = dca_padfull(pbits);
+
+ /* accounts for leading context words */
+ if (mode == DCA_DSA_SIGN) {
+ ctxlen = CTX_DSABIGNUMS + DSAPARTLEN + (padjlen * 2) +
+ DSAPARTLEN;
+ PUTCTX16(reqp, CTX_CMD, CMD_DSASIGN);
+ } else {
+ ctxlen = CTX_DSABIGNUMS + DSAPARTLEN + (padjlen * 3);
+ PUTCTX16(reqp, CTX_CMD, CMD_DSAVERIFY);
+ }
+
+ PUTCTX16(reqp, CTX_LENGTH, ctxlen);
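+	/*
+	 * The message type is a SHA-1 digest, which is why sign/verify
+	 * insist on SHA1LEN bytes of input.
+	 */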
+ PUTCTX16(reqp, CTX_DSAMSGTYPE, CTX_DSAMSGTYPE_SHA1);
+ PUTCTX16(reqp, CTX_DSARSVD, 0);
+ if (mode == DCA_DSA_SIGN)
+ PUTCTX16(reqp, CTX_DSARNG, CTX_DSARNG_GEN);
+ else
+ PUTCTX16(reqp, CTX_DSARNG, 0);
+ PUTCTX16(reqp, CTX_DSAPLEN, pbits);
+
+ kaddr = reqp->dr_ctx_kaddr + CTX_DSABIGNUMS;
+
+ /* store the bignums */
+ dca_reverse(q, kaddr, qlen, DSAPARTLEN);
+ kaddr += DSAPARTLEN;
+
+ dca_reverse(p, kaddr, plen, padjlen);
+ kaddr += padjlen;
+
+ dca_reverse(g, kaddr, glen, padjlen);
+ kaddr += padjlen;
+
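+	/*
+	 * For sign, CKA_VALUE holds the private key x (a DSAPARTLEN
+	 * bignum); for verify it holds the public key y, padded out to
+	 * the adjusted prime length.
+	 */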
+ if (mode == DCA_DSA_SIGN) {
+ dca_reverse(x, kaddr, xlen, DSAPARTLEN);
+ kaddr += DSAPARTLEN;
+ } else {
+ dca_reverse(x, kaddr, xlen, padjlen);
+ kaddr += padjlen;
+ }
+
+ return (CRYPTO_SUCCESS);
+
+errout:
+
+ dca_dsactxfree(ctx);
+ return (rv);
+}
+
+void
+dca_dsactxfree(void *arg)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)arg;
+ dca_request_t *reqp = ctx->cc_provider_private;
+
+ if (reqp == NULL)
+ return;
+
+ reqp->dr_ctx.ctx_cm_type = 0;
+ reqp->dr_ctx.atomic = 0;
+ if (reqp->destroy)
+ dca_destroyreq(reqp);
+ else
+ dca_freereq(reqp);
+
+ ctx->cc_provider_private = NULL;
+}
+
+int
+dca_dsaatomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *data, crypto_data_t *sig,
+ int kmflag, crypto_req_handle_t req, int mode)
+{
+ crypto_ctx_t ctx; /* on the stack */
+ int rv;
+
+ ctx.cc_provider = provider;
+ ctx.cc_session = session_id;
+
+ rv = dca_dsainit(&ctx, mechanism, key, kmflag, mode);
+ if (rv != CRYPTO_SUCCESS) {
+ DBG(NULL, DWARN, "dca_dsaatomic: dca_dsainit() failed");
+ return (rv);
+ }
+
+ /*
+ * Set the atomic flag so that the hardware callback function
+ * will free the context.
+ */
+ ((dca_request_t *)ctx.cc_provider_private)->dr_ctx.atomic = 1;
+
+ if (mode == DCA_DSA_SIGN) {
+ rv = dca_dsa_sign(&ctx, data, sig, req);
+ } else {
+ ASSERT(mode == DCA_DSA_VRFY);
+ rv = dca_dsa_verify(&ctx, data, sig, req);
+ }
+
+ /*
+ * The context will be freed in the hardware callback function if it
+ * is queued
+ */
+ if (rv != CRYPTO_QUEUED)
+ dca_dsactxfree(&ctx);
+
+ return (rv);
+}
diff --git a/usr/src/uts/common/crypto/io/dca_kstat.c b/usr/src/uts/common/crypto/io/dca_kstat.c
new file mode 100644
index 0000000000..9223582cee
--- /dev/null
+++ b/usr/src/uts/common/crypto/io/dca_kstat.c
@@ -0,0 +1,208 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Deimos - cryptographic acceleration based upon Broadcom 582x.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/kstat.h>
+#include <sys/crypto/dca.h>
+
+/*
+ * Kernel statistics.
+ */
+static int dca_ksupdate(kstat_t *, int);
+
+/*
+ * Initialize Kstats.
+ */
+void
+dca_ksinit(dca_t *dca)
+{
+ char buf[64];
+ int instance;
+
+ if (ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
+ DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "nostats", 0) != 0) {
+ /*
+ * The sysadmin has explicitly disabled stats to prevent
+ * a covert channel.
+ */
+ return;
+ }
+
+ instance = ddi_get_instance(dca->dca_dip);
+
+ /*
+ * Interrupt kstats.
+ */
+ (void) sprintf(buf, "%sc%d", DRIVER, instance);
+ if ((dca->dca_intrstats = kstat_create(DRIVER, instance, buf,
+ "controller", KSTAT_TYPE_INTR, 1, 0)) == NULL) {
+ dca_error(dca, "unable to create interrupt kstat");
+ } else {
+ kstat_install(dca->dca_intrstats);
+ }
+
+ /*
+ * Named kstats.
+ */
+ if ((dca->dca_ksp = kstat_create(DRIVER, instance, NULL, "misc",
+ KSTAT_TYPE_NAMED, sizeof (dca_stat_t) / sizeof (kstat_named_t),
+ KSTAT_FLAG_WRITABLE)) == NULL) {
+ dca_error(dca, "unable to create kstats");
+ } else {
+ dca_stat_t *dkp = (dca_stat_t *)dca->dca_ksp->ks_data;
+ kstat_named_init(&dkp->ds_status, "status", KSTAT_DATA_CHAR);
+ kstat_named_init(&dkp->ds_mcr[0].ds_submit, "mcr1submit",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_mcr[0].ds_flowctl, "mcr1flowctl",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_mcr[0].ds_lowater, "mcr1lowater",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_mcr[0].ds_hiwater, "mcr1hiwater",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_mcr[0].ds_maxreqs, "mcr1maxreqs",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_mcr[1].ds_submit, "mcr2submit",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_mcr[1].ds_flowctl, "mcr2flowctl",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_mcr[1].ds_lowater, "mcr2lowater",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_mcr[1].ds_hiwater, "mcr2hiwater",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_mcr[1].ds_maxreqs, "mcr2maxreqs",
+ KSTAT_DATA_ULONGLONG);
+#ifdef DS_RC4JOBS
+ /* rc4 */
+ kstat_named_init(&dkp->ds_algs[DS_RC4JOBS], "rc4jobs",
+ KSTAT_DATA_ULONGLONG);
+#endif
+#ifdef DS_RC4BYTES
+ kstat_named_init(&dkp->ds_algs[DS_RC4BYTES], "rc4bytes",
+ KSTAT_DATA_ULONGLONG);
+#endif
+ /* 3des */
+ kstat_named_init(&dkp->ds_algs[DS_3DESJOBS], "3desjobs",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_algs[DS_3DESBYTES], "3desbytes",
+ KSTAT_DATA_ULONGLONG);
+ /* rsa */
+ kstat_named_init(&dkp->ds_algs[DS_RSAPUBLIC], "rsapublic",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_algs[DS_RSAPRIVATE], "rsaprivate",
+ KSTAT_DATA_ULONGLONG);
+ /* dsa */
+ kstat_named_init(&dkp->ds_algs[DS_DSASIGN], "dsasign",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_algs[DS_DSAVERIFY], "dsaverify",
+ KSTAT_DATA_ULONGLONG);
+#ifdef DS_DHPUBLIC
+ /* diffie-hellman */
+ kstat_named_init(&dkp->ds_algs[DS_DHPUBLIC], "dhpublic",
+ KSTAT_DATA_ULONGLONG);
+#endif
+#ifdef DS_DHSECRET
+ kstat_named_init(&dkp->ds_algs[DS_DHSECRET], "dhsecret",
+ KSTAT_DATA_ULONGLONG);
+#endif
+ /* random number jobs */
+ kstat_named_init(&dkp->ds_algs[DS_RNGJOBS], "rngjobs",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_algs[DS_RNGBYTES], "rngbytes",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_algs[DS_RNGSHA1JOBS], "rngsha1jobs",
+ KSTAT_DATA_ULONGLONG);
+ kstat_named_init(&dkp->ds_algs[DS_RNGSHA1BYTES],
+ "rngsha1bytes", KSTAT_DATA_ULONGLONG);
+ dca->dca_ksp->ks_update = dca_ksupdate;
+ dca->dca_ksp->ks_private = dca;
+ kstat_install(dca->dca_ksp);
+ }
+}
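+
+/*
+ * For reference: with the names registered above, these counters should
+ * be visible from userland via kstat(1M), e.g. "kstat -m dca" lists the
+ * named statistics (3desjobs, rsapublic, dsasign, rngbytes, ...)
+ * alongside the interrupt kstat.
+ */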
+
+/*
+ * Update Kstats.
+ */
+int
+dca_ksupdate(kstat_t *ksp, int rw)
+{
+ dca_t *dca;
+ dca_stat_t *dkp;
+ int i;
+
+ dca = (dca_t *)ksp->ks_private;
+ dkp = (dca_stat_t *)ksp->ks_data;
+
+ if (rw == KSTAT_WRITE) {
+ for (i = 0; i < DS_MAX; i++) {
+ dca->dca_stats[i] = dkp->ds_algs[i].value.ull;
+ }
+ for (i = MCR1; i <= MCR2; i++) {
+ WORKLIST(dca, i)->dwl_submit =
+ dkp->ds_mcr[i - 1].ds_submit.value.ull;
+ WORKLIST(dca, i)->dwl_flowctl =
+ dkp->ds_mcr[i - 1].ds_flowctl.value.ull;
+ /* hiwater, lowater, and maxreqs are read only */
+ }
+ } else {
+ /* handy status value */
+ if (dca->dca_flags & DCA_FAILED) {
+ /* device has failed */
+ (void) strcpy(dkp->ds_status.value.c, "fail");
+ } else if ((WORKLIST(dca, MCR1)->dwl_drain) ||
+ (WORKLIST(dca, MCR2)->dwl_drain)) {
+ /* device is draining for DR */
+ (void) strcpy(dkp->ds_status.value.c, "drain");
+ } else {
+ /* everything looks good */
+ (void) strcpy(dkp->ds_status.value.c, "online");
+ }
+
+ for (i = 0; i < DS_MAX; i++) {
+ dkp->ds_algs[i].value.ull = dca->dca_stats[i];
+ }
+ for (i = MCR1; i <= MCR2; i++) {
+ dkp->ds_mcr[i - 1].ds_submit.value.ull =
+ WORKLIST(dca, i)->dwl_submit;
+ dkp->ds_mcr[i - 1].ds_flowctl.value.ull =
+ WORKLIST(dca, i)->dwl_flowctl;
+ dkp->ds_mcr[i - 1].ds_lowater.value.ull =
+ WORKLIST(dca, i)->dwl_lowater;
+ dkp->ds_mcr[i - 1].ds_hiwater.value.ull =
+ WORKLIST(dca, i)->dwl_hiwater;
+ dkp->ds_mcr[i - 1].ds_maxreqs.value.ull =
+ WORKLIST(dca, i)->dwl_reqspermcr;
+ }
+ }
+ return (0);
+}
diff --git a/usr/src/uts/common/crypto/io/dca_rng.c b/usr/src/uts/common/crypto/io/dca_rng.c
new file mode 100644
index 0000000000..bfb56cdf0e
--- /dev/null
+++ b/usr/src/uts/common/crypto/io/dca_rng.c
@@ -0,0 +1,325 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Deimos - cryptographic acceleration based upon Broadcom 582x.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/kmem.h>
+#include <sys/crypto/dca.h>
+#include <sys/atomic.h>
+
+/*
+ * Random number implementation.
+ */
+
+static int dca_rngstart(dca_t *, dca_request_t *);
+static void dca_rngdone(dca_request_t *, int);
+
+static void dca_random_done(dca_t *);
+int dca_random_buffer(dca_t *dca, caddr_t buf, int len);
+int dca_random_init(dca_t *);
+void dca_random_fini(dca_t *);
+
+int
+dca_rng(dca_t *dca, uchar_t *buf, size_t len, crypto_req_handle_t req)
+{
+ dca_request_t *reqp;
+ int rv;
+ crypto_data_t *data;
+
+ if ((reqp = dca_getreq(dca, MCR2, 1)) == NULL) {
+ dca_error(dca, "unable to allocate request for RNG");
+ return (CRYPTO_HOST_MEMORY);
+ }
+
+ reqp->dr_kcf_req = req;
+
+ data = &reqp->dr_ctx.in_dup;
+ data->cd_format = CRYPTO_DATA_RAW;
+ data->cd_offset = 0;
+ data->cd_length = 0;
+ data->cd_raw.iov_base = (char *)buf;
+ data->cd_raw.iov_len = len;
+ reqp->dr_out = data;
+ reqp->dr_in = NULL;
+
+ rv = dca_rngstart(dca, reqp);
+ if (rv != CRYPTO_QUEUED) {
+ if (reqp->destroy)
+ dca_destroyreq(reqp);
+ else
+ dca_freereq(reqp);
+ }
+ return (rv);
+}
+
+int
+dca_rngstart(dca_t *dca, dca_request_t *reqp)
+{
+ uint16_t cmd;
+ size_t len;
+ uint16_t chunk;
+ crypto_data_t *out = reqp->dr_out;
+
+ if (dca->dca_flags & DCA_RNGSHA1) {
+ reqp->dr_job_stat = DS_RNGSHA1JOBS;
+ reqp->dr_byte_stat = DS_RNGSHA1BYTES;
+ cmd = CMD_RNGSHA1;
+ } else {
+ reqp->dr_job_stat = DS_RNGJOBS;
+ reqp->dr_byte_stat = DS_RNGBYTES;
+ cmd = CMD_RNGDIRECT;
+ }
+
+ len = out->cd_raw.iov_len - out->cd_length;
+ len = min(len, MAXPACKET & ~0xf);
+ chunk = ROUNDUP(len, sizeof (uint32_t));
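+ /*
+ * Each pass thus emits at most MAXPACKET & ~0xf bytes; larger
+ * requests are completed by dca_rngdone() rescheduling
+ * dca_rngstart() until cd_length reaches iov_len.
+ */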
+
+ if ((len < dca_mindma) ||
+ dca_sgcheck(dca, reqp->dr_out, DCA_SG_WALIGN)) {
+ reqp->dr_flags |= DR_SCATTER;
+ }
+
+ /* Try to do direct DMA. */
+ if (!(reqp->dr_flags & DR_SCATTER)) {
+ if (dca_bindchains(reqp, 0, len) != DDI_SUCCESS) {
+ return (CRYPTO_DEVICE_ERROR);
+ }
+ }
+
+ reqp->dr_in_paddr = 0;
+ reqp->dr_in_next = 0;
+ reqp->dr_in_len = 0;
+
+ /*
+ * Set up for scattering the result back out, using the pre-mapped
+ * buffers to store the random numbers. Since the data buffer is a
+ * linked list, we need to transfer its head to the MCR.
+ */
+ if (reqp->dr_flags & DR_SCATTER) {
+ reqp->dr_out_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
+ reqp->dr_out_next = reqp->dr_obuf_head.dc_next_paddr;
+ if (chunk > reqp->dr_obuf_head.dc_buffer_length)
+ reqp->dr_out_len = reqp->dr_obuf_head.dc_buffer_length;
+ else
+ reqp->dr_out_len = chunk;
+ }
+ reqp->dr_param.dp_rng.dr_chunklen = len;
+ reqp->dr_pkt_length = (uint16_t)chunk;
+ reqp->dr_callback = dca_rngdone;
+
+ /* write out the context structure */
+ PUTCTX16(reqp, CTX_LENGTH, CTX_RNG_LENGTH);
+ PUTCTX16(reqp, CTX_CMD, cmd);
+
+ /* schedule the work by doing a submit */
+ return (dca_start(dca, reqp, MCR2, 1));
+}
+
+void
+dca_rngdone(dca_request_t *reqp, int errno)
+{
+ if (errno == CRYPTO_SUCCESS) {
+
+ if (reqp->dr_flags & DR_SCATTER) {
+ (void) ddi_dma_sync(reqp->dr_obuf_dmah, 0,
+ reqp->dr_out_len, DDI_DMA_SYNC_FORKERNEL);
+ if (dca_check_dma_handle(reqp->dr_dca,
+ reqp->dr_obuf_dmah, DCA_FM_ECLASS_NONE) !=
+ DDI_SUCCESS) {
+ reqp->destroy = TRUE;
+ errno = CRYPTO_DEVICE_ERROR;
+ goto errout;
+ }
+ errno = dca_scatter(reqp->dr_obuf_kaddr,
+ reqp->dr_out, reqp->dr_param.dp_rng.dr_chunklen, 0);
+ if (errno != CRYPTO_SUCCESS) {
+ goto errout;
+ }
+ } else {
+ reqp->dr_out->cd_length +=
+ reqp->dr_param.dp_rng.dr_chunklen;
+ }
+
+ /*
+ * If there is more to do, then reschedule another
+ * pass.
+ */
+ if (reqp->dr_out->cd_length < reqp->dr_out->cd_raw.iov_len) {
+ errno = dca_rngstart(reqp->dr_dca, reqp);
+ if (errno == CRYPTO_QUEUED) {
+ return;
+ }
+ }
+ }
+
+errout:
+
+ if (reqp->dr_kcf_req) {
+ /* notify framework that request is completed */
+ crypto_op_notification(reqp->dr_kcf_req, errno);
+ } else {
+ /* For internal random number generation */
+ dca_random_done(reqp->dr_dca);
+ }
+
+ DBG(NULL, DINTR,
+ "dca_rngdone: returning %d to the kef via crypto_op_notification",
+ errno);
+ if (reqp->destroy)
+ dca_destroyreq(reqp);
+ else
+ dca_freereq(reqp);
+}
+
+/*
+ * This gives 32K random bytes per buffer. The two buffers switch back
+ * and forth: when one is used up, a request is submitted to refill it
+ * before we switch to the other one.
+ */
+
+#define RANDOM_BUFFER_SIZE (1<<15)
+#define DCA_RANDOM_MAX_WAIT 10000
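+
+/*
+ * A sketch of the ping-pong state: dca_buf_ptr is the buffer currently
+ * being drained (dca_index bytes consumed), while its sibling is refilled
+ * by the chip whenever dca_random_filling is set; dca_rngdone() clears
+ * that flag via dca_random_done().
+ */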
+
+int
+dca_random_init(dca_t *dca)
+{
+ /* Mutex for the local random number pool */
+ mutex_init(&dca->dca_random_lock, NULL, MUTEX_DRIVER, NULL);
+
+ if ((dca->dca_buf1 = kmem_alloc(RANDOM_BUFFER_SIZE, KM_SLEEP)) ==
+ NULL) {
+ mutex_destroy(&dca->dca_random_lock);
+ return (CRYPTO_FAILED);
+ }
+
+ if ((dca->dca_buf2 = kmem_alloc(RANDOM_BUFFER_SIZE, KM_SLEEP)) ==
+ NULL) {
+ mutex_destroy(&dca->dca_random_lock);
+ kmem_free(dca->dca_buf1, RANDOM_BUFFER_SIZE);
+ return (CRYPTO_FAILED);
+ }
+
+ return (CRYPTO_SUCCESS);
+}
+
+void
+dca_random_fini(dca_t *dca)
+{
+ kmem_free(dca->dca_buf1, RANDOM_BUFFER_SIZE);
+ kmem_free(dca->dca_buf2, RANDOM_BUFFER_SIZE);
+ dca->dca_buf1 = dca->dca_buf2 = dca->dca_buf_ptr = NULL;
+ (void) mutex_destroy(&dca->dca_random_lock);
+}
+
+int
+dca_random_buffer(dca_t *dca, caddr_t buf, int len)
+{
+ int rv;
+ int i, j;
+ char *fill_buf;
+
+ mutex_enter(&dca->dca_random_lock);
+
+ if (dca->dca_buf_ptr == NULL) {
+ if (dca->dca_buf1 == NULL || dca->dca_buf2 == NULL) {
+ mutex_exit(&dca->dca_random_lock);
+ return (CRYPTO_FAILED);
+ }
+
+ /* Very first time. Let us fill the first buffer */
+ if (dca_rng(dca, (uchar_t *)dca->dca_buf1, RANDOM_BUFFER_SIZE,
+ NULL) != CRYPTO_QUEUED) {
+ mutex_exit(&dca->dca_random_lock);
+ return (CRYPTO_FAILED);
+ }
+
+ atomic_or_32(&dca->dca_random_filling, 0x1);
+
+ /* Pretend we are using buffer2 and it is empty */
+ dca->dca_buf_ptr = dca->dca_buf2;
+ dca->dca_index = RANDOM_BUFFER_SIZE;
+ }
+
+ i = 0;
+ while (i < len) {
+ if (dca->dca_index >= RANDOM_BUFFER_SIZE) {
+ j = 0;
+ while (dca->dca_random_filling) {
+ /* We only wait here the first time through */
+ delay(drv_usectohz(100));
+ if (j++ >= DCA_RANDOM_MAX_WAIT)
+ break;
+ }
+ DBG(NULL, DENTRY, "dca_random_buffer: j: %d", j);
+ if (j > DCA_RANDOM_MAX_WAIT) {
+ mutex_exit(&dca->dca_random_lock);
+ return (CRYPTO_FAILED);
+ }
+
+ /* switch to the other buffer */
+ if (dca->dca_buf_ptr == dca->dca_buf1) {
+ dca->dca_buf_ptr = dca->dca_buf2;
+ fill_buf = dca->dca_buf1;
+ } else {
+ dca->dca_buf_ptr = dca->dca_buf1;
+ fill_buf = dca->dca_buf2;
+ }
+
+ atomic_or_32(&dca->dca_random_filling, 0x1);
+ dca->dca_index = 0;
+
+ if ((rv = dca_rng(dca, (uchar_t *)fill_buf,
+ RANDOM_BUFFER_SIZE, NULL)) != CRYPTO_QUEUED) {
+ mutex_exit(&dca->dca_random_lock);
+ return (rv);
+ }
+ }
+
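+ /*
+ * Zero bytes are filtered out here since the pool's internal
+ * consumer, dca_pkcs1_padding(), needs nonzero random octets
+ * for PKCS#1 block type 2 padding.
+ */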
+ if (dca->dca_buf_ptr[dca->dca_index] != '\0')
+ buf[i++] = dca->dca_buf_ptr[dca->dca_index];
+
+ dca->dca_index++;
+ }
+
+ mutex_exit(&dca->dca_random_lock);
+
+ DBG(NULL, DENTRY, "dca_random_buffer: i: %d", i);
+ return (CRYPTO_SUCCESS);
+}
+
+static void
+dca_random_done(dca_t *dca)
+{
+ DBG(NULL, DENTRY, "dca_random_done");
+ atomic_and_32(&dca->dca_random_filling, 0x0);
+}
diff --git a/usr/src/uts/common/crypto/io/dca_rsa.c b/usr/src/uts/common/crypto/io/dca_rsa.c
new file mode 100644
index 0000000000..71ec60d82c
--- /dev/null
+++ b/usr/src/uts/common/crypto/io/dca_rsa.c
@@ -0,0 +1,832 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Deimos - cryptographic acceleration based upon Broadcom 582x.
+ */
+
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/kmem.h>
+#include <sys/note.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/ioctl.h>
+#include <sys/crypto/dca.h>
+
+
+static void dca_rsaverifydone(dca_request_t *, int);
+static void dca_rsadone(dca_request_t *, int);
+
+/* Exported function prototypes */
+int dca_rsastart(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t, int);
+int dca_rsainit(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, int);
+void dca_rsactxfree(void *);
+int dca_rsaatomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ int, crypto_req_handle_t, int);
+
+/* Local function prototypes */
+static int dca_pkcs1_padding(dca_t *dca, caddr_t buf, int flen, int tlen,
+ int private);
+static int dca_pkcs1_unpadding(char *buf, int *tlen, int flen, int mode);
+static int dca_x509_padding(caddr_t buf, int flen, int tlen);
+static int dca_x509_unpadding(char *buf, int tlen, int flen, int mode);
+static int decrypt_error_code(int mode, int decrypt, int verify, int def);
+
+
+int
+dca_rsastart(crypto_ctx_t *ctx, crypto_data_t *in, crypto_data_t *out,
+    crypto_req_handle_t req, int mode)
+{
+ dca_request_t *reqp = ctx->cc_provider_private;
+ dca_t *dca = ctx->cc_provider;
+ caddr_t daddr;
+ int rv = CRYPTO_QUEUED;
+ int len;
+
+ /*
+ * In-place operations (in == out) are indicated by having a
+ * NULL output. In this case, set out to point to in. Note that
+ * this only works for CKM_RSA_X_509, which uses no padding.
+ */
+ if (!out) {
+ DBG(dca, DWARN, "Using inline since output buffer is NULL.");
+ out = in;
+ }
+
+ /* We don't support non-contiguous buffers for RSA */
+ if (dca_sgcheck(dca, in, DCA_SG_CONTIG) ||
+ dca_sgcheck(dca, out, DCA_SG_CONTIG)) {
+ rv = CRYPTO_NOT_SUPPORTED;
+ goto errout;
+ }
+
+ len = dca_length(in);
+
+ /* Extracting the key attributes is now done in dca_rsainit(). */
+ if (mode == DCA_RSA_ENC || mode == DCA_RSA_SIGN ||
+ mode == DCA_RSA_SIGNR) {
+ /*
+ * Return length needed to store the output.
+ * For sign, sign-recover, and encrypt, the output buffer
+ * should not be smaller than modlen since PKCS or X_509
+ * padding will be applied
+ */
+ if (dca_length(out) < reqp->dr_ctx.modlen) {
+ DBG(dca, DWARN,
+ "dca_rsastart: output buffer too short (%d < %d)",
+ dca_length(out), reqp->dr_ctx.modlen);
+ out->cd_length = reqp->dr_ctx.modlen;
+ rv = CRYPTO_BUFFER_TOO_SMALL;
+ goto errout;
+ }
+ }
+ if (out != in && out->cd_length > reqp->dr_ctx.modlen)
+ out->cd_length = reqp->dr_ctx.modlen;
+
+ /* The input length should not be bigger than the modulus */
+ if (len > reqp->dr_ctx.modlen) {
+ rv = decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
+ CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_DATA_LEN_RANGE);
+ goto errout;
+ }
+
+ /*
+ * For decryption, verify, and verifyRecover, the input length should
+ * not be less than the modulus
+ */
+ if (len < reqp->dr_ctx.modlen && (mode == DCA_RSA_DEC ||
+ mode == DCA_RSA_VRFY || mode == DCA_RSA_VRFYR)) {
+ rv = decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
+ CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_DATA_LEN_RANGE);
+ goto errout;
+ }
+
+ /*
+ * For decryption and verifyRecover, the output buffer should not
+ * be less than the modulus
+ */
+ if (out->cd_length < reqp->dr_ctx.modlen && (mode == DCA_RSA_DEC ||
+ mode == DCA_RSA_VRFYR) &&
+ reqp->dr_ctx.ctx_cm_type == RSA_X_509_MECH_INFO_TYPE) {
+ out->cd_length = reqp->dr_ctx.modlen;
+ rv = CRYPTO_BUFFER_TOO_SMALL;
+ goto errout;
+ }
+
+ /* For decrypt and verify, the input should not be less than output */
+ if (out && len < out->cd_length) {
+ if ((rv = decrypt_error_code(mode,
+ CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
+ CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_SUCCESS)) !=
+ CRYPTO_SUCCESS)
+ goto errout;
+ }
+
+ if ((daddr = dca_bufdaddr(in)) == NULL && len > 0) {
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+
+ if (dca_numcmp(daddr, len, (char *)reqp->dr_ctx.mod,
+ reqp->dr_ctx.modlen) > 0) {
+ DBG(dca, DWARN,
+ "dca_rsastart: input larger (numerically) than modulus!");
+ rv = decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
+ CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID);
+ goto errout;
+ }
+
+ reqp->dr_byte_stat = -1;
+ reqp->dr_in = in;
+ reqp->dr_out = out;
+ reqp->dr_kcf_req = req;
+ if (mode == DCA_RSA_VRFY)
+ reqp->dr_callback = dca_rsaverifydone;
+ else
+ reqp->dr_callback = dca_rsadone;
+
+ dca_reverse(daddr, reqp->dr_ibuf_kaddr, len, reqp->dr_pkt_length);
+ if (mode == DCA_RSA_ENC || mode == DCA_RSA_SIGN ||
+ mode == DCA_RSA_SIGNR) {
+ /*
+ * Needs to pad appropriately for encrypt, sign, and
+ * sign_recover
+ */
+ if (reqp->dr_ctx.ctx_cm_type == RSA_PKCS_MECH_INFO_TYPE) {
+ if ((rv = dca_pkcs1_padding(dca, reqp->dr_ibuf_kaddr,
+ len, reqp->dr_ctx.modlen, reqp->dr_ctx.pqfix)) !=
+ CRYPTO_QUEUED)
+ goto errout;
+ } else if (reqp->dr_ctx.ctx_cm_type ==
+ RSA_X_509_MECH_INFO_TYPE) {
+ if ((rv = dca_x509_padding(reqp->dr_ibuf_kaddr,
+ len, reqp->dr_pkt_length)) != CRYPTO_QUEUED)
+ goto errout;
+ }
+ }
+ reqp->dr_ctx.mode = mode;
+
+ /*
+ * Since the max RSA input size is 256 bytes (2048 bits), the first
+ * page (at least 4096 bytes) in the pre-mapped buffer is large enough.
+ * Therefore, we use this first page for RSA.
+ */
+ reqp->dr_in_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
+ reqp->dr_in_next = 0;
+ reqp->dr_in_len = reqp->dr_pkt_length;
+ reqp->dr_out_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
+ reqp->dr_out_next = 0;
+ reqp->dr_out_len = reqp->dr_pkt_length;
+
+ /* schedule the work by doing a submit */
+ rv = dca_start(dca, reqp, MCR2, 1);
+
+
+errout:
+ if (rv != CRYPTO_QUEUED && rv != CRYPTO_BUFFER_TOO_SMALL)
+ (void) dca_free_context(ctx);
+
+ return (rv);
+}
+
+void
+dca_rsadone(dca_request_t *reqp, int errno)
+{
+ if (errno == CRYPTO_SUCCESS) {
+ int outsz = reqp->dr_out->cd_length;
+ caddr_t daddr;
+
+ (void) ddi_dma_sync(reqp->dr_obuf_dmah, 0, reqp->dr_out_len,
+ DDI_DMA_SYNC_FORKERNEL);
+ if (dca_check_dma_handle(reqp->dr_dca, reqp->dr_obuf_dmah,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ reqp->destroy = TRUE;
+ errno = CRYPTO_DEVICE_ERROR;
+ goto errout;
+ }
+
+ if (reqp->dr_ctx.mode == DCA_RSA_DEC ||
+ reqp->dr_ctx.mode == DCA_RSA_VRFY ||
+ reqp->dr_ctx.mode == DCA_RSA_VRFYR) {
+ /*
+ * Needs to unpad appropriately for decrypt, verify,
+ * and verify_recover
+ */
+ if (reqp->dr_ctx.ctx_cm_type ==
+ RSA_PKCS_MECH_INFO_TYPE) {
+ errno = dca_pkcs1_unpadding(
+ reqp->dr_obuf_kaddr, &outsz,
+ reqp->dr_ctx.modlen, reqp->dr_ctx.mode);
+
+ /* check for bad data errors */
+ if (errno != CRYPTO_SUCCESS &&
+ errno != CRYPTO_BUFFER_TOO_SMALL) {
+ goto errout;
+ }
+ if (dca_bufdaddr(reqp->dr_out) == NULL) {
+ errno = CRYPTO_BUFFER_TOO_SMALL;
+ }
+ if (errno == CRYPTO_BUFFER_TOO_SMALL) {
+ reqp->dr_out->cd_length = outsz;
+ goto errout;
+ }
+ /* Reset the output data length */
+ reqp->dr_out->cd_length = outsz;
+ } else if (reqp->dr_ctx.ctx_cm_type ==
+ RSA_X_509_MECH_INFO_TYPE) {
+ if ((errno = dca_x509_unpadding(
+ reqp->dr_obuf_kaddr, outsz,
+ reqp->dr_pkt_length, reqp->dr_ctx.mode)) !=
+ CRYPTO_SUCCESS)
+ goto errout;
+ }
+ }
+
+ if ((daddr = dca_bufdaddr(reqp->dr_out)) == NULL) {
+ DBG(reqp->dr_dca, DINTR,
+ "dca_rsadone: reqp->dr_out is bad");
+ errno = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+ /*
+ * Note that there may be some number of null bytes
+ * at the end of the source (result), but we don't care
+ * about them -- they are placeholders only and are
+ * truncated here.
+ */
+ dca_reverse(reqp->dr_obuf_kaddr, daddr, outsz, outsz);
+ }
+errout:
+ ASSERT(reqp->dr_kcf_req != NULL);
+
+ /* notify framework that request is completed */
+ crypto_op_notification(reqp->dr_kcf_req, errno);
+ DBG(reqp->dr_dca, DINTR,
+ "dca_rsadone: returning 0x%x to the kef via crypto_op_notification",
+ errno);
+
+ /*
+ * For non-atomic operations, reqp will be freed in the kCF
+ * callback function since it may be needed again if
+ * CRYPTO_BUFFER_TOO_SMALL is returned to kCF
+ */
+ if (reqp->dr_ctx.atomic) {
+ crypto_ctx_t ctx;
+ ctx.cc_provider_private = reqp;
+ dca_rsactxfree(&ctx);
+ }
+}
+
+void
+dca_rsaverifydone(dca_request_t *reqp, int errno)
+{
+ if (errno == CRYPTO_SUCCESS) {
+ char scratch[RSA_MAX_KEY_LEN];
+ int outsz = reqp->dr_out->cd_length;
+ caddr_t daddr;
+
+ /*
+ * ASSUMPTION: the signature length was already
+ * checked on the way in, and it is a valid length.
+ */
+ (void) ddi_dma_sync(reqp->dr_obuf_dmah, 0, outsz,
+ DDI_DMA_SYNC_FORKERNEL);
+ if (dca_check_dma_handle(reqp->dr_dca, reqp->dr_obuf_dmah,
+ DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
+ reqp->destroy = TRUE;
+ errno = CRYPTO_DEVICE_ERROR;
+ goto errout;
+ }
+
+ if (reqp->dr_ctx.mode == DCA_RSA_DEC ||
+ reqp->dr_ctx.mode == DCA_RSA_VRFY ||
+ reqp->dr_ctx.mode == DCA_RSA_VRFYR) {
+ /*
+ * Needs to unpad appropriately for decrypt, verify,
+ * and verify_recover
+ */
+ if (reqp->dr_ctx.ctx_cm_type ==
+ RSA_PKCS_MECH_INFO_TYPE) {
+ errno = dca_pkcs1_unpadding(
+ reqp->dr_obuf_kaddr, &outsz,
+ reqp->dr_ctx.modlen, reqp->dr_ctx.mode);
+
+ /* check for bad data errors */
+ if (errno != CRYPTO_SUCCESS &&
+ errno != CRYPTO_BUFFER_TOO_SMALL) {
+ goto errout;
+ }
+ if (dca_bufdaddr(reqp->dr_out) == NULL) {
+ errno = CRYPTO_BUFFER_TOO_SMALL;
+ }
+ if (errno == CRYPTO_BUFFER_TOO_SMALL) {
+ reqp->dr_out->cd_length = outsz;
+ goto errout;
+ }
+ /* Reset the output data length */
+ reqp->dr_out->cd_length = outsz;
+ } else if (reqp->dr_ctx.ctx_cm_type ==
+ RSA_X_509_MECH_INFO_TYPE) {
+ if ((errno = dca_x509_unpadding(
+ reqp->dr_obuf_kaddr, outsz,
+ reqp->dr_pkt_length, reqp->dr_ctx.mode)) !=
+ CRYPTO_SUCCESS)
+ goto errout;
+ }
+ }
+
+ dca_reverse(reqp->dr_obuf_kaddr, scratch, outsz, outsz);
+
+ if ((daddr = dca_bufdaddr(reqp->dr_out)) == NULL) {
+ errno = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+ if (dca_numcmp(daddr, reqp->dr_out->cd_length, scratch,
+ outsz) != 0) {
+ /* VERIFY FAILED */
+ errno = CRYPTO_SIGNATURE_INVALID;
+ }
+ }
+errout:
+ ASSERT(reqp->dr_kcf_req != NULL);
+
+ /* notify framework that request is completed */
+ crypto_op_notification(reqp->dr_kcf_req, errno);
+ DBG(reqp->dr_dca, DINTR,
+ "dca_rsaverifydone: rtn 0x%x to the kef via crypto_op_notification",
+ errno);
+
+ /*
+ * For non-atomic operations, reqp will be freed in the kCF
+ * callback function since it may be needed again if
+ * CRYPTO_BUFFER_TOO_SMALL is returned to kCF
+ */
+ if (reqp->dr_ctx.atomic) {
+ crypto_ctx_t ctx;
+ ctx.cc_provider_private = reqp;
+ dca_rsactxfree(&ctx);
+ }
+}
+
+/*
+ * Setup either a public or a private RSA key for subsequent uses
+ */
+int
+dca_rsainit(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, int kmflag)
+{
+ crypto_object_attribute_t *attr;
+ unsigned expname = 0;
+ void *attrdata;
+ int rv;
+
+ uchar_t *exp;
+ uchar_t *p;
+ uchar_t *q;
+ uchar_t *dp;
+ uchar_t *dq;
+ uchar_t *pinv;
+
+ unsigned explen = 0;
+ unsigned plen = 0;
+ unsigned qlen = 0;
+ unsigned dplen = 0;
+ unsigned dqlen = 0;
+ unsigned pinvlen = 0;
+
+ unsigned modbits, expbits, pbits, qbits;
+ unsigned modfix, expfix, pqfix = 0;
+ uint16_t ctxlen;
+ caddr_t kaddr;
+ dca_request_t *reqp = NULL;
+ dca_t *dca = (dca_t *)ctx->cc_provider;
+
+ DBG(NULL, DENTRY, "dca_rsainit: start");
+
+ if ((reqp = dca_getreq(dca, MCR2, 1)) == NULL) {
+ DBG(NULL, DWARN,
+ "dca_rsainit: unable to allocate request for RSA");
+ rv = CRYPTO_HOST_MEMORY;
+ goto errout;
+ }
+
+ reqp->dr_ctx.ctx_cm_type = mechanism->cm_type;
+ ctx->cc_provider_private = reqp;
+
+ /*
+ * Key type can be either RAW, or REFERENCE, or ATTR_LIST (VALUE).
+ * Only ATTR_LIST is supported on Deimos for RSA.
+ */
+ if ((attr = dca_get_key_attr(key)) == NULL) {
+ DBG(NULL, DWARN, "dca_rsainit: key attributes missing");
+ rv = CRYPTO_KEY_TYPE_INCONSISTENT;
+ goto errout;
+ }
+
+ if (dca_find_attribute(attr, key->ck_count, CKA_PUBLIC_EXPONENT))
+ expname = CKA_PUBLIC_EXPONENT;
+
+ /*
+ * An RSA public key has only a public exponent. An RSA private key
+ * must have a private exponent, but it may also have a public
+ * exponent. Thus, the existence of a private exponent indicates a
+ * private key.
+ */
+ if (dca_find_attribute(attr, key->ck_count, CKA_PRIVATE_EXPONENT))
+ expname = CKA_PRIVATE_EXPONENT;
+
+ if (!expname) {
+ DBG(NULL, DWARN, "dca_rsainit: no exponent in key");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+
+ /* Modulus */
+ if ((rv = dca_attr_lookup_uint8_array(attr, key->ck_count, CKA_MODULUS,
+ &attrdata, &(reqp->dr_ctx.modlen))) != CRYPTO_SUCCESS) {
+ DBG(NULL, DWARN, "dca_rsainit: failed to retrieve modulus");
+ goto errout;
+ }
+ if ((reqp->dr_ctx.modlen == 0) ||
+ (reqp->dr_ctx.modlen > RSA_MAX_KEY_LEN)) {
+ DBG(NULL, DWARN, "dca_rsainit: bad modulus size");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+ if ((reqp->dr_ctx.mod = kmem_alloc(reqp->dr_ctx.modlen, kmflag)) ==
+ NULL) {
+ rv = CRYPTO_HOST_MEMORY;
+ goto errout;
+ }
+ bcopy(attrdata, reqp->dr_ctx.mod, reqp->dr_ctx.modlen);
+
+ /* Exponent */
+ if ((rv = dca_attr_lookup_uint8_array(attr, key->ck_count, expname,
+ (void **) &exp, &explen)) != CRYPTO_SUCCESS) {
+ DBG(NULL, DWARN, "dca_rsainit: failed to retrieve exponent");
+ goto errout;
+ }
+ if ((explen == 0) || (explen > RSA_MAX_KEY_LEN)) {
+ DBG(NULL, DWARN, "dca_rsainit: bad exponent size");
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto errout;
+ }
+
+ /* Lookup private attributes */
+ if (expname == CKA_PRIVATE_EXPONENT) {
+ /* Prime 1 */
+ (void) dca_attr_lookup_uint8_array(attr, key->ck_count,
+ CKA_PRIME_1, (void **)&q, &qlen);
+
+ /* Prime 2 */
+ (void) dca_attr_lookup_uint8_array(attr, key->ck_count,
+ CKA_PRIME_2, (void **)&p, &plen);
+
+ /* Exponent 1 */
+ (void) dca_attr_lookup_uint8_array(attr, key->ck_count,
+ CKA_EXPONENT_1, (void **)&dq, &dqlen);
+
+ /* Exponent 2 */
+ (void) dca_attr_lookup_uint8_array(attr, key->ck_count,
+ CKA_EXPONENT_2, (void **)&dp, &dplen);
+
+ /* Coefficient */
+ (void) dca_attr_lookup_uint8_array(attr, key->ck_count,
+ CKA_COEFFICIENT, (void **)&pinv, &pinvlen);
+ }
+
+ modbits = dca_bitlen(reqp->dr_ctx.mod, reqp->dr_ctx.modlen);
+ expbits = dca_bitlen(exp, explen);
+
+ if ((modfix = dca_padfull(modbits)) == 0) {
+ DBG(NULL, DWARN, "dca_rsainit: modulus too long");
+ rv = CRYPTO_KEY_SIZE_RANGE;
+ goto errout;
+ }
+ expfix = ROUNDUP(explen, sizeof (uint32_t));
+
+ if (plen && qlen && dplen && dqlen && pinvlen) {
+ unsigned pfix, qfix;
+ qbits = dca_bitlen(q, qlen);
+ pbits = dca_bitlen(p, plen);
+ qfix = dca_padhalf(qbits);
+ pfix = dca_padhalf(pbits);
+ if (pfix && qfix)
+ pqfix = max(pfix, qfix);
+ }
+
+ if (pqfix) {
+ reqp->dr_job_stat = DS_RSAPRIVATE;
+ reqp->dr_pkt_length = 2 * pqfix;
+ } else {
+ reqp->dr_job_stat = DS_RSAPUBLIC;
+ reqp->dr_pkt_length = modfix;
+ }
+
+ if (pqfix) {
+ /*
+ * NOTE: chip's notion of p vs. q is reversed from
+ * PKCS#11. We use the chip's notion in our variable
+ * naming.
+ */
+ ctxlen = 8 + pqfix * 5;
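+ /*
+ * That is, 8 header bytes plus five operands (p, q, dp, dq,
+ * pinv), each padded to pqfix bytes.
+ */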
+
+ /* write out the context structure */
+ PUTCTX16(reqp, CTX_CMD, CMD_RSAPRIVATE);
+ PUTCTX16(reqp, CTX_LENGTH, ctxlen);
+ /* exponent and modulus length in bits!!! */
+ PUTCTX16(reqp, CTX_RSAQLEN, qbits);
+ PUTCTX16(reqp, CTX_RSAPLEN, pbits);
+
+ kaddr = reqp->dr_ctx_kaddr + CTX_RSABIGNUMS;
+
+ /* store the bignums */
+ dca_reverse(p, kaddr, plen, pqfix);
+ kaddr += pqfix;
+
+ dca_reverse(q, kaddr, qlen, pqfix);
+ kaddr += pqfix;
+
+ dca_reverse(dp, kaddr, dplen, pqfix);
+ kaddr += pqfix;
+
+ dca_reverse(dq, kaddr, dqlen, pqfix);
+ kaddr += pqfix;
+
+ dca_reverse(pinv, kaddr, pinvlen, pqfix);
+ kaddr += pqfix;
+ } else {
+ ctxlen = 8 + modfix + expfix;
+ /* write out the context structure */
+ PUTCTX16(reqp, CTX_CMD, CMD_RSAPUBLIC);
+ PUTCTX16(reqp, CTX_LENGTH, (uint16_t)ctxlen);
+ /* exponent and modulus length in bits!!! */
+ PUTCTX16(reqp, CTX_RSAEXPLEN, expbits);
+ PUTCTX16(reqp, CTX_RSAMODLEN, modbits);
+
+ kaddr = reqp->dr_ctx_kaddr + CTX_RSABIGNUMS;
+
+ /* store the bignums */
+ dca_reverse(reqp->dr_ctx.mod, kaddr, reqp->dr_ctx.modlen,
+ modfix);
+ kaddr += modfix;
+
+ dca_reverse(exp, kaddr, explen, expfix);
+ kaddr += expfix;
+ }
+
+ reqp->dr_ctx.pqfix = pqfix;
+
+errout:
+ if (rv != CRYPTO_SUCCESS)
+ dca_rsactxfree(ctx);
+
+ return (rv);
+}
+
+void
+dca_rsactxfree(void *arg)
+{
+ crypto_ctx_t *ctx = (crypto_ctx_t *)arg;
+ dca_request_t *reqp = ctx->cc_provider_private;
+
+ if (reqp == NULL)
+ return;
+
+ if (reqp->dr_ctx.mod)
+ kmem_free(reqp->dr_ctx.mod, reqp->dr_ctx.modlen);
+
+ reqp->dr_ctx.mode = 0;
+ reqp->dr_ctx.ctx_cm_type = 0;
+ reqp->dr_ctx.mod = NULL;
+ reqp->dr_ctx.modlen = 0;
+ reqp->dr_ctx.pqfix = 0;
+ reqp->dr_ctx.atomic = 0;
+
+ if (reqp->destroy)
+ dca_destroyreq(reqp);
+ else
+ dca_freereq(reqp);
+
+ ctx->cc_provider_private = NULL;
+}
+
+int
+dca_rsaatomic(crypto_provider_handle_t provider,
+ crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
+ crypto_key_t *key, crypto_data_t *input, crypto_data_t *output,
+ int kmflag, crypto_req_handle_t req, int mode)
+{
+ crypto_ctx_t ctx; /* on the stack */
+ int rv;
+
+ ctx.cc_provider = provider;
+ ctx.cc_session = session_id;
+
+ rv = dca_rsainit(&ctx, mechanism, key, kmflag);
+ if (rv != CRYPTO_SUCCESS) {
+ DBG(NULL, DWARN, "dca_rsaatomic: dca_rsainit() failed");
+ /* The content of ctx should have been freed already */
+ return (rv);
+ }
+
+ /*
+ * Set the atomic flag so that the hardware callback function
+ * will free the context.
+ */
+ ((dca_request_t *)ctx.cc_provider_private)->dr_ctx.atomic = 1;
+
+ rv = dca_rsastart(&ctx, input, output, req, mode);
+
+ /*
+ * The context will be freed in the hardware callback function if it
+ * is queued
+ */
+ if (rv != CRYPTO_QUEUED)
+ dca_rsactxfree(&ctx);
+
+ return (rv);
+}
+
+
+/*
+ * For RSA_PKCS padding and unpadding:
+ * 1. The minimum padding is 11 bytes.
+ * 2. The first and the last bytes must be 0.
+ * 3. The second byte is 1 for private and 2 for public keys.
+ * 4. Pad with 0xff for private and non-zero random for public keys.
+ */
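+/*
+ * Since dca_reverse() has already byte-reversed the data, the block is
+ * built back-to-front below. For example, with tlen = 12, flen = 1 and
+ * a private key, buf becomes { D, 00, ff x 8, 01, 00 }, which is the
+ * conventional 00 01 ff..ff 00 D once un-reversed.
+ */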
+static int
+dca_pkcs1_padding(dca_t *dca, caddr_t buf, int flen, int tlen, int private)
+{
+ int i;
+
+ DBG(NULL, DENTRY,
+ "dca_pkcs1_padding: tlen: %d, flen: %d: private: %d\n",
+ tlen, flen, private);
+
+ if (flen > tlen - 11)
+ return (CRYPTO_DATA_LEN_RANGE);
+
+ if (private) {
+ /* Padding for private encrypt */
+ buf[flen] = '\0';
+ for (i = flen + 1; i < tlen - 2; i++) {
+ buf[i] = (unsigned char) 0xff;
+ }
+ buf[tlen - 2] = 1;
+ buf[tlen - 1] = 0;
+ } else {
+ /* Padding for public encrypt */
+ buf[flen] = '\0';
+
+ if (dca_random_buffer(dca, &buf[flen+1], tlen - flen - 3) !=
+ CRYPTO_SUCCESS)
+ return (CRYPTO_RANDOM_NO_RNG);
+
+ buf[tlen - 2] = 2;
+ buf[tlen - 1] = 0;
+ }
+
+ return (CRYPTO_QUEUED);
+}
+
+static int
+dca_pkcs1_unpadding(char *buf, int *tlen, int flen, int mode)
+{
+ int i;
+ const unsigned char *p;
+ unsigned char type;
+
+ DBG(NULL, DENTRY, "dca_pkcs1_unpadding: tlen: %d, flen: %d\n",
+ *tlen, flen);
+
+ p = (unsigned char *) buf + (flen-1);
+ if (*(p--) != 0)
+ return (decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
+ CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID));
+
+ /* It is ok if the data length is 0 after removing the padding */
+ type = *(p--);
+ if (type == 01) {
+ for (i = flen - 3; i >= 0; i--) {
+ if (*p != 0xff) {
+ if (*p == '\0') {
+ p--;
+ break;
+ } else {
+ return (decrypt_error_code(mode,
+ CRYPTO_ENCRYPTED_DATA_INVALID,
+ CRYPTO_SIGNATURE_INVALID,
+ CRYPTO_DATA_INVALID));
+ }
+ }
+ p--;
+ }
+ } else if (type == 02) {
+ for (i = flen - 3; i >= 0; i--) {
+ if (*p == '\0') {
+ p--;
+ break;
+ }
+ p--;
+ }
+ } else {
+ return (decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
+ CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID));
+ }
+
+ /* i < 0 means we did not find the end of the padding */
+ if (i < 0)
+ return (decrypt_error_code(mode, CRYPTO_ENCRYPTED_DATA_INVALID,
+ CRYPTO_SIGNATURE_INVALID, CRYPTO_DATA_INVALID));
+
+ if (i > *tlen) {
+ *tlen = i;
+ return (CRYPTO_BUFFER_TOO_SMALL);
+ }
+
+ if (flen - i < 11)
+ return (decrypt_error_code(mode,
+ CRYPTO_ENCRYPTED_DATA_LEN_RANGE,
+ CRYPTO_SIGNATURE_LEN_RANGE, CRYPTO_DATA_LEN_RANGE));
+
+ /* Return the unpadded length to the caller */
+ *tlen = i;
+
+ return (CRYPTO_SUCCESS);
+}
+
+/*
+ * For RSA_X_509 padding and unpadding, pad with 0s before the actual data.
+ * Note that the data will be in reverse order.
+ */
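+/*
+ * For example, with flen = 2 and tlen = 4, the two (reversed) data bytes
+ * are followed by two zero bytes, which become leading zeros once the
+ * result is un-reversed.
+ */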
+static int
+dca_x509_padding(caddr_t buf, int flen, int tlen)
+{
+ DBG(NULL, DENTRY, "dca_x509_padding: tlen: %d, flen: %d\n",
+ tlen, flen);
+
+ bzero(buf+flen, tlen - flen);
+
+ return (CRYPTO_QUEUED);
+}
+
+/* ARGSUSED */
+static int
+dca_x509_unpadding(char *buf, int tlen, int flen, int mode)
+{
+ int i;
+ const unsigned char *p;
+
+ DBG(NULL, DENTRY, "dca_x509_unpadding: tlen: %d, flen: %d\n",
+ tlen, flen);
+
+ p = (unsigned char *) buf + flen;
+ for (i = tlen; i < flen; i++) {
+ if (*(--p) != 0)
+ return (CRYPTO_SIGNATURE_INVALID);
+ }
+
+ return (CRYPTO_SUCCESS);
+}
+
+static int
+decrypt_error_code(int mode, int decrypt, int verify, int def)
+{
+ switch (mode) {
+ case DCA_RSA_DEC:
+ return (decrypt);
+ case DCA_RSA_VRFY:
+ case DCA_RSA_VRFYR:
+ return (verify);
+ default:
+ return (def);
+ }
+}
diff --git a/usr/src/uts/common/sys/crypto/dca.h b/usr/src/uts/common/sys/crypto/dca.h
new file mode 100644
index 0000000000..8644db3977
--- /dev/null
+++ b/usr/src/uts/common/sys/crypto/dca.h
@@ -0,0 +1,928 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_CRYPTO_DCA_H
+#define _SYS_CRYPTO_DCA_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/varargs.h>
+
+#include <sys/crypto/spi.h>
+
+/*
+ * Deimos - cryptographic acceleration based upon Broadcom 582x.
+ *
+ * Note: Everything in this file is private to the Deimos device
+ * driver! Do not include this in any other file.
+ */
+
+#define DRIVER "dca"
+#define DCA_MANUFACTURER_ID "SUNWdca"
+
+#ifdef _KERNEL
+
+/*
+ * Tunables.
+ */
+#define MCR1LOWATER 16 /* these numbers favor overall throughput */
+#define MCR1HIWATER 24
+#define MCR1MAXREQS 8
+#define MCR2LOWATER 16
+#define MCR2HIWATER 24
+#define MCR2MAXREQS 4
+#define MAXMCR 2 /* there are 2 mcrs */
+#define MAXREQSPERMCR 16 /* there are 4 subunits serviced by MCR2 */
+#define MAXFRAGS 6 /* Limit on the number of fragments */
+#define MAXWORK 6 /* How many work structures to preallocate */
+
+/*
+ * These are constants. Do not change them.
+ */
+#if defined(i386) || defined(__i386) || defined(__amd64)
+#define MAXPACKET 0xefff /* rootnex INT_MAX_BUF hack. */
+#else
+#define MAXPACKET 0xffff /* Max size of a packet or fragment */
+#endif
+#define DESBLOCK 8 /* Size of a DES or 3DES block */
+#define DSAPARTLEN 20 /* Size of fixed DSA parts (r, s, q, x, v) */
+#define DSASIGLEN 40 /* Size of a DSA signature */
+#define SHA1LEN 20 /* Size of a SHA1 hash */
+#define SECOND 1000000 /* One second in usec */
+#define MSEC 1000 /* One millisecond in usec */
+#define DES_KEYSIZE 8
+#define DES_IV_LEN 8
+#define DES3_KEYSIZE (3 * DES_KEYSIZE)
+
+/*
+ * Mechanism info structure passed to KCF during registration.
+ */
+
+#define MD5_HMAC_BLOCK_SIZE 64 /* MD5-HMAC block size */
+#define MD5_HMAC_MIN_KEY_LEN 1 /* MD5-HMAC min key length in bytes */
+#define MD5_HMAC_MAX_KEY_LEN 64 /* MD5-HMAC max key length in bytes */
+
+#define SHA1_HMAC_BLOCK_SIZE 64 /* SHA1-HMAC block size */
+#define SHA1_HMAC_MIN_KEY_LEN 1 /* SHA1-HMAC min key length in bytes */
+#define SHA1_HMAC_MAX_KEY_LEN 64 /* SHA1-HMAC max key length in bytes */
+
+#define DES_KEY_LEN 8 /* DES key length in bytes */
+#define DES3_KEY_LEN 24 /* 3DES key length in bytes */
+
+#define DSA_MIN_KEY_LEN 64 /* DSA min key length in bytes */
+#define DSA_MAX_KEY_LEN 128 /* DSA max key length in bytes */
+
+#define RSA_MIN_KEY_LEN 32 /* RSA min key length in bytes */
+#define RSA_MAX_KEY_LEN 256 /* RSA max key length in bytes */
+
+/*
+ * RSA implementation.
+ */
+
+#define DCA_RSA_ENC 0
+#define DCA_RSA_DEC 1
+#define DCA_RSA_SIGN 2
+#define DCA_RSA_VRFY 3
+#define DCA_RSA_SIGNR 4
+#define DCA_RSA_VRFYR 5
+
+/*
+ * DSA implementation.
+ */
+
+#define DCA_DSA_SIGN 0
+#define DCA_DSA_VRFY 1
+
+/*
+ * FMA eclass index definitions. Note that this enum must be consistent
+ * with the dca_fma_eclass_sca1000 and dca_fma_eclass_sca500 string arrays.
+ */
+typedef enum dca_fma_eclass {
+ DCA_FM_ECLASS_HW_DEVICE = 0,
+ DCA_FM_ECLASS_HW_TIMEOUT,
+ DCA_FM_ECLASS_NONE
+} dca_fma_eclass_t;
+
+/*
+ * Forward typedefs.
+ */
+typedef struct dca dca_t;
+typedef struct dca_chain dca_chain_t;
+typedef struct dca_listnode dca_listnode_t;
+typedef struct dca_worklist dca_worklist_t;
+typedef struct dca_work dca_work_t;
+typedef struct dca_request dca_request_t;
+typedef struct dca_stat dca_stat_t;
+typedef struct dca_cookie dca_cookie_t;
+typedef struct dca_device dca_device_t;
+
+/*
+ * This structure is used to identify a specific board.
+ */
+struct dca_device {
+ ushort_t dd_vendor_id;
+ ushort_t dd_device_id;
+ char *dd_model;
+};
+
+/*
+ * Structure representing a node in a DMA chain. (Broadcom calls
+ * these "Data Buffer Chain Entries".)
+ *
+ * Note: the size of this structure must be a multiple of sizeof (intptr_t).
+ */
+struct dca_chain {
+ /* the descriptor */
+ caddr_t dc_desc_kaddr;
+ /* and the buffer to which it points */
+ size_t dc_buffer_length;
+ ddi_dma_handle_t dc_buffer_dmah;
+ caddr_t dc_buffer_kaddr;
+ /* physical addresses */
+ uint32_t dc_desc_paddr;
+ uint32_t dc_buffer_paddr;
+ uint32_t dc_next_paddr;
+};
+
+/*
+ * Linked-list linkage.
+ */
+struct dca_listnode {
+ dca_listnode_t *dl_next;
+ dca_listnode_t *dl_prev;
+ dca_listnode_t *dl_next2;
+ dca_listnode_t *dl_prev2;
+};
+
+typedef enum dca_mech_type {
+ DES_CBC_MECH_INFO_TYPE, /* SUN_CKM_DES_CBC */
+ DES3_CBC_MECH_INFO_TYPE, /* SUN_CKM_DES3_CBC */
+ DSA_MECH_INFO_TYPE, /* SUN_CKM_DSA */
+ RSA_X_509_MECH_INFO_TYPE, /* SUN_CKM_RSA_X_509 */
+ RSA_PKCS_MECH_INFO_TYPE /* SUN_CKM_RSA_PKCS */
+} dca_mech_type_t;
+
+#define SUN_CKM_DSA "CKM_DSA"
+
+struct dca_rng {
+ uint32_t dr_chunklen;
+};
+
+union dca_parameters {
+ struct dca_rng dp_rng;
+};
+
+typedef struct dca_ctx {
+ /*
+ * The following are context fields for Deimos 2.0.
+ */
+ crypto_mech_type_t ctx_cm_type; /* Mechanism type */
+ int mode; /* Mode of operation */
+ int atomic; /* Boolean */
+
+ /* Fields for RSA and DSA */
+ uchar_t *mod; /* RSA modulus */
+ unsigned modlen; /* RSA modulus length */
+ unsigned pqfix; /* RSA flag */
+
+ /* Fields for DES and 3DES */
+ uint32_t iv[2];
+ uint32_t key[6];
+ int residlen;
+ uchar_t resid[DESBLOCK];
+ int activeresidlen;
+ uchar_t activeresid[DESBLOCK];
+ crypto_data_t in_dup; /* input data duplicate */
+} dca_ctx_t;
+
+/*
+ * Work structure. One of these per actual job submitted to an MCR.
+ * Contains everything we need to submit the job, and everything we
+ * need to notify caller and release resources when the completion
+ * interrupt comes.
+ */
+struct dca_request {
+ dca_listnode_t dr_linkage;
+ uint16_t dr_pkt_length;
+ crypto_req_handle_t dr_kcf_req;
+ dca_t *dr_dca;
+ dca_worklist_t *dr_wlp;
+ /*
+ * Consumer's I/O buffers.
+ */
+ crypto_data_t *dr_in;
+ crypto_data_t *dr_out;
+ dca_ctx_t dr_ctx;
+ /*
+ * Chains and DMA structures.
+ */
+ size_t dr_dma_size;
+ uint32_t dr_ctx_paddr;
+ caddr_t dr_ctx_kaddr;
+ ddi_acc_handle_t dr_ctx_acch;
+ ddi_dma_handle_t dr_ctx_dmah;
+ /*
+ * Scratch input buffer.
+ */
+ ddi_acc_handle_t dr_ibuf_acch;
+ ddi_dma_handle_t dr_ibuf_dmah;
+ caddr_t dr_ibuf_kaddr;
+ uint32_t dr_ibuf_paddr;
+
+ /*
+ * Scratch output buffer.
+ */
+ ddi_acc_handle_t dr_obuf_acch;
+ ddi_dma_handle_t dr_obuf_dmah;
+ caddr_t dr_obuf_kaddr;
+ uint32_t dr_obuf_paddr;
+
+ /*
+ * Values to program MCR with.
+ */
+ uint32_t dr_in_paddr;
+ uint32_t dr_out_paddr;
+ uint32_t dr_in_next;
+ uint32_t dr_out_next;
+ uint16_t dr_in_len;
+ uint16_t dr_out_len;
+ /*
+ * Callback.
+ */
+ void (*dr_callback)(dca_request_t *, int);
+ /*
+ * Other stuff.
+ */
+ uint32_t dr_flags;
+ /*
+ * Algorithm specific parameters.
+ */
+ void *dr_context;
+ union dca_parameters dr_param;
+ /*
+ * Statistics.
+ */
+ int dr_job_stat;
+ int dr_byte_stat;
+
+ /* Pre-mapped input and output data buffer chain support */
+ dca_chain_t dr_ibuf_head;
+ dca_chain_t dr_obuf_head;
+
+ /*
+ * User buffers are mapped to DMA handles dynamically. The physically
+ * contiguous blocks (>= a page) are built into a data buffer chain.
+ */
+ dca_chain_t dr_chain_in_head;
+ ddi_dma_handle_t dr_chain_in_dmah;
+
+ dca_chain_t dr_chain_out_head;
+ ddi_dma_handle_t dr_chain_out_dmah;
+
+ /* Offset in the context page for storing dynamic buffer chains */
+ int dr_offset;
+
+ /* Destroy this request if true */
+ int destroy;
+};
+
+/*
+ * Request flags (dca_request_t.dr_flags).
+ */
+#define DR_INPLACE 0x002
+#define DR_SCATTER 0x004
+#define DR_GATHER 0x008
+#define DR_NOCACHE 0x020
+#define DR_ENCRYPT 0x040
+#define DR_DECRYPT 0x080
+#define DR_TRIPLE 0x100 /* triple DES vs. single DES */
+#define DR_ATOMIC 0x200 /* for atomic operation */
+
+struct dca_work {
+ dca_listnode_t dw_linkage;
+ dca_worklist_t *dw_wlp;
+
+ /* DMA access to the MCR and context */
+ ddi_acc_handle_t dw_mcr_acch;
+ ddi_dma_handle_t dw_mcr_dmah;
+ caddr_t dw_mcr_kaddr;
+ uint32_t dw_mcr_paddr;
+
+ dca_request_t *dw_reqs[MAXREQSPERMCR];
+ clock_t dw_lbolt;
+};
+
+/*
+ * MCRs.
+ */
+#define MCR1 0x1
+#define MCR2 0x2
+
+struct dca_worklist {
+ dca_t *dwl_dca;
+ crypto_kcf_provider_handle_t dwl_prov;
+ char dwl_name[16];
+ int dwl_mcr;
+ kmutex_t dwl_lock;
+ kmutex_t dwl_freereqslock;
+ kcondvar_t dwl_cv;
+ dca_listnode_t dwl_freereqs; /* available requests */
+ dca_listnode_t dwl_waitq; /* requests arrive here */
+ dca_listnode_t dwl_freework; /* available work structures */
+ dca_listnode_t dwl_runq; /* work structs sent to chip */
+ timeout_id_t dwl_schedtid;
+ clock_t dwl_lastsubmit;
+ int dwl_count;
+ int dwl_busy;
+ int dwl_lowater;
+ int dwl_hiwater;
+ int dwl_reqspermcr;
+ int dwl_drain; /* for DR (suspend) */
+ /* Kstats */
+ u_longlong_t dwl_submit;
+ u_longlong_t dwl_flowctl;
+};
+
+/*
+ * Operations for MCR1 (bulk stuff).
+ */
+#define CMD_IPSEC 0x0 /* IPsec packet processing */
+#define CMD_SSLMAC 0x1 /* SSL HMAC processing */
+#define CMD_TLSMAC 0x2 /* TLS HMAC processing */
+#define CMD_3DES 0x3 /* SSL/TLS/raw 3DES processing */
+#define CMD_RC4 0x4 /* ARCFOUR processing */
+#define CMD_PUREHASH 0x5 /* Pure MD5/SHA1 hash processing */
+
+/*
+ * Operations for MCR2 (key stuff).
+ */
+#define CMD_DHPUBLIC 0x1 /* DH public key generation */
+#define CMD_DHSHARED 0x2 /* DH shared secret generation */
+#define CMD_RSAPUBLIC 0x3 /* RSA public key operation */
+#define CMD_RSAPRIVATE 0x4 /* RSA private key operation (CRT) */
+#define CMD_DSASIGN 0x5 /* DSA signing operation */
+#define CMD_DSAVERIFY 0x6 /* DSA verification operation */
+#define CMD_RNGDIRECT 0x41 /* Direct access to the RNG */
+#define CMD_RNGSHA1 0x42 /* RNG output processed by SHA1 */
+#define CMD_MODADD 0x43 /* Modular add */
+#define CMD_MODSUB 0x44 /* Modular subtract */
+#define CMD_MODMUL 0x45 /* Modular multiply */
+#define CMD_MODREM 0x46 /* Modular remainder */
+#define CMD_MODEXP 0x47 /* Modular exponentiation */
+#define CMD_MODINV 0x48 /* Modular inverse */
+
+/*
+ * Kstats.
+ */
+#define DS_3DESJOBS 0
+#define DS_3DESBYTES 1
+#define DS_RSAPUBLIC 2
+#define DS_RSAPRIVATE 3
+#define DS_DSASIGN 4
+#define DS_DSAVERIFY 5
+#define DS_RNGJOBS 6
+#define DS_RNGBYTES 7
+#define DS_RNGSHA1JOBS 8
+#define DS_RNGSHA1BYTES 9
+#define DS_MAX 10
+
+#if 0
+/*
+ * note that when reenabling any of these stats, DS_MAX will need to
+ * be adjusted.
+ */
+#define DS_RC4JOBS 11
+#define DS_RC4BYTES 12
+#define DS_DHPUBLIC 13
+#define DS_DHSECRET 14
+#endif
+
+struct dca_stat {
+ kstat_named_t ds_status;
+ kstat_named_t ds_algs[DS_MAX];
+ struct {
+ kstat_named_t ds_submit;
+ kstat_named_t ds_flowctl;
+ kstat_named_t ds_lowater;
+ kstat_named_t ds_hiwater;
+ kstat_named_t ds_maxreqs;
+ } ds_mcr[MAXMCR];
+};
+
+/*
+ * Blocking structure for ioctls.
+ */
+struct dca_cookie {
+ kmutex_t dc_mx;
+ kcondvar_t dc_cv;
+ int dc_outstanding;
+ int dc_status;
+};
+
+/*
+ * Per instance structure.
+ */
+struct dca {
+ dev_info_t *dca_dip;
+ kmutex_t dca_intrlock;
+ caddr_t dca_regs;
+ ddi_acc_handle_t dca_regs_handle;
+ ddi_iblock_cookie_t dca_icookie;
+ timeout_id_t dca_jobtid;
+ ulong_t dca_pagesize;
+ unsigned dca_flags; /* dev state flags */
+
+ /*
+ * Work requests.
+ */
+ dca_worklist_t dca_worklist[MAXMCR];
+
+ /*
+ * hardware model
+ */
+ char *dca_model;
+
+ /*
+ * Kstats. There is no standard for what statistics
+ * Cryptographic Providers should supply, so we're
+ * making them up for now.
+ */
+ kstat_t *dca_ksp;
+ kstat_t *dca_intrstats;
+ u_longlong_t dca_stats[DS_MAX];
+
+ /* For the local random number pool used internally by the dca driver */
+ char *dca_buf1;
+ char *dca_buf2;
+ char *dca_buf_ptr;
+ int dca_index;
+ uint32_t dca_random_filling;
+ kmutex_t dca_random_lock;
+
+ /* FMA capabilities */
+ int fm_capabilities; /* FMA capabilities */
+
+ kmutex_t dca_ctx_list_lock;
+ dca_listnode_t dca_ctx_list;
+};
+
+/*
+ * Device flags (dca_t.dca_flags)
+ */
+#define DCA_FAILED 0x1
+#define DCA_POWERMGMT 0x4
+#define DCA_RNGSHA1 0x8
+
+#define KIOIP(dca) KSTAT_INTR_PTR((dca)->dca_intrstats)
+
+/*
+ * Scatter/gather checks.
+ */
+typedef enum dca_sg_param {
+ DCA_SG_CONTIG = 1,
+ DCA_SG_WALIGN,
+ DCA_SG_PALIGN
+} dca_sg_param_t;
+
+#define FALSE 0
+#define TRUE 1
+
+/*
+ * PCI configuration registers.
+ */
+#define PCI_VENID 0x00 /* vendor id, 16 bits */
+#define PCI_DEVID 0x02 /* device id, 16 bits */
+#define PCI_COMM 0x04 /* command, 16 bits */
+#define PCI_STATUS 0x06 /* status, 16 bits */
+#define PCI_REVID 0x08 /* revision id, 8 bits */
+#define PCI_PROGCLASS 0x09 /* programming class, 8 bits */
+#define PCI_SUBCLASS 0x0A /* subclass, 8 bits */
+#define PCI_CACHELINESZ 0x0C /* cache line size, 8 bits */
+#define PCI_LATTMR 0x0D /* latency timer, 8 bits */
+#define PCI_BIST 0x0F /* builtin-self-test, 8 bits */
+#define PCI_SUBVENID 0x2C /* subsystem vendor id, 16 bits */
+#define PCI_SUBSYSID 0x2E /* subsystem id, 16 bits */
+#define PCI_MINGNT 0x3E /* min grant for burst, 8 bits */
+#define PCI_MAXLAT 0x3F /* maximum latency for burst, 8 bits */
+#define PCI_TRDYTO 0x40 /* TRDY timeout, 8 bits */
+#define PCI_RETRIES 0x41 /* retries bus will perform, 8 bits */
+
+/*
+ * PCI configuration register bit values.
+ */
+#define PCICOMM_FBBE 0x0200 /* fast back-to-back enable */
+#define PCICOMM_SEE 0x0100 /* system error enable */
+#define PCICOMM_PEE 0x0040 /* parity error enable */
+#define PCICOMM_MWIE 0x0010 /* memory write & invalidate enable */
+#define PCICOMM_BME 0x0004 /* bus master enable */
+#define PCICOMM_MAE 0x0002 /* memory access enable */
+
+#define PCISTAT_PERR 0x8000 /* parity error detected */
+#define PCISTAT_SERR 0x4000 /* system error detected */
+#define PCISTAT_MABRT 0x2000 /* master abort detected */
+#define PCISTAT_TABRT 0x1000 /* target abort detected */
+#define PCISTAT_TABRTS 0x0800 /* target abort signaled */
+#define PCISTAT_PARITY 0x0100 /* data parity error detected */
+
+#define PCIREVID_DOMESTIC 0x01 /* domestic version */
+#define PCIREVID_EXPORT 0xE1 /* export version */
+
+/* Note: 5820 errata: BIST feature does not work */
+#define PCIBIST_CAP 0x80 /* BIST capable */
+#define PCIBIST_START 0x40 /* start BIST test */
+#define PCIBIST_ERRMASK 0x0F /* mask of BIST error codes */
+
+/*
+ * Command and Status Registers.
+ */
+#define CSR_MCR1 0x00 /* pointer to MCR1 (bulk) */
+#define CSR_DMACTL 0x04 /* DMA control */
+#define CSR_DMASTAT 0x08 /* DMA status */
+#define CSR_DMAEA 0x0C /* DMA error address */
+#define CSR_MCR2 0x10 /* pointer to MCR2 (exponentiator) */
+
+/*
+ * Command and status register bits.
+ */
+#define DMACTL_RESET 0x80000000U /* reset the chip */
+#define DMACTL_MCR2IE 0x40000000U /* MCR2 interrupt enable */
+#define DMACTL_MCR1IE 0x20000000U /* MCR1 interrupt enable */
+#define DMACTL_OFM 0x10000000U /* output fragment mode */
+#define DMACTL_BE32 0x08000000U /* 32-bit big endian mode */
+#define DMACTL_BE64 0x04000000U /* 64-bit big endian mode */
+#define DMACTL_EIE 0x02000000U /* error interrupt enable */
+#define DMACTL_RNGMASK 0x01800000U /* RNG mode mask */
+#define DMACTL_RNG1 0x00000000U /* 1 RNG bit per cycle */
+#define DMACTL_RNG4 0x00800000U /* 1 RNG bit per 4 cycles */
+#define DMACTL_RNG8 0x01000000U /* 1 RNG bit per 8 cycles */
+#define DMACTL_RNG16 0x01800000U /* 1 RNG bit per 16 cycles */
+#define DMACTL_MODNORM 0x00400000U /* s/w modulus normalization */
+#define DMACTL_FRAGMASK 0x0000FFFFU /* output fragment size */
+
+#define DMASTAT_MAIP 0x80000000U /* master access in progress */
+#define DMASTAT_MCR1FULL 0x40000000U /* MCR1 is full */
+#define DMASTAT_MCR1INT 0x20000000U /* MCR1 interrupted */
+#define DMASTAT_ERRINT 0x10000000U /* error interrupted */
+#define DMASTAT_MCR2FULL 0x08000000U /* MCR2 is full */
+#define DMASTAT_MCR2INT 0x04000000U /* MCR2 interrupted */
+#define DMASTAT_INTERRUPTS 0x34000000U /* all interrupts */
+
+/*
+ * Offsets of things relative to an MCR.
+ */
+#define MCR_COUNT 0 /* 16 bits */
+#define MCR_FLAGS 2 /* 16 bits */
+#define MCR_CTXADDR 4 /* 32 bits */
+
+/*
+ * Basis for size (should be optimized by constant folding):
+ * 4 bytes for flags and #packets.
+ * for each packet:
+ * 2 descriptors (DESC_SIZE)
+ * 4 bytes for context address
+ * 4 bytes for packet length and reserved
+ */
+#define MCR_SIZE (4 + MAXREQSPERMCR * ((2 * DESC_SIZE) + 8))
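+
+/*
+ * With MAXREQSPERMCR == 16 and DESC_SIZE == 16, this works out to
+ * 4 + 16 * (32 + 8) = 644 bytes.
+ */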
+
+/*
+ * MCR flags.
+ */
+#define MCRFLAG_FINISHED 0x0001 /* MCR processing complete */
+#define MCRFLAG_ERROR 0x0002 /* set if an error occurred */
+#define MCRFLAG_ERRORMASK 0xff00 /* error code bits */
+
+/*
+ * Fields within a descriptor (data buffer chain).
+ */
+#define DESC_BUFADDR 0 /* 32 bits */
+#define DESC_NEXT 4 /* 32 bits */
+#define DESC_LENGTH 8 /* 16 bits */
+#define DESC_RSVD 10 /* 16 bits */
+#define DESC_SIZE 16 /* ROUNDUP(12, 16) - descriptor size (bytes) */
+
+/*
+ * Offsets of fields within context structures, see Broadcom spec.
+ */
+#define CTX_LENGTH 0 /* 16 bits */
+#define CTX_CMD 2 /* 16 bits */
+#define CTX_MAXLENGTH 768 /* max size of ctx, fits anything */
+
+#define CTX_3DESDIRECTION 4 /* 16 bits */
+#define CTX_3DESKEY1HI 8 /* 32 bits */
+#define CTX_3DESKEY1LO 12 /* 32 bits */
+#define CTX_3DESKEY2HI 16 /* 32 bits */
+#define CTX_3DESKEY2LO 20 /* 32 bits */
+#define CTX_3DESKEY3HI 24 /* 32 bits */
+#define CTX_3DESKEY3LO 28 /* 32 bits */
+#define CTX_3DESIVHI 32 /* 32 bits */
+#define CTX_3DESIVLO 36 /* 32 bits */
+
+#define CTX_IPSECFLAGS 4 /* 16 bits */
+#define CTX_IPSECOFFSET 6 /* 16 bits */
+#define CTX_IPSECKEY1HI 8 /* 32 bits */
+#define CTX_IPSECKEY1LO 12 /* 32 bits */
+#define CTX_IPSECKEY2HI 16 /* 32 bits */
+#define CTX_IPSECKEY2LO 20 /* 32 bits */
+#define CTX_IPSECKEY3HI 24 /* 32 bits */
+#define CTX_IPSECKEY3LO 28 /* 32 bits */
+#define CTX_IPSECIVHI 32 /* 32 bits */
+#define CTX_IPSECIVLO 36 /* 32 bits */
+#define CTX_IPSECHMACINNER1 40 /* 32 bits */
+#define CTX_IPSECHMACINNER2 44 /* 32 bits */
+#define CTX_IPSECHMACINNER3 48 /* 32 bits */
+#define CTX_IPSECHMACINNER4 52 /* 32 bits */
+#define CTX_IPSECHMACINNER5 56 /* 32 bits */
+#define CTX_IPSECHMACOUTER1 60 /* 32 bits */
+#define CTX_IPSECHMACOUTER2 64 /* 32 bits */
+#define CTX_IPSECHMACOUTER3 68 /* 32 bits */
+#define CTX_IPSECHMACOUTER4 72 /* 32 bits */
+#define CTX_IPSECHMACOUTER5 76 /* 32 bits */
+
+#define CTX_RSAEXPLEN 4 /* 16 bits */
+#define CTX_RSAMODLEN 6 /* 16 bits */
+#define CTX_RSABIGNUMS 8 /* variable length */
+#define CTX_RSAQLEN 4 /* 16 bits */
+#define CTX_RSAPLEN 6 /* 16 bits */
+
+#define CTX_DSAMSGTYPE 4 /* 16 bits */
+#define CTX_DSARSVD 6 /* 16 bits */
+#define CTX_DSARNG 8 /* 16 bits */
+#define CTX_DSAPLEN 10 /* 16 bits */
+#define CTX_DSABIGNUMS 12 /* variable length */
+
+/*
+ * Values for specific operations.
+ */
+#define CTX_RNG_LENGTH 64 /* context length for RNG (64 min) */
+#define CTX_3DES_LENGTH 64 /* context length for 3DES (64 min) */
+#define CTX_3DES_DECRYPT 0x4000 /* perform decryption */
+#define CTX_3DES_ENCRYPT 0x0000 /* perform encryption */
+#define CTX_IPSEC_LENGTH 80 /* context length for IPsec */
+#define CTX_IPSEC_ENCRYPT 0x8000 /* perform encryption */
+#define CTX_IPSEC_DECRYPT 0xc000 /* perform decryption */
+#define CTX_IPSEC_HMAC_MD5 0x1000 /* HMAC-MD5 authentication */
+#define CTX_IPSEC_HMAC_SHA1 0x2000 /* HMAC-SHA1 authentication */
+#define CTX_DSAMSGTYPE_SHA1 0 /* Message is SHA1 */
+#define CTX_DSAMSGTYPE_TEXT 1 /* Generate SHA1 hash first */
+#define CTX_DSARNG_GEN 1 /* Generate random k */
+#define CTX_DSARNG_SUPPLY 0 /* Random k is supplied */
+
+/*
+ * Macros to access fields within the MCR. Note that this includes the
+ * context fields as well, since the context is just offset from the
+ * base of the MCR.
+ */
+
+#define PUTMCR32(work, reg, val) \
+ ddi_put32(work->dw_mcr_acch, \
+ (uint32_t *)(work->dw_mcr_kaddr + reg), val)
+
+#define PUTMCR16(work, reg, val) \
+ ddi_put16(work->dw_mcr_acch, \
+ (uint16_t *)(work->dw_mcr_kaddr + reg), val)
+
+#define GETMCR32(work, reg) \
+ ddi_get32(work->dw_mcr_acch, (uint32_t *)(work->dw_mcr_kaddr + reg))
+
+#define GETMCR16(work, reg) \
+ ddi_get16(work->dw_mcr_acch, (uint16_t *)(work->dw_mcr_kaddr + reg))
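+
+/*
+ * Hypothetical usage sketch of the MCR accessors; "work" is a
+ * dca_work_t, and nreqs/ctxpaddr are invented names:
+ *
+ *	PUTMCR16(work, MCR_COUNT, nreqs);
+ *	PUTMCR16(work, MCR_FLAGS, 0);
+ *	PUTMCR32(work, MCR_CTXADDR, ctxpaddr);
+ */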
+
+#define PUTDESC32(req, dc_desc_kaddr, reg, val) \
+ ddi_put32(req->dr_ctx_acch, \
+ (uint32_t *)(dc_desc_kaddr + reg), val)
+
+#define PUTDESC16(req, dc_desc_kaddr, reg, val) \
+ ddi_put16(req->dr_ctx_acch, \
+ (uint16_t *)(dc_desc_kaddr + reg), val)
+
+/* XXX: define the GET forms for descriptors only if needed */
+
+#define PUTCTX32(req, reg, val) \
+ ddi_put32(req->dr_ctx_acch, \
+ (uint32_t *)(req->dr_ctx_kaddr + reg), val)
+
+#define PUTCTX16(req, reg, val) \
+ ddi_put16(req->dr_ctx_acch, \
+ (uint16_t *)(req->dr_ctx_kaddr + reg), val)
+
+#define CTXBCOPY(req, src, dst, count) \
+ ddi_rep_put8(req->dr_ctx_acch, (uchar_t *)src, (uchar_t *)dst, count, \
+ DDI_DEV_AUTOINCR)
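+
+/*
+ * Hypothetical sketch of programming a 3DES encrypt context with the
+ * macros above; "req" is a dca_request_t and the key/IV word names
+ * are invented (the CTX_CMD opcode would be set similarly):
+ *
+ *	PUTCTX16(req, CTX_LENGTH, CTX_3DES_LENGTH);
+ *	PUTCTX16(req, CTX_3DESDIRECTION, CTX_3DES_ENCRYPT);
+ *	PUTCTX32(req, CTX_3DESKEY1HI, key1hi);
+ *	PUTCTX32(req, CTX_3DESKEY1LO, key1lo);
+ *	PUTCTX32(req, CTX_3DESIVHI, ivhi);
+ *	PUTCTX32(req, CTX_3DESIVLO, ivlo);
+ */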
+
+/*
+ * Register access.
+ */
+#define GETCSR(dca, reg) \
+ ddi_get32(dca->dca_regs_handle, (uint_t *)(dca->dca_regs + reg))
+
+#define PUTCSR(dca, reg, val) \
+ ddi_put32(dca->dca_regs_handle, (uint_t *)(dca->dca_regs + reg), val)
+
+#define SETBIT(dca, reg, val) \
+ PUTCSR(dca, reg, GETCSR(dca, reg) | val)
+
+#define CLRBIT(dca, reg, val) \
+ PUTCSR(dca, reg, GETCSR(dca, reg) & ~val)
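+
+/*
+ * For example (illustrative; CSR_DMACTRL is assumed to be the DMA
+ * control register offset defined earlier in this header):
+ *
+ *	SETBIT(dca, CSR_DMACTRL, DMACTL_MODNORM);
+ *	CLRBIT(dca, CSR_DMACTRL, DMACTL_MODNORM);
+ */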
+
+/*
+ * Used to guarantee alignment, and to split 64-bit values into
+ * 32-bit halves.
+ */
+#define ROUNDUP(a, n) (((a) + ((n) - 1)) & ~((n) - 1))
+#define ROUNDDOWN(a, n) (((a) & ~((n) - 1)))
+#define HIDBLWORD(x) (((x) & 0xffffffff00000000ULL) >> 32)
+#define LODBLWORD(x) ((x) & 0xffffffffULL)
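+
+/*
+ * E.g. ROUNDUP(12, 16) == 16 (see DESC_SIZE above), and a 64-bit
+ * DMA address pa splits into HIDBLWORD(pa) and LODBLWORD(pa) for
+ * programming 32-bit descriptor fields.
+ */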
+
+/*
+ * Driver hardening related.
+ */
+#define CHECK_REGS(dca) ddi_check_acc_handle(dca->dca_regs_handle)
+
+/*
+ * Other utility macros.
+ */
+#define QEMPTY(q) ((q)->dl_next == (q))
+#define BITS2BYTES(b) ((b) >> 3)
+#define WORKLIST(dca, mcr) (&((dca)->dca_worklist[mcr - 1]))
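+
+/*
+ * E.g. BITS2BYTES(1024) == 128 for a 1024-bit modulus, and
+ * WORKLIST(dca, 1) yields the worklist backing MCR1.
+ */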
+
+/*
+ * Debug stuff.
+ */
+#ifdef DEBUG
+#define DWARN 0x0001
+#define DPCI 0x0002
+#define DINTR 0x0004
+#define DSTART 0x0008
+#define DRECLAIM 0x0010
+#define DCHATTY 0x0020
+#define DMOD 0x0040 /* _init/_fini/_info/attach/detach */
+#define DENTRY 0x0080 /* crypto routine entry/exit points */
+
+void dca_dprintf(dca_t *, int, const char *, ...);
+#define DBG dca_dprintf
+#else
+#define DBG(dca, lvl, ...)
+#endif
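+
+/*
+ * Hypothetical usage (expands to nothing in non-DEBUG builds):
+ *
+ *	DBG(dca, DINTR, "interrupt: status = 0x%x", status);
+ */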
+
+/*
+ * Some PKCS#11 defines, since no PKCS#11 header files are included.
+ */
+#define CKO_PUBLIC_KEY 0x00000002
+#define CKO_PRIVATE_KEY 0x00000003
+
+#define CKA_CLASS 0x00000000
+#define CKA_VALUE 0x00000011
+#define CKA_KEY_TYPE 0x00000100
+#define CKA_MODULUS 0x00000120
+#define CKA_PUBLIC_EXPONENT 0x00000122
+#define CKA_PRIVATE_EXPONENT 0x00000123
+#define CKA_PRIME_1 0x00000124
+#define CKA_PRIME_2 0x00000125
+#define CKA_EXPONENT_1 0x00000126
+#define CKA_EXPONENT_2 0x00000127
+#define CKA_COEFFICIENT 0x00000128
+#define CKA_PRIME 0x00000130
+#define CKA_SUBPRIME 0x00000131
+#define CKA_BASE 0x00000132
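+
+/*
+ * Illustrative sketch: extracting the modulus from an RSA key with
+ * the attribute helpers declared below; "key" is a crypto_key_t and
+ * the local names (attr, mod, modlen) are invented:
+ *
+ *	attr = dca_get_key_attr(key);
+ *	if (dca_attr_lookup_uint8_array(attr, key->ck_count,
+ *	    CKA_MODULUS, (void **)&mod, &modlen) != CRYPTO_SUCCESS)
+ *		return (CRYPTO_ARGUMENTS_BAD);
+ */
+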
+/*
+ * Driver globals.
+ */
+extern int dca_mindma;
+extern int dca_hardening;
+
+/*
+ * Prototypes.
+ */
+
+/*
+ * dca_debug.c
+ */
+void dca_error(dca_t *, const char *, ...);
+void dca_diperror(dev_info_t *, const char *, ...);
+void dca_dipverror(dev_info_t *, const char *, va_list);
+
+/*
+ * dca_3des.c
+ */
+int dca_3desctxinit(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
+ int, int);
+int dca_3des(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t, int);
+int dca_3desupdate(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t, int);
+int dca_3desfinal(crypto_ctx_t *, crypto_data_t *, int);
+int dca_3desatomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ int, crypto_req_handle_t, int);
+void dca_3desctxfree(void *);
+
+/*
+ * dca_rsa.c
+ */
+int dca_rsastart(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t, int);
+int dca_rsainit(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, int);
+void dca_rsactxfree(void *);
+int dca_rsaatomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ int, crypto_req_handle_t, int);
+
+/*
+ * dca_dsa.c
+ */
+int dca_dsa_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+int dca_dsa_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
+ crypto_req_handle_t);
+int dca_dsainit(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, int,
+ int);
+void dca_dsactxfree(void *);
+int dca_dsaatomic(crypto_provider_handle_t, crypto_session_id_t,
+ crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
+ int, crypto_req_handle_t, int);
+
+/*
+ * dca_rng.c
+ */
+int dca_rng(dca_t *, uchar_t *, size_t len, crypto_req_handle_t);
+int dca_random_buffer(dca_t *dca, caddr_t buf, int len);
+int dca_random_init(void);
+void dca_random_fini(void);
+
+/*
+ * dca_kstat.c
+ */
+void dca_ksinit(dca_t *);
+
+/*
+ * dca.c
+ */
+void dca_rmqueue(dca_listnode_t *);
+dca_request_t *dca_getreq(dca_t *, int, int);
+void dca_freereq(dca_request_t *);
+int dca_bindchains(dca_request_t *, size_t, size_t);
+int dca_unbindchains(dca_request_t *);
+int dca_start(dca_t *, dca_request_t *, int, int);
+void dca_done(dca_request_t *, int);
+void dca_destroyreq(dca_request_t *);
+int dca_length(crypto_data_t *);
+int dca_gather(crypto_data_t *, char *, int, int);
+int dca_resid_gather(crypto_data_t *, char *, int *, char *, int);
+int dca_scatter(const char *, crypto_data_t *, int, int);
+int dca_bcmp_reverse(const void *s1, const void *s2, size_t n);
+int dca_dupcrypto(crypto_data_t *, crypto_data_t *);
+int dca_verifyio(crypto_data_t *, crypto_data_t *);
+int dca_getbufbytes(crypto_data_t *, size_t, int, uchar_t *);
+int dca_sgcheck(dca_t *, crypto_data_t *, dca_sg_param_t);
+crypto_object_attribute_t *
+ dca_get_key_attr(crypto_key_t *);
+int dca_attr_lookup_uint32(crypto_object_attribute_t *, uint_t, uint64_t,
+ uint32_t *);
+int dca_attr_lookup_uint8_array(crypto_object_attribute_t *, uint_t,
+ uint64_t, void **, unsigned int *);
+crypto_object_attribute_t *
+ dca_find_attribute(crypto_object_attribute_t *, uint_t, uint64_t);
+caddr_t dca_bufdaddr(crypto_data_t *);
+void dca_rcoalesce(dca_request_t *, int);
+void dca_runcoalesce(dca_request_t *);
+int dca_bitlen(unsigned char *, int);
+uint16_t dca_padhalf(int);
+uint16_t dca_padfull(int);
+void dca_reverse(void *, void *, int, int);
+int dca_numcmp(caddr_t, int, caddr_t, int);
+int dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
+ dca_fma_eclass_t eclass_index);
+int dca_free_context(crypto_ctx_t *ctx);
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CRYPTO_DCA_H */
diff --git a/usr/src/uts/intel/Makefile b/usr/src/uts/intel/Makefile
index c8a3880452..3b0ad1308d 100644
--- a/usr/src/uts/intel/Makefile
+++ b/usr/src/uts/intel/Makefile
@@ -51,6 +51,11 @@ LINT_LIBS += $(LINT_XMODLIBS:%=$(LINT_LIB_DIR)/llib-l%.ln)
DRV_KMODS += dprov
#
+# dca is delivered in the SUNWdcar package.
+#
+DRV_KMODS += dca
+
+#
# CRYPTO_EK_KMODS modules go in the encryption pack (SUNWcry*)
# They need to be listed separately since they duplicate global symbols
# causing the 2nd pass of lint on the kernel to complain. CRYPTO_EK_KMODS
diff --git a/usr/src/uts/intel/dca/Makefile b/usr/src/uts/intel/dca/Makefile
new file mode 100644
index 0000000000..209d7dd7eb
--- /dev/null
+++ b/usr/src/uts/intel/dca/Makefile
@@ -0,0 +1,92 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the DCA kCF provider.
+#
+# intel implementation architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = dca
+OBJECTS = $(DCA_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(DCA_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/common/crypto/io
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+
+# set signing mode
+ELFSIGN_MOD = $(ELFSIGN_CRYPTO)
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY) $(SRC_CONFFILE)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOTLINK) $(ROOT_CONFFILE)
+
+# C99 mode is needed for dca
+CFLAGS += $(C99_ENABLE)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+$(ROOTLINK): $(ROOT_CRYPTO_DIR) $(ROOTMODULE)
+ -$(RM) $@; ln $(ROOTMODULE) $@
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ
diff --git a/usr/src/uts/sparc/Makefile b/usr/src/uts/sparc/Makefile
index 3f720c577e..e5a0d19bef 100644
--- a/usr/src/uts/sparc/Makefile
+++ b/usr/src/uts/sparc/Makefile
@@ -44,6 +44,7 @@ LINT_LIBS += $(SVVS_KMODS:%=$(LINT_LIB_DIR)/llib-l%.ln)
LINT_XMODLIBS = $(XMODS:e1000g=)
LINT_LIBS += $(LINT_XMODLIBS:%=$(LINT_LIB_DIR)/llib-l%.ln)
DRV_KMODS += dprov
+DRV_KMODS += dca
#
# CRYPTO_EK_KMODS modules go in the encryption pack (SUNWcry*)
# They need to be listed separately since they duplicate global symbols
diff --git a/usr/src/uts/sparc/dca/Makefile b/usr/src/uts/sparc/dca/Makefile
new file mode 100644
index 0000000000..cde6eba5d6
--- /dev/null
+++ b/usr/src/uts/sparc/dca/Makefile
@@ -0,0 +1,95 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the DCA kCF provider.
+#
+# sparc implementation architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = dca
+OBJECTS = $(DCA_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(DCA_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/common/crypto/io
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sparc/Makefile.sparc
+
+# set signing mode
+ELFSIGN_MOD = $(ELFSIGN_CRYPTO)
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY) $(SRC_CONFFILE)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOTLINK) $(ROOT_CONFFILE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE)
+
+# C99 mode is needed for dca
+CFLAGS += $(C99_ENABLE)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+$(ROOTLINK): $(ROOT_CRYPTO_DIR) $(ROOTMODULE)
+ -$(RM) $@; ln $(ROOTMODULE) $@
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sparc/Makefile.targ