summaryrefslogtreecommitdiff
path: root/usr/src/uts/common/crypto
diff options
context:
space:
mode:
Diffstat (limited to 'usr/src/uts/common/crypto')
-rw-r--r--usr/src/uts/common/crypto/api/kcf_cipher.c131
-rw-r--r--usr/src/uts/common/crypto/api/kcf_digest.c80
-rw-r--r--usr/src/uts/common/crypto/api/kcf_dual.c572
-rw-r--r--usr/src/uts/common/crypto/api/kcf_keys.c243
-rw-r--r--usr/src/uts/common/crypto/api/kcf_mac.c128
-rw-r--r--usr/src/uts/common/crypto/api/kcf_miscapi.c199
-rw-r--r--usr/src/uts/common/crypto/api/kcf_object.c383
-rw-r--r--usr/src/uts/common/crypto/api/kcf_session.c197
-rw-r--r--usr/src/uts/common/crypto/api/kcf_sign.c138
-rw-r--r--usr/src/uts/common/crypto/api/kcf_verify.c145
-rw-r--r--usr/src/uts/common/crypto/core/kcf.c10
-rw-r--r--usr/src/uts/common/crypto/core/kcf_callprov.c65
-rw-r--r--usr/src/uts/common/crypto/core/kcf_cryptoadm.c3
-rw-r--r--usr/src/uts/common/crypto/core/kcf_prov_tabs.c212
-rw-r--r--usr/src/uts/common/crypto/io/aes.c322
-rw-r--r--usr/src/uts/common/crypto/io/crypto.c535
-rw-r--r--usr/src/uts/common/crypto/io/dprov.c499
-rw-r--r--usr/src/uts/common/crypto/spi/kcf_spi.c51
18 files changed, 3334 insertions, 579 deletions
diff --git a/usr/src/uts/common/crypto/api/kcf_cipher.c b/usr/src/uts/common/crypto/api/kcf_cipher.c
index 5d3c8b6b57..04eb1e70fd 100644
--- a/usr/src/uts/common/crypto/api/kcf_cipher.c
+++ b/usr/src/uts/common/crypto/api/kcf_cipher.c
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -29,12 +29,16 @@
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/kmem.h>
+#include <sys/sysmacros.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_CIPHER_OFFSET(f) offsetof(crypto_cipher_ops_t, f)
+
/*
* Encryption and decryption routines.
*/
@@ -102,7 +106,7 @@
* See comment in the beginning of the file.
*/
static int
-crypto_cipher_init_prov(kcf_provider_desc_t *pd, crypto_session_id_t sid,
+crypto_cipher_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
crypto_mechanism_t *mech, crypto_key_t *key,
crypto_spi_ctx_template_t tmpl, crypto_context_t *ctxp,
crypto_call_req_t *crq, crypto_func_group_t func)
@@ -110,28 +114,50 @@ crypto_cipher_init_prov(kcf_provider_desc_t *pd, crypto_session_id_t sid,
int error;
crypto_ctx_t *ctx;
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
ASSERT(KCF_PROV_REFHELD(pd));
- /* First, allocate and initialize the canonical context */
- if ((ctx = kcf_new_ctx(crq, pd, sid)) == NULL)
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ if (func == CRYPTO_FG_ENCRYPT) {
+ error = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(cipher_ops),
+ CRYPTO_CIPHER_OFFSET(encrypt_init),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+ } else {
+ error = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(cipher_ops),
+ CRYPTO_CIPHER_OFFSET(decrypt_init),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+ }
+
+ if (error != CRYPTO_SUCCESS)
+ return (error);
+ }
+
+ /* Allocate and initialize the canonical context */
+ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
return (CRYPTO_HOST_MEMORY);
+ }
/* The fast path for SW providers. */
if (CHECK_FASTPATH(crq, pd)) {
crypto_mechanism_t lmech;
lmech = *mech;
- KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
if (func == CRYPTO_FG_ENCRYPT)
- error = KCF_PROV_ENCRYPT_INIT(pd, ctx, &lmech,
- key, tmpl, KCF_SWFP_RHNDL(crq));
+ error = KCF_PROV_ENCRYPT_INIT(real_provider, ctx,
+ &lmech, key, tmpl, KCF_SWFP_RHNDL(crq));
else {
ASSERT(func == CRYPTO_FG_DECRYPT);
- error = KCF_PROV_DECRYPT_INIT(pd, ctx, &lmech,
- key, tmpl, KCF_SWFP_RHNDL(crq));
+ error = KCF_PROV_DECRYPT_INIT(real_provider, ctx,
+ &lmech, key, tmpl, KCF_SWFP_RHNDL(crq));
}
KCF_PROV_INCRSTATS(pd, error);
} else {
@@ -144,9 +170,13 @@ crypto_cipher_init_prov(kcf_provider_desc_t *pd, crypto_session_id_t sid,
mech, key, NULL, NULL, tmpl);
}
- error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
+ error = kcf_submit_request(real_provider, ctx, crq, &params,
+ B_FALSE);
}
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
*ctxp = (crypto_context_t)ctx;
else {
@@ -252,17 +282,36 @@ retry:
* See comment in the beginning of the file.
*/
int
-crypto_encrypt_prov(crypto_mechanism_t *mech, crypto_data_t *plaintext,
- crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *ciphertext,
- crypto_call_req_t *crq, kcf_provider_desc_t *pd, crypto_session_id_t sid)
+crypto_encrypt_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_data_t *plaintext, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_data_t *ciphertext,
+ crypto_call_req_t *crq)
{
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int error;
ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ error = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(cipher_ops),
+ CRYPTO_CIPHER_OFFSET(encrypt_atomic),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+
+ if (error != CRYPTO_SUCCESS)
+ return (error);
+ }
+
KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
plaintext, ciphertext, tmpl);
- return (kcf_submit_request(pd, NULL, crq, &params, B_FALSE));
+ error = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (error);
}
/*
@@ -347,7 +396,7 @@ retry:
* Calls crypto_cipher_init_prov() to initialize an encryption operation.
*/
int
-crypto_encrypt_init_prov(kcf_provider_desc_t *pd, crypto_session_id_t sid,
+crypto_encrypt_init_prov(crypto_provider_t pd, crypto_session_id_t sid,
crypto_mechanism_t *mech, crypto_key_t *key,
crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
crypto_call_req_t *crq)
@@ -405,6 +454,7 @@ crypto_encrypt_update(crypto_context_t context, crypto_data_t *plaintext,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
/* The fast path for SW providers. */
@@ -413,8 +463,8 @@ crypto_encrypt_update(crypto_context_t context, crypto_data_t *plaintext,
ciphertext, NULL);
KCF_PROV_INCRSTATS(pd, error);
} else {
- KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE, pd->pd_sid,
- NULL, NULL, plaintext, ciphertext, NULL);
+ KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE,
+ ctx->cc_session, NULL, NULL, plaintext, ciphertext, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
@@ -456,6 +506,7 @@ crypto_encrypt_final(crypto_context_t context, crypto_data_t *ciphertext,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
/* The fast path for SW providers. */
@@ -463,8 +514,8 @@ crypto_encrypt_final(crypto_context_t context, crypto_data_t *ciphertext,
error = KCF_PROV_ENCRYPT_FINAL(pd, ctx, ciphertext, NULL);
KCF_PROV_INCRSTATS(pd, error);
} else {
- KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_FINAL, pd->pd_sid,
- NULL, NULL, NULL, ciphertext, NULL);
+ KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_FINAL,
+ ctx->cc_session, NULL, NULL, NULL, ciphertext, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
@@ -508,17 +559,36 @@ crypto_encrypt_final(crypto_context_t context, crypto_data_t *ciphertext,
* See comment in the beginning of the file.
*/
int
-crypto_decrypt_prov(crypto_mechanism_t *mech, crypto_data_t *ciphertext,
- crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *plaintext,
- crypto_call_req_t *crq, kcf_provider_desc_t *pd, crypto_session_id_t sid)
+crypto_decrypt_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_data_t *ciphertext, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_data_t *plaintext,
+ crypto_call_req_t *crq)
{
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(cipher_ops),
+ CRYPTO_CIPHER_OFFSET(decrypt_atomic),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
ciphertext, plaintext, tmpl);
- return (kcf_submit_request(pd, NULL, crq, &params, B_FALSE));
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
}
/*
@@ -604,7 +674,7 @@ retry:
* Calls crypto_cipher_init_prov() to initialize a decryption operation
*/
int
-crypto_decrypt_init_prov(kcf_provider_desc_t *pd, crypto_session_id_t sid,
+crypto_decrypt_init_prov(crypto_provider_t pd, crypto_session_id_t sid,
crypto_mechanism_t *mech, crypto_key_t *key,
crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
crypto_call_req_t *crq)
@@ -662,6 +732,7 @@ crypto_decrypt_update(crypto_context_t context, crypto_data_t *ciphertext,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
/* The fast path for SW providers. */
@@ -670,8 +741,8 @@ crypto_decrypt_update(crypto_context_t context, crypto_data_t *ciphertext,
plaintext, NULL);
KCF_PROV_INCRSTATS(pd, error);
} else {
- KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE, pd->pd_sid,
- NULL, NULL, ciphertext, plaintext, NULL);
+ KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE,
+ ctx->cc_session, NULL, NULL, ciphertext, plaintext, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
@@ -713,15 +784,17 @@ crypto_decrypt_final(crypto_context_t context, crypto_data_t *plaintext,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
- error = KCF_PROV_DECRYPT_FINAL(pd, ctx, plaintext, NULL);
+ error = KCF_PROV_DECRYPT_FINAL(pd, ctx, plaintext,
+ NULL);
KCF_PROV_INCRSTATS(pd, error);
} else {
- KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_FINAL, pd->pd_sid,
- NULL, NULL, NULL, plaintext, NULL);
+ KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_FINAL,
+ ctx->cc_session, NULL, NULL, NULL, plaintext, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
diff --git a/usr/src/uts/common/crypto/api/kcf_digest.c b/usr/src/uts/common/crypto/api/kcf_digest.c
index a713a473de..0058af01b4 100644
--- a/usr/src/uts/common/crypto/api/kcf_digest.c
+++ b/usr/src/uts/common/crypto/api/kcf_digest.c
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -30,12 +30,16 @@
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
+#include <sys/sysmacros.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_DIGEST_OFFSET(f) offsetof(crypto_digest_ops_t, f)
+
/*
* Message digest routines
*/
@@ -92,18 +96,35 @@
* See comment in the beginning of the file.
*/
int
-crypto_digest_prov(crypto_mechanism_t *mech, crypto_data_t *data,
- crypto_data_t *digest, crypto_call_req_t *crq, kcf_provider_desc_t *pd,
- crypto_session_id_t sid)
+crypto_digest_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_data_t *data, crypto_data_t *digest,
+ crypto_call_req_t *crq)
{
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(digest_ops),
+ CRYPTO_DIGEST_OFFSET(digest_atomic), CHECK_RESTRICT(crq),
+ pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, NULL,
data, digest);
/* no crypto context to carry between multiple parts. */
- return (kcf_submit_request(pd, NULL, crq, &params, B_FALSE));
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
}
/*
@@ -184,35 +205,53 @@ retry:
* descriptor before calling this function.
*/
int
-crypto_digest_init_prov(kcf_provider_desc_t *pd,
- crypto_session_id_t sid, crypto_mechanism_t *mech,
- crypto_context_t *ctxp, crypto_call_req_t *crq)
+crypto_digest_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_context_t *ctxp, crypto_call_req_t *crq)
{
int error;
crypto_ctx_t *ctx;
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
ASSERT(KCF_PROV_REFHELD(pd));
- /* First, allocate and initialize the canonical context */
- if ((ctx = kcf_new_ctx(crq, pd, sid)) == NULL)
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ error = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(digest_ops),
+ CRYPTO_DIGEST_OFFSET(digest_init),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+
+ if (error != CRYPTO_SUCCESS)
+ return (error);
+ }
+
+ /* Allocate and initialize the canonical context */
+ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
return (CRYPTO_HOST_MEMORY);
+ }
/* The fast path for SW providers. */
if (CHECK_FASTPATH(crq, pd)) {
crypto_mechanism_t lmech;
lmech = *mech;
- KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
- error = KCF_PROV_DIGEST_INIT(pd, ctx, &lmech,
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
+ error = KCF_PROV_DIGEST_INIT(real_provider, ctx, &lmech,
KCF_SWFP_RHNDL(crq));
KCF_PROV_INCRSTATS(pd, error);
} else {
KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_INIT, sid,
mech, NULL, NULL, NULL);
- error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
+ error = kcf_submit_request(real_provider, ctx, crq, &params,
+ B_FALSE);
}
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
*ctxp = (crypto_context_t)ctx;
else {
@@ -295,6 +334,7 @@ crypto_digest_update(crypto_context_t context, crypto_data_t *data,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
/* The fast path for SW providers. */
@@ -302,8 +342,8 @@ crypto_digest_update(crypto_context_t context, crypto_data_t *data,
error = KCF_PROV_DIGEST_UPDATE(pd, ctx, data, NULL);
KCF_PROV_INCRSTATS(pd, error);
} else {
- KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_UPDATE, pd->pd_sid,
- NULL, NULL, data, NULL);
+ KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_UPDATE,
+ ctx->cc_session, NULL, NULL, data, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
@@ -345,6 +385,7 @@ crypto_digest_final(crypto_context_t context, crypto_data_t *digest,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
/* The fast path for SW providers. */
@@ -352,8 +393,8 @@ crypto_digest_final(crypto_context_t context, crypto_data_t *digest,
error = KCF_PROV_DIGEST_FINAL(pd, ctx, digest, NULL);
KCF_PROV_INCRSTATS(pd, error);
} else {
- KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_FINAL, pd->pd_sid,
- NULL, NULL, NULL, digest);
+ KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_FINAL,
+ ctx->cc_session, NULL, NULL, NULL, digest);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
@@ -383,6 +424,7 @@ crypto_digest_key_prov(crypto_context_t context, crypto_key_t *key,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
/* The fast path for SW providers. */
@@ -391,11 +433,11 @@ crypto_digest_key_prov(crypto_context_t context, crypto_key_t *key,
KCF_PROV_INCRSTATS(pd, error);
} else {
KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_DIGEST_KEY,
- pd->pd_sid, NULL, key, NULL, NULL);
+ ctx->cc_session, NULL, key, NULL, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
-
KCF_PROV_REFRELE(pd);
+
return (error);
}
diff --git a/usr/src/uts/common/crypto/api/kcf_dual.c b/usr/src/uts/common/crypto/api/kcf_dual.c
index a23528e3e8..2dc8b34a66 100644
--- a/usr/src/uts/common/crypto/api/kcf_dual.c
+++ b/usr/src/uts/common/crypto/api/kcf_dual.c
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -29,18 +29,146 @@
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/kmem.h>
+#include <sys/sysmacros.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_CIPHER_MAC_OFFSET(f) offsetof(crypto_dual_cipher_mac_ops_t, f)
static int crypto_mac_decrypt_common(crypto_mechanism_t *,
crypto_mechanism_t *, crypto_dual_data_t *, crypto_key_t *, crypto_key_t *,
crypto_ctx_template_t, crypto_ctx_template_t, crypto_data_t *,
crypto_data_t *, crypto_call_req_t *, boolean_t);
+static int crypto_mac_decrypt_common_prov(crypto_provider_t provider,
+ crypto_session_id_t sid, crypto_mechanism_t *, crypto_mechanism_t *,
+ crypto_dual_data_t *, crypto_key_t *, crypto_key_t *,
+ crypto_ctx_template_t, crypto_ctx_template_t, crypto_data_t *,
+ crypto_data_t *, crypto_call_req_t *, boolean_t);
+
+int
+crypto_encrypt_mac_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *encr_mech, crypto_mechanism_t *mac_mech,
+ crypto_data_t *pt, crypto_key_t *encr_key, crypto_key_t *mac_key,
+ crypto_ctx_template_t encr_tmpl, crypto_ctx_template_t mac_tmpl,
+ crypto_dual_data_t *ct, crypto_data_t *mac, crypto_call_req_t *crq)
+{
+ /*
+ * First try to find a provider for the encryption mechanism, that
+ * is also capable of the MAC mechanism.
+ */
+ int rv;
+ kcf_mech_entry_t *me;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ kcf_ctx_template_t *ctx_encr_tmpl, *ctx_mac_tmpl;
+ kcf_req_params_t params;
+ kcf_encrypt_mac_ops_params_t *cmops;
+ crypto_spi_ctx_template_t spi_encr_tmpl = NULL, spi_mac_tmpl = NULL;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(encr_mech->cm_type,
+ mac_mech->cm_type, CRYPTO_OPS_OFFSET(dual_cipher_mac_ops),
+ CRYPTO_CIPHER_MAC_OFFSET(encrypt_mac_atomic),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ /*
+ * For SW providers, check the validity of the context template
+ * It is very rare that the generation number mis-matches, so
+ * is acceptable to fail here, and let the consumer recover by
+ * freeing this tmpl and create a new one for the key and new SW
+ * provider
+ * Warning! will need to change when multiple software providers
+ * per mechanism are supported.
+ */
+
+ if (real_provider->pd_prov_type == CRYPTO_SW_PROVIDER) {
+ if (encr_tmpl != NULL) {
+ if (kcf_get_mech_entry(encr_mech->cm_type, &me) !=
+ KCF_SUCCESS) {
+ rv = CRYPTO_MECHANISM_INVALID;
+ goto out;
+ }
+ ctx_encr_tmpl = (kcf_ctx_template_t *)encr_tmpl;
+ if (ctx_encr_tmpl->ct_generation != me->me_gen_swprov) {
+ rv = CRYPTO_OLD_CTX_TEMPLATE;
+ goto out;
+ }
+ spi_encr_tmpl = ctx_encr_tmpl->ct_prov_tmpl;
+ }
+
+ if (mac_tmpl != NULL) {
+ if (kcf_get_mech_entry(mac_mech->cm_type, &me) !=
+ KCF_SUCCESS) {
+ rv = CRYPTO_MECHANISM_INVALID;
+ goto out;
+ }
+ ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
+ if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
+ rv = CRYPTO_OLD_CTX_TEMPLATE;
+ goto out;
+ }
+ spi_mac_tmpl = ctx_mac_tmpl->ct_prov_tmpl;
+ }
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(crq, real_provider)) {
+ crypto_mechanism_t lencr_mech;
+ crypto_mechanism_t lmac_mech;
+
+ /* careful! structs assignments */
+ lencr_mech = *encr_mech;
+ KCF_SET_PROVIDER_MECHNUM(encr_mech->cm_type, real_provider,
+ &lencr_mech);
+
+ lmac_mech = *mac_mech;
+ KCF_SET_PROVIDER_MECHNUM(mac_mech->cm_type, real_provider,
+ &lmac_mech);
+
+ rv = KCF_PROV_ENCRYPT_MAC_ATOMIC(real_provider, sid,
+ &lencr_mech, encr_key, &lmac_mech, mac_key, pt, ct,
+ mac, spi_encr_tmpl, spi_mac_tmpl, KCF_SWFP_RHNDL(crq));
+
+ KCF_PROV_INCRSTATS(pd, rv);
+ } else {
+ KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC,
+ sid, encr_key, mac_key, pt, ct, mac, spi_encr_tmpl,
+ spi_mac_tmpl);
+
+ cmops = &(params.rp_u.encrypt_mac_params);
+
+ /* careful! structs assignments */
+ cmops->em_encr_mech = *encr_mech;
+ KCF_SET_PROVIDER_MECHNUM(encr_mech->cm_type, real_provider,
+ &cmops->em_encr_mech);
+ cmops->em_framework_encr_mechtype = encr_mech->cm_type;
+
+ cmops->em_mac_mech = *mac_mech;
+ KCF_SET_PROVIDER_MECHNUM(mac_mech->cm_type, real_provider,
+ &cmops->em_mac_mech);
+ cmops->em_framework_mac_mechtype = mac_mech->cm_type;
+
+ rv = kcf_submit_request(real_provider, NULL, crq, &params,
+ B_FALSE);
+ }
+
+out:
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+ return (rv);
+}
+
/*
* Performs a dual encrypt/mac atomic operation. The provider and session
* to use are determined by the KCF dispatcher.
@@ -286,6 +414,140 @@ retry:
return (error);
}
+int
+crypto_encrypt_mac_init_prov(crypto_provider_t provider,
+ crypto_session_id_t sid, crypto_mechanism_t *encr_mech,
+ crypto_mechanism_t *mac_mech, crypto_key_t *encr_key,
+ crypto_key_t *mac_key, crypto_ctx_template_t encr_tmpl,
+ crypto_ctx_template_t mac_tmpl, crypto_context_t *ctxp,
+ crypto_call_req_t *cr)
+{
+ /*
+ * First try to find a provider for the encryption mechanism, that
+ * is also capable of the MAC mechanism.
+ */
+ int rv;
+ kcf_mech_entry_t *me;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ kcf_ctx_template_t *ctx_encr_tmpl, *ctx_mac_tmpl;
+ kcf_req_params_t params;
+ kcf_encrypt_mac_ops_params_t *cmops;
+ crypto_spi_ctx_template_t spi_encr_tmpl = NULL, spi_mac_tmpl = NULL;
+ crypto_ctx_t *ctx;
+ kcf_context_t *encr_kcf_context = NULL;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(encr_mech->cm_type,
+ mac_mech->cm_type, CRYPTO_OPS_OFFSET(dual_cipher_mac_ops),
+ CRYPTO_CIPHER_MAC_OFFSET(encrypt_mac_init),
+ CHECK_RESTRICT(cr), pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ /*
+ * For SW providers, check the validity of the context template
+ * It is very rare that the generation number mis-matches, so
+ * is acceptable to fail here, and let the consumer recover by
+ * freeing this tmpl and create a new one for the key and new SW
+ * provider
+ * Warning! will need to change when multiple software providers
+ * per mechanism are supported.
+ */
+
+ if (real_provider->pd_prov_type == CRYPTO_SW_PROVIDER) {
+ if (encr_tmpl != NULL) {
+ if (kcf_get_mech_entry(encr_mech->cm_type, &me) !=
+ KCF_SUCCESS) {
+ rv = CRYPTO_MECHANISM_INVALID;
+ goto out;
+ }
+ ctx_encr_tmpl = (kcf_ctx_template_t *)encr_tmpl;
+ if (ctx_encr_tmpl->ct_generation != me->me_gen_swprov) {
+ rv = CRYPTO_OLD_CTX_TEMPLATE;
+ goto out;
+ }
+ spi_encr_tmpl = ctx_encr_tmpl->ct_prov_tmpl;
+ }
+
+ if (mac_tmpl != NULL) {
+ if (kcf_get_mech_entry(mac_mech->cm_type, &me) !=
+ KCF_SUCCESS) {
+ rv = CRYPTO_MECHANISM_INVALID;
+ goto out;
+ }
+ ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
+ if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
+ rv = CRYPTO_OLD_CTX_TEMPLATE;
+ goto out;
+ }
+ spi_mac_tmpl = ctx_mac_tmpl->ct_prov_tmpl;
+ }
+ }
+
+ ctx = kcf_new_ctx(cr, real_provider, sid);
+ if (ctx == NULL) {
+ rv = CRYPTO_HOST_MEMORY;
+ goto out;
+ }
+ encr_kcf_context = (kcf_context_t *)ctx->cc_framework_private;
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, real_provider)) {
+ crypto_mechanism_t lencr_mech;
+ crypto_mechanism_t lmac_mech;
+
+ /* careful! structs assignments */
+ lencr_mech = *encr_mech;
+ KCF_SET_PROVIDER_MECHNUM(encr_mech->cm_type, real_provider,
+ &lencr_mech);
+
+ lmac_mech = *mac_mech;
+ KCF_SET_PROVIDER_MECHNUM(mac_mech->cm_type, real_provider,
+ &lmac_mech);
+
+ rv = KCF_PROV_ENCRYPT_MAC_INIT(real_provider, ctx, &lencr_mech,
+ encr_key, &lmac_mech, mac_key, spi_encr_tmpl, spi_mac_tmpl,
+ KCF_SWFP_RHNDL(cr));
+
+ KCF_PROV_INCRSTATS(pd, rv);
+ } else {
+ KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(&params, KCF_OP_INIT,
+ sid, encr_key, mac_key, NULL, NULL, NULL,
+ spi_encr_tmpl, spi_mac_tmpl);
+
+ cmops = &(params.rp_u.encrypt_mac_params);
+
+ /* careful! structs assignments */
+ cmops->em_encr_mech = *encr_mech;
+ KCF_SET_PROVIDER_MECHNUM(encr_mech->cm_type, real_provider,
+ &cmops->em_encr_mech);
+ cmops->em_framework_encr_mechtype = encr_mech->cm_type;
+
+ cmops->em_mac_mech = *mac_mech;
+ KCF_SET_PROVIDER_MECHNUM(mac_mech->cm_type, real_provider,
+ &cmops->em_mac_mech);
+ cmops->em_framework_mac_mechtype = mac_mech->cm_type;
+
+ rv = kcf_submit_request(real_provider, ctx, cr, &params,
+ B_FALSE);
+ }
+
+ if (rv != CRYPTO_SUCCESS && rv != CRYPTO_QUEUED) {
+ KCF_CONTEXT_REFRELE(encr_kcf_context);
+ } else
+ *ctxp = (crypto_context_t)ctx;
+
+out:
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+ return (rv);
+}
+
/*
* Starts a multi-part dual encrypt/mac operation. The provider and session
* to use are determined by the KCF dispatcher.
@@ -622,6 +884,7 @@ crypto_encrypt_mac_update(crypto_context_t context,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
if ((kcf_mac_ctx = kcf_ctx->kc_secondctx) != NULL) {
@@ -698,7 +961,7 @@ crypto_encrypt_mac_update(crypto_context_t context,
KCF_PROV_INCRSTATS(pd, error);
} else {
KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(&params, KCF_OP_UPDATE,
- pd->pd_sid, NULL, NULL, pt, ct, NULL, NULL, NULL);
+ ctx->cc_session, NULL, NULL, pt, ct, NULL, NULL, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
@@ -726,6 +989,7 @@ int crypto_encrypt_mac_final(crypto_context_t context, crypto_dual_data_t *ct,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
if ((kcf_mac_ctx = kcf_ctx->kc_secondctx) != NULL) {
@@ -800,7 +1064,7 @@ int crypto_encrypt_mac_final(crypto_context_t context, crypto_dual_data_t *ct,
KCF_PROV_INCRSTATS(pd, error);
} else {
KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(&params, KCF_OP_FINAL,
- pd->pd_sid, NULL, NULL, NULL, ct, mac, NULL, NULL);
+ ctx->cc_session, NULL, NULL, NULL, ct, mac, NULL, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
out:
@@ -825,6 +1089,18 @@ crypto_mac_decrypt(crypto_mechanism_t *mac_mech,
decr_key, mac_tmpl, decr_tmpl, mac, pt, crq, B_FALSE));
}
+int
+crypto_mac_decrypt_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mac_mech, crypto_mechanism_t *decr_mech,
+ crypto_dual_data_t *ct, crypto_key_t *mac_key, crypto_key_t *decr_key,
+ crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
+ crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *crq)
+{
+ return (crypto_mac_decrypt_common_prov(provider, sid, mac_mech,
+ decr_mech, ct, mac_key, decr_key, mac_tmpl, decr_tmpl, mac, pt,
+ crq, B_FALSE));
+}
+
/*
* Performs an atomic dual mac/decrypt operation. The provider to use
* is determined by the KCF dispatcher. 'mac' specifies the expected
@@ -842,6 +1118,19 @@ crypto_mac_verify_decrypt(crypto_mechanism_t *mac_mech,
decr_key, mac_tmpl, decr_tmpl, mac, pt, crq, B_TRUE));
}
+int
+crypto_mac_verify_decrypt_prov(crypto_provider_t provider,
+ crypto_session_id_t sid, crypto_mechanism_t *mac_mech,
+ crypto_mechanism_t *decr_mech, crypto_dual_data_t *ct,
+ crypto_key_t *mac_key, crypto_key_t *decr_key,
+ crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
+ crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *crq)
+{
+ return (crypto_mac_decrypt_common_prov(provider, sid, mac_mech,
+ decr_mech, ct, mac_key, decr_key, mac_tmpl, decr_tmpl, mac, pt,
+ crq, B_TRUE));
+}
+
/*
* Called by both crypto_mac_decrypt() and crypto_mac_verify_decrypt().
* optionally verified if the MACs match before calling the decryption step.
@@ -1108,6 +1397,143 @@ retry:
return (error);
}
+static int
+crypto_mac_decrypt_common_prov(crypto_provider_t provider,
+ crypto_session_id_t sid, crypto_mechanism_t *mac_mech,
+ crypto_mechanism_t *decr_mech, crypto_dual_data_t *ct,
+ crypto_key_t *mac_key, crypto_key_t *decr_key,
+ crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
+ crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *crq,
+ boolean_t do_verify)
+{
+ /*
+ * First try to find a provider for the decryption mechanism, that
+ * is also capable of the MAC mechanism.
+ * We still favor optimizing the costlier decryption.
+ */
+ int error;
+ kcf_mech_entry_t *me;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ kcf_ctx_template_t *ctx_decr_tmpl, *ctx_mac_tmpl;
+ kcf_req_params_t params;
+ kcf_mac_decrypt_ops_params_t *cmops;
+ crypto_spi_ctx_template_t spi_decr_tmpl = NULL, spi_mac_tmpl = NULL;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ if (do_verify) {
+ error = kcf_get_hardware_provider(decr_mech->cm_type,
+ mac_mech->cm_type,
+ CRYPTO_OPS_OFFSET(dual_cipher_mac_ops),
+ CRYPTO_CIPHER_MAC_OFFSET(mac_verify_decrypt_atomic),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+ } else {
+ error = kcf_get_hardware_provider(decr_mech->cm_type,
+ mac_mech->cm_type,
+ CRYPTO_OPS_OFFSET(dual_cipher_mac_ops),
+ CRYPTO_CIPHER_MAC_OFFSET(mac_decrypt_atomic),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+ }
+
+ if (error != CRYPTO_SUCCESS)
+ return (error);
+ }
+
+ /*
+ * For SW providers, check the validity of the context template
+ * It is very rare that the generation number mis-matches, so
+ * is acceptable to fail here, and let the consumer recover by
+ * freeing this tmpl and create a new one for the key and new SW
+ * provider
+ */
+
+ if (real_provider->pd_prov_type == CRYPTO_SW_PROVIDER) {
+ if (decr_tmpl != NULL) {
+ if (kcf_get_mech_entry(decr_mech->cm_type, &me) !=
+ KCF_SUCCESS) {
+ error = CRYPTO_MECHANISM_INVALID;
+ goto out;
+ }
+ ctx_decr_tmpl = (kcf_ctx_template_t *)decr_tmpl;
+ if (ctx_decr_tmpl->ct_generation != me->me_gen_swprov) {
+ error = CRYPTO_OLD_CTX_TEMPLATE;
+ goto out;
+ }
+ spi_decr_tmpl = ctx_decr_tmpl->ct_prov_tmpl;
+ }
+
+ if (mac_tmpl != NULL) {
+ if (kcf_get_mech_entry(mac_mech->cm_type, &me) !=
+ KCF_SUCCESS) {
+ error = CRYPTO_MECHANISM_INVALID;
+ goto out;
+ }
+ ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
+ if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
+ error = CRYPTO_OLD_CTX_TEMPLATE;
+ goto out;
+ }
+ spi_mac_tmpl = ctx_mac_tmpl->ct_prov_tmpl;
+ }
+ }
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(crq, pd)) {
+ crypto_mechanism_t lmac_mech;
+ crypto_mechanism_t ldecr_mech;
+
+ /* careful! structs assignments */
+ ldecr_mech = *decr_mech;
+ KCF_SET_PROVIDER_MECHNUM(decr_mech->cm_type, real_provider,
+ &ldecr_mech);
+
+ lmac_mech = *mac_mech;
+ KCF_SET_PROVIDER_MECHNUM(mac_mech->cm_type, real_provider,
+ &lmac_mech);
+
+ if (do_verify)
+ error = KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(
+ real_provider, sid, &lmac_mech, mac_key,
+ &ldecr_mech, decr_key, ct, mac, pt, spi_mac_tmpl,
+ spi_decr_tmpl, KCF_SWFP_RHNDL(crq));
+ else
+ error = KCF_PROV_MAC_DECRYPT_ATOMIC(real_provider, sid,
+ &lmac_mech, mac_key, &ldecr_mech, decr_key,
+ ct, mac, pt, spi_mac_tmpl, spi_decr_tmpl,
+ KCF_SWFP_RHNDL(crq));
+
+ KCF_PROV_INCRSTATS(pd, error);
+ } else {
+ KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(&params,
+ (do_verify) ? KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC :
+ KCF_OP_ATOMIC, sid, mac_key, decr_key, ct, mac, pt,
+ spi_mac_tmpl, spi_decr_tmpl);
+
+ cmops = &(params.rp_u.mac_decrypt_params);
+
+ /* careful! structs assignments */
+ cmops->md_decr_mech = *decr_mech;
+ KCF_SET_PROVIDER_MECHNUM(decr_mech->cm_type, real_provider,
+ &cmops->md_decr_mech);
+ cmops->md_framework_decr_mechtype = decr_mech->cm_type;
+
+ cmops->md_mac_mech = *mac_mech;
+ KCF_SET_PROVIDER_MECHNUM(mac_mech->cm_type, real_provider,
+ &cmops->md_mac_mech);
+ cmops->md_framework_mac_mechtype = mac_mech->cm_type;
+
+ error = kcf_submit_request(real_provider, NULL, crq, &params,
+ B_FALSE);
+ }
+
+out:
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+ return (error);
+}
+
/*
* Starts a multi-part dual mac/decrypt operation. The provider to
* use is determined by the KCF dispatcher.
@@ -1429,6 +1855,140 @@ retry:
return (error);
}
+int
+crypto_mac_decrypt_init_prov(crypto_provider_t provider,
+ crypto_session_id_t sid, crypto_mechanism_t *mac_mech,
+ crypto_mechanism_t *decr_mech, crypto_key_t *mac_key,
+ crypto_key_t *decr_key, crypto_ctx_template_t mac_tmpl,
+ crypto_ctx_template_t decr_tmpl, crypto_context_t *ctxp,
+ crypto_call_req_t *cr)
+{
+ /*
+ * First try to find a provider for the decryption mechanism, that
+ * is also capable of the MAC mechanism.
+ * We still favor optimizing the costlier decryption.
+ */
+ int rv;
+ kcf_mech_entry_t *me;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ kcf_ctx_template_t *ctx_decr_tmpl, *ctx_mac_tmpl;
+ kcf_req_params_t params;
+ kcf_mac_decrypt_ops_params_t *mdops;
+ crypto_spi_ctx_template_t spi_decr_tmpl = NULL, spi_mac_tmpl = NULL;
+ crypto_ctx_t *ctx;
+ kcf_context_t *decr_kcf_context = NULL;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(decr_mech->cm_type,
+ mac_mech->cm_type, CRYPTO_OPS_OFFSET(dual_cipher_mac_ops),
+ CRYPTO_CIPHER_MAC_OFFSET(mac_decrypt_init),
+ CHECK_RESTRICT(cr), pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ /*
+	 * For SW providers, check the validity of the context template.
+	 * It is very rare that the generation number mis-matches, so it
+	 * is acceptable to fail here, and let the consumer recover by
+ * freeing this tmpl and create a new one for the key and new SW
+ * provider
+ * Warning! will need to change when multiple software providers
+ * per mechanism are supported.
+ */
+
+ if (real_provider->pd_prov_type == CRYPTO_SW_PROVIDER) {
+ if (decr_tmpl != NULL) {
+ if (kcf_get_mech_entry(decr_mech->cm_type, &me) !=
+ KCF_SUCCESS) {
+ rv = CRYPTO_MECHANISM_INVALID;
+ goto out;
+ }
+ ctx_decr_tmpl = (kcf_ctx_template_t *)decr_tmpl;
+ if (ctx_decr_tmpl->ct_generation != me->me_gen_swprov) {
+ rv = CRYPTO_OLD_CTX_TEMPLATE;
+ goto out;
+ }
+ spi_decr_tmpl = ctx_decr_tmpl->ct_prov_tmpl;
+ }
+
+ if (mac_tmpl != NULL) {
+ if (kcf_get_mech_entry(mac_mech->cm_type, &me) !=
+ KCF_SUCCESS) {
+ rv = CRYPTO_MECHANISM_INVALID;
+ goto out;
+ }
+ ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
+ if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
+ rv = CRYPTO_OLD_CTX_TEMPLATE;
+ goto out;
+ }
+ spi_mac_tmpl = ctx_mac_tmpl->ct_prov_tmpl;
+ }
+ }
+
+ ctx = kcf_new_ctx(cr, real_provider, sid);
+ if (ctx == NULL) {
+ rv = CRYPTO_HOST_MEMORY;
+ goto out;
+ }
+ decr_kcf_context = (kcf_context_t *)ctx->cc_framework_private;
+
+ /* The fast path for SW providers. */
+ if (CHECK_FASTPATH(cr, pd)) {
+ crypto_mechanism_t ldecr_mech;
+ crypto_mechanism_t lmac_mech;
+
+ /* careful! structs assignments */
+ ldecr_mech = *decr_mech;
+ KCF_SET_PROVIDER_MECHNUM(decr_mech->cm_type, real_provider,
+ &ldecr_mech);
+
+ lmac_mech = *mac_mech;
+ KCF_SET_PROVIDER_MECHNUM(mac_mech->cm_type, real_provider,
+ &lmac_mech);
+
+ rv = KCF_PROV_MAC_DECRYPT_INIT(real_provider, ctx, &lmac_mech,
+ mac_key, &ldecr_mech, decr_key, spi_mac_tmpl, spi_decr_tmpl,
+ KCF_SWFP_RHNDL(cr));
+
+ KCF_PROV_INCRSTATS(pd, rv);
+ } else {
+ KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(&params, KCF_OP_INIT,
+ sid, mac_key, decr_key, NULL, NULL, NULL,
+ spi_mac_tmpl, spi_decr_tmpl);
+
+ mdops = &(params.rp_u.mac_decrypt_params);
+
+ /* careful! structs assignments */
+ mdops->md_decr_mech = *decr_mech;
+ KCF_SET_PROVIDER_MECHNUM(decr_mech->cm_type, real_provider,
+ &mdops->md_decr_mech);
+ mdops->md_framework_decr_mechtype = decr_mech->cm_type;
+
+ mdops->md_mac_mech = *mac_mech;
+ KCF_SET_PROVIDER_MECHNUM(mac_mech->cm_type, real_provider,
+ &mdops->md_mac_mech);
+ mdops->md_framework_mac_mechtype = mac_mech->cm_type;
+
+ rv = kcf_submit_request(real_provider, ctx, cr, &params,
+ B_FALSE);
+ }
+
+ if (rv != CRYPTO_SUCCESS && rv != CRYPTO_QUEUED) {
+ KCF_CONTEXT_REFRELE(decr_kcf_context);
+ } else
+ *ctxp = (crypto_context_t)ctx;
+
+out:
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+ return (rv);
+}
/*
* Continues a multi-part dual mac/decrypt operation.
*/
@@ -1449,6 +2009,7 @@ crypto_mac_decrypt_update(crypto_context_t context,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
if ((kcf_mac_ctx = kcf_ctx->kc_secondctx) != NULL) {
@@ -1511,7 +2072,7 @@ crypto_mac_decrypt_update(crypto_context_t context,
KCF_PROV_INCRSTATS(pd, error);
} else {
KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE,
- pd->pd_sid, NULL, NULL, ct, NULL, pt, NULL, NULL);
+ ctx->cc_session, NULL, NULL, ct, NULL, pt, NULL, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
@@ -1540,6 +2101,7 @@ crypto_mac_decrypt_final(crypto_context_t context, crypto_data_t *mac,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
if ((kcf_mac_ctx = kcf_ctx->kc_secondctx) != NULL) {
@@ -1589,7 +2151,7 @@ crypto_mac_decrypt_final(crypto_context_t context, crypto_data_t *mac,
KCF_PROV_INCRSTATS(pd, error);
} else {
KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(&params, KCF_OP_FINAL,
- pd->pd_sid, NULL, NULL, NULL, mac, pt, NULL, NULL);
+ ctx->cc_session, NULL, NULL, NULL, mac, pt, NULL, NULL);
error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
diff --git a/usr/src/uts/common/crypto/api/kcf_keys.c b/usr/src/uts/common/crypto/api/kcf_keys.c
new file mode 100644
index 0000000000..c95fd9f72d
--- /dev/null
+++ b/usr/src/uts/common/crypto/api/kcf_keys.c
@@ -0,0 +1,243 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/errno.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/cmn_err.h>
+#include <sys/sysmacros.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/sched_impl.h>
+
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_KEY_OFFSET(f) offsetof(crypto_key_ops_t, f)
+
+int
+crypto_key_generate(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_object_attribute_t *attrs, uint_t count,
+ crypto_object_id_t *handle, crypto_call_req_t *crq)
+{
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(key_ops),
+ CRYPTO_KEY_OFFSET(key_generate), CHECK_RESTRICT(crq),
+ pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ if (CHECK_FASTPATH(crq, real_provider)) {
+ rv = KCF_PROV_KEY_GENERATE(real_provider, sid,
+ mech, attrs, count, handle, KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, rv);
+ } else {
+ KCF_WRAP_KEY_OPS_PARAMS(&params, KCF_OP_KEY_GENERATE, sid,
+ mech, attrs, count, handle, NULL, 0, NULL, NULL, NULL, 0);
+ rv = kcf_submit_request(real_provider, NULL, crq,
+ &params, B_FALSE);
+ }
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
+}
+
+int
+crypto_key_generate_pair(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_object_attribute_t *pub_attrs,
+ uint_t pub_count, crypto_object_attribute_t *pri_attrs, uint_t pri_count,
+ crypto_object_id_t *pub_handle, crypto_object_id_t *pri_handle,
+ crypto_call_req_t *crq)
+{
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(key_ops),
+ CRYPTO_KEY_OFFSET(key_generate_pair), CHECK_RESTRICT(crq),
+ pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ if (CHECK_FASTPATH(crq, real_provider)) {
+ rv = KCF_PROV_KEY_GENERATE_PAIR(real_provider, sid, mech,
+ pub_attrs, pub_count, pri_attrs, pri_count, pub_handle,
+ pri_handle, KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, rv);
+ } else {
+ KCF_WRAP_KEY_OPS_PARAMS(&params, KCF_OP_KEY_GENERATE_PAIR,
+ sid, mech, pub_attrs, pub_count, pub_handle, pri_attrs,
+ pri_count, pri_handle, NULL, NULL, 0);
+ rv = kcf_submit_request(real_provider, NULL, crq,
+ &params, B_FALSE);
+ }
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
+}
+
+int
+crypto_key_wrap(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_key_t *wrapping_key,
+ crypto_object_id_t *key, uchar_t *wrapped_key, size_t *wrapped_key_len,
+ crypto_call_req_t *crq)
+{
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(key_ops),
+ CRYPTO_KEY_OFFSET(key_wrap), CHECK_RESTRICT(crq),
+ pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ if (CHECK_FASTPATH(crq, real_provider)) {
+ rv = KCF_PROV_KEY_WRAP(real_provider, sid, mech, wrapping_key,
+ key, wrapped_key, wrapped_key_len, KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, rv);
+ } else {
+ KCF_WRAP_KEY_OPS_PARAMS(&params, KCF_OP_KEY_WRAP, sid, mech,
+ NULL, 0, key, NULL, 0, NULL, wrapping_key, wrapped_key,
+ wrapped_key_len);
+ rv = kcf_submit_request(real_provider, NULL, crq,
+ &params, B_FALSE);
+ }
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
+}
+
+int
+crypto_key_unwrap(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_key_t *unwrapping_key,
+ uchar_t *wrapped_key, size_t *wrapped_key_len,
+ crypto_object_attribute_t *attrs, uint_t count, crypto_object_id_t *key,
+ crypto_call_req_t *crq)
+{
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(key_ops),
+ CRYPTO_KEY_OFFSET(key_unwrap), CHECK_RESTRICT(crq),
+ pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ if (CHECK_FASTPATH(crq, real_provider)) {
+ rv = KCF_PROV_KEY_UNWRAP(real_provider, sid, mech,
+ unwrapping_key, wrapped_key, wrapped_key_len, attrs,
+ count, key, KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, rv);
+ } else {
+ KCF_WRAP_KEY_OPS_PARAMS(&params, KCF_OP_KEY_UNWRAP, sid, mech,
+ attrs, count, key, NULL, 0, NULL, unwrapping_key,
+ wrapped_key, wrapped_key_len);
+ rv = kcf_submit_request(real_provider, NULL, crq,
+ &params, B_FALSE);
+ }
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
+}
+
+int
+crypto_key_derive(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_key_t *base_key,
+ crypto_object_attribute_t *attrs, uint_t count,
+ crypto_object_id_t *new_key, crypto_call_req_t *crq)
+{
+ kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(key_ops),
+ CRYPTO_KEY_OFFSET(key_derive), CHECK_RESTRICT(crq),
+ pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ if (CHECK_FASTPATH(crq, real_provider)) {
+ rv = KCF_PROV_KEY_DERIVE(real_provider, sid, mech, base_key,
+ attrs, count, new_key, KCF_SWFP_RHNDL(crq));
+ KCF_PROV_INCRSTATS(pd, rv);
+ } else {
+ KCF_WRAP_KEY_OPS_PARAMS(&params, KCF_OP_KEY_DERIVE, sid, mech,
+ attrs, count, new_key, NULL, 0, NULL, base_key, NULL, NULL);
+ rv = kcf_submit_request(real_provider, NULL, crq,
+ &params, B_FALSE);
+ }
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
+}
diff --git a/usr/src/uts/common/crypto/api/kcf_mac.c b/usr/src/uts/common/crypto/api/kcf_mac.c
index a0459bcc50..57b2a7595f 100644
--- a/usr/src/uts/common/crypto/api/kcf_mac.c
+++ b/usr/src/uts/common/crypto/api/kcf_mac.c
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -29,12 +29,16 @@
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/kmem.h>
+#include <sys/sysmacros.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_MAC_OFFSET(f) offsetof(crypto_mac_ops_t, f)
+
/*
* Message authentication codes routines.
*/
@@ -92,17 +96,34 @@
* See comment in the beginning of the file.
*/
int
-crypto_mac_prov(crypto_mechanism_t *mech, crypto_data_t *data,
- crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
- crypto_call_req_t *crq, kcf_provider_desc_t *pd, crypto_session_id_t sid)
+crypto_mac_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_data_t *data, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_data_t *mac, crypto_call_req_t *crq)
{
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(mac_ops),
+ CRYPTO_MAC_OFFSET(mac_atomic),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
data, mac, tmpl);
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
- return (kcf_submit_request(pd, NULL, crq, &params, B_FALSE));
+ return (rv);
}
/*
@@ -189,17 +210,34 @@ retry:
* The other arguments are the same as the function crypto_mac_prov().
*/
int
-crypto_mac_verify_prov(crypto_mechanism_t *mech, crypto_data_t *data,
- crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
- crypto_call_req_t *crq, kcf_provider_desc_t *pd, crypto_session_id_t sid)
+crypto_mac_verify_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_data_t *data, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_data_t *mac, crypto_call_req_t *crq)
{
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(mac_ops),
+ CRYPTO_MAC_OFFSET(mac_verify_atomic),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_MAC_VERIFY_ATOMIC, sid, mech,
key, data, mac, tmpl);
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
- return (kcf_submit_request(pd, NULL, crq, &params, B_FALSE));
+ return (rv);
}
/*
@@ -318,43 +356,62 @@ retry:
* See comment in the beginning of the file.
*/
int
-crypto_mac_init_prov(kcf_provider_desc_t *pd, crypto_session_id_t sid,
+crypto_mac_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
crypto_mechanism_t *mech, crypto_key_t *key, crypto_spi_ctx_template_t tmpl,
crypto_context_t *ctxp, crypto_call_req_t *crq)
{
- int error;
+ int rv;
crypto_ctx_t *ctx;
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
ASSERT(KCF_PROV_REFHELD(pd));
- /* First, allocate and initialize the canonical context */
- if ((ctx = kcf_new_ctx(crq, pd, sid)) == NULL)
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(mac_ops),
+ CRYPTO_MAC_OFFSET(mac_init),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ /* Allocate and initialize the canonical context */
+ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
return (CRYPTO_HOST_MEMORY);
+ }
/* The fast path for SW providers. */
if (CHECK_FASTPATH(crq, pd)) {
crypto_mechanism_t lmech;
lmech = *mech;
- KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
- error = KCF_PROV_MAC_INIT(pd, ctx, &lmech, key, tmpl,
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
+ rv = KCF_PROV_MAC_INIT(real_provider, ctx, &lmech, key, tmpl,
KCF_SWFP_RHNDL(crq));
- KCF_PROV_INCRSTATS(pd, error);
+ KCF_PROV_INCRSTATS(pd, rv);
} else {
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_INIT, sid, mech, key,
NULL, NULL, tmpl);
- error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
+ rv = kcf_submit_request(real_provider, ctx, crq, &params,
+ B_FALSE);
}
- if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED))
*ctxp = (crypto_context_t)ctx;
else {
/* Release the hold done in kcf_new_ctx(). */
KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
}
- return (error);
+ return (rv);
}
/*
@@ -444,9 +501,8 @@ crypto_mac_update(crypto_context_t context, crypto_data_t *data,
crypto_ctx_t *ctx = (crypto_ctx_t *)context;
kcf_context_t *kcf_ctx;
kcf_provider_desc_t *pd;
- int error;
kcf_req_params_t params;
-
+ int rv;
if ((ctx == NULL) ||
((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
@@ -454,20 +510,21 @@ crypto_mac_update(crypto_context_t context, crypto_data_t *data,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
- error = KCF_PROV_MAC_UPDATE(pd, ctx, data, NULL);
- KCF_PROV_INCRSTATS(pd, error);
+ rv = KCF_PROV_MAC_UPDATE(pd, ctx, data, NULL);
+ KCF_PROV_INCRSTATS(pd, rv);
} else {
- KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_UPDATE, pd->pd_sid,
- NULL, NULL, data, NULL, NULL);
- error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_UPDATE,
+ ctx->cc_session, NULL, NULL, data, NULL, NULL);
+ rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
KCF_PROV_REFRELE(pd);
- return (error);
+ return (rv);
}
/*
@@ -495,8 +552,8 @@ crypto_mac_final(crypto_context_t context, crypto_data_t *mac,
crypto_ctx_t *ctx = (crypto_ctx_t *)context;
kcf_context_t *kcf_ctx;
kcf_provider_desc_t *pd;
- int error;
kcf_req_params_t params;
+ int rv;
if ((ctx == NULL) ||
((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
@@ -504,22 +561,23 @@ crypto_mac_final(crypto_context_t context, crypto_data_t *mac,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
- error = KCF_PROV_MAC_FINAL(pd, ctx, mac, NULL);
- KCF_PROV_INCRSTATS(pd, error);
+ rv = KCF_PROV_MAC_FINAL(pd, ctx, mac, NULL);
+ KCF_PROV_INCRSTATS(pd, rv);
} else {
- KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_FINAL, pd->pd_sid, NULL,
- NULL, NULL, mac, NULL);
- error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_FINAL,
+ ctx->cc_session, NULL, NULL, NULL, mac, NULL);
+ rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
}
KCF_PROV_REFRELE(pd);
/* Release the hold done in kcf_new_ctx() during init step. */
- KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
- return (error);
+ KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx);
+ return (rv);
}
/*
diff --git a/usr/src/uts/common/crypto/api/kcf_miscapi.c b/usr/src/uts/common/crypto/api/kcf_miscapi.c
index e86c27fe1f..6242df6dc8 100644
--- a/usr/src/uts/common/crypto/api/kcf_miscapi.c
+++ b/usr/src/uts/common/crypto/api/kcf_miscapi.c
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -30,11 +30,20 @@
#include <sys/sunddi.h>
#include <sys/disp.h>
#include <sys/modctl.h>
+#include <sys/sysmacros.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
+#define isspace(ch) (((ch) == ' ') || ((ch) == '\r') || ((ch) == '\n') || \
+ ((ch) == '\t') || ((ch) == '\f'))
+
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_KEY_OFFSET(f) offsetof(crypto_key_ops_t, f)
+#define CRYPTO_PROVIDER_OFFSET(f) \
+ offsetof(crypto_provider_management_ops_t, f)
+
/* Miscellaneous exported entry points */
/*
@@ -571,6 +580,40 @@ crypto_key_check(crypto_mechanism_t *mech, crypto_key_t *key)
return (CRYPTO_SUCCESS);
}
+int
+crypto_key_check_prov(crypto_provider_t provider, crypto_mechanism_t *mech,
+ crypto_key_t *key)
+{
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ crypto_mechanism_t lmech;
+ int rv;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if ((mech == NULL) || (key == NULL) ||
+ (key->ck_format == CRYPTO_KEY_REFERENCE))
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(key_ops),
+ CRYPTO_KEY_OFFSET(key_check), CHECK_RESTRICT_FALSE,
+ pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ lmech = *mech;
+ KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);
+ rv = KCF_PROV_KEY_CHECK(real_provider, &lmech, key);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
+
+ return (rv);
+}
+
/*
* Initialize the specified crypto_mechanism_info_t structure for
* the specified mechanism provider descriptor. Used by
@@ -662,3 +705,157 @@ bail:
*num_mech_infos = ninfos;
return (rv);
}
+
+/*
+ * memcmp_pad_max() is a specialized version of memcmp() which
+ * compares two pieces of data up to a maximum length. If the
+ * the two data match up the maximum length, they are considered
+ * matching. Trailing blanks do not cause the match to fail if
+ * one of the data is shorter.
+ *
+ * Examples of matches:
+ * "one" |
+ * "one " |
+ * ^maximum length
+ *
+ * "Number One | X" (X is beyond maximum length)
+ * "Number One " |
+ * ^maximum length
+ *
+ * Examples of mismatches:
+ * " one"
+ * "one"
+ *
+ * "Number One X|"
+ * "Number One |"
+ * ^maximum length
+ */
+static int
+memcmp_pad_max(void *d1, uint_t d1_len, void *d2, uint_t d2_len, uint_t max_sz)
+{
+ uint_t len, extra_len;
+ char *marker;
+
+ /* No point in comparing anything beyond max_sz */
+ if (d1_len > max_sz)
+ d1_len = max_sz;
+ if (d2_len > max_sz)
+ d2_len = max_sz;
+
+ /* Find shorter of the two data. */
+ if (d1_len <= d2_len) {
+ len = d1_len;
+ extra_len = d2_len;
+ marker = d2;
+ } else { /* d1_len > d2_len */
+ len = d2_len;
+ extra_len = d1_len;
+ marker = d1;
+ }
+
+ /* Have a match in the shortest length of data? */
+ if (memcmp(d1, d2, len) != 0)
+ /* CONSTCOND */
+ return (!0);
+
+ /* If the rest of longer data is nulls or blanks, call it a match. */
+ while (len < extra_len)
+ if (!isspace(marker[len++]))
+ /* CONSTCOND */
+ return (!0);
+ return (0);
+}
+
+/*
+ * Obtain ext info for specified provider and see if it matches.
+ */
+static boolean_t
+match_ext_info(kcf_provider_desc_t *pd, char *label, char *manuf, char *serial,
+ crypto_provider_ext_info_t *ext_info)
+{
+ kcf_provider_desc_t *real_provider;
+	int rv = CRYPTO_GENERAL_ERROR;	/* in case no real provider is found */
+ kcf_req_params_t params;
+
+ (void) kcf_get_hardware_provider_nomech(
+ CRYPTO_OPS_OFFSET(provider_ops), CRYPTO_PROVIDER_OFFSET(ext_info),
+ CHECK_RESTRICT_FALSE, pd, &real_provider);
+
+ if (real_provider != NULL) {
+ ASSERT(real_provider == pd ||
+ pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);
+ KCF_WRAP_PROVMGMT_OPS_PARAMS(&params, KCF_OP_MGMT_EXTINFO,
+ 0, NULL, 0, NULL, 0, NULL, ext_info, pd);
+ rv = kcf_submit_request(real_provider, NULL, NULL, &params,
+ B_FALSE);
+ ASSERT(rv != CRYPTO_NOT_SUPPORTED);
+ KCF_PROV_REFRELE(real_provider);
+ }
+
+ if (rv != CRYPTO_SUCCESS)
+ return (B_FALSE);
+
+ if (memcmp_pad_max(ext_info->ei_label, CRYPTO_EXT_SIZE_LABEL,
+ label, strlen(label), CRYPTO_EXT_SIZE_LABEL))
+ return (B_FALSE);
+
+ if (manuf != NULL) {
+ if (memcmp_pad_max(ext_info->ei_manufacturerID,
+ CRYPTO_EXT_SIZE_MANUF, manuf, strlen(manuf),
+ CRYPTO_EXT_SIZE_MANUF))
+ return (B_FALSE);
+ }
+
+ if (serial != NULL) {
+ if (memcmp_pad_max(ext_info->ei_serial_number,
+		    CRYPTO_EXT_SIZE_SERIAL, serial, strlen(serial),
+ CRYPTO_EXT_SIZE_SERIAL))
+ return (B_FALSE);
+ }
+ return (B_TRUE);
+}
+
+/*
+ * Find a provider based on its label, manufacturer ID, and serial number.
+ */
+crypto_provider_t
+crypto_get_provider(char *label, char *manuf, char *serial)
+{
+ kcf_provider_desc_t **provider_array, *pd;
+ crypto_provider_ext_info_t *ext_info;
+ uint_t count;
+ int i;
+
+ /* manuf and serial are optional */
+ if (label == NULL)
+ return (NULL);
+
+ if (kcf_get_slot_list(&count, &provider_array, B_FALSE)
+ != CRYPTO_SUCCESS)
+ return (NULL);
+
+ if (count == 0)
+ return (NULL);
+
+ ext_info = kmem_zalloc(sizeof (crypto_provider_ext_info_t), KM_SLEEP);
+
+ for (i = 0; i < count; i++) {
+ pd = provider_array[i];
+ if (match_ext_info(pd, label, manuf, serial, ext_info)) {
+ KCF_PROV_REFHOLD(pd);
+ break;
+ }
+ }
+ if (i == count)
+ pd = NULL;
+
+ kcf_free_provider_tab(count, provider_array);
+ kmem_free(ext_info, sizeof (crypto_provider_ext_info_t));
+ return (pd);
+}
+
+void
+crypto_release_provider(crypto_provider_t provider)
+{
+ KCF_PROV_REFRELE((kcf_provider_desc_t *)provider);
+}
diff --git a/usr/src/uts/common/crypto/api/kcf_object.c b/usr/src/uts/common/crypto/api/kcf_object.c
new file mode 100644
index 0000000000..148ed9a817
--- /dev/null
+++ b/usr/src/uts/common/crypto/api/kcf_object.c
@@ -0,0 +1,383 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/errno.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/cmn_err.h>
+#include <sys/sysmacros.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/sched_impl.h>
+
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_OBJECT_OFFSET(f) offsetof(crypto_object_ops_t, f)
+
+/*
+ * Create an object in the repository of `provider', session `sid', from
+ * the attribute template `attrs'/`count'.  A logical provider is first
+ * resolved to a hardware member that implements object_create.  On
+ * success the provider stores the new handle in *object_handle.
+ */
+int
+crypto_object_create(crypto_provider_t provider, crypto_session_id_t sid,
+    crypto_object_attribute_t *attrs, uint_t count,
+    crypto_object_id_t *object_handle, crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *pd = provider;
+	kcf_provider_desc_t *real_provider = pd;
+	int rv;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
+		    object_ops), CRYPTO_OBJECT_OFFSET(object_create),
+		    CHECK_RESTRICT(crq), pd, &real_provider);
+
+		if (rv != CRYPTO_SUCCESS)
+			return (rv);
+	}
+
+	/* fast path calls the provider entry point directly */
+	if (CHECK_FASTPATH(crq, real_provider)) {
+		rv = KCF_PROV_OBJECT_CREATE(real_provider, sid,
+		    attrs, count, object_handle, KCF_SWFP_RHNDL(crq));
+		KCF_PROV_INCRSTATS(pd, rv);
+	} else {
+		KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_CREATE,
+		    sid, 0, attrs, count, object_handle, 0,
+		    NULL, NULL, 0, NULL);
+		rv = kcf_submit_request(real_provider, NULL, crq,
+		    &params, B_FALSE);
+	}
+	/* drop the hold taken by kcf_get_hardware_provider_nomech() */
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+		KCF_PROV_REFRELE(real_provider);
+
+	return (rv);
+}
+
+/*
+ * Destroy the object identified by `object_handle' in session `sid'.
+ * A logical provider is first resolved to a hardware member that
+ * implements object_destroy.
+ */
+int
+crypto_object_destroy(crypto_provider_t provider, crypto_session_id_t sid,
+    crypto_object_id_t object_handle, crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *pd = provider;
+	kcf_provider_desc_t *real_provider = pd;
+	int rv;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
+		    object_ops), CRYPTO_OBJECT_OFFSET(object_destroy),
+		    CHECK_RESTRICT(crq), pd, &real_provider);
+
+		if (rv != CRYPTO_SUCCESS)
+			return (rv);
+	}
+
+	/* fast path calls the provider entry point directly */
+	if (CHECK_FASTPATH(crq, real_provider)) {
+		rv = KCF_PROV_OBJECT_DESTROY(real_provider, sid,
+		    object_handle, KCF_SWFP_RHNDL(crq));
+		KCF_PROV_INCRSTATS(pd, rv);
+	} else {
+		KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_DESTROY,
+		    sid, object_handle, NULL, 0, NULL, 0,
+		    NULL, NULL, 0, NULL);
+		rv = kcf_submit_request(real_provider, NULL, crq,
+		    &params, B_FALSE);
+	}
+	/* drop the hold taken by kcf_get_hardware_provider_nomech() */
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+		KCF_PROV_REFRELE(real_provider);
+
+	return (rv);
+}
+
+/*
+ * Copy the object `object_handle' in session `sid', applying the
+ * attribute template `attrs'/`count' to the copy.  On success the
+ * provider stores the copy's handle in *new_handle.
+ */
+int
+crypto_object_copy(crypto_provider_t provider, crypto_session_id_t sid,
+    crypto_object_id_t object_handle, crypto_object_attribute_t *attrs,
+    uint_t count, crypto_object_id_t *new_handle, crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *pd = provider;
+	kcf_provider_desc_t *real_provider = pd;
+	int rv;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
+		    object_ops), CRYPTO_OBJECT_OFFSET(object_copy),
+		    CHECK_RESTRICT(crq), pd, &real_provider);
+
+		if (rv != CRYPTO_SUCCESS)
+			return (rv);
+	}
+
+	/* fast path calls the provider entry point directly */
+	if (CHECK_FASTPATH(crq, real_provider)) {
+		rv = KCF_PROV_OBJECT_COPY(real_provider, sid,
+		    object_handle, attrs, count, new_handle,
+		    KCF_SWFP_RHNDL(crq));
+		KCF_PROV_INCRSTATS(pd, rv);
+	} else {
+		KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_COPY,
+		    sid, object_handle, attrs, count,
+		    new_handle, 0, NULL, NULL, 0, NULL);
+		rv = kcf_submit_request(real_provider, NULL, crq,
+		    &params, B_FALSE);
+	}
+	/* drop the hold taken by kcf_get_hardware_provider_nomech() */
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+		KCF_PROV_REFRELE(real_provider);
+
+	return (rv);
+}
+
+/*
+ * Read attribute values of object `object_handle' into the template
+ * `attrs'/`count'; the provider fills in the value fields of each
+ * requested attribute.
+ */
+int
+crypto_object_get_attribute_value(crypto_provider_t provider,
+    crypto_session_id_t sid, crypto_object_id_t object_handle,
+    crypto_object_attribute_t *attrs, uint_t count, crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *pd = provider;
+	kcf_provider_desc_t *real_provider = pd;
+	int rv;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
+		    object_ops),
+		    CRYPTO_OBJECT_OFFSET(object_get_attribute_value),
+		    CHECK_RESTRICT(crq), pd, &real_provider);
+
+		if (rv != CRYPTO_SUCCESS)
+			return (rv);
+	}
+
+	/* fast path calls the provider entry point directly */
+	if (CHECK_FASTPATH(crq, real_provider)) {
+		rv = KCF_PROV_OBJECT_GET_ATTRIBUTE_VALUE(real_provider,
+		    sid, object_handle, attrs, count, KCF_SWFP_RHNDL(crq));
+		KCF_PROV_INCRSTATS(pd, rv);
+	} else {
+		KCF_WRAP_OBJECT_OPS_PARAMS(&params,
+		    KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE, sid, object_handle,
+		    attrs, count, NULL, 0, NULL, NULL, 0, NULL);
+		rv = kcf_submit_request(real_provider, NULL, crq,
+		    &params, B_FALSE);
+	}
+	/* drop the hold taken by kcf_get_hardware_provider_nomech() */
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+		KCF_PROV_REFRELE(real_provider);
+
+	return (rv);
+}
+
+/*
+ * Write the attribute values in `attrs'/`count' to the object
+ * `object_handle' in session `sid'.
+ */
+int
+crypto_object_set_attribute_value(crypto_provider_t provider,
+    crypto_session_id_t sid, crypto_object_id_t object_handle,
+    crypto_object_attribute_t *attrs, uint_t count, crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *pd = provider;
+	kcf_provider_desc_t *real_provider = pd;
+	int rv;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
+		    object_ops),
+		    CRYPTO_OBJECT_OFFSET(object_set_attribute_value),
+		    CHECK_RESTRICT(crq), pd, &real_provider);
+
+		if (rv != CRYPTO_SUCCESS)
+			return (rv);
+	}
+
+	/* fast path calls the provider entry point directly */
+	if (CHECK_FASTPATH(crq, real_provider)) {
+		rv = KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(real_provider,
+		    sid, object_handle, attrs, count, KCF_SWFP_RHNDL(crq));
+		KCF_PROV_INCRSTATS(pd, rv);
+	} else {
+		KCF_WRAP_OBJECT_OPS_PARAMS(&params,
+		    KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE, sid, object_handle,
+		    attrs, count, NULL, 0, NULL, NULL, 0, NULL);
+		rv = kcf_submit_request(real_provider, NULL, crq,
+		    &params, B_FALSE);
+	}
+	/* drop the hold taken by kcf_get_hardware_provider_nomech() */
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+		KCF_PROV_REFRELE(real_provider);
+
+	return (rv);
+}
+
+/*
+ * Query the size of object `object_handle' in session `sid'; on success
+ * the provider stores the result in *size.
+ */
+int
+crypto_object_get_size(crypto_provider_t provider, crypto_session_id_t sid,
+    crypto_object_id_t object_handle, size_t *size, crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *pd = provider;
+	kcf_provider_desc_t *real_provider = pd;
+	int rv;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
+		    object_ops), CRYPTO_OBJECT_OFFSET(object_get_size),
+		    CHECK_RESTRICT(crq), pd, &real_provider);
+
+		if (rv != CRYPTO_SUCCESS)
+			return (rv);
+
+	}
+
+	/* fast path calls the provider entry point directly */
+	if (CHECK_FASTPATH(crq, real_provider)) {
+		rv = KCF_PROV_OBJECT_GET_SIZE(real_provider,
+		    sid, object_handle, size, KCF_SWFP_RHNDL(crq));
+		KCF_PROV_INCRSTATS(pd, rv);
+	} else {
+		KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_GET_SIZE, sid,
+		    object_handle, NULL, 0, NULL, size, NULL, NULL, 0, NULL);
+		rv = kcf_submit_request(real_provider, NULL, crq,
+		    &params, B_FALSE);
+	}
+	/* drop the hold taken by kcf_get_hardware_provider_nomech() */
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+		KCF_PROV_REFRELE(real_provider);
+
+	return (rv);
+}
+
+/*
+ * Begin an object search in session `sid' for objects matching the
+ * attribute template `attrs'/`count'.  On success the provider stores
+ * its find context in *cookie, which the caller passes to
+ * crypto_object_find() and crypto_object_find_final().
+ */
+int
+crypto_object_find_init(crypto_provider_t provider, crypto_session_id_t sid,
+    crypto_object_attribute_t *attrs, uint_t count, void **cookie,
+    crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *pd = provider;
+	kcf_provider_desc_t *real_provider = pd;
+	int rv;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	/* a place to return the find context is mandatory */
+	if (cookie == NULL) {
+		return (CRYPTO_ARGUMENTS_BAD);
+	}
+
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
+		    object_ops), CRYPTO_OBJECT_OFFSET(object_find_init),
+		    CHECK_RESTRICT(crq), pd, &real_provider);
+
+		if (rv != CRYPTO_SUCCESS)
+			return (rv);
+	}
+
+	/* fast path calls the provider entry point directly */
+	if (CHECK_FASTPATH(crq, real_provider)) {
+		rv = KCF_PROV_OBJECT_FIND_INIT(real_provider,
+		    sid, attrs, count, cookie, KCF_SWFP_RHNDL(crq));
+		KCF_PROV_INCRSTATS(pd, rv);
+	} else {
+		KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_FIND_INIT,
+		    sid, 0, attrs, count, NULL, 0, cookie, NULL, 0, NULL);
+		rv = kcf_submit_request(real_provider, NULL, crq,
+		    &params, B_FALSE);
+	}
+	/* drop the hold taken by kcf_get_hardware_provider_nomech() */
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+		KCF_PROV_REFRELE(real_provider);
+
+	return (rv);
+}
+
+/*
+ * Terminate an object search started with crypto_object_find_init(),
+ * releasing the provider's find context identified by `cookie'.
+ *
+ * Fix: forward the caller's request structure `crq' to
+ * kcf_submit_request() instead of NULL -- dropping it silently forced
+ * asynchronous callers to run synchronously and their completion
+ * callback was never invoked.  Also add the KCF_PROV_REFHELD assertion
+ * present in every sibling object-ops entry point.
+ */
+int
+crypto_object_find_final(crypto_provider_t provider, void *cookie,
+    crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *pd = provider;
+	kcf_provider_desc_t *real_provider = pd;
+	int rv;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
+		    object_ops), CRYPTO_OBJECT_OFFSET(object_find_final),
+		    CHECK_RESTRICT(crq), pd, &real_provider);
+
+		if (rv != CRYPTO_SUCCESS)
+			return (rv);
+	}
+
+	/* fast path calls the provider entry point directly */
+	if (CHECK_FASTPATH(crq, real_provider)) {
+		rv = KCF_PROV_OBJECT_FIND_FINAL(real_provider,
+		    cookie, KCF_SWFP_RHNDL(crq));
+		KCF_PROV_INCRSTATS(pd, rv);
+	} else {
+		KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_FIND_FINAL,
+		    0, 0, NULL, 0, NULL, 0, NULL, cookie, 0, NULL);
+		rv = kcf_submit_request(real_provider, NULL, crq, &params,
+		    B_FALSE);
+	}
+	/* drop the hold taken by kcf_get_hardware_provider_nomech() */
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+		KCF_PROV_REFRELE(real_provider);
+
+	return (rv);
+}
+
+/*
+ * Fetch the next batch of matching objects for the search identified by
+ * `cookie'.  Up to `max_count' handles are written to `handles'; the
+ * provider stores the number actually returned in *count.
+ */
+int
+crypto_object_find(crypto_provider_t provider, void *cookie,
+    crypto_object_id_t *handles, uint_t *count, uint_t max_count,
+    crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *pd = provider;
+	kcf_provider_desc_t *real_provider = pd;
+	int rv;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
+		    object_ops), CRYPTO_OBJECT_OFFSET(object_find),
+		    CHECK_RESTRICT(crq), pd, &real_provider);
+
+		if (rv != CRYPTO_SUCCESS)
+			return (rv);
+	}
+
+	/* fast path calls the provider entry point directly */
+	if (CHECK_FASTPATH(crq, real_provider)) {
+		rv = KCF_PROV_OBJECT_FIND(real_provider, cookie, handles,
+		    max_count, count, KCF_SWFP_RHNDL(crq));
+		KCF_PROV_INCRSTATS(pd, rv);
+	} else {
+		KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_FIND, 0,
+		    0, NULL, 0, handles, 0, NULL, cookie, max_count, count);
+		rv = kcf_submit_request(real_provider, NULL, crq,
+		    &params, B_FALSE);
+	}
+	/* drop the hold taken by kcf_get_hardware_provider_nomech() */
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+		KCF_PROV_REFRELE(real_provider);
+
+	return (rv);
+}
diff --git a/usr/src/uts/common/crypto/api/kcf_session.c b/usr/src/uts/common/crypto/api/kcf_session.c
new file mode 100644
index 0000000000..b887829ea3
--- /dev/null
+++ b/usr/src/uts/common/crypto/api/kcf_session.c
@@ -0,0 +1,197 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/errno.h>
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/cmn_err.h>
+#include <sys/sysmacros.h>
+#include <sys/crypto/common.h>
+#include <sys/crypto/impl.h>
+#include <sys/crypto/api.h>
+#include <sys/crypto/spi.h>
+#include <sys/crypto/sched_impl.h>
+
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_OBJECT_OFFSET(f) offsetof(crypto_object_ops_t, f)
+#define CRYPTO_SESSION_OFFSET(f) offsetof(crypto_session_ops_t, f)
+
+/*
+ * Open a session on `provider'.  The provider (or, for a logical
+ * provider, a member that implements session_open) stores the new
+ * session id in *sidp.  If no provider supports session ops the open
+ * is a no-op and CRYPTO_SUCCESS is returned.
+ *
+ * NOTE(review): `real_provider' is not initialized here; this relies on
+ * kcf_get_hardware_provider_nomech() storing NULL into it on failure --
+ * confirm against that function's contract.
+ */
+int
+crypto_session_open(crypto_provider_t provider, crypto_session_id_t *sidp,
+crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *real_provider;
+	kcf_provider_desc_t *pd = provider;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	/* find a provider that supports session ops */
+	(void) kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(session_ops),
+	    CRYPTO_SESSION_OFFSET(session_open),
+	    CHECK_RESTRICT(crq), pd, &real_provider);
+
+	if (real_provider != NULL) {
+		int rv;
+
+		ASSERT(real_provider == pd ||
+		    pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);
+
+		if (CHECK_FASTPATH(crq, pd)) {
+			rv = KCF_PROV_SESSION_OPEN(real_provider, sidp,
+			    KCF_SWFP_RHNDL(crq), pd);
+			KCF_PROV_INCRSTATS(pd, rv);
+		} else {
+			KCF_WRAP_SESSION_OPS_PARAMS(&params,
+			    KCF_OP_SESSION_OPEN, sidp, 0, CRYPTO_USER, NULL,
+			    0, pd);
+			rv = kcf_submit_request(real_provider, NULL, crq,
+			    &params, B_FALSE);
+		}
+		KCF_PROV_REFRELE(real_provider);
+
+		if (rv != CRYPTO_SUCCESS) {
+			return (rv);
+		}
+	}
+	return (CRYPTO_SUCCESS);
+}
+
+/*
+ * Close session `sid' on `provider'.  Always returns CRYPTO_SUCCESS.
+ *
+ * NOTE(review): the provider's return value `rv' is computed but
+ * deliberately discarded, so close is best-effort -- confirm this is
+ * intentional before relying on the return code.
+ */
+int
+crypto_session_close(crypto_provider_t provider, crypto_session_id_t sid,
+    crypto_call_req_t *crq)
+{
+	int rv;
+	kcf_req_params_t params;
+	kcf_provider_desc_t *real_provider;
+	kcf_provider_desc_t *pd = provider;
+
+	if (pd == NULL)
+		return (CRYPTO_ARGUMENTS_BAD);
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	/* find a provider that supports session ops */
+	(void) kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(session_ops),
+	    CRYPTO_SESSION_OFFSET(session_close), CHECK_RESTRICT(crq),
+	    pd, &real_provider);
+
+	ASSERT(real_provider == pd ||
+	    pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);
+
+	/* edge case is where the logical provider has no members */
+	if (real_provider != NULL) {
+		/* The fast path for SW providers. */
+		if (CHECK_FASTPATH(crq, pd)) {
+			rv = KCF_PROV_SESSION_CLOSE(real_provider,
+			    sid, KCF_SWFP_RHNDL(crq), pd);
+			KCF_PROV_INCRSTATS(pd, rv);
+		} else {
+			KCF_WRAP_SESSION_OPS_PARAMS(&params,
+			    KCF_OP_SESSION_CLOSE, NULL, sid,
+			    CRYPTO_USER, NULL, 0, pd);
+			rv = kcf_submit_request(real_provider, NULL, crq,
+			    &params, B_FALSE);
+		}
+		KCF_PROV_REFRELE(real_provider);
+	}
+	return (CRYPTO_SUCCESS);
+}
+
+/*
+ * Log a user of type `type' into session `sid' using the PIN
+ * `pin'/`len'.  A logical provider is first resolved to a hardware
+ * member that implements session_login.
+ */
+int
+crypto_session_login(crypto_provider_t provider, crypto_session_id_t sid,
+    crypto_user_type_t type, char *pin, ulong_t len, crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *pd = provider;
+	kcf_provider_desc_t *real_provider = pd;
+	int rv;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
+		    session_ops), CRYPTO_SESSION_OFFSET(session_login),
+		    CHECK_RESTRICT(crq), pd, &real_provider);
+
+		if (rv != CRYPTO_SUCCESS)
+			return (rv);
+	}
+
+	/* fast path calls the provider entry point directly */
+	if (CHECK_FASTPATH(crq, real_provider)) {
+		rv = KCF_PROV_SESSION_LOGIN(real_provider, sid,
+		    type, pin, len, KCF_SWFP_RHNDL(crq));
+		KCF_PROV_INCRSTATS(pd, rv);
+	} else {
+		KCF_WRAP_SESSION_OPS_PARAMS(&params, KCF_OP_SESSION_LOGIN,
+		    NULL, sid, type, pin, len, real_provider);
+		rv = kcf_submit_request(real_provider, NULL, crq,
+		    &params, B_FALSE);
+	}
+	/* drop the hold taken by kcf_get_hardware_provider_nomech() */
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+		KCF_PROV_REFRELE(real_provider);
+
+	return (rv);
+}
+
+/*
+ * Log the user out of session `sid'.  A logical provider is first
+ * resolved to a hardware member that implements session_logout.
+ */
+int
+crypto_session_logout(crypto_provider_t provider, crypto_session_id_t sid,
+    crypto_call_req_t *crq)
+{
+	kcf_req_params_t params;
+	kcf_provider_desc_t *pd = provider;
+	kcf_provider_desc_t *real_provider = pd;
+	int rv;
+
+	ASSERT(KCF_PROV_REFHELD(pd));
+
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
+		    session_ops), CRYPTO_SESSION_OFFSET(session_logout),
+		    CHECK_RESTRICT(crq), pd, &real_provider);
+
+		if (rv != CRYPTO_SUCCESS)
+			return (rv);
+	}
+
+	/* fast path calls the provider entry point directly */
+	if (CHECK_FASTPATH(crq, real_provider)) {
+		rv = KCF_PROV_SESSION_LOGOUT(real_provider, sid,
+		    KCF_SWFP_RHNDL(crq));
+		KCF_PROV_INCRSTATS(pd, rv);
+	} else {
+		KCF_WRAP_SESSION_OPS_PARAMS(&params, KCF_OP_SESSION_LOGOUT,
+		    NULL, sid, 0, NULL, 0, real_provider);
+		rv = kcf_submit_request(real_provider, NULL, crq,
+		    &params, B_FALSE);
+	}
+	/* drop the hold taken by kcf_get_hardware_provider_nomech() */
+	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+		KCF_PROV_REFRELE(real_provider);
+
+	return (rv);
+}
diff --git a/usr/src/uts/common/crypto/api/kcf_sign.c b/usr/src/uts/common/crypto/api/kcf_sign.c
index b4a052563f..c098f0ca10 100644
--- a/usr/src/uts/common/crypto/api/kcf_sign.c
+++ b/usr/src/uts/common/crypto/api/kcf_sign.c
@@ -29,12 +29,16 @@
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/kmem.h>
+#include <sys/sysmacros.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_SIGN_OFFSET(f) offsetof(crypto_sign_ops_t, f)
+
/*
* Sign entry points.
*/
@@ -43,30 +47,49 @@
* See comments for crypto_digest_init_prov().
*/
int
-crypto_sign_init_prov(kcf_provider_desc_t *pd, crypto_session_id_t sid,
+crypto_sign_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
crypto_mechanism_t *mech, crypto_key_t *key, crypto_ctx_template_t tmpl,
crypto_context_t *ctxp, crypto_call_req_t *crq)
{
- int error;
+ int rv;
crypto_ctx_t *ctx;
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(sign_ops),
+ CRYPTO_SIGN_OFFSET(sign_init),
+ CHECK_RESTRICT(crq), pd, &real_provider);
- /* First, allocate and initialize the canonical context */
- if ((ctx = kcf_new_ctx(crq, pd, sid)) == NULL)
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ /* Allocate and initialize the canonical context */
+ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
return (CRYPTO_HOST_MEMORY);
+ }
KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_INIT, sid, mech,
key, NULL, NULL, tmpl);
+ rv = kcf_submit_request(real_provider, ctx, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
- error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
- if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
+ if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED))
*ctxp = (crypto_context_t)ctx;
else {
/* Release the hold done in kcf_new_ctx(). */
KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
}
- return (error);
+ return (rv);
}
int
@@ -161,8 +184,8 @@ crypto_sign_update(crypto_context_t context, crypto_data_t *data,
crypto_ctx_t *ctx = (crypto_ctx_t *)context;
kcf_context_t *kcf_ctx;
kcf_provider_desc_t *pd;
- int error;
kcf_req_params_t params;
+ int rv;
if ((ctx == NULL) ||
((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
@@ -170,13 +193,14 @@ crypto_sign_update(crypto_context_t context, crypto_data_t *data,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
- KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_UPDATE, 0, NULL,
+ KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_UPDATE, ctx->cc_session, NULL,
NULL, data, NULL, NULL);
- error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
KCF_PROV_REFRELE(pd);
- return (error);
+ return (rv);
}
/*
@@ -189,7 +213,7 @@ crypto_sign_final(crypto_context_t context, crypto_data_t *signature,
crypto_ctx_t *ctx = (crypto_ctx_t *)context;
kcf_context_t *kcf_ctx;
kcf_provider_desc_t *pd;
- int error;
+ int rv;
kcf_req_params_t params;
if ((ctx == NULL) ||
@@ -198,30 +222,47 @@ crypto_sign_final(crypto_context_t context, crypto_data_t *signature,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
- KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_FINAL, 0, NULL,
+ KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_FINAL, ctx->cc_session, NULL,
NULL, NULL, signature, NULL);
- error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
KCF_PROV_REFRELE(pd);
/* Release the hold done in kcf_new_ctx() during init step. */
- KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
- return (error);
+ KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx);
+ return (rv);
}
int
-crypto_sign_prov(kcf_provider_desc_t *pd, crypto_session_id_t sid,
+crypto_sign_prov(crypto_provider_t provider, crypto_session_id_t sid,
crypto_mechanism_t *mech, crypto_key_t *key, crypto_data_t *data,
crypto_ctx_template_t tmpl, crypto_data_t *signature,
crypto_call_req_t *crq)
{
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(sign_ops),
+ CRYPTO_SIGN_OFFSET(sign_atomic), CHECK_RESTRICT(crq),
+ pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech,
key, data, signature, tmpl);
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
- return (kcf_submit_request(pd, NULL, crq, &params, B_FALSE));
+ return (rv);
}
static int
@@ -315,18 +356,34 @@ crypto_sign(crypto_mechanism_t *mech, crypto_key_t *key, crypto_data_t *data,
}
int
-crypto_sign_recover_prov(kcf_provider_desc_t *pd,
- crypto_session_id_t sid, crypto_mechanism_t *mech, crypto_key_t *key,
- crypto_data_t *data, crypto_ctx_template_t tmpl, crypto_data_t *signature,
+crypto_sign_recover_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_key_t *key, crypto_data_t *data,
+ crypto_ctx_template_t tmpl, crypto_data_t *signature,
crypto_call_req_t *crq)
{
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(sign_ops),
+ CRYPTO_SIGN_OFFSET(sign_recover_atomic),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_SIGN_RECOVER_ATOMIC, sid, mech,
key, data, signature, tmpl);
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
- return (kcf_submit_request(pd, NULL, crq, &params, B_FALSE));
+ return (rv);
}
int
@@ -339,30 +396,49 @@ crypto_sign_recover(crypto_mechanism_t *mech, crypto_key_t *key,
}
int
-crypto_sign_recover_init_prov(kcf_provider_desc_t *pd, crypto_session_id_t sid,
- crypto_mechanism_t *mech, crypto_key_t *key, crypto_ctx_template_t tmpl,
- crypto_context_t *ctxp, crypto_call_req_t *crq)
+crypto_sign_recover_init_prov(crypto_provider_t provider,
+ crypto_session_id_t sid, crypto_mechanism_t *mech, crypto_key_t *key,
+ crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *crq)
{
- int error;
+ int rv;
crypto_ctx_t *ctx;
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(sign_ops),
+ CRYPTO_SIGN_OFFSET(sign_recover_init),
+ CHECK_RESTRICT(crq), pd, &real_provider);
- /* First, allocate and initialize the canonical context */
- if ((ctx = kcf_new_ctx(crq, pd, sid)) == NULL)
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ /* Allocate and initialize the canonical context */
+ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
return (CRYPTO_HOST_MEMORY);
+ }
KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_SIGN_RECOVER_INIT, sid, mech,
key, NULL, NULL, tmpl);
+ rv = kcf_submit_request(real_provider, ctx, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
- error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
- if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
+ if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED))
*ctxp = (crypto_context_t)ctx;
else {
/* Release the hold done in kcf_new_ctx(). */
KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
}
- return (error);
+ return (rv);
}
int
diff --git a/usr/src/uts/common/crypto/api/kcf_verify.c b/usr/src/uts/common/crypto/api/kcf_verify.c
index 0ca0f376e0..dee7197df2 100644
--- a/usr/src/uts/common/crypto/api/kcf_verify.c
+++ b/usr/src/uts/common/crypto/api/kcf_verify.c
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -29,12 +29,16 @@
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/kmem.h>
+#include <sys/sysmacros.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/sched_impl.h>
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
+#define CRYPTO_VERIFY_OFFSET(f) offsetof(crypto_verify_ops_t, f)
+
/*
* Verify entry points.
*/
@@ -43,30 +47,49 @@
* See comments for crypto_digest_init_prov().
*/
int
-crypto_verify_init_prov(kcf_provider_desc_t *pd, crypto_session_id_t sid,
+crypto_verify_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
crypto_mechanism_t *mech, crypto_key_t *key, crypto_ctx_template_t tmpl,
crypto_context_t *ctxp, crypto_call_req_t *crq)
{
- int error;
+ int rv;
crypto_ctx_t *ctx;
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(verify_ops),
+ CRYPTO_VERIFY_OFFSET(verify_init),
+ CHECK_RESTRICT(crq), pd, &real_provider);
- /* First, allocate and initialize the canonical context */
- if ((ctx = kcf_new_ctx(crq, pd, sid)) == NULL)
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ /* Allocate and initialize the canonical context */
+ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
return (CRYPTO_HOST_MEMORY);
+ }
KCF_WRAP_VERIFY_OPS_PARAMS(&params, KCF_OP_INIT, sid, mech,
key, NULL, NULL, tmpl);
+ rv = kcf_submit_request(real_provider, ctx, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
- error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
- if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
+ if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED))
*ctxp = (crypto_context_t)ctx;
else {
/* Release the hold done in kcf_new_ctx(). */
KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
}
- return (error);
+ return (rv);
}
@@ -163,8 +186,8 @@ crypto_verify_update(crypto_context_t context, crypto_data_t *data,
crypto_ctx_t *ctx = (crypto_ctx_t *)context;
kcf_context_t *kcf_ctx;
kcf_provider_desc_t *pd;
- int error;
kcf_req_params_t params;
+ int rv;
if ((ctx == NULL) ||
((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
@@ -172,13 +195,14 @@ crypto_verify_update(crypto_context_t context, crypto_data_t *data,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
- KCF_WRAP_VERIFY_OPS_PARAMS(&params, KCF_OP_UPDATE, 0, NULL,
- NULL, data, NULL, NULL);
- error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ KCF_WRAP_VERIFY_OPS_PARAMS(&params, KCF_OP_UPDATE, ctx->cc_session,
+ NULL, NULL, data, NULL, NULL);
+ rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
KCF_PROV_REFRELE(pd);
- return (error);
+ return (rv);
}
/*
@@ -191,8 +215,8 @@ crypto_verify_final(crypto_context_t context, crypto_data_t *signature,
crypto_ctx_t *ctx = (crypto_ctx_t *)context;
kcf_context_t *kcf_ctx;
kcf_provider_desc_t *pd;
- int error;
kcf_req_params_t params;
+ int rv;
if ((ctx == NULL) ||
((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
@@ -200,30 +224,47 @@ crypto_verify_final(crypto_context_t context, crypto_data_t *signature,
return (CRYPTO_INVALID_CONTEXT);
}
+ ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
KCF_PROV_REFHOLD(pd);
- KCF_WRAP_VERIFY_OPS_PARAMS(&params, KCF_OP_FINAL, 0, NULL,
- NULL, NULL, signature, NULL);
- error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
+ KCF_WRAP_VERIFY_OPS_PARAMS(&params, KCF_OP_FINAL, ctx->cc_session,
+ NULL, NULL, NULL, signature, NULL);
+ rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
KCF_PROV_REFRELE(pd);
/* Release the hold done in kcf_new_ctx() during init step. */
- KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
- return (error);
+ KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx);
+ return (rv);
}
int
-crypto_verify_prov(kcf_provider_desc_t *pd,
- crypto_session_id_t sid, crypto_mechanism_t *mech, crypto_key_t *key,
- crypto_data_t *data, crypto_ctx_template_t tmpl, crypto_data_t *signature,
+crypto_verify_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_key_t *key, crypto_data_t *data,
+ crypto_ctx_template_t tmpl, crypto_data_t *signature,
crypto_call_req_t *crq)
{
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(verify_ops),
+ CRYPTO_VERIFY_OFFSET(verify_atomic), CHECK_RESTRICT(crq),
+ pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
KCF_WRAP_VERIFY_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech,
key, data, signature, tmpl);
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
- return (kcf_submit_request(pd, NULL, crq, &params, B_FALSE));
+ return (rv);
}
static int
@@ -318,18 +359,33 @@ crypto_verify(crypto_mechanism_t *mech, crypto_key_t *key, crypto_data_t *data,
}
int
-crypto_verify_recover_prov(kcf_provider_desc_t *pd,
- crypto_session_id_t sid, crypto_mechanism_t *mech, crypto_key_t *key,
- crypto_data_t *signature, crypto_ctx_template_t tmpl, crypto_data_t *data,
- crypto_call_req_t *crq)
+crypto_verify_recover_prov(crypto_provider_t provider, crypto_session_id_t sid,
+ crypto_mechanism_t *mech, crypto_key_t *key, crypto_data_t *signature,
+ crypto_ctx_template_t tmpl, crypto_data_t *data, crypto_call_req_t *crq)
{
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+ int rv;
ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(verify_ops),
+ CRYPTO_VERIFY_OFFSET(verify_recover_atomic),
+ CHECK_RESTRICT(crq), pd, &real_provider);
+
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
KCF_WRAP_VERIFY_OPS_PARAMS(&params, KCF_OP_VERIFY_RECOVER_ATOMIC,
sid, mech, key, data, signature, tmpl);
+ rv = kcf_submit_request(real_provider, NULL, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
- return (kcf_submit_request(pd, NULL, crq, &params, B_FALSE));
+ return (rv);
}
int
@@ -342,30 +398,49 @@ crypto_verify_recover(crypto_mechanism_t *mech, crypto_key_t *key,
}
int
-crypto_verify_recover_init_prov(kcf_provider_desc_t *pd,
+crypto_verify_recover_init_prov(crypto_provider_t provider,
crypto_session_id_t sid, crypto_mechanism_t *mech, crypto_key_t *key,
crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *crq)
{
- int error;
+ int rv;
crypto_ctx_t *ctx;
kcf_req_params_t params;
+ kcf_provider_desc_t *pd = provider;
+ kcf_provider_desc_t *real_provider = pd;
+
+ ASSERT(KCF_PROV_REFHELD(pd));
+
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
+ rv = kcf_get_hardware_provider(mech->cm_type,
+ CRYPTO_MECH_INVALID, CRYPTO_OPS_OFFSET(verify_ops),
+ CRYPTO_VERIFY_OFFSET(verify_recover_init),
+ CHECK_RESTRICT(crq), pd, &real_provider);
- /* First, allocate and initialize the canonical context */
- if ((ctx = kcf_new_ctx(crq, pd, sid)) == NULL)
+ if (rv != CRYPTO_SUCCESS)
+ return (rv);
+ }
+
+ /* Allocate and initialize the canonical context */
+ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
return (CRYPTO_HOST_MEMORY);
+ }
KCF_WRAP_VERIFY_OPS_PARAMS(&params, KCF_OP_VERIFY_RECOVER_INIT,
sid, mech, key, NULL, NULL, tmpl);
+ rv = kcf_submit_request(real_provider, ctx, crq, &params, B_FALSE);
+ if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
+ KCF_PROV_REFRELE(real_provider);
- error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
- if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
+ if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED))
*ctxp = (crypto_context_t)ctx;
else {
/* Release the hold done in kcf_new_ctx(). */
KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
}
- return (error);
+ return (rv);
}
int
diff --git a/usr/src/uts/common/crypto/core/kcf.c b/usr/src/uts/common/crypto/core/kcf.c
index 35e8db56d4..9b6cc04a9a 100644
--- a/usr/src/uts/common/crypto/core/kcf.c
+++ b/usr/src/uts/common/crypto/core/kcf.c
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -192,10 +192,10 @@ kcf_verify_signature(kcf_provider_desc_t *pd)
* the module if it carries a signature. Any operation set which has
* a encryption/decryption component is a candidate for verification.
*/
- if (prov_ops->cipher_ops == NULL && prov_ops->dual_ops == NULL &&
- prov_ops->dual_cipher_mac_ops == NULL &&
- prov_ops->key_ops == NULL && prov_ops->sign_ops == NULL &&
- prov_ops->verify_ops == NULL && mp->sigdata == NULL) {
+ if (prov_ops->co_cipher_ops == NULL && prov_ops->co_dual_ops == NULL &&
+ prov_ops->co_dual_cipher_mac_ops == NULL &&
+ prov_ops->co_key_ops == NULL && prov_ops->co_sign_ops == NULL &&
+ prov_ops->co_verify_ops == NULL && mp->sigdata == NULL) {
return (0);
}
diff --git a/usr/src/uts/common/crypto/core/kcf_callprov.c b/usr/src/uts/common/crypto/core/kcf_callprov.c
index e87c8ae715..50c84e5f7a 100644
--- a/usr/src/uts/common/crypto/core/kcf_callprov.c
+++ b/usr/src/uts/common/crypto/core/kcf_callprov.c
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -127,8 +127,10 @@ is_valid_provider_for_mech(kcf_provider_desc_t *pd, kcf_mech_entry_t *me)
* Returns NULL if no provider can be found.
*/
int
-kcf_get_hardware_provider(crypto_mech_type_t mech_type, offset_t offset_1,
-offset_t offset_2, kcf_provider_desc_t *old, kcf_provider_desc_t **new)
+kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
+ crypto_mech_type_t mech_type_2, offset_t offset_1, offset_t offset_2,
+ boolean_t call_restrict, kcf_provider_desc_t *old,
+ kcf_provider_desc_t **new)
{
kcf_provider_desc_t *provider, *gpd = NULL, *real_pd = old;
kcf_provider_list_t *p;
@@ -139,13 +141,13 @@ offset_t offset_2, kcf_provider_desc_t *old, kcf_provider_desc_t **new)
int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;
/* get the mech entry for the specified mechanism */
- class = KCF_MECH2CLASS(mech_type);
+ class = KCF_MECH2CLASS(mech_type_1);
if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
return (CRYPTO_MECHANISM_INVALID);
}
me_tab = &kcf_mech_tabs_tab[class];
- index = KCF_MECH2INDEX(mech_type);
+ index = KCF_MECH2INDEX(mech_type_1);
if ((index < 0) || (index >= me_tab->met_size)) {
return (CRYPTO_MECHANISM_INVALID);
}
@@ -180,7 +182,8 @@ offset_t offset_2, kcf_provider_desc_t *old, kcf_provider_desc_t **new)
ASSERT(provider->pd_prov_type !=
CRYPTO_LOGICAL_PROVIDER);
- if (!KCF_IS_PROV_USABLE(provider)) {
+ if (!KCF_IS_PROV_USABLE(provider) ||
+ (call_restrict && provider->pd_restricted)) {
p = p->pl_next;
continue;
}
@@ -190,6 +193,28 @@ offset_t offset_2, kcf_provider_desc_t *old, kcf_provider_desc_t **new)
continue;
}
+ /* provider does second mech */
+ if (mech_type_2 != CRYPTO_MECH_INVALID) {
+ crypto_mech_type_t mech_type;
+ int i;
+
+ /* convert from kef to provider's number */
+ mech_type = provider->pd_map_mechnums
+ [KCF_MECH2CLASS(mech_type_2)]
+ [KCF_MECH2INDEX(mech_type_2)];
+
+ for (i = 0; i < provider->pd_mech_list_count;
+ i++) {
+ if (provider->pd_mechanisms[i]
+ .cm_mech_number == mech_type)
+ break;
+ }
+ if (i == provider->pd_mech_list_count) {
+ p = p->pl_next;
+ continue;
+ }
+ }
+
if (KCF_PROV_NULL_ENTRY_POINT(provider, offset_1,
offset_2, ops)) {
p = p->pl_next;
@@ -204,18 +229,20 @@ offset_t offset_2, kcf_provider_desc_t *old, kcf_provider_desc_t **new)
p = p->pl_next;
}
- mutex_exit(&old->pd_lock);
- if (gpd != NULL)
+ if (gpd != NULL) {
real_pd = gpd;
- else {
+ KCF_PROV_REFHOLD(real_pd);
+ } else {
/* can't find provider */
real_pd = NULL;
rv = CRYPTO_MECHANISM_INVALID;
}
+ mutex_exit(&old->pd_lock);
} else {
- if (!KCF_IS_PROV_USABLE(old)) {
+ if (!KCF_IS_PROV_USABLE(old) ||
+ (call_restrict && old->pd_restricted)) {
real_pd = NULL;
rv = CRYPTO_DEVICE_ERROR;
goto out;
@@ -230,7 +257,9 @@ offset_t offset_2, kcf_provider_desc_t *old, kcf_provider_desc_t **new)
if (KCF_PROV_NULL_ENTRY_POINT(old, offset_1, offset_2, ops)) {
real_pd = NULL;
rv = CRYPTO_NOT_SUPPORTED;
+ goto out;
}
+ KCF_PROV_REFHOLD(real_pd);
}
out:
mutex_exit(&me->me_mutex);
@@ -249,7 +278,8 @@ out:
*/
int
kcf_get_hardware_provider_nomech(offset_t offset_1, offset_t offset_2,
- kcf_provider_desc_t *old, kcf_provider_desc_t **new)
+ boolean_t call_restrict, kcf_provider_desc_t *old,
+ kcf_provider_desc_t **new)
{
kcf_provider_desc_t *provider, *gpd = NULL, *real_pd = old;
kcf_provider_list_t *p;
@@ -283,7 +313,8 @@ kcf_get_hardware_provider_nomech(offset_t offset_1, offset_t offset_2,
ASSERT(provider->pd_prov_type !=
CRYPTO_LOGICAL_PROVIDER);
- if (!KCF_IS_PROV_USABLE(provider)) {
+ if (!KCF_IS_PROV_USABLE(provider) ||
+ (call_restrict && provider->pd_restricted)) {
p = p->pl_next;
continue;
}
@@ -304,16 +335,18 @@ kcf_get_hardware_provider_nomech(offset_t offset_1, offset_t offset_2,
}
mutex_exit(&old->pd_lock);
- if (gpd != NULL)
+ if (gpd != NULL) {
real_pd = gpd;
- else {
+ KCF_PROV_REFHOLD(real_pd);
+ } else {
/* can't find provider */
real_pd = NULL;
rv = CRYPTO_DEVICE_ERROR;
}
} else {
- if (!KCF_IS_PROV_USABLE(old)) {
+ if (!KCF_IS_PROV_USABLE(old) ||
+ (call_restrict && old->pd_restricted)) {
real_pd = NULL;
rv = CRYPTO_DEVICE_ERROR;
goto out;
@@ -322,7 +355,9 @@ kcf_get_hardware_provider_nomech(offset_t offset_1, offset_t offset_2,
if (KCF_PROV_NULL_ENTRY_POINT(old, offset_1, offset_2, ops)) {
real_pd = NULL;
rv = CRYPTO_NOT_SUPPORTED;
+ goto out;
}
+ KCF_PROV_REFHOLD(real_pd);
}
out:
*new = real_pd;
diff --git a/usr/src/uts/common/crypto/core/kcf_cryptoadm.c b/usr/src/uts/common/crypto/core/kcf_cryptoadm.c
index 0cb9bc7d02..5e47df7541 100644
--- a/usr/src/uts/common/crypto/core/kcf_cryptoadm.c
+++ b/usr/src/uts/common/crypto/core/kcf_cryptoadm.c
@@ -55,7 +55,6 @@ kmutex_t soft_config_mutex;
*/
kcf_soft_conf_entry_t *soft_config_list;
-static boolean_t in_soft_config_list(char *);
static int add_soft_config(char *, uint_t, crypto_mech_name_t *);
static int dup_mech_names(kcf_provider_desc_t *, crypto_mech_name_t **,
uint_t *, int);
@@ -928,7 +927,7 @@ get_sw_provider_for_mech(crypto_mech_name_t mech, char **name)
* This routine searches the soft_config_list for the specified
* software provider, returning B_TRUE if it is in the list.
*/
-static boolean_t
+boolean_t
in_soft_config_list(char *provider_name)
{
kcf_soft_conf_entry_t *p;
diff --git a/usr/src/uts/common/crypto/core/kcf_prov_tabs.c b/usr/src/uts/common/crypto/core/kcf_prov_tabs.c
index aae772007f..fd2aef48de 100644
--- a/usr/src/uts/common/crypto/core/kcf_prov_tabs.c
+++ b/usr/src/uts/common/crypto/core/kcf_prov_tabs.c
@@ -203,6 +203,83 @@ kcf_prov_tab_lookup(crypto_provider_id_t prov_id)
return (prov_desc);
}
+static void
+allocate_ops_v1(crypto_ops_t *src, crypto_ops_t *dst, uint_t *mech_list_count)
+{
+ if (src->co_control_ops != NULL)
+ dst->co_control_ops = kmem_alloc(sizeof (crypto_control_ops_t),
+ KM_SLEEP);
+
+ if (src->co_digest_ops != NULL)
+ dst->co_digest_ops = kmem_alloc(sizeof (crypto_digest_ops_t),
+ KM_SLEEP);
+
+ if (src->co_cipher_ops != NULL)
+ dst->co_cipher_ops = kmem_alloc(sizeof (crypto_cipher_ops_t),
+ KM_SLEEP);
+
+ if (src->co_mac_ops != NULL)
+ dst->co_mac_ops = kmem_alloc(sizeof (crypto_mac_ops_t),
+ KM_SLEEP);
+
+ if (src->co_sign_ops != NULL)
+ dst->co_sign_ops = kmem_alloc(sizeof (crypto_sign_ops_t),
+ KM_SLEEP);
+
+ if (src->co_verify_ops != NULL)
+ dst->co_verify_ops = kmem_alloc(sizeof (crypto_verify_ops_t),
+ KM_SLEEP);
+
+ if (src->co_dual_ops != NULL)
+ dst->co_dual_ops = kmem_alloc(sizeof (crypto_dual_ops_t),
+ KM_SLEEP);
+
+ if (src->co_dual_cipher_mac_ops != NULL)
+ dst->co_dual_cipher_mac_ops = kmem_alloc(
+ sizeof (crypto_dual_cipher_mac_ops_t), KM_SLEEP);
+
+ if (src->co_random_ops != NULL) {
+ dst->co_random_ops = kmem_alloc(
+ sizeof (crypto_random_number_ops_t), KM_SLEEP);
+
+ /*
+ * Allocate storage to store the array of supported mechanisms
+ * specified by provider. We allocate extra mechanism storage
+ * if the provider has random_ops since we keep an internal
+ * mechanism, SUN_RANDOM, in this case.
+ */
+ (*mech_list_count)++;
+ }
+
+ if (src->co_session_ops != NULL)
+ dst->co_session_ops = kmem_alloc(sizeof (crypto_session_ops_t),
+ KM_SLEEP);
+
+ if (src->co_object_ops != NULL)
+ dst->co_object_ops = kmem_alloc(sizeof (crypto_object_ops_t),
+ KM_SLEEP);
+
+ if (src->co_key_ops != NULL)
+ dst->co_key_ops = kmem_alloc(sizeof (crypto_key_ops_t),
+ KM_SLEEP);
+
+ if (src->co_provider_ops != NULL)
+ dst->co_provider_ops = kmem_alloc(
+ sizeof (crypto_provider_management_ops_t), KM_SLEEP);
+
+ if (src->co_ctx_ops != NULL)
+ dst->co_ctx_ops = kmem_alloc(sizeof (crypto_ctx_ops_t),
+ KM_SLEEP);
+}
+
+static void
+allocate_ops_v2(crypto_ops_t *src, crypto_ops_t *dst)
+{
+ if (src->co_mech_ops != NULL)
+ dst->co_mech_ops = kmem_alloc(sizeof (crypto_mech_ops_t),
+ KM_SLEEP);
+}
+
/*
* Allocate a provider descriptor. mech_list_count specifies the
* number of mechanisms supported by the providers, and is used
@@ -246,72 +323,9 @@ kcf_alloc_provider_desc(crypto_provider_info_t *info)
desc->pd_ops_vector = kmem_zalloc(sizeof (crypto_ops_t), KM_SLEEP);
if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
- if (src_ops->control_ops != NULL)
- desc->pd_ops_vector->control_ops = kmem_alloc(
- sizeof (crypto_control_ops_t), KM_SLEEP);
-
- if (src_ops->digest_ops != NULL)
- desc->pd_ops_vector->digest_ops = kmem_alloc(
- sizeof (crypto_digest_ops_t), KM_SLEEP);
-
- if (src_ops->cipher_ops != NULL)
- desc->pd_ops_vector->cipher_ops = kmem_alloc(
- sizeof (crypto_cipher_ops_t), KM_SLEEP);
-
- if (src_ops->mac_ops != NULL)
- desc->pd_ops_vector->mac_ops = kmem_alloc(
- sizeof (crypto_mac_ops_t), KM_SLEEP);
-
- if (src_ops->sign_ops != NULL)
- desc->pd_ops_vector->sign_ops = kmem_alloc(
- sizeof (crypto_sign_ops_t), KM_SLEEP);
-
- if (src_ops->verify_ops != NULL)
- desc->pd_ops_vector->verify_ops = kmem_alloc(
- sizeof (crypto_verify_ops_t), KM_SLEEP);
-
- if (src_ops->dual_ops != NULL)
- desc->pd_ops_vector->dual_ops = kmem_alloc(
- sizeof (crypto_dual_ops_t), KM_SLEEP);
-
- if (src_ops->dual_cipher_mac_ops != NULL)
- desc->pd_ops_vector->dual_cipher_mac_ops = kmem_alloc(
- sizeof (crypto_dual_cipher_mac_ops_t), KM_SLEEP);
-
- if (src_ops->random_ops != NULL)
- desc->pd_ops_vector->random_ops = kmem_alloc(
- sizeof (crypto_random_number_ops_t), KM_SLEEP);
-
- if (src_ops->session_ops != NULL)
- desc->pd_ops_vector->session_ops = kmem_alloc(
- sizeof (crypto_session_ops_t), KM_SLEEP);
-
- if (src_ops->object_ops != NULL)
- desc->pd_ops_vector->object_ops = kmem_alloc(
- sizeof (crypto_object_ops_t), KM_SLEEP);
-
- if (src_ops->key_ops != NULL)
- desc->pd_ops_vector->key_ops = kmem_alloc(
- sizeof (crypto_key_ops_t), KM_SLEEP);
-
- if (src_ops->provider_ops != NULL)
- desc->pd_ops_vector->provider_ops = kmem_alloc(
- sizeof (crypto_provider_management_ops_t),
- KM_SLEEP);
-
- if (src_ops->ctx_ops != NULL)
- desc->pd_ops_vector->ctx_ops = kmem_alloc(
- sizeof (crypto_ctx_ops_t), KM_SLEEP);
-
- /*
- * Allocate storage to store the array of supported
- * mechanisms specified by provider. We allocate an extra
- * crypto_mech_info_t element if the provider has random_ops
- * since we keep an internal mechanism, SUN_RANDOM,
- * in this case.
- */
- if (src_ops->random_ops != NULL)
- mech_list_count++;
+ allocate_ops_v1(src_ops, desc->pd_ops_vector, &mech_list_count);
+ if (info->pi_interface_version == CRYPTO_SPI_VERSION_2)
+ allocate_ops_v2(src_ops, desc->pd_ops_vector);
}
desc->pd_mech_list_count = mech_list_count;
@@ -382,62 +396,66 @@ kcf_free_provider_desc(kcf_provider_desc_t *desc)
if (desc->pd_ops_vector != NULL) {
- if (desc->pd_ops_vector->control_ops != NULL)
- kmem_free(desc->pd_ops_vector->control_ops,
+ if (desc->pd_ops_vector->co_control_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_control_ops,
sizeof (crypto_control_ops_t));
- if (desc->pd_ops_vector->digest_ops != NULL)
- kmem_free(desc->pd_ops_vector->digest_ops,
+ if (desc->pd_ops_vector->co_digest_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_digest_ops,
sizeof (crypto_digest_ops_t));
- if (desc->pd_ops_vector->cipher_ops != NULL)
- kmem_free(desc->pd_ops_vector->cipher_ops,
+ if (desc->pd_ops_vector->co_cipher_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_cipher_ops,
sizeof (crypto_cipher_ops_t));
- if (desc->pd_ops_vector->mac_ops != NULL)
- kmem_free(desc->pd_ops_vector->mac_ops,
+ if (desc->pd_ops_vector->co_mac_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_mac_ops,
sizeof (crypto_mac_ops_t));
- if (desc->pd_ops_vector->sign_ops != NULL)
- kmem_free(desc->pd_ops_vector->sign_ops,
+ if (desc->pd_ops_vector->co_sign_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_sign_ops,
sizeof (crypto_sign_ops_t));
- if (desc->pd_ops_vector->verify_ops != NULL)
- kmem_free(desc->pd_ops_vector->verify_ops,
+ if (desc->pd_ops_vector->co_verify_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_verify_ops,
sizeof (crypto_verify_ops_t));
- if (desc->pd_ops_vector->dual_ops != NULL)
- kmem_free(desc->pd_ops_vector->dual_ops,
+ if (desc->pd_ops_vector->co_dual_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_dual_ops,
sizeof (crypto_dual_ops_t));
- if (desc->pd_ops_vector->dual_cipher_mac_ops != NULL)
- kmem_free(desc->pd_ops_vector->dual_cipher_mac_ops,
+ if (desc->pd_ops_vector->co_dual_cipher_mac_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_dual_cipher_mac_ops,
sizeof (crypto_dual_cipher_mac_ops_t));
- if (desc->pd_ops_vector->random_ops != NULL)
- kmem_free(desc->pd_ops_vector->random_ops,
+ if (desc->pd_ops_vector->co_random_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_random_ops,
sizeof (crypto_random_number_ops_t));
- if (desc->pd_ops_vector->session_ops != NULL)
- kmem_free(desc->pd_ops_vector->session_ops,
+ if (desc->pd_ops_vector->co_session_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_session_ops,
sizeof (crypto_session_ops_t));
- if (desc->pd_ops_vector->object_ops != NULL)
- kmem_free(desc->pd_ops_vector->object_ops,
+ if (desc->pd_ops_vector->co_object_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_object_ops,
sizeof (crypto_object_ops_t));
- if (desc->pd_ops_vector->key_ops != NULL)
- kmem_free(desc->pd_ops_vector->key_ops,
+ if (desc->pd_ops_vector->co_key_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_key_ops,
sizeof (crypto_key_ops_t));
- if (desc->pd_ops_vector->provider_ops != NULL)
- kmem_free(desc->pd_ops_vector->provider_ops,
+ if (desc->pd_ops_vector->co_provider_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_provider_ops,
sizeof (crypto_provider_management_ops_t));
- if (desc->pd_ops_vector->ctx_ops != NULL)
- kmem_free(desc->pd_ops_vector->ctx_ops,
+ if (desc->pd_ops_vector->co_ctx_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_ctx_ops,
sizeof (crypto_ctx_ops_t));
+ if (desc->pd_ops_vector->co_mech_ops != NULL)
+ kmem_free(desc->pd_ops_vector->co_mech_ops,
+ sizeof (crypto_mech_ops_t));
+
kmem_free(desc->pd_ops_vector, sizeof (crypto_ops_t));
}
@@ -545,7 +563,8 @@ kcf_get_slot_list(uint_t *count, kcf_provider_desc_t ***array,
mutex_enter(&prov_tab_mutex);
for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
if ((prov_desc = prov_tab[i]) != NULL &&
- (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER ||
+ ((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
+ (prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
if (KCF_IS_PROV_USABLE(prov_desc) ||
(unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
@@ -569,7 +588,8 @@ again:
/* fill the slot list */
for (i = 0, j = 0; i < KCF_MAX_PROVIDERS; i++) {
if ((prov_desc = prov_tab[i]) != NULL &&
- (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER ||
+ ((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
+ (prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
if (KCF_IS_PROV_USABLE(prov_desc) ||
(unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
diff --git a/usr/src/uts/common/crypto/io/aes.c b/usr/src/uts/common/crypto/io/aes.c
index d6942590b2..36f7e16b57 100644
--- a/usr/src/uts/common/crypto/io/aes.c
+++ b/usr/src/uts/common/crypto/io/aes.c
@@ -64,7 +64,8 @@ static struct modlinkage modlinkage = {
typedef enum aes_mech_type {
AES_ECB_MECH_INFO_TYPE, /* SUN_CKM_AES_ECB */
AES_CBC_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC */
- AES_CBC_PAD_MECH_INFO_TYPE /* SUN_CKM_AES_CBC_PAD */
+ AES_CBC_PAD_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC_PAD */
+ AES_CTR_MECH_INFO_TYPE /* SUN_CKM_AES_CTR */
} aes_mech_type_t;
/*
@@ -91,12 +92,18 @@ static crypto_mech_info_t aes_mech_info_tab[] = {
{SUN_CKM_AES_CBC, AES_CBC_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
+ AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+ /* AES_CTR */
+ {SUN_CKM_AES_CTR, AES_CTR_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
+ CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};
#define AES_VALID_MECH(mech) \
(((mech)->cm_type == AES_ECB_MECH_INFO_TYPE || \
- (mech)->cm_type == AES_CBC_MECH_INFO_TYPE) ? 1 : 0)
+ (mech)->cm_type == AES_CBC_MECH_INFO_TYPE || \
+ (mech)->cm_type == AES_CTR_MECH_INFO_TYPE) ? 1 : 0)
/* operations are in-place if the output buffer is NULL */
#define AES_ARG_INPLACE(input, output) \
@@ -317,9 +324,16 @@ aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
if (!AES_VALID_MECH(mechanism))
return (CRYPTO_MECHANISM_INVALID);
- if (mechanism->cm_param != NULL &&
- mechanism->cm_param_len != AES_BLOCK_LEN)
- return (CRYPTO_MECHANISM_PARAM_INVALID);
+ if (mechanism->cm_param != NULL) {
+ if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
+ if (mechanism->cm_param_len !=
+ sizeof (CK_AES_CTR_PARAMS))
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ } else {
+ if (mechanism->cm_param_len != AES_BLOCK_LEN)
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ }
/*
* Allocate an AES context.
@@ -534,17 +548,17 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
aes_ctx_t *aes_ctx;
+ ASSERT(ctx->cc_provider_private != NULL);
+ aes_ctx = ctx->cc_provider_private;
+
/*
- * Plaintext must be a multiple of AES block size.
- * This test only works for non-padded mechanisms
- * when blocksize is 2^N.
+ * For block ciphers, plaintext must be a multiple of AES block size.
+ * This test is only valid for ciphers whose blocksize is a power of 2.
*/
- if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
+ if (((aes_ctx->ac_flags & AES_CTR_MODE) == 0) &&
+ (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
return (CRYPTO_DATA_LEN_RANGE);
- ASSERT(ctx->cc_provider_private != NULL);
- aes_ctx = ctx->cc_provider_private;
-
AES_ARG_INPLACE(plaintext, ciphertext);
/*
@@ -580,17 +594,17 @@ aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
aes_ctx_t *aes_ctx;
+ ASSERT(ctx->cc_provider_private != NULL);
+ aes_ctx = ctx->cc_provider_private;
+
/*
- * Ciphertext must be a multiple of AES block size.
- * This test only works for non-padded mechanisms
- * when blocksize is 2^N.
+ * For block ciphers, ciphertext must be a multiple of AES block size.
+ * This test is only valid for ciphers whose blocksize is a power of 2.
*/
- if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
+ if (((aes_ctx->ac_flags & AES_CTR_MODE) == 0) &&
+ (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
- ASSERT(ctx->cc_provider_private != NULL);
- aes_ctx = ctx->cc_provider_private;
-
AES_ARG_INPLACE(ciphertext, plaintext);
/*
@@ -623,6 +637,7 @@ aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
off_t saved_offset;
size_t saved_length, out_len;
int ret = CRYPTO_SUCCESS;
+ aes_ctx_t *aes_ctx;
ASSERT(ctx->cc_provider_private != NULL);
@@ -662,6 +677,18 @@ aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
ret = CRYPTO_ARGUMENTS_BAD;
}
+ /*
+ * Since AES counter mode is a stream cipher, we call
+ * aes_counter_final() to pick up any remaining bytes.
+ * It is an internal function that does not destroy
+ * the context like *normal* final routines.
+ */
+ aes_ctx = ctx->cc_provider_private;
+ if ((aes_ctx->ac_flags & AES_CTR_MODE) &&
+ (aes_ctx->ac_remainder_len > 0)) {
+ ret = aes_counter_final(aes_ctx, ciphertext);
+ }
+
if (ret == CRYPTO_SUCCESS) {
if (plaintext != ciphertext)
ciphertext->cd_length =
@@ -682,6 +709,7 @@ aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
off_t saved_offset;
size_t saved_length, out_len;
int ret = CRYPTO_SUCCESS;
+ aes_ctx_t *aes_ctx;
ASSERT(ctx->cc_provider_private != NULL);
@@ -721,6 +749,18 @@ aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
ret = CRYPTO_ARGUMENTS_BAD;
}
+ /*
+ * Since AES counter mode is a stream cipher, we call
+ * aes_counter_final() to pick up any remaining bytes.
+ * It is an internal function that does not destroy
+ * the context like *normal* final routines.
+ */
+ aes_ctx = ctx->cc_provider_private;
+ if ((aes_ctx->ac_flags & AES_CTR_MODE) &&
+ (aes_ctx->ac_remainder_len > 0)) {
+ ret = aes_counter_final(aes_ctx, plaintext);
+ }
+
if (ret == CRYPTO_SUCCESS) {
if (ciphertext != plaintext)
plaintext->cd_length =
@@ -730,6 +770,7 @@ aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
}
plaintext->cd_offset = saved_offset;
+
return (ret);
}
@@ -742,20 +783,36 @@ aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
/* EXPORT DELETE START */
aes_ctx_t *aes_ctx;
+ int ret;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
+ if (data->cd_format != CRYPTO_DATA_RAW &&
+ data->cd_format != CRYPTO_DATA_UIO &&
+ data->cd_format != CRYPTO_DATA_MBLK) {
+ return (CRYPTO_ARGUMENTS_BAD);
+ }
+
/*
* There must be no unprocessed plaintext.
* This happens if the length of the last data is
* not a multiple of the AES block length.
*/
- if (aes_ctx->ac_remainder_len > 0)
- return (CRYPTO_DATA_LEN_RANGE);
+ if (aes_ctx->ac_remainder_len > 0) {
+ if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
+ return (CRYPTO_DATA_LEN_RANGE);
+ else {
+ ret = aes_counter_final(aes_ctx, data);
+ if (ret != CRYPTO_SUCCESS)
+ return (ret);
+ }
+ }
+
+ if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
+ data->cd_length = 0;
(void) aes_free_context(ctx);
- data->cd_length = 0;
/* EXPORT DELETE END */
@@ -771,20 +828,36 @@ aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
/* EXPORT DELETE START */
aes_ctx_t *aes_ctx;
+ int ret;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
+ if (data->cd_format != CRYPTO_DATA_RAW &&
+ data->cd_format != CRYPTO_DATA_UIO &&
+ data->cd_format != CRYPTO_DATA_MBLK) {
+ return (CRYPTO_ARGUMENTS_BAD);
+ }
+
/*
* There must be no unprocessed ciphertext.
* This happens if the length of the last ciphertext is
* not a multiple of the AES block length.
*/
- if (aes_ctx->ac_remainder_len > 0)
- return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ if (aes_ctx->ac_remainder_len > 0) {
+ if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
+ else {
+ ret = aes_counter_final(aes_ctx, data);
+ if (ret != CRYPTO_SUCCESS)
+ return (ret);
+ }
+ }
+
+ if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
+ data->cd_length = 0;
(void) aes_free_context(ctx);
- data->cd_length = 0;
/* EXPORT DELETE END */
@@ -805,13 +878,15 @@ aes_encrypt_atomic(crypto_provider_handle_t provider,
AES_ARG_INPLACE(plaintext, ciphertext);
- /*
- * Plaintext must be a multiple of AES block size.
- * This test only works for non-padded mechanisms
- * when blocksize is 2^N.
- */
- if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
- return (CRYPTO_DATA_LEN_RANGE);
+ if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
+ /*
+ * Plaintext must be a multiple of AES block size.
+ * This test only works for non-padded mechanisms
+ * when blocksize is 2^N.
+ */
+ if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
/* return length needed to store the output */
if (ciphertext->cd_length < plaintext->cd_length) {
@@ -822,9 +897,16 @@ aes_encrypt_atomic(crypto_provider_handle_t provider,
if (!AES_VALID_MECH(mechanism))
return (CRYPTO_MECHANISM_INVALID);
- if (mechanism->cm_param_len != 0 &&
- mechanism->cm_param_len != AES_BLOCK_LEN)
- return (CRYPTO_MECHANISM_PARAM_INVALID);
+ if (mechanism->cm_param != NULL) {
+ if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
+ if (mechanism->cm_param_len !=
+ sizeof (CK_AES_CTR_PARAMS))
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ } else {
+ if (mechanism->cm_param_len != AES_BLOCK_LEN)
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ }
bzero(&aes_ctx, sizeof (aes_ctx_t));
@@ -856,21 +938,33 @@ aes_encrypt_atomic(crypto_provider_handle_t provider,
ret = CRYPTO_ARGUMENTS_BAD;
}
- if (aes_ctx.ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
- bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
- kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
- }
-
if (ret == CRYPTO_SUCCESS) {
- ASSERT(aes_ctx.ac_remainder_len == 0);
- if (plaintext != ciphertext)
- ciphertext->cd_length =
- ciphertext->cd_offset - saved_offset;
+ if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
+ ASSERT(aes_ctx.ac_remainder_len == 0);
+ if (plaintext != ciphertext)
+ ciphertext->cd_length =
+ ciphertext->cd_offset - saved_offset;
+ } else {
+ if (aes_ctx.ac_remainder_len > 0) {
+ ret = aes_counter_final(&aes_ctx, ciphertext);
+ if (ret != CRYPTO_SUCCESS)
+ goto out;
+ }
+ if (plaintext != ciphertext)
+ ciphertext->cd_length =
+ ciphertext->cd_offset - saved_offset;
+ }
} else {
ciphertext->cd_length = saved_length;
}
ciphertext->cd_offset = saved_offset;
+out:
+ if (aes_ctx.ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
+ bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ }
+
return (ret);
}
@@ -888,13 +982,15 @@ aes_decrypt_atomic(crypto_provider_handle_t provider,
AES_ARG_INPLACE(ciphertext, plaintext);
- /*
- * Ciphertext must be a multiple of AES block size.
- * This test only works for non-padded mechanisms
- * when blocksize is 2^N.
- */
- if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
- return (CRYPTO_DATA_LEN_RANGE);
+ if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
+ /*
+ * Ciphertext must be a multiple of AES block size.
+ * This test only works for non-padded mechanisms
+ * when blocksize is 2^N.
+ */
+ if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
+ return (CRYPTO_DATA_LEN_RANGE);
+ }
/* return length needed to store the output */
if (plaintext->cd_length < ciphertext->cd_length) {
@@ -905,9 +1001,16 @@ aes_decrypt_atomic(crypto_provider_handle_t provider,
if (!AES_VALID_MECH(mechanism))
return (CRYPTO_MECHANISM_INVALID);
- if (mechanism->cm_param_len != 0 &&
- mechanism->cm_param_len != AES_BLOCK_LEN)
- return (CRYPTO_MECHANISM_PARAM_INVALID);
+ if (mechanism->cm_param != NULL) {
+ if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
+ if (mechanism->cm_param_len !=
+ sizeof (CK_AES_CTR_PARAMS))
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ } else {
+ if (mechanism->cm_param_len != AES_BLOCK_LEN)
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ }
bzero(&aes_ctx, sizeof (aes_ctx_t));
@@ -939,21 +1042,33 @@ aes_decrypt_atomic(crypto_provider_handle_t provider,
ret = CRYPTO_ARGUMENTS_BAD;
}
- if (aes_ctx.ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
- bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
- kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
- }
-
if (ret == CRYPTO_SUCCESS) {
- ASSERT(aes_ctx.ac_remainder_len == 0);
- if (ciphertext != plaintext)
- plaintext->cd_length =
- plaintext->cd_offset - saved_offset;
+ if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
+ ASSERT(aes_ctx.ac_remainder_len == 0);
+ if (ciphertext != plaintext)
+ plaintext->cd_length =
+ plaintext->cd_offset - saved_offset;
+ } else {
+ if (aes_ctx.ac_remainder_len > 0) {
+ ret = aes_counter_final(&aes_ctx, plaintext);
+ if (ret != CRYPTO_SUCCESS)
+ goto out;
+ }
+ if (ciphertext != plaintext)
+ plaintext->cd_length =
+ plaintext->cd_offset - saved_offset;
+ }
} else {
plaintext->cd_length = saved_length;
}
plaintext->cd_offset = saved_offset;
+out:
+ if (aes_ctx.ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
+ bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
+ }
+
return (ret);
}
@@ -1036,25 +1151,11 @@ aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
void *keysched;
size_t size;
- if (template == NULL) {
- if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL)
- return (CRYPTO_HOST_MEMORY);
- /*
- * Initialize key schedule.
- * Key length is stored in the key.
- */
- if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS)
- kmem_free(keysched, size);
-
- aes_ctx->ac_flags = AES_PROVIDER_OWNS_KEY_SCHEDULE;
- aes_ctx->ac_keysched_len = size;
- } else {
- keysched = template;
- }
+ aes_ctx->ac_flags = 0;
if (mechanism->cm_type == AES_CBC_MECH_INFO_TYPE) {
/*
- * Copy IV into AES context.
+ * Copy 128-bit IV into context.
*
* If cm_param == NULL then the IV comes from the
* cd_miscdata field in the crypto_data structure.
@@ -1095,6 +1196,69 @@ aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
aes_ctx->ac_lastp = (uint8_t *)&aes_ctx->ac_iv[0];
aes_ctx->ac_flags |= AES_CBC_MODE;
+
+ } else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
+ if (mechanism->cm_param != NULL) {
+ CK_AES_CTR_PARAMS *pp;
+ uint64_t mask = 0;
+ ulong_t count;
+ uint8_t *iv8;
+ uint8_t *p8;
+
+ pp = (CK_AES_CTR_PARAMS *)mechanism->cm_param;
+ iv8 = (uint8_t *)&aes_ctx->ac_iv;
+ p8 = (uint8_t *)&pp->cb[0];
+
+ /* XXX what to do about miscdata */
+ count = pp->ulCounterBits;
+ if (count == 0 || count > 64) {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+ while (count-- > 0)
+ mask |= (1ULL << count);
+
+ aes_ctx->ac_counter_mask = mask;
+
+ iv8[0] = p8[0];
+ iv8[1] = p8[1];
+ iv8[2] = p8[2];
+ iv8[3] = p8[3];
+ iv8[4] = p8[4];
+ iv8[5] = p8[5];
+ iv8[6] = p8[6];
+ iv8[7] = p8[7];
+ iv8[8] = p8[8];
+ iv8[9] = p8[9];
+ iv8[10] = p8[10];
+ iv8[11] = p8[11];
+ iv8[12] = p8[12];
+ iv8[13] = p8[13];
+ iv8[14] = p8[14];
+ iv8[15] = p8[15];
+ } else {
+ return (CRYPTO_MECHANISM_PARAM_INVALID);
+ }
+
+ aes_ctx->ac_lastp = (uint8_t *)&aes_ctx->ac_iv[0];
+ aes_ctx->ac_flags |= AES_CTR_MODE;
+ } else {
+ aes_ctx->ac_flags |= AES_ECB_MODE;
+ }
+
+ if (template == NULL) {
+ if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL)
+ return (CRYPTO_HOST_MEMORY);
+ /*
+ * Initialize key schedule.
+ * Key length is stored in the key.
+ */
+ if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS)
+ kmem_free(keysched, size);
+
+ aes_ctx->ac_flags |= AES_PROVIDER_OWNS_KEY_SCHEDULE;
+ aes_ctx->ac_keysched_len = size;
+ } else {
+ keysched = template;
}
aes_ctx->ac_keysched = keysched;
diff --git a/usr/src/uts/common/crypto/io/crypto.c b/usr/src/uts/common/crypto/io/crypto.c
index 9e45ff8891..8d49166597 100644
--- a/usr/src/uts/common/crypto/io/crypto.c
+++ b/usr/src/uts/common/crypto/io/crypto.c
@@ -75,7 +75,7 @@ static int crypto_open(dev_t *, int, int, cred_t *);
static int crypto_close(dev_t, int, int, cred_t *);
static int crypto_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
-static int cipher_init(dev_t, caddr_t, int, int (*)(kcf_provider_desc_t *,
+static int cipher_init(dev_t, caddr_t, int, int (*)(crypto_provider_t,
crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
crypto_ctx_template_t, crypto_context_t *, crypto_call_req_t *));
@@ -91,7 +91,7 @@ static int cipher_update(dev_t, caddr_t, int, int (*)(crypto_context_t,
static int common_final(dev_t, caddr_t, int, int (*)(crypto_context_t,
crypto_data_t *, crypto_call_req_t *));
-static int sign_verify_init(dev_t, caddr_t, int, int (*)(kcf_provider_desc_t *,
+static int sign_verify_init(dev_t, caddr_t, int, int (*)(crypto_provider_t,
crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
crypto_ctx_template_t, crypto_context_t *, crypto_call_req_t *));
@@ -159,7 +159,7 @@ static kcondvar_t crypto_cv;
#define RETURN_LIST B_TRUE
#define DONT_RETURN_LIST B_FALSE
-#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, f)
+#define CRYPTO_OPS_OFFSET(f) offsetof(crypto_ops_t, co_##f)
#define CRYPTO_DIGEST_OFFSET(f) offsetof(crypto_digest_ops_t, f)
#define CRYPTO_CIPHER_OFFSET(f) offsetof(crypto_cipher_ops_t, f)
#define CRYPTO_SIGN_OFFSET(f) offsetof(crypto_sign_ops_t, f)
@@ -588,7 +588,7 @@ crypto_build_function_list(crypto_function_list_t *fl, kcf_provider_desc_t *pd)
if ((ops = pd->pd_ops_vector) == NULL)
return;
- if ((digest_ops = ops->digest_ops) != NULL) {
+ if ((digest_ops = ops->co_digest_ops) != NULL) {
if (digest_ops->digest_init != NULL)
fl->fl_digest_init = B_TRUE;
if (digest_ops->digest != NULL)
@@ -600,7 +600,7 @@ crypto_build_function_list(crypto_function_list_t *fl, kcf_provider_desc_t *pd)
if (digest_ops->digest_final != NULL)
fl->fl_digest_final = B_TRUE;
}
- if ((cipher_ops = ops->cipher_ops) != NULL) {
+ if ((cipher_ops = ops->co_cipher_ops) != NULL) {
if (cipher_ops->encrypt_init != NULL)
fl->fl_encrypt_init = B_TRUE;
if (cipher_ops->encrypt != NULL)
@@ -618,7 +618,7 @@ crypto_build_function_list(crypto_function_list_t *fl, kcf_provider_desc_t *pd)
if (cipher_ops->decrypt_final != NULL)
fl->fl_decrypt_final = B_TRUE;
}
- if ((mac_ops = ops->mac_ops) != NULL) {
+ if ((mac_ops = ops->co_mac_ops) != NULL) {
if (mac_ops->mac_init != NULL)
fl->fl_mac_init = B_TRUE;
if (mac_ops->mac != NULL)
@@ -628,7 +628,7 @@ crypto_build_function_list(crypto_function_list_t *fl, kcf_provider_desc_t *pd)
if (mac_ops->mac_final != NULL)
fl->fl_mac_final = B_TRUE;
}
- if ((sign_ops = ops->sign_ops) != NULL) {
+ if ((sign_ops = ops->co_sign_ops) != NULL) {
if (sign_ops->sign_init != NULL)
fl->fl_sign_init = B_TRUE;
if (sign_ops->sign != NULL)
@@ -642,7 +642,7 @@ crypto_build_function_list(crypto_function_list_t *fl, kcf_provider_desc_t *pd)
if (sign_ops->sign_recover != NULL)
fl->fl_sign_recover = B_TRUE;
}
- if ((verify_ops = ops->verify_ops) != NULL) {
+ if ((verify_ops = ops->co_verify_ops) != NULL) {
if (verify_ops->verify_init != NULL)
fl->fl_verify_init = B_TRUE;
if (verify_ops->verify != NULL)
@@ -656,7 +656,7 @@ crypto_build_function_list(crypto_function_list_t *fl, kcf_provider_desc_t *pd)
if (verify_ops->verify_recover != NULL)
fl->fl_verify_recover = B_TRUE;
}
- if ((dual_ops = ops->dual_ops) != NULL) {
+ if ((dual_ops = ops->co_dual_ops) != NULL) {
if (dual_ops->digest_encrypt_update != NULL)
fl->fl_digest_encrypt_update = B_TRUE;
if (dual_ops->decrypt_digest_update != NULL)
@@ -666,13 +666,13 @@ crypto_build_function_list(crypto_function_list_t *fl, kcf_provider_desc_t *pd)
if (dual_ops->decrypt_verify_update != NULL)
fl->fl_decrypt_verify_update = B_TRUE;
}
- if ((random_number_ops = ops->random_ops) != NULL) {
+ if ((random_number_ops = ops->co_random_ops) != NULL) {
if (random_number_ops->seed_random != NULL)
fl->fl_seed_random = B_TRUE;
if (random_number_ops->generate_random != NULL)
fl->fl_generate_random = B_TRUE;
}
- if ((session_ops = ops->session_ops) != NULL) {
+ if ((session_ops = ops->co_session_ops) != NULL) {
if (session_ops->session_open != NULL)
fl->fl_session_open = B_TRUE;
if (session_ops->session_close != NULL)
@@ -682,7 +682,7 @@ crypto_build_function_list(crypto_function_list_t *fl, kcf_provider_desc_t *pd)
if (session_ops->session_logout != NULL)
fl->fl_session_logout = B_TRUE;
}
- if ((object_ops = ops->object_ops) != NULL) {
+ if ((object_ops = ops->co_object_ops) != NULL) {
if (object_ops->object_create != NULL)
fl->fl_object_create = B_TRUE;
if (object_ops->object_copy != NULL)
@@ -702,7 +702,7 @@ crypto_build_function_list(crypto_function_list_t *fl, kcf_provider_desc_t *pd)
if (object_ops->object_find_final != NULL)
fl->fl_object_find_final = B_TRUE;
}
- if ((key_ops = ops->key_ops) != NULL) {
+ if ((key_ops = ops->co_key_ops) != NULL) {
if (key_ops->key_generate != NULL)
fl->fl_key_generate = B_TRUE;
if (key_ops->key_generate_pair != NULL)
@@ -714,7 +714,7 @@ crypto_build_function_list(crypto_function_list_t *fl, kcf_provider_desc_t *pd)
if (key_ops->key_derive != NULL)
fl->fl_key_derive = B_TRUE;
}
- if ((provider_ops = ops->provider_ops) != NULL) {
+ if ((provider_ops = ops->co_provider_ops) != NULL) {
if (provider_ops->init_token != NULL)
fl->fl_init_token = B_TRUE;
if (provider_ops->init_pin != NULL)
@@ -1154,7 +1154,7 @@ get_provider_info(dev_t dev, caddr_t arg, int mode, int *rval)
(void) kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(provider_ops), CRYPTO_PROVIDER_OFFSET(ext_info),
- provider, &real_provider);
+ CHECK_RESTRICT_FALSE, provider, &real_provider);
if (real_provider != NULL) {
ASSERT(real_provider == provider ||
@@ -1164,6 +1164,7 @@ get_provider_info(dev_t dev, caddr_t arg, int mode, int *rval)
rv = kcf_submit_request(real_provider, NULL, NULL, &params,
B_FALSE);
ASSERT(rv != CRYPTO_NOT_SUPPORTED);
+ KCF_PROV_REFRELE(real_provider);
} else {
/* do the best we can */
fabricate_ext_info(provider, ext_info);
@@ -1415,7 +1416,8 @@ again:
/* find a hardware provider that supports session ops */
(void) kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(session_ops),
- CRYPTO_SESSION_OFFSET(session_open), pd, &real_provider);
+ CRYPTO_SESSION_OFFSET(session_open), CHECK_RESTRICT_FALSE,
+ pd, &real_provider);
if (real_provider != NULL) {
ASSERT(real_provider == pd ||
@@ -1427,6 +1429,7 @@ again:
B_FALSE);
if (rv != CRYPTO_SUCCESS) {
mutex_enter(&cm->cm_lock);
+ KCF_PROV_REFRELE(real_provider);
return (rv);
}
}
@@ -1450,6 +1453,7 @@ again:
pd);
(void) kcf_submit_request(real_provider, NULL,
NULL, &params, B_FALSE);
+ KCF_PROV_REFRELE(real_provider);
}
mutex_enter(&cm->cm_lock);
goto again;
@@ -1463,7 +1467,6 @@ again:
KCF_PROV_REFHOLD(pd);
new_ps->ps_provider = pd;
if (real_provider != NULL) {
- KCF_PROV_REFHOLD(real_provider);
new_ps->ps_real_provider = real_provider;
}
new_ps->ps_next = cm->cm_provider_session;
@@ -1788,16 +1791,7 @@ close_session(dev_t dev, caddr_t arg, int mode, int *rval)
return (EFAULT);
rv = crypto_close_session(dev, close_session.cs_session);
- if (rv != CRYPTO_SUCCESS) {
- close_session.cs_return_value = rv;
- if (copyout(&close_session, arg, sizeof (close_session)) != 0) {
- return (EFAULT);
- }
- return (0);
- }
-
- close_session.cs_return_value = CRYPTO_SUCCESS;
-
+ close_session.cs_return_value = rv;
if (copyout(&close_session, arg, sizeof (close_session)) != 0) {
return (EFAULT);
}
@@ -2230,17 +2224,114 @@ decrypt_init(dev_t dev, caddr_t arg, int mode, int *rval)
}
/*
+ * umech is a mechanism structure that has been copied from user address
+ * space into kernel address space. Only one copyin has been done.
+ * The mechanism parameter, if non-null, still points to user address space.
+ * If the mechanism parameter contains pointers, they are pointers into
+ * user address space.
+ *
+ * kmech is a umech with all pointers and structures in kernel address space.
+ *
+ * This routine calls the provider's entry point to copy a umech parameter
+ * into kernel address space. Kernel memory is allocated by the provider.
+ */
+static int
+crypto_provider_copyin_mech_param(kcf_provider_desc_t *pd,
+ crypto_mechanism_t *umech, crypto_mechanism_t *kmech, int mode, int *error)
+{
+ crypto_mech_type_t provider_mech_type;
+ kcf_ops_class_t class;
+ int index;
+ int rv;
+
+ /* get the provider's mech number */
+ class = KCF_MECH2CLASS(umech->cm_type);
+ index = KCF_MECH2INDEX(umech->cm_type);
+ provider_mech_type = pd->pd_map_mechnums[class][index];
+
+ kmech->cm_param = NULL;
+ kmech->cm_param_len = 0;
+ kmech->cm_type = provider_mech_type;
+ rv = KCF_PROV_COPYIN_MECH(pd, umech, kmech, error, mode);
+ kmech->cm_type = umech->cm_type;
+
+ return (rv);
+}
+
+/*
+ * umech is a mechanism structure that has been copied from user address
+ * space into kernel address space. Only one copyin has been done.
+ * The mechanism parameter, if non-null, still points to user address space.
+ * If the mechanism parameter contains pointers, they are pointers into
+ * user address space.
+ *
+ * kmech is a umech with all pointers and structures in kernel address space.
+ *
+ * This routine calls the provider's entry point to copy a kmech parameter
+ * into user address space using umech as a template containing
+ * user address pointers.
+ */
+static int
+crypto_provider_copyout_mech_param(kcf_provider_desc_t *pd,
+ crypto_mechanism_t *kmech, crypto_mechanism_t *umech, int mode, int *error)
+{
+ crypto_mech_type_t provider_mech_type;
+ kcf_ops_class_t class;
+ int index;
+ int rv;
+
+ /* get the provider's mech number */
+ class = KCF_MECH2CLASS(umech->cm_type);
+ index = KCF_MECH2INDEX(umech->cm_type);
+ provider_mech_type = pd->pd_map_mechnums[class][index];
+
+ kmech->cm_type = provider_mech_type;
+ rv = KCF_PROV_COPYOUT_MECH(pd, kmech, umech, error, mode);
+ kmech->cm_type = umech->cm_type;
+
+ return (rv);
+}
+
+/*
+ * Call the provider's entry point to free kernel memory that has been
+ * allocated for the mechanism's parameter.
+ */
+static void
+crypto_free_mech(kcf_provider_desc_t *pd, boolean_t allocated_by_crypto_module,
+ crypto_mechanism_t *mech)
+{
+ crypto_mech_type_t provider_mech_type;
+ kcf_ops_class_t class;
+ int index;
+
+ if (allocated_by_crypto_module) {
+ if (mech->cm_param != NULL)
+ kmem_free(mech->cm_param, mech->cm_param_len);
+ } else {
+ /* get the provider's mech number */
+ class = KCF_MECH2CLASS(mech->cm_type);
+ index = KCF_MECH2INDEX(mech->cm_type);
+ provider_mech_type = pd->pd_map_mechnums[class][index];
+
+ if (mech->cm_param != NULL && mech->cm_param_len != 0) {
+ mech->cm_type = provider_mech_type;
+ (void) KCF_PROV_FREE_MECH(pd, mech);
+ }
+ }
+}
+
+/*
* ASSUMPTION: crypto_encrypt_init and crypto_decrypt_init
* structures are identical except for field names.
*/
static int
-cipher_init(dev_t dev, caddr_t arg, int mode, int (*init)(kcf_provider_desc_t *,
+cipher_init(dev_t dev, caddr_t arg, int mode, int (*init)(crypto_provider_t,
crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *,
crypto_ctx_template_t, crypto_context_t *, crypto_call_req_t *))
{
STRUCT_DECL(crypto_encrypt_init, encrypt_init);
kproject_t *mech_projp, *key_projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
crypto_session_id_t session_id;
crypto_mechanism_t mech;
crypto_key_t key;
@@ -2254,6 +2345,7 @@ cipher_init(dev_t dev, caddr_t arg, int mode, int (*init)(kcf_provider_desc_t *,
offset_t offset;
int error = 0;
int rv;
+ boolean_t allocated_by_crypto_module = B_FALSE;
STRUCT_INIT(encrypt_init, mode);
@@ -2277,22 +2369,36 @@ cipher_init(dev_t dev, caddr_t arg, int mode, int (*init)(kcf_provider_desc_t *,
goto release_minor;
}
- if (!copyin_mech(mode, STRUCT_FADDR(encrypt_init, ei_mech), &mech,
- &mech_rctl_bytes, &carry, &rv, &error, &mech_projp)) {
- goto out;
- }
+ bcopy(STRUCT_FADDR(encrypt_init, ei_mech), &mech.cm_type,
+ sizeof (crypto_mech_type_t));
if (init == crypto_encrypt_init_prov)
offset = CRYPTO_CIPHER_OFFSET(encrypt_init);
else
offset = CRYPTO_CIPHER_OFFSET(decrypt_init);
- if ((rv = kcf_get_hardware_provider(mech.cm_type,
- CRYPTO_OPS_OFFSET(cipher_ops), offset,
+ if ((rv = kcf_get_hardware_provider(mech.cm_type, CRYPTO_MECH_INVALID,
+ CRYPTO_OPS_OFFSET(cipher_ops), offset, CHECK_RESTRICT_FALSE,
sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
goto out;
}
+ carry = 0;
+ rv = crypto_provider_copyin_mech_param(real_provider,
+ STRUCT_FADDR(encrypt_init, ei_mech), &mech, mode, &error);
+
+ if (rv == CRYPTO_NOT_SUPPORTED) {
+ allocated_by_crypto_module = B_TRUE;
+ if (!copyin_mech(mode, STRUCT_FADDR(encrypt_init, ei_mech),
+ &mech, &mech_rctl_bytes, &carry, &rv, &error,
+ &mech_projp)) {
+ goto out;
+ }
+ } else {
+ if (rv != CRYPTO_SUCCESS)
+ goto out;
+ }
+
if (!copyin_key(mode, STRUCT_FADDR(encrypt_init, ei_key), &key,
&key_rctl_bytes, &rv, &error, carry, &key_projp)) {
goto out;
@@ -2311,6 +2417,7 @@ cipher_init(dev_t dev, caddr_t arg, int mode, int (*init)(kcf_provider_desc_t *,
if (*ctxpp != NULL)
CRYPTO_CANCEL_CTX(ctxpp);
*ctxpp = (rv == CRYPTO_SUCCESS) ? cc : NULL;
+
out:
CRYPTO_SESSION_RELE(sp);
@@ -2323,17 +2430,22 @@ release_minor:
mutex_exit(&crypto_rctl_lock);
crypto_release_minor(cm);
- if (mech.cm_param != NULL)
- kmem_free(mech.cm_param, mech.cm_param_len);
+ if (real_provider != NULL) {
+ crypto_free_mech(real_provider,
+ allocated_by_crypto_module, &mech);
+ KCF_PROV_REFRELE(real_provider);
+ }
free_crypto_key(&key);
if (error != 0)
+ /* XXX free context */
return (error);
STRUCT_FSET(encrypt_init, ei_return_value, rv);
if (copyout(STRUCT_BUF(encrypt_init), arg,
STRUCT_SIZE(encrypt_init)) != 0) {
+ /* XXX free context */
return (EFAULT);
}
return (0);
@@ -2788,7 +2900,7 @@ digest_init(dev_t dev, caddr_t arg, int mode, int *rval)
{
STRUCT_DECL(crypto_digest_init, digest_init);
kproject_t *mech_projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
crypto_session_id_t session_id;
crypto_mechanism_t mech;
crypto_minor_t *cm;
@@ -2824,10 +2936,10 @@ digest_init(dev_t dev, caddr_t arg, int mode, int *rval)
goto out;
}
- if ((rv = kcf_get_hardware_provider(mech.cm_type,
- CRYPTO_OPS_OFFSET(digest_ops),
- CRYPTO_DIGEST_OFFSET(digest_init),
- sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
+ if ((rv = kcf_get_hardware_provider(mech.cm_type, CRYPTO_MECH_INVALID,
+ CRYPTO_OPS_OFFSET(digest_ops), CRYPTO_DIGEST_OFFSET(digest_init),
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto out;
}
@@ -2852,6 +2964,9 @@ release_minor:
}
crypto_release_minor(cm);
+ if (real_provider != NULL)
+ KCF_PROV_REFRELE(real_provider);
+
if (mech.cm_param != NULL)
kmem_free(mech.cm_param, mech.cm_param_len);
@@ -3261,7 +3376,8 @@ set_pin(dev_t dev, caddr_t arg, int mode, int *rval)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(provider_ops), CRYPTO_PROVIDER_OFFSET(set_pin),
- sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto out;
}
@@ -3270,6 +3386,7 @@ set_pin(dev_t dev, caddr_t arg, int mode, int *rval)
new_pin, new_pin_len, NULL, NULL, real_provider);
rv = kcf_submit_request(real_provider, NULL, NULL, &params, B_FALSE);
+ KCF_PROV_REFRELE(real_provider);
out:
CRYPTO_SESSION_RELE(sp);
@@ -3339,7 +3456,8 @@ login(dev_t dev, caddr_t arg, int mode, int *rval)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(session_ops),
CRYPTO_SESSION_OFFSET(session_login),
- sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto out;
}
@@ -3348,6 +3466,7 @@ login(dev_t dev, caddr_t arg, int mode, int *rval)
real_provider);
rv = kcf_submit_request(real_provider, NULL, NULL, &params, B_FALSE);
+ KCF_PROV_REFRELE(real_provider);
out:
CRYPTO_SESSION_RELE(sp);
@@ -3398,7 +3517,7 @@ logout(dev_t dev, caddr_t arg, int mode, int *rval)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(session_ops),
- CRYPTO_SESSION_OFFSET(session_logout),
+ CRYPTO_SESSION_OFFSET(session_logout), CHECK_RESTRICT_FALSE,
sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
goto out;
}
@@ -3406,6 +3525,7 @@ logout(dev_t dev, caddr_t arg, int mode, int *rval)
KCF_WRAP_SESSION_OPS_PARAMS(&params, KCF_OP_SESSION_LOGOUT, NULL,
sp->sd_provider_session->ps_session, 0, NULL, 0, real_provider);
rv = kcf_submit_request(real_provider, NULL, NULL, &params, B_FALSE);
+ KCF_PROV_REFRELE(real_provider);
out:
CRYPTO_SESSION_RELE(sp);
@@ -3460,13 +3580,13 @@ verify_recover_init(dev_t dev, caddr_t arg, int mode, int *rval)
*/
static int
sign_verify_init(dev_t dev, caddr_t arg, int mode,
- int (*init)(kcf_provider_desc_t *, crypto_session_id_t,
+ int (*init)(crypto_provider_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_ctx_template_t,
crypto_context_t *, crypto_call_req_t *))
{
STRUCT_DECL(crypto_sign_init, sign_init);
kproject_t *mech_projp, *key_projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
crypto_session_id_t session_id;
crypto_mechanism_t mech;
crypto_key_t key;
@@ -3480,6 +3600,7 @@ sign_verify_init(dev_t dev, caddr_t arg, int mode,
offset_t offset_1, offset_2;
int error = 0;
int rv;
+ boolean_t allocated_by_crypto_module = B_FALSE;
STRUCT_INIT(sign_init, mode);
@@ -3502,10 +3623,8 @@ sign_verify_init(dev_t dev, caddr_t arg, int mode,
goto release_minor;
}
- if (!copyin_mech(mode, STRUCT_FADDR(sign_init, si_mech), &mech,
- &mech_rctl_bytes, &carry, &rv, &error, &mech_projp)) {
- goto out;
- }
+ bcopy(STRUCT_FADDR(sign_init, si_mech), &mech.cm_type,
+ sizeof (crypto_mech_type_t));
ASSERT(init == crypto_sign_init_prov ||
init == crypto_verify_init_prov ||
@@ -3530,11 +3649,28 @@ sign_verify_init(dev_t dev, caddr_t arg, int mode,
ctxpp = &sp->sd_verify_recover_ctx;
}
- if ((rv = kcf_get_hardware_provider(mech.cm_type, offset_1,
- offset_2, sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
+ if ((rv = kcf_get_hardware_provider(mech.cm_type, CRYPTO_MECH_INVALID,
+ offset_1, offset_2, CHECK_RESTRICT_FALSE, sp->sd_provider,
+ &real_provider)) != CRYPTO_SUCCESS) {
goto out;
}
+ carry = 0;
+ rv = crypto_provider_copyin_mech_param(real_provider,
+ STRUCT_FADDR(sign_init, si_mech), &mech, mode, &error);
+
+ if (rv == CRYPTO_NOT_SUPPORTED) {
+ allocated_by_crypto_module = B_TRUE;
+ if (!copyin_mech(mode, STRUCT_FADDR(sign_init, si_mech),
+ &mech, &mech_rctl_bytes, &carry, &rv, &error,
+ &mech_projp)) {
+ goto out;
+ }
+ } else {
+ if (rv != CRYPTO_SUCCESS)
+ goto out;
+ }
+
if (!copyin_key(mode, STRUCT_FADDR(sign_init, si_key), &key,
&key_rctl_bytes, &rv, &error, carry, &key_projp)) {
goto out;
@@ -3563,8 +3699,11 @@ release_minor:
mutex_exit(&crypto_rctl_lock);
crypto_release_minor(cm);
- if (mech.cm_param != NULL)
- kmem_free(mech.cm_param, mech.cm_param_len);
+ if (real_provider != NULL) {
+ crypto_free_mech(real_provider,
+ allocated_by_crypto_module, &mech);
+ KCF_PROV_REFRELE(real_provider);
+ }
free_crypto_key(&key);
@@ -3900,7 +4039,7 @@ seed_random(dev_t dev, caddr_t arg, int mode, int *rval)
{
STRUCT_DECL(crypto_seed_random, seed_random);
kproject_t *projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
kcf_req_params_t params;
crypto_session_id_t session_id;
crypto_minor_t *cm;
@@ -3953,10 +4092,10 @@ seed_random(dev_t dev, caddr_t arg, int mode, int *rval)
if (random_mech == CRYPTO_MECH_INVALID)
random_mech = crypto_mech2id_common(SUN_RANDOM, B_FALSE);
- if ((rv = kcf_get_hardware_provider(random_mech,
- CRYPTO_OPS_OFFSET(random_ops),
- CRYPTO_RANDOM_OFFSET(seed_random),
- sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
+ if ((rv = kcf_get_hardware_provider(random_mech, CRYPTO_MECH_INVALID,
+ CRYPTO_OPS_OFFSET(random_ops), CRYPTO_RANDOM_OFFSET(seed_random),
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto out;
}
@@ -3976,6 +4115,9 @@ release_minor:
}
crypto_release_minor(cm);
+ if (real_provider != NULL)
+ KCF_PROV_REFRELE(real_provider);
+
if (seed_buffer != NULL)
kmem_free(seed_buffer, seed_len);
@@ -3996,7 +4138,7 @@ generate_random(dev_t dev, caddr_t arg, int mode, int *rval)
{
STRUCT_DECL(crypto_generate_random, generate_random);
kproject_t *projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
kcf_req_params_t params;
crypto_session_id_t session_id;
crypto_minor_t *cm;
@@ -4043,9 +4185,9 @@ generate_random(dev_t dev, caddr_t arg, int mode, int *rval)
if (random_mech == CRYPTO_MECH_INVALID)
random_mech = crypto_mech2id_common(SUN_RANDOM, B_FALSE);
- if ((rv = kcf_get_hardware_provider(random_mech,
+ if ((rv = kcf_get_hardware_provider(random_mech, CRYPTO_MECH_INVALID,
CRYPTO_OPS_OFFSET(random_ops),
- CRYPTO_RANDOM_OFFSET(generate_random),
+ CRYPTO_RANDOM_OFFSET(generate_random), CHECK_RESTRICT_FALSE,
sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
goto out;
}
@@ -4073,6 +4215,9 @@ release_minor:
}
crypto_release_minor(cm);
+ if (real_provider != NULL)
+ KCF_PROV_REFRELE(real_provider);
+
if (buffer != NULL) {
/* random numbers are often used to create keys */
bzero(buffer, len);
@@ -4147,7 +4292,7 @@ object_create(dev_t dev, caddr_t arg, int mode, int *rval)
{
STRUCT_DECL(crypto_object_create, object_create);
kproject_t *projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
kcf_req_params_t params;
crypto_object_attribute_t *k_attrs = NULL;
crypto_session_id_t session_id;
@@ -4190,7 +4335,8 @@ object_create(dev_t dev, caddr_t arg, int mode, int *rval)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(object_ops),
CRYPTO_OBJECT_OFFSET(object_create),
- sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto release_minor;
}
@@ -4235,6 +4381,8 @@ out:
if (sp != NULL)
CRYPTO_SESSION_RELE(sp);
crypto_release_minor(cm);
+ if (real_provider != NULL)
+ KCF_PROV_REFRELE(real_provider);
return (error);
}
@@ -4244,7 +4392,7 @@ object_copy(dev_t dev, caddr_t arg, int mode, int *rval)
{
STRUCT_DECL(crypto_object_copy, object_copy);
kproject_t *projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
kcf_req_params_t params;
crypto_object_attribute_t *k_attrs = NULL;
crypto_session_id_t session_id;
@@ -4286,7 +4434,7 @@ object_copy(dev_t dev, caddr_t arg, int mode, int *rval)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(object_ops),
- CRYPTO_OBJECT_OFFSET(object_copy),
+ CRYPTO_OBJECT_OFFSET(object_copy), CHECK_RESTRICT_FALSE,
sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
goto release_minor;
}
@@ -4333,6 +4481,8 @@ out:
if (sp != NULL)
CRYPTO_SESSION_RELE(sp);
crypto_release_minor(cm);
+ if (real_provider != NULL)
+ KCF_PROV_REFRELE(real_provider);
return (error);
}
@@ -4371,7 +4521,7 @@ object_destroy(dev_t dev, caddr_t arg, int mode, int *rval)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(object_ops),
- CRYPTO_OBJECT_OFFSET(object_destroy),
+ CRYPTO_OBJECT_OFFSET(object_destroy), CHECK_RESTRICT_FALSE,
sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
goto out;
}
@@ -4382,6 +4532,7 @@ object_destroy(dev_t dev, caddr_t arg, int mode, int *rval)
NULL, NULL, 0, NULL);
rv = kcf_submit_request(real_provider, NULL, NULL, &params, B_FALSE);
+ KCF_PROV_REFRELE(real_provider);
out:
CRYPTO_SESSION_RELE(sp);
@@ -4456,7 +4607,8 @@ object_get_attribute_value(dev_t dev, caddr_t arg, int mode, int *rval)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(object_ops),
CRYPTO_OBJECT_OFFSET(object_get_attribute_value),
- sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto out;
}
@@ -4466,6 +4618,7 @@ object_get_attribute_value(dev_t dev, caddr_t arg, int mode, int *rval)
0, NULL, NULL, 0, NULL);
rv = kcf_submit_request(real_provider, NULL, NULL, &params, B_FALSE);
+ KCF_PROV_REFRELE(real_provider);
out:
CRYPTO_SESSION_RELE(sp);
@@ -4539,7 +4692,7 @@ object_get_size(dev_t dev, caddr_t arg, int mode, int *rval)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(object_ops),
- CRYPTO_OBJECT_OFFSET(object_get_size),
+ CRYPTO_OBJECT_OFFSET(object_get_size), CHECK_RESTRICT_FALSE,
sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
goto out;
}
@@ -4550,6 +4703,7 @@ object_get_size(dev_t dev, caddr_t arg, int mode, int *rval)
NULL, NULL, 0, NULL);
rv = kcf_submit_request(real_provider, NULL, NULL, &params, B_FALSE);
+ KCF_PROV_REFRELE(real_provider);
out:
CRYPTO_SESSION_RELE(sp);
@@ -4621,7 +4775,8 @@ object_set_attribute_value(dev_t dev, caddr_t arg, int mode, int *rval)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(object_ops),
CRYPTO_OBJECT_OFFSET(object_set_attribute_value),
- sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto out;
}
@@ -4631,6 +4786,7 @@ object_set_attribute_value(dev_t dev, caddr_t arg, int mode, int *rval)
NULL, 0, NULL, NULL, 0, NULL);
rv = kcf_submit_request(real_provider, NULL, NULL, &params, B_FALSE);
+ KCF_PROV_REFRELE(real_provider);
out:
CRYPTO_SESSION_RELE(sp);
@@ -4663,7 +4819,7 @@ object_find_init(dev_t dev, caddr_t arg, int mode, int *rval)
{
STRUCT_DECL(crypto_object_find_init, find_init);
kproject_t *projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
kcf_req_params_t params;
crypto_object_attribute_t *k_attrs = NULL;
crypto_session_id_t session_id;
@@ -4704,7 +4860,7 @@ object_find_init(dev_t dev, caddr_t arg, int mode, int *rval)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(object_ops),
- CRYPTO_OBJECT_OFFSET(object_find_init),
+ CRYPTO_OBJECT_OFFSET(object_find_init), CHECK_RESTRICT_FALSE,
sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
goto out;
}
@@ -4744,6 +4900,9 @@ release_minor:
}
crypto_release_minor(cm);
+ if (real_provider != NULL)
+ KCF_PROV_REFRELE(real_provider);
+
if (k_attrs != NULL)
kmem_free(k_attrs, k_attrs_size);
@@ -4808,7 +4967,7 @@ object_find_update(dev_t dev, caddr_t arg, int mode, int *rval)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(object_ops),
- CRYPTO_OBJECT_OFFSET(object_find),
+ CRYPTO_OBJECT_OFFSET(object_find), CHECK_RESTRICT_FALSE,
sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
goto out;
}
@@ -4818,6 +4977,7 @@ object_find_update(dev_t dev, caddr_t arg, int mode, int *rval)
NULL, sp->sd_find_init_cookie, max_count, &count);
rv = kcf_submit_request(real_provider, NULL, NULL, &params, B_FALSE);
+ KCF_PROV_REFRELE(real_provider);
out:
CRYPTO_SESSION_RELE(sp);
@@ -4873,7 +5033,7 @@ crypto_free_find_ctx(crypto_session_data_t *sp)
if ((rv = kcf_get_hardware_provider_nomech(
CRYPTO_OPS_OFFSET(object_ops),
- CRYPTO_OBJECT_OFFSET(object_find_final),
+ CRYPTO_OBJECT_OFFSET(object_find_final), CHECK_RESTRICT_FALSE,
sp->sd_provider, &real_provider)) != CRYPTO_SUCCESS) {
return (rv);
}
@@ -4882,8 +5042,9 @@ crypto_free_find_ctx(crypto_session_data_t *sp)
sp->sd_provider_session->ps_session, 0, NULL, 0, NULL, 0,
NULL, sp->sd_find_init_cookie, 0, NULL);
- return (kcf_submit_request(real_provider,
- NULL, NULL, &params, B_FALSE));
+ rv = kcf_submit_request(real_provider, NULL, NULL, &params, B_FALSE);
+ KCF_PROV_REFRELE(real_provider);
+ return (rv);
}
/* ARGSUSED */
@@ -4943,7 +5104,7 @@ object_generate_key(dev_t dev, caddr_t arg, int mode, int *rval)
{
STRUCT_DECL(crypto_object_generate_key, generate_key);
kproject_t *mech_projp, *key_projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
kcf_req_params_t params;
crypto_mechanism_t mech;
crypto_object_attribute_t *k_attrs = NULL;
@@ -4958,6 +5119,7 @@ object_generate_key(dev_t dev, caddr_t arg, int mode, int *rval)
uint_t count;
int error = 0;
int rv;
+ boolean_t allocated_by_crypto_module = B_FALSE;
STRUCT_INIT(generate_key, mode);
@@ -4978,16 +5140,30 @@ object_generate_key(dev_t dev, caddr_t arg, int mode, int *rval)
goto release_minor;
}
- if (!copyin_mech(mode, STRUCT_FADDR(generate_key, gk_mechanism),
- &mech, &mech_rctl_bytes, &carry, &rv, &error, &mech_projp)) {
+ bcopy(STRUCT_FADDR(generate_key, gk_mechanism), &mech.cm_type,
+ sizeof (crypto_mech_type_t));
+
+ if ((rv = kcf_get_hardware_provider(mech.cm_type, CRYPTO_MECH_INVALID,
+ CRYPTO_OPS_OFFSET(key_ops), CRYPTO_KEY_OFFSET(key_generate),
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto release_minor;
}
- if ((rv = kcf_get_hardware_provider(mech.cm_type,
- CRYPTO_OPS_OFFSET(key_ops),
- CRYPTO_KEY_OFFSET(key_generate), sp->sd_provider,
- &real_provider)) != CRYPTO_SUCCESS) {
- goto release_minor;
+ carry = 0;
+ rv = crypto_provider_copyin_mech_param(real_provider,
+ STRUCT_FADDR(generate_key, gk_mechanism), &mech, mode, &error);
+
+ if (rv == CRYPTO_NOT_SUPPORTED) {
+ allocated_by_crypto_module = B_TRUE;
+ if (!copyin_mech(mode, STRUCT_FADDR(generate_key, gk_mechanism),
+ &mech, &mech_rctl_bytes, &carry, &rv, &error,
+ &mech_projp)) {
+ goto release_minor;
+ }
+ } else {
+ if (rv != CRYPTO_SUCCESS)
+ goto release_minor;
}
count = STRUCT_FGET(generate_key, gk_count);
@@ -5040,6 +5216,12 @@ out:
if (sp != NULL)
CRYPTO_SESSION_RELE(sp);
crypto_release_minor(cm);
+
+ if (real_provider != NULL) {
+ crypto_free_mech(real_provider,
+ allocated_by_crypto_module, &mech);
+ KCF_PROV_REFRELE(real_provider);
+ }
return (error);
}
@@ -5049,7 +5231,7 @@ object_generate_key_pair(dev_t dev, caddr_t arg, int mode, int *rval)
{
STRUCT_DECL(crypto_object_generate_key_pair, generate_key_pair);
kproject_t *pub_projp, *pri_projp, *mech_projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
kcf_req_params_t params;
crypto_mechanism_t mech;
crypto_object_attribute_t *k_pub_attrs = NULL;
@@ -5070,6 +5252,7 @@ object_generate_key_pair(dev_t dev, caddr_t arg, int mode, int *rval)
uint_t pri_count;
int error = 0;
int rv;
+ boolean_t allocated_by_crypto_module = B_FALSE;
STRUCT_INIT(generate_key_pair, mode);
@@ -5091,16 +5274,30 @@ object_generate_key_pair(dev_t dev, caddr_t arg, int mode, int *rval)
goto release_minor;
}
- if (!copyin_mech(mode, STRUCT_FADDR(generate_key_pair, kp_mechanism),
- &mech, &mech_rctl_bytes, &carry, &rv, &error, &mech_projp)) {
+ bcopy(STRUCT_FADDR(generate_key_pair, kp_mechanism), &mech.cm_type,
+ sizeof (crypto_mech_type_t));
+
+ if ((rv = kcf_get_hardware_provider(mech.cm_type, CRYPTO_MECH_INVALID,
+ CRYPTO_OPS_OFFSET(key_ops), CRYPTO_KEY_OFFSET(key_generate_pair),
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto release_minor;
}
- if ((rv = kcf_get_hardware_provider(mech.cm_type,
- CRYPTO_OPS_OFFSET(key_ops),
- CRYPTO_KEY_OFFSET(key_generate_pair), sp->sd_provider,
- &real_provider)) != CRYPTO_SUCCESS) {
- goto release_minor;
+ carry = 0;
+ rv = crypto_provider_copyin_mech_param(real_provider,
+ STRUCT_FADDR(generate_key_pair, kp_mechanism), &mech, mode, &error);
+
+ if (rv == CRYPTO_NOT_SUPPORTED) {
+ allocated_by_crypto_module = B_TRUE;
+ if (!copyin_mech(mode, STRUCT_FADDR(generate_key_pair,
+ kp_mechanism), &mech, &mech_rctl_bytes, &carry, &rv,
+ &error, &mech_projp)) {
+ goto release_minor;
+ }
+ } else {
+ if (rv != CRYPTO_SUCCESS)
+ goto release_minor;
}
pub_count = STRUCT_FGET(generate_key_pair, kp_public_count);
@@ -5178,6 +5375,12 @@ out:
if (sp != NULL)
CRYPTO_SESSION_RELE(sp);
crypto_release_minor(cm);
+
+ if (real_provider != NULL) {
+ crypto_free_mech(real_provider,
+ allocated_by_crypto_module, &mech);
+ KCF_PROV_REFRELE(real_provider);
+ }
return (error);
}
@@ -5187,7 +5390,7 @@ object_wrap_key(dev_t dev, caddr_t arg, int mode, int *rval)
{
STRUCT_DECL(crypto_object_wrap_key, wrap_key);
kproject_t *mech_projp, *key_projp, *wrapped_key_projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
kcf_req_params_t params;
crypto_mechanism_t mech;
crypto_key_t key;
@@ -5203,6 +5406,7 @@ object_wrap_key(dev_t dev, caddr_t arg, int mode, int *rval)
char *wrapped_key_buffer;
int error = 0;
int rv;
+ boolean_t allocated_by_crypto_module = B_FALSE;
STRUCT_INIT(wrap_key, mode);
@@ -5224,16 +5428,30 @@ object_wrap_key(dev_t dev, caddr_t arg, int mode, int *rval)
goto release_minor;
}
- if (!copyin_mech(mode, STRUCT_FADDR(wrap_key, wk_mechanism),
- &mech, &mech_rctl_bytes, &carry, &rv, &error, &mech_projp)) {
+ bcopy(STRUCT_FADDR(wrap_key, wk_mechanism), &mech.cm_type,
+ sizeof (crypto_mech_type_t));
+
+ if ((rv = kcf_get_hardware_provider(mech.cm_type, CRYPTO_MECH_INVALID,
+ CRYPTO_OPS_OFFSET(key_ops), CRYPTO_KEY_OFFSET(key_wrap),
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto out;
}
- if ((rv = kcf_get_hardware_provider(mech.cm_type,
- CRYPTO_OPS_OFFSET(key_ops),
- CRYPTO_KEY_OFFSET(key_wrap), sp->sd_provider,
- &real_provider)) != CRYPTO_SUCCESS) {
- goto out;
+ carry = 0;
+ rv = crypto_provider_copyin_mech_param(real_provider,
+ STRUCT_FADDR(wrap_key, wk_mechanism), &mech, mode, &error);
+
+ if (rv == CRYPTO_NOT_SUPPORTED) {
+ allocated_by_crypto_module = B_TRUE;
+ if (!copyin_mech(mode, STRUCT_FADDR(wrap_key, wk_mechanism),
+ &mech, &mech_rctl_bytes, &carry, &rv, &error,
+ &mech_projp)) {
+ goto out;
+ }
+ } else {
+ if (rv != CRYPTO_SUCCESS)
+ goto out;
}
if (!copyin_key(mode, STRUCT_FADDR(wrap_key, wk_wrapping_key), &key,
@@ -5308,6 +5526,12 @@ release_minor:
mutex_exit(&crypto_rctl_lock);
crypto_release_minor(cm);
+ if (real_provider != NULL) {
+ crypto_free_mech(real_provider,
+ allocated_by_crypto_module, &mech);
+ KCF_PROV_REFRELE(real_provider);
+ }
+
if (wrapped_key != NULL)
kmem_free(wrapped_key, wrapped_key_len);
@@ -5330,7 +5554,7 @@ object_unwrap_key(dev_t dev, caddr_t arg, int mode, int *rval)
STRUCT_DECL(crypto_object_unwrap_key, unwrap_key);
kproject_t *mech_projp, *unwrapping_key_projp, *wrapped_key_projp,
*k_attrs_projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
kcf_req_params_t params;
crypto_mechanism_t mech;
crypto_key_t unwrapping_key;
@@ -5349,6 +5573,7 @@ object_unwrap_key(dev_t dev, caddr_t arg, int mode, int *rval)
int rv;
uint_t count;
caddr_t uk_attributes;
+ boolean_t allocated_by_crypto_module = B_FALSE;
STRUCT_INIT(unwrap_key, mode);
@@ -5370,16 +5595,30 @@ object_unwrap_key(dev_t dev, caddr_t arg, int mode, int *rval)
goto release_minor;
}
- if (!copyin_mech(mode, STRUCT_FADDR(unwrap_key, uk_mechanism),
- &mech, &mech_rctl_bytes, &carry, &rv, &error, &mech_projp)) {
+ bcopy(STRUCT_FADDR(unwrap_key, uk_mechanism), &mech.cm_type,
+ sizeof (crypto_mech_type_t));
+
+ if ((rv = kcf_get_hardware_provider(mech.cm_type, CRYPTO_MECH_INVALID,
+ CRYPTO_OPS_OFFSET(key_ops), CRYPTO_KEY_OFFSET(key_unwrap),
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto release_minor;
}
- if ((rv = kcf_get_hardware_provider(mech.cm_type,
- CRYPTO_OPS_OFFSET(key_ops),
- CRYPTO_KEY_OFFSET(key_unwrap), sp->sd_provider,
- &real_provider)) != CRYPTO_SUCCESS) {
- goto release_minor;
+ carry = 0;
+ rv = crypto_provider_copyin_mech_param(real_provider,
+ STRUCT_FADDR(unwrap_key, uk_mechanism), &mech, mode, &error);
+
+ if (rv == CRYPTO_NOT_SUPPORTED) {
+ allocated_by_crypto_module = B_TRUE;
+ if (!copyin_mech(mode, STRUCT_FADDR(unwrap_key, uk_mechanism),
+ &mech, &mech_rctl_bytes, &carry, &rv, &error,
+ &mech_projp)) {
+ goto release_minor;
+ }
+ } else {
+ if (rv != CRYPTO_SUCCESS)
+ goto release_minor;
}
if (!copyin_key(mode, STRUCT_FADDR(unwrap_key, uk_unwrapping_key),
@@ -5471,6 +5710,13 @@ out:
if (sp != NULL)
CRYPTO_SESSION_RELE(sp);
crypto_release_minor(cm);
+
+ if (real_provider != NULL) {
+ crypto_free_mech(real_provider,
+ allocated_by_crypto_module, &mech);
+ KCF_PROV_REFRELE(real_provider);
+ }
+
return (error);
}
@@ -5480,7 +5726,7 @@ object_derive_key(dev_t dev, caddr_t arg, int mode, int *rval)
{
STRUCT_DECL(crypto_derive_key, derive_key);
kproject_t *key_projp, *mech_projp, *attributes_projp;
- kcf_provider_desc_t *real_provider;
+ kcf_provider_desc_t *real_provider = NULL;
kcf_req_params_t params;
crypto_object_attribute_t *k_attrs = NULL;
crypto_mechanism_t mech;
@@ -5497,6 +5743,8 @@ object_derive_key(dev_t dev, caddr_t arg, int mode, int *rval)
uint_t count;
int error = 0;
int rv;
+ boolean_t allocated_by_crypto_module = B_FALSE;
+ boolean_t please_destroy_object = B_FALSE;
STRUCT_INIT(derive_key, mode);
@@ -5518,16 +5766,30 @@ object_derive_key(dev_t dev, caddr_t arg, int mode, int *rval)
goto release_minor;
}
- if (!copyin_mech(mode, STRUCT_FADDR(derive_key, dk_mechanism),
- &mech, &mech_rctl_bytes, &carry, &rv, &error, &mech_projp)) {
+ bcopy(STRUCT_FADDR(derive_key, dk_mechanism), &mech.cm_type,
+ sizeof (crypto_mech_type_t));
+
+ if ((rv = kcf_get_hardware_provider(mech.cm_type, CRYPTO_MECH_INVALID,
+ CRYPTO_OPS_OFFSET(key_ops), CRYPTO_KEY_OFFSET(key_derive),
+ CHECK_RESTRICT_FALSE, sp->sd_provider, &real_provider))
+ != CRYPTO_SUCCESS) {
goto release_minor;
}
- if ((rv = kcf_get_hardware_provider(mech.cm_type,
- CRYPTO_OPS_OFFSET(key_ops),
- CRYPTO_KEY_OFFSET(key_derive), sp->sd_provider,
- &real_provider)) != CRYPTO_SUCCESS) {
- goto release_minor;
+ carry = 0;
+ rv = crypto_provider_copyin_mech_param(real_provider,
+ STRUCT_FADDR(derive_key, dk_mechanism), &mech, mode, &error);
+
+ if (rv == CRYPTO_NOT_SUPPORTED) {
+ allocated_by_crypto_module = B_TRUE;
+ if (!copyin_mech(mode, STRUCT_FADDR(derive_key, dk_mechanism),
+ &mech, &mech_rctl_bytes, &carry, &rv, &error,
+ &mech_projp)) {
+ goto release_minor;
+ }
+ } else {
+ if (rv != CRYPTO_SUCCESS)
+ goto release_minor;
}
if (!copyin_key(mode, STRUCT_FADDR(derive_key, dk_base_key),
@@ -5550,9 +5812,22 @@ object_derive_key(dev_t dev, caddr_t arg, int mode, int *rval)
rv = kcf_submit_request(real_provider, NULL, NULL, &params, B_FALSE);
- if (rv == CRYPTO_SUCCESS)
+ if (rv == CRYPTO_SUCCESS) {
STRUCT_FSET(derive_key, dk_object_handle, handle);
+ rv = crypto_provider_copyout_mech_param(real_provider,
+ &mech, STRUCT_FADDR(derive_key, dk_mechanism),
+ mode, &error);
+
+ if (rv == CRYPTO_NOT_SUPPORTED) {
+ rv = CRYPTO_SUCCESS;
+ goto release_minor;
+ }
+
+ if (rv != CRYPTO_SUCCESS)
+ please_destroy_object = B_TRUE;
+ }
+
release_minor:
mutex_enter(&crypto_rctl_lock);
if (mech_rctl_bytes != 0)
@@ -5575,21 +5850,29 @@ release_minor:
if (copyout(STRUCT_BUF(derive_key), arg,
STRUCT_SIZE(derive_key)) != 0) {
if (rv == CRYPTO_SUCCESS) {
- KCF_WRAP_OBJECT_OPS_PARAMS(&params,
- KCF_OP_OBJECT_DESTROY,
- sp->sd_provider_session->ps_session, handle,
- NULL, 0, NULL, 0, NULL, NULL, 0, NULL);
-
- (void) kcf_submit_request(real_provider, NULL,
- NULL, &params, B_FALSE);
-
+ please_destroy_object = B_TRUE;
error = EFAULT;
}
}
out:
+ if (please_destroy_object) {
+ KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_DESTROY,
+ sp->sd_provider_session->ps_session, handle,
+ NULL, 0, NULL, 0, NULL, NULL, 0, NULL);
+
+ (void) kcf_submit_request(real_provider, NULL,
+ NULL, &params, B_FALSE);
+ }
+
if (sp != NULL)
CRYPTO_SESSION_RELE(sp);
crypto_release_minor(cm);
+
+ if (real_provider != NULL) {
+ crypto_free_mech(real_provider,
+ allocated_by_crypto_module, &mech);
+ KCF_PROV_REFRELE(real_provider);
+ }
return (error);
}
diff --git a/usr/src/uts/common/crypto/io/dprov.c b/usr/src/uts/common/crypto/io/dprov.c
index f93b52b815..5bf0be0e90 100644
--- a/usr/src/uts/common/crypto/io/dprov.c
+++ b/usr/src/uts/common/crypto/io/dprov.c
@@ -92,7 +92,12 @@
#include <sys/sysmacros.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
+
#include <sys/sha2.h>
+#include <aes/aes_cbc_crypt.h>
+#include <des/des_impl.h>
+#include <blowfish/blowfish_impl.h>
+
/*
* Debugging macros.
*/
@@ -217,6 +222,7 @@ typedef enum dprov_mech_type {
BLOWFISH_ECB_MECH_INFO_TYPE, /* SUN_CKM_BLOWFISH_ECB */
AES_CBC_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC */
AES_ECB_MECH_INFO_TYPE, /* SUN_CKM_AES_ECB */
+ AES_CTR_MECH_INFO_TYPE, /* SUN_CKM_AES_CTR */
RC4_MECH_INFO_TYPE, /* SUN_CKM_RC4 */
RSA_PKCS_MECH_INFO_TYPE, /* SUN_CKM_RSA_PKCS */
RSA_X_509_MECH_INFO_TYPE, /* SUN_CKM_RSA_X_509 */
@@ -448,6 +454,13 @@ static crypto_mech_info_t dprov_mech_info_tab[] = {
CRYPTO_FG_DECRYPT_ATOMIC | CRYPTO_FG_ENCRYPT_MAC_ATOMIC |
CRYPTO_FG_MAC_DECRYPT_ATOMIC,
AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
+ /* AES-CTR */
+ {SUN_CKM_AES_CTR, AES_CTR_MECH_INFO_TYPE,
+ CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_ENCRYPT_MAC |
+ CRYPTO_FG_MAC_DECRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
+ CRYPTO_FG_DECRYPT_ATOMIC | CRYPTO_FG_ENCRYPT_MAC_ATOMIC |
+ CRYPTO_FG_MAC_DECRYPT_ATOMIC,
+ AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
/* RC4 */
{SUN_CKM_RC4, RC4_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
@@ -868,12 +881,24 @@ static crypto_provider_management_ops_t dprov_management_ops = {
};
static int dprov_free_context(crypto_ctx_t *);
+static int dprov_copyin_mechanism(crypto_provider_handle_t,
+ crypto_mechanism_t *, crypto_mechanism_t *, int *error, int);
+static int dprov_copyout_mechanism(crypto_provider_handle_t,
+ crypto_mechanism_t *, crypto_mechanism_t *, int *error, int);
+static int dprov_free_mechanism(crypto_provider_handle_t,
+ crypto_mechanism_t *);
static crypto_ctx_ops_t dprov_ctx_ops = {
NULL,
dprov_free_context
};
+static crypto_mech_ops_t dprov_mech_ops = {
+ dprov_copyin_mechanism,
+ dprov_copyout_mechanism,
+ dprov_free_mechanism
+};
+
static crypto_ops_t dprov_crypto_ops = {
&dprov_control_ops,
&dprov_digest_ops,
@@ -888,7 +913,8 @@ static crypto_ops_t dprov_crypto_ops = {
&dprov_object_ops,
&dprov_key_ops,
&dprov_management_ops,
- &dprov_ctx_ops
+ &dprov_ctx_ops,
+ &dprov_mech_ops
};
@@ -963,6 +989,7 @@ typedef struct dprov_object {
#define DPROV_CKA_TOKEN 0x00000001
#define DPROV_CKA_PRIVATE 0x00000002
#define DPROV_CKA_VALUE 0x00000011
+#define DPROV_CKA_CERTIFICATE_TYPE 0x00000080
#define DPROV_CKA_KEY_TYPE 0x00000100
#define DPROV_CKA_ENCRYPT 0x00000104
#define DPROV_CKA_DECRYPT 0x00000105
@@ -980,6 +1007,7 @@ typedef struct dprov_object {
#define DPROV_CKA_VALUE_BITS 0x00000160
#define DPROV_CKA_VALUE_LEN 0x00000161
#define DPROV_CKA_EXTRACTABLE 0x00000162
+#define DPROV_HW_FEATURE_TYPE 0x00000300
/*
* Object classes from PKCS#11
@@ -1036,7 +1064,7 @@ typedef struct dprov_session {
static crypto_provider_info_t dprov_prov_info = {
- CRYPTO_SPI_VERSION_1,
+ CRYPTO_SPI_VERSION_2,
"Dummy Pseudo HW Provider",
CRYPTO_HW_PROVIDER,
NULL, /* pi_provider_dev */
@@ -1602,6 +1630,9 @@ dprov_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
/* initialized done by init_token entry point */
softc->ds_token_initialized = B_TRUE;
+ (void) memset(softc->ds_label, ' ', CRYPTO_EXT_SIZE_LABEL);
+ bcopy("Dummy Pseudo HW Provider", softc->ds_label, 24);
+
bcopy("changeme", softc->ds_user_pin, 8);
softc->ds_user_pin_len = 8;
softc->ds_user_pin_set = B_TRUE;
@@ -2075,6 +2106,7 @@ dprov_valid_cipher_mech(crypto_mech_type_t mech_type)
mech_type == BLOWFISH_ECB_MECH_INFO_TYPE ||
mech_type == AES_CBC_MECH_INFO_TYPE ||
mech_type == AES_ECB_MECH_INFO_TYPE ||
+ mech_type == AES_CTR_MECH_INFO_TYPE ||
mech_type == RC4_MECH_INFO_TYPE ||
mech_type == RSA_PKCS_MECH_INFO_TYPE ||
mech_type == RSA_X_509_MECH_INFO_TYPE ||
@@ -4045,6 +4077,244 @@ dprov_free_context(crypto_ctx_t *ctx)
return (CRYPTO_SUCCESS);
}
+/*
+ * Resource control checks don't need to be done. Why? Because this routine
+ * knows the size of the structure, and it can't be overridden by a user.
+ * This is different from the crypto module, which has no knowledge of
+ * specific mechanisms, and therefore has to trust specified size of the
+ * parameter. This trust, or lack of trust, is why the size of the
+ * parameter has to be charged against the project resource control.
+ */
+static int
+copyin_aes_ctr_mech(crypto_mechanism_t *in_mech, crypto_mechanism_t *out_mech,
+ int *out_error, int mode)
+{
+ STRUCT_DECL(crypto_mechanism, mech);
+ STRUCT_DECL(CK_AES_CTR_PARAMS, params);
+ CK_AES_CTR_PARAMS *aes_ctr_params;
+ caddr_t pp;
+ size_t param_len;
+ int error = 0;
+ int rv = 0;
+
+ STRUCT_INIT(mech, mode);
+ STRUCT_INIT(params, mode);
+ bcopy(in_mech, STRUCT_BUF(mech), STRUCT_SIZE(mech));
+ pp = STRUCT_FGETP(mech, cm_param);
+ param_len = STRUCT_FGET(mech, cm_param_len);
+
+ if (param_len != STRUCT_SIZE(params)) {
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto out;
+ }
+
+ out_mech->cm_type = STRUCT_FGET(mech, cm_type);
+ out_mech->cm_param = NULL;
+ out_mech->cm_param_len = 0;
+ if (pp != NULL) {
+ if (copyin((char *)pp, STRUCT_BUF(params), param_len) != 0) {
+ out_mech->cm_param = NULL;
+ error = EFAULT;
+ goto out;
+ }
+ /* allocate param structure and counter block */
+ aes_ctr_params = kmem_alloc(sizeof (CK_AES_CTR_PARAMS) + 16,
+ KM_NOSLEEP);
+ if (aes_ctr_params == NULL) {
+ rv = CRYPTO_HOST_MEMORY;
+ goto out;
+ }
+ aes_ctr_params->cb = (uchar_t *)aes_ctr_params +
+ sizeof (CK_AES_CTR_PARAMS);
+ aes_ctr_params->ulCounterBits = STRUCT_FGET(params,
+ ulCounterBits);
+ if (copyin((char *)STRUCT_FGETP(params, cb),
+ &aes_ctr_params->cb[0], 16) != 0) {
+ kmem_free(aes_ctr_params,
+ sizeof (CK_AES_CTR_PARAMS) + 16);
+ out_mech->cm_param = NULL;
+ error = EFAULT;
+ goto out;
+ }
+ out_mech->cm_param = (char *)aes_ctr_params;
+ out_mech->cm_param_len = sizeof (CK_AES_CTR_PARAMS);
+ }
+out:
+ *out_error = error;
+ return (rv);
+}
+
+/* ARGSUSED */
+static int
+copyout_aes_ctr_mech(crypto_mechanism_t *in_mech, crypto_mechanism_t *out_mech,
+ int *out_error, int mode)
+{
+ STRUCT_DECL(crypto_mechanism, mech);
+ STRUCT_DECL(CK_AES_CTR_PARAMS, params);
+ uint8_t cb[16];
+ caddr_t pp;
+ size_t param_len;
+ int error = 0;
+ int rv = 0;
+
+ STRUCT_INIT(mech, mode);
+ STRUCT_INIT(params, mode);
+ bcopy(out_mech, STRUCT_BUF(mech), STRUCT_SIZE(mech));
+ pp = STRUCT_FGETP(mech, cm_param);
+ param_len = STRUCT_FGET(mech, cm_param_len);
+ if (param_len != STRUCT_SIZE(params)) {
+ rv = CRYPTO_ARGUMENTS_BAD;
+ goto out;
+ }
+
+ if (copyin((char *)pp, STRUCT_BUF(params), param_len) != 0) {
+ error = EFAULT;
+ goto out;
+ }
+
+ /* for testing, overwrite the iv with 16 X 'A' */
+ if (pp != NULL) {
+ (void) memset(cb, 'A', 16);
+ if (copyout(cb, STRUCT_FGETP(params, cb), 16) != 0) {
+ error = EFAULT;
+ goto out;
+ }
+ }
+out:
+ *out_error = error;
+ return (rv);
+}
+
+/* ARGSUSED */
+static int
+dprov_copyin_mechanism(crypto_provider_handle_t provider,
+ crypto_mechanism_t *umech, crypto_mechanism_t *kmech,
+ int *out_error, int mode)
+{
+ STRUCT_DECL(crypto_mechanism, mech);
+ size_t param_len, expected_param_len;
+ caddr_t pp;
+ char *param;
+ int rv;
+ int error = 0;
+
+ ASSERT(!servicing_interrupt());
+
+ STRUCT_INIT(mech, mode);
+ bcopy(umech, STRUCT_BUF(mech), STRUCT_SIZE(mech));
+ pp = STRUCT_FGETP(mech, cm_param);
+ param_len = STRUCT_FGET(mech, cm_param_len);
+
+ kmech->cm_param = NULL;
+ kmech->cm_param_len = 0;
+
+ switch (kmech->cm_type) {
+ case AES_ECB_MECH_INFO_TYPE:
+ case BLOWFISH_ECB_MECH_INFO_TYPE:
+ case DES_ECB_MECH_INFO_TYPE:
+ case DES3_ECB_MECH_INFO_TYPE:
+ rv = CRYPTO_SUCCESS;
+ goto out;
+
+ case DES_CBC_MECH_INFO_TYPE:
+ case DES3_CBC_MECH_INFO_TYPE:
+ expected_param_len = DES_BLOCK_LEN;
+ break;
+
+ case BLOWFISH_CBC_MECH_INFO_TYPE:
+ expected_param_len = BLOWFISH_BLOCK_LEN;
+ break;
+
+ case AES_CBC_MECH_INFO_TYPE:
+ expected_param_len = AES_BLOCK_LEN;
+ break;
+
+ case AES_CTR_MECH_INFO_TYPE:
+ case SHA1_KEY_DERIVATION_MECH_INFO_TYPE: /* for testing only */
+ rv = copyin_aes_ctr_mech(umech, kmech, &error, mode);
+ goto out;
+
+ case RC4_MECH_INFO_TYPE:
+ case RSA_PKCS_MECH_INFO_TYPE:
+ case RSA_X_509_MECH_INFO_TYPE:
+ case MD5_RSA_PKCS_MECH_INFO_TYPE:
+ case SHA1_RSA_PKCS_MECH_INFO_TYPE:
+ case SHA256_RSA_PKCS_MECH_INFO_TYPE:
+ case SHA384_RSA_PKCS_MECH_INFO_TYPE:
+ case SHA512_RSA_PKCS_MECH_INFO_TYPE:
+ rv = CRYPTO_SUCCESS;
+ goto out;
+
+ default:
+ rv = CRYPTO_MECHANISM_INVALID;
+ goto out;
+ }
+
+ if (param_len != expected_param_len) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto out;
+ }
+ if (pp == NULL) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
+ goto out;
+ }
+ if ((param = kmem_alloc(param_len, KM_NOSLEEP)) == NULL) {
+ rv = CRYPTO_HOST_MEMORY;
+ goto out;
+ }
+ if (copyin((char *)pp, param, param_len) != 0) {
+ kmem_free(param, param_len);
+ error = EFAULT;
+ rv = CRYPTO_FAILED;
+ goto out;
+ }
+ kmech->cm_param = (char *)param;
+ kmech->cm_param_len = param_len;
+ rv = CRYPTO_SUCCESS;
+out:
+ *out_error = error;
+ return (rv);
+}
+
+/* ARGSUSED */
+static int
+dprov_copyout_mechanism(crypto_provider_handle_t provider,
+ crypto_mechanism_t *kmech, crypto_mechanism_t *umech,
+ int *out_error, int mode)
+{
+ ASSERT(!servicing_interrupt());
+
+ switch (kmech->cm_type) {
+ case AES_CTR_MECH_INFO_TYPE:
+ case SHA1_KEY_DERIVATION_MECH_INFO_TYPE: /* for testing only */
+ return (copyout_aes_ctr_mech(kmech, umech, out_error, mode));
+ default:
+ return (CRYPTO_MECHANISM_INVALID);
+ }
+}
+
+/*
+ * Free mechanism parameter that was allocated by the provider.
+ */
+/* ARGSUSED */
+static int
+dprov_free_mechanism(crypto_provider_handle_t provider,
+ crypto_mechanism_t *mech)
+{
+ size_t len;
+
+ if (mech->cm_param == NULL || mech->cm_param_len == 0)
+ return (CRYPTO_SUCCESS);
+
+ if (mech->cm_type == AES_CTR_MECH_INFO_TYPE ||
+ mech->cm_type == SHA1_KEY_DERIVATION_MECH_INFO_TYPE) {
+ len = sizeof (CK_AES_CTR_PARAMS) + 16;
+ } else {
+ len = mech->cm_param_len;
+ }
+ kmem_free(mech->cm_param, len);
+ return (CRYPTO_SUCCESS);
+}
/*
* Allocate a dprov taskq request and initialize the common fields.
@@ -4540,9 +4810,9 @@ dprov_digest_task(dprov_req_t *taskq_req)
break;
/* use a session id of zero since we use a software provider */
- error = crypto_digest_prov(&mech,
+ error = crypto_digest_prov(pd, 0, &mech,
taskq_req->dr_digest_req.dr_data,
- taskq_req->dr_digest_req.dr_digest, NULL, pd, 0);
+ taskq_req->dr_digest_req.dr_digest, NULL);
/* release provider reference */
KCF_PROV_REFRELE(pd);
@@ -4652,15 +4922,13 @@ dprov_mac_task(dprov_req_t *taskq_req)
/* use a session id of zero since we use a software provider */
if (taskq_req->dr_type == DPROV_REQ_MAC_ATOMIC)
- error = crypto_mac_prov(&mech,
+ error = crypto_mac_prov(pd, 0, &mech,
taskq_req->dr_mac_req.dr_data,
- &key, NULL, taskq_req->dr_mac_req.dr_mac, NULL,
- pd, 0);
+ &key, NULL, taskq_req->dr_mac_req.dr_mac, NULL);
else
- error = crypto_mac_verify_prov(&mech,
+ error = crypto_mac_verify_prov(pd, 0, &mech,
taskq_req->dr_mac_req.dr_data,
- &key, NULL, taskq_req->dr_mac_req.dr_mac, NULL,
- pd, 0);
+ &key, NULL, taskq_req->dr_mac_req.dr_mac, NULL);
/* release provider reference */
KCF_PROV_REFRELE(pd);
@@ -5222,16 +5490,15 @@ dprov_cipher_task(dprov_req_t *taskq_req)
/* use a session id of zero since we use a software provider */
if (taskq_req->dr_type == DPROV_REQ_ENCRYPT_ATOMIC)
- error = crypto_encrypt_prov(&mech,
+ error = crypto_encrypt_prov(pd, 0, &mech,
taskq_req->dr_cipher_req.dr_plaintext,
keyp, NULL,
- taskq_req->dr_cipher_req.dr_ciphertext, NULL, pd,
- 0);
+ taskq_req->dr_cipher_req.dr_ciphertext, NULL);
else
- error = crypto_decrypt_prov(&mech,
+ error = crypto_decrypt_prov(pd, 0, &mech,
taskq_req->dr_cipher_req.dr_ciphertext,
keyp, NULL,
- taskq_req->dr_cipher_req.dr_plaintext, NULL, pd, 0);
+ taskq_req->dr_cipher_req.dr_plaintext, NULL);
/* release provider reference */
KCF_PROV_REFRELE(pd);
@@ -5258,7 +5525,7 @@ dprov_cipher_mac_key_pd(dprov_state_t *softc, crypto_session_id_t sid,
/* get the cipher key value */
mutex_enter(&softc->ds_lock);
- error = dprov_key_value_secret(softc, sid, taskq_req->dr_type,
+ error = dprov_key_value_secret(softc, sid, DPROV_REQ_ENCRYPT_ATOMIC,
taskq_req->dr_cipher_mac_req.mr_cipher_key, cipher_key);
if (error != CRYPTO_SUCCESS) {
mutex_exit(&softc->ds_lock);
@@ -5266,7 +5533,7 @@ dprov_cipher_mac_key_pd(dprov_state_t *softc, crypto_session_id_t sid,
}
/* get the mac key value */
- error = dprov_key_value_secret(softc, sid, taskq_req->dr_type,
+ error = dprov_key_value_secret(softc, sid, DPROV_REQ_MAC_ATOMIC,
taskq_req->dr_cipher_mac_req.mr_mac_key, mac_key);
mutex_exit(&softc->ds_lock);
if (error != CRYPTO_SUCCESS)
@@ -5525,17 +5792,17 @@ dprov_cipher_mac_task(dprov_req_t *taskq_req)
break;
/* do the atomic encrypt */
- if ((error = crypto_encrypt_prov(
+ if ((error = crypto_encrypt_prov(cipher_pd, 0,
&cipher_mech, plaintext_tmp, &cipher_key, NULL,
- ciphertext_tmp, NULL, cipher_pd, 0)) != CRYPTO_SUCCESS)
+ ciphertext_tmp, NULL)) != CRYPTO_SUCCESS)
break;
/* do the atomic mac */
mac_data = cipher_data;
mac_data.cd_length = dual_data->dd_len2;
mac_data.cd_offset = dual_data->dd_offset2;
- error = crypto_mac_prov(&mac_mech, &mac_data, &mac_key, NULL,
- taskq_req->dr_cipher_mac_req.mr_mac, NULL, mac_pd, 0);
+ error = crypto_mac_prov(mac_pd, 0, &mac_mech, &mac_data,
+ &mac_key, NULL, taskq_req->dr_cipher_mac_req.mr_mac, NULL);
dual_data->dd_len1 = cipher_data.cd_length;
@@ -5649,14 +5916,14 @@ dprov_cipher_mac_task(dprov_req_t *taskq_req)
/* do the atomic mac */
if (taskq_req->dr_type == DPROV_REQ_MAC_DECRYPT_ATOMIC)
- error = crypto_mac_prov(&mac_mech, &cipher_data,
- &mac_key, NULL, taskq_req->dr_cipher_mac_req.mr_mac,
- NULL, mac_pd, 0);
+ error = crypto_mac_prov(mac_pd, 0, &mac_mech,
+ &cipher_data, &mac_key, NULL,
+ taskq_req->dr_cipher_mac_req.mr_mac, NULL);
else
/* DPROV_REQ_MAC_VERIFY_DECRYPT_ATOMIC */
- error = crypto_mac_verify_prov(&mac_mech, &cipher_data,
- &mac_key, NULL, taskq_req->dr_cipher_mac_req.mr_mac,
- NULL, mac_pd, 0);
+ error = crypto_mac_verify_prov(mac_pd, 0, &mac_mech,
+ &cipher_data, &mac_key, NULL,
+ taskq_req->dr_cipher_mac_req.mr_mac, NULL);
if (error != CRYPTO_SUCCESS)
break;
@@ -5664,9 +5931,9 @@ dprov_cipher_mac_task(dprov_req_t *taskq_req)
/* do the atomic decrypt */
cipher_data.cd_length = dual_data->dd_len2;
cipher_data.cd_offset = dual_data->dd_offset2;
- error = crypto_decrypt_prov(&cipher_mech, &cipher_data,
- &cipher_key, NULL, taskq_req->dr_cipher_mac_req.mr_data,
- NULL, cipher_pd, 0);
+ error = crypto_decrypt_prov(cipher_pd, 0, &cipher_mech,
+ &cipher_data, &cipher_key, NULL,
+ taskq_req->dr_cipher_mac_req.mr_data, NULL);
break;
}
@@ -5914,6 +6181,31 @@ dprov_session_task(dprov_req_t *taskq_req)
DPROV_DEBUG(D_SESSION, ("(%d) dprov_session_task: end\n", instance));
}
+/* return true if attribute is defined to be a PKCS#11 long */
+static boolean_t
+fixed_size_attribute(crypto_attr_type_t type)
+{
+ return (type == DPROV_CKA_CLASS ||
+ type == DPROV_CKA_CERTIFICATE_TYPE ||
+ type == DPROV_CKA_KEY_TYPE ||
+ type == DPROV_HW_FEATURE_TYPE);
+}
+
+/*
+ * Attributes defined to be a PKCS#11 long causes problems for dprov
+ * because 32-bit applications set the size to 4 and 64-bit applications
+ * set the size to 8. dprov always stores these fixed-size attributes
+ * as uint32_t.
+ */
+static ssize_t
+attribute_size(crypto_attr_type_t type, ssize_t len)
+{
+ if (fixed_size_attribute(type))
+ return (sizeof (uint32_t));
+
+ return (len);
+}
+
/*
* taskq dispatcher function for object management operations.
*/
@@ -6011,6 +6303,9 @@ dprov_object_task(dprov_req_t *taskq_req)
break;
case DPROV_REQ_OBJECT_GET_ATTRIBUTE_VALUE: {
+ crypto_attr_type_t type;
+ size_t olen, tlen;
+ offset_t offset;
int tmpl_idx;
int object_idx;
ulong_t class = DPROV_CKO_DATA;
@@ -6037,17 +6332,16 @@ dprov_object_task(dprov_req_t *taskq_req)
* Attribute can't be revealed if the CKA_EXTRACTABLE
* attribute is set to false.
*/
+ type = template[tmpl_idx].oa_type;
if (!extractable && class == DPROV_CKO_SECRET_KEY) {
- if (template[tmpl_idx].oa_type ==
- DPROV_CKA_VALUE) {
+ if (type == DPROV_CKA_VALUE) {
template[tmpl_idx].oa_value_len = -1;
error = CRYPTO_ATTRIBUTE_SENSITIVE;
continue;
}
}
if (!extractable && class == DPROV_CKO_PRIVATE_KEY) {
- if (template[tmpl_idx].oa_type ==
- DPROV_CKA_PRIVATE_EXPONENT) {
+ if (type == DPROV_CKA_PRIVATE_EXPONENT) {
template[tmpl_idx].oa_value_len = -1;
error = CRYPTO_ATTRIBUTE_SENSITIVE;
continue;
@@ -6055,7 +6349,7 @@ dprov_object_task(dprov_req_t *taskq_req)
}
object_idx = dprov_find_attr(object->do_attr,
- DPROV_MAX_ATTR, template[tmpl_idx].oa_type);
+ DPROV_MAX_ATTR, type);
if (object_idx == -1) {
/* attribute not found in object */
template[tmpl_idx].oa_value_len = -1;
@@ -6063,28 +6357,42 @@ dprov_object_task(dprov_req_t *taskq_req)
continue;
}
+ tlen = template[tmpl_idx].oa_value_len;
+ olen = object->do_attr[object_idx].oa_value_len;
+ /* return attribute length */
if (template[tmpl_idx].oa_value == NULL) {
- /* return attribute length */
- template[tmpl_idx].oa_value_len =
- object->do_attr[object_idx].oa_value_len;
- continue;
- }
- if (template[tmpl_idx].oa_value_len <
- object->do_attr[object_idx].oa_value_len) {
/*
- * Template buffer for attribute value too
- * small.
+ * The size of the attribute is set by the
+ * library according to the data model of the
+ * application, so don't overwrite it with
+ * dprov's size.
*/
+ if (!fixed_size_attribute(type))
+ template[tmpl_idx].oa_value_len = olen;
+ continue;
+ }
+
+ if (tlen < olen) {
template[tmpl_idx].oa_value_len = -1;
error = CRYPTO_BUFFER_TOO_SMALL;
continue;
}
- /* copy attribute value, update length */
+
+ /* copy attribute value */
+ bzero(template[tmpl_idx].oa_value, tlen);
+
+ offset = 0;
+#ifdef _BIG_ENDIAN
+ if (fixed_size_attribute(type)) {
+ offset = tlen - olen;
+ }
+#endif
bcopy(object->do_attr[object_idx].oa_value,
- template[tmpl_idx].oa_value,
- object->do_attr[object_idx].oa_value_len);
- template[tmpl_idx].oa_value_len =
- object->do_attr[object_idx].oa_value_len;
+ &template[tmpl_idx].oa_value[offset], olen);
+
+ /* don't update length for fixed-size attributes */
+ if (!fixed_size_attribute(type))
+ template[tmpl_idx].oa_value_len = olen;
}
break;
@@ -6776,8 +7084,8 @@ destroy_public_object:
ciphertext.cd_raw.iov_len = ciphertext.cd_length;
ciphertext.cd_miscdata = NULL;
- error = crypto_encrypt_prov(&mech, &plaintext, keyp,
- NULL, &ciphertext, NULL, pd, 0);
+ error = crypto_encrypt_prov(pd, 0, &mech, &plaintext, keyp,
+ NULL, &ciphertext, NULL);
KCF_PROV_REFRELE(pd);
if (error == CRYPTO_SUCCESS ||
@@ -6859,8 +7167,8 @@ destroy_public_object:
plaintext.cd_raw.iov_len = wrapped_key_len;
plaintext.cd_miscdata = NULL;
- error = crypto_decrypt_prov(&mech, &ciphertext, keyp,
- NULL, &plaintext, NULL, pd, 0);
+ error = crypto_decrypt_prov(pd, 0, &mech, &ciphertext, keyp,
+ NULL, &plaintext, NULL);
KCF_PROV_REFRELE(pd);
@@ -7047,8 +7355,8 @@ free_unwrapped_key:
digest.cd_raw.iov_base = digest_buf;
digest.cd_raw.iov_len = hash_size;
- error = crypto_digest_prov(&digest_mech, &data,
- &digest, NULL, pd, 0);
+ error = crypto_digest_prov(pd, 0, &digest_mech, &data,
+ &digest, NULL);
KCF_PROV_REFRELE(pd);
@@ -7349,6 +7657,7 @@ dprov_key_can_use(dprov_object_t *object, dprov_req_type_t req_type)
case DPROV_REQ_SIGN_ATOMIC:
case DPROV_REQ_MAC_INIT:
case DPROV_REQ_MAC_ATOMIC:
+ case DPROV_REQ_MAC_VERIFY_ATOMIC:
rv = dprov_get_object_attr_boolean(object,
DPROV_CKA_SIGN, &ret);
break;
@@ -7693,7 +8002,6 @@ dprov_template_can_create(dprov_session_t *session,
return (CRYPTO_SUCCESS);
}
-
/*
* Create an object from the specified template. Checks whether the
* object can be created according to its attributes and the state
@@ -7715,6 +8023,9 @@ dprov_create_object_from_template(dprov_state_t *softc,
int error;
uint_t attr;
uint_t oattr;
+ crypto_attr_type_t type;
+ size_t old_len, new_len;
+ offset_t offset;
if (nattr > DPROV_MAX_ATTR)
return (CRYPTO_HOST_MEMORY);
@@ -7765,19 +8076,29 @@ dprov_create_object_from_template(dprov_state_t *softc,
for (attr = 0, oattr = 0; attr < nattr; attr++) {
if (template[attr].oa_value == NULL)
continue;
- object->do_attr[oattr].oa_type = template[attr].oa_type;
- if (template[attr].oa_type == DPROV_CKA_EXTRACTABLE) {
+ type = template[attr].oa_type;
+ old_len = template[attr].oa_value_len;
+ new_len = attribute_size(type, old_len);
+
+ if (type == DPROV_CKA_EXTRACTABLE) {
extractable_attribute_present = B_TRUE;
- } else if (template[attr].oa_type == DPROV_CKA_PRIVATE) {
+ } else if (type == DPROV_CKA_PRIVATE) {
private_attribute_present = B_TRUE;
}
- object->do_attr[oattr].oa_value_len =
- template[attr].oa_value_len;
+ object->do_attr[oattr].oa_type = type;
+ object->do_attr[oattr].oa_value_len = new_len;
- object->do_attr[oattr].oa_value = kmem_alloc(
- template[attr].oa_value_len, KM_SLEEP);
- bcopy(template[attr].oa_value, object->do_attr[oattr].oa_value,
- template[attr].oa_value_len);
+ object->do_attr[oattr].oa_value = kmem_zalloc(new_len,
+ KM_SLEEP);
+
+ offset = 0;
+#ifdef _BIG_ENDIAN
+ if (fixed_size_attribute(type)) {
+ offset = old_len - new_len;
+ }
+#endif
+ bcopy(&template[attr].oa_value[offset],
+ object->do_attr[oattr].oa_value, new_len);
oattr++;
}
@@ -7803,11 +8124,16 @@ dprov_create_object_from_template(dprov_state_t *softc,
/*
* Checks whether or not the object matches the specified attributes.
+ *
+ * PKCS#11 attributes which are longs are stored in uint32_t containers
+ * so they can be matched by both 32 and 64-bit applications.
*/
static boolean_t
dprov_attributes_match(dprov_object_t *object,
crypto_object_attribute_t *template, uint_t nattr)
{
+ crypto_attr_type_t type;
+ size_t tlen, olen, diff;
int ta_idx; /* template attribute index */
int oa_idx; /* object attribute index */
@@ -7817,21 +8143,27 @@ dprov_attributes_match(dprov_object_t *object,
continue;
/* find attribute in object */
- oa_idx = dprov_find_attr(object->do_attr, DPROV_MAX_ATTR,
- template[ta_idx].oa_type);
+ type = template[ta_idx].oa_type;
+ oa_idx = dprov_find_attr(object->do_attr, DPROV_MAX_ATTR, type);
if (oa_idx == -1)
/* attribute not found in object */
return (B_FALSE);
- if (template[ta_idx].oa_value_len !=
- object->do_attr[oa_idx].oa_value_len)
- /* value length mismatch */
+ tlen = template[ta_idx].oa_value_len;
+ olen = object->do_attr[oa_idx].oa_value_len;
+ if (tlen < olen)
return (B_FALSE);
- if (bcmp(template[ta_idx].oa_value,
- object->do_attr[oa_idx].oa_value,
- template[ta_idx].oa_value_len) != 0)
+ diff = 0;
+#ifdef _BIG_ENDIAN
+ /* application may think attribute is 8 bytes */
+ if (fixed_size_attribute(type))
+ diff = tlen - olen;
+#endif
+
+ if (bcmp(&template[ta_idx].oa_value[diff],
+ object->do_attr[oa_idx].oa_value, olen) != 0)
/* value mismatch */
return (B_FALSE);
}
@@ -7925,7 +8257,9 @@ dprov_object_set_attr(dprov_session_t *session, crypto_object_id_t object_id,
crypto_object_attribute_t *template, uint_t nattr,
boolean_t check_attributes)
{
+ crypto_attr_type_t type;
dprov_object_t *object;
+ size_t old_len, new_len;
uint_t i, j;
int error;
@@ -7945,8 +8279,8 @@ dprov_object_set_attr(dprov_session_t *session, crypto_object_id_t object_id,
continue;
/* find attribute in object */
- j = dprov_find_attr(object->do_attr, DPROV_MAX_ATTR,
- template[i].oa_type);
+ type = template[i].oa_type;
+ j = dprov_find_attr(object->do_attr, DPROV_MAX_ATTR, type);
if (j != -1) {
/* attribute already exists, free old value */
@@ -7962,16 +8296,17 @@ dprov_object_set_attr(dprov_session_t *session, crypto_object_id_t object_id,
return (CRYPTO_HOST_MEMORY);
}
+ old_len = template[i].oa_value_len;
+ new_len = attribute_size(type, old_len);
+
/* set object attribute value */
- object->do_attr[j].oa_value =
- kmem_alloc(template[i].oa_value_len, KM_SLEEP);
- bcopy(template[i].oa_value, object->do_attr[j].oa_value,
- template[i].oa_value_len);
- object->do_attr[j].oa_value_len =
- template[i].oa_value_len;
+ object->do_attr[j].oa_value = kmem_alloc(new_len, KM_SLEEP);
+ bcopy(&template[i].oa_value[old_len - new_len],
+ object->do_attr[j].oa_value, new_len);
+ object->do_attr[j].oa_value_len = new_len;
/* and the type */
- object->do_attr[j].oa_type = template[i].oa_type;
+ object->do_attr[j].oa_type = type;
}
return (CRYPTO_SUCCESS);
diff --git a/usr/src/uts/common/crypto/spi/kcf_spi.c b/usr/src/uts/common/crypto/spi/kcf_spi.c
index e2d67ca000..1add93cb13 100644
--- a/usr/src/uts/common/crypto/spi/kcf_spi.c
+++ b/usr/src/uts/common/crypto/spi/kcf_spi.c
@@ -56,7 +56,8 @@ static void free_provider_list(kcf_provider_list_t *);
static void remove_provider(kcf_provider_desc_t *);
static void process_logical_providers(crypto_provider_info_t *,
kcf_provider_desc_t *);
-static void copy_ops_vector(crypto_ops_t *, crypto_ops_t *);
+static void copy_ops_vector_v1(crypto_ops_t *, crypto_ops_t *);
+static void copy_ops_vector_v2(crypto_ops_t *, crypto_ops_t *);
static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
static int kcf_prov_kstat_update(kstat_t *, int);
@@ -92,7 +93,7 @@ crypto_register_provider(crypto_provider_info_t *info,
kcf_provider_desc_t *prov_desc = NULL;
int ret = CRYPTO_ARGUMENTS_BAD;
- if (info->pi_interface_version != CRYPTO_SPI_VERSION_1)
+ if (info->pi_interface_version > CRYPTO_SPI_VERSION_2)
return (CRYPTO_VERSION_MISMATCH);
/*
@@ -134,7 +135,13 @@ crypto_register_provider(crypto_provider_info_t *info,
if (info->pi_ops_vector == NULL) {
return (CRYPTO_ARGUMENTS_BAD);
}
- copy_ops_vector(info->pi_ops_vector, prov_desc->pd_ops_vector);
+ copy_ops_vector_v1(info->pi_ops_vector,
+ prov_desc->pd_ops_vector);
+ if (info->pi_interface_version == CRYPTO_SPI_VERSION_2) {
+ copy_ops_vector_v2(info->pi_ops_vector,
+ prov_desc->pd_ops_vector);
+ prov_desc->pd_flags = info->pi_flags;
+ }
}
/*
@@ -606,22 +613,28 @@ crypto_kmflag(crypto_req_handle_t handle)
* persistent.
*/
static void
-copy_ops_vector(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
+copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
+{
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
+}
+
+static void
+copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
- KCF_SPI_COPY_OPS(src_ops, dst_ops, control_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, digest_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, cipher_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, mac_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, sign_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, verify_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, dual_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, dual_cipher_mac_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, random_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, session_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, object_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, key_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, provider_ops);
- KCF_SPI_COPY_OPS(src_ops, dst_ops, ctx_ops);
+ KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
}
/*
@@ -662,7 +675,7 @@ init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
* mechanism, SUN_RANDOM, in this case.
*/
if (info != NULL) {
- if (info->pi_ops_vector->random_ops != NULL) {
+ if (info->pi_ops_vector->co_random_ops != NULL) {
crypto_mech_info_t *rand_mi;
/*