summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKrishna Yenduri <Bhargava.Yenduri@Sun.COM>2009-04-29 15:27:19 -0700
committerKrishna Yenduri <Bhargava.Yenduri@Sun.COM>2009-04-29 15:27:19 -0700
commitef56a3c55098f8a52f056c7aa6ab084bfebef4e7 (patch)
tree32095d35b593e8e6f21e355605d3b9c59c300ba5
parent327644c797ae66411ee2599148c5fbef71bc7414 (diff)
downloadillumos-joyent-ef56a3c55098f8a52f056c7aa6ab084bfebef4e7.tar.gz
4781345 me_mutex lock in kcf_mech_entry_t can be broken up
6771819 Use of atomic increment in KCF causes scaling problems on multi-socket T2 systems
6705174 C_EncryptInit scaling issues on T2plus
6813873 assertion failed: (prov_desc)->pd_refcnt != 0, file: ../../common/crypto/core/kcf_prov_tabs.c, line
-rw-r--r--usr/src/cmd/mdb/common/modules/crypto/crypto_cmds.h12
-rw-r--r--usr/src/cmd/mdb/common/modules/crypto/impl.c124
-rw-r--r--usr/src/cmd/mdb/common/modules/crypto/spi.c11
-rw-r--r--usr/src/uts/common/crypto/api/kcf_miscapi.c39
-rw-r--r--usr/src/uts/common/crypto/core/kcf.c5
-rw-r--r--usr/src/uts/common/crypto/core/kcf_callprov.c126
-rw-r--r--usr/src/uts/common/crypto/core/kcf_cryptoadm.c7
-rw-r--r--usr/src/uts/common/crypto/core/kcf_mech_tabs.c70
-rw-r--r--usr/src/uts/common/crypto/core/kcf_prov_tabs.c86
-rw-r--r--usr/src/uts/common/crypto/core/kcf_sched.c46
-rw-r--r--usr/src/uts/common/crypto/io/crypto.c118
-rw-r--r--usr/src/uts/common/crypto/spi/kcf_spi.c150
-rw-r--r--usr/src/uts/common/sys/crypto/impl.h157
-rw-r--r--usr/src/uts/common/sys/crypto/sched_impl.h10
14 files changed, 542 insertions, 419 deletions
diff --git a/usr/src/cmd/mdb/common/modules/crypto/crypto_cmds.h b/usr/src/cmd/mdb/common/modules/crypto/crypto_cmds.h
index c03d61abf4..0d8d294ff5 100644
--- a/usr/src/cmd/mdb/common/modules/crypto/crypto_cmds.h
+++ b/usr/src/cmd/mdb/common/modules/crypto/crypto_cmds.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2003 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _CRYPTO_CMDS_H
#define _CRYPTO_CMDS_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -47,9 +44,6 @@ extern int crypto_dual_data(uintptr_t addr, uint_t flags, int argc, \
extern int crypto_key(uintptr_t addr, uint_t flags, int argc, \
const mdb_arg_t *argv);
-extern int kcf_sched_info(uintptr_t addr, uint_t flags, int argc, \
- const mdb_arg_t *argv);
-
extern int kcf_provider_desc(uintptr_t addr, uint_t flags, int argc, \
const mdb_arg_t *argv);
diff --git a/usr/src/cmd/mdb/common/modules/crypto/impl.c b/usr/src/cmd/mdb/common/modules/crypto/impl.c
index 649d7fb91d..52d20d2509 100644
--- a/usr/src/cmd/mdb/common/modules/crypto/impl.c
+++ b/usr/src/cmd/mdb/common/modules/crypto/impl.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -36,32 +36,6 @@
#include <sys/crypto/impl.h>
#include "crypto_cmds.h"
-int
-kcf_sched_info(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
-{
- kcf_sched_info_t sched;
- kcf_sched_info_t *sinfo = &sched;
-
- if (!(flags & DCMD_ADDRSPEC)) {
- if ((argc == 1) && (argv->a_type == MDB_TYPE_IMMEDIATE))
- sinfo = (kcf_sched_info_t *)(uintptr_t)argv->a_un.a_val;
- else
- return (DCMD_USAGE);
- } else if (addr == NULL) /* not allowed with DCMD_ADDRSPEC */
- return (DCMD_USAGE);
- else {
- if (mdb_vread(sinfo, sizeof (kcf_sched_info_t), addr) == -1) {
- mdb_warn("cannot read %p", addr);
- return (DCMD_ERR);
- }
- }
- mdb_printf("ks_ndispatches:\t%llu\n", sinfo->ks_ndispatches);
- mdb_printf("ks_nfails:\t%llu\n", sinfo->ks_nfails);
- mdb_printf("ks_nbusy_rval:\t%llu\n", sinfo->ks_nbusy_rval);
- mdb_printf("ks_ntaskq:\t%p\n", sinfo->ks_taskq);
- return (DCMD_OK);
-}
-
static const char *prov_states[] = {
"none",
"KCF_PROV_ALLOCATED",
@@ -71,34 +45,10 @@ static const char *prov_states[] = {
"KCF_PROV_BUSY",
"KCF_PROV_FAILED",
"KCF_PROV_DISABLED",
- "KCF_PROV_REMOVED",
- "KCF_PROV_FREED"
+ "KCF_PROV_UNREGISTERING",
+ "KCF_PROV_UNREGISTERED"
};
-static void
-pr_kstat_named(kstat_named_t *ks)
-{
- mdb_inc_indent(4);
-
- mdb_printf("name = %s\n", ks->name);
- mdb_printf("value = ");
-
- /*
- * The only data type used for the provider kstats is uint64.
- */
- switch (ks->data_type) {
- case KSTAT_DATA_UINT64:
-#if defined(_LP64) || defined(_LONGLONG_TYPE)
- mdb_printf("%llu\n", ks->value.ui64);
-#endif
- break;
- default:
- mdb_warn("Incorrect data type for kstat.\n");
- }
-
- mdb_dec_indent(4);
-}
-
/*ARGSUSED*/
int
kcf_provider_desc(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
@@ -108,7 +58,9 @@ kcf_provider_desc(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
char string[MAXNAMELEN + 1];
int i, j;
crypto_mech_info_t *mech_pointer;
- mdb_arg_t arg;
+ kcf_prov_cpu_t stats;
+ uint64_t dtotal, ftotal, btotal;
+ int holdcnt, jobcnt;
if ((flags & DCMD_ADDRSPEC) != DCMD_ADDRSPEC)
return (DCMD_USAGE);
@@ -139,8 +91,6 @@ kcf_provider_desc(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
mdb_printf("bad pd_prov_type:\t%d\n", desc.pd_prov_type);
}
- mdb_printf("pd_prov_handle:\t\t%p\n", desc.pd_prov_handle);
- mdb_printf("pd_kcf_prov_handle:\t%u\n", desc.pd_kcf_prov_handle);
mdb_printf("pd_prov_id:\t\t%u\n", desc.pd_prov_id);
if (desc.pd_description == NULL)
mdb_printf("pd_description:\t\tNULL\n");
@@ -150,6 +100,38 @@ kcf_provider_desc(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
} else
mdb_printf("pd_description:\t\t%s\n", string);
+ mdb_printf("pd_sid:\t\t\t%u\n", desc.pd_sid);
+ mdb_printf("pd_taskq:\t\t%p\n", desc.pd_taskq);
+ mdb_printf("pd_nbins:\t\t%u\n", desc.pd_nbins);
+ mdb_printf("pd_percpu_bins:\t\t%p\n", desc.pd_percpu_bins);
+
+ dtotal = ftotal = btotal = 0;
+ holdcnt = jobcnt = 0;
+ for (i = 0; i < desc.pd_nbins; i++) {
+ if (mdb_vread(&stats, sizeof (kcf_prov_cpu_t),
+ (uintptr_t)(desc.pd_percpu_bins + i)) == -1) {
+ mdb_warn("cannot read addr %p",
+ desc.pd_percpu_bins + i);
+ return (DCMD_ERR);
+ }
+
+ holdcnt += stats.kp_holdcnt;
+ jobcnt += stats.kp_jobcnt;
+ dtotal += stats.kp_ndispatches;
+ ftotal += stats.kp_nfails;
+ btotal += stats.kp_nbusy_rval;
+ }
+ mdb_inc_indent(4);
+ mdb_printf("total kp_holdcnt:\t\t%d\n", holdcnt);
+ mdb_printf("total kp_jobcnt:\t\t%u\n", jobcnt);
+ mdb_printf("total kp_ndispatches:\t%llu\n", dtotal);
+ mdb_printf("total kp_nfails:\t\t%llu\n", ftotal);
+ mdb_printf("total kp_nbusy_rval:\t%llu\n", btotal);
+ mdb_dec_indent(4);
+
+ mdb_printf("pd_prov_handle:\t\t%p\n", desc.pd_prov_handle);
+ mdb_printf("pd_kcf_prov_handle:\t%u\n", desc.pd_kcf_prov_handle);
+
mdb_printf("pd_ops_vector:\t\t%p\n", desc.pd_ops_vector);
mdb_printf("pd_mech_list_count:\t%u\n", desc.pd_mech_list_count);
/* mechanisms */
@@ -172,28 +154,7 @@ kcf_provider_desc(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
mdb_printf("\n");
}
mdb_dec_indent(8);
- mdb_printf("pd_ks_data.ps_ops_total:\n", desc.pd_ks_data.ps_ops_total);
- pr_kstat_named(&desc.pd_ks_data.ps_ops_total);
- mdb_printf("pd_ks_data.ps_ops_passed:\n",
- desc.pd_ks_data.ps_ops_passed);
- pr_kstat_named(&desc.pd_ks_data.ps_ops_passed);
- mdb_printf("pd_ks_data.ps_ops_failed:\n",
- desc.pd_ks_data.ps_ops_failed);
- pr_kstat_named(&desc.pd_ks_data.ps_ops_failed);
- mdb_printf("pd_ks_data.ps_ops_busy_rval:\n",
- desc.pd_ks_data.ps_ops_busy_rval);
- pr_kstat_named(&desc.pd_ks_data.ps_ops_busy_rval);
-
- mdb_printf("pd_kstat:\t\t%p\n", desc.pd_kstat);
- mdb_printf("kcf_sched_info:\n");
- /* print pd_sched_info via existing function */
- mdb_inc_indent(8);
- arg.a_type = MDB_TYPE_IMMEDIATE;
- arg.a_un.a_val = (uintmax_t)(uintptr_t)&desc.pd_sched_info;
- mdb_call_dcmd("kcf_sched_info", (uintptr_t)NULL, 0, 1, &arg);
- mdb_dec_indent(8);
- mdb_printf("pd_refcnt:\t\t%u\n", desc.pd_refcnt);
if (desc.pd_name == NULL)
mdb_printf("pd_name:\t\t NULL\n");
else if (mdb_readstr(string, MAXNAMELEN + 1, (uintptr_t)desc.pd_name)
@@ -205,16 +166,15 @@ kcf_provider_desc(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
mdb_printf("pd_instance:\t\t%u\n", desc.pd_instance);
mdb_printf("pd_module_id:\t\t%d\n", desc.pd_module_id);
mdb_printf("pd_mctlp:\t\t%p\n", desc.pd_mctlp);
- mdb_printf("pd_sid:\t\t\t%u\n", desc.pd_sid);
mdb_printf("pd_lock:\t\t%p\n", desc.pd_lock);
if (desc.pd_state < KCF_PROV_ALLOCATED ||
- desc.pd_state > KCF_PROV_FREED)
+ desc.pd_state > KCF_PROV_UNREGISTERED)
mdb_printf("pd_state is invalid:\t%d\n", desc.pd_state);
else
mdb_printf("pd_state:\t%s\n", prov_states[desc.pd_state]);
+ mdb_printf("pd_provider_list:\t%p\n", desc.pd_provider_list);
mdb_printf("pd_resume_cv:\t\t%hd\n", desc.pd_resume_cv._opaque);
- mdb_printf("pd_remove_cv:\t\t%hd\n", desc.pd_remove_cv._opaque);
mdb_printf("pd_flags:\t\t%s %s %s %s %s\n",
(desc.pd_flags & CRYPTO_HIDE_PROVIDER) ?
"CRYPTO_HIDE_PROVIDER" : " ",
@@ -228,7 +188,9 @@ kcf_provider_desc(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
"KCF_PROV_RESTRICTED" : " ");
if (desc.pd_flags & CRYPTO_HASH_NO_UPDATE)
mdb_printf("pd_hash_limit:\t\t%u\n", desc.pd_hash_limit);
- mdb_printf("pd_provider_list:\t%p\n", desc.pd_provider_list);
+
+ mdb_printf("pd_kstat:\t\t%p\n", desc.pd_kstat);
+
return (DCMD_OK);
}
diff --git a/usr/src/cmd/mdb/common/modules/crypto/spi.c b/usr/src/cmd/mdb/common/modules/crypto/spi.c
index 7ab7df1130..13d35e64e3 100644
--- a/usr/src/cmd/mdb/common/modules/crypto/spi.c
+++ b/usr/src/cmd/mdb/common/modules/crypto/spi.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* mdb dcmds for selected structures from
* usr/src/uts/common/sys/crypto/spi.h
@@ -96,7 +94,7 @@ crypto_provider_ext_info(uintptr_t addr, uint_t flags, int argc,
return (DCMD_USAGE);
if (mdb_vread(&ext_prov, sizeof (crypto_provider_ext_info_t), addr)
- == -1) {
+ == -1) {
mdb_warn("cannot read addr");
return (DCMD_ERR);
}
@@ -197,7 +195,7 @@ crypto_mech_info(uintptr_t addr, uint_t flags, int argc,
return (DCMD_USAGE);
if (mdb_vread(&minfo, sizeof (crypto_mech_info_t), addr)
- == -1) {
+ == -1) {
mdb_warn("cannot read addr %p", addr);
return (DCMD_ERR);
}
@@ -246,9 +244,6 @@ static const mdb_dcmd_t dcmds[] = {
/* impl.c */
- { "kcf_sched_info", ":",
- "scheduling data for a crypto request", kcf_sched_info, NULL },
-
{ "kcf_provider_desc", ":",
"crypto provider description struct", kcf_provider_desc, NULL },
diff --git a/usr/src/uts/common/crypto/api/kcf_miscapi.c b/usr/src/uts/common/crypto/api/kcf_miscapi.c
index 600a144779..29fdab7a06 100644
--- a/usr/src/uts/common/crypto/api/kcf_miscapi.c
+++ b/usr/src/uts/common/crypto/api/kcf_miscapi.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -122,6 +122,7 @@ crypto_get_mech_list(uint_t *countp, int kmflag)
char *mech_name, *hint_mech, *end;
kcf_soft_conf_entry_t *p;
size_t n;
+ kcf_lock_withpad_t *mp;
/*
* Count the maximum possible mechanisms that can come from the
@@ -141,12 +142,13 @@ crypto_get_mech_list(uint_t *countp, int kmflag)
me_tab = kcf_mech_tabs_tab[cl].met_tab;
for (i = 0; i < me_tab_size; i++) {
me = &me_tab[i];
- mutex_enter(&(me->me_mutex));
+ mp = &me_mutexes[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
if ((me->me_name[0] != 0) && (me->me_num_hwprov >= 1)) {
ASSERT(me->me_hw_prov_chain != NULL);
count++;
}
- mutex_exit(&(me->me_mutex));
+ mutex_exit(&mp->kl_lock);
}
}
@@ -176,11 +178,12 @@ again:
me_tab = kcf_mech_tabs_tab[cl].met_tab;
for (i = 0; i < me_tab_size; i++) {
me = &me_tab[i];
- mutex_enter(&(me->me_mutex));
+ mp = &me_mutexes[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
if ((me->me_name[0] != 0) && (me->me_num_hwprov >= 1)) {
ASSERT(me->me_hw_prov_chain != NULL);
if ((mech_name + CRYPTO_MAX_MECH_NAME) > end) {
- mutex_exit(&(me->me_mutex));
+ mutex_exit(&mp->kl_lock);
kmem_free(tmp_mech_name_tab, n);
n = n << 1;
goto again;
@@ -191,7 +194,7 @@ again:
mech_name += CRYPTO_MAX_MECH_NAME;
count++;
}
- mutex_exit(&(me->me_mutex));
+ mutex_exit(&mp->kl_lock);
}
}
@@ -505,6 +508,7 @@ crypto_key_check(crypto_mechanism_t *mech, crypto_key_t *key)
kcf_mech_entry_t *me;
kcf_provider_desc_t *pd;
kcf_prov_mech_desc_t *prov_chain;
+ kcf_lock_withpad_t *mp;
/* when mech is a valid mechanism, me will be its mech_entry */
if ((mech == NULL) || (key == NULL) ||
@@ -516,7 +520,8 @@ crypto_key_check(crypto_mechanism_t *mech, crypto_key_t *key)
return (CRYPTO_MECHANISM_INVALID);
}
- mutex_enter(&me->me_mutex);
+ mp = &me_mutexes[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
/* First let the software provider check this key */
if (me->me_sw_prov != NULL) {
@@ -527,7 +532,7 @@ crypto_key_check(crypto_mechanism_t *mech, crypto_key_t *key)
(KCF_PROV_KEY_OPS(pd)->key_check != NULL)) {
crypto_mechanism_t lmech;
- mutex_exit(&me->me_mutex);
+ mutex_exit(&mp->kl_lock);
lmech = *mech;
KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
error = KCF_PROV_KEY_CHECK(pd, &lmech, key);
@@ -537,7 +542,7 @@ crypto_key_check(crypto_mechanism_t *mech, crypto_key_t *key)
return (error);
}
- mutex_enter(&me->me_mutex);
+ mutex_enter(&mp->kl_lock);
}
KCF_PROV_REFRELE(pd);
}
@@ -551,7 +556,7 @@ crypto_key_check(crypto_mechanism_t *mech, crypto_key_t *key)
(KCF_PROV_KEY_OPS(pd)->key_check != NULL)) {
crypto_mechanism_t lmech;
- mutex_exit(&me->me_mutex);
+ mutex_exit(&mp->kl_lock);
lmech = *mech;
KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd,
&lmech);
@@ -561,13 +566,13 @@ crypto_key_check(crypto_mechanism_t *mech, crypto_key_t *key)
KCF_PROV_REFRELE(pd);
return (error);
}
- mutex_enter(&me->me_mutex);
+ mutex_enter(&mp->kl_lock);
}
KCF_PROV_REFRELE(pd);
prov_chain = prov_chain->pm_next;
}
- mutex_exit(&me->me_mutex);
+ mutex_exit(&mp->kl_lock);
/* All are happy with this key */
return (CRYPTO_SUCCESS);
@@ -645,6 +650,7 @@ crypto_get_all_mech_info(crypto_mech_type_t mech_type,
kcf_prov_mech_desc_t *hwp;
crypto_mechanism_info_t *infos;
size_t infos_size;
+ kcf_lock_withpad_t *mp;
/* get to the mech entry corresponding to the specified mech type */
if ((rv = kcf_get_mech_entry(mech_type, &me)) != CRYPTO_SUCCESS) {
@@ -652,10 +658,11 @@ crypto_get_all_mech_info(crypto_mech_type_t mech_type,
}
/* compute the number of key size ranges to return */
- mutex_enter(&me->me_mutex);
+ mp = &me_mutexes[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
again:
ninfos = PROV_COUNT(me);
- mutex_exit(&me->me_mutex);
+ mutex_exit(&mp->kl_lock);
if (ninfos == 0) {
infos = NULL;
@@ -669,7 +676,7 @@ again:
goto bail;
}
- mutex_enter(&me->me_mutex);
+ mutex_enter(&mp->kl_lock);
if (ninfos != PROV_COUNT(me)) {
kmem_free(infos, infos_size);
goto again;
@@ -686,7 +693,7 @@ again:
for (hwp = me->me_hw_prov_chain; hwp != NULL; hwp = hwp->pm_next)
init_mechanism_info(&infos[cur_info++], hwp);
- mutex_exit(&me->me_mutex);
+ mutex_exit(&mp->kl_lock);
ASSERT(cur_info == ninfos);
bail:
*mech_infos = infos;
diff --git a/usr/src/uts/common/crypto/core/kcf.c b/usr/src/uts/common/crypto/core/kcf.c
index 6cd8f4f8e5..a7e98ba0d3 100644
--- a/usr/src/uts/common/crypto/core/kcf.c
+++ b/usr/src/uts/common/crypto/core/kcf.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Core KCF (Kernel Cryptographic Framework). This file implements
* the loadable module entry points and module verification routines.
@@ -368,7 +366,6 @@ kcf_verify_signature(void *arg)
out:
if (modhold_done)
mod_release_mod(mctlp);
- KCF_PROV_IREFRELE(pd);
KCF_PROV_REFRELE(pd);
}
diff --git a/usr/src/uts/common/crypto/core/kcf_callprov.c b/usr/src/uts/common/crypto/core/kcf_callprov.c
index f26a19fb76..5f25070d03 100644
--- a/usr/src/uts/common/crypto/core/kcf_callprov.c
+++ b/usr/src/uts/common/crypto/core/kcf_callprov.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* This file contains routines which call into a provider's
* entry points and do other related work.
@@ -52,6 +50,7 @@
static int kcf_emulate_dual(kcf_provider_desc_t *, crypto_ctx_t *,
kcf_req_params_t *);
+
void
kcf_free_triedlist(kcf_prov_tried_t *list)
{
@@ -64,16 +63,24 @@ kcf_free_triedlist(kcf_prov_tried_t *list)
}
}
+/*
+ * The typical caller of this routine does a kcf_get_mech_provider()
+ * which holds the provider and then calls this routine. So, for the
+ * common case (no KCF_HOLD_PROV flag) we skip doing a KCF_PROV_REFHOLD.
+ */
kcf_prov_tried_t *
kcf_insert_triedlist(kcf_prov_tried_t **list, kcf_provider_desc_t *pd,
- int kmflag)
+ int flags)
{
kcf_prov_tried_t *l;
- l = kmem_alloc(sizeof (kcf_prov_tried_t), kmflag);
+ l = kmem_alloc(sizeof (kcf_prov_tried_t),
+ flags & (KM_SLEEP | KM_NOSLEEP));
if (l == NULL)
return (NULL);
+ if (flags & KCF_HOLD_PROV)
+ KCF_PROV_REFHOLD(pd);
l->pt_pd = pd;
l->pt_next = *list;
*list = l;
@@ -140,6 +147,7 @@ kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
kcf_mech_entry_t *me;
kcf_mech_entry_tab_t *me_tab;
int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;
+ kcf_lock_withpad_t *mp;
/* get the mech entry for the specified mechanism */
class = KCF_MECH2CLASS(mech_type_1);
@@ -154,7 +162,8 @@ kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
}
me = &((me_tab->met_tab)[index]);
- mutex_enter(&me->me_mutex);
+ mp = &me_mutexes[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
/*
* We assume the provider descriptor will not go away because
@@ -211,13 +220,16 @@ kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
continue;
}
- len = KCF_PROV_LOAD(provider);
- if (len < gqlen) {
- gqlen = len;
+ /* Do load calculation only if needed */
+ if ((p = p->pl_next) == NULL && gpd == NULL) {
gpd = provider;
+ } else {
+ len = KCF_PROV_LOAD(provider);
+ if (len < gqlen) {
+ gqlen = len;
+ gpd = provider;
+ }
}
-
- p = p->pl_next;
}
if (gpd != NULL) {
@@ -250,7 +262,7 @@ kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
KCF_PROV_REFHOLD(real_pd);
}
out:
- mutex_exit(&me->me_mutex);
+ mutex_exit(&mp->kl_lock);
*new = real_pd;
return (rv);
}
@@ -319,13 +331,16 @@ kcf_get_hardware_provider_nomech(offset_t offset_1, offset_t offset_2,
continue;
}
- len = KCF_PROV_LOAD(provider);
- if (len < gqlen) {
- gqlen = len;
+ /* Do load calculation only if needed */
+ if ((p = p->pl_next) == NULL && gpd == NULL) {
gpd = provider;
+ } else {
+ len = KCF_PROV_LOAD(provider);
+ if (len < gqlen) {
+ gqlen = len;
+ gpd = provider;
+ }
}
-
- p = p->pl_next;
}
mutex_exit(&old->pd_lock);
@@ -425,6 +440,7 @@ kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
int index;
kcf_mech_entry_t *me;
kcf_mech_entry_tab_t *me_tab;
+ kcf_lock_withpad_t *mp;
class = KCF_MECH2CLASS(mech_type);
if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
@@ -443,12 +459,13 @@ kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
if (mepp != NULL)
*mepp = me;
- mutex_enter(&me->me_mutex);
+ mp = &me_mutexes[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
prov_chain = me->me_hw_prov_chain;
/*
- * We check for the threshhold for using a hardware provider for
+ * We check for the threshold for using a hardware provider for
* this amount of data. If there is no software provider available
* for the mechanism, then the threshold is ignored.
*/
@@ -477,12 +494,17 @@ kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
continue;
}
- if ((len = KCF_PROV_LOAD(pd)) < gqlen) {
- gqlen = len;
+ /* Do load calculation only if needed */
+ if ((prov_chain = prov_chain->pm_next) == NULL &&
+ gpd == NULL) {
gpd = pd;
+ } else {
+ len = KCF_PROV_LOAD(pd);
+ if (len < gqlen) {
+ gqlen = len;
+ gpd = pd;
+ }
}
-
- prov_chain = prov_chain->pm_next;
}
pd = gpd;
@@ -507,10 +529,11 @@ kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
*/
if (triedl == NULL)
*error = CRYPTO_MECH_NOT_SUPPORTED;
- } else
+ } else {
KCF_PROV_REFHOLD(pd);
+ }
- mutex_exit(&me->me_mutex);
+ mutex_exit(&mp->kl_lock);
return (pd);
}
@@ -536,6 +559,7 @@ kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
crypto_mech_info_list_t *mil;
crypto_mech_type_t m2id = mech2->cm_type;
kcf_mech_entry_t *me;
+ kcf_lock_withpad_t *mp;
/* when mech is a valid mechanism, me will be its mech_entry */
if (kcf_get_mech_entry(mech1->cm_type, &me) != KCF_SUCCESS) {
@@ -547,7 +571,9 @@ kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
if (mepp != NULL)
*mepp = me;
- mutex_enter(&me->me_mutex);
+
+ mp = &me_mutexes[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
prov_chain = me->me_hw_prov_chain;
/*
@@ -571,7 +597,6 @@ kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
*/
while (prov_chain != NULL) {
pd = prov_chain->pm_prov_desc;
- len = KCF_PROV_LOAD(pd);
if (!IS_FG_SUPPORTED(prov_chain, fg1) ||
!KCF_IS_PROV_USABLE(pd) ||
@@ -582,12 +607,21 @@ kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
continue;
}
- /* Save the best provider capable of m1 */
- if (len < gqlen) {
- *prov_mt1 =
- prov_chain->pm_mech_info.cm_mech_number;
- gqlen = len;
+#define PMD_MECH_NUM(pmdp) (pmdp)->pm_mech_info.cm_mech_number
+
+ /* Do load calculation only if needed */
+ if (prov_chain->pm_next == NULL && pdm1 == NULL) {
+ *prov_mt1 = PMD_MECH_NUM(prov_chain);
pdm1 = pd;
+ } else {
+ len = KCF_PROV_LOAD(pd);
+
+ /* Save the best provider capable of m1 */
+ if (len < gqlen) {
+ *prov_mt1 = PMD_MECH_NUM(prov_chain);
+ gqlen = len;
+ pdm1 = pd;
+ }
}
/* See if pd can do me2 too */
@@ -597,15 +631,23 @@ kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
fg2) == 0)
continue;
- if ((mil->ml_kcf_mechid == m2id) &&
- (len < dgqlen)) {
- /* Bingo! */
- dgqlen = len;
- pdm1m2 = pd;
- *prov_mt2 =
- mil->ml_mech_info.cm_mech_number;
- *prov_mt1 = prov_chain->
- pm_mech_info.cm_mech_number;
+#define MIL_MECH_NUM(mil) (mil)->ml_mech_info.cm_mech_number
+
+ if (mil->ml_kcf_mechid == m2id) { /* Bingo! */
+
+ /* Do load calculation only if needed */
+ if (prov_chain->pm_next == NULL &&
+ pdm1m2 == NULL) {
+ pdm1m2 = pd;
+ *prov_mt2 = MIL_MECH_NUM(mil);
+ } else {
+ if (len < dgqlen) {
+ dgqlen = len;
+ pdm1m2 = pd;
+ *prov_mt2 =
+ MIL_MECH_NUM(mil);
+ }
+ }
break;
}
}
@@ -648,7 +690,7 @@ kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
else
KCF_PROV_REFHOLD(pd);
- mutex_exit(&me->me_mutex);
+ mutex_exit(&mp->kl_lock);
return (pd);
}
diff --git a/usr/src/uts/common/crypto/core/kcf_cryptoadm.c b/usr/src/uts/common/crypto/core/kcf_cryptoadm.c
index 9abec9902a..64fdeaa176 100644
--- a/usr/src/uts/common/crypto/core/kcf_cryptoadm.c
+++ b/usr/src/uts/common/crypto/core/kcf_cryptoadm.c
@@ -713,12 +713,11 @@ crypto_load_soft_disabled(char *name, uint_t new_count,
if (provider->pd_kstat != NULL)
KCF_PROV_REFRELE(provider);
- mutex_enter(&provider->pd_lock);
/* Wait till the existing requests complete. */
- while (provider->pd_state != KCF_PROV_FREED) {
- cv_wait(&provider->pd_remove_cv, &provider->pd_lock);
+ while (kcf_get_refcnt(provider, B_TRUE) > 0) {
+ /* wait 1 second and try again. */
+ delay(1 * drv_usectohz(1000000));
}
- mutex_exit(&provider->pd_lock);
}
if (new_count == 0) {
diff --git a/usr/src/uts/common/crypto/core/kcf_mech_tabs.c b/usr/src/uts/common/crypto/core/kcf_mech_tabs.c
index 8836aa917f..099f73a291 100644
--- a/usr/src/uts/common/crypto/core/kcf_mech_tabs.c
+++ b/usr/src/uts/common/crypto/core/kcf_mech_tabs.c
@@ -104,7 +104,24 @@ kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = {
};
/*
- * Per-algorithm internal threasholds for the minimum input size of before
+ * Protects fields in kcf_mech_entry. This is an array
+ * of locks indexed by the cpuid. A reader needs to hold
+ * a single lock while a writer needs to hold all locks.
+ * krwlock_t is not an option here because the hold time
+ * is very small for these locks.
+ */
+kcf_lock_withpad_t *me_mutexes;
+
+#define ME_MUTEXES_ENTER_ALL() \
+ for (int i = 0; i < max_ncpus; i++) \
+ mutex_enter(&me_mutexes[i].kl_lock);
+
+#define ME_MUTEXES_EXIT_ALL() \
+ for (int i = 0; i < max_ncpus; i++) \
+ mutex_exit(&me_mutexes[i].kl_lock);
+
+/*
+ * Per-algorithm internal thresholds for the minimum input size of before
* offloading to hardware provider.
* Dispatching a crypto operation to a hardware provider entails paying the
* cost of an additional context switch. Measurments with Sun Accelerator 4000
@@ -239,8 +256,6 @@ kcf_init_mech_tabs()
max = kcf_mech_tabs_tab[class].met_size;
me_tab = kcf_mech_tabs_tab[class].met_tab;
for (i = 0; i < max; i++) {
- mutex_init(&(me_tab[i].me_mutex), NULL,
- MUTEX_DEFAULT, NULL);
if (me_tab[i].me_name[0] != 0) {
me_tab[i].me_mechid = KCF_MECHID(class, i);
(void) mod_hash_insert(kcf_mech_hash,
@@ -249,6 +264,12 @@ kcf_init_mech_tabs()
}
}
}
+
+ me_mutexes = kmem_zalloc(max_ncpus * sizeof (kcf_lock_withpad_t),
+ KM_SLEEP);
+ for (i = 0; i < max_ncpus; i++) {
+ mutex_init(&me_mutexes[i].kl_lock, NULL, MUTEX_DEFAULT, NULL);
+ }
}
/*
@@ -305,7 +326,7 @@ kcf_create_mech_entry(kcf_ops_class_t class, char *mechname)
size = kcf_mech_tabs_tab[class].met_size;
while (i < size) {
- mutex_enter(&(me_tab[i].me_mutex));
+ ME_MUTEXES_ENTER_ALL();
if (me_tab[i].me_name[0] == 0) {
/* Found an empty spot */
(void) strncpy(me_tab[i].me_name, mechname,
@@ -318,14 +339,14 @@ kcf_create_mech_entry(kcf_ops_class_t class, char *mechname)
*/
me_tab[i].me_threshold = 0;
- mutex_exit(&(me_tab[i].me_mutex));
+ ME_MUTEXES_EXIT_ALL();
/* Add the new mechanism to the hash table */
(void) mod_hash_insert(kcf_mech_hash,
(mod_hash_key_t)me_tab[i].me_name,
(mod_hash_val_t)&(me_tab[i].me_mechid));
break;
}
- mutex_exit(&(me_tab[i].me_mutex));
+ ME_MUTEXES_EXIT_ALL();
i++;
}
@@ -441,7 +462,6 @@ kcf_add_mech_provider(short mech_indx,
[KCF_MECH2INDEX(kcf_mech_type)] = mech_indx;
KCF_PROV_REFHOLD(prov_desc);
- KCF_PROV_IREFHOLD(prov_desc);
dual_fg_mask = mech_info->cm_func_group_mask & CRYPTO_FG_DUAL_MASK;
@@ -481,9 +501,9 @@ kcf_add_mech_provider(short mech_indx,
* Ignore hard-coded entries in the mech table
* if the provider hasn't registered.
*/
- mutex_enter(&me->me_mutex);
+ ME_MUTEXES_ENTER_ALL();
if (me->me_hw_prov_chain == NULL && me->me_sw_prov == NULL) {
- mutex_exit(&me->me_mutex);
+ ME_MUTEXES_EXIT_ALL();
kmem_free(mil, sizeof (*mil));
kmem_free(mil2, sizeof (*mil2));
continue;
@@ -508,7 +528,7 @@ kcf_add_mech_provider(short mech_indx,
if (prov_mech2 == NULL) {
kmem_free(mil2, sizeof (*mil2));
- mutex_exit(&me->me_mutex);
+ ME_MUTEXES_EXIT_ALL();
continue;
}
@@ -532,7 +552,7 @@ kcf_add_mech_provider(short mech_indx,
if (prov_mech2 == NULL)
kmem_free(mil2, sizeof (*mil2));
- mutex_exit(&me->me_mutex);
+ ME_MUTEXES_EXIT_ALL();
}
add_entry:
@@ -543,16 +563,16 @@ add_entry:
switch (prov_desc->pd_prov_type) {
case CRYPTO_HW_PROVIDER:
- mutex_enter(&mech_entry->me_mutex);
+ ME_MUTEXES_ENTER_ALL();
prov_mech->pm_me = mech_entry;
prov_mech->pm_next = mech_entry->me_hw_prov_chain;
mech_entry->me_hw_prov_chain = prov_mech;
mech_entry->me_num_hwprov++;
- mutex_exit(&mech_entry->me_mutex);
+ ME_MUTEXES_EXIT_ALL();
break;
case CRYPTO_SW_PROVIDER:
- mutex_enter(&mech_entry->me_mutex);
+ ME_MUTEXES_ENTER_ALL();
if (mech_entry->me_sw_prov != NULL) {
/*
* There is already a SW provider for this mechanism.
@@ -579,7 +599,7 @@ add_entry:
/* We'll wrap around after 4 billion registrations! */
mech_entry->me_gen_swprov = kcf_gen_swprov++;
}
- mutex_exit(&mech_entry->me_mutex);
+ ME_MUTEXES_EXIT_ALL();
break;
}
@@ -633,7 +653,7 @@ kcf_remove_mech_provider(char *mech_name, kcf_provider_desc_t *prov_desc)
return;
}
- mutex_enter(&mech_entry->me_mutex);
+ ME_MUTEXES_ENTER_ALL();
switch (prov_desc->pd_prov_type) {
@@ -649,7 +669,7 @@ kcf_remove_mech_provider(char *mech_name, kcf_provider_desc_t *prov_desc)
if (prov_mech == NULL) {
/* entry not found, simply return */
- mutex_exit(&mech_entry->me_mutex);
+ ME_MUTEXES_EXIT_ALL();
return;
}
@@ -663,7 +683,7 @@ kcf_remove_mech_provider(char *mech_name, kcf_provider_desc_t *prov_desc)
if (mech_entry->me_sw_prov == NULL ||
mech_entry->me_sw_prov->pm_prov_desc != prov_desc) {
/* not the software provider for this mechanism */
- mutex_exit(&mech_entry->me_mutex);
+ ME_MUTEXES_EXIT_ALL();
return;
}
prov_mech = mech_entry->me_sw_prov;
@@ -671,7 +691,7 @@ kcf_remove_mech_provider(char *mech_name, kcf_provider_desc_t *prov_desc)
break;
}
- mutex_exit(&mech_entry->me_mutex);
+ ME_MUTEXES_EXIT_ALL();
/* Free the dual ops cross-reference lists */
mil = prov_mech->pm_mi_list;
@@ -683,7 +703,7 @@ kcf_remove_mech_provider(char *mech_name, kcf_provider_desc_t *prov_desc)
continue;
}
- mutex_enter(&mech_entry->me_mutex);
+ ME_MUTEXES_ENTER_ALL();
if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
prov_chain = mech_entry->me_hw_prov_chain;
else
@@ -707,14 +727,13 @@ kcf_remove_mech_provider(char *mech_name, kcf_provider_desc_t *prov_desc)
prov_chain = prov_chain->pm_next;
}
- mutex_exit(&mech_entry->me_mutex);
+ ME_MUTEXES_EXIT_ALL();
kmem_free(mil, sizeof (crypto_mech_info_list_t));
mil = next;
}
/* free entry */
KCF_PROV_REFRELE(prov_mech->pm_prov_desc);
- KCF_PROV_IREFRELE(prov_mech->pm_prov_desc);
kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
}
@@ -811,6 +830,7 @@ crypto_mech2id_common(char *mechname, boolean_t load_module)
kcf_ops_class_t class;
boolean_t second_time = B_FALSE;
boolean_t try_to_load_software_provider = B_FALSE;
+ kcf_lock_withpad_t *mp;
try_again:
mt = kcf_mech_hash_find(mechname);
@@ -821,11 +841,13 @@ try_again:
class = KCF_MECH2CLASS(mt);
i = KCF_MECH2INDEX(mt);
me = &(kcf_mech_tabs_tab[class].met_tab[i]);
- mutex_enter(&(me->me_mutex));
+ mp = &me_mutexes[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
+
if (load_module && !auto_unload_flag_set(me->me_sw_prov)) {
try_to_load_software_provider = B_TRUE;
}
- mutex_exit(&(me->me_mutex));
+ mutex_exit(&mp->kl_lock);
}
if (mt == CRYPTO_MECH_INVALID || try_to_load_software_provider) {
diff --git a/usr/src/uts/common/crypto/core/kcf_prov_tabs.c b/usr/src/uts/common/crypto/core/kcf_prov_tabs.c
index baaaffb24a..d6a7be9b8d 100644
--- a/usr/src/uts/common/crypto/core/kcf_prov_tabs.c
+++ b/usr/src/uts/common/crypto/core/kcf_prov_tabs.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -65,7 +65,7 @@
* prov_tab entries are not updated from kcf.conf or by cryptoadm(1M).
*/
static kcf_provider_desc_t **prov_tab = NULL;
-static kmutex_t prov_tab_mutex; /* ensure exclusive access to the table */
+kmutex_t prov_tab_mutex; /* ensure exclusive access to the table */
static uint_t prov_tab_num = 0; /* number of providers in table */
static uint_t prov_tab_max = KCF_MAX_PROVIDERS;
@@ -119,7 +119,6 @@ kcf_prov_tab_add_provider(kcf_provider_desc_t *prov_desc)
/* initialize entry */
prov_tab[i] = prov_desc;
KCF_PROV_REFHOLD(prov_desc);
- KCF_PROV_IREFHOLD(prov_desc);
prov_tab_num++;
mutex_exit(&prov_tab_mutex);
@@ -178,7 +177,6 @@ kcf_prov_tab_rem_provider(crypto_provider_id_t prov_id)
*/
KCF_PROV_REFRELE(prov_desc);
- KCF_PROV_IREFRELE(prov_desc);
#if DEBUG
if (kcf_frmwrk_debug >= 1)
@@ -363,38 +361,12 @@ kcf_alloc_provider_desc(crypto_provider_info_t *info)
mutex_init(&desc->pd_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&desc->pd_resume_cv, NULL, CV_DEFAULT, NULL);
- cv_init(&desc->pd_remove_cv, NULL, CV_DEFAULT, NULL);
- return (desc);
-}
-
-/*
- * Called by KCF_PROV_REFRELE when a provider's reference count drops
- * to zero. We free the descriptor when the last reference is released.
- * However, for software providers, we do not free it when there is an
- * unregister thread waiting. We signal that thread in this case and
- * that thread is responsible for freeing the descriptor.
- */
-void
-kcf_provider_zero_refcnt(kcf_provider_desc_t *desc)
-{
- mutex_enter(&desc->pd_lock);
- switch (desc->pd_prov_type) {
- case CRYPTO_SW_PROVIDER:
- if (desc->pd_state == KCF_PROV_REMOVED ||
- desc->pd_state == KCF_PROV_DISABLED) {
- desc->pd_state = KCF_PROV_FREED;
- cv_broadcast(&desc->pd_remove_cv);
- mutex_exit(&desc->pd_lock);
- break;
- }
- /* FALLTHRU */
+ desc->pd_nbins = max_ncpus;
+ desc->pd_percpu_bins =
+ kmem_zalloc(desc->pd_nbins * sizeof (kcf_prov_cpu_t), KM_SLEEP);
- case CRYPTO_HW_PROVIDER:
- case CRYPTO_LOGICAL_PROVIDER:
- mutex_exit(&desc->pd_lock);
- kcf_free_provider_desc(desc);
- }
+ return (desc);
}
/*
@@ -499,8 +471,13 @@ kcf_free_provider_desc(kcf_provider_desc_t *desc)
kmem_free(desc->pd_name, strlen(desc->pd_name) + 1);
}
- if (desc->pd_sched_info.ks_taskq != NULL)
- taskq_destroy(desc->pd_sched_info.ks_taskq);
+ if (desc->pd_taskq != NULL)
+ taskq_destroy(desc->pd_taskq);
+
+ if (desc->pd_percpu_bins != NULL) {
+ kmem_free(desc->pd_percpu_bins,
+ desc->pd_nbins * sizeof (kcf_prov_cpu_t));
+ }
kmem_free(desc, sizeof (kcf_provider_desc_t));
}
@@ -798,6 +775,7 @@ kcf_get_sw_prov(crypto_mech_type_t mech_type, kcf_provider_desc_t **pd,
kcf_mech_entry_t **mep, boolean_t log_warn)
{
kcf_mech_entry_t *me;
+ kcf_lock_withpad_t *mp;
/* get the mechanism entry for this mechanism */
if (kcf_get_mech_entry(mech_type, &me) != KCF_SUCCESS)
@@ -807,7 +785,8 @@ kcf_get_sw_prov(crypto_mech_type_t mech_type, kcf_provider_desc_t **pd,
* Get the software provider for this mechanism.
* Lock the mech_entry until we grab the 'pd'.
*/
- mutex_enter(&me->me_mutex);
+ mp = &me_mutexes[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
if (me->me_sw_prov == NULL ||
(*pd = me->me_sw_prov->pm_prov_desc) == NULL) {
@@ -815,12 +794,12 @@ kcf_get_sw_prov(crypto_mech_type_t mech_type, kcf_provider_desc_t **pd,
if (log_warn)
cmn_err(CE_WARN, "no SW provider for \"%s\"\n",
me->me_name);
- mutex_exit(&me->me_mutex);
+ mutex_exit(&mp->kl_lock);
return (CRYPTO_MECH_NOT_SUPPORTED);
}
KCF_PROV_REFHOLD(*pd);
- mutex_exit(&me->me_mutex);
+ mutex_exit(&mp->kl_lock);
if (mep != NULL)
*mep = me;
@@ -897,7 +876,6 @@ verify_unverified_providers()
continue;
KCF_PROV_REFHOLD(pd);
- KCF_PROV_IREFHOLD(pd);
/*
* We need to drop this lock, since it could be
@@ -915,3 +893,31 @@ verify_unverified_providers()
mutex_exit(&prov_tab_mutex);
}
+
+/* protected by prov_tab_mutex */
+boolean_t kcf_need_provtab_walk = B_FALSE;
+
+void
+kcf_free_unregistered_provs()
+{
+ int i;
+ kcf_provider_desc_t *pd;
+ boolean_t walk_again = B_FALSE;
+
+ mutex_enter(&prov_tab_mutex);
+ for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
+ if ((pd = prov_tab[i]) == NULL ||
+ pd->pd_state != KCF_PROV_UNREGISTERED)
+ continue;
+
+ if (kcf_get_refcnt(pd, B_TRUE) == 0) {
+ mutex_exit(&prov_tab_mutex);
+ kcf_free_provider_desc(pd);
+ mutex_enter(&prov_tab_mutex);
+ } else
+ walk_again = B_TRUE;
+ }
+
+ kcf_need_provtab_walk = walk_again;
+ mutex_exit(&prov_tab_mutex);
+}
diff --git a/usr/src/uts/common/crypto/core/kcf_sched.c b/usr/src/uts/common/crypto/core/kcf_sched.c
index 9930191a78..b11cffc215 100644
--- a/usr/src/uts/common/crypto/core/kcf_sched.c
+++ b/usr/src/uts/common/crypto/core/kcf_sched.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -269,6 +269,7 @@ process_req_hwp(void *ireq)
kcf_provider_desc_t *pd;
kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;
+ kcf_prov_cpu_t *mp;
pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
sreq->sn_provider : areq->an_provider;
@@ -291,7 +292,8 @@ process_req_hwp(void *ireq)
* processed. This is how we know when it's safe to unregister
* a provider. This step must precede the pd_state check below.
*/
- KCF_PROV_IREFHOLD(pd);
+ mp = &(pd->pd_percpu_bins[CPU_SEQID]);
+ KCF_PROV_JOB_HOLD(mp);
/*
* Fail the request if the provider has failed. We return a
@@ -307,6 +309,7 @@ process_req_hwp(void *ireq)
if (ctype == CRYPTO_SYNCH) {
mutex_enter(&sreq->sn_lock);
sreq->sn_state = REQ_INPROGRESS;
+ sreq->sn_mp = mp;
mutex_exit(&sreq->sn_lock);
ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
@@ -340,6 +343,7 @@ process_req_hwp(void *ireq)
}
}
areq->an_state = REQ_INPROGRESS;
+ areq->an_mp = mp;
mutex_exit(&areq->an_lock);
error = common_submit_request(areq->an_provider, ctx,
@@ -355,7 +359,7 @@ bail:
*/
return;
} else { /* CRYPTO_SUCCESS or other failure */
- KCF_PROV_IREFRELE(pd);
+ KCF_PROV_JOB_RELE(mp);
if (ctype == CRYPTO_SYNCH)
kcf_sop_done(sreq, error);
else
@@ -524,12 +528,11 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
old_pd = areq->an_provider;
/*
- * Add old_pd to the list of providers already tried. We release
- * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
- * kcf_free_triedlist().
+ * Add old_pd to the list of providers already tried.
+ * We release the new hold on old_pd in kcf_free_triedlist().
*/
if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
- KM_NOSLEEP) == NULL)
+ KM_NOSLEEP | KCF_HOLD_PROV) == NULL)
return (error);
if (mech1 && !mech2) {
@@ -579,7 +582,7 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
break;
case CRYPTO_HW_PROVIDER: {
- taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;
+ taskq_t *taskq = new_pd->pd_taskq;
if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
(taskqid_t)0) {
@@ -592,6 +595,7 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
}
}
+ KCF_PROV_REFRELE(new_pd);
return (error);
}
@@ -614,11 +618,12 @@ int
kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
{
- int error = CRYPTO_SUCCESS;
+ int error;
kcf_areq_node_t *areq;
kcf_sreq_node_t *sreq;
kcf_context_t *kcf_ctx;
- taskq_t *taskq = pd->pd_sched_info.ks_taskq;
+ taskq_t *taskq;
+ kcf_prov_cpu_t *mp;
kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;
@@ -631,6 +636,8 @@ kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
break;
case CRYPTO_HW_PROVIDER:
+ taskq = pd->pd_taskq;
+
/*
* Special case for CRYPTO_SYNCHRONOUS providers that
* never return a CRYPTO_QUEUED error. We skip any
@@ -638,15 +645,17 @@ kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
*/
if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
EMPTY_TASKQ(taskq)) {
- KCF_PROV_IREFHOLD(pd);
+ mp = &(pd->pd_percpu_bins[CPU_SEQID]);
+ KCF_PROV_JOB_HOLD(mp);
+
if (pd->pd_state == KCF_PROV_READY) {
error = common_submit_request(pd, ctx,
params, KCF_RHNDL(KM_SLEEP));
- KCF_PROV_IREFRELE(pd);
+ KCF_PROV_JOB_RELE(mp);
ASSERT(error != CRYPTO_QUEUED);
break;
}
- KCF_PROV_IREFRELE(pd);
+ KCF_PROV_JOB_RELE(mp);
}
sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
@@ -770,6 +779,7 @@ kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
goto done;
}
+ taskq = pd->pd_taskq;
ASSERT(taskq != NULL);
/*
* We can not tell from taskq_dispatch() return
@@ -830,6 +840,7 @@ kcf_free_context(kcf_context_t *kcf_ctx)
kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;
+ kcf_prov_cpu_t *mp;
/* Release the second context, if any */
@@ -844,10 +855,11 @@ kcf_free_context(kcf_context_t *kcf_ctx)
* doesn't unregister from the framework while
* we're calling the entry point.
*/
- KCF_PROV_IREFHOLD(pd);
+ mp = &(pd->pd_percpu_bins[CPU_SEQID]);
+ KCF_PROV_JOB_HOLD(mp);
mutex_exit(&pd->pd_lock);
(void) KCF_PROV_FREE_CONTEXT(pd, gctx);
- KCF_PROV_IREFRELE(pd);
+ KCF_PROV_JOB_RELE(mp);
} else {
mutex_exit(&pd->pd_lock);
}
@@ -1492,6 +1504,8 @@ kcf_svc_wait(int *nthrs)
case -1:
/* Timed out. Recalculate the min/max threads */
compute_min_max_threads();
+ if (kcf_need_provtab_walk)
+ kcf_free_unregistered_provs();
break;
default:
@@ -2002,7 +2016,7 @@ out:
case CRYPTO_HW_PROVIDER: {
kcf_provider_desc_t *old_pd;
- taskq_t *taskq = pd->pd_sched_info.ks_taskq;
+ taskq_t *taskq = pd->pd_taskq;
/*
* Set the params for the second step in the
diff --git a/usr/src/uts/common/crypto/io/crypto.c b/usr/src/uts/common/crypto/io/crypto.c
index 06ab994737..974407154b 100644
--- a/usr/src/uts/common/crypto/io/crypto.c
+++ b/usr/src/uts/common/crypto/io/crypto.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -60,13 +60,17 @@ extern int kcf_sha1_threshold;
/*
* Locking notes:
*
- * crypto_lock protects the global array of minor structures. It
- * also protects the cm_refcnt member of each of these structures.
- * The cm_cv is used to signal decrements in the cm_refcnt,
- * and is used with the global crypto_lock.
+ * crypto_locks protects the global array of minor structures.
+ * crypto_locks is an array of locks indexed by the cpuid. A reader needs
+ * to hold a single lock while a writer needs to hold all locks.
+ * krwlock_t is not an option here because the hold time
+ * is very small for these locks.
*
- * Other fields in the minor structure are protected by the
- * cm_lock member of the minor structure.
+ * The fields in the minor structure are protected by the cm_lock member
+ * of the minor structure. The cm_cv is used to signal decrements
+ * in the cm_refcnt, and is used with the cm_lock.
+ *
+ * The locking order is crypto_locks followed by cm_lock.
*/
/*
@@ -165,7 +169,15 @@ static minor_t crypto_minors_table_count = 0;
*/
static vmem_t *crypto_arena = NULL; /* Arena for device minors */
static minor_t crypto_minors_count = 0;
-static kmutex_t crypto_lock;
+static kcf_lock_withpad_t *crypto_locks;
+
+#define CRYPTO_ENTER_ALL_LOCKS() \
+ for (i = 0; i < max_ncpus; i++) \
+ mutex_enter(&crypto_locks[i].kl_lock);
+
+#define CRYPTO_EXIT_ALL_LOCKS() \
+ for (i = 0; i < max_ncpus; i++) \
+ mutex_exit(&crypto_locks[i].kl_lock);
#define RETURN_LIST B_TRUE
#define DONT_RETURN_LIST B_FALSE
@@ -337,6 +349,8 @@ crypto_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
static int
crypto_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
+ int i;
+
if (cmd != DDI_ATTACH) {
return (DDI_FAILURE);
}
@@ -362,7 +376,11 @@ crypto_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
return (DDI_FAILURE);
}
- mutex_init(&crypto_lock, NULL, MUTEX_DRIVER, NULL);
+ crypto_locks = kmem_zalloc(max_ncpus * sizeof (kcf_lock_withpad_t),
+ KM_SLEEP);
+ for (i = 0; i < max_ncpus; i++)
+ mutex_init(&crypto_locks[i].kl_lock, NULL, MUTEX_DRIVER, NULL);
+
crypto_dip = dip;
/* allocate integer space for minor numbers */
@@ -377,19 +395,22 @@ static int
crypto_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
minor_t i;
+ kcf_lock_withpad_t *mp;
if (cmd != DDI_DETACH)
return (DDI_FAILURE);
+ mp = &crypto_locks[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
+
/* check if device is open */
- mutex_enter(&crypto_lock);
for (i = 0; i < crypto_minors_table_count; i++) {
if (crypto_minors[i] != NULL) {
- mutex_exit(&crypto_lock);
+ mutex_exit(&mp->kl_lock);
return (DDI_FAILURE);
}
}
- mutex_exit(&crypto_lock);
+ mutex_exit(&mp->kl_lock);
crypto_dip = NULL;
ddi_remove_minor_node(dip, NULL);
@@ -401,7 +422,9 @@ crypto_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
sizeof (crypto_minor_t *) * crypto_minors_table_count);
crypto_minors = NULL;
crypto_minors_table_count = 0;
- mutex_destroy(&crypto_lock);
+ for (i = 0; i < max_ncpus; i++)
+ mutex_destroy(&crypto_locks[i].kl_lock);
+
vmem_destroy(crypto_arena);
crypto_arena = NULL;
@@ -414,6 +437,8 @@ crypto_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
crypto_minor_t *cm = NULL;
minor_t mn;
+ kcf_lock_withpad_t *mp;
+ int i;
if (otyp != OTYP_CHR)
return (ENXIO);
@@ -425,8 +450,10 @@ crypto_open(dev_t *devp, int flag, int otyp, cred_t *credp)
if (flag & FEXCL)
return (ENOTSUP);
- mutex_enter(&crypto_lock);
again:
+ mp = &crypto_locks[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
+
/* grow the minors table if needed */
if (crypto_minors_count >= crypto_minors_table_count) {
crypto_minor_t **newtable;
@@ -437,7 +464,7 @@ again:
big_count = crypto_minors_count + chunk;
if (big_count > MAXMIN) {
- mutex_exit(&crypto_lock);
+ mutex_exit(&mp->kl_lock);
return (ENOMEM);
}
@@ -445,15 +472,16 @@ again:
new_size = sizeof (crypto_minor_t *) *
(crypto_minors_table_count + chunk);
- mutex_exit(&crypto_lock);
- newtable = kmem_zalloc(new_size, KM_SLEEP);
- mutex_enter(&crypto_lock);
+ mutex_exit(&mp->kl_lock);
+ newtable = kmem_zalloc(new_size, KM_SLEEP);
+ CRYPTO_ENTER_ALL_LOCKS();
/*
* Check if table grew while we were sleeping.
* The minors table never shrinks.
*/
if (crypto_minors_table_count > saved_count) {
+ CRYPTO_EXIT_ALL_LOCKS();
kmem_free(newtable, new_size);
goto again;
}
@@ -474,8 +502,10 @@ again:
crypto_minors = newtable;
crypto_minors_table_count += chunk;
+ CRYPTO_EXIT_ALL_LOCKS();
+ } else {
+ mutex_exit(&mp->kl_lock);
}
- mutex_exit(&crypto_lock);
/* allocate a new minor number starting with 1 */
mn = (minor_t)(uintptr_t)vmem_alloc(crypto_arena, 1, VM_SLEEP);
@@ -484,11 +514,11 @@ again:
mutex_init(&cm->cm_lock, NULL, MUTEX_DRIVER, NULL);
cv_init(&cm->cm_cv, NULL, CV_DRIVER, NULL);
- mutex_enter(&crypto_lock);
+ CRYPTO_ENTER_ALL_LOCKS();
cm->cm_refcnt = 1;
crypto_minors[mn - 1] = cm;
crypto_minors_count++;
- mutex_exit(&crypto_lock);
+ CRYPTO_EXIT_ALL_LOCKS();
*devp = makedevice(getmajor(*devp), mn);
@@ -504,35 +534,44 @@ crypto_close(dev_t dev, int flag, int otyp, cred_t *credp)
minor_t mn = getminor(dev);
uint_t i;
size_t total = 0;
+ kcf_lock_withpad_t *mp;
+
+ mp = &crypto_locks[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
- mutex_enter(&crypto_lock);
if (mn > crypto_minors_table_count) {
- mutex_exit(&crypto_lock);
+ mutex_exit(&mp->kl_lock);
cmn_err(CE_WARN, "crypto_close: bad minor (too big) %d", mn);
return (ENODEV);
}
cm = crypto_minors[mn - 1];
if (cm == NULL) {
- mutex_exit(&crypto_lock);
+ mutex_exit(&mp->kl_lock);
cmn_err(CE_WARN, "crypto_close: duplicate close of minor %d",
getminor(dev));
return (ENODEV);
}
- cm->cm_refcnt --; /* decrement refcnt held in open */
- while (cm->cm_refcnt > 0) {
- cv_wait(&cm->cm_cv, &crypto_lock);
- }
+ mutex_exit(&mp->kl_lock);
- /* take it out of the global table */
+ CRYPTO_ENTER_ALL_LOCKS();
+ /*
+ * We free the minor number, mn, from the crypto_arena
+ * only later. This ensures that we won't race with another
+ * thread in crypto_open with the same minor number.
+ */
crypto_minors[mn - 1] = NULL;
crypto_minors_count--;
-
- vmem_free(crypto_arena, (void *)(uintptr_t)mn, 1);
+ CRYPTO_EXIT_ALL_LOCKS();
mutex_enter(&cm->cm_lock);
- mutex_exit(&crypto_lock);
+ cm->cm_refcnt --; /* decrement refcnt held in open */
+ while (cm->cm_refcnt > 0) {
+ cv_wait(&cm->cm_cv, &cm->cm_lock);
+ }
+
+ vmem_free(crypto_arena, (void *)(uintptr_t)mn, 1);
/* free all session table entries starting with 1 */
for (i = 1; i < cm->cm_session_table_count; i++) {
@@ -567,6 +606,7 @@ crypto_close(dev_t dev, int flag, int otyp, cred_t *credp)
kcf_free_provider_tab(cm->cm_provider_count,
cm->cm_provider_array);
+ mutex_exit(&cm->cm_lock);
mutex_destroy(&cm->cm_lock);
cv_destroy(&cm->cm_cv);
kmem_free(cm, sizeof (crypto_minor_t));
@@ -578,27 +618,27 @@ static crypto_minor_t *
crypto_hold_minor(minor_t minor)
{
crypto_minor_t *cm;
+ kcf_lock_withpad_t *mp;
if (minor > crypto_minors_table_count)
return (NULL);
- mutex_enter(&crypto_lock);
+ mp = &crypto_locks[CPU_SEQID];
+ mutex_enter(&mp->kl_lock);
+
if ((cm = crypto_minors[minor - 1]) != NULL) {
- cm->cm_refcnt++;
+ atomic_add_32(&cm->cm_refcnt, 1);
}
- mutex_exit(&crypto_lock);
+ mutex_exit(&mp->kl_lock);
return (cm);
}
static void
crypto_release_minor(crypto_minor_t *cm)
{
- mutex_enter(&crypto_lock);
- cm->cm_refcnt--;
- if (cm->cm_refcnt == 0) {
+ if (atomic_add_32_nv(&cm->cm_refcnt, -1) == 0) {
cv_signal(&cm->cm_cv);
}
- mutex_exit(&crypto_lock);
}
/*
diff --git a/usr/src/uts/common/crypto/spi/kcf_spi.c b/usr/src/uts/common/crypto/spi/kcf_spi.c
index 6aead8fb0f..90e4dd6a0c 100644
--- a/usr/src/uts/common/crypto/spi/kcf_spi.c
+++ b/usr/src/uts/common/crypto/spi/kcf_spi.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -250,12 +250,12 @@ crypto_register_provider(crypto_provider_info_t *info,
* to keep some entries cached to improve performance.
*/
if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
- prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
+ prov_desc->pd_taskq = taskq_create("kcf_taskq",
crypto_taskq_threads, minclsyspri,
crypto_taskq_minalloc, crypto_taskq_maxalloc,
TASKQ_PREPOPULATE);
else
- prov_desc->pd_sched_info.ks_taskq = NULL;
+ prov_desc->pd_taskq = NULL;
/* no kernel session to logical providers */
if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
@@ -308,7 +308,6 @@ crypto_register_provider(crypto_provider_info_t *info,
sizeof (kcf_stats_ks_data_template));
prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
KCF_PROV_REFHOLD(prov_desc);
- KCF_PROV_IREFHOLD(prov_desc);
prov_desc->pd_kstat->ks_private = prov_desc;
prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
kstat_install(prov_desc->pd_kstat);
@@ -319,9 +318,8 @@ crypto_register_provider(crypto_provider_info_t *info,
process_logical_providers(info, prov_desc);
if (need_verify == 1) {
- /* kcf_verify_signature routine will release these holds */
+ /* kcf_verify_signature routine will release this hold */
KCF_PROV_REFHOLD(prov_desc);
- KCF_PROV_IREFHOLD(prov_desc);
if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER) {
/*
@@ -356,6 +354,27 @@ bail:
return (ret);
}
+/* Return the number of holds on a provider. */
+int
+kcf_get_refcnt(kcf_provider_desc_t *pd, boolean_t do_lock)
+{
+ int i;
+ int refcnt = 0;
+
+ if (do_lock)
+ for (i = 0; i < pd->pd_nbins; i++)
+ mutex_enter(&(pd->pd_percpu_bins[i].kp_lock));
+
+ for (i = 0; i < pd->pd_nbins; i++)
+ refcnt += pd->pd_percpu_bins[i].kp_holdcnt;
+
+ if (do_lock)
+ for (i = 0; i < pd->pd_nbins; i++)
+ mutex_exit(&(pd->pd_percpu_bins[i].kp_lock));
+
+ return (refcnt);
+}
+
/*
* This routine is used to notify the framework when a provider is being
* removed. Hardware providers call this routine in their detach routines.
@@ -385,7 +404,7 @@ crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
}
saved_state = desc->pd_state;
- desc->pd_state = KCF_PROV_REMOVED;
+ desc->pd_state = KCF_PROV_UNREGISTERING;
if (saved_state == KCF_PROV_BUSY) {
/*
@@ -395,25 +414,6 @@ crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
cv_broadcast(&desc->pd_resume_cv);
}
- if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
- /*
- * Check if this provider is currently being used.
- * pd_irefcnt is the number of holds from the internal
- * structures. We add one to account for the above lookup.
- */
- if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
- desc->pd_state = saved_state;
- mutex_exit(&desc->pd_lock);
- /* Release reference held by kcf_prov_tab_lookup(). */
- KCF_PROV_REFRELE(desc);
- /*
- * The administrator presumably will stop the clients
- * thus removing the holds, when they get the busy
- * return value. Any retry will succeed then.
- */
- return (CRYPTO_BUSY);
- }
- }
mutex_exit(&desc->pd_lock);
if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
@@ -440,40 +440,58 @@ crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
delete_kstat(desc);
if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
- /* Release reference held by kcf_prov_tab_lookup(). */
- KCF_PROV_REFRELE(desc);
-
/*
- * Wait till the existing requests complete.
+ * Wait till the existing requests with the provider complete
+ * and all the holds are released. All the holds on a software
+ * provider are from kernel clients and the hold time
+ * is expected to be short. So, we won't be stuck here forever.
*/
- mutex_enter(&desc->pd_lock);
- while (desc->pd_state != KCF_PROV_FREED)
- cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
- mutex_exit(&desc->pd_lock);
+ while (kcf_get_refcnt(desc, B_TRUE) > 1) {
+ /* wait 1 second and try again. */
+ delay(1 * drv_usectohz(1000000));
+ }
} else {
+ int i;
+ kcf_prov_cpu_t *mp;
+
/*
* Wait until requests that have been sent to the provider
* complete.
*/
- mutex_enter(&desc->pd_lock);
- while (desc->pd_irefcnt > 0)
- cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
- mutex_exit(&desc->pd_lock);
+ for (i = 0; i < desc->pd_nbins; i++) {
+ mp = &(desc->pd_percpu_bins[i]);
+
+ mutex_enter(&mp->kp_lock);
+ while (mp->kp_jobcnt > 0) {
+ cv_wait(&mp->kp_cv, &mp->kp_lock);
+ }
+ mutex_exit(&mp->kp_lock);
+ }
}
+ mutex_enter(&desc->pd_lock);
+ desc->pd_state = KCF_PROV_UNREGISTERED;
+ mutex_exit(&desc->pd_lock);
+
kcf_do_notify(desc, B_FALSE);
- if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
- /*
- * This is the only place where kcf_free_provider_desc()
- * is called directly. KCF_PROV_REFRELE() should free the
- * structure in all other places.
- */
- ASSERT(desc->pd_state == KCF_PROV_FREED &&
- desc->pd_refcnt == 0);
+ /* Release reference held by kcf_prov_tab_lookup(). */
+ KCF_PROV_REFRELE(desc);
+
+ if (kcf_get_refcnt(desc, B_TRUE) == 0) {
kcf_free_provider_desc(desc);
} else {
- KCF_PROV_REFRELE(desc);
+ ASSERT(desc->pd_prov_type != CRYPTO_SW_PROVIDER);
+ /*
+ * We could avoid this if /dev/crypto can proactively
+ * remove any holds on us from a dormant PKCS #11 app.
+ * For now, a kcfd thread does a periodic check of the
+ * provider table for KCF_PROV_UNREGISTERED entries
+ * and frees them when refcnt reaches zero.
+ */
+ mutex_enter(&prov_tab_mutex);
+ kcf_need_provtab_walk = B_TRUE;
+ mutex_exit(&prov_tab_mutex);
}
return (CRYPTO_SUCCESS);
@@ -592,17 +610,13 @@ crypto_op_notification(crypto_req_handle_t handle, int error)
if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;
- if (error != CRYPTO_SUCCESS)
- sreq->sn_provider->pd_sched_info.ks_nfails++;
- KCF_PROV_IREFRELE(sreq->sn_provider);
+ KCF_PROV_JOB_RELE_STAT(sreq->sn_mp, (error != CRYPTO_SUCCESS));
kcf_sop_done(sreq, error);
} else {
kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;
ASSERT(ctype == CRYPTO_ASYNCH);
- if (error != CRYPTO_SUCCESS)
- areq->an_provider->pd_sched_info.ks_nfails++;
- KCF_PROV_IREFRELE(areq->an_provider);
+ KCF_PROV_JOB_RELE_STAT(areq->an_mp, (error != CRYPTO_SUCCESS));
kcf_aop_done(areq, error);
}
}
@@ -764,6 +778,7 @@ kcf_prov_kstat_update(kstat_t *ksp, int rw)
{
kcf_prov_stats_t *ks_data;
kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
+ int i;
if (rw == KSTAT_WRITE)
return (EACCES);
@@ -776,16 +791,20 @@ kcf_prov_kstat_update(kstat_t *ksp, int rw)
ks_data->ps_ops_failed.value.ui64 = 0;
ks_data->ps_ops_busy_rval.value.ui64 = 0;
} else {
- ks_data->ps_ops_total.value.ui64 =
- pd->pd_sched_info.ks_ndispatches;
- ks_data->ps_ops_failed.value.ui64 =
- pd->pd_sched_info.ks_nfails;
- ks_data->ps_ops_busy_rval.value.ui64 =
- pd->pd_sched_info.ks_nbusy_rval;
- ks_data->ps_ops_passed.value.ui64 =
- pd->pd_sched_info.ks_ndispatches -
- pd->pd_sched_info.ks_nfails -
- pd->pd_sched_info.ks_nbusy_rval;
+ uint64_t dtotal, ftotal, btotal;
+
+ dtotal = ftotal = btotal = 0;
+ /* No locking done since an exact count is not required. */
+ for (i = 0; i < pd->pd_nbins; i++) {
+ dtotal += pd->pd_percpu_bins[i].kp_ndispatches;
+ ftotal += pd->pd_percpu_bins[i].kp_nfails;
+ btotal += pd->pd_percpu_bins[i].kp_nbusy_rval;
+ }
+
+ ks_data->ps_ops_total.value.ui64 = dtotal;
+ ks_data->ps_ops_failed.value.ui64 = ftotal;
+ ks_data->ps_ops_busy_rval.value.ui64 = btotal;
+ ks_data->ps_ops_passed.value.ui64 = dtotal - ftotal - btotal;
}
return (0);
@@ -837,7 +856,6 @@ redo_register_provider(kcf_provider_desc_t *pd)
* table.
*/
KCF_PROV_REFHOLD(pd);
- KCF_PROV_IREFHOLD(pd);
}
/*
@@ -854,7 +872,6 @@ add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
mutex_enter(&p2->pd_lock);
new->pl_next = p2->pd_provider_list;
p2->pd_provider_list = new;
- KCF_PROV_IREFHOLD(p1);
new->pl_provider = p1;
mutex_exit(&p2->pd_lock);
}
@@ -884,7 +901,6 @@ remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
}
/* detach and free kcf_provider_list structure */
- KCF_PROV_IREFRELE(p1);
*prev = pl->pl_next;
kmem_free(pl, sizeof (*pl));
mutex_exit(&p2->pd_lock);
@@ -942,7 +958,6 @@ remove_provider(kcf_provider_desc_t *pp)
if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
p->pd_provider_list == NULL)
p->pd_flags &= ~KCF_LPROV_MEMBER;
- KCF_PROV_IREFRELE(p);
next = e->pl_next;
kmem_free(e, sizeof (*e));
}
@@ -1010,6 +1025,5 @@ delete_kstat(kcf_provider_desc_t *desc)
kstat_delete(kspd->pd_kstat);
desc->pd_kstat = NULL;
KCF_PROV_REFRELE(kspd);
- KCF_PROV_IREFRELE(kspd);
}
}
diff --git a/usr/src/uts/common/sys/crypto/impl.h b/usr/src/uts/common/sys/crypto/impl.h
index d416e710c5..c9b38d45ae 100644
--- a/usr/src/uts/common/sys/crypto/impl.h
+++ b/usr/src/uts/common/sys/crypto/impl.h
@@ -43,6 +43,7 @@
#include <sys/project.h>
#include <sys/taskq.h>
#include <sys/rctl.h>
+#include <sys/cpuvar.h>
#endif /* _KERNEL */
#ifdef __cplusplus
@@ -51,8 +52,6 @@ extern "C" {
#ifdef _KERNEL
-#define KCF_MODULE "kcf"
-
/*
* Prefixes convention: structures internal to the kernel cryptographic
* framework start with 'kcf_'. Exposed structure start with 'crypto_'.
@@ -79,45 +78,42 @@ typedef struct kcf_stats {
kstat_named_t ks_taskq_maxalloc;
} kcf_stats_t;
+#define CPU_SEQID (CPU->cpu_seqid)
+
+typedef struct kcf_lock_withpad {
+ kmutex_t kl_lock;
+ uint8_t kl_pad[64 - sizeof (kmutex_t)];
+} kcf_lock_withpad_t;
+
/*
- * Keep all the information needed by the scheduler from
- * this provider.
+ * Per-CPU structure used by a provider to keep track of
+ * various counters.
*/
-typedef struct kcf_sched_info {
- /* The number of operations dispatched. */
- uint64_t ks_ndispatches;
+typedef struct kcf_prov_cpu {
+ kmutex_t kp_lock;
+ int kp_holdcnt; /* can go negative! */
+ uint_t kp_jobcnt;
- /* The number of operations that failed. */
- uint64_t ks_nfails;
+ uint64_t kp_ndispatches;
+ uint64_t kp_nfails;
+ uint64_t kp_nbusy_rval;
+ kcondvar_t kp_cv;
- /* The number of operations that returned CRYPTO_BUSY. */
- uint64_t ks_nbusy_rval;
-
- /* taskq used to dispatch crypto requests */
- taskq_t *ks_taskq;
-} kcf_sched_info_t;
+ uint8_t kp_pad[64 - sizeof (kmutex_t) - 2 * sizeof (int) -
+ 3 * sizeof (uint64_t) - sizeof (kcondvar_t)];
+} kcf_prov_cpu_t;
/*
- * pd_irefcnt approximates the number of inflight requests to the
- * provider. Though we increment this counter during registration for
- * other purposes, that base value is mostly same across all providers.
- * So, it is a good measure of the load on a provider when it is not
- * in a busy state. Once a provider notifies it is busy, requests
+ * kcf_get_refcnt(pd) is the number of inflight requests to the
+ * provider. So, it is a good measure of the load on a provider when
+ * it is not in a busy state. Once a provider notifies it is busy, requests
* backup in the taskq. So, we use tq_nalloc in that case which gives
* the number of task entries in the task queue. Note that we do not
* acquire any locks here as it is not critical to get the exact number
- * and the lock contention may be too costly for this code path.
+ * and the lock contention is too costly for this code path.
*/
#define KCF_PROV_LOAD(pd) ((pd)->pd_state != KCF_PROV_BUSY ? \
- (pd)->pd_irefcnt : (pd)->pd_sched_info.ks_taskq->tq_nalloc)
-
-#define KCF_PROV_INCRSTATS(pd, error) { \
- (pd)->pd_sched_info.ks_ndispatches++; \
- if (error == CRYPTO_BUSY) \
- (pd)->pd_sched_info.ks_nbusy_rval++; \
- else if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED) \
- (pd)->pd_sched_info.ks_nfails++; \
-}
+ kcf_get_refcnt(pd, B_FALSE) : (pd)->pd_taskq->tq_nalloc)
/*
@@ -160,14 +156,14 @@ typedef enum {
* if the current state < KCF_PROV_DISABLED.
*/
KCF_PROV_DISABLED,
- KCF_PROV_REMOVED,
- KCF_PROV_FREED
+ KCF_PROV_UNREGISTERING,
+ KCF_PROV_UNREGISTERED
} kcf_prov_state_t;
#define KCF_IS_PROV_UNVERIFIED(pd) ((pd)->pd_state == KCF_PROV_UNVERIFIED)
#define KCF_IS_PROV_USABLE(pd) ((pd)->pd_state == KCF_PROV_READY || \
(pd)->pd_state == KCF_PROV_BUSY)
-#define KCF_IS_PROV_REMOVED(pd) ((pd)->pd_state >= KCF_PROV_REMOVED)
+#define KCF_IS_PROV_REMOVED(pd) ((pd)->pd_state >= KCF_PROV_UNREGISTERING)
/* Internal flags valid for pd_flags field */
#define KCF_PROV_RESTRICTED 0x40000000
@@ -181,8 +177,10 @@ typedef enum {
* pd_prov_type: Provider type, hardware or software
* pd_sid: Session ID of the provider used by kernel clients.
* This is valid only for session-oriented providers.
- * pd_refcnt: Reference counter to this provider descriptor
- * pd_irefcnt: References held by the framework internal structs
+ * pd_taskq: taskq used to dispatch crypto requests
+ * pd_nbins: number of bins in pd_percpu_bins
+ * pd_percpu_bins: Pointer to an array of per-CPU structures
+ * containing a lock, a cv and various counters.
* pd_lock: lock protects pd_state and pd_provider_list
* pd_state: State value of the provider
* pd_provider_list: Used to cross-reference logical providers and their
@@ -194,18 +192,16 @@ typedef enum {
* number to an index in pd_mechanisms array
* pd_mechanisms: Array of mechanisms supported by the provider, specified
* by the provider during registration
- * pd_sched_info: Scheduling information associated with the provider
* pd_mech_list_count: The number of entries in pi_mechanisms, specified
* by the provider during registration
* pd_name: Device name or module name
* pd_instance: Device instance
* pd_module_id: Module ID returned by modload
* pd_mctlp: Pointer to modctl structure for this provider
- * pd_remove_cv: cv to wait on while the provider queue drains
* pd_description: Provider description string
- * pd_flags bitwise OR of pi_flags from crypto_provider_info_t
+ * pd_flags: bitwise OR of pi_flags from crypto_provider_info_t
* and other internal flags defined above.
- * pd_hash_limit Maximum data size that hash mechanisms of this provider
+ * pd_hash_limit: Maximum data size that hash mechanisms of this provider
* can support.
* pd_kcf_prov_handle: KCF-private handle assigned by KCF
* pd_prov_id: Identification # assigned by KCF to provider
@@ -215,8 +211,9 @@ typedef enum {
typedef struct kcf_provider_desc {
crypto_provider_type_t pd_prov_type;
crypto_session_id_t pd_sid;
- uint_t pd_refcnt;
- uint_t pd_irefcnt;
+ taskq_t *pd_taskq;
+ uint_t pd_nbins;
+ kcf_prov_cpu_t *pd_percpu_bins;
kmutex_t pd_lock;
kcf_prov_state_t pd_state;
struct kcf_provider_list *pd_provider_list;
@@ -226,13 +223,11 @@ typedef struct kcf_provider_desc {
ushort_t pd_mech_indx[KCF_OPS_CLASSSIZE]\
[KCF_MAXMECHTAB];
crypto_mech_info_t *pd_mechanisms;
- kcf_sched_info_t pd_sched_info;
uint_t pd_mech_list_count;
char *pd_name;
uint_t pd_instance;
int pd_module_id;
struct modctl *pd_mctlp;
- kcondvar_t pd_remove_cv;
char *pd_description;
uint_t pd_flags;
uint_t pd_hash_limit;
@@ -253,34 +248,62 @@ typedef struct kcf_provider_list {
* it REFHOLD()s. A new provider descriptor which is referenced only
* by the providers table has a reference counter of one.
*/
-#define KCF_PROV_REFHOLD(desc) { \
- atomic_add_32(&(desc)->pd_refcnt, 1); \
- ASSERT((desc)->pd_refcnt != 0); \
+#define KCF_PROV_REFHOLD(desc) { \
+ kcf_prov_cpu_t *mp; \
+ \
+ mp = &((desc)->pd_percpu_bins[CPU_SEQID]); \
+ mutex_enter(&mp->kp_lock); \
+ mp->kp_holdcnt++; \
+ mutex_exit(&mp->kp_lock); \
}
-#define KCF_PROV_IREFHOLD(desc) { \
- atomic_add_32(&(desc)->pd_irefcnt, 1); \
- ASSERT((desc)->pd_irefcnt != 0); \
+#define KCF_PROV_REFRELE(desc) { \
+ kcf_prov_cpu_t *mp; \
+ \
+ mp = &((desc)->pd_percpu_bins[CPU_SEQID]); \
+ mutex_enter(&mp->kp_lock); \
+ mp->kp_holdcnt--; \
+ mutex_exit(&mp->kp_lock); \
}
-#define KCF_PROV_IREFRELE(desc) { \
- ASSERT((desc)->pd_irefcnt != 0); \
- membar_exit(); \
- if (atomic_add_32_nv(&(desc)->pd_irefcnt, -1) == 0) { \
- cv_broadcast(&(desc)->pd_remove_cv); \
- } \
+#define KCF_PROV_REFHELD(desc) (kcf_get_refcnt(desc, B_TRUE) >= 1)
+
+/*
+ * The JOB macros are used only for a hardware provider.
+ * Hardware providers can have holds that stay forever.
+ * So, the job counter is used to check if it is safe to
+ * unregister a provider.
+ */
+#define KCF_PROV_JOB_HOLD(mp) { \
+ mutex_enter(&(mp)->kp_lock); \
+ (mp)->kp_jobcnt++; \
+ mutex_exit(&(mp)->kp_lock); \
}
-#define KCF_PROV_REFHELD(desc) ((desc)->pd_refcnt >= 1)
+#define KCF_PROV_JOB_RELE(mp) { \
+ mutex_enter(&(mp)->kp_lock); \
+ (mp)->kp_jobcnt--; \
+ if ((mp)->kp_jobcnt == 0) \
+ cv_signal(&(mp)->kp_cv); \
+ mutex_exit(&(mp)->kp_lock); \
+}
-#define KCF_PROV_REFRELE(desc) { \
- ASSERT((desc)->pd_refcnt != 0); \
- membar_exit(); \
- if (atomic_add_32_nv(&(desc)->pd_refcnt, -1) == 0) { \
- kcf_provider_zero_refcnt((desc)); \
- } \
+#define KCF_PROV_JOB_RELE_STAT(mp, doincr) { \
+ if (doincr) \
+ (mp)->kp_nfails++; \
+ KCF_PROV_JOB_RELE(mp); \
}
+#define KCF_PROV_INCRSTATS(pd, error) { \
+ kcf_prov_cpu_t *mp; \
+ \
+ mp = &((pd)->pd_percpu_bins[CPU_SEQID]); \
+ mp->kp_ndispatches++; \
+ if ((error) == CRYPTO_BUSY) \
+ mp->kp_nbusy_rval++; \
+ else if ((error) != CRYPTO_SUCCESS && (error) != CRYPTO_QUEUED) \
+ mp->kp_nfails++; \
+}
/* list of crypto_mech_info_t valid as the second mech in a dual operation */
@@ -310,10 +333,11 @@ typedef struct kcf_prov_mech_desc {
#define pm_provider_handle pm_prov_desc.pd_provider_handle
#define pm_ops_vector pm_prov_desc.pd_ops_vector
+extern kcf_lock_withpad_t *me_mutexes;
#define KCF_CPU_PAD (128 - sizeof (crypto_mech_name_t) - \
sizeof (crypto_mech_type_t) - \
- sizeof (kmutex_t) - 2 * sizeof (kcf_prov_mech_desc_t *) - \
+ 2 * sizeof (kcf_prov_mech_desc_t *) - \
sizeof (int) - sizeof (uint32_t) - sizeof (size_t))
/*
@@ -323,7 +347,6 @@ typedef struct kcf_prov_mech_desc {
typedef struct kcf_mech_entry {
crypto_mech_name_t me_name; /* mechanism name */
crypto_mech_type_t me_mechid; /* Internal id for mechanism */
- kmutex_t me_mutex; /* access protection */
kcf_prov_mech_desc_t *me_hw_prov_chain; /* list of HW providers */
kcf_prov_mech_desc_t *me_sw_prov; /* SW provider */
/*
@@ -1304,7 +1327,6 @@ extern int kcf_add_mech_provider(short, kcf_provider_desc_t *,
extern void kcf_remove_mech_provider(char *, kcf_provider_desc_t *);
extern int kcf_get_mech_entry(crypto_mech_type_t, kcf_mech_entry_t **);
extern kcf_provider_desc_t *kcf_alloc_provider_desc(crypto_provider_info_t *);
-extern void kcf_provider_zero_refcnt(kcf_provider_desc_t *);
extern void kcf_free_provider_desc(kcf_provider_desc_t *);
extern void kcf_soft_config_init(void);
extern int get_sw_provider_for_mech(crypto_mech_name_t, char **);
@@ -1354,6 +1376,11 @@ extern kcf_provider_desc_t *kcf_prov_tab_lookup(crypto_provider_id_t);
extern int kcf_get_sw_prov(crypto_mech_type_t, kcf_provider_desc_t **,
kcf_mech_entry_t **, boolean_t);
+extern kmutex_t prov_tab_mutex;
+extern boolean_t kcf_need_provtab_walk;
+extern void kcf_free_unregistered_provs(void);
+extern int kcf_get_refcnt(kcf_provider_desc_t *, boolean_t);
+
/* Access to the policy table */
extern boolean_t is_mech_disabled(kcf_provider_desc_t *, crypto_mech_name_t);
extern boolean_t is_mech_disabled_byname(crypto_provider_type_t, char *,
diff --git a/usr/src/uts/common/sys/crypto/sched_impl.h b/usr/src/uts/common/sys/crypto/sched_impl.h
index b457e5b7a4..f3efe3857f 100644
--- a/usr/src/uts/common/sys/crypto/sched_impl.h
+++ b/usr/src/uts/common/sys/crypto/sched_impl.h
@@ -19,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_CRYPTO_SCHED_IMPL_H
#define _SYS_CRYPTO_SCHED_IMPL_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Scheduler internal structures.
*/
@@ -104,6 +102,9 @@ typedef struct kcf_prov_tried {
struct kcf_prov_tried *pt_next;
} kcf_prov_tried_t;
+/* Must be different from KM_SLEEP and KM_NOSLEEP */
+#define KCF_HOLD_PROV 0x1000
+
#define IS_FG_SUPPORTED(mdesc, fg) \
(((mdesc)->pm_mech_info.cm_func_group_mask & (fg)) != 0)
@@ -153,6 +154,8 @@ typedef struct kcf_sreq_node {
/* Provider handling this request */
kcf_provider_desc_t *sn_provider;
+
+ kcf_prov_cpu_t *sn_mp;
} kcf_sreq_node_t;
/*
@@ -199,6 +202,7 @@ typedef struct kcf_areq_node {
/* Provider handling this request */
kcf_provider_desc_t *an_provider;
+ kcf_prov_cpu_t *an_mp;
kcf_prov_tried_t *an_tried_plist;
struct kcf_areq_node *an_idnext; /* Next in ID hash */