diff options
Diffstat (limited to 'usr/src/uts/common/crypto')
-rw-r--r--	usr/src/uts/common/crypto/core/kcf_cryptoadm.c	6
-rw-r--r--	usr/src/uts/common/crypto/io/aes.c	186
-rw-r--r--	usr/src/uts/common/crypto/io/dprov.c	111
3 files changed, 280 insertions, 23 deletions
diff --git a/usr/src/uts/common/crypto/core/kcf_cryptoadm.c b/usr/src/uts/common/crypto/core/kcf_cryptoadm.c index 11c4a8f2fe..d3fa3b5f86 100644 --- a/usr/src/uts/common/crypto/core/kcf_cryptoadm.c +++ b/usr/src/uts/common/crypto/core/kcf_cryptoadm.c @@ -99,7 +99,8 @@ kcf_soft_config_init(void) * * # /etc/crypto/kcf.conf * des:supportedlist=CKM_DES_CBC,CKM_DES_ECB,CKM_DES3_CBC,CKM_DES3_ECB - * aes:supportedlist=CKM_AES_ECB,CKM_AES_CBC,CKM_AES_CTR,CKM_AES_CCM + * aes:supportedlist=CKM_AES_ECB,CKM_AES_CBC,CKM_AES_CTR,CKM_AES_CCM, + * CKM_AES_GCM * arcfour:supportedlist=CKM_RC4 * blowfish:supportedlist=CKM_BLOWFISH_ECB,CKM_BLOWFISH_CBC * ecc:supportedlist=CKM_EC_KEY_PAIR_GEN,CKM_ECDH1_DERIVE,CKM_ECDSA,\ @@ -130,7 +131,8 @@ kcf_soft_config_init(void) static crypto_mech_name_t des_mechs[] = { "CKM_DES_CBC", "CKM_DES_ECB", "CKM_DES3_CBC", "CKM_DES3_ECB", ""}; static crypto_mech_name_t aes_mechs[] = { - "CKM_AES_ECB", "CKM_AES_CBC", "CKM_AES_CTR", "CKM_AES_CCM", ""}; + "CKM_AES_ECB", "CKM_AES_CBC", "CKM_AES_CTR", "CKM_AES_CCM", + "CKM_AES_GCM", ""}; static crypto_mech_name_t arcfour_mechs[] = { "CKM_RC4", ""}; static crypto_mech_name_t blowfish_mechs[] = { diff --git a/usr/src/uts/common/crypto/io/aes.c b/usr/src/uts/common/crypto/io/aes.c index 5e0739fb1c..9e368cf418 100644 --- a/usr/src/uts/common/crypto/io/aes.c +++ b/usr/src/uts/common/crypto/io/aes.c @@ -23,8 +23,6 @@ * Use is subject to license terms. 
*/ -#pragma ident "%Z%%M% %I% %E% SMI" - /* * AES provider for the Kernel Cryptographic Framework (KCF) */ @@ -66,7 +64,8 @@ typedef enum aes_mech_type { AES_CBC_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC */ AES_CBC_PAD_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC_PAD */ AES_CTR_MECH_INFO_TYPE, /* SUN_CKM_AES_CTR */ - AES_CCM_MECH_INFO_TYPE /* SUN_CKM_AES_CCM */ + AES_CCM_MECH_INFO_TYPE, /* SUN_CKM_AES_CCM */ + AES_GCM_MECH_INFO_TYPE /* SUN_CKM_AES_GCM */ } aes_mech_type_t; /* @@ -103,6 +102,11 @@ static crypto_mech_info_t aes_mech_info_tab[] = { {SUN_CKM_AES_CCM, AES_CCM_MECH_INFO_TYPE, CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC, + AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES}, + /* AES_GCM */ + {SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE, + CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | + CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC, AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES} }; @@ -301,6 +305,15 @@ aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx, int kmflag) if (ctx != NULL) p = ccm_alloc_ctx(kmflag); break; + case AES_GCM_MECH_INFO_TYPE: + if (mechanism->cm_param != NULL && + mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) { + rv = CRYPTO_MECHANISM_PARAM_INVALID; + break; + } + if (ctx != NULL) + p = gcm_alloc_ctx(kmflag); + break; default: rv = CRYPTO_MECHANISM_INVALID; } @@ -447,8 +460,7 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, * The length requirement for AES CCM mode has already been checked * at init time */ - if (((aes_ctx->ac_flags & CTR_MODE) == 0) && - ((aes_ctx->ac_flags & CCM_MODE) == 0) && + if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE)) == 0) && (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0) return (CRYPTO_DATA_LEN_RANGE); @@ -460,6 +472,8 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, */ if (aes_ctx->ac_flags & CCM_MODE) { length_needed = plaintext->cd_length + aes_ctx->ac_mac_len; + } else if 
(aes_ctx->ac_flags & GCM_MODE) { + length_needed = plaintext->cd_length + aes_ctx->ac_mac_len; } else { length_needed = plaintext->cd_length; } @@ -486,7 +500,7 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, */ if (aes_ctx->ac_flags & CCM_MODE) { /* - * aes_ccm_encrypt_final() will compute the MAC and append + * ccm_encrypt_final() will compute the MAC and append * it to existing ciphertext. So, need to adjust the left over * length value accordingly */ @@ -505,6 +519,28 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, ciphertext->cd_offset - saved_offset; } ciphertext->cd_offset = saved_offset; + } else if (aes_ctx->ac_flags & GCM_MODE) { + /* + * gcm_encrypt_final() will compute the MAC and append + * it to existing ciphertext. So, need to adjust the left over + * length value accordingly + */ + + /* order of following 2 lines MUST not be reversed */ + ciphertext->cd_offset = ciphertext->cd_length; + ciphertext->cd_length = saved_length - ciphertext->cd_length; + ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, ciphertext, + AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); + if (ret != CRYPTO_SUCCESS) { + return (ret); + } + + if (plaintext != ciphertext) { + ciphertext->cd_length = + ciphertext->cd_offset - saved_offset; + } + ciphertext->cd_offset = saved_offset; } ASSERT(aes_ctx->ac_remainder_len == 0); @@ -540,8 +576,7 @@ aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext, * The length requirement for AES CCM mode has already been checked * at init time */ - if (((aes_ctx->ac_flags & CTR_MODE) == 0) && - ((aes_ctx->ac_flags & CCM_MODE) == 0) && + if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE)) == 0) && (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0) { return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE); } @@ -556,8 +591,18 @@ aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext, * smaller than size of the cipher text. 
*/ if (aes_ctx->ac_flags & CCM_MODE) { - if (plaintext->cd_length < aes_ctx->ac_data_len) { - plaintext->cd_length = aes_ctx->ac_data_len; + if (plaintext->cd_length < aes_ctx->ac_processed_data_len) { + plaintext->cd_length = aes_ctx->ac_processed_data_len; + return (CRYPTO_BUFFER_TOO_SMALL); + } + saved_offset = plaintext->cd_offset; + saved_length = plaintext->cd_length; + } else if (aes_ctx->ac_flags & GCM_MODE) { + gcm_ctx_t *ctx = (gcm_ctx_t *)aes_ctx; + size_t pt_len = ciphertext->cd_length - ctx->gcm_tag_len; + + if (plaintext->cd_length < pt_len) { + plaintext->cd_length = pt_len; return (CRYPTO_BUFFER_TOO_SMALL); } saved_offset = plaintext->cd_offset; @@ -596,6 +641,23 @@ aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext, } plaintext->cd_offset = saved_offset; + } else if (aes_ctx->ac_flags & GCM_MODE) { + /* order of following 2 lines MUST not be reversed */ + plaintext->cd_offset = plaintext->cd_length; + plaintext->cd_length = saved_length - plaintext->cd_length; + + ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, plaintext, + AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); + if (ret == CRYPTO_SUCCESS) { + if (plaintext != ciphertext) { + plaintext->cd_length = + plaintext->cd_offset - saved_offset; + } + } else { + plaintext->cd_length = saved_length; + } + + plaintext->cd_offset = saved_offset; } ASSERT(aes_ctx->ac_remainder_len == 0); @@ -700,20 +762,29 @@ aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext, AES_ARG_INPLACE(ciphertext, plaintext); - /* compute number of bytes that will hold the plaintext */ - out_len = aes_ctx->ac_remainder_len; - out_len += ciphertext->cd_length; - out_len &= ~(AES_BLOCK_LEN - 1); - - /* return length needed to store the output */ - if (plaintext->cd_length < out_len) { - plaintext->cd_length = out_len; - return (CRYPTO_BUFFER_TOO_SMALL); + /* + * Compute number of bytes that will hold the plaintext. 
+ * This is not necessary for CCM and GCM since these mechanisms + * never return plaintext for update operations. + */ + if ((aes_ctx->ac_flags & (CCM_MODE|GCM_MODE)) == 0) { + out_len = aes_ctx->ac_remainder_len; + out_len += ciphertext->cd_length; + out_len &= ~(AES_BLOCK_LEN - 1); + + /* return length needed to store the output */ + if (plaintext->cd_length < out_len) { + plaintext->cd_length = out_len; + return (CRYPTO_BUFFER_TOO_SMALL); + } } saved_offset = plaintext->cd_offset; saved_length = plaintext->cd_length; + if (aes_ctx->ac_flags & GCM_MODE) + gcm_set_kmflag((gcm_ctx_t *)aes_ctx, crypto_kmflag(req)); + /* * Do the AES update on the specified input data. */ @@ -796,6 +867,17 @@ aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data, if (ret != CRYPTO_SUCCESS) { return (ret); } + } else if (aes_ctx->ac_flags & GCM_MODE) { + size_t saved_offset = data->cd_offset; + + ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, data, + AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); + if (ret != CRYPTO_SUCCESS) { + return (ret); + } + data->cd_length = data->cd_offset - saved_offset; + data->cd_offset = saved_offset; } else { /* * There must be no unprocessed plaintext. 
@@ -883,11 +965,37 @@ aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data, if (ret != CRYPTO_SUCCESS) { return (ret); } + } else if (aes_ctx->ac_flags & GCM_MODE) { + /* + * This is where all the plaintext is returned, make sure + * the plaintext buffer is big enough + */ + gcm_ctx_t *ctx = (gcm_ctx_t *)aes_ctx; + size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len; + + if (data->cd_length < pt_len) { + data->cd_length = pt_len; + return (CRYPTO_BUFFER_TOO_SMALL); + } + + saved_offset = data->cd_offset; + saved_length = data->cd_length; + ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, data, + AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); + if (ret == CRYPTO_SUCCESS) { + data->cd_length = data->cd_offset - saved_offset; + } else { + data->cd_length = saved_length; + } + + data->cd_offset = saved_offset; + if (ret != CRYPTO_SUCCESS) { + return (ret); + } } - if (((aes_ctx->ac_flags & CTR_MODE) == 0) && - ((aes_ctx->ac_flags & CCM_MODE) == 0)) { + if ((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE)) == 0) { data->cd_length = 0; } @@ -980,6 +1088,13 @@ aes_encrypt_atomic(crypto_provider_handle_t provider, if (ret != CRYPTO_SUCCESS) goto out; ASSERT(aes_ctx.ac_remainder_len == 0); + } else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) { + ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx, + ciphertext, AES_BLOCK_LEN, aes_encrypt_block, + aes_copy_block, aes_xor_block); + if (ret != CRYPTO_SUCCESS) + goto out; + ASSERT(aes_ctx.ac_remainder_len == 0); } else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) { if (aes_ctx.ac_remainder_len > 0) { ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx, @@ -1070,6 +1185,9 @@ aes_decrypt_atomic(crypto_provider_handle_t provider, saved_offset = plaintext->cd_offset; saved_length = plaintext->cd_length; + if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) + gcm_set_kmflag((gcm_ctx_t *)&aes_ctx, crypto_kmflag(req)); + /* * Do an update on the specified input data. 
*/ @@ -1107,6 +1225,18 @@ } else { plaintext->cd_length = saved_length; } + } else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) { + ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx, + plaintext, AES_BLOCK_LEN, aes_encrypt_block, + aes_xor_block); + ASSERT(aes_ctx.ac_remainder_len == 0); + if ((ret == CRYPTO_SUCCESS) && + (ciphertext != plaintext)) { + plaintext->cd_length = + plaintext->cd_offset - saved_offset; + } else { + plaintext->cd_length = saved_length; + } } else if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) { ASSERT(aes_ctx.ac_remainder_len == 0); if (ciphertext != plaintext) @@ -1140,6 +1270,11 @@ out: if (aes_ctx.ac_pt_buf != NULL) { kmem_free(aes_ctx.ac_pt_buf, aes_ctx.ac_data_len); } + } else if (aes_ctx.ac_flags & GCM_MODE) { + if (((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf != NULL) { + kmem_free(((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf, + ((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf_len); + } } return (ret); @@ -1273,6 +1408,15 @@ aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template, kmflag, is_encrypt_init, AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); break; + case AES_GCM_MECH_INFO_TYPE: + if (mechanism->cm_param == NULL || + mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) { + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + rv = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param, + AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); + break; case AES_ECB_MECH_INFO_TYPE: aes_ctx->ac_flags |= ECB_MODE; } diff --git a/usr/src/uts/common/crypto/io/dprov.c b/usr/src/uts/common/crypto/io/dprov.c index e08a70dcaf..a6a070ad6a 100644 --- a/usr/src/uts/common/crypto/io/dprov.c +++ b/usr/src/uts/common/crypto/io/dprov.c @@ -231,6 +231,7 @@ typedef enum dprov_mech_type { AES_ECB_MECH_INFO_TYPE, /* SUN_CKM_AES_ECB */ AES_CTR_MECH_INFO_TYPE, /* SUN_CKM_AES_CTR */ AES_CCM_MECH_INFO_TYPE, /* SUN_CKM_AES_CCM */ + AES_GCM_MECH_INFO_TYPE, /* SUN_CKM_AES_GCM */ 
RC4_MECH_INFO_TYPE, /* SUN_CKM_RC4 */ RSA_PKCS_MECH_INFO_TYPE, /* SUN_CKM_RSA_PKCS */ RSA_X_509_MECH_INFO_TYPE, /* SUN_CKM_RSA_X_509 */ @@ -495,6 +496,13 @@ static crypto_mech_info_t dprov_mech_info_tab[] = { CRYPTO_FG_DECRYPT_ATOMIC | CRYPTO_FG_ENCRYPT_MAC_ATOMIC | CRYPTO_FG_MAC_DECRYPT_ATOMIC, AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}, + /* AES-GCM */ + {SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE, + CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_ENCRYPT_MAC | + CRYPTO_FG_MAC_DECRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | + CRYPTO_FG_DECRYPT_ATOMIC | CRYPTO_FG_ENCRYPT_MAC_ATOMIC | + CRYPTO_FG_MAC_DECRYPT_ATOMIC, + AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}, /* RC4 */ {SUN_CKM_RC4, RC4_MECH_INFO_TYPE, CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | @@ -2298,6 +2306,7 @@ dprov_valid_cipher_mech(crypto_mech_type_t mech_type) mech_type == AES_ECB_MECH_INFO_TYPE || mech_type == AES_CTR_MECH_INFO_TYPE || mech_type == AES_CCM_MECH_INFO_TYPE || + mech_type == AES_GCM_MECH_INFO_TYPE || mech_type == RC4_MECH_INFO_TYPE || mech_type == RSA_PKCS_MECH_INFO_TYPE || mech_type == RSA_X_509_MECH_INFO_TYPE || @@ -4371,6 +4380,88 @@ out: return (rv); } +/* + * Resource control checks don't need to be done. Why? Because this routine + * knows the size of the structure, and it can't be overridden by a user. + * This is different from the crypto module, which has no knowledge of + * specific mechanisms, and therefore has to trust specified size of the + * parameter. This trust, or lack of trust, is why the size of the + * parameter has to be charged against the project resource control. 
+ */ +static int +copyin_aes_gcm_mech(crypto_mechanism_t *in_mech, crypto_mechanism_t *out_mech, + int *out_error, int mode) +{ + STRUCT_DECL(crypto_mechanism, mech); + STRUCT_DECL(CK_AES_GCM_PARAMS, params); + CK_AES_GCM_PARAMS *aes_gcm_params; + caddr_t pp; + size_t param_len; + int error = 0; + int rv = 0; + + STRUCT_INIT(mech, mode); + STRUCT_INIT(params, mode); + bcopy(in_mech, STRUCT_BUF(mech), STRUCT_SIZE(mech)); + pp = STRUCT_FGETP(mech, cm_param); + param_len = STRUCT_FGET(mech, cm_param_len); + + if (param_len != STRUCT_SIZE(params)) { + rv = CRYPTO_ARGUMENTS_BAD; + goto out; + } + + out_mech->cm_type = STRUCT_FGET(mech, cm_type); + out_mech->cm_param = NULL; + out_mech->cm_param_len = 0; + if (pp != NULL) { + size_t nonce_len, auth_data_len, total_param_len; + + if (copyin((char *)pp, STRUCT_BUF(params), param_len) != 0) { + out_mech->cm_param = NULL; + error = EFAULT; + goto out; + } + + nonce_len = STRUCT_FGET(params, ulIvLen); + auth_data_len = STRUCT_FGET(params, ulAADLen); + + /* allocate param structure */ + total_param_len = + sizeof (CK_AES_GCM_PARAMS) + nonce_len + auth_data_len; + aes_gcm_params = kmem_alloc(total_param_len, KM_NOSLEEP); + if (aes_gcm_params == NULL) { + rv = CRYPTO_HOST_MEMORY; + goto out; + } + aes_gcm_params->ulTagBits = STRUCT_FGET(params, ulTagBits); + aes_gcm_params->ulIvLen = nonce_len; + aes_gcm_params->ulAADLen = auth_data_len; + aes_gcm_params->pIv + = (uchar_t *)aes_gcm_params + sizeof (CK_AES_GCM_PARAMS); + aes_gcm_params->pAAD = aes_gcm_params->pIv + nonce_len; + + if (copyin((char *)STRUCT_FGETP(params, pIv), + aes_gcm_params->pIv, nonce_len) != 0) { + kmem_free(aes_gcm_params, total_param_len); + out_mech->cm_param = NULL; + error = EFAULT; + goto out; + } + if (copyin((char *)STRUCT_FGETP(params, pAAD), + aes_gcm_params->pAAD, auth_data_len) != 0) { + kmem_free(aes_gcm_params, total_param_len); + out_mech->cm_param = NULL; + error = EFAULT; + goto out; + } + out_mech->cm_param = (char *)aes_gcm_params; + 
out_mech->cm_param_len = sizeof (CK_AES_GCM_PARAMS); + } +out: + *out_error = error; + return (rv); +} /* * Resource control checks don't need to be done. Why? Because this routine @@ -4594,6 +4685,10 @@ dprov_copyin_mechanism(crypto_provider_handle_t provider, rv = copyin_aes_ccm_mech(umech, kmech, &error, mode); goto out; + case AES_GCM_MECH_INFO_TYPE: + rv = copyin_aes_gcm_mech(umech, kmech, &error, mode); + goto out; + case DH_PKCS_DERIVE_MECH_INFO_TYPE: expected_param_len = param_len; break; @@ -4690,6 +4785,22 @@ dprov_free_mechanism(crypto_provider_handle_t provider, mech->cm_param = NULL; mech->cm_param_len = 0; } + return (CRYPTO_SUCCESS); + } + case AES_GCM_MECH_INFO_TYPE: { + CK_AES_GCM_PARAMS *params; + size_t total_param_len; + + if ((mech->cm_param != NULL) && (mech->cm_param_len != 0)) { + /* LINTED: pointer alignment */ + params = (CK_AES_GCM_PARAMS *)mech->cm_param; + total_param_len = mech->cm_param_len + + params->ulIvLen + params->ulAADLen; + kmem_free(params, total_param_len); + mech->cm_param = NULL; + mech->cm_param_len = 0; + } + return (CRYPTO_SUCCESS); } default: |