-rw-r--r--  usr/src/common/crypto/aes/aes_impl.c       |   8
-rw-r--r--  usr/src/common/crypto/modes/gcm.c          | 626
-rw-r--r--  usr/src/common/crypto/modes/modes.c        |  32
-rw-r--r--  usr/src/common/crypto/modes/modes.h        |  78
-rw-r--r--  usr/src/uts/common/Makefile.files          |   2
-rw-r--r--  usr/src/uts/common/crypto/core/kcf_cryptoadm.c |   6
-rw-r--r--  usr/src/uts/common/crypto/io/aes.c         | 186
-rw-r--r--  usr/src/uts/common/crypto/io/dprov.c       | 111
-rw-r--r--  usr/src/uts/common/sys/crypto/common.h     |  23
9 files changed, 1040 insertions(+), 32 deletions(-)
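The heart of the new gcm.c below is gcm_mul(), a bit-serial multiplication in GF(2^128) over the GCM reduction polynomial; the GHASH macro XORs each 16-byte block into a running hash and multiplies the result by the subkey H. Here is a minimal standalone sketch of the same algorithm, not part of the patch: the name gf128_mul and the host-byte-order operand convention are illustrative assumptions (the kernel code converts with ntohll()/htonll() instead).

#include <stdint.h>

/*
 * Multiply x by y in GF(2^128), reducing with the GCM polynomial
 * x^128 + x^7 + x^2 + x + 1 (R is the reduction constant from
 * NIST SP 800-38D). Operands are pairs of 64-bit words, most
 * significant word first, assumed already in host byte order.
 */
static void
gf128_mul(const uint64_t x[2], const uint64_t y[2], uint64_t z[2])
{
	const uint64_t R = 0xe100000000000000ULL;
	uint64_t v0 = y[0], v1 = y[1];
	uint64_t z0 = 0, z1 = 0;
	uint64_t xw, lsb;
	int i, j;

	for (j = 0; j < 2; j++) {
		xw = x[j];
		for (i = 0; i < 64; i++, xw <<= 1) {
			/* accumulate v into the product for each set bit of x */
			if (xw & 0x8000000000000000ULL) {
				z0 ^= v0;
				z1 ^= v1;
			}
			/* shift v one bit in GCM bit order; reduce by R when a bit falls off */
			lsb = v1 & 1;
			v1 = (v0 << 63) | (v1 >> 1);
			v0 = (v0 >> 1) ^ (lsb ? R : 0);
		}
	}
	z[0] = z0;
	z[1] = z1;
}

Each GHASH step in the patch is then hash = (hash XOR block) * H, with H obtained in gcm_init() by encrypting an all-zero block under the data key.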
diff --git a/usr/src/common/crypto/aes/aes_impl.c b/usr/src/common/crypto/aes/aes_impl.c index 357480eb6a..1c34641229 100644 --- a/usr/src/common/crypto/aes/aes_impl.c +++ b/usr/src/common/crypto/aes/aes_impl.c @@ -1691,6 +1691,10 @@ aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length, rv = ccm_mode_encrypt_contiguous_blocks(ctx, data, length, out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, aes_xor_block); + } else if (aes_ctx->ac_flags & GCM_MODE) { + rv = gcm_mode_encrypt_contiguous_blocks(ctx, data, length, + out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); #endif } else if (aes_ctx->ac_flags & CBC_MODE) { rv = cbc_encrypt_contiguous_blocks(ctx, @@ -1723,6 +1727,10 @@ aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length, rv = ccm_mode_decrypt_contiguous_blocks(ctx, data, length, out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, aes_xor_block); + } else if (aes_ctx->ac_flags & GCM_MODE) { + rv = gcm_mode_decrypt_contiguous_blocks(ctx, data, length, + out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); #endif } else if (aes_ctx->ac_flags & CBC_MODE) { rv = cbc_decrypt_contiguous_blocks(ctx, data, length, out, diff --git a/usr/src/common/crypto/modes/gcm.c b/usr/src/common/crypto/modes/gcm.c new file mode 100644 index 0000000000..4802d387d0 --- /dev/null +++ b/usr/src/common/crypto/modes/gcm.c @@ -0,0 +1,626 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _KERNEL +#include <strings.h> +#include <limits.h> +#include <assert.h> +#include <security/cryptoki.h> +#endif + +#include <sys/types.h> +#include <sys/kmem.h> +#include <modes/modes.h> +#include <sys/crypto/common.h> +#include <sys/crypto/impl.h> +#include <sys/byteorder.h> + +struct aes_block { + uint64_t a; + uint64_t b; +}; + +static void +gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res) +{ + uint64_t R = { 0xe100000000000000ULL }; + struct aes_block z = { 0, 0 }; + struct aes_block v; + uint64_t x; + int i, j; + + v.a = ntohll(y[0]); + v.b = ntohll(y[1]); + + for (j = 0; j < 2; j++) { + x = ntohll(x_in[j]); + for (i = 0; i < 64; i++, x <<= 1) { + if (x & 0x8000000000000000ULL) { + z.a ^= v.a; + z.b ^= v.b; + } + if (v.b & 1ULL) { + v.b = (v.a << 63)|(v.b >> 1); + v.a = (v.a >> 1) ^ R; + } else { + v.b = (v.a << 63)|(v.b >> 1); + v.a = v.a >> 1; + } + } + } + res[0] = htonll(z.a); + res[1] = htonll(z.b); +} + +#define GHASH(c, d, t) \ + xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \ + gcm_mul((uint64_t *)(c)->gcm_ghash, (c)->gcm_H, (uint64_t *)(t)); + +/* + * Encrypt multiple blocks of data in GCM mode. 
Decrypt for GCM mode + * is done in another function. + */ +int +gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length, + crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + size_t remainder = length; + size_t need; + uint8_t *datap = (uint8_t *)data; + uint8_t *blockp; + uint8_t *lastp; + void *iov_or_mp; + offset_t offset; + uint8_t *out_data_1; + uint8_t *out_data_2; + size_t out_data_1_len; + uint64_t counter; + uint64_t counter_mask = ntohll(0x00000000ffffffffULL); + + if (length + ctx->gcm_remainder_len < block_size) { + /* accumulate bytes here and return */ + bcopy(datap, + (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len, + length); + ctx->gcm_remainder_len += length; + ctx->gcm_copy_to = datap; + return (CRYPTO_SUCCESS); + } + + lastp = (uint8_t *)ctx->gcm_cb; + if (out != NULL) + crypto_init_ptrs(out, &iov_or_mp, &offset); + + do { + /* Unprocessed data from last call. */ + if (ctx->gcm_remainder_len > 0) { + need = block_size - ctx->gcm_remainder_len; + + if (need > remainder) + return (CRYPTO_DATA_LEN_RANGE); + + bcopy(datap, &((uint8_t *)ctx->gcm_remainder) + [ctx->gcm_remainder_len], need); + + blockp = (uint8_t *)ctx->gcm_remainder; + } else { + blockp = datap; + } + + /* + * Increment counter. Counter bits are confined + * to the bottom 32 bits of the counter block. + */ + counter = ntohll(ctx->gcm_cb[1] & counter_mask); + counter = htonll(counter + 1); + counter &= counter_mask; + ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; + + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, + (uint8_t *)ctx->gcm_tmp); + xor_block(blockp, (uint8_t *)ctx->gcm_tmp); + + lastp = (uint8_t *)ctx->gcm_tmp; + + ctx->gcm_processed_data_len += block_size; + + if (out == NULL) { + if (ctx->gcm_remainder_len > 0) { + bcopy(blockp, ctx->gcm_copy_to, + ctx->gcm_remainder_len); + bcopy(blockp + ctx->gcm_remainder_len, datap, + need); + } + } else { + crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, + &out_data_1_len, &out_data_2, block_size); + + /* copy block to where it belongs */ + if (out_data_1_len == block_size) { + copy_block(lastp, out_data_1); + } else { + bcopy(lastp, out_data_1, out_data_1_len); + if (out_data_2 != NULL) { + bcopy(lastp + out_data_1_len, + out_data_2, + block_size - out_data_1_len); + } + } + /* update offset */ + out->cd_offset += block_size; + } + + /* add ciphertext to the hash */ + GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash); + + /* Update pointer to next block of data to be processed. */ + if (ctx->gcm_remainder_len != 0) { + datap += need; + ctx->gcm_remainder_len = 0; + } else { + datap += block_size; + } + + remainder = (size_t)&data[length] - (size_t)datap; + + /* Incomplete last block. 
*/ + if (remainder > 0 && remainder < block_size) { + bcopy(datap, ctx->gcm_remainder, remainder); + ctx->gcm_remainder_len = remainder; + ctx->gcm_copy_to = datap; + goto out; + } + ctx->gcm_copy_to = NULL; + + } while (remainder > 0); +out: + return (CRYPTO_SUCCESS); +} + +/* ARGSUSED */ +int +gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + uint64_t counter_mask = ntohll(0x00000000ffffffffULL); + uint8_t *ghash, *macp; + int i, rv; + + if (out->cd_length < + (ctx->gcm_remainder_len + ctx->gcm_tag_len)) { + return (CRYPTO_DATA_LEN_RANGE); + } + + ghash = (uint8_t *)ctx->gcm_ghash; + + if (ctx->gcm_remainder_len > 0) { + uint64_t counter; + uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp; + + /* + * Here is where we deal with data that is not a + * multiple of the block size. + */ + + /* + * Increment counter. + */ + counter = ntohll(ctx->gcm_cb[1] & counter_mask); + counter = htonll(counter + 1); + counter &= counter_mask; + ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; + + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, + (uint8_t *)ctx->gcm_tmp); + + macp = (uint8_t *)ctx->gcm_remainder; + bzero(macp + ctx->gcm_remainder_len, + block_size - ctx->gcm_remainder_len); + + /* XOR with counter block */ + for (i = 0; i < ctx->gcm_remainder_len; i++) { + macp[i] ^= tmpp[i]; + } + + /* add ciphertext to the hash */ + GHASH(ctx, macp, ghash); + + ctx->gcm_processed_data_len += ctx->gcm_remainder_len; + } + + ctx->gcm_len_a_len_c[1] = htonll(ctx->gcm_processed_data_len << 3); + GHASH(ctx, ctx->gcm_len_a_len_c, ghash); + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0, + (uint8_t *)ctx->gcm_J0); + xor_block((uint8_t *)ctx->gcm_J0, ghash); + + if (ctx->gcm_remainder_len > 0) { + rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len); + if (rv != CRYPTO_SUCCESS) + return (rv); + } + out->cd_offset += ctx->gcm_remainder_len; + ctx->gcm_remainder_len = 0; + rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len); + if (rv != CRYPTO_SUCCESS) + return (rv); + out->cd_offset += ctx->gcm_tag_len; + + return (CRYPTO_SUCCESS); +} + +/* + * This will only deal with decrypting the last block of the input that + * might not be a multiple of block length. + */ +static void +gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + uint8_t *datap, *outp, *counterp; + uint64_t counter; + uint64_t counter_mask = ntohll(0x00000000ffffffffULL); + int i; + + /* + * Increment counter. 
+ * Counter bits are confined to the bottom 32 bits + */ + counter = ntohll(ctx->gcm_cb[1] & counter_mask); + counter = htonll(counter + 1); + counter &= counter_mask; + ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; + + datap = (uint8_t *)ctx->gcm_remainder; + outp = &((ctx->gcm_pt_buf)[index]); + counterp = (uint8_t *)ctx->gcm_tmp; + + /* authentication tag */ + bzero((uint8_t *)ctx->gcm_tmp, block_size); + bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len); + + /* add ciphertext to the hash */ + GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash); + + /* decrypt remaining ciphertext */ + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp); + + /* XOR with counter block */ + for (i = 0; i < ctx->gcm_remainder_len; i++) { + outp[i] = datap[i] ^ counterp[i]; + } +} + +/* ARGSUSED */ +int +gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length, + crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + size_t new_len; + uint8_t *new; + + /* + * Copy contiguous ciphertext input blocks to plaintext buffer. + * Ciphertext will be decrypted in the final. + */ + if (length > 0) { + new_len = ctx->gcm_pt_buf_len + length; +#ifdef _KERNEL + new = kmem_alloc(new_len, ctx->gcm_kmflag); + bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len); + kmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len); +#else + new = malloc(new_len); + bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len); + free(ctx->gcm_pt_buf); +#endif + if (new == NULL) + return (CRYPTO_HOST_MEMORY); + + ctx->gcm_pt_buf = new; + ctx->gcm_pt_buf_len = new_len; + bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len], + length); + ctx->gcm_processed_data_len += length; + } + + ctx->gcm_remainder_len = 0; + return (CRYPTO_SUCCESS); +} + +int +gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + size_t pt_len; + size_t remainder; + uint8_t *ghash; + uint8_t *blockp; + uint8_t *cbp; + uint64_t counter; + uint64_t counter_mask = ntohll(0x00000000ffffffffULL); + int processed = 0, rv; + + ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len); + + pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len; + ghash = (uint8_t *)ctx->gcm_ghash; + blockp = ctx->gcm_pt_buf; + remainder = pt_len; + while (remainder > 0) { + /* add ciphertext to the hash */ + GHASH(ctx, blockp, ghash); + + /* + * Increment counter. 
+ * Counter bits are confined to the bottom 32 bits + */ + counter = ntohll(ctx->gcm_cb[1] & counter_mask); + counter = htonll(counter + 1); + counter &= counter_mask; + ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; + + cbp = (uint8_t *)ctx->gcm_tmp; + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp); + + /* XOR with ciphertext */ + xor_block(cbp, blockp); + + processed += block_size; + blockp += block_size; + remainder -= block_size; + + /* Incomplete last block */ + if (remainder > 0 && remainder < block_size) { + bcopy(blockp, ctx->gcm_remainder, remainder); + ctx->gcm_remainder_len = remainder; + /* + * not expecting anymore ciphertext, just + * compute plaintext for the remaining input + */ + gcm_decrypt_incomplete_block(ctx, block_size, + processed, encrypt_block, xor_block); + ctx->gcm_remainder_len = 0; + goto out; + } + } +out: + ctx->gcm_len_a_len_c[1] = htonll(pt_len << 3); + GHASH(ctx, ctx->gcm_len_a_len_c, ghash); + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0, + (uint8_t *)ctx->gcm_J0); + xor_block((uint8_t *)ctx->gcm_J0, ghash); + + /* compare the input authentication tag with what we calculated */ + if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) { + /* They don't match */ + return (CRYPTO_INVALID_MAC); + } else { + rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len); + if (rv != CRYPTO_SUCCESS) + return (rv); + out->cd_offset += pt_len; + } + return (CRYPTO_SUCCESS); +} + +static int +gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param) +{ + size_t tag_len; + + /* + * Check the length of the authentication tag (in bits). + */ + tag_len = gcm_param->ulTagBits; + switch (tag_len) { + case 32: + case 64: + case 96: + case 104: + case 112: + case 120: + case 128: + break; + default: + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + + if (gcm_param->ulIvLen == 0) + return (CRYPTO_MECHANISM_PARAM_INVALID); + + return (CRYPTO_SUCCESS); +} + +static void +gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len, + gcm_ctx_t *ctx, size_t block_size, + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + uint8_t *cb; + ulong_t remainder = iv_len; + ulong_t processed = 0; + uint8_t *datap, *ghash; + uint64_t len_a_len_c[2]; + + ghash = (uint8_t *)ctx->gcm_ghash; + cb = (uint8_t *)ctx->gcm_cb; + if (iv_len == 12) { + bcopy(iv, cb, 12); + cb[12] = 0; + cb[13] = 0; + cb[14] = 0; + cb[15] = 1; + /* J0 will be used again in the final */ + copy_block(cb, (uint8_t *)ctx->gcm_J0); + } else { + /* GHASH the IV */ + do { + if (remainder < block_size) { + bzero(cb, block_size); + bcopy(&(iv[processed]), cb, remainder); + datap = (uint8_t *)cb; + remainder = 0; + } else { + datap = (uint8_t *)(&(iv[processed])); + processed += block_size; + remainder -= block_size; + } + GHASH(ctx, datap, ghash); + } while (remainder > 0); + + len_a_len_c[0] = 0; + len_a_len_c[1] = htonll(iv_len << 3); + GHASH(ctx, len_a_len_c, ctx->gcm_J0); + + /* J0 will be used again in the final */ + copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb); + } +} + +/* + * The following function is called at encrypt or decrypt init time + * for AES GCM mode. 
+ */ +int +gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len, + unsigned char *auth_data, size_t auth_data_len, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + uint8_t *ghash, *datap, *authp; + size_t remainder, processed; + + /* encrypt zero block to get subkey H */ + bzero(ctx->gcm_H, sizeof (ctx->gcm_H)); + encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H, + (uint8_t *)ctx->gcm_H); + + gcm_format_initial_blocks(iv, iv_len, ctx, block_size, + copy_block, xor_block); + + authp = (uint8_t *)ctx->gcm_tmp; + ghash = (uint8_t *)ctx->gcm_ghash; + bzero(authp, block_size); + bzero(ghash, block_size); + + processed = 0; + remainder = auth_data_len; + do { + if (remainder < block_size) { + /* + * There's not a block full of data, pad rest of + * buffer with zero + */ + bzero(authp, block_size); + bcopy(&(auth_data[processed]), authp, remainder); + datap = (uint8_t *)authp; + remainder = 0; + } else { + datap = (uint8_t *)(&(auth_data[processed])); + processed += block_size; + remainder -= block_size; + } + + /* add auth data to the hash */ + GHASH(ctx, datap, ghash); + + } while (remainder > 0); + + return (CRYPTO_SUCCESS); +} + +int +gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)) +{ + int rv; + CK_AES_GCM_PARAMS *gcm_param; + + if (param != NULL) { + gcm_param = (CK_AES_GCM_PARAMS *)param; + + if ((rv = gcm_validate_args(gcm_param)) != 0) { + return (rv); + } + + gcm_ctx->gcm_tag_len = gcm_param->ulTagBits; + gcm_ctx->gcm_tag_len >>= 3; + gcm_ctx->gcm_processed_data_len = 0; + + /* these values are in bits */ + gcm_ctx->gcm_len_a_len_c[0] = htonll(gcm_param->ulAADLen << 3); + + rv = CRYPTO_SUCCESS; + gcm_ctx->gcm_flags |= GCM_MODE; + } else { + rv = CRYPTO_MECHANISM_PARAM_INVALID; + goto out; + } + + if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen, + gcm_param->pAAD, gcm_param->ulAADLen, block_size, + encrypt_block, copy_block, xor_block) != 0) { + rv = CRYPTO_MECHANISM_PARAM_INVALID; + } +out: + return (rv); +} + +void * +gcm_alloc_ctx(int kmflag) +{ + gcm_ctx_t *gcm_ctx; + +#ifdef _KERNEL + if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL) +#else + if ((gcm_ctx = calloc(1, sizeof (gcm_ctx_t))) == NULL) +#endif + return (NULL); + + gcm_ctx->gcm_flags = GCM_MODE; + return (gcm_ctx); +} + +void +gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag) +{ + ctx->gcm_kmflag = kmflag; +} diff --git a/usr/src/common/crypto/modes/modes.c b/usr/src/common/crypto/modes/modes.c index f5753bfc72..972f30b2df 100644 --- a/usr/src/common/crypto/modes/modes.c +++ b/usr/src/common/crypto/modes/modes.c @@ -23,8 +23,6 @@ * Use is subject to license terms. 
*/ -#pragma ident "%Z%%M% %I% %E% SMI" - #ifndef _KERNEL #include <stdlib.h> #endif @@ -172,25 +170,33 @@ crypto_free_mode_ctx(void *ctx) { common_ctx_t *common_ctx = (common_ctx_t *)ctx; - if (common_ctx->cc_flags & ECB_MODE) + switch (common_ctx->cc_flags & + (ECB_MODE|CBC_MODE|CTR_MODE|CCM_MODE|GCM_MODE)) { + case ECB_MODE: #ifdef _KERNEL kmem_free(common_ctx, sizeof (ecb_ctx_t)); #else free(common_ctx); #endif - else if (common_ctx->cc_flags & CBC_MODE) + break; + + case CBC_MODE: #ifdef _KERNEL kmem_free(common_ctx, sizeof (cbc_ctx_t)); #else free(common_ctx); #endif - else if (common_ctx->cc_flags & CTR_MODE) + break; + + case CTR_MODE: #ifdef _KERNEL kmem_free(common_ctx, sizeof (ctr_ctx_t)); #else free(common_ctx); #endif - else if (common_ctx->cc_flags & CCM_MODE) { + break; + + case CCM_MODE: #ifdef _KERNEL if (((ccm_ctx_t *)ctx)->ccm_pt_buf != NULL) kmem_free(((ccm_ctx_t *)ctx)->ccm_pt_buf, @@ -202,5 +208,19 @@ crypto_free_mode_ctx(void *ctx) free(((ccm_ctx_t *)ctx)->ccm_pt_buf); free(ctx); #endif + break; + + case GCM_MODE: +#ifdef _KERNEL + if (((gcm_ctx_t *)ctx)->gcm_pt_buf != NULL) + kmem_free(((gcm_ctx_t *)ctx)->gcm_pt_buf, + ((gcm_ctx_t *)ctx)->gcm_pt_buf_len); + + kmem_free(ctx, sizeof (gcm_ctx_t)); +#else + if (((gcm_ctx_t *)ctx)->gcm_pt_buf != NULL) + free(((gcm_ctx_t *)ctx)->gcm_pt_buf); + free(ctx); +#endif } } diff --git a/usr/src/common/crypto/modes/modes.h b/usr/src/common/crypto/modes/modes.h index f5781f9270..397f4e3b3e 100644 --- a/usr/src/common/crypto/modes/modes.h +++ b/usr/src/common/crypto/modes/modes.h @@ -44,6 +44,7 @@ extern "C" { #define CBC_MODE 0x00000004 #define CTR_MODE 0x00000008 #define CCM_MODE 0x00000010 +#define GCM_MODE 0x00000020 /* * cc_keysched: Pointer to key schedule. @@ -176,6 +177,54 @@ typedef struct ccm_ctx { #define ccm_copy_to ccm_common.cc_copy_to #define ccm_flags ccm_common.cc_flags +/* + * gcm_tag_len: Length of authentication tag. + * + * gcm_ghash: Stores output from the GHASH function. + * + * gcm_processed_data_len: + * Length of processed plaintext (encrypt) or + * length of processed ciphertext (decrypt). + * + * gcm_pt_buf: Stores the decrypted plaintext returned by + * decrypt_final when the computed authentication + * tag matches the user supplied tag. + * + * gcm_pt_buf_len: Length of the plaintext buffer. + * + * gcm_H: Subkey. + * + * gcm_J0: Pre-counter block generated from the IV. + * + * gcm_len_a_len_c: 64-bit representations of the bit lengths of + * AAD and ciphertext. + * + * gcm_kmflag: Current value of kmflag. Used only for allocating + * the plaintext buffer during decryption. 
+ */ +typedef struct gcm_ctx { + struct common_ctx gcm_common; + size_t gcm_tag_len; + size_t gcm_processed_data_len; + size_t gcm_pt_buf_len; + uint32_t gcm_tmp[4]; + uint64_t gcm_ghash[2]; + uint64_t gcm_H[2]; + uint64_t gcm_J0[2]; + uint64_t gcm_len_a_len_c[2]; + uint8_t *gcm_pt_buf; + int gcm_kmflag; +} gcm_ctx_t; + +#define gcm_keysched gcm_common.cc_keysched +#define gcm_keysched_len gcm_common.cc_keysched_len +#define gcm_cb gcm_common.cc_iv +#define gcm_remainder gcm_common.cc_remainder +#define gcm_remainder_len gcm_common.cc_remainder_len +#define gcm_lastp gcm_common.cc_lastp +#define gcm_copy_to gcm_common.cc_copy_to +#define gcm_flags gcm_common.cc_flags + typedef struct aes_ctx { union { ecb_ctx_t acu_ecb; @@ -183,6 +232,7 @@ typedef struct aes_ctx { ctr_ctx_t acu_ctr; #ifdef _KERNEL ccm_ctx_t acu_ccm; + gcm_ctx_t acu_gcm; #endif } acu; } aes_ctx_t; @@ -260,15 +310,36 @@ extern int ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *, char *, size_t, void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); +extern int gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *, char *, size_t, + crypto_data_t *, size_t, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)); + +extern int gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *, char *, size_t, + crypto_data_t *, size_t, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)); + int ccm_encrypt_final(ccm_ctx_t *, crypto_data_t *, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); +int gcm_encrypt_final(gcm_ctx_t *, crypto_data_t *, size_t, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)); + extern int ccm_decrypt_final(ccm_ctx_t *, crypto_data_t *, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); +extern int gcm_decrypt_final(gcm_ctx_t *, crypto_data_t *, size_t, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)); + extern int ctr_mode_final(ctr_ctx_t *, crypto_data_t *, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *)); @@ -282,6 +353,11 @@ extern int ccm_init_ctx(ccm_ctx_t *, char *, int, boolean_t, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); +extern int gcm_init_ctx(gcm_ctx_t *, char *, size_t, + int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), + void (*copy_block)(uint8_t *, uint8_t *), + void (*xor_block)(uint8_t *, uint8_t *)); + extern void calculate_ccm_mac(ccm_ctx_t *, uint8_t *, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *)); @@ -293,7 +369,9 @@ extern void *ecb_alloc_ctx(int); extern void *cbc_alloc_ctx(int); extern void *ctr_alloc_ctx(int); extern void *ccm_alloc_ctx(int); +extern void *gcm_alloc_ctx(int); extern void crypto_free_mode_ctx(void *); +extern void gcm_set_kmflag(gcm_ctx_t *, int); #ifdef __cplusplus } diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files index f9c8a7682c..9efa40936a 100644 --- a/usr/src/uts/common/Makefile.files +++ b/usr/src/uts/common/Makefile.files @@ -1367,7 +1367,7 @@ KCF_OBJS += kcf.o kcf_callprov.o 
kcf_cbufcall.o kcf_cipher.o kcf_crypto.o \ kcf_keys.o kcf_mac.o kcf_mech_tabs.o kcf_miscapi.o \ kcf_object.o kcf_policy.o kcf_prov_lib.o kcf_prov_tabs.o \ kcf_sched.o kcf_session.o kcf_sign.o kcf_spi.o kcf_verify.o \ - kcf_random.o modes.o ecb.o cbc.o ctr.o ccm.o + kcf_random.o modes.o ecb.o cbc.o ctr.o ccm.o gcm.o CRYPTOADM_OBJS += cryptoadm.o diff --git a/usr/src/uts/common/crypto/core/kcf_cryptoadm.c b/usr/src/uts/common/crypto/core/kcf_cryptoadm.c index 11c4a8f2fe..d3fa3b5f86 100644 --- a/usr/src/uts/common/crypto/core/kcf_cryptoadm.c +++ b/usr/src/uts/common/crypto/core/kcf_cryptoadm.c @@ -99,7 +99,8 @@ kcf_soft_config_init(void) * * # /etc/crypto/kcf.conf * des:supportedlist=CKM_DES_CBC,CKM_DES_ECB,CKM_DES3_CBC,CKM_DES3_ECB - * aes:supportedlist=CKM_AES_ECB,CKM_AES_CBC,CKM_AES_CTR,CKM_AES_CCM + * aes:supportedlist=CKM_AES_ECB,CKM_AES_CBC,CKM_AES_CTR,CKM_AES_CCM, + * CKM_AES_GCM * arcfour:supportedlist=CKM_RC4 * blowfish:supportedlist=CKM_BLOWFISH_ECB,CKM_BLOWFISH_CBC * ecc:supportedlist=CKM_EC_KEY_PAIR_GEN,CKM_ECDH1_DERIVE,CKM_ECDSA,\ @@ -130,7 +131,8 @@ kcf_soft_config_init(void) static crypto_mech_name_t des_mechs[] = { "CKM_DES_CBC", "CKM_DES_ECB", "CKM_DES3_CBC", "CKM_DES3_ECB", ""}; static crypto_mech_name_t aes_mechs[] = { - "CKM_AES_ECB", "CKM_AES_CBC", "CKM_AES_CTR", "CKM_AES_CCM", ""}; + "CKM_AES_ECB", "CKM_AES_CBC", "CKM_AES_CTR", "CKM_AES_CCM", + "CKM_AES_GCM", ""}; static crypto_mech_name_t arcfour_mechs[] = { "CKM_RC4", ""}; static crypto_mech_name_t blowfish_mechs[] = { diff --git a/usr/src/uts/common/crypto/io/aes.c b/usr/src/uts/common/crypto/io/aes.c index 5e0739fb1c..9e368cf418 100644 --- a/usr/src/uts/common/crypto/io/aes.c +++ b/usr/src/uts/common/crypto/io/aes.c @@ -23,8 +23,6 @@ * Use is subject to license terms. */ -#pragma ident "%Z%%M% %I% %E% SMI" - /* * AES provider for the Kernel Cryptographic Framework (KCF) */ @@ -66,7 +64,8 @@ typedef enum aes_mech_type { AES_CBC_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC */ AES_CBC_PAD_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC_PAD */ AES_CTR_MECH_INFO_TYPE, /* SUN_CKM_AES_CTR */ - AES_CCM_MECH_INFO_TYPE /* SUN_CKM_AES_CCM */ + AES_CCM_MECH_INFO_TYPE, /* SUN_CKM_AES_CCM */ + AES_GCM_MECH_INFO_TYPE /* SUN_CKM_AES_GCM */ } aes_mech_type_t; /* @@ -103,6 +102,11 @@ static crypto_mech_info_t aes_mech_info_tab[] = { {SUN_CKM_AES_CCM, AES_CCM_MECH_INFO_TYPE, CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC, + AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES}, + /* AES_GCM */ + {SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE, + CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | + CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC, AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES} }; @@ -301,6 +305,15 @@ aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx, int kmflag) if (ctx != NULL) p = ccm_alloc_ctx(kmflag); break; + case AES_GCM_MECH_INFO_TYPE: + if (mechanism->cm_param != NULL && + mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) { + rv = CRYPTO_MECHANISM_PARAM_INVALID; + break; + } + if (ctx != NULL) + p = gcm_alloc_ctx(kmflag); + break; default: rv = CRYPTO_MECHANISM_INVALID; } @@ -447,8 +460,7 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, * The length requirement for AES CCM mode has already been checked * at init time */ - if (((aes_ctx->ac_flags & CTR_MODE) == 0) && - ((aes_ctx->ac_flags & CCM_MODE) == 0) && + if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE)) == 0) && (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0) return 
(CRYPTO_DATA_LEN_RANGE); @@ -460,6 +472,8 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, */ if (aes_ctx->ac_flags & CCM_MODE) { length_needed = plaintext->cd_length + aes_ctx->ac_mac_len; + } else if (aes_ctx->ac_flags & GCM_MODE) { + length_needed = plaintext->cd_length + aes_ctx->ac_mac_len; } else { length_needed = plaintext->cd_length; } @@ -486,7 +500,7 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, */ if (aes_ctx->ac_flags & CCM_MODE) { /* - * aes_ccm_encrypt_final() will compute the MAC and append + * ccm_encrypt_final() will compute the MAC and append * it to existing ciphertext. So, need to adjust the left over * length value accordingly */ @@ -505,6 +519,28 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, ciphertext->cd_offset - saved_offset; } ciphertext->cd_offset = saved_offset; + } else if (aes_ctx->ac_flags & GCM_MODE) { + /* + * gcm_encrypt_final() will compute the MAC and append + * it to existing ciphertext. So, need to adjust the left over + * length value accordingly + */ + + /* order of following 2 lines MUST not be reversed */ + ciphertext->cd_offset = ciphertext->cd_length; + ciphertext->cd_length = saved_length - ciphertext->cd_length; + ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, ciphertext, + AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); + if (ret != CRYPTO_SUCCESS) { + return (ret); + } + + if (plaintext != ciphertext) { + ciphertext->cd_length = + ciphertext->cd_offset - saved_offset; + } + ciphertext->cd_offset = saved_offset; } ASSERT(aes_ctx->ac_remainder_len == 0); @@ -540,8 +576,7 @@ aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext, * The length requirement for AES CCM mode has already been checked * at init time */ - if (((aes_ctx->ac_flags & CTR_MODE) == 0) && - ((aes_ctx->ac_flags & CCM_MODE) == 0) && + if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE)) == 0) && (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0) { return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE); } @@ -556,8 +591,18 @@ aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext, * smaller than size of the cipher text. 
*/ if (aes_ctx->ac_flags & CCM_MODE) { - if (plaintext->cd_length < aes_ctx->ac_data_len) { - plaintext->cd_length = aes_ctx->ac_data_len; + if (plaintext->cd_length < aes_ctx->ac_processed_data_len) { + plaintext->cd_length = aes_ctx->ac_processed_data_len; + return (CRYPTO_BUFFER_TOO_SMALL); + } + saved_offset = plaintext->cd_offset; + saved_length = plaintext->cd_length; + } else if (aes_ctx->ac_flags & GCM_MODE) { + gcm_ctx_t *ctx = (gcm_ctx_t *)aes_ctx; + size_t pt_len = ciphertext->cd_length - ctx->gcm_tag_len; + + if (plaintext->cd_length < pt_len) { + plaintext->cd_length = pt_len; return (CRYPTO_BUFFER_TOO_SMALL); } saved_offset = plaintext->cd_offset; @@ -596,6 +641,23 @@ aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext, } plaintext->cd_offset = saved_offset; + } else if (aes_ctx->ac_flags & GCM_MODE) { + /* order of following 2 lines MUST not be reversed */ + plaintext->cd_offset = plaintext->cd_length; + plaintext->cd_length = saved_length - plaintext->cd_length; + + ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, plaintext, + AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); + if (ret == CRYPTO_SUCCESS) { + if (plaintext != ciphertext) { + plaintext->cd_length = + plaintext->cd_offset - saved_offset; + } + } else { + plaintext->cd_length = saved_length; + } + + plaintext->cd_offset = saved_offset; } ASSERT(aes_ctx->ac_remainder_len == 0); @@ -700,20 +762,29 @@ aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext, AES_ARG_INPLACE(ciphertext, plaintext); - /* compute number of bytes that will hold the plaintext */ - out_len = aes_ctx->ac_remainder_len; - out_len += ciphertext->cd_length; - out_len &= ~(AES_BLOCK_LEN - 1); - - /* return length needed to store the output */ - if (plaintext->cd_length < out_len) { - plaintext->cd_length = out_len; - return (CRYPTO_BUFFER_TOO_SMALL); + /* + * Compute number of bytes that will hold the plaintext. + * This is not necessary for CCM and GCM since these mechanisms + * never return plaintext for update operations. + */ + if ((aes_ctx->ac_flags & (CCM_MODE|GCM_MODE)) == 0) { + out_len = aes_ctx->ac_remainder_len; + out_len += ciphertext->cd_length; + out_len &= ~(AES_BLOCK_LEN - 1); + + /* return length needed to store the output */ + if (plaintext->cd_length < out_len) { + plaintext->cd_length = out_len; + return (CRYPTO_BUFFER_TOO_SMALL); + } } saved_offset = plaintext->cd_offset; saved_length = plaintext->cd_length; + if (aes_ctx->ac_flags & GCM_MODE) + gcm_set_kmflag((gcm_ctx_t *)aes_ctx, crypto_kmflag(req)); + /* * Do the AES update on the specified input data. */ @@ -796,6 +867,17 @@ aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data, if (ret != CRYPTO_SUCCESS) { return (ret); } + } else if (aes_ctx->ac_flags & GCM_MODE) { + size_t saved_offset = data->cd_offset; + + ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, data, + AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); + if (ret != CRYPTO_SUCCESS) { + return (ret); + } + data->cd_length = data->cd_offset - saved_offset; + data->cd_offset = saved_offset; } else { /* * There must be no unprocessed plaintext. 
@@ -883,11 +965,37 @@ aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data, if (ret != CRYPTO_SUCCESS) { return (ret); } + } else if (aes_ctx->ac_flags & GCM_MODE) { + /* + * This is where all the plaintext is returned, make sure + * the plaintext buffer is big enough + */ + gcm_ctx_t *ctx = (gcm_ctx_t *)aes_ctx; + size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len; + + if (data->cd_length < pt_len) { + data->cd_length = pt_len; + return (CRYPTO_BUFFER_TOO_SMALL); + } + + saved_offset = data->cd_offset; + saved_length = data->cd_length; + ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, data, + AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); + if (ret == CRYPTO_SUCCESS) { + data->cd_length = data->cd_offset - saved_offset; + } else { + data->cd_length = saved_length; + } + + data->cd_offset = saved_offset; + if (ret != CRYPTO_SUCCESS) { + return (ret); + } } - if (((aes_ctx->ac_flags & CTR_MODE) == 0) && - ((aes_ctx->ac_flags & CCM_MODE) == 0)) { + if ((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE)) == 0) { data->cd_length = 0; } @@ -980,6 +1088,13 @@ aes_encrypt_atomic(crypto_provider_handle_t provider, if (ret != CRYPTO_SUCCESS) goto out; ASSERT(aes_ctx.ac_remainder_len == 0); + } else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) { + ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx, + ciphertext, AES_BLOCK_LEN, aes_encrypt_block, + aes_copy_block, aes_xor_block); + if (ret != CRYPTO_SUCCESS) + goto out; + ASSERT(aes_ctx.ac_remainder_len == 0); } else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) { if (aes_ctx.ac_remainder_len > 0) { ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx, @@ -1070,6 +1185,9 @@ aes_decrypt_atomic(crypto_provider_handle_t provider, saved_offset = plaintext->cd_offset; saved_length = plaintext->cd_length; + if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) + gcm_set_kmflag((gcm_ctx_t *)&aes_ctx, crypto_kmflag(req)); + /* * Do an update on the specified input data. 
*/ @@ -1107,6 +1225,18 @@ } else { plaintext->cd_length = saved_length; } + } else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) { + ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx, + plaintext, AES_BLOCK_LEN, aes_encrypt_block, + aes_xor_block); + ASSERT(aes_ctx.ac_remainder_len == 0); + if ((ret == CRYPTO_SUCCESS) && + (ciphertext != plaintext)) { + plaintext->cd_length = + plaintext->cd_offset - saved_offset; + } else { + plaintext->cd_length = saved_length; + } } else if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) { ASSERT(aes_ctx.ac_remainder_len == 0); if (ciphertext != plaintext) @@ -1140,6 +1270,11 @@ out: if (aes_ctx.ac_pt_buf != NULL) { kmem_free(aes_ctx.ac_pt_buf, aes_ctx.ac_data_len); } + } else if (aes_ctx.ac_flags & GCM_MODE) { + if (((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf != NULL) { + kmem_free(((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf, + ((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf_len); + } } return (ret); @@ -1273,6 +1408,15 @@ aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template, kmflag, is_encrypt_init, AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); break; + case AES_GCM_MECH_INFO_TYPE: + if (mechanism->cm_param == NULL || + mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) { + return (CRYPTO_MECHANISM_PARAM_INVALID); + } + rv = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param, + AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, + aes_xor_block); + break; case AES_ECB_MECH_INFO_TYPE: aes_ctx->ac_flags |= ECB_MODE; } diff --git a/usr/src/uts/common/crypto/io/dprov.c b/usr/src/uts/common/crypto/io/dprov.c index e08a70dcaf..a6a070ad6a 100644 --- a/usr/src/uts/common/crypto/io/dprov.c +++ b/usr/src/uts/common/crypto/io/dprov.c @@ -231,6 +231,7 @@ typedef enum dprov_mech_type { AES_ECB_MECH_INFO_TYPE, /* SUN_CKM_AES_ECB */ AES_CTR_MECH_INFO_TYPE, /* SUN_CKM_AES_CTR */ AES_CCM_MECH_INFO_TYPE, /* SUN_CKM_AES_CCM */ + AES_GCM_MECH_INFO_TYPE, /* SUN_CKM_AES_GCM */ RC4_MECH_INFO_TYPE, /* SUN_CKM_RC4 */ RSA_PKCS_MECH_INFO_TYPE, /* SUN_CKM_RSA_PKCS */ RSA_X_509_MECH_INFO_TYPE, /* SUN_CKM_RSA_X_509 */ @@ -495,6 +496,13 @@ static crypto_mech_info_t dprov_mech_info_tab[] = { CRYPTO_FG_DECRYPT_ATOMIC | CRYPTO_FG_ENCRYPT_MAC_ATOMIC | CRYPTO_FG_MAC_DECRYPT_ATOMIC, AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}, + /* AES-GCM */ + {SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE, + CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_ENCRYPT_MAC | + CRYPTO_FG_MAC_DECRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | + CRYPTO_FG_DECRYPT_ATOMIC | CRYPTO_FG_ENCRYPT_MAC_ATOMIC | + CRYPTO_FG_MAC_DECRYPT_ATOMIC, + AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}, /* RC4 */ {SUN_CKM_RC4, RC4_MECH_INFO_TYPE, CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | @@ -2298,6 +2306,7 @@ dprov_valid_cipher_mech(crypto_mech_type_t mech_type) mech_type == AES_ECB_MECH_INFO_TYPE || mech_type == AES_CTR_MECH_INFO_TYPE || mech_type == AES_CCM_MECH_INFO_TYPE || + mech_type == AES_GCM_MECH_INFO_TYPE || mech_type == RC4_MECH_INFO_TYPE || mech_type == RSA_PKCS_MECH_INFO_TYPE || mech_type == RSA_X_509_MECH_INFO_TYPE || @@ -4371,6 +4380,88 @@ out: return (rv); } +/* + * Resource control checks don't need to be done. Why? Because this routine + * knows the size of the structure, and it can't be overridden by a user. + * This is different from the crypto module, which has no knowledge of + * specific mechanisms, and therefore has to trust specified size of the + * parameter. 
This trust, or lack of trust, is why the size of the + * parameter has to be charged against the project resource control. + */ +static int +copyin_aes_gcm_mech(crypto_mechanism_t *in_mech, crypto_mechanism_t *out_mech, + int *out_error, int mode) +{ + STRUCT_DECL(crypto_mechanism, mech); + STRUCT_DECL(CK_AES_GCM_PARAMS, params); + CK_AES_GCM_PARAMS *aes_gcm_params; + caddr_t pp; + size_t param_len; + int error = 0; + int rv = 0; + + STRUCT_INIT(mech, mode); + STRUCT_INIT(params, mode); + bcopy(in_mech, STRUCT_BUF(mech), STRUCT_SIZE(mech)); + pp = STRUCT_FGETP(mech, cm_param); + param_len = STRUCT_FGET(mech, cm_param_len); + + if (param_len != STRUCT_SIZE(params)) { + rv = CRYPTO_ARGUMENTS_BAD; + goto out; + } + + out_mech->cm_type = STRUCT_FGET(mech, cm_type); + out_mech->cm_param = NULL; + out_mech->cm_param_len = 0; + if (pp != NULL) { + size_t nonce_len, auth_data_len, total_param_len; + + if (copyin((char *)pp, STRUCT_BUF(params), param_len) != 0) { + out_mech->cm_param = NULL; + error = EFAULT; + goto out; + } + + nonce_len = STRUCT_FGET(params, ulIvLen); + auth_data_len = STRUCT_FGET(params, ulAADLen); + + /* allocate param structure */ + total_param_len = + sizeof (CK_AES_GCM_PARAMS) + nonce_len + auth_data_len; + aes_gcm_params = kmem_alloc(total_param_len, KM_NOSLEEP); + if (aes_gcm_params == NULL) { + rv = CRYPTO_HOST_MEMORY; + goto out; + } + aes_gcm_params->ulTagBits = STRUCT_FGET(params, ulTagBits); + aes_gcm_params->ulIvLen = nonce_len; + aes_gcm_params->ulAADLen = auth_data_len; + aes_gcm_params->pIv + = (uchar_t *)aes_gcm_params + sizeof (CK_AES_GCM_PARAMS); + aes_gcm_params->pAAD = aes_gcm_params->pIv + nonce_len; + + if (copyin((char *)STRUCT_FGETP(params, pIv), + aes_gcm_params->pIv, nonce_len) != 0) { + kmem_free(aes_gcm_params, total_param_len); + out_mech->cm_param = NULL; + error = EFAULT; + goto out; + } + if (copyin((char *)STRUCT_FGETP(params, pAAD), + aes_gcm_params->pAAD, auth_data_len) != 0) { + kmem_free(aes_gcm_params, total_param_len); + out_mech->cm_param = NULL; + error = EFAULT; + goto out; + } + out_mech->cm_param = (char *)aes_gcm_params; + out_mech->cm_param_len = sizeof (CK_AES_GCM_PARAMS); + } +out: + *out_error = error; + return (rv); +} /* * Resource control checks don't need to be done. Why? 
Because this routine @@ -4594,6 +4685,10 @@ dprov_copyin_mechanism(crypto_provider_handle_t provider, rv = copyin_aes_ccm_mech(umech, kmech, &error, mode); goto out; + case AES_GCM_MECH_INFO_TYPE: + rv = copyin_aes_gcm_mech(umech, kmech, &error, mode); + goto out; + case DH_PKCS_DERIVE_MECH_INFO_TYPE: expected_param_len = param_len; break; @@ -4690,6 +4785,22 @@ dprov_free_mechanism(crypto_provider_handle_t provider, mech->cm_param = NULL; mech->cm_param_len = 0; } + return (CRYPTO_SUCCESS); + } + case AES_GCM_MECH_INFO_TYPE: { + CK_AES_GCM_PARAMS *params; + size_t total_param_len; + + if ((mech->cm_param != NULL) && (mech->cm_param_len != 0)) { + /* LINTED: pointer alignment */ + params = (CK_AES_GCM_PARAMS *)mech->cm_param; + total_param_len = mech->cm_param_len + + params->ulIvLen + params->ulAADLen; + kmem_free(params, total_param_len); + mech->cm_param = NULL; + mech->cm_param_len = 0; + } + return (CRYPTO_SUCCESS); } default: diff --git a/usr/src/uts/common/sys/crypto/common.h b/usr/src/uts/common/sys/crypto/common.h index 361dbbaa40..795d25b07c 100644 --- a/usr/src/uts/common/sys/crypto/common.h +++ b/usr/src/uts/common/sys/crypto/common.h @@ -26,8 +26,6 @@ #ifndef _SYS_CRYPTO_COMMON_H #define _SYS_CRYPTO_COMMON_H -#pragma ident "%Z%%M% %I% %E% SMI" - /* * Header file for the common data structures of the cryptographic framework */ @@ -84,6 +82,16 @@ typedef struct CK_AES_CCM_PARAMS { uchar_t *authData; } CK_AES_CCM_PARAMS; +/* CK_AES_GCM_PARAMS provides parameters to the CKM_AES_GCM mechanism */ +typedef struct CK_AES_GCM_PARAMS { + uchar_t *pIv; + ulong_t ulIvLen; + ulong_t ulIvBits; + uchar_t *pAAD; + ulong_t ulAADLen; + ulong_t ulTagBits; +} CK_AES_GCM_PARAMS; + #ifdef _KERNEL /* * CK_ECDH1_DERIVE_PARAMS provides the parameters to the @@ -117,6 +125,16 @@ typedef struct CK_AES_CCM_PARAMS32 { caddr32_t authData; } CK_AES_CCM_PARAMS32; +/* needed for 32-bit applications running on 64-bit kernels */ +typedef struct CK_AES_GCM_PARAMS32 { + caddr32_t pIv; + uint32_t ulIvLen; + uint32_t ulIvBits; + caddr32_t pAAD; + uint32_t ulAADLen; + uint32_t ulTagBits; +} CK_AES_GCM_PARAMS32; + typedef struct CK_ECDH1_DERIVE_PARAMS32 { uint32_t kdf; uint32_t ulSharedDataLen; @@ -173,6 +191,7 @@ typedef uint32_t crypto_keysize_unit_t; #define SUN_CKM_AES_ECB "CKM_AES_ECB" #define SUN_CKM_AES_CTR "CKM_AES_CTR" #define SUN_CKM_AES_CCM "CKM_AES_CCM" +#define SUN_CKM_AES_GCM "CKM_AES_GCM" #define SUN_CKM_RC4 "CKM_RC4" #define SUN_CKM_RSA_PKCS "CKM_RSA_PKCS" #define SUN_CKM_RSA_X_509 "CKM_RSA_X_509" |
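For reference, a kernel consumer hands these GCM parameters to the framework through the cm_param field of crypto_mechanism_t, which aes_common_init_ctx() above checks against sizeof (CK_AES_GCM_PARAMS). A minimal sketch, assuming caller-supplied iv and aad buffers; example_gcm_mech() is a hypothetical helper, not part of the patch, and only the structure layout and size/tag-length checks come from the diff above.

#include <sys/crypto/api.h>
#include <sys/crypto/common.h>

/*
 * Hypothetical consumer code: fill in a CKM_AES_GCM mechanism.
 * The IV length, tag length, and buffer arguments are illustrative.
 */
static int
example_gcm_mech(crypto_mechanism_t *mech, CK_AES_GCM_PARAMS *params,
    uchar_t *iv, uchar_t *aad, size_t aad_len)
{
	params->pIv = iv;
	params->ulIvLen = 12;		/* a 12-byte IV takes the fast path */
					/* in gcm_format_initial_blocks() */
	params->ulIvBits = 96;
	params->pAAD = aad;
	params->ulAADLen = aad_len;
	params->ulTagBits = 128;	/* one of the sizes gcm_validate_args() accepts */

	mech->cm_type = crypto_mech2id(SUN_CKM_AES_GCM);
	if (mech->cm_type == CRYPTO_MECH_INVALID)
		return (CRYPTO_MECHANISM_INVALID);
	mech->cm_param = (char *)params;
	mech->cm_param_len = sizeof (CK_AES_GCM_PARAMS);
	return (CRYPTO_SUCCESS);
}

The 32-bit variant CK_AES_GCM_PARAMS32 exists only so the copyin path can translate parameters from 32-bit userland, as copyin_aes_gcm_mech() above does with the STRUCT_* macros.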