summaryrefslogtreecommitdiff
path: root/usr/src
diff options
context:
space:
mode:
authorJason King <jason.king@joyent.com>2019-08-21 14:05:59 +0000
committerJason King <jason.king@joyent.com>2020-01-21 10:42:28 -0600
commit2f9f8a9bcff03868ad346b312981e5d198aafd63 (patch)
treeebba65a4015a338f6e2dab50847586d2c4d256b4 /usr/src
parent709d065fa472580d0fd685caa2fe31c61c2fa25c (diff)
downloadillumos-joyent-2f9f8a9bcff03868ad346b312981e5d198aafd63.tar.gz
11966 CTR mode tries to be both a stream and block cipher and fails at both
Reviewed by: Dan McDonald <danmcd@joyent.com> Reviewed by: Robert Mustacchi <rm@fingolfin.org> Approved by: Gordon Ross <gordon.w.ross@gmail.com>
Diffstat (limited to 'usr/src')
-rw-r--r--usr/src/common/crypto/aes/aes_modes.c6
-rw-r--r--usr/src/common/crypto/modes/ctr.c297
-rw-r--r--usr/src/common/crypto/modes/modes.h10
-rw-r--r--usr/src/lib/pkcs11/libsoftcrypto/common/mapfile-vers3
-rw-r--r--usr/src/lib/pkcs11/pkcs11_softtoken/common/softAESCrypt.c94
-rw-r--r--usr/src/test/crypto-tests/tests/modes/aes/ctr/aes_ctr.c2
-rw-r--r--usr/src/uts/common/crypto/io/aes.c179
7 files changed, 320 insertions, 271 deletions
diff --git a/usr/src/common/crypto/aes/aes_modes.c b/usr/src/common/crypto/aes/aes_modes.c
index b23c78d65c..92bbc862c3 100644
--- a/usr/src/common/crypto/aes/aes_modes.c
+++ b/usr/src/common/crypto/aes/aes_modes.c
@@ -21,7 +21,7 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
- * Copyright 2018, Joyent, Inc.
+ * Copyright 2019 Joyent, Inc.
*/
#include <sys/types.h>
@@ -101,7 +101,7 @@ aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length,
if (aes_ctx->ac_flags & CTR_MODE) {
rv = ctr_mode_contiguous_blocks(ctx, data, length, out,
- AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
+ AES_BLOCK_LEN, aes_encrypt_block);
} else if (aes_ctx->ac_flags & CCM_MODE) {
rv = ccm_mode_encrypt_contiguous_blocks(ctx, data, length,
out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
@@ -134,7 +134,7 @@ aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length,
if (aes_ctx->ac_flags & CTR_MODE) {
rv = ctr_mode_contiguous_blocks(ctx, data, length, out,
- AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
+ AES_BLOCK_LEN, aes_encrypt_block);
if (rv == CRYPTO_DATA_LEN_RANGE)
rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
} else if (aes_ctx->ac_flags & CCM_MODE) {
diff --git a/usr/src/common/crypto/modes/ctr.c b/usr/src/common/crypto/modes/ctr.c
index 919ed3ab53..0257980587 100644
--- a/usr/src/common/crypto/modes/ctr.c
+++ b/usr/src/common/crypto/modes/ctr.c
@@ -21,6 +21,8 @@
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright 2019 Joyent, Inc.
*/
#ifndef _KERNEL
@@ -30,6 +32,7 @@
#include <security/cryptoki.h>
#endif
+#include <sys/debug.h>
#include <sys/types.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
@@ -37,164 +40,205 @@
#include <sys/byteorder.h>
/*
- * Encrypt and decrypt multiple blocks of data in counter mode.
+ * CTR (counter mode) is a stream cipher. That is, it generates a
+ * pseudo-random keystream that is used to XOR with the input to
+ * encrypt or decrypt. The pseudo-random keystream is generated by
+ * concatenating a nonce (supplied during initialization) with a
+ * counter (initialized to zero) to form an input block to the cipher
+ * mechanism. The resulting output of the cipher is used as a chunk
+ * of the pseudo-random keystream. Once all of the bytes of the
+ * keystream block have been used, the counter is incremented and
+ * the process repeats.
+ *
+ * Since this is a stream cipher, we do not accumulate input cipher
+ * text like we do for block modes. Instead we use ctr_ctx_t->ctr_offset
+ * to track the amount of bytes used in the current keystream block.
*/
-int
-ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
- crypto_data_t *out, size_t block_size,
- int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct),
- void (*xor_block)(uint8_t *, uint8_t *))
+
+static void
+ctr_new_keyblock(ctr_ctx_t *ctx,
+ int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct))
{
- size_t remainder = length;
- size_t need;
- uint8_t *datap = (uint8_t *)data;
- uint8_t *blockp;
- uint8_t *lastp;
- void *iov_or_mp;
- offset_t offset;
- uint8_t *out_data_1;
- uint8_t *out_data_2;
- size_t out_data_1_len;
uint64_t lower_counter, upper_counter;
- if (length + ctx->ctr_remainder_len < block_size) {
- /* accumulate bytes here and return */
- bcopy(datap,
- (uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len,
- length);
- ctx->ctr_remainder_len += length;
- ctx->ctr_copy_to = datap;
- return (CRYPTO_SUCCESS);
+ /* increment the counter */
+ lower_counter = ntohll(ctx->ctr_cb[1] & ctx->ctr_lower_mask);
+ lower_counter = htonll(lower_counter + 1);
+ lower_counter &= ctx->ctr_lower_mask;
+ ctx->ctr_cb[1] = (ctx->ctr_cb[1] & ~(ctx->ctr_lower_mask)) |
+ lower_counter;
+
+ /* wrap around */
+ if (lower_counter == 0) {
+ upper_counter = ntohll(ctx->ctr_cb[0] & ctx->ctr_upper_mask);
+ upper_counter = htonll(upper_counter + 1);
+ upper_counter &= ctx->ctr_upper_mask;
+ ctx->ctr_cb[0] = (ctx->ctr_cb[0] & ~(ctx->ctr_upper_mask)) |
+ upper_counter;
}
- lastp = (uint8_t *)ctx->ctr_cb;
- if (out != NULL)
- crypto_init_ptrs(out, &iov_or_mp, &offset);
-
- do {
- /* Unprocessed data from last call. */
- if (ctx->ctr_remainder_len > 0) {
- need = block_size - ctx->ctr_remainder_len;
-
- if (need > remainder)
- return (CRYPTO_DATA_LEN_RANGE);
-
- bcopy(datap, &((uint8_t *)ctx->ctr_remainder)
- [ctx->ctr_remainder_len], need);
-
- blockp = (uint8_t *)ctx->ctr_remainder;
- } else {
- blockp = datap;
- }
-
- /* ctr_cb is the counter block */
- cipher(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb,
- (uint8_t *)ctx->ctr_tmp);
+ /* generate the new keyblock */
+ cipher(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb,
+ (uint8_t *)ctx->ctr_keystream);
+ ctx->ctr_offset = 0;
+}
- lastp = (uint8_t *)ctx->ctr_tmp;
+/*
+ * XOR the input with the keystream and write the result to out.
+ * This requires that the amount of data in 'in' is >= outlen
+ * (ctr_mode_contiguous_blocks() guarantees this for us before we are
+ * called). As CTR mode is a stream cipher, we cannot use a cipher's
+ * xxx_xor_block function (e.g. aes_xor_block()) as we must handle
+ * arbitrary lengths of input and should not buffer/accumulate partial blocks
+ * between calls.
+ */
+static void
+ctr_xor(ctr_ctx_t *ctx, const uint8_t *in, uint8_t *out, size_t outlen,
+ size_t block_size,
+ int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct))
+{
+ const uint8_t *keyp;
+ size_t keyamt;
+ while (outlen > 0) {
/*
- * Increment Counter.
+ * This occurs once we've consumed all the bytes in the
+ * current block of the keystream. ctr_init_ctx() creates
+ * the initial block of the keystream, so we always start
+ * with a full block of key data.
*/
- lower_counter = ntohll(ctx->ctr_cb[1] & ctx->ctr_lower_mask);
- lower_counter = htonll(lower_counter + 1);
- lower_counter &= ctx->ctr_lower_mask;
- ctx->ctr_cb[1] = (ctx->ctr_cb[1] & ~(ctx->ctr_lower_mask)) |
- lower_counter;
-
- /* wrap around */
- if (lower_counter == 0) {
- upper_counter =
- ntohll(ctx->ctr_cb[0] & ctx->ctr_upper_mask);
- upper_counter = htonll(upper_counter + 1);
- upper_counter &= ctx->ctr_upper_mask;
- ctx->ctr_cb[0] =
- (ctx->ctr_cb[0] & ~(ctx->ctr_upper_mask)) |
- upper_counter;
+ if (ctx->ctr_offset == block_size) {
+ ctr_new_keyblock(ctx, cipher);
}
+ keyp = (uint8_t *)ctx->ctr_keystream + ctx->ctr_offset;
+ keyamt = block_size - ctx->ctr_offset;
+
/*
- * XOR encrypted counter block with the current clear block.
+ * xor a byte at a time (while we have data and output
+ * space) and try to get in, out, and keyp 32-bit aligned.
+ * If in, out, and keyp all do become 32-bit aligned,
+ * we switch to xor-ing 32-bits at a time until we run out
+ * of 32-bit chunks, then switch back to xor-ing a byte at
+ * a time for any remainder.
*/
- xor_block(blockp, lastp);
-
- if (out == NULL) {
- if (ctx->ctr_remainder_len > 0) {
- bcopy(lastp, ctx->ctr_copy_to,
- ctx->ctr_remainder_len);
- bcopy(lastp + ctx->ctr_remainder_len, datap,
- need);
- }
- } else {
- crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
- &out_data_1_len, &out_data_2, block_size);
-
- /* copy block to where it belongs */
- bcopy(lastp, out_data_1, out_data_1_len);
- if (out_data_2 != NULL) {
- bcopy(lastp + out_data_1_len, out_data_2,
- block_size - out_data_1_len);
- }
- /* update offset */
- out->cd_offset += block_size;
+ while (keyamt > 0 && outlen > 0 &&
+ !IS_P2ALIGNED(in, sizeof (uint32_t)) &&
+ !IS_P2ALIGNED(out, sizeof (uint32_t)) &&
+ !IS_P2ALIGNED(keyp, sizeof (uint32_t))) {
+ *out++ = *in++ ^ *keyp++;
+ keyamt--;
+ outlen--;
}
- /* Update pointer to next block of data to be processed. */
- if (ctx->ctr_remainder_len != 0) {
- datap += need;
- ctx->ctr_remainder_len = 0;
- } else {
- datap += block_size;
+ if (keyamt > 3 && outlen > 3 &&
+ IS_P2ALIGNED(in, sizeof (uint32_t)) &&
+ IS_P2ALIGNED(out, sizeof (uint32_t)) &&
+ IS_P2ALIGNED(keyp, sizeof (uint32_t))) {
+ const uint32_t *key32 = (const uint32_t *)keyp;
+ const uint32_t *in32 = (const uint32_t *)in;
+ uint32_t *out32 = (uint32_t *)out;
+
+ do {
+ *out32++ = *in32++ ^ *key32++;
+ keyamt -= sizeof (uint32_t);
+ outlen -= sizeof (uint32_t);
+ } while (keyamt > 3 && outlen > 3);
+
+ keyp = (const uint8_t *)key32;
+ in = (const uint8_t *)in32;
+ out = (uint8_t *)out32;
}
- remainder = (size_t)&data[length] - (size_t)datap;
-
- /* Incomplete last block. */
- if (remainder > 0 && remainder < block_size) {
- bcopy(datap, ctx->ctr_remainder, remainder);
- ctx->ctr_remainder_len = remainder;
- ctx->ctr_copy_to = datap;
- goto out;
+ while (keyamt > 0 && outlen > 0) {
+ *out++ = *in++ ^ *keyp++;
+ keyamt--;
+ outlen--;
}
- ctx->ctr_copy_to = NULL;
- } while (remainder > 0);
-
-out:
- return (CRYPTO_SUCCESS);
+ ctx->ctr_offset = block_size - keyamt;
+ }
}
+/*
+ * Encrypt and decrypt multiple blocks of data in counter mode.
+ */
int
-ctr_mode_final(ctr_ctx_t *ctx, crypto_data_t *out,
- int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
+ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *in, size_t in_length,
+ crypto_data_t *out, size_t block_size,
+ int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct))
{
- uint8_t *lastp;
- uint8_t *p;
- int i;
- int rv;
+ size_t in_remainder = in_length;
+ uint8_t *inp = (uint8_t *)in;
+ void *iov_or_mp;
+ offset_t offset;
+ uint8_t *out_data;
+ uint8_t *out_data_remainder;
+ size_t out_data_len;
+
+ if (block_size > sizeof (ctx->ctr_keystream))
+ return (CRYPTO_ARGUMENTS_BAD);
+
+ if (out == NULL)
+ return (CRYPTO_ARGUMENTS_BAD);
- if (out->cd_length < ctx->ctr_remainder_len)
+ /* Make sure 'out->cd_offset + in_length' doesn't overflow. */
+ if (out->cd_offset < 0)
return (CRYPTO_DATA_LEN_RANGE);
+ if (SIZE_MAX - in_length < (size_t)out->cd_offset)
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
- encrypt_block(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb,
- (uint8_t *)ctx->ctr_tmp);
+ /*
+ * This check guarantees 'out' contains sufficient space for
+ * the resulting output.
+ */
+ if (out->cd_offset + in_length > out->cd_length)
+ return (CRYPTO_BUFFER_TOO_SMALL);
- lastp = (uint8_t *)ctx->ctr_tmp;
- p = (uint8_t *)ctx->ctr_remainder;
- for (i = 0; i < ctx->ctr_remainder_len; i++) {
- p[i] ^= lastp[i];
- }
+ crypto_init_ptrs(out, &iov_or_mp, &offset);
- rv = crypto_put_output_data(p, out, ctx->ctr_remainder_len);
- if (rv == CRYPTO_SUCCESS) {
- out->cd_offset += ctx->ctr_remainder_len;
- ctx->ctr_remainder_len = 0;
+ /* Now XOR the output with the keystream */
+ while (in_remainder > 0) {
+ /*
+ * If out is a uio_t or an mblk_t, in_remainder might be
+ * larger than an individual iovec_t or mblk_t in out.
+ * crypto_get_ptrs uses the value of offset to set the
+ * value of out_data to the correct address for writing
+ * and sets out_data_len to reflect the largest amount of data
+ * (up to in_remainder) that can be written to out_data. It
+ * also increments offset by out_data_len. out_data_remainder
+ * is set to the start of the next segment for writing, however
+ * it is not used here since the updated value of offset
+ * will be used in the next loop iteration to locate the
+ * next mblk_t/iovec_t. Since the sum of the size of all data
+ * buffers in 'out' (out->cd_length) was checked immediately
+ * prior to starting the loop, we should always terminate
+ * the loop.
+ */
+ crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data,
+ &out_data_len, &out_data_remainder, in_remainder);
+
+ /*
+ * crypto_get_ptrs() should guarantee these, but act as a
+ * safeguard in case the behavior ever changes.
+ */
+ ASSERT3U(out_data_len, <=, in_remainder);
+ ASSERT3U(out_data_len, >, 0);
+
+ ctr_xor(ctx, inp, out_data, out_data_len, block_size, cipher);
+
+ inp += out_data_len;
+ in_remainder -= out_data_len;
}
- return (rv);
+
+ out->cd_offset += in_length;
+
+ return (CRYPTO_SUCCESS);
}
int
ctr_init_ctx(ctr_ctx_t *ctr_ctx, ulong_t count, uint8_t *cb,
+ int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct),
void (*copy_block)(uint8_t *, uint8_t *))
{
uint64_t upper_mask = 0;
@@ -217,6 +261,11 @@ ctr_init_ctx(ctr_ctx_t *ctr_ctx, ulong_t count, uint8_t *cb,
copy_block(cb, (uchar_t *)ctr_ctx->ctr_cb);
ctr_ctx->ctr_lastp = (uint8_t *)&ctr_ctx->ctr_cb[0];
+
+ /* Generate the first block of the keystream */
+ cipher(ctr_ctx->ctr_keysched, (uint8_t *)ctr_ctx->ctr_cb,
+ (uint8_t *)ctr_ctx->ctr_keystream);
+
ctr_ctx->ctr_flags |= CTR_MODE;
return (CRYPTO_SUCCESS);
}
diff --git a/usr/src/common/crypto/modes/modes.h b/usr/src/common/crypto/modes/modes.h
index b7d247cec1..0e8fb66c8a 100644
--- a/usr/src/common/crypto/modes/modes.h
+++ b/usr/src/common/crypto/modes/modes.h
@@ -133,7 +133,8 @@ typedef struct ctr_ctx {
struct common_ctx ctr_common;
uint64_t ctr_lower_mask;
uint64_t ctr_upper_mask;
- uint32_t ctr_tmp[4];
+ size_t ctr_offset;
+ uint32_t ctr_keystream[4];
} ctr_ctx_t;
/*
@@ -310,8 +311,7 @@ extern int cbc_decrypt_contiguous_blocks(cbc_ctx_t *, char *, size_t,
extern int ctr_mode_contiguous_blocks(ctr_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
- int (*cipher)(const void *, const uint8_t *, uint8_t *),
- void (*xor_block)(uint8_t *, uint8_t *));
+ int (*cipher)(const void *, const uint8_t *, uint8_t *));
extern int ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *, char *, size_t,
crypto_data_t *, size_t,
@@ -359,15 +359,13 @@ extern int cmac_mode_final(cbc_ctx_t *, crypto_data_t *,
int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*xor_block)(uint8_t *, uint8_t *));
-extern int ctr_mode_final(ctr_ctx_t *, crypto_data_t *,
- int (*encrypt_block)(const void *, const uint8_t *, uint8_t *));
-
extern int cbc_init_ctx(cbc_ctx_t *, char *, size_t, size_t,
void (*copy_block)(uint8_t *, uint64_t *));
extern int cmac_init_ctx(cbc_ctx_t *, size_t);
extern int ctr_init_ctx(ctr_ctx_t *, ulong_t, uint8_t *,
+ int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
void (*copy_block)(uint8_t *, uint8_t *));
extern int ccm_init_ctx(ccm_ctx_t *, char *, int, boolean_t, size_t,
diff --git a/usr/src/lib/pkcs11/libsoftcrypto/common/mapfile-vers b/usr/src/lib/pkcs11/libsoftcrypto/common/mapfile-vers
index 2b732729f0..1ec1d4f87c 100644
--- a/usr/src/lib/pkcs11/libsoftcrypto/common/mapfile-vers
+++ b/usr/src/lib/pkcs11/libsoftcrypto/common/mapfile-vers
@@ -22,7 +22,7 @@
#
# Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright 2017 Jason King.
-# Copyright (c) 2018, Joyent, Inc.
+# Copyright 2019 Joyent, Inc.
#
#
@@ -90,7 +90,6 @@ SYMBOL_VERSION SUNWprivate {
cmac_init_ctx;
cmac_mode_final;
ctr_alloc_ctx;
- ctr_mode_final;
ctr_init_ctx;
des3_crunch_block;
des_alloc_keysched;
diff --git a/usr/src/lib/pkcs11/pkcs11_softtoken/common/softAESCrypt.c b/usr/src/lib/pkcs11/pkcs11_softtoken/common/softAESCrypt.c
index 61a807cff7..fb8dc6d448 100644
--- a/usr/src/lib/pkcs11/pkcs11_softtoken/common/softAESCrypt.c
+++ b/usr/src/lib/pkcs11/pkcs11_softtoken/common/softAESCrypt.c
@@ -226,7 +226,7 @@ soft_aes_init_ctx(aes_ctx_t *aes_ctx, CK_MECHANISM_PTR mech_p,
CK_AES_CTR_PARAMS *pp = (CK_AES_CTR_PARAMS *)mech_p->pParameter;
rc = ctr_init_ctx((ctr_ctx_t *)aes_ctx, pp->ulCounterBits,
- pp->cb, aes_copy_block);
+ pp->cb, aes_encrypt_block, aes_copy_block);
break;
}
case CKM_AES_CCM: {
@@ -811,38 +811,53 @@ soft_aes_encrypt_update(soft_session_t *session_p, CK_BYTE_PTR pData,
};
CK_MECHANISM_TYPE mech = session_p->encrypt.mech.mechanism;
CK_RV rv = CKR_OK;
- size_t out_len = aes_ctx->ac_remainder_len + ulDataLen;
+ size_t out_len;
int rc;
+ /*
+ * If pData is NULL, we should have zero bytes to process, and
+ * the aes_encrypt_contiguous_blocks() call will be an effective no-op.
+ */
+ IMPLY(pData == NULL, ulDataLen == 0);
+
/* Check size of the output buffer */
- if (aes_ctx->ac_flags & CMAC_MODE) {
+ switch (mech) {
+ case CKM_AES_CMAC:
/*
* The underlying CMAC implementation handles the storing of
* extra bytes and does not output any data until *_final,
* so do not bother looking at the size of the output
* buffer at this time.
*/
- if (pData == NULL) {
- *pulEncryptedDataLen = 0;
- return (CKR_OK);
- }
- } else {
+ out_len = 0;
+ break;
+ case CKM_AES_CTR:
+ /*
+ * CTR mode is a stream cipher, so we always output exactly as
+ * much ciphertext as input plaintext
+ */
+ out_len = ulDataLen;
+ break;
+ default:
+ out_len = aes_ctx->ac_remainder_len + ulDataLen;
+
/*
* The number of complete blocks we can encrypt right now.
* The underlying implementation will buffer any remaining data
* until the next *_update call.
*/
out_len &= ~(AES_BLOCK_LEN - 1);
+ break;
+ }
- if (pEncryptedData == NULL) {
- *pulEncryptedDataLen = out_len;
- return (CKR_OK);
- }
+ if (pEncryptedData == NULL) {
+ *pulEncryptedDataLen = out_len;
+ return (CKR_OK);
+ }
- if (*pulEncryptedDataLen < out_len) {
- *pulEncryptedDataLen = out_len;
- return (CKR_BUFFER_TOO_SMALL);
- }
+ if (*pulEncryptedDataLen < out_len) {
+ *pulEncryptedDataLen = out_len;
+ return (CKR_BUFFER_TOO_SMALL);
}
rc = aes_encrypt_contiguous_blocks(aes_ctx, (char *)pData, ulDataLen,
@@ -859,15 +874,6 @@ soft_aes_encrypt_update(soft_session_t *session_p, CK_BYTE_PTR pData,
return (CKR_FUNCTION_FAILED);
}
- /*
- * Since AES counter mode is a stream cipher, we call ctr_mode_final()
- * to pick up any remaining bytes. It is an internal function that
- * does not destroy the context like *normal* final routines.
- */
- if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
- rc = ctr_mode_final((ctr_ctx_t *)aes_ctx, &out,
- aes_encrypt_block);
- }
rv = crypto2pkcs11_error_number(rc);
return (rv);
@@ -1060,6 +1066,13 @@ soft_aes_decrypt_update(soft_session_t *session_p, CK_BYTE_PTR pEncryptedData,
out_len &= ~(AES_BLOCK_LEN - 1);
}
break;
+ case CKM_AES_CTR:
+ /*
+ * CKM_AES_CTR is a stream cipher, so we always output
+ * exactly as much output plaintext as input ciphertext
+ */
+ out_len = in_len;
+ break;
default:
out_len = aes_ctx->ac_remainder_len + in_len;
out_len &= ~(AES_BLOCK_LEN - 1);
@@ -1108,14 +1121,6 @@ soft_aes_decrypt_update(soft_session_t *session_p, CK_BYTE_PTR pEncryptedData,
*pulDataLen = out.cd_offset;
switch (mech) {
- case CKM_AES_CTR:
- if (aes_ctx->ac_remainder_len == 0) {
- break;
- }
- rc = ctr_mode_final((ctr_ctx_t *)aes_ctx, &out,
- aes_encrypt_block);
- rv = crypto2pkcs11_error_number(rc);
- break;
case CKM_AES_CBC_PAD:
if (buffer_block == NULL) {
break;
@@ -1170,7 +1175,11 @@ soft_aes_encrypt_final(soft_session_t *session_p,
out_len = AES_BLOCK_LEN;
break;
case CKM_AES_CTR:
- out_len = aes_ctx->ac_remainder_len;
+ /*
+ * Since CKM_AES_CTR is a stream cipher, we never buffer any
+ * input, so we always have 0 remaining bytes of output.
+ */
+ out_len = 0;
break;
case CKM_AES_CCM:
out_len = aes_ctx->ac_remainder_len +
@@ -1219,12 +1228,11 @@ soft_aes_encrypt_final(soft_session_t *session_p,
break;
}
case CKM_AES_CTR:
- if (aes_ctx->ac_remainder_len == 0) {
- break;
- }
-
- rc = ctr_mode_final((ctr_ctx_t *)aes_ctx, &data,
- aes_encrypt_block);
+ /*
+ * Since CKM_AES_CTR is a stream cipher, we never
+ * buffer any data, and thus have no remaining data
+ * to output at the end
+ */
break;
case CKM_AES_CCM:
rc = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, &data,
@@ -1361,7 +1369,11 @@ soft_aes_decrypt_final(soft_session_t *session_p, CK_BYTE_PTR pLastPart,
out_len = aes_ctx->ac_remainder_len;
break;
case CKM_AES_CTR:
- out_len = aes_ctx->ac_remainder_len;
+ /*
+ * Since CKM_AES_CTR is a stream cipher, we never have
+ * any remaining bytes to output.
+ */
+ out_len = 0;
break;
case CKM_AES_CCM:
out_len = aes_ctx->ac_data_len;
diff --git a/usr/src/test/crypto-tests/tests/modes/aes/ctr/aes_ctr.c b/usr/src/test/crypto-tests/tests/modes/aes/ctr/aes_ctr.c
index f5ef319478..fcaf042c10 100644
--- a/usr/src/test/crypto-tests/tests/modes/aes/ctr/aes_ctr.c
+++ b/usr/src/test/crypto-tests/tests/modes/aes/ctr/aes_ctr.c
@@ -27,7 +27,7 @@
* it has a remainder, so the result is different
* if len == 0 mod block_size vs len != 0 mod block_size
*/
-static size_t updatelens[] = { 16, CTEST_UPDATELEN_END };
+static size_t updatelens[] = { 16, 15, 17, CTEST_UPDATELEN_END };
int
main(void)
diff --git a/usr/src/uts/common/crypto/io/aes.c b/usr/src/uts/common/crypto/io/aes.c
index eec5384b01..e487fe8baf 100644
--- a/usr/src/uts/common/crypto/io/aes.c
+++ b/usr/src/uts/common/crypto/io/aes.c
@@ -21,7 +21,7 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc. All rights reserved.
- * Copyright 2018, Joyent, Inc.
+ * Copyright 2019 Joyent, Inc.
*/
/*
@@ -660,10 +660,16 @@ aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
AES_ARG_INPLACE(plaintext, ciphertext);
- /* compute number of bytes that will hold the ciphertext */
- out_len = aes_ctx->ac_remainder_len;
- out_len += plaintext->cd_length;
- out_len &= ~(AES_BLOCK_LEN - 1);
+ /*
+ * CTR mode does not accumulate plaintext across xx_update() calls --
+ * it always outputs the same number of bytes as the input (so
+ * ac_remainder_len is always 0). Other modes _do_ accumulate
+ * plaintext, and output only full blocks. For non-CTR modes, adjust
+ * the output size to reflect this.
+ */
+ out_len = plaintext->cd_length + aes_ctx->ac_remainder_len;
+ if ((aes_ctx->ac_flags & CTR_MODE) == 0)
+ out_len &= ~(AES_BLOCK_LEN - 1);
/*
* return length needed to store the output.
@@ -701,21 +707,11 @@ aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
ret = CRYPTO_ARGUMENTS_BAD;
}
- /*
- * Since AES counter mode is a stream cipher, we call
- * ctr_mode_final() to pick up any remaining bytes.
- * It is an internal function that does not destroy
- * the context like *normal* final routines.
- */
- if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
- ret = ctr_mode_final((ctr_ctx_t *)aes_ctx,
- ciphertext, aes_encrypt_block);
- }
-
if (ret == CRYPTO_SUCCESS) {
- if (plaintext != ciphertext)
+ if (plaintext != ciphertext) {
ciphertext->cd_length =
ciphertext->cd_offset - saved_offset;
+ }
} else {
ciphertext->cd_length = saved_length;
}
@@ -740,20 +736,28 @@ aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
AES_ARG_INPLACE(ciphertext, plaintext);
/*
- * Compute number of bytes that will hold the plaintext.
- * This is not necessary for CCM, GCM, and GMAC since these
- * mechanisms never return plaintext for update operations.
+ * Adjust the number of bytes that will hold the plaintext (out_len).
+ * CCM, GCM, and GMAC mechanisms never return plaintext for update
+ * operations, so we set out_len to 0 for those.
+ *
+ * CTR mode does not accumulate any ciphertext across xx_decrypt
+ * calls, and always outputs as many bytes of plaintext as
+ * ciphertext.
+ *
+ * The remaining mechanisms output full blocks of plaintext, so
+ * we round out_len down to the closest multiple of AES_BLOCK_LEN.
*/
- if ((aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
- out_len = aes_ctx->ac_remainder_len;
- out_len += ciphertext->cd_length;
+ out_len = aes_ctx->ac_remainder_len + ciphertext->cd_length;
+ if ((aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) != 0) {
+ out_len = 0;
+ } else if ((aes_ctx->ac_flags & CTR_MODE) == 0) {
out_len &= ~(AES_BLOCK_LEN - 1);
+ }
- /* return length needed to store the output */
- if (plaintext->cd_length < out_len) {
- plaintext->cd_length = out_len;
- return (CRYPTO_BUFFER_TOO_SMALL);
- }
+ /* return length needed to store the output */
+ if (plaintext->cd_length < out_len) {
+ plaintext->cd_length = out_len;
+ return (CRYPTO_BUFFER_TOO_SMALL);
}
saved_offset = plaintext->cd_offset;
@@ -785,19 +789,6 @@ aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
ret = CRYPTO_ARGUMENTS_BAD;
}
- /*
- * Since AES counter mode is a stream cipher, we call
- * ctr_mode_final() to pick up any remaining bytes.
- * It is an internal function that does not destroy
- * the context like *normal* final routines.
- */
- if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
- ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, plaintext,
- aes_encrypt_block);
- if (ret == CRYPTO_DATA_LEN_RANGE)
- ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
- }
-
if (ret == CRYPTO_SUCCESS) {
if (ciphertext != plaintext)
plaintext->cd_length =
@@ -828,14 +819,7 @@ aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
return (CRYPTO_ARGUMENTS_BAD);
}
- if (aes_ctx->ac_flags & CTR_MODE) {
- if (aes_ctx->ac_remainder_len > 0) {
- ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
- aes_encrypt_block);
- if (ret != CRYPTO_SUCCESS)
- return (ret);
- }
- } else if (aes_ctx->ac_flags & CCM_MODE) {
+ if (aes_ctx->ac_flags & CCM_MODE) {
ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, data,
AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
if (ret != CRYPTO_SUCCESS) {
@@ -858,7 +842,7 @@ aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
if (ret != CRYPTO_SUCCESS)
return (ret);
data->cd_length = AES_BLOCK_LEN;
- } else {
+ } else if ((aes_ctx->ac_flags & CTR_MODE) == 0) {
/*
* There must be no unprocessed plaintext.
* This happens if the length of the last data is
@@ -898,18 +882,13 @@ aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
* There must be no unprocessed ciphertext.
* This happens if the length of the last ciphertext is
* not a multiple of the AES block length.
+ *
+ * For CTR mode, ac_remainder_len is always zero (we never
+ * accumulate ciphertext across update calls with CTR mode).
*/
- if (aes_ctx->ac_remainder_len > 0) {
- if ((aes_ctx->ac_flags & CTR_MODE) == 0)
- return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
- else {
- ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
- aes_encrypt_block);
- if (ret == CRYPTO_DATA_LEN_RANGE)
- ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
- if (ret != CRYPTO_SUCCESS)
- return (ret);
- }
+ if (aes_ctx->ac_remainder_len > 0 &&
+ (aes_ctx->ac_flags & CTR_MODE) == 0) {
+ return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
}
if (aes_ctx->ac_flags & CCM_MODE) {
@@ -1069,36 +1048,49 @@ aes_encrypt_atomic(crypto_provider_handle_t provider,
}
if (ret == CRYPTO_SUCCESS) {
- if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
+ switch (mechanism->cm_type) {
+ case AES_CCM_MECH_INFO_TYPE:
ret = ccm_encrypt_final((ccm_ctx_t *)&aes_ctx,
ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
aes_xor_block);
if (ret != CRYPTO_SUCCESS)
goto out;
- ASSERT(aes_ctx.ac_remainder_len == 0);
- } else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
- mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
+ ASSERT3U(aes_ctx.ac_remainder_len, ==, 0);
+ break;
+ case AES_GCM_MECH_INFO_TYPE:
+ case AES_GMAC_MECH_INFO_TYPE:
ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx,
ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
aes_copy_block, aes_xor_block);
if (ret != CRYPTO_SUCCESS)
goto out;
- ASSERT(aes_ctx.ac_remainder_len == 0);
- } else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
- if (aes_ctx.ac_remainder_len > 0) {
- ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
- ciphertext, aes_encrypt_block);
- if (ret != CRYPTO_SUCCESS)
- goto out;
- }
- } else if (mechanism->cm_type == AES_CMAC_MECH_INFO_TYPE) {
+ ASSERT3U(aes_ctx.ac_remainder_len, ==, 0);
+ break;
+ case AES_CTR_MECH_INFO_TYPE:
+ /*
+ * Note that this use of the ASSERT3U has a slightly
+ * different meaning than the other uses in the
+ * switch statement. The other uses are to ensure
+ * no unprocessed plaintext remains after encryption
+ * (and that the input plaintext was an exact multiple
+ * of AES_BLOCK_LEN).
+ *
+ * For CTR mode, it is ensuring that no input
+ * plaintext was ever segmented and buffered during
+ * processing (since it's a stream cipher).
+ */
+ ASSERT3U(aes_ctx.ac_remainder_len, ==, 0);
+ break;
+ case AES_CMAC_MECH_INFO_TYPE:
ret = cmac_mode_final((cbc_ctx_t *)&aes_ctx,
ciphertext, aes_encrypt_block,
aes_xor_block);
if (ret != CRYPTO_SUCCESS)
goto out;
- } else {
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ break;
+ default:
+ ASSERT3U(aes_ctx.ac_remainder_len, ==, 0);
+ break;
}
if (plaintext != ciphertext) {
@@ -1210,7 +1202,8 @@ aes_decrypt_atomic(crypto_provider_handle_t provider,
}
if (ret == CRYPTO_SUCCESS) {
- if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
+ switch (mechanism->cm_type) {
+ case AES_CCM_MECH_INFO_TYPE:
ASSERT(aes_ctx.ac_processed_data_len
== aes_ctx.ac_data_len);
ASSERT(aes_ctx.ac_processed_mac_len
@@ -1218,7 +1211,7 @@ aes_decrypt_atomic(crypto_provider_handle_t provider,
ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx,
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
aes_copy_block, aes_xor_block);
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT3U(aes_ctx.ac_remainder_len, ==, 0);
if ((ret == CRYPTO_SUCCESS) &&
(ciphertext != plaintext)) {
plaintext->cd_length =
@@ -1226,12 +1219,13 @@ aes_decrypt_atomic(crypto_provider_handle_t provider,
} else {
plaintext->cd_length = saved_length;
}
- } else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
- mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
+ break;
+ case AES_GCM_MECH_INFO_TYPE:
+ case AES_GMAC_MECH_INFO_TYPE:
ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx,
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
aes_xor_block);
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT3U(aes_ctx.ac_remainder_len, ==, 0);
if ((ret == CRYPTO_SUCCESS) &&
(ciphertext != plaintext)) {
plaintext->cd_length =
@@ -1239,23 +1233,20 @@ aes_decrypt_atomic(crypto_provider_handle_t provider,
} else {
plaintext->cd_length = saved_length;
}
- } else if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
- ASSERT(aes_ctx.ac_remainder_len == 0);
- if (ciphertext != plaintext)
+ break;
+ case AES_CTR_MECH_INFO_TYPE:
+ if (ciphertext != plaintext) {
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
- } else {
- if (aes_ctx.ac_remainder_len > 0) {
- ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
- plaintext, aes_encrypt_block);
- if (ret == CRYPTO_DATA_LEN_RANGE)
- ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
- if (ret != CRYPTO_SUCCESS)
- goto out;
}
- if (ciphertext != plaintext)
+ break;
+ default:
+ ASSERT3U(aes_ctx.ac_remainder_len, ==, 0);
+ if (ciphertext != plaintext) {
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
+ }
+ break;
}
} else {
plaintext->cd_length = saved_length;
@@ -1391,7 +1382,7 @@ aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
}
pp = (CK_AES_CTR_PARAMS *)(void *)mechanism->cm_param;
rv = ctr_init_ctx((ctr_ctx_t *)aes_ctx, pp->ulCounterBits,
- pp->cb, aes_copy_block);
+ pp->cb, aes_encrypt_block, aes_copy_block);
break;
}
case AES_CCM_MECH_INFO_TYPE: