author     Mark Powers <Mark.Powers@Sun.COM>  2008-09-12 14:31:13 -0700
committer  Mark Powers <Mark.Powers@Sun.COM>  2008-09-12 14:31:13 -0700
commit     16239bc82c111618343e0a5b1a70e0fc702d00e0 (patch)
tree       c177256a99bc4c0af5c31b0165e6dc6caee69ee4 /usr/src/common/crypto
parent     0d6e6b604e28d77cf288ed1fbe03b50a0ca103fb (diff)
download   illumos-joyent-16239bc82c111618343e0a5b1a70e0fc702d00e0.tar.gz
6693650 kernel implementation of AES lacks support for >64 bits long AES counter
Diffstat (limited to 'usr/src/common/crypto')
-rw-r--r--  usr/src/common/crypto/modes/cbc.c    114
-rw-r--r--  usr/src/common/crypto/modes/ctr.c     59
-rw-r--r--  usr/src/common/crypto/modes/ecb.c     50
-rw-r--r--  usr/src/common/crypto/modes/modes.h   88
4 files changed, 168 insertions, 143 deletions
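
The substance of the fix is in ctr.c: the CTR counter is no longer confined to the low 64 bits of the counter block, but may span up to 128 bits, tracked with a lower and an upper 64-bit mask and an explicit carry when the low word wraps. A minimal host-order sketch of that increment follows (illustrative names only; the kernel code keeps the counter block big-endian and byte-swaps with ntohll()/htonll(), as the ctr.c hunks below show):

#include <stdint.h>

/*
 * Illustrative only: increment a CTR counter that occupies the low
 * lower_mask bits of counter[1] and the low upper_mask bits of
 * counter[0], carrying from the low word into the high word on wrap.
 * Words are in host order here; the actual ctr.c operates on the
 * big-endian counter block, hence its ntohll()/htonll() pairs.
 */
static void
ctr_increment(uint64_t counter[2], uint64_t lower_mask, uint64_t upper_mask)
{
	uint64_t lower, upper;

	/* bump the counter bits held in the low word */
	lower = (counter[1] + 1) & lower_mask;
	counter[1] = (counter[1] & ~lower_mask) | lower;

	/* low word wrapped to zero: carry into the high word's counter bits */
	if (lower == 0 && upper_mask != 0) {
		upper = (counter[0] + 1) & upper_mask;
		counter[0] = (counter[0] & ~upper_mask) | upper;
	}
}

With a full 128-bit counter (both masks all ones) this is a plain 128-bit increment; with the old 64-bit limit, upper_mask is zero and the behaviour matches the pre-fix code.
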
diff --git a/usr/src/common/crypto/modes/cbc.c b/usr/src/common/crypto/modes/cbc.c
index 26cf9010b1..3fb17ee173 100644
--- a/usr/src/common/crypto/modes/cbc.c
+++ b/usr/src/common/crypto/modes/cbc.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifndef _KERNEL
#include <strings.h>
#include <limits.h>
@@ -58,32 +56,32 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
uint8_t *out_data_2;
size_t out_data_1_len;
- if (length + ctx->cc_remainder_len < block_size) {
+ if (length + ctx->cbc_remainder_len < block_size) {
/* accumulate bytes here and return */
bcopy(datap,
- (uint8_t *)ctx->cc_remainder + ctx->cc_remainder_len,
+ (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
length);
- ctx->cc_remainder_len += length;
- ctx->cc_copy_to = datap;
+ ctx->cbc_remainder_len += length;
+ ctx->cbc_copy_to = datap;
return (CRYPTO_SUCCESS);
}
- lastp = (uint8_t *)ctx->cc_iv;
+ lastp = (uint8_t *)ctx->cbc_iv;
if (out != NULL)
crypto_init_ptrs(out, &iov_or_mp, &offset);
do {
/* Unprocessed data from last call. */
- if (ctx->cc_remainder_len > 0) {
- need = block_size - ctx->cc_remainder_len;
+ if (ctx->cbc_remainder_len > 0) {
+ need = block_size - ctx->cbc_remainder_len;
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
- bcopy(datap, &((uint8_t *)ctx->cc_remainder)
- [ctx->cc_remainder_len], need);
+ bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
+ [ctx->cbc_remainder_len], need);
- blockp = (uint8_t *)ctx->cc_remainder;
+ blockp = (uint8_t *)ctx->cbc_remainder;
} else {
blockp = datap;
}
@@ -94,15 +92,15 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
* current clear block.
*/
xor_block(lastp, blockp);
- encrypt(ctx->cc_keysched, blockp, blockp);
+ encrypt(ctx->cbc_keysched, blockp, blockp);
- ctx->cc_lastp = blockp;
+ ctx->cbc_lastp = blockp;
lastp = blockp;
- if (ctx->cc_remainder_len > 0) {
- bcopy(blockp, ctx->cc_copy_to,
- ctx->cc_remainder_len);
- bcopy(blockp + ctx->cc_remainder_len, datap,
+ if (ctx->cbc_remainder_len > 0) {
+ bcopy(blockp, ctx->cbc_copy_to,
+ ctx->cbc_remainder_len);
+ bcopy(blockp + ctx->cbc_remainder_len, datap,
need);
}
} else {
@@ -111,7 +109,7 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
* current clear block.
*/
xor_block(blockp, lastp);
- encrypt(ctx->cc_keysched, lastp, lastp);
+ encrypt(ctx->cbc_keysched, lastp, lastp);
crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
&out_data_1_len, &out_data_2, block_size);
@@ -131,9 +129,9 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
}
/* Update pointer to next block of data to be processed. */
- if (ctx->cc_remainder_len != 0) {
+ if (ctx->cbc_remainder_len != 0) {
datap += need;
- ctx->cc_remainder_len = 0;
+ ctx->cbc_remainder_len = 0;
} else {
datap += block_size;
}
@@ -142,12 +140,12 @@ cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
- bcopy(datap, ctx->cc_remainder, remainder);
- ctx->cc_remainder_len = remainder;
- ctx->cc_copy_to = datap;
+ bcopy(datap, ctx->cbc_remainder, remainder);
+ ctx->cbc_remainder_len = remainder;
+ ctx->cbc_copy_to = datap;
goto out;
}
- ctx->cc_copy_to = NULL;
+ ctx->cbc_copy_to = NULL;
} while (remainder > 0);
@@ -155,16 +153,16 @@ out:
/*
* Save the last encrypted block in the context.
*/
- if (ctx->cc_lastp != NULL) {
- copy_block((uint8_t *)ctx->cc_lastp, (uint8_t *)ctx->cc_iv);
- ctx->cc_lastp = (uint8_t *)ctx->cc_iv;
+ if (ctx->cbc_lastp != NULL) {
+ copy_block((uint8_t *)ctx->cbc_lastp, (uint8_t *)ctx->cbc_iv);
+ ctx->cbc_lastp = (uint8_t *)ctx->cbc_iv;
}
return (CRYPTO_SUCCESS);
}
#define OTHER(a, ctx) \
- (((a) == (ctx)->cc_lastblock) ? (ctx)->cc_iv : (ctx)->cc_lastblock)
+ (((a) == (ctx)->cbc_lastblock) ? (ctx)->cbc_iv : (ctx)->cbc_lastblock)
/* ARGSUSED */
int
@@ -185,32 +183,32 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
uint8_t *out_data_2;
size_t out_data_1_len;
- if (length + ctx->cc_remainder_len < block_size) {
+ if (length + ctx->cbc_remainder_len < block_size) {
/* accumulate bytes here and return */
bcopy(datap,
- (uint8_t *)ctx->cc_remainder + ctx->cc_remainder_len,
+ (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
length);
- ctx->cc_remainder_len += length;
- ctx->cc_copy_to = datap;
+ ctx->cbc_remainder_len += length;
+ ctx->cbc_copy_to = datap;
return (CRYPTO_SUCCESS);
}
- lastp = ctx->cc_lastp;
+ lastp = ctx->cbc_lastp;
if (out != NULL)
crypto_init_ptrs(out, &iov_or_mp, &offset);
do {
/* Unprocessed data from last call. */
- if (ctx->cc_remainder_len > 0) {
- need = block_size - ctx->cc_remainder_len;
+ if (ctx->cbc_remainder_len > 0) {
+ need = block_size - ctx->cbc_remainder_len;
if (need > remainder)
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
- bcopy(datap, &((uint8_t *)ctx->cc_remainder)
- [ctx->cc_remainder_len], need);
+ bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
+ [ctx->cbc_remainder_len], need);
- blockp = (uint8_t *)ctx->cc_remainder;
+ blockp = (uint8_t *)ctx->cbc_remainder;
} else {
blockp = datap;
}
@@ -219,11 +217,11 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
copy_block(blockp, (uint8_t *)OTHER((uint64_t *)lastp, ctx));
if (out != NULL) {
- decrypt(ctx->cc_keysched, blockp,
- (uint8_t *)ctx->cc_remainder);
- blockp = (uint8_t *)ctx->cc_remainder;
+ decrypt(ctx->cbc_keysched, blockp,
+ (uint8_t *)ctx->cbc_remainder);
+ blockp = (uint8_t *)ctx->cbc_remainder;
} else {
- decrypt(ctx->cc_keysched, blockp, blockp);
+ decrypt(ctx->cbc_keysched, blockp, blockp);
}
/*
@@ -248,16 +246,16 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
/* update offset */
out->cd_offset += block_size;
- } else if (ctx->cc_remainder_len > 0) {
+ } else if (ctx->cbc_remainder_len > 0) {
/* copy temporary block to where it belongs */
- bcopy(blockp, ctx->cc_copy_to, ctx->cc_remainder_len);
- bcopy(blockp + ctx->cc_remainder_len, datap, need);
+ bcopy(blockp, ctx->cbc_copy_to, ctx->cbc_remainder_len);
+ bcopy(blockp + ctx->cbc_remainder_len, datap, need);
}
/* Update pointer to next block of data to be processed. */
- if (ctx->cc_remainder_len != 0) {
+ if (ctx->cbc_remainder_len != 0) {
datap += need;
- ctx->cc_remainder_len = 0;
+ ctx->cbc_remainder_len = 0;
} else {
datap += block_size;
}
@@ -266,17 +264,17 @@ cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
- bcopy(datap, ctx->cc_remainder, remainder);
- ctx->cc_remainder_len = remainder;
- ctx->cc_lastp = lastp;
- ctx->cc_copy_to = datap;
+ bcopy(datap, ctx->cbc_remainder, remainder);
+ ctx->cbc_remainder_len = remainder;
+ ctx->cbc_lastp = lastp;
+ ctx->cbc_copy_to = datap;
return (CRYPTO_SUCCESS);
}
- ctx->cc_copy_to = NULL;
+ ctx->cbc_copy_to = NULL;
} while (remainder > 0);
- ctx->cc_lastp = lastp;
+ ctx->cbc_lastp = lastp;
return (CRYPTO_SUCCESS);
}
@@ -296,11 +294,11 @@ cbc_init_ctx(cbc_ctx_t *cbc_ctx, char *param, size_t param_len,
#else
assert(param_len == block_size);
#endif
- copy_block((uchar_t *)param, cbc_ctx->cc_iv);
+ copy_block((uchar_t *)param, cbc_ctx->cbc_iv);
}
- cbc_ctx->cc_lastp = (uint8_t *)&cbc_ctx->cc_iv[0];
- cbc_ctx->cc_flags |= CBC_MODE;
+ cbc_ctx->cbc_lastp = (uint8_t *)&cbc_ctx->cbc_iv[0];
+ cbc_ctx->cbc_flags |= CBC_MODE;
return (CRYPTO_SUCCESS);
}
@@ -317,6 +315,6 @@ cbc_alloc_ctx(int kmflag)
#endif
return (NULL);
- cbc_ctx->cc_flags = CBC_MODE;
+ cbc_ctx->cbc_flags = CBC_MODE;
return (cbc_ctx);
}
diff --git a/usr/src/common/crypto/modes/ctr.c b/usr/src/common/crypto/modes/ctr.c
index de75d07be1..e44dc3e642 100644
--- a/usr/src/common/crypto/modes/ctr.c
+++ b/usr/src/common/crypto/modes/ctr.c
@@ -34,10 +34,7 @@
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
-
-#ifdef _LITTLE_ENDIAN
#include <sys/byteorder.h>
-#endif
/*
* Encrypt and decrypt multiple blocks of data in counter mode.
@@ -58,7 +55,7 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
uint8_t *out_data_1;
uint8_t *out_data_2;
size_t out_data_1_len;
- uint64_t counter;
+ uint64_t lower_counter, upper_counter;
if (length + ctx->ctr_remainder_len < block_size) {
/* accumulate bytes here and return */
@@ -97,23 +94,27 @@ ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length,
lastp = (uint8_t *)ctx->ctr_tmp;
/*
- * Increment counter. Counter bits are confined
- * to the bottom 64 bits of the counter block.
+ * Increment Counter.
*/
-#ifdef _LITTLE_ENDIAN
- counter = ntohll(ctx->ctr_cb[1] & ctx->ctr_counter_mask);
- counter = htonll(counter + 1);
-#else
- counter = ctx->ctr_cb[1] & ctx->ctr_counter_mask;
- counter++;
-#endif /* _LITTLE_ENDIAN */
- counter &= ctx->ctr_counter_mask;
- ctx->ctr_cb[1] =
- (ctx->ctr_cb[1] & ~(ctx->ctr_counter_mask)) | counter;
+ lower_counter = ntohll(ctx->ctr_cb[1] & ctx->ctr_lower_mask);
+ lower_counter = htonll(lower_counter + 1);
+ lower_counter &= ctx->ctr_lower_mask;
+ ctx->ctr_cb[1] = (ctx->ctr_cb[1] & ~(ctx->ctr_lower_mask)) |
+ lower_counter;
+
+ /* wrap around */
+ if (lower_counter == 0) {
+ upper_counter =
+ ntohll(ctx->ctr_cb[0] & ctx->ctr_upper_mask);
+ upper_counter = htonll(upper_counter + 1);
+ upper_counter &= ctx->ctr_upper_mask;
+ ctx->ctr_cb[0] =
+ (ctx->ctr_cb[0] & ~(ctx->ctr_upper_mask)) |
+ upper_counter;
+ }
/*
- * XOR the previous cipher block or IV with the
- * current clear block.
+ * XOR encrypted counter block with the current clear block.
*/
xor_block(blockp, lastp);
@@ -206,18 +207,24 @@ int
ctr_init_ctx(ctr_ctx_t *ctr_ctx, ulong_t count, uint8_t *cb,
void (*copy_block)(uint8_t *, uint8_t *))
{
- uint64_t mask = 0;
+ uint64_t upper_mask = 0;
+ uint64_t lower_mask = 0;
- if (count == 0 || count > 64) {
+ if (count == 0 || count > 128) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
- while (count-- > 0)
- mask |= (1ULL << count);
+ /* upper 64 bits of the mask */
+ if (count >= 64) {
+ count -= 64;
+ upper_mask = (count == 64) ? UINT64_MAX : (1ULL << count) - 1;
+ lower_mask = UINT64_MAX;
+ } else {
+ /* now the lower 63 bits */
+ lower_mask = (1ULL << count) - 1;
+ }
+ ctr_ctx->ctr_lower_mask = htonll(lower_mask);
+ ctr_ctx->ctr_upper_mask = htonll(upper_mask);
-#ifdef _LITTLE_ENDIAN
- mask = htonll(mask);
-#endif
- ctr_ctx->ctr_counter_mask = mask;
copy_block(cb, (uchar_t *)ctr_ctx->ctr_cb);
ctr_ctx->ctr_lastp = (uint8_t *)&ctr_ctx->ctr_cb[0];
ctr_ctx->ctr_flags |= CTR_MODE;
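
For reference, the mask pair that the reworked ctr_init_ctx() derives for a counter of count bits (1 through 128) can be sketched in host order as follows; the kernel stores both masks byte-swapped with htonll() so they line up with the big-endian counter block. Names here are illustrative, not part of the patch:

#include <stdint.h>

/* illustrative: split a 1..128-bit CTR counter width into two 64-bit masks */
static void
ctr_masks(unsigned int count, uint64_t *lower_mask, uint64_t *upper_mask)
{
	*upper_mask = 0;
	if (count >= 64) {
		*lower_mask = UINT64_MAX;	/* low word is all counter */
		if (count > 64)
			*upper_mask = (count == 128) ?
			    UINT64_MAX : (1ULL << (count - 64)) - 1;
	} else {
		*lower_mask = (1ULL << count) - 1;	/* counter fits in low word */
	}
}

For example, count = 96 yields lower_mask = 0xffffffffffffffff and upper_mask = 0x00000000ffffffff, while the previously supported maximum of count = 64 leaves upper_mask zero, matching the old single-mask behaviour.
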
diff --git a/usr/src/common/crypto/modes/ecb.c b/usr/src/common/crypto/modes/ecb.c
index 11ca1fd9f6..6e2efd11f2 100644
--- a/usr/src/common/crypto/modes/ecb.c
+++ b/usr/src/common/crypto/modes/ecb.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifndef _KERNEL
#include <strings.h>
#include <limits.h>
@@ -56,50 +54,50 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
uint8_t *out_data_2;
size_t out_data_1_len;
- if (length + ctx->cc_remainder_len < block_size) {
+ if (length + ctx->ecb_remainder_len < block_size) {
/* accumulate bytes here and return */
bcopy(datap,
- (uint8_t *)ctx->cc_remainder + ctx->cc_remainder_len,
+ (uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len,
length);
- ctx->cc_remainder_len += length;
- ctx->cc_copy_to = datap;
+ ctx->ecb_remainder_len += length;
+ ctx->ecb_copy_to = datap;
return (CRYPTO_SUCCESS);
}
- lastp = (uint8_t *)ctx->cc_iv;
+ lastp = (uint8_t *)ctx->ecb_iv;
if (out != NULL)
crypto_init_ptrs(out, &iov_or_mp, &offset);
do {
/* Unprocessed data from last call. */
- if (ctx->cc_remainder_len > 0) {
- need = block_size - ctx->cc_remainder_len;
+ if (ctx->ecb_remainder_len > 0) {
+ need = block_size - ctx->ecb_remainder_len;
if (need > remainder)
return (CRYPTO_DATA_LEN_RANGE);
- bcopy(datap, &((uint8_t *)ctx->cc_remainder)
- [ctx->cc_remainder_len], need);
+ bcopy(datap, &((uint8_t *)ctx->ecb_remainder)
+ [ctx->ecb_remainder_len], need);
- blockp = (uint8_t *)ctx->cc_remainder;
+ blockp = (uint8_t *)ctx->ecb_remainder;
} else {
blockp = datap;
}
if (out == NULL) {
- cipher(ctx->cc_keysched, blockp, blockp);
+ cipher(ctx->ecb_keysched, blockp, blockp);
- ctx->cc_lastp = blockp;
+ ctx->ecb_lastp = blockp;
lastp = blockp;
- if (ctx->cc_remainder_len > 0) {
- bcopy(blockp, ctx->cc_copy_to,
- ctx->cc_remainder_len);
- bcopy(blockp + ctx->cc_remainder_len, datap,
+ if (ctx->ecb_remainder_len > 0) {
+ bcopy(blockp, ctx->ecb_copy_to,
+ ctx->ecb_remainder_len);
+ bcopy(blockp + ctx->ecb_remainder_len, datap,
need);
}
} else {
- cipher(ctx->cc_keysched, blockp, lastp);
+ cipher(ctx->ecb_keysched, blockp, lastp);
crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
&out_data_1_len, &out_data_2, block_size);
@@ -114,9 +112,9 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
}
/* Update pointer to next block of data to be processed. */
- if (ctx->cc_remainder_len != 0) {
+ if (ctx->ecb_remainder_len != 0) {
datap += need;
- ctx->cc_remainder_len = 0;
+ ctx->ecb_remainder_len = 0;
} else {
datap += block_size;
}
@@ -125,12 +123,12 @@ ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length,
/* Incomplete last block. */
if (remainder > 0 && remainder < block_size) {
- bcopy(datap, ctx->cc_remainder, remainder);
- ctx->cc_remainder_len = remainder;
- ctx->cc_copy_to = datap;
+ bcopy(datap, ctx->ecb_remainder, remainder);
+ ctx->ecb_remainder_len = remainder;
+ ctx->ecb_copy_to = datap;
goto out;
}
- ctx->cc_copy_to = NULL;
+ ctx->ecb_copy_to = NULL;
} while (remainder > 0);
@@ -151,6 +149,6 @@ ecb_alloc_ctx(int kmflag)
#endif
return (NULL);
- ecb_ctx->cc_flags = ECB_MODE;
+ ecb_ctx->ecb_flags = ECB_MODE;
return (ecb_ctx);
}
diff --git a/usr/src/common/crypto/modes/modes.h b/usr/src/common/crypto/modes/modes.h
index be57747bcd..f5781f9270 100644
--- a/usr/src/common/crypto/modes/modes.h
+++ b/usr/src/common/crypto/modes/modes.h
@@ -26,8 +26,6 @@
#ifndef _COMMON_CRYPTO_MODES_H
#define _COMMON_CRYPTO_MODES_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -60,8 +58,6 @@ extern "C" {
*
* cc_iv: Scratch buffer that sometimes contains the IV.
*
- * cc_lastblock: Scratch buffer.
- *
* cc_lastp: Pointer to previous block of ciphertext.
*
* cc_copy_to: Pointer to where encrypted residual data needs
@@ -79,7 +75,6 @@ struct common_ctx {
void *cc_keysched;
size_t cc_keysched_len;
uint64_t cc_iv[2];
- uint64_t cc_lastblock[2];
uint64_t cc_remainder[2];
size_t cc_remainder_len;
uint8_t *cc_lastp;
@@ -87,26 +82,53 @@ struct common_ctx {
uint32_t cc_flags;
};
-typedef struct common_ctx ecb_ctx_t;
-typedef struct common_ctx cbc_ctx_t;
typedef struct common_ctx common_ctx_t;
+typedef struct ecb_ctx {
+ struct common_ctx ecb_common;
+ uint64_t ecb_lastblock[2];
+} ecb_ctx_t;
+
+#define ecb_keysched ecb_common.cc_keysched
+#define ecb_keysched_len ecb_common.cc_keysched_len
+#define ecb_iv ecb_common.cc_iv
+#define ecb_remainder ecb_common.cc_remainder
+#define ecb_remainder_len ecb_common.cc_remainder_len
+#define ecb_lastp ecb_common.cc_lastp
+#define ecb_copy_to ecb_common.cc_copy_to
+#define ecb_flags ecb_common.cc_flags
+
+typedef struct cbc_ctx {
+ struct common_ctx cbc_common;
+ uint64_t cbc_lastblock[2];
+} cbc_ctx_t;
+
+#define cbc_keysched cbc_common.cc_keysched
+#define cbc_keysched_len cbc_common.cc_keysched_len
+#define cbc_iv cbc_common.cc_iv
+#define cbc_remainder cbc_common.cc_remainder
+#define cbc_remainder_len cbc_common.cc_remainder_len
+#define cbc_lastp cbc_common.cc_lastp
+#define cbc_copy_to cbc_common.cc_copy_to
+#define cbc_flags cbc_common.cc_flags
+
+/*
+ * ctr_lower_mask Bit-mask for lower 8 bytes of counter block.
+ * ctr_upper_mask Bit-mask for upper 8 bytes of counter block.
+ */
typedef struct ctr_ctx {
struct common_ctx ctr_common;
+ uint64_t ctr_lower_mask;
+ uint64_t ctr_upper_mask;
uint32_t ctr_tmp[4];
} ctr_ctx_t;
/*
- * ctr_cb Counter block.
- *
- * ctr_counter_mask Mask of counter bits in the last 8 bytes of the
- * counter block.
+ * ctr_cb Counter block.
*/
-
#define ctr_keysched ctr_common.cc_keysched
#define ctr_keysched_len ctr_common.cc_keysched_len
#define ctr_cb ctr_common.cc_iv
-#define ctr_counter_mask ctr_common.cc_lastblock[0]
#define ctr_remainder ctr_common.cc_remainder
#define ctr_remainder_len ctr_common.cc_remainder_len
#define ctr_lastp ctr_common.cc_lastp
@@ -142,12 +164,12 @@ typedef struct ccm_ctx {
size_t ccm_processed_mac_len;
uint8_t *ccm_pt_buf;
uint64_t ccm_mac_input_buf[2];
+ uint64_t ccm_counter_mask;
} ccm_ctx_t;
#define ccm_keysched ccm_common.cc_keysched
#define ccm_keysched_len ccm_common.cc_keysched_len
#define ccm_cb ccm_common.cc_iv
-#define ccm_counter_mask ccm_common.cc_lastblock[0]
#define ccm_remainder ccm_common.cc_remainder
#define ccm_remainder_len ccm_common.cc_remainder_len
#define ccm_lastp ccm_common.cc_lastp
@@ -165,12 +187,12 @@ typedef struct aes_ctx {
} acu;
} aes_ctx_t;
-#define ac_flags acu.acu_ecb.cc_flags
-#define ac_remainder_len acu.acu_ecb.cc_remainder_len
-#define ac_keysched acu.acu_ecb.cc_keysched
-#define ac_keysched_len acu.acu_ecb.cc_keysched_len
-#define ac_iv acu.acu_ecb.cc_iv
-#define ac_lastp acu.acu_ecb.cc_lastp
+#define ac_flags acu.acu_ecb.ecb_common.cc_flags
+#define ac_remainder_len acu.acu_ecb.ecb_common.cc_remainder_len
+#define ac_keysched acu.acu_ecb.ecb_common.cc_keysched
+#define ac_keysched_len acu.acu_ecb.ecb_common.cc_keysched_len
+#define ac_iv acu.acu_ecb.ecb_common.cc_iv
+#define ac_lastp acu.acu_ecb.ecb_common.cc_lastp
#define ac_pt_buf acu.acu_ccm.ccm_pt_buf
#define ac_mac_len acu.acu_ccm.ccm_mac_len
#define ac_data_len acu.acu_ccm.ccm_data_len
@@ -184,12 +206,12 @@ typedef struct blowfish_ctx {
} bcu;
} blowfish_ctx_t;
-#define bc_flags bcu.bcu_ecb.cc_flags
-#define bc_remainder_len bcu.bcu_ecb.cc_remainder_len
-#define bc_keysched bcu.bcu_ecb.cc_keysched
-#define bc_keysched_len bcu.bcu_ecb.cc_keysched_len
-#define bc_iv bcu.bcu_ecb.cc_iv
-#define bc_lastp bcu.bcu_ecb.cc_lastp
+#define bc_flags bcu.bcu_ecb.ecb_common.cc_flags
+#define bc_remainder_len bcu.bcu_ecb.ecb_common.cc_remainder_len
+#define bc_keysched bcu.bcu_ecb.ecb_common.cc_keysched
+#define bc_keysched_len bcu.bcu_ecb.ecb_common.cc_keysched_len
+#define bc_iv bcu.bcu_ecb.ecb_common.cc_iv
+#define bc_lastp bcu.bcu_ecb.ecb_common.cc_lastp
typedef struct des_ctx {
union {
@@ -198,14 +220,14 @@ typedef struct des_ctx {
} dcu;
} des_ctx_t;
-#define dc_flags dcu.dcu_ecb.cc_flags
-#define dc_remainder_len dcu.dcu_ecb.cc_remainder_len
-#define dc_keysched dcu.dcu_ecb.cc_keysched
-#define dc_keysched_len dcu.dcu_ecb.cc_keysched_len
-#define dc_iv dcu.dcu_ecb.cc_iv
-#define dc_lastp dcu.dcu_ecb.cc_lastp
+#define dc_flags dcu.dcu_ecb.ecb_common.cc_flags
+#define dc_remainder_len dcu.dcu_ecb.ecb_common.cc_remainder_len
+#define dc_keysched dcu.dcu_ecb.ecb_common.cc_keysched
+#define dc_keysched_len dcu.dcu_ecb.ecb_common.cc_keysched_len
+#define dc_iv dcu.dcu_ecb.ecb_common.cc_iv
+#define dc_lastp dcu.dcu_ecb.ecb_common.cc_lastp
-extern int ecb_cipher_contiguous_blocks(cbc_ctx_t *, char *, size_t,
+extern int ecb_cipher_contiguous_blocks(ecb_ctx_t *, char *, size_t,
crypto_data_t *, size_t, int (*cipher)(const void *, const uint8_t *,
uint8_t *));
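
The modes.h rework replaces the bare common_ctx typedefs with per-mode wrapper structs that embed struct common_ctx and alias its fields through #defines, so the shared cc_lastblock scratch can become mode-private storage (ecb_lastblock, cbc_lastblock) and CTR/CCM can grow their own mask fields. A toy illustration of the pattern (hypothetical struct and field names, not the illumos definitions):

#include <stdint.h>

/* shared state embedded in every mode-specific context */
struct common_state {
	uint64_t cs_iv[2];
	uint32_t cs_flags;
};

/* mode wrapper: common state first, then mode-private fields */
typedef struct toy_cbc_state {
	struct common_state tc_common;
	uint64_t tc_lastblock[2];	/* private scratch, no longer shared */
} toy_cbc_state_t;

/* field aliases keep call sites terse while reaching into the embedded struct */
#define tc_iv		tc_common.cs_iv
#define tc_flags	tc_common.cs_flags

Generic code can still treat a toy_cbc_state_t pointer as a struct common_state pointer (the embedded member comes first), while mode-specific code writes ctx->tc_flags and the preprocessor expands it to ctx->tc_common.cs_flags; this is the same scheme the patch applies to ecb_ctx_t, cbc_ctx_t, ctr_ctx_t, and ccm_ctx_t.
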