/*
 * Verify that the key and the total encryptable-buffer length are
 * valid for single DES, then expand the key into *schedule.
 * Returns 0 on success, or KRB5_BAD_KEYSIZE / KRB5_BAD_MSIZE /
 * KRB5DES_BAD_KEYPAR / KRB5DES_WEAK_KEY on failure.
 */
static krb5_error_code
validate_and_schedule(krb5_key key, const krb5_data *ivec,
                      const krb5_crypto_iov *data, size_t num_data,
                      mit_des_key_schedule schedule)
{
    size_t idx, total = 0;

    /* Total up the bytes in the buffers subject to encryption. */
    for (idx = 0; idx < num_data; idx++) {
        if (ENCRYPT_IOV(&data[idx]))
            total += data[idx].data.length;
    }

    if (key->keyblock.length != 8)
        return KRB5_BAD_KEYSIZE;
    /* Input must be whole DES blocks; ivec, if given, one block. */
    if (total % 8 != 0)
        return KRB5_BAD_MSIZE;
    if (ivec != NULL && ivec->length != 8)
        return KRB5_BAD_MSIZE;

    switch (mit_des_key_sched(key->keyblock.contents, schedule)) {
    case -1:
        return KRB5DES_BAD_KEYPAR;
    case -2:
        return KRB5DES_WEAK_KEY;
    default:
        return 0;
    }
}
/*
 * Decrypt the encryptable buffers in data with AES: plain CBC for a
 * single block, CBC-with-ciphertext-stealing for more than one.
 * Returns 0 for zero-length input; KRB5_BAD_MSIZE if a lone block is
 * not exactly BLOCK_SIZE bytes.
 */
krb5_error_code
krb5int_aes_decrypt(krb5_key key, const krb5_data *ivec,
                    krb5_crypto_iov *data, size_t num_data)
{
    size_t idx, total = 0;
    int blocks;

    /* Sum the ciphertext length over the encryptable iovs. */
    for (idx = 0; idx < num_data; idx++) {
        if (ENCRYPT_IOV(&data[idx]))
            total += data[idx].data.length;
    }

    /* Round up so a trailing partial block counts as a block. */
    blocks = (total + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (blocks == 1) {
        /* CTS needs at least two blocks; a single block must be full. */
        if (total != BLOCK_SIZE)
            return KRB5_BAD_MSIZE;
        return cbc_decr(key, ivec, data, num_data);
    }
    if (blocks > 1)
        return cts_decr(key, ivec, data, num_data, total);

    /* No input at all: nothing to do. */
    return 0;
}
/*
 * In-place RC4 encryption/decryption of the encryptable buffers in
 * data.  If state is non-null it carries a cipher context across calls
 * (keyed on first use); otherwise a one-shot context is allocated,
 * used, scrubbed, and freed.  Returns 0, KRB5_BAD_KEYSIZE,
 * KRB5_BAD_MSIZE, ENOMEM, or an error from k5_arcfour_init().
 */
static krb5_error_code
k5_arcfour_docrypt_iov(const krb5_keyblock *key, const krb5_data *state,
                       krb5_crypto_iov *data, size_t num_data)
{
    ArcfourContext *arcfour_ctx = NULL;
    ArcFourCipherState *cipher_state = NULL;
    krb5_error_code ret;
    size_t i;

    if (key->length != 16)
        return KRB5_BAD_KEYSIZE;
    if (state != NULL && state->length != sizeof(ArcFourCipherState))
        return KRB5_BAD_MSIZE;

    if (state != NULL) {
        /* Caller-provided state: key the context on first use only. */
        cipher_state = (ArcFourCipherState *)state->data;
        arcfour_ctx = &cipher_state->ctx;
        if (cipher_state->initialized == 0) {
            ret = k5_arcfour_init(arcfour_ctx, key->contents, key->length);
            if (ret != 0)
                return ret;
            cipher_state->initialized = 1;
        }
    } else {
        /* One-shot operation: key a temporary context. */
        arcfour_ctx = malloc(sizeof(*arcfour_ctx));
        if (arcfour_ctx == NULL)
            return ENOMEM;
        ret = k5_arcfour_init(arcfour_ctx, key->contents, key->length);
        if (ret != 0) {
            free(arcfour_ctx);
            return ret;
        }
    }

    /* RC4 is a stream cipher, so each buffer can be crypted in place. */
    for (i = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            k5_arcfour_crypt(arcfour_ctx,
                             (unsigned char *)iov->data.data,
                             (const unsigned char *)iov->data.data,
                             iov->data.length);
    }

    if (state == NULL) {
        /*
         * Scrub the key-stream state before freeing.  Write through a
         * volatile pointer so the stores cannot be optimized away as
         * dead (a plain memset() immediately before free() may be
         * elided by the compiler, leaking key material into freed
         * memory; cf. CERT MSC06-C).
         */
        volatile unsigned char *p = (volatile unsigned char *)arcfour_ctx;
        size_t j;

        for (j = 0; j < sizeof(*arcfour_ctx); j++)
            p[j] = 0;
        free(arcfour_ctx);
    }

    return 0;
}
/*
 * Validate key and input sizes for triple DES.  On success, returns 0
 * and sets *empty to whether there is any data to process at all.
 */
static krb5_error_code
validate(krb5_key key, const krb5_data *ivec, const krb5_crypto_iov *data,
         size_t num_data, krb5_boolean *empty)
{
    size_t n, total = 0;

    /* Count the bytes in the encryptable iov buffers. */
    for (n = 0; n < num_data; n++) {
        if (ENCRYPT_IOV(&data[n]))
            total += data[n].data.length;
    }

    if (key->keyblock.length != KRB5_MIT_DES3_KEYSIZE)
        return KRB5_BAD_KEYSIZE;
    /* Input must be a whole number of DES blocks. */
    if (total % DES_BLOCK_SIZE != 0)
        return KRB5_BAD_MSIZE;
    /* The ivec, when present, must be exactly one DES block. */
    if (ivec != NULL && ivec->length != 8)
        return KRB5_BAD_MSIZE;

    *empty = (total == 0);
    return 0;
}
/*
 * Decrypt the encryptable buffers in data with AES, in place: plain
 * block decryption for a single block, CBC-with-ciphertext-stealing
 * for more than one.  If ivec is non-null it supplies the initial
 * chaining vector and is updated on exit (multi-block case only).
 * Aborts the process if the key cannot be scheduled.
 */
static krb5_error_code
krb5int_aes_decrypt_iov(const krb5_keyblock *key,
                        const krb5_data *ivec,
                        krb5_crypto_iov *data,
                        size_t num_data)
{
    aes_ctx ctx;
    char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE];
    int nblocks = 0, blockno, i;
    size_t input_length;
    struct iov_block_state input_pos, output_pos;

    CHECK_SIZES;

    if (aes_dec_key(key->contents, key->length, &ctx) != aes_good)
        abort();

    /* tmp holds the chaining vector; all-zero when no ivec is given. */
    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    /* Total up the ciphertext length over the encryptable iovs. */
    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    /* Round up, so a trailing partial block counts as a block. */
    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (nblocks == 1) {
        /* Single full block.  NOTE(review): the chaining vector in tmp
         * is overwritten by the ciphertext block, not XORed in —
         * presumably intended for the one-block case; confirm against
         * callers. */
        krb5int_c_iov_get_block((unsigned char *)tmp, BLOCK_SIZE, data, num_data, &input_pos);
        dec(tmp2, tmp, &ctx);
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2, BLOCK_SIZE, &output_pos);
    } else if (nblocks > 1) {
        char blockN2[BLOCK_SIZE]; /* second last */
        char blockN1[BLOCK_SIZE]; /* last block */

        /* Ordinary CBC decryption for all but the last two blocks. */
        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            char blockN[BLOCK_SIZE];

            krb5int_c_iov_get_block((unsigned char *)blockN, BLOCK_SIZE, data, num_data, &input_pos);
            dec(tmp2, blockN, &ctx);
            xorblock(tmp2, tmp);
            krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2, BLOCK_SIZE, &output_pos);
            /* This ciphertext block chains into the next iteration. */
            memcpy(tmp, blockN, BLOCK_SIZE);
        }

        /* Do last two blocks, the second of which (next-to-last block
           of plaintext) may be incomplete. */

        /* First, get the last two encrypted blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block((unsigned char *)blockN2, BLOCK_SIZE, data, num_data, &input_pos);
        krb5int_c_iov_get_block((unsigned char *)blockN1, BLOCK_SIZE, data, num_data, &input_pos);

        /* Decrypt second last block */
        dec(tmp2, blockN2, &ctx);
        /* Set tmp2 to last (possibly partial) plaintext block, and save
           it.  (The zero padding of blockN1 leaves the stolen tail of
           tmp2 intact.) */
        xorblock(tmp2, blockN1);
        memcpy(blockN2, tmp2, BLOCK_SIZE);

        /* Maybe keep the trailing part, and copy in the last ciphertext
           block: reconstruct the full next-to-last ciphertext block by
           splicing the received partial block onto the stolen tail. */
        input_length %= BLOCK_SIZE;
        memcpy(tmp2, blockN1, input_length ? input_length : BLOCK_SIZE);
        dec(tmp3, tmp2, &ctx);
        xorblock(tmp3, tmp);
        /* Copy out ivec first before we clobber blockN1 with plaintext */
        if (ivec != NULL)
            memcpy(ivec->data, blockN1, BLOCK_SIZE);
        memcpy(blockN1, tmp3, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec (they were
           swapped by CTS on the encrypt side). */
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN1, BLOCK_SIZE, &output_pos);
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN2, BLOCK_SIZE, &output_pos);
    }

    return 0;
}
/*
 * Encrypt the encryptable buffers in data with AES, in place: plain
 * block encryption for a single block, CBC-with-ciphertext-stealing
 * for more than one.  If ivec is non-null it supplies the initial
 * chaining vector and is updated on exit (multi-block case only).
 * Aborts the process if the key cannot be scheduled.
 */
static krb5_error_code
krb5int_aes_encrypt_iov(const krb5_keyblock *key,
                        const krb5_data *ivec,
                        krb5_crypto_iov *data,
                        size_t num_data)
{
    aes_ctx ctx;
    char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE];
    int nblocks = 0, blockno;
    size_t input_length, i;
    struct iov_block_state input_pos, output_pos;

    if (aes_enc_key(key->contents, key->length, &ctx) != aes_good)
        abort();

    /* tmp holds the chaining vector; all-zero when no ivec is given. */
    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    /* Total up the plaintext length over the encryptable iovs. */
    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    /* Round up, so a trailing partial block counts as a block. */
    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (nblocks == 1) {
        /* Single full block.  NOTE(review): the chaining vector in tmp
         * is overwritten by the plaintext block, not XORed in —
         * presumably intended for the one-block case; confirm against
         * callers. */
        krb5int_c_iov_get_block((unsigned char *)tmp, BLOCK_SIZE, data, num_data, &input_pos);
        enc(tmp2, tmp, &ctx);
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2, BLOCK_SIZE, &output_pos);
    } else if (nblocks > 1) {
        char blockN2[BLOCK_SIZE]; /* second last */
        char blockN1[BLOCK_SIZE]; /* last block */

        /* Ordinary CBC encryption for all but the last two blocks. */
        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            char blockN[BLOCK_SIZE];

            krb5int_c_iov_get_block((unsigned char *)blockN, BLOCK_SIZE, data, num_data, &input_pos);
            xorblock(tmp, blockN);
            enc(tmp2, tmp, &ctx);
            krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2, BLOCK_SIZE, &output_pos);

            /* Set up for next block: chain this ciphertext forward. */
            memcpy(tmp, tmp2, BLOCK_SIZE);
        }

        /* Do final CTS step for last two blocks (the second of which
           may or may not be incomplete). */

        /* First, get the last two blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block((unsigned char *)blockN2, BLOCK_SIZE, data, num_data, &input_pos);
        krb5int_c_iov_get_block((unsigned char *)blockN1, BLOCK_SIZE, data, num_data, &input_pos);

        /* Encrypt second last block */
        xorblock(tmp, blockN2);
        enc(tmp2, tmp, &ctx);
        memcpy(blockN2, tmp2, BLOCK_SIZE); /* blockN2 now contains first block */
        memcpy(tmp, tmp2, BLOCK_SIZE);

        /* Encrypt last block (zero-padded to a full block above). */
        xorblock(tmp, blockN1);
        enc(tmp2, tmp, &ctx);
        memcpy(blockN1, tmp2, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec (reverse order) */
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN1, BLOCK_SIZE, &output_pos);
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN2, BLOCK_SIZE, &output_pos);

        /* Hand the last-computed ciphertext block back as the new ivec. */
        if (ivec != NULL)
            memcpy(ivec->data, blockN1, BLOCK_SIZE);
    }

    return 0;
}
/*
 * Encrypt the encryptable buffers in data with Camellia, in place:
 * plain block encryption for a single block, CBC-with-ciphertext-
 * stealing for more than one.  The expanded encryption schedule is
 * cached on the key object across calls.  If ivec is non-null it
 * supplies the initial chaining vector and is updated on exit
 * (multi-block case only).  Returns ENOMEM if the cache cannot be
 * allocated; aborts the process if the key cannot be scheduled.
 */
static krb5_error_code
krb5int_camellia_encrypt(krb5_key key, const krb5_data *ivec,
                         krb5_crypto_iov *data, size_t num_data)
{
    unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE];
    int nblocks = 0, blockno;
    size_t input_length, i;
    struct iov_block_state input_pos, output_pos;

    /* Lazily allocate the per-key schedule cache; keybitlen == 0 marks
       each schedule as not yet expanded. */
    if (key->cache == NULL) {
        key->cache = malloc(sizeof(struct camellia_key_info_cache));
        if (key->cache == NULL)
            return ENOMEM;
        CACHE(key)->enc_ctx.keybitlen = CACHE(key)->dec_ctx.keybitlen = 0;
    }
    /* Expand the encryption schedule on first use. */
    if (CACHE(key)->enc_ctx.keybitlen == 0) {
        if (camellia_enc_key(key->keyblock.contents, key->keyblock.length,
                             &CACHE(key)->enc_ctx) != camellia_good)
            abort();
    }

    /* tmp holds the chaining vector; all-zero when no ivec is given. */
    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    /* Total up the plaintext length over the encryptable iovs. */
    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    /* Round up, so a trailing partial block counts as a block. */
    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (nblocks == 1) {
        /* Single full block.  NOTE(review): the chaining vector in tmp
         * is overwritten by the plaintext block, not XORed in —
         * presumably intended for the one-block case; confirm against
         * callers. */
        krb5int_c_iov_get_block(tmp, BLOCK_SIZE, data, num_data, &input_pos);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        krb5int_c_iov_put_block(data, num_data, tmp2, BLOCK_SIZE, &output_pos);
    } else if (nblocks > 1) {
        unsigned char blockN2[BLOCK_SIZE]; /* second last */
        unsigned char blockN1[BLOCK_SIZE]; /* last block */

        /* Ordinary CBC encryption for all but the last two blocks. */
        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            unsigned char blockN[BLOCK_SIZE], *block;

            /* The _nocopy helpers hand back a pointer in *block —
               presumably aliasing the iov buffer when the block is
               contiguous, with blockN as a bounce buffer otherwise;
               confirm against the helper's definition. */
            krb5int_c_iov_get_block_nocopy(blockN, BLOCK_SIZE, data, num_data, &input_pos, &block);
            xorblock(tmp, block);
            enc(block, tmp, &CACHE(key)->enc_ctx);
            krb5int_c_iov_put_block_nocopy(data, num_data, blockN, BLOCK_SIZE, &output_pos, block);

            /* Set up for next block: chain this ciphertext forward. */
            memcpy(tmp, block, BLOCK_SIZE);
        }

        /* Do final CTS step for last two blocks (the second of which
           may or may not be incomplete). */

        /* First, get the last two blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block(blockN2, BLOCK_SIZE, data, num_data, &input_pos);
        krb5int_c_iov_get_block(blockN1, BLOCK_SIZE, data, num_data, &input_pos);

        /* Encrypt second last block */
        xorblock(tmp, blockN2);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        memcpy(blockN2, tmp2, BLOCK_SIZE); /* blockN2 now contains first block */
        memcpy(tmp, tmp2, BLOCK_SIZE);

        /* Encrypt last block (zero-padded to a full block above). */
        xorblock(tmp, blockN1);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        memcpy(blockN1, tmp2, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec (reverse order) */
        krb5int_c_iov_put_block(data, num_data, blockN1, BLOCK_SIZE, &output_pos);
        krb5int_c_iov_put_block(data, num_data, blockN2, BLOCK_SIZE, &output_pos);

        /* Hand the last-computed ciphertext block back as the new ivec. */
        if (ivec != NULL)
            memcpy(ivec->data, blockN1, BLOCK_SIZE);
    }

    return 0;
}
/*
 * Decrypt the encryptable buffers in data with Camellia, in place:
 * plain block decryption for a single block, CBC-with-ciphertext-
 * stealing for more than one.  The expanded decryption schedule is
 * cached on the key object across calls.  If ivec is non-null it
 * supplies the initial chaining vector and receives the next-to-last
 * ciphertext block on exit (multi-block case only).  Returns ENOMEM
 * if the cache cannot be allocated; aborts the process if the key
 * cannot be scheduled.
 */
static krb5_error_code
krb5int_camellia_decrypt(krb5_key key, const krb5_data *ivec,
                         krb5_crypto_iov *data, size_t num_data)
{
    unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE];
    int nblocks = 0, blockno;
    unsigned int i;
    size_t input_length;
    struct iov_block_state input_pos, output_pos;

    /* Lazily allocate the per-key schedule cache; keybitlen == 0 marks
       each schedule as not yet expanded. */
    if (key->cache == NULL) {
        key->cache = malloc(sizeof(struct camellia_key_info_cache));
        if (key->cache == NULL)
            return ENOMEM;
        CACHE(key)->enc_ctx.keybitlen = CACHE(key)->dec_ctx.keybitlen = 0;
    }
    /* Expand the decryption schedule on first use. */
    if (CACHE(key)->dec_ctx.keybitlen == 0) {
        if (camellia_dec_key(key->keyblock.contents, key->keyblock.length,
                             &CACHE(key)->dec_ctx) != camellia_good)
            abort();
    }

    /* tmp holds the chaining vector; all-zero when no ivec is given. */
    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    /* Total up the ciphertext length over the encryptable iovs. */
    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    /* Round up, so a trailing partial block counts as a block. */
    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (nblocks == 1) {
        /* Single full block.  NOTE(review): the chaining vector in tmp
         * is overwritten by the ciphertext block, not XORed in —
         * presumably intended for the one-block case; confirm against
         * callers. */
        krb5int_c_iov_get_block(tmp, BLOCK_SIZE, data, num_data, &input_pos);
        dec(tmp2, tmp, &CACHE(key)->dec_ctx);
        krb5int_c_iov_put_block(data, num_data, tmp2, BLOCK_SIZE, &output_pos);
    } else if (nblocks > 1) {
        unsigned char blockN2[BLOCK_SIZE]; /* second last */
        unsigned char blockN1[BLOCK_SIZE]; /* last block */

        /* Ordinary CBC decryption for all but the last two blocks. */
        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            unsigned char blockN[BLOCK_SIZE], *block;

            krb5int_c_iov_get_block_nocopy(blockN, BLOCK_SIZE, data, num_data, &input_pos, &block);
            /* Save the ciphertext before decrypting in place; it chains
               into the next block. */
            memcpy(tmp2, block, BLOCK_SIZE);
            dec(block, block, &CACHE(key)->dec_ctx);
            xorblock(block, tmp);
            memcpy(tmp, tmp2, BLOCK_SIZE);
            krb5int_c_iov_put_block_nocopy(data, num_data, blockN, BLOCK_SIZE, &output_pos, block);
        }

        /* Do last two blocks, the second of which (next-to-last block
           of plaintext) may be incomplete. */

        /* First, get the last two encrypted blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block(blockN2, BLOCK_SIZE, data, num_data, &input_pos);
        krb5int_c_iov_get_block(blockN1, BLOCK_SIZE, data, num_data, &input_pos);

        /* Copy the ivec out before the blocks below are modified. */
        if (ivec != NULL)
            memcpy(ivec->data, blockN2, BLOCK_SIZE);

        /* Decrypt second last block */
        dec(tmp2, blockN2, &CACHE(key)->dec_ctx);
        /* Set tmp2 to last (possibly partial) plaintext block, and save
           it.  (The zero padding of blockN1 leaves the stolen tail of
           tmp2 intact.) */
        xorblock(tmp2, blockN1);
        memcpy(blockN2, tmp2, BLOCK_SIZE);

        /* Maybe keep the trailing part, and copy in the last ciphertext
           block: reconstruct the full next-to-last ciphertext block by
           splicing the received partial block onto the stolen tail. */
        input_length %= BLOCK_SIZE;
        memcpy(tmp2, blockN1, input_length ? input_length : BLOCK_SIZE);
        dec(tmp3, tmp2, &CACHE(key)->dec_ctx);
        xorblock(tmp3, tmp);
        memcpy(blockN1, tmp3, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec (they were
           swapped by CTS on the encrypt side). */
        krb5int_c_iov_put_block(data, num_data, blockN1, BLOCK_SIZE, &output_pos);
        krb5int_c_iov_put_block(data, num_data, blockN2, BLOCK_SIZE, &output_pos);
    }

    return 0;
}