/*
 * Decrypt one block of ciphertext in place within the iov chain using
 * CBC mode through OpenSSL's EVP interface.
 *
 * key      - the krb5 key; its length selects the cipher via map_mode().
 * ivec     - optional initial vector (NULL means the cipher default).
 * data     - iov chain holding exactly one block of ciphertext.
 * num_data - number of entries in data.
 *
 * Returns 0 on success, KRB5_CRYPTO_INTERNAL on any EVP failure.
 */
static krb5_error_code
cbc_decr(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
         size_t num_data)
{
    int ret = 0, olen = BLOCK_SIZE;
    unsigned char iblock[BLOCK_SIZE], oblock[BLOCK_SIZE];
    EVP_CIPHER_CTX ciph_ctx;
    struct iov_block_state input_pos, output_pos;

    EVP_CIPHER_CTX_init(&ciph_ctx);

    ret = EVP_DecryptInit_ex(&ciph_ctx, map_mode(key->keyblock.length),
                             NULL, key->keyblock.contents,
                             (ivec) ? (unsigned char*)ivec->data : NULL);
    if (ret == 0) {
        /* Release context state (including any copied key material);
         * previously this error path leaked the initialized context. */
        EVP_CIPHER_CTX_cleanup(&ciph_ctx);
        return KRB5_CRYPTO_INTERNAL;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);
    krb5int_c_iov_get_block(iblock, BLOCK_SIZE, data, num_data, &input_pos);
    /* Exactly one block is processed; no padding wanted. */
    EVP_CIPHER_CTX_set_padding(&ciph_ctx, 0);
    ret = EVP_DecryptUpdate(&ciph_ctx, oblock, &olen, iblock, BLOCK_SIZE);
    if (ret == 1) {
        krb5int_c_iov_put_block(data, num_data, oblock, BLOCK_SIZE,
                                &output_pos);
    }
    EVP_CIPHER_CTX_cleanup(&ciph_ctx);

    /* Scrub plaintext/ciphertext scratch buffers. */
    zap(iblock, BLOCK_SIZE);
    zap(oblock, BLOCK_SIZE);
    return (ret == 1) ? 0 : KRB5_CRYPTO_INTERNAL;
}
/*
 * Decrypt dlen bytes from the iov chain in place using AES in CTS
 * (ciphertext stealing) mode via OpenSSL's CRYPTO_cts128_decrypt.
 *
 * key      - AES key (length in bytes; NUM_BITS converts to bits).
 * ivec     - optional IV; must be exactly IV_CTS_BUF_SIZE bytes if given,
 *            and is updated with the chaining value on success.
 * data     - iov chain holding the ciphertext.
 * num_data - number of entries in data.
 * dlen     - total number of ciphertext bytes to process.
 *
 * Returns 0 on success, ENOMEM on allocation failure, or
 * KRB5_CRYPTO_INTERNAL on bad IV length / decrypt failure.
 */
static krb5_error_code
cts_decr(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
         size_t num_data, size_t dlen)
{
    int ret = 0;
    size_t size = 0;
    unsigned char *oblock = NULL;
    unsigned char *dbuf = NULL;
    unsigned char iv_cts[IV_CTS_BUF_SIZE];
    struct iov_block_state input_pos, output_pos;
    AES_KEY deck;

    memset(iv_cts, 0, sizeof(iv_cts));
    if (ivec && ivec->data) {
        if (ivec->length != sizeof(iv_cts))
            return KRB5_CRYPTO_INTERNAL;
        memcpy(iv_cts, ivec->data, ivec->length);
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    oblock = OPENSSL_malloc(dlen);
    if (!oblock)
        return ENOMEM;
    dbuf = OPENSSL_malloc(dlen);
    if (!dbuf) {
        OPENSSL_free(oblock);
        return ENOMEM;
    }

    AES_set_decrypt_key(key->keyblock.contents,
                        NUM_BITS * key->keyblock.length, &deck);

    /* Gather the (possibly scattered) ciphertext into one flat buffer. */
    krb5int_c_iov_get_block(dbuf, dlen, data, num_data, &input_pos);

    size = CRYPTO_cts128_decrypt((unsigned char *)dbuf, oblock, dlen, &deck,
                                 iv_cts, (cbc128_f)AES_cbc_encrypt);
    /* CRYPTO_cts128_decrypt returns the byte count processed (size_t);
     * zero indicates failure.  The previous "size <= 0" was an unsigned
     * comparison that could only ever match zero anyway. */
    if (size == 0)
        ret = KRB5_CRYPTO_INTERNAL;
    else
        krb5int_c_iov_put_block(data, num_data, oblock, dlen, &output_pos);

    /* Hand the updated chaining value back to the caller. */
    if (!ret && ivec && ivec->data)
        memcpy(ivec->data, iv_cts, sizeof(iv_cts));

    /* Scrub plaintext/ciphertext scratch before freeing. */
    zap(oblock, dlen);
    zap(dbuf, dlen);
    OPENSSL_free(oblock);
    OPENSSL_free(dbuf);

    return ret;
}
/*
 * Encrypt the iov chain in place with single DES in CBC mode, one
 * 8-byte block at a time.
 *
 * data     - iov chain to encrypt in place.
 * num_data - number of entries in data.
 * schedule - expanded DES key schedule.
 * ivec     - initial chaining vector, or NULL for all-zero; on return it
 *            receives the final ciphertext block (if any was produced).
 */
void
krb5int_des_cbc_encrypt(krb5_crypto_iov *data, unsigned long num_data,
                        const mit_des_key_schedule schedule,
                        mit_des_cblock ivec)
{
    unsigned DES_INT32 left, right;
    const unsigned DES_INT32 *kp;
    const unsigned char *ip;
    struct iov_block_state input_pos, output_pos;
    unsigned char storage[MIT_DES_BLOCK_LENGTH], *block = NULL, *ptr;

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    /* Get key pointer here.  This won't need to be reinitialized. */
    kp = (const unsigned DES_INT32 *)schedule;

    /* Initialize left and right with the contents of the initial vector. */
    ip = (ivec != NULL) ? ivec : mit_des_zeroblock;
    /* NOTE: GET_HALF_BLOCK advances ip as a side effect. */
    GET_HALF_BLOCK(left, ip);
    GET_HALF_BLOCK(right, ip);

    /* Work the length down 8 bytes at a time. */
    for (;;) {
        unsigned DES_INT32 temp;

        /* ptr either aliases iov memory directly or points into storage
         * when a block straddles iov boundaries. */
        if (!krb5int_c_iov_get_block_nocopy(storage, MIT_DES_BLOCK_LENGTH,
                                            data, num_data, &input_pos, &ptr))
            break;
        block = ptr;

        /* Decompose this block and xor it with the previous ciphertext. */
        GET_HALF_BLOCK(temp, ptr);
        left ^= temp;
        GET_HALF_BLOCK(temp, ptr);
        right ^= temp;

        /* Encrypt what we have and put back into block. */
        DES_DO_ENCRYPT(left, right, kp);
        ptr = block;
        PUT_HALF_BLOCK(left, ptr);
        PUT_HALF_BLOCK(right, ptr);

        krb5int_c_iov_put_block_nocopy(data, num_data, storage,
                                       MIT_DES_BLOCK_LENGTH, &output_pos,
                                       block);
    }

    /* Save the final ciphertext block into ivec for chaining; block is
     * NULL if no input blocks were processed, so ivec is left alone. */
    if (ivec != NULL && block != NULL) {
        ptr = ivec;
        PUT_HALF_BLOCK(left, ptr);
        PUT_HALF_BLOCK(right, ptr);
    }
}
/*
 * Encrypt the iov chain in place with triple DES (EDE) in CBC mode via
 * OpenSSL's EVP interface.
 *
 * key      - the 3DES key.
 * ivec     - optional IV; on return receives the last ciphertext block.
 * data     - iov chain to encrypt in place.
 * num_data - number of entries in data.
 *
 * Returns 0 on success, KRB5_CRYPTO_INTERNAL on EVP failure, or an
 * error from validate().
 */
static krb5_error_code
k5_des3_encrypt(krb5_key key, const krb5_data *ivec, krb5_crypto_iov *data,
                size_t num_data)
{
    int ret, olen = MIT_DES_BLOCK_LENGTH;
    unsigned char iblock[MIT_DES_BLOCK_LENGTH], oblock[MIT_DES_BLOCK_LENGTH];
    struct iov_block_state input_pos, output_pos;
    EVP_CIPHER_CTX ciph_ctx;
    krb5_boolean empty;

    ret = validate(key, ivec, data, num_data, &empty);
    if (ret != 0 || empty)
        return ret;

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    EVP_CIPHER_CTX_init(&ciph_ctx);

    ret = EVP_EncryptInit_ex(&ciph_ctx, EVP_des_ede3_cbc(), NULL,
                             key->keyblock.contents,
                             (ivec) ? (unsigned char*)ivec->data : NULL);
    if (!ret) {
        /* Release context state; previously this error path leaked the
         * initialized context (and any copied key material). */
        EVP_CIPHER_CTX_cleanup(&ciph_ctx);
        return KRB5_CRYPTO_INTERNAL;
    }

    EVP_CIPHER_CTX_set_padding(&ciph_ctx, 0);

    for (;;) {
        if (!krb5int_c_iov_get_block(iblock, MIT_DES_BLOCK_LENGTH,
                                     data, num_data, &input_pos))
            break;

        ret = EVP_EncryptUpdate(&ciph_ctx, oblock, &olen,
                                (unsigned char *)iblock,
                                MIT_DES_BLOCK_LENGTH);
        if (!ret)
            break;

        krb5int_c_iov_put_block(data, num_data, oblock,
                                MIT_DES_BLOCK_LENGTH, &output_pos);
    }

    /* Propagate the last ciphertext block back as the chaining value.
     * (On EVP failure the function returns an error below regardless.) */
    if (ivec != NULL)
        memcpy(ivec->data, oblock, MIT_DES_BLOCK_LENGTH);

    EVP_CIPHER_CTX_cleanup(&ciph_ctx);

    /* Scrub scratch buffers holding plaintext/ciphertext. */
    zap(iblock, sizeof(iblock));
    zap(oblock, sizeof(oblock));

    if (ret != 1)
        return KRB5_CRYPTO_INTERNAL;
    return 0;
}
/*
 * Compute a CBC-MAC over the iov chain using the builtin Camellia
 * implementation: chain each input block through XOR + encrypt, and
 * emit the final chaining value as the MAC.
 *
 * key      - Camellia key; a bad key length aborts (programming error).
 * data     - iovs to MAC (selection of blocks is up to the iov walker).
 * num_data - number of entries in data.
 * iv       - optional BLOCK_SIZE initial chaining value (zero if NULL).
 * output   - receives the BLOCK_SIZE MAC; must be at least that long.
 *
 * Returns 0 on success, KRB5_BAD_MSIZE if output is too small.
 */
krb5_error_code
krb5int_camellia_cbc_mac(krb5_key key, const krb5_crypto_iov *data,
                         size_t num_data, const krb5_data *iv,
                         krb5_data *output)
{
    camellia_ctx ctx;
    unsigned char blockY[BLOCK_SIZE];
    struct iov_block_state iov_state;

    if (output->length < BLOCK_SIZE)
        return KRB5_BAD_MSIZE;

    if (camellia_enc_key(key->keyblock.contents, key->keyblock.length,
                         &ctx) != camellia_good)
        abort();

    /* blockY is the running chaining value, seeded from iv or zero. */
    if (iv != NULL)
        memcpy(blockY, iv->data, BLOCK_SIZE);
    else
        memset(blockY, 0, BLOCK_SIZE);

    IOV_BLOCK_STATE_INIT(&iov_state);
    for (;;) {
        unsigned char blockB[BLOCK_SIZE];

        if (!krb5int_c_iov_get_block(blockB, BLOCK_SIZE, data, num_data,
                                     &iov_state))
            break;

        /* Y_n = E(X_n xor Y_{n-1}) */
        xorblock(blockB, blockY);
        if (camellia_enc_blk(blockB, blockY, &ctx) != camellia_good)
            abort();
    }

    output->length = BLOCK_SIZE;
    memcpy(output->data, blockY, BLOCK_SIZE);

    return 0;
}
/*
 * Compute a CBC-MAC over the iov chain using OpenSSL's Camellia:
 * each block is XORed into the running chaining value and encrypted;
 * the final chaining value is the MAC.
 *
 * key      - Camellia key (length in bytes; NUM_BITS converts to bits).
 * data     - iovs to MAC.
 * num_data - number of entries in data.
 * iv       - optional CAMELLIA_BLOCK_SIZE initial value (zero if NULL).
 * output   - receives the MAC; must hold at least one block.
 *
 * Returns 0 on success, KRB5_BAD_MSIZE if output is too small.
 */
krb5_error_code
krb5int_camellia_cbc_mac(krb5_key key, const krb5_crypto_iov *data,
                         size_t num_data, const krb5_data *iv,
                         krb5_data *output)
{
    CAMELLIA_KEY enck;
    unsigned char chain[CAMELLIA_BLOCK_SIZE];
    unsigned char inblk[CAMELLIA_BLOCK_SIZE];
    struct iov_block_state pos;

    if (output->length < CAMELLIA_BLOCK_SIZE)
        return KRB5_BAD_MSIZE;

    Camellia_set_key(key->keyblock.contents, NUM_BITS * key->keyblock.length,
                     &enck);

    /* Seed the chaining value from the IV, or zero when none is given. */
    if (iv == NULL)
        memset(chain, 0, CAMELLIA_BLOCK_SIZE);
    else
        memcpy(chain, iv->data, CAMELLIA_BLOCK_SIZE);

    IOV_BLOCK_STATE_INIT(&pos);
    while (krb5int_c_iov_get_block(inblk, CAMELLIA_BLOCK_SIZE, data,
                                   num_data, &pos)) {
        /* chain = E(block xor chain) */
        xorblock(inblk, chain);
        Camellia_ecb_encrypt(inblk, chain, &enck, 1);
    }

    output->length = CAMELLIA_BLOCK_SIZE;
    memcpy(output->data, chain, CAMELLIA_BLOCK_SIZE);

    return 0;
}
static krb5_error_code krb5int_aes_decrypt_iov(const krb5_keyblock *key, const krb5_data *ivec, krb5_crypto_iov *data, size_t num_data) { aes_ctx ctx; char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE]; int nblocks = 0, blockno, i; size_t input_length; struct iov_block_state input_pos, output_pos; CHECK_SIZES; if (aes_dec_key(key->contents, key->length, &ctx) != aes_good) abort(); if (ivec != NULL) memcpy(tmp, ivec->data, BLOCK_SIZE); else memset(tmp, 0, BLOCK_SIZE); for (i = 0, input_length = 0; i < num_data; i++) { krb5_crypto_iov *iov = &data[i]; if (ENCRYPT_IOV(iov)) input_length += iov->data.length; } IOV_BLOCK_STATE_INIT(&input_pos); IOV_BLOCK_STATE_INIT(&output_pos); nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE; if (nblocks == 1) { krb5int_c_iov_get_block((unsigned char *)tmp, BLOCK_SIZE, data, num_data, &input_pos); dec(tmp2, tmp, &ctx); krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2, BLOCK_SIZE, &output_pos); } else if (nblocks > 1) { char blockN2[BLOCK_SIZE]; /* second last */ char blockN1[BLOCK_SIZE]; /* last block */ for (blockno = 0; blockno < nblocks - 2; blockno++) { char blockN[BLOCK_SIZE]; krb5int_c_iov_get_block((unsigned char *)blockN, BLOCK_SIZE, data, num_data, &input_pos); dec(tmp2, blockN, &ctx); xorblock(tmp2, tmp); krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2, BLOCK_SIZE, &output_pos); memcpy(tmp, blockN, BLOCK_SIZE); } /* Do last two blocks, the second of which (next-to-last block of plaintext) may be incomplete. */ /* First, get the last two encrypted blocks */ memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */ krb5int_c_iov_get_block((unsigned char *)blockN2, BLOCK_SIZE, data, num_data, &input_pos); krb5int_c_iov_get_block((unsigned char *)blockN1, BLOCK_SIZE, data, num_data, &input_pos); /* Decrypt second last block */ dec(tmp2, blockN2, &ctx); /* Set tmp2 to last (possibly partial) plaintext block, and save it. 
*/ xorblock(tmp2, blockN1); memcpy(blockN2, tmp2, BLOCK_SIZE); /* Maybe keep the trailing part, and copy in the last ciphertext block. */ input_length %= BLOCK_SIZE; memcpy(tmp2, blockN1, input_length ? input_length : BLOCK_SIZE); dec(tmp3, tmp2, &ctx); xorblock(tmp3, tmp); /* Copy out ivec first before we clobber blockN1 with plaintext */ if (ivec != NULL) memcpy(ivec->data, blockN1, BLOCK_SIZE); memcpy(blockN1, tmp3, BLOCK_SIZE); /* Put the last two blocks back into the iovec */ krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN1, BLOCK_SIZE, &output_pos); krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN2, BLOCK_SIZE, &output_pos); } return 0; }
/*
 * Encrypt the iov chain in place with AES in CBC mode with ciphertext
 * stealing (CTS): the last two blocks are swapped on output so that a
 * final partial block can be accommodated without padding the result.
 *
 * key      - AES key; a bad key length aborts (programming error).
 * ivec     - optional IV (zero if NULL); on return receives the last
 *            ciphertext block for chaining (multi-block inputs only).
 * data     - iov chain; only ENCRYPT_IOV entries are processed.
 * num_data - number of entries in data.
 *
 * Always returns 0.
 */
static krb5_error_code
krb5int_aes_encrypt_iov(const krb5_keyblock *key,
                        const krb5_data *ivec,
                        krb5_crypto_iov *data,
                        size_t num_data)
{
    aes_ctx ctx;
    char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE];
    int nblocks = 0, blockno;
    size_t input_length, i;
    struct iov_block_state input_pos, output_pos;

    if (aes_enc_key(key->contents, key->length, &ctx) != aes_good)
        abort();

    /* tmp carries the CBC chaining value, seeded from ivec or zero. */
    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    /* Total plaintext length across the iovs selected for encryption. */
    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (nblocks == 1) {
        /* Single block: plain encrypt (CTS degenerates). */
        krb5int_c_iov_get_block((unsigned char *)tmp, BLOCK_SIZE, data,
                                num_data, &input_pos);
        enc(tmp2, tmp, &ctx);
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2,
                                BLOCK_SIZE, &output_pos);
    } else if (nblocks > 1) {
        char blockN2[BLOCK_SIZE];   /* second last */
        char blockN1[BLOCK_SIZE];   /* last block */

        /* Ordinary CBC encryption for all but the last two blocks. */
        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            char blockN[BLOCK_SIZE];

            krb5int_c_iov_get_block((unsigned char *)blockN, BLOCK_SIZE,
                                    data, num_data, &input_pos);
            xorblock(tmp, blockN);
            enc(tmp2, tmp, &ctx);
            krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2,
                                    BLOCK_SIZE, &output_pos);

            /* Set up for next block. */
            memcpy(tmp, tmp2, BLOCK_SIZE);
        }

        /* Do final CTS step for last two blocks (the second of which
           may or may not be incomplete). */

        /* First, get the last two blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block((unsigned char *)blockN2, BLOCK_SIZE,
                                data, num_data, &input_pos);
        krb5int_c_iov_get_block((unsigned char *)blockN1, BLOCK_SIZE,
                                data, num_data, &input_pos);

        /* Encrypt second last block */
        xorblock(tmp, blockN2);
        enc(tmp2, tmp, &ctx);
        memcpy(blockN2, tmp2, BLOCK_SIZE); /* blockN2 now contains first block */
        memcpy(tmp, tmp2, BLOCK_SIZE);

        /* Encrypt last block */
        xorblock(tmp, blockN1);
        enc(tmp2, tmp, &ctx);
        memcpy(blockN1, tmp2, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec (reverse order) */
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN1,
                                BLOCK_SIZE, &output_pos);
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN2,
                                BLOCK_SIZE, &output_pos);

        if (ivec != NULL)
            memcpy(ivec->data, blockN1, BLOCK_SIZE);
    }

    return 0;
}
/*
 * Encrypt the iov chain in place with Camellia in CBC mode with
 * ciphertext stealing (CTS), caching the expanded encryption key in
 * key->cache so repeated calls avoid re-expanding it.
 *
 * key      - Camellia key; expansion is cached via CACHE(key).
 * ivec     - optional IV (zero if NULL); on return receives the last
 *            ciphertext block for chaining (multi-block inputs only).
 * data     - iov chain; only ENCRYPT_IOV entries are processed.
 * num_data - number of entries in data.
 *
 * Returns 0 on success, ENOMEM if the key cache cannot be allocated.
 */
static krb5_error_code
krb5int_camellia_encrypt(krb5_key key, const krb5_data *ivec,
                         krb5_crypto_iov *data, size_t num_data)
{
    unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE];
    int nblocks = 0, blockno;
    size_t input_length, i;
    struct iov_block_state input_pos, output_pos;

    /* Lazily allocate the per-key schedule cache. */
    if (key->cache == NULL) {
        key->cache = malloc(sizeof(struct camellia_key_info_cache));
        if (key->cache == NULL)
            return ENOMEM;
        CACHE(key)->enc_ctx.keybitlen = CACHE(key)->dec_ctx.keybitlen = 0;
    }
    /* keybitlen == 0 marks an unexpanded schedule; expand once. */
    if (CACHE(key)->enc_ctx.keybitlen == 0) {
        if (camellia_enc_key(key->keyblock.contents, key->keyblock.length,
                             &CACHE(key)->enc_ctx) != camellia_good)
            abort();
    }

    /* tmp carries the CBC chaining value, seeded from ivec or zero. */
    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    /* Total plaintext length across the iovs selected for encryption. */
    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (nblocks == 1) {
        /* Single block: plain encrypt (CTS degenerates). */
        krb5int_c_iov_get_block(tmp, BLOCK_SIZE, data, num_data, &input_pos);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        krb5int_c_iov_put_block(data, num_data, tmp2, BLOCK_SIZE,
                                &output_pos);
    } else if (nblocks > 1) {
        unsigned char blockN2[BLOCK_SIZE];   /* second last */
        unsigned char blockN1[BLOCK_SIZE];   /* last block */

        /* Ordinary CBC for all but the last two blocks, encrypting in
         * place through the nocopy iov interface where possible. */
        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            unsigned char blockN[BLOCK_SIZE], *block;

            krb5int_c_iov_get_block_nocopy(blockN, BLOCK_SIZE,
                                           data, num_data, &input_pos,
                                           &block);
            xorblock(tmp, block);
            enc(block, tmp, &CACHE(key)->enc_ctx);
            krb5int_c_iov_put_block_nocopy(data, num_data, blockN,
                                           BLOCK_SIZE, &output_pos, block);

            /* Set up for next block. */
            memcpy(tmp, block, BLOCK_SIZE);
        }

        /* Do final CTS step for last two blocks (the second of which
           may or may not be incomplete). */

        /* First, get the last two blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block(blockN2, BLOCK_SIZE, data, num_data,
                                &input_pos);
        krb5int_c_iov_get_block(blockN1, BLOCK_SIZE, data, num_data,
                                &input_pos);

        /* Encrypt second last block */
        xorblock(tmp, blockN2);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        memcpy(blockN2, tmp2, BLOCK_SIZE); /* blockN2 now contains first block */
        memcpy(tmp, tmp2, BLOCK_SIZE);

        /* Encrypt last block */
        xorblock(tmp, blockN1);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        memcpy(blockN1, tmp2, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec (reverse order) */
        krb5int_c_iov_put_block(data, num_data, blockN1, BLOCK_SIZE,
                                &output_pos);
        krb5int_c_iov_put_block(data, num_data, blockN2, BLOCK_SIZE,
                                &output_pos);

        if (ivec != NULL)
            memcpy(ivec->data, blockN1, BLOCK_SIZE);
    }

    return 0;
}
/*
 * Decrypt the iov chain in place with Camellia in CBC mode with
 * ciphertext stealing (CTS), caching the expanded decryption key in
 * key->cache so repeated calls avoid re-expanding it.
 *
 * key      - Camellia key; expansion is cached via CACHE(key).
 * ivec     - optional IV (zero if NULL); on return receives the
 *            second-to-last ciphertext block (multi-block inputs only).
 * data     - iov chain; only ENCRYPT_IOV entries are processed.
 * num_data - number of entries in data.
 *
 * Returns 0 on success, ENOMEM if the key cache cannot be allocated.
 */
static krb5_error_code
krb5int_camellia_decrypt(krb5_key key, const krb5_data *ivec,
                         krb5_crypto_iov *data, size_t num_data)
{
    unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE];
    int nblocks = 0, blockno;
    unsigned int i;
    size_t input_length;
    struct iov_block_state input_pos, output_pos;

    /* Lazily allocate the per-key schedule cache. */
    if (key->cache == NULL) {
        key->cache = malloc(sizeof(struct camellia_key_info_cache));
        if (key->cache == NULL)
            return ENOMEM;
        CACHE(key)->enc_ctx.keybitlen = CACHE(key)->dec_ctx.keybitlen = 0;
    }
    /* keybitlen == 0 marks an unexpanded schedule; expand once. */
    if (CACHE(key)->dec_ctx.keybitlen == 0) {
        if (camellia_dec_key(key->keyblock.contents, key->keyblock.length,
                             &CACHE(key)->dec_ctx) != camellia_good)
            abort();
    }

    /* tmp carries the previous ciphertext block (the CBC chain). */
    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    /* Total ciphertext length across the iovs selected for decryption. */
    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (nblocks == 1) {
        /* Single block: plain decrypt (CTS degenerates). */
        krb5int_c_iov_get_block(tmp, BLOCK_SIZE, data, num_data, &input_pos);
        dec(tmp2, tmp, &CACHE(key)->dec_ctx);
        krb5int_c_iov_put_block(data, num_data, tmp2, BLOCK_SIZE,
                                &output_pos);
    } else if (nblocks > 1) {
        unsigned char blockN2[BLOCK_SIZE];   /* second last */
        unsigned char blockN1[BLOCK_SIZE];   /* last block */

        /* Ordinary CBC for all but the last two blocks, decrypting in
         * place through the nocopy iov interface where possible. */
        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            unsigned char blockN[BLOCK_SIZE], *block;

            krb5int_c_iov_get_block_nocopy(blockN, BLOCK_SIZE,
                                           data, num_data, &input_pos,
                                           &block);
            /* Save the ciphertext before decrypting in place; it becomes
             * the chaining value for the next block. */
            memcpy(tmp2, block, BLOCK_SIZE);
            dec(block, block, &CACHE(key)->dec_ctx);
            xorblock(block, tmp);
            memcpy(tmp, tmp2, BLOCK_SIZE);
            krb5int_c_iov_put_block_nocopy(data, num_data, blockN,
                                           BLOCK_SIZE, &output_pos, block);
        }

        /* Do last two blocks, the second of which (next-to-last block
           of plaintext) may be incomplete. */

        /* First, get the last two encrypted blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block(blockN2, BLOCK_SIZE, data, num_data,
                                &input_pos);
        krb5int_c_iov_get_block(blockN1, BLOCK_SIZE, data, num_data,
                                &input_pos);

        /* Copy out the chaining value before blockN2 is overwritten. */
        if (ivec != NULL)
            memcpy(ivec->data, blockN2, BLOCK_SIZE);

        /* Decrypt second last block */
        dec(tmp2, blockN2, &CACHE(key)->dec_ctx);

        /* Set tmp2 to last (possibly partial) plaintext block, and
           save it. */
        xorblock(tmp2, blockN1);
        memcpy(blockN2, tmp2, BLOCK_SIZE);

        /* Maybe keep the trailing part, and copy in the last
           ciphertext block. */
        input_length %= BLOCK_SIZE;
        memcpy(tmp2, blockN1, input_length ? input_length : BLOCK_SIZE);
        dec(tmp3, tmp2, &CACHE(key)->dec_ctx);
        xorblock(tmp3, tmp);
        memcpy(blockN1, tmp3, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec */
        krb5int_c_iov_put_block(data, num_data, blockN1, BLOCK_SIZE,
                                &output_pos);
        krb5int_c_iov_put_block(data, num_data, blockN2, BLOCK_SIZE,
                                &output_pos);
    }

    return 0;
}
/*
 * Encrypt the iov chain in place with triple DES (EDE: encrypt with
 * ks1, decrypt with ks2, encrypt with ks3) in CBC mode.
 *
 * data     - iov chain to encrypt in place.
 * num_data - number of entries in data.
 * ks1..ks3 - the three expanded DES key schedules.
 * ivec     - initial chaining vector, or NULL for all-zero; on return
 *            receives the last ciphertext block (if one was produced).
 */
void
krb5int_des3_cbc_encrypt_iov(krb5_crypto_iov *data, unsigned long num_data,
                             const mit_des_key_schedule ks1,
                             const mit_des_key_schedule ks2,
                             const mit_des_key_schedule ks3,
                             mit_des_cblock ivec)
{
    unsigned DES_INT32 left, right;
    const unsigned DES_INT32 *kp1, *kp2, *kp3;
    const unsigned char *ip;
    unsigned char *op;
    struct iov_block_state input_pos, output_pos;
    unsigned char iblock[MIT_DES_BLOCK_LENGTH];
    unsigned char oblock[MIT_DES_BLOCK_LENGTH];
    int wrote_block = 0;        /* nonzero once oblock holds ciphertext */

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    /*
     * Get key pointer here.  This won't need to be reinitialized
     */
    kp1 = (const unsigned DES_INT32 *)ks1;
    kp2 = (const unsigned DES_INT32 *)ks2;
    kp3 = (const unsigned DES_INT32 *)ks3;

    /*
     * Initialize left and right with the contents of the initial
     * vector.
     */
    if (ivec != NULL)
        ip = ivec;
    else
        ip = mit_des_zeroblock;
    /* NOTE: GET_HALF_BLOCK advances ip as a side effect. */
    GET_HALF_BLOCK(left, ip);
    GET_HALF_BLOCK(right, ip);

    /*
     * Suitably initialized, now work the length down 8 bytes
     * at a time.
     */
    for (;;) {
        unsigned DES_INT32 temp;

        ip = iblock;
        op = oblock;

        if (!krb5int_c_iov_get_block(iblock, MIT_DES_BLOCK_LENGTH,
                                     data, num_data, &input_pos))
            break;

        if (input_pos.iov_pos == num_data)
            break;

        /* XOR the plaintext with the previous ciphertext (CBC). */
        GET_HALF_BLOCK(temp, ip);
        left ^= temp;
        GET_HALF_BLOCK(temp, ip);
        right ^= temp;

        /*
         * Encrypt what we have
         */
        DES_DO_ENCRYPT(left, right, kp1);
        DES_DO_DECRYPT(left, right, kp2);
        DES_DO_ENCRYPT(left, right, kp3);

        /*
         * Copy the results out
         */
        PUT_HALF_BLOCK(left, op);
        PUT_HALF_BLOCK(right, op);

        krb5int_c_iov_put_block(data, num_data, oblock,
                                MIT_DES_BLOCK_LENGTH, &output_pos);
        wrote_block = 1;
    }

    /* Feed the last ciphertext block back into ivec for chaining, but
     * only if one was actually produced; the old code copied
     * uninitialized stack bytes into ivec when no blocks were
     * processed. */
    if (ivec != NULL && wrote_block)
        memcpy(ivec, oblock, MIT_DES_BLOCK_LENGTH);
}
/*
 * Decrypt the iov chain in place with triple DES (decrypt with ks3,
 * encrypt with ks2, decrypt with ks1 -- the inverse of EDE) in CBC
 * mode.
 *
 * data     - iov chain to decrypt in place.
 * num_data - number of entries in data.
 * ks1..ks3 - the three expanded DES key schedules.
 * ivec     - initial chaining vector, or NULL for all-zero; on return
 *            receives the last ciphertext block processed.
 */
void
krb5int_des3_cbc_decrypt_iov(krb5_crypto_iov *data, unsigned long num_data,
                             const mit_des_key_schedule ks1,
                             const mit_des_key_schedule ks2,
                             const mit_des_key_schedule ks3,
                             mit_des_cblock ivec)
{
    unsigned DES_INT32 left, right;
    const unsigned DES_INT32 *kp1, *kp2, *kp3;
    const unsigned char *ip;
    unsigned DES_INT32 ocipherl, ocipherr;
    unsigned DES_INT32 cipherl, cipherr;
    unsigned char *op;
    struct iov_block_state input_pos, output_pos;
    unsigned char iblock[MIT_DES_BLOCK_LENGTH];
    unsigned char oblock[MIT_DES_BLOCK_LENGTH];

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    /*
     * Get key pointer here.  This won't need to be reinitialized
     */
    kp1 = (const unsigned DES_INT32 *)ks1;
    kp2 = (const unsigned DES_INT32 *)ks2;
    kp3 = (const unsigned DES_INT32 *)ks3;

    /*
     * Decrypting is harder than encrypting because of
     * the necessity of remembering a lot more things.
     * Should think about this a little more...
     */
    if (num_data == 0)
        return;

    /*
     * Prime the old cipher with ivec.
     */
    if (ivec != NULL)
        ip = ivec;
    else
        ip = mit_des_zeroblock;
    /* NOTE: GET_HALF_BLOCK advances ip as a side effect. */
    GET_HALF_BLOCK(ocipherl, ip);
    GET_HALF_BLOCK(ocipherr, ip);

    /*
     * Now do this in earnest until we run out of length.
     */
    for (;;) {
        /*
         * Read a block from the input into left and
         * right.  Save this cipher block for later.
         */
        if (!krb5int_c_iov_get_block(iblock, MIT_DES_BLOCK_LENGTH,
                                     data, num_data, &input_pos))
            break;

        if (input_pos.iov_pos == num_data)
            break;

        ip = iblock;
        op = oblock;

        GET_HALF_BLOCK(left, ip);
        GET_HALF_BLOCK(right, ip);
        cipherl = left;
        cipherr = right;

        /*
         * Decrypt this.
         */
        DES_DO_DECRYPT(left, right, kp3);
        DES_DO_ENCRYPT(left, right, kp2);
        DES_DO_DECRYPT(left, right, kp1);

        /*
         * Xor with the old cipher to get plain
         * text.  Output 8 or less bytes of this.
         */
        left ^= ocipherl;
        right ^= ocipherr;

        PUT_HALF_BLOCK(left, op);
        PUT_HALF_BLOCK(right, op);

        /*
         * Save current cipher block here
         */
        ocipherl = cipherl;
        ocipherr = cipherr;

        krb5int_c_iov_put_block(data, num_data, oblock,
                                MIT_DES_BLOCK_LENGTH, &output_pos);
    }

    /* Store the last ciphertext block into ivec for chaining. */
    if (ivec != NULL) {
        op = ivec;
        PUT_HALF_BLOCK(ocipherl, op);
        PUT_HALF_BLOCK(ocipherr, op);
    }
}
/*
 * Decrypt the iov chain in place with single DES in CBC mode, one
 * 8-byte block at a time.
 *
 * data     - iov chain to decrypt in place.
 * num_data - number of entries in data.
 * schedule - expanded DES key schedule.
 * ivec     - initial chaining vector, or NULL for all-zero; on return
 *            receives the last ciphertext block (if any was processed).
 */
void
krb5int_des_cbc_decrypt(krb5_crypto_iov *data, unsigned long num_data,
                        const mit_des_key_schedule schedule,
                        mit_des_cblock ivec)
{
    unsigned DES_INT32 left, right;
    const unsigned DES_INT32 *kp;
    const unsigned char *ip;
    unsigned DES_INT32 ocipherl, ocipherr;
    unsigned DES_INT32 cipherl, cipherr;
    struct iov_block_state input_pos, output_pos;
    unsigned char storage[MIT_DES_BLOCK_LENGTH], *block = NULL, *ptr;

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    /* Get key pointer here.  This won't need to be reinitialized. */
    kp = (const unsigned DES_INT32 *)schedule;

    /*
     * Decrypting is harder than encrypting because of
     * the necessity of remembering a lot more things.
     * Should think about this a little more...
     */

    /* Prime the old cipher with ivec. */
    ip = (ivec != NULL) ? ivec : mit_des_zeroblock;
    /* NOTE: GET_HALF_BLOCK advances ip as a side effect. */
    GET_HALF_BLOCK(ocipherl, ip);
    GET_HALF_BLOCK(ocipherr, ip);

    /* Work the length down 8 bytes at a time. */
    for (;;) {
        /* ptr either aliases iov memory directly or points into storage
         * when a block straddles iov boundaries. */
        if (!krb5int_c_iov_get_block_nocopy(storage, MIT_DES_BLOCK_LENGTH,
                                            data, num_data, &input_pos, &ptr))
            break;
        block = ptr;

        /* Split this block into left and right. */
        GET_HALF_BLOCK(left, ptr);
        GET_HALF_BLOCK(right, ptr);
        /* Remember this ciphertext block for the next chain step. */
        cipherl = left;
        cipherr = right;

        /* Decrypt and xor with the old cipher to get plain text. */
        DES_DO_DECRYPT(left, right, kp);
        left ^= ocipherl;
        right ^= ocipherr;

        /* Store the encrypted halves back into block. */
        ptr = block;
        PUT_HALF_BLOCK(left, ptr);
        PUT_HALF_BLOCK(right, ptr);

        /* Save current cipher block halves. */
        ocipherl = cipherl;
        ocipherr = cipherr;

        krb5int_c_iov_put_block_nocopy(data, num_data, storage,
                                       MIT_DES_BLOCK_LENGTH, &output_pos,
                                       block);
    }

    /* Save the last ciphertext block into ivec for chaining; block is
     * NULL if no input blocks were processed, so ivec is left alone. */
    if (ivec != NULL && block != NULL) {
        ptr = ivec;
        PUT_HALF_BLOCK(ocipherl, ptr);
        PUT_HALF_BLOCK(ocipherr, ptr);
    }
}