krb5_error_code
krb5int_aes_decrypt(const krb5_keyblock *key, const krb5_data *ivec,
                    const krb5_data *input, krb5_data *output)
{
    aes_ctx ctx;
    char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE];
    int nblocks = 0, blockno;

    CHECK_SIZES;

    if (aes_dec_key(key->contents, key->length, &ctx) != aes_good)
        abort();

    if (ivec)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    nblocks = (input->length + BLOCK_SIZE - 1) / BLOCK_SIZE;

    if (nblocks == 1) {
        if (input->length < BLOCK_SIZE)
            return KRB5_BAD_MSIZE;
        dec(output->data, input->data, &ctx);
    } else if (nblocks > 1) {
        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            dec(tmp2, input->data + blockno * BLOCK_SIZE, &ctx);
            xorblock(tmp2, tmp);
            memcpy(output->data + blockno * BLOCK_SIZE, tmp2, BLOCK_SIZE);
            memcpy(tmp, input->data + blockno * BLOCK_SIZE, BLOCK_SIZE);
        }

        /* Do last two blocks, the second of which (next-to-last block
           of plaintext) may be incomplete. */
        dec(tmp2, input->data + (nblocks - 2) * BLOCK_SIZE, &ctx);
        /* Set tmp3 to last ciphertext block, padded. */
        memset(tmp3, 0, sizeof(tmp3));
        memcpy(tmp3, input->data + (nblocks - 1) * BLOCK_SIZE,
               input->length - (nblocks - 1) * BLOCK_SIZE);
        /* Set tmp2 to last (possibly partial) plaintext block, and
           save it. */
        xorblock(tmp2, tmp3);
        memcpy(output->data + (nblocks - 1) * BLOCK_SIZE, tmp2,
               input->length - (nblocks - 1) * BLOCK_SIZE);
        /* Maybe keep the trailing part, and copy in the last
           ciphertext block. */
        memcpy(tmp2, tmp3, input->length - (nblocks - 1) * BLOCK_SIZE);
        /* Decrypt, to get next to last plaintext block xor previous
           ciphertext. */
        dec(tmp3, tmp2, &ctx);
        xorblock(tmp3, tmp);
        memcpy(output->data + (nblocks - 2) * BLOCK_SIZE, tmp3, BLOCK_SIZE);
        if (ivec)
            memcpy(ivec->data, input->data + (nblocks - 2) * BLOCK_SIZE,
                   BLOCK_SIZE);
    }

    return 0;
}
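The krb5 snippets in this collection call a two-argument xorblock() helper that is never shown. A minimal sketch consistent with the call sites above (XOR one BLOCK_SIZE-byte block into another in place; an assumption, not necessarily the verbatim krb5 helper):

/* XOR one BLOCK_SIZE-byte block into another in place (sketch). */
static void
xorblock(char *out, const char *in)
{
    int z;

    for (z = 0; z < BLOCK_SIZE; z++)
        out[z] ^= in[z];
}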
krb5_error_code
krb5int_aes_encrypt(const krb5_keyblock *key, const krb5_data *ivec,
                    const krb5_data *input, krb5_data *output)
{
    aes_ctx ctx;
    char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE];
    int nblocks = 0, blockno;

    /* CHECK_SIZES; */

    if (aes_enc_key(key->contents, key->length, &ctx) != aes_good)
        abort();

    if (ivec)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    nblocks = (input->length + BLOCK_SIZE - 1) / BLOCK_SIZE;

    if (nblocks == 1) {
        /* Used when deriving keys. */
        if (input->length < BLOCK_SIZE)
            return KRB5_BAD_MSIZE;
        enc(output->data, input->data, &ctx);
    } else if (nblocks > 1) {
        unsigned int nleft;

        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            xorblock(tmp, input->data + blockno * BLOCK_SIZE);
            enc(tmp2, tmp, &ctx);
            memcpy(output->data + blockno * BLOCK_SIZE, tmp2, BLOCK_SIZE);
            /* Set up for next block. */
            memcpy(tmp, tmp2, BLOCK_SIZE);
        }

        /* Do final CTS step for last two blocks (the second of which
           may or may not be incomplete). */
        xorblock(tmp, input->data + (nblocks - 2) * BLOCK_SIZE);
        enc(tmp2, tmp, &ctx);
        nleft = input->length - (nblocks - 1) * BLOCK_SIZE;
        memcpy(output->data + (nblocks - 1) * BLOCK_SIZE, tmp2, nleft);
        memcpy(tmp, tmp2, BLOCK_SIZE);

        memset(tmp3, 0, sizeof(tmp3));
        memcpy(tmp3, input->data + (nblocks - 1) * BLOCK_SIZE, nleft);
        xorblock(tmp, tmp3);
        enc(tmp2, tmp, &ctx);
        memcpy(output->data + (nblocks - 2) * BLOCK_SIZE, tmp2, BLOCK_SIZE);
        if (ivec)
            memcpy(ivec->data, tmp2, BLOCK_SIZE);
    }

    return 0;
}
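A hypothetical caller, to make the ciphertext-stealing layout concrete: for a 20-byte message, nblocks is 2 and the output is also 20 bytes, with the last two CBC blocks emitted in swapped order and the final output block truncated. This is a sketch that assumes MIT krb5's krb5_data layout {magic, length, data}, not library documentation:

/* Hypothetical usage sketch: one full block plus a 4-byte tail. */
static void
example_cts_encrypt(const krb5_keyblock *key)
{
    char in[20] = "abcdefghijklmnopqrs";   /* 19 chars + NUL = 20 bytes */
    char out[20];                          /* CTS output = input length */
    krb5_data input = { 0, sizeof(in), in };
    krb5_data output = { 0, sizeof(out), out };

    /* A null ivec selects the all-zero IV, as in the functions above. */
    krb5int_aes_encrypt(key, NULL, &input, &output);
}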
// encryption: data must be NUL-padded to BLOCKSIZE
// decryption: data must be padded to BLOCKSIZE
// len must be < 2^31
void SymmCipher::ctr_crypt(byte* data, unsigned len, m_off_t pos,
                           ctr_iv ctriv, byte* mac, bool encrypt,
                           bool initmac)
{
    assert(!(pos & (KEYLENGTH - 1)));

    byte ctr[BLOCKSIZE], tmp[BLOCKSIZE];

    MemAccess::set<int64_t>(ctr, ctriv);
    setint64(pos / BLOCKSIZE, ctr + sizeof ctriv);

    if (mac && initmac)
    {
        memcpy(mac, ctr, sizeof ctriv);
        memcpy(mac + sizeof ctriv, ctr, sizeof ctriv);
    }

    while ((int)len > 0)
    {
        if (encrypt)
        {
            if (mac)
            {
                xorblock(data, mac);
                ecb_encrypt(mac);
            }

            ecb_encrypt(ctr, tmp);
            xorblock(tmp, data);
        }
        else
        {
            ecb_encrypt(ctr, tmp);
            xorblock(tmp, data);

            if (mac)
            {
                if (len >= (unsigned)BLOCKSIZE)
                {
                    xorblock(data, mac);
                }
                else
                {
                    xorblock(data, mac, len);
                }

                ecb_encrypt(mac);
            }
        }

        len -= BLOCKSIZE;
        data += BLOCKSIZE;

        incblock(ctr);
    }
}
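The MEGA-style snippets rely on two xorblock() overloads: a whole-block form and a length-limited form for the trailing partial block. Neither appears in this collection; a byte-wise sketch matching the call sites (src is XORed into dst), under the assumption that the SDK's version may instead work a word at a time:

// XOR a full cipher block into dst (sketch).
static void xorblock(const byte* src, byte* dst)
{
    for (int i = 0; i < BLOCKSIZE; i++)
    {
        dst[i] ^= src[i];
    }
}

// XOR only the first len bytes, for a trailing partial block (sketch).
static void xorblock(const byte* src, byte* dst, int len)
{
    while (len--)
    {
        dst[len] ^= src[len];
    }
}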
int crypto_stream_chacha20_tinynacl_xor(unsigned char *c,
                                        const unsigned char *m,
                                        unsigned long long l,
                                        const unsigned char *n,
                                        const unsigned char *k)
{
    unsigned char bspace[64] = {0};
    unsigned char *b = bspace;
    long long j;
    crypto_uint32 x0, x1, x2, x3, x4, x5, x6, x7;
    crypto_uint32 x8, x9, x10, x11, x12, x13, x14, x15;

    /* Key words. */
    crypto_uint32 k0 = unpack(k + 0);
    crypto_uint32 k1 = unpack(k + 4);
    crypto_uint32 k2 = unpack(k + 8);
    crypto_uint32 k3 = unpack(k + 12);
    crypto_uint32 k4 = unpack(k + 16);
    crypto_uint32 k5 = unpack(k + 20);
    crypto_uint32 k6 = unpack(k + 24);
    crypto_uint32 k7 = unpack(k + 28);

    /* 64-bit block counter (n0 low, n1 high) followed by the nonce. */
    crypto_uint32 n0 = 0;
    crypto_uint32 n1 = 0;
    crypto_uint32 n2 = unpack(n + 0);
    crypto_uint32 n3 = unpack(n + 4);

    /* "expand 32-byte k" constants. */
    crypto_uint32 s0 = 0x61707865;
    crypto_uint32 s1 = 0x3320646E;
    crypto_uint32 s2 = 0x79622D32;
    crypto_uint32 s3 = 0x6B206574;
    crypto_uint64 u;

    if (!l)
        return 0;

    /* Whole 64-byte blocks.  The 20-round core that would mix s*, k*
       and n* through the working state x0..x15 does not appear in this
       snippet; xorblock() stands in for XORing one keystream block
       into the message.  After each block, the 64-bit counter is
       incremented with carry. */
    while (l >= 64) {
        xorblock(c, m);

        u = 1;
        u += (crypto_uint64)n0;
        n0 = u;
        u >>= 32;
        u += (crypto_uint64)n1;
        n1 = u;

        l -= 64;
        c += 64;
        m += 64;
    }

    /* Trailing partial block, via a zero-padded stack buffer. */
    if (l) {
        for (j = 0; j < l; ++j)
            b[j] = m[j];
        xorblock(b, b);
        for (j = 0; j < l; ++j)
            c[j] = b[j];
    }

    return 0;
}
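For reference, the round core elided above would be built from the standard ChaCha quarter-round applied to the state words x0..x15. A sketch from the public ChaCha definition (D. J. Bernstein / RFC 8439), not from this file's code:

/* 32-bit left rotation and the ChaCha quarter-round (sketch). */
#define ROTL32(v, c) (((v) << (c)) | ((v) >> (32 - (c))))
#define QUARTERROUND(a, b, c, d) \
    a += b; d ^= a; d = ROTL32(d, 16); \
    c += d; b ^= c; b = ROTL32(b, 12); \
    a += b; d ^= a; d = ROTL32(d, 8);  \
    c += d; b ^= c; b = ROTL32(b, 7);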
void SymmCipher::setkey(const byte* newkey, int type)
{
    memcpy(key, newkey, KEYLENGTH);

    if (!type)
    {
        xorblock(newkey + KEYLENGTH, key);
    }

    aesecb_e.SetKey(key, KEYLENGTH);
    aesecb_d.SetKey(key, KEYLENGTH);

    aescbc_e.SetKeyWithIV(key, KEYLENGTH, zeroiv);
    aescbc_d.SetKeyWithIV(key, KEYLENGTH, zeroiv);

    keyvalid = 1;
}
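A hypothetical usage sketch for setkey() above: node keys that are twice KEYLENGTH bytes get folded into a single AES key when type is 0, since key[i] is XORed with newkey[KEYLENGTH + i]. Names here are illustrative only:

// Illustrative only: fold a 2*KEYLENGTH node key into one AES key.
byte nodekey[2 * KEYLENGTH];   // e.g. key material received with a node
SymmCipher cipher;

cipher.setkey(nodekey, 0);     // type 0 triggers the XOR fold above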
// encryption: data must be NUL-padded to BLOCKSIZE
// decryption: data must be padded to BLOCKSIZE
// len must be < 2^31
void SymmCipher::ctr_crypt(byte* data, unsigned len, m_off_t pos,
                           ctr_iv ctriv, byte* mac, int encrypt)
{
    assert(!(pos & (KEYLENGTH - 1)));

    byte ctr[BLOCKSIZE], tmp[BLOCKSIZE];

    *(uint64_t*)ctr = ctriv;
    setint64(pos / BLOCKSIZE, ctr + sizeof ctriv);

    memcpy(mac, ctr, sizeof ctriv);
    memcpy(mac + sizeof ctriv, ctr, sizeof ctriv);

    while ((int)len > 0)
    {
        if (encrypt)
        {
            xorblock(data, mac);
            ecb_encrypt(mac);

            ecb_encrypt(ctr, tmp);
            xorblock(tmp, data);
        }
        else
        {
            ecb_encrypt(ctr, tmp);
            xorblock(tmp, data);

            if (len >= (unsigned)BLOCKSIZE)
            {
                xorblock(data, mac);
            }
            else
            {
                xorblock(data, mac, len);
            }

            ecb_encrypt(mac);
        }

        len -= BLOCKSIZE;
        data += BLOCKSIZE;

        incblock(ctr);
    }
}
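Both ctr_crypt() versions also assume two small helpers not shown in this collection: setint64(), which stores the starting block index into the counter, and incblock(), which advances the counter block once per iteration. Sketches consistent with the call sites; the byte orders are assumptions:

// Store a 64-bit value little-endian (sketch; byte order assumed).
static void setint64(int64_t value, byte* data)
{
    for (int i = 0; i < 8; i++)
    {
        data[i] = (byte)(value >> (i * 8));
    }
}

// Increment the counter block, carrying leftward from the last byte
// (sketch; treats the block as a big-endian counter).
static void incblock(byte* dst, unsigned len = BLOCKSIZE)
{
    while (len)
    {
        if (++dst[--len])
        {
            break;
        }
    }
}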
krb5_error_code
krb5int_camellia_cbc_mac(krb5_key key, const krb5_crypto_iov *data,
                         size_t num_data, const krb5_data *iv,
                         krb5_data *output)
{
    camellia_ctx ctx;
    unsigned char blockY[BLOCK_SIZE];
    struct iov_block_state iov_state;

    if (output->length < BLOCK_SIZE)
        return KRB5_BAD_MSIZE;

    if (camellia_enc_key(key->keyblock.contents, key->keyblock.length,
                         &ctx) != camellia_good)
        abort();

    if (iv != NULL)
        memcpy(blockY, iv->data, BLOCK_SIZE);
    else
        memset(blockY, 0, BLOCK_SIZE);

    IOV_BLOCK_STATE_INIT(&iov_state);
    for (;;) {
        unsigned char blockB[BLOCK_SIZE];

        if (!krb5int_c_iov_get_block(blockB, BLOCK_SIZE, data, num_data,
                                     &iov_state))
            break;

        xorblock(blockB, blockY);
        if (camellia_enc_blk(blockB, blockY, &ctx) != camellia_good)
            abort();
    }

    output->length = BLOCK_SIZE;
    memcpy(output->data, blockY, BLOCK_SIZE);

    return 0;
}
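Restated over a contiguous buffer, the IOV loop above computes the classic CBC-MAC recurrence: Y_0 = IV, Y_i = E_K(B_i XOR Y_{i-1}), with the final Y as the MAC. A sketch under the same names (hypothetical helper, not part of krb5):

/* CBC-MAC over a contiguous, block-aligned buffer (sketch). */
static void
cbc_mac_contig(const unsigned char *in, size_t nblocks,
               camellia_ctx *ctx, unsigned char Y[BLOCK_SIZE])
{
    unsigned char B[BLOCK_SIZE];
    size_t i;

    for (i = 0; i < nblocks; i++) {
        memcpy(B, in + i * BLOCK_SIZE, BLOCK_SIZE);
        xorblock(B, Y);                 /* B ^= Y(i-1), the chaining step */
        camellia_enc_blk(B, Y, ctx);    /* Y(i) = E_K(B) */
    }
}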
krb5_error_code
krb5int_camellia_cbc_mac(krb5_key key, const krb5_crypto_iov *data,
                         size_t num_data, const krb5_data *iv,
                         krb5_data *output)
{
    CAMELLIA_KEY enck;
    unsigned char blockY[CAMELLIA_BLOCK_SIZE];
    struct iov_block_state iov_state;

    if (output->length < CAMELLIA_BLOCK_SIZE)
        return KRB5_BAD_MSIZE;

    Camellia_set_key(key->keyblock.contents,
                     NUM_BITS * key->keyblock.length, &enck);

    if (iv != NULL)
        memcpy(blockY, iv->data, CAMELLIA_BLOCK_SIZE);
    else
        memset(blockY, 0, CAMELLIA_BLOCK_SIZE);

    IOV_BLOCK_STATE_INIT(&iov_state);
    for (;;) {
        unsigned char blockB[CAMELLIA_BLOCK_SIZE];

        if (!krb5int_c_iov_get_block(blockB, CAMELLIA_BLOCK_SIZE, data,
                                     num_data, &iov_state))
            break;

        xorblock(blockB, blockY);
        Camellia_ecb_encrypt(blockB, blockY, &enck, 1);
    }

    output->length = CAMELLIA_BLOCK_SIZE;
    memcpy(output->data, blockY, CAMELLIA_BLOCK_SIZE);

    return 0;
}
void SymmCipher::setkey(const byte* newkey, int type)
{
    memcpy(key, newkey, KEYLENGTH);

    if (!type)
    {
        xorblock(newkey + KEYLENGTH, key);
    }

    aesecb_e.SetKey(key, KEYLENGTH);
    aesecb_d.SetKey(key, KEYLENGTH);

    aescbc_e.SetKeyWithIV(key, KEYLENGTH, zeroiv);
    aescbc_d.SetKeyWithIV(key, KEYLENGTH, zeroiv);

    aesccm8_e.SetKeyWithIV(key, KEYLENGTH, zeroiv);
    aesccm8_d.SetKeyWithIV(key, KEYLENGTH, zeroiv);

    aesccm16_e.SetKeyWithIV(key, KEYLENGTH, zeroiv);
    aesccm16_d.SetKeyWithIV(key, KEYLENGTH, zeroiv);

    aesgcm_e.SetKeyWithIV(key, KEYLENGTH, zeroiv);
    aesgcm_d.SetKeyWithIV(key, KEYLENGTH, zeroiv);
}
static krb5_error_code
krb5int_aes_decrypt_iov(const krb5_keyblock *key, const krb5_data *ivec,
                        krb5_crypto_iov *data, size_t num_data)
{
    aes_ctx ctx;
    char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE];
    int nblocks = 0, blockno, i;
    size_t input_length;
    struct iov_block_state input_pos, output_pos;

    CHECK_SIZES;

    if (aes_dec_key(key->contents, key->length, &ctx) != aes_good)
        abort();

    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;

    if (nblocks == 1) {
        krb5int_c_iov_get_block((unsigned char *)tmp, BLOCK_SIZE,
                                data, num_data, &input_pos);
        dec(tmp2, tmp, &ctx);
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2,
                                BLOCK_SIZE, &output_pos);
    } else if (nblocks > 1) {
        char blockN2[BLOCK_SIZE];   /* second last */
        char blockN1[BLOCK_SIZE];   /* last block */

        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            char blockN[BLOCK_SIZE];

            krb5int_c_iov_get_block((unsigned char *)blockN, BLOCK_SIZE,
                                    data, num_data, &input_pos);
            dec(tmp2, blockN, &ctx);
            xorblock(tmp2, tmp);
            krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2,
                                    BLOCK_SIZE, &output_pos);
            memcpy(tmp, blockN, BLOCK_SIZE);
        }

        /* Do last two blocks, the second of which (next-to-last block
           of plaintext) may be incomplete. */

        /* First, get the last two encrypted blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block((unsigned char *)blockN2, BLOCK_SIZE,
                                data, num_data, &input_pos);
        krb5int_c_iov_get_block((unsigned char *)blockN1, BLOCK_SIZE,
                                data, num_data, &input_pos);

        /* Decrypt second last block */
        dec(tmp2, blockN2, &ctx);
        /* Set tmp2 to last (possibly partial) plaintext block, and
           save it. */
        xorblock(tmp2, blockN1);
        memcpy(blockN2, tmp2, BLOCK_SIZE);

        /* Maybe keep the trailing part, and copy in the last
           ciphertext block. */
        input_length %= BLOCK_SIZE;
        memcpy(tmp2, blockN1, input_length ? input_length : BLOCK_SIZE);
        dec(tmp3, tmp2, &ctx);
        xorblock(tmp3, tmp);

        /* Copy out ivec first before we clobber blockN1 with plaintext */
        if (ivec != NULL)
            memcpy(ivec->data, blockN1, BLOCK_SIZE);

        memcpy(blockN1, tmp3, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec */
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN1,
                                BLOCK_SIZE, &output_pos);
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN2,
                                BLOCK_SIZE, &output_pos);
    }

    return 0;
}
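The last-two-block handling above (and in krb5int_aes_decrypt earlier) is the decrypt side of ciphertext stealing: decrypt the next-to-last ciphertext block, XOR with the zero-padded final block to expose the final partial plaintext, steal the decrypted tail to rebuild a full final ciphertext block, then undo ordinary CBC. A restatement over contiguous buffers (hypothetical helper; dec(), xorblock() and BLOCK_SIZE as in the snippets above):

/* CTS decrypt of the final two blocks over contiguous buffers (sketch).
   prev: preceding ciphertext block (or IV); C2: next-to-last ciphertext
   block (full); C1pad: final block, zero-padded; partial: its real
   length, or 0 if it is full.  P1 receives the (partial) last plaintext
   block, P2 the full next-to-last one. */
static void
cts_decrypt_tail(const char *prev, const char *C2, const char *C1pad,
                 size_t partial, char *P2, char *P1, aes_ctx *ctx)
{
    char D[BLOCK_SIZE], C1full[BLOCK_SIZE];
    size_t len = partial ? partial : BLOCK_SIZE;

    dec(D, C2, ctx);                 /* D = Dec(C2) */
    xorblock(D, C1pad);              /* first len bytes: last plaintext */
    memcpy(P1, D, len);

    memcpy(C1full, D, BLOCK_SIZE);   /* tail of D = stolen ciphertext */
    memcpy(C1full, C1pad, len);      /* restore the transmitted bytes */
    dec(D, C1full, ctx);
    xorblock(D, prev);               /* ordinary CBC step */
    memcpy(P2, D, BLOCK_SIZE);
}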
static krb5_error_code
krb5int_aes_encrypt_iov(const krb5_keyblock *key, const krb5_data *ivec,
                        krb5_crypto_iov *data, size_t num_data)
{
    aes_ctx ctx;
    char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE];
    int nblocks = 0, blockno;
    size_t input_length, i;
    struct iov_block_state input_pos, output_pos;

    if (aes_enc_key(key->contents, key->length, &ctx) != aes_good)
        abort();

    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;

    if (nblocks == 1) {
        krb5int_c_iov_get_block((unsigned char *)tmp, BLOCK_SIZE,
                                data, num_data, &input_pos);
        enc(tmp2, tmp, &ctx);
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2,
                                BLOCK_SIZE, &output_pos);
    } else if (nblocks > 1) {
        char blockN2[BLOCK_SIZE];   /* second last */
        char blockN1[BLOCK_SIZE];   /* last block */

        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            char blockN[BLOCK_SIZE];

            krb5int_c_iov_get_block((unsigned char *)blockN, BLOCK_SIZE,
                                    data, num_data, &input_pos);
            xorblock(tmp, blockN);
            enc(tmp2, tmp, &ctx);
            krb5int_c_iov_put_block(data, num_data, (unsigned char *)tmp2,
                                    BLOCK_SIZE, &output_pos);
            /* Set up for next block. */
            memcpy(tmp, tmp2, BLOCK_SIZE);
        }

        /* Do final CTS step for last two blocks (the second of which
           may or may not be incomplete). */

        /* First, get the last two blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block((unsigned char *)blockN2, BLOCK_SIZE,
                                data, num_data, &input_pos);
        krb5int_c_iov_get_block((unsigned char *)blockN1, BLOCK_SIZE,
                                data, num_data, &input_pos);

        /* Encrypt second last block */
        xorblock(tmp, blockN2);
        enc(tmp2, tmp, &ctx);
        memcpy(blockN2, tmp2, BLOCK_SIZE); /* blockN2 now contains first block */
        memcpy(tmp, tmp2, BLOCK_SIZE);

        /* Encrypt last block */
        xorblock(tmp, blockN1);
        enc(tmp2, tmp, &ctx);
        memcpy(blockN1, tmp2, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec (reverse order) */
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN1,
                                BLOCK_SIZE, &output_pos);
        krb5int_c_iov_put_block(data, num_data, (unsigned char *)blockN2,
                                BLOCK_SIZE, &output_pos);

        if (ivec != NULL)
            memcpy(ivec->data, blockN1, BLOCK_SIZE);
    }

    return 0;
}
static krb5_error_code
krb5int_camellia_encrypt(krb5_key key, const krb5_data *ivec,
                         krb5_crypto_iov *data, size_t num_data)
{
    unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE];
    int nblocks = 0, blockno;
    size_t input_length, i;
    struct iov_block_state input_pos, output_pos;

    if (key->cache == NULL) {
        key->cache = malloc(sizeof(struct camellia_key_info_cache));
        if (key->cache == NULL)
            return ENOMEM;
        CACHE(key)->enc_ctx.keybitlen = CACHE(key)->dec_ctx.keybitlen = 0;
    }
    if (CACHE(key)->enc_ctx.keybitlen == 0) {
        if (camellia_enc_key(key->keyblock.contents, key->keyblock.length,
                             &CACHE(key)->enc_ctx) != camellia_good)
            abort();
    }

    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (nblocks == 1) {
        krb5int_c_iov_get_block(tmp, BLOCK_SIZE, data, num_data,
                                &input_pos);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        krb5int_c_iov_put_block(data, num_data, tmp2, BLOCK_SIZE,
                                &output_pos);
    } else if (nblocks > 1) {
        unsigned char blockN2[BLOCK_SIZE];   /* second last */
        unsigned char blockN1[BLOCK_SIZE];   /* last block */

        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            unsigned char blockN[BLOCK_SIZE], *block;

            krb5int_c_iov_get_block_nocopy(blockN, BLOCK_SIZE,
                                           data, num_data, &input_pos,
                                           &block);
            xorblock(tmp, block);
            enc(block, tmp, &CACHE(key)->enc_ctx);
            krb5int_c_iov_put_block_nocopy(data, num_data, blockN,
                                           BLOCK_SIZE, &output_pos, block);

            /* Set up for next block. */
            memcpy(tmp, block, BLOCK_SIZE);
        }

        /* Do final CTS step for last two blocks (the second of which
           may or may not be incomplete). */

        /* First, get the last two blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block(blockN2, BLOCK_SIZE, data, num_data,
                                &input_pos);
        krb5int_c_iov_get_block(blockN1, BLOCK_SIZE, data, num_data,
                                &input_pos);

        /* Encrypt second last block */
        xorblock(tmp, blockN2);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        memcpy(blockN2, tmp2, BLOCK_SIZE); /* blockN2 now contains first block */
        memcpy(tmp, tmp2, BLOCK_SIZE);

        /* Encrypt last block */
        xorblock(tmp, blockN1);
        enc(tmp2, tmp, &CACHE(key)->enc_ctx);
        memcpy(blockN1, tmp2, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec (reverse order) */
        krb5int_c_iov_put_block(data, num_data, blockN1, BLOCK_SIZE,
                                &output_pos);
        krb5int_c_iov_put_block(data, num_data, blockN2, BLOCK_SIZE,
                                &output_pos);

        if (ivec != NULL)
            memcpy(ivec->data, blockN1, BLOCK_SIZE);
    }

    return 0;
}
static krb5_error_code
krb5int_camellia_decrypt(krb5_key key, const krb5_data *ivec,
                         krb5_crypto_iov *data, size_t num_data)
{
    unsigned char tmp[BLOCK_SIZE], tmp2[BLOCK_SIZE], tmp3[BLOCK_SIZE];
    int nblocks = 0, blockno;
    unsigned int i;
    size_t input_length;
    struct iov_block_state input_pos, output_pos;

    if (key->cache == NULL) {
        key->cache = malloc(sizeof(struct camellia_key_info_cache));
        if (key->cache == NULL)
            return ENOMEM;
        CACHE(key)->enc_ctx.keybitlen = CACHE(key)->dec_ctx.keybitlen = 0;
    }
    if (CACHE(key)->dec_ctx.keybitlen == 0) {
        if (camellia_dec_key(key->keyblock.contents, key->keyblock.length,
                             &CACHE(key)->dec_ctx) != camellia_good)
            abort();
    }

    if (ivec != NULL)
        memcpy(tmp, ivec->data, BLOCK_SIZE);
    else
        memset(tmp, 0, BLOCK_SIZE);

    for (i = 0, input_length = 0; i < num_data; i++) {
        krb5_crypto_iov *iov = &data[i];

        if (ENCRYPT_IOV(iov))
            input_length += iov->data.length;
    }

    IOV_BLOCK_STATE_INIT(&input_pos);
    IOV_BLOCK_STATE_INIT(&output_pos);

    nblocks = (input_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (nblocks == 1) {
        krb5int_c_iov_get_block(tmp, BLOCK_SIZE, data, num_data,
                                &input_pos);
        dec(tmp2, tmp, &CACHE(key)->dec_ctx);
        krb5int_c_iov_put_block(data, num_data, tmp2, BLOCK_SIZE,
                                &output_pos);
    } else if (nblocks > 1) {
        unsigned char blockN2[BLOCK_SIZE];   /* second last */
        unsigned char blockN1[BLOCK_SIZE];   /* last block */

        for (blockno = 0; blockno < nblocks - 2; blockno++) {
            unsigned char blockN[BLOCK_SIZE], *block;

            krb5int_c_iov_get_block_nocopy(blockN, BLOCK_SIZE,
                                           data, num_data, &input_pos,
                                           &block);
            memcpy(tmp2, block, BLOCK_SIZE);
            dec(block, block, &CACHE(key)->dec_ctx);
            xorblock(block, tmp);
            memcpy(tmp, tmp2, BLOCK_SIZE);
            krb5int_c_iov_put_block_nocopy(data, num_data, blockN,
                                           BLOCK_SIZE, &output_pos, block);
        }

        /* Do last two blocks, the second of which (next-to-last block
           of plaintext) may be incomplete. */

        /* First, get the last two encrypted blocks */
        memset(blockN1, 0, sizeof(blockN1)); /* pad last block with zeros */
        krb5int_c_iov_get_block(blockN2, BLOCK_SIZE, data, num_data,
                                &input_pos);
        krb5int_c_iov_get_block(blockN1, BLOCK_SIZE, data, num_data,
                                &input_pos);

        if (ivec != NULL)
            memcpy(ivec->data, blockN2, BLOCK_SIZE);

        /* Decrypt second last block */
        dec(tmp2, blockN2, &CACHE(key)->dec_ctx);
        /* Set tmp2 to last (possibly partial) plaintext block, and
           save it. */
        xorblock(tmp2, blockN1);
        memcpy(blockN2, tmp2, BLOCK_SIZE);

        /* Maybe keep the trailing part, and copy in the last
           ciphertext block. */
        input_length %= BLOCK_SIZE;
        memcpy(tmp2, blockN1, input_length ? input_length : BLOCK_SIZE);
        dec(tmp3, tmp2, &CACHE(key)->dec_ctx);
        xorblock(tmp3, tmp);
        memcpy(blockN1, tmp3, BLOCK_SIZE);

        /* Put the last two blocks back into the iovec */
        krb5int_c_iov_put_block(data, num_data, blockN1, BLOCK_SIZE,
                                &output_pos);
        krb5int_c_iov_put_block(data, num_data, blockN2, BLOCK_SIZE,
                                &output_pos);
    }

    return 0;
}
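Both Camellia IOV functions above lazily populate key->cache and then address it through a CACHE() macro. The layout they assume, inferred from the accesses to enc_ctx/dec_ctx (a sketch, not necessarily krb5's exact declaration):

/* Per-key cache of expanded Camellia schedules (sketch inferred from
   the usage above). */
struct camellia_key_info_cache {
    camellia_ctx enc_ctx, dec_ctx;
};
#define CACHE(X) ((struct camellia_key_info_cache *)((X)->cache))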
int intel_scu_ipc_write_umip(u8 *data, int len, int offset)
{
    int i, ret = 0, offset_align;
    int remainder, len_align = 0;
    u32 dptr, sptr, cmd;
    u8 cs, tbl_cs = 0, *buf = NULL;
    Sector sect;
    struct block_device *bdev;
    char *buffer = NULL;
    int *holderId = NULL;
    int sect_no;
    u8 checksum;

    if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
        /* Opening the mmcblk0boot0 */
        bdev = get_emmc_bdev();
        if (bdev == NULL) {
            pr_err("%s: get_emmc failed!\n", __func__);
            return -ENODEV;
        }

        /* make sure the block device is open rw */
        ret = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, holderId);
        if (ret < 0) {
            pr_err("%s: blk_dev_get failed!\n", __func__);
            return ret;
        }

        /* get memmap of the UMIP header */
        sect_no = offset / SECTOR_SIZE;
        remainder = offset % SECTOR_SIZE;
        buffer = read_dev_sector(bdev,
                                 sect_no + UMIP_HEADER_HEADROOM_SECTOR,
                                 &sect);

        /* Shouldn't need to access UMIP sector 0/1 */
        if (sect_no < UMIP_TOTAL_HEADER_SECTOR_NO) {
            pr_err("invalid umip offset\n");
            ret = -EINVAL;
            goto bd_put;
        } else if (data == NULL || buffer == NULL) {
            pr_err("buffer is empty\n");
            ret = -ENODEV;
            goto bd_put;
        } else if (len > (SECTOR_SIZE - remainder)) {
            pr_err("too much data to write\n");
            ret = -EINVAL;
            goto bd_put;
        }

        lock_page(sect.v);
        memcpy(buffer + remainder, data, len);
        checksum = calc_checksum(buffer, SECTOR_SIZE);
        set_page_dirty(sect.v);
        unlock_page(sect.v);
        sync_blockdev(bdev);
        put_dev_sector(sect);

        /*
         * Updating the checksum, sector 0 (starting from UMIP
         * offset 0x08): we maintain 4 bytes for tracking each
         * sector's changes individually. For example, the dword at
         * offset 0x08 is used to checksum data integrity of sector
         * number 2, and so forth. It's worth noting that only the
         * first byte in each 4 bytes stores the checksum.
         * For detail, please check the CTP FAS UMIP header definition.
         */
        buffer = read_dev_sector(bdev,
                                 UMIP_HEADER_SECTOR +
                                 UMIP_HEADER_HEADROOM_SECTOR,
                                 &sect);
        if (buffer == NULL) {
            pr_err("buffer is empty\n");
            ret = -ENODEV;
            goto bd_put;
        }

        lock_page(sect.v);
        memcpy(buffer + 4 * (sect_no - UMIP_TOTAL_HEADER_SECTOR_NO) +
               UMIP_START_CHKSUM_ADDR, &checksum, 1 /* one byte */);

        /* Change UMIP prologue chksum to zero */
        *(buffer + UMIP_HEADER_CHKSUM_ADDR) = 0;

        for (i = 0; i < UMIP_TOTAL_CHKSUM_ENTRY; i++)
            tbl_cs ^= *(u8 *)(buffer + 4 * i + UMIP_START_CHKSUM_ADDR);

        /* Finish up with re-calculating the UMIP prologue checksum */
        cs = dword_to_byte_chksum(xorblock((u32 *)buffer, SECTOR_SIZE));

        *(buffer + UMIP_HEADER_CHKSUM_ADDR) = tbl_cs ^ cs;
        set_page_dirty(sect.v);
        unlock_page(sect.v);
        sync_blockdev(bdev);
bd_put:
        if (buffer)
            put_dev_sector(sect);
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
        return ret;
    } else {
        if (!intel_mip_base)
            return -ENODEV;

        if (offset + len > IPC_MIP_MAX_ADDR)
            return -EINVAL;

        rpmsg_global_lock();

        offset_align = offset & (~0x3);
        len_align = (len + (offset - offset_align) + 3) & (~0x3);

        if (len != len_align) {
            buf = kzalloc(len_align, GFP_KERNEL);
            if (!buf) {
                pr_err("Alloc memory failed\n");
                ret = -ENOMEM;
                goto fail;
            }
            ret = read_mip(buf, len_align, offset_align, 0);
            if (ret)
                goto fail;
            memcpy(buf + offset - offset_align, data, len);
        } else {
            buf = data;
        }

        dptr = offset_align;
        sptr = len_align / 4;
        cmd = IPC_CMD_UMIP_WR << 12 | IPCMSG_MIP_ACCESS;
        memcpy(intel_mip_base, buf, len_align);
        ret = rpmsg_send_raw_command(mip_instance, cmd, 0, NULL, NULL,
                                     0, 0, sptr, dptr);
fail:
        if (buf && len_align != len)
            kfree(buf);
        rpmsg_global_unlock();
        return ret;
    }
}
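Note that xorblock() here is a different helper from the cipher snippets above: it folds a whole sector into a single 32-bit word, which dword_to_byte_chksum() then reduces to one byte. Sketches matching the call sites, using the kernel's u8/u32 types (assumptions, not necessarily the exact kernel helpers):

/* Fold a buffer into one u32 by XOR, a dword at a time (sketch). */
static u32 xorblock(u32 *buf, u32 size)
{
    u32 cs = 0;

    size >>= 2;              /* bytes -> dwords */
    while (size--)
        cs ^= *buf++;
    return cs;
}

/* Reduce a dword checksum to one byte by XORing its bytes (sketch). */
static u8 dword_to_byte_chksum(u32 dw)
{
    u8 cs = 0;
    int n;

    for (n = 0; n < 4; n++)
        cs ^= (u8)(dw >> (8 * n));
    return cs;
}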