/* Encrypt (or decrypt -- CTR is symmetric) `len` bytes of `input` into
 * `output` in counter mode.
 *
 * output : destination buffer, at least `len` bytes
 * key    : expanded AES key used to generate the keystream
 * iv     : initial counter block (treated as a big-endian counter)
 * newIV  : if non-NULL, receives the counter value after processing,
 *          so a caller can continue the stream in a later call
 * input  : source buffer, at least `len` bytes
 * len    : byte count; need not be a multiple of 16
 */
void tmd_aes_generic_encrypt_ctr(uint8_t *output, const aes_key *key, const aes_block *iv, aes_block *newIV, const uint8_t *input, uint32_t len)
{
    aes_block block, o;
    uint32_t nb_blocks = len / 16;
    int i;

    /* preload IV in block */
    block128_copy(&block, iv);

    /* Full 16-byte blocks: output = E_K(counter) XOR input, counter
     * incremented big-endian after each block. */
    for ( ; nb_blocks-- > 0; block128_inc_be(&block), output += 16, input += 16) {
        aes_encrypt_block(&o, key, &block);
        block128_vxor((block128 *) output, &o, (block128 *) input);
    }

    /* Trailing partial block: generate one more keystream block and XOR
     * byte-by-byte; the extra keystream bytes are simply discarded. */
    if ((len % 16) != 0) {
        aes_encrypt_block(&o, key, &block);
        for (i = 0; i < (len % 16); i++) {
            *output = ((uint8_t *) &o)[i] ^ *input;
            output++;
            input++;
        }
    }

    /* Note: after a partial block the counter is NOT advanced past it;
     * newIV points at the counter used for that partial block. */
    if (NULL != newIV)
        block128_copy(newIV, &block);
}
/* GCM decryption of `length` bytes of ciphertext `input` into plaintext
 * `output`, using an explicit context-copy style: `ctx` is left untouched
 * and the advanced state (counter, GHASH accumulator, lengths) is written
 * to `newCTX`.
 *
 * Ordering matters: for DECRYPTION the GHASH must absorb the CIPHERTEXT,
 * so gcm_ghash_add() is called on `input` before the keystream XOR.
 */
void tmd_aes_generic_gcm_decrypt(uint8_t *output, const aes_gcm *gcm, const aes_ctx *ctx, const aes_key *key, const uint8_t *input, uint32_t length, aes_ctx *newCTX)
{
    aes_block out;

    /* Start from a copy of the caller's state and track the total
     * ciphertext length for the final tag computation. */
    memcpy(newCTX, ctx, sizeof(aes_ctx));
    newCTX->length_input += length;

    /* Full blocks: bump counter, derive keystream, hash ciphertext,
     * then XOR keystream into ciphertext to recover plaintext. */
    for (; length >= 16; input += 16, output += 16, length -= 16) {
        block128_inc_be(&newCTX->civ);
        aes_encrypt_block(&out, key, &newCTX->civ);
        gcm_ghash_add(gcm, newCTX, (block128 *) input);
        block128_xor(&out, (block128 *) input);
        block128_copy((block128 *) output, &out);
    }

    /* Partial final block: pad the ciphertext with zeros into `tmp` for
     * GHASH, then XOR only `length` keystream bytes back out. */
    if (length > 0) {
        aes_block tmp;
        int i;
        block128_inc_be(&newCTX->civ);
        block128_zero(&tmp);
        block128_copy_bytes(&tmp, input, length);
        gcm_ghash_add(gcm, newCTX, &tmp);
        aes_encrypt_block(&out, key, &newCTX->civ);
        block128_xor_bytes(&tmp, out.b, length);
        for (i = 0; i < length; i++) {
            output[i] = tmp.b[i];
        }
    }
}
/* GCM decryption of `length` bytes of ciphertext `input` into `output`,
 * mutating the state carried inside `gcm` (counter, GHASH tag, lengths).
 *
 * Ordering matters: for DECRYPTION the GHASH must absorb the CIPHERTEXT,
 * hence gcm_ghash_add() runs on `input` before the keystream XOR.
 */
void aes_gcm_decrypt(uint8_t *output, aes_gcm *gcm, uint8_t *input, uint32_t length)
{
    aes_block out;

    /* Account for the ciphertext length used in aes_gcm_finish(). */
    gcm->length_input += length;

    /* Full blocks: bump counter, derive keystream, hash ciphertext,
     * then XOR keystream into ciphertext to recover plaintext. */
    for (; length >= 16; input += 16, output += 16, length -= 16) {
        block128_inc_be(&gcm->civ);
        aes_encrypt_block(&out, &gcm->key, &gcm->civ);
        gcm_ghash_add(gcm, (block128 *) input);
        block128_xor(&out, (block128 *) input);
        block128_copy((block128 *) output, &out);
    }

    /* Partial final block: zero-pad the ciphertext into `tmp` for GHASH,
     * then XOR only `length` keystream bytes into the output. */
    if (length > 0) {
        aes_block tmp;
        int i;
        block128_inc_be(&gcm->civ);
        block128_zero(&tmp);
        block128_copy_bytes(&tmp, input, length);
        gcm_ghash_add(gcm, &tmp);
        aes_encrypt_block(&out, &gcm->key, &gcm->civ);
        block128_xor_bytes(&tmp, out.b, length);
        for (i = 0; i < length; i++) {
            output[i] = tmp.b[i];
        }
    }
}
/* XTS-AES encryption of `nb_blocks` full 16-byte blocks.
 *
 * k1       : key used for the data encryption
 * k2       : key used to encrypt the data-unit number into the tweak
 * dataunit : 128-bit data-unit (sector) number
 * spoint   : starting block index within the data unit; the tweak is
 *            advanced by that many GF(2^128) doublings before use
 *
 * Ciphertext-stealing for partial blocks is not handled here; the caller
 * must supply whole blocks.
 */
void aes_encrypt_xts(uint8_t *output, aes_key *k1, aes_key *k2, aes_block *dataunit, uint32_t spoint, uint8_t *input, uint32_t nb_blocks)
{
    aes_block block, tweak;

    if (!nb_blocks)
        return;

#if defined(ARCH_X86) && defined(WITH_AESNI)
    /* Hardware fast path; nbr == 10 means a 10-round (AES-128) key. */
    if (have_aesni() && k1->nbr == 10) {
        aes_ni_encrypt_xts(output, k1, k2, (uint8_t *) dataunit, spoint, input, nb_blocks);
        return;
    }
#endif

    /* load IV and encrypt it using k2 as the tweak */
    block128_copy(&tweak, dataunit);
    aes_encrypt_block(&tweak, k2, &tweak);

    /* TO OPTIMISE: this is really inefficient way to do that */
    while (spoint-- > 0)
        gf_mulx(&tweak);

    /* Per block: XOR tweak, ECB-encrypt under k1, XOR tweak again;
     * between blocks the tweak is multiplied by x in GF(2^128). */
    for ( ; nb_blocks-- > 0; input += 16, output += 16, gf_mulx(&tweak)) {
        block128_vxor(&block, (block128 *) input, &tweak);
        aes_encrypt_block(&block, k1, &block);
        block128_vxor((block128 *) output, &block, &tweak);
    }
}
/* Initialize a GCM state: derive the GHASH key H, compute the initial
 * counter J0 from the caller-supplied IV, and reset lengths and tag.
 *
 * gcm : state to initialize (key is copied in, so `key` need not outlive it)
 * iv  : initialization vector, `len` bytes
 * len : IV length in bytes; 12 takes the fast path, any other length goes
 *       through the GHASH-based derivation
 */
void aes_gcm_init(aes_gcm *gcm, aes_key *key, uint8_t *iv, uint32_t len)
{
    gcm->length_aad = 0;
    gcm->length_input = 0;

    block128_zero(&gcm->h);
    block128_zero(&gcm->tag);
    block128_zero(&gcm->iv);

    memcpy(&gcm->key, key, sizeof(aes_key));

    /* prepare H : encrypt_K(0^128) */
    aes_encrypt_block(&gcm->h, key, &gcm->h);

    if (len == 12) {
        /* 96-bit IV fast path: J0 = IV || 0^31 || 1. */
        block128_copy_bytes(&gcm->iv, iv, 12);
        gcm->iv.b[15] = 0x01;
    } else {
        /* Other lengths: J0 = GHASH_H(IV padded || len(IV) in bits).
         * NOTE(review): origlen is 32-bit, so the bit length truncates
         * for IVs of 2^29 bytes or more -- confirm acceptable. */
        uint32_t origlen = len << 3;
        int i;
        for (; len >= 16; len -= 16, iv += 16) {
            block128_xor(&gcm->iv, (block128 *) iv);
            gf_mul(&gcm->iv, &gcm->h);
        }
        if (len > 0) {
            /* Absorb the final partial IV chunk, implicitly zero-padded. */
            block128_xor_bytes(&gcm->iv, iv, len);
            gf_mul(&gcm->iv, &gcm->h);
        }
        /* XOR the big-endian bit length into the low bytes of the block,
         * then one final GHASH multiplication. */
        for (i = 15; origlen; --i, origlen >>= 8)
            gcm->iv.b[i] ^= (uint8_t) origlen;
        gf_mul(&gcm->iv, &gcm->h);
    }
    /* civ is the running counter; iv keeps J0 for the final tag step. */
    block128_copy(&gcm->civ, &gcm->iv);
}
/* Single-block AES sanity tests:
 *   - AES-128: known-answer check of the ciphertext,
 *   - AES-192 / AES-256: encrypt-then-decrypt round trip restores input.
 */
void test_aes_encrypt_block()
{
    uint8_t decrypted[16] = {0};
    uint8_t plain[16] = {
        0x01,0x23,0x45,0x67, 0x89,0xab,0xcd,0xef,
        0xfe,0xdc,0xba,0x98, 0x76,0x54,0x32,0x10
    };
    uint8_t cipher[16] = {0};
    /* Expected AES-128 ciphertext for `plain` under the key below. */
    uint8_t expected128[16] = {
        0xff,0x0b,0x84,0x4a, 0x08,0x53,0xbf,0x7c,
        0x69,0x34,0xab,0x43, 0x64,0x14,0x8f,0xb9
    };
    /* 32 bytes of key material; shorter key sizes use a prefix of it. */
    uint8_t key_material[32] = {
        0x0f,0x15,0x71,0xc9, 0x47,0xd9,0xe8,0x59,
        0x0c,0xb7,0xad,0xd6, 0xaf,0x7f,0x67,0x98,
        0x0f,0x15,0x71,0xc9, 0x47,0xd9,0xe8,0x59,
        0x0c,0xb7,0xad,0xd6, 0xaf,0x7f,0x67,0x98
    };
    aes_context ctx;
    uint32_t nbits;

    /* AES-128 known-answer test. */
    CU_ASSERT_EQUAL(aes_set_key(&ctx, key_material, 128), SUCCESS);
    CU_ASSERT_EQUAL(aes_encrypt_block(&ctx, cipher, plain), SUCCESS);
    CU_ASSERT_EQUAL(memcmp(expected128, cipher, sizeof(expected128)), 0);

    /* AES-192 and AES-256 round trips. */
    for (nbits = 192; nbits <= 256; nbits += 64) {
        CU_ASSERT_EQUAL(aes_set_key(&ctx, key_material, nbits), SUCCESS);
        CU_ASSERT_EQUAL(aes_encrypt_block(&ctx, cipher, plain), SUCCESS);
        CU_ASSERT_EQUAL(aes_decrypt_block(&ctx, decrypted, cipher), SUCCESS);
        CU_ASSERT_EQUAL(memcmp(decrypted, plain, sizeof(cipher)), 0);
    }
}
/* XTS-AES encryption of `nb_blocks` whole blocks, block-pointer variant.
 *
 * k1       : data-encryption key
 * k2       : tweak-encryption key
 * dataunit : 128-bit data-unit (sector) number
 * spoint   : starting block index inside the data unit; the tweak is
 *            advanced by that many GF(2^128) doublings first
 */
void tmd_aes_generic_encrypt_xts(aes_block *output, const aes_key *k1, aes_key *k2, aes_block *dataunit, uint32_t spoint, aes_block *input, uint32_t nb_blocks)
{
    aes_block buf;
    aes_block t;

    /* The tweak is the data-unit number encrypted under k2. */
    block128_copy(&t, dataunit);
    aes_encrypt_block(&t, k2, &t);

    /* TO OPTIMISE: this is really inefficient way to do that */
    while (spoint-- > 0)
        tmd_gf_mulx(&t);

    /* Each block: XOR tweak, ECB-encrypt under k1, XOR tweak again;
     * the tweak is doubled in GF(2^128) between blocks. */
    while (nb_blocks-- > 0) {
        block128_vxor(&buf, input, &t);
        aes_encrypt_block(&buf, k1, &buf);
        block128_vxor(output, &buf, &t);
        input++;
        output++;
        tmd_gf_mulx(&t);
    }
}
/* Generate `nb_blocks` raw CTR keystream blocks into `output`:
 * output[n] = E_K(iv + n), with the counter incremented big-endian.
 * The caller's `iv` block is not modified.
 */
void tmd_aes_gen_ctr(aes_block *output, const aes_key *key, aes_block *iv, uint32_t nb_blocks)
{
    aes_block counter;

    /* Work on a private copy so the caller's IV stays intact. */
    block128_copy(&counter, iv);

    while (nb_blocks-- > 0) {
        aes_encrypt_block(output, key, &counter);
        output++;
        block128_inc_be(&counter);
    }
}
/* Generate `nb_blocks` raw CTR keystream blocks into the byte buffer
 * `output` (16 bytes per block): output[n] = E_K(iv + n).
 * The caller's `iv` block is not modified.
 */
void aes_gen_ctr(uint8_t *output, aes_key *key, aes_block *iv, uint32_t nb_blocks)
{
    aes_block counter;

    if (!nb_blocks)
        return;

    /* Private counter copy; increments are big-endian. */
    block128_copy(&counter, iv);

    while (nb_blocks-- > 0) {
        aes_encrypt_block((block128 *) output, key, &counter);
        output += 16;
        block128_inc_be(&counter);
    }
}
/* CTR-mode encryption (identical to decryption) of `len` bytes of
 * `input` into `output`; `len` need not be a multiple of 16.
 * The caller's `iv` block is not modified.
 */
void aes_encrypt_ctr(uint8_t *output, aes_key *key, aes_block *iv, uint8_t *input, uint32_t len)
{
    aes_block counter, keystream;
    uint32_t whole = len / 16;
    uint32_t tail = len % 16;
    uint32_t i;

    /* Private counter copy; increments are big-endian. */
    block128_copy(&counter, iv);

    /* Full blocks: output = E_K(counter) XOR input. */
    while (whole-- > 0) {
        aes_encrypt_block(&keystream, key, &counter);
        block128_vxor((block128 *) output, &keystream, (block128 *) input);
        block128_inc_be(&counter);
        output += 16;
        input += 16;
    }

    /* Trailing partial block: XOR byte-by-byte with one final keystream
     * block; surplus keystream bytes are discarded. */
    if (tail != 0) {
        aes_encrypt_block(&keystream, key, &counter);
        for (i = 0; i < tail; i++)
            output[i] = keystream.b[i] ^ input[i];
    }
}
/* ECB-mode encryption of `nb_blocks` independent 16-byte blocks.
 * Dispatches to the AES-NI path for 10-round (AES-128) keys when the
 * CPU supports it; otherwise encrypts block by block.
 */
void aes_encrypt_ecb(uint8_t *output, aes_key *key, uint8_t *input, uint32_t nb_blocks)
{
    if (!nb_blocks)
        return;

#if defined(ARCH_X86) && defined(WITH_AESNI)
    if (have_aesni() && key->nbr == 10) {
        aes_ni_encrypt_ecb(output, key, input, nb_blocks);
        return;
    }
#endif

    /* Generic path: one block cipher call per 16-byte chunk. */
    while (nb_blocks-- > 0) {
        aes_encrypt_block((block128 *) output, key, (block128 *) input);
        input += 16;
        output += 16;
    }
}
/* Demo driver: for each supported key size, run a single-block
 * encrypt/decrypt round trip and print input, ciphertext, and the
 * recovered plaintext.
 *
 * Returns 0 on success, -1 on any API failure.
 *
 * Fixes vs. previous version:
 *  - `%d` used for a uint32_t is undefined behavior; cast to unsigned
 *    and print with `%u`.
 *  - perror() appended an unrelated errno string (these AES calls are
 *    not shown to set errno); report plainly on stderr instead.
 *  - loop index made size_t to avoid a signed/unsigned comparison.
 */
int main()
{
    uint8_t ret_text[16] = {0};
    uint8_t text[16] = {
        0x01,0x23,0x45,0x67, 0x89,0xab,0xcd,0xef,
        0xfe,0xdc,0xba,0x98, 0x76,0x54,0x32,0x10
    };
    uint8_t cipher_text[16] = {0};
    /* 32 bytes of key material; shorter key sizes use a prefix. */
    uint8_t key[32] = {
        0x0f,0x15,0x71,0xc9, 0x47,0xd9,0xe8,0x59,
        0x0c,0xb7,0xad,0xd6, 0xaf,0x7f,0x67,0x98,
        0x0f,0x15,0x71,0xc9, 0x47,0xd9,0xe8,0x59,
        0x0c,0xb7,0xad,0xd6, 0xaf,0x7f,0x67,0x98
    };
    uint32_t key_bit[3] = {128, 192, 256};
    aes_context ctx;
    size_t i;

    for (i = 0; i < sizeof(key_bit)/sizeof(key_bit[0]); ++i) {
        if (aes_set_key(&ctx, key, key_bit[i]) != SUCCESS) {
            fprintf(stderr, "aes_set_key error.\n");
            return -1;
        }
        if (aes_encrypt_block(&ctx, cipher_text, text) != SUCCESS) {
            fprintf(stderr, "aes_encrypt_block error.\n");
            return -1;
        }
        if (aes_decrypt_block(&ctx, ret_text, cipher_text) != SUCCESS) {
            fprintf(stderr, "aes_decrypt_block error.\n");
            return -1;
        }
        printf("key_bit %u: \n", (unsigned) key_bit[i]);
        print("\tinput  : ", text);
        print("\tencrypt: ", cipher_text);
        print("\tdecrypt: ", ret_text);
    }
    return 0;
}
/* Finalize GCM: fold the AAD/ciphertext bit lengths into the GHASH and
 * produce the 16-byte authentication tag into `tag`.
 *
 * NOTE(review): if length_aad / length_input are 32-bit fields, the
 * `<< 3` bytes-to-bits shift can overflow before widening to 64 bits
 * for inputs >= 512 MiB -- confirm the field types.
 */
void aes_gcm_finish(uint8_t *tag, aes_gcm *gcm)
{
    aes_block lblock;
    int i;

    /* tag = (tag-1 xor (lenbits(a) | lenbits(c)) ) . H */
    lblock.q[0] = cpu_to_be64(gcm->length_aad << 3);
    lblock.q[1] = cpu_to_be64(gcm->length_input << 3);
    gcm_ghash_add(gcm, &lblock);

    /* Mask the GHASH output with E_K(J0); gcm->iv still holds J0. */
    aes_encrypt_block(&lblock, &gcm->key, &gcm->iv);
    block128_xor(&gcm->tag, &lblock);

    /* Export the tag bytes. */
    for (i = 0; i < 16; i++) {
        tag[i] = gcm->tag.b[i];
    }
}
/* Finalize GCM (explicit-context variant): fold the AAD/ciphertext bit
 * lengths kept in `ctx` into the GHASH and write the 16-byte tag.
 * The encryption key is passed separately from the GHASH state `gcm`.
 *
 * NOTE(review): if length_aad / length_input are 32-bit fields, the
 * `<< 3` bytes-to-bits shift can overflow before widening to 64 bits
 * for inputs >= 512 MiB -- confirm the field types.
 */
void tmd_aes_gcm_finish(uint8_t *tag, const aes_gcm *gcm, const aes_key *key, aes_ctx *ctx)
{
    aes_block lblock;
    int i;

    /* tag = (tag-1 xor (lenbits(a) | lenbits(c)) ) . H */
    lblock.q[0] = cpu_to_be64(ctx->length_aad << 3);
    lblock.q[1] = cpu_to_be64(ctx->length_input << 3);
    gcm_ghash_add(gcm, ctx, &lblock);

    /* Mask the GHASH output with E_K(J0); ctx->iv still holds J0. */
    aes_encrypt_block(&lblock, key, &ctx->iv);
    block128_xor(&ctx->tag, &lblock);

    /* Export the tag bytes. */
    for (i = 0; i < 16; i++) {
        tag[i] = ctx->tag.b[i];
    }
}
/* CBC-mode encryption of `nb_blocks` 16-byte blocks: each plaintext
 * block is XORed with the previous ciphertext block (the IV for the
 * first) before encryption. The caller's `iv` block is not modified.
 */
void aes_encrypt_cbc(uint8_t *output, aes_key *key, aes_block *iv, uint8_t *input, uint32_t nb_blocks)
{
    aes_block chain;

    if (!nb_blocks)
        return;

#if defined(ARCH_X86) && defined(WITH_AESNI)
    /* Hardware path for 10-round (AES-128) keys. */
    if (have_aesni() && key->nbr == 10) {
        aes_ni_encrypt_cbc(output, key, (uint8_t *) iv, input, nb_blocks);
        return;
    }
#endif

    /* `chain` carries the previous ciphertext block, seeded by the IV. */
    block128_copy(&chain, iv);
    while (nb_blocks-- > 0) {
        block128_xor(&chain, (block128 *) input);
        aes_encrypt_block(&chain, key, &chain);
        block128_copy((block128 *) output, &chain);
        input += 16;
        output += 16;
    }
}
/* XTS-AES decryption of `nb_blocks` whole 16-byte blocks.
 *
 * k1       : data-decryption key
 * k2       : tweak-encryption key (the tweak is always ENCRYPTED,
 *            even when decrypting)
 * dataunit : 128-bit data-unit (sector) number
 * spoint   : starting block index inside the data unit; the tweak is
 *            advanced by that many GF(2^128) doublings first
 */
void aes_decrypt_xts(uint8_t *output, aes_key *k1, aes_key *k2, aes_block *dataunit, uint32_t spoint, uint8_t *input, uint32_t nb_blocks)
{
    aes_block buf, t;

    if (!nb_blocks)
        return;

    /* Tweak = E_{k2}(data-unit number). */
    block128_copy(&t, dataunit);
    aes_encrypt_block(&t, k2, &t);

    /* TO OPTIMISE: this is really inefficient way to do that */
    while (spoint-- > 0)
        gf_mulx(&t);

    /* Each block: XOR tweak, ECB-decrypt under k1, XOR tweak again;
     * the tweak is doubled in GF(2^128) between blocks. */
    while (nb_blocks-- > 0) {
        block128_vxor(&buf, (block128 *) input, &t);
        aes_decrypt_block(&buf, k1, &buf);
        block128_vxor((block128 *) output, &buf, &t);
        input += 16;
        output += 16;
        gf_mulx(&t);
    }
}
/* Prepare the GHASH key for a GCM state: H = E_K(0^128).
 * Only gcm->h is touched; counters and lengths live elsewhere in this
 * explicit-context API.
 */
void tmd_aes_gcm_init(aes_gcm *gcm, const aes_key *key)
{
    /* Encrypt the all-zero block in place to obtain H. */
    block128_zero(&gcm->h);
    aes_encrypt_block(&gcm->h, key, &gcm->h);
}