void measure(void) { int i; int loop; int direction; unsigned long long mlen; unsigned long long adlen; unsigned long long clen; unsigned long long tlen; for (loop = 0;loop < LOOPS;++loop) { for (direction = 0;direction < 3;++direction) { mlen = 0; adlen = 0; for (;;) { if (direction != 1) ++mlen; if (direction != 0) ++adlen; if (mlen > MAXTEST_BYTES) break; if (adlen > MAXTEST_BYTES) break; kernelrandombytes(k,crypto_aead_KEYBYTES); kernelrandombytes(nsec,crypto_aead_NSECBYTES); kernelrandombytes(npub,crypto_aead_NPUBBYTES); kernelrandombytes(m,mlen); kernelrandombytes(ad,adlen); kernelrandombytes(c,mlen + crypto_aead_ABYTES); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_aead_encrypt(c,&clen,m,mlen,ad,adlen,nsec,npub,k); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(1000000 * adlen + mlen,"encrypt_cycles",cycles,TIMINGS); for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_aead_decrypt(m,&tlen,nsec,c,clen,ad,adlen,npub,k); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(1000000 * adlen + mlen,"decrypt_cycles",cycles,TIMINGS); if (clen > 0) { ++c[clen/2]; for (i = 0;i <= TIMINGS;++i) { cycles[i] = cpucycles(); crypto_aead_decrypt(m,&tlen,nsec,c,clen,ad,adlen,npub,k); } for (i = 0;i < TIMINGS;++i) cycles[i] = cycles[i + 1] - cycles[i]; printentry(1000000 * adlen + mlen,"forgery_decrypt_cycles",cycles,TIMINGS); } } } } }
/*
 * Decrypt-and-verify an 802.11 CCMP frame in place.
 *
 * @tfm:      CCM AEAD transform (key already set)
 * @b_0:      CCM B_0 block used as the IV
 * @aad:      AAD buffer; first 2 bytes hold the big-endian AAD length
 * @data:     ciphertext, decrypted in place
 * @data_len: ciphertext length (must be non-zero)
 * @mic:      received MIC to verify
 * @mic_len:  MIC length
 *
 * Returns 0 on success or a negative error (e.g. -EBADMSG on MIC failure
 * from crypto_aead_decrypt, -EINVAL for empty input).
 */
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
			      u8 *data, size_t data_len, u8 *mic,
			      size_t mic_len)
{
	/* stack storage for the request, aligned for struct aead_request */
	char req_storage[sizeof(struct aead_request) + crypto_aead_reqsize(tfm)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *req = (void *)req_storage;
	struct scatterlist assoc, pt, ct[2];

	if (!data_len)
		return -EINVAL;

	memset(req, 0, sizeof(req_storage));

	sg_init_one(&pt, data, data_len);
	sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
	sg_init_table(ct, 2);
	sg_set_buf(&ct[0], data, data_len);
	sg_set_buf(&ct[1], mic, mic_len);

	aead_request_set_tfm(req, tfm);
	aead_request_set_assoc(req, &assoc, assoc.length);
	aead_request_set_crypt(req, ct, &pt, data_len + mic_len, b_0);

	return crypto_aead_decrypt(req);
}
static int seqiv_aead_decrypt(struct aead_request *req) { struct crypto_aead *geniv = crypto_aead_reqtfm(req); struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); struct aead_request *subreq = aead_request_ctx(req); crypto_completion_t compl; void *data; unsigned int ivsize = 8; if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) return -EINVAL; aead_request_set_tfm(subreq, ctx->child); compl = req->base.complete; data = req->base.data; aead_request_set_callback(subreq, req->base.flags, compl, data); aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen - ivsize, req->iv); aead_request_set_ad(subreq, req->assoclen + ivsize); scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); return crypto_aead_decrypt(subreq); }
/*
 * Decrypt-and-verify an 802.11 CCMP frame in place (fixed-size MIC).
 *
 * @tfm:      CCM AEAD transform (key already set)
 * @b_0:      CCM B_0 block used as the IV
 * @aad:      AAD buffer; first 2 bytes hold the big-endian AAD length
 * @data:     ciphertext, decrypted in place
 * @data_len: ciphertext length (must be non-zero)
 * @mic:      received IEEE80211_CCMP_MIC_LEN-byte MIC to verify
 *
 * Returns 0 on success or a negative error from crypto_aead_decrypt.
 */
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
			      u8 *data, size_t data_len, u8 *mic)
{
	struct scatterlist assoc, pt, ct[2];
	/*
	 * Fix: the original embedded a variable-length array inside a local
	 * struct (u8 priv[crypto_aead_reqsize(tfm)]), which is a GNU
	 * extension and does not guarantee the alignment the request needs.
	 * Use the explicitly aligned char-buffer pattern that the sibling
	 * CCM/GCM wrappers in this file already use.
	 */
	char aead_req_data[sizeof(struct aead_request) +
			   crypto_aead_reqsize(tfm)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *aead_req = (void *)aead_req_data;

	if (data_len == 0)
		return -EINVAL;

	memset(aead_req, 0, sizeof(aead_req_data));

	sg_init_one(&pt, data, data_len);
	sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
	sg_init_table(ct, 2);
	sg_set_buf(&ct[0], data, data_len);
	sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);

	aead_request_set_tfm(aead_req, tfm);
	aead_request_set_assoc(aead_req, &assoc, assoc.length);
	aead_request_set_crypt(aead_req, ct, &pt,
			       data_len + IEEE80211_CCMP_MIC_LEN, b_0);

	return crypto_aead_decrypt(aead_req);
}
/*
 * Decrypt-and-verify an 802.11 GCMP frame in place.
 *
 * Builds a single scatterlist of [AAD, data, MIC]; the AAD length is read
 * from the first two (big-endian) bytes of @aad.  Returns 0 on success or
 * a negative error from crypto_aead_decrypt.
 */
int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
			      u8 *data, size_t data_len, u8 *mic)
{
	/* stack storage for the request, aligned for struct aead_request */
	char req_storage[sizeof(struct aead_request) + crypto_aead_reqsize(tfm)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *req = (void *)req_storage;
	struct scatterlist sg[3];

	if (!data_len)
		return -EINVAL;

	memset(req, 0, sizeof(req_storage));

	sg_init_table(sg, 3);
	sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
	sg_set_buf(&sg[1], data, data_len);
	sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);

	aead_request_set_tfm(req, tfm);
	aead_request_set_crypt(req, sg, sg,
			       data_len + IEEE80211_GCMP_MIC_LEN, j_0);
	aead_request_set_ad(req, sg[0].length);

	return crypto_aead_decrypt(req);
}
/*
 * RFC 4309 (CCM for ESP) decrypt: only 16- or 20-byte associated data is
 * valid (8-byte SPI+seq or 12-byte with extended sequence numbers, plus
 * the 8-byte IV).  Rewrites the request for the inner CCM transform and
 * decrypts.
 */
static int crypto_rfc4309_decrypt(struct aead_request *req)
{
	switch (req->assoclen) {
	case 16:
	case 20:
		break;
	default:
		return -EINVAL;
	}

	return crypto_aead_decrypt(crypto_rfc4309_crypt(req));
}
/*
 * Known-answer test: encrypt messages of every length 0..255 with
 * deterministic pseudo-random message/AD/key/nonce bytes, compare each
 * ciphertext against the expected bytes in @kat, then round-trip through
 * decryption.  Prints "ok" and returns 0 on success; on the first mismatch
 * prints the failing length and stage (1=ciphertext, 2=decrypt rejected,
 * 3=plaintext mismatch) and returns -1.
 */
int check(const unsigned char *kat)
{
	unsigned char w[256];	/* message bytes */
	unsigned char h[256];	/* associated data */
	unsigned char k[32];	/* key */
	unsigned char n[16];	/* nonce */
	unsigned i;
	int place = 0;

	/* deterministic fills (distinct affine generators per buffer) */
	for (i = 0; i < sizeof w; ++i)
		w[i] = 255 & (i * 197 + 123);
	for (i = 0; i < sizeof h; ++i)
		h[i] = 255 & (i * 193 + 123);
	for (i = 0; i < sizeof k; ++i)
		k[i] = 255 & (i * 191 + 123);
	for (i = 0; i < sizeof n; ++i)
		n[i] = 255 & (i * 181 + 123);

	for (i = 0; i < sizeof w; ++i) {
		unsigned char m[256];
		unsigned char c[256 + 32];
		unsigned long long mlen, clen, hlen;

		memset(m, 0, sizeof m);
		memcpy(m, w, i);
		clen = 0;
		mlen = hlen = i;

		crypto_aead_encrypt(c, &clen, m, mlen, h, hlen, NULL, n, k);
		if (memcmp(kat, c, clen) != 0) {
			place = 1;
			goto fail;
		}

		memset(m, 0, sizeof m);
		mlen = 0;
		if (crypto_aead_decrypt(m, &mlen, NULL, c, clen, h, hlen, n, k) != 0) {
			place = 2;
			goto fail;
		}
		if (memcmp(m, w, mlen) != 0) {
			place = 3;
			goto fail;
		}

		kat += clen;	/* expected answers are packed back to back */
	}

	printf("ok\n");
	return 0;

fail:
	printf("fail at %u:%d\n", i, place);
	return -1;
}
/*
 * RFC 4106 decrypt dispatch: when the FPU is usable in this context, call
 * the inner (SIMD) child directly; otherwise route through the cryptd
 * async wrapper so the SIMD work happens in process context.
 */
static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;
	struct crypto_aead *target;

	if (irq_fpu_usable())
		target = cryptd_aead_child(cryptd_tfm);
	else
		target = &cryptd_tfm->base;

	aead_request_set_tfm(req, target);
	return crypto_aead_decrypt(req);
}
/*
 * padata worker for parallel AEAD decryption: run the embedded request,
 * stash the result in padata->info, and serialize the completion unless
 * the operation is still in flight (-EINPROGRESS).
 */
static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	int ret = crypto_aead_decrypt(pcrypt_request_ctx(preq));

	padata->info = ret;
	if (ret == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}
static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); unsigned int authsize = crypto_aead_authsize(aead); unsigned int nbytes = req->cryptlen - (enc ? 0 : authsize); struct blkcipher_desc desc = { .tfm = ctx->null, }; return crypto_blkcipher_encrypt(&desc, req->dst, req->src, nbytes); } static int crypto_rfc4543_encrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req); struct aead_request *subreq; int err; if (req->src != req->dst) { err = crypto_rfc4543_copy_src_to_dst(req, true); if (err) return err; } subreq = crypto_rfc4543_crypt(req, true); err = crypto_aead_encrypt(subreq); if (err) return err; scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen, crypto_aead_authsize(aead), 1); return 0; } static int crypto_rfc4543_decrypt(struct aead_request *req) { int err; if (req->src != req->dst) { err = crypto_rfc4543_copy_src_to_dst(req, false); if (err) return err; } req = crypto_rfc4543_crypt(req, false); return crypto_aead_decrypt(req); }
/*
 * SUPERCOP-style round-trip test: for every message/AD length from 0 to
 * MAXTEST_BYTES, encrypt then decrypt and accumulate any decrypt failure
 * into the return value (0 == all passed).
 */
static int tests_from_supercop()
{
	static const size_t MAXTEST_BYTES = 4096;
	static const size_t INTERMEDIATE_TAG_BYTES =
		INTERMEDIATE_TAGLEN * MAXTEST_BYTES / PARTLEN;
	unsigned char* c = (unsigned char*)malloc(
		(size_t)MAXTEST_BYTES + INTERMEDIATE_TAG_BYTES + CRYPTO_ABYTES);
	unsigned char* a = (unsigned char*)malloc((size_t)MAXTEST_BYTES);
	unsigned char* m = (unsigned char*)malloc((size_t)MAXTEST_BYTES);
	unsigned char k[CRYPTO_KEYBYTES];
	unsigned char npub[CRYPTO_NPUBBYTES];
	unsigned char* nsec = 0;
	unsigned long long alen;
	unsigned long long clen;
	unsigned long long mlen;
	int result = 0;

	/*
	 * Fix: the original passed these buffers to fill()/encrypt without
	 * checking the allocations; free(NULL) is a no-op, so release
	 * whatever did succeed and report failure.
	 */
	if (!c || !a || !m) {
		free(a);
		free(c);
		free(m);
		return -1;
	}

	fill(k, CRYPTO_KEYBYTES);
	fill(npub, CRYPTO_NPUBBYTES);

	for (size_t i = 0; i <= MAXTEST_BYTES; ++i) {
		mlen = i;
		alen = i;
		clen = i + CRYPTO_ABYTES;
		fill(a, alen);
		fill(m, mlen);
		crypto_aead_encrypt(c, &clen, m, mlen, a, alen, nsec, npub, k);
		const int current_result = crypto_aead_decrypt(
			m, &mlen, nsec, c, clen, a, alen, npub, k);
		result |= current_result;
		if (current_result) {
			printf("crypto_aead_decrypt returned %d at %zu\n",
			       current_result, i);
		}
	}

	free(a);
	free(c);
	free(m);
	return result;
}
/*
 * AEGIS-256 AES-NI decrypt dispatch: take the synchronous SIMD child when
 * the FPU is usable and we would not jump ahead of requests already queued
 * on cryptd; otherwise go through the cryptd async path.
 */
static int cryptd_aegis256_aesni_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;
	struct crypto_aead *target;

	if (irq_fpu_usable() &&
	    (!in_atomic() || !cryptd_aead_queued(cryptd_tfm)))
		target = cryptd_aead_child(cryptd_tfm);
	else
		target = &cryptd_tfm->base;

	aead_request_set_tfm(req, target);
	return crypto_aead_decrypt(req);
}
/*
 * RFC 4106 decrypt dispatch (cryptd-aware): use the synchronous SIMD
 * child when the FPU is usable and no queued cryptd work would be
 * reordered; otherwise route through the cryptd async wrapper.
 */
static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;
	struct crypto_aead *target;

	if (irq_fpu_usable() &&
	    (!in_atomic() || !cryptd_aead_queued(cryptd_tfm)))
		target = cryptd_aead_child(cryptd_tfm);
	else
		target = &cryptd_tfm->base;

	aead_request_set_tfm(req, target);
	return crypto_aead_decrypt(req);
}
static int rfc4106_decrypt(struct aead_request *req) { int ret; struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); if (!irq_fpu_usable()) { struct aead_request *cryptd_req = (struct aead_request *) aead_request_ctx(req); memcpy(cryptd_req, req, sizeof(*req)); aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); return crypto_aead_decrypt(cryptd_req); } else { struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); kernel_fpu_begin(); ret = cryptd_child->base.crt_aead.decrypt(req); kernel_fpu_end(); return ret; } }
static int simd_aead_decrypt(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_request *subreq; struct crypto_aead *child; subreq = aead_request_ctx(req); *subreq = *req; if (!crypto_simd_usable() || (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm))) child = &ctx->cryptd_tfm->base; else child = cryptd_aead_child(ctx->cryptd_tfm); aead_request_set_tfm(subreq, child); return crypto_aead_decrypt(subreq); }
errno_t test_supercop(void) { errno_t err = AUTHENC_OK; unsigned char key[SC_AES128CTR_KEY_LEN] = { 0 }; unsigned char iv[AC_GCM_IV_LEN] = { 0 }; authenc_align unsigned char msg[3 * AC_GCM_BLOCK_LEN] = { 0 }; authenc_align unsigned char cipher[4 * AC_GCM_BLOCK_LEN] = { 0 }; authenc_align unsigned char computed_msg[3 * AC_GCM_BLOCK_LEN] = { 0 }; int r; unsigned long long clen = sizeof(cipher); unsigned long long mlen = sizeof(computed_msg); r = crypto_aead_encrypt(cipher, &clen, msg, sizeof(msg), NULL, 0, NULL, iv, key); assert(r == 0); r = crypto_aead_decrypt(computed_msg, &mlen, NULL, cipher, clen, NULL, 0, iv, key); assert(r == 0); assert(mlen == sizeof(msg)); assert(memcmp(msg, computed_msg, sizeof(msg)) == 0); return err; }
/*
 * Run an AEAD request through the software fallback cipher when the
 * crypto4xx hardware cannot handle it.  Mirrors the original request's
 * callback, src/dst, cryptlen, IV and AD onto a stack-allocated
 * subrequest targeting ctx->sw_cipher.aead.
 */
static int crypto4xx_aead_fallback(struct aead_request *req,
				   struct crypto4xx_ctx *ctx, bool do_decrypt)
{
	char req_storage[sizeof(struct aead_request) +
			 crypto_aead_reqsize(ctx->sw_cipher.aead)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *subreq = (void *)req_storage;

	memset(subreq, 0, sizeof(req_storage));

	aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	if (do_decrypt)
		return crypto_aead_decrypt(subreq);
	return crypto_aead_encrypt(subreq);
}
/*
 * Submit an async decrypt on either the blkcipher or the AEAD request
 * held in @cdata (selected by cdata->aead), then wait for completion via
 * waitfor().  @src is cast away from const because the crypto API takes
 * non-const scatterlists.
 */
ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
				 const struct scatterlist *src,
				 struct scatterlist *dst, size_t len)
{
	struct scatterlist *src_sg = (struct scatterlist *)src;
	int ret;

	reinit_completion(&cdata->async.result->completion);

	if (cdata->aead) {
		aead_request_set_crypt(cdata->async.arequest, src_sg, dst,
				       len, cdata->async.iv);
		ret = crypto_aead_decrypt(cdata->async.arequest);
	} else {
		ablkcipher_request_set_crypt(cdata->async.request, src_sg,
					     dst, len, cdata->async.iv);
		ret = crypto_ablkcipher_decrypt(cdata->async.request);
	}

	return waitfor(cdata->async.result, ret);
}
/*
 * Decrypt-and-verify an 802.11 CCMP frame held in an sk_buff, in place.
 *
 * @tfm:     CCM AEAD transform (key already set)
 * @skb:     frame; header at skb->data, MIC at the tail
 * @pn:      packet number used to build the CCM nonce/AAD
 * @mic_len: MIC length
 *
 * Returns 0 on success or a negative error (-EINVAL for a frame too short
 * to contain any payload, or the crypto_aead_decrypt result).
 */
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, struct sk_buff *skb,
			      const u64 pn, size_t mic_len)
{
	u8 aad[2 * AES_BLOCK_SIZE];
	u8 b_0[AES_BLOCK_SIZE];
	u8 *data, *mic;
	size_t data_len, hdr_len;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct scatterlist sg[3];
	char aead_req_data[sizeof(struct aead_request) +
			   crypto_aead_reqsize(tfm)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *aead_req = (void *) aead_req_data;

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/*
	 * Fix: data_len is size_t (unsigned), so the original
	 * "data_len <= 0" check only caught exactly zero; a frame shorter
	 * than hdr_len + mic_len made the subtraction wrap to a huge
	 * value.  Validate the length before subtracting.
	 */
	if (skb->len <= hdr_len + mic_len)
		return -EINVAL;
	data_len = skb->len - hdr_len - mic_len;

	ccmp_special_blocks(hdr, hdr_len, pn, b_0, aad);

	memset(aead_req, 0, sizeof(aead_req_data));

	mic = skb->data + skb->len - mic_len;
	data = skb->data + hdr_len;

	sg_init_table(sg, 3);
	sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
	sg_set_buf(&sg[1], data, data_len);
	sg_set_buf(&sg[2], mic, mic_len);

	aead_request_set_tfm(aead_req, tfm);
	aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0);
	aead_request_set_ad(aead_req, sg[0].length);

	return crypto_aead_decrypt(aead_req);
}
/*
 * RFC 4309 decrypt (no assoclen validation in this variant): rewrite the
 * request for the inner CCM transform, then decrypt.
 */
static int crypto_rfc4309_decrypt(struct aead_request *req)
{
	return crypto_aead_decrypt(crypto_rfc4309_crypt(req));
}
/*
 * echainiv encrypt: generate the IV by XORing the caller's IV with a
 * per-tfm random salt, store the result at the front of the ciphertext,
 * and encrypt the remainder with the child AEAD.  The IV bytes become
 * part of the associated data of the child request.
 */
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->geniv.child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	if (req->src != req->dst) {
		/* out-of-place: copy AD + plaintext to dst via the null cipher */
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
					       req->assoclen + req->cryptlen);
		if (err)
			return err;
	}

	/*
	 * The IV buffer must satisfy the child's alignment mask; bounce it
	 * through a kmalloc'd copy when it does not.  The completion
	 * handler is responsible for freeing the bounce buffer
	 * (presumably via echainiv_encrypt_complete2 — not visible here).
	 */
	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen - ivsize, info);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	/* salt the IV, write it into dst, then reload it for the child */
	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}

/*
 * echainiv decrypt: extract the transmitted IV from the head of the
 * ciphertext into req->iv, mirror it into dst for out-of-place requests,
 * and let the child AEAD decrypt/verify the rest.
 */
static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	/* must hold at least the IV plus the authentication tag */
	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->geniv.child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
	if (req->src != req->dst)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->assoclen, ivsize, 1);

	return crypto_aead_decrypt(subreq);
}

/*
 * Per-tfm init: seed the random salt from the default RNG, grab the
 * default null skcipher for out-of-place copies, then run the generic
 * geniv init and splice ourselves in front of the child transform.
 */
static int echainiv_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->geniv.lock);

	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

	err = crypto_get_default_rng();
	if (err)
		goto out;

	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));
	crypto_put_default_rng();
	if (err)
		goto out;

	ctx->null = crypto_get_default_null_skcipher();
	err = PTR_ERR(ctx->null);
	if (IS_ERR(ctx->null))
		goto out;

	err = aead_geniv_init(tfm);
	if (err)
		goto drop_null;

	ctx->geniv.child = geniv->child;
	geniv->child = geniv;

out:
	return err;

drop_null:
	crypto_put_default_null_skcipher();
	goto out;
}

/* Per-tfm teardown: release the child AEAD and the shared null skcipher. */
static void echainiv_exit(struct crypto_tfm *tfm)
{
	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->geniv.child);
	crypto_put_default_null_skcipher();
}

/*
 * Template instantiation: build a geniv instance wrapping the underlying
 * AEAD.  Old-style (cra_aead) algorithms skip the echainiv hooks; for
 * new-style ones the IV size must be a multiple of u32 and at most
 * MAX_IV_SIZE.
 */
static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		return PTR_ERR(inst);

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	if (alg->base.cra_aead.encrypt)
		goto done;

	err = -EINVAL;
	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE)
		goto free_inst;

	inst->alg.encrypt = echainiv_encrypt;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.base.cra_init = echainiv_init;
	inst->alg.base.cra_exit = echainiv_exit;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

done:
	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	aead_geniv_free(inst);
	goto out;
}

/* Template free callback. */
static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
}
/*
 * Null IV-generator decrypt: nothing to strip, just run the embedded
 * AEAD request as-is.
 */
static int aead_null_givdecrypt(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;

	return crypto_aead_decrypt(areq);
}
int genKAT(unsigned long long plaintext_length, unsigned long long ad_length) { if((plaintext_length > (1<<31)) || (ad_length> (1<<31))) return 1; Init(); //For generating plaintext unsigned char *key = (unsigned char*)malloc(crypto_aead_KEYBYTES); unsigned char *nonce = (unsigned char*)malloc(crypto_aead_NPUBBYTES); unsigned char *ciphertext; unsigned long long ciphertext_length; unsigned long long decrypted_length; unsigned char *plaintext = (unsigned char*)malloc((size_t)plaintext_length); unsigned char *plaintext_decrypted = (unsigned char*)malloc((size_t)plaintext_length); plaintext_length = (size_t)plaintext_length; if(plaintext==NULL || plaintext_decrypted==NULL) return 1; unsigned char *associated_data = (unsigned char*)malloc((size_t)ad_length); if(associated_data==NULL) { free(plaintext); free(plaintext_decrypted); return 1; } //Plaintext initialization unsigned char StateIn[64]; memset(StateIn,0,64); unsigned char StateOut[64]; int counter= (int)plaintext_length; unsigned char *dest_pointer = plaintext; while(counter>0) { FPerm(StateIn,StateOut); unsigned to_copy = (counter<64)?counter:64; memcpy(dest_pointer,StateOut,to_copy); dest_pointer += to_copy; (*((unsigned*)StateIn))++; counter-= to_copy; } //AD initialization counter= (int) ad_length; dest_pointer = associated_data; while(counter>0) { FPerm(StateIn,StateOut); unsigned to_copy = (counter<64)?counter:64; memcpy(dest_pointer,StateOut,to_copy); dest_pointer += to_copy; (*((unsigned*)StateIn))++; counter-= to_copy; } //Key setting FPerm(StateIn,StateOut); memcpy(key,StateOut,crypto_aead_KEYBYTES); (*((unsigned*)StateIn))++; //Nonce setting FPerm(StateIn,StateOut); memcpy(nonce,StateOut,crypto_aead_NPUBBYTES); (*((unsigned*)StateIn))++; //Ciphertext memory allocation ciphertext = (unsigned char*)malloc((size_t)(plaintext_length+crypto_aead_ABYTES)); if(ciphertext==NULL) { free(plaintext); free(plaintext_decrypted); free(associated_data); return 1; } //Writing input FILE *fp=fopen("out.log","w+"); 
fprintf(fp, "PLAINTEXT (%llu bytes):\n",plaintext_length); for(unsigned i=0; i<plaintext_length; ++i) { fprintf(fp, "0x%.02x ",plaintext[i]); if(i%20==19) fprintf(fp, "\n"); } fprintf(fp, "\nASSOCIATED DATA (%llu bytes):\n", ad_length); for(unsigned i=0; i<ad_length; ++i) { fprintf(fp, "0x%.02x ",associated_data[i]); if(i%20==19) fprintf(fp, "\n"); } fprintf(fp, "\n"); fprintf(fp, "\nKEY (%d bytes):\n", crypto_aead_KEYBYTES); for(unsigned i=0; i<crypto_aead_KEYBYTES; ++i) fprintf(fp, "0x%.02x ",key[i]); fprintf(fp, "\n"); //Encryption and decryption #ifdef EXTRANONCE //ExtraNonce crypto_aead_encrypt_no_nonce(ciphertext,&ciphertext_length,plaintext,plaintext_length,associated_data, ad_length,NULL,nonce,key); int result = crypto_aead_decrypt(plaintext_decrypted,&decrypted_length,NULL,ciphertext,ciphertext_length,associated_data, ad_length,nonce,key); #else //Normal nonce crypto_aead_encrypt(ciphertext,&ciphertext_length,plaintext,plaintext_length,associated_data, ad_length,NULL,nonce,key); int result = crypto_aead_decrypt(plaintext_decrypted,&decrypted_length,NULL,ciphertext,ciphertext_length,associated_data, ad_length,nonce,key); #endif if(decrypted_length != plaintext_length) printf("Plaintext length mismatch\n"); //Writing outputs fprintf(fp, "\nNONCE (%d bytes):\n", crypto_aead_NPUBBYTES); for(unsigned i=0; i<crypto_aead_NPUBBYTES; ++i) fprintf(fp, "0x%.02x ",nonce[i]); fprintf(fp, ".\n"); printf("Decryption result: %d\n",result); fprintf(fp, "\nCIPHERTEXT (%llu bytes):\n", ciphertext_length); for(unsigned i=0; i<ciphertext_length; ++i) { fprintf(fp, "0x%.02x ",ciphertext[i]); if(i%20==19) fprintf(fp, "\n"); if(i == ciphertext_length - crypto_aead_ABYTES-1) fprintf(fp, " || "); } fprintf(fp, ".\n"); fprintf(fp, "\nDECRYPTED PLAINTEXT (%llu bytes):\n", decrypted_length); for(unsigned i=0; i<decrypted_length; ++i) { fprintf(fp, "0x%.02x ",plaintext_decrypted[i]); if(i%20==19) fprintf(fp, "\n"); } fprintf(fp, ".\n"); fclose(fp); free(plaintext); free(ciphertext); 
free(plaintext_decrypted); free(associated_data); return 0; }
/*
 * AEAD algorithm self tests
 *
 * Runs every entry in fips_test_vector_aead through the qcrypto hardware:
 * encrypt in place and compare against the expected ciphertext, then
 * reset the key/authsize and decrypt in place, comparing against the
 * original plaintext.  Returns 0 when all vectors pass, a negative errno
 * or -1 on the first failure.
 */
int _fips_qcrypto_aead_selftest(struct fips_selftest_data *selftest_d)
{
	int rc = 0, err, tv_index, num_tv, authsize, buf_length;
	struct crypto_aead *tfm;
	struct aead_request *aead_req;
	struct _fips_completion fips_completion;
	struct scatterlist fips_sg, fips_assoc_sg;
	char *k_align_src = NULL;
	struct _fips_test_vector_aead tv_aead;

	num_tv = (sizeof(fips_test_vector_aead)) /
		(sizeof(struct _fips_test_vector_aead));

	/* One-by-one testing */
	for (tv_index = 0; tv_index < num_tv; tv_index++) {

		memcpy(&tv_aead, &fips_test_vector_aead[tv_index],
			(sizeof(struct _fips_test_vector_aead)));

		/* buffer must fit whichever of plaintext/ciphertext is larger */
		if (tv_aead.pln_txt_len > tv_aead.enc_txt_len)
			buf_length = tv_aead.pln_txt_len;
		else
			buf_length = tv_aead.enc_txt_len;

		/* Single buffer allocation for in place operation */
		k_align_src = kzalloc(buf_length, GFP_KERNEL);
		if (k_align_src == NULL) {
			/* NOTE(review): PTR_ERR(NULL) is 0, so this logs 0 rather
			 * than a meaningful error code */
			pr_err("qcrypto:, Failed to allocate memory for k_align_src %ld\n",
				PTR_ERR(k_align_src));
			return -ENOMEM;
		}

		memcpy(&k_align_src[0], tv_aead.pln_txt,
			tv_aead.pln_txt_len);

		/*
		 * use_sw flags are set in dtsi file which makes default Linux
		 * API calls to go to s/w crypto instead of h/w crypto.
		 * This code makes sure that all selftests calls always go to
		 * h/w, independent of DTSI flags.
		 */
		if (selftest_d->prefix_aead_algo) {
			if (_fips_get_alg_cra_name(tv_aead.mod_alg,
				selftest_d->algo_prefix,
				strlen(tv_aead.mod_alg))) {
				rc = -1;
				pr_err("Algo Name is too long for tv %d\n",
					tv_index);
				goto clr_buf;
			}
		}

		tfm = crypto_alloc_aead(tv_aead.mod_alg, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("qcrypto: %s algorithm not found\n",
				tv_aead.mod_alg);
			rc = -ENOMEM;
			goto clr_buf;
		}

		aead_req = aead_request_alloc(tfm, GFP_KERNEL);
		if (!aead_req) {
			pr_err("qcrypto:aead_request_alloc failed\n");
			rc = -ENOMEM;
			goto clr_tfm;
		}

		rc = qcrypto_aead_set_device(aead_req, selftest_d->ce_device);
		if (rc != 0) {
			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
				__func__, rc);
			goto clr_aead_req;
		}

		init_completion(&fips_completion.completion);
		aead_request_set_callback(aead_req,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			_fips_cb, &fips_completion);

		crypto_aead_clear_flags(tfm, ~0);
		rc = crypto_aead_setkey(tfm, tv_aead.key, tv_aead.klen);
		if (rc) {
			pr_err("qcrypto:crypto_aead_setkey failed\n");
			goto clr_aead_req;
		}

		/* tag length is the ciphertext/plaintext size difference */
		authsize = abs(tv_aead.enc_txt_len - tv_aead.pln_txt_len);
		rc = crypto_aead_setauthsize(tfm, authsize);
		if (rc) {
			pr_err("qcrypto:crypto_aead_setauthsize failed\n");
			goto clr_aead_req;
		}

		/* in-place: source and destination are the same buffer */
		sg_init_one(&fips_sg, k_align_src,
			tv_aead.pln_txt_len + authsize);
		aead_request_set_crypt(aead_req, &fips_sg, &fips_sg,
			tv_aead.pln_txt_len, tv_aead.iv);

		sg_init_one(&fips_assoc_sg, tv_aead.assoc, tv_aead.alen);
		aead_request_set_assoc(aead_req, &fips_assoc_sg,
			tv_aead.alen);

		/**** Encryption test ****/
		rc = crypto_aead_encrypt(aead_req);
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:aead:ENC, wait_for_completion failed\n");
				goto clr_aead_req;
			}
		}

		if (memcmp(k_align_src, tv_aead.enc_txt,
			tv_aead.enc_txt_len)) {
			rc = -1;
			goto clr_aead_req;
		}

		/** Decryption test **/
		init_completion(&fips_completion.completion);
		aead_request_set_callback(aead_req,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			_fips_cb, &fips_completion);

		crypto_aead_clear_flags(tfm, ~0);
		rc = crypto_aead_setkey(tfm, tv_aead.key, tv_aead.klen);
		if (rc) {
			pr_err("qcrypto:aead:DEC, crypto_aead_setkey failed\n");
			goto clr_aead_req;
		}

		authsize = abs(tv_aead.enc_txt_len - tv_aead.pln_txt_len);
		rc = crypto_aead_setauthsize(tfm, authsize);
		if (rc) {
			pr_err("qcrypto:aead:DEC, crypto_aead_setauthsize failed\n");
			goto clr_aead_req;
		}

		sg_init_one(&fips_sg, k_align_src,
			tv_aead.enc_txt_len + authsize);
		aead_request_set_crypt(aead_req, &fips_sg, &fips_sg,
			tv_aead.enc_txt_len, tv_aead.iv);

		sg_init_one(&fips_assoc_sg, tv_aead.assoc, tv_aead.alen);
		aead_request_set_assoc(aead_req, &fips_assoc_sg,
			tv_aead.alen);

		rc = crypto_aead_decrypt(aead_req);
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:aead:DEC, wait_for_completion failed\n");
				goto clr_aead_req;
			}
		}

		if (memcmp(k_align_src, tv_aead.pln_txt,
			tv_aead.pln_txt_len)) {
			rc = -1;
			goto clr_aead_req;
		}

clr_aead_req:
		aead_request_free(aead_req);
clr_tfm:
		crypto_free_aead(tfm);
clr_buf:
		kzfree(k_align_src);

		/* In case of any failure, return error */
		if (rc)
			return rc;
	}
	return rc;
}
/* Message/AD sizes (bytes) benchmarked in every section. */
static const unsigned long long bench_sizes[] = {
	64, 128, 256, 512, 1024, 2048, 4096
};
#define N_BENCH_SIZES (sizeof bench_sizes / sizeof bench_sizes[0])

/*
 * Print one benchmark result: minimum observed cycle count and the
 * derived cycles-per-byte figure for a message of `len` bytes.
 */
static void report_cycles(uint32_t cycles, unsigned long long len)
{
	/* %u: cycles is uint32_t (the original used %d, a type mismatch) */
	printf("Cycles for YAES: %u\n", cycles);
	printf("Cycles per byte: %f\n", cycles / (double)len);
}

/*
 * Run crypto_aead_encrypt() TIMER_SAMPLE_CNT times on `len` bytes of `in`
 * (with optional associated data) and return the minimum cycle count,
 * corrected for the timer-read overhead.
 */
static uint32_t min_enc_cycles(unsigned char *out, unsigned long long *tag,
			       unsigned char *in, unsigned long long len,
			       unsigned char *ad, unsigned long long adlen,
			       unsigned char *nonce, const unsigned char *key,
			       uint32_t overhead)
{
	uint32_t best = 0xFFFFFFFF; /* big number to start */
	int s;

	for (s = 0; s < TIMER_SAMPLE_CNT; s++) {
		uint32_t t0 = rdtsc32();
		crypto_aead_encrypt(out, tag, in, len, ad, adlen, 0, nonce, key);
		uint32_t t1 = rdtsc32();
		if (best > t1 - t0 - overhead)
			best = t1 - t0 - overhead;
	}
	return best;
}

/*
 * Same as min_enc_cycles() but times crypto_aead_decrypt().  The input is
 * random data, so authentication fails; only the timing is of interest.
 */
static uint32_t min_dec_cycles(unsigned char *out, unsigned long long *tag,
			       unsigned char *in, unsigned long long len,
			       unsigned char *ad, unsigned long long adlen,
			       unsigned char *nonce, const unsigned char *key,
			       uint32_t overhead)
{
	uint32_t best = 0xFFFFFFFF; /* big number to start */
	int s;

	for (s = 0; s < TIMER_SAMPLE_CNT; s++) {
		uint32_t t0 = rdtsc32();
		crypto_aead_decrypt(out, tag, 0, in, len, ad, adlen, nonce, key);
		uint32_t t1 = rdtsc32();
		if (best > t1 - t0 - overhead)
			best = t1 - t0 - overhead;
	}
	return best;
}

/*
 * YAES cycle-count benchmark driver.
 *
 * Measures encryption and decryption, with and without associated data,
 * for message sizes 64..4096 bytes, reporting the minimum over
 * TIMER_SAMPLE_CNT samples (minus calibrated rdtsc32() overhead).
 */
int main()
{
	const unsigned char key[16] = {0x7f,0x7e,0x7d,0x7c,0x7b,0x7a,0x79,0x78,
				       0x77,0x76,0x75,0x74,0x73,0x72,0x71,0x70};
	unsigned char nonce[16] = {0x09,0xf9,0x11,0x02,0x9d,0x74,0xe3,0x5b,
				   0xd8,0x41,0x56,0xc5,0x63,0x56,0x88,0xc0};
	unsigned char in[4096], out[4096], adata[4096];
	unsigned long long tag[16]; /* receives clen/tag output of the AEAD calls */
	size_t i;
	int j;
	//asm volatile ("mcr p15, 0, %0, c15, c9, 0\n" : : "r" (1));
	// srand(time(NULL));

	/*
	 * Fill the plaintext and AD buffers with pseudo-random bytes.
	 * Bounded by the buffer size: the original loop ran to 16*BLOCKS*100
	 * and overran in[4096].  adata was previously used uninitialized.
	 */
	for (i = 0; i < sizeof in; i++)
		in[i] = (unsigned char)rand();
	for (i = 0; i < sizeof adata; i++)
		adata[i] = (unsigned char)rand();

	/* Calibrate the cost of a back-to-back timer read. */
	uint32_t overhead = rdtsc32();
	overhead = rdtsc32() - overhead;

	printf("Cycles for calibrate: %u\n", overhead);
	for (j = 0; j < 1000; j++)	/* warm caches / frequency */
		crypto_aead_encrypt(out, tag, in, 4096, 0, 0, 0, nonce, key);
	printf("\nEncryption: \nWithout Adata\n");
	for (i = 0; i < N_BENCH_SIZES; i++)
		report_cycles(min_enc_cycles(out, tag, in, bench_sizes[i],
					     0, 0, nonce, key, overhead),
			      bench_sizes[i]);

	printf("Cycles for calibrate: %u\n", overhead);
	for (j = 0; j < 1000; j++)	/* warm-up */
		crypto_aead_encrypt(out, tag, in, 4096, 0, 0, 0, nonce, key);
	printf("\nEncryption: \nWith Adata\n");
	for (i = 0; i < N_BENCH_SIZES; i++)	/* AD length == message length */
		report_cycles(min_enc_cycles(out, tag, in, bench_sizes[i],
					     adata, bench_sizes[i],
					     nonce, key, overhead),
			      bench_sizes[i]);

	printf("\ndecryption: \nWithout Adata\n");
	for (i = 0; i < N_BENCH_SIZES; i++)
		report_cycles(min_dec_cycles(out, tag, in, bench_sizes[i],
					     0, 0, nonce, key, overhead),
			      bench_sizes[i]);

	for (j = 0; j < 1000; j++)	/* warm-up with decrypt */
		crypto_aead_decrypt(out, tag, 0, in, 4096, 0, 0, nonce, key);
	printf("\nDecryption: \nWith Adata\n");
	for (i = 0; i < N_BENCH_SIZES; i++)
		report_cycles(min_dec_cycles(out, tag, in, bench_sizes[i],
					     adata, bench_sizes[i],
					     nonce, key, overhead),
			      bench_sizes[i]);

	return 0;
}
/*
 * lrw_encrypt() - LRW-mode encryption using the AES-NI accelerated callback.
 * Clears MAY_SLEEP because the work runs inside a kernel_fpu_begin/end
 * section, where sleeping is not allowed.
 */
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];	/* tweak scratch buffer for lrw_crypt() */
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	/* must not sleep while the FPU state is claimed */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

/*
 * lrw_decrypt() - LRW-mode decryption; mirror of lrw_encrypt() with the
 * decrypt callback.
 */
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

/*
 * xts_aesni_setkey() - split the XTS key into crypt and tweak halves and
 * expand each into its own AES context.
 */
static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}

/* Encrypt one block with the tweak key to derive the per-sector tweak. */
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

/*
 * xts_encrypt() - XTS-mode encryption; same FPU-section pattern as the LRW
 * helpers, with a separate tweak context and tweak function.
 */
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

/* xts_decrypt() - XTS-mode decryption; mirror of xts_encrypt(). */
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#ifdef CONFIG_X86_64
/*
 * rfc4106_init() - tfm init: allocate the cryptd (async worker) AEAD that
 * services requests arriving when the FPU is unusable, and copy the current
 * context into the cryptd child.
 */
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	/* context must be realigned: crypto_tfm_ctx alignment may be weaker
	 * than the 16-byte alignment AES-NI requires */
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	/* reserve room so a request can be re-targeted at the cryptd tfm */
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

/* rfc4106_exit() - tfm teardown: release the cryptd AEAD if it was created. */
static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}

/*
 * Async completion callback for the hash-subkey CTR encryption: records the
 * final status and wakes the waiter.  -EINPROGRESS means "still running".
 */
static void rfc4106_set_hash_subkey_done(struct crypto_async_request *req,
					 int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

/*
 * rfc4106_set_hash_subkey() - derive the GHASH subkey H by encrypting an
 * all-zero block with the AES key via a temporary ctr(aes) transform,
 * waiting for completion if the cipher runs asynchronously.
 */
static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key,
				   unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* async path: block until the callback fires */
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

/*
 * rfc4106_set_key() - RFC 4106 setkey: the last 4 bytes of the key material
 * are the salt (nonce); the rest must be an AES-128 key.  Expands the key
 * (with FPU if usable), derives the hash subkey, and mirrors the context
 * into the cryptd child.
 */
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
		aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/*Account for 4 byte nonce at the end.*/
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/*key is not aligned: use an auxiliary aligned pointer*/
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		/* FPU unavailable: fall back to the generic key expansion */
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
		key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/*This must be on a 16 byte boundary!*/
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	/* keep parent and cryptd child tag sizes in sync */
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

/*
 * rfc4106_encrypt() - if the FPU is unusable (e.g. interrupt context),
 * defer the request to the cryptd worker; otherwise run the child's
 * encrypt directly inside an FPU section.
 */
static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

/* rfc4106_decrypt() - mirror of rfc4106_encrypt() for decryption. */
static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}
int benchmark(unsigned long long plaintext_length, unsigned long long ad_length) { if ((plaintext_length >(1 << 31)) || (ad_length> (1 << 31))) return 1; Init(); //For generating plaintext unsigned char *key = (unsigned char*)malloc(key_bytes); unsigned char *nonce = (unsigned char*)malloc(nonce_bytes); unsigned char *ciphertext; unsigned long long ciphertext_length; unsigned long long decrypted_length; unsigned char *plaintext = (unsigned char*)malloc((size_t)plaintext_length); unsigned char *plaintext_decrypted = (unsigned char*)malloc((size_t)plaintext_length); plaintext_length = (size_t)plaintext_length; if (plaintext == NULL || plaintext_decrypted == NULL) return 1; unsigned char *associated_data = (unsigned char*)malloc((size_t)ad_length); if (associated_data == NULL) { free(plaintext); free(plaintext_decrypted); return 1; } //Plaintext initialization unsigned char StateIn[64]; memset(StateIn, 0, 64); unsigned char StateOut[64]; int counter = (int)plaintext_length; unsigned char *dest_pointer = plaintext; while (counter>0) { FPerm(StateIn, StateOut); unsigned to_copy = (counter<64) ? counter : 64; memcpy(dest_pointer, StateOut, to_copy); dest_pointer += to_copy; (*((unsigned*)StateIn))++; counter -= to_copy; } //AD initialization counter = (int) ad_length; dest_pointer = associated_data; while (counter>0) { FPerm(StateIn, StateOut); unsigned to_copy = (counter<64) ? 
counter : 64; memcpy(dest_pointer, StateOut, to_copy); dest_pointer += to_copy; (*((unsigned*)StateIn))++; counter -= to_copy; } //Key setting FPerm(StateIn, StateOut); memcpy(key, StateOut, key_bytes); (*((unsigned*)StateIn))++; //Nonce setting FPerm(StateIn, StateOut); memcpy(nonce, StateOut, nonce_bytes); (*((unsigned*)StateIn))++; //Ciphertext memory allocation ciphertext = (unsigned char*)malloc((size_t)(plaintext_length + tag_bytes)); if (ciphertext == NULL) { free(plaintext); free(plaintext_decrypted); free(associated_data); return 1; } uint64_t start_time, mid_time, end_time; uint32_t start_ptr, mid_ptr, end_ptr; start_time = __rdtscp(&start_ptr); #ifdef EXTRANONCE //ExtraNonce crypto_aead_encrypt_no_nonce(ciphertext, &ciphertext_length, plaintext, plaintext_length, associated_data, ad_length, NULL, nonce, key); #else crypto_aead_encrypt(ciphertext, &ciphertext_length, plaintext, plaintext_length, associated_data, ad_length, NULL, nonce, key); #endif mid_time = __rdtscp(&mid_ptr); float speed = (float)(mid_time - start_time) / (plaintext_length + ad_length); printf("PAEQ-128: %d bytes encrypted, %2.2f cpb\n", (uint32_t)(plaintext_length + ad_length), speed); mid_time = __rdtscp(&mid_ptr); int result = crypto_aead_decrypt(plaintext_decrypted, &decrypted_length, NULL, ciphertext, ciphertext_length, associated_data, ad_length, nonce, key); end_time = __rdtscp(&end_ptr); speed = (float)(end_time - mid_time) / (plaintext_length + ad_length); printf("PAEQ-128: %d bytes decrypted, %2.2f cpb\n", (uint32_t)(plaintext_length + ad_length), speed); if (decrypted_length != plaintext_length) printf("Plaintext length mismatch\n"); if (result!=0) printf("Decryption result: %d\n", result); free(ciphertext); free(plaintext_decrypted); free(associated_data); return 0; }