/*
 * Encrypt an 802.11 frame in place with AES-CCM and append the MIC.
 * @tfm:     AEAD transform, already keyed
 * @skb:     frame; header and CCMP header are in place, MIC space is
 *           reserved here via skb_put()
 * @pn:      packet number used to build the nonce
 * @mic_len: length of the MIC to append
 */
void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, struct sk_buff *skb,
			       const u64 pn, size_t mic_len)
{
	u8 aad[2 * AES_BLOCK_SIZE];
	u8 b_0[AES_BLOCK_SIZE];
	u8 *data, *mic;
	size_t data_len, hdr_len;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct scatterlist sg[3];
	/* on-stack request; sized to include this tfm's private context */
	char aead_req_data[sizeof(struct aead_request) +
			   crypto_aead_reqsize(tfm)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *aead_req = (void *) aead_req_data;

	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	data_len = skb->len - hdr_len - IEEE80211_CCMP_HDR_LEN;

	/* build the B_0 nonce block and the AAD from the 802.11 header */
	ccmp_special_blocks(hdr, hdr_len, pn, b_0, aad);

	memset(aead_req, 0, sizeof(aead_req_data));

	data = skb->data + hdr_len + IEEE80211_CCMP_HDR_LEN;
	mic = skb_put(skb, mic_len);	/* reserve room for the MIC */

	/* aad[0..1] holds the AAD length, big-endian; AAD bytes follow */
	sg_init_table(sg, 3);
	sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
	sg_set_buf(&sg[1], data, data_len);
	sg_set_buf(&sg[2], mic, mic_len);

	aead_request_set_tfm(aead_req, tfm);
	/* cryptlen covers the plaintext only; the MIC slot receives output */
	aead_request_set_crypt(aead_req, sg, sg, data_len, b_0);
	aead_request_set_ad(aead_req, sg[0].length);

	crypto_aead_encrypt(aead_req);
}
int my_decrypt(char *input, int inputlen, char *output, int outputlen, char *key, int keylen) { struct crypto_blkcipher *tfm = NULL; struct blkcipher_desc desc; struct scatterlist src[1], dst[1]; unsigned int retval = 0; tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0); if (IS_ERR(tfm)) { printk(KERN_INFO "crypto_alloc_blkcipher failed\n"); return -EINVAL; } desc.tfm = tfm; desc.flags = 0; retval = crypto_blkcipher_setkey(tfm, key, keylen); if (retval) { printk(KERN_INFO "crypto_blkcipher_setkey failed\n"); crypto_free_blkcipher(tfm); return -EINVAL; } sg_init_table(src, 1); sg_set_buf(&src[0], input, inputlen); sg_init_table(dst, 1); sg_set_buf(dst, output, outputlen); retval = crypto_blkcipher_decrypt(&desc, dst, src, inputlen); crypto_free_blkcipher(tfm); return retval; }
/*
 * Prepare the src/dst scatterlists for a CCM request: entry 0 holds the
 * 16-byte auth tag, followed by the payload with the associated data
 * skipped. Returns 0 or a negative errno from IV validation.
 */
static int crypto_ccm_init_crypt(struct aead_request *req, u8 *tag)
{
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct scatterlist *sg;
	u8 *iv = req->iv;
	int err;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

	/* Note: rfc 3610 and NIST 800-38C require counter of
	 * zero to encrypt auth tag.
	 */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	sg_init_table(pctx->src, 3);
	sg_set_buf(pctx->src, tag, 16);
	/* fast-forward past the associated data and chain in the rest */
	sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
	if (sg != pctx->src + 1)
		sg_chain(pctx->src, 2, sg);

	/* out-of-place operation needs the same layout on the dst side */
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 3);
		sg_set_buf(pctx->dst, tag, 16);
		sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
		if (sg != pctx->dst + 1)
			sg_chain(pctx->dst, 2, sg);
	}

	return 0;
}
/*
 * Compute the Michael MIC over an 802.11 frame's DA/SA/priority header
 * plus the payload in a single hash pass.
 * Returns 0 on success, -1 on failure (NULL tfm or setkey error); the
 * MIC is written to @mic.
 */
int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *da, u8 *sa,
		u8 priority, u8 *data, size_t data_len, u8 *mic)
{
	struct hash_desc desc;
	struct scatterlist sg[2];
	u8 hdr[ETH_HLEN + 2]; /* size of header + padding */

	if (tfm_michael == NULL) {
		printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
		return -1;
	}

	/* Copy header into buffer. We need the padding on the end zeroed */
	memcpy(&hdr[0], da, ETH_ALEN);
	memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
	hdr[ETH_ALEN * 2] = priority;
	hdr[ETH_ALEN * 2 + 1] = 0;
	hdr[ETH_ALEN * 2 + 2] = 0;
	hdr[ETH_ALEN * 2 + 3] = 0;

	/* Use scatter gather to MIC header and data in one go */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], hdr, sizeof(hdr));
	sg_set_buf(&sg[1], data, data_len);

	if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
		return -1;

	desc.tfm = tfm_michael;
	desc.flags = 0;
	return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr), mic);
}
/*
 * Compute the Michael MIC over the DA/SA/priority header followed by
 * the payload, using a two-entry scatterlist in one digest pass.
 * Returns 0 on success, -1 on failure.
 */
int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *da, u8 *sa, u8 priority, u8 *data, size_t data_len, u8 *mic)
{
	struct hash_desc mic_desc;
	struct scatterlist frags[2];
	u8 header[ETH_HLEN + 2];

	if (!tfm_michael) {
		printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
		return -1;
	}

	/* header layout: DA | SA | priority | three zero padding bytes */
	memcpy(header, da, ETH_ALEN);
	memcpy(header + ETH_ALEN, sa, ETH_ALEN);
	header[2 * ETH_ALEN] = priority;
	memset(&header[2 * ETH_ALEN + 1], 0, 3);

	/* hash header and payload in one go */
	sg_init_table(frags, 2);
	sg_set_buf(&frags[0], header, sizeof(header));
	sg_set_buf(&frags[1], data, data_len);

	if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
		return -1;

	mic_desc.tfm = tfm_michael;
	mic_desc.flags = 0;
	return crypto_hash_digest(&mic_desc, frags, data_len + sizeof(header), mic);
}
/*
 * Decrypt and verify an AES-GCM protected payload in place.
 * @aad carries its big-endian length in the first two bytes.
 * Returns 0 on success or the crypto_aead_decrypt() error (e.g.
 * -EBADMSG on authentication failure); -EINVAL for empty input.
 */
int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, u8 *data, size_t data_len, u8 *mic)
{
	struct scatterlist sgl[3];
	/* on-stack request sized for this tfm's private context */
	char req_buf[sizeof(struct aead_request) + crypto_aead_reqsize(tfm)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *req = (void *)req_buf;

	if (!data_len)
		return -EINVAL;

	memset(req, 0, sizeof(req_buf));

	sg_init_table(sgl, 3);
	sg_set_buf(&sgl[0], &aad[2], be16_to_cpup((__be16 *)aad));
	sg_set_buf(&sgl[1], data, data_len);
	sg_set_buf(&sgl[2], mic, IEEE80211_GCMP_MIC_LEN);

	aead_request_set_tfm(req, tfm);
	aead_request_set_crypt(req, sgl, sgl,
			       data_len + IEEE80211_GCMP_MIC_LEN, j_0);
	aead_request_set_ad(req, sgl[0].length);

	return crypto_aead_decrypt(req);
}
/*
 * Build the src/dst scatterlists for an RFC4106 (GCM) request: the
 * associated data minus the trailing IV is flattened into rctx->assoc
 * and placed first, followed by the crypto payload.
 * Returns 0, or -EINVAL unless assoclen is 16 or 20 bytes.
 */
static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
{
	struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
	struct nitrox_aead_rctx *aead_rctx = &rctx->base;
	unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
	struct scatterlist *sg;

	if (areq->assoclen != 16 && areq->assoclen != 20)
		return -EINVAL;

	/* flatten the associated data into the request context */
	scatterwalk_map_and_copy(rctx->assoc, areq->src, 0, assoclen, 0);

	sg_init_table(rctx->src, 3);
	sg_set_buf(rctx->src, rctx->assoc, assoclen);
	/* skip the original AAD+IV in the caller's list, chain the rest */
	sg = scatterwalk_ffwd(rctx->src + 1, areq->src, areq->assoclen);
	if (sg != rctx->src + 1)
		sg_chain(rctx->src, 2, sg);

	if (areq->src != areq->dst) {
		sg_init_table(rctx->dst, 3);
		sg_set_buf(rctx->dst, rctx->assoc, assoclen);
		sg = scatterwalk_ffwd(rctx->dst + 1, areq->dst, areq->assoclen);
		if (sg != rctx->dst + 1)
			sg_chain(rctx->dst, 2, sg);
	}

	aead_rctx->src = rctx->src;
	aead_rctx->dst = (areq->src == areq->dst) ? rctx->src : rctx->dst;
	return 0;
}
/*
 * One-shot AES-CBC encrypt or decrypt of @inputLength bytes from
 * @input into @output with the given key and IV. The function has no
 * error reporting; it now returns silently on allocation or setkey
 * failure instead of dereferencing an invalid handle.
 */
static void AES_cbc(const __u8 *iv, int ivLength, const __u8 *key,
		    int keyLength, const __u8 *input, int inputLength,
		    __u8 *output, int encrypt)
{
	struct scatterlist src[1];
	struct scatterlist dst[1];
	struct blkcipher_desc desc;
	struct crypto_blkcipher *cipher = crypto_alloc_blkcipher("cbc(aes)", 0, 0);

	/* was unchecked: an ERR_PTR would be dereferenced below */
	if (IS_ERR(cipher))
		return;

	/* was unchecked: a failed setkey would run with an undefined key */
	if (crypto_blkcipher_setkey(cipher, key, keyLength)) {
		crypto_free_blkcipher(cipher);
		return;
	}

	sg_init_table(dst, 1);
	sg_init_table(src, 1);
	sg_set_buf(&dst[0], output, inputLength);
	sg_set_buf(&src[0], input, inputLength);

	desc.tfm = cipher;
	desc.flags = 0;
	crypto_blkcipher_set_iv(cipher, iv, ivLength);

	if (encrypt)
		crypto_blkcipher_encrypt(&desc, dst, src, inputLength);
	else
		crypto_blkcipher_decrypt(&desc, dst, src, inputLength);

	crypto_free_blkcipher(cipher);
}
/*
 * Decrypt and verify an AES-CCM protected payload in place (legacy AEAD
 * API: associated data is attached separately via set_assoc).
 * @aad carries its big-endian length in the first two bytes.
 * Returns 0 on success or the crypto_aead_decrypt() error; -EINVAL for
 * empty input.
 */
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
			      u8 *data, size_t data_len, u8 *mic,
			      size_t mic_len)
{
	struct scatterlist assoc, pt, ct[2];
	/* on-stack request sized for this tfm's private context */
	char aead_req_data[sizeof(struct aead_request) +
			   crypto_aead_reqsize(tfm)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *aead_req = (void *) aead_req_data;

	if (data_len == 0)
		return -EINVAL;

	memset(aead_req, 0, sizeof(aead_req_data));

	/* plaintext destination overlaps the ciphertext source (in place) */
	sg_init_one(&pt, data, data_len);
	sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
	sg_init_table(ct, 2);
	sg_set_buf(&ct[0], data, data_len);
	sg_set_buf(&ct[1], mic, mic_len);

	aead_request_set_tfm(aead_req, tfm);
	aead_request_set_assoc(aead_req, &assoc, assoc.length);
	aead_request_set_crypt(aead_req, ct, &pt, data_len + mic_len, b_0);

	return crypto_aead_decrypt(aead_req);
}
/*
 * Decrypt epayload->encrypted_data into epayload->decrypted_data; the
 * block-cipher padding beyond decrypted_datalen is steered into a
 * scratch pad buffer. Returns 0 on success or a negative error code.
 */
static int derived_key_decrypt(struct encrypted_key_payload *epayload,
			       const u8 *derived_key,
			       unsigned int derived_keylen)
{
	struct scatterlist sg_in[1];
	struct scatterlist sg_out[2];
	struct blkcipher_desc desc;
	unsigned int encrypted_datalen;
	char pad[16];
	int ret;

	/* ciphertext length is the plaintext rounded up to the block size */
	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
	ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
				  epayload->iv, ivsize);
	if (ret < 0)
		goto out;
	dump_encrypted_data(epayload, encrypted_datalen);

	memset(pad, 0, sizeof pad);
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 2);
	sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
	sg_set_buf(&sg_out[0], epayload->decrypted_data,
		   epayload->decrypted_datalen);
	/* overflow area receives the padding bytes of the final block */
	sg_set_buf(&sg_out[1], pad, sizeof pad);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, encrypted_datalen);
	crypto_free_blkcipher(desc.tfm);
	if (ret < 0)
		goto out;
	dump_decrypted_data(epayload);
out:
	return ret;
}
/*
 * Prepare the CTR sub-request for GCM: a zeroed auth-tag block is
 * prefixed to both src and dst lists so that encrypting it produces
 * the keystream block used for the final tag.
 */
static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
				  struct aead_request *req,
				  unsigned int cryptlen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct scatterlist *dst;
	__be32 counter = cpu_to_be32(1);

	memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
	/* append the 32-bit big-endian counter value 1 after the 12-byte IV */
	memcpy(req->iv + 12, &counter, 4);

	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		/* out-of-place: mirror the tag-first layout on dst */
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(ablk_req, ctx->ctr);
	ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
				     cryptlen + sizeof(pctx->auth_tag),
				     req->iv);
}
/*
 * Finish an HMAC computation: finalize the inner digest into @out,
 * then compute H(K ^ opad || inner_digest) into @out.
 * @keylen is updated in place if the key was longer than a block and
 * had to be hashed down first.
 * NOTE(review): tmp is used with sg_set_buf() and no init — this
 * targets the legacy (pre-sg-chaining) crypto_digest API; confirm
 * against the kernel version this builds for.
 */
void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
		       unsigned int *keylen, u8 *out)
{
	unsigned int i;
	struct scatterlist tmp;
	char *opad = tfm->crt_digest.dit_hmac_block;

	/* long keys are first hashed down to the digest size */
	if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
		hash_key(tfm, key, *keylen);
		*keylen = crypto_tfm_alg_digestsize(tfm);
	}

	/* finalize the inner hash into out */
	crypto_digest_final(tfm, out);

	/* build K ^ opad in the transform's hmac block buffer */
	memset(opad, 0, crypto_tfm_alg_blocksize(tfm));
	memcpy(opad, key, *keylen);
	for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++)
		opad[i] ^= 0x5c;

	/* outer hash: H(K ^ opad || inner digest) */
	sg_set_buf(&tmp, opad, crypto_tfm_alg_blocksize(tfm));
	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &tmp, 1);
	sg_set_buf(&tmp, out, crypto_tfm_alg_digestsize(tfm));
	crypto_digest_update(tfm, &tmp, 1);
	crypto_digest_final(tfm, out);
}
/*
 * Hash all LBR (last branch record) entries, newest first, into @hash.
 * Returns 0 unconditionally.
 */
int hash_lbr(uint8_t hash[DIGEST_LENGTH], struct lbr_t *lbr)
{
	struct scatterlist sg;
	int i, j, idx;

	/* No error checking here. If anything fails, we better go straight
	   home anyway. */
	crypto_hash_init(&armor_desc);
	armor_desc.flags = 0;

	/* Loop over all LBR entries. */
	for (i = 0; i < LBR_ENTRIES; i++) {
		/* Bias by LBR_ENTRIES before the modulo so the index cannot
		 * go negative (the old "(tos - i) % N" hashed out of bounds
		 * whenever tos < i), and use the same index the debug print
		 * below always used — hash and print now agree. */
		idx = (lbr->tos + LBR_ENTRIES - i) % LBR_ENTRIES;

		/* was bare sg_set_buf(): the entry lacked an end marker */
		sg_init_one(&sg, &lbr->from[idx], sizeof(uint64_t));
		crypto_hash_update(&armor_desc, &sg, sizeof(uint64_t));
		sg_init_one(&sg, &lbr->to[idx], sizeof(uint64_t));
		crypto_hash_update(&armor_desc, &sg, sizeof(uint64_t));

		printdj(false, "lbr[%2d], <from: 0x%012llx, to: 0x%012llx>\n",
			i, lbr->from[idx], lbr->to[idx]);
	}
	ARMOR_STAT_INC(digests);
	crypto_hash_final(&armor_desc, hash);

	printdj(false, "hash: ");
	for (j = 0; j < DIGEST_LENGTH; j++)
		printdj(false, "%02x", hash[j]);
	printdj(false, "\n");

	return 0;
}
/*
 * Decrypt and verify an AES-CCM protected payload in place (legacy AEAD
 * API with a fixed-size MIC). @aad carries its big-endian length in the
 * first two bytes. Returns 0 on success, a decrypt error otherwise;
 * -EINVAL for empty input.
 */
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
			      u8 *data, size_t data_len, u8 *mic)
{
	struct scatterlist assoc, pt, ct[2];
	/* on-stack request; priv[] is a VLA sized for this tfm's context */
	struct {
		struct aead_request req;
		u8 priv[crypto_aead_reqsize(tfm)];
	} aead_req;

	if (data_len == 0)
		return -EINVAL;

	memset(&aead_req, 0, sizeof(aead_req));

	/* plaintext destination overlaps the ciphertext source (in place) */
	sg_init_one(&pt, data, data_len);
	sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
	sg_init_table(ct, 2);
	sg_set_buf(&ct[0], data, data_len);
	sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);

	aead_request_set_tfm(&aead_req.req, tfm);
	aead_request_set_assoc(&aead_req.req, &assoc, assoc.length);
	aead_request_set_crypt(&aead_req.req, ct, &pt,
			       data_len + IEEE80211_CCMP_MIC_LEN, b_0);

	return crypto_aead_decrypt(&aead_req.req);
}
/*
 * CBC ciphertext-stealing decryption of the final (possibly partial)
 * two-block chunk: recover Pn and Pn-1 from the swapped/truncated last
 * two ciphertext blocks, then chain desc->info as usual.
 * Returns 0 on success or the child cipher's error.
 */
static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
			   struct blkcipher_desc *desc,
			   struct scatterlist *dst,
			   struct scatterlist *src,
			   unsigned int offset,
			   unsigned int nbytes)
{
	int bsize = crypto_blkcipher_blocksize(desc->tfm);
	u8 tmp[bsize];
	struct blkcipher_desc lcldesc;
	struct scatterlist sgsrc[1], sgdst[1];
	int lastn = nbytes - bsize;	/* length of the final short block */
	u8 iv[bsize];
	u8 s[bsize * 2], d[bsize * 2];
	int err;

	if (lastn < 0)
		return -EINVAL;

	sg_init_table(sgsrc, 1);
	sg_init_table(sgdst, 1);

	/* pull the last two ciphertext blocks (Cn-1 || Cn) into s */
	scatterwalk_map_and_copy(s, src, offset, nbytes, 0);

	lcldesc.tfm = ctx->child;
	lcldesc.info = iv;
	lcldesc.flags = desc->flags;

	/* 1. Decrypt Cn-1 (s) to create Dn (tmp)*/
	memset(iv, 0, sizeof(iv));
	sg_set_buf(&sgsrc[0], s, bsize);
	sg_set_buf(&sgdst[0], tmp, bsize);
	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
	if (err)
		return err;
	/* 2. Pad Cn with zeros at the end to create C of length BB */
	memset(iv, 0, sizeof(iv));
	memcpy(iv, s + bsize, lastn);
	/* 3. Exclusive-or Dn (tmp) with C (iv) to create Xn (tmp) */
	crypto_xor(tmp, iv, bsize);
	/* 4. Select the first Ln bytes of Xn (tmp) to create Pn */
	memcpy(d + bsize, tmp, lastn);

	/* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
	memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
	/* 6. Decrypt En to create Pn-1 */
	memset(iv, 0, sizeof(iv));
	sg_set_buf(&sgsrc[0], s + bsize, bsize);
	sg_set_buf(&sgdst[0], d, bsize);
	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);

	/* XOR with previous block */
	crypto_xor(d, desc->info, bsize);

	/* write the recovered plaintext back ... */
	scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);

	/* ... and save Cn-1 as the chaining value for the caller */
	memcpy(desc->info, s, bsize);
	return err;
}
/*
 * CCM decryption: CTR-decrypt the received auth tag and payload in one
 * pass, recompute the CBC-MAC over the plaintext and compare the two
 * tags in constant time.
 * Returns 0, a negative errno, or -EBADMSG on authentication failure.
 */
static int crypto_ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = req->cryptlen;
	u8 *authtag = pctx->auth_tag;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	if (cryptlen < authsize)
		return -EINVAL;
	cryptlen -= authsize;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

	/* pull the received tag off the tail of the ciphertext */
	scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);

	/* counter of zero decrypts the auth tag (rfc 3610 / NIST 800-38C) */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, authtag, 16);
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, authtag, 16);
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_decrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_decrypt(abreq);
	if (err)
		return err;

	/* recompute the MAC over the decrypted plaintext */
	err = crypto_ccm_auth(req, req->dst, cryptlen);
	if (err)
		return err;

	/* verify */
	if (crypto_memneq(authtag, odata, authsize))
		return -EBADMSG;

	return err;
}
static int encrypt_Cipher(char *key, char *src, char *dest, unsigned int len, int *written) { struct crypto_blkcipher *blkcipher = NULL; char *cipher = "cbc(aes)"; struct scatterlist sg_in[2]; struct scatterlist sg_out[1]; struct blkcipher_desc desc; unsigned int encrypted_datalen; unsigned int padlen; char pad[16]; char *iv=NULL; int ret = -EFAULT; encrypted_datalen = nearestRoundup(len); padlen = encrypted_datalen - len; blkcipher = crypto_alloc_blkcipher(cipher, 0, 0); if (IS_ERR(blkcipher)) { printk("could not allocate blkcipher handle for %s\n", cipher); return -PTR_ERR(blkcipher); } if (crypto_blkcipher_setkey(blkcipher, key, strlen(key))) { printk("key could not be set\n"); ret = -EAGAIN; goto out; } desc.flags = 0; desc.tfm = blkcipher; iv = (char *)kmalloc(crypto_blkcipher_ivsize(blkcipher) , GFP_KERNEL); if(iv==NULL) { printk("Initialisation vector not initialised\n"); ret = -ENOMEM; goto out; } memset(iv, 0, crypto_blkcipher_ivsize(blkcipher)); memset(pad, 0, sizeof pad); sg_init_table(sg_in, 2); sg_set_buf(&sg_in[0], src, len); sg_set_buf(&sg_in[1], pad, padlen); sg_init_table(sg_out, 1); sg_set_buf(sg_out, dest, encrypted_datalen); crypto_blkcipher_set_iv(blkcipher, iv, crypto_blkcipher_ivsize(blkcipher)); ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, encrypted_datalen); (*written) = encrypted_datalen; printk("Cipher Encryption operation completed\n"); kfree(iv); crypto_free_blkcipher(blkcipher); return ret; out: if (blkcipher) crypto_free_blkcipher(blkcipher); if (iv) kfree(iv); return ret; }
/***************************************************************************** * 函 数 名 : sd_sg_init_table * * 功能描述 : SD多块数据传输sg list初始化 * * 输入参数 : const void *buf 待操作的buffer地址 unsigned int buflen 待操作的buffer大小,小于32K,为512B的整数倍; 大于32K,为32KB的整数倍,最大128K * 输出参数 : NA * * 返 回 值 : 0 : 成功;其它: 失败 * * 其它说明 : NA * *****************************************************************************/ int sd_sg_init_table(const void *buf,unsigned int buflen) { int cnt = 0; int sgcnt = 0; u8 *dataBuf = (u8*)buf; struct scatterlist *sg; struct scatterlist *sgNode; g_sd_sg = NULL; g_sgcnt = 0; if ((0 == buflen)||(NULL == buf)|| (0 != (buflen % 512))) { (void)printk("sd_sg_init_table para is err!\n"); return -1; } if (buflen < SD_TRACE_CLUSTER_SIZE) { sgcnt = 1; } else if ( 0 == buflen % SD_TRACE_CLUSTER_SIZE ) { sgcnt = ( buflen/SD_TRACE_CLUSTER_SIZE ); } else { (void)printk("sd_sg_init_table buf isn't n*32k!\n"); return -1; } /*每一个block一个 scatterlist*/ sg = (struct scatterlist *)kzalloc(sizeof(struct scatterlist)*sgcnt, GFP_KERNEL); if (NULL == sg) { (void)printk("sg kmalloc fail!\n"); return -1; } sg_init_table(sg, sgcnt); if (1 == sgcnt) { sg_set_buf(sg, (const void *)dataBuf, buflen); } else { for_each_sg(sg, sgNode, sgcnt, cnt) { sg_set_buf(sgNode, (const void *)dataBuf, SD_TRACE_CLUSTER_SIZE); dataBuf += SD_TRACE_CLUSTER_SIZE; } }
/*
 * CCM encryption: compute the CBC-MAC over the plaintext, CTR-encrypt
 * the tag and payload in one pass, then append the encrypted tag to
 * the destination. Returns 0 on success or a negative errno.
 */
static int crypto_ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int cryptlen = req->cryptlen;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

	/* CBC-MAC of the plaintext lands in odata */
	err = crypto_ccm_auth(req, req->src, cryptlen);
	if (err)
		return err;

	/* Note: rfc 3610 and NIST 800-38C require counter of
	 * zero to encrypt auth tag.
	 */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, odata, 16);
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, odata, 16);
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_encrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_encrypt(abreq);
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(odata, req->dst, cryptlen,
				 crypto_aead_authsize(aead), 1);
	return err;
}
/*
 * CBC ciphertext-stealing encryption of the final (possibly partial)
 * two-block chunk: encrypt both blocks with chaining, then emit them
 * swapped with the last block truncated to the partial length.
 * NOTE(review): the err from the first encrypt is overwritten by the
 * second call's result — matches the original; confirm intentional.
 */
static int cts_cbc_encrypt(struct crypto_cts_ctx *ctx,
			   struct blkcipher_desc *desc,
			   struct scatterlist *dst,
			   struct scatterlist *src,
			   unsigned int offset,
			   unsigned int nbytes)
{
	int bsize = crypto_blkcipher_blocksize(desc->tfm);
	u8 tmp[bsize], tmp2[bsize];
	struct blkcipher_desc lcldesc;
	struct scatterlist sgsrc[1], sgdst[1];
	int lastn = nbytes - bsize;	/* length of the final short block */
	u8 iv[bsize];
	u8 s[bsize * 2], d[bsize * 2];
	int err;

	if (lastn < 0)
		return -EINVAL;

	sg_init_table(sgsrc, 1);
	sg_init_table(sgdst, 1);

	/* pull the last two plaintext blocks (zero-padded) into s */
	memset(s, 0, sizeof(s));
	scatterwalk_map_and_copy(s, src, offset, nbytes, 0);

	/* chain from the caller's running IV */
	memcpy(iv, desc->info, bsize);

	lcldesc.tfm = ctx->child;
	lcldesc.info = iv;
	lcldesc.flags = desc->flags;

	/* encrypt Pn-1 -> Cn (tmp), which becomes the truncated last block */
	sg_set_buf(&sgsrc[0], s, bsize);
	sg_set_buf(&sgdst[0], tmp, bsize);
	err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize);

	memcpy(d + bsize, tmp, lastn);

	/* encrypt the padded Pn chained off tmp -> Cn-1 (tmp2) */
	lcldesc.info = tmp;

	sg_set_buf(&sgsrc[0], s + bsize, bsize);
	sg_set_buf(&sgdst[0], tmp2, bsize);
	err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize);

	memcpy(d, tmp2, bsize);

	scatterwalk_map_and_copy(d, dst, offset, nbytes, 1);

	/* save the full last ciphertext block as the next chaining value */
	memcpy(desc->info, tmp2, bsize);

	return err;
}
/*
 * Verify entry->pw against a stored SHA-256 of salt||password.
 * Returns 0 on match, 1 otherwise; always scrubs the cleartext
 * password from entry->pw before returning.
 */
int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg[2];
	unsigned char temp_sum[GR_SHA_LEN]
		__attribute__((aligned(__alignof__(unsigned long))));
	unsigned long *tmpsumptr = (unsigned long *)temp_sum;
	unsigned long *sumptr = (unsigned long *)sum;
	int cryptres;
	int retval = 1;
	/* volatile so the full-length compare loop is not optimized away */
	volatile int mismatched = 0;
	volatile int dummy = 0;
	unsigned int i;

	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		/* should never happen, since sha256 should be built in */
		memset(entry->pw, 0, GR_PW_LEN);
		return 1;
	}

	/* hash salt and password in one digest pass */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], salt, GR_SALT_LEN);
	sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));

	desc.tfm = tfm;
	desc.flags = 0;

	cryptres = crypto_hash_digest(&desc, sg,
				      GR_SALT_LEN + strlen(entry->pw),
				      temp_sum);

	/* scrub the cleartext password regardless of outcome */
	memset(entry->pw, 0, GR_PW_LEN);

	if (cryptres)
		goto out;

	/* word-wise compare without early exit (uniform timing) */
	for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
		if (sumptr[i] != tmpsumptr[i])
			mismatched = 1;
		else
			dummy = 1; // waste a cycle

	if (!mismatched)
		retval = dummy - 1;
out:
	crypto_free_hash(tfm);
	return retval;
}
/*
 * Populate the three-entry output scatterlist for the fallback path:
 * entry 0 is the resynced head bytes, entry 1 the skb payload, and
 * entry 2 the space right after the head bytes where the crypto layer
 * writes the authentication tag.
 */
static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
			struct tls_context *tls_ctx,
			struct sk_buff *nskb, int tcp_payload_offset,
			int payload_len, int sync_size, void *dummy_buf)
{
	sg_set_buf(sg_out + 0, dummy_buf, sync_size);
	sg_set_buf(sg_out + 1, nskb->data + tcp_payload_offset, payload_len);
	/* Add room for authentication tag produced by crypto */
	sg_set_buf(sg_out + 2, (char *)dummy_buf + sync_size,
		   TLS_CIPHER_AES_GCM_128_TAG_SIZE);
}
/*
 * Verify entry->pw against a stored SHA-256 of salt||password, hashing
 * incrementally (salt first, then password). Returns 0 on match, 1
 * otherwise; scrubs the cleartext password before returning.
 */
int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
{
	char *p;
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned char temp_sum[GR_SHA_LEN];
	/* volatile so the full-length byte compare is not optimized away */
	volatile int retval = 0;
	volatile int dummy = 0;
	unsigned int i;

	sg_init_table(&sg, 1);

	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		/* should never happen, since sha256 should be built in */
		return 1;
	}

	desc.tfm = tfm;
	desc.flags = 0;

	crypto_hash_init(&desc);

	/* feed the salt, then the supplied password */
	p = salt;
	sg_set_buf(&sg, p, GR_SALT_LEN);
	crypto_hash_update(&desc, &sg, sg.length);

	p = entry->pw;
	sg_set_buf(&sg, p, strlen(p));
	crypto_hash_update(&desc, &sg, sg.length);

	crypto_hash_final(&desc, temp_sum);

	/* scrub the cleartext password */
	memset(entry->pw, 0, GR_PW_LEN);

	/* compare every byte; no early exit (uniform timing) */
	for (i = 0; i < GR_SHA_LEN; i++)
		if (sum[i] != temp_sum[i])
			retval = 1;
		else
			dummy = 1; // waste a cycle

	crypto_free_hash(tfm);
	return retval;
}
/************************************************************************** * KERNEL SHA1 FUNCTION **************************************************************************/ unsigned int sbchk_sha1(char * code, unsigned int code_len, char* result) { unsigned int ret = SEC_OK; struct scatterlist sg[1]; struct crypto_hash *tfm = NULL; struct hash_desc desc; tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); if(IS_ERR(tfm)) { ret = SBCHK_BASE_HASH_INIT_FAIL; goto _exit; } /* sg_init_one(&sg[0], plaintext, length); */ sg_set_buf(&sg[0], code, code_len); desc.tfm = tfm; desc.flags = 0; memset(result, 0, 20); /* SHA1 returns 20 bytes */ if (crypto_hash_digest(&desc, sg, code_len, result)) { ret = SBCHK_BASE_HASH_DATA_FAIL; goto _exit; } crypto_free_hash(tfm); _exit: return ret; }
/* Make a sg_table based on sg[] of crypto request.
 *
 * Each entry of @sgt mirrors the length of the corresponding entry in
 * @sg but points into the flat buffer at @vbase. Returns 0 on success
 * or -ENOMEM if the table cannot be allocated.
 */
static int ss_sg_table_init(struct sg_table *sgt, struct scatterlist *sg,
			    int len, char *vbase, dma_addr_t pbase)
{
	int i;
	int npages = 0;
	int offset = 0;
	struct scatterlist *src_sg = sg;
	struct scatterlist *dst_sg = NULL;

	npages = ss_sg_cnt(sg, len);
	WARN_ON(npages == 0);

	if (sg_alloc_table(sgt, npages, GFP_KERNEL)) {
		SS_ERR("sg_alloc_table(%d) failed!\n", npages);
		WARN_ON(1);
		/* was fall-through: sgt->sgl is NULL here and was
		 * dereferenced below */
		return -ENOMEM;
	}

	dst_sg = sgt->sgl;
	for (i = 0; i < npages; i++) {
		sg_set_buf(dst_sg, vbase + offset, sg_dma_len(src_sg));
		offset += sg_dma_len(src_sg);
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}
	return 0;
}
long check_module_hash(char *mod, unsigned int len) { /* caller holds module_mutex, so no concurrency */ static struct hash_desc desc; static struct scatterlist sg[ MAX_MODULE_SIZE >> PAGE_SHIFT ]; int i, m, n, pages = (len + PAGE_SIZE) >> PAGE_SHIFT; unsigned char sha1_result[20]; /* allocate on first invocation */ if (!desc.tfm) desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(desc.tfm)) return PTR_ERR(desc.tfm); sg_init_table( sg, pages ); for (i = 0; i < pages; i++) sg_set_buf( &sg[i], &mod[PAGE_SIZE * i], PAGE_SIZE ); crypto_hash_digest(&desc, sg, len, sha1_result); /* use the first nibble to decide where to start searching, and take single steps from there */ for (n = 0, i = ((sha1_result[0] >> 4) * modcount) >> 4; i >= 0 && i < modcount; i += (n = m)) { if (!(m = hashcmp(sha1_result, modhashes[i]))) return 0; /* bail if we are about to change direction */ if (n && (m < 0)^(n < 0)) break; } return -EPERM; }
/*
 * Mirror @in_sg into @out_sg, pointing every output entry at
 * successive chunks of @buf with lengths copied from the input list.
 * Returns the number of entries populated (0 on bad arguments).
 */
static int mmc_copy_sglist(struct scatterlist *in_sg, int entries,
			   struct scatterlist *out_sg, u8 *buf)
{
	int i = 0;

	if (out_sg && (entries > 0)) {
		sg_init_table(out_sg, entries);
	} else {
		pr_err("Either in_sg is empty or out_sg is NULL\n");
		goto exit;
	}

	/*
	 * The old "if (&out_sg[i])" guard was always true (address of an
	 * array element is never NULL), so its error branch was dead code.
	 * out_sg is sized by @entries, which already bounds this loop.
	 */
	while (in_sg && entries > 0) {
		sg_set_buf(&out_sg[i], buf, in_sg->length);
		buf += in_sg->length;
		i++;
		in_sg = scatterwalk_sg_next(in_sg);
		entries--;
	}

exit:
	return i;
}
/*
 * AES-CBC decrypt @src_len bytes of @read_buf in place, keyed with the
 * first @keylen bytes of @buf. Returns 0 on success or a negative
 * errno from allocation/setkey/decrypt.
 */
int aes_decrypt(char *buf, unsigned int keylen, void *read_buf, size_t src_len)
{
	struct scatterlist sg;
	struct blkcipher_desc desc;
	int ret = 0;
	struct crypto_blkcipher *tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	ret = crypto_blkcipher_setkey((void *)tfm, buf, keylen);
	if (ret)
		goto free_tfm;

	/* was a bare sg_set_buf() on an uninitialized on-stack entry:
	 * the scatterlist carried no init/end marker */
	sg_init_one(&sg, read_buf, src_len);

	ret = crypto_blkcipher_decrypt(&desc, &sg, &sg, src_len);

free_tfm:
	crypto_free_blkcipher(tfm);
	return ret;
}
/* checksum the plaintext data and hdrlen bytes of the token header */ s32 make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body, int body_offset, struct xdr_netobj *cksum) { struct hash_desc desc; /* XXX add to ctx? */ struct scatterlist sg[1]; int err; desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(desc.tfm)) return GSS_S_FAILURE; cksum->len = crypto_hash_digestsize(desc.tfm); desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; err = crypto_hash_init(&desc); if (err) goto out; sg_set_buf(sg, header, hdrlen); err = crypto_hash_update(&desc, sg, hdrlen); if (err) goto out; err = xdr_process_buf(body, body_offset, body->len - body_offset, checksummer, &desc); if (err) goto out; err = crypto_hash_final(&desc, cksum->data); out: crypto_free_hash(desc.tfm); return err ? GSS_S_FAILURE : 0; }
u32 krb5_decrypt( struct crypto_blkcipher *tfm, void * iv, void * in, void * out, int length) { u32 ret = -EINVAL; struct scatterlist sg[1]; u8 local_iv[16] = {0}; struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; if (length % crypto_blkcipher_blocksize(tfm) != 0) goto out; if (crypto_blkcipher_ivsize(tfm) > 16) { dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n", crypto_blkcipher_ivsize(tfm)); goto out; } if (iv) memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm)); memcpy(out, in, length); sg_set_buf(sg, out, length); ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); out: dprintk("RPC: gss_k5decrypt returns %d\n",ret); return ret; }