/*
 * Walk a scatterlist of nsg entries and feed each page-sized chunk to the
 * algorithm's dia_update() hook, remapping as the data crosses page
 * boundaries.
 */
static void update(struct crypto_tfm *tfm,
		   struct scatterlist *sg, unsigned int nsg)
{
	unsigned int i;

	for (i = 0; i < nsg; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
		struct page *pg = sg[i].page;
#else
		struct page *pg = sg_page(&sg[i]);
#endif
		unsigned int offset = sg[i].offset;
		unsigned int l = sg[i].length;

		do {
			/* Hash at most up to the end of the current page. */
			unsigned int bytes_from_page =
				min(l, ((unsigned int)PAGE_SIZE) - offset);
			char *p = crypto_kmap(pg, 0) + offset;

			tfm->__crt_alg->cra_digest.dia_update(
				crypto_tfm_ctx(tfm), p, bytes_from_page);
			crypto_kunmap(p, 0);
			crypto_yield(tfm);

			/* Continue at the start of the next page. */
			offset = 0;
			pg++;
			l -= bytes_from_page;
		} while (l > 0);
	}
}
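For context, a hypothetical caller sketch: under the legacy 2.6-era crypto_digest API, whose update entry point takes a scatterlist plus an entry count matching update()'s nsg parameter, hashing a flat buffer would look roughly like this. The "md5" algorithm name, the helper name, and the one-entry scatterlist are illustrative assumptions, not code from this source.

/* Hypothetical usage sketch (legacy crypto_digest API; not from the source). */
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int digest_buffer(const u8 *buf, unsigned int len, u8 *out)
{
	struct scatterlist sg;
	struct crypto_tfm *tfm = crypto_alloc_tfm("md5", 0);	/* alg name is an assumption */

	if (!tfm)
		return -ENOMEM;

	sg_init_one(&sg, buf, len);
	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &sg, 1);	/* nsg = 1: a single scatterlist entry */
	crypto_digest_final(tfm, out);		/* out must hold the digest size */
	crypto_free_tfm(tfm);
	return 0;
}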
/*
 * Byte-count variant of update(): consume exactly nbytes from the
 * scatterlist, honouring the algorithm's alignment mask by hashing a short
 * leading fragment whenever a chunk starts at a misaligned offset.
 */
static int update2(struct hash_desc *desc,
		   struct scatterlist *sg, unsigned int nbytes)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);

	if (!nbytes)
		return 0;

	for (;;) {
		struct page *pg = sg_page(sg);
		unsigned int offset = sg->offset;
		unsigned int l = sg->length;

		/* Never consume more than the caller asked for. */
		if (unlikely(l > nbytes))
			l = nbytes;
		nbytes -= l;

		do {
			unsigned int bytes_from_page =
				min(l, ((unsigned int)PAGE_SIZE) - offset);
			char *src = crypto_kmap(pg, 0);
			char *p = src + offset;

			/*
			 * Hash the few bytes needed to reach the algorithm's
			 * alignment boundary before the bulk of the chunk.
			 */
			if (unlikely(offset & alignmask)) {
				unsigned int bytes =
					alignmask + 1 - (offset & alignmask);

				bytes = min(bytes, bytes_from_page);
				tfm->__crt_alg->cra_digest.dia_update(tfm, p,
								      bytes);
				p += bytes;
				bytes_from_page -= bytes;
				l -= bytes;
			}
			tfm->__crt_alg->cra_digest.dia_update(tfm, p,
							      bytes_from_page);
			crypto_kunmap(src, 0);
			crypto_yield(desc->flags);

			/* Continue at the start of the next page. */
			offset = 0;
			pg++;
			l -= bytes_from_page;
		} while (l > 0);

		if (!nbytes)
			break;
		sg = scatterwalk_sg_next(sg);
	}

	return 0;
}
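update2() is reached through the hash_desc-based interface that replaced the raw crypto_tfm entry points. A minimal caller sketch, assuming the legacy (pre-4.6) crypto_hash API; "sha1", the helper name, and the one-entry scatterlist are illustrative assumptions:

/* Hypothetical usage sketch (legacy crypto_hash API; not from the source). */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int hash_buffer(const u8 *buf, unsigned int len, u8 *out)
{
	struct scatterlist sg;
	struct hash_desc desc;
	int err;

	desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return PTR_ERR(desc.tfm);
	desc.flags = 0;	/* without CRYPTO_TFM_REQ_MAY_SLEEP, crypto_yield() won't reschedule */

	sg_init_one(&sg, buf, len);
	err = crypto_hash_init(&desc);
	if (!err)
		err = crypto_hash_update(&desc, &sg, len);	/* nbytes, not nsg */
	if (!err)
		err = crypto_hash_final(&desc, out);	/* out must hold crypto_hash_digestsize() bytes */

	crypto_free_hash(desc.tfm);
	return err;
}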
/* Map the walk's current page and point walk->data at the current offset. */
void scatterwalk_map(struct scatter_walk *walk, int out)
{
	walk->data = crypto_kmap(walk->page, out) + walk->offset;
}
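For symmetry, a sketch of the matching unmap helper as it would look in this layer, modeled on the historical scatterwalk_unmap(vaddr, out) shape and on the crypto_kunmap() calls above; it is an assumption, not code from this source:

/* Hypothetical counterpart: release a mapping made by scatterwalk_map(). */
void scatterwalk_unmap(void *vaddr, int out)
{
	crypto_kunmap(vaddr, out);
}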
/*
 * Buffer incoming scatterlist data into full cipher blocks and fold each
 * block into the running CBC chain (iv = E_K(iv XOR block)); a partial
 * trailing block is kept in ops->prev for the next call.
 */
void crypto_xcbc_update(struct crypto_tfm *tfm,
			struct scatterlist *sg, unsigned int nsg)
{
	struct xcbc_ops *ops = (struct xcbc_ops *)tfm->crt_cipher.cit_xcbc_block;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned int i;

	if (!(tfm->crt_cipher.cit_mode & CRYPTO_TFM_MODE_CBC))
		return;

	for (i = 0; i < nsg; i++) {
		struct page *pg = sg[i].page;
		unsigned int offset = sg[i].offset;
		unsigned int slen = sg[i].length;

		while (slen > 0) {
			unsigned int len =
				min(slen, ((unsigned int)PAGE_SIZE) - offset);
			char *p = crypto_kmap(pg, 0) + offset;

			/* Check whether the data still fits in the block buffer. */
			if ((ops->len + len) <= bsize) {
				memcpy(ops->prev + ops->len, p, len);
				ops->len += len;
				slen -= len;

				/* Check whether the rest of the page is exhausted. */
				if (len + offset >= PAGE_SIZE) {
					offset = 0;
					pg++;
				} else
					offset += len;

				crypto_kunmap(p, 0);
				crypto_yield(tfm);
				continue;
			}

			/* Fill ops->prev with new data and encrypt it. */
			memcpy(ops->prev + ops->len, p, bsize - ops->len);
			len -= bsize - ops->len;
			p += bsize - ops->len;
			tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv,
							ops->prev);
			tfm->__crt_alg->cra_cipher.cia_encrypt(
				crypto_tfm_ctx(tfm),
				tfm->crt_cipher.cit_iv,
				tfm->crt_cipher.cit_iv);

			/* Clear the buffered length. */
			ops->len = 0;

			/* Encrypt the remaining full blocks in place. */
			while (len > bsize) {
				tfm->crt_u.cipher.cit_xor_block(
					tfm->crt_cipher.cit_iv, p);
				tfm->__crt_alg->cra_cipher.cia_encrypt(
					crypto_tfm_ctx(tfm),
					tfm->crt_cipher.cit_iv,
					tfm->crt_cipher.cit_iv);
				p += bsize;
				len -= bsize;
			}

			/* Keep the surplus below one blocksize for later. */
			if (len) {
				memcpy(ops->prev, p, len);
				ops->len = len;
			}

			crypto_kunmap(p, 0);
			crypto_yield(tfm);
			slen -= min(slen, ((unsigned int)PAGE_SIZE) - offset);
			offset = 0;
			pg++;
		}
	}
}
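The core operation crypto_xcbc_update() performs on every complete block is plain CBC chaining: XOR the block into the running IV, then encrypt the IV in place. A standalone illustration of just that step; the helper name and the function-pointer parameter are assumptions standing in for cit_xor_block()/cia_encrypt():

#include <linux/types.h>

/* Hypothetical sketch of the per-block CBC-MAC step used above:
 * iv = E_K(iv XOR block). 'encrypt' stands in for cia_encrypt(). */
static void cbc_mac_step(void (*encrypt)(u8 *dst, const u8 *src),
			 u8 *iv, const u8 *block, unsigned int bsize)
{
	unsigned int i;

	for (i = 0; i < bsize; i++)
		iv[i] ^= block[i];	/* cit_xor_block() equivalent */
	encrypt(iv, iv);		/* encrypt the chain value in place */
}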