/*
 * crypto_cbc_encrypt() - CBC-mode encryption over a scatterlist.
 *
 * Walks the src/dst scatterlists in virtually-mapped chunks and encrypts
 * each chunk, preferring the Realtek hardware engine when it is enabled
 * and the context advertises a valid hardware mode.
 *
 * @desc:   blkcipher descriptor (holds the tfm and flags)
 * @dst:    destination scatterlist
 * @src:    source scatterlist
 * @nbytes: total number of bytes to process
 *
 * Returns 0 on success or a negative error code from the walk API.
 */
static int crypto_cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	/* Each iteration handles one virtually-contiguous chunk of the walk. */
	while ((nbytes = walk.nbytes)) {
#ifdef CONFIG_CRYPTO_DEV_REALTEK_DBG
		printk("%s: total=%d, walk=%d, blk=%d, src=%p, dst=%p\n",
			__FUNCTION__, walk.total, walk.nbytes,
			crypto_cipher_blocksize(child),
			walk.src.virt.addr, walk.dst.virt.addr
			);
#endif

#ifdef CONFIG_CRYPTO_DEV_REALTEK
		/* mode >= 0 means the hardware engine handles this cipher. */
		if (ctx->rtl_ctx.mode >= 0) {
			/*
			 * NOTE(review): rtl_cipher_crypt()'s return value is
			 * passed to blkcipher_walk_done() as the residual byte
			 * count, so it presumably returns the number of bytes
			 * left unprocessed — confirm against its definition.
			 */
			nbytes = rtl_cipher_crypt(child, 1,
				&ctx->rtl_ctx, walk.src.virt.addr,
				nbytes, walk.iv, walk.dst.virt.addr);

			/*
			 * cbc mode update: the last ciphertext block becomes
			 * the IV for the next chunk.
			 */
			memcpy(walk.iv, walk.dst.virt.addr,
				crypto_cipher_blocksize(child));

			err = blkcipher_walk_done(desc, &walk, nbytes);
			continue;
		}
#endif

		/* Software fallback: pick the in-place or copying variant. */
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child);
		else
			nbytes = crypto_cbc_encrypt_segment(desc, &walk, child);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
/*
 * crypto_ecb_crypt() - ECB-mode processing over a blkcipher walk.
 *
 * Shared worker for ECB encrypt and decrypt: @fn is the block-cipher
 * primitive to apply (cia_encrypt or cia_decrypt), applied to each
 * block of every walk chunk.  When the Realtek engine is enabled and
 * the context has a valid hardware mode, the whole chunk is offloaded
 * instead.
 *
 * @desc: blkcipher descriptor
 * @walk: walk state, already initialized by the caller
 * @tfm:  underlying single-block cipher
 * @fn:   per-block encrypt or decrypt routine
 *
 * Returns 0 on success or a negative error code from the walk API.
 */
static int crypto_ecb_crypt(struct blkcipher_desc *desc,
	struct blkcipher_walk *walk,
	struct crypto_cipher *tfm,
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
	int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes;
	int err;
#ifdef CONFIG_CRYPTO_DEV_REALTEK
	struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
#endif

	err = blkcipher_walk_virt(desc, walk);

	/* Each iteration handles one virtually-contiguous chunk. */
	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

#ifdef CONFIG_CRYPTO_DEV_REALTEK
		/* mode >= 0 means the hardware engine handles this cipher. */
		if (ctx->rtl_ctx.mode >= 0) {
			/*
			 * Recover the direction by comparing @fn against the
			 * algorithm's encrypt hook: 1 = encrypt, 0 = decrypt.
			 */
			nbytes = rtl_cipher_crypt(tfm,
				fn == crypto_cipher_alg(tfm)->cia_encrypt,
				&ctx->rtl_ctx, wsrc, nbytes,
				walk->iv, wdst);
			err = blkcipher_walk_done(desc, walk, nbytes);
			continue;
		}
#endif

		/*
		 * Software fallback: apply the block primitive to each full
		 * block; the sub-block remainder is returned to the walk.
		 */
		do {
			fn(crypto_cipher_tfm(tfm), wdst, wsrc);
			wsrc += bsize;
			wdst += bsize;
		} while ((nbytes -= bsize) >= bsize);

		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	return err;
}
static int crypto_ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; struct crypto_blkcipher *tfm = desc->tfm; struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm); struct crypto_cipher *child = ctx->child; unsigned int bsize = crypto_cipher_blocksize(child); int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt_block(desc, &walk, bsize); while (walk.nbytes >= bsize) { #ifdef CONFIG_CRYPTO_DEV_REALTEK if (ctx->rtl_ctx.mode >= 0) { int i, over_flag = 0; unsigned int one = 0, len = 0; u8 over_iv[32] = {0}; u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; /* hw CTRBLK overflow handle different with linux kernel * hw engine: CTRBLK := NONCE || IV || ONE, NONCE 4 bytes, IV 8bytes, ONE 4bytes * hw engine only the ONE(4bytes) is treated as counter bytes * linux kernel uses the second method, which means the entire byte block is treated as counter bytes */ over_flag = 0; one = *((unsigned int *)(walk.iv + bsize - 4)); for (i = 0; i < (walk.nbytes / bsize); i++) { if (one == 0xffffffff) { //printk("%s %d i=%d one=%u\n", __FUNCTION__, __LINE__, i, one); over_flag = 1; break; } one++; } if (over_flag) { //before ONE overflow len = bsize*(i+1); nbytes = rtl_cipher_crypt(child, 1, &ctx->rtl_ctx, walk.src.virt.addr, len, walk.iv, walk.dst.virt.addr); //printk("%s %d len=%u nbytes=%u \n", __FUNCTION__, __LINE__, len, nbytes); src += (len - nbytes); dst += (len - nbytes); //after ONE overflow,update IV memcpy(over_iv, walk.iv, bsize - 4); crypto_inc(over_iv, bsize-4); memcpy(walk.iv, over_iv, bsize); nbytes = rtl_cipher_crypt(child, 1, &ctx->rtl_ctx, src, walk.nbytes -len, walk.iv, dst); /* increment counter in counterblock */ for (i = 0; i < ((walk.nbytes -len) / bsize); i++) crypto_inc(walk.iv, bsize); if (walk.src.virt.addr == walk.dst.virt.addr) { src += ((walk.nbytes -len) - nbytes); } else { src += ((walk.nbytes -len) - nbytes); dst += 
((walk.nbytes -len) - nbytes); } } else { nbytes = rtl_cipher_crypt(child, 1, &ctx->rtl_ctx, walk.src.virt.addr, walk.nbytes, walk.iv, walk.dst.virt.addr); if (walk.src.virt.addr == walk.dst.virt.addr) { walk.src.virt.addr += (walk.nbytes - nbytes); } else { walk.dst.virt.addr += (walk.nbytes - nbytes); walk.dst.virt.addr += (walk.nbytes - nbytes); } /* increment counter in counterblock */ for (i = 0; i < (walk.nbytes / bsize); i++) crypto_inc(walk.iv, bsize); } err = blkcipher_walk_done(desc, &walk, nbytes); continue; } #endif if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_ctr_crypt_inplace(&walk, child); else nbytes = crypto_ctr_crypt_segment(&walk, child); err = blkcipher_walk_done(desc, &walk, nbytes); } if (walk.nbytes) { crypto_ctr_crypt_final(&walk, child); err = blkcipher_walk_done(desc, &walk, 0); } return err; }