/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per block.
 */
static int crypt(struct crypto_tfm *tfm,
                 struct scatterlist *dst,
                 struct scatterlist *src,
                 unsigned int nbytes, cryptfn_t crfn,
                 procfn_t prfn, int enc, void *info)
{
        struct scatter_walk walk_in, walk_out;
        const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
        u8 tmp_src[bsize];
        u8 tmp_dst[bsize];

        if (!nbytes)
                return 0;

        if (nbytes % bsize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return -EINVAL;
        }

        scatterwalk_start(&walk_in, src);
        scatterwalk_start(&walk_out, dst);

        for (;;) {
                u8 *src_p, *dst_p;
                int in_place;

                scatterwalk_map(&walk_in);
                scatterwalk_map(&walk_out);
                src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
                dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
                in_place = scatterwalk_samebuf(&walk_in, &walk_out,
                                               src_p, dst_p);

                nbytes -= bsize;

                scatterwalk_copychunks(src_p, &walk_in, bsize, 0);

                prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);

                scatterwalk_done(&walk_in, nbytes);

                scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
                scatterwalk_done(&walk_out, nbytes);

                if (!nbytes)
                        return 0;

                crypto_yield(tfm);
        }
}
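/*
 * Illustration (not kernel code; all names below are made up): the same
 * temporary-block idea reduced to a stand-alone user-space sketch.  Data is
 * spread over fragments, and each fixed-size block is gathered into a bounce
 * buffer, processed, and scattered back, which is roughly what crypt() does
 * with tmp_src/tmp_dst when a block straddles a page boundary.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BSIZE 4                         /* illustrative cipher block size */

struct frag {                           /* stand-in for one scatterlist entry */
        uint8_t *data;
        size_t len;
};

/* Gather (to_frags = 0) or scatter (to_frags = 1) one byte range. */
static void frag_copy(struct frag *f, size_t nfrags, size_t off,
                      uint8_t *buf, size_t n, int to_frags)
{
        for (size_t i = 0; i < nfrags && n; i++) {
                if (off >= f[i].len) {
                        off -= f[i].len;
                        continue;
                }
                size_t chunk = f[i].len - off;
                if (chunk > n)
                        chunk = n;
                if (to_frags)
                        memcpy(f[i].data + off, buf, chunk);
                else
                        memcpy(buf, f[i].data + off, chunk);
                buf += chunk;
                n -= chunk;
                off = 0;
        }
}

/* Process nbytes (a multiple of BSIZE) block by block via bounce buffers. */
static void process_blocks(struct frag *f, size_t nfrags, size_t nbytes,
                           void (*blockfn)(uint8_t *dst, const uint8_t *src))
{
        uint8_t tmp_src[BSIZE], tmp_dst[BSIZE];

        for (size_t off = 0; off < nbytes; off += BSIZE) {
                frag_copy(f, nfrags, off, tmp_src, BSIZE, 0);
                blockfn(tmp_dst, tmp_src);
                frag_copy(f, nfrags, off, tmp_dst, BSIZE, 1);
        }
}

static void invert_block(uint8_t *dst, const uint8_t *src)
{
        for (int i = 0; i < BSIZE; i++)
                dst[i] = src[i] ^ 0xff;
}

int main(void)
{
        uint8_t a[3] = { 1, 2, 3 }, b[5] = { 4, 5, 6, 7, 8 };
        struct frag f[] = { { a, sizeof(a) }, { b, sizeof(b) } };

        process_blocks(f, 2, 8, invert_block); /* 8 bytes = 2 "blocks" */
        printf("%02x %02x\n", a[0], b[4]);     /* prints "fe f7" */
        return 0;
}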
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk,
                                      unsigned int bsize,
                                      unsigned int alignmask)
{
        unsigned int n;
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

        if (walk->buffer)
                goto ok;

        walk->buffer = walk->page;
        if (walk->buffer)
                goto ok;

        n = aligned_bsize * 3 - (alignmask + 1) +
            (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
        walk->buffer = kmalloc(n, GFP_ATOMIC);
        if (!walk->buffer)
                return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
        walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
                                          alignmask + 1);
        walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
                                                 aligned_bsize, bsize);

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= BLKCIPHER_WALK_SLOW;

        return 0;
}
static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
                                               unsigned int bsize)
{
        u8 *addr;

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        addr = blkcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize, 1);
        return bsize;
}
static unsigned int crypt_slow(const struct cipher_desc *desc,
                               struct scatter_walk *in,
                               struct scatter_walk *out, unsigned int bsize)
{
        unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
        u8 buffer[bsize * 2 + alignmask];
        u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        u8 *dst = src + bsize;
        unsigned int n;

        n = scatterwalk_copychunks(src, in, bsize, 0);
        scatterwalk_advance(in, n);

        desc->prfn(desc, dst, src, bsize);

        n = scatterwalk_copychunks(dst, out, bsize, 1);
        scatterwalk_advance(out, n);

        return bsize;
}
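/*
 * Illustration (not kernel code): how the over-sized stack buffer in
 * crypt_slow() yields an alignmask-aligned src block with dst right behind
 * it.  ALIGN_UP() mirrors the kernel's ALIGN() macro; the sizes are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
        const unsigned int bsize = 16;          /* illustrative block size */
        const uintptr_t alignmask = 15;         /* want 16-byte alignment */
        uint8_t buffer[16 * 2 + 15];            /* bsize * 2 + alignmask */

        /* Round the (possibly unaligned) stack address up to the boundary. */
        uint8_t *src = (uint8_t *)ALIGN_UP((uintptr_t)buffer, alignmask + 1);
        uint8_t *dst = src + bsize;             /* dst follows src directly */

        printf("src aligned: %d, dst - src = %td\n",
               (int)(((uintptr_t)src & alignmask) == 0), dst - src);
        return 0;
}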
static void sg_copy_buf(void *buf, struct scatterlist *sg,
                        unsigned int start, unsigned int nbytes, int out)
{
        struct scatter_walk walk;

        if (!nbytes)
                return;

        scatterwalk_start(&walk, sg);
        scatterwalk_advance(&walk, start);
        scatterwalk_copychunks(buf, &walk, nbytes, out);
        scatterwalk_done(&walk, out, 0);
}
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
                              unsigned int start, unsigned int nbytes, int out)
{
        struct scatter_walk walk;
        struct scatterlist tmp[2];

        if (!nbytes)
                return;

        sg = scatterwalk_ffwd(tmp, sg, start);

        scatterwalk_start(&walk, sg);
        scatterwalk_copychunks(buf, &walk, nbytes, out);
        scatterwalk_done(&walk, out, 0);
}
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
                              unsigned int start, unsigned int nbytes, int out)
{
        struct scatter_walk walk;
        struct scatterlist tmp[2];

        if (!nbytes)
                return;

        sg = scatterwalk_ffwd(tmp, sg, start);

        if (sg_page(sg) == virt_to_page(buf) &&
            sg->offset == offset_in_page(buf))
                return;

        scatterwalk_start(&walk, sg);
        scatterwalk_copychunks(buf, &walk, nbytes, out);
        scatterwalk_done(&walk, out, 0);
}
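/*
 * Caller-side sketch of scatterwalk_map_and_copy() (the helper names,
 * request layout and authsize handling here are assumptions for
 * illustration, not taken from the functions above): copy an
 * authentication tag between a scatterlist and a linear buffer.
 */
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>

/* Hypothetical: read the last 'authsize' bytes of 'src' into 'tag'. */
static void example_read_tag(struct scatterlist *src, unsigned int total,
                             u8 *tag, unsigned int authsize)
{
        /* out = 0: copy from the scatterlist into the linear buffer */
        scatterwalk_map_and_copy(tag, src, total - authsize, authsize, 0);
}

/* Hypothetical: append a computed tag right after 'total' bytes of output. */
static void example_write_tag(struct scatterlist *dst, unsigned int total,
                              u8 *tag, unsigned int authsize)
{
        /* out = 1: copy from the linear buffer into the scatterlist */
        scatterwalk_map_and_copy(tag, dst, total, authsize, 1);
}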
static int nx_gca(struct nx_crypto_ctx *nx_ctx, struct aead_request *req,
                  u8 *out)
{
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
        int rc = -EINVAL;
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_ctx->in_sg;

        if (req->assoclen > nx_ctx->ap->databytelen)
                goto out;

        if (req->assoclen <= AES_BLOCK_SIZE) {
                scatterwalk_start(&walk, req->assoc);
                scatterwalk_copychunks(out, &walk, req->assoclen,
                                       SCATTERWALK_FROM_SG);
                scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);

                rc = 0;
                goto out;
        }

        nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0,
                                  req->assoclen);
        nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) *
                                sizeof(struct nx_sg);

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
                           req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;

        atomic_inc(&(nx_ctx->stats->aes_ops));
        atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

        memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
out:
        return rc;
}
static int tls_enc_record(struct aead_request *aead_req,
                          struct crypto_aead *aead, char *aad,
                          char *iv, __be64 rcd_sn,
                          struct scatter_walk *in,
                          struct scatter_walk *out, int *in_len)
{
        unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
        struct scatterlist sg_in[3];
        struct scatterlist sg_out[3];
        u16 len;
        int rc;

        len = min_t(int, *in_len, ARRAY_SIZE(buf));

        scatterwalk_copychunks(buf, in, len, 0);
        scatterwalk_copychunks(buf, out, len, 1);

        *in_len -= len;
        if (!*in_len)
                return 0;

        scatterwalk_pagedone(in, 0, 1);
        scatterwalk_pagedone(out, 1, 1);

        len = buf[4] | (buf[3] << 8);
        len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;

        tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
                     (char *)&rcd_sn, sizeof(rcd_sn), buf[0]);

        memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
               TLS_CIPHER_AES_GCM_128_IV_SIZE);

        sg_init_table(sg_in, ARRAY_SIZE(sg_in));
        sg_init_table(sg_out, ARRAY_SIZE(sg_out));
        sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
        sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
        chain_to_walk(sg_in + 1, in);
        chain_to_walk(sg_out + 1, out);

        *in_len -= len;
        if (*in_len < 0) {
                *in_len += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
                /* The input buffer doesn't contain the entire record.
                 * Trim len accordingly. The resulting authentication tag
                 * will contain garbage, but we don't care, so we won't
                 * include any of it in the output skb.
                 * Note that we assume the output buffer length
                 * is larger than the input buffer length plus tag size.
                 */
                if (*in_len < 0)
                        len += *in_len;
                *in_len = 0;
        }

        if (*in_len) {
                scatterwalk_copychunks(NULL, in, len, 2);
                scatterwalk_pagedone(in, 0, 1);
                scatterwalk_copychunks(NULL, out, len, 2);
                scatterwalk_pagedone(out, 1, 1);
        }

        len -= TLS_CIPHER_AES_GCM_128_TAG_SIZE;
        aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);

        rc = crypto_aead_encrypt(aead_req);

        return rc;
}
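/*
 * Side note, as a stand-alone illustration (header bytes are made up):
 * "len = buf[4] | (buf[3] << 8)" above reads the 16-bit big-endian length
 * field of the 5-byte TLS record header (type, version, length).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* TLS record header: type (1), version (2), length (2, big-endian) */
        uint8_t hdr[5] = { 0x17, 0x03, 0x03, 0x01, 0x44 };
        uint16_t len = hdr[4] | (hdr[3] << 8);  /* same expression as above */

        printf("record payload length: %u\n", len);     /* 0x0144 = 324 */
        return 0;
}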
static int nx_gca(struct nx_crypto_ctx *nx_ctx, struct aead_request *req,
                  u8 *out, unsigned int assoclen)
{
        int rc;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_ctx->in_sg;
        unsigned int nbytes = assoclen;
        unsigned int processed = 0, to_process;
        unsigned int max_sg_len;

        if (nbytes <= AES_BLOCK_SIZE) {
                scatterwalk_start(&walk, req->src);
                scatterwalk_copychunks(out, &walk, nbytes,
                                       SCATTERWALK_FROM_SG);
                scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
                return 0;
        }

        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
                                          req->src, processed, &to_process);

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

                nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) *
                                        sizeof(struct nx_sg);

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        return rc;

                memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
                       csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
                NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

        memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

        return rc;
}
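/*
 * The do/while above is the usual "bounded chunk" pattern: process as much
 * as the engine allows per call, flag every chunk but the last as
 * intermediate, and carry state forward between calls.  A minimal,
 * hardware-free sketch of the same control flow (all names are made up):
 */
#include <stddef.h>
#include <stdio.h>

#define MAX_CHUNK 4096          /* illustrative per-call limit */

static size_t min_sz(size_t a, size_t b)
{
        return a < b ? a : b;
}

/* Feed nbytes to a per-chunk handler, marking intermediate chunks. */
static void process_in_chunks(size_t nbytes,
                              void (*handler)(size_t off, size_t len,
                                              int intermediate))
{
        size_t processed = 0;

        do {
                size_t to_process = min_sz(nbytes - processed, MAX_CHUNK);
                int intermediate = (processed + to_process) < nbytes;

                handler(processed, to_process, intermediate);
                processed += to_process;
        } while (processed < nbytes);
}

static void print_chunk(size_t off, size_t len, int intermediate)
{
        printf("chunk at %zu, len %zu, intermediate=%d\n",
               off, len, intermediate);
}

int main(void)
{
        process_in_chunks(10000, print_chunk);  /* 4096 + 4096 + 1808 */
        return 0;
}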