/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @desc: the block cipher descriptor for the operation
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @iv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the block
 * cipher walk routines to traverse input and output scatterlists, building
 * corresponding NX scatterlists.
 */
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
		      struct blkcipher_desc *desc,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int nbytes,
		      u8 *iv)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;

	if (iv)
		memcpy(iv, desc->info, AES_BLOCK_SIZE);

	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
	nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);

	return 0;
}
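/*
 * A minimal, self-contained user-space sketch (not part of this driver)
 * of the negative-length convention used above: op.inlen/op.outlen are
 * computed as (base - end) * sizeof(struct nx_sg), which is negative
 * because the walk advances the end pointer past the base, and phyp
 * treats a negative length as "this parameter is a scatterlist, not a
 * linear buffer".  The demo_* names below are hypothetical.
 */
#include <stdio.h>

struct demo_sg {
	unsigned long long addr;	/* stand-in for an NX sg entry */
	unsigned int len;
};

int main(void)
{
	struct demo_sg sg[4];
	struct demo_sg *end = sg + 3;	/* the walk consumed 3 entries */
	long inlen = (sg - end) * (long)sizeof(struct demo_sg);

	/* prints a negative byte count, e.g. -48 on a typical LP64 target */
	printf("inlen = %ld\n", inlen);
	return 0;
}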
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out)
{
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	int rc = -EINVAL;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;

	if (req->assoclen > nx_ctx->ap->databytelen)
		goto out;

	if (req->assoclen <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->assoc);
		scatterwalk_copychunks(out, &walk, req->assoclen,
				       SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);

		rc = 0;
		goto out;
	}

	nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0,
				  req->assoclen);
	nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
out:
	return rc;
}
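/*
 * Hedged sketch of the fast path above: when the associated data fits in
 * one AES block it is simply copied out of the (possibly fragmented)
 * source instead of issuing an hcall.  This stand-alone user-space
 * analogue walks an array of {ptr,len} fragments the way the
 * scatterwalk_* helpers walk a scatterlist; all names are hypothetical.
 */
#include <stdio.h>
#include <string.h>

struct frag { const unsigned char *p; size_t len; };

/* copy the first n bytes spread across fragments into out */
static void copy_chunks(unsigned char *out, const struct frag *f,
			size_t nfrags, size_t n)
{
	size_t i;

	for (i = 0; i < nfrags && n; i++) {
		size_t take = f[i].len < n ? f[i].len : n;

		memcpy(out, f[i].p, take);
		out += take;
		n -= take;
	}
}

int main(void)
{
	const unsigned char a[] = "abcdef", b[] = "ghijkl";
	struct frag f[] = { { a, 6 }, { b, 6 } };
	unsigned char out[12];

	copy_chunks(out, f, 2, 12);
	printf("%.12s\n", out);	/* "abcdefghijkl" */
	return 0;
}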
static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
		unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}
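/*
 * Hedged sketch of the chunking arithmetic in the gmac() loop above:
 * each pass is bound both by the coprocessor's per-operation byte limit
 * (databytelen) and by how many bytes one page worth of sg entries can
 * describe, NX_PAGE_SIZE * (max_sg_len - 1).  Stand-alone user-space
 * analogue; the DEMO_* constants are made up for illustration.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE		4096UL
#define DEMO_DATABYTELEN	65536UL	/* hypothetical per-op byte limit */
#define DEMO_MAX_SG		32UL	/* hypothetical sg entry limit */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long nbytes = 300000, processed = 0, to_process;

	do {
		to_process = min_ul(nbytes - processed, DEMO_DATABYTELEN);
		to_process = min_ul(to_process,
				    DEMO_PAGE_SIZE * (DEMO_MAX_SG - 1));

		/* non-final chunks run with INTERMEDIATE set */
		printf("chunk of %lu bytes, %s\n", to_process,
		       (processed + to_process) < nbytes ?
		       "INTERMEDIATE" : "final");

		processed += to_process;
	} while (processed < nbytes);

	return 0;
}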
static int generate_pat(u8 *iv,
			struct aead_request *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int authsize,
			unsigned int nbytes,
			u8 *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 - 1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 - 1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 - 1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32 - 1.
	 */
	if (!req->assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (req->assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 GCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = req->assoclen;
	} else if (req->assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 (a hedged sketch of this helper follows this
	 * function) */
	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (req->assoclen <= 65280) {
			*(u16 *)b1 = (u16)req->assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = (u32)req->assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!req->assoclen) {
		return rc;
	} else if (req->assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len,
					   nx_ctx->ap->sglen);
		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);
		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				   nx_driver.of.max_sg_len /
					sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, req->assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->assoc, processed,
						    &to_process);

			if ((to_process + processed) < req->assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags &
						CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(req->assoclen,
				     &(nx_ctx->stats->aes_bytes));

			processed += to_process;
		} while (processed < req->assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}
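/*
 * generate_b0() is called by generate_pat() above but is not shown in
 * this section.  The following is a minimal, self-contained sketch of
 * B0 construction per RFC 3610 section 2.2, not the driver's actual
 * helper: the 16-byte counter block (whose byte 0 already holds L - 1)
 * is copied into b0, the flags byte gains the (M - 2)/2 tag-length
 * field and the Adata bit, and the message length is stored big-endian
 * in the trailing L bytes.  All demo_* names are hypothetical.
 */
#include <stdio.h>
#include <string.h>

static int demo_generate_b0(const unsigned char *iv, unsigned int assoclen,
			    unsigned int authsize,
			    unsigned long long cryptlen, unsigned char *b0)
{
	unsigned int l = iv[0] + 1;	/* width of the length field, 2..8 */
	unsigned int i;

	if (l < 2 || l > 8)
		return -1;
	/* the message length must fit in l bytes */
	if (l < 8 && (cryptlen >> (8 * l)))
		return -1;

	memcpy(b0, iv, 16);

	b0[0] |= (unsigned char)(8 * ((authsize - 2) / 2));	/* M field */
	if (assoclen)
		b0[0] |= 64;					/* Adata bit */

	for (i = 0; i < l; i++)
		b0[15 - i] = (unsigned char)(cryptlen >> (8 * i));

	return 0;
}

int main(void)
{
	unsigned char iv[16] = { 1 };	/* L - 1 = 1, i.e. 2 length bytes */
	unsigned char b0[16];

	demo_generate_b0(iv, 8, 8, 42, b0);
	/* prints flags=0x59 len=42 */
	printf("flags=0x%02x len=%u\n", b0[0], (b0[14] << 8) | b0[15]);
	return 0;
}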
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out,
		  unsigned int assoclen)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes,
				       SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}
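/*
 * Hedged sketch of the FDM flag protocol driven by the loops above (and
 * in gmac()): the first hcall of a multi-pass operation runs with
 * CONTINUATION clear, every non-final pass sets INTERMEDIATE, and each
 * completed pass sets CONTINUATION so the coprocessor chains its state
 * into the next pass.  Stand-alone user-space model; the DEMO_* flag
 * values are made up.
 */
#include <stdio.h>

#define DEMO_FDM_INTERMEDIATE	0x01
#define DEMO_FDM_CONTINUATION	0x02

int main(void)
{
	unsigned int fdm = 0, chunk, nchunks = 3;

	fdm &= ~DEMO_FDM_CONTINUATION;		/* fresh operation */

	for (chunk = 0; chunk < nchunks; chunk++) {
		if (chunk + 1 < nchunks)
			fdm |= DEMO_FDM_INTERMEDIATE;
		else
			fdm &= ~DEMO_FDM_INTERMEDIATE;

		printf("pass %u: INTERMEDIATE=%d CONTINUATION=%d\n",
		       chunk,
		       !!(fdm & DEMO_FDM_INTERMEDIATE),
		       !!(fdm & DEMO_FDM_CONTINUATION));

		fdm |= DEMO_FDM_CONTINUATION;	/* chain the next pass */
	}

	return 0;
}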