static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	ASF_FP_LINUX_CRYPTO_FENTRY;	
	ASF_FP_LINUX_CRYPTO_FEXIT;
	return crypto_aead_ivsize(aead) ? PTR_ALIGN((u8 *)tmp + seqhilen,
			crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}
static inline struct aead_givcrypt_request *esp_tmp_givreq(
        struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;
	ASF_FP_LINUX_CRYPTO_FENTRY;	

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
							crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	ASF_FP_LINUX_CRYPTO_FEXIT;
	return req;
}
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
Example #4
/**
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 skip = req->assoclen + req->cryptlen;

	if (areq_ctx->is_gcm4543)
		skip += crypto_aead_ivsize(tfm);

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}
static int seqiv_aead_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);

	spin_lock_init(&ctx->lock);

	tfm->crt_aead.reqsize = sizeof(struct aead_request);

	return crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				    crypto_aead_ivsize(geniv)) ?:
	       aead_geniv_init(tfm);
}
Example #6
static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
{
	struct aead_request *subreq = aead_givcrypt_reqctx(req);
	struct crypto_aead *geniv;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = aead_givcrypt_reqtfm(req);
	memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));

out:
	kfree(subreq->iv);
}
Example #7
static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *areq = &req->areq;
	struct aead_request *subreq = aead_givcrypt_reqctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize;
	int err;

	aead_request_set_tfm(subreq, aead_geniv_base(geniv));

	compl = areq->base.complete;
	data = areq->base.data;
	info = areq->iv;

	ivsize = crypto_aead_ivsize(geniv);

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, areq->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		compl = seqiv_aead_complete;
		data = req;
	}

	aead_request_set_callback(subreq, areq->base.flags, compl, data);
	aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
			       info);
	aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);

	seqiv_geniv(ctx, info, req->seq, ivsize);
	memcpy(req->giv, info, ivsize);

	err = crypto_aead_encrypt(subreq);
	if (unlikely(info != areq->iv))
		seqiv_aead_complete2(req, err);
	return err;
}
Example #8
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
						       hw_iv_size,
						       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
	// TODO: what about CTR?? ask Ron
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
		/* Chain to given list */
		cc_add_buffer_entry(dev, sg_data,
				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
				    iv_size_to_authenc, is_last,
				    &areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_iv_exit:
	return rc;
}
Example #9
static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req)
{
	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first)
		goto unlock;

	crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return seqiv_aead_givencrypt(req);
}
/* Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
	unsigned int len;

	ASF_FP_LINUX_CRYPTO_FENTRY;	

	len = seqhilen;
	len += crypto_aead_ivsize(aead);

	if (likely(len)) {
		len += crypto_aead_alignmask(aead) &
				~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	ASF_FP_LINUX_CRYPTO_FEXIT;
	return kmalloc(len, GFP_ATOMIC);
}
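The three helpers above (esp_tmp_iv(), esp_tmp_givreq() and esp_alloc_tmp()) cooperate on one allocation: the seqhi/IV area at the front, the givcrypt request in the middle, and the SG list at the end. A minimal caller sketch follows; it is not part of the original source, the local variables aead, nfrags and seqhilen are assumed, and the SG-list pointer computation is only indicated in a comment.

	/* Hypothetical usage sketch: carve up the esp_alloc_tmp() buffer
	 * in the layout documented above.
	 */
	void *tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		return -ENOMEM;

	/* The seqhi bytes sit at the start of tmp; the IV follows,
	 * aligned to the AEAD alignmask.
	 */
	u8 *iv = esp_tmp_iv(aead, tmp, seqhilen);

	/* The givcrypt request is placed after the IV, aligned to the
	 * tfm context alignment, mirroring esp_alloc_tmp().
	 */
	struct aead_givcrypt_request *req = esp_tmp_givreq(aead, iv);

	/* The scatterlist array would start after the request plus
	 * crypto_aead_reqsize(aead), aligned to
	 * __alignof__(struct scatterlist) - the same layout that
	 * esp_alloc_tmp() sizes for.
	 */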
Example #11
static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
    struct aead_request *subreq = aead_request_ctx(req);
    struct crypto_aead *geniv;
    unsigned int ivsize;

    if (err == -EINPROGRESS)
        return;

    if (err)
        goto out;

    geniv = crypto_aead_reqtfm(req);
    ivsize = crypto_aead_ivsize(geniv);

    echainiv_write_iv(subreq->iv, ivsize);

    if (req->iv != subreq->iv)
        memcpy(req->iv, subreq->iv, ivsize);

out:
    if (req->iv != subreq->iv)
        kzfree(subreq->iv);
}
Example #12
static int echainiv_encrypt(struct aead_request *req)
{
    struct crypto_aead *geniv = crypto_aead_reqtfm(req);
    struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
    struct aead_request *subreq = aead_request_ctx(req);
    crypto_completion_t compl;
    void *data;
    u8 *info;
    unsigned int ivsize = crypto_aead_ivsize(geniv);
    int err;

    if (req->cryptlen < ivsize)
        return -EINVAL;

    aead_request_set_tfm(subreq, ctx->geniv.child);

    compl = echainiv_encrypt_complete;
    data = req;
    info = req->iv;

    if (req->src != req->dst) {
        struct blkcipher_desc desc = {
            .tfm = ctx->null,
        };

        err = crypto_blkcipher_encrypt(
                  &desc, req->dst, req->src,
                  req->assoclen + req->cryptlen);
        if (err)
            return err;
    }

    if (unlikely(!IS_ALIGNED((unsigned long)info,
                             crypto_aead_alignmask(geniv) + 1))) {
        info = kmalloc(ivsize, req->base.flags &
                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
                       GFP_ATOMIC);
        if (!info)
            return -ENOMEM;

        memcpy(info, req->iv, ivsize);
    }

    aead_request_set_callback(subreq, req->base.flags, compl, data);
    aead_request_set_crypt(subreq, req->dst, req->dst,
                           req->cryptlen - ivsize, info);
    aead_request_set_ad(subreq, req->assoclen + ivsize);

    crypto_xor(info, ctx->salt, ivsize);
    scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
    echainiv_read_iv(info, ivsize);

    err = crypto_aead_encrypt(subreq);
    echainiv_encrypt_complete2(req, err);
    return err;
}

static int echainiv_decrypt(struct aead_request *req)
{
    struct crypto_aead *geniv = crypto_aead_reqtfm(req);
    struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
    struct aead_request *subreq = aead_request_ctx(req);
    crypto_completion_t compl;
    void *data;
    unsigned int ivsize = crypto_aead_ivsize(geniv);

    if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
        return -EINVAL;

    aead_request_set_tfm(subreq, ctx->geniv.child);

    compl = req->base.complete;
    data = req->base.data;

    aead_request_set_callback(subreq, req->base.flags, compl, data);
    aead_request_set_crypt(subreq, req->src, req->dst,
                           req->cryptlen - ivsize, req->iv);
    aead_request_set_ad(subreq, req->assoclen + ivsize);

    scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
    if (req->src != req->dst)
        scatterwalk_map_and_copy(req->iv, req->dst,
                                 req->assoclen, ivsize, 1);

    return crypto_aead_decrypt(subreq);
}

static int echainiv_init(struct crypto_tfm *tfm)
{
    struct crypto_aead *geniv = __crypto_aead_cast(tfm);
    struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
    int err;

    spin_lock_init(&ctx->geniv.lock);

    crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

    err = crypto_get_default_rng();
    if (err)
        goto out;

    err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                               crypto_aead_ivsize(geniv));
    crypto_put_default_rng();
    if (err)
        goto out;

    ctx->null = crypto_get_default_null_skcipher();
    err = PTR_ERR(ctx->null);
    if (IS_ERR(ctx->null))
        goto out;

    err = aead_geniv_init(tfm);
    if (err)
        goto drop_null;

    ctx->geniv.child = geniv->child;
    geniv->child = geniv;

out:
    return err;

drop_null:
    crypto_put_default_null_skcipher();
    goto out;
}

static void echainiv_exit(struct crypto_tfm *tfm)
{
    struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

    crypto_free_aead(ctx->geniv.child);
    crypto_put_default_null_skcipher();
}

static int echainiv_aead_create(struct crypto_template *tmpl,
                                struct rtattr **tb)
{
    struct aead_instance *inst;
    struct crypto_aead_spawn *spawn;
    struct aead_alg *alg;
    int err;

    inst = aead_geniv_alloc(tmpl, tb, 0, 0);

    if (IS_ERR(inst))
        return PTR_ERR(inst);

    spawn = aead_instance_ctx(inst);
    alg = crypto_spawn_aead_alg(spawn);

    if (alg->base.cra_aead.encrypt)
        goto done;

    err = -EINVAL;
    if (inst->alg.ivsize & (sizeof(u32) - 1) ||
            inst->alg.ivsize > MAX_IV_SIZE)
        goto free_inst;

    inst->alg.encrypt = echainiv_encrypt;
    inst->alg.decrypt = echainiv_decrypt;

    inst->alg.base.cra_init = echainiv_init;
    inst->alg.base.cra_exit = echainiv_exit;

    inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
    inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
    inst->alg.base.cra_ctxsize += inst->alg.ivsize;

done:
    err = aead_register_instance(tmpl, inst);
    if (err)
        goto free_inst;

out:
    return err;

free_inst:
    aead_geniv_free(inst);
    goto out;
}

static void echainiv_free(struct crypto_instance *inst)
{
    aead_geniv_free(aead_instance(inst));
}
Example #13
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = req->assoclen + req->cryptlen;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool chained = false;
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	u32 size_to_skip = req->assoclen;

	if (is_gcm4543)
		size_to_skip += crypto_aead_ivsize(tfm);

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	if (is_gcm4543)
		size_for_map += crypto_aead_ivsize(tfm);

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes, &chained);
	sg_index = areq_ctx->src_sgl->length;
	//check where the data starts
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->src_sgl->length;
		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
		// if we have reached the end of the sgl, this is unexpected
		if (!areq_ctx->src_sgl) {
			dev_err(dev, "reached end of sg list. unexpected\n");
			return -EINVAL;
		}
		sg_index += areq_ctx->src_sgl->length;
		src_mapped_nents--;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;
		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				authsize : 0;
		if (is_gcm4543)
			size_for_map += crypto_aead_ivsize(tfm);

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
			       &areq_ctx->dst.nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes, &chained);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	//check where the data starts
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->dst_sgl->length;
		areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
		// if we have reached the end of the sgl, this is unexpected
		if (!areq_ctx->dst_sgl) {
			dev_err(dev, "reached end of sg list. unexpected\n");
			return -EINVAL;
		}
		sg_index += areq_ctx->dst_sgl->length;
		dst_mapped_nents--;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents  > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					       &src_last_bytes,
					       &dst_last_bytes, is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}
Example #14
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0; /* used for the assoc data fragments */
	u32 size_to_map = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Copy the MAC to a temporary location to deal with a possible
	 * data memory overwrite caused by a cache coherence problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* calculate the size for the cipher; the ICV is removed in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, req->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + req->assoclen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_map += authsize;

	if (is_gcm4543)
		size_to_map += crypto_aead_ivsize(tfm);
	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
		       &areq_ctx->src.nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 *   Note: IV is a contiguous buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* MLLI support - start building the MLLI according to the above
	 * results.
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}
	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}
int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
				uint8_t *keyp, size_t keylen, int stream, int aead)
{
	int ret;

	if (aead == 0) {
		struct ablkcipher_alg *alg;

		out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.s))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		alg = crypto_ablkcipher_alg(out->async.s);
		if (alg != NULL) {
			/* Was correct key length supplied? */
			if (alg->max_keysize > 0 &&
					unlikely((keylen < alg->min_keysize) ||
					(keylen > alg->max_keysize))) {
				ddebug(1, "Wrong keylen '%zu' for algorithm '%s'. Use %u to %u.",
						keylen, alg_name, alg->min_keysize, alg->max_keysize);
				ret = -EINVAL;
				goto error;
			}
		}

		out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
		out->ivsize = crypto_ablkcipher_ivsize(out->async.s);
		out->alignmask = crypto_ablkcipher_alignmask(out->async.s);

		ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
	} else {
		out->async.as = crypto_alloc_aead(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.as))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		out->blocksize = crypto_aead_blocksize(out->async.as);
		out->ivsize = crypto_aead_ivsize(out->async.as);
		out->alignmask = crypto_aead_alignmask(out->async.as);

		ret = crypto_aead_setkey(out->async.as, keyp, keylen);
	}

	if (unlikely(ret)) {
		ddebug(1, "Setting key failed for %s-%zu.", alg_name, keylen*8);
		ret = -EINVAL;
		goto error;
	}

	out->stream = stream;
	out->aead = aead;

	out->async.result = kzalloc(sizeof(*out->async.result), GFP_KERNEL);
	if (unlikely(!out->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	init_completion(&out->async.result->completion);

	if (aead == 0) {
		out->async.request = ablkcipher_request_alloc(out->async.s, GFP_KERNEL);
		if (unlikely(!out->async.request)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		ablkcipher_request_set_callback(out->async.request,
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					cryptodev_complete, out->async.result);
	} else {
		out->async.arequest = aead_request_alloc(out->async.as, GFP_KERNEL);
		if (unlikely(!out->async.arequest)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		aead_request_set_callback(out->async.arequest,
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					cryptodev_complete, out->async.result);
	}

	out->init = 1;
	return 0;
error:
	if (aead == 0) {
		if (out->async.request)
			ablkcipher_request_free(out->async.request);
		if (out->async.s)
			crypto_free_ablkcipher(out->async.s);
	} else {
		if (out->async.arequest)
			aead_request_free(out->async.arequest);
		if (out->async.as)
			crypto_free_aead(out->async.as);
	}
	kfree(out->async.result);

	return ret;
}
Example #16
static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	u32 mapped_nents = 0;
	struct scatterlist *current_sg = req->src;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int sg_index = 0;
	u32 size_of_assoc = req->assoclen;
	struct device *dev = drvdata_to_dev(drvdata);

	if (areq_ctx->is_gcm4543)
		size_of_assoc += crypto_aead_ivsize(tfm);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (req->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	// iterate over the sgl to see how many entries are for associated data;
	// it is assumed that if we reach here, the sgl is already mapped
	sg_index = current_sg->length;
	// the first entry in the scatter list contains all the associated data
	if (sg_index > size_of_assoc) {
		mapped_nents++;
	} else {
		while (sg_index <= size_of_assoc) {
			current_sg = sg_next(current_sg);
			/* if we have reached the end of the sgl, then this is
			 * unexpected
			 */
			if (!current_sg) {
				dev_err(dev, "reached end of sg list. unexpected\n");
				return -EINVAL;
			}
			sg_index += current_sg->length;
			mapped_nents++;
		}
	}
	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				req->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}
Example #17
void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
	u32 dummy;
	bool chained;
	u32 size_to_unmap = 0;

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
	}

	/* Release pool */
	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		req->assoclen, req->cryptlen);
	size_to_unmap = req->assoclen + req->cryptlen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_unmap += areq_ctx->req_authsize;
	if (areq_ctx->is_gcm4543)
		size_to_unmap += crypto_aead_ivsize(tfm);

	dma_unmap_sg(dev, req->src,
		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
				      &dummy, &chained),
		     DMA_BIDIRECTIONAL);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst,
			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
					      &dummy, &chained),
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* Copy the MAC back from the temporary location to deal with a
		 * possible data memory overwrite caused by a cache coherence
		 * problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}