Code Example #1
File: ablkcipher.c  Project: AlexShiLucky/linux
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_tfm *tfm =
			crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
	    ctx->enc_keylen != AES_KEYSIZE_256) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
				crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
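
The pattern above is worth noting: the QCE engine implements only AES-128 and AES-256, so AES-192 requests are bounced to a software fallback cipher. A minimal userspace sketch of just that key-size decision, using only standard C (the function name below is illustrative, not from the driver):

#include <stdbool.h>

#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32

/* True when an AES request must take the software fallback path:
 * the hardware supports only 128- and 256-bit keys, so a 192-bit
 * key (or any other length) cannot be handled by the engine. */
static bool needs_fallback(bool is_aes, unsigned int enc_keylen)
{
	return is_aes &&
	       enc_keylen != AES_KEYSIZE_128 &&
	       enc_keylen != AES_KEYSIZE_256;
}

With a 24-byte key this returns true, which corresponds to the SYNC_SKCIPHER_REQUEST_ON_STACK branch above.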
Code Example #2
File: sahara.c  Project: AdaLovelance/lxcGrsecKernels
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;
	int busy;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	ctx->dev = dev;

	rctx->mode = mode;
	spin_lock_bh(&dev->lock);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
	spin_unlock_bh(&dev->lock);

	if (!busy)
		tasklet_schedule(&dev->queue_task);

	return err;
}
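
The IS_ALIGNED check rejects requests that are not a whole number of AES blocks, since ECB and CBC cannot process partial blocks. A minimal sketch of the same power-of-two alignment test in plain C (is_aligned here is an illustrative stand-in for the kernel macro):

#include <stdbool.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/* For a power-of-two a, x is a multiple of a iff its low bits are zero. */
static bool is_aligned(unsigned long x, unsigned long a)
{
	return (x & (a - 1)) == 0;
}

int main(void)
{
	printf("%d\n", is_aligned(32, AES_BLOCK_SIZE)); /* 1: two full blocks */
	printf("%d\n", is_aligned(20, AES_BLOCK_SIZE)); /* 0: driver returns -EINVAL */
	return 0;
}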
Code Example #3
File: ablkcipher.c  Project: AlexShiLucky/linux
static void qce_ablkcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);

	qce->async_req_done(tmpl->qce, error);
}
Code Example #4
File: cryptd.c  Project: 274914765/C
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS)) {
		rctx->complete(&req->base, err);
		return;
	}

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

	local_bh_disable();
	req->base.complete(&req->base, err);
	local_bh_enable();
}
Code Example #5
File: ctr.c  Project: 03199618/linux
static int crypto_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = ctx->child;
	unsigned long align = crypto_ablkcipher_alignmask(tfm);
	struct crypto_rfc3686_req_ctx *rctx =
		(void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1);
	struct ablkcipher_request *subreq = &rctx->subreq;
	u8 *iv = rctx->iv;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	ablkcipher_request_set_tfm(subreq, child);
	ablkcipher_request_set_callback(subreq, req->base.flags,
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes,
				     iv);

	return crypto_ablkcipher_encrypt(subreq);
}
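
crypto_rfc3686_crypt assembles the 16-byte initial counter block defined by RFC 3686: a 4-byte per-key nonce, the 8-byte per-message IV taken from req->info, and a 4-byte big-endian block counter starting at 1. A self-contained sketch of the same layout (assuming POSIX htonl for the big-endian conversion; names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE    8
#define AES_BLOCK_SIZE         16

/* Counter block layout: nonce (4) || IV (8) || counter (4, big-endian, = 1) */
static void rfc3686_counter_block(uint8_t out[AES_BLOCK_SIZE],
				  const uint8_t nonce[CTR_RFC3686_NONCE_SIZE],
				  const uint8_t iv[CTR_RFC3686_IV_SIZE])
{
	uint32_t one = htonl(1);

	memcpy(out, nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(out + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);
	memcpy(out + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE,
	       &one, sizeof(one));
}

int main(void)
{
	uint8_t nonce[CTR_RFC3686_NONCE_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t iv[CTR_RFC3686_IV_SIZE] = { 0 };
	uint8_t block[AES_BLOCK_SIZE];
	int i;

	rfc3686_counter_block(block, nonce, iv);
	for (i = 0; i < AES_BLOCK_SIZE; i++)
		printf("%02x", block[i]);
	printf("\n"); /* prints deadbeef000000000000000000000001 */
	return 0;
}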
Code Example #6
File: sunxi_ss.c  Project: 925outer/BPI-M2P-bsp
static int ss_aes_cfb128_decrypt(struct ablkcipher_request *req)
{
	ss_aes_req_ctx_t *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->bitwidth = 128;
	return ss_aes_crypt(req, SS_DIR_DECRYPT, SS_METHOD_AES, SS_AES_MODE_CFB);
}
Code Example #7
File: sahara.c  Project: vikash-g-samsung-com/linux
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}
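
Both sahara variants share the same producer/consumer shape: the crypt entry point enqueues the request under a lock and wakes a worker, which dequeues and drives the hardware. A minimal userspace analog of that shape using POSIX threads (all names here are illustrative, not from the driver):

#include <pthread.h>
#include <stdio.h>

#define NREQ 4

static int queue[NREQ];
static int head, tail;			/* head == tail: queue empty */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

/* Plays the role of dev->kthread: sleep until woken, then drain the queue. */
static void *worker(void *unused)
{
	int served = 0;

	(void)unused;
	pthread_mutex_lock(&lock);
	while (served < NREQ) {
		while (head == tail)
			pthread_cond_wait(&wake, &lock);
		int req = queue[head++ % NREQ];
		pthread_mutex_unlock(&lock);
		printf("processing request %d\n", req);	/* hw dispatch stand-in */
		pthread_mutex_lock(&lock);
		served++;
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t kthread;
	int i;

	pthread_create(&kthread, NULL, worker, NULL);
	for (i = 0; i < NREQ; i++) {
		pthread_mutex_lock(&lock);	/* mutex_lock(&dev->queue_mutex) */
		queue[tail++ % NREQ] = i;	/* ablkcipher_enqueue_request() */
		pthread_mutex_unlock(&lock);
		pthread_cond_signal(&wake);	/* wake_up_process(dev->kthread) */
	}
	pthread_join(kthread, NULL);
	return 0;
}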
Code Example #8
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}
Code Example #9
File: chcr_algo.c  Project: acton393/linux
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int error_status)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_req_ctx ctx_req;
	struct cpl_fw6_pld *fw6_pld;
	unsigned int digestsize, updated_digestsize;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
		ctx_req.ctx.ablk_ctx =
			ablkcipher_request_ctx(ctx_req.req.ablk_req);
		if (!error_status) {
			fw6_pld = (struct cpl_fw6_pld *)input;
			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
			       AES_BLOCK_SIZE);
		}
		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
			     ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
		if (ctx_req.ctx.ablk_ctx->skb) {
			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
			ctx_req.ctx.ablk_ctx->skb = NULL;
		}
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		ctx_req.req.ahash_req = (struct ahash_request *)req;
		ctx_req.ctx.ahash_ctx =
			ahash_request_ctx(ctx_req.req.ahash_req);
		digestsize =
			crypto_ahash_digestsize(crypto_ahash_reqtfm(
							ctx_req.req.ahash_req));
		updated_digestsize = digestsize;
		if (digestsize == SHA224_DIGEST_SIZE)
			updated_digestsize = SHA256_DIGEST_SIZE;
		else if (digestsize == SHA384_DIGEST_SIZE)
			updated_digestsize = SHA512_DIGEST_SIZE;
		if (ctx_req.ctx.ahash_ctx->skb)
			ctx_req.ctx.ahash_ctx->skb = NULL;
		if (ctx_req.ctx.ahash_ctx->result == 1) {
			ctx_req.ctx.ahash_ctx->result = 0;
			memcpy(ctx_req.req.ahash_req->result, input +
			       sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
			       sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
		ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
		break;
	}
	return 0;
}
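
The digest-size remapping in the AHASH branch reflects that SHA-224 and SHA-384 are truncations of SHA-256 and SHA-512, so a partial (intermediate) hash state always has the larger variant's size. A sketch of just that mapping, with the standard digest sizes spelled out:

#define SHA224_DIGEST_SIZE 28
#define SHA256_DIGEST_SIZE 32
#define SHA384_DIGEST_SIZE 48
#define SHA512_DIGEST_SIZE 64

/* SHA-224 runs the SHA-256 compression function and SHA-384 runs SHA-512's,
 * so intermediate state is copied at the larger size. */
static unsigned int updated_digest_size(unsigned int digestsize)
{
	if (digestsize == SHA224_DIGEST_SIZE)
		return SHA256_DIGEST_SIZE;
	if (digestsize == SHA384_DIGEST_SIZE)
		return SHA512_DIGEST_SIZE;
	return digestsize;
}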
Code Example #10
static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(req);
}
Code Example #11
static int cns3xxx_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct cns3xxx_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return cns3xxx_handle_req(&req->base);
}
Code Example #12
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
Code Example #13
File: sahara.c  Project: AdaLovelance/lxcGrsecKernels
static void sahara_aes_queue_task(unsigned long data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	struct ablkcipher_request *req;
	int ret;

	spin_lock(&dev->lock);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dev->flags);
	spin_unlock(&dev->lock);

	if (!async_req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->req = req;
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

	/* assign new context to device */
	ctx->dev = dev;
	dev->ctx = ctx;

	ret = sahara_hw_descriptor_create(dev);
	if (ret < 0) {
		spin_lock(&dev->lock);
		clear_bit(FLAGS_BUSY, &dev->flags);
		spin_unlock(&dev->lock);
		dev->req->base.complete(&dev->req->base, ret);
	}
}
Code Example #14
static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(req);
}
Code Example #15
File: ccp-crypto-aes.c  Project: 168519/linux
static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
				    int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	/* Restore the original pointer */
	req->info = rctx->rfc3686_info;

	return ccp_aes_complete(async_req, ret);
}
Code Example #16
static int cns3xxx_dec_aes_cbc(struct ablkcipher_request *req)
{
	//struct cns3xxx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct cns3xxx_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	//compute_aes_dec_key(ctx);
	return cns3xxx_handle_req(&req->base);
}
Code Example #17
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
Code Example #18
File: ccp-crypto-aes.c  Project: 168519/linux
static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	if (ret)
		return ret;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
		memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}
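
ccp_aes_complete copies the IV saved in the request context back into req->info because the crypto API expects a completed request to leave behind the IV that chains into a follow-up request; for CBC that is simply the last ciphertext block. A minimal sketch over a flat buffer (illustrative names, not driver code):

#include <stddef.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* For CBC, the IV of the next request is the final ciphertext block of this
 * one; ct_len must be block-aligned, as the drivers above enforce. */
static void cbc_next_iv(unsigned char iv[AES_BLOCK_SIZE],
			const unsigned char *ct, size_t ct_len)
{
	memcpy(iv, ct + ct_len - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
}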
Code Example #19
File: sahara.c  Project: vikash-g-samsung-com/linux
static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	/* unmap with the directions the buffers were mapped with:
	 * the device reads in_sg and writes out_sg */
	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return 0;
}
Code Example #20
static void cns3xxx_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct cns3xxx_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	//printk("%s %d\n",__func__,__LINE__);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->in_buf + IN_DATA_IV_P, 16);
}
Code Example #21
static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct crypto_ablkcipher   *tfm    = crypto_ablkcipher_reqtfm(req);
	struct s5p_aes_ctx         *ctx    = crypto_ablkcipher_ctx(tfm);
	struct s5p_aes_reqctx      *reqctx = ablkcipher_request_ctx(req);
	struct s5p_aes_dev         *dev    = ctx->dev;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		pr_err("request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}
Code Example #22
static int aes_handle_req(struct aes_hwa_ctx *ctx)
{
	struct tf_crypto_aes_operation_state *state;
	struct crypto_async_request *async_req;
	struct ablkcipher_request *req;
	struct aes_reqctx *rctx;
	unsigned long flags;

	if (ctx->total)
		goto start;

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->backlog = crypto_get_backlog(&ctx->queue);
	async_req = crypto_dequeue_request(&ctx->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &ctx->flags);
	spin_unlock_irqrestore(&ctx->lock, flags);

	if (!async_req)
		return 0;

	req = ablkcipher_request_cast(async_req);

	ctx->req = req;
	ctx->total = req->nbytes;
	ctx->in_offset = 0;
	ctx->in_sg = req->src;
	ctx->out_offset = 0;
	ctx->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	rctx->mode &= FLAGS_MODE_MASK;
	ctx->flags = (ctx->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	/*
	 * Try to get the next pending request so it can be prepared while the
	 * first one is being processed.
	 */
	spin_lock_irqsave(&ctx->lock, flags);

	if (likely(ctx->queue.qlen)) {
		struct list_head *next_async_req;

		next_async_req = ctx->queue.list.next;
		ctx->next_req = list_entry(next_async_req,
			struct crypto_async_request, list);
	} else {
Code Example #23
int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!may_use_simd()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		return __ablk_encrypt(req);
	}
}
Code Example #24
File: ccp-crypto-aes.c  Project: 168519/linux
static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	struct scatterlist *iv_sg = NULL;
	unsigned int iv_len = 0;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
	     (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
	     (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
	    (req->nbytes & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
		if (!req->info)
			return -EINVAL;

		memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
		iv_sg = &rctx->iv_sg;
		iv_len = AES_BLOCK_SIZE;
		sg_init_one(iv_sg, rctx->iv, iv_len);
	}

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action =
		(encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = iv_sg;
	rctx->cmd.u.aes.iv_len = iv_len;
	rctx->cmd.u.aes.src = req->src;
	rctx->cmd.u.aes.src_len = req->nbytes;
	rctx->cmd.u.aes.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}
Code Example #25
File: cryptd.c  Project: 274914765/C
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_state *state =
		cryptd_get_state(crypto_ablkcipher_tfm(tfm));
	int err;

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	spin_lock_bh(&state->lock);
	err = ablkcipher_enqueue_request(&state->queue, req);
	spin_unlock_bh(&state->lock);

	wake_up_process(state->task);
	return err;
}
Code Example #26
static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}
Code Example #27
File: virtio_crypto_algs.c  Project: dznm/linux
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int ret;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->ablkcipher_ctx = ctx;
	vc_req->ablkcipher_req = req;
	ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 1);
	if (ret < 0) {
		pr_err("virtio_crypto: Encryption failed!\n");
		return ret;
	}

	return -EINPROGRESS;
}
Code Example #28
File: dcp.c  Project: BozkurTR/kernel
static void dcp_queue_task(unsigned long data)
{
	struct dcp_dev *dev = (struct dcp_dev *) data;
	struct crypto_async_request *async_req, *backlog;
	struct crypto_ablkcipher *tfm;
	struct dcp_op *ctx;
	struct dcp_dev_req_ctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;

	spin_lock_irqsave(&dev->queue_lock, flags);

	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	spin_unlock_irqrestore(&dev->queue_lock, flags);

	if (!async_req)
		goto ret_nothing_done;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);
	tfm = crypto_ablkcipher_reqtfm(req);
	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);

	if (!req->src || !req->dst)
		goto ret_nothing_done;

	ctx->flags |= rctx->mode;
	ctx->req = req;

	dcp_crypt(dev, ctx);

	return;

ret_nothing_done:
	clear_bit(DCP_FLAG_BUSY, &dev->flags);
}
Code Example #29
File: ccp-crypto-aes.c  Project: 168519/linux
static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	u8 *iv;

	/* Initialize the CTR block */
	iv = rctx->rfc3686_iv;
	memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);

	iv += CTR_RFC3686_NONCE_SIZE;
	memcpy(iv, req->info, CTR_RFC3686_IV_SIZE);

	iv += CTR_RFC3686_IV_SIZE;
	*(__be32 *)iv = cpu_to_be32(1);

	/* Point to the new IV */
	rctx->rfc3686_info = req->info;
	req->info = rctx->rfc3686_iv;

	return ccp_aes_crypt(req, encrypt);
}
Code Example #30
File: ablkcipher.c  Project: 020gzh/linux
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_tfm *tfm =
			crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
	    ctx->enc_keylen != AES_KEYSIZE_256) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		ret = encrypt ? crypto_ablkcipher_encrypt(req) :
				crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}