Example 1
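The Freescale SAHARA driver's entry point: it rejects requests whose size is not a multiple of AES_BLOCK_SIZE, records the mode in the request context, enqueues under a mutex, and wakes a dedicated kthread to drain the queue.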
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}
Example 2
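A variant of the same SAHARA entry point: here the queue is guarded by a BH-disabling spinlock, and work is deferred to a tasklet that is scheduled only when FLAGS_BUSY was previously clear, so an already-running dispatcher is not kicked twice.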
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;
	int busy;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	ctx->dev = dev;

	rctx->mode = mode;
	spin_lock_bh(&dev->lock);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
	spin_unlock_bh(&dev->lock);

	if (!busy)
		tasklet_schedule(&dev->queue_task);

	return err;
}
Example 3
static int mv_handle_req(struct ablkcipher_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = ablkcipher_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}
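The Marvell CESA helper above shows the pattern at its smallest: enqueue under an IRQ-safe spinlock, wake the dispatcher thread unconditionally, and hand ablkcipher_enqueue_request's result (-EINPROGRESS, or -EBUSY when the queue is full) straight back to the caller.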
Example 4
File: cryptd.c Project: 274914765/C
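cryptd, the kernel's generic software-queueing wrapper, saves the caller's completion callback in its request context and substitutes its own before enqueueing, so the worker thread can restore and invoke the original callback once the request finishes.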
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
                    crypto_completion_t complete)
{
    struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
    struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
    struct cryptd_state *state =
        cryptd_get_state(crypto_ablkcipher_tfm(tfm));
    int err;

    rctx->complete = req->base.complete;
    req->base.complete = complete;

    spin_lock_bh(&state->lock);
    err = ablkcipher_enqueue_request(&state->queue, req);
    spin_unlock_bh(&state->lock);

    wake_up_process(state->task);
    return err;
}
Example 5
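The Samsung S5P driver refuses new work while the engine is busy, returning -EAGAIN; otherwise it marks itself busy, enqueues, and schedules its tasklet. The busy check and the enqueue happen under the same IRQ-safe lock.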
static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->busy) {
		err = -EAGAIN;
		spin_unlock_irqrestore(&dev->lock, flags);
		goto exit;
	}
	dev->busy = true;

	err = ablkcipher_enqueue_request(&dev->queue, req);
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

 exit:
	return err;
}
Example 6
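The Freescale DCP driver enqueues under an IRQ-safe spinlock, then relies on test_and_set_bit() to make the idle check and the transition to busy a single atomic step; the tasklet is scheduled only on the idle-to-busy transition.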
static int dcp_aes_cbc_crypt(struct ablkcipher_request *req, int mode)
{
	struct dcp_dev_req_ctx *rctx = ablkcipher_request_ctx(req);
	struct dcp_dev *dev = global_dev;
	unsigned long flags;
	int err = 0;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
		return -EINVAL;

	rctx->mode = mode;

	spin_lock_irqsave(&dev->queue_lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	spin_unlock_irqrestore(&dev->queue_lock, flags);

	flags = test_and_set_bit(DCP_FLAG_BUSY, &dev->flags);

	if (!(flags & DCP_FLAG_BUSY))
		tasklet_schedule(&dev->queue_task);

	return err;
}
Example 7
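The Allwinner Security System driver adds a suspend check that returns -EAGAIN, records direction, method, and mode in the request context, and defers processing to a workqueue rather than a tasklet or kthread.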
int ss_aes_crypt(struct ablkcipher_request *req, int dir, int method, int mode)
{
	int err = 0;
	unsigned long flags = 0;
	ss_aes_req_ctx_t *req_ctx = ablkcipher_request_ctx(req);

	SS_DBG("nbytes: %d, dec: %d, method: %d, mode: %d\n", req->nbytes, dir, method, mode);
	if (ss_dev->suspend) {
		SS_ERR("SS has already been suspended.\n");
		return -EAGAIN;
	}

	req_ctx->dir  = dir;
	req_ctx->type = method;
	req_ctx->mode = mode;
	req->base.flags |= SS_FLAG_AES;

	spin_lock_irqsave(&ss_dev->lock, flags);
	err = ablkcipher_enqueue_request(&ss_dev->queue, req);
	spin_unlock_irqrestore(&ss_dev->lock, flags);

	queue_work(ss_dev->workqueue, &ss_dev->work);
	return err;
}
Example 8
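The Lantiq DEU driver is the most involved caller: it wraps the request in a heap-allocated aes_container, caps each pass at DEU_MAX_PACKET_SIZE (or the length of the current scatterlist entry), and either merely enqueues when the hardware is already running or the queue is non-empty, or enqueues and processes the first chunk synchronously.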
static int lq_aes_queue_mgr(struct aes_ctx *ctx, struct ablkcipher_request *areq, 
                            u8 *iv, int dir, int mode)
{
    int err = -EINVAL; 
    unsigned long queue_flag;
    struct scatterlist *src = areq->src;
    struct scatterlist *dst = areq->dst;
    struct aes_container *aes_con = NULL;
    u32 remain, inc, nbytes = areq->nbytes;
    u32 chunk_bytes = src->length;

    aes_con = kmalloc(sizeof(*aes_con), GFP_KERNEL);

    if (!aes_con) {
        printk(KERN_ERR "Cannot allocate memory for AES container, fn %s, ln %d\n",
               __func__, __LINE__);
        return -ENOMEM;
    }

    /* AES encrypt/decrypt mode */
    if (mode == 5) {
        nbytes = AES_BLOCK_SIZE;
        chunk_bytes = AES_BLOCK_SIZE;
        mode = 0;
    }

    aes_con->bytes_processed = nbytes;
    aes_con->arequest = *(areq);
    remain = nbytes;

    //printk("debug - Line: %d, func: %s, reqsize: %d, scattersize: %d\n",
    //        __LINE__, __func__, nbytes, chunk_bytes);

    if (remain > DEU_MAX_PACKET_SIZE) 
       inc = DEU_MAX_PACKET_SIZE;
    else if (remain > chunk_bytes)
       inc = chunk_bytes; 
    else
       inc = remain;
         
    remain -= inc;
    lq_sg_init(aes_con, src, dst);  

    /* remain is a u32, so "<= 0" can only mean nothing left */
    aes_con->complete = (remain == 0) ? 1 : 0;

    aes_con->nbytes = inc;
    aes_con->iv = iv;
    aes_con->mode = mode;
    aes_con->encdec = dir;
 
    spin_lock_irqsave(&aes_queue->lock, queue_flag);

    if (aes_queue->hw_status == AES_STARTED || aes_queue->hw_status == AES_BUSY ||
             aes_queue->list.qlen > 0) {

        aes_con->flag = PROCESS_NEW_PACKET;
        err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);

        /* max queue length reached */
        if (err == -EBUSY) {
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
            printk(KERN_ERR "Unable to enqueue request ln: %d, err: %d\n",
                   __LINE__, err);
            return err;
        }

        spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
        return -EINPROGRESS;
    }
    else if (aes_queue->hw_status == AES_IDLE) 
        aes_queue->hw_status = AES_STARTED;

    aes_con->flag = PROCESS_SCATTER;
    aes_con->bytes_processed -= aes_con->nbytes;
    /* or enqueue the whole structure so as to get back the info 
     * at the moment that it's queued. nbytes might be different */
    err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);

    if (err == -EBUSY) {
        spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
        printk(KERN_ERR "Unable to enqueue request ln: %d, err: %d\n",
               __LINE__, err);
        return err;
    }

    spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
    return lq_deu_aes_core(ctx, aes_con->dst_buf, aes_con->src_buf, iv, inc, dir, mode);
}
Example 9
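The companion routine to Example 8: invoked for each subsequent chunk, it advances to the next scatterlist entry (or starts a new packet), recomputes the chunk size, re-enqueues the container's request, and hands the chunk to lq_deu_aes_core().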
static int process_next_packet(struct aes_container *aes_con, struct ablkcipher_request *areq,
                               int state)
{
    u8 *iv;
    int mode, dir, err = -EINVAL;
    unsigned long queue_flag;
    u32 inc, nbytes, remain, chunk_size;
    struct scatterlist *src = NULL;
    struct scatterlist *dst = NULL;
    struct crypto_ablkcipher *cipher;
    struct aes_ctx *ctx;

    spin_lock_irqsave(&aes_queue->lock, queue_flag);

    dir = aes_con->encdec;
    mode = aes_con->mode;
    iv = aes_con->iv;
 
    if (state & PROCESS_SCATTER) {
        src = scatterwalk_sg_next(areq->src);
        dst = scatterwalk_sg_next(areq->dst);
 
        if (!src || !dst) {
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
            return 1;
        }
    }
    else if (state & PROCESS_NEW_PACKET) { 
        src = areq->src;
        dst = areq->dst;
    }

    remain = aes_con->bytes_processed;
    chunk_size = src->length;

    if (remain > DEU_MAX_PACKET_SIZE)
       inc = DEU_MAX_PACKET_SIZE;
    else if (remain > chunk_size)
       inc = chunk_size;
    else
       inc = remain;

    remain -= inc;
    aes_con->nbytes = inc;
 
    if (state & PROCESS_SCATTER) {
        aes_con->src_buf += aes_con->nbytes;
        aes_con->dst_buf += aes_con->nbytes;
    }

    lq_sg_init(aes_con, src, dst);

    nbytes = aes_con->nbytes;

    //printk("debug - Line: %d, func: %s, reqsize: %d, scattersize: %d\n",
    //          __LINE__, __func__, nbytes, chunk_size);

    cipher = crypto_ablkcipher_reqtfm(areq);
    ctx = crypto_ablkcipher_ctx(cipher);

    if (aes_queue->hw_status == AES_IDLE)
        aes_queue->hw_status = AES_STARTED;

    aes_con->bytes_processed -= aes_con->nbytes;
    err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);
    if (err == -EBUSY) {
        spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
        printk("Failed to enqueue request, ln: %d, err: %d\n",
                __LINE__, err);
        return -EINVAL;
    }

    spin_unlock_irqrestore(&aes_queue->lock, queue_flag);

    err = lq_deu_aes_core(ctx, aes_con->dst_buf, aes_con->src_buf, iv, nbytes, dir, mode);
    return err;

}
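Across all nine call sites the shape is the same: take the queue lock, call ablkcipher_enqueue_request(), drop the lock, and kick whichever context drains the queue (a kthread, tasklet, or workqueue). A minimal sketch of that skeleton follows; struct my_dev, my_aes_enqueue, and the field names are hypothetical, chosen for illustration rather than taken from any driver above.

#include <crypto/algapi.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Hypothetical driver context; names are illustrative only. */
struct my_dev {
	struct crypto_queue queue;	/* set up with crypto_init_queue() */
	spinlock_t lock;		/* protects queue */
	struct tasklet_struct tasklet;	/* bottom half that drains queue */
};

static int my_aes_enqueue(struct my_dev *dev, struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	/* The queue is shared with the completion path, which may run in
	 * interrupt context, hence the IRQ-safe lock. */
	spin_lock_irqsave(&dev->lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	spin_unlock_irqrestore(&dev->lock, flags);

	/* Kick the dispatcher. err is -EINPROGRESS on success, or -EBUSY
	 * when the queue is full (the request is still backlogged if the
	 * caller set CRYPTO_TFM_REQ_MAY_BACKLOG). */
	tasklet_schedule(&dev->tasklet);

	return err;
}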