Example #1
static int __devinit sunxi_ss_probe(struct platform_device *pdev)
{
	int ret = 0;
	sunxi_ss_t *sss = NULL;

	sss = devm_kzalloc(&pdev->dev, sizeof(sunxi_ss_t), GFP_KERNEL);
	if (sss == NULL) {
		SS_ERR("Unable to allocate sunxi_ss_t\n");
		return -ENOMEM;
	}

	snprintf(sss->dev_name, sizeof(sss->dev_name), SUNXI_SS_DEV_NAME);
	platform_set_drvdata(pdev, sss);

	ret = sunxi_ss_res_request(pdev);
	if (ret != 0) {
		goto err0;
	}

	sss->pdev = pdev;

	ret = sunxi_ss_hw_init(sss);
	if (ret != 0) {
		SS_ERR("SS hw init failed!\n");
		goto err1;
	}

	spin_lock_init(&sss->lock);
	INIT_WORK(&sss->work, sunxi_ss_work);
	crypto_init_queue(&sss->queue, 16);

	sss->workqueue = create_singlethread_workqueue(sss->dev_name);
	if (sss->workqueue == NULL) {
		SS_ERR("Unable to create workqueue\n");
		ret = -ENOMEM;
		goto err2;
	}

	ret = sunxi_ss_alg_register();
	if (ret != 0) {
		SS_ERR("sunxi_ss_alg_register() failed! return %d \n", ret);
		goto err3;
	}

	sunxi_ss_sysfs_create(pdev);

	ss_dev = sss;
	SS_DBG("SS driver probe succeed, base 0x%p, irq %d!\n", sss->base_addr, sss->irq);
	return 0;

err3:
	destroy_workqueue(sss->workqueue);
err2:
	sunxi_ss_hw_exit(sss);
err1:
	sunxi_ss_res_release(sss);
err0:
	platform_set_drvdata(pdev, NULL);
	return ret;
}
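
The probe above pairs crypto_init_queue() with a spinlock and a single-threaded workqueue; the later examples do the same with a tasklet or a kthread. The second argument of crypto_init_queue() is the maximum queue length: once it is reached, crypto_enqueue_request() drops further requests with an error unless they were submitted with CRYPTO_TFM_REQ_MAY_BACKLOG, in which case the request is kept as backlog and the caller sees -EBUSY. The following is a minimal sketch of how such a queue is typically fed and drained; it is not taken from any of these drivers, and struct my_dev, my_enqueue() and my_process_req() are hypothetical names.

#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <crypto/algapi.h>

struct my_dev {
	spinlock_t lock;
	struct crypto_queue queue;
	struct workqueue_struct *workqueue;
	struct work_struct work;
};

/* Hypothetical placeholder for the hardware-specific processing. */
static void my_process_req(struct my_dev *d, struct crypto_async_request *req)
{
	/* program the engine, then call req->complete(req, 0) when it finishes */
}

/* Called from an algorithm entry point: queue the request and defer. */
static int my_enqueue(struct my_dev *d, struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&d->lock, flags);
	ret = crypto_enqueue_request(&d->queue, req);	/* -EINPROGRESS or -EBUSY */
	spin_unlock_irqrestore(&d->lock, flags);

	queue_work(d->workqueue, &d->work);
	return ret;
}

/* Work handler: pull one request off the queue and run it on the hardware. */
static void my_work_fn(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, work);
	struct crypto_async_request *req, *backlog;
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	backlog = crypto_get_backlog(&d->queue);
	req = crypto_dequeue_request(&d->queue);
	spin_unlock_irqrestore(&d->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	if (req)
		my_process_req(d, req);
}
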
Example #2
static int __init brcm_spum_init(void)
{
	spum_dev = kzalloc(sizeof(struct brcm_spum_device), GFP_KERNEL);
	if (!spum_dev) {
		pr_err("%s: Unable to allocate spum_dev\n", __func__);
		return -ENOMEM;
	}

	spin_lock_init(&spum_dev->lock);

	crypto_init_queue(&spum_dev->spum_queue, SPUM_QUEUE_LENGTH);

	spum_dev->flags = 0;

	/* Acquire DMA channels */
	if (dma_request_chan(&spum_dev->rx_dma_chan, "SPUM_OpenA") != 0) {
		pr_err("%s: Rx dma_request_chan failed\n", __func__);
		kfree(spum_dev);
		return -EIO;
	}
	if (dma_request_chan(&spum_dev->tx_dma_chan, "SPUM_OpenB") != 0) {
		pr_err("%s: Tx dma_request_chan failed\n", __func__);
		goto err;
	}

	pr_info("%s: DMA channel aquired rx %d tx %d\n", __func__,
			spum_dev->rx_dma_chan, spum_dev->tx_dma_chan);

	return 0;
err:
	dma_free_chan(spum_dev->rx_dma_chan);
	kfree(spum_dev);
	return -EIO;
}
Example #3
static int async_chainiv_init(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);

	crypto_init_queue(&ctx->queue, 100);
	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);

	return chainiv_init_common(tfm);
}
Example #4
static inline int cryptd_create_thread(struct cryptd_state *state,
				       int (*fn)(void *data), const char *name)
{
	spin_lock_init(&state->lock);
	mutex_init(&state->mutex);
	crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);

	state->task = kthread_run(fn, state, name);
	if (IS_ERR(state->task))
		return PTR_ERR(state->task);

	return 0;
}
Example #5
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, res->end - res->start + 1);
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = res->end - res->start + 1;
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_irq;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		goto err_unreg_ecb;
	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}
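
mv_probe() above (and cns3xxx_probe() further below) starts a dedicated kernel thread with kthread_run(queue_manag, ...) instead of a workqueue or tasklet. A rough sketch of such a queue-manager thread, assuming only the lock and queue fields visible in the probe code; the struct name and the hardware-start step are hypothetical, and this is not the driver's actual queue_manag():

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>

struct queue_priv {		/* hypothetical, mirrors the fields used above */
	spinlock_t lock;
	struct crypto_queue queue;
};

static int queue_manag_sketch(void *data)
{
	struct queue_priv *cp = data;
	struct crypto_async_request *req, *backlog;

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		spin_lock_irq(&cp->lock);
		backlog = crypto_get_backlog(&cp->queue);
		req = crypto_dequeue_request(&cp->queue);
		spin_unlock_irq(&cp->lock);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (req) {
			__set_current_state(TASK_RUNNING);
			/* start the hardware on this request here */
		} else {
			schedule();	/* sleep until the submit path wakes us */
		}
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

The submit path would pair this with wake_up_process() on the task returned by kthread_run() after each crypto_enqueue_request() call, and teardown stops the thread with kthread_stop(), as the error labels above already do.
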
Example #6
static int s5p_aes_probe(struct platform_device *pdev)
{
	int                 i, j, err = -ENODEV;
	struct s5p_aes_dev *pdata;
	struct device      *dev = &pdev->dev;
	struct resource    *res;

	if (s5p_dev)
		return -EEXIST;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (!devm_request_mem_region(dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;

	pdata->clk = clk_get(dev, "secss");
	if (IS_ERR(pdata->clk)) {
		dev_err(dev, "failed to find secss clock source\n");
		return -ENOENT;
	}

	clk_enable(pdata->clk);

	spin_lock_init(&pdata->lock);
	pdata->ioaddr = devm_ioremap(dev, res->start,
				     resource_size(res));
	if (!pdata->ioaddr) {
		err = -ENOMEM;
		goto err_irq;
	}

	pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
	if (pdata->irq_hash < 0) {
		err = pdata->irq_hash;
		dev_warn(dev, "hash interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
			       IRQF_SHARED, pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "hash interrupt is not available.\n");
		goto err_irq;
	}

	pdata->irq_fc = platform_get_irq_byname(pdev, "feed control");
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
			       IRQF_SHARED, pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		INIT_LIST_HEAD(&algs[i].cra_list);
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	pr_info("s5p-sss driver registered\n");

	return 0;

 err_algs:
	dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);

	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);

	tasklet_kill(&pdata->tasklet);

 err_irq:
	clk_disable(pdata->clk);
	clk_put(pdata->clk);

	s5p_dev = NULL;
	platform_set_drvdata(pdev, NULL);

	return err;
}
Example #7
static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc data struct.\n");
		return -ENOMEM;
	}

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get memory region resource\n");
		return -ENODEV;
	}

	if (devm_request_mem_region(&pdev->dev, res->start,
			resource_size(res), SAHARA_NAME) == NULL) {
		dev_err(&pdev->dev, "failed to request memory region\n");
		return -ENOENT;
	}
	dev->regs_base = devm_ioremap(&pdev->dev, res->start,
				      resource_size(res));
	if (!dev->regs_base) {
		dev_err(&pdev->dev, "failed to ioremap address region\n");
		return -ENOENT;
	}

	/* Get the IRQ */
	irq = platform_get_irq(pdev,  0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	if (devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
		0, SAHARA_NAME, dev) < 0) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return -ENOENT;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		err = -ENOMEM;
		goto err_key;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for HW links */
	dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		err = -ENOMEM;
		goto err_link;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	dev_ptr = dev;

	tasklet_init(&dev->queue_task, sahara_aes_queue_task,
		     (unsigned long)dev);
	tasklet_init(&dev->done_task, sahara_aes_done_task,
		     (unsigned long)dev);

	init_timer(&dev->watchdog);
	dev->watchdog.function = &sahara_watchdog;
	dev->watchdog.data = (unsigned long)dev;

	clk_prepare_enable(dev->clk_ipg);
	clk_prepare_enable(dev->clk_ahb);

	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (version != SAHARA_VERSION_3) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		err = -ENODEV;
		goto err_algs;
	}

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			  dev->hw_link[0], dev->hw_phys_link[0]);
	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);
	dev_ptr = NULL;
err_link:
	dma_free_coherent(&pdev->dev,
			  2 * AES_KEYSIZE_128,
			  dev->key_base, dev->key_phys_base);
err_key:
	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			  dev->hw_desc[0], dev->hw_phys_desc[0]);

	return err;
}
Example #8
static int rk_crypto_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct rk_crypto_info *crypto_info;
	int err = 0;

	crypto_info = devm_kzalloc(&pdev->dev,
				   sizeof(*crypto_info), GFP_KERNEL);
	if (!crypto_info) {
		err = -ENOMEM;
		goto err_crypto;
	}

	crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
	if (IS_ERR(crypto_info->rst)) {
		err = PTR_ERR(crypto_info->rst);
		goto err_crypto;
	}

	reset_control_assert(crypto_info->rst);
	usleep_range(10, 20);
	reset_control_deassert(crypto_info->rst);

	err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
	if (err)
		goto err_crypto;

	spin_lock_init(&crypto_info->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(crypto_info->reg)) {
		err = PTR_ERR(crypto_info->reg);
		goto err_crypto;
	}

	crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
	if (IS_ERR(crypto_info->aclk)) {
		err = PTR_ERR(crypto_info->aclk);
		goto err_crypto;
	}

	crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(crypto_info->hclk)) {
		err = PTR_ERR(crypto_info->hclk);
		goto err_crypto;
	}

	crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
	if (IS_ERR(crypto_info->sclk)) {
		err = PTR_ERR(crypto_info->sclk);
		goto err_crypto;
	}

	crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(crypto_info->dmaclk)) {
		err = PTR_ERR(crypto_info->dmaclk);
		goto err_crypto;
	}

	crypto_info->irq = platform_get_irq(pdev, 0);
	if (crypto_info->irq < 0) {
		dev_warn(dev, "control interrupt is not available.\n");
		err = crypto_info->irq;
		goto err_crypto;
	}

	err = devm_request_irq(&pdev->dev, crypto_info->irq,
			       rk_crypto_irq_handle, IRQF_SHARED,
			       "rk-crypto", pdev);

	if (err) {
		dev_err(crypto_info->dev, "irq request failed.\n");
		goto err_crypto;
	}

	crypto_info->dev = &pdev->dev;
	platform_set_drvdata(pdev, crypto_info);

	tasklet_init(&crypto_info->crypto_tasklet,
		     rk_crypto_tasklet_cb, (unsigned long)crypto_info);
	crypto_init_queue(&crypto_info->queue, 50);

	crypto_info->enable_clk = rk_crypto_enable_clk;
	crypto_info->disable_clk = rk_crypto_disable_clk;
	crypto_info->load_data = rk_load_data;
	crypto_info->unload_data = rk_unload_data;

	err = rk_crypto_register(crypto_info);
	if (err) {
		dev_err(dev, "err in register alg");
		goto err_register_alg;
	}

	dev_info(dev, "Crypto Accelerator successfully registered\n");
	return 0;

err_register_alg:
	tasklet_kill(&crypto_info->crypto_tasklet);
err_crypto:
	return err;
}
Example #9
static int cns3xxx_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	int ret;

	Csp1ConfigDevice();
	ctx = Csp1AllocContext();

	if (cpg) {
		printk(KERN_ERR CNS3XXX_CRYPTO "Second crypto dev?\n");
		return -EEXIST;
	}


	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	
	cp->max_req_size = 1024;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "cns3xxx_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_thread;
	}

	/*ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_unmap_sram;*/


	/*ret = crypto_register_alg(&cns3xxx_aes_alg_ecb);
	if (ret)
		goto err_reg;*/

	ret = crypto_register_alg(&cns3xxx_aes_alg_cbc);
	if (ret)
		goto err_thread;
/*
	ret = crypto_register_ahash(&cns3xxx_sha1_alg);
	if (ret == 0)
		cpg->has_sha1 = 1;
	else
		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
	if (ret == 0) {
		cpg->has_hmac_sha1 = 1;
	} else {
		printk(KERN_WARNING MV_CESA
		       "Could not register hmac-sha1 driver\n");
	}
*/
	return 0;
err_thread:
	kthread_stop(cp->queue_th);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}
Example #10
static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc data struct.\n");
		return -ENOMEM;
	}

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev,  0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		err = -ENOMEM;
		goto err_key;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dma_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		err = -ENOMEM;
		goto err_key;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		err = -ENOMEM;
		goto err_link;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	spin_lock_init(&dev->lock);
	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_