Example #1
/*
 * kick_trng - sets the various parameters for enabling the initialization
 *	       of the RNG4 block in CAAM
 * @pdev - pointer to the platform device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
static void kick_trng(struct platform_device *pdev, int ent_delay)
{
	struct device *ctrldev = &pdev->dev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	struct rng4tst __iomem *r4tst;
	u32 val;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	r4tst = &ctrl->r4tst[0];

	/* put RNG4 into program mode */
	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);

	/*
	 * Performance-wise, it does not make sense to set the delay
	 * to a value lower than the last one that worked (i.e. the
	 * one with which the state handles were instantiated
	 * properly). Thus, instead of wasting time reprogramming the
	 * registers that control the sample frequency, skip straight
	 * to starting the RNG.
	 */
	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
	      >> RTSDCTL_ENT_DLY_SHIFT;
	if (ent_delay <= val)
		goto start_rng;

	val = rd_reg32(&r4tst->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
	wr_reg32(&r4tst->rtsdctl, val);
	/* min. freq. count, equal to 1/4 of the entropy sample length */
	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
	/* disable maximum frequency count */
	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
	/* read the control register */
	val = rd_reg32(&r4tst->rtmctl);
start_rng:
	/*
	 * select raw sampling in both entropy shifter
	 * and statistical checker; put RNG4 into run mode
	 */
	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
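For context, kick_trng() is called from the controller's probe path, which retries RNG4 state-handle instantiation with a progressively longer entropy delay until it succeeds. A condensed sketch of that loop follows, modeled on caam_probe() in the same driver; inst_handles, gen_sk, instantiate_rng() and the +400 increment are borrowed from the upstream flow and should be treated as illustrative here.

/*
 * Sketch of the kick_trng() caller, condensed from the probe path.
 * Assumes RTSDCTL_ENT_DLY_MIN and instantiate_rng() as in the upstream
 * driver; error handling other than -EAGAIN is omitted.
 */
int ent_delay = RTSDCTL_ENT_DLY_MIN;
int ret;

do {
	/* reprogram the sample frequency and leave program mode */
	kick_trng(pdev, ent_delay);
	/* lengthen the delay for the next attempt, if one is needed */
	ent_delay += 400;
	/*
	 * instantiate_rng() returns -EAGAIN when the entropy delay was
	 * too short for the state handles to instantiate properly.
	 */
	ret = instantiate_rng(ctrldev, inst_handles, gen_sk);
} while (ret == -EAGAIN);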
Example #2
static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/*
	 * mask interrupts since we are going to poll
	 * for reset completion status
	 */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* initiate flush (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* initiate reset */
	timeout = 100000;
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	return 0;
}
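Both the flush and the reset phases above open-code the same bounded poll loop. A hypothetical helper that factors out the pattern might look like the following sketch; it is not part of the driver, and simply reuses rd_reg32() and cpu_relax() as seen above.

/*
 * Hypothetical helper (sketch only): spin while (*reg & mask) == val,
 * up to 'timeout' iterations. Returns 0 once the condition clears,
 * -ETIMEDOUT if the budget runs out first.
 */
static int caam_poll_while(u32 __iomem *reg, u32 mask, u32 val,
			   unsigned int timeout)
{
	while (((rd_reg32(reg) & mask) == val) && --timeout)
		cpu_relax();

	return timeout ? 0 : -ETIMEDOUT;
}

The flush wait would then read caam_poll_while(&jrp->rregs->jrintstatus, JRINT_ERR_HALT_MASK, JRINT_ERR_HALT_INPROGRESS, 100000), and the reset wait caam_poll_while(&jrp->rregs->jrcommand, JRCR_RESET, JRCR_RESET, 100000).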
Example #3
/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *        been assigned prior by caam_jr_register().
 * @desc: points to a job descriptor that executes our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 status, void *areq)
 *        where:
 *        @dev:    contains the job ring device that processed this
 *                 response.
 *        @desc:   descriptor that initiated the request, same as
 *                 the "desc" passed to caam_jr_enqueue().
 *        @status: untranslated status received from CAAM. See the
 *                 reference manual for a detailed description of
 *                 error meaning, or see the JRSTA definitions in the
 *                 register header file
 *        @areq:   optional pointer to an argument passed with the
 *                 original request
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
	tail = ACCESS_ONCE(jrp->tail);

	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_bh(&jrp->inplock);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;

	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_bh(&jrp->inplock);

	return 0;
}
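A minimal synchronous caller, following the completion-based pattern used elsewhere in the CAAM driver (e.g. split-key generation); the struct and function names below are illustrative, not part of the API.

#include <linux/completion.h>

struct jr_job_result {
	struct completion completion;	/* signalled by the callback */
	u32 status;			/* raw CAAM status word */
};

/* matches the cbk signature documented above */
static void jr_job_done(struct device *dev, u32 *desc, u32 status, void *areq)
{
	struct jr_job_result *res = areq;

	res->status = status;
	complete(&res->completion);
}

/* enqueue 'desc' and sleep until the job ring reports completion */
static int jr_run_job_sync(struct device *jrdev, u32 *desc)
{
	struct jr_job_result res;
	int ret;

	init_completion(&res.completion);

	ret = caam_jr_enqueue(jrdev, desc, jr_job_done, &res);
	if (ret)
		return ret;	/* -EBUSY or -EIO, per the kerneldoc */

	wait_for_completion(&res.completion);
	return res.status ? -EIO : 0;
}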
Example #4
void caam_secvio_shutdown(struct platform_device *pdev)
{
	struct device *ctrldev, *svdev;
	struct caam_drv_private *priv;
	struct caam_drv_private_secvio *svpriv;
	int i;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	svdev = priv->secviodev;
	svpriv = dev_get_drvdata(svdev);

	/* Shut off all sources */
	wr_reg32(&svpriv->svregs->hp.secvio_int_ctl, 0);

	/* Remove tasklets and release interrupt */
	for_each_possible_cpu(i)
		tasklet_kill(&svpriv->irqtask[i]);

	free_irq(priv->secvio_irq, svdev);

	kfree(svpriv);
}
Example #5
/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses, kick
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * If there is a job ring error, we have more development work
	 * to do. Flag a bug for now, but we really need to shut down
	 * and restart the queue (and fix the code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
	preempt_enable();

	return IRQ_HANDLED;
}
Example #6
static int snvs_secvio_remove(struct platform_device *pdev)
{
	struct device *svdev;
	struct snvs_secvio_drv_private *svpriv;
	int i;

	svdev = &pdev->dev;
	svpriv = dev_get_drvdata(svdev);

	clk_enable(svpriv->clk);
	/* Set all sources to nonfatal */
	wr_reg32(&svpriv->svregs->hp.secvio_intcfg, 0);

	/* Remove tasklets and release interrupt */
	for_each_possible_cpu(i)
		tasklet_kill(&svpriv->irqtask[i]);

	clk_disable_unprepare(svpriv->clk);
	free_irq(svpriv->irq, svdev);
	iounmap(svpriv->svregs);
	kfree(svpriv);

	return 0;
}
Example #7
/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	dma_addr_t outbusaddr;
	void *userarg;
	unsigned long flags;

	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
	dma_sync_single_for_cpu(dev, outbusaddr,
				sizeof(struct jr_outentry) * JOBR_DEPTH,
				DMA_FROM_DEVICE);

	spin_lock_irqsave(&jrp->outlock, flags);

	head = ACCESS_ONCE(jrp->head);
	sw_idx = tail = jrp->tail;

	while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
	       rd_reg32(&jrp->rregs->outring_used)) {

		hw_idx = jrp->out_ring_read_index;
		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			smp_read_barrier_depends();
			if (jrp->outring[hw_idx].desc ==
			    jrp->entinfo[sw_idx].desc_addr_dma)
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params for use outside of lock */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = jrp->outring[hw_idx].jrstatus;

		smp_mb();

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail.  Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
				smp_read_barrier_depends();
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		spin_unlock_irqrestore(&jrp->outlock, flags);

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);

		spin_lock_irqsave(&jrp->outlock, flags);

		head = ACCESS_ONCE(jrp->head);
		sw_idx = tail = jrp->tail;
	}

	spin_unlock_irqrestore(&jrp->outlock, flags);

	/* reenable / unmask IRQs */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
}
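The enqueue/dequeue pair relies on the kernel's circular-buffer helpers for its head/tail arithmetic. For reference, include/linux/circ_buf.h defines them as:

/* number of occupied entries (head and tail may wrap; size is a power of 2) */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))

/*
 * free entries; one slot is always left empty so that a full ring
 * (which would have head == tail) is distinguishable from an empty one
 */
#define CIRC_SPACE(head, tail, size)	CIRC_CNT((tail), ((head) + 1), (size))

This is why caam_jr_enqueue() refuses a job when CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0, and why the dequeue loop above keeps running while CIRC_CNT(head, tail, JOBR_DEPTH) >= 1.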
Example #8
/*
 * Init JobR independent of platform property detection
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	/* Connect job ring interrupt handler. */
	for_each_possible_cpu(i)
		tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
			     (unsigned long)dev);

	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
			    "caam-jr", dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		irq_dispose_mapping(jrp->irq);
		jrp->irq = 0;
		return -EINVAL;
	}

	error = caam_reset_hw_jr(dev);
	if (error)
		return error;

	jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
			       GFP_KERNEL | GFP_DMA);
	jrp->outring = kzalloc(sizeof(struct jr_outentry) *
			       JOBR_DEPTH, GFP_KERNEL | GFP_DMA);

	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
			       GFP_KERNEL);

	if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
	    (jrp->entinfo == NULL)) {
		dev_err(dev, "can't allocate job rings for %d\n",
			jrp->ridx);
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -ENOMEM;
	}

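	/*
	 * Seed every slot with a nonzero DMA address so that unused slots
	 * are never mistaken for completed jobs: the dequeue path (see
	 * Example 7) writes 0 to desc_addr_dma to mark completion.
	 */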
	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	inpbusaddr = dma_map_single(dev, jrp->inpring,
				    sizeof(dma_addr_t) * JOBR_DEPTH,
				    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, inpbusaddr)) {
		dev_err(dev, "caam_jr_init(): can't map input ring\n");
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -EIO;
	}

	outbusaddr = dma_map_single(dev, jrp->outring,
				    sizeof(struct jr_outentry) * JOBR_DEPTH,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, outbusaddr)) {
		dev_err(dev, "caam_jr_init(): can't map output ring\n");
		dma_unmap_single(dev, inpbusaddr,
				 sizeof(dma_addr_t) * JOBR_DEPTH,
				 DMA_TO_DEVICE);
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -EIO;
	}

	jrp->inp_ring_write_index = 0;
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->ringsize = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);
	spin_lock_init(&jrp->outlock);

	/* Select interrupt coalescing parameters */
	setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
		  (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		  (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	jrp->assign = JOBR_UNASSIGNED;
	return 0;
}
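Note that this variant allocates the rings with kzalloc() and maps them with dma_map_single(), i.e. streaming DMA, so buffer ownership has to be handed between CPU and device explicitly; Example 7 performs the CPU-side half with dma_sync_single_for_cpu(). A sketch of the pairing, reusing the names from this function:

/* before the CPU reads output-ring entries the device has written */
dma_sync_single_for_cpu(dev, outbusaddr,
			sizeof(struct jr_outentry) * JOBR_DEPTH,
			DMA_FROM_DEVICE);

/* after the CPU writes input-ring entries the device will read */
dma_sync_single_for_device(dev, inpbusaddr,
			   sizeof(dma_addr_t) * JOBR_DEPTH,
			   DMA_TO_DEVICE);

Example 10 below sidesteps these handoffs entirely by allocating both rings with dma_alloc_coherent(), which keeps the CPU and device views consistent without explicit syncs.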
Example #9
static int snvs_secvio_probe(struct platform_device *pdev)
{
	struct device *svdev;
	struct snvs_secvio_drv_private *svpriv;
	struct device_node *np, *npirq;
	struct snvs_full __iomem *snvsregs;
	int i, error;
	u32 hpstate;

	svpriv = kzalloc(sizeof(struct snvs_secvio_drv_private), GFP_KERNEL);
	if (!svpriv)
		return -ENOMEM;

	svdev = &pdev->dev;
	dev_set_drvdata(svdev, svpriv);
	svpriv->pdev = pdev;
	np = pdev->dev.of_node;

	npirq = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-secvio");
	if (!npirq) {
		dev_err(svdev, "can't identify secvio interrupt\n");
		kfree(svpriv);
		return -EINVAL;
	}
	svpriv->irq = irq_of_parse_and_map(npirq, 0);
	if (svpriv->irq <= 0) {
		kfree(svpriv);
		return -EINVAL;
	}

	snvsregs = of_iomap(np, 0);
	if (!snvsregs) {
		dev_err(svdev, "register mapping failed\n");
		kfree(svpriv);
		return -ENOMEM;
	}
	svpriv->svregs = (struct snvs_full __force *)snvsregs;

	svpriv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(svpriv->clk)) {
		dev_err(&pdev->dev, "can't get snvs clock\n");
		svpriv->clk = NULL;
	}

	/* Device data set up. Now init interrupt source descriptions */
	for (i = 0; i < MAX_SECVIO_SOURCES; i++) {
		svpriv->intsrc[i].intname = violation_src_name[i];
		svpriv->intsrc[i].handler = snvs_secvio_default;
	}
	/* Connect main handler */
	for_each_possible_cpu(i)
		tasklet_init(&svpriv->irqtask[i], snvs_secvio_dispatch,
			     (unsigned long)svdev);

	error = request_irq(svpriv->irq, snvs_secvio_interrupt,
			    IRQF_SHARED, "snvs-secvio", svdev);
	if (error) {
		dev_err(svdev, "can't connect secvio interrupt\n");
		irq_dispose_mapping(svpriv->irq);
		svpriv->irq = 0;
		iounmap(svpriv->svregs);
		kfree(svpriv);
		return -EINVAL;
	}

	clk_prepare_enable(svpriv->clk);
	/*
	 * Configure all sources as fatal violations except LP section,
	 * source #5 (typically used as an external tamper detect), and
	 * source #3 (typically unused). Whenever the transition to
	 * secure mode has occurred, these will now be "fatal" violations
	 */
	wr_reg32(&svpriv->svregs->hp.secvio_intcfg,
		 HP_SECVIO_INTEN_SRC4 | HP_SECVIO_INTEN_SRC2 |
		 HP_SECVIO_INTEN_SRC1 | HP_SECVIO_INTEN_SRC0);

	hpstate = (rd_reg32(&svpriv->svregs->hp.status) &
			    HP_STATUS_SSM_ST_MASK) >> HP_STATUS_SSM_ST_SHIFT;
	dev_info(svdev, "violation handlers armed - %s state\n",
		 snvs_ssm_state_name[hpstate]);

	clk_disable(svpriv->clk);

	return 0;
}
Example #10
/*
 * Init JobR independent of platform property detection
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);

	/* Connect job ring interrupt handler. */
	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
			    "caam-jobr", dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		irq_dispose_mapping(jrp->irq);
		jrp->irq = 0;
		return -EINVAL;
	}

	error = caam_reset_hw_jr(dev);
	if (error)
		return error;

	jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
					  &inpbusaddr, GFP_KERNEL);

	jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);

	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
			       GFP_KERNEL);

	if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
	    (jrp->entinfo == NULL)) {
		dev_err(dev, "can't allocate job rings for %d\n",
			jrp->ridx);
		return -ENOMEM;
	}

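	/* nonzero marker: 0 in desc_addr_dma means the job completed
	 * (see Example 7) */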
	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	jrp->inp_ring_write_index = 0;
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->ringsize = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);
	spin_lock_init(&jrp->outlock);

	/* Select interrupt coalescing parameters */
	setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
		  (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		  (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	return 0;
}
Example #11
int caam_secvio_startup(struct platform_device *pdev)
{
	struct device *ctrldev, *svdev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_secvio *svpriv;
	struct platform_device *svpdev;
	struct device_node *np;
	const void *prop;
	int i, error, secvio_inten_src;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);
	/*
	 * Set up the private block for secure memory
	 * Only one instance is possible
	 */
	svpriv = kzalloc(sizeof(struct caam_drv_private_secvio), GFP_KERNEL);
	if (svpriv == NULL) {
		dev_err(ctrldev, "can't alloc private mem for secvio\n");
		return -ENOMEM;
	}
	svpriv->parentdev = ctrldev;

	/* Create the security violation dev */
#ifdef CONFIG_OF

	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-secvio");
	if (!np) {
		kfree(svpriv);
		return -ENODEV;
	}

	ctrlpriv->secvio_irq = of_irq_to_resource(np, 0, NULL);

	prop = of_get_property(np, "secvio_src", NULL);
	if (prop)
		secvio_inten_src = of_read_ulong(prop, 1);
	else
		secvio_inten_src = HP_SECVIO_INTEN_ALL;

	dev_info(ctrldev, "secvio_inten_src = %x\n", secvio_inten_src);

	svpdev = of_platform_device_create(np, NULL, ctrldev);
	if (!svpdev) {
		kfree(svpriv);
		return -ENODEV;
	}

#else
	svpdev = platform_device_register_data(ctrldev, "caam_secvio", 0,
					       svpriv,
				sizeof(struct caam_drv_private_secvio));

	secvio_inten_src = HP_SECVIO_INTEN_ALL;
#endif
	if (svpdev == NULL) {
		kfree(svpriv);
		return -EINVAL;
	}
	svdev = &svpdev->dev;
	dev_set_drvdata(svdev, svpriv);
	ctrlpriv->secviodev = svdev;
	svpriv->svregs = ctrlpriv->snvs;

	/*
	 * Now we have all the dev data set up. Init interrupt
	 * source descriptions
	 */
	for (i = 0; i < MAX_SECVIO_SOURCES; i++) {
		svpriv->intsrc[i].intname = violation_src_name[i];
		svpriv->intsrc[i].handler = caam_secvio_default;
	}

	/* Connect main handler */
	for_each_possible_cpu(i)
		tasklet_init(&svpriv->irqtask[i], caam_secvio_dispatch,
			     (unsigned long)svdev);

	error = request_irq(ctrlpriv->secvio_irq, caam_secvio_interrupt,
			    IRQF_SHARED, "caam_secvio", svdev);
	if (error) {
		dev_err(svdev, "can't connect secvio interrupt\n");
		irq_dispose_mapping(ctrlpriv->secvio_irq);
		ctrlpriv->secvio_irq = 0;
		return -EINVAL;
	}

	/* Enable all sources */
	wr_reg32(&svpriv->svregs->hp.secvio_int_ctl, secvio_inten_src);

	dev_info(svdev, "security violation service handlers armed\n");

	return 0;
}
Example #12
/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 *			  the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @desc - pointer to the descriptor to be run
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *	   - -ENODEV if the DECO couldn't be acquired
 *	   - -EAGAIN if an error occurred while executing the descriptor
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
					u32 *status)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	struct caam_deco __iomem *deco = ctrlpriv->deco;
	unsigned int timeout = 100000;
	u32 deco_dbg_reg, flags;
	int i;

	if (ctrlpriv->virt_en == 1) {
		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
		       --timeout)
			cpu_relax();

		timeout = 100000;
	}

	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);

	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) && --timeout)
		cpu_relax();

	if (!timeout) {
		dev_err(ctrldev, "failed to acquire DECO 0\n");
		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
		return -ENODEV;
	}

	for (i = 0; i < desc_len(desc); i++)
		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));

	flags = DECO_JQCR_WHL;
	/*
	 * If the descriptor length is 4 words or more, the FOUR bit
	 * in the JRCTRL register must be set.
	 */
	if (desc_len(desc) >= 4)
		flags |= DECO_JQCR_FOUR;

	/* Instruct the DECO to execute it */
	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);

	timeout = 10000000;
	do {
		deco_dbg_reg = rd_reg32(&deco->desc_dbg);
		/*
		 * If an error occurred in the descriptor, then
		 * the DECO status field will be set to 0x0D
		 */
		if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
		    DESC_DBG_DECO_STAT_HOST_ERR)
			break;
		cpu_relax();
	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

	*status = rd_reg32(&deco->op_status_hi) &
		  DECO_OP_STATUS_HI_ERR_MASK;

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);

	/* Mark the DECO as free */
	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);

	if (!timeout)
		return -EAGAIN;

	return 0;
}
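A sketch of a typical caller, modeled on the RNG-instantiation path in the same file; the wrapper name is hypothetical, and build_instantiation_desc() with its arguments is an assumption borrowed from that flow.

/*
 * Sketch only: run one descriptor directly on DECO0 and report both the
 * driver-level and the CAAM-level outcome. CAAM_CMD_SZ is the size of
 * one descriptor word; build_instantiation_desc() is assumed from
 * ctrl.c and fills 'desc' with a complete job descriptor.
 */
static int run_rng_init_desc(struct device *ctrldev, int state_handle,
			     int gen_sk)
{
	u32 *desc, status;
	int ret;

	/* room for a short (7-word) descriptor, as in the upstream path */
	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);
	if (!desc)
		return -ENOMEM;

	build_instantiation_desc(desc, state_handle, gen_sk);

	ret = run_descriptor_deco0(ctrldev, desc, &status);
	if (ret || status) {
		dev_err(ctrldev, "descriptor failed: ret %d, status 0x%08x\n",
			ret, status);
		if (!ret)
			ret = -EAGAIN;
	}

	kfree(desc);
	return ret;
}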