/* Top-level security violation interrupt */
static irqreturn_t snvs_secvio_interrupt(int irq, void *snvsdev)
{
	struct device *dev = snvsdev;
	struct snvs_secvio_drv_private *svpriv = dev_get_drvdata(dev);

	clk_enable(svpriv->clk);

	/* Check the HP secvio status register */
	svpriv->irqcause = rd_reg32(&svpriv->svregs->hp.secvio_status) &
			   HP_SECVIOST_SECVIOMASK;

	if (!svpriv->irqcause) {
		clk_disable(svpriv->clk);
		return IRQ_NONE;
	}

	/* Now ACK cause */
	setbits32(&svpriv->svregs->hp.secvio_status, svpriv->irqcause);

	/* And run deferred service */
	preempt_disable();
	tasklet_schedule(&svpriv->irqtask[smp_processor_id()]);
	preempt_enable();

	clk_disable(svpriv->clk);

	return IRQ_HANDLED;
}
/* Deferred service handler. Tasklet arg is simply the SNVS dev */
static void caam_secvio_dispatch(unsigned long indev)
{
	struct device *dev = (struct device *)indev;
	struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev);
	unsigned long flags, cause;
	int i;

	/*
	 * Capture the interrupt cause, using masked interrupts as
	 * identification. This only works if all are enabled; if
	 * this changes in the future, a "cause queue" will have to
	 * be built
	 */
	cause = rd_reg32(&svpriv->svregs->hp.secvio_int_ctl) &
		(HP_SECVIO_INTEN_SRC5 | HP_SECVIO_INTEN_SRC4 |
		 HP_SECVIO_INTEN_SRC3 | HP_SECVIO_INTEN_SRC2 |
		 HP_SECVIO_INTEN_SRC1 | HP_SECVIO_INTEN_SRC0);

	/* Look through causes, call each handler if one exists */
	for (i = 0; i < MAX_SECVIO_SOURCES; i++)
		if (cause & (1 << i)) {
			spin_lock_irqsave(&svpriv->svlock, flags);
			svpriv->intsrc[i].handler(dev, i,
						  svpriv->intsrc[i].ext);
			spin_unlock_irqrestore(&svpriv->svlock, flags);
		}

	/* Re-enable now-serviced interrupts */
	setbits32(&svpriv->svregs->hp.secvio_int_ctl, cause);
}
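/*
 * For reference, a per-source violation handler must match the
 * three-argument call the dispatcher makes above:
 * handler(dev, source, ext). A minimal sketch follows; the handler
 * name and its policy are hypothetical, not part of the driver.
 */
static void example_tamper_handler(struct device *dev, u32 cause, void *ext)
{
	/*
	 * Log the violation; a real handler would also zeroize secrets
	 * and/or transition the SSM per system security policy.
	 */
	dev_crit(dev, "security violation on source %u\n", cause);
}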
/* Top-level security violation interrupt */
static irqreturn_t caam_secvio_interrupt(int irq, void *snvsdev)
{
	struct device *dev = snvsdev;
	struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev);
	u32 irqstate;

	/* Check the HP secvio status register */
	irqstate = rd_reg32(&svpriv->svregs->hp.secvio_status) &
		   HP_SECVIOST_SECVIOMASK;

	if (!irqstate)
		return IRQ_NONE;

	/* Mask out one or more causes for deferred service */
	clrbits32(&svpriv->svregs->hp.secvio_int_ctl, irqstate);

	/* Now ACK causes */
	setbits32(&svpriv->svregs->hp.secvio_status, irqstate);

	/* And run deferred service */
	preempt_disable();
	tasklet_schedule(&svpriv->irqtask[smp_processor_id()]);
	preempt_enable();

	return IRQ_HANDLED;
}
/*
 * kick_trng - sets the various parameters for enabling the initialization
 *	       of the RNG4 block in CAAM
 * @pdev - pointer to the platform device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
static void kick_trng(struct platform_device *pdev, int ent_delay)
{
	struct device *ctrldev = &pdev->dev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	struct rng4tst __iomem *r4tst;
	u32 val;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	r4tst = &ctrl->r4tst[0];

	/* put RNG4 into program mode */
	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);

	/*
	 * Performance-wise, it does not make sense to
	 * set the delay to a value that is lower
	 * than the last one that worked (i.e. the state handles
	 * were instantiated properly). Thus, instead of wasting
	 * time trying to set the values controlling the sample
	 * frequency, the function simply returns.
	 */
	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK) >>
	      RTSDCTL_ENT_DLY_SHIFT;
	if (ent_delay <= val)
		goto start_rng;

	val = rd_reg32(&r4tst->rtsdctl);
	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
	wr_reg32(&r4tst->rtsdctl, val);

	/* min. freq. count, equal to 1/4 of the entropy sample length */
	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);

	/* disable maximum frequency count */
	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);

	/* read the control register */
	val = rd_reg32(&r4tst->rtmctl);

start_rng:
	/*
	 * select raw sampling in both entropy shifter
	 * and statistical checker; put RNG4 into run mode
	 */
	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
/*
 * instantiate_rng - builds and executes a descriptor on DECO0,
 *		     which initializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *			for the RNG4 state handles which exist in
 *			the RNG4 block: 1 if it's been instantiated
 *			by an external entry, 0 otherwise.
 * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
 *	     Caution: this can be done only once; if the keys need to be
 *	     regenerated, a POR is required
 *
 * Return: - 0 if no error occurred
 *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
 *	   - -ENODEV if DECO0 couldn't be acquired
 *	   - -EAGAIN if an error occurred when executing the descriptor,
 *	      f.i. there was a RNG hardware error due to not "good enough"
 *	      entropy being acquired.
 */
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
			   int gen_sk)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	u32 *desc, status = 0, rdsta_val;
	int ret = 0, sh_idx;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;

	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		/*
		 * If the corresponding bit is set, this state handle
		 * was initialized by somebody else, so it's left alone.
		 */
		if ((1 << sh_idx) & state_handle_mask)
			continue;

		/* Create the descriptor for instantiating RNG State Handle */
		build_instantiation_desc(desc, sh_idx, gen_sk);

		/* Try to run it through DECO0 */
		ret = run_descriptor_deco0(ctrldev, desc, &status);

		/*
		 * If ret is not 0, or descriptor status is not 0, then
		 * something went wrong. No need to try the next state
		 * handle (if available), bail out here.
		 * Also, if for some reason, the State Handle didn't get
		 * instantiated although the descriptor has finished
		 * without any error (HW optimizations for later
		 * CAAM eras), then try again.
		 */
		if (ret)
			break;

		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
		    !(rdsta_val & (1 << sh_idx))) {
			ret = -EAGAIN;
			break;
		}

		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
		/* Clear the contents before recreating the descriptor */
		memset(desc, 0x00, CAAM_CMD_SZ * 7);
	}

	kfree(desc);

	return ret;
}
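/*
 * How the two RNG4 helpers above typically combine in the controller
 * probe path: start with a low entropy delay and raise it each time
 * instantiation fails with -EAGAIN. A sketch assuming
 * RTSDCTL_ENT_DLY_MIN/MAX bound the delay; the function name and the
 * +400 back-off step are illustrative.
 */
static int example_rng4_init(struct platform_device *pdev,
			     struct device *ctrldev, int inst_handles,
			     int gen_sk)
{
	int ret, ent_delay = RTSDCTL_ENT_DLY_MIN;

	do {
		/* reprogram the sample frequency, then retry */
		kick_trng(pdev, ent_delay);
		ent_delay += 400;
		ret = instantiate_rng(ctrldev, inst_handles, gen_sk);
	} while (ret == -EAGAIN && ent_delay < RTSDCTL_ENT_DLY_MAX);

	return ret;
}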
static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
{
	static const struct {
		u16 ip_id;
		u8 maj_rev;
		u8 era;
	} id[] = {
		{0x0A10, 1, 1},
		{0x0A10, 2, 2},
		{0x0A12, 1, 3},
		{0x0A14, 1, 3},
		{0x0A14, 2, 4},
		{0x0A16, 1, 4},
		{0x0A10, 3, 4},
		{0x0A11, 1, 4},
		{0x0A18, 1, 4},
		{0x0A11, 2, 5},
		{0x0A12, 2, 5},
		{0x0A13, 1, 5},
		{0x0A1C, 1, 5}
	};
	u32 ccbvid, id_ms;
	u8 maj_rev, era;
	u16 ip_id;
	int i;

	ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
	era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
	if (era)	/* This is '0' prior to CAAM ERA-6 */
		return era;

	id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
	ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
	maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;

	for (i = 0; i < ARRAY_SIZE(id); i++)
		if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
			return id[i].era;

	return -ENOTSUPP;
}
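/*
 * Illustrative caller for the era lookup above; the function name and
 * message text are assumptions, but a negative return (-ENOTSUPP) must
 * be handled before the era is used for capability decisions.
 */
static void example_report_era(struct device *dev,
			       struct caam_ctrl __iomem *ctrl)
{
	int era = caam_get_era_from_hw(ctrl);

	if (era < 0)
		dev_warn(dev, "unable to determine CAAM era\n");
	else
		dev_info(dev, "CAAM era %d\n", era);
}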
static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/*
	 * mask interrupts since we are going to poll
	 * for reset completion status
	 */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* initiate flush (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* initiate reset */
	timeout = 100000;
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	return 0;
}
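/*
 * Sketch of the ordering constraint the reset helper implies: the ring
 * must be flushed and reset before its base/size registers are
 * (re)programmed. The function name and the elided ring setup are
 * illustrative only.
 */
static int example_jr_init(struct device *dev)
{
	int error;

	error = caam_reset_hw_jr(dev);	/* flush, then reset */
	if (error)
		return error;

	/* ... allocate rings and program inpring/outring bases here ... */

	return 0;
}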
/**
 * caam_jr_deregister() - Deregister an API and release the queue.
 * Returns 0 if OK, -EBUSY if queue still contains pending entries
 * or unprocessed results at the time of the call
 * @rdev - points to the dev that identifies the queue to
 *	   be released.
 **/
int caam_jr_deregister(struct device *rdev)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
	struct caam_drv_private *ctrlpriv;

	/* Get the owning controller's private space */
	ctrlpriv = dev_get_drvdata(jrpriv->parentdev);

	/* Make sure ring is empty before release */
	if (rd_reg32(&jrpriv->rregs->outring_used) ||
	    (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
		return -EBUSY;

	/* Release ring */
	spin_lock(&ctrlpriv->jr_alloc_lock);
	jrpriv->assign = JOBR_UNASSIGNED;
	spin_unlock(&ctrlpriv->jr_alloc_lock);

	return 0;
}
/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 cha_inst, pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/* Determine public key hardware accelerator presence. */
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return -ENODEV;

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

	return err;
}
/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *	  been assigned prior by caam_jr_register().
 * @desc: points to a job descriptor that executes our request. All
 *	  descriptors (and all referenced data) must be in a DMAable
 *	  region, and all data references must be physical addresses
 *	  accessible to CAAM (i.e. within a PAMU window granted
 *	  to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *	  of this request. This has the form:
 *	  callback(struct device *dev, u32 *desc, u32 stat, void *arg)
 *	  where:
 *	  @dev:    contains the job ring device that processed this
 *		   response.
 *	  @desc:   descriptor that initiated the request, same as
 *		   "desc" passed to caam_jr_enqueue().
 *	  @status: untranslated status received from CAAM. See the
 *		   reference manual for a detailed description of
 *		   error meaning, or see the JRSTA definitions in the
 *		   register header file
 *	  @areq:   optional pointer to an argument passed with the
 *		   original request
 * @areq: optional pointer to a user argument for use at callback
 *	  time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
	tail = ACCESS_ONCE(jrp->tail);

	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_bh(&jrp->inplock);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;

	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_bh(&jrp->inplock);

	return 0;
}
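/*
 * Callers commonly wrap caam_jr_enqueue() in a synchronous helper: the
 * callback records the CAAM status and fires a completion the submitter
 * waits on. A minimal sketch of that pattern (needs <linux/completion.h>;
 * the struct and function names are illustrative, not part of this API):
 */
struct example_result {
	int err;
	struct completion done;
};

static void example_done(struct device *dev, u32 *desc, u32 status,
			 void *areq)
{
	struct example_result *res = areq;

	res->err = status;	/* untranslated CAAM status */
	complete(&res->done);
}

static int example_run_desc_sync(struct device *jrdev, u32 *desc)
{
	struct example_result res;
	int ret;

	init_completion(&res.done);

	ret = caam_jr_enqueue(jrdev, desc, example_done, &res);
	if (ret)
		return ret;

	wait_for_completion(&res.done);

	return res.err ? -EIO : 0;
}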
/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses, kick
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * If JobR error, we have more development work to do.
	 * Flag a bug now, but we really need to shut down and
	 * restart the queue (and fix code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
	preempt_enable();

	return IRQ_HANDLED;
}
/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	dma_addr_t outbusaddr;
	void *userarg;
	unsigned long flags;

	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
	dma_sync_single_for_cpu(dev, outbusaddr,
				sizeof(struct jr_outentry) * JOBR_DEPTH,
				DMA_FROM_DEVICE);

	spin_lock_irqsave(&jrp->outlock, flags);

	head = ACCESS_ONCE(jrp->head);
	sw_idx = tail = jrp->tail;

	while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
	       rd_reg32(&jrp->rregs->outring_used)) {
		hw_idx = jrp->out_ring_read_index;
		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			smp_read_barrier_depends();

			if (jrp->outring[hw_idx].desc ==
			    jrp->entinfo[sw_idx].desc_addr_dma)
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params for use outside of lock */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = jrp->outring[hw_idx].jrstatus;

		smp_mb();

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail. Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
				smp_read_barrier_depends();
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		spin_unlock_irqrestore(&jrp->outlock, flags);

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);

		spin_lock_irqsave(&jrp->outlock, flags);

		head = ACCESS_ONCE(jrp->head);
		sw_idx = tail = jrp->tail;
	}

	spin_unlock_irqrestore(&jrp->outlock, flags);

	/* reenable / unmask IRQs */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
}
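/*
 * The ring indices above rely on JOBR_DEPTH being a power of two (e.g.
 * 512 with the default ring-size config - an assumption here), so a
 * wrap is a mask rather than a modulo. A tiny illustrative helper, not
 * part of the driver; CIRC_CNT()/CIRC_SPACE() from <linux/circ_buf.h>
 * then count occupied and free slots from the same masked indices.
 */
static inline int example_ring_next(int idx)
{
	/* e.g. with JOBR_DEPTH == 512: 511 -> 0 without a divide */
	return (idx + 1) & (JOBR_DEPTH - 1);
}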
static int snvs_secvio_probe(struct platform_device *pdev)
{
	struct device *svdev;
	struct snvs_secvio_drv_private *svpriv;
	struct device_node *np, *npirq;
	struct snvs_full __iomem *snvsregs;
	int i, error;
	u32 hpstate;

	svpriv = kzalloc(sizeof(struct snvs_secvio_drv_private), GFP_KERNEL);
	if (!svpriv)
		return -ENOMEM;

	svdev = &pdev->dev;
	dev_set_drvdata(svdev, svpriv);
	svpriv->pdev = pdev;
	np = pdev->dev.of_node;

	npirq = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-secvio");
	if (!npirq) {
		dev_err(svdev, "can't identify secvio interrupt\n");
		kfree(svpriv);
		return -EINVAL;
	}
	svpriv->irq = irq_of_parse_and_map(npirq, 0);
	if (svpriv->irq <= 0) {
		kfree(svpriv);
		return -EINVAL;
	}

	snvsregs = of_iomap(np, 0);
	if (!snvsregs) {
		dev_err(svdev, "register mapping failed\n");
		irq_dispose_mapping(svpriv->irq);
		kfree(svpriv);
		return -ENOMEM;
	}
	svpriv->svregs = (struct snvs_full __force *)snvsregs;

	svpriv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(svpriv->clk)) {
		dev_err(&pdev->dev, "can't get snvs clock\n");
		svpriv->clk = NULL;
	}

	/* Device data set up. Now init interrupt source descriptions */
	for (i = 0; i < MAX_SECVIO_SOURCES; i++) {
		svpriv->intsrc[i].intname = violation_src_name[i];
		svpriv->intsrc[i].handler = snvs_secvio_default;
	}

	/* Connect main handler */
	for_each_possible_cpu(i)
		tasklet_init(&svpriv->irqtask[i], snvs_secvio_dispatch,
			     (unsigned long)svdev);

	error = request_irq(svpriv->irq, snvs_secvio_interrupt, IRQF_SHARED,
			    "snvs-secvio", svdev);
	if (error) {
		dev_err(svdev, "can't connect secvio interrupt\n");
		irq_dispose_mapping(svpriv->irq);
		svpriv->irq = 0;
		iounmap(svpriv->svregs);
		kfree(svpriv);
		return -EINVAL;
	}

	clk_prepare_enable(svpriv->clk);

	/*
	 * Configure all sources as fatal violations except LP section,
	 * source #5 (typically used as an external tamper detect), and
	 * source #3 (typically unused). Once the transition to secure
	 * mode has occurred, these will be treated as fatal violations.
	 */
	wr_reg32(&svpriv->svregs->hp.secvio_intcfg,
		 HP_SECVIO_INTEN_SRC4 | HP_SECVIO_INTEN_SRC2 |
		 HP_SECVIO_INTEN_SRC1 | HP_SECVIO_INTEN_SRC0);

	hpstate = (rd_reg32(&svpriv->svregs->hp.status) &
		   HP_STATUS_SSM_ST_MASK) >> HP_STATUS_SSM_ST_SHIFT;
	dev_info(svdev, "violation handlers armed - %s state\n",
		 snvs_ssm_state_name[hpstate]);

	clk_disable(svpriv->clk);

	return 0;
}
/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 *			  the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @desc - descriptor to run
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *	   - -ENODEV if the DECO couldn't be acquired
 *	   - -EAGAIN if an error occurred while executing the descriptor
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
				       u32 *status)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	struct caam_deco __iomem *deco = ctrlpriv->deco;
	unsigned int timeout = 100000;
	u32 deco_dbg_reg, flags;
	int i;

	if (ctrlpriv->virt_en == 1) {
		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
		       --timeout)
			cpu_relax();

		timeout = 100000;
	}

	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);

	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) && --timeout)
		cpu_relax();

	if (!timeout) {
		dev_err(ctrldev, "failed to acquire DECO 0\n");
		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
		return -ENODEV;
	}

	for (i = 0; i < desc_len(desc); i++)
		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));

	flags = DECO_JQCR_WHL;
	/*
	 * If the descriptor length is 4 words or longer, then the
	 * FOUR bit in JRCTRL register must be set.
	 */
	if (desc_len(desc) >= 4)
		flags |= DECO_JQCR_FOUR;

	/* Instruct the DECO to execute it */
	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);

	timeout = 10000000;
	do {
		deco_dbg_reg = rd_reg32(&deco->desc_dbg);
		/*
		 * If an error occurred in the descriptor, then
		 * the DECO status field will be set to 0x0D
		 */
		if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
		    DESC_DBG_DECO_STAT_HOST_ERR)
			break;
		cpu_relax();
	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

	*status = rd_reg32(&deco->op_status_hi) & DECO_OP_STATUS_HI_ERR_MASK;

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);

	/* Mark the DECO as free */
	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);

	if (!timeout)
		return -EAGAIN;

	return 0;
}