static irqreturn_t tegra_adma_isr(int irq, void *dev_id)
{
	struct tegra_adma_chan *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	/* Read and clear the channel interrupt status; zero means it was not ours */
	status = tegra_adma_irq_clear(tdc);
	if (status == 0 || !tdc->desc) {
		spin_unlock_irqrestore(&tdc->vc.lock, flags);
		return IRQ_NONE;
	}

	/* Cyclic transfer period complete: schedule the client callback */
	vchan_cyclic_callback(&tdc->desc->vd);

	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
{
	struct st_fdma_dev *fdev = dev_id;
	irqreturn_t ret = IRQ_NONE;
	struct st_fdma_chan *fchan = &fdev->chans[0];
	unsigned long int_sta, clr;

	int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
	clr = int_sta;

	/* Each channel owns two status bits, so shift by 2 per channel */
	for (; int_sta != 0; int_sta >>= 2, fchan++) {
		if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
			continue;

		spin_lock(&fchan->vchan.lock);
		st_fdma_ch_sta_update(fchan, int_sta);

		if (fchan->fdesc) {
			if (!fchan->fdesc->iscyclic) {
				/* One-shot transfer: mark the descriptor complete */
				list_del(&fchan->fdesc->vdesc.node);
				vchan_cookie_complete(&fchan->fdesc->vdesc);
				fchan->fdesc = NULL;
				fchan->status = DMA_COMPLETE;
			} else {
				vchan_cyclic_callback(&fchan->fdesc->vdesc);
			}

			/* Start the next descriptor (if available) */
			if (!fchan->fdesc)
				st_fdma_xfer_desc(fchan);
		}

		spin_unlock(&fchan->vchan.lock);
		ret = IRQ_HANDLED;
	}

	/* Acknowledge every interrupt bit that was read at entry */
	fdma_write(fdev, clr, FDMA_INT_CLR_OFST);

	return ret;
}