int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned long		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = jiffies + HZ/10;	/* 100 msec */
	while (time_before(jiffies, timeout)) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		cpu_relax();
	}
	WARN_ON(!time_before(jiffies, timeout));
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
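/*
 * For context: a minimal sketch of a controller-level stop path that drives
 * cpdma_chan_stop() across every allocated channel. The channels[] array and
 * its bound are assumptions for illustration, not taken from the code above.
 */
static void cpdma_stop_all_channels_sketch(struct cpdma_ctlr *ctlr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (!ctlr->channels[i])
			continue;
		/* -EINVAL here only means the channel was not active */
		cpdma_chan_stop(ctlr->channels[i]);
	}
}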
/*
 * Variant 1: Linux spinlock version with CRC stripping and directed-packet
 * (port mask) status bits; a teardown-complete descriptor is mapped to
 * -ENOSYS before the completion callback runs.
 */
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			    CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	/* EOQ: hardware stopped at end of queue, restart from the new head */
	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}
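/*
 * Hedged sketch of the polling caller that pairs with the function above: it
 * drains completed descriptors until a quota is exhausted or
 * __cpdma_chan_process() reports the ring empty (-ENOENT) or the head
 * descriptor still owned by hardware (-EBUSY). Modeled on the mainline
 * cpdma_chan_process(); treat the exact shape as illustrative.
 */
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}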
/*
 * Variant 2: RTDM port of the same routine. The Linux spinlock is replaced
 * by an RTDM lock so the path can run from a real-time (Xenomai) context;
 * the CRC-strip and port-mask handling of the first variant are absent here.
 */
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	rtdm_lockctx_t			context;

	//rtdm_printk("__cpdma_chan_process(%x)\n", chan);

	rtdm_lock_get_irqsave(&chan->lock, context);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	rtdm_lock_put_irqrestore(&chan->lock, context);

	__cpdma_chan_free(chan, desc, outlen, status);
	return status;

unlock_ret:
	rtdm_lock_put_irqrestore(&chan->lock, context);
	return status;
}
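/*
 * Hedged sketch of how the RTDM variant is typically reached: an RTDM IRQ
 * handler runs in the primary domain and drains the channel directly. The
 * handler name and the way the channel is recovered from the IRQ handle are
 * assumptions for illustration; only the rtdm_irq_* API itself is standard.
 */
static int cpdma_rtdm_irq_sketch(rtdm_irq_t *irq_handle)
{
	struct cpdma_chan *chan =
		rtdm_irq_get_arg(irq_handle, struct cpdma_chan);

	/* drain until the ring is empty (-ENOENT) or hw owns the head (-EBUSY) */
	while (__cpdma_chan_process(chan) >= 0)
		;

	return RTDM_IRQ_HANDLED;
}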
/*
 * Variant 3: follows the software next pointer (sw_next) instead of the
 * hardware one, and guards the EOQ requeue so the head pointer is only
 * rewritten when another descriptor is actually queued and teardown is not
 * in progress. Note that status is declared u32 here, so the error paths
 * rely on implicit conversion back to a negative int at the return.
 */
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	u32				status, outlen;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);

	chan->head = desc_from_phys(pool, desc_read(desc, sw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if ((status & CPDMA_DESC_EOQ) && chan->head &&
	    !(status & CPDMA_DESC_TD_COMPLETE)) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);

	__cpdma_chan_free(chan, desc, outlen, (int)status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}
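/*
 * For reference, the register/descriptor accessors used throughout these
 * routines. These follow the definitions in mainline davinci_cpdma.c; if the
 * surrounding tree differs, treat them as a sketch of the expected shape.
 */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)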