/* Linux driver: stop a channel and tear down its descriptor queue. */
int cpdma_chan_stop(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        unsigned long flags;
        int ret;
        unsigned long timeout;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        chan->state = CPDMA_STATE_TEARDOWN;
        dma_reg_write(ctlr, chan->int_clear, chan->mask);

        /* trigger teardown */
        dma_reg_write(ctlr, chan->td, chan_linear(chan));

        /* wait for teardown complete */
        timeout = jiffies + HZ/10;      /* 100 msec */
        while (time_before(jiffies, timeout)) {
                u32 cp = chan_read(chan, cp);

                if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
                        break;
                cpu_relax();
        }
        WARN_ON(!time_before(jiffies, timeout));
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

        /* handle completed packets */
        spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
        spin_lock_irqsave(&chan->lock, flags);

        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
                struct cpdma_desc __iomem *desc = chan->head;
                dma_addr_t next_dma;

                next_dma = desc_read(desc, hw_next);
                chan->head = desc_from_phys(pool, next_dma);
                chan->count--;
                chan->stats.teardown_dequeue++;

                /* issue callback without locks held */
                spin_unlock_irqrestore(&chan->lock, flags);
                __cpdma_chan_free(chan, desc, 0, -ENOSYS);
                spin_lock_irqsave(&chan->lock, flags);
        }

        chan->state = CPDMA_STATE_IDLE;
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
/* Linux driver: stop the controller after tearing down all channels. */
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        ctlr->state = CPDMA_STATE_TEARDOWN;

        /* tear down every active channel first */
        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_stop(ctlr->channels[i]);
        }

        /* mask all channel interrupts, then halt the RX/TX engines */
        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

        ctlr->state = CPDMA_STATE_IDLE;

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
/* RTDM (RTnet) port: same logic, with the spinlock replaced by an rtdm_lock. */
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
        rtdm_lockctx_t context;
        int i;

        rtdm_lock_get_irqsave(&ctlr->lock, context);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                rtdm_lock_put_irqrestore(&ctlr->lock, context);
                return -EINVAL;
        }

        ctlr->state = CPDMA_STATE_TEARDOWN;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_stop(ctlr->channels[i]);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

        ctlr->state = CPDMA_STATE_IDLE;

        rtdm_lock_put_irqrestore(&ctlr->lock, context);
        return 0;
}
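Both cpdma_ctlr_stop() variants return -EINVAL when the controller is not in CPDMA_STATE_ACTIVE, so the call is harmless on a controller that is already idle. A minimal caller sketch, assuming a hypothetical driver context (example_priv and its dma field are illustrative, not part of the driver above):

static void example_shutdown(struct example_priv *priv)
{
        /* -EINVAL only means the controller was already stopped */
        if (cpdma_ctlr_stop(priv->dma) == -EINVAL)
                pr_debug("cpdma: controller already idle\n");
}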
/* Linux driver: start a channel and kick off any queued descriptors. */
int cpdma_chan_start(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EBUSY;
        }
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }
        dma_reg_write(ctlr, chan->int_set, chan->mask);
        chan->state = CPDMA_STATE_ACTIVE;
        if (chan->head) {
                /* resubmit descriptors queued while the channel was idle */
                chan_write(chan, hdp, desc_phys(pool, chan->head));
                if (chan->rxfree)
                        chan_write(chan, rxfree, chan->count);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
/* RTDM (RTnet) port: identical logic under an rtdm_lock. */
int cpdma_chan_start(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        rtdm_lockctx_t context;

        rtdm_lock_get_irqsave(&chan->lock, context);
        if (chan->state != CPDMA_STATE_IDLE) {
                rtdm_lock_put_irqrestore(&chan->lock, context);
                return -EBUSY;
        }
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                rtdm_lock_put_irqrestore(&chan->lock, context);
                return -EINVAL;
        }
        dma_reg_write(ctlr, chan->int_set, chan->mask);
        chan->state = CPDMA_STATE_ACTIVE;
        if (chan->head) {
                chan_write(chan, hdp, desc_phys(pool, chan->head));
                if (chan->rxfree)
                        chan_write(chan, rxfree, chan->count);
        }

        rtdm_lock_put_irqrestore(&chan->lock, context);
        return 0;
}
/* Linux driver: soft-reset the engine and bring the controller up. */
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EBUSY;
        }

        if (ctlr->params.has_soft_reset) {
                unsigned timeout = 10 * 100;

                dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
                /* poll up to ~10 ms for the reset bit to self-clear */
                while (timeout) {
                        if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
                                break;
                        udelay(10);
                        timeout--;
                }
                WARN_ON(!timeout);
        }

        /* clear all head and completion pointers */
        for (i = 0; i < ctlr->num_chan; i++) {
                __raw_writel(0, ctlr->params.txhdp + 4 * i);
                __raw_writel(0, ctlr->params.rxhdp + 4 * i);
                __raw_writel(0, ctlr->params.txcp + 4 * i);
                __raw_writel(0, ctlr->params.rxcp + 4 * i);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

        ctlr->state = CPDMA_STATE_ACTIVE;

        /* restart any channels that were created while the controller was idle */
        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_start(ctlr->channels[i]);
        }
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
/* RTDM (RTnet) port: same bring-up sequence under an rtdm_lock. */
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
        rtdm_lockctx_t context;
        int i;

        rtdm_lock_get_irqsave(&ctlr->lock, context);
        if (ctlr->state != CPDMA_STATE_IDLE) {
                rtdm_lock_put_irqrestore(&ctlr->lock, context);
                return -EBUSY;
        }

        if (ctlr->params.has_soft_reset) {
                unsigned long timeout = jiffies + HZ/10;

                dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
                /* busy-poll up to ~100 ms for the reset bit to self-clear */
                while (time_before(jiffies, timeout)) {
                        if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
                                break;
                }
                WARN_ON(!time_before(jiffies, timeout));
        }

        for (i = 0; i < ctlr->num_chan; i++) {
                __raw_writel(0, ctlr->params.txhdp + 4 * i);
                __raw_writel(0, ctlr->params.rxhdp + 4 * i);
                __raw_writel(0, ctlr->params.txcp + 4 * i);
                __raw_writel(0, ctlr->params.rxcp + 4 * i);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

        ctlr->state = CPDMA_STATE_ACTIVE;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_start(ctlr->channels[i]);
        }
        rtdm_lock_put_irqrestore(&ctlr->lock, context);
        return 0;
}
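The loop at the end of cpdma_ctlr_start() restarts every channel already attached to the controller, so the usual bring-up order is to create the channels first and start the controller afterwards. A hedged open-path sketch: cpdma_chan_create() and tx_chan_num() exist in the same driver but are not shown in this listing, and example_priv/example_tx_handler are assumed names:

static int example_open(struct example_priv *priv)
{
        priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
                                       example_tx_handler);
        if (IS_ERR(priv->txch))
                return PTR_ERR(priv->txch);

        /* also starts priv->txch via the channel loop in cpdma_ctlr_start() */
        return cpdma_ctlr_start(priv->dma);
}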
/* Linux driver: enable or disable a channel's completion interrupt. */
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
                      chan->mask);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}
/* RTDM (RTnet) port. */
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
        rtdm_lockctx_t context;

        rtdm_lock_get_irqsave(&chan->lock, context);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                rtdm_lock_put_irqrestore(&chan->lock, context);
                return -EINVAL;
        }

        dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
                      chan->mask);
        rtdm_lock_put_irqrestore(&chan->lock, context);

        return 0;
}
/* Linux driver: gate the host-error interrupt and all channel interrupts. */
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
        unsigned long flags;
        int i, reg;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
        dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
        }

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
/* RTDM (RTnet) port. */
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
        rtdm_lockctx_t context;
        int i, reg;

        rtdm_lock_get_irqsave(&ctlr->lock, context);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                rtdm_lock_put_irqrestore(&ctlr->lock, context);
                return -EINVAL;
        }

        reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
        dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
        }

        rtdm_lock_put_irqrestore(&ctlr->lock, context);
        return 0;
}
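cpdma_ctlr_int_ctrl() is what a driver toggles around deferred completion processing: mask in the hard interrupt handler, unmask once the queues have been drained. A sketch of that pattern for the Linux side (the NAPI plumbing and the example_process_completions() helper are assumptions, not part of the driver above):

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        struct example_priv *priv = dev_id;

        cpdma_ctlr_int_ctrl(priv->dma, false);  /* mask until processing is done */
        napi_schedule(&priv->napi);
        return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv = container_of(napi, struct example_priv, napi);
        int done = example_process_completions(priv, budget);   /* assumed helper */

        if (done < budget) {
                napi_complete(napi);
                cpdma_ctlr_int_ctrl(priv->dma, true);   /* unmask */
        }
        return done;
}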
/* Linux driver: acknowledge one interrupt line via the EOI vector register. */
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
        dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
/* RTDM (RTnet) port: unconditionally acknowledge both interrupt lines. */
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
{
        dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 1);     /* RX end-of-interrupt */
        dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 2);     /* TX end-of-interrupt */
}
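On this hardware the EOI vector codes 1 and 2 acknowledge the RX and TX interrupt pulses respectively, which is presumably why the RTDM variant writes both: a single real-time handler services RX and TX together. The parameterized Linux variant lets split handlers acknowledge only their own line; a hypothetical RX-only handler (the name and priv layout are assumptions):

static irqreturn_t example_rx_interrupt(int irq, void *dev_id)
{
        struct example_priv *priv = dev_id;

        cpdma_ctlr_eoi(priv->dma, 1);   /* ack only the RX pulse */
        /* ... defer RX completion processing ... */
        return IRQ_HANDLED;
}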