static void fdma_reset_channels(struct fdma *fdma)
{
        int chan_num;

        for (chan_num = fdma->ch_min; chan_num <= fdma->ch_max; chan_num++)
                writel(0, CMD_STAT_REG(chan_num));
}
static inline void fdma_handle_fdma_err_irq(struct fdma_channel *channel)
{
        struct fdma *fdma = channel->fdma;
        void (*err_cb)(unsigned long) = channel->params->err_cb;
        unsigned long err_cb_parm = channel->params->err_cb_parm;

        spin_lock(&fdma->channels_lock);

        /* The error code occupies bits 2-4 of CMD_STAT */
        fdma_dbg(fdma, "%s: FDMA error %d on channel %d\n", __func__,
                 (readl(CMD_STAT_REG(channel->chan_num)) >> 2) & 0x7,
                 channel->chan_num);

        /* According to the spec, in case of an error the transfer "may be
         * aborted" (or may not be, sigh), so let's make the situation
         * clear and stop it explicitly now. */
        writel(MBOX_CMD_PAUSE_CHANNEL << (channel->chan_num * 2),
               fdma->io_base + fdma->regs.cmd_set);
        channel->sw_state = FDMA_STOPPING;

        spin_unlock(&fdma->channels_lock);

        wake_up(&channel->dma_chan->wait_queue);

        if (err_cb) {
                if (channel->params->err_cb_isr)
                        err_cb(err_cb_parm);
                else
                        tasklet_schedule(&channel->fdma_error);
        }
}
static int st_fdma_debugfs_chan_show(struct seq_file *m, void *v)
{
        struct st_fdma_chan *fchan = m->private;
        char buffer[80];
        int i;

        seq_printf(m, "--- %s (0x%p) channel %d (%s) ---\n",
                   dev_name(fchan->fdev->dev), fchan->fdev->io_base,
                   fchan->id, st_fdma_debugfs_fchan_state[fchan->state]);

        SEQ_PRINTF(m, "CMD_STAT", CMD_STAT_REG(fchan));
        SEQ_PRINTF(m, "PTR", NODE_PTR_REG(fchan));
        SEQ_PRINTF(m, "CTRL", NODE_CTRL_REG(fchan));
        SEQ_PRINTF(m, "COUNT", NODE_COUNT_REG(fchan));
        SEQ_PRINTF(m, "SADDR", NODE_SADDR_REG(fchan));
        SEQ_PRINTF(m, "DADDR", NODE_DADDR_REG(fchan));

        switch (fchan->type) {
        case ST_DMA_TYPE_TELSS:
                SEQ_PRINTF(m, "NPARAM", NODE_TELSS_NODE_PARAM_REG(fchan));
                for (i = 0; i < ST_FDMA_LLU_TELSS_HANDSETS; ++i) {
                        sprintf(buffer, "HPARAM[%d]", i);
                        SEQ_PRINTF(m, buffer,
                                   NODE_TELSS_HANDSET_PARAMn_REG(fchan, i));
                }
                break;
        case ST_DMA_TYPE_MCHI:
                SEQ_PRINTF(m, "MCHI_LENGTH", NODE_MCHI_LENGTH_REG(fchan));
                SEQ_PRINTF(m, "MCHI_RX_FIFO_THR_ADDR",
                           NODE_MCHI_RX_FIFO_THR_ADDR_REG(fchan));
                SEQ_PRINTF(m, "MCHI_DSTRIDE", NODE_MCHI_DSTRIDE_REG(fchan));
                break;
        default:
                /* No additional registers to print */
                break;
        }

        if (fchan->dreq) {
                sprintf(buffer, "REQ_CONTROL[%d]", fchan->dreq->request_line);
                SEQ_PRINTF(m, buffer,
                           REQ_CONTROLn_REG(fchan->fdev,
                                            fchan->dreq->request_line));
                sprintf(buffer, "TRANSFER DIRECTION");
                seq_printf(m, "%-30s = %s\n", buffer,
                           st_fdma_debugfs_direction[fchan->dreq->direction]);
        }

        seq_printf(m, "\n");

        return 0;
}
static inline void fdma_handle_fdma_completion_irq(struct fdma_channel *channel)
{
        struct fdma *fdma = channel->fdma;
        void (*comp_cb)(unsigned long) = channel->params->comp_cb;
        unsigned long comp_cb_parm = channel->params->comp_cb_parm;

        spin_lock(&fdma->channels_lock);

        switch (fdma_get_engine_status(channel)) {
        case FDMA_CHANNEL_PAUSED:
                switch (channel->sw_state) {
                case FDMA_RUNNING:      /* Hit a pause node */
                case FDMA_PAUSING:
                        channel->sw_state = FDMA_PAUSED;
                        break;
                case FDMA_STOPPING:
                        writel(0, CMD_STAT_REG(channel->chan_num));
                        channel->sw_state = FDMA_IDLE;
                        break;
                default:
                        BUG();
                }
                break;
        case FDMA_CHANNEL_IDLE:
                switch (channel->sw_state) {
                case FDMA_RUNNING:
                case FDMA_PAUSING:
                case FDMA_STOPPING:
                        channel->sw_state = FDMA_IDLE;
                        break;
                default:
                        BUG();
                }
                break;
        case FDMA_CHANNEL_RUNNING:
                break;
        default:
                fdma_dbg(fdma, "ERR::FDMA2 unknown interrupt status\n");
        }

        spin_unlock(&fdma->channels_lock);

        wake_up(&channel->dma_chan->wait_queue);

        if (comp_cb) {
                if (channel->params->comp_cb_isr)
                        comp_cb(comp_cb_parm);
                else
                        tasklet_schedule(&channel->fdma_complete);
        }
}
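/*
 * Illustrative sketch only (not part of the original driver): both the
 * error and completion paths above either invoke the client callback
 * directly from interrupt context (when *_cb_isr is set) or defer it to
 * the per-channel tasklet, so a callback must be safe to run in
 * atomic/softirq context.  "example_client" and example_comp_cb() are
 * hypothetical names used purely for illustration.
 */
struct example_client {
        struct completion xfer_done;
};

static void example_comp_cb(unsigned long param)
{
        struct example_client *client = (struct example_client *)param;

        /* Safe in atomic context: just signal whoever is waiting */
        complete(&client->xfer_done);
}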
static void fdma_start_channel(struct fdma_channel *channel,
                               unsigned long start_addr,
                               unsigned long initial_count)
{
        struct fdma *fdma = channel->fdma;
        u32 cmd_sta_value = (start_addr | CMDSTAT_FDMA_START_CHANNEL);

        /* See comment in fdma_get_residue() for why we do this. */
        writel(initial_count, fdma->io_base +
               (channel->chan_num * NODE_DATA_OFFSET) + fdma->regs.cntn);

        writel(cmd_sta_value, CMD_STAT_REG(channel->chan_num));

        writel(MBOX_CMD_START_CHANNEL << (channel->chan_num * 2),
               fdma->io_base + fdma->regs.cmd_set);
}
/* Must only be called when the channel is in the paused state */
static int fdma_unpause(struct fdma_channel *channel)
{
        struct fdma *fdma = channel->fdma;
        unsigned long irqflags = 0;
        u32 cmd_sta_value;

        spin_lock_irqsave(&fdma->channels_lock, irqflags);

        if (channel->sw_state != FDMA_PAUSED) {
                spin_unlock_irqrestore(&fdma->channels_lock, irqflags);
                return -EBUSY;
        }

        cmd_sta_value = readl(CMD_STAT_REG(channel->chan_num));
        cmd_sta_value &= ~CMDSTAT_FDMA_CMD_MASK;
        cmd_sta_value |= CMDSTAT_FDMA_RESTART_CHANNEL;
        writel(cmd_sta_value, CMD_STAT_REG(channel->chan_num));

        writel(MBOX_CMD_START_CHANNEL << (channel->chan_num * 2),
               fdma->io_base + fdma->regs.cmd_set);

        channel->sw_state = FDMA_RUNNING;

        spin_unlock_irqrestore(&fdma->channels_lock, irqflags);

        return 0;
}
static int fdma_stop(struct fdma_channel *channel)
{
        struct fdma *fdma = channel->fdma;
        unsigned long cmd_val = MBOX_CMD_PAUSE_CHANNEL << (channel->chan_num * 2);
        unsigned long irqflags = 0;

        spin_lock_irqsave(&fdma->channels_lock, irqflags);

        switch (channel->sw_state) {
        case FDMA_IDLE:
        case FDMA_CONFIGURED:
        case FDMA_PAUSED:
                /* Hardware is already idle, simply change state */
                channel->sw_state = FDMA_IDLE;
                writel(0, CMD_STAT_REG(channel->chan_num));
                spin_unlock_irqrestore(&fdma->channels_lock, irqflags);
                break;
        case FDMA_RUNNING:
                /* Hardware is running, send the pause command */
                writel(cmd_val, fdma->io_base + fdma->regs.cmd_set);
                /* Fall through */
        case FDMA_PAUSING:
        case FDMA_STOPPING:
                /* Hardware is pausing already, wait for the interrupt */
                channel->sw_state = FDMA_STOPPING;
                spin_unlock_irqrestore(&fdma->channels_lock, irqflags);
#if 0
                /* In some cases this is called from a context which cannot
                 * block, so disable the wait for the moment. */
                wait_event(channel->cur_cfg->wait_queue,
                           channel->sw_state == FDMA_IDLE);
#endif
                break;
        }

        return 0;
}
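/*
 * Hypothetical usage sketch (not from the original source): stopping a
 * running channel and re-arming it once the completion interrupt has
 * moved the software state back to FDMA_IDLE.  start_addr and
 * initial_count are placeholders for a real node list prepared by the
 * caller; the wait on dma_chan->wait_queue mirrors the wake_up() calls
 * in the interrupt handlers above.
 */
static void example_restart_channel(struct fdma_channel *channel,
                                    unsigned long start_addr,
                                    unsigned long initial_count)
{
        /* Ask the hardware to pause; the ISR finishes the transition */
        fdma_stop(channel);

        /* Wait until the completion handler marks the channel idle */
        wait_event(channel->dma_chan->wait_queue,
                   channel->sw_state == FDMA_IDLE);

        /* Re-arm the channel with the new node list */
        fdma_start_channel(channel, start_addr, initial_count);
}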
/* Returns the number of bytes left to transfer: the remainder of the
 * current node plus the size of any nodes not yet processed. */
static int fdma_get_residue(struct dma_channel *dma_chan)
{
        struct fdma_channel *channel = dma_chan->priv_data;
        struct fdma *fdma = channel->fdma;
        struct stm_dma_params *params = channel->params;
        unsigned long irqflags;
        u32 count = 0;

        spin_lock_irqsave(&fdma->channels_lock, irqflags);

        if (likely(channel->sw_state != FDMA_IDLE)) {
                struct fdma_xfer_descriptor *desc =
                        (struct fdma_xfer_descriptor *)params->priv;
                void __iomem *chan_base = fdma->io_base +
                        (channel->chan_num * NODE_DATA_OFFSET);
                unsigned long current_node_phys;
                unsigned long stat1, stat2;
                struct fdma_llu_node *current_node;
                int node_num;

                /* Get a consistent snapshot of the current node and count */
                do {
                        stat1 = readl(CMD_STAT_REG(channel->chan_num));
                        count = readl(chan_base + fdma->regs.cntn);
                        stat2 = readl(CMD_STAT_REG(channel->chan_num));
                } while (stat1 != stat2);

                /* Find which node of the descriptor is being processed */
                current_node_phys = stat1 & ~0x1f;
                for (node_num = 0, current_node = desc->llu_nodes;
                     current_node->dma_addr != current_node_phys;
                     node_num++, current_node++)
                        BUG_ON(node_num == desc->alloced_nodes);

                switch (stat1 & 3) {
                case FDMA_CHANNEL_IDLE:
                        /* The channel has stopped, but we haven't taken the
                         * interrupt to change the ->sw_state field yet. We
                         * could legitimately return zero here, but instead
                         * pretend we haven't quite finished yet. Is this the
                         * right thing to do? */
                        count = 1;
                        goto unlock;
                case FDMA_CHANNEL_RUNNING:
                case FDMA_CHANNEL_PAUSED:
                        /* Unfortunately the firmware appears to modify
                         * CMD_STAT before it has modified the COUNT.
                         * However we write the count in
                         * fdma_start_channel() so can assume it is
                         * valid. */
                        break;
                case CMDSTAT_FDMA_START_CHANNEL:
                        /* The channel hasn't started running yet, so the
                         * count hasn't yet been loaded from the node. But
                         * again the value was written in
                         * fdma_start_channel() so the value read from
                         * hardware is valid. */
                        break;
                }

                /* Add the sizes of all nodes after the current one */
                while (++node_num < desc->alloced_nodes) {
                        current_node++;
                        count += current_node->virt_addr->size_bytes;
                }
        }

unlock:
        spin_unlock_irqrestore(&fdma->channels_lock, irqflags);

        return count;
}
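/*
 * Illustrative only (not part of the original driver): a client draining
 * a transfer could poll the residue until it drops below some threshold.
 * This assumes the caller is allowed to sleep; the msleep(1) granularity
 * is an arbitrary placeholder.
 */
static void example_wait_for_drain(struct dma_channel *dma_chan,
                                   int threshold_bytes)
{
        while (fdma_get_residue(dma_chan) > threshold_bytes)
                msleep(1);
}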
static int fdma_get_engine_status(struct fdma_channel *channel)
{
        struct fdma *fdma = channel->fdma;

        return readl(CMD_STAT_REG(channel->chan_num)) & 3;
}
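/*
 * Illustrative sketch (not part of the original driver): busy-wait until
 * the hardware leaves the RUNNING state, built on fdma_get_engine_status()
 * above.  The timeout handling and udelay() granularity are assumptions,
 * not behaviour taken from the original source.
 */
static int example_poll_until_not_running(struct fdma_channel *channel,
                                          unsigned int timeout_us)
{
        while (fdma_get_engine_status(channel) == FDMA_CHANNEL_RUNNING) {
                if (!timeout_us--)
                        return -ETIMEDOUT;
                udelay(1);
        }

        return 0;
}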