static int hsi_debug_gdd_show(struct seq_file *m, void *p)
{
	struct hsi_dev *hsi_ctrl = m->private;
	void __iomem *base = hsi_ctrl->base;
	int lch;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n",
		   hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG));
	seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n",
		   hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG));

	if (!hsi_driver_device_is_hsi(pdev)) {
		seq_printf(m, "HW_ID\t\t: 0x%08x\n",
			   hsi_inl(base, HSI_SSI_GDD_HW_ID_REG));
		seq_printf(m, "PPORT_ID\t: 0x%08x\n",
			   hsi_inl(base, HSI_SSI_GDD_PPORT_ID_REG));
		seq_printf(m, "MPORT_ID\t: 0x%08x\n",
			   hsi_inl(base, HSI_SSI_GDD_MPORT_ID_REG));
		seq_printf(m, "TEST\t\t: 0x%08x\n",
			   hsi_inl(base, HSI_SSI_GDD_TEST_REG));
	}

	seq_printf(m, "GCR\t\t: 0x%08x\n", hsi_inl(base, HSI_GDD_GCR_REG));

	for (lch = 0; lch < hsi_ctrl->gdd_chan_count; lch++) {
		seq_printf(m, "\nGDD LCH %d\n=========\n", lch);
		seq_printf(m, "CSDP\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CSDP_REG(lch)));
		seq_printf(m, "CCR\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CCR_REG(lch)));
		seq_printf(m, "CICR\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CICR_REG(lch)));
		seq_printf(m, "CSR\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CSR_REG(lch)));
		seq_printf(m, "CSSA\t\t: 0x%08x\n",
			   hsi_inl(base, HSI_GDD_CSSA_REG(lch)));
		seq_printf(m, "CDSA\t\t: 0x%08x\n",
			   hsi_inl(base, HSI_GDD_CDSA_REG(lch)));
		seq_printf(m, "CEN\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CEN_REG(lch)));
		seq_printf(m, "CSAC\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CSAC_REG(lch)));
		seq_printf(m, "CDAC\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CDAC_REG(lch)));
		if (!hsi_driver_device_is_hsi(pdev))
			seq_printf(m, "CLNK_CTRL\t: 0x%04x\n",
				   hsi_inw(base,
					   HSI_SSI_GDD_CLNK_CTRL_REG(lch)));
	}

	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	return 0;
}
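
/*
 * A minimal sketch of how a seq_file show() routine like the one above is
 * typically exposed through debugfs. The file name "gdd" and the debugfs
 * directory pointer are assumptions for illustration; the real registration
 * code is not part of this excerpt.
 */
static int hsi_debug_gdd_open(struct inode *inode, struct file *file)
{
	return single_open(file, hsi_debug_gdd_show, inode->i_private);
}

static const struct file_operations hsi_debug_gdd_fops = {
	.open		= hsi_debug_gdd_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * e.g.: debugfs_create_file("gdd", S_IRUGO, dir, hsi_ctrl,
 *			     &hsi_debug_gdd_fops);
 */
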
/**
 * hsi_driver_cancel_write_dma - Cancel an ongoing GDD [DMA] write for the
 *				specified HSI channel.
 * @hsi_ch: pointer to the hsi_channel on which to cancel the DMA write.
 *
 * hsi_controller lock must be held before calling this function.
 *
 * Return: -ENXIO : No DMA channel found for the specified HSI channel
 *	   -ECANCELED : DMA cancel success, data not transferred to TX FIFO
 *	   0 : DMA transfer is already over, data already transferred to TX FIFO
 *
 * Note: whatever the returned value, the write callback will not be called
 *       after a write cancel.
 */
int hsi_driver_cancel_write_dma(struct hsi_channel *hsi_ch)
{
	int lch = hsi_ch->write_data.lch;
	unsigned int port = hsi_ch->hsi_port->port_number;
	unsigned int channel = hsi_ch->channel_number;
	struct hsi_dev *hsi_ctrl = hsi_ch->hsi_port->hsi_controller;
	u16 ccr, gdd_csr;
	long buff_offset;
	u32 status_reg;
	dma_addr_t dma_h;
	size_t size;

	if (lch < 0) {
		dev_err(&hsi_ch->dev->device, "No DMA channel found for HSI "
				"channel %d\n", hsi_ch->channel_number);
		return -ENXIO;
	}
	ccr = hsi_inw(hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
	if (!(ccr & HSI_CCR_ENABLE)) {
		dev_dbg(&hsi_ch->dev->device, "Write cancel on not "
			"enabled logical channel %d CCR REG 0x%04X\n",
			lch, ccr);
	}
	status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
	status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));

	/*
	 * Clear the CSR register by reading it, as it is cleared
	 * automatically by HW after a SW read.
	 */
	gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));
	hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
			HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
			HSI_SYS_GDD_MPU_IRQ_STATUS_REG);

	/* Unmap DMA region; CEN holds the element count, 4 bytes per element */
	dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CSSA_REG(lch));
	size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
	dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_TO_DEVICE);

	buff_offset = hsi_hst_bufstate_f_reg(hsi_ctrl, port, channel);
	if (buff_offset >= 0)
		hsi_outl_and(~HSI_BUFSTATE_CHANNEL(channel), hsi_ctrl->base,
			     buff_offset);

	hsi_reset_ch_write(hsi_ch);
	return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
}
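
/*
 * Hypothetical caller sketch, illustrating the locking contract and the
 * return-value convention documented above. hsi_write_cancel() is an assumed
 * wrapper name, not part of this excerpt; the lock usage mirrors the
 * spin_lock()/spin_unlock() calls on hsi_ctrl->lock seen elsewhere in this
 * file.
 */
static int hsi_write_cancel(struct hsi_channel *ch)
{
	struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
	int err;

	spin_lock(&hsi_ctrl->lock);
	err = hsi_driver_cancel_write_dma(ch);
	spin_unlock(&hsi_ctrl->lock);

	/* 0: data already reached the TX FIFO; -ECANCELED: aborted in time */
	return err;
}
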
/**
 * hsi_driver_cancel_read_dma - Cancel an ongoing GDD [DMA] read for the
 *				specified HSI channel.
 * @hsi_ch: pointer to the hsi_channel on which to cancel the DMA read.
 *
 * hsi_controller lock must be held before calling this function.
 *
 * Return: -ENXIO : No DMA channel found for specified HSI channel
 *	   -ECANCELED : DMA cancel success, data not available at expected
 *	                address.
 *	   0 : DMA transfer is already over, data already available at
 *	       expected address.
 *
 * Note: whatever the returned value, the read callback will not be called
 *       after a cancel.
 */
int hsi_driver_cancel_read_dma(struct hsi_channel *hsi_ch)
{
	int lch = hsi_ch->read_data.lch;
	struct hsi_dev *hsi_ctrl = hsi_ch->hsi_port->hsi_controller;
	u16 ccr, gdd_csr;
	u32 status_reg;
	dma_addr_t dma_h;
	size_t size;

	/* Re-enable interrupts for polling if needed */
	if (hsi_ch->flags & HSI_CH_RX_POLL)
		hsi_driver_enable_read_interrupt(hsi_ch, NULL);

	if (lch < 0) {
		dev_err(&hsi_ch->dev->device, "No DMA channel found for HSI "
			"channel %d\n", hsi_ch->channel_number);
		return -ENXIO;
	}

	ccr = hsi_inw(hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
	if (!(ccr & HSI_CCR_ENABLE)) {
		dev_dbg(&hsi_ch->dev->device, "Read cancel on not "
			"enabled logical channel %d CCR REG 0x%04X\n",
			lch, ccr);
	}

	status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
	status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));

	/*
	 * Clear the CSR register by reading it, as it is cleared
	 * automatically by HW after a SW read.
	 */
	gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));
	hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
		HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
		HSI_SYS_GDD_MPU_IRQ_STATUS_REG);

	/* Unmap DMA region - Access to the buffer is now safe */
	dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CDSA_REG(lch));
	size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
	dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_FROM_DEVICE);

	hsi_reset_ch_read(hsi_ch);
	return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
}
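
/*
 * The read-side counterpart of the sketch above; hsi_read_cancel() is an
 * assumed wrapper name. The return value tells the caller whether the RX
 * buffer contents can be trusted.
 */
static int hsi_read_cancel(struct hsi_channel *ch)
{
	struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
	int err;

	spin_lock(&hsi_ctrl->lock);
	err = hsi_driver_cancel_read_dma(ch);
	spin_unlock(&hsi_ctrl->lock);

	/*
	 * 0: the DMA had already completed, the buffer holds valid data;
	 * -ECANCELED: the transfer was aborted, buffer contents undefined.
	 */
	return err;
}
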
static void do_hsi_gdd_lch(struct hsi_dev *hsi_ctrl, unsigned int gdd_lch)
{
	void __iomem *base = hsi_ctrl->base;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	struct hsi_channel *ch;
	unsigned int port;
	unsigned int channel;
	unsigned int is_read_path;
	u32 gdd_csr;
	dma_addr_t dma_h;
	size_t size;
	int fifo, fifo_words_avail;

	if (hsi_get_info_from_gdd_lch(hsi_ctrl, gdd_lch, &port, &channel,
				      &is_read_path) < 0) {
		dev_err(hsi_ctrl->dev, "Unable to match the DMA channel %d with"
			" an HSI channel\n", gdd_lch);
		return;
	}

	dev_dbg(hsi_ctrl->dev, "DMA event on gdd_lch=%d => port=%d, "
		"channel=%d, read=%d\n", gdd_lch, port, channel, is_read_path);

	hsi_outl_and(~HSI_GDD_LCH(gdd_lch), base,
		     HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	/* Warning: CSR register is cleared automatically by HW after SW read */
	gdd_csr = hsi_inw(base, HSI_GDD_CSR_REG(gdd_lch));

	if (!(gdd_csr & HSI_CSR_TOUT)) {
		if (is_read_path) {	/* Read path */
			dma_h = hsi_inl(base, HSI_GDD_CDSA_REG(gdd_lch));
			size = hsi_inw(base, HSI_GDD_CEN_REG(gdd_lch)) * 4;
			dma_sync_single_for_cpu(hsi_ctrl->dev, dma_h, size,
						DMA_FROM_DEVICE);
			dma_unmap_single(hsi_ctrl->dev, dma_h, size,
					 DMA_FROM_DEVICE);
			ch = hsi_ctrl_get_ch(hsi_ctrl, port, channel);
			hsi_reset_ch_read(ch);

			dev_dbg(hsi_ctrl->dev, "Calling ch %d read callback "
					"(size %d).\n", channel,  size/4);
			spin_unlock(&hsi_ctrl->lock);
			ch->read_done(ch->dev, size / 4);
			spin_lock(&hsi_ctrl->lock);

			/* Check if FIFO is correctly emptied */
			if (hsi_driver_device_is_hsi(pdev)) {
				fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
				if (unlikely(fifo < 0)) {
					dev_err(hsi_ctrl->dev, "No valid FIFO "
						"id found for channel %d.\n",
						channel);
					return;
				}
				fifo_words_avail =
					hsi_get_rx_fifo_occupancy(hsi_ctrl,
								fifo);
				if (fifo_words_avail)
					dev_dbg(hsi_ctrl->dev,
						"FIFO %d not empty "
						"after DMA copy, remaining "
						"%d/%d frames\n",
						fifo, fifo_words_avail,
						HSI_HSR_FIFO_SIZE);
			}
			/* Re-enable interrupts for polling if needed */
			if (ch->flags & HSI_CH_RX_POLL)
				hsi_driver_enable_read_interrupt(ch, NULL);
		} else {	/* Write path */
			dma_h = hsi_inl(base, HSI_GDD_CSSA_REG(gdd_lch));
			size = hsi_inw(base, HSI_GDD_CEN_REG(gdd_lch)) * 4;
			dma_unmap_single(hsi_ctrl->dev, dma_h, size,
					 DMA_TO_DEVICE);
			ch = hsi_ctrl_get_ch(hsi_ctrl, port, channel);
			hsi_reset_ch_write(ch);

			dev_dbg(hsi_ctrl->dev, "Calling ch %d write callback "
					"(size %d).\n", channel, size/4);
			spin_unlock(&hsi_ctrl->lock);
			ch->write_done(ch->dev, size / 4);
			spin_lock(&hsi_ctrl->lock);
		}
	} else {
		dev_err(hsi_ctrl->dev, "Time-out overflow Error on GDD transfer"
			" on gdd channel %d\n", gdd_lch);
		spin_unlock(&hsi_ctrl->lock);
		/* TODO: need to perform a DMA soft reset */
		hsi_port_event_handler(&hsi_ctrl->hsi_port[port - 1],
				       HSI_EVENT_ERROR, NULL);
		spin_lock(&hsi_ctrl->lock);
	}
}
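
/*
 * A minimal sketch, assuming the usual dispatch pattern for GDD events: an
 * interrupt bottom half reads the pending-and-enabled bits and hands each set
 * logical channel to do_hsi_gdd_lch(). The function name below is an
 * assumption and the real dispatcher is outside this excerpt, but the sketch
 * reuses only helpers already seen above (hsi_inl, hsi_outl, HSI_GDD_LCH,
 * gdd_chan_count), and the acknowledge write mirrors the cancel paths above.
 */
static void do_hsi_gdd_dispatch(struct hsi_dev *hsi_ctrl)
{
	void __iomem *base = hsi_ctrl->base;
	unsigned int gdd_lch;
	u32 status;

	spin_lock(&hsi_ctrl->lock);
	status = hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG) &
		 hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);

	for (gdd_lch = 0; gdd_lch < hsi_ctrl->gdd_chan_count; gdd_lch++) {
		if (status & HSI_GDD_LCH(gdd_lch)) {
			/* Handle the event with the controller lock held, as
			 * do_hsi_gdd_lch() expects */
			do_hsi_gdd_lch(hsi_ctrl, gdd_lch);
			/* Acknowledge the handled event */
			hsi_outl(HSI_GDD_LCH(gdd_lch), base,
				 HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
		}
	}
	spin_unlock(&hsi_ctrl->lock);
}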