Esempio n. 1
0
/*
 * Free the DMA resources acquired in stmp3xxx_mmc_dma_init(): quiesce the
 * channel, then release the coherent bounce buffer, the command descriptor
 * and finally the channel itself.
 */
static void stmp3xxx_mmc_dma_release(struct stmp3xxx_mmc_host *host)
{
	/* Stop the channel before freeing memory it may still reference. */
	stmp3xxx_dma_reset_channel(host->dmach);

	dma_free_coherent(host->dev, SSP_BUFFER_SIZE, host->dma_buf,
			  host->dma_buf_phys);

	stmp3xxx_dma_free_command(host->dmach, &host->dma_desc);
	stmp3xxx_dma_release(host->dmach);
}
Esempio n. 2
0
/*
 * Allocate and initialise the DMA chain.
 *
 * On first call (@reset == 0) this requests the DMA channel, allocates the
 * coherent bounce buffer and a command descriptor, and links the descriptor
 * to itself.  On every call (including resets) the channel is reset and its
 * interrupt re-armed.  Returns 0 on success or a negative errno; on failure
 * everything allocated here is undone.
 */
static int stmp3xxx_mmc_dma_init(struct stmp3xxx_mmc_host *host, int reset)
{
	int ret;

	if (!reset) {
		/* Allocate DMA channel */
		ret = stmp3xxx_dma_request(host->dmach,
					   host->dev, "STMP37XX MMC/SD");
		if (ret) {
			dev_err(host->dev, "Unable to request DMA channel\n");
			return ret;
		}

		host->dma_buf = dma_alloc_coherent(host->dev, SSP_BUFFER_SIZE,
						   &host->dma_buf_phys,
						   GFP_DMA);
		if (host->dma_buf == NULL) {
			dev_err(host->dev, "Unable to allocate DMA memory\n");
			ret = -ENOMEM;
			goto out_release_dma;
		}

		ret = stmp3xxx_dma_allocate_command(host->dmach,
						    &host->dma_desc);
		if (ret) {
			dev_err(host->dev,
				"Unable to allocate DMA descriptor\n");
			goto out_free_buf;
		}

		/* Single descriptor chained to itself, pointing at the
		 * bounce buffer. */
		host->dma_desc.command->next = (u32) host->dma_desc.handle;
		host->dma_desc.command->buf_ptr = (u32) host->dma_buf_phys;
		host->dma_desc.virtual_buf_ptr = host->dma_buf;
	}

	/* Reset DMA channel */
	stmp3xxx_dma_reset_channel(host->dmach);

	/* Enable DMA interrupt */
	stmp3xxx_dma_clear_interrupt(host->dmach);
	stmp3xxx_dma_enable_interrupt(host->dmach);

	return 0;

	/* Labels are named for what they undo, in reverse order of setup. */
out_free_buf:
	dma_free_coherent(host->dev, SSP_BUFFER_SIZE, host->dma_buf,
			  host->dma_buf_phys);
out_release_dma:
	stmp3xxx_dma_release(host->dmach);

	return ret;
}
Esempio n. 3
0
/*
 * Release the LCDIF DMA resources.  In LCDIF-master mode there is no APBH
 * DMA chain to free, so only the bus-master bit is cleared; otherwise every
 * descriptor built so far is freed and the channel is released.
 */
void stmp3xxx_lcdif_dma_release(void)
{
	int channel = STMP3XXX_DMA(LCD_DMA_CHANNEL, STMP3XXX_BUS_APBH);
	int i;

	if (stmp378x_lcd_master) {
		stmp3xxx_clearl(BM_LCDIF_CTRL_LCDIF_MASTER,
				REGS_LCDIF_BASE + HW_LCDIF_CTRL);
		return;
	}

	for (i = 0; i < dma_chain_info_pos; i++)
		stmp3xxx_dma_free_command(channel, &video_dma_descriptor[i]);
	stmp3xxx_dma_release(channel);

	dma_chain_info_pos = 0;
}
Esempio n. 4
0
/**
 * lba_remove - remove an LBA/GPMI platform device
 * @pdev: platform device being removed
 *
 * Undoes lba_probe(): stops the LBA core, releases the GPMI hardware and
 * IRQ, frees the per-chip buffers and DMA chains, unmaps the registers and
 * frees the driver data.  Always returns 0.
 */
static int __devexit lba_remove(struct platform_device *pdev)
{
	struct lba_data *data = platform_get_drvdata(pdev);
	int i;

	lba_core_remove(data);
	gpmi_release_hw(pdev);
	free_irq(data->irq, data);

	/* Only chips that probe marked valid own DMA buffers/chains. */
	for (i = 0; i < max_chips; i++) {
		if (!data->nand[i].valid)
			continue;
		gpmi_free_buffers(pdev, &data->nand[i]);
		stmp3xxx_dma_free_chain(&data->nand[i].chain);
		stmp3xxx_dma_release(data->nand[i].dma_ch);
	}
	iounmap(data->io_base);
	kfree(data);

	return 0;
}
Esempio n. 5
0
/**
 * lba_probe - probe an LBA/GPMI platform device
 * @pdev: platform device to probe
 *
 * Maps the controller registers, claims the IRQ and DMA channel, builds
 * the DMA chain and per-chip buffers, and starts the LBA core.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto-cleanup chain.
 */
static int __init lba_probe(struct platform_device *pdev)
{
	struct lba_data *data;
	struct resource *r;
	struct gpmi_perchip_data *g;
	int err;

	/* Allocate memory for the device structure (and zero it) */
	data = kzalloc(sizeof(*data) + sizeof(struct gpmi_perchip_data),
		       GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev, "failed to allocate gpmi_nand_data\n");
		err = -ENOMEM;
		goto out1;
	}
	g_data = data;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "failed to get resource\n");
		err = -ENXIO;
		goto out2;
	}
	data->io_base = ioremap(r->start, r->end - r->start + 1);
	if (!data->io_base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto out2;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r) {
		err = -EIO;
		dev_err(&pdev->dev, "can't get IRQ resource\n");
		goto out3;
	}
	data->irq = r->start;

	platform_set_drvdata(pdev, data);
	err = gpmi_init_hw(pdev, 1);
	if (err)
		goto out3;


	err = request_irq(data->irq,
			  gpmi_irq, 0, dev_name(&pdev->dev), data);
	if (err) {
		dev_err(&pdev->dev, "can't request GPMI IRQ\n");
		goto out4;
	}

	g = data->nand;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(&pdev->dev, "can't get DMA resource\n");
		/* Fix: err was left at 0 here (from request_irq), so the
		 * probe would unwind everything yet report success. */
		err = -ENXIO;
		goto out_res;
	}
	g->cs = 0;
	g->dma_ch = r->start;

	err = stmp3xxx_dma_request(g->dma_ch, NULL, dev_name(&pdev->dev));
	if (err) {
		dev_err(&pdev->dev, "can't request DMA channel 0x%x\n",
			g->dma_ch);
		goto out_res;
	}

	err = stmp3xxx_dma_make_chain(g->dma_ch, &g->chain,
				      g->d, ARRAY_SIZE(g->d));
	if (err) {
		dev_err(&pdev->dev, "can't setup DMA chain\n");
		stmp3xxx_dma_release(g->dma_ch);
		goto out_res;
	}

	g->cmd_buffer_size = GPMI_CMD_BUF_SZ;
	g->cmdtail_buffer_size = GPMI_CMD_BUF_SZ;
	g->write_buffer_size = GPMI_WRITE_BUF_SZ;
	g->data_buffer_size = GPMI_DATA_BUF_SZ;
	g->oob_buffer_size = GPMI_OOB_BUF_SZ;

	err = gpmi_alloc_buffers(pdev, g);
	if (err) {
		dev_err(&pdev->dev, "can't setup buffers\n");
		stmp3xxx_dma_free_chain(&g->chain);
		stmp3xxx_dma_release(g->dma_ch);
		goto out_res;
	}

	g->dev = pdev;
	g->chip.priv = g;
	g->index = 0;
	g->timing = gpmi_safe_timing;

	g->cmd_buffer_sz =
		g->write_buffer_sz =
		g->data_buffer_sz =
		0;
	g->valid = !0;	/* mark the data as valid */


	lba_core_init(data);

	return 0;

out_res:
	free_irq(data->irq, data);
out4:
	gpmi_release_hw(pdev);
out3:
	platform_set_drvdata(pdev, NULL);
	iounmap(data->io_base);
out2:
	kfree(data);
out1:
	return err;
}
/* Allocate and initialize rx and tx DMA chains */
static inline int stmp_appuart_dma_init(struct stmp_appuart_port *s)
{
	int err = 0;
	struct stmp3xxx_dma_descriptor *t = &s->tx_desc;
#ifndef RX_CHAIN
	struct stmp3xxx_dma_descriptor *r = &s->rx_desc;
#else
	int i;
#endif

	err = stmp3xxx_dma_request(s->dma_rx, s->dev, s->dev->bus_id);
	if (err)
		goto out;
	err = stmp3xxx_dma_request(s->dma_tx, s->dev, s->dev->bus_id);
	if (err)
		goto out1;

#ifndef RX_CHAIN
	err = stmp3xxx_dma_allocate_command(s->dma_rx, r);
	if (err)
		goto out2;
#endif
	err = stmp3xxx_dma_allocate_command(s->dma_tx, t);
	if (err)
		goto out3;
	t->virtual_buf_ptr = dma_alloc_coherent(s->dev,
					   TX_BUFFER_SIZE,
					   &t->command->buf_ptr,
					   GFP_DMA);
	if (!t->virtual_buf_ptr)
		goto out4;
#ifdef DEBUG
	memset(t->virtual_buf_ptr, 0x4B, TX_BUFFER_SIZE);
#endif

#ifndef RX_CHAIN
	r->virtual_buf_ptr = dma_alloc_coherent(s->dev,
					   RX_BUFFER_SIZE,
					   &r->command->buf_ptr,
					   GFP_DMA);
	if (!r->virtual_buf_ptr)
		goto out5;
#ifdef DEBUG
	memset(r->virtual_buf_ptr, 0x4C, RX_BUFFER_SIZE);
#endif
#else
	stmp3xxx_dma_make_chain(s->dma_rx, &s->rx_chain, s->rxd, RX_CHAIN);
	for (i = 0; i < RX_CHAIN; i++) {
		struct stmp3xxx_dma_descriptor *r = s->rxd + i;

		r->command->cmd =
			BF_APBX_CHn_CMD_XFER_COUNT(RX_BUFFER_SIZE) |
			BF_APBX_CHn_CMD_CMDWORDS(1) |
			BM_APBX_CHn_CMD_WAIT4ENDCMD |
			BM_APBX_CHn_CMD_SEMAPHORE |
			BM_APBX_CHn_CMD_IRQONCMPLT |
			BM_APBX_CHn_CMD_CHAIN |
			BF_APBX_CHn_CMD_COMMAND(
				BV_APBX_CHn_CMD_COMMAND__DMA_WRITE);
		r->virtual_buf_ptr = dma_alloc_coherent(s->dev,
					   RX_BUFFER_SIZE,
					   &r->command->buf_ptr,
					   GFP_DMA);
		r->command->pio_words[0] = /* BM_UARTAPP_CTRL0_RUN | */
			BF_UARTAPP_CTRL0_XFER_COUNT(RX_BUFFER_SIZE)|
			BM_UARTAPP_CTRL0_RXTO_ENABLE |
			BF_UARTAPP_CTRL0_RXTIMEOUT(3);
	}
#endif
	return 0;

	/*
	 * would be necessary on other error paths

	dma_free_coherent( s->dev, RX_BUFFER_SIZE, r->virtual_buf_ptr,
			   r->command->buf_ptr);
	*/
out5:
	dma_free_coherent(s->dev, TX_BUFFER_SIZE, t->virtual_buf_ptr,
			   t->command->buf_ptr);
out4:
	stmp3xxx_dma_free_command(s->dma_tx, t);
out3:
#ifndef RX_CHAIN
	stmp3xxx_dma_free_command(s->dma_rx, r);
#endif
out2:
	stmp3xxx_dma_release(s->dma_tx);
out1:
	stmp3xxx_dma_release(s->dma_rx);
out:
	WARN_ON(err);
	return err;
}