/*
 * Program the SDH controller and its DMA channel for a single-block
 * READ transfer; write transfers are not implemented yet.
 *
 * mmc:  host controller handle (not referenced here)
 * data: transfer descriptor; the block is DMA'd into data->dest
 *
 * Returns 0 on success, UNUSABLE_ERR if a write was requested.  The
 * transfer itself completes asynchronously once DTX_E is set below.
 */
static int sdh_setup_data(struct mmc *mmc, struct mmc_data *data)
{
	u16 data_ctl = 0;
	u16 dma_cfg = 0;
	int ret = 0;

	/* Don't support write yet. */
	if (data->flags & MMC_DATA_WRITE)
		return UNUSABLE_ERR;
	/* block-length field encodes log2(blocksize); assumes blocksize is a
	 * power of two -- ffs() of 2^n is n+1 */
	data_ctl |= ((ffs(data->blocksize) - 1) << 4);
	data_ctl |= DTX_DIR;	/* direction: card -> host (read) */
	bfin_write_SDH_DATA_CTL(data_ctl);
	/* 32-bit DMA transfers, restart, write-to-memory (WNR), enabled */
	dma_cfg = WDSIZE_32 | RESTART | WNR | DMAEN;

	/* generous data timeout.  NOTE(review): later code in this file writes
	 * 32-bit values (-1 / 0xFFFFFFFF) to this register; confirm against the
	 * HRM whether 0xFFFF truncates the intended timeout */
	bfin_write_SDH_DATA_TIMER(0xFFFF);

	/* flush/invalidate the destination so no stale dirty cache lines
	 * overwrite the DMA'd data */
	blackfin_dcache_flush_invalidate_range(data->dest,
			data->dest + data->blocksize);
	/* configure DMA */
	bfin_write_DMA_START_ADDR(data->dest);
	bfin_write_DMA_X_COUNT(data->blocksize / 4);	/* count in 32-bit words */
	bfin_write_DMA_X_MODIFY(4);			/* stride: 4 bytes */
	bfin_write_DMA_CONFIG(dma_cfg);
	bfin_write_SDH_DATA_LGTH(data->blocksize);
	/* kick off transfer */
	bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);

	return ret;
}
Example #2
0
/*
 * Program the SDH/RSI controller and its DMA channel for a READ transfer.
 * NOTE(review): the original header said "single block", but the length
 * programmed below is blocksize * blocks, so multi-block reads are covered.
 * Write transfers are not implemented yet.
 *
 * mmc:  host controller handle (not referenced here)
 * data: transfer descriptor; data is DMA'd into data->dest
 *
 * Returns 0 on success, -EOPNOTSUPP if a write was requested.  The
 * transfer completes asynchronously once DTX_E is set below.
 */
static int sdh_setup_data(struct mmc *mmc, struct mmc_data *data)
{
	u16 data_ctl = 0;
	u16 dma_cfg = 0;
	unsigned long data_size = data->blocksize * data->blocks;

	/* Don't support write yet. */
	if (data->flags & MMC_DATA_WRITE)
		return -EOPNOTSUPP;
#ifndef RSI_BLKSZ
	/* older parts: block length is a log2(blocksize) field in DATA_CTL;
	 * assumes blocksize is a power of two */
	data_ctl |= ((ffs(data->blocksize) - 1) << 4);
#else
	/* newer RSI parts: block size has a dedicated register */
	bfin_write_SDH_BLK_SIZE(data->blocksize);
#endif
	data_ctl |= DTX_DIR;	/* direction: card -> host (read) */
	bfin_write_SDH_DATA_CTL(data_ctl);
	/* 32-bit DMA transfers, restart, write-to-memory (WNR), enabled */
	dma_cfg = WDSIZE_32 | PSIZE_32 | RESTART | WNR | DMAEN;

	/* maximum data timeout (all bits set) */
	bfin_write_SDH_DATA_TIMER(-1);

	/* flush/invalidate the destination so no stale dirty cache lines
	 * overwrite the DMA'd data */
	blackfin_dcache_flush_invalidate_range(data->dest,
			data->dest + data_size);
	/* configure DMA */
	bfin_write_DMA_START_ADDR(data->dest);
	bfin_write_DMA_X_COUNT(data_size / 4);	/* count in 32-bit words */
	bfin_write_DMA_X_MODIFY(4);		/* stride: 4 bytes */
	bfin_write_DMA_CONFIG(dma_cfg);
	bfin_write_SDH_DATA_LGTH(data_size);
	/* kick off transfer */
	bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);

	return 0;
}
Example #3
0
/*
 * This function reads or writes data to an endpoint FIFO.
 * Blackfin uses the DMA polling method to avoid buffer alignment issues.
 *
 * ep		- Endpoint number
 * length	- Number of bytes to read/write through the FIFO
 * fifo_data	- Pointer to data buffer to be read/written
 * is_write	- Flag for read or write (non-zero = write, i.e. memory -> FIFO)
 *
 * Blocks (busy-waits) until the DMA completion interrupt bit for this
 * endpoint is raised, then acknowledges it and disables the channel.
 */
void rw_fifo(u8 ep, u32 length, void *fifo_data, int is_write)
{
	struct bfin_musb_dma_regs *regs;
	u32 val = (u32)fifo_data;

	/* flush dirty lines (write case) and drop stale lines (read case)
	 * covering the buffer before the DMA engine touches it */
	blackfin_dcache_flush_invalidate_range(fifo_data, fifo_data + length);

	/* per-endpoint DMA register banks are laid out consecutively after
	 * USB_DMA_INTERRUPT; index by endpoint number */
	regs = (void *)USB_DMA_INTERRUPT;
	regs += ep;

	/* Setup DMA address register (32-bit address split across two
	 * 16-bit registers) */
	bfin_write16(&regs->addr_low, val);
	SSYNC();

	bfin_write16(&regs->addr_high, val >> 16);
	SSYNC();

	/* Setup DMA count register.  NOTE(review): count_high is forced to 0,
	 * so transfers are presumably limited to 64KB -- confirm callers never
	 * pass a larger length */
	bfin_write16(&regs->count_low, length);
	bfin_write16(&regs->count_high, 0);
	SSYNC();

	/* Enable the DMA: select endpoint, enable channel and its interrupt */
	val = (ep << 4) | DMA_ENA | INT_ENA;
	if (is_write)
		val |= DIRECTION;	/* memory -> FIFO */
	bfin_write16(&regs->control, val);
	SSYNC();

	/* Wait for complete (poll the per-endpoint DMA interrupt bit) */
	while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << ep)))
		continue;

	/* acknowledge dma interrupt (write-1-to-clear) */
	bfin_write_USB_DMA_INTERRUPT(1 << ep);
	SSYNC();

	/* Reset DMA */
	bfin_write16(&regs->control, 0);
	SSYNC();
}
Example #4
0
/*
 * Read blkcnt blocks, starting at block 'start', from the MMC card on
 * device 'dev' into 'buffer'.  Each block is fetched with a separate
 * single-block read command and transferred by DMA.
 *
 * Returns the number of blocks successfully read; a command or data
 * error terminates the loop early, so the return value may be smaller
 * than blkcnt.
 */
static unsigned long
mmc_bread(int dev, unsigned long start, lbaint_t blkcnt, void *buffer)
{
	int ret, i;
	unsigned long resp[4];
	unsigned long card_status;
	__u8 *buf = buffer;
	__u32 status;
	__u16 data_ctl = 0;
	__u16 dma_cfg = 0;

	if (blkcnt == 0)
		return 0;
	/* BUGFIX: 'start' is unsigned long and 'blkcnt' is lbaint_t; the old
	 * "%d" specifiers were a format/argument mismatch (undefined behavior) */
	debug("mmc_bread: dev %d, start %lu, blkcnt %lu\n",
	      dev, start, (unsigned long)blkcnt);
	/* Force to use 512-byte block,because a lot of code depends on this */
	data_ctl |= 9 << 4;	/* block-length field = log2(512) */
	data_ctl |= DTX_DIR;	/* direction: card -> host (read) */
	bfin_write_SDH_DATA_CTL(data_ctl);
	/* 32-bit DMA transfers, restart, write-to-memory (WNR), enabled */
	dma_cfg |= WDSIZE_32 | RESTART | WNR | DMAEN;

	/* FIXME later */
	bfin_write_SDH_DATA_TIMER(0xFFFFFFFF);
	for (i = 0; i < blkcnt; ++i, ++start) {
		/* drop stale/dirty cache lines covering this block's buffer */
		blackfin_dcache_flush_invalidate_range(buf + i * mmc_blkdev.blksz,
			buf + (i + 1) * mmc_blkdev.blksz);
		bfin_write_DMA_START_ADDR(buf + i * mmc_blkdev.blksz);
		bfin_write_DMA_X_COUNT(mmc_blkdev.blksz / 4);	/* 32-bit words */
		bfin_write_DMA_X_MODIFY(4);
		bfin_write_DMA_CONFIG(dma_cfg);
		bfin_write_SDH_DATA_LGTH(mmc_blkdev.blksz);
		/* Put the device into Transfer state */
		ret = mmc_cmd(MMC_CMD_SELECT_CARD, mmc_rca << 16, resp, MMC_RSP_R1);
		if (ret) {
			printf("MMC_CMD_SELECT_CARD failed\n");
			goto out;
		}
		/* Set block length */
		ret = mmc_cmd(MMC_CMD_SET_BLOCKLEN, mmc_blkdev.blksz, resp, MMC_RSP_R1);
		if (ret) {
			printf("MMC_CMD_SET_BLOCKLEN failed\n");
			goto out;
		}
		ret = mmc_cmd(MMC_CMD_READ_SINGLE_BLOCK,
			      start * mmc_blkdev.blksz, resp,
			      MMC_RSP_R1);
		if (ret) {
			printf("MMC_CMD_READ_SINGLE_BLOCK failed\n");
			goto out;
		}
		/* kick off the DMA data transfer */
		bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);

		/* poll until the transfer ends, successfully or not */
		do {
			udelay(1);
			status = bfin_read_SDH_STATUS();
		} while (!(status & (DAT_BLK_END | DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN)));

		if (status & (DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN)) {
			bfin_write_SDH_STATUS_CLR(DAT_TIMEOUT_STAT | \
				DAT_CRC_FAIL_STAT | RX_OVERRUN_STAT);
			goto read_error;
		} else {
			bfin_write_SDH_STATUS_CLR(DAT_BLK_END_STAT | DAT_END_STAT);
			/* deselect the card between blocks */
			mmc_cmd(MMC_CMD_SELECT_CARD, 0, resp, 0);
		}
	}
 out:

	return i;	/* blocks completed before success/failure */

 read_error:
	/* NOTE(review): other calls pass the 4-word resp[] array here; if
	 * mmc_cmd stores more than one word for an R1 response this would
	 * overflow the single-word card_status -- verify mmc_cmd's contract */
	mmc_cmd(MMC_CMD_SEND_STATUS, mmc_rca << 16, &card_status, MMC_RSP_R1);
	printf("mmc: bread failed, status = %08x, card status = %08lx\n",
	       status, card_status);
	goto out;
}
Example #5
0
/*
 * Perform a half-duplex SPI transfer of 'bytes' bytes via descriptor-driven
 * DMA.  Exactly one of tx/rx is used: if tx is non-NULL the transfer is a
 * transmit, otherwise a receive into rx.
 *
 * Transfers larger than 64KB are split with a 2D (x/y count) descriptor for
 * the bulk plus a chained 1D descriptor for the remainder, because a single
 * 1D x_count is only 16 bits wide.
 *
 * Returns 0 on completion, -1 if the user aborted with ctrl-c.
 */
static int spi_dma_xfer(struct bfin_spi_slave *bss, const u8 *tx, u8 *rx,
			uint bytes)
{
	int ret = -1;
	u16 ndsize, spi_config, dma_config;
	struct dmasg dmasg[2];
	const u8 *buf;

	if (tx) {
		debug("%s: doing half duplex TX\n", __func__);
		buf = tx;
		spi_config = TDBR_DMA;	/* DMA feeds the transmit register */
		dma_config = 0;		/* memory -> peripheral */
	} else {
		debug("%s: doing half duplex RX\n", __func__);
		buf = rx;
		spi_config = RDBR_DMA;	/* DMA drains the receive register */
		dma_config = WNR;	/* peripheral -> memory */
	}

	/* NOTE(review): dmasg lives on the stack and is fetched by the DMA
	 * engine via curr_desc_ptr below; this presumably relies on the stack
	 * being in non-cached (or coherent) memory -- confirm */
	dmasg[0].start_addr = (unsigned long)buf;
	dmasg[0].x_modify = 1;			/* byte stride */
	dma_config |= WDSIZE_8 | DMAEN;		/* 8-bit transfers */
	if (bytes <= 65536) {
		/* single 1D descriptor covers the whole buffer */
		blackfin_dcache_flush_invalidate_range(buf, buf + bytes);
		ndsize = NDSIZE_5;
		dmasg[0].cfg = NDSIZE_0 | dma_config | FLOW_STOP | DI_EN;
		dmasg[0].x_count = bytes;
	} else {
		/* only flush the first 64KB now; the rest is flushed while the
		 * DMA is already running (see below) */
		blackfin_dcache_flush_invalidate_range(buf, buf + 65536 - 1);
		ndsize = NDSIZE_7;
		/* descriptor 0: 2D transfer of y_count rows of 64KB each,
		 * chained (FLOW_ARRAY) to descriptor 1 for the remainder */
		dmasg[0].cfg = NDSIZE_5 | dma_config | FLOW_ARRAY | DMA2D;
		dmasg[0].x_count = 0;	/* 2^16 */
		dmasg[0].y_count = bytes >> 16;	/* count / 2^16 */
		dmasg[0].y_modify = 1;
		/* NOTE(review): if bytes is an exact multiple of 65536 the
		 * remainder descriptor gets x_count == 0, which the DMA engine
		 * may interpret as 65536 extra bytes -- confirm callers */
		dmasg[1].start_addr = (unsigned long)(buf + (bytes & ~0xFFFF));
		dmasg[1].cfg = NDSIZE_0 | dma_config | FLOW_STOP | DI_EN;
		dmasg[1].x_count = bytes & 0xFFFF; /* count % 2^16 */
		dmasg[1].x_modify = 1;
	}

	dma->cfg = 0;				/* stop the channel first */
	dma->irq_status = DMA_DONE | DMA_ERR;	/* clear stale status (W1C) */
	dma->curr_desc_ptr = dmasg;
	write_SPI_CTL(bss, (bss->ctl & ~TDBR_CORE));
	write_SPI_STAT(bss, -1);		/* clear all SPI status bits */
	SSYNC();

	/* value shifted out while receiving (RX case uses TDBR too) */
	write_SPI_TDBR(bss, CONFIG_BFIN_SPI_IDLE_VAL);
	dma->cfg = ndsize | FLOW_ARRAY | DMAEN;	/* start descriptor fetch */
	write_SPI_CTL(bss, (bss->ctl & ~TDBR_CORE) | spi_config);
	SSYNC();

	/*
	 * We already invalidated the first 64k,
	 * now while we just wait invalidate the remaining part.
	 * Its not likely that the DMA is going to overtake
	 */
	if (bytes > 65536)
		blackfin_dcache_flush_invalidate_range(buf + 65536, buf + bytes);

	/* poll for completion; allow the user to break out with ctrl-c,
	 * in which case ret stays -1 */
	while (!(dma->irq_status & DMA_DONE))
		if (ctrlc())
			goto done;

	dma->cfg = 0;	/* stop the channel */

	ret = 0;
 done:
	write_SPI_CTL(bss, bss->ctl);	/* restore the original SPI config */
	return ret;
}