Example #1
/*
 * Write a page to NAND.
 */
static void mxs_nand_ecc_write_page(struct mtd_info *mtd,
				struct nand_chip *nand, const uint8_t *buf)
{
	struct mxs_nand_info *nand_info = nand->priv;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(mtd, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
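	/*
	 * PIO words 4 and 5 point the GPMI/BCH engine at the payload and
	 * auxiliary (OOB) buffers it encodes from; the APBH descriptor itself
	 * moves no bytes, hence MXS_DMA_DESC_COMMAND_NO_DMAXFER above.
	 */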
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	flush_buffers(mtd, nand_info);

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("%s: DMA write error\n", __func__);
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete();
	if (ret) {
		printf("%s: BCH write timeout\n", __func__);
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
}
Example #2
/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				int length)
{
	struct nand_chip *nand = mtd->priv;
	struct mxs_nand_info *nand_info = nand->priv;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
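	/*
	 * MXS_DMA_DESC_COMMAND_DMA_READ is named from the DMA engine's point
	 * of view: it reads from memory and feeds the GPMI, i.e. a NAND write.
	 */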
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(4 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}
Example #3
static int mxsmmc_send_cmd_dma(struct mxsmmc_priv *priv, struct mmc_data *data)
{
	uint32_t data_count = data->blocksize * data->blocks;
	uint32_t cache_data_count;
	int dmach;
	struct mxs_dma_desc *desc = priv->desc;

	memset(desc, 0, sizeof(struct mxs_dma_desc));
	desc->address = (dma_addr_t)desc;

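	/*
	 * Round the cache maintenance size up to a multiple of
	 * ARCH_DMA_MINALIGN (the cache line size), e.g. 500 bytes become 512
	 * with 32-byte lines, so flush/invalidate never splits a cache line.
	 */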
	if (data_count % ARCH_DMA_MINALIGN)
		cache_data_count = roundup(data_count, ARCH_DMA_MINALIGN);
	else
		cache_data_count = data_count;

	if (data->flags & MMC_DATA_READ) {
		priv->desc->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE;
		priv->desc->cmd.address = (dma_addr_t)data->dest;
	} else {
		priv->desc->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ;
		priv->desc->cmd.address = (dma_addr_t)data->src;

		/* Flush the data to DRAM so the DMA engine can pick it up */
		flush_dcache_range((uint32_t)priv->desc->cmd.address,
			(uint32_t)(priv->desc->cmd.address + cache_data_count));
	}

	/* Invalidate the area, so no writeback into the RAM races with DMA */
	invalidate_dcache_range((uint32_t)priv->desc->cmd.address,
			(uint32_t)(priv->desc->cmd.address + cache_data_count));

	priv->desc->cmd.data |= MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
				(data_count << MXS_DMA_DESC_BYTES_OFFSET);

	dmach = MXS_DMA_CHANNEL_AHB_APBH_SSP0 + priv->id;
	mxs_dma_desc_append(dmach, priv->desc);
	if (mxs_dma_go(dmach))
		return COMM_ERR;

	/* The data has arrived in DRAM; invalidate the cache over it */
	if (data->flags & MMC_DATA_READ) {
		invalidate_dcache_range((uint32_t)priv->desc->cmd.address,
			(uint32_t)(priv->desc->cmd.address + cache_data_count));
	}

	return 0;
}
Example #4
/*
 * Make a circular DMA descriptor list
 */
static int mxs_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct mxs_runtime_data *prtd = runtime->private_data;
	dma_addr_t dma_buffer_phys;
	int periods_num, playback, i;

	playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 1 : 0;
	periods_num = prtd->dma_totsize / prtd->dma_period;

	dma_buffer_phys = runtime->dma_addr;

	/* Reset DMA channel, enable interrupt */
	mxs_dma_reset(prtd->dma_ch);

	/* Set up a DMA chain to send the DMA buffer */
	for (i = 0; i < periods_num; i++) {
		int ret;
		/* Link with previous command */
		prtd->dma_desc_array[i]->cmd.cmd.bits.bytes = prtd->dma_period;
		prtd->dma_desc_array[i]->cmd.cmd.bits.irq = 1;
		prtd->dma_desc_array[i]->cmd.cmd.bits.dec_sem = 0;
		prtd->dma_desc_array[i]->cmd.cmd.bits.chain = 1;
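		/*
		 * chain=1 links this descriptor to the next one appended;
		 * dec_sem=0 leaves the channel semaphore alone so the whole
		 * period list keeps running as one chain.
		 */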
		/* Set DMA direction */
		if (playback)
			prtd->dma_desc_array[i]->cmd.cmd.bits.command =
				DMA_READ;
		else
			prtd->dma_desc_array[i]->cmd.cmd.bits.command =
				DMA_WRITE;

		prtd->dma_desc_array[i]->cmd.address = dma_buffer_phys;

		ret = mxs_dma_desc_append(prtd->dma_ch,
					  prtd->dma_desc_array[i]);
		if (ret) {
			printk(KERN_ERR "%s: Failed to append DMA descriptor\n",
			       __func__);
			return ret;
		}
		/* Next data chunk */
		dma_buffer_phys += prtd->dma_period;
	}

	return 0;
}
Example #5
int submit_request(struct mxs_hsadc_data *pdx, unsigned long count)
{
	unsigned long sample_count;

	memset(pdx->buf, 0, count);

	pdx->desc->cmd.cmd.bits.bytes = count;
	pdx->desc->cmd.cmd.bits.pio_words = 0;
	pdx->desc->cmd.cmd.bits.wait4end = 1;
	pdx->desc->cmd.cmd.bits.dec_sem = 1;
	pdx->desc->cmd.cmd.bits.irq = 1;
	pdx->desc->cmd.cmd.bits.command = DMA_WRITE;
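	/* DMA_WRITE: the engine writes the captured HSADC samples to memory. */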
	pdx->desc->cmd.address = pdx->buf_phy;
	
	if (mxs_dma_desc_append(pdx->dma_ch, pdx->desc))
		return -EINVAL;

	/*
	 * Convert the byte count to a sample count: in 8-bit mode each sample
	 * is one byte; in 10-bit and 12-bit modes each sample word is two
	 * bytes.
	 */
	sample_count =
		adc_sample_percision == 8 ? count : (count >> 1);

	writel(sample_count, pdx->hsadc_base + HSADC_SEQUENCE_SAMPLES_NUM);
	writel(1, pdx->hsadc_base + HSADC_SEQUENCE_NUM);

	writel(1 << 31 | 1 << 30 | 1 << 29, pdx->hsadc_base + HSADC_CTRL1); /* enable irq */

	mxs_dma_reset(pdx->dma_ch);
	mxs_dma_ack_irq(pdx->dma_ch);
	mxs_dma_enable_irq(pdx->dma_ch, 1);
	if (mxs_dma_enable(pdx->dma_ch))
		return -EINVAL;

	writel(RUN, pdx->hsadc_base + HSADC_CTRL0_SET);
	writel(SOFT_TRIGGER, pdx->hsadc_base + HSADC_CTRL0_SET);
	
	return 0;
}
Example #6
static int mxsmmc_send_cmd_dma(struct mxsmmc_priv *priv, struct mmc_data *data)
{
	uint32_t data_count = data->blocksize * data->blocks;
	int dmach;
	struct mxs_dma_desc *desc = priv->desc;
	void *addr;
	unsigned int flags;
	struct bounce_buffer bbstate;

	memset(desc, 0, sizeof(struct mxs_dma_desc));
	desc->address = (dma_addr_t)desc;

	if (data->flags & MMC_DATA_READ) {
		priv->desc->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE;
		addr = data->dest;
		flags = GEN_BB_WRITE;
	} else {
		priv->desc->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ;
		addr = (void *)data->src;
		flags = GEN_BB_READ;
	}

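	/*
	 * The bounce buffer stages the transfer through a cache-aligned,
	 * DMA-safe area: GEN_BB_READ copies the source data in up front,
	 * GEN_BB_WRITE copies the DMA result back in bounce_buffer_stop().
	 */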
	bounce_buffer_start(&bbstate, addr, data_count, flags);

	priv->desc->cmd.address = (dma_addr_t)bbstate.bounce_buffer;

	priv->desc->cmd.data |= MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
				(data_count << MXS_DMA_DESC_BYTES_OFFSET);

	dmach = MXS_DMA_CHANNEL_AHB_APBH_SSP0 + priv->id + mxsmmc_id_offset;
	mxs_dma_desc_append(dmach, priv->desc);
	if (mxs_dma_go(dmach)) {
		bounce_buffer_stop(&bbstate);
		return COMM_ERR;
	}

	bounce_buffer_stop(&bbstate);

	return 0;
}
Example #7
/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
					uint8_t *buf, int page)
{
	struct mxs_nand_info *nand_info = nand->priv;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t	*status;
	int i, ret;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END |	(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete();
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(mtd, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
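	/*
	 * Per-chunk BCH status byte: 0x00 = no errors, 0xff = erased chunk,
	 * 0xfe = uncorrectable, anything else = number of bitflips corrected.
	 */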
	for (i = 0; i < mxs_nand_ecc_chunk_cnt(mtd->writesize); i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff)
			continue;

		if (status[i] == 0xfe) {
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}
Example #8
/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd->priv;
	struct mxs_nand_info *nand_info = nand->priv;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (4 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}
Example #9
/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these functions pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd->priv;
	struct mxs_nand_info *nand_info = nand->priv;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}
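
For context, a minimal sketch of how the generic MTD nand_command() path is expected to drive this hook (nand, column and page are placeholders, not taken from the source): each command or address byte is queued by one call, and the final NAND_CMD_NONE call with both latch enables deasserted is what triggers the DMA transfer compiled above.

nand->cmd_ctrl(mtd, NAND_CMD_READ0, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); /* command byte, queued */
nand->cmd_ctrl(mtd, column & 0xff, NAND_NCE | NAND_ALE | NAND_CTRL_CHANGE);  /* address bytes, queued */
nand->cmd_ctrl(mtd, page & 0xff, NAND_NCE | NAND_ALE);
nand->cmd_ctrl(mtd, (page >> 8) & 0xff, NAND_NCE | NAND_ALE);
nand->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);             /* latches deasserted, DMA runs */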
Example #10
/*
 * Sends a command out on the bus.  Takes the mmc pointer,
 * a command pointer, and an optional data pointer.
 */
static int
mxsmmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	struct mxsmmc_priv *priv = (struct mxsmmc_priv *)mmc->priv;
	struct mx28_ssp_regs *ssp_regs = priv->regs;
	uint32_t reg;
	int timeout;
	uint32_t data_count;
	uint32_t ctrl0;
#ifndef CONFIG_MXS_MMC_DMA
	uint32_t *data_ptr;
#else
	uint32_t cache_data_count;
#endif

	debug("MMC%d: CMD%d\n", mmc->block_dev.dev, cmd->cmdidx);

	/* Check bus busy */
	timeout = MXSMMC_MAX_TIMEOUT;
	while (--timeout) {
		udelay(1000);
		reg = readl(&ssp_regs->hw_ssp_status);
		if (!(reg &
			(SSP_STATUS_BUSY | SSP_STATUS_DATA_BUSY |
			SSP_STATUS_CMD_BUSY))) {
			break;
		}
	}

	if (!timeout) {
		printf("MMC%d: Bus busy timeout!\n", mmc->block_dev.dev);
		return TIMEOUT;
	}

	/* See if card is present */
	if (readl(&ssp_regs->hw_ssp_status) & SSP_STATUS_CARD_DETECT) {
		printf("MMC%d: No card detected!\n", mmc->block_dev.dev);
		return NO_CARD_ERR;
	}

	/* Start building CTRL0 contents */
	ctrl0 = priv->buswidth;

	/* Set up command */
	if (!(cmd->resp_type & MMC_RSP_CRC))
		ctrl0 |= SSP_CTRL0_IGNORE_CRC;
	if (cmd->resp_type & MMC_RSP_PRESENT)	/* Need to get response */
		ctrl0 |= SSP_CTRL0_GET_RESP;
	if (cmd->resp_type & MMC_RSP_136)	/* It's a 136-bit response */
		ctrl0 |= SSP_CTRL0_LONG_RESP;

	/* Command index */
	reg = readl(&ssp_regs->hw_ssp_cmd0);
	reg &= ~(SSP_CMD0_CMD_MASK | SSP_CMD0_APPEND_8CYC);
	reg |= cmd->cmdidx << SSP_CMD0_CMD_OFFSET;
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		reg |= SSP_CMD0_APPEND_8CYC;
	writel(reg, &ssp_regs->hw_ssp_cmd0);

	/* Command argument */
	writel(cmd->cmdarg, &ssp_regs->hw_ssp_cmd1);

	/* Set up data */
	if (data) {
		/* READ or WRITE */
		if (data->flags & MMC_DATA_READ) {
			ctrl0 |= SSP_CTRL0_READ;
		} else if (priv->mmc_is_wp(mmc->block_dev.dev)) {
			printf("MMC%d: Can not write a locked card!\n",
				mmc->block_dev.dev);
			return UNUSABLE_ERR;
		}

		ctrl0 |= SSP_CTRL0_DATA_XFER;
		reg = ((data->blocks - 1) <<
			SSP_BLOCK_SIZE_BLOCK_COUNT_OFFSET) |
			((ffs(data->blocksize) - 1) <<
			SSP_BLOCK_SIZE_BLOCK_SIZE_OFFSET);
		writel(reg, &ssp_regs->hw_ssp_block_size);

		reg = data->blocksize * data->blocks;
		writel(reg, &ssp_regs->hw_ssp_xfer_size);
	}

	/* Kick off the command */
	ctrl0 |= SSP_CTRL0_WAIT_FOR_IRQ | SSP_CTRL0_ENABLE | SSP_CTRL0_RUN;
	writel(ctrl0, &ssp_regs->hw_ssp_ctrl0);

	/* Wait for the command to complete */
	timeout = MXSMMC_MAX_TIMEOUT;
	while (--timeout) {
		udelay(1000);
		reg = readl(&ssp_regs->hw_ssp_status);
		if (!(reg & SSP_STATUS_CMD_BUSY))
			break;
	}

	if (!timeout) {
		printf("MMC%d: Command %d busy\n",
			mmc->block_dev.dev, cmd->cmdidx);
		return TIMEOUT;
	}

	/* Check command timeout */
	if (reg & SSP_STATUS_RESP_TIMEOUT) {
		printf("MMC%d: Command %d timeout (status 0x%08x)\n",
			mmc->block_dev.dev, cmd->cmdidx, reg);
		return TIMEOUT;
	}

	/* Check command errors */
	if (reg & (SSP_STATUS_RESP_CRC_ERR | SSP_STATUS_RESP_ERR)) {
		printf("MMC%d: Command %d error (status 0x%08x)!\n",
			mmc->block_dev.dev, cmd->cmdidx, reg);
		return COMM_ERR;
	}

	/* Copy response to response buffer */
	if (cmd->resp_type & MMC_RSP_136) {
		cmd->response[3] = readl(&ssp_regs->hw_ssp_sdresp0);
		cmd->response[2] = readl(&ssp_regs->hw_ssp_sdresp1);
		cmd->response[1] = readl(&ssp_regs->hw_ssp_sdresp2);
		cmd->response[0] = readl(&ssp_regs->hw_ssp_sdresp3);
	} else
		cmd->response[0] = readl(&ssp_regs->hw_ssp_sdresp0);

	/* Return if no data to process */
	if (!data)
		return 0;

	data_count = data->blocksize * data->blocks;
	timeout = MXSMMC_MAX_TIMEOUT;

#ifdef CONFIG_MXS_MMC_DMA
	if (data_count % ARCH_DMA_MINALIGN)
		cache_data_count = roundup(data_count, ARCH_DMA_MINALIGN);
	else
		cache_data_count = data_count;

	if (data->flags & MMC_DATA_READ) {
		priv->desc->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE;
		priv->desc->cmd.address = (dma_addr_t)data->dest;
	} else {
		priv->desc->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ;
		priv->desc->cmd.address = (dma_addr_t)data->src;

		/* Flush the data to DRAM so the DMA engine can pick it up */
		flush_dcache_range((uint32_t)priv->desc->cmd.address,
			(uint32_t)(priv->desc->cmd.address + cache_data_count));
	}

	priv->desc->cmd.data |= MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
				(data_count << MXS_DMA_DESC_BYTES_OFFSET);


	mxs_dma_desc_append(MXS_DMA_CHANNEL_AHB_APBH_SSP0, priv->desc);
	if (mxs_dma_go(MXS_DMA_CHANNEL_AHB_APBH_SSP0)) {
		printf("MMC%d: DMA transfer failed\n", mmc->block_dev.dev);
		return COMM_ERR;
	}

	/* The data has arrived in DRAM; invalidate the cache over it */
	if (data->flags & MMC_DATA_READ) {
		invalidate_dcache_range((uint32_t)priv->desc->cmd.address,
			(uint32_t)(priv->desc->cmd.address + cache_data_count));
	}
#else
	if (data->flags & MMC_DATA_READ) {
		data_ptr = (uint32_t *)data->dest;
		while (data_count && --timeout) {
			reg = readl(&ssp_regs->hw_ssp_status);
			if (!(reg & SSP_STATUS_FIFO_EMPTY)) {
				*data_ptr++ = readl(&ssp_regs->hw_ssp_data);
				data_count -= 4;
				timeout = MXSMMC_MAX_TIMEOUT;
			} else
				udelay(1000);
		}
	} else {
		data_ptr = (uint32_t *)data->src;
		timeout *= 100;
		while (data_count && --timeout) {
			reg = readl(&ssp_regs->hw_ssp_status);
			if (!(reg & SSP_STATUS_FIFO_FULL)) {
				writel(*data_ptr++, &ssp_regs->hw_ssp_data);
				data_count -= 4;
				timeout = MXSMMC_MAX_TIMEOUT;
			} else
				udelay(1000);
		}
	}

	if (!timeout) {
		printf("MMC%d: Data timeout with command %d (status 0x%08x)!\n",
			mmc->block_dev.dev, cmd->cmdidx, reg);
		return COMM_ERR;
	}
#endif

	/* Check data errors */
	reg = readl(&ssp_regs->hw_ssp_status);
	if (reg &
		(SSP_STATUS_TIMEOUT | SSP_STATUS_DATA_CRC_ERR |
		SSP_STATUS_FIFO_OVRFLW | SSP_STATUS_FIFO_UNDRFLW)) {
		printf("MMC%d: Data error with command %d (status 0x%08x)!\n",
			mmc->block_dev.dev, cmd->cmdidx, reg);
		return COMM_ERR;
	}

	return 0;
}