Example #1
static int davinci_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;

	davinci_pcm_period_reset(substream);
	if (prtd->ram_channel >= 0) {
		int ret = ping_pong_dma_setup(substream);
		if (ret < 0)
			return ret;

		edma_write_slot(prtd->ram_channel, &prtd->ram_params);
		edma_write_slot(prtd->asp_channel, &prtd->asp_params);

		print_buf_info(prtd->ram_channel, "ram_channel");
		print_buf_info(prtd->ram_link, "ram_link");
		print_buf_info(prtd->ram_link2, "ram_link2");
		print_buf_info(prtd->asp_channel, "asp_channel");
		print_buf_info(prtd->asp_link[0], "asp_link[0]");
		print_buf_info(prtd->asp_link[1], "asp_link[1]");

		/*
		 * There is a phase offset of 2 periods between the position
		 * used by dma setup and the position reported in the pointer
		 * function.
		 *
		 * The phase offset, when not using ping-pong buffers, is due to
		 * the two consecutive calls to davinci_pcm_enqueue_dma() below.
		 *
		 * Whereas here, with ping-pong buffers, the phase is due to
		 * there being an entire buffer transfer complete before the
		 * first dma completion event triggers davinci_pcm_dma_irq().
		 */
		davinci_pcm_period_elapsed(substream);
		davinci_pcm_period_elapsed(substream);

		return 0;
	}
	davinci_pcm_enqueue_dma(substream);
	davinci_pcm_period_elapsed(substream);

	/* Copy self-linked parameter RAM entry into master channel */
	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	edma_write_slot(prtd->asp_channel, &prtd->asp_params);
	davinci_pcm_enqueue_dma(substream);
	davinci_pcm_period_elapsed(substream);

	return 0;
}
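The two davinci_pcm_period_elapsed() calls above only advance the driver's software period counter; the actual ALSA notification happens from the DMA completion handler. A minimal sketch of what these two helpers typically look like, assuming a period field in struct davinci_runtime_data as used throughout these examples:

/* Sketch only: software period tracking for the non-ping-pong pointer math. */
static void davinci_pcm_period_reset(struct snd_pcm_substream *substream)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;

	/* restart the software period index before a new transfer */
	prtd->period = 0;
}

static void davinci_pcm_period_elapsed(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct davinci_runtime_data *prtd = runtime->private_data;

	/* advance to the next period, wrapping at runtime->periods */
	prtd->period++;
	if (unlikely(prtd->period >= runtime->periods))
		prtd->period = 0;
}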
Example #2
static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
{
	struct snd_dma_buffer *iram_dma;
	struct davinci_runtime_data *prtd = substream->runtime->private_data;
	struct davinci_pcm_dma_params *params = prtd->params;
	int ret;

	if (!params)
		return -ENODEV;

	/* Request asp master DMA channel */
	ret = prtd->asp_channel = edma_alloc_channel(params->channel,
			davinci_pcm_dma_irq, substream,
			prtd->params->asp_chan_q);
	if (ret < 0)
		goto exit1;

	/* Request asp link channels */
	ret = prtd->asp_link[0] = edma_alloc_slot(
			EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY);
	if (ret < 0)
		goto exit2;

	iram_dma = (struct snd_dma_buffer *)substream->dma_buffer.private_data;
	if (iram_dma) {
		if (request_ping_pong(substream, prtd, iram_dma) == 0)
			return 0;
		printk(KERN_WARNING "%s: dma channel allocation failed, "
				"not using sram\n", __func__);
	}

	/* Issue transfer completion IRQ when the channel completes a
	 * transfer, then always reload from the same slot (by a kind
	 * of loopback link).  The completion IRQ handler will update
	 * the reload slot with a new buffer.
	 *
	 * REVISIT save p_ram here after setting up everything except
	 * the buffer and its length (ccnt) ... use it as a template
	 * so davinci_pcm_enqueue_dma() takes less time in IRQ.
	 */
	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	prtd->asp_params.opt |= TCINTEN |
		EDMA_TCC(EDMA_CHAN_SLOT(prtd->asp_channel));
	prtd->asp_params.link_bcntrld = EDMA_CHAN_SLOT(prtd->asp_link[0]) << 5;
	edma_write_slot(prtd->asp_link[0], &prtd->asp_params);
	return 0;
exit2:
	edma_free_channel(prtd->asp_channel);
	prtd->asp_channel = -1;
exit1:
	return ret;
}
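The comment block above relies on a completion handler that, on every transfer-complete interrupt, refills the self-linked slot with the next period. A hedged sketch of such a handler, modelled on the davinci_pcm_dma_irq registered with edma_alloc_channel() above; the lock field and the exact completion-status constant are assumptions, and the constant name changed across kernel versions:

/* Sketch only: per-period EDMA completion handler for the non-ping-pong path. */
static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data)
{
	struct snd_pcm_substream *substream = data;
	struct davinci_runtime_data *prtd = substream->runtime->private_data;

	/* ignore anything that is not a completion event */
	if (ch_status != DMA_COMPLETE)
		return;

	if (snd_pcm_running(substream)) {
		spin_lock(&prtd->lock);
		if (prtd->ram_channel < 0) {
			/* no ping/pong: point the reload slot at the next period */
			davinci_pcm_enqueue_dma(substream);
		}
		davinci_pcm_period_elapsed(substream);
		spin_unlock(&prtd->lock);
		snd_pcm_period_elapsed(substream);
	}
}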
Example #3
static int davinci_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;

	if (prtd->ram_channel >= 0) {
		int ret = ping_pong_dma_setup(substream);
		if (ret < 0)
			return ret;

		edma_write_slot(prtd->ram_channel, &prtd->ram_params);
		edma_write_slot(prtd->asp_channel, &prtd->asp_params);

		print_buf_info(prtd->ram_channel, "ram_channel");
		print_buf_info(prtd->ram_link, "ram_link");
		print_buf_info(prtd->ram_link2, "ram_link2");
		print_buf_info(prtd->asp_channel, "asp_channel");
		print_buf_info(prtd->asp_link[0], "asp_link[0]");
		print_buf_info(prtd->asp_link[1], "asp_link[1]");

		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			/* copy 1st iram buffer */
			edma_start(prtd->ram_channel);
		}
		edma_start(prtd->asp_channel);
		return 0;
	}
	prtd->period = 0;
	davinci_pcm_enqueue_dma(substream);

	/* Copy self-linked parameter RAM entry into master channel */
	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	edma_write_slot(prtd->asp_channel, &prtd->asp_params);
	davinci_pcm_enqueue_dma(substream);
	edma_start(prtd->asp_channel);

	return 0;
}
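Both this prepare routine and the one in Example #1 rely on davinci_pcm_enqueue_dma() to point asp_link[0] at the next period of the ALSA buffer. A condensed, hedged sketch of that helper, reusing the parameter fields (data_type, acnt, fifo_level, dma_addr) seen in the other examples; the FIFO-side indexes are simplified to 0 here, whereas the real driver may program them according to the ASP FIFO layout:

/* Sketch only: program asp_link[0] to move the current period (non-ping-pong). */
static void davinci_pcm_enqueue_dma(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct davinci_runtime_data *prtd = runtime->private_data;
	unsigned int period_size = snd_pcm_lib_period_bytes(substream);
	dma_addr_t pos = runtime->dma_addr + prtd->period * period_size;
	unsigned int data_type = prtd->params->data_type;
	unsigned int fifo_level = prtd->params->fifo_level;
	unsigned int count = period_size / data_type;
	unsigned short acnt = prtd->params->acnt;
	int link = prtd->asp_link[0];

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		edma_set_src(link, pos, INCR, W8BIT);
		edma_set_dest(link, prtd->params->dma_addr, INCR, W8BIT);
		edma_set_src_index(link, data_type, data_type * fifo_level);
		edma_set_dest_index(link, 0, 0);
	} else {
		edma_set_src(link, prtd->params->dma_addr, INCR, W8BIT);
		edma_set_dest(link, pos, INCR, W8BIT);
		edma_set_src_index(link, 0, 0);
		edma_set_dest_index(link, data_type, data_type * fifo_level);
	}

	if (!fifo_level)
		edma_set_transfer_params(link, acnt, count, 1, 0, ASYNC);
	else
		edma_set_transfer_params(link, acnt, fifo_level,
				count / fifo_level, fifo_level, ABSYNC);
}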
Example #4
static int edma_memtomemcpy(int count, unsigned long src_addr, unsigned long trgt_addr, int dma_ch)
{
	int result = 0;
	struct edmacc_param param_set;

	edma_set_src(dma_ch, src_addr, INCR, W256BIT);
	edma_set_dest(dma_ch, trgt_addr, INCR, W256BIT);
	edma_set_src_index(dma_ch, 1, 1);
	edma_set_dest_index(dma_ch, 1, 1);
	/* A Sync Transfer Mode */
	edma_set_transfer_params(dma_ch, count, 1, 1, 1, ASYNC); //one block of one frame of one array of count bytes

	/* Enable intermediate and final transfer-completion interrupts on this channel */
	edma_read_slot(dma_ch, &param_set);
	param_set.opt |= ITCINTEN;
	param_set.opt |= TCINTEN;
	param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch));
	edma_write_slot(dma_ch, &param_set);
	irqraised1 = 0u;
	dma_comp.done = 0;
	result = edma_start(dma_ch);

	if (result != 0) {
		printk(KERN_ERR "%s: edma copy failed\n", DEVICE_NAME);
	}

	wait_for_completion(&dma_comp);

	/* Check the status of the completed transfer */
	if (irqraised1 < 0) {
		printk(KERN_ERR "%s: edma copy: Event Miss Occurred!\n", DEVICE_NAME);
		edma_stop(dma_ch);
		result = -EAGAIN;
	}

	return result;
}
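edma_memtomemcpy() blocks on dma_comp and then inspects irqraised1, but the callback that sets them is not shown. A minimal sketch of such a callback, assuming both objects are module-level state and that this function was passed as the edma_alloc_channel() callback for dma_ch; the handler name is hypothetical and the status constants were renamed in later kernels:

/* Sketch only: completion callback assumed to be registered via edma_alloc_channel(). */
static struct completion dma_comp;	/* init_completion() once at init time */
static volatile int irqraised1;

static void dma_irq_handler(unsigned lch, u16 ch_status, void *data)
{
	switch (ch_status) {
	case DMA_COMPLETE:
		irqraised1 = 1;		/* normal completion */
		break;
	case DMA_CC_ERROR:
		irqraised1 = -1;	/* treated as an event miss by the caller */
		break;
	default:
		break;
	}
	complete(&dma_comp);
}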
Example #5
static int trigger_dma_transfer_to_buf(struct cssp_cam_dev *dev, struct vb2_buffer *vb)
{
	dma_addr_t dma_buf = vb2_dma_contig_plane_dma_addr(vb, 0);

	if (!dma_buf) {
		/* Should not normally happen; release the vb2_buffer with an error. */
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
		dev->current_vb = NULL;
		return -ENOMEM;
	}

	dev->dma_tr_params.dst = dma_buf;

	// Program the transfer parameters (destination set above) into the channel's PaRAM slot
	edma_write_slot(dev->dma_ch, &dev->dma_tr_params);

	dev->current_vb = vb;

	// Enable data capture
	dev->mode |= ENABLE;
	writew(dev->mode, dev->reg_base_virt + REG_MODE);

	return 0;
}
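trigger_dma_transfer_to_buf() only arms the transfer; handing the filled buffer back to videobuf2 would presumably happen in the EDMA completion callback. A hedged sketch of that step; the callback name is hypothetical, the completion constant varies by kernel version, and requeueing the next buffer is only indicated in a comment:

/* Sketch only: return the completed capture buffer to videobuf2. */
static void cssp_cam_dma_callback(unsigned lch, u16 ch_status, void *data)
{
	struct cssp_cam_dev *dev = data;
	struct vb2_buffer *vb = dev->current_vb;

	if (ch_status != DMA_COMPLETE || !vb)
		return;

	dev->current_vb = NULL;
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);

	/* the driver would now dequeue its next vb2_buffer and call
	 * trigger_dma_transfer_to_buf() on it */
}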
Example #6
/* 1 asp tx or rx channel using 2 parameter RAM slots
 * 1 ram to/from iram channel using 1 parameter RAM slot
 *
 * Playback:
 * the ram copy channel kicks off first;
 * completion of the 1st ram copy of the entire iram buffer kicks off the asp channel;
 * each asp TCC then kicks off a ram copy of 1/2 the iram buffer
 *
 * Record:
 * the asp channel starts; its TCC kicks off the ram copy
 */
static int request_ping_pong(struct snd_pcm_substream *substream,
		struct davinci_runtime_data *prtd,
		struct snd_dma_buffer *iram_dma)
{
	dma_addr_t asp_src_ping;
	dma_addr_t asp_dst_ping;
	int ret;
	struct davinci_pcm_dma_params *params = prtd->params;

	/* Request ram master channel */
	ret = prtd->ram_channel = edma_alloc_channel(EDMA_CHANNEL_ANY,
				  davinci_pcm_dma_irq, substream,
				  prtd->params->ram_chan_q);
	if (ret < 0)
		goto exit1;

	/* Request ram link channel */
	ret = prtd->ram_link = edma_alloc_slot(
			EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY);
	if (ret < 0)
		goto exit2;

	ret = prtd->asp_link[1] = edma_alloc_slot(
			EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY);
	if (ret < 0)
		goto exit3;

	prtd->ram_link2 = -1;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ret = prtd->ram_link2 = edma_alloc_slot(
			EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY);
		if (ret < 0)
			goto exit4;
	}
	/* circle ping-pong buffers */
	edma_link(prtd->asp_link[0], prtd->asp_link[1]);
	edma_link(prtd->asp_link[1], prtd->asp_link[0]);
	/* circle ram buffers */
	edma_link(prtd->ram_link, prtd->ram_link);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		asp_src_ping = iram_dma->addr;
		asp_dst_ping = params->dma_addr;	/* fifo */
	} else {
		asp_src_ping = params->dma_addr;	/* fifo */
		asp_dst_ping = iram_dma->addr;
	}
	/* ping */
	edma_set_src(prtd->asp_link[0], asp_src_ping, INCR, W16BIT);
	edma_set_dest(prtd->asp_link[0], asp_dst_ping, INCR, W16BIT);
	edma_set_src_index(prtd->asp_link[0], 0, 0);
	edma_set_dest_index(prtd->asp_link[0], 0, 0);

	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f) | TCINTEN);
	prtd->asp_params.opt |= TCCHEN |
		EDMA_TCC(prtd->ram_channel & 0x3f);
	edma_write_slot(prtd->asp_link[0], &prtd->asp_params);

	/* pong */
	edma_set_src(prtd->asp_link[1], asp_src_ping, INCR, W16BIT);
	edma_set_dest(prtd->asp_link[1], asp_dst_ping, INCR, W16BIT);
	edma_set_src_index(prtd->asp_link[1], 0, 0);
	edma_set_dest_index(prtd->asp_link[1], 0, 0);

	edma_read_slot(prtd->asp_link[1], &prtd->asp_params);
	prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f));
	/* interrupt after every pong completion */
	prtd->asp_params.opt |= TCINTEN | TCCHEN |
		EDMA_TCC(prtd->ram_channel & 0x3f);
	edma_write_slot(prtd->asp_link[1], &prtd->asp_params);

	/* ram */
	edma_set_src(prtd->ram_link, iram_dma->addr, INCR, W32BIT);
	edma_set_dest(prtd->ram_link, iram_dma->addr, INCR, W32BIT);
	pr_debug("%s: audio dma channels/slots in use for ram:%u %u %u, "
		"for asp:%u %u %u\n", __func__,
		prtd->ram_channel, prtd->ram_link, prtd->ram_link2,
		prtd->asp_channel, prtd->asp_link[0],
		prtd->asp_link[1]);
	return 0;
exit4:
	edma_free_slot(prtd->asp_link[1]);
	prtd->asp_link[1] = -1;
exit3:
	edma_free_slot(prtd->ram_link);
	prtd->ram_link = -1;
exit2:
	edma_free_channel(prtd->ram_channel);
	prtd->ram_channel = -1;
exit1:
	return ret;
}
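The exit labels above only unwind a failed request. Once the stream is torn down, every channel and slot obtained here and in davinci_pcm_dma_request() also has to be released with the matching legacy EDMA calls (edma_unlink(), edma_free_slot(), edma_free_channel()). A hedged sketch of that teardown; the function name is hypothetical:

/* Sketch only: release everything allocated by the two request functions above. */
static void davinci_pcm_free_dma(struct davinci_runtime_data *prtd)
{
	if (prtd->asp_channel >= 0)
		edma_stop(prtd->asp_channel);
	if (prtd->ram_channel >= 0)
		edma_stop(prtd->ram_channel);

	if (prtd->asp_link[0] >= 0)
		edma_unlink(prtd->asp_link[0]);
	if (prtd->asp_link[1] >= 0)
		edma_unlink(prtd->asp_link[1]);
	if (prtd->ram_link >= 0)
		edma_unlink(prtd->ram_link);

	if (prtd->asp_link[0] >= 0)
		edma_free_slot(prtd->asp_link[0]);
	if (prtd->asp_link[1] >= 0)
		edma_free_slot(prtd->asp_link[1]);
	if (prtd->ram_link >= 0)
		edma_free_slot(prtd->ram_link);
	if (prtd->ram_link2 >= 0)
		edma_free_slot(prtd->ram_link2);

	if (prtd->asp_channel >= 0)
		edma_free_channel(prtd->asp_channel);
	if (prtd->ram_channel >= 0)
		edma_free_channel(prtd->ram_channel);
}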
Example #7
/*
 * Only used with ping/pong.
 * This is called after runtime->dma_addr, period_bytes and data_type are valid
 */
static int ping_pong_dma_setup(struct snd_pcm_substream *substream)
{
	unsigned short ram_src_cidx, ram_dst_cidx;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct davinci_runtime_data *prtd = runtime->private_data;
	struct snd_dma_buffer *iram_dma =
		(struct snd_dma_buffer *)substream->dma_buffer.private_data;
	struct davinci_pcm_dma_params *params = prtd->params;
	unsigned int data_type = params->data_type;
	unsigned int acnt = params->acnt;
	/* divide by 2 for ping/pong */
	unsigned int ping_size = snd_pcm_lib_period_bytes(substream) >> 1;
	unsigned int fifo_level = prtd->params->fifo_level;
	unsigned int count;
	if ((data_type == 0) || (data_type > 4)) {
		printk(KERN_ERR "%s: data_type=%i\n", __func__, data_type);
		return -EINVAL;
	}
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dma_addr_t asp_src_pong = iram_dma->addr + ping_size;
		ram_src_cidx = ping_size;
		ram_dst_cidx = -ping_size;
		edma_set_src(prtd->asp_link[1], asp_src_pong, INCR, W8BIT);

		edma_set_src_index(prtd->asp_link[0], data_type,
				data_type * fifo_level);
		edma_set_src_index(prtd->asp_link[1], data_type,
				data_type * fifo_level);

		edma_set_src(prtd->ram_link, runtime->dma_addr, INCR, W32BIT);
	} else {
		dma_addr_t asp_dst_pong = iram_dma->addr + ping_size;
		ram_src_cidx = -ping_size;
		ram_dst_cidx = ping_size;
		edma_set_dest(prtd->asp_link[1], asp_dst_pong, INCR, W8BIT);

		edma_set_dest_index(prtd->asp_link[0], data_type,
				data_type * fifo_level);
		edma_set_dest_index(prtd->asp_link[1], data_type,
				data_type * fifo_level);

		edma_set_dest(prtd->ram_link, runtime->dma_addr, INCR, W32BIT);
	}

	if (!fifo_level) {
		count = ping_size / data_type;
		edma_set_transfer_params(prtd->asp_link[0], acnt, count,
				1, 0, ASYNC);
		edma_set_transfer_params(prtd->asp_link[1], acnt, count,
				1, 0, ASYNC);
	} else {
		count = ping_size / (data_type * fifo_level);
		edma_set_transfer_params(prtd->asp_link[0], acnt, fifo_level,
				count, fifo_level, ABSYNC);
		edma_set_transfer_params(prtd->asp_link[1], acnt, fifo_level,
				count, fifo_level, ABSYNC);
	}

	edma_set_src_index(prtd->ram_link, ping_size, ram_src_cidx);
	edma_set_dest_index(prtd->ram_link, ping_size, ram_dst_cidx);
	edma_set_transfer_params(prtd->ram_link, ping_size, 2,
			runtime->periods, 2, ASYNC);

	/* init master params */
	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	edma_read_slot(prtd->ram_link, &prtd->ram_params);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		struct edmacc_param p_ram;
		/* Copy entire iram buffer before playback started */
		prtd->ram_params.a_b_cnt = (1 << 16) | (ping_size << 1);
		/* 0 dst_bidx */
		prtd->ram_params.src_dst_bidx = (ping_size << 1);
		/* 0 dst_cidx */
		prtd->ram_params.src_dst_cidx = (ping_size << 1);
		prtd->ram_params.ccnt = 1;

		/* Skip 1st period */
		edma_read_slot(prtd->ram_link, &p_ram);
		p_ram.src += (ping_size << 1);
		p_ram.ccnt -= 1;
		edma_write_slot(prtd->ram_link2, &p_ram);
		/*
		 * When 1st started, ram -> iram dma channel will fill the
		 * entire iram.  Then, whenever a ping/pong asp buffer finishes,
		 * 1/2 iram will be filled.
		 */
		prtd->ram_params.link_bcntrld =
			EDMA_CHAN_SLOT(prtd->ram_link2) << 5;
	}
	return 0;
}
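To make the ASYNC/ABSYNC split above concrete, a hypothetical sizing example (the numbers are illustrative, not taken from the driver):

/*
 * Suppose period_bytes = 4096, so ping_size = 2048, with 16-bit samples
 * (data_type = 2, acnt = 2):
 *
 *   fifo_level == 0: ASYNC,  bcnt = ping_size / data_type = 1024, ccnt = 1
 *                    -> one sample moved per DMA event, 1024 events per ping
 *   fifo_level == 8: ABSYNC, bcnt = 8, ccnt = 2048 / (2 * 8) = 128
 *                    -> eight samples moved per event, 128 events per ping
 *
 * The ram_link slot moves ping_size bytes per trigger (acnt = ping_size,
 * bcnt = 2, ccnt = runtime->periods, ASYNC), alternating ping and pong
 * halves via the +/- ping_size cidx values programmed above.
 */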
Example #8
static int edma_config(void)
{
    // one DMA transfer moves 8 KB of 16-bit data (A-synchronized: acnt = 8192 bytes, bcnt = ccnt = 1)
    static int acnt = 4096*2;
    static int bcnt = 1;
    static int ccnt = 1;

    int result = 0;
    unsigned int BRCnt = 0;
    int srcbidx = 0;
    int desbidx = 0;
    int srccidx = 0;
    int descidx = 0;
    struct edmacc_param param_set;

    printk("Initializing dma transfer...\n");

    // set dest memory
    fpga_buf  = dma_alloc_coherent (NULL, MAX_DMA_TRANSFER_IN_BYTES, &dmaphysdest, 0);
    if (!fpga_buf) {
        printk ("dma_alloc_coherent failed for physdest\n");
        return -ENOMEM;
    }

    /* Set B count reload as B count. */
    BRCnt = bcnt;

    /* Setting up the SRC/DES Index */
    srcbidx = 0;
    desbidx = acnt;

    /* A Sync Transfer Mode */
    srccidx = 0;
    descidx = acnt;
 
    // gpmc channel
    result = edma_alloc_channel (52, callback1, NULL, 0);
    
    if (result < 0) {
        printk ("edma_alloc_channel failed, error: %d\n", result);
        dma_free_coherent(NULL, MAX_DMA_TRANSFER_IN_BYTES, fpga_buf, dmaphysdest);
        return result;
    }
     
    dma_ch = result;
    edma_set_src (dma_ch, (unsigned long)(gpmc_membase), INCR, W16BIT);
    edma_set_dest (dma_ch, (unsigned long)(dmaphysdest), INCR, W16BIT);
    edma_set_src_index (dma_ch, srcbidx, srccidx);   // GPMC source acts as a FIFO: no index increment
    edma_set_dest_index (dma_ch, desbidx, descidx);  // destination advances by acnt per array (A-sync)

    // A Sync Transfer Mode
    edma_set_transfer_params (dma_ch, acnt, bcnt, ccnt, BRCnt, ASYNC);

    /* Enable intermediate and final transfer-completion interrupts on this channel */
    edma_read_slot (dma_ch, &param_set);
    param_set.opt |= (1 << ITCINTEN_SHIFT);
    param_set.opt |= (1 << TCINTEN_SHIFT);
    param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch));
    edma_write_slot (dma_ch, &param_set);

    return 0;
}
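edma_config() leaves the channel armed for the hardware event but the matching teardown is not shown; the coherent buffer and the channel still need to be released when the driver is unloaded. A hedged sketch of a cleanup that mirrors the allocations (the function name is an assumption):

/* Sketch only: undo the allocations made in edma_config(). */
static void edma_cleanup(void)
{
    if (dma_ch >= 0) {
        edma_stop(dma_ch);
        edma_free_channel(dma_ch);
        dma_ch = -1;
    }
    if (fpga_buf) {
        dma_free_coherent(NULL, MAX_DMA_TRANSFER_IN_BYTES, fpga_buf, dmaphysdest);
        fpga_buf = NULL;
    }
}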
Example #9
int logi_dma_copy(struct drvr_mem* mem_dev, unsigned long trgt_addr,
		  unsigned long src_addr, int count)
{
	int result = 0;

#ifdef USE_DMA_ENGINE
	struct dma_chan *chan;
	struct dma_device *dev;
	struct dma_async_tx_descriptor *tx;
	unsigned long flags;

	chan = mem_dev->dma.chan;
	dev = chan->device;
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	tx = dev->device_prep_dma_memcpy(chan, trgt_addr, src_addr, count, flags);

	if (!tx) {
		DBG_LOG("device_prep_dma_memcpy failed\n");
		return -ENODEV;
	}

	irqraised1 = 0u;
	dma_comp.done = 0;
	/* set the callback and submit the transaction */
	tx->callback = dma_callback;
	tx->callback_param = mem_dev;
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
#else
	struct edmacc_param param_set;
	int dma_ch = mem_dev->dma.dma_chan;

	edma_set_src(dma_ch, src_addr, INCR, W256BIT);
	edma_set_dest(dma_ch, trgt_addr, INCR, W256BIT);
	edma_set_src_index(dma_ch, 1, 1);
	edma_set_dest_index(dma_ch, 1, 1);
	/* A Sync Transfer Mode */
	edma_set_transfer_params(dma_ch, count, 1, 1, 1, ASYNC);//one block of one frame of one array of count bytes

	/* Enable intermediate and final transfer-completion interrupts on this channel */
	edma_read_slot(dma_ch, &param_set);
	param_set.opt |= ITCINTEN;
	param_set.opt |= TCINTEN;
	param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch));
	edma_write_slot(dma_ch, &param_set);
	irqraised1 = 0u;
	dma_comp.done = 0;
	result = edma_start(dma_ch);

	if (result != 0) {
		DBG_LOG("edma copy failed\n");
		return result;
	}

#endif /* USE_DMA_ENGINE */

	wait_for_completion(&dma_comp);

	/* Check the status of the completed transfer */

	if (irqraised1 < 0) {
		DBG_LOG("edma copy: Event Miss Occurred!\n");
#ifdef USE_DMA_ENGINE
		dmaengine_terminate_all(chan);
#else
		edma_stop(dma_ch);
#endif /* USE_DMA_ENGINE */
		result = -EAGAIN;
	}

	return result;
}
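On the USE_DMA_ENGINE path, the callback installed on the descriptor has the standard dmaengine prototype (a single void * argument), unlike the three-argument legacy EDMA callback used in the #else branch. A minimal sketch, assuming irqraised1 and dma_comp are the same module-level objects used above:

/* Sketch only: dmaengine completion callback for the USE_DMA_ENGINE path. */
static void dma_callback(void *param)
{
	/* dmaengine passes no status here; if error reporting is needed,
	 * dmaengine_tx_status(chan, cookie, NULL) can be queried instead */
	irqraised1 = 1;
	complete(&dma_comp);
}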