Example #1
static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
{
	struct snd_dma_buffer *iram_dma;
	struct davinci_runtime_data *prtd = substream->runtime->private_data;
	struct davinci_pcm_dma_params *params = prtd->params;
	int ret;

	if (!params)
		return -ENODEV;

	/* Request asp master DMA channel */
	ret = prtd->asp_channel = edma_alloc_channel(params->channel,
			davinci_pcm_dma_irq, substream,
			prtd->params->asp_chan_q);
	if (ret < 0)
		goto exit1;

	/* Request asp link channels */
	ret = prtd->asp_link[0] = edma_alloc_slot(
			EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY);
	if (ret < 0)
		goto exit2;

	iram_dma = (struct snd_dma_buffer *)substream->dma_buffer.private_data;
	if (iram_dma) {
		if (request_ping_pong(substream, prtd, iram_dma) == 0)
			return 0;
		printk(KERN_WARNING "%s: dma channel allocation failed, not using sram\n",
				__func__);
	}

	/* Issue transfer completion IRQ when the channel completes a
	 * transfer, then always reload from the same slot (by a kind
	 * of loopback link).  The completion IRQ handler will update
	 * the reload slot with a new buffer.
	 *
	 * REVISIT save p_ram here after setting up everything except
	 * the buffer and its length (ccnt) ... use it as a template
	 * so davinci_pcm_enqueue_dma() takes less time in IRQ.
	 */
	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	prtd->asp_params.opt |= TCINTEN |
		EDMA_TCC(EDMA_CHAN_SLOT(prtd->asp_channel));
	prtd->asp_params.link_bcntrld = EDMA_CHAN_SLOT(prtd->asp_link[0]) << 5;
	edma_write_slot(prtd->asp_link[0], &prtd->asp_params);
	return 0;
exit2:
	edma_free_channel(prtd->asp_channel);
	prtd->asp_channel = -1;
exit1:
	return ret;
}
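The completion handler davinci_pcm_dma_irq() registered above is not shown in this example. Below is a minimal sketch of what such a handler could look like, assuming the classic DaVinci private EDMA callback signature (channel, status word, cookie) and that the only work required is advancing the ALSA ring buffer; the exact status constant name (DMA_COMPLETE vs. EDMA_DMA_COMPLETE) depends on the kernel version.

/* Hypothetical sketch, not the driver's actual handler */
static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data)
{
	struct snd_pcm_substream *substream = data;

	/* Act only on successful transfer completion; error/missed events
	 * are ignored in this sketch. */
	if (ch_status != DMA_COMPLETE)
		return;

	/* Tell ALSA that another period has been consumed/produced */
	snd_pcm_period_elapsed(substream);
}

In the real driver the handler also re-queues the next period of audio data, so treat this purely as an illustration of the callback shape.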
Example #2
static int edma_memtomemcpy(int count, unsigned long src_addr, unsigned long trgt_addr, int dma_ch)
{
	int result = 0;
	struct edmacc_param param_set;

	edma_set_src(dma_ch, src_addr, INCR, W256BIT);
	edma_set_dest(dma_ch, trgt_addr, INCR, W256BIT);
	edma_set_src_index(dma_ch, 1, 1);
	edma_set_dest_index(dma_ch, 1, 1);
	/* A Sync Transfer Mode */
	edma_set_transfer_params(dma_ch, count, 1, 1, 1, ASYNC); /* one block of one frame of one array of count bytes */

	/* Enable the interrupts on the transfer channel */
	edma_read_slot(dma_ch, &param_set);
	param_set.opt |= ITCINTEN;
	param_set.opt |= TCINTEN;
	param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch));
	edma_write_slot(dma_ch, &param_set);
	irqraised1 = 0u;
	dma_comp.done = 0;
	result = edma_start(dma_ch);

	if (result != 0) {
		printk(KERN_ERR "%s: edma copy failed\n", DEVICE_NAME);
		return result;
	}

	wait_for_completion(&dma_comp);

	/* Check the status of the completed transfer */
	if (irqraised1 < 0) {
		printk("%s: edma copy: Event Miss Occured!!!\n", DEVICE_NAME);
		edma_stop(dma_ch);
		result = -EAGAIN;
	}

	return result;
}
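The example relies on the globals irqraised1 and dma_comp and on a completion callback registered when the channel was allocated, none of which are shown. A possible sketch follows, assuming the usual edma_alloc_channel() callback signature; the name edma_memcpy_callback and the exact status constants are assumptions.

static struct completion dma_comp;	/* init_completion(&dma_comp) at init time */
static volatile int irqraised1;		/* 1 = transfer done, -1 = error/missed event */

static void edma_memcpy_callback(unsigned channel, u16 ch_status, void *data)
{
	/* Status constant names differ between kernel versions
	 * (DMA_COMPLETE/DMA_CC_ERROR vs. EDMA_DMA_COMPLETE/EDMA_DMA_CC_ERROR). */
	if (ch_status == DMA_COMPLETE)
		irqraised1 = 1;
	else
		irqraised1 = -1;

	complete(&dma_comp);	/* wake up edma_memtomemcpy() */
}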
Example #3
/* 1 asp tx or rx channel using 2 parameter channels
 * 1 ram to/from iram channel using 1 parameter channel
 *
 * Playback
 * ram copy channel kicks off first,
 * 1st ram copy of entire iram buffer completion kicks off asp channel
 * asp tcc always kicks off ram copy of 1/2 iram buffer
 *
 * Record
 * asp channel starts, tcc kicks off ram copy
 */
static int request_ping_pong(struct snd_pcm_substream *substream,
		struct davinci_runtime_data *prtd,
		struct snd_dma_buffer *iram_dma)
{
	dma_addr_t asp_src_ping;
	dma_addr_t asp_dst_ping;
	int ret;
	struct davinci_pcm_dma_params *params = prtd->params;

	/* Request ram master channel */
	ret = prtd->ram_channel = edma_alloc_channel(EDMA_CHANNEL_ANY,
				  davinci_pcm_dma_irq, substream,
				  prtd->params->ram_chan_q);
	if (ret < 0)
		goto exit1;

	/* Request ram link channel */
	ret = prtd->ram_link = edma_alloc_slot(
			EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY);
	if (ret < 0)
		goto exit2;

	ret = prtd->asp_link[1] = edma_alloc_slot(
			EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY);
	if (ret < 0)
		goto exit3;

	prtd->ram_link2 = -1;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ret = prtd->ram_link2 = edma_alloc_slot(
			EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY);
		if (ret < 0)
			goto exit4;
	}
	/* circle ping-pong buffers */
	edma_link(prtd->asp_link[0], prtd->asp_link[1]);
	edma_link(prtd->asp_link[1], prtd->asp_link[0]);
	/* circle ram buffers */
	edma_link(prtd->ram_link, prtd->ram_link);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		asp_src_ping = iram_dma->addr;
		asp_dst_ping = params->dma_addr;	/* fifo */
	} else {
		asp_src_ping = params->dma_addr;	/* fifo */
		asp_dst_ping = iram_dma->addr;
	}
	/* ping */
	edma_set_src(prtd->asp_link[0], asp_src_ping, INCR, W16BIT);
	edma_set_dest(prtd->asp_link[0], asp_dst_ping, INCR, W16BIT);
	edma_set_src_index(prtd->asp_link[0], 0, 0);
	edma_set_dest_index(prtd->asp_link[0], 0, 0);

	edma_read_slot(prtd->asp_link[0], &prtd->asp_params);
	prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f) | TCINTEN);
	prtd->asp_params.opt |= TCCHEN |
		EDMA_TCC(prtd->ram_channel & 0x3f);
	edma_write_slot(prtd->asp_link[0], &prtd->asp_params);

	/* pong */
	edma_set_src(prtd->asp_link[1], asp_src_ping, INCR, W16BIT);
	edma_set_dest(prtd->asp_link[1], asp_dst_ping, INCR, W16BIT);
	edma_set_src_index(prtd->asp_link[1], 0, 0);
	edma_set_dest_index(prtd->asp_link[1], 0, 0);

	edma_read_slot(prtd->asp_link[1], &prtd->asp_params);
	prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f));
	/* interrupt after every pong completion */
	prtd->asp_params.opt |= TCINTEN | TCCHEN |
		EDMA_TCC(prtd->ram_channel & 0x3f);
	edma_write_slot(prtd->asp_link[1], &prtd->asp_params);

	/* ram */
	edma_set_src(prtd->ram_link, iram_dma->addr, INCR, W32BIT);
	edma_set_dest(prtd->ram_link, iram_dma->addr, INCR, W32BIT);
	pr_debug("%s: audio dma channels/slots in use for ram:%u %u %u,"
		"for asp:%u %u %u\n", __func__,
		prtd->ram_channel, prtd->ram_link, prtd->ram_link2,
		prtd->asp_channel, prtd->asp_link[0],
		prtd->asp_link[1]);
	return 0;
exit4:
	edma_free_channel(prtd->asp_link[1]);
	prtd->asp_link[1] = -1;
exit3:
	edma_free_channel(prtd->ram_link);
	prtd->ram_link = -1;
exit2:
	edma_free_channel(prtd->ram_channel);
	prtd->ram_channel = -1;
exit1:
	return ret;
}
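request_ping_pong() allocates one extra master channel and up to three PaRAM slots on top of those from Example #1. The matching teardown is not shown; below is a minimal sketch (hypothetical helper name, assuming unused fields hold -1) that returns slots with edma_free_slot() and the master channel with edma_free_channel().

/* Hypothetical cleanup helper for the resources allocated above */
static void release_ping_pong(struct davinci_runtime_data *prtd)
{
	if (prtd->ram_link2 >= 0)
		edma_free_slot(prtd->ram_link2);
	if (prtd->ram_link >= 0)
		edma_free_slot(prtd->ram_link);
	if (prtd->asp_link[1] >= 0)
		edma_free_slot(prtd->asp_link[1]);
	if (prtd->ram_channel >= 0)
		edma_free_channel(prtd->ram_channel);

	prtd->ram_link2 = prtd->ram_link = prtd->asp_link[1] = -1;
	prtd->ram_channel = -1;
}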
Example #4
static int edma_config(void)
{
    /* one DMA transfer = 8 KB of 16-bit data (acnt = 4096*2 bytes, A-sync) */
    static int acnt = 4096*2;
    static int bcnt = 1;
    static int ccnt = 1;

    int result = 0;
    unsigned int BRCnt = 0;
    int srcbidx = 0;
    int desbidx = 0;
    int srccidx = 0;
    int descidx = 0;
    struct edmacc_param param_set;

    printk("Initializing dma transfer...\n");

    // set dest memory
    fpga_buf  = dma_alloc_coherent (NULL, MAX_DMA_TRANSFER_IN_BYTES, &dmaphysdest, 0);
    if (!fpga_buf) {
        printk ("dma_alloc_coherent failed for physdest\n");
        return -ENOMEM;
    }

    /* Set B count reload as B count. */
    BRCnt = bcnt;

    /* Setting up the SRC/DES Index */
    srcbidx = 0;
    desbidx = acnt;

    /* A Sync Transfer Mode */
    srccidx = 0;
    descidx = acnt;
 
    // gpmc channel
    result = edma_alloc_channel (52, callback1, NULL, 0);
    
    if (result < 0) {
        printk ("edma_alloc_channel failed, error:%d", result);
        return result;
    }
     
    dma_ch = result;
    edma_set_src (dma_ch, (unsigned long)(gpmc_membase), INCR, W16BIT);
    edma_set_dest (dma_ch, (unsigned long)(dmaphysdest), INCR, W16BIT);
    edma_set_src_index (dma_ch, srcbidx, srccidx);   // use fifo, set zero
    edma_set_dest_index (dma_ch, desbidx, descidx);  // A mode

    // A Sync Transfer Mode
    edma_set_transfer_params (dma_ch, acnt, bcnt, ccnt, BRCnt, ASYNC);

    /* Enable the interrupts on the allocated channel */
    edma_read_slot (dma_ch, &param_set);
    param_set.opt |= (1 << ITCINTEN_SHIFT);
    param_set.opt |= (1 << TCINTEN_SHIFT);
    param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch));
    edma_write_slot (dma_ch, &param_set);

    return 0;
}
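edma_config() only prepares the channel; it never starts a transfer, and the coherent buffer and channel stay allocated. A matching teardown sketch, reusing the module-level fpga_buf, dmaphysdest, dma_ch and MAX_DMA_TRANSFER_IN_BYTES from above (the name edma_unconfig is hypothetical):

static void edma_unconfig(void)
{
    /* Stop any transfer still in flight and give the channel back */
    edma_stop (dma_ch);
    edma_free_channel (dma_ch);

    /* Release the coherent destination buffer allocated in edma_config() */
    if (fpga_buf) {
        dma_free_coherent (NULL, MAX_DMA_TRANSFER_IN_BYTES, fpga_buf, dmaphysdest);
        fpga_buf = NULL;
    }
}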
Example #5
int logi_dma_copy(struct drvr_mem* mem_dev, unsigned long trgt_addr,
		  unsigned long src_addr, int count)
{
	int result = 0;

#ifdef USE_DMA_ENGINE
	struct dma_chan *chan;
	struct dma_device *dev;
	struct dma_async_tx_descriptor *tx;
	unsigned long flags;

	chan = mem_dev->dma.chan;
	dev = chan->device;
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	tx = dev->device_prep_dma_memcpy(chan, trgt_addr, src_addr, count, flags);

	if (!tx) {
		DBG_LOG("device_prep_dma_memcpy failed\n");
		return -ENODEV;
	}

	irqraised1 = 0u;
	dma_comp.done = 0;
	/* set the callback and submit the transaction */
	tx->callback = dma_callback;
	tx->callback_param = mem_dev;
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
#else
	struct edmacc_param param_set;
	int dma_ch = mem_dev->dma.dma_chan;

	edma_set_src(dma_ch, src_addr, INCR, W256BIT);
	edma_set_dest(dma_ch, trgt_addr, INCR, W256BIT);
	edma_set_src_index(dma_ch, 1, 1);
	edma_set_dest_index(dma_ch, 1, 1);
	/* A Sync Transfer Mode */
	edma_set_transfer_params(dma_ch, count, 1, 1, 1, ASYNC); /* one block of one frame of one array of count bytes */

	/* Enable the interrupts on the transfer channel */
	edma_read_slot(dma_ch, &param_set);
	param_set.opt |= ITCINTEN;
	param_set.opt |= TCINTEN;
	param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch));
	edma_write_slot(dma_ch, &param_set);
	irqraised1 = 0u;
	dma_comp.done = 0;
	result = edma_start(dma_ch);

	if (result != 0) {
		DBG_LOG("edma copy failed\n");
		return result;
	}

#endif /* USE_DMA_ENGINE */

	wait_for_completion(&dma_comp);

	/* Check the status of the completed transfer */

	if (irqraised1 < 0) {
		DBG_LOG("edma copy: Event Miss Occured!!!\n");
#ifdef USE_DMA_ENGINE
		dmaengine_terminate_all(chan);
#else
		edma_stop(dma_ch);
#endif /* USE_DMA_ENGINE */
		result = -EAGAIN;
	}

	return result;
}
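In the USE_DMA_ENGINE build, mem_dev->dma.chan has to be obtained from the dmaengine framework before logi_dma_copy() can be called. A setup sketch follows, assuming any memcpy-capable channel is acceptable; the helper name logi_dma_init is hypothetical, and the channel would be released with dma_release_channel() on teardown.

#ifdef USE_DMA_ENGINE
static int logi_dma_init(struct drvr_mem *mem_dev)
{
	dma_cap_mask_t mask;

	/* Ask dmaengine for any channel that can do memory-to-memory copies */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	mem_dev->dma.chan = dma_request_channel(mask, NULL, NULL);
	if (!mem_dev->dma.chan)
		return -ENODEV;

	init_completion(&dma_comp);
	return 0;
}
#endif /* USE_DMA_ENGINE */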
Example #6
/* 2 DMA Channels Chained, Mem-2-Mem Copy, A-SYNC Mode, INCR Mode */
int kSt_edma_memtomemcpytest_chain(int acnt, int bcnt, int ccnt, int sync_mode,
				    int event_queue)
{
	int result = 0;
	unsigned int dma_ch1 = 0;
	unsigned int dma_ch2 = 0;
	unsigned int tcc1 = ST_EDMA_TCC_ANY;
	unsigned int tcc2 = ST_EDMA_TCC_ANY;
	int i;
	int count = 0;
	unsigned int Istestpassed1 = 0u;
	unsigned int Istestpassed2 = 0u;
	unsigned int numenabled = 0;
	unsigned int BRCnt = 0;
	int srcbidx = 0;
	int desbidx = 0;
	int srccidx = 0;
	int descidx = 0;
	st_edma_param_set param_set;
	s32 trial = 0;

	for (trial = 0; trial <= MAX_TRIALS; trial++)
	{

		/* Initialize source and destination buffers */
		for (count = 0u; count < (acnt * bcnt * ccnt); count++) {
			dmabufsrc1[count] = 'A' + (count % 26);
			dmabufdest1[count] = 0;

			dmabufsrc2[count] = 'A' + (count % 26);
			dmabufdest2[count] = 0;
		}

		/* Set B count reload as B count. */
		BRCnt = bcnt;

		/* Setting up the SRC/DES Index */
		srcbidx = acnt;
		desbidx = acnt;

		if (sync_mode == ASYNC) {
			/* A Sync Transfer Mode */
			srccidx = acnt;
			descidx = acnt;
			result = kSt_davinci_request_dma(ST_EDMA_CHANNEL_ANY, "A-SYNC_DMA0",
						kSt_callback1, NULL, &dma_ch1, &tcc1, event_queue);
		} else if (sync_mode == ABSYNC) {
			/* AB Sync Transfer Mode */
			srccidx = acnt * bcnt;
			descidx = acnt * bcnt;
			result = kSt_davinci_request_dma(ST_EDMA_CHANNEL_ANY, "AB-SYNC_DMA0",
						kSt_callback1, NULL, &dma_ch1, &tcc1, event_queue);
		} else {
			TEST_PRINT_ERR(" Invalid Transfer mode \n");
		}

		if (FAILURE == result) {
			TEST_PRINT_ERR("edma_test_chain::davinci_request_dma failed for dma_ch1, error:%d", result);
			return result;
		}

		kSt_davinci_set_dma_src_params(dma_ch1, (unsigned long)(dmaphyssrc1),
					   INCR, W8BIT);

		kSt_davinci_set_dma_dest_params(dma_ch1, (unsigned long)(dmaphysdest1),
					    INCR, W8BIT);

		kSt_davinci_set_dma_src_index(dma_ch1, srcbidx, srccidx);

		kSt_davinci_set_dma_dest_index(dma_ch1, desbidx, descidx);

		if (sync_mode == ASYNC) {
			/* A Sync Transfer Mode */
			kSt_davinci_set_dma_transfer_params(dma_ch1, acnt, bcnt, ccnt, BRCnt, ASYNC);
		} else if (sync_mode == ABSYNC) {
			/* AB Sync Transfer Mode */
			kSt_davinci_set_dma_transfer_params(dma_ch1, acnt, bcnt, ccnt, BRCnt, ABSYNC);
		} else {
			TEST_PRINT_ERR(" Invalid Transfer mode \n");
		}


		if (sync_mode == ASYNC) {
			/* Request Another DMA Channel */
			result = kSt_davinci_request_dma(ST_EDMA_CHANNEL_ANY, "AB-SYNC_DMA0",
						kSt_callback2, NULL, &dma_ch2, &tcc2, event_queue);
		} else if (sync_mode == ABSYNC) {
			/* Request Another DMA Channel */
			result = kSt_davinci_request_dma(ST_EDMA_CHANNEL_ANY, "AB-SYNC_DMA0",
						kSt_callback2, NULL, &dma_ch2, &tcc2, event_queue);
		} else {
			TEST_PRINT_ERR(" Invalid Transfer mode \n");
		}

		if (FAILURE == result) {
			TEST_PRINT_ERR("edma_test_chain::davinci_request_dma failed for dma_ch2, error:%d", result);

			kSt_davinci_free_dma(dma_ch1);
			return result;
		}

		kSt_davinci_set_dma_src_params(dma_ch2, (unsigned long)(dmaphyssrc2),
					   INCR, W8BIT);

		kSt_davinci_set_dma_dest_params(dma_ch2, (unsigned long)(dmaphysdest2),
					    INCR, W8BIT);

		kSt_davinci_set_dma_src_index(dma_ch2, srcbidx, srccidx);

		kSt_davinci_set_dma_dest_index(dma_ch2, desbidx, descidx);

		if (sync_mode == ASYNC) {
			/* A Sync Transfer Mode */
			kSt_davinci_set_dma_transfer_params(dma_ch2, acnt, bcnt, ccnt, BRCnt, ASYNC);
		} else if (sync_mode == ABSYNC) {
			/* AB Sync Transfer Mode */
			kSt_davinci_set_dma_transfer_params(dma_ch2, acnt, bcnt, ccnt, BRCnt, ABSYNC);
		} else {
			TEST_PRINT_ERR(" Invalid Transfer mode \n");
		}

		/* Chain both the channels */
		kSt_davinci_get_dma_params(dma_ch1, &param_set);
		param_set.opt |= (1 << TCCHEN_SHIFT);
		param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch2));
		kSt_davinci_set_dma_params(dma_ch1, &param_set);
		
		/* Enable the Intermediate and Final Interrupts on Channel 1.
		 * Also, Enable the Intermediate Chaining.
		 */
		kSt_davinci_get_dma_params(dma_ch1, &param_set);
		param_set.opt |= (1 << ITCCHEN_SHIFT);
		param_set.opt |= (1 << TCINTEN_SHIFT);
		param_set.opt |= (1 << ITCINTEN_SHIFT);
		kSt_davinci_set_dma_params(dma_ch1, &param_set);

		/* Enable the Intermediate and Final Interrupts on Channel 2 */
		kSt_davinci_get_dma_params(dma_ch2, &param_set);
		param_set.opt |= (1 << TCINTEN_SHIFT);
		param_set.opt |= (1 << ITCINTEN_SHIFT);
		kSt_davinci_set_dma_params(dma_ch2, &param_set);

		if (sync_mode == ASYNC) {
			numenabled = bcnt * ccnt;
		} else if (sync_mode == ABSYNC) {
			numenabled = ccnt;
		} else {
			TEST_PRINT_ERR(" Invalid Transfer mode \n");
		}

		for (i = 0; i < numenabled; i++) {
			irqraised2 = 0;

			/* Now enable the transfer for Master channel as many times
			 * as calculated above.
			 */
			result = kSt_davinci_start_dma(dma_ch1);
			if (result != 0) {
				TEST_PRINT_ERR("edma_test_chain: kSt_davinci_start_dma failed ");

				kSt_davinci_stop_dma(dma_ch1);
				kSt_davinci_free_dma(dma_ch1);
				kSt_davinci_free_dma(dma_ch2);
				return result;
			}

			/* Transfer on the master channel (ch1Id) will finish after some
			 * time.
			 * Now, because of the enabling of intermediate chaining on channel
			 * 1, after the transfer gets over, a sync event will be sent
			 * to channel 2, which will trigger the transfer on it.
			 * Also, Final and Intermediate Transfer Complete
			 * Interrupts are enabled on channel 2, so we should wait for the
			 * completion ISR on channel 2 first, before proceeding
			 * ahead.
			 */
			while (irqraised2 == 0u)
				cpu_relax();

			/* Check the status of the completed transfer */
			if (irqraised2 < 0) {
				/* Some error occurred, break from the for loop. */
				TEST_PRINT_ERR("edma_test_chain: "
					   "Event Miss Occurred!!!");
				break;
			}

		}

		/* Match the Source and Destination Buffers. */
		for (i = 0; i < (acnt * bcnt * ccnt); i++) {
			if (dmabufsrc1[i] != dmabufdest1[i]) {
				TEST_PRINT_ERR("edma_test_chain(1): "
					"Data write-read matching failed at = %u",i);
				Istestpassed1 = 0u;
				result = -1;
				break;
			}
		}
		if (i == (acnt * bcnt * ccnt)) {
			Istestpassed1 = 1u;
		}

		for (i = 0; i < (acnt * bcnt * ccnt); i++) {
			if (dmabufsrc2[i] != dmabufdest2[i]) {
				TEST_PRINT_ERR("edma_test_chain(2): "
					"Data write-read matching failed at = %u",i);
				Istestpassed2 = 0u;
				result = -1;
				break;
			}
		}
		if (i == (acnt * bcnt * ccnt)) {
			Istestpassed2 = 1u;
		}

		kSt_davinci_stop_dma(dma_ch1);
		kSt_davinci_free_dma(dma_ch1);

		kSt_davinci_stop_dma(dma_ch2);
		kSt_davinci_free_dma(dma_ch2);
	}

	if ((Istestpassed1 == 1u) && (Istestpassed2 == 1u)) {
		DEBUG_PRINT("edma_test_chain: "
			"Transfer controller/event_queue: %d", event_queue);
		DEBUG_PRINT("edma_test_chain: "
			"Mode: %d  0 -> ASYNC, 1 -> ABSYNC", sync_mode);	
		TEST_PRINT_TRC("edma_test_chain: "
	   	 	"EDMA Data Transfer Successfull on TC %d",event_queue);
	} else {
		TEST_PRINT_ERR("edma_test_chain: EDMA Data Transfer Failed");
	}
	return result;
}
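No call site is shown for this test. Assuming the source/destination buffers and their physical addresses are allocated elsewhere and hold at least acnt * bcnt * ccnt bytes, a driver might exercise both modes as in the sketch below (the wrapper name is hypothetical):

static int kSt_run_chain_tests(void)
{
	int result;

	/* 8 KiB per buffer, split as 512 x 8 x 2 (acnt x bcnt x ccnt), event queue 0 */
	result = kSt_edma_memtomemcpytest_chain(512, 8, 2, ASYNC, 0);
	if (result != 0)
		return result;

	return kSt_edma_memtomemcpytest_chain(512, 8, 2, ABSYNC, 0);
}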