static void sun7i_pcm_enqueue(struct snd_pcm_substream *substream)
{
	int play_ret = 0, capture_ret = 0;
	struct sun7i_playback_runtime_data *play_prtd = NULL;
	struct sun7i_capture_runtime_data *capture_prtd = NULL;
	dma_addr_t play_pos = 0, capture_pos = 0;
	unsigned long play_len = 0, capture_len = 0;
	unsigned int play_limit = 0, capture_limit = 0;
	
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		play_prtd = substream->runtime->private_data;
		play_pos = play_prtd->dma_pos;
		play_len = play_prtd->dma_period;
		play_limit = play_prtd->dma_limit;
		while (play_prtd->dma_loaded < play_limit) {
			if ((play_pos + play_len) > play_prtd->dma_end) {
				play_len  = play_prtd->dma_end - play_pos;
			}
			
			play_ret = sw_dma_enqueue(play_prtd->dma_hdl, play_pos, play_prtd->params->dma_addr, play_len);
			if (play_ret == 0) {
				play_prtd->dma_loaded++;
				play_pos += play_prtd->dma_period;
				if(play_pos >= play_prtd->dma_end)
					play_pos = play_prtd->dma_start;
			} else {
				break;
			}
		}
		play_prtd->dma_pos = play_pos;
	} else {
		/*pr_info("CAPTUR:sun7i_i2sdma.c::func:%s(line:%d)\n",__func__,__LINE__);*/
		capture_prtd = substream->runtime->private_data;
		capture_pos = capture_prtd->dma_pos;
		capture_len = capture_prtd->dma_period;
		capture_limit = capture_prtd->dma_limit;
		while (capture_prtd->dma_loaded < capture_limit) {
			if ((capture_pos + capture_len) > capture_prtd->dma_end) {
				capture_len  = capture_prtd->dma_end - capture_pos;
			}
			
			capture_ret = sw_dma_enqueue(capture_prtd->dma_hdl, capture_prtd->params->dma_addr, capture_pos, capture_len);
			if (capture_ret == 0) {
				capture_prtd->dma_loaded++;
				capture_pos += capture_prtd->dma_period;
				if (capture_pos >= capture_prtd->dma_end)
					capture_pos = capture_prtd->dma_start;
			} else {
				break;
			}
		}
		capture_prtd->dma_pos = capture_pos;
	}
}
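The playback and capture branches above are mirror images: only the order of the source and destination arguments to sw_dma_enqueue() differs (playback streams memory to the device address, capture streams the device address to memory). Below is a minimal sketch of how the duplication could be folded into one helper, assuming the 4-argument sw_dma_enqueue() form used in this example and an illustrative shared struct (not the driver's actual types); unlike the original, it re-derives len on every pass, since the original never restores len to dma_period after clamping a partial tail period:

#include <linux/types.h>

/* Illustrative only: a hypothetical shared-state struct and helper. */
struct sun7i_dma_state {
	void *dma_hdl;
	dma_addr_t dma_pos, dma_start, dma_end, dev_addr;
	unsigned long dma_period;
	unsigned int dma_loaded, dma_limit;
};

extern int sw_dma_enqueue(void *hdl, dma_addr_t src, dma_addr_t dst,
			  unsigned long len); /* assumed prototype */

static void sun7i_pcm_enqueue_dir(struct sun7i_dma_state *s, bool playback)
{
	dma_addr_t pos = s->dma_pos;
	unsigned long len;

	while (s->dma_loaded < s->dma_limit) {
		len = s->dma_period;
		if (pos + len > s->dma_end)
			len = s->dma_end - pos; /* clamp the final partial period */

		/* playback: memory -> device; capture: device -> memory */
		if (sw_dma_enqueue(s->dma_hdl,
				   playback ? pos : s->dev_addr,
				   playback ? s->dev_addr : pos, len) != 0)
			break;

		s->dma_loaded++;
		pos += s->dma_period;
		if (pos >= s->dma_end)
			pos = s->dma_start; /* wrap around the ring buffer */
	}
	s->dma_pos = pos;
}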
Example #2
static void sun4i_pcm_enqueue(struct snd_pcm_substream *substream)
{
	struct sun4i_runtime_data *prtd = substream->runtime->private_data;
	dma_addr_t pos = prtd->dma_pos;
	unsigned int limit;
	int ret;
	
	unsigned long len = prtd->dma_period;
	limit = prtd->dma_limit;
	while (prtd->dma_loaded < limit) {
		if((pos + len) > prtd->dma_end) {
			len  = prtd->dma_end - pos;
		}
	
		ret = sw_dma_enqueue(prtd->params->channel, substream, __bus_to_virt(pos),  len);
		if(ret == 0) {
			prtd->dma_loaded++;
			pos += prtd->dma_period;
			if(pos >= prtd->dma_end)
				pos = prtd->dma_start;
		} else {
			break;
		}
	}
	prtd->dma_pos = pos;
}
Example #3
u32 __cb_hd_many_enq(dm_hdl_t dma_hdl, void *parg, enum dma_cb_cause_e cause)
{
	u32 	uret = 0;
	u32	ucur_saddr = 0, ucur_daddr = 0;
	u32	uloop_cnt = DTC_TOTAL_LEN / DTC_ONE_LEN;
	u32 	ucur_cnt = 0;

	pr_info("%s: called!\n", __func__);
	switch(cause) {
	case DMA_CB_OK:
		/* enqueue if not done */
		ucur_cnt = atomic_add_return(1, &g_acur_cnt);
		if(ucur_cnt < uloop_cnt){
			pr_info("%s, line %d, ucur_cnt %d\n", __func__, __LINE__, ucur_cnt);
			ucur_saddr = g_src_addr + ucur_cnt * DTC_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_HD))
				printk("%s err, line %d\n", __func__, __LINE__);
		} else
			pr_info("%s, line %d\n", __func__, __LINE__); /* do nothing */
		break;
	case DMA_CB_ABORT:
		pr_info("%s: DMA_CB_ABORT!\n", __func__);
		break;
	default:
		uret = __LINE__;
		goto end;
	}

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	return uret;
}
Example #4
__s32 emactx_DMAEqueueBuf(int hDma, void *buff_addr, __u32 len)
{
	eLIBs_CleanFlushDCacheRegion(buff_addr, len);

	emactx_dma_completed_flag = 0;
	return sw_dma_enqueue(hDma, (void*)(seq_tx++), (dma_addr_t)buff_addr, len);
}
Example #5
/**
 * __cb_qd_chain - queue done callback for case DTC_CHAIN_MODE
 * @dma_hdl:	dma handle
 * @parg:	args registered with the cb function
 * @cause:	case for this cb, DMA_CB_OK means data transfer OK,
 * 		DMA_CB_ABORT means stopped before transfer complete
 *
 * Returns 0 if success, the err line number if failed.
 */
u32 __cb_qd_chain(dm_hdl_t dma_hdl, void *parg, enum dma_cb_cause_e cause)
{
	u32 	uret = 0;
	u32	ucur_saddr = 0, ucur_daddr = 0;
	u32	uloop_cnt = DTC_TOTAL_LEN / DTC_ONE_LEN;
	u32 	ucur_cnt = 0;

	pr_info("%s: called!\n", __func__);
	switch(cause) {
	case DMA_CB_OK:
		pr_info("%s: DMA_CB_OK!\n", __func__);
		/* enqueue if not done */
		ucur_cnt = atomic_add_return(1, &g_acur_cnt);
		if(ucur_cnt < uloop_cnt) {
			printk("%s, line %d\n", __func__, __LINE__);
			/* NOTE: fatal err: by the time it is re-read here, g_acur_cnt may already have been changed elsewhere, 2012-12-2 */
			//ucur_saddr = g_src_addr + atomic_read(&g_acur_cnt) * DTC_ONE_LEN;
			ucur_saddr = g_src_addr + ucur_cnt * DTC_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_QD))
				printk("%s err, line %d\n", __func__, __LINE__);
#if 0
		/*
		 * we have completed enqueueing, but that does not mean this is the
		 * last qd irq; in testing, the if(ucur_cnt == uloop_cnt) branch was
		 * sometimes never hit, i.e. enqueueing finished during the hd/fd callback.
		 */
		} else if(ucur_cnt == uloop_cnt){
			printk("%s, line %d\n", __func__, __LINE__);
			sw_dma_dump_chan(dma_hdl); /* for debug */

			/* maybe it's the last irq, or the next one will be; needs more thought */
			atomic_set(&g_adma_done, 1);
			wake_up_interruptible(&g_dtc_queue[DTC_CHAIN_MODE]);
#endif
		} else {
			printk("%s, line %d\n", __func__, __LINE__);
			sw_dma_dump_chan(dma_hdl); /* for debug */

			/* maybe it's the last irq */
			atomic_set(&g_adma_done, 1);
			wake_up_interruptible(&g_dtc_queue[DTC_CHAIN_MODE]);
		}
		break;
	case DMA_CB_ABORT:
		pr_info("%s: DMA_CB_ABORT!\n", __func__);
		break;
	default:
		uret = __LINE__;
		goto end;
	}

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	return uret;
}
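A detail worth calling out in these callbacks: the chunk index is always taken from the value returned by atomic_add_return(), never from a later atomic_read() of the counter (the commented-out line flagged as a fatal err above is exactly that racy variant). A minimal sketch of the difference, with hypothetical helper names:

#include <linux/atomic.h>

static atomic_t g_acur_cnt = ATOMIC_INIT(0);

/* Correct: atomic_add_return() increments and hands back the new value
 * in one atomic step, so two racing contexts (eg. a hd callback and a
 * qd callback) can never claim the same chunk index. */
static int claim_next_chunk(void)
{
	return atomic_add_return(1, &g_acur_cnt);
}

/* Racy variant for contrast: between the increment and the re-read,
 * another context may bump the counter again, so both callers end up
 * computing the same (wrong) chunk address. */
static int claim_next_chunk_racy(void)
{
	atomic_add_return(1, &g_acur_cnt);
	return atomic_read(&g_acur_cnt); /* may already have moved on */
}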
Example #6
u32 __cb_qd_single_mode(dm_hdl_t dma_hdl, void *parg, enum dma_cb_cause_e cause)
{
	u32 	uret = 0;
	u32	ucur_saddr = 0, ucur_daddr = 0;
	u32	uloop_cnt = DTC_1T_TOTAL_LEN / DTC_1T_ONE_LEN;
	u32 	ucur_cnt = 0;

	pr_info("%s: called!\n", __func__);
	switch(cause) {
	case DMA_CB_OK:
		g_qd_cnt++;
		/* enqueue if not done */
		ucur_cnt = atomic_add_return(1, &g_acur_cnt);
		if(ucur_cnt < uloop_cnt) {
			ucur_saddr = g_src_addr + ucur_cnt * DTC_1T_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_1T_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_1T_ONE_LEN, ENQUE_PHASE_QD))
				printk("%s err, line %d\n", __func__, __LINE__);
		} else if(ucur_cnt >= uloop_cnt){
			/* we have complete enqueueing, but not means it's the last qd irq */
			//if(true == sw_dma_sgmd_buflist_empty(dma_hdl)) {
			if(true) {
				/* We cannot assume the transfer is complete here either: in testing
				we reached this point twice. The reason: __dtc_single_mode increments
				cnt before its enqueue, but that enqueue is interrupted by the irq
				and stays pending, waiting for the irq's own enqueue and transfer to
				finish; at that point buflist_empty naturally holds, and only then
				does __dtc_single_mode get to complete its one outstanding enqueue,
				landing us here a second time. So in this demo, reaching here does
				not mean the data has fully transferred; in other scenarios, though,
				an empty list in the qd callback can usually be treated as completion.
				*/
				/* maybe it's the last irq */
				atomic_set(&g_adma_done, 1);
				wake_up_interruptible(&g_dtc_queue[DTC_SINGLE_MODE]);
			}
		}
		break;
	case DMA_CB_ABORT:
		pr_info("%s: DMA_CB_ABORT!\n", __func__);
		break;
	default:
		uret = __LINE__;
		goto end;
	}

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	return uret;
}
Example #7
u32 __cb_qd_single_mode(dm_hdl_t dma_hdl, void *parg, enum dma_cb_cause_e cause)
{
	u32 	uret = 0;
	u32	ucur_saddr = 0, ucur_daddr = 0;
	u32	uloop_cnt = DTC_TOTAL_LEN / DTC_ONE_LEN;
	u32 	ucur_cnt = 0;

	pr_info("%s: called!\n", __func__);
	switch(cause) {
	case DMA_CB_OK:
		g_qd_cnt++;
		/* enqueue if not done */
		ucur_cnt = atomic_add_return(1, &g_acur_cnt);
		if(ucur_cnt < uloop_cnt) {
			ucur_saddr = g_src_addr + ucur_cnt * DTC_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_QD))
				printk("%s err, line %d\n", __func__, __LINE__);
		} else if(ucur_cnt >= uloop_cnt){
			/* we have complete enqueueing, but not means it's the last qd irq */
			//if(true == sw_dma_sgmd_buflist_empty(dma_hdl)) {
			if(true) {
				/* We cannot assume the transfer is complete here either: in testing
				we reached this point twice. The reason: __dtc_single_mode increments
				cnt before its enqueue, but that enqueue is interrupted by the irq
				and stays pending, waiting for the irq's own enqueue and transfer to
				finish; at that point buflist_empty naturally holds, and only then
				does __dtc_single_mode get to complete its one outstanding enqueue,
				landing us here a second time. So in this demo, reaching here does
				not mean the data has fully transferred; in other scenarios, though,
				an empty list in the qd callback can usually be treated as completion.
				*/
				/* maybe it's the last irq */
				atomic_set(&g_adma_done, 1);
				wake_up_interruptible(&g_dtc_queue[DTC_SINGLE_MODE]);
			}
		}
		break;
	case DMA_CB_ABORT:
		pr_info("%s: DMA_CB_ABORT!\n", __func__);
		break;
	default:
		uret = __LINE__;
		goto end;
	}

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	return uret;
}
Example #8
u32 __cb_qd_stopcmd(dm_hdl_t dma_hdl, void *parg, enum dma_cb_cause_e cause)
{
	u32 	uret = 0;
	u32	ucur_saddr = 0, ucur_daddr = 0;
	u32	uloop_cnt = DTC_TOTAL_LEN / DTC_ONE_LEN;
	u32 	ucur_cnt = 0;

	switch(cause) {
	case DMA_CB_OK:
		ucur_cnt = atomic_add_return(1, &g_acur_cnt);
		if(ucur_cnt < uloop_cnt) {
			ucur_saddr = g_src_addr + ucur_cnt * DTC_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_QD))
				printk("%s err, line %d\n", __func__, __LINE__);
		} else if(ucur_cnt == uloop_cnt){
			/*
			 * we have completed enqueueing, but that does not mean this is the
			 * last qd irq; in testing, the if(ucur_cnt == uloop_cnt) branch was
			 * sometimes never hit, i.e. enqueueing finished during the hd/fd callback.
			 */
			/* maybe it's the last irq, or the next one will be; needs more thought */
			atomic_set(&g_adma_done, 1);
			wake_up_interruptible(&g_dtc_queue[0]);
		} else {
			/* maybe it's the last irq */
			atomic_set(&g_adma_done, 1);
			wake_up_interruptible(&g_dtc_queue[0]);
		}
		break;
	case DMA_CB_ABORT:
		pr_info("%s: DMA_CB_ABORT!\n", __func__);
		break;
	default:
		uret = __LINE__;
		goto end;
	}

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	return uret;
}
Example #9
/**
 * __dtc_many_enq - dma test case for DTC_1TM2M_MANY_ENQ
 *
 * Returns 0 if success, the err line number if failed.
 */
u32 __dtc_many_enq(void)
{
	u32 	uret = 0;
	void 	*src_vaddr = NULL, *dst_vaddr = NULL;
	u32 	usrc_paddr = 0, udst_paddr = 0;
	dm_hdl_t dma_hdl = (dm_hdl_t)NULL;
	struct dma_cb_t done_cb;
	struct dma_op_cb_t op_cb;
	struct dma_config_t dma_config;

	pr_info("%s enter\n", __func__);

	src_vaddr = dma_alloc_coherent(NULL, DTC_TOTAL_LEN, (dma_addr_t *)&usrc_paddr, GFP_KERNEL);
	if(NULL == src_vaddr) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: src_vaddr 0x%08x, usrc_paddr 0x%08x\n", __func__, (u32)src_vaddr, usrc_paddr);
	dst_vaddr = dma_alloc_coherent(NULL, DTC_TOTAL_LEN, (dma_addr_t *)&udst_paddr, GFP_KERNEL);
	if(NULL == dst_vaddr) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: dst_vaddr 0x%08x, udst_paddr 0x%08x\n", __func__, (u32)dst_vaddr, udst_paddr);

	get_random_bytes(src_vaddr, DTC_TOTAL_LEN);
	memset(dst_vaddr, 0x54, DTC_TOTAL_LEN);

	atomic_set(&g_acur_cnt, 0);
	g_src_addr = usrc_paddr;
	g_dst_addr = udst_paddr;

	dma_hdl = sw_dma_request("m2m_dma", DMA_WORK_MODE_CHAIN);
	if(NULL == dma_hdl) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_request success, dma_hdl 0x%08x\n", __func__, (u32)dma_hdl);

	/* set callback */
	memset(&done_cb, 0, sizeof(done_cb));
	memset(&op_cb, 0, sizeof(op_cb));
	done_cb.func = __cb_qd_many_enq;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_QD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set queuedone_cb success\n", __func__);
	done_cb.func = __cb_fd_many_enq;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_FD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set fulldone_cb success\n", __func__);
	done_cb.func = __cb_hd_many_enq;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_HD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set halfdone_cb success\n", __func__);
	op_cb.func = __cb_op_many_enq;
	op_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_OP_CB, (void *)&op_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set op_cb success\n", __func__);

	memset(&dma_config, 0, sizeof(dma_config));
	dma_config.xfer_type = DMAXFER_D_BWORD_S_BWORD;
	dma_config.address_type = DMAADDRT_D_LN_S_LN;
	dma_config.para = 0;
	dma_config.irq_spt = CHAN_IRQ_HD | CHAN_IRQ_FD | CHAN_IRQ_QD;
	dma_config.src_addr = usrc_paddr;
	dma_config.dst_addr = udst_paddr;
	dma_config.byte_cnt = DTC_ONE_LEN;
	dma_config.bconti_mode = false;
	dma_config.src_drq_type = DRQSRC_SDRAM;
	dma_config.dst_drq_type = DRQDST_SDRAM;
	if(0 != sw_dma_config(dma_hdl, &dma_config, ENQUE_PHASE_NORMAL)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_config success\n", __func__);
	sw_dma_dump_chan(dma_hdl);

	/* start dma */
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_START, NULL)) {
		uret = __LINE__;
		goto end;
	}

	/* normal enqueue and callback enqueue simultaneously */
	{
		u32 	ucur_cnt = 0, ucur_saddr = 0, ucur_daddr = 0;
		u32	uloop_cnt = DTC_TOTAL_LEN / DTC_ONE_LEN;

		while((ucur_cnt = atomic_add_return(1, &g_acur_cnt)) < uloop_cnt) {
			pr_info("%s, line %d, ucur_cnt %d\n", __func__, __LINE__, ucur_cnt);
			ucur_saddr = g_src_addr + ucur_cnt * DTC_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_NORMAL))
				printk("%s err, line %d\n", __func__, __LINE__);

			/* in order to exercise cb and normal enqueue simultaneously */
			msleep(0);
		}
	}
	pr_info("%s, line %d\n", __func__, __LINE__);

	__waitdone_many_enq();

	if(0 == memcmp(src_vaddr, dst_vaddr, DTC_TOTAL_LEN))
		pr_info("%s: data check ok!\n", __func__);
	else {
		pr_err("%s: data check err!\n", __func__);
		uret = __LINE__; /* return err */
		goto end;
	}

	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_STOP, NULL)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_stop success\n", __func__);
	if(0 != sw_dma_release(dma_hdl)) {
		uret = __LINE__;
		goto end;
	}
	dma_hdl = (dm_hdl_t)NULL;
	pr_info("%s: sw_dma_release success\n", __func__);

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	else
		pr_info("%s, success!\n", __func__);

	if((dm_hdl_t)NULL != dma_hdl) {
		pr_err("%s, stop and release dma handle now!\n", __func__);
		if(0 != sw_dma_ctl(dma_hdl, DMA_OP_STOP, NULL))
			pr_err("%s err, line %d!\n", __func__, __LINE__);
		if(0 != sw_dma_release(dma_hdl))
			pr_err("%s err, line %d!\n", __func__, __LINE__);
	}

	if(NULL != src_vaddr)
		dma_free_coherent(NULL, DTC_TOTAL_LEN, src_vaddr, usrc_paddr);
	if(NULL != dst_vaddr)
		dma_free_coherent(NULL, DTC_TOTAL_LEN, dst_vaddr, udst_paddr);

	pr_info("%s end!\n", __func__);
	return uret;
}
Example #10
u32 __cb_qd_many_enq(dm_hdl_t dma_hdl, void *parg, enum dma_cb_cause_e cause)
{
	u32 	uret = 0;
	u32	ucur_saddr = 0, ucur_daddr = 0;
	u32	uloop_cnt = DTC_TOTAL_LEN / DTC_ONE_LEN;
	u32 	ucur_cnt = 0;

	pr_info("%s: called!\n", __func__);
	switch(cause) {
	case DMA_CB_OK:
		pr_info("%s: DMA_CB_OK!\n", __func__);
		/* enqueue if not done */
		ucur_cnt = atomic_add_return(1, &g_acur_cnt);
		if(ucur_cnt < uloop_cnt) {
			pr_info("%s, line %d, ucur_cnt %d\n", __func__, __LINE__, ucur_cnt);
			//ucur_saddr = g_src_addr + atomic_read(&g_acur_cnt) * DTC_ONE_LEN; /* BUG: data check may fail */
			ucur_saddr = g_src_addr + ucur_cnt * DTC_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_QD))
				printk("%s err, line %d\n", __func__, __LINE__);
		} else if(ucur_cnt == uloop_cnt){
			/*
			 * we have completed enqueueing, but that does not mean this is the
			 * last qd irq; in testing, the if(ucur_cnt == uloop_cnt) branch was
			 * sometimes never hit, i.e. enqueueing finished during the hd/fd callback.
			 */
			pr_info("%s, line %d\n", __func__, __LINE__);

#if 0			/* NOTE: cannot signal g_adma_done here, because this may NOT be the last qd irq */
			atomic_set(&g_adma_done, 1);
			wake_up_interruptible(&g_dtc_queue[0]);
#endif
		} else {
			/*
			 * NOTE: cannot signal g_adma_done here either, because:
			 * (1) it may be the last irq, in which case signalling would be fine; or
			 * (2) it may be the next-to-last irq: ucur_cnt can already have passed
			 *	uloop_cnt earlier (eg. in the hd/fd cb), and in that case
			 *	__dma_chan_handle_qd will still start the remaining buffer
			 *	after this one.
			 * In testing, the pr_info below was hit twice.
			 */
			pr_info("%s, line %d\n", __func__, __LINE__);

#if 0
			sw_dma_dump_chan(dma_hdl); /* for debug */
			/* maybe it's the last irq */
			atomic_set(&g_adma_done, 1);
			wake_up_interruptible(&g_dtc_queue[0]);
#endif
		}
		break;
	case DMA_CB_ABORT:
		pr_info("%s: DMA_CB_ABORT!\n", __func__);
		break;
	default:
		uret = __LINE__;
		goto end;
	}

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	return uret;
}
Example #11
u32 __dtc_stopcmd(void)
{
	u32 	uret = 0;
	u32 	i = 0;
	void 	*src_vaddr = NULL, *dst_vaddr = NULL;
	u32 	usrc_paddr = 0, udst_paddr = 0;
	dm_hdl_t dma_hdl = (dm_hdl_t)NULL;
	struct dma_cb_t done_cb;
	struct dma_op_cb_t op_cb;
	struct dma_config_t dma_config;

	pr_info("%s enter\n", __func__);

	src_vaddr = dma_alloc_coherent(NULL, DTC_TOTAL_LEN, (dma_addr_t *)&usrc_paddr, GFP_KERNEL);
	if(NULL == src_vaddr) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: src_vaddr 0x%08x, usrc_paddr 0x%08x\n", __func__, (u32)src_vaddr, usrc_paddr);
	dst_vaddr = dma_alloc_coherent(NULL, DTC_TOTAL_LEN, (dma_addr_t *)&udst_paddr, GFP_KERNEL);
	if(NULL == dst_vaddr) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: dst_vaddr 0x%08x, udst_paddr 0x%08x\n", __func__, (u32)dst_vaddr, udst_paddr);

	get_random_bytes(src_vaddr, DTC_TOTAL_LEN);
	memset(dst_vaddr, 0x54, DTC_TOTAL_LEN);

	atomic_set(&g_acur_cnt, 0);
	g_src_addr = usrc_paddr;
	g_dst_addr = udst_paddr;

	dma_hdl = sw_dma_request("case_stp_dma", DMA_WORK_MODE_CHAIN);
	//dma_hdl = sw_dma_request("case_stp_dma", DMA_WORK_MODE_SINGLE);
	if(NULL == dma_hdl) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_request success, dma_hdl 0x%08x\n", __func__, (u32)dma_hdl);

	/* set callback */
	memset(&done_cb, 0, sizeof(done_cb));
	memset(&op_cb, 0, sizeof(op_cb));
	done_cb.func = __cb_qd_stopcmd;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_QD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set queuedone_cb success\n", __func__);
	done_cb.func = __cb_fd_stopcmd;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_FD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set fulldone_cb success\n", __func__);
	done_cb.func = __cb_hd_stopcmd;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_HD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set halfdone_cb success\n", __func__);
	op_cb.func = __cb_op_stopcmd;
	op_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_OP_CB, (void *)&op_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set op_cb success\n", __func__);

	memset(&dma_config, 0, sizeof(dma_config));
	dma_config.src_drq_type = DRQSRC_SDRAM;
	dma_config.dst_drq_type = DRQDST_SDRAM;
	dma_config.bconti_mode = false; /* must be 0, otherwise irq will come again and again */
	dma_config.xfer_type = DMAXFER_D_BWORD_S_BWORD;
	dma_config.address_type = DMAADDRT_D_LN_S_LN; /* change with dma type */
	dma_config.irq_spt = CHAN_IRQ_HD | CHAN_IRQ_FD | CHAN_IRQ_QD;
	dma_config.src_addr = usrc_paddr;
	dma_config.dst_addr = udst_paddr;
	dma_config.byte_cnt = DTC_ONE_LEN;
	dma_config.para = 0; /* value still to be verified */
	if(0 != sw_dma_config(dma_hdl, &dma_config, ENQUE_PHASE_NORMAL)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_config success\n", __func__);

	atomic_set(&g_adma_done, 0);
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_START, NULL)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_start success\n", __func__);

	/* callback enqueue and normal enqueue simultaneously */
	i = 0;
	while(i++ < 100) {
		u32	ucur_saddr = 0, ucur_daddr = 0;
		u32	uindex = 0, j = 0;

		get_random_bytes(&uindex, sizeof(uindex));
		uindex %= (DTC_TOTAL_LEN / DTC_ONE_LEN);
		ucur_saddr = g_src_addr + uindex * DTC_ONE_LEN;
		ucur_daddr = g_dst_addr + uindex * DTC_ONE_LEN;
		/* enqueue the same buffer seven times in a row */
		for (j = 0; j < 7; j++) {
			if (0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_NORMAL)) {
				uret = __LINE__;
				goto end;
			}
		}
	}

#if 0
	if(0 != __waitdone_stopcmd()) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: __waitdone_stopcmd sucess\n", __func__);
#endif
	sw_dma_dump_chan(dma_hdl);

	/* stop and release dma channel */
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_STOP, NULL)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_stop success\n", __func__);
	if(0 != sw_dma_release(dma_hdl)) {
		uret = __LINE__;
		goto end;
	}
	dma_hdl = (dm_hdl_t)NULL;
	pr_info("%s: sw_dma_release success\n", __func__);

	/* check if data ok */
	if(0 == memcmp(src_vaddr, dst_vaddr, DTC_TOTAL_LEN))
		pr_info("%s: data check ok!\n", __func__);
	else {
		pr_err("%s: data check err!\n", __func__);
		//uret = __LINE__; /* we do not need the data to be ok, we are just testing the stop cmd */
	}

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	else
		pr_info("%s success!\n", __func__);

	if((dm_hdl_t)NULL != dma_hdl) {
		if(0 != sw_dma_ctl(dma_hdl, DMA_OP_STOP, NULL))
			pr_err("%s err, line %d!\n", __func__, __LINE__);
		if(0 != sw_dma_release(dma_hdl))
			pr_err("%s err, line %d!\n", __func__, __LINE__);
	}

	if(NULL != src_vaddr)
		dma_free_coherent(NULL, DTC_TOTAL_LEN, src_vaddr, usrc_paddr);
	if(NULL != dst_vaddr)
		dma_free_coherent(NULL, DTC_TOTAL_LEN, dst_vaddr, udst_paddr);

	return uret;
}
Example #12
u32 __dtc_case_enq_aftdone(void)
{
	u32 	uret = 0;
	u32 	i = 0;
	void 	*src_vaddr = NULL, *dst_vaddr = NULL;
	u32 	src_paddr = 0, dst_paddr = 0;
	dm_hdl_t dma_hdl = (dm_hdl_t)NULL;
	struct dma_cb_t done_cb;
	struct dma_op_cb_t op_cb;
	struct dma_config_t dma_config;

	pr_info("%s enter\n", __func__);

	/* prepare the buffer and data */
	src_vaddr = dma_alloc_coherent(NULL, DTC_TOTAL_LEN, (dma_addr_t *)&src_paddr, GFP_KERNEL);
	if(NULL == src_vaddr) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: src_vaddr 0x%08x, src_paddr 0x%08x\n", __func__, (u32)src_vaddr, src_paddr);
	dst_vaddr = dma_alloc_coherent(NULL, DTC_TOTAL_LEN, (dma_addr_t *)&dst_paddr, GFP_KERNEL);
	if(NULL == dst_vaddr) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: dst_vaddr 0x%08x, dst_paddr 0x%08x\n", __func__, (u32)dst_vaddr, dst_paddr);

	/* init src buffer */
	get_random_bytes(src_vaddr, DTC_TOTAL_LEN);
	memset(dst_vaddr, 0x54, DTC_TOTAL_LEN);

	/* init loop para */
	atomic_set(&g_acur_cnt, 0);
	g_src_addr = src_paddr;
	g_dst_addr = dst_paddr;

	/* request dma channel */
	dma_hdl = sw_dma_request("m2m_dma", DMA_WORK_MODE_CHAIN);
	if(NULL == dma_hdl) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_request success, dma_hdl 0x%08x\n", __func__, (u32)dma_hdl);

	/* set callback */
	memset(&done_cb, 0, sizeof(done_cb));
	memset(&op_cb, 0, sizeof(op_cb));
	done_cb.func = __cb_qd_case_enq_aftdone;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_QD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set queuedone_cb success\n", __func__);
	done_cb.func = __cb_fd_case_enq_aftdone;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_FD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set fulldone_cb success\n", __func__);
	done_cb.func = __cb_hd_case_enq_aftdone;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_HD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set halfdone_cb success\n", __func__);
	op_cb.func = __cb_op_case_enq_aftdone;
	op_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_OP_CB, (void *)&op_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set op_cb success\n", __func__);

	/* enqueue buffer */
	memset(&dma_config, 0, sizeof(dma_config));
	dma_config.src_drq_type = DRQSRC_SDRAM;
	dma_config.dst_drq_type = DRQDST_SDRAM;
	//dma_config.conti_mode = 1;
	dma_config.bconti_mode 	= false; /* must be 0, otherwise irq will come again and again */
	dma_config.xfer_type 	= DMAXFER_D_BWORD_S_BWORD;
	dma_config.address_type = DMAADDRT_D_LN_S_LN; /* change with dma type */
	dma_config.irq_spt 	= CHAN_IRQ_HD | CHAN_IRQ_FD | CHAN_IRQ_QD;
	dma_config.src_addr 	= src_paddr;
	dma_config.dst_addr 	= dst_paddr;
	dma_config.byte_cnt 	= DTC_ONE_LEN;
	dma_config.para 	= 0;
	if(0 != sw_dma_config(dma_hdl, &dma_config, ENQUE_PHASE_NORMAL)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_config success\n", __func__);
	sw_dma_dump_chan(dma_hdl);

	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_START, NULL)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_start success\n", __func__);

	if(0 != __waitdone_case_enq_aftdone()) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: __waitdone_case_enq_aftdone sucess\n", __func__);

	/* after done: app and fd_cb enqueue simultaneously */
	i = 0;
	while(i++ < 30) {
		u32 ucur_saddr = 0, ucur_daddr = 0;

		pr_info("%s: i %d\n", __func__, i);
		ucur_saddr = g_src_addr + 0 * DTC_ONE_LEN;
		ucur_daddr = g_dst_addr + 0 * DTC_ONE_LEN;
		if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_NORMAL)) {
			uret = __LINE__;
			goto end;
		}
		msleep(1);
	}
	msleep(2000);

	if(0 == memcmp(src_vaddr, dst_vaddr, DTC_TOTAL_LEN))
		pr_info("%s: data check ok!\n", __func__);
	else {
		pr_err("%s: data check err!\n", __func__);
		uret = __LINE__; /* return err */
		goto end;
	}

	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_STOP, NULL)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_stop success\n", __func__);
	if(0 != sw_dma_release(dma_hdl)) {
		uret = __LINE__;
		goto end;
	}
	dma_hdl = (dm_hdl_t)NULL;
	pr_info("%s: sw_dma_release success\n", __func__);

end:
	/* print err line */
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret);
	else
		pr_info("%s success!\n", __func__);

	if((dm_hdl_t)NULL != dma_hdl) {
		if(0 != sw_dma_ctl(dma_hdl, DMA_OP_STOP, NULL)) {
			pr_err("%s err, line %d!\n", __func__, __LINE__);
		}
		if(0 != sw_dma_release(dma_hdl)) {
			pr_err("%s err, line %d!\n", __func__, __LINE__);
		}
	}

	if(NULL != src_vaddr)
		dma_free_coherent(NULL, DTC_TOTAL_LEN, src_vaddr, src_paddr);
	if(NULL != dst_vaddr)
		dma_free_coherent(NULL, DTC_TOTAL_LEN, dst_vaddr, dst_paddr);

	return uret;
}
Example #13
/**
 * __dtc_chain_mode - dma test case for chain mode
 *
 * Returns 0 if success, the err line number if failed.
 */
u32 __dtc_chain_mode(void)
{
	u32 	uret = 0;
	void 	*src_vaddr = NULL, *dst_vaddr = NULL;
	u32 	src_paddr = 0, dst_paddr = 0;
	dm_hdl_t dma_hdl = (dm_hdl_t)NULL;
	struct dma_cb_t done_cb;
	struct dma_op_cb_t op_cb;
	struct dma_config_t dma_config;

	pr_info("%s enter\n", __func__);

	/* prepare the buffer and data */
	src_vaddr = dma_alloc_coherent(NULL, DTC_TOTAL_LEN, (dma_addr_t *)&src_paddr, GFP_KERNEL);
	if(NULL == src_vaddr) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: src_vaddr 0x%08x, src_paddr 0x%08x\n", __func__, (u32)src_vaddr, src_paddr);
	dst_vaddr = dma_alloc_coherent(NULL, DTC_TOTAL_LEN, (dma_addr_t *)&dst_paddr, GFP_KERNEL);
	if(NULL == dst_vaddr) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: dst_vaddr 0x%08x, dst_paddr 0x%08x\n", __func__, (u32)dst_vaddr, dst_paddr);

	/* init src buffer */
	get_random_bytes(src_vaddr, DTC_TOTAL_LEN);
	memset(dst_vaddr, 0x54, DTC_TOTAL_LEN);

	/* init loop para */
	atomic_set(&g_acur_cnt, 0);
	g_src_addr = src_paddr;
	g_dst_addr = dst_paddr;

	/* request dma channel */
	dma_hdl = sw_dma_request("m2m_dma", DMA_WORK_MODE_CHAIN);
	if(NULL == dma_hdl) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_request success, dma_hdl 0x%08x\n", __func__, (u32)dma_hdl);

	/* set queue done callback */
	memset(&done_cb, 0, sizeof(done_cb));
	memset(&op_cb, 0, sizeof(op_cb));
	done_cb.func = __cb_qd_chain;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_QD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set queuedone_cb success\n", __func__);
	/* set full done callback */
	done_cb.func = __cb_fd_chain;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_FD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set fulldone_cb success\n", __func__);
	/* set half done callback */
	done_cb.func = __cb_hd_chain;
	done_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_HD_CB, (void *)&done_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set halfdone_cb success\n", __func__);
	/* set operation done callback */
	op_cb.func = __cb_op_chain;
	op_cb.parg = NULL;
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_SET_OP_CB, (void *)&op_cb)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: set op_cb success\n", __func__);

	/* set config para */
	memset(&dma_config, 0, sizeof(dma_config));
	dma_config.xfer_type 	= DMAXFER_D_BWORD_S_BWORD;
	dma_config.address_type = DMAADDRT_D_LN_S_LN;
	dma_config.para 	= 0;
	dma_config.irq_spt 	= CHAN_IRQ_HD | CHAN_IRQ_FD | CHAN_IRQ_QD;
	dma_config.src_addr 	= src_paddr;
	dma_config.dst_addr 	= dst_paddr;
	dma_config.byte_cnt 	= DTC_ONE_LEN;
	//dma_config.conti_mode = 1;
	dma_config.bconti_mode = false;
	dma_config.src_drq_type = DRQSRC_SDRAM;
	dma_config.dst_drq_type = DRQDST_SDRAM;
	/* enqueue buffer */
	if(0 != sw_dma_config(dma_hdl, &dma_config, ENQUE_PHASE_NORMAL)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_config success\n", __func__);
	/* dump chain */
	sw_dma_dump_chan(dma_hdl);

	/* start dma */
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_START, NULL)) {
		uret = __LINE__;
		goto end;
	}

	/* enqueue the other buffers, with callback enqueue running simultaneously */
	{
		u32 	ucur_cnt = 0, ucur_saddr = 0, ucur_daddr = 0;
		u32	uloop_cnt = DTC_TOTAL_LEN / DTC_ONE_LEN;
		while((ucur_cnt = atomic_add_return(1, &g_acur_cnt)) < uloop_cnt) {
			ucur_saddr = g_src_addr + ucur_cnt * DTC_ONE_LEN;
			ucur_daddr = g_dst_addr + ucur_cnt * DTC_ONE_LEN;
			if(0 != sw_dma_enqueue(dma_hdl, ucur_saddr, ucur_daddr, DTC_ONE_LEN, ENQUE_PHASE_NORMAL))
				printk("%s err, line %d\n", __func__, __LINE__);
		}
	}
	pr_info("%s, line %d\n", __func__, __LINE__);

	/* wait dma done */
	if(0 != __waitdone_chain()) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: __waitdone_chain sucess\n", __func__);

	/*
	 * NOTE: must sleep here, because when __waitdone_chain returns, buffer
	 * enqueueing is complete but the data transfer might not be, 2012-11-14
	 */
	msleep(1000);

	/* check if data ok */
	if(0 == memcmp(src_vaddr, dst_vaddr, DTC_TOTAL_LEN))
		pr_info("%s: data check ok!\n", __func__);
	else {
		pr_err("%s: data check err!\n", __func__);
		uret = __LINE__; /* return err */
		goto end;
	}

	/* stop and release dma channel */
	if(0 != sw_dma_ctl(dma_hdl, DMA_OP_STOP, NULL)) {
		uret = __LINE__;
		goto end;
	}
	pr_info("%s: sw_dma_stop success\n", __func__);
	if(0 != sw_dma_release(dma_hdl)) {
		uret = __LINE__;
		goto end;
	}
	dma_hdl = (dm_hdl_t)NULL;
	pr_info("%s: sw_dma_release success\n", __func__);

end:
	if(0 != uret)
		pr_err("%s err, line %d!\n", __func__, uret); /* print err line */
	else
		pr_info("%s, success!\n", __func__);

	/* stop and free dma channel, if need */
	if((dm_hdl_t)NULL != dma_hdl) {
		pr_err("%s, stop and release dma handle now!\n", __func__);
		if(0 != sw_dma_ctl(dma_hdl, DMA_OP_STOP, NULL))
			pr_err("%s err, line %d!\n", __func__, __LINE__);
		if(0 != sw_dma_release(dma_hdl))
			pr_err("%s err, line %d!\n", __func__, __LINE__);
	}
	pr_err("%s, line %d!\n", __func__, __LINE__);

	/* free dma memory */
	if(NULL != src_vaddr)
		dma_free_coherent(NULL, DTC_TOTAL_LEN, src_vaddr, src_paddr);
	if(NULL != dst_vaddr)
		dma_free_coherent(NULL, DTC_TOTAL_LEN, dst_vaddr, dst_paddr);

	pr_err("%s, end!\n", __func__);
	return uret;
}
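The __waitdone_*() helpers these tests rely on are never shown in the examples above. Since the callbacks signal completion with atomic_set(&g_adma_done, 1) followed by wake_up_interruptible() on the matching g_dtc_queue entry, a plausible counterpart would look like the sketch below; this is a reconstruction only (the 5 s timeout is assumed, and g_dtc_queue, g_adma_done and DTC_CHAIN_MODE are taken to be declared in the test module as the examples imply), not the actual helper:

#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

extern wait_queue_head_t g_dtc_queue[];	/* as used by the callbacks above */
extern atomic_t g_adma_done;

/* Sketch of a wait helper matching the callbacks' signalling scheme.
 * Returns 0 if success, the err line number if failed, following the
 * convention of the test cases above. */
static u32 __waitdone_chain_sketch(void)
{
	long timeleft;

	/* sleep until a callback sets g_adma_done, or give up after an
	 * assumed 5 s */
	timeleft = wait_event_interruptible_timeout(
			g_dtc_queue[DTC_CHAIN_MODE],
			atomic_read(&g_adma_done) == 1,
			msecs_to_jiffies(5000));

	atomic_set(&g_adma_done, 0); /* re-arm for the next test run */
	return timeleft > 0 ? 0 : __LINE__; /* err on timeout or signal */
}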