Ejemplo n.º 1
0
/*
 * ALSA hw_params callback for the Samsung S3C audio DMA backend.
 *
 * On first call it claims the DMA channel described by the cpu-dai's
 * dma_data (later calls from OSS emulation reuse it), attaches the
 * buffer-done callback, points the runtime at the preallocated DMA
 * buffer and records the new buffer geometry under prtd->lock.
 *
 * Returns 0 on success (including the bufferless pass-through case) or
 * the negative error from s3c2410_dma_request().
 */
static int s3c_dma_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s3c24xx_runtime_data *prtd = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	unsigned long totbytes = params_buffer_bytes(params);
	struct s3c_dma_params *dma =
		snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);
	int ret = 0;


	pr_debug("Entered %s\n", __func__);

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!dma)
		return 0;

	/* this may get called several times by oss emulation
	 * with different params -HW */
	if (prtd->params == NULL) {
		/* prepare DMA */
		prtd->params = dma;

		pr_debug("params %p, client %p, channel %d\n", prtd->params,
			prtd->params->client, prtd->params->channel);

		ret = s3c2410_dma_request(prtd->params->channel,
					  prtd->params->client, NULL);

		if (ret < 0) {
			printk(KERN_ERR "failed to get dma channel\n");
			return ret;
		}

		/* use the circular buffering if we have it available. */
		if (s3c_dma_has_circular())
			s3c2410_dma_setflags(prtd->params->channel,
					     S3C2410_DMAF_CIRCULAR);
	}

	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
				    s3c24xx_audio_buffdone);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	runtime->dma_bytes = totbytes;

	/* publish the new buffer geometry atomically with respect to the
	 * DMA buffer-done callback, which reads these fields */
	spin_lock_irq(&prtd->lock);
	prtd->dma_loaded = 0;
	prtd->dma_limit = runtime->hw.periods_min;
	prtd->dma_period = params_period_bytes(params);
	prtd->dma_start = runtime->dma_addr;
	prtd->dma_pos = prtd->dma_start;
	prtd->dma_end = prtd->dma_start + totbytes;
	spin_unlock_irq(&prtd->lock);

	return 0;
}
Ejemplo n.º 2
0
/*
 * Acquire a DMA channel and register it in the local bookkeeping list.
 *
 * @dma_ch:  channel to request.
 * @param:   request descriptor; ->client is the owning client, ->cap
 *           selects cyclic (circular) operation.
 * @dev:     unused here (kept for the common samsung_dma_ops signature).
 * @ch_name: unused here (kept for the common samsung_dma_ops signature).
 *
 * Returns the channel number on success, 0 on failure.
 */
static unsigned s3c_dma_request(enum dma_ch dma_ch,
				struct samsung_dma_req *param,
				struct device *dev, char *ch_name)
{
	struct cb_data *data;

	if (s3c2410_dma_request(dma_ch, param->client, NULL) < 0) {
		s3c2410_dma_free(dma_ch, param->client);
		return 0;
	}

	if (param->cap == DMA_CYCLIC)
		s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		/* fix: the old code dereferenced a NULL pointer on OOM;
		 * release the channel we just claimed and report failure. */
		s3c2410_dma_free(dma_ch, param->client);
		return 0;
	}
	data->ch = dma_ch;
	list_add_tail(&data->node, &dma_list);

	return (unsigned)dma_ch;
}
Ejemplo n.º 3
0
/*
 * Acquire and fully configure a DMA channel from a samsung_dma_info
 * descriptor: direction/fifo via devconfig, optional circular mode,
 * and transfer width, with the channel registered in the local list.
 *
 * Returns the channel number on success, 0 on failure.
 */
static unsigned s3c_dma_request(enum dma_ch dma_ch,
				 struct samsung_dma_info *info)
{
	struct cb_data *data;

	if (s3c2410_dma_request(dma_ch, info->client, NULL) < 0) {
		s3c2410_dma_free(dma_ch, info->client);
		return 0;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		/* fix: the old code dereferenced a NULL pointer on OOM;
		 * release the channel we just claimed and report failure. */
		s3c2410_dma_free(dma_ch, info->client);
		return 0;
	}
	data->ch = dma_ch;
	list_add_tail(&data->node, &dma_list);

	s3c2410_dma_devconfig(dma_ch, info->direction, info->fifo);

	if (info->cap == DMA_CYCLIC)
		s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR);

	s3c2410_dma_config(dma_ch, info->width);

	return (unsigned)dma_ch;
}
Ejemplo n.º 4
0
/*
 * ALSA hw_params callback (Android-buffer variant of the S3C DMA PCM
 * backend). Claims the DMA channel on first use, attaches the
 * buffer-done callback and records the buffer geometry in the runtime
 * data under prtd->lock.
 *
 * Returns 0 on success (including the bufferless case) or the negative
 * error from s3c2410_dma_request().
 */
static int s3c_dma_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s3c24xx_runtime_data *prtd = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct s3c_dma_params *dma = rtd->dai->cpu_dai->dma_data;
	unsigned long totbytes = params_buffer_bytes(params);
	unsigned int periods = params_periods(params);
	int ret = 0;

	pr_debug("Entered %s\n", __func__);

	/* playback DMA buffers are over-allocated by a fixed factor
	 * (Android double/triple buffering) */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		totbytes *= CONFIG_ANDROID_BUF_NUM;

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!dma)
		return 0;

	/* this may get called several times by oss emulation
	 * with different params -HW */
	if (prtd->params == NULL) {
		/* prepare DMA */
		prtd->params = dma;

		pr_debug("params %p, client %p, channel %d\n", prtd->params,
				prtd->params->client, prtd->params->channel);

		ret = s3c2410_dma_request(prtd->params->channel,
				prtd->params->client, NULL);

		if (ret < 0) {
			printk(KERN_ERR "failed to get dma channel\n");
			return ret;
		}

		/*
		 * NOTE(review): circular-buffer mode was deliberately
		 * disabled here by a previous author ("sayanta
		 * commented..need to check functionality"); it stays
		 * disabled to preserve behaviour.
		 */
	}

	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
			s3c24xx_audio_buffdone);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	runtime->dma_bytes = totbytes;

	/* publish the new buffer geometry atomically w.r.t. the DMA
	 * buffer-done callback */
	spin_lock_irq(&prtd->lock);
	prtd->dma_loaded = 0;
	prtd->dma_limit = runtime->hw.periods_min;
	prtd->dma_period = params_period_bytes(params);
	prtd->dma_start = runtime->dma_addr;
	prtd->dma_pos = prtd->dma_start;
	prtd->dma_end = prtd->dma_start + totbytes;
	spin_unlock_irq(&prtd->lock);

	/* fix: the trace printk had no log level and printed dma_addr_t
	 * values through a plain %x; give it KERN_DEBUG and explicit
	 * casts so the format specifiers match. */
	printk(KERN_DEBUG
	       "%s: DmaAddr=@%x Total=%lubytes PrdSz=%u #Prds=%u, dmaEnd 0x%x\n",
	       __func__, (unsigned int)runtime->dma_addr, totbytes,
	       params_period_bytes(params), periods,
	       (unsigned int)prtd->dma_end);

	return 0;
}
Ejemplo n.º 5
0
/*
 * ioctl entry point for the S3C memory allocator character device.
 *
 * Each command copies a struct s3c_mem_alloc (or s3c_mem_dma_param)
 * descriptor in from user space, performs the mmap/munmap work under a
 * per-command mutex, and copies the updated descriptor back out.
 *
 * NOTE(review): `flag` and `physical_address` are file-scope globals
 * shared with the device's mmap handler. Each mutex serialises one
 * command against itself, but different commands use different mutexes,
 * so two distinct commands can race on the globals — confirm intended.
 *
 * Returns 0 on success, -EFAULT on copy/mmap failure, -EINVAL on
 * munmap failure or unknown command.
 */
int s3c_mem_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long *virt_addr;
	struct mm_struct *mm = current->mm;
	struct s3c_mem_alloc param;
	struct s3c_mem_dma_param dma_param;

	switch (cmd) {
		/* allocate kernel memory and map it into the caller */
		case S3C_MEM_ALLOC:
			mutex_lock(&mem_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_alloc_lock);
				return -EFAULT;
			}
			/* `flag` tells the mmap handler which allocation mode
			 * to use; it is cleared again before returning */
			flag = MEM_ALLOC;
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			/* NOTE(review): do_mmap() can return error codes other
			 * than -EINVAL; only that single value is caught here —
			 * confirm other failures cannot occur on this path. */
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_ALLOC FAILED\n");
				flag = 0;
				mutex_unlock(&mem_alloc_lock);
				return -EFAULT;
			}
			/* physical_address is filled in by the mmap handler */
			param.phy_addr = physical_address;
			DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_alloc_lock);

			break;

		/* same as S3C_MEM_ALLOC but the mapping is cacheable */
		case S3C_MEM_CACHEABLE_ALLOC:
			mutex_lock(&mem_cacheable_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_cacheable_alloc_lock);
				return -EFAULT;
			}
			flag = MEM_ALLOC_CACHEABLE;
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_ALLOC FAILED\n");
				flag = 0;
				mutex_unlock(&mem_cacheable_alloc_lock);
				return -EFAULT;
			}
			param.phy_addr = physical_address;
			DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_cacheable_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);

			break;

		/* map an existing allocation (caller supplies phy_addr)
		 * into this process */
		case S3C_MEM_SHARE_ALLOC:
			mutex_lock(&mem_share_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_share_alloc_lock);
				return -EFAULT;
			}
			flag = MEM_ALLOC_SHARE;
			/* NOTE(review): a user-supplied physical address is
			 * written into a trusted global and later mapped —
			 * any process with this fd can map arbitrary physical
			 * memory. Confirm device-node permissions restrict it. */
			physical_address = param.phy_addr;
			DEBUG("param.phy_addr = %08x, %d\n", physical_address, __LINE__);
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_SHARE_ALLOC FAILED\n");
				flag = 0;
				mutex_unlock(&mem_share_alloc_lock);
				return -EFAULT;
			}
			DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_share_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);

			break;

		/* cacheable variant of S3C_MEM_SHARE_ALLOC */
		case S3C_MEM_CACHEABLE_SHARE_ALLOC:
			mutex_lock(&mem_cacheable_share_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_cacheable_share_alloc_lock);
				return -EFAULT;
			}
			flag = MEM_ALLOC_CACHEABLE_SHARE;
			physical_address = param.phy_addr;
			DEBUG("param.phy_addr = %08x, %d\n", physical_address, __LINE__);
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_SHARE_ALLOC FAILED\n");
			flag = 0;
				mutex_unlock(&mem_cacheable_share_alloc_lock);
				return -EFAULT;
			}
			DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_cacheable_share_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);

			break;

		/* unmap and free a previous S3C_MEM_ALLOC allocation */
		case S3C_MEM_FREE:
			mutex_lock(&mem_free_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_free_lock);
				return -EFAULT;
			}

			DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if (do_munmap(mm, param.vir_addr, param.size) < 0) {
				printk("do_munmap() failed !!\n");
				mutex_unlock(&mem_free_lock);
				return -EINVAL;
			}
			/* NOTE(review): kfree() on phys_to_virt() of a
			 * user-supplied physical address trusts user space
			 * completely — an arbitrary-free primitive if the
			 * address was never allocated by this driver. */
			virt_addr = (unsigned long *)phys_to_virt(param.phy_addr);

			kfree(virt_addr);
			param.size = 0;
			DEBUG("do_munmap() succeed !!\n");

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_free_lock);
				return -EFAULT;
			}

			mutex_unlock(&mem_free_lock);

			break;

		/* unmap a shared mapping; the backing memory is not freed */
		case S3C_MEM_SHARE_FREE:
			mutex_lock(&mem_share_free_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_share_free_lock);
				return -EFAULT;
			}

			DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if (do_munmap(mm, param.vir_addr, param.size) < 0) {
				printk("do_munmap() failed - MEM_SHARE_FREE!!\n");
				mutex_unlock(&mem_share_free_lock);
				return -EINVAL;
			}

			param.vir_addr = 0;
			DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_share_free_lock);
				return -EFAULT;
			}

			mutex_unlock(&mem_share_free_lock);

			break;

#if 0 /* IOCTL for Old PL-080 codes (dma-pl080.c) */
		case S3C_MEM_DMA_COPY:
			if(copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}
			//printk("S3C_MEM_DMA_COPY called\n");

			if (s3c2410_dma_request(DMACH_3D_M2M, &s3c_m2m_dma_client, NULL)) {
				printk(KERN_WARNING "Unable to get DMA channel.\n");
				return -1;
			}

			s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_m2m_dma_finish);

			//dma_cache_maint(dma_param.src_addr,sizeof(unsigned long long), DMA_BIDIRECTIONAL);

 //  		    	printk("MEMCPY src=%p,dst=%p,size=%d\n", dma_param.src_addr,dma_param.dst_addr, dma_param.size);

			/* Source address */
			s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM, dma_param.src_addr);
			s3c2410_dma_config(DMACH_3D_M2M, 8);

			/* Destination address : Data buffer address */
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, dma_param.dst_addr, dma_param.size);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);

			wait_for_completion(&s3c_m2m_dma_complete);
#if 0 /* Test code with hard coding */
			/* Destination address : Data buffer address */
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000, 0x4000);
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x10000, 0x4000);
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x20000, 0x4000);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);

			wait_for_completion(&s3c_m2m_dma_complete);
			//wait_for_completion(&s3c_m2m_dma_complete);
			//wait_for_completion(&s3c_m2m_dma_complete);

			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x30000, 0x4000);
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x40000, 0x4000);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
			wait_for_completion(&s3c_m2m_dma_complete);
			//wait_for_completion(&s3c_m2m_dma_complete);

			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x50000, 0x4000);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
			wait_for_completion(&s3c_m2m_dma_complete);
#endif /* Test code with hard coding */

			s3c2410_dma_free(DMACH_3D_M2M, &s3c_m2m_dma_client);

			if(copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}

			break;

		case S3C_MEM_DMA_SET:
			if(copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}

			if (s3c2410_dma_request(DMACH_3D_M2M, &s3c_m2m_dma_client, NULL)) {
				printk(KERN_WARNING "Unable to get DMA channel.\n");
				return -1;
			}

			s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_m2m_dma_finish);

			//dma_cache_maint(dma_param.src_addr,sizeof(unsigned long long), DMA_BIDIRECTIONAL);

//   		    	printk("MEMSET src=%p,dst=%p,size=%d\n", dma_param.src_addr,dma_param.dst_addr, dma_param.size);

			/* Source address */
			s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM_SET, dma_param.src_addr); 
			s3c2410_dma_config(DMACH_3D_M2M, 8);
			
			/* Destination address : Data buffer address */
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, dma_param.dst_addr, dma_param.size);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);

			wait_for_completion(&s3c_m2m_dma_complete);

			s3c2410_dma_free(DMACH_3D_M2M, &s3c_m2m_dma_client);

			if(copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}
			break;
#endif

		default:
			DEBUG("s3c_mem_ioctl() : default !!\n");
			return -EINVAL;
	}

	return 0;
}
Ejemplo n.º 6
0
/*
 * hw_params callback for the S5P PCM backend (normal mode).
 *
 * Claims (or re-claims, if the dai's dma_data changed) the DMA channel,
 * configures it for memory->device (playback) or device->memory
 * (capture), attaches the buffer-done callback, and records the buffer
 * geometry under prtd->lock. Playback buffers are over-allocated by a
 * factor of ANDROID_BUF_NUM.
 *
 * Returns 0 on success (including the bufferless case) or the error
 * from s3c2410_dma_request().
 */
static int s5p_pcm_hw_params_nm(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s5p_runtime_data *prtd = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct s5p_pcm_dma_params *dma = rtd->dai->cpu_dai->dma_data;
	unsigned long totbytes = params_buffer_bytes(params);
	int ret=0;

	s3cdbg("Entered %s, params = %p \n", __FUNCTION__, prtd->params);

	/* By Jung */
	/* playback side gets ANDROID_BUF_NUM buffers' worth of space */
	if(substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
        totbytes = params_buffer_bytes(params) * ANDROID_BUF_NUM;
    else
        totbytes = params_buffer_bytes(params);


	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!dma)
		return 0;

	/* this may get called several times by oss emulation
	 * with different params */
	if (prtd->params == NULL) {
		prtd->params = dma;
		s3cdbg("params %p, client %p, channel %d\n", prtd->params,
			prtd->params->client, prtd->params->channel);

		/* prepare DMA */
		/* NOTE(review): failure is tested with `if (ret)` here while
		 * sibling implementations test `ret < 0` — confirm the
		 * success return convention of s3c2410_dma_request. */
		ret = s3c2410_dma_request(prtd->params->channel,
					  prtd->params->client, NULL);

		if (ret) {
			printk(KERN_ERR "failed to get dma channel\n");
			return ret;
		}
	} else if (prtd->params != dma) {
		/* dai switched to a different DMA descriptor: release the
		 * old channel and claim the new one */
		s3c2410_dma_free(prtd->params->channel, prtd->params->client);

		prtd->params = dma;
		s3cdbg("params %p, client %p, channel %d\n", prtd->params,
			prtd->params->client, prtd->params->channel);

		/* prepare DMA */
		ret = s3c2410_dma_request(prtd->params->channel,
					  prtd->params->client, NULL);

		if (ret) {
			printk(KERN_ERR "failed to get dma channel\n");
			return ret;
		}
	}

	/* channel needs configuring for mem=>device, increment memory addr,
	 * sync to pclk, half-word transfers to the IIS-FIFO. */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		s3c2410_dma_devconfig(prtd->params->channel,
				S3C2410_DMASRC_MEM, 0,
				prtd->params->dma_addr);

		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size, 0);

	} else {
		s3c2410_dma_devconfig(prtd->params->channel,
				S3C2410_DMASRC_HW, 0,
				prtd->params->dma_addr);		

		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size, 0);
	}

	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
				    s5p_audio_buffdone);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	runtime->dma_bytes = totbytes;

	/* publish the buffer geometry atomically w.r.t. the buffer-done
	 * callback */
	spin_lock_irq(&prtd->lock);
	prtd->dma_loaded = 0;
	prtd->dma_limit = runtime->hw.periods_min;
	prtd->dma_period = params_period_bytes(params);
	prtd->dma_start = runtime->dma_addr;
	prtd->dma_pos = prtd->dma_start;
	prtd->dma_end = prtd->dma_start + totbytes;
	spin_unlock_irq(&prtd->lock);
	s3cdbg("DmaAddr=@%x Total=%lubytes PrdSz=%u #Prds=%u\n",
				runtime->dma_addr, totbytes, params_period_bytes(params), runtime->hw.periods_min);

	return 0;
}
Ejemplo n.º 7
0
/*
 * hw_params callback for the S3C24xx PCM backend.
 *
 * Claims (or re-claims if the dai's dma_data changed) the DMA channel,
 * configures it per SoC family (older S3C24xx controllers take flag
 * arguments, S3C64xx/S5P variants use the shorter signatures selected
 * by the #if below), attaches the buffer-done callback, and records the
 * buffer geometry under prtd->lock. Playback buffers are over-allocated
 * by ANDROID_BUF_NUM.
 *
 * NOTE(review): unlike the sibling hw_params implementations in this
 * file, prtd->dma_loaded is NOT reset here — confirm it is cleared
 * elsewhere (e.g. in the prepare callback).
 */
static int s3c24xx_pcm_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s3c24xx_runtime_data *prtd = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct s3c24xx_pcm_dma_params *dma = rtd->dai->cpu_dai->dma_data;
	unsigned long totbytes;
	int ret=0;
	
	s3cdbg("Entered %s, params = %p \n", __FUNCTION__, prtd->params);

	/* playback side gets ANDROID_BUF_NUM buffers' worth of space */
	if(substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		totbytes = params_buffer_bytes(params) * ANDROID_BUF_NUM;
	
	else 
		totbytes = params_buffer_bytes(params);

//	printk("[%d]:ring_buf_num %d\n", substream->stream, ring_buf_num);

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!dma)
		return 0;

	/* this may get called several times by oss emulation
	 * with different params */
	if (prtd->params == NULL) {
		prtd->params = dma;
		s3cdbg("params %p, client %p, channel %d\n", prtd->params,
			prtd->params->client, prtd->params->channel);


		/* prepare DMA */
		ret = s3c2410_dma_request(prtd->params->channel,
					  prtd->params->client, NULL);

		if (ret) {
			printk(KERN_ERR "failed to get dma channel\n");
			return ret;
		}
	} else if (prtd->params != dma) {
		/* dai switched DMA descriptors: swap channels */
		s3c2410_dma_free(prtd->params->channel, prtd->params->client);
		prtd->params = dma;
		s3cdbg("params %p, client %p, channel %d\n", prtd->params,
			prtd->params->client, prtd->params->channel);


		/* prepare DMA */
		ret = s3c2410_dma_request(prtd->params->channel,
					  prtd->params->client, NULL);

		if (ret) {
			printk(KERN_ERR "failed to get dma channel\n");
			return ret;
		}
	}

	/* channel needs configuring for mem=>device, increment memory addr,
	 * sync to pclk, half-word transfers to the IIS-FIFO. */
#if !defined (CONFIG_CPU_S3C6400) && !defined (CONFIG_CPU_S3C6410)  && !defined(CONFIG_CPU_S5PC100) && !defined (CONFIG_CPU_S5P6440)
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		s3c2410_dma_devconfig(prtd->params->channel,
				S3C2410_DMASRC_MEM, S3C2410_DISRCC_INC |
				S3C2410_DISRCC_APB, prtd->params->dma_addr);

		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size,
				S3C2410_DCON_SYNC_PCLK | 
				S3C2410_DCON_HANDSHAKE);
	} else {
		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size,
				S3C2410_DCON_HANDSHAKE | 
				S3C2410_DCON_SYNC_PCLK);

		s3c2410_dma_devconfig(prtd->params->channel,
					S3C2410_DMASRC_HW, 0x3,
					prtd->params->dma_addr);
	}

#else
	/* S3C64xx/S5P variants of the DMA API take fewer arguments */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		s3c2410_dma_devconfig(prtd->params->channel,
				S3C2410_DMASRC_MEM,
				prtd->params->dma_addr);

		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size);
	} else {
		s3c2410_dma_devconfig(prtd->params->channel,
				S3C2410_DMASRC_HW,
				prtd->params->dma_addr);		

		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size);
	}
#endif

	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
				    s3c24xx_audio_buffdone);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	runtime->dma_bytes = totbytes;

	/* publish the buffer geometry atomically w.r.t. the buffer-done
	 * callback */
	spin_lock_irq(&prtd->lock);
	prtd->dma_limit = runtime->hw.periods_min;
	prtd->dma_period = params_period_bytes(params);
	prtd->dma_start = runtime->dma_addr;
	prtd->dma_pos = prtd->dma_start;
	prtd->dma_end = prtd->dma_start + totbytes;
	spin_unlock_irq(&prtd->lock);

	s3cdbg("Entered %s, line %d \n", __FUNCTION__, __LINE__);
	return 0;
}
Ejemplo n.º 8
0
static int s3c_g3d_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{

	u32 val;
	DMA_BLOCK_STRUCT dma_block;
	s3c_3d_dma_info dma_info;
	DECLARE_COMPLETION_ONSTACK(complete);

	switch(cmd) {
		case WAIT_FOR_FLUSH:
			
			//if fifo has already been flushed, return;
			val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
			//printk("read pipestate = 0x%x\n",val);
			if((val & arg) ==0)break;

			// enable interrupt
			interrupt_already_recevied = 0;
			__raw_writel(0x0001171f,s3c_g3d_base+FGGB_PIPEMASK);
			__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);

			//printk("wait for flush (arg=0x%lx)\n",arg);
			

			while(1){
					wait_event_interruptible(waitq, (interrupt_already_recevied>0));
					__raw_writel(0,s3c_g3d_base+FGGB_INTMASK);
					interrupt_already_recevied = 0;
					//if(interrupt_already_recevied==0)interruptible_sleep_on(&waitq);
					val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
					//printk("in while read pipestate = 0x%x\n",val);
					if(val & arg){}
					else{
						 break;
					}
					__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);
			}
			break;

		case GET_CONFIG: 
			copy_to_user((void *)arg,&g3d_config,sizeof(G3D_CONFIG_STRUCT));
			break;

		case START_DMA_BLOCK:
			copy_from_user(&dma_block,(void *)arg,sizeof(DMA_BLOCK_STRUCT));
			if(dma_block.offset%4!=0)
			{	
				printk("G3D: dma offset is not aligned by word\n");
				return -EINVAL;
			}
			if(dma_block.size%4!=0)
			{
				printk("G3D: dma size is not aligned by word\n");
				return -EINVAL;
			}
			if(dma_block.offset+dma_block.size >g3d_config.dma_buffer_size)
			{
				printk("G3D: offset+size exceeds dam buffer\n");
				return -EINVAL;
			}

			dma_info.src = g3d_config.dma_buffer_addr+dma_block.offset;
			dma_info.len = dma_block.size;
			dma_info.dst = s3c_g3d_base_physical+FGGB_HOSTINTERFACE;

		//	printk(" dma src=0x%x\n",dma_info.src);
		//	printk(" dma len =%u\n",dma_info.len);
		//	printk(" dma dst = 0x%x\n",dma_info.dst);
			
			dma_3d_done = &complete;
			
			if (s3c2410_dma_request(DMACH_3D_IN, &s3c6410_3d_dma_client, NULL)) 
			{
				printk(KERN_WARNING "Unable to get DMA channel.\n");
				return -EFAULT;
			}

			s3c2410_dma_set_buffdone_fn(DMACH_3D_IN, s3c_g3d_dma_finish);
			s3c2410_dma_devconfig(DMACH_3D_IN, S3C_DMA_MEM2G3D, 1, (u_long) dma_info.src);
			s3c2410_dma_config(DMACH_3D_IN, 4, 4);
			s3c2410_dma_setflags(DMACH_3D_IN, S3C2410_DMAF_AUTOSTART);
			//consistent_sync((void *) dma_info.dst, dma_info.len, DMA_FROM_DEVICE);
		//	s3c2410_dma_enqueue(DMACH_3D_IN, NULL, (dma_addr_t) virt_to_dma(NULL, dma_info.dst), dma_info.len);
			s3c2410_dma_enqueue(DMACH_3D_IN, NULL, (dma_addr_t) dma_info.dst, dma_info.len);

		//	printk("wait for end of dma operation\n");
			wait_for_completion(&complete);
		//	printk("dma operation is performed\n");

			s3c2410_dma_free(DMACH_3D_IN, &s3c6410_3d_dma_client);
		
			
			break;
		
		default:
			return -EINVAL;
	}
	return 0;
}
Ejemplo n.º 9
0
/*
 * Open-time attach for the S3C audio device.
 *
 * Validates access (stream present, not already open), requests the
 * playback and/or capture DMA channels, initialises the DMA state,
 * runs the one-time hardware init, and resets the per-stream buffer
 * bookkeeping. Called with the file's f_mode deciding which streams
 * are set up; returns 0 or a negative errno with state->sem released.
 */
int s3c_audio_attach(struct inode *inode, struct file *file,
			audio_state_t *state)
{
	audio_stream_t *os = state->output_stream;
	audio_stream_t *is = state->input_stream;
	int err, need_tx_dma = 0;

	down(&state->sem);

	/* access control */
	err = -ENODEV;
	if ((file->f_mode & FMODE_WRITE) && !os)
		goto out;
	if ((file->f_mode & FMODE_READ) && !is)
		goto out;
	err = -EBUSY;
	if ((file->f_mode & FMODE_WRITE) && state->wr_ref)
		goto out;
	if ((file->f_mode & FMODE_READ) && state->rd_ref)
		goto out;
	err = -EINVAL;
	if ((file->f_mode & FMODE_READ) && state->need_tx_for_rx && !os)
		goto out;

	/* NOTE(review): this deliberately forces any readable fd to
	 * read-only, dropping FMODE_WRITE for O_RDWR opens — confirm
	 * this is intended before removing it. */
	if (file->f_mode & FMODE_READ)
		file->f_mode = FMODE_READ;

	/* request DMA channels */
	if (file->f_mode & FMODE_WRITE) {
		if (s3c2410_dma_request(os->dma, &s3c_play_dma_client, NULL)) {
			printk(KERN_WARNING "unable to get DMA channel.\n");
			err = -EBUSY;
			goto out;
		}
		/* fix: record that the playback channel is now held so the
		 * capture error path below can release it; the old code
		 * never set this, leaking the channel. */
		need_tx_dma = 1;

		err = s3c_iis_dma_init(os, 0);
	}
	if (file->f_mode & FMODE_READ) {
		if (s3c2410_dma_request(is->dma, &s3c_rec_dma_client, NULL)) {
			printk(KERN_WARNING "unable to get DMA channel.\n");
			err = -EBUSY;
			/* fix: release the *playback* channel (os->dma) with
			 * the client it was requested with; the old code
			 * wrongly passed is->dma here. */
			if (need_tx_dma)
				s3c2410_dma_free(os->dma, &s3c_play_dma_client);
			goto out;
		}

		err = s3c_iis_dma_init(is, 1);
	}

	/* now complete initialisation */
	if (!AUDIO_ACTIVE(state)) {
		if (state->hw_init)
			state->hw_init(state->data);
	}

	if ((file->f_mode & FMODE_WRITE)) {
		state->wr_ref = 1;
		audio_reset(os);
		os->fragsize = AUDIO_FRAGSIZE_DEFAULT;
		os->nbfrags = AUDIO_NBFRAGS_DEFAULT;
		os->mapped = 0;
		init_waitqueue_head(&os->wq);
	}
	if (file->f_mode & FMODE_READ) {
		state->rd_ref = 1;
		audio_reset(is);
		is->fragsize = AUDIO_FRAGSIZE_DEFAULT;
		is->nbfrags = AUDIO_NBFRAGS_DEFAULT;
		is->mapped = 0;
		init_waitqueue_head(&is->wq);
	}

	file->private_data = state;
	file->f_op = &s3c_f_ops;

	err = 0;

out:
	up(&state->sem);
	return err;
}
Ejemplo n.º 10
0
/*
 * Probe the S3C MMC/SD host controller.
 *
 * Allocates the mmc_host, fills in 2410/2440-specific register offsets
 * and clock divider, claims the MMIO region, both interrupts (mci and
 * card-detect), the DMA channel and the "sdi" clock, then registers the
 * host with the MMC core.
 *
 * On failure every resource acquired so far is released via the label
 * chain at the bottom; returns 0 on success or a negative errno.
 */
static int s3cmci_probe(struct platform_device *pdev, int is2440)
{
	struct mmc_host 	*mmc;
	struct s3cmci_host 	*host;

	int ret;

	mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto probe_out;
	}

	host = mmc_priv(mmc);
	host->mmc 	= mmc;
	host->pdev	= pdev;

	/* fall back to the built-in default platform data if none given */
	host->pdata = pdev->dev.platform_data;
	if (!host->pdata) {
		pdev->dev.platform_data = &s3cmci_def_pdata;
		host->pdata = &s3cmci_def_pdata;
	}

	spin_lock_init(&host->complete_lock);
	tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host);

	/* register offsets and clock divider differ between 2410 and 2440 */
	if (is2440) {
		host->is2440	= 1;
		host->sdiimsk	= S3C2440_SDIIMSK;
		host->sdidata	= S3C2440_SDIDATA;
		host->clk_div	= 1;
	} else {
		host->is2440	= 0;
		host->sdiimsk	= S3C2410_SDIIMSK;
		host->sdidata	= S3C2410_SDIDATA;
		host->clk_div	= 2;
	}

	host->dodma		= 0;
	host->complete_what 	= COMPLETION_NONE;
	host->pio_active 		= XFER_NONE;

	host->dma		= S3CMCI_DMA;

	host->irq_cd 	= s3c2410_gpio_getirq(host->pdata->gpio_detect);

	s3c2410_gpio_cfgpin(host->pdata->gpio_detect, S3C2410_GPIO_IRQ);

	host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!host->mem) {
		dev_err(&pdev->dev, "failed to get io memory region resouce.\n");

		ret = -ENOENT;
		goto probe_free_host;
	}

	host->mem = request_mem_region(host->mem->start,
		RESSIZE(host->mem), pdev->name);

	if (!host->mem) {
		dev_err(&pdev->dev, "failed to request io memory region.\n");
		ret = -ENOENT;
		goto probe_free_host;
	}

	host->base = ioremap(host->mem->start, RESSIZE(host->mem));
	if (host->base == 0) {
		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
		ret = -EINVAL;
		goto probe_free_mem_region;
	}

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq == 0) {
		dev_err(&pdev->dev, "failed to get interrupt resouce.\n");
		ret = -EINVAL;
		goto probe_iounmap;
	}

	if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) {
		dev_err(&pdev->dev, "failed to request mci interrupt.\n");
		ret = -ENOENT;
		goto probe_iounmap;
	}

	/* kept disabled until the host is fully set up */
	disable_irq(host->irq);

	printk("======>>>> card detect [ %d ]\n", host->pdata->gpio_detect);

	s3c2410_gpio_cfgpin(host->pdata->gpio_detect, S3C2410_GPIO_IRQ);
	set_irq_type(host->irq_cd, IRQT_BOTHEDGE);

	if (request_irq(host->irq_cd, s3cmci_irq_cd, 0, DRIVER_NAME, host)) {
		dev_err(&pdev->dev,
			"failed to request card detect interrupt.\n");

		ret = -ENOENT;
		goto probe_free_irq;
	}

	if (host->pdata->gpio_wprotect)
		s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect,
				    S3C2410_GPIO_INPUT);

	if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL)) {
		dev_err(&pdev->dev, "unable to get DMA channel.\n");
		ret = -EBUSY;
		goto probe_free_irq_cd;
	}

	host->clk = clk_get(&pdev->dev, "sdi");
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to find clock source.\n");
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		/* fix: jumping straight to probe_free_host here leaked the
		 * DMA channel, both IRQs, the ioremap and the mem region */
		goto probe_free_dma;
	}

	if ((ret = clk_enable(host->clk))) {
		dev_err(&pdev->dev, "failed to enable clock source.\n");
		goto clk_free;
	}

	host->clk_rate = clk_get_rate(host->clk);

	mmc->ops 	= &s3cmci_ops;
	mmc->ocr_avail	= host->pdata->ocr_avail;
	mmc->caps	= MMC_CAP_4_BIT_DATA;
	mmc->f_min 	= host->clk_rate / (host->clk_div * 256);
	mmc->f_max 	= host->clk_rate / host->clk_div;

	mmc->max_blk_count	= 4095;
	mmc->max_blk_size	= 4095;
	mmc->max_req_size	= 4095 * 512;
	mmc->max_seg_size	= mmc->max_req_size;

	mmc->max_phys_segs	= 128;
	mmc->max_hw_segs	= 128;

	dbg(host, dbg_debug, "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n",
		(host->is2440?"2440":""),
		host->base, host->irq, host->irq_cd, host->dma);

	if ((ret = mmc_add_host(mmc))) {
		dev_err(&pdev->dev, "failed to add mmc host.\n");
		goto free_dmabuf;
	}

	platform_set_drvdata(pdev, mmc);

	dev_info(&pdev->dev,"initialisation done.\n");

	return 0;

	/* error unwinding: each label releases one resource and falls
	 * through to the next, in reverse order of acquisition */
 free_dmabuf:
	clk_disable(host->clk);

 clk_free:
	clk_put(host->clk);

 probe_free_dma:
	/* fix: the DMA channel requested above was never released on any
	 * error path in the old code */
	s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client);

 probe_free_irq_cd:
	free_irq(host->irq_cd, host);

 probe_free_irq:
	free_irq(host->irq, host);

 probe_iounmap:
	iounmap(host->base);

 probe_free_mem_region:
	release_mem_region(host->mem->start, RESSIZE(host->mem));

 probe_free_host:
	mmc_free_host(mmc);
 probe_out:
	return ret;
}
Ejemplo n.º 11
0
/* s3c_spi_message_start
 *
 * configure the spi controler and transmit start of a message onto the bus
*/
/*
 * s3c_spi_message_start - program the SPI controller and start one message
 * @spi: driver state; spi->msg describes the transfer (flags, wbuf/rbuf, len)
 *
 * Re-initialises the controller, then walks the documented setup sequence:
 * channel config, clock config, mode config, interrupt enables, packet
 * count, channel on, optional DMA setup, and finally chip-select to begin
 * the transfer.  No return value; DMA request failure is only logged.
 */
static void s3c_spi_message_start(struct s3c_spi *spi)
{
    struct spi_msg *msg = spi->msg;

    u32 spi_chcfg = 0, spi_slavecfg, spi_inten= 0, spi_packet=0;

//	u8 prescaler = 0;		// 44.435 Mhz
    u8 prescaler = 1;		// 22.2175 Mhz
//	u8 prescaler = 2;		// 14.81 Mhz
//	u8 prescaler = 3;		// 11.10875 Mhz
//	u8 prescaler = 4;		// 8.887Mhz

    u32 spi_clkcfg = 0, spi_modecfg = 0 ;

    /* initialise the spi controller (resets any previous configuration) */
    s3c_spi_hw_init(spi);

    /* 1. Set transfer type (CPOL & CPHA set) */
    spi_chcfg = SPI_CH_RISING | SPI_CH_FORMAT_A;

    /* master/slave role is taken from the message flags */
    if (spi->msg->flags & SPI_M_MODE_MASTER) {
        spi_chcfg |= SPI_CH_MASTER;
    } else if(spi->msg->flags & SPI_M_MODE_SLAVE) {
        spi_chcfg |= SPI_CH_SLAVE;
    }

    writel( spi_chcfg , spi->regs + S3C_CH_CFG);

    /* 2. Set clock configuration register */
    spi_clkcfg = SPI_ENCLK_ENABLE;

#if defined CONFIG_SPICLK_PCLK
    spi_clkcfg |= SPI_CLKSEL_PCLK;
#elif defined CONFIG_SPICLK_EPLL
    spi_clkcfg |= SPI_CLKSEL_ECLK;
#elif defined CONFIG_SPICLK_USBCLK
    spi_clkcfg |= SPI_CLKSEL_USBCLK;
#else
#error you must define correct confige file.
#endif
    writel( spi_clkcfg , spi->regs + S3C_CLK_CFG);

    /* read back, then OR in the prescaler (write-read-modify-write) */
    spi_clkcfg = readl( spi->regs + S3C_CLK_CFG);

    /* SPI clockout = clock source / (2 * (prescaler +1)) */
    spi_clkcfg |= prescaler;
    writel( spi_clkcfg , spi->regs + S3C_CLK_CFG);

    /* 3. Set SPI MODE configuration register */
#ifdef CONFIG_WORD_TRANSIZE
    spi_modecfg = SPI_MODE_CH_TSZ_WORD| SPI_MODE_BUS_TSZ_WORD;
#else
    spi_modecfg = SPI_MODE_CH_TSZ_BYTE| SPI_MODE_BUS_TSZ_BYTE;
#endif
    /* default to interrupt (non-DMA) mode; DMA is switched on below */
    spi_modecfg |= SPI_MODE_TXDMA_OFF| SPI_MODE_SINGLE| SPI_MODE_RXDMA_OFF;

    if (msg->flags & SPI_M_DMA_MODE) {
        spi_modecfg |= SPI_MODE_TXDMA_ON| SPI_MODE_RXDMA_ON;
    }

    if (msg->wbuf)
        spi_modecfg |= ( 0x3f << 5); /* Tx FIFO trigger level in INT mode */
    if (msg->rbuf)
        spi_modecfg |= ( 0x3f << 11); /* Rx FIFO trigger level in INT mode */

    /* trailing-byte count field set to maximum */
    spi_modecfg |= ( 0x3ff << 19);
    writel(spi_modecfg, spi->regs + S3C_MODE_CFG);

    /* 4. Set SPI INT_EN register */

    if (msg->wbuf)
        spi_inten = SPI_INT_TX_FIFORDY_EN|SPI_INT_TX_UNDERRUN_EN|SPI_INT_TX_OVERRUN_EN;
    /* NOTE(review): plain '=' below discards the Tx enables set above when
     * both wbuf and rbuf are present — confirm whether '|=' was intended. */
    if (msg->rbuf) {
        spi_inten = SPI_INT_RX_FIFORDY_EN|SPI_INT_RX_UNDERRUN_EN|SPI_INT_RX_OVERRUN_EN|SPI_INT_TRAILING_EN	;
    }
    writel(spi_inten, spi->regs + S3C_SPI_INT_EN);

    /* clear any stale pending interrupt bits before starting */
    writel(0x1f, spi->regs + S3C_PENDING_CLR);

    /* 5. Set Packet Count configuration register */
    spi_packet = SPI_PACKET_CNT_EN;
    spi_packet |= 0xffff;
    writel(spi_packet, spi->regs + S3C_PACKET_CNT);

    /* 6. Set Tx or Rx Channel on */
    spi_chcfg = readl(spi->regs + S3C_CH_CFG);
    spi_chcfg |= SPI_CH_TXCH_OFF | SPI_CH_RXCH_OFF;

    if (msg->wbuf)
        spi_chcfg |= SPI_CH_TXCH_ON;
    if (msg->rbuf)
        spi_chcfg |= SPI_CH_RXCH_ON;

    writel(spi_chcfg, spi->regs + S3C_CH_CFG);

    if (msg->flags & SPI_M_DMA_MODE) {
        spi->dma = S3C_SPI_DMA;

        /* pick the DMA sub-channel from the transfer direction; if both
         * buffers are set, rbuf wins (last assignment) */
        if (msg->wbuf)
            spi->subchannel = DMACH_SPI0_OUT;
        if (msg->rbuf)
            spi->subchannel = DMACH_SPI0_IN;

        /* NOTE(review): on request failure we only warn and still use the
         * channel below — confirm this cannot happen in practice. */
        if (s3c2410_dma_request(spi->subchannel, &s3c6400spi_dma_client, NULL)) {
            printk(KERN_WARNING  "unable to get DMA channel.\n" );
        }

        s3c2410_dma_set_buffdone_fn(spi->subchannel, s3c_spi_dma_cb);
        s3c2410_dma_set_opfn(spi->subchannel,  NULL);


        if (msg->wbuf)
            s3c_spi_dma_init(spi, 0);
        if (msg->rbuf)
            s3c_spi_dma_init(spi, 1);

        s3c2410_dma_enqueue(spi->subchannel, (void *) spi, spi->dmabuf_addr, spi->msg->len);
    }

    /* 7. Set nSS low to start Tx or Rx operation */
    /* NOTE(review): '&= SPI_SLAVE_SIG_ACT' masks rather than clears bits —
     * verify against the S3C_SLAVE_SEL register layout. */
    spi_slavecfg = readl(spi->regs + S3C_SLAVE_SEL);
    spi_slavecfg &= SPI_SLAVE_SIG_ACT;
    spi_slavecfg |= (0x3f << 4);
    writel(spi_slavecfg, spi->regs + S3C_SLAVE_SEL);

    print_reg(spi);
}
Ejemplo n.º 12
0
/*
 * s3c_g3d_ioctl - userspace control entry point for the S3C 3D (G3D) engine
 * @inode: unused
 * @file:  per-open handle; file->private_data carries the owner/process id
 * @cmd:   one of the G3D ioctl commands dispatched below
 * @arg:   command-specific value or userspace pointer
 *
 * Handles pipeline flush waits, config queries, DMA block transfers into
 * the host interface, chunk-based memory alloc/free (mmap-backed), the
 * userspace SFR lock, cache maintenance, and power-domain bookkeeping.
 * Returns 0 on success, -EFAULT/-EINVAL on failure.
 */
static int s3c_g3d_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	u32 val;
	DMA_BLOCK_STRUCT dma_block;
	s3c_3d_dma_info dma_info;
	DECLARE_COMPLETION_ONSTACK(complete);

	struct mm_struct *mm = current->mm;
	struct s3c_3d_mem_alloc param;
	struct s3c_3d_pm_status param_pm;

	unsigned int timer;
	
	switch (cmd) {
	case WAIT_FOR_FLUSH:
		/* arg is a pipeline-state bitmask; wait until those bits clear */
		//if fifo has already been flushed, return;
		val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
		//printk("read pipestate = 0x%x\n",val);
		if((val & arg) ==0) break;

		// enable interrupt
		interrupt_already_recevied = 0;
		__raw_writel(0x0001171f,s3c_g3d_base+FGGB_PIPEMASK);
		__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);

		//printk("wait for flush (arg=0x%lx)\n",arg);

		/* bounded retry count so a stuck pipeline cannot hang forever */
		timer = 1000000;

		while(timer) {
			wait_event_interruptible_timeout(waitq, (interrupt_already_recevied>0), 1*HZ);

			/* mask the interrupt while re-sampling the pipeline state */
			__raw_writel(0,s3c_g3d_base+FGGB_INTMASK);
			interrupt_already_recevied = 0;
			//if(interrupt_already_recevied==0)interruptible_sleep_on(&waitq);
			val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
			//printk("in while read pipestate = 0x%x\n",val);
			if(val & arg){
			} else{
				break;
			}
			__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);
			timer --;
		}
		break;

	case GET_CONFIG:
		/* copy the static hardware/DMA configuration out to userspace */
		if (copy_to_user((void *)arg,&g3d_config,sizeof(G3D_CONFIG_STRUCT))) {
			printk("G3D: copy_to_user failed to get g3d_config\n");
			return -EFAULT;		
		}
		break;

	case START_DMA_BLOCK:
		/* DMA a block from the shared DMA buffer into the G3D host port */
		if (copy_from_user(&dma_block,(void *)arg,sizeof(DMA_BLOCK_STRUCT))) {
			printk("G3D: copy_to_user failed to get dma_block\n");
			return -EFAULT;		
		}

		/* validate word alignment and that the block fits the buffer */
		if (dma_block.offset%4!=0) {
			printk("G3D: dma offset is not aligned by word\n");
			return -EINVAL;
		}
		if (dma_block.size%4!=0) {
			printk("G3D: dma size is not aligned by word\n");
			return -EINVAL;
		}
		if (dma_block.offset+dma_block.size >g3d_config.dma_buffer_size) {
			printk("G3D: offset+size exceeds dam buffer\n");
			return -EINVAL;
		}

		dma_info.src = g3d_config.dma_buffer_addr+dma_block.offset;
		dma_info.len = dma_block.size;
		dma_info.dst = s3c_g3d_base_physical+FGGB_HOSTINTERFACE;

		DEBUG(" dma src=0x%x\n", dma_info.src);
		DEBUG(" dma len =%u\n", dma_info.len);
		DEBUG(" dma dst = 0x%x\n", dma_info.dst);

		dma_3d_done = &complete;

		if (s3c2410_dma_request(DMACH_3D_M2M, &s3c6410_3d_dma_client, NULL)) {
			printk(KERN_WARNING "Unable to get DMA channel(DMACH_3D_M2M).\n");
			return -EFAULT;
		}

		/* NOTE(review): devconfig takes dma_info.src as the "device" side
		 * and enqueue passes dma_info.dst — confirm this matches the
		 * mem-to-mem orientation expected by s3c2410_dma_devconfig. */
		s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_g3d_dma_finish);
		s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM, 1, (u_long) dma_info.src);
		s3c2410_dma_config(DMACH_3D_M2M, 4, 4);
		s3c2410_dma_setflags(DMACH_3D_M2M, S3C2410_DMAF_AUTOSTART);

		//consistent_sync((void *) dma_info.dst, dma_info.len, DMA_FROM_DEVICE);
	//	s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) virt_to_dma(NULL, dma_info.dst), dma_info.len);
		s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) dma_info.dst, dma_info.len);

	//	printk("wait for end of dma operation\n");
		/* s3c_g3d_dma_finish completes this from the buffdone callback */
		wait_for_completion(&complete);
	//	printk("dma operation is performed\n");

		s3c2410_dma_free(DMACH_3D_M2M, &s3c6410_3d_dma_client);

		break;

	case S3C_3D_MEM_ALLOC:		
		/* reserve a chunk and mmap it into the caller's address space;
		 * 'flag' tells the driver's mmap handler which mode is active */
		mutex_lock(&mem_alloc_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}
       
		flag = MEM_ALLOC;
		
		param.size = s3c_g3d_available_chunk_size(param.size,(unsigned int)file->private_data);

		if (param.size == 0){
			printk("S3C_3D_MEM_ALLOC FAILED because there is no block memory bigger than you request\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}			
             
		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x\n", param.vir_addr);

		if(param.vir_addr == -EINVAL) {
			printk("S3C_3D_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}
		/* physical_address is filled in by the mmap path during do_mmap */
		param.phy_addr = physical_address;

       // printk("alloc %d\n", param.size);
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;		
		}

		flag = 0;
		
//		printk("\n\n====Success the malloc from kernel=====\n");
		mutex_unlock(&mem_alloc_lock);
		
		break;

	case S3C_3D_MEM_FREE:	
		/* return a previously allocated chunk; the munmap is left to
		 * userspace (see the commented-out do_munmap below) */
		mutex_lock(&mem_free_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		/*
		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed !!\n");
			mutex_unlock(&mem_free_lock);
			return -EINVAL;
		}
		*/

		s3c_g3d_release_chunk(param.phy_addr, param.size);
		//printk("KERNEL : virt_addr = 0x%X\n", virt_addr);
		//printk("free %d\n", param.size);


		param.size = 0;
		DEBUG("do_munmap() succeed !!\n");

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}
		
		mutex_unlock(&mem_free_lock);
		
		break;

	case S3C_3D_SFR_LOCK:
		/* NOTE(review): the mutex is deliberately held across the return
		 * and released only by S3C_3D_SFR_UNLOCK from userspace; a caller
		 * that dies while holding it will wedge this lock — confirm the
		 * release path handles that. */
		mutex_lock(&mem_sfr_lock);
		mutex_lock_processID = (unsigned int)file->private_data;
		DEBUG("s3c_g3d_ioctl() : You got a muxtex lock !!\n");
		break;

	case S3C_3D_SFR_UNLOCK:
		mutex_lock_processID = 0;
		mutex_unlock(&mem_sfr_lock);
		DEBUG("s3c_g3d_ioctl() : The muxtex unlock called !!\n");
		break;

	case S3C_3D_MEM_ALLOC_SHARE:		
		/* map an existing physical chunk (phy_addr supplied by caller)
		 * into this process; shares memory allocated by another open */
		mutex_lock(&mem_alloc_share_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_SHARE;

		physical_address = param.phy_addr;

		DEBUG("param.phy_addr = %08x\n", physical_address);

		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x\n", param.vir_addr);

		if(param.vir_addr == -EINVAL) {
			printk("S3C_3D_MEM_ALLOC_SHARE FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;
		}

		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;		
		}

		flag = 0;
		
		mutex_unlock(&mem_alloc_share_lock);
		
		break;

	case S3C_3D_MEM_SHARE_FREE:	
		/* unmap a shared mapping; the chunk itself stays allocated */
		mutex_lock(&mem_share_free_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;		
		}

		DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed - MEM_SHARE_FREE!!\n");
			mutex_unlock(&mem_share_free_lock);
			return -EINVAL;
		}

		param.vir_addr = 0;
		DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;		
		}

		mutex_unlock(&mem_share_free_lock);
		
		break;

	case S3C_3D_CACHE_INVALID:
		/* NOTE(review): vir_addr/size come straight from userspace and are
		 * not validated against the caller's mappings before the dmac_*
		 * range op — confirm upstream callers guarantee validity. */
		mutex_lock(&cache_invalid_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&cache_invalid_lock);
			return -EFAULT;	
		}
		dmac_inv_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_invalid_lock);
		break;

	case S3C_3D_CACHE_CLEAN:
		mutex_lock(&cache_clean_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&cache_clean_lock);
			return -EFAULT;	
		}
		dmac_clean_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_clean_lock);
		break;

	case S3C_3D_CACHE_CLEAN_INVALID:
		mutex_lock(&cache_clean_invalid_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&cache_clean_invalid_lock);
			printk("ERR: Invalid Cache Error\n");	
			return -EFAULT;	
		}
		dmac_flush_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_clean_invalid_lock);
		break;

	case S3C_3D_POWER_INIT:
		/* only validates the userspace struct; no state change here */
		if(copy_from_user(&param_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){
			printk("ERR: Invalid Cache Error\n");	
			return -EFAULT;	
		}
		break;

	case S3C_3D_CRITICAL_SECTION:
#ifdef USE_G3D_DOMAIN_GATING
		/* reference-count critical sections: entering powers the G3D
		 * domain back on if needed, leaving the last one arms the
		 * power-off timer */
		mutex_lock(&pm_critical_section_lock);
		if(copy_from_user(&param_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&pm_critical_section_lock);
			return -EFAULT;	
		}

//		param_pm.memStatus = check_memStatus((unsigned int)file->private_data);

		if(param_pm.criticalSection) g_G3D_CriticalFlag++;
		else g_G3D_CriticalFlag--;

		if(g_G3D_CriticalFlag==0)
		{/*kick power off*/
			/*power off*/
			/*kick timer*/
			mod_timer(&g3d_pm_timer, jiffies + TIMER_INTERVAL);
		}
		else if(g_G3D_CriticalFlag>0)
		{/*kick power on*/
			if(domain_off_check(S3C64XX_DOMAIN_G))
			{/*if powered off*/                        
				if(g_G3D_SelfPowerOFF)
				{/*powered off by 3D PM or by Resume*/
					/*power on*/
					s3c_set_normal_cfg(S3C64XX_DOMAIN_G, S3C64XX_ACTIVE_MODE, S3C64XX_3D);
					if(s3c_wait_blk_pwr_ready(S3C64XX_BLK_G)) {
						printk("[3D] s3c_wait_blk_pwr_ready err\n");
						mutex_unlock(&pm_critical_section_lock);
						return -EFAULT;	
					}
					clk_g3d_enable();
					/*Need here??*/
					softReset_g3d();
					// printk("[3D] Power on\n");  
				}
				else
				{
					/*powered off by the system :: error*/
					printk("Error on the system :: app tries to work during sleep\n");
					mutex_unlock(&pm_critical_section_lock);
					return -EFAULT;	
				}
			}
			else
			{
				/*already powered on : nothing to do*/
				//g_G3D_SelfPowerOFF=0;
			}
		}
		else if(g_G3D_CriticalFlag < 0) 
		{
			printk("Error on the system :: g_G3D_CriticalFlag < 0\n");
		}
//		printk("S3C_3D_CRITICAL_SECTION: param_pm.criticalSection=%d\n",param_pm.criticalSection);

		if (copy_to_user((void *)arg,&param_pm,sizeof(struct s3c_3d_pm_status)))
		{
			printk("G3D: copy_to_user failed to get s3c_3d_pm_status\n");

			mutex_unlock(&pm_critical_section_lock);
			return -EFAULT;		
		}
		mutex_unlock(&pm_critical_section_lock);
#endif /* USE_G3D_DOMAIN_GATING */
		break;

	default:
		DEBUG("s3c_g3d_ioctl() : default !!\n");
		return -EINVAL;
	}
	
	return 0;
}
Ejemplo n.º 13
0
/*
 * s3cmci_probe - set up and register one S3C24xx SD/MMC controller
 * @pdev:   platform device being probed
 * @is2440: non-zero for 2440-style hardware (different register offsets
 *          and base clock divider)
 *
 * Allocates the mmc_host, maps the controller registers, claims the
 * command and card-detect interrupts plus the DMA channel, enables the
 * "sdi" clock and registers with the MMC core.
 *
 * Returns 0 on success or a negative errno.  On failure, every resource
 * acquired so far is released in reverse order through the goto chain
 * below; each label frees one resource and falls through to the next.
 */
static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
{
	struct s3cmci_host *host;
	struct mmc_host	*mmc;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto probe_out;
	}

	host = mmc_priv(mmc);
	host->mmc 	= mmc;
	host->pdev	= pdev;
	host->is2440	= is2440;

	host->pdata = pdev->dev.platform_data;
	if (!host->pdata) {
		/* no board data supplied: fall back to the driver defaults */
		pdev->dev.platform_data = &s3cmci_def_pdata;
		host->pdata = &s3cmci_def_pdata;
	}

	spin_lock_init(&host->complete_lock);
	tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host);

	if (is2440) {
		host->sdiimsk	= S3C2440_SDIIMSK;
		host->sdidata	= S3C2440_SDIDATA;
		host->clk_div	= 1;
	} else {
		host->sdiimsk	= S3C2410_SDIIMSK;
		host->sdidata	= S3C2410_SDIDATA;
		host->clk_div	= 2;
	}

	host->dodma		= 0;
	host->complete_what 	= COMPLETION_NONE;
	host->pio_active 	= XFER_NONE;

	host->dma		= S3CMCI_DMA;

	host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!host->mem) {
		dev_err(&pdev->dev,
			"failed to get io memory region resouce.\n");

		ret = -ENOENT;
		goto probe_free_host;
	}

	host->mem = request_mem_region(host->mem->start,
				       RESSIZE(host->mem), pdev->name);

	if (!host->mem) {
		dev_err(&pdev->dev, "failed to request io memory region.\n");
		ret = -ENOENT;
		goto probe_free_host;
	}

	host->base = ioremap(host->mem->start, RESSIZE(host->mem));
	if (!host->base) {
		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
		ret = -EINVAL;
		goto probe_free_mem_region;
	}

	/* NOTE(review): modern platform_get_irq() reports failure with a
	 * negative value; the == 0 test matches this driver's era. */
	host->irq = platform_get_irq(pdev, 0);
	if (host->irq == 0) {
		dev_err(&pdev->dev, "failed to get interrupt resouce.\n");
		ret = -EINVAL;
		goto probe_iounmap;
	}

	if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) {
		dev_err(&pdev->dev, "failed to request mci interrupt.\n");
		ret = -ENOENT;
		goto probe_iounmap;
	}

	/* We get spurious interrupts even when we have set the IMSK
	 * register to ignore everything, so use disable_irq() to make
	 * ensure we don't lock the system with un-serviceable requests. */

	disable_irq(host->irq);

	host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect);

	if (host->irq_cd >= 0) {
		if (request_irq(host->irq_cd, s3cmci_irq_cd,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				DRIVER_NAME, host)) {
			dev_err(&pdev->dev, "can't get card detect irq.\n");
			ret = -ENOENT;
			goto probe_free_irq;
		}
	} else {
		/* no card-detect irq: poll the gpio line instead */
		dev_warn(&pdev->dev, "host detect has no irq available\n");
		s3c2410_gpio_cfgpin(host->pdata->gpio_detect,
				    S3C2410_GPIO_INPUT);
	}

	if (host->pdata->gpio_wprotect)
		s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect,
				    S3C2410_GPIO_INPUT);

	if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) {
		dev_err(&pdev->dev, "unable to get DMA channel.\n");
		ret = -EBUSY;
		goto probe_free_irq_cd;
	}

	host->clk = clk_get(&pdev->dev, "sdi");
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to find clock source.\n");
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		/* BUGFIX: was "goto probe_free_host", which leaked the DMA
		 * channel, both irqs, the ioremap and the mem region. */
		goto probe_free_dma;
	}

	ret = clk_enable(host->clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable clock source.\n");
		goto clk_free;
	}

	host->clk_rate = clk_get_rate(host->clk);

	mmc->ops 	= &s3cmci_ops;
	mmc->ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps	= MMC_CAP_4_BIT_DATA;
	mmc->f_min 	= host->clk_rate / (host->clk_div * 256);
	mmc->f_max 	= host->clk_rate / host->clk_div;

	if (host->pdata->ocr_avail)
		mmc->ocr_avail = host->pdata->ocr_avail;

	mmc->max_blk_count	= 4095;
	mmc->max_blk_size	= 4095;
	mmc->max_req_size	= 4095 * 512;
	mmc->max_seg_size	= mmc->max_req_size;

	mmc->max_phys_segs	= 128;
	mmc->max_hw_segs	= 128;

	dbg(host, dbg_debug,
	    "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n",
	    (host->is2440?"2440":""),
	    host->base, host->irq, host->irq_cd, host->dma);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(&pdev->dev, "failed to add mmc host.\n");
		goto free_dmabuf;
	}

	platform_set_drvdata(pdev, mmc);
	dev_info(&pdev->dev, "initialisation done.\n");

	return 0;

	/* error unwind: each label undoes one step and falls through */
 free_dmabuf:
	clk_disable(host->clk);

 clk_free:
	clk_put(host->clk);

 probe_free_dma:
	s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client);

 probe_free_irq_cd:
	if (host->irq_cd >= 0)
		free_irq(host->irq_cd, host);

 probe_free_irq:
	free_irq(host->irq, host);

 probe_iounmap:
	iounmap(host->base);

 probe_free_mem_region:
	release_mem_region(host->mem->start, RESSIZE(host->mem));

 probe_free_host:
	mmc_free_host(mmc);
 probe_out:
	return ret;
}