static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep *param) { struct cb_data *data; dma_addr_t pos = param->buf; dma_addr_t end = param->buf + param->len; list_for_each_entry(data, &dma_list, node) if (data->ch == ch) break; if (!data->fp) { s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb); data->fp = param->fp; data->fp_param = param->fp_param; } if (param->cap != DMA_CYCLIC) { s3c2410_dma_enqueue(ch, (void *)data, param->buf, param->len); return 0; } while (pos < end) { s3c2410_dma_enqueue(ch, (void *)data, pos, param->period); pos += param->period; } return 0; }
/* s3c24xx_pcm_enqueue
 *
 * place a dma buffer onto the queue for the dma system
 * to handle.
 *
 * Fills the hardware queue up to prtd->dma_limit period-sized chunks,
 * clamping the last chunk at the buffer end and wrapping the position
 * back to dma_start. Stops early if the hardware refuses an enqueue.
 */
static void s3c24xx_pcm_enqueue(struct snd_pcm_substream *substream)
{
	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
	dma_addr_t pos = prtd->dma_pos;
	int ret;

	DBG("Entered %s\n", __func__);

	while (prtd->dma_loaded < prtd->dma_limit) {
		unsigned long len = prtd->dma_period;

		DBG("dma_loaded: %d\n", prtd->dma_loaded);

		if ((pos + len) > prtd->dma_end) {
			/* final chunk: clamp to the end of the DMA buffer */
			len = prtd->dma_end - pos;
			/*
			 * Fix: KERN_DEBUG removed from the format string;
			 * the other DBG() calls here take a bare format and
			 * the level prefix would be printed literally.
			 */
			DBG("%s: corrected dma len %ld\n", __func__, len);
		}

		ret = s3c2410_dma_enqueue(prtd->params->channel, substream,
					  pos, len);

		if (ret == 0) {
			prtd->dma_loaded++;
			pos += prtd->dma_period;
			if (pos >= prtd->dma_end)
				pos = prtd->dma_start; /* wrap around */
		} else
			break; /* queue full or channel error: stop filling */
	}

	prtd->dma_pos = pos;
}
/* s3c24xx_pcm_enqueue
 *
 * place a dma buffer onto the queue for the dma system
 * to handle.
 *
 * Enqueues a single period starting at prtd->dma_pos, clamped at the
 * buffer end. On S3C6400/6410 the *next* period is predicted: if it
 * would be <= 32 bytes (too short for stable I2S DMA), it is merged
 * into this transfer and the position advanced past it.
 *
 * NOTE(review): pos is only advanced by next_len in the merge case;
 * presumably prtd->dma_pos is advanced elsewhere (buffer-done
 * callback) for the normal case — TODO confirm against the callback.
 * NOTE(review): ret is assigned but never checked here.
 */
static void s3c24xx_pcm_enqueue(struct snd_pcm_substream *substream)
{
	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
	unsigned long len = prtd->dma_period;
	dma_addr_t pos = prtd->dma_pos;
	int ret;
	unsigned long next_len = 0;
#if defined (CONFIG_CPU_S3C6400) || defined (CONFIG_CPU_S3C6410)
	/* Next length prediction */
	dma_addr_t pred_pos;
#endif

	s3cdbg("Entered %s\n", __FUNCTION__);

	if ((pos + len) > prtd->dma_end) {
		/* last chunk before the buffer end: clamp the length */
		len = prtd->dma_end - pos;
		s3cdbg(KERN_DEBUG "%s: corrected dma len %ld\n", __FUNCTION__, len);
	}

#if defined (CONFIG_CPU_S3C6400) || defined (CONFIG_CPU_S3C6410)
	/* DMA with I2S might be unstable when length is too short. */
	pred_pos = pos + prtd->dma_period;
	next_len = prtd->dma_period;

	if ((pred_pos + next_len) > prtd->dma_end) {
		next_len = prtd->dma_end - pred_pos;
	}

	if (next_len <= 32) { /* next transfer is too short */
		len += next_len; /* transfer with next small period */
		ret = s3c2410_dma_enqueue(prtd->params->channel, substream, pos, len);
		/* skip over the merged short period */
		pos += next_len;
	} else
		ret = s3c2410_dma_enqueue(prtd->params->channel, substream, pos, len);
#else
	ret = s3c2410_dma_enqueue(prtd->params->channel, substream, pos, len);
#endif

	prtd->dma_pos = pos;
}
/* s3c_dma_enqueue
 *
 * place a dma buffer onto the queue for the dma system
 * to handle.
 *
 * Determines how many periods the queue may hold, then either
 * programs the whole ring in one call (infinite-loop capable
 * controllers) or enqueues period-sized chunks one at a time,
 * clamping at the buffer end and wrapping back to the start.
 */
static void s3c_dma_enqueue(struct snd_pcm_substream *substream)
{
	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
	dma_addr_t addr = prtd->dma_pos;
	unsigned int max_loaded;
	int ret;

	pr_debug("Entered %s\n", __func__);

	/* circular hardware can hold one slot per period across the buffer */
	max_loaded = s3c_dma_has_circular()
		? (prtd->dma_end - prtd->dma_start) / prtd->dma_period
		: prtd->dma_limit;

	pr_debug("%s: loaded %d, limit %d\n",
		 __func__, prtd->dma_loaded, max_loaded);

	if (s3c_dma_has_infiniteloop()) {
		/* single call programs the whole ring */
		ret = s3c2410_dma_enqueue_ring(prtd->params->channel,
					       substream, addr,
					       prtd->dma_period, max_loaded);
		if (ret == 0) {
			prtd->dma_loaded += max_loaded;
			addr += prtd->dma_period;
		}
	} else {
		for (; prtd->dma_loaded < max_loaded; prtd->dma_loaded++) {
			unsigned long xfer = prtd->dma_period;

			pr_debug("dma_loaded: %d\n", prtd->dma_loaded);

			if ((addr + xfer) > prtd->dma_end) {
				/* clamp the final chunk at the buffer end */
				xfer = prtd->dma_end - addr;
				pr_debug("%s: corrected dma len %ld\n",
					 __func__, xfer);
			}

			if (s3c2410_dma_enqueue(prtd->params->channel,
						substream, addr, xfer) != 0)
				break; /* queue full or error: stop filling */

			addr += prtd->dma_period;
			if (addr >= prtd->dma_end)
				addr = prtd->dma_start; /* wrap */
		}
	}

	prtd->dma_pos = addr;
}
static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep_info *info) { struct cb_data *data; int len = (info->cap == DMA_CYCLIC) ? info->period : info->len; list_for_each_entry(data, &dma_list, node) if (data->ch == ch) break; if (!data->fp) { s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb); data->fp = info->fp; data->fp_param = info->fp_param; } s3c2410_dma_enqueue(ch, (void *)data, info->buf, len); return 0; }
static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) { int dma_len, i; int rw = (data->flags & MMC_DATA_WRITE)?1:0; if (rw != ((data->flags & MMC_DATA_READ)?0:1)) return -EINVAL; s3cmci_dma_setup(host, rw?S3C2410_DMASRC_MEM:S3C2410_DMASRC_HW); s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, (rw)?DMA_TO_DEVICE:DMA_FROM_DEVICE); if (dma_len == 0) return -ENOMEM; host->dma_complete = 0; host->dmatogo = dma_len; for (i = 0; i < dma_len; i++) { int res; dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i, sg_dma_address(&data->sg[i]), sg_dma_len(&data->sg[i])); res = s3c2410_dma_enqueue(host->dma, (void *) host, sg_dma_address(&data->sg[i]), sg_dma_len(&data->sg[i])); if (res) { s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); return -EBUSY; } } s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_START); return 0; }
/* s3c_dma_enqueue
 *
 * place a dma buffer onto the queue for the dma system
 * to handle.
 *
 * Enqueues period-sized chunks until prtd->dma_limit entries are
 * loaded, clamping the last chunk at the buffer end and wrapping the
 * position back to dma_start. Stops early if an enqueue fails.
 */
static void s3c_dma_enqueue(struct snd_pcm_substream *substream)
{
	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
	dma_addr_t pos = prtd->dma_pos;
	unsigned int limit;
	int ret;

	pr_debug("Entered %s\n", __func__);

	/*
	 * NOTE(review): the circular-buffer limit computation
	 * (limit = (dma_end - dma_start) / dma_period when
	 * s3c_dma_has_circular()) is disabled pending functional
	 * verification on this platform — confirm before re-enabling.
	 */
	limit = prtd->dma_limit;

	pr_debug("%s: loaded %d, limit %d\n",
				__func__, prtd->dma_loaded, limit);

	while (prtd->dma_loaded < limit) {
		unsigned long len = prtd->dma_period;

		pr_debug("dma_loaded: %d\n", prtd->dma_loaded);

		if ((pos + len) > prtd->dma_end) {
			/* clamp the final chunk at the buffer end */
			len = prtd->dma_end - pos;
			/*
			 * Fix: KERN_DEBUG removed from the format string;
			 * pr_debug() supplies its own level, so the prefix
			 * was printed literally in the message.
			 */
			pr_debug("%s: corrected dma len %ld\n",
					__func__, len);
		}

		pr_debug("enqing at %x, %lu bytes\n", pos, len);
		ret = s3c2410_dma_enqueue(prtd->params->channel,
			substream, pos, len);

		if (ret == 0) {
			prtd->dma_loaded++;
			pos += prtd->dma_period;
			if (pos >= prtd->dma_end)
				pos = prtd->dma_start; /* wrap */
		} else
			break; /* queue full or channel error */
	}

	prtd->dma_pos = pos;
}
/*
 * audio_process_dma - push all pending audio fragments to the DMA queue.
 *
 * Drains s->pending_frags: for each fragment, enqueues the unsent
 * remainder, bumps its DMA reference count, and advances the head
 * index (with wrap-around) once the fragment is fully queued.
 */
static void audio_process_dma(audio_stream_t *s)
{
	for (; s->pending_frags; ) {
		audio_buf_t *buf = &s->buffers[s->dma_head];
		u_int chunk = s->fragsize - buf->offset;

		s3c2410_dma_enqueue(s->dma, (void *)s,
				    buf->dma_addr + buf->offset, chunk);

		buf->dma_ref++;
		buf->offset += chunk;

		if (buf->offset >= s->fragsize) {
			/* fragment fully queued: consume it and move on */
			s->pending_frags--;
			s->dma_head++;
			if (s->dma_head >= s->nbfrags)
				s->dma_head = 0; /* wrap ring head */
		}
	}
}
/* s3c_dma_enqueue
 *
 * place a dma buffer onto the queue for the dma system
 * to handle.
 *
 * Enqueues a single period starting at prtd->dma_pos, clamped so it
 * does not run past the end of the DMA buffer.
 */
static void s3c_dma_enqueue(struct snd_pcm_substream *substream)
{
	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
	dma_addr_t pos = prtd->dma_pos;
	unsigned long len = prtd->dma_period;
	int ret;

	/* Fix: __func__ replaces the deprecated GNU __FUNCTION__ */
	pr_debug("Entered %s\n", __func__);

	if ((pos + len) > prtd->dma_end) {
		/* clamp the chunk at the buffer end */
		len = prtd->dma_end - pos;
		/*
		 * Fix: KERN_DEBUG removed from the format; pr_debug()
		 * supplies its own level, so it printed literally.
		 */
		pr_debug("%s: corrected dma len %ld\n", __func__, len);
	}

	/*
	 * Fix: len is unsigned long, so %d was a format/argument type
	 * mismatch (undefined behavior); use %lu.
	 */
	pr_debug("enqing at %x, %lu bytes\n", pos, len);

	ret = s3c2410_dma_enqueue(prtd->params->channel, substream, pos, len);
	if (ret)
		pr_debug("%s: enqueue failed (%d)\n", __func__, ret);

	prtd->dma_pos = pos;
}
/*
 * s3c_mem_ioctl - ioctl entry point for the S3C physical-memory
 * allocator device.
 *
 * Dispatches allocation / shared-mapping / free requests. All cases
 * exchange a struct s3c_mem_alloc with user space; the mmap handler
 * for this device (not visible here) performs the actual allocation
 * and stores the physical address in the file-scope `physical_address`,
 * steered by the file-scope `flag`.
 *
 * NOTE(review): comparing do_mmap()'s return against -EINVAL only
 * catches that single error value; other error codes would pass the
 * check — verify against the device's mmap handler.
 */
int s3c_mem_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long *virt_addr;
	struct mm_struct *mm = current->mm;
	struct s3c_mem_alloc param;
	struct s3c_mem_dma_param dma_param;

	switch (cmd) {
	case S3C_MEM_ALLOC:
		/* allocate non-cacheable memory and mmap it to the caller */
		mutex_lock(&mem_alloc_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC; /* tell the mmap handler which mode to use */
		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
		if(param.vir_addr == -EINVAL) {
			printk("S3C_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		/* physical_address is filled in by the mmap handler */
		param.phy_addr = physical_address;
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);
		if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_alloc_lock);
		break;

	case S3C_MEM_CACHEABLE_ALLOC:
		/* same as S3C_MEM_ALLOC but with a cacheable mapping */
		mutex_lock(&mem_cacheable_alloc_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_CACHEABLE;
		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
		if(param.vir_addr == -EINVAL) {
			printk("S3C_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		param.phy_addr = physical_address;
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);
		if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_cacheable_alloc_lock);
		break;

	case S3C_MEM_SHARE_ALLOC:
		/* map an existing physical region (caller supplies phy_addr) */
		mutex_lock(&mem_share_alloc_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_SHARE;
		/* hand the caller's physical address to the mmap handler */
		physical_address = param.phy_addr;
		DEBUG("param.phy_addr = %08x, %d\n", physical_address, __LINE__);
		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
		if(param.vir_addr == -EINVAL) {
			printk("S3C_MEM_SHARE_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);
		if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_share_alloc_lock);
		break;

	case S3C_MEM_CACHEABLE_SHARE_ALLOC:
		/* shared mapping of an existing region, cacheable variant */
		mutex_lock(&mem_cacheable_share_alloc_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_CACHEABLE_SHARE;
		physical_address = param.phy_addr;
		DEBUG("param.phy_addr = %08x, %d\n", physical_address, __LINE__);
		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
		if(param.vir_addr == -EINVAL) {
			printk("S3C_MEM_SHARE_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);
		if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_cacheable_share_alloc_lock);
		break;

	case S3C_MEM_FREE:
		/* unmap the user mapping and free the backing allocation */
		mutex_lock(&mem_free_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}
		DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);
		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed !!\n");
			mutex_unlock(&mem_free_lock);
			return -EINVAL;
		}
		/* NOTE(review): trusts the user-supplied phy_addr for
		 * kfree() — presumably validated elsewhere; verify. */
		virt_addr = (unsigned long *)phys_to_virt(param.phy_addr);
		kfree(virt_addr);
		param.size = 0;
		DEBUG("do_munmap() succeed !!\n");
		if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}
		mutex_unlock(&mem_free_lock);
		break;

	case S3C_MEM_SHARE_FREE:
		/* unmap a shared mapping; the backing memory is not freed */
		mutex_lock(&mem_share_free_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;
		}
		DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);
		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed - MEM_SHARE_FREE!!\n");
			mutex_unlock(&mem_share_free_lock);
			return -EINVAL;
		}
		param.vir_addr = 0;
		DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");
		if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;
		}
		mutex_unlock(&mem_share_free_lock);
		break;

#if 0 /* IOCTL for Old PL-080 codes (dma-pl080.c) */
	case S3C_MEM_DMA_COPY:
		/* (disabled) memory-to-memory copy via the 3D M2M channel */
		if(copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg, sizeof(struct s3c_mem_dma_param))) {
			return -EFAULT;
		}
		//printk("S3C_MEM_DMA_COPY called\n");
		if (s3c2410_dma_request(DMACH_3D_M2M, &s3c_m2m_dma_client, NULL)) {
			printk(KERN_WARNING "Unable to get DMA channel.\n");
			return -1;
		}
		s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_m2m_dma_finish);
		//dma_cache_maint(dma_param.src_addr,sizeof(unsigned long long), DMA_BIDIRECTIONAL);
		// printk("MEMCPY src=%p,dst=%p,size=%d\n", dma_param.src_addr,dma_param.dst_addr, dma_param.size);
		/* Source address */
		s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM, dma_param.src_addr);
		s3c2410_dma_config(DMACH_3D_M2M, 8);
		/* Destination address : Data buffer address */
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, dma_param.dst_addr, dma_param.size);
		s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
		wait_for_completion(&s3c_m2m_dma_complete);
#if 0 /* Test code with hard coding */
		/* Destination address : Data buffer address */
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000, 0x4000);
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x10000, 0x4000);
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x20000, 0x4000);
		s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
		wait_for_completion(&s3c_m2m_dma_complete);
		//wait_for_completion(&s3c_m2m_dma_complete);
		//wait_for_completion(&s3c_m2m_dma_complete);
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x30000, 0x4000);
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x40000, 0x4000);
		s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
		wait_for_completion(&s3c_m2m_dma_complete);
		//wait_for_completion(&s3c_m2m_dma_complete);
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x50000, 0x4000);
		s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
		wait_for_completion(&s3c_m2m_dma_complete);
#endif /* Test code with hard coding */
		s3c2410_dma_free(DMACH_3D_M2M, &s3c_m2m_dma_client);
		if(copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param, sizeof(struct s3c_mem_dma_param))) {
			return -EFAULT;
		}
		break;

	case S3C_MEM_DMA_SET:
		/* (disabled) memory fill via the 3D M2M channel */
		if(copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg, sizeof(struct s3c_mem_dma_param))) {
			return -EFAULT;
		}
		if (s3c2410_dma_request(DMACH_3D_M2M, &s3c_m2m_dma_client, NULL)) {
			printk(KERN_WARNING "Unable to get DMA channel.\n");
			return -1;
		}
		s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_m2m_dma_finish);
		//dma_cache_maint(dma_param.src_addr,sizeof(unsigned long long), DMA_BIDIRECTIONAL);
		// printk("MEMSET src=%p,dst=%p,size=%d\n", dma_param.src_addr,dma_param.dst_addr, dma_param.size);
		/* Source address */
		s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM_SET, dma_param.src_addr);
		s3c2410_dma_config(DMACH_3D_M2M, 8);
		/* Destination address : Data buffer address */
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, dma_param.dst_addr, dma_param.size);
		s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
		wait_for_completion(&s3c_m2m_dma_complete);
		s3c2410_dma_free(DMACH_3D_M2M, &s3c_m2m_dma_client);
		if(copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param, sizeof(struct s3c_mem_dma_param))) {
			return -EFAULT;
		}
		break;
#endif

	default:
		DEBUG("s3c_mem_ioctl() : default !!\n");
		return -EINVAL;
	}

	return 0;
}
static int s3c_g3d_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { u32 val; DMA_BLOCK_STRUCT dma_block; s3c_3d_dma_info dma_info; DECLARE_COMPLETION_ONSTACK(complete); switch(cmd) { case WAIT_FOR_FLUSH: //if fifo has already been flushed, return; val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE); //printk("read pipestate = 0x%x\n",val); if((val & arg) ==0)break; // enable interrupt interrupt_already_recevied = 0; __raw_writel(0x0001171f,s3c_g3d_base+FGGB_PIPEMASK); __raw_writel(1,s3c_g3d_base+FGGB_INTMASK); //printk("wait for flush (arg=0x%lx)\n",arg); while(1){ wait_event_interruptible(waitq, (interrupt_already_recevied>0)); __raw_writel(0,s3c_g3d_base+FGGB_INTMASK); interrupt_already_recevied = 0; //if(interrupt_already_recevied==0)interruptible_sleep_on(&waitq); val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE); //printk("in while read pipestate = 0x%x\n",val); if(val & arg){} else{ break; } __raw_writel(1,s3c_g3d_base+FGGB_INTMASK); } break; case GET_CONFIG: copy_to_user((void *)arg,&g3d_config,sizeof(G3D_CONFIG_STRUCT)); break; case START_DMA_BLOCK: copy_from_user(&dma_block,(void *)arg,sizeof(DMA_BLOCK_STRUCT)); if(dma_block.offset%4!=0) { printk("G3D: dma offset is not aligned by word\n"); return -EINVAL; } if(dma_block.size%4!=0) { printk("G3D: dma size is not aligned by word\n"); return -EINVAL; } if(dma_block.offset+dma_block.size >g3d_config.dma_buffer_size) { printk("G3D: offset+size exceeds dam buffer\n"); return -EINVAL; } dma_info.src = g3d_config.dma_buffer_addr+dma_block.offset; dma_info.len = dma_block.size; dma_info.dst = s3c_g3d_base_physical+FGGB_HOSTINTERFACE; // printk(" dma src=0x%x\n",dma_info.src); // printk(" dma len =%u\n",dma_info.len); // printk(" dma dst = 0x%x\n",dma_info.dst); dma_3d_done = &complete; if (s3c2410_dma_request(DMACH_3D_IN, &s3c6410_3d_dma_client, NULL)) { printk(KERN_WARNING "Unable to get DMA channel.\n"); return -EFAULT; } s3c2410_dma_set_buffdone_fn(DMACH_3D_IN, s3c_g3d_dma_finish); 
s3c2410_dma_devconfig(DMACH_3D_IN, S3C_DMA_MEM2G3D, 1, (u_long) dma_info.src); s3c2410_dma_config(DMACH_3D_IN, 4, 4); s3c2410_dma_setflags(DMACH_3D_IN, S3C2410_DMAF_AUTOSTART); //consistent_sync((void *) dma_info.dst, dma_info.len, DMA_FROM_DEVICE); // s3c2410_dma_enqueue(DMACH_3D_IN, NULL, (dma_addr_t) virt_to_dma(NULL, dma_info.dst), dma_info.len); s3c2410_dma_enqueue(DMACH_3D_IN, NULL, (dma_addr_t) dma_info.dst, dma_info.len); // printk("wait for end of dma operation\n"); wait_for_completion(&complete); // printk("dma operation is performed\n"); s3c2410_dma_free(DMACH_3D_IN, &s3c6410_3d_dma_client); break; default: return -EINVAL; } return 0; }
/* s3c_spi_message_start
 *
 * configure the spi controler and transmit start of a message onto the bus
 *
 * Programs the S3C SPI block in seven numbered steps (channel config,
 * clock, mode, interrupt enables, packet count, channel enable, slave
 * select), optionally setting up a DMA channel when the message is
 * flagged SPI_M_DMA_MODE.
 */
static void s3c_spi_message_start(struct s3c_spi *spi)
{
	struct spi_msg *msg = spi->msg;

	u32 spi_chcfg = 0, spi_slavecfg, spi_inten= 0, spi_packet=0;

	/* prescaler selects the SPI output clock divider:
	 * clockout = clock source / (2 * (prescaler + 1)) */
	// u8 prescaler = 0; // 44.435 Mhz
	u8 prescaler = 1; // 22.2175 Mhz
	// u8 prescaler = 2; // 14.81 Mhz
	// u8 prescaler = 3; // 11.10875 Mhz
	// u8 prescaler = 4; // 8.887Mhz

	u32 spi_clkcfg = 0, spi_modecfg = 0 ;

	/* initialise the spi controller */
	s3c_spi_hw_init(spi);

	/* 1. Set transfer type (CPOL & CPHA set) */
	spi_chcfg = SPI_CH_RISING | SPI_CH_FORMAT_A;

	if (spi->msg->flags & SPI_M_MODE_MASTER) {
		spi_chcfg |= SPI_CH_MASTER;
	} else if(spi->msg->flags & SPI_M_MODE_SLAVE) {
		spi_chcfg |= SPI_CH_SLAVE;
	}

	writel( spi_chcfg , spi->regs + S3C_CH_CFG);

	/* 2. Set clock configuration register */
	spi_clkcfg = SPI_ENCLK_ENABLE;

#if defined CONFIG_SPICLK_PCLK
	spi_clkcfg |= SPI_CLKSEL_PCLK;
#elif defined CONFIG_SPICLK_EPLL
	spi_clkcfg |= SPI_CLKSEL_ECLK;
#elif defined CONFIG_SPICLK_USBCLK
	spi_clkcfg |= SPI_CLKSEL_USBCLK;
#else
#error you must define correct confige file.
#endif

	writel( spi_clkcfg , spi->regs + S3C_CLK_CFG);

	spi_clkcfg = readl( spi->regs + S3C_CLK_CFG);

	/* SPI clockout = clock source / (2 * (prescaler +1)) */
	spi_clkcfg |= prescaler;
	writel( spi_clkcfg , spi->regs + S3C_CLK_CFG);

	/* 3. Set SPI MODE configuration register */
#ifdef CONFIG_WORD_TRANSIZE
	spi_modecfg = SPI_MODE_CH_TSZ_WORD| SPI_MODE_BUS_TSZ_WORD;
#else
	spi_modecfg = SPI_MODE_CH_TSZ_BYTE| SPI_MODE_BUS_TSZ_BYTE;
#endif

	spi_modecfg |= SPI_MODE_TXDMA_OFF| SPI_MODE_SINGLE| SPI_MODE_RXDMA_OFF;

	if (msg->flags & SPI_M_DMA_MODE) {
		spi_modecfg |= SPI_MODE_TXDMA_ON| SPI_MODE_RXDMA_ON;
	}

	if (msg->wbuf)
		spi_modecfg |= ( 0x3f << 5); /* Tx FIFO trigger level in INT mode */
	if (msg->rbuf)
		spi_modecfg |= ( 0x3f << 11); /* Rx FIFO trigger level in INT mode */

	/* NOTE(review): (0x3ff << 19) presumably sets the trailing-byte
	 * count field — confirm against the S3C SPI register map. */
	spi_modecfg |= ( 0x3ff << 19);

	writel(spi_modecfg, spi->regs + S3C_MODE_CFG);

	/* 4. Set SPI INT_EN register */
	if (msg->wbuf)
		spi_inten = SPI_INT_TX_FIFORDY_EN|SPI_INT_TX_UNDERRUN_EN|SPI_INT_TX_OVERRUN_EN;
	if (msg->rbuf) {
		/* NOTE(review): assignment (not |=) discards the Tx
		 * enables for combined wbuf+rbuf messages — confirm. */
		spi_inten = SPI_INT_RX_FIFORDY_EN|SPI_INT_RX_UNDERRUN_EN|SPI_INT_RX_OVERRUN_EN|SPI_INT_TRAILING_EN ;
	}
	writel(spi_inten, spi->regs + S3C_SPI_INT_EN);

	/* clear any pending interrupts before starting */
	writel(0x1f, spi->regs + S3C_PENDING_CLR);

	/* 5. Set Packet Count configuration register */
	spi_packet = SPI_PACKET_CNT_EN;
	spi_packet |= 0xffff;

	writel(spi_packet, spi->regs + S3C_PACKET_CNT);

	/* 6. Set Tx or Rx Channel on */
	spi_chcfg = readl(spi->regs + S3C_CH_CFG);

	spi_chcfg |= SPI_CH_TXCH_OFF | SPI_CH_RXCH_OFF;

	if (msg->wbuf)
		spi_chcfg |= SPI_CH_TXCH_ON;
	if (msg->rbuf)
		spi_chcfg |= SPI_CH_RXCH_ON;

	writel(spi_chcfg, spi->regs + S3C_CH_CFG);

	if (msg->flags & SPI_M_DMA_MODE) {
		/* claim a DMA sub-channel matching the transfer direction
		 * and enqueue the message buffer on it */
		spi->dma = S3C_SPI_DMA;

		if (msg->wbuf)
			spi->subchannel = DMACH_SPI0_OUT;
		if (msg->rbuf)
			spi->subchannel = DMACH_SPI0_IN;

		if (s3c2410_dma_request(spi->subchannel, &s3c6400spi_dma_client, NULL)) {
			printk(KERN_WARNING  "unable to get DMA channel.\n" );
		}

		s3c2410_dma_set_buffdone_fn(spi->subchannel, s3c_spi_dma_cb);
		s3c2410_dma_set_opfn(spi->subchannel,  NULL);

		if (msg->wbuf)
			s3c_spi_dma_init(spi, 0);
		if (msg->rbuf)
			s3c_spi_dma_init(spi, 1);

		s3c2410_dma_enqueue(spi->subchannel, (void *) spi, spi->dmabuf_addr, spi->msg->len);
	}

	/* 7. Set nSS low to start Tx or Rx operation */
	spi_slavecfg = readl(spi->regs + S3C_SLAVE_SEL);
	spi_slavecfg &= SPI_SLAVE_SIG_ACT;
	spi_slavecfg |= (0x3f << 4);
	writel(spi_slavecfg, spi->regs + S3C_SLAVE_SEL);

	print_reg(spi);
}
/*
 * s3c_g3d_ioctl - ioctl entry point for the S3C G3D (3D engine) device.
 *
 * Handles pipeline flush waits, configuration queries, DMA block
 * submission to the engine's host interface, chunk-based memory
 * allocation/free for user space, SFR locking, cache maintenance on
 * user mappings, and (optionally) G3D power-domain gating.
 *
 * Returns 0 on success or a negative errno.
 */
static int s3c_g3d_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	u32 val;
	DMA_BLOCK_STRUCT dma_block;
	s3c_3d_dma_info dma_info;
	DECLARE_COMPLETION_ONSTACK(complete);

	struct mm_struct *mm = current->mm;
	struct s3c_3d_mem_alloc param;
	struct s3c_3d_pm_status param_pm;

	unsigned int timer;

	switch (cmd) {
	case WAIT_FOR_FLUSH:
		/* if fifo has already been flushed, return */
		val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
		//printk("read pipestate = 0x%x\n",val);
		if((val & arg) ==0) break;

		// enable interrupt
		interrupt_already_recevied = 0;
		__raw_writel(0x0001171f,s3c_g3d_base+FGGB_PIPEMASK);
		__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);

		//printk("wait for flush (arg=0x%lx)\n",arg);

		/* bounded wait: poll-with-interrupt up to `timer` rounds,
		 * each waiting at most 1 second */
		timer = 1000000;

		while(timer) {
			wait_event_interruptible_timeout(waitq, (interrupt_already_recevied>0), 1*HZ);

			__raw_writel(0,s3c_g3d_base+FGGB_INTMASK);
			interrupt_already_recevied = 0;
			//if(interrupt_already_recevied==0)interruptible_sleep_on(&waitq);
			val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
			//printk("in while read pipestate = 0x%x\n",val);
			if(val & arg){
			}
			else{
				break; /* requested pipeline stages drained */
			}
			__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);
			timer --;
		}
		break;

	case GET_CONFIG:
		if (copy_to_user((void *)arg,&g3d_config,sizeof(G3D_CONFIG_STRUCT))) {
			printk("G3D: copy_to_user failed to get g3d_config\n");
			return -EFAULT;
		}
		break;

	case START_DMA_BLOCK:
		/* NOTE(review): message says copy_to_user but this is a
		 * copy_from_user failure. */
		if (copy_from_user(&dma_block,(void *)arg,sizeof(DMA_BLOCK_STRUCT))) {
			printk("G3D: copy_to_user failed to get dma_block\n");
			return -EFAULT;
		}

		/* region must be word-aligned and inside the DMA buffer */
		if (dma_block.offset%4!=0) {
			printk("G3D: dma offset is not aligned by word\n");
			return -EINVAL;
		}
		if (dma_block.size%4!=0) {
			printk("G3D: dma size is not aligned by word\n");
			return -EINVAL;
		}
		if (dma_block.offset+dma_block.size >g3d_config.dma_buffer_size) {
			printk("G3D: offset+size exceeds dam buffer\n");
			return -EINVAL;
		}

		dma_info.src = g3d_config.dma_buffer_addr+dma_block.offset;
		dma_info.len = dma_block.size;
		dma_info.dst = s3c_g3d_base_physical+FGGB_HOSTINTERFACE;

		DEBUG(" dma src=0x%x\n", dma_info.src);
		DEBUG(" dma len =%u\n", dma_info.len);
		DEBUG(" dma dst = 0x%x\n", dma_info.dst);

		dma_3d_done = &complete;

		if (s3c2410_dma_request(DMACH_3D_M2M, &s3c6410_3d_dma_client, NULL)) {
			printk(KERN_WARNING "Unable to get DMA channel(DMACH_3D_M2M).\n");
			return -EFAULT;
		}

		s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_g3d_dma_finish);
		s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM, 1, (u_long) dma_info.src);
		s3c2410_dma_config(DMACH_3D_M2M, 4, 4);
		s3c2410_dma_setflags(DMACH_3D_M2M, S3C2410_DMAF_AUTOSTART);

		//consistent_sync((void *) dma_info.dst, dma_info.len, DMA_FROM_DEVICE);
		// s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) virt_to_dma(NULL, dma_info.dst), dma_info.len);
		s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) dma_info.dst, dma_info.len);

		// printk("wait for end of dma operation\n");
		wait_for_completion(&complete);
		// printk("dma operation is performed\n");

		s3c2410_dma_free(DMACH_3D_M2M, &s3c6410_3d_dma_client);
		break;

	case S3C_3D_MEM_ALLOC:
		/* reserve a chunk from the G3D pool and mmap it to user */
		mutex_lock(&mem_alloc_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC;

		param.size = s3c_g3d_available_chunk_size(param.size,(unsigned int)file->private_data);

		if (param.size == 0){
			printk("S3C_3D_MEM_ALLOC FAILED because there is no block memory bigger than you request\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}

		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x\n", param.vir_addr);

		if(param.vir_addr == -EINVAL) {
			printk("S3C_3D_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		/* physical_address is filled in by the mmap handler */
		param.phy_addr = physical_address;

		// printk("alloc %d\n", param.size);
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}

		flag = 0;

//		printk("\n\n====Success the malloc from kernel=====\n");
		mutex_unlock(&mem_alloc_lock);
		break;

	case S3C_3D_MEM_FREE:
		/* return a chunk to the pool; the user munmap is skipped
		 * (left commented) — the release uses the physical chunk */
		mutex_lock(&mem_free_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		/*
		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed !!\n");
			mutex_unlock(&mem_free_lock);
			return -EINVAL;
		}
		*/

		s3c_g3d_release_chunk(param.phy_addr, param.size);
		//printk("KERNEL : virt_addr = 0x%X\n", virt_addr);
		//printk("free %d\n", param.size);

		param.size = 0;
		DEBUG("do_munmap() succeed !!\n");

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		mutex_unlock(&mem_free_lock);
		break;

	case S3C_3D_SFR_LOCK:
		/* exclusive access to the engine's special-function regs;
		 * records the owning file for later checks */
		mutex_lock(&mem_sfr_lock);
		mutex_lock_processID = (unsigned int)file->private_data;
		DEBUG("s3c_g3d_ioctl() : You got a muxtex lock !!\n");
		break;

	case S3C_3D_SFR_UNLOCK:
		mutex_lock_processID = 0;
		mutex_unlock(&mem_sfr_lock);
		DEBUG("s3c_g3d_ioctl() : The muxtex unlock called !!\n");
		break;

	case S3C_3D_MEM_ALLOC_SHARE:
		/* map an already-allocated chunk into this caller */
		mutex_lock(&mem_alloc_share_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_SHARE;

		/* hand the caller's physical address to the mmap handler */
		physical_address = param.phy_addr;

		DEBUG("param.phy_addr = %08x\n", physical_address);

		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x\n", param.vir_addr);

		if(param.vir_addr == -EINVAL) {
			printk("S3C_3D_MEM_ALLOC_SHARE FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;
		}

		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;
		}

		flag = 0;

		mutex_unlock(&mem_alloc_share_lock);
		break;

	case S3C_3D_MEM_SHARE_FREE:
		/* unmap a shared mapping; the chunk itself stays allocated */
		mutex_lock(&mem_share_free_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;
		}

		DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed - MEM_SHARE_FREE!!\n");
			mutex_unlock(&mem_share_free_lock);
			return -EINVAL;
		}

		param.vir_addr = 0;
		DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;
		}

		mutex_unlock(&mem_share_free_lock);
		break;

	case S3C_3D_CACHE_INVALID:
		/* invalidate the caller's mapping range from the D-cache */
		mutex_lock(&cache_invalid_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			printk("ERR: Invalid Cache Error\n");
			mutex_unlock(&cache_invalid_lock);
			return -EFAULT;
		}
		dmac_inv_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_invalid_lock);
		break;

	case S3C_3D_CACHE_CLEAN:
		/* write back (clean) the caller's mapping range */
		mutex_lock(&cache_clean_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			printk("ERR: Invalid Cache Error\n");
			mutex_unlock(&cache_clean_lock);
			return -EFAULT;
		}
		dmac_clean_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_clean_lock);
		break;

	case S3C_3D_CACHE_CLEAN_INVALID:
		/* clean then invalidate (flush) the mapping range */
		mutex_lock(&cache_clean_invalid_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&cache_clean_invalid_lock);
			printk("ERR: Invalid Cache Error\n");
			return -EFAULT;
		}
		dmac_flush_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_clean_invalid_lock);
		break;

	case S3C_3D_POWER_INIT:
		/* only validates the user struct; no state change here */
		if(copy_from_user(&param_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){
			printk("ERR: Invalid Cache Error\n");
			return -EFAULT;
		}
		break;

	case S3C_3D_CRITICAL_SECTION:
#ifdef USE_G3D_DOMAIN_GATING
		/* reference-count critical sections: entering powers the
		 * G3D domain on (if gated off), leaving arms the power-off
		 * timer once the count reaches zero */
		mutex_lock(&pm_critical_section_lock);
		if(copy_from_user(&param_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){
			printk("ERR: Invalid Cache Error\n");
			mutex_unlock(&pm_critical_section_lock);
			return -EFAULT;
		}

//		param_pm.memStatus = check_memStatus((unsigned int)file->private_data);

		if(param_pm.criticalSection) g_G3D_CriticalFlag++;
		else g_G3D_CriticalFlag--;

		if(g_G3D_CriticalFlag==0)
		{/*kick power off*/
			/*power off*/
			/*kick timer*/
			mod_timer(&g3d_pm_timer, jiffies + TIMER_INTERVAL);
		}
		else if(g_G3D_CriticalFlag>0)
		{/*kick power on*/
			if(domain_off_check(S3C64XX_DOMAIN_G))
			{/*if powered off*/
				if(g_G3D_SelfPowerOFF)
				{/*powered off by 3D PM or by Resume*/
					/*power on*/
					s3c_set_normal_cfg(S3C64XX_DOMAIN_G, S3C64XX_ACTIVE_MODE, S3C64XX_3D);
					if(s3c_wait_blk_pwr_ready(S3C64XX_BLK_G))
					{
						printk("[3D] s3c_wait_blk_pwr_ready err\n");
						mutex_unlock(&pm_critical_section_lock);
						return -EFAULT;
					}
					clk_g3d_enable();
					/*Need here??*/
					softReset_g3d();
//					printk("[3D] Power on\n");
				}
				else
				{
					/*powered off by the system :: error*/
					printk("Error on the system :: app tries to work during sleep\n");
					mutex_unlock(&pm_critical_section_lock);
					return -EFAULT;
				}
			}
			else
			{
				/*already powered on : nothing to do*/
				//g_G3D_SelfPowerOFF=0;
			}
		}
		else if(g_G3D_CriticalFlag < 0)
		{
			printk("Error on the system :: g_G3D_CriticalFlag < 0\n");
		}
//		printk("S3C_3D_CRITICAL_SECTION: param_pm.criticalSection=%d\n",param_pm.criticalSection);

		if (copy_to_user((void *)arg,&param_pm,sizeof(struct s3c_3d_pm_status)))
		{
			printk("G3D: copy_to_user failed to get s3c_3d_pm_status\n");

			mutex_unlock(&pm_critical_section_lock);
			return -EFAULT;
		}
		mutex_unlock(&pm_critical_section_lock);
#endif /* USE_G3D_DOMAIN_GATING */
		break;

	default:
		DEBUG("s3c_g3d_ioctl() : default !!\n");
		return -EINVAL;
	}

	return 0;
}