/*
 * g2d_sysmmu_on() - enable the G2D system MMU.
 *
 * Gates the G2D clock on just long enough to program the SysMMU with the
 * kernel page directory (physical address of init_mm.pgd), then gates it
 * back off.
 */
void g2d_sysmmu_on(struct g2d_global *g2d_dev)
{
	unsigned long pgd_phys;

	pgd_phys = (unsigned long)virt_to_phys((void *)init_mm.pgd);

	g2d_clk_enable(g2d_dev);
	s5p_sysmmu_enable(g2d_dev->dev, pgd_phys);
	g2d_clk_disable(g2d_dev);
}
/*
 * g2d_poll() - poll entry point for the G2D device node.
 *
 * Reports POLLOUT | POLLWRNORM once the engine is idle (in_use == 0).
 * When the idle state is observed, the clock is gated off and the device
 * lock (held across the in-flight operation) is released here.
 * If the engine is busy, the caller is registered on the wait queue and
 * the idle state is re-checked afterwards.
 */
static unsigned int g2d_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = 0;

	if (atomic_read(&g2d_dev->in_use) != 0) {
		/* Busy: register on the wait queue, then re-check. */
		poll_wait(file, &g2d_dev->waitq, wait);
	}

	if (atomic_read(&g2d_dev->in_use) == 0) {
		mask = POLLOUT | POLLWRNORM;
		g2d_clk_disable(g2d_dev);
		mutex_unlock(&g2d_dev->lock);
	}

	return mask;
}
static int g2d_remove(struct platform_device *dev) { FIMG2D_DEBUG("g2d_remove called !\n"); free_irq(g2d_dev->irq_num, NULL); if (g2d_dev->mem != NULL) { FIMG2D_INFO("releasing resource\n"); iounmap(g2d_dev->base); release_resource(g2d_dev->mem); kfree(g2d_dev->mem); } misc_deregister(&fimg2d_dev); atomic_set(&g2d_dev->in_use, 0); atomic_set(&g2d_dev->num_of_object, 0); g2d_clk_disable(g2d_dev); if (g2d_dev->clock) { clk_put(g2d_dev->clock); g2d_dev->clock = NULL; } mutex_destroy(&g2d_dev->lock); kfree(g2d_dev); #if defined(CONFIG_HAS_EARLYSUSPEND) unregister_early_suspend(&g2d_dev->early_suspend); #endif #if defined(CONFIG_S5PV310_DEV_PD) /* disable the power domain */ pm_runtime_put(&dev->dev); pm_runtime_disable(&dev->dev); #endif FIMG2D_DEBUG("g2d_remove ok!\n"); return 0; }
/*
 * g2d_sysmmu_off() - disable the G2D system MMU.
 *
 * Briefly enables the G2D clock so the SysMMU registers are accessible,
 * disables translation via the s5p_sysmmu API, then gates the clock off.
 */
void g2d_sysmmu_off(struct g2d_global *g2d_dev)
{
	g2d_clk_enable(g2d_dev);
	s5p_sysmmu_disable(g2d_dev->dev);
	g2d_clk_disable(g2d_dev);
}
/* NOTE(review): second definition of g2d_sysmmu_off in this chunk, using the
 * older sysmmu_off(SYSMMU_G2D) API instead of s5p_sysmmu_disable(). The two
 * are presumably selected by #ifdef guards that fall outside this view —
 * confirm against the full file; if both are unconditionally compiled this
 * is a duplicate-symbol error. */
void g2d_sysmmu_off(struct g2d_global *g2d_dev) { g2d_clk_enable(g2d_dev); sysmmu_off(SYSMMU_G2D); g2d_clk_disable(g2d_dev); }
static long g2d_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { g2d_params params; int ret = -1; struct g2d_dma_info dma_info; switch(cmd) { case G2D_GET_MEMORY : ret = copy_to_user((unsigned int *)arg, &(g2d_dev->reserved_mem.base), sizeof(g2d_dev->reserved_mem.base)); if (ret) { FIMG2D_ERROR("error : copy_to_user\n"); return -EINVAL; } return 0; case G2D_GET_MEMORY_SIZE : ret = copy_to_user((unsigned int *)arg, &(g2d_dev->reserved_mem.size), sizeof(g2d_dev->reserved_mem.size)); if (ret) { FIMG2D_ERROR("error : copy_to_user\n"); return -EINVAL; } return 0; case G2D_DMA_CACHE_CLEAN : case G2D_DMA_CACHE_FLUSH : mutex_lock(&g2d_dev->lock); ret = copy_from_user(&dma_info, (struct g2d_dma_info *)arg, sizeof(dma_info)); if (ret) { FIMG2D_ERROR("error : copy_from_user\n"); mutex_unlock(&g2d_dev->lock); return -EINVAL; } if (dma_info.addr == 0) { FIMG2D_ERROR("addr Null Error!!!\n"); mutex_unlock(&g2d_dev->lock); return -EINVAL; } g2d_mem_cache_op(cmd, (void *)dma_info.addr, dma_info.size); mutex_unlock(&g2d_dev->lock); return 0; case G2D_SYNC : g2d_check_fifo_state_wait(g2d_dev); ret = 0; goto g2d_ioctl_done; case G2D_RESET : g2d_reset(g2d_dev); FIMG2D_ERROR("G2D TimeOut Error\n"); ret = 0; goto g2d_ioctl_done; case G2D_BLIT: if (atomic_read(&g2d_dev->ready_to_run) == 0) goto g2d_ioctl_done2; mutex_lock(&g2d_dev->lock); g2d_clk_enable(g2d_dev); if (copy_from_user(¶ms, (struct g2d_params *)arg, sizeof(g2d_params))) { FIMG2D_ERROR("error : copy_from_user\n"); goto g2d_ioctl_done; } g2d_dev->irq_handled = 0; atomic_set(&g2d_dev->in_use, 1); if (atomic_read(&g2d_dev->ready_to_run) == 0) goto g2d_ioctl_done; if (!g2d_do_blit(g2d_dev, ¶ms)) { g2d_dev->irq_handled = 1; goto g2d_ioctl_done; } if (!(params.flag.render_mode & G2D_HYBRID_MODE)) { if(!(file->f_flags & O_NONBLOCK)) { if (!g2d_wait_for_finish(g2d_dev, ¶ms)) goto g2d_ioctl_done; } } else { ret = 0; goto g2d_ioctl_done2; } ret = 0; break; default : goto g2d_ioctl_done2; break; } g2d_ioctl_done : 
g2d_clk_disable(g2d_dev); mutex_unlock(&g2d_dev->lock); atomic_set(&g2d_dev->in_use, 0); g2d_ioctl_done2 : return ret; }