/*
 * g2d_wait_for_finish - wait for the submitted blit to complete.
 *
 * @g2d_dev: driver-global state (waitqueue, in_use / fault flags)
 * @params:  the blit job, used for G2D_POLLING mode and failure dumps
 *
 * Returns true on normal completion, false on an SMMU fault or on an
 * interrupt timeout (after resetting the engine).  In the failure paths
 * in_use is deliberately left for the caller's cleanup path; on success
 * it is cleared here.
 */
int g2d_wait_for_finish(struct g2d_global *g2d_dev, g2d_params *params)
{
	/* A fault may already have been flagged by the SMMU handler
	 * before we even start waiting; bail out immediately. */
	if(atomic_read(&g2d_dev->is_mmu_faulted) == 1) {
		FIMG2D_ERROR("error : sysmmu_faulted early\n");
		atomic_set(&g2d_dev->is_mmu_faulted, 0);
		return false;
	}

	if (params->flag.render_mode & G2D_POLLING) {
		/* Polling mode: busy-wait on the FIFO instead of sleeping. */
		g2d_check_fifo_state_wait(g2d_dev);
	} else {
		/* Sleep until the IRQ handler clears in_use, or time out. */
		if(wait_event_interruptible_timeout(g2d_dev->waitq,
				(atomic_read(&g2d_dev->in_use) == 0),
				msecs_to_jiffies(G2D_TIMEOUT)) == 0) {
			/* Timed out: distinguish an SMMU fault (engine is
			 * already stopped) from a genuine hang (reset it). */
			if(atomic_read(&g2d_dev->is_mmu_faulted) == 1) {
				FIMG2D_ERROR("error : sysmmu_faulted\n");
				FIMG2D_ERROR("faulted addr: 0x%x\n", g2d_dev->faulted_addr);
			} else {
				g2d_reset(g2d_dev);
				FIMG2D_ERROR("error : waiting for interrupt is timeout\n");
			}
			atomic_set(&g2d_dev->is_mmu_faulted, 0);
			g2d_fail_debug(params);
			return false;
		} else if(atomic_read(&g2d_dev->is_mmu_faulted) == 1) {
			/* Woken normally, but a fault occurred meanwhile and
			 * was auto-recovered; still report the job as failed. */
			FIMG2D_ERROR("error : sysmmu_faulted but auto recoveried\n");
			atomic_set(&g2d_dev->is_mmu_faulted, 0);
			return false;
		}
	}

	atomic_set(&g2d_dev->in_use, 0);
	return true;
}
int g2d_init_mem(struct device *dev, unsigned int *base, unsigned int *size) { #ifdef CONFIG_S5P_MEM_CMA struct cma_info mem_info; int err; char cma_name[8]; #endif #ifdef CONFIG_S5P_MEM_CMA /* CMA */ sprintf(cma_name, "fimg2d"); err = cma_info(&mem_info, dev, 0); FIMG2D_DEBUG("[cma_info] start_addr : 0x%x, end_addr : 0x%x, " "total_size : 0x%x, free_size : 0x%x\n", mem_info.lower_bound, mem_info.upper_bound, mem_info.total_size, mem_info.free_size); if (err) { FIMG2D_ERROR("%s: get cma info failed\n", __func__); return -1; } *size = mem_info.total_size; *base = (dma_addr_t)cma_alloc (dev, cma_name, (size_t)(*size), 0); FIMG2D_DEBUG("size = 0x%x\n", *size); FIMG2D_DEBUG("*base phys= 0x%x\n", *base); FIMG2D_DEBUG("*base virt = 0x%x\n", (u32)phys_to_virt(*base)); #else *base = s5p_get_media_memory_bank(S5P_MDEV_FIMG2D, 0); #endif return 0; }
/*
 * g2d_init - module entry point: register the FIMG2D platform driver.
 * Returns 0 on success, -1 if driver registration fails.
 */
int __init g2d_init(void)
{
	int ret;

	ret = platform_driver_register(&fimg2d_driver);
	if (ret != 0) {
		FIMG2D_ERROR("platform device register Failed \n");
		return -1;
	}

	FIMG2D_DEBUG("ok!\n");
	return 0;
}
/*
 * g2d_fail_debug - dump the full parameter block of a failed blit.
 *
 * @params: the job that failed
 *
 * Logs source and destination rects (x, y, w, h / full_w, full_h /
 * color_format, bytes_per_pixel, addr), the clip window, and every
 * flag field, so a failure can be reproduced from the kernel log.
 */
void g2d_fail_debug(g2d_params *params)
{
	FIMG2D_ERROR("src : %d, %d, %d, %d / %d, %d / 0x%x, %d, 0x%x)\n",
		params->src_rect.x,
		params->src_rect.y,
		params->src_rect.w,
		params->src_rect.h,
		params->src_rect.full_w,
		params->src_rect.full_h,
		params->src_rect.color_format,
		params->src_rect.bytes_per_pixel,
		(u32)params->src_rect.addr);
	FIMG2D_ERROR("dst : %d, %d, %d, %d / %d, %d / 0x%x, %d, 0x%x)\n",
		params->dst_rect.x,
		params->dst_rect.y,
		params->dst_rect.w,
		params->dst_rect.h,
		params->dst_rect.full_w,
		params->dst_rect.full_h,
		params->dst_rect.color_format,
		params->dst_rect.bytes_per_pixel,
		(u32)params->dst_rect.addr);
	FIMG2D_ERROR("clip: %d, %d, %d, %d\n",
		params->clip.t,
		params->clip.b,
		params->clip.l,
		params->clip.r);
	FIMG2D_ERROR("flag: %d, %d, %d, %d / %d, %d, %d, %d / %d, %d, %d, %d\n",
		params->flag.rotate_val,
		params->flag.alpha_val,
		params->flag.blue_screen_mode,
		params->flag.color_key_val,
		params->flag.color_switch_val,
		params->flag.src_color,
		params->flag.third_op_mode,
		params->flag.rop_mode,
		params->flag.mask_mode,
		params->flag.render_mode,
		params->flag.potterduff_mode,
		params->flag.memory_type);
}
/*
 * g2d_do_blit - validate a blit job, prepare caches/SMMU, and start it.
 *
 * @g2d_dev: driver-global state
 * @params:  blit job (rects, clip, flags); for G2D_MEMORY_KERNEL the
 *           addr fields are converted from physical to kernel-virtual
 *           in place
 *
 * Returns true when the bitblt has been started, false on any
 * validation failure (NULL addresses, overlapping user buffers,
 * invalid page tables, register setup failure).
 *
 * Fix: the g2d_clip_for_src() arguments were mis-encoded as
 * "¶ms->..." (a mangled "&params->...") — restored to valid C.
 */
int g2d_do_blit(struct g2d_global *g2d_dev, g2d_params *params)
{
	unsigned long pgd;
	int need_dst_clean = true;

	if ((params->src_rect.addr == NULL) || (params->dst_rect.addr == NULL)) {
		FIMG2D_ERROR("error : addr Null\n");
		return false;
	}

	/* Kernel buffers arrive as physical addresses; map them and use the
	 * kernel page table.  Everything else uses the caller's mm. */
	if (params->flag.memory_type == G2D_MEMORY_KERNEL) {
		params->src_rect.addr = (unsigned char *)phys_to_virt((unsigned long)params->src_rect.addr);
		params->dst_rect.addr = (unsigned char *)phys_to_virt((unsigned long)params->dst_rect.addr);
		pgd = (unsigned long)init_mm.pgd;
	} else {
		pgd = (unsigned long)current->mm->pgd;
	}

	if (params->flag.memory_type == G2D_MEMORY_USER) {
		g2d_clip clip_src;
		g2d_clip_for_src(&params->src_rect, &params->dst_rect,
				&params->clip, &clip_src);

		/* Hardware cannot handle overlapping src/dst in user memory. */
		if (g2d_check_overlap(params->src_rect, params->dst_rect,
				params->clip))
			return false;

		/* Both buffers must be fully mapped in the caller's page
		 * table before the SMMU walks it (src is padded by 8). */
		g2d_dev->src_attribute =
			g2d_check_pagetable((unsigned char *)GET_START_ADDR(params->src_rect),
				(unsigned int)GET_RECT_SIZE(params->src_rect) + 8,
				(u32)virt_to_phys((void *)pgd));
		if (g2d_dev->src_attribute == G2D_PT_NOTVALID) {
			FIMG2D_DEBUG("Src is not in valid pagetable\n");
			return false;
		}

		g2d_dev->dst_attribute =
			g2d_check_pagetable((unsigned char *)GET_START_ADDR_C(params->dst_rect, params->clip),
				(unsigned int)GET_RECT_SIZE_C(params->dst_rect, params->clip),
				(u32)virt_to_phys((void *)pgd));
		if (g2d_dev->dst_attribute == G2D_PT_NOTVALID) {
			FIMG2D_DEBUG("Dst is not in valid pagetable\n");
			return false;
		}

		/* Flush the page-table entries the SMMU will read. */
		g2d_pagetable_clean((unsigned char *)GET_START_ADDR(params->src_rect),
			(u32)GET_RECT_SIZE(params->src_rect) + 8,
			(u32)virt_to_phys((void *)pgd));
		g2d_pagetable_clean((unsigned char *)GET_START_ADDR_C(params->dst_rect, params->clip),
			(u32)GET_RECT_SIZE_C(params->dst_rect, params->clip),
			(u32)virt_to_phys((void *)pgd));

		if (params->flag.render_mode & G2D_CACHE_OP) {
			/* NOTE(review): an earlier one-shot cache path
			 * (g2d_mem_cache_oneshot) and the assignment
			 * "need_dst_clean = g2d_check_need_dst_cache_clean(...)"
			 * were both commented out upstream; the call below
			 * therefore discards its result and need_dst_clean is
			 * only updated by g2d_mem_outer_cache(). Preserved
			 * as-is. */
			g2d_check_need_dst_cache_clean(params);
			g2d_mem_inner_cache(params);
			g2d_mem_outer_cache(g2d_dev, params, &need_dst_clean);
		}
	}

	s5p_sysmmu_set_tablebase_pgd(g2d_dev->dev, (u32)virt_to_phys((void *)pgd));

	if (g2d_init_regs(g2d_dev, params) < 0) {
		return false;
	}

	/* Do bitblit */
	g2d_start_bitblt(g2d_dev, params);

	if (!need_dst_clean)
		g2d_mem_outer_cache_inv(params);

	return true;
}
static int g2d_probe(struct platform_device *pdev) { struct resource *res; int ret; struct clk *parent; struct clk *sclk; FIMG2D_DEBUG("start probe : name=%s num=%d res[0].start=0x%x res[1].start=0x%x\n", pdev->name, pdev->num_resources, pdev->resource[0].start, pdev->resource[1].start); /* alloc g2d global */ g2d_dev = kzalloc(sizeof(*g2d_dev), GFP_KERNEL); if (!g2d_dev) { FIMG2D_ERROR( "not enough memory\n"); return -ENOENT; goto probe_out; } /* get the memory region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if(res == NULL) { FIMG2D_ERROR("failed to get memory region resouce\n"); return -ENOENT; goto err_get_res; } /* request momory region */ g2d_dev->mem = request_mem_region(res->start, res->end - res->start + 1, pdev->name); if(g2d_dev->mem == NULL) { FIMG2D_ERROR("failed to reserve memory region\n"); return -ENOENT; goto err_mem_req; } /* ioremap */ g2d_dev->base = ioremap(g2d_dev->mem->start, g2d_dev->mem->end - res->start + 1); if(g2d_dev->base == NULL) { FIMG2D_ERROR("failed ioremap\n"); ret = -ENOENT; goto err_mem_map; } /* get irq */ g2d_dev->irq_num = platform_get_irq(pdev, 0); if(g2d_dev->irq_num <= 0) { FIMG2D_ERROR("failed to get irq resouce\n"); ret = -ENOENT; goto err_irq_req; } /* request irq */ ret = request_irq(g2d_dev->irq_num, g2d_irq, IRQF_DISABLED, pdev->name, NULL); if (ret) { FIMG2D_ERROR("request_irq(g2d) failed.\n"); ret = -ENOENT; goto err_irq_req; } /* clock domain setting*/ parent = clk_get(&pdev->dev, "mout_mpll"); if (IS_ERR(parent)) { FIMG2D_ERROR("failed to get parent clock\n"); ret = -ENOENT; goto err_clk_get1; } sclk = clk_get(&pdev->dev, "sclk_fimg2d"); if (IS_ERR(sclk)) { FIMG2D_ERROR("failed to get sclk_g2d clock\n"); ret = -ENOENT; goto err_clk_get2; } clk_set_parent(sclk, parent); clk_set_rate(sclk, 250 * MHZ); /* clock for gating */ g2d_dev->clock = clk_get(&pdev->dev, "fimg2d"); if (IS_ERR(g2d_dev->clock)) { FIMG2D_ERROR("failed to get clock clock\n"); ret = -ENOENT; goto err_clk_get3; } ret = 
g2d_init_mem(&pdev->dev, &g2d_dev->reserved_mem.base, &g2d_dev->reserved_mem.size); if (ret != 0) { FIMG2D_ERROR("failed to init. fimg2d mem"); ret = -ENOMEM; goto err_mem; } /* blocking I/O */ init_waitqueue_head(&g2d_dev->waitq); /* atomic init */ atomic_set(&g2d_dev->in_use, 0); atomic_set(&g2d_dev->num_of_object, 0); atomic_set(&g2d_dev->is_mmu_faulted, 0); g2d_dev->faulted_addr = 0; /* misc register */ ret = misc_register(&fimg2d_dev); if (ret) { FIMG2D_ERROR("cannot register miscdev on minor=%d (%d)\n", G2D_MINOR, ret); goto err_misc_reg; } mutex_init(&g2d_dev->lock); g2d_sysmmu_on(g2d_dev); #if defined(CONFIG_S5PV310_DEV_PD) /* to use the runtime PM helper functions */ pm_runtime_enable(&pdev->dev); /* enable the power domain */ pm_runtime_get_sync(&pdev->dev); #endif #if defined(CONFIG_HAS_EARLYSUSPEND) g2d_dev->early_suspend.suspend = g2d_early_suspend; g2d_dev->early_suspend.resume = g2d_late_resume; g2d_dev->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 3; register_early_suspend(&g2d_dev->early_suspend); #endif g2d_dev->dev = &pdev->dev; atomic_set(&g2d_dev->ready_to_run, 1); FIMG2D_DEBUG("g2d_probe ok!\n"); return 0; err_misc_reg: clk_put(g2d_dev->clock); g2d_dev->clock = NULL; err_mem: err_clk_get3: clk_put(sclk); err_clk_get2: clk_put(parent); err_clk_get1: free_irq(g2d_dev->irq_num, NULL); err_irq_req: iounmap(g2d_dev->base); err_mem_map: release_resource(g2d_dev->mem); kfree(g2d_dev->mem); err_mem_req: err_get_res: kfree(g2d_dev); probe_out: FIMG2D_ERROR("g2d: sec_g2d_probe fail!\n"); return ret; }
static long g2d_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { g2d_params params; int ret = -1; struct g2d_dma_info dma_info; switch(cmd) { case G2D_GET_MEMORY : ret = copy_to_user((unsigned int *)arg, &(g2d_dev->reserved_mem.base), sizeof(g2d_dev->reserved_mem.base)); if (ret) { FIMG2D_ERROR("error : copy_to_user\n"); return -EINVAL; } return 0; case G2D_GET_MEMORY_SIZE : ret = copy_to_user((unsigned int *)arg, &(g2d_dev->reserved_mem.size), sizeof(g2d_dev->reserved_mem.size)); if (ret) { FIMG2D_ERROR("error : copy_to_user\n"); return -EINVAL; } return 0; case G2D_DMA_CACHE_CLEAN : case G2D_DMA_CACHE_FLUSH : mutex_lock(&g2d_dev->lock); ret = copy_from_user(&dma_info, (struct g2d_dma_info *)arg, sizeof(dma_info)); if (ret) { FIMG2D_ERROR("error : copy_from_user\n"); mutex_unlock(&g2d_dev->lock); return -EINVAL; } if (dma_info.addr == 0) { FIMG2D_ERROR("addr Null Error!!!\n"); mutex_unlock(&g2d_dev->lock); return -EINVAL; } g2d_mem_cache_op(cmd, (void *)dma_info.addr, dma_info.size); mutex_unlock(&g2d_dev->lock); return 0; case G2D_SYNC : g2d_check_fifo_state_wait(g2d_dev); ret = 0; goto g2d_ioctl_done; case G2D_RESET : g2d_reset(g2d_dev); FIMG2D_ERROR("G2D TimeOut Error\n"); ret = 0; goto g2d_ioctl_done; case G2D_BLIT: if (atomic_read(&g2d_dev->ready_to_run) == 0) goto g2d_ioctl_done2; mutex_lock(&g2d_dev->lock); g2d_clk_enable(g2d_dev); if (copy_from_user(¶ms, (struct g2d_params *)arg, sizeof(g2d_params))) { FIMG2D_ERROR("error : copy_from_user\n"); goto g2d_ioctl_done; } g2d_dev->irq_handled = 0; atomic_set(&g2d_dev->in_use, 1); if (atomic_read(&g2d_dev->ready_to_run) == 0) goto g2d_ioctl_done; if (!g2d_do_blit(g2d_dev, ¶ms)) { g2d_dev->irq_handled = 1; goto g2d_ioctl_done; } if (!(params.flag.render_mode & G2D_HYBRID_MODE)) { if(!(file->f_flags & O_NONBLOCK)) { if (!g2d_wait_for_finish(g2d_dev, ¶ms)) goto g2d_ioctl_done; } } else { ret = 0; goto g2d_ioctl_done2; } ret = 0; break; default : goto g2d_ioctl_done2; break; } g2d_ioctl_done : 
g2d_clk_disable(g2d_dev); mutex_unlock(&g2d_dev->lock); atomic_set(&g2d_dev->in_use, 0); g2d_ioctl_done2 : return ret; }