/*
 * register_debug_features - for registering debug features.
 * Currently registered features are as follows:
 *  - iovmm fault handler
 *  - ...
 */
static void register_debug_features(void)
{
	/* 1. fault handler registration */
	iovmm_set_fault_handler(g_display_driver.display_driver,
			disp_driver_fault_handler, &g_display_driver);
}
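
/*
 * For reference only: a minimal sketch of what a fault handler passed to
 * iovmm_set_fault_handler() can look like.  This assumes the exynos-iovmm
 * layer forwards faults through the generic iommu_fault_handler_t prototype
 * from <linux/iommu.h>; the function name and message below are illustrative
 * and not taken from any of the drivers in this file.
 */
static int example_iovmm_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long fault_addr,
		int flags, void *token)
{
	/*
	 * 'token' is the last argument given to iovmm_set_fault_handler(),
	 * e.g. a driver context, so per-device state can be dumped here.
	 */
	dev_err(dev, "IOVMM page fault at %#lx (flags %#x)\n",
			fault_addr, flags);

	/* The return value is propagated back through report_iommu_fault(). */
	return -EFAULT;
}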
static void gsc_m2m_device_run(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc;
	unsigned long flags;
	int ret;
	bool is_set = false;

	if (WARN(!ctx, "null hardware context\n"))
		return;

	gsc = ctx->gsc_dev;

	if (in_irq())
		ret = pm_runtime_get(&gsc->pdev->dev);
	else
		ret = pm_runtime_get_sync(&gsc->pdev->dev);
	if (ret < 0) {
		gsc_err("fail to pm_runtime_get");
		return;
	}

	spin_lock_irqsave(&ctx->slock, flags);

	/* Reconfigure hardware if the context has changed. */
	if (gsc->m2m.ctx != ctx) {
		gsc_dbg("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
				gsc->m2m.ctx, ctx);
		ctx->state |= GSC_PARAMS;
		gsc->m2m.ctx = ctx;
	}

	is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 1 : 0;
	ctx->state &= ~GSC_CTX_STOP_REQ;
	if (is_set) {
		wake_up(&gsc->irq_queue);
		goto put_device;
	}

	ret = gsc_fill_addr(ctx);
	if (ret) {
		gsc_err("Wrong address");
		goto put_device;
	}

	if (ctx->state & GSC_PARAMS) {
		gsc_hw_set_sw_reset(gsc);
		ret = gsc_wait_reset(gsc);
		if (ret < 0) {
			gsc_err("gscaler s/w reset timeout");
			goto put_device;
		}
		gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_deadlock_irq_mask(gsc, false);
		gsc_hw_set_read_slave_error_mask(gsc, false);
		gsc_hw_set_write_slave_error_mask(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);
		gsc_hw_set_one_frm_mode(gsc, true);
		gsc_hw_set_freerun_clock_mode(gsc, false);

		if (gsc_set_scaler_info(ctx)) {
			gsc_err("Scaler setup error");
			goto put_device;
		}

		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);
		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);
		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_h_coef(ctx);
		gsc_hw_set_v_coef(ctx);

		if (ctx->scaler.is_scaled_down)
			gsc_hw_set_output_rotation(ctx);
		else
			gsc_hw_set_input_rotation(ctx);

		gsc_hw_set_global_alpha(ctx);

		if (is_rotation) {
			ret = gsc_check_rotation_size(ctx);
			if (ret < 0)
				goto put_device;
		}
	}

	gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
	gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);

	ctx->state &= ~GSC_PARAMS;

	if (!test_and_set_bit(ST_M2M_RUN, &gsc->state)) {
		/*
		 * One frame mode sequence:
		 * GSCALER_ON on -> GSCALER_OP_STATUS is operating ->
		 * GSCALER_ON off
		 */
		gsc_hw_enable_control(gsc, true);
#ifdef GSC_PERF
		gsc->start_time = sched_clock();
#endif
		ret = gsc_wait_operating(gsc);
		if (ret < 0) {
			gsc_err("gscaler wait operating timeout");
			goto put_device;
		}
		gsc->op_timer.expires = (jiffies + 2 * HZ);
		mod_timer(&gsc->op_timer, gsc->op_timer.expires);
	}

	spin_unlock_irqrestore(&ctx->slock, flags);

	iovmm_set_fault_handler(&gsc->pdev->dev,
			gsc_sysmmu_m2m_fault_handler, ctx);
	return;

put_device:
	ctx->state &= ~GSC_PARAMS;
	spin_unlock_irqrestore(&ctx->slock, flags);
	pm_runtime_put_sync(&gsc->pdev->dev);
}
static int jpeg_probe(struct platform_device *pdev)
{
	struct jpeg_dev *jpeg;
	struct resource *res;
	int i, ret;

	jpeg = devm_kzalloc(&pdev->dev, sizeof(struct jpeg_dev), GFP_KERNEL);
	if (!jpeg) {
		dev_err(&pdev->dev, "%s: not enough memory\n", __func__);
		return -ENOMEM;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "ip_ver", &jpeg->ver);
	if (ret) {
		dev_err(&pdev->dev, "%s: ip_ver doesn't exist\n", __func__);
		return -EINVAL;
	}

	jpeg->dev = &pdev->dev;

	spin_lock_init(&jpeg->slock);

	/* Get memory resource and map SFR region. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	jpeg->regs = devm_request_and_ioremap(&pdev->dev, res);
	if (jpeg->regs == NULL) {
		dev_err(&pdev->dev, "failed to claim register region\n");
		return -ENOENT;
	}

	/* Get IRQ resource and register IRQ handler. */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get IRQ resource\n");
		return -ENXIO;
	}

	ret = devm_request_irq(&pdev->dev, res->start,
			(void *)jpeg_irq_handler, 0, pdev->name, jpeg);
	if (ret) {
		dev_err(&pdev->dev, "failed to install irq\n");
		return ret;
	}

	/* clock */
	for (i = 0; i < JPEG_CLK_NUM; i++)
		jpeg->clocks[i] = ERR_PTR(-ENOENT);

	ret = jpeg_clk_get(jpeg);
	if (ret)
		return ret;

	jpeg->oneshot_dev = m2m1shot_create_device(&pdev->dev,
			&jpeg_oneshot_ops, "jpeg", pdev->id, -1);
	if (IS_ERR(jpeg->oneshot_dev)) {
		pr_err("%s: Failed to create m2m1shot device\n", __func__);
		ret = PTR_ERR(jpeg->oneshot_dev);
		goto err_m2m1shot;
	}

	platform_set_drvdata(pdev, jpeg);

	ret = exynos_create_iovmm(&pdev->dev, 3, 3);
	if (ret) {
		dev_err(&pdev->dev, "%s: Failed(%d) to create IOVMM\n",
				__func__, ret);
		goto err_iovmm;
	}

	ret = iovmm_activate(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "%s: Failed(%d) to activate IOVMM\n",
				__func__, ret);
		/* nothing to do for exynos_create_iovmm() */
		goto err_iovmm;
	}

	iovmm_set_fault_handler(&pdev->dev, jpeg_sysmmu_fault_handler, jpeg);

	pm_runtime_enable(&pdev->dev);

	if (!IS_ENABLED(CONFIG_PM_RUNTIME)) {
		jpeg_clock_gating(jpeg, true);
		set_bit(DEV_RUNTIME_RESUME, &jpeg->state);
	}

	dev_info(&pdev->dev, "JPEG driver registered successfully\n");

	return 0;

err_iovmm:
	m2m1shot_destroy_device(jpeg->oneshot_dev);
err_m2m1shot:
	jpeg_clk_put(jpeg);

	return ret;
}
static int fimg2d_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;
	struct fimg2d_platdata *pdata;
#ifdef CONFIG_OF
	struct device *dev = &pdev->dev;
	int id = 0;
#else
	pdata = to_fimg2d_plat(&pdev->dev);
#endif

	dev_info(&pdev->dev, "++%s\n", __func__);

#ifdef CONFIG_OF
	if (dev->of_node) {
		id = of_alias_get_id(pdev->dev.of_node, "fimg2d");
	} else {
		id = pdev->id;
		pdata = dev->platform_data;
		if (!pdata) {
			dev_err(&pdev->dev, "no platform data\n");
			return -EINVAL;
		}
	}
#else
	if (!to_fimg2d_plat(&pdev->dev)) {
		fimg2d_err("failed to get platform data\n");
		return -ENOMEM;
	}
#endif

	/* global structure */
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		fimg2d_err("failed to allocate memory for controller\n");
		return -ENOMEM;
	}

#ifdef CONFIG_OF
	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		fimg2d_err("failed to allocate memory for platform data\n");
		kfree(ctrl);
		return -ENOMEM;
	}
	ctrl->pdata = pdata;
	g2d_parse_dt(dev->of_node, ctrl->pdata);
#endif

	/* setup global ctrl */
	ret = fimg2d_setup_controller(ctrl);
	if (ret) {
		fimg2d_err("failed to setup controller\n");
		goto drv_free;
	}
	ctrl->dev = &pdev->dev;

	/* memory region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		fimg2d_err("failed to get resource\n");
		ret = -ENOENT;
		goto drv_free;
	}

	ctrl->mem = request_mem_region(res->start, resource_size(res),
			pdev->name);
	if (!ctrl->mem) {
		fimg2d_err("failed to request memory region\n");
		ret = -ENOMEM;
		goto drv_free;
	}

	/* ioremap */
	ctrl->regs = ioremap(res->start, resource_size(res));
	if (!ctrl->regs) {
		fimg2d_err("failed to ioremap for SFR\n");
		ret = -ENOENT;
		goto mem_free;
	}
	fimg2d_debug("base address: 0x%lx\n", (unsigned long)res->start);

	/* irq */
	ctrl->irq = platform_get_irq(pdev, 0);
	if (!ctrl->irq) {
		fimg2d_err("failed to get irq resource\n");
		ret = -ENOENT;
		goto reg_unmap;
	}
	fimg2d_debug("irq: %d\n", ctrl->irq);

	ret = request_irq(ctrl->irq, fimg2d_irq, IRQF_DISABLED,
			pdev->name, ctrl);
	if (ret) {
		fimg2d_err("failed to request irq\n");
		ret = -ENOENT;
		goto reg_unmap;
	}

	ret = fimg2d_clk_setup(ctrl);
	if (ret) {
		fimg2d_err("failed to setup clk\n");
		ret = -ENOENT;
		goto irq_free;
	}

	spin_lock_init(&ctrl->qoslock);

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_enable(ctrl->dev);
	fimg2d_info("enable runtime pm\n");
#else
	fimg2d_clk_on(ctrl);
#endif

#ifdef FIMG2D_IOVMM_PAGETABLE
	exynos_create_iovmm(dev, 3, 3);
#endif
	iovmm_set_fault_handler(dev, fimg2d_sysmmu_fault_handler, ctrl);
	fimg2d_debug("register sysmmu page fault handler\n");

	/* misc register */
	ret = misc_register(&fimg2d_dev);
	if (ret) {
		fimg2d_err("failed to register misc driver\n");
		goto clk_release;
	}

	fimg2d_pm_qos_add(ctrl);

	dev_info(&pdev->dev, "fimg2d registered successfully\n");

	return 0;

clk_release:
#ifdef CONFIG_PM_RUNTIME
	pm_runtime_disable(ctrl->dev);
#else
	fimg2d_clk_off(ctrl);
#endif
	fimg2d_clk_release(ctrl);

irq_free:
	/* dev_id must match the one passed to request_irq() above */
	free_irq(ctrl->irq, ctrl);
reg_unmap:
	iounmap(ctrl->regs);
mem_free:
	release_mem_region(res->start, resource_size(res));
drv_free:
#ifdef BLIT_WORKQUE
	if (ctrl->work_q)
		destroy_workqueue(ctrl->work_q);
#endif
	mutex_destroy(&ctrl->drvlock);
#ifdef CONFIG_OF
	kfree(pdata);
#endif
	kfree(ctrl);

	return ret;
}
static int fimc_is_probe(struct platform_device *pdev)
{
	struct exynos_platform_fimc_is *pdata;
#if defined(ENABLE_IS_CORE) || defined(USE_MCUCTL)
	struct resource *mem_res;
	struct resource *regs_res;
#endif
	struct fimc_is_core *core;
	int ret = -ENODEV;
#ifndef ENABLE_IS_CORE
	int i;
#endif
	u32 stream;
	struct pinctrl_state *s;

	probe_info("%s:start(%ld, %ld)\n", __func__,
			sizeof(struct fimc_is_core),
			sizeof(struct fimc_is_video_ctx));

	core = kzalloc(sizeof(struct fimc_is_core), GFP_KERNEL);
	if (!core) {
		probe_err("core is NULL");
		return -ENOMEM;
	}

	fimc_is_dev = &pdev->dev;
	dev_set_drvdata(fimc_is_dev, core);

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
#ifdef CONFIG_OF
		ret = fimc_is_parse_dt(pdev);
		if (ret) {
			err("fimc_is_parse_dt is fail(%d)", ret);
			return ret;
		}

		pdata = dev_get_platdata(&pdev->dev);
#else
		BUG();
#endif
	}

#ifdef USE_ION_ALLOC
	core->fimc_ion_client = ion_client_create(ion_exynos, "fimc-is");
#endif
	core->pdev = pdev;
	core->pdata = pdata;
	core->current_position = SENSOR_POSITION_REAR;
	device_init_wakeup(&pdev->dev, true);

	/* for mediaserver force down */
	atomic_set(&core->rsccount, 0);

#if defined(ENABLE_IS_CORE) || defined(USE_MCUCTL)
	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		probe_err("Failed to get io memory region(%p)", mem_res);
		goto p_err1;
	}

	regs_res = request_mem_region(mem_res->start, resource_size(mem_res),
			pdev->name);
	if (!regs_res) {
		probe_err("Failed to request io memory region(%p)", regs_res);
		goto p_err1;
	}

	core->regs_res = regs_res;
	core->regs = ioremap_nocache(mem_res->start, resource_size(mem_res));
	if (!core->regs) {
		probe_err("Failed to remap io region(%p)", core->regs);
		goto p_err2;
	}
#else
	core->regs_res = NULL;
	core->regs = NULL;
#endif

#ifdef ENABLE_IS_CORE
	core->irq = platform_get_irq(pdev, 0);
	if (core->irq < 0) {
		probe_err("Failed to get irq(%d)", core->irq);
		goto p_err3;
	}
#endif

	ret = pdata->clk_get(&pdev->dev);
	if (ret) {
		probe_err("clk_get is fail(%d)", ret);
		goto p_err3;
	}

	ret = fimc_is_mem_probe(&core->resourcemgr.mem, core->pdev);
	if (ret) {
		probe_err("fimc_is_mem_probe is fail(%d)", ret);
		goto p_err3;
	}

	ret = fimc_is_resourcemgr_probe(&core->resourcemgr, core);
	if (ret) {
		probe_err("fimc_is_resourcemgr_probe is fail(%d)", ret);
		goto p_err3;
	}

	ret = fimc_is_interface_probe(&core->interface,
			&core->resourcemgr.minfo, (ulong)core->regs,
			core->irq, core);
	if (ret) {
		probe_err("fimc_is_interface_probe is fail(%d)", ret);
		goto p_err3;
	}

	ret = fimc_is_debug_probe();
	if (ret) {
		probe_err("fimc_is_debug_probe is fail(%d)", ret);
		goto p_err3;
	}

	ret = fimc_is_vender_probe(&core->vender);
	if (ret) {
		probe_err("fimc_is_vender_probe is fail(%d)", ret);
		goto p_err3;
	}

	/* group initialization */
	ret = fimc_is_groupmgr_probe(&core->groupmgr);
	if (ret) {
		probe_err("fimc_is_groupmgr_probe is fail(%d)", ret);
		goto p_err3;
	}

	for (stream = 0; stream < FIMC_IS_STREAM_COUNT; ++stream) {
		ret = fimc_is_ischain_probe(&core->ischain[stream],
				&core->interface, &core->resourcemgr,
				&core->groupmgr, &core->resourcemgr.mem,
				core->pdev, stream);
		if (ret) {
			probe_err("fimc_is_ischain_probe(%d) is fail(%d)",
					stream, ret);
			goto p_err3;
		}

#ifndef ENABLE_IS_CORE
		core->ischain[stream].hardware = &core->hardware;
#endif
	}

	ret = v4l2_device_register(&pdev->dev, &core->v4l2_dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register fimc-is v4l2 device\n");
		goto p_err3;
	}

#ifdef SOC_30S
	/* video entity - 3a0 */
	fimc_is_30s_video_probe(core);
#endif

#ifdef SOC_30C
	/* video entity - 3a0 capture */
	fimc_is_30c_video_probe(core);
#endif

#ifdef SOC_30P
	/* video entity - 3a0 preview */
	fimc_is_30p_video_probe(core);
#endif

#ifdef SOC_31S
	/* video entity - 3a1 */
	fimc_is_31s_video_probe(core);
#endif

#ifdef SOC_31C
	/* video entity - 3a1 capture */
	fimc_is_31c_video_probe(core);
#endif

#ifdef SOC_31P
	/* video entity - 3a1 preview */
	fimc_is_31p_video_probe(core);
#endif

#ifdef SOC_I0S
	/* video entity - isp0 */
	fimc_is_i0s_video_probe(core);
#endif

#ifdef SOC_I0C
	/* video entity - isp0 capture */
	fimc_is_i0c_video_probe(core);
#endif

#ifdef SOC_I0P
	/* video entity - isp0 preview */
	fimc_is_i0p_video_probe(core);
#endif

#ifdef SOC_I1S
	/* video entity - isp1 */
	fimc_is_i1s_video_probe(core);
#endif

#ifdef SOC_I1C
	/* video entity - isp1 capture */
	fimc_is_i1c_video_probe(core);
#endif

#ifdef SOC_I1P
	/* video entity - isp1 preview */
	fimc_is_i1p_video_probe(core);
#endif

#ifdef SOC_DIS
	/* video entity - dis */
	fimc_is_dis_video_probe(core);
#endif

#ifdef SOC_SCC
	/* video entity - scc */
	fimc_is_scc_video_probe(core);
#endif

#ifdef SOC_SCP
	/* video entity - scp */
	fimc_is_scp_video_probe(core);
#endif

#ifdef SOC_MCS
	/* video entity - mcsc */
	fimc_is_m0s_video_probe(core);
	fimc_is_m1s_video_probe(core);
	fimc_is_m0p_video_probe(core);
	fimc_is_m1p_video_probe(core);
	fimc_is_m2p_video_probe(core);
	fimc_is_m3p_video_probe(core);
	fimc_is_m4p_video_probe(core);
#endif

	platform_set_drvdata(pdev, core);

#ifndef ENABLE_IS_CORE
	ret = fimc_is_interface_ischain_probe(&core->interface_ischain,
			&core->hardware, &core->resourcemgr,
			core->pdev, (ulong)core->regs);
	if (ret) {
		dev_err(&pdev->dev, "interface_ischain_probe fail\n");
		goto p_err1;
	}

	ret = fimc_is_hardware_probe(&core->hardware, &core->interface,
			&core->interface_ischain);
	if (ret) {
		dev_err(&pdev->dev, "hardware_probe fail\n");
		goto p_err1;
	}

	/* set sysfs for set position to actuator */
	sysfs_actuator.init_step = 0;
	for (i = 0; i < INIT_MAX_SETTING; i++) {
		sysfs_actuator.init_positions[i] = -1;
		sysfs_actuator.init_delays[i] = -1;
	}
#endif

#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
#if defined(CONFIG_VIDEOBUF2_ION)
	if (core->resourcemgr.mem.alloc_ctx)
		vb2_ion_attach_iommu(core->resourcemgr.mem.alloc_ctx);
#endif
#endif

	EXYNOS_MIF_ADD_NOTIFIER(&exynos_fimc_is_mif_throttling_nb);

#if defined(CONFIG_PM_RUNTIME)
	pm_runtime_enable(&pdev->dev);
#endif

#ifdef ENABLE_FAULT_HANDLER
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
	exynos_sysmmu_set_fault_handler(fimc_is_dev, fimc_is_fault_handler);
#else
	iovmm_set_fault_handler(fimc_is_dev, fimc_is_fault_handler, NULL);
#endif
#endif

	/* set sysfs for debugging */
	sysfs_debug.en_clk_gate = 0;
	sysfs_debug.en_dvfs = 1;
#ifdef ENABLE_CLOCK_GATE
	sysfs_debug.en_clk_gate = 1;
#ifdef HAS_FW_CLOCK_GATE
	sysfs_debug.clk_gate_mode = CLOCK_GATE_MODE_FW;
#else
	sysfs_debug.clk_gate_mode = CLOCK_GATE_MODE_HOST;
#endif
#endif
	ret = sysfs_create_group(&core->pdev->dev.kobj,
			&fimc_is_debug_attr_group);

	s = pinctrl_lookup_state(pdata->pinctrl, "release");
	if (pinctrl_select_state(pdata->pinctrl, s) < 0) {
		probe_err("pinctrl_select_state is fail\n");
		goto p_err3;
	}

	probe_info("%s:end\n", __func__);
	return 0;

p_err3:
	iounmap(core->regs);
#if defined(ENABLE_IS_CORE) || defined(USE_MCUCTL)
p_err2:
	release_mem_region(regs_res->start, resource_size(regs_res));
#endif
p_err1:
	kfree(core);
	return ret;
}