static int s5p_mfc_cmd_host2risc(int cmd, struct s5p_mfc_cmd_args *args) { int cur_cmd; unsigned long timeout; timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT); /* wait until host to risc command register becomes 'H2R_CMD_EMPTY' */ do { if (time_after(jiffies, timeout)) { mfc_err("Timeout while waiting for hardware.\n"); return -EIO; } cur_cmd = s5p_mfc_read_reg(S5P_FIMV_HOST2RISC_CMD); } while (cur_cmd != S5P_FIMV_H2R_CMD_EMPTY); s5p_mfc_write_reg(args->arg[0], S5P_FIMV_HOST2RISC_ARG1); s5p_mfc_write_reg(args->arg[1], S5P_FIMV_HOST2RISC_ARG2); s5p_mfc_write_reg(args->arg[2], S5P_FIMV_HOST2RISC_ARG3); s5p_mfc_write_reg(args->arg[3], S5P_FIMV_HOST2RISC_ARG4); /* Issue the command */ s5p_mfc_write_reg(cmd, S5P_FIMV_HOST2RISC_CMD); return 0; }
/*
 * Enable the MFC gate clock and bring the IP into a usable state.
 *
 * Sequence (as implemented below):
 *  1. clk_enable() the gate clock; bail out on failure.
 *  2. If a firmware buffer base has been selected (pm.base_type !=
 *     MFCBUF_INVALID), re-program the memory controller.
 *  3. Secure (DRM) contexts with SMC support: enable content protection
 *     via exynos_smc() under pm.clklock. Normal contexts: resume the
 *     memory allocator context instead.
 *  4. On MFCv6, the first clock reference (clk_ref 0 -> 1) also clears
 *     bit 0 of the bus-reset control register when the firmware supports
 *     bus reset, re-enabling bus traffic.
 *
 * pm.clock_on_steps accumulates one bit per stage reached — presumably a
 * breadcrumb trail for post-mortem debugging; TODO confirm the consumer.
 *
 * Returns 0 on success or a negative errno; every failure path disables
 * the clock it enabled.
 */
int s5p_mfc_clock_on(struct s5p_mfc_dev *dev)
{
	int ret = 0;
	int state, val;
	unsigned long flags;

	dev->pm.clock_on_steps = 1;
	MFC_TRACE_DEV("++ clock_on: Set clock rate(%d)\n", dev->curr_rate);

	ret = clk_enable(dev->pm.clock);
	if (ret < 0)
		return ret;

	/* MFCBUF_INVALID means no firmware base selected — skip memctrl init. */
	if (dev->pm.base_type != MFCBUF_INVALID)
		s5p_mfc_init_memctrl(dev, dev->pm.base_type);

	dev->pm.clock_on_steps |= 0x1 << 1;
	if (dev->curr_ctx_drm && dev->is_support_smc) {
		spin_lock_irqsave(&dev->pm.clklock, flags);
		mfc_debug(3, "Begin: enable protection\n");
		ret = exynos_smc(SMC_PROTECTION_SET, 0, dev->id,
					SMC_PROTECTION_ENABLE);
		dev->pm.clock_on_steps |= 0x1 << 2;
		/* NOTE(review): a zero return from exynos_smc() is treated as
		 * failure here — confirm against the SMC calling convention. */
		if (!ret) {
			printk("Protection Enable failed! ret(%u)\n", ret);
			spin_unlock_irqrestore(&dev->pm.clklock, flags);
			clk_disable(dev->pm.clock);
			return -EACCES;
		}
		mfc_debug(3, "End: enable protection\n");
		spin_unlock_irqrestore(&dev->pm.clklock, flags);
	} else {
		ret = s5p_mfc_mem_resume(dev->alloc_ctx[0]);
		if (ret < 0) {
			dev->pm.clock_on_steps |= 0x1 << 3;
			clk_disable(dev->pm.clock);
			return ret;
		}
	}

	dev->pm.clock_on_steps |= 0x1 << 4;
	if (IS_MFCV6(dev)) {
		spin_lock_irqsave(&dev->pm.clklock, flags);
		/* First user on v6: clear bus-reset bit 0 so traffic flows. */
		if ((atomic_inc_return(&dev->clk_ref) == 1) &&
				FW_HAS_BUS_RESET(dev)) {
			val = s5p_mfc_read_reg(dev, S5P_FIMV_MFC_BUS_RESET_CTRL);
			val &= ~(0x1);
			s5p_mfc_write_reg(dev, val, S5P_FIMV_MFC_BUS_RESET_CTRL);
		}
		spin_unlock_irqrestore(&dev->pm.clklock, flags);
	} else {
		atomic_inc_return(&dev->clk_ref);
	}

	dev->pm.clock_on_steps |= 0x1 << 5;
	state = atomic_read(&dev->clk_ref);
	mfc_debug(2, "+ %d\n", state);
	MFC_TRACE_DEV("-- clock_on : ref state(%d)\n", state);

	return 0;
}
/*
 * Reset the device.
 *
 * Two hardware generations, two procedures:
 *  - MFCv6+: pulse MFC_RESET (0xFEE then 0), zero the mailbox and version
 *    registers plus the REG_CLEAR window, then drop RISC_ON and pulse
 *    MFC_RESET again (0x1FFF then 0).
 *  - pre-v6: partial SW_RESET writes, a 10 ms settle delay, then poll
 *    MC_STATUS until both memory-controller busy bits (0x3) clear, with a
 *    MFC_BW_TIMEOUT ms deadline, before releasing the reset.
 *
 * Returns 0 on success, -EIO if the pre-v6 memory controller never idles.
 */
/* Reset the device */
static int s5p_mfc_reset(struct s5p_mfc_dev *dev)
{
	int i;
	unsigned int mc_status;
	unsigned long timeout;

	mfc_debug_enter();

	/* Stop procedure */
	/* FIXME: F/W can be access invalid address */
	/* Reset VI */
	/* s5p_mfc_write_reg(0x3f7, S5P_FIMV_SW_RESET); */

	if (IS_MFCV6(dev)) {
		/* Reset IP */
		s5p_mfc_write_reg(0xFEE, S5P_FIMV_MFC_RESET);	/* except RISC, reset */
		s5p_mfc_write_reg(0x0, S5P_FIMV_MFC_RESET);	/* reset release */

		/* Zero Initialization of MFC registers */
		s5p_mfc_write_reg(0, S5P_FIMV_RISC2HOST_CMD);
		s5p_mfc_write_reg(0, S5P_FIMV_HOST2RISC_CMD);
		s5p_mfc_write_reg(0, S5P_FIMV_FW_VERSION);

		/* Clear the scratch register window, one 32-bit word at a time. */
		for (i = 0; i < S5P_FIMV_REG_CLEAR_COUNT; i++)
			s5p_mfc_write_reg(0, S5P_FIMV_REG_CLEAR_BEGIN + (i*4));

		/* Reset */
		s5p_mfc_write_reg(0, S5P_FIMV_RISC_ON);
		s5p_mfc_write_reg(0x1FFF, S5P_FIMV_MFC_RESET);
		s5p_mfc_write_reg(0, S5P_FIMV_MFC_RESET);
	} else {
		s5p_mfc_write_reg(0x3f6, S5P_FIMV_SW_RESET);	/* reset RISC */
		s5p_mfc_write_reg(0x3e2, S5P_FIMV_SW_RESET);	/* All reset except for MC */
		mdelay(10);	/* busy-wait 10 ms before polling MC status */

		timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);

		/* Check MC status */
		do {
			if (time_after(jiffies, timeout)) {
				mfc_err("Timeout while resetting MFC.\n");
				return -EIO;
			}
			mc_status = s5p_mfc_read_reg(S5P_FIMV_MC_STATUS);
		} while (mc_status & 0x3);	/* wait for both MC channels to idle */

		s5p_mfc_write_reg(0x0, S5P_FIMV_SW_RESET);
		s5p_mfc_write_reg(0x3fe, S5P_FIMV_SW_RESET);
	}

	mfc_debug_leave();

	return 0;
}
int s5p_mfc_clock_on(struct s5p_mfc_dev *dev) { int ret = 0; int state, val; unsigned long flags; #ifdef CONFIG_MFC_USE_BUS_DEVFREQ MFC_TRACE_DEV("++ clock_on: Set clock rate(%d)\n", dev->curr_rate); mutex_lock(&dev->curr_rate_lock); s5p_mfc_clock_set_rate(dev, dev->curr_rate); mutex_unlock(&dev->curr_rate_lock); #endif ret = clk_enable(dev->pm.clock); if (ret < 0) return ret; if (dev->curr_ctx_drm && dev->is_support_smc) { spin_lock_irqsave(&dev->pm.clklock, flags); mfc_debug(3, "Begin: enable protection\n"); ret = exynos_smc(SMC_PROTECTION_SET, 0, dev->id, SMC_PROTECTION_ENABLE); if (!ret) { printk("Protection Enable failed! ret(%u)\n", ret); spin_unlock_irqrestore(&dev->pm.clklock, flags); clk_disable(dev->pm.clock); return ret; } mfc_debug(3, "End: enable protection\n"); spin_unlock_irqrestore(&dev->pm.clklock, flags); } else { ret = s5p_mfc_mem_resume(dev->alloc_ctx[0]); if (ret < 0) { clk_disable(dev->pm.clock); return ret; } } if (IS_MFCV6(dev)) { spin_lock_irqsave(&dev->pm.clklock, flags); if ((atomic_inc_return(&dev->clk_ref) == 1) && FW_HAS_BUS_RESET(dev)) { val = s5p_mfc_read_reg(dev, S5P_FIMV_MFC_BUS_RESET_CTRL); val &= ~(0x1); s5p_mfc_write_reg(dev, val, S5P_FIMV_MFC_BUS_RESET_CTRL); } spin_unlock_irqrestore(&dev->pm.clklock, flags); } else { atomic_inc_return(&dev->clk_ref); } state = atomic_read(&dev->clk_ref); mfc_debug(2, "+ %d\n", state); MFC_TRACE_DEV("-- clock_on : ref state(%d)\n", state); return 0; }
/*
 * Drop one reference on the MFC gate clock and power the IP down when the
 * count reaches zero.
 *
 * On MFCv6 with bus-reset-capable firmware, the last reference
 * (clk_ref 1 -> 0) first asserts the bus-reset request (bit 0 of
 * MFC_BUS_RESET_CTRL) and polls for the ack bit (0x2), giving up after
 * MFC_BW_TIMEOUT ms — on timeout it only logs and carries on.
 *
 * If the refcount has gone negative the imbalance is logged and the count
 * is clamped back to 0 without touching the clock. Otherwise: secure (DRM)
 * contexts disable SMC protection (returning early — without clk_disable's
 * companion debug print — if the SMC call reports failure), normal contexts
 * suspend the allocator context, and finally the gate clock is disabled.
 */
void s5p_mfc_clock_off(struct s5p_mfc_dev *dev)
{
	int state, val;
	unsigned long timeout, flags;
	int ret = 0;

	if (IS_MFCV6(dev)) {
		spin_lock_irqsave(&dev->pm.clklock, flags);
		/* Last user: request bus reset and wait for the hardware ack. */
		if ((atomic_dec_return(&dev->clk_ref) == 0) &&
				FW_HAS_BUS_RESET(dev)) {
			s5p_mfc_write_reg(dev, 0x1, S5P_FIMV_MFC_BUS_RESET_CTRL);

			timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);

			/* Check bus status */
			do {
				if (time_after(jiffies, timeout)) {
					mfc_err_dev("Timeout while resetting MFC.\n");
					break;
				}
				val = s5p_mfc_read_reg(dev,
						S5P_FIMV_MFC_BUS_RESET_CTRL);
			} while ((val & 0x2) == 0);
		}
		spin_unlock_irqrestore(&dev->pm.clklock, flags);
	} else {
		atomic_dec_return(&dev->clk_ref);
	}

	state = atomic_read(&dev->clk_ref);
	if (state < 0) {
		/* More offs than ons: log and clamp, leave the clock alone. */
		mfc_err_dev("Clock state is wrong(%d)\n", state);
		atomic_set(&dev->clk_ref, 0);
	} else {
		if (dev->curr_ctx_drm && dev->is_support_smc) {
			mfc_debug(3, "Begin: disable protection\n");
			spin_lock_irqsave(&dev->pm.clklock, flags);
			ret = exynos_smc(SMC_PROTECTION_SET, 0, dev->id,
						SMC_PROTECTION_DISABLE);
			/* NOTE(review): zero from exynos_smc() is treated as
			 * failure — confirm against the SMC convention. */
			if (!ret) {
				printk("Protection Disable failed! ret(%u)\n", ret);
				spin_unlock_irqrestore(&dev->pm.clklock, flags);
				clk_disable(dev->pm.clock);
				return;
			}
			mfc_debug(3, "End: disable protection\n");
			spin_unlock_irqrestore(&dev->pm.clklock, flags);
		} else {
			s5p_mfc_mem_suspend(dev->alloc_ctx[0]);
		}
		clk_disable(dev->pm.clock);
	}
	mfc_debug(2, "- %d\n", state);
}
/*
 * Assert the MFC bus reset and wait for the hardware to acknowledge it.
 *
 * Writes bit 0 of MFC_BUS_RESET_CTRL to request the reset, then polls the
 * same register until the ack bit (0x2) is set, giving up after
 * MFC_BW_TIMEOUT ms.
 *
 * Returns 0 on success, -EIO on timeout.
 */
static inline int s5p_mfc_bus_reset(struct s5p_mfc_dev *dev)
{
	unsigned int ctrl;
	unsigned long deadline;

	/* Request the bus reset. */
	s5p_mfc_write_reg(0x1, S5P_FIMV_MFC_BUS_RESET_CTRL);

	/* Poll for the acknowledge bit with a deadline. */
	deadline = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
	for (;;) {
		if (time_after(jiffies, deadline)) {
			mfc_err("Timeout while resetting MFC.\n");
			return -EIO;
		}
		ctrl = s5p_mfc_read_reg(S5P_FIMV_MFC_BUS_RESET_CTRL);
		if (ctrl & 0x2)
			break;
	}

	return 0;
}
int s5p_mfc_clock_on(void) { int ret = 0; int state, val; struct s5p_mfc_dev *dev = platform_get_drvdata(to_platform_device(pm->device)); unsigned long flags; #ifdef CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ s5p_mfc_clock_set_rate(dev, dev->curr_rate); #endif ret = clk_enable(pm->clock); if (ret < 0) return ret; if (!dev->curr_ctx_drm) { ret = s5p_mfc_mem_resume(dev->alloc_ctx[0]); if (ret < 0) { clk_disable(pm->clock); return ret; } } if (IS_MFCV6(dev)) { spin_lock_irqsave(&pm->clklock, flags); if ((atomic_inc_return(&clk_ref) == 1) && FW_HAS_BUS_RESET(dev)) { val = s5p_mfc_read_reg(S5P_FIMV_MFC_BUS_RESET_CTRL); val &= ~(0x1); s5p_mfc_write_reg(val, S5P_FIMV_MFC_BUS_RESET_CTRL); } spin_unlock_irqrestore(&pm->clklock, flags); } else { atomic_inc_return(&clk_ref); } state = atomic_read(&clk_ref); mfc_debug(3, "+ %d", state); return 0; }
void s5p_mfc_clock_off(void) { int state, val; unsigned long timeout, flags; struct s5p_mfc_dev *dev = platform_get_drvdata(to_platform_device(pm->device)); if (IS_MFCV6(dev)) { spin_lock_irqsave(&pm->clklock, flags); if ((atomic_dec_return(&clk_ref) == 0) && FW_HAS_BUS_RESET(dev)) { s5p_mfc_write_reg(0x1, S5P_FIMV_MFC_BUS_RESET_CTRL); timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT); /* Check bus status */ do { if (time_after(jiffies, timeout)) { mfc_err("Timeout while resetting MFC.\n"); break; } val = s5p_mfc_read_reg( S5P_FIMV_MFC_BUS_RESET_CTRL); } while ((val & 0x2) == 0); } spin_unlock_irqrestore(&pm->clklock, flags); } else { atomic_dec_return(&clk_ref); } state = atomic_read(&clk_ref); if (state < 0) { mfc_err("Clock state is wrong(%d)\n", state); atomic_set(&clk_ref, 0); } else { if (!dev->curr_ctx_drm) s5p_mfc_mem_suspend(dev->alloc_ctx[0]); clk_disable(pm->clock); } }
/*
 * Reset the device (dev-argument register-accessor variant).
 *
 * Two hardware generations, two procedures:
 *  - MFCv6+: zero the mailbox/version registers and the REG_CLEAR window,
 *    perform a bus reset on MFCv6X parts (propagating -EIO on failure),
 *    drop RISC_ON on pre-v8 parts, then pulse MFC_RESET (0x1FFF then 0).
 *  - pre-v6: partial SW_RESET writes, a 10 ms settle delay, then poll
 *    MC_STATUS until both memory-controller busy bits (0x3) clear, with a
 *    MFC_BW_TIMEOUT ms deadline, before releasing the reset.
 *
 * Returns 0 on success, -EINVAL if dev is NULL, -EIO on bus-reset failure
 * or memory-controller poll timeout.
 */
/* Reset the device */
static int s5p_mfc_reset(struct s5p_mfc_dev *dev)
{
	int i;
	unsigned int status;
	unsigned long timeout;

	mfc_debug_enter();

	if (!dev) {
		mfc_err("no mfc device to run\n");
		return -EINVAL;
	}

	/* Stop procedure */
	/* Reset VI */
	/* s5p_mfc_write_reg(dev, 0x3f7, S5P_FIMV_SW_RESET); */

	if (IS_MFCV6(dev)) {
		/* Zero Initialization of MFC registers */
		s5p_mfc_write_reg(dev, 0, S5P_FIMV_RISC2HOST_CMD);
		s5p_mfc_write_reg(dev, 0, S5P_FIMV_HOST2RISC_CMD);
		s5p_mfc_write_reg(dev, 0, S5P_FIMV_FW_VERSION);

		/* Clear the scratch register window, one 32-bit word at a time. */
		for (i = 0; i < S5P_FIMV_REG_CLEAR_COUNT; i++)
			s5p_mfc_write_reg(dev, 0,
					S5P_FIMV_REG_CLEAR_BEGIN + (i*4));

		/* MFCv6X needs an explicit bus reset before the IP reset. */
		if (IS_MFCv6X(dev))
			if (s5p_mfc_bus_reset(dev))
				return -EIO;

		/* v8 parts do not drop RISC_ON here. */
		if (!IS_MFCV8(dev))
			s5p_mfc_write_reg(dev, 0, S5P_FIMV_RISC_ON);

		s5p_mfc_write_reg(dev, 0x1FFF, S5P_FIMV_MFC_RESET);
		s5p_mfc_write_reg(dev, 0, S5P_FIMV_MFC_RESET);
	} else {
		s5p_mfc_write_reg(dev, 0x3f6, S5P_FIMV_SW_RESET);	/* reset RISC */
		s5p_mfc_write_reg(dev, 0x3e2, S5P_FIMV_SW_RESET);	/* All reset except for MC */
		mdelay(10);	/* busy-wait 10 ms before polling MC status */

		timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);

		/* Check MC status */
		do {
			if (time_after(jiffies, timeout)) {
				mfc_err_dev("Timeout while resetting MFC.\n");
				return -EIO;
			}
			status = s5p_mfc_read_reg(dev, S5P_FIMV_MC_STATUS);
		} while (status & 0x3);	/* wait for both MC channels to idle */

		s5p_mfc_write_reg(dev, 0x0, S5P_FIMV_SW_RESET);
		s5p_mfc_write_reg(dev, 0x3fe, S5P_FIMV_SW_RESET);
	}

	mfc_debug_leave();

	return 0;
}