static int sec_mst_notifier(struct notifier_block *self,
				unsigned long cmd, void *v)
{
	u64 r0 = 0, r1 = 0, r2 = 0, r3 = 0;
	int result=0;

	switch (cmd) {
	case LPA_ENTER:
		printk(KERN_INFO "MST_LDO_DRV]]] lpa enter\n");

		/* save gpios & set previous state */
		r0 = (0x83000011);
		result = exynos_smc(r0, r1, r2, r3);
		printk(KERN_INFO "MST_LDO_DRV]]] lpa enter after prev mode smc : %x\n", result);

		break;
	case LPA_EXIT:
		printk(KERN_INFO "MST_LDO_DRV]]] lpa exit\n");

		/* restore gpios */
		r0 = (0x8300000d);
		result = exynos_smc(r0, r1, r2, r3);
		rt = result;
		printk(KERN_INFO "MST_LDO_DRV]]] lpa exit after restore smc : %x\n", result);

		break;
	}

	return NOTIFY_OK;
}
static int exynos_secure_mode_disable(struct kbase_device *kbdev)
{
	/* Turn off secure mode and reset GPU : TZPC */
	int ret = 0;

	/*
	 * BUG FIX: the original condition used '&&', which (a) never caught
	 * a non-NULL device lacking secure-mode support, and (b) dereferenced
	 * kbdev precisely when it was NULL.  '||' with short-circuit
	 * evaluation gives the intended NULL-safe guard.
	 */
	if (!kbdev || !kbdev->secure_mode_support) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong operation! DDK cannot support Secure Rendering\n", __func__);
		ret = -EINVAL;
		goto secure_out;
	}
#if defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)
	gpu_cacheclean(kbdev);

#if MALI_SEC_ASP_SECURE_RENDERING
	/* Unprotect the CRC buffer region before leaving secure mode. */
	ret = exynos_smc(SMC_DRM_SECBUF_CFW_UNPROT,
                     kbdev->sec_sr_info.secure_crc_phys, kbdev->sec_sr_info.secure_crc_sizes,
                     PROT_G3D);

	if (ret != DRMDRV_OK) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: CRC : failed to unset secure buffer region by err 0x%x, physical addr 0x%08x\n",
			__func__, ret, (unsigned int)kbdev->sec_sr_info.secure_crc_phys);
		goto secure_out;
	}
#endif

	ret = exynos_smc(SMC_PROTECTION_SET, 0,
                     PROT_G3D, SMC_PROTECTION_DISABLE);

	/* Firmware signals success with SMC_TZPC_OK; normalize to 0. */
	if (ret == SMC_TZPC_OK)
		ret = 0;
#endif /* defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) */
	/*
	 * BUG FIX: the label used to sit inside the #if block above, so the
	 * unconditional 'goto secure_out' failed to compile when content
	 * path protection was disabled.  Keep it outside the conditional.
	 */
secure_out:
	return ret;
}
static int __init exynos4_l2x0_cache_init(void)
{
	/* PL310 tuning values; 4412 rev1.0+ gets the richer prefetch setup. */
	const u32 tag_lat = 0x110;
	const u32 data_lat = soc_is_exynos4210() ? 0x110 : 0x120;
	const u32 pref = (soc_is_exynos4412() &&
			  samsung_rev() >= EXYNOS4412_REV_1_0) ?
			  0x71000007 : 0x30000007;
	const u32 auxval = 0x7C470001;
	const u32 auxmask = 0xC200FFFF;

#ifdef CONFIG_ARM_TRUSTZONE
	/* Secure firmware owns the L2 controller; program it via SMC. */
	exynos_smc(SMC_CMD_L2X0SETUP1, tag_lat, data_lat, pref);
	exynos_smc(SMC_CMD_L2X0SETUP2,
		   L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
		   auxval, auxmask);
	exynos_smc(SMC_CMD_L2X0INVALL, 0, 0, 0);
	exynos_smc(SMC_CMD_L2X0CTRL, 1, 0, 0);
#else
	/* Non-TrustZone: write the PL310 registers directly. */
	__raw_writel(tag_lat, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
	__raw_writel(data_lat, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
	__raw_writel(pref, S5P_VA_L2CC + L2X0_PREFETCH_CTRL);
	__raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
		     S5P_VA_L2CC + L2X0_POWER_CTRL);
#endif

	l2x0_init(S5P_VA_L2CC, auxval, auxmask);

#ifdef CONFIG_ARM_TRUSTZONE
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
	/* Errata workarounds need debug-register writes through firmware. */
	outer_cache.set_debug = exynos4_l2x0_set_debug;
#endif
#endif

	return 0;
}
Example #4
0
static void exynos_l2_write_sec(unsigned long val, unsigned reg)
{
	/* Tracks whether the L2 cache was already enabled via firmware. */
	static int l2cache_enabled;

	if (reg == L2X0_CTRL) {
		if (val & L2X0_CTRL_EN) {
			/*
			 * Before the cache can be enabled, due to firmware
			 * design, SMC_CMD_L2X0INVALL must be called.
			 */
			if (!l2cache_enabled) {
				exynos_smc(SMC_CMD_L2X0INVALL, 0, 0, 0);
				l2cache_enabled = 1;
			}
		} else {
			l2cache_enabled = 0;
		}
		exynos_smc(SMC_CMD_L2X0CTRL, val, 0, 0);
	} else if (reg == L2X0_DEBUG_CTRL) {
		exynos_smc(SMC_CMD_L2X0DEBUG, val, 0, 0);
	} else {
		/* Only CTRL and DEBUG_CTRL are routed through firmware. */
		WARN_ONCE(1, "%s: ignoring write to reg 0x%x\n", __func__, reg);
	}
}
ssize_t fmpfw_hash_update(struct fmp_info *info, struct hash_data *hdata,
				struct scatterlist *sg, size_t len)
{
	int rc = 0;
	unsigned long info_phys;
	struct device *dev = info->dev;
	struct hmac_sha256_fmpfw_info *fmpfw_info = hdata->fmpfw_info;

	/* Describe this UPDATE step for the firmware, flushing both the
	 * input data and the descriptor so the secure world sees them. */
	fmpfw_info->s.step = UPDATE;
	/* assumes sg_phys() fits in 32 bits on this platform — TODO confirm */
	fmpfw_info->s.input = (uint32_t)sg_phys(sg);
	__flush_dcache_area(sg_virt(sg), len);
	fmpfw_info->s.input_len = len;
	__flush_dcache_area(fmpfw_info, sizeof(*fmpfw_info));
	info_phys = virt_to_phys(fmpfw_info);

	reinit_completion(&hdata->async.result->completion);
	if (fmpfw_info->hmac_mode) {
		rc = exynos_smc(SMC_CMD_FMP, FMP_FW_HMAC_SHA2_TEST, info_phys, 0);
		if (unlikely(rc)) {
			dev_err(dev, "Fail to smc call for FMPFW HMAC SHA256 update. ret = 0x%x\n", rc);
			rc = -EFAULT;
		}
	} else {
		rc = exynos_smc(SMC_CMD_FMP, FMP_FW_SHA2_TEST, info_phys, 0);
		if (unlikely(rc)) {
			dev_err(dev, "Fail to smc call for FMPFW SHA256 update. ret = 0x%x\n", rc);
			rc = -EFAULT;
		}
	}

	/* Wait for the async completion even on SMC failure (rc carries it). */
	return waitfor(info, hdata->async.result, rc);
}
void set_refresh_rate(unsigned int auto_refresh)
{
	/*
	 * uRlk = FIN / 100000;
	 * refresh_usec = (unsigned int)(fMicrosec * 10);
	 * uRegVal = ((unsigned int)(uRlk * uMicroSec / 100)) - 1;
	 */
	pr_debug("@@@ set_auto_refresh = 0x%02x\n", auto_refresh);

#ifdef CONFIG_ARCH_EXYNOS4
#ifdef CONFIG_ARM_TRUSTZONE
	/* DMC registers are secure; write TIMING_AREF through firmware. */
	exynos_smc(SMC_CMD_REG,
		SMC_REG_ID_SFR_W((EXYNOS4_PA_DMC0_4212 + TIMING_AREF_OFFSET)),
		auto_refresh, 0);
	exynos_smc(SMC_CMD_REG,
		SMC_REG_ID_SFR_W((EXYNOS4_PA_DMC1_4212 + TIMING_AREF_OFFSET)),
		auto_refresh, 0);
#else
	/* Update the auto-refresh period (TIMING_AREF) on both DMCs. */
	__raw_writel(auto_refresh, S5P_VA_DMC0 + TIMING_AREF_OFFSET);
	__raw_writel(auto_refresh, S5P_VA_DMC1 + TIMING_AREF_OFFSET);
#endif
#endif	/* CONFIG_ARCH_EXYNOS4 */
}
Example #7
0
/*
 * Initialize the PL310 L2 cache controller through secure firmware.
 * The bitfield constants are asserted against the raw values the Linux
 * implementation uses, to keep the two in sync.
 */
int
exynos4_l2cc_init(void)
{
	const uint32_t tag_latency  = 0x110;
	const uint32_t data_latency = IS_EXYNOS4410_P() ? 0x110 : 0x120;
	/* 0111 0001 0000 0000 0000 0000 0000 0111 */
	const uint32_t prefetch4412 =
	    PREFETCHCTL_DBLLINEF_EN  |
	    PREFETCHCTL_INSTRPREF_EN |
	    PREFETCHCTL_DATAPREF_EN  |
	    PREFETCHCTL_PREF_DROP_EN |
	    PREFETCHCTL_PREFETCH_OFFSET_7;
	/* 0011 0000 0000 0000 0000 0000 0000 0111 */
	const uint32_t prefetch4412_r0 =
	    PREFETCHCTL_INSTRPREF_EN |
	    PREFETCHCTL_DATAPREF_EN  |
	    PREFETCHCTL_PREFETCH_OFFSET_7;
	/* 0111 1100 0100 0111 0000 0000 0000 0001 */
	const uint32_t aux_val =
	    AUXCTL_EARLY_BRESP_EN |
	    AUXCTL_I_PREFETCH     |
	    AUXCTL_D_PREFETCH     |
	    AUXCTL_NS_INT_ACC_CTL |
	    AUXCTL_NS_INT_LOCK_EN |
	    AUXCTL_SHARED_ATT_OVR |
	    AUXCTL_WAY_SIZE_RSVD7 << 16 | /* why rsvd7 ??? */
	    AUXCTL_FULL_LINE_WR0;
	/* 1100 0010 0000 0000 1111 1111 1111 1111 */
	const uint32_t aux_keepmask =
	    AUXCTL_RSVD31         |
	    AUXCTL_EARLY_BRESP_EN |
	    AUXCTL_CACHE_REPL_RR  |
	    AUXCTL_SH_ATTR_INV_ENA|
	    AUXCTL_EXCL_CACHE_CFG |
	    AUXCTL_ST_BUF_DEV_LIM_EN |
	    AUXCTL_HIPRO_SO_DEV_EN |
	    AUXCTL_FULL_LINE_WR0  |
	    0xffff;
	/* rev0 silicon lacks the double-linefill/prefetch-drop features */
	const uint32_t prefetch = IS_EXYNOS4412_R0_P() ?
	    prefetch4412_r0 : prefetch4412;

	/* check the bitmaps are the same as the linux implementation uses */
	KASSERT(prefetch4412    == 0x71000007);
	KASSERT(prefetch4412_r0 == 0x30000007);
	KASSERT(aux_val         == 0x7C470001);
	KASSERT(aux_keepmask    == 0xC200FFFF);

	/* program the controller via the secure monitor */
	exynos_smc(SMC_CMD_L2X0SETUP1, tag_latency, data_latency, prefetch);
	exynos_smc(SMC_CMD_L2X0SETUP2,
	    POWERCTL_DYNCLKGATE | POWERCTL_STANDBY,
	    aux_val, aux_keepmask);
	exynos_smc(SMC_CMD_L2X0INVALL, 0, 0, 0);
	exynos_smc(SMC_CMD_L2X0CTRL, 1, 0, 0);

	return 0;
}
/*
 * Suspend finisher: save core/cluster state through secure firmware and
 * request sleep.  If the SMCs return, the system did not power down,
 * so return 1 to abort the suspend.
 */
static int exynos_cpu_suspend(unsigned long arg)
{
	flush_cache_all();	/* FIXME */

	exynos_smc(SMC_CMD_SAVE, OP_TYPE_CORE, SMC_POWERSTATE_SLEEP, 0);
	exynos_smc(SMC_CMD_SHUTDOWN, OP_TYPE_CLUSTER, SMC_POWERSTATE_SLEEP, 0);

	/* Only reached when the firmware call returned without sleeping. */
	pr_info("%s: return to originator\n", __func__);

	return 1; /* abort suspend */
}
Example #9
0
static int exynos_secure_mem_enable(void)
{
	/* enable secure world mode : TZASC */
	int ret;

	flush_all_cpu_caches();
	ret = exynos_smc(SMC_MEM_PROT_SET, 0, 0, 1);
	/* On failure, put the protection bit back to its cleared state;
	 * the original error code is still returned to the caller. */
	if (ret == SMC_CALL_ERROR)
		exynos_smc(SMC_MEM_PROT_SET, 0, 0, 0);

	return ret;
}
static int exyswd_rng_resume(struct device *dev)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&hwrandom_lock, irq_flags);
#if defined(CONFIG_EXYRNG_FIPS_COMPLIANCE)
	/* FIPS builds: tell the firmware RNG we are resuming. */
	exynos_smc(SMC_CMD_RANDOM, HWRNG_RESUME, 0, 0);
#endif
	/* Re-initialize the HW RNG only if a reader was active. */
	if (hwrng_read_flag)
		exynos_smc(SMC_CMD_RANDOM, HWRNG_INIT, 0, 0);
	spin_unlock_irqrestore(&hwrandom_lock, irq_flags);

	return 0;
}
void exynos5_cpu_suspend(void)
{
	unsigned int mask;

	/* Disable wakeup by EXT_GIC */
	mask = __raw_readl(EXYNOS5_WAKEUP_MASK);
	mask |= EXYNOS5_DEFAULT_WAKEUP_MACK;
	__raw_writel(mask, EXYNOS5_WAKEUP_MASK);

	/* GPS LPI mask applies only to pre-1.0 silicon. */
	if (samsung_rev() < EXYNOS5250_REV_1_0)
		__raw_writel(0x10000, EXYNOS5_GPS_LPI);

	if (samsung_rev() >= EXYNOS5250_REV_1_0)
		exynos4_reset_assert_ctrl(0);

#ifdef CONFIG_ARM_TRUSTZONE
	exynos_smc(SMC_CMD_SLEEP, 0, 0, 0);
#else
	/* issue the standby signal into the pm unit. */
	cpu_do_idle();
#endif
}
Example #12
0
void exynos4_cpu_suspend(void)
{
	unsigned int regval;

	/* C2C setups (non-4210): reconfigure DMC clocking before sleep. */
	if ((!soc_is_exynos4210()) && (exynos4_is_c2c_use())) {
		/* Gating CLK_IEM_APC & Enable CLK_SSS */
		regval = __raw_readl(EXYNOS4_CLKGATE_IP_DMC);
		regval &= ~(0x1 << 17);
		regval |= (0x1 << 4);
		__raw_writel(regval, EXYNOS4_CLKGATE_IP_DMC);

		/* Set MAX divider for PWI */
		regval = __raw_readl(EXYNOS4_CLKDIV_DMC1);
		regval |= (0xF << 8);
		__raw_writel(regval, EXYNOS4_CLKDIV_DMC1);

		/* Set clock source for PWI */
		regval = __raw_readl(EXYNOS4_CLKSRC_DMC);
		regval &= ~EXYNOS4_CLKSRC_DMC_MASK;
		regval |= ((0x6 << 16) | (0x1 << 12));
		__raw_writel(regval, EXYNOS4_CLKSRC_DMC);
	}

	/* Flush the outer cache before losing power. */
	outer_flush_all();

#ifdef CONFIG_ARM_TRUSTZONE
	exynos_smc(SMC_CMD_SLEEP, 0, 0, 0);
#else
	/* issue the standby signal into the pm unit. */
	cpu_do_idle();
#endif
}
/*
 * Enable the MFC clock and, for DRM (secure) contexts, raise the SMC
 * protection before use.  Returns 0 on success or a negative errno.
 */
int s5p_mfc_clock_on(struct s5p_mfc_dev *dev)
{
	int ret = 0;
	int state, val;
	unsigned long flags;

	/* clock_on_steps is a progress bitmask used for post-mortem debug. */
	dev->pm.clock_on_steps = 1;
	MFC_TRACE_DEV("++ clock_on: Set clock rate(%d)\n", dev->curr_rate);
	ret = clk_enable(dev->pm.clock);
	if (ret < 0)
		return ret;

	if (dev->pm.base_type != MFCBUF_INVALID)
		s5p_mfc_init_memctrl(dev, dev->pm.base_type);

	dev->pm.clock_on_steps |= 0x1 << 1;
	if (dev->curr_ctx_drm && dev->is_support_smc) {
		spin_lock_irqsave(&dev->pm.clklock, flags);
		mfc_debug(3, "Begin: enable protection\n");
		ret = exynos_smc(SMC_PROTECTION_SET, 0,
					dev->id, SMC_PROTECTION_ENABLE);
		dev->pm.clock_on_steps |= 0x1 << 2;
		/* NOTE(review): zero is treated as failure here, implying the
		 * firmware returns non-zero on success — confirm against the
		 * SMC_PROTECTION_SET contract. */
		if (!ret) {
			printk("Protection Enable failed! ret(%u)\n", ret);
			spin_unlock_irqrestore(&dev->pm.clklock, flags);
			clk_disable(dev->pm.clock);
			return -EACCES;
		}
		mfc_debug(3, "End: enable protection\n");
		spin_unlock_irqrestore(&dev->pm.clklock, flags);
	} else {
		/* Non-secure path: restore the memory context instead. */
		ret = s5p_mfc_mem_resume(dev->alloc_ctx[0]);
		if (ret < 0) {
			dev->pm.clock_on_steps |= 0x1 << 3;
			clk_disable(dev->pm.clock);
			return ret;
		}
	}

	dev->pm.clock_on_steps |= 0x1 << 4;
	if (IS_MFCV6(dev)) {
		spin_lock_irqsave(&dev->pm.clklock, flags);
		/* First reference on MFCv6: clear the bus-reset bit so
		 * traffic can flow (only when firmware supports it). */
		if ((atomic_inc_return(&dev->clk_ref) == 1) &&
				FW_HAS_BUS_RESET(dev)) {
			val = s5p_mfc_read_reg(dev, S5P_FIMV_MFC_BUS_RESET_CTRL);
			val &= ~(0x1);
			s5p_mfc_write_reg(dev, val, S5P_FIMV_MFC_BUS_RESET_CTRL);
		}
		spin_unlock_irqrestore(&dev->pm.clklock, flags);
	} else {
		atomic_inc_return(&dev->clk_ref);
	}

	dev->pm.clock_on_steps |= 0x1 << 5;
	state = atomic_read(&dev->clk_ref);
	mfc_debug(2, "+ %d\n", state);
	MFC_TRACE_DEV("-- clock_on : ref state(%d)\n", state);

	return 0;
}
Example #14
0
/* Enter idle/sleep through the secure monitor; always reports success. */
int
exynos_do_idle(void)
{
	exynos_smc(SMC_CMD_SLEEP, 0, 0, 0);

	return 0;
}
static int mem_security_request(struct link_device *ld, struct io_device *iod,
				unsigned long arg)
{
	unsigned long region_size, region_addr;
	int ret;
	struct modem_sec_req msr;

	/* Pull the security request descriptor from userspace. */
	ret = copy_from_user(&msr, (const void __user *)arg, sizeof(msr));
	if (ret) {
		mif_err("%s: ERR! copy_from_user fail\n", ld->name);
		ret = -EFAULT;
		goto exit;
	}

	/* Resolve the shared-memory region for the requested mode and
	 * hand it to the secure world. */
	region_size = shm_get_security_size(msr.mode, msr.size);
	region_addr = shm_get_security_addr(msr.mode);
	mif_err("mode=%lu, size=%lu, addr=%lu\n", msr.mode, region_size, region_addr);
	ret = exynos_smc(SMC_ID, msr.mode, region_size, region_addr);
	mif_info("%s: return_value=%d\n", ld->name, ret);

	/* To do: will be removed */
	pmu_cp_reg_dump();

exit:
	return ret;
}
Example #16
0
static int gsc_m2m_streamon(struct file *file, void *fh,
			   enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;
	struct exynos_platform_gscaler *pdata = gsc->pdata;

	/* The source and target color format need to be set */
	if (V4L2_TYPE_IS_OUTPUT(type)) {
		if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx))
			return -EINVAL;
	} else {
		if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx))
			return -EINVAL;
	}

	/* Raise bus/INT QoS for the duration of streaming. */
	gsc_pm_qos_ctrl(gsc, GSC_QOS_ON, pdata->mif_min, pdata->int_min);

	/* Secure (DRM) content: enable protection through firmware. */
	if (gsc->protected_content) {
		int prot_id = gsc->id + 3;

		exynos_smc(SMC_PROTECTION_SET, 0, prot_id, 1);
		gsc_dbg("DRM enable");
	}

	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
Example #17
0
static int gsc_m2m_release(struct file *file)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;

	gsc_dbg("pid: %d, state: 0x%lx, refcnt= %d",
		task_pid_nr(current), gsc->state, gsc->m2m.refcnt);

	/* Tear down the m2m context and the V4L2 file handle. */
	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	if (--gsc->m2m.refcnt <= 0)
		clear_bit(ST_M2M_OPEN, &gsc->state);

	/* Abnormal close: protection should already have been dropped;
	 * disable it defensively here. */
	if (gsc->protected_content) {
		int prot_id = gsc->id + 3;

		gsc_err("DRM should be disabled before device close");
		exynos_smc(SMC_PROTECTION_SET, 0, prot_id, 0);
		gsc_set_protected_content(gsc, false);
	}

	kfree(ctx);
	return 0;
}
Example #18
0
/*
 * Ask secure firmware to boot the given secondary CPU.  Always reports
 * success; the SMC return value is ignored.
 */
int
exynos_cpu_boot(int cpu)
{
	exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0);

	return 0;
}
/*
 * Power-on health test: drain EXYRNG_START_UP_SIZE bits of random data
 * from the secure HW RNG.  Returns 0 on success, -EFAULT on a test or
 * read failure.
 */
static int exynos_swd_startup_test(void)
{
	uint32_t start_up_size;
	int ret = 0;

	/* assumes EXYRNG_START_UP_SIZE is a multiple of 32 — TODO confirm */
	start_up_size = EXYRNG_START_UP_SIZE;

	while (start_up_size) {
		ret = exynos_smc(SMC_CMD_RANDOM, HWRNG_GET_DATA, 1, 0);
		if (ret == HWRNG_RET_RETRY_ERROR) {
			/* NOTE(review): retries indefinitely if the HW never
			 * becomes ready; a bounded retry count may be safer. */
			usleep_range(50, 100);
			continue;
		}

		if (ret == HWRNG_RET_TEST_ERROR) {
			exynos_swd_test_fail();
			return -EFAULT;
		}

		if (ret != HWRNG_RET_OK) {
			/*
			 * BUG FIX: the debug message used to sit after the
			 * return statement and was unreachable; log first.
			 */
			exyrng_debug("[ExyRNG] failed to get random\n");
			return -EFAULT;
		}

		start_up_size -= 32;
	}

	return 0;
}
static u32 exynos_smc_read(enum cp_control reg)
{
	u32 cp_ctrl;

	cp_ctrl = exynos_smc(SMC_ID, READ_CTRL, 0, reg);
	if (!(cp_ctrl & 0xffff)) {
		cp_ctrl >>= 16;
	} else {
Example #21
0
static int exynos_secure_mem_disable(void)
{
	/* Turn off secure world mode : TZASC */
	return exynos_smc(SMC_MEM_PROT_SET, 0, 0, 0);
}
int s5p_mfc_clock_on(struct s5p_mfc_dev *dev)
{
	int ret = 0;
	int state, val;
	unsigned long flags;

#ifdef CONFIG_MFC_USE_BUS_DEVFREQ
	MFC_TRACE_DEV("++ clock_on: Set clock rate(%d)\n", dev->curr_rate);
	mutex_lock(&dev->curr_rate_lock);
	s5p_mfc_clock_set_rate(dev, dev->curr_rate);
	mutex_unlock(&dev->curr_rate_lock);
#endif
	ret = clk_enable(dev->pm.clock);
	if (ret < 0)
		return ret;

	if (dev->curr_ctx_drm && dev->is_support_smc) {
		spin_lock_irqsave(&dev->pm.clklock, flags);
		mfc_debug(3, "Begin: enable protection\n");
		ret = exynos_smc(SMC_PROTECTION_SET, 0,
					dev->id, SMC_PROTECTION_ENABLE);
		if (!ret) {
			printk("Protection Enable failed! ret(%u)\n", ret);
			spin_unlock_irqrestore(&dev->pm.clklock, flags);
			clk_disable(dev->pm.clock);
			return ret;
		}
		mfc_debug(3, "End: enable protection\n");
		spin_unlock_irqrestore(&dev->pm.clklock, flags);
	} else {
		ret = s5p_mfc_mem_resume(dev->alloc_ctx[0]);
		if (ret < 0) {
			clk_disable(dev->pm.clock);
			return ret;
		}
	}

	if (IS_MFCV6(dev)) {
		spin_lock_irqsave(&dev->pm.clklock, flags);
		if ((atomic_inc_return(&dev->clk_ref) == 1) &&
				FW_HAS_BUS_RESET(dev)) {
			val = s5p_mfc_read_reg(dev, S5P_FIMV_MFC_BUS_RESET_CTRL);
			val &= ~(0x1);
			s5p_mfc_write_reg(dev, val, S5P_FIMV_MFC_BUS_RESET_CTRL);
		}
		spin_unlock_irqrestore(&dev->pm.clklock, flags);
	} else {
		atomic_inc_return(&dev->clk_ref);
	}

	state = atomic_read(&dev->clk_ref);
	mfc_debug(2, "+ %d\n", state);
	MFC_TRACE_DEV("-- clock_on : ref state(%d)\n", state);

	return 0;
}
Example #23
0
/* MALI_SEC_SECURE_RENDERING */
static int exynos_secure_mode_enable(void)
{
	/* enable secure mode : TZPC */
	return exynos_smc(SMC_PROTECTION_SET, 0, 0xc, SMC_PROTECTION_ENABLE);
}
Example #24
0
static int exynos_secure_mode_disable(void)
{
	/* Turn off secure mode and reset GPU : TZPC */
	return exynos_smc(SMC_PROTECTION_SET, 0, 0xc, SMC_PROTECTION_DISABLE);
}
Example #25
0
static int sec_mst_notifier(struct notifier_block *self,
				unsigned long cmd, void *v)
{
	u64 r0 = 0, r1 = 0, r2 = 0, r3 = 0;
	int result=0;
	switch (cmd) {
	case LPA_ENTER:
		/* save gpios & set previous state */
		r0 = (0x83000011);
		result = exynos_smc(r0, r1, r2, r3);
		break;
	case LPA_EXIT:
		/* restore gpios */
		r0 = (0x8300000d);
		result = exynos_smc(r0, r1, r2, r3);
		rt = result;
		break;
	}
	return NOTIFY_OK;
}
static int exyswd_rng_suspend(struct device *dev)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&hwrandom_lock, irq_flags);
	/* Shut the HW RNG down only if a reader is currently active. */
	if (hwrng_read_flag)
		exynos_smc(SMC_CMD_RANDOM, HWRNG_EXIT, 0, 0);
	spin_unlock_irqrestore(&hwrandom_lock, irq_flags);

	return 0;
}
Example #27
0
/*
 * Suspend finisher: flush caches and enter sleep through secure
 * firmware.  Returning here means the SMC came back without powering
 * down; clear the boot flag and return 1 to abort the suspend.
 */
static int exynos_cpu_suspend(unsigned long arg)
{
	flush_cache_all();
	outer_flush_all();

	exynos_smc(SMC_CMD_SLEEP, 0, 0, 0);

	/* Only reached when the firmware call returned without sleeping. */
	pr_info("Failed to suspend the system\n");
	writel(0, sysram_ns_base_addr + EXYNOS_BOOT_FLAG);
	return 1;
}
/*
 * Drop one MFC clock reference; on the last reference assert the bus
 * reset (MFCv6), then drop DRM protection or suspend memory, and
 * finally gate the clock.
 */
void s5p_mfc_clock_off(struct s5p_mfc_dev *dev)
{
	int state, val;
	unsigned long timeout, flags;
	int ret = 0;

	if (IS_MFCV6(dev)) {
		spin_lock_irqsave(&dev->pm.clklock, flags);
		/* Last reference: request a bus reset and poll until the
		 * status bit reports completion (bounded by MFC_BW_TIMEOUT). */
		if ((atomic_dec_return(&dev->clk_ref) == 0) &&
				FW_HAS_BUS_RESET(dev)) {
			s5p_mfc_write_reg(dev, 0x1, S5P_FIMV_MFC_BUS_RESET_CTRL);

			timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
			/* Check bus status */
			do {
				if (time_after(jiffies, timeout)) {
					mfc_err_dev("Timeout while resetting MFC.\n");
					break;
				}
				val = s5p_mfc_read_reg(dev,
						S5P_FIMV_MFC_BUS_RESET_CTRL);
			} while ((val & 0x2) == 0);
		}
		spin_unlock_irqrestore(&dev->pm.clklock, flags);
	} else {
		atomic_dec_return(&dev->clk_ref);
	}

	state = atomic_read(&dev->clk_ref);
	if (state < 0) {
		/* Underflow: more offs than ons; clamp and skip gating. */
		mfc_err_dev("Clock state is wrong(%d)\n", state);
		atomic_set(&dev->clk_ref, 0);
	} else {
		if (dev->curr_ctx_drm && dev->is_support_smc) {
			mfc_debug(3, "Begin: disable protection\n");
			spin_lock_irqsave(&dev->pm.clklock, flags);
			ret = exynos_smc(SMC_PROTECTION_SET, 0,
					dev->id, SMC_PROTECTION_DISABLE);
			/* NOTE(review): zero is treated as failure, implying
			 * non-zero means success for this firmware — confirm
			 * against the SMC_PROTECTION_SET contract. */
			if (!ret) {
				printk("Protection Disable failed! ret(%u)\n", ret);
				spin_unlock_irqrestore(&dev->pm.clklock, flags);
				clk_disable(dev->pm.clock);
				return;
			}
			mfc_debug(3, "End: disable protection\n");
			spin_unlock_irqrestore(&dev->pm.clklock, flags);
		} else {
			s5p_mfc_mem_suspend(dev->alloc_ctx[0]);
		}
		clk_disable(dev->pm.clock);
	}
	mfc_debug(2, "- %d\n", state);
}
Example #29
0
/* Verify the FMP firmware image via the secure monitor; 0 on success,
 * -1 on failure (the raw SMC error is only logged). */
int do_fmp_fw_integrity_check(void)
{
	int err = exynos_smc(SMC_CMD_FMP, FMP_FW_INTEGRITY, 0, 0);

	if (err) {
		printk(KERN_ERR "Fail to check integrity for FMP F/W. err = 0x%x\n", err);
		return -1;
	}

	return 0;
}
static int sec_mst_notifier(struct notifier_block *self,
				unsigned long cmd, void *v)
{
	/* SMC argument registers; only r0 (the function ID) is non-zero. */
	u64 r0 = 0, r1 = 0, r2 = 0, r3 = 0;
	int result = 0;

	switch (cmd) {
	case LPA_ENTER:
		/* BUG FIX: the message lacked a trailing '\n', so it could be
		 * merged with the following printk line in the kernel log. */
		printk(KERN_INFO "MST_FTM_DRV]]] lpa enter\n");

		/* save gpios & set previous state */
		r0 = 0x83000011;
		result = exynos_smc(r0, r1, r2, r3);

		if (result == MST_NOT_SUPPORT)
			printk(KERN_INFO "MST_FTM_DRV]]] lpa enter do nothing after prev mode smc : %x\n", result);
		else
			printk(KERN_INFO "MST_FTM_DRV]]] lpa enter success after prev mode smc : %x\n", result);

		break;
	case LPA_EXIT:
		/* BUG FIX: same missing newline as above. */
		printk(KERN_INFO "MST_FTM_DRV]]] lpa exit\n");

		/* restore gpios */
		r0 = 0x8300000d;
		result = exynos_smc(r0, r1, r2, r3);

		if (result == MST_NOT_SUPPORT)
			printk(KERN_INFO "MST_FTM_DRV]]] lpa exit do nothing after restore smc : %x\n", result);
		else
			printk(KERN_INFO "MST_FTM_DRV]]] lpa exit success after restore smc : %x\n", result);

		break;
	}

	return NOTIFY_OK;
}