int imx_otp_handler(uint32_t smc_fid,
		void *handle,
		u_register_t x1,
		u_register_t x2)
{
	int ret;
	uint32_t fuse;

	switch (smc_fid) {
	case IMX_SIP_OTP_READ:
		ret = sc_misc_otp_fuse_read(ipc_handle, x1, &fuse);
		SMC_RET2(handle, ret, fuse);
		break;
	case IMX_SIP_OTP_WRITE:
		ret = sc_misc_otp_fuse_write(ipc_handle, x1, x2);
		SMC_RET1(handle, ret);
		break;
	default:
		ret = SMC_UNK;
		SMC_RET1(handle, ret);
		break;
	}

	return ret;
}
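/*
 * Note on the SMC_RETx macros used throughout these handlers: they complete
 * the call by writing the return value(s) into the saved GP-register context
 * and returning the context handle, which the EL3 exit path uses to ERET back
 * to the caller. A minimal sketch of that behaviour (inferred from usage
 * here, not the authoritative TF-A definition):
 */
#define SMC_RET1_SKETCH(_h, _x0)	{				\
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0));	\
	return (uintptr_t)(_h);						\
}

#define SMC_RET2_SKETCH(_h, _x0, _x1)	{				\
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1));	\
	SMC_RET1_SKETCH(_h, _x0);					\
}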
/**
 * sip_svc_smc_handler() - Top-level SiP Service SMC handler
 *
 * Handler for all SiP SMC calls. Handles standard SiP requests
 * and calls the PM SMC handler if the call is for a PM-API function.
 */
uint64_t sip_svc_smc_handler(uint32_t smc_fid,
			     uint64_t x1,
			     uint64_t x2,
			     uint64_t x3,
			     uint64_t x4,
			     void *cookie,
			     void *handle,
			     uint64_t flags)
{
	/* Let PM SMC handler deal with PM-related requests */
	if (is_pm_fid(smc_fid)) {
		return pm_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
				      flags);
	}

	switch (smc_fid) {
	case ZYNQMP_SIP_SVC_CALL_COUNT:
		/* PM functions + default functions */
		SMC_RET1(handle, PM_API_MAX + 2);

	case ZYNQMP_SIP_SVC_UID:
		SMC_UUID_RET(handle, zynqmp_sip_uuid);

	case ZYNQMP_SIP_SVC_VERSION:
		SMC_RET2(handle, SIP_SVC_VERSION_MAJOR, SIP_SVC_VERSION_MINOR);

	default:
		WARN("Unimplemented SiP Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
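/*
 * A plausible shape for the is_pm_fid() predicate used above, assuming the
 * PM FIDs occupy one contiguous range. PM_FID_MIN and PM_FID_MAX are
 * hypothetical names; the platform's sip_svc_setup.c holds the real
 * definition.
 */
#define is_pm_fid_sketch(_fid) \
	(((_fid) >= PM_FID_MIN) && ((_fid) <= PM_FID_MAX))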
/*******************************************************************************
 * This function is responsible for handling all SiP calls from the NS world
 ******************************************************************************/
uint64_t tegra_sip_handler(uint32_t smc_fid,
			   uint64_t x1,
			   uint64_t x2,
			   uint64_t x3,
			   uint64_t x4,
			   void *cookie,
			   void *handle,
			   uint64_t flags)
{
	uint32_t ns;
	int err;

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);
	if (!ns)
		SMC_RET1(handle, SMC_UNK);

	switch (smc_fid) {

	case TEGRA_SIP_NEW_VIDEOMEM_REGION:

		/*
		 * Check if Video Memory overlaps TZDRAM (contains bl31/bl32)
		 * or falls outside of the valid DRAM range
		 */
		err = bl31_check_ns_address(x1, x2);
		if (err)
			SMC_RET1(handle, err);

		/* Check if the Video Memory base and size are 1MB aligned */
		if ((x1 & 0xFFFFF) || (x2 & 0xFFFFF)) {
			ERROR("Unaligned Video Memory base address or size!\n");
			SMC_RET1(handle, -ENOTSUP);
		}

		/* new video memory carveout settings */
		tegra_memctrl_videomem_setup(x1, x2);

		SMC_RET1(handle, 0);

	default:
		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}
/*
 * This helper function handles Secure EL1 preemption. The preemption could be
 * due to Non-Secure interrupts or EL3 interrupts. In both cases we context
 * switch to the normal world; in the case of EL3 interrupts, the interrupt
 * will again be routed to EL3 and handled at the exception vectors.
 */
uint64_t tspd_handle_sp_preemption(void *handle)
{
	cpu_context_t *ns_cpu_context;

	assert(handle == cm_get_context(SECURE));
	cm_el1_sysregs_context_save(SECURE);
	/* Get a reference to the non-secure context */
	ns_cpu_context = cm_get_context(NON_SECURE);
	assert(ns_cpu_context);

	/*
	 * To allow Secure EL1 interrupt handler to re-enter TSP while TSP
	 * is preempted, the secure system register context which will get
	 * overwritten must be additionally saved. This is currently done
	 * by the TSPD S-EL1 interrupt handler.
	 */

	/*
	 * Restore non-secure state.
	 */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	/*
	 * The TSP was preempted during STD SMC execution.
	 * Return back to the normal world with SMC_PREEMPTED as error
	 * code in x0.
	 */
	SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
}
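/*
 * For context, this is how the normal world is expected to drive the
 * preemption protocol: a yielding (STD) SMC that returns SMC_PREEMPTED must
 * be resumed via TSP_FID_RESUME (see tspd_smc_handler later in this section).
 * A hedged sketch, assuming a hypothetical smc64() wrapper that issues the
 * SMC and returns x0:
 */
extern uint64_t smc64(uint32_t fid, uint64_t arg1, uint64_t arg2);

static uint64_t tsp_add_with_resume(uint64_t a, uint64_t b)
{
	uint64_t ret = smc64(TSP_STD_FID(TSP_ADD), a, b);

	/* Re-enter the TSP until the preempted call runs to completion */
	while (ret == SMC_PREEMPTED)
		ret = smc64(TSP_FID_RESUME, 0, 0);

	return ret;
}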
/*
 * This function is responsible for handling all SiP calls from the NS world
 */
uint64_t sip_smc_handler(uint32_t smc_fid,
			 uint64_t x1,
			 uint64_t x2,
			 uint64_t x3,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	switch (smc_fid) {
	case SIP_SVC_CALL_COUNT:
		/* Return the number of Mediatek SiP Service Calls. */
		SMC_RET1(handle,
			 MTK_COMMON_SIP_NUM_CALLS + MTK_PLAT_SIP_NUM_CALLS);

	case SIP_SVC_UID:
		/* Return UID to the caller */
		SMC_UUID_RET(handle, mtk_sip_svc_uid);

	case SIP_SVC_VERSION:
		/* Return the version of current implementation */
		SMC_RET2(handle, MTK_SIP_SVC_VERSION_MAJOR,
			MTK_SIP_SVC_VERSION_MINOR);

	default:
		return mediatek_sip_handler(smc_fid, x1, x2, x3, x4,
			cookie, handle, flags);
	}
}
/*******************************************************************************
 * Function to invoke the registered `handle` corresponding to the smc_fid.
 ******************************************************************************/
uintptr_t handle_runtime_svc(uint32_t smc_fid,
			     void *cookie,
			     void *handle,
			     unsigned int flags)
{
	u_register_t x1, x2, x3, x4;
	int index;
	unsigned int idx;
	const rt_svc_desc_t *rt_svc_descs;

	assert(handle);
	idx = get_unique_oen_from_smc_fid(smc_fid);
	assert(idx < MAX_RT_SVCS);

	index = rt_svc_descs_indices[idx];
	if (index < 0 || index >= (int)RT_SVC_DECS_NUM)
		SMC_RET1(handle, SMC_UNK);

	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;

	get_smc_params_from_ctx(handle, x1, x2, x3, x4);

	return rt_svc_descs[index].handle(smc_fid, x1, x2, x3, x4, cookie,
						handle, flags);
}
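/*
 * For reference, this is how the rt_svc_descs array consulted above is
 * typically populated in TF-A: each runtime service declares a descriptor
 * with DECLARE_RT_SVC, which places it in the linker section spanning
 * RT_SVC_DESCS_START..END. The example below mirrors the standard service
 * registration; the exact field order may differ between TF-A versions.
 */
DECLARE_RT_SVC(
	std_svc,
	OEN_STD_START,
	OEN_STD_END,
	SMC_TYPE_FAST,
	std_svc_setup,
	std_svc_smc_handler
);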
/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the
 * OPTEED. It validates the interrupt and upon success arranges entry into
 * the OPTEE at 'optee_fiq_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	optee_context_t *optee_ctx;

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the OPTEE */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's OPTEE context */
	linear_id = plat_my_core_pos();
	optee_ctx = &opteed_sp_context[linear_id];
	assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

	cm_set_elr_el3(SECURE, (uint64_t)&optee_vectors->fiq_entry);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the OPTEE that it has to handle an FIQ (synchronously).
	 * Also the instruction in normal world where the interrupt was
	 * generated is passed for debugging purposes. It is safe to
	 * retrieve this address from ELR_EL3 as the secure context will
	 * not take effect until el3_exit().
	 */
	SMC_RET1(&optee_ctx->cpu_ctx, read_elr_el3());
}
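/*
 * This handler only runs if it was registered at dispatcher setup time so
 * that S-EL1 interrupts taken while the normal world runs are routed to it.
 * A sketch of that registration, mirroring the TSPD code shown later in this
 * section:
 */
static void opteed_register_sel1_handler(void)
{
	uint32_t flags = 0U;
	int32_t rc;

	/* Route S-EL1 interrupts taken from the non-secure state to EL3 */
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     opteed_sel1_interrupt_handler,
					     flags);
	if (rc != 0)
		panic();
}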
/*******************************************************************************
 * PSCI top level handler for servicing SMCs.
 ******************************************************************************/
uint64_t psci_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	uint64_t rc;

	switch (smc_fid) {
	case PSCI_VERSION:
		rc = psci_version();
		break;

	case PSCI_CPU_OFF:
		rc = __psci_cpu_off();
		break;

	case PSCI_CPU_SUSPEND_AARCH64:
	case PSCI_CPU_SUSPEND_AARCH32:
		rc = __psci_cpu_suspend(x1, x2, x3);
		break;

	case PSCI_CPU_ON_AARCH64:
	case PSCI_CPU_ON_AARCH32:
		rc = psci_cpu_on(x1, x2, x3);
		break;

	case PSCI_AFFINITY_INFO_AARCH32:
	case PSCI_AFFINITY_INFO_AARCH64:
		rc = psci_affinity_info(x1, x2);
		break;

	case PSCI_MIG_AARCH32:
	case PSCI_MIG_AARCH64:
		rc = psci_migrate(x1);
		break;

	case PSCI_MIG_INFO_TYPE:
		rc = psci_migrate_info_type();
		break;

	case PSCI_MIG_INFO_UP_CPU_AARCH32:
	case PSCI_MIG_INFO_UP_CPU_AARCH64:
		rc = psci_migrate_info_up_cpu();
		break;

	default:
		rc = SMC_UNK;
		WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
	}

	SMC_RET1(handle, rc);
}
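/*
 * What a non-secure caller of this service looks like: PSCI_VERSION is a
 * fast SMC32 call with FID 0x84000000 (per the PSCI specification), and the
 * result comes back in x0 with the major version in bits [31:16] and the
 * minor version in bits [15:0]. A minimal AArch64 sketch using GCC inline
 * assembly:
 */
static uint64_t psci_version_smc(void)
{
	register uint64_t x0 __asm__("x0") = 0x84000000; /* PSCI_VERSION */

	__asm__ volatile("smc #0"
			 : "+r" (x0)
			 : /* no inputs */
			 : "x1", "x2", "x3", "memory");

	return x0;
}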
/*******************************************************************************
 * This function is responsible for handling all T210 SiP calls
 ******************************************************************************/
int plat_sip_handler(uint32_t smc_fid,
		     uint64_t x1,
		     uint64_t x2,
		     uint64_t x3,
		     uint64_t x4,
		     const void *cookie,
		     void *handle,
		     uint64_t flags)
{
	uint32_t val, ns;

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);
	if (!ns)
		SMC_RET1(handle, SMC_UNK);

	switch (smc_fid) {
	case TEGRA_SIP_PMC_COMMANDS:

		/* Check that the address is within the PMC range and is 4-byte aligned */
		if ((x2 >= TEGRA_PMC_SIZE) || (x2 & 0x3))
			return -EINVAL;

		/* pmc_secure_scratch registers are not accessible */
		if (((x2 >= PMC_SECURE_SCRATCH0) && (x2 <= PMC_SECURE_SCRATCH5)) ||
		    ((x2 >= PMC_SECURE_SCRATCH6) && (x2 <= PMC_SECURE_SCRATCH7)) ||
		    ((x2 >= PMC_SECURE_SCRATCH8) && (x2 <= PMC_SECURE_SCRATCH79)) ||
		    ((x2 >= PMC_SECURE_SCRATCH80) && (x2 <= PMC_SECURE_SCRATCH119)))
			return -EFAULT;

		/* PMC secure-only registers are not accessible */
		if ((x2 == PMC_DPD_ENABLE_0) || (x2 == PMC_FUSE_CONTROL_0) ||
		    (x2 == PMC_CRYPTO_OP_0))
			return -EFAULT;

		/* Perform PMC read/write */
		if (x1 == PMC_READ) {
			val = mmio_read_32((uint32_t)(TEGRA_PMC_BASE + x2));
			write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1, val);
		} else if (x1 == PMC_WRITE) {
			mmio_write_32((uint32_t)(TEGRA_PMC_BASE + x2), (uint32_t)x3);
		} else {
			return -EINVAL;
		}

		break;

	default:
		ERROR("%s: unsupported function ID\n", __func__);
		return -ENOTSUP;
	}

	return 0;
}
/*******************************************************************************
 * OEM top level handler for servicing SMCs.
 ******************************************************************************/
uintptr_t oem_smc_handler(uint32_t smc_fid,
			u_register_t x1,
			u_register_t x2,
			u_register_t x3,
			u_register_t x4,
			void *cookie,
			void *handle,
			u_register_t flags)
{
	WARN("Unimplemented OEM Call: 0x%x\n", smc_fid);
	SMC_RET1(handle, SMC_UNK);
}
uint64_t mediatek_plat_sip_handler(uint32_t smc_fid,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *cookie,
				void *handle,
				uint64_t flags)
{
	ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
	SMC_RET1(handle, SMC_UNK);
}
static uintptr_t trusty_generic_platform_smc(uint32_t smc_fid,
			 u_register_t x1,
			 u_register_t x2,
			 u_register_t x3,
			 u_register_t x4,
			 void *cookie,
			 void *handle,
			 u_register_t flags)
{
	switch (smc_fid) {
	case SMC_FC_DEBUG_PUTC:
		trusty_dputc(x1, is_caller_secure(flags));
		SMC_RET1(handle, 0);

	case SMC_FC_GET_REG_BASE:
	case SMC_FC64_GET_REG_BASE:
		SMC_RET1(handle, trusty_get_reg_base(x1));

	default:
		NOTICE("%s(0x%x, 0x%lx) unknown smc\n", __func__, smc_fid, x1);
		SMC_RET1(handle, SMC_UNK);
	}
}
/*
 * Top-level OEM Service SMC handler. This handler will in turn dispatch
 * calls to related SMC handler
 */
uintptr_t oem_svc_smc_handler(uint32_t smc_fid,
			 u_register_t x1,
			 u_register_t x2,
			 u_register_t x3,
			 u_register_t x4,
			 void *cookie,
			 void *handle,
			 u_register_t flags)
{
	/*
	 * Dispatch OEM calls to OEM Common handler and return its return value
	 */
	if (is_oem_fid(smc_fid)) {
		return oem_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
					handle, flags);
	}

	switch (smc_fid) {
	case OEM_SVC_CALL_COUNT:
		/*
		 * Return the number of OEM Service Calls.
		 */
		SMC_RET1(handle, OEM_SVC_NUM_CALLS);

	case OEM_SVC_UID:
		/* Return UID to the caller */
		SMC_UUID_RET(handle, oem_svc_uid);

	case OEM_SVC_VERSION:
		/* Return the version of current implementation */
		SMC_RET2(handle, OEM_VERSION_MAJOR, OEM_VERSION_MINOR);

	default:
		WARN("Unimplemented OEM Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
uint64_t rockchip_plat_sip_handler(uint32_t smc_fid,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	switch (smc_fid) {
	default:
		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
uint64_t fiqd_smc_handler(uint32_t smc_fid,
			 uint64_t x1,
			 uint64_t x2,
			 uint64_t x3,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	/* No SMC calls are supported here */
	switch (smc_fid) {
	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}
uint64_t tspd_handle_sp_preemption(void *handle)
{
	cpu_context_t *ns_cpu_context;
	assert(handle == cm_get_context(SECURE));
	cm_el1_sysregs_context_save(SECURE);
	/* Get a reference to the non-secure context */
	ns_cpu_context = cm_get_context(NON_SECURE);
	assert(ns_cpu_context);

	/*
	 * Restore non-secure state. The secure system
	 * register context will be saved when required.
	 */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
}
/*
 * This function handles Mediatek-defined SiP calls.
 */
uint64_t mediatek_sip_handler(uint32_t smc_fid,
			uint64_t x1,
			uint64_t x2,
			uint64_t x3,
			uint64_t x4,
			void *cookie,
			void *handle,
			uint64_t flags)
{
	uint32_t ns;

	/* If the parameters were sent via an SMC32 call, clear their top 32 bits */
	clean_top_32b_of_param(smc_fid, &x1, &x2, &x3, &x4);

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);
	if (!ns) {
		/* SiP SMC call from the secure world: nothing to do */
		;
	} else {
		/* SiP SMC call from the normal world */
		switch (smc_fid) {
#if MTK_SIP_SET_AUTHORIZED_SECURE_REG_ENABLE
		case MTK_SIP_SET_AUTHORIZED_SECURE_REG: {
			/* only use ret here */
			uint64_t ret;

			ret = mt_sip_set_authorized_sreg((uint32_t)x1,
				(uint32_t)x2);
			SMC_RET1(handle, ret);
		}
#endif
#if MTK_SIP_KERNEL_BOOT_ENABLE
		case MTK_SIP_KERNEL_BOOT_AARCH32:
			boot_to_kernel(x1, x2, x3, x4);
			SMC_RET0(handle);
#endif
		}
	}

	return mediatek_plat_sip_handler(smc_fid, x1, x2, x3, x4,
					cookie, handle, flags);

}
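/*
 * A plausible reconstruction of the clean_top_32b_of_param() helper used
 * above, assuming it keys off the SMCCC calling-convention bit (bit 30 of
 * the FID: 0 = SMC32, 1 = SMC64); the MediaTek platform code holds the real
 * definition.
 */
static void clean_top_32b_of_param_sketch(uint32_t smc_fid, uint64_t *x1,
					  uint64_t *x2, uint64_t *x3,
					  uint64_t *x4)
{
	/* An SMC32 caller only passes 32-bit parameters in w1-w4 */
	if ((smc_fid & (1U << 30)) == 0U) {
		*x1 &= 0xffffffffU;
		*x2 &= 0xffffffffU;
		*x3 &= 0xffffffffU;
		*x4 &= 0xffffffffU;
	}
}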
int imx_srtc_handler(uint32_t smc_fid,
		    void *handle,
		    u_register_t x1,
		    u_register_t x2,
		    u_register_t x3,
		    u_register_t x4)
{
	int ret;

	switch (x1) {
	case IMX_SIP_SRTC_SET_TIME:
		ret = imx_srtc_set_time(x2, x3, x4);
		break;
	default:
		ret = SMC_UNK;
	}

	SMC_RET1(handle, ret);
}
/*******************************************************************************
 * OEM top level handler for servicing SMCs.
 ******************************************************************************/
uint64_t oem_smc_handler(uint32_t smc_fid,
			uint64_t x1,
			uint64_t x2,
			uint64_t x3,
			uint64_t x4,
			void *cookie,
			void *handle,
			uint64_t flags)
{
	uint64_t rc;

	switch (smc_fid) {
	default:
		rc = SMC_UNK;
		WARN("Unimplemented OEM Call: 0x%x\n", smc_fid);
	}

	SMC_RET1(handle, rc);
}
/*******************************************************************************
 * This function is responsible for handling all SiP calls
 ******************************************************************************/
uint64_t tegra_sip_handler(uint32_t smc_fid,
			   uint64_t x1,
			   uint64_t x2,
			   uint64_t x3,
			   uint64_t x4,
			   void *cookie,
			   void *handle,
			   uint64_t flags)
{
	uint32_t regval;
	int err;

	/* Check if this is a SoC specific SiP */
	err = plat_sip_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
	if (err == 0)
		SMC_RET1(handle, (uint64_t)err);

	switch (smc_fid) {

	case TEGRA_SIP_NEW_VIDEOMEM_REGION:

		/* clean up the high bits */
		x2 = (uint32_t)x2;

		/*
		 * Check if Video Memory overlaps TZDRAM (contains bl31/bl32)
		 * or falls outside of the valid DRAM range
		 */
		err = bl31_check_ns_address(x1, x2);
		if (err)
			SMC_RET1(handle, err);

		/* Check if the Video Memory base and size are 1MB aligned */
		if ((x1 & 0xFFFFF) || (x2 & 0xFFFFF)) {
			ERROR("Unaligned Video Memory base address or size!\n");
			SMC_RET1(handle, -ENOTSUP);
		}

		/*
		 * The GPU is the user of the Video Memory region. In order to
		 * transition to the new memory region smoothly, we program the
		 * new base/size ONLY if the GPU is in reset mode.
		 */
		regval = mmio_read_32(TEGRA_CAR_RESET_BASE +
				      TEGRA_GPU_RESET_REG_OFFSET);
		if ((regval & GPU_RESET_BIT) == 0U) {
			ERROR("GPU not in reset! Video Memory setup failed\n");
			SMC_RET1(handle, -ENOTSUP);
		}

		/* new video memory carveout settings */
		tegra_memctrl_videomem_setup(x1, x2);

		SMC_RET1(handle, 0);
		break;

	/*
	 * The NS world registers the address of its handler to be
	 * used for processing the FIQ. This is normally used by the
	 * NS FIQ debugger driver to detect system hangs by programming
	 * a watchdog timer to fire a FIQ interrupt.
	 */
	case TEGRA_SIP_FIQ_NS_ENTRYPOINT:

		if (!x1)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * TODO: Check if x1 contains a valid DRAM address
		 */

		/* store the NS world's entrypoint */
		tegra_fiq_set_ns_entrypoint(x1);

		SMC_RET1(handle, 0);
		break;

	/*
	 * The NS world's FIQ handler issues this SMC to get the NS EL1/EL0
	 * CPU context when the FIQ interrupt was triggered. This allows the
	 * NS world to understand the CPU state when the watchdog interrupt
	 * triggered.
	 */
	case TEGRA_SIP_FIQ_NS_GET_CONTEXT:

		/* retrieve context registers when FIQ triggered */
		tegra_fiq_get_intr_context();

		SMC_RET0(handle);
		break;

	case TEGRA_SIP_ENABLE_FAKE_SYSTEM_SUSPEND:
		/*
		 * System suspend fake mode is set if we are on VDK and we make
		 * a debug SIP call. This mode ensures that we exercise the
		 * debug path instead of the regular code path, to suit the
		 * pre-silicon platform needs. This includes replacing the
		 * call to WFI with a warm reset request.
		 */
		if (tegra_platform_is_emulation() != 0U) {

			tegra_fake_system_suspend = 1;
			SMC_RET1(handle, 0);
		}

		/*
		 * If we are not running on VDK, we return to the caller as if
		 * this SiP call is not implemented.
		 */
		break;

	default:
		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
uint64_t tspd_smc_handler(uint32_t smc_fid,
			 uint64_t x1,
			 uint64_t x2,
			 uint64_t x3,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos(), ns;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	uint64_t rc;
#if TSP_INIT_ASYNC
	entry_point_info_t *next_image_info;
#endif

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used by the TSP to indicate that it was
	 * preempted by a normal world IRQ.
	 */
	case TSP_PREEMPTED:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		return tspd_handle_sp_preemption(handle);

	/*
	 * This function ID is used only by the TSP to indicate that it has
	 * finished handling a S-EL1 FIQ interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_HANDLED_S_EL1_FIQ:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/*
		 * Restore the relevant EL3 state which was saved to service
		 * this SMC.
		 */
		if (get_std_smc_active_flag(tsp_ctx->state)) {
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_SPSR_EL3,
				    tsp_ctx->saved_spsr_el3);
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_ELR_EL3,
				    tsp_ctx->saved_elr_el3);
#if TSPD_ROUTE_IRQ_TO_EL3
			/*
			 * Need to restore the previously interrupted
			 * secure context.
			 */
			memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx,
				TSPD_SP_CTX_SIZE);
#endif
		}

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the TSP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET0((uint64_t) ns_cpu_context);


	/*
	 * This function ID is used only by the TSP to indicate that it was
	 * interrupted due to a EL3 FIQ interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_EL3_FIQ:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/* Assert that standard SMC execution has been preempted */
		assert(get_std_smc_active_flag(tsp_ctx->state));

		/* Save the secure system register state */
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/* Restore non-secure state */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET1(ns_cpu_context, TSP_EL3_FIQ);


	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(tsp_vectors == NULL);
		tsp_vectors = (tsp_vectors_t *) x1;

		if (tsp_vectors) {
			set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);

			/*
			 * TSP has been successfully initialized. Register power
			 * management hooks with PSCI
			 */
			psci_register_spd_pm_hook(&tspd_pm);

			/*
			 * Register an interrupt handler for S-EL1 interrupts
			 * when generated during code executing in the
			 * non-secure state.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, NON_SECURE);
			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
						tspd_sel1_interrupt_handler,
						flags);
			if (rc)
				panic();

#if TSPD_ROUTE_IRQ_TO_EL3
			/*
			 * Register an interrupt handler for NS interrupts
			 * generated while executing in the secure state; these
			 * are routed to EL3.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, SECURE);

			rc = register_interrupt_type_handler(INTR_TYPE_NS,
						tspd_ns_interrupt_handler,
						flags);
			if (rc)
				panic();

			/*
			 * Disable the NS interrupt locally since it will be
			 * enabled globally within cm_init_my_context.
			 */
			disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
		}


#if TSP_INIT_ASYNC
		/* Save the Secure EL1 system register context */
		assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
		cm_el1_sysregs_context_save(SECURE);

		/* Program EL3 registers to enable entry into the next EL */
		next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
		assert(next_image_info);
		assert(NON_SECURE ==
				GET_SECURITY_STATE(next_image_info->h.attr));

		cm_init_my_context(next_image_info);
		cm_prepare_el3_exit(NON_SECURE);
		SMC_RET0(cm_get_context(NON_SECURE));
#else
		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
#endif

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
	case TSP_SYSTEM_OFF_DONE:
	case TSP_SYSTEM_RESET_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);

		/*
		 * Request from non-secure client to perform an
		 * arithmetic operation or response from secure
		 * payload to an earlier request.
		 */
	case TSP_FAST_FID(TSP_ADD):
	case TSP_FAST_FID(TSP_SUB):
	case TSP_FAST_FID(TSP_MUL):
	case TSP_FAST_FID(TSP_DIV):

	case TSP_STD_FID(TSP_ADD):
	case TSP_STD_FID(TSP_SUB):
	case TSP_STD_FID(TSP_MUL):
	case TSP_STD_FID(TSP_DIV):
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));

			/* Check if we are already preempted */
			if (get_std_smc_active_flag(tsp_ctx->state))
				SMC_RET1(handle, SMC_UNK);

			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
			store_tsp_args(tsp_ctx, x1, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/*
			 * Set the appropriate entry point for the SMC. We
			 * expect the TSP to manage the PSTATE.I and PSTATE.F
			 * flags as appropriate.
			 */
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->fast_smc_entry);
			} else {
				set_std_smc_active_flag(tsp_ctx->state);
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->std_smc_entry);
#if TSPD_ROUTE_IRQ_TO_EL3
				/*
				 * Enable the routing of NS interrupts to EL3
				 * during STD SMC processing on this core.
				 */
				enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
			}

			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);
			SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
		} else {
			/*
			 * This is the result from the secure client of an
			 * earlier request. The results are in x1-x3. Copy it
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_STD) {
				clr_std_smc_active_flag(tsp_ctx->state);
#if TSPD_ROUTE_IRQ_TO_EL3
				/*
				 * Disable the routing of NS interrupts to EL3
				 * after STD SMC processing is finished on this
				 * core.
				 */
				disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
			}

			SMC_RET3(ns_cpu_context, x1, x2, x3);
		}

		break;

		/*
		 * Request from the non-secure world to resume the preempted
		 * Standard SMC call.
		 */
	case TSP_FID_RESUME:
		/* RESUME should be invoked only by normal world */
		if (!ns) {
			assert(0);
			break;
		}

		/*
		 * This is a resume request from the non-secure client.
		 * save the non-secure state and send the request to
		 * the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check if we are already preempted before resume */
		if (!get_std_smc_active_flag(tsp_ctx->state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */
#if TSPD_ROUTE_IRQ_TO_EL3
		/*
		 * Enable the routing of NS interrupts to EL3 during resumption
		 * of STD SMC call on this core.
		 */
		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

		/*
		 * We just need to return to the preempted point in the TSP;
		 * execution will resume as normal.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);
		SMC_RET0(&tsp_ctx->cpu_ctx);

		/*
		 * This is a request from the secure payload for more arguments
		 * for an ongoing arithmetic operation requested by the
		 * non-secure world. Simply return the arguments from the non-
		 * secure client in the original call.
		 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		get_tsp_args(tsp_ctx, x1, x2);
		SMC_RET2(handle, x1, x2);

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to the non-secure world
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}
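/*
 * The fast/standard split in the handler above hinges on GET_SMC_TYPE():
 * per the SMC Calling Convention, bit 31 of the function ID is 1 for fast
 * calls and 0 for yielding ("standard") calls. A sketch of the decode:
 */
#define GET_SMC_TYPE_SKETCH(_fid)	(((_fid) >> 31) & 1U)
#define SMC_TYPE_FAST_SKETCH		1U
#define SMC_TYPE_STD_SKETCH		0U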
// ************************************************************************************
// fastcall handler
static uint64_t tbase_fastcall_handler(uint32_t smc_fid,
        uint64_t x1,
        uint64_t x2,
        uint64_t x3,
        uint64_t x4,
        void *cookie,
        void *handle,
        uint64_t flags)
{
  uint64_t mpidr = read_mpidr();
  uint32_t linear_id = platform_get_core_pos(mpidr);
  tbase_context *tbase_ctx = &secure_context[linear_id];
  int caller_security_state = flags&1;
      
  if (caller_security_state==SECURE) {
    switch(maskSWdRegister(smc_fid)) {
      case TBASE_SMC_FASTCALL_RETURN: {
        // Return values from fastcall already in cpu_context!
        // TODO: Could we skip saving sysregs?
        DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_RETURN\n\r");
        tbase_synchronous_sp_exit(tbase_ctx, 0, 1);
      } 
      case TBASE_SMC_FASTCALL_CONFIG_OK: {
        DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_CONFIG_OK\n\r");
        configure_tbase(x1,x2);
        SMC_RET1(handle,smc_fid);
        break;
      } 
      case TBASE_SMC_FASTCALL_OUTPUT: {
        output(x1,x2);
        SMC_RET1(handle,smc_fid);
        break;
      }
      case TBASE_SMC_FASTCALL_STATUS: {
        DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_STATUS\n\r");
        tbase_status(x1,x2);
        SMC_RET1(handle,smc_fid);
        break;
      }
      case TBASE_SMC_FASTCALL_INPUT: {
        DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_INPUT\n\r");
        smc_fid = plat_tbase_input(x1,&x2,&(tbase_ctx->tbase_input_fastcall));
        SMC_RET3(handle, smc_fid,
                 page_align(registerFileEnd[REGISTER_FILE_NWD] -
                            registerFileStart[REGISTER_FILE_NWD], UP) +
                 (uint64_t)&(tbase_ctx->tbase_input_fastcall) -
                 registerFileStart[REGISTER_FILE_MONITOR],
                 x2);
        break;
      }
      case TBASE_SMC_FASTCALL_DUMP: {
        DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_DUMP\n\r");
        tbase_triggerSgiDump();
        SMC_RET1(handle,smc_fid);
        break;
      }
      
      default: {
        // What now?
        DBG_PRINTF( "tbase_fastcall_handler SMC_UNK %x\n\r", smc_fid );
        SMC_RET1(handle, SMC_UNK);
        break;
      }
    }
  }
  else
  {
    if (smc_fid == TBASE_SMC_AEE_DUMP)         // N-world can request AEE Dump function
    {
      mt_atf_trigger_WDT_FIQ();
      // Once we return to the N-world's caller, an FIQ will be triggered
      // and bring us into EL3 (ATF) on core #0 because of the HW wiring.
      // The FIQ will then be handled the same way as a HW WDT FIQ.

      // Do we need to save/restore the N-world context before using it for the return?
      cm_el1_sysregs_context_restore(NON_SECURE);
      cm_set_next_eret_context(NON_SECURE);
      return 0;
    }
    if ((tbaseExecutionStatus&TBASE_STATUS_FASTCALL_OK_BIT)==0) {
      // TBASE must be initialized to be usable
      // TODO: What is correct error code?
      DBG_PRINTF( "tbase_fastcall_handler tbase not ready for fastcall\n\r" );
      SMC_RET1(handle, SMC_UNK);
      return 0;
    }
    if(tbase_ctx->state == TBASE_STATE_OFF) {
      DBG_PRINTF( "tbase_fastcall_handler tbase not ready for fastcall\n\r" );
      SMC_RET1(handle, SMC_UNK);
      return 0;
    }

    DBG_PRINTF( "tbase_fastcall_handler NWd %x\n\r", smc_fid );
    // So far all fastcalls go to tbase
    // Save NWd context
    gp_regs_t *ns_gpregs = get_gpregs_ctx((cpu_context_t *)handle);
    write_ctx_reg(ns_gpregs, CTX_GPREG_X0, smc_fid ); // These are not saved yet
    write_ctx_reg(ns_gpregs, CTX_GPREG_X1, x1 );
    write_ctx_reg(ns_gpregs, CTX_GPREG_X2, x2 );
    write_ctx_reg(ns_gpregs, CTX_GPREG_X3, x3 );
    cm_el1_sysregs_context_save(NON_SECURE);

    // Load SWd context
    tbase_setup_entry_nwd((cpu_context_t *)handle,ENTRY_OFFSET_FASTCALL);
#if DEBUG
    print_fastcall_params("entry", NON_SECURE);
#endif
    tbase_synchronous_sp_entry(tbase_ctx);
    cm_el1_sysregs_context_restore(NON_SECURE);
    cm_set_next_eret_context(NON_SECURE);
    return 0; // Does not seem to matter what we return
  }
}
/*
 * Top-level Standard Service SMC handler. This handler will in turn dispatch
 * calls to PSCI SMC handler
 */
static uintptr_t std_svc_smc_handler(uint32_t smc_fid,
			     u_register_t x1,
			     u_register_t x2,
			     u_register_t x3,
			     u_register_t x4,
			     void *cookie,
			     void *handle,
			     u_register_t flags)
{
	/*
	 * Dispatch PSCI calls to PSCI SMC handler and return its return
	 * value
	 */
	if (is_psci_fid(smc_fid)) {
		uint64_t ret;

#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Flush cache line so that even if CPU power down happens
		 * the timestamp update is reflected in memory.
		 */
		PMF_WRITE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_PSCI,
		    PMF_CACHE_MAINT,
		    get_cpu_data(cpu_data_pmf_ts[CPU_DATA_PMF_TS0_IDX]));
#endif

		ret = psci_smc_handler(smc_fid, x1, x2, x3, x4,
		    cookie, handle, flags);

#if ENABLE_RUNTIME_INSTRUMENTATION
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_EXIT_PSCI,
		    PMF_NO_CACHE_MAINT);
#endif

		SMC_RET1(handle, ret);
	}

#if ENABLE_SPM && SPM_DEPRECATED
	/*
	 * Dispatch SPM calls to SPM SMC handler and return its return
	 * value
	 */
	if (is_spm_fid(smc_fid)) {
		return spm_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				       handle, flags);
	}
#endif

#if SDEI_SUPPORT
	if (is_sdei_fid(smc_fid)) {
		return sdei_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
				flags);
	}
#endif

	switch (smc_fid) {
	case ARM_STD_SVC_CALL_COUNT:
		/*
		 * Return the number of Standard Service Calls. PSCI is the only
		 * standard service implemented; so return number of PSCI calls
		 */
		SMC_RET1(handle, PSCI_NUM_CALLS);

	case ARM_STD_SVC_UID:
		/* Return UID to the caller */
		SMC_UUID_RET(handle, arm_svc_uid);

	case ARM_STD_SVC_VERSION:
		/* Return the version of current implementation */
		SMC_RET2(handle, STD_SVC_VERSION_MAJOR, STD_SVC_VERSION_MINOR);

	default:
		WARN("Unimplemented Standard Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
static uint64_t tbase_smc_handler(uint32_t smc_fid,
        uint64_t x1,
        uint64_t x2,
        uint64_t x3,
        uint64_t x4,
        void *cookie,
        void *handle,
        uint64_t flags)
{
  uint64_t mpidr = read_mpidr();
  uint32_t linear_id = platform_get_core_pos(mpidr);
  tbase_context *tbase_ctx = &secure_context[linear_id];
  int caller_security_state = flags&1;
 
  DBG_PRINTF("tbase_smc_handler %d %x\n\r", caller_security_state, smc_fid);  
  
  if (caller_security_state==SECURE) {
    // Yield to NWd
    // TODO: Check id
    if (tbaseInitStatus==TBASE_INIT_CONFIG_OK) {
      // Save sysregs for all cores.
      // After this, tbase can run on any core.
      save_sysregs_allcore();
      tbaseInitStatus = TBASE_INIT_SYSREGS_OK;
      if (tbaseExecutionStatus==TBASE_STATUS_UNINIT) {
        tbaseExecutionStatus = TBASE_STATUS_NORMAL;
      }
    }
    // If above check fails, it is not possible to return to tbase.
    tbase_synchronous_sp_exit(tbase_ctx, 0, 1);
  } 
  else {
    if ((tbaseExecutionStatus&TBASE_STATUS_SMC_OK_BIT)==0) {
      // TBASE must be initialized to be usable
      DBG_PRINTF( "tbase_smc_handler tbase not ready for smc.\n\r");
      // TODO: What is correct error code?
      SMC_RET1(handle, SMC_UNK);
      return 1;
    }
    if(tbase_ctx->state == TBASE_STATE_OFF) {
      DBG_PRINTF( "tbase_smc_handler tbase not ready for smc\n\r" );
      return 1;
    }

    // NSIQ, go to SWd
    // TODO: Check id?
    
    // Save NWd
    gp_regs_t *ns_gpregs = get_gpregs_ctx((cpu_context_t *)handle);
    write_ctx_reg(ns_gpregs, CTX_GPREG_X0, smc_fid );
    write_ctx_reg(ns_gpregs, CTX_GPREG_X1, x1 );
    write_ctx_reg(ns_gpregs, CTX_GPREG_X2, x2 );
    write_ctx_reg(ns_gpregs, CTX_GPREG_X3, x3 );
    cm_el1_sysregs_context_save(NON_SECURE);
    
    // Load SWd
    tbase_setup_entry_nwd((cpu_context_t *)handle,ENTRY_OFFSET_SMC);
    // Enter tbase. tbase must return using normal SMC, which will continue here.   
    tbase_synchronous_sp_entry(tbase_ctx);
    // Load NWd
    cm_el1_sysregs_context_restore(NON_SECURE);
    cm_set_next_eret_context(NON_SECURE);
  }
  return 0;
}
uintptr_t mrvl_sip_smc_handler(uint32_t smc_fid,
			       u_register_t x1,
			       u_register_t x2,
			       u_register_t x3,
			       u_register_t x4,
			       void *cookie,
			       void *handle,
			       u_register_t flags)
{
	u_register_t ret;
	int i;

	debug("%s: got SMC (0x%x) x1 0x%lx, x2 0x%lx, x3 0x%lx\n",
						 __func__, smc_fid, x1, x2, x3);
	if (is_comphy_fid(smc_fid)) {

		/*
		 * Some systems pass the SD physical address instead of the
		 * COMPHY physical address - convert it.
		 */
		if (x1 & MVEBU_SD_OFFSET)
			x1 = (x1 & ~0xffffff) + MVEBU_COMPHY_OFFSET;

		if ((x1 & 0xffffff) != MVEBU_COMPHY_OFFSET) {
			ERROR("%s: Wrong smc (0x%x) address: %lx\n",
			      __func__, smc_fid, x1);
			SMC_RET1(handle, SMC_UNK);
		}

		if (x2 >= MAX_LANE_NR) {
			ERROR("%s: Wrong smc (0x%x) lane nr: %lx\n",
			      __func__, smc_fid, x2);
			SMC_RET1(handle, SMC_UNK);
		}
	}

	switch (smc_fid) {

	/* COMPHY-related FIDs */
	case MV_SIP_COMPHY_POWER_ON:
		/* x1:  comphy_base, x2: comphy_index, x3: comphy_mode */
		ret = mvebu_cp110_comphy_power_on(x1, x2, x3);
		SMC_RET1(handle, ret);
	case MV_SIP_COMPHY_POWER_OFF:
		/* x1:  comphy_base, x2: comphy_index */
		ret = mvebu_cp110_comphy_power_off(x1, x2, x3);
		SMC_RET1(handle, ret);
	case MV_SIP_COMPHY_PLL_LOCK:
		/* x1:  comphy_base, x2: comphy_index */
		ret = mvebu_cp110_comphy_is_pll_locked(x1, x2);
		SMC_RET1(handle, ret);
	case MV_SIP_COMPHY_XFI_TRAIN:
		/* x1:  comphy_base, x2: comphy_index */
		ret = mvebu_cp110_comphy_xfi_rx_training(x1, x2);
		SMC_RET1(handle, ret);
	case MV_SIP_COMPHY_DIG_RESET:
		/* x1:  comphy_base, x2: comphy_index, x3: mode, x4: command */
		ret = mvebu_cp110_comphy_digital_reset(x1, x2, x3, x4);
		SMC_RET1(handle, ret);

	/* Miscellaneous FIDs */
	case MV_SIP_DRAM_SIZE:
		/* x1:  ap_base_addr */
		ret = mvebu_get_dram_size(x1);
		SMC_RET1(handle, ret);
	case MV_SIP_LLC_ENABLE:
		for (i = 0; i < ap_get_count(); i++)
			llc_runtime_enable(i);

		SMC_RET1(handle, 0);
#ifdef MVEBU_PMU_IRQ_WA
	case MV_SIP_PMU_IRQ_ENABLE:
		mvebu_pmu_interrupt_enable();
		SMC_RET1(handle, 0);
	case MV_SIP_PMU_IRQ_DISABLE:
		mvebu_pmu_interrupt_disable();
		SMC_RET1(handle, 0);
#endif

	default:
		ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
/**
 * ipi_smc_handler() - SMC handler for IPI SMC calls
 *
 * @smc_fid - Function identifier
 * @x1 - x4 - Arguments
 * @cookie  - Unused
 * @handle  - Pointer to caller's context structure
 *
 * @return  - Unused
 *
 * Determines whether smc_fid is a valid and supported IPI SMC Function ID;
 * otherwise completes the request with the unknown SMC Function ID.
 *
 * The SMC calls for the IPI service are forwarded from the SiP Service SMC
 * handler function with the rt_svc_handle signature.
 */
uint64_t ipi_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			 uint64_t x3, uint64_t x4, void *cookie,
			 void *handle, uint64_t flags)
{
	int ret;
	uint32_t ipi_local_id;
	uint32_t ipi_remote_id;
	unsigned int is_secure;

	ipi_local_id = x1 & UNSIGNED32_MASK;
	ipi_remote_id = x2 & UNSIGNED32_MASK;

	if (SMC_ENTITY(smc_fid) >= SMC_ENTITY_TRUSTED_APP)
		is_secure = 1;
	else
		is_secure = 0;

	/* Validate IPI mailbox access */
	ret = ipi_mb_validate(ipi_local_id, ipi_remote_id, is_secure);
	if (ret)
		SMC_RET1(handle, ret);

	switch (SMC_FUNCTION(smc_fid)) {
	case IPI_MAILBOX_OPEN:
		ipi_mb_open(ipi_local_id, ipi_remote_id);
		SMC_RET1(handle, 0);
	case IPI_MAILBOX_RELEASE:
		ipi_mb_release(ipi_local_id, ipi_remote_id);
		SMC_RET1(handle, 0);
	case IPI_MAILBOX_STATUS_ENQUIRY:
	{
		int disable_irq;

		disable_irq = (x3 & IPI_SMC_ENQUIRY_DIRQ_MASK) ? 1 : 0;
		ret = ipi_mb_enquire_status(ipi_local_id, ipi_remote_id);
		if ((ret & IPI_MB_STATUS_RECV_PENDING) && disable_irq)
			ipi_mb_disable_irq(ipi_local_id, ipi_remote_id);
		SMC_RET1(handle, ret);
	}
	case IPI_MAILBOX_NOTIFY:
	{
		uint32_t is_blocking;

		is_blocking = (x3 & IPI_SMC_NOTIFY_BLOCK_MASK) ? 1 : 0;
		ipi_mb_notify(ipi_local_id, ipi_remote_id, is_blocking);
		SMC_RET1(handle, 0);
	}
	case IPI_MAILBOX_ACK:
	{
		int enable_irq;

		enable_irq = (x3 & IPI_SMC_ACK_EIRQ_MASK) ? 1 : 0;
		ipi_mb_ack(ipi_local_id, ipi_remote_id);
		if (enable_irq)
			ipi_mb_enable_irq(ipi_local_id, ipi_remote_id);
		SMC_RET1(handle, 0);
	}
	case IPI_MAILBOX_ENABLE_IRQ:
		ipi_mb_enable_irq(ipi_local_id, ipi_remote_id);
		SMC_RET1(handle, 0);
	case IPI_MAILBOX_DISABLE_IRQ:
		ipi_mb_disable_irq(ipi_local_id, ipi_remote_id);
		SMC_RET1(handle, 0);
	default:
		WARN("Unimplemented IPI service call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
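/*
 * The dispatch above decodes the FID with Trusty-style SMC_ENTITY() and
 * SMC_FUNCTION() helpers. Assuming the usual Trusty encoding (entity in
 * bits [29:24], function number in the low 16 bits), they would look like
 * this hypothetical reconstruction:
 */
#define SMC_ENTITY_SKETCH(_fid)		(((_fid) >> 24) & 0x3fU)
#define SMC_FUNCTION_SKETCH(_fid)	((_fid) & 0xffffU)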
/*
 * Handle SMC from a lower exception level to switch its execution state
 * (either from AArch64 to AArch32, or vice versa).
 *
 * smc_fid:
 *	SMC function ID - either ARM_SIP_SVC_STATE_SWITCH_64 or
 *	ARM_SIP_SVC_STATE_SWITCH_32.
 * pc_hi, pc_lo:
 *	PC upon re-entry to the calling exception level; width dependent on the
 *	calling exception level.
 * cookie_hi, cookie_lo:
 *	Opaque pointer pair received from the caller, passed back to it upon
 *	re-entry.
 * handle:
 *	Handle to saved context.
 */
int arm_execution_state_switch(unsigned int smc_fid,
		uint32_t pc_hi,
		uint32_t pc_lo,
		uint32_t cookie_hi,
		uint32_t cookie_lo,
		void *handle)
{
	/* Execution state can be switched only if EL3 is AArch64 */
#ifdef AARCH64
	int caller_64, from_el2, el, endianness, thumb = 0;
	u_register_t spsr, pc, scr, sctlr;
	entry_point_info_t ep;
	cpu_context_t *ctx = (cpu_context_t *) handle;
	el3_state_t *el3_ctx = get_el3state_ctx(ctx);

	/* That the SMC originated from NS is already validated by the caller */

	/*
	 * Disallow state switch if any of the secondaries have been brought up.
	 */
	if (psci_secondaries_brought_up())
		goto exec_denied;

	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
	caller_64 = (GET_RW(spsr) == MODE_RW_64);

	if (caller_64) {
		/*
		 * If the call originated from AArch64, expect 32-bit pointers when
		 * switching to AArch32.
		 */
		if ((pc_hi != 0) || (cookie_hi != 0))
			goto invalid_param;

		pc = pc_lo;

		/* Instruction state when entering AArch32 */
		thumb = pc & 1;
	} else {
		/* Construct AArch64 PC */
		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
	}

	/* Make sure PC is 4-byte aligned, except for Thumb */
	if ((pc & 0x3) && !thumb)
		goto invalid_param;

	/*
	 * EL3 controls register width of the immediate lower EL only. Expect
	 * this request from EL2/Hyp unless:
	 *
	 * - EL2 is not implemented;
	 * - EL2 is implemented, but was disabled. This can be inferred from
	 *   SCR_EL3.HCE.
	 */
	from_el2 = caller_64 ? (GET_EL(spsr) == MODE_EL2) :
		(GET_M32(spsr) == MODE32_hyp);
	scr = read_ctx_reg(el3_ctx, CTX_SCR_EL3);
	if (!from_el2) {
		/* The call is from NS privilege level other than HYP */

		/*
		 * Disallow switching state if there's a Hypervisor in place;
		 * this request must be taken up with the Hypervisor instead.
		 */
		if (scr & SCR_HCE_BIT)
			goto exec_denied;
	}

	/*
	 * Return to the caller using the same endianness. Extract
	 * endianness bit from the respective system control register
	 * directly.
	 */
	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
	endianness = !!(sctlr & SCTLR_EE_BIT);

	/* Construct SPSR for the exception state we're about to switch to */
	if (caller_64) {
		int impl;

		/*
		 * Switching from AArch64 to AArch32. Ensure this CPU implements
		 * the target EL in AArch32.
		 */
		impl = from_el2 ? EL_IMPLEMENTED(2) : EL_IMPLEMENTED(1);
		if (impl != EL_IMPL_A64_A32)
			goto exec_denied;

		/* Return to the equivalent AArch32 privilege level */
		el = from_el2 ? MODE32_hyp : MODE32_svc;
		spsr = SPSR_MODE32(el, thumb ? SPSR_T_THUMB : SPSR_T_ARM,
				endianness, DISABLE_ALL_EXCEPTIONS);
	} else {
		/*
		 * Switching from AArch32 to AArch64. Since it's not possible to
		 * implement an EL as AArch32-only (from which this call was
		 * raised), it's safe to assume AArch64 is also implemented.
		 */
		el = from_el2 ? MODE_EL2 : MODE_EL1;
		spsr = SPSR_64(el, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	}

	/*
	 * Use the context management library to re-initialize the existing
	 * context with the execution state flipped. Since the library takes
	 * entry_point_info_t pointer as the argument, construct a dummy one
	 * with PC, state width, endianness, security etc. appropriately set.
	 * Other entries in the entry point structure are irrelevant for this
	 * purpose.
	 */
	zeromem(&ep, sizeof(ep));
	ep.pc = pc;
	ep.spsr = spsr;
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
			((endianness ? EP_EE_BIG : EP_EE_LITTLE) | NON_SECURE |
			 EP_ST_DISABLE));

	/*
	 * Re-initialize the system register context, and exit EL3 as if for the
	 * first time. State switch is effectively a soft reset of the
	 * calling EL.
	 */
	cm_init_my_context(&ep);
	cm_prepare_el3_exit(NON_SECURE);

	/*
	 * State switch success. The caller of SMC wouldn't see the SMC
	 * returning. Instead, execution starts at the supplied entry point,
	 * with context pointers populated in registers 0 and 1.
	 */
	SMC_RET2(handle, cookie_hi, cookie_lo);

invalid_param:
	SMC_RET1(handle, STATE_SW_E_PARAM);

exec_denied:
#endif
	/* State switch denied */
	SMC_RET1(handle, STATE_SW_E_DENIED);
}
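/*
 * Shape of a call into this service from a 64-bit non-secure caller: the
 * target PC and the cookie are each split into 32-bit hi/lo halves, and for
 * an AArch64 -> AArch32 switch the hi words must be zero, as enforced above.
 * A hedged sketch, assuming a hypothetical smc4() wrapper that passes its
 * arguments in x1-x4:
 */
extern void smc4(uint32_t fid, uint32_t a1, uint32_t a2, uint32_t a3,
		 uint32_t a4);

static void switch_to_aarch32(void (*entry)(void), void *cookie)
{
	/* On success, execution restarts at 'entry' with the cookie pair in
	 * registers 0 and 1 */
	smc4(ARM_SIP_SVC_STATE_SWITCH_64,
	     0U, (uint32_t)(uintptr_t)entry,	/* pc_hi, pc_lo */
	     0U, (uint32_t)(uintptr_t)cookie);	/* cookie_hi, cookie_lo */
}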
/**
 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
 * @smc_fid - Function Identifier
 * @x1 - x4 - Arguments
 * @cookie  - Unused
 * @handle  - Pointer to caller's context structure
 *
 * @return  - Unused
 *
 * Determines whether smc_fid is a valid and supported PM SMC Function ID from
 * the list of pm_api_ids; otherwise completes the request with the unknown
 * SMC Function ID.
 *
 * The SMC calls for the PM service are forwarded from the SiP Service SMC
 * handler function with the rt_svc_handle signature.
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, void *cookie, void *handle, uint64_t flags)
{
	enum pm_ret_status ret;

	uint32_t pm_arg[4];

	/* Handle case where PM wasn't initialized properly */
	if (pm_down)
		SMC_RET1(handle, SMC_UNK);

	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32);

	switch (smc_fid & FUNCID_NUM_MASK) {
	/* PM API Functions */
	case PM_SELF_SUSPEND:
		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
				      pm_arg[3]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_REQ_SUSPEND:
		ret = pm_req_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
				     pm_arg[3]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_REQ_WAKEUP:
		ret = pm_req_wakeup(pm_arg[0], pm_arg[1], pm_arg[2],
				    pm_arg[3]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_FORCE_POWERDOWN:
		ret = pm_force_powerdown(pm_arg[0], pm_arg[1]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_ABORT_SUSPEND:
		ret = pm_abort_suspend(pm_arg[0]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_SET_WAKEUP_SOURCE:
		ret = pm_set_wakeup_source(pm_arg[0], pm_arg[1], pm_arg[2]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_SYSTEM_SHUTDOWN:
		ret = pm_system_shutdown(pm_arg[0]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_REQ_NODE:
		ret = pm_req_node(pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_RELEASE_NODE:
		ret = pm_release_node(pm_arg[0]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_SET_REQUIREMENT:
		ret = pm_set_requirement(pm_arg[0], pm_arg[1], pm_arg[2],
					 pm_arg[3]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_SET_MAX_LATENCY:
		ret = pm_set_max_latency(pm_arg[0], pm_arg[1]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_GET_API_VERSION:
		/* Check if the PM API version has already been verified */
		if (pm_ctx.api_version == PM_VERSION)
			SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
				 ((uint64_t)PM_VERSION << 32));

		ret = pm_get_api_version(&pm_ctx.api_version);
		SMC_RET1(handle, (uint64_t)ret |
			 ((uint64_t)pm_ctx.api_version << 32));

	case PM_SET_CONFIGURATION:
		ret = pm_set_configuration(pm_arg[0]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_GET_NODE_STATUS:
		ret = pm_get_node_status(pm_arg[0]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_GET_OP_CHARACTERISTIC:
	{
		uint32_t result;

		ret = pm_get_op_characteristic(pm_arg[0], pm_arg[1], &result);
		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result << 32));
	}

	case PM_REGISTER_NOTIFIER:
		ret = pm_register_notifier(pm_arg[0], pm_arg[1], pm_arg[2],
					   pm_arg[3]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_RESET_ASSERT:
		ret = pm_reset_assert(pm_arg[0], pm_arg[1]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_RESET_GET_STATUS:
	{
		uint32_t reset_status;

		ret = pm_reset_get_status(pm_arg[0], &reset_status);
		SMC_RET1(handle, (uint64_t)ret |
			 ((uint64_t)reset_status << 32));
	}

	/* PM memory access functions */
	case PM_MMIO_WRITE:
		ret = pm_mmio_write(pm_arg[0], pm_arg[1], pm_arg[2]);
		SMC_RET1(handle, (uint64_t)ret);

	case PM_MMIO_READ:
	{
		uint32_t value;

		ret = pm_mmio_read(pm_arg[0], &value);
		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value) << 32);
	}
	default:
		WARN("Unimplemented PM Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
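/*
 * Several cases above pack a 32-bit PM status code into the low word of x0
 * and a 32-bit payload (version, read value, ...) into the high word. The
 * non-secure caller unpacks the pair like this (sketch):
 */
static void pm_unpack_result(uint64_t x0, uint32_t *status, uint32_t *payload)
{
	*status = (uint32_t)x0;			/* e.g. PM_RET_SUCCESS */
	*payload = (uint32_t)(x0 >> 32);	/* e.g. PM API version */
}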
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
uint64_t tlkd_smc_handler(uint32_t smc_fid,
			 uint64_t x1,
			 uint64_t x2,
			 uint64_t x3,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	gp_regs_t *gp_regs;
	uint32_t ns;
	uint64_t par;

	/* Passing a NULL context is a critical programming error */
	assert(handle);

	/* These SMCs are only supported by CPU0 */
	if ((read_mpidr() & MPIDR_CPU_MASK) != 0)
		SMC_RET1(handle, SMC_UNK);

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used by SP to indicate that it was
	 * preempted by a non-secure world IRQ.
	 */
	case TLK_PREEMPTED:

		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the SP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET1(ns_cpu_context, x1);

	/*
	 * Request from the non-secure world to resume the preempted
	 * Standard SMC call.
	 */
	case TLK_RESUME_FID:

		/* RESUME should be invoked only by normal world */
		if (!ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * This is a resume request from the non-secure client.
		 * save the non-secure state and send the request to
		 * the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check if we are already preempted before resume */
		if (!get_std_smc_active_flag(tlk_ctx.state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */

		/* We just need to return to the preempted point in
		 * SP and the execution will resume as normal.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);
		SMC_RET0(handle);

	/*
	 * This is a request from the non-secure context to:
	 *
	 * a. register shared memory with the SP for storing its
	 *    activity logs.
	 * b. register shared memory with the SP for passing args
	 *    required for maintaining sessions with the Trusted
	 *    Applications.
	 * c. open/close sessions
	 * d. issue commands to the Trusted Apps
	 */
	case TLK_REGISTER_LOGBUF:
	case TLK_REGISTER_REQBUF:
	case TLK_OPEN_TA_SESSION:
	case TLK_CLOSE_TA_SESSION:
	case TLK_TA_LAUNCH_OP:
	case TLK_TA_SEND_EVENT:

		if (!ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * This is a fresh request from the non-secure client.
		 * The parameters are in x1 and x2. Figure out which
		 * registers need to be preserved, save the non-secure
		 * state and send the request to the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check if we are already preempted */
		if (get_std_smc_active_flag(tlk_ctx.state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * Verify if there is a valid context to use.
		 */
		assert(&tlk_ctx.cpu_ctx == cm_get_context(SECURE));

		/*
		 * Mark the SP state as active.
		 */
		set_std_smc_active_flag(tlk_ctx.state);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);

		/*
		 * TLK is a 32-bit Trusted OS and so expects the SMC
		 * arguments via r0-r7. TLK expects the monitor frame
		 * registers to be 64-bits long. Hence, we pass x0 in
		 * r0-r1, x1 in r2-r3, x2 in r4-r5 and x3 in r6-r7.
		 *
		 * As smc_fid is a uint32 value, r1 contains 0.
		 */
		gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
		write_ctx_reg(gp_regs, CTX_GPREG_X4, (uint32_t)x2);
		write_ctx_reg(gp_regs, CTX_GPREG_X5, (uint32_t)(x2 >> 32));
		write_ctx_reg(gp_regs, CTX_GPREG_X6, (uint32_t)x3);
		write_ctx_reg(gp_regs, CTX_GPREG_X7, (uint32_t)(x3 >> 32));
		SMC_RET4(&tlk_ctx.cpu_ctx, smc_fid, 0, (uint32_t)x1,
			(uint32_t)(x1 >> 32));

	/*
	 * Translate NS/EL1-S virtual addresses.
	 *
	 * x1 = virtual address
	 * x3 = type (NS/S)
	 *
	 * Returns PA:lo in r0, PA:hi in r1.
	 */
	case TLK_VA_TRANSLATE:

		/* Should be invoked only by secure world */
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/* NS virtual addresses are 64-bit long */
		if (x3 & TLK_TRANSLATE_NS_VADDR)
			x1 = (uint32_t)x1 | (x2 << 32);

		if (!x1)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * TODO: Sanity check x1. This would require platform
		 * support.
		 */

		/* virtual address and type: ns/s */
		par = tlkd_va_translate(x1, x3);

		/* return physical address in r0-r1 */
		SMC_RET4(handle, (uint32_t)par, (uint32_t)(par >> 32), 0, 0);

	/*
	 * This is a request from the SP to mark completion of
	 * a standard function ID.
	 */
	case TLK_REQUEST_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Mark the SP state as inactive.
		 */
		clr_std_smc_active_flag(tlk_ctx.state);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * This is a request completion SMC and we must switch to
		 * the non-secure world to pass the result.
		 */
		cm_el1_sysregs_context_save(SECURE);

		/*
		 * We are done stashing the secure context. Switch to the
		 * non-secure context and return the result.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		SMC_RET1(ns_cpu_context, x1);

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case TLK_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP has been successfully initialized. Register power
		 * management hooks with PSCI
		 */
		psci_register_spd_pm_hook(&tlkd_pm_ops);

		/*
		 * TLK reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tlkd_synchronous_sp_exit(&tlk_ctx, x1);

	/*
	 * Return the number of service function IDs implemented to
	 * provide service to the non-secure world
	 */
	case TOS_CALL_COUNT:
		SMC_RET1(handle, TLK_NUM_FID);

	/*
	 * Return TLK's UID to the caller
	 */
	case TOS_UID:
		SMC_UUID_RET(handle, tlk_uuid);

	/*
	 * Return the version of current implementation
	 */
	case TOS_CALL_VERSION:
		SMC_RET2(handle, TLK_VERSION_MAJOR, TLK_VERSION_MINOR);

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}
/*
 * This function handles ARM defined SiP Calls
 */
static uintptr_t arm_sip_handler(unsigned int smc_fid,
			u_register_t x1,
			u_register_t x2,
			u_register_t x3,
			u_register_t x4,
			void *cookie,
			void *handle,
			u_register_t flags)
{
	int call_count = 0;

	/*
	 * Dispatch PMF calls to PMF SMC handler and return its return
	 * value
	 */
	if (is_pmf_fid(smc_fid)) {
		return pmf_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				handle, flags);
	}

	switch (smc_fid) {
	case ARM_SIP_SVC_EXE_STATE_SWITCH: {
		u_register_t pc;

		/* Allow calls from non-secure only */
		if (!is_caller_non_secure(flags))
			SMC_RET1(handle, STATE_SW_E_DENIED);

		/* Validate supplied entry point */
		pc = (u_register_t) ((x1 << 32) | (uint32_t) x2);
		if (arm_validate_ns_entrypoint(pc) != 0)
			SMC_RET1(handle, STATE_SW_E_PARAM);

		/*
		 * Pointers used in execution state switch are all 32 bits wide
		 */
		return (uintptr_t) arm_execution_state_switch(smc_fid,
				(uint32_t) x1, (uint32_t) x2, (uint32_t) x3,
				(uint32_t) x4, handle);
		}

	case ARM_SIP_SVC_CALL_COUNT:
		/* PMF calls */
		call_count += PMF_NUM_SMC_CALLS;

		/* State switch call */
		call_count += 1;

		SMC_RET1(handle, call_count);

	case ARM_SIP_SVC_UID:
		/* Return UID to the caller */
		SMC_UUID_RET(handle, arm_sip_svc_uid);

	case ARM_SIP_SVC_VERSION:
		/* Return the version of current implementation */
		SMC_RET2(handle, ARM_SIP_SVC_VERSION_MAJOR, ARM_SIP_SVC_VERSION_MINOR);

	default:
		WARN("Unimplemented ARM SiP Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}