/******************************************************************************
 * Versal common helper to save & restore the GICv3 on resume from system
 * suspend
 *****************************************************************************/
void plat_versal_gic_save(void)
{
	/*
	 * If an ITS is available, save its context before
	 * the Redistributor using:
	 * gicv3_its_save_disable(gits_base, &its_ctx[i])
	 * Additionally, an implementation-defined sequence may
	 * be required to save the whole ITS state.
	 */

	/*
	 * Save the GIC Redistributor and ITS contexts before the
	 * Distributor context. As only the SYSTEM SUSPEND API is handled,
	 * we need to save only the context of the CPU that is issuing
	 * the SYSTEM SUSPEND call, i.e. the current CPU.
	 */
	gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);

	/* Save the GIC Distributor context */
	gicv3_distif_save(&dist_ctx);

	/*
	 * From here, all the components of the GIC can be safely powered down
	 * as long as there is an alternate way to handle wakeup interrupt
	 * sources.
	 */
}
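On the wakeup path, the contexts are restored in the reverse order: Distributor first, then the Redistributor of the calling CPU. A minimal sketch of the matching resume helper, assuming the gicv3_distif_init_restore()/gicv3_rdistif_init_restore() calls from TF-A's GICv3 driver and the same dist_ctx/rdist_ctx storage as above:

void plat_versal_gic_resume(void)
{
	/* Restore the GIC Distributor context first */
	gicv3_distif_init_restore(&dist_ctx);

	/*
	 * Restore the Redistributor context of the CPU that issued the
	 * SYSTEM SUSPEND call. If an ITS context was saved, it would be
	 * restored at this point as well.
	 */
	gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
}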
Example #2
/*******************************************************************************
 * This cpu is being turned off. Allow the OPTEED/OPTEE to perform any actions
 * needed
 ******************************************************************************/
static int32_t opteed_cpu_off_handler(uint64_t unused)
{
	int32_t rc = 0;
	uint32_t linear_id = plat_my_core_pos();
	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];

	assert(optee_vectors);
	assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_ON);

	/* Program the entry point and enter OPTEE */
	cm_set_elr_el3(SECURE, (uint64_t) &optee_vectors->cpu_off_entry);
	rc = opteed_synchronous_sp_entry(optee_ctx);

	/*
	 * Read the response from OPTEE. A non-zero return means that
	 * something went wrong while communicating with OPTEE.
	 */
	if (rc != 0)
		panic();

	/*
	 * Reset OPTEE's context for a fresh start when this cpu is turned on
	 * subsequently.
	 */
	set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_OFF);

	return 0;
}
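This handler is not called directly: the dispatcher registers a table of power-management hooks with the PSCI framework, which then invokes .svc_off during CPU_OFF. A sketch of that wiring, assuming TF-A's spd_pm_ops_t/psci_register_spd_pm_hook() interface; the sibling handlers named here are assumed to exist alongside the ones shown in these examples:

static const spd_pm_ops_t opteed_pm = {
	.svc_on = opteed_cpu_on_handler,
	.svc_off = opteed_cpu_off_handler,
	.svc_suspend = opteed_cpu_suspend_handler,
	.svc_on_finish = opteed_cpu_on_finish_handler,
	.svc_suspend_finish = opteed_cpu_suspend_finish_handler,
};

/* Registered once while the dispatcher is being set up */
psci_register_spd_pm_hook(&opteed_pm);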
Example #3
/*******************************************************************************
 * This cpu has been turned on. Enter OPTEE to initialise S-EL1 and other bits
 * before passing control back to the Secure Monitor. Entry into S-EL1 is done
 * after initialising minimal architectural state that guarantees safe
 * execution.
 ******************************************************************************/
static void opteed_cpu_on_finish_handler(uint64_t unused)
{
	int32_t rc = 0;
	uint32_t linear_id = plat_my_core_pos();
	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
	entry_point_info_t optee_on_entrypoint;

	assert(optee_vectors);
	assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_OFF);

	opteed_init_optee_ep_state(&optee_on_entrypoint, opteed_rw,
				(uint64_t)&optee_vectors->cpu_on_entry,
				0, 0, 0, optee_ctx);

	/* Initialise this cpu's secure context */
	cm_init_my_context(&optee_on_entrypoint);

	/* Enter OPTEE */
	rc = opteed_synchronous_sp_entry(optee_ctx);

	/*
	 * Read the response from OPTEE. A non-zero return means that
	 * something went wrong while communicating with OPTEE.
	 */
	if (rc != 0)
		panic();

	/* Update its context to reflect the state OPTEE is in */
	set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON);
}
Example #4
void plat_marvell_gic_init(void)
{
	gicv2_distif_init();
	gicv2_pcpu_distif_init();
	gicv2_set_pe_target_mask(plat_my_core_pos());
	gicv2_cpuif_enable();
}
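This sequence assumes the GICv2 driver was initialised earlier with the platform's register bases. A hedged sketch of that one-time setup; the base-address macros and the helper name are placeholders for whatever the platform really defines:

static const gicv2_driver_data_t marvell_gic_data = {
	.gicd_base = PLAT_MARVELL_GICD_BASE,	/* placeholder */
	.gicc_base = PLAT_MARVELL_GICC_BASE,	/* placeholder */
	/*
	 * Platforms that call gicv2_set_pe_target_mask() also supply the
	 * .target_masks array here.
	 */
};

void plat_marvell_gic_driver_init(void)
{
	gicv2_driver_init(&marvell_gic_data);
}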
Example #5
static uint64_t a7k8k_pmu_interrupt_handler(uint32_t id,
					  uint32_t flags,
					  void *handle,
					  void *cookie)
{
	unsigned int idx = plat_my_core_pos();
	uint32_t irq;

	bakery_lock_get(&a7k8k_irq_lock);

	/* Acknowledge IRQ */
	irq = plat_ic_acknowledge_interrupt();

	plat_ic_end_of_interrupt(irq);

	if (irq != MARVELL_IRQ_PIC0) {
		bakery_lock_release(&a7k8k_irq_lock);
		return 0;
	}

	/* Acknowledge PMU overflow IRQ in PIC0 */
	mmio_setbits_32(A7K8K_PIC_CAUSE_REG, A7K8K_PIC_PMUOF_IRQ_MASK);

	/* Trigger ODMI Frame IRQ */
	mmio_write_32(A7K8K_ODMIN_SET_REG, A7K8K_ODMI_PMU_IRQ(idx));

	bakery_lock_release(&a7k8k_irq_lock);

	return 0;
}
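For this handler to run at all, it has to be registered with TF-A's EL3 interrupt management framework. A sketch of that registration; the wrapper function name is hypothetical:

static void a7k8k_pmu_irq_register(void)
{
	uint32_t flags = 0U;
	int32_t rc;

	/* Route EL3 interrupt types to EL3 while in the non-secure state */
	set_interrupt_rm_flag(flags, NON_SECURE);

	rc = register_interrupt_type_handler(INTR_TYPE_EL3,
					     a7k8k_pmu_interrupt_handler,
					     flags);
	if (rc != 0)
		panic();
}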
Example #6
/******************************************************************************
 * This function is invoked after CPU power up and initialization. It sets
 * the affinity info state of the current CPU to ON and the target and
 * requested power states of the CPU and all its ancestor power domains to
 * RUN.
 *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
	unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Reset the local_state to RUN for the non cpu power domains. */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
		psci_non_cpu_pd_nodes[parent_idx].local_state =
				PSCI_LOCAL_STATE_RUN;
#if !USE_COHERENT_MEM
		flush_dcache_range(
				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
		psci_set_req_local_pwr_state(lvl,
					     cpu_idx,
					     PSCI_LOCAL_STATE_RUN);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the affinity info state to ON */
	psci_set_aff_info_state(AFF_STATE_ON);

	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
	flush_cpu_data(psci_svc_cpu_data);
}
Example #7
/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information, finds the highest power level that was
 * powered off, and performs the generic, architectural and platform setup
 * and state management needed to power on that level and the levels below
 * it. For example, for a cpu that has been powered on it calls the platform
 * specific code to enable the gic cpu interface; for a cluster it also
 * enables coherency at the interconnect level.
 ******************************************************************************/
void psci_power_up_finish(void)
{
	unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that, by the time all locks are taken, a consistent snapshot of the
	 * system topology has been captured and state management can be done
	 * safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);

	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handlers and perform the generic, architectural
	 * and platform specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	/*
	 * This loop releases the lock corresponding to each power level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);
}
Example #8
/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
					const psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	const plat_local_state_t *pd_state = target_state->pwr_domain_state;

	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

	/*
	 * Need to flush as local_state will be accessed with Data Cache
	 * disabled during power on
	 */
	flush_cpu_data(psci_svc_cpu_data.local_state);

	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local_state from state_info */
	for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
		psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
#if !USE_COHERENT_MEM
		flush_dcache_range(
				(uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}
Example #9
/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
static void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
					     psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	plat_local_state_t *pd_state = target_state->pwr_domain_state;

	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local power state from node to state_info */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
#if !USE_COHERENT_MEM
		/*
		 * If using normal memory for psci_non_cpu_pd_nodes, we need
		 * to flush before reading the local power state as another
		 * cpu in the same power domain could have updated it and this
		 * code runs before caches are enabled.
		 */
		flush_dcache_range(
				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
		pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the higher levels to RUN */
	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}
Example #10
/*******************************************************************************
 * This function passes control to the XILSP image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by xilspd_setup() which can be directly
 * used. It also assumes that a valid non-secure context has been initialised
 * by PSCI so it does not need to save and restore any non-secure state. This
 * function performs a synchronous entry into the Secure payload. The SP passes
 * control back to this routine through an SMC.
 ******************************************************************************/
int32_t xilspd_init(void)
{
	uint32_t linear_id = plat_my_core_pos();
	xilsp_context_t *xilsp_ctx = &xilspd_sp_context[linear_id];
	entry_point_info_t *xilsp_entry_point;
	uint64_t rc;

	/*
	 * Get information about the XILSP (BL32) image. Its absence
	 * is a critical failure.
	 */
	xilsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	assert(xilsp_entry_point);

	cm_init_my_context(xilsp_entry_point);

	/*
	 * Arrange for an entry into the XILSP. It will be returned via
	 * XILSP_ENTRY_DONE case
	 */
	rc = xilspd_synchronous_sp_entry(xilsp_ctx);
	assert(rc != 0);

	return rc;
}
Example #11
static void zynqmp_nopmu_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	uint32_t r;
	unsigned int cpu_id = plat_my_core_pos();

	for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
		VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
			__func__, i, target_state->pwr_domain_state[i]);

	/* set power down request */
	r = mmio_read_32(APU_PWRCTL);
	r |= (1 << cpu_id);
	mmio_write_32(APU_PWRCTL, r);

	/* program RVBAR */
	mmio_write_32(APU_RVBAR_L_0 + (cpu_id << 3), zynqmp_sec_entry);
	mmio_write_32(APU_RVBAR_H_0 + (cpu_id << 3), zynqmp_sec_entry >> 32);

	/* clear VINITHI */
	r = mmio_read_32(APU_CONFIG_0);
	r &= ~(1 << APU_CONFIG_0_VINITHI_SHIFT << cpu_id);
	mmio_write_32(APU_CONFIG_0, r);

	/* enable power up on IRQ */
	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_EN, 1 << cpu_id);
}
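The two read-modify-write sequences above can also be expressed with the mmio_setbits_32()/mmio_clrbits_32() helpers from TF-A's mmio.h, as the Marvell handler earlier already does for the PIC0 acknowledgement; an equivalent sketch:

	/* set power down request */
	mmio_setbits_32(APU_PWRCTL, 1 << cpu_id);

	/* clear VINITHI */
	mmio_clrbits_32(APU_CONFIG_0,
			(1 << APU_CONFIG_0_VINITHI_SHIFT) << cpu_id);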
Example #12
/*******************************************************************************
 * This cpu is being turned off. Allow the TSPD/TSP to perform any actions
 * needed
 ******************************************************************************/
static int32_t tspd_cpu_off_handler(uint64_t unused)
{
	int32_t rc = 0;
	uint32_t linear_id = plat_my_core_pos();
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];

	assert(tsp_vectors);
	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);

	/* Program the entry point and enter the TSP */
	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_off_entry);
	rc = tspd_synchronous_sp_entry(tsp_ctx);

	/*
	 * Read the response from the TSP. A non-zero return means that
	 * something went wrong while communicating with the TSP.
	 */
	if (rc != 0)
		panic();

	/*
	 * Reset TSP's context for a fresh start when this cpu is turned on
	 * subsequently.
	 */
	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);

	return 0;
}
Example #13
/*******************************************************************************
 * This function passes control to the Secure Payload image (BL32) for the first
 * time on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by tspd_setup() which can be directly used.
 * It also assumes that a valid non-secure context has been initialised by PSCI
 * so it does not need to save and restore any non-secure state. This function
 * performs a synchronous entry into the Secure payload. The SP passes control
 * back to this routine through an SMC.
 ******************************************************************************/
int32_t tspd_init(void)
{
	uint32_t linear_id = plat_my_core_pos();
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	entry_point_info_t *tsp_entry_point;
	uint64_t rc;

	/*
	 * Get information about the Secure Payload (BL32) image. Its
	 * absence is a critical failure.
	 */
	tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	assert(tsp_entry_point);

	cm_init_my_context(tsp_entry_point);

	/*
	 * Arrange for an entry into the test secure payload. It will be
	 * returned via TSP_ENTRY_DONE case
	 */
	rc = tspd_synchronous_sp_entry(tsp_ctx);
	assert(rc != 0);

	return rc;
}
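tspd_init() is not invoked directly; the dispatcher hands it to the BL31 framework during setup so that it runs once the runtime services have been initialised. A one-line sketch, assuming the bl31_register_bl32_init() hook that BL31 exposes:

	/* Inside tspd_setup(), once the TSP image has been validated */
	bl31_register_bl32_init(&tspd_init);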
Example #14
/*******************************************************************************
 * This function takes an SP context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx, int can_preempt)
{
	uint64_t rc;
	unsigned int linear_id = plat_my_core_pos();

	assert(sp_ctx != NULL);

	/* Assign the context of the SP to this CPU */
	spm_cpu_set_sp_ctx(linear_id, sp_ctx);
	cm_set_context(&(sp_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Invalidate TLBs at EL1. */
	tlbivmalle1();
	dsbish();

	if (can_preempt == 1) {
		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
	} else {
		disable_intr_rm_local(INTR_TYPE_NS, SECURE);
	}

	/* Enter Secure Partition */
	rc = spm_secure_partition_enter(&sp_ctx->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}
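The counterpart of this function jumps back to the C runtime context saved by spm_secure_partition_enter(), handing rc to the original caller. A sketch, assuming a spm_cpu_get_sp_ctx() accessor that matches the spm_cpu_set_sp_ctx() call above:

__dead2 void spm_sp_synchronous_exit(uint64_t rc)
{
	/* Get the context of the SP in use by this CPU */
	unsigned int linear_id = plat_my_core_pos();
	sp_context_t *ctx = spm_cpu_get_sp_ctx(linear_id);

	/*
	 * Jump back to the C runtime context saved when
	 * spm_secure_partition_enter() was called, with rc in x0.
	 */
	spm_secure_partition_exit(ctx->c_rt_ctx, rc);

	/* Should never be reached */
	panic();
}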
Example #15
/*
 * Helper function to suspend a CPU power domain and its parent power domains
 * if applicable.
 */
void css_scp_suspend(const psci_power_state_t *target_state)
{
	int lvl, ret;
	uint32_t scmi_pwr_state = 0;

	/* At least power domain level 0 should be specified to be suspended */
	assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
						ARM_LOCAL_STATE_OFF);

	/* Check if power down at system power domain level is requested */
	if (CSS_SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
		/* Issue SCMI command for SYSTEM_SUSPEND */
		ret = scmi_sys_pwr_state_set(scmi_handle,
				SCMI_SYS_PWR_FORCEFUL_REQ,
				SCMI_SYS_PWR_SUSPEND);
		if (ret != SCMI_E_SUCCESS) {
			ERROR("SCMI system power domain suspend return 0x%x unexpected\n",
					ret);
			panic();
		}
		return;
	}

	/*
	 * If we reach here, power down at the system power domain level was
	 * not requested, so that level must remain in the RUN state.
	 */
	assert(target_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] ==
							ARM_LOCAL_STATE_RUN);

	/* For level 0, specify `scmi_power_state_sleep` as the power state */
	SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, ARM_PWR_LVL0,
						scmi_power_state_sleep);

	for (lvl = ARM_PWR_LVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
		if (target_state->pwr_domain_state[lvl] == ARM_LOCAL_STATE_RUN)
			break;

		assert(target_state->pwr_domain_state[lvl] ==
							ARM_LOCAL_STATE_OFF);
		/*
		 * Specify `scmi_power_state_off` as power state for higher
		 * levels.
		 */
		SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
						scmi_power_state_off);
	}

	SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);

	ret = scmi_pwr_state_set(scmi_handle,
		plat_css_core_pos_to_scmi_dmn_id_map[plat_my_core_pos()],
		scmi_pwr_state);

	if (ret != SCMI_E_SUCCESS) {
		ERROR("SCMI set power state command return 0x%x unexpected\n",
				ret);
		panic();
	}
}
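As a concrete illustration of what the loop composes: for a core that sleeps while its cluster powers off, the composite SCMI power state would be built as below, using only the macros already seen in this function:

	uint32_t scmi_pwr_state = 0;

	/* Level 0 (core) sleeps, level 1 (cluster) is off */
	SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, ARM_PWR_LVL0,
						scmi_power_state_sleep);
	SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, ARM_PWR_LVL1,
						scmi_power_state_off);

	/* Record the highest level that carries a meaningful state */
	SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, ARM_PWR_LVL1);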
Example #16
/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the
 * OPTEED. It validates the interrupt and upon success arranges entry into
 * the OPTEE at 'optee_fiq_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	optee_context_t *optee_ctx;

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the OPTEE */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's OPTEE context */
	linear_id = plat_my_core_pos();
	optee_ctx = &opteed_sp_context[linear_id];
	assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

	cm_set_elr_el3(SECURE, (uint64_t)&optee_vectors->fiq_entry);
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/*
	 * Tell OPTEE that it has to handle an FIQ (synchronously). The
	 * address of the normal-world instruction at which the interrupt
	 * was generated is also passed for debugging purposes. It is safe
	 * to retrieve this address from ELR_EL3 as the secure context
	 * will not take effect until el3_exit().
	 */
	SMC_RET1(&optee_ctx->cpu_ctx, read_elr_el3());
}
Example #17
/*
 * Helper function to turn off a CPU power domain and its parent power domains
 * if applicable.
 */
void css_scp_off(const psci_power_state_t *target_state)
{
	int lvl = 0, ret;
	uint32_t scmi_pwr_state = 0;

	/* At least the CPU level should be specified to be OFF */
	assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
							ARM_LOCAL_STATE_OFF);

	/* PSCI CPU OFF cannot be used to turn OFF system power domain */
	assert(target_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] ==
							ARM_LOCAL_STATE_RUN);

	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
		if (target_state->pwr_domain_state[lvl] == ARM_LOCAL_STATE_RUN)
			break;

		assert(target_state->pwr_domain_state[lvl] ==
							ARM_LOCAL_STATE_OFF);
		SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
				scmi_power_state_off);
	}

	SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);

	ret = scmi_pwr_state_set(scmi_handle,
		plat_css_core_pos_to_scmi_dmn_id_map[plat_my_core_pos()],
		scmi_pwr_state);

	if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) {
		ERROR("SCMI set power state command return 0x%x unexpected\n",
				ret);
		panic();
	}
}
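Example #18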
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				u_register_t arg2, u_register_t arg3)
{
	/* Initialize the debug console as soon as possible */
	console_16550_register(SUNXI_UART0_BASE, SUNXI_UART0_CLK_IN_HZ,
			       SUNXI_UART0_BAUDRATE, &console);

#ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
#endif

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
	bl33_image_ep_info.spsr = SPSR_64(MODE_EL2, MODE_SP_ELX,
					  DISABLE_ALL_EXCEPTIONS);
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

	/* Turn off all secondary CPUs */
	sunxi_disable_secondary_cpus(plat_my_core_pos());
}
Example #19
static tsp_args_t *set_smc_args(uint64_t arg0,
			     uint64_t arg1,
			     uint64_t arg2,
			     uint64_t arg3,
			     uint64_t arg4,
			     uint64_t arg5,
			     uint64_t arg6,
			     uint64_t arg7)
{
	uint32_t linear_id;
	tsp_args_t *pcpu_smc_args;

	/*
	 * Return to Secure Monitor by raising an SMC. The results of the
	 * service are passed as arguments to the SMC.
	 */
	linear_id = plat_my_core_pos();
	pcpu_smc_args = &tsp_smc_args[linear_id];
	write_sp_arg(pcpu_smc_args, TSP_ARG0, arg0);
	write_sp_arg(pcpu_smc_args, TSP_ARG1, arg1);
	write_sp_arg(pcpu_smc_args, TSP_ARG2, arg2);
	write_sp_arg(pcpu_smc_args, TSP_ARG3, arg3);
	write_sp_arg(pcpu_smc_args, TSP_ARG4, arg4);
	write_sp_arg(pcpu_smc_args, TSP_ARG5, arg5);
	write_sp_arg(pcpu_smc_args, TSP_ARG6, arg6);
	write_sp_arg(pcpu_smc_args, TSP_ARG7, arg7);

	return pcpu_smc_args;
}
Example #20
/*******************************************************************************
 * This function passes control to the OPTEE image (BL32) for the first time
 * on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by opteed_setup() which can be directly
 * used.  It also assumes that a valid non-secure context has been
 * initialised by PSCI so it does not need to save and restore any
 * non-secure state. This function performs a synchronous entry into
 * OPTEE. OPTEE passes control back to this routine through an SMC.
 ******************************************************************************/
static int32_t opteed_init(void)
{
	uint32_t linear_id = plat_my_core_pos();
	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
	entry_point_info_t *optee_entry_point;
	uint64_t rc;

	/*
	 * Get information about the OPTEE (BL32) image. Its
	 * absence is a critical failure.
	 */
	optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	assert(optee_entry_point);

	cm_init_my_context(optee_entry_point);

	/*
	 * Arrange for an entry into OPTEE. It will be returned via
	 * OPTEE_ENTRY_DONE case
	 */
	rc = opteed_synchronous_sp_entry(optee_ctx);
	assert(rc != 0);

	return rc;
}
Example #21
/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload after this
 * cpu's architectural state has been restored after wakeup from an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
tsp_args_t *tsp_cpu_resume_main(uint64_t suspend_level,
			      uint64_t arg1,
			      uint64_t arg2,
			      uint64_t arg3,
			      uint64_t arg4,
			      uint64_t arg5,
			      uint64_t arg6,
			      uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Restore the generic timer context */
	tsp_generic_timer_restore();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_resume_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx resumed. suspend level %ld\n",
		read_mpidr(), suspend_level);
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_suspend_count);
	spin_unlock(&console_lock);
#endif
	/* Indicate to the SPD that we have completed this request */
	return set_smc_args(TSP_RESUME_DONE, 0, 0, 0, 0, 0, 0, 0);
}
Example #22
/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * before the system is reset (in response to a psci SYSTEM_RESET request)
 ******************************************************************************/
tsp_args_t *tsp_system_reset_main(uint64_t arg0,
				uint64_t arg1,
				uint64_t arg2,
				uint64_t arg3,
				uint64_t arg4,
				uint64_t arg5,
				uint64_t arg6,
				uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx SYSTEM_RESET request\n", read_mpidr());
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", read_mpidr(),
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count);
	spin_unlock(&console_lock);
#endif

	/* Indicate to the SPD that we have completed this request */
	return set_smc_args(TSP_SYSTEM_RESET_DONE, 0, 0, 0, 0, 0, 0, 0);
}
Example #23
/*******************************************************************************
 * TSP main entry point where it gets the opportunity to initialize its secure
 * state/applications. Once the state is initialized, it must return to the
 * SPD with a pointer to the 'tsp_vector_table' jump table.
 ******************************************************************************/
uint64_t tsp_main(void)
{
	NOTICE("TSP: %s\n", version_string);
	NOTICE("TSP: %s\n", build_message);
	INFO("TSP: Total memory base : 0x%lx\n", BL32_TOTAL_BASE);
	INFO("TSP: Total memory size : 0x%lx bytes\n",
			 BL32_TOTAL_LIMIT - BL32_TOTAL_BASE);

	uint32_t linear_id = plat_my_core_pos();

	/* Initialize the platform */
	tsp_platform_setup();

	/* Initialize secure/applications state here */
	tsp_generic_timer_start();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
	     read_mpidr(),
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count,
	     tsp_stats[linear_id].cpu_on_count);
	spin_unlock(&console_lock);
#endif
	return (uint64_t) &tsp_vector_table;
}
Example #24
/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * after this cpu's architectural state has been set up in response to an earlier
 * psci cpu_on request.
 ******************************************************************************/
tsp_args_t *tsp_cpu_on_main(void)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Initialize secure/applications state here */
	tsp_generic_timer_start();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx turned on\n", read_mpidr());
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_on_count);
	spin_unlock(&console_lock);
#endif
	/* Indicate to the SPD that we have finished turning ourselves on */
	return set_smc_args(TSP_ON_DONE, 0, 0, 0, 0, 0, 0, 0);
}
Example #25
/*******************************************************************************
 * This cpu has resumed from suspend. The SPD saved the TSP context when it
 * completed the preceding suspend call. Use that context to program an entry
 * into the TSP to allow it to do any remaining bookkeeping
 ******************************************************************************/
static void tspd_cpu_suspend_finish_handler(uint64_t max_off_pwrlvl)
{
	int32_t rc = 0;
	uint32_t linear_id = plat_my_core_pos();
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];

	assert(tsp_vectors);
	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_SUSPEND);

	/* Program the entry point, max_off_pwrlvl and enter the SP */
	write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx),
		      CTX_GPREG_X0,
		      max_off_pwrlvl);
	cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_resume_entry);
	rc = tspd_synchronous_sp_entry(tsp_ctx);

	/*
	 * Read the response from the TSP. A non-zero return means that
	 * something went wrong while communicating with the TSP.
	 */
	if (rc != 0)
		panic();

	/* Update its context to reflect the state the SP is in */
	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
}
Example #26
static void __dead2 sunxi_system_off(void)
{
	/* Turn off all secondary CPUs */
	sunxi_disable_secondary_cpus(plat_my_core_pos());

	sunxi_power_down();
}
Example #27
/*******************************************************************************
 * This function is passed the target local power states for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl).
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * updates `last_cpu_in_non_cpu_pd[]` with the ID of the last CPU to power down.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 ******************************************************************************/
void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
			const psci_power_state_t *state_info)
{
	int lvl, parent_idx, cpu_idx = plat_my_core_pos();

	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
	assert(state_info);

	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {

		/* Break early if the target power state is RUN */
		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
			break;

		/*
		 * The power domain is entering a low power state, so this is
		 * the last CPU for this power domain
		 */
		last_cpu_in_non_cpu_pd[parent_idx] = cpu_idx;

		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

}
Example #28
static int cores_pwr_domain_off(void)
{
	uint32_t cpu_id = plat_my_core_pos();

	cpus_power_domain_off(cpu_id, core_pwr_wfi);

	return 0;
}
Example #29
/*
 * This function stores the `ts` value to the storage identified by
 * `base_addr`, `tid` and current cpu id.
 * Note: The timestamp addresses are cache line aligned per cpu
 * and only the owning CPU would ever write into it.
 */
void __pmf_store_timestamp(uintptr_t base_addr,
			   unsigned int tid,
			   unsigned long long ts)
{
	unsigned long long *ts_addr = (unsigned long long *)calc_ts_addr(base_addr,
					tid, plat_my_core_pos());
	*ts_addr = ts;
}
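Callers normally reach this function through the PMF capture macros rather than directly. A hedged usage sketch, assuming the PMF_CAPTURE_TIMESTAMP() macro from TF-A's pmf.h and a PMF service named "svc" registered elsewhere:

	/*
	 * Store the current timestamp as time-stamp ID 0 of the
	 * (hypothetical) "svc" service; PMF_NO_CACHE_MAINT skips the
	 * cache maintenance on the stored value.
	 */
	PMF_CAPTURE_TIMESTAMP(svc, 0, PMF_NO_CACHE_MAINT);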
Example #30
/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the TSPD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_sel1_intr_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	tsp_context_t *tsp_ctx;

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the TSP */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's TSP context */
	linear_id = plat_my_core_pos();
	tsp_ctx = &tspd_sp_context[linear_id];
	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

	/*
	 * Determine if the TSP was previously preempted. Its last known
	 * context has to be preserved in this case.
	 * The TSP should return control to the TSPD after handling this
	 * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
	 * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
	 * structure. There is no need to save the secure system register
	 * context since the TSP is supposed to preserve it during S-EL1
	 * interrupt handling.
	 */
	if (get_std_smc_active_flag(tsp_ctx->state)) {
		tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						      CTX_SPSR_EL3);
		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						     CTX_ELR_EL3);
#if TSP_NS_INTR_ASYNC_PREEMPT
		/* Need to save the previously interrupted secure context */
		memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
#endif
	}

	cm_el1_sysregs_context_restore(SECURE);
	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
		    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the TSP that it has to handle an S-EL1 interrupt
	 * synchronously. The address of the normal-world instruction at
	 * which the interrupt was generated is also passed for debugging
	 * purposes. It is safe to retrieve this address from ELR_EL3 as
	 * the secure context will not take effect until el3_exit().
	 */
	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN, read_elr_el3());
}
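As with the EL3 PMU handler earlier, this handler only runs once it has been registered with the interrupt management framework; the TSPD does this after the TSP has finished initialising. A sketch of that registration:

	uint32_t flags = 0U;
	int32_t rc;

	/* Route S-EL1 interrupts taken from the non-secure state to EL3 */
	set_interrupt_rm_flag(flags, NON_SECURE);

	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     tspd_sel1_interrupt_handler,
					     flags);
	if (rc != 0)
		panic();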