Example no. 1
/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * after this cpu's architectural state has been set up in response to an
 * earlier psci cpu_on request.
 * psci cpu_on request.
 ******************************************************************************/
tsp_args_t *tsp_cpu_on_main(void)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Initialize secure/applications state here */
	tsp_generic_timer_start();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx turned on\n", read_mpidr());
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_on_count);
	spin_unlock(&console_lock);
#endif
	/* Indicate to the SPD that we have finished turning ourselves on */
	return set_smc_args(TSP_ON_DONE, 0, 0, 0, 0, 0, 0, 0);
}
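The per-cpu counters bumped above live in a `tsp_stats` array indexed by the core's linear id. A minimal sketch of a plausible layout follows: the field names are taken from the handlers shown in this section, but the struct itself and the use of `PLATFORM_CORE_COUNT` from the platform headers are assumptions, not the payload's actual definition.

/* Hypothetical layout for the per-cpu statistics used in these examples */
typedef struct work_statistics {
	uint32_t smc_count;		/* SMCs serviced on this cpu */
	uint32_t eret_count;		/* entries into the payload */
	uint32_t cpu_on_count;		/* cpu_on requests */
	uint32_t cpu_suspend_count;	/* cpu_suspend requests */
	uint32_t cpu_resume_count;	/* cpu_resume requests */
	uint32_t irq_count;		/* irqs received */
	uint32_t sync_fiq_count;	/* synchronously handled FIQs */
	uint32_t sync_fiq_ret_count;	/* FIQ handling returns to the SPD */
	uint32_t sel1_intr_count;	/* S-EL1 interrupts handled */
	uint32_t preempt_intr_count;	/* preemptions by non-secure irqs */
} work_statistics_t;

work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];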
Example no. 2
/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload after this
 * cpu's architectural state has been restored after wakeup from an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
tsp_args_t *tsp_cpu_resume_main(uint64_t suspend_level,
			      uint64_t arg1,
			      uint64_t arg2,
			      uint64_t arg3,
			      uint64_t arg4,
			      uint64_t arg5,
			      uint64_t arg6,
			      uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Restore the generic timer context */
	tsp_generic_timer_restore();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_resume_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx resumed. suspend level %ld\n",
		read_mpidr(), suspend_level);
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_suspend_count);
	spin_unlock(&console_lock);
#endif
	/* Indicate to the SPD that we have completed this request */
	return set_smc_args(TSP_RESUME_DONE, 0, 0, 0, 0, 0, 0, 0);
}
Example no. 3
/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * before the system is reset (in response to a psci SYSTEM_RESET request).
 ******************************************************************************/
tsp_args_t *tsp_system_reset_main(uint64_t arg0,
				uint64_t arg1,
				uint64_t arg2,
				uint64_t arg3,
				uint64_t arg4,
				uint64_t arg5,
				uint64_t arg6,
				uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx SYSTEM_RESET request\n", read_mpidr());
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", read_mpidr(),
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count);
	spin_unlock(&console_lock);
#endif

	/* Indicate to the SPD that we have completed this request */
	return set_smc_args(TSP_SYSTEM_RESET_DONE, 0, 0, 0, 0, 0, 0, 0);
}
Example no. 4
int ari_online_core(uint32_t ari_base, uint32_t core)
{
	int cpu = read_mpidr() & MPIDR_CPU_MASK;
	int cluster = (read_mpidr() & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;
	int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/* construct the current CPU # */
	cpu |= (cluster << 2);

	/* sanity check target core id */
	if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) {
		ERROR("%s: unsupported core id (%d)\n", __func__, core);
		return EINVAL;
	}

	/*
	 * The Denver cluster has 2 CPUs only - 0, 1.
	 */
	if (impl == DENVER_IMPL && ((core == 2) || (core == 3))) {
		ERROR("%s: unknown core id (%d)\n", __func__, core);
		return EINVAL;
	}

	/* clean the previous response state */
	ari_clobber_response(ari_base);

	return ari_request_wait(ari_base, 0, TEGRA_ARI_ONLINE_CORE, core, 0);
}
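As a usage sketch, a hypothetical platform power-on hook could drive `ari_online_core` as below. `tegra_mce_ari_base()` is an assumed helper returning the ARI MMIO base for the current cpu; it is not part of the code above.

/* Hypothetical caller: derive the linear core id the same way
 * ari_online_core does (cluster * 4 + cpu) and request the online. */
static int plat_core_on(u_register_t target_mpidr)
{
	uint32_t cpu = (uint32_t)(target_mpidr & MPIDR_CPU_MASK);
	uint32_t cluster = (uint32_t)((target_mpidr & MPIDR_CLUSTER_MASK) >>
				      MPIDR_AFFINITY_BITS);

	return ari_online_core(tegra_mce_ari_base(), (cluster << 2) | cpu);
}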
int32_t tsp_irq_received(void)
{
	uint32_t linear_id = plat_my_core_pos();

	tsp_stats[linear_id].irq_count++;
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	spin_lock(&console_lock);
	VERBOSE("TSP: cpu 0x%lx received irq\n", read_mpidr());
	VERBOSE("TSP: cpu 0x%lx: %d irq requests\n",
		read_mpidr(), tsp_stats[linear_id].irq_count);
	spin_unlock(&console_lock);
#endif
	return TSP_PREEMPTED;
}
Example no. 6
static int32_t tbase_init_secure_context(tbase_context *tbase_ctx)
{
  uint32_t sctlr = read_sctlr_el3();
  el1_sys_regs_t *el1_state;
  uint64_t mpidr = read_mpidr();

  /* Passing a NULL context is a critical programming error */
  assert(tbase_ctx);
  
  DBG_PRINTF("tbase_init_secure_context\n\r");

  memset(tbase_ctx, 0, sizeof(*tbase_ctx));

  /* Get a pointer to the S-EL1 context memory */
  el1_state = get_sysregs_ctx(&tbase_ctx->cpu_ctx);

  /*
   * Program the sctlr for S-EL1 execution with caches and mmu off: keep
   * only EL3's endianness (EE) setting and set the RES1 bits.
   */
  sctlr &= SCTLR_EE_BIT;
  sctlr |= SCTLR_EL1_RES1;
  write_ctx_reg(el1_state, CTX_SCTLR_EL1, sctlr);

  /* Set this context as ready to be initialised, i.e. OFF */
  tbase_ctx->state = TBASE_STATE_OFF;

  /* Associate this context with the cpu specified */
  tbase_ctx->mpidr = mpidr;

  /* Set up cm context for this core */
  cm_set_context(mpidr, &tbase_ctx->cpu_ctx, SECURE);
  /* cm_init_exception_stack(mpidr, SECURE); */

  return 0;
}
Example no. 7
static void hikey_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	unsigned long mpidr;
	int cpu, cluster;

	mpidr = read_mpidr();
	cluster = MPIDR_AFFLVL1_VAL(mpidr);
	cpu = MPIDR_AFFLVL0_VAL(mpidr);

	/*
	 * Enable CCI coherency for this cluster.
	 * No need for locks as no other cpu is active at the moment.
	 */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));

	/* Zero the jump address in the mailbox for this cpu */
	hisi_pwrc_set_core_bx_addr(cpu, cluster, 0);

	/* Program the GIC per-cpu distributor or re-distributor interface */
	gicv2_pcpu_distif_init();
	/* Enable the GIC cpu interface */
	gicv2_cpuif_enable();
}
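For reference, the affinity accessors used above each extract one 8-bit MPIDR affinity field (Aff0 = cpu within cluster, Aff1 = cluster), essentially as TF-A's arch.h defines them:

#define MPIDR_AFFLVL_MASK	0xffULL
#define MPIDR_AFF0_SHIFT	0	/* Aff0: cpu within cluster */
#define MPIDR_AFF1_SHIFT	8	/* Aff1: cluster */
#define MPIDR_AFFLVL0_VAL(mpidr) \
	(((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
#define MPIDR_AFFLVL1_VAL(mpidr) \
	(((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)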
Example no. 8
static void tbase_triggerSgiDump(void)
{
  uint64_t mpidr = read_mpidr();
  uint32_t linear_id = platform_get_core_pos(mpidr);

  uint32_t SGITargets;

  /* Configure SGI */
  gicd_clr_igroupr(get_plat_config()->gicd_base, FIQ_SMP_CALL_SGI);
  gicd_set_ipriorityr(get_plat_config()->gicd_base, FIQ_SMP_CALL_SGI,
                      GIC_HIGHEST_SEC_PRIORITY);

  /* Enable SGI */
  gicd_set_isenabler(get_plat_config()->gicd_base, FIQ_SMP_CALL_SGI);

  /*
   * Send SGIs to all cores except the current one (the current core will
   * directly branch to the dump handler).
   */
  SGITargets = 0xFF;
  SGITargets &= ~(1 << linear_id);

  /* Trigger SGI */
  irq_raise_softirq(SGITargets, FIQ_SMP_CALL_SGI);

  /* Current core directly branches to dump handler */
  plat_tbase_dump();
}
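A minimal sketch of the `irq_raise_softirq` primitive assumed above, for a GICv2 distributor: the architectural GICD_SGIR register takes the SGI id in bits [3:0] and the CPU target list in bits [23:16], with TargetListFilter 0b00 meaning "use the target list". The register layout is from the GICv2 specification; the function body itself is an assumption, not the platform's actual implementation.

#define GICD_SGIR	0xF00U	/* GICv2 Software Generated Interrupt Register */

static void irq_raise_softirq(uint32_t target_mask, uint32_t sgi_id)
{
	/* Ensure the SGI configuration above is complete before triggering */
	dsbsy();
	mmio_write_32(get_plat_config()->gicd_base + GICD_SGIR,
		      ((target_mask & 0xffU) << 16) | (sgi_id & 0xfU));
}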
Example no. 9
int psci_cpu_suspend(unsigned int power_state,
		     unsigned long entrypoint,
		     unsigned long context_id)
{
	int rc;
	unsigned long mpidr;
	unsigned int target_afflvl, pstate_type;

	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);
	if (target_afflvl > MPIDR_MAX_AFFLVL)
		return PSCI_E_INVALID_PARAMS;

	pstate_type = psci_get_pstate_type(power_state);
	if (pstate_type == PSTATE_TYPE_STANDBY) {
		if (psci_plat_pm_ops->affinst_standby)
			rc = psci_plat_pm_ops->affinst_standby(power_state);
		else
			return PSCI_E_INVALID_PARAMS;
	} else {
		mpidr = read_mpidr();
		rc = psci_afflvl_suspend(mpidr,
					 entrypoint,
					 context_id,
					 power_state,
					 MPIDR_AFFLVL0,
					 target_afflvl);
	}

	assert(rc == PSCI_E_INVALID_PARAMS || rc == PSCI_E_SUCCESS);
	return rc;
}
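The two `psci_get_pstate_*` helpers decode the PSCI v0.2 `power_state` parameter, which packs the StateID into bits [15:0], the StateType (standby vs. powerdown) into bit [16], and the AffinityLevel into bits [25:24]. A sketch consistent with that layout; the macro names here are illustrative, the real helpers live in psci_common.c:

#define PSTATE_TYPE_SHIFT	16
#define PSTATE_TYPE_MASK	0x1
#define PSTATE_AFFLVL_SHIFT	24
#define PSTATE_AFFLVL_MASK	0x3

static inline unsigned int psci_get_pstate_type(unsigned int power_state)
{
	/* Bit 16: 0 = standby, 1 = powerdown */
	return (power_state >> PSTATE_TYPE_SHIFT) & PSTATE_TYPE_MASK;
}

static inline unsigned int psci_get_pstate_afflvl(unsigned int power_state)
{
	/* Bits [25:24]: deepest affinity level affected by the suspend */
	return (power_state >> PSTATE_AFFLVL_SHIFT) & PSTATE_AFFLVL_MASK;
}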
Example no. 10
/*******************************************************************************
 * This function passes control to the OPTEE image (BL32) for the first time
 * on the primary cpu after a cold boot. It assumes that a valid secure
 * context has already been created by opteed_setup() which can be directly
 * used.  It also assumes that a valid non-secure context has been
 * initialised by PSCI so it does not need to save and restore any
 * non-secure state. This function performs a synchronous entry into
 * OPTEE. OPTEE passes control back to this routine through an SMC.
 ******************************************************************************/
static int32_t opteed_init(void)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);
	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
	entry_point_info_t *optee_entry_point;
	uint64_t rc;

	/*
	 * Get information about the OPTEE (BL32) image. Its
	 * absence is a critical failure.
	 */
	optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
	assert(optee_entry_point);

	cm_init_context(mpidr, optee_entry_point);

	/*
	 * Arrange for an entry into OPTEE. Control will be returned via the
	 * OPTEE_ENTRY_DONE case.
	 */
	rc = opteed_synchronous_sp_entry(optee_ctx);
	assert(rc != 0);

	return rc;
}
Example no. 11
int psci_cpu_suspend(unsigned int power_state,
		     unsigned long entrypoint,
		     unsigned long context_id)
{
	int rc;
	unsigned long mpidr;
	unsigned int target_afflvl, pstate_type;

	/* TODO: Standby states are not supported at the moment */
	pstate_type = psci_get_pstate_type(power_state);
	if (pstate_type == PSTATE_TYPE_STANDBY) {
		rc = PSCI_E_INVALID_PARAMS;
		goto exit;
	}

	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);
	if (target_afflvl > MPIDR_MAX_AFFLVL) {
		rc = PSCI_E_INVALID_PARAMS;
		goto exit;
	}

	mpidr = read_mpidr();
	rc = psci_afflvl_suspend(mpidr,
				 entrypoint,
				 context_id,
				 power_state,
				 MPIDR_AFFLVL0,
				 target_afflvl);

exit:
	if (rc != PSCI_E_SUCCESS)
		assert(rc == PSCI_E_INVALID_PARAMS);
	return rc;
}
void bl1_plat_set_ep_info(unsigned int image_id,
		entry_point_info_t *ep_info)
{
	unsigned int data = 0;
	uintptr_t tmp = HIKEY960_NS_TMP_OFFSET;

	if (image_id != NS_BL1U_IMAGE_ID)
		panic();
	/* Copy NS BL1U from 0x1AC1_8000 to 0x1AC9_8000 */
	memcpy((void *)tmp, (void *)HIKEY960_NS_IMAGE_OFFSET,
		NS_BL1U_SIZE);
	memcpy((void *)NS_BL1U_BASE, (void *)tmp, NS_BL1U_SIZE);
	inv_dcache_range(NS_BL1U_BASE, NS_BL1U_SIZE);
	/* Initialize the GIC driver, cpu and distributor interfaces */
	gicv2_driver_init(&hikey960_gic_data);
	gicv2_distif_init();
	gicv2_pcpu_distif_init();
	gicv2_cpuif_enable();
	/* CNTFRQ is read-only in EL1 */
	write_cntfrq_el0(plat_get_syscnt_freq2());
	data = read_cpacr_el1();
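	/*
	 * CPACR_EL1.FPEN (bits [21:20]) == 0b11 disables trapping of FP/SIMD
	 * accesses at EL1/EL0; keep writing and reading back until the bits
	 * are observed as set.
	 */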
	do {
		data |= 3 << 20;
		write_cpacr_el1(data);
		data = read_cpacr_el1();
	} while ((data & (3 << 20)) != (3 << 20));
	INFO("cpacr_el1:0x%x\n", data);

	ep_info->args.arg0 = 0xffff & read_mpidr();
	ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS);
}
/*******************************************************************************
 * Perform any BL1 specific platform actions.
 ******************************************************************************/
void bl1_early_platform_setup(void)
{
	const size_t bl1_size = BL1_RAM_LIMIT - BL1_RAM_BASE;

	/* Initialize the console to provide early debug support */
	console_init(PL011_UART2_BASE, PL011_UART2_CLK_IN_HZ, PL011_BAUDRATE);

	/*
	 * Enable CCI-400 for this cluster. No need for locks as no other cpu is
	 * active at the moment
	 */
	cci_init(CCI400_BASE,
		 CCI400_SL_IFACE3_CLUSTER_IX,
		 CCI400_SL_IFACE4_CLUSTER_IX);
	cci_enable_cluster_coherency(read_mpidr());

	/* Allow BL1 to see the whole Trusted RAM */
	bl1_tzram_layout.total_base = TZRAM_BASE;
	bl1_tzram_layout.total_size = TZRAM_SIZE;

	/* Calculate how much RAM BL1 is using and how much remains free */
	bl1_tzram_layout.free_base = TZRAM_BASE;
	bl1_tzram_layout.free_size = TZRAM_SIZE;
	reserve_mem(&bl1_tzram_layout.free_base,
		    &bl1_tzram_layout.free_size,
		    BL1_RAM_BASE,
		    bl1_size);

	INFO("BL1: 0x%lx - 0x%lx [size = %u]\n", BL1_RAM_BASE, BL1_RAM_LIMIT,
	     bl1_size);
}
Example no. 14
/*******************************************************************************
 * This cpu is being suspended. S-EL1 state must have been saved in the
 * resident cpu (mpidr format) if it is a UP/UP migratable TSP.
 ******************************************************************************/
static void tspd_cpu_suspend_handler(uint64_t power_state)
{
	int32_t rc = 0;
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);
	tsp_context *tsp_ctx = &tspd_sp_context[linear_id];

	assert(tsp_entry_info);
	assert(tsp_ctx->state == TSP_STATE_ON);

	/* Program the entry point, power_state parameter and enter the TSP */
	write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx),
		      CTX_GPREG_X0,
		      power_state);
	cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_suspend_entry);
	rc = tspd_synchronous_sp_entry(tsp_ctx);

	/*
	 * Read the response from the TSP. A non-zero return means that
	 * something went wrong while communicating with the TSP.
	 */
	if (rc != 0)
		panic();

	/* Update its context to reflect the state the TSP is in */
	tsp_ctx->state = TSP_STATE_SUSPEND;
}
Example no. 15
/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload after this
 * cpu's architectural state has been restored after wakeup from an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
tsp_args *tsp_cpu_resume_main(uint64_t suspend_level,
			      uint64_t arg1,
			      uint64_t arg2,
			      uint64_t arg3,
			      uint64_t arg4,
			      uint64_t arg5,
			      uint64_t arg6,
			      uint64_t arg7)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_resume_count++;

	spin_lock(&console_lock);
	printf("SP: cpu 0x%x resumed. suspend level %d \n\r",
	       mpidr, suspend_level);
	INFO("cpu 0x%x: %d smcs, %d erets %d cpu suspend requests\n", mpidr,
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count,
	     tsp_stats[linear_id].cpu_suspend_count);
	spin_unlock(&console_lock);

	/* Indicate to the SPD that we have completed this request */
	return set_smc_args(TSP_RESUME_DONE, 0, 0, 0, 0, 0, 0, 0);
}
Example no. 16
/*******************************************************************************
 * This cpu is being turned off. Allow the TSPD/TSP to perform any actions
 * needed
 ******************************************************************************/
static int32_t tspd_cpu_off_handler(uint64_t cookie)
{
	int32_t rc = 0;
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);
	tsp_context *tsp_ctx = &tspd_sp_context[linear_id];

	assert(tsp_entry_info);
	assert(tsp_ctx->state == TSP_STATE_ON);

	/* Program the entry point and enter the TSP */
	cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_off_entry);
	rc = tspd_synchronous_sp_entry(tsp_ctx);

	/*
	 * Read the response from the TSP. A non-zero return means that
	 * something went wrong while communicating with the TSP.
	 */
	if (rc != 0)
		panic();

	/*
	 * Reset TSP's context for a fresh start when this cpu is turned on
	 * subsequently.
	 */
	tsp_ctx->state = TSP_STATE_OFF;

	return 0;
}
Example no. 17
/*******************************************************************************
 * This cpu has been turned on. Enter the TSP to initialise S-EL1 and other bits
 * before passing control back to the Secure Monitor. Entry into S-EL1 is done
 * after initialising minimal architectural state that guarantees safe
 * execution.
 ******************************************************************************/
static void tspd_cpu_on_finish_handler(uint64_t cookie)
{
	int32_t rc = 0;
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);
	tsp_context *tsp_ctx = &tspd_sp_context[linear_id];

	assert(tsp_entry_info);
	assert(tsp_ctx->state == TSP_STATE_OFF);

	/* Initialise this cpu's secure context */
	tspd_init_secure_context((uint64_t) tsp_entry_info->cpu_on_entry,
				TSP_AARCH64,
				mpidr,
				tsp_ctx);

	/* Enter the TSP */
	rc = tspd_synchronous_sp_entry(tsp_ctx);

	/*
	 * Read the response from the TSP. A non-zero return means that
	 * something went wrong while communicating with the SP.
	 */
	if (rc != 0)
		panic();

	/* Update its context to reflect the state the SP is in */
	tsp_ctx->state = TSP_STATE_ON;
}
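The three TSPD handlers above (suspend, off, on-finish) all funnel into the same synchronous entry primitive. An abridged sketch of its shape, following the pattern of TF-A's tspd_common.c; `tspd_enter_sp` is the assembly trampoline that stashes the C runtime context before ERETing into the TSP, and exact signatures vary across TF-A versions:

uint64_t tspd_enter_sp(uint64_t *c_rt_ctx);	/* asm trampoline */

uint64_t tspd_synchronous_sp_entry(tsp_context *tsp_ctx)
{
	uint64_t rc;

	/* Apply the S-EL1 system register context and make it current */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* ERET into the TSP; returns when the TSP raises its 'done' SMC */
	rc = tspd_enter_sp(&tsp_ctx->c_rt_ctx);

	/* Save the TSP's S-EL1 state for the next synchronous entry */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}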
Example no. 18
static tsp_args *set_smc_args(uint64_t arg0,
			     uint64_t arg1,
			     uint64_t arg2,
			     uint64_t arg3,
			     uint64_t arg4,
			     uint64_t arg5,
			     uint64_t arg6,
			     uint64_t arg7)
{
	uint64_t mpidr = read_mpidr();
	uint32_t linear_id;
	tsp_args *pcpu_smc_args;

	/*
	 * Return to Secure Monitor by raising an SMC. The results of the
	 * service are passed as an arguments to the SMC
	 */
	linear_id = platform_get_core_pos(mpidr);
	pcpu_smc_args = &tsp_smc_args[linear_id];
	write_sp_arg(pcpu_smc_args, TSP_ARG0, arg0);
	write_sp_arg(pcpu_smc_args, TSP_ARG1, arg1);
	write_sp_arg(pcpu_smc_args, TSP_ARG2, arg2);
	write_sp_arg(pcpu_smc_args, TSP_ARG3, arg3);
	write_sp_arg(pcpu_smc_args, TSP_ARG4, arg4);
	write_sp_arg(pcpu_smc_args, TSP_ARG5, arg5);
	write_sp_arg(pcpu_smc_args, TSP_ARG6, arg6);
	write_sp_arg(pcpu_smc_args, TSP_ARG7, arg7);

	return pcpu_smc_args;
}
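A plausible definition of the `write_sp_arg` accessor used above, assuming `tsp_args` wraps an array of eight 64-bit registers and that `TSP_ARG0`..`TSP_ARG7` are byte offsets into it; treat both as assumptions rather than the payload's actual header:

typedef struct tsp_args {
	uint64_t _regs[8];	/* x0..x7 for the SMC back to the monitor */
} tsp_args;

/* Store one SMC argument at its byte offset within the register array */
#define write_sp_arg(args, offset, val) \
	(((args)->_regs[(offset) >> 3]) = (val))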
Example no. 19
/*******************************************************************************
 * TSP main entry point where it gets the opportunity to initialize its secure
 * state/applications. Once the state is initialized, it must return to the
 * SPD with a pointer to the 'tsp_vector_table' jump table.
 ******************************************************************************/
uint64_t tsp_main(void)
{
	NOTICE("TSP: %s\n", version_string);
	NOTICE("TSP: %s\n", build_message);
	INFO("TSP: Total memory base : 0x%lx\n", BL32_TOTAL_BASE);
	INFO("TSP: Total memory size : 0x%lx bytes\n",
			 BL32_TOTAL_LIMIT - BL32_TOTAL_BASE);

	uint32_t linear_id = plat_my_core_pos();

	/* Initialize the platform */
	tsp_platform_setup();

	/* Initialize secure/applications state here */
	tsp_generic_timer_start();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
	     read_mpidr(),
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count,
	     tsp_stats[linear_id].cpu_on_count);
	spin_unlock(&console_lock);
#endif
	return (uint64_t) &tsp_vector_table;
}
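One plausible shape for the `tsp_vector_table` returned above. In TF-A proper the table is laid out in assembly as a row of branch instructions, so this struct-of-entrypoints view is a simplification; the entry names are inferred from the handlers shown in this section.

/* Simplified sketch of the jump table the SPD dispatches through */
typedef struct tsp_vectors {
	void *fast_smc_entry;		/* fast SMC handler */
	void *cpu_on_entry;		/* tsp_cpu_on_main */
	void *cpu_off_entry;		/* cpu_off bookkeeping */
	void *cpu_suspend_entry;	/* save state before suspend */
	void *cpu_resume_entry;		/* tsp_cpu_resume_main */
	void *system_reset_entry;	/* tsp_system_reset_main */
} tsp_vectors_t;

extern tsp_vectors_t tsp_vector_table;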
Example no. 20
/*******************************************************************************
 * BL31 is responsible for setting up the runtime services for the primary cpu
 * before passing control to the bootloader or an Operating System. This
 * function calls runtime_svc_init() which initializes all registered runtime
 * services. The runtime services set up enough context for the core to
 * switch to the next exception level. When this function returns, the core
 * will switch to the programmed exception level via an ERET.
 ******************************************************************************/
void bl31_main(void)
{
#if DEBUG
	unsigned long mpidr = read_mpidr();
#endif

	/* Perform remaining generic architectural setup from EL3 */
	bl31_arch_setup();

	/* Perform platform setup in BL31 */
	bl31_platform_setup();

	printf("BL31 %s\n\r", build_message);

	/* Initialise helper libraries */
	bl31_lib_init();

	/* Initialize the runtime services e.g. psci */
	runtime_svc_init();

	/* Clean caches before re-entering normal world */
	dcsw_op_all(DCCSW);

	/*
	 * Use the more complex exception vectors now that context
	 * management is setup. SP_EL3 should point to a 'cpu_context'
	 * structure which has an exception stack allocated.  The PSCI
	 * service should have set the context.
	 */
	assert(cm_get_context(mpidr, NON_SECURE));
	cm_set_next_eret_context(NON_SECURE);
	cm_init_pcpu_ptr_cache();
	write_vbar_el3((uint64_t) runtime_exceptions);
	isb();
	next_image_type = NON_SECURE;

	/*
	 * All the cold boot actions on the primary cpu are done. We now need to
	 * decide which is the next image (BL32 or BL33) and how to execute it.
	 * If the SPD runtime service is present, it would want to pass control
	 * to BL32 first in S-EL1. In that case, the SPD would have registered a
	 * function to initialize BL32, where it takes responsibility for
	 * entering S-EL1 and returning control back to bl31_main. Once this is
	 * done we can prepare entry into BL33 as normal.
	 */

	/*
	 * If the SPD has registered an init hook, invoke it. Pass it the information
	 * about memory extents
	 */
	if (bl32_init)
		(*bl32_init)(bl31_plat_get_bl32_mem_layout());

	/*
	 * We are ready to enter the next EL. Prepare entry into the image
	 * corresponding to the desired security state after the next ERET.
	 */
	bl31_prepare_next_image_entry();
}
Example no. 21
void save_sysregs_allcore(void)
{
  uint64_t mpidr = read_mpidr();
  uint32_t linear_id = platform_get_core_pos(mpidr);
  for (int coreNro = 0; coreNro < TBASE_CORE_COUNT; coreNro++) {
    save_sysregs_core(linear_id, coreNro);
  }
  tbaseBootCoreMpidr = mpidr;
}
void plat_cci_enable(void)
{
	/*
	 * Enable CCI coherency for this cluster.
	 * No need for locks as no other cpu is active at the moment.
	 */
	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
}
Example no. 23
/*******************************************************************************
 * The following function finishes an earlier power on request. It is called
 * by the common finisher routine in psci_common.c. The `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info)
{
	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	psci_plat_pm_ops->pwr_domain_on_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();
#endif

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch.initialization
	 * to run in the non-secure address space.
	 */
	psci_arch_setup();

	/*
	 * Lock the CPU spin lock to make sure that the context initialization
	 * is done. Since the lock is only used in this function to create
	 * a synchronization point with cpu_on_start(), it can be released
	 * immediately.
	 */
	psci_spin_lock_cpu(cpu_idx);
	psci_spin_unlock_cpu(cpu_idx);

	/* Ensure we have been explicitly woken up by another cpu */
	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
	 * error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
		psci_spd_pm->svc_on_finish(0);

	PUBLISH_EVENT(psci_cpu_on_finish);

	/* Populate the mpidr field within the cpu node array */
	/* This needs to be done only once */
	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}
Example no. 24
static void __dead2 sunxi_pwr_down_wfi(const psci_power_state_t *target_state)
{
	u_register_t mpidr = read_mpidr();

	sunxi_cpu_off(MPIDR_AFFLVL1_VAL(mpidr), MPIDR_AFFLVL0_VAL(mpidr));

	while (1)
		wfi();
}
Example no. 25
/*******************************************************************************
 * The target cpu is being turned on.
 ******************************************************************************/
static void tbase_cpu_on_handler(uint64_t target_cpu)
{
  uint64_t mpidr = read_mpidr();
  uint32_t linear_id = platform_get_core_pos(mpidr);
  tbase_context *tbase_ctx = &secure_context[linear_id];
  
  /* TODO */
  
  tbase_ctx->state = TBASE_STATE_ON;
}
/*******************************************************************************
 * This function updates the TSP statistics for FIQs handled synchronously, i.e.
 * the ones that have been handed over by the TSPD. It also keeps count of the
 * number of times control was passed back to the TSPD after handling an FIQ.
 * In the future it will be possible that the TSPD hands over an FIQ to the TSP
 * but does not expect it to return execution. This statistic will be useful to
 * distinguish between these two models of synchronous FIQ handling.
 * The 'elr_el3' parameter contains the address of the instruction in normal
 * world where this FIQ was generated.
 ******************************************************************************/
void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3)
{
	uint32_t linear_id = plat_my_core_pos();

	tsp_stats[linear_id].sync_fiq_count++;
	if (type == TSP_HANDLE_FIQ_AND_RETURN)
		tsp_stats[linear_id].sync_fiq_ret_count++;

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	spin_lock(&console_lock);
	VERBOSE("TSP: cpu 0x%lx sync fiq request from 0x%lx\n",
		read_mpidr(), elr_el3);
	VERBOSE("TSP: cpu 0x%lx: %d sync fiq requests, %d sync fiq returns\n",
		read_mpidr(),
		tsp_stats[linear_id].sync_fiq_count,
		tsp_stats[linear_id].sync_fiq_ret_count);
	spin_unlock(&console_lock);
#endif
}
Example no. 27
/*******************************************************************************
 * BL31 is responsible for setting up the runtime services for the primary cpu
 * before passing control to the bootloader or an Operating System. This
 * function calls runtime_svc_init() which initializes all registered runtime
 * services. The runtime services set up enough context for the core to
 * switch to the next exception level. When this function returns, the core
 * will switch to the programmed exception level via an ERET.
 ******************************************************************************/
void bl31_main(void)
{
#if DEBUG
    unsigned long mpidr = read_mpidr();
#endif

    /* Perform remaining generic architectural setup from EL3 */
    bl31_arch_setup();

    /* Perform platform setup in BL31 */
    bl31_platform_setup();

#if defined (__GNUC__)
    printf("BL31 Built : %s, %s\n\r", __TIME__, __DATE__);
#endif
    /* Initialise helper libraries */
    bl31_lib_init();

    /* Initialize the runtime services e.g. psci */
    runtime_svc_init();

    /* Clean caches before re-entering normal world */
    dcsw_op_all(DCCSW);

    /*
     * Use the more complex exception vectors now that context
     * management is setup. SP_EL3 should point to a 'cpu_context'
     * structure which has an exception stack allocated.  The PSCI
     * service should have set the context.
     */
    assert(cm_get_context(mpidr, NON_SECURE));
    cm_set_next_eret_context(NON_SECURE);
    write_vbar_el3((uint64_t) runtime_exceptions);

    /*
     * All the cold boot actions on the primary cpu are done. We
     * now need to decide which is the next image (BL32 or BL33)
     * and how to execute it. If the SPD runtime service is
     * present, it would want to pass control to BL32 first in
     * S-EL1. It will export the bl32_init() routine where it takes
     * responsibility of entering S-EL1 and returning control back
     * to bl31_main. Once this is done we can prepare entry into
     * BL33 as normal.
     */

    /* Tell BL32 about its memory extents as well */
    if (bl32_init)
        bl32_init(bl31_plat_get_bl32_mem_layout());

    /*
     * We are ready to enter the next EL. Prepare entry into the image
     * corresponding to the desired security state after the next ERET.
     */
    bl31_prepare_next_image_entry();
}
Example no. 28
/*******************************************************************************
 * This cpu is being suspended. S-EL1 state must have been saved in the
 * resident cpu (mpidr format), if any.
 ******************************************************************************/
static void tbase_cpu_suspend_handler(uint64_t power_state)
{
  uint64_t mpidr = read_mpidr();
  uint32_t linear_id = platform_get_core_pos(mpidr);
  tbase_context *tbase_ctx = &secure_context[linear_id];
  assert(tbase_ctx->state == TBASE_STATE_ON);
  
  DBG_PRINTF("\r\ntbase_cpu_suspend_handler %d\r\n", linear_id);
  
  tbase_ctx->state = TBASE_STATE_SUSPEND; 
}
/*******************************************************************************
 * TSP interrupt handler is called as a part of both synchronous and
 * asynchronous handling of TSP interrupts. Currently the physical timer
 * interrupt is the only S-EL1 interrupt that this handler expects. It returns
 * 0 upon successfully handling the expected interrupt; all other interrupts
 * are treated as normal world or EL3 interrupts.
 ******************************************************************************/
int32_t tsp_common_int_handler(void)
{
	uint32_t linear_id = plat_my_core_pos(), id;

	/*
	 * Get the highest priority pending interrupt id and see if it is the
	 * secure physical generic timer interrupt in which case, handle it.
	 * Otherwise throw this interrupt at the EL3 firmware.
	 *
	 * There is a small time window between reading the highest priority
	 * pending interrupt and acknowledging it during which another
	 * interrupt of higher priority could become the highest pending
	 * interrupt. This is not expected to happen currently for TSP.
	 */
	id = plat_ic_get_pending_interrupt_id();

	/* TSP can only handle the secure physical timer interrupt */
	if (id != TSP_IRQ_SEC_PHY_TIMER)
		return tsp_handle_preemption();

	/*
	 * Acknowledge and handle the secure timer interrupt. Also sanity check
	 * if it has been preempted by another interrupt through an assertion.
	 */
	id = plat_ic_acknowledge_interrupt();
	assert(id == TSP_IRQ_SEC_PHY_TIMER);
	tsp_generic_timer_handler();
	plat_ic_end_of_interrupt(id);

	/* Update the statistics and print some messages */
	tsp_stats[linear_id].sel1_intr_count++;
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	spin_lock(&console_lock);
	VERBOSE("TSP: cpu 0x%lx handled S-EL1 interrupt %d\n",
	       read_mpidr(), id);
	VERBOSE("TSP: cpu 0x%lx: %d S-EL1 requests\n",
	     read_mpidr(), tsp_stats[linear_id].sel1_intr_count);
	spin_unlock(&console_lock);
#endif
	return 0;
}
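A plausible implementation of the `tsp_handle_preemption` path taken above when the pending interrupt is not the secure timer; `preempt_intr_count` is an assumed counter, and the body is a sketch rather than the payload's actual code:

int32_t tsp_handle_preemption(void)
{
	/* Not our interrupt: record the preemption and bounce back to the
	 * SPD so it can route the interrupt to the normal world or EL3. */
	tsp_stats[plat_my_core_pos()].preempt_intr_count++;
	return TSP_PREEMPTED;
}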
int hikey_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#ifdef SPD_opteed
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
	assert(bl_mem_params);

	switch (image_id) {
#ifdef AARCH64
	case BL32_IMAGE_ID:
#ifdef SPD_opteed
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
				&pager_mem_params->image_info,
				&paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}
#endif
		bl_mem_params->ep_info.spsr = hikey_get_spsr_for_bl32_entry();
		break;
#endif

	case BL33_IMAGE_ID:
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		bl_mem_params->ep_info.spsr = hikey_get_spsr_for_bl33_entry();
		break;

#ifdef SCP_BL2_BASE
	case SCP_BL2_IMAGE_ID:
		/* The subsequent handling of SCP_BL2 is platform specific */
		err = plat_hikey_bl2_handle_scp_bl2(&bl_mem_params->image_info);
		if (err) {
			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
		}
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}