Example #1
void core_init_mmu_regs(void)
{
	uint64_t mair;
	uint64_t tcr;

	mair  = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	write_mair_el1(mair);

	tcr  = TCR_XRGNX_WBWA << TCR_IRGN0_SHIFT;
	tcr |= TCR_XRGNX_WBWA << TCR_ORGN0_SHIFT;
	tcr |= TCR_SHX_ISH << TCR_SH0_SHIFT;
	tcr |= tcr_ps_bits << TCR_EL1_IPS_SHIFT;
	tcr |= 64 - __builtin_ctzl(ADDR_SPACE_SIZE);

	/* Disable the use of TTBR1 */
	tcr |= TCR_EPD1;

	/*
	 * TCR.A1 = 0 => ASID is stored in TTBR0
	 * TCR.AS = 0 => Same ASID size as in AArch32/ARMv7
	 */

	write_tcr_el1(tcr);
	write_ttbr0_el1((paddr_t)l1_xlation_table[get_core_pos()]);
	write_ttbr1_el1(0);
}
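
The T0SZ field in the low bits of TCR encodes how many of the top address
bits are unused, which is what 64 - __builtin_ctzl(ADDR_SPACE_SIZE)
computes for a power-of-two ADDR_SPACE_SIZE. A minimal standalone sketch
of that arithmetic, assuming a 64-bit long and a hypothetical 4 GiB
address space (neither value is taken from the code above):

#include <stdio.h>

int main(void)
{
	unsigned long addr_space_size = 1UL << 32; /* hypothetical 4 GiB */
	unsigned int t0sz = 64 - __builtin_ctzl(addr_space_size);

	printf("T0SZ = %u\n", t0sz); /* prints: T0SZ = 32 */
	return 0;
}
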
Example #2
static uint32_t main_system_reset_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}
Example #3
uint32_t generic_boot_cpu_on_handler(uint32_t a0 __maybe_unused,
				     uint32_t a1 __unused)
{
	DMSG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	init_secondary_helper(PADDR_INVALID);
	return 0;
}
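
Every snippet on this page calls get_core_pos(). A minimal sketch of what
such a helper typically does, assuming the index is derived from the MPIDR
affinity fields; this is illustrative only, not necessarily OP-TEE's exact
implementation, and the 4-cores-per-cluster layout is an assumption:

#include <stddef.h>
#include <stdint.h>

static inline size_t get_core_pos_sketch(uint32_t mpidr)
{
	size_t cluster = (mpidr >> 8) & 0xff;	/* Aff1: cluster id */
	size_t cpu = mpidr & 0xff;		/* Aff0: cpu within cluster */

	return cluster * 4 + cpu;	/* assumes at most 4 cores/cluster */
}
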
Example #4
static uint32_t main_cpu_resume_handler(uint32_t a0, uint32_t a1)
{
	(void)&a0;
	(void)&a1;
	/* Could restore generic timer here */
	PM_DEBUG("cpu %zu: a0 0x%x", get_core_pos(), a0);
	return 0;
}
Example #5
void core_mmu_get_user_map(struct core_mmu_user_map *map)
{
	assert(user_va_idx != -1);

	map->user_map = l1_xlation_table[get_core_pos()][user_va_idx];
	if (map->user_map) {
		map->asid = (read_ttbr0_64bit() >> TTBR_ASID_SHIFT) &
			    TTBR_ASID_MASK;
	} else {
		/* No active user mapping, so no ASID to report */
		map->asid = 0;
	}
}
Example #6
uint32_t main_cpu_on_handler(uint32_t a0, uint32_t a1)
{
	size_t pos = get_core_pos();

	(void)&a0;
	(void)&a1;
	PM_DEBUG("cpu %zu: a0 0x%x", pos, a0);
	main_init_helper(false, pos, NSEC_ENTRY_INVALID);
	return 0;
}
Example #7
/* teecore heap address/size is defined in scatter file */
extern unsigned char teecore_heap_start;
extern unsigned char teecore_heap_end;

static void main_fiq(void);
static void main_tee_entry_std(struct thread_smc_args *args);
static void main_tee_entry_fast(struct thread_smc_args *args);

static const struct thread_handlers handlers = {
	.std_smc = main_tee_entry_std,
	.fast_smc = main_tee_entry_fast,
	.fiq = main_fiq,
	.svc = tee_svc_handler,
	.abort = tee_pager_abort_handler,
	.cpu_on = pm_panic,
	.cpu_off = pm_panic,
	.cpu_suspend = pm_panic,
	.cpu_resume = pm_panic,
	.system_off = pm_panic,
	.system_reset = pm_panic,
};

void main_init(uint32_t nsec_entry); /* called from assembly only */
void main_init(uint32_t nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;
	size_t pos = get_core_pos();

	/*
	 * Mask IRQ and FIQ before switch to the thread vector as the
	 * thread handler requires IRQ and FIQ to be masked while executing
	 * with the temporary stack. The thread subsystem also asserts that
	 * IRQ is blocked when using most of its functions.
	 */
	thread_mask_exceptions(THREAD_EXCP_FIQ | THREAD_EXCP_IRQ);

	if (pos == 0) {
		thread_init_primary(&handlers);

		/* initialize platform */
		platform_init();
	}

	thread_init_per_cpu();

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;

	if (pos == 0) {
		unsigned long a, s;
		/* core malloc pool init */
#ifdef CFG_TEE_MALLOC_START
		a = CFG_TEE_MALLOC_START;
		s = CFG_TEE_MALLOC_SIZE;
#else
		a = (unsigned long)&teecore_heap_start;
		s = (unsigned long)&teecore_heap_end;
		a = ((a + 1) & ~0x0FFFF) + 0x10000;	/* 64kB aligned */
		s = s & ~0x0FFFF;	/* 64kB aligned */
		s = s - a;
#endif
		malloc_add_pool((void *)a, s);

		teecore_init_ta_ram();

		if (init_teecore() != TEE_SUCCESS) {
			panic();
		}
	}

	IMSG("optee initialize finished\n");
}
Example #8
static struct thread_core_local *get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * IRQs must be disabled before playing with core_local since
	 * we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(read_cpsr() & CPSR_I);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}
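
A hedged usage sketch for the pattern above: callers mask IRQs so the
thread cannot migrate to another core between reading the core index and
touching the per-core data. thread_mask_exceptions() and THREAD_EXCP_IRQ
appear in Example #7; thread_unmask_exceptions() and the function body are
assumptions for illustration:

static void per_core_work_sketch(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l = get_core_local();

	/* ... use l; migration is impossible while IRQs are masked ... */

	thread_unmask_exceptions(exceptions);
}
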
Example #9
void plat_cpu_reset_late(void)
{
	static uint32_t cntfrq;
	vaddr_t addr;

	if (!get_core_pos()) {
		/* read cnt freq */
		cntfrq = read_cntfrq();

#if defined(CFG_BOOT_SECONDARY_REQUEST)
		/* set secondary entry address */
		write32(__compiler_bswap32(CFG_TEE_LOAD_ADDR),
				DCFG_BASE + DCFG_SCRATCHRW1);

		/* release secondary cores */
		write32(__compiler_bswap32(0x1 << 1), /* cpu1 */
				DCFG_BASE + DCFG_CCSR_BRR);
		dsb();
		sev();
#endif

		/* configure CSU */

		/* first grant all peripherals */
		for (addr = CSU_BASE + CSU_CSL_START;
			 addr != CSU_BASE + CSU_CSL_END;
			 addr += 4)
			write32(__compiler_bswap32(CSU_ACCESS_ALL), addr);

		/* restrict key peripherals from NS */
		write32(__compiler_bswap32(CSU_ACCESS_SEC_ONLY),
			CSU_BASE + CSU_CSL30);
		write32(__compiler_bswap32(CSU_ACCESS_SEC_ONLY),
			CSU_BASE + CSU_CSL37);

		/* lock the settings */
		for (addr = CSU_BASE + CSU_CSL_START;
			 addr != CSU_BASE + CSU_CSL_END;
			 addr += 4)
			write32(read32(addr) |
				__compiler_bswap32(CSU_SETTING_LOCK),
				addr);
	} else {
		/* program the cntfrq, the cntfrq is banked for each core */
		write_cntfrq(cntfrq);
	}
}
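
The __compiler_bswap32() calls above suggest these SoC configuration
registers are big-endian while the CPU runs little-endian; that is a
reading of the code, not a statement from its authors. A 32-bit byte swap
is equivalent to:

#include <stdint.h>

static inline uint32_t bswap32_sketch(uint32_t v)
{
	return ((v & 0x000000ffU) << 24) |
	       ((v & 0x0000ff00U) << 8) |
	       ((v & 0x00ff0000U) >> 8) |
	       ((v & 0xff000000U) >> 24);
}
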
Example #10
static void main_fiq(void)
{
	uint32_t iar;

	DMSG("enter");

	iar = gic_read_iar();

	while (pl011_have_rx_data(CONSOLE_UART_BASE)) {
		DMSG("cpu %zu: got 0x%x",
		     get_core_pos(), pl011_getchar(CONSOLE_UART_BASE));
	}

	gic_write_eoir(iar);

	DMSG("return");
}
Example #11
void stm32mp_register_online_cpu(void)
{
	size_t pos = get_core_pos();
	uint32_t exceptions = lock_state_access();

	if (pos == 0) {
		assert(core_state[pos] == CORE_OFF);
	} else {
		if (core_state[pos] != CORE_AWAKE) {
			core_state[pos] = CORE_OFF;
			unlock_state_access(exceptions);
			stm32_pm_cpu_power_down_wfi();
			panic();
		}
	}

	core_state[pos] = CORE_ON;
	unlock_state_access(exceptions);
}
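
lock_state_access() and unlock_state_access() are file-local helpers not
shown on this page. A minimal sketch of what they plausibly do, built on
OP-TEE's spinlock API; the lock variable and both bodies are assumptions:

static unsigned int state_lock; /* hypothetical lock for core_state[] */

static uint32_t lock_state_access(void)
{
	/* Masks exceptions, takes the lock, returns the old mask state */
	return cpu_spin_lock_xsave(&state_lock);
}

static void unlock_state_access(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&state_lock, exceptions);
}
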
Example #12
bool core_mmu_find_table(vaddr_t va, unsigned max_level,
		struct core_mmu_table_info *tbl_info)
{
	uint64_t *tbl = l1_xlation_table[get_core_pos()];
	uintptr_t ntbl;
	unsigned level = 1;
	vaddr_t va_base = 0;
	unsigned num_entries = NUM_L1_ENTRIES;

	while (true) {
		unsigned level_size_shift =
			L1_XLAT_ADDRESS_SHIFT - (level - 1) *
						XLAT_TABLE_ENTRIES_SHIFT;
		unsigned n = (va - va_base) >> level_size_shift;

		if (n >= num_entries)
			return false;

		if (level == max_level || level == 3 ||
			(tbl[n] & TABLE_DESC) != TABLE_DESC) {
			/*
			 * We've either reached max_level, level 3, a block
			 * mapping entry or an "invalid" mapping entry.
			 */
			tbl_info->table = tbl;
			tbl_info->va_base = va_base;
			tbl_info->level = level;
			tbl_info->shift = level_size_shift;
			tbl_info->num_entries = num_entries;
			return true;
		}

		/* Copy bits 39:12 from tbl[n] to ntbl */
		ntbl = (tbl[n] & ((1ULL << 40) - 1)) & ~((1 << 12) - 1);

		tbl = (uint64_t *)ntbl;

		va_base += n << level_size_shift;
		level++;
		num_entries = XLAT_TABLE_ENTRIES;
	}
}
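
The mask expression on the next-table pointer keeps descriptor bits 39:12,
i.e. the physical address of the next-level translation table. A
standalone check of that arithmetic on a hypothetical all-ones descriptor:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t desc = ~0ULL;	/* hypothetical all-ones descriptor */
	uint64_t pa = (desc & ((1ULL << 40) - 1)) & ~((1ULL << 12) - 1);

	assert(pa == 0xFFFFFFF000ULL);	/* only bits 39:12 survive */
	return 0;
}
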
Example #13
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <string.h>

#include <sm/sm.h>
#include <sm/sm_defs.h>
#include <sm/tee_mon.h>
#include <sm/teesmc.h>
#include <sm/teesmc_st.h>

#include <kernel/arch_debug.h>

#include <arm32.h>
#include <kernel/thread.h>
#include <kernel/panic.h>
#include <kernel/util.h>
#include <kernel/tee_core_trace.h>
#include <kernel/misc.h>
#include <mm/tee_pager_unpg.h>
#include <mm/core_mmu.h>
#include <tee/entry.h>

#include <assert.h>

#ifdef WITH_STACK_CANARIES
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

#define STACK_ALIGNMENT		8

#define DECLARE_STACK(name, num_stacks, stack_size) \
	static uint32_t name[num_stacks][(stack_size + STACK_CANARY_SIZE) / \
					 sizeof(uint32_t)] \
		__attribute__((section(".bss.prebss.stack"), \
			       aligned(STACK_ALIGNMENT)))

#define GET_STACK(stack) \
	((vaddr_t)(stack) + sizeof(stack) - STACK_CANARY_SIZE / 2)


DECLARE_STACK(stack_tmp,	CFG_TEE_CORE_NB_CORE,	STACK_TMP_SIZE);
DECLARE_STACK(stack_abt,	CFG_TEE_CORE_NB_CORE,	STACK_ABT_SIZE);
DECLARE_STACK(stack_sm,		CFG_TEE_CORE_NB_CORE,	SM_STACK_SIZE);
DECLARE_STACK(stack_thread,	NUM_THREADS,		STACK_THREAD_SIZE);
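
/*
 * Hedged illustration: with CFG_TEE_CORE_NB_CORE = 2 and
 * STACK_TMP_SIZE = 1024 (hypothetical values), the stack_tmp declaration
 * above expands to roughly:
 *
 *   static uint32_t stack_tmp[2][(1024 + STACK_CANARY_SIZE) /
 *				  sizeof(uint32_t)]
 *	__attribute__((section(".bss.prebss.stack"), aligned(8)));
 *
 * GET_STACK(stack_tmp[n]) then yields the address just below the end
 * canary, i.e. the initial (highest) stack address for core n.
 */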

const vaddr_t stack_tmp_top[CFG_TEE_CORE_NB_CORE] = {
	GET_STACK(stack_tmp[0]),
#if CFG_TEE_CORE_NB_CORE > 1
	GET_STACK(stack_tmp[1]),
#endif
#if CFG_TEE_CORE_NB_CORE > 2
	GET_STACK(stack_tmp[2]),
#endif
#if CFG_TEE_CORE_NB_CORE > 3
	GET_STACK(stack_tmp[3]),
#endif
#if CFG_TEE_CORE_NB_CORE > 4
#error "Top of tmp stacks aren't defined for more than 4 CPUS"
#endif
};

static void main_fiq(void);
static void main_tee_entry(struct thread_smc_args *args);
static uint32_t main_default_pm_handler(uint32_t a0, uint32_t a1);

static void init_canaries(void)
{
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
	INIT_CANARY(stack_sm);
	INIT_CANARY(stack_thread);
}

void check_canaries(void)
{
#ifdef WITH_STACK_CANARIES
	size_t n;

#define ASSERT_STACK_CANARIES(name)					\
	do {								\
		for (n = 0; n < ARRAY_SIZE(name); n++) {		\
			assert(GET_START_CANARY(name, n) ==		\
			       START_CANARY_VALUE);			\
			assert(GET_END_CANARY(name, n) ==		\
			       END_CANARY_VALUE);			\
		}							\
	} while (0)

	ASSERT_STACK_CANARIES(stack_tmp);
	ASSERT_STACK_CANARIES(stack_abt);
	ASSERT_STACK_CANARIES(stack_sm);
	ASSERT_STACK_CANARIES(stack_thread);
#endif /*WITH_STACK_CANARIES*/
}

static const struct thread_handlers handlers = {
	.std_smc = main_tee_entry,
	.fast_smc = main_tee_entry,
	.fiq = main_fiq,
	.svc = NULL, /* XXX currently using hardcoded svc handler */
	.abort = tee_pager_abort_handler,
	.cpu_on = main_default_pm_handler,
	.cpu_off = main_default_pm_handler,
	.cpu_suspend = main_default_pm_handler,
	.cpu_resume = main_default_pm_handler,
	.system_off = main_default_pm_handler,
	.system_reset = main_default_pm_handler,
};

void main_init(uint32_t nsec_entry); /* called from assembly only */
void main_init(uint32_t nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;
	size_t pos = get_core_pos();

	/*
	 * Mask IRQ and FIQ before switch to the thread vector as the
	 * thread handler requires IRQ and FIQ to be masked while executing
	 * with the temporary stack. The thread subsystem also asserts that
	 * IRQ is blocked when using most of its functions.
	 */
	write_cpsr(read_cpsr() | CPSR_F | CPSR_I);

	if (pos == 0) {
		size_t n;

		/* Initialize canaries around the stacks */
		init_canaries();

		/* Assign the thread stacks */
		for (n = 0; n < NUM_THREADS; n++) {
			if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
				panic();
		}
	}

	if (!thread_init_stack(THREAD_TMP_STACK, GET_STACK(stack_tmp[pos])))
		panic();
	if (!thread_init_stack(THREAD_ABT_STACK, GET_STACK(stack_abt[pos])))
		panic();

	thread_init_handlers(&handlers);

	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_sm[pos]));
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
	sm_set_entry_vector(thread_vector_table);
}
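
The raw CPSR write above masks IRQ and FIQ directly; Example #7 does the
same through thread_mask_exceptions(). A minimal sketch of such a helper,
assuming it only manipulates the CPSR I/F bits (illustrative, not the
project's definition):

static uint32_t mask_exceptions_sketch(uint32_t bits)
{
	uint32_t old = read_cpsr();

	write_cpsr(old | (bits & (CPSR_I | CPSR_F)));
	return old & (CPSR_I | CPSR_F);	/* previous mask, for restoring */
}
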
Example #14
void plat_cpu_reset_late(void)
{
	uintptr_t addr;
	uint32_t val;

	if (get_core_pos() != 0)
		return;

	/*
	 * Configure imx7 CSU, first grant all peripherals
	 * TODO: fine tune the permissions
	 */
	for (addr = CSU_CSL_START; addr != CSU_CSL_END; addr += 4)
		write32(CSU_ACCESS_ALL, core_mmu_get_va(addr, MEM_AREA_IO_SEC));

	dsb();
	/* Protect OCRAM_S */
	write32(0x003300FF, core_mmu_get_va(CSU_CSL_59, MEM_AREA_IO_SEC));
	/* Protect TZASC */
	write32(0x00FF0033, core_mmu_get_va(CSU_CSL_28, MEM_AREA_IO_SEC));
	/*
	 * Protect CSU
	 * Note: after this setting, the CSU can apparently still be read
	 * from the non-secure world, but can no longer be written.
	 */
	write32(0x00FF0033, core_mmu_get_va(CSU_CSL_15, MEM_AREA_IO_SEC));
	/*
	 * Protect SRC
	 * write32(0x003300FF, core_mmu_get_va(CSU_CSL_12, MEM_AREA_IO_SEC));
	 */
	dsb();

	/* lock the settings */
	for (addr = CSU_CSL_START; addr != CSU_CSL_END; addr += 4) {
		val = read32(core_mmu_get_va(addr, MEM_AREA_IO_SEC));
		write32(val | CSU_SETTING_LOCK,
			core_mmu_get_va(addr, MEM_AREA_IO_SEC));
	}
}
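
A hedged reading of the magic CSU values above (an interpretation of the
code, not taken from the i.MX7 documentation): each 32-bit CSU_CSL
register appears to hold two independent 16-bit slave configurations, so
0x003300FF would restrict the peripheral configured in the high half while
leaving the one in the low half fully open. Splitting such a value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t csl = 0x003300FF;	/* value written to CSU_CSL_59 */

	printf("high half 0x%04x, low half 0x%04x\n",
	       (unsigned int)(csl >> 16), (unsigned int)(csl & 0xffff));
	return 0;
}
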
Example #15
void core_init_mmu_regs(void)
{
	uint32_t ttbcr = TTBCR_EAE;
	uint32_t mair;

	mair  = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	write_mair0(mair);

	ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_IRGN0_SHIFT;
	ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_ORGN0_SHIFT;
	ttbcr |= TTBCR_SHX_ISH << TTBCR_SH0_SHIFT;

	/* Disable the use of TTBR1 */
	ttbcr |= TTBCR_EPD1;

	/* TTBCR.A1 = 0 => ASID is stored in TTBR0 */

	write_ttbcr(ttbcr);
	write_ttbr0_64bit((paddr_t)l1_xlation_table[get_core_pos()]);
	write_ttbr1_64bit(0);
}
Example #16
void plat_cpu_reset_late(void)
{
	vaddr_t addr;

	if (!get_core_pos()) {
#if defined(CFG_BOOT_SECONDARY_REQUEST)
		/* set secondary entry address */
		io_write32(DCFG_BASE + DCFG_SCRATCHRW1,
			   __compiler_bswap32(TEE_LOAD_ADDR));

		/* release secondary cores */
		io_write32(DCFG_BASE + DCFG_CCSR_BRR /* cpu1 */,
			   __compiler_bswap32(0x1 << 1));
		dsb();
		sev();
#endif

		/* configure CSU */

		/* first grant all peripherals */
		for (addr = CSU_BASE + CSU_CSL_START;
			 addr != CSU_BASE + CSU_CSL_END;
			 addr += 4)
			io_write32(addr, __compiler_bswap32(CSU_ACCESS_ALL));

		/* restrict key peripherals from NS */
		io_write32(CSU_BASE + CSU_CSL30,
			   __compiler_bswap32(CSU_ACCESS_SEC_ONLY));
		io_write32(CSU_BASE + CSU_CSL37,
			   __compiler_bswap32(CSU_ACCESS_SEC_ONLY));

		/* lock the settings */
		for (addr = CSU_BASE + CSU_CSL_START;
		     addr != CSU_BASE + CSU_CSL_END;
		     addr += 4)
			io_setbits32(addr,
				     __compiler_bswap32(CSU_SETTING_LOCK));
	}
}
Example #17
/* Override default psci_cpu_off() with platform specific sequence */
int psci_cpu_off(void)
{
	unsigned int pos = get_core_pos();
	uint32_t exceptions = 0;

	if (pos == 0) {
		EMSG("PSCI_CPU_OFF not supported for core #0");
		return PSCI_RET_INTERNAL_FAILURE;
	}

	DMSG("core %u", pos);

	exceptions = lock_state_access();

	assert(core_state[pos] == CORE_ON);
	core_state[pos] = CORE_OFF;

	unlock_state_access(exceptions);

	thread_mask_exceptions(THREAD_EXCP_ALL);
	stm32_pm_cpu_power_down_wfi();
	panic();
}
Example #18
		/* Assign the thread stacks */
		for (n = 0; n < NUM_THREADS; n++) {
			if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
				panic();
		}
	}

	if (!thread_init_stack(THREAD_TMP_STACK, GET_STACK(stack_tmp[pos])))
		panic();
	if (!thread_init_stack(THREAD_ABT_STACK, GET_STACK(stack_abt[pos])))
		panic();

	thread_init_handlers(&handlers);

	main_init_sec_mon(pos, nsec_entry);

	if (is_primary) {
		main_init_gic();
		if (init_teecore() != TEE_SUCCESS)
			panic();
		DMSG("Primary CPU switching to normal world boot\n");
	} else {
		DMSG("Secondary CPU Switching to normal world boot\n");
	}
}

#if defined(WITH_ARM_TRUSTED_FW)
uint32_t *main_init(void); /* called from assembly only */
uint32_t *main_init(void)
{
	main_init_helper(true, get_core_pos(), NSEC_ENTRY_INVALID);
	return thread_vector_table;
}
#elif defined(WITH_SEC_MON)
void main_init(uint32_t nsec_entry); /* called from assembly only */
void main_init(uint32_t nsec_entry)
{
	size_t pos = get_core_pos();

	main_init_helper(pos == 0, pos, nsec_entry);
}
Example #19
File: sm.c  Project: gxliu/optee_os
struct sm_sec_ctx *sm_get_sec_ctx(void)
{
	return &sm_sec_ctx[get_core_pos()];
}
Example #20
bool core_mmu_user_mapping_is_active(void)
{
	assert(user_va_idx != -1);
	return !!l1_xlation_table[get_core_pos()][user_va_idx];
}
Example #21
static __maybe_unused void print_detailed_abort(
				struct abort_info *ai __maybe_unused,
				const char *ctx __maybe_unused)
{
	EMSG_RAW("\n");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "%s\n",
		ctx, abort_type_to_str(ai->abort_type), ai->va,
		fault_to_str(ai->abort_type, ai->fault_descr));
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x\n",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x\n",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9,
		 read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x\n",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10,
		 read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x\n",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64 "   cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0_el1(), read_ttbr1_el1(),
		 read_contextidr_el1());
Example #22
int imx7d_lowpower_idle(uint32_t power_state,
			uintptr_t entry,
			uint32_t context_id __unused,
			struct sm_nsec_ctx *nsec)
{
	struct imx7_pm_info *p;
	uint32_t cpuidle_ocram_base;
	static uint32_t gic_inited;
	int ret;

	uint32_t cpu_id __maybe_unused = get_core_pos();
	uint32_t type = (power_state & PSCI_POWER_STATE_TYPE_MASK) >>
		PSCI_POWER_STATE_TYPE_SHIFT;
	uint32_t cpu = get_core_pos();

	cpuidle_ocram_base = core_mmu_get_va(TRUSTZONE_OCRAM_START +
					     LOWPOWER_IDLE_OCRAM_OFFSET,
					     MEM_AREA_TEE_COHERENT);
	p = (struct imx7_pm_info *)cpuidle_ocram_base;

	imx_pen_lock(cpu);

	if (!lowpoweridle_init) {
		imx7d_cpuidle_init();
		lowpoweridle_init = 1;
	}

	if (type != PSCI_POWER_STATE_TYPE_POWER_DOWN)
		panic();

	p->num_online_cpus = get_online_cpus();
	p->num_lpi_cpus++;

	sm_save_unbanked_regs(&nsec->ub_regs);

	ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t))
				(cpuidle_ocram_base + sizeof(*p)));

	/*
	 * Sometimes cpu_suspend may not actually have suspended; check
	 * its return value to decide whether registers must be restored.
	 */
	if (ret < 0) {
		p->num_lpi_cpus--;
		imx_pen_unlock(cpu);
		DMSG("=== Not suspended, GPC IRQ Pending === %d\n", cpu_id);
		return 0;
	}

	/*
	 * Restore the registers of the different secure-world modes.
	 * When the CPU powers up it is, after ROM init, in secure SVC
	 * mode, so the monitor registers must be restored first.
	 */
	sm_restore_unbanked_regs(&nsec->ub_regs);

	p->num_lpi_cpus--;
	/* Back to Linux */
	nsec->mon_lr = (uint32_t)entry;

	if (gic_inited == 0) {
		/*
		 * TODO: Call the Wakeup Late function to restore some
		 * HW configuration (e.g. TZASC)
		 */
		plat_cpu_reset_late();

		main_init_gic();
		gic_inited = 1;
		DMSG("=== Back from Suspended ===\n");
	} else {
		main_secondary_init_gic();
		gic_inited = 0;
	}

	imx_pen_unlock(cpu);

	return 0;
}