Example #1
void condvar_wait(struct condvar *cv, struct mutex *m)
{
	uint32_t old_itr_status;
	struct wait_queue_elem wqe;

	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);

	/* Link this condvar to this mutex until reinitialized */
	cpu_spin_lock(&cv->spin_lock);
	TEE_ASSERT(!cv->m || cv->m == m);
	cv->m = m;
	cpu_spin_unlock(&cv->spin_lock);

	cpu_spin_lock(&m->spin_lock);

	/* Add to mutex wait queue as a condvar waiter */
	wq_wait_init_condvar(&m->wq, &wqe, cv);

	/* Unlock the mutex */
	TEE_ASSERT(m->value == MUTEX_VALUE_LOCKED);
	thread_rem_mutex(m);
	m->value = MUTEX_VALUE_UNLOCKED;

	cpu_spin_unlock(&m->spin_lock);

	thread_unmask_exceptions(old_itr_status);

	/* Wake a waiter, if any is queued on the mutex */
	wq_wake_one(&m->wq);

	wq_wait_final(&m->wq, &wqe);

	mutex_lock(m);
}
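The canonical caller pattern for condvar_wait() is worth spelling out. The sketch below is a hypothetical illustration, not code from the tree: wait_for_resource() and the predicate resource_ready are invented names, and the MUTEX_INITIALIZER/CONDVAR_INITIALIZER static initializers are assumed. The predicate is re-checked in a loop because the wakeup can race with other consumers.

/* Hypothetical usage sketch: block until a predicate guarded by m is true */
static struct mutex m = MUTEX_INITIALIZER;	/* assumed initializer */
static struct condvar cv = CONDVAR_INITIALIZER;	/* assumed initializer */
static bool resource_ready;

static void wait_for_resource(void)
{
	mutex_lock(&m);
	while (!resource_ready)	/* re-check: wakeups may race with consumers */
		condvar_wait(&cv, &m);
	resource_ready = false;
	mutex_unlock(&m);
}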
Example #2
static bool arm_va2pa_helper(void *va, paddr_t *pa)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	paddr_t par;
	paddr_t par_pa_mask;
	bool ret = false;

#ifdef ARM32
	write_ats1cpr((vaddr_t)va);
	isb();
#ifdef CFG_WITH_LPAE
	par = read_par64();
	par_pa_mask = PAR64_PA_MASK;
#else
	par = read_par32();
	par_pa_mask = PAR32_PA_MASK;
#endif
#endif /*ARM32*/

#ifdef ARM64
	write_at_s1e1r((vaddr_t)va);
	isb();
	par = read_par_el1();
	par_pa_mask = PAR_PA_MASK;
#endif
	if (par & PAR_F)
		goto out;
	*pa = (par & (par_pa_mask << PAR_PA_SHIFT)) |
		((vaddr_t)va & ((1 << PAR_PA_SHIFT) - 1));

	ret = true;
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}
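A minimal, hypothetical caller looks as follows; get_buf_pa() and buf are invented names, and the error code is just one plausible mapping of a failed translation (PAR_F set):

/* Hypothetical usage sketch: resolve the physical address behind a VA */
static TEE_Result get_buf_pa(void *buf, paddr_t *pa)
{
	if (!arm_va2pa_helper(buf, pa))
		return TEE_ERROR_ACCESS_DENIED;	/* translation aborted */
	return TEE_SUCCESS;
}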
Example #3
/* The teecore heap address/size is defined in the scatter file */
extern unsigned char teecore_heap_start;
extern unsigned char teecore_heap_end;

static void main_fiq(void);
static void main_tee_entry_std(struct thread_smc_args *args);
static void main_tee_entry_fast(struct thread_smc_args *args);

static const struct thread_handlers handlers = {
	.std_smc = main_tee_entry_std,
	.fast_smc = main_tee_entry_fast,
	.fiq = main_fiq,
	.svc = tee_svc_handler,
	.abort = tee_pager_abort_handler,
	.cpu_on = pm_panic,
	.cpu_off = pm_panic,
	.cpu_suspend = pm_panic,
	.cpu_resume = pm_panic,
	.system_off = pm_panic,
	.system_reset = pm_panic,
};

void main_init(uint32_t nsec_entry); /* called from assembly only */
void main_init(uint32_t nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;
	size_t pos = get_core_pos();

	/*
	 * Mask IRQ and FIQ before switching to the thread vector, as the
	 * thread handler requires IRQ and FIQ to be masked while executing
	 * on the temporary stack. The thread subsystem also asserts that
	 * IRQ is blocked when using most of its functions.
	 */
	thread_mask_exceptions(THREAD_EXCP_FIQ | THREAD_EXCP_IRQ);

	if (pos == 0) {
		thread_init_primary(&handlers);

		/* initialize platform */
		platform_init();
	}

	thread_init_per_cpu();

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;

	if (pos == 0) {
		unsigned long a, s;
		/* core malloc pool init */
#ifdef CFG_TEE_MALLOC_START
		a = CFG_TEE_MALLOC_START;
		s = CFG_TEE_MALLOC_SIZE;
#else
		a = (unsigned long)&teecore_heap_start;
		s = (unsigned long)&teecore_heap_end;
		a = ((a + 1) & ~0x0FFFF) + 0x10000;	/* round start up to a 64kB boundary */
		s = s & ~0x0FFFF;	/* round end down to a 64kB boundary */
		s = s - a;	/* pool size is what remains between the bounds */
#endif
		malloc_add_pool((void *)a, s);

		teecore_init_ta_ram();

		if (init_teecore() != TEE_SUCCESS)
			panic();
	}

	IMSG("optee initialize finished\n");
}
Example #4
static void cv_signal(struct condvar *cv, bool only_one)
{
	uint32_t old_itr_status;
	struct mutex *m;

	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&cv->spin_lock);
	m = cv->m;
	cpu_spin_unlock(&cv->spin_lock);
	thread_unmask_exceptions(old_itr_status);

	if (m)
		wq_promote_condvar(&m->wq, cv, only_one);
}
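cv_signal() reads as the shared backend of a signal/broadcast pair. A plausible pair of public wrappers, assuming the conventional names, is:

/* Plausible public wrappers (assumed names) around cv_signal() */
void condvar_signal(struct condvar *cv)
{
	cv_signal(cv, true /* wake a single waiter */);
}

void condvar_broadcast(struct condvar *cv)
{
	cv_signal(cv, false /* wake all waiters */);
}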
Example #5
void mutex_unlock(struct mutex *m)
{
	uint32_t old_itr_status;

	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&m->spin_lock);

	TEE_ASSERT(m->value == MUTEX_VALUE_LOCKED);
	thread_rem_mutex(m);
	m->value = MUTEX_VALUE_UNLOCKED;

	cpu_spin_unlock(&m->spin_lock);
	thread_unmask_exceptions(old_itr_status);

	wq_wake_one(&m->wq);
}
Example #6
bool mutex_trylock(struct mutex *m)
{
	uint32_t old_itr_status;
	enum mutex_value old_value;

	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&m->spin_lock);

	old_value = m->value;
	if (old_value == MUTEX_VALUE_UNLOCKED) {
		m->value = MUTEX_VALUE_LOCKED;
		thread_add_mutex(m);
	}

	cpu_spin_unlock(&m->spin_lock);
	thread_unmask_exceptions(old_itr_status);

	return old_value == MUTEX_VALUE_UNLOCKED;
}
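A typical, hypothetical use is opportunistic locking with a non-blocking fallback; update_stats(), stats_update_locked() and stats_note_skipped() are invented helpers:

/* Hypothetical usage sketch: take the fast path only if the lock is free */
static void update_stats(struct mutex *m)
{
	if (mutex_trylock(m)) {
		stats_update_locked();	/* lock was free, work done inline */
		mutex_unlock(m);
	} else {
		stats_note_skipped();	/* lock busy, never sleep here */
	}
}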
Example #7
void mutex_lock(struct mutex *m)
{
	while (true) {
		uint32_t old_itr_status;
		enum mutex_value old_value;
		struct wait_queue_elem wqe;

		/*
		 * If the mutex is locked we need to initialize the wqe
		 * before releasing the spinlock to guarantee that we don't
		 * miss the wakeup from mutex_unlock().
		 *
		 * If the mutex is unlocked we don't need to use the wqe at
		 * all.
		 */

		old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
		cpu_spin_lock(&m->spin_lock);

		old_value = m->value;
		if (old_value == MUTEX_VALUE_LOCKED) {
			wq_wait_init(&m->wq, &wqe);
		} else {
			m->value = MUTEX_VALUE_LOCKED;
			thread_add_mutex(m);
		}

		cpu_spin_unlock(&m->spin_lock);
		thread_unmask_exceptions(old_itr_status);

		if (old_value == MUTEX_VALUE_LOCKED) {
			/*
			 * Someone else is holding the lock, wait in normal
			 * world for the lock to become available.
			 */
			wq_wait_final(&m->wq, &wqe);
		} else
			return;
	}
}
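Note that when wq_wait_final() returns, the outer loop runs again and may still lose the race to another thread, which is why the function retries rather than assuming ownership. A minimal usage sketch, with an invented shared counter and an assumed static initializer:

/* Hypothetical usage sketch: a counter guarded by a mutex */
static struct mutex cnt_mu = MUTEX_INITIALIZER;	/* assumed initializer */
static unsigned int cnt;

static void cnt_inc(void)
{
	mutex_lock(&cnt_mu);	/* may sleep in normal world if contended */
	cnt++;
	mutex_unlock(&cnt_mu);
}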
Example #8
/* Override the default psci_cpu_off() with a platform-specific sequence */
int psci_cpu_off(void)
{
	unsigned int pos = get_core_pos();
	uint32_t exceptions = 0;

	if (pos == 0) {
		EMSG("PSCI_CPU_OFF not supported for core #0");
		return PSCI_RET_INTERNAL_FAILURE;
	}

	DMSG("core %u", pos);

	exceptions = lock_state_access();

	assert(core_state[pos] == CORE_ON);
	core_state[pos] = CORE_OFF;

	unlock_state_access(exceptions);

	thread_mask_exceptions(THREAD_EXCP_ALL);
	stm32_pm_cpu_power_down_wfi();
	panic();
}
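lock_state_access()/unlock_state_access() are not shown here; one plausible shape, mirroring the spinlock-plus-masked-exceptions pattern used throughout this section, is sketched below. The names follow the snippet above; the xsave/xrestore helpers are assumed to combine exception masking with taking the spinlock:

/* Plausible helpers (assumption): serialize core_state[] updates */
static unsigned int state_lock = SPINLOCK_UNLOCK;

static uint32_t lock_state_access(void)
{
	return cpu_spin_lock_xsave(&state_lock);
}

static void unlock_state_access(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&state_lock, exceptions);
}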
Example #9
unsigned int cache_maintenance_l2(int op, paddr_t pa, size_t len)
{
	unsigned int ret = TEE_SUCCESS;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	tee_l2cc_mutex_lock();
	switch (op) {
	case L2CACHE_INVALIDATE:
		arm_cl2_invbyway(pl310_base());
		break;
	case L2CACHE_AREA_INVALIDATE:
		if (len)
			arm_cl2_invbypa(pl310_base(), pa, pa + len - 1);
		break;
	case L2CACHE_CLEAN:
		arm_cl2_cleanbyway(pl310_base());
		break;
	case L2CACHE_AREA_CLEAN:
		if (len)
			arm_cl2_cleanbypa(pl310_base(), pa, pa + len - 1);
		break;
	case L2CACHE_CLEAN_INV:
		arm_cl2_cleaninvbyway(pl310_base());
		break;
	case L2CACHE_AREA_CLEAN_INV:
		if (len)
			arm_cl2_cleaninvbypa(pl310_base(), pa, pa + len - 1);
		break;
	default:
		ret = TEE_ERROR_NOT_IMPLEMENTED;
	}

	tee_l2cc_mutex_unlock();
	thread_set_exceptions(exceptions);
	return ret;
}
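A hypothetical caller cleaning a buffer out to outer memory before handing it to a DMA master could look like this; clean_dma_buf(), buf_pa and buf_len are invented names:

/* Hypothetical usage sketch: clean a physical range from the PL310 */
static void clean_dma_buf(paddr_t buf_pa, size_t buf_len)
{
	if (cache_maintenance_l2(L2CACHE_AREA_CLEAN, buf_pa, buf_len) !=
			TEE_SUCCESS)
		panic();
}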
Example #10
#include <kernel/panic.h>
#include <kernel/thread.h>
#include <trace.h>

void __panic(const char *file __maybe_unused, int line __maybe_unused,
		const char *func __maybe_unused)
{
	uint32_t __unused exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	EMSG_RAW("PANIC: %s %s:%d\n", func, file, line);
	while (1)
		;
}
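Callers would normally reach __panic() through a macro that captures the call site; a plausible definition, assuming the usual wiring, is:

/* Plausible call-site macro (assumption): supplies file/line/function */
#define panic()	__panic(__FILE__, __LINE__, __func__)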
Example #11
static void __mutex_unlock(struct mutex *m, const char *fname, int lineno)
{
	uint32_t old_itr_status;

	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&m->spin_lock);

	TEE_ASSERT(m->value == MUTEX_VALUE_LOCKED);
	thread_rem_mutex(m);
	m->value = MUTEX_VALUE_UNLOCKED;

	cpu_spin_unlock(&m->spin_lock);
	thread_unmask_exceptions(old_itr_status);

	wq_wake_one(&m->wq, m, fname, lineno);
}

static bool __mutex_trylock(struct mutex *m, const char *fname __unused,
			int lineno __unused)
{
	uint32_t old_itr_status;
	enum mutex_value old_value;

	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&m->spin_lock);

	old_value = m->value;
	if (old_value == MUTEX_VALUE_UNLOCKED) {
		m->value = MUTEX_VALUE_LOCKED;
		thread_add_mutex(m);
	}

	cpu_spin_unlock(&m->spin_lock);
	thread_unmask_exceptions(old_itr_status);

	return old_value == MUTEX_VALUE_UNLOCKED;
}

#ifdef CFG_MUTEX_DEBUG