Example #1
int thread_state_suspend(uint32_t flags, uint32_t cpsr, uint32_t pc)
{
	struct thread_core_local *l = get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	check_canaries();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	/*
	 * If the thread was suspended while a user mapping was active,
	 * save that mapping and switch back to the kernel-only mapping.
	 */
	threads[ct].have_user_map = !tee_mmu_is_kernel_mapping();
	if (threads[ct].have_user_map) {
		tee_mmu_get_map(&threads[ct].user_map);
		tee_mmu_set_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}
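This and the following examples all manipulate the same shared bookkeeping: a per-core curr_thread slot, a global threads[] table and a global lock. The real declarations are not part of this listing; the sketch below is only a minimal reconstruction from how the fields are used, so the exact field types, the NUM_THREADS value and the tee_mmu_mapping placeholder are assumptions.

#include <stdbool.h>
#include <stdint.h>

#define NUM_THREADS	4	/* assumed pool size */

enum thread_state {
	THREAD_STATE_FREE,
	THREAD_STATE_SUSPENDED,
	THREAD_STATE_ACTIVE,
};

/* Register state saved at suspend and restored at resume */
struct thread_ctx_regs {
	uint32_t r0, r1, r2, r3, r4, r5, r6, r7;
	uint32_t svc_sp;	/* SVC-mode stack pointer */
	uint32_t pc;
	uint32_t cpsr;
};

/* Placeholder for whatever tee_mmu_get_map()/tee_mmu_set_map() operate on */
struct tee_mmu_mapping {
	uint32_t dummy;
};

struct thread_ctx {
	struct thread_ctx_regs regs;
	enum thread_state state;
	uint32_t flags;
	uint32_t hyp_clnt_id;		/* saved from args->a7 */
	uint32_t stack_va_end;		/* initial value for regs.svc_sp */
	bool have_user_map;
	struct tee_mmu_mapping user_map;
};

struct thread_core_local {
	int curr_thread;	/* index into threads[], -1 when idle */
};

static struct thread_ctx threads[NUM_THREADS];

/* Serialize all access to threads[] and its state transitions */
static void lock_global(void);
static void unlock_global(void);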
Example #2
static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	if (!have_one_active_thread() && !have_one_preempted_thread()) {
		for (n = 0; n < NUM_THREADS; n++) {
			if (threads[n].state == THREAD_STATE_FREE) {
				threads[n].state = THREAD_STATE_ACTIVE;
				found_thread = true;
				break;
			}
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = TEESMC_RETURN_EBUSY;
		args->a1 = 0;
		args->a2 = 0;
		args->a3 = 0;
		return;
	}

	l->curr_thread = n;

	threads[n].regs.pc = (uint32_t)thread_stdcall_entry;
	/* Stdcalls start in SVC mode with IRQ masked and FIQ unmasked */
	threads[n].regs.cpsr = CPSR_MODE_SVC | CPSR_I;
	threads[n].flags = 0;
	/* Enable Thumb mode if the entry address has the Thumb bit set */
	if (threads[n].regs.pc & 1)
		threads[n].regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	threads[n].regs.svc_sp = threads[n].stack_va_end;

	/*
	 * Copy the arguments into the context so that they appear
	 * in r0-r7 when the thread is started.
	 */
	threads[n].regs.r0 = args->a0;
	threads[n].regs.r1 = args->a1;
	threads[n].regs.r2 = args->a2;
	threads[n].regs.r3 = args->a3;
	threads[n].regs.r4 = args->a4;
	threads[n].regs.r5 = args->a5;
	threads[n].regs.r6 = args->a6;
	threads[n].regs.r7 = args->a7;

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_resume(&threads[n].regs);
}
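Example #2 composes the initial CPSR for the new thread from constants that are not shown in the listing. On ARMv7-A they correspond directly to architectural CPSR fields, so one plausible set of definitions, given here only for reference, is:

/* CPSR bits as used in Example #2 (ARMv7-A encoding) */
#define CPSR_MODE_SVC	0x13U		/* M[4:0]: Supervisor mode */
#define CPSR_T		(1U << 5)	/* Thumb execution state */
#define CPSR_I		(1U << 7)	/* IRQs masked while set */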
Example #3
static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (have_one_active_thread()) {
		rv = TEESMC_RETURN_EBUSY;
	} else if (n < NUM_THREADS &&
		threads[n].state == THREAD_STATE_SUSPENDED &&
		args->a7 == threads[n].hyp_clnt_id) {
		threads[n].state = THREAD_STATE_ACTIVE;
	} else {
		rv = TEESMC_RETURN_ERESUME;
	}

	unlock_global();

	if (rv) {
		args->a0 = rv;
		args->a1 = 0;
		args->a2 = 0;
		args->a3 = 0;
		return;
	}

	l->curr_thread = n;

	/* Restore the user mapping saved when the thread was suspended */
	if (threads[n].have_user_map)
		tee_mmu_set_map(&threads[n].user_map);

	/*
	 * A return from an RPC issued to service an IRQ must not pick up
	 * parameters from the non-secure world, so the arguments are only
	 * copied back when the corresponding flag was set at suspend time.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		/*
		 * Copy the values returned from the RPC; they will appear
		 * in r0-r3 when the thread is resumed.
		 */
		threads[n].regs.r0 = args->a0;
		threads[n].regs.r1 = args->a1;
		threads[n].regs.r2 = args->a2;
		threads[n].regs.r3 = args->a3;
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_resume(&threads[n].regs);
}
Example #4
void thread_state_free(void)
{
	struct thread_core_local *l = get_core_local();

	assert(l->curr_thread != -1);

	lock_global();

	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	threads[l->curr_thread].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}
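thread_alloc_and_run() and thread_resume_from_rpc() both gate on have_one_active_thread(), which is not included in the listing. Since it is only called with the global lock held, it is presumably a plain scan of the thread table; a minimal sketch under that assumption follows (have_one_preempted_thread() would be an analogous scan):

/*
 * Sketch only: true if any thread in the table is currently active.
 * Must be called with the global lock held, as the callers above do.
 */
static bool have_one_active_thread(void)
{
	size_t n;

	for (n = 0; n < NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_ACTIVE)
			return true;
	}

	return false;
}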