Example #1
void condvar_wait(struct condvar *cv, struct mutex *m)
{
	uint32_t old_itr_status;
	struct wait_queue_elem wqe;

	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);

	/* Link this condvar to this mutex until reinitialized */
	cpu_spin_lock(&cv->spin_lock);
	TEE_ASSERT(!cv->m || cv->m == m);
	cv->m = m;
	cpu_spin_unlock(&cv->spin_lock);

	cpu_spin_lock(&m->spin_lock);

	/* Add to mutex wait queue as a condvar waiter */
	wq_wait_init_condvar(&m->wq, &wqe, cv);

	/* Unlock the mutex */
	TEE_ASSERT(m->value == MUTEX_VALUE_LOCKED);
	thread_rem_mutex(m);
	m->value = MUTEX_VALUE_UNLOCKED;

	cpu_spin_unlock(&m->spin_lock);

	thread_unmask_exceptions(old_itr_status);

	/* Wake any waiters */
	wq_wake_one(&m->wq);

	wq_wait_final(&m->wq, &wqe);

	mutex_lock(m);
}
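
Since condvar_wait() re-acquires the mutex before returning, a caller must re-check its predicate in a loop. A minimal, hypothetical sketch of that contract (the queue state and the signalling side are assumptions, not part of the example above):

static struct mutex q_mutex;	/* assumed initialized elsewhere */
static struct condvar q_cv;	/* assumed initialized elsewhere */
static bool q_ready;

static void wait_for_work(void)
{
	mutex_lock(&q_mutex);
	/* Re-check the predicate in a loop: condvar_wait() releases
	 * q_mutex, blocks, and re-locks it before returning. */
	while (!q_ready)
		condvar_wait(&q_cv, &q_mutex);
	/* ... consume work while still holding q_mutex ... */
	mutex_unlock(&q_mutex);
}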
Example #2
uintptr_t tee_mmu_get_load_addr(const struct tee_ta_ctx *const ctx)
{
	TEE_ASSERT((ctx->flags & TA_FLAG_EXEC_DDR) != 0);
	TEE_ASSERT(ctx->mmu && ctx->mmu->table &&
		   ctx->mmu->size >= TEE_MMU_UMAP_CODE_IDX);

	return ctx->mmu->table[TEE_MMU_UMAP_CODE_IDX].va;
}
Example #3
void mutex_destroy(struct mutex *m)
{
	/*
	 * Caller guarantees that no one will try to take the mutex so
	 * there's no need to take the spinlock before accessing it.
	 */
	TEE_ASSERT(m->value == MUTEX_VALUE_UNLOCKED);
	TEE_ASSERT(wq_is_empty(&m->wq));
}
Example #4
/*
 * core_init_mmu_map - init TEE core default memory mapping
 *
 * This routine sets the static default TEE core mapping.
 *
 * If an error occurs, core_init_mmu_map is expected to reset.
 */
void core_init_mmu_map(void)
{
	struct tee_mmap_region mm[MAX_MMAP_REGIONS + 1];
	struct map_area *map, *in;

	/* get memory bootcfg from system */
	in = bootcfg_get_memory();
	if (!in) {
		EMSG("Invalid memory map");
		TEE_ASSERT(0);
	}
	bootcfg_pbuf_is = (unsigned long)bootcfg_get_pbuf_is_handler();
	if (bootcfg_pbuf_is == 0) {
		EMSG("invalid platform handler for pbuf_is");
		TEE_ASSERT(0);
	}

	/* we must find at least TEE_RAM, TA_RAM and NSEC_SHM areas */
	map_tee_ram = NULL;
	map_ta_ram = NULL;
	map_nsec_shm = NULL;

	/* map what needs to be mapped (non-zero size and not INTRAM/EXTRAM) */
	map = in;
	while (map->type != MEM_AREA_NOTYPE) {
		if (map->va)
			panic();

		map->va = map->pa;	/* 1-to-1 pa = va mapping */
		if (map->type == MEM_AREA_TEE_RAM)
			map_tee_ram = map;
		else if (map->type == MEM_AREA_TA_RAM)
			map_ta_ram = map;
		else if (map->type == MEM_AREA_NSEC_SHM)
			map_nsec_shm = map;

		map++;
	}

	if ((map_tee_ram == NULL) || (map_ta_ram == NULL) ||
	    (map_nsec_shm == NULL)) {
		EMSG("mapping area missing");
		TEE_ASSERT(0);
	}

	static_memory_map = in;

	core_mmu_mmap_init(mm, ARRAY_SIZE(mm), in);

	core_init_mmu_tables(mm);
}
Example #5
void apdu_release(struct apdu_base *apdu)
{
	TEE_ASSERT(apdu != NULL);
	apdu->refcnt--;
	if (apdu->refcnt == 0)
		free(apdu);
}
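
apdu_release() pairs with apdu_acquire() (Example #22 below): the buffer is freed when the reference count drops to zero. A hedged sketch with a hypothetical caller:

static void use_apdu(struct apdu_base *apdu)
{
	apdu_acquire(apdu);	/* refcnt++, see Example #22 */
	/* ... work with the APDU ... */
	apdu_release(apdu);	/* refcnt--; frees the APDU at zero */
}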
Example #6
paddr_t core_mmu_get_ul1_ttb_pa(void)
{
	/* Note that this depends on flat mapping of TEE Core */
	paddr_t pa = (paddr_t)core_mmu_get_ul1_ttb_va();

	TEE_ASSERT(!(pa & ~TEE_MMU_TTB_UL1_MASK));
	return pa;
}
Example #7
void tee_se_reader_unlock_basic_channel(struct tee_se_reader_proxy *proxy)
{
	TEE_ASSERT(proxy != NULL);

	mutex_lock(&proxy->mutex);
	proxy->basic_channel_locked = false;
	mutex_unlock(&proxy->mutex);
}
Example #8
uintptr_t tee_mmu_get_load_addr(const struct tee_ta_ctx *const ctx)
{
	const struct user_ta_ctx *utc = to_user_ta_ctx((void *)ctx);

	TEE_ASSERT(utc->mmu && utc->mmu->table &&
		   utc->mmu->size == TEE_MMU_UMAP_MAX_ENTRIES);

	/* Index 1 is the code area entry (cf. TEE_MMU_UMAP_CODE_IDX in
	 * Examples #2 and #9) */
	return utc->mmu->table[1].va;
}
Example #9
uintptr_t tee_mmu_get_load_addr(const struct tee_ta_ctx *const ctx)
{
	const struct user_ta_ctx *utc = to_user_ta_ctx((void *)ctx);

	TEE_ASSERT(utc->mmu && utc->mmu->table &&
		   utc->mmu->size >= TEE_MMU_UMAP_CODE_IDX);

	return utc->mmu->table[TEE_MMU_UMAP_CODE_IDX].va;
}
Example #10
TEE_Result tee_se_reader_get_atr(struct tee_se_reader_proxy *proxy,
		uint8_t **atr, size_t *atr_len)
{
	TEE_Result ret;
	struct tee_se_reader *r;

	TEE_ASSERT(proxy != NULL && atr != NULL && atr_len != NULL);
	ret = tee_se_reader_check_state(proxy);
	if (ret != TEE_SUCCESS)
		return ret;

	mutex_lock(&proxy->mutex);
	r = proxy->reader;

	TEE_ASSERT(r->ops->get_atr);
	ret = r->ops->get_atr(r, atr, atr_len);

	mutex_unlock(&proxy->mutex);
	return ret;
}
Example #11
void teecore_init_ta_ram(void)
{
	vaddr_t s;
	vaddr_t e;

	/* get virtual addr/size of RAM where TAs are loaded/executed */
	core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);

	TEE_ASSERT((s & (CORE_MMU_USER_CODE_SIZE - 1)) == 0);
	TEE_ASSERT((e & (CORE_MMU_USER_CODE_SIZE - 1)) == 0);
	/* extra check: we could rely on core_mmu_get_mem_by_type() */
	TEE_ASSERT(tee_vbuf_is_sec(s, e - s) == true);

	TEE_ASSERT(tee_mm_is_empty(&tee_mm_sec_ddr));

	/* remove previous config and init TA ddr memory pool */
	tee_mm_final(&tee_mm_sec_ddr);
	tee_mm_init(&tee_mm_sec_ddr, s, e, CORE_MMU_USER_CODE_SHIFT,
		    TEE_MM_POOL_NO_FLAGS);
}
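
The alignment asserts above rely on the usual power-of-two mask trick; a small self-contained illustration (it assumes CORE_MMU_USER_CODE_SIZE is a power of two, the only case for which the mask test is valid):

#include <stdbool.h>
#include <stdint.h>

/* x is size-aligned iff its low log2(size) bits are all zero. */
static bool is_aligned(uintptr_t x, uintptr_t size)
{
	return (x & (size - 1)) == 0;
}

/* E.g. for size = 0x1000: is_aligned(0x20000, 0x1000) is true,
 * while is_aligned(0x20004, 0x1000) is false (low bits 0x004). */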
Example #12
void teecore_init_pub_ram(void)
{
	vaddr_t s;
	vaddr_t e;
	unsigned int nsec_tee_size = 32 * 1024;

	/* get virtual addr/size of NSec shared mem allocated from teecore */
	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);

	TEE_ASSERT(s < e);
	TEE_ASSERT((s & SMALL_PAGE_MASK) == 0);
	TEE_ASSERT((e & SMALL_PAGE_MASK) == 0);
	/* extra check: we could rely on core_mmu_get_mem_by_type() */
	TEE_ASSERT(tee_vbuf_is_non_sec(s, e - s) == true);

	/*
	 * The first 32 kbytes are allocated from teecore.
	 * The remainder is under control of the NSec allocator.
	 */
	TEE_ASSERT((e - s) > nsec_tee_size);

	TEE_ASSERT(tee_mm_is_empty(&tee_mm_pub_ddr));
	tee_mm_final(&tee_mm_pub_ddr);
	tee_mm_init(&tee_mm_pub_ddr, s, s + nsec_tee_size, SMALL_PAGE_SHIFT,
		    TEE_MM_POOL_NO_FLAGS);

	s += nsec_tee_size;
	default_nsec_shm_paddr = s;
	default_nsec_shm_size = e - s;
}
Example #13
TEE_Result tee_se_reader_get_name(struct tee_se_reader_proxy *proxy,
		char **reader_name, size_t *reader_name_len)
{
	size_t name_len;

	TEE_ASSERT(proxy != NULL && proxy->reader != NULL);

	name_len = strlen(proxy->reader->name);
	*reader_name = proxy->reader->name;
	*reader_name_len = name_len;

	return TEE_SUCCESS;
}
Example #14
TEE_Result tee_se_reader_transmit(struct tee_se_reader_proxy *proxy,
		uint8_t *tx_buf, size_t tx_buf_len,
		uint8_t *rx_buf, size_t *rx_buf_len)
{
	struct tee_se_reader *r;
	TEE_Result ret;

	TEE_ASSERT(proxy != NULL && proxy->reader != NULL);
	ret = tee_se_reader_check_state(proxy);
	if (ret != TEE_SUCCESS)
		return ret;

	mutex_lock(&proxy->mutex);
	r = proxy->reader;

	TEE_ASSERT(r->ops->transmit);
	ret = r->ops->transmit(r, tx_buf, tx_buf_len, rx_buf, rx_buf_len);

	mutex_unlock(&proxy->mutex);

	return ret;
}
Example #15
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t user_va_base;
	size_t user_va_size;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	core_mmu_get_user_va_range(&user_va_base, &user_va_size);
	if (v >= user_va_base && v < (user_va_base + user_va_size)) {
		if (!core_mmu_user_mapping_is_active()) {
			TEE_ASSERT(pa == 0);
			return;
		}

		res = tee_mmu_user_va2pa_helper(
			to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
		if (res == TEE_SUCCESS)
			TEE_ASSERT(pa == p);
		else
			TEE_ASSERT(pa == 0);
		return;
	}
#ifdef CFG_WITH_PAGER
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, for
		 * instance RW areas when the page is populated.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			TEE_ASSERT(pa == p);
		} else
			TEE_ASSERT(pa == 0);
		return;
	}
#endif
	if (!core_va2pa_helper(va, &p))
		TEE_ASSERT(pa == p);
	else
		TEE_ASSERT(pa == 0);
}
Example #16
TEE_Result iso7816_exchange_apdu(struct tee_se_reader_proxy *proxy,
		struct cmd_apdu *cmd, struct resp_apdu *resp)
{
	TEE_Result ret;

	TEE_ASSERT(cmd != NULL && resp != NULL);
	ret = tee_se_reader_transmit(proxy,
			cmd->base.data_buf, cmd->base.length,
			resp->base.data_buf, &resp->base.length);

	if (ret == TEE_SUCCESS)
		parse_resp_apdu(resp);

	return ret;
}
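
A hypothetical caller following the allocation pattern of Example #21; SELECT_CMD and the P1/P2/length values are illustrative assumptions, only the alloc_cmd_apdu()/alloc_resp_apdu()/apdu_release() usage mirrors this listing:

static TEE_Result exchange_simple(struct tee_se_reader_proxy *proxy)
{
	struct cmd_apdu *cmd;
	struct resp_apdu *resp;
	TEE_Result ret;
	size_t rx_len = 2;	/* illustrative expected response length */

	cmd = alloc_cmd_apdu(ISO7816_CLA, SELECT_CMD /* hypothetical INS */,
			0x04, 0x00, 0, rx_len, NULL);
	if (!cmd)
		return TEE_ERROR_OUT_OF_MEMORY;

	resp = alloc_resp_apdu(rx_len);
	if (!resp) {
		apdu_release(to_apdu_base(cmd));
		return TEE_ERROR_OUT_OF_MEMORY;
	}

	ret = iso7816_exchange_apdu(proxy, cmd, resp);

	apdu_release(to_apdu_base(cmd));
	apdu_release(to_apdu_base(resp));
	return ret;
}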
Example #17
void teecore_init_pub_ram(void)
{
	vaddr_t s;
	vaddr_t e;

	/* get virtual addr/size of NSec shared mem allocated from teecore */
	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);

	TEE_ASSERT(s < e);
	TEE_ASSERT((s & SMALL_PAGE_MASK) == 0);
	TEE_ASSERT((e & SMALL_PAGE_MASK) == 0);
	/* extra check: we could rely on core_mmu_get_mem_by_type() */
	TEE_ASSERT(tee_vbuf_is_non_sec(s, e - s) == true);

#ifdef CFG_PL310
	/* Allocate statically the l2cc mutex */
	TEE_ASSERT((e - s) > 0);
	tee_l2cc_store_mutex_boot_pa(s);
	s += sizeof(uint32_t);		/* size of a pl310 mutex */
#endif

	default_nsec_shm_paddr = virt_to_phys((void *)s);
	default_nsec_shm_size = e - s;
}
Example #18
void tee_se_reader_detach(struct tee_se_reader_proxy *proxy)
{
	TEE_ASSERT(proxy->refcnt > 0);

	mutex_lock(&proxy->mutex);
	proxy->refcnt--;
	if (proxy->refcnt == 0) {
		struct tee_se_reader *r = proxy->reader;

		if (r->ops->close)
			r->ops->close(r);
	}
	mutex_unlock(&proxy->mutex);
}
Example #19
TEE_Result tee_se_reader_open_session(struct tee_se_reader_proxy *proxy,
		struct tee_se_session **session)
{
	TEE_Result ret;
	struct tee_se_session *s;

	TEE_ASSERT(session != NULL && *session == NULL);
	TEE_ASSERT(proxy != NULL && proxy->reader != NULL);

	s = tee_se_session_alloc(proxy);
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	ret = tee_se_reader_attach(proxy);
	if (ret != TEE_SUCCESS)
		goto err_free_session;

	*session = s;

	return TEE_SUCCESS;
err_free_session:
	tee_se_session_free(s);
	return ret;
}
Example #20
void mutex_unlock(struct mutex *m)
{
	uint32_t old_itr_status;

	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&m->spin_lock);

	TEE_ASSERT(m->value == MUTEX_VALUE_LOCKED);
	thread_rem_mutex(m);
	m->value = MUTEX_VALUE_UNLOCKED;

	cpu_spin_unlock(&m->spin_lock);
	thread_unmask_exceptions(old_itr_status);

	/* Wake any waiter now that the mutex is free */
	wq_wake_one(&m->wq);
}
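
For completeness, the plain critical-section pattern these primitives support; mutex initialization is not shown anywhere in this listing, so the initializer mentioned here is an assumption:

static struct mutex counter_mutex;	/* assumed initialized elsewhere,
					 * e.g. by a mutex_init()-style
					 * function */
static unsigned int counter;

static void counter_inc(void)
{
	mutex_lock(&counter_mutex);
	counter++;			/* protected by counter_mutex */
	mutex_unlock(&counter_mutex);	/* wakes one waiter, if any */
}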
Example #21
static TEE_Result internal_manage_channel(struct tee_se_session *s,
		bool open_ops, int *channel_id)
{
	struct cmd_apdu *cmd;
	struct resp_apdu *resp;
	TEE_Result ret;
	size_t tx_buf_len = 0, rx_buf_len = 1;

	uint8_t open_flag = (open_ops) ? OPEN_CHANNEL : CLOSE_CHANNEL;
	uint8_t channel_flag =
		(open_flag == OPEN_CHANNEL) ? OPEN_NEXT_AVAILABLE : *channel_id;

	TEE_ASSERT(s != NULL);

	cmd = alloc_cmd_apdu(ISO7816_CLA, MANAGE_CHANNEL_CMD, open_flag,
			channel_flag, tx_buf_len, rx_buf_len, NULL);

	resp = alloc_resp_apdu(rx_buf_len);

	ret = tee_se_session_transmit(s, cmd, resp);
	if (ret != TEE_SUCCESS) {
		EMSG("exchange apdu failed: %d", ret);
		goto out;	/* release the APDU buffers below */
	}

	if (resp->sw1 == CMD_OK_SW1 && resp->sw2 == CMD_OK_SW2) {
		if (open_ops)
			*channel_id = resp->base.data_buf[0];
		ret = TEE_SUCCESS;
	} else {
		EMSG("operation failed, sw1:%02X, sw2:%02X",
				resp->sw1, resp->sw2);
		ret = TEE_ERROR_NOT_SUPPORTED;
	}

out:
	apdu_release(to_apdu_base(cmd));
	apdu_release(to_apdu_base(resp));

	return ret;
}
Example #22
void apdu_acquire(struct apdu_base *apdu)
{
	TEE_ASSERT(apdu != NULL);
	apdu->refcnt++;
}
Example #23
/*
 * tee_mmu_map - alloc and fill mmu mapping table for a user TA (uTA).
 *
 * param - contains the physical addresses of the input buffers;
 *         on return it holds the corresponding logical addresses
 *
 * Allocate a table to store the N first section entries of the MMU L1 table
 * used to map the target user TA, and clear the table to 0.
 * Load mappings for the TA stack/heap area, code area and params area
 * (params are the 4 GP TEE TA invoke parameter buffers).
 */
TEE_Result tee_mmu_map(struct tee_ta_ctx *ctx, struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t pa = 0;
	uintptr_t smem;
	size_t n;

	TEE_ASSERT((ctx->flags & TA_FLAG_EXEC_DDR) != 0);

	res = tee_mmu_umap_init(ctx->mmu);
	if (res != TEE_SUCCESS)
		goto exit;

	/*
	 * Map stack
	 */
	smem = tee_mm_get_smem(ctx->mm_stack);
	if (core_va2pa((void *)smem, &pa)) {
		res = TEE_ERROR_SECURITY;
		goto exit;
	}
	tee_mmu_umap_set_pa(ctx->mmu->table + TEE_MMU_UMAP_HEAP_STACK_IDX,
			    CORE_MMU_USER_CODE_SIZE,
			    pa, tee_mm_get_bytes(ctx->mm_stack),
			    TEE_MMU_UDATA_ATTR | TEE_MMU_UCACHE_DEFAULT_ATTR);

	/*
	 * Map code
	 */
	smem = tee_mm_get_smem(ctx->mm);
	if (core_va2pa((void *)smem, &pa)) {
		res = TEE_ERROR_SECURITY;
		goto exit;
	}
	tee_mmu_umap_set_pa(ctx->mmu->table + TEE_MMU_UMAP_CODE_IDX,
			    CORE_MMU_USER_CODE_SIZE,
			    pa, tee_mm_get_bytes(ctx->mm),
			    TEE_MMU_UCODE_ATTR | TEE_MMU_UCACHE_DEFAULT_ATTR);

	for (n = 0; n < 4; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		TEE_Param *p = &param->params[n];
		uint32_t attr = TEE_MMU_UDATA_ATTR;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (p->memref.size == 0)
			continue;

		if (tee_pbuf_is_non_sec(p->memref.buffer, p->memref.size))
			attr &= ~TEE_MATTR_SECURE;

		if (param->param_attr[n] & TEESMC_ATTR_CACHE_I_WRITE_THR)
			attr |= TEE_MATTR_I_WRITE_THR;
		if (param->param_attr[n] & TEESMC_ATTR_CACHE_I_WRITE_BACK)
			attr |= TEE_MATTR_I_WRITE_BACK;
		if (param->param_attr[n] & TEESMC_ATTR_CACHE_O_WRITE_THR)
			attr |= TEE_MATTR_O_WRITE_THR;
		if (param->param_attr[n] & TEESMC_ATTR_CACHE_O_WRITE_BACK)
			attr |= TEE_MATTR_O_WRITE_BACK;

		res = tee_mmu_umap_add_param(ctx->mmu,
				(paddr_t)p->memref.buffer, p->memref.size,
				attr);
		if (res != TEE_SUCCESS)
			goto exit;
	}

	res = tee_mmu_umap_set_vas(ctx->mmu);
	if (res != TEE_SUCCESS)
		goto exit;

	for (n = 0; n < 4; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		TEE_Param *p = &param->params[n];

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (p->memref.size == 0)
			continue;

		res = tee_mmu_user_pa2va(ctx, (paddr_t)p->memref.buffer,
					 &p->memref.buffer);
		if (res != TEE_SUCCESS)
			goto exit;
	}

	ctx->mmu->ta_private_vmem_start = ctx->mmu->table[0].va;

	n = TEE_MMU_UMAP_MAX_ENTRIES;
	do {
		n--;
	} while (n && !ctx->mmu->table[n].size);
	ctx->mmu->ta_private_vmem_end = ctx->mmu->table[n].va +
					ctx->mmu->table[n].size;

exit:
	if (res != TEE_SUCCESS)
		tee_mmu_umap_clear(ctx->mmu);

	return res;
}
Example #24
static void init_runtime(uint32_t pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;
	size_t block_size;

	TEE_ASSERT(pageable_size % SMALL_PAGE_SIZE == 0);
	TEE_ASSERT(hash_size == (size_t)__tmp_hashes_size);

	/*
	 * Zero the BSS area. Note that globals that would normally go
	 * into BSS but are used before this point have to be put into
	 * .nozi.* to avoid getting overwritten.
	 */
	memset(__bss_start, 0, __bss_end - __bss_start);

	thread_init_boot_thread();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	hashes = malloc(hash_size);
	EMSG("hash_size %zu", hash_size);
	TEE_ASSERT(hashes);
	memcpy(hashes, __tmp_hashes_start, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	TEE_ASSERT(mm);
	paged_store = (uint8_t *)tee_mm_get_smem(mm);
	/* Copy init part into pageable area */
	memcpy(paged_store, __init_start, init_size);
	/* Copy pageable part after init part into pageable area */
	memcpy(paged_store + init_size, (void *)pageable_part,
		__pageable_part_end - __pageable_part_start);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
				n, page, res);
			panic();
		}
	}

	/*
	 * Copy what's not initialized in the last init page. Needed
	 * because we're not going to fault in the init pages again. We
	 * can't fault in pages until we've switched to the new vector by
	 * calling thread_init_handlers() below.
	 */
	if (init_size % SMALL_PAGE_SIZE) {
		uint8_t *p;

		memcpy(__init_start + init_size, paged_store + init_size,
			SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));

		p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
				~SMALL_PAGE_MASK);

		cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
		cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
				     SMALL_PAGE_SIZE);
	}

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	block_size = get_block_size();
	if (!tee_mm_init(&tee_mm_vcore,
			ROUNDDOWN(CFG_TEE_RAM_START, block_size),
			ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
				block_size),
			SMALL_PAGE_SHIFT, 0))
		panic();

	/*
	 * Assign the alias area for the pager at the end of the small
	 * page block that the rest of the binary is loaded into. We're
	 * taking more than needed, but we're guaranteed to not need more
	 * than the physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
	TEE_ASSERT(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged. Note that there might
	 * be a gap between tee_mm_vcore.lo and TEE_RAM_START which is
	 * also claimed to avoid later allocations getting that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
			(vaddr_t)(__text_init_start - tee_mm_vcore.lo));
	TEE_ASSERT(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	TEE_ASSERT(mm);
	if (!tee_pager_add_area(mm, TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
				paged_store, hashes))
		panic();
	tee_pager_add_pages((vaddr_t)__pageable_start,
		ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages((vaddr_t)__pageable_start +
				ROUNDUP(init_size, SMALL_PAGE_SIZE),
			(pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
				SMALL_PAGE_SIZE, true);
}
Example #25
size_t apdu_get_length(struct apdu_base *apdu)
{
	TEE_ASSERT(apdu != NULL);
	return apdu->length;
}
Example #26
int apdu_get_refcnt(struct apdu_base *apdu)
{
	TEE_ASSERT(apdu != NULL);
	return apdu->refcnt;
}
Example #27
bool tee_se_reader_is_basic_channel_locked(struct tee_se_reader_proxy *proxy)
{
	TEE_ASSERT(proxy != NULL);
	return proxy->basic_channel_locked;
}
Example #28
void condvar_destroy(struct condvar *cv)
{
	if (cv->m)
		TEE_ASSERT(!wq_have_condvar(&cv->m->wq, cv));
	condvar_init(cv);
}
Example #29
int tee_se_reader_get_refcnt(struct tee_se_reader_proxy *proxy)
{
	TEE_ASSERT(proxy != NULL && proxy->reader != NULL);
	return proxy->refcnt;
}
Example #30
void tee_se_reader_get_properties(struct tee_se_reader_proxy *proxy,
		TEE_SEReaderProperties *prop)
{
	TEE_ASSERT(proxy != NULL && proxy->reader != NULL);
	*prop = proxy->reader->prop;
}