Example no. 1
/*
 * pbuf_is_multipurpose - return true if the buffer lies inside unsafe DDR
 *
 * Unsafe DDR (or multipurpose DDR) is DDR placed behind a firewall that is
 * reconfigured at run-time: there is no static information that can tell
 * whether this RAM is currently tagged secure or not.
 */
static bool pbuf_is_multipurpose(unsigned long paddr, size_t size)
{
	if (core_is_buffer_intersect(paddr, size,
				     secure_only.paddr, secure_only.size))
		return false;
	if (core_is_buffer_intersect(paddr, size,
				     nsec_shared.paddr, nsec_shared.size))
		return false;

	return pbuf_is_ddr(paddr, size);
}
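
Every snippet in this collection relies on core_is_buffer_intersect(). For orientation, the following is a minimal sketch of such an overlap test on half-open ranges; the helper name, the paddr_t definition and the signature are illustrative assumptions, not the actual OP-TEE implementation, and address wrap-around is deliberately ignored.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uintptr_t paddr_t;	/* assumed physical address type */

/*
 * Sketch: buffer [b, b + bl) intersects area [a, a + al) when neither
 * range ends at or before the start of the other. Empty ranges never
 * intersect anything.
 */
static bool buffer_intersects(paddr_t b, size_t bl, paddr_t a, size_t al)
{
	if (!bl || !al)
		return false;
	if (b + bl <= a || a + al <= b)
		return false;
	return true;
}
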
Example no. 2
/* return true only if buffer intersects TA private memory */
bool tee_mmu_is_vbuf_intersect_ta_private(const struct tee_ta_ctx *ctx,
					  const void *va, size_t size)
{
	return core_is_buffer_intersect((vaddr_t)va, size,
	  ctx->mmu->ta_private_vmem_start,
	  ctx->mmu->ta_private_vmem_end - ctx->mmu->ta_private_vmem_start + 1);
}
static bool _pbuf_intersects(struct memaccess_area *a, size_t alen,
			     paddr_t pa, size_t size)
{
	size_t n;

	for (n = 0; n < alen; n++)
		if (core_is_buffer_intersect(pa, size, a[n].paddr, a[n].size))
			return true;
	return false;
}
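
_pbuf_intersects() walks an array of memaccess_area entries and reports whether the candidate buffer overlaps any of them. A hypothetical call site could look like the sketch below; the region table, its addresses and the wrapper name are made up for illustration.

/* Hypothetical region table; addresses and sizes are illustrative only. */
static struct memaccess_area reserved_areas[] = {
	{ .paddr = 0x40000000, .size = 0x00100000 },
	{ .paddr = 0x42000000, .size = 0x00200000 },
};

/* Reject any buffer that touches one of the reserved areas. */
static bool pbuf_hits_reserved(paddr_t pa, size_t size)
{
	return _pbuf_intersects(reserved_areas,
				sizeof(reserved_areas) / sizeof(reserved_areas[0]),
				pa, size);
}
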
Example no. 4
/*
 * This routine is called while the MMU and core memory management are not yet initialized.
 */
struct map_area *bootcfg_get_memory(void)
{
	struct map_area *map;
	struct memaccess_area *a, *a2;
	struct map_area *ret = bootcfg_memory_map;

	/* check defined memory access layout */
	a = (struct memaccess_area *)&secure_only;
	a2 = (struct memaccess_area *)&nsec_shared;
	if (core_is_buffer_intersect(a->paddr, a->size, a2->paddr, a2->size)) {
		EMSG("invalid memory access configuration: sec/nsec");
		ret = NULL;
	}
	if (ret == NULL)
		return ret;

	/* check defined mapping (overlapping will be tested later) */
	map = bootcfg_memory_map;
	while (map->type != MEM_AREA_NOTYPE) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM:
			a = (struct memaccess_area *)&secure_only;
			if (!core_is_buffer_inside(map->pa, map->size,
						a->paddr, a->size)) {
				EMSG("TEE_RAM does not fit in secure_only");
				ret = NULL;
			}
			break;
		case MEM_AREA_TA_RAM:
			a = (struct memaccess_area *)&secure_only;
			if (!core_is_buffer_inside(map->pa, map->size,
						a->paddr, a->size)) {
				EMSG("TEE_RAM does not fit in secure_only");
				ret = NULL;
			}
			break;
		case MEM_AREA_NSEC_SHM:
			a = (struct memaccess_area *)&nsec_shared;
			if (!core_is_buffer_inside(map->pa, map->size,
						a->paddr, a->size)) {
				EMSG("TEE_RAM does not fit in secure_only");
				ret = NULL;
			}
			break;
		default:
			/* other mapped areas are not checked */
			break;
		}
		map++;
	}

	return ret;
}
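
Unlike the intersection test used above, bootcfg_get_memory() relies on core_is_buffer_inside(): a map entry is accepted only when it is fully contained in the matching access area. A minimal sketch of such a containment test, under the same naming and type assumptions as the earlier sketch:

/*
 * Sketch: buffer [b, b + bl) lies inside area [a, a + al) only when it
 * starts no earlier and ends no later. Empty ranges are rejected and
 * wrap-around is again ignored.
 */
static bool buffer_is_inside(paddr_t b, size_t bl, paddr_t a, size_t al)
{
	if (!bl || !al)
		return false;
	return b >= a && b + bl <= a + al;
}
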
Example no. 5
static void insert_mmap(struct tee_mmap_region *mm, size_t max_elem,
		struct tee_mmap_region *mme)
{
	size_t n;

	for (n = 0; n < (max_elem - 1); n++) {
		if (!mm[n].size) {
			mm[n] = *mme;
			return;
		}

		if (core_is_buffer_intersect(mme->va, mme->size, mm[n].va,
					     mm[n].size)) {
			vaddr_t end_va;

			/* Check that the overlapping maps are compatible */
			if (mme->attr != mm[n].attr ||
			    (mme->pa - mme->va) != (mm[n].pa - mm[n].va)) {
				EMSG("Incompatible mmap regions");
				panic();
			}

			/* Grow the current map */
			end_va = MAX(mme->va + mme->size,
				     mm[n].va + mm[n].size);
			mm[n].va = MIN(mme->va, mm[n].va);
			mm[n].pa = MIN(mme->pa, mm[n].pa);
			mm[n].size = end_va - mm[n].va;
			return;
		}

		if (mme->va < mm[n].va) {
			memmove(mm + n + 1, mm + n,
				(max_elem - n - 1) * sizeof(*mm));
			mm[n] = *mme;
			/*
			 * Panics if the terminating element was
			 * overwritten.
			 */
			if (mm[max_elem - 1].size)
				break;
			return;
		}
	}
	EMSG("Too many mmap regions");
	panic();
}
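
The "grow the current map" branch replaces two compatible regions with their union. A small standalone illustration of that arithmetic, using made-up values (the real routine additionally requires matching attributes and an identical pa - va offset, as shown above):

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	/* Made-up regions: A covers [0x1000, 0x3000), B covers [0x2000, 0x5000) */
	uintptr_t a_va = 0x1000, a_size = 0x2000;
	uintptr_t b_va = 0x2000, b_size = 0x3000;

	/* Same computation as the merge step: union of the two ranges */
	uintptr_t end_va = MAX(a_va + a_size, b_va + b_size);
	uintptr_t va = MIN(a_va, b_va);

	/* Prints: merged va=0x1000 size=0x4000 */
	printf("merged va=0x%lx size=0x%lx\n",
	       (unsigned long)va, (unsigned long)(end_va - va));
	return 0;
}
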
Example no. 6
static TEE_Result tee_mmu_umap_add_param(struct tee_mmu_info *mmu, paddr_t pa,
			size_t size, uint32_t attr)
{
	struct tee_mmap_region *last_entry = NULL;
	size_t n;
	paddr_t npa;
	size_t nsz;

	/* Check that we can map memory using this attribute */
	if (!core_mmu_mattr_is_ok(attr))
		return TEE_ERROR_BAD_PARAMETERS;

	/* Find empty entry */
	for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++)
		if (!mmu->table[n].size)
			break;

	if (n == TEE_MMU_UMAP_MAX_ENTRIES) {
		/* No entries left, "can't happen" */
		return TEE_ERROR_EXCESS_DATA;
	}

	tee_mmu_umap_set_pa(mmu->table + n, CORE_MMU_USER_PARAM_SIZE,
			    pa, size, attr);

	/* Try to coalesce some entries */
	while (true) {
		/* Find last param */
		n = TEE_MMU_UMAP_MAX_ENTRIES - 1;

		while (!mmu->table[n].size) {
			n--;
			if (n < TEE_MMU_UMAP_PARAM_IDX) {
				/* No param entries found, "can't happen" */
				return TEE_ERROR_BAD_STATE;
			}
		}

		if (last_entry == mmu->table + n)
			return TEE_SUCCESS; /* Can't coalesce more */
		last_entry = mmu->table + n;

		n--;
		while (n >= TEE_MMU_UMAP_PARAM_IDX) {
			struct tee_mmap_region *entry = mmu->table + n;

			n--;
			if (last_entry->attr != entry->attr) {
				if (core_is_buffer_intersect(last_entry->pa,
							     last_entry->size,
							     entry->pa,
							     entry->size))
					return TEE_ERROR_ACCESS_CONFLICT;
				continue;
			}

			if ((last_entry->pa + last_entry->size) == entry->pa ||
			    (entry->pa + entry->size) == last_entry->pa ||
			    core_is_buffer_intersect(last_entry->pa,
						     last_entry->size,
						     entry->pa, entry->size)) {
				npa = MIN(last_entry->pa, entry->pa);
				nsz = MAX(last_entry->pa + last_entry->size,
					  entry->pa + entry->size) - npa;
				entry->pa = npa;
				entry->size = nsz;
				last_entry->pa = 0;
				last_entry->size = 0;
				last_entry->attr = 0;
				break;
			}
		}
	}
}