/* * This routine is called while MMU and core memory management are not init. */ struct map_area *bootcfg_get_memory(void) { struct map_area *map; struct memaccess_area *a, *a2; struct map_area *ret = bootcfg_memory_map; /* check defined memory access layout */ a = (struct memaccess_area *)&secure_only; a2 = (struct memaccess_area *)&nsec_shared; if (core_is_buffer_intersect(a->paddr, a->size, a2->paddr, a2->size)) { EMSG("invalid memory access configuration: sec/nsec"); ret = NULL; } if (ret == NULL) return ret; /* check defined mapping (overlapping will be tested later) */ map = bootcfg_memory_map; while (map->type != MEM_AREA_NOTYPE) { switch (map->type) { case MEM_AREA_TEE_RAM: a = (struct memaccess_area *)&secure_only; if (!core_is_buffer_inside(map->pa, map->size, a->paddr, a->size)) { EMSG("TEE_RAM does not fit in secure_only"); ret = NULL; } break; case MEM_AREA_TA_RAM: a = (struct memaccess_area *)&secure_only; if (!core_is_buffer_inside(map->pa, map->size, a->paddr, a->size)) { EMSG("TEE_RAM does not fit in secure_only"); ret = NULL; } break; case MEM_AREA_NSEC_SHM: a = (struct memaccess_area *)&nsec_shared; if (!core_is_buffer_inside(map->pa, map->size, a->paddr, a->size)) { EMSG("TEE_RAM does not fit in secure_only"); ret = NULL; } break; default: /* other mapped areas are not checked */ break; } map++; } return ret; }
/*
 * Return true only if the virtual buffer [va, va + size) fits entirely
 * inside the TA's private virtual memory range.
 *
 * NOTE(review): the "+ 1" assumes ta_private_vmem_end is the INCLUSIVE
 * last address of the range — confirm against how mmu->ta_private_vmem_end
 * is assigned (upstream variants use an exclusive end without the +1).
 */
bool tee_mmu_is_vbuf_inside_ta_private(const struct tee_ta_ctx *ctx,
				       const void *va, size_t size)
{
	const size_t priv_len = ctx->mmu->ta_private_vmem_end -
				ctx->mmu->ta_private_vmem_start + 1;

	return core_is_buffer_inside(va, size,
				     ctx->mmu->ta_private_vmem_start,
				     priv_len);
}
/*
 * Return true only if the virtual buffer [va, va + size) fits entirely
 * inside the user TA's private virtual memory range.
 *
 * NOTE(review): the "+ 1" assumes ta_private_vmem_end is the INCLUSIVE
 * last address of the range — confirm against how mmu->ta_private_vmem_end
 * is assigned (upstream variants use an exclusive end without the +1).
 */
bool tee_mmu_is_vbuf_inside_ta_private(const struct user_ta_ctx *utc,
				       const void *va, size_t size)
{
	const size_t priv_len = utc->mmu->ta_private_vmem_end -
				utc->mmu->ta_private_vmem_start + 1;

	return core_is_buffer_inside(va, size,
				     utc->mmu->ta_private_vmem_start,
				     priv_len);
}
/*
 * Reserve the ASAN shadow region inside @pool so the allocator never
 * hands it out. If the shadow region only partially overlaps the pool,
 * the reservation is clamped to the overlapping part; if it lies wholly
 * outside the pool, nothing is reserved.
 */
static void carve_out_asan_mem(tee_mm_pool_t *pool)
{
	const size_t pool_sz = pool->hi - pool->lo;
	paddr_t pa = ASAN_MAP_PA;
	size_t sz = ASAN_MAP_SZ;
	tee_mm_entry_t *mm = NULL;

	/* Shadow area entirely outside this pool: nothing to carve out */
	if (core_is_buffer_outside(pa, sz, pool->lo, pool_sz))
		return;

	if (!core_is_buffer_inside(pa, sz, pool->lo, pool_sz)) {
		/* Partial overlap: clamp [pa, pa + sz) to the pool */
		if (pa < pool->lo) {
			/* Shadow area overlaps the beginning of the pool */
			sz -= pool->lo - pa;
			pa = pool->lo;
		} else {
			/* Shadow area overlaps the end of the pool */
			sz = pool->hi - pa;
		}
	}

	/* Reserve the (clamped) shadow area */
	mm = tee_mm_alloc2(pool, pa, sz);
	assert(mm);
}
static TEE_Result copy_to(struct elf_load_state *state, void *dst, size_t dst_size, size_t dst_offs, size_t offs, size_t len) { TEE_Result res; res = advance_to(state, offs); if (res != TEE_SUCCESS) return res; if (!len) return TEE_SUCCESS; if (len > dst_size || (len + dst_offs) > dst_size) return TEE_ERROR_SECURITY; if (!core_is_buffer_inside(state->nwdata + offs, len, state->nwdata, state->nwdata_len)) return TEE_ERROR_SECURITY; memcpy((uint8_t *)dst + dst_offs, state->nwdata + offs, len); res = crypto_ops.hash.update(state->hash_ctx, state->hash_algo, (uint8_t *)dst + dst_offs, len); if (res != TEE_SUCCESS) return res; state->next_offs = offs + len; return res; }
/*
 * Return true if the physical buffer [pa, pa + size) fits entirely inside
 * any one of the @alen access areas in @a.
 */
static bool _pbuf_is_inside(struct memaccess_area *a, size_t alen,
			    paddr_t pa, size_t size)
{
	size_t idx;

	for (idx = 0; idx < alen; idx++) {
		if (core_is_buffer_inside(pa, size,
					  a[idx].paddr, a[idx].size))
			return true;
	}

	return false;
}
/*
 * pbuf_is_ddr - return true if the physical buffer lies entirely inside
 * one of the DDR regions described by the ddr[] table.
 */
static bool pbuf_is_ddr(unsigned long paddr, size_t size)
{
	const size_t count = sizeof(ddr) / sizeof(*ddr);
	size_t n;

	/* Scan order is irrelevant: this is a pure membership test */
	for (n = 0; n < count; n++) {
		if (core_is_buffer_inside(paddr, size,
					  ddr[n].paddr, ddr[n].size))
			return true;
	}

	return false;
}
/* * Wrapper for the platform specific pbuf_is() service. */ static bool pbuf_is(enum buf_is_attr attr, unsigned long paddr, size_t size) { switch (attr) { case CORE_MEM_SEC: return core_is_buffer_inside(paddr, size, secure_only.paddr, secure_only.size); case CORE_MEM_NON_SEC: return core_is_buffer_inside(paddr, size, nsec_shared.paddr, nsec_shared.size); case CORE_MEM_MULTPURPOSE: return pbuf_is_multipurpose(paddr, size); case CORE_MEM_EXTRAM: return pbuf_is_ddr(paddr, size); default: EMSG("unpexted request: attr=%X", attr); return false; } }
/*
 * Return true if page table @p belongs to @ctx and its small-page span
 * lies entirely inside the virtual range [begin, last). An empty or
 * inverted range never matches.
 */
static bool pgt_entry_matches(struct pgt *p, void *ctx, vaddr_t begin,
			      vaddr_t last)
{
	if (!p || p->ctx != ctx)
		return false;
	if (last <= begin)
		return false;

	return core_is_buffer_inside(p->vabase, SMALL_PAGE_SIZE, begin,
				     last - begin);
}
/*
 * Translate the physical address @pa into the TA's virtual address space.
 * On success *va is set and TEE_SUCCESS is returned; if @pa falls in no
 * mapped region (or there is no table), TEE_ERROR_ACCESS_DENIED.
 */
TEE_Result tee_mmu_user_pa2va_helper(const struct tee_ta_ctx *ctx,
				     paddr_t pa, void **va)
{
	size_t n;

	if (!ctx->mmu->table)
		return TEE_ERROR_ACCESS_DENIED;

	for (n = 0; n < ctx->mmu->size; n++) {
		/* A 1-byte probe: is pa covered by this mapping entry? */
		if (!core_is_buffer_inside(pa, 1, ctx->mmu->table[n].pa,
					   ctx->mmu->table[n].size))
			continue;
		*va = (void *)((paddr_t)pa - ctx->mmu->table[n].pa +
			       ctx->mmu->table[n].va);
		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_DENIED;
}
/*
 * Translate the TA virtual address @ua to its physical address, optionally
 * reporting the mapping attributes in *attr (when attr is non-NULL).
 * Returns TEE_ERROR_ACCESS_DENIED if @ua is unmapped or there is no table.
 */
static TEE_Result tee_mmu_user_va2pa_attr(const struct tee_ta_ctx *ctx,
			void *ua, paddr_t *pa, uint32_t *attr)
{
	size_t n;

	if (!ctx->mmu->table)
		return TEE_ERROR_ACCESS_DENIED;

	for (n = 0; n < ctx->mmu->size; n++) {
		/* A 1-byte probe: is ua covered by this mapping entry? */
		if (!core_is_buffer_inside(ua, 1, ctx->mmu->table[n].va,
					   ctx->mmu->table[n].size))
			continue;
		*pa = (paddr_t)ua - ctx->mmu->table[n].va +
		      ctx->mmu->table[n].pa;
		if (attr)
			*attr = ctx->mmu->table[n].attr;
		return TEE_SUCCESS;
	}

	return TEE_ERROR_ACCESS_DENIED;
}
/*
 * Check whether the physical buffer [p, p + l) fits entirely within the
 * given core default map area.
 */
static bool pbuf_inside_map_area(unsigned long p, size_t l,
				 struct tee_mmap_region *map)
{
	bool fits = core_is_buffer_inside(p, l, map->pa, map->size);

	return fits;
}