/*
 * Test attributes of target physical buffer.
 *
 * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
 */
bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len)
{
	struct tee_mmap_region *map = NULL;

	/* A zero-length buffer trivially satisfies any attribute */
	if (!len)
		return true;

	switch (attr) {
	case CORE_MEM_SEC:
		return pbuf_is_inside(secure_only, pbuf, len);
	case CORE_MEM_NON_SEC:
		return pbuf_is_inside(nsec_shared, pbuf, len);
	case CORE_MEM_TEE_RAM:
		return pbuf_inside_map_area(pbuf, len, map_tee_ram);
	case CORE_MEM_TA_RAM:
		return pbuf_inside_map_area(pbuf, len, map_ta_ram);
	case CORE_MEM_NSEC_SHM:
		return pbuf_inside_map_area(pbuf, len, map_nsec_shm);
	case CORE_MEM_MULTPURPOSE:
		return pbuf_is_multipurpose(pbuf, len);
	case CORE_MEM_EXTRAM:
		return pbuf_is_inside(ddr, pbuf, len);
	case CORE_MEM_CACHED:
		/* The buffer must fit entirely inside one mapped area */
		map = find_map_by_pa(pbuf);
		if (!map || !pbuf_inside_map_area(pbuf, len, map))
			return false;
		/* Parens mirror the original precedence: (attr >> shift) == cached */
		return (map->attr >> TEE_MATTR_CACHE_SHIFT) ==
		       TEE_MATTR_CACHE_CACHED;
	default:
		return false;
	}
}
/*
 * Test attributes of target physical buffer.
 *
 * Flags: pbuf_is(SECURE, NOT_SECURE, RAM, IOMEM, KEYVAULT).
 */
bool core_pbuf_is(uint32_t attr, tee_paddr_t pbuf, size_t len)
{
	struct map_area *map = NULL;

	/* A zero-length buffer trivially satisfies any attribute */
	if (!len)
		return true;

	switch (attr) {
	case CORE_MEM_SEC:	/* fallthrough */
	case CORE_MEM_NON_SEC:	/* fallthrough */
	case CORE_MEM_MULTPURPOSE:	/* fallthrough */
	case CORE_MEM_EXTRAM:
		/* These attributes are all delegated to the platform hook */
		return ((platform_pbuf_is_t)bootcfg_pbuf_is)(attr, pbuf, len);
	case CORE_MEM_TEE_RAM:
		return pbuf_inside_map_area(pbuf, len, map_tee_ram);
	case CORE_MEM_TA_RAM:
		return pbuf_inside_map_area(pbuf, len, map_ta_ram);
	case CORE_MEM_NSEC_SHM:
		return pbuf_inside_map_area(pbuf, len, map_nsec_shm);
	case CORE_MEM_CACHED:
		/* The buffer must fit entirely inside one mapped area */
		map = find_map_by_pa(pbuf);
		if (!map || !pbuf_inside_map_area(pbuf, len, map))
			return false;
		return map->cached;
	default:
		return false;
	}
}
/*
 * Map a region of type @type covering the physical range [addr, addr + len)
 * using VA space reserved at boot (MEM_AREA_RES_VASPACE), and record the new
 * mapping in static_memory_map.
 *
 * Returns true on success or when the range is already mapped with the
 * requested type; false when no reserved VA space, translation table slot or
 * memory-map entry is available.
 */
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
{
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *map;
	size_t n;
	size_t granule;
	paddr_t p;
	size_t l;

	/* An empty mapping request is trivially satisfied */
	if (!len)
		return true;

	/* Check if the memory is already mapped */
	map = find_map_by_type_and_pa(type, addr);
	if (map && pbuf_inside_map_area(addr, len, map))
		return true;

	/* Find the reserved va space used for late mappings */
	map = find_map_by_type(MEM_AREA_RES_VASPACE);
	if (!map)
		return false;

	if (!core_mmu_find_table(map->va, UINT_MAX, &tbl_info))
		return false;

	/* Round the physical range out to the table's translation granule */
	granule = 1 << tbl_info.shift;
	p = ROUNDDOWN(addr, granule);
	l = ROUNDUP(len + addr - p, granule);
	/*
	 * Something is wrong, we can't fit the va range into the selected
	 * table. The reserved va range is possibly missaligned with
	 * granule.
	 *
	 * NOTE(review): this check uses the unrounded len while the mapping
	 * below consumes the rounded-up l; when rounding grows the range
	 * (l > len) the last granule may escape this check — confirm intent.
	 */
	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
		return false;

	/* Find end of the memory map (terminated by MEM_AREA_NOTYPE) */
	n = 0;
	while (static_memory_map[n].type != MEM_AREA_NOTYPE)
		n++;

	if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
		/* There's room for another entry */
		static_memory_map[n].va = map->va;
		static_memory_map[n].size = l;
		/* Keep the array terminated after appending */
		static_memory_map[n + 1].type = MEM_AREA_NOTYPE;
		/* Carve the consumed range out of the reserved VA entry */
		map->va += l;
		map->size -= l;
		map = static_memory_map + n;
	} else {
		/*
		 * There isn't room for another entry, steal the reserved
		 * entry as it's not useful for anything else any longer.
		 */
		map->size = l;
	}
	/* Fill in the entry (new or stolen) and program the hardware table */
	map->type = type;
	map->region_size = granule;
	map->attr = core_mmu_type_to_attr(type);
	map->pa = p;

	set_region(&tbl_info, map);
	return true;
}