static unsigned long mmap_desc(unsigned attr, unsigned long addr, unsigned level) { unsigned long desc = addr; desc |= level == 3 ? TABLE_DESC : BLOCK_DESC; desc |= attr & MT_NS ? LOWER_ATTRS(NS) : 0; desc |= attr & MT_RW ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO); desc |= LOWER_ATTRS(ACCESS_FLAG); if (attr & MT_MEMORY) { desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH); if (attr & MT_RW) desc |= UPPER_ATTRS(XN); } else { desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH); desc |= UPPER_ATTRS(XN); } debug_print(attr & MT_MEMORY ? "MEM" : "DEV"); debug_print(attr & MT_RW ? "-RW" : "-RO"); debug_print(attr & MT_NS ? "-NS" : "-S"); return desc; }
static uint32_t desc_to_mattr(uint64_t desc) { uint32_t a; if (!(desc & 1)) { if (desc & HIDDEN_DESC) return TEE_MATTR_HIDDEN_BLOCK; if (desc & PHYSPAGE_DESC) return TEE_MATTR_PHYS_BLOCK; return 0; } a = TEE_MATTR_VALID_BLOCK; if (desc & LOWER_ATTRS(ACCESS_FLAG)) a |= TEE_MATTR_PRX | TEE_MATTR_URX; if (!(desc & LOWER_ATTRS(AP_RO))) a |= TEE_MATTR_PW | TEE_MATTR_UW; if (!(desc & LOWER_ATTRS(AP_UNPRIV))) a &= ~TEE_MATTR_URWX; if (desc & UPPER_ATTRS(XN)) a &= ~(TEE_MATTR_PX | TEE_MATTR_UX); if (desc & UPPER_ATTRS(PXN)) a &= ~TEE_MATTR_PX; COMPILE_TIME_ASSERT(ATTR_DEVICE_INDEX == TEE_MATTR_CACHE_NONCACHE); COMPILE_TIME_ASSERT(ATTR_IWBWA_OWBWA_NTR_INDEX == TEE_MATTR_CACHE_CACHED); a |= ((desc & LOWER_ATTRS(ATTR_INDEX_MASK)) >> LOWER_ATTRS_SHIFT) << TEE_MATTR_CACHE_SHIFT; if (!(desc & LOWER_ATTRS(NON_GLOBAL))) a |= TEE_MATTR_GLOBAL; if (!(desc & LOWER_ATTRS(NS))) a |= TEE_MATTR_SECURE; return a; }
static unsigned long mmap_desc(unsigned attr, unsigned long addr_pa, unsigned level) { unsigned long desc = addr_pa; int mem_type; desc |= level == 3 ? TABLE_DESC : BLOCK_DESC; desc |= attr & MT_NS ? LOWER_ATTRS(NS) : 0; desc |= attr & MT_RW ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO); desc |= LOWER_ATTRS(ACCESS_FLAG); mem_type = MT_TYPE(attr); if (mem_type == MT_MEMORY) { desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH); if (attr & MT_RW) desc |= UPPER_ATTRS(XN); } else if (mem_type == MT_NON_CACHEABLE) { desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH); if (attr & MT_RW) desc |= UPPER_ATTRS(XN); } else { assert(mem_type == MT_DEVICE); desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH); desc |= UPPER_ATTRS(XN); } debug_print((mem_type == MT_MEMORY) ? "MEM" : ((mem_type == MT_NON_CACHEABLE) ? "NC" : "DEV")); debug_print(attr & MT_RW ? "-RW" : "-RO"); debug_print(attr & MT_NS ? "-NS" : "-S"); return desc; }
static uint64_t mattr_to_desc(unsigned level, uint32_t attr) { uint64_t desc; uint32_t a = attr; if (a & TEE_MATTR_HIDDEN_BLOCK) return INVALID_DESC | HIDDEN_DESC; if (a & TEE_MATTR_PHYS_BLOCK) return INVALID_DESC | PHYSPAGE_DESC; if (!(a & TEE_MATTR_VALID_BLOCK)) return 0; if (a & (TEE_MATTR_PX | TEE_MATTR_PW)) a |= TEE_MATTR_PR; if (a & (TEE_MATTR_UX | TEE_MATTR_UW)) a |= TEE_MATTR_UR; if (a & TEE_MATTR_UR) a |= TEE_MATTR_PR; if (a & TEE_MATTR_UW) a |= TEE_MATTR_PW; desc = level == 3 ? TABLE_DESC : BLOCK_DESC; if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX))) desc |= UPPER_ATTRS(XN); if (!(a & TEE_MATTR_PX)) desc |= UPPER_ATTRS(PXN); if (a & TEE_MATTR_UR) desc |= LOWER_ATTRS(AP_UNPRIV); if (!(a & TEE_MATTR_PW)) desc |= LOWER_ATTRS(AP_RO); /* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */ switch ((a >> TEE_MATTR_CACHE_SHIFT) & TEE_MATTR_CACHE_MASK) { case TEE_MATTR_CACHE_NONCACHE: desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH); break; case TEE_MATTR_CACHE_CACHED: desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH); break; default: /* * "Can't happen" the attribute is supposed to be checked * with core_mmu_mattr_is_ok() before. */ panic(); } if (a & (TEE_MATTR_UR | TEE_MATTR_PR)) desc |= LOWER_ATTRS(ACCESS_FLAG); if (!(a & TEE_MATTR_GLOBAL)) desc |= LOWER_ATTRS(NON_GLOBAL); desc |= a & TEE_MATTR_SECURE ? 0 : LOWER_ATTRS(NS); return desc; }
/*
 * Returns a block/page table descriptor for the given level and attributes.
 */
uint64_t xlat_desc(const xlat_ctx_t *ctx, mmap_attr_t attr,
		   unsigned long long addr_pa, int level)
{
	uint64_t desc;
	int mem_type;

	/* The address must be mappable at this level's granularity. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);

	desc = addr_pa;

	/*
	 * The last translation level uses the page descriptor encoding;
	 * every other level uses the block descriptor encoding.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;

	/*
	 * TF never handles access-flag faults, so the access flag is set
	 * unconditionally. Security state and data access permissions are
	 * taken from the MT_NS and MT_RW attribute bits.
	 */
	desc |= LOWER_ATTRS(ACCESS_FLAG);
	if (attr & MT_NS)
		desc |= LOWER_ATTRS(NS);
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);

	/*
	 * Unprivileged (EL0) access is granted only for EL1&0-regime
	 * mappings explicitly tagged MT_USER. For any other mapping —
	 * including the whole EL3 regime, which has no lower exception
	 * level — AP[2:1] denies unprivileged access.
	 */
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		desc |= (attr & MT_USER) ?
			LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED) :
			LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
	}

	/*
	 * Derive shareability and executability from the memory type.
	 *
	 * Device memory and non-cacheable normal memory are coherent for
	 * all observers and are therefore treated as Outer Shareable, so
	 * the shareability field is not strictly required for them.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Device memory is always mapped execute-never so that a
		 * speculative instruction fetch cannot hit a
		 * read-sensitive peripheral.
		 */
		desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
	} else {
		/* Normal memory */
		/*
		 * Writable normal memory holds data only (TF does not
		 * self-modify) and is always mapped execute-never. Setting
		 * XN here is for consistency: the MMU-enable path also
		 * sets SCTLR_ELx.WXN, which makes writable memory
		 * execute-never regardless of the descriptor's XN bit.
		 *
		 * Read-only memory honours the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute. Which XN bit(s) end up set depends on the
		 * context's translation regime, via
		 * xlat_arch_regime_get_xn_desc().
		 */
		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER))
			desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}