/*
 * tee_uta_cache_operation - dynamic cache clean/inval request from a TA.
 *
 * The maintenance sequences follow the ARM recommendation:
 * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246d/Beicdhde.html
 * This relies on cache_maintenance_l1() issuing the needed dsb operations
 * and on cache_maintenance_l2() performing the L2 cache sync.
 */
static TEE_Result cache_operation(struct tee_ta_session *sess,
				  enum utee_cache_operation op, void *va,
				  size_t len)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx);
	TEE_Result res = TEE_SUCCESS;
	paddr_t pa = 0;

	/* The TA must have been granted the cache maintenance property */
	if (!(sess->ctx->flags & TA_FLAG_CACHE_MAINTENANCE))
		return TEE_ERROR_NOT_SUPPORTED;

	/*
	 * Cache maintenance is permitted on TA memref parameters only,
	 * never on the TA private memory.
	 */
	if (tee_mmu_is_vbuf_intersect_ta_private(utc, va, len))
		return TEE_ERROR_ACCESS_DENIED;

	res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)va, len);
	if (res != TEE_SUCCESS)
		return TEE_ERROR_ACCESS_DENIED;

	/* L2 maintenance operates on physical addresses */
	pa = virt_to_phys(va);
	if (!pa)
		return TEE_ERROR_ACCESS_DENIED;

	if (op == TEE_CACHEFLUSH) {
		/* Clean L1, flush L2, then flush L1 */
		res = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (res != TEE_SUCCESS)
			return res;
		res = cache_maintenance_l2(L2CACHE_AREA_CLEAN_INV, pa, len);
		if (res != TEE_SUCCESS)
			return res;
		return cache_maintenance_l1(DCACHE_AREA_CLEAN_INV, va, len);
	}

	if (op == TEE_CACHECLEAN) {
		/* Clean L1, then clean L2 */
		res = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (res != TEE_SUCCESS)
			return res;
		return cache_maintenance_l2(L2CACHE_AREA_CLEAN, pa, len);
	}

	if (op == TEE_CACHEINVALIDATE) {
		/* Invalidate L2, then invalidate L1 */
		res = cache_maintenance_l2(L2CACHE_AREA_INVALIDATE, pa, len);
		if (res != TEE_SUCCESS)
			return res;
		return cache_maintenance_l1(DCACHE_AREA_INVALIDATE, va, len);
	}

	return TEE_ERROR_NOT_SUPPORTED;
}
/*
 * tee_uta_cache_operation - dynamic cache clean/inval request from a TA
 * It follows ARM recommendation:
 * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246d/Beicdhde.html
 * Note that this implementation assumes dsb operations are part of
 * cache_maintenance_l1(), and L2 cache sync are part of
 * cache_maintenance_l2()
 */
static TEE_Result cache_operation(struct tee_ta_session *sess,
				  enum utee_cache_operation op, void *va,
				  size_t len)
{
	TEE_Result ret;
	paddr_t pa = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx);

	if ((sess->ctx->flags & TA_FLAG_CACHE_MAINTENANCE) == 0)
		return TEE_ERROR_NOT_SUPPORTED;

	/*
	 * TAs are allowed to operate cache maintenance on TA memref
	 * parameters only, not on the TA private memory: without this
	 * restriction a TA could drive L1/L2 maintenance against its own
	 * private mappings.
	 */
	if (tee_mmu_is_vbuf_intersect_ta_private(utc, va, len))
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * READ access (from any owner) is sufficient: requiring WRITE
	 * would wrongly reject cache maintenance on read-only memrefs.
	 */
	ret = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (tee_uaddr_t)va, len);
	if (ret != TEE_SUCCESS)
		return TEE_ERROR_ACCESS_DENIED;

	/* L2 maintenance requires the physical address of the buffer */
	pa = virt_to_phys(va);
	if (!pa)
		return TEE_ERROR_ACCESS_DENIED;

	switch (op) {
	case TEE_CACHEFLUSH:
		/* Clean L1, Flush L2, Flush L1 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		ret = cache_maintenance_l2(L2CACHE_AREA_CLEAN_INV, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_CLEAN_INV, va, len);

	case TEE_CACHECLEAN:
		/* Clean L1, Clean L2 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l2(L2CACHE_AREA_CLEAN, pa, len);

	case TEE_CACHEINVALIDATE:
		/* Inval L2, Inval L1 */
		ret = cache_maintenance_l2(L2CACHE_AREA_INVALIDATE, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_INVALIDATE, va, len);

	default:
		return TEE_ERROR_NOT_SUPPORTED;
	}
}
/*
 * platform_smp_boot - release a secondary CPU core into the non-secure world.
 *
 * @core_idx: index of the secondary core to boot (1..CFG_TEE_CORE_NB_CORE-1)
 * @entry:    non-secure entry address the core jumps to after secure init
 *
 * Returns OPTEE_SMC_RETURN_OK on success, OPTEE_SMC_RETURN_EBADCMD when
 * core_idx is 0 (the boot core) or out of range.
 */
static int platform_smp_boot(size_t core_idx, uint32_t entry)
{
	uint32_t val;
	vaddr_t va = src_base();

	/* Core 0 is already running; reject it and out-of-range indices */
	if ((core_idx == 0) || (core_idx >= CFG_TEE_CORE_NB_CORE))
		return OPTEE_SMC_RETURN_EBADCMD;

	/*
	 * set secondary cores' NS entry addresses.  The entry is pushed out
	 * of L1 and L2 so the waking core (caches off) reads the fresh value.
	 *
	 * NOTE(review): the L2 call is handed &ns_entry_addrs[core_idx] cast
	 * straight to paddr_t — this is only correct if that array lives in a
	 * flat (VA == PA) mapping; confirm, otherwise virt_to_phys() is needed.
	 */
	ns_entry_addrs[core_idx] = entry;
	cache_maintenance_l1(DCACHE_AREA_CLEAN, &ns_entry_addrs[core_idx],
			     sizeof(uint32_t));
	cache_maintenance_l2(L2CACHE_AREA_CLEAN,
			     (paddr_t)&ns_entry_addrs[core_idx],
			     sizeof(uint32_t));

	/*
	 * boot secondary cores from OP-TEE load address: SRC_GPR1 holds the
	 * per-core boot address in 8-byte slots.  This must be programmed
	 * before the core is released below.
	 */
	write32((uint32_t)CFG_TEE_LOAD_ADDR, va + SRC_GPR1 + core_idx * 8);

	/*
	 * release secondary core: read-modify-write SRC_SCR, setting the
	 * per-core enable and reset bits (bit offsets are relative to core 1,
	 * hence core_idx - 1).
	 */
	val = read32(va + SRC_SCR);
	val |= BIT32(SRC_SCR_CORE1_ENABLE_OFFSET + (core_idx - 1));
	val |= BIT32(SRC_SCR_CORE1_RST_OFFSET + (core_idx - 1));
	write32(val, va + SRC_SCR);

	return OPTEE_SMC_RETURN_OK;
}