/*
 * core_tlb_maintenance() - perform a TLB invalidation operation
 * @op: one of the TLBINV_* operation codes
 * @a:  operation argument (ASID for TLBINV_BY_ASID, MVA for TLBINV_BY_MVA)
 *
 * Returns 0 on success, 1 if @op is not a recognized operation.
 */
int core_tlb_maintenance(int op, unsigned int a)
{
	/*
	 * We're doing TLB invalidation because we've changed mapping.
	 * The dsb() makes sure that written data is visible.
	 */
	dsb();

	switch (op) {
	case TLBINV_UNIFIEDTLB:
		secure_mmu_unifiedtlbinvall();
		break;
	case TLBINV_CURRENT_ASID:
		secure_mmu_unifiedtlbinv_curasid();
		break;
	case TLBINV_BY_ASID:
		secure_mmu_unifiedtlbinv_byasid(a);
		break;
	case TLBINV_BY_MVA:
		/*
		 * Not implemented: report and hang deliberately rather than
		 * continue with stale TLB entries.
		 * (The original secure_mmu_unifiedtlbinvbymva(a) call placed
		 * after the infinite loop was unreachable dead code and has
		 * been removed.)
		 */
		EMSG("TLB_INV_SECURE_MVA is not yet supported!");
		while (1)
			;
		break;
	default:
		return 1;
	}
	return 0;
}
/*
 * tee_mmu_final - finalise and free ctx mmu
 *
 * Returns the context's ASID to the global g_asid pool, invalidates any
 * TLB entries tagged with that ASID (so the ASID can be safely reused),
 * and frees the per-context translation table and mmu descriptor.
 */
void tee_mmu_final(struct tee_ta_ctx *ctx)
{
	/*
	 * Recover the ASID bit from the context id.
	 * NOTE(review): the shift amount is (ctx->context - 1) & 0xff; if
	 * that value can reach 32 or more the shift is undefined behavior.
	 * Presumably the allocator restricts contexts to the bit width of
	 * g_asid — confirm at the allocation site.
	 */
	uint32_t asid = 1 << ((ctx->context - 1) & 0xff);

	/* return ASID */
	g_asid |= asid;

	/* clear MMU entries to avoid clash when asid is reused */
	secure_mmu_unifiedtlbinv_byasid(ctx->context & 0xff);
	/* Mark the context as released so it cannot be finalised twice. */
	ctx->context = 0;

	/* Guard needed for the ->table dereference, not for free() itself. */
	if (ctx->mmu != NULL) {
		free(ctx->mmu->table);
		free(ctx->mmu);
	}
	/* Clear the dangling pointer to defend against use-after-free. */
	ctx->mmu = NULL;
}
/*
 * core_tlb_maintenance() - perform a TLB invalidation operation
 * @op: one of the TLBINV_* operation codes
 * @a:  operation argument (ASID for TLBINV_BY_ASID, MVA for TLBINV_BY_MVA)
 *
 * Returns 0 on success, 1 if @op is not a recognized operation.
 */
int core_tlb_maintenance(int op, unsigned int a)
{
	/*
	 * TLB invalidation follows a mapping change; the dsb() ensures the
	 * updated translation tables are visible to the walker before the
	 * invalidation is issued (this barrier was missing in this variant
	 * but is required, as documented in the sibling implementation).
	 */
	dsb();

	switch (op) {
	case TLBINV_UNIFIEDTLB:
		secure_mmu_unifiedtlbinvall();
		break;
	case TLBINV_CURRENT_ASID:
		secure_mmu_unifiedtlbinv_curasid();
		break;
	case TLBINV_BY_ASID:
		secure_mmu_unifiedtlbinv_byasid(a);
		break;
	case TLBINV_BY_MVA:
		/*
		 * Not implemented: report and hang deliberately rather than
		 * continue with stale TLB entries.
		 * (The original secure_mmu_unifiedtlbinvbymva(a) call placed
		 * after the infinite loop was unreachable dead code and has
		 * been removed.)
		 */
		EMSG("TLB_INV_SECURE_MVA is not yet supported!");
		while (1)
			;
		break;
	default:
		return 1;
	}
	return 0;
}