/*
 * Translate a physical address into a virtual address inside the
 * currently active user TA address space.
 *
 * Returns the matching VA, or NULL when no user mapping is active or
 * when @pa is not mapped in the current TA context.
 */
static void *phys_to_virt_ta_vaspace(paddr_t pa)
{
	void *va = NULL;

	if (!core_mmu_user_mapping_is_active())
		return NULL;

	if (tee_mmu_user_pa2va_helper(to_user_ta_ctx(tee_mmu_get_ctx()),
				      pa, &va) != TEE_SUCCESS)
		return NULL;

	return va;
}
TEE_Result tee_mmu_map_param(struct user_ta_ctx *utc, struct tee_ta_param *param) { TEE_Result res = TEE_SUCCESS; size_t n; /* Clear all the param entries as they can hold old information */ memset(utc->mmu->table + TEE_MMU_UMAP_PARAM_IDX, 0, (TEE_MMU_UMAP_MAX_ENTRIES - TEE_MMU_UMAP_PARAM_IDX) * sizeof(struct tee_mmap_region)); for (n = 0; n < 4; n++) { uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n); TEE_Param *p = ¶m->params[n]; uint32_t attr = TEE_MMU_UDATA_ATTR; if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT && param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT && param_type != TEE_PARAM_TYPE_MEMREF_INOUT) continue; if(param_type == TEE_PARAM_TYPE_MEMREF_INPUT) attr = TEE_MMU_UDATA_RATTR; if (p->memref.size == 0) continue; if (tee_pbuf_is_non_sec(p->memref.buffer, p->memref.size)) attr &= ~TEE_MATTR_SECURE; if (param->param_attr[n] == OPTEE_SMC_SHM_CACHED) attr |= TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT; else attr |= TEE_MATTR_CACHE_NONCACHE << TEE_MATTR_CACHE_SHIFT; res = tee_mmu_umap_add_param(utc->mmu, (paddr_t)p->memref.buffer, p->memref.size, attr); if (res != TEE_SUCCESS) return res; } res = tee_mmu_umap_set_vas(utc->mmu); if (res != TEE_SUCCESS) return res; for (n = 0; n < 4; n++) { uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n); TEE_Param *p = ¶m->params[n]; if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT && param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT && param_type != TEE_PARAM_TYPE_MEMREF_INOUT) continue; if (p->memref.size == 0) continue; res = tee_mmu_user_pa2va_helper(utc, (paddr_t)p->memref.buffer, &p->memref.buffer); if (res != TEE_SUCCESS) return res; } utc->mmu->ta_private_vmem_start = utc->mmu->table[0].va; n = TEE_MMU_UMAP_MAX_ENTRIES; do { n--; } while (n && !utc->mmu->table[n].size); utc->mmu->ta_private_vmem_end = utc->mmu->table[n].va + utc->mmu->table[n].size; return check_pgt_avail(utc->mmu->ta_private_vmem_start, utc->mmu->ta_private_vmem_end); }