void TaskSwitch() { TASK *current,*next; current = TaskCurrent(); next = TaskNextReady(); if (next) { if (current) { // 如果当前任务是主任务(即第一个任务main), // 则将其继续加入就绪列表,不要挂起 if (current->task_id == TASK_ID_START) { current->task_status = TASK_READY; TaskAddReady(current); } else { current->task_status = TASK_SUSPUD; TaskAddSuspud(current); TEE_Printf("in task switch...\n"); } TEE_MemMove(¤t->core_regs, &g_temp_regs, sizeof(CPU_CORE_REGS)); } next->task_status = TASK_RUNNING; SetCurrentTask(next); TaskDelReady(next); TEE_MemMove(&g_temp_regs, &next->core_regs, sizeof(CPU_CORE_REGS)); } }
/*
 * Open-session entry point.
 *
 * Validates the session parameters via handle_params(), runs the
 * storage and crypto self tests, then allocates the session context
 * and fills it with out_vector for use by later invocations.
 *
 * Returns TEE_SUCCESS on success, or the first failing step's error
 * code (TEE_ERROR_GENERIC for self-test failures,
 * TEE_ERROR_BAD_PARAMETERS for a non-NULL incoming context,
 * TEE_ERROR_OUT_OF_MEMORY if allocation fails).
 */
TEE_Result TA_EXPORT TA_OpenSessionEntryPoint(uint32_t paramTypes,
					      TEE_Param params[4],
					      void **sessionContext)
{
	TEE_Result ret = TEE_SUCCESS;

	OT_LOG(LOG_INFO, "Calling the Open session entry point");

	ret = handle_params(paramTypes, params);
	if (ret != TEE_SUCCESS)
		return ret;

	if (storage_test(2))
		return TEE_ERROR_GENERIC;

	if (crypto_test(2))
		return TEE_ERROR_GENERIC;

	/* The framework must hand us an empty context slot */
	if (*sessionContext != NULL) {
		OT_LOG(LOG_ERR, "Session context should be NULL");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	*sessionContext = TEE_Malloc(SIZE_OF_VEC(out_vector), 0);
	if (*sessionContext == NULL) {
		OT_LOG(LOG_ERR, "Can not malloc space for session context");
		return TEE_ERROR_OUT_OF_MEMORY;
	}

	TEE_MemMove(*sessionContext, out_vector, SIZE_OF_VEC(out_vector));

	return ret;
}
/*
 * Unpack a serialized TEE_Attribute array from buf.
 *
 * Layout: a uint32_t attribute count, followed by that many
 * TEE_Attribute structs whose memref buffer fields hold offsets
 * relative to buf; those offsets are rebased to absolute pointers.
 *
 * On success *attrs owns a TEE_Malloc'd array (caller frees) and
 * *attr_count holds its length; on failure the outputs are untouched.
 *
 * Fixes: the alignment test cast buf to uint32_t, truncating the
 * pointer on 64-bit targets; the size check could overflow in
 * num_attrs * sizeof(TEE_Attribute); the per-memref bounds check
 * could wrap in p + length.
 */
static TEE_Result unpack_attrs(const uint8_t *buf, size_t blen,
			       TEE_Attribute **attrs, uint32_t *attr_count)
{
	TEE_Result res = TEE_SUCCESS;
	TEE_Attribute *a = NULL;
	size_t num_attrs = 0;
	const size_t num_attrs_size = sizeof(uint32_t);

	if (blen == 0)
		goto out;

	/* uintptr_t keeps the full pointer; uint32_t truncates on LP64 */
	if (((uintptr_t)buf & 0x3) != 0 || blen < num_attrs_size)
		return TEE_ERROR_BAD_PARAMETERS;
	num_attrs = *(uint32_t *)(void *)buf;
	/* Division form cannot overflow, unlike num_attrs * sizeof(...) */
	if (num_attrs > (blen - num_attrs_size) / sizeof(TEE_Attribute))
		return TEE_ERROR_BAD_PARAMETERS;

	if (num_attrs > 0) {
		size_t n;

		a = TEE_Malloc(num_attrs * sizeof(TEE_Attribute), 0);
		if (a == NULL)
			return TEE_ERROR_OUT_OF_MEMORY;

		TEE_MemMove(a, buf + num_attrs_size,
			    num_attrs * sizeof(TEE_Attribute));
		for (n = 0; n < num_attrs; n++) {
			uintptr_t p;

#define TEE_ATTR_BIT_VALUE (1 << 29)
			/* Value attributes carry no pointer to rebase */
			if ((a[n].attributeID & TEE_ATTR_BIT_VALUE) != 0)
				continue;

			/* Only memrefs need to be updated */
			p = (uintptr_t)a[n].content.ref.buffer;
			if (p == 0)
				continue;
			/*
			 * Offset + length must stay inside buf; written in
			 * subtraction form so the sum cannot wrap.
			 */
			if (a[n].content.ref.length > blen ||
			    p > blen - a[n].content.ref.length) {
				res = TEE_ERROR_BAD_PARAMETERS;
				goto out;
			}
			/* Rebase stored offset to an absolute pointer */
			p += (uintptr_t)buf;
			a[n].content.ref.buffer = (void *)p;
		}
	}

	res = TEE_SUCCESS;
out:
	if (res == TEE_SUCCESS) {
		*attrs = a;
		*attr_count = num_attrs;
	} else {
		TEE_Free(a);
	}
	return res;
}
/*
 * Build the persistent-object name "<named_value_prefix><name_orig>"
 * into name (not NUL-terminated) and store the total length in
 * *name_size.
 *
 * Returns TEE_ERROR_BAD_PARAMETERS when the combined length would
 * exceed TEE_OBJECT_ID_MAX_LEN, TEE_SUCCESS otherwise. The caller
 * must provide at least TEE_OBJECT_ID_MAX_LEN bytes in name.
 *
 * Fix: the limit check was "name_orig_size + pref_len > MAX", whose
 * sum can wrap for very large name_orig_size and bypass the bound;
 * rewritten in subtraction form.
 */
static TEE_Result get_named_object_name(char *name_orig,
					uint32_t name_orig_size,
					char *name, uint32_t *name_size)
{
	size_t pref_len = strlen(named_value_prefix);

	/* Subtraction form cannot wrap, unlike name_orig_size + pref_len */
	if (pref_len > TEE_OBJECT_ID_MAX_LEN ||
	    name_orig_size > TEE_OBJECT_ID_MAX_LEN - pref_len)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Start with prefix */
	TEE_MemMove(name, named_value_prefix, pref_len);

	/* Concatenate provided object name */
	TEE_MemMove(name + pref_len, name_orig, name_orig_size);

	*name_size = name_orig_size + pref_len;

	return TEE_SUCCESS;
}
/*
 * Populate the four response parameters for the caller:
 *   param 0 — value pair set to OUT_VALUE_A / OUT_VALUE_B
 *   params 1 and 2 — memrefs filled with out_vector
 *   param 3 — its memref contents reversed in place
 *
 * Fix: "&params[3].memref.size" had been corrupted into the mojibake
 * "¶ms[3].memref.size" (HTML entity &para;), which does not compile.
 */
static void fill_response_params(TEE_Param *params)
{
	uint32_t i;

	/* Param 0 */
	params[0].value.a = OUT_VALUE_A;
	params[0].value.b = OUT_VALUE_B;

	/* Param 1 & 2 */
	for (i = 1; i < 3; i++) {
		TEE_MemMove(params[i].memref.buffer, out_vector,
			    SIZE_OF_VEC(out_vector));
		params[i].memref.size = SIZE_OF_VEC(out_vector);
	}

	/* Param 3 */
	reverse_buffer(params[3].memref.buffer, params[3].memref.size,
		       &params[3].memref.size);
}
/*
 * Invoke command cmd on the crypt TA, optionally shadowing every
 * memref parameter in TA-local (secure) memory.
 *
 * When sec_mem is true, each memref in pParams is duplicated into a
 * freshly allocated secure buffer before the call (input data copied
 * in for INPUT/INOUT types), and results are copied back to the
 * caller's buffers afterwards before the shadows are freed. When
 * sec_mem is false, pParams is passed through by plain copy.
 *
 * Returns the result of TEE_OpenTASession or TEE_InvokeTACommand.
 */
static TEE_Result rpc_call_cryp(bool sec_mem, uint32_t nParamTypes,
				TEE_Param pParams[4], uint32_t cmd)
{
	TEE_TASessionHandle cryp_session;
	TEE_Result res;
	uint32_t origin;
	TEE_Param params[4];
	size_t i;
	uint32_t types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_NONE,
					 TEE_PARAM_TYPE_NONE,
					 TEE_PARAM_TYPE_NONE,
					 TEE_PARAM_TYPE_NONE);

	TEE_MemFill(params, 0, sizeof(TEE_Param) * 4);
	res = TEE_OpenTASession(&cryp_uuid, 0, types, params, &cryp_session,
				&origin);
	if (res != TEE_SUCCESS) {
		EMSG("rpc_sha256 - TEE_OpenTASession returned 0x%x\n",
		     (unsigned int)res);
		return res;
	}

	types = nParamTypes;
	if (sec_mem) {
		/* Shadow each parameter in secure memory */
		TEE_MemFill(params, 0, sizeof(params));
		for (i = 0; i < 4; i++) {
			switch (TEE_PARAM_TYPE_GET(types, i)) {
			case TEE_PARAM_TYPE_VALUE_INPUT:
			case TEE_PARAM_TYPE_VALUE_INOUT:
				params[i].value = pParams[i].value;
				break;
			case TEE_PARAM_TYPE_MEMREF_INPUT:
			case TEE_PARAM_TYPE_MEMREF_OUTPUT:
			case TEE_PARAM_TYPE_MEMREF_INOUT:
				params[i].memref.buffer =
					TEE_Malloc(pParams[i].memref.size, 0);
				if (!params[i].memref.buffer)
					TEE_Panic(0);
				params[i].memref.size =
					pParams[i].memref.size;
				/* Output-only buffers need no input copy */
				if (TEE_PARAM_TYPE_GET(types, i) !=
				    TEE_PARAM_TYPE_MEMREF_OUTPUT)
					TEE_MemMove(params[i].memref.buffer,
						    pParams[i].memref.buffer,
						    pParams[i].memref.size);
				break;
			default:
				break;
			}
		}
	} else {
		TEE_MemMove(params, pParams, sizeof(params));
	}

	res = TEE_InvokeTACommand(cryp_session, 0, cmd, types, params,
				  &origin);
	if (res != TEE_SUCCESS) {
		EMSG("rpc_call_cryp - TEE_InvokeTACommand returned 0x%x\n",
		     (unsigned int)res);
	}

	TEE_CloseTASession(cryp_session);

	if (sec_mem) {
		/* Propagate outputs to the caller and free the shadows */
		for (i = 0; i < 4; i++) {
			switch (TEE_PARAM_TYPE_GET(types, i)) {
			case TEE_PARAM_TYPE_VALUE_INOUT:
			case TEE_PARAM_TYPE_VALUE_OUTPUT:
				pParams[i].value = params[i].value;
				break;
			case TEE_PARAM_TYPE_MEMREF_INPUT:
			case TEE_PARAM_TYPE_MEMREF_OUTPUT:
			case TEE_PARAM_TYPE_MEMREF_INOUT:
				/* Input-only buffers need no copy back */
				if (TEE_PARAM_TYPE_GET(types, i) !=
				    TEE_PARAM_TYPE_MEMREF_INPUT)
					TEE_MemMove(pParams[i].memref.buffer,
						    params[i].memref.buffer,
						    params[i].memref.size);
				pParams[i].memref.size =
					params[i].memref.size;
				TEE_Free(params[i].memref.buffer);
				break;
			default:
				break;
			}
		}
	}

	return res;
}
/*
 * Inject the nonsecure input buffer (param 0) into the secure output
 * buffer (param 1).
 *
 * Verifies the expected parameter types, that the secure buffer is
 * large enough, and that each buffer actually carries the expected
 * secure/nonsecure attribute, before copying the data (with cache
 * maintenance around the copy when CFG_CACHE_API is enabled).
 *
 * Fix: the inject copy used the SECURE buffer's size as the length;
 * the guard only guarantees sec_size >= ns_size, so that could read
 * past the end of the nonsecure source buffer. Copy ns_size instead.
 */
static TEE_Result cmd_inject(uint32_t types,
			     TEE_Param params[TEE_NUM_PARAMS])
{
	TEE_Result rc;
	const int sec_idx = 1;	/* highlight secure buffer index */
	const int ns_idx = 0;	/* highlight nonsecure buffer index */

	if (types != TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
				     TEE_PARAM_TYPE_MEMREF_OUTPUT,
				     TEE_PARAM_TYPE_NONE,
				     TEE_PARAM_TYPE_NONE)) {
		EMSG("bad parameters %x", (unsigned)types);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if (params[sec_idx].memref.size < params[ns_idx].memref.size)
		return TEE_ERROR_SHORT_BUFFER;

	/*
	 * We could rely on the TEE to provide consistent buffer/size values
	 * to reference a buffer with a unique and consistent secure attribute
	 * value. Hence it is safe enough (and more optimal) to test only the
	 * secure attribute of a single byte of it. Yet, since the current
	 * test does not deal with performance, let check the secure attribute
	 * of each byte of the buffer.
	 */
	rc = TEE_CheckMemoryAccessRights(TEE_MEMORY_ACCESS_ANY_OWNER |
					 TEE_MEMORY_ACCESS_READ |
					 TEE_MEMORY_ACCESS_NONSECURE,
					 params[ns_idx].memref.buffer,
					 params[ns_idx].memref.size);
	if (rc != TEE_SUCCESS) {
		EMSG("TEE_CheckMemoryAccessRights(nsec) failed %x", rc);
		return rc;
	}

	rc = TEE_CheckMemoryAccessRights(TEE_MEMORY_ACCESS_ANY_OWNER |
					 TEE_MEMORY_ACCESS_WRITE |
					 TEE_MEMORY_ACCESS_SECURE,
					 params[sec_idx].memref.buffer,
					 params[sec_idx].memref.size);
	if (rc != TEE_SUCCESS) {
		EMSG("TEE_CheckMemoryAccessRights(secure) failed %x", rc);
		return rc;
	}

#ifdef CFG_CACHE_API
	/*
	 * we should invalidate cache (here we assume buffer were not
	 * filled through cpu core caches. We flush buffers so that
	 * cache is not corrupted in cache target buffer not aligned
	 * on cache line size.
	 * NOTE(review): comment says "invalidate" but the call below
	 * flushes — confirm intent against the cache-maintenance docs.
	 */
	rc = TEE_CacheFlush(params[sec_idx].memref.buffer,
			    params[sec_idx].memref.size);
	if (rc != TEE_SUCCESS) {
		EMSG("TEE_CacheFlush(%p, %x) failed: 0x%x",
		     params[sec_idx].memref.buffer,
		     params[sec_idx].memref.size, rc);
		return rc;
	}
#endif /* CFG_CACHE_API */

	/*
	 * Inject data: copy only the nonsecure source's size. The secure
	 * destination may be larger (checked above); using its size here
	 * would read past the end of the nonsecure buffer.
	 */
	TEE_MemMove(params[sec_idx].memref.buffer,
		    params[ns_idx].memref.buffer,
		    params[ns_idx].memref.size);

#ifdef CFG_CACHE_API
	/* Flush so the injected data is visible outside the caches */
	rc = TEE_CacheFlush(params[sec_idx].memref.buffer,
			    params[sec_idx].memref.size);
	if (rc != TEE_SUCCESS) {
		EMSG("TEE_CacheFlush(%p, %x) failed: 0x%x",
		     params[sec_idx].memref.buffer,
		     params[sec_idx].memref.size, rc);
		return rc;
	}
#endif /* CFG_CACHE_API */

	return rc;
}