/*
 * tee_time_get_ree_time(): this function implements the GP Internal API
 * function TEE_GetREETime()
 * Goal is to get the time of the Rich Execution Environment
 * This is why this time is provided through the supplicant
 */
TEE_Result tee_time_get_ree_time(TEE_Time *time)
{
	struct tee_ta_session *sess = NULL;
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct teesmc32_param params;
	paddr_t phpayload = 0;
	paddr_t cookie = 0;
	TEE_Time *payload = NULL;

	/* The RPC is executed outside any TA context: save and clear it. */
	tee_ta_get_current_session(&sess);
	tee_ta_set_current_session(NULL);

	if (!time)
		goto exit;

	/* Allocate shared memory for the non-secure side to fill in. */
	thread_optee_rpc_alloc_payload(sizeof(TEE_Time), &phpayload, &cookie);
	if (!phpayload)
		goto exit;

	if (!TEE_ALIGNMENT_IS_OK(phpayload, TEE_Time))
		goto exit;

	if (core_pa2va(phpayload, &payload))
		goto exit;

	memset(&params, 0, sizeof(params));
	params.attr = TEESMC_ATTR_TYPE_MEMREF_OUTPUT |
		      (TEESMC_ATTR_CACHE_I_WRITE_THR |
		       TEESMC_ATTR_CACHE_O_WRITE_THR) <<
				TEESMC_ATTR_CACHE_SHIFT;
	params.u.memref.buf_ptr = phpayload;
	params.u.memref.size = sizeof(TEE_Time);

	res = thread_rpc_cmd(TEE_RPC_GET_TIME, 1, &params);
	if (res != TEE_SUCCESS)
		goto exit;

	/* Copy the time filled in by the supplicant back to the caller. */
	*time = *payload;

exit:
	/*
	 * NOTE(review): assumes thread_optee_rpc_free_payload(0) is a
	 * no-op on the early-error paths where allocation never happened
	 * (cookie is still 0 there) — confirm against its implementation.
	 */
	thread_optee_rpc_free_payload(cookie);
	tee_ta_set_current_session(sess);
	return res;
}
/*
 * Process one REL relocation section of a TA ELF image.
 *
 * @state     Loader state; provides the section header table, the ELF
 *            header and the total size of the TA's virtual address space.
 * @rel_sidx  Index of the relocation section to process.
 * @vabase    Virtual base address the TA image is loaded at.
 *
 * Returns TEE_ERROR_BAD_FORMAT on any malformed or out-of-range field,
 * TEE_SUCCESS otherwise.
 *
 * All offsets originate from an untrusted ELF image, so every address
 * computation is range-checked against state->vasize in a way that
 * cannot wrap: instead of testing "addr + size > limit" (which may
 * overflow and pass), we test "addr > limit || size > limit - addr".
 */
static TEE_Result elf_process_rel(struct elf_load_state *state, size_t rel_sidx,
			vaddr_t vabase)
{
	Elf32_Shdr *shdr = state->shdr;
	Elf32_Rel *rel;
	Elf32_Rel *rel_end;
	size_t sym_tab_idx;
	Elf32_Sym *sym_tab = NULL;
	size_t num_syms = 0;

	if (shdr[rel_sidx].sh_entsize != sizeof(Elf32_Rel))
		return TEE_ERROR_BAD_FORMAT;

	sym_tab_idx = shdr[rel_sidx].sh_link;
	if (sym_tab_idx) {
		if (sym_tab_idx >= state->ehdr->e_shnum)
			return TEE_ERROR_BAD_FORMAT;

		if (shdr[sym_tab_idx].sh_entsize != sizeof(Elf32_Sym))
			return TEE_ERROR_BAD_FORMAT;

		/* Check the symbol table is inside TA memory (no wrap) */
		if (shdr[sym_tab_idx].sh_addr > state->vasize ||
		    shdr[sym_tab_idx].sh_size >
				state->vasize - shdr[sym_tab_idx].sh_addr)
			return TEE_ERROR_BAD_FORMAT;

		sym_tab = (Elf32_Sym *)(vabase + shdr[sym_tab_idx].sh_addr);
		if (!TEE_ALIGNMENT_IS_OK(sym_tab, Elf32_Sym))
			return TEE_ERROR_BAD_FORMAT;

		num_syms = shdr[sym_tab_idx].sh_size / sizeof(Elf32_Sym);
	}

	/* Check the relocation section is inside TA memory (no wrap) */
	if (shdr[rel_sidx].sh_addr >= state->vasize ||
	    shdr[rel_sidx].sh_size >= state->vasize - shdr[rel_sidx].sh_addr)
		return TEE_ERROR_BAD_FORMAT;

	rel = (Elf32_Rel *)(vabase + shdr[rel_sidx].sh_addr);
	if (!TEE_ALIGNMENT_IS_OK(rel, Elf32_Rel))
		return TEE_ERROR_BAD_FORMAT;

	rel_end = rel + shdr[rel_sidx].sh_size / sizeof(Elf32_Rel);
	for (; rel < rel_end; rel++) {
		Elf32_Addr *where;
		size_t sym_idx;

		/*
		 * Check the whole Elf32_Addr being patched is inside TA
		 * memory, not just its first byte.
		 */
		if (rel->r_offset >= state->vasize ||
		    state->vasize - rel->r_offset < sizeof(Elf32_Addr))
			return TEE_ERROR_BAD_FORMAT;

		where = (Elf32_Addr *)(vabase + rel->r_offset);
		if (!TEE_ALIGNMENT_IS_OK(where, Elf32_Addr))
			return TEE_ERROR_BAD_FORMAT;

		switch (ELF32_R_TYPE(rel->r_info)) {
		case R_ARM_ABS32:
			sym_idx = ELF32_R_SYM(rel->r_info);
			/* Also rejects any symbol when no symbol table */
			if (sym_idx >= num_syms)
				return TEE_ERROR_BAD_FORMAT;

			*where += vabase + sym_tab[sym_idx].st_value;
			break;
		case R_ARM_RELATIVE:
			*where += vabase;
			break;
		default:
			EMSG("Unknown relocation type %d",
			     ELF32_R_TYPE(rel->r_info));
			return TEE_ERROR_BAD_FORMAT;
		}
	}
	return TEE_SUCCESS;
}
/*
 * Feed data through a block-oriented update function, buffering partial
 * blocks in op->buffer between calls.
 *
 * @op           Operation whose internal buffer/state is used. When
 *               op->buffer_two_blocks is set, up to two whole blocks are
 *               withheld (needed e.g. for ciphertext stealing / final-block
 *               handling); otherwise one block.
 * @update_func  Block processing primitive; panics are propagated via
 *               TEE_Panic on any failure (API contract: update cannot fail).
 * @src_data/@src_len    Input data.
 * @dest_data/@dest_len  Output buffer; on return *dest_len is the number of
 *                       bytes actually produced.
 *
 * Always returns TEE_SUCCESS (errors panic).
 */
static TEE_Result tee_buffer_update(
		TEE_OperationHandle op,
		TEE_Result(*update_func) (uint32_t state, const void *src,
					  size_t slen, void *dst, size_t *dlen),
		const void *src_data, size_t src_len,
		void *dest_data, size_t *dest_len)
{
	TEE_Result res;
	const uint8_t *src = src_data;
	size_t slen = src_len;
	uint8_t *dst = dest_data;
	size_t dlen = *dest_len;
	size_t acc_dlen = 0;
	size_t tmp_dlen;
	size_t l;
	size_t buffer_size;

	if (op->buffer_two_blocks)
		buffer_size = op->block_size * 2;
	else
		buffer_size = op->block_size;

	if (op->buffer_offs > 0) {
		/* Fill up complete block */
		if (op->buffer_offs < op->block_size)
			l = MIN(slen, op->block_size - op->buffer_offs);
		else
			l = MIN(slen, buffer_size - op->buffer_offs);
		memcpy(op->buffer + op->buffer_offs, src, l);
		op->buffer_offs += l;
		src += l;
		slen -= l;
		if ((op->buffer_offs % op->block_size) != 0)
			goto out;	/* Nothing left to do */
	}

	/* If we can feed from buffer */
	if (op->buffer_offs > 0 && (op->buffer_offs + slen) > buffer_size) {
		l = ROUNDUP(op->buffer_offs + slen - buffer_size,
			    op->block_size);
		l = MIN(op->buffer_offs, l);
		tmp_dlen = dlen;
		res = update_func(op->state, op->buffer, l, dst, &tmp_dlen);
		if (res != TEE_SUCCESS)
			TEE_Panic(res);
		dst += tmp_dlen;
		dlen -= tmp_dlen;
		acc_dlen += tmp_dlen;
		op->buffer_offs -= l;
		if (op->buffer_offs > 0) {
			/*
			 * Slen is small enough to be contained in rest
			 * buffer.
			 *
			 * Shift the unconsumed tail of the buffer to its
			 * start. Source and destination overlap whenever
			 * l < buffer_size - l, so memmove (not memcpy) is
			 * required to avoid undefined behavior.
			 */
			memmove(op->buffer, op->buffer + l, buffer_size - l);
			memcpy(op->buffer + op->buffer_offs, src, slen);
			op->buffer_offs += slen;
			goto out;	/* Nothing left to do */
		}
	}

	if (slen > buffer_size) {
		/* Buffer is empty, feed as much as possible from src */
		if (TEE_ALIGNMENT_IS_OK(src, uint32_t)) {
			/* Keep at least one byte so final() has data */
			l = ROUNDUP(slen - buffer_size + 1, op->block_size);
			tmp_dlen = dlen;
			res = update_func(op->state, src, l, dst, &tmp_dlen);
			if (res != TEE_SUCCESS)
				TEE_Panic(res);
			src += l;
			slen -= l;
			dst += tmp_dlen;
			dlen -= tmp_dlen;
			acc_dlen += tmp_dlen;
		} else {
			/*
			 * Supplied data isn't well aligned, we're forced to
			 * feed through the buffer.
			 */
			while (slen >= op->block_size) {
				memcpy(op->buffer, src, op->block_size);
				tmp_dlen = dlen;
				res = update_func(op->state, op->buffer,
						  op->block_size, dst,
						  &tmp_dlen);
				if (res != TEE_SUCCESS)
					TEE_Panic(res);
				src += op->block_size;
				slen -= op->block_size;
				dst += tmp_dlen;
				dlen -= tmp_dlen;
				acc_dlen += tmp_dlen;
			}
		}
	}

	/* Slen is small enough to be contained in buffer. */
	memcpy(op->buffer + op->buffer_offs, src, slen);
	op->buffer_offs += slen;

out:
	*dest_len = acc_dlen;
	return TEE_SUCCESS;
}