// RAM was just loaded from SPC, with $F0-$FF containing SMP registers // and timer counts. Copies these to proper registers. void SNES_SPC::ram_loaded() { m.rom_enabled = 0; load_regs( &RAM [0xF0] ); // Put STOP instruction around memory to catch PC underflow/overflow memset( m.ram.padding1, cpu_pad_fill, sizeof m.ram.padding1 ); memset( m.ram.padding2, cpu_pad_fill, sizeof m.ram.padding2 ); }
void _SE3(uintptr_t xax, uintptr_t xbx, uintptr_t xcx, uintptr_t xdx, uintptr_t xsi, uintptr_t xdi) { UNUSED(xdx); switch (xax) { case SE_EENTER: uintptr_t xip; void * enclave_base_addr; se_pt_regs_t* p_pt_regs; tcs_t* tcs; tcs_sim_t* tcs_sim; ssa_gpr_t* p_ssa_gpr; secs_t* secs; CEnclaveMngr* mngr; CEnclaveSim* ce; // xbx contains the address of a TCS tcs = reinterpret_cast<tcs_t*>(xbx); // Is TCS pointer page-aligned? GP_ON_EENTER(!IS_PAGE_ALIGNED(tcs)); mngr = CEnclaveMngr::get_instance(); assert(mngr != NULL); // Is it really a TCS? ce = mngr->get_enclave(tcs); GP_ON_EENTER(ce == NULL); GP_ON_EENTER(!ce->is_tcs_page(tcs)); // Check the EntryReason tcs_sim = reinterpret_cast<tcs_sim_t *>(tcs->reserved); GP_ON_EENTER(tcs_sim->tcs_state != TCS_STATE_INACTIVE); GP_ON_EENTER(tcs->cssa >= tcs->nssa); secs = ce->get_secs(); enclave_base_addr = secs->base; p_ssa_gpr = reinterpret_cast<ssa_gpr_t*>(reinterpret_cast<uintptr_t>(enclave_base_addr) + static_cast<size_t>(tcs->ossa) + secs->ssa_frame_size * SE_PAGE_SIZE - sizeof(ssa_gpr_t)); tcs_sim->saved_aep = xcx; p_pt_regs = reinterpret_cast<se_pt_regs_t*>(get_bp()); p_ssa_gpr->REG(bp_u) = p_pt_regs->xbp; p_ssa_gpr->REG(sp_u) = reinterpret_cast<uintptr_t>(p_pt_regs + 1); xcx = p_pt_regs->xip; xip = reinterpret_cast<uintptr_t>(enclave_base_addr); GP_ON_EENTER(xip == 0); //set the _tls_array to point to the self_addr of TLS section inside the enclave GP_ON_EENTER(td_mngr_set_td(enclave_base_addr, tcs) == false); // Destination depends on STATE xip += (uintptr_t)tcs->oentry; tcs_sim->tcs_state = TCS_STATE_ACTIVE; // Link the TCS to the thread GP_ON_EENTER((secs->attributes.flags & SGX_FLAGS_INITTED) == 0); // Replace the return address on the stack with the enclave entry, // so that when we return from this function, we'll enter the enclave. 
enclu_regs_t regs; regs.xax = tcs->cssa; regs.xbx = reinterpret_cast<uintptr_t>(tcs); regs.xcx = xcx; regs.xdx = 0; regs.xsi = xsi; regs.xdi = xdi; regs.xbp = p_ssa_gpr->REG(bp_u); regs.xsp = p_ssa_gpr->REG(sp_u); regs.xip = xip; load_regs(®s); // Returning from this function enters the enclave return; default: // There's only 1 ring 3 instruction outside the enclave: EENTER. GP(); } }
// RAM was just loaded from SPC, with $F0-$FF containing SMP registers // and timer counts. Copies these to proper registers. void SNES_SPC::ram_loaded() { rom_enabled = 0; load_regs( &ram [0xF0] ); }
/* Emulate a hardware task switch for an hvf guest.
 *
 * cpu        - vCPU whose VM exit reported a task switch
 * tss_sel    - selector of the destination TSS
 * reason     - TSR_* cause (CALL, IRET, JMP, or IDT task gate)
 * gate_valid - whether the exit carried valid IDT-vectoring info
 * gate       - IDT vector of the delivering gate (when gate_valid)
 * gate_type  - VMCS_INTR_T_* type of that gate
 *
 * Only 32-bit TSS switches are emulated; a 16-bit destination TSS panics.
 * The guest register state is synced in via load_regs() before emulation
 * and pushed back with store_regs() afterwards.
 */
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
{
    uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);
    /* Only emulate switches delivered through a HW exception, HW interrupt,
     * or NMI gate (or instruction-initiated switches with no gate info);
     * otherwise just skip the instruction by advancing RIP past it. */
    if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
                        gate_type != VMCS_INTR_T_HWINTR &&
                        gate_type != VMCS_INTR_T_NMI)) {
        int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
        macvm_set_rip(cpu, rip + ins_len);
        return;
    }

    /* Pull the current guest register state out of the hypervisor. */
    load_regs(cpu);

    struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
    int ret; /* NOTE(review): assigned below but never checked — errors from
              * x86_read_call_gate/task_switch_32 are silently ignored. */
    x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
    uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
    uint32_t desc_limit;
    struct x86_call_gate task_gate_desc;
    struct vmx_segment vmx_seg;

    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    /* Fetch the descriptors of the destination and the current TSS. */
    x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);

    if (reason == TSR_IDT_GATE && gate_valid) {
        int dpl;

        ret = x86_read_call_gate(cpu, &task_gate_desc, gate);

        /* Privilege check for switches through a task gate. The body is an
         * intentionally empty statement: a #GP should be raised here but
         * is not emulated (see the disabled DPRINTF). */
        dpl = task_gate_desc.dpl;
        x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
        if (tss_sel.rpl > dpl || cs.rpl > dpl)
            ;//DPRINTF("emulate_gp");
    }

    /* A 32-bit TSS needs at least 0x67 bytes, a 16-bit one 0x2b; the
     * descriptor must also be present. Otherwise a #TS would be due. */
    desc_limit = x86_segment_limit(&next_tss_desc);
    if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
                             desc_limit < 0x2b)) {
        VM_PANIC("emulate_ts");
    }

    /* IRET and JMP leave the old task for good: clear its busy bit. */
    if (reason == TSR_IRET || reason == TSR_JMP) {
        curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
        x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
    }

    /* IRET also clears the nested-task flag in EFLAGS. */
    if (reason == TSR_IRET)
        EFLAGS(env) &= ~RFLAGS_NT;

    /* Only CALL and task-gate entry record a back link to the old TSS;
     * for the other reasons mark the old selector as null (0xffff). */
    if (reason != TSR_CALL && reason != TSR_IDT_GATE)
        old_tss_sel.sel = 0xffff;

    /* Everything except IRET marks the destination TSS busy. */
    if (reason != TSR_IRET) {
        next_tss_desc.type |= (1 << 1); /* set busy flag */
        x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    }

    /* Type bit 3 distinguishes 32-bit from 16-bit TSS; 16-bit task
     * switches are not implemented and abort the VM. */
    if (next_tss_desc.type & 8)
        ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
    else
        //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
        VM_PANIC("task_switch_16");

    /* A task switch always sets CR0.TS, and TR must point at the new TSS. */
    macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);
    x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
    vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);

    /* Push the emulated register state back into the hypervisor. */
    store_regs(cpu);

    hv_vcpu_invalidate_tlb(cpu->hvf_fd);
    hv_vcpu_flush(cpu->hvf_fd);
}