/*
 * Fetch the privilege-level `pl` stack pointer (SS:ESP pair) from the
 * current task's TSS.  Raises #TS if the TSS is too small to hold the
 * requested entry; panics if TR does not reference a busy TSS.
 */
void
get_stack_pointer_from_tss(UINT pl, UINT16 *new_ss, UINT32 *new_esp)
{
	UINT32 offset;

	__ASSERT(pl < 3);

	switch (CPU_TR_DESC.type) {
	case CPU_SYSDESC_TYPE_TSS_BUSY_32:
		/* 32-bit TSS: ESPn/SSn pairs are 8 bytes each, starting at 4 */
		offset = pl * 8 + 4;
		if (offset + 7 > CPU_TR_DESC.u.seg.limit) {
			EXCEPTION(TS_EXCEPTION, CPU_TR & ~3);
		}
		offset += CPU_TR_DESC.u.seg.segbase;
		*new_esp = cpu_kmemoryread_d(offset);
		*new_ss = cpu_kmemoryread_w(offset + 4);
		break;

	case CPU_SYSDESC_TYPE_TSS_BUSY_16:
		/* 16-bit TSS: SPn/SSn pairs are 4 bytes each, starting at 2 */
		offset = pl * 4 + 2;
		if (offset + 3 > CPU_TR_DESC.u.seg.limit) {
			EXCEPTION(TS_EXCEPTION, CPU_TR & ~3);
		}
		offset += CPU_TR_DESC.u.seg.segbase;
		*new_esp = cpu_kmemoryread_w(offset);
		*new_ss = cpu_kmemoryread_w(offset + 2);
		break;

	default:
		ia32_panic("get_stack_pointer_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type);
		break;
	}

	VERBOSE(("get_stack_pointer_from_tss: pl = %d, new_esp = 0x%08x, new_ss = 0x%04x", pl, *new_esp, *new_ss));
}
/*---
 * IRET_pm: new_flags & VM_FLAG
 *
 * IRET taken in protected mode when the flags image on the stack has
 * VM set: return to a virtual-8086 monitor's guest.  The caller has
 * already popped EIP/CS/EFLAGS; the remainder of the VM86 IRET frame
 * (ESP, SS, ES, DS, FS, GS — one dword slot each) is still on the
 * stack and is read here relative to `sp`.
 */
static void
IRET_pm_return_to_vm86(UINT16 new_cs, UINT32 new_ip, UINT32 new_flags)
{
	UINT16 segsel[CPU_SEGREG_NUM];
	UINT32 sp;
	UINT32 new_sp;
	int i;

	VERBOSE(("IRET_pm: Interrupt procedure was in virtual-8086 mode: PE=1, VM=1 in flags image"));

	/* only the PL0 monitor may resume a VM86 task via IRET */
	if (CPU_STAT_CPL != 0) {
		ia32_panic("IRET_pm: CPL != 0");
	}
	/* the full VM86 frame is only defined for a 32-bit operand IRET */
	if (!CPU_INST_OP32) {
		ia32_panic("IRET_pm: 16bit mode");
	}

	if (CPU_STAT_SS32) {
		sp = CPU_ESP;
	} else {
		sp = CPU_SP;
	}
	/* 36 bytes = 9 dwords: EIP, CS, EFLAGS, ESP, SS, ES, DS, FS, GS */
	STACK_POP_CHECK(CPU_REGS_SREG(CPU_SS_INDEX), &CPU_STAT_SREG(CPU_SS_INDEX), sp, 36);

	/* pull the remaining frame slots (offsets past EIP/CS/EFLAGS) */
	new_sp = cpu_vmemoryread_d(CPU_SS_INDEX, sp + 12);
	segsel[CPU_SS_INDEX] = cpu_vmemoryread_w(CPU_SS_INDEX, sp + 16);
	segsel[CPU_ES_INDEX] = cpu_vmemoryread_w(CPU_SS_INDEX, sp + 20);
	segsel[CPU_DS_INDEX] = cpu_vmemoryread_w(CPU_SS_INDEX, sp + 24);
	segsel[CPU_FS_INDEX] = cpu_vmemoryread_w(CPU_SS_INDEX, sp + 28);
	segsel[CPU_GS_INDEX] = cpu_vmemoryread_w(CPU_SS_INDEX, sp + 32);
	segsel[CPU_CS_INDEX] = (UINT16)new_cs;

	/* install all six selectors and rebuild their cached state */
	for (i = 0; i < CPU_SEGREG_NUM; i++) {
		CPU_REGS_SREG(i) = segsel[i];
		CPU_STAT_SREG_INIT(i);
	}

	/* to VM86 mode */
	set_eflags(new_flags, IOPL_FLAG|I_FLAG|VM_FLAG|RF_FLAG);

	/* VM86 runs with 16-bit SP/IP semantics */
	new_sp &= 0xffff;
	new_ip &= 0xffff;
	CPU_ESP = new_sp;
	SET_EIP(new_ip);
}
/*
 * Read the previous-task (back link) selector stored in the first word
 * of the current TSS.  Raises #TS when the TSS limit cannot even cover
 * that word; panics if TR does not reference a busy TSS.
 */
UINT16
get_backlink_selector_from_tss(void)
{
	UINT16 backlink;

	switch (CPU_TR_DESC.type) {
	case CPU_SYSDESC_TYPE_TSS_BUSY_32:
		if (4 > CPU_TR_DESC.u.seg.limit) {
			EXCEPTION(TS_EXCEPTION, CPU_TR & ~3);
		}
		break;
	case CPU_SYSDESC_TYPE_TSS_BUSY_16:
		if (2 > CPU_TR_DESC.u.seg.limit) {
			EXCEPTION(TS_EXCEPTION, CPU_TR & ~3);
		}
		break;
	default:
		ia32_panic("get_backlink_selector_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type);
		break;
	}

	/* the back link occupies offset 0 of both TSS formats */
	backlink = cpu_kmemoryread_w(CPU_TR_DESC.u.seg.segbase);

	VERBOSE(("get_backlink_selector_from_tss: backlink selector = 0x%04x", backlink));
	return backlink;
}
/*
 * task_switch - perform an IA-32 hardware task switch through a TSS.
 *
 * task_sel: parsed selector/descriptor of the destination TSS
 *           (16- or 32-bit, busy or available depending on `type`).
 * type:     how the switch was initiated (JMP/CALL/IRET/interrupt);
 *           this decides busy-flag, NT-flag and back-link handling.
 *
 * Sequence: validate the new TSS limit, read the entire incoming
 * machine state from it, save the outgoing state into the current TSS,
 * then install the new state (CR3, EFLAGS, EIP, GPRs, LDTR, segment
 * registers, I/O bitmap).  May raise #TS/#NP/#GP at several points.
 */
void
task_switch(selector_t *task_sel, task_switch_type_t type)
{
	UINT32 regs[CPU_REG_NUM];
	UINT32 eip;
	UINT32 new_flags;
	UINT32 cr3 = 0;
	UINT16 sreg[CPU_SEGREG_NUM];
	UINT16 ldtr;
	UINT16 iobase;
	UINT16 t;
	selector_t cs_sel;
	int rv;
	UINT32 cur_base;	/* current task state */
	UINT32 task_base;	/* new task state */
	UINT32 old_flags = REAL_EFLAGREG;
	BOOL task16;
	UINT i;

	VERBOSE(("task_switch: start"));

	/* limit check: minimum TSS sizes are 0x67 (32-bit) / 0x2b (16-bit) */
	switch (task_sel->desc.type) {
	case CPU_SYSDESC_TYPE_TSS_32:
	case CPU_SYSDESC_TYPE_TSS_BUSY_32:
		if (task_sel->desc.u.seg.limit < 0x67) {
			EXCEPTION(TS_EXCEPTION, task_sel->idx);
		}
		task16 = FALSE;
		break;

	case CPU_SYSDESC_TYPE_TSS_16:
	case CPU_SYSDESC_TYPE_TSS_BUSY_16:
		if (task_sel->desc.u.seg.limit < 0x2b) {
			EXCEPTION(TS_EXCEPTION, task_sel->idx);
		}
		task16 = TRUE;
		break;

	default:
		ia32_panic("task_switch: descriptor type is invalid.");
		task16 = FALSE;		/* compiler happy */
		break;
	}

	cur_base = CPU_TR_DESC.u.seg.segbase;
	task_base = task_sel->desc.u.seg.segbase;
	VERBOSE(("task_switch: cur task (%04x) = 0x%08x:%08x", CPU_TR, cur_base, CPU_TR_DESC.u.seg.limit));
	VERBOSE(("task_switch: new task (%04x) = 0x%08x:%08x", task_sel->selector, task_base, task_sel->desc.u.seg.limit));
	VERBOSE(("task_switch: %dbit task switch", task16 ? 16 : 32));

#if defined(MORE_DEBUG)
	{
		UINT32 v;

		VERBOSE(("task_switch: new task"));
		for (i = 0; i < task_sel->desc.u.seg.limit; i += 4) {
			v = cpu_kmemoryread_d(task_base + i);
			VERBOSE(("task_switch: 0x%08x: %08x", task_base + i, v));
		}
	}
#endif

	if (CPU_STAT_PAGING) {
		/* task state paging check: both TSSes must be writable
		 * before any state is committed */
		paging_check(cur_base, CPU_TR_DESC.u.seg.limit, CPU_PAGE_WRITE_DATA|CPU_MODE_SUPERVISER);
		paging_check(task_base, task_sel->desc.u.seg.limit, CPU_PAGE_WRITE_DATA|CPU_MODE_SUPERVISER);
	}

	/* load task state (offsets follow the architectural TSS layouts) */
	memset(sreg, 0, sizeof(sreg));
	if (!task16) {
		if (CPU_STAT_PAGING) {
			cr3 = cpu_kmemoryread_d(task_base + 28);
		}
		eip = cpu_kmemoryread_d(task_base + 32);
		new_flags = cpu_kmemoryread_d(task_base + 36);
		for (i = 0; i < CPU_REG_NUM; i++) {
			regs[i] = cpu_kmemoryread_d(task_base + 40 + i * 4);
		}
		for (i = 0; i < CPU_SEGREG_NUM; i++) {
			sreg[i] = cpu_kmemoryread_w(task_base + 72 + i * 4);
		}
		ldtr = cpu_kmemoryread_w(task_base + 96);
		t = cpu_kmemoryread_w(task_base + 100);
		if (t & 1) {
			/* T bit set: raise a debug trap on entry to the task */
			CPU_STAT_BP_EVENT |= CPU_STAT_BP_EVENT_TASK;
		}
		iobase = cpu_kmemoryread_w(task_base + 102);
	} else {
		/* 16-bit TSS: no CR3, no T bit, no I/O bitmap */
		eip = cpu_kmemoryread_w(task_base + 14);
		new_flags = cpu_kmemoryread_w(task_base + 16);
		for (i = 0; i < CPU_REG_NUM; i++) {
			regs[i] = cpu_kmemoryread_w(task_base + 18 + i * 2);
		}
		for (i = 0; i < CPU_SEGREG286_NUM; i++) {
			sreg[i] = cpu_kmemoryread_w(task_base + 34 + i * 2);
		}
		ldtr = cpu_kmemoryread_w(task_base + 42);
		iobase = 0;
		t = 0;
	}

#if defined(DEBUG)
	VERBOSE(("task_switch: current task"));
	if (!task16) {
		VERBOSE(("task_switch: CR3 = 0x%08x", CPU_CR3));
	}
	VERBOSE(("task_switch: eip = 0x%08x", CPU_EIP));
	VERBOSE(("task_switch: eflags = 0x%08x", old_flags));
	for (i = 0; i < CPU_REG_NUM; i++) {
		VERBOSE(("task_switch: regs[%d] = 0x%08x", i, CPU_REGS_DWORD(i)));
	}
	for (i = 0; i < CPU_SEGREG_NUM; i++) {
		VERBOSE(("task_switch: sreg[%d] = 0x%04x", i, CPU_REGS_SREG(i)));
	}
	VERBOSE(("task_switch: ldtr = 0x%04x", CPU_LDTR));

	VERBOSE(("task_switch: new task"));
	if (!task16) {
		VERBOSE(("task_switch: CR3 = 0x%08x", cr3));
	}
	VERBOSE(("task_switch: eip = 0x%08x", eip));
	VERBOSE(("task_switch: eflags = 0x%08x", new_flags));
	for (i = 0; i < CPU_REG_NUM; i++) {
		VERBOSE(("task_switch: regs[%d] = 0x%08x", i, regs[i]));
	}
	for (i = 0; i < CPU_SEGREG_NUM; i++) {
		VERBOSE(("task_switch: sreg[%d] = 0x%04x", i, sreg[i]));
	}
	VERBOSE(("task_switch: ldtr = 0x%04x", ldtr));
	if (!task16) {
		VERBOSE(("task_switch: t = 0x%04x", t));
		VERBOSE(("task_switch: iobase = 0x%04x", iobase));
	}
#endif

	/* if IRET or JMP, clear busy flag in this task: need */
	/* if IRET, clear NT_FLAG in current EFLAG: need */
	switch (type) {
	case TASK_SWITCH_IRET:
		/* clear NT_FLAG */
		old_flags &= ~NT_FLAG;
		/*FALLTHROUGH*/
	case TASK_SWITCH_JMP:
		/* clear busy flags in current task */
		CPU_SET_TASK_FREE(CPU_TR, &CPU_TR_DESC);
		break;

	case TASK_SWITCH_CALL:
	case TASK_SWITCH_INTR:
		/* Nothing to do */
		break;

	default:
		ia32_panic("task_switch(): task switch type is invalid");
		break;
	}

	/* save this task state in this task state segment */
	if (!task16) {
		cpu_kmemorywrite_d(cur_base + 32, CPU_EIP);
		cpu_kmemorywrite_d(cur_base + 36, old_flags);
		for (i = 0; i < CPU_REG_NUM; i++) {
			cpu_kmemorywrite_d(cur_base + 40 + i * 4, CPU_REGS_DWORD(i));
		}
		for (i = 0; i < CPU_SEGREG_NUM; i++) {
			cpu_kmemorywrite_w(cur_base + 72 + i * 4, CPU_REGS_SREG(i));
		}
	} else {
		cpu_kmemorywrite_w(cur_base + 14, CPU_IP);
		cpu_kmemorywrite_w(cur_base + 16, (UINT16)old_flags);
		for (i = 0; i < CPU_REG_NUM; i++) {
			cpu_kmemorywrite_w(cur_base + 18 + i * 2, CPU_REGS_WORD(i));
		}
		for (i = 0; i < CPU_SEGREG286_NUM; i++) {
			cpu_kmemorywrite_w(cur_base + 34 + i * 2, CPU_REGS_SREG(i));
		}
	}

#if defined(MORE_DEBUG)
	{
		UINT32 v;

		VERBOSE(("task_switch: current task"));
		for (i = 0; i < CPU_TR_DESC.u.seg.limit; i += 4) {
			v = cpu_kmemoryread_d(cur_base + i);
			VERBOSE(("task_switch: 0x%08x: %08x", cur_base + i, v));
		}
	}
#endif

	/* set back link selector */
	switch (type) {
	case TASK_SWITCH_CALL:
	case TASK_SWITCH_INTR:
		/* set back link selector so the new task can IRET back */
		cpu_kmemorywrite_w(task_base, CPU_TR);
		break;

	case TASK_SWITCH_IRET:
	case TASK_SWITCH_JMP:
		/* Nothing to do */
		break;

	default:
		ia32_panic("task_switch(): task switch type is invalid");
		break;
	}

	/* Now task switching! */

	/* if CALL, INTR, set EFLAGS image NT_FLAG */
	/* if CALL, INTR, JMP set busy flag */
	switch (type) {
	case TASK_SWITCH_CALL:
	case TASK_SWITCH_INTR:
		/* nested task: NT set in the incoming EFLAGS image */
		new_flags |= NT_FLAG;
		/*FALLTHROUGH*/
	case TASK_SWITCH_JMP:
		CPU_SET_TASK_BUSY(task_sel->selector, &task_sel->desc);
		break;

	case TASK_SWITCH_IRET:
		/* check busy flag is active: IRET returns to a task that
		 * must already be marked busy */
		if (task_sel->desc.valid) {
			UINT32 h;
			h = cpu_kmemoryread_d(task_sel->addr + 4);
			if ((h & CPU_TSS_H_BUSY) == 0) {
				ia32_panic("task_switch: new task is not busy");
			}
		}
		break;

	default:
		ia32_panic("task_switch(): task switch type is invalid");
		break;
	}

	/* set CR0 image CPU_CR0_TS (lazy FPU context switching) */
	CPU_CR0 |= CPU_CR0_TS;

	/* load task selector to CPU_TR */
	CPU_TR = task_sel->selector;
	CPU_TR_DESC = task_sel->desc;

	/* load task state (CR3, EFLAG, EIP, GPR, segreg, LDTR) */

	/* set new CR3 */
	if (!task16 && CPU_STAT_PAGING) {
		set_CR3(cr3);
	}

	/* set new EIP, GPR */
	CPU_PREV_EIP = CPU_EIP = eip;
	for (i = 0; i < CPU_REG_NUM; i++) {
		CPU_REGS_DWORD(i) = regs[i];
	}
	for (i = 0; i < CPU_SEGREG_NUM; i++) {
		CPU_REGS_SREG(i) = sreg[i];
		CPU_STAT_SREG_INIT(i);
	}

	/* set new EFLAGS */
	set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG);

	/* I/O deny bitmap: only valid when it lies inside the TSS limit */
	if (!task16) {
		if (iobase != 0 && iobase < task_sel->desc.u.seg.limit) {
			CPU_STAT_IOLIMIT = (UINT16)(task_sel->desc.u.seg.limit - iobase);
			CPU_STAT_IOADDR = task_sel->desc.u.seg.segbase + iobase;
		} else {
			CPU_STAT_IOLIMIT = 0;
		}
	} else {
		CPU_STAT_IOLIMIT = 0;
	}
	VERBOSE(("task_switch: ioaddr = %08x, limit = %08x", CPU_STAT_IOADDR, CPU_STAT_IOLIMIT));

#if defined(IA32_SUPPORT_DEBUG_REGISTER)
	/* check resume flag */
	if (CPU_EFLAG & RF_FLAG) {
		CPU_STAT_BP_EVENT |= CPU_STAT_BP_EVENT_RF;
	}

	/* clear local break point flags; only global breakpoints survive */
	CPU_DR7 &= ~(CPU_DR7_L(0)|CPU_DR7_L(1)|CPU_DR7_L(2)|CPU_DR7_L(3)|CPU_DR7_LE);
	CPU_STAT_BP = 0;
	for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) {
		if (CPU_DR7 & CPU_DR7_G(i)) {
			CPU_STAT_BP |= (1 << i);
		}
	}
#endif

	/* load new LDTR */
	load_ldtr(ldtr, TS_EXCEPTION);

	/* set new segment register */
	if (!CPU_STAT_VM86) {
		/* clear segment descriptor cache */
		for (i = 0; i < CPU_SEGREG_NUM; i++) {
			CPU_STAT_SREG_CLEAR(i);
		}

		/* load CS */
		rv = parse_selector(&cs_sel, sreg[CPU_CS_INDEX]);
		if (rv < 0) {
			VERBOSE(("task_switch: load CS failure (sel = 0x%04x, rv = %d)", sreg[CPU_CS_INDEX], rv));
			EXCEPTION(TS_EXCEPTION, cs_sel.idx);
		}

		/* CS register must be code segment */
		if (!cs_sel.desc.s || !cs_sel.desc.u.seg.c) {
			EXCEPTION(TS_EXCEPTION, cs_sel.idx);
		}

		/* check privilege level */
		if (!cs_sel.desc.u.seg.ec) {
			/* non-conforming code segment: DPL must equal RPL */
			if (cs_sel.desc.dpl != cs_sel.rpl) {
				EXCEPTION(TS_EXCEPTION, cs_sel.idx);
			}
		} else {
			/* conforming code segment: DPL must be <= RPL */
			if (cs_sel.desc.dpl < cs_sel.rpl) {
				EXCEPTION(TS_EXCEPTION, cs_sel.idx);
			}
		}

		/* code segment is not present */
		rv = selector_is_not_present(&cs_sel);
		if (rv < 0) {
			EXCEPTION(NP_EXCEPTION, cs_sel.idx);
		}

		/* Now loading CS register */
		load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.desc.dpl);

		/* load ES, SS, DS, FS, GS segment register */
		for (i = 0; i < CPU_SEGREG_NUM; i++) {
			if (i != CPU_CS_INDEX) {
				load_segreg(i, sreg[i], TS_EXCEPTION);
			}
		}
	}

	/* out of range */
	if (CPU_EIP > CPU_STAT_CS_LIMIT) {
		VERBOSE(("task_switch: new_ip is out of range. new_ip = %08x, limit = %08x", CPU_EIP, CPU_STAT_CS_LIMIT));
		EXCEPTION(GP_EXCEPTION, 0);
	}

	VERBOSE(("task_switch: done."));
}
/*
 * exception - raise CPU exception `num` with optional `error_code`.
 *
 * Classifies the exception: for faults, EIP (and optionally ESP) are
 * backed out to the pre-instruction values so the instruction can be
 * restarted; for traps/interrupts they are left as-is.  Tracks nested
 * exceptions to escalate to double fault (via dftable) and panics on
 * triple fault.  Delivers through interrupt() and then longjmps back
 * to the instruction-execution loop — this function does not return.
 *
 * Key to the markers below: (F) fault, (T) trap, (I) interrupt, (A) abort.
 */
void CPUCALL
exception(int num, int error_code)
{
#if defined(DEBUG)
	extern int cpu_debug_rep_cont;
	extern CPU_REGS cpu_debug_rep_regs;
#endif
	int errorp = 0;

	__ASSERT((unsigned int)num < EXCEPTION_NUM);

#if 0
	iptrace_out();
	debugwriteseg("execption.bin", &CPU_CS_DESC, CPU_PREV_EIP & 0xffff0000, 0x10000);
#endif

	VERBOSE(("exception: -------------------------------------------------------------- start"));
	VERBOSE(("exception: %s, error_code = %x at %04x:%08x", exception_str[num], error_code, CPU_CS, CPU_PREV_EIP));
	VERBOSE(("%s", cpu_reg2str()));
	VERBOSE(("code: %dbit(%dbit), address: %dbit(%dbit)", CPU_INST_OP32 ? 32 : 16, CPU_STATSAVE.cpu_inst_default.op_32 ? 32 : 16, CPU_INST_AS32 ? 32 : 16, CPU_STATSAVE.cpu_inst_default.as_32 ? 32 : 16));
#if defined(DEBUG)
	if (cpu_debug_rep_cont) {
		VERBOSE(("rep: original regs: ecx=%08x, esi=%08x, edi=%08x", cpu_debug_rep_regs.reg[CPU_ECX_INDEX].d, cpu_debug_rep_regs.reg[CPU_ESI_INDEX].d, cpu_debug_rep_regs.reg[CPU_EDI_INDEX].d));
	}
	VERBOSE(("%s", cpu_disasm2str(CPU_PREV_EIP)));
#endif

	CPU_STAT_EXCEPTION_COUNTER_INC();
	if ((CPU_STAT_EXCEPTION_COUNTER >= 3)
	 || (CPU_STAT_EXCEPTION_COUNTER == 2 && CPU_STAT_PREV_EXCEPTION == DF_EXCEPTION)) {
		/* Triple fault */
		ia32_panic("exception: catch triple fault!");
	}

	switch (num) {
	case DE_EXCEPTION:	/* (F) divide error */
	case DB_EXCEPTION:	/* (F/T) debug */
	case BR_EXCEPTION:	/* (F) BOUND range exceeded */
	case UD_EXCEPTION:	/* (F) invalid opcode */
	case NM_EXCEPTION:	/* (F) device not available (no FPU) */
	case MF_EXCEPTION:	/* (F) floating-point error */
		/* fault: restart the faulting instruction */
		CPU_EIP = CPU_PREV_EIP;
		if (CPU_STATSAVE.cpu_stat.backout_sp)
			CPU_ESP = CPU_PREV_ESP;
		/*FALLTHROUGH*/
	case NMI_EXCEPTION:	/* (I) NMI interrupt */
	case BP_EXCEPTION:	/* (T) breakpoint */
	case OF_EXCEPTION:	/* (T) overflow */
		errorp = 0;
		break;

	case DF_EXCEPTION:	/* (A) double fault (errcode: 0) */
		errorp = 1;
		error_code = 0;
		break;

	case AC_EXCEPTION:	/* (F) alignment check (errcode: 0) */
		error_code = 0;
		/*FALLTHROUGH*/
	case TS_EXCEPTION:	/* (F) invalid TSS (errcode) */
	case NP_EXCEPTION:	/* (F) segment not present (errcode) */
	case SS_EXCEPTION:	/* (F) stack segment fault (errcode) */
	case GP_EXCEPTION:	/* (F) general protection fault (errcode) */
	case PF_EXCEPTION:	/* (F) page fault (errcode) */
		/* fault with error code: restart the faulting instruction */
		CPU_EIP = CPU_PREV_EIP;
		if (CPU_STATSAVE.cpu_stat.backout_sp)
			CPU_ESP = CPU_PREV_ESP;
		errorp = 1;
		break;

	default:
		ia32_panic("exception: unknown exception (%d)", num);
		break;
	}

	/* second fault while delivering the first: consult the
	 * class table to decide whether this escalates to #DF */
	if (CPU_STAT_EXCEPTION_COUNTER >= 2) {
		if (dftable[exctype[CPU_STAT_PREV_EXCEPTION]][exctype[num]]) {
			num = DF_EXCEPTION;
			errorp = 1;
			error_code = 0;
		}
	}
	CPU_STAT_PREV_EXCEPTION = num;
	VERBOSE(("exception: ---------------------------------------------------------------- end"));

	interrupt(num, INTR_TYPE_EXCEPTION, errorp, error_code);
	CPU_STAT_EXCEPTION_COUNTER_CLEAR();
	siglongjmp(exec_1step_jmpbuf, 1);
}
/*
 * interrupt_intr_or_trap - deliver an interrupt/exception through an
 * interrupt gate or trap gate in protected mode.
 *
 * gsdp:       gate descriptor from the IDT.
 * intrtype:   INTR_TYPE_* source; an external interrupt sets bit 0
 *             (EXT) of any error code pushed for a nested fault.
 * errorp:     non-zero if `error_code` must be pushed on the new stack.
 *
 * Three delivery paths:
 *  - inter-privilege (incl. from VM86): stack switch via the TSS,
 *    old SS:ESP (and for VM86 the data segment registers) pushed;
 *  - intra-privilege: push flags/CS/EIP on the current stack.
 * Interrupt gates additionally clear IF; both gate kinds clear
 * TF/RF/NT/VM in the new flags.
 */
static void CPUCALL
interrupt_intr_or_trap(const descriptor_t *gsdp, int intrtype, int errorp, int error_code)
{
	selector_t cs_sel, ss_sel;
	UINT stacksize;
	UINT32 old_flags;
	UINT32 new_flags;
	UINT32 mask;
	UINT32 sp;
	UINT32 new_ip, new_sp;
	UINT32 old_ip, old_sp;
	UINT16 old_cs, old_ss, new_ss;
	BOOL is32bit;
	int exc_errcode;
	int rv;

	new_ip = gsdp->u.gate.offset;
	old_ss = CPU_SS;
	old_cs = CPU_CS;
	old_ip = CPU_EIP;
	old_sp = CPU_ESP;
	old_flags = REAL_EFLAGREG;
	new_flags = REAL_EFLAGREG & ~(T_FLAG|RF_FLAG|NT_FLAG|VM_FLAG);
	mask = T_FLAG|RF_FLAG|NT_FLAG|VM_FLAG;

	switch (gsdp->type) {
	case CPU_SYSDESC_TYPE_INTR_16:
	case CPU_SYSDESC_TYPE_INTR_32:
		VERBOSE(("interrupt: INTERRUPT-GATE"));
		/* interrupt gate: also disable maskable interrupts */
		new_flags &= ~I_FLAG;
		mask |= I_FLAG;
		break;

	case CPU_SYSDESC_TYPE_TRAP_16:
	case CPU_SYSDESC_TYPE_TRAP_32:
		VERBOSE(("interrupt: TRAP-GATE"));
		break;

	default:
		ia32_panic("interrupt: gate descriptor type is invalid (type = %d)", gsdp->type);
		break;
	}

	/* error code for nested faults: selector index, EXT bit for
	 * external interrupts */
	exc_errcode = gsdp->u.gate.selector & ~3;
	if (intrtype == INTR_TYPE_EXTINTR)
		exc_errcode++;

	rv = parse_selector(&cs_sel, gsdp->u.gate.selector);
	if (rv < 0) {
		VERBOSE(("interrupt: parse_selector (selector = %04x, rv = %d)", gsdp->u.gate.selector, rv));
		EXCEPTION(GP_EXCEPTION, exc_errcode);
	}

	/* check segment type */
	if (SEG_IS_SYSTEM(&cs_sel.desc)) {
		VERBOSE(("interrupt: code segment is system segment"));
		EXCEPTION(GP_EXCEPTION, exc_errcode);
	}
	if (SEG_IS_DATA(&cs_sel.desc)) {
		VERBOSE(("interrupt: code segment is data segment"));
		EXCEPTION(GP_EXCEPTION, exc_errcode);
	}

	/* check privilege level */
	if (cs_sel.desc.dpl > CPU_STAT_CPL) {
		VERBOSE(("interrupt: DPL(%d) > CPL(%d)", cs_sel.desc.dpl, CPU_STAT_CPL));
		EXCEPTION(GP_EXCEPTION, exc_errcode);
	}

	/* not present */
	if (selector_is_not_present(&cs_sel)) {
		VERBOSE(("interrupt: selector is not present"));
		EXCEPTION(NP_EXCEPTION, exc_errcode);
	}

	is32bit = gsdp->type & CPU_SYSDESC_TYPE_32BIT;
	if (!SEG_IS_CONFORMING_CODE(&cs_sel.desc) && (cs_sel.desc.dpl < CPU_STAT_CPL)) {
		/* inter-privilege: SS/ESP/flags/CS/IP (+error code) */
		stacksize = errorp ? 12 : 10;
		if (!CPU_STAT_VM86) {
			VERBOSE(("interrupt: INTER-PRIVILEGE-LEVEL-INTERRUPT"));
		} else {
			/* VM86 */
			VERBOSE(("interrupt: INTERRUPT-FROM-VIRTUAL-8086-MODE"));

			if (cs_sel.desc.dpl != 0) {
				/* 16.3.1.1: handler must run at PL0 */
				VERBOSE(("interrupt: DPL[CS](%d) != 0", cs_sel.desc.dpl));
				EXCEPTION(GP_EXCEPTION, exc_errcode);
			}
			/* also pushes GS, FS, DS, ES */
			stacksize += 8;
		}
		if (is32bit) {
			stacksize *= 2;
		}

		/* get stack pointer from TSS */
		get_stack_pointer_from_tss(cs_sel.desc.dpl, &new_ss, &new_sp);

		/* parse stack segment descriptor */
		rv = parse_selector(&ss_sel, new_ss);
		/* update exception error code */
		exc_errcode = ss_sel.idx;
		if (intrtype == INTR_TYPE_EXTINTR)
			exc_errcode++;
		if (rv < 0) {
			VERBOSE(("interrupt: parse_selector (selector = %04x, rv = %d)", new_ss, rv));
			EXCEPTION(TS_EXCEPTION, exc_errcode);
		}

		/* check privilege level */
		if (ss_sel.rpl != cs_sel.desc.dpl) {
			VERBOSE(("interrupt: selector RPL[SS](%d) != DPL[CS](%d)", ss_sel.rpl, cs_sel.desc.dpl));
			EXCEPTION(TS_EXCEPTION, exc_errcode);
		}
		if (ss_sel.desc.dpl != cs_sel.desc.dpl) {
			VERBOSE(("interrupt: descriptor DPL[SS](%d) != DPL[CS](%d)", ss_sel.desc.dpl, cs_sel.desc.dpl));
			EXCEPTION(TS_EXCEPTION, exc_errcode);
		}

		/* stack segment must be writable data segment. */
		if (SEG_IS_SYSTEM(&ss_sel.desc)) {
			VERBOSE(("interrupt: stack segment is system segment"));
			EXCEPTION(TS_EXCEPTION, exc_errcode);
		}
		if (SEG_IS_CODE(&ss_sel.desc)) {
			VERBOSE(("interrupt: stack segment is code segment"));
			EXCEPTION(TS_EXCEPTION, exc_errcode);
		}
		if (!SEG_IS_WRITABLE_DATA(&ss_sel.desc)) {
			VERBOSE(("interrupt: stack segment is read-only data segment"));
			EXCEPTION(TS_EXCEPTION, exc_errcode);
		}

		/* not present */
		if (selector_is_not_present(&ss_sel)) {
			VERBOSE(("interrupt: selector is not present"));
			EXCEPTION(SS_EXCEPTION, exc_errcode);
		}

		/* check stack room size */
		cpu_stack_push_check(ss_sel.idx, &ss_sel.desc, new_sp, stacksize, ss_sel.desc.d);

		/* out of range */
		if (new_ip > cs_sel.desc.u.seg.limit) {
			VERBOSE(("interrupt: new_ip is out of range. new_ip = %08x, limit = %08x", new_ip, cs_sel.desc.u.seg.limit));
			EXCEPTION(GP_EXCEPTION, 0);
		}

		/* commit: switch stack and code segment, then build frame */
		load_ss(ss_sel.selector, &ss_sel.desc, cs_sel.desc.dpl);
		CPU_ESP = new_sp;

		load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.desc.dpl);
		CPU_EIP = new_ip;

		if (is32bit) {
			if (CPU_STAT_VM86) {
				/* save VM86 data segments, then invalidate them */
				PUSH0_32(CPU_GS);
				PUSH0_32(CPU_FS);
				PUSH0_32(CPU_DS);
				PUSH0_32(CPU_ES);

				LOAD_SEGREG(CPU_GS_INDEX, 0);
				CPU_STAT_SREG(CPU_GS_INDEX).valid = 0;
				LOAD_SEGREG(CPU_FS_INDEX, 0);
				CPU_STAT_SREG(CPU_FS_INDEX).valid = 0;
				LOAD_SEGREG(CPU_DS_INDEX, 0);
				CPU_STAT_SREG(CPU_DS_INDEX).valid = 0;
				LOAD_SEGREG(CPU_ES_INDEX, 0);
				CPU_STAT_SREG(CPU_ES_INDEX).valid = 0;
			}
			PUSH0_32(old_ss);
			PUSH0_32(old_sp);
			PUSH0_32(old_flags);
			PUSH0_32(old_cs);
			PUSH0_32(old_ip);
			if (errorp) {
				PUSH0_32(error_code);
			}
		} else {
			if (CPU_STAT_VM86) {
				ia32_panic("interrupt: 16bit gate && VM86");
			}
			PUSH0_16(old_ss);
			PUSH0_16(old_sp);
			PUSH0_16(old_flags);
			PUSH0_16(old_cs);
			PUSH0_16(old_ip);
			if (errorp) {
				PUSH0_16(error_code);
			}
		}
	} else {
		/* intra-privilege delivery is illegal from VM86 */
		if (CPU_STAT_VM86) {
			VERBOSE(("interrupt: VM86"));
			EXCEPTION(GP_EXCEPTION, exc_errcode);
		}
		if (!SEG_IS_CONFORMING_CODE(&cs_sel.desc) && (cs_sel.desc.dpl != CPU_STAT_CPL)) {
			VERBOSE(("interrupt: %sCONFORMING-CODE-SEGMENT(%d) && DPL[CS](%d) != CPL", SEG_IS_CONFORMING_CODE(&cs_sel.desc) ? "" : "NON-", cs_sel.desc.dpl, CPU_STAT_CPL));
			EXCEPTION(GP_EXCEPTION, exc_errcode);
		}

		VERBOSE(("interrupt: INTRA-PRIVILEGE-LEVEL-INTERRUPT"));

		/* flags/CS/IP (+error code) on the current stack */
		stacksize = errorp ? 8 : 6;
		if (is32bit) {
			stacksize *= 2;
		}

		/* check stack room size */
		if (CPU_STAT_SS32) {
			sp = CPU_ESP;
		} else {
			sp = CPU_SP;
		}

		/*
		 * 17.1
		 * When program control is transferred to another code
		 * segment through a call gate, interrupt gate, or trap
		 * gate, the operand size used during the transfer is
		 * determined by the type of gate used (16-bit or 32-bit),
		 * regardless of the D flag of the transfer instruction
		 * or any prefix.
		 */
		SS_PUSH_CHECK1(sp, stacksize, is32bit);

		/* out of range */
		if (new_ip > cs_sel.desc.u.seg.limit) {
			VERBOSE(("interrupt: new_ip is out of range. new_ip = %08x, limit = %08x", new_ip, cs_sel.desc.u.seg.limit));
			EXCEPTION(GP_EXCEPTION, 0);
		}

		load_cs(cs_sel.selector, &cs_sel.desc, CPU_STAT_CPL);
		CPU_EIP = new_ip;

		if (is32bit) {
			PUSH0_32(old_flags);
			PUSH0_32(old_cs);
			PUSH0_32(old_ip);
			if (errorp) {
				PUSH0_32(error_code);
			}
		} else {
			PUSH0_16(old_flags);
			PUSH0_16(old_cs);
			PUSH0_16(old_ip);
			if (errorp) {
				PUSH0_16(error_code);
			}
		}
	}

	set_eflags(new_flags, mask);

	VERBOSE(("interrupt: new EIP = %04x:%08x, ESP = %04x:%08x", CPU_CS, CPU_EIP, CPU_SS, CPU_ESP));
}
/*
 * undoc 386: LOADALL.
 * This emulator core does not implement the undocumented LOADALL
 * instruction; a guest reaching it aborts emulation with a diagnostic.
 */
void
LOADALL(void)
{
	ia32_panic("LOADALL: not implemented yet.");
}