Example #1
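/*
 * Emulate a two-byte (0F-prefixed) instruction for the guest.  `lina`
 * points at the 0F opcode byte in guest-linear memory.  On success the
 * guest EIP is advanced past the instruction and 0 is returned; if a
 * guest exception was raised instead, -1 is returned with EIP untouched.
 */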
int emu_0f(unsigned char *lina)
{
    unsigned char *orig_lina = lina;

    switch (lina[1]) {
    case 0x00:		/* lldt, ltr */
    {
	switch (REG_OPCODE(lina[2])) {
	case 2:		/* 0F 00 /2 LLDT r/m16 Load segment selector r/m16 into LDTR */
	{
	    u_int addr, is_addr, reg, selector;

	    trace("lldt\n");
	    if (!opa.pe) {
		set_guest_exc(EXC_UD, 0);
		goto exc;
	    }
	    if (opa.cpl != 0) {
		set_guest_exc(EXC_GP, 0);
		goto exc;
	    }
	    lina += 2;	/* move up to RM byte */
	    decode_rm16(&addr, &is_addr, &reg, &lina);
	    if (emu_get(is_addr, addr, &selector, 2))
		goto exc;
	    if (load_ldtr(selector))
		goto exc;

	    break;
	}
	case 3: 	 /* 0F 00 /3 LTR r/m16 Load r/m16 into TR */
	{
	    u_int addr, is_addr, reg;
	    Bit32u selector;

	    trace("ltr\n");
	    if (!opa.pe) {
		set_guest_exc(EXC_UD, 0);
		goto exc;
	    }
	    if (opa.cpl != 0) {
		set_guest_exc(EXC_GP, 0);
		goto exc;
	    }
	    lina += 2;  /* move up to RM byte */
	    decode_rm16(&addr, &is_addr, &reg, &lina);
	    if (emu_get(is_addr, addr, &selector, 2))
		goto exc;
	    if (load_tr(selector))
		goto exc;

	    break;
	}
	default:
	    unknown(lina);
	}
	break;
    }
    
    case 0x01:		/* invlpg, lgdt, lidt, lmsw */
    {
	int reg_op = REG_OPCODE(lina[2]);
	switch (reg_op) {
	case 2:				/* lgdt */
	case 3:				/* lidt */
	{
	    u_int addr, is_addr, reg, limit, base;
	    
	    lina += 2;                       /* move up to RM byte */
	    decode_rm(&addr, &is_addr, &reg, &lina);
	    ASSERT(is_addr);
	    /* addr now points to the m16&32; lina is at next instr */
	    if (get_memory(opa.seg, addr, &limit, 2) != 0 ||
		get_memory(opa.seg, addr+2, &base, opa.opb==4 ? 4 : 3) != 0)
		goto exc;
	    
	    /* by definition of lgdt/lidt, base is a linear address already. */
	    if (reg_op == 2) {
		set_gdt(base, limit);
	    } else {
		set_idt(base, limit);
	    }
	    debug_mem("loaded %cdt from 0x%08x\n", reg_op==2 ? 'g' : 'i', base);
	    break;
	}
	case 6:		/* 0F 01 /6  LMSW r/m16  Loads r/m16 in msw of CR0 */
	{
	    u_int addr, is_addr, reg, val;
	    
	    trace("lmsw\n");
	    
	    if (opa.pe && opa.cpl!=0) {
		set_guest_exc(EXC_GP, 0);
		goto exc;
	    }
	    
	    lina += 2;         /* move up to RM byte */
	    decode_rm16(&addr, &is_addr, &reg, &lina);
	    if (emu_get(is_addr, addr, &val, 2))
		goto exc;
	    if ((vmstate.cr[0] & 1) && !(val & 1))
		val |= 1;  /* can't clear PE with lmsw */
	    mov_to_cr(0, val, 0x0000000f); /* only PE, MP, EM, and TS can be affected */
	    
	    break;
	}
	case 7:		/* invlpg */
	{
	    Bit32u ptr;
	    
	    debug("invlpg\n");
	    
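	    /* note: the operand bytes are read below as a direct
	       address of the current operand size, rather than being
	       decoded through decode_rm(); only that encoding is
	       handled here. */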
	    lina += 2;         /* move up to memory operand */
	    if (opa.opb==4) {
		ptr = *(Bit32u*)lina;
	    } else {
		ptr = *(Bit16u*)lina;
	    }
	    lina += opa.opb;
	    
	    if (vmstate.cr[0]&PG_MASK) {
		/* Modify a pte with itself.  This should have the desired side
		   effect of flushing that TLB entry. */
		sys_self_mod_pte_range(0, 0, /* add no flag bits */
				       0, /* remove no flag bits */
				       ptr, 1);
	    }

	    break;
	}
	default:
	    unknown(lina);
	}
	break;
    }

    case 0x06:		/* clts  0F 06 */
    {
	if (opa.cpl!=0) {
	    set_guest_exc(EXC_GP, 0);
	    goto exc;
	} else {	
	    vmstate.cr[0] &= ~TS_MASK;
	    lina += 2;
	}
	break;
    }

    case 0x08:		/* invd  0F 08 */
    case 0x09:		/* wbinvd  0F 09 */
    {
	if (opa.cpl!=0) {
	    set_guest_exc(EXC_GP, 0);
	    goto exc;
	} else {
	    /* will not implement */
	    lina += 2;
	}
	break;
    }

    case 0x0b:		/* UD2 */
    {
	set_guest_exc(EXC_UD, 0);
	goto exc;
    }

    case 0x20:		/* MOV r <- CRx */
    {
	int cr = REG_OPCODE(lina[2]);
	int reg = RM(lina[2]);
	
	ASSERT(cr<5);
	set_reg(reg, vmstate.cr[cr], 4);
	lina += 3;
	break;
    }

    case 0x21:		/* MOV r <- DRx */
    {
	int dr = REG_OPCODE(lina[2]);
	int reg = RM(lina[2]);
	
	set_reg(reg, vmstate.dr[dr], 4);
	lina += 3;
	break;
    }

    case 0x22:		/* MOV CRx <- r */
    {
	int cr = REG_OPCODE(lina[2]);
	
	ASSERT(cr<5);
	if (opa.pe && opa.cpl!=0) {
	    set_guest_exc(EXC_GP, 0);
	    goto exc;
	}
	
	mov_to_cr(cr, get_reg(RM(lina[2]), 4), 0xffffffff);
	lina += 3;
	break;
    }

    case 0x23:		/* MOV DRx <- r */
    {
	int dr = REG_OPCODE(lina[2]);

	debug("mov dr%d <- r%d\n", dr, RM(lina[2]));

	if (opa.pe && opa.cpl!=0) {
	    set_guest_exc(EXC_GP, 0);
	    goto exc;
	}
	
	vmstate.dr[dr] = get_reg(RM(lina[2]), 4);
	lina += 3;
	break;
    }

    case 0x30:		/* wrmsr */
    {
	int ctr = 0;

	if (REG(ecx) == P6MSR_CTRSEL0)
	    ctr = 0;
	else if (REG(ecx) == P6MSR_CTRSEL1)
	    ctr = 1;
	else
	    unknown(lina);    /* only performance counters are implemented */

	sys_pctr(ctr==0 ? PCIOCS0 : PCIOCS1, 0, &REG(eax));
	lina += 2;
	break;
    }

    case 0x32:		/* rdmsr */
    {
	struct pctrst p;
	int ctr = 0;

	if (REG(ecx) == P6MSR_CTR0)
	    ctr = 0;
	else if (REG(ecx) == P6MSR_CTR1)
	    ctr = 1;
	else
	    unknown(lina);    /* only performance counters are implemented */

	sys_pctr(PCIOCRD, 0, &p);
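	/* the 64-bit counter is returned in EDX:EAX, as rdmsr requires */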
	REG(eax) = p.pctr_hwc[ctr];
	REG(edx) = p.pctr_hwc[ctr] >> 32;
	lina += 2;
	break;
    }

#if 0
    case 0x33:		/* rdpmc */
    {
	struct pctrst p;

	/* or cpl!=0 and cr4 ... */
	if (REG(ecx)>1) {
	    set_guest_exc(EXC_GP, 0);
	    goto exc;
	}

	sys_pctr(PCIOCRD, 0, &p);
	REG(eax) = p.pctr_hwc[REG(ecx)];
	REG(edx) = p.pctr_hwc[REG(ecx)] >> 32;

	lina += 2;
	break;
    }
#endif

    case 0xa2:		/* cpuid */
    {
	/* cpuid may trap on a Cyrix.  I don't care. */
	leaveemu(ERR_UNIMPL);
	break;
    }

    case 0xb2:		/* lss */
    case 0xb4:		/* lfs */
    case 0xb5:		/* lgs */
    {
	int seg;

	if (lina[1]==0xb2) {
	    seg = REGNO_SS;
	} else if (lina[1]==0xb4) {
	    seg = REGNO_FS;
	} else
	    seg = REGNO_GS;
	if (load_far_pointer(&lina, seg))
	    goto exc;
	break;
    }

    case 0x31:		/* rdtsc ... should be enabled in xok */
    case 0xc8:		/* bswap  should not trap */
    default:
	unknown(lina);
    }

    REG(eip) += lina-orig_lina;
    return 0;

 exc:
    return -1;
}
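
For context, here is a minimal sketch of the dispatch step that would hand a
faulting instruction to emu_0f.  The wrapper name is hypothetical and handling
of one-byte opcodes is elided:

/* hypothetical dispatcher for a faulting guest instruction */
static int emu_instr(unsigned char *lina)
{
    if (lina[0] == 0x0f)
	return emu_0f(lina);	/* two-byte opcode, handled above */
    unknown(lina);		/* same catch-all emu_0f uses */
    return -1;
}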
Example #2
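/*
 * Switch to the task whose TSS is described by task_sel.  `type`
 * distinguishes JMP, CALL, IRET and interrupt-initiated switches,
 * which differ in how the busy flag, the NT flag and the TSS back
 * link are handled below.
 */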
void
task_switch(selector_t *task_sel, task_switch_type_t type)
{
    UINT32 regs[CPU_REG_NUM];
    UINT32 eip;
    UINT32 new_flags;
    UINT32 cr3 = 0;
    UINT16 sreg[CPU_SEGREG_NUM];
    UINT16 ldtr;
    UINT16 iobase;
    UINT16 t;

    selector_t cs_sel;
    int rv;

    UINT32 cur_base;	/* current task state */
    UINT32 task_base;	/* new task state */
    UINT32 old_flags = REAL_EFLAGREG;
    BOOL task16;
    UINT i;

    VERBOSE(("task_switch: start"));

    /* limit check */
    switch (task_sel->desc.type) {
    case CPU_SYSDESC_TYPE_TSS_32:
    case CPU_SYSDESC_TYPE_TSS_BUSY_32:
        if (task_sel->desc.u.seg.limit < 0x67) {
            EXCEPTION(TS_EXCEPTION, task_sel->idx);
        }
        task16 = FALSE;
        break;

    case CPU_SYSDESC_TYPE_TSS_16:
    case CPU_SYSDESC_TYPE_TSS_BUSY_16:
        if (task_sel->desc.u.seg.limit < 0x2b) {
            EXCEPTION(TS_EXCEPTION, task_sel->idx);
        }
        task16 = TRUE;
        break;

    default:
        ia32_panic("task_switch: descriptor type is invalid.");
        task16 = FALSE;		/* keep the compiler happy */
        break;
    }

    cur_base = CPU_TR_DESC.u.seg.segbase;
    task_base = task_sel->desc.u.seg.segbase;
    VERBOSE(("task_switch: cur task (%04x) = 0x%08x:%08x", CPU_TR, cur_base, CPU_TR_DESC.u.seg.limit));
    VERBOSE(("task_switch: new task (%04x) = 0x%08x:%08x", task_sel->selector, task_base, task_sel->desc.u.seg.limit));
    VERBOSE(("task_switch: %dbit task switch", task16 ? 16 : 32));

#if defined(MORE_DEBUG)
    {
        UINT32 v;

        VERBOSE(("task_switch: new task"));
        for (i = 0; i < task_sel->desc.u.seg.limit; i += 4) {
            v = cpu_kmemoryread_d(task_base + i);
            VERBOSE(("task_switch: 0x%08x: %08x", task_base + i,v));
        }
    }
#endif

    if (CPU_STAT_PAGING) {
        /* task state paging check */
        paging_check(cur_base, CPU_TR_DESC.u.seg.limit, CPU_PAGE_WRITE_DATA|CPU_MODE_SUPERVISER);
        paging_check(task_base, task_sel->desc.u.seg.limit, CPU_PAGE_WRITE_DATA|CPU_MODE_SUPERVISER);
    }

    /* load task state */
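    /*
     * The offsets below follow the architectural TSS layouts.  32-bit
     * TSS: CR3 at 28, EIP at 32, EFLAGS at 36, eight GPRs at 40..71,
     * six segment selectors at 72..95 (one per 4 bytes), LDTR at 96,
     * debug-trap (T) bit at 100, I/O map base at 102.  16-bit TSS:
     * IP at 14, FLAGS at 16, GPRs at 18..33, four segment selectors
     * at 34..41, LDT selector at 42.
     */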
    memset(sreg, 0, sizeof(sreg));
    if (!task16) {
        if (CPU_STAT_PAGING) {
            cr3 = cpu_kmemoryread_d(task_base + 28);
        }
        eip = cpu_kmemoryread_d(task_base + 32);
        new_flags = cpu_kmemoryread_d(task_base + 36);
        for (i = 0; i < CPU_REG_NUM; i++) {
            regs[i] = cpu_kmemoryread_d(task_base + 40 + i * 4);
        }
        for (i = 0; i < CPU_SEGREG_NUM; i++) {
            sreg[i] = cpu_kmemoryread_w(task_base + 72 + i * 4);
        }
        ldtr = cpu_kmemoryread_w(task_base + 96);
        t = cpu_kmemoryread_w(task_base + 100);
        if (t & 1) {
            CPU_STAT_BP_EVENT |= CPU_STAT_BP_EVENT_TASK;
        }
        iobase = cpu_kmemoryread_w(task_base + 102);
    } else {
        eip = cpu_kmemoryread_w(task_base + 14);
        new_flags = cpu_kmemoryread_w(task_base + 16);
        for (i = 0; i < CPU_REG_NUM; i++) {
            regs[i] = cpu_kmemoryread_w(task_base + 18 + i * 2);
        }
        for (i = 0; i < CPU_SEGREG286_NUM; i++) {
            sreg[i] = cpu_kmemoryread_w(task_base + 34 + i * 2);
        }
        ldtr = cpu_kmemoryread_w(task_base + 42);
        iobase = 0;
        t = 0;
    }

#if defined(DEBUG)
    VERBOSE(("task_switch: current task"));
    if (!task16) {
        VERBOSE(("task_switch: CR3     = 0x%08x", CPU_CR3));
    }
    VERBOSE(("task_switch: eip     = 0x%08x", CPU_EIP));
    VERBOSE(("task_switch: eflags  = 0x%08x", old_flags));
    for (i = 0; i < CPU_REG_NUM; i++) {
        VERBOSE(("task_switch: regs[%d] = 0x%08x", i, CPU_REGS_DWORD(i)));
    }
    for (i = 0; i < CPU_SEGREG_NUM; i++) {
        VERBOSE(("task_switch: sreg[%d] = 0x%04x", i, CPU_REGS_SREG(i)));
    }
    VERBOSE(("task_switch: ldtr    = 0x%04x", CPU_LDTR));

    VERBOSE(("task_switch: new task"));
    if (!task16) {
        VERBOSE(("task_switch: CR3     = 0x%08x", cr3));
    }
    VERBOSE(("task_switch: eip     = 0x%08x", eip));
    VERBOSE(("task_switch: eflags  = 0x%08x", new_flags));
    for (i = 0; i < CPU_REG_NUM; i++) {
        VERBOSE(("task_switch: regs[%d] = 0x%08x", i, regs[i]));
    }
    for (i = 0; i < CPU_SEGREG_NUM; i++) {
        VERBOSE(("task_switch: sreg[%d] = 0x%04x", i, sreg[i]));
    }
    VERBOSE(("task_switch: ldtr    = 0x%04x", ldtr));
    if (!task16) {
        VERBOSE(("task_switch: t       = 0x%04x", t));
        VERBOSE(("task_switch: iobase  = 0x%04x", iobase));
    }
#endif

    /* For IRET or JMP, the busy flag in the outgoing task must be cleared. */
    /* For IRET, NT must also be cleared in the current EFLAGS. */
    switch (type) {
    case TASK_SWITCH_IRET:
        /* clear NT_FLAG */
        old_flags &= ~NT_FLAG;
    /*FALLTHROUGH*/
    case TASK_SWITCH_JMP:
        /* clear busy flags in current task */
        CPU_SET_TASK_FREE(CPU_TR, &CPU_TR_DESC);
        break;

    case TASK_SWITCH_CALL:
    case TASK_SWITCH_INTR:
        /* Nothing to do */
        break;

    default:
        ia32_panic("task_switch(): task switch type is invalid");
        break;
    }

    /* save this task state in this task state segment */
    if (!task16) {
        cpu_kmemorywrite_d(cur_base + 32, CPU_EIP);
        cpu_kmemorywrite_d(cur_base + 36, old_flags);
        for (i = 0; i < CPU_REG_NUM; i++) {
            cpu_kmemorywrite_d(cur_base + 40 + i * 4, CPU_REGS_DWORD(i));
        }
        for (i = 0; i < CPU_SEGREG_NUM; i++) {
            cpu_kmemorywrite_w(cur_base + 72 + i * 4, CPU_REGS_SREG(i));
        }
    } else {
        cpu_kmemorywrite_w(cur_base + 14, CPU_IP);
        cpu_kmemorywrite_w(cur_base + 16, (UINT16)old_flags);
        for (i = 0; i < CPU_REG_NUM; i++) {
            cpu_kmemorywrite_w(cur_base + 18 + i * 2, CPU_REGS_WORD(i));
        }
        for (i = 0; i < CPU_SEGREG286_NUM; i++) {
            cpu_kmemorywrite_w(cur_base + 34 + i * 2, CPU_REGS_SREG(i));
        }
    }

#if defined(MORE_DEBUG)
    {
        UINT32 v;

        VERBOSE(("task_switch: current task"));
        for (i = 0; i < CPU_TR_DESC.u.seg.limit; i += 4) {
            v = cpu_kmemoryread_d(cur_base + i);
            VERBOSE(("task_switch: 0x%08x: %08x", cur_base + i, v));
        }
    }
#endif

    /* set back link selector */
    switch (type) {
    case TASK_SWITCH_CALL:
    case TASK_SWITCH_INTR:
        /* record the outgoing TR as the back link in the new TSS */
        cpu_kmemorywrite_w(task_base, CPU_TR);
        break;

    case TASK_SWITCH_IRET:
    case TASK_SWITCH_JMP:
        /* Nothing to do */
        break;

    default:
        ia32_panic("task_switch(): task switch type is invalid");
        break;
    }

    /* Now task switching! */

    /* For CALL or INTR, set NT in the new EFLAGS image. */
    /* For CALL, INTR or JMP, set the busy flag in the incoming task. */
    switch (type) {
    case TASK_SWITCH_CALL:
    case TASK_SWITCH_INTR:
        /* set NT in the new EFLAGS image */
        new_flags |= NT_FLAG;
    /*FALLTHROUGH*/
    case TASK_SWITCH_JMP:
        CPU_SET_TASK_BUSY(task_sel->selector, &task_sel->desc);
        break;

    case TASK_SWITCH_IRET:
        /* the incoming task must already be marked busy */
        if (task_sel->desc.valid) {
            UINT32 h;
            h = cpu_kmemoryread_d(task_sel->addr + 4);
            if ((h & CPU_TSS_H_BUSY) == 0) {
                ia32_panic("task_switch: new task is not busy");
            }
        }
        break;

    default:
        ia32_panic("task_switch(): task switch type is invalid");
        break;
    }

    /* set the TS (task-switched) bit in CR0 */
    CPU_CR0 |= CPU_CR0_TS;

    /* load task selector to CPU_TR */
    CPU_TR = task_sel->selector;
    CPU_TR_DESC = task_sel->desc;

    /* load task state (CR3, EFLAG, EIP, GPR, segreg, LDTR) */

    /* set new CR3 */
    if (!task16 && CPU_STAT_PAGING) {
        set_CR3(cr3);
    }

    /* set new EIP, GPR */
    CPU_PREV_EIP = CPU_EIP = eip;
    for (i = 0; i < CPU_REG_NUM; i++) {
        CPU_REGS_DWORD(i) = regs[i];
    }
    for (i = 0; i < CPU_SEGREG_NUM; i++) {
        CPU_REGS_SREG(i) = sreg[i];
        CPU_STAT_SREG_INIT(i);
    }

    /* set new EFLAGS */
    set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG);

    /* I/O deny bitmap */
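    /*
     * A usable I/O permission bitmap runs from iobase up to the TSS
     * limit; when the map base is zero or lies beyond the limit there
     * is no bitmap, so CPU_STAT_IOLIMIT is cleared.
     */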
    if (!task16) {
        if (iobase != 0 && iobase < task_sel->desc.u.seg.limit) {
            CPU_STAT_IOLIMIT = (UINT16)(task_sel->desc.u.seg.limit - iobase);
            CPU_STAT_IOADDR = task_sel->desc.u.seg.segbase + iobase;
        } else {
            CPU_STAT_IOLIMIT = 0;
        }
    } else {
        CPU_STAT_IOLIMIT = 0;
    }
    VERBOSE(("task_switch: ioaddr = %08x, limit = %08x", CPU_STAT_IOADDR, CPU_STAT_IOLIMIT));

#if defined(IA32_SUPPORT_DEBUG_REGISTER)
    /* check resume flag */
    if (CPU_EFLAG & RF_FLAG) {
        CPU_STAT_BP_EVENT |= CPU_STAT_BP_EVENT_RF;
    }

    /* clear local break point flags */
    CPU_DR7 &= ~(CPU_DR7_L(0)|CPU_DR7_L(1)|CPU_DR7_L(2)|CPU_DR7_L(3)|CPU_DR7_LE);
    CPU_STAT_BP = 0;
    for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) {
        if (CPU_DR7 & CPU_DR7_G(i)) {
            CPU_STAT_BP |= (1 << i);
        }
    }
#endif

    /* load new LDTR */
    load_ldtr(ldtr, TS_EXCEPTION);

    /* set new segment register */
    if (!CPU_STAT_VM86) {
        /* clear segment descriptor cache */
        for (i = 0; i < CPU_SEGREG_NUM; i++) {
            CPU_STAT_SREG_CLEAR(i);
        }

        /* load CS */
        rv = parse_selector(&cs_sel, sreg[CPU_CS_INDEX]);
        if (rv < 0) {
            VERBOSE(("task_switch: load CS failure (sel = 0x%04x, rv = %d)", sreg[CPU_CS_INDEX], rv));
            EXCEPTION(TS_EXCEPTION, cs_sel.idx);
        }

        /* CS must refer to a code segment */
        if (!cs_sel.desc.s || !cs_sel.desc.u.seg.c) {
            EXCEPTION(TS_EXCEPTION, cs_sel.idx);
        }

        /* check privilege level */
        if (!cs_sel.desc.u.seg.ec) {
            /* nonconforming code segment */
            if (cs_sel.desc.dpl != cs_sel.rpl) {
                EXCEPTION(TS_EXCEPTION, cs_sel.idx);
            }
        } else {
            /* conforming code segment: DPL must not exceed RPL */
            if (cs_sel.desc.dpl > cs_sel.rpl) {
                EXCEPTION(TS_EXCEPTION, cs_sel.idx);
            }
        }

        /* fault if the code segment is not present */
        rv = selector_is_not_present(&cs_sel);
        if (rv < 0) {
            EXCEPTION(NP_EXCEPTION, cs_sel.idx);
        }

        /* Now loading CS register */
        load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.desc.dpl);

        /* load ES, SS, DS, FS, GS segment register */
        for (i = 0; i < CPU_SEGREG_NUM; i++) {
            if (i != CPU_CS_INDEX) {
                load_segreg(i, sreg[i], TS_EXCEPTION);
            }
        }
    }

    /* fault if the new EIP lies outside the CS limit */
    if (CPU_EIP > CPU_STAT_CS_LIMIT) {
        VERBOSE(("task_switch: new_ip is out of range. new_ip = %08x, limit = %08x", CPU_EIP, CPU_STAT_CS_LIMIT));
        EXCEPTION(GP_EXCEPTION, 0);
    }

    VERBOSE(("task_switch: done."));
}
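
For context, here is a minimal sketch of how a far JMP to a TSS selector might
reach task_switch.  The wrapper name is hypothetical and the descriptor checks
are abbreviated relative to a real implementation:

/* hypothetical caller: JMP far to a TSS selector */
void
jmp_far_to_tss(UINT16 selector)
{
    selector_t sel;

    if (parse_selector(&sel, selector) < 0) {
        EXCEPTION(GP_EXCEPTION, sel.idx);
    }
    /* a JMP may not target a TSS that is already busy */
    if (sel.desc.type == CPU_SYSDESC_TYPE_TSS_BUSY_16
     || sel.desc.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) {
        EXCEPTION(GP_EXCEPTION, sel.idx);
    }
    task_switch(&sel, TASK_SWITCH_JMP);
}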