Example #1
void set_up_temp_gdt()
{
	temp_gdt[0] = set_seg_null;						// null descriptor (mandatory first GDT entry)
	temp_gdt[1] = set_seg(STA_X|STA_R, 0x0, 0xFFFFFFFF, 0);		// 32-bit flat kernel code segment
	temp_gdt[2] = set_seg(STA_W, 0x0, 0xFFFFFFFF, 0);			// 32-bit flat kernel data segment
	temp_gdt[3] = set_seg_real_mode(STA_X|STA_R, 0x10000, 0xFFFF, 0);	// 16-bit ("normal") code segment for real-mode use
	temp_gdt[4] = set_seg_real_mode(STA_W, 0x0, 0xFFFF, 0);		// 16-bit ("normal") data segment for real-mode use

	sda.lim = sizeof(temp_gdt) - 1;			// GDT limit = table size in bytes, minus one
	sda.base = (uint32_t)temp_gdt - KERNEL_BASE_ADDR;
	lgdt(&sda);
}
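
The set_seg()/set_seg_real_mode() helpers used above are not shown in this excerpt. For reference, a minimal sketch of a set_seg() consistent with these call sites (type, base, byte limit, DPL; 32-bit flat segments with 4 KiB granularity) might look like the following. The descriptor struct, its field names, and the granularity choice are assumptions modelled on the standard x86 descriptor layout, not this kernel's actual definitions.

#include <stdint.h>

/* Assumed layout of an 8-byte x86 segment descriptor (Intel SDM, Vol. 3). */
struct seg_descriptor {
	unsigned lim_15_0   : 16;	/* low 16 bits of the segment limit   */
	unsigned base_15_0  : 16;	/* low 16 bits of the base address    */
	unsigned base_23_16 : 8;	/* bits 16..23 of the base address    */
	unsigned type       : 4;	/* segment type (STA_X, STA_W, ...)   */
	unsigned s          : 1;	/* 0 = system, 1 = code/data          */
	unsigned dpl        : 2;	/* descriptor privilege level         */
	unsigned p          : 1;	/* present                            */
	unsigned lim_19_16  : 4;	/* bits 16..19 of the segment limit   */
	unsigned avl        : 1;	/* available for software use         */
	unsigned l          : 1;	/* 64-bit code segment (unused here)  */
	unsigned db         : 1;	/* 0 = 16-bit, 1 = 32-bit segment     */
	unsigned g          : 1;	/* 0 = byte, 1 = 4 KiB granularity    */
	unsigned base_31_24 : 8;	/* top 8 bits of the base address     */
};

/* Sketch of set_seg(): build a present, 32-bit, page-granular descriptor,
 * so the byte limit passed by the caller is shifted down to 4 KiB units. */
static struct seg_descriptor set_seg(uint8_t type, uint32_t base,
                                     uint32_t limit, uint8_t dpl)
{
	struct seg_descriptor d = {0};
	d.lim_15_0   = (limit >> 12) & 0xFFFF;
	d.lim_19_16  = (limit >> 28) & 0xF;
	d.base_15_0  = base & 0xFFFF;
	d.base_23_16 = (base >> 16) & 0xFF;
	d.base_31_24 = (base >> 24) & 0xFF;
	d.type = type & 0xF;
	d.s   = 1;		/* application (code/data) segment */
	d.dpl = dpl & 3;
	d.p   = 1;		/* present */
	d.db  = 1;		/* 32-bit default operand size */
	d.g   = 1;		/* limit counted in 4 KiB pages */
	return d;
}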
Example #2
static int hax_set_segments(CPUArchState *env, struct vcpu_state_t *sregs)
{
    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs->_cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs->_ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs->_es, &env->segs[R_ES]);
        set_v8086_seg(&sregs->_fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs->_gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs->_ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs->_cs, &env->segs[R_CS]);
        set_seg(&sregs->_ds, &env->segs[R_DS]);
        set_seg(&sregs->_es, &env->segs[R_ES]);
        set_seg(&sregs->_fs, &env->segs[R_FS]);
        set_seg(&sregs->_gs, &env->segs[R_GS]);
        set_seg(&sregs->_ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs->_ss.selector = (sregs->_ss.selector & ~3) |
                                  (sregs->_cs.selector & 3);
            sregs->_ss.dpl = sregs->_ss.selector & 3;
        }
    }

    set_seg(&sregs->_tr, &env->tr);
    set_seg(&sregs->_ldt, &env->ldt);
    sregs->_idt.limit = env->idt.limit;
    sregs->_idt.base = env->idt.base;
    sregs->_gdt.limit = env->gdt.limit;
    sregs->_gdt.base = env->gdt.base;
    return 0;
}
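
Neither set_seg() nor set_v8086_seg() appears in this excerpt. Sketched below is what the protected-mode variant typically does: unpack the DESC_* flag word that QEMU caches in SegmentCache.flags into explicit fields of the accelerator's segment structure. The DESC_* shifts and masks are QEMU's own; the segment_desc_t field names on the left-hand side are assumptions about the HAXM interface, not the actual definitions.

/* Sketch, not the actual HAXM code: copy a QEMU SegmentCache into the
 * accelerator's explicit segment representation. */
static void set_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;

    memset(lhs, 0, sizeof(*lhs));
    lhs->selector = rhs->selector;
    lhs->base     = rhs->base;
    lhs->limit    = rhs->limit;
    lhs->type     = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present  = (flags & DESC_P_MASK) != 0;
    lhs->dpl      = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->operand_size = (flags >> DESC_B_SHIFT) & 1;	/* 16- vs 32-bit default size */
    lhs->desc         = (flags & DESC_S_MASK) != 0;	/* code/data vs system segment */
    lhs->long_mode    = (flags >> DESC_L_SHIFT) & 1;	/* 64-bit code segment */
    lhs->granularity  = (flags & DESC_G_MASK) != 0;	/* limit in 4 KiB units */
    lhs->available    = (flags & DESC_AVL_MASK) != 0;
}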
Example #3
int Spliter::split(const char* line, const char* seg) {
	if (NULL == line || NULL == seg) {
		return SPLIT_ERROR;
	}
	if (SPLIT_SUCC != set_seg(seg)) {
		return SPLIT_ERROR;
	}
	int ret = split(line);
	return ret;
}
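
The one-argument split() and the Spliter accessors are not part of this excerpt. Assuming set_seg() stores the separator string and the one-argument overload then tokenizes the cached line, returning SPLIT_SUCC on success, a caller might look like this (purely illustrative; the constructor and accessors are assumptions):

// Hypothetical usage sketch for the two-argument overload above.
Spliter sp;
const char* line = "alpha\tbeta\tgamma";
int rc = sp.split(line, "\t");		// set the separator, then tokenize the line
if (rc != SPLIT_SUCC) {
	// a NULL argument or a failed set_seg() yields SPLIT_ERROR
}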
Example #4
void trap_init_percpu()
{
	extern struct seg_descriptor gdt[CPUNUMS + 5];
	extern int ncpu;
	uint32_t cid = get_cpuid();
	struct taskstate *pts = &(thiscpu->cpu_ts);
	//pts->ts_esp0 = KERNEL_STACKTOP - (KERNEL_STKSIZE + KERNEL_STKGAP) * cid;
	pts->ts_esp0 = KERN_STACKTOP;
	pts->ts_ss0 = _KERNEL_DS_;

	// Install this CPU's TSS descriptor in the GDT.
	gdt[(_TSS0_ >> 3) + cid] = set_seg(STS_T32A, (uint32_t) (pts), sizeof(struct taskstate), 0);
	gdt[(_TSS0_ >> 3) + cid].s = 0;		// system segment: clear the descriptor-type (S) bit

	// Load the task register with this CPU's TSS selector (descriptors are 8 bytes apart).
	ltr(_TSS0_ + cid * sizeof(struct seg_descriptor));
	lidt(&idt_pd);
}
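
trap_init_percpu() relies on the ltr()/lidt() privileged-instruction wrappers, which are not shown. They are usually thin inline-assembly helpers in this style of kernel; the sketch below follows the common xv6/JOS pattern, and the pseudo-descriptor field names are assumptions.

#include <stdint.h>

/* Assumed pseudo-descriptor layout for lidt/lgdt: 16-bit limit followed
 * by a 32-bit linear base address, packed to 6 bytes. */
struct pseudo_desc {
	uint16_t lim;	/* size of the table in bytes, minus one */
	uint32_t base;	/* linear address of the table */
} __attribute__((packed));

static inline void ltr(uint16_t sel)
{
	/* Load the task register with the given TSS selector. */
	asm volatile("ltr %0" : : "r" (sel));
}

static inline void lidt(const struct pseudo_desc *p)
{
	/* Load the IDT base and limit from the pseudo-descriptor. */
	asm volatile("lidt (%0)" : : "r" (p));
}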
Example #5
void kvm_arch_load_regs(CPUState *env, int level)
{
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[100];
    int rc, n, i;

    assert(kvm_cpu_is_stopped(env) || env->thread_id == kvm_get_thread_id());

    regs.rax = env->regs[R_EAX];
    regs.rbx = env->regs[R_EBX];
    regs.rcx = env->regs[R_ECX];
    regs.rdx = env->regs[R_EDX];
    regs.rsi = env->regs[R_ESI];
    regs.rdi = env->regs[R_EDI];
    regs.rsp = env->regs[R_ESP];
    regs.rbp = env->regs[R_EBP];
#ifdef TARGET_X86_64
    regs.r8 = env->regs[8];
    regs.r9 = env->regs[9];
    regs.r10 = env->regs[10];
    regs.r11 = env->regs[11];
    regs.r12 = env->regs[12];
    regs.r13 = env->regs[13];
    regs.r14 = env->regs[14];
    regs.r15 = env->regs[15];
#endif

    regs.rflags = env->eflags;
    regs.rip = env->eip;

    kvm_set_regs(env, &regs);

#ifdef KVM_CAP_XSAVE
    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
        struct kvm_xsave* xsave;

        uint16_t cwd, swd, twd, fop;

        xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
        memset(xsave, 0, sizeof(struct kvm_xsave));
        cwd = swd = twd = fop = 0;
        swd = env->fpus & ~(7 << 11);
        swd |= (env->fpstt & 7) << 11;
        cwd = env->fpuc;
        for (i = 0; i < 8; ++i)
            twd |= (!env->fptags[i]) << i;
        xsave->region[0] = (uint32_t)(swd << 16) + cwd;
        xsave->region[1] = (uint32_t)(fop << 16) + twd;
        memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
                sizeof env->fpregs);
        memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
                sizeof env->xmm_regs);
        xsave->region[XSAVE_MXCSR] = env->mxcsr;
        *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
        memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
                sizeof env->ymmh_regs);
        kvm_set_xsave(env, xsave);
        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
            struct kvm_xcrs xcrs;

            xcrs.nr_xcrs = 1;
            xcrs.flags = 0;
            xcrs.xcrs[0].xcr = 0;
            xcrs.xcrs[0].value = env->xcr0;
            kvm_set_xcrs(env, &xcrs);
        }
        qemu_free(xsave);
    } else {
#endif
        memset(&fpu, 0, sizeof fpu);
        fpu.fsw = env->fpus & ~(7 << 11);
        fpu.fsw |= (env->fpstt & 7) << 11;
        fpu.fcw = env->fpuc;
        for (i = 0; i < 8; ++i)
            fpu.ftwx |= (!env->fptags[i]) << i;
        memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
        memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
        fpu.mxcsr = env->mxcsr;
        kvm_set_fpu(env, &fpu);
#ifdef KVM_CAP_XSAVE
    }
#endif

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    kvm_set_sregs(env, &sregs);

    /* msrs */
    n = 0;
    /* Remember to increase msrs size if you add new registers below */
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS,  env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star)
        kvm_msr_entry_set(&msrs[n++], MSR_STAR,              env->star);
    if (kvm_has_vm_hsave_pa)
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR,             env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE,      env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK,             env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR,             env->lstar);
    }
#endif
    if (level == KVM_PUT_FULL_STATE) {
        /*
         * KVM is yet unable to synchronize TSC values of multiple VCPUs on
         * writeback. Until this is fixed, we only write the offset to SMP
         * guests after migration, desynchronizing the VCPUs, but avoiding
         * huge jump-backs that would occur without any writeback at all.
         */
        if (smp_cpus == 1 || env->tsc != 0) {
            kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        }
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME, env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
    }
#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        if (level == KVM_PUT_RESET_STATE)
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        else if (level == KVM_PUT_FULL_STATE) {
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
            for (i = 0; i < (env->mcg_cap & 0xff); i++)
                kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }
#endif

    rc = kvm_set_msrs(env, msrs, n);
    if (rc == -1)
        perror("kvm_set_msrs FAILED");

    if (level >= KVM_PUT_RESET_STATE) {
        kvm_arch_load_mpstate(env);
        kvm_load_lapic(env);
    }
    if (level == KVM_PUT_FULL_STATE) {
        if (env->kvm_vcpu_update_vapic)
            kvm_tpr_enable_vapic(env);
    }

    kvm_put_vcpu_events(env, level);
    kvm_put_debugregs(env);

    /* must be last */
    kvm_guest_debug_workarounds(env);
}
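
kvm_msr_entry_set() is a small helper that is not part of this excerpt. A sketch consistent with its call sites, assuming the struct kvm_msr_entry layout from the KVM userspace API (index, reserved, data), would be:

#include <stdint.h>
#include <linux/kvm.h>	/* struct kvm_msr_entry */

/* Sketch: fill one slot of the msrs[] array assembled above. */
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
}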