/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 * OUTPUT:
 *  pbundle: used to return the fetched bundle.
 */
unsigned long
fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
    u64     gpip=0;   // guest physical IP
    u64     *vpa;
    thash_data_t    *tlb;
    u64     mfn, maddr;
    struct page_info* page;

 again:
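    /*
     * Two translation paths: with psr.it clear the guest fetches in
     * I-side physical mode, so gip is already a guest physical address;
     * otherwise gip is translated through the guest's I-side virtual TLB.
     */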
    if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
        gpip = pa_clear_uc(gip);	// clear UC bit
    }
    else {
        tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
//        if( tlb == NULL )
//             tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
        if (tlb)
            gpip = thash_translate(tlb, gip);
    }
    if( gpip){
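        /* Translation succeeded: resolve the guest frame to a machine
         * frame through the P2M and rebuild the machine address. */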
        mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            panic_domain(vcpu_regs(vcpu), "fetch_code: invalid memory\n");
        maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
    } else {
        /*
         * Truncated in this excerpt: the fallback translation for the
         * gpip == 0 case and the tail of fetch_code that maps maddr
         * and copies the bundle into pbundle (which is what the
         * otherwise-unused vpa/page locals and the "again" retry
         * label serve).
         */
    }
}
void
switch_to_physical_rid(VCPU *vcpu)
{
    u64 psr;
    u64 rr;

    switch (vcpu->arch.arch_vmx.mmu_mode) {
    case VMX_MMU_PHY_DT:
        rr = vcpu->arch.metaphysical_rid_dt;
        break;
    case VMX_MMU_PHY_D:
        rr = vcpu->arch.metaphysical_rid_d;
        break;
    default:
        panic_domain(NULL, "bad mmu mode value");
    }
    
    psr = ia64_clear_ic();
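    /*
     * PSR.ic (interruption collection) stays off while rr0/rr4 are
     * rewritten with the metaphysical RID; the saved psr is restored
     * below.
     */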
    ia64_set_rr(VRN0<<VRN_SHIFT, rr);
    ia64_dv_serialize_data();
    ia64_set_rr(VRN4<<VRN_SHIFT, rr);
    ia64_srlz_d();
    
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
void
switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    int act;
    /* Switch to physical mode when injecting PAL_INIT */
    if (unlikely(MODE_IND(new_psr) == 0 &&
                 vcpu_regs(vcpu)->cr_iip == PAL_INIT_ENTRY))
        act = SW_2P_DT;
    else
        act = mm_switch_action(old_psr, new_psr);
    perfc_incra(vmx_switch_mm_mode, act);
    switch (act) {
    case SW_2P_DT:
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
        switch_to_physical_rid(vcpu);
        break;
    case SW_2P_D:
//        printk("V -> P_D mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_D;
        switch_to_physical_rid(vcpu);
        break;
    case SW_2V:
//        printk("P -> V mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_VIRTUAL;
        switch_to_virtual_rid(vcpu);
        break;
    case SW_SELF:
        printk("Switch to self-0x%lx!!! MM mode doesn't change...\n",
            old_psr.val);
        break;
    case SW_NOP:
//        printk("No action required for mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        break;
    default:
        /* Sanity check */
        panic_domain(vcpu_regs(vcpu),
                     "Unexpected virtual <--> physical mode transition, "
                     "old:%lx, new:%lx\n", old_psr.val, new_psr.val);
        break;
    }
    return;
}
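/*
 * mm_switch_action() itself is outside this excerpt.  In the Xen ia64
 * tree it is a table lookup keyed by the it/dt/rt bits of the old and
 * new PSR values; the sketch below shows that shape only.  The exact
 * definitions are assumptions, not verified against this excerpt.
 */
#if 0	/* illustrative sketch only */
#define MODE_IND(psr)	\
    (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)

/* 8x8 table mapping (old mode, new mode) to the SW_* actions above */
extern const unsigned char mm_switch_table[8][8];

#define mm_switch_action(opsr, npsr)	\
    (mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)])
#endif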
void
vmx_load_all_rr(VCPU *vcpu)
{
	unsigned long rr0, rr4;

	switch (vcpu->arch.arch_vmx.mmu_mode) {
	case VMX_MMU_VIRTUAL:
		rr0 = vcpu->arch.metaphysical_saved_rr0;
		rr4 = vcpu->arch.metaphysical_saved_rr4;
		break;
	case VMX_MMU_PHY_DT:
		rr0 = vcpu->arch.metaphysical_rid_dt;
		rr4 = vcpu->arch.metaphysical_rid_dt;
		break;
	case VMX_MMU_PHY_D:
		rr0 = vcpu->arch.metaphysical_rid_d;
		rr4 = vcpu->arch.metaphysical_rid_d;
		break;
	default:
		panic_domain(NULL, "bad mmu mode value");
	}

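	/*
	 * Reload every region register: rr0/rr4 take either the guest's
	 * saved values (virtual mode) or the metaphysical RID chosen
	 * above; the remaining regions always take the virtualized RRs.
	 * Successive mov-to-rr writes are separated by data serialization.
	 */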
	ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
	ia64_dv_serialize_data();
	ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
	ia64_dv_serialize_data();
	ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
	ia64_dv_serialize_data();
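	/*
	 * rr7 covers the region Xen itself runs in, so it is switched
	 * through a dedicated routine rather than a plain mov-to-rr
	 * (assumption based on the separate vmx_switch_rr7_vcpu() call).
	 */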
	vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])));
	ia64_set_pta(VMX(vcpu, mpta));
	vmx_ia64_set_dcr(vcpu);

	ia64_srlz_d();
}
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{

    u64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;
    old_psr.val=VCPU(vcpu, vpsr);

    regs=vcpu_regs(vcpu);
    /* We only support a guest with:
     *  vpsr.pk = 0
     *  vpsr.is = 0
     * Otherwise panic.
     */
    if ( value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM )) {
        panic_domain(regs, "Setting unsupported guest psr!");
    }

    /*
     * The IA64_PSR bits id/da/dd/ss/ed/ia become 0 after each
     * instruction completes successfully, so they are cleared in the
     * virtual PSR here; the guest-supplied values reach the machine
     * PSR (cr_ipsr) below instead.
     */
    VCPU(vcpu,vpsr) = value &
            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
                IA64_PSR_ED | IA64_PSR_IA));

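    /*
     * Enabling vpsr.i (0 -> 1) may unmask a pending virtual interrupt,
     * so flag the condition for re-evaluation.
     */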
    if ( !old_psr.i && (value & IA64_PSR_I) ) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val=VCPU(vcpu, vpsr);
#ifdef	VTI_DEBUG    
    {
    struct pt_regs *regs = vcpu_regs(vcpu);
    guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
    guest_psr_buf[guest_psr_index].psr = new_psr.val;
    if (++guest_psr_index >= 100)
        guest_psr_index = 0;
    }
#endif    
#if 0
    if (old_psr.i != new_psr.i) {
    if (old_psr.i)
        last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
    else
        last_guest_rsm = 0;
    }
#endif

    /*
     * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
     * except for the following bits:
     *  ic/i/dt/si/rt/mc/it/bn/vm
     */
    mask =  IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
        IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
        IA64_PSR_VM;

    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    mask |= IA64_PSR_PP;

    regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );

    if (FP_PSR(vcpu) & IA64_PSR_DFH)
        regs->cr_ipsr |= IA64_PSR_DFH;

    if (unlikely(vcpu->domain->debugger_attached)) {
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_SS)
            regs->cr_ipsr |= IA64_PSR_SS;
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_DB)
            regs->cr_ipsr |= IA64_PSR_DB;
    }

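    /*
     * A change to psr.it/dt/rt may demand a physical <-> virtual MMU
     * mode switch; check_mm_mode_switch() compares those bits and
     * calls switch_mm_mode() above when they differ.
     */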
    check_mm_mode_switch(vcpu, old_psr, new_psr);
    return;
}