Example #1
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, u64 *val)
{
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if ( vpsr.bn ) {
        *val=VCPU(vcpu,vgr[reg-16]);
        // Check NAT bit
        if ( VCPU(vcpu,vnat) & (1UL<<(reg-16)) ) {
            // TODO
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }

    }
    else {
        *val=VCPU(vcpu,vbgr[reg-16]);
        if ( VCPU(vcpu,vbnat) & (1UL<<reg) ) {
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }

    }
    return IA64_NO_FAULT;
}
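The pattern to notice here is that r16-r31 have two copies, selected by psr.bn, and each copy carries its own NAT vector. Below is a minimal, self-contained model of that bookkeeping (the struct and names are hypothetical; the real state lives in the VPD and is reached through the VCPU() macro). For simplicity the model indexes both NAT vectors by reg-16, whereas the code above indexes vbnat by the full register number; either convention works as long as readers and writers agree. A matching setter sketch appears after Example #11.

#include <stdint.h>
#include <assert.h>

/* Hypothetical stand-in for the per-VCPU banked register state. */
struct banked_grs {
    uint64_t gr[16], bgr[16];   /* bank-1 and bank-0 copies of r16-r31 */
    uint16_t nat, bnat;         /* matching NAT vectors, bit = reg-16 */
    int bn;                     /* current bank: nonzero selects gr/nat */
};

/* Returns 0 on success, -1 on NAT consumption (cf. IA64_FAULT). */
static int get_bgr(const struct banked_grs *b, unsigned reg, uint64_t *val)
{
    unsigned i = reg - 16;
    assert(reg >= 16 && reg < 32);
    if (b->bn) {
        *val = b->gr[i];
        return (b->nat & (1u << i)) ? -1 : 0;
    }
    *val = b->bgr[i];
    return (b->bnat & (1u << i)) ? -1 : 0;
}

int main(void)
{
    struct banked_grs b = { .bn = 0 };
    uint64_t v;
    b.bgr[24 - 16] = 0xdead;
    assert(get_bgr(&b, 24, &v) == 0 && v == 0xdead);
    b.bnat |= 1u << (24 - 16);          /* poison r24 in bank 0 */
    assert(get_bgr(&b, 24, &v) == -1);  /* NAT consumption fault */
    return 0;
}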
Example #2
static void
collect_interruption(VCPU *vcpu)
{
    u64 ipsr;
    u64 vdcr;
    u64 vifs;
    IA64_PSR vpsr;
    REGS * regs = vcpu_regs(vcpu);
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    vcpu_bsw0(vcpu);
    if(vpsr.ic){

        /* Sync the mPSR id/da/dd/ss/ed bits into vIPSR, so that
         * these bits are still set in mPSR after the guest
         * executes rfi.
         */

        ipsr = regs->cr_ipsr;
        vpsr.val = vpsr.val | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
             | IA64_PSR_DD |IA64_PSR_SS |IA64_PSR_ED));
        vcpu_set_ipsr(vcpu, vpsr.val);

        /* For a trap we do not advance IIP to the next instruction
         * here; the caller is assumed to have already set IIP
         * correctly.
         */

        vcpu_set_iip(vcpu , regs->cr_iip);

        /* set vifs.v to zero */
        vifs = VCPU(vcpu,ifs);
        vifs &= ~IA64_IFS_V;
        vcpu_set_ifs(vcpu, vifs);

        vcpu_set_iipa(vcpu, VMX(vcpu,cr_iipa));
    }

    vdcr = VCPU(vcpu,dcr);

    /* Set guest psr:
     * up/mfl/mfh/pk/dt/rt/mc/it remain unchanged,
     * be: set to the value of dcr.be,
     * pp: set to the value of dcr.pp.
     */
    vpsr.val &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
    vpsr.val |= ( vdcr & IA64_DCR_BE);

    /* VDCR pp bit position is different from VPSR pp bit */
    if ( vdcr & IA64_DCR_PP ) {
        vpsr.val |= IA64_PSR_PP;
    } else {
        vpsr.val &= ~IA64_PSR_PP;
    }

    vmx_vcpu_set_psr(vcpu, vpsr.val);

}
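The be/pp handling at the end of collect_interruption() depends on bit positions: dcr.be and psr.be both sit at bit 1, so the value can be copied with a plain OR, while dcr.pp sits at bit 0 and psr.pp at bit 21, so pp must be tested and re-set, as the comment above notes. A standalone sketch with the bit positions written out (the real code uses the IA64_DCR_*/IA64_PSR_* header constants):

#include <stdint.h>
#include <stdio.h>

#define DCR_PP  (1ULL << 0)   /* cr.dcr.pp */
#define DCR_BE  (1ULL << 1)   /* cr.dcr.be, same position as psr.be */
#define PSR_BE  (1ULL << 1)
#define PSR_PP  (1ULL << 21)

/* Recompute psr.be/psr.pp from dcr, as done on interruption delivery. */
static uint64_t psr_from_dcr(uint64_t psr, uint64_t dcr)
{
    psr |= (dcr & DCR_BE);   /* be: positions coincide, copy directly */
    if (dcr & DCR_PP)        /* pp: positions differ, move explicitly */
        psr |= PSR_PP;
    else
        psr &= ~PSR_PP;
    return psr;
}

int main(void)
{
    uint64_t psr = psr_from_dcr(0, DCR_PP);      /* dcr.pp=1, dcr.be=0 */
    printf("psr.pp=%d psr.be=%d\n",
           !!(psr & PSR_PP), !!(psr & PSR_BE));  /* prints 1 and 0 */
    return 0;
}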
Example #3
IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    vpsr.val = VCPU(vcpu, vpsr);

    if(!vpsr.ic)
        VCPU(vcpu,ifs) = regs->cr_ifs;
    regs->cr_ifs = IA64_IFS_V;
    return (IA64_NO_FAULT);
}
Example #4
/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return fetched bundle.
 */
unsigned long
fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
    u64     gpip=0;   // guest physical IP
    u64     *vpa;
    thash_data_t    *tlb;
    u64     mfn, maddr;
    struct page_info* page;

 again:
    if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
        gpip = pa_clear_uc(gip);	// clear UC bit
    }
    else {
        tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
//        if( tlb == NULL )
//             tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
        if (tlb)
            gpip = thash_translate(tlb, gip);
    }
    if( gpip){
        mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            panic_domain(vcpu_regs(vcpu), "fetch_code: invalid memory\n");
        maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
    }else{
Example #5
/*
 * Set vIFA, vITIR and vIHA when vPSR.ic = 1.
 * Parameters:
 *  set_ifa: if true, set vIFA
 *  set_itir: if true, set vITIR
 *  set_iha: if true, set vIHA
 */
void
set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
          int set_ifa, int set_itir, int set_iha)
{
    IA64_PSR vpsr;
    u64 value;
    vpsr.val = VCPU(vcpu, vpsr);
    /* Vol2, Table 8-1 */
    if ( vpsr.ic ) {
        if ( set_ifa){
            vcpu_set_ifa(vcpu, vadr);
        }
        if ( set_itir) {
            value = vmx_vcpu_get_itir_on_fault(vcpu, vadr);
            vcpu_set_itir(vcpu, value);
        }

        if ( set_iha) {
            value = vmx_vcpu_thash(vcpu, vadr);
            vcpu_set_iha(vcpu, value);
        }
    }


}
Example #6
IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,tpr)=val;
    vcpu->arch.irq_new_condition = 1;
    return IA64_NO_FAULT;
}
Example #7
u64 vmx_vcpu_get_psr(VCPU *vcpu)
{
    u64 mask;
    REGS *regs = vcpu_regs(vcpu);
    mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
           IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
    return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}
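vmx_vcpu_get_psr() is an instance of a common merge idiom: (virt & ~mask) | (machine & mask) takes the bits selected by mask from the live machine register and everything else from the virtualized copy. The same idiom, with the roles of the operands swapped, reappears in vmx_ia64_set_dcr() and vmx_vcpu_set_psr() below. A self-contained sketch of the idiom:

#include <stdint.h>
#include <assert.h>

/* Take the bits selected by mask from src, the rest from dst. */
static uint64_t bit_merge(uint64_t dst, uint64_t src, uint64_t mask)
{
    return (dst & ~mask) | (src & mask);
}

int main(void)
{
    /* Bits 0-7 track the "machine" value; the rest stay virtual. */
    uint64_t vpsr = 0xffffff00, ipsr = 0x000000ab;
    assert(bit_merge(vpsr, ipsr, 0xff) == 0xffffffab);
    return 0;
}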
Example #8
IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    u64 ifs, psr;
    REGS *regs = vcpu_regs(vcpu);
    psr = VCPU(vcpu,ipsr);
    if (psr & IA64_PSR_BN)
        vcpu_bsw1(vcpu);
    else
        vcpu_bsw0(vcpu);
    vmx_vcpu_set_psr(vcpu,psr);
    vmx_ia64_set_dcr(vcpu);
    ifs=VCPU(vcpu,ifs);
    if(ifs>>63)
        regs->cr_ifs = ifs;
    regs->cr_iip = VCPU(vcpu,iip);
    return (IA64_NO_FAULT);
}
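Both vmx_vcpu_cover() in Example #3 and vmx_vcpu_rfi() above treat bit 63 of cr.ifs as the valid bit: cover stores a bare IA64_IFS_V (valid bit set, frame marker cleared), and rfi only restores the saved ifs when the guest left its valid bit set, which is what the ifs>>63 test checks. A minimal model of that protocol (IA64_IFS_V written out as 1ULL<<63 here):

#include <stdint.h>
#include <assert.h>

#define IFS_V (1ULL << 63)   /* cr.ifs valid bit */

/* Restore a saved ifs into cr.ifs only if the guest marked it valid. */
static void rfi_restore_ifs(uint64_t *cr_ifs, uint64_t saved_ifs)
{
    if (saved_ifs >> 63)
        *cr_ifs = saved_ifs;
}

int main(void)
{
    uint64_t cr_ifs = IFS_V;               /* as left behind by cover */
    rfi_restore_ifs(&cr_ifs, 0x5);         /* invalid: ignored */
    assert(cr_ifs == IFS_V);
    rfi_restore_ifs(&cr_ifs, IFS_V | 0x5); /* valid: restored */
    assert(cr_ifs == (IFS_V | 0x5));
    return 0;
}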
Example #9
void
vmx_ia64_set_dcr(VCPU *v)   
{
    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    unsigned long dcr_bits = (IA64_DEFAULT_DCR_BITS & ~IA64_DCR_PP) |
        (ia64_getreg(_IA64_REG_CR_DCR) & IA64_DCR_PP);

    // if the guest is running at cpl > 0, set dcr.dm = 1
    // if the guest is running at cpl = 0, set dcr.dm = 0
    // because the guest OS may ld.s on a tr-mapped page.
    if (!(VCPU(v, vpsr) & IA64_PSR_CPL))
        dcr_bits &= ~IA64_DCR_DM;

    ia64_set_dcr(dcr_bits);
}
Example #10
int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
{
    ia64_rr  vrr;
    PTA   vpta;
    IA64_PSR  vpsr; 

    vpsr.val = VCPU(vcpu, vpsr);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    vpta.val = vmx_vcpu_get_pta(vcpu);

    if ( vrr.ve & vpta.ve ) {
        switch ( ref ) {
        case DATA_REF:
        case NA_REF:
            return vpsr.dt;
        case INST_REF:
            return vpsr.dt && vpsr.it && vpsr.ic;
        case RSE_REF:
            return vpsr.dt && vpsr.rt;

        }
    }
    return 0;
}
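The switch above encodes the walker-enable conditions of Vol2 Table 8-1: once rr.ve and pta.ve both hold, a data or unspecified reference needs only psr.dt, an instruction reference needs psr.dt, psr.it and psr.ic, and an RSE reference needs psr.dt and psr.rt. The same predicate can be written table-driven, which makes the requirements easier to audit against the manual; a sketch with local stand-in bit masks (not the real IA64_PSR_* constants):

#include <stdint.h>
#include <assert.h>

enum vhpt_ref { DATA_REF, NA_REF, INST_REF, RSE_REF, REF_MAX };

/* Local stand-ins for the relevant PSR bits. */
#define B_DT (1u << 0)
#define B_IT (1u << 1)
#define B_IC (1u << 2)
#define B_RT (1u << 3)

/* Required-bit table: the walker runs only if every listed bit is set. */
static const unsigned required[REF_MAX] = {
    [DATA_REF] = B_DT,
    [NA_REF]   = B_DT,
    [INST_REF] = B_DT | B_IT | B_IC,
    [RSE_REF]  = B_DT | B_RT,
};

static int walker_enabled(unsigned psr_bits, enum vhpt_ref ref)
{
    return (psr_bits & required[ref]) == required[ref];
}

int main(void)
{
    assert(walker_enabled(B_DT, DATA_REF));
    assert(!walker_enabled(B_DT | B_IT, INST_REF));  /* psr.ic missing */
    assert(walker_enabled(B_DT | B_RT, RSE_REF));
    return 0;
}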
Example #11
IA64FAULT
vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat)
{
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if ( vpsr.bn ) {
        VCPU(vcpu,vgr[reg-16]) = val;
        if (nat) {
            VCPU(vcpu,vnat) |= ( 1UL<<(reg-16) );
        } else {
            VCPU(vcpu,vnat) &= ~( 1UL<<(reg-16) );
        }
    }
    else {
        VCPU(vcpu,vbgr[reg-16]) = val;
        if (nat) {
            VCPU(vcpu,vbnat) |= ( 1UL<<reg );
        } else {
            VCPU(vcpu,vbnat) &= ~( 1UL<<reg );
        }
    }
    return IA64_NO_FAULT;
}
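Note the NAT updates above: each branch must stay on a single vector (vnat for bank 1, vbnat for bank 0). A version that sets one vector but clears the other can leave a stale NAT bit behind, so a register written with nat=0 would still fault in vmx_vcpu_get_bgr(). A self-contained check of that invariant (names are stand-ins):

#include <stdint.h>
#include <assert.h>

/* Minimal stand-ins for the two per-bank NAT vectors. */
struct nat_state { uint64_t nat, bnat; };

/* Update exactly one vector, as the setter above does. */
static void set_nat(struct nat_state *s, int bn, unsigned bit, int nat)
{
    uint64_t *vec = bn ? &s->nat : &s->bnat;
    if (nat)
        *vec |= 1UL << bit;
    else
        *vec &= ~(1UL << bit);
}

int main(void)
{
    struct nat_state s = { 0, 0 };
    set_nat(&s, 1, 3, 1);                 /* poison bank-1 bit 3 */
    set_nat(&s, 1, 3, 0);                 /* ...then clear it */
    assert(s.nat == 0 && s.bnat == 0);    /* no stale bit anywhere */
    return 0;
}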
Example #12
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{

    u64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;
    old_psr.val=VCPU(vcpu, vpsr);

    regs=vcpu_regs(vcpu);
    /* We only support guests with:
     *  vpsr.pk = 0
     *  vpsr.is = 0
     * Otherwise panic.
     */
    if ( value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM )) {
        panic_domain (regs,"Setting unsupported guest psr!");
    }

    /*
     * The id/da/dd/ss/ed/ia bits are cleared by hardware after each
     * successfully executed instruction, so we never latch them into
     * the virtual PSR; they are tracked only in the machine PSR.
     */
    VCPU(vcpu,vpsr) = value &
            (~ (IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

    if ( !old_psr.i && (value & IA64_PSR_I) ) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val=VCPU(vcpu, vpsr);
#ifdef	VTI_DEBUG    
    {
    struct pt_regs *regs = vcpu_regs(vcpu);
    guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
    guest_psr_buf[guest_psr_index].psr = new_psr.val;
    if (++guest_psr_index >= 100)
        guest_psr_index = 0;
    }
#endif    
#if 0
    if (old_psr.i != new_psr.i) {
    if (old_psr.i)
        last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
    else
        last_guest_rsm = 0;
    }
#endif

    /*
     * All vIA64_PSR bits go to mPSR (v->tf->tf_special.psr),
     * except for the following bits:
     *  ic/i/dt/si/rt/mc/it/bn/vm
     */
    mask =  IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
        IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
        IA64_PSR_VM;

    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    mask |= IA64_PSR_PP;

    regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );

    if (FP_PSR(vcpu) & IA64_PSR_DFH)
        regs->cr_ipsr |= IA64_PSR_DFH;

    if (unlikely(vcpu->domain->debugger_attached)) {
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_SS)
            regs->cr_ipsr |= IA64_PSR_SS;
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_DB)
            regs->cr_ipsr |= IA64_PSR_DB;
    }

    check_mm_mode_switch(vcpu, old_psr, new_psr);
}
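The vPSR store at the top of vmx_vcpu_set_psr() drops id/da/dd/ss/ed/ia because hardware clears those bits after every successfully completed instruction; latching them into the virtual copy would make them stick. A sketch of that sanitising store, with the bit positions written out per the SDM Vol 2 PSR layout (the real code uses the IA64_PSR_* header constants):

#include <stdint.h>
#include <assert.h>

/* One-shot PSR bits; positions per SDM Vol 2, double-check before use. */
#define PSR_ID (1ULL << 37)
#define PSR_DA (1ULL << 38)
#define PSR_DD (1ULL << 39)
#define PSR_SS (1ULL << 40)
#define PSR_ED (1ULL << 43)
#define PSR_IA (1ULL << 45)
#define PSR_ONE_SHOT (PSR_ID | PSR_DA | PSR_DD | PSR_SS | PSR_ED | PSR_IA)

/* Hardware zeroes these bits after each completed instruction, so they
 * must never be latched into the virtual PSR. */
static uint64_t sanitize_vpsr(uint64_t requested)
{
    return requested & ~PSR_ONE_SHOT;
}

int main(void)
{
    assert(sanitize_vpsr(PSR_SS | 0x1) == 0x1);  /* ss dropped */
    return 0;
}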
IA64FAULT
ia64_hypercall(struct pt_regs *regs)
{
	struct vcpu *v = current;
	struct sal_ret_values x;
	efi_status_t efi_ret_value;
	fpswa_ret_t fpswa_ret;
	IA64FAULT fault; 
	unsigned long index = regs->r2 & FW_HYPERCALL_NUM_MASK_HIGH;

	perfc_incra(fw_hypercall, index >> 8);
	switch (index) {
	case FW_HYPERCALL_XEN:
		return xen_hypercall(regs);

	case FW_HYPERCALL_XEN_FAST:
		return xen_fast_hypercall(regs);

	case FW_HYPERCALL_PAL_CALL:
		//printk("*** PAL hypercall: index=%d\n",regs->r28);
		//FIXME: This should call a C routine
#if 0
		// This is very conservative, but avoids a possible
		// (and deadly) freeze in paravirtualized domains due
		// to a yet-to-be-found bug where pending_interruption
		// is zero when it shouldn't be. Since PAL is called
		// in the idle loop, this should resolve it
		VCPU(v,pending_interruption) = 1;
#endif
		if (regs->r28 == PAL_HALT_LIGHT) {
			if (vcpu_deliverable_interrupts(v) ||
				event_pending(v)) {
				perfc_incr(idle_when_pending);
				vcpu_pend_unspecified_interrupt(v);
//printk("idle w/int#%d pending!\n",pi);
//this shouldn't happen, but it apparently does quite a bit!  so don't
//allow it to happen... i.e. if a domain has an interrupt pending and
//it tries to halt itself because it thinks it is idle, just return here
//as deliver_pending_interrupt is called on the way out and will deliver it
			}
			else {
				perfc_incr(pal_halt_light);
				migrate_timer(&v->arch.hlt_timer,
				              v->processor);
				set_timer(&v->arch.hlt_timer,
				          vcpu_get_next_timer_ns(v));
				do_sched_op_compat(SCHEDOP_block, 0);
				/* do_block only pends a softirq */
				do_softirq();
				stop_timer(&v->arch.hlt_timer);
				/* do_block() calls
				 * local_event_delivery_enable(),
				 * but PAL CALL must be called with
				 * psr.i = 0 and psr.i is unchanged.
				 * SDM vol.2 Part I 11.10.2
				 * PAL Calling Conventions.
				 */
				local_event_delivery_disable();
			}
			regs->r8 = 0;
			regs->r9 = 0;
			regs->r10 = 0;
			regs->r11 = 0;
		}
		else {
			struct ia64_pal_retval y;

			if (regs->r28 >= PAL_COPY_PAL)
				y = xen_pal_emulator
					(regs->r28, vcpu_get_gr (v, 33),
					 vcpu_get_gr (v, 34),
					 vcpu_get_gr (v, 35));
			else
				y = xen_pal_emulator(regs->r28,regs->r29,
						     regs->r30,regs->r31);
			regs->r8 = y.status; regs->r9 = y.v0;
			regs->r10 = y.v1; regs->r11 = y.v2;
		}
		break;
	case FW_HYPERCALL_SAL_CALL:
		x = sal_emulator(vcpu_get_gr(v,32),vcpu_get_gr(v,33),
			vcpu_get_gr(v,34),vcpu_get_gr(v,35),
			vcpu_get_gr(v,36),vcpu_get_gr(v,37),
			vcpu_get_gr(v,38),vcpu_get_gr(v,39));
		regs->r8 = x.r8; regs->r9 = x.r9;
		regs->r10 = x.r10; regs->r11 = x.r11;
		break;
	case FW_HYPERCALL_SAL_RETURN:
	        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
			vcpu_sleep_nosync(v);
		break;
	case FW_HYPERCALL_EFI_CALL:
		efi_ret_value = efi_emulator (regs, &fault);
		if (fault != IA64_NO_FAULT) return fault;
		regs->r8 = efi_ret_value;
		break;
	case FW_HYPERCALL_IPI:
		fw_hypercall_ipi (regs);
		break;
	case FW_HYPERCALL_SET_SHARED_INFO_VA:
	        regs->r8 = domain_set_shared_info_va (regs->r28);
		break;
	case FW_HYPERCALL_FPSWA_BASE:
		switch (regs->r2) {
		case FW_HYPERCALL_FPSWA_BROKEN:
			gdprintk(XENLOG_WARNING,
				 "Old fpswa hypercall was called (0x%lx).\n"
				 "Please update your domain builder. ip 0x%lx\n",
				 FW_HYPERCALL_FPSWA_BROKEN, regs->cr_iip);
			fpswa_ret = fw_hypercall_fpswa_error();
			break;
		case FW_HYPERCALL_FPSWA:
			fpswa_ret = fw_hypercall_fpswa(v, regs);
			break;
		default:
			gdprintk(XENLOG_ERR, "unknown fpswa hypercall %lx\n",
				 regs->r2);
			fpswa_ret = fw_hypercall_fpswa_error();
			break;
		}
		regs->r8  = fpswa_ret.status;
		regs->r9  = fpswa_ret.err0;
		regs->r10 = fpswa_ret.err1;
		regs->r11 = fpswa_ret.err2;
		break;
	case __HYPERVISOR_opt_feature:
	{
		XEN_GUEST_HANDLE(void) arg;
		struct xen_ia64_opt_feature optf;
		set_xen_guest_handle(arg, (void*)(vcpu_get_gr(v, 32)));
		if (copy_from_guest(&optf, arg, 1) == 0)
			regs->r8 = domain_opt_feature(v->domain, &optf);
		else
			regs->r8 = -EFAULT;
		break;
	}
	case FW_HYPERCALL_SIOEMU:
		sioemu_hypercall(regs);
		break;
	default:
		printk("unknown ia64 fw hypercall %lx\n", regs->r2);
		regs->r8 = do_ni_hypercall();
	}
	return IA64_NO_FAULT;
}
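ia64_hypercall() follows one calling convention throughout: the index arrives in r2, arguments come from r28-r39 (or the stacked registers via vcpu_get_gr()), and every branch marshals its results back through r8-r11. A stripped-down model of that dispatch-and-marshal shape, with a hypothetical register struct, handler and index value (the real indices live in the FW_HYPERCALL_* constants):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical cut-down register file: just what the ABI touches. */
struct regs { uint64_t r2, r8, r9, r10, r11, r28; };

struct retval { uint64_t status, v0, v1, v2; };

static struct retval handle_pal(uint64_t fn)   /* stand-in handler */
{
    return (struct retval){ .status = 0, .v0 = fn };
}

enum { HCALL_PAL = 0x100 };   /* illustrative index, not the real one */

static void dispatch(struct regs *r)
{
    switch (r->r2) {
    case HCALL_PAL: {
        struct retval y = handle_pal(r->r28);
        /* results always flow back through r8-r11 */
        r->r8 = y.status; r->r9 = y.v0;
        r->r10 = y.v1;    r->r11 = y.v2;
        break;
    }
    default:
        r->r8 = (uint64_t)-38;   /* -ENOSYS, as do_ni_hypercall() returns */
    }
}

int main(void)
{
    struct regs r = { .r2 = HCALL_PAL, .r28 = 42 };
    dispatch(&r);
    printf("status=%lu v0=%lu\n",
           (unsigned long)r.r8, (unsigned long)r.r9);
    return 0;
}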