Example No. 1
static void
fw_hypercall_ipi (struct pt_regs *regs)
{
	int cpu = regs->r14;
	int vector = regs->r15;
	struct vcpu *targ;
	struct domain *d = current->domain;

	/* Be sure the target exists.  */
	if (cpu >= d->max_vcpus)
		return;
	targ = d->vcpu[cpu];
	if (targ == NULL)
		return;

	if (vector == XEN_SAL_BOOT_RENDEZ_VEC
	    && (!targ->is_initialised
		|| test_bit(_VPF_down, &targ->pause_flags))) {

		/* First start: initialize vcpu.  */
		if (!targ->is_initialised) {
			if (arch_set_info_guest (targ, NULL) != 0) {
				printk ("arch_boot_vcpu: failure\n");
				return;
			}
		}
			
		/* First or next rendez-vous: set registers.  */
		vcpu_init_regs (targ);
		vcpu_regs (targ)->cr_iip = d->arch.sal_data->boot_rdv_ip;
		vcpu_regs (targ)->r1 = d->arch.sal_data->boot_rdv_r1;
		vcpu_regs (targ)->b0 = FW_HYPERCALL_SAL_RETURN_PADDR;

		if (test_and_clear_bit(_VPF_down,
				       &targ->pause_flags)) {
			vcpu_wake(targ);
			printk(XENLOG_INFO "arch_boot_vcpu: vcpu %d awakened\n",
			       targ->vcpu_id);
		}
		else
			printk ("arch_boot_vcpu: huu, already awaken!\n");
	}
	else {
		int running = targ->is_running;
		vcpu_pend_interrupt(targ, vector);
		vcpu_unblock(targ);
		if (running)
			smp_send_event_check_cpu(targ->processor);
	}
	return;
}
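
The wake-up path above leans on test_and_clear_bit(): even if several boot IPIs race, only the caller that actually clears _VPF_down performs the wakeup. Below is a minimal, self-contained sketch of that exactly-once pattern using C11 atomics as a stand-in for the Xen bitops; take_down_flag() is a hypothetical name, not Xen's API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for test_and_clear_bit(_VPF_down, ...):
 * atomically read and clear the flag, so among concurrent callers
 * exactly one sees `true` and issues the wakeup. */
static bool take_down_flag(atomic_bool *down)
{
	return atomic_exchange(down, false);
}

int main(void)
{
	atomic_bool down = true;

	if (take_down_flag(&down))
		puts("this caller wakes the vcpu");	/* first caller only */
	if (take_down_flag(&down))
		puts("never printed: flag already clear");
	return 0;
}
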
Example No. 2
static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm_vcpu *target_vcpu;
	struct kvm_pt_regs *regs;
	union ia64_ipi_a addr = p->u.ipi_data.addr;
	union ia64_ipi_d data = p->u.ipi_data.data;

	target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
	if (!target_vcpu)
		return handle_vm_error(vcpu, kvm_run);

	if (!target_vcpu->arch.launched) {
		regs = vcpu_regs(target_vcpu);

		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

		target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		if (waitqueue_active(&target_vcpu->wq))
			wake_up_interruptible(&target_vcpu->wq);
	} else {
		vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
		if (target_vcpu != vcpu)
			kvm_vcpu_kick(target_vcpu);
	}

	return 1;
}
Example No. 3
void
inject_guest_interruption(VCPU *vcpu, u64 vec)
{
    u64 viva;
    REGS *regs;
    ISR pt_isr;

    perfc_incra(vmx_inject_guest_interruption, vec >> 8);

    regs = vcpu_regs(vcpu);

    // clear cr.isr.ir (incomplete register frame)
    pt_isr.val = VMX(vcpu,cr_isr);
    pt_isr.ir = 0;
    VMX(vcpu,cr_isr) = pt_isr.val;

    collect_interruption(vcpu);
    vmx_ia64_set_dcr(vcpu);

    viva = vmx_vcpu_get_iva(vcpu);
    regs->cr_iip = viva + vec;

    debugger_event(vec == IA64_EXTINT_VECTOR ?
                   XEN_IA64_DEBUG_ON_EXTINT : XEN_IA64_DEBUG_ON_EXCEPT);
}
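
The pt_isr.val / pt_isr.ir pair above is ia64's usual union-over-bit-fields register idiom: raw access through .val, per-flag access through named fields. A self-contained sketch of the pattern follows; the type name and field widths are illustrative, not the architectural ISR layout.

#include <stdint.h>

/* Illustrative register image: whole-register access via .val,
 * per-flag access via the bit-field members (made-up widths). */
typedef union {
    uint64_t val;
    struct {
        uint64_t code : 16;
        uint64_t ir   : 1;   /* e.g. "incomplete register frame" */
        uint64_t rest : 47;
    };
} EXAMPLE_ISR;

/* Mirror of the clear-and-write-back sequence in the code above:
 * flip one flag without disturbing the rest of the register. */
static uint64_t clear_ir(uint64_t raw)
{
    EXAMPLE_ISR isr;
    isr.val = raw;
    isr.ir = 0;
    return isr.val;
}
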
Example No. 4
/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return the fetched bundle.
 */
unsigned long
fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
    u64     gpip=0;   // guest physical IP
    u64     *vpa;
    thash_data_t    *tlb;
    u64     mfn, maddr;
    struct page_info* page;

 again:
    if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
        gpip = pa_clear_uc(gip);	// clear UC bit
    }
    else {
        tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
//        if( tlb == NULL )
//             tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
        if (tlb)
            gpip = thash_translate(tlb, gip);
    }
    if( gpip){
        mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            panic_domain(vcpu_regs(vcpu), "fetch_code: invalid memory\n");
        maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
    }else{
Example No. 5
u64 vmx_vcpu_get_psr(VCPU *vcpu)
{
    u64 mask;
    REGS *regs = vcpu_regs(vcpu);
    mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
           IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
    return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}
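
vmx_vcpu_get_psr() is an instance of the (shadow & ~mask) | (live & mask) merge idiom: the bits named in mask are read from the live machine cr.ipsr, everything else from the shadowed virtual PSR. A tiny sketch with placeholder values rather than the real IA64_PSR_* masks:

#include <stdint.h>
#include <assert.h>

/* Bits set in `mask` come from `live`; all others from `shadow`. */
static uint64_t merge_masked(uint64_t shadow, uint64_t live, uint64_t mask)
{
    return (shadow & ~mask) | (live & mask);
}

int main(void)
{
    /* Low byte owned by the machine, the rest by the shadow copy:
     * 0x00f0 contributes nothing below bit 8, 0xff0f contributes 0x0f. */
    assert(merge_masked(0x00f0, 0xff0f, 0x00ff) == 0x000f);
    return 0;
}
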
Example No. 6
void
switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    int act;
    /* Switch to physical mode when injecting PAL_INIT */
    if (unlikely(MODE_IND(new_psr) == 0 &&
                 vcpu_regs(vcpu)->cr_iip == PAL_INIT_ENTRY))
        act = SW_2P_DT;
    else
        act = mm_switch_action(old_psr, new_psr);
    perfc_incra(vmx_switch_mm_mode, act);
    switch (act) {
    case SW_2P_DT:
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
        switch_to_physical_rid(vcpu);
        break;
    case SW_2P_D:
//        printk("V -> P_D mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_D;
        switch_to_physical_rid(vcpu);
        break;
    case SW_2V:
//        printk("P -> V mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_VIRTUAL;
        switch_to_virtual_rid(vcpu);
        break;
    case SW_SELF:
        printk("Switch to self-0x%lx!!! MM mode doesn't change...\n",
            old_psr.val);
        break;
    case SW_NOP:
//        printk("No action required for mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        break;
    default:
        /* Sanity check */
        panic_domain(vcpu_regs(vcpu),
                     "Unexpected virtual <--> physical mode transition, "
                     "old:%lx, new:%lx\n", old_psr.val, new_psr.val);
        break;
    }
    return;
}
Example No. 7
static void
collect_interruption(VCPU *vcpu)
{
    u64 ipsr;
    u64 vdcr;
    u64 vifs;
    IA64_PSR vpsr;
    REGS * regs = vcpu_regs(vcpu);
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    vcpu_bsw0(vcpu);
    if(vpsr.ic){

        /* Sync the mpsr id/da/dd/ss/ed bits into vipsr, since after
         * the guest does rfi we still want these bits set in mpsr.
         */

        ipsr = regs->cr_ipsr;
        vpsr.val = vpsr.val | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
             | IA64_PSR_DD |IA64_PSR_SS |IA64_PSR_ED));
        vcpu_set_ipsr(vcpu, vpsr.val);

        /* Currently, for traps, we do not advance IIP to the next
         * instruction; we assume the caller has already set up IIP
         * correctly.
         */

        vcpu_set_iip(vcpu , regs->cr_iip);

        /* set vifs.v to zero */
        vifs = VCPU(vcpu,ifs);
        vifs &= ~IA64_IFS_V;
        vcpu_set_ifs(vcpu, vifs);

        vcpu_set_iipa(vcpu, VMX(vcpu,cr_iipa));
    }

    vdcr = VCPU(vcpu,dcr);

    /* Set guest psr:
     * up/mfl/mfh/pk/dt/rt/mc/it remain unchanged;
     * be: set to the value of dcr.be
     * pp: set to the value of dcr.pp
     */
    vpsr.val &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
    vpsr.val |= ( vdcr & IA64_DCR_BE);

    /* VDCR pp bit position is different from VPSR pp bit */
    if ( vdcr & IA64_DCR_PP ) {
        vpsr.val |= IA64_PSR_PP;
    } else {
        vpsr.val &= ~IA64_PSR_PP;
    }

    vmx_vcpu_set_psr(vcpu, vpsr.val);

}
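
The explicit if/else for pp near the end of collect_interruption() exists because, as the source comment says, dcr.pp and psr.pp sit at different bit positions, so OR-ing the raw DCR bit into the PSR would set the wrong flag (the direct OR used for be works only because those positions coincide). A sketch of the flag relocation, with hypothetical bit positions:

#include <stdint.h>

#define DCR_PP (1ULL << 1)    /* hypothetical dcr.pp position */
#define PSR_PP (1ULL << 21)   /* hypothetical psr.pp position */

/* Copy the pp flag across registers: test at the source position,
 * then set or clear at the destination position. */
static uint64_t propagate_pp(uint64_t vpsr, uint64_t vdcr)
{
    if (vdcr & DCR_PP)
        return vpsr | PSR_PP;
    return vpsr & ~PSR_PP;
}
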
Example No. 8
IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    vpsr.val = VCPU(vcpu, vpsr);

    if(!vpsr.ic)
        VCPU(vcpu,ifs) = regs->cr_ifs;
    regs->cr_ifs = IA64_IFS_V;
    return (IA64_NO_FAULT);
}
Example No. 9
IA64FAULT
vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
{
    REGS *regs = vcpu_regs(vcpu);
    long sof;
    //TODO Eddie

    /* Check regs before dereferencing it for cr.ifs. */
    if (!regs) return IA64_ILLOP_FAULT;
    sof = (regs->cr_ifs) & 0x7f;    /* size of current frame */
    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
#if 0
    if ( reg >= 16 && reg < 32 ) {
        return vmx_vcpu_set_bgr(vcpu,reg, value, nat);
    }
#endif
    setreg(reg,value,nat,regs);
    return IA64_NO_FAULT;
}
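
The sof check above encodes ia64's register frame model: r0..r31 are static, the low bits of cr.ifs give the size of the current stacked frame, and only r32..r(31+sof) are addressable beyond the statics. A standalone sketch of the same validity test, assuming as the code does that sof is the low 7 bits of cr.ifs:

#include <stdint.h>
#include <stdbool.h>

/* True iff `reg` names a static GR (r0..r31) or a register inside
 * the current stacked frame (r32..r31+sof). */
static bool gr_index_valid(uint64_t cr_ifs, unsigned reg)
{
    unsigned sof = cr_ifs & 0x7f;   /* size of current frame */
    return reg < 32 + sof;
}
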
Example No. 10
IA64FAULT
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, u64 * val)
{
    REGS *regs=vcpu_regs(vcpu);
    int nat;
    //TODO, Eddie
    if (!regs) return IA64_ILLOP_FAULT;
#if 0
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_get_bgr(vcpu,reg,val);
    }
#endif
    getreg(reg,val,&nat,regs);    // FIXME: handle NATs later
    if(nat){
        return IA64_FAULT;
    }
    return IA64_NO_FAULT;
}
Example No. 11
IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    u64 ifs, psr;
    REGS *regs = vcpu_regs(vcpu);
    psr = VCPU(vcpu,ipsr);
    if (psr & IA64_PSR_BN)
        vcpu_bsw1(vcpu);
    else
        vcpu_bsw0(vcpu);
    vmx_vcpu_set_psr(vcpu,psr);
    vmx_ia64_set_dcr(vcpu);
    ifs = VCPU(vcpu, ifs);
    if (ifs >> 63)      /* restore ifs only if its valid bit is set */
        regs->cr_ifs = ifs;
    regs->cr_iip = VCPU(vcpu,iip);
    return (IA64_NO_FAULT);
}
Example No. 12
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{

    u64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;
    old_psr.val=VCPU(vcpu, vpsr);

    regs=vcpu_regs(vcpu);
    /* We only support guests with:
     *  vpsr.pk = 0
     *  vpsr.is = 0
     * Otherwise panic.
     */
    if (value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) {
        panic_domain(regs, "Setting unsupported guest psr!");
    }

    /*
     * The IA64_PSR bits id/da/dd/ss/ed/ia become 0 after each
     * successfully executed instruction, so they are kept in the
     * machine PSR (mIA64_PSR) rather than in vpsr.
     */
    VCPU(vcpu,vpsr) = value &
            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
                IA64_PSR_ED | IA64_PSR_IA));

    if ( !old_psr.i && (value & IA64_PSR_I) ) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val=VCPU(vcpu, vpsr);
#ifdef	VTI_DEBUG    
    {
    struct pt_regs *regs = vcpu_regs(vcpu);
    guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
    guest_psr_buf[guest_psr_index].psr = new_psr.val;
    if (++guest_psr_index >= 100)
        guest_psr_index = 0;
    }
#endif    
#if 0
    if (old_psr.i != new_psr.i) {
    if (old_psr.i)
        last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
    else
        last_guest_rsm = 0;
    }
#endif

    /*
     * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
     * except for the following bits:
     *  ic/i/dt/si/rt/mc/it/bn/vm
     */
    mask =  IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
        IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
        IA64_PSR_VM;

    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    mask |= IA64_PSR_PP;

    regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );

    if (FP_PSR(vcpu) & IA64_PSR_DFH)
        regs->cr_ipsr |= IA64_PSR_DFH;

    if (unlikely(vcpu->domain->debugger_attached)) {
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_SS)
            regs->cr_ipsr |= IA64_PSR_SS;
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_DB)
            regs->cr_ipsr |= IA64_PSR_DB;
    }

    check_mm_mode_switch(vcpu, old_psr, new_psr);
    return;
}