Example #1
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     unsigned long *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	if (copy_from_guest(vcpu, &tmp,
			    (u64) (address + vcpu->arch.sie_block->gmsor), 1) ||
	    copy_from_guest(vcpu, &tmp,
			    (u64) (address + vcpu->arch.sie_block->gmsor +
				   PAGE_SIZE), 1)) {
		*reg |= SIGP_STAT_INVALID_PARAMETER;
		return 1; /* invalid parameter */
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return 2; /* busy */

	spin_lock(&fi->lock);
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];

	if (li == NULL) {
		rc = 1; /* incorrect state */
		*reg |= SIGP_STAT_INCORRECT_STATE;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		rc = 1; /* incorrect state */
		*reg |= SIGP_STAT_INCORRECT_STATE;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	rc = 0; /* order accepted */

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock(&fi->lock);
	return rc;
}
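
For context, the mask 0x7fffe000u above keeps an 8K-aligned, 31-bit prefix origin: it clears the low 13 bits and bit 31, and the two copy_from_guest() probes then check that both 4K pages of the new prefix area are backed by readable guest memory. A standalone illustration of the masking (the input value is arbitrary, chosen only for demonstration):

#include <stdio.h>
#include <stdint.h>

/* Demo of the prefix masking in __sigp_set_prefix(): clear the low
 * 13 bits (8K alignment) and bit 31 of the requested address. */
int main(void)
{
	uint32_t address = 0x12345678u;

	printf("%#x\n", address & 0x7fffe000u);	/* prints 0x12344000 */
	return 0;
}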
Example #2
int xenctl_cpumap_to_cpumask(
    cpumask_var_t *cpumask, const struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    int err = 0;
    uint8_t *bytemap = xzalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);

    if ( !bytemap )
        return -ENOMEM;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);

    if ( copy_bytes != 0 )
    {
        if ( copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes) )
            err = -EFAULT;
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes == copy_bytes) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    if ( err )
        /* nothing */;
    else if ( alloc_cpumask_var(cpumask) )
        bitmap_byte_to_long(cpumask_bits(*cpumask), bytemap, nr_cpu_ids);
    else
        err = -ENOMEM;

    xfree(bytemap);

    return err;
}
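
On success the function hands back a freshly allocated cpumask, so the caller owns it and must free it. A minimal caller sketch under that contract (set_vcpu_affinity and the vcpu_set_affinity consumer are assumptions for illustration, not Xen's exact API):

/* Hypothetical caller: convert a guest cpumap, consume it, free it. */
static long set_vcpu_affinity(struct vcpu *v,
                              const struct xenctl_cpumap *map)
{
    cpumask_var_t affinity;
    long ret = xenctl_cpumap_to_cpumask(&affinity, map);

    if ( ret )
        return ret;                         /* -ENOMEM or -EFAULT */
    ret = vcpu_set_affinity(v, affinity);   /* assumed consumer */
    free_cpumask_var(affinity);
    return ret;
}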
Example #3
/**
 * Xen scheduler callback function to perform a global (not domain-specific)
 * adjustment. It is used by the ARINC 653 scheduler to put in place a new
 * ARINC 653 schedule or to retrieve the schedule currently in place.
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @param sc        Pointer to the scheduler operation specified by Domain 0
 */
static int
a653sched_adjust_global(const struct scheduler *ops,
                        struct xen_sysctl_scheduler_op *sc)
{
    xen_sysctl_arinc653_schedule_t local_sched;
    int rc = -EINVAL;

    switch ( sc->cmd )
    {
    case XEN_SYSCTL_SCHEDOP_putinfo:
        if ( copy_from_guest(&local_sched, sc->u.sched_arinc653.schedule, 1) )
        {
            rc = -EFAULT;
            break;
        }

        rc = arinc653_sched_set(ops, &local_sched);
        break;
    case XEN_SYSCTL_SCHEDOP_getinfo:
        memset(&local_sched, -1, sizeof(local_sched));
        rc = arinc653_sched_get(ops, &local_sched);
        if ( rc )
            break;

        if ( copy_to_guest(sc->u.sched_arinc653.schedule, &local_sched, 1) )
            rc = -EFAULT;
        break;
    }

    return rc;
}
Example #4
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
    int rc, exception = 0;

    if (psw_extint_disabled(vcpu))
        return 0;
    if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
        return 0;
    rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
    if (rc == -EFAULT)
        exception = 1;
    rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                       &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
    if (rc == -EFAULT)
        exception = 1;
    rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                         __LC_EXT_NEW_PSW, sizeof(psw_t));
    if (rc == -EFAULT)
        exception = 1;
    if (exception) {
        printk("kvm: The guest lowcore is not mapped during interrupt "
               "delivery, killing userspace\n");
        do_exit(SIGKILL);
    }
    return 1;
}
Example #5
static void setup_iomem_insn(struct lg_cpu *cpu, unsigned long iomem_addr)
{
	cpu->pending.trap = 14;
	cpu->pending.addr = iomem_addr;
	copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
			sizeof(cpu->pending.insn));
}
Example #6
/*
 * The eip contains the *virtual* address of the Guest's instruction:
 * we copy the instruction here so the Launcher doesn't have to walk
 * the page tables to decode it.  We handle the case (eg. in a kernel
 * module) where the instruction is over two pages, and the pages are
 * virtually but not physically contiguous.
 *
 * The longest possible x86 instruction is 15 bytes, but we don't handle
 * anything that strange.
 */
static void copy_from_guest(struct lg_cpu *cpu,
			    void *dst, unsigned long vaddr, size_t len)
{
	size_t to_page_end = PAGE_SIZE - (vaddr % PAGE_SIZE);
	unsigned long paddr;

	BUG_ON(len > PAGE_SIZE);

	/* If it goes over a page, copy in two parts. */
	if (len > to_page_end) {
		/* But make sure the next page is mapped! */
		if (__guest_pa(cpu, vaddr + to_page_end, &paddr))
			copy_from_guest(cpu, dst + to_page_end,
					vaddr + to_page_end,
					len - to_page_end);
		else
			/* Otherwise fill with zeroes. */
			memset(dst + to_page_end, 0, len - to_page_end);
		len = to_page_end;
	}

	/* This will kill the guest if it isn't mapped, but that
	 * shouldn't happen. */
	__lgread(cpu, dst, guest_pa(cpu, vaddr), len);
}
Example #7
static int xenctl_bitmap_to_bitmap(unsigned long *bitmap,
                                   const struct xenctl_bitmap *xenctl_bitmap,
                                   unsigned int nbits)
{
    unsigned int guest_bytes, copy_bytes;
    int err = 0;
    uint8_t *bytemap = xzalloc_array(uint8_t, (nbits + 7) / 8);

    if ( !bytemap )
        return -ENOMEM;

    guest_bytes = (xenctl_bitmap->nr_bits + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, (nbits + 7) / 8);

    if ( copy_bytes != 0 )
    {
        if ( copy_from_guest(bytemap, xenctl_bitmap->bitmap, copy_bytes) )
            err = -EFAULT;
        if ( (xenctl_bitmap->nr_bits & 7) && (guest_bytes == copy_bytes) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_bitmap->nr_bits & 7));
    }

    if ( !err )
        bitmap_byte_to_long(bitmap, bytemap, nbits);

    xfree(bytemap);

    return err;
}
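
The tail-byte mask is the subtle part: when nr_bits is not a multiple of 8, only the low (nr_bits & 7) bits of the final byte copied from the guest are meaningful, and the expression above zeroes the rest. A standalone worked example:

#include <stdint.h>
#include <assert.h>

/* Worked example of the tail mask for nr_bits = 12: the guest supplies
 * two bytes, but only the low 4 bits of the second byte are valid.
 * 0xff << (12 & 7) == 0xf0, so the mask keeps exactly those 4 bits. */
int main(void)
{
    unsigned int nr_bits = 12;
    uint8_t last = 0xff;                /* whatever the guest wrote */

    last &= ~(0xff << (nr_bits & 7));
    assert(last == 0x0f);
    return 0;
}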
Example #8
File: priv.c  Project: 59psi/linux
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}
Example #9
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	u64 addr;
	psw_compat_t new_psw;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	addr = kvm_s390_get_base_disp_s(vcpu);

	if (addr & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	if (!(new_psw.mask & PSW32_MASK_BASE)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	vcpu->arch.sie_block->gpsw.mask =
		(new_psw.mask & ~PSW32_MASK_BASE) << 32;
	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	     PSW_MASK_EA)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	handle_new_psw(vcpu);
out:
	return 0;
}
Example #10
File: priv.c  Project: 59psi/linux
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}
Example #11
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	u64 addr;
	psw_t new_psw;

	addr = kvm_s390_get_base_disp_s(vcpu);

	if (addr & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
	    (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	      PSW_MASK_BA) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	     PSW_MASK_EA)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	handle_new_psw(vcpu);
out:
	return 0;
}
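
The chain of PSW tests open-coded above is what is_valid_psw() encapsulates in Examples #8 and #10. A plausible reconstruction of that helper from these inline checks (a sketch; the upstream version in priv.c may differ in detail):

/* Reconstructed from the inline checks above, not verbatim kernel code. */
static int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA &&
	    (psw->addr & ~PSW_ADDR_31))
		return 0;
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}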
Example #12
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
{
    const unsigned short table[] = { 2, 4, 4, 6 };
    int rc, exception = 0;

    switch (inti->type) {
    case KVM_S390_INT_EMERGENCY:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_INT_EXTERNAL_CALL:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
        vcpu->stat.deliver_external_call++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_INT_SERVICE:
        VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                   inti->ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_INT_VIRTIO:
        VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                   inti->ext.ext_params, inti->ext.ext_params2);
        vcpu->stat.deliver_virtio_interrupt++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
                           inti->ext.ext_params2);
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_SIGP_STOP:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
        vcpu->stat.deliver_stop_signal++;
        __set_intercept_indicator(vcpu, inti);
        break;

    case KVM_S390_SIGP_SET_PREFIX:
        VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                   inti->prefix.address);
        vcpu->stat.deliver_prefix_signal++;
        kvm_s390_set_prefix(vcpu, inti->prefix.address);
        break;

    case KVM_S390_RESTART:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        rc = copy_to_guest(vcpu, offsetof(struct _lowcore, restart_old_psw),
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
        break;

    case KVM_S390_PROGRAM_INT:
        VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                   inti->pgm.code,
                   table[vcpu->arch.sie_block->ipa >> 14]);
        vcpu->stat.deliver_program_int++;
        rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_PGM_ILC,
                           table[vcpu->arch.sie_block->ipa >> 14]);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_PGM_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        break;

    default:
        BUG();
    }
    if (exception) {
        printk("kvm: The guest lowcore is not mapped during interrupt "
               "delivery, killing userspace\n");
        do_exit(SIGKILL);
    }
}
Example #13
static void setup_emulate_insn(struct lg_cpu *cpu)
{
	cpu->pending.trap = 13;
	copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
			sizeof(cpu->pending.insn));
}
Example #14
IA64FAULT
ia64_hypercall(struct pt_regs *regs)
{
	struct vcpu *v = current;
	struct sal_ret_values x;
	efi_status_t efi_ret_value;
	fpswa_ret_t fpswa_ret;
	IA64FAULT fault; 
	unsigned long index = regs->r2 & FW_HYPERCALL_NUM_MASK_HIGH;

	perfc_incra(fw_hypercall, index >> 8);
	switch (index) {
	case FW_HYPERCALL_XEN:
		return xen_hypercall(regs);

	case FW_HYPERCALL_XEN_FAST:
		return xen_fast_hypercall(regs);

	case FW_HYPERCALL_PAL_CALL:
		//printk("*** PAL hypercall: index=%d\n",regs->r28);
		//FIXME: This should call a C routine
#if 0
		// This is very conservative, but avoids a possible
		// (and deadly) freeze in paravirtualized domains due
		// to a yet-to-be-found bug where pending_interruption
		// is zero when it shouldn't be. Since PAL is called
		// in the idle loop, this should resolve it
		VCPU(v,pending_interruption) = 1;
#endif
		if (regs->r28 == PAL_HALT_LIGHT) {
			if (vcpu_deliverable_interrupts(v) ||
				event_pending(v)) {
				perfc_incr(idle_when_pending);
				vcpu_pend_unspecified_interrupt(v);
				/*
				 * This shouldn't happen, but it apparently
				 * does quite a bit!  So don't allow it, i.e.
				 * if a domain has an interrupt pending and it
				 * tries to halt itself because it thinks it
				 * is idle, just return here, as
				 * deliver_pending_interrupt is called on the
				 * way out and will deliver it.
				 */
			}
			else {
				perfc_incr(pal_halt_light);
				migrate_timer(&v->arch.hlt_timer,
				              v->processor);
				set_timer(&v->arch.hlt_timer,
				          vcpu_get_next_timer_ns(v));
				do_sched_op_compat(SCHEDOP_block, 0);
				/* do_block only pends a softirq */
				do_softirq();
				stop_timer(&v->arch.hlt_timer);
				/* do_block() calls
				 * local_event_delivery_enable(),
				 * but PAL CALL must be called with
				 * psr.i = 0 and psr.i is unchanged.
				 * SDM vol.2 Part I 11.10.2
				 * PAL Calling Conventions.
				 */
				local_event_delivery_disable();
			}
			regs->r8 = 0;
			regs->r9 = 0;
			regs->r10 = 0;
			regs->r11 = 0;
		}
		else {
			struct ia64_pal_retval y;

			if (regs->r28 >= PAL_COPY_PAL)
				y = xen_pal_emulator
					(regs->r28, vcpu_get_gr (v, 33),
					 vcpu_get_gr (v, 34),
					 vcpu_get_gr (v, 35));
			else
				y = xen_pal_emulator(regs->r28,regs->r29,
						     regs->r30,regs->r31);
			regs->r8 = y.status; regs->r9 = y.v0;
			regs->r10 = y.v1; regs->r11 = y.v2;
		}
		break;
	case FW_HYPERCALL_SAL_CALL:
		x = sal_emulator(vcpu_get_gr(v,32),vcpu_get_gr(v,33),
			vcpu_get_gr(v,34),vcpu_get_gr(v,35),
			vcpu_get_gr(v,36),vcpu_get_gr(v,37),
			vcpu_get_gr(v,38),vcpu_get_gr(v,39));
		regs->r8 = x.r8; regs->r9 = x.r9;
		regs->r10 = x.r10; regs->r11 = x.r11;
		break;
	case FW_HYPERCALL_SAL_RETURN:
	        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
			vcpu_sleep_nosync(v);
		break;
	case FW_HYPERCALL_EFI_CALL:
		efi_ret_value = efi_emulator (regs, &fault);
		if (fault != IA64_NO_FAULT) return fault;
		regs->r8 = efi_ret_value;
		break;
	case FW_HYPERCALL_IPI:
		fw_hypercall_ipi (regs);
		break;
	case FW_HYPERCALL_SET_SHARED_INFO_VA:
	        regs->r8 = domain_set_shared_info_va (regs->r28);
		break;
	case FW_HYPERCALL_FPSWA_BASE:
		switch (regs->r2) {
		case FW_HYPERCALL_FPSWA_BROKEN:
			gdprintk(XENLOG_WARNING,
				 "Old fpswa hypercall was called (0x%lx).\n"
				 "Please update your domain builder. ip 0x%lx\n",
				 FW_HYPERCALL_FPSWA_BROKEN, regs->cr_iip);
			fpswa_ret = fw_hypercall_fpswa_error();
			break;
		case FW_HYPERCALL_FPSWA:
			fpswa_ret = fw_hypercall_fpswa(v, regs);
			break;
		default:
			gdprintk(XENLOG_ERR, "unknown fpswa hypercall %lx\n",
				 regs->r2);
			fpswa_ret = fw_hypercall_fpswa_error();
			break;
		}
		regs->r8  = fpswa_ret.status;
		regs->r9  = fpswa_ret.err0;
		regs->r10 = fpswa_ret.err1;
		regs->r11 = fpswa_ret.err2;
		break;
	case __HYPERVISOR_opt_feature:
	{
		XEN_GUEST_HANDLE(void) arg;
		struct xen_ia64_opt_feature optf;
		set_xen_guest_handle(arg, (void*)(vcpu_get_gr(v, 32)));
		if (copy_from_guest(&optf, arg, 1) == 0)
			regs->r8 = domain_opt_feature(v->domain, &optf);
		else
			regs->r8 = -EFAULT;
		break;
	}
	case FW_HYPERCALL_SIOEMU:
		sioemu_hypercall(regs);
		break;
	default:
		printk("unknown ia64 fw hypercall %lx\n", regs->r2);
		regs->r8 = do_ni_hypercall();
	}
	return IA64_NO_FAULT;
}