Example #1
int cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, const cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    int err = 0;
    uint8_t *bytemap = xmalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);

    if ( !bytemap )
        return -ENOMEM;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);

    bitmap_long_to_byte(bytemap, cpumask_bits(cpumask), nr_cpu_ids);

    if ( copy_bytes != 0 )
        if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
            err = -EFAULT;

    for ( i = copy_bytes; !err && i < guest_bytes; i++ )
        if ( copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1) )
            err = -EFAULT;

    xfree(bytemap);

    return err;
}
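For context, a minimal hypothetical caller is sketched below; the wrapper name report_online_cpus and the use of Xen's cpu_online_map are illustrative assumptions, not part of the example above.

static int report_online_cpus(struct xenctl_cpumap *guest_map)
{
    /* Copy the hypervisor's online-CPU mask into the guest-supplied bitmap. */
    return cpumask_to_xenctl_cpumap(guest_map, &cpu_online_map);
}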
Example #2
/**
 * Xen scheduler callback function to perform a global (not domain-specific)
 * adjustment. It is used by the ARINC 653 scheduler to put in place a new
 * ARINC 653 schedule or to retrieve the schedule currently in place.
 *
 * @param ops       Pointer to this instance of the scheduler structure
 * @param sc        Pointer to the scheduler operation specified by Domain 0
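 * @return      0 on success or a negative error code (-EINVAL for an
 *              unrecognized command, -EFAULT if the guest copy fails)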
 */
static int
a653sched_adjust_global(const struct scheduler *ops,
                        struct xen_sysctl_scheduler_op *sc)
{
    xen_sysctl_arinc653_schedule_t local_sched;
    int rc = -EINVAL;

    switch ( sc->cmd )
    {
    case XEN_SYSCTL_SCHEDOP_putinfo:
        if ( copy_from_guest(&local_sched, sc->u.sched_arinc653.schedule, 1) )
        {
            rc = -EFAULT;
            break;
        }

        rc = arinc653_sched_set(ops, &local_sched);
        break;
    case XEN_SYSCTL_SCHEDOP_getinfo:
        memset(&local_sched, -1, sizeof(local_sched));
        rc = arinc653_sched_get(ops, &local_sched);
        if ( rc )
            break;

        if ( copy_to_guest(sc->u.sched_arinc653.schedule, &local_sched, 1) )
            rc = -EFAULT;
        break;
    }

    return rc;
}
Example #3
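/*
 * Deliver a pending clock-comparator (ckc) external interrupt to the guest:
 * store external-interrupt code 0x1004 and exchange the external old/new
 * PSWs in the guest lowcore.  Returns 1 once delivered, or 0 if external
 * interrupts or the clock-comparator subclass (the 0x800 bit in CR0) are
 * masked.
 */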
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
    int rc, exception = 0;

    if (psw_extint_disabled(vcpu))
        return 0;
    if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
        return 0;
    rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
    if (rc == -EFAULT)
        exception = 1;
    rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                       &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
    if (rc == -EFAULT)
        exception = 1;
    rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                         __LC_EXT_NEW_PSW, sizeof(psw_t));
    if (rc == -EFAULT)
        exception = 1;
    if (exception) {
        printk("kvm: The guest lowcore is not mapped during interrupt "
               "delivery, killing userspace\n");
        do_exit(SIGKILL);
    }
    return 1;
}
Example #4
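/*
 * Copy one pending interrupt, described by inti, into the guest lowcore.
 * Depending on the interrupt type this stores the interrupt code and
 * parameters, saves the current PSW as the old PSW and loads the new PSW.
 * Any fault while accessing the lowcore kills the userspace process (see
 * the check at the end of the function).
 */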
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
{
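    /* Instruction length in bytes, indexed by the two high-order bits of
     * the intercepted instruction code (ipa >> 14). */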
    const unsigned short table[] = { 2, 4, 4, 6 };
    int rc, exception = 0;

    switch (inti->type) {
    case KVM_S390_INT_EMERGENCY:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_INT_EXTERNAL_CALL:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
        vcpu->stat.deliver_external_call++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_INT_SERVICE:
        VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                   inti->ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_INT_VIRTIO:
        VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                   inti->ext.ext_params, inti->ext.ext_params2);
        vcpu->stat.deliver_virtio_interrupt++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
                           inti->ext.ext_params2);
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_SIGP_STOP:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
        vcpu->stat.deliver_stop_signal++;
        __set_intercept_indicator(vcpu, inti);
        break;

    case KVM_S390_SIGP_SET_PREFIX:
        VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                   inti->prefix.address);
        vcpu->stat.deliver_prefix_signal++;
        kvm_s390_set_prefix(vcpu, inti->prefix.address);
        break;

    case KVM_S390_RESTART:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        rc = copy_to_guest(vcpu, offsetof(struct _lowcore, restart_old_psw),
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             offsetof(struct _lowcore, restart_psw),
                             sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
        break;

    case KVM_S390_PROGRAM_INT:
        VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                   inti->pgm.code,
                   table[vcpu->arch.sie_block->ipa >> 14]);
        vcpu->stat.deliver_program_int++;
        rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_PGM_ILC,
                           table[vcpu->arch.sie_block->ipa >> 14]);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_PGM_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        break;

    default:
        BUG();
    }
    if (exception) {
        printk("kvm: The guest lowcore is not mapped during interrupt "
               "delivery, killing userspace\n");
        do_exit(SIGKILL);
    }
}
Example #5
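/*
 * Emulate a SAL procedure call made by the guest: index selects the SAL
 * procedure and in1..in7 carry its arguments; status and the r9/r10/r11
 * values are handed back to the caller.
 */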
struct sal_ret_values
sal_emulator (long index, unsigned long in1, unsigned long in2,
	      unsigned long in3, unsigned long in4, unsigned long in5,
	      unsigned long in6, unsigned long in7)
{
	struct ia64_sal_retval ret_stuff;
	unsigned long r9  = 0;
	unsigned long r10 = 0;
	long r11 = 0;
	long status;

	debugger_event(XEN_IA64_DEBUG_ON_SAL);

	status = 0;
	switch (index) {
	    case SAL_FREQ_BASE:
		if (likely(!running_on_sim))
			status = ia64_sal_freq_base(in1,&r9,&r10);
		else switch (in1) {
		      case SAL_FREQ_BASE_PLATFORM:
			r9 = 200000000;
			break;

		      case SAL_FREQ_BASE_INTERVAL_TIMER:
			r9 = 700000000;
			break;

		      case SAL_FREQ_BASE_REALTIME_CLOCK:
			r9 = 1;
			break;

		      default:
			status = -1;
			break;
		}
		break;
	    case SAL_PCI_CONFIG_READ:
		if (current->domain == dom0) {
			u64 value;
			// note that args 2&3 are swapped!!
			status = ia64_sal_pci_config_read(in1,in3,in2,&value);
			r9 = value;
		}
		else
		     printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
		break;
	    case SAL_PCI_CONFIG_WRITE:
		if (current->domain == dom0) {
			if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
			    (in4 > 1) ||
			    (in2 > 8) || (in2 & (in2-1)))
				printk("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
					in1,in4,in2,in3);
			// note that args are in a different order!!
			status = ia64_sal_pci_config_write(in1,in4,in2,in3);
		}
		else
		     printk("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
		break;
	    case SAL_SET_VECTORS:
 		if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
 			if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
 				/* Sanity check: cs_length1 must be 0,
 				   second vector is reserved.  */
 				status = -2;
 			}
 			else {
				struct domain *d = current->domain;
				d->arch.sal_data->boot_rdv_ip = in2;
				d->arch.sal_data->boot_rdv_r1 = in3;
			}
 		}
 		else
		{
			if (in1 > sizeof(sal_vectors)/sizeof(sal_vectors[0])-1)
				BUG();
			sal_vectors[in1].vector_type	= in1;
			sal_vectors[in1].handler_addr1	= in2;
			sal_vectors[in1].gp1		= in3;
			sal_vectors[in1].handler_len1	= in4;
			sal_vectors[in1].handler_addr2	= in5;
			sal_vectors[in1].gp2		= in6;
			sal_vectors[in1].handler_len2	= in7;
		}
		break;
	    case SAL_GET_STATE_INFO:
		if (current->domain == dom0) {
			sal_queue_entry_t *e;
			unsigned long flags;
			struct smp_call_args_t arg;

			spin_lock_irqsave(&sal_queue_lock, flags);
			if (!sal_queue || list_empty(&sal_queue[in1])) {
				sal_log_record_header_t header;
				XEN_GUEST_HANDLE(void) handle =
					*(XEN_GUEST_HANDLE(void)*)&in3;

				IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s) "
				               "no sal_queue entry found.\n",
				               rec_name[in1]);
				memset(&header, 0, sizeof(header));

				if (copy_to_guest(handle, &header, 1)) {
					printk("sal_emulator: "
					       "SAL_GET_STATE_INFO can't copy "
					       "empty header to user: 0x%lx\n",
					       in3);
				}
				status = IA64_SAL_NO_INFORMATION_AVAILABLE;
				r9 = 0;
				spin_unlock_irqrestore(&sal_queue_lock, flags);
				break;
			}
			e = list_entry(sal_queue[in1].next,
			               sal_queue_entry_t, list);

			list_del(&e->list);
			spin_unlock_irqrestore(&sal_queue_lock, flags);

			IA64_SAL_DEBUG("SAL_GET_STATE_INFO(%s <= %s) "
			               "on CPU#%d.\n",
			               rec_name[e->sal_info_type],
			               rec_name[in1], e->cpuid);

			arg.type = e->sal_info_type;
			arg.target = in3;
			arg.corrected = !!((in1 != e->sal_info_type) && 
			                (e->sal_info_type == SAL_INFO_TYPE_MCA));
			arg.domain = current->domain;
			arg.status = 0;

			if (e->cpuid == smp_processor_id()) {
				IA64_SAL_DEBUG("SAL_GET_STATE_INFO: local\n");
				get_state_info_on(&arg);
			} else {
				int ret;
				IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
				ret = smp_call_function_single(e->cpuid,
				                               get_state_info_on,
				                               &arg, 0, 1);
				if (ret < 0) {
					printk("SAL_GET_STATE_INFO "
					       "smp_call_function_single error:"
					       " %d\n", ret);
					arg.ret = 0;
					arg.status =
					     IA64_SAL_NO_INFORMATION_AVAILABLE;
				}
			}
			r9 = arg.ret;
			status = arg.status;
			if (r9 == 0) {
				xfree(e);
			} else {
				/* Re-add the entry to sal_queue */
				spin_lock_irqsave(&sal_queue_lock, flags);
				list_add(&e->list, &sal_queue[in1]);
				spin_unlock_irqrestore(&sal_queue_lock, flags);
			}
		} else {