Example No. 1
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
    int i, matching_cpus = 0;
    unsigned long mpidr;
    unsigned long target_affinity;
    unsigned long target_affinity_mask;
    unsigned long lowest_affinity_level;
    struct kvm *kvm = vcpu->kvm;
    struct kvm_vcpu *tmp;

    target_affinity = *vcpu_reg(vcpu, 1);
    lowest_affinity_level = *vcpu_reg(vcpu, 2);

    /* Determine target affinity mask */
    target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
    if (!target_affinity_mask)
        return PSCI_RET_INVALID_PARAMS;

    /* Ignore other bits of target affinity */
    target_affinity &= target_affinity_mask;

    /*
     * If one or more VCPU matching target affinity are running
     * then ON else OFF
     */
    kvm_for_each_vcpu(i, tmp, kvm) {
        mpidr = kvm_vcpu_get_mpidr_aff(tmp);
        if ((mpidr & target_affinity_mask) == target_affinity) {
            matching_cpus++;
            if (!tmp->arch.power_off)
                return PSCI_0_2_AFFINITY_LEVEL_ON;
        }
    }

    if (!matching_cpus)
        return PSCI_RET_INVALID_PARAMS;

    return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
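As a standalone illustration (not kernel code) of how the mask and compare above behave: with lowest_affinity_level == 1 and the 8-bit affinity fields of a 32-bit MPIDR, everything below Aff1 is ignored, so any core inside the requested cluster matches. The constants below are illustrative assumptions, not kernel definitions.

#include <stdio.h>

int main(void)
{
    /* lowest_affinity_level == 1: ignore Aff0, compare Aff1 and above */
    unsigned long lowest_affinity_level = 1;
    unsigned long mask = ~((1UL << (lowest_affinity_level * 8)) - 1) & 0xFFFFFFUL;
    unsigned long target_affinity = 0x000100UL & mask;   /* cluster 1 */
    unsigned long mpidr = 0x000103UL;                     /* cluster 1, core 3 */

    /* prints: mask=0xffff00 match=1 */
    printf("mask=%#lx match=%d\n", mask, (mpidr & mask) == target_affinity);
    return 0;
}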
Example No. 2
/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC or SMC instructions.
 * The calling convention is similar to SMC calls to the secure world: the
 * function number is placed in r0. This function returns true if the
 * function number specified in r0 is within the PSCI range, and false
 * otherwise.
 */
bool kvm_psci_call(struct kvm_vcpu *vcpu)
{
	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = KVM_PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	case KVM_PSCI_FN_CPU_SUSPEND:
	case KVM_PSCI_FN_MIGRATE:
		val = KVM_PSCI_RET_NI;
		break;

	default:
		return false;
	}

	*vcpu_reg(vcpu, 0) = val;
	return true;
}
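As the comment above says, the guest places the PSCI function number in r0 and traps to the hypervisor; on KVM this trap is typically an HVC. A minimal guest-side sketch of that convention, assuming a 32-bit guest, GCC-style inline assembly, and that KVM_PSCI_FN_CPU_OFF is taken from the arm KVM uapi header (asm/kvm.h):

/* Sketch only: issue the v0.1 CPU_OFF call. The function number goes in
 * r0; on failure the status that kvm_psci_call() wrote back is returned
 * in r0 (a successful CPU_OFF does not return). */
static inline unsigned long guest_psci_cpu_off(void)
{
	register unsigned long r0 asm("r0") = KVM_PSCI_FN_CPU_OFF;

	asm volatile("hvc #0" : "+r" (r0) : : "memory");
	return r0;
}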
Example No. 3
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
    struct kvm *kvm = source_vcpu->kvm;
    struct kvm_vcpu *vcpu = NULL;
    wait_queue_head_t *wq;
    unsigned long cpu_id;
    unsigned long context_id;
    phys_addr_t target_pc;

    cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
    if (vcpu_mode_is_32bit(source_vcpu))
        cpu_id &= ~((u32) 0);

    vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

    /*
     * Make sure the caller requested a valid CPU and that the CPU is
     * turned off.
     */
    if (!vcpu)
        return PSCI_RET_INVALID_PARAMS;
    if (!vcpu->arch.power_off) {
        if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
            return PSCI_RET_ALREADY_ON;
        else
            return PSCI_RET_INVALID_PARAMS;
    }

    target_pc = *vcpu_reg(source_vcpu, 2);
    context_id = *vcpu_reg(source_vcpu, 3);

    kvm_reset_vcpu(vcpu);

    /* Gracefully handle Thumb2 entry point */
    if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
        target_pc &= ~((phys_addr_t) 1);
        vcpu_set_thumb(vcpu);
    }

    /* Propagate caller endianness */
    if (kvm_vcpu_is_be(source_vcpu))
        kvm_vcpu_set_be(vcpu);

    *vcpu_pc(vcpu) = target_pc;
    /*
     * NOTE: We always update r0 (or x0) because for PSCI v0.1
     * the general purpose registers are undefined upon CPU_ON.
     */
    *vcpu_reg(vcpu, 0) = context_id;
    vcpu->arch.power_off = false;
    smp_mb();		/* Make sure the above is visible */

    wq = kvm_arch_vcpu_wq(vcpu);
    wake_up_interruptible(wq);

    return PSCI_RET_SUCCESS;
}
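The target VCPU here is found with kvm_mpidr_to_vcpu(), the MPIDR-based lookup that Example No. 11 below open-codes with kvm_for_each_vcpu. A sketch of what that helper plausibly looks like, assuming the MPIDR_HWID_BITMASK and kvm_vcpu_get_mpidr_aff() accessors used elsewhere in these examples:

/* Sketch: mask off the non-affinity bits of the requested MPIDR, then
 * return the first VCPU whose affinity fields match, or NULL if none. */
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	int i;

	mpidr &= MPIDR_HWID_BITMASK;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}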
Example No. 4
/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 *
 * This should only be called after returning from userspace for MMIO load
 * emulation.
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long data;
	unsigned int len;
	int mask;

	if (!run->mmio.is_write) {
		len = run->mmio.len;
		if (len > sizeof(unsigned long))
			return -EINVAL;

		data = mmio_read_buf(run->mmio.data, len);

		if (vcpu->arch.mmio_decode.sign_extend &&
		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
			data = (data ^ mask) - mask;
		}

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       data);
		data = vcpu_data_host_to_guest(vcpu, data, len);
		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
	}

	return 0;
}
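The (data ^ mask) - mask expression above is a branch-free sign-extension idiom: the XOR clears the sign bit of the narrow value, and subtracting that bit back borrows through all the upper bits whenever the sign bit was set. A standalone illustration for a 1-byte load (assumes an LP64 host; demonstration only):

#include <stdio.h>

int main(void)
{
	unsigned long data = 0xff;                    /* 1-byte MMIO read: -1 as s8 */
	unsigned int len = 1;
	unsigned long mask = 1UL << ((len * 8) - 1);  /* 0x80: the sign bit */

	data = (data ^ mask) - mask;                  /* sign-extend to full width */
	printf("%lx\n", data);                        /* prints ffffffffffffffff */
	return 0;
}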
Example No. 5
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                 phys_addr_t fault_ipa)
{
    unsigned long data;
    unsigned long rt;
    int ret;
    bool is_write;
    int len;
    u8 data_buf[8];

    /*
     * Prepare MMIO operation. First decode the syndrome data we get
     * from the CPU. Then check whether some in-kernel emulation can
     * handle it, otherwise let user space do its magic.
     */
    if (kvm_vcpu_dabt_isvalid(vcpu)) {
        ret = decode_hsr(vcpu, &is_write, &len);
        if (ret)
            return ret;
    } else {
        kvm_err("load/store instruction decoding not implemented\n");
        return -ENOSYS;
    }

    rt = vcpu->arch.mmio_decode.rt;

    if (is_write) {
        data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);

        trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
        mmio_write_buf(data_buf, len, data);

        ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                               data_buf);
    } else {
        trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
                       fault_ipa, 0);

        ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                              data_buf);
    }

    /* Now prepare kvm_run for the potential return to userland. */
    run->mmio.is_write	= is_write;
    run->mmio.phys_addr	= fault_ipa;
    run->mmio.len		= len;
    memcpy(run->mmio.data, data_buf, len);

    if (!ret) {
        /* We handled the access successfully in the kernel. */
        kvm_handle_mmio_return(vcpu, run);
        return 1;
    }

    run->exit_reason	= KVM_EXIT_MMIO;
    return 0;
}
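When ret is non-zero no in-kernel device claimed the access, so the exit reason is set to KVM_EXIT_MMIO and the VMM finishes the access in user space. A rough sketch of that user-space side, where device_mmio_access() is a hypothetical VMM device-model hook (not a kernel or KVM API):

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical VMM helper that emulates the device access. */
extern void device_mmio_access(__u64 addr, __u8 *data, __u32 len, bool is_write);

/* On KVM_EXIT_MMIO, emulate the access; for reads the result is left in
 * run->mmio.data, where kvm_handle_mmio_return() (Example No. 4) picks it
 * up on the next KVM_RUN. */
void handle_exit_mmio(int vcpu_fd, struct kvm_run *run)
{
	if (run->exit_reason == KVM_EXIT_MMIO) {
		device_mmio_access(run->mmio.phys_addr, run->mmio.data,
				   run->mmio.len, run->mmio.is_write);
		ioctl(vcpu_fd, KVM_RUN, 0);	/* re-enter the guest */
	}
}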
Example No. 6
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
		      kvm_vcpu_hvc_get_imm(vcpu));

	if (kvm_psci_call(vcpu))
		return 1;

	kvm_inject_undefined(vcpu);
	return 1;
}
Example No. 7
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;

	ret = kvm_psci_call(vcpu);
	if (ret < 0) {
		*vcpu_reg(vcpu, 0) = ~0UL;
		return 1;
	}

	return ret;
}
Example No. 8
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *wq;
	unsigned long cpu_id;
	phys_addr_t target_pc;

	cpu_id = *vcpu_reg(source_vcpu, 1);
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	if (cpu_id >= atomic_read(&kvm->online_vcpus))
		return KVM_PSCI_RET_INVAL;

	target_pc = *vcpu_reg(source_vcpu, 2);

	vcpu = kvm_get_vcpu(kvm, cpu_id);

	wq = kvm_arch_vcpu_wq(vcpu);
	if (!waitqueue_active(wq))
		return KVM_PSCI_RET_INVAL;

	kvm_reset_vcpu(vcpu);

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~((phys_addr_t) 1);
		vcpu_set_thumb(vcpu);
	}

	*vcpu_pc(vcpu) = target_pc;
	vcpu->arch.pause = false;
	smp_mb();		/* Make sure the above is visible */

	wake_up_interruptible(wq);

	return KVM_PSCI_RET_SUCCESS;
}
Example No. 9
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));

	ret = kvm_psci_call(vcpu);
	if (ret < 0) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	return ret;
}
Example No. 10
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa)
{
	struct kvm_exit_mmio mmio;
	unsigned long data;
	unsigned long rt;
	int ret;

	/*
	 * Prepare MMIO operation. First stash it in a private
	 * structure that we can use for in-kernel emulation. If the
	 * kernel can't handle it, copy it into run->mmio and let user
	 * space do its magic.
	 */

	if (kvm_vcpu_dabt_isvalid(vcpu)) {
		ret = decode_hsr(vcpu, fault_ipa, &mmio);
		if (ret)
			return ret;
	} else {
		kvm_err("load/store instruction decoding not implemented\n");
		return -ENOSYS;
	}

	rt = vcpu->arch.mmio_decode.rt;
	data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);

	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
					 KVM_TRACE_MMIO_READ_UNSATISFIED,
			mmio.len, fault_ipa,
			(mmio.is_write) ? data : 0);

	if (mmio.is_write)
		mmio_write_buf(mmio.data, mmio.len, data);

	if (vgic_handle_mmio(vcpu, run, &mmio))
		return 1;

	kvm_prepare_mmio(run, &mmio);
	return 0;
}
Example No. 11
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL, *tmp;
	wait_queue_head_t *wq;
	unsigned long cpu_id;
	unsigned long context_id;
	unsigned long mpidr;
	phys_addr_t target_pc;
	int i;

	cpu_id = *vcpu_reg(source_vcpu, 1);
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr(tmp);
		if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
			vcpu = tmp;
			break;
		}
	}
Example No. 12
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	*vcpu_reg(vcpu, 0) = ~0UL;
	return 1;
}