Example #1
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	unsigned long hva;
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_real_to_abs(vcpu, addr);

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}
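
For context on what the conversion in this example actually does: gpa_to_gfn() and its inverse gfn_to_gpa() are plain page-frame-number shifts. The restatement below follows their definitions in include/linux/kvm_host.h (shown here for reference, not part of the handler above):

/* A gfn is a gpa with the in-page offset stripped (PAGE_SHIFT bits). */
static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

Note that handle_test_block() masks the address with PAGE_MASK first, so the gfn passed to gfn_to_hva() always refers to the start of the page being cleared.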
Example #2
/*
 * Read from the physical memory of the currently running guest. Unlike
 * kvm_vcpu_read_guest_page(), this function can read either from the
 * guest's physical memory or from the physical memory of the guest's
 * own (nested) guest, depending on the MMU passed in.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	struct x86_exception exception;
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa     = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
}
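
As a usage illustration, a caller splits a nested-guest physical address into the gfn/offset pair this function expects. The helper below is a hypothetical sketch (read_nested_guest_u64 is not a kernel function; vcpu->arch.walk_mmu and PFERR_USER_MASK are assumptions about the x86 caller context):

/*
 * Hypothetical sketch: read a u64 from nested-guest physical memory
 * through the MMU used for guest page-table walks. Assumes the value
 * does not cross a page boundary.
 */
static int read_nested_guest_u64(struct kvm_vcpu *vcpu, gpa_t ngpa, u64 *val)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu,
				       gpa_to_gfn(ngpa), val,
				       offset_in_page(ngpa), sizeof(*val),
				       PFERR_USER_MASK);
}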
Example #3
static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	unsigned int prio;

	if (!xc)
		return -ENOENT;

	for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];

		if (!q->qpage)
			continue;

		/* Mark EQ page dirty for migration */
		mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
	}
	return 0;
}
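
The pattern here generalizes: whenever the host writes guest memory behind the guest's back, the touched gfn must be logged so live migration re-sends that page. Below is a minimal sketch of the same pattern for a multi-page buffer (note_host_write_range is a hypothetical helper, not from the XIVE code; as in the path above, callers need the kvm->srcu read lock held around mark_page_dirty()):

static void note_host_write_range(struct kvm *kvm, gpa_t gpa, size_t len)
{
	gfn_t gfn;

	if (!len)
		return;

	/* Dirty logging is per page, so walk every gfn the range touches. */
	for (gfn = gpa_to_gfn(gpa); gfn <= gpa_to_gfn(gpa + len - 1); gfn++)
		mark_page_dirty(kvm, gfn);
}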
Example #4
static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
					       long eq_idx, u64 addr)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	void __user *ubufp = (void __user *) addr;
	u32 server;
	u8 priority;
	struct kvm_ppc_xive_eq kvm_eq;
	int rc;
	__be32 *qaddr = NULL;
	struct page *page;
	struct xive_q *q;
	gfn_t gfn;
	unsigned long page_size;

	/*
	 * Demangle priority/server tuple from the EQ identifier
	 */
	priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
		KVM_XIVE_EQ_PRIORITY_SHIFT;
	server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
		KVM_XIVE_EQ_SERVER_SHIFT;

	if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq)))
		return -EFAULT;

	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_err("Can't find server %d\n", server);
		return -ENOENT;
	}
	xc = vcpu->arch.xive_vcpu;

	if (priority != xive_prio_from_guest(priority)) {
		pr_err("Trying to restore invalid queue %d for VCPU %d\n",
		       priority, server);
		return -EINVAL;
	}
	q = &xc->queues[priority];

	pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
		 __func__, server, priority, kvm_eq.flags,
		 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);

	/*
	 * sPAPR specifies an "Unconditional Notify (n)" flag for the
	 * H_INT_SET_QUEUE_CONFIG hcall which forces notification
	 * without using the coalescing mechanisms provided by the
	 * XIVE END ESBs. This is required on KVM as notification
	 * using the END ESBs is not supported.
	 */
	if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
		pr_err("invalid flags %d\n", kvm_eq.flags);
		return -EINVAL;
	}

	rc = xive_native_validate_queue_size(kvm_eq.qshift);
	if (rc) {
		pr_err("invalid queue size %d\n", kvm_eq.qshift);
		return rc;
	}

	/* reset queue and disable queueing */
	if (!kvm_eq.qshift) {
		q->guest_qaddr  = 0;
		q->guest_qshift = 0;

		rc = xive_native_configure_queue(xc->vp_id, q, priority,
						 NULL, 0, true);
		if (rc) {
			pr_err("Failed to reset queue %d for VCPU %d: %d\n",
			       priority, xc->server_num, rc);
			return rc;
		}

		if (q->qpage) {
			put_page(virt_to_page(q->qpage));
			q->qpage = NULL;
		}

		return 0;
	}

	if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
		pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
		       1ull << kvm_eq.qshift);
		return -EINVAL;
	}

	gfn = gpa_to_gfn(kvm_eq.qaddr);
	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
		return -EINVAL;
	}

	page_size = kvm_host_page_size(kvm, gfn);
	if (1ull << kvm_eq.qshift > page_size) {
		/* Drop the reference taken by gfn_to_page() before bailing out */
		put_page(page);
		pr_warn("Incompatible host page size %lx!\n", page_size);
		return -EINVAL;
	}

	qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);

	/*
	 * Back up the guest address of the queue page so that
	 * kvmppc_xive_native_vcpu_eq_sync() can mark the EQ page
	 * dirty for migration.
	 */
	q->guest_qaddr  = kvm_eq.qaddr;
	q->guest_qshift = kvm_eq.qshift;

	/*
	 * Unconditional Notification is forced by default at the
	 * OPAL level because the use of END ESBs is not supported by
	 * Linux.
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, priority,
					 (__be32 *) qaddr, kvm_eq.qshift, true);
	if (rc) {
		pr_err("Failed to configure queue %d for VCPU %d: %d\n",
		       priority, xc->server_num, rc);
		put_page(page);
		return rc;
	}

	/*
	 * Only restore the queue state when needed, i.e. when qtoggle
	 * and qindex differ from their reset values. The
	 * H_INT_SET_SOURCE_CONFIG hcall path should not restore them.
	 */
	if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) {
		rc = xive_native_set_queue_state(xc->vp_id, priority,
						 kvm_eq.qtoggle,
						 kvm_eq.qindex);
		if (rc)
			goto error;
	}

	rc = kvmppc_xive_attach_escalation(vcpu, priority,
					   xive->single_escalation);
error:
	if (rc)
		kvmppc_xive_native_cleanup_queue(vcpu, priority);
	return rc;
}
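
For clarity, the address chain used when mapping the EQ page above (gpa -> gfn -> struct page -> host kernel virtual address) can be factored out as follows. This is a hypothetical sketch, not a helper in the XIVE code; on success it returns the mapping and hands back the page reference, which the caller must later drop with put_page(), exactly as the error paths above do:

static void *map_guest_page(struct kvm *kvm, gpa_t gpa, struct page **pagep)
{
	/* gfn_to_page() takes a reference on the backing page */
	struct page *page = gfn_to_page(kvm, gpa_to_gfn(gpa));

	if (is_error_page(page))
		return NULL;

	*pagep = page;
	/* Re-attach the in-page offset that gpa_to_gfn() stripped */
	return page_to_virt(page) + offset_in_page(gpa);
}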