/*
 * Guest-side wrapper for the XENMEM_* hypercalls.
 *
 * Converts the virtual-address guest handles embedded in *arg into
 * xencomm descriptors the hypervisor can follow, issues the hypercall,
 * then restores the original handles so the caller sees *arg unchanged.
 *
 * @cmd: XENMEM_* subcommand.
 * @arg: subcommand-specific argument structure (interpreted per @cmd).
 *
 * Returns the hypercall result, or a negative errno on setup failure
 * (-ENOSYS for an unrecognized @cmd, -EINVAL if @arg cannot be mapped).
 */
int
xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
{
	/*
	 * Saved copy of the caller's extent_start handle so it can be
	 * restored after xencommize_memory_reservation() rewrites it.
	 * NOTE(review): sized 2 although only [0] is used here —
	 * presumably for a two-reservation op (e.g. exchange) handled
	 * elsewhere; confirm before shrinking.
	 */
	GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} };
	struct xen_memory_reservation *xmr = NULL;
	int rc;
	struct xencomm_handle *desc;
	unsigned int argsize;
	/* Pre-allocated mini xencomm area(s); avoids allocation here. */
	XENCOMM_MINI_ALIGNED(xc_area, 2);

	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap:
		xmr = (struct xen_memory_reservation *)arg;
		/* Remember the caller's virtual-address handle. */
		set_xen_guest_handle(extent_start_va[0],
				     xen_guest_handle(xmr->extent_start));

		argsize = sizeof(*xmr);
		/* Rewrites xmr->extent_start to a xencomm descriptor. */
		rc = xencommize_memory_reservation(xc_area, xmr);
		if (rc)
			return rc;
		xc_area++;
		break;

	case XENMEM_maximum_ram_page:
		/* No argument structure for this subcommand. */
		argsize = 0;
		break;

	case XENMEM_add_to_physmap:
		argsize = sizeof(struct xen_add_to_physmap);
		break;

	default:
		printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd);
		return -ENOSYS;
	}

	/* Map the (small, contiguous) argument without allocating. */
	desc = xencomm_map_no_alloc(arg, argsize);
	if (desc == NULL)
		return -EINVAL;

	rc = xencomm_arch_hypercall_memory_op(cmd, desc);

	/*
	 * Undo the handle rewrite for reservation ops so the caller's
	 * structure is left exactly as it was passed in.
	 */
	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap:
		set_xen_guest_handle(xmr->extent_start,
				     xen_guest_handle(extent_start_va[0]));
		break;
	}

	return rc;
}
/* Example no. 2 */
/*
 * privcmd entry point for the XENMEM_* hypercalls issued from userspace.
 *
 * Reservation ops are delegated to
 * xencomm_privcmd_memory_reservation_op(); XENMEM_translate_gpfn_list
 * is handled here by copying the user argument in, wrapping its two
 * embedded lists in xencomm descriptors, and issuing the hypercall.
 *
 * @hypercall: user-supplied hypercall frame; arg[0] is the XENMEM_*
 *             subcommand, arg[1] the user pointer to its argument.
 *
 * Returns the hypercall result, -EFAULT on a bad user pointer, or
 * -ENOSYS for an unknown subcommand.
 */
static int
xencomm_privcmd_memory_op(privcmd_hypercall_t *hypercall)
{
	const unsigned long cmd = hypercall->arg[0];
	int ret = 0;

	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap:
		return xencomm_privcmd_memory_reservation_op(hypercall);
	case XENMEM_translate_gpfn_list:
	{
		xen_translate_gpfn_list_t kern_op;
		xen_translate_gpfn_list_t __user *user_op;
		struct xencomm_handle *desc_gpfn = NULL;
		struct xencomm_handle *desc_mfn = NULL;
		struct xencomm_handle *desc_op;
		void *addr;

		user_op = (xen_translate_gpfn_list_t __user *)
			hypercall->arg[1];
		if (copy_from_user(&kern_op, user_op,
		                   sizeof(xen_translate_gpfn_list_t)))
			return -EFAULT;
		/* Inline descriptor for the (stack-resident) op struct. */
		desc_op = xencomm_create_inline(&kern_op);

		if (kern_op.nr_gpfns) {
			/* gpfn_list.  */
			addr = xen_guest_handle(kern_op.gpfn_list);

			ret = xencomm_create(addr, kern_op.nr_gpfns *
			                     sizeof(*xen_guest_handle
			                            (kern_op.gpfn_list)),
			                     &desc_gpfn, GFP_KERNEL);
			if (ret)
				goto out;
			set_xen_guest_handle(kern_op.gpfn_list,
			                     (void *)desc_gpfn);

			/* mfn_list.  */
			addr = xen_guest_handle(kern_op.mfn_list);

			ret = xencomm_create(addr, kern_op.nr_gpfns *
			                     sizeof(*xen_guest_handle
			                            (kern_op.mfn_list)),
			                     &desc_mfn, GFP_KERNEL);
			if (ret)
				/* was: early return leaking desc_gpfn */
				goto out;
			set_xen_guest_handle(kern_op.mfn_list,
			                     (void *)desc_mfn);
		}

		ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);

out:
		/* Single cleanup path frees whatever was created. */
		if (desc_gpfn)
			xencomm_free(desc_gpfn);

		if (desc_mfn)
			xencomm_free(desc_mfn);

		return ret;
	}
	default:
		/* KERN_DEBUG for consistency with the other handlers. */
		printk(KERN_DEBUG "%s: unknown memory op %lu\n",
		       __func__, cmd);
		ret = -ENOSYS;
	}
	return ret;
}
/* Example no. 3 */
/*
 * privcmd handler for the reservation-style XENMEM_* ops
 * (increase/decrease reservation, populate_physmap).
 *
 * Copies the user's xen_memory_reservation_t in, then — because a
 * single xencomm descriptor covers at most one page of address words,
 * and overly large hypercalls trigger soft-lockup warnings — chops the
 * extent list into bounded chunks and repeats the hypercall, yielding
 * the CPU between chunks.
 *
 * @hypercall: arg[0] is the XENMEM_* subcommand, arg[1] the user
 *             pointer to the reservation structure.
 *
 * Returns the number of extents processed (or the raw hypercall result
 * when no extent list is supplied), -EFAULT on a bad user pointer, or
 * a negative errno if the first chunk fails.
 */
static int
xencomm_privcmd_memory_reservation_op(privcmd_hypercall_t *hypercall)
{
	const unsigned long cmd = hypercall->arg[0];
	int ret = 0;
	xen_memory_reservation_t kern_op;
	xen_memory_reservation_t __user *user_op;
	struct xencomm_handle *desc = NULL;
	struct xencomm_handle *desc_op;

	user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
	if (copy_from_user(&kern_op, user_op,
			   sizeof(xen_memory_reservation_t)))
		return -EFAULT;
	/* Inline descriptor for the (stack-resident) op struct. */
	desc_op = xencomm_create_inline(&kern_op);

	if (!xen_guest_handle(kern_op.extent_start)) {
		/* No extent list: single direct hypercall. */
		ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
		if (ret < 0)
			return ret;
	} else {
		xen_ulong_t nr_done = 0;
		xen_ulong_t nr_extents = kern_op.nr_extents;
		void *addr = xen_guest_handle(kern_op.extent_start);

		/*
		 * Work around.
		 *   Xencomm has single page size limit caused
		 *   by xencomm_alloc()/xencomm_free() so that
		 *   we have to repeat the hypercall.
		 *   This limitation can be removed.
		 */
#define MEMORYOP_XENCOMM_LIMIT						\
		(((((PAGE_SIZE - sizeof(struct xencomm_desc)) /		\
		    sizeof(uint64_t)) - 2) * PAGE_SIZE) /		\
		 sizeof(*xen_guest_handle(kern_op.extent_start)))

		/*
		 * Work around.
		 *   Even if the above limitation is removed,
		 *   the hypercall with large number of extents
		 *   may cause the soft lockup warning.
		 *   In order to avoid the warning, we limit
		 *   the number of extents and repeat the hypercall.
		 *   The following value is determined by evaluation.
		 *   Time of one hypercall should be smaller than
		 *   a vcpu time slice. The time with current
		 *   MEMORYOP_MAX_EXTENTS is around 5 msec.
		 *   If the following limit causes some issues,
		 *   we should decrease this value.
		 *
		 *   Another way would be that start with small value and
		 *   increase adoptively measuring hypercall time.
		 *   It might be over-kill.
		 */
#define MEMORYOP_MAX_EXTENTS	(MEMORYOP_XENCOMM_LIMIT / 512)

		while (nr_extents > 0) {
			xen_ulong_t nr_tmp = nr_extents;
			if (nr_tmp > MEMORYOP_MAX_EXTENTS)
				nr_tmp = MEMORYOP_MAX_EXTENTS;

			kern_op.nr_extents = nr_tmp;
			ret = xencomm_create
				(addr + nr_done * sizeof(*xen_guest_handle(kern_op.extent_start)),
				 nr_tmp * sizeof(*xen_guest_handle(kern_op.extent_start)),
				 &desc, GFP_KERNEL);

			/*
			 * Check xencomm_create's own result instead of
			 * inferring failure from desc == NULL, and
			 * propagate the real error rather than a
			 * hard-coded -ENOMEM.  Partial progress is
			 * still reported to the caller.
			 */
			if (ret)
				return nr_done > 0 ? (int)nr_done : ret;

			set_xen_guest_handle(kern_op.extent_start,
					     (void *)desc);

			ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
			xencomm_free(desc);
			if (ret < 0)
				return nr_done > 0 ? (int)nr_done : ret;

			nr_done += ret;
			nr_extents -= ret;
			/*
			 * ret >= 0 here, so the cast avoids a
			 * signed/unsigned comparison surprise.
			 */
			if ((xen_ulong_t)ret < nr_tmp)
				break;

			/*
			 * prevent softlock up message.
			 * give cpu to soft lockup kernel thread.
			 */
			if (nr_extents > 0)
				schedule();
		}
#undef MEMORYOP_MAX_EXTENTS
#undef MEMORYOP_XENCOMM_LIMIT
		ret = nr_done;
		/* Restore the caller's original handle before copy-out. */
		set_xen_guest_handle(kern_op.extent_start, addr);
	}

	if (copy_to_user(user_op, &kern_op, sizeof(xen_memory_reservation_t)))
		return -EFAULT;

	return ret;
}