コード例 #1
0
ファイル: xencomm.c プロジェクト: 325116067/semc-qsd8x50
/*
 * Map a kernel buffer of @bytes at @ptr for communication with Xen.
 * A physically contiguous region is returned as an inline handle;
 * anything else gets a xencomm descriptor allocated with GFP_KERNEL.
 * Returns NULL when the descriptor cannot be built.
 */
struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
{
	struct xencomm_desc *desc;
	int rc;

	/* Contiguous memory needs no descriptor at all. */
	if (xencomm_is_phys_contiguous((unsigned long)ptr))
		return xencomm_create_inline(ptr);

	rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
	if (rc != 0 || !desc)
		return NULL;

	return xencomm_pa(desc);
}
コード例 #2
0
ファイル: xencomm.c プロジェクト: 325116067/semc-qsd8x50
/*
 * Map @ptr/@bytes for Xen without allocating: the caller supplies the
 * backing storage in @xc_desc.  Physically contiguous regions are
 * returned as inline handles and do not touch @xc_desc.
 * Returns NULL when the mini descriptor cannot be built.
 */
struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
			struct xencomm_mini *xc_desc)
{
	struct xencomm_desc *desc = NULL;
	int rc;

	/* Fast path: no descriptor needed for contiguous memory. */
	if (xencomm_is_phys_contiguous((unsigned long)ptr))
		return xencomm_create_inline(ptr);

	rc = xencomm_create_mini(ptr, bytes, xc_desc, &desc);
	if (rc != 0)
		return NULL;

	return xencomm_pa(desc);
}
コード例 #3
0
ファイル: xcom_privcmd.c プロジェクト: dduval/kernel-rhel5
/*
 * Translate an ACM (access control module) privcmd hypercall into a
 * xencomm one.  Only ACMOP_getssid is supported: the user argument
 * struct is copied into the kernel, wrapped in an inline handle, its
 * nested ssid buffer is wrapped in a xencomm descriptor, the hypercall
 * is issued, and the (possibly updated) struct is copied back out.
 *
 * Fix: removed the trailing `return ret;` after the switch — every
 * case (including default) returns, so that statement was dead code.
 */
static int
xencomm_privcmd_acm_op(privcmd_hypercall_t *hypercall)
{
	int cmd = hypercall->arg[0];
	void __user *arg = (void __user *)hypercall->arg[1];
	struct xencomm_handle *op_desc;
	struct xencomm_handle *desc = NULL;
	int ret;

	switch (cmd) {
	case ACMOP_getssid:
	{
		struct acm_getssid kern_arg;

		if (copy_from_user(&kern_arg, arg, sizeof (kern_arg)))
			return -EFAULT;

		/* Inline handle: the kernel copy is physically contiguous. */
		op_desc = xencomm_create_inline(&kern_arg);

		/* Wrap the nested, possibly non-contiguous ssid buffer. */
		ret = xencomm_create(xen_guest_handle(kern_arg.ssidbuf),
		                     kern_arg.ssidbuf_size, &desc, GFP_KERNEL);
		if (ret)
			return ret;

		set_xen_guest_handle(kern_arg.ssidbuf, (void *)desc);

		ret = xencomm_arch_hypercall_acm_op(cmd, op_desc);

		xencomm_free(desc);

		if (copy_to_user(arg, &kern_arg, sizeof (kern_arg)))
			return -EFAULT;

		return ret;
	}
	default:
		printk("%s: unknown acm_op cmd %d\n", __func__, cmd);
		return -ENOSYS;
	}
}
コード例 #4
0
ファイル: xcom_privcmd.c プロジェクト: dduval/kernel-rhel5
/*
 * Translate a privcmd dom0_op hypercall into a xencomm one: copy the
 * op struct from user space, check its interface version, wrap it in
 * an inline xencomm handle, issue it, and copy the result back.
 *
 * NOTE(review): in this build the switch carries no supported commands,
 * so every call currently returns -ENOSYS from the default case and the
 * code after the switch is unreachable — it is kept as the template the
 * per-command cases plug into.
 */
static int
xencomm_privcmd_dom0_op(privcmd_hypercall_t *hypercall)
{
	dom0_op_t kern_op;
	dom0_op_t __user *user_op = (dom0_op_t __user *)hypercall->arg[0];
	struct xencomm_handle *op_desc;
	struct xencomm_handle *desc = NULL;	/* descriptor for nested buffers, if any */
	int ret = 0;

	if (copy_from_user(&kern_op, user_op, sizeof(dom0_op_t)))
		return -EFAULT;

	/* Reject callers built against a different dom0 ABI. */
	if (kern_op.interface_version != DOM0_INTERFACE_VERSION)
		return -EACCES;

	/* The kernel copy is contiguous, so an inline handle suffices. */
	op_desc = xencomm_create_inline(&kern_op);

	switch (kern_op.cmd) {
	default:
		printk("%s: unknown dom0 cmd %d\n", __func__, kern_op.cmd);
		return -ENOSYS;
	}

	if (ret) {
		/* error mapping the nested pointer */
		return ret;
	}

	ret = xencomm_arch_hypercall_dom0_op(op_desc);

	/* FIXME: should we restore the handle?  */
	if (copy_to_user(user_op, &kern_op, sizeof(dom0_op_t)))
		ret = -EFAULT;

	if (desc)
		xencomm_free(desc);
	return ret;
}
コード例 #5
0
ファイル: xcom_privcmd.c プロジェクト: dduval/kernel-rhel5
/*
 * Dispatch a privcmd XENMEM_* hypercall through xencomm.
 * Reservation-style ops are delegated to
 * xencomm_privcmd_memory_reservation_op().  XENMEM_translate_gpfn_list
 * is handled inline: the op struct is wrapped in an inline handle and
 * the nested gpfn/mfn arrays get xencomm descriptors before the
 * hypercall is issued.
 *
 * Fix: on failure of the second xencomm_create() (mfn_list) the first
 * descriptor (desc_gpfn) was leaked; it is now freed before returning.
 * The redundant `if (ret != 0) return ret; return ret;` tail was also
 * collapsed into a single return.
 */
static int
xencomm_privcmd_memory_op(privcmd_hypercall_t *hypercall)
{
	const unsigned long cmd = hypercall->arg[0];
	int ret = 0;

	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap:
		return xencomm_privcmd_memory_reservation_op(hypercall);
	case XENMEM_translate_gpfn_list:
	{
		xen_translate_gpfn_list_t kern_op;
		xen_translate_gpfn_list_t __user *user_op;
		struct xencomm_handle *desc_gpfn = NULL;
		struct xencomm_handle *desc_mfn = NULL;
		struct xencomm_handle *desc_op;
		void *addr;

		user_op = (xen_translate_gpfn_list_t __user *)
			hypercall->arg[1];
		if (copy_from_user(&kern_op, user_op,
		                   sizeof(xen_translate_gpfn_list_t)))
			return -EFAULT;
		/* Inline handle for the contiguous kernel copy of the op. */
		desc_op = xencomm_create_inline(&kern_op);

		if (kern_op.nr_gpfns) {
			/* Wrap the guest gpfn_list array. */
			addr = xen_guest_handle(kern_op.gpfn_list);
			ret = xencomm_create(addr, kern_op.nr_gpfns *
			                     sizeof(*xen_guest_handle
			                            (kern_op.gpfn_list)),
			                     &desc_gpfn, GFP_KERNEL);
			if (ret)
				return ret;
			set_xen_guest_handle(kern_op.gpfn_list,
			                     (void *)desc_gpfn);

			/* Wrap the guest mfn_list array. */
			addr = xen_guest_handle(kern_op.mfn_list);
			ret = xencomm_create(addr, kern_op.nr_gpfns *
			                     sizeof(*xen_guest_handle
			                            (kern_op.mfn_list)),
			                     &desc_mfn, GFP_KERNEL);
			if (ret) {
				/* Don't leak the gpfn descriptor when the
				 * mfn descriptor cannot be created. */
				xencomm_free(desc_gpfn);
				return ret;
			}
			set_xen_guest_handle(kern_op.mfn_list,
			                     (void *)desc_mfn);
		}

		ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);

		if (desc_gpfn)
			xencomm_free(desc_gpfn);

		if (desc_mfn)
			xencomm_free(desc_mfn);

		return ret;
	}
	default:
		printk("%s: unknown memory op %lu\n", __func__, cmd);
		ret = -ENOSYS;
	}
	return ret;
}
コード例 #6
0
ファイル: xcom_privcmd.c プロジェクト: dduval/kernel-rhel5
/*
 * Handle XENMEM_{increase,decrease}_reservation / XENMEM_populate_physmap
 * privcmd hypercalls.  The op struct is copied in and wrapped in an
 * inline handle; the nested extent_start array (when present) is wrapped
 * chunk by chunk, repeating the hypercall per chunk — both to respect
 * the xencomm single-page descriptor limit and to keep each hypercall
 * short enough to avoid soft-lockup warnings.
 * Returns the number of extents processed, or a negative errno.
 */
static int
xencomm_privcmd_memory_reservation_op(privcmd_hypercall_t *hypercall)
{
	const unsigned long cmd = hypercall->arg[0];
	int ret = 0;
	xen_memory_reservation_t kern_op;
	xen_memory_reservation_t __user *user_op;
	struct xencomm_handle *desc = NULL;	/* per-chunk descriptor, reused each pass */
	struct xencomm_handle *desc_op;		/* inline handle for kern_op itself */

	user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
	if (copy_from_user(&kern_op, user_op,
			   sizeof(xen_memory_reservation_t)))
		return -EFAULT;
	desc_op = xencomm_create_inline(&kern_op);

	if (!xen_guest_handle(kern_op.extent_start)) {
		/* No extent array: a single hypercall suffices. */
		ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
		if (ret < 0)
			return ret;
	} else {
		xen_ulong_t nr_done = 0;		/* extents completed so far */
		xen_ulong_t nr_extents = kern_op.nr_extents;	/* extents remaining */
		void *addr = xen_guest_handle(kern_op.extent_start);
			
		/*
		 * Work around.
		 *   Xencomm has single page size limit caused
		 *   by xencomm_alloc()/xencomm_free() so that
		 *   we have to repeat the hypercall.
		 *   This limitation can be removed.
		 */
#define MEMORYOP_XENCOMM_LIMIT						\
		(((((PAGE_SIZE - sizeof(struct xencomm_desc)) /		\
		    sizeof(uint64_t)) - 2) * PAGE_SIZE) /		\
		 sizeof(*xen_guest_handle(kern_op.extent_start)))

		/*
		 * Work around.
		 *   Even if the above limitation is removed,
		 *   the hypercall with large number of extents 
		 *   may cause the soft lockup warning.
		 *   In order to avoid the warning, we limit
		 *   the number of extents and repeat the hypercall.
		 *   The following value is determined by evaluation.
		 *   Time of one hypercall should be smaller than
		 *   a vcpu time slice. The time with current
		 *   MEMORYOP_MAX_EXTENTS is around 5 msec.
		 *   If the following limit causes some issues,
		 *   we should decrease this value.
		 *
		 *   Another way would be that start with small value and
		 *   increase adoptively measuring hypercall time.
		 *   It might be over-kill.
		 */
#define MEMORYOP_MAX_EXTENTS	(MEMORYOP_XENCOMM_LIMIT / 512)

		while (nr_extents > 0) {
			xen_ulong_t nr_tmp = nr_extents;
			if (nr_tmp > MEMORYOP_MAX_EXTENTS)
				nr_tmp = MEMORYOP_MAX_EXTENTS;

			/* Point the op at the current chunk of the array. */
			kern_op.nr_extents = nr_tmp;
			ret = xencomm_create
				(addr + nr_done * sizeof(*xen_guest_handle(kern_op.extent_start)),
				 nr_tmp * sizeof(*xen_guest_handle(kern_op.extent_start)),
				 &desc, GFP_KERNEL);

			/*
			 * NOTE(review): the return code of xencomm_create()
			 * is not checked directly here, only desc == NULL —
			 * confirm failure always leaves desc == NULL.
			 * A partial run reports progress rather than an error.
			 */
			if (addr != NULL && nr_tmp > 0 && desc == NULL)
				return nr_done > 0 ? nr_done : -ENOMEM;

			set_xen_guest_handle(kern_op.extent_start,
					     (void *)desc);

			ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
			xencomm_free(desc);
			if (ret < 0)
				return nr_done > 0 ? nr_done : ret;

			/* ret >= 0 is the number of extents Xen processed. */
			nr_done += ret;
			nr_extents -= ret;
			/*
			 * NOTE(review): signed/unsigned comparison (int vs
			 * xen_ulong_t); safe here because ret >= 0 at this
			 * point.  Short completion means Xen stopped early.
			 */
			if (ret < nr_tmp)
				break;

			/*
			 * prevent softlock up message.
			 * give cpu to soft lockup kernel thread.
			 */
			if (nr_extents > 0)
				schedule();
		}
		ret = nr_done;
		/* Restore the user's original pointer before copy-out. */
		set_xen_guest_handle(kern_op.extent_start, addr);
	}

	if (copy_to_user(user_op, &kern_op, sizeof(xen_memory_reservation_t)))
		return -EFAULT;

	return ret;
}
コード例 #7
0
ファイル: xcom_privcmd.c プロジェクト: dduval/kernel-rhel5
/*
 * Translate a privcmd domctl hypercall into a xencomm one.  The op
 * struct is copied in, version-checked and wrapped in an inline handle;
 * commands that carry a nested guest buffer get that buffer wrapped in
 * a xencomm descriptor (at most one per command here), then the
 * hypercall is issued and results are copied back to user space.
 */
static int
xencomm_privcmd_domctl(privcmd_hypercall_t *hypercall)
{
	xen_domctl_t kern_op;
	xen_domctl_t __user *user_op;
	struct xencomm_handle *op_desc;
	struct xencomm_handle *desc = NULL;	/* descriptor for the nested buffer, if any */
	int ret = 0;

	user_op = (xen_domctl_t __user *)hypercall->arg[0];

	if (copy_from_user(&kern_op, user_op, sizeof(xen_domctl_t)))
		return -EFAULT;

	/* Reject callers built against a different domctl ABI. */
	if (kern_op.interface_version != XEN_DOMCTL_INTERFACE_VERSION)
		return -EACCES;

	op_desc = xencomm_create_inline(&kern_op);

	/*
	 * Per-command setup: commands that pass a guest buffer replace the
	 * embedded guest handle with a xencomm descriptor; plain commands
	 * just break.
	 */
	switch (kern_op.cmd) {
	case XEN_DOMCTL_createdomain:
	case XEN_DOMCTL_destroydomain:
	case XEN_DOMCTL_pausedomain:
	case XEN_DOMCTL_unpausedomain:
	case XEN_DOMCTL_getdomaininfo:
		break;
	case XEN_DOMCTL_getmemlist:
	{
		unsigned long nr_pages = kern_op.u.getmemlist.max_pfns;

		/* Buffer receives one unsigned long (pfn) per page. */
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.getmemlist.buffer),
			nr_pages * sizeof(unsigned long),
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.getmemlist.buffer,
		                     (void *)desc);
		break;
	}
	case XEN_DOMCTL_getpageframeinfo:
		break;
	case XEN_DOMCTL_getpageframeinfo2:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.getpageframeinfo2.array),
			kern_op.u.getpageframeinfo2.num,
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.getpageframeinfo2.array,
		                     (void *)desc);
		break;
	case XEN_DOMCTL_shadow_op:
		/* Dirty bitmap: one bit per page, rounded up to bytes. */
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap),
			ROUND_DIV(kern_op.u.shadow_op.pages, 8),
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap,
		                     (void *)desc);
		break;
	case XEN_DOMCTL_max_mem:
		break;
	case XEN_DOMCTL_setvcpucontext:
	case XEN_DOMCTL_getvcpucontext:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.vcpucontext.ctxt),
			sizeof(vcpu_guest_context_t),
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.vcpucontext.ctxt, (void *)desc);
		break;
	case XEN_DOMCTL_getvcpuinfo:
		break;
	case XEN_DOMCTL_setvcpuaffinity:
	case XEN_DOMCTL_getvcpuaffinity:
		/* CPU affinity bitmap: one bit per cpu, rounded up to bytes. */
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap),
			ROUND_DIV(kern_op.u.vcpuaffinity.cpumap.nr_cpus, 8),
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap,
		                     (void *)desc);
		break;
	case XEN_DOMCTL_max_vcpus:
	case XEN_DOMCTL_scheduler_op:
	case XEN_DOMCTL_setdomainhandle:
	case XEN_DOMCTL_setdebugging:
	case XEN_DOMCTL_irq_permission:
	case XEN_DOMCTL_iomem_permission:
	case XEN_DOMCTL_ioport_permission:
	case XEN_DOMCTL_hypercall_init:
	case XEN_DOMCTL_arch_setup:
	case XEN_DOMCTL_settimeoffset:
	case XEN_DOMCTL_sendtrigger:
	case XEN_DOMCTL_set_address_size:
	case XEN_DOMCTL_get_address_size:
		break;
	default:
		printk("%s: unknown domctl cmd %d\n", __func__, kern_op.cmd);
		return -ENOSYS;
	}

	if (ret) {
		/* error mapping the nested pointer */
		/*
		 * NOTE(review): assumes xencomm_create() leaves desc == NULL
		 * on failure, so nothing leaks here — TODO confirm.
		 */
		return ret;
	}

	ret = xencomm_arch_hypercall_domctl(op_desc);
	if (kern_op.cmd == XEN_DOMCTL_destroydomain) {
		/* Domain teardown can be long; retry while yielding the cpu. */
		while (ret == -EAGAIN) {
			schedule(); /* prevent softlock up message */
			ret = xencomm_arch_hypercall_domctl(op_desc);
		}
	}

	/* FIXME: should we restore the handle?  */
	if (copy_to_user(user_op, &kern_op, sizeof(xen_domctl_t)))
		ret = -EFAULT;

	if (desc)
		xencomm_free(desc);
	return ret;
}
コード例 #8
0
ファイル: xcom_privcmd.c プロジェクト: dduval/kernel-rhel5
/*
 * Translate a privcmd sysctl hypercall into a xencomm one.  The op
 * struct is copied in, version-checked (with a RHEL5 compat exception
 * for physinfo) and wrapped in an inline handle; commands with nested
 * guest buffers get those buffers wrapped in xencomm descriptors
 * (perfc_op needs two), then the hypercall is issued and results are
 * copied back to user space.
 */
static int
xencomm_privcmd_sysctl(privcmd_hypercall_t *hypercall)
{
	xen_sysctl_t kern_op;
	xen_sysctl_t __user *user_op;
	struct xencomm_handle *op_desc;
	struct xencomm_handle *desc = NULL;	/* first nested buffer */
	struct xencomm_handle *desc1 = NULL;	/* second nested buffer (perfc_op) */
	int ret = 0;

	user_op = (xen_sysctl_t __user *)hypercall->arg[0];

	if (copy_from_user(&kern_op, user_op, sizeof(xen_sysctl_t)))
		return -EFAULT;

	if (kern_op.interface_version != XEN_SYSCTL_INTERFACE_VERSION)
	{
	    /*
	     * RHEL5 ABI compat: Allow through physinfo calls with
	     * newer versions for NUMA extensions
	     */
	    if (kern_op.cmd == XEN_SYSCTL_physinfo &&
		kern_op.interface_version == (XEN_SYSCTL_INTERFACE_VERSION+1))
	      printk(KERN_DEBUG "Allowing physinfo call with newer ABI version\n");
	    else
	      return -EACCES;
	}

	op_desc = xencomm_create_inline(&kern_op);

	switch (kern_op.cmd) {
	case XEN_SYSCTL_readconsole:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.readconsole.buffer),
			kern_op.u.readconsole.count,
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.readconsole.buffer,
		                     (void *)desc);
		break;
	case XEN_SYSCTL_tbuf_op:
	case XEN_SYSCTL_sched_id:
		break;
	case XEN_SYSCTL_perfc_op:
	{
		struct xencomm_handle *tmp_desc;
		/* Probe op with NULL buffers: Xen fills in the counts. */
		xen_sysctl_t tmp_op = {
			.cmd = XEN_SYSCTL_perfc_op,
			.interface_version = XEN_SYSCTL_INTERFACE_VERSION,
			.u.perfc_op = {
				.cmd = XEN_SYSCTL_PERFCOP_query,
				// .desc.p = NULL,
				// .val.p = NULL,
			},
		};

		if (xen_guest_handle(kern_op.u.perfc_op.desc) == NULL) {
			/* A val buffer without a desc buffer is malformed. */
			if (xen_guest_handle(kern_op.u.perfc_op.val) != NULL)
				return -EINVAL;
			break;
		}

		/* query the buffer size for xencomm */
		tmp_desc = xencomm_create_inline(&tmp_op);
		ret = xencomm_arch_hypercall_sysctl(tmp_desc);
		if (ret)
			return ret;

		ret = xencomm_create(xen_guest_handle(kern_op.u.perfc_op.desc),
		                     tmp_op.u.perfc_op.nr_counters *
		                     sizeof(xen_sysctl_perfc_desc_t),
		                     &desc, GFP_KERNEL);
		if (ret)
			return ret;

		set_xen_guest_handle(kern_op.u.perfc_op.desc, (void *)desc);

		ret = xencomm_create(xen_guest_handle(kern_op.u.perfc_op.val),
		                     tmp_op.u.perfc_op.nr_vals *
		                     sizeof(xen_sysctl_perfc_val_t),
		                     &desc1, GFP_KERNEL);
		/*
		 * NOTE(review): on failure desc is freed here and the error
		 * is returned by the common `if (ret)` check after the
		 * switch; desc is not reset to NULL, but that early return
		 * prevents a double free in the tail cleanup.
		 */
		if (ret)
			xencomm_free(desc);

		set_xen_guest_handle(kern_op.u.perfc_op.val, (void *)desc1);
		break;
	}
	case XEN_SYSCTL_getdomaininfolist:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.getdomaininfolist.buffer),
			kern_op.u.getdomaininfolist.max_domains *
			sizeof(xen_domctl_getdomaininfo_t),
			&desc, GFP_KERNEL);
		set_xen_guest_handle(kern_op.u.getdomaininfolist.buffer,
				     (void *)desc);
		break;
	case XEN_SYSCTL_physinfo:
		ret = xencomm_create(
			xen_guest_handle(kern_op.u.physinfo.cpu_to_node),
			kern_op.u.physinfo.max_cpu_id * sizeof(uint32_t),
			&desc, GFP_KERNEL);
		if (ret)
			return ret;

		set_xen_guest_handle(kern_op.u.physinfo.cpu_to_node,
		                     (void *)desc);
		break;
	default:
		printk("%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
		return -ENOSYS;
	}

	if (ret) {
		/* error mapping the nested pointer */
		return ret;
	}

	ret = xencomm_arch_hypercall_sysctl(op_desc);

	/* FIXME: should we restore the handles?  */
	if (copy_to_user(user_op, &kern_op, sizeof(xen_sysctl_t)))
		ret = -EFAULT;

	if (desc)
		xencomm_free(desc);
	if (desc1)
		xencomm_free(desc1);
	return ret;
}