Example #1
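/*
 * Ioctl handler: read the checkpoint state of the caller's context. The
 * request struct is copied in from user space, the context layer returns a
 * kernel buffer with the requested checkpoint data, that buffer is copied to
 * the user-supplied address when present, and the updated request (result
 * and buffer size) is written back.
 */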
static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info get_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
						&get_info.buf_size, &cpt_buf);
	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;

		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
		kfree(cpt_buf);

		if (retval)
			return -EFAULT;
	}

	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}
Example #2
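/*
 * Ioctl handler: set the checkpoint state of the caller's context. The
 * user-supplied checkpoint buffer is duplicated into kernel memory with
 * memdup_user(), handed to vmci_ctx_set_chkpt_state(), and the result is
 * written back to the request struct in user space.
 */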
static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf,
				set_info.buf_size);
	if (IS_ERR(cpt_buf))
		return PTR_ERR(cpt_buf);

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

	kfree(cpt_buf);
	return retval;
}
Example #3
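/*
 * Ioctl handler: fetch pending doorbell and queue-pair notifications for the
 * caller's context and copy the two handle arrays out to the user-supplied
 * buffers. vmci_ctx_rcv_notifications_release() is called on every path after
 * a successful get; its last argument reports whether the copy-out fully
 * succeeded.
 */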
static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
					   const char *ioctl_name,
					   void __user *uptr)
{
	struct vmci_ctx_notify_recv_info info;
	struct vmci_handle_arr *db_handle_array;
	struct vmci_handle_arr *qp_handle_array;
	void __user *ubuf;
	u32 cid;
	int retval = 0;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("not supported for the current vmx version\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	info.result = vmci_ctx_rcv_notifications_get(cid,
				&db_handle_array, &qp_handle_array);
	if (info.result != VMCI_SUCCESS)
		return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;

	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
					    db_handle_array, &retval);
	if (info.result == VMCI_SUCCESS && !retval) {
		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
		info.result = drv_cp_harray_to_user(ubuf,
						    &info.qp_handle_buf_size,
						    qp_handle_array, &retval);
	}

	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
		retval = -EFAULT;

	vmci_ctx_rcv_notifications_release(cid,
				db_handle_array, qp_handle_array,
				info.result == VMCI_SUCCESS && !retval);

	return retval;
}
Example #4
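/*
 * Ioctl handler: send a datagram on behalf of the caller's context. The
 * declared length is checked against VMCI_MAX_DG_SIZE and the minimum header
 * size, the datagram is duplicated into kernel memory with memdup_user(),
 * its self-described size (VMCI_DG_SIZE()) is cross-checked against the
 * declared length, and it is dispatched with the caller's context id as the
 * source.
 */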
static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = memdup_user((void __user *)(uintptr_t)send_info.addr,
			 send_info.len);
	if (IS_ERR(dg)) {
		vmci_ioctl_err(
			"cannot allocate memory to dispatch datagram\n");
		return PTR_ERR(dg);
	}

	if (VMCI_DG_SIZE(dg) != send_info.len) {
		vmci_ioctl_err("datagram size mismatch\n");
		kfree(dg);
		return -EINVAL;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

	/* Get source context id. */
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}
Example #5
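/*
 * Ioctl handler: doorbell resource notifications. Depending on the requested
 * action, this notifies, creates, or destroys a doorbell for the caller's
 * context; unknown actions are reported back as VMCI_ERROR_INVALID_ARGS.
 */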
static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_dbell_notify_resource_info info;
	u32 cid;

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("invalid for current VMX versions\n");
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	switch (info.action) {
	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;

			info.result = vmci_ctx_notify_dbell(cid, info.handle,
							    flags);
		} else {
			info.result = VMCI_ERROR_UNAVAILABLE;
		}
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
		info.result = vmci_ctx_dbell_create(cid, info.handle);
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
		break;

	default:
		vmci_ioctl_err("got unknown action (action=%d)\n",
			       info.action);
		info.result = VMCI_ERROR_INVALID_ARGS;
	}

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}
Example #6
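/*
 * Ioctl handler: set the checkpoint state of the caller's context. Same
 * operation as Example #2, but the user buffer is brought into the kernel
 * with an explicit kmalloc() + copy_from_user() pair instead of
 * memdup_user().
 */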
static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
	if (!cpt_buf) {
		vmci_ioctl_err(
			"cannot allocate memory to set cpt state (type=%d)\n",
			set_info.cpt_type);
		return -ENOMEM;
	}

	if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
			   set_info.buf_size)) {
		retval = -EFAULT;
		goto out;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

out:
	kfree(cpt_buf);
	return retval;
}
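Example #6 open-codes the allocation-and-copy step that Example #2 delegates to memdup_user(). As a rough illustration, memdup_user() behaves approximately like the hypothetical helper below (the name memdup_user_sketch() is made up for this sketch; the in-tree helper additionally uses kmalloc_track_caller() for allocation accounting):

/* Simplified sketch of memdup_user(); memdup_user_sketch() is hypothetical. */
static void *memdup_user_sketch(const void __user *src, size_t len)
{
	void *p;

	/* Allocate a kernel buffer for the user data. */
	p = kmalloc(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	/* Copy the data in; on a fault, free the buffer and report -EFAULT. */
	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}

With the helper, the two failure modes collapse into a single IS_ERR()/PTR_ERR() check, which is why Example #2 needs no separate -ENOMEM and -EFAULT branches.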
Example #7
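/*
 * Ioctl handler: subscribe the caller's context to notifications about the
 * remote context id given in the request. Only the result field of the user
 * struct is written back, via put_user().
 */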
static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}
Example #8
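/*
 * Ioctl handler: initialize this file handle as a VMCI context. Under the
 * device lock it validates that the handle is not yet initialized and that
 * only the supported restriction flag is set, creates the context with the
 * caller's credentials, copies the assigned context id back to user space
 * (destroying the context again if that copy fails), and finally marks the
 * handle as VMCIOBJ_CONTEXT.
 */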
static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

	/*
	 * Copy cid to userlevel, we do this to allow the VMX
	 * to enforce its policy on cid generation.
	 */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}
Example #9
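/*
 * Ioctl handler: allocate (or attach to) a queue pair for the caller's
 * context. User versions older than VMCI_VERSION_NOVMVM use the
 * vmci_qp_alloc_info_vmvm layout without a page store; newer versions pass
 * their PPN buffer through a vmci_qp_page_store. If writing the result back
 * fails after a successful allocation, the queue pair is detached again.
 */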
static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						alloc_info.peer,
						alloc_info.flags,
						VMCI_NO_PRIVILEGE_FLAGS,
						alloc_info.produce_size,
						alloc_info.consume_size,
						NULL,
						vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						alloc_info.peer,
						alloc_info.flags,
						VMCI_NO_PRIVILEGE_FLAGS,
						alloc_info.produce_size,
						alloc_info.consume_size,
						&page_store,
						vmci_host_dev->context);
	}

	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
							vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}
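All of the handlers above share the same (struct vmci_host_dev *, const char *ioctl_name, void __user *uptr) signature, which is the shape expected by the driver's unlocked_ioctl dispatch. The sketch below is hypothetical and only illustrates that wiring: the IOCTL_VMCI_* command values, the name strings, and the assumption that the per-file vmci_host_dev sits in filp->private_data are not taken from the code above.

/*
 * Hypothetical dispatch sketch -- not the driver's actual ioctl entry point.
 * Command macros and the private_data layout are assumptions for
 * illustration only.
 */
static long vmci_host_ioctl_sketch(struct file *filp,
				   unsigned int iocmd, unsigned long ioarg)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	void __user *uptr = (void __user *)ioarg;

	switch (iocmd) {
	case IOCTL_VMCI_INIT_CONTEXT:
		return vmci_host_do_init_context(vmci_host_dev,
						 "IOCTL_VMCI_INIT_CONTEXT",
						 uptr);
	case IOCTL_VMCI_DATAGRAM_SEND:
		return vmci_host_do_send_datagram(vmci_host_dev,
						  "IOCTL_VMCI_DATAGRAM_SEND",
						  uptr);
	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
		return vmci_host_do_alloc_queuepair(vmci_host_dev,
						    "IOCTL_VMCI_QUEUEPAIR_ALLOC",
						    uptr);
	/* ... the remaining handlers are wired up the same way ... */
	default:
		return -EINVAL;
	}
}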