static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
				       unsigned long arg)
{
	struct sync_file_info info;
	struct sync_fence_info *fence_info = NULL;
	struct dma_fence **fences;
	__u32 size;
	int num_fences, ret, i;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	if (info.flags || info.pad)
		return -EINVAL;

	fences = get_fences(sync_file, &num_fences);

	/*
	 * Passing num_fences = 0 means that userspace doesn't want to
	 * retrieve any sync_fence_info. If num_fences = 0 we skip filling
	 * sync_fence_info and return the actual number of fences in
	 * info->num_fences.
	 */
	if (!info.num_fences)
		goto no_fences;

	if (info.num_fences < num_fences)
		return -EINVAL;

	size = num_fences * sizeof(*fence_info);
	fence_info = kzalloc(size, GFP_KERNEL);
	if (!fence_info)
		return -ENOMEM;

	for (i = 0; i < num_fences; i++)
		sync_fill_fence_info(fences[i], &fence_info[i]);

	if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
			 size)) {
		ret = -EFAULT;
		goto out;
	}

no_fences:
	strlcpy(info.name, sync_file->name, sizeof(info.name));
	info.status = dma_fence_is_signaled(sync_file->fence);
	info.num_fences = num_fences;

	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(fence_info);

	return ret;
}
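
A minimal userspace sketch of the two-pass protocol this ioctl implements (a first call with num_fences = 0 only reports the count, a second call fills a buffer); it assumes `fd` is a sync_file descriptor obtained from a driver, e.g. an out-fence:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

static int dump_sync_file(int fd)
{
	struct sync_file_info info;
	struct sync_fence_info *fences;

	/* Pass 1: num_fences == 0, the kernel only reports the count. */
	memset(&info, 0, sizeof(info));
	if (ioctl(fd, SYNC_IOC_FILE_INFO, &info))
		return -1;
	if (!info.num_fences)
		return 0;

	fences = calloc(info.num_fences, sizeof(*fences));
	if (!fences)
		return -1;

	/* Pass 2: hand the kernel a buffer big enough for every fence. */
	info.sync_fence_info = (uintptr_t)fences;
	if (ioctl(fd, SYNC_IOC_FILE_INFO, &info)) {
		free(fences);
		return -1;
	}

	printf("%s: %u fences, status %d\n",
	       info.name, info.num_fences, info.status);
	free(fences);
	return 0;
}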
Example No. 2
static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
		       total_length))
		return -EFAULT;

	return 0;
}
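
copy_query_item is the size-negotiation helper behind the i915 query uAPI, used by query_engine_info and query_topology_info below: an item with length == 0 gets the required size written back, a shorter buffer is rejected with -EINVAL, and a large enough one has its header copied in for validation. A hedged userspace sketch of that handshake, assuming `drm_fd` is an open i915 render node (DRM_I915_QUERY_ENGINE_INFO works the same way):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *query_topology(int drm_fd, int32_t *len)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	void *buf;

	/* Pass 1: length == 0, the kernel writes the required size
	 * (or a negative errno) into item.length. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
		return NULL;

	buf = calloc(1, item.length);
	if (!buf)
		return NULL;

	/* Pass 2: same item, now with a buffer attached. */
	item.data_ptr = (uintptr_t)buf;
	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0) {
		free(buf);
		return NULL;
	}

	*len = item.length;
	return buf;
}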
Example No. 3
File: offload.c Project: krzk/linux
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	void *res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	/*
	 * Userspace advertises its buffer size in jited_prog_len; the
	 * kernel reports the real image length back and, when both are
	 * non-zero, copies out min(buffer, image).
	 */
	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}
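
The userspace half of this length/buffer convention, sketched with libbpf's bpf_obj_get_info_by_fd(); it assumes `prog_fd` refers to a device-offloaded program:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static void *fetch_jited_image(int prog_fd, uint32_t *img_len)
{
	struct bpf_prog_info info;
	uint32_t info_len = sizeof(info);
	void *img;

	/* Call 1: jited_prog_len == 0, the kernel reports the size. */
	memset(&info, 0, sizeof(info));
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len) ||
	    !info.jited_prog_len)
		return NULL;

	img = malloc(info.jited_prog_len);
	if (!img)
		return NULL;

	/* Call 2: length and pointer set, the kernel copies the image. */
	*img_len = info.jited_prog_len;
	memset(&info, 0, sizeof(info));
	info.jited_prog_len = *img_len;
	info.jited_prog_insns = (uintptr_t)img;
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
		free(img);
		return NULL;
	}
	return img;
}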
Example No. 4
static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
				u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int len, ret;

	if (query_item->flags)
		return -EINVAL;

	len = sizeof(struct drm_i915_query_engine_info) +
	      RUNTIME_INFO(i915)->num_engines *
	      sizeof(struct drm_i915_engine_info);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_engine(engine, i915, id) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->instance;
		info.capabilities = engine->uabi_capabilities;

		if (__copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (__copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}
Example No. 5
int drm_mode_getplane_res(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_mode_get_plane_res *plane_resp = data;
	struct drm_plane *plane;
	uint32_t __user *plane_ptr;
	int count = 0;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EOPNOTSUPP;

	plane_ptr = u64_to_user_ptr(plane_resp->plane_id_ptr);

	/*
	 * This ioctl is called twice: once to determine how much space is
	 * needed, and a second time to fill it.
	 */
	drm_for_each_plane(plane, dev) {
		/*
		 * Unless userspace set the 'universal planes'
		 * capability bit, only advertise overlays.
		 */
		if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
		    !file_priv->universal_planes)
			continue;

		if (drm_lease_held(file_priv, plane->base.id)) {
			if (count < plane_resp->count_planes &&
			    put_user(plane->base.id, plane_ptr + count))
				return -EFAULT;
			count++;
		}
	}
	plane_resp->count_planes = count;

	return 0;
}
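
The matching userspace side of the two-call protocol, sketched with the raw ioctl (libdrm's drmModeGetPlaneResources() wraps the same flow); it assumes `fd` is an open DRM device, with DRM_CLIENT_CAP_UNIVERSAL_PLANES set beforehand if non-overlay planes should be listed:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static uint32_t *get_plane_ids(int fd, uint32_t *count)
{
	struct drm_mode_get_plane_res res;
	uint32_t *ids;

	/* Call 1: count_planes == 0, the kernel only counts. */
	memset(&res, 0, sizeof(res));
	if (ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res))
		return NULL;

	ids = calloc(res.count_planes, sizeof(*ids));
	if (!ids)
		return NULL;

	/* Call 2: provide the array, the kernel fills in the IDs. */
	res.plane_id_ptr = (uintptr_t)ids;
	if (ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res)) {
		free(ids);
		return NULL;
	}

	*count = res.count_planes;
	return ids;
}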
Example No. 6
static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_execbuffer *execbuffer = data;
	struct drm_qxl_command user_cmd;
	int cmd_num;
	int ret;

	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {

		struct drm_qxl_command __user *commands =
			u64_to_user_ptr(execbuffer->commands);

		if (copy_from_user(&user_cmd, commands + cmd_num,
				       sizeof(user_cmd)))
			return -EFAULT;

		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
		if (ret)
			return ret;
	}
	return 0;
}
Example No. 7
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);

	struct drm_amdgpu_bo_list_entry *info;
	struct amdgpu_bo_list *list;

	int r;

	info = kvmalloc_array(args->in.bo_number,
			     sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == args->in.bo_info_size)) {
		unsigned long bytes = args->in.bo_number *
			args->in.bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
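		/*
		 * Entry size mismatch: userspace was built against a
		 * different ABI revision. Copy min(user, kernel) bytes
		 * per entry and leave the remainder zero-filled.
		 */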
		unsigned long bytes = min(args->in.bo_info_size, info_size);
		unsigned i;

		memset(info, 0, args->in.bo_number * info_size);
		for (i = 0; i < args->in.bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += args->in.bo_info_size;
		}
	}

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &handle);
		if (r)
			goto error_free;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
					      args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_free:
	kvfree(info);
	return r;
}
Example No. 8
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full QXLDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * QXLReleaseInfo struct (first XXX bytes)
 */
static int qxl_process_single_command(struct qxl_device *qdev,
				      struct drm_qxl_command *cmd,
				      struct drm_file *file_priv)
{
	struct qxl_reloc_info *reloc_info;
	int release_type;
	struct qxl_release *release;
	struct qxl_bo *cmd_bo;
	void *fb_cmd;
	int i, ret, num_relocs;
	int unwritten;

	switch (cmd->type) {
	case QXL_CMD_DRAW:
		release_type = QXL_RELEASE_DRAWABLE;
		break;
	case QXL_CMD_SURFACE:
	case QXL_CMD_CURSOR:
	default:
		DRM_DEBUG("Only draw commands in execbuffers\n");
		return -EINVAL;
	}

	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
		return -EINVAL;

	if (!access_ok(u64_to_user_ptr(cmd->command),
		       cmd->command_size))
		return -EFAULT;

	reloc_info = kmalloc_array(cmd->relocs_num,
				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
	if (!reloc_info)
		return -ENOMEM;

	ret = qxl_alloc_release_reserved(qdev,
					 sizeof(union qxl_release_info) +
					 cmd->command_size,
					 release_type,
					 &release,
					 &cmd_bo);
	if (ret)
		goto out_free_reloc;

	/* TODO copy slow path code from i915 */
	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
	unwritten = __copy_from_user_inatomic_nocache
		(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
		 u64_to_user_ptr(cmd->command), cmd->command_size);

	{
		struct qxl_drawable *draw = fb_cmd;

		draw->mm_time = qdev->rom->mm_clock;
	}

	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
	if (unwritten) {
		DRM_ERROR("got unwritten %d\n", unwritten);
		ret = -EFAULT;
		goto out_free_release;
	}

	/* fill out reloc info structs */
	num_relocs = 0;
	for (i = 0; i < cmd->relocs_num; ++i) {
		struct drm_qxl_reloc reloc;
		struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);

		if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
			ret = -EFAULT;
			goto out_free_bos;
		}

		/* add the bos to the list of bos to validate -
		   need to validate first then process relocs? */
		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);

			ret = -EINVAL;
			goto out_free_bos;
		}
		reloc_info[i].type = reloc.reloc_type;

		if (reloc.dst_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
						 &reloc_info[i].dst_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].dst_offset = reloc.dst_offset;
		} else {
			reloc_info[i].dst_bo = cmd_bo;
			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
		}
		num_relocs++;

		/* reserve and validate the reloc dst bo */
		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
						 &reloc_info[i].src_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].src_offset = reloc.src_offset;
		} else {
			reloc_info[i].src_bo = NULL;
			reloc_info[i].src_offset = 0;
		}
	}

	/* validate all buffers */
	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_bos;

	for (i = 0; i < cmd->relocs_num; ++i) {
		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
			apply_reloc(qdev, &reloc_info[i]);
		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
			apply_surf_reloc(qdev, &reloc_info[i]);
	}

	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
	if (ret)
		qxl_release_backoff_reserve_list(release);
	else
		qxl_release_fence_buffer_objects(release);

out_free_bos:
out_free_release:
	if (ret)
		qxl_release_free(qdev, release);
out_free_reloc:
	kfree(reloc_info);
	return ret;
}
Example No. 9
static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	if (query_item->flags != 0)
		return -EINVAL;

	if (sseu->max_slices == 0)
		return -ENODEV;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * sseu->ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length,
			      query_item);
	if (ret != 0)
		return ret;

	if (topo.flags != 0)
		return -EINVAL;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = sseu->ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = sseu->eu_stride;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			   &topo, sizeof(topo)))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			   &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) + slice_length),
			   sseu->subslice_mask, subslice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) +
					   slice_length + subslice_length),
			   sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}
Example No. 10
/**
 * qib_diagpkt_write - write an IB packet
 * @fp: the diag data device file pointer
 * @data: qib_diag_pkt structure saying where to get the packet
 * @count: size of data to write
 * @off: unused by this code
 */
static ssize_t qib_diagpkt_write(struct file *fp,
				 const char __user *data,
				 size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, pbufn, maxlen_reserve;
	struct qib_diag_xpkt dp;
	u32 *tmpbuf = NULL;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	ssize_t ret = 0;

	if (count != sizeof(dp)) {
		ret = -EINVAL;
		goto bail;
	}
	if (copy_from_user(&dp, data, sizeof(dp))) {
		ret = -EFAULT;
		goto bail;
	}

	dd = qib_lookup(dp.unit);
	if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}
	if (!(dd->flags & QIB_INITTED)) {
		/* no hardware, freeze, etc. */
		ret = -ENODEV;
		goto bail;
	}

	if (dp.version != _DIAG_XPKT_VERS) {
		qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
			    dp.version);
		ret = -EINVAL;
		goto bail;
	}
	/* send count must be an exact number of dwords */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}
	if (!dp.port || dp.port > dd->num_pports) {
		ret = -EINVAL;
		goto bail;
	}
	ppd = &dd->pport[dp.port - 1];

	/*
	 * need total length before first word written, plus 2 Dwords. One Dword
	 * is for padding so we get the full user data when not aligned on
	 * a word boundary. The other Dword is to make sure we have room for the
	 * ICRC which gets tacked on later.
	 */
	maxlen_reserve = 2 * sizeof(u32);
	if (dp.len > ppd->ibmaxlen - maxlen_reserve) {
		ret = -EINVAL;
		goto bail;
	}

	plen = sizeof(u32) + dp.len;

	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   u64_to_user_ptr(dp.data),
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	plen >>= 2;             /* in dwords */

	if (dp.pbc_wd == 0)
		dp.pbc_wd = plen;

	piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
	if (!piobuf) {
		ret = -EBUSY;
		goto bail;
	}
	/* disarm it just to be extra sure */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));

	/* disable header check on pbufn for this packet */
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);

	writeq(dp.pbc_wd, piobuf);
	/*
	 * Copy all but the trigger word, then flush, so it's written
	 * to chip before trigger word, then write trigger word, then
	 * flush again, so packet is sent.
	 */
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, tmpbuf, plen - 1);
		qib_flush_wc();
		__raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
	} else
		qib_pio_copy(piobuf + 2, tmpbuf, plen);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	/*
	 * Ensure buffer is written to the chip, then re-enable
	 * header checks (if supported by chip).  The txchk
	 * code will ensure seen by chip before returning.
	 */
	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);
	return ret;
}
Example No. 11
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_pmr *pmrs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_gpu *gpu;
	struct sync_file *sync_file = NULL;
	struct ww_acquire_ctx ticket;
	int out_fence_fd = -1;
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
	 */
	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
	relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
	pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
	stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
	if (!bos || !relocs || !pmrs || !stream) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
			     args->nr_pmrs * sizeof(*pmrs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_submit_cmds;
		}
	}

	ww_acquire_init(&ticket, &reservation_ww_class);

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_ww_acquire;
	}

	ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &submit->cmdbuf,
				  ALIGN(args->stream_size, 8) + 8);
	if (ret)
		goto err_submit_objects;

	submit->cmdbuf.ctx = file->driver_priv;
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_objects;

	ret = submit_lock_objects(submit, &ticket);
	if (ret)
		goto err_submit_objects;

	if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_objects;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		submit->in_fence = sync_file_get_fence(args->fence_fd);
		if (!submit->in_fence) {
			ret = -EINVAL;
			goto err_submit_objects;
		}
	}

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_objects;

	ret = submit_pin_objects(submit);
	if (ret)
		goto err_submit_objects;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto err_submit_objects;

	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
	if (ret)
		goto err_submit_objects;

	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
	submit->cmdbuf.user_size = ALIGN(args->stream_size, 8);

	ret = etnaviv_gpu_submit(gpu, submit);
	if (ret)
		goto err_submit_objects;

	submit_attach_object_fences(submit);

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		/*
		 * This can be improved: ideally we want to allocate the sync
		 * file before kicking off the GPU job and just attach the
		 * fence to the sync file here, eliminating the ENOMEM
		 * possibility at this stage.
		 */
		sync_file = sync_file_create(submit->out_fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto err_submit_objects;
		}
		fd_install(out_fence_fd, sync_file->file);
	}

	args->fence_fd = out_fence_fd;
	args->fence = submit->out_fence->seqno;

err_submit_objects:
	etnaviv_submit_put(submit);

err_submit_ww_acquire:
	ww_acquire_fini(&ticket);

err_submit_cmds:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	if (stream)
		kvfree(stream);
	if (bos)
		kvfree(bos);
	if (relocs)
		kvfree(relocs);
	if (pmrs)
		kvfree(pmrs);

	return ret;
}