Example #1
void intel_batch_init(ScrnInfoPtr scrn)
{
	intel_screen_private *intel = intel_get_screen_private(scrn);

	intel->batch_emit_start = 0;
	intel->batch_emitting = 0;
	intel->vertex_id = 0;

	intel->last_batch_bo[0] = bo_alloc(scrn);
	intel->last_batch_bo[1] = bo_alloc(scrn);

	intel->batch_bo = bo_alloc(scrn);
	intel->batch_used = 0;
	intel->last_3d = LAST_3D_OTHER;
}
Example #2
void intel_batch_init()
{
	intel_screen_private *intel = intel_get_screen_private();

	ENTER();

	intel->batch_emit_start = 0;
	intel->batch_emitting = 0;
	intel->vertex_id = 0;

	intel->last_batch_bo[0] = bo_alloc();
	intel->last_batch_bo[1] = bo_alloc();

	intel->batch_bo = bo_alloc();
	intel->batch_used = 0;
	intel->last_3d = LAST_3D_OTHER;

	LEAVE();
}
Example #3
static int bo_alloc(struct kgsl_bo *kgsl_bo)
{
	struct fd_bo *bo = &kgsl_bo->base;
	if (!kgsl_bo->offset) {
		struct drm_kgsl_gem_alloc req = {
				.handle = bo->handle,
		};
		int ret;

		/* if the buffer is already backed by pages then this
		 * doesn't actually do anything (other than giving us
		 * the offset)
		 */
		ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_ALLOC,
				&req, sizeof(req));
		if (ret) {
			ERROR_MSG("alloc failed: %s", strerror(errno));
			return ret;
		}

		kgsl_bo->offset = req.offset;
	}

	return 0;
}

static int kgsl_bo_offset(struct fd_bo *bo, uint64_t *offset)
{
	struct kgsl_bo *kgsl_bo = to_kgsl_bo(bo);
	int ret = bo_alloc(kgsl_bo);
	if (ret)
		return ret;
	*offset = kgsl_bo->offset;
	return 0;
}

static int kgsl_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	uint32_t timestamp = kgsl_bo_get_timestamp(to_kgsl_bo(bo));

	if (op & DRM_FREEDRENO_PREP_NOSYNC) {
		uint32_t current;
		int ret;

		/* special case for is_idle().. we can't really handle that
		 * properly in kgsl (perhaps we need a way to just disable
		 * the bo-cache for kgsl?)
		 */
		if (!pipe)
			return -EBUSY;

		ret = kgsl_pipe_timestamp(to_kgsl_pipe(pipe), &current);
		if (ret)
			return ret;

		if (timestamp > current)
			return -EBUSY;

		return 0;
	}

	if (timestamp)
		fd_pipe_wait(pipe, timestamp);

	return 0;
}
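The two callbacks above are normally reached through libdrm_freedreno's public entry points rather than called directly. As a rough illustration of the is_idle() case mentioned in the comment, the sketch below (an assumption, not part of the example's source; bo_is_idle() is a hypothetical helper) polls a buffer through fd_bo_cpu_prep() with the DRM_FREEDRENO_PREP_NOSYNC flag, which kgsl_bo_cpu_prep() answers with -EBUSY instead of blocking.

#include <errno.h>
#include <freedreno_drmif.h>

/* hypothetical helper, for illustration only: non-blocking check of
 * whether the GPU may still be using a buffer.  NOSYNC makes the
 * kgsl backend compare timestamps and return -EBUSY rather than wait.
 */
static int bo_is_idle(struct fd_bo *bo, struct fd_pipe *pipe)
{
	int ret = fd_bo_cpu_prep(bo, pipe,
			DRM_FREEDRENO_PREP_READ | DRM_FREEDRENO_PREP_NOSYNC);
	return ret != -EBUSY;
}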
Example #4
struct fd_bo *
fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
{
	struct fd_bo *bo;

	if (!is_kgsl_pipe(pipe))
		return NULL;

	bo = fd_bo_new(pipe->dev, 1, 0);

	/* this is fugly, but works around a bug in the kernel..
	 * priv->memdesc.size never gets set, so getbufinfo ioctl
	 * thinks the buffer hasn't been allocated and fails
	 */
	if (bo) {
		void *fbmem = drm_mmap(NULL, size, PROT_READ | PROT_WRITE,
				MAP_SHARED, fbfd, 0);
		struct kgsl_map_user_mem req = {
				.memtype = KGSL_USER_MEM_TYPE_ADDR,
				.len     = size,
				.offset  = 0,
				.hostptr = (unsigned long)fbmem,
		};
		struct kgsl_bo *kgsl_bo = to_kgsl_bo(bo);
		int ret;

		ret = ioctl(to_kgsl_pipe(pipe)->fd, IOCTL_KGSL_MAP_USER_MEM, &req);
		if (ret) {
			ERROR_MSG("mapping user mem failed: %s",
					strerror(errno));
			goto fail;
		}
		kgsl_bo->gpuaddr = req.gpuaddr;
		bo->map = fbmem;
	}

	return bo;
fail:
	if (bo)
		fd_bo_del(bo);
	return NULL;
}

drm_private uint32_t kgsl_bo_gpuaddr(struct kgsl_bo *kgsl_bo, uint32_t offset)
{
	struct fd_bo *bo = &kgsl_bo->base;
	if (!kgsl_bo->gpuaddr) {
		struct drm_kgsl_gem_bufinfo req = {
				.handle = bo->handle,
		};
		int ret;

		ret = bo_alloc(kgsl_bo);
		if (ret) {
			/* no backing pages, so no valid gpuaddr to report */
			return 0;
		}

		ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_GET_BUFINFO,
				&req, sizeof(req));
		if (ret) {
			ERROR_MSG("get bufinfo failed: %s", strerror(errno));
			return 0;
		}

		kgsl_bo->gpuaddr = req.gpuaddr[0];
	}
	return kgsl_bo->gpuaddr + offset;
}

/*
 * Super-cheezy way to synchronize between mesa and ddx..  the
 * SET_ACTIVE ioctl gives us a way to stash a 32b # w/ a GEM bo, and
 * GET_BUFINFO gives us a way to retrieve it.  We use this to stash
 * the timestamp of the last ISSUEIBCMDS on the buffer.
 *
 * To avoid an obscene amount of syscalls, we:
 *  1) Only set the timestamp for buffers w/ a flink name, ie.
 *     only buffers shared across processes.  This is enough to
 *     catch the DRI2 buffers.
 *  2) Only set the timestamp for buffers submitted to the 3d ring
 *     and only check the timestamps on buffers submitted to the
 *     2d ring.  This should be enough to handle synchronizing the
 *     presentation blit.  We could do synchronization in the other
 *     direction too, but that would be problematic if we are using
 *     the 3d ring from DDX, since the client side wouldn't know this.
 *
 * The waiting on timestamp happens before flush, and setting of
 * timestamp happens after flush.  It is transparent to the user
 * of libdrm_freedreno as all the tracking of buffers happens via
 * _emit_reloc()..
 */

drm_private void kgsl_bo_set_timestamp(struct kgsl_bo *kgsl_bo,
		uint32_t timestamp)
{
	struct fd_bo *bo = &kgsl_bo->base;
	if (bo->name) {
		struct drm_kgsl_gem_active req = {
				.handle = bo->handle,
				.active = timestamp,
		};
		int ret;

		ret = drmCommandWrite(bo->dev->fd, DRM_KGSL_GEM_SET_ACTIVE,
				&req, sizeof(req));
		if (ret) {
			ERROR_MSG("set active failed: %s", strerror(errno));
		}
	}
}

drm_private uint32_t kgsl_bo_get_timestamp(struct kgsl_bo *kgsl_bo)
{
	struct fd_bo *bo = &kgsl_bo->base;
	uint32_t timestamp = 0;
	if (bo->name) {
		struct drm_kgsl_gem_bufinfo req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_GET_BUFINFO,
				&req, sizeof(req));
		if (ret) {
			ERROR_MSG("get bufinfo failed: %s", strerror(errno));
			return 0;
		}

		timestamp = req.active;
	}
	return timestamp;
}
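Read together with the comment block, kgsl_bo_set_timestamp() and kgsl_bo_get_timestamp() form the two halves of a wait-before-flush / set-after-flush handshake. The fragment below is only a sketch of the calling side (the function names and the idea of calling them explicitly are assumptions; in the real library this happens inside the flush and _emit_reloc() paths):

/* sketch, assumptions only: the two sides of the timestamp handshake
 * described above, written as if they were called explicitly. */

/* 3d client side: after flushing a submit that wrote a shared
 * (flink'd) buffer, stash the ring timestamp on the bo. */
static void after_3d_flush(struct kgsl_bo *shared, uint32_t timestamp)
{
	kgsl_bo_set_timestamp(shared, timestamp);
}

/* ddx/2d side: before flushing the presentation blit that reads the
 * shared buffer, wait for the last stashed 3d timestamp. */
static void before_2d_flush(struct fd_pipe *pipe, struct kgsl_bo *shared)
{
	uint32_t timestamp = kgsl_bo_get_timestamp(shared);
	if (timestamp)
		fd_pipe_wait(pipe, timestamp);
}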