Example #1
static void
fd3_context_destroy(struct pipe_context *pctx)
{
	struct fd3_context *fd3_ctx = fd3_context(fd_context(pctx));

	util_dynarray_fini(&fd3_ctx->rbrc_patches);

	fd_bo_del(fd3_ctx->vs_pvt_mem);
	fd_bo_del(fd3_ctx->fs_pvt_mem);
	fd_bo_del(fd3_ctx->vsc_size_mem);

	pipe_resource_reference(&fd3_ctx->solid_vbuf, NULL);
	pipe_resource_reference(&fd3_ctx->blit_texcoord_vbuf, NULL);

	fd_context_destroy(pctx);
}
Example #2
static void
delete_shader(struct fd3_shader_stateobj *so)
{
	ir3_shader_destroy(so->ir);
	fd_bo_del(so->bo);
	free(so);
}
Example #3
static void
fd5_context_destroy(struct pipe_context *pctx)
{
	struct fd5_context *fd5_ctx = fd5_context(fd_context(pctx));

	fd_bo_del(fd5_ctx->vs_pvt_mem);
	fd_bo_del(fd5_ctx->fs_pvt_mem);
	fd_bo_del(fd5_ctx->vsc_size_mem);
	fd_bo_del(fd5_ctx->blit_mem);

	fd_context_cleanup_common_vbos(&fd5_ctx->base);

	u_upload_destroy(fd5_ctx->border_color_uploader);

	fd_context_destroy(pctx);
}
Example #4
static void
delete_variant(struct ir3_shader_variant *v)
{
	ir3_destroy(v->ir);
	fd_bo_del(v->bo);
	free(v);
}
Example #5
static void
fd_resource_destroy(struct pipe_screen *pscreen,
		struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);
	fd_bo_del(rsc->bo);
	FREE(rsc);
}
Example #6
static void
fd4_context_destroy(struct pipe_context *pctx)
{
	struct fd4_context *fd4_ctx = fd4_context(fd_context(pctx));

	fd_bo_del(fd4_ctx->vs_pvt_mem);
	fd_bo_del(fd4_ctx->fs_pvt_mem);
	fd_bo_del(fd4_ctx->vsc_size_mem);

	pctx->delete_vertex_elements_state(pctx, fd4_ctx->solid_vbuf_state.vtx);
	pctx->delete_vertex_elements_state(pctx, fd4_ctx->blit_vbuf_state.vtx);

	pipe_resource_reference(&fd4_ctx->solid_vbuf, NULL);
	pipe_resource_reference(&fd4_ctx->blit_texcoord_vbuf, NULL);

	u_upload_destroy(fd4_ctx->border_color_uploader);

	fd_context_destroy(pctx);
}
Example #7
static void
fd_resource_destroy(struct pipe_screen *pscreen,
		struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);
	if (rsc->bo)
		fd_bo_del(rsc->bo);
	list_delinit(&rsc->list);
	util_range_destroy(&rsc->valid_buffer_range);
	FREE(rsc);
}
Example #8
static void
delete_variant(struct ir3_shader_variant *v)
{
	if (v->ir)
		ir3_destroy(v->ir);
	if (v->bo)
		fd_bo_del(v->bo);
	if (v->immediates)
		free(v->immediates);
	free(v);
}
Example #9
void
fd_context_destroy(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	unsigned i;

	DBG("");

	fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);

	if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue))
		util_queue_destroy(&ctx->flush_queue);

	util_copy_framebuffer_state(&ctx->framebuffer, NULL);
	fd_batch_reference(&ctx->batch, NULL);  /* unref current batch */
	fd_bc_invalidate_context(ctx);

	fd_prog_fini(pctx);

	if (ctx->blitter)
		util_blitter_destroy(ctx->blitter);

	if (pctx->stream_uploader)
		u_upload_destroy(pctx->stream_uploader);

	if (ctx->clear_rs_state)
		pctx->delete_rasterizer_state(pctx, ctx->clear_rs_state);

	if (ctx->primconvert)
		util_primconvert_destroy(ctx->primconvert);

	slab_destroy_child(&ctx->transfer_pool);

	for (i = 0; i < ARRAY_SIZE(ctx->vsc_pipe); i++) {
		struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
		if (!pipe->bo)
			break;
		fd_bo_del(pipe->bo);
	}

	fd_device_del(ctx->dev);
	fd_pipe_del(ctx->pipe);

	if (fd_mesa_debug & (FD_DBG_BSTAT | FD_DBG_MSGS)) {
		printf("batch_total=%u, batch_sysmem=%u, batch_gmem=%u, batch_nondraw=%u, batch_restore=%u\n",
			(uint32_t)ctx->stats.batch_total, (uint32_t)ctx->stats.batch_sysmem,
			(uint32_t)ctx->stats.batch_gmem, (uint32_t)ctx->stats.batch_nondraw,
			(uint32_t)ctx->stats.batch_restore);
	}
}
Example #10
static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
	struct fd_screen *screen = fd_screen(rsc->base.b.screen);
	uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */

	if (rsc->bo)
		fd_bo_del(rsc->bo);

	rsc->bo = fd_bo_new(screen->dev, size, flags);
	rsc->timestamp = 0;
	rsc->dirty = false;
}
Example #11
static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
	struct fd_screen *screen = fd_screen(rsc->base.b.screen);
	uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */

	/* if we start using things other than write-combine,
	 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
	 */

	if (rsc->bo)
		fd_bo_del(rsc->bo);

	rsc->bo = fd_bo_new(screen->dev, size, flags);
	rsc->timestamp = 0;
	rsc->dirty = false;
}
Example #12
static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
	struct fd_screen *screen = fd_screen(rsc->base.b.screen);
	uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */

	/* if we start using things other than write-combine,
	 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
	 */

	if (rsc->bo)
		fd_bo_del(rsc->bo);

	rsc->bo = fd_bo_new(screen->dev, size, flags);
	rsc->timestamp = 0;
	rsc->status = 0;
	rsc->pending_ctx = NULL;
	list_delinit(&rsc->list);
	util_range_set_empty(&rsc->valid_buffer_range);
}
Example #13
void
fd_context_destroy(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	unsigned i;

	DBG("");

	fd_prog_fini(pctx);
	fd_hw_query_fini(pctx);

	util_dynarray_fini(&ctx->draw_patches);

	if (ctx->blitter)
		util_blitter_destroy(ctx->blitter);

	if (ctx->primconvert)
		util_primconvert_destroy(ctx->primconvert);

	util_slab_destroy(&ctx->transfer_pool);

	fd_ringmarker_del(ctx->draw_start);
	fd_ringmarker_del(ctx->draw_end);
	fd_ringmarker_del(ctx->binning_start);
	fd_ringmarker_del(ctx->binning_end);

	for (i = 0; i < ARRAY_SIZE(ctx->rings); i++)
		fd_ringbuffer_del(ctx->rings[i]);

	for (i = 0; i < ARRAY_SIZE(ctx->pipe); i++) {
		struct fd_vsc_pipe *pipe = &ctx->pipe[i];
		if (!pipe->bo)
			break;
		fd_bo_del(pipe->bo);
	}

	fd_device_del(ctx->dev);

	FREE(ctx);
}
Example #14
struct fd_bo *
fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
{
	struct fd_bo *bo;

	if (!is_kgsl_pipe(pipe))
		return NULL;

	bo = fd_bo_new(pipe->dev, 1, 0);

	/* this is fugly, but works around a bug in the kernel..
	 * priv->memdesc.size never gets set, so getbufinfo ioctl
	 * thinks the buffer hasn't been allocated and fails
	 */
	if (bo) {
		void *fbmem = drm_mmap(NULL, size, PROT_READ | PROT_WRITE,
				MAP_SHARED, fbfd, 0);
		struct kgsl_map_user_mem req = {
				.memtype = KGSL_USER_MEM_TYPE_ADDR,
				.len     = size,
				.offset  = 0,
				.hostptr = (unsigned long)fbmem,
		};
		struct kgsl_bo *kgsl_bo = to_kgsl_bo(bo);
		int ret;

		ret = ioctl(to_kgsl_pipe(pipe)->fd, IOCTL_KGSL_MAP_USER_MEM, &req);
		if (ret) {
			ERROR_MSG("mapping user mem failed: %s",
					strerror(errno));
			goto fail;
		}
		kgsl_bo->gpuaddr = req.gpuaddr;
		bo->map = fbmem;
	}

	return bo;
fail:
	if (bo)
		fd_bo_del(bo);
	return NULL;
}

drm_private uint32_t kgsl_bo_gpuaddr(struct kgsl_bo *kgsl_bo, uint32_t offset)
{
	struct fd_bo *bo = &kgsl_bo->base;
	if (!kgsl_bo->gpuaddr) {
		struct drm_kgsl_gem_bufinfo req = {
				.handle = bo->handle,
		};
		int ret;

		ret = bo_alloc(kgsl_bo);
		if (ret) {
			return ret;
		}

		ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_GET_BUFINFO,
				&req, sizeof(req));
		if (ret) {
			ERROR_MSG("get bufinfo failed: %s", strerror(errno));
			return 0;
		}

		kgsl_bo->gpuaddr = req.gpuaddr[0];
	}
	return kgsl_bo->gpuaddr + offset;
}

/*
 * Super-cheezy way to synchronize between mesa and ddx..  the
 * SET_ACTIVE ioctl gives us a way to stash a 32b # w/ a GEM bo, and
 * GET_BUFINFO gives us a way to retrieve it.  We use this to stash
 * the timestamp of the last ISSUEIBCMDS on the buffer.
 *
 * To avoid an obscene amount of syscalls, we:
 *  1) Only set the timestamp for buffers w/ a flink name, ie.
 *     only buffers shared across processes.  This is enough to
 *     catch the DRI2 buffers.
 *  2) Only set the timestamp for buffers submitted to the 3d ring
 *     and only check the timestamps on buffers submitted to the
 *     2d ring.  This should be enough to handle synchronizing of
 *     presentation blit.  We could do synchronization in the other
 *     direction too, but that would be problematic if we are using
 *     the 3d ring from DDX, since client side wouldn't know this.
 *
 * The waiting on timestamp happens before flush, and setting of
 * timestamp happens after flush.  It is transparent to the user
 * of libdrm_freedreno as all the tracking of buffers happens via
 * _emit_reloc()..
 */

drm_private void kgsl_bo_set_timestamp(struct kgsl_bo *kgsl_bo,
		uint32_t timestamp)
{
	struct fd_bo *bo = &kgsl_bo->base;
	if (bo->name) {
		struct drm_kgsl_gem_active req = {
				.handle = bo->handle,
				.active = timestamp,
		};
		int ret;

		ret = drmCommandWrite(bo->dev->fd, DRM_KGSL_GEM_SET_ACTIVE,
				&req, sizeof(req));
		if (ret) {
			ERROR_MSG("set active failed: %s", strerror(errno));
		}
	}
}

drm_private uint32_t kgsl_bo_get_timestamp(struct kgsl_bo *kgsl_bo)
{
	struct fd_bo *bo = &kgsl_bo->base;
	uint32_t timestamp = 0;
	if (bo->name) {
		struct drm_kgsl_gem_bufinfo req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmCommandWriteRead(bo->dev->fd, DRM_KGSL_GEM_GET_BUFINFO,
				&req, sizeof(req));
		if (ret) {
			ERROR_MSG("get bufinfo failed: %s", strerror(errno));
			return 0;
		}

		timestamp = req.active;
	}
	return timestamp;
}
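All of the examples above release a buffer-object reference that was obtained earlier with fd_bo_new() (or imported, e.g. via fd_bo_from_handle()). As a minimal, self-contained sketch of that lifecycle using only the public libdrm_freedreno API, assuming the upstream "msm" kernel driver and keeping error handling to a minimum:

#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <freedreno_drmif.h>

int main(void)
{
	/* "msm" is the upstream kernel driver name; kgsl-based stacks differ */
	int fd = drmOpen("msm", NULL);
	if (fd < 0) {
		fprintf(stderr, "drmOpen failed\n");
		return 1;
	}

	/* fd_device_new() takes ownership of the drm fd */
	struct fd_device *dev = fd_device_new(fd);

	/* same allocation flags as the realloc_bo() examples above */
	struct fd_bo *bo = fd_bo_new(dev, 0x1000,
			DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM);
	if (!bo) {
		fd_device_del(dev);
		return 1;
	}

	void *map = fd_bo_map(bo);    /* CPU mapping of the buffer */
	if (map)
		memset(map, 0, 0x1000);

	fd_bo_del(bo);                /* drop the reference taken by fd_bo_new() */
	fd_device_del(dev);
	return 0;
}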