Example #1
int etna_free(struct etna_ctx *ctx)
{
    if(ctx == NULL)
        return ETNA_INVALID_ADDR;
    /* Free kernel command queue */
    etna_queue_free(ctx->queue);
#ifdef GCABI_HAS_CONTEXT
    /* Free context buffer */
    etna_bo_del(ctx->conn, ctx->ctx_bo, NULL);
#endif
    /* Free command buffers */
    for(int x=0; x<NUM_COMMAND_BUFFERS; ++x)
    {
        viv_user_signal_destroy(ctx->conn, ctx->cmdbufi[x].sig_id);
        etna_bo_del(ctx->conn, ctx->cmdbufi[x].bo, NULL);
        ETNA_FREE(ctx->cmdbuf[x]);
    }
    viv_user_signal_destroy(ctx->conn, ctx->sig_id);
#ifndef GCABI_HAS_CONTEXT
    gpu_context_free(ctx);
#endif

    ETNA_FREE(ctx);
    return ETNA_OK;
}
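For context, etna_free is the teardown half of context setup in this tree. A minimal usage sketch, assuming the old libetnaviv entry points viv_open, etna_create and viv_close (treat the exact names and signatures as assumptions):

/* Hypothetical setup/teardown pairing for the old libetnaviv API. */
struct viv_conn *conn = NULL;
struct etna_ctx *ctx = NULL;
if (viv_open(VIV_HW_3D, &conn) == 0 && etna_create(conn, &ctx) == ETNA_OK) {
    /* ... queue commands, flush ... */
    etna_free(ctx);  /* releases queue, command buffers, signals, context */
    viv_close(conn);
}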
Example #2
static void flush(struct etna_cmd_stream *stream)
{
	struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
	int ret, id = priv->pipe->id;
	struct etna_gpu *gpu = priv->pipe->gpu;
	struct drm_etnaviv_gem_submit req = {0}; /* zero-init fields this older path never sets */

	req.pipe = gpu->core;
	req.exec_state = id;
	req.bos = VOID2U64(priv->submit.bos);
	req.nr_bos = priv->submit.nr_bos;
	req.relocs = VOID2U64(priv->submit.relocs);
	req.nr_relocs = priv->submit.nr_relocs;
	req.stream = VOID2U64(stream->buffer);
	req.stream_size = stream->offset * 4; /* in bytes */

	ret = drmCommandWriteRead(gpu->dev->fd, DRM_ETNAVIV_GEM_SUBMIT,
			&req, sizeof(req));

	if (ret) {
		ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
	} else {
		priv->last_timestamp = req.fence;
	}

	for (uint32_t i = 0; i < priv->nr_bos; i++) {
		struct etna_bo *bo = priv->bos[i];
		bo->current_stream = NULL;
		etna_bo_del(bo);
	}
}
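VOID2U64 above packs a user-space pointer into the 64-bit fields of the submit ioctl. A minimal definition consistent with this usage (libdrm keeps it in a private header; the exact form here is an assumption):

#include <stdint.h>
/* Cast through unsigned long so the conversion is well-defined on both
 * 32- and 64-bit userspace. */
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))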
Example #3
struct etna_bo* etna_bo_new(struct viv_conn *conn, size_t bytes, uint32_t flags)
{
    struct etna_bo *mem = ETNA_CALLOC_STRUCT(etna_bo);
    if(mem == NULL) return NULL;

    if((flags & DRM_ETNA_GEM_TYPE_MASK) == DRM_ETNA_GEM_TYPE_CMD)
    {
        mem->bo_type = ETNA_BO_TYPE_CONTIGUOUS;
        /* Command buffers must be allocated with viv_alloc_contiguous */
        if(viv_alloc_contiguous(conn, bytes,
                    &mem->address,
                    &mem->logical,
                    &mem->size)!=0)
        {
            ETNA_FREE(mem);
            return NULL;
        }
    } else {
        enum viv_surf_type type = VIV_SURF_UNKNOWN;
        enum viv_pool pool = VIV_POOL_DEFAULT;
        /* Convert GEM bits to surface type */
        switch(flags & DRM_ETNA_GEM_TYPE_MASK)
        {
        case DRM_ETNA_GEM_TYPE_IDX: type = VIV_SURF_INDEX; break;
        case DRM_ETNA_GEM_TYPE_VTX: type = VIV_SURF_VERTEX; break;
        case DRM_ETNA_GEM_TYPE_TEX: type = VIV_SURF_TEXTURE; break;
        case DRM_ETNA_GEM_TYPE_RT:  type = VIV_SURF_RENDER_TARGET; break;
        case DRM_ETNA_GEM_TYPE_ZS:  type = VIV_SURF_DEPTH; break;
        case DRM_ETNA_GEM_TYPE_HZ:  type = VIV_SURF_HIERARCHICAL_DEPTH; break;
        case DRM_ETNA_GEM_TYPE_BMP: type = VIV_SURF_BITMAP; break;
        case DRM_ETNA_GEM_TYPE_TS:  type = VIV_SURF_TILE_STATUS; break;
        default: /* Invalid type */
            ETNA_FREE(mem);
            return NULL;
        }

        mem->bo_type = ETNA_BO_TYPE_VIDMEM;
        mem->type = type;
        if(viv_alloc_linear_vidmem(conn, bytes, ETNA_VIDMEM_ALIGNMENT, type, pool, &mem->node, &mem->size)!=0)
        {
#ifdef DEBUG
            fprintf(stderr, "Error allocating memory\n");
#endif
            ETNA_FREE(mem); /* fix leak: free the wrapper on allocation failure */
            return NULL;
        }
#ifdef DEBUG
        printf("Allocated: mem=%p node=%08x size=%08x\n", mem, (uint32_t)mem->node, mem->size);
#endif
        int status = etna_bo_lock(conn, mem);
        if(status != ETNA_OK)
        {
            etna_bo_del(conn, mem, NULL);
            return NULL;
        }
    }
    return mem;
}
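A usage sketch for this allocator, reusing the three-argument etna_bo_del and the etna_bo_map seen elsewhere in these examples; the call site itself is hypothetical:

/* Allocate a small vertex buffer, fill it through the CPU mapping, free it. */
struct etna_bo *vtx = etna_bo_new(conn, 4096, DRM_ETNA_GEM_TYPE_VTX);
if (vtx != NULL) {
    float *verts = etna_bo_map(vtx); /* CPU-visible mapping */
    verts[0] = 0.0f;                 /* ... vertex data ... */
    etna_bo_del(conn, vtx, NULL);    /* immediate free; bo must be GPU-idle */
}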
Example #4
static void flush(struct etna_cmd_stream *stream, int in_fence_fd,
		  int *out_fence_fd)
{
	struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
	int ret, id = priv->pipe->id;
	struct etna_gpu *gpu = priv->pipe->gpu;

	struct drm_etnaviv_gem_submit req = {
		.pipe = gpu->core,
		.exec_state = id,
		.bos = VOID2U64(priv->submit.bos),
		.nr_bos = priv->submit.nr_bos,
		.relocs = VOID2U64(priv->submit.relocs),
		.nr_relocs = priv->submit.nr_relocs,
		.stream = VOID2U64(stream->buffer),
		.stream_size = stream->offset * 4, /* in bytes */
	};

	if (in_fence_fd != -1) {
		req.flags |= ETNA_SUBMIT_FENCE_FD_IN | ETNA_SUBMIT_NO_IMPLICIT;
		req.fence_fd = in_fence_fd;
	}

	if (out_fence_fd)
		req.flags |= ETNA_SUBMIT_FENCE_FD_OUT;

	/*
	 * Pass the complete submit structure only if flags are set. Otherwise,
	 * only pass the fields up to, but not including the flags field for
	 * backwards compatibility with older kernels.
	 */
	ret = drmCommandWriteRead(gpu->dev->fd, DRM_ETNAVIV_GEM_SUBMIT,
			&req, req.flags ? sizeof(req) :
			offsetof(struct drm_etnaviv_gem_submit, flags));

	if (ret)
		ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
	else
		priv->last_timestamp = req.fence;

	for (uint32_t i = 0; i < priv->nr_bos; i++) {
		struct etna_bo *bo = priv->bos[i];

		bo->current_stream = NULL;
		etna_bo_del(bo);
	}

	if (out_fence_fd)
		*out_fence_fd = req.fence_fd;
}

void etna_cmd_stream_flush(struct etna_cmd_stream *stream)
{
	flush(stream, -1, NULL);
	reset_buffer(stream);
}
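Upstream libdrm pairs this with a fence-aware wrapper around the same flush path; a sketch consistent with flush() above (the name etna_cmd_stream_flush2 follows libdrm, treat it as an assumption here):

void etna_cmd_stream_flush2(struct etna_cmd_stream *stream,
			    int in_fence_fd, int *out_fence_fd)
{
	flush(stream, in_fence_fd, out_fence_fd);
	reset_buffer(stream);
}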
Example #5
static void test_size_rounding(struct etna_device *dev)
{
	struct etna_bo *bo;

	printf("testing size rounding ... ");

	bo = etna_bo_new(dev, 15, ETNA_BO_UNCACHED);
	assert(etna_bo_size(bo) == 4096);
	etna_bo_del(bo);

	bo = etna_bo_new(dev, 4096, ETNA_BO_UNCACHED);
	assert(etna_bo_size(bo) == 4096);
	etna_bo_del(bo);

	bo = etna_bo_new(dev, 4100, ETNA_BO_UNCACHED);
	assert(etna_bo_size(bo) == 8192);
	etna_bo_del(bo);

	printf("ok\n");
}
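The rounding the asserts encode is plain page alignment; a minimal sketch, assuming a 4096-byte page:

/* 15 -> 4096, 4096 -> 4096, 4100 -> 8192, matching the test above. */
static inline size_t bo_round_to_page(size_t size)
{
	return (size + 4095) & ~(size_t)4095;
}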
Example #6
static void test_cache(struct etna_device *dev)
{
	struct etna_bo *bo, *tmp;

	/* allocate and free some bo's with same size - we must
	 * get the same bo over and over. */
	printf("testing bo cache ... ");

	bo = tmp = etna_bo_new(dev, 0x100, ETNA_BO_UNCACHED);
	assert(bo);
	etna_bo_del(bo);

	for (unsigned i = 0; i < 100; i++) {
		tmp = etna_bo_new(dev, 0x100, ETNA_BO_UNCACHED);
		etna_bo_del(tmp);
		assert(tmp == bo);
	}

	printf("ok\n");
}
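A hypothetical harness for the two tests above; etna_device_new and etna_device_del follow libdrm's etnaviv API, and the render-node path is an assumption:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed device node */
	if (fd < 0)
		return 1;
	struct etna_device *dev = etna_device_new(fd);
	test_size_rounding(dev);
	test_cache(dev);
	etna_device_del(dev);
	close(fd);
	return 0;
}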
Example #7
static void
etna_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc)
{
   struct etna_resource *rsc = etna_resource(prsc);

   if (rsc->bo)
      etna_bo_del(rsc->bo);

   if (rsc->ts_bo)
      etna_bo_del(rsc->ts_bo);

   if (rsc->scanout)
      renderonly_scanout_destroy(rsc->scanout);

   list_delinit(&rsc->list);

   pipe_resource_reference(&rsc->texture, NULL);

   FREE(rsc);
}
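The single-argument etna_bo_del used here is reference-counted, which is why the destroy path can hand back every buffer it holds without tracking other users. A simplified sketch of that contract (the NULL check and cache details are assumptions about libdrm's internals):

void etna_bo_del(struct etna_bo *bo)
{
	if (!bo)
		return;
	if (!atomic_dec_and_test(&bo->refcnt))
		return; /* another resource still holds a reference */
	/* ... return the buffer to the size-bucket cache or close the
	 * underlying GEM handle ... */
}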
Example #8
static void etna_screen_resource_destroy(struct pipe_screen *screen,
                        struct pipe_resource *resource_)
{
    struct etna_screen *priv = etna_screen(screen);
    struct etna_resource *resource = etna_resource(resource_);
    if(resource == NULL)
        return;
    struct etna_queue *queue = NULL;
    if(resource->last_ctx != NULL)
    {
        /* XXX This could fail when multiple contexts share this resource,
         * (the last one to bind it will "own" it) or fail miserably if
         * the context was since destroyed.
         * Integrate this into etna_bo_del...
         */
        DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: resource queued destroyed (%ix%ix%i)", resource, resource_->width0, resource_->height0, resource_->depth0);
        queue = resource->last_ctx->ctx->queue;
    } else {
        DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: resource destroyed (%ix%ix%i)", resource, resource_->width0, resource_->height0, resource_->depth0);
    }
    etna_bo_del(priv->dev, resource->bo, queue);
    etna_bo_del(priv->dev, resource->ts_bo, queue);
    FREE(resource);
}
Example #9
/* Clear GPU context, to rebuild it for next flush */
static int gpu_context_clear(struct etna_ctx *ctx)
{
    /* If the context was used, queue it for freeing and allocate a new buffer
     * to prevent overwriting it while it is still in use by the GPU.
     * Otherwise we can just re-use it.
     */
    int rv;
#ifdef DEBUG
    fprintf(stderr, "gpu_context_clear (context %i)\n", (int)GCCTX(ctx)->id);
#endif
    if(GCCTX(ctx)->inUse != NULL &&
       *GCCTX(ctx)->inUse)
    {
#ifdef DEBUG
        fprintf(stderr, "gpu_context_clear: context was in use, deferred freeing and reallocating it\n");
#endif
        if((rv = etna_bo_del(ctx->conn, ctx->ctx_bo, ctx->queue)) != ETNA_OK)
        {
            return rv;
        }
        if((ctx->ctx_bo = etna_bo_new(ctx->conn, COMMAND_BUFFER_SIZE, DRM_ETNA_GEM_TYPE_CMD)) == NULL)
        {
            return ETNA_OUT_OF_MEMORY;
        }
    }
    /* Leave space at beginning of buffer for PIPE switch */
    GCCTX(ctx)->bufferSize = BEGIN_COMMIT_CLEARANCE;
    GCCTX(ctx)->logical = etna_bo_map(ctx->ctx_bo);
#ifdef GCABI_CONTEXT_HAS_PHYSICAL
    GCCTX(ctx)->bytes = etna_bo_size(ctx->ctx_bo); /* actual size of buffer */
    GCCTX(ctx)->physical = HANDLE_TO_VIV(etna_bo_gpu_address(ctx->ctx_bo));
#endif
    /* When context is empty, initial pipe should default to entry pipe so that
     * no pipe switch is needed within the context and the kernel does the
     * right thing.
     */
    GCCTX(ctx)->initialPipe = GCCTX(ctx)->entryPipe;
    return ETNA_OK;
}
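Taken together, examples #8 and #9 show the convention behind the three-argument etna_bo_del in the old tree: passing a queue defers the free until the GPU has retired the buffer, while passing NULL frees immediately. A hypothetical call-site sketch:

etna_bo_del(conn, bo_in_flight, ctx->queue); /* deferred until GPU is done */
etna_bo_del(conn, bo_idle, NULL);            /* freed right away */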