Example #1
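/* Queue a FREE_VIDEO_MEMORY command to release a video memory node. */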
int etna_queue_free_vidmem(struct etna_queue *queue, viv_node_t node)
{
    struct _gcsHAL_INTERFACE *cmd = NULL;
    int rv;
    if((rv=etna_queue_alloc(queue, &cmd)) != ETNA_OK)
        return rv;
    cmd->command = gcvHAL_FREE_VIDEO_MEMORY;
    cmd->u.FreeVideoMemory.node = HANDLE_TO_VIV(node);
    return ETNA_OK;
}
Example #2
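/* Queue an UNLOCK_VIDEO_MEMORY command for a video memory node of the given surface type. */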
int etna_queue_unlock_vidmem(struct etna_queue *queue, viv_node_t node, enum viv_surf_type type)
{
    struct _gcsHAL_INTERFACE *cmd = NULL;
    int rv;
    if((rv=etna_queue_alloc(queue, &cmd)) != ETNA_OK)
        return rv;
    cmd->command = gcvHAL_UNLOCK_VIDEO_MEMORY;
    cmd->u.UnlockVideoMemory.node = HANDLE_TO_VIV(node);
    cmd->u.UnlockVideoMemory.type = convert_surf_type(type);
    return ETNA_OK;
}
Example #3
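/* Queue a SIGNAL command that raises signal sig_id from the given part of the GPU pipeline. */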
int etna_queue_signal(struct etna_queue *queue, int sig_id, enum viv_where fromWhere)
{
    struct _gcsHAL_INTERFACE *cmd = NULL;
    int rv;
    if((rv=etna_queue_alloc(queue, &cmd)) != ETNA_OK)
        return rv;
    cmd->command = gcvHAL_SIGNAL;
    cmd->u.Signal.signal = PTR_TO_VIV((void*)sig_id);
    cmd->u.Signal.auxSignal = PTR_TO_VIV((void*)0x0);
    cmd->u.Signal.process = HANDLE_TO_VIV(queue->ctx->conn->process);
    cmd->u.Signal.fromWhere = convert_where(fromWhere);
    return ETNA_OK;
}
Example #4
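/* Queue an UNMAP_USER_MEMORY command to unmap previously mapped user memory from the GPU. */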
int etna_queue_unmap_user_memory(struct etna_queue *queue, void *memory, size_t size, viv_usermem_t info, viv_addr_t address)
{
    struct _gcsHAL_INTERFACE *cmd = NULL;
    int rv;
    if((rv=etna_queue_alloc(queue, &cmd)) != ETNA_OK)
        return rv;
    cmd->command = gcvHAL_UNMAP_USER_MEMORY;
    cmd->u.UnmapUserMemory.memory = PTR_TO_VIV(memory);
    cmd->u.UnmapUserMemory.size = size;
    cmd->u.UnmapUserMemory.info = HANDLE_TO_VIV(info);
    cmd->u.UnmapUserMemory.address = address;
    return ETNA_OK;
}
Example #5
static int gpu_context_free(struct etna_ctx *ctx)
{
    /* detach context from the GPU */
    int err;
    gcsHAL_INTERFACE id = {};
    id.command = gcvHAL_DETACH;
    id.u.Detach.context = HANDLE_TO_VIV(ctx->ctx);

    if((err=viv_invoke(ctx->conn, &id)) != gcvSTATUS_OK)
    {
#ifdef DEBUG
        fprintf(stderr, "Error detaching from the GPU\n");
#endif
        return ETNA_INTERNAL_ERROR;
    }

    return ETNA_OK;
}
Example #6
/* Clear GPU context, to rebuild it for next flush */
static int gpu_context_clear(struct etna_ctx *ctx)
{
    /* If the context was in use, queue its freeing and allocate a new buffer
     * to avoid overwriting it while the GPU may still be reading it.
     * Otherwise we can simply re-use it.
     */
    int rv;
#ifdef DEBUG
    fprintf(stderr, "gpu_context_clear (context %i)\n", (int)GCCTX(ctx)->id);
#endif
    if(GCCTX(ctx)->inUse != NULL &&
       *GCCTX(ctx)->inUse)
    {
#ifdef DEBUG
        fprintf(stderr, "gpu_context_clear: context was in use, deferred freeing and reallocating it\n");
#endif
        if((rv = etna_bo_del(ctx->conn, ctx->ctx_bo, ctx->queue)) != ETNA_OK)
        {
            return rv;
        }
        if((ctx->ctx_bo = etna_bo_new(ctx->conn, COMMAND_BUFFER_SIZE, DRM_ETNA_GEM_TYPE_CMD)) == NULL)
        {
            return ETNA_OUT_OF_MEMORY;
        }
    }
    /* Leave space at beginning of buffer for PIPE switch */
    GCCTX(ctx)->bufferSize = BEGIN_COMMIT_CLEARANCE;
    GCCTX(ctx)->logical = etna_bo_map(ctx->ctx_bo);
#ifdef GCABI_CONTEXT_HAS_PHYSICAL
    GCCTX(ctx)->bytes = etna_bo_size(ctx->ctx_bo); /* actual size of buffer */
    GCCTX(ctx)->physical = HANDLE_TO_VIV(etna_bo_gpu_address(ctx->ctx_bo));
#endif
    /* When context is empty, initial pipe should default to entry pipe so that
     * no pipe switch is needed within the context and the kernel does the
     * right thing.
     */
    GCCTX(ctx)->initialPipe = GCCTX(ctx)->entryPipe;
    return ETNA_OK;
}
Example #7
/* Clear GPU context, to rebuild it for next flush */
static int gpu_context_clear(struct etna_ctx *ctx)
{
    /* If the context was in use, queue its freeing and allocate a new buffer
     * to avoid overwriting it while the GPU may still be reading it.
     * Otherwise we can simply re-use it.
     */
    int rv;
#ifdef DEBUG
    printf("gpu_context_clear (context %i)\n", (int)GCCTX(ctx)->id);
#endif
    if(GCCTX(ctx)->inUse != NULL &&
       *GCCTX(ctx)->inUse)
    {
#ifdef DEBUG
        printf("gpu_context_clear: context was in use, deferred freeing and reallocating it\n");
#endif
        if((rv = gpu_context_free_buffer(ctx, &ctx->ctx_info, true)) != ETNA_OK)
        {
            return rv;
        }
        if((rv = gpu_context_allocate_buffer(ctx, &ctx->ctx_info)) != ETNA_OK)
        {
            return rv;
        }
    }
    /* Leave space at beginning of buffer for PIPE switch */
    GCCTX(ctx)->bufferSize = BEGIN_COMMIT_CLEARANCE;
    GCCTX(ctx)->logical = ctx->ctx_info.logical;
#ifdef GCABI_CONTEXT_HAS_PHYSICAL
    GCCTX(ctx)->bytes = ctx->ctx_info.bytes; /* actual size of buffer */
    GCCTX(ctx)->physical = HANDLE_TO_VIV(ctx->ctx_info.physical);
#endif
    /* When context is empty, initial pipe should default to entry pipe so that
     * no pipe switch is needed within the context and the kernel does the
     * right thing.
     */
    GCCTX(ctx)->initialPipe = GCCTX(ctx)->entryPipe;
    return ETNA_OK;
}