int etna_fence_new(struct pipe_screen *screen_h, struct etna_ctx *ctx, struct pipe_fence_handle **fence_p) { struct etna_fence *fence = NULL; struct etna_screen *screen = etna_screen(screen_h); int rv; /* XXX we do not release the fence_p reference here -- neither do the other drivers, * and clients don't seem to rely on this. */ if(fence_p == NULL) return ETNA_INVALID_ADDR; assert(*fence_p == NULL); /* re-use old fence, if available, and reset it first */ pipe_mutex_lock(screen->fence_mutex); if(screen->fence_freelist != NULL) { fence = screen->fence_freelist; screen->fence_freelist = fence->next_free; fence->next_free = NULL; } pipe_mutex_unlock(screen->fence_mutex); if(fence != NULL) { if((rv = viv_user_signal_signal(ctx->conn, fence->signal, 0)) != VIV_STATUS_OK) { BUG("Error: could not reset signal %i", fence->signal); etna_screen_destroy_fence(screen_h, fence); return rv; } fence->signalled = false; } else { fence = CALLOC_STRUCT(etna_fence); /* Create signal with manual reset; we want to be able to probe it * or wait for it without resetting it. */ if((rv = viv_user_signal_create(ctx->conn, /* manualReset */ true, &fence->signal)) != VIV_STATUS_OK) { FREE(fence); return rv; } } if((rv = etna_queue_signal(ctx->queue, fence->signal, VIV_WHERE_PIXEL)) != ETNA_OK) { BUG("error queueing signal %i", fence->signal); viv_user_signal_destroy(ctx->conn, fence->signal); FREE(fence); return rv; } pipe_reference_init(&fence->reference, 1); *fence_p = (struct pipe_fence_handle*)fence; return ETNA_OK; }
/* internal (non-inline) part of etna_reserve
 * - commit current command buffer (if there is a current command buffer)
 * - signify when current command buffer becomes available using a signal
 * - switch to next command buffer
 * The requested size n (in command words) is not read in this body; the
 * inline caller presumably re-checks free space after the buffer switch --
 * NOTE(review): confirm against etna_reserve.
 * All failures here abort(): once a submit has partially happened the
 * command stream is in an unrecoverable state.
 */
int _etna_reserve_internal(struct etna_ctx *ctx, size_t n)
{
    int status;
#ifdef DEBUG
    fprintf(stderr, "Buffer full\n");
#endif
    /* Sanity check: the write offset must never run into the clearance
     * reserved at the end of the buffer for the commit epilogue. */
    if((ctx->offset*4 + END_COMMIT_CLEARANCE) > COMMAND_BUFFER_SIZE)
    {
        fprintf(stderr, "%s: Command buffer overflow! This is likely a programming error in the GPU driver.\n", __func__);
        abort();
    }
#ifdef GCABI_HAS_CONTEXT
    /* The GPU context buffer is built through the same reserve path, but it
     * may never spill over into a commit. */
    if(ctx->cur_buf == ETNA_CTX_BUFFER)
    {
        fprintf(stderr, "%s: Context buffer overflow! This is likely a programming error in the GPU driver.\n", __func__);
        abort();
    }
#endif
    if(ctx->cur_buf != ETNA_NO_BUFFER)
    {
#if 0
        fprintf(stderr, "Submitting old buffer %i\n", ctx->cur_buf);
#endif
        /* Queue signal to signify when buffer is available again. Must be
         * queued before the flush so it is submitted with the buffer. */
        if((status = etna_queue_signal(ctx->queue, ctx->cmdbufi[ctx->cur_buf].sig_id, VIV_WHERE_COMMAND)) != ETNA_OK)
        {
            fprintf(stderr, "%s: queue signal for old buffer failed: %i\n", __func__, status);
            abort(); /* buffer is in invalid state XXX need some kind of recovery */
        }
        /* Commit whatever is left in the current command buffer (together
         * with the signal queued above). */
        if((status = etna_flush(ctx, NULL)) != ETNA_OK)
        {
            fprintf(stderr, "%s: reserve failed: %i\n", __func__, status);
            abort(); /* buffer is in invalid state XXX need some kind of recovery */
        }
    }
    /* Move on to next buffer if not enough free in current one */
    if((status = switch_next_buffer(ctx)) != ETNA_OK)
    {
        fprintf(stderr, "%s: can't switch to next command buffer: %i\n", __func__, status);
        abort(); /* Buffer is in invalid state XXX need some kind of recovery.
                    This could involve waiting and re-uploading the context state. */
    }
    return status;
}
int etna_finish(struct etna_ctx *ctx) { int status; if(ctx == NULL) return ETNA_INVALID_ADDR; /* Submit event queue with SIGNAL, fromWhere=gcvKERNEL_PIXEL (wait for pixel engine to finish) */ if(etna_queue_signal(ctx->queue, ctx->sig_id, VIV_WHERE_PIXEL) != 0) { return ETNA_INTERNAL_ERROR; } if((status = etna_flush(ctx, NULL)) != ETNA_OK) return status; #ifdef DEBUG fprintf(stderr, "finish: Waiting for signal...\n"); #endif /* Wait for signal */ if(viv_user_signal_wait(ctx->conn, ctx->sig_id, VIV_WAIT_INDEFINITE) != 0) { return ETNA_INTERNAL_ERROR; } return ETNA_OK; }
/* Submit the current command buffer (and any queued kernel events) to the
 * kernel driver.
 *
 * ctx        Context to flush; must not be in the middle of building the
 *            GPU context buffer.
 * fence_out  Optional: if non-NULL, a new fence handle is created, queued
 *            behind the submitted work, and returned here.
 *
 * Locking: ctx->conn->fence_mutex is taken ONLY when fence_out is requested
 * (to keep submits ordered by fence number); every exit path below must
 * mirror that asymmetry, which is what the unlock_and_return_status label
 * and the conditional unlock after viv_commit do.
 */
int etna_flush(struct etna_ctx *ctx, uint32_t *fence_out)
{
    int status = ETNA_OK;
    if(ctx == NULL)
        return ETNA_INVALID_ADDR;
    if(ctx->cur_buf == ETNA_CTX_BUFFER)
        /* Can never flush while building context buffer */
        return ETNA_INTERNAL_ERROR;

    if(fence_out) /* is a fence handle requested? */
    {
        uint32_t fence;
        int signal;
        /* Need to lock the fence mutex to make sure submits are ordered by
         * fence number.
         */
        pthread_mutex_lock(&ctx->conn->fence_mutex);
        do {
            /* Get next fence ID */
            if((status = _viv_fence_new(ctx->conn, &fence, &signal)) != VIV_STATUS_OK)
            {
                fprintf(stderr, "%s: could not request fence\n", __func__);
                goto unlock_and_return_status;
            }
        } while(fence == 0); /* don't return fence handle 0 as it is interpreted as error value downstream */
        /* Queue the signal. This can in turn call this function (but
         * without fence) if the queue was full, so we should be able to handle
         * that. In that case, we will exit from this function with only
         * this fence in the queue and an empty command buffer.
         */
        if((status = etna_queue_signal(ctx->queue, signal, VIV_WHERE_PIXEL)) != ETNA_OK)
        {
            fprintf(stderr, "%s: error %i queueing fence signal %i\n", __func__, status, signal);
            goto unlock_and_return_status;
        }
        *fence_out = fence;
    }
    /***** Start fence mutex locked (only if fence_out was requested) */
    /* Make sure to unlock the mutex before returning */
    struct _gcsQUEUE *queue_first = _etna_queue_first(ctx->queue);
    gcoCMDBUF cur_buf = (ctx->cur_buf != ETNA_NO_BUFFER) ? ctx->cmdbuf[ctx->cur_buf] : NULL;

    if(cur_buf == NULL || (ctx->offset*4 <= (cur_buf->startOffset + BEGIN_COMMIT_CLEARANCE)))
    {
        /* Nothing in command buffer; but if we end up here there may be
         * kernel commands (events) to submit. Do this separately via
         * viv_event_commit instead of a full viv_commit. */
        if(queue_first != NULL)
        {
            ctx->flushes = 0; /* an event commit counts as a signalled flush */
            if((status = viv_event_commit(ctx->conn, queue_first)) != 0)
            {
#ifdef DEBUG
                fprintf(stderr, "Error committing kernel commands\n");
#endif
                goto unlock_and_return_status;
            }
            if(fence_out) /* mark fence as submitted to kernel */
                _viv_fence_mark_pending(ctx->conn, *fence_out);
        }
        goto unlock_and_return_status;
    }

    cur_buf->offset = ctx->offset*4; /* Copy over current end offset into CMDBUF, for kernel */
#ifdef DEBUG
    fprintf(stderr, "Committing command buffer %i startOffset=%x offset=%x\n", ctx->cur_buf, cur_buf->startOffset, ctx->offset*4);
#endif
#ifdef DEBUG_CMDBUF
    etna_dump_cmd_buffer(ctx);
#endif
#ifdef GCABI_HAS_CONTEXT
    gpu_context_finish_up(ctx);
#endif
    /* Track consecutive flushes without a queued event; used below to force
     * a buffer switch after ETNA_MAX_UNSIGNALED_FLUSHES. */
    if(!queue_first)
        ctx->flushes += 1;
    else
        ctx->flushes = 0;
    if((status = viv_commit(ctx->conn, cur_buf, ctx->ctx, queue_first)) != 0)
    {
#ifdef DEBUG
        fprintf(stderr, "Error committing command buffer\n");
#endif
        goto unlock_and_return_status;
    }
    if(fence_out)
    {
        /* Fence is now owned by the kernel; ordering is guaranteed, so the
         * mutex can be dropped before the (slow) context rebuild below. */
        _viv_fence_mark_pending(ctx->conn, *fence_out);
        pthread_mutex_unlock(&ctx->conn->fence_mutex);
    }
    /***** End fence mutex locked */
#ifdef GCABI_HAS_CONTEXT
    /* set context entryPipe to currentPipe (next commit will start with current pipe) */
    GCCTX(ctx)->entryPipe = GCCTX(ctx)->currentPipe;
    gpu_context_clear(ctx);
    if(ctx->ctx_cb)
    {
        enum etna_pipe initial_pipe, final_pipe;
        /* Start building GPU context */
        if((status = gpu_context_build_start(ctx)) != ETNA_OK)
        {
            fprintf(stderr, "%s: gpu_context_build_start failed with status %i\n", __func__, status);
            return status; /* mutex already released above */
        }
        if((status = ctx->ctx_cb(ctx->ctx_cb_data, ctx, &initial_pipe, &final_pipe)) != ETNA_OK)
        {
            fprintf(stderr, "%s: Context callback failed with status %i\n", __func__, status);
            return status;
        }
        /* Set initial pipe in context */
        GCCTX(ctx)->initialPipe = initial_pipe;
        /* Finish building GPU context */
        if((status = gpu_context_build_end(ctx, final_pipe)) != ETNA_OK)
        {
            fprintf(stderr, "%s: gpu_context_build_end failed with status %i\n", __func__, status);
            return status;
        }
    }
#endif
    /* Continue writing into the same buffer, past the committed region plus
     * the clearance the kernel needs around a commit. */
    cur_buf->startOffset = cur_buf->offset + END_COMMIT_CLEARANCE;
    cur_buf->offset = cur_buf->startOffset + BEGIN_COMMIT_CLEARANCE;
    if((cur_buf->offset + END_COMMIT_CLEARANCE) >= COMMAND_BUFFER_SIZE ||
            ctx->flushes > ETNA_MAX_UNSIGNALED_FLUSHES)
    {
        /* nothing more fits in buffer, prevent warning about buffer overflow
         * on next etna_reserve. Setting offset to the very end makes the next
         * reserve switch buffers immediately. */
        cur_buf->startOffset = cur_buf->offset = COMMAND_BUFFER_SIZE - END_COMMIT_CLEARANCE;
    }
    /* Set writing offset for next etna_reserve. For convenience this is
     * stored as an index (32-bit words) instead of a byte offset. */
    ctx->offset = cur_buf->offset / 4;
#ifdef DEBUG
#ifdef GCABI_HAS_CONTEXT
    fprintf(stderr, " New start offset: %x New offset: %x Contextbuffer used: %i\n", cur_buf->startOffset, cur_buf->offset, *(GCCTX(ctx)->inUse));
#else
    fprintf(stderr, " New start offset: %x New offset: %x\n", cur_buf->startOffset, cur_buf->offset);
#endif
#endif
    return ETNA_OK;
unlock_and_return_status: /* Unlock fence mutex (if necessary) and return status */
    if(fence_out)
        pthread_mutex_unlock(&ctx->conn->fence_mutex);
    return status;
}