/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 *
 * Also removes the buffer from the context's pending-DMA list, clears the
 * per-DMA state, and drops the reference that was held while the DMA was
 * pending (which may destroy the buffer).
 */
void
svga_buffer_upload_flush(struct svga_context *svga, struct svga_buffer *sbuf)
{
   SVGA3dCopyBox *boxes;
   unsigned i;
   struct pipe_resource *dummy;

   /* Nothing to patch if no DMA command is outstanding. */
   if (!sbuf->dma.pending) {
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->hwbuf);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);
   assert(sbuf->dma.boxes);

   /*
    * Patch the DMA command with the final copy box.
    */
   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->dma.boxes;
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
               sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

      /* Each dirty byte range becomes a 1-D copy box. */
      boxes[i].x = sbuf->map.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->map.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->map.num_ranges = 0;

   /* Unlink from the pending-DMA list. */
   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL;
#endif
   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}
enum pipe_error svga_buffer_create_host_surface(struct svga_screen *ss, struct svga_buffer *sbuf) { assert(!sbuf->user); if (!sbuf->handle) { sbuf->key.flags = 0; sbuf->key.format = SVGA3D_BUFFER; if (sbuf->bind_flags & PIPE_BIND_VERTEX_BUFFER) { sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER; sbuf->key.flags |= SVGA3D_SURFACE_BIND_VERTEX_BUFFER; } if (sbuf->bind_flags & PIPE_BIND_INDEX_BUFFER) { sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER; sbuf->key.flags |= SVGA3D_SURFACE_BIND_INDEX_BUFFER; } if (sbuf->bind_flags & PIPE_BIND_CONSTANT_BUFFER) sbuf->key.flags |= SVGA3D_SURFACE_BIND_CONSTANT_BUFFER; if (sbuf->bind_flags & PIPE_BIND_STREAM_OUTPUT) sbuf->key.flags |= SVGA3D_SURFACE_BIND_STREAM_OUTPUT; if (sbuf->bind_flags & PIPE_BIND_SAMPLER_VIEW) sbuf->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE; sbuf->key.size.width = sbuf->b.b.width0; sbuf->key.size.height = 1; sbuf->key.size.depth = 1; sbuf->key.numFaces = 1; sbuf->key.numMipLevels = 1; sbuf->key.cachable = 1; sbuf->key.arraySize = 1; SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->b.b.width0); sbuf->handle = svga_screen_surface_create(ss, sbuf->b.b.bind, sbuf->b.b.usage, &sbuf->key); if (!sbuf->handle) return PIPE_ERROR_OUT_OF_MEMORY; /* Always set the discard flag on the first time the buffer is written * as svga_screen_surface_create might have passed a recycled host * buffer. */ sbuf->dma.flags.discard = TRUE; SVGA_DBG(DEBUG_DMA, " --> got sid %p sz %d (buffer)\n", sbuf->handle, sbuf->b.b.width0); } return PIPE_OK; }
/** * Patch up the upload DMA command reserved by svga_buffer_upload_command * with the final ranges. */ static void svga_buffer_upload_flush(struct svga_context *svga, struct svga_buffer *sbuf) { SVGA3dCopyBox *boxes; unsigned i; assert(sbuf->handle); assert(sbuf->hwbuf); assert(sbuf->map.num_ranges); assert(sbuf->dma.svga == svga); assert(sbuf->dma.boxes); /* * Patch the DMA command with the final copy box. */ SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle); boxes = sbuf->dma.boxes; for(i = 0; i < sbuf->map.num_ranges; ++i) { SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n", sbuf->map.ranges[i].start, sbuf->map.ranges[i].end); boxes[i].x = sbuf->map.ranges[i].start; boxes[i].y = 0; boxes[i].z = 0; boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start; boxes[i].h = 1; boxes[i].d = 1; boxes[i].srcx = sbuf->map.ranges[i].start; boxes[i].srcy = 0; boxes[i].srcz = 0; } sbuf->map.num_ranges = 0; assert(sbuf->head.prev && sbuf->head.next); LIST_DEL(&sbuf->head); #ifdef DEBUG sbuf->head.next = sbuf->head.prev = NULL; #endif sbuf->dma.pending = FALSE; sbuf->dma.svga = NULL; sbuf->dma.boxes = NULL; /* Decrement reference count */ pipe_reference(&(sbuf->b.b.reference), NULL); sbuf = NULL; }
/**
 * Fetch the result of an occlusion query.
 *
 * If no fence was recorded yet, emit SVGA_3D_CMD_WAIT_FOR_QUERY and flush
 * so the host updates the query state.  If the query is still pending and
 * \p wait is FALSE, returns FALSE; otherwise blocks on the fence.
 *
 * \param result  receives the 32-bit occlusion count, widened to 64 bits.
 * \return TRUE when a result was stored, FALSE when still pending.
 */
static boolean
svga_get_query_result(struct pipe_context *pipe,
                      struct pipe_query *q,
                      boolean wait,
                      uint64_t *result)
{
   struct svga_context *svga = svga_context( pipe );
   struct svga_screen *svgascreen = svga_screen( pipe->screen );
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_query *sq = svga_query( q );
   SVGA3dQueryState state;

   /* Fix: the format string has two conversions ("%s" and "%d") but only
    * __FUNCTION__ was passed; too few variadic arguments for a printf-style
    * format is undefined behavior.  Pass 'wait' as intended. */
   SVGA_DBG(DEBUG_QUERY, "%s wait: %d\n", __FUNCTION__, wait);

   /* The query status won't be updated by the host unless
    * SVGA_3D_CMD_WAIT_FOR_QUERY is emitted. Unfortunately this will cause a
    * synchronous wait on the host */
   if (!sq->fence) {
      enum pipe_error ret;

      ret = SVGA3D_WaitForQuery( svga->swc, sq->type, sq->hwbuf);
      if (ret != PIPE_OK) {
         /* Command buffer full: flush and retry once. */
         svga_context_flush(svga, NULL);
         ret = SVGA3D_WaitForQuery( svga->swc, sq->type, sq->hwbuf);
         assert(ret == PIPE_OK);
      }

      svga_context_flush(svga, &sq->fence);

      assert(sq->fence);
   }

   state = sq->queryResult->state;
   if (state == SVGA3D_QUERYSTATE_PENDING) {
      if (!wait)
         return FALSE;
      /* NOTE(review): 0 is passed for fence_finish's last argument --
       * confirm against the winsys interface that this means "wait
       * until signalled". */
      sws->fence_finish(sws, sq->fence, 0);
      state = sq->queryResult->state;
   }

   assert(state == SVGA3D_QUERYSTATE_SUCCEEDED ||
          state == SVGA3D_QUERYSTATE_FAILED);

   *result = (uint64_t)sq->queryResult->result32;

   SVGA_DBG(DEBUG_QUERY, "%s result %d\n", __FUNCTION__, (unsigned)*result);

   return TRUE;
}
/** * Clear the given surface to the specified value. * No masking, no scissor (clear entire buffer). */ void svga_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba, double depth, unsigned stencil) { struct svga_context *svga = svga_context( pipe ); int ret; if (buffers & PIPE_CLEAR_COLOR) SVGA_DBG(DEBUG_DMA, "clear sid %p\n", svga_surface(svga->curr.framebuffer.cbufs[0])->handle); ret = try_clear( svga, buffers, rgba, depth, stencil ); if (ret == PIPE_ERROR_OUT_OF_MEMORY) { /* Flush command buffer and retry: */ svga_context_flush( svga, NULL ); ret = try_clear( svga, buffers, rgba, depth, stencil ); } /* * Mark target surfaces as dirty * TODO Mark only cleared surfaces. */ svga_mark_surfaces_dirty(svga); assert (ret == PIPE_OK); }
/**
 * pipe_screen fence-wait callback.
 *
 * A zero timeout only polls the fence status; otherwise we block in the
 * winsys until the fence signals.  Returns TRUE when the fence has
 * completed.
 */
static boolean
svga_fence_finish(struct pipe_screen *screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   struct svga_winsys_screen *sws = svga_screen(screen)->sws;
   boolean done;

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_FENCEFINISH);

   if (timeout) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "%s fence_ptr %p\n",
               __FUNCTION__, fence);
      done = sws->fence_finish(sws, fence, 0) == 0;
   }
   else {
      /* Zero timeout: just check whether the fence already signalled. */
      done = sws->fence_signalled(sws, fence, 0) == 0;
   }

   SVGA_STATS_TIME_POP(sws);

   return done;
}
/**
 * Unmap a previously mapped buffer.
 *
 * Drops the map count, unmaps the hardware buffer if present, and -- when
 * the mapping wrote data without any explicit range flush -- marks the
 * whole buffer dirty for the next DMA upload.
 */
static void
svga_buffer_unmap( struct pipe_screen *screen,
                   struct pipe_buffer *buf)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if (sbuf->map.count)
      --sbuf->map.count;

   /* Only buffers with hardware storage need a winsys unmap. */
   if (sbuf->hwbuf)
      sws->buffer_unmap(sws, sbuf->hwbuf);

   if (sbuf->map.writing) {
      if (!sbuf->map.flush_explicit) {
         /* No mapped range was flushed -- flush the whole buffer */
         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");
         svga_buffer_add_range(sbuf, 0, sbuf->base.size);
      }

      sbuf->map.writing = FALSE;
      sbuf->map.flush_explicit = FALSE;
   }

   pipe_mutex_unlock(ss->swc_mutex);
}
enum pipe_error svga_set_stream_output(struct svga_context *svga, struct svga_stream_output *streamout) { enum pipe_error ret = PIPE_OK; unsigned id = streamout ? streamout->id : SVGA3D_INVALID_ID; if (!svga_have_vgpu10(svga)) { return PIPE_OK; } SVGA_DBG(DEBUG_STREAMOUT, "%s streamout=0x%x id=%d\n", __FUNCTION__, streamout, id); if (svga->current_so != streamout) { /* Save current SO state */ svga->current_so = streamout; ret = SVGA3D_vgpu10_SetStreamOutput(svga->swc, id); if (ret != PIPE_OK) { svga_context_flush(svga, NULL); ret = SVGA3D_vgpu10_SetStreamOutput(svga->swc, id); } } return ret; }
static struct pipe_stream_output_target * svga_create_stream_output_target(struct pipe_context *pipe, struct pipe_resource *buffer, unsigned buffer_offset, unsigned buffer_size) { struct svga_context *svga = svga_context(pipe); struct svga_stream_output_target *sot; SVGA_DBG(DEBUG_STREAMOUT, "%s offset=%d size=%d\n", __FUNCTION__, buffer_offset, buffer_size); assert(svga_have_vgpu10(svga)); (void) svga; sot = CALLOC_STRUCT(svga_stream_output_target); if (!sot) return NULL; pipe_reference_init(&sot->base.reference, 1); pipe_resource_reference(&sot->base.buffer, buffer); sot->base.context = pipe; sot->base.buffer = buffer; sot->base.buffer_offset = buffer_offset; sot->base.buffer_size = buffer_size; return &sot->base; }
/**
 * pipe_screen resource destructor for textures.
 *
 * Releases the cached sampler view and the host surface, frees the
 * per-texture tracking arrays, and updates the HUD resource statistics.
 */
static void
svga_texture_destroy(struct pipe_screen *screen,
                     struct pipe_resource *pt)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_texture *tex = svga_texture(pt);

   /* Bump the screen's texture timestamp -- presumably so cached state
    * referencing this texture is invalidated; TODO confirm with users
    * of texture_timestamp. */
   ss->texture_timestamp++;

   svga_sampler_view_reference(&tex->cached_view, NULL);

   /* DBG("%s deleting %p\n", __FUNCTION__, (void *) tex); */
   SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
   svga_screen_surface_destroy(ss, &tex->key, &tex->handle);

   /* HUD accounting: subtract this texture's size from the total. */
   ss->hud.total_resource_bytes -= tex->size;

   FREE(tex->defined);
   FREE(tex->rendered_to);
   FREE(tex);

   assert(ss->hud.num_resources > 0);
   if (ss->hud.num_resources > 0)
      ss->hud.num_resources--;
}
/**
 * Clear the given surface to the specified value.
 * No masking, no scissor (clear entire buffer).
 *
 * Flushes queued primitives first, then retries the clear once after a
 * context flush if the command buffer was full.
 */
void
svga_clear(struct pipe_context *pipe, unsigned buffers,
           const union pipe_color_union *color,
           double depth, unsigned stencil)
{
   struct svga_context *svga = svga_context( pipe );
   enum pipe_error ret;

   if (buffers & PIPE_CLEAR_COLOR)
      SVGA_DBG(DEBUG_DMA, "clear sid %p\n",
               svga_surface(svga->curr.framebuffer.cbufs[0])->handle);

   /* flush any queued prims (don't want them to appear after the clear!) */
   svga_hwtnl_flush_retry(svga);

   ret = try_clear( svga, buffers, color, depth, stencil );

   if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
      /* Flush command buffer and retry: */
      svga_context_flush( svga, NULL );

      ret = try_clear( svga, buffers, color, depth, stencil );
   }

   /*
    * Mark target surfaces as dirty
    * TODO Mark only cleared surfaces.
    */
   svga_mark_surfaces_dirty(svga);

   assert (ret == PIPE_OK);
}
/**
 * End the active query: flush buffered drawing, mark the result buffer
 * PENDING, emit SVGA3D_CMD_END_QUERY (with one flush-and-retry on
 * command-buffer exhaustion), and flush so the host can process it.
 */
static void
svga_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context( pipe );
   struct svga_query *sq = svga_query( q );
   enum pipe_error ret;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);
   /* This must be the query started by svga_begin_query(). */
   assert(svga->sq == sq);

   svga_hwtnl_flush_retry(svga);

   /* Set to PENDING before sending EndQuery. */
   sq->queryResult->state = SVGA3D_QUERYSTATE_PENDING;

   ret = SVGA3D_EndQuery( svga->swc, sq->type, sq->hwbuf);
   if (ret != PIPE_OK) {
      /* Command buffer full: flush and retry once. */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_EndQuery( svga->swc, sq->type, sq->hwbuf);
      assert(ret == PIPE_OK);
   }

   /* TODO: Delay flushing. We don't really need to flush here, just ensure
    * that there is one flush before svga_get_query_result attempts to get the
    * result */
   svga_context_flush(svga, NULL);

   svga->sq = NULL;
}
/**
 * Release the host surface backing this buffer, if one was created.
 * The handle is returned to the screen's surface cache / destroyed by
 * svga_screen_surface_destroy(), which also clears sbuf->handle.
 */
void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if (!sbuf->handle)
      return;

   SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n",
            sbuf->handle, sbuf->b.b.width0);
   svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
}
/**
 * Destroy a stream-output target: drop our reference on the backing
 * buffer resource, then free the wrapper object.
 */
static void
svga_destroy_stream_output_target(struct pipe_context *pipe,
                                  struct pipe_stream_output_target *target)
{
   struct svga_stream_output_target *tgt =
      svga_stream_output_target(target);

   SVGA_DBG(DEBUG_STREAMOUT, "%s\n", __FUNCTION__);

   pipe_resource_reference(&tgt->base.buffer, NULL);
   FREE(tgt);
}
static void svga_destroy_query(struct pipe_context *pipe, struct pipe_query *q) { struct svga_screen *svgascreen = svga_screen(pipe->screen); struct svga_winsys_screen *sws = svgascreen->sws; struct svga_query *sq = svga_query( q ); SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__); sws->buffer_destroy(sws, sq->hwbuf); sws->fence_reference(sws, &sq->fence, NULL); FREE(sq); }
static void * svga_create_sampler_state(struct pipe_context *pipe, const struct pipe_sampler_state *sampler) { struct svga_context *svga = svga_context(pipe); struct svga_sampler_state *cso = CALLOC_STRUCT( svga_sampler_state ); cso->mipfilter = translate_mip_filter(sampler->min_mip_filter); cso->magfilter = translate_img_filter( sampler->mag_img_filter ); cso->minfilter = translate_img_filter( sampler->min_img_filter ); cso->aniso_level = MAX2( (unsigned) sampler->max_anisotropy, 1 ); cso->lod_bias = sampler->lod_bias; cso->addressu = translate_wrap_mode(sampler->wrap_s); cso->addressv = translate_wrap_mode(sampler->wrap_t); cso->addressw = translate_wrap_mode(sampler->wrap_r); cso->normalized_coords = sampler->normalized_coords; cso->compare_mode = sampler->compare_mode; cso->compare_func = sampler->compare_func; { ubyte r = float_to_ubyte(sampler->border_color[0]); ubyte g = float_to_ubyte(sampler->border_color[1]); ubyte b = float_to_ubyte(sampler->border_color[2]); ubyte a = float_to_ubyte(sampler->border_color[3]); util_pack_color_ub( r, g, b, a, PIPE_FORMAT_B8G8R8A8_UNORM, &cso->bordercolor ); } /* No SVGA3D support for: * - min/max LOD clamping */ cso->min_lod = 0; cso->view_min_lod = MAX2(sampler->min_lod, 0); cso->view_max_lod = MAX2(sampler->max_lod, 0); /* Use min_mipmap */ if (svga->debug.use_min_mipmap) { if (cso->view_min_lod == cso->view_max_lod) { cso->min_lod = cso->view_min_lod; cso->view_min_lod = 0; cso->view_max_lod = 1000; /* Just a high number */ cso->mipfilter = SVGA3D_TEX_FILTER_NONE; } } SVGA_DBG(DEBUG_VIEWS, "min %u, view(min %u, max %u) lod, mipfilter %s\n", cso->min_lod, cso->view_min_lod, cso->view_max_lod, cso->mipfilter == SVGA3D_TEX_FILTER_NONE ? "SVGA3D_TEX_FILTER_NONE" : "SOMETHING"); return cso; }
/**
 * Block until the given fence is signalled.
 * Returns TRUE when the winsys reports completion (fence_finish == 0).
 *
 * NOTE(review): the 'timeout' parameter is ignored and a constant 0 is
 * passed as fence_finish's last argument -- confirm against the winsys
 * interface whether that argument is flags or a timeout.
 */
static boolean
svga_fence_finish(struct pipe_screen *screen,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   struct svga_winsys_screen *sws = svga_screen(screen)->sws;

   SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "%s fence_ptr %p\n",
            __FUNCTION__, fence);

   return sws->fence_finish(sws, fence, 0) == 0;
}
/**
 * Lazily create the host surface backing this buffer (old pipe_buffer
 * API variant).
 *
 * \return PIPE_OK on success, PIPE_ERROR_OUT_OF_MEMORY if the surface
 *         could not be created.
 */
static INLINE enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   if (!sbuf->handle) {
      sbuf->key.flags = 0;
      sbuf->key.format = SVGA3D_BUFFER;
      /* Map gallium buffer usage onto SVGA surface hints. */
      if (sbuf->base.usage & PIPE_BUFFER_USAGE_VERTEX)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
      if (sbuf->base.usage & PIPE_BUFFER_USAGE_INDEX)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

      /* A buffer is a 1-D, single-mip, single-face surface. */
      sbuf->key.size.width = sbuf->base.size;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n",
               sbuf->base.size);

      sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
      if (!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag on the first time the buffer is written
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, " --> got sid %p sz %d (buffer)\n",
               sbuf->handle, sbuf->base.size);
   }

   return PIPE_OK;
}
static void svga_tex_surface_destroy(struct pipe_surface *surf) { struct svga_surface *s = svga_surface(surf); struct svga_texture *t = svga_texture(surf->texture); struct svga_screen *ss = svga_screen(surf->texture->screen); if(s->handle != t->handle) { SVGA_DBG(DEBUG_DMA, "unref sid %p (tex surface)\n", s->handle); svga_screen_surface_destroy(ss, &s->key, &s->handle); } pipe_resource_reference(&surf->texture, NULL); FREE(surf); }
/**
 * Emit a surface DMA for one horizontal band of the transfer box,
 * retrying once after a context flush if the command buffer is full.
 *
 * \param y,h    destination y origin and height of this band
 * \param srcy   y origin within the source buffer
 */
static INLINE void
svga_transfer_dma_band(struct svga_context *svga,
                       struct svga_transfer *st,
                       SVGA3dTransferType transfer,
                       unsigned y, unsigned h, unsigned srcy,
                       SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   SVGA3dCopyBox box;
   enum pipe_error ret;

   box.x = st->base.box.x;
   box.y = y;
   box.z = st->base.box.z;
   box.w = st->base.box.width;
   box.h = h;
   box.d = 1;
   box.srcx = 0;
   box.srcy = srcy;
   box.srcz = 0;

   /* For cube maps the transfer's z coordinate selects the face, so
    * move it into st->face and clear the copy box's z. */
   if (st->base.resource->target == PIPE_TEXTURE_CUBE) {
      st->face = st->base.box.z;
      box.z = 0;
   }
   else
      st->face = 0;

   SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - (%u, %u, %u), %ubpp\n",
            transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
            texture->handle,
            st->face,
            st->base.box.x,
            y,
            box.z,
            st->base.box.x + st->base.box.width,
            y + h,
            box.z + 1,
            util_format_get_blocksize(texture->b.b.format) * 8 /
            (util_format_get_blockwidth(texture->b.b.format) *
             util_format_get_blockheight(texture->b.b.format)));

   ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
   if (ret != PIPE_OK) {
      /* Command buffer full: flush and retry once. */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
      assert(ret == PIPE_OK);
   }
}
/**
 * Create a hardware occlusion query.
 *
 * The query result lives in a pinned winsys buffer so its status can be
 * polled without waiting on fences.
 *
 * NOTE(review): 'query_type' is ignored and sq->type is hard-coded to
 * SVGA3D_QUERYTYPE_OCCLUSION -- confirm callers only create occlusion
 * queries.
 *
 * \return the new query, or NULL on failure.
 */
static struct pipe_query *svga_create_query( struct pipe_context *pipe,
                                             unsigned query_type )
{
   struct svga_screen *svgascreen = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_query *sq;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);

   sq = CALLOC_STRUCT(svga_query);
   if (!sq)
      goto no_sq;

   sq->type = SVGA3D_QUERYTYPE_OCCLUSION;

   sq->hwbuf = svga_winsys_buffer_create(svgascreen, 1,
                                         SVGA_BUFFER_USAGE_PINNED,
                                         sizeof *sq->queryResult);
   if (!sq->hwbuf)
      goto no_hwbuf;

   sq->queryResult = (SVGA3dQueryResult *)sws->buffer_map(sws, sq->hwbuf,
                                                          PIPE_BUFFER_USAGE_CPU_WRITE);
   if (!sq->queryResult)
      goto no_query_result;

   sq->queryResult->totalSize = sizeof *sq->queryResult;
   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;

   /*
    * We request the buffer to be pinned and assume it is always mapped.
    *
    * The reason is that we don't want to wait for fences when checking the
    * query status.
    */
   sws->buffer_unmap(sws, sq->hwbuf);

   return &sq->base;

   /* goto-based cleanup: unwind in reverse order of acquisition. */
no_query_result:
   sws->buffer_destroy(sws, sq->hwbuf);
no_hwbuf:
   FREE(sq);
no_sq:
   return NULL;
}
/**
 * Emit a surface DMA command covering one band of a transfer, retrying
 * once after a context flush if the command buffer is full.
 *
 * \param x,y,z / w,h,d       destination box origin and extents
 * \param srcx,srcy,srcz      source origin within the guest buffer
 */
static void
svga_transfer_dma_band(struct svga_context *svga,
                       struct svga_transfer *st,
                       SVGA3dTransferType transfer,
                       unsigned x, unsigned y, unsigned z,
                       unsigned w, unsigned h, unsigned d,
                       unsigned srcx, unsigned srcy, unsigned srcz,
                       SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *tex = svga_texture(st->base.resource);
   SVGA3dCopyBox copy;
   enum pipe_error err;

   assert(!st->use_direct_map);

   /* Describe the destination box and source origin of the copy. */
   copy.x = x;
   copy.y = y;
   copy.z = z;
   copy.w = w;
   copy.h = h;
   copy.d = d;
   copy.srcx = srcx;
   copy.srcy = srcy;
   copy.srcz = srcz;

   SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
            "(%u, %u, %u), %ubpp\n",
            transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
            tex->handle, st->slice, x, y, z, x + w, y + h, z + 1,
            util_format_get_blocksize(tex->b.b.format) * 8 /
            (util_format_get_blockwidth(tex->b.b.format) *
             util_format_get_blockheight(tex->b.b.format)));

   err = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &copy, 1, flags);
   if (err != PIPE_OK) {
      /* Command buffer full: flush and retry once. */
      svga_context_flush(svga, NULL);
      err = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &copy, 1, flags);
      assert(err == PIPE_OK);
   }
}
/**
 * Begin an occlusion query: flush buffered draws (so they don't get
 * counted), wait out any previous pending use of the result buffer,
 * then emit SVGA3D_CMD_BEGIN_QUERY.
 */
static void
svga_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_screen *svgascreen = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_context *svga = svga_context( pipe );
   struct svga_query *sq = svga_query( q );
   enum pipe_error ret;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);

   /* Only one query may be active at a time. */
   assert(!svga->sq);

   /* Need to flush out buffered drawing commands so that they don't
    * get counted in the query results.
    */
   svga_hwtnl_flush_retry(svga);

   if (sq->queryResult->state == SVGA3D_QUERYSTATE_PENDING) {
      /* The application doesn't care for the pending query result. We cannot
       * let go the existing buffer and just get a new one because its storage
       * may be reused for other purposes and clobbered by the host when it
       * determines the query result. So the only option here is to wait for
       * the existing query's result -- not a big deal, given that no sane
       * application would do this.
       */
      uint64_t result;

      svga_get_query_result(pipe, q, TRUE, &result);
      assert(sq->queryResult->state != SVGA3D_QUERYSTATE_PENDING);
   }

   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
   sws->fence_reference(sws, &sq->fence, NULL);

   ret = SVGA3D_BeginQuery(svga->swc, sq->type);
   if (ret != PIPE_OK) {
      /* Command buffer full: flush and retry once. */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_BeginQuery(svga->swc, sq->type);
      assert(ret == PIPE_OK);
   }

   svga->sq = sq;
}
void svga_destroy_sampler_view_priv(struct svga_sampler_view *v) { struct svga_texture *tex = svga_texture(v->texture); if (v->handle != tex->handle) { struct svga_screen *ss = svga_screen(v->texture->screen); SVGA_DBG(DEBUG_DMA, "unref sid %p (sampler view)\n", v->handle); svga_screen_surface_destroy(ss, &v->key, &v->handle); } /* Note: we're not refcounting the texture resource here to avoid * a circular dependency. */ v->texture = NULL; FREE(v); }
/**
 * pipe_context::transfer_unmap for buffers.
 *
 * Drops the map count and hardware-storage mapping; if the transfer
 * wrote data without explicit range flushes, the whole buffer is marked
 * dirty and the host is told to discard the previous contents.
 */
static void
svga_buffer_transfer_unmap(struct pipe_context *pipe,
                           struct pipe_transfer *transfer)
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_context *svga = svga_context(pipe);
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERTRANSFERUNMAP);

   mtx_lock(&ss->swc_mutex);

   assert(sbuf->map.count);
   if (sbuf->map.count) {
      --sbuf->map.count;
   }

   if (svga_buffer_has_hw_storage(sbuf)) {
      /* Note: we may wind up flushing here and unmapping other buffers
       * which leads to recursively locking ss->swc_mutex.
       */
      svga_buffer_hw_storage_unmap(svga, sbuf);
   }

   if (transfer->usage & PIPE_TRANSFER_WRITE) {
      if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
         /*
          * Mapped range not flushed explicitly, so flush the whole buffer,
          * and tell the host to discard the contents when processing the DMA
          * command.
          */
         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         sbuf->dma.flags.discard = TRUE;

         svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
      }
   }

   mtx_unlock(&ss->swc_mutex);

   FREE(transfer);

   SVGA_STATS_TIME_POP(svga_sws(svga));
}
/**
 * pipe_screen resource destructor for textures: release the cached
 * sampler view and the host surface, then free the texture object.
 */
static void
svga_texture_destroy(struct pipe_screen *screen,
                     struct pipe_resource *pt)
{
   struct svga_screen *screen_priv = svga_screen(screen);
   struct svga_texture *stex = (struct svga_texture *)pt;

   screen_priv->texture_timestamp++;

   /* Drop the cached sampler view before tearing down the surface. */
   svga_sampler_view_reference(&stex->cached_view, NULL);

   SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", stex->handle);
   svga_screen_surface_destroy(screen_priv, &stex->key, &stex->handle);

   FREE(stex);
}
/**
 * Recompute whether the software TNL fallback (software vertex fetch
 * and/or software pipeline) is needed, and raise SVGA_NEW_NEED_SWTNL
 * when the answer changes.
 */
static enum pipe_error
update_need_swtnl( struct svga_context *svga,
                   unsigned dirty )
{
   boolean need_swtnl;

   /* Debug override: never use the software paths. */
   if (svga->debug.no_swtnl) {
      svga->state.sw.need_swvfetch = FALSE;
      svga->state.sw.need_pipeline = FALSE;
   }

   need_swtnl = (svga->state.sw.need_swvfetch ||
                 svga->state.sw.need_pipeline);

   /* Debug override: always use the software paths. */
   if (svga->debug.force_swtnl) {
      need_swtnl = TRUE;
   }

   /*
    * Some state changes the draw module does makes us believe we
    * don't need swtnl. This causes the vdecl code to pickup
    * the wrong buffers and vertex formats. Try trivial/line-wide.
    */
   if (svga->state.sw.in_swtnl_draw)
      need_swtnl = TRUE;

   if (need_swtnl != svga->state.sw.need_swtnl) {
      SVGA_DBG(DEBUG_SWTNL|DEBUG_PERF,
               "%s: need_swvfetch %s, need_pipeline %s\n",
               __FUNCTION__,
               svga->state.sw.need_swvfetch ? "true" : "false",
               svga->state.sw.need_pipeline ? "true" : "false");

      svga->state.sw.need_swtnl = need_swtnl;
      svga->dirty |= SVGA_NEW_NEED_SWTNL;
      /* Force vertex declarations to be rebuilt on the next draw. */
      svga->swtnl.new_vdecl = TRUE;
   }

   return PIPE_OK;
}
/**
 * pipe_context::flush callback: flush buffered drawing and back copies,
 * then the command queue, optionally returning a fence in *fence.
 */
static void svga_flush( struct pipe_context *pipe,
                        struct pipe_fence_handle **fence,
                        unsigned flags)
{
   struct svga_context *svga = svga_context(pipe);

   /* Emit buffered drawing commands, and any back copies. */
   svga_surfaces_flush( svga );

   /* Ask the winsys to export the fence as a file descriptor. */
   if (flags & PIPE_FLUSH_FENCE_FD)
      svga->swc->hints |= SVGA_HINT_FLAG_EXPORT_FENCE_FD;

   /* Flush command queue. */
   svga_context_flush(svga, fence);

   SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "%s fence_ptr %p\n",
            __FUNCTION__, fence ? *fence : 0x0);

   /* Enable to dump BMPs of the color/depth buffers each frame */
   if (0) {
      struct pipe_framebuffer_state *fb = &svga->curr.framebuffer;
      static unsigned frame_no = 1;
      char filename[256];
      unsigned i;

      for (i = 0; i < fb->nr_cbufs; i++) {
         util_snprintf(filename, sizeof(filename), "cbuf%u_%04u.bmp",
                       i, frame_no);
         debug_dump_surface_bmp(&svga->pipe, filename, fb->cbufs[i]);
      }

      if (0 && fb->zsbuf) {
         util_snprintf(filename, sizeof(filename), "zsbuf_%04u.bmp",
                       frame_no);
         debug_dump_surface_bmp(&svga->pipe, filename, fb->zsbuf);
      }

      ++frame_no;
   }
}
/**
 * pipe_context::transfer_unmap for buffers (pipe_mutex variant).
 *
 * Drops the map count and hardware-storage mapping; if the transfer
 * wrote data without explicit range flushes, the whole buffer is marked
 * dirty and the host is told to discard the previous contents.
 */
static void
svga_buffer_transfer_unmap( struct pipe_context *pipe,
                            struct pipe_transfer *transfer )
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_context *svga = svga_context(pipe);
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if (sbuf->map.count) {
      --sbuf->map.count;
   }

   if (svga_buffer_has_hw_storage(sbuf)) {
      svga_buffer_hw_storage_unmap(svga, sbuf);
   }

   if (transfer->usage & PIPE_TRANSFER_WRITE) {
      if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
         /*
          * Mapped range not flushed explicitly, so flush the whole buffer,
          * and tell the host to discard the contents when processing the DMA
          * command.
          */
         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         sbuf->dma.flags.discard = TRUE;

         svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
      }
   }

   pipe_mutex_unlock(ss->swc_mutex);

   FREE(transfer);
}
/** * Allocate a winsys_buffer (ie. DMA, aka GMR memory). * * It will flush and retry in case the first attempt to create a DMA buffer * fails, so it should not be called from any function involved in flushing * to avoid recursion. */ struct svga_winsys_buffer * svga_winsys_buffer_create( struct svga_context *svga, unsigned alignment, unsigned usage, unsigned size ) { struct svga_screen *svgascreen = svga_screen(svga->pipe.screen); struct svga_winsys_screen *sws = svgascreen->sws; struct svga_winsys_buffer *buf; /* Just try */ buf = sws->buffer_create(sws, alignment, usage, size); if (!buf) { SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %d bytes GMR\n", size); /* Try flushing all pending DMAs */ svga_context_flush(svga, NULL); buf = sws->buffer_create(sws, alignment, usage, size); } return buf; }