static void svga_user_buffer_range(struct svga_context *svga, unsigned start, unsigned count, unsigned instance_count) { const struct pipe_vertex_element *ve = svga->curr.velems->velem; int i; /* * Release old uploaded range (if not done already) and * initialize new ranges. */ for (i=0; i < svga->curr.velems->count; i++) { struct pipe_vertex_buffer *vb = &svga->curr.vb[ve[i].vertex_buffer_index]; if (vb->buffer && svga_buffer_is_user_buffer(vb->buffer)) { struct svga_buffer *buffer = svga_buffer(vb->buffer); pipe_resource_reference(&buffer->uploaded.buffer, NULL); buffer->uploaded.start = ~0; buffer->uploaded.end = 0; } } for (i=0; i < svga->curr.velems->count; i++) { struct pipe_vertex_buffer *vb = &svga->curr.vb[ve[i].vertex_buffer_index]; if (vb->buffer && svga_buffer_is_user_buffer(vb->buffer)) { struct svga_buffer *buffer = svga_buffer(vb->buffer); unsigned first, size; unsigned instance_div = ve[i].instance_divisor; unsigned elemSize = util_format_get_blocksize(ve[i].src_format); svga->dirty |= SVGA_NEW_VBUFFER; if (instance_div) { first = ve[i].src_offset; count = (instance_count + instance_div - 1) / instance_div; size = vb->stride * (count - 1) + elemSize; } else if (vb->stride) { first = vb->stride * start + ve[i].src_offset; size = vb->stride * (count - 1) + elemSize; } else { /* Only a single vertex! * Upload with the largest vertex size the hw supports, * if possible. */ first = ve[i].src_offset; size = MIN2(16, vb->buffer->width0); } buffer->uploaded.start = MIN2(buffer->uploaded.start, first); buffer->uploaded.end = MAX2(buffer->uploaded.end, first + size); } } }
static void svga_set_vertex_buffers(struct pipe_context *pipe, unsigned count, const struct pipe_vertex_buffer *buffers) { struct svga_context *svga = svga_context(pipe); unsigned i; boolean any_user_buffer = FALSE; /* Check for no change */ if (count == svga->curr.num_vertex_buffers && memcmp(svga->curr.vb, buffers, count * sizeof buffers[0]) == 0) return; /* Adjust refcounts */ for (i = 0; i < count; i++) { pipe_resource_reference(&svga->curr.vb[i].buffer, buffers[i].buffer); if (svga_buffer_is_user_buffer(buffers[i].buffer)) any_user_buffer = TRUE; } for ( ; i < svga->curr.num_vertex_buffers; i++) pipe_resource_reference(&svga->curr.vb[i].buffer, NULL); /* Copy remaining data */ memcpy(svga->curr.vb, buffers, count * sizeof buffers[0]); svga->curr.num_vertex_buffers = count; svga->curr.any_user_vertex_buffers = any_user_buffer; svga->dirty |= SVGA_NEW_VBUFFER; }
/**
 * Determine whether the specified buffer is referred in the primitive queue,
 * for which no commands have been written yet.
 */
boolean
svga_hwtnl_is_buffer_referred(struct svga_hwtnl *hwtnl,
                              struct pipe_resource *buffer)
{
   unsigned idx;

   /* User buffers never appear in the queue directly. */
   if (svga_buffer_is_user_buffer(buffer))
      return FALSE;

   /* An empty queue refers to no buffers at all. */
   if (!hwtnl->cmd.prim_count)
      return FALSE;

   /* Scan the queued vertex buffers... */
   for (idx = 0; idx < hwtnl->cmd.vbuf_count; idx++) {
      if (hwtnl->cmd.vbufs[idx].buffer == buffer)
         return TRUE;
   }

   /* ...then the queued per-primitive index buffers. */
   for (idx = 0; idx < hwtnl->cmd.prim_count; idx++) {
      if (hwtnl->cmd.prim_ib[idx] == buffer)
         return TRUE;
   }

   return FALSE;
}
static int svga_upload_user_buffers(struct svga_context *svga, unsigned start, unsigned count, unsigned instance_count) { const struct pipe_vertex_element *ve = svga->curr.velems->velem; unsigned i; int ret; svga_user_buffer_range(svga, start, count, instance_count); for (i=0; i < svga->curr.velems->count; i++) { struct pipe_vertex_buffer *vb = &svga->curr.vb[ve[i].vertex_buffer_index]; if (vb->buffer && svga_buffer_is_user_buffer(vb->buffer)) { struct svga_buffer *buffer = svga_buffer(vb->buffer); /* * Check if already uploaded. Otherwise go ahead and upload. */ if (buffer->uploaded.buffer) continue; ret = u_upload_buffer( svga->upload_vb, 0, buffer->uploaded.start, buffer->uploaded.end - buffer->uploaded.start, &buffer->b.b, &buffer->uploaded.offset, &buffer->uploaded.buffer); if (ret) return ret; if (0) debug_printf("%s: %d: orig buf %p upl buf %p ofs %d sofs %d" " sz %d\n", __FUNCTION__, i, buffer, buffer->uploaded.buffer, buffer->uploaded.offset, buffer->uploaded.start, buffer->uploaded.end - buffer->uploaded.start); vb->buffer_offset = buffer->uploaded.offset; } } return PIPE_OK; }
static int upload_user_buffers( struct svga_context *svga ) { enum pipe_error ret = PIPE_OK; int i; int nr; if (0) debug_printf("%s: %d\n", __FUNCTION__, svga->curr.num_vertex_buffers); nr = svga->curr.num_vertex_buffers; for (i = 0; i < nr; i++) { if (svga_buffer_is_user_buffer(svga->curr.vb[i].buffer)) { struct svga_buffer *buffer = svga_buffer(svga->curr.vb[i].buffer); if (!buffer->uploaded.buffer) { ret = u_upload_buffer( svga->upload_vb, 0, buffer->base.size, &buffer->base, &buffer->uploaded.offset, &buffer->uploaded.buffer ); if (ret) return ret; if (0) debug_printf("%s: %d: orig buf %p upl buf %p ofs %d sz %d\n", __FUNCTION__, i, buffer, buffer->uploaded.buffer, buffer->uploaded.offset, buffer->base.size); } pipe_buffer_reference( &svga->curr.vb[i].buffer, buffer->uploaded.buffer ); svga->curr.vb[i].buffer_offset = buffer->uploaded.offset; } } if (0) debug_printf("%s: DONE\n", __FUNCTION__); return ret; }
static void svga_release_user_upl_buffers(struct svga_context *svga) { unsigned i; unsigned nr; nr = svga->curr.num_vertex_buffers; for (i = 0; i < nr; ++i) { struct pipe_vertex_buffer *vb = &svga->curr.vb[i]; if (vb->buffer && svga_buffer_is_user_buffer(vb->buffer)) { struct svga_buffer *buffer = svga_buffer(vb->buffer); buffer->uploaded.start = ~0; buffer->uploaded.end = 0; if (buffer->uploaded.buffer) pipe_resource_reference(&buffer->uploaded.buffer, NULL); } } }
/**
 * Flush the queued primitives to the hardware.
 *
 * Collects winsys surface handles for all queued vertex and index buffers,
 * re-emits any state bindings invalidated by a context switch, then builds
 * and commits a DrawPrimitives command with per-element range hints and
 * surface relocations.  Resets the queue on success.
 *
 * Returns PIPE_OK, or an error (e.g. PIPE_ERROR_OUT_OF_MEMORY when a
 * buffer handle cannot be obtained) leaving the queue intact.
 */
enum pipe_error
svga_hwtnl_flush(struct svga_hwtnl *hwtnl)
{
   struct svga_winsys_context *swc = hwtnl->cmd.swc;
   struct svga_context *svga = hwtnl->svga;
   enum pipe_error ret;

   if (hwtnl->cmd.prim_count) {
      struct svga_winsys_surface *vb_handle[SVGA3D_INPUTREG_MAX];
      struct svga_winsys_surface *ib_handle[QSZ];
      struct svga_winsys_surface *handle;
      SVGA3dVertexDecl *vdecl;
      SVGA3dPrimitiveRange *prim;
      unsigned i;

      /* Obtain a winsys handle for every queued vertex buffer; user
       * buffers must have been replaced by uploaded copies by now.
       */
      for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
         assert(!svga_buffer_is_user_buffer(hwtnl->cmd.vdecl_vb[i]));
         handle = svga_buffer_handle(svga, hwtnl->cmd.vdecl_vb[i]);
         if (handle == NULL)
            return PIPE_ERROR_OUT_OF_MEMORY;

         vb_handle[i] = handle;
      }

      /* Same for the per-primitive index buffers; NULL means the
       * primitive is non-indexed.
       */
      for (i = 0; i < hwtnl->cmd.prim_count; i++) {
         if (hwtnl->cmd.prim_ib[i]) {
            assert(!svga_buffer_is_user_buffer(hwtnl->cmd.prim_ib[i]));
            handle = svga_buffer_handle(svga, hwtnl->cmd.prim_ib[i]);
            if (handle == NULL)
               return PIPE_ERROR_OUT_OF_MEMORY;
         } else {
            handle = NULL;
         }
         ib_handle[i] = handle;
      }

      /* Re-emit any bindings invalidated since the last command buffer
       * before referencing them from the draw below.
       */
      if (svga->rebind.rendertargets) {
         ret = svga_reemit_framebuffer_bindings(svga);
         if (ret != PIPE_OK) {
            return ret;
         }
      }

      if (svga->rebind.texture_samplers) {
         ret = svga_reemit_tss_bindings(svga);
         if (ret != PIPE_OK) {
            return ret;
         }
      }

      if (svga->rebind.vs) {
         ret = svga_reemit_vs_bindings(svga);
         if (ret != PIPE_OK) {
            return ret;
         }
      }

      if (svga->rebind.fs) {
         ret = svga_reemit_fs_bindings(svga);
         if (ret != PIPE_OK) {
            return ret;
         }
      }

      SVGA_DBG(DEBUG_DMA, "draw to sid %p, %d prims\n",
               svga->curr.framebuffer.cbufs[0] ?
               svga_surface(svga->curr.framebuffer.cbufs[0])->handle : NULL,
               hwtnl->cmd.prim_count);

      /* Reserve command-buffer space for the vertex declarations and
       * primitive ranges, then fill them in.
       */
      ret = SVGA3D_BeginDrawPrimitives(swc,
                                       &vdecl,
                                       hwtnl->cmd.vdecl_count,
                                       &prim,
                                       hwtnl->cmd.prim_count);
      if (ret != PIPE_OK)
         return ret;

      memcpy(vdecl,
             hwtnl->cmd.vdecl,
             hwtnl->cmd.vdecl_count * sizeof hwtnl->cmd.vdecl[0]);

      for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
         /* Given rangeHint is considered to be relative to indexBias, and
          * indexBias varies per primitive, we cannot accurately supply an
          * rangeHint when emitting more than one primitive per draw command.
          */
         if (hwtnl->cmd.prim_count == 1) {
            vdecl[i].rangeHint.first = hwtnl->cmd.min_index[0];
            vdecl[i].rangeHint.last = hwtnl->cmd.max_index[0] + 1;
         } else {
            vdecl[i].rangeHint.first = 0;
            vdecl[i].rangeHint.last = 0;
         }

         /* Patch the surface id with a relocation to the vertex buffer. */
         swc->surface_relocation(swc,
                                 &vdecl[i].array.surfaceId,
                                 NULL,
                                 vb_handle[i],
                                 SVGA_RELOC_READ);
      }

      memcpy(prim,
             hwtnl->cmd.prim,
             hwtnl->cmd.prim_count * sizeof hwtnl->cmd.prim[0]);

      for (i = 0; i < hwtnl->cmd.prim_count; i++) {
         /* Relocate the index buffer reference and drop our queue's
          * reference on it now that it is owned by the command stream.
          */
         swc->surface_relocation(swc,
                                 &prim[i].indexArray.surfaceId,
                                 NULL,
                                 ib_handle[i],
                                 SVGA_RELOC_READ);
         pipe_resource_reference(&hwtnl->cmd.prim_ib[i], NULL);
      }

      SVGA_FIFOCommitAll(swc);

      /* Queue has been fully emitted. */
      hwtnl->cmd.prim_count = 0;
   }

   return PIPE_OK;
}
enum pipe_error svga_hwtnl_simple_draw_range_elements( struct svga_hwtnl *hwtnl, struct pipe_resource *index_buffer, unsigned index_size, int index_bias, unsigned min_index, unsigned max_index, unsigned prim, unsigned start, unsigned count ) { struct pipe_resource *upload_buffer = NULL; SVGA3dPrimitiveRange range; unsigned hw_prim; unsigned hw_count; unsigned index_offset = start * index_size; enum pipe_error ret = PIPE_OK; hw_prim = svga_translate_prim(prim, count, &hw_count); if (hw_count == 0) goto done; if (index_buffer && svga_buffer_is_user_buffer(index_buffer)) { assert( index_buffer->width0 >= index_offset + count * index_size ); ret = u_upload_buffer( hwtnl->upload_ib, 0, index_offset, count * index_size, index_buffer, &index_offset, &upload_buffer); if (ret != PIPE_OK) goto done; /* Don't need to worry about refcounting index_buffer as this is * just a stack variable without a counted reference of its own. * The caller holds the reference. */ index_buffer = upload_buffer; } range.primType = hw_prim; range.primitiveCount = hw_count; range.indexArray.offset = index_offset; range.indexArray.stride = index_size; range.indexWidth = index_size; range.indexBias = index_bias; ret = svga_hwtnl_prim( hwtnl, &range, min_index, max_index, index_buffer ); if (ret != PIPE_OK) goto done; done: if (upload_buffer) pipe_resource_reference( &upload_buffer, NULL ); return ret; }