static boolean u_vbuf_need_minmax_index(struct u_vbuf_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb;
      unsigned index;

      /* Per-instance attribs don't need min/max_index. */
      if (mgr->ve->ve[i].instance_divisor) {
         continue;
      }

      index = mgr->ve->ve[i].vertex_buffer_index;
      vb = &mgr->b.vertex_buffer[index];

      /* Constant attribs don't need min/max_index. */
      if (!vb->stride) {
         continue;
      }

      /* Per-vertex attribs need min/max_index. */
      if (u_vbuf_resource(vb->buffer)->user_ptr ||
          mgr->ve->incompatible_layout_elem[i] ||
          mgr->incompatible_vb[index]) {
         return TRUE;
      }
   }

   return FALSE;
}
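/*
 * Illustrative sketch (hedged; the caller below is an assumption, not code
 * from this file): a draw path would use this predicate to decide whether
 * the index buffer must be scanned before user vertex buffers can be
 * uploaded, since only per-vertex attribs from user or incompatible buffers
 * need the [min_index, max_index] range.
 */
#if 0
if (u_vbuf_need_minmax_index(mgr)) {
   int min_index, max_index;
   u_vbuf_get_minmax_index(mgr->pipe, &mgr->b.index_buffer, info,
                           &min_index, &max_index);
   u_vbuf_upload_buffers(mgr, min_index, max_index, info->instance_count);
}
#endif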
void u_vbuf_set_vertex_buffers(struct u_vbuf *mgrb,
                               unsigned count,
                               const struct pipe_vertex_buffer *bufs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;
   memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb));

   if (!mgr->b.caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               mgr->incompatible_vb[i] = TRUE;
            }
         }
      }
   }

   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);

      mgr->b.real_vertex_buffer[i].buffer_offset =
      mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset;

      mgr->b.real_vertex_buffer[i].stride =
      mgr->b.vertex_buffer[i].stride = vb->stride;

      if (!vb->buffer ||
          mgr->incompatible_vb[i]) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         continue;
      }

      if (u_vbuf_resource(vb->buffer)->user_ptr) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer);
   }

   for (i = count; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = count; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   mgr->b.nr_vertex_buffers = count;
   mgr->b.nr_real_vertex_buffers = count;
}
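/*
 * Usage sketch (illustrative; "hw_vbo" and "user_wrapped" are hypothetical
 * resources, not names from this file): slot 1 wraps a user pointer, so
 * u_vbuf_set_vertex_buffers leaves its real_vertex_buffer slot empty and
 * sets any_user_vbs, deferring the actual upload to draw time.
 */
#if 0
struct pipe_vertex_buffer vbs[2];
vbs[0].buffer = hw_vbo;        /* GPU-resident buffer, passed through */
vbs[0].buffer_offset = 0;
vbs[0].stride = 32;
vbs[1].buffer = user_wrapped;  /* resource wrapping a malloc'd array */
vbs[1].buffer_offset = 0;
vbs[1].stride = 16;
u_vbuf_set_vertex_buffers(mgrb, 2, vbs);
#endif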
void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
{
   struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
   struct pipe_draw_info info = *dinfo;
   struct r600_draw rdraw = {};
   struct pipe_index_buffer ib = {};
   unsigned prim, mask, ls_mask = 0;

   if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
       (info.indexed && !rctx->vbuf_mgr->index_buffer.buffer) ||
       !r600_conv_pipe_prim(info.mode, &prim)) {
      return;
   }

   if (!rctx->ps_shader || !rctx->vs_shader)
      return;

   r600_update_derived_state(rctx);

   u_vbuf_draw_begin(rctx->vbuf_mgr, &info);
   r600_vertex_buffer_update(rctx);

   rdraw.vgt_num_indices = info.count;
   rdraw.vgt_num_instances = info.instance_count;

   if (info.indexed) {
      /* Initialize the index buffer struct. */
      pipe_resource_reference(&ib.buffer, rctx->vbuf_mgr->index_buffer.buffer);
      ib.index_size = rctx->vbuf_mgr->index_buffer.index_size;
      ib.offset = rctx->vbuf_mgr->index_buffer.offset + info.start * ib.index_size;

      /* Translate or upload, if needed. */
      r600_translate_index_buffer(rctx, &ib, info.count);

      if (u_vbuf_resource(ib.buffer)->user_ptr) {
         r600_upload_index_buffer(rctx, &ib, info.count);
      }

      /* Initialize the r600_draw struct with index buffer info. */
      if (ib.index_size == 4) {
         rdraw.vgt_index_type = VGT_INDEX_32 |
            (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0);
      } else {
         rdraw.vgt_index_type = VGT_INDEX_16 |
            (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0);
      }
      rdraw.indices = (struct r600_resource*)ib.buffer;
      rdraw.indices_bo_offset = ib.offset;
      rdraw.vgt_draw_initiator = V_0287F0_DI_SRC_SEL_DMA;
   } else {
      info.index_bias = info.start;
      rdraw.vgt_draw_initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
      if (info.count_from_stream_output) {
         rdraw.vgt_draw_initiator |= S_0287F0_USE_OPAQUE(1);

         r600_context_draw_opaque_count(&rctx->ctx,
               (struct r600_so_target*)info.count_from_stream_output);
      }
   }

   rctx->ctx.vs_shader_so_strides = rctx->vs_shader->so_strides;

   mask = (1ULL << ((unsigned)rctx->framebuffer.nr_cbufs * 4)) - 1;

   if (rctx->vgt.id != R600_PIPE_STATE_VGT) {
      rctx->vgt.id = R600_PIPE_STATE_VGT;
      rctx->vgt.nregs = 0;
      r600_pipe_state_add_reg(&rctx->vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL, 0);
      r600_pipe_state_add_reg(&rctx->vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL, 0);
      r600_pipe_state_add_reg(&rctx->vgt, R_028400_VGT_MAX_VTX_INDX, ~0, 0xFFFFFFFF, NULL, 0);
      r600_pipe_state_add_reg(&rctx->vgt, R_028404_VGT_MIN_VTX_INDX, 0, 0xFFFFFFFF, NULL, 0);
      r600_pipe_state_add_reg(&rctx->vgt, R_028408_VGT_INDX_OFFSET, info.index_bias, 0xFFFFFFFF, NULL, 0);
      r600_pipe_state_add_reg(&rctx->vgt, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info.restart_index, 0xFFFFFFFF, NULL, 0);
      r600_pipe_state_add_reg(&rctx->vgt, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info.primitive_restart, 0xFFFFFFFF, NULL, 0);
      r600_pipe_state_add_reg(&rctx->vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL, 0);
      r600_pipe_state_add_reg(&rctx->vgt, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance, 0xFFFFFFFF, NULL, 0);
      r600_pipe_state_add_reg(&rctx->vgt, R_028A0C_PA_SC_LINE_STIPPLE, 0, S_028A0C_AUTO_RESET_CNTL(3), NULL, 0);
      r600_pipe_state_add_reg(&rctx->vgt, R_028814_PA_SU_SC_MODE_CNTL, 0, S_028814_PROVOKING_VTX_LAST(1), NULL, 0);
   }

   /* The mod_reg calls below patch register values positionally, so they
    * must appear in the same order as the add_reg calls above; nregs is
    * reset to walk the list from the beginning. */
   rctx->vgt.nregs = 0;
   r600_pipe_state_mod_reg(&rctx->vgt, prim);
   r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_target_mask & mask);
   r600_pipe_state_mod_reg(&rctx->vgt, ~0);
   r600_pipe_state_mod_reg(&rctx->vgt, 0);
   r600_pipe_state_mod_reg(&rctx->vgt, info.index_bias);
   r600_pipe_state_mod_reg(&rctx->vgt, info.restart_index);
   r600_pipe_state_mod_reg(&rctx->vgt, info.primitive_restart);
   r600_pipe_state_mod_reg(&rctx->vgt, 0);
   r600_pipe_state_mod_reg(&rctx->vgt, info.start_instance);

   if (prim == V_008958_DI_PT_LINELIST)
      ls_mask = 1;
   else if (prim == V_008958_DI_PT_LINESTRIP)
      ls_mask = 2;
   r600_pipe_state_mod_reg(&rctx->vgt, S_028A0C_AUTO_RESET_CNTL(ls_mask));

   if (info.mode == PIPE_PRIM_QUADS ||
       info.mode == PIPE_PRIM_QUAD_STRIP ||
       info.mode == PIPE_PRIM_POLYGON) {
      r600_pipe_state_mod_reg(&rctx->vgt, S_028814_PROVOKING_VTX_LAST(1));
   }

   r600_context_pipe_state_set(&rctx->ctx, &rctx->vgt);

   if (rctx->chip_class >= EVERGREEN) {
      evergreen_context_draw(&rctx->ctx, &rdraw);
   } else {
      r600_context_draw(&rctx->ctx, &rdraw);
   }

   if (rctx->framebuffer.zsbuf) {
      struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
      ((struct r600_resource_texture *)tex)->dirty_db = TRUE;
   }

   pipe_resource_reference(&ib.buffer, NULL);
   u_vbuf_draw_end(rctx->vbuf_mgr);
}
static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
                                    struct pipe_index_buffer *ib,
                                    const struct pipe_draw_info *info,
                                    int *out_min_index,
                                    int *out_max_index)
{
   struct pipe_transfer *transfer = NULL;
   const void *indices;
   unsigned i;
   unsigned restart_index = info->restart_index;

   if (u_vbuf_resource(ib->buffer)->user_ptr) {
      indices = u_vbuf_resource(ib->buffer)->user_ptr +
                ib->offset + info->start * ib->index_size;
   } else {
      indices = pipe_buffer_map_range(pipe, ib->buffer,
                                      ib->offset + info->start * ib->index_size,
                                      info->count * ib->index_size,
                                      PIPE_TRANSFER_READ, &transfer);
   }

   switch (ib->index_size) {
   case 4: {
      const unsigned *ui_indices = (const unsigned*)indices;
      unsigned max_ui = 0;
      unsigned min_ui = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] != restart_index) {
               if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
               if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
            }
         }
      } else {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
            if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
         }
      }
      *out_min_index = min_ui;
      *out_max_index = max_ui;
      break;
   }
   case 2: {
      const unsigned short *us_indices = (const unsigned short*)indices;
      unsigned max_us = 0;
      unsigned min_us = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] != restart_index) {
               if (us_indices[i] > max_us) max_us = us_indices[i];
               if (us_indices[i] < min_us) min_us = us_indices[i];
            }
         }
      } else {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] > max_us) max_us = us_indices[i];
            if (us_indices[i] < min_us) min_us = us_indices[i];
         }
      }
      *out_min_index = min_us;
      *out_max_index = max_us;
      break;
   }
   case 1: {
      const unsigned char *ub_indices = (const unsigned char*)indices;
      unsigned max_ub = 0;
      unsigned min_ub = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] != restart_index) {
               if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
               if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
            }
         }
      } else {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
            if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
         }
      }
      *out_min_index = min_ub;
      *out_max_index = max_ub;
      break;
   }
   default:
      assert(0);
      *out_min_index = 0;
      *out_max_index = 0;
   }

   if (transfer) {
      pipe_buffer_unmap(pipe, transfer);
   }
}
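/*
 * Worked example (illustrative): with 16-bit indices {0, 2, 0xffff, 5} and
 * restart_index == 0xffff, the primitive_restart path skips the restart
 * marker and yields min == 0, max == 5; the plain path would report
 * max == 0xffff and force a vastly oversized vertex upload range.
 */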
static void u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
                                  int min_index, int max_index,
                                  unsigned instance_count)
{
   unsigned i;
   unsigned count = max_index + 1 - min_index;
   unsigned nr_velems = mgr->ve->count;
   unsigned nr_vbufs = mgr->b.nr_vertex_buffers;
   struct pipe_vertex_element *velems =
         mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS] = {0};

   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      struct pipe_vertex_element *velem = &velems[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];
      unsigned instance_div, first, size;

      /* Skip the buffer generated by translate. */
      if (index == mgr->fallback_vb_slot) {
         continue;
      }

      assert(vb->buffer);

      if (!u_vbuf_resource(vb->buffer)->user_ptr) {
         continue;
      }

      instance_div = velem->instance_divisor;
      first = vb->buffer_offset + velem->src_offset;

      if (!vb->stride) {
         /* Constant attrib. */
         size = mgr->ve->src_format_size[i];
      } else if (instance_div) {
         /* Per-instance attrib. */
         unsigned count = (instance_count + instance_div - 1) / instance_div;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      } else {
         /* Per-vertex attrib. */
         first += vb->stride * min_index;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      }

      /* Update offsets. */
      if (!end_offset[index]) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }
   }

   /* Upload buffers. */
   for (i = 0; i < nr_vbufs; i++) {
      unsigned start, end = end_offset[i];
      boolean flushed;
      struct pipe_vertex_buffer *real_vb;
      uint8_t *ptr;

      if (!end) {
         continue;
      }

      start = start_offset[i];
      assert(start < end);

      real_vb = &mgr->b.real_vertex_buffer[i];
      ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr;

      u_upload_data(mgr->b.uploader, start, end - start, ptr + start,
                    &real_vb->buffer_offset, &real_vb->buffer, &flushed);

      /* Only the [start, end) subrange was uploaded; rebase buffer_offset
       * so that fetches at the original offsets hit the uploaded data. */
      real_vb->buffer_offset -= start;
   }
}
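/*
 * Worked example (illustrative numbers, not from the source): a per-vertex
 * attrib with stride 16, buffer_offset 0, src_offset 4 and src_format_size 8,
 * drawn with min_index 10 and max_index 19 (count == 10), gives
 *    first = 0 + 4 + 16 * 10    = 164
 *    size  = 16 * (10 - 1) + 8  = 152
 * so only bytes [164, 316) of the user buffer are uploaded, and
 * buffer_offset is rebased by -164 afterwards.
 */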