static void brw_emit_index_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint cut_index_setting;

   if (index_buffer == NULL)
      return;

   /* Enable the cut index (primitive restart) only when requested. */
   if (brw->prim_restart.enable_cut_index) {
      cut_index_setting = BRW_CUT_INDEX_ENABLE;
   } else {
      cut_index_setting = 0;
   }

   BEGIN_BATCH(3);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 |
             cut_index_setting |
             get_index_type(index_buffer->type) << 8 |
             1);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             0);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.bo->size - 1);
   ADVANCE_BATCH();
}
// Orders loaded EAP types first by index type, then by index, and finally by the EAP type itself.
i32_t eap_loaded_type_c::compare(const eap_loaded_type_c * const right)
{
	if (get_index_type() != right->get_index_type())
	{
		return static_cast<i32_t>(get_index_type())
			- static_cast<i32_t>(right->get_index_type());
	}
	else if (get_index() != right->get_index())
	{
		return static_cast<i32_t>(get_index())
			- static_cast<i32_t>(right->get_index());
	}
	else
	{
		// Compares the EAP-type.
		return get_eap_type().compare(&(right->get_eap_type()));
	}
}
boolean brw_upload_indices(struct brw_context *brw,
                           const struct pipe_buffer *index_buffer,
                           int ib_size, int start, int count)
{
   /* Emit the indexbuffer packet:
    */
   {
      struct brw_indexbuffer ib;

      memset(&ib, 0, sizeof(ib));

      ib.header.bits.opcode = CMD_INDEX_BUFFER;
      ib.header.bits.length = sizeof(ib)/4 - 2;
      ib.header.bits.index_format = get_index_type(ib_size);
      ib.header.bits.cut_index_enable = 0;

      BEGIN_BATCH(4, 0);
      OUT_BATCH( ib.header.dword );
      OUT_RELOC( index_buffer, PIPE_BUFFER_USAGE_GPU_READ, start);
      OUT_RELOC( index_buffer, PIPE_BUFFER_USAGE_GPU_READ, start + count);
      OUT_BATCH( 0 );
      ADVANCE_BATCH();
   }

   return TRUE;
}
void brw_emit_indices(struct brw_context *brw,
                      const struct _mesa_index_buffer *index_buffer,
                      dri_bo *bo,
                      GLuint offset)
{
   struct intel_context *intel = &brw->intel;
   GLuint ib_size = get_size(index_buffer->type) * index_buffer->count;

   /* Emit the indexbuffer packet:
    */
   {
      struct brw_indexbuffer ib;

      memset(&ib, 0, sizeof(ib));

      ib.header.bits.opcode = CMD_INDEX_BUFFER;
      ib.header.bits.length = sizeof(ib)/4 - 2;
      ib.header.bits.index_format = get_index_type(index_buffer->type);
      ib.header.bits.cut_index_enable = 0;

      BEGIN_BATCH(4, IGNORE_CLIPRECTS);
      OUT_BATCH( ib.header.dword );
      OUT_RELOC( bo,
                 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
                 offset);
      OUT_RELOC( bo,
                 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
                 offset + ib_size);
      OUT_BATCH( 0 );
      ADVANCE_BATCH();

      dri_bo_unreference(bo);
   }
}
static void brw_emit_index_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   BEGIN_BATCH(3);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 |
             /* cut index enable << 10 */
             get_index_type(index_buffer->type) << 8 |
             1);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             0);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.bo->size - 1);
   ADVANCE_BATCH();
}
void brw_upload_indices(struct brw_context *brw,
                        const struct _mesa_index_buffer *index_buffer)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   GLuint ib_size = get_size(index_buffer->type) * index_buffer->count;
   struct gl_buffer_object *bufferobj = index_buffer->obj;
   GLuint offset = (GLuint)index_buffer->ptr;

   /* Turn into a proper VBO:
    */
   if (!bufferobj->Name) {
      /* Get new bufferobj, offset:
       */
      get_space(brw, ib_size, &bufferobj, &offset);

      /* Straight upload
       */
      ctx->Driver.BufferSubData( ctx,
                                 GL_ELEMENT_ARRAY_BUFFER_ARB,
                                 offset,
                                 ib_size,
                                 index_buffer->ptr,
                                 bufferobj);
   } else {
      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         struct gl_buffer_object *vbo;
         GLuint voffset;
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;
         get_space(brw, ib_size, &vbo, &voffset);
         ctx->Driver.BufferSubData(ctx,
                                   GL_ELEMENT_ARRAY_BUFFER_ARB,
                                   voffset,
                                   ib_size,
                                   map,
                                   vbo);
         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);

         bufferobj = vbo;
         offset = voffset;
      }
   }

   /* Emit the indexbuffer packet:
    */
   {
      struct brw_indexbuffer ib;
      struct buffer *buffer = intel_bufferobj_buffer(intel_buffer_object(bufferobj));

      memset(&ib, 0, sizeof(ib));

      ib.header.bits.opcode = CMD_INDEX_BUFFER;
      ib.header.bits.length = sizeof(ib)/4 - 2;
      ib.header.bits.index_format = get_index_type(index_buffer->type);
      ib.header.bits.cut_index_enable = 0;

      BEGIN_BATCH(4, 0);
      OUT_BATCH( ib.header.dword );
      OUT_BATCH( bmBufferOffset(intel, buffer) + offset );
      OUT_BATCH( bmBufferOffset(intel, buffer) + offset + ib_size );
      OUT_BATCH( 0 );
      ADVANCE_BATCH();
   }
}
std::tuple<bool, size_t, std::vector<D3D12_SHADER_RESOURCE_VIEW_DESC>> D3D12GSRender::upload_and_set_vertex_index_data(ID3D12GraphicsCommandList *command_list)
{
	if (draw_command == rsx::draw_command::inlined_array)
	{
		size_t vertex_count;
		std::vector<D3D12_SHADER_RESOURCE_VIEW_DESC> vertex_buffer_view;
		std::tie(vertex_buffer_view, vertex_count) = upload_inlined_vertex_array(
			vertex_arrays_info,
			{ (const gsl::byte*) inline_vertex_array.data(), gsl::narrow<int>(inline_vertex_array.size() * sizeof(uint)) },
			m_buffer_data, m_vertex_buffer_data.Get(), command_list);

		if (is_primitive_native(draw_mode))
			return std::make_tuple(false, vertex_count, vertex_buffer_view);

		D3D12_INDEX_BUFFER_VIEW index_buffer_view;
		size_t index_count;
		std::tie(index_buffer_view, index_count) = generate_index_buffer_for_emulated_primitives_array({ { 0, (u32)vertex_count } });
		command_list->IASetIndexBuffer(&index_buffer_view);
		return std::make_tuple(true, index_count, vertex_buffer_view);
	}

	if (draw_command == rsx::draw_command::array)
	{
		if (is_primitive_native(draw_mode))
		{
			size_t vertex_count = get_vertex_count(first_count_commands);
			return std::make_tuple(false, vertex_count, upload_vertex_attributes(first_count_commands, command_list));
		}

		D3D12_INDEX_BUFFER_VIEW index_buffer_view;
		size_t index_count;
		std::tie(index_buffer_view, index_count) = generate_index_buffer_for_emulated_primitives_array(first_count_commands);
		command_list->IASetIndexBuffer(&index_buffer_view);
		return std::make_tuple(true, index_count, upload_vertex_attributes(first_count_commands, command_list));
	}

	assert(draw_command == rsx::draw_command::indexed);

	// Index count
	size_t index_count = get_index_count(draw_mode, gsl::narrow<int>(get_vertex_count(first_count_commands)));

	rsx::index_array_type indexed_type = rsx::to_index_array_type(rsx::method_registers[NV4097_SET_INDEX_ARRAY_DMA] >> 4);
	size_t index_size = get_index_type_size(indexed_type);

	// Alloc
	size_t buffer_size = align(index_count * index_size, 64);
	size_t heap_offset = m_buffer_data.alloc<D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT>(buffer_size);

	void *mapped_buffer = m_buffer_data.map<void>(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));
	u32 min_index, max_index;

	if (indexed_type == rsx::index_array_type::u16)
	{
		gsl::span<u16> dst = { (u16*)mapped_buffer, gsl::narrow<int>(buffer_size / index_size) };
		std::tie(min_index, max_index) = write_index_array_data_to_buffer(dst, draw_mode, first_count_commands);
	}

	if (indexed_type == rsx::index_array_type::u32)
	{
		gsl::span<u32> dst = { (u32*)mapped_buffer, gsl::narrow<int>(buffer_size / index_size) };
		std::tie(min_index, max_index) = write_index_array_data_to_buffer(dst, draw_mode, first_count_commands);
	}

	m_buffer_data.unmap(CD3DX12_RANGE(heap_offset, heap_offset + buffer_size));

	D3D12_INDEX_BUFFER_VIEW index_buffer_view = {
		m_buffer_data.get_heap()->GetGPUVirtualAddress() + heap_offset,
		(UINT)buffer_size,
		get_index_type(indexed_type)
	};
	m_timers.buffer_upload_size += buffer_size;
	command_list->IASetIndexBuffer(&index_buffer_view);

	return std::make_tuple(true, index_count, upload_vertex_attributes({ std::make_pair(0, max_index + 1) }, command_list));
}