/**
 * Upload the vertex data for one array element into a VBO, repacking to
 * the requested destination stride when the source layout differs.
 *
 * Writes the resulting buffer object, offset and stride into *buffer.
 */
static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   const int src_stride = element->glarray->StrideB;
   const unsigned char *in = element->glarray->Ptr + min * src_stride;
   int remaining = max - min + 1;
   const GLuint upload_size = remaining * dst_stride;

   if (src_stride == dst_stride) {
      /* Layout already matches: hand the whole range to the uploader. */
      intel_upload_data(&brw->intel, in, upload_size, dst_stride,
                        &buffer->bo, &buffer->offset);
   } else {
      /* Strides differ: repack one element at a time into the mapped
       * upload buffer.
       */
      char * const base = intel_upload_map(&brw->intel, upload_size,
                                           dst_stride);
      char *out = base;

      for (; remaining > 0; remaining--) {
         memcpy(out, in, dst_stride);
         in += src_stride;
         out += dst_stride;
      }

      intel_upload_unmap(&brw->intel, base, upload_size, dst_stride,
                         &buffer->bo, &buffer->offset);
   }

   buffer->stride = dst_stride;
}
/**
 * Upload the vertex data for one array element into a VBO, repacking to
 * the requested destination stride when the source layout differs.
 *
 * min == -1 signals that no min/max index bounds were computed, which
 * only happens for current-attribute (stride 0) data; in that case a
 * single element is uploaded and the buffer stride is set to 0.
 */
static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   if (min == -1) {
      /* If we don't have computed min/max bounds, then this must be a use of
       * the current attribute, which has a 0 stride. Otherwise, we wouldn't
       * know what data to upload.
       */
      assert(element->glarray->StrideB == 0);
      intel_upload_data(&brw->intel, element->glarray->Ptr,
                        element->element_size, element->element_size,
                        &buffer->bo, &buffer->offset);
      buffer->stride = 0;
      return;
   }

   const int src_stride = element->glarray->StrideB;
   const unsigned char *in = element->glarray->Ptr + min * src_stride;
   int remaining = max - min + 1;
   const GLuint upload_size = remaining * dst_stride;

   if (src_stride == dst_stride) {
      /* Layout already matches: upload the range directly. */
      intel_upload_data(&brw->intel, in, upload_size, dst_stride,
                        &buffer->bo, &buffer->offset);
   } else {
      /* Repack element by element into the mapped upload buffer. */
      char * const base = intel_upload_map(&brw->intel, upload_size,
                                           dst_stride);
      char *out = base;

      for (; remaining > 0; remaining--) {
         memcpy(out, in, dst_stride);
         in += src_stride;
         out += dst_stride;
      }

      intel_upload_unmap(&brw->intel, base, upload_size, dst_stride,
                         &buffer->bo, &buffer->offset);
   }

   buffer->stride = dst_stride;
}
/**
 * Upload the vertex data for one array element into a VBO, repacking to
 * the requested destination stride when the source layout differs.
 *
 * A zero source stride means current-attribute data: one element is
 * uploaded and the buffer stride is set to 0 rather than replicating it.
 */
static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   const int src_stride = element->glarray->StrideB;

   /* If the source stride is zero, we just want to upload the current
    * attribute once and set the buffer's stride to 0. There's no need
    * to replicate it out.
    */
   if (src_stride == 0) {
      intel_upload_data(brw, element->glarray->Ptr,
                        element->glarray->_ElementSize,
                        element->glarray->_ElementSize,
                        &buffer->bo, &buffer->offset);
      buffer->stride = 0;
      return;
   }

   const unsigned char *in = element->glarray->Ptr + min * src_stride;
   int remaining = max - min + 1;
   const GLuint upload_size = remaining * dst_stride;

   if (src_stride == dst_stride) {
      /* Layout already matches: upload the range directly. */
      intel_upload_data(brw, in, upload_size, dst_stride,
                        &buffer->bo, &buffer->offset);
   } else {
      /* Repack element by element into the mapped upload buffer. */
      char * const base = intel_upload_map(brw, upload_size, dst_stride);
      char *out = base;

      for (; remaining > 0; remaining--) {
         memcpy(out, in, dst_stride);
         in += src_stride;
         out += dst_stride;
      }

      intel_upload_unmap(brw, base, upload_size, dst_stride,
                         &buffer->bo, &buffer->offset);
   }

   buffer->stride = dst_stride;
}
/**
 * Upload gl_BaseVertex for non-indirect draws when the vertex shader
 * uses the vertex ID. Indirect draws already have a draw-parameters BO
 * (draw_params_bo != NULL), so nothing is uploaded in that case.
 */
void
brw_prepare_shader_draw_parameters(struct brw_context *brw)
{
   if (!brw->vs.prog_data->uses_vertexid)
      return;

   /* Indirect draws supply their own draw-parameters buffer. */
   if (brw->draw.draw_params_bo != NULL)
      return;

   intel_upload_data(brw, &brw->draw.gl_basevertex, 4, 4,
                     &brw->draw.draw_params_bo,
                     &brw->draw.draw_params_offset);
}
/**
 * Upload the draw-parameter buffers consumed by the vertex shader:
 * gl_BaseVertex/gl_BaseInstance (for non-indirect draws only — indirect
 * draws already provide draw_params_bo) and gl_DrawID.
 */
void
brw_prepare_shader_draw_parameters(struct brw_context *brw)
{
   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);
   const bool wants_params =
      vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance;

   /* For non-indirect draws, upload gl_BaseVertex. */
   if (wants_params && brw->draw.draw_params_bo == NULL) {
      intel_upload_data(brw, &brw->draw.params, sizeof(brw->draw.params), 4,
                        &brw->draw.draw_params_bo,
                        &brw->draw.draw_params_offset);
   }

   if (vs_prog_data->uses_drawid) {
      intel_upload_data(brw, &brw->draw.gl_drawid,
                        sizeof(brw->draw.gl_drawid), 4,
                        &brw->draw.draw_id_bo,
                        &brw->draw.draw_id_offset);
   }
}
/**
 * Return a GEM buffer object backing this buffer object's data, creating
 * one lazily from the client-memory copy (sys_buffer) on first use.
 *
 * \param align  required alignment for the uploaded data
 * \param offset receives the byte offset of the data within the BO
 * \return the backing drm_intel_bo (owned by intel_obj; not an extra ref)
 */
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   /* Migrate the system-memory copy into an upload BO the first time
    * the GPU needs to read it.
    */
   if (!intel_obj->buffer) {
      intel_upload_data(intel, intel_obj->sys_buffer,
                        intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}
void brw_prepare_shader_draw_parameters(struct brw_context *brw) { int *gl_basevertex_value; if (brw->draw.indexed) { brw->draw.start_vertex_location += brw->ib.start_vertex_offset; brw->draw.base_vertex_location += brw->vb.start_vertex_bias; gl_basevertex_value = &brw->draw.base_vertex_location; } else { brw->draw.start_vertex_location += brw->vb.start_vertex_bias; gl_basevertex_value = &brw->draw.start_vertex_location; } /* For non-indirect draws, upload gl_BaseVertex. */ if (brw->vs.prog_data->uses_vertexid && brw->draw.draw_params_bo == NULL) { intel_upload_data(brw, gl_basevertex_value, 4, 4, &brw->draw.draw_params_bo, &brw->draw.draw_params_offset); } }
/**
 * Prepare the index buffer for the current draw: ensure brw->ib.bo points
 * at a GEM BO containing the indices, compute start_vertex_offset, and
 * flag BRW_NEW_INDEX_BUFFER when the BO or index type changed.
 *
 * Three cases:
 *  - client-memory indices: uploaded into a fresh BO;
 *  - misaligned offset into a GL buffer object: mapped (read-only) and
 *    re-uploaded into an aligned temporary;
 *  - aligned GL buffer object: its backing BO is used directly.
 */
static void brw_upload_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *old_bo = brw->ib.bo;   /* remember to detect a BO change below */
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   /* Non-indexed draw: nothing to do. */
   if (index_buffer == NULL)
      return;

   ib_type_size = _mesa_sizeof_type(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO: */
   if (!_mesa_is_bufferobj(bufferobj)) {
      /* Get new bufferobj, offset:
       * NOTE(review): intel_upload_data overwrites brw->ib.bo here;
       * presumably it manages the old reference itself — confirm in
       * intel_upload.c.
       */
      intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
                        &brw->ib.bo, &offset);
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((ib_type_size - 1) & offset) {
         perf_debug("copying index buffer to a temporary to work around "
                    "misaligned offset %d\n", offset);

         /* Read-only map of the app's buffer, copied into an aligned BO. */
         GLubyte *map = ctx->Driver.MapBufferRange(ctx, offset, ib_size,
                                                   GL_MAP_READ_BIT,
                                                   bufferobj, MAP_INTERNAL);

         intel_upload_data(brw, map, ib_size, ib_type_size,
                           &brw->ib.bo, &offset);

         ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
      } else {
         /* Aligned: use the buffer object's backing BO directly, taking a
          * new reference and dropping the one held on the previous ib.bo.
          */
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
                                   offset, ib_size);
         if (bo != brw->ib.bo) {
            drm_intel_bo_unreference(brw->ib.bo);
            brw->ib.bo = bo;
            drm_intel_bo_reference(bo);
         }
      }
   }

   /* Use 3DPRIMITIVE's start_vertex_offset to avoid re-uploading
    * the index buffer state when we're just moving the start index
    * of our drawing.
    */
   brw->ib.start_vertex_offset = offset / ib_type_size;

   if (brw->ib.bo != old_bo)
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   }
}
static void brw_upload_indices(struct brw_context *brw) { struct gl_context *ctx = &brw->intel.ctx; struct intel_context *intel = &brw->intel; const struct _mesa_index_buffer *index_buffer = brw->ib.ib; GLuint ib_size; drm_intel_bo *bo = NULL; struct gl_buffer_object *bufferobj; GLuint offset; GLuint ib_type_size; if (index_buffer == NULL) return; ib_type_size = get_size(index_buffer->type); ib_size = ib_type_size * index_buffer->count; bufferobj = index_buffer->obj; /* Turn into a proper VBO: */ if (!_mesa_is_bufferobj(bufferobj)) { /* Get new bufferobj, offset: */ intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size, &bo, &offset); brw->ib.start_vertex_offset = offset / ib_type_size; } else { offset = (GLuint) (unsigned long) index_buffer->ptr; /* If the index buffer isn't aligned to its element size, we have to * rebase it into a temporary. */ if ((get_size(index_buffer->type) - 1) & offset) { GLubyte *map = ctx->Driver.MapBufferRange(ctx, offset, ib_size, GL_MAP_WRITE_BIT, bufferobj); intel_upload_data(&brw->intel, map, ib_size, ib_type_size, &bo, &offset); brw->ib.start_vertex_offset = offset / ib_type_size; ctx->Driver.UnmapBuffer(ctx, bufferobj); } else { /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading * the index buffer state when we're just moving the start index * of our drawing. */ brw->ib.start_vertex_offset = offset / ib_type_size; bo = intel_bufferobj_source(intel, intel_buffer_object(bufferobj), ib_type_size, &offset); drm_intel_bo_reference(bo); brw->ib.start_vertex_offset += offset / ib_type_size; } } if (brw->ib.bo != bo) { drm_intel_bo_unreference(brw->ib.bo); brw->ib.bo = bo; brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER; } else { drm_intel_bo_unreference(bo); } if (index_buffer->type != brw->ib.type) { brw->ib.type = index_buffer->type; brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER; } }