static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer || intel->gen >= 6) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                               GL_MAP_READ_BIT, dst);
         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
                                             GL_MAP_READ_BIT, src);
         dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                             GL_MAP_WRITE_BIT, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, src);
         intel_bufferobj_unmap(ctx, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}
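/* Hypothetical application-level sketch, not driver code: the kind of call
 * that reaches intel_bufferobj_copy_subdata() above.  Assumes a current
 * OpenGL 3.1+ context and a GL loader header (libepoxy here); the buffer
 * names and the zero offsets are illustrative only.  glCopyBufferSubData
 * forbids overlapping ranges when src == dst, which is why the driver can
 * use a plain memmove/memcpy on the mapped pointers.
 */
#include <epoxy/gl.h>

static void
example_copy_buffer(GLuint src_buf, GLuint dst_buf, GLsizeiptr size)
{
   glBindBuffer(GL_COPY_READ_BUFFER, src_buf);
   glBindBuffer(GL_COPY_WRITE_BUFFER, dst_buf);

   /* Copy 'size' bytes from offset 0 of src to offset 0 of dst; the driver
    * either maps and copies (system memory or gen6+) or emits a linear blit
    * between the two BOs followed by an MI_FLUSH. */
   glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0, size);
}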
/* XXX: Do this for TexSubImage also:
 */
static bool
try_pbo_upload(struct gl_context *ctx,
               struct gl_texture_image *image,
               const struct gl_pixelstore_attrib *unpack,
               GLenum format, GLenum type, const void *pixels)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
   GLuint src_offset;
   drm_intel_bo *src_buffer;

   if (!_mesa_is_bufferobj(unpack->BufferObj))
      return false;

   DBG("trying pbo upload\n");

   if (intel->ctx._ImageTransferState ||
       unpack->SkipPixels || unpack->SkipRows) {
      DBG("%s: image transfer\n", __FUNCTION__);
      return false;
   }

   ctx->Driver.AllocTextureImageBuffer(ctx, image);

   if (!intelImage->mt) {
      DBG("%s: no miptree\n", __FUNCTION__);
      return false;
   }

   if (!_mesa_format_matches_format_and_type(intelImage->mt->format,
                                             format, type, false)) {
      DBG("%s: format mismatch (upload to %s with format 0x%x, type 0x%x)\n",
          __FUNCTION__, _mesa_get_format_name(intelImage->mt->format),
          format, type);
      return false;
   }

   if (image->TexObject->Target == GL_TEXTURE_1D_ARRAY ||
       image->TexObject->Target == GL_TEXTURE_2D_ARRAY) {
      DBG("%s: no support for array textures\n", __FUNCTION__);
      return false;
   }

   src_buffer = intel_bufferobj_source(intel, pbo, 64, &src_offset);
   /* note: potential 64-bit ptr to 32-bit int cast */
   src_offset += (GLuint) (unsigned long) pixels;

   int src_stride =
      _mesa_image_row_stride(unpack, image->Width, format, type);

   struct intel_mipmap_tree *pbo_mt =
      intel_miptree_create_for_bo(intel,
                                  src_buffer,
                                  intelImage->mt->format,
                                  src_offset,
                                  image->Width, image->Height,
                                  src_stride, I915_TILING_NONE);
   if (!pbo_mt)
      return false;

   if (!intel_miptree_blit(intel,
                           pbo_mt, 0, 0,
                           0, 0, false,
                           intelImage->mt, image->Level, image->Face,
                           0, 0, false,
                           image->Width, image->Height, GL_COPY)) {
      DBG("%s: blit failed\n", __FUNCTION__);
      intel_miptree_release(&pbo_mt);
      return false;
   }

   intel_miptree_release(&pbo_mt);

   DBG("%s: success\n", __FUNCTION__);

   return true;
}
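/* Hypothetical application-level sketch, not driver code: a PBO texture
 * upload of the kind try_pbo_upload() above turns into a blit from the
 * pixel buffer's BO into the texture's miptree.  Assumes a current GL
 * context, a GL loader header (libepoxy here), and a PBO already filled
 * with tightly packed pixels; the format/type pair is chosen so that it
 * typically matches the miptree format and the fast path does not bail.
 * The last glTexImage2D argument is a byte offset into the bound
 * GL_PIXEL_UNPACK_BUFFER, which the driver recovers with the
 * (GLuint)(unsigned long)pixels cast.
 */
#include <epoxy/gl.h>

static void
example_pbo_teximage(GLuint pbo, GLsizei width, GLsizei height)
{
   glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);

   glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0,
                GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV,
                (const void *) 0 /* offset into the PBO */);

   glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}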
static void brw_upload_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = _mesa_sizeof_type(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      /* Get new bufferobj, offset:
       */
      intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
                        &bo, &offset);
      brw->ib.start_vertex_offset = offset / ib_type_size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((ib_type_size - 1) & offset) {
         perf_debug("copying index buffer to a temporary to work around "
                    "misaligned offset %d\n", offset);

         GLubyte *map = ctx->Driver.MapBufferRange(ctx, offset, ib_size,
                                                   GL_MAP_READ_BIT,
                                                   bufferobj);

         intel_upload_data(brw, map, ib_size, ib_type_size, &bo, &offset);
         brw->ib.start_vertex_offset = offset / ib_type_size;

         ctx->Driver.UnmapBuffer(ctx, bufferobj);
      } else {
         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;

         bo = intel_bufferobj_source(brw,
                                     intel_buffer_object(bufferobj),
                                     ib_type_size,
                                     &offset);
         drm_intel_bo_reference(bo);

         brw->ib.start_vertex_offset += offset / ib_type_size;
      }
   }

   if (brw->ib.bo != bo) {
      drm_intel_bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;

      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      drm_intel_bo_unreference(bo);
   }

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   }
}
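/* Hypothetical application-level sketch, not driver code: two indexed draws
 * from one element array buffer.  The first offset is a multiple of the
 * index size, so brw_upload_indices() above references the buffer object
 * directly and only adjusts start_vertex_offset; the second is misaligned
 * and forces the map-and-reupload path reported by perf_debug().  Assumes a
 * current GL context, a GL loader header (libepoxy here), and an 'ebo'
 * already filled with GL_UNSIGNED_SHORT indices.
 */
#include <epoxy/gl.h>

static void
example_indexed_draws(GLuint ebo)
{
   glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);

   /* Aligned: 6 is a multiple of sizeof(GLushort), so the buffer object is
    * referenced in place. */
   glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_SHORT, (const void *) 6);

   /* Misaligned: 3 is not a multiple of sizeof(GLushort), so the driver
    * copies the index range into a temporary upload buffer. */
   glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_SHORT, (const void *) 3);
}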
static void brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* CACHE_NEW_VS_PROG */
   GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0;
   unsigned int min_index = brw->vb.min_index + brw->basevertex;
   unsigned int max_index = brw->vb.max_index + brw->basevertex;
   int delta, i, j;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* _NEW_POLYGON
    *
    * On gen6+, edge flags don't end up in the VUE (either in or out of the
    * VS).  Instead, they're uploaded as the last vertex element, and the data
    * is passed sideband through the fixed function units.  So, we need to
    * prepare the vertex buffer for it, but it's not present in inputs_read.
    */
   if (brw->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL ||
                         ctx->Polygon.BackMode != GL_FILL)) {
      vs_inputs |= VERT_BIT_EDGEFLAG;
   }

   if (0)
      printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint i = ffsll(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      vs_inputs &= ~BITFIELD64_BIT(i);
      brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      return;

   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_client_array *glarray = input->glarray;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glarray->BufferObj);
         int k;

         /* If we have a VB set to be uploaded for this buffer object
          * already, reuse that VB state so that we emit fewer
          * relocations.
          */
         for (k = 0; k < i; k++) {
            const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
            if (glarray->BufferObj == other->BufferObj &&
                glarray->StrideB == other->StrideB &&
                glarray->InstanceDivisor == other->InstanceDivisor &&
                (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = glarray->Ptr - other->Ptr;
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->bo = intel_bufferobj_source(brw,
                                                intel_buffer, 1,
                                                &buffer->offset);
            drm_intel_bo_reference(buffer->bo);
            buffer->offset += (uintptr_t)glarray->Ptr;
            buffer->stride = glarray->StrideB;
            buffer->step_rate = glarray->InstanceDivisor;

            input->buffer = j++;
            input->offset = 0;
         }

         /* This is a common place to reach if the user mistakenly supplies
          * a pointer in place of a VBO offset.  If we just let it go through,
          * we may end up dereferencing a pointer beyond the bounds of the
          * GTT.  We would hope that the VBO's max_index would save us, but
          * Mesa appears to hand us min/max values not clipped to the
          * array object's _MaxElement, and _MaxElement frequently appears
          * to be wrong anyway.
          *
          * The VBO spec allows application termination in this case, and it's
          * probably a service to the poor programmer to do so rather than
          * trying to just not render.
          */
         assert(input->offset < brw->vb.buffers[input->buffer].bo->size);
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            interleaved = glarray->StrideB;
            ptr = glarray->Ptr;
         }
         else if (interleaved != glarray->StrideB ||
                  glarray->Ptr < ptr ||
                  (uintptr_t)(glarray->Ptr - ptr) + glarray->_ElementSize >
                  interleaved)
         {
            /* If our stride is different from the first attribute's stride,
             * or if the first attribute's stride didn't cover our element,
             * disable the interleaved upload optimization.  The second case
             * can most commonly occur in cases where there is a single vertex
             * and, for example, the data is stored on the application's
             * stack.
             *
             * NOTE: This will also disable the optimization in cases where
             * the data is in a different order than the array indices.
             * Something like:
             *
             *   float data[...];
             *   glVertexAttribPointer(0, 4, GL_FLOAT, 32, &data[4]);
             *   glVertexAttribPointer(1, 4, GL_FLOAT, 32, &data[0]);
             */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;
      }
   }

   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved.  First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;

         for (i = 0; i < nr_uploads; i++) {
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset =
               ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
   }

   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      if (upload[i]->glarray->InstanceDivisor == 0) {
         copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                                 buffer, upload[i]->glarray->_ElementSize);
      } else {
         /* This is an instanced attribute, since its InstanceDivisor
          * is not zero.  Therefore, its data will be stepped after the
          * instanced draw has been run InstanceDivisor times.
          */
         uint32_t instanced_attr_max_index =
            (brw->num_instances - 1) / upload[i]->glarray->InstanceDivisor;
         copy_array_to_vbo_array(brw, upload[i], 0, instanced_attr_max_index,
                                 buffer, upload[i]->glarray->_ElementSize);
      }
      buffer->offset -= delta * buffer->stride;
      buffer->step_rate = upload[i]->glarray->InstanceDivisor;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }

   brw->vb.nr_buffers = j;
}
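/* Hypothetical application-level sketch, not driver code: client-memory
 * vertex arrays laid out so that brw_prepare_vertices() above can take the
 * interleaved upload path (every attribute shares one stride and each
 * attribute's data falls within the first attribute's stride).  Assumes a
 * compatibility context where client-side arrays are legal and a GL loader
 * header (libepoxy here); the struct layout is illustrative only.
 */
#include <epoxy/gl.h>

struct example_vertex {
   float position[3];
   float color[4];
};

static void
example_interleaved_arrays(const struct example_vertex *verts, GLsizei count)
{
   const GLsizei stride = sizeof(struct example_vertex);

   glEnableVertexAttribArray(0);
   glEnableVertexAttribArray(1);

   /* Both attributes use the same stride and point into the same struct,
    * so the driver uploads one [min_index, max_index] slice of the array
    * instead of a separate buffer per attribute. */
   glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride,
                         &verts[0].position[0]);
   glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, stride,
                         &verts[0].color[0]);

   glDrawArrays(GL_TRIANGLES, 0, count);
}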
static void brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   /* CACHE_NEW_VS_PROG */
   GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0, total_size = 0;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;
   int delta, i, j;
   GLboolean can_merge_uploads = GL_TRUE;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint i = ffsll(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      vs_inputs &= ~BITFIELD64_BIT(i);
      if (input->glarray->Size && get_size(input->glarray->Type))
         brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      goto prepare;

   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_client_array *glarray = input->glarray;
      int type_size = get_size(glarray->Type);

      input->element_size = type_size * glarray->Size;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glarray->BufferObj);
         int k;

         for (k = 0; k < i; k++) {
            const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
            if (glarray->BufferObj == other->BufferObj &&
                glarray->StrideB == other->StrideB &&
                glarray->InstanceDivisor == other->InstanceDivisor &&
                (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = glarray->Ptr - other->Ptr;
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->bo = intel_bufferobj_source(intel,
                                                intel_buffer, type_size,
                                                &buffer->offset);
            drm_intel_bo_reference(buffer->bo);
            buffer->offset += (uintptr_t)glarray->Ptr;
            buffer->stride = glarray->StrideB;
            buffer->step_rate = glarray->InstanceDivisor;

            input->buffer = j++;
            input->offset = 0;
         }

         /* This is a common place to reach if the user mistakenly supplies
          * a pointer in place of a VBO offset.  If we just let it go through,
          * we may end up dereferencing a pointer beyond the bounds of the
          * GTT.  We would hope that the VBO's max_index would save us, but
          * Mesa appears to hand us min/max values not clipped to the
          * array object's _MaxElement, and _MaxElement frequently appears
          * to be wrong anyway.
          *
          * The VBO spec allows application termination in this case, and it's
          * probably a service to the poor programmer to do so rather than
          * trying to just not render.
          */
         assert(input->offset < brw->vb.buffers[input->buffer].bo->size);
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            /* Position array not properly enabled:
             */
            if (input->attrib == VERT_ATTRIB_POS && glarray->StrideB == 0) {
               intel->Fallback = true; /* boolean, not bitfield */
               return;
            }

            interleaved = glarray->StrideB;
            ptr = glarray->Ptr;
         }
         else if (interleaved != glarray->StrideB ||
                  (uintptr_t)(glarray->Ptr - ptr) > interleaved)
         {
            interleaved = 0;
         }
         else if ((uintptr_t)(glarray->Ptr - ptr) & (type_size - 1))
         {
            /* enforce natural alignment (for doubles) */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;

         total_size = ALIGN(total_size, type_size);
         total_size += input->element_size;

         if (glarray->InstanceDivisor != 0) {
            can_merge_uploads = GL_FALSE;
         }
      }
   }

   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }
   if (delta && !brw->intel.intelScreen->relaxed_relocations)
      min_index = delta = 0;

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved && interleaved <= 2 * total_size) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved.  First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;

         for (i = 0; i < nr_uploads; i++) {
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset =
               ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
      else if ((total_size < 2048) && can_merge_uploads) {
         /* Upload non-interleaved arrays into a single interleaved array */
         struct brw_vertex_buffer *buffer;
         int count = MAX2(max_index - min_index + 1, 1);
         int offset;
         char *map;

         map = intel_upload_map(&brw->intel, total_size * count, total_size);
         for (i = offset = 0; i < nr_uploads; i++) {
            const unsigned char *src = upload[i]->glarray->Ptr;
            int size = upload[i]->element_size;
            int stride = upload[i]->glarray->StrideB;
            char *dst;
            int n;

            offset = ALIGN(offset, get_size(upload[i]->glarray->Type));
            dst = map + offset;
            src += min_index * stride;

            for (n = 0; n < count; n++) {
               memcpy(dst, src, size);
               src += stride;
               dst += total_size;
            }

            upload[i]->offset = offset;
            upload[i]->buffer = j;

            offset += size;
         }
         assert(offset == total_size);
         buffer = &brw->vb.buffers[j++];
         intel_upload_unmap(&brw->intel, map, offset * count, offset,
                            &buffer->bo, &buffer->offset);
         buffer->stride = offset;
         buffer->step_rate = 0;
         buffer->offset -= delta * offset;

         nr_uploads = 0;
      }
   }

   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      if (upload[i]->glarray->InstanceDivisor == 0) {
         copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                                 buffer, upload[i]->element_size);
      } else {
         /* This is an instanced attribute, since its InstanceDivisor
          * is not zero.  Therefore, its data will be stepped after the
          * instanced draw has been run InstanceDivisor times.
          */
         uint32_t instanced_attr_max_index =
            (brw->num_instances - 1) / upload[i]->glarray->InstanceDivisor;
         copy_array_to_vbo_array(brw, upload[i], 0, instanced_attr_max_index,
                                 buffer, upload[i]->element_size);
      }
      buffer->offset -= delta * buffer->stride;
      buffer->step_rate = upload[i]->glarray->InstanceDivisor;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }

   /* can we simply extend the current vb? */
   if (j == brw->vb.nr_current_buffers) {
      int delta = 0;
      for (i = 0; i < j; i++) {
         int d;

         if (brw->vb.current_buffers[i].handle != brw->vb.buffers[i].bo->handle ||
             brw->vb.current_buffers[i].stride != brw->vb.buffers[i].stride ||
             brw->vb.current_buffers[i].step_rate != brw->vb.buffers[i].step_rate)
            break;

         d = brw->vb.buffers[i].offset - brw->vb.current_buffers[i].offset;
         if (d < 0)
            break;
         if (i == 0)
            delta = d / brw->vb.current_buffers[i].stride;
         if (delta * brw->vb.current_buffers[i].stride != d)
            break;
      }

      if (i == j) {
         brw->vb.start_vertex_bias += delta;
         while (--j >= 0)
            drm_intel_bo_unreference(brw->vb.buffers[j].bo);
         j = 0;
      }
   }

   brw->vb.nr_buffers = j;

prepare:
   brw_prepare_query_begin(brw);
}
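/* Hypothetical application-level sketch, not driver code: an instanced
 * attribute whose divisor shows up as glarray->InstanceDivisor in both
 * brw_prepare_vertices() variants above.  With divisor D and N instances
 * the driver only uploads elements [0, (N - 1) / D] of the per-instance
 * array, which is the instanced_attr_max_index computation.  Assumes a
 * GL 3.3+ context (or ARB_instanced_arrays), a GL loader header (libepoxy
 * here), and VBOs that are already filled.
 */
#include <epoxy/gl.h>

static void
example_instanced_attr(GLuint per_vertex_vbo, GLuint per_instance_vbo,
                       GLsizei vert_count, GLsizei instance_count)
{
   glBindBuffer(GL_ARRAY_BUFFER, per_vertex_vbo);
   glEnableVertexAttribArray(0);
   glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (const void *) 0);

   glBindBuffer(GL_ARRAY_BUFFER, per_instance_vbo);
   glEnableVertexAttribArray(1);
   glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, (const void *) 0);

   /* Advance attribute 1 once every 4 instances: this becomes the vertex
    * buffer's step_rate in the driver. */
   glVertexAttribDivisor(1, 4);

   glDrawArraysInstanced(GL_TRIANGLES, 0, vert_count, instance_count);
}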
/* XXX: Do this for TexSubImage also:
 */
static bool
try_pbo_upload(struct gl_context *ctx,
               struct gl_texture_image *image,
               const struct gl_pixelstore_attrib *unpack,
               GLenum format, GLenum type, const void *pixels)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
   GLuint src_offset, src_stride;
   GLuint dst_x, dst_y;
   drm_intel_bo *dst_buffer, *src_buffer;

   if (!_mesa_is_bufferobj(unpack->BufferObj))
      return false;

   DBG("trying pbo upload\n");

   if (intel->ctx._ImageTransferState ||
       unpack->SkipPixels || unpack->SkipRows) {
      DBG("%s: image transfer\n", __FUNCTION__);
      return false;
   }

   if (!_mesa_format_matches_format_and_type(image->TexFormat,
                                             format, type, false)) {
      DBG("%s: format mismatch (upload to %s with format 0x%x, type 0x%x)\n",
          __FUNCTION__, _mesa_get_format_name(image->TexFormat),
          format, type);
      return false;
   }

   ctx->Driver.AllocTextureImageBuffer(ctx, image);

   if (!intelImage->mt) {
      DBG("%s: no miptree\n", __FUNCTION__);
      return false;
   }

   if (image->TexObject->Target == GL_TEXTURE_1D_ARRAY ||
       image->TexObject->Target == GL_TEXTURE_2D_ARRAY) {
      DBG("%s: no support for array textures\n", __FUNCTION__);
      return false;
   }

   dst_buffer = intelImage->mt->region->bo;

   src_buffer = intel_bufferobj_source(intel, pbo, 64, &src_offset);
   /* note: potential 64-bit ptr to 32-bit int cast */
   src_offset += (GLuint) (unsigned long) pixels;

   if (unpack->RowLength > 0)
      src_stride = unpack->RowLength;
   else
      src_stride = image->Width;
   src_stride *= intelImage->mt->region->cpp;

   intel_miptree_get_image_offset(intelImage->mt,
                                  intelImage->base.Base.Level,
                                  intelImage->base.Base.Face,
                                  &dst_x, &dst_y);

   if (!intelEmitCopyBlit(intel,
                          intelImage->mt->cpp,
                          src_stride, src_buffer,
                          src_offset, false,
                          intelImage->mt->region->pitch, dst_buffer, 0,
                          intelImage->mt->region->tiling,
                          0, 0, dst_x, dst_y, image->Width, image->Height,
                          GL_COPY)) {
      DBG("%s: blit failed\n", __FUNCTION__);
      return false;
   }

   DBG("%s: success\n", __FUNCTION__);

   return true;
}
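/* Hypothetical application-level sketch, not driver code: uploading a
 * sub-rectangle of a wider client image through a PBO.  A non-zero
 * GL_UNPACK_ROW_LENGTH is what makes unpack->RowLength non-zero in
 * try_pbo_upload() above, so the blit's source stride becomes
 * RowLength * cpp instead of Width * cpp.  Assumes a current GL context,
 * a GL loader header (libepoxy here), and a PBO holding the full-width
 * image.
 */
#include <epoxy/gl.h>

static void
example_pbo_rowlength_upload(GLuint pbo, GLsizei sub_width,
                             GLsizei sub_height, GLsizei full_row_length)
{
   glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
   glPixelStorei(GL_UNPACK_ROW_LENGTH, full_row_length);

   glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, sub_width, sub_height, 0,
                GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, (const void *) 0);

   glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
   glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}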
static void brw_prepare_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      /* Get new bufferobj, offset:
       */
      intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
                        &bo, &offset);
      brw->ib.start_vertex_offset = offset / ib_type_size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;
         intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
                           &bo, &offset);
         brw->ib.start_vertex_offset = offset / ib_type_size;

         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
      } else {
         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;

         bo = intel_bufferobj_source(intel,
                                     intel_buffer_object(bufferobj),
                                     ib_type_size,
                                     &offset);
         drm_intel_bo_reference(bo);

         brw->ib.start_vertex_offset += offset / ib_type_size;
      }
   }

   if (brw->ib.bo != bo) {
      drm_intel_bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;
      brw_add_validated_bo(brw, brw->ib.bo);

      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      drm_intel_bo_unreference(bo);
   }

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   }
}