static void haswell_upload_cut_index(struct brw_context *brw) { struct gl_context *ctx = &brw->ctx; /* Don't trigger on Ivybridge */ if (brw->gen < 8 && !brw->is_haswell) return; const unsigned cut_index_setting = ctx->Array._PrimitiveRestart ? HSW_CUT_INDEX_ENABLE : 0; /* BRW_NEW_INDEX_BUFFER */ unsigned cut_index; if (brw->ib.ib) { cut_index = _mesa_primitive_restart_index(ctx, brw->ib.type); } else { /* There's no index buffer, but primitive restart may still apply * to glDrawArrays and such. FIXED_INDEX mode only applies to drawing * operations that use an index buffer, so we can ignore it and use * the GL restart index directly. */ cut_index = ctx->Array.RestartIndex; } BEGIN_BATCH(2); OUT_BATCH(_3DSTATE_VF << 16 | cut_index_setting | (2 - 2)); OUT_BATCH(cut_index); ADVANCE_BATCH(); }
/** * Set the restart index. */ static void setup_primitive_restart(struct gl_context *ctx, const struct _mesa_index_buffer *ib, struct pipe_draw_info *info) { if (ctx->Array._PrimitiveRestart) { info->restart_index = _mesa_primitive_restart_index(ctx, ib->type); /* Enable primitive restart only when the restart index can have an * effect. This is required for correctness in radeonsi VI support. * Other hardware may also benefit from taking a faster, non-restart path * when possible. */ if ((ib->type == GL_UNSIGNED_INT) || (ib->type == GL_UNSIGNED_SHORT && info->restart_index <= 0xffff) || (ib->type == GL_UNSIGNED_BYTE && info->restart_index <= 0xff)) info->primitive_restart = true; } }
/**
 * Handle primitive restart in software.
 *
 * This function breaks up calls into the driver so primitive restart
 * support is not required in the driver.
 */
void
vbo_sw_primitive_restart(struct gl_context *ctx,
                         const struct _mesa_prim *prims,
                         GLuint nr_prims,
                         const struct _mesa_index_buffer *ib,
                         struct gl_buffer_object *indirect)
{
   GLuint prim_num;
   struct _mesa_prim new_prim;
   struct _mesa_index_buffer new_ib;
   struct sub_primitive *sub_prims;
   struct sub_primitive *sub_prim;
   GLuint num_sub_prims;
   GLuint sub_prim_num;
   GLuint end_index;
   GLuint sub_end_index;
   /* NOTE(review): the other restart helpers in this file pass ib->type to
    * _mesa_primitive_restart_index(); this one passes ib->index_size.
    * Presumably this chunk targets a Mesa version where the helper takes an
    * index size — confirm against the prototype in use.
    */
   GLuint restart_index = _mesa_primitive_restart_index(ctx, ib->index_size);
   struct _mesa_prim temp_prim;
   struct vbo_context *vbo = vbo_context(ctx);
   vbo_draw_func draw_prims_func = vbo->draw_prims;
   /* Map the index buffer ourselves only if it is a real buffer object
    * (Name != 0) that is not already mapped for internal use.
    */
   GLboolean map_ib = ib->obj->Name &&
                      !ib->obj->Mappings[MAP_INTERNAL].Pointer;
   void *ptr;

   /* If there is an indirect buffer, map it and extract the draw params */
   if (indirect && prims[0].is_indirect) {
      const uint32_t *indirect_params;
      if (!ctx->Driver.MapBufferRange(ctx, 0, indirect->Size, GL_MAP_READ_BIT,
                                      indirect, MAP_INTERNAL)) {
         /* something went wrong with mapping, give up */
         _mesa_error(ctx, GL_OUT_OF_MEMORY,
                     "failed to map indirect buffer for sw primitive restart");
         return;
      }

      assert(nr_prims == 1);
      new_prim = prims[0];
      /* The DrawElementsIndirect command layout: count, instance count,
       * first index, base vertex, base instance.
       */
      indirect_params = (const uint32_t *)
         ADD_POINTERS(indirect->Mappings[MAP_INTERNAL].Pointer,
                      new_prim.indirect_offset);

      new_prim.is_indirect = 0;
      new_prim.count = indirect_params[0];
      new_prim.num_instances = indirect_params[1];
      new_prim.start = indirect_params[2];
      new_prim.basevertex = indirect_params[3];
      new_prim.base_instance = indirect_params[4];

      new_ib = *ib;
      new_ib.count = new_prim.count;

      /* From here on, draw from the locally-patched copies instead of the
       * caller's (now superseded) indirect prim/ib.
       */
      prims = &new_prim;
      ib = &new_ib;

      ctx->Driver.UnmapBuffer(ctx, indirect, MAP_INTERNAL);
   }

   /* Find the sub-primitives.  These are regions in the index buffer which
    * are split based on the primitive restart index value.
    */
   if (map_ib) {
      ctx->Driver.MapBufferRange(ctx, 0, ib->obj->Size, GL_MAP_READ_BIT,
                                 ib->obj, MAP_INTERNAL);
   }

   /* ib->ptr is a byte offset when the indices live in a buffer object. */
   ptr = ADD_POINTERS(ib->obj->Mappings[MAP_INTERNAL].Pointer, ib->ptr);

   sub_prims = find_sub_primitives(ptr, ib->index_size,
                                   0, ib->count, restart_index,
                                   &num_sub_prims);

   if (map_ib) {
      ctx->Driver.UnmapBuffer(ctx, ib->obj, MAP_INTERNAL);
   }

   /* Loop over the primitives, and use the located sub-primitives to draw
    * each primitive with a break to implement each primitive restart.
    */
   for (prim_num = 0; prim_num < nr_prims; prim_num++) {
      end_index = prims[prim_num].start + prims[prim_num].count;
      memcpy(&temp_prim, &prims[prim_num], sizeof (temp_prim));
      /* Loop over the sub-primitives drawing sub-ranges of the primitive. */
      for (sub_prim_num = 0; sub_prim_num < num_sub_prims; sub_prim_num++) {
         sub_prim = &sub_prims[sub_prim_num];
         sub_end_index = sub_prim->start + sub_prim->count;
         if (prims[prim_num].start <= sub_prim->start) {
            /* Clamp the drawn range to the overlap of the primitive's range
             * and the sub-primitive's range.
             */
            temp_prim.start = MAX2(prims[prim_num].start, sub_prim->start);
            temp_prim.count = MIN2(sub_end_index, end_index) -
                              temp_prim.start;
            if ((temp_prim.start == sub_prim->start) &&
                (temp_prim.count == sub_prim->count)) {
               /* Drawing the whole sub-primitive: its cached min/max index
                * bounds are valid, so pass them through.
                */
               draw_prims_func(ctx, &temp_prim, 1, ib, GL_TRUE,
                               sub_prim->min_index, sub_prim->max_index,
                               NULL, 0, NULL);
            } else {
               /* Partial sub-primitive: bounds unknown, let the driver
                * compute them (index_bounds_valid = GL_FALSE).
                */
               draw_prims_func(ctx, &temp_prim, 1, ib, GL_FALSE, -1, -1,
                               NULL, 0, NULL);
            }
         }
         /* Remaining sub-primitives start past this primitive's end. */
         if (sub_end_index >= end_index) {
            break;
         }
      }
   }

   free(sub_prims);
}
/**
 * Compute min and max elements by scanning the index buffer for
 * glDraw[Range]Elements() calls.
 * If primitive restart is enabled, we need to ignore restart
 * indexes when computing min/max.
 */
static void
vbo_get_minmax_index(struct gl_context *ctx,
                     const struct _mesa_prim *prim,
                     const struct _mesa_index_buffer *ib,
                     GLuint *min_index, GLuint *max_index,
                     const GLuint count)
{
   const GLboolean restart = ctx->Array._PrimitiveRestart;
   const GLuint restartIndex = _mesa_primitive_restart_index(ctx, ib->type);
   const int index_size = vbo_sizeof_ib_type(ib->type);
   const char *indices;
   GLuint i;

   indices = (char *) ib->ptr + prim->start * index_size;
   if (_mesa_is_bufferobj(ib->obj)) {
      /* Indices live in a VBO: map just the range we need to scan. */
      GLsizeiptr size = MIN2(count * index_size, ib->obj->Size);
      indices = ctx->Driver.MapBufferRange(ctx, (GLintptr) indices, size,
                                           GL_MAP_READ_BIT, ib->obj);
   }

   /* Single pass per element width; when restart is active, entries equal
    * to the restart index are skipped and never contribute to the bounds.
    */
   switch (ib->type) {
   case GL_UNSIGNED_INT: {
      const GLuint *elems = (const GLuint *) indices;
      GLuint lo = ~0U;
      GLuint hi = 0;
      for (i = 0; i < count; i++) {
         const GLuint v = elems[i];
         if (restart && v == restartIndex)
            continue;
         if (v > hi)
            hi = v;
         if (v < lo)
            lo = v;
      }
      *min_index = lo;
      *max_index = hi;
      break;
   }
   case GL_UNSIGNED_SHORT: {
      const GLushort *elems = (const GLushort *) indices;
      GLuint lo = ~0U;
      GLuint hi = 0;
      for (i = 0; i < count; i++) {
         const GLuint v = elems[i];
         if (restart && v == restartIndex)
            continue;
         if (v > hi)
            hi = v;
         if (v < lo)
            lo = v;
      }
      *min_index = lo;
      *max_index = hi;
      break;
   }
   case GL_UNSIGNED_BYTE: {
      const GLubyte *elems = (const GLubyte *) indices;
      GLuint lo = ~0U;
      GLuint hi = 0;
      for (i = 0; i < count; i++) {
         const GLuint v = elems[i];
         if (restart && v == restartIndex)
            continue;
         if (v > hi)
            hi = v;
         if (v < lo)
            lo = v;
      }
      *min_index = lo;
      *max_index = hi;
      break;
   }
   default:
      assert(0);
      break;
   }

   if (_mesa_is_bufferobj(ib->obj)) {
      ctx->Driver.UnmapBuffer(ctx, ib->obj);
   }
}
/**
 * Compute min and max elements by scanning the index buffer for
 * glDraw[Range]Elements() calls.
 * If primitive restart is enabled, we need to ignore restart
 * indexes when computing min/max.
 */
static void
vbo_get_minmax_index(struct gl_context *ctx,
                     const struct _mesa_prim *prim,
                     const struct _mesa_index_buffer *ib,
                     GLuint *min_index, GLuint *max_index,
                     const GLuint count)
{
   const GLboolean restart = ctx->Array._PrimitiveRestart;
   const GLuint restartIndex = _mesa_primitive_restart_index(ctx, ib->type);
   const int index_size = vbo_sizeof_ib_type(ib->type);
   const char *indices;
   GLuint i;

   indices = (char *) ib->ptr + prim->start * index_size;
   /* When the indices live in a buffer object, this is the byte offset of
    * the scanned range within it.  Save it NOW: 'indices' is overwritten
    * with the mapped CPU pointer below, and the min/max cache must be keyed
    * consistently on lookup and store.
    */
   const GLintptr offset = (GLintptr) indices;

   if (_mesa_is_bufferobj(ib->obj)) {
      /* Widen before multiplying so count * index_size cannot overflow
       * 32-bit unsigned arithmetic for very large index counts.
       */
      GLsizeiptr size = MIN2((GLsizeiptr) count * index_size, ib->obj->Size);

      if (vbo_get_minmax_cached(ib->obj, ib->type, offset, count,
                                min_index, max_index))
         return;

      indices = ctx->Driver.MapBufferRange(ctx, offset, size,
                                           GL_MAP_READ_BIT, ib->obj,
                                           MAP_INTERNAL);
   }

   switch (ib->type) {
   case GL_UNSIGNED_INT: {
      const GLuint *ui_indices = (const GLuint *)indices;
      GLuint max_ui = 0;
      GLuint min_ui = ~0U;
      if (restart) {
         /* Restart entries must not contribute to the bounds. */
         for (i = 0; i < count; i++) {
            if (ui_indices[i] != restartIndex) {
               if (ui_indices[i] > max_ui)
                  max_ui = ui_indices[i];
               if (ui_indices[i] < min_ui)
                  min_ui = ui_indices[i];
            }
         }
      }
      else {
#if defined(USE_SSE41)
         if (cpu_has_sse4_1) {
            _mesa_uint_array_min_max(ui_indices, &min_ui, &max_ui, count);
         }
         else
#endif
            for (i = 0; i < count; i++) {
               if (ui_indices[i] > max_ui)
                  max_ui = ui_indices[i];
               if (ui_indices[i] < min_ui)
                  min_ui = ui_indices[i];
            }
      }
      *min_index = min_ui;
      *max_index = max_ui;
      break;
   }
   case GL_UNSIGNED_SHORT: {
      const GLushort *us_indices = (const GLushort *)indices;
      GLuint max_us = 0;
      GLuint min_us = ~0U;
      if (restart) {
         for (i = 0; i < count; i++) {
            if (us_indices[i] != restartIndex) {
               if (us_indices[i] > max_us)
                  max_us = us_indices[i];
               if (us_indices[i] < min_us)
                  min_us = us_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < count; i++) {
            if (us_indices[i] > max_us)
               max_us = us_indices[i];
            if (us_indices[i] < min_us)
               min_us = us_indices[i];
         }
      }
      *min_index = min_us;
      *max_index = max_us;
      break;
   }
   case GL_UNSIGNED_BYTE: {
      const GLubyte *ub_indices = (const GLubyte *)indices;
      GLuint max_ub = 0;
      GLuint min_ub = ~0U;
      if (restart) {
         for (i = 0; i < count; i++) {
            if (ub_indices[i] != restartIndex) {
               if (ub_indices[i] > max_ub)
                  max_ub = ub_indices[i];
               if (ub_indices[i] < min_ub)
                  min_ub = ub_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < count; i++) {
            if (ub_indices[i] > max_ub)
               max_ub = ub_indices[i];
            if (ub_indices[i] < min_ub)
               min_ub = ub_indices[i];
         }
      }
      *min_index = min_ub;
      *max_index = max_ub;
      break;
   }
   default:
      unreachable("not reached");
   }

   if (_mesa_is_bufferobj(ib->obj)) {
      /* Bug fix: store under the same key (the byte offset) that
       * vbo_get_minmax_cached() looked up above.  The original passed
       * prim->start (an element index), so stored entries never matched
       * legitimate lookups and could collide with unrelated ranges.
       */
      vbo_minmax_cache_store(ctx, ib->obj, ib->type, offset, count,
                             *min_index, *max_index);
      ctx->Driver.UnmapBuffer(ctx, ib->obj, MAP_INTERNAL);
   }
}