/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static GLboolean brw_try_draw_prims( GLcontext *ctx,
                                     const struct gl_client_array *arrays[],
                                     const struct _mesa_prim *prim,
                                     GLuint nr_prims,
                                     const struct _mesa_index_buffer *ib,
                                     GLuint min_index,
                                     GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   GLboolean retval = GL_FALSE;
   GLuint i;
   GLuint ib_offset;
   dri_bo *ib_bo;
   GLboolean force_flush = GL_FALSE;
   int ret;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   brw_validate_textures( brw );

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   LOCK_HARDWARE(intel);

   if (brw->intel.numClipRects == 0) {
      UNLOCK_HARDWARE(intel);
      return GL_TRUE;
   }

   {
      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.  This fraction is just a guess (minimal full state plus
       * a primitive is around 512 bytes), and would be better if we had
       * an upper bound of how much we might emit in a single
       * brw_try_draw_prims().
       */
   flush:
      if (force_flush)
         brw->no_batch_wrap = GL_FALSE;

      if (intel->batch->ptr - intel->batch->map > intel->batch->size * 3 / 4
          /* brw_emit_prim may change the cliprect_mode to LOOP_CLIPRECTS */
          || intel->batch->cliprect_mode != LOOP_CLIPRECTS
          || (force_flush == GL_TRUE))
         intel_batchbuffer_flush(intel->batch);

      force_flush = GL_FALSE;
      brw->no_batch_wrap = GL_TRUE;

      /* Set the first primitive early, ahead of validate_state:
       */
      brw_set_prim(brw, prim[0].mode, &force_flush);

      /* XXX:  Need to separate validate and upload of state.
       */
      ret = brw_validate_state( brw );
      if (ret) {
         force_flush = GL_TRUE;
         goto flush;
      }

      /* Various fallback checks:
       */
      if (brw->intel.Fallback)
         goto out;

      if (check_fallbacks( brw, prim, nr_prims ))
         goto out;

      /* need to account for index buffer and vertex buffer */
      if (ib) {
         ret = brw_prepare_indices( brw, ib, &ib_bo, &ib_offset);
         if (ret) {
            force_flush = GL_TRUE;
            goto flush;
         }
      }

      ret = brw_prepare_vertices( brw, min_index, max_index);
      if (ret < 0)
         goto out;

      if (ret > 0) {
         force_flush = GL_TRUE;
         goto flush;
      }

      /* Upload index, vertex data:
       */
      if (ib)
         brw_emit_indices( brw, ib, ib_bo, ib_offset);

      brw_emit_vertices( brw, min_index, max_index);

      for (i = 0; i < nr_prims; i++) {
         brw_emit_prim(brw, &prim[i]);
      }

      retval = GL_TRUE;
   }

 out:
   brw->no_batch_wrap = GL_FALSE;

   UNLOCK_HARDWARE(intel);

   if (!retval)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static GLboolean brw_try_draw_prims( GLcontext *ctx,
                                     const struct gl_client_array *arrays[],
                                     const struct _mesa_prim *prim,
                                     GLuint nr_prims,
                                     const struct _mesa_index_buffer *ib,
                                     GLuint min_index,
                                     GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   GLboolean retval = GL_FALSE;
   GLuint i, j;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   LOCK_HARDWARE(intel);

   if (brw->intel.numClipRects == 0) {
      assert(intel->batch->ptr == intel->batch->map + intel->batch->offset);
      UNLOCK_HARDWARE(intel);
      return GL_TRUE;
   }

   {
      /* Set the first primitive early, ahead of validate_state:
       */
      brw_set_prim(brw, prim[0].mode);

      /* XXX:  Need to separate validate and upload of state.
       */
      brw_validate_state( brw );

      /* Various fallback checks:
       */
      if (brw->intel.Fallback)
         goto out;

      if (check_fallbacks( brw, prim, nr_prims ))
         goto out;

      /* Upload index, vertex data:
       */
      if (ib)
         brw_upload_indices( brw, ib );

      if (!brw_upload_vertices( brw, min_index, max_index)) {
         goto out;
      }

      /* For single cliprect, state is already emitted:
       */
      if (brw->intel.numClipRects == 1) {
         for (i = 0; i < nr_prims; i++) {
            brw_emit_prim(brw, &prim[i]);
         }
      }
      else {
         /* Otherwise, explicitly do the cliprects at this point:
          */
         for (j = 0; j < brw->intel.numClipRects; j++) {
            brw_emit_cliprect(brw, &brw->intel.pClipRects[j]);

            /* Emit prims to batchbuffer:
             */
            for (i = 0; i < nr_prims; i++) {
               brw_emit_prim(brw, &prim[i]);
            }
         }
      }

      intel->need_flush = GL_TRUE;
      retval = GL_TRUE;
   }

 out:
   /* Currently have to do this to synchronize with the map/unmap of
    * the vertex buffer in brw_exec_api.c.  Not sure if there is any
    * way around this, as not every flush is due to a buffer filling
    * up.
    */
   if (!intel_batchbuffer_flush( brw->intel.batch )) {
      DBG("%s intel_batchbuffer_flush failed\n", __FUNCTION__);
      retval = GL_FALSE;
   }

   if (retval && intel->thrashing) {
      bmSetFence(intel);
   }

   /* Free any old data so it doesn't clog up texture memory - we
    * won't be referencing it again.
    */
   while (brw->vb.upload.wrap != brw->vb.upload.buf) {
      ctx->Driver.BufferData(ctx,
                             GL_ARRAY_BUFFER_ARB,
                             BRW_UPLOAD_INIT_SIZE,
                             NULL,
                             GL_DYNAMIC_DRAW_ARB,
                             brw->vb.upload.vbo[brw->vb.upload.wrap]);
      brw->vb.upload.wrap++;
      brw->vb.upload.wrap %= BRW_NR_UPLOAD_BUFS;
   }

   UNLOCK_HARDWARE(intel);

   if (!retval)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static GLboolean brw_try_draw_prims( struct gl_context *ctx,
                                     const struct gl_client_array *arrays[],
                                     const struct _mesa_prim *prim,
                                     GLuint nr_prims,
                                     const struct _mesa_index_buffer *ib,
                                     GLuint min_index,
                                     GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   GLboolean retval = GL_FALSE;
   GLboolean warn = GL_FALSE;
   GLboolean first_time = GL_TRUE;
   GLuint i;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   if (check_fallbacks(brw, prim, nr_prims))
      return GL_FALSE;

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   intel_prepare_render(intel);

   for (i = 0; i < nr_prims; i++) {
      uint32_t hw_prim;

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.  This fraction is just a guess (minimal full state plus
       * a primitive is around 512 bytes), and would be better if we had
       * an upper bound of how much we might emit in a single
       * brw_try_draw_prims().
       */
      intel_batchbuffer_require_space(intel->batch, intel->batch->size / 4,
                                      false);

      hw_prim = brw_set_prim(brw, &prim[i]);

      if (first_time || (brw->state.dirty.brw & BRW_NEW_PRIMITIVE)) {
         first_time = GL_FALSE;

         brw_validate_state(brw);

         /* Various fallback checks:
          */
         if (brw->intel.Fallback)
            goto out;

         /* Check that we can fit our state in with our existing batchbuffer, or
          * flush otherwise.
          */
         if (dri_bufmgr_check_aperture_space(brw->state.validated_bos,
                                             brw->state.validated_bo_count)) {
            static GLboolean warned;
            intel_batchbuffer_flush(intel->batch);

            /* Validate the state after we flushed the batch (which would have
             * changed the set of dirty state).  If we still fail to
             * check_aperture, warn of what's happening, but attempt to continue
             * on since it may succeed anyway, and the user would probably rather
             * see a failure and a warning than a fallback.
             */
            brw_validate_state(brw);
            if (!warned &&
                dri_bufmgr_check_aperture_space(brw->state.validated_bos,
                                                brw->state.validated_bo_count)) {
               warn = GL_TRUE;
               warned = GL_TRUE;
            }
         }

         intel->no_batch_wrap = GL_TRUE;
         brw_upload_state(brw);
      }

      brw_emit_prim(brw, &prim[i], hw_prim);

      intel->no_batch_wrap = GL_FALSE;

      retval = GL_TRUE;
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel->batch);

 out:
   brw_state_cache_check_size(brw);

   if (warn)
      fprintf(stderr, "i965: Single primitive emit potentially exceeded "
              "available aperture space\n");

   if (!retval)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}