/* May fail if out of video memory for texture or vbo upload, or on * fallback conditions. */ static boolean brw_try_draw_elements( struct pipe_context *pipe, struct pipe_buffer *index_buffer, unsigned index_size, unsigned mode, unsigned start, unsigned count ) { struct brw_context *brw = brw_context(pipe); /* Set the first primitive ahead of validate_state: */ brw_set_prim(brw, mode); /* Upload index, vertex data: */ if (index_buffer && !brw_upload_indices( brw, index_buffer, index_size, start, count )) return FALSE; if (!brw_upload_vertex_buffers(brw)) return FALSE; if (!brw_upload_vertex_elements( brw )) return FALSE; /* XXX: Need to separate validate and upload of state. */ if (brw->state.dirty.brw) brw_validate_state( brw ); if (!brw_emit_prim(brw, index_buffer != NULL, start, count)) return FALSE; return TRUE; }
/* Attempt to draw an array of primitives.  May fail if out of video
 * memory for texture or vbo upload, or on fallback conditions, in
 * which case GL_FALSE is returned and the caller must retry or take a
 * software fallback path.
 *
 * Acquires the hardware lock for the whole operation; every exit path
 * below the LOCK_HARDWARE call must (and does) unlock before
 * returning.
 */
static GLboolean brw_try_draw_prims( GLcontext *ctx,
				     const struct gl_client_array *arrays[],
				     const struct _mesa_prim *prim,
				     GLuint nr_prims,
				     const struct _mesa_index_buffer *ib,
				     GLuint min_index,
				     GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   GLboolean retval = GL_FALSE;
   GLuint i, j;

   /* Flush any pending core-Mesa state changes before we derive input
    * information from them.
    */
   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   LOCK_HARDWARE(intel);

   /* No cliprects: the window is entirely obscured or off-screen, so
    * there is nothing to render.  Report success; the batch must be
    * empty at this point (nothing should have been emitted yet).
    */
   if (brw->intel.numClipRects == 0) {
      assert(intel->batch->ptr == intel->batch->map + intel->batch->offset);
      UNLOCK_HARDWARE(intel);
      return GL_TRUE;
   }

   {
      /* Set the first primitive early, ahead of validate_state:
       */
      brw_set_prim(brw, prim[0].mode);

      /* XXX:  Need to separate validate and upload of state.
       */
      brw_validate_state( brw );

      /* Various fallback checks:
       */
      if (brw->intel.Fallback)
	 goto out;

      if (check_fallbacks( brw, prim, nr_prims ))
	 goto out;

      /* Upload index, vertex data:
       *
       * NOTE(review): the brw_upload_indices return value is not
       * checked here, unlike brw_upload_vertices below — confirm it
       * cannot fail, or that a failure is caught later.
       */
      if (ib)
	 brw_upload_indices( brw, ib );

      if (!brw_upload_vertices( brw, min_index, max_index)) {
	 goto out;
      }

      /* For single cliprect, state is already emitted:
       */
      if (brw->intel.numClipRects == 1) {
	 for (i = 0; i < nr_prims; i++) {
	    brw_emit_prim(brw, &prim[i]);
	 }
      }
      else {
	 /* Otherwise, explicitly do the cliprects at this point:
	  * each cliprect gets its own copy of the primitive stream.
	  */
	 for (j = 0; j < brw->intel.numClipRects; j++) {
	    brw_emit_cliprect(brw, &brw->intel.pClipRects[j]);

	    /* Emit prims to batchbuffer:
	     */
	    for (i = 0; i < nr_prims; i++) {
	       brw_emit_prim(brw, &prim[i]);
	    }
	 }
      }

      intel->need_flush = GL_TRUE;
      retval = GL_TRUE;
   }

 out:

   /* Currently have to do this to synchronize with the map/unmap of
    * the vertex buffer in brw_exec_api.c.  Not sure if there is any
    * way around this, as not every flush is due to a buffer filling
    * up.
    *
    * Note the flush happens even on the failure paths above; a flush
    * failure downgrades the result to GL_FALSE.
    */
   if (!intel_batchbuffer_flush( brw->intel.batch )) {
      DBG("%s intel_batchbuffer_flush failed\n", __FUNCTION__);
      retval = GL_FALSE;
   }

   /* On success while memory-thrashing, fence so buffer eviction can
    * make progress.
    */
   if (retval && intel->thrashing) {
      bmSetFence(intel);
   }

   /* Free any old data so it doesn't clog up texture memory - we
    * won't be referencing it again.  Walk the upload-VBO ring from
    * 'wrap' up to the current buffer, re-allocating each one to drop
    * its old storage.
    */
   while (brw->vb.upload.wrap != brw->vb.upload.buf) {
      ctx->Driver.BufferData(ctx,
			     GL_ARRAY_BUFFER_ARB,
			     BRW_UPLOAD_INIT_SIZE,
			     NULL,
			     GL_DYNAMIC_DRAW_ARB,
			     brw->vb.upload.vbo[brw->vb.upload.wrap]);
      brw->vb.upload.wrap++;
      brw->vb.upload.wrap %= BRW_NR_UPLOAD_BUFS;
   }

   UNLOCK_HARDWARE(intel);

   if (!retval)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}