/**
 * Draw-module vbuf callback: emit an indexed primitive through the
 * hardware indirect-elements path.
 *
 * \param render      vbuf renderer (wraps the i915 context)
 * \param indices     index list produced by the draw module
 * \param nr_indices  number of entries in \p indices
 */
static void
i915_vbuf_render_draw_elements(struct vbuf_render *render,
                               const ushort *indices,
                               uint nr_indices)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   unsigned save_nr_indices;

   save_nr_indices = nr_indices;

   /* The fallback path may rewrite the index list (see
    * draw_generate_indices below), so recompute how many indices will
    * actually be emitted; zero means nothing to draw. */
   nr_indices = draw_calc_nr_indices(nr_indices, i915_render->fallback);
   if (!nr_indices)
      return;

   i915_vbuf_ensure_index_bounds(render, i915_render->vbo_max_index);

   /* Validate derived state and push hardware state before touching the
    * batch buffer. */
   if (i915->dirty)
      i915_update_derived(i915);

   if (i915->hardware_dirty)
      i915_emit_hardware_state(i915);

   /* Reserve 1 dword for the primitive command plus two packed 16-bit
    * indices per dword (rounded up). */
   if (!BEGIN_BATCH(1 + (nr_indices + 1)/2, 1)) {
      FLUSH_BATCH(NULL);

      /* Make sure state is re-emitted after a flush: */
      i915_update_derived(i915);
      i915_emit_hardware_state(i915);
      i915->vbo_flushed = 1;

      /* A second failure on a fresh batch should be impossible. */
      if (!BEGIN_BATCH(1 + (nr_indices + 1)/2, 1)) {
         assert(0);
         goto out;
      }
   }

   OUT_BATCH(_3DPRIMITIVE |
             PRIM_INDIRECT |
             i915_render->hwprim |
             PRIM_INDIRECT_ELTS |
             nr_indices);
   /* Emits the index dwords into the batch; must match the reservation
    * made above. */
   draw_generate_indices(render,
                         indices,
                         save_nr_indices,
                         i915_render->fallback);

out:
   return;
}
static void i915_clear_render_target_render(struct pipe_context *pipe, struct pipe_surface *dst, const union pipe_color_union *color, unsigned dstx, unsigned dsty, unsigned width, unsigned height) { struct i915_context *i915 = i915_context(pipe); struct pipe_framebuffer_state fb_state; util_blitter_save_framebuffer(i915->blitter, &i915->framebuffer); fb_state.width = dst->width; fb_state.height = dst->height; fb_state.nr_cbufs = 1; fb_state.cbufs[0] = dst; fb_state.zsbuf = NULL; pipe->set_framebuffer_state(pipe, &fb_state); if (i915->dirty) i915_update_derived(i915); i915_clear_emit(pipe, PIPE_CLEAR_COLOR, color, 0.0, 0x0, dstx, dsty, width, height); pipe->set_framebuffer_state(pipe, &i915->blitter->saved_fb_state); util_unreference_framebuffer_state(&i915->blitter->saved_fb_state); i915->blitter->saved_fb_state.nr_cbufs = ~0; }
static void i915_clear_depth_stencil_render(struct pipe_context *pipe, struct pipe_surface *dst, unsigned clear_flags, double depth, unsigned stencil, unsigned dstx, unsigned dsty, unsigned width, unsigned height) { struct i915_context *i915 = i915_context(pipe); struct pipe_framebuffer_state fb_state; util_blitter_save_framebuffer(i915->blitter, &i915->framebuffer); fb_state.width = dst->width; fb_state.height = dst->height; fb_state.nr_cbufs = 0; fb_state.zsbuf = dst; pipe->set_framebuffer_state(pipe, &fb_state); if (i915->dirty) i915_update_derived(i915); i915_clear_emit(pipe, clear_flags & PIPE_CLEAR_DEPTHSTENCIL, NULL, depth, stencil, dstx, dsty, width, height); pipe->set_framebuffer_state(pipe, &i915->blitter->saved_fb_state); util_unreference_framebuffer_state(&i915->blitter->saved_fb_state); i915->blitter->saved_fb_state.nr_cbufs = ~0; }
/**
 * Draw-module vbuf callback: emit a sequential (non-indexed) primitive
 * through the hardware indirect-sequential path.
 *
 * \param render  vbuf renderer (wraps the i915 context)
 * \param start   first vertex within the current vbo window
 * \param nr      number of vertices to draw
 */
static void
i915_vbuf_render_draw_arrays(struct vbuf_render *render,
                             unsigned start,
                             uint nr)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;

   /* Primitives the hardware can't draw directly go through the
    * index-generating fallback instead. */
   if (i915_render->fallback) {
      draw_arrays_fallback(render, start, nr);
      return;
   }

   i915_vbuf_ensure_index_bounds(render, start + nr);
   /* Bias by the vbo base so 'start' is an absolute vertex index. */
   start += i915_render->vbo_index;

   /* Validate derived state and push hardware state before touching the
    * batch buffer. */
   if (i915->dirty)
      i915_update_derived(i915);

   if (i915->hardware_dirty)
      i915_emit_hardware_state(i915);

   /* Exactly two dwords: the primitive command and the start index. */
   if (!BEGIN_BATCH(2, 0)) {
      FLUSH_BATCH(NULL);

      /* Make sure state is re-emitted after a flush: */
      i915_update_derived(i915);
      i915_emit_hardware_state(i915);
      i915->vbo_flushed = 1;

      /* A second failure on a fresh batch should be impossible. */
      if (!BEGIN_BATCH(2, 0)) {
         assert(0);
         goto out;
      }
   }

   OUT_BATCH(_3DPRIMITIVE |
             PRIM_INDIRECT |
             PRIM_INDIRECT_SEQUENTIAL |
             i915_render->hwprim |
             nr);
   OUT_BATCH(start); /* Beginning vertex index */

out:
   return;
}
static void i915_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) { struct i915_context *i915 = i915_context(pipe); struct draw_context *draw = i915->draw; const void *mapped_indices = NULL; /* * Ack vs contants here, helps ipers a lot. */ i915->dirty &= ~I915_NEW_VS_CONSTANTS; if (i915->dirty) i915_update_derived(i915); /* * Map index buffer, if present */ if (info->indexed) { mapped_indices = i915->index_buffer.user_buffer; if (!mapped_indices) mapped_indices = i915_buffer(i915->index_buffer.buffer)->data; draw_set_indexes(draw, (ubyte *) mapped_indices + i915->index_buffer.offset, i915->index_buffer.index_size); } if (i915->constants[PIPE_SHADER_VERTEX]) draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0, i915_buffer(i915->constants[PIPE_SHADER_VERTEX])->data, (i915->current.num_user_constants[PIPE_SHADER_VERTEX] * 4 * sizeof(float))); else draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0, NULL, 0); if (i915->num_vertex_sampler_views > 0) i915_prepare_vertex_sampling(i915); /* * Do the drawing */ draw_vbo(i915->draw, info); if (mapped_indices) draw_set_indexes(draw, NULL, 0); if (i915->num_vertex_sampler_views > 0) i915_cleanup_vertex_sampling(i915); /* * Instead of flushing on every state change, we flush once here * when we fire the vbo. */ draw_flush(i915->draw); }
/**
 * Clear the full current framebuffer via the 3D engine.
 *
 * \param pipe     context
 * \param buffers  PIPE_CLEAR_* bitmask selecting which buffers to clear
 * \param rgba     color clear value
 * \param depth    depth clear value
 * \param stencil  stencil clear value
 */
void
i915_clear_render(struct pipe_context *pipe, unsigned buffers,
                  const float *rgba, double depth, unsigned stencil)
{
   struct i915_context *ctx = i915_context(pipe);

   /* Validate derived state before emitting the clear commands. */
   if (ctx->dirty)
      i915_update_derived(ctx);

   /* The clear rectangle covers the whole framebuffer. */
   i915_clear_emit(pipe, buffers, rgba, depth, stencil,
                   0, 0, ctx->framebuffer.width, ctx->framebuffer.height);
}
/** * Callback exported to the draw module. * Returns the current vertex_info. * * Side effects: * If state is dirty update derived state. */ static const struct vertex_info * i915_vbuf_render_get_vertex_info(struct vbuf_render *render) { struct i915_vbuf_render *i915_render = i915_vbuf_render(render); struct i915_context *i915 = i915_render->i915; if (i915->dirty) { /* make sure we have up to date vertex layout */ i915_update_derived(i915); } return &i915->current.vertex_info; }
/**
 * Emit one primitive's vertices directly into the batch buffer.
 *
 * \param stage   setup stage (wraps the i915 context)
 * \param prim    primitive whose vertices are emitted
 * \param hwprim  hardware primitive type bits for _3DPRIMITIVE
 * \param nr      number of vertices in \p prim
 */
static INLINE void
emit_prim( struct draw_stage *stage,
           struct prim_header *prim,
           unsigned hwprim,
           unsigned nr )
{
   struct i915_context *i915 = setup_stage(stage)->i915;
   unsigned vertex_size;
   unsigned i;

   /* Validate derived state and push hardware state before touching the
    * batch buffer. */
   if (i915->dirty)
      i915_update_derived( i915 );

   if (i915->hardware_dirty)
      i915_emit_hardware_state( i915 );

   /* need to do this after validation! */
   vertex_size = i915->current.vertex_info.size * 4; /* in bytes */
   assert(vertex_size >= 12); /* never smaller than 12 bytes */

   /* Reserve 1 dword for the primitive command plus the packed vertex
    * data (vertex_size is in bytes, the batch is dword-granular). */
   if (!BEGIN_BATCH( 1 + nr * vertex_size / 4)) {
      FLUSH_BATCH(NULL);

      /* Make sure state is re-emitted after a flush:
       *
       * NOTE(review): unlike the vbuf draw paths, derived state is not
       * re-validated (no i915_update_derived) after this flush —
       * presumably the flush does not dirty derived state; confirm.
       */
      i915_emit_hardware_state( i915 );

      /* A second failure on a fresh batch should be impossible. */
      if (!BEGIN_BATCH( 1 + nr * vertex_size / 4)) {
         assert(0);
         return;
      }
   }

   /* Emit each triangle as a single primitive.  I told you this was
    * simple.
    */
   OUT_BATCH(_3DPRIMITIVE |
             hwprim |
             ((4 + vertex_size * nr)/4 - 2));

   for (i = 0; i < nr; i++)
      emit_hw_vertex(i915, prim->v[i]);
}
static boolean i915_draw_range_elements(struct pipe_context *pipe, struct pipe_buffer *indexBuffer, unsigned indexSize, unsigned min_index, unsigned max_index, unsigned prim, unsigned start, unsigned count) { struct i915_context *i915 = i915_context(pipe); struct draw_context *draw = i915->draw; unsigned i; if (i915->dirty) i915_update_derived(i915); /* * Map vertex buffers */ for (i = 0; i < i915->num_vertex_buffers; i++) { void *buf = pipe_buffer_map(pipe->screen, i915->vertex_buffer[i].buffer, PIPE_BUFFER_USAGE_CPU_READ); draw_set_mapped_vertex_buffer(draw, i, buf); } /* * Map index buffer, if present */ if (indexBuffer) { void *mapped_indexes = pipe_buffer_map(pipe->screen, indexBuffer, PIPE_BUFFER_USAGE_CPU_READ); draw_set_mapped_element_buffer_range(draw, indexSize, min_index, max_index, mapped_indexes); } else { draw_set_mapped_element_buffer(draw, 0, NULL); } draw_set_mapped_constant_buffer(draw, i915->current.constants[PIPE_SHADER_VERTEX], (i915->current.num_user_constants[PIPE_SHADER_VERTEX] * 4 * sizeof(float))); /* * Do the drawing */ draw_arrays(i915->draw, prim, start, count); /* * unmap vertex/index buffers */ for (i = 0; i < i915->num_vertex_buffers; i++) { pipe_buffer_unmap(pipe->screen, i915->vertex_buffer[i].buffer); draw_set_mapped_vertex_buffer(draw, i, NULL); } if (indexBuffer) { pipe_buffer_unmap(pipe->screen, indexBuffer); draw_set_mapped_element_buffer_range(draw, 0, start, start + count - 1, NULL); } return TRUE; }