/**
 * Bind a new framebuffer (color + Z/stencil surfaces) to the context.
 *
 * The ordering below is deliberate and must be preserved:
 *   1. unmap the currently-bound surfaces,
 *   2. flush (and wait for) any rendering still targeting them,
 *   3. update the context's framebuffer state (releasing old refs),
 *   4. map the newly-bound surfaces.
 *
 * \param pipe  the context (wrapped Cell context)
 * \param fb    the new framebuffer state to install
 */
static void cell_set_framebuffer_state(struct pipe_context *pipe, const struct pipe_framebuffer_state *fb)
{
   struct cell_context *cell = cell_context(pipe);

   /* The memcmp early-out is intentionally disabled: the state is
    * (re)installed unconditionally.  NOTE(review): presumably disabled
    * because a surface's contents can change even when the
    * pipe_framebuffer_state struct compares equal -- confirm before
    * re-enabling the comparison.
    */
   if (1 /*memcmp(&cell->framebuffer, fb, sizeof(*fb))*/) {
      uint i;

      /* unmap old surfaces */
      cell_unmap_surfaces(cell);

      /* Finish any pending rendering to the current surface before
       * installing a new surface!
       */
      cell_flush_int(cell, CELL_FLUSH_WAIT);

      /* update my state
       * (this is also where old surfaces will finally get freed)
       */
      cell->framebuffer.width = fb->width;
      cell->framebuffer.height = fb->height;
      cell->framebuffer.nr_cbufs = fb->nr_cbufs;
      /* Reference all PIPE_MAX_COLOR_BUFS slots (not just nr_cbufs) so
       * that slots beyond the new count drop their old references.
       */
      for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
         pipe_surface_reference(&cell->framebuffer.cbufs[i], fb->cbufs[i]);
      }
      pipe_surface_reference(&cell->framebuffer.zsbuf, fb->zsbuf);

      /* map new surfaces */
      cell_map_surfaces(cell);

      /* tell the rest of the driver the framebuffer changed */
      cell->dirty |= CELL_NEW_FRAMEBUFFER;
   }
}
/** * Draw vertex arrays, with optional indexing. * Basically, map the vertex buffers (and drawing surfaces), then hand off * the drawing to the 'draw' module. * * XXX should the element buffer be specified/bound with a separate function? */ static void cell_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) { struct cell_context *cell = cell_context(pipe); struct draw_context *draw = cell->draw; void *mapped_indices = NULL; unsigned i; if (cell->dirty) cell_update_derived( cell ); #if 0 cell_map_surfaces(cell); #endif /* * Map vertex buffers */ for (i = 0; i < cell->num_vertex_buffers; i++) { void *buf = cell_resource(cell->vertex_buffer[i].buffer)->data; draw_set_mapped_vertex_buffer(draw, i, buf); } /* Map index buffer, if present */ if (info->indexed && cell->index_buffer.buffer) mapped_indices = cell_resource(cell->index_buffer.buffer)->data; draw_set_mapped_index_buffer(draw, mapped_indices); /* draw! */ draw_vbo(draw, info); /* * unmap vertex/index buffers - will cause draw module to flush */ for (i = 0; i < cell->num_vertex_buffers; i++) { draw_set_mapped_vertex_buffer(draw, i, NULL); } if (mapped_indices) { draw_set_mapped_index_buffer(draw, NULL); } /* * TODO: Flush only when a user vertex/index buffer is present * (or even better, modify draw module to do this * internally when this condition is seen?) */ draw_flush(draw); }
/** * Draw vertex arrays, with optional indexing. * Basically, map the vertex buffers (and drawing surfaces), then hand off * the drawing to the 'draw' module. * * XXX should the element buffer be specified/bound with a separate function? */ static boolean cell_draw_range_elements(struct pipe_context *pipe, struct pipe_buffer *indexBuffer, unsigned indexSize, unsigned min_index, unsigned max_index, unsigned mode, unsigned start, unsigned count) { struct cell_context *sp = cell_context(pipe); struct draw_context *draw = sp->draw; unsigned i; if (sp->dirty) cell_update_derived( sp ); #if 0 cell_map_surfaces(sp); #endif cell_map_constant_buffers(sp); /* * Map vertex buffers */ for (i = 0; i < sp->num_vertex_buffers; i++) { void *buf = pipe_buffer_map(pipe->screen, sp->vertex_buffer[i].buffer, PIPE_BUFFER_USAGE_CPU_READ); cell_flush_buffer_range(sp, buf, sp->vertex_buffer[i].buffer->size); draw_set_mapped_vertex_buffer(draw, i, buf); } /* Map index buffer, if present */ if (indexBuffer) { void *mapped_indexes = pipe_buffer_map(pipe->screen, indexBuffer, PIPE_BUFFER_USAGE_CPU_READ); draw_set_mapped_element_buffer(draw, indexSize, mapped_indexes); } else { /* no index/element buffer */ draw_set_mapped_element_buffer(draw, 0, NULL); } /* draw! */ draw_arrays(draw, mode, start, count); /* * unmap vertex/index buffers - will cause draw module to flush */ for (i = 0; i < sp->num_vertex_buffers; i++) { draw_set_mapped_vertex_buffer(draw, i, NULL); pipe_buffer_unmap(pipe->screen, sp->vertex_buffer[i].buffer); } if (indexBuffer) { draw_set_mapped_element_buffer(draw, 0, NULL); pipe_buffer_unmap(pipe->screen, indexBuffer); } /* Note: leave drawing surfaces mapped */ cell_unmap_constant_buffers(sp); return TRUE; }