void RocketRenderingInterface::RenderGeometry(Vertex* vertices, int num_vertices, int* indices, int num_indices, TextureHandle texture, const Vector2f& translation)
{
	// Stream the caller's vertex and index data into the persistent stream
	// buffers, then issue a draw for the whole index range.
	gr_update_buffer_data(vertex_stream_buffer, sizeof(*vertices) * num_vertices, vertices);
	gr_update_buffer_data(index_stream_buffer, sizeof(*indices) * num_indices, indices);

	int bitmap;
	if (texture == 0) {
		// No texture bound; -1 signals an untextured draw to renderGeometry().
		bitmap = -1;
	} else {
		// Resolve the texture once instead of calling get_texture() twice.
		// Base handle plus the current frame number selects the bitmap to use.
		auto* tex = get_texture(texture);
		bitmap = tex->handle + tex->frame_num;
	}

	renderGeometry(vertex_stream_buffer, index_stream_buffer, num_indices, bitmap, translation);
}
void batching_allocate_and_load_buffer(primitive_batch_buffer *draw_queue)
{
	// Pack every pending batch item into the queue's CPU-side staging buffer,
	// then upload it to the GPU buffer if one has been created.
	Assert(draw_queue != NULL);

	// Grow the staging allocation when the accumulated request exceeds the
	// current capacity (the buffer only ever grows, never shrinks).
	if (draw_queue->buffer_size < draw_queue->desired_buffer_size) {
		if (draw_queue->buffer_ptr != NULL) {
			vm_free(draw_queue->buffer_ptr);
		}
		draw_queue->buffer_size = draw_queue->desired_buffer_size;
		draw_queue->buffer_ptr = vm_malloc(draw_queue->desired_buffer_size);
	}

	// Reset the request so the next frame accumulates from zero.
	draw_queue->desired_buffer_size = 0;

	// Lay the batches out back to back, recording each item's offset and
	// vertex count, and clearing the batch once its data has been copied out.
	size_t write_pos = 0;
	for (auto &item : draw_queue->items) {
		item.offset = write_pos;
		item.n_verts = item.batch->load_buffer((batch_vertex*)draw_queue->buffer_ptr, write_pos);
		item.batch->clear();
		write_pos += item.n_verts;
	}

	if (draw_queue->buffer_num >= 0) {
		gr_update_buffer_data(draw_queue->buffer_num, draw_queue->buffer_size, draw_queue->buffer_ptr);
	}
}
void batch_render_all(int stream_buffer) { if ( stream_buffer >= 0 ) { // need to get vertex size int n_to_render = batch_get_size(); int n_verts = 0; if ( ( Batch_buffer_size < (n_to_render * sizeof(effect_vertex)) ) ) { if ( Batch_buffer != NULL ) { vm_free(Batch_buffer); } Batch_buffer_size = n_to_render * sizeof(effect_vertex); Batch_buffer = vm_malloc(Batch_buffer_size); } batch_load_buffer_lasers((effect_vertex*)Batch_buffer, &n_verts); batch_load_buffer_geometry_map_bitmaps((effect_vertex*)Batch_buffer, &n_verts); batch_load_buffer_distortion_map_bitmaps((effect_vertex*)Batch_buffer, &n_verts); gr_update_buffer_data(stream_buffer, Batch_buffer_size, Batch_buffer); Assert(n_verts <= n_to_render); batch_render_lasers(stream_buffer); batch_render_geometry_map_bitmaps(stream_buffer); //batch_render_distortion_map_bitmaps(true); } else { batch_render_lasers(); batch_render_geometry_map_bitmaps(); //batch_render_distortion_map_bitmaps(); } gr_clear_states(); }
CompiledGeometryHandle RocketRenderingInterface::CompileGeometry(Vertex* vertices, int num_vertices, int* indices, int num_indices, TextureHandle texture)
{
	GR_DEBUG_SCOPE("libRocket::CompileGeometry");

	// Bake the geometry into static GPU buffers once, so repeated renders can
	// skip the per-frame streaming path used by RenderGeometry().
	auto* geom = new CompiledGeometry();

	geom->vertex_buffer = gr_create_buffer(BufferType::Vertex, BufferUsageHint::Static);
	// T* converts to void* implicitly; the reinterpret_casts were unnecessary.
	gr_update_buffer_data(geom->vertex_buffer, num_vertices * sizeof(Vertex), vertices);

	geom->index_buffer = gr_create_buffer(BufferType::Index, BufferUsageHint::Static);
	gr_update_buffer_data(geom->index_buffer, num_indices * sizeof(int), indices);

	geom->num_elements = num_indices;
	geom->texture = get_texture(texture);

	// The opaque handle handed back to libRocket is just the object pointer;
	// presumably released via the matching ReleaseCompiledGeometry — not
	// visible in this chunk.
	return reinterpret_cast<CompiledGeometryHandle>(geom);
}
void UniformBuffer::submitData() { if (_aligner.getSize() == 0) { // No data to submit, return now to avoid causing graphics errors return; } gr_update_buffer_data(_buffer_obj, _aligner.getSize(), _aligner.getData()); }
void geometry_batch_render(int stream_buffer) { if ( stream_buffer < 0 ) { return; } size_t n_to_render = geometry_batch_get_size(); size_t n_verts = 0; if ( Batch_geometry_buffer_size < (n_to_render * sizeof(particle_pnt)) ) { if ( Batch_geometry_buffer != NULL ) { vm_free(Batch_geometry_buffer); } Batch_geometry_buffer_size = n_to_render * sizeof(particle_pnt); Batch_geometry_buffer = vm_malloc(Batch_geometry_buffer_size); } batch_load_buffer_geometry_shader_map_bitmaps((particle_pnt*)Batch_geometry_buffer, &n_verts); gr_update_buffer_data(stream_buffer, Batch_geometry_buffer_size, Batch_geometry_buffer); batch_render_geometry_shader_map_bitmaps(stream_buffer); }