Example #1
void batching_load_buffers(bool distortion)
{
	GR_DEBUG_SCOPE("Batching load buffers");
	TRACE_SCOPE(tracing::LoadBatchingBuffers);

	SCP_map<batch_info, primitive_batch>::iterator bi;
	SCP_map<batch_buffer_key, primitive_batch_buffer>::iterator buffer_iter;

	for ( buffer_iter = Batching_buffers.begin(); buffer_iter != Batching_buffers.end(); ++buffer_iter ) {
		// zero out the buffers
		buffer_iter->second.desired_buffer_size = 0;
	}

	// assign primitive batch items
	for ( bi = Batching_primitives.begin(); bi != Batching_primitives.end(); ++bi ) {
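		// only handle the batches that belong to the requested pass (distortion vs. regular)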
		if ( bi->first.mat_type == batch_info::DISTORTION ) {
			if ( !distortion ) {
				continue;
			}
		} else {
			if ( distortion ) {
				continue;
			}
		}

		size_t num_verts = bi->second.num_verts();

		if ( num_verts > 0 ) {
			batch_info render_info = bi->second.get_render_info();
			uint vertex_mask = batching_determine_vertex_layout(&render_info);

			primitive_batch_buffer *buffer = batching_find_buffer(vertex_mask, render_info.prim_type);
			primitive_batch_item draw_item;

			draw_item.batch_item_info = render_info;
			draw_item.offset = 0;
			draw_item.n_verts = num_verts;
			draw_item.batch = &bi->second;

			buffer->desired_buffer_size += num_verts * sizeof(batch_vertex);
			buffer->items.push_back(draw_item);
		}
	}

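	// allocate each buffer at the size computed above and load its queued items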
	for ( buffer_iter = Batching_buffers.begin(); buffer_iter != Batching_buffers.end(); ++buffer_iter ) {
		batching_allocate_and_load_buffer(&buffer_iter->second);
	}
}
Example #2
int batch_get_size()
{
	int n_to_render = 0;
	SCP_map<int, batch_item>::iterator bi;

	for (bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {
		n_to_render += bi->second.batch.need_to_render();
	}

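	// distortion batches, skipping any laser entries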
	for (bi = distortion_map.begin(); bi != distortion_map.end(); ++bi) {
		if ( bi->second.laser )
			continue;

		n_to_render += bi->second.batch.need_to_render();
	}

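	// need_to_render() counts triangles, so convert to a vertex count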
	return n_to_render * 3;
}
Example #3
void batching_shutdown()
{
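    // free the system-memory staging buffer owned by each batch buffer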
    for ( auto buffer_iter = Batching_buffers.begin(); buffer_iter != Batching_buffers.end(); ++buffer_iter ) {
        primitive_batch_buffer *batch_buffer = &buffer_iter->second;

        if ( batch_buffer->buffer_ptr != nullptr ) {
            vm_free(batch_buffer->buffer_ptr);
            batch_buffer->buffer_ptr = nullptr;
        }
    }
}
Example #4
int geometry_batch_get_size()
{
	int n_to_render = 0;
	SCP_map<int, g_sdr_batch_item>::iterator bi;

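	// the geometry-shader path submits one point per particle, so the count is returned as-is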
	for (bi = geometry_shader_map.begin(); bi != geometry_shader_map.end(); ++bi) {
		n_to_render += bi->second.batch.need_to_render();
	}

	return n_to_render;
}
Example #5
void batch_load_buffer_distortion_map_bitmaps(effect_vertex* buffer, int *n_verts)
{
	for (SCP_map<int, batch_item>::iterator bi = distortion_map.begin(); bi != distortion_map.end(); ++bi) {

		if ( bi->second.laser )
			continue;

		if ( !bi->second.batch.need_to_render() )
			continue;

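		// load_buffer() appends this batch's vertices and advances *n_verts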
		Assert( bi->second.texture >= 0 );
		bi->second.batch.load_buffer(buffer, n_verts);
	}
}
Example #6
void batch_load_buffer_geometry_shader_map_bitmaps(particle_pnt* buffer, int *n_verts)
{
	for (SCP_map<int, g_sdr_batch_item>::iterator bi = geometry_shader_map.begin(); bi != geometry_shader_map.end(); ++bi) {

		if ( bi->second.laser )
			continue;

		if ( !bi->second.batch.need_to_render() )
			continue;

		Assert( bi->second.texture >= 0 );
		bi->second.batch.load_buffer(buffer, n_verts);
	}
}
Example #7
void batching_render_all(bool render_distortions)
{
    GR_DEBUG_SCOPE("Batching render all");
    TRACE_SCOPE(tracing::DrawEffects);

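    // stage the vertex data for the requested pass before drawing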
    batching_load_buffers(render_distortions);

    SCP_map<batch_buffer_key, primitive_batch_buffer>::iterator bi;

    for ( bi = Batching_buffers.begin(); bi != Batching_buffers.end(); ++bi ) {
        batching_render_buffer(&bi->second);
    }

    gr_clear_states();
}
Example #8
void batch_render_geometry_shader_map_bitmaps(int buffer_handle)
{
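	// draw every non-laser geometry-shader batch that has something queued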
	for (SCP_map<int, g_sdr_batch_item>::iterator bi = geometry_shader_map.begin(); bi != geometry_shader_map.end(); ++bi) {

		if ( bi->second.laser )
			continue;

		if ( !bi->second.batch.need_to_render() )
			continue;

		Assert( bi->second.texture >= 0 );
		gr_set_bitmap(bi->second.texture, GR_ALPHABLEND_FILTER, GR_BITBLT_MODE_NORMAL, bi->second.alpha);
		bi->second.batch.render_buffer(buffer_handle, bi->second.tmap_flags);
	}
}
Example #9
void batch_render_lasers(int buffer_handle)
{
	for (SCP_map<int, batch_item>::iterator bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {

		if ( !bi->second.laser )
			continue;

		if ( !bi->second.batch.need_to_render() )
			continue;

		Assert( bi->second.texture >= 0 );
		gr_set_bitmap(bi->second.texture, GR_ALPHABLEND_FILTER, GR_BITBLT_MODE_NORMAL, 0.99999f);
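		// use the shared vertex buffer when one was provided, otherwise submit directly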
		if ( buffer_handle >= 0 ) {
			bi->second.batch.render_buffer(buffer_handle, TMAP_FLAG_TEXTURED | TMAP_FLAG_XPARENT | TMAP_HTL_3D_UNLIT | TMAP_FLAG_RGB | TMAP_FLAG_GOURAUD | TMAP_FLAG_CORRECT);
		} else {
			bi->second.batch.render(TMAP_FLAG_TEXTURED | TMAP_FLAG_XPARENT | TMAP_HTL_3D_UNLIT | TMAP_FLAG_RGB | TMAP_FLAG_GOURAUD | TMAP_FLAG_CORRECT);
		}
	}
}
Example #10
void batch_render_distortion_map_bitmaps(int buffer_handle)
{
	for (SCP_map<int,batch_item>::iterator bi = distortion_map.begin(); bi != distortion_map.end(); ++bi) {

		if ( bi->second.laser )
			continue;

		if ( !bi->second.batch.need_to_render() )
			continue;

		Assert( bi->second.texture >= 0 );
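		// distortion bitmaps use GR_ALPHABLEND_NONE rather than the alpha-blend filter used elsewhere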
		gr_set_bitmap(bi->second.texture, GR_ALPHABLEND_NONE, GR_BITBLT_MODE_NORMAL, bi->second.alpha);

		if ( buffer_handle >= 0 ) {
			bi->second.batch.render_buffer(buffer_handle, bi->second.tmap_flags);
		} else {
			bi->second.batch.render( bi->second.tmap_flags);
		}
	}
}