/*
====================
OptimizeOptList

Runs the per-plane optimization pipeline on a single group:
t-junction fixup, 2D projection setup, edge splitting, and
island handling, then swaps in the regenerated triangle list.
====================
*/
static	void OptimizeOptList( optimizeGroup_t *opt ) {
	optimizeGroup_t	*savedNext = opt->nextGroup;

	// temporarily unlink this group so the t-junction fix only
	// operates on this single list and edges can be matched
	// can we avoid doing this if colinear vertexes break edges?
	opt->nextGroup = NULL;
	FixAreaGroupsTjunctions( opt );
	opt->nextGroup = savedNext;

	// build the 2D basis vectors for this group's plane
	dmapGlobals.mapPlanes[opt->planeNum].Normal().NormalVectors( opt->axis[0], opt->axis[1] );

	AddOriginalEdges( opt );
	SplitOriginalEdgesAtCrossings( opt );

#if 0
	// seperate any discontinuous areas for individual optimization
	// to reduce the scope of the problem
	SeparateIslands( opt );
#else
	DontSeparateIslands( opt );
#endif

	// the t-junction hash verts are no longer needed
	FreeTJunctionHash();

	// replace the original triangle list with the regenerated one
	FreeTriList( opt->triList );
	opt->triList = opt->regeneratedTris;
	opt->regeneratedTris = NULL;
}
// Example #2 (extraction artifact — commented out so the file compiles)
/*
================
FreeDMapFile
================
*/
void FreeDMapFile( void )
{
    FreeBrush( buildBrush );
    buildBrush = NULL;

    // release every entity: its BSP tree, its primitive chain,
    // and the optimize groups hanging off each area
    for( int entNum = 0; entNum < dmapGlobals.num_entities; entNum++ )
    {
        uEntity_t *ent = &dmapGlobals.uEntities[entNum];

        FreeTree( ent->tree );

        // walk the primitive list, grabbing each successor before
        // the node it belongs to is freed
        primitive_t *nextPrim;
        for( primitive_t *prim = ent->primitives; prim; prim = nextPrim )
        {
            nextPrim = prim->next;

            if( prim->brush )
            {
                FreeBrush( prim->brush );
            }
            if( prim->tris )
            {
                FreeTriList( prim->tris );
            }
            Mem_Free( prim );
        }

        // free area surfaces
        if( ent->areas )
        {
            for( int areaNum = 0; areaNum < ent->numAreas; areaNum++ )
            {
                FreeOptimizeGroupList( ent->areas[areaNum].groups );
            }
            Mem_Free( ent->areas );
        }
    }
    Mem_Free( dmapGlobals.uEntities );

    dmapGlobals.num_entities = 0;

    // drop the derived render data for each map light, then delete
    // the lights themselves
    for( int lightNum = 0; lightNum < dmapGlobals.mapLights.Num(); lightNum++ )
    {
        R_FreeLightDefDerivedData( &dmapGlobals.mapLights[lightNum]->def );
    }
    dmapGlobals.mapLights.DeleteContents( true );
}
/*
================
FreeOptimizeGroupList

Frees a linked list of optimize groups, including each group's
triangle list.
================
*/
void FreeOptimizeGroupList( optimizeGroup_t *groups ) {
	while ( groups ) {
		// save the link before the node is released
		optimizeGroup_t *next = groups->nextGroup;
		FreeTriList( groups->triList );
		Mem_Free( groups );
		groups = next;
	}
}
/*
==================
FixAreaGroupsTjunctions

Hashes all the verts in a group list, then rebuilds every
non-discrete group's triangle list with t-junctions fixed
against that hash.  A no-op when noTJunc is set.
==================
*/
void	FixAreaGroupsTjunctions(optimizeGroup_t *groupList)
{
	if (dmapGlobals.noTJunc) {
		return;
	}
	if (!groupList) {
		return;
	}

	int inCount = CountGroupListTris(groupList);

	if (dmapGlobals.verbose) {
		common->Printf("----- FixAreaGroupsTjunctions -----\n");
		common->Printf("%6i triangles in\n", inCount);
	}

	HashTriangles(groupList);

	for (optimizeGroup_t *group = groupList; group; group = group->nextGroup) {
		// don't touch discrete surfaces
		if (group->material != NULL && group->material->IsDiscrete()) {
			continue;
		}

		// rebuild the group's list from the fixed fragments
		mapTri_t *rebuilt = NULL;
		for (const mapTri_t *tri = group->triList; tri; tri = tri->next) {
			mapTri_t *fixedTris = FixTriangleAgainstHash(tri);
			rebuilt = MergeTriLists(rebuilt, fixedTris);
		}

		FreeTriList(group->triList);
		group->triList = rebuilt;
	}

	int outCount = CountGroupListTris(groupList);

	if (dmapGlobals.verbose) {
		common->Printf("%6i triangles out\n", outCount);
	}
}
/*
==================
FixGlobalTjunctions

Welds t-junctions across ALL areas of an entity at once: every
vertex is snapped into one shared spatial hash, then each group's
triangles are re-split against that hash.  For the world entity
(entityNum 0) the hash is additionally seeded with the vertexes of
referenced func_static models so static meshes tie in cleanly.
==================
*/
void	FixGlobalTjunctions( uEntity_t *e ) {
	mapTri_t	*a;
	int			vert;
	int			i;
	optimizeGroup_t	*group;
	int			areaNum;

	common->Printf( "----- FixGlobalTjunctions -----\n" );

	// clear the hash tables
	memset( hashVerts, 0, sizeof( hashVerts ) );

	numHashVerts = 0;
	numTotalVerts = 0;

	// bound all the triangles to determine the bucket size
	hashBounds.Clear();
	for ( areaNum = 0 ; areaNum < e->numAreas ; areaNum++ ) {
		for ( group = e->areas[areaNum].groups ; group ; group = group->nextGroup ) {
			for ( a = group->triList ; a ; a = a->next ) {
				hashBounds.AddPoint( a->v[0].xyz );
				hashBounds.AddPoint( a->v[1].xyz );
				hashBounds.AddPoint( a->v[2].xyz );
			}
		}
	}

	// spread the bounds so it will never have a zero size
	for ( i = 0 ; i < 3 ; i++ ) {
		hashBounds[0][i] = floor( hashBounds[0][i] - 1 );
		hashBounds[1][i] = ceil( hashBounds[1][i] + 1 );
		hashIntMins[i] = hashBounds[0][i] * SNAP_FRACTIONS;

		// per-axis bucket width; the integer scale is clamped to at
		// least 1 so a degenerate axis can't produce a zero bucket size
		hashScale[i] = ( hashBounds[1][i] - hashBounds[0][i] ) / HASH_BINS;
		hashIntScale[i] = hashScale[i] * SNAP_FRACTIONS;
		if ( hashIntScale[i] < 1 ) {
			hashIntScale[i] = 1;
		}
	}

	// add all the points to the hash buckets
	for ( areaNum = 0 ; areaNum < e->numAreas ; areaNum++ ) {
		for ( group = e->areas[areaNum].groups ; group ; group = group->nextGroup ) {
			// don't touch discrete surfaces
			if ( group->material != NULL && group->material->IsDiscrete() ) {
				continue;
			}

			// remember each triangle's snapped verts so the fix pass
			// can recognize verts the triangle already owns
			for ( a = group->triList ; a ; a = a->next ) {
				for ( vert = 0 ; vert < 3 ; vert++ ) {
					a->hashVert[vert] = GetHashVert( a->v[vert].xyz );
				}
			}
		}
	}

	// add all the func_static model vertexes to the hash buckets
	// optionally inline some of the func_static models
	if ( dmapGlobals.entityNum == 0 ) {
		for ( int eNum = 1 ; eNum < dmapGlobals.num_entities ; eNum++ ) {
			uEntity_t *entity = &dmapGlobals.uEntities[eNum];
			const char *className = entity->mapEntity->epairs.GetString( "classname" );
			if ( idStr::Icmp( className, "func_static" ) ) {
				continue;
			}
			const char *modelName = entity->mapEntity->epairs.GetString( "model" );
			if ( !modelName ) {
				continue;
			}
			// only mesh model formats are considered; other model types
			// (e.g. md5) are skipped here
			if ( !strstr( modelName, ".lwo" ) && !strstr( modelName, ".ase" ) && !strstr( modelName, ".ma" ) ) {
				continue;
			}

			idRenderModel	*model = renderModelManager->FindModel( modelName );

//			common->Printf( "adding T junction verts for %s.\n", entity->mapEntity->epairs.GetString( "name" ) );

			idMat3	axis;
			// get the rotation matrix in either full form, or single angle form
			if ( !entity->mapEntity->epairs.GetMatrix( "rotation", "1 0 0 0 1 0 0 0 1", axis ) ) {
				float angle = entity->mapEntity->epairs.GetFloat( "angle" );
				if ( angle != 0.0f ) {
					axis = idAngles( 0.0f, angle, 0.0f ).ToMat3();
				} else {
					axis.Identity();
				}
			}

			idVec3	origin = entity->mapEntity->epairs.GetVector( "origin" );

			for ( i = 0 ; i < model->NumSurfaces() ; i++ ) {
				const modelSurface_t *surface = model->Surface( i );
				const srfTriangles_t *tri = surface->geometry;

				mapTri_t	mapTri;
				memset( &mapTri, 0, sizeof( mapTri ) );
				mapTri.material = surface->shader;
				// don't let discretes (autosprites, etc) merge together
				if ( mapTri.material->IsDiscrete() ) {
					mapTri.mergeGroup = (void *)surface;
				}
				// NOTE(review): only every third vertex is hashed here
				// (j += 3) — presumably one vert per triangle; confirm
				// this is intentional and not meant to hash all verts
				for ( int j = 0 ; j < tri->numVerts ; j += 3 ) {
					idVec3 v = tri->verts[j].xyz * axis + origin;
					GetHashVert( v );
				}
			}
		}
	}



	// now fix each area: rebuild every non-discrete group's triangle
	// list from the fragments produced by splitting against the hash
	for ( areaNum = 0 ; areaNum < e->numAreas ; areaNum++ ) {
		for ( group = e->areas[areaNum].groups ; group ; group = group->nextGroup ) {
			// don't touch discrete surfaces
			if ( group->material != NULL && group->material->IsDiscrete() ) {
				continue;
			}

			mapTri_t *newList = NULL;
			for ( mapTri_t *tri = group->triList ; tri ; tri = tri->next ) {
				mapTri_t *fixed = FixTriangleAgainstHash( tri );
				newList = MergeTriLists( newList, fixed );
			}
			FreeTriList( group->triList );
			group->triList = newList;
		}
	}


	// done
	FreeTJunctionHash();
}
/*
==================
FixTriangleAgainstHashVert

Returns a list of two new mapTri if the hashVert is
on an edge of the given mapTri, otherwise returns NULL.
==================
*/
static mapTri_t *FixTriangleAgainstHashVert( const mapTri_t *a, const hashVert_t *hv ) {
	int			i;
	const idDrawVert	*v1, *v2;
	idDrawVert	split;
	idVec3		dir;
	float		len;
	float		frac;
	mapTri_t	*new1, *new2;
	idVec3		temp;
	float		d, off;
	const idVec3 *v;
	idPlane		plane1, plane2;

	v = &hv->v;

	// if the triangle already has this hashVert as a vert,
	// it can't be split by it (pointer identity comparison —
	// relies on the verts having been snapped into the hash)
	if ( a->hashVert[0] == hv || a->hashVert[1] == hv || a->hashVert[2] == hv ) {
		return NULL;
	}

	split.Clear();

	// we probably should find the edge that the vertex is closest to.
	// it is possible to be < 1 unit away from multiple
	// edges, but we only want to split by one of them
	for ( i = 0 ; i < 3 ; i++ ) {
		v1 = &a->v[i];
		v2 = &a->v[(i+1)%3];
		VectorSubtract( v2->xyz, v1->xyz, dir );
		len = dir.Normalize();

		// if it is close to one of the edge vertexes, skip it
		// (d is the projection of the hash vert onto the edge axis)
		VectorSubtract( *v, v1->xyz, temp );
		d = DotProduct( temp, dir );
		if ( d <= 0 || d >= len ) {
			continue;
		}

		// make sure it is on the line
		// NOTE(review): off is a vector length, so it is never
		// negative — the "<= -COLINEAR_EPSILON" half of this test
		// can never fire; kept as-is
		VectorMA( v1->xyz, d, dir, temp );
		VectorSubtract( temp, *v, temp );
		off = temp.Length();
		if ( off <= -COLINEAR_EPSILON || off >= COLINEAR_EPSILON ) {
			continue;
		}

		// take the x/y/z from the splitter,
		// but interpolate everything else from the original tri
		VectorCopy( *v, split.xyz );
		frac = d / len;
		split.st[0] = v1->st[0] + frac * ( v2->st[0] - v1->st[0] );
		split.st[1] = v1->st[1] + frac * ( v2->st[1] - v1->st[1] );
		split.normal[0] = v1->normal[0] + frac * ( v2->normal[0] - v1->normal[0] );
		split.normal[1] = v1->normal[1] + frac * ( v2->normal[1] - v1->normal[1] );
		split.normal[2] = v1->normal[2] + frac * ( v2->normal[2] - v1->normal[2] );
		split.normal.Normalize();

		// split the tri into two: new1 keeps the edge's far vertex
		// replaced by the split point, new2 keeps the near one
		new1 = CopyMapTri( a );
		new1->v[(i+1)%3] = split;
		new1->hashVert[(i+1)%3] = hv;
		new1->next = NULL;

		new2 = CopyMapTri( a );
		new2->v[i] = split;
		new2->hashVert[i] = hv;
		new2->next = new1;

		plane1.FromPoints( new1->hashVert[0]->v, new1->hashVert[1]->v, new1->hashVert[2]->v );
		plane2.FromPoints( new2->hashVert[0]->v, new2->hashVert[1]->v, new2->hashVert[2]->v );

		d = DotProduct( plane1, plane2 );

		// if the two split triangle's normals don't face the same way,
		// it should not be split (freeing new2 also frees new1 through
		// its next link)
		if ( d <= 0 ) {
			FreeTriList( new2 );
			continue;
		}

		return new2;
	}


	return NULL;
}
// Example #7 (extraction artifact — commented out so the file compiles)
/*
====================
WriteOutputSurfaces

Emits the .proc "model" block for one area of one entity: merges
all surface-compatible optimize groups into combined surfaces and
writes each as a shared-vertex triangle surface.
====================
*/
static void WriteOutputSurfaces( int entityNum, int areaNum ) {
	mapTri_t	*ambient, *copy;
	int			surfaceNum;
	int			numSurfaces;
	idMapEntity	*entity;
	uArea_t		*area;
	optimizeGroup_t	*group, *groupStep;
	int			i; // , j;
//	int			col;
	srfTriangles_t	*uTri;
//	mapTri_t	*tri;
// per-light index list split out of the combined ambient surface
typedef struct interactionTris_s {
	struct interactionTris_s	*next;
	mapTri_t	*triList;
	mapLight_t	*light;
} interactionTris_t;

	interactionTris_t	*interactions, *checkInter; //, *nextInter;


	area = &dmapGlobals.uEntities[entityNum].areas[areaNum];
	entity = dmapGlobals.uEntities[entityNum].mapEntity;

	numSurfaces = CountUniqueShaders( area->groups );


	// entity 0 (the world) names its models by area number; other
	// entities must have a "name" key to name the model after
	if ( entityNum == 0 ) {
		procFile->WriteFloatString( "model { /* name = */ \"_area%i\" /* numSurfaces = */ %i\n\n", 
			areaNum, numSurfaces );
	} else {
		const char *name;

		entity->epairs.GetString( "name", "", &name );
		if ( !name[0] ) {
			common->Error( "Entity %i has surfaces, but no name key", entityNum );
		}
		procFile->WriteFloatString( "model { /* name = */ \"%s\" /* numSurfaces = */ %i\n\n", 
			name, numSurfaces );
	}

	surfaceNum = 0;
	for ( group = area->groups ; group ; group = group->nextGroup ) {
		if ( group->surfaceEmited ) {
			continue;
		}

		// combine all groups compatible with this one
		// usually several optimizeGroup_t can be combined into a single
		// surface, even though they couldn't be merged together to save
		// vertexes because they had different planes, texture coordinates, or lights.
		// Different mergeGroups will stay in separate surfaces.
		ambient = NULL;

		// each light that illuminates any of the groups in the surface will
		// get its own list of indexes out of the original surface
		interactions = NULL;

		for ( groupStep = group ; groupStep ; groupStep = groupStep->nextGroup ) {
			if ( groupStep->surfaceEmited ) {
				continue;
			}
			if ( !GroupsAreSurfaceCompatible( group, groupStep ) ) {
				continue;
			}

			// copy it out to the ambient list
			copy = CopyTriList( groupStep->triList );
			ambient = MergeTriLists( ambient, copy );
			groupStep->surfaceEmited = true;

			// duplicate it into an interaction for each groupLight
			for ( i = 0 ; i < groupStep->numGroupLights ; i++ ) {
				for ( checkInter = interactions ; checkInter ; checkInter = checkInter->next ) {
					if ( checkInter->light == groupStep->groupLights[i] ) {
						break;
					}
				}
				if ( !checkInter ) {
					// create a new interaction
					checkInter = (interactionTris_t *)Mem_ClearedAlloc( sizeof( *checkInter ) );
					checkInter->light = groupStep->groupLights[i];
					checkInter->next = interactions;
					interactions = checkInter;
				}
				copy = CopyTriList( groupStep->triList );
				checkInter->triList = MergeTriLists( checkInter->triList, copy );
			}
		}

		if ( !ambient ) {
			continue;
		}

		if ( surfaceNum >= numSurfaces ) {
			common->Error( "WriteOutputSurfaces: surfaceNum >= numSurfaces" );
		}

		// NOTE(review): in this version the interactions list built
		// above is never written to the proc file or freed before the
		// next iteration — looks like a leak or trimmed code; confirm
		// against the full file
		procFile->WriteFloatString( "/* surface %i */ { ", surfaceNum );
		surfaceNum++;
		procFile->WriteFloatString( "\"%s\" ", ambient->material->GetName() );

		// share verts, clean up and write the combined surface, then
		// release the working triangle data
		uTri = ShareMapTriVerts( ambient );
		FreeTriList( ambient );

		CleanupUTriangles( uTri );
		WriteUTriangles( uTri );
		R_FreeStaticTriSurf( uTri );

		procFile->WriteFloatString( "}\n\n" );
	}

	procFile->WriteFloatString( "}\n\n" );
}
/*
====================
BuildLightShadows

Build the beam tree and shadow volume surface for a light.
Collects every shadow-casting triangle fragment inside the light
frustum into per-plane groups, then hands them to CreateLightShadow.
====================
*/
static void BuildLightShadows( uEntity_t *e, mapLight_t *light ) {
	int			i;
	optimizeGroup_t	*group;
	mapTri_t	*tri;
	mapTri_t	*shadowers;
	optimizeGroup_t		*shadowerGroups;
	idVec3		lightOrigin;
	bool		hasPerforatedSurface = false;

	//
	// build a group list of all the triangles that will contribute to
	// the optimized shadow volume, leaving the original triangles alone
	//


	// shadowers will contain all the triangles that will contribute to the
	// shadow volume
	shadowerGroups = NULL;
	lightOrigin = light->def.globalLightOrigin;

	// if the light is no-shadows, don't add any surfaces
	// to the beam tree at all
	if ( !light->def.parms.noShadows
		&& light->def.lightShader->LightCastsShadows() ) {
		for ( i = 0 ; i < e->numAreas ; i++ ) {
			for ( group = e->areas[i].groups ; group ; group = group->nextGroup ) {
				// if the surface doesn't cast shadows, skip it
				if ( !group->material->SurfaceCastsShadow() ) {
					continue;
				}

				// if the group doesn't face away from the light, it
				// won't contribute to the shadow volume
				if ( dmapGlobals.mapPlanes[ group->planeNum ].Distance( lightOrigin ) > 0 ) {
					continue;
				}

				// if the group bounds doesn't intersect the light bounds,
				// skip it
				if ( !group->bounds.IntersectsBounds( light->def.frustumTris->bounds ) ) {
					continue;
				}

				// build up a list of the triangle fragments inside the
				// light frustum
				shadowers = NULL;
				for ( tri = group->triList ; tri ; tri = tri->next ) {
					mapTri_t	*in, *out;

					// clip it to the light frustum; only the inside
					// fragments are kept, the outside ones are freed
					ClipTriByLight( light, tri, &in, &out );
					FreeTriList( out );
					shadowers = MergeTriLists( shadowers, in );
				}

				// if we didn't get any out of this group, we don't
				// need to create a new group in the shadower list
				if ( !shadowers ) {
					continue;
				}

				// find a group in shadowerGroups to add these to
				// we will ignore everything but planenum, and we
				// can merge across areas
				optimizeGroup_t	*check;

				for ( check = shadowerGroups ; check ; check = check->nextGroup ) {
					if ( check->planeNum == group->planeNum ) {
						break;
					}
				}
				if ( !check ) {
					// clone the source group as a template; only its
					// triList and nextGroup links are rewritten
					check = (optimizeGroup_t *)Mem_Alloc( sizeof( *check ) );
					*check = *group;
					check->triList = NULL;
					check->nextGroup = shadowerGroups;
					shadowerGroups = check;
				}

				// if any surface is a shadow-casting perforated or translucent surface, we
				// can't use the face removal optimizations because we can see through
				// some of the faces
				if ( group->material->Coverage() != MC_OPAQUE ) {
					hasPerforatedSurface = true;
				}

				check->triList = MergeTriLists( check->triList, shadowers );
			}
		}
	}

	// take the shadower group list and create a beam tree and shadow volume
	light->shadowTris = CreateLightShadow( shadowerGroups, light );

	if ( light->shadowTris && hasPerforatedSurface ) {
		// can't ever remove front faces, because we can see through some of them
		light->shadowTris->numShadowIndexesNoCaps = light->shadowTris->numShadowIndexesNoFrontCaps =
			light->shadowTris->numIndexes;
	}

	// we don't need the original shadower triangles for anything else
	FreeOptimizeGroupList( shadowerGroups );
}