/* ================== ClipTriList ================== */ void ClipTriList( const mapTri_t *list, const idPlane &plane, float epsilon, mapTri_t **front, mapTri_t **back ) { const mapTri_t *tri; mapTri_t *newList; idWinding *w, *frontW, *backW; *front = NULL; *back = NULL; for ( tri = list ; tri ; tri = tri->next ) { w = WindingForTri( tri ); w->Split( plane, epsilon, &frontW, &backW ); newList = WindingToTriList( frontW, tri ); *front = MergeTriLists( *front, newList ); newList = WindingToTriList( backW, tri ); *back = MergeTriLists( *back, newList ); delete w; } }
/* =============== AddTriListToArea The triList is appended to the apropriate optimzeGroup_t, creating a new one if needed. The entire list is assumed to come from the same planar primitive =============== */ static void AddTriListToArea( uEntity_t *e, mapTri_t *triList, int planeNum, int areaNum, textureVectors_t *texVec ) { uArea_t *area; optimizeGroup_t *group; int i, j; if ( !triList ) { return; } area = &e->areas[areaNum]; for ( group = area->groups ; group ; group = group->nextGroup ) { if ( group->material == triList->material && group->planeNum == planeNum && group->mergeGroup == triList->mergeGroup ) { // check the texture vectors for ( i = 0 ; i < 2 ; i++ ) { for ( j = 0 ; j < 3 ; j++ ) { if ( idMath::Fabs( texVec->v[i][j] - group->texVec.v[i][j] ) > TEXTURE_VECTOR_EQUAL_EPSILON ) { break; } } if ( j != 3 ) { break; } if ( idMath::Fabs( texVec->v[i][3] - group->texVec.v[i][3] ) > TEXTURE_OFFSET_EQUAL_EPSILON ) { break; } } if ( i == 2 ) { break; // exact match } else { // different texture offsets i = 1; // just for debugger breakpoint } } } if ( !group ) { group = (optimizeGroup_t *)Mem_Alloc( sizeof( *group ) ); memset( group, 0, sizeof( *group ) ); group->planeNum = planeNum; group->mergeGroup = triList->mergeGroup; group->material = triList->material; group->nextGroup = area->groups; group->texVec = *texVec; area->groups = group; } group->triList = MergeTriLists( group->triList, triList ); }
/*
==================
FixAreaGroupsTjunctions

Hashes the triangles of every group in the list, then rebuilds each
non-discrete group's triangle list with T-junctions fixed against that
hash.  Does nothing when T-junction fixing is disabled or the list is
empty.
==================
*/
void FixAreaGroupsTjunctions( optimizeGroup_t *groupList ) {
	if ( dmapGlobals.noTJunc || !groupList ) {
		return;
	}

	const int startCount = CountGroupListTris( groupList );

	if ( dmapGlobals.verbose ) {
		common->Printf( "----- FixAreaGroupsTjunctions -----\n" );
		common->Printf( "%6i triangles in\n", startCount );
	}

	HashTriangles( groupList );

	for ( optimizeGroup_t *group = groupList ; group ; group = group->nextGroup ) {
		// don't touch discrete surfaces
		if ( group->material != NULL && group->material->IsDiscrete() ) {
			continue;
		}

		// rebuild the triangle list, replacing each triangle with its
		// T-junction-fixed fragments
		mapTri_t *rebuilt = NULL;
		for ( const mapTri_t *cur = group->triList ; cur ; cur = cur->next ) {
			rebuilt = MergeTriLists( rebuilt, FixTriangleAgainstHash( cur ) );
		}

		FreeTriList( group->triList );
		group->triList = rebuilt;
	}

	const int endCount = CountGroupListTris( groupList );
	if ( dmapGlobals.verbose ) {
		common->Printf( "%6i triangles out\n", endCount );
	}
}
// RB begin int FilterMeshesIntoTree_r( idWinding* w, mapTri_t* originalTri, node_t* node ) { idWinding* front, *back; int c; if( !w ) { return 0; } if( node->planenum == PLANENUM_LEAF ) { // add it to the leaf list if( originalTri->material->GetContentFlags() & CONTENTS_AREAPORTAL ) { mapTri_t* list = CopyMapTri( originalTri ); list->next = NULL; node->areaPortalTris = MergeTriLists( node->areaPortalTris, list ); } const MapPolygonMesh* mapMesh = originalTri->originalMapMesh; // classify the leaf by the structural brush if( mapMesh->IsOpaque() ) { node->opaque = true; } delete w; return 1; } // split it by the node plane w->Split( dmapGlobals.mapPlanes[ node->planenum ], ON_EPSILON, &front, &back ); delete w; c = 0; c += FilterMeshesIntoTree_r( front, originalTri, node->children[0] ); c += FilterMeshesIntoTree_r( back, originalTri, node->children[1] ); return c; }
/*
==================
FixGlobalTjunctions

Fixes T-junctions across ALL areas of the entity by building one shared
vertex hash: every triangle vertex in every (non-discrete) group is added
to the hash, then each group's triangle list is rebuilt against it.
For the world entity (entityNum 0), vertexes from func_static .lwo/.ase/.ma
models are also added so static models weld cleanly against world geometry.
==================
*/
void FixGlobalTjunctions( uEntity_t *e ) {
	mapTri_t *a;
	int vert;
	int i;
	optimizeGroup_t *group;
	int areaNum;

	common->Printf( "----- FixGlobalTjunctions -----\n" );

	// clear the hash tables
	memset( hashVerts, 0, sizeof( hashVerts ) );
	numHashVerts = 0;
	numTotalVerts = 0;

	// bound all the triangles to determine the bucket size
	hashBounds.Clear();
	for ( areaNum = 0 ; areaNum < e->numAreas ; areaNum++ ) {
		for ( group = e->areas[areaNum].groups ; group ; group = group->nextGroup ) {
			for ( a = group->triList ; a ; a = a->next ) {
				hashBounds.AddPoint( a->v[0].xyz );
				hashBounds.AddPoint( a->v[1].xyz );
				hashBounds.AddPoint( a->v[2].xyz );
			}
		}
	}

	// spread the bounds so it will never have a zero size
	for ( i = 0 ; i < 3 ; i++ ) {
		// expand by one unit and snap to integer planes
		hashBounds[0][i] = floor( hashBounds[0][i] - 1 );
		hashBounds[1][i] = ceil( hashBounds[1][i] + 1 );
		// derive the fixed-point mins and per-bin scales used by GetHashVert
		hashIntMins[i] = hashBounds[0][i] * SNAP_FRACTIONS;
		hashScale[i] = ( hashBounds[1][i] - hashBounds[0][i] ) / HASH_BINS;
		hashIntScale[i] = hashScale[i] * SNAP_FRACTIONS;
		// never allow a zero integer scale, which would collapse all bins
		if ( hashIntScale[i] < 1 ) {
			hashIntScale[i] = 1;
		}
	}

	// add all the points to the hash buckets
	for ( areaNum = 0 ; areaNum < e->numAreas ; areaNum++ ) {
		for ( group = e->areas[areaNum].groups ; group ; group = group->nextGroup ) {
			// don't touch discrete surfaces
			if ( group->material != NULL && group->material->IsDiscrete() ) {
				continue;
			}
			for ( a = group->triList ; a ; a = a->next ) {
				for ( vert = 0 ; vert < 3 ; vert++ ) {
					a->hashVert[vert] = GetHashVert( a->v[vert].xyz );
				}
			}
		}
	}

	// add all the func_static model vertexes to the hash buckets
	// optionally inline some of the func_static models
	if ( dmapGlobals.entityNum == 0 ) {
		for ( int eNum = 1 ; eNum < dmapGlobals.num_entities ; eNum++ ) {
			uEntity_t *entity = &dmapGlobals.uEntities[eNum];
			const char *className = entity->mapEntity->epairs.GetString( "classname" );
			if ( idStr::Icmp( className, "func_static" ) ) {
				continue;
			}
			const char *modelName = entity->mapEntity->epairs.GetString( "model" );
			if ( !modelName ) {
				continue;
			}
			// only mesh model formats contribute weld vertexes
			if ( !strstr( modelName, ".lwo" ) && !strstr( modelName, ".ase" ) && !strstr( modelName, ".ma" ) ) {
				continue;
			}

			idRenderModel *model = renderModelManager->FindModel( modelName );

			// common->Printf( "adding T junction verts for %s.\n", entity->mapEntity->epairs.GetString( "name" ) );

			idMat3 axis;
			// get the rotation matrix in either full form, or single angle form
			if ( !entity->mapEntity->epairs.GetMatrix( "rotation", "1 0 0 0 1 0 0 0 1", axis ) ) {
				float angle = entity->mapEntity->epairs.GetFloat( "angle" );
				if ( angle != 0.0f ) {
					axis = idAngles( 0.0f, angle, 0.0f ).ToMat3();
				} else {
					axis.Identity();
				}
			}
			idVec3 origin = entity->mapEntity->epairs.GetVector( "origin" );

			for ( i = 0 ; i < model->NumSurfaces() ; i++ ) {
				const modelSurface_t *surface = model->Surface( i );
				const srfTriangles_t *tri = surface->geometry;

				// mapTri is only used to carry material / mergeGroup context here;
				// its vertex data is never filled in
				mapTri_t mapTri;
				memset( &mapTri, 0, sizeof( mapTri ) );
				mapTri.material = surface->shader;
				// don't let discretes (autosprites, etc) merge together
				if ( mapTri.material->IsDiscrete() ) {
					mapTri.mergeGroup = (void *)surface;
				}
				// NOTE(review): the stride of 3 hashes only every third vertex of
				// the model surface — confirm this is intentional rather than a
				// leftover from iterating triangles (j++ would hash all verts)
				for ( int j = 0 ; j < tri->numVerts ; j += 3 ) {
					idVec3 v = tri->verts[j].xyz * axis + origin;
					GetHashVert( v );
				}
			}
		}
	}

	// now fix each area
	for ( areaNum = 0 ; areaNum < e->numAreas ; areaNum++ ) {
		for ( group = e->areas[areaNum].groups ; group ; group = group->nextGroup ) {
			// don't touch discrete surfaces
			if ( group->material != NULL && group->material->IsDiscrete() ) {
				continue;
			}
			// rebuild the triangle list with T-junction-fixed fragments
			mapTri_t *newList = NULL;
			for ( mapTri_t *tri = group->triList ; tri ; tri = tri->next ) {
				mapTri_t *fixed = FixTriangleAgainstHash( tri );
				newList = MergeTriLists( newList, fixed );
			}
			FreeTriList( group->triList );
			group->triList = newList;
		}
	}

	// done
	FreeTJunctionHash();
}
/*
====================
WriteOutputSurfaces

Emits the "model" block for one area of one entity into the proc file:
a header line naming the model ("_area%i" for the world entity, or the
entity's "name" key otherwise), followed by one surface per set of
mutually compatible optimize groups.  Compatible groups are merged into
a single ambient triangle list, which is shared, cleaned up, and written
as indexed triangles.
====================
*/
static void WriteOutputSurfaces( int entityNum, int areaNum ) {
	mapTri_t *ambient, *copy;
	int surfaceNum;
	int numSurfaces;
	idMapEntity *entity;
	uArea_t *area;
	optimizeGroup_t *group, *groupStep;
	int i; // , j;
	// int col;
	srfTriangles_t *uTri;
	// mapTri_t *tri;

	// per-light triangle lists gathered from the groups that form one surface
	typedef struct interactionTris_s {
		struct interactionTris_s *next;
		mapTri_t *triList;
		mapLight_t *light;
	} interactionTris_t;

	interactionTris_t *interactions, *checkInter; //, *nextInter;

	area = &dmapGlobals.uEntities[entityNum].areas[areaNum];
	entity = dmapGlobals.uEntities[entityNum].mapEntity;

	numSurfaces = CountUniqueShaders( area->groups );

	// entity 0 (the world) is named by area number; all other entities
	// must carry a "name" key to identify their model
	if ( entityNum == 0 ) {
		procFile->WriteFloatString( "model { /* name = */ \"_area%i\" /* numSurfaces = */ %i\n\n", areaNum, numSurfaces );
	} else {
		const char *name;

		entity->epairs.GetString( "name", "", &name );
		if ( !name[0] ) {
			common->Error( "Entity %i has surfaces, but no name key", entityNum );
		}
		procFile->WriteFloatString( "model { /* name = */ \"%s\" /* numSurfaces = */ %i\n\n", name, numSurfaces );
	}

	surfaceNum = 0;
	for ( group = area->groups ; group ; group = group->nextGroup ) {
		if ( group->surfaceEmited ) {
			continue;
		}

		// combine all groups compatible with this one
		// usually several optimizeGroup_t can be combined into a single
		// surface, even though they couldn't be merged together to save
		// vertexes because they had different planes, texture coordinates, or lights.
		// Different mergeGroups will stay in separate surfaces.
		ambient = NULL;

		// each light that illuminates any of the groups in the surface will
		// get its own list of indexes out of the original surface
		interactions = NULL;

		for ( groupStep = group ; groupStep ; groupStep = groupStep->nextGroup ) {
			if ( groupStep->surfaceEmited ) {
				continue;
			}
			if ( !GroupsAreSurfaceCompatible( group, groupStep ) ) {
				continue;
			}

			// copy it out to the ambient list
			copy = CopyTriList( groupStep->triList );
			ambient = MergeTriLists( ambient, copy );
			// mark so later passes of the outer loop skip this group
			groupStep->surfaceEmited = true;

			// duplicate it into an interaction for each groupLight
			for ( i = 0 ; i < groupStep->numGroupLights ; i++ ) {
				for ( checkInter = interactions ; checkInter ; checkInter = checkInter->next ) {
					if ( checkInter->light == groupStep->groupLights[i] ) {
						break;
					}
				}
				if ( !checkInter ) {
					// create a new interaction
					checkInter = (interactionTris_t *)Mem_ClearedAlloc( sizeof( *checkInter ) );
					checkInter->light = groupStep->groupLights[i];
					checkInter->next = interactions;
					interactions = checkInter;
				}
				copy = CopyTriList( groupStep->triList );
				checkInter->triList = MergeTriLists( checkInter->triList, copy );
			}
		}

		// NOTE(review): the interactions list built above is neither written out
		// nor freed anywhere in this function — confirm whether a later stage
		// consumes it or whether it is leftover code that leaks its allocations

		if ( !ambient ) {
			continue;
		}

		if ( surfaceNum >= numSurfaces ) {
			common->Error( "WriteOutputSurfaces: surfaceNum >= numSurfaces" );
		}

		procFile->WriteFloatString( "/* surface %i */ { ", surfaceNum );
		surfaceNum++;
		procFile->WriteFloatString( "\"%s\" ", ambient->material->GetName() );

		// convert to an indexed surface, clean it up, and write it out;
		// ambient is consumed (freed) in the process
		uTri = ShareMapTriVerts( ambient );
		FreeTriList( ambient );

		CleanupUTriangles( uTri );
		WriteUTriangles( uTri );
		R_FreeStaticTriSurf( uTri );

		procFile->WriteFloatString( "}\n\n" );
	}

	procFile->WriteFloatString( "}\n\n" );
}
/*
====================
CarveGroupsByLight

Divide each group into an inside group and an outside group, based
on which fragments are illuminated by the light's beam tree

Groups that the light cannot affect (wrong material receive flags,
bounds outside the light, or facing away from a one-sided light) are
moved unchanged onto the new list; affected groups are split into
lit/unlit clones and the original is freed.
====================
*/
static void CarveGroupsByLight( uEntity_t *e, mapLight_t *light ) {
	int i;
	optimizeGroup_t *group, *newGroup, *carvedGroups, *nextGroup;
	mapTri_t *tri, *inside, *outside;
	uArea_t *area;

	for ( i = 0 ; i < e->numAreas ; i++ ) {
		area = &e->areas[i];
		carvedGroups = NULL;

		// we will be either freeing or reassigning the groups as we go
		for ( group = area->groups ; group ; group = nextGroup ) {
			nextGroup = group->nextGroup;

			// if the surface doesn't get lit, don't carve it up
			// (fog lights check ReceivesFog, normal lights check ReceivesLighting,
			// and the group bounds must intersect the light frustum bounds)
			if ( ( light->def.lightShader->IsFogLight() && !group->material->ReceivesFog() )
				|| ( !light->def.lightShader->IsFogLight() && !group->material->ReceivesLighting() )
				|| !group->bounds.IntersectsBounds( light->def.frustumTris->bounds ) ) {

				// move the group unchanged onto the carved list
				group->nextGroup = carvedGroups;
				carvedGroups = group;
				continue;
			}

			// the inside clone below appends this light to groupLights,
			// so bail out before overflowing the fixed-size array
			if ( group->numGroupLights == MAX_GROUP_LIGHTS ) {
				common->Error( "MAX_GROUP_LIGHTS around %f %f %f",
					group->triList->v[0].xyz[0], group->triList->v[0].xyz[1], group->triList->v[0].xyz[2] );
			}

			// if the group doesn't face the light,
			// it won't get carved at all
			if ( !light->def.lightShader->LightEffectsBackSides() &&
				!group->material->ReceivesLightingOnBackSides() &&
				dmapGlobals.mapPlanes[ group->planeNum ].Distance( light->def.parms.origin ) <= 0 ) {

				group->nextGroup = carvedGroups;
				carvedGroups = group;
				continue;
			}

			// split into lists for hit-by-light, and not-hit-by-light
			inside = NULL;
			outside = NULL;
			for ( tri = group->triList ; tri ; tri = tri->next ) {
				mapTri_t *in, *out;

				ClipTriByLight( light, tri, &in, &out );
				inside = MergeTriLists( inside, in );
				outside = MergeTriLists( outside, out );
			}

			// the lit fragments get a clone of the group with this light
			// appended to its groupLights list
			if ( inside ) {
				newGroup = (optimizeGroup_t *)Mem_Alloc( sizeof( *newGroup ) );
				*newGroup = *group;
				newGroup->groupLights[newGroup->numGroupLights] = light;
				newGroup->numGroupLights++;
				newGroup->triList = inside;
				newGroup->nextGroup = carvedGroups;
				carvedGroups = newGroup;
			}

			// the unlit fragments get a plain clone of the group
			if ( outside ) {
				newGroup = (optimizeGroup_t *)Mem_Alloc( sizeof( *newGroup ) );
				*newGroup = *group;
				newGroup->triList = outside;
				newGroup->nextGroup = carvedGroups;
				carvedGroups = newGroup;
			}

			// free the original
			// (unlink first so FreeOptimizeGroupList only frees this one group)
			group->nextGroup = NULL;
			FreeOptimizeGroupList( group );
		}

		// replace this area's group list with the new one
		area->groups = carvedGroups;
	}
}
/*
====================
BuildLightShadows

Build the beam tree and shadow volume surface for a light

Collects fragments of all shadow-casting triangles that face away from
the light and lie inside its frustum into per-plane shadower groups,
then hands them to CreateLightShadow.  If any contributing surface is
non-opaque, face-removal optimizations are disabled on the result.
====================
*/
static void BuildLightShadows( uEntity_t *e, mapLight_t *light ) {
	int i;
	optimizeGroup_t *group;
	mapTri_t *tri;
	mapTri_t *shadowers;
	optimizeGroup_t *shadowerGroups;
	idVec3 lightOrigin;
	bool hasPerforatedSurface = false;

	//
	// build a group list of all the triangles that will contribute to
	// the optimized shadow volume, leaving the original triangles alone
	//

	// shadowers will contain all the triangles that will contribute to the
	// shadow volume
	shadowerGroups = NULL;
	lightOrigin = light->def.globalLightOrigin;

	// if the light is no-shadows, don't add any surfaces
	// to the beam tree at all
	if ( !light->def.parms.noShadows && light->def.lightShader->LightCastsShadows() ) {
		for ( i = 0 ; i < e->numAreas ; i++ ) {
			for ( group = e->areas[i].groups ; group ; group = group->nextGroup ) {

				// if the surface doesn't cast shadows, skip it
				if ( !group->material->SurfaceCastsShadow() ) {
					continue;
				}

				// if the group doesn't face away from the light, it
				// won't contribute to the shadow volume
				if ( dmapGlobals.mapPlanes[ group->planeNum ].Distance( lightOrigin ) > 0 ) {
					continue;
				}

				// if the group bounds doesn't intersect the light bounds,
				// skip it
				if ( !group->bounds.IntersectsBounds( light->def.frustumTris->bounds ) ) {
					continue;
				}

				// build up a list of the triangle fragments inside the
				// light frustum
				shadowers = NULL;
				for ( tri = group->triList ; tri ; tri = tri->next ) {
					mapTri_t *in, *out;

					// clip it to the light frustum
					ClipTriByLight( light, tri, &in, &out );
					FreeTriList( out );	// only the inside fragments matter
					shadowers = MergeTriLists( shadowers, in );
				}

				// if we didn't get any out of this group, we don't
				// need to create a new group in the shadower list
				if ( !shadowers ) {
					continue;
				}

				// find a group in shadowerGroups to add these to
				// we will ignore everything but planenum, and we
				// can merge across areas
				optimizeGroup_t *check;

				for ( check = shadowerGroups ; check ; check = check->nextGroup ) {
					if ( check->planeNum == group->planeNum ) {
						break;
					}
				}
				if ( !check ) {
					// no group for this plane yet; clone the current group's
					// fields but start with an empty triangle list
					check = (optimizeGroup_t *)Mem_Alloc( sizeof( *check ) );
					*check = *group;
					check->triList = NULL;
					check->nextGroup = shadowerGroups;
					shadowerGroups = check;
				}

				// if any surface is a shadow-casting perforated or translucent surface, we
				// can't use the face removal optimizations because we can see through
				// some of the faces
				if ( group->material->Coverage() != MC_OPAQUE ) {
					hasPerforatedSurface = true;
				}

				check->triList = MergeTriLists( check->triList, shadowers );
			}
		}
	}

	// take the shadower group list and create a beam tree and shadow volume
	light->shadowTris = CreateLightShadow( shadowerGroups, light );

	if ( light->shadowTris && hasPerforatedSurface ) {
		// can't ever remove front faces, because we can see through some of them
		light->shadowTris->numShadowIndexesNoCaps = light->shadowTris->numShadowIndexesNoFrontCaps = light->shadowTris->numIndexes;
	}

	// we don't need the original shadower triangles for anything else
	FreeOptimizeGroupList( shadowerGroups );
}
/* ================= ClipTriByLight Carves a triangle by the frustom planes of a light, producing a (possibly empty) list of triangles on the inside and outside. The original triangle is not modified. If no clipping is required, the result will be a copy of the original. If clipping was required, the outside fragments will be planar clips, which will benefit from re-optimization. ================= */ static void ClipTriByLight( const mapLight_t *light, const mapTri_t *tri, mapTri_t **in, mapTri_t **out ) { idWinding *inside, *oldInside; idWinding *outside[6]; bool hasOutside; int i; *in = NULL; *out = NULL; // clip this winding to the light inside = WindingForTri( tri ); hasOutside = false; for ( i = 0 ; i < 6 ; i++ ) { oldInside = inside; if ( oldInside ) { oldInside->Split( light->def.frustum[i], 0, &outside[i], &inside ); delete oldInside; } else { outside[i] = NULL; } if ( outside[i] ) { hasOutside = true; } } if ( !inside ) { // the entire winding is outside this light // free the clipped fragments for ( i = 0 ; i < 6 ; i++ ) { if ( outside[i] ) { delete outside[i]; } } *out = CopyMapTri( tri ); (*out)->next = NULL; return; } if ( !hasOutside ) { // the entire winding is inside this light // free the inside copy delete inside; *in = CopyMapTri( tri ); (*in)->next = NULL; return; } // the winding is split *in = WindingToTriList( inside, tri ); delete inside; // combine all the outside fragments for ( i = 0 ; i < 6 ; i++ ) { if ( outside[i] ) { mapTri_t *list; list = WindingToTriList( outside[i], tri ); delete outside[i]; *out = MergeTriLists( *out, list ); } } }