Example #1
eclass_t* GetCachedModel(entity_t *pEntity, const char *pName, idVec3 &vMin, idVec3 &vMax)
{
	eclass_t *e = NULL;
	if (pName == NULL || strlen(pName) == 0) {
		return NULL;
	}

	for (e = g_md3Cache; e ; e = e->next) {
		if (!strcmp (pName, e->name)) {
			pEntity->md3Class = e;
			VectorCopy(e->mins, vMin);
			VectorCopy(e->maxs, vMax);
			return e;
		}
	}

	e = (eclass_t*)Mem_ClearedAlloc(sizeof(*e));
	memset (e, 0, sizeof(*e));
	e->name = Mem_CopyString( pName );
	e->color[0] = e->color[2] = 0.85f;
	if (LoadModel(pName, e, vMin, vMax, NULL)) {
		EClass_InsertSortedList(g_md3Cache, e);
		VectorCopy(vMin, e->mins);
		VectorCopy(vMax, e->maxs);
		pEntity->md3Class = e;
		return e;
	}
	// the model failed to load: free the partially initialized entry so it is not leaked
	Mem_Free( e->name );
	Mem_Free( e );
	return NULL;
}
Example #2
/*
================
idAASBuild::ParseProcNodes
================
*/
void idAASBuild::ParseProcNodes( idLexer* src )
{
	int i;
	
	src->ExpectTokenString( "{" );
	
	idAASBuild::numProcNodes = src->ParseInt();
	if( idAASBuild::numProcNodes < 0 )
	{
		src->Error( "idAASBuild::ParseProcNodes: bad numProcNodes" );
	}
	idAASBuild::procNodes = ( aasProcNode_t* )Mem_ClearedAlloc( idAASBuild::numProcNodes * sizeof( aasProcNode_t ), TAG_TOOLS );
	
	for( i = 0; i < idAASBuild::numProcNodes; i++ )
	{
		aasProcNode_t* node;
		
		node = &( idAASBuild::procNodes[i] );
		
		src->Parse1DMatrix( 4, node->plane.ToFloatPtr() );
		node->children[0] = src->ParseInt();
		node->children[1] = src->ParseInt();
	}
	
	src->ExpectTokenString( "}" );
}
Example #3
/*
========================
PC_BeginNamedEvent

FIXME: this is not thread safe on the PC
========================
*/
void PC_BeginNamedEvent( const char *szName, ... ) {
#if 0
	if ( !r_pix.GetBool() ) {
		return;
	}
	if ( !pixEvents ) {
		// lazy allocation to not waste memory
		pixEvents = (pixEvent_t *)Mem_ClearedAlloc( sizeof( *pixEvents ) * MAX_PIX_EVENTS, TAG_CRAP );
	}
	if ( numPixEvents >= MAX_PIX_EVENTS ) {
		idLib::FatalError( "PC_BeginNamedEvent: event overflow" );
	}
	if ( ++numPixLevels > 1 ) {
		return;	// only get top level timing information
	}
	if ( !glGetQueryObjectui64vEXT ) {
		return;
	}

	GL_CheckErrors();
	if ( timeQueryIds[0] == 0 ) {
		glGenQueries( MAX_PIX_EVENTS, timeQueryIds );
	}
	glFinish();
	glBeginQuery( GL_TIME_ELAPSED_EXT, timeQueryIds[numPixEvents] );
	GL_CheckErrors();

	pixEvent_t *ev = &pixEvents[numPixEvents++];
	strncpy( ev->name, szName, sizeof( ev->name ) - 1 );
	ev->cpuTime = Sys_Microseconds();
#endif
}
Example #4
/*
==============
LoadFile
==============
*/
int LoadFile (const char *filename, void **bufferptr)
{
	FILE	*f;
	int    length;
	void    *buffer;

	*bufferptr = NULL;

	if ( filename == NULL || strlen(filename) == 0 ) {
		return -1;
	}

	f = fopen( filename, "rb" );
	if ( !f ) {
		return -1;
	}
	length = Q_filelength( f );
	buffer = Mem_ClearedAlloc( length+1, TAG_TOOLS );
	((char *)buffer)[length] = 0;
	if ( (int)fread( buffer, 1, length, f ) != length ) {
		Error( "File read failure" );
	}
	fclose( f );

	*bufferptr = buffer;
	return length;
}
Example #5
static void BotInitInfoEntities() {
	BotFreeInfoEntities();

	int numlocations = 0;
	int numcampspots = 0;
	for ( int ent = AAS_NextBSPEntity( 0 ); ent; ent = AAS_NextBSPEntity( ent ) ) {
		char classname[ MAX_EPAIRKEY ];
		if ( !AAS_ValueForBSPEpairKey( ent, "classname", classname, MAX_EPAIRKEY ) ) {
			continue;
		}

		//map locations
		if ( !String::Cmp( classname, "target_location" ) ) {
			maplocation_t* ml = ( maplocation_t* )Mem_ClearedAlloc( sizeof ( maplocation_t ) );
			AAS_VectorForBSPEpairKey( ent, "origin", ml->origin );
			AAS_ValueForBSPEpairKey( ent, "message", ml->name, sizeof ( ml->name ) );
			ml->areanum = AAS_PointAreaNum( ml->origin );
			ml->next = maplocations;
			maplocations = ml;
			numlocations++;
		}
		//camp spots
		else if ( !String::Cmp( classname, "info_camp" ) ) {
			campspot_t* cs = ( campspot_t* )Mem_ClearedAlloc( sizeof ( campspot_t ) );
			AAS_VectorForBSPEpairKey( ent, "origin", cs->origin );
			AAS_ValueForBSPEpairKey( ent, "message", cs->name, sizeof ( cs->name ) );
			AAS_FloatForBSPEpairKey( ent, "range", &cs->range );
			AAS_FloatForBSPEpairKey( ent, "weight", &cs->weight );
			AAS_FloatForBSPEpairKey( ent, "wait", &cs->wait );
			AAS_FloatForBSPEpairKey( ent, "random", &cs->random );
			cs->areanum = AAS_PointAreaNum( cs->origin );
			if ( !cs->areanum ) {
				BotImport_Print( PRT_MESSAGE, "camp spot at %1.1f %1.1f %1.1f in solid\n", cs->origin[ 0 ], cs->origin[ 1 ], cs->origin[ 2 ] );
				Mem_Free( cs );
				continue;
			}
			cs->next = campspots;
			campspots = cs;
			numcampspots++;
		}
	}
	if ( bot_developer ) {
		BotImport_Print( PRT_MESSAGE, "%d map locations\n", numlocations );
		BotImport_Print( PRT_MESSAGE, "%d camp spots\n", numcampspots );
	}
}
Example #6
int BotAllocGoalState( int client ) {
	for ( int i = 1; i <= MAX_BOTLIB_CLIENTS; i++ ) {
		if ( !botgoalstates[ i ] ) {
			botgoalstates[ i ] = ( bot_goalstate_t* )Mem_ClearedAlloc( sizeof ( bot_goalstate_t ) );
			botgoalstates[ i ]->client = client;
			return i;
		}
	}
	return 0;
}
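/*
A sketch of the matching release path for the handle-based allocator above; the
function name and the bounds checks are illustrative and not copied from the
botlib sources.
*/
static void FreeGoalStateSketch( int handle ) {
	if ( handle <= 0 || handle > MAX_BOTLIB_CLIENTS ) {
		return;		// 0 is the "allocation failed" handle, so it is never freed
	}
	if ( !botgoalstates[ handle ] ) {
		return;		// slot was never allocated
	}
	Mem_Free( botgoalstates[ handle ] );
	botgoalstates[ handle ] = NULL;		// the slot can be handed out by BotAllocGoalState again
}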
Example #7
// index to find the weight function of an iteminfo
static int* ItemWeightIndex( weightconfig_t* iwc, itemconfig_t* ic ) {
	//initialize item weight index
	int* index = ( int* )Mem_ClearedAlloc( sizeof ( int ) * ic->numiteminfo );

	for ( int i = 0; i < ic->numiteminfo; i++ ) {
		index[ i ] = FindFuzzyWeight( iwc, ic->iteminfo[ i ].classname );
		if ( index[ i ] < 0 ) {
			Log_Write( "item info %d \"%s\" has no fuzzy weight\r\n", i, ic->iteminfo[ i ].classname );
		}	//end if
	}	//end for
	return index;
}
Example #8
static void InitLevelItemHeap() {
	if ( levelitemheap ) {
		Mem_Free( levelitemheap );
	}

	int max_levelitems = ( int )LibVarValue( "max_levelitems", "256" );
	levelitemheap = ( levelitem_t* )Mem_ClearedAlloc( max_levelitems * sizeof ( levelitem_t ) );

	for ( int i = 0; i < max_levelitems - 1; i++ ) {
		levelitemheap[ i ].next = &levelitemheap[ i + 1 ];
	}
	levelitemheap[ max_levelitems - 1 ].next = NULL;
	freelevelitems = levelitemheap;
}
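/*
A self-contained sketch of the free-list pattern used by InitLevelItemHeap above:
one cleared block is carved into nodes linked through next, allocation pops the
head and release pushes it back. The types and names here (node_t, PopFree,
PushFree) are illustrative only.
*/
#include <stdlib.h>
#include <string.h>

typedef struct node_s {
	struct node_s	*next;
	int				payload;
} node_t;

static node_t	*nodeHeap;
static node_t	*freeNodes;

static void InitNodeHeap( int count ) {
	if ( count <= 0 ) {
		return;
	}
	nodeHeap = ( node_t * )calloc( count, sizeof ( node_t ) );	// cleared, like Mem_ClearedAlloc
	if ( !nodeHeap ) {
		return;
	}
	for ( int i = 0; i < count - 1; i++ ) {
		nodeHeap[ i ].next = &nodeHeap[ i + 1 ];
	}
	nodeHeap[ count - 1 ].next = NULL;
	freeNodes = nodeHeap;
}

static node_t *PopFree( void ) {
	node_t *n = freeNodes;
	if ( n ) {
		freeNodes = n->next;
		memset( n, 0, sizeof ( *n ) );	// hand out a cleared node, as the engine allocators do
	}
	return n;
}

static void PushFree( node_t *n ) {
	n->next = freeNodes;
	freeNodes = n;
}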
Example #9
/*
=================
idEventLoop::GetRealEvent
=================
*/
sysEvent_t	idEventLoop::GetRealEvent()
{
	int			r;
	sysEvent_t	ev;
	
	// either get an event from the system or the journal file
	if( com_journal.GetInteger() == 2 )
	{
		r = com_journalFile->Read( &ev, sizeof( ev ) );
		if( r != sizeof( ev ) )
		{
			common->FatalError( "Error reading from journal file" );
		}
		if( ev.evPtrLength )
		{
			ev.evPtr = Mem_ClearedAlloc( ev.evPtrLength, TAG_EVENTS );
			r = com_journalFile->Read( ev.evPtr, ev.evPtrLength );
			if( r != ev.evPtrLength )
			{
				common->FatalError( "Error reading from journal file" );
			}
		}
	}
	else
	{
		ev = Sys_GetEvent();
		
		// write the journal value out if needed
		if( com_journal.GetInteger() == 1 )
		{
			r = com_journalFile->Write( &ev, sizeof( ev ) );
			if( r != sizeof( ev ) )
			{
				common->FatalError( "Error writing to journal file" );
			}
			if( ev.evPtrLength )
			{
				r = com_journalFile->Write( ev.evPtr, ev.evPtrLength );
				if( r != ev.evPtrLength )
				{
					common->FatalError( "Error writing to journal file" );
				}
			}
		}
	}
	
	return ev;
}
/*
================
idCollisionModelManagerLocal::ParseBrushes
================
*/
void idCollisionModelManagerLocal::ParseBrushes( idLexer* src, cm_model_t* model )
{
	cm_brush_t* b;
	int i, numPlanes;
	idVec3 normal;
	idToken token;
	
	if( src->CheckTokenType( TT_NUMBER, 0, &token ) )
	{
		model->brushBlock = ( cm_brushBlock_t* ) Mem_ClearedAlloc( sizeof( cm_brushBlock_t ) + token.GetIntValue(), TAG_COLLISION );
		model->brushBlock->bytesRemaining = token.GetIntValue();
		model->brushBlock->next = ( ( byte* ) model->brushBlock ) + sizeof( cm_brushBlock_t );
	}
	
	src->ExpectTokenString( "{" );
	while( !src->CheckTokenString( "}" ) )
	{
		// parse brush
		numPlanes = src->ParseInt();
		b = AllocBrush( model, numPlanes );
		b->numPlanes = numPlanes;
		src->ExpectTokenString( "{" );
		for( i = 0; i < b->numPlanes; i++ )
		{
			src->Parse1DMatrix( 3, normal.ToFloatPtr() );
			b->planes[i].SetNormal( normal );
			b->planes[i].SetDist( src->ParseFloat() );
		}
		src->ExpectTokenString( "}" );
		src->Parse1DMatrix( 3, b->bounds[0].ToFloatPtr() );
		src->Parse1DMatrix( 3, b->bounds[1].ToFloatPtr() );
		src->ReadToken( &token );
		if( token.type == TT_NUMBER )
		{
			b->contents = token.GetIntValue();		// old .cm files use a single integer
		}
		else
		{
			b->contents = ContentsFromString( token );
		}
		b->checkcount = 0;
		b->primitiveNum = 0;
		b->material = NULL;
		// filter brush into tree
		R_FilterBrushIntoTree( model, model->node, NULL, b );
	}
}
/*
================
idCollisionModelManagerLocal::ParseVertices
================
*/
void idCollisionModelManagerLocal::ParseVertices( idLexer* src, cm_model_t* model )
{
	int i;
	
	src->ExpectTokenString( "{" );
	model->numVertices = src->ParseInt();
	model->maxVertices = model->numVertices;
	model->vertices = ( cm_vertex_t* ) Mem_ClearedAlloc( model->maxVertices * sizeof( cm_vertex_t ), TAG_COLLISION );
	for( i = 0; i < model->numVertices; i++ )
	{
		src->Parse1DMatrix( 3, model->vertices[i].p.ToFloatPtr() );
		model->vertices[i].side = 0;
		model->vertices[i].sideSet = 0;
		model->vertices[i].checkcount = 0;
	}
	src->ExpectTokenString( "}" );
}
/*
================
idCollisionModelManagerLocal::ParsePolygons
================
*/
void idCollisionModelManagerLocal::ParsePolygons( idLexer* src, cm_model_t* model )
{
	cm_polygon_t* p;
	int i, numEdges;
	idVec3 normal;
	idToken token;
	
	if( src->CheckTokenType( TT_NUMBER, 0, &token ) )
	{
		model->polygonBlock = ( cm_polygonBlock_t* ) Mem_ClearedAlloc( sizeof( cm_polygonBlock_t ) + token.GetIntValue(), TAG_COLLISION );
		model->polygonBlock->bytesRemaining = token.GetIntValue();
		model->polygonBlock->next = ( ( byte* ) model->polygonBlock ) + sizeof( cm_polygonBlock_t );
	}
	
	src->ExpectTokenString( "{" );
	while( !src->CheckTokenString( "}" ) )
	{
		// parse polygon
		numEdges = src->ParseInt();
		p = AllocPolygon( model, numEdges );
		p->numEdges = numEdges;
		src->ExpectTokenString( "(" );
		for( i = 0; i < p->numEdges; i++ )
		{
			p->edges[i] = src->ParseInt();
		}
		src->ExpectTokenString( ")" );
		src->Parse1DMatrix( 3, normal.ToFloatPtr() );
		p->plane.SetNormal( normal );
		p->plane.SetDist( src->ParseFloat() );
		src->Parse1DMatrix( 3, p->bounds[0].ToFloatPtr() );
		src->Parse1DMatrix( 3, p->bounds[1].ToFloatPtr() );
		src->ExpectTokenType( TT_STRING, 0, &token );
		// get material
		p->material = declManager->FindMaterial( token );
		p->contents = p->material->GetContentFlags();
		p->checkcount = 0;
		// filter polygon into tree
		R_FilterPolygonIntoTree( model, model->node, NULL, p );
	}
}
Example #13
/*
=====================
R_InitFrameData
=====================
*/
void R_InitFrameData( void ) {
	int size;
	frameData_t *frame;
	frameMemoryBlock_t *block;

	R_ShutdownFrameData();

	frameData = (frameData_t *)Mem_ClearedAlloc( sizeof( *frameData ));
	frame = frameData;
	size = MEMORY_BLOCK_SIZE;
	block = (frameMemoryBlock_t *)Mem_Alloc( size + sizeof( *block ) );
	if ( !block ) {
		common->FatalError( "R_InitFrameData: Mem_Alloc() failed" );
	}
	block->size = size;
	block->used = 0;
	block->next = NULL;
	frame->memory = block;
	frame->memoryHighwater = 0;

	R_ToggleSmpFrame();
}
Example #14
script_t* LoadScriptFile( const char* filename ) {
	char pathname[ MAX_QPATH ];
	if ( String::Length( basefolder ) ) {
		String::Sprintf( pathname, sizeof ( pathname ), "%s/%s", basefolder, filename );
	} else {
		String::Sprintf( pathname, sizeof ( pathname ), "%s", filename );
	}
	fileHandle_t fp;
	int length = FS_FOpenFileByMode( pathname, &fp, FS_READ );
	if ( !fp ) {
		return NULL;
	}

	void* buffer = Mem_ClearedAlloc( sizeof ( script_t ) + length + 1 );
	script_t* script = ( script_t* )buffer;
	Com_Memset( script, 0, sizeof ( script_t ) );
	String::Cpy( script->filename, filename );
	script->buffer = ( char* )buffer + sizeof ( script_t );
	script->buffer[ length ] = 0;
	script->length = length;
	//pointer in script buffer
	script->script_p = script->buffer;
	//pointer in script buffer before reading token
	script->lastscript_p = script->buffer;
	//pointer to end of script buffer
	script->end_p = &script->buffer[ length ];
	//set if there's a token available in script->token
	script->tokenavailable = 0;
	script->line = 1;
	script->lastline = 1;

	SetScriptPunctuations( script, NULL );

	FS_Read( script->buffer, length, fp );
	FS_FCloseFile( fp );

	return script;
}
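/*
LoadScriptFile above makes a single Mem_ClearedAlloc that holds the script_t
header, the script text, and a NUL terminator, with script->buffer pointing just
past the header so one Mem_Free releases everything. A self-contained sketch of
that layout trick; blob_t and AllocBlob are illustrative names, not engine types.
*/
#include <stdlib.h>
#include <string.h>

typedef struct {
	char	*text;		// points into the same allocation, right behind the header
	int		length;
} blob_t;

static blob_t *AllocBlob( const char *src, int length ) {
	// header + payload + terminator in one cleared block
	blob_t *b = ( blob_t * )calloc( 1, sizeof ( blob_t ) + length + 1 );
	if ( !b ) {
		return NULL;
	}
	b->text = ( char * )b + sizeof ( blob_t );
	b->length = length;
	memcpy( b->text, src, length );
	// calloc already zeroed b->text[ length ], so the payload is a valid C string
	return b;	// a single free( b ) releases header and payload together
}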
Example #15
script_t* LoadScriptMemory( const char* ptr, int length, const char* name ) {
	void* buffer = Mem_ClearedAlloc( sizeof ( script_t ) + length + 1 );
	script_t* script = ( script_t* )buffer;
	Com_Memset( script, 0, sizeof ( script_t ) );
	String::Cpy( script->filename, name );
	script->buffer = ( char* )buffer + sizeof ( script_t );
	script->buffer[ length ] = 0;
	script->length = length;
	//pointer in script buffer
	script->script_p = script->buffer;
	//pointer in script buffer before reading token
	script->lastscript_p = script->buffer;
	//pointer to end of script buffer
	script->end_p = &script->buffer[ length ];
	//set if there's a token available in script->token
	script->tokenavailable = 0;
	script->line = 1;
	script->lastline = 1;

	SetScriptPunctuations( script, NULL );

	Com_Memcpy( script->buffer, ptr, length );
	return script;
}
/*
================
idCollisionModelManagerLocal::ParseEdges
================
*/
void idCollisionModelManagerLocal::ParseEdges( idLexer* src, cm_model_t* model )
{
	int i;
	
	src->ExpectTokenString( "{" );
	model->numEdges = src->ParseInt();
	model->maxEdges = model->numEdges;
	model->edges = ( cm_edge_t* ) Mem_ClearedAlloc( model->maxEdges * sizeof( cm_edge_t ), TAG_COLLISION );
	for( i = 0; i < model->numEdges; i++ )
	{
		src->ExpectTokenString( "(" );
		model->edges[i].vertexNum[0] = src->ParseInt();
		model->edges[i].vertexNum[1] = src->ParseInt();
		src->ExpectTokenString( ")" );
		model->edges[i].side = 0;
		model->edges[i].sideSet = 0;
		model->edges[i].internal = src->ParseInt();
		model->edges[i].numUsers = src->ParseInt();
		model->edges[i].normal = vec3_origin;
		model->edges[i].checkcount = 0;
		model->numInternalEdges += model->edges[i].internal;
	}
	src->ExpectTokenString( "}" );
}
Example #17
/*
==============
RenderBumpTriangles

==============
*/
static void RenderBumpTriangles( srfTriangles_t *lowMesh, renderBump_t *rb ) {
	int		i, j;

	RB_SetGL2D();

	qglDisable( GL_CULL_FACE );

	qglColor3f( 1, 1, 1 );

	qglMatrixMode( GL_PROJECTION );
	qglLoadIdentity();
	qglOrtho( 0, 1, 1, 0, -1, 1 );
	qglDisable( GL_BLEND );
	qglMatrixMode( GL_MODELVIEW );
	qglLoadIdentity();

	qglDisable( GL_DEPTH_TEST );

	qglClearColor(1,0,0,1);
	qglClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );

	qglColor3f( 1, 1, 1 );

	// create smoothed normals for the surface, which might be
	// different than the normals at the vertexes if the
	// surface uses unsmoothedNormals, which only takes the
	// normal from a single triangle.  We need properly smoothed
	// normals to make sure that the traces always go off normal
	// to the true surface.
	idVec3	*lowMeshNormals = (idVec3 *)Mem_ClearedAlloc( lowMesh->numVerts * sizeof( *lowMeshNormals ) );
	R_DeriveFacePlanes( lowMesh );
	R_CreateSilIndexes( lowMesh );	// recreate, merging the mirrored verts back together
	const idPlane *planes = lowMesh->facePlanes;
	for ( i = 0 ; i < lowMesh->numIndexes ; i += 3, planes++ ) {
		for ( j = 0 ; j < 3 ; j++ ) {
			int		index;

			index = lowMesh->silIndexes[i+j];
			lowMeshNormals[index] += (*planes).Normal();
		}
	}
	// normalize and replicate from silIndexes to all indexes
	for ( i = 0 ; i < lowMesh->numIndexes ; i++ ) {
		lowMeshNormals[lowMesh->indexes[i]] = lowMeshNormals[lowMesh->silIndexes[i]];
		lowMeshNormals[lowMesh->indexes[i]].Normalize();
	}


	// rasterize each low poly face
	for ( j = 0 ; j < lowMesh->numIndexes ; j+=3 ) {
		// pump the event loop so the window can be dragged around
		Sys_GenerateEvents();

		RasterizeTriangle( lowMesh, lowMeshNormals, j/3, rb );

		qglClearColor(1,0,0,1);
		qglClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
		qglRasterPos2f( 0, 1 );
		qglPixelZoom( glConfig.vidWidth / (float)rb->width, glConfig.vidHeight / (float)rb->height );
		qglDrawPixels( rb->width, rb->height, GL_RGBA, GL_UNSIGNED_BYTE, rb->localPic );
		qglPixelZoom( 1, 1 );
		qglFlush();
		GLimp_SwapBuffers();
	}

	Mem_Free( lowMeshNormals );
}
Example #18
/*
====================
idMD5Mesh::ParseMesh
====================
*/
void idMD5Mesh::ParseMesh( idLexer& parser, int numJoints, const idJointMat* joints )
{
	idToken		token;
	idToken		name;
	
	parser.ExpectTokenString( "{" );
	
	//
	// parse name
	//
	if( parser.CheckTokenString( "name" ) )
	{
		parser.ReadToken( &name );
	}
	
	//
	// parse shader
	//
	parser.ExpectTokenString( "shader" );
	
	parser.ReadToken( &token );
	idStr shaderName = token;
	
	shader = declManager->FindMaterial( shaderName );
	
	//
	// parse texture coordinates
	//
	parser.ExpectTokenString( "numverts" );
	int count = parser.ParseInt();
	if( count < 0 )
	{
		parser.Error( "Invalid size: %d", count );
	}
	
	this->numVerts = count;
	
	idList<idVec2> texCoords;
	idList<int> firstWeightForVertex;
	idList<int> numWeightsForVertex;
	
	texCoords.SetNum( count );
	firstWeightForVertex.SetNum( count );
	numWeightsForVertex.SetNum( count );
	
	int numWeights = 0;
	int maxweight = 0;
	for( int i = 0; i < texCoords.Num(); i++ )
	{
		parser.ExpectTokenString( "vert" );
		parser.ParseInt();
		
		parser.Parse1DMatrix( 2, texCoords[ i ].ToFloatPtr() );
		
		firstWeightForVertex[ i ]	= parser.ParseInt();
		numWeightsForVertex[ i ]	= parser.ParseInt();
		
		if( !numWeightsForVertex[ i ] )
		{
			parser.Error( "Vertex without any joint weights." );
		}
		
		numWeights += numWeightsForVertex[ i ];
		if( numWeightsForVertex[ i ] + firstWeightForVertex[ i ] > maxweight )
		{
			maxweight = numWeightsForVertex[ i ] + firstWeightForVertex[ i ];
		}
	}
	
	//
	// parse tris
	//
	parser.ExpectTokenString( "numtris" );
	count = parser.ParseInt();
	if( count < 0 )
	{
		parser.Error( "Invalid size: %d", count );
	}
	
	idList<int> tris;
	tris.SetNum( count * 3 );
	numTris = count;
	for( int i = 0; i < count; i++ )
	{
		parser.ExpectTokenString( "tri" );
		parser.ParseInt();
		
		tris[ i * 3 + 0 ] = parser.ParseInt();
		tris[ i * 3 + 1 ] = parser.ParseInt();
		tris[ i * 3 + 2 ] = parser.ParseInt();
	}
	
	//
	// parse weights
	//
	parser.ExpectTokenString( "numweights" );
	count = parser.ParseInt();
	if( count < 0 )
	{
		parser.Error( "Invalid size: %d", count );
	}
	
	if( maxweight > count )
	{
		parser.Warning( "Vertices reference out of range weights in model (%d of %d weights).", maxweight, count );
	}
	
	idList<vertexWeight_t> tempWeights;
	tempWeights.SetNum( count );
	assert( numJoints < 256 );		// so we can pack into bytes
	
	for( int i = 0; i < count; i++ )
	{
		parser.ExpectTokenString( "weight" );
		parser.ParseInt();
		
		int jointnum = parser.ParseInt();
		if( ( jointnum < 0 ) || ( jointnum >= numJoints ) )
		{
			parser.Error( "Joint Index out of range(%d): %d", numJoints, jointnum );
		}
		
		tempWeights[ i ].joint			= jointnum;
		tempWeights[ i ].jointWeight	= parser.ParseFloat();
		
		parser.Parse1DMatrix( 3, tempWeights[ i ].offset.ToFloatPtr() );
	}
	
	// create pre-scaled weights and an index for the vertex/joint lookup
	idVec4* scaledWeights = ( idVec4* ) Mem_Alloc16( numWeights * sizeof( scaledWeights[0] ), TAG_MD5_WEIGHT );
	int* weightIndex = ( int* ) Mem_Alloc16( numWeights * 2 * sizeof( weightIndex[0] ), TAG_MD5_INDEX );
	memset( weightIndex, 0, numWeights * 2 * sizeof( weightIndex[0] ) );
	
	count = 0;
	for( int i = 0; i < texCoords.Num(); i++ )
	{
		int num = firstWeightForVertex[i];
		for( int j = 0; j < numWeightsForVertex[i]; j++, num++, count++ )
		{
			scaledWeights[count].ToVec3() = tempWeights[num].offset * tempWeights[num].jointWeight;
			scaledWeights[count].w = tempWeights[num].jointWeight;
			weightIndex[count * 2 + 0] = tempWeights[num].joint * sizeof( idJointMat );
		}
		weightIndex[count * 2 - 1] = 1;
	}
	
	parser.ExpectTokenString( "}" );
	
	// update counters
	c_numVerts += texCoords.Num();
	c_numWeights += numWeights;
	c_numWeightJoints++;
	for( int i = 0; i < numWeights; i++ )
	{
		c_numWeightJoints += weightIndex[i * 2 + 1];
	}
	
	//
	// build a base pose that can be used for skinning
	//
	idDrawVert* basePose = ( idDrawVert* )Mem_ClearedAlloc( texCoords.Num() * sizeof( *basePose ), TAG_MD5_BASE );
	for( int j = 0, i = 0; i < texCoords.Num(); i++ )
	{
		idVec3 v = ( *( idJointMat* )( ( byte* )joints + weightIndex[j * 2 + 0] ) ) * scaledWeights[j];
		while( weightIndex[j * 2 + 1] == 0 )
		{
			j++;
			v += ( *( idJointMat* )( ( byte* )joints + weightIndex[j * 2 + 0] ) ) * scaledWeights[j];
		}
		j++;
		
		basePose[i].Clear();
		basePose[i].xyz = v;
		basePose[i].SetTexCoord( texCoords[i] );
	}
	
	// build the weights and bone indexes into the verts, so they will be duplicated
	// as necessary at mirror seams
	
	static int maxWeightsPerVert;
	static float maxResidualWeight;
	
	const int MAX_VERTEX_WEIGHTS = 4;
	
	idList< bool > jointIsUsed;
	jointIsUsed.SetNum( numJoints );
	for( int i = 0; i < jointIsUsed.Num(); i++ )
	{
		jointIsUsed[i] = false;
	}
	
	numMeshJoints = 0;
	maxJointVertDist = 0.0f;
	
	//-----------------------------------------
	// new-style setup for fixed four weights and normal / tangent deformation
	//
	// Several important models have >25% residual weight in joints after the
	// first four, which is worrisome for using a fixed four joint deformation.
	//-----------------------------------------
	for( int i = 0; i < texCoords.Num(); i++ )
	{
		idDrawVert& dv = basePose[i];
		
		// some models do have >4 joint weights, so it is necessary to sort and renormalize
		
		// sort the weights and take the four largest
		int	weights[256];
		const int numWeights = numWeightsForVertex[ i ];
		for( int j = 0; j < numWeights; j++ )
		{
			weights[j] = firstWeightForVertex[i] + j;
		}
		// bubble sort
		for( int j = 0; j < numWeights; j++ )
		{
			for( int k = 0; k < numWeights - 1 - j; k++ )
			{
				if( tempWeights[weights[k]].jointWeight < tempWeights[weights[k + 1]].jointWeight )
				{
					SwapValues( weights[k], weights[k + 1] );
				}
			}
		}
		
		if( numWeights > maxWeightsPerVert )
		{
			maxWeightsPerVert = numWeights;
		}
		
		const int usedWeights = Min( MAX_VERTEX_WEIGHTS, numWeights );
		
		float totalWeight = 0;
		for( int j = 0; j < numWeights; j++ )
		{
			totalWeight += tempWeights[weights[j]].jointWeight;
		}
		assert( totalWeight > 0.999f && totalWeight < 1.001f );
		
		float usedWeight = 0;
		for( int j = 0; j < usedWeights; j++ )
		{
			usedWeight += tempWeights[weights[j]].jointWeight;
		}
		
		const float residualWeight = totalWeight - usedWeight;
		if( residualWeight > maxResidualWeight )
		{
			maxResidualWeight = residualWeight;
		}
		
		byte finalWeights[MAX_VERTEX_WEIGHTS] = { 0 };
		byte finalJointIndecies[MAX_VERTEX_WEIGHTS] = { 0 };
		for( int j = 0; j < usedWeights; j++ )
		{
			const vertexWeight_t& weight = tempWeights[weights[j]];
			const int jointIndex = weight.joint;
			const float fw = weight.jointWeight;
			assert( fw >= 0.0f && fw <= 1.0f );
			const float normalizedWeight = fw / usedWeight;
			finalWeights[j] = idMath::Ftob( normalizedWeight * 255.0f );
			finalJointIndecies[j] = jointIndex;
		}
		
		// Sort the weights and indices for hardware skinning
		for( int k = 0; k < 3; ++k )
		{
			for( int l = k + 1; l < 4; ++l )
			{
				if( finalWeights[l] > finalWeights[k] )
				{
					SwapValues( finalWeights[k], finalWeights[l] );
					SwapValues( finalJointIndecies[k], finalJointIndecies[l] );
				}
			}
		}
		
		// Give any left over to the biggest weight
		finalWeights[0] += Max( 255 - finalWeights[0] - finalWeights[1] - finalWeights[2] - finalWeights[3], 0 );
		
		dv.color[0] = finalJointIndecies[0];
		dv.color[1] = finalJointIndecies[1];
		dv.color[2] = finalJointIndecies[2];
		dv.color[3] = finalJointIndecies[3];
		
		dv.color2[0] = finalWeights[0];
		dv.color2[1] = finalWeights[1];
		dv.color2[2] = finalWeights[2];
		dv.color2[3] = finalWeights[3];
		
		for( int j = usedWeights; j < 4; j++ )
		{
			assert( dv.color2[j] == 0 );
		}
		
		for( int j = 0; j < usedWeights; j++ )
		{
			if( !jointIsUsed[finalJointIndecies[j]] )
			{
				jointIsUsed[finalJointIndecies[j]] = true;
				numMeshJoints++;
			}
			const idJointMat& joint = joints[finalJointIndecies[j]];
			float dist = ( dv.xyz - joint.GetTranslation() ).Length();
			if( dist > maxJointVertDist )
			{
				maxJointVertDist = dist;
			}
		}
	}
	
	meshJoints = ( byte* ) Mem_Alloc( numMeshJoints * sizeof( meshJoints[0] ), TAG_MODEL );
	numMeshJoints = 0;
	for( int i = 0; i < numJoints; i++ )
	{
		if( jointIsUsed[i] )
		{
			meshJoints[numMeshJoints++] = i;
		}
	}
	
	// build the deformInfo and collect a final base pose with the mirror
	// seam verts properly including the bone weights
	deformInfo = R_BuildDeformInfo( texCoords.Num(), basePose, tris.Num(), tris.Ptr(),
									shader->UseUnsmoothedTangents() );
									
	for( int i = 0; i < deformInfo->numOutputVerts; i++ )
	{
		for( int j = 0; j < 4; j++ )
		{
			if( deformInfo->verts[i].color[j] >= numJoints )
			{
				idLib::FatalError( "Bad joint index" );
			}
		}
	}
	
	Mem_Free( basePose );
}
Example #19
/*
====================
WriteOutputSurfaces
====================
*/
static void WriteOutputSurfaces( int entityNum, int areaNum ) {
	mapTri_t	*ambient, *copy;
	int			surfaceNum;
	int			numSurfaces;
	idMapEntity	*entity;
	uArea_t		*area;
	optimizeGroup_t	*group, *groupStep;
	int			i; // , j;
//	int			col;
	srfTriangles_t	*uTri;
//	mapTri_t	*tri;
typedef struct interactionTris_s {
	struct interactionTris_s	*next;
	mapTri_t	*triList;
	mapLight_t	*light;
} interactionTris_t;

	interactionTris_t	*interactions, *checkInter; //, *nextInter;


	area = &dmapGlobals.uEntities[entityNum].areas[areaNum];
	entity = dmapGlobals.uEntities[entityNum].mapEntity;

	numSurfaces = CountUniqueShaders( area->groups );


	if ( entityNum == 0 ) {
		procFile->WriteFloatString( "model { /* name = */ \"_area%i\" /* numSurfaces = */ %i\n\n", 
			areaNum, numSurfaces );
	} else {
		const char *name;

		entity->epairs.GetString( "name", "", &name );
		if ( !name[0] ) {
			common->Error( "Entity %i has surfaces, but no name key", entityNum );
		}
		procFile->WriteFloatString( "model { /* name = */ \"%s\" /* numSurfaces = */ %i\n\n", 
			name, numSurfaces );
	}

	surfaceNum = 0;
	for ( group = area->groups ; group ; group = group->nextGroup ) {
		if ( group->surfaceEmited ) {
			continue;
		}

		// combine all groups compatible with this one
		// usually several optimizeGroup_t can be combined into a single
		// surface, even though they couldn't be merged together to save
		// vertexes because they had different planes, texture coordinates, or lights.
		// Different mergeGroups will stay in separate surfaces.
		ambient = NULL;

		// each light that illuminates any of the groups in the surface will
		// get its own list of indexes out of the original surface
		interactions = NULL;

		for ( groupStep = group ; groupStep ; groupStep = groupStep->nextGroup ) {
			if ( groupStep->surfaceEmited ) {
				continue;
			}
			if ( !GroupsAreSurfaceCompatible( group, groupStep ) ) {
				continue;
			}

			// copy it out to the ambient list
			copy = CopyTriList( groupStep->triList );
			ambient = MergeTriLists( ambient, copy );
			groupStep->surfaceEmited = true;

			// duplicate it into an interaction for each groupLight
			for ( i = 0 ; i < groupStep->numGroupLights ; i++ ) {
				for ( checkInter = interactions ; checkInter ; checkInter = checkInter->next ) {
					if ( checkInter->light == groupStep->groupLights[i] ) {
						break;
					}
				}
				if ( !checkInter ) {
					// create a new interaction
					checkInter = (interactionTris_t *)Mem_ClearedAlloc( sizeof( *checkInter ) );
					checkInter->light = groupStep->groupLights[i];
					checkInter->next = interactions;
					interactions = checkInter;
				}
				copy = CopyTriList( groupStep->triList );
				checkInter->triList = MergeTriLists( checkInter->triList, copy );
			}
		}

		if ( !ambient ) {
			continue;
		}

		if ( surfaceNum >= numSurfaces ) {
			common->Error( "WriteOutputSurfaces: surfaceNum >= numSurfaces" );
		}

		procFile->WriteFloatString( "/* surface %i */ { ", surfaceNum );
		surfaceNum++;
		procFile->WriteFloatString( "\"%s\" ", ambient->material->GetName() );

		uTri = ShareMapTriVerts( ambient );
		FreeTriList( ambient );

		CleanupUTriangles( uTri );
		WriteUTriangles( uTri );
		R_FreeStaticTriSurf( uTri );

		procFile->WriteFloatString( "}\n\n" );
	}

	procFile->WriteFloatString( "}\n\n" );
}
Example #20
/*
=============
Undo_GeneralStart
=============
*/
void Undo_GeneralStart(char *operation)
{
	undo_t *undo;
	brush_t *pBrush;
	entity_t *pEntity;


	if (g_lastundo)
	{
		if (!g_lastundo->done)
		{
			common->Printf("Undo_Start: WARNING last undo not finished.\n");
		}
	}

	undo = (undo_t *) Mem_ClearedAlloc(sizeof(undo_t), TAG_TOOLS);
	if (!undo) return;
	memset(undo, 0, sizeof(undo_t));
	undo->brushlist.next = &undo->brushlist;
	undo->brushlist.prev = &undo->brushlist;
	undo->entitylist.next = &undo->entitylist;
	undo->entitylist.prev = &undo->entitylist;
	if (g_lastundo) g_lastundo->next = undo;
	else g_undolist = undo;
	undo->prev = g_lastundo;
	undo->next = NULL;
	g_lastundo = undo;
	
	undo->time = Sys_DoubleTime();
	//
	if (g_undoId > g_undoMaxSize * 2) g_undoId = 1;
	if (g_undoId <= 0) g_undoId = 1;
	undo->id = g_undoId++;
	undo->done = false;
	undo->operation = operation;
	//reset the undo IDs of all brushes using the new ID
	for (pBrush = active_brushes.next; pBrush != NULL && pBrush != &active_brushes; pBrush = pBrush->next)
	{
		if (pBrush->undoId == undo->id)
		{
			pBrush->undoId = 0;
		}
	}
	for (pBrush = selected_brushes.next; pBrush != NULL && pBrush != &selected_brushes; pBrush = pBrush->next)
	{
		if (pBrush->undoId == undo->id)
		{
			pBrush->undoId = 0;
		}
	}
	//reset the undo IDs of all entities using the new ID
	for (pEntity = entities.next; pEntity != NULL && pEntity != &entities; pEntity = pEntity->next)
	{
		if (pEntity->undoId == undo->id)
		{
			pEntity->undoId = 0;
		}
	}
	g_undoMemorySize += sizeof(undo_t);
	g_undoSize++;
	//undo buffer is bound to a max
	if (g_undoSize > g_undoMaxSize)
	{
		Undo_FreeFirstUndo();
	}
}
Example #21
/*
=============
Undo_Undo
=============
*/
void Undo_Undo(void)
{
	undo_t *undo, *redo;
	brush_t *pBrush, *pNextBrush;
	entity_t *pEntity, *pNextEntity, *pUndoEntity;

	if (!g_lastundo)
	{
		Sys_Status("Nothing left to undo.\n");
		return;
	}
	if (!g_lastundo->done)
	{
		Sys_Status("Undo_Undo: WARNING: last undo not yet finished!\n");
	}
	// get the last undo
	undo = g_lastundo;
	if (g_lastundo->prev) g_lastundo->prev->next = NULL;
	else g_undolist = NULL;
	g_lastundo = g_lastundo->prev;

	//allocate a new redo
	redo = (undo_t *) Mem_ClearedAlloc(sizeof(undo_t), TAG_TOOLS);
	if (!redo) return;
	memset(redo, 0, sizeof(undo_t));
	redo->brushlist.next = &redo->brushlist;
	redo->brushlist.prev = &redo->brushlist;
	redo->entitylist.next = &redo->entitylist;
	redo->entitylist.prev = &redo->entitylist;
	if (g_lastredo) g_lastredo->next = redo;
	else g_redolist = redo;
	redo->prev = g_lastredo;
	redo->next = NULL;
	g_lastredo = redo;
	redo->time = Sys_DoubleTime();
	redo->id = g_redoId++;
	redo->done = true;
	redo->operation = undo->operation;

	//reset the redo IDs of all brushes using the new ID
	for (pBrush = active_brushes.next; pBrush != NULL && pBrush != &active_brushes; pBrush = pBrush->next)
	{
		if (pBrush->redoId == redo->id)
		{
			pBrush->redoId = 0;
		}
	}
	for (pBrush = selected_brushes.next; pBrush != NULL && pBrush != &selected_brushes; pBrush = pBrush->next)
	{
		if (pBrush->redoId == redo->id)
		{
			pBrush->redoId = 0;
		}
	}
	//reset the redo IDs of all entities using the new ID
	for (pEntity = entities.next; pEntity != NULL && pEntity != &entities; pEntity = pEntity->next)
	{
		if (pEntity->redoId == redo->id)
		{
			pEntity->redoId = 0;
		}
	}

	// remove current selection
	Select_Deselect();
	// move "created" brushes to the redo
	for (pBrush = active_brushes.next; pBrush != NULL && pBrush != &active_brushes; pBrush=pNextBrush)
	{
		pNextBrush = pBrush->next;
		if (pBrush->undoId == undo->id)
		{
			//Brush_Free(pBrush);
			//move the brush to the redo
			Brush_RemoveFromList(pBrush);
			Brush_AddToList(pBrush, &redo->brushlist);
			//make sure the ID of the owner is stored
			pBrush->ownerId = pBrush->owner->entityId;
			//unlink the brush from the owner entity
			Entity_UnlinkBrush(pBrush);
		}
	}
	// move "created" entities to the redo
	for (pEntity = entities.next; pEntity != NULL && pEntity != &entities; pEntity = pNextEntity)
	{
		pNextEntity = pEntity->next;
		if (pEntity->undoId == undo->id)
		{
			// check if this entity is in the undo
			for (pUndoEntity = undo->entitylist.next; pUndoEntity != NULL && pUndoEntity != &undo->entitylist; pUndoEntity = pUndoEntity->next)
			{
				// move brushes to the undo entity
				if (pUndoEntity->entityId == pEntity->entityId)
				{
					pUndoEntity->brushes.next = pEntity->brushes.next;
					pUndoEntity->brushes.prev = pEntity->brushes.prev;
					pEntity->brushes.next = &pEntity->brushes;
					pEntity->brushes.prev = &pEntity->brushes;
				}
			}
			//
			//Entity_Free(pEntity);
			//move the entity to the redo
			Entity_RemoveFromList(pEntity);
			Entity_AddToList(pEntity, &redo->entitylist);
		}
	}
	// add the undo entities back into the entity list
	for (pEntity = undo->entitylist.next; pEntity != NULL && pEntity != &undo->entitylist; pEntity = undo->entitylist.next)
	{
		g_undoMemorySize -= Entity_MemorySize(pEntity);
		//if this is the world entity
		if (pEntity->entityId == world_entity->entityId)
		{
			//free the epairs of the world entity
			Entity_FreeEpairs(world_entity);
			//set back the original epairs
			world_entity->epairs = pEntity->epairs;
			//free the world_entity clone that stored the epairs
			Entity_Free(pEntity);
		}
		else
		{
			Entity_RemoveFromList(pEntity);
			Entity_AddToList(pEntity, &entities);
			pEntity->redoId = redo->id;
		}
	}
	// add the undo brushes back into the selected brushes
	for (pBrush = undo->brushlist.next; pBrush != NULL && pBrush != &undo->brushlist; pBrush = undo->brushlist.next)
	{
		g_undoMemorySize -= Brush_MemorySize(pBrush);
		Brush_RemoveFromList(pBrush);
		Brush_AddToList(pBrush, &active_brushes);
		for (pEntity = entities.next; pEntity != NULL && pEntity != &entities; pEntity = pEntity->next)
		{
			if (pEntity->entityId == pBrush->ownerId)
			{
				Entity_LinkBrush(pEntity, pBrush);
				break;
			}
		}
		//if the brush is not linked then it should be linked into the world entity
		if (pEntity == NULL || pEntity == &entities)
		{
			Entity_LinkBrush(world_entity, pBrush);
		}
		//build the brush
		//Brush_Build(pBrush);
		Select_Brush(pBrush);
		pBrush->redoId = redo->id;
	}
	//
	common->Printf("%s undone.\n", undo->operation);
	// free the undo
	g_undoMemorySize -= sizeof(undo_t);
	Mem_Free(undo);
	g_undoSize--;
	g_undoId--;
	if (g_undoId <= 0) g_undoId = 2 * g_undoMaxSize;
	//

	Sys_BeginWait();
	brush_t *b, *next;
	for (b = active_brushes.next ; b != NULL && b != &active_brushes ; b=next) {
		next = b->next;
		Brush_Build( b, true, false, false );
	}
	for (b = selected_brushes.next ; b != NULL && b != &selected_brushes ; b=next) {
		next = b->next;
		Brush_Build( b, true, false, false );
	}
	Sys_EndWait();

	g_bScreenUpdates = true;
	Sys_UpdateWindows(W_ALL);
}
Example #22
/*
=====================
SplitOriginalEdgesAtCrossings
=====================
*/
void SplitOriginalEdgesAtCrossings( optimizeGroup_t *opt ) {
	int				i, j, k, l;
	int				numOriginalVerts;
	edgeCrossing_t	**crossings;

	numOriginalVerts = numOptVerts;
	// now split any crossing edges and create optEdges
	// linked to the vertexes

	// debug drawing bounds
	dmapGlobals.drawBounds = optBounds;

	dmapGlobals.drawBounds[0][0] -= 2;
	dmapGlobals.drawBounds[0][1] -= 2;
	dmapGlobals.drawBounds[1][0] += 2;
	dmapGlobals.drawBounds[1][1] += 2;

	// generate crossing points between all the original edges
	crossings = (edgeCrossing_t **)Mem_ClearedAlloc( numOriginalEdges * sizeof( *crossings ), TAG_DMAP );

	for ( i = 0 ; i < numOriginalEdges ; i++ ) {
		if ( dmapGlobals.drawflag ) {
			DrawOriginalEdges( numOriginalEdges, originalEdges );
			qglBegin( GL_LINES );
			qglColor3f( 0, 1, 0 );
			qglVertex3fv( originalEdges[i].v1->pv.ToFloatPtr() );
			qglColor3f( 0, 0, 1 );
			qglVertex3fv( originalEdges[i].v2->pv.ToFloatPtr() );
			qglEnd();
			qglFlush();
		}
		for ( j = i+1 ; j < numOriginalEdges ; j++ ) {
			optVertex_t	*v1, *v2, *v3, *v4;
			optVertex_t	*newVert;
			edgeCrossing_t	*cross;

			v1 = originalEdges[i].v1;
			v2 = originalEdges[i].v2;
			v3 = originalEdges[j].v1;
			v4 = originalEdges[j].v2;

			if ( !EdgesCross( v1, v2, v3, v4 ) ) {
				continue;
			}

			// this is the only point in optimization where
			// completely new points are created, and it only
			// happens if there is overlapping coplanar
			// geometry in the source triangles
			newVert = EdgeIntersection( v1, v2, v3, v4, opt );

			if ( !newVert ) {
//common->Printf( "lines %i (%i to %i) and %i (%i to %i) are colinear\n", i, v1 - optVerts, v2 - optVerts, 
//		   j, v3 - optVerts, v4 - optVerts );	// !@#
				// colinear, so add both verts of each edge to opposite
				if ( VertexBetween( v3, v1, v2 ) ) {
					cross = (edgeCrossing_t *)Mem_ClearedAlloc( sizeof( *cross ), TAG_DMAP );
					cross->ov = v3;
					cross->next = crossings[i];
					crossings[i] = cross;
				}

				if ( VertexBetween( v4, v1, v2 ) ) {
					cross = (edgeCrossing_t *)Mem_ClearedAlloc( sizeof( *cross ), TAG_DMAP );
					cross->ov = v4;
					cross->next = crossings[i];
					crossings[i] = cross;
				}

				if ( VertexBetween( v1, v3, v4 ) ) {
					cross = (edgeCrossing_t *)Mem_ClearedAlloc( sizeof( *cross ), TAG_DMAP );
					cross->ov = v1;
					cross->next = crossings[j];
					crossings[j] = cross;
				}

				if ( VertexBetween( v2, v3, v4 ) ) {
					cross = (edgeCrossing_t *)Mem_ClearedAlloc( sizeof( *cross ), TAG_DMAP );
					cross->ov = v2;
					cross->next = crossings[j];
					crossings[j] = cross;
				}

				continue;
			}
#if 0
if ( newVert && newVert != v1 && newVert != v2 && newVert != v3 && newVert != v4 ) {
common->Printf( "lines %i (%i to %i) and %i (%i to %i) cross at new point %i\n", i, v1 - optVerts, v2 - optVerts, 
		   j, v3 - optVerts, v4 - optVerts, newVert - optVerts );
} else if ( newVert ) {
common->Printf( "lines %i (%i to %i) and %i (%i to %i) intersect at old point %i\n", i, v1 - optVerts, v2 - optVerts, 
		  j, v3 - optVerts, v4 - optVerts, newVert - optVerts );
}
#endif
			if ( newVert != v1 && newVert != v2 ) {
				cross = (edgeCrossing_t *)Mem_ClearedAlloc( sizeof( *cross ), TAG_DMAP );
				cross->ov = newVert;
				cross->next = crossings[i];
				crossings[i] = cross;
			}

			if ( newVert != v3 && newVert != v4 ) {
				cross = (edgeCrossing_t *)Mem_ClearedAlloc( sizeof( *cross ), TAG_DMAP );
				cross->ov = newVert;
				cross->next = crossings[j];
				crossings[j] = cross;
			}

		}
	}


	// now split each edge by its crossing points
	// colinear edges will have duplicated edges added, but it won't hurt anything
	for ( i = 0 ; i < numOriginalEdges ; i++ ) {
		edgeCrossing_t	*cross, *nextCross;
		int				numCross;
		optVertex_t		**sorted;

		numCross = 0;
		for ( cross = crossings[i] ; cross ; cross = cross->next ) {
			numCross++;
		}
		numCross += 2;	// account for originals
		sorted = (optVertex_t **)Mem_Alloc( numCross * sizeof( *sorted ), TAG_DMAP );
		sorted[0] = originalEdges[i].v1;
		sorted[1] = originalEdges[i].v2;
		j = 2;
		for ( cross = crossings[i] ; cross ; cross = nextCross ) {
			nextCross = cross->next;
			sorted[j] = cross->ov;
			Mem_Free( cross );
			j++;
		}

		// add all possible fragment combinations that aren't divided
		// by another point
		for ( j = 0 ; j < numCross ; j++ ) {
			for ( k = j+1 ; k < numCross ; k++ ) {
				for ( l = 0 ; l < numCross ; l++ ) {
					if ( sorted[l] == sorted[j] || sorted[l] == sorted[k] ) {
						continue;
					}
					if ( sorted[j] == sorted[k] ) {
						continue;
					}
					if ( VertexBetween( sorted[l], sorted[j], sorted[k] ) ) {
						break;
					}
				}
				if ( l == numCross ) {
//common->Printf( "line %i fragment from point %i to %i\n", i, sorted[j] - optVerts, sorted[k] - optVerts );
					AddEdgeIfNotAlready( sorted[j], sorted[k] );
				}
			}
		}

		Mem_Free( sorted );
	}


	Mem_Free( crossings );
	Mem_Free( originalEdges );

	// check for duplicated edges
	for ( i = 0 ; i < numOptEdges ; i++ ) {
		for ( j = i+1 ; j < numOptEdges ; j++ ) {
			if ( ( optEdges[i].v1 == optEdges[j].v1 && optEdges[i].v2 == optEdges[j].v2 ) 
				|| ( optEdges[i].v1 == optEdges[j].v2 && optEdges[i].v2 == optEdges[j].v1 ) ) {
				common->Printf( "duplicated optEdge\n" );
			}
		}
	}

	if ( dmapGlobals.verbose ) {
		common->Printf( "%6i original edges\n", numOriginalEdges );
		common->Printf( "%6i edges after splits\n", numOptEdges );
		common->Printf( "%6i original vertexes\n", numOriginalVerts );
		common->Printf( "%6i vertexes after splits\n", numOptVerts );
	}
}
Example #23
NSBitmapImageRep::NSBitmapImageRep( int wide, int high )
{
    bmap = ( byte * ) Mem_ClearedAlloc( wide * high * 4 );
    width = wide;
    height = high;
}
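/*
The constructor above allocates a cleared wide * high * 4 byte buffer, i.e. one
32-bit pixel per texel. A sketch of how such a buffer is typically addressed,
assuming tightly packed rows and RGBA byte order; SetPixelSketch is an
illustrative helper, not a method of the class.
*/
static void SetPixelSketch( unsigned char *bmap, int width, int x, int y,
                            unsigned char r, unsigned char g, unsigned char b, unsigned char a )
{
    unsigned char *p = bmap + ( y * width + x ) * 4;    // 4 bytes per pixel, rows are width pixels wide
    p[0] = r;
    p[1] = g;
    p[2] = b;
    p[3] = a;
}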
Example #24
static itemconfig_t* LoadItemConfig( const char* filename ) {
	int max_iteminfo = ( int )LibVarValue( "max_iteminfo", "256" );
	if ( max_iteminfo < 0 ) {
		BotImport_Print( PRT_ERROR, "max_iteminfo = %d\n", max_iteminfo );
		max_iteminfo = 256;
		LibVarSet( "max_iteminfo", "256" );
	}

	if ( GGameType & GAME_Quake3 ) {
		PC_SetBaseFolder( BOTFILESBASEFOLDER );
	}
	char path[ MAX_QPATH ];
	String::NCpyZ( path, filename, MAX_QPATH );
	source_t* source = LoadSourceFile( path );
	if ( !source ) {
		BotImport_Print( PRT_ERROR, "couldn't load %s\n", path );
		return NULL;
	}
	//initialize item config
	itemconfig_t* ic = ( itemconfig_t* )Mem_ClearedAlloc( sizeof ( itemconfig_t ) +
		max_iteminfo * sizeof ( iteminfo_t ) );
	ic->iteminfo = ( iteminfo_t* )( ( char* )ic + sizeof ( itemconfig_t ) );
	ic->numiteminfo = 0;
	//parse the item config file
	token_t token;
	while ( PC_ReadToken( source, &token ) ) {
		if ( !String::Cmp( token.string, "iteminfo" ) ) {
			if ( ic->numiteminfo >= max_iteminfo ) {
				SourceError( source, "more than %d item info defined\n", max_iteminfo );
				Mem_Free( ic );
				FreeSource( source );
				return NULL;
			}
			iteminfo_t* ii = &ic->iteminfo[ ic->numiteminfo ];
			Com_Memset( ii, 0, sizeof ( iteminfo_t ) );
			if ( !PC_ExpectTokenType( source, TT_STRING, 0, &token ) ) {
				Mem_Free( ic );
				FreeSource( source );
				return NULL;
			}
			StripDoubleQuotes( token.string );
			String::NCpy( ii->classname, token.string, sizeof ( ii->classname ) - 1 );
			if ( !ReadStructure( source, &iteminfo_struct, ( char* )ii ) ) {
				Mem_Free( ic );
				FreeSource( source );
				return NULL;
			}
			ii->number = ic->numiteminfo;
			ic->numiteminfo++;
		} else {
			SourceError( source, "unknown definition %s\n", token.string );
			Mem_Free( ic );
			FreeSource( source );
			return NULL;
		}
	}
	FreeSource( source );

	if ( !ic->numiteminfo ) {
		BotImport_Print( PRT_WARNING, "no item info loaded\n" );
	}
	BotImport_Print( PRT_MESSAGE, "loaded %s\n", path );
	return ic;
}