Example #1
void CScreenTintMgr::Update()
{
	if (!m_bChanged)
		return;

    LTVector vTemp;
	vTemp.Init(0.0f,0.0f,0.0f);
	for (int i = 0; i < NUM_TINT_EFFECTS; i++)
	{
		vTemp.x = LTMAX(vTemp.x,m_avTints[i].x);
		vTemp.y = LTMAX(vTemp.y,m_avTints[i].y);
		vTemp.z = LTMAX(vTemp.z,m_avTints[i].z);
	}

	if (vTemp.x > 1.0f)
		vTemp.x = 1.0f;
	if (vTemp.y > 1.0f)
		vTemp.y = 1.0f;
	if (vTemp.z > 1.0f)
		vTemp.z = 1.0f;
    m_bChanged = false;

    //g_pLTClient->SetCameraLightAdd( g_pPlayerMgr->GetPlayerCamera()->GetCamera(), &vTemp);

}
//uses the glyphs and determines the size of the texture needed to lay them out with a single pixel boundary
//around each character
static bool GetTextureSizeFromCharSizes(const CTextureStringGlyph* pGlyphs, uint32 nNumGlyphs, uint32 nMaxGlyphWidth, 
										uint32 nMaxGlyphHeight, uint32 nCharSpacing, SIZE& sizeTexture )
{
	//default the size
	sizeTexture.cx = sizeTexture.cy = 0;

	// Start the height off as one row.
	uint32 nRawHeight = nMaxGlyphHeight + nCharSpacing;

	// To find the height, keep putting characters into the rows until we reach the bottom.
	int nXOffset	= 0;
	int nMaxXOffset = 0;

	for( uint32 nGlyph = 0; nGlyph < nNumGlyphs; nGlyph++ )
	{
		// Get this character's width.
		int nCharWidthWithSpacing = pGlyphs[nGlyph].m_rBlackBox.GetWidth() + nCharSpacing;

		// See if this width fits in the current row.
		if( nXOffset + nCharWidthWithSpacing < MAXIMUM_TEXTURE_SIZE )
		{
			// Still fits in the current row.
			nXOffset += nCharWidthWithSpacing;

			//keep track of the maximum extent
			nMaxXOffset = LTMAX(nMaxXOffset, nXOffset);
		}
		else
		{
			// Doesn't fit in the current row.  Enlarge by one row
			// and start at the left again.
			nXOffset = 0;
			nRawHeight += nMaxGlyphHeight + nCharSpacing;

			//keep track of the maximum extent
			nMaxXOffset = LTMAX(nMaxXOffset, nCharWidthWithSpacing);
		}
	}

	//if nMaxXOffset extends past the maximum texture size, then we have a glyph that is too
	//large to fit on a texture
	if(nMaxXOffset >= MAXIMUM_TEXTURE_SIZE)
		return false;

	//also see if the whole string couldn't fit on the texture
	if(nRawHeight >= MAXIMUM_TEXTURE_SIZE)
		return false;

	//adjust the offset to be a texture size
	sizeTexture.cx = GetTextureSize( nMaxXOffset );

	// Enlarge the height to the nearest power of two and use that as our final height.
	sizeTexture.cy = GetTextureSize( nRawHeight );

	//otherwise the texture size is valid
	return true;
}
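//A minimal sketch (not the engine's actual helper) of what GetTextureSize is assumed to do
//above: round a raw pixel extent up to the next power of two, which is the behavior implied
//by the "Enlarge the height to the nearest power of two" comment.
static uint32 GetTextureSizeSketch(uint32 nRawSize)
{
	uint32 nSize = 1;
	while(nSize < nRawSize)
		nSize <<= 1;
	return nSize;
}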
void CRVTrackerTextureWrap::ScaleToMultiple(CTWPolyList &aList, CTextExtents &cExtents) const
{
	// Go find out how big the texture is...
	float fTextureU, fTextureV;
	if (!GetFirstTextureMul(aList, fTextureU, fTextureV))
		return;

	// Adjust to the next even multiple
	float fRangeU = (cExtents.m_fMaxU - cExtents.m_fMinU) * fTextureU;
	float fRangeV = (cExtents.m_fMaxV - cExtents.m_fMinV) * fTextureV;
	// Get the texture's scale
	int nRepeatCountU = (int)(fRangeU + 0.5f);
	int nRepeatCountV = (int)(fRangeV + 0.5f);
	nRepeatCountU = LTMAX(nRepeatCountU, 1);
	nRepeatCountV = LTMAX(nRepeatCountV, 1);
	// Calculate the scaling necessary
	float fScaleU;
	if (fRangeU != 0.0f)
	{
		fScaleU = ((float)nRepeatCountU) / fRangeU;
	}
	else
	{
		fScaleU = 1.0f;
	}
	float fScaleV;
	if (fRangeV != 0.0f)
	{
		fScaleV = ((float)nRepeatCountV) / fRangeV;
	}
	else
	{
		fScaleV = 1.0f;
	}

	// Scale the extents
	cExtents.m_fMinU *= fScaleU;
	cExtents.m_fMaxU *= fScaleU;
	cExtents.m_fMinV *= fScaleV;
	cExtents.m_fMaxV *= fScaleV;

	// Scale the texture spaces
	for (uint32 nPolyLoop = 0; nPolyLoop < aList.GetSize(); ++nPolyLoop)
	{
		// Skip polys that weren't touched...
		if (!aList[nPolyLoop]->m_bTouched)
			continue;
		// Get the texture space
		LTVector vPolyO, vPolyP, vPolyQ;
		aList[nPolyLoop]->m_pPoly->GetTexture(GetCurrTexture()).GetTextureSpace(vPolyO, vPolyP, vPolyQ);
		// Scale it
		vPolyP *= fScaleU;
		vPolyQ *= fScaleV;
		// Put it back
		aList[nPolyLoop]->m_pPoly->SetTextureSpace(GetCurrTexture(), vPolyO, vPolyP, vPolyQ);
	}
}
Example #4
void LightBase::HandleDimsMsg( HOBJECT /*hSender*/, const CParsedMsg &crParsedMsg )
{
	if(crParsedMsg.GetArgCount() == 4)
	{
		//read in our new dimensions and apply them
		m_vDirectionalDims.Init(	LTMAX(0.0f, (float)atof(crParsedMsg.GetArg(1))),
									LTMAX(0.0f, (float)atof(crParsedMsg.GetArg(2))),
									LTMAX(0.0f, (float)atof(crParsedMsg.GetArg(3))));
		g_pLTServer->SetLightDirectionalDims(m_hObject, m_vDirectionalDims * LTVector(0.5f, 0.5f, 1.0f));
	}
}
bool CAIActionAttackTurret::TimeoutExpired( CAI* pAI )
{
	// SmartObject specifies the timeout for the turret.

	AIDB_SmartObjectRecord* pSmartObjectRecord = g_pAIDB->GetAISmartObjectRecord( m_pActionRecord->eSmartObjectID );
	if( !pSmartObjectRecord )
	{
		return false;
	}

	// Give up after some expiration time.

	if( pSmartObjectRecord->fTimeout != 0.f )
	{
		double fTargetChangeTime = pAI->GetAIBlackBoard()->GetBBTargetChangeTime();
		double fTargetLastVisibleTime = pAI->GetAIBlackBoard()->GetBBTargetLastVisibleTime();

		double fTargetTime = LTMAX( fTargetChangeTime, fTargetLastVisibleTime );
		if( g_pLTServer->GetTime() - fTargetTime > pSmartObjectRecord->fTimeout )
		{
			return true;
		}
	}

	// Timeout has not expired.

	return false;
}
//given a particle position, limits on the velocities, and the velocity type, this will generate the 
//appropriate velocity
static LTVector GenerateObjectSpaceParticleVel(ePSVelocityType eType, const LTVector& vObjSpacePos, 
											   const LTVector& vMinVelocity, const LTVector& vMaxVelocity)
{
	LTVector vVel(0, 0, 0);

	// Randomize the velocity within our range
	switch(eType)
	{
	case PSV_eRandom:
		{
			vVel.x = GetRandom( vMinVelocity.x, vMaxVelocity.x );
			vVel.y = GetRandom( vMinVelocity.y, vMaxVelocity.y );
			vVel.z = GetRandom( vMinVelocity.z, vMaxVelocity.z );
		}
		break;
	case PSV_eCenter:
		{
			//velocity direction is based upon position from 0, 0, 0
			float fMag = LTMAX(vObjSpacePos.Mag(), 0.01f);
			vVel = vObjSpacePos * (GetRandom(vMinVelocity.x, vMaxVelocity.x) / fMag);
		}
		break;
	default:
		LTERROR( "Unknown particle velocity type");
		break;
	}

	return vVel;
}
Example #7
bool CSpriteFX::Init(const FX_BASEDATA *pBaseData, const CBaseFXProps *pProps)
{

	// Perform base class initialisation

	if( !CBaseSpriteFX::Init(pBaseData, pProps))
		return false;

	//install our visible callback if we need to cast a visible ray
	if(GetProps()->m_bCastVisibleRay)
	{
		g_pLTClient->GetCustomRender()->SetVisibleCallback(m_hObject, CustomRenderVisibleCallback);
	}

	//determine a random overall scale
	m_fScale = GetRandom(GetProps()->m_fMinScale, GetProps()->m_fMaxScale);

	//also determine the largest scale possible so we don't have to update this every frame
	float fMaxScale = GetProps()->m_ffcScale.GetFirstValue();
	for(uint32 nKey = 1; nKey < GetProps()->m_ffcScale.GetNumKeys(); nKey++)
		fMaxScale = LTMAX(fMaxScale, GetProps()->m_ffcScale.GetKey(nKey));

	//and use this as our visible scale
	SetVisScale(fMaxScale * m_fScale);
	
	// Success !!
	return true;
}
Example #8
//handles updating the properties of the light given the specified unit time value
void CDynaLightFX::UpdateDynamicProperties(float fUnitTime)
{
	//update the type of this light
	g_pLTClient->SetLightType(m_hLight, GetEngineLightType(GetProps()->m_efcType.GetValue(fUnitTime)));

	//the intensity of the light
	float fIntensity	= GetProps()->m_ffcIntensity.GetValue(fUnitTime);
	float fFlickerScale	= GetProps()->m_ffcFlickerScale.GetValue(fUnitTime);
	g_pLTClient->SetLightIntensityScale(m_hLight, LTMAX(0.0f, fIntensity * GetRandom(fFlickerScale, 1.0f)) );

	//get the color of this light
	LTVector vColor = ToColor3(GetProps()->m_cfcColor.GetValue(fUnitTime));
	g_pLTClient->SetObjectColor(m_hLight, vColor.x, vColor.y, vColor.z, 1.0f);

	//and now the radius of the light
	float fRadius = GetProps()->m_ffcRadius.GetValue(fUnitTime);
	g_pLTClient->SetLightRadius(m_hLight, fRadius);

	//the specular color
	LTVector4 vSpecular = GetProps()->m_cfcSpecularColor.GetValue(fUnitTime);
	g_pLTClient->SetLightSpecularColor(m_hLight, ToColor3(vSpecular));

	//the translucent color
	LTVector4 vTranslucent = GetProps()->m_cfcTranslucentColor.GetValue(fUnitTime);
	g_pLTClient->SetLightTranslucentColor(m_hLight, ToColor3(vTranslucent));

	//the spot light information
	float fFovX = MATH_DEGREES_TO_RADIANS(GetProps()->m_ffcSpotFovX.GetValue(fUnitTime));
	float fFovY = MATH_DEGREES_TO_RADIANS(GetProps()->m_ffcSpotFovY.GetValue(fUnitTime));

	g_pLTClient->SetLightSpotInfo(m_hLight, fFovX, fFovY, 0.0f);
}
Example #9
void LightBase::HandleRadiusMsg( HOBJECT /*hSender*/, const CParsedMsg &crParsedMsg )
{
	if(crParsedMsg.GetArgCount() == 2)
	{
		//read in our new dimensions and apply them
		m_fLightRadius =	LTMAX((float)atof(crParsedMsg.GetArg(1)), 0.0f);						
		g_pLTServer->SetLightRadius(m_hObject, m_fLightRadius);
	}
}
Example #10
//  ---------------------------------------------------------------------------
CUI_RESULTTYPE CUIList_Impl::Scroll(int32 number)
{
	m_WindowStart += number;

	m_WindowStart = LTMAX(0, LTMIN(m_ItemCount-1, m_WindowStart));

	this->AlignTextInWidget();

	return CUIR_OK;
}
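//Hypothetical helper (not part of the engine API): the clamping idiom used in Scroll above,
//written out once so the intent of LTMAX(0, LTMIN(hi, v)) is explicit.
template <typename T>
static T ClampValue(T nValue, T nLow, T nHigh)
{
	return LTMAX(nLow, LTMIN(nHigh, nValue));
}
//usage sketch: m_WindowStart = ClampValue<int32>(m_WindowStart, 0, m_ItemCount - 1);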
float AINavMeshLinkAbstract::GetNMLinkPathingWeight(CAI* pAI)
{
	// Sanity check.

	if( !pAI )
	{
		return 1.f;
	}

	// Do not add extra weight to the poly we are standing in!

	if( pAI->GetCurrentNavMeshPoly() == m_eNMPolyID )
	{
		return 1.f;
	}

	// No SmartObject exists to define the weight.

	AIDB_SmartObjectRecord* pSmartObject = GetSmartObject();
	if( !pSmartObject )
	{
		return 1.f;
	}

	// Weight unpreferred links much more heavily.

	double fCurTime = g_pLTServer->GetTime();
	if( fCurTime < m_fNextPreferredTime )
	{
		// Weight decreases linearly over time.

		float fInterpFactor = ((float)( m_fNextPreferredTime - fCurTime )) / m_fPreferredDelay;
		return LTMAX( 1.f, 10000.f * fInterpFactor );
	}

	// Weight reserved links much more heavily,
	// if the link is reserved by someone else.

	if( m_hReservingAI && 
		( m_hReservingAI != pAI->m_hObject ) &&
		( !IsDeadAI( m_hReservingAI ) ) )
	{
		return 1000.f;
	}

	// Default weight.

	return 1.f;
}
//called after all properties have been loaded to allow for post processing of parameters
bool CParticleSystemProps::PostLoadProperties()
{
	//we need to find the largest size that particles can get in this system
	m_fMaxParticlePadding = 0.0f;
	for(uint32 nCurrKey = 0; nCurrKey < m_ffcParticleScale.GetNumKeys(); nCurrKey++)
	{
		m_fMaxParticlePadding = LTMAX(m_fMaxParticlePadding, m_ffcParticleScale.GetKey(nCurrKey));
	}

	//scale the particle padding so that the screen orientation and rotation of the particle are considered
	//and also the fact that the size is the full size and we want the half size
	m_fMaxParticlePadding *= 0.5f * MATH_SQRT2;

	return CBaseFXProps::PostLoadProperties();
}
Example #13
void CAIWeaponMelee::HandleModelString( CAI* pAI, const CParsedMsg& cParsedMsg )
{
	static CParsedMsg::CToken s_cTok_RigidMeleeAttack("MELEEATTACK");

	// Ensure the rigidbody melee attacks decrement the burstshot counter.  
	// If this isn't done, an AI will never go into aim.

	if ( cParsedMsg.GetArg(0) == s_cTok_RigidMeleeAttack )
	{
		m_nBurstShots = LTMAX( 0, m_nBurstShots - 1 ); 
	}
	else
	{
		DefaultHandleModelString(pAI, cParsedMsg );
	}
}
//allocators for a block of memory
void* CLTALoadOnlyAlloc::AllocateBlock(uint32 nSize)
{
	//see if we have enough room left in this block
	if(m_nMemLeft < nSize)
	{
		uint32 nBlockSize = LTMAX(m_nBlockSize, nSize);

		//we need to allocate the memory for a block. If the size is bigger than
		//a block, we need to allocate the block to be that big
		uint8* pMem;
		LT_MEM_TRACK_ALLOC(pMem = new uint8[nBlockSize],LT_MEM_TYPE_MISC);

		//check the allocation
		if(pMem == NULL)
			return NULL;

		//now allocate the block structure that will maintain the mem
		CLoadMemBlock* pNewBlock;
		LT_MEM_TRACK_ALLOC(pNewBlock = new CLoadMemBlock(pMem, m_pHead),LT_MEM_TYPE_MISC);

		//check the allocation
		if(pNewBlock == NULL)
		{
			delete [] pMem;
			return NULL;
		}

		m_pHead = pNewBlock;

		//update the block size
		m_nMemLeft = nBlockSize;
	}

	//ok, now we know that the allocation will fit inside of this block, so lets
	//go ahead and return the pointer and update our counts
	ASSERT(m_nMemLeft >= nSize);
	ASSERT(m_pHead);

	void* pRV = (void*)m_pHead->m_pCurrMemHead;

	//update our counts
	m_pHead->m_pCurrMemHead += nSize;
	m_nMemLeft -= nSize;

	return pRV;
}
Example #15
static void GeneratePolyGridFresnelAlphaAndCamera(const LTVector& vViewPos, CPolyGridBumpVertex* pVerts, LTPolyGrid* pGrid, uint32 nNumVerts)
{
	//we need to transform the camera position into our view space
	LTMatrix mInvWorldTrans;

	mInvWorldTrans.Identity();
	mInvWorldTrans.SetTranslation(-pGrid->GetPos());

	LTMatrix mOrientation;
	pGrid->m_Rotation.ConvertToMatrix(mOrientation);

	mInvWorldTrans = mOrientation * mInvWorldTrans;

	LTVector vCameraPos = mInvWorldTrans * vViewPos;

	//now generate the internals of the polygrid
	CPolyGridBumpVertex* pCurrVert	= pVerts;
	CPolyGridBumpVertex* pEnd		= pCurrVert + nNumVerts;

	//determine the fresnel table that we are going to be using
	const CFresnelTable* pTable = g_FresnelCache.GetTable(LTMAX(1.0003f, pGrid->m_fFresnelVolumeIOR), pGrid->m_fBaseReflection);

	//use a vector from the camera to the center of the grid to base our approximations off of. The further
	//we get toward the edges, the larger this error will be, but it is better than another sqrt per vert
	LTVector vToPGPt;

	while(pCurrVert < pEnd)
	{
		//the correct but slow way, so only do it every once in a while
		//if((pCurrVert - g_TriVertList) % 4 == 0)
		{
			vToPGPt = vCameraPos - pCurrVert->m_Vec;
			vToPGPt.Normalize();
		}

		pCurrVert->m_fEyeX = vToPGPt.x;
		pCurrVert->m_fEyeY = vToPGPt.y;
		pCurrVert->m_fEyeZ = vToPGPt.z;

		pCurrVert->m_nColor |= pTable->GetValue(vToPGPt.Dot(pCurrVert->m_vBasisUp));
		++pCurrVert;
	}
}
Example #16
LTBOOL ClientLightFX::Init()
{
 	VEC_DIVSCALAR(m_vColor, m_vColor, 511.0f);

	m_fStartTime = g_pLTServer->GetTime();

	m_fIntensityPhase = DEG2RAD(m_fIntensityPhase);
	m_fRadiusPhase = DEG2RAD(m_fRadiusPhase);

	if (m_bStartOn)
	{
        uint32 dwUsrFlags = g_pLTServer->GetObjectUserFlags(m_hObject);
		dwUsrFlags |= USRFLG_VISIBLE;
		g_pLTServer->SetObjectUserFlags(m_hObject, dwUsrFlags);
	}

	// Set the dims to something to avoid situations where the object is considered
	// invisible even though it's visible.
    float fDims = LTMAX(m_fRadiusMin, 5.0f);
	LTVector vDims(fDims, fDims, fDims);
	g_pLTServer->SetObjectDims(m_hObject, &vDims);

    return LTTRUE;
}
COccludee::CreatePtsOutline<NUM_PTS>::CreatePtsOutline(LTVector aPts[NUM_PTS], COutline *pResult)
{
	if (g_CV_DebugRBOldOccludeeShape.m_Val)
	{
		// Get the screen-space bounds
	  	float fMinScrX, fMinScrY, fMaxScrX, fMaxScrY;
	  	
	  	fMinScrX = fMaxScrX = aPts[0].x;
	  	fMinScrY = fMaxScrY = aPts[0].y;
	  
	  	for (uint32 nBoundsLoop = 1; nBoundsLoop < 8; ++nBoundsLoop)
	  	{
	  		fMinScrX = LTMIN(fMinScrX, aPts[nBoundsLoop].x);
	  		fMinScrY = LTMIN(fMinScrY, aPts[nBoundsLoop].y);
	  		fMaxScrX = LTMAX(fMaxScrX, aPts[nBoundsLoop].x);
	 		fMaxScrY = LTMAX(fMaxScrY, aPts[nBoundsLoop].y);
	  	}
	  
	  	pResult->push_back(LTVector(fMinScrX, fMinScrY, 0.0f));
	  	pResult->push_back(LTVector(fMaxScrX, fMinScrY, 0.0f));
	  	pResult->push_back(LTVector(fMaxScrX, fMaxScrY, 0.0f));
	  	pResult->push_back(LTVector(fMinScrX, fMaxScrY, 0.0f));
		return;
	}

	// Find the lowest point, and put it in aPts[0]
	for (uint32 nFindLowestLoop = 1; nFindLowestLoop < NUM_PTS; ++nFindLowestLoop)
	{
		if (aPts[nFindLowestLoop].y > aPts[0].y)
			std::swap(aPts[nFindLowestLoop], aPts[0]);
	}

	std::sort(aPts + 1, aPts + NUM_PTS, FPointOrder(aPts[0]));

	bool bBadOutline = false;

	// Get the actual hull
	LTVector *aStack[NUM_PTS];
	LTVector **pStackTop = &aStack[2];
	aStack[0] = &aPts[NUM_PTS - 1];
	aStack[1] = &aPts[0];
	uint32 i = 1;
	while (i < (NUM_PTS - 1))
	{
		LTVector *p1 = pStackTop[-2];
		LTVector *p2 = pStackTop[-1];
		float fArea = (p2->x - p1->x) * (aPts[i].y - p1->y) - (aPts[i].x - p1->x) * (p2->y - p1->y);
		if (fArea > -0.001f) 
		{
			*pStackTop = &aPts[i];
			++pStackTop;
			++i;
		}
		else
		{
			--pStackTop;
			if (pStackTop <= &aStack[2])
			{
				// Note : This should never happen, but if it does, we still need to provide
				// some sort of output.
				bBadOutline = true; 
				break;
			}
		}
	}

	// If the algorithm failed, go back to using a screen-extents quad :(
	if (bBadOutline)
	{
	  	// Get the screen-space bounds
	  	float fMinScrX, fMinScrY, fMaxScrX, fMaxScrY;
	  	
	  	fMinScrX = fMaxScrX = aPts[0].x;
	  	fMinScrY = fMaxScrY = aPts[0].y;
	  
	  	for (uint32 nBoundsLoop = 1; nBoundsLoop < 8; ++nBoundsLoop)
	  	{
	  		fMinScrX = LTMIN(fMinScrX, aPts[nBoundsLoop].x);
	  		fMinScrY = LTMIN(fMinScrY, aPts[nBoundsLoop].y);
	  		fMaxScrX = LTMAX(fMaxScrX, aPts[nBoundsLoop].x);
			// We already know aPts[0].y is the max..
	 		// fMaxScrY = LTMAX(fMaxScrY, aPts[nBoundsLoop].y);
	  	}
	  
	  	pResult->push_back(LTVector(fMinScrX, fMinScrY, 0.0f));
	  	pResult->push_back(LTVector(fMaxScrX, fMinScrY, 0.0f));
	  	pResult->push_back(LTVector(fMaxScrX, fMaxScrY, 0.0f));
	  	pResult->push_back(LTVector(fMinScrX, fMaxScrY, 0.0f));
		return;
	}

	// Convert the stack into an outline
	LTVector **pStackIterator = aStack;
	for (; pStackIterator != pStackTop; ++pStackIterator)
	{
		pResult->push_back(**pStackIterator);
	}
}
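//A minimal sketch (not engine code) of the turn test used in the hull loop above: the 2D
//cross product of the edges (p1 -> p2) and (p1 -> p3). Its sign says which way the three
//screen-space points wind; the loop above keeps anything greater than the -0.001f tolerance
//on the stack.
static float SignedArea2D(const LTVector& p1, const LTVector& p2, const LTVector& p3)
{
	return (p2.x - p1.x) * (p3.y - p1.y) - (p3.x - p1.x) * (p2.y - p1.y);
}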
Example #18
void CTriggerFX::CalcLocalClientDistance()
{
	m_fDistPercent = -1.0f;

	// Don't do anything if the trigger is locked or our distances are too small..

	if( m_cs.bLocked || (m_cs.fHUDAlwaysOnDist <= 0.0f && m_cs.fHUDLookAtDist <= 0.0f) )
		return;

	// See if the player is within the trigger...

	LTVector vTrigPos;
	g_pLTClient->GetObjectPos( m_hServerObject, &vTrigPos );
	g_pLTClient->SetObjectPos( m_hDimsObject, vTrigPos );

	HLOCALOBJ	hPlayerObj = g_pLTClient->GetClientObject();
	LTVector	vPlayerPos, vPlayerDims;

	g_pLTClient->GetObjectPos( hPlayerObj, &vPlayerPos );

	// Make sure we are within the display radius...
	
	float	fMaxRadius = LTMAX( m_cs.fHUDAlwaysOnDist, m_cs.fHUDLookAtDist );
	float	fDistSqr = vTrigPos.DistSqr( vPlayerPos );
	bool	bWithinLookAtDist = (fDistSqr < m_cs.fHUDLookAtDist * m_cs.fHUDLookAtDist);
	bool	bWithinAlwaysOnDist = (fDistSqr < m_cs.fHUDAlwaysOnDist * m_cs.fHUDAlwaysOnDist);

	if( !bWithinLookAtDist && !bWithinAlwaysOnDist )
	{
		// We are not close enough...
		
		m_bWithinIndicatorRadius = false;
		return;
	}

	m_bWithinIndicatorRadius = true;

	g_pPhysicsLT->GetObjectDims( hPlayerObj, &vPlayerDims );

	LTVector vTrigMin = vTrigPos - m_cs.vDims;
	LTVector vTrigMax = vTrigPos + m_cs.vDims;
	LTVector vPlayerMin = vPlayerPos - vPlayerDims;
	LTVector vPlayerMax = vPlayerPos + vPlayerDims;

	// Check if we are within the height of the trigger...

	bool bWithinHeight =false;
	if( vPlayerMax.y > vTrigMin.y && vPlayerMin.y < vTrigMax.y )
		bWithinHeight = true;
	
	// See if we are inside the trigger at all...

	if( bWithinHeight && (BoxesIntersect( vTrigMin, vTrigMax, vPlayerMin, vPlayerMax ) || bWithinAlwaysOnDist))
	{
		m_fDistPercent = 1.0f;
	}
	else
	{
		// We are within the height of the trigger, show how far from it we are...

		float fMinDist = (vPlayerDims.x + vPlayerDims.z) * 0.5f;
		float fMaxDist = 100000.0f;

		LTVector vDir;

		if( bWithinAlwaysOnDist )
		{
			vDir = vTrigPos - vPlayerPos;
			vDir.Normalize();
		}
		else
		{
			LTRotation const& rRot = g_pPlayerMgr->GetPlayerCamera()->GetCameraRotation( );
			vDir = rRot.Forward();
		}

		IntersectQuery IQuery;
		IntersectInfo IInfo;

		IQuery.m_From		= vPlayerPos + (vDir * fMinDist);
		IQuery.m_To			= IQuery.m_From + (vDir * fMaxDist);
		IQuery.m_Flags		= INTERSECT_OBJECTS | INTERSECT_HPOLY | IGNORE_NONSOLID;
				
		// We need to receive rayhits for this intersect call...

		g_pCommonLT->SetObjectFlags( m_hDimsObject, OFT_Flags, FLAG_RAYHIT, FLAG_RAYHIT );

		if( g_pLTClient->IntersectSegment( IQuery, &IInfo ))
		{
			if( IInfo.m_hObject == m_hDimsObject )
			{
				IInfo.m_Point.y = vPlayerPos.y;
				float fDist = vPlayerPos.Dist( IInfo.m_Point );
				m_fDistPercent = 1.0f - (fDist / fMaxRadius);
			}
		}

		// No more rayhits...
		
		g_pCommonLT->SetObjectFlags( m_hDimsObject, OFT_Flags, 0, FLAG_RAYHIT );
	}
}
//handles actually creating the associated texture given the list of glyphs that have their positioning
//and dimension information filled out
static bool CreateGlyphTexture(CTextureStringGlyph* pGlyphs, uint32 nNumGlyphs, HDC hDC, 
							   uint8* pDibBits, const BITMAPINFO& bmi, const TEXTMETRICW& textMetric,
							   HTEXTURE & hTexture)
{
	//run through and build up the maximum extents from the glyph black boxes
	uint32 nMaxWidth = 0;
	uint32 nMaxHeight = 0;

	for(uint32 nCurrGlyph = 0; nCurrGlyph < nNumGlyphs; nCurrGlyph++)
	{
		nMaxWidth = LTMAX(nMaxWidth, pGlyphs[nCurrGlyph].m_rBlackBox.GetWidth());
		nMaxHeight = LTMAX(nMaxHeight, pGlyphs[nCurrGlyph].m_rBlackBox.GetHeight());
	}

	//the spacing added to each glyph dimension
	const uint32 knCharSpacing = 2;

	//determine the size of this texture that we will need
	SIZE sizeTexture;
	if(!GetTextureSizeFromCharSizes(pGlyphs, nNumGlyphs, nMaxWidth, nMaxHeight, knCharSpacing, sizeTexture ))
	{
		//the font is too big to fit into a texture, we must fail
		return false;
	}



	// This will be filled in with the pixel data of the font.
	uint8* pImageData = NULL;

	// Calculate the pixeldata pitch.
	uint32 nPitch = WIDTHBYTES( 16, sizeTexture.cx );
	int nPixelDataSize = nPitch * sizeTexture.cy;


	// Allocate an array to copy the font into.
	LT_MEM_TRACK_ALLOC( pImageData = new uint8[ nPixelDataSize ],LT_MEM_TYPE_UI );
	if ( pImageData == NULL )
	{
		DEBUG_PRINT( 1, ("CreateGlyphTexture:  Failed to allocate pixeldata." ));
		return false;
	}


	// set the whole font texture to pure white, with alpha of 0.  When
	// we copy the glyph from the bitmap to the pixeldata, we just
	// affect the alpha, which allows the font to antialias with any color.
	uint16* pData = (uint16*)pImageData;
	uint16* pPixelDataEnd = (uint16*)(pImageData + nPixelDataSize);
	while( pData < pPixelDataEnd )
	{
		pData[0] = 0x0FFF;
		pData++;
	}

	// This will hold the UV offset for the font texture.
	POINT sizeOffset;
	sizeOffset.x = 0;
	sizeOffset.y = 0;

	//success flag
	bool bSuccess = true;

	// Iterate over the characters.
	for( uint32 nGlyph = 0; nGlyph < nNumGlyphs; nGlyph++ )
	{
		// Clear the bitmap out for this glyph if it's not the first.  The first glyph
		// gets a brand new bitmap to write on.
		if( nGlyph != 0 )
		{
			memset( pDibBits, 0, bmi.bmiHeader.biSizeImage );
		}

		//cache the glyph we will be operating on
		CTextureStringGlyph& Glyph = pGlyphs[nGlyph];

		// Get this character's width.
		wchar_t cChar = Glyph.m_cGlyph;
		int nCharWidthWithSpacing = Glyph.m_rBlackBox.GetWidth() + knCharSpacing;

		// See if this width fits in the current row.
		int nCharRightSide = sizeOffset.x + nCharWidthWithSpacing;
		if( nCharRightSide >= sizeTexture.cx )
		{
			// Doesn't fit in the current row.  Go to the next row.
			sizeOffset.x = 0;
			sizeOffset.y += nMaxHeight + knCharSpacing;
		}

		// Write the glyph out so that the smallest box around the glyph starts
		// at the bitmap's 0,0.
		POINT ptTextOutOffset;
		ptTextOutOffset.x = -Glyph.m_rBlackBox.Left();
		ptTextOutOffset.y = -Glyph.m_rBlackBox.Top();

		// Write out the glyph.  We can't use GetGlyphOutline to get the bitmap since
		// it has a lot of corruption bugs with it.  

		if( !TextOutW( hDC, ptTextOutOffset.x, ptTextOutOffset.y, &cChar, 1 ))
		{
			bSuccess = false;
			break;
		}

		// Make sure the GDI is done with our bitmap.
		GdiFlush( );

		LTRect2n rCopyTo;
		rCopyTo.Left()		= sizeOffset.x + (knCharSpacing / 2);
		rCopyTo.Top()		= sizeOffset.y + (knCharSpacing / 2);
		rCopyTo.Right()		= rCopyTo.Left() + Glyph.m_rBlackBox.GetWidth();
		rCopyTo.Bottom()	= rCopyTo.Top() + Glyph.m_rBlackBox.GetHeight();

		// Find pointer to region within the pixel data to copy the glyph
		// and copy the glyph into the pixeldata.
		CopyGlyphBitmapToPixelData( bmi, pDibBits, rCopyTo, textMetric, pImageData, sizeTexture );

		//setup the UV coordinates for this glyph
		Glyph.m_fU = (float)(rCopyTo.Left() + 0.5f) / (float)sizeTexture.cx;
		Glyph.m_fV = (float)(rCopyTo.Top() + 0.5f) / (float)sizeTexture.cy;
		Glyph.m_fTexWidth  = rCopyTo.GetWidth() / (float)sizeTexture.cx;
		Glyph.m_fTexHeight = rCopyTo.GetHeight() / (float)sizeTexture.cy;

		// Update to the next offset for the next character.
		sizeOffset.x += nCharWidthWithSpacing;
	}


	//if we succeeded in rendering all the characters, convert it to a texture
	if(bSuccess)
	{

		// turn pixeldata into a texture
		g_pILTTextureMgr->CreateTextureFromData(
				hTexture, 
				TEXTURETYPE_ARGB4444,
				TEXTUREFLAG_PREFER16BIT | TEXTUREFLAG_PREFER4444,
				pImageData, 
				sizeTexture.cx,
				sizeTexture.cy );

		if( !hTexture )
		{
			DEBUG_PRINT( 1, ("CreateGlyphTexture:  Couldn't create texture." ));
			bSuccess = false;
		}

	}

	// Don't need pixel data any more.
	if( pImageData )
	{
		delete[] pImageData;
		pImageData = NULL;
	}


	//return the success code
	return bSuccess;
}
Example #20
//given an OBB specified with a transform and half dimensions, this will approximate how much is submerged
//and distribute that force to the appropriate points on the box
static bool ApplyOBBBuoyancy(const LTRigidTransform& tTransform, const LTVector& vHalfDims,
							 const LTPlane& WSPlane, float& fVolume, LTVector& vApplyAt, float& fSurfaceArea)
{
	//structure representing cached information about one of the vertices of an OBB
	struct SOBBInfo
	{
		LTVector	m_vPos;
		float		m_fDist;
		bool		m_bBackSide;
	};

	//determine a translation for the plane, so that our volume determination can
	//be relative to this point
	LTVector vPlaneTranslation = tTransform.m_vPos - WSPlane.Normal() * (WSPlane.DistTo(tTransform.m_vPos));

	//determine the center of the OBB, but do so in the translation of the plane
	LTVector vTranslatedCenter = tTransform.m_vPos - vPlaneTranslation;

	//determine the axis of the main transform
	LTVector vRight, vUp, vForward;
	tTransform.m_rRot.GetVectors(vRight, vUp, vForward);

	//scale the vectors based upon the half dimensions
	vRight		*= vHalfDims.x;
	vUp			*= vHalfDims.y;
	vForward	*= vHalfDims.z;

	//generate the eight vertices of the OBB
	SOBBInfo OBB[8];
	OBB[0].m_vPos = vTranslatedCenter + vRight + vUp + vForward;
	OBB[1].m_vPos = vTranslatedCenter + vRight + vUp - vForward;
	OBB[2].m_vPos = vTranslatedCenter + vRight - vUp - vForward;
	OBB[3].m_vPos = vTranslatedCenter + vRight - vUp + vForward;
	OBB[4].m_vPos = vTranslatedCenter - vRight + vUp + vForward;
	OBB[5].m_vPos = vTranslatedCenter - vRight + vUp - vForward;
	OBB[6].m_vPos = vTranslatedCenter - vRight - vUp - vForward;
	OBB[7].m_vPos = vTranslatedCenter - vRight - vUp + vForward;

	//now run through and generate the distances to each point on the OBB

	//also determine the minimum and maximum extents so we can early out of the more expensive
	//computations
	OBB[0].m_fDist		= OBB[0].m_vPos.Dot(WSPlane.Normal());
	OBB[0].m_bBackSide	= (OBB[0].m_fDist < 0.0f);

	float fMin = OBB[0].m_fDist;
	float fMax = OBB[0].m_fDist;

	for(uint32 nCurrPt = 1; nCurrPt < 8; nCurrPt++)
	{
		//since we already translated the OBB as if the plane was at the origin,
		//we can just do a dot with the normal to get the distance
		OBB[nCurrPt].m_fDist		= OBB[nCurrPt].m_vPos.Dot(WSPlane.Normal());
		OBB[nCurrPt].m_bBackSide	= (OBB[nCurrPt].m_fDist < 0.0f);

		fMin = LTMIN(fMin, OBB[nCurrPt].m_fDist);
		fMax = LTMAX(fMax, OBB[nCurrPt].m_fDist);
	}

	//handle early out conditions
	if(fMin >= 0.0f)
	{
		//completely out of the water, apply no forces
		return false;
	}

	if(fMax <= 0.0f)
	{
		//completely beneath the water, find the volume (*8 is for the *2 that is on each dimension)
		fVolume	 = vHalfDims.x * vHalfDims.y * vHalfDims.z * 8.0f;
		vApplyAt = tTransform.m_vPos;
		fSurfaceArea = 2.0f * (vHalfDims.x * vHalfDims.y + vHalfDims.y * vHalfDims.z + vHalfDims.x * vHalfDims.z);
		return true;
	}

	//we are spanning the water, we need to do the expensive tests to determine how much is underwater
	//and where exactly is the center of geometry

	//we now know all the distances, apply the clipping algorithm to each face in turn, the winding order
	//of this is very important. This table was derived by taking a cube with vertices labeled, laying
	//it out like an unfolded cube map, mapping the vertices, and then entering the vertices in a consistent
	//winding order. The first vertex is repeated to avoid having to do any wrapping around.
	static const uint32 knFaces[]	= {	0, 1, 2, 3, 0,
										7, 6, 5, 4, 7,
										0, 3, 7, 4, 0,
										6, 2, 1, 5, 6,
										1, 0, 4, 5, 1,
										7, 3, 2, 6, 7	};

	//the vertices we will clip into (max we can have is 5, since we have 4 source and 1 clip plane)
	LTVector vClipped[5];

	//the computed volume we have displaced
	float fSubmergedVolume = 0.0f;

	//the computed center of geometry
	LTVector vCenterOfGeom(0.0f, 0.0f, 0.0f);
	uint32 nNumGeomContributers = 0;

	//the surface area that is submerged
	fSurfaceArea = 0.0f;

	//now accumulate the data for each face
	for(uint32 nCurrFace = 0; nCurrFace < LTARRAYSIZE(knFaces); nCurrFace += 5)
	{
		//the current output vertex
		uint32 nOutputVert = 0;

		for(uint32 nCurrEdge = 0; nCurrEdge < 4; nCurrEdge++)
		{
			const SOBBInfo& Vert1 = OBB[knFaces[nCurrFace + nCurrEdge]];
			const SOBBInfo& Vert2 = OBB[knFaces[nCurrFace + nCurrEdge + 1]];

			//handle clipping
			if(Vert1.m_bBackSide == Vert2.m_bBackSide)
			{
				//both on the same side, handle case of both behind, in which case we add vert 1,
				//or case of both in front, in which case we add none
				if(Vert1.m_bBackSide)
				{
					vClipped[nOutputVert] = Vert1.m_vPos;
					nOutputVert++;
				}
			}
			else
			{
				if(Vert1.m_bBackSide)
				{
					vClipped[nOutputVert] = Vert1.m_vPos;
					nOutputVert++;
				}


				//we need to clip it and optionally add the first one if we are going out
				float fPercent = Vert1.m_fDist / (Vert1.m_fDist - Vert2.m_fDist);
				LTVector vClipVert = Vert1.m_vPos + (Vert2.m_vPos - Vert1.m_vPos) * fPercent;

				vClipped[nOutputVert] = vClipVert;
				nOutputVert++;
			}
		}

		//sanity check that we didn't overflow our array of clipped vertices
		LTASSERT(nOutputVert <= LTARRAYSIZE(vClipped), "Error: Overflowed clipped vertex array");

		//we now have our clipped polygon, bail if it is invalid, otherwise accumulate the volume and
		//the center of gravity
		if(nOutputVert < 3)
			continue;

		vCenterOfGeom += vClipped[0];
		vCenterOfGeom += vClipped[1];
		nNumGeomContributers += nOutputVert;

		for(uint32 nClipVert = 2; nClipVert < nOutputVert; nClipVert++)
		{
			//determine the volume of the parallelepiped formed by this region (we'll do the divide
			//by 6 at a later time)
			fSubmergedVolume += vClipped[0].Dot(vClipped[nClipVert - 1].Cross(vClipped[nClipVert]));

			//accumulate the surface area (this is the area of a parallelogram, so it is actually double
			//what we want, but we reduce that down below)
			fSurfaceArea += (vClipped[nClipVert - 1] - vClipped[0]).Cross(vClipped[nClipVert] - vClipped[0]).Mag();

			//and accumulate the center of geometry as simply the vertices
			vCenterOfGeom += vClipped[nClipVert];			
		}
	}

	//we should have positive volume and surface area
	LTASSERT(fSubmergedVolume >= 0.0f, "Warning: Found negative submerged volumes. Check handedness of cross product?");
	LTASSERT(fSurfaceArea >= 0.0f, "Warning: Found negative surface area. Check handedness of cross product?");
	LTASSERT(nNumGeomContributers > 0, "Error: Found submerged OBB but with no vertices beneath the plane");

	//apply the scales to the accumulated values that we were avoiding doing in the loop

	//1/6th the total volume since we were summing parallelepipeds, which is 6 times what we want
	fSubmergedVolume /= 6.0f;

	//the surface area is double what it needs to be at this point since we were accumulating 
	//parallelogram areas
	fSurfaceArea *= 0.5f;

	//the center of geometry needs to be averaged out on the number of points 
	//and move the center of geometry out of the plane translation space
	vCenterOfGeom = vCenterOfGeom / (float)nNumGeomContributers + vPlaneTranslation;

	//for the application of the point, we don't directly use the center of geometry as that produces
	//WAY too much noise and rapid fluctuations, so instead we do a weighting between that, and the
	//center of mass for the shape, producing much more stable results
	static const float kfCenterOfMassWeight = 0.7f;
	vApplyAt = vCenterOfGeom.Lerp(tTransform.m_vPos, kfCenterOfMassWeight);
	fVolume	 = fSubmergedVolume;

	return true;
}
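//A minimal sketch (not engine code) of the volume identity used above: each clipped face is
//fanned into triangles, and each triangle contributes the scalar triple product
//p0 . (p(i-1) x p(i)), which is six times the signed volume of the tetrahedron it forms with
//the origin. Summing this over every face of a closed surface and dividing by six once at the
//end gives the enclosed volume, which is why ApplyOBBBuoyancy defers the /6.0f.
//This helper returns the un-divided (6x) contribution of a single face.
static float AccumulateFaceVolumeContribution(const LTVector* pFacePts, uint32 nNumFacePts)
{
	float fSixVolume = 0.0f;
	for(uint32 nPt = 2; nPt < nNumFacePts; nPt++)
	{
		fSixVolume += pFacePts[0].Dot(pFacePts[nPt - 1].Cross(pFacePts[nPt]));
	}
	return fSixVolume;
}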
Example #21
bool CAIWeaponAbstract::GetShootPosition( CAI* pAI, AimContext& Context,LTVector& outvShootPos )
{
	ASSERT(pAI);

	// Cineractive firing.

	if( m_eFiringState == kAIFiringState_CineFiring )
	{
		LTVector vDir = pAI->GetWeaponForward( m_pWeapon );
		vDir.Normalize();
		outvShootPos = pAI->GetPosition() + ( vDir * 5000.f );
		return true;
	}

	// If perfect accuracy is enabled, we are done.

	if( pAI->GetAIBlackBoard()->GetBBPerfectAccuracy() )
	{
		HOBJECT hTarget = pAI->GetAIBlackBoard()->GetBBTargetObject();
		g_pLTServer->GetObjectPos( hTarget, &outvShootPos );
		return true;
	}

	// Initially aim for the target's visible position.
	// If the target is not visible at all, use his actual position.
	// This is a failsafe for AI shooting at the origin if they have
	// not yet seen the target ever.

	LTVector vVisiblePosition = pAI->GetTarget()->GetVisiblePosition();
	if( !pAI->GetAIBlackBoard()->GetBBTargetVisibleFromWeapon() )
	{
		vVisiblePosition = pAI->GetAIBlackBoard()->GetBBTargetPosition();
	}
	outvShootPos = vVisiblePosition;

	// If Target is within the FullAccuracy radius, we are done.

	if( pAI->GetTarget()->GetTargetDistSqr() < pAI->GetFullAccuracyRadiusSqr() )
	{
		return true;
	}

	// The following code forces the AI to intentionally miss every x
	// number of shots, depending on their accuracy. This gives players
	// the excitement of getting shot at without killing them too fast.

	// For example, if accuracy = 0.5 there will be a guaranteed sequence
	// of HIT, MISS, HIT, MISS, ...
	// If accuracy = 0.25, then HIT, MISS, MISS, MISS, HIT, MISS, MISS, MISS, etc.
	// If accuracy = 0.75, then HIT, HIT, HIT, MISS, HIT, HIT, HIT, MISS, etc.

	// Calculate the ratio of hits to misses based on the current 
	// accuracy.  This needs to be recalculated for every shot, 
	// because accuracy may change at any time.

	float fAccuracy = m_flWeaponContextInaccuracyScalar * pAI->GetAccuracy();
	if( fAccuracy <= 0.f )
	{
		Context.m_cMisses = 1;
		Context.m_cHits = 0;
	}
	else if( fAccuracy >= 1.f )
	{
		Context.m_cMisses = 0;
		Context.m_cHits = 1;
	}
	else if( fAccuracy < 0.5f )
	{
		Context.m_cMisses = (uint32)( ( ( 1.f - fAccuracy ) / fAccuracy ) + 0.5f );
		Context.m_cHits = 1;
	}
	else 
	{
		Context.m_cMisses = 1;
		Context.m_cHits = (uint32)( ( fAccuracy / ( 1.f - fAccuracy ) ) + 0.5f );
	}

	// If we have met or exceeded the required number of misses, 
	// reset the counters.

	if( Context.m_iMiss >= Context.m_cMisses )
	{
		Context.m_iHit = 0;
		Context.m_iMiss = 0;
	}

	//
	// First take care of hits, then take care of misses.
	//

	// Hit.

	if( Context.m_iHit < Context.m_cHits )
	{
		++Context.m_iHit;

		// Blind fire.

		if( pAI->GetAIBlackBoard()->GetBBBlindFire() )
		{
			GetBlindFirePosition( pAI, outvShootPos, !FIRE_MISS );
			return false;
		}

		// Suppression fire at last known pos.

		if( pAI->GetAIBlackBoard()->GetBBSuppressionFire() )
		{
			HOBJECT hTarget = pAI->GetAIBlackBoard()->GetBBTargetObject();

			CAIWMFact factQuery;
			factQuery.SetFactType( kFact_Character );
			factQuery.SetTargetObject( hTarget );
			CAIWMFact* pFact = pAI->GetAIWorkingMemory()->FindWMFact( factQuery );
			if( pFact )
			{
				outvShootPos = pFact->GetPos();
			}
		}

		// Default fire.

		// If the target has started moving or changed directions recently,
		// factor in some inaccuracy.

		float fInnaccuracy = LTMAX( 0.f, pAI->GetTarget()->GetCurMovementInaccuracy() );
		if( fInnaccuracy > 0.f )
		{
			LTVector vShootOffset = LTVector(	GetRandom( -fInnaccuracy, fInnaccuracy ),
				GetRandom( -fInnaccuracy * 0.5f, fInnaccuracy * 0.5f ),
				GetRandom( -fInnaccuracy, fInnaccuracy ) );
			vShootOffset.Normalize();

			outvShootPos += vShootOffset * 100.0f;
		}
		
		return true;
	}

	// Miss.

	else 
	{
		++Context.m_iMiss;

		// Blind fire.

		if( pAI->GetAIBlackBoard()->GetBBBlindFire() )
		{
			GetBlindFirePosition( pAI, outvShootPos, FIRE_MISS );
			return false;
		}

		// Default fire.

		HOBJECT hTarget = pAI->GetAIBlackBoard()->GetBBTargetObject();
		if( !IsCharacter( hTarget ) )
		{
			return false;
		}

		CCharacter* pChar = (CCharacter*)g_pLTServer->HandleToObject( hTarget );
		if( !pChar )
		{
			return false;
		}

		// Intentionally shoot a little short of the target.

		LTVector vPos = pAI->GetAIBlackBoard()->GetBBTargetPosition();

		// Suppression fire at last known pos.

		if( pAI->GetAIBlackBoard()->GetBBSuppressionFire() )
		{
			CAIWMFact factQuery;
			factQuery.SetFactType( kFact_Character );
			factQuery.SetTargetObject( hTarget );
			CAIWMFact* pFact = pAI->GetAIWorkingMemory()->FindWMFact( factQuery );
			if( pFact )
			{
				vPos = pFact->GetPos();
			}
		}

		float fDist = sqrt( pAI->GetTarget()->GetTargetDistSqr() );

		float fRadius = pChar->GetRadius();

		float fRand = GetRandom( 0.f, 1.f );
		fDist -= ( fRadius * 2.f ) + ( fRand * pAI->GetAccuracyMissPerturb() );

		// Calculate a position to the right or left of the target.

		LTVector vDir = vPos - pAI->GetPosition();
		if( vDir != LTVector::GetIdentity() )
		{
			vDir.Normalize();
		}

		vPos = pAI->GetPosition() + ( vDir * fDist );

		LTVector vRight = vDir.Cross( LTVector( 0.f, 1.f, 0.f ) );

		fRand = GetRandom( 0.f, 1.f );
		float fPerturb = ( ( pAI->GetAccuracyMissPerturb() * 2.f ) * fRand ) - pAI->GetAccuracyMissPerturb();
		vRight *= fPerturb;

		// Apply the offset to miss the target.

		outvShootPos = vPos + vRight;

		// Force bullets to land in front of the target, on the floor.

		if( m_pAIWeaponRecord->bForceMissToFloor )
		{
			float fFloor = pAI->GetAIBlackBoard()->GetBBTargetPosition().y;
			fFloor -= pAI->GetAIBlackBoard()->GetBBTargetDims().y;
			outvShootPos.y = fFloor;
		}

		return false;
	}

	return false;
}
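//A minimal sketch (not engine code) isolating the hit/miss ratio computed above, so the
//sequences cited in the comments are easy to verify: accuracy 0.5 gives 1 hit / 1 miss,
//0.25 gives 1 hit / 3 misses, and 0.75 gives 3 hits / 1 miss.
static void ComputeHitMissRatio(float fAccuracy, uint32& cHits, uint32& cMisses)
{
	if(fAccuracy <= 0.f)
	{
		cMisses = 1;
		cHits = 0;
	}
	else if(fAccuracy >= 1.f)
	{
		cMisses = 0;
		cHits = 1;
	}
	else if(fAccuracy < 0.5f)
	{
		cMisses = (uint32)(((1.f - fAccuracy) / fAccuracy) + 0.5f);
		cHits = 1;
	}
	else
	{
		cMisses = 1;
		cHits = (uint32)((fAccuracy / (1.f - fAccuracy)) + 0.5f);
	}
}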
bool CParticleSystemProps::LoadProperty(ILTInStream* pStream, const char* pszName, const char* pszStringTable, const uint8* pCurveData)
{
	if( LTStrIEquals( pszName, "Material" ) )
	{
		m_pszMaterial = CFxProp_String::Load(pStream, pszStringTable);
	}
	else if (LTStrIEquals( pszName, "NumImages" ) )
	{
		m_nNumImages = (uint32)CFxProp_Int::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "PlayerView" ) )
	{
		m_bPlayerView = CFxProp_EnumBool::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "InSky" ))
	{
		m_eInSky = (EFXSkySetting)CFxProp_Enum::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "ParticleColor" ) )
	{
		m_cfcParticleColor.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "ParticleScale" ) )
	{
		m_ffcParticleScale.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "ParticlesPerEmission" ) )
	{
		m_nfcParticlesPerEmission.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "MinParticleLifeSpan" ) )
	{
		m_ffcMinLifetime.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "MaxParticleLifeSpan" ) )
	{	
		m_ffcMaxLifetime.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "AdditionalAcceleration") )
	{
		m_vfcAcceleration.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "GravityScale") )
	{
		m_ffcGravityScale.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "EmissionInterval" ) )
	{
		m_fEmissionInterval = CFxProp_Float::Load(pStream);
		//if this is above zero, make sure that it is set to a reasonable limit to prevent
		//effects from performing way too many emissions
		if(m_fEmissionInterval > 0.0f)
			m_fEmissionInterval = LTMAX(m_fEmissionInterval, 0.01f);
	}
	else if( LTStrIEquals( pszName, "GroupCreationInterval" ) )
	{
		m_fGroupCreationInterval = CFxProp_Float::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "EmissionDir" ) )
	{
		m_vEmissionDir = CFxProp_Vector::Load(pStream);	

		//handle the case where an artist cleared it. They aren't supposed to, but it happens occasionally
		if(m_vEmissionDir.NearlyEquals(LTVector::GetIdentity(), 0.01f))
			m_vEmissionDir.Init(0.0f, 1.0f, 0.0f);

		m_vEmissionDir.Normalize();

		// Get the perpendicular vectors to this plane
		FindPerps(m_vEmissionDir, m_vEmissionPerp1, m_vEmissionPerp2);
	}
	else if( LTStrIEquals( pszName, "EmissionOffset" ) )
	{
		m_vfcEmissionOffset.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "EmissionDims" ) )
	{
		m_vfcEmissionDims.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "EmissionMinRadius" ) )
	{
		m_ffcMinRadius.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "EmissionMaxRadius" ) )
	{
		m_ffcMaxRadius.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "MinParticleVelocity" ) )
	{
		m_vfcMinVelocity.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "MaxParticleVelocity" ) )
	{
		m_vfcMaxVelocity.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "EmissionType" ) )
	{
		m_eEmissionType = (ePSEmissionType)CFxProp_Enum::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "PercentToBounce" ) )
	{
		m_ffcPercentToBounce.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "RotateParticles" ) )
	{
		m_bRotate = CFxProp_EnumBool::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "MoveParticlesWithSystem" ) )
	{
		m_bObjectSpace = CFxProp_EnumBool::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "MinAngularVelocity" ) )
	{
		m_fMinAngularVelocity = MATH_DEGREES_TO_RADIANS(CFxProp_Float::Load(pStream));
	}
	else if( LTStrIEquals( pszName, "MaxAngularVelocity" ) )
	{
		m_fMaxAngularVelocity = MATH_DEGREES_TO_RADIANS(CFxProp_Float::Load(pStream));
	}
	else if( LTStrIEquals( pszName, "Streak") )
	{
		m_bStreak = CFxProp_EnumBool::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "StreakScale") )
	{
		m_fStreakScale = CFxProp_Float::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "Drag" ) )
	{
		m_ffcDrag.Load(pStream, pCurveData);
	}
	else if( LTStrIEquals( pszName, "BounceStrength" ) )
	{
		m_fBounceStrength = LTMAX(CFxProp_Float::Load(pStream), 0.0f);
	}
	else if( LTStrIEquals( pszName, "Solid" ) )
	{
		m_bSolid = CFxProp_EnumBool::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "TranslucentLight" ) )
	{
		m_bTranslucentLight = CFxProp_EnumBool::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "EnableBounceScale" ) )
	{
		m_bEnableBounceScale = CFxProp_EnumBool::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "VelocityType" ) )
	{
		m_eVelocityType = (ePSVelocityType)CFxProp_Enum::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "InfiniteLife" ) )
	{
		m_bInfiniteLife = CFxProp_EnumBool::Load(pStream);
	}
	else if( LTStrIEquals( pszName, "SplatEffect" ) )
	{
		m_pszSplatEffect = CFxProp_String::Load(pStream, pszStringTable);
	}
	else if( LTStrIEquals( pszName, "SplatPercent" ) )
	{
		m_ffcPercentToSplat.Load(pStream, pCurveData);
	}
	else
	{
		return CBaseFXProps::LoadProperty(pStream, pszName, pszStringTable, pCurveData);
	}

	return true;	
}
Example #23
//determines which polygon would be the best to split on. Returns the integer index of that
//polygon. Note that it assumes that the list contains at least one element. In addition,
//it will return the number of polygons that lie on the same plane to make allocation
//of nodes easier, along with how many will end up on the front and back
static uint32 FindBestSplitPlane(PrePolyArray& PolyList, 
								 uint32& nNumLieOn, uint32& nNumFront, uint32& nNumBack)
{
	ASSERT(PolyList.GetSize() > 0);

	uint32 nBestIndex = 0;
	uint32 nBestScore = 0xFFFFFFFF;

	//plane information
	PVector vNormal;
	PReal fPlaneDist;

	//counts
	uint32 nFront, nBack, nSplit, nOn;

	//the poly classification
	uint32 nType;

	//the number of polygons to test
	uint32 nNumPolies = PolyList.GetSize();

	//the number of polygons to skip over. Currently just do 2 * sqrt(n)
	//samples, so 20 out of 100 polygons will be sampled, etc. This
	//speeds up BSP generation time significantly.
	uint32 nPolyInc = LTMAX(1, (uint32)(nNumPolies / sqrt(4.0 * nNumPolies)));

	for(uint32 nCurrPoly = 0; nCurrPoly < nNumPolies; nCurrPoly += nPolyInc)
	{
		fPlaneDist	= PolyList[nCurrPoly]->Dist();
		vNormal		= PolyList[nCurrPoly]->Normal();

		//reset the info
		nFront = nBack = nSplit = nOn = 0;

		//now need to count up the split information
		for(uint32 nTestPoly = 0; nTestPoly < PolyList.GetSize(); nTestPoly++)
		{
			//skip over the current poly
			if(nTestPoly == nCurrPoly)
			{
				nOn++;
				continue;
			}

			nType = ClassifyPoly(vNormal, fPlaneDist, PolyList[nTestPoly]);

			if(nType == PLANE_SPAN)
				nSplit++;
			else if(nType == PLANE_FRONT)
				nFront++;
			else if(nType == PLANE_BACK)
				nBack++;
			else
				nOn++;
		}

		//ok, figure out the score
		uint32 nScore = CalcSplitScore(nSplit, nFront, nBack);

		if(nScore < nBestScore)
		{
			nBestScore	= nScore;
			nBestIndex	= nCurrPoly;
			nNumLieOn	= nOn;
			nNumFront	= nFront + nSplit;
			nNumBack	= nBack + nSplit;
		}
	}

	return nBestIndex;
}
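//A minimal sketch (not engine code) of the sampling stride used above: for n polygons the
//stride is n / sqrt(4n) = sqrt(n) / 2, so roughly 2 * sqrt(n) candidate split planes are
//scored (for example, a stride of 5 and 20 samples when n = 100).
static uint32 ComputeSplitSampleStride(uint32 nNumPolies)
{
	if(nNumPolies == 0)
		return 1;
	return LTMAX(1, (uint32)(nNumPolies / sqrt(4.0 * nNumPolies)));
}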
static float TSOpMax(const float* pVars, TSOp oP1, TSOp oP2)
{
	return LTMAX(pVars[oP1], pVars[oP2]);
}
Example #25
void CHUDBar::Update(float x,float y, float fillW, float maxW, float h)
{
	float capW = h/2.0f;
	float barW = LTMAX(maxW - (2.0f * capW),0.0f);

	float barLeft = x + capW;
	float barRight = barLeft + barW;	

	if (fillW < capW)
	{
		//width of bar is less than endcap width

		//draw partial left endcap
		float capUw = (fillW/capW) / 4.0f;
		DrawPrimSetXYWH(m_Poly[0],x,y,fillW,h);
		SetupQuadUVs(m_Poly[0], m_Bar, 0.0f,0.0f,capUw,0.5f);
		DrawPrimSetXYWH(m_Poly[3],x+fillW,y,(capW-fillW),h);
		SetupQuadUVs(m_Poly[3], m_Bar, capUw,0.5f,0.25f-capUw,0.5f);

		//hide full armor bars and right endcap
		DrawPrimSetXYWH(m_Poly[1],-1.0f,-1.0f,0.0f,0.0f);
		DrawPrimSetXYWH(m_Poly[2],-1.0f,-1.0f,0.0f,0.0f);

		//draw empty armor bars and right endcap
		DrawPrimSetXYWH(m_Poly[4],barLeft,y, barW,h);
		DrawPrimSetXYWH(m_Poly[5],barRight,y, capW,h);
		SetupQuadUVs(m_Poly[5], m_Bar, 0.75f,0.5f,0.25f,0.5f);
	}
	else
	{
		//draw full left endcap
		DrawPrimSetXYWH(m_Poly[0],x,y,capW,h);
		SetupQuadUVs(m_Poly[0], m_Bar, 0.0f,0.0f,0.25f,0.5f);
		DrawPrimSetXYWH(m_Poly[3],-1.0f,-1.0f,0.0f,0.0f);
		SetupQuadUVs(m_Poly[3], m_Bar, 0.0f,0.5f,0.25f,0.5f);

		if (fillW < (capW + barW) )
		{
			x += fillW;
			
			//draw partial bar
			DrawPrimSetXYWH(m_Poly[1],barLeft,y,x-barLeft,h);
			DrawPrimSetXYWH(m_Poly[4],x,y,barRight-x,h);

			//draw empty right endcap
			DrawPrimSetXYWH(m_Poly[2],-1.0f,-1.0f,0.0f,0.0f);
			DrawPrimSetXYWH(m_Poly[5],barRight,y, capW,h);
			SetupQuadUVs(m_Poly[5], m_Bar, 0.75f,0.5f,0.25f,0.5f);
		}
		else
		{
			//draw full bar
			DrawPrimSetXYWH(m_Poly[1],barLeft,y, barW,h);
			DrawPrimSetXYWH(m_Poly[4],-1.0f,-1.0f,0.0f,0.0f);

			if (fillW < maxW)
			{
				//draw partial right endcap
				float partW = maxW - fillW;
				float capUw = (partW/capW) / 4.0f;
				DrawPrimSetXYWH(m_Poly[2],barRight,y, capW-partW,h);
				SetupQuadUVs(m_Poly[2], m_Bar, 0.75f,0.0f,0.25f-capUw,0.5f);
				DrawPrimSetXYWH(m_Poly[5],x+fillW,y,partW,h);
				SetupQuadUVs(m_Poly[5], m_Bar, 1.0f-capUw,0.5f,capUw,0.5f);
			}
			else
			{
				//draw full right endcap
				DrawPrimSetXYWH(m_Poly[2],barRight,y, capW,h);
				SetupQuadUVs(m_Poly[2], m_Bar, 0.75f,0.0f,0.25f,0.5f);
				DrawPrimSetXYWH(m_Poly[5],-1.0f,-1.0f,0.0f,0.0f);
			}

		}
	}
}
Example #26
void CPolyGridFX::LoadDampenImage(uint32 nPGWidth, uint32 nPGHeight)
{
	if (!m_sDampenImage.size( ))
	{
		return;
	}

	//ok, now try and load the image
	uint32 nWidth, nHeight;
	uint8* pData;

	if(m_pClientDE->CreateHeightmapFromBitmap(m_sDampenImage.c_str( ), &nWidth, &nHeight, &pData) != LT_OK)
		return;
	
	//alright, now we need to do some filtering to generate our final dampen map
	m_DampenBuffer.Resize(nPGWidth, nPGHeight);

	//find out our dimensions with respect to the image
	float fXSize = (float)nWidth / (float)nPGWidth;
	float fYSize = (float)nHeight / (float)nPGHeight;

	float fSampleArea = fXSize * fYSize;

	//now run through for every sample in the dampen buffer
	for(uint32 nY = 0; nY < nPGHeight; nY++)
	{
		for(uint32 nX = 0; nX < nPGWidth; nX++)
		{
			//ok, now we build up a rectangle and filter the image buffer through
			//this rectangle to find the final value
			float fXMin = nX * fXSize;
			float fYMin = (nPGHeight - 1 - nY) * fYSize;
			float fXMax = fXMin + fXSize;
			float fYMax = fYMin + fYSize;

			//now we can find the extents to filter into this rectangle
			uint32 nXMin = (uint32)fXMin;
			uint32 nYMin = (uint32)fYMin;
			uint32 nXMax = LTMIN((uint32)fXMax, nWidth - 1);
			uint32 nYMax = LTMIN((uint32)fYMax, nHeight - 1);

			float fVal = 0.0f;

			for(uint32 nIY = nYMin; nIY <= nYMax; nIY++)
			{
				for(uint32 nIX = nXMin; nIX <= nXMax; nIX++)
				{
					//now we have a rectangle of width/height of one at this position,
					//so we need to see how much intersects with the bounding rect
					float fIXMin = LTMAX(fXMin, (float)nIX);
					float fIYMin = LTMAX(fYMin, (float)nIY);
					float fIXMax = LTMIN(fXMax, (float)nIX + 1.0f);
					float fIYMax = LTMIN(fYMax, (float)nIY + 1.0f);

					//figure out how much of the rectangle is occupied by this
					float fWeight = (fIXMax - fIXMin) * (fIYMax - fIYMin) / fSampleArea;

					//add our contribution
					fVal += fWeight * pData[nIY * nWidth + nIX];
				}
			}

			//save this value
			m_DampenBuffer.Get(nX, nY) = (uint8)fVal;
		}
	}

	//free our heightmap
	m_pClientDE->FreeHeightmap(pData);
}
// Wrap the textures, starting at a poly index
void CRVTrackerTextureWrap::WrapTexture(CTWPolyInfo *pPoly, const CVector &vWrapDir, CTextExtents &cExtents) const
{
	// Mark this poly as wrapped
	pPoly->m_bTouched = TRUE;

	CTexturedPlane& Texture = pPoly->m_pPoly->GetTexture(GetCurrTexture());

	// Get the texture space
	LTVector vWrapO = Texture.GetO();
	LTVector vWrapP = Texture.GetP();
	LTVector vWrapQ = Texture.GetQ();

	// Get the texture offset projections
	float fWrapOdotP = vWrapO.Dot(vWrapP);
	float fWrapOdotQ = vWrapO.Dot(vWrapQ);

	// Update the texturing extents
	for (uint32 nExtentLoop = 0; nExtentLoop < pPoly->m_aEdges.GetSize(); ++nExtentLoop)
	{
		LTVector vEdgePt = pPoly->m_aEdges[nExtentLoop]->m_aPt[0];

		float fCurU = vWrapP.Dot(vEdgePt) - fWrapOdotP;
		float fCurV = vWrapQ.Dot(vEdgePt) - fWrapOdotQ;

		cExtents.m_fMinU = LTMIN(fCurU, cExtents.m_fMinU);
		cExtents.m_fMaxU = LTMAX(fCurU, cExtents.m_fMaxU);
		cExtents.m_fMinV = LTMIN(fCurV, cExtents.m_fMinV);
		cExtents.m_fMaxV = LTMAX(fCurV, cExtents.m_fMaxV);
	}

	CMoArray<uint32> aNeighbors;
	CMoArray<float> aDots;

	// Insert the neighbors into a list in dot-product order
	for (uint32 nNeighborLoop = 0; nNeighborLoop < pPoly->m_aNeighbors.GetSize(); ++nNeighborLoop)
	{
		CTWPolyInfo *pNeighbor = pPoly->m_aNeighbors[nNeighborLoop];

		// Skip edges that don't have a neighbor
		if (!pNeighbor)
			continue;

		// Skip neighbors that are already wrapped
		if (pNeighbor->m_bTouched)
			continue;

		// Get our dot product
		float fCurDot = vWrapDir.Dot(pPoly->m_aEdges[nNeighborLoop]->m_Plane.m_Normal);

		if ((m_bRestrictWalkDir) && (fCurDot < 0.707f))
			continue;

		// Mark this neighbor as touched (to avoid later polygons pushing it onto the stack)
		pNeighbor->m_bTouched = TRUE;

		// Insert it into the list
		for (uint32 nInsertLoop = 0; nInsertLoop < aNeighbors.GetSize(); ++nInsertLoop)
		{
			if (fCurDot > aDots[nInsertLoop])
				break;
		}
		aDots.Insert(nInsertLoop, fCurDot);
		aNeighbors.Insert(nInsertLoop, nNeighborLoop);
	}

	// Recurse through its neighbors
	for (uint32 nWrapLoop = 0; nWrapLoop < aNeighbors.GetSize(); ++nWrapLoop)
	{
		CTWPolyInfo *pNeighbor = pPoly->m_aNeighbors[aNeighbors[nWrapLoop]];
		CTWEdgeInfo *pEdge = pPoly->m_aEdges[aNeighbors[nWrapLoop]];

		//////////////////////////////////////////////////////////////////////////////
		// Wrap this neighbor

		// Create a matrix representing the basis of the polygon in relation to this edge
		LTMatrix mPolyBasis;
		mPolyBasis.SetTranslation(0.0f, 0.0f, 0.0f);
		mPolyBasis.SetBasisVectors(&pEdge->m_vDir, &pPoly->m_pPoly->m_Plane.m_Normal, &pEdge->m_Plane.m_Normal);

		// Create a new basis for the neighbor polygon
		LTMatrix mNeighborBasis;
		LTVector vNeighborForward;
		vNeighborForward = pNeighbor->m_pPoly->m_Plane.m_Normal.Cross(pEdge->m_vDir);
		// Just to be sure..
		vNeighborForward.Norm();
		mNeighborBasis.SetTranslation(0.0f, 0.0f, 0.0f);
		mNeighborBasis.SetBasisVectors(&pEdge->m_vDir, &pNeighbor->m_pPoly->m_Plane.m_Normal, &vNeighborForward);

		// Create a rotation matrix from here to there
		LTMatrix mRotation;
		mRotation = mNeighborBasis * ~mPolyBasis;

		// Rotate the various vectors
		LTVector vNewP;
		LTVector vNewQ;
		LTVector vNewDir;

		mRotation.Apply3x3(vWrapP, vNewP);
		mRotation.Apply3x3(vWrapQ, vNewQ);
		mRotation.Apply3x3(vWrapDir, vNewDir);

		// Rotate the texture basis if we're following a path
		if (m_nWrapStyle == k_WrapPath)
		{
			LTVector vNeighborEdgeDir;
			if (GetSimilarEdgeDir(pNeighbor, vNewDir, vNeighborEdgeDir, 0.707f))
			{
				LTMatrix mRotatedNeighbor;
				LTVector vNeighborRight;
				vNeighborRight = vNeighborEdgeDir.Cross(pNeighbor->m_pPoly->m_Plane.m_Normal);
				vNeighborRight.Norm();
				// Make sure we're pointing the right way...
				if (vNeighborRight.Dot(pEdge->m_vDir) < 0.0f)
					vNeighborRight = -vNeighborRight;
				mRotatedNeighbor.SetTranslation(0.0f, 0.0f, 0.0f);
				mRotatedNeighbor.SetBasisVectors(&vNeighborRight, &pNeighbor->m_pPoly->m_Plane.m_Normal, &vNeighborEdgeDir);
				// Build a basis based on an edge from the current polygon 
				LTVector vBestPolyEdge;
				GetSimilarEdgeDir(pPoly, vWrapDir, vBestPolyEdge);
				LTVector vPolyRight = vBestPolyEdge.Cross(pNeighbor->m_pPoly->m_Plane.m_Normal);
				vPolyRight.Norm();
				// Make sure we're pointing the right way...
				if (vPolyRight.Dot(pEdge->m_vDir) < 0.0f)
					vPolyRight = -vPolyRight;
				// Build the poly edge matrix
				LTMatrix mPolyEdgeBasis;
				mPolyEdgeBasis.SetTranslation(0.0f, 0.0f, 0.0f);
				mPolyEdgeBasis.SetBasisVectors(&vPolyRight, &pNeighbor->m_pPoly->m_Plane.m_Normal, &vBestPolyEdge);

				// Get a matrix from here to there
				LTMatrix mRotator;
				mRotator = mRotatedNeighbor * ~mPolyEdgeBasis;
				// Rotate the texture basis
				mRotator.Apply3x3(vNewP);
				mRotator.Apply3x3(vNewQ);
				// And use the new edge as the new direction
				vNewDir = vNeighborEdgeDir;
			}

			// Remove skew from vNewP/vNewQ
			if ((float)fabs(vNewP.Dot(vNewQ)) > 0.001f)
			{
				float fMagP = vNewP.Mag();
				float fMagQ = vNewQ.Mag();
				vNewQ *= 1.0f / fMagQ;
				vNewP -= vNewQ * vNewQ.Dot(vNewP);
				vNewP.Norm(fMagP);
				vNewQ *= fMagQ;
			}
		}

		// Get the first edge point..
		CVector vEdgePt = pEdge->m_aPt[0];

		// Calculate the texture coordinate at this point
		float fWrapU = vWrapP.Dot(vEdgePt) - fWrapOdotP;
		float fWrapV = vWrapQ.Dot(vEdgePt) - fWrapOdotQ;

		// Build the new offset
		float fNewOdotP = vNewP.Dot(vEdgePt) - fWrapU;
		float fNewOdotQ = vNewQ.Dot(vEdgePt) - fWrapV;
		LTVector vNewO;
		vNewO.Init();
		float fNewPMag = vNewP.MagSqr();
		if (fNewPMag > 0.0f)
			vNewO += vNewP * (fNewOdotP / fNewPMag);
		float fNewQMag = vNewQ.MagSqr();
		if (fNewQMag > 0.0f)
			vNewO += vNewQ * (fNewOdotQ / fNewQMag);

		pNeighbor->m_pPoly->SetTextureSpace(GetCurrTexture(), vNewO, vNewP, vNewQ);

		// Recurse into this neighbor
		WrapTexture(pNeighbor, vNewDir, cExtents);
	}
}
Example #28
void CLiteObjectMgr::ShowInfo(uint32 nUpdateTime)
{
	g_pLTServer->CPrint("\nLiteObjectMgr stats : (%d ticks)", nUpdateTime);

	typedef std::set<HCLASS> TClassSet;
	TClassSet aClasses;

	TObjectList::const_iterator iCurObject;

	// Set up the class set
	iCurObject = m_aActiveObjects.begin();
	for (; iCurObject != m_aActiveObjects.end(); ++iCurObject)
	{
		aClasses.insert((*iCurObject)->GetClass());
	}

	iCurObject = m_aInactiveObjects.begin();
	for (; iCurObject != m_aInactiveObjects.end(); ++iCurObject)
	{
		aClasses.insert((*iCurObject)->GetClass());
	}

	TClassSet::const_iterator iCurClass;

	// Figure out how wide the class name field should be
	uint32 nMaxClassNameWidth = 6;
	iCurClass = aClasses.begin();
	for (; iCurClass != aClasses.end(); ++iCurClass)
	{
		char aClassNameBuff[256];
		g_pLTServer->GetClassName(*iCurClass, aClassNameBuff, sizeof(aClassNameBuff));
		nMaxClassNameWidth = LTMAX(nMaxClassNameWidth, strlen(aClassNameBuff));
	}

	// Print a header
	char *aClassUnderline = (char*)alloca(nMaxClassNameWidth + 1);
	memset(aClassUnderline, '-', nMaxClassNameWidth);
	aClassUnderline[nMaxClassNameWidth] = 0;
	g_pLTServer->CPrint("   -%s-|--------|----------|-------", aClassUnderline);
	g_pLTServer->CPrint("    Class %*s | Active | Inactive | Total", nMaxClassNameWidth - 6,"");
	g_pLTServer->CPrint("   -%s-|--------|----------|-------", aClassUnderline);

	// Run through the classes and accumulate stats
	iCurClass = aClasses.begin();
	for (; iCurClass != aClasses.end(); ++iCurClass)
	{
		uint32 nNumActive = 0;
		iCurObject = m_aActiveObjects.begin();
		for (; iCurObject != m_aActiveObjects.end(); ++iCurObject)
		{
			if ((*iCurObject)->GetClass() == *iCurClass)
				++nNumActive;
		}

		uint32 nNumInactive = 0;
		iCurObject = m_aInactiveObjects.begin();
		for (; iCurObject != m_aInactiveObjects.end(); ++iCurObject)
		{
			if ((*iCurObject)->GetClass() == *iCurClass)
				++nNumInactive;
		}
		
		char aClassNameBuff[256];
		g_pLTServer->GetClassName(*iCurClass, aClassNameBuff, sizeof(aClassNameBuff));

		// Here's your output...
		g_pLTServer->CPrint("    %-*s |   %4d |     %4d |  %4d",
			nMaxClassNameWidth, aClassNameBuff, nNumActive, nNumInactive, nNumActive + nNumInactive);
	}

	// Print the totals
	g_pLTServer->CPrint("   -%s-|--------|----------|-------", aClassUnderline);
	g_pLTServer->CPrint("    %-*s |   %4d |     %4d |  %4d",
		nMaxClassNameWidth, "Total", 
		GetNumActiveObjects(), GetNumInactiveObjects(), GetNumObjects());
}