Code Example #1
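// SourceMod native: writes the entity indexes of all in-game clients within
// visibility or audibility range of the origin in params[1] (selected by the
// ClientRangeType in params[2]) into the output array at params[3], up to
// params[4] entries, and returns how many indexes were written.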
static cell_t GetClientsInRange(IPluginContext *pContext, const cell_t *params)
{
	cell_t *origin;
	pContext->LocalToPhysAddr(params[1], &origin);

	Vector vOrigin(sp_ctof(origin[0]), sp_ctof(origin[1]), sp_ctof(origin[2]));

	ClientRangeType rangeType = (ClientRangeType) params[2];

	CBitVec<ABSOLUTE_PLAYER_LIMIT> players;
	engine->Message_DetermineMulticastRecipients(rangeType == ClientRangeType::Audibility, vOrigin, players);

	cell_t *outPlayers;
	pContext->LocalToPhysAddr(params[3], &outPlayers);

	int maxPlayers = params[4];
	int curPlayers = 0;

	int index = players.FindNextSetBit(0);
	while (index > -1 && curPlayers < maxPlayers)
	{
		int entidx = index + 1;
		CPlayer *pPlayer = g_Players.GetPlayerByIndex(entidx);
		if (pPlayer && pPlayer->IsInGame())
		{
			outPlayers[curPlayers++] = entidx;
		}

		index = players.FindNextSetBit(index + 1);
	}

	return curPlayers;
}
Code Example #2
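// Removes every recipient whose player slot (bit index + 1) is set in playerbits.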
void CRecipientFilter::RemovePlayersFromBitMask( CBitVec< ABSOLUTE_PLAYER_LIMIT >& playerbits )
{
	int index = playerbits.FindNextSetBit( 0 );

	while ( index > -1 )
	{
		CBasePlayer *pPlayer = UTIL_PlayerByIndex( index + 1 );
		if ( pPlayer )
		{
			RemoveRecipient( pPlayer );
		}

		index = playerbits.FindNextSetBit( index + 1 );
	}
}
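
The loop above is the CBitVec iteration idiom that recurs throughout these examples: FindNextSetBit( 0 ) locates the first set bit, FindNextSetBit( index + 1 ) advances to the next one, and a return value of -1 ends the walk. Below is a minimal stand-alone sketch of the same pattern, assuming only the SDK's CBitVec header and Msg(); the function name and output are illustrative only:

static void PrintSetPlayerSlots( CBitVec<ABSOLUTE_PLAYER_LIMIT> &playerbits )
{
	int index = playerbits.FindNextSetBit( 0 );
	while ( index > -1 )
	{
		// bit indexes are 0-based; engine player/entity indexes are 1-based
		Msg( "player slot %d is set\n", index + 1 );
		index = playerbits.FindNextSetBit( index + 1 );
	}
}
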
Code Example #3
//-----------------------------------------------------------------------------
// Purpose: Used at level change and round start to re-calculate which holiday is active
//-----------------------------------------------------------------------------
void UTIL_CalculateHolidays()
{
	s_HolidaysActive.ClearAll();

	CRTime::UpdateRealTime();
	for ( int iHoliday = 0; iHoliday < kHolidayCount; iHoliday++ )
	{
		if ( EconHolidays_IsHolidayActive( iHoliday, CRTime::RTime32TimeCur() ) )
		{
			s_HolidaysActive.Set( iHoliday );
		}
	}

	s_HolidaysCalculated = true;
}
Code Example #4
File: posedebugger.cpp  Project: Cre3per/hl2sdk-csgo
bool CPoseDebuggerImpl::IsModelShown( int iEntNum ) const
{
	Assert( iEntNum < MAX_EDICTS );
	if ( iEntNum >= 0 && iEntNum < MAX_EDICTS )
		return m_uiMaskShowModels.IsBitSet( iEntNum );
	else
		return false;
}
Code Example #5
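// Hands out a free particle index: if there are no holes it allocates off the end,
// otherwise it reuses the first free slot found by inverting the in-use bit vector.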
int AllocateBlobNetworkBypassIndex( void )
{
	int retval;
	if( g_pBlobNetworkBypass->iNumParticlesAllocated == g_pBlobNetworkBypass->iHighestIndexUsed )
	{
		//no holes in the allocations, allocate from the end
		retval = g_pBlobNetworkBypass->iHighestIndexUsed;
		++g_pBlobNetworkBypass->iHighestIndexUsed;
	}
	else
	{
		CBitVec<BLOB_MAX_LEVEL_PARTICLES> notUsed;
		g_pBlobNetworkBypass->bCurrentlyInUse.Not( &notUsed );
		retval = notUsed.FindNextSetBit( 0 );
		Assert( retval < (int)g_pBlobNetworkBypass->iHighestIndexUsed );
	}

	++g_pBlobNetworkBypass->iNumParticlesAllocated;

	g_pBlobNetworkBypass->bCurrentlyInUse.Set( retval );
	return retval;
}
Code Example #6
bool UTIL_IsHolidayActive( /*EHoliday*/ int eHoliday )
{
#ifdef USES_ECON_ITEMS
	if ( IsX360() )
		return false;

	if ( !s_HolidaysCalculated )
	{
		UTIL_CalculateHolidays();
	}

	return s_HolidaysActive.IsBitSet( eHoliday );
#else
	return false;
#endif
}
Code Example #7
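// Rewinds an NPC's origin, angles, hull size and animation state to flTargetTime
// (interpolating between history records where possible) and notes what changed
// so FinishLagCompensation can restore it afterwards.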
void CLagCompensationManager::BacktrackEntity( CAI_BaseNPC *pEntity, float flTargetTime )
{
	Vector org, mins, maxs;
	QAngle ang;

	VPROF_BUDGET( "BacktrackEntity", "CLagCompensationManager" );

	// get track history of this entity
	int index = pEntity->GetAIIndex();
	CUtlFixedLinkedList< LagRecord > *track = &m_EntityTrack[ index ];

	// check if we have at least one entry
	if ( track->Count() <= 0 )
		return;

	int curr = track->Head();

	LagRecord *prevRecord = NULL;
	LagRecord *record = NULL;

	Vector prevOrg = pEntity->GetLocalOrigin();
	
	// Walk context looking for any invalidating event
	while( track->IsValidIndex(curr) )
	{
		// remember last record
		prevRecord = record;

		// get next record
		record = &track->Element( curr );

		if ( !(record->m_fFlags & LC_ALIVE) )
		{
			// entity must be alive, lost track
			return;
		}

		Vector delta = record->m_vecOrigin - prevOrg;
		if ( delta.LengthSqr() > LAG_COMPENSATION_TELEPORTED_DISTANCE_SQR )
		{
			// lost track, moved too far (may have teleported)
			return; 
		}

		// did we find a record at or before the target time?
		if ( record->m_flSimulationTime <= flTargetTime )
			break; // found it, stop walking

		prevOrg = record->m_vecOrigin;

		// go one step back in time
		curr = track->Next( curr );
	}

	Assert( record );

	if ( !record )
	{
		if ( sv_unlag_debug.GetBool() )
		{
			DevMsg( "No valid positions in history for BacktrackEntity ( %s )\n", pEntity->GetClassname() );
		}

		return; // that should never happen
	}

	float frac = 0.0f;
	if ( prevRecord && 
		 (record->m_flSimulationTime < flTargetTime) &&
		 (record->m_flSimulationTime < prevRecord->m_flSimulationTime) )
	{
		// we didn't find the exact time but have a valid previous record
		// so interpolate between these two records;

		Assert( prevRecord->m_flSimulationTime > record->m_flSimulationTime );
		Assert( flTargetTime < prevRecord->m_flSimulationTime );

		// calc fraction between both records
		frac = ( flTargetTime - record->m_flSimulationTime ) / 
			( prevRecord->m_flSimulationTime - record->m_flSimulationTime );

		Assert( frac > 0 && frac < 1 ); // should never extrapolate

		ang  = Lerp( frac, record->m_vecAngles, prevRecord->m_vecAngles );
		org  = Lerp( frac, record->m_vecOrigin, prevRecord->m_vecOrigin  );
		mins = Lerp( frac, record->m_vecMins, prevRecord->m_vecMins  );
		maxs = Lerp( frac, record->m_vecMaxs, prevRecord->m_vecMaxs );
	}
	else
	{
		// we found the exact record or no other record to interpolate with
		// just copy these values since they are the best we have
		ang  = record->m_vecAngles;
		org  = record->m_vecOrigin;
		mins = record->m_vecMins;
		maxs = record->m_vecMaxs;
	}

	// See if this is still a valid position for us to teleport to
	if ( sv_unlag_fixstuck.GetBool() )
	{
		// Try to move to the wanted position from our current position.
		trace_t tr;
		UTIL_TraceEntity( pEntity, org, org, MASK_NPCSOLID, &tr );
		if ( tr.startsolid || tr.allsolid )
		{
			if ( sv_unlag_debug.GetBool() )
				DevMsg( "WARNING: BackupEntity trying to back entity into a bad position - %s\n", pEntity->GetClassname() );

			CBasePlayer *pHitPlayer = dynamic_cast<CBasePlayer *>( tr.m_pEnt );

			// don't lag compensate the current player
			if ( pHitPlayer && ( pHitPlayer != m_pCurrentPlayer ) )	
			{
				// If we haven't backtracked this player, do it now
				// this deliberately ignores WantsLagCompensationOnEntity.
				if ( !m_RestorePlayer.Get( pHitPlayer->entindex() - 1 ) )
				{
					// prevent recursion - save a copy of m_RestorePlayer,
					// pretend that this player is off-limits
					int pl_index = pEntity->entindex() - 1;

					// Temp turn this flag on
					m_RestorePlayer.Set( pl_index );

					BacktrackPlayer( pHitPlayer, flTargetTime );

					// Remove the temp flag
					m_RestorePlayer.Clear( pl_index );
				}				
			}
			else
			{
				CAI_BaseNPC *pHitEntity = dynamic_cast<CAI_BaseNPC *>( tr.m_pEnt );
				if ( pHitEntity )
				{
					CAI_BaseNPC *pNPC = NULL;
					CAI_BaseNPC **ppAIs = g_AI_Manager.AccessAIs();
					int nAIs = g_AI_Manager.NumAIs();
					for ( int i = 0; i < nAIs; i++ ) // we'll have to find this entity's index though :(
					{
						pNPC = ppAIs[i];
						if ( pNPC == pHitEntity )
							break;
					}
					// If we haven't backtracked this player, do it now
					// this deliberately ignores WantsLagCompensationOnEntity.
					if ( pNPC && !m_RestoreEntity.Get( pNPC->GetAIIndex() ) )
					{
						// prevent recursion - save a copy of m_RestoreEntity,
						// pretend that this player is off-limits

						// Temp turn this flag on
						m_RestoreEntity.Set( pNPC->GetAIIndex() );

						BacktrackEntity( pHitEntity, flTargetTime );

						// Remove the temp flag
						m_RestoreEntity.Clear( pNPC->GetAIIndex() );
					}
				}
			}

			// now trace us back as far as we can go
			UTIL_TraceEntity( pEntity, pEntity->GetLocalOrigin(), org, MASK_NPCSOLID, &tr );

			if ( tr.startsolid || tr.allsolid )
			{
				// Our starting position is bogus

				if ( sv_unlag_debug.GetBool() )
					DevMsg( "Backtrack failed completely, bad starting position\n" );
			}
			else
			{
				// We can get to a valid place, but not all the way to the target
				Vector vPos;
				VectorLerp( pEntity->GetLocalOrigin(), org, tr.fraction * g_flFractionScale, vPos );
				
				// This is as close as we're going to get
				org = vPos;

				if ( sv_unlag_debug.GetBool() )
					DevMsg( "Backtrack got most of the way\n" );
			}
		}
	}
	
	// See if this represents a change for the entity
	int flags = 0;
	LagRecord *restore = &m_EntityRestoreData[ index ];
	LagRecord *change  = &m_EntityChangeData[ index ];

	QAngle angdiff = pEntity->GetLocalAngles() - ang;
	Vector orgdiff = pEntity->GetLocalOrigin() - org;

	// Always remember the pristine simulation time in case we need to restore it.
	restore->m_flSimulationTime = pEntity->GetSimulationTime();

	if ( angdiff.LengthSqr() > LAG_COMPENSATION_EPS_SQR )
	{
		flags |= LC_ANGLES_CHANGED;
		restore->m_vecAngles = pEntity->GetLocalAngles();
		pEntity->SetLocalAngles( ang );
		change->m_vecAngles = ang;
	}

	// Use absolute equality here
	if ( ( mins != pEntity->WorldAlignMins() ) ||
		 ( maxs != pEntity->WorldAlignMaxs() ) )
	{
		flags |= LC_SIZE_CHANGED;
		restore->m_vecMins = pEntity->WorldAlignMins() ;
		restore->m_vecMaxs = pEntity->WorldAlignMaxs();
		pEntity->SetSize( mins, maxs );
		change->m_vecMins = mins;
		change->m_vecMaxs = maxs;
	}

	// Note, do origin at end since it causes a relink into the k/d tree
	if ( orgdiff.LengthSqr() > LAG_COMPENSATION_EPS_SQR )
	{
		flags |= LC_ORIGIN_CHANGED;
		restore->m_vecOrigin = pEntity->GetLocalOrigin();
		pEntity->SetLocalOrigin( org );
		change->m_vecOrigin = org;
	}

	// Sorry for the loss of the optimization for the case of people
	// standing still, but you breathe even on the server.
	// This is quicker than actually comparing all bazillion floats.
	flags |= LC_ANIMATION_CHANGED;
	restore->m_masterSequence = pEntity->GetSequence();
	restore->m_masterCycle = pEntity->GetCycle();

	bool interpolationAllowed = false;
	if( prevRecord && (record->m_masterSequence == prevRecord->m_masterSequence) )
	{
		// If the master state changes, all layers will be invalid too, so don't interp (ya know, interp barely ever happens anyway)
		interpolationAllowed = true;
	}
	
	////////////////////////
	// First do the master settings
	bool interpolatedMasters = false;
	if( frac > 0.0f && interpolationAllowed )
	{
		interpolatedMasters = true;
		pEntity->SetSequence( Lerp( frac, record->m_masterSequence, prevRecord->m_masterSequence ) );
		pEntity->SetCycle( Lerp( frac, record->m_masterCycle, prevRecord->m_masterCycle ) );

		if( record->m_masterCycle > prevRecord->m_masterCycle )
		{
			// the older record is higher in frame than the newer, it must have wrapped around from 1 back to 0
			// add one to the newer so it is lerping from .9 to 1.1 instead of .9 to .1, for example.
			float newCycle = Lerp( frac, record->m_masterCycle, prevRecord->m_masterCycle + 1 );
			pEntity->SetCycle(newCycle < 1 ? newCycle : newCycle - 1 );// and make sure .9 to 1.2 does not end up 1.05
		}
		else
		{
			pEntity->SetCycle( Lerp( frac, record->m_masterCycle, prevRecord->m_masterCycle ) );
		}
	}
	if( !interpolatedMasters )
	{
		pEntity->SetSequence(record->m_masterSequence);
		pEntity->SetCycle(record->m_masterCycle);
	}

	////////////////////////
	// Now do all the layers
	int layerCount = pEntity->GetNumAnimOverlays();
	for( int layerIndex = 0; layerIndex < layerCount; ++layerIndex )
	{
		CAnimationLayer *currentLayer = pEntity->GetAnimOverlay(layerIndex);
		if( currentLayer )
		{
			restore->m_layerRecords[layerIndex].m_cycle = currentLayer->m_flCycle;
			restore->m_layerRecords[layerIndex].m_order = currentLayer->m_nOrder;
			restore->m_layerRecords[layerIndex].m_sequence = currentLayer->m_nSequence;
			restore->m_layerRecords[layerIndex].m_weight = currentLayer->m_flWeight;

			bool interpolated = false;
			if( (frac > 0.0f)  &&  interpolationAllowed )
			{
				LayerRecord &recordsLayerRecord = record->m_layerRecords[layerIndex];
				LayerRecord &prevRecordsLayerRecord = prevRecord->m_layerRecords[layerIndex];
				if( (recordsLayerRecord.m_order == prevRecordsLayerRecord.m_order)
					&& (recordsLayerRecord.m_sequence == prevRecordsLayerRecord.m_sequence)
					)
				{
					// We can't interpolate across a sequence or order change
					interpolated = true;
					if( recordsLayerRecord.m_cycle > prevRecordsLayerRecord.m_cycle )
					{
						// the older record is higher in frame than the newer, it must have wrapped around from 1 back to 0
						// add one to the newer so it is lerping from .9 to 1.1 instead of .9 to .1, for example.
						float newCycle = Lerp( frac, recordsLayerRecord.m_cycle, prevRecordsLayerRecord.m_cycle + 1 );
						currentLayer->m_flCycle = newCycle < 1 ? newCycle : newCycle - 1;// and make sure .9 to 1.2 does not end up 1.05
					}
					else
					{
						currentLayer->m_flCycle = Lerp( frac, recordsLayerRecord.m_cycle, prevRecordsLayerRecord.m_cycle  );
					}
					currentLayer->m_nOrder = recordsLayerRecord.m_order;
					currentLayer->m_nSequence = recordsLayerRecord.m_sequence;
					currentLayer->m_flWeight = Lerp( frac, recordsLayerRecord.m_weight, prevRecordsLayerRecord.m_weight  );
				}
			}
			if( !interpolated )
			{
				//Either no interp, or interp failed.  Just use record.
				currentLayer->m_flCycle = record->m_layerRecords[layerIndex].m_cycle;
				currentLayer->m_nOrder = record->m_layerRecords[layerIndex].m_order;
				currentLayer->m_nSequence = record->m_layerRecords[layerIndex].m_sequence;
				currentLayer->m_flWeight = record->m_layerRecords[layerIndex].m_weight;
			}
		}
	}
	
	if ( !flags )
		return; // we didn't change anything

	if ( sv_lagflushbonecache.GetBool() )
		pEntity->InvalidateBoneCache();

	/*char text[256]; Q_snprintf( text, sizeof(text), "time %.2f", flTargetTime );
	pEntity->DrawServerHitboxes( 10 );
	NDebugOverlay::Text( org, text, false, 10 );
	NDebugOverlay::EntityBounds( pEntity, 255, 0, 0, 32, 10 ); */

	m_RestoreEntity.Set( index ); //remember that we changed this entity
	m_bNeedToRestore = true;  // we changed at least one player / entity
	restore->m_fFlags = flags; // we need to restore these flags
	change->m_fFlags = flags; // we have changed these flags

	if( sv_showlagcompensation.GetInt() == 1 )
	{
		pEntity->DrawServerHitboxes(4, true);
	}
}
Code Example #8
// Called during player movement to set up/restore after lag compensation
void CLagCompensationManager::StartLagCompensation( CBasePlayer *player, CUserCmd *cmd )
{
	//DON'T LAG COMP AGAIN THIS FRAME IF THERE'S ALREADY ONE IN PROGRESS
	//IF YOU'RE HITTING THIS THEN IT MEANS THERE'S A CODE BUG
	if ( m_pCurrentPlayer )
	{
		Assert( m_pCurrentPlayer == NULL );
		Warning( "Trying to start a new lag compensation session while one is already active!\n" );
		return;
	}

	// sort out any changes to the AI indexing
	if ( m_bNeedsAIUpdate ) // to be called once per frame... must happen BEFORE lag compensation -
	{// if that happens, that is. If not, it's called at the end of the frame
		m_bNeedsAIUpdate = false;
		UpdateAIIndexes();
	}

	// Assume no players or entities need to be restored
	m_RestorePlayer.ClearAll();
	m_RestoreEntity.ClearAll();
	m_bNeedToRestore = false;

	m_pCurrentPlayer = player;
	
	if ( !player->m_bLagCompensation		// Player not wanting lag compensation
		 || (gpGlobals->maxClients <= 1)	// no lag compensation in single player
		 || !sv_unlag.GetBool()				// disabled by server admin
		 || player->IsBot() 				// not for bots
		 || player->IsObserver()			// not for spectators
		)
		return;

	// NOTE: Put this here so that it won't show up in single player mode.
	VPROF_BUDGET( "StartLagCompensation", VPROF_BUDGETGROUP_OTHER_NETWORKING );
	Q_memset( m_RestoreData, 0, sizeof( m_RestoreData ) );
	Q_memset( m_ChangeData, 0, sizeof( m_ChangeData ) );
	Q_memset( m_EntityRestoreData, 0, sizeof( m_EntityRestoreData ) );
	Q_memset( m_EntityChangeData, 0, sizeof( m_EntityChangeData ) );

	// Get true latency

	// correct is the amount of time we have to correct game time
	float correct = 0.0f;

	INetChannelInfo *nci = engine->GetPlayerNetInfo( player->entindex() ); 

	if ( nci )
	{
		// add network latency
		correct+= nci->GetLatency( FLOW_OUTGOING );
	}

	// calc number of view interpolation ticks - 1
	int lerpTicks = TIME_TO_TICKS( player->m_fLerpTime );

	// add view interpolation latency see C_BaseEntity::GetInterpolationAmount()
	correct += TICKS_TO_TIME( lerpTicks );
	
	// clamp to bounds [0, sv_maxunlag]
	correct = clamp( correct, 0.0f, sv_maxunlag.GetFloat() );

	// correct the tick sent by the player
	int targettick = cmd->tick_count - lerpTicks;

	// calc difference between the tick sent by the player and our latency-based tick
	float deltaTime =  correct - TICKS_TO_TIME(gpGlobals->tickcount - targettick);

	if ( fabs( deltaTime ) > 0.2f )
	{
		// difference between cmd time and latency is too big > 200ms, use time correction based on latency
		// DevMsg("StartLagCompensation: delta too big (%.3f)\n", deltaTime );
		targettick = gpGlobals->tickcount - TIME_TO_TICKS( correct );
	}
	
	// Iterate all active players
	const CBitVec<MAX_EDICTS> *pEntityTransmitBits = engine->GetEntityTransmitBitsForClient( player->entindex() - 1 );
	for ( int i = 1; i <= gpGlobals->maxClients; i++ )
	{
		CBasePlayer *pPlayer = UTIL_PlayerByIndex( i );
		if ( !pPlayer || player == pPlayer )
			continue;

		// Custom checks for if things should lag compensate (based on things like what team the player is on).
		if ( !player->WantsLagCompensationOnEntity( pPlayer, cmd, pEntityTransmitBits ) )
			continue;

		// Move other player back in time
		BacktrackPlayer( pPlayer, TICKS_TO_TIME( targettick ) );
	}

	// also iterate all monsters
	CAI_BaseNPC **ppAIs = g_AI_Manager.AccessAIs();
	int nAIs = g_AI_Manager.NumAIs();

	for ( int i = 0; i < nAIs; i++ )
	{
		CAI_BaseNPC *pNPC = ppAIs[i];
		// Custom checks for if things should lag compensate
		if ( !pNPC || !player->WantsLagCompensationOnEntity( pNPC, cmd, pEntityTransmitBits ) )
			continue;
		
		// Move NPC back in time
		BacktrackEntity( pNPC, TICKS_TO_TIME( targettick ) );
	}
}
Code Example #9
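// Restores every player and NPC that was moved back in time by lag compensation,
// taking care not to stomp on changes the simulation made in the meantime.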
void CLagCompensationManager::FinishLagCompensation( CBasePlayer *player )
{
	VPROF_BUDGET_FLAGS( "FinishLagCompensation", VPROF_BUDGETGROUP_OTHER_NETWORKING, BUDGETFLAG_CLIENT|BUDGETFLAG_SERVER );

	m_pCurrentPlayer = NULL;

	if ( !m_bNeedToRestore )
		return; // no player was changed at all

	// Iterate all active players
	for ( int i = 1; i <= gpGlobals->maxClients; i++ )
	{
		int pl_index = i - 1;
		
		if ( !m_RestorePlayer.Get( pl_index ) )
		{
			// player wasn't changed by lag compensation
			continue;
		}

		CBasePlayer *pPlayer = UTIL_PlayerByIndex( i );
		if ( !pPlayer )
		{
			continue;
		}

		LagRecord *restore = &m_RestoreData[ pl_index ];
		LagRecord *change  = &m_ChangeData[ pl_index ];

		bool restoreSimulationTime = false;

		if ( restore->m_fFlags & LC_SIZE_CHANGED )
		{
			restoreSimulationTime = true;
	
			// see if simulation made any changes, if no, then do the restore, otherwise,
			//  leave new values in
			if ( pPlayer->CollisionProp()->OBBMinsPreScaled() == change->m_vecMinsPreScaled &&
				pPlayer->CollisionProp()->OBBMaxsPreScaled() == change->m_vecMaxsPreScaled )
			{
				// Restore it
				pPlayer->SetSize( restore->m_vecMinsPreScaled, restore->m_vecMaxsPreScaled );
			}
			else
			{
				Warning( "Should we really not restore the size?\n" );
			}
		}

		if ( restore->m_fFlags & LC_ANGLES_CHANGED )
		{		   
			restoreSimulationTime = true;

			if ( pPlayer->GetLocalAngles() == change->m_vecAngles )
			{
				pPlayer->SetLocalAngles( restore->m_vecAngles );
			}
		}

		if ( restore->m_fFlags & LC_ORIGIN_CHANGED )
		{
			restoreSimulationTime = true;

			// Okay, let's see if we can do something reasonable with the change
			Vector delta = pPlayer->GetLocalOrigin() - change->m_vecOrigin;
			
			// If it moved really far, just leave the player in the new spot!!!
			if ( delta.Length2DSqr() < m_flTeleportDistanceSqr )
			{
				RestorePlayerTo( pPlayer, restore->m_vecOrigin + delta );
			}
		}

		if( restore->m_fFlags & LC_ANIMATION_CHANGED )
		{
			restoreSimulationTime = true;

			pPlayer->SetSequence(restore->m_masterSequence);
			pPlayer->SetCycle(restore->m_masterCycle);

			int layerCount = pPlayer->GetNumAnimOverlays();
			for( int layerIndex = 0; layerIndex < layerCount; ++layerIndex )
			{
				CAnimationLayer *currentLayer = pPlayer->GetAnimOverlay(layerIndex);
				if( currentLayer )
				{
					currentLayer->m_flCycle = restore->m_layerRecords[layerIndex].m_cycle;
					currentLayer->m_nOrder = restore->m_layerRecords[layerIndex].m_order;
					currentLayer->m_nSequence = restore->m_layerRecords[layerIndex].m_sequence;
					currentLayer->m_flWeight = restore->m_layerRecords[layerIndex].m_weight;
				}
			}
		}

		if ( restoreSimulationTime )
		{
			pPlayer->SetSimulationTime( restore->m_flSimulationTime );
		}
	}

	// also iterate all monsters
	CAI_BaseNPC **ppAIs = g_AI_Manager.AccessAIs();
	int nAIs = g_AI_Manager.NumAIs();

	for ( int i = 0; i < nAIs; i++ )
	{
		CAI_BaseNPC *pNPC = ppAIs[i];
		
		if ( !m_RestoreEntity.Get( i ) )
		{
			// entity wasn't changed by lag compensation
			continue;
		}

		LagRecord *restore = &m_EntityRestoreData[ i ];
		LagRecord *change  = &m_EntityChangeData[ i ];

		bool restoreSimulationTime = false;

		if ( restore->m_fFlags & LC_SIZE_CHANGED )
		{
			restoreSimulationTime = true;
	
			// see if simulation made any changes, if no, then do the restore, otherwise,
			//  leave new values in
			if ( pNPC->WorldAlignMins() == change->m_vecMins && 
				 pNPC->WorldAlignMaxs() == change->m_vecMaxs )
			{
				// Restore it
				pNPC->SetSize( restore->m_vecMins, restore->m_vecMaxs );
			}
		}

		if ( restore->m_fFlags & LC_ANGLES_CHANGED )
		{		   
			restoreSimulationTime = true;

			if ( pNPC->GetLocalAngles() == change->m_vecAngles )
			{
				pNPC->SetLocalAngles( restore->m_vecAngles );
			}
		}

		if ( restore->m_fFlags & LC_ORIGIN_CHANGED )
		{
			restoreSimulationTime = true;

			// Okay, let's see if we can do something reasonable with the change
			Vector delta = pNPC->GetLocalOrigin() - change->m_vecOrigin;
			
			// If it moved really far, just leave the entity in the new spot!!!
			if ( delta.LengthSqr() < LAG_COMPENSATION_TELEPORTED_DISTANCE_SQR )
			{
				RestoreEntityTo( pNPC, restore->m_vecOrigin + delta );
			}
		}

		if( restore->m_fFlags & LC_ANIMATION_CHANGED )
		{
			restoreSimulationTime = true;

			pNPC->SetSequence(restore->m_masterSequence);
			pNPC->SetCycle(restore->m_masterCycle);

			int layerCount = pNPC->GetNumAnimOverlays();
			for( int layerIndex = 0; layerIndex < layerCount; ++layerIndex )
			{
				CAnimationLayer *currentLayer = pNPC->GetAnimOverlay(layerIndex);
				if( currentLayer )
				{
					currentLayer->m_flCycle = restore->m_layerRecords[layerIndex].m_cycle;
					currentLayer->m_nOrder = restore->m_layerRecords[layerIndex].m_order;
					currentLayer->m_nSequence = restore->m_layerRecords[layerIndex].m_sequence;
					currentLayer->m_flWeight = restore->m_layerRecords[layerIndex].m_weight;
				}
			}
		}

		if ( restoreSimulationTime )
		{
			pNPC->SetSimulationTime( restore->m_flSimulationTime );
		}
	}
}
Code Example #10
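// Converts each ragdoll physics object's world transform into a bone matrix plus a
// parent-relative position/angle pair; returns false if a physics object or bone
// mapping is missing.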
bool _ComputeRagdollBones( const ragdoll_t *pRagdoll, matrix3x4_t &parentTransform, matrix3x4_t *pBones, Vector *pPositions, QAngle *pAngles )
{
	matrix3x4_t inverted, output;

#ifdef _DEBUG
	CBitVec<MAXSTUDIOBONES> vBonesComputed;
	vBonesComputed.ClearAll();
#endif

	for ( int i = 0; i < pRagdoll->listCount; ++i )
	{
		const ragdollelement_t& element = pRagdoll->list[ i ];

		// during restore if a model has changed since the file was saved, this could be NULL
		if ( !element.pObject )
			return false;

		int const boneIndex = pRagdoll->boneIndex[ i ];
		if ( boneIndex < 0 )
		{
			AssertMsg( 0, "Replay: No mapping for ragdoll bone\n" );
			return false;
		}

		// Get global transform and put it into the bone cache
		element.pObject->GetPositionMatrix( &pBones[ boneIndex ] );

		// Ensure a fixed translation from the parent (no stretching)
		if ( element.parentIndex >= 0 && !pRagdoll->allowStretch )
		{
			int parentIndex = pRagdoll->boneIndex[ element.parentIndex ];

#ifdef _DEBUG
			// Make sure we computed the parent already
			Assert( vBonesComputed.IsBitSet(parentIndex) );
#endif

			// overwrite the position from physics to force rigid attachment
			// NOTE: On the client we actually override this with the proper parent bone in each LOD
			Vector out;
			VectorTransform( element.originParentSpace, pBones[ parentIndex ], out );
			MatrixSetColumn( out, 3, pBones[ boneIndex ] );

			MatrixInvert( pBones[ parentIndex ], inverted );
		}
		else if ( element.parentIndex == - 1 )
		{
			// Decompose into parent space
			MatrixInvert( parentTransform, inverted );
		}

#ifdef _DEBUG
		vBonesComputed.Set( boneIndex, true );
#endif

		// Compute local transform and put into 'output'
 		ConcatTransforms( inverted, pBones[ boneIndex ], output );

		// Cache as Euler/position
 		MatrixAngles( output, pAngles[ i ], pPositions[ i ] );
	}
	return true;
}
Code Example #11
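// Stores the line's endpoints, colors and width, then rebuilds the ground-hugging
// segment list, inserting extra points where a segment would clip world geometry.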
void CGroundLine::SetParameters(
	const Vector &vStart, 
	const Vector &vEnd, 
	const Vector &vStartColor,	// Color values 0-1
	const Vector &vEndColor,
	float alpha,
	float lineWidth
	)
{
	m_vStart = vStart;
	m_vEnd = vEnd;
	m_vStartColor = vStartColor;
	m_vEndColor = vEndColor;
	m_Alpha = alpha;
	m_LineWidth = lineWidth;

	Vector vTo( vEnd.x - vStart.x, vEnd.y - vStart.y, 0 );
	float flXYLen = vTo.Length();

	// Recalculate our segment list.
	unsigned int nSteps = (int)flXYLen / XY_PER_SEGMENT;
	nSteps = clamp( nSteps, 8, MAX_GROUNDLINE_SEGMENTS ) & ~1;
	unsigned int nMaxSteps = nSteps / 2;

	// First generate the sequence. We generate every other point here so it can insert fixup points to prevent
	// it from crossing world geometry.
	Vector pt[MAX_GROUNDLINE_SEGMENTS];
	Vector vStep = (Vector(m_vEnd[0], m_vEnd[1], 0) - Vector(m_vStart[0], m_vStart[1], 0)) / (nMaxSteps-1);

	pt[0] = FindBestSurfacePoint(m_vStart);

	unsigned int i;
	for(i=1; i < nMaxSteps; i++)
		pt[i<<1] = FindBestSurfacePoint(pt[(i-1)<<1] + vStep);


	CBitVec<MAX_GROUNDLINE_SEGMENTS> pointsUsed;
	pointsUsed.ClearAll();

	// Now try to make sure they don't intersect the geometry.
	for(i=0; i < nMaxSteps-1; i++)
	{
		Vector &a = pt[i<<1];
		Vector &b = pt[(i+1)<<1];

		trace_t trace;
		UTIL_TraceLine(a, b, MASK_SOLID_BRUSHONLY, NULL, COLLISION_GROUP_NONE, &trace);
		if(trace.fraction < 1)
		{
			int cIndex = (i<<1)+1;
			Vector &c = pt[cIndex];

			// Ok, this line segment intersects the world. Do a binary search to try to find the
			// point of intersection.
			Vector hi, lo;
			if(a.z < b.z)
			{
				hi = b;
				lo = a;
			}
			else
			{
				hi = a;
				lo = b;
			}

			if(BinSearchSegments(lo, hi, Vector(lo[0],lo[1],hi[2]), 15, &c))
			{
				pointsUsed.Set( cIndex );
			}
			else if(BinSearchSegments(lo, hi, Vector(hi[0],hi[1],hi[2]+500), 15, &c))
			{
				pointsUsed.Set( cIndex );
			}
		}
	}

	// Export the points.
	m_nPoints = 0;
	for(i=0; i < nSteps; i++)
	{
		// Every other point is always active.
		if( pointsUsed.Get( i ) || !(i & 1) )
		{
			m_Points[m_nPoints] = pt[i];
			++m_nPoints;
		}
	}
}
Code Example #12
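// While recording a demo, serializes the current blob particle state for playback;
// also resets interpolators for indexes whose in-use bit changed and interpolates
// the particles that are still in use.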
void CBlobParticleNetworkBypassAutoGame::PreRender( void )
{
	if( engine->IsRecordingDemo() && g_pBlobNetworkBypass->bDataUpdated )
	{
		//record the update, TODO: compress the data by omitting the holes

		int iMaxIndex = MAX(g_pBlobNetworkBypass->iHighestIndexUsed, m_iOldHighestIndexUsed);
		int iBitMax = (iMaxIndex / BITS_PER_INT) + 1;

		size_t iDataSize = sizeof( int ) + sizeof( float ) + sizeof( int ) + sizeof( int ) + (sizeof( int ) * iBitMax) +
							iMaxIndex*( sizeof( Vector ) + sizeof( float ) + sizeof( Vector ) );
		uint8 *pData = new uint8 [iDataSize];
		uint8 *pWrite = pData;

		//let the receiver know how much of each array to expect
		*(int *)pWrite = LittleDWord( iMaxIndex );
		pWrite += sizeof( int );

		//write the update timestamp
		*(float *)pWrite = g_pBlobNetworkBypass->fTimeDataUpdated;
		pWrite += sizeof( float );

		//record usage information, also helps us effectively compress the subsequent data by omitting the holes.
		*(int *)pWrite = LittleDWord( g_pBlobNetworkBypass->iHighestIndexUsed );
		pWrite += sizeof( int );

		*(int *)pWrite = LittleDWord( g_pBlobNetworkBypass->iNumParticlesAllocated );
		pWrite += sizeof( int );

		int *pIntParser = (int *)&g_pBlobNetworkBypass->bCurrentlyInUse;
		for( int i = 0; i != iBitMax; ++i )
		{
			//convert and write the bitfield integers
			*(int *)pWrite = LittleDWord( *pIntParser );
			pWrite += sizeof( int );
			++pIntParser;
		}

		//write positions
		memcpy( pWrite, g_pBlobNetworkBypass->vParticlePositions, sizeof( Vector ) * iMaxIndex );
		pWrite += sizeof( Vector ) * iMaxIndex;

		//write radii
		memcpy( pWrite, g_pBlobNetworkBypass->vParticleRadii, sizeof( float ) * iMaxIndex );
		pWrite += sizeof( float ) * iMaxIndex;

		//write closest surface direction
		memcpy( pWrite, g_pBlobNetworkBypass->vParticleClosestSurfDir, sizeof( Vector ) * iMaxIndex );
		pWrite += sizeof( Vector ) * iMaxIndex;

		engine->RecordDemoCustomData( BlobNetworkBypass_CustomDemoDataCallback, pData, iDataSize );

		Assert( pWrite == (pData + iDataSize) );

		delete []pData;
	}

	//invalidate interpolation on freed indices, do a quick update for brand new indices
	{
		//operate on smaller chunks based on the assumption that LARGE portions of the end of the bitvecs are empty
		CBitVec<BITS_PER_INT> *pCurrentlyInUse = (CBitVec<BITS_PER_INT> *)&g_pBlobNetworkBypass->bCurrentlyInUse;
		CBitVec<BITS_PER_INT> *pOldInUse = (CBitVec<BITS_PER_INT> *)&m_bOldInUse;
		int iStop = (MAX(g_pBlobNetworkBypass->iHighestIndexUsed, m_iOldHighestIndexUsed) / BITS_PER_INT) + 1;
		int iBaseIndex = 0;

		//float fNewIndicesUpdateTime = g_pBlobNetworkBypass->bPositionsUpdated ? g_pBlobNetworkBypass->fTimeDataUpdated : gpGlobals->curtime;

		for( int i = 0; i != iStop; ++i )
		{
			CBitVec<BITS_PER_INT> bInUseXOR;
			pCurrentlyInUse->Xor( *pOldInUse, &bInUseXOR ); //find bits that changed
			
			int j = 0;
			while( (j = bInUseXOR.FindNextSetBit( j )) != -1 )
			{
				int iChangedUsageIndex = iBaseIndex + j;
				
				if( pOldInUse->IsBitSet( iChangedUsageIndex ) )
				{
					//index no longer used
					g_BlobParticleInterpolation.vInterpolatedPositions[iChangedUsageIndex] = vec3_origin;
					s_PositionInterpolators[iChangedUsageIndex].ClearHistory();
					g_BlobParticleInterpolation.vInterpolatedRadii[iChangedUsageIndex] = 1.0f;
					s_RadiusInterpolators[iChangedUsageIndex].ClearHistory();
					g_BlobParticleInterpolation.vInterpolatedClosestSurfDir[iChangedUsageIndex] = vec3_origin;
					s_ClosestSurfDirInterpolators[iChangedUsageIndex].ClearHistory();
				}
				else
				{
					//index just started being used. Assume we got an out of band update to the position
					g_BlobParticleInterpolation.vInterpolatedPositions[iChangedUsageIndex] = g_pBlobNetworkBypass->vParticlePositions[iChangedUsageIndex];
					s_PositionInterpolators[iChangedUsageIndex].Reset( gpGlobals->curtime );
					g_BlobParticleInterpolation.vInterpolatedRadii[iChangedUsageIndex] = g_pBlobNetworkBypass->vParticleRadii[iChangedUsageIndex];
					s_RadiusInterpolators[iChangedUsageIndex].Reset( gpGlobals->curtime );
					g_BlobParticleInterpolation.vInterpolatedClosestSurfDir[iChangedUsageIndex] = g_pBlobNetworkBypass->vParticleClosestSurfDir[iChangedUsageIndex];
					s_ClosestSurfDirInterpolators[iChangedUsageIndex].Reset( gpGlobals->curtime );
					//s_PositionInterpolators[iChangedUsageIndex].NoteChanged( gpGlobals->curtime, fNewIndicesUpdateTime, true );
				}

				++j;
				if( j == BITS_PER_INT )
					break;
			}
			iBaseIndex += BITS_PER_INT;
			++pCurrentlyInUse;
			++pOldInUse;
		}

		memcpy( &m_bOldInUse, &g_pBlobNetworkBypass->bCurrentlyInUse, sizeof( m_bOldInUse ) );
		m_iOldHighestIndexUsed = g_pBlobNetworkBypass->iHighestIndexUsed;
	}

	if( g_pBlobNetworkBypass->iHighestIndexUsed == 0 )
		return;

	static ConVarRef cl_interpREF( "cl_interp" );
	//now do the interpolation of positions still in use
	{
		float fInterpTime = gpGlobals->curtime - cl_interpREF.GetFloat();

		CBitVec<BITS_PER_INT> *pIntParser = (CBitVec<BITS_PER_INT> *)&g_pBlobNetworkBypass->bCurrentlyInUse;
		int iStop = (g_pBlobNetworkBypass->iHighestIndexUsed / BITS_PER_INT) + 1;
		int iBaseIndex = 0;
		for( int i = 0; i != iStop; ++i )
		{
			int j = 0;
			while( (j = pIntParser->FindNextSetBit( j )) != -1 )
			{
				int iUpdateIndex = iBaseIndex + j;

				if( g_pBlobNetworkBypass->bDataUpdated )
				{
					g_BlobParticleInterpolation.vInterpolatedPositions[iUpdateIndex] = g_pBlobNetworkBypass->vParticlePositions[iUpdateIndex];
					s_PositionInterpolators[iUpdateIndex].NoteChanged( gpGlobals->curtime, g_pBlobNetworkBypass->fTimeDataUpdated, true );
					g_BlobParticleInterpolation.vInterpolatedRadii[iUpdateIndex] = g_pBlobNetworkBypass->vParticleRadii[iUpdateIndex];
					s_RadiusInterpolators[iUpdateIndex].NoteChanged( gpGlobals->curtime, g_pBlobNetworkBypass->fTimeDataUpdated, true );
					g_BlobParticleInterpolation.vInterpolatedClosestSurfDir[iUpdateIndex] = g_pBlobNetworkBypass->vParticleClosestSurfDir[iUpdateIndex];
					s_ClosestSurfDirInterpolators[iUpdateIndex].NoteChanged( gpGlobals->curtime, g_pBlobNetworkBypass->fTimeDataUpdated, true );
					//s_PositionInterpolators[iUpdateIndex].AddToHead( gpGlobals->curtime, &g_pBlobNetworkBypass->vParticlePositions[iUpdateIndex], false );
				}

				s_PositionInterpolators[iUpdateIndex].Interpolate( fInterpTime );
				s_RadiusInterpolators[iUpdateIndex].Interpolate( fInterpTime );
				s_ClosestSurfDirInterpolators[iUpdateIndex].Interpolate( fInterpTime );

				++j;
				if( j == BITS_PER_INT )
					break;
			}
			iBaseIndex += BITS_PER_INT;
			++pIntParser;
		}

		g_pBlobNetworkBypass->bDataUpdated = false;
	}
}
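
The change-detection trick in the example above (XOR the previous and current usage masks, then walk the set bits of the result) also works in isolation. Here is a sketch: CBitVec's Xor, IsBitSet and FindNextSetBit are the methods used above, BLOB_MAX_LEVEL_PARTICLES comes from the SDK, and the Msg() output is illustrative only:

static void ReportUsageChanges( CBitVec<BLOB_MAX_LEVEL_PARTICLES> &oldInUse,
								CBitVec<BLOB_MAX_LEVEL_PARTICLES> &newInUse )
{
	CBitVec<BLOB_MAX_LEVEL_PARTICLES> changed;
	newInUse.Xor( oldInUse, &changed ); // a set bit means that index was allocated or freed

	int idx = changed.FindNextSetBit( 0 );
	while ( idx > -1 )
	{
		if ( oldInUse.IsBitSet( idx ) )
			Msg( "index %d was freed\n", idx );
		else
			Msg( "index %d was allocated\n", idx );

		idx = changed.FindNextSetBit( idx + 1 );
	}
}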