Example #1
void ServerDTI_Flush()
{
    if ( !g_pServerDTIFilename )
        return;

    CCycleCount curTime;
    curTime.Sample();

    CCycleCount runningTime;
    CCycleCount::Sub( curTime, g_ServerDTITimer, runningTime );

    // Write out a file that can be used by Excel.
    FileHandle_t fp = g_pFileSystem->Open( g_pServerDTIFilename, "wt", "LOGDIR" );

    if( fp != FILESYSTEM_INVALID_HANDLE )
    {
        // Write the header.
        g_pFileSystem->FPrintf( fp,
                                "DTName"

                                "\tCalcDelta calls"
                                "\tCalcDelta ms"

                                "\tEncode calls"
                                "\tEncode ms"

                                "\tShouldTransmit calls"
                                "\tShouldTransmit ms"

                                "\tWriteDeltaProps ms"

                                "\t%% manual mode"

                                "\tTotal"
                                "\tPercent"
                                "\n"
                              );

        // Calculate totals.
        CCycleCount totalCalcDelta, totalEncode, totalShouldTransmit, totalDeltaProps;
        totalCalcDelta.Init();
        totalEncode.Init();
        totalShouldTransmit.Init();
        totalDeltaProps.Init();

        FOR_EACH_LL( g_DTISendTables, i )
        {
            CDTISendTable *pTable = g_DTISendTables[i];

            CCycleCount::Add( pTable->m_nCalcDeltaCycles, totalCalcDelta, totalCalcDelta );
            CCycleCount::Add( pTable->m_nEncodeCycles, totalEncode, totalEncode );
            CCycleCount::Add( pTable->m_nShouldTransmitCycles, totalShouldTransmit, totalShouldTransmit );
            CCycleCount::Add( pTable->m_nWriteDeltaPropsCycles, totalDeltaProps, totalDeltaProps );
        }
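
The CCycleCount pattern used throughout these examples is: Sample() captures the current CPU timestamp, Init() zeroes a counter, and the static Add/Sub helpers write their result into the last argument. A minimal timing sketch built only from the calls shown above (DoSomeWork is a hypothetical stand-in for the code being measured):

void TimingSketch()
{
    CCycleCount start, end, elapsed;
    start.Sample();                             // capture the current cycle count
    DoSomeWork();                               // hypothetical work being timed
    end.Sample();
    CCycleCount::Sub( end, start, elapsed );    // elapsed = end - start
    Msg( "took %.3f ms\n", elapsed.GetMillisecondsF() );
}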
Example #2
void RunMPIBuildVisLeafs()
{
	g_CPUTime.Init();

	Msg( "%-20s ", "BuildVisLeafs  :" );
	if ( g_bMPIMaster )
	{
		StartPacifier("");
	}

	memset( g_VMPIVisLeafsData, 0, sizeof( g_VMPIVisLeafsData ) );
	if ( !g_bMPIMaster || VMPI_GetActiveWorkUnitDistributor() == k_eWorkUnitDistributor_SDK )
	{
		// Allocate space for the transfers for each thread.
		for ( int i=0; i < numthreads; i++ )
		{
			g_VMPIVisLeafsData[i].m_pBuildVisLeafsTransfers = BuildVisLeafs_Start();
		}
	}

	//
	// Slaves ask for work via GetMPIBuildVisLeafWork().
	// Results are returned in BuildVisRow().
	// (The general DistributeWork callback shape is sketched after this function.)
	//
	VMPI_SetCurrentStage( "RunMPIBuildVisLeafs" );
	
	double elapsed = DistributeWork( 
		dvis->numclusters, 
		VMPI_DISTRIBUTEWORK_PACKETID,
		MPI_ProcessVisLeafs, 
		MPI_ReceiveVisLeafsResults );

	// Free the transfers from each thread.
	for ( int i=0; i < numthreads; i++ )
	{
		if ( g_VMPIVisLeafsData[i].m_pBuildVisLeafsTransfers )
			BuildVisLeafs_End( g_VMPIVisLeafsData[i].m_pBuildVisLeafsTransfers );
	}

	if ( g_bMPIMaster )
	{
		EndPacifier(false);
		Msg( " (%d)\n", (int)elapsed );
	}
	else
	{
		if ( g_iVMPIVerboseLevel >= 1 )
			Msg( "%.1f%% CPU utilization during PortalFlow\n", (g_CPUTime.GetSeconds() * 100.0f / elapsed) / numthreads );
	}
}
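
DistributeWork() is the core of all the MPI examples here: it takes a work-unit count, a packet ID, a worker-side function, and a master-side receive function, and returns the elapsed wall-clock seconds. The callback signatures below are an assumption inferred from usage in these examples (consult vmpi_distribute_work.h for the authoritative typedefs); the bodies show only the division of labor:

// Worker side: compute one work unit (here, one vis cluster) and append
// the results to the outgoing buffer. Signature is an assumption.
void Sketch_ProcessWorkUnit( int iThread, uint64 iWorkUnit, MessageBuffer *pBuf )
{
	// ... fill pBuf with this work unit's results ...
}

// Master side: unpack one worker's returned buffer into the global arrays.
// Signature is an assumption.
void Sketch_ReceiveWorkUnit( uint64 iWorkUnit, MessageBuffer *pBuf, int iWorker )
{
	// ... read pBuf and store the results ...
}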
Example #3
        FOR_EACH_LL( g_DTISendTables, j )
        {
            CDTISendTable *pTable = g_DTISendTables[j];

            if ( !pTable->HadAnyAction() )
                continue;

            CCycleCount total;
            CCycleCount::Add( pTable->m_nEncodeCycles, pTable->m_nCalcDeltaCycles, total );
            CCycleCount::Add( pTable->m_nShouldTransmitCycles, total, total );

            // Avoid a zero denominator (the row would print NaN otherwise).
            int nManualSamples = pTable->m_nNoChanges + pTable->m_nChangeAutoDetects;
            float flPercentManual = nManualSamples ? pTable->m_nNoChanges * 100.0f / nManualSamples : 0.0f;

            g_pFileSystem->FPrintf( fp,
                                    "%s"

                                    "\t%d"
                                    "\t%.3f"

                                    "\t%d"
                                    "\t%.3f"

                                    "\t%d"
                                    "\t%.3f"

                                    "\t%.3f"

                                    "\t%.2f"

                                    "\t%.3f"
                                    "\t%.3f"
                                    "\n",
                                    (char const*)pTable->m_NetTableName,

                                    pTable->m_nCalcDeltaCalls,
                                    pTable->m_nCalcDeltaCycles.GetMillisecondsF(),

                                    pTable->m_nEncodeCalls,
                                    pTable->m_nEncodeCycles.GetMillisecondsF(),

                                    pTable->m_nShouldTransmitCalls,
                                    pTable->m_nShouldTransmitCycles.GetMillisecondsF(),

                                    pTable->m_nWriteDeltaPropsCycles.GetMillisecondsF(),

                                    flPercentManual,

                                    total.GetMillisecondsF(),
                                    total.GetMillisecondsF() * 100 / runningTime.GetMillisecondsF()
                                  );
        }
Example #4
void ServerDTI_Init( char const *pFilename )
{
    // Note: the pointer is stored directly, so pFilename must stay valid
    // until ServerDTI_Flush() writes the report.
    g_pServerDTIFilename = pFilename;
    g_bServerDTIEnabled = true;
    g_TotalServerDTICycles.Init();
    g_bFirstHookTimer = true;
}
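
Taken together with Example #1, the intended lifecycle is: enable instrumentation before the session starts, let the send-table hooks accumulate per-table cycle counts, then flush the tab-separated report (openable in Excel) at shutdown. A minimal usage sketch; the filename is illustrative:

void Sketch_ServerDTISession()
{
    // The string literal here stays valid for the whole session, which
    // ServerDTI_Init requires since it stores the pointer, not a copy.
    ServerDTI_Init( "dti_server.txt" );
    // ... run the server; DTI hooks accumulate CalcDelta/Encode/etc. times ...
    ServerDTI_Flush();  // writes the report under the LOGDIR search path
}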
Example #5
void RunMPIBuildFacelights()
{
	g_CPUTime.Init();

    Msg( "%-20s ", "BuildFaceLights:" );
	if ( g_bMPIMaster )
	{
		StartPacifier("");
	}

	VMPI_SetCurrentStage( "RunMPIBuildFaceLights" );
	double elapsed = DistributeWork( 
		numfaces, 
		VMPI_DISTRIBUTEWORK_PACKETID,
		MPI_ProcessFaces, 
		MPI_ReceiveFaceResults );

	if ( g_bMPIMaster )
	{
		EndPacifier(false);
		Msg( " (%d)\n", (int)elapsed );

		//
		// BuildPatchLights is normally called from BuildFacelights(),
		// but in MPI mode the master does this calculation serially.
		// We might be able to speed this up by doing it while the master
		// is idling in the loop above. We wouldn't want to slow down the
		// handing out of work - maybe another thread? (See the sketch
		// after this function.)
		//
		for ( int i=0; i < numfaces; ++i )
		{
			BuildPatchLights(i);
		}
	}
	else
	{
		if ( g_iVMPIVerboseLevel >= 1 )
			Msg( "\n\n%.1f%% CPU utilization during BuildFaceLights\n\n", ( g_CPUTime.GetSeconds() * 100 / elapsed ) );
	}
}
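
One way to act on the "maybe another thread?" question above is the tools' RunThreadsOnIndividual() helper, which the non-MPI vrad path already uses for per-face loops. Whether BuildPatchLights() is safe to run in parallel is exactly what the original comment leaves open, so treat this as a sketch, not a fix:

// Hypothetical wrapper matching the (iThread, iWorkItem) worker signature
// used by RunThreadsOnIndividual(); assumes BuildPatchLights() has no
// cross-face data races, which has not been verified here.
static void Sketch_BuildPatchLightsWorker( int iThread, int iFace )
{
	BuildPatchLights( iFace );
}

// The serial master-side loop above would then become:
// RunThreadsOnIndividual( numfaces, true, Sketch_BuildPatchLightsWorker );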
Example #6
//-----------------------------------------
//
// Run PortalFlow across all available processing nodes
//
void RunMPIPortalFlow()
{
    Msg( "%-20s ", "MPIPortalFlow:" );
	if ( g_bMPIMaster )
		StartPacifier("");

	// Workers wait until we get the MC socket address.
	g_PortalMCThreadUniqueID = StatsDB_GetUniqueJobID();
	if ( g_bMPIMaster )
	{
		CCycleCount cnt;
		cnt.Sample();
		CUniformRandomStream randomStream;
		randomStream.SetSeed( cnt.GetMicroseconds() );

		g_PortalMCAddr.port = randomStream.RandomInt( 22000, 25000 ); // Pulled out of something else.

		// Pick a group inside the IPv4 multicast block (224.0.0.0 - 239.255.255.255),
		// avoiding the reserved 224.x.x.x range and the administratively
		// scoped 239.x.x.x range.
		g_PortalMCAddr.ip[0] = (unsigned char)RandomInt( 225, 238 );
		g_PortalMCAddr.ip[1] = (unsigned char)RandomInt( 0, 255 );
		g_PortalMCAddr.ip[2] = (unsigned char)RandomInt( 0, 255 );
		g_PortalMCAddr.ip[3] = (unsigned char)RandomInt( 3, 255 );

		g_pPortalMCSocket = CreateIPSocket();
		int i;
		for ( i=0; i < 5; i++ )
		{
			if ( g_pPortalMCSocket->BindToAny( randomStream.RandomInt( 20000, 30000 ) ) )
				break;
		}
		if ( i == 5 )
		{
			Error( "RunMPIPortalFlow: can't open a socket to multicast on." );
		}

		char cPacketID[2] = { VMPI_VVIS_PACKET_ID, VMPI_SUBPACKETID_MC_ADDR };
		VMPI_Send2Chunks( cPacketID, sizeof( cPacketID ), &g_PortalMCAddr, sizeof( g_PortalMCAddr ), VMPI_PERSISTENT );
	}
	else
	{
		VMPI_SetCurrentStage( "wait for MC address" );

		while ( !g_bGotMCAddr )
		{
			VMPI_DispatchNextMessage();
		}

		// Open our multicast receive socket.
		g_pPortalMCSocket = CreateMulticastListenSocket( g_PortalMCAddr );
		if ( !g_pPortalMCSocket )
		{
			char err[512];
			IP_GetLastErrorString( err, sizeof( err ) );
			Error( "RunMPIPortalFlow: CreateMulticastListenSocket failed. (%s).", err );
		}

		// Make a thread to listen for the data on the multicast socket.
		DWORD dwDummy = 0;
		g_MCThreadExitEvent.Init( false, false );

		// Make sure we kill the MC thread if the app exits ungracefully
		// (a cleanup sketch follows this function).
		CmdLib_AtCleanup( MCThreadCleanupFn );
		
		g_hMCThread = CreateThread( 
			NULL,
			0,
			PortalMCThreadFn,
			NULL,
			0,
			&dwDummy );

		if ( !g_hMCThread )
		{
			Error( "RunMPIPortalFlow: CreateThread failed for multicast receive thread." );
		}			
	}

	VMPI_SetCurrentStage( "RunMPIBasePortalFlow" );


	g_pDistributeWorkCallbacks = &g_VisDistributeWorkCallbacks;

	g_CPUTime.Init();
	double elapsed = DistributeWork( 
		g_numportals * 2,		// # work units
		VMPI_DISTRIBUTEWORK_PACKETID,	// packet ID
		ProcessPortalFlow,		// Worker function to process work units
		ReceivePortalFlow		// Master function to receive work results
		);
		
	g_pDistributeWorkCallbacks = NULL;

	CheckExitedEarly();

	// Stop the multicast stuff.
	VMPI_DeletePortalMCSocket();

	if( !g_bMPIMaster )
	{
		if ( g_iVMPIVerboseLevel >= 1 )
		{
			Msg( "Received %d (out of %d) portals from multicast.\n", g_nMulticastPortalsReceived, g_numportals * 2 );
			Msg( "%.1f%% CPU utilization during PortalFlow\n", (g_CPUTime.GetSeconds() * 100.0f / elapsed) / numthreads );
		}

		Msg( "VVIS worker finished. Over and out.\n" );
		VMPI_SetCurrentStage( "worker done" );

		CmdLib_Exit( 0 );
	}

	if ( g_bMPIMaster )
	{
		EndPacifier( false );
		Msg( " (%d)\n", (int)elapsed );
	}
}
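
MCThreadCleanupFn, registered with CmdLib_AtCleanup() above, is not shown in these examples. A hedged sketch of what it must do, based on the handles set up in the worker branch: signal the exit event the listen thread waits on, then block until the thread is gone. The actual implementation may differ.

void Sketch_MCThreadCleanup()
{
	if ( g_hMCThread )
	{
		SetEvent( g_MCThreadExitEvent.GetEventHandle() );  // ask PortalMCThreadFn to exit
		WaitForSingleObject( g_hMCThread, INFINITE );      // wait for the thread to finish
		CloseHandle( g_hMCThread );
		g_hMCThread = NULL;
	}
}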
Example #7
//-----------------------------------------
//
// Run BasePortalVis across all available processing nodes
// Then collect and redistribute the results.
//
void RunMPIBasePortalVis()
{
	int i;

	Msg( "\n\nportalbytes: %d\nNum Work Units: %d\nTotal data size: %d\n", portalbytes, g_numportals*2, portalbytes*g_numportals*2 );
    Msg("%-20s ", "BasePortalVis:");
	if ( g_bMPIMaster )
		StartPacifier("");


	VMPI_SetCurrentStage( "RunMPIBasePortalVis" );

	// Note: we're aiming for about 1500 portals in a map, so about 3000 work units.
	g_CPUTime.Init();
	double elapsed = DistributeWork( 
		g_numportals * 2,		// # work units
		VMPI_DISTRIBUTEWORK_PACKETID,	// packet ID
		ProcessBasePortalVis,	// Worker function to process work units
		ReceiveBasePortalVis	// Master function to receive work results
		);

	if ( g_bMPIMaster )
	{
		EndPacifier( false );
		Msg( " (%d)\n", (int)elapsed );
	}
	
	//
	// Distribute the results to all the workers.
	//
	if ( g_bMPIMaster )
	{
		if ( !fastvis )
		{
			VMPI_SetCurrentStage( "SendPortalResults" );

			// Store all the portal results in a temp file and multicast that to the workers.
			CUtlVector<char> allPortalData;
			// 2*numportals portals, each contributing two portalbytes-sized
			// buffers (portalfront and portalflood).
			allPortalData.SetSize( g_numportals * 2 * portalbytes * 2 );

			char *pOut = allPortalData.Base();
			for ( i=0; i < g_numportals * 2; i++) 
			{
				portal_t *p = &portals[i];
				
				memcpy( pOut, p->portalfront, portalbytes );
				pOut += portalbytes;
				
				memcpy( pOut, p->portalflood, portalbytes );
				pOut += portalbytes;
			}

			const char *pVirtualFilename = "--portal-results--";
			VMPI_FileSystem_CreateVirtualFile( pVirtualFilename, allPortalData.Base(), allPortalData.Count() );

			char cPacketID[2] = { VMPI_VVIS_PACKET_ID, VMPI_BASEPORTALVIS_RESULTS };
			VMPI_Send2Chunks( cPacketID, sizeof( cPacketID ), pVirtualFilename, strlen( pVirtualFilename ) + 1, VMPI_PERSISTENT );
		}
	}
	else
	{
		VMPI_SetCurrentStage( "RecvPortalResults" );

		// Wait until we've received the filename from the master (the
		// dispatch handler that sets it is sketched after this function).
		while ( g_BasePortalVisResultsFilename.Count() == 0 )
		{
			VMPI_DispatchNextMessage();
		}

		// Open the virtual file the master created from the portal results.
		FileHandle_t fp = g_pFileSystem->Open( g_BasePortalVisResultsFilename.Base(), "rb", VMPI_VIRTUAL_FILES_PATH_ID );
		if ( fp == FILESYSTEM_INVALID_HANDLE )
			Error( "Can't open '%s' to read portal info.", g_BasePortalVisResultsFilename.Base() );

		for ( i=0; i < g_numportals * 2; i++) 
		{
			portal_t *p = &portals[i];

			p->portalfront = (byte*)malloc (portalbytes);
			g_pFileSystem->Read( p->portalfront, portalbytes, fp );
			
			p->portalflood = (byte*)malloc (portalbytes);
			g_pFileSystem->Read( p->portalflood, portalbytes, fp );
		
			p->portalvis = (byte*)malloc (portalbytes);
			memset (p->portalvis, 0, portalbytes);
		
			p->nummightsee = CountBits (p->portalflood, g_numportals*2);
		}

		g_pFileSystem->Close( fp );
	}

	
	if ( !g_bMPIMaster )
	{
		if ( g_iVMPIVerboseLevel >= 1 )
			Msg( "\n%% worker CPU utilization during BasePortalVis: %.1f\n", (g_CPUTime.GetSeconds() * 100.0f / elapsed) / numthreads );
	}
}
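On the worker side, something must fill in g_BasePortalVisResultsFilename when the master's VMPI_BASEPORTALVIS_RESULTS packet arrives; in the SDK this is a dispatch handler registered for VMPI_VVIS_PACKET_ID. The packet layout ([packet ID][subpacket ID][payload]) mirrors the VMPI_Send2Chunks() call in the master branch, but the MessageBuffer access and CDispatchReg registration below are assumptions about the VMPI dispatch API:

// Hedged sketch of the worker-side packet handler implied above; verify
// against vvis_mpi.cpp before relying on the details.
bool Sketch_VVISDispatch( MessageBuffer *pBuf, int iSource, int iPacketID )
{
	switch ( pBuf->data[1] )
	{
		case VMPI_BASEPORTALVIS_RESULTS:
		{
			// Payload is the null-terminated virtual filename sent above.
			const char *pFilename = &pBuf->data[2];
			g_BasePortalVisResultsFilename.CopyArray( pFilename, strlen( pFilename ) + 1 );
			return true;
		}
	}
	return false;
}
// CDispatchReg g_VVISDispatchReg( VMPI_VVIS_PACKET_ID, Sketch_VVISDispatch );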
int main(int argc, char* argv[])
{
	SpewOutputFunc( MySpewFunc );

	// Figure out a random port to use.
	CCycleCount cnt;
	cnt.Sample();
	CUniformRandomStream randomStream;
	randomStream.SetSeed( cnt.GetMicroseconds() );
	int iPort = randomStream.RandomInt( 20000, 30000 );


	g_ClientPacketEvent.Init( false, false );

	
	// Setup the "server".
	CHandlerCreator_Server serverHandler;
	CIPAddr addr( 127, 0, 0, 1, iPort );

	ITCPConnectSocket *pListener = ThreadedTCP_CreateListener( 
		&serverHandler,
		(unsigned short)iPort );

	
	// Setup the "client".
	CHandlerCreator_Client clientCreator;
	ITCPConnectSocket *pConnector = ThreadedTCP_CreateConnector( 
		CIPAddr( 127, 0, 0, 1, iPort ),
		CIPAddr(),
		&clientCreator );


	// Wait for them to connect.
	while ( !g_pClientSocket )
	{
		if ( !pConnector->Update( &g_pClientSocket ) )
		{
			Error( "Error in client connector!\n" );
		}
	}
	pConnector->Release();


	while ( !g_pServerSocket )
	{
		if ( !pListener->Update( &g_pServerSocket ) )
			Error( "Error in server connector!\n" );
	}
	pListener->Release();


	// Send some data.
	__int64 totalBytes = 0;
	CCycleCount startTime;
	int iPacket = 1;

	startTime.Sample();
	CUtlVector<char> buf;

	while ( (GetAsyncKeyState( VK_SHIFT ) & 0x8000) == 0 )
	{
		// Random payload size, 0..320k bytes.
		int size = randomStream.RandomInt( 1024*0, 1024*320 );
		if ( buf.Count() < size )
			buf.SetSize( size );

		if ( g_pClientSocket->Send( buf.Base(), size ) )
		{
			// Server receives the data and echoes it back; verify the data
			// is good. (The client receive handler is sketched after main().)
			WaitForSingleObject( g_ClientPacketEvent.GetEventHandle(), INFINITE );
			Assert( memcmp( g_ClientPacket.Base(), buf.Base(), size ) == 0 );

			totalBytes += size;
			CCycleCount curTime, elapsed;
			curTime.Sample();
			CCycleCount::Sub( curTime, startTime, elapsed );
			double flSeconds = elapsed.GetSeconds();
			Msg( "Packet %d, %d bytes, %dk/sec\n", iPacket++, size, (int)(((totalBytes+511)/1024) / flSeconds) );
		}
	}
	
	g_pClientSocket->Release();
	g_pServerSocket->Release();
	return 0;
}
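The send loop in main() relies on a client-side handler (created through CHandlerCreator_Client) that copies each echoed packet into g_ClientPacket and signals g_ClientPacketEvent. The ThreadedTCP handler interface is not shown in these examples, so the signature below is an assumption; only the copy-then-signal logic is implied by the code above:

// Hedged sketch of the client receive path that main() waits on.
// The real handler interface may deliver the payload differently.
void Sketch_OnClientPacketReceived( const char *pData, int len )
{
	g_ClientPacket.CopyArray( pData, len );            // stash for the memcmp in main()
	SetEvent( g_ClientPacketEvent.GetEventHandle() );  // wake the send loop
}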
unsigned long SampleMilliseconds()
{
	CCycleCount cnt;
	cnt.Sample();
	return cnt.GetMilliseconds();
}
static double IP_FloatTime()
{
	CCycleCount cnt;
	cnt.Sample();
	return cnt.GetSeconds();
}