void DistributeWork_Worker( CDSInfo *pInfo, ProcessWorkUnitFn processFn )
{
    if ( g_iVMPIVerboseLevel >= 1 )
        Msg( "VMPI_DistributeWork call %d started.\n", g_iCurDSInfo+1 );

    CWorkerInfo *pWorkerInfo = &pInfo->m_WorkerInfo;
    pWorkerInfo->m_pProcessFn = processFn;

    g_pCurWorkerThreadsInfo = pInfo;
    g_pCurDistributorWorker->Init( pInfo );

    // Start a couple threads to do the work.
    RunThreads_Start( VMPI_WorkerThread, pInfo, g_bSetThreadPriorities ? k_eRunThreadsPriority_Idle : k_eRunThreadsPriority_UseGlobalState );
    if ( g_iVMPIVerboseLevel >= 1 )
        Msg( "RunThreads_Start finished successfully.\n" );

    if ( VMPI_IsSDKMode() )
    {
        Msg( "\n" );
        while ( g_iMasterFinishedDistributeWorkCall < g_iCurDSInfo )
        {
            VMPI_DispatchNextMessage( 300 );

            Msg( "\rThreads status: " );
            for ( int i=0; i < ARRAYSIZE( g_ThreadWUs ); i++ )
            {
                if ( g_ThreadWUs[i] != ~0ull )
                    Msg( "%d: WU %5d  ", i, (int)g_ThreadWUs[i] );
            }

            VMPI_FlushGroupedPackets();
        }
        Msg( "\n" );
    }
    else
    {
        while ( g_iMasterFinishedDistributeWorkCall < g_iCurDSInfo )
        {
            VMPI_DispatchNextMessage();
        }
    }


    // Shut down the worker threads.
    g_pCurWorkerThreadsInfo = NULL;
    RunThreads_End();

    if ( g_iVMPIVerboseLevel >= 1 )
        Msg( "VMPI_DistributeWork call %d finished.\n", g_iCurDSInfo+1 );
}
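//-----------------------------------------
//
// The loop above is VMPI's worker-side message pump: keep calling
// VMPI_DispatchNextMessage() until a packet handler advances the
// "master finished" counter past the current call index. A minimal
// standalone sketch of that shape follows; DispatchNext and
// g_finishedCalls are placeholder names of our own, not VMPI's.
//
#include <atomic>
#include <cstdio>

static std::atomic<int> g_finishedCalls{ 0 };
static const int g_curCall = 1;

// Stand-in for VMPI_DispatchNextMessage(): receive one packet and run
// its handler. Here the "handler" just simulates the master's
// completion packet arriving.
static void DispatchNext()
{
	g_finishedCalls.fetch_add( 1 );
}

int main()
{
	// Same shape as the non-SDK branch above: each dispatched message
	// may advance the counter that ends the wait.
	while ( g_finishedCalls.load() < g_curCall )
		DispatchNext();

	std::printf( "distribute-work call %d finished\n", g_curCall );
	return 0;
}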
void PreDistributeWorkSync( CDSInfo *pInfo )
{
    if ( g_bMPIMaster )
    {
        // Send a message telling all the workers we're ready to go on this DistributeWork call.
        MessageBuffer mb;
        PrepareDistributeWorkHeader( &mb, DW_SUBPACKETID_MASTER_READY );
        VMPI_SendData( mb.data, mb.getLen(), VMPI_PERSISTENT );
    }
    else
    {
        if ( g_iVMPIVerboseLevel >= 1 )
            Msg( "PreDistributeWorkSync: waiting for master\n" );

        // Wait for the master's message saying it's ready to go.
        while ( g_iMasterReadyForDistributeWorkCall < g_iCurDSInfo )
        {
            VMPI_DispatchNextMessage();
        }

        if ( g_iVMPIVerboseLevel >= 1 )
            Msg( "PreDistributeWorkSync: master ready\n" );

        // Now tell the master we're ready.
        MessageBuffer mb;
        PrepareDistributeWorkHeader( &mb, DW_SUBPACKETID_WORKER_READY );
        VMPI_SendData( mb.data, mb.getLen(), VMPI_MASTER_ID );
    }
}
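//-----------------------------------------
//
// PreDistributeWorkSync is a two-phase rendezvous: the master
// broadcasts DW_SUBPACKETID_MASTER_READY, and each worker waits for
// that packet before acking with DW_SUBPACKETID_WORKER_READY, so no
// worker runs ahead into the next DistributeWork call. A toy model of
// that handshake's intent, using standard threads in place of the
// VMPI transport (all names below are ours):
//
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex g_syncMutex;
static std::condition_variable g_syncCV;
static bool g_masterReady = false;
static bool g_workerReady = false;

int main()
{
	std::thread worker( []
	{
		std::unique_lock<std::mutex> lock( g_syncMutex );
		g_syncCV.wait( lock, []{ return g_masterReady; } );	// wait for MASTER_READY
		g_workerReady = true;								// ack with WORKER_READY
		g_syncCV.notify_all();
	} );

	{
		std::unique_lock<std::mutex> lock( g_syncMutex );
		g_masterReady = true;								// broadcast MASTER_READY
		g_syncCV.notify_all();
		g_syncCV.wait( lock, []{ return g_workerReady; } );	// wait for the ack
	}

	worker.join();
	std::printf( "rendezvous complete\n" );
	return 0;
}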
Example #3
void RecvDBInfo( CDBInfo *pInfo, unsigned long *pJobPrimaryID )
{
	while ( !g_bReceivedDBInfo )
		VMPI_DispatchNextMessage();

	*pInfo = g_DBInfo;
	*pJobPrimaryID = g_JobPrimaryID;
}
Example #4
void VMPI_DistributeLightData()
{
	if ( !g_bUseMPI )
		return;

	if ( g_bMPIMaster )
	{
		const char *pVirtualFilename = "--plightdata--";
		
		CUtlBuffer lightFaceData;

		// write out the light data
		lightFaceData.EnsureCapacity( pdlightdata->Count() + (numfaces * (MAXLIGHTMAPS+sizeof(int))) );
		Q_memcpy( lightFaceData.PeekPut(), pdlightdata->Base(), pdlightdata->Count() );
		lightFaceData.SeekPut( CUtlBuffer::SEEK_HEAD, pdlightdata->Count() );

		// write out the relevant face info into the stream
		for ( int i = 0; i < numfaces; i++ )
		{
			for ( int j = 0; j < MAXLIGHTMAPS; j++ )
			{
				lightFaceData.PutChar(g_pFaces[i].styles[j]);
			}
			lightFaceData.PutInt(g_pFaces[i].lightofs);
		}
		VMPI_FileSystem_CreateVirtualFile( pVirtualFilename, lightFaceData.Base(), lightFaceData.TellMaxPut() );

		char cPacketID[2] = { VMPI_VRAD_PACKET_ID, VMPI_SUBPACKETID_PLIGHTDATA_RESULTS };
		VMPI_Send2Chunks( cPacketID, sizeof( cPacketID ), pVirtualFilename, strlen( pVirtualFilename ) + 1, VMPI_PERSISTENT );
	}
	else
	{
		VMPI_SetCurrentStage( "VMPI_DistributeLightData" );

		// Wait until we've received the filename from the master.
		while ( g_LightResultsFilename.Count() == 0 )
		{
			VMPI_DispatchNextMessage();
		}

		// Open the virtual lighting-results file the master created.
		FileHandle_t fp = g_pFileSystem->Open( g_LightResultsFilename.Base(), "rb", VMPI_VIRTUAL_FILES_PATH_ID );
		if ( !fp )
			Error( "Can't open '%s' to read lighting info.", g_LightResultsFilename.Base() );

		int size = g_pFileSystem->Size( fp );
		int faceSize = (numfaces*(MAXLIGHTMAPS+sizeof(int)));

		if ( size > faceSize )
		{
			int lightSize = size - faceSize;
			CUtlBuffer faceData;
			pdlightdata->EnsureCount( lightSize );
			faceData.EnsureCapacity( faceSize );

			g_pFileSystem->Read( pdlightdata->Base(), lightSize, fp );
			g_pFileSystem->Read( faceData.Base(), faceSize, fp );
			g_pFileSystem->Close( fp );

			faceData.SeekPut( CUtlBuffer::SEEK_HEAD, faceSize );

			// Unpack the received face data into the faces.
			for ( int i = 0; i < numfaces; i++ )
			{
				for ( int j = 0; j < MAXLIGHTMAPS; j++ )
				{
					g_pFaces[i].styles[j] = faceData.GetChar();
				}
				g_pFaces[i].lightofs = faceData.GetInt();
			}
		}
	}
}
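//-----------------------------------------
//
// The wire layout both sides agree on above is: the raw light data,
// followed by a fixed-size per-face block of MAXLIGHTMAPS style bytes
// plus a lightofs int, which is why the worker can recover the split
// as (file size - face block size). A standalone sketch of that
// layout using std::vector instead of CUtlBuffer, assuming 4-byte
// ints and MAXLIGHTMAPS == 4 as in the engine:
//
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr int MAXLIGHTMAPS = 4;

struct Face { uint8_t styles[MAXLIGHTMAPS]; int32_t lightofs; };

int main()
{
	std::vector<uint8_t> lightdata = { 1, 2, 3 };
	std::vector<Face> faces( 2, Face{ { 0, 255, 255, 255 }, -1 } );

	// Serialize, mirroring the master-side loop: light data first,
	// then styles and lightofs for each face.
	std::vector<uint8_t> wire( lightdata );
	for ( const Face &f : faces )
	{
		wire.insert( wire.end(), f.styles, f.styles + MAXLIGHTMAPS );
		const uint8_t *p = reinterpret_cast<const uint8_t *>( &f.lightofs );
		wire.insert( wire.end(), p, p + sizeof( f.lightofs ) );
	}

	// The worker-side split: total size minus the fixed face block
	// yields the light data size.
	size_t faceBytes  = faces.size() * ( MAXLIGHTMAPS + sizeof( int32_t ) );
	size_t lightBytes = wire.size() - faceBytes;
	assert( lightBytes == lightdata.size() );
	return 0;
}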
Example #5
//-----------------------------------------
//
// Run PortalFlow across all available processing nodes
//
void RunMPIPortalFlow()
{
    Msg( "%-20s ", "MPIPortalFlow:" );
	if ( g_bMPIMaster )
		StartPacifier("");

	// Workers wait until we get the MC socket address.
	g_PortalMCThreadUniqueID = StatsDB_GetUniqueJobID();
	if ( g_bMPIMaster )
	{
		CCycleCount cnt;
		cnt.Sample();
		CUniformRandomStream randomStream;
		randomStream.SetSeed( cnt.GetMicroseconds() );

		g_PortalMCAddr.port = randomStream.RandomInt( 22000, 25000 ); // Pulled out of something else.
		g_PortalMCAddr.ip[0] = (unsigned char)RandomInt( 225, 238 );
		g_PortalMCAddr.ip[1] = (unsigned char)RandomInt( 0, 255 );
		g_PortalMCAddr.ip[2] = (unsigned char)RandomInt( 0, 255 );
		g_PortalMCAddr.ip[3] = (unsigned char)RandomInt( 3, 255 );

		g_pPortalMCSocket = CreateIPSocket();
		int i;
		for ( i=0; i < 5; i++ )
		{
			if ( g_pPortalMCSocket->BindToAny( randomStream.RandomInt( 20000, 30000 ) ) )
				break;
		}
		if ( i == 5 )
		{
			Error( "RunMPIPortalFlow: can't open a socket to multicast on." );
		}

		char cPacketID[2] = { VMPI_VVIS_PACKET_ID, VMPI_SUBPACKETID_MC_ADDR };
		VMPI_Send2Chunks( cPacketID, sizeof( cPacketID ), &g_PortalMCAddr, sizeof( g_PortalMCAddr ), VMPI_PERSISTENT );
	}
	else
	{
		VMPI_SetCurrentStage( "wait for MC address" );

		while ( !g_bGotMCAddr )
		{
			VMPI_DispatchNextMessage();
		}

		// Open our multicast receive socket.
		g_pPortalMCSocket = CreateMulticastListenSocket( g_PortalMCAddr );
		if ( !g_pPortalMCSocket )
		{
			char err[512];
			IP_GetLastErrorString( err, sizeof( err ) );
			Error( "RunMPIPortalFlow: CreateMulticastListenSocket failed. (%s).", err );
		}

		// Make a thread to listen for the data on the multicast socket.
		DWORD dwDummy = 0;
		g_MCThreadExitEvent.Init( false, false );

		// Make sure we kill the MC thread if the app exits ungracefully.
		CmdLib_AtCleanup( MCThreadCleanupFn );
		
		g_hMCThread = CreateThread( 
			NULL,
			0,
			PortalMCThreadFn,
			NULL,
			0,
			&dwDummy );

		if ( !g_hMCThread )
		{
			Error( "RunMPIPortalFlow: CreateThread failed for multicast receive thread." );
		}			
	}

	VMPI_SetCurrentStage( "RunMPIBasePortalFlow" );


	g_pDistributeWorkCallbacks = &g_VisDistributeWorkCallbacks;

	g_CPUTime.Init();
	double elapsed = DistributeWork( 
		g_numportals * 2,		// # work units
		VMPI_DISTRIBUTEWORK_PACKETID,	// packet ID
		ProcessPortalFlow,		// Worker function to process work units
		ReceivePortalFlow		// Master function to receive work results
		);
		
	g_pDistributeWorkCallbacks = NULL;

	CheckExitedEarly();

	// Stop the multicast stuff.
	VMPI_DeletePortalMCSocket();

	if( !g_bMPIMaster )
	{
		if ( g_iVMPIVerboseLevel >= 1 )
		{
			Msg( "Received %d (out of %d) portals from multicast.\n", g_nMulticastPortalsReceived, g_numportals * 2 );
			Msg( "%.1f%% CPU utilization during PortalFlow\n", (g_CPUTime.GetSeconds() * 100.0f / elapsed) / numthreads );
		}

		Msg( "VVIS worker finished. Over and out.\n" );
		VMPI_SetCurrentStage( "worker done" );

		CmdLib_Exit( 0 );
	}

	if ( g_bMPIMaster )
	{
		EndPacifier( false );
		Msg( " (%d)\n", (int)elapsed );
	}
}
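//-----------------------------------------
//
// A note on the address pick above: IPv4 multicast occupies
// 224.0.0.0/4 (first octet 224..239). Drawing the first octet from
// 225..238 stays inside that block while presumably steering clear of
// the reserved 224.0.0.x control addresses and the administratively
// scoped 239/8 range; the last octet starts at 3, presumably to keep
// away from well-known control endings such as 224.0.0.1 (all hosts)
// and 224.0.0.2 (all routers). A sketch of the same pick with
// <random> standing in for CUniformRandomStream:
//
#include <cstdio>
#include <random>

int main()
{
	std::mt19937 rng( std::random_device{}() );
	auto ri = [&]( int lo, int hi )
	{
		return std::uniform_int_distribution<int>( lo, hi )( rng );
	};

	unsigned char ip[4] =
	{
		(unsigned char)ri( 225, 238 ),
		(unsigned char)ri( 0, 255 ),
		(unsigned char)ri( 0, 255 ),
		(unsigned char)ri( 3, 255 ),	// skip .0/.1/.2, as the code above does
	};
	int port = ri( 22000, 25000 );

	std::printf( "multicast group %d.%d.%d.%d:%d\n", ip[0], ip[1], ip[2], ip[3], port );
	return 0;
}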
Example #6
//-----------------------------------------
//
// Run BasePortalVis across all available processing nodes
// Then collect and redistribute the results.
//
void RunMPIBasePortalVis()
{
	int i;

	Msg( "\n\nportalbytes: %d\nNum Work Units: %d\nTotal data size: %d\n", portalbytes, g_numportals*2, portalbytes*g_numportals*2 );
    Msg("%-20s ", "BasePortalVis:");
	if ( g_bMPIMaster )
		StartPacifier("");


	VMPI_SetCurrentStage( "RunMPIBasePortalVis" );

	// Note: we're aiming for about 1500 portals in a map, so about 3000 work units.
	g_CPUTime.Init();
	double elapsed = DistributeWork( 
		g_numportals * 2,		// # work units
		VMPI_DISTRIBUTEWORK_PACKETID,	// packet ID
		ProcessBasePortalVis,	// Worker function to process work units
		ReceiveBasePortalVis	// Master function to receive work results
		);

	if ( g_bMPIMaster )
	{
		EndPacifier( false );
		Msg( " (%d)\n", (int)elapsed );
	}
	
	//
	// Distribute the results to all the workers.
	//
	if ( g_bMPIMaster )
	{
		if ( !fastvis )
		{
			VMPI_SetCurrentStage( "SendPortalResults" );

			// Store all the portal results in a temp file and multicast that to the workers.
			CUtlVector<char> allPortalData;
			allPortalData.SetSize( g_numportals * 2 * portalbytes * 2 );

			char *pOut = allPortalData.Base();
			for ( i=0; i < g_numportals * 2; i++) 
			{
				portal_t *p = &portals[i];
				
				memcpy( pOut, p->portalfront, portalbytes );
				pOut += portalbytes;
				
				memcpy( pOut, p->portalflood, portalbytes );
				pOut += portalbytes;
			}

			const char *pVirtualFilename = "--portal-results--";
			VMPI_FileSystem_CreateVirtualFile( pVirtualFilename, allPortalData.Base(), allPortalData.Count() );

			char cPacketID[2] = { VMPI_VVIS_PACKET_ID, VMPI_BASEPORTALVIS_RESULTS };
			VMPI_Send2Chunks( cPacketID, sizeof( cPacketID ), pVirtualFilename, strlen( pVirtualFilename ) + 1, VMPI_PERSISTENT );
		}
	}
	else
	{
		VMPI_SetCurrentStage( "RecvPortalResults" );

		// Wait until we've received the filename from the master.
		while ( g_BasePortalVisResultsFilename.Count() == 0 )
		{
			VMPI_DispatchNextMessage();
		}

		// Open the virtual portal-results file the master created.
		FileHandle_t fp = g_pFileSystem->Open( g_BasePortalVisResultsFilename.Base(), "rb", VMPI_VIRTUAL_FILES_PATH_ID );
		if ( !fp )
			Error( "Can't open '%s' to read portal info.", g_BasePortalVisResultsFilename.Base() );

		for ( i=0; i < g_numportals * 2; i++) 
		{
			portal_t *p = &portals[i];

			p->portalfront = (byte*)malloc (portalbytes);
			g_pFileSystem->Read( p->portalfront, portalbytes, fp );
			
			p->portalflood = (byte*)malloc (portalbytes);
			g_pFileSystem->Read( p->portalflood, portalbytes, fp );
		
			p->portalvis = (byte*)malloc (portalbytes);
			memset (p->portalvis, 0, portalbytes);
		
			p->nummightsee = CountBits (p->portalflood, g_numportals*2);
		}

		g_pFileSystem->Close( fp );
	}

	
	if ( !g_bMPIMaster )
	{
		if ( g_iVMPIVerboseLevel >= 1 )
			Msg( "\n%% worker CPU utilization during BasePortalVis: %.1f\n", (g_CPUTime.GetSeconds() * 100.0f / elapsed) / numthreads );
	}
}
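//-----------------------------------------
//
// Worked example of the utilization figure printed above: total CPU
// seconds accumulated across the worker threads, divided by wall
// time, divided by the thread count. The numbers here are invented.
//
#include <cstdio>

int main()
{
	double cpuSeconds = 360.0;	// g_CPUTime.GetSeconds(): summed across threads
	double elapsed    = 100.0;	// wall-clock seconds from DistributeWork
	int    numthreads = 4;

	// Same expression as the Msg() call above; prints 90.0%.
	double utilization = ( cpuSeconds * 100.0 / elapsed ) / numthreads;
	std::printf( "%% worker CPU utilization: %.1f\n", utilization );
	return 0;
}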
Example #7
void RecvQDirInfo()
{
	while ( !g_bReceivedDirectoryInfo )
		VMPI_DispatchNextMessage();
}