void RunMPIBuildVisLeafs()
{
	g_CPUTime.Init();

	Msg( "%-20s ", "BuildVisLeafs:" );
	if ( g_bMPIMaster )
	{
		StartPacifier("");
	}

	memset( g_VMPIVisLeafsData, 0, sizeof( g_VMPIVisLeafsData ) );
	if ( !g_bMPIMaster || VMPI_GetActiveWorkUnitDistributor() == k_eWorkUnitDistributor_SDK )
	{
		// Allocate space for the transfers for each thread.
		for ( int i=0; i < numthreads; i++ )
		{
			g_VMPIVisLeafsData[i].m_pBuildVisLeafsTransfers = BuildVisLeafs_Start();
		}
	}

	//
	// Slaves ask for work via GetMPIBuildVisLeafWork()
	// Results are returned in BuildVisRow()
	//
	VMPI_SetCurrentStage( "RunMPIBuildVisLeafs" );

	double elapsed = DistributeWork(
		dvis->numclusters,
		VMPI_DISTRIBUTEWORK_PACKETID,
		MPI_ProcessVisLeafs,
		MPI_ReceiveVisLeafsResults );

	// Free the transfers from each thread.
	for ( int i=0; i < numthreads; i++ )
	{
		if ( g_VMPIVisLeafsData[i].m_pBuildVisLeafsTransfers )
			BuildVisLeafs_End( g_VMPIVisLeafsData[i].m_pBuildVisLeafsTransfers );
	}

	if ( g_bMPIMaster )
	{
		EndPacifier( false );
		Msg( " (%d)\n", (int)elapsed );
	}
	else
	{
		if ( g_iVMPIVerboseLevel >= 1 )
			Msg( "%.1f%% CPU utilization during BuildVisLeafs\n", (g_CPUTime.GetSeconds() * 100.0f / elapsed) / numthreads );
	}
}
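// Note on the worker CPU-utilization messages printed by the stages in this file:
// each one reports (accumulated CPU seconds * 100 / wall-clock seconds of the stage),
// in most cases divided by numthreads to give a per-thread average. Illustrative
// numbers only: 120 CPU-seconds over a 60-second stage on a 4-thread worker comes
// out to (120 * 100 / 60) / 4 = 50%.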
void RunMPIBuildFacelights()
{
	g_CPUTime.Init();

	Msg( "%-20s ", "BuildFaceLights:" );
	if ( g_bMPIMaster )
	{
		StartPacifier("");
	}

	VMPI_SetCurrentStage( "RunMPIBuildFaceLights" );

	double elapsed = DistributeWork(
		numfaces,
		VMPI_DISTRIBUTEWORK_PACKETID,
		MPI_ProcessFaces,
		MPI_ReceiveFaceResults );

	if ( g_bMPIMaster )
	{
		EndPacifier( false );
		Msg( " (%d)\n", (int)elapsed );
	}

	if ( g_bMPIMaster )
	{
		//
		// BuildPatchLights is normally called from BuildFacelights(),
		// but in MPI mode we have the master do the calculation.
		// We might be able to speed this up by doing it while the master
		// is idling in the above loop. Wouldn't want to slow down the
		// handing out of work - maybe another thread?
		//
		for ( int i=0; i < numfaces; ++i )
		{
			BuildPatchLights( i );
		}
	}
	else
	{
		if ( g_iVMPIVerboseLevel >= 1 )
			Msg( "\n\n%.1f%% CPU utilization during BuildFaceLights\n\n", ( g_CPUTime.GetSeconds() * 100 / elapsed ) );
	}
}
//-----------------------------------------
//
// Run BasePortalVis across all available processing nodes,
// then collect and redistribute the results.
//
void RunMPIBasePortalVis()
{
	int i;

	Msg( "\n\nportalbytes: %d\nNum Work Units: %d\nTotal data size: %d\n", portalbytes, g_numportals*2, portalbytes*g_numportals*2 );

	Msg( "%-20s ", "BasePortalVis:" );
	if ( g_bMPIMaster )
		StartPacifier("");

	VMPI_SetCurrentStage( "RunMPIBasePortalVis" );

	// Note: we're aiming for about 1500 portals in a map, so about 3000 work units.
	g_CPUTime.Init();
	double elapsed = DistributeWork(
		g_numportals * 2,				// # work units
		VMPI_DISTRIBUTEWORK_PACKETID,	// packet ID
		ProcessBasePortalVis,			// Worker function to process work units
		ReceiveBasePortalVis			// Master function to receive work results
		);

	if ( g_bMPIMaster )
	{
		EndPacifier( false );
		Msg( " (%d)\n", (int)elapsed );
	}

	//
	// Distribute the results to all the workers.
	//
	if ( g_bMPIMaster )
	{
		if ( !fastvis )
		{
			VMPI_SetCurrentStage( "SendPortalResults" );

			// Store all the portal results in a temp file and multicast that to the workers.
			CUtlVector<char> allPortalData;
			allPortalData.SetSize( g_numportals * 2 * portalbytes * 2 );

			char *pOut = allPortalData.Base();
			for ( i=0; i < g_numportals * 2; i++ )
			{
				portal_t *p = &portals[i];

				memcpy( pOut, p->portalfront, portalbytes );
				pOut += portalbytes;

				memcpy( pOut, p->portalflood, portalbytes );
				pOut += portalbytes;
			}

			const char *pVirtualFilename = "--portal-results--";
			VMPI_FileSystem_CreateVirtualFile( pVirtualFilename, allPortalData.Base(), allPortalData.Count() );

			char cPacketID[2] = { VMPI_VVIS_PACKET_ID, VMPI_BASEPORTALVIS_RESULTS };
			VMPI_Send2Chunks(
				cPacketID, sizeof( cPacketID ),
				pVirtualFilename, strlen( pVirtualFilename ) + 1,
				VMPI_PERSISTENT );
		}
	}
	else
	{
		VMPI_SetCurrentStage( "RecvPortalResults" );

		// Wait until we've received the filename from the master.
		while ( g_BasePortalVisResultsFilename.Count() == 0 )
		{
			VMPI_DispatchNextMessage();
		}

		// Open the virtual file the master multicast to us and read the portal bits back in.
		FileHandle_t fp = g_pFileSystem->Open( g_BasePortalVisResultsFilename.Base(), "rb", VMPI_VIRTUAL_FILES_PATH_ID );
		if ( !fp )
			Error( "Can't open '%s' to read portal info.", g_BasePortalVisResultsFilename.Base() );

		for ( i=0; i < g_numportals * 2; i++ )
		{
			portal_t *p = &portals[i];

			p->portalfront = (byte*)malloc( portalbytes );
			g_pFileSystem->Read( p->portalfront, portalbytes, fp );

			p->portalflood = (byte*)malloc( portalbytes );
			g_pFileSystem->Read( p->portalflood, portalbytes, fp );

			p->portalvis = (byte*)malloc( portalbytes );
			memset( p->portalvis, 0, portalbytes );

			p->nummightsee = CountBits( p->portalflood, g_numportals*2 );
		}

		g_pFileSystem->Close( fp );
	}

	if ( !g_bMPIMaster )
	{
		if ( g_iVMPIVerboseLevel >= 1 )
			Msg( "\n%% worker CPU utilization during BasePortalVis: %.1f\n", (g_CPUTime.GetSeconds() * 100.0f / elapsed) / numthreads );
	}
}
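// Layout of the "--portal-results--" virtual file the master builds above: one record
// per portal (g_numportals * 2 records total), each record holding the front bits
// followed by the flood bits, portalbytes each:
//
//	[ portalfront : portalbytes ][ portalflood : portalbytes ]   ... repeated
//
// The workers read the records back in the same order in the else branch of
// RunMPIBasePortalVis.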
//-----------------------------------------
//
// Run PortalFlow across all available processing nodes
//
void RunMPIPortalFlow()
{
	Msg( "%-20s ", "MPIPortalFlow:" );
	if ( g_bMPIMaster )
		StartPacifier("");

	// Workers wait until we get the MC socket address.
	g_PortalMCThreadUniqueID = StatsDB_GetUniqueJobID();
	if ( g_bMPIMaster )
	{
		CCycleCount cnt;
		cnt.Sample();
		CUniformRandomStream randomStream;
		randomStream.SetSeed( cnt.GetMicroseconds() );

		g_PortalMCAddr.port = randomStream.RandomInt( 22000, 25000 ); // Pulled out of something else.
		g_PortalMCAddr.ip[0] = (unsigned char)RandomInt( 225, 238 );
		g_PortalMCAddr.ip[1] = (unsigned char)RandomInt( 0, 255 );
		g_PortalMCAddr.ip[2] = (unsigned char)RandomInt( 0, 255 );
		g_PortalMCAddr.ip[3] = (unsigned char)RandomInt( 3, 255 );

		g_pPortalMCSocket = CreateIPSocket();

		int i;
		for ( i=0; i < 5; i++ )
		{
			if ( g_pPortalMCSocket->BindToAny( randomStream.RandomInt( 20000, 30000 ) ) )
				break;
		}
		if ( i == 5 )
		{
			Error( "RunMPIPortalFlow: can't open a socket to multicast on." );
		}

		char cPacketID[2] = { VMPI_VVIS_PACKET_ID, VMPI_SUBPACKETID_MC_ADDR };
		VMPI_Send2Chunks( cPacketID, sizeof( cPacketID ), &g_PortalMCAddr, sizeof( g_PortalMCAddr ), VMPI_PERSISTENT );
	}
	else
	{
		VMPI_SetCurrentStage( "wait for MC address" );

		while ( !g_bGotMCAddr )
		{
			VMPI_DispatchNextMessage();
		}

		// Open our multicast receive socket.
		g_pPortalMCSocket = CreateMulticastListenSocket( g_PortalMCAddr );
		if ( !g_pPortalMCSocket )
		{
			char err[512];
			IP_GetLastErrorString( err, sizeof( err ) );
			Error( "RunMPIPortalFlow: CreateMulticastListenSocket failed. (%s).", err );
		}

		// Make a thread to listen for the data on the multicast socket.
		DWORD dwDummy = 0;
		g_MCThreadExitEvent.Init( false, false );

		// Make sure we kill the MC thread if the app exits ungracefully.
		CmdLib_AtCleanup( MCThreadCleanupFn );

		g_hMCThread = CreateThread(
			NULL,
			0,
			PortalMCThreadFn,
			NULL,
			0,
			&dwDummy );
		if ( !g_hMCThread )
		{
			Error( "RunMPIPortalFlow: CreateThread failed for multicast receive thread." );
		}
	}

	VMPI_SetCurrentStage( "RunMPIPortalFlow" );

	g_pDistributeWorkCallbacks = &g_VisDistributeWorkCallbacks;

	g_CPUTime.Init();
	double elapsed = DistributeWork(
		g_numportals * 2,				// # work units
		VMPI_DISTRIBUTEWORK_PACKETID,	// packet ID
		ProcessPortalFlow,				// Worker function to process work units
		ReceivePortalFlow				// Master function to receive work results
		);

	g_pDistributeWorkCallbacks = NULL;

	CheckExitedEarly();

	// Stop the multicast stuff.
	VMPI_DeletePortalMCSocket();

	if ( !g_bMPIMaster )
	{
		if ( g_iVMPIVerboseLevel >= 1 )
		{
			Msg( "Received %d (out of %d) portals from multicast.\n", g_nMulticastPortalsReceived, g_numportals * 2 );
			Msg( "%.1f%% CPU utilization during PortalFlow\n", (g_CPUTime.GetSeconds() * 100.0f / elapsed) / numthreads );
		}

		Msg( "VVIS worker finished. Over and out.\n" );
		VMPI_SetCurrentStage( "worker done" );

		CmdLib_Exit( 0 );
	}

	if ( g_bMPIMaster )
	{
		EndPacifier( false );
		Msg( " (%d)\n", (int)elapsed );
	}
}
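// The master above picks the multicast group's first octet from 225-238, which keeps
// the address inside the IPv4 multicast block (224.0.0.0/4); presumably 224 and 239
// are skipped to stay clear of the reserved 224.0.0.x local-control addresses and the
// administratively scoped 239.0.0.0/8 range.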
static double IP_FloatTime()
{
	CCycleCount cnt;
	cnt.Sample();
	return cnt.GetSeconds();
}
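// IP_FloatTime() just wraps CCycleCount to get a timestamp in seconds; typical use
// is to difference two samples around a span of work, e.g. (illustrative only):
//
//	double flStart = IP_FloatTime();
//	// ... work ...
//	double flElapsed = IP_FloatTime() - flStart;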