Example #1
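This example shows a BuildVisLeafs pass being distributed over VMPI: RunMPIBuildVisLeafs() sets up per-thread state and hands one work unit per visleaf cluster to DistributeWork(), which splits its behavior between the master and the workers.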
void RunMPIBuildVisLeafs()
{
	g_CPUTime.Init();

	Msg( "%-20s ", "BuildVisLeafs  :" );
	if ( g_bMPIMaster )
	{
		StartPacifier("");
	}

	memset( g_VMPIVisLeafsData, 0, sizeof( g_VMPIVisLeafsData ) );
	// Workers always need per-thread transfers; with the SDK work-unit
	// distributor the master allocates them too (presumably because it
	// processes work units itself in that mode).
	if ( !g_bMPIMaster || VMPI_GetActiveWorkUnitDistributor() == k_eWorkUnitDistributor_SDK )
	{
		// Allocate space for the transfers for each thread.
		for ( int i=0; i < numthreads; i++ )
		{
			g_VMPIVisLeafsData[i].m_pBuildVisLeafsTransfers = BuildVisLeafs_Start();
		}
	}

	//
	// Workers process work units in MPI_ProcessVisLeafs();
	// the master receives the results in MPI_ReceiveVisLeafsResults().
	//
	VMPI_SetCurrentStage( "RunMPIBuildVisLeafs" );
	
	double elapsed = DistributeWork( 
		dvis->numclusters, 
		VMPI_DISTRIBUTEWORK_PACKETID,
		MPI_ProcessVisLeafs, 
		MPI_ReceiveVisLeafsResults );

	// Free the transfers from each thread.
	for ( int i=0; i < numthreads; i++ )
	{
		if ( g_VMPIVisLeafsData[i].m_pBuildVisLeafsTransfers )
			BuildVisLeafs_End( g_VMPIVisLeafsData[i].m_pBuildVisLeafsTransfers );
	}

	if ( g_bMPIMaster )
	{
		EndPacifier(false);
		Msg( " (%d)\n", (int)elapsed );
	}
	else
	{
		if ( g_iVMPIVerboseLevel >= 1 )
			Msg( "%.1f%% CPU utilization during PortalFlow\n", (g_CPUTime.GetSeconds() * 100.0f / elapsed) / numthreads );
	}
}
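
The callback pair passed to DistributeWork() above, MPI_ProcessVisLeafs and MPI_ReceiveVisLeafsResults, is not shown on this page. Below is a minimal sketch of the contract the two sides fill; the parameter lists are assumptions inferred from how DistributeWork() is used here, not copied from the VMPI headers.

// Hypothetical sketch only: these signatures are assumptions inferred from the
// DistributeWork() call above, not taken from vmpi_distribute_work.h.
class MessageBuffer;                    // VMPI's packet read/write buffer (forward-declared here)
typedef unsigned long long uint64;      // stand-in for the engine's uint64 typedef

// Worker side: invoked once for each work unit the master hands out.
void MPI_ProcessVisLeafs( int iThread, uint64 iWorkUnit, MessageBuffer *pBuf )
{
	// iWorkUnit indexes one visleaf cluster (DistributeWork was given
	// dvis->numclusters work units). Build the cluster's data on thread
	// iThread and append the results to pBuf for the master.
}

// Master side: invoked as completed work units arrive back from workers.
void MPI_ReceiveVisLeafsResults( uint64 iWorkUnit, MessageBuffer *pBuf, int iWorker )
{
	// Unpack the results for cluster iWorkUnit from pBuf and store them;
	// iWorker identifies the process that produced them.
}

The worker packs its results into the message buffer and the master unpacks them when the completed unit arrives, so nothing crosses the network besides these packets.
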
// Returns the time it took to finish the work, in seconds.
double DistributeWork(
    uint64 nWorkUnits,				// how many work units to dole out
    char cPacketID,
    ProcessWorkUnitFn processFn,	// workers implement this to process a work unit and send results back
    ReceiveWorkUnitFn receiveFn		// the master implements this to receive the results of a work unit
)
{
    // The first call lands on 0 below (g_iCurDSInfo presumably starts at -1).
    ++g_iCurDSInfo;

    if ( g_iCurDSInfo == 0 )
    {
        // Register our disconnect handler so we can deal with it if clients bail out.
        if ( g_bMPIMaster )
        {
            VMPI_AddDisconnectHandler( VMPI_DistributeWork_DisconnectHandler );
        }
    }
    else if ( g_iCurDSInfo >= MAX_DW_CALLS )
    {
        Error( "DistributeWork: called more than %d times.\n", MAX_DW_CALLS );
    }

    CDSInfo *pInfo = &g_DSInfo;

    pInfo->m_cPacketID = cPacketID;
    pInfo->m_nWorkUnits = nWorkUnits;

    // Make all the workers wait until the master is ready.
    PreDistributeWorkSync( pInfo );

    // Reset the global work-unit counters for this distribution pass.
    g_nWUs = nWorkUnits;
    g_nCompletedWUs = 0ull;
    g_nDuplicatedWUs = 0ull;

    // Setup stats info.
    double flMPIStartTime = Plat_FloatTime();
    g_wuCountByProcess.SetCount( 512 );
    memset( g_wuCountByProcess.Base(), 0, sizeof( int ) * g_wuCountByProcess.Count() );

    unsigned long nBytesSentStart = g_nBytesSent;
    unsigned long nBytesReceivedStart = g_nBytesReceived;
    unsigned long nMessagesSentStart = g_nMessagesSent;
    unsigned long nMessagesReceivedStart = g_nMessagesReceived;

    EWorkUnitDistributor eWorkUnitDistributor = VMPI_GetActiveWorkUnitDistributor();
    if ( g_bMPIMaster )
    {
        Assert( !g_pCurDistributorMaster );
        g_pCurDistributorMaster = ( eWorkUnitDistributor == k_eWorkUnitDistributor_SDK ? CreateWUDistributor_SDKMaster() : CreateWUDistributor_DefaultMaster() );

        DistributeWork_Master( pInfo, processFn, receiveFn );

        g_pCurDistributorMaster->Release();
        g_pCurDistributorMaster = NULL;
    }
    else
    {
        Assert( !g_pCurDistributorWorker );
        g_pCurDistributorWorker = ( eWorkUnitDistributor == k_eWorkUnitDistributor_SDK ? CreateWUDistributor_SDKWorker() : CreateWUDistributor_DefaultWorker() );

        DistributeWork_Worker( pInfo, processFn );

        g_pCurDistributorWorker->Release();
        g_pCurDistributorWorker = NULL;
    }

    double flTimeSpent = Plat_FloatTime() - flMPIStartTime;
    ShowMPIStats(
        flTimeSpent,
        g_nBytesSent - nBytesSentStart,
        g_nBytesReceived - nBytesReceivedStart,
        g_nMessagesSent - nMessagesSentStart,
        g_nMessagesReceived - nMessagesReceivedStart
    );

    // Mark that the threads aren't working on anything at the moment.
    for ( int i=0; i < ARRAYSIZE( g_ThreadWUs ); i++ )
        g_ThreadWUs[i] = ~0ull;

    return flTimeSpent;
}
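
Note the symmetric entry point: master and workers make the same DistributeWork() call, and the function branches on g_bMPIMaster internally. The disconnect handler is registered only on the very first call, which works because the call counter presumably starts at -1. A self-contained sketch of that first-call idiom, with illustrative names that are not part of VMPI:

// Illustrative names only; this is not VMPI code.
#include <stdio.h>

static int s_iCurCall = -1;   // like g_iCurDSInfo, presumably initialized to -1

void RunPass()
{
	if ( ++s_iCurCall == 0 )
	{
		// Runs exactly once, on the first pass: the spot for one-time
		// setup such as registering a disconnect handler.
		printf( "one-time setup\n" );
	}
	printf( "pass %d complete\n", s_iCurCall );
}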