Example #1
/* Search for partial oscillations: for each candidate period i = 1..testUpTo,
   compare generation j of the pattern against generation i+j for every phase
   j < testPhases, keeping the nmatches best matches in posc. */
void partialOscillation(LifeList *cells, LifeList *working,
                        int testUpTo, int testPhases,
                        int nmatches, PartialOscillatorDesc *posc) {

  int i, j;
  History hist;
  Transformation normT;

  /* Normalize the pattern and record enough generations of its history. */
  normT = normalize(cells);
  hist = makeHistory(cells, testUpTo + testPhases);

  initMatchList(posc, nmatches);
  for (i = 1; i <= testUpTo; i++) {
    for (j = 0; j < testPhases; j++) {
      /* Compare generation j against generation i+j. */
      getGeneration(cells, hist, j);
      getGeneration(working, hist, i + j);
      bestMatches(cells, working, i, nmatches, posc);
    }
  }

  /* Restore generation 0 and undo the normalizing transformation. */
  getGeneration(cells, hist, 0);
  freeHistory(hist);
  transformBack(cells->cellList, normT, cells->ncells);

}
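
A minimal sketch of how the routine above might be driven. The helpers loadPattern, freePattern, and reportMatch are hypothetical placeholders for whatever the surrounding program uses to load a pattern into a LifeList, release it, and report a PartialOscillatorDesc; only the call to partialOscillation itself follows the signature shown above.

/* Hypothetical driver for partialOscillation(). loadPattern(), freePattern()
 * and reportMatch() are placeholders, not part of the snippet above. */
#define NMATCHES 5

void findPartialOscillations(const char *file) {
  LifeList cells, working;                  /* pattern and scratch copy */
  PartialOscillatorDesc matches[NMATCHES];  /* best matches found */
  int i;

  loadPattern(file, &cells);                /* hypothetical loader */
  loadPattern(file, &working);

  /* Test periods 1..30, comparing 4 phases at each period. */
  partialOscillation(&cells, &working, 30, 4, NMATCHES, matches);

  for (i = 0; i < NMATCHES; i++)
    reportMatch(&matches[i]);               /* hypothetical reporting */

  freePattern(&cells);
  freePattern(&working);
}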
//---------------------------------------------------------------------------//
// Test templates
//---------------------------------------------------------------------------//
TEUCHOS_UNIT_TEST( DomainCommunicator, Communicate )
{
    typedef Tpetra::Vector<double,int,long> VectorType;
    typedef Tpetra::CrsMatrix<double,int,long> MatrixType;
    typedef MCLS::MatrixTraits<VectorType,MatrixType> MT;
    typedef MCLS::AdjointHistory<long> HistoryType;
    typedef std::mt19937 rng_type;
    typedef MCLS::AdjointTally<VectorType> TallyType;
    typedef MCLS::AlmostOptimalDomain<VectorType,MatrixType,rng_type,TallyType>
	DomainType;

    Teuchos::RCP<const Teuchos::Comm<int> > comm = 
	Teuchos::DefaultComm<int>::getComm();
    int comm_size = comm->getSize();
    int comm_rank = comm->getRank();

    // This test is parallel.
    if ( comm_size > 1 )
    {
	int local_num_rows = 10;
	int global_num_rows = local_num_rows*comm_size;
	Teuchos::RCP<const Tpetra::Map<int,long> > map = 
	    Tpetra::createUniformContigMap<int,long>( global_num_rows, comm );

	// Build the linear operator and solution vector.
	Teuchos::RCP<MatrixType> A = Tpetra::createCrsMatrix<double,int,long>( map );
	Teuchos::Array<long> global_columns( 1 );
	Teuchos::Array<double> values( 1 );
	for ( int i = 1; i < global_num_rows; ++i )
	{
	    global_columns[0] = i-1;
	    values[0] = -0.5/comm_size;
	    A->insertGlobalValues( i, global_columns(), values() );
	}
	global_columns[0] = global_num_rows-1;
	values[0] = -0.5/comm_size;
	A->insertGlobalValues( global_num_rows-1, global_columns(), values() );
	A->fillComplete();

	Teuchos::RCP<VectorType> x = MT::cloneVectorFromMatrixRows( *A );
        Teuchos::RCP<MatrixType> A_T = MT::copyTranspose(*A);

	// Build the adjoint domain.
	Teuchos::ParameterList plist;
	plist.set<int>( "Overlap Size", 0 );
	Teuchos::RCP<DomainType> domain = 
            Teuchos::rcp( new DomainType( A_T, x, plist ) );
	Teuchos::RCP<MCLS::PRNG<rng_type> > rng = Teuchos::rcp(
	    new MCLS::PRNG<rng_type>( comm->getRank() ) );
	domain->setRNG( rng );

	// History setup.
	HistoryType::setByteSize();

	// Build the domain communicator.
	MCLS::DomainCommunicator<DomainType>::BankType bank;
	int buffer_size = 3;
	plist.set<int>( "MC Buffer Size", buffer_size );
	MCLS::DomainCommunicator<DomainType> communicator( domain, comm, plist );

	// Test initialization.
	TEST_EQUALITY( Teuchos::as<int>(communicator.maxBufferSize()), buffer_size );
	TEST_ASSERT( !communicator.sendStatus() );
	TEST_ASSERT( !communicator.receiveStatus() );

	// Post receives.
	communicator.post();
	if ( comm_rank == 0 )
	{
	    TEST_ASSERT( !communicator.receiveStatus() );
	}
	else
	{
	    TEST_ASSERT( communicator.receiveStatus() );
	}

	// End communication.
	communicator.end();
	TEST_ASSERT( !communicator.receiveStatus() );

	// Post new receives.
	communicator.post();
	if ( comm_rank == 0 )
	{
	    TEST_ASSERT( !communicator.receiveStatus() );
	}
	else
	{
	    TEST_ASSERT( communicator.receiveStatus() );
	}
	TEST_EQUALITY( communicator.sendBufferSize(), 0 );

	// Flush with zero histories.
	TEST_EQUALITY( communicator.flush(), 0 );
	if ( comm_rank == 0 )
	{
	    TEST_ASSERT( !communicator.receiveStatus() );
	}
	else
	{
	    TEST_ASSERT( communicator.receiveStatus() );
	}
	TEST_ASSERT( !communicator.sendStatus() );

	// Receive empty flushed buffers.
	int zero_histories = communicator.wait( bank );
	TEST_EQUALITY( zero_histories, 0 );
	TEST_ASSERT( !communicator.receiveStatus() );
	TEST_ASSERT( bank.empty() );

	// Repost receives.
	communicator.post();
	if ( comm_rank == 0 )
	{
	    TEST_ASSERT( !communicator.receiveStatus() );
	}
	else
	{
	    TEST_ASSERT( communicator.receiveStatus() );
	}

	// Proc 0 will send to proc 1.
	if ( comm_rank == 0 )
	{
	    TEST_ASSERT( !domain->isGlobalState(10) );

	    HistoryType h1 = 
		makeHistory( 10, 1.1, comm_rank*4 + 1 );
	    const MCLS::DomainCommunicator<DomainType>::Result
		r1 = communicator.communicate( h1 );
	    TEST_ASSERT( !r1.sent );
	    TEST_EQUALITY( communicator.sendBufferSize(), 1 );

	    HistoryType h2 = 
		makeHistory( 10, 2.1, comm_rank*4 + 2 );
	    const MCLS::DomainCommunicator<DomainType>::Result
		r2 = communicator.communicate( h2 );
	    TEST_ASSERT( !r2.sent );
	    TEST_EQUALITY( communicator.sendBufferSize(), 2 );

	    HistoryType h3 = 
		makeHistory( 10, 3.1, comm_rank*4 + 3 );
	    const MCLS::DomainCommunicator<DomainType>::Result
		r3 = communicator.communicate( h3 );
	    TEST_ASSERT( r3.sent );
	    TEST_EQUALITY( r3.destination, 1 );
	    TEST_EQUALITY( communicator.sendBufferSize(), 0 );
	}

	// Proc comm_rank sends to proc comm_rank+1 and receives from proc
	// comm_rank-1.
	else if ( comm_rank < comm_size - 1 )
	{
	    // Send to proc comm_rank+1.
	    TEST_ASSERT( !domain->isGlobalState((comm_rank+1)*10) );

	    HistoryType h1 = 
		makeHistory( (comm_rank+1)*10, 1.1, comm_rank*4 + 1 );
	    const MCLS::DomainCommunicator<DomainType>::Result
		r1 = communicator.communicate( h1 );
	    TEST_ASSERT( !r1.sent );
	    TEST_EQUALITY( communicator.sendBufferSize(), 1 );

	    HistoryType h2 = 
		makeHistory( (comm_rank+1)*10, 2.1, comm_rank*4 + 2 );
	    const MCLS::DomainCommunicator<DomainType>::Result
		r2 = communicator.communicate( h2 );
	    TEST_ASSERT( !r2.sent );
	    TEST_EQUALITY( communicator.sendBufferSize(), 2 );

	    HistoryType h3 = 
		makeHistory( (comm_rank+1)*10, 3.1, comm_rank*4 + 3 );
	    const MCLS::DomainCommunicator<DomainType>::Result
		r3 = communicator.communicate( h3 );
	    TEST_ASSERT( r3.sent );
	    TEST_EQUALITY( r3.destination, comm_rank+1 );
	    TEST_EQUALITY( communicator.sendBufferSize(), 0 );

	    // Receive from proc comm_rank-1.
	    while ( bank.empty() )
	    {
		communicator.checkAndPost( bank );
	    }

	    TEST_EQUALITY( bank.size(), 3 );

	    HistoryType rp3 = bank.top();
	    bank.pop();
	    HistoryType rp2 = bank.top();
	    bank.pop();
	    HistoryType rp1 = bank.top();
	    bank.pop();
	    TEST_ASSERT( bank.empty() );

	    TEST_EQUALITY( rp3.globalState(), comm_rank*10 );
	    TEST_EQUALITY( rp3.weight(), 3.1 );
	    TEST_EQUALITY( rp2.globalState(), comm_rank*10 );
	    TEST_EQUALITY( rp2.weight(), 2.1 );
	    TEST_EQUALITY( rp1.globalState(), comm_rank*10 );
	    TEST_EQUALITY( rp1.weight(), 1.1 );

	    TEST_ASSERT( communicator.receiveStatus() );
	}

	// The last proc just receives.
	else
	{
	    // Check and post until a buffer is received from proc comm_rank-1.
	    while ( bank.empty() )
	    {
		communicator.checkAndPost( bank );
	    }
	    TEST_ASSERT( communicator.receiveStatus() );
	    TEST_EQUALITY( bank.size(), 3 );

	    HistoryType rp3 = bank.top();
	    bank.pop();
	    HistoryType rp2 = bank.top();
	    bank.pop();
	    HistoryType rp1 = bank.top();
	    bank.pop();
	    TEST_ASSERT( bank.empty() );

	    TEST_EQUALITY( rp3.globalState(), comm_rank*10 );
	    TEST_EQUALITY( rp3.weight(), 3.1 );
	    TEST_EQUALITY( rp2.globalState(), comm_rank*10 );
	    TEST_EQUALITY( rp2.weight(), 2.1 );
	    TEST_EQUALITY( rp1.globalState(), comm_rank*10 );
	    TEST_EQUALITY( rp1.weight(), 1.1 );
	}

	// End communication.
	communicator.end();
	TEST_ASSERT( !communicator.receiveStatus() );
    }

    // Barrier before exiting to make sure memory deallocation happened
    // correctly. 
    comm->barrier();
}
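
The test above is registered through the TEUCHOS_UNIT_TEST macro; running it requires a driver that initializes MPI and invokes the Teuchos unit-test repository. A minimal sketch of such a driver follows; the standalone main and the MPI setup via Teuchos::GlobalMPISession are assumptions, not part of the test source (Trilinos-based projects often generate an equivalent driver through their build system).

// Hypothetical driver, linked together with the translation unit that
// contains the TEUCHOS_UNIT_TEST definition above.
#include <Teuchos_GlobalMPISession.hpp>
#include <Teuchos_UnitTestRepository.hpp>

int main( int argc, char* argv[] )
{
    // Initialize MPI; it is finalized automatically when the session
    // goes out of scope.
    Teuchos::GlobalMPISession mpi_session( &argc, &argv );

    // Run every registered Teuchos unit test in this executable,
    // including DomainCommunicator_Communicate above.
    return Teuchos::UnitTestRepository::runUnitTestsFromMain( argc, argv );
}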