Example #1
void playerScore(const StateMatrix& state,
                 const ControlData& control,
                 const Map& map,
                 const SimulationParameters& params,
                 GameStats& gameStats)
{
  std::vector<float> distance(state.rows());

  for (uint i=0; i<state.rows();i++)
  {
    std::pair<uint,uint> coords = indices(state(i,0), state(i,1), map, params);
    distance[i] = map.endDistance(coords.first, coords.second);
    gameStats.playerDists[control.ids.at(i)] = distance[i];
  }

  // argsort returns the permutation that sorts `distance` ascending;
  // inverting that permutation gives each player's rank (0 = closest to the end).
  std::vector<int> indices = argsort(distance);
  std::vector<int> ranks(indices.size());
  for (uint i=0; i<indices.size(); i++)
    ranks[indices[i]] = static_cast<int>(i);
  for (uint i=0; i<state.rows();i++)
  {
    gameStats.playerRanks[control.ids.at(i)] = ranks[i];
  }

}
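The argsort helper used above is not part of the standard library; the snippet assumes a project-specific utility that returns the index permutation sorting its input in ascending order (that is what makes ranks[indices[i]] = i an inverse permutation). A minimal sketch of such a helper, as an illustration rather than this project's actual code:

// Hypothetical argsort helper consistent with how it is used above: returns
// the permutation `idx` such that values[idx[0]] <= values[idx[1]] <= ...
#include <algorithm>
#include <numeric>
#include <vector>

std::vector<int> argsort(const std::vector<float>& values)
{
  std::vector<int> idx(values.size());
  std::iota(idx.begin(), idx.end(), 0);  // 0, 1, 2, ...
  std::sort(idx.begin(), idx.end(),
            [&values](int a, int b) { return values[a] < values[b]; });
  return idx;
}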
Example #2
File: ranktest.cpp  Project: hoangmit/mscds
void test_rank(const std::vector<bool>& vec) {
	vector<int> ranks(vec.size() + 1);
	ranks[0] = 0;
	for (unsigned int i = 1; i <= vec.size(); i++)
		if (vec[i-1]) ranks[i] = ranks[i-1] + 1;
		else ranks[i] = ranks[i-1];
	BitArray v;
	v = BitArrayBuilder::create(vec.size());
	//v.fillzero();
	for (unsigned int i = 0; i < vec.size(); i++) {
		v.setbit(i, vec[i]);
	}

	for (unsigned int i = 0; i < vec.size(); i++) {
		ASSERT(vec[i] == v.bit(i));
	}

	RankSelect r;
	RankSelect::BuilderTp::build(v, &r);
	for (unsigned int i = 0; i < vec.size(); ++i)
		ASSERT_EQ(vec[i], r.access(i));
	for (unsigned int i = 0; i <= vec.size(); ++i) {
		if (ranks[i] != r.rank(i)) {
			cout << "rank " << i << " " << ranks[i] << " " << r.rank(i) << endl;
			ASSERT_EQ(ranks[i], r.rank(i));
		}
	}
	unsigned int onecnt = 0;
	for (unsigned int i = 0; i < vec.size(); ++i)
		if (vec[i]) onecnt++;
	int last = -1;
	for (unsigned int i = 0; i < onecnt; ++i) {
		int pos = r.select(i);
		ASSERT_EQ(i, r.rank(pos));
		ASSERT_EQ(i + 1, r.rank(pos + 1));
		if ((size_t)pos >= vec.size() || !vec[pos] || pos <= last) {
			cout << "select " << i << "  " << r.select(i) << endl;
			if (i > 0) r.select(i-1);
			ASSERT_EQ(true, vec[pos]);
		}
		ASSERT(pos > last);
		last = pos;
	}
	last = -1;
	for (unsigned int i = 0; i < vec.size() - onecnt; ++i) {
		int pos = r.selectzero(i);
		ASSERT_EQ(i, r.rankzero(pos)) << "pos =" << pos << "   i =" << i << "  len=" << r.length() << endl;
		ASSERT_EQ(i + 1, r.rankzero(pos + 1));
		ASSERT((size_t)pos < vec.size() && vec[pos] == false);
		ASSERT(pos > last);
		last = pos;
	}
}
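For reference, the invariants exercised by this test follow the usual succinct rank/select conventions: rank(i) is the number of set bits among the first i positions (so rank(0) == 0), and select(i) is the position of the i-th set bit counting from zero, which is why the test expects rank(pos) == i and rank(pos + 1) == i + 1. A naive reference implementation of those semantics, useful as a mental model or cross-check (illustrative only, not the mscds API):

// Naive reference semantics for the rank/select invariants checked above.
#include <cstddef>
#include <vector>

// Number of ones in vec[0 .. i-1].
std::size_t naive_rank(const std::vector<bool>& vec, std::size_t i) {
	std::size_t count = 0;
	for (std::size_t j = 0; j < i; ++j)
		if (vec[j]) ++count;
	return count;
}

// Position of the i-th one (0-indexed); returns vec.size() if there is none.
std::size_t naive_select(const std::vector<bool>& vec, std::size_t i) {
	for (std::size_t pos = 0; pos < vec.size(); ++pos)
		if (vec[pos] && i-- == 0) return pos;
	return vec.size();
}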
Example #3
template <typename DeviceType>
NearestNeighborOperator<DeviceType>::NearestNeighborOperator(
    MPI_Comm comm, Kokkos::View<Coordinate const **, DeviceType> source_points,
    Kokkos::View<Coordinate const **, DeviceType> target_points )
    : _comm( comm )
    , _indices( "indices" )
    , _ranks( "ranks" )
    , _size( source_points.extent_int( 0 ) )
{
    // NOTE: instead of checking the pre-condition that there is at least one
    // source point passed to one of the rank, we let the tree handle the
    // communication and just check that the tree is not empty.

    // Build distributed search tree over the source points.
    DistributedSearchTree<DeviceType> search_tree( _comm, source_points );

    // Tree must have at least one leaf, otherwise it makes little sense to
    // perform the search for nearest neighbors.
    DTK_CHECK( !search_tree.empty() );

    // Query nearest neighbor for all target points.
    auto nearest_queries = Details::NearestNeighborOperatorImpl<
        DeviceType>::makeNearestNeighborQueries( target_points );

    // Perform the actual search.
    Kokkos::View<int *, DeviceType> indices( "indices" );
    Kokkos::View<int *, DeviceType> offset( "offset" );
    Kokkos::View<int *, DeviceType> ranks( "ranks" );
    search_tree.query( nearest_queries, indices, offset, ranks );

    // Check post-condition that we did find a nearest neighbor to all target
    // points.
    DTK_ENSURE( lastElement( offset ) == target_points.extent_int( 0 ) );

    // Save results.
    // NOTE: we don't bother keeping `offset` around since it is just
    // `[0, 1, 2, ..., n_target_points]`
    _indices = indices;
    _ranks = ranks;
}
Example #4
    std::vector<int> parallel_openmp(std::vector<int> &a, std::vector<int> &b)
    {
        std::vector<int> ranks(a.size());
        std::vector<int> output(a.size() + b.size());
        
#       pragma omp parallel for shared(output, ranks, a)
        for (std::size_t i = 0; i < a.size(); i++)
        {
            ranks[i] = rank(a[i] - 1, b);
            output[i + ranks[i]] = a[i];
        }
        
        ranks.resize(b.size());

#       pragma omp parallel for shared(output, ranks, b)
        for (std::size_t i = 0; i < b.size(); i++)
        {
            ranks[i] = rank(b[i], a);
            output[i + ranks[i]] = b[i];
        }

        return output;
    }
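The rank helper called in both loops is not shown in this listing. The a[i] - 1 offset suggests it returns the number of elements of the sorted vector that are less than or equal to its first argument, which makes tied values from a land before tied values from b, so no output slot is written twice. A sketch under that assumption (hypothetical, not necessarily the original helper):

// Hypothetical rank helper consistent with the usage above: number of
// elements of the sorted vector v that are <= x, found by binary search.
#include <algorithm>
#include <vector>

static int rank(int x, const std::vector<int>& v)
{
    return static_cast<int>(std::upper_bound(v.begin(), v.end(), x) - v.begin());
}

With this definition, merging a = {1, 3} and b = {2, 3} places a's element 1 at slot 0, b's 2 at slot 1, a's 3 at slot 2, and b's 3 at slot 3, exactly the behavior the two parallel loops rely on.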
Example #5
bool CpGrid::scatterGrid(Opm::EclipseStateConstPtr ecl,
                         const double* transmissibilities, int overlapLayers)
{
    // Silence any unused argument warnings that could occur with various configurations.
    static_cast<void>(ecl);
    static_cast<void>(transmissibilities);
    static_cast<void>(overlapLayers);
#if HAVE_MPI && DUNE_VERSION_NEWER(DUNE_GRID, 2, 3)
    if(distributed_data_)
    {
        std::cerr<<"There is already a distributed version of the grid."
                 << " Maybe scatterGrid was called before?"<<std::endl;
        return false;
    }

    CollectiveCommunication cc(MPI_COMM_WORLD);

    std::vector<int> cell_part(current_view_data_->global_cell_.size());
    int my_num=cc.rank();
#ifdef HAVE_ZOLTAN
    cell_part = cpgrid::zoltanGraphPartitionGridOnRoot(*this, ecl, transmissibilities,
                                                       cc, 0);
    int num_parts = cc.size();
#else
    int  num_parts=-1;
    std::array<int, 3> initial_split;
    initial_split[1]=initial_split[2]=std::pow(cc.size(), 1.0/3.0);
    initial_split[0]=cc.size()/(initial_split[1]*initial_split[2]);
    partition(*this, initial_split, num_parts, cell_part);
#endif

    MPI_Comm new_comm = MPI_COMM_NULL;

    if(num_parts < cc.size())
    {
        std::vector<int> ranks(num_parts);
        for(int i=0; i<num_parts; ++i)
            ranks[i]=i;
        MPI_Group new_group;
        MPI_Group old_group;
        MPI_Comm_group(cc, &old_group);
        MPI_Group_incl(old_group, num_parts, &(ranks[0]), &new_group);

        // Not all procs take part in the parallel computation
        MPI_Comm_create(cc, new_group, &new_comm);
        cc=CollectiveCommunication(new_comm);
    }else{
        new_comm = cc;
    }
    if(my_num<cc.size())
    {
        distributed_data_.reset(new cpgrid::CpGridData(new_comm));
        distributed_data_->distributeGlobalGrid(*this,*this->current_view_data_, cell_part,
                                                overlapLayers);
        std::cout << "After loadbalancing process " << my_num << " has " <<
            distributed_data_->cell_to_face_.size() << " cells." << std::endl;
    }
    current_view_data_ = distributed_data_.get();
    return true;
#else // #if HAVE_MPI && DUNE_VERSION_NEWER(DUNE_GRID, 2, 3)
    std::cerr << "CpGrid::scatterGrid() is non-trivial only with "
              << "MPI support and if the target Dune platform is "
              << "sufficiently recent.\n";
    return false;
#endif
}
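The MPI_Comm_group / MPI_Group_incl / MPI_Comm_create sequence above is a standard MPI pattern for restricting work to the first num_parts ranks. A minimal standalone sketch of the same pattern, independent of CpGrid and OPM (the particular split chosen here is only an example):

// Keep only the first num_parts ranks of MPI_COMM_WORLD in a sub-communicator.
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int world_rank, world_size;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    const int num_parts = world_size > 1 ? world_size - 1 : 1; // example split
    std::vector<int> ranks(num_parts);
    for (int i = 0; i < num_parts; ++i)
        ranks[i] = i;

    MPI_Group world_group, sub_group;
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_incl(world_group, num_parts, ranks.data(), &sub_group);

    // Ranks not listed in `ranks` receive MPI_COMM_NULL.
    MPI_Comm sub_comm;
    MPI_Comm_create(MPI_COMM_WORLD, sub_group, &sub_comm);

    if (sub_comm != MPI_COMM_NULL) {
        int sub_rank;
        MPI_Comm_rank(sub_comm, &sub_rank);
        std::printf("world rank %d -> sub rank %d\n", world_rank, sub_rank);
        MPI_Comm_free(&sub_comm);
    }
    MPI_Group_free(&sub_group);
    MPI_Group_free(&world_group);
    MPI_Finalize();
    return 0;
}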
Example #6
template<typename T>
void TranslateBetweenGrids
( const DistMatrix<T,MC,MR>& A, DistMatrix<T,MC,MR>& B )
{
    DEBUG_ONLY(CSE cse("copy::TranslateBetweenGrids [MC,MR]"))

    B.Resize( A.Height(), A.Width() );
    // Just need to ensure that each viewing comm contains the other team's
    // owning comm. Congruence is too strong.

    // Compute the number of process rows and columns that each process
    // needs to send to.
    const Int colStride = B.ColStride();
    const Int rowStride = B.RowStride();
    const Int colRank = B.ColRank();
    const Int rowRank = B.RowRank();
    const Int colStrideA = A.ColStride();
    const Int rowStrideA = A.RowStride();
    const Int colGCD = GCD( colStride, colStrideA );
    const Int rowGCD = GCD( rowStride, rowStrideA );
    const Int colLCM = colStride*colStrideA / colGCD;
    const Int rowLCM = rowStride*rowStrideA / rowGCD;
    const Int numColSends = colStride / colGCD;
    const Int numRowSends = rowStride / rowGCD;

    const Int colAlign = B.ColAlign();
    const Int rowAlign = B.RowAlign();
    const Int colAlignA = A.ColAlign();
    const Int rowAlignA = A.RowAlign();

    const bool inBGrid = B.Participating();
    const bool inAGrid = A.Participating();
    if( !inBGrid && !inAGrid )
        return;

    const Int maxSendSize =
        (A.Height()/(colStrideA*numColSends)+1) *
        (A.Width()/(rowStrideA*numRowSends)+1);

    // Translate the ranks from A's VC communicator to B's viewing so that
    // we can match send/recv communicators. Since A's VC communicator is not
    // necessarily defined on every process, we instead work with A's owning
    // group and account for row-major ordering if necessary.
    const int sizeA = A.Grid().Size();
    vector<int> rankMap(sizeA), ranks(sizeA);
    if( A.Grid().Order() == COLUMN_MAJOR )
    {
        for( int j=0; j<sizeA; ++j )
            ranks[j] = j;
    }
    else
    {
        // The (i,j) = i + j*colStrideA rank in the column-major ordering is
        // equal to the j + i*rowStrideA rank in a row-major ordering.
        // Since we desire rankMap[i+j*colStrideA] to correspond to process
        // (i,j) in A's grid's rank in this viewing group, ranks[i+j*colStrideA]
        // should correspond to process (i,j) in A's owning group. Since the
        // owning group is ordered row-major in this case, its rank is
        // j+i*rowStrideA. Note that setting
        // ranks[j+i*rowStrideA] = i+j*colStrideA is *NOT* valid.
        for( int i=0; i<colStrideA; ++i )
            for( int j=0; j<rowStrideA; ++j )
                ranks[i+j*colStrideA] = j+i*rowStrideA;
    }
    mpi::Translate
    ( A.Grid().OwningGroup(), sizeA, &ranks[0],
      B.Grid().ViewingComm(), &rankMap[0] );

    // Have each member of A's grid individually send to all numRow x numCol
    // processes in order, while the members of this grid receive from all
    // necessary processes at each step.
    Int requiredMemory = 0;
    if( inAGrid )
        requiredMemory += maxSendSize;
    if( inBGrid )
        requiredMemory += maxSendSize;
    vector<T> auxBuf( requiredMemory );
    Int offset = 0;
    T* sendBuf = &auxBuf[offset];
    if( inAGrid )
        offset += maxSendSize;
    T* recvBuf = &auxBuf[offset];

    Int recvRow = 0; // avoid compiler warnings...
    if( inAGrid )
        recvRow = Mod(Mod(A.ColRank()-colAlignA,colStrideA)+colAlign,colStride);
    for( Int colSend=0; colSend<numColSends; ++colSend )
    {
        Int recvCol = 0; // avoid compiler warnings...
        if( inAGrid )
            recvCol=Mod(Mod(A.RowRank()-rowAlignA,rowStrideA)+rowAlign,
                        rowStride);
        for( Int rowSend=0; rowSend<numRowSends; ++rowSend )
        {
            mpi::Request sendRequest;
            // Fire off this round of non-blocking sends
            if( inAGrid )
            {
                // Pack the data
                Int sendHeight = Length(A.LocalHeight(),colSend,numColSends);
                Int sendWidth = Length(A.LocalWidth(),rowSend,numRowSends);
                copy::util::InterleaveMatrix
                ( sendHeight, sendWidth,
                  A.LockedBuffer(colSend,rowSend),
                  numColSends, numRowSends*A.LDim(),
                  sendBuf, 1, sendHeight );
                // Send data
                const Int recvVCRank = recvRow + recvCol*colStride;
                const Int recvViewingRank = B.Grid().VCToViewing( recvVCRank );
                mpi::ISend
                ( sendBuf, sendHeight*sendWidth, recvViewingRank,
                  B.Grid().ViewingComm(), sendRequest );
            }
            // Perform this round of recv's
            if( inBGrid )
            {
                const Int sendColOffset = colAlignA;
                const Int recvColOffset =
                    (colSend*colStrideA+colAlign) % colStride;
                const Int sendRowOffset = rowAlignA;
                const Int recvRowOffset =
                    (rowSend*rowStrideA+rowAlign) % rowStride;

                const Int firstSendRow =
                    Mod( Mod(colRank-recvColOffset,colStride)+sendColOffset,
                         colStrideA );
                const Int firstSendCol =
                    Mod( Mod(rowRank-recvRowOffset,rowStride)+sendRowOffset,
                         rowStrideA );

                const Int colShift = Mod( colRank-recvColOffset, colStride );
                const Int rowShift = Mod( rowRank-recvRowOffset, rowStride );
                const Int numColRecvs = Length( colStrideA, colShift, colStride );
                const Int numRowRecvs = Length( rowStrideA, rowShift, rowStride );

                // Recv data
                // For now, simply receive sequentially. Until we switch to
                // nonblocking recv's, we won't be using much of the
                // recvBuf
                Int sendRow = firstSendRow;
                for( Int colRecv=0; colRecv<numColRecvs; ++colRecv )
                {
                    const Int sendColShift = Shift( sendRow, colAlignA, colStrideA ) + colSend*colStrideA;
                    const Int sendHeight = Length( A.Height(), sendColShift, colLCM );
                    const Int localColOffset = (sendColShift-B.ColShift()) / colStride;

                    Int sendCol = firstSendCol;
                    for( Int rowRecv=0; rowRecv<numRowRecvs; ++rowRecv )
                    {
                        const Int sendRowShift = Shift( sendCol, rowAlignA, rowStrideA ) + rowSend*rowStrideA;
                        const Int sendWidth = Length( A.Width(), sendRowShift, rowLCM );
                        const Int localRowOffset = (sendRowShift-B.RowShift()) / rowStride;

                        const Int sendVCRank = sendRow+sendCol*colStrideA;
                        mpi::Recv
                        ( recvBuf, sendHeight*sendWidth, rankMap[sendVCRank],
                          B.Grid().ViewingComm() );

                        // Unpack the data
                        copy::util::InterleaveMatrix
                        ( sendHeight, sendWidth,
                          recvBuf, 1, sendHeight,
                          B.Buffer(localColOffset,localRowOffset),
                          colLCM/colStride, (rowLCM/rowStride)*B.LDim() );

                        // Set up the next send col
                        sendCol = (sendCol + rowStride) % rowStrideA;
                    }
                    // Set up the next send row
                    sendRow = (sendRow + colStride) % colStrideA;
                }
            }
            // Ensure that this round of non-blocking sends completes
            if( inAGrid )
            {
                mpi::Wait( sendRequest );
                recvCol = (recvCol + rowStrideA) % rowStride;
            }
        }
        if( inAGrid )
            recvRow = (recvRow + colStrideA) % colStride;
    }
}
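The trickiest part of this routine is the comment explaining why ranks[i + j*colStrideA] = j + i*rowStrideA when A's owning group is ordered row-major. A tiny standalone demonstration of that index mapping for a hypothetical 2 x 3 process grid (illustration only, not Elemental code):

// For a grid with colStrideA rows and rowStrideA columns, process (i, j) has
// column-major rank i + j*colStrideA and row-major rank j + i*rowStrideA;
// entry k of `ranks` is therefore the row-major rank of the process whose
// column-major (VC) rank is k.
#include <cstdio>

int main()
{
    const int colStrideA = 2; // hypothetical 2 x 3 process grid
    const int rowStrideA = 3;
    int ranks[colStrideA * rowStrideA];
    for (int i = 0; i < colStrideA; ++i)
        for (int j = 0; j < rowStrideA; ++j)
            ranks[i + j*colStrideA] = j + i*rowStrideA;
    for (int k = 0; k < colStrideA * rowStrideA; ++k)
        std::printf("VC rank %d -> owning-group rank %d\n", k, ranks[k]);
    return 0;
}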
Example #7
//---------------------------------------------------------------------------//
// Test templates
//---------------------------------------------------------------------------//
TEUCHOS_UNIT_TEST( SolverFactory, mcsa_two_by_two )
{
    typedef Epetra_Vector VectorType;
    typedef MCLS::VectorTraits<VectorType> VT;
    typedef Epetra_RowMatrix MatrixType;
    typedef MCLS::MatrixTraits<VectorType,MatrixType> MT;

    Teuchos::RCP<const Teuchos::Comm<int> > comm = 
	Teuchos::DefaultComm<int>::getComm();
    int comm_size = comm->getSize();
    int comm_rank = comm->getRank();

    // This is a 4 processor test.
    if ( comm_size == 4 )
    {
	// Build the set-constant communicator.
	Teuchos::Array<int> ranks(2);
	if ( comm_rank < 2 )
	{
	    ranks[0] = 0;
	    ranks[1] = 1;
	}
	else
	{
	    ranks[0] = 2;
	    ranks[1] = 3;
	}
	Teuchos::RCP<const Teuchos::Comm<int> > comm_set =
	    comm->createSubcommunicator( ranks() );
	int set_size = comm_set->getSize();

	// Declare the linear problem in the global scope.
	Teuchos::RCP<MCLS::LinearProblem<VectorType,MatrixType> > linear_problem;

	// Build the linear system on set 0.
	if ( comm_rank < 2 )
	{
	    int local_num_rows = 10;
	    int global_num_rows = local_num_rows*set_size;
	    Teuchos::RCP<Epetra_Comm> epetra_comm = getEpetraComm( comm_set );
	    Teuchos::RCP<Epetra_Map> map = Teuchos::rcp(
		new Epetra_Map( global_num_rows, 0, *epetra_comm ) );

	    // Build the linear system. This operator is symmetric with a spectral
	    // radius less than 1.
	    Teuchos::RCP<Epetra_CrsMatrix> A = 	
		Teuchos::rcp( new Epetra_CrsMatrix( Copy, *map, 0 ) );
	    Teuchos::Array<int> global_columns( 3 );
	    Teuchos::Array<double> values( 3 );
	    global_columns[0] = 0;
	    global_columns[1] = 1;
	    global_columns[2] = 2;
	    values[0] = 1.0;
	    values[1] = 0.05;
	    values[2] = 0.05;
	    A->InsertGlobalValues( 0, global_columns.size(), 
				   &values[0], &global_columns[0] );
	    for ( int i = 1; i < global_num_rows-1; ++i )
	    {
		global_columns[0] = i-1;
		global_columns[1] = i;
		global_columns[2] = i+1;
		values[0] = 0.05;
		values[1] = 1.0;
		values[2] = 0.05;
		A->InsertGlobalValues( i, global_columns.size(), 
				       &values[0], &global_columns[0] );
	    }
	    global_columns[0] = global_num_rows-3;
	    global_columns[1] = global_num_rows-2;
	    global_columns[2] = global_num_rows-1;
	    values[0] = 0.05;
	    values[1] = 0.05;
	    values[2] = 1.0;
	    A->InsertGlobalValues( global_num_rows-1, global_columns.size(), 
				   &values[0], &global_columns[0] );
	    A->FillComplete();

	    Teuchos::RCP<MatrixType> B = A;

	    // Build the LHS. Put a large positive number here to be sure we
	    // clear the vector before solving.
	    Teuchos::RCP<VectorType> x = MT::cloneVectorFromMatrixRows( *B );
	    VT::putScalar( *x, 0.0 );

	    // Build the RHS with negative numbers. This gives us a negative
	    // solution.
	    Teuchos::RCP<VectorType> b = MT::cloneVectorFromMatrixRows( *B );
	    VT::putScalar( *b, -1.0 );

	    // Create the linear problem.
	    linear_problem = Teuchos::rcp( 
		new MCLS::LinearProblem<VectorType,MatrixType>(B, x, b) );
	}
	comm->barrier();

	// Solver parameters.
	Teuchos::RCP<Teuchos::ParameterList> plist = 
	    Teuchos::rcp( new Teuchos::ParameterList() );
	double cutoff = 1.0e-4;
	plist->set<std::string>("MC Type", "Adjoint");
	plist->set<double>("Convergence Tolerance", 1.0e-8);
	plist->set<int>("Maximum Iterations", 10);
	plist->set<double>("Weight Cutoff", cutoff);
        plist->set<int>("Iteration Print Frequency", 1);
	plist->set<int>("MC Check Frequency", 50);
	plist->set<bool>("Reproducible MC Mode",true);
	plist->set<int>("Overlap Size", 2);
	plist->set<int>("Number of Sets", 2);
	plist->set<double>("Sample Ratio", 10.0);
	plist->set<std::string>("Transport Type", "Global" );

	// Create the solver.
	MCLS::SolverFactory<VectorType,MatrixType> factory;

	Teuchos::RCP<MCLS::SolverManager<VectorType,MatrixType> > solver_manager =
	    factory.create( "MCSA", comm, plist );
	solver_manager->setProblem( linear_problem );

	// Solve the problem.
	bool converged_status = solver_manager->solve();

	TEST_ASSERT( converged_status );
	TEST_ASSERT( solver_manager->getConvergedStatus() );
	TEST_ASSERT( solver_manager->getNumIters() < 10 );
	if ( comm_rank < 2 )
	{
	    TEST_ASSERT( solver_manager->achievedTol() > 0.0 );
	}
	else
	{
	    TEST_ASSERT( solver_manager->achievedTol() == 0.0 );
	}

	if ( comm_rank < 2 )
	{
	    // Check that we got a negative solution.
	    Teuchos::ArrayRCP<const double> x_view = 
		VT::view( *linear_problem->getLHS() );
	    Teuchos::ArrayRCP<const double>::const_iterator x_view_it;
	    for ( x_view_it = x_view.begin(); x_view_it != x_view.end(); ++x_view_it )
	    {
		TEST_ASSERT( *x_view_it < Teuchos::ScalarTraits<double>::zero() );
	    }
	}
	comm->barrier();

	// Now solve the problem with a positive source.
	if ( comm_rank < 2 )
	{
	    Teuchos::RCP<VectorType> b = 
		MT::cloneVectorFromMatrixRows( *linear_problem->getOperator() );
	    VT::putScalar( *b, 2.0 );
	    linear_problem->setRHS( b );
	    VT::putScalar( *linear_problem->getLHS(), 0.0 );
	}
	comm->barrier();

	converged_status = solver_manager->solve();

	TEST_ASSERT( converged_status );
	TEST_ASSERT( solver_manager->getConvergedStatus() );
	TEST_ASSERT( solver_manager->getNumIters() < 10 );
	if ( comm_rank < 2 )
	{
	    TEST_ASSERT( solver_manager->achievedTol() > 0.0 );
	}
	else
	{
	    TEST_ASSERT( solver_manager->achievedTol() == 0.0 );
	}

	if ( comm_rank < 2 )
	{
	    Teuchos::ArrayRCP<const double> x_view = 
		VT::view( *linear_problem->getLHS() );
	    Teuchos::ArrayRCP<const double>::const_iterator x_view_it;
	    for ( x_view_it = x_view.begin(); x_view_it != x_view.end(); ++x_view_it )
	    {
		TEST_ASSERT( *x_view_it > Teuchos::ScalarTraits<double>::zero() );
	    }
	}
	comm->barrier();

	// Reset the domain and solve again with a positive source.
	if ( comm_rank < 2 )
	{
	    VT::putScalar( *linear_problem->getLHS(), 0.0 );
	}
	comm->barrier();
	solver_manager->setProblem( linear_problem );
	converged_status = solver_manager->solve();
	TEST_ASSERT( converged_status );
	TEST_ASSERT( solver_manager->getConvergedStatus() );
	TEST_ASSERT( solver_manager->getNumIters() < 10 );
	if ( comm_rank < 2 )
	{
	    TEST_ASSERT( solver_manager->achievedTol() > 0.0 );
	}
	else
	{
	    TEST_ASSERT( solver_manager->achievedTol() == 0.0 );
	}

	if ( comm_rank < 2 )
	{
	    Teuchos::ArrayRCP<const double> x_view = 
		VT::view( *linear_problem->getLHS() );
	    Teuchos::ArrayRCP<const double>::const_iterator x_view_it;
	    for ( x_view_it = x_view.begin(); x_view_it != x_view.end(); ++x_view_it )
	    {
		TEST_ASSERT( *x_view_it > Teuchos::ScalarTraits<double>::zero() );
	    }
	}
	comm->barrier();

	// Reset both and solve with a negative source.
	if ( comm_rank < 2 )
	{
	    Teuchos::RCP<VectorType> b = 
		MT::cloneVectorFromMatrixRows( *linear_problem->getOperator() );
	    VT::putScalar( *b, -2.0 );
	    linear_problem->setRHS( b );
	    VT::putScalar( *linear_problem->getLHS(), 0.0 );
	}
	comm->barrier();

	converged_status = solver_manager->solve();
	TEST_ASSERT( converged_status );
	TEST_ASSERT( solver_manager->getConvergedStatus() );
	TEST_ASSERT( solver_manager->getNumIters() < 10 );
	if ( comm_rank < 2 )
	{
	    TEST_ASSERT( solver_manager->achievedTol() > 0.0 );
	}
	else
	{
	    TEST_ASSERT( solver_manager->achievedTol() == 0.0 );
	}

	if ( comm_rank < 2 )
	{
	    Teuchos::ArrayRCP<const double> x_view =
		VT::view( *linear_problem->getLHS() );
	    Teuchos::ArrayRCP<const double>::const_iterator x_view_it;
	    for ( x_view_it = x_view.begin(); x_view_it != x_view.end(); ++x_view_it )
	    {
		TEST_ASSERT( *x_view_it < Teuchos::ScalarTraits<double>::zero() );
	    }
	}
	comm->barrier();
    }
}
Example #8
File: suffix_array.cpp  Project: ericl/bwt
uint32_t *idc3(uint32_t *T, size_t n, size_t *retsz) {
    vprint("args", T, n);
    T[n++] = 0;

    // step 0: construct a sample
    size_t B0len = (n+1)/3;
    #define toC(i) ((i < B0len) ? (1 + 3*i) : (2 + 3*(i-B0len)))

    // step 1: sort sample suffixes
    triplet *R = new triplet[n * 3 / 2 + 3];
    triplet *Rptr = R;
    for (int j = 1; j <= 2; j++) {
        size_t i = j;
        for (; i < n - 2; i += 3) {
            *Rptr++ = triplet(T[i], T[i+1], T[i+2]);
        }
        for (; i < n - 1; i += 3) {
            *Rptr++ = triplet(T[i], T[i+1], 0);
        }
        for (; i < n; i += 3) {
            *Rptr++ = triplet(T[i], 0, 0);
        }
    }
    bool unique = false;
    size_t Rsz = Rptr - R;
    size_t Rrsz, SARsz;
    uint32_t *Rr = ranks(R, Rsz, &Rrsz, &unique);
    vprint("Rr", Rr, Rrsz)
    delete[] R;
    uint32_t *SAR = unique ? toSA(Rr, Rrsz, &SARsz) : idc3(Rr, Rrsz, &SARsz);
    vprint("SAR", SAR, SARsz)
    delete[] Rr;
    uint32_t *rank = new uint32_t[n+2];
    for (size_t i = 1; i < SARsz; i++) {
        rank[toC(SAR[i])] = i;
    }

    // step 2: sort nonsample suffixes
    triplet *SB0 = new triplet[n/3 + 3];
    triplet *SB0ptr = SB0;
    for (size_t i = 0; i < n; i += 3) {
        *SB0ptr++ = triplet(T[i], rank[i+1], i);
    }
    size_t SB0sz = SB0ptr - SB0;
    lsd_sort(SB0, SB0sz);

    // step 3: merge
    uint32_t *Sc = new uint32_t[SARsz - 1];
    size_t Scsz = SARsz - 1;
    uint32_t *buf = new uint32_t[n + 1];
    uint32_t *buf_ptr = buf;
    for (size_t i = 1; i < SARsz; i++) {
        Sc[i-1] = toC(SAR[i]);
    }
    delete[] SAR;
    size_t sbi = 0;
    size_t sci = 0;
    while (sbi < SB0sz && sci < Scsz) {
        uint32_t i = Sc[sci];
        uint32_t j = SB0[sbi].arr[2];
        if (i % 3 == 1) {
            if (T[i] < T[j] || (T[i] == T[j] && rank[i+1] <= rank[j+1])) {
                *buf_ptr++ = i;
                sci++;
            } else {
                *buf_ptr++ = j;
                sbi++;
            }
        } else if (i % 3 == 2) {
            if (LE(T[i], T[i+1], rank[i+2], T[j], T[j+1], rank[j+2])) {
                *buf_ptr++ = i;
                sci++;
            } else {
                *buf_ptr++ = j;
                sbi++;
            }
        } else {
            assert(false);
        }
    }
    while (sci < Scsz) {
        uint32_t i = Sc[sci++];
        *buf_ptr++ = i;
    }
    while (sbi < SB0sz) {
        uint32_t j = SB0[sbi++].arr[2];
        *buf_ptr++ = j;
    }
    delete[] Sc;
    delete[] SB0;
    delete[] rank;
    *retsz = buf_ptr - buf;
    return buf;
}
Example #9
File: suffix_array.cpp  Project: ericl/bwt
uint32_t *gen_suffix_array(string *input, size_t *retsz) {
    uint32_t *v = ranks(input->c_str(), input->length());
    uint32_t *sa = idc3(v, input->length(), retsz);
    delete[] v;
    return sa;
}
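The ranks helper used here (distinct from the triplet ranking inside idc3) is not shown in this listing. Presumably it maps each input character to its 1-based rank among the distinct characters, leaving 0 free for the sentinel that idc3 appends with T[n++] = 0, which also means the returned buffer must have at least one spare slot past the logical length. A hypothetical sketch consistent with that usage; the actual signature and padding in ericl/bwt may differ:

// Replace each character by its 1-based rank among the distinct characters;
// 0 stays reserved for the sentinel idc3 appends. Extra slots are allocated
// so idc3 can write past index n - 1.
#include <cstddef>
#include <cstdint>

uint32_t *ranks(const char *s, size_t n) {
    uint32_t char_rank[256] = {0};
    for (size_t i = 0; i < n; i++)
        char_rank[(unsigned char)s[i]] = 1;   // mark characters that occur
    uint32_t next = 0;
    for (int c = 0; c < 256; c++)
        if (char_rank[c]) char_rank[c] = ++next;  // assign ranks 1..k
    uint32_t *v = new uint32_t[n + 3]();          // zero-initialized padding
    for (size_t i = 0; i < n; i++)
        v[i] = char_rank[(unsigned char)s[i]];
    return v;
}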
Example #10
//
// Get ranks and index of scores
//
// [[Rcpp::export]]
Rcpp::List get_score_ranks(const Rcpp::NumericVector& scores,
                           const bool& na_last,
                           const std::string& ties_method) {

  // Variables
  Rcpp::List ret_val;
  std::string errmsg = "";
  std::vector<int> ranks(scores.size());
  std::vector<int> rank_idx(scores.size());

  // Update NAs
  std::vector<double> svals(scores.size());
  std::vector<int> sorted_idx(scores.size());
  for (int i = 0; i < scores.size(); ++i) {
    if (Rcpp::NumericVector::is_na(scores[i])) {
      if (na_last) {
        svals[i] = DBL_MIN;
      } else {
        svals[i] = DBL_MAX;
      }
    } else {
      svals[i] = scores[i];
    }
    sorted_idx[i] = i;
  }

  // Sort scores
  CompDVec fcomp(svals);
  if (ties_method == "first") {
    std::stable_sort(sorted_idx.begin(), sorted_idx.end(), fcomp);
  } else {
    std::sort(sorted_idx.begin(), sorted_idx.end(), fcomp);
  }

  // Set ranks
  for (unsigned i = 0; i < sorted_idx.size(); ++i) {
     ranks[sorted_idx[i]] = i + 1;
     rank_idx[i] = sorted_idx[i];
  }

  // Update ties
  if (ties_method == "equiv" || ties_method == "random") {
    std::vector<int> tied_idx;
    double prev_val = svals[rank_idx[0]];
    bool tied = false;
    for (unsigned i = 1; i < rank_idx.size(); ++i) {
      if (tied) {
        if (prev_val != svals[rank_idx[i]]) {
          update_ties(ranks, rank_idx, tied_idx, ties_method);
          tied_idx.clear();
          tied = false;
        } else {
          tied_idx.push_back(rank_idx[i]);
        }
      } else if (prev_val == svals[rank_idx[i]]) {
        tied_idx.push_back(rank_idx[i - 1]);
        tied_idx.push_back(rank_idx[i]);
        tied = true;
      }

      prev_val = svals[rank_idx[i]];
    }

    if (tied) {
      update_ties(ranks, rank_idx, tied_idx, ties_method);
    }
  }

  // Add 1 to rank_idx
  for (unsigned i = 0; i < rank_idx.size(); ++i) {
    rank_idx[i] = rank_idx[i] + 1;
  }

  // Return result
  ret_val["ranks"] = ranks;
  ret_val["rank_idx"] = rank_idx;
  ret_val["errmsg"] = errmsg;

  return ret_val;
}
Example #11
bool SampleMatrix::BuildPreference(bool bSingleList)
{
    if(mnClassCount != 1 || mGroupCount == 0)
        return false;
    typedef std::vector<std::vector<std::pair<size_t, float> > > TargetMatrix;
    TargetMatrix targetMatrix;
    targetMatrix.resize(mGroupCount);
    for(int index = 0; index < mnSampleCount; ++index)
    {
        const MlSample* pSample = mpContainer->GetSample(index);
        float target = pSample->GetTargetValue();
        size_t group = pSample->GetGroupId();
        if(bSingleList)
        {
            group = 0;
        }
        else
        {
            if(group >= mGroupCount)
                return false;
        }
        targetMatrix[group].push_back(std::make_pair(index, target));
    }

    std::vector<RankList*> ranks(mGroupCount);
    for(size_t group = 0; group < mGroupCount; ++group)
    {
        RankList* pRankList = new RankList();
        ranks[group] = pRankList;
        if(!pRankList->BuildRank(targetMatrix[group]))
        {
            for(size_t i = 0; i <= group; ++i)
            {
                if(ranks[i])
                    delete ranks[i];
            }
            return false;
        }
        const std::vector<size_t>& rank = pRankList->GetRank();
        const std::vector<size_t>& edges = pRankList->GetJumpEdges();
        for(size_t idxEdge = 0; idxEdge + 1 < edges.size(); ++idxEdge)
        {
            size_t nBegin = edges[idxEdge];
            size_t nEnd = edges[idxEdge + 1];
            double fWeight = rank.size() - (nEnd - nBegin);
            for(size_t idxRank = nBegin; idxRank < nEnd; ++idxRank)
            {
                int idxSamp = rank[idxRank];
                MlSample* pSamp = mpContainer->GetSample(idxSamp);
                if(pSamp)
                {
                    pSamp->SetWeight(fWeight);
                    mpWeight[idxSamp] = fWeight;
                }
            }
        }
    }

    mPreferenceSet.SetRanks(ranks);
    return true;
}
Example #12
BOOST_AUTO_TEST_CASE_TEMPLATE( sort_results, DeviceType,
                               DTK_SEARCH_DEVICE_TYPES )
{
    std::vector<int> ids_ = {4, 3, 2, 1, 4, 3, 2, 4, 3, 4};
    std::vector<int> sorted_ids = {1, 2, 2, 3, 3, 3, 4, 4, 4, 4};
    std::vector<int> offset = {0, 1, 3, 6, 10};
    int const n = 10;
    int const m = 4;
    BOOST_TEST( ids_.size() == n );
    BOOST_TEST( sorted_ids.size() == n );
    BOOST_TEST( offset.size() == m + 1 );
    std::vector<int> results_ = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
    std::vector<std::set<int>> sorted_results = {
        {3},
        {6, 2},
        {8, 5, 1},
        {9, 7, 4, 0},
    };
    std::vector<int> ranks_ = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19};
    std::vector<std::set<int>> sorted_ranks = {
        {13},
        {16, 12},
        {18, 15, 11},
        {19, 17, 14, 10},
    };
    BOOST_TEST( results_.size() == n );
    BOOST_TEST( ranks_.size() == n );

    Kokkos::View<int *, DeviceType> ids( "query_ids", n );
    auto ids_host = Kokkos::create_mirror_view( ids );
    for ( int i = 0; i < n; ++i )
        ids_host( i ) = ids_[i];
    Kokkos::deep_copy( ids, ids_host );

    Kokkos::View<int *, DeviceType> results( "results", n );
    auto results_host = Kokkos::create_mirror_view( results );
    for ( int i = 0; i < n; ++i )
        results_host( i ) = results_[i];
    Kokkos::deep_copy( results, results_host );

    Kokkos::View<int *, DeviceType> ranks( "ranks", n );
    auto ranks_host = Kokkos::create_mirror_view( ranks );
    for ( int i = 0; i < n; ++i )
        ranks_host( i ) = ranks_[i];
    Kokkos::deep_copy( ranks, ranks_host );

    ArborX::Details::DistributedSearchTreeImpl<DeviceType>::sortResults(
        ids, results, ranks );

    // COMMENT: ids are untouched
    Kokkos::deep_copy( ids_host, ids );
    BOOST_TEST( ids_host == ids_, tt::per_element() );

    Kokkos::deep_copy( results_host, results );
    Kokkos::deep_copy( ranks_host, ranks );
    for ( int q = 0; q < m; ++q )
        for ( int i = offset[q]; i < offset[q + 1]; ++i )
        {
            BOOST_TEST( sorted_results[q].count( results_host[i] ) == 1 );
            BOOST_TEST( sorted_ranks[q].count( ranks_host[i] ) == 1 );
        }

    Kokkos::View<int *, DeviceType> not_sized_properly( "", m );
    BOOST_CHECK_THROW(
        ArborX::Details::DistributedSearchTreeImpl<DeviceType>::sortResults(
            ids, not_sized_properly ),
        ArborX::SearchException );
}