コード例 #1
0
ファイル: SparseRow.cpp プロジェクト: gouchangjiang/opengeoda
double SparseRow::timesColumn(const DenseVector &v)  const  {

    // Dot product of this sparse row with the dense vector v:
    // only the stored (non-zero) entries in nb[0..size) contribute.

    double result = 0.0;

    for (int i = 0; i < this->size; ++i) {
        const int col = nb[i].getIx();          // column index of this non-zero entry
        result += nb[i].getWeight() * v.getValue(col);
    }

    return result;

}
コード例 #2
0
BOOST_AUTO_TEST_CASE_TEMPLATE( multVectorTest, T, test_types ) 
{
    // Verifies distributed dense matrix-vector multiplication Y = A * X
    // against a sequentially precomputed reference, for every combination of
    // 4 row distributions x 4 column distributions x {synchronous,
    // asynchronous} communication, plus dedicated cyclic-distribution cases.
    // NOTE(review): relies on fixtures declared elsewhere in this file
    // (comm, logger, createDistribution, cyclicMultVectorTest) — not visible here.

    typedef T ValueType;

    int numRows = 20;
    int numCols = 31;

    // definition of raw data for setup and comparison

    scoped_array<ValueType> valuesA( new ValueType[numRows * numCols] );
    scoped_array<ValueType> valuesX( new ValueType[numCols] );
    scoped_array<ValueType> valuesY( new ValueType[numRows] );

    // initialise data for the matrix: A(i,j) = 100 - |2*i - j|

    for ( IndexType i = 0; i<numRows; ++i )
    {
        for ( IndexType j = 0; j<numCols; ++j )
        {
            ValueType value = static_cast<ValueType> ( 100.0 - ::fabs( 2.0 * i - j ) );
            valuesA[i * numCols + j] = value;
        }
    }

    // initialize the vector x: X(j) = 1.2 * (j + 1)

    for ( IndexType j = 0; j < numCols; ++j)
    {
        valuesX[j] = static_cast<ValueType> ( 1.2 * ( j + 1 ) );
    }

// compute the reference result Y = A * X sequentially for comparison

    for (IndexType i = 0; i<numRows; ++i)
    {
        valuesY[i] = 0.0;
        for (IndexType j = 0; j<numCols; ++j)
        {
            valuesY[i] += static_cast<ValueType> ( valuesA[i*numCols+j]*valuesX[j] );
        }
    }

    // construct replicated matrix A and vector X to be redistributed

    DenseMatrix<ValueType> rA;
    rA.setRawDenseData( numRows, numCols, valuesA.get() );
    DenseVector<ValueType> rX( numCols, valuesX.get());

    // try different distributions for rows and columns
    // (distribution kind is selected by the integer argument i / j —
    // presumably an enum understood by createDistribution; see its definition)

    DistributionPtr rowDist;
    DistributionPtr colDist;

    for ( int i = 0; i < 4; i++ )
    {
        rowDist = DistributionPtr( createDistribution( numRows, comm, i ) );

        for ( int j = 0; j < 4; j++ )
        {
            colDist = DistributionPtr( createDistribution( numCols, comm, j ) );

            // k toggles the communication kind: 0 = synchronous, 1 = asynchronous

            for ( int k = 0; k < 2; k++ )
            {
                // redistribute A, X, setup result vector Y

                DenseMatrix<ValueType> A( rA, rowDist, colDist );
                DenseVector<ValueType> X( rX, colDist );

                if ( k == 0 )
                {
                    A.setCommunicationKind( Matrix::SYNCHRONOUS );
                }
                else
                {
                    A.setCommunicationKind( Matrix::ASYNCHRONOUS );
                }

                LAMA_LOG_INFO( logger, "mult matrix A = " << A << " with vector X = " << X );

                DenseVector<ValueType> result ( A * X );

                // result must be row-distributed and have one entry per matrix row

                BOOST_REQUIRE_EQUAL( result.size(), numRows );
                BOOST_REQUIRE_EQUAL( result.getDistribution(), *rowDist );

                // compare the result vector Y with the precomputed results valuesY

                for ( IndexType m = 0; m < numRows; ++m )
                {
                    Scalar value = Scalar( valuesY[m] );
                    Scalar expectedvalue = result.getValue( m );
                    //1e-1 is used because there are 8 of 1280 cases which fails with eps<1e-1
                    LAMA_CHECK_SCALAR_SMALL( value - expectedvalue , ValueType, 1e-1 );
                }
            }
        }
    }

    // Tests for the cyclic distribution specialization in case of a square matrix

    {
        PartitionId size = comm->getSize();
        IndexType chunkSize = 7;

        // every process gets exactly two full chunks

        IndexType n = 2 * size * chunkSize;
        cyclicMultVectorTest( chunkSize, n );

        // last chunk is truncated (n is not a multiple of chunkSize)

        n = 3 * size * chunkSize + chunkSize - 2;
        cyclicMultVectorTest( chunkSize, n );

        // Calculate the size so that each process gets more than one chunk, that not
        // all processes have the same number of chunks, and that we have at least one
        // chunk that is not square

        n = 3 * size * chunkSize + size/2 * chunkSize + chunkSize - 2;
        cyclicMultVectorTest( chunkSize, n );

        // not all processes get a chunk

        n = size/2 * chunkSize + chunkSize - 2;
        cyclicMultVectorTest( chunkSize, n );
    }
}