Example #1
void Cluster::subdivide(unsigned bmin)
{
    const unsigned size(_end - _beg);
    if (size > bmin) {

        idx_t n_rows(static_cast<idx_t>(_l_adj_mat->getNRows()));

        // copy the row pointer array into METIS' index type
        idx_t *xadj(new idx_t[n_rows+1]);
        unsigned const*const original_row_ptr(_l_adj_mat->getRowPtrArray());
        for (idx_t k(0); k<=n_rows; k++) {
            xadj[k] = original_row_ptr[k];
        }

        // copy the column index array into METIS' index type
        unsigned nnz(_l_adj_mat->getNNZ());
        idx_t *adjncy(new idx_t[nnz]);
        unsigned const*const original_adjncy(_l_adj_mat->getColIdxArray());
        for (unsigned k(0); k<nnz; k++) {
            adjncy[k] = original_adjncy[k];
        }
//		unsigned nparts = 2;
        idx_t options[METIS_NOPTIONS]; // for METIS
        METIS_SetDefaultOptions(options);
//		options[METIS_OPTION_PTYPE] = METIS_PTYPE_RB;
//		options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_CUT;
//		options[METIS_OPTION_CTYPE] = METIS_CTYPE_SHEM;

//		unsigned sepsize(0); // for METIS
        // unit vertex weights for METIS
        idx_t *vwgt(new idx_t[n_rows + 1]);
//		const unsigned nnz(xadj[n_rows]);
//		unsigned *adjwgt(new unsigned[nnz]);
        for (idx_t k(0); k < n_rows + 1; k++)
            vwgt[k] = 1;
//		for (unsigned k(0); k < nnz; k++)
//			adjwgt[k] = 1;
//		unsigned *part(new unsigned[n_rows + 1]);

        // the three-way subdivision via a METIS vertex separator is disabled;
        // METIS_NodeND is used instead to compute a fill-reducing ordering
//		METIS_ComputeVertexSeparator(&n_rows, xadj, adjncy, vwgt, &options,
//				&sepsize, part);
        // copy the global permutation and its inverse into METIS' index type
        idx_t *loc_op_perm(new idx_t[n_rows]);
        idx_t *loc_po_perm(new idx_t[n_rows]);
        for (idx_t k(0); k<n_rows; k++) {
            loc_op_perm[k] = _g_op_perm[k];
        }
        for (idx_t k(0); k<n_rows; k++) {
            loc_po_perm[k] = _g_po_perm[k];
        }
        // compute the fill-reducing nested dissection ordering;
        // loc_op_perm receives the permutation, loc_po_perm its inverse
        METIS_NodeND(&n_rows, xadj, adjncy, vwgt, options, loc_op_perm, loc_po_perm);
        // write the new ordering back to the global permutation arrays
        for (idx_t k(0); k<n_rows; k++) {
            _g_op_perm[k] = loc_op_perm[k];
        }
        for (idx_t k(0); k<n_rows; k++) {
            _g_po_perm[k] = loc_po_perm[k];
        }
        delete [] loc_op_perm;
        delete [] loc_po_perm;
        delete [] vwgt;
        delete [] adjncy;
        delete [] xadj;
//		// create and init local permutations
//		unsigned *l_op_perm(new unsigned[size]);
//		unsigned *l_po_perm(new unsigned[size]);
//		for (unsigned i = 0; i < size; ++i)
//			l_op_perm[i] = l_po_perm[i] = i;
//
//		unsigned isep1, isep2;
//		updatePerm(part, isep1, isep2, l_op_perm, l_po_perm);
//		delete[] part;
//
//		// update global permutation
//		unsigned *t_op_perm = new unsigned[size];
//		for (unsigned k = 0; k < size; ++k)
//			t_op_perm[k] = _g_op_perm[_beg + l_op_perm[k]];
//
//		for (unsigned k = _beg; k < _end; ++k) {
//			_g_op_perm[k] = t_op_perm[k - _beg];
//			_g_po_perm[_g_op_perm[k]] = k;
//		}
//		delete[] t_op_perm;
//
//		// next recursion step
//		if ((isep1 >= bmin) && (isep2 - isep1 >= bmin)) {
//			// construct adj matrices for [0, isep1), [isep1,isep2), [isep2, _end)
//			AdjMat *l_adj0(_l_adj_mat->getMat(0, isep1, l_op_perm, l_po_perm));
//			AdjMat *l_adj1(_l_adj_mat->getMat(isep1, isep2, l_op_perm, l_po_perm));
//			AdjMat *l_adj2(_l_adj_mat->getMat(isep2, size, l_op_perm, l_po_perm));
//
//			delete[] l_op_perm;
//			delete[] l_po_perm;
//			delete _l_adj_mat;
//			_l_adj_mat = NULL;
//
//			_n_sons = 3;
//			_sons = new ClusterBase*[_n_sons];
//
//			isep1 += _beg;
//			isep2 += _beg;
//
//			// constructing child nodes for index cluster tree
//			_sons[0] = new Cluster(this, _beg, isep1, _g_op_perm, _g_po_perm, _g_adj_mat, l_adj0);
//			_sons[1] = new Cluster(this, isep1, isep2, _g_op_perm, _g_po_perm, _g_adj_mat, l_adj1);
//			_sons[2] = new Separator(this, isep2, _end, _g_op_perm,	_g_po_perm, _g_adj_mat, l_adj2);
//
//			dynamic_cast<Cluster*>(_sons[0])->subdivide(bmin);
//			dynamic_cast<Cluster*>(_sons[1])->subdivide(bmin);
//
//		} else {
//			delete _l_adj_mat;
//			_l_adj_mat = NULL;
//		} // end if next recursion step
    } // end if (size > bmin)

}
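
The active path above boils down to a single METIS_NodeND call on a graph in CSR form. Below is a minimal, self-contained sketch of that call on a hypothetical 4-vertex path graph; the graph and all names are illustrative and not taken from the class above.

#include <metis.h>
#include <cstdio>

int main()
{
    // path graph 0-1-2-3 in CSR form (xadj: row pointers, adjncy: neighbours)
    idx_t nvtxs = 4;
    idx_t xadj[]   = {0, 1, 3, 5, 6};
    idx_t adjncy[] = {1, 0, 2, 1, 3, 2};

    idx_t options[METIS_NOPTIONS];
    METIS_SetDefaultOptions(options);

    idx_t perm[4];  // fill-reducing ordering
    idx_t iperm[4]; // its inverse
    // passing NULL for vwgt gives every vertex unit weight
    if (METIS_NodeND(&nvtxs, xadj, adjncy, NULL, options, perm, iperm)
        != METIS_OK)
    {
        return 1;
    }

    for (idx_t i = 0; i < nvtxs; ++i)
    {
        std::printf("perm[%d] = %d\n", static_cast<int>(i),
                    static_cast<int>(perm[i]));
    }
    return 0;
}

METIS_NodeND fills perm with the ordering and iperm with its inverse, which is exactly the pair of arrays Cluster::subdivide copies back into _g_op_perm and _g_po_perm.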
Example #2
//- Prevents zero-sized (0-cell) domains, then calls ptscotch.
Foam::label Foam::ptscotchDecomp::decomposeZeroDomains
(
    const fileName& meshPath,
    const List<int>& initadjncy,
    const List<int>& initxadj,
    const scalarField& initcWeights,

    List<int>& finalDecomp
) const
{
    globalIndex globalCells(initxadj.size()-1);

    bool hasZeroDomain = false;
    for (label procI = 0; procI < Pstream::nProcs(); procI++)
    {
        if (globalCells.localSize(procI) == 0)
        {
            hasZeroDomain = true;
            break;
        }
    }

    if (!hasZeroDomain)
    {
        return decompose
        (
            meshPath,
            initadjncy,
            initxadj,
            initcWeights,
            finalDecomp
        );
    }


    if (debug)
    {
        Info<< "ptscotchDecomp : have graphs with locally 0 cells."
            << " trickling down." << endl;
    }

    // Make sure every domain has at least one cell
    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // (scotch does not like zero sized domains)
    // Trickle cells from processors that have them up to those that
    // don't.


    // Number of cells to send to the next processor
    // (is same as number of cells next processor has to receive)
    List<int> nSendCells(Pstream::nProcs(), 0);

    // Work backwards: if a processor would have fewer than one cell left
    // after sending its own quota onward, its predecessor has to trickle
    // cells towards it.
    for (label procI = nSendCells.size()-1; procI >= 1; procI--)
    {
        label nLocalCells = globalCells.localSize(procI);
        if (nLocalCells-nSendCells[procI] < 1)
        {
            nSendCells[procI-1] = nSendCells[procI]-nLocalCells+1;
        }
    }

    // First receive (so increasing the sizes of all arrays)

    Field<int> xadj(initxadj);
    Field<int> adjncy(initadjncy);
    scalarField cWeights(initcWeights);

    if (Pstream::myProcNo() >= 1 && nSendCells[Pstream::myProcNo()-1] > 0)
    {
        // Receive cells from previous processor
        IPstream fromPrevProc(Pstream::blocking, Pstream::myProcNo()-1);

        Field<int> prevXadj(fromPrevProc);
        Field<int> prevAdjncy(fromPrevProc);
        scalarField prevCellWeights(fromPrevProc);

        if (prevXadj.size() != nSendCells[Pstream::myProcNo()-1])
        {
            FatalErrorIn("ptscotchDecomp::decompose(..)")
                << "Expected from processor " << Pstream::myProcNo()-1
                << " connectivity for " << nSendCells[Pstream::myProcNo()-1]
                << " nCells but only received " << prevXadj.size()
                << abort(FatalError);
        }

        // Insert adjncy
        prepend(prevAdjncy, adjncy);
        // Adapt offsets and prepend xadj
        xadj += prevAdjncy.size();
        prepend(prevXadj, xadj);
        // Weights
        prepend(prevCellWeights, cWeights);
    }


    // Send to my next processor

    if (nSendCells[Pstream::myProcNo()] > 0)
    {
        // Send cells to next processor
        OPstream toNextProc(Pstream::blocking, Pstream::myProcNo()+1);

        int nCells = nSendCells[Pstream::myProcNo()];
        int startCell = xadj.size()-1 - nCells;
        int startFace = xadj[startCell];
        int nFaces = adjncy.size()-startFace;

        // Send for all cell data: last nCells elements
        // Send for all face data: last nFaces elements
        toNextProc
            << Field<int>::subField(xadj, nCells, startCell)-startFace
            << Field<int>::subField(adjncy, nFaces, startFace)
            <<
            (
                cWeights.size()
              ? static_cast<const scalarField&>
                (
                    scalarField::subField(cWeights, nCells, startCell)
                )
              : scalarField(0)
            );

        // Remove data that has been sent
        if (cWeights.size())
        {
            cWeights.setSize(cWeights.size()-nCells);
        }
        adjncy.setSize(adjncy.size()-nFaces);
        xadj.setSize(xadj.size() - nCells);
    }


    // Do decomposition as normal. Sets finalDecomp.
    label result = decompose(meshPath, adjncy, xadj, cWeights, finalDecomp);


    if (debug)
    {
        Info<< "ptscotchDecomp : have graphs with locally 0 cells."
            << " trickling up." << endl;
    }


    // If we sent cells across make sure we undo it
    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    // Receive back from next processor if I sent something
    if (nSendCells[Pstream::myProcNo()] > 0)
    {
        IPstream fromNextProc(Pstream::blocking, Pstream::myProcNo()+1);

        List<int> nextFinalDecomp(fromNextProc);

        if (nextFinalDecomp.size() != nSendCells[Pstream::myProcNo()])
        {
            FatalErrorIn("parMetisDecomp::decompose(..)")
                << "Expected from processor " << Pstream::myProcNo()+1
                << " decomposition for " << nSendCells[Pstream::myProcNo()]
                << " nCells but only received " << nextFinalDecomp.size()
                << abort(FatalError);
        }

        append(nextFinalDecomp, finalDecomp);
    }

    // Send back to previous processor.
    if (Pstream::myProcNo() >= 1 && nSendCells[Pstream::myProcNo()-1] > 0)
    {
        OPstream toPrevProc(Pstream::blocking, Pstream::myProcNo()-1);

        int nToPrevious = nSendCells[Pstream::myProcNo()-1];

        toPrevProc <<
            SubList<int>
            (
                finalDecomp,
                nToPrevious,
                finalDecomp.size()-nToPrevious
            );

        // Remove locally what has been sent
        finalDecomp.setSize(finalDecomp.size()-nToPrevious);
    }
    return result;
}
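
decomposeZeroDomains relies on prepend and append helpers that are not shown in this excerpt. A plausible sketch of their behaviour, assuming OpenFOAM's List/UList types; the real implementations may differ in detail.

// Insert extraLst before the existing elements of lst.
template<class Type>
void prepend(const Foam::UList<Type>& extraLst, Foam::List<Type>& lst)
{
    const Foam::label nExtra = extraLst.size();
    const Foam::label nOld = lst.size();

    lst.setSize(nOld + nExtra);

    // shift the existing elements towards the end
    for (Foam::label i = nOld - 1; i >= 0; i--)
    {
        lst[i + nExtra] = lst[i];
    }
    // copy the new elements to the front
    for (Foam::label i = 0; i < nExtra; i++)
    {
        lst[i] = extraLst[i];
    }
}

// Append extraLst behind the existing elements of lst.
template<class Type>
void append(const Foam::UList<Type>& extraLst, Foam::List<Type>& lst)
{
    const Foam::label nOld = lst.size();

    lst.setSize(nOld + extraLst.size());

    for (Foam::label i = 0; i < extraLst.size(); i++)
    {
        lst[nOld + i] = extraLst[i];
    }
}

With helpers of this shape, the trickle-down phase can graft the received CSR fragments onto the front of the local graph, and the trickle-up phase can re-attach the decomposition entries for the cells that were lent out.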