Example #1
// currently this method is for debugging purposes only;
// later on it may become a parallel I/O routine
void mpi_tools::collect_and_write_labels( MPI_Comm communicator, PPartitionConfig & config, parallel_graph_access & G) {
        PEID rank, size;
        MPI_Comm_rank( communicator, &rank);
        MPI_Comm_size( communicator, &size);

        std::vector< NodeID > labels;

        if( rank == ROOT ) {
                labels.resize(G.number_of_global_nodes());
                forall_local_nodes(G, node) {
                        labels[node] = G.getNodeLabel(node);
                } endfor
                // ... (excerpt truncated; ROOT still has to collect the labels of the
                // remaining PEs and write them out; a hedged sketch follows below)
        }
}
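
The excerpt above stops inside the ROOT branch. Below is a minimal sketch of how the collection step could continue, assuming MPI_UNSIGNED_LONG_LONG matches NodeID, that each PE sends its local labels as a single message with tag 0, and that a number_of_local_nodes() accessor exists; none of this is taken from the original routine.

// hypothetical continuation sketch: ROOT receives each PE's labels, everyone else sends
if( rank == ROOT ) {
        NodeID offset = G.number_of_local_nodes(); // ROOT's own block comes first (assumed)
        for( PEID pe = 1; pe < size; pe++) {
                MPI_Status st;
                MPI_Probe(pe, 0, communicator, &st);
                int count = 0;
                MPI_Get_count(&st, MPI_UNSIGNED_LONG_LONG, &count);
                MPI_Recv(&labels[offset], count, MPI_UNSIGNED_LONG_LONG,
                         pe, 0, communicator, MPI_STATUS_IGNORE);
                offset += count;
        }
        // ... write the gathered labels to disk here
} else {
        std::vector< NodeID > my_labels;
        forall_local_nodes(G, node) {
                my_labels.push_back(G.getNodeLabel(node));
        } endfor
        MPI_Send(my_labels.data(), (int)my_labels.size(),
                 MPI_UNSIGNED_LONG_LONG, ROOT, 0, communicator);
}
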
// issue the receives before the sends to avoid deadlock during the message exchange
void parallel_projection::parallel_project( MPI_Comm communicator, parallel_graph_access & finer, parallel_graph_access & coarser ) {
        PEID rank, size;
        MPI_Comm_rank( communicator, &rank);
        MPI_Comm_size( communicator, &size);
        
        NodeID divisor = (NodeID)std::ceil(coarser.number_of_global_nodes() / (double)size); // size of each PE's contiguous block of global IDs
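        // worked example (illustration only): with 1000 global coarse nodes and
        // 4 PEs, divisor = 250, so global IDs 0..249 map to PE 0 (0/250 == 0),
        // IDs 250..499 to PE 1, and so on; cnode / divisor below inverts this
        // block distribution to find a node's owner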

        m_messages.resize(size); // one request buffer per PE

        std::unordered_map< NodeID, std::vector< NodeID > > cnode_to_nodes; // coarse node -> local fine nodes waiting for its label
        forall_local_nodes(finer, node) {
                NodeID cnode = finer.getCNode(node);
                if( coarser.is_local_node_from_global_id(cnode) ) {
                        NodeID new_label = coarser.getNodeLabel(coarser.getLocalID(cnode));
                        finer.setNodeLabel(node, new_label);
                } else {
                        //we have to request it from another PE
                        PEID peID = cnode / divisor; // cnode is a global ID; blocks of divisor consecutive IDs live on the same PE

                        if( cnode_to_nodes.find( cnode ) == cnode_to_nodes.end()) {
                                m_messages[peID].push_back(cnode); // request the label of this coarse node only once
                        }

                        cnode_to_nodes[cnode].push_back(node);
                }
        } endfor
        // ... (excerpt truncated; the requests in m_messages still have to be
        // exchanged and the received labels applied to the waiting fine nodes)
}
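
The leading comment on this routine ("issue the receives before the sends") refers to the exchange that follows the loop above. Here is a minimal sketch of that pattern under stated assumptions: message sizes are traded with MPI_Alltoall first, NodeID maps to MPI_UNSIGNED_LONG_LONG, and tag 0 is free; the original routine's actual protocol may differ.

// hypothetical exchange sketch: post all non-blocking receives before any
// send, so two PEs messaging each other cannot deadlock
std::vector< int > send_counts(size, 0), recv_counts(size, 0);
for( PEID pe = 0; pe < size; pe++) send_counts[pe] = (int)m_messages[pe].size();
MPI_Alltoall(send_counts.data(), 1, MPI_INT,
             recv_counts.data(), 1, MPI_INT, communicator);

std::vector< std::vector< NodeID > > recv_buffers(size);
std::vector< MPI_Request > requests(size, MPI_REQUEST_NULL);
for( PEID pe = 0; pe < size; pe++) {              // receives first
        if( pe == rank || recv_counts[pe] == 0 ) continue;
        recv_buffers[pe].resize(recv_counts[pe]);
        MPI_Irecv(recv_buffers[pe].data(), recv_counts[pe],
                  MPI_UNSIGNED_LONG_LONG, pe, 0, communicator, &requests[pe]);
}
for( PEID pe = 0; pe < size; pe++) {              // then the sends
        if( pe == rank || send_counts[pe] == 0 ) continue;
        MPI_Send(m_messages[pe].data(), send_counts[pe],
                 MPI_UNSIGNED_LONG_LONG, pe, 0, communicator);
}
MPI_Waitall(size, requests.data(), MPI_STATUSES_IGNORE);
// recv_buffers[pe] now holds the coarse-node IDs whose labels PE pe requested;
// each request would be answered via coarser.getNodeLabel(coarser.getLocalID(cnode)),
// and on receipt the label is applied to every node in cnode_to_nodes[cnode]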