/**
 * Test program: split MPI_COMM_WORLD into an "even" and an "odd" GA process
 * group and have each rank report its id/size within its group.
 *
 * BUG FIXES vs. original:
 *  - The fill loop advanced a single counter j on EVERY iteration, so both
 *    list_even[j] and list_odd[j] were written at indices 0..nprocs-1,
 *    overflowing the p_size-element allocations.  Separate counters are
 *    used now.
 *  - list_even was malloc'd with p_size entries although
 *    GA_Pgroup_create(list_even, p_size_mod) reads p_size_mod entries when
 *    nprocs is odd; it is now sized p_size_mod.
 *  - The unreachable "else break" branch was removed and the lists are freed.
 */
int main(int argc, char **argv)
{
  int rank, nprocs, i;
  int p_Geven, p_Godd, p_size, mod, p_size_mod;
  int *list_even = NULL, *list_odd = NULL;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  MA_init(C_INT, 1000, 1000);
  GA_Initialize();

  p_size = nprocs/2;
  mod = nprocs%2;
  /* the even group gets the extra rank when nprocs is odd */
  p_size_mod = p_size + mod;

  list_even = (int*)malloc(p_size_mod*sizeof(int));
  list_odd = (int*)malloc(p_size*sizeof(int));

  /* independent counters: je indexes list_even, jo indexes list_odd */
  {
    int je = 0, jo = 0;
    for (i = 0; i < nprocs; i++) {
      if (i % 2 == 0) {
        list_even[je++] = i;
      } else {
        list_odd[jo++] = i;
      }
    }
  }

  p_Geven = GA_Pgroup_create(list_even, p_size_mod);
  p_Godd = GA_Pgroup_create(list_odd, p_size);

  if (rank % 2 == 0) {
    printf("%d: My ID is %d :: %d -- even \n", rank,
        GA_Pgroup_nodeid(p_Geven), GA_Pgroup_nnodes(p_Geven));
  } else {
    printf("%d: My ID is %d :: %d --- odd\n", rank,
        GA_Pgroup_nodeid(p_Godd), GA_Pgroup_nnodes(p_Godd));
  }

  GA_Sync();
  if (rank == 0) GA_PRINT_MSG();

  free(list_even);
  free(list_odd);

  GA_Terminate();
  MPI_Finalize();
  return 0;
}
/**
 * Open GOSS channel
 * Only the head node (GA group node 0) of the communicator's process group
 * actually contacts the broker; all other ranks return without side effects.
 * On success the connection/session/producer members are populated, a
 * "_goss_channel_opened <time>" message is published, and p_open is set.
 * @param comm communicator for whatever module is opening the channel
 * @param topic channel topic. This must be chosen from list of topics
 *              used to initialize the GOSSUtils object
 */
void gridpack::goss::GOSSUtils::openGOSSChannel(gridpack::parallel::Communicator
    &comm, std::string topic)
{
  p_grp = comm.getGroup();
  // Guard: do nothing if a channel is already open; only group node 0 talks
  // to the broker
  if (!p_open && GA_Pgroup_nodeid(p_grp) == 0) {
    printf("Opening Channel\n");
#ifndef GOSS_DEBUG
    // NOTE(review): std::auto_ptr is deprecated (removed in C++17); consider
    // std::unique_ptr if the required standard level allows it
    std::auto_ptr<ActiveMQConnectionFactory>
      connectionFactory(new ActiveMQConnectionFactory(p_URI));
    // Create a Connection
    p_connection = connectionFactory->createConnection(p_username, p_passwd);
    p_connection->start();
    // Create a Session
    p_session = p_connection->createSession(Session::AUTO_ACKNOWLEDGE);
#endif
    // Create the destination (Topic or Queue)
    std::string new_topic("topic.goss.gridpack.");
    //std::string new_topic("topic/goss/gridpack/");
    // Make sure there is no white space around topic
    gridpack::utility::StringUtils utils;
    utils.trim(topic);
    new_topic.append(topic);
    // Remember the (trimmed) topic so closeGOSSChannel can build the
    // matching acknowledge topic later
    p_current_topic = topic;
    printf("new topic = %s\n", new_topic.c_str());
#ifndef GOSS_DEBUG
    p_destination = p_session->createTopic(new_topic);

    // Create a MessageProducer from the Session to the Topic
    p_producer = p_session->createProducer(p_destination);
    p_producer->setDeliveryMode(DeliveryMode::NON_PERSISTENT);
    char sbuf[256];
    gridpack::utility::CoarseTimer *timer =
      gridpack::utility::CoarseTimer::instance();
    sprintf(sbuf,"_goss_channel_opened %f",timer->currentTime());
    // NOTE(review): this assumes createTextMessage copies sbuf, so the
    // sprintf below (which reuses sbuf for logging only) does not alter the
    // queued message -- verify against the CMS API documentation
    std::auto_ptr<TextMessage> message(p_session->createTextMessage(sbuf));
    sprintf(sbuf,"_goss_channel_opened topic: %s %f",topic.c_str(),
        timer->currentTime());
    printf("%s\n",sbuf);
    p_producer->send(message.get());
#endif
    p_open = true;
  } else {
    // Report the error once, from the group head node only
    if (GA_Pgroup_nodeid(p_grp) == 0) {
      printf("ERROR: Channel already opened\n");
    }
  }
}
/** * Send a message over an open GOSS channel * @param text message to be sent */ void gridpack::goss::GOSSUtils::sendGOSSMessage(std::string &text) { if (GA_Pgroup_nodeid(p_grp) == 0) { if (p_open) { #ifndef GOSS_DEBUG std::auto_ptr<TextMessage> message( p_session->createTextMessage(text)); printf("Sending message of length %d\n",text.length()); p_producer->send(message.get()); #else printf("Sending message of length %d\n",text.length()); printf("message %s\n",text.c_str()); #endif } else { printf("No GOSS channel is open"); } } }
/**
 * Shared-counter ("nxtval"-style) task primitive built on a one-element
 * Global Array, used for dynamic load balancing.  Behavior is selected by
 * the value pointed to by val:
 *   *val > 0  : atomically fetch-and-increment the shared counter and
 *               return its previous value (the next task id)
 *   *val == 0 : create and zero-initialize the counter array on the process
 *               group identified by *p_handle; returns 0
 *   *val < 0  : destroy the counter array and reset the module state;
 *               returns 0
 * Returns -1 if none of the above applies (should not happen).
 *
 * Relies on file-scope state declared elsewhere in this file: g_T (the GA
 * handle), initialized, initval, and subscript.
 *
 * @param val selects the operation as described above
 * @param p_handle GA process-group handle the counter lives on
 */
Integer util_tcesublock_(Integer *val,Integer *p_handle)
{
// if(*p_handle==0) exit(1); //ga_error("nxtask: p_handle is zero", 1);

 if(*val > 0) {
// if(!initialized) exit(1); //ga_error("nxtask: not yet initialized", 1);
    /* atomic fetch-and-add of 1 on the single counter element */
    return (Integer) NGA_Read_inc(g_T, &subscript, 1);
 }
 else if(*val==0) {
    int n = 1;
    initialized=1;
    int p_h = (int)*p_handle;
    /* create task array */
//    g_T = NGA_Create(C_LONG, 1, &n,"Atomic Task", NULL);
    g_T = NGA_Create_config(C_LONG,1,&n,"Atomic Task",NULL,p_h);
    /* Initialize the task array; only the group head node writes it */
    if(GA_Pgroup_nodeid(p_h)==0) {
      int lo=0, hi=0;
      /* NOTE(review): &hi is passed as the leading-dimension argument of
         NGA_Put; for a 1-D single-element put the ld value is ignored, but
         confirm against the GA API before changing */
      NGA_Put (g_T, &lo, &hi, &initval, &hi);
//      printf("PUT %i %i %i\n",sizeof(*p_handle),sizeof(Integer),sizeof(int));
      initval=0;
    }
    /* make the initialized counter visible to the whole group */
    GA_Pgroup_sync(p_h);
//    printf("CREATE %i %i \n",*p_handle,g_T);
    return 0;
 }
 else if (*val < 0) {
    GA_Destroy(g_T);
//    printf("DELETE %i %i \n",*p_handle,g_T);
//     ga_pgroup_sync_(p_handle);
    /* reset module state so the counter can be re-created later */
    initialized=0;
    initval=0;
    return 0;
 }
//  ga_error("nxtval: invalid value passed", 0L);
 return -1;
}
/**
 * Initialize mapper for the given network and the current mode. Create global
 * arrays that contain offsets that will be used to create vector from the
 * network component objects
 * @param network network that will generate vector
 */
GenVectorMap(boost::shared_ptr<_network> network)
  : p_network(network)
{
  p_Offsets = NULL;
  p_timer = NULL;
  //p_timer = gridpack::utility::CoarseTimer::instance();

  // Cache the GA process group plus this rank's id and the group size
  p_GAgrp = network->communicator().getGroup();
  p_me = GA_Pgroup_nodeid(p_GAgrp);
  p_nNodes = GA_Pgroup_nnodes(p_GAgrp);

  // One offset slot per process in the group
  p_Offsets = new int[p_nNodes];

  p_nBuses = p_network->numBuses();
  p_nBranches = p_network->numBranches();

  // NOTE: these three calls are order-dependent -- offsets are computed from
  // the dimensions, and indices from the offsets
  getDimensions();
  setOffsets();
  setIndices();
  // Make sure every process has finished setup before the mapper is used
  GA_Pgroup_sync(p_GAgrp);
}
/**
 * Close GOSS channel.
 * The group head node publishes a "_goss_channel_closed" message on the
 * current topic, waits for the server's confirmation on the matching
 * "<topic>.acknowledge" topic, and then tears down the CMS objects.
 * @param comm communicator for whatever module is closing the channel
 */
void gridpack::goss::GOSSUtils::closeGOSSChannel(gridpack::parallel::Communicator &comm)
{
  // Only the group head node talks to the broker
  if (GA_Pgroup_nodeid(p_grp) == 0) {
    // Send final message indicating the channel is being closed
#ifndef GOSS_DEBUG
    std::string buf = "_goss_channel_closed";
    std::auto_ptr<TextMessage> message(p_session->createTextMessage(buf));
    p_producer->send(message.get());
    gridpack::utility::CoarseTimer *timer =
      gridpack::utility::CoarseTimer::instance();
    char sbuf[256];
    sprintf(sbuf,"_goss_channel_closed topic: %s %f",p_current_topic.c_str(),
        timer->currentTime());
    printf("%s\n",sbuf);
    // Listen on "<topic>.acknowledge" for the server's confirmation
    std::string acknowledge_topic("topic.goss.gridpack.");
    acknowledge_topic.append(p_current_topic);
    acknowledge_topic.append(".acknowledge");
    printf("_goss_channel_ack: %s\n",acknowledge_topic.c_str());
    std::auto_ptr<Destination> dest(p_session->createTopic(acknowledge_topic));
    std::auto_ptr<MessageConsumer> consumer(p_session->createConsumer(dest.get()));
    std::cout << "Waiting for messages..."<<std::endl;
    std::auto_ptr<Message> next_message(consumer->receive());
    const TextMessage *txtMsg = dynamic_cast<const TextMessage*>(next_message.get());
    // BUG FIX: dynamic_cast yields NULL when the reply is not a TextMessage;
    // the original dereferenced txtMsg unconditionally and would crash.
    if (txtMsg == NULL) {
      std::cout << "Message failure: reply was not a text message"<<std::endl;
    } else if (txtMsg->getText() != "success") {
      std::cout << "Message failure: "<<txtMsg->getText()<<std::endl;
    }
    // Release the CMS objects created by openGOSSChannel.
    // BUG FIX: null the pointers after deleting so a second call to
    // closeGOSSChannel (or a later openGOSSChannel failure path) cannot
    // double-delete them.
    if (p_connection) { delete p_connection; p_connection = NULL; }
    if (p_session) { delete p_session; p_session = NULL; }
    if (p_destination) { delete p_destination; p_destination = NULL; }
    if (p_producer) { delete p_producer; p_producer = NULL; }
#endif
    p_open = false;
  }
}
// ------------------------------------------------------------- // CreateMatGA // ------------------------------------------------------------- static PetscErrorCode CreateMatGA(int pgroup, int lrows, int lcols, int grows, int gcols, int *ga) { PetscErrorCode ierr = 0; /* Try to honor local ownership request (of rows). */ int nprocs = GA_Pgroup_nnodes(pgroup); int me = GA_Pgroup_nodeid(pgroup); int tmapc[nprocs+1]; int mapc[nprocs+1]; int i; for (i = 0; i < nprocs+1; i++) tmapc[i] = 0; tmapc[me] = lrows; GA_Pgroup_igop(pgroup, tmapc, nprocs+1, "+"); mapc[0] = 0; for (i = 1; i < nprocs; i++) mapc[i] = mapc[i-1]+tmapc[i-1]; mapc[nprocs] = 0; int dims[2] = {grows, gcols}; int blocks[2] = { nprocs, 1 }; *ga = GA_Create_handle(); GA_Set_data(*ga, 2, dims, MT_PETSC_SCALAR); GA_Set_irreg_distr(*ga, mapc, blocks); GA_Set_pgroup(*ga, pgroup); if (!GA_Allocate(*ga)) { ierr = 1; } PetscScalar z(0.0); GA_Fill(*ga, &z); return ierr; }
// -------------------------------------------------------------
// AdjacencyList::ready
// -------------------------------------------------------------
/**
 * Build the adjacency list (p_adjacency) for the locally owned nodes from
 * the edge list.  The active (#if 1) implementation uses Global Arrays:
 *  1. scatter each node's global index into a distributed array indexed by
 *     original index, so edges can translate original -> global endpoints;
 *  2. gather all edges into a second distributed array;
 *  3. scan every process's slice of the edge array and record, for each
 *     locally owned node, the global indices of its neighbors.
 * The disabled #else branch is an older boost::mpi broadcast/gather version
 * kept for reference.
 */
void AdjacencyList::ready(void)
{
#if 1
  int grp = this->communicator().getGroup();
  int me = GA_Pgroup_nodeid(grp);
  int nprocs = GA_Pgroup_nnodes(grp);
  p_adjacency.clear();
  p_adjacency.resize(p_global_nodes.size());

  // Find total number of nodes and edges. Assume no duplicates
  int nedges = p_edges.size();
  int total_edges = nedges;
  char plus[2];
  strcpy(plus,"+");
  GA_Pgroup_igop(grp,&total_edges, 1, plus);
  int nnodes = p_original_nodes.size();
  int total_nodes = nnodes;
  GA_Pgroup_igop(grp,&total_nodes, 1, plus);

  // Create a global array containing original indices of all nodes and indexed
  // by the global index of the node
  int i, p;
  // NOTE(review): VLA -- a gcc/clang extension in C++
  int dist[nprocs];
  for (p=0; p<nprocs; p++) {
    dist[p] = 0;
  }
  dist[me] = nnodes;
  // after this all-reduce, dist[p] holds the node count owned by process p
  GA_Pgroup_igop(grp,dist,nprocs,plus);
  // mapc = prefix sums of dist: start offset of each process's node block
  int *mapc = new int[nprocs+1];
  mapc[0] = 0;
  for (p=1; p<nprocs; p++) {
    mapc[p] = mapc[p-1] + dist[p-1];
  }
  mapc[nprocs] = total_nodes;
  int g_nodes = GA_Create_handle();
  int dims = total_nodes;
  NGA_Set_data(g_nodes,1,&dims,C_INT);
  NGA_Set_pgroup(g_nodes, grp);
  if (!GA_Allocate(g_nodes)) {
    char buf[256];
    sprintf(buf,"AdjacencyList::ready: Unable to allocate distributed array"
        " for bus indices\n");
    printf(buf);
    throw gridpack::Exception(buf);
  }
  // This process owns entries [lo,hi] of g_nodes
  int lo, hi;
  lo = mapc[me];
  hi = mapc[me+1]-1;
  int size = hi - lo + 1;
  int o_idx[size], g_idx[size];
  for (i=0; i<size; i++) o_idx[i] = p_original_nodes[i];
  for (i=0; i<size; i++) g_idx[i] = p_global_nodes[i];
  // NGA_Scatter wants an array of pointers to the subscripts; build one that
  // points at successive entries of g_idx
  int **indices= new int*[size];
  int *iptr = g_idx;
  for (i=0; i<size; i++) {
    indices[i] = iptr;
    iptr++;
  }
  // store each node's original index at position "global index" in g_nodes
  if (size > 0) NGA_Scatter(g_nodes,o_idx,indices,size);
  GA_Pgroup_sync(grp);
  delete [] indices;
  delete [] mapc;

  // Cycle through all nodes and match them up with nodes at end of edges.
  // Visit processes in round-robin order starting from this rank so the
  // remote gets are staggered across the group.
  for (p=0; p<nprocs; p++) {
    int iproc = (me+p)%nprocs;
    // Get node data from process iproc
    NGA_Distribution(g_nodes,iproc,&lo,&hi);
    size = hi - lo + 1;
    if (size <= 0) continue;
    int *buf = new int[size];
    int ld = 1;
    NGA_Get(g_nodes,&lo,&hi,buf,&ld);
    // Create a map of the nodes from process p: original index -> global index
    std::map<int,int> nmap;
    std::map<int,int>::iterator it;
    std::pair<int,int> pr;
    for (i=lo; i<=hi; i++){
      pr = std::pair<int,int>(buf[i-lo],i);
      nmap.insert(pr);
    }
    delete [] buf;
    // scan through the edges looking for matches. If there is a match, set the
    // global index
    int idx;
    for (i=0; i<nedges; i++) {
      idx = static_cast<int>(p_edges[i].original_conn.first);
      it = nmap.find(idx);
      if (it != nmap.end()) {
        p_edges[i].global_conn.first = static_cast<Index>(it->second);
      }
      idx = static_cast<int>(p_edges[i].original_conn.second);
      it = nmap.find(idx);
      if (it != nmap.end()) {
        p_edges[i].global_conn.second = static_cast<Index>(it->second);
      }
    }
  }
  GA_Destroy(g_nodes);

  // All edges now have global indices assigned to them. Begin constructing
  // adjacency list. Start by creating a global array containing all edges
  // (two ints per edge), with an even split of edges across processes
  dist[0] = 0;
  for (p=1; p<nprocs; p++) {
    double max = static_cast<double>(total_edges);
    max = (static_cast<double>(p))*(max/(static_cast<double>(nprocs)));
    dist[p] = 2*(static_cast<int>(max));
  }
  int g_edges = GA_Create_handle();
  dims = 2*total_edges;
  NGA_Set_data(g_edges,1,&dims,C_INT);
  NGA_Set_irreg_distr(g_edges,dist,&nprocs);
  NGA_Set_pgroup(g_edges, grp);
  if (!GA_Allocate(g_edges)) {
    char buf[256];
    sprintf(buf,"AdjacencyList::ready: Unable to allocate distributed array"
        " for branch indices\n");
    printf(buf);
    throw gridpack::Exception(buf);
  }

  // Add edge information to global array. Start by figuring out how much data
  // is associated with each process
  for (p=0; p<nprocs; p++) {
    dist[p] = 0;
  }
  dist[me] = nedges;
  GA_Pgroup_igop(grp,dist, nprocs, plus);
  // offset[p] = starting element in g_edges for process p's edges
  int offset[nprocs];
  offset[0] = 0;
  for (p=1; p<nprocs; p++) {
    offset[p] = offset[p-1] + 2*dist[p-1];
  }
  // Figure out where local data goes in GA and then copy it to GA
  lo = offset[me];
  hi = lo + 2*nedges - 1;
  // pack edges as consecutive (first,second) endpoint pairs
  int edge_ids[2*nedges];
  for (i=0; i<nedges; i++) {
    edge_ids[2*i] = static_cast<int>(p_edges[i].global_conn.first);
    edge_ids[2*i+1] = static_cast<int>(p_edges[i].global_conn.second);
  }
  if (lo <= hi) {
    int ld = 1;
    NGA_Put(g_edges,&lo,&hi,edge_ids,&ld);
  }
  GA_Pgroup_sync(grp);

  // Cycle through all edges and find out how many are attached to the nodes on
  // your process. Start by creating a map between the global node indices and
  // the local node indices
  std::map<int,int> gmap;
  std::map<int,int>::iterator it;
  std::pair<int,int> pr;
  for (i=0; i<nnodes; i++){
    pr = std::pair<int,int>(static_cast<int>(p_global_nodes[i]),i);
    gmap.insert(pr);
  }
  // Cycle through edge information on each processor
  for (p=0; p<nprocs; p++) {
    int iproc = (me+p)%nprocs;
    NGA_Distribution(g_edges,iproc,&lo,&hi);
    int size = hi - lo + 1;
    // NOTE(review): unlike the g_nodes loop above there is no "size <= 0"
    // guard here before new int[size] -- confirm every process owns at least
    // one element of g_edges, or add the guard
    int *buf = new int[size];
    int ld = 1;
    NGA_Get(g_edges,&lo,&hi,buf,&ld);
    BOOST_ASSERT(size%2 == 0);
    size = size/2;
    int idx1, idx2;
    Index idx;
    // For each edge, if either endpoint is a locally owned node, record the
    // other endpoint as one of its neighbors
    for (i=0; i<size; i++) {
      idx1 = buf[2*i];
      idx2 = buf[2*i+1];
      it = gmap.find(idx1);
      if (it != gmap.end()) {
        idx = static_cast<Index>(idx2);
        p_adjacency[it->second].push_back(idx);
      }
      it = gmap.find(idx2);
      if (it != gmap.end()) {
        idx = static_cast<Index>(idx1);
        p_adjacency[it->second].push_back(idx);
      }
    }
    delete [] buf;
  }
  GA_Destroy(g_edges);
  GA_Pgroup_sync(grp);
#else
  // Legacy boost::mpi implementation, disabled
  int me(this->processor_rank());
  int nproc(this->processor_size());

  p_adjacency.clear();
  p_adjacency.resize(p_nodes.size());

  IndexVector current_indexes;
  IndexVector connected_indexes;

  for (int p = 0; p < nproc; ++p) {

    // broadcast the node indexes owned by process p to all processes,
    // all processes work on these at once
    current_indexes.clear();
    if (me == p) {
      std::copy(p_nodes.begin(), p_nodes.end(),
          std::back_inserter(current_indexes));
      // std::cout << me << ": node indexes: ";
      // std::copy(current_indexes.begin(), current_indexes.end(),
      //     std::ostream_iterator<Index>(std::cout, ","));
      // std::cout << std::endl;
    }
    boost::mpi::broadcast(this->communicator(), current_indexes, p);

    // make a copy of the local edges in a list (so it's easier to
    // remove those completely accounted for)
    std::list<p_Edge> tmpedges;
    std::copy(p_edges.begin(), p_edges.end(),
        std::back_inserter(tmpedges));

    // loop over the process p's node index set
    int local_index(0);
    for (IndexVector::iterator n = current_indexes.begin();
        n != current_indexes.end(); ++n, ++local_index) {

      // determine the local edges that refer to the current node index
      connected_indexes.clear();
      std::list<p_Edge>::iterator e(tmpedges.begin());

      // std::cout << me << ": current node index: " << *n
      //     << ", edges: " << tmpedges.size()
      //     << std::endl;

      while (e != tmpedges.end()) {
        if (*n == e->conn.first && e->conn.second != bogus) {
          connected_indexes.push_back(e->conn.second);
          e->found.first = true;
          // std::cout << me << ": found connection: edge " << e->index
          //     << " (" << e->conn.first << ", " << e->conn.second << ")"
          //     << std::endl;
        }
        if (*n == e->conn.second && e->conn.first != bogus) {
          connected_indexes.push_back(e->conn.first);
          e->found.second = true;
          // std::cout << me << ": found connection: edge " << e->index
          //     << " (" << e->conn.first << ", " << e->conn.second << ")"
          //     << std::endl;
        }
        // drop edges that are fully matched or that touch a bogus endpoint
        if (e->found.first && e->found.second) {
          e = tmpedges.erase(e);
        } else if (e->conn.first == bogus || e->conn.second == bogus) {
          e = tmpedges.erase(e);
        } else {
          ++e;
        }
      }

      // gather all connections for the current node index to the
      // node's owner process, we have to gather the vectors because
      // processes will have different numbers of connections
      if (me == p) {
        size_t allsize;
        boost::mpi::reduce(this->communicator(),
            connected_indexes.size(), allsize, std::plus<size_t>(), p);

        std::vector<IndexVector> all_connected_indexes;
        boost::mpi::gather(this->communicator(),
            connected_indexes, all_connected_indexes, p);
        // flatten the gathered per-process vectors into this node's
        // adjacency entry
        p_adjacency[local_index].clear();
        for (std::vector<IndexVector>::iterator k = all_connected_indexes.begin();
            k != all_connected_indexes.end(); ++k) {
          std::copy(k->begin(), k->end(),
              std::back_inserter(p_adjacency[local_index]));
        }
      } else {
        // non-owner side of the reduce/gather collectives
        boost::mpi::reduce(this->communicator(),
            connected_indexes.size(), std::plus<size_t>(), p);
        boost::mpi::gather(this->communicator(), connected_indexes, p);
      }
      this->communicator().barrier();
    }
    this->communicator().barrier();
  }
#endif
}