// Shut down a listening node: send a stop packet to unblock the command
// processing, join the receiver thread, and tear down connection state.
// Returns false when the node is not currently listening, true otherwise.
bool LocalNode::close()
{
    // Only a node in the listening state can be closed.
    if( _state != STATE_LISTENING )
        return false;

    // Tell the receiver/command loop to stop processing.
    NodeStopPacket packet;
    send( packet );

    // Wait for the receiver thread to exit before releasing its state.
    EQCHECK( _receiverThread->join( ));
    _cleanup();

    // After cleanup no connections should remain; log what is left.
    EQINFO << _incoming.getSize() << " connections open after close"
           << std::endl;
#ifndef NDEBUG
    // Debug builds: enumerate any connections that survived cleanup.
    const Connections& connections = _incoming.getConnections();
    for( Connections::const_iterator i = connections.begin();
         i != connections.end(); ++i )
    {
        EQINFO << " " << *i << std::endl;
    }
#endif

    // All registered requests should have been served or cancelled by now.
    EQASSERTINFO( !hasPendingRequests(),
                  *static_cast< base::RequestHandler* >( this ));
    return true;
}
/*
 ******************************************************************
 * Drive every staged Member's outstanding communication to
 * completion by repeatedly invoking advanceSome() until no request
 * remains pending, then report whether any Member reached the
 * completion queue.
 ******************************************************************
 */
bool
AsyncCommStage::advanceAll()
{
   // Each pass through advanceSome() may retire several requests;
   // keep polling until the stage has nothing left in flight.
   for ( ; hasPendingRequests(); ) {
      advanceSome();
   }

   // True when at least one Member finished and was queued as completed.
   return d_completed_members.empty() ? false : true;
}
// A tile image has finished loading: hand it to the texture cache for the
// given tree node and emit progress notifications. Does nothing when no
// cache is attached.
void ImageTree::finishedLoading(QImage &img, Alg::MapTreeNode *node)
{
    if (_cache) {
        // Store the decoded image for this node.
        _cache->setTexture(img, node);
        tilesUpdated();

        // That was the last outstanding request -> signal completion.
        if (!hasPendingRequests()) {
            tilesComplete();
        }
    }
}
/*
 * Destructor. A Member must have completed (or never started) its
 * communication before destruction; tearing one down mid-flight would
 * corrupt the message-passing algorithms using it, so that case is a
 * fatal error.
 */
AsyncCommStage::Member::~Member()
{
   if (hasPendingRequests()) {
      TBOX_ERROR("Cannot deallocate a Member with pending communications.\n"
         << "It would corrupt message passing algorithms.\n");
   }

   // Unregister from the owning stage, if this Member is still staged.
   AsyncCommStage* const stage = d_stage;
   if (stage != 0) {
      stage->privateDestageMember(this);
   }

   d_handler = 0;
}
/* ****************************************************************** * Advance one or more communication Members by using * MPI_Waitsome to complete one or more communication requests. * * Get one or more completed requests and check their communication * Members to see if any Member finished its communication operation. * If at least one Member finished its communication, return those that * finished. If no Member has finished, repeat until at least one has. ****************************************************************** */ bool AsyncCommStage::advanceSome() { if (!SAMRAI_MPI::usingMPI()) { return false; } if (d_members.empty()) { // Short cut for an empty stage. return false; } #ifdef DEBUG_CHECK_ASSERTIONS for (unsigned int i = static_cast<unsigned int>(d_member_to_req[d_members.size()]); i < d_req.size(); ++i) { if (d_req[i] != MPI_REQUEST_NULL) TBOX_WARNING("non-null request above d_n_req." << std::endl); } #endif std::vector<int> index(static_cast<int>(d_member_to_req[d_members.size()])); std::vector<SAMRAI_MPI::Status> stat( static_cast<int>(d_member_to_req[d_members.size()])); size_t n_member_completed = 0; int n_req_completed = 0; do { int errf; if (d_communication_timer) d_communication_timer->start(); errf = SAMRAI_MPI::Waitsome( static_cast<int>(d_member_to_req[d_members.size()]), &d_req[0], &n_req_completed, &index[0], &stat[0]); if (d_communication_timer) d_communication_timer->stop(); #ifdef DEBUG_CHECK_ASSERTIONS if (n_req_completed <= 0) { /* * Undocumented feature of some MPI_Waitsome implementations: * MPI_Waitsome sets n_req_completed to a negative number * if all the input requests are MPI_REQUEST_NULL. 
*/ for (size_t i = 0; i < d_member_to_req[d_members.size()]; ++i) { if (d_req[i] != MPI_REQUEST_NULL) { TBOX_ERROR("Library error in AsyncCommStage::advanceSome:\n" << "errf = " << errf << '\n' << "MPI_SUCCESS = " << MPI_SUCCESS << '\n' << "MPI_ERR_IN_STATUS = " << MPI_ERR_IN_STATUS << '\n' << "MPI_REQUEST_NULL = " << MPI_REQUEST_NULL << '\n' << "number of requests = " << d_member_to_req[d_members.size()] << '\n' << "d_req.size() = " << d_req.size() << '\n' << "n_req_completed = " << n_req_completed << '\n' << "i = " << i << '\n' ); } } for (unsigned int i = static_cast<unsigned int>(d_member_to_req[d_members.size()]); i < d_req.size(); ++i) { if (d_req[i] != MPI_REQUEST_NULL) TBOX_WARNING("non-null request above d_n_reg." << std::endl); } } if (n_req_completed == 0) { TBOX_ASSERT(!hasPendingRequests()); } #endif if (errf != MPI_SUCCESS) { TBOX_ERROR("Error in MPI_Waitsome call.\n" << "Error-in-status is " << (errf == MPI_ERR_IN_STATUS) << '\n'); } /* * Construct array of Members with at least one completed * request. */ // Number of Members to check with at least one completed request. unsigned int n_check_member = 0; for (int iout = 0; iout < n_req_completed; ++iout) { // Save status of completed request. d_stat[index[iout]] = stat[iout]; /* * Change index from request index to Member index. * If the Member index is not a duplicate, add it to * the list of Members to check (which is actually * the same list) and increase n_check_member. 
*/ index[iout] = static_cast<int>(d_req_to_member[index[iout]]); #ifdef AsyncCommStage_ExtraDebug plog << "AsyncCommStage::advanceSome completed:" << " tag-" << stat[iout].MPI_TAG << " source-" << stat[iout].MPI_SOURCE << " for member index " << index[iout] << std::endl; #endif unsigned int i; for (i = 0; i < n_check_member; ++i) { if (index[i] == index[iout]) break; } if (i == n_check_member) { index[n_check_member++] = index[iout]; } } /* * Check the Members whose requests completed and count up the * Members that completed all their communication tasks. */ for (unsigned int imember = 0; imember < n_check_member; ++imember) { Member& memberi = *d_members[index[imember]]; TBOX_ASSERT(!memberi.isDone()); bool memberi_done = memberi.proceedToNextWait(); #ifdef AsyncCommStage_ExtraDebug plog << "AsyncCommStage::advanceSome proceedToNextWait for member:" << memberi.d_index_on_stage << " completion=" << memberi_done << std::endl; #endif if (memberi_done) { ++n_member_completed; TBOX_ASSERT(!memberi.hasPendingRequests()); privatePushToCompletionQueue(memberi); } } } while (n_req_completed > 0 && n_member_completed == 0); return !d_completed_members.empty(); }