// Redundantly read one fixed-size message (min_size bytes) from every live
// replica socket and deliver, by majority vote, the response on which most
// replicas agree.
//
// buf      - destination buffer for the winning response (min_size bytes)
// min_size - exact number of bytes read from each replica
// max_size - unused here: each replica is read for exactly min_size bytes
//            (NOTE(review): presumably the replication protocol uses
//            fixed-size messages — confirm against callers)
// timeout  - per-read timeout passed through to the underlying sockets
//
// Returns min_size on success (sets succeed = true); -1 if no unambiguous
// majority response could be chosen (sets succeed = false).
int replication_socket_t::read(void* buf, size_t min_size, size_t max_size, time_t timeout) {
    // One min_size slot per socket; slot i holds the response from replica i.
    dbSmallBuffer receiveBuf(min_size*n_sockets);
    // matches[i] encodes equality chains over the received responses:
    //   -1  : no complete response from socket i (or entry already counted)
    //    0  : response received; currently the TAIL of its equality chain
    //   k>0 : response received; k is the next (larger) index whose
    //         response is byte-identical to this one
    int matches[MaxSockets];
    char* rcv = receiveBuf.base();
    int i, j, n = n_sockets;
    for (i = 0; i < n_sockets; i++) {
        matches[i] = -1;
        if (sockets[i] != NULL) {
            // Read exactly min_size bytes from replica i, looping over
            // partial reads.
            size_t received = 0;
            while (received < min_size) {
                int rc = sockets[i]->read(rcv + i*min_size + received, min_size - received, min_size - received, timeout);
                if (rc <= 0) {
                    // Read failure or EOF: report the error and permanently
                    // drop this replica (socket is deleted and nulled).
                    char msg[64];
                    sockets[i]->get_error_text(msg, sizeof(msg));
                    handleError(i, "read", msg);
                    delete sockets[i];
                    sockets[i] = NULL;
                    break;
                }
                received += rc;
            }
            if (received == min_size) {
                // Complete response: start as a chain tail, then attach to
                // the first earlier group whose tail response is
                // byte-identical (tails are the entries with matches == 0).
                matches[i] = 0;
                for (j = 0; j < i; j++) {
                    if (matches[j] == 0) {
                        if (memcmp(rcv + j*min_size, rcv + i*min_size, min_size) == 0) {
                            matches[j] = i;
                            break;
                        }
                    }
                }
            }
        }
    }
    // Count votes per equality chain. Chains only ever link to strictly
    // larger indices, so index 0 can only be a chain head — a stored 0
    // therefore safely doubles as the end-of-chain sentinel below, and
    // iterating i in increasing order visits each chain at its head.
    int maxVotes = 0;
    int correctResponse = -1;
    for (i = 0; i < n; i++) {
        if (matches[i] >= 0) {
            int nVotes = 0;
            j = i;
            do {
                int next = matches[j];
                nVotes += 1;
                matches[j] = -1; // mark counted so members are not revisited
                j = next;
            } while (j != 0);
            if (nVotes > maxVotes) {
                maxVotes = nVotes;
                correctResponse = i;
            } else if (nVotes == maxVotes) {
                // Tie between distinct response groups: treat the result as
                // ambiguous rather than picking arbitrarily.
                correctResponse = -1;
            }
        }
    }
    if (correctResponse >= 0) {
        succeed = true;
        memcpy(buf, rcv + correctResponse*min_size, min_size);
        return min_size;
    } else {
        handleError(-1, "read", "failed to choose correct response");
        succeed = false;
        return -1;
    }
}
// Reduce interface field data across a globally-coupled GGI interface using
// a master-gather / master-scatter pattern: every processor's contribution
// is assembled into a global zone field on the master, which then sends each
// processor only the slice it needs.
//
// ff       - local interface field; must match this->size()
// returns  - tmp field with the reduced data (identical copy when the
//            interface is locally parallel or the run is serial)
//
// NOTE(review): the template header precedes this region and the
// slave-processor branch plus final return lie beyond it — this view is a
// truncated portion of the definition.
tmp<Field<Type> > ggiGAMGInterface::fastReduce(const UList<Type>& ff) const
{
    // Algorithm
    // Local processor contains faceCells part of the zone and requires
    // zoneAddressing part.
    // For fast communications, each processor will send the faceCells and
    // zoneAddressing to the master. Master will assemble global zone
    // and send off messages to all processors containing only
    // the required data
    // HJ, 24/Jun/2011

    // Guard: the incoming field must cover exactly this interface.
    if (ff.size() != this->size())
    {
        FatalErrorIn
        (
            "tmp<Field<Type> > ggiGAMGInterface::fastReduce"
            "("
            " const UList<Type>& ff"
            ") const"
        )   << "Wrong field size. ff: " << ff.size()
            << " interface: " << this->size()
            << abort(FatalError);
    }

    if (localParallel() || !Pstream::parRun())
    {
        // Field remains identical: no parallel communications required
        tmp<Field<Type> > tresult(new Field<Type>(ff));

        return tresult;
    }

    // Execute reduce if not already done
    // (lazily builds the send/receive addressing used below)
    if (!initReduce_)
    {
        initFastReduce();
    }

    if (Pstream::master())
    {
        // Master collects information and distributes data.
        Field<Type> expandField(zoneSize(), pTraits<Type>::zero);

        // Insert master processor
        const labelList& za = zoneAddressing();

        forAll (za, i)
        {
            expandField[za[i]] = ff[i];
        }

        // Master receives and inserts data from all processors for which
        // receiveAddr contains entries
        for (label procI = 1; procI < Pstream::nProcs(); procI++)
        {
            const labelList& curRAddr = receiveAddr_[procI];

            if (!curRAddr.empty())
            {
                Field<Type> receiveBuf(curRAddr.size());

                // Opt: reconsider mode of communication
                IPstream::read
                (
                    Pstream::blocking,
                    procI,
                    reinterpret_cast<char*>(receiveBuf.begin()),
                    receiveBuf.byteSize()
                );

                // Insert received information
                forAll (curRAddr, i)
                {
                    expandField[curRAddr[i]] = receiveBuf[i];
                }
            }
        }

        // Expanded field complete, send required data to other processors
        for (label procI = 1; procI < Pstream::nProcs(); procI++)
        {
            const labelList& curSAddr = sendAddr_[procI];

            if (!curSAddr.empty())
            {
                // Pack only the slice of the global zone field that
                // processor procI actually requires.
                Field<Type> sendBuf(curSAddr.size());

                forAll (curSAddr, i)
                {
                    sendBuf[i] = expandField[curSAddr[i]];
                }

                // Opt: reconsider mode of communication
                OPstream::write
                (
                    Pstream::blocking,
                    procI,
                    reinterpret_cast<const char*>(sendBuf.begin()),
                    sendBuf.byteSize()
                );
            }
        }