void
avtDataObjectInformation::ParallelMerge(const avtDataObjectWriter_p dobw)
{
#ifdef PARALLEL
    int groupSize = 1;
    int myRank, commSize;

    MPI_Comm_size(VISIT_MPI_COMM, &commSize);
    MPI_Comm_rank(VISIT_MPI_COMM, &myRank);

    int mpiResultLenTag = GetUniqueMessageTag();
    int mpiResultStrTag = GetUniqueMessageTag();
    int mpiSwapLenTag   = GetUniqueMessageTag();
    int mpiSwapStrTag   = GetUniqueMessageTag();

    groupSize = 1;

    // walk up the communication tree, swapping and merging infos
    while (groupSize < commSize)
    {
        int swapWithProc = -1;
        int myGroupNum = myRank / groupSize;
        int myGroupIdx = myRank % groupSize;

        // determine processor to swap with
        if (myGroupNum % 2)   // myGroupNum is odd
            swapWithProc = (myGroupNum - 1) * groupSize + myGroupIdx;
        else                  // myGroupNum is even
            swapWithProc = (myGroupNum + 1) * groupSize + myGroupIdx;

        // only do the swap between 0th processors in each group AND only
        // if the processor to swap with is in range of the communicator
        if ((myGroupIdx == 0) && (0 <= swapWithProc) && (swapWithProc < commSize))
            SwapAndMerge(dobw, swapWithProc, mpiSwapLenTag, mpiSwapStrTag);

        groupSize <<= 1;
    }

    // At this point the processor(s) at the top of the tree have the
    // merged result, so now we need to re-distribute it to all processors.
    // Because the final SwapAndMerge left both roots of the last pairing
    // with the full result, the topmost fan-out level would be redundant;
    // start two levels down.
    groupSize >>= 2;

    // walk back down the communication tree, sending results
    while (groupSize >= 1)
    {
        int swapWithProc = -1;
        int myGroupNum = myRank / groupSize;
        int myGroupIdx = myRank % groupSize;

        // determine processor to send to or receive from
        if (myGroupNum % 2)   // myGroupNum is odd
        {
            swapWithProc = (myGroupNum - 1) * groupSize + myGroupIdx;
            if ((myGroupIdx == 0) && (0 <= swapWithProc) && (swapWithProc < commSize))
                RecvResult(dobw, swapWithProc, mpiResultLenTag, mpiResultStrTag);
        }
        else                  // myGroupNum is even
        {
            swapWithProc = (myGroupNum + 1) * groupSize + myGroupIdx;
            if ((myGroupIdx == 0) && (0 <= swapWithProc) && (swapWithProc < commSize))
                SendResult(dobw, swapWithProc, mpiResultLenTag, mpiResultStrTag);
        }

        groupSize >>= 1;
    }
#endif

    // indicate that it is ok to use this processor's extents as global extents
    GetAttributes().SetCanUseThisProcsAsOriginalOrActual(true);
}
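// -------------------------------------------------------------------------
// Illustrative sketch, not part of the VisIt sources: the same swap/merge
// tree used by ParallelMerge above, demonstrated on a single int per rank
// so the communication pattern is easy to follow.  The "merge" here is a
// sum; SwapAndMerge / SendResult / RecvResult are assumed to exchange the
// serialized data object information in an analogous (pairwise) way.
// -------------------------------------------------------------------------
#ifdef PARALLEL
static void
TreeMergeExample(MPI_Comm comm)
{
    int myRank, commSize, value, incoming;
    MPI_Comm_size(comm, &commSize);
    MPI_Comm_rank(comm, &myRank);
    value = myRank + 1;            // per-rank contribution; merge is a sum

    // up-sweep: 0th processors of paired groups exchange and merge
    int groupSize = 1;
    while (groupSize < commSize)
    {
        int myGroupNum = myRank / groupSize;
        int myGroupIdx = myRank % groupSize;
        int partner = (myGroupNum % 2 ? myGroupNum - 1 : myGroupNum + 1)
                      * groupSize + myGroupIdx;
        if (myGroupIdx == 0 && partner >= 0 && partner < commSize)
        {
            MPI_Sendrecv(&value, 1, MPI_INT, partner, 0,
                         &incoming, 1, MPI_INT, partner, 0,
                         comm, MPI_STATUS_IGNORE);
            value += incoming;     // both partners now hold the merged value
        }
        groupSize <<= 1;
    }

    // down-sweep: fan the merged value back out, skipping the redundant
    // topmost level (both roots of the final pairing already have it)
    groupSize >>= 2;
    while (groupSize >= 1)
    {
        int myGroupNum = myRank / groupSize;
        int myGroupIdx = myRank % groupSize;
        int partner = (myGroupNum % 2 ? myGroupNum - 1 : myGroupNum + 1)
                      * groupSize + myGroupIdx;
        if (myGroupIdx == 0 && partner >= 0 && partner < commSize)
        {
            if (myGroupNum % 2)
                MPI_Recv(&value, 1, MPI_INT, partner, 0, comm, MPI_STATUS_IGNORE);
            else
                MPI_Send(&value, 1, MPI_INT, partner, 0, comm);
        }
        groupSize >>= 1;
    }
    // every rank now holds the same merged value
}
#endif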
void
avtStreamlineInfoQuery::PostExecute()
{
    // Everyone communicates their data to proc 0.
#ifdef PARALLEL
    int nProcs = PAR_Size();
    int *counts = new int[nProcs];
    for (int i = 0; i < nProcs; i++)
        counts[i] = 0;
    counts[PAR_Rank()] = slData.size();
    Collect(counts, nProcs);

    int tag = GetUniqueMessageTag();
    MPI_Status stat;
    if (PAR_Rank() == 0)
    {
        // Proc 0 appends every other processor's streamline data to its own.
        for (int i = 1; i < nProcs; i++)
        {
            if (counts[i] > 0)
            {
                float *vals = new float[counts[i]];
                void *ptr = (void *)&vals[0];
                MPI_Recv(ptr, counts[i], MPI_FLOAT, i, tag, VISIT_MPI_COMM, &stat);
                for (int j = 0; j < counts[i]; j++)
                    slData.push_back(vals[j]);
                delete [] vals;
            }
        }
    }
    else
    {
        if (slData.size() > 0)
        {
            void *ptr = (void *)&slData[0];
            MPI_Send(ptr, slData.size(), MPI_FLOAT, 0, tag, VISIT_MPI_COMM);
        }
    }
    delete [] counts;
#endif

    std::string msg;
    char str[128];
    int i = 0, sz = slData.size();
    int slIdx = 0;

    MapNode result_node;
    while (i < sz)
    {
        sprintf(str, "Streamline %d: Seed %f %f %f Arclength %f\n",
                slIdx, slData[i], slData[i+1], slData[i+2], slData[i+3]);

        MapNode sl_res_node;
        doubleVector sl_res_seed;
        sl_res_seed.push_back(slData[i]);
        sl_res_seed.push_back(slData[i+1]);
        sl_res_seed.push_back(slData[i+2]);
        sl_res_node["seed"] = sl_res_seed;
        sl_res_node["arclength"] = slData[i+3];

        i += 4;
        msg += str;

        if (dumpSteps)
        {
            int numSteps = (int)slData[i++];
            doubleVector sl_steps;
            for (int j = 0; j < numSteps; j++)
            {
                // each step carries 5 floats; only the x, y, z position is reported
                sprintf(str, " %f %f %f \n",
                        slData[i], slData[i+1], slData[i+2]);
                sl_steps.push_back(slData[i]);
                sl_steps.push_back(slData[i+1]);
                sl_steps.push_back(slData[i+2]);
                i += 5;
                msg += str;
            }
            sl_res_node["steps"] = sl_steps;
        }

        sprintf(str, "streamline %d", slIdx);
        result_node[str] = sl_res_node;
        slIdx++;
    }

    SetResultMessage(msg.c_str());
    SetXmlResult(result_node.ToXML());
}
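// -------------------------------------------------------------------------
// Illustrative sketch, not from the VisIt sources: the flat float layout
// that PostExecute above decodes is, per streamline,
//     [seedX, seedY, seedZ, arclength]
// followed, when dumpSteps is set, by
//     [numSteps, then 5 floats per step, of which the first 3 are x, y, z].
// PackStreamlineRecord below is a hypothetical packer for one such record,
// shown only to document that layout (assumes <vector>); what the query's
// Execute stage actually stores in the remaining 2 floats per step is not
// shown here and is not reported by PostExecute.
// -------------------------------------------------------------------------
static void
PackStreamlineRecord(std::vector<float> &slData,
                     const float seed[3], float arclength,
                     const std::vector<float> &steps,  // 5 floats per step
                     bool dumpSteps)
{
    slData.push_back(seed[0]);
    slData.push_back(seed[1]);
    slData.push_back(seed[2]);
    slData.push_back(arclength);
    if (dumpSteps)
    {
        slData.push_back((float)(steps.size() / 5));
        slData.insert(slData.end(), steps.begin(), steps.end());
    }
}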