// splits the vector between the two children
void Node::divide_v1(vector<int>& numbers) {
    // printf("%d is going to split %lu numbers\n", rank, numbers.size());

    // compute the rank of the left child
    const int left_rank = rank * 2 + 1;
    // compute the rank of the right child
    const int right_rank = rank * 2 + 2;

    // everything past conquer_at is handed off to the children
    vector<int> resto(numbers.begin() + conquer_at + 1, numbers.end());

    // printf("%d kept %lu numbers for itself, %lu left over\n", rank, meu.size(), resto.size());

    // compute the midpoint of the remaining vector
    const size_t half_size = resto.size() / 2;

    // create two vectors, one for each half of the remaining vector
    vector<int> left(resto.begin(), resto.begin() + half_size);
    vector<int> right(resto.begin() + half_size, resto.end());

    // printf("%d will send %lu numbers to %d and %lu to %d\n", rank, left.size(), left_rank, right.size(), right_rank);

    // send each half to a child node
    sendToNode(left_rank, left);
    sendToNode(right_rank, right);

    // wait for the children's results
    receiveFromNode(left_rank, left);
    receiveFromNode(right_rank, right);

    // merge the children's results back into the working vector
    mergeVectors(left, right, resto);
}
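The mergeVectors(left, right, resto) helper is not shown in this snippet. Assuming each child sends back its half already sorted, a minimal sketch of such a helper is a plain std::merge of the two halves into the destination vector (the name and signature are taken from the call above, not from the original source):

#include <algorithm>
#include <vector>

// Hypothetical helper: merge two sorted halves returned by the child nodes
// into the destination vector (a sketch, not the original implementation).
void mergeVectors(const std::vector<int>& left,
                  const std::vector<int>& right,
                  std::vector<int>& dest) {
    dest.resize(left.size() + right.size());
    std::merge(left.begin(), left.end(),
               right.begin(), right.end(),
               dest.begin());
}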
int32_t SegmentMerger::merge() {
    int32_t value = mergeFields();
    mergeTerms();
    mergeNorms();

    if (fieldInfos->hasVectors())
        mergeVectors();

    return value;
}
int32_t SegmentMerger::merge(bool mergeDocStores) {
    this->mergeDocStores = mergeDocStores;

    // NOTE: it's important to add calls to checkAbort.work(...) if you make
    // any changes to this method that will spend a lot of time. The frequency
    // of this check impacts how long IndexWriter.close(false) takes to
    // actually stop the threads.

    mergedDocs = mergeFields();
    mergeTerms();
    mergeNorms();

    if (mergeDocStores && fieldInfos->hasVectors())
        mergeVectors();

    return mergedDocs;
}
int Rarefact::getSharedCurve(float percentFreq = 0.01, int nIters = 1000) {
    try {
        SharedRarefactionCurveData* rcd = new SharedRarefactionCurveData();

        label = lookup[0]->getLabel();

        // register the displays
        for (int i = 0; i < displays.size(); i++) {
            rcd->registerDisplay(displays[i]);
        }

        // if jumble is false all iters will be the same
        if (m->jumble == false) { nIters = 1; }

        // convert freq percentage to number
        int increment = 1;
        if (percentFreq < 1.0) { increment = numSeqs * percentFreq; }
        else { increment = percentFreq; }

        for (int iter = 0; iter < nIters; iter++) {

            for (int i = 0; i < displays.size(); i++) {
                displays[i]->init(label);
            }

            if (m->jumble == true) {
                // randomize the groups
                random_shuffle(lookup.begin(), lookup.end());
            }

            // make merge the size of lookup[0]
            SharedRAbundVector* merge = new SharedRAbundVector(lookup[0]->size());

            // make copy of lookup zero
            for (int i = 0; i < lookup[0]->size(); i++) {
                merge->set(i, lookup[0]->getAbundance(i), "merge");
            }

            vector<SharedRAbundVector*> subset;

            // send each group one at a time
            for (int k = 0; k < lookup.size(); k++) {
                if (m->control_pressed) { delete merge; delete rcd; return 0; }

                subset.clear();            // clears out old pair of sharedrabunds
                // add in new pair of sharedrabunds
                subset.push_back(merge);
                subset.push_back(lookup[k]);

                rcd->updateSharedData(subset, k + 1, numGroupComb);
                mergeVectors(merge, lookup[k]);
            }

            // resets output files
            for (int i = 0; i < displays.size(); i++) {
                displays[i]->reset();
            }

            delete merge;
        }

        for (int i = 0; i < displays.size(); i++) {
            displays[i]->close();
        }

        delete rcd;
        return 0;
    }
    catch (exception& e) {
        m->errorOut(e, "Rarefact", "getSharedCurve");
        exit(1);
    }
}
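mergeVectors(merge, lookup[k]) is defined elsewhere in the class. A plausible sketch, assuming it simply accumulates the new group's abundances into the running merge using the SharedRAbundVector accessors already visible above (an assumption, not necessarily the actual Mothur implementation):

// Hypothetical sketch: add each OTU's abundance from the new group into the
// accumulated "merge" vector, using only accessors seen in the snippet above.
void Rarefact::mergeVectors(SharedRAbundVector* merge, SharedRAbundVector* group) {
    for (int i = 0; i < group->size(); i++) {
        merge->set(i, merge->getAbundance(i) + group->getAbundance(i), "merge");
    }
}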
void slave(int rank, int comm_sz) {
    Message msg;
    int local_v[MAX_VECTOR];
    int local_l;
    MPI_Status status;
    int q;               // disposable index
    char *strVector;     // pointer for string representation of vector
    char label[3];       // storage for local event label

    // initialize vector
    for (q = 0; q < comm_sz; q++)
        local_v[q] = 0;

    // initialize lamport
    local_l = 0;

    // initialize label
    label[0] = '0' + rank - 1;
    label[1] = '@';      // == 'A' - 1
    label[2] = '\0';

    while (1) {
        MPI_Recv(&msg, 1, MESSAGE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        sleep(1);

        // On finalize, MPI_TAG == 1
        if (status.MPI_TAG == 1) {
            strVector = vectorToString(local_v, comm_sz);
            sprintf(msg.string, "Process %d report: Event %s - Logical: %d - Vector: %s",
                    rank - 1, label, local_l, strVector);
            free(strVector);
            MPI_Send(&msg, 1, MESSAGE, 0, 0, MPI_COMM_WORLD);
            break;
        }

        local_l++;
        local_v[rank]++;
        label[1]++;

        if (status.MPI_SOURCE == 0) {
            if (msg.dest == 0) {
                printf("Executing event %s in process %d.\n", label, rank - 1);
            } else {
                printf("Message sent event %s from process %d to process %d: %s\n",
                       label, rank - 1, msg.dest - 1, msg.string);
                for (q = 0; q < comm_sz; q++) {
                    msg.vector[q] = local_v[q];
                }
                msg.lamport = local_l;
                MPI_Send(&msg, 1, MESSAGE, msg.dest, 0, MPI_COMM_WORLD);
            }
            sprintf(msg.string, "received");
            MPI_Send(&msg, 1, MESSAGE, 0, 0, MPI_COMM_WORLD);
        } else {
            printf("Message received event %s from process %d by process %d: %s\n",
                   label, status.MPI_SOURCE - 1, rank - 1, msg.string);
            // merge the sender's vector clock into the local one
            mergeVectors(msg.vector, local_v, comm_sz);
            // advance the Lamport clock past the sender's timestamp if needed
            local_l = (msg.lamport >= (local_l - 1) ? msg.lamport + 1 : local_l);
        }

        // print status to stdout
        strVector = vectorToString(local_v, comm_sz);
        printf("The Logical/Vector time of event %s at process %d is: %d / %s\n\n",
               label, rank - 1, local_l, strVector);
        free(strVector);
    }
}
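Here mergeVectors(msg.vector, local_v, comm_sz) performs the vector-clock merge; the standard rule is an element-wise maximum of the incoming and local vectors. A minimal sketch under that assumption, with the argument order matching the call above (not the original implementation):

// Hypothetical sketch: vector-clock merge as an element-wise maximum of the
// received vector into the local one (not shown in the original snippet).
void mergeVectors(int *received, int *local, int size) {
    int q;
    for (q = 0; q < size; q++) {
        if (received[q] > local[q])
            local[q] = received[q];
    }
}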