// Dispatch the semantic action for grammar production `i`.
// Returns the action's result, or true for productions with no action
// (or an index outside the known production table).
bool Parser::reduce(ushort i) {
#ifdef _DEBUG
  stream << "reduced by: ";
  inputProductions[i].print(stream);
#endif
  switch (i) {
    // Productions that need no semantic action:
    case 0:   // begin -> start
    case 15:  // someTokens -> token
    case 16:  // someTokens -> someTokens token
    case 18:  // someProductions -> someProductions production
    case 19:  // someProductions -> production
      break;
    case 1:  return reduceStrings1();   // strings -> strings "{String}"
    case 2:  return reduceStrings2();   // strings -> "{String}"
    case 3:  return reduceVs1();        // vs -> vs "{Letter}"
    case 4:  return reduceVs2();        // vs -> vs "{String}"
    case 5:  return reduceVs3();        // vs -> "{Letter}"
    case 6:  return reduceVs4();        // vs -> "{String}"
    case 7:  return reduceOption();     // option -> "[" vs "]"
    case 8:  return reduceRight1();     // oneProductionRight -> oneProductionRight option
    case 9:  return reduceRight2();     // oneProductionRight -> oneProductionRight vs
    case 10: return reduceRight3();     // oneProductionRight -> option
    case 11: return reduceRight4();     // oneProductionRight -> vs
    case 12: return reduceSomeRight1(); // someProductionRight -> someProduction "|" oneProductionRight
    case 13: return reduceSomeRight2(); // someProductionRight -> oneProductionRight
    case 14: return reduceToken();      // token -> "%" "token" strings ";"
    case 17: return reduceProduction(); // production -> "{Letter}" "-" ">" someProductionRight ";"
    // Both start forms funnel into the same final action:
    case 20: // start -> someTokens "%" "start" "{Letter}" ";" someProductions
    case 21: // start -> "%" "start" "{Letter}" ";" someProductions
      return reduceAll();
  }
  return true;
}
int main(int narg, char **arg) { Teuchos::GlobalMPISession mpiSession(&narg,&arg); Teuchos::RCP<const Teuchos::Comm<int> > comm = Teuchos::DefaultComm<int>::getComm(); int me = comm->getRank(); if (me == 0) std::cout << std::endl << "Usage: Zoltan2_teuchosCommTest.exe [#_of_allreduces_to_do]" << std::endl << " default number is 4000" << std::endl << std::endl; int niter = 4000; if (narg > 1) niter = atoi(arg[1]); double tstart, tend; int iin = me, iout; double din = me * 2., dout; tstart = MPI_Wtime(); for (int i = 0; i < niter; i++) { reduceAll(*comm, Teuchos::REDUCE_SUM, 1, &iin, &iout); reduceAll(*comm, Teuchos::REDUCE_SUM, 1, &din, &dout); } tend = MPI_Wtime(); if (me == 0) std::cout << "reduceAll time using Teuchos::Comm = " << tend - tstart << std::endl; tstart = MPI_Wtime(); for (int i = 0; i < niter; i++) { MPI_Allreduce(&iin, &iout, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&din, &dout, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); } tend = MPI_Wtime(); if (me == 0) std::cout << "Allreduce time using MPI_Allreduce = " << tend - tstart << std::endl; if (me == 0) std::cout << std::endl << "PASS" << std::endl; return 0; }
// Builds a histogram of the values in properties_ into
// numberElemsByProperties_, then computes the global and local property
// counts.  Sets numberOfProperties_ (global, via an all-reduce MAX) and
// localNumberOfProperties_ (this process only).
// NOTE(review): indexing `numberIter + property` assumes property values are
// non-negative; the later `- base_ + 1` suggests properties are numbered from
// base_ -- TODO confirm against the class's documented numbering convention.
void Operator<Node>::computeNumberOfProperties() {
  std::vector<int>::const_iterator elemsIter;
  std::vector<int>::iterator numberIter;
  // Communicator of the input map, used for the global reduction below.
  const Teuchos::RCP< const Teuchos::Comm<int> > & input_comm = input_map_->getComm();
  //const Epetra_Comm& input_comm = input_map_->Comm();
  int max = 0;  // largest property value seen locally
  // Start the histogram with one zeroed slot per local element; it is grown
  // below whenever a property value exceeds the current capacity.
  numberElemsByProperties_.assign(properties_.size(), 0);
  numberIter = numberElemsByProperties_.begin();
  for(elemsIter = properties_.begin() ; elemsIter != properties_.end() ; elemsIter ++) {
    int property;
    property = *elemsIter;
    if (max < property) {
      max = property;
      // Grow the histogram so index `max` is valid (size must be max + 1).
      int toAdd = max - numberElemsByProperties_.size() + 1;
      if (toAdd > 0) {
        numberElemsByProperties_.insert(numberElemsByProperties_.end(), toAdd, 0);
        // insert() may reallocate, invalidating numberIter -- refresh it.
        numberIter = numberElemsByProperties_.begin();
      }
    }
    // Count this occurrence of `property`.
    (*(numberIter + property)) ++;
  }
  // Global maximum property value across all processes.
  //input_comm.MaxAll(&max, &numberOfProperties_, 1);
  reduceAll(*input_comm, Teuchos::REDUCE_MAX, 1, &max, &numberOfProperties_);
  // Convert max values (numbered from base_) into counts.
  numberOfProperties_ = numberOfProperties_ - base_ + 1;
  localNumberOfProperties_ = max - base_ + 1;
}
// Computes the global vector dimension by summing every process's local
// sub-dimension; the all-reduce leaves the identical total on all processes.
Ordinal SpmdVectorSpaceUtilities::computeGlobalDim(
  const Teuchos::Comm<Ordinal> &comm, const Ordinal localSubDim
  )
{
  Ordinal globalSum = -1;  // overwritten by the reduction
  reduceAll(comm, Teuchos::REDUCE_SUM, localSubDim,
    Teuchos::outArg(globalSum));
  return globalSum;
}
// Verifies that reading a ParameterList from XML on every process (A) and
// reading it on one process then broadcasting (B) produce equal lists.
// NOTE(review): requires "input.xml" to exist in the working directory.
TEUCHOS_UNIT_TEST( Teuchos_ParameterList, xmlUpdateAndBroadcast ) {
  const RCP<const Comm<int> > comm = DefaultComm<int>::getComm();
  // Test the broadcast functionality to avoid unscalable I/O collisions
  std::string inputFile="input.xml";
  ParameterList A;  // filled by every process reading the file itself
  ParameterList B;  // filled by rank 0 reading, then broadcasting
  updateParametersFromXmlFile(inputFile, &A);
  updateParametersFromXmlFileAndBroadcast(inputFile, &B, *comm);
  out << "B = " << B;
  TEST_ASSERT( B.begin() != B.end() ); // Avoid false positive from empty lists
  // See if any process returned a failed (i.e. a non-zero local_failed)
  int local_failed = !(A == B);
  int global_failed = -1;
  // Sum of per-process failure flags; zero everywhere means all matched.
  reduceAll( *comm, Teuchos::REDUCE_SUM, local_failed, outArg(global_failed) );
  TEST_EQUALITY_CONST( global_failed, 0 );
}
// Computes a cheap "map code" fingerprint of the data distribution by
// summing a per-process value derived from the local sub-dimension.  Two
// spaces with the same per-process local dimensions get equal codes, which
// is all RTOp compatibility checking needs.
Ordinal SpmdVectorSpaceUtilities::computeMapCode(
  const Teuchos::Comm<Ordinal> &comm, const Ordinal localSubDim
  )
{
  //
  // Here we will make a map code out of just the local sub-dimension on each
  // processor. If each processor has the same number of local elements, then
  // the map codes will be the same and this is all you need for RTOp
  // compatibility.
  //
  // NOTE(review): this local was previously named `procRank`, but size(comm)
  // returns the *number of processes*, not this process's rank; renamed to
  // match what it actually holds (behavior unchanged).  If a rank-dependent
  // code was intended (which would also distinguish permuted distributions),
  // rank(comm) should be used instead -- confirm against upstream Thyra.
  const int numProcs = size(comm);
  Ordinal mapCode = -1;  // overwritten by the reduction
  // Perturb the local dimension so the summed code is sensitive to the
  // per-process sizes, then sum the contributions across all processes.
  const Ordinal localCode = localSubDim % (numProcs + 1) + localSubDim;
  reduceAll(comm, Teuchos::REDUCE_SUM, localCode, Teuchos::outArg(mapCode));
  return mapCode;
}