//checks equality of the vector of boxes inside m_boxes
bool BoxLayout::sameBoxes(const BoxLayout& a_layout) const
{
  bool retval;
  if (size() == a_layout.size())
    {
      retval = true;

      for (int iBox = 0; iBox < size(); ++iBox)
        {
          //RefCountedPtr<Vector<Entry> > m_boxes;
          if ((*m_boxes)[iBox].box != (*a_layout.m_boxes)[iBox].box)
            {
              retval = false;
              break; // first mismatch already settles the answer
            }
        }
    }
  else
    {
      retval = false;
    }
  return retval;
}
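
// Usage sketch (not part of the original example; assumes a Chombo-style
// BoxLayout with the Vector<Box>/Vector<int> constructor and the headers named
// below, plus the sameBoxes() member defined above). Two layouts built from
// the same box vector compare equal under sameBoxes(), which only looks at the
// boxes, not at the processor assignment.
#include "Box.H"
#include "BoxLayout.H"
#include "Vector.H"

bool sameBoxesIllustration(const Vector<Box>& a_boxes)
{
  Vector<int> procs(a_boxes.size());
  for (int i = 0; i < a_boxes.size(); ++i)
    {
      procs[i] = 0; // every box on rank 0, just for the illustration
    }
  BoxLayout layoutA(a_boxes, procs);
  BoxLayout layoutB(a_boxes, procs);
  return layoutA.sameBoxes(layoutB); // true: identical box vectors
}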
Example #2
Real DotProduct(const BoxLayoutData<FArrayBox>& a_dataOne,
                const BoxLayoutData<FArrayBox>& a_dataTwo,
                const BoxLayout&                a_dblIn,
                const Interval&                 a_comps)
{
  Vector<Real> rhodot;
  rhodot.reserve(10);
  const int startcomp = a_comps.begin();
  const int endcomp = a_comps.end();
  //calculate the single-processor dot product
  DataIterator dit = a_dataOne.dataIterator();

  for (dit.reset(); dit.ok(); ++dit)
  {
    Box fabbox = a_dblIn.get(dit());
    const FArrayBox& onefab = a_dataOne[dit()];
    const FArrayBox& twofab = a_dataTwo[dit()];
    CH_assert(onefab.box().contains(fabbox));
    CH_assert(twofab.box().contains(fabbox));

    Real dotgrid = 0;
    FORT_DOTPRODUCT(CHF_REAL(dotgrid),
                    CHF_CONST_FRA(onefab),
                    CHF_CONST_FRA(twofab),
                    CHF_BOX(fabbox),
                    CHF_CONST_INT(startcomp),
                    CHF_CONST_INT(endcomp));

    rhodot.push_back(dotgrid);
  }

  // now for the multi-processor fandango

  //gather all the rhodots onto a vector and add them up
  int baseProc = 0;
  Vector<Vector<Real> > dotVec;
  gather(dotVec, rhodot, baseProc);

  Real rhodotTot = 0.0;
  if (procID() == baseProc)
  {
    CH_assert(dotVec.size() == numProc());

    rhodot.resize(a_dblIn.size());

    int index = 0;
    for (int p = 0; p < dotVec.size(); p++)
    {
      Vector<Real>& v = dotVec[p];
      for (int i = 0; i < v.size(); i++, index++)
      {
        rhodot[index] = v[i];
      }
    }

    // sorting first makes the floating-point summation order independent of
    // how the per-processor pieces were gathered (presumably so the global
    // sum is reproducible from run to run)
    rhodot.sort();

    for (int ivec = 0; ivec < rhodot.size(); ivec++)
    {
      rhodotTot += rhodot[ivec];
    }
  }

  //broadcast the sum to all processors.
  broadcast(rhodotTot, baseProc);

  //return the total
  return rhodotTot;
}
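
// Usage sketch (not part of the original example; assumes the Chombo
// LevelData/Interval API and the headers named below, and that a_grids is the
// layout a_phi is defined over). Computes a global dot product of a field with
// itself, i.e. a sum of squares over the valid cells, using DotProduct() as
// defined above.
#include "REAL.H"
#include "Interval.H"
#include "FArrayBox.H"
#include "LevelData.H"
#include "BoxLayout.H"

Real sumOfSquares(const LevelData<FArrayBox>& a_phi,
                  const BoxLayout&            a_grids)
{
  // use every component of a_phi
  Interval comps(0, a_phi.nComp() - 1);
  return DotProduct(a_phi, a_phi, a_grids, comps);
}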
Example #3
void getError(LevelData<double, 1> & a_error,
              double               & a_maxError,
              const double         & a_dx)
{
  int rank, nprocs;
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
  std::cout << "I am rank " << rank << " of " <<  nprocs << " processes" <<  std::endl;

  BoxLayout* layout = new BoxLayout();
  int npatches;
  int nPatchPerProc, iStartIdx, iEndIdx;
  int patchID = -1;
  double local_maxError = 0;

  LevelData<double, 1> *phi;
  LevelData<double, 1> *lphcalc;
  LevelData<double, 1> *lphexac;

  if(rank == 0)
  {
    *layout = a_error.getBoxLayout();
    npatches = layout->size();
    phi = new LevelData<double, 1>(*layout, s_nghost);
    lphcalc = new LevelData<double, 1>(*layout, 0);
    lphexac = new LevelData<double, 1>(*layout, 0);
    //  cout << "initializing phi to sum_dir(sin 2*pi*xdir)" << endl;
    initialize(*phi,*lphexac, a_dx);
    //set ghost cells of phi
    phi->exchange();
  }
  if(nprocs > 1)
    MPI_Bcast( &npatches, 1, MPI_INT, 0, MPI_COMM_WORLD);
  // barrier so every rank has npatches (and rank 0 has finished its halo
  // exchange) before the patch distribution starts
  MPI_Barrier(MPI_COMM_WORLD);

  if(npatches == 1)
    nPatchPerProc = 1;
  else
    nPatchPerProc = npatches/nprocs;
  iStartIdx = rank * nPatchPerProc;
  iEndIdx = (npatches < (rank+1)*nPatchPerProc) ? npatches-1 : ((rank+1)*nPatchPerProc-1);
  nPatchPerProc = iEndIdx - iStartIdx + 1; 
  std::cout << "I am rank " << rank << " working from " <<  iStartIdx << " to " << iEndIdx <<  std::endl;

  if(rank == 0)
  {
    Box bxdst, bxsrc;
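    // per-patch protocol: rank 0 sends the source and destination Boxes first
    // (both with tag 4), then the source data (tag 2) and the destination
    // buffer (tag 3); any patch outside rank 0's own range goes to rank dest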
    MPI_Request reqs[4];
    MPI_Status status[4];
    for(BLIterator blit(*layout); blit != blit.end(); ++blit)
    {
      patchID++;
      bool mypatch = patchID >= iStartIdx && patchID <=iEndIdx;
      RectMDArray<double>& phiex =     (*phi)[patchID];
      RectMDArray<double>& lphca = (*lphcalc)[patchID];
      RectMDArray<double>& lphex = (*lphexac)[patchID];
      RectMDArray<double>& error = a_error[patchID];
      bxsrc = phiex . getBox();
      bxdst = lphca . getBox();
      lphca.setVal(0.);
      if(mypatch)
      {
        cout << "Rank 0 is working on patch " << patchID << endl;
        double tmp;
        tmp = doWork(bxdst, phiex, lphca, lphex, error, a_dx);
        local_maxError = (local_maxError > tmp) ? local_maxError : tmp;
      }
      else
      {
        int dest = patchID / nPatchPerProc; // note: assumes npatches divides evenly among the ranks
        MPI_Isend(&bxsrc, sizeof(Box), MPI_CHAR, dest, 4, MPI_COMM_WORLD, &reqs[0]);
        cout << "master send out patch " << patchID << " bxsrc Box with size " << sizeof(Box) << endl;
        MPI_Isend(&bxdst, sizeof(Box), MPI_CHAR, dest, 4, MPI_COMM_WORLD, &reqs[1]);
        cout << "master send out patch " << patchID << " bxdst Box with size " << sizeof(Box) << endl;
        double *sourceDataPointer = phiex.getPointer();
        double *destinationDataPointer = lphca.getPointer();
        MPI_Isend(sourceDataPointer, phiex.getBox().sizeOf(), MPI_DOUBLE, dest, 2, MPI_COMM_WORLD, &reqs[2]);
        MPI_Isend(destinationDataPointer, lphca.getBox().sizeOf(), MPI_DOUBLE, dest, 3, MPI_COMM_WORLD, &reqs[3]);
        MPI_Waitall(4, reqs, status);
        cout << "Rank 0 sent patch " << patchID << " to rank " << dest << endl;
      }
    }
  }
  else
  {
    if(npatches == 1)
      nPatchPerProc = 0;
    else
      nPatchPerProc = npatches/nprocs;
    int src = 0;
    if(nPatchPerProc > 0)
    {
      MPI_Status status[4];
      Box bxdst, bxsrc;
      RectMDArray<double>* phiex = new RectMDArray<double>();
      RectMDArray<double>* lphca = new RectMDArray<double>();
      RectMDArray<double>* lphex = new RectMDArray<double>();
      RectMDArray<double>* error = new RectMDArray<double>();
      int idx;
      for(idx = 0; idx < nPatchPerProc; idx++)      
      {
        patchID++;
        MPI_Recv(&bxsrc, sizeof(Box), MPI_CHAR, src, 4, MPI_COMM_WORLD, &status[0]);
        cout << "Rank " << rank << " receive patch " << idx << " bxsrc with size " << sizeof(Box) << endl;
        MPI_Recv(&bxdst, sizeof(Box), MPI_CHAR, src, 4, MPI_COMM_WORLD, &status[1]);
        cout << "Rank " << rank << " receive patch " << idx << " bxdst with size " << sizeof(Box) << endl;
        phiex->define(bxsrc);
        lphca->define(bxdst);
        lphex->define(bxdst);
        error->define(bxdst);
        MPI_Recv(phiex->getPointer(), phiex->getBox().sizeOf(), MPI_DOUBLE, src, 2, MPI_COMM_WORLD, &status[2]);
        cout << "Rank " << rank << " receive patch " << idx << " sourceDataPointer with size " << phiex->getBox().sizeOf() << endl;
        MPI_Recv(lphca->getPointer(), lphca->getBox().sizeOf(), MPI_DOUBLE, src, 3, MPI_COMM_WORLD, &status[3]);
        cout << "Rank " << rank << " receive patch " << idx << " destinationDataPointer with size " << lphca->getBox().sizeOf() << endl;
        cout << "Rank " << rank << " is working on local patch " << idx << endl;
        double tmp;
        tmp = doWork(bxdst,  *phiex, *lphca, *lphex, *error, a_dx);
        local_maxError = (local_maxError > tmp) ? local_maxError : tmp; 
      }
    }
  }
  MPI_Allreduce(&local_maxError, &a_maxError, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
  if(rank == 0)
    cout << "max Error is: " << a_maxError << endl;
}
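
// Usage sketch (not part of the original example): driving getError() from
// main(). The BoxLayout construction for this framework is not shown in the
// listing, so buildGlobalLayout() is a hypothetical placeholder, and the grid
// spacing below is illustrative only.
#include <mpi.h>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);

  BoxLayout layout = buildGlobalLayout(); // hypothetical helper, not defined here
  double dx = 1.0 / 64.0;                 // illustrative grid spacing
  double maxError = 0.0;
  LevelData<double, 1> error(layout, 0);  // same constructor as used above

  getError(error, maxError, dx);

  MPI_Finalize();
  return 0;
}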