Example #1
static PetscErrorCode DMGetNeighbors_DA(DM dm, PetscInt *nranks, const PetscMPIInt *ranks[])
{
  PetscErrorCode ierr;
  PetscInt dim;
  DMDAStencilType st;
  
  PetscFunctionBegin;
  ierr = DMDAGetNeighbors(dm,ranks);CHKERRQ(ierr);
  ierr = DMDAGetInfo(dm,&dim,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,&st);CHKERRQ(ierr);

  /* DMDAGetNeighbors() always returns the full 3^dim box of neighbor ranks;
     the commented-out branches below give the smaller counts a star stencil would use. */
  switch (dim) {
    case 1:
      *nranks = 3;
      /* if (st == DMDA_STENCIL_STAR) { *nranks = 3; } */
      break;
    case 2:
      *nranks = 9;
      /* if (st == DMDA_STENCIL_STAR) { *nranks = 5; } */
      break;
    case 3:
      *nranks = 27;
      /* if (st == DMDA_STENCIL_STAR) { *nranks = 7; } */
      break;
    default:
      break;
  }
  PetscFunctionReturn(0);
}
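This routine is the DMDA implementation behind the generic DMGetNeighbors() interface, so application code normally reaches it through DMGetNeighbors() rather than calling it directly. Below is a minimal caller sketch, not taken from any of the projects listed here: the grid size is arbitrary and the DMDACreate2d() argument list follows recent PETSc releases, which name the boundary enums DM_BOUNDARY_*.

/* Minimal sketch (illustrative, not from the listed projects): query the
 * neighbor ranks of a 2-D DMDA through the generic DMGetNeighbors() API. */
#include <petscdmda.h>

int main(int argc, char **argv)
{
  DM                dm;
  PetscInt          nranks, i;
  const PetscMPIInt *ranks;
  PetscErrorCode    ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL); if (ierr) return ierr;
  ierr = DMDACreate2d(PETSC_COMM_WORLD, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE,
                      DMDA_STENCIL_BOX, 64, 64, PETSC_DECIDE, PETSC_DECIDE,
                      1, 1, NULL, NULL, &dm); CHKERRQ(ierr);
  ierr = DMSetUp(dm); CHKERRQ(ierr);

  /* For a 2-D DMDA this reports 9 ranks (the 3x3 box, including this rank). */
  ierr = DMGetNeighbors(dm, &nranks, &ranks); CHKERRQ(ierr);
  for (i = 0; i < nranks; i++) {
    ierr = PetscPrintf(PETSC_COMM_SELF, "neighbor %d: rank %d\n", (int)i, (int)ranks[i]); CHKERRQ(ierr);
  }

  ierr = DMDestroy(&dm); CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}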
Example #2
File: zdaf.c Project: 00liujj/petsc
PETSC_EXTERN void PETSC_STDCALL dmdagetneighbors_(DM *da,PetscMPIInt *ranks,PetscErrorCode *ierr)
{
  const PetscMPIInt *r;
  PetscInt          n;
  DM_DA             *dd = (DM_DA*)(*da)->data;

  *ierr = DMDAGetNeighbors(*da,&r);if (*ierr) return;
  if (dd->dim == 2) n = 9;   /* assumes a 2- or 3-D DMDA; dd->dim is read from the private DM_DA struct */
  else n = 27;
  *ierr = PetscMemcpy(ranks,r,n*sizeof(PetscMPIInt));
}
Example #3
PETSC_EXTERN void PETSC_STDCALL dmdagetneighbors_(DM *da,PetscMPIInt *ranks,PetscErrorCode *ierr)
{
  const PetscMPIInt *r;
  PetscInt          n,dim;

  *ierr = DMDAGetNeighbors(*da,&r);if (*ierr) return;
  *ierr = DMGetDimension(*da,&dim);if (*ierr) return;
  if (dim == 2) n = 9;       /* same sizing as Example #2, but via the public DMGetDimension() call */
  else n = 27;
  *ierr = PetscMemcpy(ranks,r,n*sizeof(PetscMPIInt));
}
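Examples 2 and 3 are two revisions of the same Fortran binding: the first reads the dimension out of the private DM_DA struct, the second asks the public DMGetDimension(). Either way the caller has to supply room for all of the neighbor ranks. A C sketch of the same sizing pattern follows; the helper name is illustrative and not part of PETSc, and unlike the wrappers it also covers the 1-D case.

/* Sketch (illustrative helper, not a PETSc routine): copy the neighbor ranks
 * of a DMDA into a freshly allocated buffer sized as 3^dim, mirroring the
 * sizing logic of the Fortran wrappers above. */
#include <petscdmda.h>

static PetscErrorCode CopyNeighborRanks(DM da, PetscMPIInt **out, PetscInt *nout)
{
  const PetscMPIInt *r;
  PetscInt          dim, n, i;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  ierr = DMDAGetNeighbors(da, &r); CHKERRQ(ierr);
  ierr = DMGetDimension(da, &dim); CHKERRQ(ierr);
  n = 1;
  for (i = 0; i < dim; i++) n *= 3;                  /* 3, 9, or 27 ranks */
  ierr = PetscMalloc1(n, out); CHKERRQ(ierr);
  ierr = PetscMemcpy(*out, r, n*sizeof(PetscMPIInt)); CHKERRQ(ierr);
  *nout = n;
  PetscFunctionReturn(0);
}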
Example #4
PetscErrorCode FiberField_AddToSendbufs( FiberField field )
{
  int i;
  int e;
  const int vlen = ArrayLength(field->verts);
  const int elen = ArrayLength(field->edges);
  const BoundingBox lbbox = field->localBounds;
  int neiIdx;
  VertexEdgeMPI *evmpi;
  iCoor n; // index in 3x3x3 array nei
  Vertex v;
  PetscMPIInt sendRank;
  const PetscMPIInt *neiRanks;
  PetscErrorCode ierr;

  PetscFunctionBegin;

  ierr = DMDAGetNeighbors(field->da, &neiRanks); CHKERRQ(ierr);
  
  // clear send arrays
  // for each vert
  //   if outside nei, err
  //   else add vert to send list

  for (i = 0; i < NUMNEI; i++) {
    ArraySetSize( field->sendbufs[i], 0);
  }
   
  for (i = 0; i < vlen; i++) {
    ierr = ArrayGet( field->verts, i, &v ); CHKERRQ(ierr);

    PositionToNeiIdx( &lbbox, &v->X, &n, &neiIdx);

    // if vertex outside 3x3x3 nei, something went terribly wrong
    if (n.x < 0 || n.x > 2 ||
        n.y < 0 || n.y > 2 ||
        n.z < 0 || n.z > 2 ) {
      ierr = PetscInfo(0, "ERROR: Vertex outside 3x3x3 neighbor region\n"); CHKERRQ(ierr);
      ierr = PetscInfo1(0, "i = %d\n",i); CHKERRQ(ierr);
      ierr = PetscInfo3(0, "X = {%f, %f, %f}\n",v->X.x,v->X.y,v->X.z); CHKERRQ(ierr);
      ierr = PetscInfo3(0, "n = {%d, %d, %d}\n",n.x,n.y,n.z); CHKERRQ(ierr);
      ierr = PetscInfo(0, "ERROR: END MESSAGE\n"); CHKERRQ(ierr);
      SETERRQ(field->comm, 0, "Vertex outside 3x3x3 neighbor region");
    } else { 
      // convert nei index to mpi rank
      sendRank = neiRanks[neiIdx];
      // in the edge case where a vertex leaves the global bounding box, abort
      // handle this case in the physics, not in the communication routine
      if ( sendRank == MPI_PROC_NULL) {
        ierr = PetscInfo(0, "ERROR: Vertex outside global bbox\n"); CHKERRQ(ierr);
        ierr = PetscInfo1(0, "i = %d\n",i); CHKERRQ(ierr);
        ierr = PetscInfo3(0, "X = {%f, %f, %f}\n",v->X.x,v->X.y,v->X.z); CHKERRQ(ierr);
        ierr = PetscInfo3(0, "n = {%d, %d, %d}\n",n.x,n.y,n.z); CHKERRQ(ierr);
        ierr = PetscInfo1(0, "neiIdx = %d\n",neiIdx); CHKERRQ(ierr);
        ierr = PetscInfo(0, "ERROR: END MESSAGE\n"); CHKERRQ(ierr);
        SETERRQ(field->comm, 0, "Vertex outside global bbox\n");
      }

      // add vertex to send list[rank]
      ierr = ArrayAppend( field->sendbufs[neiIdx], &evmpi); CHKERRQ(ierr); 
      evmpi->xID = v->vID;
      evmpi->type= v->type;
      evmpi->X = v->X;
      evmpi->V = v->V;
      for (e = 0; e < MAXEDGES; e++) {
        evmpi->yIDs[e] = v->eID[e];
      }
    }
  }

  int min;
  int vPO;
  struct _Edge *edges = ArrayGetData(field->edges);
  struct _Vertex *vertsPO;
  ierr = FiberFieldGetVertexArrayPO( field, &vertsPO ); CHKERRQ(ierr);
  for (e = 0; e < elen; e++) {
    // the edge is 'owned' by the vertex with the smallest ID
    min = edges[e].vID[0] < edges[e].vID[1] ? 0 : 1;
    vPO = edges[e].vPO[min]; 

    v = &vertsPO[vPO];

    PositionToNeiIdx( &lbbox, &v->X, &n, &neiIdx);

    if (v->vID != edges[e].vID[min] ) {
      ierr = PetscInfo1(0, "v->vID = %d\n", v->vID); CHKERRQ(ierr);
      ierr = PetscInfo1(0, "edges[e].vID[min] = %d\n", edges[e].vID[min]); CHKERRQ(ierr);
      SETERRQ(PETSC_COMM_SELF, 0, "Bad vertex");
    }

    ierr = ArrayAppend( field->sendbufs[neiIdx], &evmpi); CHKERRQ(ierr); 
    evmpi->xID = edges[e].eID; 
    evmpi->type = edges[e].type; 
    evmpi->yIDs[0] = edges[e].vID[0]; 
    evmpi->yIDs[1] = edges[e].vID[1]; 
    evmpi->X.x = edges[e].l0;   /* reuse the X.x slot of the MPI struct to carry the edge's l0 value */
  }

  PetscFunctionReturn(0);
}
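This routine leans on PositionToNeiIdx() to turn a vertex position into a cell of the 3x3x3 neighbor grid plus a linear index into the array returned by DMDAGetNeighbors(). That helper is not shown in this listing, so the sketch below is only a guess at what such a mapping could look like, with stand-in types of its own; the x-fastest linearization is chosen to match the rank ordering DMDAGetNeighbors() documents.

/* Hypothetical sketch of a position -> 3x3x3 neighbor-index mapping in the
 * spirit of PositionToNeiIdx(); the types and field names are stand-ins, not
 * the project's own.  Per axis: 0 = below the local box, 1 = inside, 2 = above. */
typedef struct { double x, y, z; } SketchCoor;
typedef struct { int x, y, z; } SketchICoor;
typedef struct { SketchCoor min, max; } SketchBBox;

static void SketchPositionToNeiIdx(const SketchBBox *b, const SketchCoor *X,
                                   SketchICoor *n, int *neiIdx)
{
  n->x = X->x < b->min.x ? 0 : (X->x < b->max.x ? 1 : 2);
  n->y = X->y < b->min.y ? 0 : (X->y < b->max.y ? 1 : 2);
  n->z = X->z < b->min.z ? 0 : (X->z < b->max.z ? 1 : 2);
  /* linearize with x fastest, matching the rank ordering of DMDAGetNeighbors() */
  *neiIdx = n->x + 3*n->y + 9*n->z;
}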
Example #5
PetscErrorCode FiberField_Nei_Alltoall( FiberField f )
{
  int i;
  int neiIdx; // index where nei[] == src
  int count; // probing count num elements received
  const int tag = 128456826; // TODO: should tag # be something unique for each call to this routine?
  const int NUMRECV = f->NUMRECV;
  Array *sendbufs = f->sendbufs;
  Array *recvbufs = f->recvbufs;
  MPI_Request reqSend[NUMNEI];
  MPI_Request reqRecv[NUMNEI];
  MPI_Status status;
  MPI_Comm comm = f->comm;
  const PetscMPIInt *neiRanks;
  PetscErrorCode ierr;

  PetscFunctionBegin;

  //TODO: why is this barrier necessary if all Isend/Irecv matched with WaitAll?
  //BUG: without barrier, sources from other iterations caught in probe
  ierr = PetscBarrier(0); CHKERRQ(ierr);

  ierr = DMDAGetNeighbors(f->da, &neiRanks); CHKERRQ(ierr);

  // send verts to neighbors
  for (i = 0; i < NUMNEI; i++) {
    count = ArrayLength(sendbufs[i]);
    ierr = MPI_Isend(ArrayGetData(sendbufs[i]), count, f->vertmpitype, neiRanks[i], tag, comm, &reqSend[i] ); CHKERRQ(ierr);
    /*ierr = MPI_Send(ArrayGetData(sendbufs[i]), count, f->vertmpitype, neiRanks[i], tag, comm ); CHKERRQ(ierr);*/

#ifdef DEBUG_ALLTOALL
    ierr = PetscInfo1(0, "i = %d\n", i ); CHKERRQ(ierr);
    ierr = PetscInfo1(0, "dst = %d\n", neiRanks[i] ); CHKERRQ(ierr);
    ierr = PetscInfo1(0, "count = %d\n", ArrayLength(sendbufs[i]) ); CHKERRQ(ierr);
#endif
  }
  
  // receive verts from neighbors
  for (i = 0; i < NUMRECV; i++) {
    // probe for count verts sent
    ierr = MPI_Probe(MPI_ANY_SOURCE, tag, comm, &status); CHKERRQ(ierr);
    ierr = MPI_Get_count( &status, f->vertmpitype, &count); CHKERRQ(ierr);
    // convert source rank into nei index (for recvbufs array)
    for (neiIdx = 0; neiIdx < NUMNEI; neiIdx++) {
      if( neiRanks[neiIdx] == status.MPI_SOURCE )
        break;
    }

#ifdef DEBUG_ALLTOALL
    ierr = PetscInfo1(0, "i = %d\n", i ); CHKERRQ(ierr);
    ierr = PetscInfo1(0, "src = %d\n", status.MPI_SOURCE ); CHKERRQ(ierr);
    ierr = PetscInfo1(0, "count = %d\n", count ); CHKERRQ(ierr);
#endif
    
    ierr = ArraySetSize( recvbufs[neiIdx], count); CHKERRQ(ierr);
    ierr = MPI_Irecv( ArrayGetData(recvbufs[neiIdx]), count, f->vertmpitype, status.MPI_SOURCE, tag, comm, &reqRecv[i]); CHKERRQ(ierr);
    /*ierr = MPI_Recv( ArrayGetData(recvbufs[neiIdx]), count, f->vertmpitype, status.MPI_SOURCE, tag, comm, &status ); CHKERRQ(ierr);*/
  }

  ierr = MPI_Waitall( NUMRECV, reqRecv, MPI_STATUSES_IGNORE ); CHKERRQ(ierr);
  ierr = MPI_Waitall(  NUMNEI, reqSend, MPI_STATUSES_IGNORE ); CHKERRQ(ierr);
  

  PetscFunctionReturn(0);
}
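The TODO and BUG comments at the top of this routine both point at the fixed tag: every call reuses the same value, so a probe with MPI_ANY_SOURCE has no way to tell which call a matching message belongs to, and the PetscBarrier() is used to keep calls from overlapping. One possible alternative, sketched below purely as an illustration and not as the project's fix, is a per-call tag counter; it assumes every rank makes these collective calls in the same order. As an aside, MPI only guarantees tags up to 32767 (the minimum value of MPI_TAG_UB), so the literal 128456826 is not strictly portable.

/* Sketch (assumption, not the project's fix): hand out a distinct tag per
 * call so a probe can only match messages from the current call.  Requires
 * that every rank calls this the same number of times, in the same order. */
static int FiberField_NextNeiTag(void)
{
  static int call = 0;
  return 1000 + (call++ % 1024);   /* arbitrary base, kept below the minimum MPI_TAG_UB of 32767 */
}

If a scheme like this were adopted, the routine would pass FiberField_NextNeiTag() where it currently uses the constant tag; whether the barrier could then be dropped would still need to be verified.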