Example #1

#include <stdlib.h>
#include <mpi.h>
#include "MSTK.h"          /* MSTK mesh types and API */
#include "MSTK_private.h"  /* internal parallel routines; header name assumed */

int MESH_Get_OverlapAdj_From_GhostAdj(Mesh_ptr mesh, MSTK_Comm comm) {
  int i;
  MType mtype;

  int myprtn, numprtns;
  MPI_Comm_rank(comm,&myprtn);
  MPI_Comm_size(comm,&numprtns);

  if (numprtns == 1) return 1;
  
  /* derive which processors this processor has overlaps with */

  int *local_par_adj = (int *) malloc(numprtns*sizeof(int));
  int *global_par_adj = (int *) malloc(numprtns*numprtns*sizeof(int));
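
  /* Bit layout (assuming the standard MSTK enumeration MVERTEX=0 ..
     MREGION=3): bit 2*mtype of local_par_adj[i] is set when this rank
     has ghost entities of type mtype from rank i.  For example, ghost
     vertices and faces only from rank i gives local_par_adj[i] = 0x11
     (bits 0 and 4 set). */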

  for (i = 0; i < numprtns; i++) {
    local_par_adj[i] = 0;

    for (mtype = MVERTEX; mtype <= MREGION; mtype++) {
      int j = MESH_Has_Ghosts_From_Prtn(mesh,i,mtype);
      local_par_adj[i] |= j<<(2*mtype);
    }
  }
     
  /* At this point, it is assumed that this processor ('prtn') has
     knowledge of all the processors that it has ghost entities from
     and what type of entities they are. We do an MPI_Allgather so
     that the processor can find out the reverse info, i.e., which
     processors are expecting ghost entities from this processor and
     what type of entities. This info then goes in as the overlap
     entity info for this processor */
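  /* After the Allgather, global_par_adj[p*numprtns + q] is rank p's
     local_par_adj[q].  Column 'myprtn' of row 'i', read below, says
     whether rank i has ghosts from this rank, i.e., whether this rank
     must maintain overlap entities for rank i. */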
  for (i = 0; i < numprtns*numprtns; i++) global_par_adj[i] = 0;
  MPI_Allgather(local_par_adj,numprtns,MPI_INT,global_par_adj,numprtns,MPI_INT,comm);

  /* Now set overlap adjacency flags */

  for (i = 0; i < numprtns; i++) {
    for (mtype = MVERTEX; mtype <= MREGION; mtype++) {

      int j = global_par_adj[i*numprtns + myprtn] & (1 << (2*mtype));
      if (j)
        MESH_Flag_Has_Overlaps_On_Prtn(mesh,i,mtype);
    }
  }


  free(local_par_adj);
  free(global_par_adj);

  /* DON'T MARK PARALLEL ADJACENCY INFO AS CURRENT AS YET BECAUSE THE ACTUAL
     NUMBER OF ENTITIES TO BE SENT AND RECEIVED IS NOT FINALIZED */
  /* MESH_Mark_ParallelAdj_Current(mesh); */

  return 1;
}
Example #2

#include <stdlib.h>
#include <mpi.h>
#include "MSTK.h"
#include "MSTK_private.h"  /* header names as in Example #1 */

int MESH_Update_ParallelAdj(Mesh_ptr mesh, MSTK_Comm comm) {
  int i, idx, local_ov_num[4];
  MVertex_ptr mv;
  MEdge_ptr me;
  MFace_ptr mf;
  MRegion_ptr mr;
  MType mtype;

  int myprtn, numprtns;
  MPI_Comm_rank(comm,&myprtn);
  MPI_Comm_size(comm,&numprtns);

  if (numprtns == 1) return 1;
  
  /* set ghost adjacencies: every ghost entity records its master
     partition, so walk the ghost lists and flag which ranks this rank
     has ghosts from, by entity type */
  idx = 0;
  while ((mv = MESH_Next_GhostVertex(mesh,&idx)))
    MESH_Flag_Has_Ghosts_From_Prtn(mesh,MV_MasterParID(mv),MVERTEX);
  idx = 0;
  while ((me = MESH_Next_GhostEdge(mesh,&idx)))
    MESH_Flag_Has_Ghosts_From_Prtn(mesh,ME_MasterParID(me),MEDGE);
  idx = 0;
  while ((mf = MESH_Next_GhostFace(mesh,&idx)))
    MESH_Flag_Has_Ghosts_From_Prtn(mesh,MF_MasterParID(mf),MFACE);
  idx = 0;
  while ((mr = MESH_Next_GhostRegion(mesh,&idx)))
    MESH_Flag_Has_Ghosts_From_Prtn(mesh,MR_MasterParID(mr),MREGION);


  /* derive which processors this processor has overlaps with */

  int *local_par_adj = (int *) malloc(numprtns*sizeof(int));
  int *global_par_adj = (int *) malloc(numprtns*numprtns*sizeof(int));
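
  /* Bit layout (assuming MVERTEX=0 .. MREGION=3): bit 2*mtype of
     local_par_adj[i] is set when this rank has ghost entities of type
     mtype from rank i */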

  for (i = 0; i < numprtns; i++) {
    local_par_adj[i] = 0;

    for (mtype = MVERTEX; mtype <= MREGION; mtype++) {
      int j = MESH_Has_Ghosts_From_Prtn(mesh,i,mtype);
      local_par_adj[i] |= j<<(2*mtype);
    }
  }
     
  /* At this point, it is assumed that this processor ('prtn') has
     knowledge of all the processors that it has ghost entities from
     and what type of entities they are. We do an MPI_Allgather so
     that the processor can find out the reverse info, i.e., which
     processors are expecting ghost entities from this processor and
     what type of entities. This info then goes in as the overlap
     entity info for this processor */
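  /* After the Allgather, global_par_adj[p*numprtns + q] is rank p's
     local_par_adj[q]; column 'myprtn' of row 'i' says whether rank i
     has ghosts from (and so needs overlaps on) this rank */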
  for (i = 0; i < numprtns*numprtns; i++) global_par_adj[i] = 0;
  MPI_Allgather(local_par_adj,numprtns,MPI_INT,global_par_adj,numprtns,MPI_INT,comm);

  /* Now set overlap adjacency flags */

  for (i = 0; i < numprtns; i++) {
    for (mtype = MVERTEX; mtype <= MREGION; mtype++) {

      int j = global_par_adj[i*numprtns + myprtn] & (1 << (2*mtype));
      if (j)
        MESH_Flag_Has_Overlaps_On_Prtn(mesh,i,mtype);
    }
  }


  /* Right now the model we use is that every partition sends ALL its
     overlap entity data to any partition that asks for it */
  /* So, if a processor 'i' has ghosts from partition 'j', it needs to
     know the total number of overlap entities on partition 'j' in
     order to allocate sufficient receive buffers */

  int *global_ov_num = (int *) malloc(4*numprtns*sizeof(int));

  /* local overlap entity numbers */
  local_ov_num[0] = MESH_Num_OverlapVertices(mesh);
  local_ov_num[1] = MESH_Num_OverlapEdges(mesh);
  local_ov_num[2] = MESH_Num_OverlapFaces(mesh);
  local_ov_num[3] = MESH_Num_OverlapRegions(mesh);
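
  /* After the Allgather below, global_ov_num[4*p + mtype] holds the
     number of overlap entities of type mtype on rank p */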


  MPI_Allgather(local_ov_num,4,MPI_INT,global_ov_num,4,MPI_INT,comm);

  /* Set how many entities a partition can expect to receive from
     another partition, whether or not they all end up used on this
     partition */
  MESH_Init_Par_Recv_Info(mesh);
  for(i = 0; i < numprtns; i++) {
    if (MESH_Has_Ghosts_From_Prtn(mesh,i,MANYTYPE)) {
      for (mtype = MVERTEX; mtype <= MREGION; mtype++) 
        MESH_Set_Num_Recv_From_Prtn(mesh,i,mtype,global_ov_num[4*i+mtype]);
    }
  }

  free(global_ov_num);
  free(local_par_adj);
  free(global_par_adj);

  MESH_Mark_ParallelAdj_Current(mesh);

  return 1;
}
Example #3

#include <stdlib.h>
#include <mpi.h>
#include "MSTK.h"
#include "MSTK_private.h"  /* header names as in Example #1 */

  /* 
     Send 1-ring faces to neighbor processors, and receive them.
     First update the parallel adjacency information.
  */
  int MESH_Parallel_AddGhost_Face(Mesh_ptr submesh, MSTK_Comm comm) {
    int i, num_recv_procs, index_recv_mesh;
    Mesh_ptr send_mesh;
    Mesh_ptr *recv_meshes;
    int with_attr = 0;

    int rank, num;
    MPI_Comm_rank(comm,&rank);
    MPI_Comm_size(comm,&num);

    /* build the 1-ring layer send mesh (2 = face dimension) */
    send_mesh = MESH_New(MESH_RepType(submesh));
    MESH_BuildSubMesh(submesh,2,send_mesh);

    /* 
       First update parallel adjacency information: any two processors
       that share a vertex connection are now flagged as sharing
       connections of all entity types, in both directions.
    */
    for (i = 0; i < num; i++) {
      if(i == rank) continue;
      if( MESH_Has_Ghosts_From_Prtn(submesh,i,MVERTEX) )  {
        MESH_Flag_Has_Ghosts_From_Prtn(submesh,i,MALLTYPE);
        MESH_Flag_Has_Overlaps_On_Prtn(submesh,i,MALLTYPE);
      }
      if( MESH_Has_Overlaps_On_Prtn(submesh,i,MVERTEX) ) {
        MESH_Flag_Has_Overlaps_On_Prtn(submesh,i,MALLTYPE);
        MESH_Flag_Has_Ghosts_From_Prtn(submesh,i,MALLTYPE);
      }
    }

    MESH_Update_ParallelAdj(submesh, comm);

    /* allocate meshes to receive from other processors */
    num_recv_procs = MESH_Num_GhostPrtns(submesh);
    recv_meshes = (Mesh_ptr*)malloc(num_recv_procs*sizeof(Mesh_ptr));
    for(i = 0; i < num_recv_procs; i++)
      recv_meshes[i] = MESH_New(MESH_RepType(submesh));

    /* printf(" number of recv_procs %d,on rank %d\n", num_recv_procs, rank); */

    int numreq = 0;
    int maxreq = 25; /* should be 17*(num-1) but use small number for testing 
                        realloc of the request array */
    MPI_Request *requests = (MPI_Request *) malloc(maxreq*sizeof(MPI_Request));
    int numptrs2free = 0;
    int maxptrs2free = 25; /* should be about 12*(num-1) to avoid realloc */
    void ** ptrs2free = (void **) malloc(maxptrs2free*sizeof(void *));
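
    /* MESH_SendMesh posts nonblocking sends: it appends to 'requests'
       (reallocating past maxreq as needed) and records malloc'ed send
       buffers in 'ptrs2free'; both must remain live until the
       MPI_Waitall below completes */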

    index_recv_mesh = 0;  
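    /* Pairwise exchange ordering: with a lower-rank peer, receive
       first and then send; with a higher-rank peer, send first and
       then receive.  Each rank's sends are thereby matched by the
       peer's receives in the same order, avoiding deadlock. */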
    for (i = 0; i < num; i++) {
      if (i < rank) {
        if (MESH_Has_Ghosts_From_Prtn(submesh,i,MFACE)) 
          MESH_RecvMesh(recv_meshes[index_recv_mesh++],i,with_attr,comm);
        if (MESH_Has_Overlaps_On_Prtn(submesh,i,MFACE)) 
          MESH_SendMesh(send_mesh,i,with_attr,comm,
                        &numreq,&maxreq,&requests,&numptrs2free,&maxptrs2free,
                        &ptrs2free);
      }
      if (i > rank) {
        if (MESH_Has_Overlaps_On_Prtn(submesh,i,MFACE)) 
          MESH_SendMesh(send_mesh,i,with_attr,comm,
                        &numreq,&maxreq,&requests,&numptrs2free,&maxptrs2free,
                        &ptrs2free);
        if (MESH_Has_Ghosts_From_Prtn(submesh,i,MFACE)) 
          MESH_RecvMesh(recv_meshes[index_recv_mesh++],i,with_attr,comm);
      }
    }

    if (MPI_Waitall(numreq,requests,MPI_STATUSES_IGNORE) != MPI_SUCCESS)
      MSTK_Report("MESH_Parallel_AddGhost_Face",
                  "Problem exchanging ghost faces",MSTK_FATAL);
    free(requests);  /* always allocated above, so free unconditionally */
  
    /* free all the memory allocated in sendmesh routines */
    int p;
    for (p = 0; p < numptrs2free; ++p) free(ptrs2free[p]);
    free(ptrs2free);  /* always allocated above, so free unconditionally */
  

    /* install the recv_meshes */
    MESH_ConcatSubMesh(submesh, 2, num_recv_procs, recv_meshes);

    /* delete recvmeshes */
    for (i = 0; i < num_recv_procs; i++) 
      MESH_Delete(recv_meshes[i]);
    free(recv_meshes);

    MESH_Delete(send_mesh);

    return 1;
  }