int MESH_Recv_VertexCoords(Mesh_ptr mesh, int fromrank, int nvertices,
                           MSTK_Comm comm) {
  int i;
  MVertex_ptr v;
  double coor[3];
  MPI_Status status;
  MPI_Request vrequest[2];
  char mesg[256], errorstr[256], funcname[256]="MESH_Recv_VertexCoords";
  int errcode, len, nreq=0;

  int rank;
  MPI_Comm_rank(comm,&rank);

  /* allocate receive buffer */
  double *list_coor = (double *) malloc(3*nvertices*sizeof(double));

  errcode = MPI_Recv(list_coor,3*nvertices,MPI_DOUBLE,fromrank,rank,comm,
                     &status);
  if (errcode != MPI_SUCCESS)
    MSTK_Report(funcname,"Trouble receiving mesh coordinate info",MSTK_FATAL);

  for (i = 0; i < nvertices; i++) {
    v = MESH_Vertex(mesh,i);
    MV_Set_Coords(v,list_coor+3*i);
  }

  free(list_coor);

  return 1;
}

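/* A minimal sketch (an assumption, not the MSTK implementation) of the
 * coordinate send that the MPI_Recv above expects: the receive posts
 * source 'fromrank' and a tag equal to the receiver's own rank, so the
 * sender is assumed to tag its message with the destination rank, as
 * MESH_Send_Vertices below does for the packed vertex info.  The helper
 * name and the buf_out parameter are hypothetical; the buffer must stay
 * allocated until the request completes. */
static int MESH_Send_VertexCoords_sketch(Mesh_ptr mesh, int torank,
                                         MSTK_Comm comm, MPI_Request *request,
                                         double **buf_out) {
  int i, nv = MESH_Num_Vertices(mesh);
  double *list_coor = (double *) malloc(3*nv*sizeof(double));
  for (i = 0; i < nv; i++)
    MV_Coords(MESH_Vertex(mesh,i),list_coor+3*i);
  *buf_out = list_coor;   /* caller frees after waiting on 'request' */
  return MPI_Isend(list_coor,3*nv,MPI_DOUBLE,torank,torank,comm,request);
}
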
int MESH_Recv_Vertices(Mesh_ptr mesh, int fromrank, int nvertices,
                       MSTK_Comm comm) {
  int i;
  MVertex_ptr v;
  MPI_Status status;
  MPI_Request request;
  char mesg[256], errorstr[256], funcname[256]="MESH_Recv_Vertices";
  int errcode, len, nreq=0;

  int rank;
  MPI_Comm_rank(comm,&rank);

  /* allocate receive buffer */
  int *list_vertex = (int *) malloc(3*nvertices*sizeof(int));

  /* receive vertex info */
  errcode = MPI_Irecv(list_vertex,3*nvertices,MPI_INT,fromrank,rank,comm,
                      &request);
  if (errcode != MPI_SUCCESS)
    MSTK_Report(funcname,"Trouble receiving mesh vertex info",MSTK_FATAL);

  /* Create the vertices while waiting for the message to complete */
  for (i = 0; i < nvertices; i++)
    v = MV_New(mesh);

  errcode = MPI_Wait(&request,MPI_STATUS_IGNORE);
  if (errcode != MPI_SUCCESS)
    MSTK_Report(funcname,"Trouble receiving mesh vertex info",MSTK_FATAL);

  for (i = 0; i < nvertices; i++) {
    v = MESH_Vertex(mesh,i);

    int gentdim = list_vertex[3*i] & 7;        /* first 3 bits; 7 is 0...00111 */
    int gentid  = list_vertex[3*i] >> 3;       /* all but the first 3 bits */
    MV_Set_GEntDim(v,gentdim);
    MV_Set_GEntID(v,gentid);

    int ptype = list_vertex[3*i+1] & 3;        /* first 2 bits; 3 is 0...00011 */
    int on_par_bdry = list_vertex[3*i+1] & 4;  /* 3rd bit; 4 is 0...00100 */
    int masterparid = list_vertex[3*i+1] >> 3; /* all but the first 3 bits */
    MV_Set_PType(v,ptype);
    if (on_par_bdry)
      MV_Flag_OnParBoundary(v);
    MV_Set_MasterParID(v,masterparid);

    MV_Set_GlobalID(v,list_vertex[3*i+2]);
  }

  free(list_vertex);

  return 1;
}

int MESH_Send_Vertices(Mesh_ptr mesh, int torank, MSTK_Comm comm,
                       int *numreq, int *maxreq, MPI_Request **requests,
                       int *numptrs2free, int *maxptrs2free,
                       void ***ptrs2free) {
  int i, j, nv;
  MVertex_ptr mv;
  double coor[3];
  MPI_Request mpirequest;

  if (requests == NULL)
    MSTK_Report("MESH_Send_Vertices","MPI requests array is NULL",MSTK_FATAL);

  if (*maxreq == 0) {
    *maxreq = 25;
    *requests = (MPI_Request *) malloc(*maxreq*sizeof(MPI_Request));
    *numreq = 0;
  }
  else if (*maxreq < (*numreq) + 11) {
    *maxreq = 2*(*maxreq) + 11;
    *requests = (MPI_Request *) realloc(*requests,*maxreq*sizeof(MPI_Request));
  }

  /* Now send out detailed vertex info */

  nv = MESH_Num_Vertices(mesh);
  int *list_vertex = (int *) malloc(3*nv*sizeof(int));

  /* Store the 3 auxiliary data fields - it would be nice if we didn't
   * have to send the data in such an error-prone way (with bit
   * shifting) that requires knowledge of the internal structure of
   * MEntity */

  for (i = 0; i < nv; i++) {
    mv = MESH_Vertex(mesh,i);
    list_vertex[3*i]   = (MV_GEntID(mv)<<3) | (MV_GEntDim(mv));
    list_vertex[3*i+1] = (MV_MasterParID(mv)<<3) | (MV_OnParBoundary(mv)<<2) |
                         (MV_PType(mv));
    list_vertex[3*i+2] = MV_GlobalID(mv);
  }

  /* send vertices */
  MPI_Isend(list_vertex,3*nv,MPI_INT,torank,torank,comm,&mpirequest);
  (*requests)[*numreq] = mpirequest;
  (*numreq)++;

  /* track the send buffer so it can be freed after the request completes */
  int nptrs = 1;

  if (*maxptrs2free == 0) {
    *maxptrs2free = 25;
    *ptrs2free = (void **) malloc(*maxptrs2free*sizeof(void *));
    *numptrs2free = 0;
  }
  else if (*maxptrs2free < (*numptrs2free) + nptrs) {
    *maxptrs2free = 2*(*maxptrs2free) + nptrs;
    *ptrs2free = (void **) realloc(*ptrs2free,(*maxptrs2free)*sizeof(void *));
  }

  (*ptrs2free)[(*numptrs2free)++] = list_vertex;

  return 1;
}

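/* A minimal sketch (hypothetical helpers, not MSTK API) of the bit packing
 * used by MESH_Send_Vertices and decoded in MESH_Recv_Vertices above:
 * word 0 keeps the geometric entity dimension in its low 3 bits and the
 * geometric entity ID above them; word 1 keeps the parallel type in bits
 * 0-1, the on-parallel-boundary flag in bit 2, and the master partition
 * ID above.  For example, gentid=12, gentdim=2 packs to (12<<3)|2 = 98. */
static inline int pack_gent_sketch(int gentid, int gentdim) {
  return (gentid << 3) | (gentdim & 7);
}
static inline void unpack_gent_sketch(int word, int *gentid, int *gentdim) {
  *gentdim = word & 7;   /* low 3 bits */
  *gentid  = word >> 3;  /* remaining bits */
}
static inline int pack_par_sketch(int masterparid, int on_par_bdry, int ptype) {
  return (masterparid << 3) | ((on_par_bdry ? 1 : 0) << 2) | (ptype & 3);
}
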
int MESH_AssignGlobalIDs_Vertex(Mesh_ptr submesh, int have_GIDs, MSTK_Comm comm) {
  int i, j, nv, nbv, ne, nf, nr, mesh_info[10];
  MVertex_ptr mv;
  List_ptr boundary_verts;
  RepType rtype;
  int index_nbv, max_nbv, iloc, num_ghost_verts, global_id;
  int *global_mesh_info, *vertex_ov_label, *vertex_ov_global_id, *id_on_ov_list;

  int rank, num;
  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&num);

  for (i = 0; i < 10; i++) mesh_info[i] = 0;

  rtype = MESH_RepType(submesh);
  nv = MESH_Num_Vertices(submesh);
  ne = MESH_Num_Edges(submesh);
  nf = MESH_Num_Faces(submesh);
  nr = MESH_Num_Regions(submesh);

  mesh_info[0] = rtype;
  mesh_info[1] = nv;
  mesh_info[2] = ne;
  mesh_info[3] = nf;
  mesh_info[4] = nr;

  /* calculate number of boundary vertices */
  nbv = 0;
  boundary_verts = List_New(10);
  if (nr) {
    for (i = 0; i < nv; i++) {
      mv = MESH_Vertex(submesh,i);
      if (vertex_on_boundary3D(mv)) {
        MV_Flag_OnParBoundary(mv);
        List_Add(boundary_verts,mv);
        nbv++;
      }
    }
  }
  else {
    for (i = 0; i < nv; i++) {
      mv = MESH_Vertex(submesh,i);
      if (vertex_on_boundary2D(mv)) {
        MV_Flag_OnParBoundary(mv);
        List_Add(boundary_verts,mv);
        nbv++;
      }
    }
  }
  mesh_info[5] = nbv;

  /* gather submesh information -
     right now we only need nv and nbv, and later num_ghost_verts,
     but we gather all of mesh_info */
  global_mesh_info = (int *)malloc(10*num*sizeof(int));
  MPI_Allgather(mesh_info,10,MPI_INT,global_mesh_info,10,MPI_INT,comm);

  /* get the largest number of boundary vertices across all processors */
  max_nbv = 0;
  for (i = 0; i < num; i++)
    if (max_nbv < global_mesh_info[10*i+5])
      max_nbv = global_mesh_info[10*i+5];

  if (have_GIDs) {
    int *list_boundary_vertex_gid = (int *)malloc(max_nbv*sizeof(int));
    int *recv_list_vertex_gid = (int *)malloc(num*max_nbv*sizeof(int));

    /* sort boundary vertices based on global ID, for binary search */
    List_Sort(boundary_verts,nbv,sizeof(MVertex_ptr),compareGlobalID);

    /* only global ids are sent */
    index_nbv = 0;
    for (i = 0; i < nbv; i++) {
      mv = List_Entry(boundary_verts,i);
      list_boundary_vertex_gid[index_nbv] = MV_GlobalID(mv);
      index_nbv++;
    }

    MPI_Allgather(list_boundary_vertex_gid,max_nbv,MPI_INT,
                  recv_list_vertex_gid,max_nbv,MPI_INT,comm);

    /* indicate if a vertex is overlapped */
    vertex_ov_label = (int *)malloc(num*max_nbv*sizeof(int));

    /* store the local boundary id on the overlap processor -
       it is used to assign global ids of local ghost vertices;
       no need to store the master partition id, since
       MV_MasterParID(mv) is already assigned */
    id_on_ov_list = (int *)malloc(max_nbv*sizeof(int));

    for (i = 0; i < num*max_nbv; i++)
      vertex_ov_label[i] = 0;
    num_ghost_verts = 0;

    /* for processors other than 0 */
    if (rank > 0) {
      for (i = 0; i < nbv; i++) {
        mv = List_Entry(boundary_verts,i);
        int gid = MV_GlobalID(mv);

        /* check which previous processor has a vertex with the same global ID */
        for (j = 0; j < rank; j++) {
          /* since each processor has sorted its boundary vertices,
             use binary search */
          int *loc = (int *)bsearch(&gid,
                                    &recv_list_vertex_gid[max_nbv*j],
                                    global_mesh_info[10*j+5],
                                    sizeof(int),
                                    compareINT);

          /* if the vertex was found on a previous processor */
          if (loc) {
            /* the location iloc is relative to the beginning of the
               jth processor's list */
            iloc = (int)(loc - &recv_list_vertex_gid[max_nbv*j]);
            MV_Set_PType(mv,PGHOST);
            MV_Set_MasterParID(mv,j);
            num_ghost_verts++;

            /* label the original vertex as overlapped */
            vertex_ov_label[max_nbv*j+iloc] |= 1;
            id_on_ov_list[i] = iloc;

            /* if found on processor j, no need to test j+1, j+2, ... */
            break;
          }
        }
      }
    }

    free(list_boundary_vertex_gid);
    free(recv_list_vertex_gid);
  }
  else {
    double coor[3];
    int *list_boundary_vertex = (int *)malloc(max_nbv*sizeof(int));
    double *list_boundary_coor = (double *)malloc(3*max_nbv*sizeof(double));
    int *recv_list_vertex = (int *)malloc(num*max_nbv*sizeof(int));
    double *recv_list_coor = (double *)malloc(3*num*max_nbv*sizeof(double));

    /* sort boundary vertices based on coordinate values, for binary search */
    List_Sort(boundary_verts,nbv,sizeof(MVertex_ptr),compareVertexCoor);

    /* only local ids and coordinate values are sent */
    index_nbv = 0;
    for (i = 0; i < nbv; i++) {
      mv = List_Entry(boundary_verts,i);
      list_boundary_vertex[index_nbv] = MV_ID(mv);
      MV_Coords(mv,coor);
      list_boundary_coor[index_nbv*3]   = coor[0];
      list_boundary_coor[index_nbv*3+1] = coor[1];
      list_boundary_coor[index_nbv*3+2] = coor[2];
      index_nbv++;
    }

    MPI_Allgather(list_boundary_vertex,max_nbv,MPI_INT,
                  recv_list_vertex,max_nbv,MPI_INT,comm);
    MPI_Allgather(list_boundary_coor,3*max_nbv,MPI_DOUBLE,
                  recv_list_coor,3*max_nbv,MPI_DOUBLE,comm);

    /* indicate if a vertex is overlapped */
    vertex_ov_label = (int *)malloc(num*max_nbv*sizeof(int));

    /* store the local boundary id on the overlap processor -
       it is used to assign global ids of local ghost vertices;
       no need to store the master partition id, since
       MV_MasterParID(mv) is already assigned */
    id_on_ov_list = (int *)malloc(max_nbv*sizeof(int));

    for (i = 0; i < num*max_nbv; i++)
      vertex_ov_label[i] = 0;
    num_ghost_verts = 0;

    /* for processors other than 0 */
    if (rank > 0) {
      for (i = 0; i < nbv; i++) {
        mv = List_Entry(boundary_verts,i);
        MV_Coords(mv,coor);

        /* check which previous processor has a vertex with the same coordinates */
        for (j = 0; j < rank; j++) {
          /* since each processor has sorted its boundary vertices,
             use binary search */
          double *loc = (double *)bsearch(&coor,
                                          &recv_list_coor[3*max_nbv*j],
                                          global_mesh_info[10*j+5],
                                          3*sizeof(double),
                                          compareCoorDouble);

          /* if the vertex was found on a previous processor */
          if (loc) {
            /* the location iloc is relative to the beginning of the
               jth processor's list */
            iloc = (int)(loc - &recv_list_coor[3*max_nbv*j])/3;
            MV_Set_PType(mv,PGHOST);
            MV_Set_MasterParID(mv,j);
            num_ghost_verts++;

            /* label the original vertex as overlapped */
            vertex_ov_label[max_nbv*j+iloc] |= 1;
            id_on_ov_list[i] = iloc;

            /* if found on processor j, no need to test j+1, j+2, ... */
            break;
          }
        }
      }
    }

    free(list_boundary_coor);
    free(recv_list_coor);
    free(list_boundary_vertex);
    free(recv_list_vertex);
  }

  /* number of ghost vertices */
  mesh_info[9] = num_ghost_verts;

  /* update the ghost vertex counts */
  MPI_Allgather(mesh_info,10,MPI_INT,global_mesh_info,10,MPI_INT,comm);

  /* since this is an OR reduction, we can use MPI_IN_PLACE
     (send buffer same as receive buffer) */
  MPI_Allreduce(MPI_IN_PLACE,vertex_ov_label,num*max_nbv,MPI_INT,MPI_LOR,comm);

  /* calculate the starting global id number for vertices */
  if (!have_GIDs) {
    global_id = 1;
    for (i = 0; i < rank; i++)
      global_id = global_id + global_mesh_info[10*i+1] - global_mesh_info[10*i+9];
    for (i = 0; i < nv; i++) {
      mv = MESH_Vertex(submesh,i);
      if (MV_PType(mv) == PGHOST) continue;
      MV_Set_GlobalID(mv,global_id++);
      MV_Set_MasterParID(mv,rank);
    }
  }

  /* store the global IDs of overlapped vertices and broadcast them */
  vertex_ov_global_id = (int *)malloc(num*max_nbv*sizeof(int));
  for (i = 0; i < num*max_nbv; i++)
    vertex_ov_global_id[i] = 0;
  for (i = 0; i < nbv; i++) {
    if (vertex_ov_label[rank*max_nbv+i]) {
      mv = List_Entry(boundary_verts,i);
      MV_Set_PType(mv,POVERLAP);
      vertex_ov_global_id[rank*max_nbv+i] = MV_GlobalID(mv);
    }
  }

  MPI_Allreduce(MPI_IN_PLACE,vertex_ov_global_id,num*max_nbv,MPI_INT,MPI_MAX,comm);

  for (i = 0; i < nbv; i++) {
    mv = List_Entry(boundary_verts,i);
    if (MV_PType(mv) == PGHOST)
      MV_Set_GlobalID(mv,vertex_ov_global_id[MV_MasterParID(mv)*max_nbv+id_on_ov_list[i]]);
  }

  List_Delete(boundary_verts);
  free(global_mesh_info);
  free(vertex_ov_label);
  free(vertex_ov_global_id);
  free(id_on_ov_list);

  return 1;
}

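/* A minimal sketch (hypothetical helper) restating the starting-global-ID
 * computation in MESH_AssignGlobalIDs_Vertex above: each rank owns
 * nv - num_ghost_verts vertices, and rank r begins numbering at 1 plus the
 * owned counts of all lower ranks.  For example, if ranks 0 and 1 own 100
 * and 80 vertices, rank 2 starts assigning at 181.  The layout of
 * global_mesh_info (10 ints per rank, nv at offset 1, ghost count at
 * offset 9) is taken directly from the function above. */
static int first_owned_gid_sketch(int rank, const int *global_mesh_info) {
  int i, gid = 1;
  for (i = 0; i < rank; i++)
    gid += global_mesh_info[10*i+1] - global_mesh_info[10*i+9];
  return gid;
}
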
int MESH_Send_Attribute(Mesh_ptr mesh, MAttrib_ptr attrib, int torank,
                        MSTK_Comm comm, int *numreq, int *maxreq,
                        MPI_Request **requests, int *numptrs2free,
                        int *maxptrs2free, void ***ptrs2free) {
  int j, k;
  int num, ncomp, ival;
  double rval;
  void *pval;
  double *rval_arr;
  int *list_info;
  MType mtype;
  MAttType att_type;
  MEntity_ptr ment;
  MPI_Request mpirequest;

  if (requests == NULL)
    MSTK_Report("MESH_Send_Attribute","Invalid MPI request buffer",MSTK_FATAL);

  if (*maxreq == 0) {
    *maxreq = 25;
    *requests = (MPI_Request *) malloc(*maxreq*sizeof(MPI_Request));
    *numreq = 0;
  }
  else if (*maxreq < (*numreq) + 2) {
    *maxreq *= 2;
    *requests = (MPI_Request *) realloc(*requests,*maxreq*sizeof(MPI_Request));
  }

  /* get attribute properties */
  att_type = MAttrib_Get_Type(attrib);
  ncomp = MAttrib_Get_NumComps(attrib);
  mtype = MAttrib_Get_EntDim(attrib);

  /* attribute entity type - this is used before the ghost lists are
     established, so there are no ghost entities to skip */
  switch (mtype) {
    case MVERTEX:
      num = MESH_Num_Vertices(mesh);
      break;
    case MEDGE:
      num = MESH_Num_Edges(mesh);
      break;
    case MFACE:
      num = MESH_Num_Faces(mesh);
      break;
    case MREGION:
      num = MESH_Num_Regions(mesh);
      break;
    default:
      num = 0;
#ifdef DEBUG2
      MSTK_Report("MESH_Send_Attribute",
                  "Cannot send attributes on entity type MALLTYPE",MSTK_WARN);
#endif
      return 0;
  }

  /* attribute index and global id */
  list_info = (int *) malloc(num*sizeof(int));

  /* attribute values */
  int *list_value_int = NULL;
  double *list_value_double = NULL;
  if (att_type == INT)
    list_value_int = (int *) malloc(num*ncomp*sizeof(int));
  else
    list_value_double = (double *) malloc(num*ncomp*sizeof(double));

  /* collect data */
  for (j = 0; j < num; j++) {
    switch (mtype) {
      case MVERTEX:
        ment = MESH_Vertex(mesh,j);
        break;
      case MEDGE:
        ment = MESH_Edge(mesh,j);
        break;
      case MFACE:
        ment = MESH_Face(mesh,j);
        break;
      case MREGION:
        ment = MESH_Region(mesh,j);
        break;
      default:
        MSTK_Report("MESH_Send_Attribute","Invalid entity type",MSTK_WARN);
        return 0;
    }

    MEnt_Get_AttVal(ment,attrib,&ival,&rval,&pval);

    list_info[j] = MEnt_GlobalID(ment);
    if (att_type == INT)
      list_value_int[j] = ival;
    else {
      if (ncomp == 1)
        list_value_double[j] = rval;
      if (ncomp > 1) {
        rval_arr = (double *)pval;
        for (k = 0; k < ncomp; k++)
          list_value_double[ncomp*j+k] = rval_arr[k];
      }
    }
  }

  /* send entity global IDs */
  MPI_Isend(list_info,num,MPI_INT,torank,torank,comm,&mpirequest);
  (*requests)[*numreq] = mpirequest;
  (*numreq)++;

  /* send values */
  if (att_type == INT) {
    MPI_Isend(list_value_int,num*ncomp,MPI_INT,torank,torank,comm,&mpirequest);
    (*requests)[*numreq] = mpirequest;
  }
  else {
    MPI_Isend(list_value_double,num*ncomp,MPI_DOUBLE,torank,torank,comm,
              &mpirequest);
    (*requests)[*numreq] = mpirequest;
  }
  (*numreq)++;

  /* track the buffers used for sending so that they can be released later */
  if (*maxptrs2free == 0) {
    *maxptrs2free = 25;
    *ptrs2free = (void **) malloc(*maxptrs2free*sizeof(void *));
    *numptrs2free = 0;
  }
  else if (*maxptrs2free < (*numptrs2free) + 2) {
    *maxptrs2free = 2*(*maxptrs2free) + 2;
    *ptrs2free = (void **) realloc(*ptrs2free,(*maxptrs2free)*sizeof(void *));
  }

  (*ptrs2free)[(*numptrs2free)++] = list_info;
  if (att_type == INT)
    (*ptrs2free)[(*numptrs2free)++] = list_value_int;
  else
    (*ptrs2free)[(*numptrs2free)++] = list_value_double;

  return 1;
}

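/* A minimal sketch (hypothetical helper, not MSTK API) of the flat buffer
 * layout built in MESH_Send_Attribute above for multi-component double
 * attributes: component k of entity j lives at index ncomp*j + k, so entity
 * j's ncomp-vector occupies the contiguous slice [ncomp*j, ncomp*(j+1)).
 * A receiver could unpack one entity's values like this. */
static void unpack_attr_components_sketch(const double *list_value_double,
                                          int ncomp, int j, double *out) {
  int k;
  for (k = 0; k < ncomp; k++)
    out[k] = list_value_double[ncomp*j + k];
}
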