int LOCA::Epetra::AugmentedOp::blockMap2PointMap(const Epetra_BlockMap& BlockMap, Epetra_Map*& PointMap) const { // Generate an Epetra_Map that has the same number and distribution of points // as the input Epetra_BlockMap object. The global IDs for the output PointMap // are computed by using the MaxElementSize of the BlockMap. For variable block // sizes this will create gaps in the GID space, but that is OK for Epetra_Maps. int MaxElementSize = BlockMap.MaxElementSize(); int PtNumMyElements = BlockMap.NumMyPoints(); int * PtMyGlobalElements = 0; if (PtNumMyElements>0) PtMyGlobalElements = new int[PtNumMyElements]; int NumMyElements = BlockMap.NumMyElements(); int curID = 0; for (int i=0; i<NumMyElements; i++) { int StartID = BlockMap.GID(i)*MaxElementSize; int ElementSize = BlockMap.ElementSize(i); for (int j=0; j<ElementSize; j++) PtMyGlobalElements[curID++] = StartID+j; } assert(curID==PtNumMyElements); // Sanity test PointMap = new Epetra_Map(-1, PtNumMyElements, PtMyGlobalElements, BlockMap.IndexBase(), BlockMap.Comm()); if (PtNumMyElements>0) delete [] PtMyGlobalElements; if (!BlockMap.PointSameAs(*PointMap)) {EPETRA_CHK_ERR(-1);} // Maps not compatible return(0); }
// ============================================================================ std::shared_ptr<const Tpetra::Map<int,int>> BorderingHelpers:: extendMapBy1(const Epetra_BlockMap & map) { const Teuchos::Comm<int> & comm = map.Comm(); // Create a new map that hosts one more entry. const int numGlobalElements = map.NumGlobalElements() + 1; const int numMyElements = map.NumMyElements(); int * myGlobalElements = map.MyGlobalElements(); // The following if-else construction just makes sure that // the Tpetra::Map<int,int> constructor is called with an extended // map on proc 0, and with the regular old stuff on all // other procs. std::shared_ptr<Tpetra::Map<int,int>> extendedMap; if (comm.MyPID() == 0) { // Copy over the global indices. std::vector<int> a(numMyElements+1); for (int k = 0; k < numMyElements; k++) a[k] = myGlobalElements[k]; // Append one more. a[numMyElements] = map.NumGlobalElements(); extendedMap = std::make_shared<Tpetra::Map<int,int>>( numGlobalElements, numMyElements+1, &a[0], map.IndexBase(), comm ); } else { extendedMap = std::make_shared<Tpetra::Map<int,int>>( numGlobalElements, numMyElements, myGlobalElements, map.IndexBase(), comm ); } return extendedMap; }
// FIXME long long Epetra_BlockMap Epetra_Util::Create_OneToOne_BlockMap(const Epetra_BlockMap& usermap, bool high_rank_proc_owns_shared) { // FIXME long long //if usermap is already 1-to-1 then we'll just return a copy of it. if (usermap.IsOneToOne()) { Epetra_BlockMap newmap(usermap); return(newmap); } int myPID = usermap.Comm().MyPID(); Epetra_Directory* directory = usermap.Comm().CreateDirectory(usermap); int numMyElems = usermap.NumMyElements(); const int* myElems = usermap.MyGlobalElements(); int* owner_procs = new int[numMyElems*2]; int* sizes = owner_procs+numMyElems; directory->GetDirectoryEntries(usermap, numMyElems, myElems, owner_procs, 0, sizes, high_rank_proc_owns_shared); //we'll fill a list of map-elements which belong on this processor int* myOwnedElems = new int[numMyElems*2]; int* ownedSizes = myOwnedElems+numMyElems; int numMyOwnedElems = 0; for(int i=0; i<numMyElems; ++i) { int GID = myElems[i]; int owner = owner_procs[i]; if (myPID == owner) { ownedSizes[numMyOwnedElems] = sizes[i]; myOwnedElems[numMyOwnedElems++] = GID; } } Epetra_BlockMap one_to_one_map(-1, numMyOwnedElems, myOwnedElems, sizes, usermap.IndexBase(), usermap.Comm()); delete [] myOwnedElems; delete [] owner_procs; delete directory; return(one_to_one_map); }
//EpetraMap_To_TpetraMap: takes in Epetra_Map object, converts it to its equivalent Tpetra::Map object, //and returns an RCP pointer to this Tpetra::Map Teuchos::RCP<const Tpetra_Map> Petra::EpetraMap_To_TpetraMap(const Epetra_BlockMap& epetraMap_, const Teuchos::RCP<const Teuchos::Comm<int> >& commT_) { const std::size_t numElements = Teuchos::as<std::size_t>(epetraMap_.NumMyElements()); const auto indexBase = Teuchos::as<GO>(epetraMap_.IndexBase()); if (epetraMap_.DistributedGlobal() || epetraMap_.Comm().NumProc() == Teuchos::OrdinalTraits<int>::one()) { Teuchos::Array<Tpetra_GO> indices(numElements); int *epetra_indices = epetraMap_.MyGlobalElements(); for(LO i=0; i < numElements; i++) indices[i] = epetra_indices[i]; const Tpetra::global_size_t computeGlobalElements = Teuchos::OrdinalTraits<Tpetra::global_size_t>::invalid(); return Teuchos::rcp(new Tpetra_Map(computeGlobalElements, indices, indexBase, commT_)); } else { return Teuchos::rcp(new Tpetra_Map(numElements, indexBase, commT_, Tpetra::LocallyReplicated)); } }
//========================================================================= int Ifpack_CrsRiluk::BlockMap2PointMap(const Epetra_BlockMap & BlockMap, Teuchos::RefCountPtr<Epetra_Map>* PointMap) { // Generate an Epetra_Map that has the same number and distribution of points // as the input Epetra_BlockMap object. The global IDs for the output PointMap // are computed by using the MaxElementSize of the BlockMap. For variable block // sizes this will create gaps in the GID space, but that is OK for Epetra_Maps. int MaxElementSize = BlockMap.MaxElementSize(); int PtNumMyElements = BlockMap.NumMyPoints(); vector<int> PtMyGlobalElements; if (PtNumMyElements>0) PtMyGlobalElements.resize(PtNumMyElements); int NumMyElements = BlockMap.NumMyElements(); int curID = 0; for (int i=0; i<NumMyElements; i++) { int StartID = BlockMap.GID(i)*MaxElementSize; int ElementSize = BlockMap.ElementSize(i); for (int j=0; j<ElementSize; j++) PtMyGlobalElements[curID++] = StartID+j; } assert(curID==PtNumMyElements); // Sanity test (*PointMap) = Teuchos::rcp( new Epetra_Map(-1, PtNumMyElements, &PtMyGlobalElements[0], BlockMap.IndexBase(), BlockMap.Comm()) ); if (!BlockMap.PointSameAs(*(*PointMap))) {EPETRA_CHK_ERR(-1);} // Maps not compatible return(0); }
// Write an Epetra_BlockMap to a Matrix Market array file.  PE 0 opens the
// file and writes the banner/header; every processor then participates in
// BlockMapToHandle, which writes the map data.  Returns 0 on success, -1 on
// any I/O failure.
// NOTE(review): on the early `return(-1)` paths after fopen succeeds, the
// FILE handle is never closed — confirm whether a leak is acceptable here.
int BlockMapToMatrixMarketFile( const char *filename, const Epetra_BlockMap & map,
                                const char * mapName,
                                const char *mapDescription,
                                bool writeHeader) {
  int M = map.NumGlobalElements();
  int N = 1;
  if (map.MaxElementSize()>1) N = 2; // Non-trivial block map, store element sizes in second column

  FILE * handle = 0;

  if (map.Comm().MyPID()==0) { // Only PE 0 does this section
    handle = fopen(filename,"w");
    if (!handle) return(-1);
    // Matrix Market banner: integer array.
    MM_typecode matcode;
    mm_initialize_typecode(&matcode);
    mm_set_matrix(&matcode);
    mm_set_array(&matcode);
    mm_set_integer(&matcode);

    if (writeHeader==true) { // Only write header if requested (true by default)
      if (mm_write_banner(handle, matcode)) return(-1);
      if (mapName!=0) fprintf(handle, "%% \n%% %s\n", mapName);
      if (mapDescription!=0) fprintf(handle, "%% %s\n%% \n", mapDescription);
    }
  }

  if (writeHeader==true) { // Only write header if requested (true by default)
    // Make an Epetra_IntVector of length numProc such that all elements are on PE 0 and
    // the ith element is NumMyElements from the ith PE
    Epetra_Map map1(-1, 1, 0, map.Comm()); // map with one element on each processor
    int length = 0;
    if (map.Comm().MyPID()==0) length = map.Comm().NumProc();
    Epetra_Map map2(-1, length, 0, map.Comm());
    Epetra_Import lengthImporter(map2, map1);
    Epetra_IntVector v1(map1);
    Epetra_IntVector v2(map2);
    v1[0] = map.NumMyElements();
    // Gather every processor's local element count onto PE 0.
    if (v2.Import(v1, lengthImporter, Insert)) return(-1);
    if (map.Comm().MyPID()==0) {
      fprintf(handle, "%s", "%Format Version:\n");
      //int version = 1; // We may change the format scheme at a later date.
      // NOTE(review): this line prints NumProc under the "Format Version"
      // label; the commented-out `version` variable suggests it should print
      // the version number instead — confirm against the file readers.
      fprintf(handle, "%% %d \n", map.Comm().NumProc());
      fprintf(handle, "%s", "%NumProc: Number of processors:\n");
      fprintf(handle, "%% %d \n", map.Comm().NumProc());
      fprintf(handle, "%s", "%MaxElementSize: Maximum element size:\n");
      fprintf(handle, "%% %d \n", map.MaxElementSize());
      fprintf(handle, "%s", "%MinElementSize: Minimum element size:\n");
      fprintf(handle, "%% %d \n", map.MinElementSize());
      fprintf(handle, "%s", "%IndexBase: Index base of map:\n");
      fprintf(handle, "%% %d \n", map.IndexBase());
      fprintf(handle, "%s", "%NumGlobalElements: Total number of GIDs in map:\n");
      fprintf(handle, "%% %d \n", map.NumGlobalElements());
      fprintf(handle, "%s", "%NumMyElements: BlockMap lengths per processor:\n");
      for ( int i=0; i< v2.MyLength(); i++) fprintf(handle, "%% %d\n", v2[i]);
      if (mm_write_mtx_array_size(handle, M, N)) return(-1);
    }
  }
  if (BlockMapToHandle(handle, map)) return(-1); // Everybody calls this routine
  if (map.Comm().MyPID()==0) // Only PE 0 opened a file
    if (fclose(handle)) return(-1);
  return(0);
}
// Compute hypergraph partition-quality metrics for the given row/column maps:
// weight balance, cutn (weighted count of cut columns/hyperedges) and cutl
// (weighted count of cuts).  Vertex and hyperedge weights come from `costs`;
// unit vertex weights are used when none are supplied.  Returns 0 on success,
// 1 if balance computation fails, -1 on inconsistent hyperedge weights.
static int compute_hypergraph_metrics(const Epetra_BlockMap &rowmap,
                                      const Epetra_BlockMap &colmap,
                                      int numGlobalColumns,
                                      Isorropia::Epetra::CostDescriber &costs,
                                      double &myGoalWeight,
                                      double &balance, double &cutn, double &cutl)  // output
{
  const Epetra_Comm &comm = rowmap.Comm();
#ifdef HAVE_MPI
  // NOTE(review): dynamic_cast returns NULL if comm is not an Epetra_MpiComm;
  // the next line would then dereference NULL — confirm callers always pass
  // an MPI comm in MPI builds.
  const Epetra_MpiComm* mpiComm = dynamic_cast<const Epetra_MpiComm*>(&comm);
  MPI_Comm mcomm = mpiComm->Comm();
#endif
  int nProcs = comm.NumProc();
  int myProc = comm.MyPID();
  double min, avg;
  std::map<int, float> vertexWeights;
  std::map<int, std::map<int, float > > graphEdgeWeights;
  std::map<int, float> hyperEdgeWeights;

  costs.getCosts(vertexWeights,    // vertex global ID -> weight
                 graphEdgeWeights, // vertex global ID -> map from neighbor global ID to edge weight
                 hyperEdgeWeights);    // hyperedge global ID -> weight

  // Build the vertex-weight vector on the row map.
  Epetra_Vector vwgt(rowmap);
  int numVWgts = vertexWeights.size();

  if (numVWgts > 0){
    double *wvals = new double [numVWgts];
    int *gids = new int [numVWgts];

    std::map<int, float>::iterator vnext = vertexWeights.begin();
    int i=0;
    while (vnext != vertexWeights.end()){
      wvals[i] = vnext->second;
      gids[i] = vnext->first;
      vnext++;
      i++;
    }

    vwgt.ReplaceGlobalValues(i, wvals, gids);

    delete [] wvals;
    delete [] gids;
  }
  else{
    vwgt.PutScalar(1.0); // default to unit weights
  }

  compute_balance(vwgt, myGoalWeight, min, balance, avg);

  if (balance < 0){
    return 1;
  }

  /* Compute cutl and cutn. */

  // Hyperedge weights must either be absent or supplied for every column.
  int totalHEWeights = 0;

  int numHEWeights = hyperEdgeWeights.size();

  comm.SumAll(&numHEWeights, &totalHEWeights, 1);

  if ((totalHEWeights > 0) && (totalHEWeights < numGlobalColumns)){
    if (myProc == 0)
      std::cerr << "Must supply either no h.e. weights or else supply at least one for each column" << std::endl;
    return -1;
  }

  std::map<int, float>::iterator heWgtIter;

  // Create a set containing all the columns in my rows.  We assume all
  // the rows are in the same partition.
  int numMyCols = colmap.NumMyElements();

  std::set<int> colGIDS;
  std::set<int>::iterator gidIter;

  for (int j=0; j<numMyCols; j++){
    colGIDS.insert(colmap.GID(j));
  }

  /* Divide columns among processes, then each process computes its
   * assigned columns' cutl and cutn.
   * TODO - numGlobalColumns can be less than nprocs
   * Fix this when a process is assigned no columns. TODO
   */
  int ncols = numGlobalColumns / nProcs;
  int leftover = numGlobalColumns - (nProcs * ncols);
  std::vector<int> colCount(nProcs, 0);
  for (int i=0; i<nProcs; i++){
    colCount[i] = ncols;
    if (i < leftover) colCount[i]++;
  }

  // Receive buffers (only sized for the columns assigned to this process).
  int *colTotals = NULL;
  double *colWeights = NULL;
  if (colCount[myProc] > 0){
    colTotals = new int [colCount[myProc]];
    if (totalHEWeights > 0){
      colWeights = new double [colCount[myProc]];
    }
  }

  // Send buffers, sized for the largest per-process column group (ncols+1
  // accounts for a possible leftover column).
  int *colLocal= new int [ncols + 1];
  double *localWeights = NULL;
  if (totalHEWeights > 0){
    localWeights = new double [ncols + 1];
  }

  int base = colmap.IndexBase();
  int colStart = base;

  for (int i=0; i<nProcs; i++){

    // All processes send info to the process reponsible
    // for the next group of columns

    int ncols = colCount[i];
    int colEnd = colStart + ncols;
    for (int j=colStart,k=0; j < colEnd; j++,k++){
      gidIter = colGIDS.find(j);
      if (gidIter != colGIDS.end()){
        colLocal[k] = 1;  // column j has rows in my partition
      }
      else{
        colLocal[k] = 0;
      }
      if (totalHEWeights > 0){
        std::map<int, float>::iterator heWgtIter = hyperEdgeWeights.find(j);
        if (heWgtIter != hyperEdgeWeights.end()){
          // I have the edge weight for column j
          localWeights[k] = heWgtIter->second;
        }
        else{
          localWeights[k] = 0.0;
        }
      }
    }

#ifdef HAVE_MPI
    // Process i receives the sums for its assigned columns.
    int rc = MPI_Reduce(colLocal, colTotals, ncols, MPI_INT, MPI_SUM, i, mcomm);
    if (totalHEWeights > 0){
      rc = MPI_Reduce(localWeights, colWeights, ncols, MPI_DOUBLE, MPI_SUM, i, mcomm);
    }
    // TODO handle possible MPI error
#else
    memcpy(colTotals, colLocal, ncols * sizeof(int));
    if (totalHEWeights > 0){
      memcpy(colWeights, localWeights, ncols * sizeof(double));
    }
#endif
    colStart = colEnd;
  }

  delete [] colLocal;
  if (localWeights) delete [] localWeights;

  // A column appearing in more than one partition is "cut"; cutl counts the
  // cuts (appearances - 1), cutn counts the cut columns, both weighted.
  double localCutN=0;
  double localCutL=0;
  double ewgt = 1.0;

  for (int j=0; j<colCount[myProc]; j++){
    if (totalHEWeights > 0){
      ewgt = colWeights[j];
    }
    if (colTotals[j] > 1){
      localCutL += (colTotals[j] - 1) * ewgt; // # of cuts in columns/edges
      localCutN += ewgt;                      // # of cut columns/edges
    }
  }

  if (colTotals) delete [] colTotals;
  if (colWeights) delete [] colWeights;

  comm.SumAll(&localCutN, &cutn, 1);
  comm.SumAll(&localCutL, &cutl, 1);

  return 0;
}
// Exhaustively verify that Map's reported properties agree with the expected
// values passed in (element counts, GIDs, element sizes, index base, LID/GID
// round trips, point-element lists, and RemoteIDList queries).  Returns the
// accumulated error count (0 means all checks passed).  A null
// ElementSizeList means the map has a constant element size; a null
// MyGlobalElements means the map is linear (contiguous GIDs).
int checkmap(Epetra_BlockMap & Map, int NumGlobalElements, int NumMyElements,
             int *MyGlobalElements, int ElementSize, int * ElementSizeList,
             int NumGlobalPoints, int NumMyPoints,
             int IndexBase, Epetra_Comm& Comm,
             bool DistributedGlobal,
             bool IsOneToOne)
{
  int i, ierr=0, forierr=0;// forierr is used in for loops, then is tested
  // after for loop completes to see if it is non zero - potentially prevents
  // thousands of error messages

  // Constant vs variable element size must match presence of ElementSizeList.
  if (ElementSizeList==0)
    {
      EPETRA_TEST_ERR(!Map.ConstantElementSize(),ierr);
    }
  else
    EPETRA_TEST_ERR(Map.ConstantElementSize(),ierr);

  EPETRA_TEST_ERR(DistributedGlobal!=Map.DistributedGlobal(),ierr);

  EPETRA_TEST_ERR(IsOneToOne!=Map.IsOneToOne(),ierr);

  int *MyElementSizeList;

  if (ElementSizeList==0)
    {
      // Constant-size case: every local element must report ElementSize.
      EPETRA_TEST_ERR(Map.ElementSize()!=ElementSize,ierr);

      MyElementSizeList = new int[NumMyElements];

      EPETRA_TEST_ERR(Map.ElementSizeList(MyElementSizeList)!=0,ierr);
      forierr = 0;
      for (i=0; i<NumMyElements; i++)
        forierr += MyElementSizeList[i]!=ElementSize;
      EPETRA_TEST_ERR(forierr,ierr);

      EPETRA_TEST_ERR(Map.MaxMyElementSize() != ElementSize,ierr);
      EPETRA_TEST_ERR(Map.MinMyElementSize() != ElementSize,ierr);
    }
  else
    {
      // Variable-size case: the list must match, and min/max must agree.
      MyElementSizeList = new int[NumMyElements];
      EPETRA_TEST_ERR(Map.ElementSizeList(MyElementSizeList)!=0,ierr);
      int MaxSize = MyElementSizeList[0];
      int MinSize = MyElementSizeList[0];
      forierr=0;
      for (i=0; i<NumMyElements; i++) {
        forierr += MyElementSizeList[i]!=ElementSizeList[i];
        if (MyElementSizeList[i] > MaxSize)
          MaxSize = MyElementSizeList[i];
        if (MyElementSizeList[i] < MinSize)
          MinSize = MyElementSizeList[i];

        // Test ElementSize(int LID) method
        forierr += Map.ElementSize(Map.LID(MyGlobalElements[i])) != ElementSizeList[i];
      }
      EPETRA_TEST_ERR(forierr,ierr);

      EPETRA_TEST_ERR(MaxSize !=Map.MaxMyElementSize(),ierr);
      EPETRA_TEST_ERR(MinSize !=Map.MinMyElementSize(),ierr);
    }

  // The map's comm must mirror the one we were given.
  const Epetra_Comm & Comm1 = Map.Comm();

  EPETRA_TEST_ERR(Comm1.NumProc()!=Comm.NumProc(),ierr);

  EPETRA_TEST_ERR(Comm1.MyPID()!=Comm.MyPID(),ierr);

  EPETRA_TEST_ERR(Map.IndexBase()!=IndexBase,ierr);

  EPETRA_TEST_ERR(!Map.LinearMap() && MyGlobalElements==0,ierr);

  EPETRA_TEST_ERR(Map.LinearMap() && MyGlobalElements!=0,ierr);

  EPETRA_TEST_ERR(Map.MaxAllGID()!=NumGlobalElements-1+IndexBase,ierr);

  EPETRA_TEST_ERR(Map.MaxElementSize()!=ElementSize,ierr);

  int MaxLID = Map.MaxLID();
  EPETRA_TEST_ERR(MaxLID!=NumMyElements-1,ierr);

  // NOTE(review): the expected MaxMyGID/MinMyGID formulas below (including
  // the +=3 adjustment for PIDs above 2) encode the specific test maps this
  // harness is run against — confirm against the callers before changing.
  int MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  if (!DistributedGlobal) MaxMyGID = NumMyElements-1+IndexBase;
  EPETRA_TEST_ERR(Map.MaxMyGID()!=MaxMyGID,ierr);

  EPETRA_TEST_ERR(Map.MinAllGID()!=IndexBase,ierr);

  if (ElementSizeList==0)
    {
      EPETRA_TEST_ERR(Map.MinElementSize()!=ElementSize,ierr);
    }
  else EPETRA_TEST_ERR(Map.MinElementSize()!=2,ierr);

  int MinLID = Map.MinLID();
  EPETRA_TEST_ERR(MinLID!=0,ierr);

  int MinMyGID = Comm.MyPID()*NumMyElements+IndexBase;
  if (Comm.MyPID()>2) MinMyGID+=3;
  if (!DistributedGlobal) MinMyGID = IndexBase; // Not really needed
  EPETRA_TEST_ERR(Map.MinMyGID()!=MinMyGID,ierr);

  int * MyGlobalElements1 = new int[NumMyElements];
  EPETRA_TEST_ERR(Map.MyGlobalElements(MyGlobalElements1)!=0,ierr);

  forierr = 0;
  if (MyGlobalElements==0) {
    // Linear map: GIDs must be MinMyGID, MinMyGID+1, ...
    for (i=0; i<NumMyElements; i++)
      forierr += MyGlobalElements1[i]!=MinMyGID+i;
    EPETRA_TEST_ERR(forierr,ierr);
  }
  else {
    for (i=0; i<NumMyElements; i++)
      forierr += MyGlobalElements[i]!=MyGlobalElements1[i];
    EPETRA_TEST_ERR(forierr,ierr);
  }
  EPETRA_TEST_ERR(Map.NumGlobalElements()!=NumGlobalElements,ierr);

  EPETRA_TEST_ERR(Map.NumGlobalPoints()!=NumGlobalPoints,ierr);

  EPETRA_TEST_ERR(Map.NumMyElements()!=NumMyElements,ierr);

  EPETRA_TEST_ERR(Map.NumMyPoints()!=NumMyPoints,ierr);

  // GID/LID round trips, including out-of-range queries.
  int MaxMyGID2 = Map.GID(Map.LID(MaxMyGID));
  EPETRA_TEST_ERR(MaxMyGID2 != MaxMyGID,ierr);
  int MaxLID2 = Map.LID(Map.GID(MaxLID));
  EPETRA_TEST_ERR(MaxLID2 != MaxLID,ierr);

  EPETRA_TEST_ERR(Map.GID(MaxLID+1) != IndexBase-1,ierr);// MaxLID+1 doesn't exist
  EPETRA_TEST_ERR(Map.LID(MaxMyGID+1) != -1,ierr);// MaxMyGID+1 doesn't exist or is on a different processor

  EPETRA_TEST_ERR(!Map.MyGID(MaxMyGID),ierr);
  EPETRA_TEST_ERR(Map.MyGID(MaxMyGID+1),ierr);

  EPETRA_TEST_ERR(!Map.MyLID(MaxLID),ierr);
  EPETRA_TEST_ERR(Map.MyLID(MaxLID+1),ierr);

  EPETRA_TEST_ERR(!Map.MyGID(Map.GID(MaxLID)),ierr);
  EPETRA_TEST_ERR(Map.MyGID(Map.GID(MaxLID+1)),ierr);

  EPETRA_TEST_ERR(!Map.MyLID(Map.LID(MaxMyGID)),ierr);
  EPETRA_TEST_ERR(Map.MyLID(Map.LID(MaxMyGID+1)),ierr);

  // Test the FirstPointInElementList methods, begin by testing that they produce identical results
  int * FirstPointInElementList = new int[NumMyElements+1];
  Map.FirstPointInElementList(FirstPointInElementList);
  int * FirstPointInElementList1 = Map.FirstPointInElementList();
  forierr = 0;
  for (i=0; i<=NumMyElements; i++)
    forierr += FirstPointInElementList[i]!=FirstPointInElementList1[i];
  EPETRA_TEST_ERR(forierr,ierr);
  // Now make sure values are correct
  forierr = 0;
  if (Map.ConstantElementSize()) {
    for (i=0; i<=NumMyElements; i++)
      forierr += FirstPointInElementList1[i]!=(i*ElementSize);// NOTE:FirstPointInElement[NumMyElements] is not the first point of an element
    EPETRA_TEST_ERR(forierr,ierr);
  }
  else {
    int FirstPoint = 0;
    for (i=0; i<NumMyElements; i++) {
      forierr += FirstPointInElementList1[i]!=FirstPoint;
      FirstPoint += ElementSizeList[i];
    }
    EPETRA_TEST_ERR(forierr,ierr);
    EPETRA_TEST_ERR(FirstPointInElementList[NumMyElements] != NumMyPoints,ierr);// The last entry in the array = the total number of Points on the proc
  }
  delete [] FirstPointInElementList;

  // Declare some variables for the FindLocalElementID test
  int ElementID, Offset;
  // Test the PointToElementList methods, begin by testing that they produce identical results
  int * PointToElementList = new int[NumMyPoints];
  Map.PointToElementList(PointToElementList);
  int * PointToElementList1 = Map.PointToElementList();
  forierr = 0;
  for (i=0; i<NumMyPoints; i++)
    forierr += PointToElementList1[i] != PointToElementList[i];
  EPETRA_TEST_ERR(forierr,ierr);
  //Now make sure values are correct
  forierr=0;
  if (Map.ConstantElementSize()) {
    for (i=0; i<NumMyElements; i++)
      for (int j=0; j<ElementSize; j++) {
        forierr += PointToElementList[i*ElementSize+j] != i;
        // Test FindLocalElementID method
        Map.FindLocalElementID(i*ElementSize+j,ElementID,Offset);
        forierr += ElementID != i || Offset != j;
      }
    EPETRA_TEST_ERR(forierr,ierr);
  }
  else {
    int MyPointTot = 0; // Keep track of total number of points in all previously completely checked elements
    for (i=0; i<NumMyElements; i++) {
      for (int j=0; j<ElementSizeList[i]; j++) {
        forierr += PointToElementList[MyPointTot+j] != i;
        // Test FindLocalElementID method
        Map.FindLocalElementID(MyPointTot+j,ElementID,Offset);
        forierr += ElementID != i || Offset != j;
      }
      MyPointTot += ElementSizeList[i];
    }
    EPETRA_TEST_ERR(forierr,ierr);
  }
  delete [] PointToElementList;

  // Check RemoteIDList function that includes a parameter for size
  // Get some GIDs off of each processor to test
  int TotalNumEle, NumElePerProc, NumProc = Comm.NumProc();
  int MinNumEleOnProc;
  int NumMyEle = Map.NumMyElements();
  Comm.MinAll(&NumMyEle,&MinNumEleOnProc,1);
  if (MinNumEleOnProc > 5) NumElePerProc = 6;
  else NumElePerProc = MinNumEleOnProc;
  if (NumElePerProc > 0) {
    TotalNumEle = NumElePerProc*NumProc;
    int * MyGIDlist = new int[NumElePerProc];
    int * GIDlist = new int[TotalNumEle];
    int * PIDlist = new int[TotalNumEle];
    int * LIDlist = new int[TotalNumEle];
    int * SizeList = new int[TotalNumEle];
    for (i=0; i<NumElePerProc; i++)
      MyGIDlist[i] = MyGlobalElements1[i];
    Comm.GatherAll(MyGIDlist,GIDlist,NumElePerProc);// Get a few values from each proc
    Map.RemoteIDList(TotalNumEle, GIDlist, PIDlist, LIDlist, SizeList);
    int MyPID= Comm.MyPID();
    forierr = 0;
    for (i=0; i<TotalNumEle; i++) {
      if (Map.MyGID(GIDlist[i])) {
        forierr += PIDlist[i] != MyPID;
        forierr += !Map.MyLID(Map.LID(GIDlist[i])) || Map.LID(GIDlist[i]) != LIDlist[i] || Map.GID(LIDlist[i]) != GIDlist[i];
        forierr += SizeList[i] != Map.ElementSize(LIDlist[i]);
      }
      else {
        forierr += PIDlist[i] == MyPID; // If MyGID comes back false, the PID listed should be that of another proc
      }
    }
    EPETRA_TEST_ERR(forierr,ierr);

    delete [] MyGIDlist;
    delete [] GIDlist;
    delete [] PIDlist;
    delete [] LIDlist;
    delete [] SizeList;
  }

  delete [] MyGlobalElements1;
  delete [] MyElementSizeList;

  // Check RemoteIDList function (assumes all maps are linear, even if not stored that way)
  if (Map.LinearMap()) {

    int * GIDList = new int[3];
    int * PIDList = new int[3];
    int * LIDList = new int[3];
    int MyPID = Map.Comm().MyPID();

    int NumIDs = 0;
    //GIDList[NumIDs++] = Map.MaxAllGID()+1; // Should return -1 for both PID and LID
    if (Map.MinMyGID()-1>=Map.MinAllGID()) GIDList[NumIDs++] = Map.MinMyGID()-1;
    if (Map.MaxMyGID()+1<=Map.MaxAllGID()) GIDList[NumIDs++] = Map.MaxMyGID()+1;

    Map.RemoteIDList(NumIDs, GIDList, PIDList, LIDList);

    NumIDs = 0;

    //EPETRA_TEST_ERR(!(PIDList[NumIDs]==-1),ierr);
    //EPETRA_TEST_ERR(!(LIDList[NumIDs++]==-1),ierr);

    // Neighboring GIDs should resolve to the neighboring processors.
    if (Map.MinMyGID()-1>=Map.MinAllGID()) EPETRA_TEST_ERR(!(PIDList[NumIDs++]==MyPID-1),ierr);
    if (Map.MaxMyGID()+1<=Map.MaxAllGID()) EPETRA_TEST_ERR(!(PIDList[NumIDs]==MyPID+1),ierr);
    if (Map.MaxMyGID()+1<=Map.MaxAllGID()) EPETRA_TEST_ERR(!(LIDList[NumIDs++]==0),ierr);

    delete [] GIDList;
    delete [] PIDList;
    delete [] LIDList;
  }

  return (ierr);
}
//==============================================================================
// Return true iff this map and Map describe exactly the same distribution:
// same global attributes, and on EVERY processor the same local GID list and
// element sizes.  Involves collective communication (MinAll) unless the two
// maps share the same underlying data object.
bool Epetra_BlockMap::SameAs(const Epetra_BlockMap & Map) const {

  // Quickest test: See if both maps share an inner data class
  if (this->BlockMapData_ == Map.BlockMapData_)
    return(true);

  if(!GlobalIndicesTypeMatch(Map))
    return(false);

  // Next check other global properties that are easy global attributes
  if (BlockMapData_->MinAllGID_ != Map.MinAllGID64() ||
      BlockMapData_->MaxAllGID_ != Map.MaxAllGID64() ||
      BlockMapData_->NumGlobalElements_ != Map.NumGlobalElements64() ||
      BlockMapData_->IndexBase_ != Map.IndexBase())
    return(false);

  // Last possible global check for constant element sizes
  if (BlockMapData_->ConstantElementSize_ && BlockMapData_->ElementSize_!=Map.ElementSize())
    return(false);

  // If we get this far, we need to check local properties and then check across
  // all processors to see if local properties are all true

  int numMyElements = BlockMapData_->NumMyElements_;

  int MySameMap = 1; // Assume not needed

  // First check if number of element is the same in each map
  if (numMyElements != Map.NumMyElements()) MySameMap = 0;

  // If numMyElements is the same, check to see that list of GIDs is the same
  if (MySameMap==1) {
    if (LinearMap() && Map.LinearMap() ) {
      // For linear maps, just need to check whether lower bound is the same
      if (MinMyGID64() != Map.MinMyGID64() )
        MySameMap = 0;
    }
    else {
      for (int i = 0; i < numMyElements; i++) {
        if (GID64(i) != Map.GID64(i)) {
          MySameMap = 0;
          break;
        }
      }
    }
  }
  //    for (int i = 0; i < numMyElements; i++)
  //      if (GID64(i) != Map.GID64(i)) MySameMap = 0;

  // If GIDs are the same, check to see element sizes are the same
  if (MySameMap==1 && !BlockMapData_->ConstantElementSize_) {
    int * sizeList1 = ElementSizeList();
    int * sizeList2 = Map.ElementSizeList();
    for (int i = 0; i < numMyElements; i++) if (sizeList1[i] != sizeList2[i]) MySameMap=0;
  }
  // Now get min of MySameMap across all processors

  int GlobalSameMap = 0;
  int err = Comm().MinAll(&MySameMap, &GlobalSameMap, 1);
  assert(err==0);

  return(GlobalSameMap==1);
}