int checkmap(Epetra_Map & Map, int NumGlobalElements, int NumMyElements,
             int *MyGlobalElements, int IndexBase, Epetra_Comm& Comm,
             bool DistributedGlobal)
{
  int i, ierr=0, forierr = 0;

  EPETRA_TEST_ERR(!Map.ConstantElementSize(),ierr);

  EPETRA_TEST_ERR(DistributedGlobal!=Map.DistributedGlobal(),ierr);

  EPETRA_TEST_ERR(Map.ElementSize()!=1,ierr);

  int *MyElementSizeList = new int[NumMyElements];

  EPETRA_TEST_ERR(Map.ElementSizeList(MyElementSizeList)!=0,ierr);

  forierr = 0;
  for (i=0; i<NumMyElements; i++)
    forierr += MyElementSizeList[i]!=1;
  EPETRA_TEST_ERR(forierr,ierr);

  delete [] MyElementSizeList;

  const Epetra_Comm & Comm1 = Map.Comm();

  EPETRA_TEST_ERR(Comm1.NumProc()!=Comm.NumProc(),ierr);

  EPETRA_TEST_ERR(Comm1.MyPID()!=Comm.MyPID(),ierr);

  EPETRA_TEST_ERR(Map.IndexBase()!=IndexBase,ierr);

  EPETRA_TEST_ERR(!Map.LinearMap() && MyGlobalElements==0,ierr);

  EPETRA_TEST_ERR(Map.LinearMap() && MyGlobalElements!=0,ierr);

  EPETRA_TEST_ERR(Map.MaxAllGID()!=NumGlobalElements-1+IndexBase,ierr);

  EPETRA_TEST_ERR(Map.MaxElementSize()!=1,ierr);

  int MaxLID = Map.MaxLID();
  EPETRA_TEST_ERR(MaxLID!=NumMyElements-1,ierr);

  int MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  if (!DistributedGlobal) MaxMyGID = NumMyElements-1+IndexBase;
  EPETRA_TEST_ERR(Map.MaxMyGID()!=MaxMyGID,ierr);

  EPETRA_TEST_ERR(Map.MinAllGID()!=IndexBase,ierr);

  EPETRA_TEST_ERR(Map.MinElementSize()!=1,ierr);

  EPETRA_TEST_ERR(Map.MinLID()!=0,ierr);

  int MinMyGID = Comm.MyPID()*NumMyElements+IndexBase;
  if (Comm.MyPID()>2) MinMyGID+=3;
  if (!DistributedGlobal) MinMyGID = 0;
  EPETRA_TEST_ERR(Map.MinMyGID()!=MinMyGID,ierr);

  int * MyGlobalElements1 = new int[NumMyElements];

  EPETRA_TEST_ERR(Map.MyGlobalElements(MyGlobalElements1)!=0,ierr);

  forierr = 0;
  if (MyGlobalElements==0) {
    for (i=0; i<NumMyElements; i++)
      forierr += MyGlobalElements1[i]!=MinMyGID+i;
    EPETRA_TEST_ERR(forierr,ierr);
  }
  else {
    for (i=0; i<NumMyElements; i++)
      forierr += MyGlobalElements[i]!=MyGlobalElements1[i];
    EPETRA_TEST_ERR(forierr,ierr);
  }

  EPETRA_TEST_ERR(Map.NumGlobalElements()!=NumGlobalElements,ierr);

  EPETRA_TEST_ERR(Map.NumGlobalPoints()!=NumGlobalElements,ierr);

  EPETRA_TEST_ERR(Map.NumMyElements()!=NumMyElements,ierr);

  EPETRA_TEST_ERR(Map.NumMyPoints()!=NumMyElements,ierr);

  int MaxMyGID2 = Map.GID(Map.LID(MaxMyGID));
  EPETRA_TEST_ERR(MaxMyGID2 != MaxMyGID,ierr);
  int MaxLID2 = Map.LID(Map.GID(MaxLID));
  EPETRA_TEST_ERR(MaxLID2 != MaxLID,ierr);

  EPETRA_TEST_ERR(Map.GID(MaxLID+1) != IndexBase-1,ierr);  // MaxLID+1 doesn't exist
  EPETRA_TEST_ERR(Map.LID(MaxMyGID+1) != -1,ierr);         // MaxMyGID+1 doesn't exist or is on a different processor

  EPETRA_TEST_ERR(!Map.MyGID(MaxMyGID),ierr);
  EPETRA_TEST_ERR(Map.MyGID(MaxMyGID+1),ierr);

  EPETRA_TEST_ERR(!Map.MyLID(MaxLID),ierr);
  EPETRA_TEST_ERR(Map.MyLID(MaxLID+1),ierr);

  EPETRA_TEST_ERR(!Map.MyGID(Map.GID(MaxLID)),ierr);
  EPETRA_TEST_ERR(Map.MyGID(Map.GID(MaxLID+1)),ierr);

  EPETRA_TEST_ERR(!Map.MyLID(Map.LID(MaxMyGID)),ierr);
  EPETRA_TEST_ERR(Map.MyLID(Map.LID(MaxMyGID+1)),ierr);

  // Check RemoteIDList function
  // Get some GIDs off of each processor to test
  int TotalNumEle, NumElePerProc, NumProc = Comm.NumProc();
  int MinNumEleOnProc;
  int NumMyEle=Map.NumMyElements();
  Comm.MinAll(&NumMyEle,&MinNumEleOnProc,1);
  if (MinNumEleOnProc > 5) NumElePerProc = 6;
  else NumElePerProc = MinNumEleOnProc;
  if (NumElePerProc > 0) {
    TotalNumEle = NumElePerProc*NumProc;
    int * MyGIDlist = new int[NumElePerProc];
    int * GIDlist = new int[TotalNumEle];
    int * PIDlist = new int[TotalNumEle];
    int * LIDlist = new int[TotalNumEle];
    for (i=0; i<NumElePerProc; i++)
      MyGIDlist[i] = MyGlobalElements1[i];
    Comm.GatherAll(MyGIDlist,GIDlist,NumElePerProc);  // Get a few values from each proc
    Map.RemoteIDList(TotalNumEle, GIDlist, PIDlist, LIDlist);
    int MyPID = Comm.MyPID();

    forierr = 0;
    for (i=0; i<TotalNumEle; i++) {
      if (Map.MyGID(GIDlist[i])) {
        forierr += PIDlist[i] != MyPID;
        forierr += !Map.MyLID(Map.LID(GIDlist[i])) || Map.LID(GIDlist[i]) != LIDlist[i] || Map.GID(LIDlist[i]) != GIDlist[i];
      }
      else {
        forierr += PIDlist[i] == MyPID;  // If MyGID comes back false, the PID listed should be that of another proc
      }
    }
    EPETRA_TEST_ERR(forierr,ierr);

    delete [] MyGIDlist;
    delete [] GIDlist;
    delete [] PIDlist;
    delete [] LIDlist;
  }

  delete [] MyGlobalElements1;

  // Check RemoteIDList function (assumes all maps are linear, even if not stored that way)
  if (Map.LinearMap()) {

    int * GIDList = new int[3];
    int * PIDList = new int[3];
    int * LIDList = new int[3];
    int MyPID = Map.Comm().MyPID();

    int NumIDs = 0;
    //GIDList[NumIDs++] = Map.MaxAllGID()+1; // Should return -1 for both PID and LID
    if (Map.MinMyGID()-1>=Map.MinAllGID()) GIDList[NumIDs++] = Map.MinMyGID()-1;
    if (Map.MaxMyGID()+1<=Map.MaxAllGID()) GIDList[NumIDs++] = Map.MaxMyGID()+1;

    Map.RemoteIDList(NumIDs, GIDList, PIDList, LIDList);

    NumIDs = 0;

    //EPETRA_TEST_ERR(!(PIDList[NumIDs]==-1),ierr);
    //EPETRA_TEST_ERR(!(LIDList[NumIDs++]==-1),ierr);

    if (Map.MinMyGID()-1>=Map.MinAllGID()) EPETRA_TEST_ERR(!(PIDList[NumIDs++]==MyPID-1),ierr);
    if (Map.MaxMyGID()+1<=Map.MaxAllGID()) EPETRA_TEST_ERR(!(PIDList[NumIDs]==MyPID+1),ierr);
    if (Map.MaxMyGID()+1<=Map.MaxAllGID()) EPETRA_TEST_ERR(!(LIDList[NumIDs++]==0),ierr);

    delete [] GIDList;
    delete [] PIDList;
    delete [] LIDList;
  }

  return (ierr);
}
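// ---------------------------------------------------------------------------
// Illustration only (not part of the original test): a minimal sketch of how
// checkmap might be driven for a uniform linear map.  The helper name
// exampleCheckLinearMap and the element counts are assumptions made for this
// sketch.  Note that checkmap's expected-GID formulas (the "+=3" corrections
// above) assume the harness's distribution, in which processes 0-2 each own
// one extra element, so the sketch mirrors that layout.
// ---------------------------------------------------------------------------
static int exampleCheckLinearMap(Epetra_Comm& Comm) {
  int NumMyElements = 10;  // base count per process (assumed for illustration)
  int NumGlobalElements = NumMyElements*Comm.NumProc() + EPETRA_MIN(Comm.NumProc(),3);
  if (Comm.MyPID() < 3) NumMyElements++;  // processes 0-2 own one extra element
  int IndexBase = 0;

  // Contiguous (linear) map built from per-process element counts.
  Epetra_Map Map(NumGlobalElements, NumMyElements, IndexBase, Comm);

  // Passing MyGlobalElements==0 tells checkmap the map is linear, so it derives
  // the expected GIDs itself; DistributedGlobal is true on more than one process.
  return checkmap(Map, NumGlobalElements, NumMyElements, 0, IndexBase,
                  Comm, Comm.NumProc()>1);
}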
template <typename int_type, class MapType1, class MapType2>
int TLowCommunicationMakeColMapAndReindex(int N, const int * rowptr, int * colind_LID,
                                          const int_type *colind_GID, const Epetra_Map& domainMap,
                                          const int * owningPIDs,
                                          bool SortGhostsAssociatedWithEachProcessor,
                                          std::vector<int>& RemotePIDs, MapType1 & NewColMap)
{
  int i,j;

  // Sanity checks
  bool UseLL;
  if(domainMap.GlobalIndicesLongLong()) UseLL=true;
  else if(domainMap.GlobalIndicesInt()) UseLL=false;
  else throw std::runtime_error("LowCommunicationMakeColMapAndReindex: cannot detect int type.");

  // Scan all column indices and sort into two groups:
  // Local:  those whose GID matches a GID of the domain map on this processor and
  // Remote: all others.
  int numDomainElements = domainMap.NumMyElements();
  bool * LocalGIDs = 0;
  if (numDomainElements>0) LocalGIDs = new bool[numDomainElements];
  for (i=0; i<numDomainElements; i++) LocalGIDs[i] = false;  // Assume domain GIDs are not local

  bool DoSizes = !domainMap.ConstantElementSize();  // If not constant element size, then error
  if(DoSizes) throw std::runtime_error("LowCommunicationMakeColMapAndReindex: cannot handle non-constant sized domainMap.");

  // In principle it is good to have RemoteGIDs and RemoteGIDList be as long as the number of remote GIDs
  // on this processor, but this would require two passes through the column IDs, so we make it the max of 100
  // and the number of block rows.
  const int numMyBlockRows = N;
  int hashsize = numMyBlockRows;
  if (hashsize < 100) hashsize = 100;
  Epetra_HashTable<int_type> RemoteGIDs(hashsize);
  std::vector<int_type> RemoteGIDList; RemoteGIDList.reserve(hashsize);
  std::vector<int> PIDList;            PIDList.reserve(hashsize);

  // Here we start using the *int* colind array.  If int_type==int this clobbers the GIDs; if
  // int_type==long long, then this is the first use of the colind array.
  // For *local* GIDs, we set colind to their LID in the domainMap.  For *remote* GIDs,
  // we set colind to (numDomainElements+NumRemoteColGIDs) before the increment of
  // the remote count.  These numberings will be separate because no local LID is greater
  // than numDomainElements.
  int NumLocalColGIDs = 0;
  int NumRemoteColGIDs = 0;
  for(i = 0; i < numMyBlockRows; i++) {
    for(j = rowptr[i]; j < rowptr[i+1]; j++) {
      int_type GID = colind_GID[j];
      // Check if GID matches a row GID
      int LID = domainMap.LID(GID);
      if(LID != -1) {
        bool alreadyFound = LocalGIDs[LID];
        if (!alreadyFound) {
          LocalGIDs[LID] = true;  // There is a column in the graph associated with this domain map GID
          NumLocalColGIDs++;
        }
        colind_LID[j] = LID;
      }
      else {
        int_type hash_value=RemoteGIDs.Get(GID);
        if(hash_value == -1) {  // This means it's a new remote GID
          int PID = owningPIDs[j];
          if(PID==-1) throw std::runtime_error("LowCommunicationMakeColMapAndReindex: Cannot figure out if PID is owned.");
          colind_LID[j] = numDomainElements + NumRemoteColGIDs;
          RemoteGIDs.Add(GID, NumRemoteColGIDs);
          RemoteGIDList.push_back(GID);
          PIDList.push_back(PID);
          NumRemoteColGIDs++;
        }
        else
          colind_LID[j] = numDomainElements + hash_value;
      }
    }
  }

  // Possible short-circuit: If all domain map GIDs are present as column indices, then set ColMap=domainMap and quit
  if (domainMap.Comm().NumProc()==1) {
    if (NumRemoteColGIDs!=0) {
      // Sanity test: with one processor, there can be no remote GIDs
      throw std::runtime_error("Some column IDs are not in domainMap.  If matrix is rectangular, you must pass in a domainMap");
    }
    if (NumLocalColGIDs==numDomainElements) {
      if (LocalGIDs!=0) delete [] LocalGIDs;
      // In this case, we just use the domainMap's indices, which is, not coincidentally,
      // what we clobbered colind with up above anyway.  No further reindexing is needed.
      NewColMap = domainMap;
      return 0;
    }
  }

  // Now build integer array containing column GIDs
  // Build back end, containing remote GIDs, first
  int numMyBlockCols = NumLocalColGIDs + NumRemoteColGIDs;
  std::vector<int_type> ColIndices;
  int_type * RemoteColIndices=0;
  if(numMyBlockCols > 0) {
    ColIndices.resize(numMyBlockCols);
    if(NumLocalColGIDs!=numMyBlockCols) RemoteColIndices = &ColIndices[NumLocalColGIDs];  // Points to back end of ColIndices
    else RemoteColIndices=0;
  }

  for(i = 0; i < NumRemoteColGIDs; i++)
    RemoteColIndices[i] = RemoteGIDList[i];

  // Build permute array for *remote* reindexing.
  std::vector<int> RemotePermuteIDs(NumRemoteColGIDs);
  for(i=0; i<NumRemoteColGIDs; i++) RemotePermuteIDs[i]=i;

  // Sort external column indices so that all columns coming from a given remote processor are contiguous
  int NumListsInt=0;
  int NumListsLL =0;
  int * IntSortLists[2];
  long long * LLSortLists[2];
  int * RemotePermuteIDs_ptr = RemotePermuteIDs.size() ? &RemotePermuteIDs[0] : 0;
  if(!UseLL) {
    // int version
    IntSortLists[0] = (int*) RemoteColIndices;
    IntSortLists[1] = RemotePermuteIDs_ptr;
    NumListsInt=2;
  }
  else {
    // LL version
    LLSortLists[0]  = (long long*) RemoteColIndices;
    IntSortLists[0] = RemotePermuteIDs_ptr;
    NumListsInt = NumListsLL = 1;
  }

  int * PIDList_ptr = PIDList.size() ? &PIDList[0] : 0;
  Epetra_Util::Sort(true, NumRemoteColGIDs, PIDList_ptr, 0, 0, NumListsInt, IntSortLists, NumListsLL, LLSortLists);

  // Stash the RemotePIDs
  PIDList.resize(NumRemoteColGIDs);
  RemotePIDs = PIDList;

  if (SortGhostsAssociatedWithEachProcessor) {
    // Sort external column indices so that columns from a given remote processor are not only contiguous
    // but also in ascending order.  NOTE: I don't know if the number of externals associated
    // with a given remote processor is known at this point ... so I count them here.

    // NTS: Only sort the RemoteColIndices this time...
    int StartCurrent, StartNext;
    StartCurrent = 0; StartNext = 1;
    while ( StartNext < NumRemoteColGIDs ) {
      if (PIDList[StartNext]==PIDList[StartNext-1]) StartNext++;
      else {
        IntSortLists[0] = &RemotePermuteIDs[StartCurrent];
        Epetra_Util::Sort(true, StartNext-StartCurrent, &(RemoteColIndices[StartCurrent]), 0, 0, 1, IntSortLists, 0, 0);
        StartCurrent = StartNext; StartNext++;
      }
    }
    IntSortLists[0] = &RemotePermuteIDs[StartCurrent];
    Epetra_Util::Sort(true, StartNext-StartCurrent, &(RemoteColIndices[StartCurrent]), 0, 0, 1, IntSortLists, 0, 0);
  }

  // Reverse the permutation to get the information we actually care about
  std::vector<int> ReverseRemotePermuteIDs(NumRemoteColGIDs);
  for(i=0; i<NumRemoteColGIDs; i++) ReverseRemotePermuteIDs[RemotePermuteIDs[i]]=i;

  // Build permute array for *local* reindexing.
  bool use_local_permute=false;
  std::vector<int> LocalPermuteIDs(numDomainElements);

  // Now fill front end.  Two cases:
  // (1) If the number of Local column GIDs is the same as the number of Local domain GIDs, we
  //     can simply read the domain GIDs into the front part of ColIndices, otherwise
  // (2) We step through the GIDs of the domainMap, checking to see if each domain GID is a column GID.
  //     We want to do this to maintain a consistent ordering of GIDs between the columns and the domain.
  if(NumLocalColGIDs == domainMap.NumMyElements()) {
    if(NumLocalColGIDs > 0) {
      domainMap.MyGlobalElements(&ColIndices[0]);  // Load the domain map's global indices into the first NumLocalColGIDs entries of the column GID list
    }
  }
  else {
    int_type* MyGlobalElements = 0;
    domainMap.MyGlobalElementsPtr(MyGlobalElements);

    int* ElementSizeList = 0;
    if(DoSizes)
      ElementSizeList = domainMap.ElementSizeList();

    int NumLocalAgain = 0;
    use_local_permute = true;
    for(i = 0; i < numDomainElements; i++) {
      if(LocalGIDs[i]) {
        LocalPermuteIDs[i] = NumLocalAgain;
        ColIndices[NumLocalAgain++] = MyGlobalElements[i];
      }
    }
    assert(NumLocalAgain==NumLocalColGIDs);  // Sanity test
  }

  // Done with this array
  if (LocalGIDs!=0) delete [] LocalGIDs;

  // Make column map with same element sizes as domain map
  int_type * ColIndices_ptr = ColIndices.size() ? &ColIndices[0] : 0;
  MapType2 temp((int_type)(-1), numMyBlockCols, ColIndices_ptr, (int_type)domainMap.IndexBase64(), domainMap.Comm());
  NewColMap = temp;

  // Low-cost reindex of the matrix
  for(i=0; i<numMyBlockRows; i++) {
    for(j=rowptr[i]; j<rowptr[i+1]; j++) {
      int ID=colind_LID[j];
      if(ID < numDomainElements) {
        if(use_local_permute) colind_LID[j] = LocalPermuteIDs[colind_LID[j]];
        // In the case where use_local_permute==false, we just copy the DomainMap's ordering,
        // which happens to be what we put in colind to begin with.
      }
      else
        colind_LID[j] = NumLocalColGIDs + ReverseRemotePermuteIDs[colind_LID[j]-numDomainElements];
    }
  }
  return 0;
}
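// ---------------------------------------------------------------------------
// Illustration only (not in the original source): a minimal serial sketch of
// calling the routine above.  The helper name exampleMakeColMap, the tiny 2x3
// CSR pattern, and the <int, Epetra_Map, Epetra_Map> instantiation are
// assumptions made for this sketch; in practice the function is reached
// through a LowCommunicationMakeColMapAndReindex wrapper during matrix import.
// ---------------------------------------------------------------------------
static int exampleMakeColMap(const Epetra_Comm& Comm) {
  if (Comm.NumProc()!=1) return 0;  // sketch covers the serial path only

  // Domain map with three contiguous GIDs {0,1,2} owned by this process.
  Epetra_Map domainMap(3, 0, Comm);

  // CSR pattern of a 2x3 matrix: row 0 uses columns {0,2}, row 1 uses {2}.
  int rowptr[3]     = {0, 2, 3};
  int colind[3]     = {0, 2, 2};    // on entry: global IDs; on exit: local column IDs
  int owningPIDs[3] = {0, 0, 0};    // consulted only for off-process columns; unused here

  std::vector<int> RemotePIDs;
  Epetra_Map NewColMap(0, 0, Comm);  // placeholder; overwritten by the call

  // With int GIDs, colind serves as both colind_GID (input) and colind_LID
  // (output): as noted above, the routine clobbers the GIDs with local indices.
  int rv = TLowCommunicationMakeColMapAndReindex<int, Epetra_Map, Epetra_Map>(
      2, rowptr, colind, colind, domainMap, owningPIDs,
      false, RemotePIDs, NewColMap);

  // NewColMap now holds GIDs {0,2} in domain-map order, and colind has been
  // reindexed to {0,1,1} against that column map.
  return rv;
}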