static Epetra_MultiVector
getComponent( spaceT const& Xh, Epetra_MultiVector const& sol )
{
    Epetra_Map componentMap( epetraMap( Xh->template functionSpace<index>()->map() ) );
    Epetra_Map globalMap( epetraMap( Xh->map() ) );
    //DVLOG(2) << "Component map: " << componentMap << "\n";

    Epetra_MultiVector component( componentMap, 1 );

    int Length = component.MyLength();
    int shift = Xh->nDofStart( index );

    for ( int i = 0; i < Length; i++ )
    {
        int compGlobalID = componentMap.GID( i );

        if ( compGlobalID >= 0 )
        {
            int compLocalID = componentMap.LID( compGlobalID );
            int localID = globalMap.LID( compGlobalID + shift );
            // int globalID = globalMap.GID( localID );

            DVLOG(2) << "[MyBackend] Copy entry sol[" << localID << "]=" << sol[0][localID]
                     << " to component[" << compLocalID << "]\n";

            component[0][compLocalID] = sol[0][localID];

            DVLOG(2) << component[0][compLocalID] << "\n";
        }
    }

    return component;
}
static void
UpdateComponent( spaceT const& Xh, Epetra_MultiVector& sol, Epetra_MultiVector& comp )
{
    Epetra_Map componentMap( epetraMap( Xh->template functionSpace<index>()->map() ) );
    Epetra_Map globalMap( epetraMap( Xh->map() ) );

    int shift = Xh->nDofStart( index );
    int Length = comp.MyLength();

    for ( int i = 0; i < Length; i++ )
    {
        int compGlobalID = componentMap.GID( i );

        if ( compGlobalID >= 0 )
        {
            int compLocalID = componentMap.LID( compGlobalID );
            int localID = globalMap.LID( compGlobalID + shift );
            // int globalID = globalMap.GID( localID );

            DVLOG(2) << "Copy entry component[" << compLocalID << "] to sol["
                     << localID << "]=" << sol[0][localID] << "\n";

            sol[0][localID] = comp[0][compLocalID];

            DVLOG(2) << comp[0][compLocalID] << "\n";
        }
    }
}
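// Both helpers above hinge on one index translation: a component-map GID plus the
// component's dof offset (Xh->nDofStart(index)) gives the GID in the monolithic map,
// which is then translated back to a local index. A minimal standalone sketch of that
// translation, assuming a hypothetical layout of two components of n dofs each, with
// component 1 stored at offset n:
#include <Epetra_SerialComm.h>
#include <Epetra_Map.h>
#include <Epetra_MultiVector.h>

int demo_component_extraction()
{
    Epetra_SerialComm comm;
    const int n = 4;                         // dofs per component (hypothetical)
    Epetra_Map componentMap( n, 0, comm );   // map of one component
    Epetra_Map globalMap( 2 * n, 0, comm );  // monolithic map, two components

    Epetra_MultiVector sol( globalMap, 1 );
    sol.Random();

    const int shift = n;                     // plays the role of Xh->nDofStart( index )
    Epetra_MultiVector component( componentMap, 1 );
    for ( int i = 0; i < component.MyLength(); ++i )
    {
        int compGID = componentMap.GID( i );            // GID in the component map
        int localID = globalMap.LID( compGID + shift ); // shifted GID -> monolithic LID
        component[0][i] = sol[0][localID];
    }
    return 0;
}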
size_t
AbstractConcreteMatrixAdapter<Epetra_RowMatrix, DerivedMat>::getGlobalRowNNZ_impl(global_ordinal_t row) const
{
    // check whether row is local, then transform to local index
    Epetra_Map rowmap = this->mat_->RowMatrixRowMap();
    int gid = Teuchos::as<int>(row);
    TEUCHOS_TEST_FOR_EXCEPTION( !rowmap.MyGID(gid),
                                std::invalid_argument,
                                "The specified global row id does not belong to me" );
    int lid = rowmap.LID(gid);
    int nnz = 0;
    this->mat_->NumMyRowEntries(lid, nnz);
    return nnz;
}
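// The adapter above only does a global-to-local check and translation before
// delegating to Epetra_RowMatrix::NumMyRowEntries. A small sketch of the same
// pattern against a plain Epetra_CrsMatrix (serial, hypothetical 1D Laplacian):
#include <Epetra_SerialComm.h>
#include <Epetra_Map.h>
#include <Epetra_CrsMatrix.h>
#include <iostream>

int demo_row_nnz()
{
    Epetra_SerialComm comm;
    Epetra_Map map( 5, 0, comm );
    Epetra_CrsMatrix A( Copy, map, 3 );

    // Assemble a tridiagonal stencil: up to 3 entries per row.
    for ( int lid = 0; lid < map.NumMyElements(); ++lid ) {
        int gid = map.GID( lid );
        double vals[3]; int cols[3]; int n = 0;
        if ( gid > 0 ) { vals[n] = -1.0; cols[n] = gid - 1; ++n; }
        vals[n] = 2.0; cols[n] = gid; ++n;
        if ( gid < map.NumGlobalElements() - 1 ) { vals[n] = -1.0; cols[n] = gid + 1; ++n; }
        A.InsertGlobalValues( gid, n, vals, cols );
    }
    A.FillComplete();

    // Check-then-translate, as in getGlobalRowNNZ_impl.
    int gid = 2, nnz = 0;
    if ( A.RowMap().MyGID( gid ) )
        A.NumMyRowEntries( A.RowMap().LID( gid ), nnz );
    std::cout << "row " << gid << " has " << nnz << " entries\n";
    return nnz;
}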
Teuchos::RCP<Epetra_CrsMatrix>
Epetra_Operator_to_Epetra_Matrix::constructInverseMatrix(const Epetra_Operator &op, const Epetra_Map &map)
{
    int numEntriesPerRow = 0;
    Teuchos::RCP<Epetra_FECrsMatrix> matrix =
        Teuchos::rcp(new Epetra_FECrsMatrix(::Copy, map, numEntriesPerRow));

    int numRows = map.NumGlobalElements();

    Epetra_Vector X(map);
    Epetra_Vector Y(map);

    double tol = 1e-15; // values below this will be considered 0

    for (int rowIndex = 0; rowIndex < numRows; rowIndex++)
    {
        int lid = map.LID(rowIndex);
        if (lid != -1)
        {
            X[lid] = 1.0;
        }
        op.ApplyInverse(X, Y);
        if (lid != -1)
        {
            X[lid] = 0.0;
        }

        std::vector<double> values;
        std::vector<int> indices;
        for (int i = 0; i < map.NumMyElements(); i++)
        {
            // use std::abs: plain abs() would truncate the double to int
            if (std::abs(Y[i]) > tol)
            {
                values.push_back(Y[i]);
                indices.push_back(map.GID(i));
            }
        }

        // guard against dereferencing an empty vector
        if (values.size() > 0)
            matrix->InsertGlobalValues(rowIndex, (int)values.size(), &values[0], &indices[0]);
    }

    matrix->GlobalAssemble();
    return matrix;
}
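// constructInverseMatrix recovers explicit columns by applying ApplyInverse to unit
// vectors. The same probing idea, sketched with Apply on a small diagonal
// Epetra_CrsMatrix so it runs without a solver (all names hypothetical):
#include <Epetra_SerialComm.h>
#include <Epetra_Map.h>
#include <Epetra_CrsMatrix.h>
#include <Epetra_Vector.h>
#include <cmath>
#include <iostream>

int demo_column_probing()
{
    Epetra_SerialComm comm;
    Epetra_Map map( 3, 0, comm );
    Epetra_CrsMatrix A( Copy, map, 1 );
    for ( int gid = 0; gid < 3; ++gid ) {
        double v = 2.0 + gid;
        A.InsertGlobalValues( gid, 1, &v, &gid ); // diag(2,3,4)
    }
    A.FillComplete();

    Epetra_Vector X( map ), Y( map );
    for ( int j = 0; j < 3; ++j ) {
        X[map.LID( j )] = 1.0;
        A.Apply( X, Y );                          // column j of A lands in Y
        X[map.LID( j )] = 0.0;
        for ( int i = 0; i < map.NumMyElements(); ++i )
            if ( std::abs( Y[i] ) > 1e-15 )
                std::cout << "A(" << map.GID( i ) << "," << j << ") = " << Y[i] << "\n";
    }
    return 0;
}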
// Serial only. Very straightforward: one eval per column. Could speed up a lot
// using graph coloring.
double testJacobian (panzer::AssemblyEngine_TemplateManager<panzer::Traits>& ae_tm,
                     const Teuchos::RCP<panzer::LinearObjFactory<panzer::Traits> >& linObjFactory,
                     const Teuchos::RCP<const Epetra_Vector> x0 = Teuchos::null)
{
  using Teuchos::RCP;
  using Teuchos::rcp;

  const RCP<panzer::LinearObjContainer> ghost_con = linObjFactory->buildGhostedLinearObjContainer();
  const RCP<panzer::LinearObjContainer> con = linObjFactory->buildLinearObjContainer();
  linObjFactory->initializeGhostedContainer(panzer::LinearObjContainer::X |
                                            panzer::LinearObjContainer::F |
                                            panzer::LinearObjContainer::Mat, *ghost_con);
  const RCP<panzer::EpetraLinearObjContainer> ep_con =
    Teuchos::rcp_dynamic_cast<panzer::EpetraLinearObjContainer>(con);

  SparseTriple t;
  { // Finite-difference Jacobian. Should use graph coloring to greatly reduce
    // number of evaluations. But brute force for now.
    RCP<Epetra_Vector> x, f0;
    for (int i = -1; ; ++i) {
      linObjFactory->initializeContainer(panzer::LinearObjContainer::X |
                                         panzer::LinearObjContainer::F |
                                         panzer::LinearObjContainer::Mat, *con);
      ghost_con->initialize();
      con->initialize();

      const Epetra_Map& row_map = ep_con->get_A()->RowMap();
      const Epetra_Map& col_map = ep_con->get_A()->ColMap();

      if (i == -1) {
        if (Teuchos::nonnull(x0))
          x = rcp(new Epetra_Vector(*x0));
        else
          x = rcp(new Epetra_Vector(ep_con->get_x()->Map()));
        t.m = t.n = x->GlobalLength();
      }

      // For a linear problem, could make delta 1 to remove cancellation
      // error. But I want to look at a nonlinear Robin condition, so do a true
      // finite difference.
      const double delta = 1e-6;
      const bool i_mine = row_map.MyGID(i);
      double x_prev = 0;
      int i_lid = 0;
      if (i_mine && i >= 0) {
        i_lid = col_map.LID(i);
        x_prev = (*x)[i_lid];
        (*x)[i_lid] += delta;
      }

      ep_con->set_x(x);
      panzer::AssemblyEngineInArgs input(ghost_con, ep_con);
      input.alpha = 0;
      input.beta = 1;
      ae_tm.getAsObject<panzer::Traits::Residual>()->evaluate(input);

      if (i == -1)
        f0 = ep_con->get_f();
      else {
        const Epetra_Vector& f = *ep_con->get_f();
        if (i_mine)
          (*x)[i_lid] = x_prev;
        for (int k = 0; k < f.MyLength(); ++k) {
          const double d = f[k] - (*f0)[k];
          if (d == 0) continue;
          t.i.push_back(row_map.GID(k));
          t.j.push_back(i);
          t.v.push_back(d/delta);
        }
      }
      if (i + 1 == t.m) break;
    }
  }

  { // AD Jacobian.
    linObjFactory->initializeContainer(panzer::LinearObjContainer::X |
                                       panzer::LinearObjContainer::F |
                                       panzer::LinearObjContainer::Mat, *con);
    ghost_con->initialize();
    con->initialize();
    if (Teuchos::nonnull(x0))
      ep_con->set_x(rcp(new Epetra_Vector(*x0)));
    else
      ep_con->get_x()->PutScalar(0);
    panzer::AssemblyEngineInArgs input(ghost_con, ep_con);
    input.alpha = 0;
    input.beta = 1;
    ae_tm.getAsObject<panzer::Traits::Jacobian>()->evaluate(input);
    EpetraExt::RowMatrixToMatrixMarketFile("A_ad.mm", *ep_con->get_A());
  }

  RCP<Epetra_CrsMatrix> A_fd;
  {
    const Epetra_Map& row_map = ep_con->get_A()->RowMap();
    A_fd = rcp(new Epetra_CrsMatrix(Copy, row_map, 40));
    for (int i = 0; i < static_cast<int>(t.v.size()); ++i)
      A_fd->InsertGlobalValues(t.i[i], 1, &t.v[i], &t.j[i]);
    A_fd->FillComplete();
  }
  EpetraExt::RowMatrixToMatrixMarketFile("A_fd.mm", *A_fd);

  { // Check error.
    const double A_fd_inf_norm = A_fd->NormInf();
    RCP<Epetra_CrsMatrix> D = rcp(new Epetra_CrsMatrix(Copy, ep_con->get_A()->Graph()));
    D->FillComplete();
    Epetra_CrsMatrix* D_ptr = D.get();
    EpetraExt::MatrixMatrix::Add(*ep_con->get_A(), false, -1, *A_fd, false, 1, D_ptr);
    const double D_inf_norm = D->NormInf();
    return D_inf_norm / A_fd_inf_norm;
  }
}
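// The finite-difference half of testJacobian is independent of Panzer: column j of
// the Jacobian is approximated as (f(x + delta*e_j) - f(x)) / delta, one residual
// evaluation per column. Self-contained sketch with a toy residual (hypothetical
// f_i = x_i^2 - 1, so the exact Jacobian is diag(2*x_i)):
#include <vector>
#include <cstdio>

static std::vector<double> toy_residual(const std::vector<double>& x)
{
    std::vector<double> f(x.size());
    for (std::size_t i = 0; i < x.size(); ++i) f[i] = x[i]*x[i] - 1.0;
    return f;
}

int demo_fd_jacobian()
{
    const double delta = 1e-6;
    std::vector<double> x = {1.0, 2.0, 3.0};
    const std::vector<double> f0 = toy_residual(x);

    for (std::size_t j = 0; j < x.size(); ++j) {
        x[j] += delta;                       // perturb one dof
        const std::vector<double> f = toy_residual(x);
        x[j] -= delta;                       // restore, as testJacobian restores x_prev
        for (std::size_t i = 0; i < f.size(); ++i) {
            const double d = f[i] - f0[i];
            if (d != 0) std::printf("J(%zu,%zu) ~= %g\n", i, j, d/delta);
        }
    }
    return 0;
}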
int checkmap(Epetra_Map & Map, int NumGlobalElements, int NumMyElements,
             int *MyGlobalElements, int IndexBase, Epetra_Comm& Comm,
             bool DistributedGlobal)
{
  int i, ierr = 0, forierr = 0;

  EPETRA_TEST_ERR(!Map.ConstantElementSize(),ierr);
  EPETRA_TEST_ERR(DistributedGlobal!=Map.DistributedGlobal(),ierr);
  EPETRA_TEST_ERR(Map.ElementSize()!=1,ierr);

  int *MyElementSizeList = new int[NumMyElements];
  EPETRA_TEST_ERR(Map.ElementSizeList(MyElementSizeList)!=0,ierr);
  forierr = 0;
  for (i=0; i<NumMyElements; i++) forierr += MyElementSizeList[i]!=1;
  EPETRA_TEST_ERR(forierr,ierr);
  delete [] MyElementSizeList;

  const Epetra_Comm & Comm1 = Map.Comm();
  EPETRA_TEST_ERR(Comm1.NumProc()!=Comm.NumProc(),ierr);
  EPETRA_TEST_ERR(Comm1.MyPID()!=Comm.MyPID(),ierr);
  EPETRA_TEST_ERR(Map.IndexBase()!=IndexBase,ierr);
  EPETRA_TEST_ERR(!Map.LinearMap() && MyGlobalElements==0,ierr);
  EPETRA_TEST_ERR(Map.LinearMap() && MyGlobalElements!=0,ierr);
  EPETRA_TEST_ERR(Map.MaxAllGID()!=NumGlobalElements-1+IndexBase,ierr);
  EPETRA_TEST_ERR(Map.MaxElementSize()!=1,ierr);

  int MaxLID = Map.MaxLID();
  EPETRA_TEST_ERR(MaxLID!=NumMyElements-1,ierr);

  int MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  if (!DistributedGlobal) MaxMyGID = NumMyElements-1+IndexBase;
  EPETRA_TEST_ERR(Map.MaxMyGID()!=MaxMyGID,ierr);

  EPETRA_TEST_ERR(Map.MinAllGID()!=IndexBase,ierr);
  EPETRA_TEST_ERR(Map.MinElementSize()!=1,ierr);
  EPETRA_TEST_ERR(Map.MinLID()!=0,ierr);

  int MinMyGID = Comm.MyPID()*NumMyElements+IndexBase;
  if (Comm.MyPID()>2) MinMyGID+=3;
  if (!DistributedGlobal) MinMyGID = 0;
  EPETRA_TEST_ERR(Map.MinMyGID()!=MinMyGID,ierr);

  int * MyGlobalElements1 = new int[NumMyElements];
  EPETRA_TEST_ERR(Map.MyGlobalElements(MyGlobalElements1)!=0,ierr);
  forierr = 0;
  if (MyGlobalElements==0) {
    for (i=0; i<NumMyElements; i++)
      forierr += MyGlobalElements1[i]!=MinMyGID+i;
    EPETRA_TEST_ERR(forierr,ierr);
  }
  else {
    for (i=0; i<NumMyElements; i++)
      forierr += MyGlobalElements[i]!=MyGlobalElements1[i];
    EPETRA_TEST_ERR(forierr,ierr);
  }

  EPETRA_TEST_ERR(Map.NumGlobalElements()!=NumGlobalElements,ierr);
  EPETRA_TEST_ERR(Map.NumGlobalPoints()!=NumGlobalElements,ierr);
  EPETRA_TEST_ERR(Map.NumMyElements()!=NumMyElements,ierr);
  EPETRA_TEST_ERR(Map.NumMyPoints()!=NumMyElements,ierr);

  int MaxMyGID2 = Map.GID(Map.LID(MaxMyGID));
  EPETRA_TEST_ERR(MaxMyGID2 != MaxMyGID,ierr);
  int MaxLID2 = Map.LID(Map.GID(MaxLID));
  EPETRA_TEST_ERR(MaxLID2 != MaxLID,ierr);

  EPETRA_TEST_ERR(Map.GID(MaxLID+1) != IndexBase-1,ierr); // MaxLID+1 doesn't exist
  EPETRA_TEST_ERR(Map.LID(MaxMyGID+1) != -1,ierr); // MaxMyGID+1 doesn't exist or is on a different processor

  EPETRA_TEST_ERR(!Map.MyGID(MaxMyGID),ierr);
  EPETRA_TEST_ERR(Map.MyGID(MaxMyGID+1),ierr);
  EPETRA_TEST_ERR(!Map.MyLID(MaxLID),ierr);
  EPETRA_TEST_ERR(Map.MyLID(MaxLID+1),ierr);
  EPETRA_TEST_ERR(!Map.MyGID(Map.GID(MaxLID)),ierr);
  EPETRA_TEST_ERR(Map.MyGID(Map.GID(MaxLID+1)),ierr);
  EPETRA_TEST_ERR(!Map.MyLID(Map.LID(MaxMyGID)),ierr);
  EPETRA_TEST_ERR(Map.MyLID(Map.LID(MaxMyGID+1)),ierr);

  // Check RemoteIDList function
  // Get some GIDs off of each processor to test
  int TotalNumEle, NumElePerProc, NumProc = Comm.NumProc();
  int MinNumEleOnProc;
  int NumMyEle = Map.NumMyElements();
  Comm.MinAll(&NumMyEle,&MinNumEleOnProc,1);
  if (MinNumEleOnProc > 5) NumElePerProc = 6;
  else NumElePerProc = MinNumEleOnProc;

  if (NumElePerProc > 0) {
    TotalNumEle = NumElePerProc*NumProc;
    int * MyGIDlist = new int[NumElePerProc];
    int * GIDlist = new int[TotalNumEle];
    int * PIDlist = new int[TotalNumEle];
    int * LIDlist = new int[TotalNumEle];
    for (i=0; i<NumElePerProc; i++)
      MyGIDlist[i] = MyGlobalElements1[i];
    Comm.GatherAll(MyGIDlist,GIDlist,NumElePerProc); // Get a few values from each proc
    Map.RemoteIDList(TotalNumEle, GIDlist, PIDlist, LIDlist);
    int MyPID = Comm.MyPID();
    forierr = 0;
    for (i=0; i<TotalNumEle; i++) {
      if (Map.MyGID(GIDlist[i])) {
        forierr += PIDlist[i] != MyPID;
        forierr += !Map.MyLID(Map.LID(GIDlist[i]))
                || Map.LID(GIDlist[i]) != LIDlist[i]
                || Map.GID(LIDlist[i]) != GIDlist[i];
      }
      else {
        forierr += PIDlist[i] == MyPID; // If MyGID comes back false, the PID listed should be that of another proc
      }
    }
    EPETRA_TEST_ERR(forierr,ierr);

    delete [] MyGIDlist;
    delete [] GIDlist;
    delete [] PIDlist;
    delete [] LIDlist;
  }

  delete [] MyGlobalElements1;

  // Check RemoteIDList function (assumes all maps are linear, even if not stored that way)
  if (Map.LinearMap()) {
    int * GIDList = new int[3];
    int * PIDList = new int[3];
    int * LIDList = new int[3];
    int MyPID = Map.Comm().MyPID();
    int NumIDs = 0;
    //GIDList[NumIDs++] = Map.MaxAllGID()+1; // Should return -1 for both PID and LID
    if (Map.MinMyGID()-1>=Map.MinAllGID()) GIDList[NumIDs++] = Map.MinMyGID()-1;
    if (Map.MaxMyGID()+1<=Map.MaxAllGID()) GIDList[NumIDs++] = Map.MaxMyGID()+1;

    Map.RemoteIDList(NumIDs, GIDList, PIDList, LIDList);

    NumIDs = 0;
    //EPETRA_TEST_ERR(!(PIDList[NumIDs]==-1),ierr);
    //EPETRA_TEST_ERR(!(LIDList[NumIDs++]==-1),ierr);
    if (Map.MinMyGID()-1>=Map.MinAllGID()) EPETRA_TEST_ERR(!(PIDList[NumIDs++]==MyPID-1),ierr);
    if (Map.MaxMyGID()+1<=Map.MaxAllGID()) EPETRA_TEST_ERR(!(PIDList[NumIDs]==MyPID+1),ierr);
    if (Map.MaxMyGID()+1<=Map.MaxAllGID()) EPETRA_TEST_ERR(!(LIDList[NumIDs++]==0),ierr);

    delete [] GIDList;
    delete [] PIDList;
    delete [] LIDList;
  }
  return (ierr);
}
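// A minimal driver for checkmap, assuming it is linked into the same test
// executable. For a uniform linear map the GID list argument can be 0, and a
// serial run means DistributedGlobal is false:
#include <Epetra_SerialComm.h>
#include <Epetra_Map.h>

int demo_checkmap()
{
    Epetra_SerialComm Comm;
    const int NumGlobalElements = 100;
    const int IndexBase = 0;

    Epetra_Map Map( NumGlobalElements, IndexBase, Comm ); // uniform linear map

    return checkmap( Map, NumGlobalElements, Map.NumMyElements(),
                     0, IndexBase, Comm, false );
}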
template <typename int_type, typename MapType1, typename MapType2>
int TLowCommunicationMakeColMapAndReindex(int N, const int * rowptr, int * colind_LID,
                                          const int_type *colind_GID, const Epetra_Map& domainMap,
                                          const int * owningPIDs,
                                          bool SortGhostsAssociatedWithEachProcessor,
                                          std::vector<int>& RemotePIDs, MapType1 & NewColMap)
{
  int i, j;

  // Sanity checks
  bool UseLL;
  if (domainMap.GlobalIndicesLongLong()) UseLL = true;
  else if (domainMap.GlobalIndicesInt()) UseLL = false;
  else throw std::runtime_error("LowCommunicationMakeColMapAndReindex: cannot detect int type.");

  // Scan all column indices and sort into two groups:
  // Local:  those whose GID matches a GID of the domain map on this processor and
  // Remote: all others.
  int numDomainElements = domainMap.NumMyElements();
  bool * LocalGIDs = 0;
  if (numDomainElements > 0) LocalGIDs = new bool[numDomainElements];
  for (i=0; i<numDomainElements; i++) LocalGIDs[i] = false; // Assume domain GIDs are not local

  bool DoSizes = !domainMap.ConstantElementSize(); // If not constant element size, then error
  if (DoSizes) throw std::runtime_error("LowCommunicationMakeColMapAndReindex: cannot handle non-constant sized domainMap.");

  // In principle it is good to have RemoteGIDs and RemoteGIDList be as long as the number of remote GIDs
  // on this processor, but this would require two passes through the column IDs, so we make it the max of 100
  // and the number of block rows.
  const int numMyBlockRows = N;
  int hashsize = numMyBlockRows;
  if (hashsize < 100) hashsize = 100;

  Epetra_HashTable<int_type> RemoteGIDs(hashsize);
  std::vector<int_type> RemoteGIDList;  RemoteGIDList.reserve(hashsize);
  std::vector<int> PIDList;             PIDList.reserve(hashsize);

  // Here we start using the *int* colind array. If int_type==int this clobbers the GIDs; if
  // int_type==long long, then this is the first use of the colind array.
  // For *local* GIDs, set colind to their LID in the domainMap. For *remote* GIDs,
  // set colind to (numDomainElements+NumRemoteColGIDs) before incrementing the
  // remote count. These numberings stay separate because no local LID is greater
  // than numDomainElements.
  int NumLocalColGIDs = 0;
  int NumRemoteColGIDs = 0;
  for (i = 0; i < numMyBlockRows; i++) {
    for (j = rowptr[i]; j < rowptr[i+1]; j++) {
      int_type GID = colind_GID[j];
      // Check if GID matches a domain map GID
      int LID = domainMap.LID(GID);
      if (LID != -1) {
        bool alreadyFound = LocalGIDs[LID];
        if (!alreadyFound) {
          LocalGIDs[LID] = true; // There is a column in the graph associated with this domain map GID
          NumLocalColGIDs++;
        }
        colind_LID[j] = LID;
      }
      else {
        int_type hash_value = RemoteGIDs.Get(GID);
        if (hash_value == -1) { // This means it's a new remote GID
          int PID = owningPIDs[j];
          if (PID == -1) throw std::runtime_error("LowCommunicationMakeColMapAndReindex: Cannot figure out if PID is owned.");
          colind_LID[j] = numDomainElements + NumRemoteColGIDs;
          RemoteGIDs.Add(GID, NumRemoteColGIDs);
          RemoteGIDList.push_back(GID);
          PIDList.push_back(PID);
          NumRemoteColGIDs++;
        }
        else
          colind_LID[j] = numDomainElements + hash_value;
      }
    }
  }

  // Possible short-circuit: If all domain map GIDs are present as column indices, then set ColMap=domainMap and quit
  if (domainMap.Comm().NumProc() == 1) {
    if (NumRemoteColGIDs != 0) {
      throw std::runtime_error("Some column IDs are not in domainMap. If matrix is rectangular, you must pass in a domainMap");
      // Sanity test: with one processor, there can be no remote GIDs
    }
    if (NumLocalColGIDs == numDomainElements) {
      if (LocalGIDs != 0) delete [] LocalGIDs;
      // In this case, we just use the domainMap's indices, which is, not coincidentally, what we clobbered colind with up above anyway.
      // No further reindexing is needed.
      NewColMap = domainMap;
      return 0;
    }
  }

  // Now build integer array containing column GIDs
  // Build back end, containing remote GIDs, first
  int numMyBlockCols = NumLocalColGIDs + NumRemoteColGIDs;
  std::vector<int_type> ColIndices;
  int_type * RemoteColIndices = 0;
  if (numMyBlockCols > 0) {
    ColIndices.resize(numMyBlockCols);
    if (NumLocalColGIDs != numMyBlockCols) RemoteColIndices = &ColIndices[NumLocalColGIDs]; // Points to back end of ColIndices
    else RemoteColIndices = 0;
  }

  for (i = 0; i < NumRemoteColGIDs; i++)
    RemoteColIndices[i] = RemoteGIDList[i];

  // Build permute array for *remote* reindexing.
  std::vector<int> RemotePermuteIDs(NumRemoteColGIDs);
  for (i = 0; i < NumRemoteColGIDs; i++) RemotePermuteIDs[i] = i;

  // Sort external column indices so that all columns coming from a given remote processor are contiguous
  int NumListsInt = 0;
  int NumListsLL = 0;
  int * IntSortLists[2];
  long long * LLSortLists[2];
  int * RemotePermuteIDs_ptr = RemotePermuteIDs.size() ? &RemotePermuteIDs[0] : 0;
  if (!UseLL) {
    // int version
    IntSortLists[0] = (int*) RemoteColIndices;
    IntSortLists[1] = RemotePermuteIDs_ptr;
    NumListsInt = 2;
  }
  else {
    // LL version
    LLSortLists[0] = (long long*) RemoteColIndices;
    IntSortLists[0] = RemotePermuteIDs_ptr;
    NumListsInt = NumListsLL = 1;
  }
  int * PIDList_ptr = PIDList.size() ? &PIDList[0] : 0;
  Epetra_Util::Sort(true, NumRemoteColGIDs, PIDList_ptr, 0, 0, NumListsInt, IntSortLists, NumListsLL, LLSortLists);

  // Stash the RemotePIDs
  PIDList.resize(NumRemoteColGIDs);
  RemotePIDs = PIDList;

  if (SortGhostsAssociatedWithEachProcessor) {
    // Sort external column indices so that columns from a given remote processor are not only contiguous
    // but also in ascending order. NOTE: I don't know if the number of externals associated
    // with a given remote processor is known at this point ... so I count them here.
    // NTS: Only sort the RemoteColIndices this time...
    int StartCurrent, StartNext;
    StartCurrent = 0; StartNext = 1;
    while (StartNext < NumRemoteColGIDs) {
      if (PIDList[StartNext] == PIDList[StartNext-1]) StartNext++;
      else {
        IntSortLists[0] = &RemotePermuteIDs[StartCurrent];
        Epetra_Util::Sort(true, StartNext-StartCurrent, &(RemoteColIndices[StartCurrent]), 0, 0, 1, IntSortLists, 0, 0);
        StartCurrent = StartNext;
        StartNext++;
      }
    }
    IntSortLists[0] = &RemotePermuteIDs[StartCurrent];
    Epetra_Util::Sort(true, StartNext-StartCurrent, &(RemoteColIndices[StartCurrent]), 0, 0, 1, IntSortLists, 0, 0);
  }

  // Reverse the permutation to get the information we actually care about
  std::vector<int> ReverseRemotePermuteIDs(NumRemoteColGIDs);
  for (i = 0; i < NumRemoteColGIDs; i++) ReverseRemotePermuteIDs[RemotePermuteIDs[i]] = i;

  // Build permute array for *local* reindexing.
  bool use_local_permute = false;
  std::vector<int> LocalPermuteIDs(numDomainElements);

  // Now fill front end. Two cases:
  // (1) If the number of Local column GIDs is the same as the number of Local domain GIDs, we
  //     can simply read the domain GIDs into the front part of ColIndices, otherwise
  // (2) We step through the GIDs of the domainMap, checking to see if each domain GID is a column GID.
  //     We want to do this to maintain a consistent ordering of GIDs between the columns and the domain.
  if (NumLocalColGIDs == domainMap.NumMyElements()) {
    if (NumLocalColGIDs > 0) {
      domainMap.MyGlobalElements(&ColIndices[0]); // Load Global Indices into the first numMyBlockCols elements of the column GID list
    }
  }
  else {
    int_type* MyGlobalElements = 0;
    domainMap.MyGlobalElementsPtr(MyGlobalElements);

    int* ElementSizeList = 0;
    if (DoSizes) ElementSizeList = domainMap.ElementSizeList();

    int NumLocalAgain = 0;
    use_local_permute = true;
    for (i = 0; i < numDomainElements; i++) {
      if (LocalGIDs[i]) {
        LocalPermuteIDs[i] = NumLocalAgain;
        ColIndices[NumLocalAgain++] = MyGlobalElements[i];
      }
    }
    assert(NumLocalAgain == NumLocalColGIDs); // Sanity test
  }

  // Done with this array
  if (LocalGIDs != 0) delete [] LocalGIDs;

  // Make column map with same element sizes as the domain map
  int_type * ColIndices_ptr = ColIndices.size() ? &ColIndices[0] : 0;
  MapType2 temp((int_type)(-1), numMyBlockCols, ColIndices_ptr, (int_type)domainMap.IndexBase64(), domainMap.Comm());
  NewColMap = temp;

  // Low-cost reindex of the matrix
  for (i = 0; i < numMyBlockRows; i++) {
    for (j = rowptr[i]; j < rowptr[i+1]; j++) {
      int ID = colind_LID[j];
      if (ID < numDomainElements) {
        if (use_local_permute) colind_LID[j] = LocalPermuteIDs[colind_LID[j]];
        // In the case where use_local_permute==false, we just copy the DomainMap's ordering, which it so happens
        // is what we put in colind to begin with.
      }
      else
        colind_LID[j] = NumLocalColGIDs + ReverseRemotePermuteIDs[colind_LID[j] - numDomainElements];
    }
  }

  return 0;
}
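// The heart of the routine above is the local/remote split of column GIDs and the
// collision-free interim numbering (local LIDs below numDomainElements, remote slots
// at numDomainElements and above). A sketch of that idea, with std::unordered_map
// standing in for Epetra_HashTable and hypothetical data (this rank owns GIDs 10-12):
#include <unordered_map>
#include <vector>
#include <cstdio>

int demo_colmap_reindex()
{
    std::unordered_map<long long,int> domainLID = { {10,0}, {11,1}, {12,2} };
    const int numDomainElements = 3;

    // CRS column GIDs of a small local matrix (hypothetical).
    std::vector<long long> colind_GID = { 10, 11, 40, 11, 12, 40, 50 };
    std::vector<int> colind_LID(colind_GID.size());

    std::unordered_map<long long,int> remoteGIDs; // GID -> remote slot
    int NumRemoteColGIDs = 0;

    for (std::size_t j = 0; j < colind_GID.size(); ++j) {
        auto it = domainLID.find(colind_GID[j]);
        if (it != domainLID.end()) {
            colind_LID[j] = it->second;            // local: LID in the domain map
        } else {
            auto r = remoteGIDs.find(colind_GID[j]);
            if (r == remoteGIDs.end()) {           // first sighting of a remote GID
                remoteGIDs[colind_GID[j]] = NumRemoteColGIDs;
                colind_LID[j] = numDomainElements + NumRemoteColGIDs++;
            } else {
                colind_LID[j] = numDomainElements + r->second;
            }
        }
    }

    // Local indices stay below numDomainElements, remote indices at or above it,
    // so the two numberings never collide, exactly as in the Epetra routine.
    for (std::size_t j = 0; j < colind_LID.size(); ++j)
        std::printf("colind_LID[%zu] = %d\n", j, colind_LID[j]);
    return 0;
}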