void JacobiSmoothFG( VT &sol, const MT &A, const VT &def )
// changes only the fine unknowns
// result in sol_vec; def_vec: correct defect before call, after call destroyed
{
    typename VT::Iterator viter(sol);
    typename VT::VectorEntry ve;
    const FAMGSparseVector *svsol = sol.GetSparseVectorPtr();
    const FAMGSparseVector *svdef = def.GetSparseVectorPtr();
    const FAMGSparseBlock *sb = A.GetDiagSparseBlockPtr();
    double *solptr, *defptr, *matptr;

    short nr = sb->Get_nr();
    if(nr != sb->Get_nc()) assert(0);
    if(nr != svsol->Get_n()) assert(0);
    if(nr != svdef->Get_n()) assert(0);

    // todo: implement for more general vectors
    for(short i = 1; i < nr; i++)
    {
        if(svsol->Get_comp(i) - svsol->Get_comp(i-1) != 1) assert(0);
        if(svdef->Get_comp(i) - svdef->Get_comp(i-1) != 1) assert(0);
    }
    short sol_off = svsol->Get_comp(0);
    short def_off = svdef->Get_comp(0);

    double *decomp = new double[nr*nr];
    short *pivotmap = new short[nr];

    while(viter(ve))
    {
        if( sol.IsFG(ve) )
        {
            solptr = sol.GetValuePtr(ve)+sol_off;
            defptr = def.GetValuePtr(ve)+def_off;
            matptr = A.GetDiagValuePtr(ve);
            SparseBlockMCopyDense(decomp,sb,matptr);
            if(LR_Decomp(nr,decomp,pivotmap)) assert(0);
            if(LR_Solve(nr,decomp,pivotmap,solptr,defptr)) assert(0);
        }
#ifdef USE_UG_DS
        else
        {
            // set coarse components to 0
            SparseBlockVSet(svsol,sol.GetValuePtr(ve),0.0);
        }
#endif
    }

    delete [] decomp;      // allocated with new[], so release with delete[]
    delete [] pivotmap;
    return;
}
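// --- illustrative sketch (not part of the original sources) ---
// What the per-point solve above is assumed to amount to: for each fine unknown the small
// nr x nr diagonal block is factorized and solved against the local defect. Here this is done
// in one go by Gaussian elimination with partial pivoting; LR_Decomp/LR_Solve split the same
// work into a factorization step and a solve step using 'decomp' and 'pivotmap'.
#include <cassert>
#include <cmath>
#include <utility>

void SolveDiagBlock(int nr, double *D /* row-major nr*nr block, overwritten */,
                    double *s /* out: correction */, const double *d /* in: local defect */)
{
    for (int i = 0; i < nr; i++) s[i] = d[i];
    for (int k = 0; k < nr; k++) {
        int p = k;                                     // partial pivoting
        for (int i = k + 1; i < nr; i++)
            if (std::fabs(D[i*nr + k]) > std::fabs(D[p*nr + k])) p = i;
        for (int j = 0; j < nr; j++) std::swap(D[k*nr + j], D[p*nr + j]);
        std::swap(s[k], s[p]);
        assert(D[k*nr + k] != 0.0);
        for (int i = k + 1; i < nr; i++) {             // eliminate column k
            double l = D[i*nr + k] / D[k*nr + k];
            for (int j = k; j < nr; j++) D[i*nr + j] -= l * D[k*nr + j];
            s[i] -= l * s[k];
        }
    }
    for (int i = nr - 1; i >= 0; i--) {                // back substitution
        for (int j = i + 1; j < nr; j++) s[i] -= D[i*nr + j] * s[j];
        s[i] /= D[i*nr + i];
    }
}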
// split this M-tree into a list of trees having height level; used in the "splitting" phase of the BulkLoad algorithm
// nCreated is the number of created subtrees,
// level is the split level for the tree,
// parentEntries is the list of the parent entries of each subtree,
// name is the root for the subtree names
// the return value is the list of the split subtrees' names
GiSTlist<char *> *MT::SplitTree (int *nCreated, int level, GiSTlist<MTentry *> *parentEntries, const char *name)
{
    GiSTlist<MTnode *> *oldList = new GiSTlist<MTnode *>;  // upper level nodes
    MTnode *node = new MTnode;  // this is because the first operation on node is a delete
    GiSTpath path;
    path.MakeRoot ();
    oldList->Append((MTnode *) ReadNode(path));  // insert the root
    do {  // build the roots list
        GiSTlist<MTnode *> *newList = new GiSTlist<MTnode *>;  // lower level nodes
        while (!oldList->IsEmpty()) {
            delete node;  // delete the old node created by ReadNode
            node = oldList->RemoveFront();  // retrieve next node to be examined
            path = node->Path();
            for (int i=0; i<node->NumEntries(); i++) {  // append all its children to the new list
                path.MakeChild ((*node)[i].Ptr()->Ptr());
                newList->Append((MTnode *)ReadNode(path));
                path.MakeParent ();
            }
        }
        delete oldList;
        oldList = newList;
    } while (node->Level() > level);  // stop if we're at the split level
    delete node;
    GiSTlist<char *> *newTreeNames = new GiSTlist<char *>;  // this is the result list
    while (!oldList->IsEmpty()) {  // now append each sub-tree to its root
        char newName[50];
        sprintf (newName, "%s.%i", name, ++(*nCreated));
        unlink (newName);  // if this M-tree already exists, delete it
        MT *newTree = new MT;
        newTree->Create(newName);  // create a new M-tree
        path.MakeRoot ();
        MTnode *rootNode = (MTnode *) newTree->ReadNode(path);  // read the root of the new tree
        node = oldList->RemoveFront();
        newTree->Append(rootNode, (MTnode *)node->Copy());  // append the current node to the root of the new tree
        parentEntries->Append(node->ParentEntry());  // insert the original parent entry into the list
        newTreeNames->Append(strdup(newName));  // insert the new M-tree name into the list
        delete node;
        delete rootNode;
        delete newTree;
    }
    delete oldList;
    return newTreeNames;
}
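// --- illustrative call site (not part of the original sources) ---
// This mirrors how BulkLoad below uses SplitTree: cut 'tree' (a hypothetical name) one level
// below its root and collect the names of the resulting subtrees plus their parent entries.
int nCreated = 0;
GiSTlist<MTentry *> *parentEntries = new GiSTlist<MTentry *>;
GiSTlist<char *> *newTreeNames = tree->SplitTree(&nCreated, tree->TreeHeight()-1, parentEntries, "mytree");
while (!newTreeNames->IsEmpty()) {
    char *subName = newTreeNames->RemoveFront();       // name of one subtree (strdup'ed by SplitTree)
    MTentry *parentEntry = parentEntries->RemoveFront();
    // ... open the subtree named subName and use parentEntry as its routing entry ...
    free (subName);
    delete parentEntry;
}
delete newTreeNames;
delete parentEntries;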
void VecMinusMatVec( VT &d, const VT &f, const MT &M, const VT &u )
{
    typename VT::Iterator viter(d);
    typename VT::VectorEntry row;
    typename MT::MatrixEntry col;
    double *dptr, *fptr, *uptr, *mptr;
    const FAMGSparseVector *svu = u.GetSparseVectorPtr();
    const FAMGSparseVector *svf = f.GetSparseVectorPtr();
    const FAMGSparseVector *svd = d.GetSparseVectorPtr();
    const FAMGSparseBlock *sb = M.GetSparseBlockPtr();
    const FAMGSparseBlock *sbd = M.GetDiagSparseBlockPtr();
    FAMGSparseVector svsum_d, svsum_o;

    svsum_d.Product(sbd,svu);
    svsum_o.Product(sb,svu);
    double *sum_d = new double[svsum_d.Get_maxcomp()+1];
    double *sum_o = new double[svsum_o.Get_maxcomp()+1];

    while(viter(row))
    {
        typename MT::Iterator miter(M,row);

        dptr = d.GetValuePtr(row);
        fptr = f.GetValuePtr(row);

        // diagonal
        miter(col);
        uptr = u.GetValuePtr(col.dest());
        mptr = M.GetValuePtr(col);
        SparseBlockVSet(&svsum_d,sum_d,0.0);
        SparseBlockVSet(&svsum_o,sum_o,0.0);
        SparseBlockMVAddProduct(&svsum_d,sbd,svu,sum_d,mptr,uptr,1.0);

        while(miter(col))
        {
            uptr = u.GetValuePtr(col.dest());
            mptr = M.GetValuePtr(col);
            SparseBlockMVAddProduct(&svsum_o,sb,svu,sum_o,mptr,uptr,1.0);
        }

        SparseBlockVSub(svd,svf,&svsum_o,dptr,fptr,sum_o);
        SparseBlockVSub(svd,svd,&svsum_d,dptr,dptr,sum_d);
    }

    delete [] sum_d;   // allocated with new[], so release with delete[]
    delete [] sum_o;
}
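// --- illustrative sketch (not part of the original sources) ---
// Written out for a plain dense system, the routine above computes the defect d = f - M*u;
// the FAMG version does the same block-row by block-row with the sparse-block helpers.
#include <vector>

void DenseVecMinusMatVec(std::vector<double> &d, const std::vector<double> &f,
                         const std::vector<std::vector<double> > &M, const std::vector<double> &u)
{
    for (std::size_t i = 0; i < f.size(); i++) {
        double sum = 0.0;
        for (std::size_t j = 0; j < u.size(); j++)
            sum += M[i][j] * u[j];      // (M*u)_i
        d[i] = f[i] - sum;              // defect component i
    }
}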
// append the subtree rooted at 'from' to the node 'to'; used in the "append" phase of the BulkLoad algorithm
void MT::Append (MTnode *to, MTnode *from)
{
    GiSTlist<MTnode *> *oldList = new GiSTlist<MTnode *>;  // upper level nodes to append
    oldList->Append(from);
    GiSTlist<GiSTpath> pathList;
    pathList.Append (to->Path());
    MTnode *node = new MTnode, *newNode = NULL;
    MT *fromTree = (MT *) from->Tree();
    do {
        GiSTlist<MTnode *> *newList = new GiSTlist<MTnode *>;  // lower level nodes to append
        while (!oldList->IsEmpty()) {
            delete node;
            node = oldList->RemoveFront();
            GiSTpath path = pathList.RemoveFront ();
            newNode = (MTnode *) ReadNode (path);  // node to be appended
            for (int i=0; i<node->NumEntries(); i++) {
                MTentry *entry = (MTentry *) (*node)[i].Ptr()->Copy();
                if (node->Level() > 0) {  // if node isn't a leaf, we have to allocate its children
                    GiSTpath nodePath = node->Path();
                    nodePath.MakeChild (entry->Ptr());
                    newList->Append((MTnode *) fromTree->ReadNode(nodePath));
                    entry->SetPtr(Store()->Allocate());  // allocate its child in the inserted tree
                    path.MakeChild (entry->Ptr());
                    MTnode *childNode = (MTnode *) CreateNode ();
                    childNode->Path() = path;
                    childNode->SetTree(this);
                    WriteNode (childNode);  // write the empty node
                    delete childNode;
                    pathList.Append (path);
                    path.MakeParent ();
                }
                newNode->Insert(*entry);
                delete entry;
            }
            newNode->SetLevel(node->Level());
            WriteNode (newNode);  // write the node
            delete newNode;
        }
        delete oldList;
        oldList = newList;
    } while (node->Level() > 0);  // until we reach the leaves' level
    delete node;
    delete oldList;
}
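// --- illustrative call site (not part of the original sources) ---
// This mirrors how SplitTree and BulkLoad use Append: the root of another M-tree ('subtree',
// a hypothetical name) is grafted below a node of this tree. Ownership of the 'from' node is
// an assumption here; see the callers above for the exact patterns.
GiSTpath rootPath;
rootPath.MakeRoot ();
MTnode *dest = (MTnode *) tree->ReadNode(rootPath);          // node of 'tree' to append to
tree->Append(dest, (MTnode *) subtree->ReadNode(rootPath));  // graft the whole subtree below it
delete dest;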
void SGSSmoother( VT &sol, const MT &M, VT &def )
// symmetric Gauss-Seidel (forward sweep, diagonal scaling, backward sweep)
// result in sol_vec; def_vec: correct defect before call, after call destroyed
{
    typename VT::Iterator viter(def);
    typename VT::RevIterator vReviter(def);
    typename VT::VectorEntry row, col;
    typename MT::MatrixEntry me;
    register double sum, diag;
    int row_index;

    /* symmetric Gauss-Seidel */
    while(viter(row))
    {
        typename MT::Iterator miter(M,row);

        row_index = row.GetIndex();
        sum = def[row];
        miter(me);
        diag = M[me];
        while(miter(me))
        {
            col = me.dest();
            if( col.GetIndex() < row_index )
                sum -= M[me] * def[col];
        }
        def[row] = sum / diag;
    }

    viter.reset();
    while(viter(row))
        def[row] *= M.DiagValue(row);

    while(vReviter(row))
    {
        typename MT::Iterator miter(M,row);

        row_index = row.GetIndex();
        sum = def[row];
        miter(me);
        diag = M[me];
        while(miter(me))
        {
            col = me.dest();
            if( col.GetIndex() > row_index )
                sum -= M[me] * def[col];
        }
        def[row] = sum / diag;
    }

    sol += def;    // update solution
    return;
}
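// --- illustrative sketch (not part of the original sources) ---
// The same symmetric Gauss-Seidel step written for a plain dense matrix: solve (D+L) y = def,
// scale z = D y, solve (D+U) c = z, then add the correction c to the solution, exactly as the
// three loops above do on the sparse data structures.
#include <vector>

void DenseSGSStep(std::vector<double> &sol, const std::vector<std::vector<double> > &M,
                  std::vector<double> &def)   // def is overwritten, as in SGSSmoother
{
    const int n = (int)def.size();
    for (int i = 0; i < n; i++) {              // forward sweep: (D+L) y = def
        double sum = def[i];
        for (int j = 0; j < i; j++) sum -= M[i][j] * def[j];
        def[i] = sum / M[i][i];
    }
    for (int i = 0; i < n; i++)                // scale by the diagonal
        def[i] *= M[i][i];
    for (int i = n - 1; i >= 0; i--) {         // backward sweep: (D+U) c = z
        double sum = def[i];
        for (int j = i + 1; j < n; j++) sum -= M[i][j] * def[j];
        def[i] = sum / M[i][i];
    }
    for (int i = 0; i < n; i++)                // update the solution
        sol[i] += def[i];
}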
void JacobiSmoother( VT &sol, const MT &M, const VT &def )
// result in sol_vec; def_vec: correct defect before call, after call destroyed
{
    typename VT::Iterator viter(sol);
    typename VT::VectorEntry ve;

    while(viter(ve))
        sol[ve] += def[ve] / M.DiagValue(ve);

    return;
}
void JacobiSmoothFG( VT &sol, const MT &M, const VT &def )
// changes only the fine unknowns
// result in sol_vec; def_vec: correct defect before call, after call destroyed
{
    typename VT::Iterator viter(sol);
    typename VT::VectorEntry ve;

#ifdef USE_UG_DS
    while(viter(ve))
        if( sol.IsFG(ve) )
            sol[ve] = def[ve] / M.DiagValue(ve);
        else
            sol[ve] = 0;    // init other components
#else
    while(viter(ve))
        if( sol.IsFG(ve) )
            sol[ve] += def[ve] / M.DiagValue(ve);
#endif

    return;
}
void dampedJacobiSmoother( VT &sol, const MT &M, const VT &def )
// result in sol_vec; def_vec: correct defect before call, after call destroyed
{
    static const double omega = 2.0/3.0;
    typename VT::Iterator viter(sol);
    typename VT::VectorEntry ve;

    while(viter(ve))
        sol[ve] += omega * def[ve] / M.DiagValue(ve);

    return;
}
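// --- illustrative sketch (not part of the original sources) ---
// The Jacobi variants above all apply the update x <- x + omega * D^{-1} * d with D = diag(M)
// and d the current defect (omega = 1 for the undamped versions, 2/3 in dampedJacobiSmoother).
// For a plain dense system A x = b one such step reads:
#include <vector>

void DenseDampedJacobiStep(std::vector<double> &x, const std::vector<std::vector<double> > &A,
                           const std::vector<double> &b, double omega)
{
    const std::size_t n = x.size();
    std::vector<double> def(n);
    for (std::size_t i = 0; i < n; i++) {       // defect b - A*x (what 'def' holds on entry above)
        def[i] = b[i];
        for (std::size_t j = 0; j < n; j++)
            def[i] -= A[i][j] * x[j];
    }
    for (std::size_t i = 0; i < n; i++)         // damped diagonal correction
        x[i] += omega * def[i] / A[i][i];
}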
void JacobiSmoothFGSimple( VT &sol, const MT &D, const VT &def )
// changes only the fine unknowns
// result in sol_vec; def_vec: correct defect before call, after call destroyed
{
    typename VT::Iterator viter(sol);
    typename VT::VectorEntry ve;
    const FAMGSparseVector *svsol = sol.GetSparseVectorPtr();
    const FAMGSparseVector *svdef = def.GetSparseVectorPtr();
    const FAMGSparseBlock *sb = D.GetDiagSparseBlockPtr();
    double *solptr, *defptr, *matptr;

    while(viter(ve))
    {
        if( sol.IsFG(ve) )
        {
            solptr = sol.GetValuePtr(ve);
            defptr = def.GetValuePtr(ve);
            matptr = D.GetDiagValuePtr(ve);
            SparseBlockMVAddProduct(svsol,sb,svdef,solptr,matptr,defptr,1.0);
        }
    }
    return;
}
int ConstructGalerkinMatrix( MT &Mcg, const FAMGGrid &fg )
// this matrix lives on the coarse grid
// calculates Mcg := R * Mfg * P and with indices:
// Mcg_(i,j) := \sum_{s,t} R_(i,s) * Mfg_(s,t) * P_(t,j)
{
    typedef typename MT::Vector VT;
    const FAMGTransfer &transfer = *fg.GetTransfer();
    const typename MT::GridVector& fg_gridvec = (typename MT::GridVector&)fg.GetGridVector();
    const MT& Mfg = (MT&)*fg.GetConsMatrix();    // consistent matrix is essential here!
    const MT& Dfg = (MT&)*fg.GetDiagMatrix();
    const VT &tvA = *fg.GetVector(FAMGTVA);
    const VT &tvB = *fg.GetVector(FAMGTVB);
    typename MT::MatrixEntry mij, mis;
    typename VT::VectorEntry i_fg, i_cg, j_fg, j_cg, s_fg, s_cg, t_cg;
    FAMGTransferEntry *pjs, *pij, *pst;
    typename VT::Iterator viter(fg_gridvec);

#ifdef ModelP
    abort();    // check the consistent mode of ALL occurring matrices!!! and remove this line then
#endif

    // cast because GetSparseBlockPtr returns a const FAMGSparseBlock * pointer
    FAMGSparseBlock *cmatsb_d = (FAMGSparseBlock *)Mcg.GetDiagSparseBlockPtr();
    FAMGSparseBlock *cmatsb_o = (FAMGSparseBlock *)Mcg.GetSparseBlockPtr();
    const FAMGSparseBlock *dmatsb = Dfg.GetDiagSparseBlockPtr();
    const FAMGSparseBlock *fmatsb_o = Mfg.GetSparseBlockPtr();
    const FAMGSparseBlock *fmatsb_d = Mfg.GetDiagSparseBlockPtr();
    const FAMGSparseVector *sp = transfer.Get_sp();
    const FAMGSparseVector *sr = transfer.Get_sr();
    const FAMGSparseVector *tvAsv = tvA.GetSparseVectorPtr();
    const FAMGSparseVector *tvBsv = tvB.GetSparseVectorPtr();
    double *tvAptr, *tvBptr;

    FAMGSparseBlock sb_o_p, sb_r_o, sb_r_o_p, sb_r_d_p, sb_r_dmat_p;

    // only offdiagonal blocks
    sb_o_p.Product(fmatsb_o,sp);
    sb_r_o.Product(sr,fmatsb_o);
    sb_r_o_p.Product(sr,fmatsb_o,sp);
    // sb_r_dmat_p.Product(sr,dmatsb,sp);
    sb_r_dmat_p = (*fmatsb_o);
    sb_r_d_p.Product(sr,fmatsb_d,sp);

    // check sparse block structure
    if(cmatsb_o->CheckStructureforAdd(fmatsb_o)) return 1;
    if(cmatsb_o->CheckStructureforAdd(&sb_o_p)) return 1;
    if(cmatsb_o->CheckStructureforAdd(&sb_r_o)) return 1;
    if(cmatsb_o->CheckStructureforAdd(&sb_r_o_p)) return 1;
    if(cmatsb_o->CheckStructureforAdd(&sb_r_dmat_p)) return 1;
    if(cmatsb_d->CheckStructureforAdd(fmatsb_d)) return 1;
    if(cmatsb_d->CheckStructureforAdd(&sb_r_d_p)) return 1;
    if(cmatsb_d->CheckStructureforAdd(&sb_o_p)) return 1;
    if(cmatsb_d->CheckStructureforAdd(&sb_r_o)) return 1;
    if(cmatsb_d->CheckStructureforAdd(&sb_r_o_p)) return 1;
    if(cmatsb_d->CheckStructureforAdd(&sb_r_dmat_p)) return 1;

    short maxoffset = sb_o_p.Get_maxoffset();
    maxoffset = Max(maxoffset,sb_r_o.Get_maxoffset());
    maxoffset = Max(maxoffset,sb_r_o_p.Get_maxoffset());
    maxoffset = Max(maxoffset,sb_r_dmat_p.Get_maxoffset());
    maxoffset = Max(maxoffset,sb_r_d_p.Get_maxoffset());

    double *val = new double[maxoffset+1];
    double *diaginv = new double[dmatsb->Get_maxoffset()+1];

    while (viter(i_fg) )
    {
#ifdef ModelP
        if ( IS_FAMG_GHOST(((FAMGugVectorEntryRef*)(i_fg.GetPointer()))->myvector()) )
        {
            // repair coarse grid matrix of border vector, if it has no diagonal matrix entry
            if (fg_gridvec.IsCG(i_fg) )
            {
                transfer.GetFirstEntry(i_fg)->GetColInVar(i_cg);
                typename MT::Iterator mijiter(Mcg,i_cg);

                if( mijiter(mij) )    // test first matrix entry of i_cg
                {
                    if( mij.dest() != i_cg )
                        Mcg.AddEntry(0.0, i_cg, i_cg);    // has no diag entry yet
                }
                else    // i_cg has no matrix entry
                {
                    Mcg.AddEntry(0.0, i_cg, i_cg);
                }
            }
            continue;
        }
#endif

        // i is now in core partition
        if (fg_gridvec.IsCG(i_fg) )
        {
            // i is coarse
            transfer.GetFirstEntry(i_fg)->GetColInVar(i_cg);

            typename MT::Iterator mijiter(Mfg,i_fg);
            while( mijiter(mij) )
            {
                j_fg = mij.dest();
                if( fg_gridvec.IsCG(j_fg) )
                {
                    transfer.GetFirstEntry(j_fg)->GetColInVar(j_cg);
                    // Mcg.AddEntry(Mfg[mij], i_cg, j_cg);    // Mcc
                    if(i_cg == j_cg)
                        Mcg.AddEntry(fmatsb_d,Mfg.GetValuePtr(mij), i_cg, j_cg);
                    else
                        Mcg.AddEntry(fmatsb_o,Mfg.GetValuePtr(mij), i_cg, j_cg);    // Mcc
                }
                else
                {
                    for( pjs=transfer.GetFirstEntry(j_fg); pjs != NULL; pjs = pjs->GetNext())
                    {
                        pjs->GetColInVar(s_cg);
                        SparseBlockMMProduct(&sb_o_p,fmatsb_o,sp,val,Mfg.GetValuePtr(mij),pjs->GetProlongationPtr());
                        Mcg.AddEntry(&sb_o_p,val,i_cg, s_cg);
                        // Mcg.AddEntry(Mfg[mij]*pjs->GetProlongation(), i_cg, s_cg);    // Mcf*P
                    }
                }
            }
        }
        else
        {
            // i is fine
            typename MT::Iterator misiter(Mfg,i_fg);
            while( misiter(mis) )
            {
                s_fg = mis.dest();
                for( pij=transfer.GetFirstEntry(i_fg); pij != NULL; pij = pij->GetNext())
                {
                    pij->GetColInVar(j_cg);
                    if( fg_gridvec.IsCG(s_fg) )
                    {
                        transfer.GetFirstEntry(s_fg)->GetColInVar(s_cg);
                        // pij is equivalent to rji
                        // Mcg.AddEntry(pij->GetRestriction()*Mfg[mis], j_cg, s_cg);    // R*Mfc
                        SparseBlockMMProduct(&sb_r_o,sr,fmatsb_o,val,pij->GetRestrictionPtr(),Mfg.GetValuePtr(mis));
                        Mcg.AddEntry(&sb_r_o,val,j_cg, s_cg);
                    }
                    else
                    {
                        // s is fine
                        if(s_fg == i_fg)
                        {
                            // special treatment for the A_{i,i} to keep block sparsity pattern
                            for( pst=transfer.GetFirstEntry(s_fg); pst != NULL; pst = pst->GetNext())
                            {
                                pst->GetColInVar(t_cg);
                                // pij is equivalent to rji
                                // Mcg.AddEntry(pij->GetRestriction()*Mfg[mis]*pst->GetProlongation(), j_cg, t_cg);    // R*Mff*P
                                SparseBlockMMProduct(&sb_r_d_p,sr,fmatsb_d,sp,val,pij->GetRestrictionPtr(),Mfg.GetValuePtr(mis),pst->GetProlongationPtr());
                                //Mcg.AddEntry(&sb_r_d_p,val,j_cg, j_cg);    // lump to diagonal
                                Mcg.AddEntry(&sb_r_d_p,val,t_cg, t_cg);    // lump to diagonal
                                // todo: make sure lumping preserves filter condition

                                if(j_cg != t_cg)
                                {
                                    // SparseBlockMInvertDiag(dmatsb, diaginv, Dfg.GetValuePtr(mis));
                                    // SparseBlockMMProduct(&sb_r_dmat_p,sr,dmatsb,sp,val,pij->GetRestrictionPtr(),diaginv,pst->GetProlongationPtr());
                                    tvAptr = tvA.GetValuePtr(t_cg);
                                    tvBptr = tvB.GetValuePtr(t_cg);
                                    SparseBlockGalDiagApprox(&sb_r_dmat_p,sr,fmatsb_d,sp,tvAsv,val,pij->GetRestrictionPtr(),Mfg.GetValuePtr(mis),pst->GetProlongationPtr(),tvAptr);
                                    // SparseBlockGalDiagApproxT(&sb_r_dmat_p,sr,fmatsb_d,sp,tvBsv,val,pij->GetRestrictionPtr(),Mfg.GetValuePtr(mis),pst->GetProlongationPtr(),tvBptr);
                                    Mcg.AddEntry(&sb_r_dmat_p,val,j_cg, t_cg);
                                    // Mcg.AddEntry(&sb_r_dmat_p,val,j_cg, j_cg,-1.0);
                                    Mcg.AddEntry(&sb_r_dmat_p,val,t_cg, t_cg,-1.0);
                                }
                            }
                        }
                        else
                        {
                            for( pst=transfer.GetFirstEntry(s_fg); pst != NULL; pst = pst->GetNext())
                            {
                                pst->GetColInVar(t_cg);
                                // pij is equivalent to rji
                                // Mcg.AddEntry(pij->GetRestriction()*Mfg[mis]*pst->GetProlongation(), j_cg, t_cg);    // R*Mff*P
                                SparseBlockMMProduct(&sb_r_o_p,sr,fmatsb_o,sp,val,pij->GetRestrictionPtr(),Mfg.GetValuePtr(mis),pst->GetProlongationPtr());
                                Mcg.AddEntry(&sb_r_o_p,val,j_cg, t_cg);
                            }
                        }
                    }
                }
            }
        }
    }

    delete [] val;       // allocated with new[], so release with delete[]
    delete [] diaginv;
    return 0;
}
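// --- illustrative sketch (not part of the original sources) ---
// The sparse-block code above implements the Galerkin product Mcg = R * Mfg * P by looping over
// the coarse/fine splitting and the transfer entries (and lumping certain fine-fine contributions
// to the diagonal, see the comments there). As a reference, a naive dense version of the same
// formula Mcg(i,j) = sum_{s,t} R(i,s) * Mfg(s,t) * P(t,j):
#include <vector>

typedef std::vector<std::vector<double> > Dense;

Dense GalerkinProduct(const Dense &R, const Dense &Mfg, const Dense &P)
{
    const std::size_t nc = R.size();        // number of coarse unknowns (R is nc x nf, P is nf x nc)
    const std::size_t nf = Mfg.size();      // number of fine unknowns
    Dense Mcg(nc, std::vector<double>(nc, 0.0));
    for (std::size_t i = 0; i < nc; i++)
        for (std::size_t s = 0; s < nf; s++)
            for (std::size_t t = 0; t < nf; t++)
                for (std::size_t j = 0; j < nc; j++)
                    Mcg[i][j] += R[i][s] * Mfg[s][t] * P[t][j];
    return Mcg;
}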
void MarkStrongLinks(const MT &A, const FAMGGrid &grid)
{
    typedef typename MT::Vector VT;
    const typename MT::GridVector& gridvec = (typename MT::GridVector&)grid.GetGridVector();
    typename MT::MatrixEntry matij;
    typename VT::VectorEntry vi;
    typename VT::Iterator viter(gridvec);
    double rlist[20], llist[20], mij, mji, rmax, lmax;
    int z, y;
    const double sigma = FAMGGetParameter()->Getsigma();
    const int minsl = 2 - 1;

    while (viter(vi))
    {
        for(z = 0; z <= minsl; z++)
        {
            rlist[z] = llist[z] = 0.0;
        }

        typename MT::Iterator mij_iter(A,vi);
        mij_iter(matij);    // skip diagonal
        while( mij_iter(matij) )
        {
            mij = Abs(A[matij]);
            mji = Abs(A.GetAdjData(matij));

            for(z = minsl; z >= 0; z--) { if (mij < rlist[z]) break; }
            for(y = minsl; y > z+1; y--) { rlist[y] = rlist[y-1]; }
            if(z+1 <= minsl) rlist[z+1] = mij;

            for(z = minsl; z >= 0; z--) { if (mji < llist[z]) break; }
            for(y = minsl; y > z+1; y--) { llist[y] = llist[y-1]; }
            if(z+1 <= minsl) llist[z+1] = mji;
        }

        rmax = rlist[minsl]*sigma;
        lmax = llist[minsl]*sigma;

        mij_iter.reset();
        mij_iter(matij);
        matij.set_strong(1);
        while( mij_iter(matij) )
        {
            mij = Abs(A[matij]);
            mji = Abs(A.GetAdjData(matij));
            if((mij > rmax) || (mji > lmax))
            {
                matij.set_strong(1);
            }
            else
                matij.set_strong(0);
        }
    }
    return;
}
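// --- illustrative sketch (not part of the original sources) ---
// Scalar version of the threshold used above: a link (i,j) is strong if |A[i][j]| exceeds
// sigma times the second-largest off-diagonal magnitude of row i (minsl = 2 - 1 keeps the two
// largest values). The routine above additionally applies the same test to the transposed
// entry |A[j][i]| and marks the link if either test succeeds.
#include <cmath>
#include <vector>

std::vector<bool> MarkStrongRow(const std::vector<std::vector<double> > &A, std::size_t i, double sigma)
{
    double first = 0.0, second = 0.0;                  // two largest off-diagonal magnitudes in row i
    for (std::size_t j = 0; j < A[i].size(); j++) {
        if (j == i) continue;
        double a = std::fabs(A[i][j]);
        if (a > first)       { second = first; first = a; }
        else if (a > second) { second = a; }
    }
    std::vector<bool> strong(A[i].size(), false);
    for (std::size_t j = 0; j < A[i].size(); j++)
        if (j != i)
            strong[j] = std::fabs(A[i][j]) > sigma * second;
    return strong;
}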
int ConstructGalerkinMatrix( MT &Mcg, const FAMGGrid &fg )
// this matrix lives on the coarse grid
// calculates Mcg := R * Mfg * P and with indices:
// Mcg_(i,j) := \sum_{s,t} R_(i,s) * Mfg_(s,t) * P_(t,j)
{
    typedef typename MT::Vector VT;
    const FAMGTransfer &transfer = *fg.GetTransfer();
    const typename MT::GridVector& fg_gridvec = (typename MT::GridVector&)fg.GetGridVector();
    const MT& Mfg = (MT&)*fg.GetConsMatrix();    // consistent matrix is essential here!
    typename MT::MatrixEntry mij, mis;
    typename VT::VectorEntry i_fg, i_cg, j_fg, j_cg, s_fg, s_cg, t_cg;
    FAMGTransferEntry *pjs, *pij, *pst;
    typename VT::Iterator viter(fg_gridvec);

    // the next lines are for debugging only:
    //MATDATA_DESC *tmpA = ((FAMGugMatrix*)fg.GetConsMatrix())->GetMatDesc();
    //GRID *tmpgrid = fg.GetugGrid();
    //int tmpflevel = GLEVEL(tmpgrid);
    //printf("%d: GalerkinAss finelevel = %d\n",me,tmpflevel); prvGeom(tmpflevel,0); primGeom(tmpflevel); prmGeom(tmpflevel,MD_SCALCMP(tmpA)); prvGeom(tmpflevel-1,0);

    while (viter(i_fg) )
    {
#ifdef ModelP
        if ( IS_FAMG_GHOST(((FAMGugVectorEntryRef*)(i_fg.GetPointer()))->myvector()) )
        {
            // repair coarse grid matrix of border vector, if it has no diagonal matrix entry
            if (fg_gridvec.IsCG(i_fg) )
            {
                transfer.GetFirstEntry(i_fg)->GetColInVar(i_cg);
                typename MT::Iterator mijiter(Mcg,i_cg);

                if( mijiter(mij) )    // test first matrix entry of i_cg
                {
                    if( mij.dest() != i_cg )
                        Mcg.AddEntry(0.0, i_cg, i_cg);    // has no diag entry yet
                }
                else    // i_cg has no matrix entry
                {
                    Mcg.AddEntry(0.0, i_cg, i_cg);
                }
            }
            continue;
        }
#endif

        // i is now in core partition
        if (fg_gridvec.IsCG(i_fg) )
        {
            // i is coarse
            transfer.GetFirstEntry(i_fg)->GetColInVar(i_cg);

            typename MT::Iterator mijiter(Mfg,i_fg);
            while( mijiter(mij) )
            {
                j_fg = mij.dest();
                if( fg_gridvec.IsCG(j_fg) )
                {
                    transfer.GetFirstEntry(j_fg)->GetColInVar(j_cg);
                    Mcg.AddEntry(Mfg[mij], i_cg, j_cg);    // Mcc
                    //printf("%d: G%d[%d] Mcc i f%d[%d] c%d[%d] j f%d[%d] c%d[%d] Mfg[mij]=%g\n",me, prvec(i_cg),
                    //       prvec(i_fg),prvec(i_cg),prvec(j_fg),prvec(j_cg),Mfg[mij]);
                }
                else
                {
                    for( pjs=transfer.GetFirstEntry(j_fg); pjs != NULL; pjs = pjs->GetNext())
                    {
                        pjs->GetColInVar(s_cg);
                        Mcg.AddEntry(Mfg[mij]*pjs->GetProlongation(), i_cg, s_cg);    // Mcf*P
                        //printf("%d: G%d[%d] Mcf*P i f%d[%d] c%d[%d] j f%d[%d] s c%d[%d] Mfg[mij]=%g pjs=%g Mfg[mij]*pjs=%g\n",me, prvec(i_cg),
                        //       prvec(i_fg),prvec(i_cg),prvec(j_fg),prvec(s_cg),Mfg[mij],pjs->GetProlongation(),Mfg[mij]*pjs->GetProlongation());
                    }
                }
            }
        }
        else
        {
            // i is fine
            typename MT::Iterator misiter(Mfg,i_fg);
            while( misiter(mis) )
            {
                s_fg = mis.dest();
                for( pij=transfer.GetFirstEntry(i_fg); pij != NULL; pij = pij->GetNext())
                {
                    pij->GetColInVar(j_cg);
                    if( fg_gridvec.IsCG(s_fg) )
                    {
                        transfer.GetFirstEntry(s_fg)->GetColInVar(s_cg);
                        // pij is equivalent to rji
                        Mcg.AddEntry(pij->GetRestriction()*Mfg[mis], j_cg, s_cg);    // R*Mfc
                        //printf("%d: G%d[%d] R*Mfc j c%d[%d] i f%d[%d] s f%d[%d] c%d[%d] rji=%g Mfg[mis]=%g rji*Mfg[mis]=%g\n",me, prvec(j_cg),
                        //       prvec(j_cg),prvec(i_fg),prvec(s_fg),prvec(s_cg),pij->GetRestriction(), Mfg[mis], pij->GetRestriction()*Mfg[mis] );
                    }
                    else
                    {
                        // s is fine
                        for( pst=transfer.GetFirstEntry(s_fg); pst != NULL; pst = pst->GetNext())
                        {
                            pst->GetColInVar(t_cg);
                            // pij is equivalent to rji
                            Mcg.AddEntry(pij->GetRestriction()*Mfg[mis]*pst->GetProlongation(), j_cg, t_cg);    // R*Mff*P
                            //printf("%d: G%d[%d] R*Mff*P j c%d[%d] i f%d[%d] s f%d[%d] t c%d[%d] rji=%g Mfg[mis]=%g pst=%g rji*Mfg[mis]*pst=%g\n",me, prvec(j_cg),
                            //       prvec(j_cg),prvec(i_fg),prvec(s_fg),prvec(t_cg),pij->GetRestriction(),Mfg[mis],pst->GetProlongation(),pij->GetRestriction()*Mfg[mis]*pst->GetProlongation() );
                        }
                    }
                }
            }
        }
    }

    return 0;
}
// no need for a special traversal
template <typename MT>
V_type fortran_view (MT const &x)
{
    return (x.indexmap().memory_layout_is_c() ? x.transpose() : x);
}
// load this M-tree with n data entries using the BulkLoad algorithm [CP98]
// data is an array of n entries
// padFactor is the maximum node utilization (use 1)
// name is the name of the tree
void MT::BulkLoad (MTentry **data, int n, double padFactor, const char *name)
{
    int size = 0;
    if (EntrySize()) {
        size = n * (sizeof(GiSTpage) + EntrySize());  // (only valid if we've fixed size entries)
    } else {
        for (int i=0; i<n; i++) {
            size += sizeof(GiSTlte) + sizeof(GiSTpage) + data[i]->CompressedLength();
        }
    }
    int totSize = size + GIST_PAGE_HEADER_SIZE + sizeof(GiSTlte);

    if (totSize > Store()->PageSize()) {  // we need to split the entries into several sub-trees
        int numEntries = (int)(Store()->PageSize()*padFactor*n) / totSize;
        int s = (int) MAX (MIN (numEntries, ceil(((float)n)/numEntries)), numEntries*MIN_UTIL);  // initial number of samples
        int nSamples, *samples = new int[s], *sizes = NULL, *ns = NULL, iter = 0, MAXITER = s * s;
        GiSTlist<double *> *distm = (GiSTlist<double *> *) calloc (s, sizeof(GiSTlist<double *>));  // relative distances between samples
        int MINSIZE = (int) (Store()->PageSize()*MIN_UTIL), addEntrySize = EntrySize() ? sizeof(GiSTpage) : sizeof(GiSTlte)+sizeof(GiSTpage);
        GiSTlist<int> *lists = NULL;  // set for each sample set
        GiSTlist<double> *dists = NULL;  // set for distance between each sample and its members
        BOOL *bSampled = new BOOL[n];  // is this entry in the samples set?

        // sampling phase
        do {
            iter++;
            if (iter > 1) {  // this is a new sampling phase
                while (!lists[0].IsEmpty()) {
                    lists[0].RemoveFront ();
                    dists[0].RemoveFront ();
                }
                delete []lists;
                delete []dists;
                delete []sizes;
                delete []ns;
                while (!distm[0].IsEmpty()) {
                    delete []distm[0].RemoveFront();  // empty the distance list
                }
                for (int i=1; i<s; i++) {
                    distm[i].front = distm[i].rear = NULL;
                }
            }
            if (iter >= MAXITER) {
                cout << "Too many loops in BulkLoad!" << endl << "Please select a lower minimum node utilization or a bigger node size." << endl;
                exit(1);
            }
            for (int i=0; i<n; i++) {
                bSampled[i] = FALSE;
            }
            nSamples = 0;
            // pick s samples to create parents
            while (nSamples < s) {
                int i;
                do {
                    i = PickRandom (0, n);
                } while (bSampled[i]);
                bSampled[i] = TRUE;
                samples[nSamples++] = i;
            }
            lists = new GiSTlist<int>[s];
            dists = new GiSTlist<double>[s];
            sizes = new int[s];
            ns = new int[s];
            for (int i=0; i<s; i++) {
                sizes[i] = GIST_PAGE_HEADER_SIZE + sizeof(GiSTlte);
                ns[i] = 1;
                distm[i].Prepend (new double[s]);
            }
            // compute the relative distances between samples
            for (int i=0; i<s; i++) {
                for (int j=0; j<i; j++) {
                    distm[j].front->entry[i] = distm[i].front->entry[j] = data[samples[j]]->object().distance(data[samples[i]]->object());
                }
                distm[i].front->entry[i] = 0;
            }
            // assign each entry to its nearest parent
            for (int i=0; i<n; i++) {
                if (bSampled[i]) {
                    int j = 0;
                    for (; samples[j]!=i; j++);  // find this entry in the samples set and return position in it
                    lists[j].Prepend (i);  // insert the entry in the right sample
                    dists[j].Prepend (0);  // distance between sample and data[i]
                    sizes[j] += addEntrySize + data[i]->CompressedLength();
                } else {  // here we optimize the distance computations (like we do in the insert algorithm)
                    double *dist = new double[s];  // distance between this non-sample and samples
                    dist[0] = data[samples[0]]->object().distance(data[i]->object());
                    int minIndex = 0;
                    for (int j=1; j<s; j++) {  // seek the nearest sample
                        dist[j] = -MaxDist();
                        if (fabs (data[samples[j]]->Key()->distance - data[i]->Key()->distance) >= dist[minIndex]) {  // pruning
                            continue;
                        }
                        BOOL flag = TRUE;
                        for (int k=0; k<j && flag; k++) {  // pruning (other samples)
                            if (dist[k] < 0) {
                                continue;
                            } else {
                                flag = fabs (dist[k] - distm[j].front->entry[k]) < dist[minIndex];
                            }
                        }
                        if (!flag) {
                            continue;
                        }
                        dist[j] = data[samples[j]]->object().distance(data[i]->object());  // have to compute this distance
                        if (dist[j] < dist[minIndex]) {
                            minIndex = j;
                        }
                    }
                    lists[minIndex].Append (i);  // insert the entry in the right sample
                    dists[minIndex].Append (dist[minIndex]);  // distance between sample and data[i]
                    sizes[minIndex] += addEntrySize + data[i]->CompressedLength();
                    ns[minIndex]++;
                    sizes[minIndex] >= MINSIZE ? delete []dist : distm[minIndex].Append (dist);  // correspond with lists
                }
            }
            // redistribute underfilled parents
            int i;
            while (sizes[i = FindMin (sizes, nSamples)] < MINSIZE) {
                GiSTlist<int> list = lists[i];  // each sample set
                while (!dists[i].IsEmpty()) {  // clear distance between each sample and its members
                    dists[i].RemoveFront ();
                }
                // substitute this set with the last set
                for (int j=0; j<nSamples; j++) {
                    for (GiSTlistnode<double *> *node=distm[j].front; node; node=node->next) {
                        node->entry[i] = node->entry[nSamples-1];
                    }
                }
                GiSTlist<double *> dlist = distm[i];  // relative distances between sample[i] and other samples, reposition by myself
                distm[i] = distm[nSamples-1];
                lists[i] = lists[nSamples-1];
                dists[i] = dists[nSamples-1];
                samples[i] = samples[nSamples-1];
                sizes[i] = sizes[nSamples-1];
                ns[i] = ns[nSamples-1];
                nSamples--;
                while (!list.IsEmpty()) {  // assign each entry to its nearest parent
                    double *dist = dlist.RemoveFront ();  // relative distances between sample[i] (old) and other samples (old)
                    int minIndex = -1;
                    for (int j=0; j<nSamples && minIndex<0; j++) {  // search for a computed distance
                        if (dist[j] > 0) {
                            minIndex = j;
                        }
                    }
                    int k = list.RemoveFront ();
                    if (minIndex < 0) {  // no distance was computed (i.e. all distances were pruned)
                        dist[0] = data[samples[0]]->object().distance(data[k]->object());
                        minIndex = 0;
                    }
                    for (int j=0; j<nSamples; j++) {
                        if (j == minIndex) {
                            continue;
                        }
                        if (dist[j] < 0) {  // distance wasn't computed
                            if (fabs (data[samples[j]]->Key()->distance - data[k]->Key()->distance) >= dist[minIndex]) {
                                continue;  // pruning
                            }
                            BOOL flag = TRUE;
                            for (int i=0; i<j && flag; i++) {  // pruning (other samples)
                                if (dist[i] < 0) {
                                    continue;
                                } else {
                                    flag = fabs (dist[i] - distm[j].front->entry[i]) < dist[minIndex];
                                }
                            }
                            if (!flag) {
                                continue;
                            }
                            dist[j] = data[samples[j]]->object().distance(data[k]->object());  // have to compute this distance
                        }
                        if (dist[j] < dist[minIndex]) {
                            minIndex = j;
                        }
                    }
                    lists[minIndex].Append (k);
                    dists[minIndex].Append (dist[minIndex]);
                    sizes[minIndex] += addEntrySize + data[k]->CompressedLength();
                    ns[minIndex]++;
                    sizes[minIndex] >= MINSIZE ? delete []dist : distm[minIndex].Append (dist);  // correspond with lists
                }
                assert (dlist.IsEmpty());  // so is the list
            }
        } while (nSamples == 1);  // if there's only one child, repeat the sampling phase

        MTentry ***array = new MTentry **[nSamples];  // array of the entries for each sub-tree
        for (int i=0; i<nSamples; i++) {  // convert the lists into arrays
            array[i] = new MTentry *[ns[i]];
            for (int j=0; j<ns[i]; j++) {
                array[i][j] = (MTentry *) data[lists[i].RemoveFront ()]->Copy();
                array[i][j]->Key()->distance = dists[i].RemoveFront ();
            }
            assert (lists[i].IsEmpty());
            assert (dists[i].IsEmpty());
        }
        delete []lists;
        delete []dists;
        delete []sizes;
        delete []bSampled;
        for (int i=0; i<nSamples; i++) {
            while (!distm[i].IsEmpty()) {
                delete [](distm[i].RemoveFront());
            }
        }
        free (distm);

        // build an M-tree under each parent
        int nInit = nSamples;
        MT *subtree = new MT;
        GiSTlist<char *> subtreeNames;  // list of the subtree names
        GiSTlist<MTentry *> topEntries;  // list of the parent entries of each subtree
        int nCreated = 0, minHeight = MAXINT;
        char newName[50];
        for (int i=0; i<nInit; i++) {
            sprintf (newName, "%s.%i", name, ++nCreated);
            unlink (newName);
            subtree->Create(newName);  // create the new subtree
            subtree->BulkLoad(array[i], ns[i], padFactor, newName);  // build the subtree

            GiSTpath path;
            path.MakeRoot ();
            MTnode *subtreeRoot = (MTnode *) subtree->ReadNode(path);
            if (subtreeRoot->IsUnderFull(*Store())) {  // if the subtree root node is underfilled, we have to split the tree
                GiSTlist<MTentry *> *parentEntries = new GiSTlist<MTentry *>;
                GiSTlist<char *> *newTreeNames = subtree->SplitTree(&nCreated, subtree->TreeHeight()-1, parentEntries, name);  // split the tree
                nSamples--;
                while (!newTreeNames->IsEmpty()) {  // insert all the new trees in the subtrees list
                    subtreeNames.Append (newTreeNames->RemoveFront());
                    MTentry *entry = parentEntries->RemoveFront();
                    for (int j=0; j<n; j++) {
                        if (data[j]->object() == entry->object()) {  // append the parent entry to the list
                            topEntries.Append (data[j]);
                            break;
                        }
                    }
                    delete entry;
                    nSamples++;
                }
                delete newTreeNames;
                delete parentEntries;
                minHeight = MIN (minHeight, subtree->TreeHeight()-1);
            } else {
                subtreeNames.Append (strdup(newName));
                topEntries.Append (data[samples[i]]);
                minHeight = MIN (minHeight, subtree->TreeHeight());
            }
            delete subtreeRoot;
            subtree->Close();
            delete subtree->Store();  // it was created in subtree->Create()
        }
        delete []samples;
        for (int i=0; i<nInit; i++) {
            for (int j=0; j<ns[i]; j++) {
                delete array[i][j];
            }
            delete []array[i];
        }
        delete []array;
        delete []ns;

        // fix the subtree height
        GiSTlist<char *> subtreeNames2;  // list of the subtree names
        GiSTlist<MTentry *> topEntries2;  // list of the parent entries of each subtree
        while (!topEntries.IsEmpty()) {  // insert the trees in the list (splitting trees if necessary)
            MTentry *parentEntry = topEntries.RemoveFront ();
            char *tmp = subtreeNames.RemoveFront ();
            strcpy (newName, tmp);
            free (tmp);  // the name was allocated with strdup
            subtree->Open(newName);
            if (subtree->TreeHeight() > minHeight) {  // we have to split the tree to reduce its height
                nSamples--;
                GiSTlist<MTentry *> *parentEntries = new GiSTlist<MTentry *>;
                GiSTlist<char *> *newTreeNames = subtree->SplitTree(&nCreated, minHeight, parentEntries, name);  // split the tree
                while (!newTreeNames->IsEmpty()) {  // insert all the new trees in the subtrees list
                    subtreeNames2.Append (newTreeNames->RemoveFront());
                    MTentry *entry = parentEntries->RemoveFront();
                    for (int j=0; j<n; j++) {
                        if (data[j]->object() == entry->object()) {  // append the parent entry to the parents list
                            topEntries2.Append (data[j]);
                            break;
                        }
                    }
                    delete entry;
                    nSamples++;
                }
                delete newTreeNames;
                delete parentEntries;
            } else {  // simply insert the tree and its parent entry to the lists
                subtreeNames2.Append (strdup(newName));
                topEntries2.Append (parentEntry);
            }
            subtree->Close();
            delete subtree->Store();  // it was created in tree->Open()
        }

        // build the super tree upon the parents
        MTentry **topEntrArr = new MTentry *[nSamples];  // array of the parent entries for each subtree
        char **subNameArr = new char *[nSamples];  // array of the subtree names
        for (int i=0; i<nSamples; i++) {  // convert the lists into arrays
            topEntrArr[i] = topEntries2.RemoveFront ();
            subNameArr[i] = subtreeNames2.RemoveFront ();
        }
        assert (topEntries2.IsEmpty());
        assert (subtreeNames2.IsEmpty());
        sprintf (newName, "%s.0", name);
        BulkLoad (topEntrArr, nSamples, padFactor, newName);

        // attach each subtree to the leaves of the super tree
        GiSTpath path;
        path.MakeRoot ();
        MTnode *node = (MTnode *) ReadNode (path);
        GiSTlist<MTnode *> *oldList = new GiSTlist<MTnode *>;  // upper level nodes
        oldList->Append(node);
        int level = node->Level();
        while (level > 0) {  // build the leaves list for the super tree
            GiSTlist<MTnode *> *newList = new GiSTlist<MTnode *>;  // lower level nodes
            while (!oldList->IsEmpty()) {
                node = oldList->RemoveFront();
                path = node->Path();
                node->SetLevel(node->Level() + minHeight);  // update level of the upper nodes of the super tree
                WriteNode (node);
                for (int i=0; i<node->NumEntries(); i++) {
                    MTentry *entry = (MTentry *) (*node)[i].Ptr();
                    path.MakeChild (entry->Ptr());
                    newList->Append((MTnode *)ReadNode(path));
                    path.MakeParent ();
                }
                delete node;
            }
            delete oldList;
            oldList = newList;
            level--;
        }
        while (!oldList->IsEmpty()) {  // attach each subtree to its leaf
            node = oldList->RemoveFront();  // retrieve next leaf (root of subtree)
            node->SetLevel(minHeight);  // update level of the root of the subtree
            path = node->Path();
            for (int i=0; i<node->NumEntries(); i++) {
                MTentry *entry = (MTentry *) (*node)[i].Ptr();
                path.MakeChild(Store()->Allocate());
                MTnode *newNode = (MTnode *) CreateNode ();
                newNode->Path() = path;
                entry->SetPtr(path.Page());
                path.MakeParent ();
                int j = 0;
                for (; entry->object() != topEntrArr[j]->object(); j++);  // search the position to append
                subtree->Open(subNameArr[j]);
                GiSTpath rootPath;
                rootPath.MakeRoot ();
                Append (newNode, (MTnode *)subtree->ReadNode(rootPath));  // append this subtree to the super tree
                subtree->Close();
                delete subtree->Store();  // it was created in tree->Open()
                delete newNode;
            }
            WriteNode (node);
            delete node;
        }
        subtree->Open(subNameArr[0]);  // in order to destroy the object tree
        delete subtree;
        for (int i=0; i<nSamples; i++) {
            free (subNameArr[i]);  // the names were allocated with strdup
        }
        delete []subNameArr;
        delete []topEntrArr;

        // update radii of the upper nodes of the result M-tree
        path.MakeRoot ();
        node = (MTnode *) ReadNode (path);
        oldList->Append(node);
        level = node->Level();
        while (level >= minHeight) {  // build the list of the nodes whose radii should be recomputed
            GiSTlist<MTnode *> *newList = new GiSTlist<MTnode *>;
            while (!oldList->IsEmpty()) {
                node = oldList->RemoveFront();
                path = node->Path();
                for (int i=0; i<node->NumEntries(); i++) {
                    path.MakeChild ((*node)[i].Ptr()->Ptr());
                    newList->Append((MTnode *)ReadNode(path));
                    path.MakeParent ();
                }
                delete node;
            }
            delete oldList;
            oldList = newList;
            level--;
        }
        while (!oldList->IsEmpty()) {  // adjust the radii of the nodes
            MTnode *node = oldList->RemoveFront();
            AdjKeys (node);
            delete node;
        }
        delete oldList;

        for (int i=0; i<=nCreated; i++) {  // delete all temporary subtrees
            sprintf (newName, "%s.%i", name, i);
            unlink (newName);
        }
    } else {  // we can insert all the entries in a single node
        GiSTpath path;
        path.MakeRoot ();
        GiSTnode *node = ReadNode (path);
        for (int i=0; i<n; i++) {
            node->Insert(*(data[i]));
        }
        assert (!node->IsOverFull(*Store()));
        WriteNode (node);
        delete node;
    }
}
// no need for a special traversal
template <typename MT>
V_type fortran_view(MT const &x)
{
    if (x.indexmap().memory_layout_is_c())
        return x.transpose();
    else
        return x;
}