void JacobiSmoothFG( VT &sol, const MT &A, const VT &def )
// changes only the fine unknowns
// result in sol_vec; def_vec must contain the correct defect before the call and is destroyed by the call
{
    typename VT::Iterator viter(sol);
    typename VT::VectorEntry ve;
    const FAMGSparseVector *svsol = sol.GetSparseVectorPtr();
    const FAMGSparseVector *svdef = def.GetSparseVectorPtr();
    const FAMGSparseBlock *sb = A.GetDiagSparseBlockPtr();
    double *solptr, *defptr, *matptr;

    short nr = sb->Get_nr();
    if(nr != sb->Get_nc()) assert(0);
    if(nr != svsol->Get_n()) assert(0);
    if(nr != svdef->Get_n()) assert(0);

    // todo: implement for more general vectors
    // (currently the solution and defect components must be stored contiguously)
    for(short i = 1; i < nr; i++)
    {
        if(svsol->Get_comp(i) - svsol->Get_comp(i-1) != 1) assert(0);
        if(svdef->Get_comp(i) - svdef->Get_comp(i-1) != 1) assert(0);
    }
    short sol_off = svsol->Get_comp(0);
    short def_off = svdef->Get_comp(0);

    double *decomp = new double[nr*nr];
    short *pivotmap = new short[nr];

    while(viter(ve))
    {
        if( sol.IsFG(ve) )
        {
            solptr = sol.GetValuePtr(ve)+sol_off;
            defptr = def.GetValuePtr(ve)+def_off;
            matptr = A.GetDiagValuePtr(ve);

            // copy the sparse diagonal block into the dense work array and
            // solve the diagonal block system for the fine-grid unknowns
            SparseBlockMCopyDense(decomp,sb,matptr);
            if(LR_Decomp(nr,decomp,pivotmap)) assert(0);
            if(LR_Solve(nr,decomp,pivotmap,solptr,defptr)) assert(0);
        }
#ifdef USE_UG_DS
        else
        {
            // set coarse components to 0
            SparseBlockVSet(svsol,sol.GetValuePtr(ve),0.0);
        }
#endif
    }

    delete [] decomp;
    delete [] pivotmap;

    return;
}
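// Illustrative sketch (not FAMG library code): the dense analogue of the per-point step in
// JacobiSmoothFG above. For one fine point with an nr x nr diagonal block, the block is
// factored with partial pivoting (the role of LR_Decomp), the block system is solved
// (the role of LR_Solve), and the result is added to the solution block. The names
// DenseLRDecomp/DenseLRSolve, the row-major storage and the set-vs-add choice are
// assumptions made for this example only; they are not the library's API.
#include <cmath>

// in-place LU factorization of the n x n row-major matrix a with partial pivoting;
// multipliers are stored below the diagonal, the pivot row of step k in piv[k];
// returns nonzero if a (nearly) singular pivot is met
static int DenseLRDecomp(int n, double *a, int *piv)
{
    for (int k = 0; k < n; k++)
    {
        int p = k;
        for (int i = k + 1; i < n; i++)
            if (std::fabs(a[i * n + k]) > std::fabs(a[p * n + k]))
                p = i;
        if (std::fabs(a[p * n + k]) < 1e-15)
            return 1;
        piv[k] = p;
        for (int j = 0; j < n; j++)     // swap rows k and p
        {
            double t = a[k * n + j]; a[k * n + j] = a[p * n + j]; a[p * n + j] = t;
        }
        for (int i = k + 1; i < n; i++) // eliminate column k below the diagonal
        {
            a[i * n + k] /= a[k * n + k];
            for (int j = k + 1; j < n; j++)
                a[i * n + j] -= a[i * n + k] * a[k * n + j];
        }
    }
    return 0;
}

// solves the factored block system and adds the correction to sol;
// rhs is overwritten (mirroring "def destroyed" in the comment above)
static void DenseLRSolve(int n, const double *a, const int *piv, double *sol, double *rhs)
{
    for (int k = 0; k < n; k++)         // row interchanges and forward substitution
    {
        double t = rhs[k]; rhs[k] = rhs[piv[k]]; rhs[piv[k]] = t;
        for (int i = k + 1; i < n; i++)
            rhs[i] -= a[i * n + k] * rhs[k];
    }
    for (int k = n - 1; k >= 0; k--)    // backward substitution
    {
        for (int j = k + 1; j < n; j++)
            rhs[k] -= a[k * n + j] * rhs[j];
        rhs[k] /= a[k * n + k];
    }
    for (int i = 0; i < n; i++)
        sol[i] += rhs[i];               // add the correction (assumption of this sketch)
}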
void VecMinusMatVec( VT &d, const VT &f, const MT &M, const VT &u )
{
    typename VT::Iterator viter(d);
    typename VT::VectorEntry row;
    typename MT::MatrixEntry col;
    double *dptr, *fptr, *uptr, *mptr;
    const FAMGSparseVector *svu = u.GetSparseVectorPtr();
    const FAMGSparseVector *svf = f.GetSparseVectorPtr();
    const FAMGSparseVector *svd = d.GetSparseVectorPtr();
    const FAMGSparseBlock *sb = M.GetSparseBlockPtr();
    const FAMGSparseBlock *sbd = M.GetDiagSparseBlockPtr();
    FAMGSparseVector svsum_d, svsum_o;

    svsum_d.Product(sbd,svu);
    svsum_o.Product(sb,svu);
    double *sum_d = new double[svsum_d.Get_maxcomp()+1];
    double *sum_o = new double[svsum_o.Get_maxcomp()+1];

    while(viter(row))
    {
        typename MT::Iterator miter(M,row);

        dptr = d.GetValuePtr(row);
        fptr = f.GetValuePtr(row);

        // diagonal entry first
        miter(col);
        uptr = u.GetValuePtr(col.dest());
        mptr = M.GetValuePtr(col);
        SparseBlockVSet(&svsum_d,sum_d,0.0);
        SparseBlockVSet(&svsum_o,sum_o,0.0);
        SparseBlockMVAddProduct(&svsum_d,sbd,svu,sum_d,mptr,uptr,1.0);

        // accumulate the offdiagonal contributions of (M*u) in this row
        while(miter(col))
        {
            uptr = u.GetValuePtr(col.dest());
            mptr = M.GetValuePtr(col);
            SparseBlockMVAddProduct(&svsum_o,sb,svu,sum_o,mptr,uptr,1.0);
        }

        // d := f - (offdiagonal part), then d := d - (diagonal part)
        SparseBlockVSub(svd,svf,&svsum_o,dptr,fptr,sum_o);
        SparseBlockVSub(svd,svd,&svsum_d,dptr,dptr,sum_d);
    }

    delete [] sum_d;
    delete [] sum_o;
}
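// Illustrative sketch (not FAMG library code): the operation performed by VecMinusMatVec,
// d := f - M*u, written for a plain scalar CSR matrix instead of the sparse-block format
// used above. The CSR layout (row_start, col_index, value) and the name CsrDefect are
// assumptions chosen for this example only.
#include <cstddef>

static void CsrDefect(std::size_t n, const std::size_t *row_start, const std::size_t *col_index,
                      const double *value, const double *u, const double *f, double *d)
{
    for (std::size_t i = 0; i < n; i++)
    {
        double sum = 0.0;                               // accumulate (M*u)_i
        for (std::size_t k = row_start[i]; k < row_start[i + 1]; k++)
            sum += value[k] * u[col_index[k]];
        d[i] = f[i] - sum;                              // defect in row i
    }
}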
void JacobiSmoothFGSimple( VT &sol, const MT &D, const VT &def )
// changes only the fine unknowns
// result in sol_vec; def_vec must contain the correct defect before the call and is destroyed by the call
{
    typename VT::Iterator viter(sol);
    typename VT::VectorEntry ve;
    const FAMGSparseVector *svsol = sol.GetSparseVectorPtr();
    const FAMGSparseVector *svdef = def.GetSparseVectorPtr();
    const FAMGSparseBlock *sb = D.GetDiagSparseBlockPtr();
    double *solptr, *defptr, *matptr;

    while(viter(ve))
    {
        if( sol.IsFG(ve) )
        {
            solptr = sol.GetValuePtr(ve);
            defptr = def.GetValuePtr(ve);
            matptr = D.GetDiagValuePtr(ve);
            SparseBlockMVAddProduct(svsol,sb,svdef,solptr,matptr,defptr,1.0);
        }
    }

    return;
}
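// Illustrative sketch (not FAMG library code): the dense analogue of the update in
// JacobiSmoothFGSimple, sol_i += B_i * def_i for one fine point, where B_i is the nr x nr
// block taken from D. Since the routine above performs no factorization, B_i presumably
// already holds an (approximate) inverse of the diagonal block; that reading, and the
// name DenseBlockMVAdd, are assumptions made for this example only.
static void DenseBlockMVAdd(int nr, const double *B, const double *def, double *sol)
{
    for (int i = 0; i < nr; i++)
    {
        double sum = 0.0;
        for (int j = 0; j < nr; j++)
            sum += B[i * nr + j] * def[j];      // (B * def)_i
        sol[i] += sum;                          // add the correction, factor 1.0 as above
    }
}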
int ConstructGalerkinMatrix( MT &Mcg, const FAMGGrid &fg )
// this matrix lives on the coarse grid
// calculates Mcg := R * Mfg * P, in index notation:
// Mcg_(i,j) := \sum_{s,t} R_(i,s) * Mfg_(s,t) * P_(t,j)
{
    typedef typename MT::Vector VT;

    const FAMGTransfer &transfer = *fg.GetTransfer();
    const typename MT::GridVector& fg_gridvec = (typename MT::GridVector&)fg.GetGridVector();
    const MT& Mfg = (MT&)*fg.GetConsMatrix();	// consistent matrix is essential here!
    const MT& Dfg = (MT&)*fg.GetDiagMatrix();
    const VT &tvA = *fg.GetVector(FAMGTVA);
    const VT &tvB = *fg.GetVector(FAMGTVB);
    typename MT::MatrixEntry mij, mis;
    typename VT::VectorEntry i_fg, i_cg, j_fg, j_cg, s_fg, s_cg, t_cg;
    FAMGTransferEntry *pjs, *pij, *pst;
    typename VT::Iterator viter(fg_gridvec);

#ifdef ModelP
    abort();	// check the consistent mode of ALL occurring matrices and remove this line then
#endif

    // cast because GetSparseBlockPtr returns a const FAMGSparseBlock * pointer
    FAMGSparseBlock *cmatsb_d = (FAMGSparseBlock *)Mcg.GetDiagSparseBlockPtr();
    FAMGSparseBlock *cmatsb_o = (FAMGSparseBlock *)Mcg.GetSparseBlockPtr();
    const FAMGSparseBlock *dmatsb = Dfg.GetDiagSparseBlockPtr();
    const FAMGSparseBlock *fmatsb_o = Mfg.GetSparseBlockPtr();
    const FAMGSparseBlock *fmatsb_d = Mfg.GetDiagSparseBlockPtr();
    const FAMGSparseVector *sp = transfer.Get_sp();
    const FAMGSparseVector *sr = transfer.Get_sr();
    const FAMGSparseVector *tvAsv = tvA.GetSparseVectorPtr();
    const FAMGSparseVector *tvBsv = tvB.GetSparseVectorPtr();
    double *tvAptr, *tvBptr;

    FAMGSparseBlock sb_o_p, sb_r_o, sb_r_o_p, sb_r_d_p, sb_r_dmat_p;	// only offdiagonal blocks
    sb_o_p.Product(fmatsb_o,sp);
    sb_r_o.Product(sr,fmatsb_o);
    sb_r_o_p.Product(sr,fmatsb_o,sp);
    // sb_r_dmat_p.Product(sr,dmatsb,sp);
    sb_r_dmat_p = (*fmatsb_o);
    sb_r_d_p.Product(sr,fmatsb_d,sp);

    // check sparse block structure
    if(cmatsb_o->CheckStructureforAdd(fmatsb_o)) return 1;
    if(cmatsb_o->CheckStructureforAdd(&sb_o_p)) return 1;
    if(cmatsb_o->CheckStructureforAdd(&sb_r_o)) return 1;
    if(cmatsb_o->CheckStructureforAdd(&sb_r_o_p)) return 1;
    if(cmatsb_o->CheckStructureforAdd(&sb_r_dmat_p)) return 1;
    if(cmatsb_d->CheckStructureforAdd(fmatsb_d)) return 1;
    if(cmatsb_d->CheckStructureforAdd(&sb_r_d_p)) return 1;
    if(cmatsb_d->CheckStructureforAdd(&sb_o_p)) return 1;
    if(cmatsb_d->CheckStructureforAdd(&sb_r_o)) return 1;
    if(cmatsb_d->CheckStructureforAdd(&sb_r_o_p)) return 1;
    if(cmatsb_d->CheckStructureforAdd(&sb_r_dmat_p)) return 1;

    short maxoffset = sb_o_p.Get_maxoffset();
    maxoffset = Max(maxoffset,sb_r_o.Get_maxoffset());
    maxoffset = Max(maxoffset,sb_r_o_p.Get_maxoffset());
    maxoffset = Max(maxoffset,sb_r_dmat_p.Get_maxoffset());
    maxoffset = Max(maxoffset,sb_r_d_p.Get_maxoffset());
    double *val = new double[maxoffset+1];
    double *diaginv = new double[dmatsb->Get_maxoffset()+1];

    while (viter(i_fg) )
    {
#ifdef ModelP
        if ( IS_FAMG_GHOST(((FAMGugVectorEntryRef*)(i_fg.GetPointer()))->myvector()) )
        {
            // repair coarse grid matrix of border vector, if it has no diagonal matrix entry
            if (fg_gridvec.IsCG(i_fg) )
            {
                transfer.GetFirstEntry(i_fg)->GetColInVar(i_cg);
                typename MT::Iterator mijiter(Mcg,i_cg);
                if( mijiter(mij) )	// test first matrix entry of i_cg
                {
                    if( mij.dest() != i_cg )
                        Mcg.AddEntry(0.0, i_cg, i_cg);	// has no diag entry yet
                }
                else	// i_cg has no matrix entry
                {
                    Mcg.AddEntry(0.0, i_cg, i_cg);
                }
            }
            continue;
        }
#endif

        // i is now in core partition
        if (fg_gridvec.IsCG(i_fg) )
        {
            // i is coarse
            transfer.GetFirstEntry(i_fg)->GetColInVar(i_cg);

            typename MT::Iterator mijiter(Mfg,i_fg);
            while( mijiter(mij) )
            {
                j_fg = mij.dest();
                if( fg_gridvec.IsCG(j_fg) )
                {
                    transfer.GetFirstEntry(j_fg)->GetColInVar(j_cg);
                    // Mcg.AddEntry(Mfg[mij], i_cg, j_cg);	// Mcc
                    if(i_cg == j_cg)
                        Mcg.AddEntry(fmatsb_d,Mfg.GetValuePtr(mij), i_cg, j_cg);
                    else
                        Mcg.AddEntry(fmatsb_o,Mfg.GetValuePtr(mij), i_cg, j_cg);	// Mcc
                }
                else
                {
                    for( pjs=transfer.GetFirstEntry(j_fg); pjs != NULL; pjs = pjs->GetNext())
                    {
                        pjs->GetColInVar(s_cg);
                        SparseBlockMMProduct(&sb_o_p,fmatsb_o,sp,val,Mfg.GetValuePtr(mij),pjs->GetProlongationPtr());
                        Mcg.AddEntry(&sb_o_p,val,i_cg, s_cg);
                        // Mcg.AddEntry(Mfg[mij]*pjs->GetProlongation(), i_cg, s_cg);	// Mcf*P
                    }
                }
            }
        }
        else
        {
            // i is fine
            typename MT::Iterator misiter(Mfg,i_fg);
            while( misiter(mis) )
            {
                s_fg = mis.dest();
                for( pij=transfer.GetFirstEntry(i_fg); pij != NULL; pij = pij->GetNext())
                {
                    pij->GetColInVar(j_cg);
                    if( fg_gridvec.IsCG(s_fg) )
                    {
                        transfer.GetFirstEntry(s_fg)->GetColInVar(s_cg);
                        // pij is equivalent to rji
                        // Mcg.AddEntry(pij->GetRestriction()*Mfg[mis], j_cg, s_cg);	// R*Mfc
                        SparseBlockMMProduct(&sb_r_o,sr,fmatsb_o,val,pij->GetRestrictionPtr(),Mfg.GetValuePtr(mis));
                        Mcg.AddEntry(&sb_r_o,val,j_cg, s_cg);
                    }
                    else
                    {
                        // s is fine
                        if(s_fg == i_fg)
                        {
                            // special treatment for the A_{i,i} to keep block sparsity pattern
                            for( pst=transfer.GetFirstEntry(s_fg); pst != NULL; pst = pst->GetNext())
                            {
                                pst->GetColInVar(t_cg);
                                // pij is equivalent to rji
                                // Mcg.AddEntry(pij->GetRestriction()*Mfg[mis]*pst->GetProlongation(), j_cg, t_cg);	// R*Mff*P
                                SparseBlockMMProduct(&sb_r_d_p,sr,fmatsb_d,sp,val,pij->GetRestrictionPtr(),Mfg.GetValuePtr(mis),pst->GetProlongationPtr());
                                // Mcg.AddEntry(&sb_r_d_p,val,j_cg, j_cg);	// lump to diagonal
                                Mcg.AddEntry(&sb_r_d_p,val,t_cg, t_cg);	// lump to diagonal
                                // todo: make sure lumping preserves filter condition

                                if(j_cg != t_cg)
                                {
                                    // SparseBlockMInvertDiag(dmatsb, diaginv, Dfg.GetValuePtr(mis));
                                    // SparseBlockMMProduct(&sb_r_dmat_p,sr,dmatsb,sp,val,pij->GetRestrictionPtr(),diaginv,pst->GetProlongationPtr());
                                    tvAptr = tvA.GetValuePtr(t_cg);
                                    tvBptr = tvB.GetValuePtr(t_cg);
                                    SparseBlockGalDiagApprox(&sb_r_dmat_p,sr,fmatsb_d,sp,tvAsv,val,pij->GetRestrictionPtr(),Mfg.GetValuePtr(mis),pst->GetProlongationPtr(),tvAptr);
                                    // SparseBlockGalDiagApproxT(&sb_r_dmat_p,sr,fmatsb_d,sp,tvBsv,val,pij->GetRestrictionPtr(),Mfg.GetValuePtr(mis),pst->GetProlongationPtr(),tvBptr);
                                    Mcg.AddEntry(&sb_r_dmat_p,val,j_cg, t_cg);
                                    // Mcg.AddEntry(&sb_r_dmat_p,val,j_cg, j_cg,-1.0);
                                    Mcg.AddEntry(&sb_r_dmat_p,val,t_cg, t_cg,-1.0);
                                }
                            }
                        }
                        else
                        {
                            for( pst=transfer.GetFirstEntry(s_fg); pst != NULL; pst = pst->GetNext())
                            {
                                pst->GetColInVar(t_cg);
                                // pij is equivalent to rji
                                // Mcg.AddEntry(pij->GetRestriction()*Mfg[mis]*pst->GetProlongation(), j_cg, t_cg);	// R*Mff*P
                                SparseBlockMMProduct(&sb_r_o_p,sr,fmatsb_o,sp,val,pij->GetRestrictionPtr(),Mfg.GetValuePtr(mis),pst->GetProlongationPtr());
                                Mcg.AddEntry(&sb_r_o_p,val,j_cg, t_cg);
                            }
                        }
                    }
                }
            }
        }
    }

    delete [] val;
    delete [] diaginv;

    return 0;
}
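// Illustrative sketch (not FAMG library code): the dense analogue of the Galerkin product
// assembled entry-wise above, Mcg_(i,j) = \sum_{s,t} R_(i,s) * Mfg_(s,t) * P_(t,j).
// Matrices are stored row-major; R is nc x nf, Mfg is nf x nf, P is nf x nc, Mcg is nc x nc.
// The name DenseGalerkinProduct and the dense storage are assumptions for this example only;
// the loop order mirrors the accumulation into Mcg via AddEntry in ConstructGalerkinMatrix,
// which visits the fine-grid matrix entries (s,t) and distributes their contributions.
static void DenseGalerkinProduct(int nf, int nc, const double *R, const double *Mfg,
                                 const double *P, double *Mcg)
{
    for (int i = 0; i < nc * nc; i++)
        Mcg[i] = 0.0;                                       // Mcg := 0

    for (int s = 0; s < nf; s++)                            // loop over fine rows s
        for (int t = 0; t < nf; t++)                        // and fine columns t
        {
            const double m_st = Mfg[s * nf + t];
            if (m_st == 0.0) continue;
            for (int i = 0; i < nc; i++)                    // distribute R_(i,s) * m_st * P_(t,j)
            {
                const double r_is = R[i * nf + s];
                if (r_is == 0.0) continue;
                for (int j = 0; j < nc; j++)
                    Mcg[i * nc + j] += r_is * m_st * P[t * nc + j];
            }
        }
}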