void sample1(struct data *Afine_data, struct data *Acoarse_data,
             struct data *Rmat_data, struct data *Pmat_data,
             double *sol, double *rhs)
{
   ML     *my_ml;
   int    i;
   int    fine_grid, output_level = 10, N_grids = 2, grid0 = 0, grid1 = 1;
   int    Nfine, Ncoarse;
   double *diagonal;

   Nfine    = Rmat_data->from_size;
   Ncoarse  = Rmat_data->to_size;
   diagonal = (double *) malloc(Nfine*sizeof(double));
   for (i = 0; i < Nfine; i++) diagonal[i] = 2.;

   /* two-grid hierarchy: grid1 is the fine grid, grid0 the coarse grid */
   fine_grid = grid1;
   ML_Create(&my_ml, N_grids);
   ML_Set_OutputLevel(my_ml, output_level);

   /* fine-grid matrix: user-supplied getrow, matvec, and diagonal */
   ML_Init_Amatrix(my_ml, grid1, Nfine, Nfine, (void *) Afine_data);
   ML_Set_Amatrix_Getrow(my_ml, grid1, myAgetrow, my_comm, Nfine+1);
   ML_Set_Amatrix_Matvec(my_ml, grid1, mymatvec);
   ML_Set_Amatrix_Diag(my_ml, grid1, Nfine, diagonal);
   ML_Gen_Smoother_Jacobi(my_ml, grid1, ML_PRESMOOTHER, 2, ML_DEFAULT);

   /* user-supplied prolongator and restrictor */
   ML_Init_Prolongator(my_ml, grid0, grid1, Ncoarse, Nfine, (void *) Pmat_data);
   ML_Set_Prolongator_Getrow(my_ml, grid0, myPgetrow, my_comm, Ncoarse+1);
   ML_Set_Prolongator_Matvec(my_ml, grid0, myinterp);

   ML_Init_Restrictor(my_ml, grid1, grid0, Nfine, Ncoarse, (void *) Rmat_data);
   ML_Set_Restrictor_Getrow(my_ml, grid1, myRgetrow, my_comm, Nfine+1);
   ML_Set_Restrictor_Matvec(my_ml, grid1, myrestrict);

   /* Galerkin coarse-grid operator and coarse-grid solve */
   ML_Gen_AmatrixRAP(my_ml, grid1, grid0);
#ifdef SUPERLU
   ML_Gen_CoarseSolverSuperLU(my_ml, grid0);
#else
   ML_Gen_Smoother_Jacobi(my_ml, grid0, ML_PRESMOOTHER, 100, ML_DEFAULT);
#endif
   /* ML_Gen_Smoother_Jacobi(my_ml, grid0, ML_PRESMOOTHER, 200, ML_DEFAULT); */
   /* ML_Gen_Smoother_GaussSeidel(my_ml, grid0, ML_PRESMOOTHER, 200, 1.); */

   ML_Gen_Solver(my_ml, 0, fine_grid, grid0);
   ML_Iterate(my_ml, sol, rhs);

   ML_Destroy(&my_ml);
   ML_free(diagonal);
}
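The example above relies on user-supplied kernels (mymatvec, myAgetrow, my_comm, myinterp, myrestrict, ...) and on a struct data of which only the from_size and to_size fields are visible here. The sketch below shows one plausible way to back two of those callbacks with a CSR matrix; the rowptr/columns/values fields, the void* first argument, and the return conventions are assumptions based on the getrow/matvec prototypes described in the ML User's Guide (newer ML releases pass an ML_Operator * instead of the raw data pointer), so check the guide for the release in use.

/* Hypothetical CSR-backed user data and callbacks for the example above.
   Field names rowptr/columns/values and the void* first argument are
   assumptions; only from_size/to_size appear in the original listing. */
struct data {
   int     from_size, to_size;   /* row / column dimensions              */
   int    *rowptr, *columns;     /* CSR row pointers and column indices  */
   double *values;               /* CSR nonzero values                   */
};

/* matvec callback: ap = A*p, sequential case (no ghost exchange) */
int mymatvec(void *A_data, int in_length, double p[], int out_length, double ap[])
{
   struct data *A = (struct data *) A_data;
   int i, k;
   for (i = 0; i < out_length; i++) {
      ap[i] = 0.0;
      for (k = A->rowptr[i]; k < A->rowptr[i+1]; k++)
         ap[i] += A->values[k] * p[A->columns[k]];
   }
   return 0;
}

/* getrow callback: copy the requested rows into the caller's buffers,
   returning 0 if allocated_space is too small so that ML can retry */
int myAgetrow(void *A_data, int N_requested_rows, int requested_rows[],
              int allocated_space, int columns[], double values[], int row_lengths[])
{
   struct data *A = (struct data *) A_data;
   int i, k, row, nz, count = 0;
   for (i = 0; i < N_requested_rows; i++) {
      row = requested_rows[i];
      nz  = A->rowptr[row+1] - A->rowptr[row];
      if (count + nz > allocated_space) return 0;
      for (k = 0; k < nz; k++) {
         columns[count] = A->columns[A->rowptr[row]+k];
         values[count]  = A->values [A->rowptr[row]+k];
         count++;
      }
      row_lengths[i] = nz;
   }
   return 1;
}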
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  FineGridCtx   *PetscMLdata;
  ML            *ml_object;
  ML_Aggregate  *agg_object;
  ML_Operator   *mlmat;
  PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat            A,Aloc;
  GridCtx       *gridctx;
  PC_MG         *mg    = (PC_MG*)pc->data;
  PC_ML         *pc_ml = (PC_ML*)mg->innerctx;
  PetscBool      isSeq,isMPI;
  KSP            smoother;
  PC             subpc;
  PetscInt       mesh_level,old_mesh_level;

  PetscFunctionBegin;
  A = pc->pmat;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);

  if (pc->setupcalled) {
    if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
      /* Reuse interpolation instead of recomputing aggregates and updating the whole
         hierarchy. This is less expensive for multiple solves in which the matrix is
         not changing too quickly. */
      ml_object  = pc_ml->ml_object;
      gridctx    = pc_ml->gridctx;
      Nlevels    = pc_ml->Nlevels;
      fine_level = Nlevels - 1;
      gridctx[fine_level].A = A;

      ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJ,&isSeq);CHKERRQ(ierr);
      ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&isMPI);CHKERRQ(ierr);
      if (isMPI) {
        ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
      } else if (isSeq) {
        Aloc = A;
        ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
      } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG,"Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

      ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
      PetscMLdata = pc_ml->PetscMLdata;
      ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
      PetscMLdata->A    = A;
      PetscMLdata->Aloc = Aloc;
      ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
      ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

      mesh_level = ml_object->ML_finest_level;
      while (ml_object->SingleLevel[mesh_level].Rmat->to) {
        old_mesh_level = mesh_level;
        mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;

        /* clean and regenerate A */
        mlmat = &(ml_object->Amat[mesh_level]);
        ML_Operator_Clean(mlmat);
        ML_Operator_Init(mlmat,ml_object->comm);
        ML_Gen_AmatrixRAP(ml_object,old_mesh_level,mesh_level);
      }

      level = fine_level - 1;
      if (size == 1) { /* convert ML P, R and A into seqaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++) {
          mlmat = &(ml_object->Amat[mllevel]);
          ierr  = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      } else { /* convert ML P and R into shell format, ML A into mpiaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++) {
          mlmat = &(ml_object->Amat[mllevel]);
          ierr  = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      }

      for (level=0; level<fine_level; level++) {
        if (level > 0) {
          ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
        }
        ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
      }
      ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
      ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);

      ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else {
      /* since ML can change the size of vectors/matrices at any level we must destroy everything */
      ierr = PCReset_ML(pc);CHKERRQ(ierr);
      ierr = PCReset_MG(pc);CHKERRQ(ierr);
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  pc_ml->size = size;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJ,&isSeq);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&isMPI);CHKERRQ(ierr);
  if (isMPI) {
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
    ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
  } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG,"Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

  /* create and initialize struct 'PetscMLdata' */
  ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
  pc_ml->PetscMLdata = PetscMLdata;
  ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create ML discretization matrix at fine grid */
  /* ML requires input of fine-grid matrix. It determines nlevels. */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  {
    MatNullSpace mnull;
    ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
    if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
      if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
      else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
      else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
    }
    switch (pc_ml->nulltype) {
    case PCML_NULLSPACE_USER: {
      PetscScalar       *nullvec;
      const PetscScalar *v;
      PetscBool          has_const;
      PetscInt           i,j,mlocal,nvec,M;
      const Vec         *vecs;

      if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
      ierr = MatGetSize(A,&M,PETSC_NULL);CHKERRQ(ierr);
      ierr = MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);CHKERRQ(ierr);
      ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
      ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof *nullvec,&nullvec);CHKERRQ(ierr);
      if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
      for (i=0; i<nvec; i++) {
        ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
        for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
        ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
      }
      ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr);
      ierr = PetscFree(nullvec);CHKERRQ(ierr);
    } break;
    case PCML_NULLSPACE_BLOCK:
      ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
      break;
    case PCML_NULLSPACE_SCALAR:
      break;
    default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type");
    }
  }
  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);

  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm) {
    ML_Set_SpectralNormScheme_Anorm(ml_object);
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->OldHierarchy) {
    Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  } else {
    Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  }
  if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level     = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);

  /* set default smoothers */
  for (level=1; level<=fine_level; level++) {
    if (size == 1) {
      ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
      ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
      ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
      ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
      ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
      ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
      ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
    }
  }
  ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
  ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
  pc_ml->gridctx = gridctx;

  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1) { /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++) {
    level1 = level + 1;

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0) {
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* create coarse level and the interpolation between the levels */
  for (level=0; level<fine_level; level++) {
    level1 = level + 1;

    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0) {
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

  /* setupcalled is set to 0 so that MG is setup from scratch */
  pc->setupcalled = 0;
  ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
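PCSetUp_ML() is not called directly by applications; it runs inside the preconditioner setup once the PC type has been set to PCML. Below is a minimal driver sketch of the calling side, assuming PETSc was configured with ML support (e.g. --download-ml) and using the 4-argument KSPSetOperators() that matches the PETSc release of the listing above; solve_with_ml is a hypothetical helper name, and the run-time option mentioned in the comment (-pc_ml_maxNlevels) is one of the -pc_ml_* options registered by PCSetFromOptions_ML.

#include <petscksp.h>

/* Minimal sketch: solve A x = b with the ML preconditioner.
   Assumes A, b, x already exist on the same communicator. */
PetscErrorCode solve_with_ml(Mat A, Vec b, Vec x)
{
  PetscErrorCode ierr;
  KSP            ksp;
  PC             pc;

  PetscFunctionBegin;
  ierr = KSPCreate(((PetscObject)A)->comm,&ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCML);CHKERRQ(ierr);      /* PCSetUp_ML() runs during the solve's setup phase */
  ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);  /* picks up -pc_ml_* / -pc_mg_* options, e.g. -pc_ml_maxNlevels 4 */
  ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
  ierr = KSPDestroy(&ksp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}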