Example #1
/*@C
    DMMGSetNullSpace - Indicates the null space in the linear operator (this is needed by the linear solver)

    Collective on DMMG

    Input Parameters:
+   dmmg - the context
.   has_cnst - is the constant vector in the null space
.   n - number of null vectors (excluding the possible constant vector)
-   func - a function that fills an array of vectors with the null vectors (must be orthonormal), may be PETSC_NULL

    Level: advanced

.seealso DMMGCreate(), DMMGDestroy(), DMMGSetDM(), DMMGSolve(), MatNullSpaceCreate(), KSPSetNullSpace(), DMMGSetMatType()

@*/
PetscErrorCode PETSCSNES_DLLEXPORT DMMGSetNullSpace(DMMG *dmmg,PetscTruth has_cnst,PetscInt n,PetscErrorCode (*func)(DMMG,Vec[]))
{
  PetscErrorCode ierr;
  PetscInt       i,j,nlevels = dmmg[0]->nlevels;
  Vec            *nulls = 0;
  MatNullSpace   nullsp;
  KSP            iksp;
  PC             pc,ipc;
  PetscTruth     ismg,isred;

  PetscFunctionBegin;
  if (!dmmg) SETERRQ(PETSC_ERR_ARG_NULL,"Passing null as DMMG");
  if (!dmmg[0]->ksp) SETERRQ(PETSC_ERR_ORDER,"Must call AFTER DMMGSetKSP() or DMMGSetSNES()");
  if ((n && !func) || (!n && func)) SETERRQ(PETSC_ERR_ARG_INCOMP,"Both n and func() must be set together");
  if (n < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Cannot have negative number of vectors in null space n = %D",n);

  for (i=0; i<nlevels; i++) {
    if (n) {
      ierr = VecDuplicateVecs(dmmg[i]->b,n,&nulls);CHKERRQ(ierr);
      ierr = (*func)(dmmg[i],nulls);CHKERRQ(ierr);
    }
    ierr = MatNullSpaceCreate(dmmg[i]->comm,has_cnst,n,nulls,&nullsp);CHKERRQ(ierr);
    ierr = KSPSetNullSpace(dmmg[i]->ksp,nullsp);CHKERRQ(ierr);
    for (j=i; j<nlevels; j++) {
      ierr = KSPGetPC(dmmg[j]->ksp,&pc);CHKERRQ(ierr);
      ierr = PetscTypeCompare((PetscObject)pc,PCMG,&ismg);CHKERRQ(ierr);
      if (ismg) {
        ierr = PCMGGetSmoother(pc,i,&iksp);CHKERRQ(ierr);
        ierr = KSPSetNullSpace(iksp, nullsp);CHKERRQ(ierr);
      }
    }
    ierr = MatNullSpaceDestroy(nullsp);CHKERRQ(ierr);
    if (n) {
      ierr = VecDestroyVecs(nulls,n);CHKERRQ(ierr);
    }
  }
  /* make all the coarse grid solvers have LU shift since they are singular */
  for (i=0; i<nlevels; i++) {
    ierr = KSPGetPC(dmmg[i]->ksp,&pc);CHKERRQ(ierr);
    ierr = PetscTypeCompare((PetscObject)pc,PCMG,&ismg);CHKERRQ(ierr);
    if (ismg) {
      ierr = PCMGGetSmoother(pc,0,&iksp);CHKERRQ(ierr);
      ierr = KSPGetPC(iksp,&ipc);CHKERRQ(ierr);
      ierr = PetscTypeCompare((PetscObject)ipc,PCREDUNDANT,&isred);CHKERRQ(ierr);
      if (isred) {
        ierr = PCRedundantGetPC(ipc,&ipc);CHKERRQ(ierr);
      }
      ierr = PCFactorSetShiftType(ipc,MAT_SHIFT_POSITIVE_DEFINITE);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
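Usage note (not part of the scraped source): the simplest case covered by DMMGSetNullSpace() is an operator whose null space contains only the constant vector, e.g. a pure Neumann problem. A minimal sketch, assuming DMMGSetKSP() has already been called on the dmmg context:

  /* only the constant vector is in the null space: n = 0 and no fill callback */
  ierr = DMMGSetNullSpace(dmmg,PETSC_TRUE,0,PETSC_NULL);CHKERRQ(ierr);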
Example #2
PetscErrorCode PETSCSNES_DLLEXPORT DMMGSetUpLevel(DMMG *dmmg,KSP ksp,PetscInt nlevels)
{
  PetscErrorCode          ierr;
  PetscInt                i;
  PC                      pc;
  PetscTruth              ismg,ismf,isshell,ismffd;
  KSP                     lksp; /* solver internal to the multigrid preconditioner */
  MPI_Comm                *comms;

  PetscFunctionBegin;
  if (!dmmg) SETERRQ(PETSC_ERR_ARG_NULL,"Passing null as DMMG");

  /* use fgmres on outer iteration by default */
  ierr  = KSPSetType(ksp,KSPFGMRES);CHKERRQ(ierr);
  ierr  = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr  = PCSetType(pc,PCMG);CHKERRQ(ierr);
  ierr  = PetscMalloc(nlevels*sizeof(MPI_Comm),&comms);CHKERRQ(ierr);
  for (i=0; i<nlevels; i++) {
    comms[i] = dmmg[i]->comm;
  }
  ierr  = PCMGSetLevels(pc,nlevels,comms);CHKERRQ(ierr);
  ierr  = PetscFree(comms);CHKERRQ(ierr); 
  ierr =  PCMGSetType(pc,PC_MG_FULL);CHKERRQ(ierr);

  ierr = PetscTypeCompare((PetscObject)pc,PCMG,&ismg);CHKERRQ(ierr);
  if (ismg) {
    /* set solvers for each level */
    for (i=0; i<nlevels; i++) {
      if (i < nlevels-1) { /* don't set for finest level, they are set in PCApply_MG()*/
	ierr = PCMGSetX(pc,i,dmmg[i]->x);CHKERRQ(ierr); 
	ierr = PCMGSetRhs(pc,i,dmmg[i]->b);CHKERRQ(ierr); 
      }
      if (i > 0) {
        ierr = PCMGSetR(pc,i,dmmg[i]->r);CHKERRQ(ierr); 
      }
      /* If we are using a matrix-free multiply and no explicit matrix was provided to build
         the preconditioner, then no preconditioner can be used
      */
      ierr = PetscTypeCompare((PetscObject)dmmg[i]->B,MATSHELL,&isshell);CHKERRQ(ierr);
      ierr = PetscTypeCompare((PetscObject)dmmg[i]->B,MATDAAD,&ismf);CHKERRQ(ierr);
      ierr = PetscTypeCompare((PetscObject)dmmg[i]->B,MATMFFD,&ismffd);CHKERRQ(ierr);
      if (isshell || ismf || ismffd) {
        PC  lpc;
        ierr = PCMGGetSmoother(pc,i,&lksp);CHKERRQ(ierr); 
        ierr = KSPGetPC(lksp,&lpc);CHKERRQ(ierr);
        ierr = PCSetType(lpc,PCNONE);CHKERRQ(ierr);
      }
    }

    /* Set interpolation/restriction between levels */
    for (i=1; i<nlevels; i++) {
      ierr = PCMGSetInterpolation(pc,i,dmmg[i]->R);CHKERRQ(ierr); 
      ierr = PCMGSetRestriction(pc,i,dmmg[i]->R);CHKERRQ(ierr); 
    }
  }
  PetscFunctionReturn(0);
}
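For comparison (not from the source), the same outer-solver scaffolding that DMMGSetUpLevel() builds can be set up directly on a user-created KSP; a minimal sketch using only standard PETSc calls, with a single communicator for all levels:

  ierr = KSPSetType(ksp,KSPFGMRES);CHKERRQ(ierr);            /* fgmres outer iteration */
  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr);
  ierr = PCMGSetLevels(pc,nlevels,PETSC_NULL);CHKERRQ(ierr);  /* PETSC_NULL: one communicator for every level */
  ierr = PCMGSetType(pc,PC_MG_FULL);CHKERRQ(ierr);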
Example #3
File: ex5.c Project: fengyuqi/petsc
int main(int Argc,char **Args)
{
  PetscInt       x_mesh = 15,levels = 3,cycles = 1,use_jacobi = 0;
  PetscInt       i,smooths = 1,*N,its;
  PetscErrorCode ierr;
  PCMGType       am = PC_MG_MULTIPLICATIVE;
  Mat            cmat,mat[20],fmat;
  KSP            cksp,ksp[20],kspmg;
  PetscReal      e[3];  /* l_2 error,max error, residual */
  const char     *shellname;
  Vec            x,solution,X[20],R[20],B[20];
  PC             pcmg,pc;
  PetscBool      flg;

  PetscInitialize(&Argc,&Args,(char*)0,help);

  ierr = PetscOptionsGetInt(NULL,"-x",&x_mesh,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,"-l",&levels,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,"-c",&cycles,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,"-smooths",&smooths,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(NULL,"-a",&flg);CHKERRQ(ierr);

  if (flg) am = PC_MG_ADDITIVE;
  ierr = PetscOptionsHasName(NULL,"-f",&flg);CHKERRQ(ierr);
  if (flg) am = PC_MG_FULL;
  ierr = PetscOptionsHasName(NULL,"-j",&flg);CHKERRQ(ierr);
  if (flg) use_jacobi = 1;

  ierr = PetscMalloc1(levels,&N);CHKERRQ(ierr);
  N[0] = x_mesh;
  for (i=1; i<levels; i++) {
    N[i] = N[i-1]/2;
    if (N[i] < 1) SETERRQ(PETSC_COMM_WORLD,1,"Too many levels");
  }

  ierr = Create1dLaplacian(N[levels-1],&cmat);CHKERRQ(ierr);

  ierr = KSPCreate(PETSC_COMM_WORLD,&kspmg);CHKERRQ(ierr);
  ierr = KSPGetPC(kspmg,&pcmg);CHKERRQ(ierr);
  ierr = KSPSetFromOptions(kspmg);CHKERRQ(ierr);
  ierr = PCSetType(pcmg,PCMG);CHKERRQ(ierr);
  ierr = PCMGSetLevels(pcmg,levels,NULL);CHKERRQ(ierr);
  ierr = PCMGSetType(pcmg,am);CHKERRQ(ierr);

  ierr = PCMGGetCoarseSolve(pcmg,&cksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(cksp,cmat,cmat);CHKERRQ(ierr);
  ierr = KSPGetPC(cksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCLU);CHKERRQ(ierr);
  ierr = KSPSetType(cksp,KSPPREONLY);CHKERRQ(ierr);

  /* zero is finest level */
  for (i=0; i<levels-1; i++) {
    ierr = PCMGSetResidual(pcmg,levels - 1 - i,residual,(Mat)0);CHKERRQ(ierr);
    ierr = MatCreateShell(PETSC_COMM_WORLD,N[i+1],N[i],N[i+1],N[i],(void*)0,&mat[i]);CHKERRQ(ierr);
    ierr = MatShellSetOperation(mat[i],MATOP_MULT,(void (*)(void))restrct);CHKERRQ(ierr);
    ierr = MatShellSetOperation(mat[i],MATOP_MULT_TRANSPOSE_ADD,(void (*)(void))interpolate);CHKERRQ(ierr);
    ierr = PCMGSetInterpolation(pcmg,levels - 1 - i,mat[i]);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pcmg,levels - 1 - i,mat[i]);CHKERRQ(ierr);
    ierr = PCMGSetCyclesOnLevel(pcmg,levels - 1 - i,cycles);CHKERRQ(ierr);

    /* set smoother */
    ierr = PCMGGetSmoother(pcmg,levels - 1 - i,&ksp[i]);CHKERRQ(ierr);
    ierr = KSPGetPC(ksp[i],&pc);CHKERRQ(ierr);
    ierr = PCSetType(pc,PCSHELL);CHKERRQ(ierr);
    ierr = PCShellSetName(pc,"user_precond");CHKERRQ(ierr);
    ierr = PCShellGetName(pc,&shellname);CHKERRQ(ierr);
    ierr = PetscPrintf(PETSC_COMM_WORLD,"level=%D, PCShell name is %s\n",i,shellname);CHKERRQ(ierr);

    /* this is a dummy, since KSP requires that a matrix be passed in */
    ierr = KSPSetOperators(ksp[i],mat[i],mat[i]);CHKERRQ(ierr);
    /*
        We override the matrix passed in by forcing it to use Richardson with
        a user provided application. This is non-standard and this practice
        should be avoided.
    */
    ierr = PCShellSetApplyRichardson(pc,gauss_seidel);CHKERRQ(ierr);
    if (use_jacobi) {
      ierr = PCShellSetApplyRichardson(pc,jacobi);CHKERRQ(ierr);
    }
    ierr = KSPSetType(ksp[i],KSPRICHARDSON);CHKERRQ(ierr);
    ierr = KSPSetInitialGuessNonzero(ksp[i],PETSC_TRUE);CHKERRQ(ierr);
    ierr = KSPSetTolerances(ksp[i],PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,smooths);CHKERRQ(ierr);

    ierr = VecCreateSeq(PETSC_COMM_SELF,N[i],&x);CHKERRQ(ierr);

    X[levels - 1 - i] = x;
    if (i > 0) {
      ierr = PCMGSetX(pcmg,levels - 1 - i,x);CHKERRQ(ierr);
    }
    ierr = VecCreateSeq(PETSC_COMM_SELF,N[i],&x);CHKERRQ(ierr);

    B[levels -1 - i] = x;
    if (i > 0) {
      ierr = PCMGSetRhs(pcmg,levels - 1 - i,x);CHKERRQ(ierr);
    }
    ierr = VecCreateSeq(PETSC_COMM_SELF,N[i],&x);CHKERRQ(ierr);

    R[levels - 1 - i] = x;

    ierr = PCMGSetR(pcmg,levels - 1 - i,x);CHKERRQ(ierr);
  }
  /* create coarse level vectors */
  ierr = VecCreateSeq(PETSC_COMM_SELF,N[levels-1],&x);CHKERRQ(ierr);
  ierr = PCMGSetX(pcmg,0,x);CHKERRQ(ierr); X[0] = x;
  ierr = VecCreateSeq(PETSC_COMM_SELF,N[levels-1],&x);CHKERRQ(ierr);
  ierr = PCMGSetRhs(pcmg,0,x);CHKERRQ(ierr); B[0] = x;

  /* create matrix multiply for finest level */
  ierr = MatCreateShell(PETSC_COMM_WORLD,N[0],N[0],N[0],N[0],(void*)0,&fmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(fmat,MATOP_MULT,(void (*)(void))amult);CHKERRQ(ierr);
  ierr = KSPSetOperators(kspmg,fmat,fmat);CHKERRQ(ierr);

  ierr = CalculateSolution(N[0],&solution);CHKERRQ(ierr);
  ierr = CalculateRhs(B[levels-1]);CHKERRQ(ierr);
  ierr = VecSet(X[levels-1],0.0);CHKERRQ(ierr);

  ierr = residual((Mat)0,B[levels-1],X[levels-1],R[levels-1]);CHKERRQ(ierr);
  ierr = CalculateError(solution,X[levels-1],R[levels-1],e);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"l_2 error %g max error %g resi %g\n",(double)e[0],(double)e[1],(double)e[2]);CHKERRQ(ierr);

  ierr = KSPSolve(kspmg,B[levels-1],X[levels-1]);CHKERRQ(ierr);
  ierr = KSPGetIterationNumber(kspmg,&its);CHKERRQ(ierr);
  ierr = residual((Mat)0,B[levels-1],X[levels-1],R[levels-1]);CHKERRQ(ierr);
  ierr = CalculateError(solution,X[levels-1],R[levels-1],e);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"its %D l_2 error %g max error %g resi %g\n",its,(double)e[0],(double)e[1],(double)e[2]);CHKERRQ(ierr);

  ierr = PetscFree(N);CHKERRQ(ierr);
  ierr = VecDestroy(&solution);CHKERRQ(ierr);

  /* note: we have to keep a list of all the vectors we allocated; this is
     not ideal, but putting it in MGDestroy() is not much better */
  for (i=0; i<levels; i++) {
    ierr = VecDestroy(&X[i]);CHKERRQ(ierr);
    ierr = VecDestroy(&B[i]);CHKERRQ(ierr);
    if (i) {ierr = VecDestroy(&R[i]);CHKERRQ(ierr);}
  }
  for (i=0; i<levels-1; i++) {
    ierr = MatDestroy(&mat[i]);CHKERRQ(ierr);
  }
  ierr = MatDestroy(&cmat);CHKERRQ(ierr);
  ierr = MatDestroy(&fmat);CHKERRQ(ierr);
  ierr = KSPDestroy(&kspmg);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
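With the defaults (-x 15 -l 3) the coarsening loop above produces the mesh sizes N = {15, 7, 3}. An invocation such as ./ex5 -x 63 -l 5 -smooths 2 -f (illustrative; the executable name is assumed) would instead build the hierarchy N = {63, 31, 15, 7, 3}, use two smoothing sweeps per level, and switch to full multigrid.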
Example #4
File: ml.c Project: Kun-Qu/petsc
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode  ierr;
  PetscMPIInt     size;
  FineGridCtx     *PetscMLdata;
  ML              *ml_object;
  ML_Aggregate    *agg_object;
  ML_Operator     *mlmat;
  PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat             A,Aloc; 
  GridCtx         *gridctx; 
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscBool       isSeq, isMPI;
  KSP             smoother;
  PC              subpc;
  PetscInt        mesh_level, old_mesh_level;

  PetscFunctionBegin;
  A = pc->pmat;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);

  if (pc->setupcalled) {
    if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
      /*
       Reuse the interpolation instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
       multiple solves in which the matrix is not changing too quickly.
       */
      ml_object = pc_ml->ml_object;
      gridctx = pc_ml->gridctx;
      Nlevels = pc_ml->Nlevels;
      fine_level = Nlevels - 1;
      gridctx[fine_level].A = A;

      ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
      ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
      if (isMPI){
        ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
      } else if (isSeq) {
        Aloc = A;
        ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
      } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

      ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
      PetscMLdata = pc_ml->PetscMLdata;
      ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
      PetscMLdata->A    = A;
      PetscMLdata->Aloc = Aloc;
      ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
      ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

      mesh_level = ml_object->ML_finest_level;
      while (ml_object->SingleLevel[mesh_level].Rmat->to) {
        old_mesh_level = mesh_level;
        mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;

        /* clean and regenerate A */
        mlmat = &(ml_object->Amat[mesh_level]);
        ML_Operator_Clean(mlmat);
        ML_Operator_Init(mlmat,ml_object->comm);
        ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level);
      }

      level = fine_level - 1;
      if (size == 1) { /* convert ML P, R and A into seqaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++){
          mlmat = &(ml_object->Amat[mllevel]);
          ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      } else { /* convert ML P and R into shell format, ML A into mpiaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++){
          mlmat  = &(ml_object->Amat[mllevel]);
          ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      }

      for (level=0; level<fine_level; level++) {
        if (level > 0){
          ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
        }
        ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
      }
      ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
      ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);

      ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else {
      /* since ML can change the size of vectors/matrices at any level we must destroy everything */
      ierr = PCReset_ML(pc);CHKERRQ(ierr);
      ierr = PCReset_MG(pc);CHKERRQ(ierr);
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  pc_ml->size = size;
  ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
  if (isMPI){ 
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
    ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
  } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

  /* create and initialize struct 'PetscMLdata' */
  ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr); 
  pc_ml->PetscMLdata = PetscMLdata;
  ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr); 

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);   
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr); 

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr); 
  ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;
   
  /* create ML discretization matrix at fine grid */
  /* ML requires input of fine-grid matrix. It determines nlevels. */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols); 
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);

  /* aggregation */
  ML_Aggregate_Create(&agg_object); 
  pc_ml->agg_object = agg_object;

  {
    MatNullSpace mnull;
    ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
    if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
      if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
      else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
      else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
    }
    switch (pc_ml->nulltype) {
    case PCML_NULLSPACE_USER: {
      PetscScalar *nullvec;
      const PetscScalar *v;
      PetscBool has_const;
      PetscInt i,j,mlocal,nvec,M;
      const Vec *vecs;
      if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
      ierr = MatGetSize(A,&M,PETSC_NULL);CHKERRQ(ierr);
      ierr = MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);CHKERRQ(ierr);
      ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
      ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof *nullvec,&nullvec);CHKERRQ(ierr);
      if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
      for (i=0; i<nvec; i++) {
        ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
        for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
        ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
      }
      ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr);
      ierr = PetscFree(nullvec);CHKERRQ(ierr);
    } break;
    case PCML_NULLSPACE_BLOCK:
      ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
      break;
    case PCML_NULLSPACE_SCALAR:
      break;
    default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type");
    }
  }
  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) { 
  case 1:  
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold); 
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor); 
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Set_SpectralNormScheme_Anorm(ml_object);
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->OldHierarchy) {
    Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  } else {
    Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  }
  if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr); 
  /* set default smoothers: Richardson with SOR on every level above the coarsest */
  for (level=1; level<=fine_level; level++){
    ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
    ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
    ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
    ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
  }
  ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
  ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
  pc_ml->gridctx = gridctx;

  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){ 
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
      
      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){ 
      mlmat  = &(ml_object->Pmat[mllevel]);
      ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat  = &(ml_object->Rmat[mllevel-1]);
      ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat  = &(ml_object->Amat[mllevel]);
      ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);  
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++){  
    level1 = level + 1;
    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr); 
    ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr); 
    ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr); 
   
    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr); 
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr); 
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr); 
    
    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr); 
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr); 
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0){
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }  
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* create coarse level and the interpolation between the levels */
  for (level=0; level<fine_level; level++){  
    level1 = level + 1;
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);     
    if (level > 0){
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }    
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);      
  }  
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr); 
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

  /* setupcalled is set to 0 so that MG is set up from scratch */
  pc->setupcalled = 0;  
  ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
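PCSetUp_ML() is internal to PETSc and runs automatically once the preconditioner type is set to PCML. A minimal user-side sketch, assuming A, b and x are an assembled AIJ matrix and matching vectors (option names may differ slightly between PETSc versions):

  KSP ksp;
  PC  pc;
  ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCML);CHKERRQ(ierr);        /* PCSetUp_ML() is invoked during KSPSetUp()/KSPSolve() */
  ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);    /* e.g. -pc_ml_maxNlevels 4 -pc_mg_type full */
  ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);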
Example #5
PETSC_EXTERN void PETSC_STDCALL pcmggetsmoother_(PC pc,PetscInt *l,KSP *ksp,int *__ierr)
{
  *__ierr = PCMGGetSmoother((PC)PetscToPointer((pc)),*l,ksp);
}
Example #6
int
main(int argc, char** argv) {
    int levels = 4;
    int mg_levels = 3;
    PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
    construct_operator(&problem, levels);

    KSP ksp;
    PC pc;

    KSPCreate(PETSC_COMM_WORLD, &ksp);
    KSPSetOperators(ksp, problem.A, problem.A, SAME_PRECONDITIONER);

    KSPSetType(ksp, KSPRICHARDSON);
    
    if (1) {
	KSPGetPC(ksp, &pc);
	PCSetType(pc, PCMG);
	PCMGSetLevels(pc, mg_levels, NULL);
	PCMGSetGalerkin(pc);
	PCMGSetType(pc, PC_MG_MULTIPLICATIVE);
	PCMGSetCycleType(pc, PC_MG_CYCLE_V);
	int ii;
	for (ii=0; ii<mg_levels; ii++) {
	    if (ii == 0) {
		KSP smooth_ksp;
		PCMGGetSmoother(pc, ii, &smooth_ksp);
		KSPSetType(smooth_ksp, KSPPREONLY);
		PC smooth_pc;
		KSPGetPC(smooth_ksp, &smooth_pc);
		PCSetType(smooth_pc, PCLU);
	    } else {
		// set up the smoother.
		KSP smooth_ksp;
		PC smooth_pc;
		PCMGGetSmoother(pc, ii, &smooth_ksp);
		KSPSetType(smooth_ksp, KSPRICHARDSON);
		KSPRichardsonSetScale(smooth_ksp, 2./3.);
		KSPGetPC(smooth_ksp, &smooth_pc);
		PCSetType(smooth_pc, PCJACOBI);
		KSPSetTolerances(smooth_ksp, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT, 2);
	
		//set up the interpolation operator
		Mat prolongation;
		construct_prolongation_operator(ii+1+levels-mg_levels, &prolongation);
		PCMGSetInterpolation(pc, ii, prolongation);
		MatScale(prolongation, 1./2.);
		/* the same (scaled) prolongation matrix is registered as the restriction;
		   PETSc applies its transpose as needed, so the explicitly transposed
		   copy below is never actually used by the preconditioner */
		Mat restriction;
		MatTranspose(prolongation, &restriction);
		PCMGSetRestriction(pc, ii, prolongation);
		MatDestroy(prolongation);
		MatDestroy(restriction);
	    }
	}
    } else {
	KSPGetPC(ksp, &pc);
	PCSetType(pc, PCJACOBI);
    }
	//*/
    /*
    if (0) {
	KSPSetType(ksp, KSPRICHARDSON);
	KSPRichardsonSetScale(ksp, 2./3.);
	KSPGetPC(ksp, &pc);
	PCSetType(pc, PCJACOBI);
    } else {
	PetscOptionsInsertString("-ksp_type richardson");
	PetscOptionsInsertString("-ksp_richardson_scale 0.666666666666666666");
	PetscOptionsInsertString("-pc_type jacobi");
    }
    //*/

    KSPSetInitialGuessNonzero(ksp, PETSC_TRUE);
    KSPSetFromOptions(ksp);
    KSPSetUp(ksp);

    //VecView(problem.x, PETSC_VIEWER_STDOUT_WORLD);
    {
	//CHKERR(PCApply(pc, problem.b, problem.x));
	CHKERR(KSPSolve(ksp, problem.b, problem.x));

	KSPConvergedReason reason;
	CHKERR(KSPGetConvergedReason(ksp, &reason));
	printf("KSPConvergedReason: %d\n", reason);
	
	PetscInt its;
	CHKERR(KSPGetIterationNumber(ksp, &its));
	printf("Num iterations: %d\n", its);

    }
    //compute_residual_norm(&problem);

    VecView(problem.x, PETSC_VIEWER_STDOUT_WORLD);

    PetscFinalize();
    return 0;
}
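Much of the hand-built hierarchy above can usually be expressed through the options database instead. A hedged sketch of roughly equivalent options (standard PETSc MG option names; spellings and defaults differ between versions):

  -ksp_type richardson -pc_type mg -pc_mg_levels 3 -pc_mg_type multiplicative -pc_mg_cycle_type v -pc_mg_galerkin
  -mg_coarse_ksp_type preonly -mg_coarse_pc_type lu
  -mg_levels_ksp_type richardson -mg_levels_ksp_richardson_scale 0.6666 -mg_levels_ksp_max_it 2 -mg_levels_pc_type jacobi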
Example #7
/*@C
    DMMGSetKSP - Sets the linear solver object that will use the grid hierarchy

    Collective on DMMG

    Input Parameters:
+   dmmg - the context
.   func - function to compute linear system matrix on each grid level
-   rhs - function to compute right hand side on each level (need only work on the finest grid
          if you do not use grid sequencing)

    Level: advanced

    Notes: For linear problems this may be called more than once; it reevaluates the matrices on each call.
       Call DMMGSolve() directly several times to solve with the same matrix but different
       right hand sides.
   
.seealso DMMGCreate(), DMMGDestroy(), DMMGSetDM(), DMMGSolve(), DMMGSetMatType()

@*/
PetscErrorCode PETSCSNES_DLLEXPORT DMMGSetKSP(DMMG *dmmg,PetscErrorCode (*rhs)(DMMG,Vec),PetscErrorCode (*func)(DMMG,Mat,Mat))
{
  PetscErrorCode ierr;
  PetscInt       i,nlevels = dmmg[0]->nlevels,level;
  PetscTruth     ismg,galerkin=PETSC_FALSE;
  PC             pc;
  KSP            lksp;
  
  PetscFunctionBegin;
  if (!dmmg) SETERRQ(PETSC_ERR_ARG_NULL,"Passing null as DMMG");

  if (!dmmg[0]->ksp) {
    /* create solvers for each level if they don't already exist*/
    for (i=0; i<nlevels; i++) {

      ierr = KSPCreate(dmmg[i]->comm,&dmmg[i]->ksp);CHKERRQ(ierr);
      ierr = PetscObjectIncrementTabLevel((PetscObject)dmmg[i]->ksp,PETSC_NULL,nlevels-i);CHKERRQ(ierr);
      ierr = KSPSetOptionsPrefix(dmmg[i]->ksp,dmmg[i]->prefix);CHKERRQ(ierr);
      ierr = DMMGSetUpLevel(dmmg,dmmg[i]->ksp,i+1);CHKERRQ(ierr);
      ierr = KSPSetFromOptions(dmmg[i]->ksp);CHKERRQ(ierr);

      /*  If the multigrid is being run with Galerkin coarse operators then these matrices only need to be created
          on the finest level. We do not take advantage of this because Galerkin may not yet have been selected for
          the KSP object. These matrices are also used if grid sequencing is selected for the linear problem; we should
          probably turn off grid sequencing for the linear problem. */
      if (!dmmg[i]->B) {
	ierr = DMGetMatrix(dmmg[i]->dm,dmmg[nlevels-1]->mtype,&dmmg[i]->B);CHKERRQ(ierr);
      } 
      if (!dmmg[i]->J) {
	dmmg[i]->J = dmmg[i]->B;
	ierr = PetscObjectReference((PetscObject) dmmg[i]->J);CHKERRQ(ierr);
      }

      dmmg[i]->solve = DMMGSolveKSP;
      dmmg[i]->rhs   = rhs;
    }
  }

  /* evaluate the matrix on each level */
  ierr = KSPGetPC(dmmg[nlevels-1]->ksp,&pc);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)pc,PCMG,&ismg);CHKERRQ(ierr);
  if (ismg) {
    ierr = PCMGGetGalerkin(pc,&galerkin);CHKERRQ(ierr);
  }
  if (func) {
    if (galerkin) {
      ierr = (*func)(dmmg[nlevels-1],dmmg[nlevels-1]->J,dmmg[nlevels-1]->B);CHKERRQ(ierr);
    } else {
      for (i=0; i<nlevels; i++) {
        ierr = (*func)(dmmg[i],dmmg[i]->J,dmmg[i]->B);CHKERRQ(ierr);
      }
    }
  }

  for (i=0; i<nlevels-1; i++) {
    ierr = KSPSetOptionsPrefix(dmmg[i]->ksp,"dmmg_");CHKERRQ(ierr);
  }

  for (level=0; level<nlevels; level++) {
    ierr = KSPSetOperators(dmmg[level]->ksp,dmmg[level]->J,dmmg[level]->B,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
    ierr = KSPGetPC(dmmg[level]->ksp,&pc);CHKERRQ(ierr);
    if (ismg) {
      for (i=0; i<=level; i++) {
        ierr = PCMGGetSmoother(pc,i,&lksp);CHKERRQ(ierr); 
        ierr = KSPSetOperators(lksp,dmmg[i]->J,dmmg[i]->B,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
      }
    }
  }

  PetscFunctionReturn(0);
}
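A typical call sequence around DMMGSetKSP() looks roughly as follows (sketch only; ComputeRHS() and ComputeMatrix() are hypothetical user callbacks matching the signatures documented above, and da is an already-created DA):

  extern PetscErrorCode ComputeRHS(DMMG,Vec);          /* hypothetical: assembles the right hand side on a level */
  extern PetscErrorCode ComputeMatrix(DMMG,Mat,Mat);   /* hypothetical: assembles J and B on a level */

  ierr = DMMGCreate(PETSC_COMM_WORLD,nlevels,PETSC_NULL,&dmmg);CHKERRQ(ierr);
  ierr = DMMGSetDM(dmmg,(DM)da);CHKERRQ(ierr);
  ierr = DMMGSetKSP(dmmg,ComputeRHS,ComputeMatrix);CHKERRQ(ierr);
  ierr = DMMGSolve(dmmg);CHKERRQ(ierr);
  ierr = DMMGDestroy(dmmg);CHKERRQ(ierr);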
Example #8
void StgFEM_GMG_SolverSetup( void* _solver, void* _stokesSLE ) {
    StgFEM_GMG* self = StgFEM_GMG_selfPointer;
    Stokes_SLE_UzawaSolver* solver = (Stokes_SLE_UzawaSolver*)_solver;
    Stokes_SLE* sle = (Stokes_SLE*)_stokesSLE;
    KSP ksp = solver->velSolver;
    PC pc;
    int ii;

    Journal_DPrintf( solver->debug, "In %s:\n", __func__ );
    Stream_IndentBranch( StgFEM_Debug );

    KSPSetType( ksp, KSPFGMRES );
    KSPGetPC( ksp, &pc );
    PCSetType( pc, PCMG );
    PCMGSetLevels( pc, self->numLevels, PETSC_NULL );
    PCMGSetType( pc, PC_MG_MULTIPLICATIVE );
    #if ((PETSC_VERSION_MAJOR==3) && (PETSC_VERSION_MINOR>=2) )
    PCMGSetGalerkin( pc, PETSC_TRUE );
    #else
    PCMGSetGalerkin( pc );
    #endif
    /* Set the operators for each level. */
    {
        Mat *pOps, *rOps;
        MGOpGenerator_SetNumLevels( self->opGen, self->numLevels );
        MGOpGenerator_Generate( self->opGen, &pOps, &rOps );
        for( ii = 1; ii < self->numLevels; ii++ )
            PCMGSetInterpolation( pc, ii, pOps[ii] );
        Memory_Free( pOps );
        Memory_Free( rOps );
    }

    /* Set the solvers on each level. */
    for( ii = 1; ii < self->numLevels; ii++ ) {
        KSP smoother;
        PC smPc;
        PCMGGetSmoother( pc, ii, &smoother );
        KSPSetType( smoother, KSPRICHARDSON );
        KSPGetPC( smoother, &smPc );
        PCSetType( smPc, PCSOR );
        KSPSetTolerances( smoother, PETSC_DEFAULT, PETSC_DEFAULT,
                          PETSC_DEFAULT, 2 );
    }
    

    Stg_KSPSetOperators( ksp, sle->kStiffMat->matrix, sle->kStiffMat->matrix,
                     DIFFERENT_NONZERO_PATTERN );
    KSPSetFromOptions( ksp );

    /* Because we stole the setup routine we need to do this too. */
    if( solver->pcSolver ) {
        Journal_DPrintfL( solver->debug, 1,
                          "Setting up MatrixSolver for the "
                          "Preconditioner.\n" );
        Stg_KSPSetOperators( solver->pcSolver, solver->preconditioner->matrix,
                         solver->preconditioner->matrix,
                         DIFFERENT_NONZERO_PATTERN );
        KSPSetFromOptions( solver->pcSolver );
    }

    Stream_UnIndentBranch( StgFEM_Debug );
}
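The per-level smoother loop used here (and in PCSetUp_ML() in Example #4) recurs often enough that it can be factored into a small helper; a self-contained sketch, assuming the PC already has its levels set:

  /* Configure Richardson/SOR smoothers on every level above the coarse solve (level 0). */
  static PetscErrorCode SetRichardsonSORSmoothers(PC pc,PetscInt nlevels,PetscInt sweeps)
  {
    PetscErrorCode ierr;
    PetscInt       ii;
    PetscFunctionBegin;
    for (ii=1; ii<nlevels; ii++) {
      KSP smoother;
      PC  smpc;
      ierr = PCMGGetSmoother(pc,ii,&smoother);CHKERRQ(ierr);
      ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
      ierr = KSPGetPC(smoother,&smpc);CHKERRQ(ierr);
      ierr = PCSetType(smpc,PCSOR);CHKERRQ(ierr);
      ierr = KSPSetTolerances(smoother,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,sweeps);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }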