MGSolver_PETScData* MultigridSolver_CreateCoarseSolver( MultigridSolver* self, Mat matrix ) {
	//MatrixSolver*	coarseSolver;
	MGSolver_PETScData* coarseSolver;
	unsigned	nProcs;
	PC		pc;

	MPI_Comm_size( self->mgData->comm, (int*)&nProcs );

	/*
	coarseSolver = (MatrixSolver*)PETScMatrixSolver_New( "" );
	PETScMatrixSolver_SetKSPType( coarseSolver, 
				      PETScMatrixSolver_KSPType_PreOnly );
	if( nProcs == 1 ) {
		PETScMatrixSolver_SetPCType( coarseSolver, 
					     PETScMatrixSolver_PCType_LU );
	}
	else {
		PETScMatrixSolver_SetPCType( coarseSolver, 
					     PETScMatrixSolver_PCType_RedundantLU );
	}
	MatrixSolver_SetMatrix( coarseSolver, matrix );
	*/
	
	/* Allocate and zero the coarse-solver data structure before populating it. */
	coarseSolver = (MGSolver_PETScData*)calloc( 1, sizeof(MGSolver_PETScData) );

	/* The coarse solve is a single application of a direct factorisation:
	   plain LU on one process, redundant LU otherwise. */
	KSPCreate( self->mgData->comm, &coarseSolver->ksp );
	KSPSetType( coarseSolver->ksp, KSPPREONLY );
	KSPGetPC( coarseSolver->ksp, &pc );
	if( nProcs == 1 ) {
		PCSetType( pc, PCLU );
	}
	else {
		PCSetType( pc, PCREDUNDANT );
#if ( (PETSC_VERSION_MAJOR >= 3) && (PETSC_VERSION_MINOR >= 2) )
		{
			KSP innerKsp;
			PC  innerPc;
			PCRedundantGetKSP( pc, &innerKsp );
			KSPGetPC( innerKsp, &innerPc );
			PCSetType( innerPc, PCLU );
		}
#else
		PCRedundantGetPC( pc, &pc );
		PCSetType( pc, PCLU );
#endif
	}

	if( coarseSolver->matrix != PETSC_NULL ) {
		Stg_MatDestroy( &coarseSolver->matrix );
	}
	coarseSolver->matrix = matrix;
	Stg_KSPSetOperators( coarseSolver->ksp, matrix, matrix, DIFFERENT_NONZERO_PATTERN );

	return coarseSolver;
}
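For comparison, here is a minimal, self-contained sketch of the same coarse-solver configuration in plain PETSc (KSPPREONLY with LU on one process, PCREDUNDANT wrapping LU in parallel). The helper name create_coarse_ksp and the already-assembled matrix A are illustrative assumptions, not part of the code above.

#include <petscksp.h>

/* Illustrative helper: configure a coarse-grid KSP as a single direct solve. */
static PetscErrorCode create_coarse_ksp(MPI_Comm comm, Mat A, KSP *coarse)
{
  PC             pc;
  PetscMPIInt    size;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = KSPCreate(comm,coarse);CHKERRQ(ierr);
  ierr = KSPSetType(*coarse,KSPPREONLY);CHKERRQ(ierr);
  ierr = KSPGetPC(*coarse,&pc);CHKERRQ(ierr);
  if (size == 1) {
    ierr = PCSetType(pc,PCLU);CHKERRQ(ierr);          /* serial: plain LU factorisation */
  } else {
    KSP inner;
    PC  innerpc;
    ierr = PCSetType(pc,PCREDUNDANT);CHKERRQ(ierr);   /* parallel: redundant copies of the matrix */
    ierr = PCRedundantGetKSP(pc,&inner);CHKERRQ(ierr);
    ierr = KSPGetPC(inner,&innerpc);CHKERRQ(ierr);
    ierr = PCSetType(innerpc,PCLU);CHKERRQ(ierr);     /* each copy factored with LU */
  }
  ierr = KSPSetOperators(*coarse,A,A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}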
Example #2
File: ex28.c  Project: 00liujj/petsc
#include <petscksp.h>

static char help[] = "Solves a tridiagonal linear system with KSP, optionally exercising PCREDUNDANT.\n\n";

int main(int argc,char **args)
{
  Vec            x, b, u;     /* approx solution, RHS, exact solution */
  Mat            A;           /* linear system matrix */
  KSP            ksp;         /* linear solver context */
  PC             pc;          /* preconditioner context */
  PetscReal      norm;        /* norm of solution error */
  PetscErrorCode ierr;
  PetscInt       i,n = 10,col[3],its,rstart,rend,nlocal;
  PetscScalar    neg_one = -1.0,one = 1.0,value[3];
  PetscBool      TEST_PROCEDURAL=PETSC_FALSE;

  PetscInitialize(&argc,&args,(char*)0,help);
  ierr = PetscOptionsGetInt(NULL,"-n",&n,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,"-procedural",&TEST_PROCEDURAL,NULL);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create vectors.  Note that we form 1 vector from scratch and
     then duplicate as needed. For this simple case let PETSc decide how
     many elements of the vector are stored on each processor. The second
     argument to VecSetSizes() below causes PETSc to decide.
  */
  ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr);
  ierr = VecSetSizes(x,PETSC_DECIDE,n);CHKERRQ(ierr);
  ierr = VecSetFromOptions(x);CHKERRQ(ierr);
  ierr = VecDuplicate(x,&b);CHKERRQ(ierr);
  ierr = VecDuplicate(x,&u);CHKERRQ(ierr);

  /* Identify the starting and ending mesh points on each
     processor for the interior part of the mesh. We let PETSc decide
     above. */

  ierr = VecGetOwnershipRange(x,&rstart,&rend);CHKERRQ(ierr);
  ierr = VecGetLocalSize(x,&nlocal);CHKERRQ(ierr);

  /* Create a tridiagonal matrix. See ../tutorials/ex23.c */
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,nlocal,nlocal,n,n);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatSetUp(A);CHKERRQ(ierr);
  /* Assemble matrix */
  if (!rstart) {
    rstart = 1;
    i      = 0; col[0] = 0; col[1] = 1; value[0] = 2.0; value[1] = -1.0;
    ierr   = MatSetValues(A,1,&i,2,col,value,INSERT_VALUES);CHKERRQ(ierr);
  }
  if (rend == n) {
    rend = n-1;
    i    = n-1; col[0] = n-2; col[1] = n-1; value[0] = -1.0; value[1] = 2.0;
    ierr = MatSetValues(A,1,&i,2,col,value,INSERT_VALUES);CHKERRQ(ierr);
  }

  /* Set entries corresponding to the mesh interior */
  value[0] = -1.0; value[1] = 2.0; value[2] = -1.0;
  for (i=rstart; i<rend; i++) {
    col[0] = i-1; col[1] = i; col[2] = i+1;
    ierr   = MatSetValues(A,1,&i,3,col,value,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* Set exact solution; then compute right-hand-side vector. */
  ierr = VecSet(u,one);CHKERRQ(ierr);
  ierr = MatMult(A,u,b);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp,A,A);CHKERRQ(ierr);

  /*
     Set linear solver defaults for this problem (optional).
     - By extracting the KSP and PC contexts from the KSP context,
       we can then directly call any KSP and PC routines to set
       various options.
     - The following statements are optional; all of these
       parameters could alternatively be specified at runtime via
       KSPSetFromOptions();
  */
  if (TEST_PROCEDURAL) {
    /* Example of runtime options: '-pc_redundant_number 3 -redundant_ksp_type gmres -redundant_pc_type bjacobi' */
    PetscMPIInt size,rank,subsize;
    Mat         A_redundant;
    KSP         innerksp;
    PC          innerpc;
    MPI_Comm    subcomm;

    ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
    ierr = PCSetType(pc,PCREDUNDANT);CHKERRQ(ierr);
    ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
    ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
    if (size < 3) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ, "Number of processes %d must be greater than 2",size);
    ierr = PCRedundantSetNumber(pc,size-2);CHKERRQ(ierr);
    ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);

    /* Get subcommunicator and redundant matrix */
    ierr = KSPSetUp(ksp);CHKERRQ(ierr);
    ierr = PCRedundantGetKSP(pc,&innerksp);CHKERRQ(ierr);
    ierr = KSPGetPC(innerksp,&innerpc);CHKERRQ(ierr);
    ierr = PCGetOperators(innerpc,NULL,&A_redundant);CHKERRQ(ierr);
    ierr = PetscObjectGetComm((PetscObject)A_redundant,&subcomm);CHKERRQ(ierr); 
    ierr = MPI_Comm_size(subcomm,&subsize);CHKERRQ(ierr);
    if (subsize==1 && !rank) {
      printf("A_redundant:\n");
      ierr = MatView(A_redundant,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
    }
  } else {
    ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
  }
  
  /*  Solve linear system */
  ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);

  /* Check the error */
  ierr = VecAXPY(x,neg_one,u);CHKERRQ(ierr);
  ierr = VecNorm(x,NORM_2,&norm);CHKERRQ(ierr);
  ierr = KSPGetIterationNumber(ksp,&its);CHKERRQ(ierr);
  if (norm > 1.e-14) {
    ierr = PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g, Iterations %D\n",(double)norm,its);CHKERRQ(ierr);
  }

  /* Free work space. */
  ierr = VecDestroy(&x);CHKERRQ(ierr); ierr = VecDestroy(&u);CHKERRQ(ierr);
  ierr = VecDestroy(&b);CHKERRQ(ierr); ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = KSPDestroy(&ksp);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
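For reference, the procedural branch above requires more than two MPI ranks (see the SETERRQ1 check) and is driven by the run-time options quoted in its comment; a typical invocation would be something like: mpiexec -n 5 ./ex28 -procedural -pc_redundant_number 3 -redundant_ksp_type gmres -redundant_pc_type bjacobi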
Example #3
static PetscErrorCode PCSetUp_Redundant(PC pc)
{
  PC_Redundant   *red = (PC_Redundant*)pc->data;
  PetscErrorCode ierr;
  PetscInt       mstart,mend,mlocal,M;
  PetscMPIInt    size;
  MPI_Comm       comm,subcomm;
  Vec            x;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);

  /* if pmatrix set by user is sequential then we do not need to gather the parallel matrix */
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size == 1) red->useparallelmat = PETSC_FALSE;

  if (!pc->setupcalled) {
    PetscInt mloc_sub;
    if (!red->psubcomm) { /* create red->psubcomm, new ksp and pc over subcomm */
      KSP ksp;
      ierr = PCRedundantGetKSP(pc,&ksp);CHKERRQ(ierr);
    }
    subcomm = PetscSubcommChild(red->psubcomm);

    if (red->useparallelmat) {
      /* grab the parallel matrix and put it into processors of a subcommunicator */
      ierr = MatCreateRedundantMatrix(pc->pmat,red->psubcomm->n,subcomm,MAT_INITIAL_MATRIX,&red->pmats);CHKERRQ(ierr);

      ierr = MPI_Comm_size(subcomm,&size);CHKERRQ(ierr);
      if (size > 1) {
        PetscBool foundpack;
        ierr = MatGetFactorAvailable(red->pmats,NULL,MAT_FACTOR_LU,&foundpack);CHKERRQ(ierr);
        if (!foundpack) { /* reset default ksp and pc */
          ierr = KSPSetType(red->ksp,KSPGMRES);CHKERRQ(ierr);
          ierr = PCSetType(red->pc,PCBJACOBI);CHKERRQ(ierr);
        } else {
          ierr = PCFactorSetMatSolverType(red->pc,NULL);CHKERRQ(ierr);
        }
      }

      ierr = KSPSetOperators(red->ksp,red->pmats,red->pmats);CHKERRQ(ierr);

      /* get working vectors xsub and ysub */
      ierr = MatCreateVecs(red->pmats,&red->xsub,&red->ysub);CHKERRQ(ierr);

      /* create working vectors xdup and ydup.
       xdup concatenates all xsub's contiguously to form an MPI vector over dupcomm  (see PetscSubcommCreate_interlaced())
       ydup concatenates all ysub and has empty local arrays because ysub's arrays will be placed into it.
       Note: we use communicator dupcomm, not PetscObjectComm((PetscObject)pc)! */
      ierr = MatGetLocalSize(red->pmats,&mloc_sub,NULL);CHKERRQ(ierr);
      ierr = VecCreateMPI(PetscSubcommContiguousParent(red->psubcomm),mloc_sub,PETSC_DECIDE,&red->xdup);CHKERRQ(ierr);
      ierr = VecCreateMPIWithArray(PetscSubcommContiguousParent(red->psubcomm),1,mloc_sub,PETSC_DECIDE,NULL,&red->ydup);CHKERRQ(ierr);

      /* create vecscatters */
      if (!red->scatterin) { /* efficiency of scatterin is independent from psubcomm_type! */
        IS       is1,is2;
        PetscInt *idx1,*idx2,i,j,k;

        ierr = MatCreateVecs(pc->pmat,&x,0);CHKERRQ(ierr);
        ierr = VecGetSize(x,&M);CHKERRQ(ierr);
        ierr = VecGetOwnershipRange(x,&mstart,&mend);CHKERRQ(ierr);
        mlocal = mend - mstart;
        ierr = PetscMalloc2(red->psubcomm->n*mlocal,&idx1,red->psubcomm->n*mlocal,&idx2);CHKERRQ(ierr);
        j    = 0;
        for (k=0; k<red->psubcomm->n; k++) {
          for (i=mstart; i<mend; i++) {
            idx1[j]   = i;
            idx2[j++] = i + M*k;
          }
        }
        ierr = ISCreateGeneral(comm,red->psubcomm->n*mlocal,idx1,PETSC_COPY_VALUES,&is1);CHKERRQ(ierr);
        ierr = ISCreateGeneral(comm,red->psubcomm->n*mlocal,idx2,PETSC_COPY_VALUES,&is2);CHKERRQ(ierr);
        ierr = VecScatterCreateWithData(x,is1,red->xdup,is2,&red->scatterin);CHKERRQ(ierr);
        ierr = ISDestroy(&is1);CHKERRQ(ierr);
        ierr = ISDestroy(&is2);CHKERRQ(ierr);

        /* Impl below is good for PETSC_SUBCOMM_INTERLACED (no inter-process communication) and PETSC_SUBCOMM_CONTIGUOUS (communication within subcomm) */
        ierr = ISCreateStride(comm,mlocal,mstart+ red->psubcomm->color*M,1,&is1);CHKERRQ(ierr);
        ierr = ISCreateStride(comm,mlocal,mstart,1,&is2);CHKERRQ(ierr);
        ierr = VecScatterCreateWithData(red->xdup,is1,x,is2,&red->scatterout);CHKERRQ(ierr);
        ierr = ISDestroy(&is1);CHKERRQ(ierr);
        ierr = ISDestroy(&is2);CHKERRQ(ierr);
        ierr = PetscFree2(idx1,idx2);CHKERRQ(ierr);
        ierr = VecDestroy(&x);CHKERRQ(ierr);
      }
    } else { /* !red->useparallelmat */
      ierr = KSPSetOperators(red->ksp,pc->mat,pc->pmat);CHKERRQ(ierr);
    }
  } else { /* pc->setupcalled */
    if (red->useparallelmat) {
      MatReuse       reuse;
      /* grab the parallel matrix and put it into processors of a subcommunicator */
      /*--------------------------------------------------------------------------*/
      if (pc->flag == DIFFERENT_NONZERO_PATTERN) {
        /* destroy old matrices */
        ierr  = MatDestroy(&red->pmats);CHKERRQ(ierr);
        reuse = MAT_INITIAL_MATRIX;
      } else {
        reuse = MAT_REUSE_MATRIX;
      }
      ierr = MatCreateRedundantMatrix(pc->pmat,red->psubcomm->n,PetscSubcommChild(red->psubcomm),reuse,&red->pmats);CHKERRQ(ierr);
      ierr = KSPSetOperators(red->ksp,red->pmats,red->pmats);CHKERRQ(ierr);
    } else { /* !red->useparallelmat */
      ierr = KSPSetOperators(red->ksp,pc->mat,pc->pmat);CHKERRQ(ierr);
    }
  }

  if (pc->setfromoptionscalled) {
    ierr = KSPSetFromOptions(red->ksp);CHKERRQ(ierr);
  }
  ierr = KSPSetUp(red->ksp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
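To make the scatter construction in PCSetUp_Redundant concrete, the following small standalone sketch (with made-up values for M, mstart, mend and the number of redundant copies) builds the same idx1/idx2 pairs used for red->scatterin: every copy receives this rank's rows [mstart,mend), and the k-th copy stores them at offset k*M in the duplicated vector.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const int M      = 8;   /* global vector size (assumed)              */
    const int mstart = 2;   /* this rank's ownership range (assumed)     */
    const int mend   = 4;
    const int nsub   = 3;   /* number of redundant copies (psubcomm->n)  */
    int mlocal = mend - mstart;
    int *idx1 = malloc(sizeof(int) * nsub * mlocal);
    int *idx2 = malloc(sizeof(int) * nsub * mlocal);
    int i, k, j = 0;

    for (k = 0; k < nsub; k++) {
        for (i = mstart; i < mend; i++) {
            idx1[j]   = i;          /* source index in the original vector x     */
            idx2[j++] = i + M * k;  /* destination index in the duplicated xdup  */
        }
    }
    for (j = 0; j < nsub * mlocal; j++)
        printf("copy %d: x[%d] -> xdup[%d]\n", idx2[j] / M, idx1[j], idx2[j]);
    free(idx1);
    free(idx2);
    return 0;
}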
Example #4
/*@C
   PCMGSetLevels - Sets the number of levels to use with MG.
   Must be called before any other MG routine.

   Logically Collective on PC

   Input Parameters:
+  pc - the preconditioner context
.  levels - the number of levels
-  comms - optional communicators for each level; this is to allow solving the coarser problems
           on smaller sets of processors. Use NULL_OBJECT for default in Fortran

   Level: intermediate

   Notes:
     If the number of levels is one then the multigrid uses the -mg_levels prefix
  for setting the level options rather than the -mg_coarse prefix.

.keywords: MG, set, levels, multigrid

.seealso: PCMGSetType(), PCMGGetLevels()
@*/
PetscErrorCode  PCMGSetLevels(PC pc,PetscInt levels,MPI_Comm *comms)
{
    PetscErrorCode ierr;
    PC_MG          *mg        = (PC_MG*)pc->data;
    MPI_Comm       comm;
    PC_MG_Levels   **mglevels = mg->levels;
    PetscInt       i;
    PetscMPIInt    size;
    const char     *prefix;
    PC             ipc;
    PetscInt       n;

    PetscFunctionBegin;
    PetscValidHeaderSpecific(pc,PC_CLASSID,1);
    PetscValidLogicalCollectiveInt(pc,levels,2);
    ierr = PetscObjectGetComm((PetscObject)pc,&comm);
    CHKERRQ(ierr);
    if (mg->nlevels == levels) PetscFunctionReturn(0);
    if (mglevels) {
        /* changing the number of levels so free up the previous stuff */
        ierr = PCReset_MG(pc);
        CHKERRQ(ierr);
        n    = mglevels[0]->levels;
        for (i=0; i<n; i++) {
            if (mglevels[i]->smoothd != mglevels[i]->smoothu) {
                ierr = KSPDestroy(&mglevels[i]->smoothd);
                CHKERRQ(ierr);
            }
            ierr = KSPDestroy(&mglevels[i]->smoothu);
            CHKERRQ(ierr);
            ierr = PetscFree(mglevels[i]);
            CHKERRQ(ierr);
        }
        ierr = PetscFree(mg->levels);
        CHKERRQ(ierr);
    }

    mg->nlevels = levels;

    ierr = PetscMalloc(levels*sizeof(PC_MG*),&mglevels);
    CHKERRQ(ierr);
    ierr = PetscLogObjectMemory(pc,levels*(sizeof(PC_MG*)));
    CHKERRQ(ierr);

    ierr = PCGetOptionsPrefix(pc,&prefix);
    CHKERRQ(ierr);

    mg->stageApply = 0;
    for (i=0; i<levels; i++) {
        ierr = PetscNewLog(pc,PC_MG_Levels,&mglevels[i]);
        CHKERRQ(ierr);

        mglevels[i]->level               = i;
        mglevels[i]->levels              = levels;
        mglevels[i]->cycles              = PC_MG_CYCLE_V;
        mg->default_smoothu              = 2;
        mg->default_smoothd              = 2;
        mglevels[i]->eventsmoothsetup    = 0;
        mglevels[i]->eventsmoothsolve    = 0;
        mglevels[i]->eventresidual       = 0;
        mglevels[i]->eventinterprestrict = 0;

        if (comms) comm = comms[i];
        ierr = KSPCreate(comm,&mglevels[i]->smoothd);
        CHKERRQ(ierr);
        ierr = KSPSetType(mglevels[i]->smoothd,KSPCHEBYSHEV);
        CHKERRQ(ierr);
        ierr = KSPSetConvergenceTest(mglevels[i]->smoothd,KSPSkipConverged,NULL,NULL);
        CHKERRQ(ierr);
        ierr = KSPSetNormType(mglevels[i]->smoothd,KSP_NORM_NONE);
        CHKERRQ(ierr);
        ierr = KSPGetPC(mglevels[i]->smoothd,&ipc);
        CHKERRQ(ierr);
        ierr = PCSetType(ipc,PCSOR);
        CHKERRQ(ierr);
        ierr = PetscObjectIncrementTabLevel((PetscObject)mglevels[i]->smoothd,(PetscObject)pc,levels-i);
        CHKERRQ(ierr);
        ierr = KSPSetTolerances(mglevels[i]->smoothd,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT, i ? mg->default_smoothd : 1);
        CHKERRQ(ierr);
        ierr = KSPSetOptionsPrefix(mglevels[i]->smoothd,prefix);
        CHKERRQ(ierr);

        /* do special stuff for coarse grid */
        if (!i && levels > 1) {
            ierr = KSPAppendOptionsPrefix(mglevels[0]->smoothd,"mg_coarse_");
            CHKERRQ(ierr);

            /* coarse solve is (redundant) LU by default; set shifttype NONZERO to avoid annoying zero-pivot in LU preconditioner */
            ierr = KSPSetType(mglevels[0]->smoothd,KSPPREONLY);
            CHKERRQ(ierr);
            ierr = KSPGetPC(mglevels[0]->smoothd,&ipc);
            CHKERRQ(ierr);
            ierr = MPI_Comm_size(comm,&size);
            CHKERRQ(ierr);
            if (size > 1) {
                KSP innerksp;
                PC  innerpc;
                ierr = PCSetType(ipc,PCREDUNDANT);
                CHKERRQ(ierr);
                ierr = PCRedundantGetKSP(ipc,&innerksp);
                CHKERRQ(ierr);
                ierr = KSPGetPC(innerksp,&innerpc);
                CHKERRQ(ierr);
                ierr = PCFactorSetShiftType(innerpc,MAT_SHIFT_INBLOCKS);
                CHKERRQ(ierr);
            } else {
                ierr = PCSetType(ipc,PCLU);
                CHKERRQ(ierr);
                ierr = PCFactorSetShiftType(ipc,MAT_SHIFT_INBLOCKS);
                CHKERRQ(ierr);
            }
        } else {
            char tprefix[128];
            sprintf(tprefix,"mg_levels_%d_",(int)i);
            ierr = KSPAppendOptionsPrefix(mglevels[i]->smoothd,tprefix);
            CHKERRQ(ierr);
        }
        ierr = PetscLogObjectParent(pc,mglevels[i]->smoothd);
        CHKERRQ(ierr);

        mglevels[i]->smoothu = mglevels[i]->smoothd;
        mg->rtol             = 0.0;
        mg->abstol           = 0.0;
        mg->dtol             = 0.0;
        mg->ttol             = 0.0;
        mg->cyclesperpcapply = 1;
    }
    mg->am                   = PC_MG_MULTIPLICATIVE;
    mg->levels               = mglevels;
    pc->ops->applyrichardson = PCApplyRichardson_MG;
    PetscFunctionReturn(0);
}
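A hedged usage sketch (not taken from the PETSc sources) of calling PCMGSetLevels() from user code: create a KSP, switch its PC to PCMG, and pass NULL for comms to keep every level on the original communicator. Per-level interpolation/restriction operators still have to be supplied (for example with PCMGSetInterpolation()) before the solve.

#include <petscksp.h>

/* Illustrative setup of a 3-level multigrid-preconditioned KSP. */
static PetscErrorCode setup_mg_ksp(MPI_Comm comm, Mat A, KSP *ksp)
{
  PC             pc;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = KSPCreate(comm,ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(*ksp,A,A);CHKERRQ(ierr);
  ierr = KSPGetPC(*ksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr);
  ierr = PCMGSetLevels(pc,3,NULL);CHKERRQ(ierr);  /* level 0 gets the (redundant) LU coarse default set above */
  ierr = KSPSetFromOptions(*ksp);CHKERRQ(ierr);
  /* per-level interpolation/restriction must still be set before KSPSolve() */
  PetscFunctionReturn(0);
}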
Example #5
void PETSC_STDCALL  pcredundantgetksp_(PC pc,KSP *innerksp, int *__ierr ){
*__ierr = PCRedundantGetKSP(
	(PC)PetscToPointer((pc) ),innerksp);
}
Example #6
File: nn.c  Project: Kun-Qu/petsc
EXTERN_C_END


/* -------------------------------------------------------------------------- */
/*
   PCNNCreateCoarseMatrix - Creates and assembles the global coarse matrix, and sets up the redundant coarse solver, for the NN preconditioner.
*/
#undef __FUNCT__  
#define __FUNCT__ "PCNNCreateCoarseMatrix"
PetscErrorCode PCNNCreateCoarseMatrix (PC pc)
{
  MPI_Request    *send_request, *recv_request;
  PetscErrorCode ierr;
  PetscInt       i, j, k;
  PetscScalar*   mat;    /* Sub-matrix with this subdomain's contribution to the coarse matrix             */
  PetscScalar**  DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */

  /* aliasing some names */
  PC_IS*         pcis     = (PC_IS*)(pc->data);
  PC_NN*         pcnn     = (PC_NN*)pc->data;
  PetscInt       n_neigh  = pcis->n_neigh;
  PetscInt*      neigh    = pcis->neigh;
  PetscInt*      n_shared = pcis->n_shared;
  PetscInt**     shared   = pcis->shared;  
  PetscScalar**  DZ_IN;   /* Must be initialized after memory allocation. */

  PetscFunctionBegin;
  /* Allocate memory for mat (the +1 is to handle the case n_neigh equal to zero) */
  ierr = PetscMalloc((n_neigh*n_neigh+1)*sizeof(PetscScalar),&mat);CHKERRQ(ierr);

  /* Allocate memory for DZ */
  /* Notice that DZ_OUT[0] is allocated some space that is never used. */
  /* This is just so that DZ_OUT and DZ_IN have exactly the same form. */
  {
    PetscInt size_of_Z = 0;
    ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&pcnn->DZ_IN);CHKERRQ(ierr);
    DZ_IN = pcnn->DZ_IN;
    ierr  = PetscMalloc ((n_neigh+1)*sizeof(PetscScalar*),&DZ_OUT);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      size_of_Z += n_shared[i];
    }
    ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_IN[0]);CHKERRQ(ierr);
    ierr = PetscMalloc ((size_of_Z+1)*sizeof(PetscScalar),&DZ_OUT[0]);CHKERRQ(ierr);
  }
  for (i=1; i<n_neigh; i++) {
    DZ_IN[i]  = DZ_IN [i-1] + n_shared[i-1];
    DZ_OUT[i] = DZ_OUT[i-1] + n_shared[i-1];
  }

  /* Set the values of DZ_OUT, in order to send this info to the neighbours */
  /* First, set the auxiliary array pcis->work_N. */
  ierr = PCISScatterArrayNToVecB(pcis->work_N,pcis->D,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (i=1; i<n_neigh; i++){
    for (j=0; j<n_shared[i]; j++) {
      DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
    }
  }

  /* Non-blocking send/receive the common-interface chunks of scaled nullspaces */
  /* Notice that send_request[] and recv_request[] could have one less element. */
  /* We make them longer to have request[i] corresponding to neigh[i].          */
  {
    PetscMPIInt tag;
    ierr = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);
    ierr = PetscMalloc((2*(n_neigh)+1)*sizeof(MPI_Request),&send_request);CHKERRQ(ierr);
    recv_request = send_request + (n_neigh);
    for (i=1; i<n_neigh; i++) {
      ierr = MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,((PetscObject)pc)->comm,&(send_request[i]));CHKERRQ(ierr);
      ierr = MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,((PetscObject)pc)->comm,&(recv_request[i]));CHKERRQ(ierr);
    }
  }

  /* Set DZ_IN[0][] (recall that neigh[0]==rank, always) */
  for(j=0; j<n_shared[0]; j++) {
    DZ_IN[0][j] = pcis->work_N[shared[0][j]];
  }

  /* Start computing with local D*Z while communication goes on.    */
  /* Apply Schur complement. The result is "stored" in vec (more    */
  /* precisely, vec points to the result, stored in pc_nn->vec1_B)  */
  /* and also scattered to pcnn->work_N.                            */
  ierr = PCNNApplySchurToChunk(pc,n_shared[0],shared[0],DZ_IN[0],pcis->work_N,pcis->vec1_B,
                               pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);

  /* Compute the first column, while completing the receiving. */
  for (i=0; i<n_neigh; i++) {
    MPI_Status  stat;
    PetscMPIInt ind=0;
    if (i>0) { ierr = MPI_Waitany(n_neigh-1,recv_request+1,&ind,&stat);CHKERRQ(ierr); ind++;}
    mat[ind*n_neigh+0] = 0.0;
    for (k=0; k<n_shared[ind]; k++) {
      mat[ind*n_neigh+0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
    }
  }

  /* Compute the remaining columns */
  for (j=1; j<n_neigh; j++) {
    ierr = PCNNApplySchurToChunk(pc,n_shared[j],shared[j],DZ_IN[j],pcis->work_N,pcis->vec1_B,
                                 pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      mat[i*n_neigh+j] = 0.0;
      for (k=0; k<n_shared[i]; k++) {
	mat[i*n_neigh+j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
      }
    }
  }

  /* Complete the sending. */
  if (n_neigh>1) {
    MPI_Status *stat;
    ierr = PetscMalloc((n_neigh-1)*sizeof(MPI_Status),&stat);CHKERRQ(ierr);
    if (n_neigh-1) {ierr = MPI_Waitall(n_neigh-1,&(send_request[1]),stat);CHKERRQ(ierr);}
    ierr = PetscFree(stat);CHKERRQ(ierr);
  }

  /* Free the memory for the MPI requests */
  ierr = PetscFree(send_request);CHKERRQ(ierr);

  /* Free the memory for DZ_OUT */
  if (DZ_OUT) {
    ierr = PetscFree(DZ_OUT[0]);CHKERRQ(ierr); 
    ierr = PetscFree(DZ_OUT);CHKERRQ(ierr);
  }

  {
    PetscMPIInt size;
    ierr = MPI_Comm_size(((PetscObject)pc)->comm,&size);CHKERRQ(ierr);
    /* Create the global coarse vectors (rhs and solution). */
    ierr = VecCreateMPI(((PetscObject)pc)->comm,1,size,&(pcnn->coarse_b));CHKERRQ(ierr);
    ierr = VecDuplicate(pcnn->coarse_b,&(pcnn->coarse_x));CHKERRQ(ierr);
    /* Create and set the global coarse AIJ matrix. */
    ierr = MatCreate(((PetscObject)pc)->comm,&(pcnn->coarse_mat));CHKERRQ(ierr);
    ierr = MatSetSizes(pcnn->coarse_mat,1,1,size,size);CHKERRQ(ierr);
    ierr = MatSetType(pcnn->coarse_mat,MATAIJ);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(pcnn->coarse_mat,1,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(pcnn->coarse_mat,1,PETSC_NULL,1,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatSetValues(pcnn->coarse_mat,n_neigh,neigh,n_neigh,neigh,mat,ADD_VALUES);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd  (pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  {
    PetscMPIInt rank;
    PetscScalar one = 1.0; 
    ierr = MPI_Comm_rank(((PetscObject)pc)->comm,&rank);CHKERRQ(ierr);
    /* "Zero out" rows of not-purely-Neumann subdomains */
    if (pcis->pure_neumann) {  /* does NOT zero the row; create an empty index set. The reason is that MatZeroRows() is collective. */
      ierr = MatZeroRows(pcnn->coarse_mat,0,PETSC_NULL,one,0,0);CHKERRQ(ierr);
    } else { /* here it DOES zero the row, since it's not a floating subdomain. */
      PetscInt row = (PetscInt) rank;
      ierr = MatZeroRows(pcnn->coarse_mat,1,&row,one,0,0);CHKERRQ(ierr);
    }
  }

  /* Create the coarse linear solver context */
  {
    PC  pc_ctx, inner_pc;
    KSP inner_ksp;

    ierr = KSPCreate(((PetscObject)pc)->comm,&pcnn->ksp_coarse);CHKERRQ(ierr);
    ierr = PetscObjectIncrementTabLevel((PetscObject)pcnn->ksp_coarse,(PetscObject)pc,2);CHKERRQ(ierr);
    ierr = KSPSetOperators(pcnn->ksp_coarse,pcnn->coarse_mat,pcnn->coarse_mat,SAME_PRECONDITIONER);CHKERRQ(ierr);
    ierr = KSPGetPC(pcnn->ksp_coarse,&pc_ctx);CHKERRQ(ierr);
    ierr = PCSetType(pc_ctx,PCREDUNDANT);CHKERRQ(ierr);                
    ierr = KSPSetType(pcnn->ksp_coarse,KSPPREONLY);CHKERRQ(ierr);               
    ierr = PCRedundantGetKSP(pc_ctx,&inner_ksp);CHKERRQ(ierr);           
    ierr = KSPGetPC(inner_ksp,&inner_pc);CHKERRQ(ierr);           
    ierr = PCSetType(inner_pc,PCLU);CHKERRQ(ierr);                     
    ierr = KSPSetOptionsPrefix(pcnn->ksp_coarse,"nn_coarse_");CHKERRQ(ierr);
    ierr = KSPSetFromOptions(pcnn->ksp_coarse);CHKERRQ(ierr);
    /* the vectors in the following line are dummy arguments, just telling the KSP the vector size. Values are not used */
    ierr = KSPSetUp(pcnn->ksp_coarse);CHKERRQ(ierr);
  }

  /* Free the memory for mat */
  ierr = PetscFree(mat);CHKERRQ(ierr);

  /* for DEBUGGING, save the coarse matrix to a file. */
  {
    PetscBool  flg = PETSC_FALSE;
    ierr = PetscOptionsGetBool(PETSC_NULL,"-pc_nn_save_coarse_matrix",&flg,PETSC_NULL);CHKERRQ(ierr);
    if (flg) {
      PetscViewer viewer;
      ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,"coarse.m",&viewer);CHKERRQ(ierr);
      ierr = PetscViewerSetFormat(viewer,PETSC_VIEWER_ASCII_MATLAB);CHKERRQ(ierr);
      ierr = MatView(pcnn->coarse_mat,viewer);CHKERRQ(ierr);
      ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
    }
  }

  /*  Set the variable pcnn->factor_coarse_rhs. */
  pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;

  /* See historical note 02, at the bottom of this file. */
  PetscFunctionReturn(0);
}