Example #1
int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  PetscInt       n = 5;
  char           *output;
  Vec            x;

  PetscInitialize(&argc,&argv,(char *)0,0);

  ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr);
  ierr = VecSetSizes(x,PETSC_DECIDE,n);CHKERRQ(ierr);
  ierr = VecSetFromOptions(x);CHKERRQ(ierr);

  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = PetscMatlabEngineGetOutput(PETSC_MATLAB_ENGINE_WORLD,&output);CHKERRQ(ierr);
  ierr = PetscMatlabEngineEvaluate(PETSC_MATLAB_ENGINE_WORLD,"MPI_Comm_rank");CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d]Processor rank is %s",rank,output);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD);CHKERRQ(ierr);

  ierr = PetscObjectSetName((PetscObject)x,"x");CHKERRQ(ierr);
  ierr = PetscMatlabEnginePut(PETSC_MATLAB_ENGINE_WORLD,(PetscObject)x);CHKERRQ(ierr);
  ierr = PetscMatlabEngineEvaluate(PETSC_MATLAB_ENGINE_WORLD,"x = x + MPI_Comm_rank;\n");
  ierr = PetscMatlabEngineGet(PETSC_MATLAB_ENGINE_WORLD,(PetscObject)x);CHKERRQ(ierr);

  ierr = PetscMatlabEngineEvaluate(PETSC_MATLAB_ENGINE_WORLD,"whos\n");
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d]The result is %s",rank,output);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD);CHKERRQ(ierr);

  ierr = VecView(x,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = VecDestroy(x);CHKERRQ(ierr);
  ierr = PetscFinalize();CHKERRQ(ierr);
  return 0;
}
Example #2
File: ex16.c Project: 00liujj/petsc
int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  char           buffer[256],*output,user[256];
  PetscBool      userhappy = PETSC_FALSE;

  PetscInitialize(&argc,&argv,(char*)0,help);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  ierr = PetscMatlabEngineGetOutput(PETSC_MATLAB_ENGINE_(PETSC_COMM_WORLD),&output);CHKERRQ(ierr);

  ierr = PetscMatlabEngineEvaluate(PETSC_MATLAB_ENGINE_(PETSC_COMM_WORLD),"MPI_Comm_rank");CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d]Processor rank is %s",rank,output);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD,">>");CHKERRQ(ierr);
  ierr = PetscSynchronizedFGets(PETSC_COMM_WORLD,stdin,256,user);CHKERRQ(ierr);
  ierr = PetscStrncmp(user,"exit",4,&userhappy);CHKERRQ(ierr);
  while (!userhappy) {
    ierr = PetscMatlabEngineEvaluate(PETSC_MATLAB_ENGINE_(PETSC_COMM_WORLD),user);CHKERRQ(ierr);
    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d]The result is %s",rank,output);CHKERRQ(ierr);
    ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);CHKERRQ(ierr);
    ierr = PetscPrintf(PETSC_COMM_WORLD,">>");CHKERRQ(ierr);
    ierr = PetscSynchronizedFGets(PETSC_COMM_WORLD,stdin,256,user);CHKERRQ(ierr);
    ierr = PetscStrncmp(user,"exit",4,&userhappy);CHKERRQ(ierr);
  }
  ierr = PetscFinalize();
  return 0;
}
Example #3
File: ex6.c Project: hansec/petsc
/* Simple test to check that the ghost points are properly updated */
PetscErrorCode FATest(FA fa)
{
  PetscErrorCode ierr;
  Vec            l,g;
  Field          **la;
  PetscInt       x,y,m,n,j,i,k,p;
  PetscMPIInt    rank;

  PetscFunctionBeginUser;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  ierr = FAGetGlobalVector(fa,&g);CHKERRQ(ierr);
  ierr = FAGetLocalVector(fa,&l);CHKERRQ(ierr);

  /* fill up global vector of one region at a time with ITS logical coordinates, then update LOCAL
     vector; print local vectors to confirm they are correctly filled */
  for (j=0; j<3; j++) {
    ierr = VecSet(g,0.0);CHKERRQ(ierr);
    ierr = FAGetGlobalCorners(fa,j,&x,&y,&m,&n);CHKERRQ(ierr);
    ierr = PetscPrintf(PETSC_COMM_WORLD,"\nFilling global region %d, showing local results \n",j+1);CHKERRQ(ierr);
    ierr = FAGetGlobalArray(fa,g,j,&la);CHKERRQ(ierr);
    for (k=y; k<y+n; k++) {
      for (i=x; i<x+m; i++) {
        la[k][i].X = i;
        la[k][i].Y = k;
      }
    }
    ierr = FARestoreGlobalArray(fa,g,j,&la);CHKERRQ(ierr);

    ierr = FAGlobalToLocal(fa,g,l);CHKERRQ(ierr);
    ierr = DrawFA(fa,g);CHKERRQ(ierr);
    ierr = DrawFA(fa,l);CHKERRQ(ierr);

    for (p=0; p<3; p++) {
      ierr = FAGetLocalCorners(fa,p,&x,&y,&m,&n);CHKERRQ(ierr);
      ierr = FAGetLocalArray(fa,l,p,&la);CHKERRQ(ierr);
      ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"\n[%d] Local array for region %d \n",rank,p+1);CHKERRQ(ierr);
      for (k=y+n-1; k>=y; k--) { /* print in reverse order to match diagram in paper */
        for (i=x; i<x+m; i++) {
          ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"(%G,%G) ",la[k][i].X,la[k][i].Y);CHKERRQ(ierr);
        }
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"\n");CHKERRQ(ierr);
      }
      ierr = FARestoreLocalArray(fa,l,p,&la);CHKERRQ(ierr);
      ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD);CHKERRQ(ierr);
    }
  }
  ierr = VecDestroy(&g);CHKERRQ(ierr);
  ierr = VecDestroy(&l);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #4
File: ex2.c Project: petsc/petsc
static PetscErrorCode CreatePoints_Grid(DM dm, PetscInt *Np, PetscReal **pcoords, PetscBool *pointsAllProcs, AppCtx *ctx)
{
  DM             da;
  DMDALocalInfo  info;
  PetscInt       N = 3, n = 0, spaceDim, i, j, k, *ind, d;
  PetscReal      *h;
  PetscMPIInt    rank;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);
  ierr = DMGetCoordinateDim(dm, &spaceDim);CHKERRQ(ierr);
  ierr = PetscCalloc1(spaceDim,&ind);CHKERRQ(ierr);
  ierr = PetscCalloc1(spaceDim,&h);CHKERRQ(ierr);
  h[0] = 1.0/(N-1); h[1] = 1.0/(N-1); h[2] = 1.0/(N-1);
  ierr = DMDACreate(PetscObjectComm((PetscObject) dm), &da);CHKERRQ(ierr);
  ierr = DMSetDimension(da, ctx->dim);CHKERRQ(ierr);
  ierr = DMDASetSizes(da, N, N, N);CHKERRQ(ierr);
  ierr = DMDASetDof(da, 1);CHKERRQ(ierr);
  ierr = DMDASetStencilWidth(da, 1);CHKERRQ(ierr);
  ierr = DMSetUp(da);CHKERRQ(ierr);
  ierr = DMDASetUniformCoordinates(da, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);CHKERRQ(ierr);
  ierr = DMDAGetLocalInfo(da, &info);CHKERRQ(ierr);
  *Np  = info.xm * info.ym * info.zm;
  ierr = PetscCalloc1(*Np * spaceDim, pcoords);CHKERRQ(ierr);
  for (k = info.zs; k < info.zs + info.zm; ++k) {
    ind[2] = k;
    for (j = info.ys; j < info.ys + info.ym; ++j) {
      ind[1] = j;
      for (i = info.xs; i < info.xs + info.xm; ++i, ++n) {
        ind[0] = i;

        for (d = 0; d < spaceDim; ++d) (*pcoords)[n*spaceDim+d] = ind[d]*h[d];
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, "[%d]Point %D (", rank, n);CHKERRQ(ierr);
        for (d = 0; d < spaceDim; ++d) {
          ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, "%g", (double)(*pcoords)[n*spaceDim+d]);CHKERRQ(ierr);
          if (d < spaceDim-1) {ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, ", ");CHKERRQ(ierr);}
        }
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, ")\n");CHKERRQ(ierr);
      }
    }
  }
  ierr = DMDestroy(&da);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD, NULL);CHKERRQ(ierr);
  ierr = PetscFree(ind);CHKERRQ(ierr);
  ierr = PetscFree(h);CHKERRQ(ierr);
  *pointsAllProcs = PETSC_FALSE;
  PetscFunctionReturn(0);
}
Example #5
PetscErrorCode PetscSubcommView(PetscSubcomm psubcomm,PetscViewer viewer)
{
  PetscErrorCode    ierr;
  PetscBool         iascii;
  PetscViewerFormat format;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_DEFAULT) {
      MPI_Comm    comm=psubcomm->parent;
      PetscMPIInt rank,size,subsize,subrank,duprank;

      ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPrintf(viewer,"PetscSubcomm type %s with total %d MPI processes:\n",PetscSubcommTypes[psubcomm->type],size);CHKERRQ(ierr);
      ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
      ierr = MPI_Comm_size(psubcomm->comm,&subsize);CHKERRQ(ierr);
      ierr = MPI_Comm_rank(psubcomm->comm,&subrank);CHKERRQ(ierr);
      ierr = MPI_Comm_rank(psubcomm->dupparent,&duprank);CHKERRQ(ierr);
      ierr = PetscSynchronizedPrintf(comm,"  [%d], color %d, sub-size %d, sub-rank %d, duprank %d\n",rank,psubcomm->color,subsize,subrank,duprank);CHKERRQ(ierr);
      ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
    }
  } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported yet");
  PetscFunctionReturn(0);
}
Example #6
int main(int argc, char **args) {
  PetscErrorCode ierr;
  ierr = DCellInit(); CHKERRQ(ierr);
  MPI_Comm comm = MPI_COMM_WORLD;
  int size, rank, i;
  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);

  ierr = PetscPrintf(comm,"\nCreating\n"); CHKERRQ(ierr);
  UniqueID uid;
  ierr = UniqueIDCreate(&uid); CHKERRQ(ierr);



  UniqueIDType id;
  ierr = PetscPrintf(comm,"\nLocal Count == 0\n"); CHKERRQ(ierr);
  for (i = 0; i < 2*size; ++i) {
    ierr = UniqueIDGenerate(uid,&id); CHKERRQ(ierr);
    ierr = PetscSynchronizedPrintf(comm,"%d: %d\n", rank, id); CHKERRQ(ierr);
  }
  ierr = PetscSynchronizedFlush(comm); CHKERRQ(ierr);

  int maxID = 5*size*size;
  ierr = PetscPrintf(comm,"\nSetting max id = %d\n", maxID); CHKERRQ(ierr);
  ierr = UniqueIDSetStartCount(uid, maxID); CHKERRQ(ierr);
  for (i = 0; i < 2*size; ++i) {
    ierr = UniqueIDGenerate(uid,&id); CHKERRQ(ierr);
    ierr = PetscSynchronizedPrintf(comm,"%d: %d\n", rank, id); CHKERRQ(ierr);
  }
  ierr = PetscSynchronizedFlush(comm); CHKERRQ(ierr);

  ierr = PetscPrintf(comm,"\nTesting Max ID\n"); CHKERRQ(ierr);
  if( rank == size-1 ) {
    ierr = UniqueIDSetStartCount(uid, PETSC_MAX_INT-4*size); CHKERRQ(ierr);
    for (i = 0; i < 5*size; ++i) {
//      ierr = UniqueIDGenerate(uid,&id); CHKERRQ(ierr);
      ierr = PetscSynchronizedPrintf(comm,"%d: %d  (%d)\n", rank, id, PETSC_MAX_INT-id); CHKERRQ(ierr);
    }
  }
  ierr = PetscSynchronizedFlush(comm); CHKERRQ(ierr);

  ierr = PetscPrintf(comm,"\nDestroying\n"); CHKERRQ(ierr);
  ierr = UniqueIDDestroy(uid); CHKERRQ(ierr);

  ierr = DCellFinalize(); CHKERRQ(ierr);
  return 0;
}
Example #7
void Display_DA_3D_info(MAC_grid *grid, Parameters *params) {


    int ierr;
    int Is_g, Js_g, Ks_g;
    int Ie_g, Je_g, Ke_g;
    int Is, Js, Ks;
    int Ie, Je, Ke;
    int rank;

    /* Start index of bottom-left-back corner on current processor including ghost nodes */
    Is_g = grid->L_Is;
    Js_g = grid->L_Js;
    Ks_g = grid->L_Ks;

    /* End index of top-right-front corner on current processor including ghost nodes */
    Ie_g = grid->L_Ie;
    Je_g = grid->L_Je;
    Ke_g = grid->L_Ke;

    /* Start index of bottom-left-back corner on current processor */
    Is = grid->G_Is;
    Js = grid->G_Js;
    Ks = grid->G_Ks;

    /* End index of top-right-front corner on current processor */
    Ie = grid->G_Ie;
    Je = grid->G_Je;
    Ke = grid->G_Ke;

    rank = params->rank;

    ierr = PetscSynchronizedPrintf(PCW, "\nProcessor Rank:%d Is:%d Js:%d Ks:%d\n", params->rank, Is, Js, Ks);
    PETScErrAct(ierr);
    ierr = PetscSynchronizedPrintf(PCW, "Processor Rank:%d Ie:%d Je:%d Ke:%d\n", params->rank, Ie, Je, Ke);
    PETScErrAct(ierr);
    ierr = PetscSynchronizedPrintf(PCW, "Processor Rank:%d Is_g:%d Js_g:%d Ks_g:%d\n", params->rank, Is_g, Js_g, Ks_g);
    PETScErrAct(ierr);
    ierr = PetscSynchronizedPrintf(PCW, "Processor Rank:%d Ie_g:%d Je_g:%d Ke_g:%d\n\n", params->rank, Ie_g, Je_g, Ke_g);
    PETScErrAct(ierr);

    ierr = PetscSynchronizedFlush(PCW); PETScErrAct(ierr);
}	
Example #8
File: ex15.c Project: Kun-Qu/petsc
int main(int argc,char **argv)
{
  int        ierr,choice;
  const char *choices[] = {"Say hello","Say goodbye"};

  PetscInitialize(&argc,&argv,(char *)0,0);
  ierr = PetscPopUpSelect(PETSC_COMM_WORLD,PETSC_NULL,"Select one of ",2,choices,&choice);CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"You selected %s\n",choices[choice]);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
Example #9
int main(int argc, char *argv[])
{
    PetscMPIInt rank = 0, size = 0; /* use PetscMPIInt: MPI_Comm_rank()/MPI_Comm_size() expect plain C ints */
    PetscInitialize(&argc,&argv,(char*) 0,help);
    MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
    MPI_Comm_size(PETSC_COMM_WORLD, &size);
    /* Without synchronization the processes print in arbitrary order; PetscSynchronizedPrintf()
       queues the output so that it can later be emitted in rank order. */
    PetscSynchronizedPrintf(PETSC_COMM_WORLD,"Hello World from %d out of %d\n",rank,size);
    /* Collective flush: prints the queued messages in rank order on stdout. */
    PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
    PetscPrintf(PETSC_COMM_WORLD,"\nNow this should be the rank 0 process. Greetings from process %d\n\n",rank);
    PetscFinalize();
    return 0;
}
Example #10
void Display_2D_inflow(double **inflow, MAC_grid *grid, Parameters *params, const char *q_name) {


    if (grid->G_Is == 0) {

        /* start index on current processor */
        int Js = grid->G_Js;
        int Ks = grid->G_Ks;

        /* end index on current processor */
        int Je = grid->G_Je;
        int Ke = grid->G_Ke;

        PetscSynchronizedPrintf(PCW, "***********************************\n");
        PetscSynchronizedPrintf(PCW, "Rank:%d Printing \"%s\"\n", params->rank, q_name);
        PetscSynchronizedPrintf(PCW, "StartIndex(k:%d,j:%d) EndIndex(k:%d,j:%d)\n", Ks, Js, Ke, Je);
        PetscSynchronizedPrintf(PCW, "q(j:y,k:z)\n");

        int j, k;
        for (k=Ks; k<Ke; k++) {
            for (j=Js; j<Je; j++) {

                PetscSynchronizedPrintf(PCW, "q(%d,%d)=%f ", j, k, inflow[k][j]);

            } /* for j */
            PetscSynchronizedPrintf(PCW, "\n");
        } /* for k*/
        PetscSynchronizedFlush(PCW);
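        /* Caveat: PetscSynchronizedFlush() is collective on PCW, so this assumes every rank of PCW
           reaches this point, i.e. either all ranks satisfy grid->G_Is == 0 or PCW is a
           sub-communicator restricted to those ranks; otherwise the flush would hang. */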
    }
}
Example #11
File: ex2.c Project: petsc/petsc
static PetscErrorCode CreatePoints_GridReplicated(DM dm, PetscInt *Np, PetscReal **pcoords, PetscBool *pointsAllProcs, AppCtx *ctx)
{
  PetscInt       N = 3, n = 0, spaceDim, i, j, k, *ind, d;
  PetscReal      *h;
  PetscMPIInt    rank;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);
  ierr = DMGetCoordinateDim(dm, &spaceDim);CHKERRQ(ierr);
  ierr = PetscCalloc1(spaceDim,&ind);CHKERRQ(ierr);
  ierr = PetscCalloc1(spaceDim,&h);CHKERRQ(ierr);
  h[0] = 1.0/(N-1); h[1] = 1.0/(N-1); h[2] = 1.0/(N-1);
  *Np  = N * (ctx->dim > 1 ? N : 1) * (ctx->dim > 2 ? N : 1);
  ierr = PetscCalloc1(*Np * spaceDim, pcoords);CHKERRQ(ierr);
  for (k = 0; k < N; ++k) {
    ind[2] = k;
    for (j = 0; j < N; ++j) {
      ind[1] = j;
      for (i = 0; i < N; ++i, ++n) {
        ind[0] = i;

        for (d = 0; d < spaceDim; ++d) (*pcoords)[n*spaceDim+d] = ind[d]*h[d];
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, "[%d]Point %D (", rank, n);CHKERRQ(ierr);
        for (d = 0; d < spaceDim; ++d) {
          ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, "%g", (double)(*pcoords)[n*spaceDim+d]);CHKERRQ(ierr);
          if (d < spaceDim-1) {ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, ", ");CHKERRQ(ierr);}
        }
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, ")\n");CHKERRQ(ierr);
      }
    }
  }
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD, NULL);CHKERRQ(ierr);
  *pointsAllProcs = PETSC_TRUE;
  ierr = PetscFree(ind);CHKERRQ(ierr);
  ierr = PetscFree(h);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #12
File: ex2.c Project: petsc/petsc
static PetscErrorCode CreatePoints_Centroid(DM dm, PetscInt *Np, PetscReal **pcoords, PetscBool *pointsAllProcs, AppCtx *ctx)
{
  PetscSection   coordSection;
  Vec            coordsLocal;
  PetscInt       spaceDim, p;
  PetscMPIInt    rank;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);
  ierr = DMGetCoordinatesLocal(dm, &coordsLocal);CHKERRQ(ierr);
  ierr = DMGetCoordinateSection(dm, &coordSection);CHKERRQ(ierr);
  ierr = DMGetCoordinateDim(dm, &spaceDim);CHKERRQ(ierr);
  ierr = DMPlexGetHeightStratum(dm, 0, NULL, Np);CHKERRQ(ierr);
  ierr = PetscCalloc1(*Np * spaceDim, pcoords);CHKERRQ(ierr);
  for (p = 0; p < *Np; ++p) {
    PetscScalar *coords = NULL;
    PetscInt     size, num, n, d;

    ierr = DMPlexVecGetClosure(dm, coordSection, coordsLocal, p, &size, &coords);CHKERRQ(ierr);
    num  = size/spaceDim;
    for (n = 0; n < num; ++n) {
      for (d = 0; d < spaceDim; ++d) (*pcoords)[p*spaceDim+d] += PetscRealPart(coords[n*spaceDim+d]) / num;
    }
    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, "[%d]Point %D (", rank, p);CHKERRQ(ierr);
    for (d = 0; d < spaceDim; ++d) {
      ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, "%g", (double)(*pcoords)[p*spaceDim+d]);CHKERRQ(ierr);
      if (d < spaceDim-1) {ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, ", ");CHKERRQ(ierr);}
    }
    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, ")\n");CHKERRQ(ierr);
    ierr = DMPlexVecRestoreClosure(dm, coordSection, coordsLocal, p, &num, &coords);CHKERRQ(ierr);
  }
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD, NULL);CHKERRQ(ierr);
  *pointsAllProcs = PETSC_FALSE;
  PetscFunctionReturn(0);
}
Example #13
int main(int argc,char **argv)
{
  PetscMPIInt    rank;
  PetscErrorCode ierr;

  PetscInitialize(&argc,&argv,(char *)0,help);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"Greetings from %d\n",rank);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD);CHKERRQ(ierr);

  ierr = PetscSynchronizedFPrintf(PETSC_COMM_WORLD,stderr,"Greetings again from %d\n",rank);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD);CHKERRQ(ierr);
 
  ierr = PetscFinalize();CHKERRQ(ierr);
  return 0;
}
Example #14
File: gada.c Project: adrielb/DCell
PetscErrorCode vizGA2DA()
{
  PetscErrorCode  ierr;
  int rank;
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);  
  int d1 = 40, d2 = 50;
  
  DA da;
  Vec vec;
  const PetscInt *lx, *ly, *lz;
  PetscInt m,n,p;
  DALocalInfo info;
  ierr = DACreate2d(PETSC_COMM_WORLD,DA_NONPERIODIC,DA_STENCIL_STAR,
            d1,d2,PETSC_DECIDE,PETSC_DECIDE,1,1,0,0, &da); CHKERRQ(ierr);
  ierr = DACreateGlobalVector(da, &vec); CHKERRQ(ierr);
  ierr = DAGetOwnershipRanges(da, &lx, &ly, &lz); CHKERRQ(ierr);
  ierr = DAGetLocalInfo(da,&info); CHKERRQ(ierr);
  ierr = DAGetInfo(da,0,0,0,0,&m,&n,&p,0,0,0,0); CHKERRQ(ierr);
  /**/
  ierr = DAView(da, PETSC_VIEWER_STDOUT_WORLD); CHKERRQ(ierr);
  for (int i = 0; i < m; ++i) {
    PetscPrintf(PETSC_COMM_WORLD,"%d\tlx: %d\n",i,lx[i]);
  }
  for (int i = 0; i < n; ++i) {
    PetscPrintf(PETSC_COMM_WORLD,"%d\tly: %d\n",i,ly[i]);
  }
  /**/
 
  
  int ga = GA_Create_handle();
  int ndim = 2;
  int dims[2] = {d2,d1};
  GA_Set_data(ga,2,dims,MT_DBL);
  int *map;
  PetscMalloc( sizeof(int)*(m+n), &map);
  map[0] = 0;
  for( int i = 1; i < n; i++ )
  {
    map[i] = ly[i-1] + map[i-1];
  }
  map[n] = 0;
  for( int i = n+1; i < m+n; i++ )
  {
    map[i] = lx[i-n-1] + map[i-1];
  }
  /* correct ordering, but nodeid's dont line up with mpi rank for petsc's da
   * DA: +---+---+   GA: +---+---+   
   *     +-2-+-3-+       +-1-+-3-+
   *     +---+---+       +---+---+
   *     +-0-+-1-+       +-0-+-2-+
   *     +---+---+       +---+---+
  int *map;
  PetscMalloc( sizeof(int)*(m+n), &map);
  map[0] = 0;
  for( int i = 1; i < m; i++ )
  {
    map[i] = lx[i] + map[i-1];
  }
  map[m] = 0;
  for( int i = m+1; i < m+n; i++ )
  {
    map[i] = ly[i-m] + map[i-1];
  }
  */
  int block[2] = {n,m};  
  GA_Set_irreg_distr(ga,map,block);
  ierr = GA_Allocate( ga );
  if( !ierr ) GA_Error("\n\n\nga allocation failed\n\n",ierr);
  if( !ga ) GA_Error("\n\n\n ga null \n\n",ierr); 
  if( rank != GA_Nodeid() ) GA_Error("MPI rank does not match GA_Nodeid()",1);
  GA_Print_distribution(ga);  
  
  int lo[2], hi[2];
  NGA_Distribution(ga,rank,lo,hi);
  if( lo[1] != info.xs || hi[1] != info.xs+info.xm-1 ||
      lo[0] != info.ys || hi[0] != info.ys+info.ym-1 )
  {
    PetscSynchronizedPrintf(PETSC_COMM_SELF,"[%d] lo:(%2d,%2d)  hi:(%2d,%2d) \t DA: (%2d,%2d), (%2d, %2d)\n",
        rank, lo[1], lo[0], hi[1], hi[0], info.xs, info.ys, info.xs+info.xm-1, info.ys+info.ym-1);
  }
  PetscBarrier(0);
  PetscSynchronizedFlush(PETSC_COMM_WORLD);

  AO ao;
  DAGetAO(da,&ao);
  if( rank == 0 )
  {
    int *idx, len = d1*d2;
    PetscReal *val;
    PetscMalloc(sizeof(PetscReal)*len, &val);
    PetscMalloc(sizeof(int)*len, &idx);
    for (int j = 0; j < d2; ++j)
    {
      for (int i = 0; i < d1; ++i)
      {
        idx[i + d1*j] = i + d1*j;
        val[i + d1*j] = i + d1*j;
      }
    }
    AOApplicationToPetsc(ao,len,idx);
    VecSetValues(vec,len,idx,val,INSERT_VALUES);

    int a[2], b[2],ld[1]={0};
    double c = 0;
    for (int j = 0; j < d2; ++j)
    {
      for (int i = 0; i < d1; ++i)
      {
        a[0] = j;
        a[1] = i;
//        printf("%5.0f ",c);
        NGA_Put(ga,a,a,&c,ld);
        c++;
      }
    }
  }
//  GA_Print(ga);
  VecAssemblyBegin(vec);
  VecAssemblyEnd(vec);
  
  int ld;
  double *ptr;
  NGA_Access(ga,lo,hi,&ptr,&ld);
  PetscReal **d;
  int c=0;
  ierr = DAVecGetArray(da,vec,&d); CHKERRQ(ierr);
  for (int j = info.ys; j < info.ys+info.ym; ++j)
  {
    for (int i = info.xs; i < info.xs+info.xm; ++i)
    {
      if( d[j][i] != ptr[(i-info.xs)+ld*(j-info.ys)] )
        GA_Error("DA array is not equal to GA array",1);
//      printf("%d (%d,%d):\t%3.0f\t%3.0f\n", c, i, j, d[j][i], ptr[(i-info.xs)+ld*(j-info.ys)]);
      c++;
    }
  }
  ierr = DAVecRestoreArray(da,vec,&d); CHKERRQ(ierr);
  
  c=0;
  PetscReal *v;
  int start, end;
  VecGetOwnershipRange(vec, &start, &end);
  VecGetArray( vec, &v );
  for( int i = start; i < end; i++)
  {
//    printf("%d:\t%3.0f\t%3.0f\t%s\n", start, v[i-start], ptr[i-start], (v[i-start]-ptr[i-start]==0?"":"NO") );
  }
  VecRestoreArray( vec, &v );
  
  NGA_Release_update(ga,lo,hi);

  Vec gada;
  VecCreateMPIWithArray(((PetscObject)da)->comm,da->Nlocal,PETSC_DETERMINE,ptr,&gada);
  VecView(gada,PETSC_VIEWER_STDOUT_SELF);
  
  GA_Destroy(ga);
  
  
  
  ierr = VecDestroy(vec); CHKERRQ(ierr);
  ierr = DADestroy(da); CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #15
int main(int argc,char **argv)
{
  PetscMPIInt      rank;
  PetscErrorCode   ierr;
  PetscInt         M = 10,N = 8,m = PETSC_DECIDE;
  PetscInt         s =2,w=2,n = PETSC_DECIDE,nloc,l,i,j,kk;
  PetscInt         Xs,Xm,Ys,Ym,iloc,*iglobal;
  const PetscInt   *ltog;
  PetscInt         *lx       = NULL,*ly = NULL;
  PetscBool        testorder = PETSC_FALSE,flg;
  DMBoundaryType   bx        = DM_BOUNDARY_NONE,by= DM_BOUNDARY_NONE;
  DM               da;
  PetscViewer      viewer;
  Vec              local,global;
  PetscScalar      value;
  DMDAStencilType  st = DMDA_STENCIL_BOX;
  AO               ao;

  ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;
  ierr = PetscViewerDrawOpen(PETSC_COMM_WORLD,0,"",300,0,400,400,&viewer);CHKERRQ(ierr);

  /* Read options */
  ierr = PetscOptionsGetInt(NULL,NULL,"-NX",&M,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-NY",&N,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-n",&n,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-s",&s,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-w",&w,NULL);CHKERRQ(ierr);

  flg  = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,NULL,"-xperiodic",&flg,NULL);CHKERRQ(ierr); if (flg) bx = DM_BOUNDARY_PERIODIC;
  flg  = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,NULL,"-yperiodic",&flg,NULL);CHKERRQ(ierr); if (flg) by = DM_BOUNDARY_PERIODIC;
  flg  = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,NULL,"-xghosted",&flg,NULL);CHKERRQ(ierr); if (flg) bx = DM_BOUNDARY_GHOSTED;
  flg  = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,NULL,"-yghosted",&flg,NULL);CHKERRQ(ierr); if (flg) by = DM_BOUNDARY_GHOSTED;
  flg  = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,NULL,"-star",&flg,NULL);CHKERRQ(ierr); if (flg) st = DMDA_STENCIL_STAR;
  flg  = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,NULL,"-box",&flg,NULL);CHKERRQ(ierr); if (flg) st = DMDA_STENCIL_BOX;
  flg  = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,NULL,"-testorder",&testorder,NULL);CHKERRQ(ierr);
  /*
      Test putting two nodes in x and y on each processor, except the last processor
      in x and y gets the rest.
  */
  flg  = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,NULL,"-distribute",&flg,NULL);CHKERRQ(ierr);
  if (flg) {
    if (m == PETSC_DECIDE) SETERRQ(PETSC_COMM_WORLD,1,"Must set -m option with -distribute option");
    ierr = PetscMalloc1(m,&lx);CHKERRQ(ierr);
    for (i=0; i<m-1; i++) { lx[i] = 4;}
    lx[m-1] = M - 4*(m-1);
    if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_WORLD,1,"Must set -n option with -distribute option");
    ierr = PetscMalloc1(n,&ly);CHKERRQ(ierr);
    for (i=0; i<n-1; i++) { ly[i] = 2;}
    ly[n-1] = N - 2*(n-1);
  }
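  /* For example (hypothetical option values): with M=10, N=8 and -distribute -m 3 -n 2 this gives
     lx = {4,4,2} and ly = {2,6}. */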


  /* Create distributed array and get vectors */
  ierr = DMDACreate2d(PETSC_COMM_WORLD,bx,by,st,M,N,m,n,w,s,lx,ly,&da);CHKERRQ(ierr);
  ierr = PetscFree(lx);CHKERRQ(ierr);
  ierr = PetscFree(ly);CHKERRQ(ierr);

  ierr = DMView(da,viewer);CHKERRQ(ierr);
  ierr = DMCreateGlobalVector(da,&global);CHKERRQ(ierr);
  ierr = DMCreateLocalVector(da,&local);CHKERRQ(ierr);

  /* Set global vector; send ghost points to local vectors */
  value = 1;
  ierr = VecSet(global,value);CHKERRQ(ierr);
  ierr = DMGlobalToLocalBegin(da,global,INSERT_VALUES,local);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,global,INSERT_VALUES,local);CHKERRQ(ierr);

  /* Scale local vectors according to processor rank; pass to global vector */
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  value = rank;
  ierr = VecScale(local,value);CHKERRQ(ierr);
  ierr = DMLocalToGlobalBegin(da,local,INSERT_VALUES,global);CHKERRQ(ierr);
  ierr = DMLocalToGlobalEnd(da,local,INSERT_VALUES,global);CHKERRQ(ierr);

  if (!testorder) { /* turn off printing when testing ordering mappings */
    ierr = PetscPrintf(PETSC_COMM_WORLD,"\nGlobal Vectors:\n");CHKERRQ(ierr);
    ierr = VecView(global,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscPrintf(PETSC_COMM_WORLD,"\n\n");CHKERRQ(ierr);
  }

  /* Send ghost points to local vectors */
  ierr = DMGlobalToLocalBegin(da,global,INSERT_VALUES,local);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,global,INSERT_VALUES,local);CHKERRQ(ierr);

  flg  = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,NULL,"-local_print",&flg,NULL);CHKERRQ(ierr);
  if (flg) {
    PetscViewer sviewer;

    ierr = PetscViewerASCIIPushSynchronized(PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"\nLocal Vector: processor %d\n",rank);CHKERRQ(ierr);
    ierr = PetscViewerGetSubViewer(PETSC_VIEWER_STDOUT_WORLD,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
    ierr = VecView(local,sviewer);CHKERRQ(ierr);
    ierr = PetscViewerRestoreSubViewer(PETSC_VIEWER_STDOUT_WORLD,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
    ierr = PetscViewerFlush(PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscViewerASCIIPopSynchronized(PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }

  /* Test mappings between application/PETSc orderings */
  if (testorder) {
    ISLocalToGlobalMapping ltogm;

    ierr = DMGetLocalToGlobalMapping(da,&ltogm);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetSize(ltogm,&nloc);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetIndices(ltogm,&ltog);CHKERRQ(ierr);
    ierr = DMDAGetGhostCorners(da,&Xs,&Ys,NULL,&Xm,&Ym,NULL);CHKERRQ(ierr);
    ierr = DMDAGetAO(da,&ao);CHKERRQ(ierr);
    ierr = PetscMalloc1(nloc,&iglobal);CHKERRQ(ierr);

    /* Set iglobal to be global indices for each processor's local and ghost nodes,
       using the DMDA ordering of grid points */
    kk = 0;
    for (j=Ys; j<Ys+Ym; j++) {
      for (i=Xs; i<Xs+Xm; i++) {
        iloc = w*((j-Ys)*Xm + i-Xs);
        for (l=0; l<w; l++) {
          iglobal[kk++] = ltog[iloc+l];
        }
      }
    }

    /* Map this to the application ordering (which for DMDAs is just the natural ordering
       that would be used for 1 processor, numbering most rapidly by x, then y) */
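    /* (For instance, on a 3x2 grid the application/natural ordering numbers the bottom row 0,1,2
       and the top row 3,4,5.) */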
    ierr = AOPetscToApplication(ao,nloc,iglobal);CHKERRQ(ierr);

    /* Then map the application ordering back to the PETSc DMDA ordering */
    ierr = AOApplicationToPetsc(ao,nloc,iglobal);CHKERRQ(ierr);

    /* Verify the mappings */
    kk=0;
    for (j=Ys; j<Ys+Ym; j++) {
      for (i=Xs; i<Xs+Xm; i++) {
        iloc = w*((j-Ys)*Xm + i-Xs);
        for (l=0; l<w; l++) {
          if (iglobal[kk] != ltog[iloc+l]) {
            ierr = PetscFPrintf(PETSC_COMM_SELF,stdout,"[%d] Problem with mapping: j=%D, i=%D, l=%D, petsc1=%D, petsc2=%D\n",rank,j,i,l,ltog[iloc+l],iglobal[kk]);CHKERRQ(ierr);
          }
          kk++;
        }
      }
    }
    ierr = PetscFree(iglobal);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingRestoreIndices(ltogm,&ltog);CHKERRQ(ierr);
  }

  /* Free memory */
  ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  ierr = VecDestroy(&local);CHKERRQ(ierr);
  ierr = VecDestroy(&global);CHKERRQ(ierr);
  ierr = DMDestroy(&da);CHKERRQ(ierr);

  ierr = PetscFinalize();
  return ierr;
}
Example #16
File: ex2.c Project: ZJLi2013/petsc
int main(int argc,char **argv)
{
  PetscMPIInt      rank;
  PetscInt         M  = 13,s=1,dof=1;
  DMDABoundaryType bx = DMDA_BOUNDARY_PERIODIC;
  PetscErrorCode   ierr;
  DM               da;
  PetscViewer      viewer;
  Vec              local,global;
  PetscScalar      value;
  PetscDraw        draw;
  PetscBool        flg = PETSC_FALSE;

  ierr = PetscInitialize(&argc,&argv,(char*)0,help);CHKERRQ(ierr);

  /* Create viewers */
  ierr = PetscViewerDrawOpen(PETSC_COMM_WORLD,0,"",280,480,600,200,&viewer);CHKERRQ(ierr);
  ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
  ierr = PetscDrawSetDoubleBuffer(draw);CHKERRQ(ierr);

  /* Read options */
  ierr = PetscOptionsGetInt(NULL,"-M",&M,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetEnum(NULL,"-wrap",DMDABoundaryTypes,(PetscEnum*)&bx,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,"-dof",&dof,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,"-s",&s,NULL);CHKERRQ(ierr);

  /* Create distributed array and get vectors */
  ierr = DMDACreate1d(PETSC_COMM_WORLD,bx,M,dof,s,NULL,&da);CHKERRQ(ierr);
  ierr = DMView(da,viewer);CHKERRQ(ierr);
  ierr = DMCreateGlobalVector(da,&global);CHKERRQ(ierr);
  ierr = DMCreateLocalVector(da,&local);CHKERRQ(ierr);

  /* Set global vector; send ghost points to local vectors */
  value = 1;
  ierr  = VecSet(global,value);CHKERRQ(ierr);
  ierr  = DMGlobalToLocalBegin(da,global,INSERT_VALUES,local);CHKERRQ(ierr);
  ierr  = DMGlobalToLocalEnd(da,global,INSERT_VALUES,local);CHKERRQ(ierr);

  /* Scale local vectors according to processor rank; pass to global vector */
  ierr  = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  value = rank+1;
  ierr  = VecScale(local,value);CHKERRQ(ierr);
  ierr  = DMLocalToGlobalBegin(da,local,INSERT_VALUES,global);CHKERRQ(ierr);
  ierr  = DMLocalToGlobalEnd(da,local,INSERT_VALUES,global);CHKERRQ(ierr);

  ierr = VecView(global,viewer);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD,"\nGlobal Vector:\n");CHKERRQ(ierr);
  ierr = VecView(global,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD,"\n");CHKERRQ(ierr);

  /* Send ghost points to local vectors */
  ierr = DMGlobalToLocalBegin(da,global,INSERT_VALUES,local);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,global,INSERT_VALUES,local);CHKERRQ(ierr);

  ierr = PetscOptionsGetBool(NULL,"-local_print",&flg,NULL);CHKERRQ(ierr);
  if (flg) {
    PetscViewer            sviewer;
    ISLocalToGlobalMapping is;

    ierr = PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD,PETSC_TRUE);CHKERRQ(ierr);
    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"\nLocal Vector: processor %d\n",rank);CHKERRQ(ierr);
    ierr = PetscViewerGetSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);
    ierr = VecView(local,sviewer);CHKERRQ(ierr);
    ierr = PetscViewerRestoreSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);
    ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);CHKERRQ(ierr);

    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"\nLocal to global mapping: processor %d\n",rank);CHKERRQ(ierr);
    ierr = PetscViewerGetSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);
    ierr = DMGetLocalToGlobalMapping(da,&is);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingView(is,sviewer);CHKERRQ(ierr);
    ierr = PetscViewerRestoreSingleton(PETSC_VIEWER_STDOUT_WORLD,&sviewer);CHKERRQ(ierr);
    ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);CHKERRQ(ierr);
  }

  /* Free memory */
  ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  ierr = VecDestroy(&global);CHKERRQ(ierr);
  ierr = VecDestroy(&local);CHKERRQ(ierr);
  ierr = DMDestroy(&da);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
Example #17
int main ( int argc, char* argv[] )
{
	/* parse command line arguments */
	std::string anArg;
	std::string modelRoot;
	for( int i = 1; i < argc; i++ )
	{
		anArg = std::string( argv[i] );
		if( anArg == "--help" || anArg == "-h")
		{
			std::cout << "Usage: ogs [MODEL_ROOT] [OPTIONS]\n"
			          << "Where OPTIONS are:\n"
			          << "  -h [--help]       print this message and exit\n"
			          << "  -b [--build-info] print build info and exit\n"
			          << "  --version         print ogs version and exit" << "\n";
			continue;
		}
		if( anArg == "--build-info" || anArg == "-b" )
		{
			std::cout << "ogs version: " << OGS_VERSION << "\n"
			          << "ogs date: " << OGS_DATE << "\n";
#ifdef CMAKE_CMD_ARGS
			std::cout << "cmake command line arguments: " << CMAKE_CMD_ARGS << "\n";
#endif // CMAKE_CMD_ARGS
#ifdef GIT_COMMIT_INFO
			std::cout << "git commit info: " << GIT_COMMIT_INFO << "\n";
#endif // GIT_COMMIT_INFO
#ifdef SVN_REVISION
			std::cout << "subversion info: " << SVN_REVISION << "\n";
#endif // SVN_REVISION
#ifdef BUILD_TIMESTAMP
			std::cout << "build timestamp: " << BUILD_TIMESTAMP << "\n";
#endif // BUILD_TIMESTAMP
			continue;
		}
		if( anArg == "--version" )
		{
			std::cout << OGS_VERSION << "\n";
			continue;
		}
		if( anArg == "--model-root" || anArg == "-m" )
		{
			modelRoot = std::string( argv[++i] );
			continue;
		}
		// anything left over must be the model root, unless already found
		if ( modelRoot == "" )
			modelRoot = std::string( argv[i] );
	} // end of parse argc loop

	if( argc > 1 && modelRoot == "" ) // non-interactive mode and no model given
		exit(0);             // e.g. just wanted the build info

	char* dateiname(NULL);
#ifdef SUPERCOMPUTER
// *********************************************************************
// buffered output ... important for performance on cray
// (unbuffered output is limited to 10 bytes per second)
// [email protected] 11.10.2007

	char buf[1024 * 1024];
	int bsize;

	bsize = 1024 * 1024; // question: what happens if buffer is full?
	                     // according to documentation the buffer is flushed when full.
	                     // If we have a lot of output, increasing the buffer is useful.
	if(bsize > 0)
//        bufstd = malloc(bsize);
		setvbuf(stdout, buf, _IOFBF, bsize);
	//**********************************************************************
#endif
/*---------- MPI Initialization ----------------------------------*/
#if defined(USE_MPI) || defined(USE_MPI_PARPROC) || defined(USE_MPI_REGSOIL) || \
	defined(USE_MPI_GEMS) || defined(USE_MPI_KRC) 
	printf("Before MPI_Init\n");
#if defined(USE_MPI_GEMS)
	int prov;
	MPI_Init_thread(&argc,&argv,MPI_THREAD_FUNNELED, &prov);
#else
	MPI_Init(&argc,&argv);
#endif
	MPI_Barrier (MPI_COMM_WORLD); // 12.09.2007 WW
	elapsed_time_mpi = -MPI_Wtime(); // 12.09.2007 WW
	MPI_Comm_size(MPI_COMM_WORLD,&mysize);
	MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
	std::cout << "After MPI_Init myrank = " << myrank << '\n';
	time_ele_paral = 0.0;
#endif
/*---------- MPI Initialization ----------------------------------*/


#ifdef USE_PETSC
	int rank, r_size;
	PetscLogDouble v1,v2;
	char help[] = "OGS with PETSc \n";
	//PetscInitialize(argc, argv, help);
	PetscInitialize(&argc,&argv,(char *)0,help);
	//kg44 quick fix to compile PETSC with version PETSCV3.4
#ifdef USEPETSC34
       PetscTime(&v1);
#else
       PetscGetTime(&v1);
#endif
	MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
	MPI_Comm_size(PETSC_COMM_WORLD, &r_size);
	PetscSynchronizedPrintf(PETSC_COMM_WORLD, "===\nUse PETSc solver");
	PetscSynchronizedPrintf(PETSC_COMM_WORLD, "Number of CPUs: %d, rank: %d\n", r_size, rank);
#endif




/*---------- LIS solver -----------------------------------------*/
#ifdef LIS
	//Initialization of the lis solver.
	lis_initialize(&argc, &argv);
#endif
/*========================================================================*/
/* Communication with the operating system */
	/* Start the timer for the total run time */
#ifdef TESTTIME
	TStartTimer(0);
#endif
	/* Print the intro banner */
#if defined(USE_MPI) //WW
	if(myrank == 0)
#endif
#ifdef USE_PETSC
        if(rank == 0 )
#endif

	DisplayStartMsg();
	/* Initialize memory management */
	if (!InitMemoryTest())
	{
		DisplayErrorMsg("Fehler: Speicherprotokoll kann nicht erstellt werden!");
		DisplayErrorMsg("        Programm vorzeitig beendet!");
		return 1; // LB changed from 0 to 1 because 0 is indicating success
	}
	if( argc == 1 )               // interactive mode
		dateiname = ReadString();
	else                         // non-interactive mode
	{
		if ( argc == 2 )     // a model root was supplied
		{
			dateiname = (char*) Malloc((int)strlen(argv[1]) + 1);
			dateiname = strcpy(dateiname,argv[1]);
		}
		else                // several args supplied
		if( modelRoot != "")
		{
			dateiname = (char*) Malloc( (int) modelRoot.size() + 1 );
			dateiname = strcpy( dateiname, modelRoot.c_str() );
		}
		DisplayMsgLn(dateiname);
	}
	//WW  DisplayMsgLn("");
	//WW  DisplayMsgLn("");
	// ----------23.02.2009. WW-----------------

	// LB Check if file exists
	std::string tmpFilename = dateiname;
	tmpFilename.append(".pcs");
	if(!IsFileExisting(tmpFilename))
	{
		std::cout << " Error: Cannot find file " << dateiname << "\n";
		return 1;
	}

	FileName = dateiname;
	size_t indexChWin, indexChLinux;
	indexChWin = indexChLinux = 0;
	indexChWin = FileName.find_last_of('\\');
	indexChLinux = FileName.find_last_of('/');
	//
	if(indexChWin != std::string::npos)
		FilePath = FileName.substr(0,indexChWin) + "\\";
	else if(indexChLinux != std::string::npos)
		FilePath = FileName.substr(0,indexChLinux) + "/";
	// ---------------------------WW
	Problem* aproblem = new Problem(dateiname);
#ifdef USE_PETSC
	aproblem->setRankandSize(rank, r_size);
#endif
#if defined(USE_MPI) || defined(USE_MPI_PARPROC) || defined(USE_MPI_REGSOIL) || defined(USE_MPI_GEMS)  || defined(USE_MPI_KRC)
	aproblem->setRankandSize(myrank, mysize);
#endif
	
	aproblem->Euler_TimeDiscretize();
	delete aproblem;
	aproblem = NULL;
	if(ClockTimeVec.size()>0)
		ClockTimeVec[0]->PrintTimes();  //CB time
	DestroyClockTime();
#ifdef TESTTIME
#if defined(USE_MPI)
     if(myrank == 0)
#endif
#if defined(USE_PETSC) 
     if(rank == 0)
#endif
	std::cout << "Simulation time: " << TGetTimer(0) << "s" << "\n";
#endif
	/* Print the closing message */
/*--------- MPI Finalize ------------------*/
#if defined(USE_MPI) || defined(USE_MPI_PARPROC) || defined(USE_MPI_REGSOIL) || defined(USE_MPI_KRC)
	elapsed_time_mpi += MPI_Wtime(); // 12.09.2007 WW
	std::cout << "\n *** Total CPU time of parallel modeling: " << elapsed_time_mpi <<
	"\n";                                                                          //WW
	// Count CPU time of post time loop WW
	MPI_Finalize();
#endif
/*--------- MPI Finalize ------------------*/
/*--------- LIS Finalize ------------------*/
#ifdef LIS
	lis_finalize();
#endif
/*--------- LIS Finalize ------------------*/

	free(dateiname);

#ifdef USE_PETSC
	//kg44 quick fix to compile PETSC with version PETSCV3.4
#ifdef USEPETSC34
       PetscTime(&v2);
#else
       PetscGetTime(&v2);
#endif


   PetscPrintf(PETSC_COMM_WORLD,"\t\n>>Total elapsed time by using PETSC:%f s\n",v2-v1);

   PetscFinalize();
#endif

	return 0;
}
Example #18
/*@C
    ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and 
     each index shared by more than one processor 

    Collective on ISLocalToGlobalMapping

    Input Parameters:
.   mapping - the mapping from local to global indexing

    Output Parameters:
+   nproc - number of processors that are connected to this one
.   proc - neighboring processors
.   numproc - number of indices for each subdomain (processor)
-   indices - indices of nodes (in local numbering) shared with neighbors (sorted by global numbering)

    Level: advanced

    Concepts: mapping^local to global

    Fortran Usage: 
$        ISLocalToGlobalMpngGetInfoSize(ISLocalToGlobalMapping,PetscInt nproc,PetscInt numprocmax,ierr) followed by 
$        ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping,PetscInt nproc, PetscInt procs[nproc],PetscInt numprocs[nproc],
          PetscInt indices[nproc][numprocmax],ierr)
        There is no ISLocalToGlobalMappingRestoreInfo() in Fortran. You must make sure that procs[], numprocs[] and 
        indices[][] are large enough arrays, either by allocating them dynamically or defining static ones large enough.


.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingRestoreInfo()
@*/
PetscErrorCode PETSCVEC_DLLEXPORT ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
{
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
  PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
  PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
  PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
  PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
  PetscInt       first_procs,first_numprocs,*first_indices;
  MPI_Request    *recv_waits,*send_waits;
  MPI_Status     recv_status,*send_status,*recv_statuses;
  MPI_Comm       comm = ((PetscObject)mapping)->comm;
  PetscTruth     debug = PETSC_FALSE;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE,1);
  ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (size == 1) {
    *nproc         = 0;
    *procs         = PETSC_NULL;
    ierr           = PetscMalloc(sizeof(PetscInt),numprocs);CHKERRQ(ierr);
    (*numprocs)[0] = 0;
    ierr           = PetscMalloc(sizeof(PetscInt*),indices);CHKERRQ(ierr); 
    (*indices)[0]  = PETSC_NULL;
    PetscFunctionReturn(0);
  }

  ierr = PetscOptionsGetTruth(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug,PETSC_NULL);CHKERRQ(ierr);

  /*
    Notes on ISLocalToGlobalMappingGetInfo

    globally owned node - the nodes that have been assigned to this processor in global
           numbering, just for this routine.

    nontrivial globally owned node - node assigned to this processor that is on a subdomain
           boundary (i.e. it has more than one local owner)

    locally owned node - node that exists on this processor's subdomain

    nontrivial locally owned node - node that is not in the interior (i.e. has more than one
           local subdomain)
  */
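  /* Hypothetical illustration: with two ranks whose local-to-global maps are {0,...,5} on rank 0 and
     {5,...,9} on rank 1, global node 5 has two local owners, so both ranks report it as a nontrivial
     locally owned node, and the rank to which the scale-based partition below assigns global index 5
     counts it as a nontrivial globally owned node. */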
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr);

  for (i=0; i<n; i++) {
    if (lindices[i] > max) max = lindices[i];
  }
  ierr   = MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);
  Ng++;
  ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  scale  = Ng/size + 1;
  ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
  rstart = scale*rank;

  /* determine ownership ranges of global indices */
  ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
  ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);

  /* determine owners of each local node  */
  ierr = PetscMalloc(n*sizeof(PetscInt),&owner);CHKERRQ(ierr);
  for (i=0; i<n; i++) {
    proc             = lindices[i]/scale; /* processor that globally owns this index */
    nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
    owner[i]         = proc;              
    nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
  }
  nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
  ierr = PetscInfo1(mapping,"Number of global owners for my local data %d\n",nsends);CHKERRQ(ierr);

  /* inform other processors of number of messages and max length*/
  ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
  ierr = PetscInfo1(mapping,"Number of local owners for my global data %d\n",nrecvs);CHKERRQ(ierr);

  /* post receives for owned rows */
  ierr = PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* pack messages containing lists of local nodes to owners */
  ierr       = PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);CHKERRQ(ierr);
  ierr       = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
  starts[0]  = 0; 
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];} 
  for (i=0; i<n; i++) {
    sends[starts[owner[i]]++] = lindices[i];
    sends[starts[owner[i]]++] = i;
  }
  ierr = PetscFree(owner);CHKERRQ(ierr);
  starts[0]  = 0; 
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];} 

  /* send the messages */
  ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
  ierr = PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);CHKERRQ(ierr);
  cnt = 0;
  for (i=0; i<size; i++) {
    if (nprocs[2*i]) {
      ierr      = MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr);
      dest[cnt] = i;
      cnt++;
    }
  }
  ierr = PetscFree(starts);CHKERRQ(ierr);

  /* wait on receives */
  ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);CHKERRQ(ierr);
  cnt  = nrecvs; 
  ierr = PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);CHKERRQ(ierr);
  ierr = PetscMemzero(nownedsenders,ng*sizeof(PetscInt));CHKERRQ(ierr);
  while (cnt) {
    ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
    /* unpack receives into our local space */
    ierr           = MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);CHKERRQ(ierr);
    source[imdex]  = recv_status.MPI_SOURCE;
    len[imdex]     = len[imdex]/2;
    /* count how many local owners for each of my global owned indices */
    for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
    cnt--;
  }
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);

  /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
  nowned  = 0;
  nownedm = 0;
  for (i=0; i<ng; i++) {
    if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
  }

  /* create single array to contain rank of all local owners of each globally owned index */
  ierr      = PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);CHKERRQ(ierr);
  ierr      = PetscMalloc((ng+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
  starts[0] = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }

  /* for each nontrivial globally owned node list all arriving processors */
  for (i=0; i<nrecvs; i++) {
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        ownedsenders[starts[node]++] = source[i];
      }
    }
  }

  if (debug) { /* -----------------------------------  */
    starts[0]    = 0;
    for (i=1; i<ng; i++) {
      if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
      else starts[i] = starts[i-1];
    }
    for (i=0; i<ng; i++) {
      if (nownedsenders[i] > 1) {
        ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
        for (j=0; j<nownedsenders[i]; j++) {
          ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
        }
        ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
      }
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  }/* -----------------------------------  */

  /* wait on original sends */
  if (nsends) {
    ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(send_waits);CHKERRQ(ierr);
  ierr = PetscFree(sends);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  /* pack messages to send back to local owners */
  starts[0]    = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }
  nsends2 = nrecvs;
  ierr    = PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); /* length of each message */
  for (i=0; i<nrecvs; i++) {
    nprocs[i] = 1;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        nprocs[i] += 2 + nownedsenders[node];
      }
    }
  }
  nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
  ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);CHKERRQ(ierr); 
  ierr = PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);CHKERRQ(ierr);
  starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
  /*
     Each message is nprocs[i] entries long and consists of
       (0) the number of nodes being sent back, followed, for each such node, by
       (1) the local node number,
       (2) the number of processors sharing it,
       (3) the processors sharing it
  */
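  /* Hypothetical example: a message reporting two shared local nodes a and b would be packed as
     { 2, a, 2, p, q, b, 3, p, q, r }, i.e. 2 nodes, node a shared by ranks p and q, node b shared
     by ranks p, q and r. */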
  for (i=0; i<nsends2; i++) {
    cnt = 1;
    sends2[starts2[i]] = 0;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        sends2[starts2[i]]++;
        sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
        sends2[starts2[i]+cnt++] = nownedsenders[node];
        ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));CHKERRQ(ierr);
        cnt += nownedsenders[node];
      }
    }
  }

  /* receive the message lengths */
  nrecvs2 = nsends;
  ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);CHKERRQ(ierr);  
  ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);CHKERRQ(ierr);  
  ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs2; i++) {
    ierr = MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* send the message lengths */
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);CHKERRQ(ierr);
  }

  /* wait on receives of lens */
  if (nrecvs2) {
    ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
    ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
    ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
  }
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);

  starts3[0] = 0;
  nt         = 0;
  for (i=0; i<nrecvs2-1; i++) {
    starts3[i+1] = starts3[i] + lens2[i];
    nt          += lens2[i];
  }
  nt += lens2[nrecvs2-1];

  ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs2; i++) {
    ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr);
  }
  
  /* send the messages */
  ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr);
  }

  /* wait on receives */
  if (nrecvs2) {
    ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
    ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
    ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
  }
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  if (debug) { /* -----------------------------------  */
    cnt = 0;
    for (i=0; i<nrecvs2; i++) {
      nt = recvs2[cnt++];
      for (j=0; j<nt; j++) {
        ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
        for (k=0; k<recvs2[cnt+1]; k++) {
          ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
        }
        cnt += 2 + recvs2[cnt+1];
        ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
      }
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  } /* -----------------------------------  */

  /* count the number of subdomains for each local node */
  ierr = PetscMalloc(size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
  ierr = PetscMemzero(nprocs,size*sizeof(PetscInt));CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        nprocs[recvs2[cnt+2+k]]++;
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
  *nproc    = nt;
  ierr = PetscMalloc((nt+1)*sizeof(PetscInt),procs);CHKERRQ(ierr);
  ierr = PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);CHKERRQ(ierr);
  ierr = PetscMalloc((nt+1)*sizeof(PetscInt*),indices);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscInt),&bprocs);CHKERRQ(ierr);
  cnt       = 0;
  for (i=0; i<size; i++) {
    if (nprocs[i] > 0) {
      bprocs[i]        = cnt;
      (*procs)[cnt]    = i;
      (*numprocs)[cnt] = nprocs[i];
      ierr             = PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);CHKERRQ(ierr);
      cnt++;
    }
  }

  /* make the list of subdomains for each nontrivial local node */
  ierr = PetscMemzero(*numprocs,nt*sizeof(PetscInt));CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  ierr = PetscFree(bprocs);CHKERRQ(ierr);
  ierr = PetscFree(recvs2);CHKERRQ(ierr);

  /* sort the node indexing by their global numbers */
  nt = *nproc;
  for (i=0; i<nt; i++) {
    ierr = PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
    for (j=0; j<(*numprocs)[i]; j++) {
      tmp[j] = lindices[(*indices)[i][j]];
    }
    ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr); 
    ierr = PetscFree(tmp);CHKERRQ(ierr);
  }

  if (debug) { /* -----------------------------------  */
    nt = *nproc;
    for (i=0; i<nt; i++) {
      ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
      for (j=0; j<(*numprocs)[i]; j++) {
        ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
      }
      ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  } /* -----------------------------------  */

  /* wait on sends */
  if (nsends2) {
    ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }

  ierr = PetscFree(starts3);CHKERRQ(ierr);
  ierr = PetscFree(dest);CHKERRQ(ierr);
  ierr = PetscFree(send_waits);CHKERRQ(ierr);

  ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(starts);CHKERRQ(ierr);
  ierr = PetscFree(starts2);CHKERRQ(ierr);
  ierr = PetscFree(lens2);CHKERRQ(ierr);

  ierr = PetscFree(source);CHKERRQ(ierr);
  ierr = PetscFree(len);CHKERRQ(ierr);
  ierr = PetscFree(recvs);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);
  ierr = PetscFree(sends2);CHKERRQ(ierr);

  /* put the information about myself as the first entry in the list */
  first_procs    = (*procs)[0];
  first_numprocs = (*numprocs)[0];
  first_indices  = (*indices)[0];
  for (i=0; i<*nproc; i++) {
    if ((*procs)[i] == rank) {
      (*procs)[0]    = (*procs)[i];
      (*numprocs)[0] = (*numprocs)[i];
      (*indices)[0]  = (*indices)[i];
      (*procs)[i]    = first_procs; 
      (*numprocs)[i] = first_numprocs;
      (*indices)[i]  = first_indices;
      break;
    }
  }
  PetscFunctionReturn(0);
}
Ejemplo n.º 19
0
PetscErrorCode PetscSFCreateSectionSF(PetscSF sf, PetscSection section, PetscSF *sectionSF)
{
    PetscInt          numRanks;
    const PetscInt    *ranks, *rankOffsets;
    const PetscMPIInt *localPoints, *remotePoints;
    PetscInt          numPoints, numIndices = 0;
    PetscInt          *remoteOffsets;
    PetscInt          *localIndices;
    PetscSFNode       *remoteIndices;
    PetscInt          i, r, ind;
    PetscErrorCode    ierr;

    PetscFunctionBegin;
    ierr      = PetscSFGetRanks(sf, &numRanks, &ranks, &rankOffsets, &localPoints, &remotePoints);
    CHKERRQ(ierr);
    numPoints = rankOffsets[numRanks];
    for (i = 0; i < numPoints; ++i) {
        PetscInt dof;

        ierr        = PetscSectionGetDof(section, localPoints[i], &dof);
        CHKERRQ(ierr);
        numIndices += dof;
    }
    /* Communicate offsets for ghosted points */
#if 0
    PetscInt *localOffsets;
    ierr = PetscMalloc2(numPoints,PetscInt,&localOffsets,numPoints,PetscInt,&remoteOffsets);
    CHKERRQ(ierr);
    for (i = 0; i < numPoints; ++i) {
        ierr = PetscSectionGetOffset(section, localPoints[i], &localOffsets[i]);
        CHKERRQ(ierr);
    }
    ierr = PetscSFBcastBegin(sf, MPIU_INT, localOffsets, remoteOffsets);
    CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(sf, MPIU_INT, localOffsets, remoteOffsets);
    CHKERRQ(ierr);
    for (i = 0; i < numPoints; ++i) {
        ierr = PetscSynchronizedPrintf(((PetscObject) sf)->comm, "remoteOffsets[%d]: %d\n", i, remoteOffsets[i]);
        CHKERRQ(ierr);
    }
#else
    ierr = PetscMalloc((section->atlasLayout.pEnd - section->atlasLayout.pStart) * sizeof(PetscInt), &remoteOffsets);
    CHKERRQ(ierr);
    ierr = PetscSFBcastBegin(sf, MPIU_INT, &section->atlasOff[-section->atlasLayout.pStart], &remoteOffsets[-section->atlasLayout.pStart]);
    CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(sf, MPIU_INT, &section->atlasOff[-section->atlasLayout.pStart], &remoteOffsets[-section->atlasLayout.pStart]);
    CHKERRQ(ierr);
    for (i = section->atlasLayout.pStart; i < section->atlasLayout.pEnd; ++i) {
        ierr = PetscSynchronizedPrintf(((PetscObject) sf)->comm, "remoteOffsets[%d]: %d\n", i, remoteOffsets[i-section->atlasLayout.pStart]);
        CHKERRQ(ierr);
    }
#endif
    ierr = PetscSynchronizedFlush(((PetscObject) sf)->comm);
    CHKERRQ(ierr);
    ierr = PetscMalloc(numIndices * sizeof(PetscInt), &localIndices);
    CHKERRQ(ierr);
    ierr = PetscMalloc(numIndices * sizeof(PetscSFNode), &remoteIndices);
    CHKERRQ(ierr);
    /* Create new index graph */
    for (r = 0, ind = 0; r < numRanks; ++r) {
        PetscInt rank = ranks[r];

        for (i = rankOffsets[r]; i < rankOffsets[r+1]; ++i) {
            PetscInt localPoint   = localPoints[i];
            PetscInt remoteOffset = remoteOffsets[localPoint-section->atlasLayout.pStart];
            PetscInt localOffset, dof, d;

            ierr = PetscSectionGetOffset(section, localPoint, &localOffset);
            CHKERRQ(ierr);
            ierr = PetscSectionGetDof(section, localPoint, &dof);
            CHKERRQ(ierr);
            for (d = 0; d < dof; ++d, ++ind) {
                localIndices[ind]        = localOffset+d;
                remoteIndices[ind].rank  = rank;
                remoteIndices[ind].index = remoteOffset+d;
            }
        }
    }
    ierr = PetscFree(remoteOffsets);
    CHKERRQ(ierr);
    if (numIndices != ind) SETERRQ2(((PetscObject) sf)->comm, PETSC_ERR_PLIB, "Inconsistency in indices, %d should be %d", ind, numIndices);
    ierr = PetscSFCreate(((PetscObject) sf)->comm, sectionSF);
    CHKERRQ(ierr);
    ierr = PetscSFSetGraph(*sectionSF, numIndices, numIndices, localIndices, PETSC_OWN_POINTER, remoteIndices, PETSC_OWN_POINTER);
    CHKERRQ(ierr);
    ierr = PetscSFView(*sectionSF, NULL);
    CHKERRQ(ierr);
    PetscFunctionReturn(0);
}
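
A rough, self-contained sketch of the PetscSFSetGraph() call that a routine like the one above ultimately issues; the helper name SketchSF and all index values below are hypothetical and not taken from the example. Each leaf stored at ilocal[i] on this rank mirrors entry iremote[i].index owned by rank iremote[i].rank.

#include <petscsf.h>

static PetscErrorCode SketchSF(void)
{
    PetscSF        sf;
    PetscInt       ilocal[2];
    PetscSFNode    iremote[2];
    PetscErrorCode ierr;

    /* two leaves on this rank: local slot 0 mirrors entry 5 on rank 0, local slot 1 mirrors entry 3 on rank 1 */
    ilocal[0] = 0; iremote[0].rank = 0; iremote[0].index = 5;
    ilocal[1] = 1; iremote[1].rank = 1; iremote[1].index = 3;

    ierr = PetscSFCreate(PETSC_COMM_WORLD, &sf);CHKERRQ(ierr);
    ierr = PetscSFSetGraph(sf, 10, 2, ilocal, PETSC_COPY_VALUES, iremote, PETSC_COPY_VALUES);CHKERRQ(ierr); /* 10 = assumed number of locally owned root entries */
    ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
    return 0;
}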
Ejemplo n.º 20
0
int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscInt       i,n = 5;
  PetscInt       getpetsc[]  = {0,3,4},getapp[]  = {2,1,9,7};
  PetscInt       getpetsc1[] = {0,3,4},getapp1[] = {2,1,9,7};
  PetscInt       getpetsc2[] = {0,3,4},getapp2[] = {2,1,9,7};
  PetscInt       getpetsc3[] = {0,3,4},getapp3[] = {2,1,9,7};
  PetscInt       getpetsc4[] = {0,3,4},getapp4[] = {2,1,9,7};
  PetscMPIInt    rank,size;
  IS             ispetsc,isapp;
  AO             ao;
  const PetscInt *app;

  ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;
  ierr = PetscOptionsGetInt(NULL,NULL,"-n",&n,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);

  /* create the index sets */
  ierr = ISCreateStride(PETSC_COMM_WORLD,n,rank,size,&isapp);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_WORLD,n,n*rank,1,&ispetsc);CHKERRQ(ierr); /* natural numbering */

  /* create the application ordering */
  ierr = AOCreateBasicIS(isapp,ispetsc,&ao);CHKERRQ(ierr);
  ierr = AOView(ao,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  ierr = AOPetscToApplication(ao,4,getapp);CHKERRQ(ierr);
  ierr = AOApplicationToPetsc(ao,3,getpetsc);CHKERRQ(ierr);

  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] 2,1,9,7 PetscToApplication %D %D %D %D\n",rank,getapp[0],getapp[1],getapp[2],getapp[3]);CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] 0,3,4 ApplicationToPetsc %D %D %D\n",rank,getpetsc[0],getpetsc[1],getpetsc[2]);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);CHKERRQ(ierr);
  ierr = AODestroy(&ao);CHKERRQ(ierr);

  /* test MemoryScalable ao */
  /*-------------------------*/
  ierr = PetscPrintf(PETSC_COMM_WORLD,"\nTest AOCreateMemoryScalable: \n");CHKERRQ(ierr);
  ierr = AOCreateMemoryScalableIS(isapp,ispetsc,&ao);CHKERRQ(ierr);
  ierr = AOView(ao,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  ierr = AOPetscToApplication(ao,4,getapp1);CHKERRQ(ierr);
  ierr = AOApplicationToPetsc(ao,3,getpetsc1);CHKERRQ(ierr);

  /* Check accuracy */
  for (i=0; i<4; i++) {
    if (getapp1[i] != getapp[i]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_USER,"getapp1 %d != getapp %d",getapp1[i],getapp[i]);
  }
  for (i=0; i<3; i++) {
    if (getpetsc1[i] != getpetsc[i]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_USER,"getpetsc1 %d != getpetsc %d",getpetsc1[i],getpetsc[i]);
  }

  ierr = AODestroy(&ao);CHKERRQ(ierr);

  /* test MemoryScalable ao: ispetsc = NULL */
  /*-----------------------------------------------*/
  ierr = PetscPrintf(PETSC_COMM_WORLD,"\nTest AOCreateMemoryScalable with ispetsc=NULL:\n");CHKERRQ(ierr);
  ierr = AOCreateMemoryScalableIS(isapp,NULL,&ao);CHKERRQ(ierr);

  ierr = AOView(ao,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  ierr = AOPetscToApplication(ao,4,getapp2);CHKERRQ(ierr);
  ierr = AOApplicationToPetsc(ao,3,getpetsc2);CHKERRQ(ierr);

  /* Check accuracy */
  for (i=0; i<4; i++) {
    if (getapp2[i] != getapp[i]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_USER,"getapp2 %d != getapp %d",getapp2[i],getapp[i]);
  }
  for (i=0; i<3; i++) {
    if (getpetsc2[i] != getpetsc[i]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_USER,"getpetsc2 %d != getpetsc %d",getpetsc2[i],getpetsc[i]);
  }
  ierr = AODestroy(&ao);CHKERRQ(ierr);

  /* test AOCreateMemoryScalable() ao: */
  ierr = ISGetIndices(isapp,&app);CHKERRQ(ierr);
  ierr = AOCreateMemoryScalable(PETSC_COMM_WORLD,n,app,NULL,&ao);CHKERRQ(ierr);
  ierr = ISRestoreIndices(isapp,&app);CHKERRQ(ierr);

  ierr = AOPetscToApplication(ao,4,getapp4);CHKERRQ(ierr);
  ierr = AOApplicationToPetsc(ao,3,getpetsc4);CHKERRQ(ierr);

  /* Check accuracy */
  for (i=0; i<4; i++) {
    if (getapp4[i] != getapp[i]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_USER,"getapp4 %d != getapp %d",getapp4[i],getapp[i]);
  }
  for (i=0; i<3; i++) {
    if (getpetsc4[i] != getpetsc[i]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_USER,"getpetsc4 %d != getpetsc %d",getpetsc4[i],getpetsc[i]);
  }
  ierr = AODestroy(&ao);CHKERRQ(ierr);

  /* test general API */
  /*------------------*/
  ierr = PetscPrintf(PETSC_COMM_WORLD,"\nTest general API: \n");CHKERRQ(ierr);
  ierr = AOCreate(PETSC_COMM_WORLD,&ao);CHKERRQ(ierr);
  ierr = AOSetIS(ao,isapp,ispetsc);CHKERRQ(ierr);
  ierr = AOSetType(ao,AOMEMORYSCALABLE);CHKERRQ(ierr);
  ierr = AOSetFromOptions(ao);CHKERRQ(ierr);

  /* ispetsc and isapp are no longer used. */
  ierr = ISDestroy(&ispetsc);CHKERRQ(ierr);
  ierr = ISDestroy(&isapp);CHKERRQ(ierr);

  ierr = AOPetscToApplication(ao,4,getapp3);CHKERRQ(ierr);
  ierr = AOApplicationToPetsc(ao,3,getpetsc3);CHKERRQ(ierr);

  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] 2,1,9,7 PetscToApplication %D %D %D %D\n",rank,getapp3[0],getapp3[1],getapp3[2],getapp3[3]);CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] 0,3,4 ApplicationToPetsc %D %D %D\n",rank,getpetsc3[0],getpetsc3[1],getpetsc3[2]);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);CHKERRQ(ierr);

  /* Check accuracy */
  for (i=0; i<4; i++) {
    if (getapp3[i] != getapp[i]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_USER,"getapp3 %d != getapp %d",getapp3[i],getapp[i]);
  }
  for (i=0; i<3; i++) {
    if (getpetsc3[i] != getpetsc[i]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_USER,"getpetsc3 %d != getpetsc %d",getpetsc3[i],getpetsc[i]);
  }

  ierr = AODestroy(&ao);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
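
The example above always builds the AO from index sets. Below is a minimal sketch of the array-based creator AOCreateBasic() for comparison; the helper name SketchAO and the index values are illustrative only, and passing NULL for the PETSc array assumes the natural contiguous ordering.

#include <petscao.h>

static PetscErrorCode SketchAO(void)
{
  AO             ao;
  PetscInt       app[]   = {3,1,0,2};  /* application numbering of the 4 points owned here */
  PetscInt       query[] = {0,3};      /* indices to translate; mapped in place below */
  PetscErrorCode ierr;

  ierr = AOCreateBasic(PETSC_COMM_WORLD,4,app,NULL,&ao);CHKERRQ(ierr); /* NULL => natural PETSc ordering */
  ierr = AOApplicationToPetsc(ao,2,query);CHKERRQ(ierr);
  ierr = AODestroy(&ao);CHKERRQ(ierr);
  return 0;
}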
Ejemplo n.º 21
0
Archivo: ex2.c Proyecto: Kun-Qu/petsc
int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size;

  /*
    Every PETSc program should begin with the PetscInitialize() routine.
    argc, argv - These command line arguments are taken to extract the options
                 supplied to PETSc and options supplied to MPI.
    help       - When the PETSc executable is invoked with the option -help,
                 it prints the various options that can be applied at
                 runtime.  The user can use the "help" variable to place
                 additional help messages in this printout.
  */
  ierr = PetscInitialize(&argc,&argv,PETSC_NULL,help);CHKERRQ(ierr);

  /* 
     The following MPI calls return the number of processes
     being used and the rank of this process in the group.
   */
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  /* 
     Here we would like to print only one message that represents
     all the processes in the group.  We use PetscPrintf() with the 
     communicator PETSC_COMM_WORLD.  Thus, only one message is
     printed representing PETSC_COMM_WORLD, i.e., all the processors.
  */
  ierr = PetscPrintf(PETSC_COMM_WORLD,"Number of processors = %d, rank = %d\n",size,rank);CHKERRQ(ierr);
  /* 
     Here we would like to print info from each process, such that
     output from process "n" appears after output from process "n-1".
     To do this we use a combination of PetscSynchronizedPrintf() and
     PetscSynchronizedFlush() with the communicator PETSC_COMM_WORLD.
     All the processes print the message, one after another. 
     PetscSynchronizedFlush() indicates that the current process in the
     given communicator has concluded printing, so that the next process
     in the communicator can begin printing to the screen.
     */
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] Synchronized Hello World.\n",rank);CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] Synchronized Hello World - Part II.\n",rank);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD);CHKERRQ(ierr);
  /*
    Here a barrier is used to separate the two states.
  */
  ierr = MPI_Barrier(PETSC_COMM_WORLD);CHKERRQ(ierr);

  /*
    Here we simply use PetscPrintf() with the communicator PETSC_COMM_SELF
    (where each process is considered separately).  Thus, this time the
    output from different processes does not appear in any particular order.
  */
  ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] Jumbled Hello World\n",rank);CHKERRQ(ierr);

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_summary).  
     See the PetscFinalize() manpage for more information.
  */
  ierr = PetscFinalize();
  return 0;
}
Ejemplo n.º 22
0
int implicit_solver(HashTable* El_Table, HashTable* NodeTable, double delta_t, double LapCoef,
    TimeProps* timeprops_ptr) {
	Vec x, b, xlocal; /* approx solution, RHS */
	Mat A; /* linear system matrix */
	KSP ksp; /* KSP context */
	PetscReal norm; //,val1,val2;         /* norm of solution error */
	PetscErrorCode ierr;
	PetscInt xsize, *num_elem_proc, *to, *from, its;
	PetscMPIInt rank, size;
	KSPConvergedReason reason;
	VecScatter vscat;
	IS globalis, tois;

	MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
	MPI_Comm_size(PETSC_COMM_WORLD, &size);

	/* -------------------------------------------------------------------
	 Compute the matrix and right-hand-side vector that define
	 the linear system, Ax = b.
	 ------------------------------------------------------------------- */

	ierr = PetscMalloc(size * sizeof(PetscInt), &num_elem_proc);
	CHKERRQ(ierr);
	num_elem_proc[rank] = num_nonzero_elem(El_Table);

	if (rank > 0)
		MPI_Send(&num_elem_proc[rank], 1, MPI_INT, 0, 22, PETSC_COMM_WORLD);

	if (rank == 0)
		for (int i = 1; i < size; i++)
			MPI_Recv(&num_elem_proc[i], 1, MPI_INT, i, 22, PETSC_COMM_WORLD, MPI_STATUS_IGNORE);

	MPI_Barrier(PETSC_COMM_WORLD);
	MPI_Bcast(num_elem_proc, size, MPI_INT, 0, PETSC_COMM_WORLD);

	//printf("Number of elements are (hi i am second)...........%d\n", num_nonzero_elem(Laplacian->El_Table));
	int total_elem = 0, start_elem = 0;
	//MPI_Allreduce(&num_elem, total_elem, 1, MPI_INT, MPI_SUM, PETSC_COMM_WORLD);

	ierr = PetscMalloc(num_elem_proc[rank] * sizeof(PetscInt), &to);
	CHKERRQ(ierr);
	ierr = PetscMalloc(num_elem_proc[rank] * sizeof(PetscInt), &from);
	CHKERRQ(ierr);

	for (int i = 0; i < size; i++)
		total_elem += num_elem_proc[i];

	for (int i = 0; i < rank; i++)
		start_elem += num_elem_proc[i];

	for (int i = 0; i < num_elem_proc[rank]; i++) {
		from[i] = i;
		to[i] = i + start_elem;
	}
	ierr = ISCreateGeneral(PETSC_COMM_WORLD, num_elem_proc[rank], from, PETSC_COPY_VALUES, &tois);
	CHKERRQ(ierr);
	ierr = ISCreateGeneral(PETSC_COMM_WORLD, num_elem_proc[rank], to, PETSC_COPY_VALUES, &globalis);
	CHKERRQ(ierr);

	/*
	 Create parallel vectors
	 */
	ierr = VecCreate(PETSC_COMM_WORLD, &b);
	CHKERRQ(ierr);
	ierr = VecSetType(b, VECSTANDARD);
	CHKERRQ(ierr);
	ierr = VecSetSizes(b, num_elem_proc[rank], total_elem);
	CHKERRQ(ierr);
	ierr = VecSetFromOptions(b);
	CHKERRQ(ierr);
	ierr = VecGetSize(b, &xsize);
	//cout<<"size b is  "<<xsize<<endl;

	ierr = VecCreateSeq(PETSC_COMM_SELF, num_elem_proc[rank], &xlocal);
	CHKERRQ(ierr);
	ierr = VecScatterCreate(xlocal, tois, b, globalis, &vscat);
	CHKERRQ(ierr);

	// we have to create a map between local and global vector
	myctx.El_Table = El_Table;
	myctx.Node_Table = NodeTable;
	myctx.Scatter = vscat;
	myctx.Total_elem = total_elem;
	myctx.Num_elem_proc = num_elem_proc;
	myctx.rank = rank;
	myctx.size = size;
	myctx.Timeptr = timeprops_ptr;
	myctx.LapCoef = LapCoef;
	myctx.delta_t = delta_t;

	/*
	 right-hand-side vector.
	 */

	ierr = MakeRHS(&myctx, b);
	CHKERRQ(ierr);
	//ierr = VecView(b,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);

	ierr = VecDuplicate(b, &x);
	CHKERRQ(ierr);
	ierr = VecCopy(b, x);
	CHKERRQ(ierr);
	//ierr = VecView(x,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
	ierr = VecGetSize(x, &xsize);
	//cout<<"size x is  "<<xsize<<endl;

	/*
	 Create and assemble parallel matrix
	 */

	ierr = MatCreateShell(PETSC_COMM_WORLD, num_elem_proc[rank], num_elem_proc[rank], total_elem,
	    total_elem, &myctx, &A);
	CHKERRQ(ierr);
	ierr = MatShellSetOperation(A, MATOP_MULT, (void (*)(void)) MatLaplacian2D_Mult);
	CHKERRQ(ierr);

	/*
	 Create linear solver context
	 */
	ierr = KSPCreate(PETSC_COMM_WORLD, &ksp);
	CHKERRQ(ierr);

	/*
	 Set operators. Here the matrix that defines the linear system
	 also serves as the preconditioning matrix.
	 */
	ierr = KSPSetOperators(ksp, A, A/*,DIFFERENT_NONZERO_PATTERN*/);
	CHKERRQ(ierr);
	ierr = KSPSetType(ksp, KSPFGMRES);
	CHKERRQ(ierr);

	/*
	 Set default preconditioner for this program to be block Jacobi.
	 This choice can be overridden at runtime with the option
	 -pc_type <type>
	 */
	//  ierr = KSPSetTolerances(ksp,1.e-7,PETSC_DEFAULT,1.e9,3000);CHKERRQ(ierr);
	ierr = KSPSetTolerances(ksp, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT, 50000);
	CHKERRQ(ierr);
	/* -------------------------------------------------------------------
	 Solve the linear system
	 ------------------------------------------------------------------- */
	ierr = KSPSetInitialGuessNonzero(ksp, PETSC_TRUE);
	CHKERRQ(ierr);
	/*
	 Solve the linear system
	 */
	ierr = KSPSolve(ksp, b, x);
	CHKERRQ(ierr);

	/* -------------------------------------------------------------------
	 Check solution and clean up
	 ------------------------------------------------------------------- */

	/*
	 Check the error
	 */
	//ierr = VecNorm(x,NORM_2,&norm);CHKERRQ(ierr);
	if (rank == 0) {

		ierr = KSPGetIterationNumber(ksp, &its);
		CHKERRQ(ierr);
		ierr = KSPGetResidualNorm(ksp, &norm);
		CHKERRQ(ierr);
		ierr = PetscSynchronizedPrintf( MPI_COMM_SELF, "Norm of error %g iterations %D\n",
		    (double) norm, its);
		CHKERRQ(ierr);
		//PetscSynchronizedFlush(PETSC_COMM_WORLD);
		ierr = KSPGetConvergedReason(ksp, &reason);
		CHKERRQ(ierr);
		ierr = PetscSynchronizedPrintf( MPI_COMM_SELF, "KSP converged reason is: ...........%D \n",
		    reason);
		CHKERRQ(ierr);
		//PetscSynchronizedFlush(PETSC_COMM_WORLD);
	}
	/*

	 */
	//ierr = VecGetArray(x,&xx);CHKERRQ(ierr);
	update_phi(El_Table, x, &myctx);
	//ierr = VecRestoreArray(x, &xx);CHKERRQ(ierr);
	/*
	 Free work space.  All PETSc objects should be destroyed when they
	 are no longer needed.
	 */
	MPI_Barrier(PETSC_COMM_WORLD);

	ierr = KSPDestroy(&ksp);
	CHKERRQ(ierr); //ierr = PetscFree(phin);CHKERRQ(ierr);
	ierr = VecDestroy(&x);
	CHKERRQ(ierr); //ierr = PCDestroy(&pc);CHKERRQ(ierr);
	ierr = VecDestroy(&b);
	CHKERRQ(ierr);
	ierr = MatDestroy(&A);
	CHKERRQ(ierr);
	ierr = VecScatterDestroy(&vscat);
	CHKERRQ(ierr);
	ierr = ISDestroy(&globalis);
	CHKERRQ(ierr);
	ierr = ISDestroy(&tois);
	CHKERRQ(ierr);
	ierr = PetscFree(num_elem_proc);
	CHKERRQ(ierr);
	ierr = PetscFree(to);
	CHKERRQ(ierr);
	ierr = PetscFree(from);
	CHKERRQ(ierr);

	return 0;
}
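
MatLaplacian2D_Mult registered via MatShellSetOperation() above is not shown in this example; a matrix-free multiply callback for MATOP_MULT is expected to have the shape sketched below. The body here is only a placeholder, not the actual operator.

PetscErrorCode MatLaplacian2D_Mult_Sketch(Mat A, Vec x, Vec y) {
	void *ctx;
	PetscErrorCode ierr;

	ierr = MatShellGetContext(A, &ctx); /* recovers the context handed to MatCreateShell() */
	CHKERRQ(ierr);
	/* ... apply the discrete operator to x and store the result in y ... */
	ierr = VecCopy(x, y); /* placeholder action, for illustration only */
	CHKERRQ(ierr);
	return 0;
}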
Ejemplo n.º 23
0
Archivo: ex16.c Proyecto: 00liujj/petsc
int main(int argc,char **args)
{
  Mat            A;
  PetscInt       i,j,m = 3,n = 2,rstart,rend;
  PetscErrorCode ierr;
  PetscScalar    v,*array;
  PetscViewer    view;

  PetscInitialize(&argc,&args,(char*)0,help);

  /*
      Create a parallel dense matrix shared by all processors
  */
  ierr = MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,m,n,NULL,&A);CHKERRQ(ierr);

  /*
     Set values into the matrix
  */
  for (i=0; i<m; i++) {
    for (j=0; j<n; j++) {
      v = 9.0/(i+j+1); ierr = MatSetValues(A,1,&i,1,&j,&v,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /*
       Print the matrix to the screen
  */
  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);


  /*
      Print the local portion of the matrix to the screen
  */
  ierr = MatDenseGetArray(A,&array);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    for (j=0; j<n; j++) {
      PetscSynchronizedPrintf(PETSC_COMM_WORLD,"%6.4e ",(double)PetscRealPart(array[j*(rend-rstart)+i-rstart]));
    }
    PetscSynchronizedPrintf(PETSC_COMM_WORLD,"\n");
  }
  PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
  ierr = MatDenseRestoreArray(A,&array);CHKERRQ(ierr);

  /*
      Store the binary matrix to a file
  */
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD, "matrix.dat", FILE_MODE_WRITE, &view);CHKERRQ(ierr);
  ierr = PetscViewerSetFormat(view,PETSC_VIEWER_NATIVE);CHKERRQ(ierr);
  ierr = MatView(A,view);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&view);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);

  /*
     Now reload the matrix and view it
  */
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&view);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatLoad(A,view);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&view);CHKERRQ(ierr);
  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);

  ierr = PetscFinalize();
  return 0;
}
Ejemplo n.º 24
0
int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*toranks,*fromranks,nto,nfrom;
  PetscInt       i,n;
  PetscBool      verbose,build_twosided_f;
  Unit           *todata,*fromdata;
  MPI_Datatype   dtype;

  PetscInitialize(&argc,&argv,(char*)0,help);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  verbose = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,"-verbose",&verbose,NULL);CHKERRQ(ierr);
  build_twosided_f = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,"-build_twosided_f",&build_twosided_f,NULL);CHKERRQ(ierr);

  for (i=1,nto=0; i<size; i*=2) nto++;
  ierr = PetscMalloc2(nto,&todata,nto,&toranks);CHKERRQ(ierr);
  for (n=0,i=1; i<size; n++,i*=2) {
    toranks[n] = (rank+i) % size;
    todata[n].rank  = (rank+i) % size;
    todata[n].value = (PetscScalar)rank;
    todata[n].ok[0] = 'o';
    todata[n].ok[1] = 'k';
    todata[n].ok[2] = 0;
  }
  if (verbose) {
    for (i=0; i<nto; i++) {
      ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] TO %d: {%D, %g, \"%s\"}\n",rank,toranks[i],todata[i].rank,(double)PetscRealPart(todata[i].value),todata[i].ok);CHKERRQ(ierr);
    }
    ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);CHKERRQ(ierr);
  }

  ierr = MakeDatatype(&dtype);CHKERRQ(ierr);

  if (build_twosided_f) {
    struct FCtx fctx;
    PetscMPIInt *todummy,*fromdummy;
    fctx.rank    = rank;
    fctx.nto     = nto;
    fctx.toranks = toranks;
    fctx.todata  = todata;
    ierr = PetscSegBufferCreate(sizeof(Unit),1,&fctx.seg);CHKERRQ(ierr);
    ierr = PetscMalloc1(nto,&todummy);CHKERRQ(ierr);
    for (i=0; i<nto; i++) todummy[i] = rank;
    ierr = PetscCommBuildTwoSidedF(PETSC_COMM_WORLD,1,MPI_INT,nto,toranks,todummy,&nfrom,&fromranks,&fromdummy,2,FSend,FRecv,&fctx);CHKERRQ(ierr);
    ierr = PetscFree(todummy);CHKERRQ(ierr);
    ierr = PetscFree(fromdummy);CHKERRQ(ierr);
    ierr = PetscSegBufferExtractAlloc(fctx.seg,&fromdata);CHKERRQ(ierr);
    ierr = PetscSegBufferDestroy(&fctx.seg);CHKERRQ(ierr);
  } else {
    ierr = PetscCommBuildTwoSided(PETSC_COMM_WORLD,1,dtype,nto,toranks,todata,&nfrom,&fromranks,&fromdata);CHKERRQ(ierr);
  }
  ierr = MPI_Type_free(&dtype);CHKERRQ(ierr);

  if (verbose) {
    PetscInt *iranks,*iperm;
    ierr = PetscMalloc2(nfrom,&iranks,nfrom,&iperm);CHKERRQ(ierr);
    for (i=0; i<nfrom; i++) {
      iranks[i] = fromranks[i];
      iperm[i] = i;
    }
    /* Receive ordering is non-deterministic in general, so sort to make verbose output deterministic. */
    ierr = PetscSortIntWithPermutation(nfrom,iranks,iperm);CHKERRQ(ierr);
    for (i=0; i<nfrom; i++) {
      PetscInt ip = iperm[i];
      ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] FROM %d: {%D, %g, \"%s\"}\n",rank,fromranks[ip],fromdata[ip].rank,(double)PetscRealPart(fromdata[ip].value),fromdata[ip].ok);CHKERRQ(ierr);
    }
    ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);CHKERRQ(ierr);
    ierr = PetscFree2(iranks,iperm);CHKERRQ(ierr);
  }

  if (nto != nfrom) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"[%d] From ranks %d does not match To ranks %d",rank,nto,nfrom);
  for (i=1; i<size; i*=2) {
    PetscMPIInt expected_rank = (rank-i+size)%size;
    PetscBool flg;
    for (n=0; n<nfrom; n++) {
      if (expected_rank == fromranks[n]) goto found;
    }
    SETERRQ2(PETSC_COMM_WORLD,PETSC_ERR_PLIB,"[%d] Could not find expected from rank %d",rank,expected_rank);
    found:
    if (PetscRealPart(fromdata[n].value) != expected_rank) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"[%d] Got data %g from rank %d",rank,(double)PetscRealPart(fromdata[n].value),expected_rank);
    ierr = PetscStrcmp(fromdata[n].ok,"ok",&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"[%d] Got string %s from rank %d",rank,fromdata[n].ok,expected_rank);
  }
  ierr = PetscFree2(todata,toranks);CHKERRQ(ierr);
  ierr = PetscFree(fromdata);CHKERRQ(ierr);
  ierr = PetscFree(fromranks);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
Ejemplo n.º 25
0
PetscErrorCode LoadTestMatrices(Mat *_A,Vec *_x,Vec *_b,IS *_isu,IS *_isp)
{
  Vec            f,h,x,b,bX[2];
  Mat            A,Auu,Aup,Apu,App,bA[2][2];
  IS             is_u,is_p,bis[2];
  PetscInt       lnu,lnp,nu,np,i,start_u,end_u,start_p,end_p;
  VecScatter     *vscat;
  PetscMPIInt    rank;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  /* fetch test matrices and vectors */
  ierr = LSCLoadTestOperators(&Auu,&Aup,&Apu,&App,&f,&h);CHKERRQ(ierr);

  /* build the mat-nest */
  ierr = VecGetSize(f,&nu);CHKERRQ(ierr);
  ierr = VecGetSize(h,&np);CHKERRQ(ierr);

  ierr = VecGetLocalSize(f,&lnu);CHKERRQ(ierr);
  ierr = VecGetLocalSize(h,&lnp);CHKERRQ(ierr);

  ierr = VecGetOwnershipRange(f,&start_u,&end_u);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(h,&start_p,&end_p);CHKERRQ(ierr);

  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] lnu = %D | lnp = %D \n", rank, lnu, lnp);CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] s_u = %D | e_u = %D \n", rank, start_u, end_u);CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] s_p = %D | e_p = %D \n", rank, start_p, end_p);CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] is_u (offset) = %D \n", rank, start_u+start_p);CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] is_p (offset) = %D \n", rank, start_u+start_p+lnu);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);CHKERRQ(ierr);

  ierr = ISCreateStride(PETSC_COMM_WORLD,lnu,start_u+start_p,1,&is_u);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_WORLD,lnp,start_u+start_p+lnu,1,&is_p);CHKERRQ(ierr);

  bis[0]   = is_u; bis[1]   = is_p;
  bA[0][0] = Auu;  bA[0][1] = Aup;
  bA[1][0] = Apu;  bA[1][1] = App;
  ierr     = MatCreateNest(PETSC_COMM_WORLD,2,bis,2,bis,&bA[0][0],&A);CHKERRQ(ierr);
  ierr     = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr     = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* Pull f,h into b */
  ierr  = MatCreateVecs(A,&b,&x);CHKERRQ(ierr);
  bX[0] = f;  bX[1] = h;
  ierr  = PetscMalloc1(2,&vscat);CHKERRQ(ierr);
  for (i=0; i<2; i++) {
    ierr = VecScatterCreate(b,bis[i],bX[i],NULL,&vscat[i]);CHKERRQ(ierr);
    ierr = VecScatterBegin(vscat[i],bX[i],b,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  }
  for (i=0; i<2; i++) {
    ierr = VecScatterEnd(vscat[i],bX[i],b,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  }

  /* tidy up */
  for (i=0; i<2; i++) {
    ierr = VecScatterDestroy(&vscat[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(vscat);CHKERRQ(ierr);
  ierr = MatDestroy(&Auu);CHKERRQ(ierr);
  ierr = MatDestroy(&Aup);CHKERRQ(ierr);
  ierr = MatDestroy(&Apu);CHKERRQ(ierr);
  ierr = MatDestroy(&App);CHKERRQ(ierr);
  ierr = VecDestroy(&f);CHKERRQ(ierr);
  ierr = VecDestroy(&h);CHKERRQ(ierr);

  *_isu = is_u;
  *_isp = is_p;
  *_A   = A;
  *_x   = x;
  *_b   = b;
  PetscFunctionReturn(0);
}
Ejemplo n.º 26
0
int main ( int argc, char* argv[] )
{
	/* parse command line arguments */
	std::string anArg;
	std::string modelRoot;
	#if defined(USE_MPI) || defined(USE_MPI_PARPROC) || defined(USE_MPI_REGSOIL) || \
		defined(USE_MPI_GEMS) || defined(USE_MPI_KRC)
		int nb_ddc=0; //number of cores for DDC related processes
	#endif

	for( int i = 1; i < argc; i++ )
	{
		anArg = std::string( argv[i] );
		if( anArg == "--help" || anArg == "-h")
		{
			std::cout << "Usage: ogs [MODEL_ROOT] [OPTIONS]\n"
			          << "Where OPTIONS are:\n"
			          << "  -h [--help]               print this message and exit\n"
			          << "  -b [--build-info]         print build info and exit\n"
			          << "  --output-directory DIR    put output files into DIR\n"
			          << "  --version                 print ogs version and exit" << "\n";
			continue;
		}
		if( anArg == "--build-info" || anArg == "-b" )
		{
			std::cout << "ogs version: " << BuildInfo::OGS_VERSION << "\n"
			          << "ogs date: " << BuildInfo::OGS_DATE << "\n";
			std::cout << "git commit info: " << BuildInfo::GIT_COMMIT_INFO << "\n";
			std::cout << "build timestamp: " << BuildInfo::BUILD_TIMESTAMP << "\n";
			continue;
		}
		if( anArg == "--version" )
		{
			std::cout << BuildInfo::OGS_VERSION << "\n";
			continue;
		}
		if( anArg == "--model-root" || anArg == "-m" )
		{
			if (i+1 >= argc) {
				std::cerr << "Error: Parameter " << anArg << " needs an additional argument" << std::endl;
				std::exit(EXIT_FAILURE);
			}
			modelRoot = std::string( argv[++i] );
			continue;
		}
		if (anArg == "--output-directory")
		{
			if (i+1 >= argc) {
				std::cerr << "Error: Parameter " << anArg << " needs an additional argument" << std::endl;
				std::exit(EXIT_FAILURE);
			}
			std::string path = argv[++i];

			if (! path.empty()) defaultOutputPath = path;
			continue;
		}
#if defined(USE_MPI) || defined(USE_MPI_PARPROC) || defined(USE_MPI_REGSOIL) || \
	defined(USE_MPI_GEMS) || defined(USE_MPI_KRC)
		std::string decompositions;
		if( anArg == "--domain-decomposition" || anArg == "-ddc" )
		{
			decompositions = std::string( argv[++i] );
			nb_ddc = atoi(decompositions.c_str());
			continue;
		}
#endif
		// anything left over must be the model root, unless already found
		if ( modelRoot == "" )
			modelRoot = std::string( argv[i] );
	} // end of parse argc loop

	if( argc > 1 && modelRoot == "" ) // non-interactive mode and no model given
		exit(0);             // e.g. just wanted the build info

	std::string solver_pkg_name = BuildInfo::SOLVER_PACKAGE_NAME;
	// No default linear solver package is in use.
	if(solver_pkg_name.find("Default") == std::string::npos)
	{
		std::cout << "\nWarning: " << solver_pkg_name
		<< " other than the OGS default one is in use." <<std::endl;
		std::cout << "         The solver setting may need to be adjusted for the solution accuracy!" << std::endl;
	}

	char* dateiname(NULL);
#ifdef SUPERCOMPUTER
// *********************************************************************
// buffered output ... important for performance on cray
// (unbuffered output is limited to 10 bytes per second)
// [email protected] 11.10.2007

	char buf[1024 * 1024];
	int bsize;

	bsize = 1024 * 1024; // question: what happens if buffer is full?
	                     // According to the documentation, the buffer is flushed when full.
	                     // If we have a lot of output, increasing the buffer is useful.
	if(bsize > 0)
//        bufstd = malloc(bsize);
		setvbuf(stdout, buf, _IOFBF, bsize);
	//**********************************************************************
#endif
/*---------- MPI Initialization ----------------------------------*/
#if defined(USE_MPI) || defined(USE_MPI_PARPROC) || defined(USE_MPI_REGSOIL) || \
	defined(USE_MPI_GEMS) || defined(USE_MPI_KRC)
	printf("Before MPI_Init\n");
#if defined(USE_MPI_GEMS)
	int prov;
	MPI_Init_thread(&argc,&argv,MPI_THREAD_FUNNELED, &prov);
#else
	MPI_Init(&argc,&argv);
#endif
	MPI_Barrier (MPI_COMM_WORLD); // 12.09.2007 WW
	elapsed_time_mpi = -MPI_Wtime(); // 12.09.2007 WW
	bool splitcomm_flag;
	int np;
	MPI_Comm_size(MPI_COMM_WORLD, &np);
	splitcomm_flag = SplitMPI_Communicator::CreateCommunicator(MPI_COMM_WORLD, np, nb_ddc);
	time_ele_paral = 0.0;
#endif
/*---------- MPI Initialization ----------------------------------*/


#ifdef USE_PETSC
	int rank, r_size;
	PetscLogDouble v1,v2;
	char help[] = "OGS with PETSc \n";
	//PetscInitialize(argc, argv, help);
	PetscInitialize(&argc,&argv,(char *)0,help);
	//kg44 quick fix to compile PETSC with version PETSCV3.4
#ifdef USEPETSC34
       PetscTime(&v1);
#else
       PetscGetTime(&v1);
#endif
	MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
	MPI_Comm_size(PETSC_COMM_WORLD, &r_size);
	PetscSynchronizedPrintf(PETSC_COMM_WORLD, "===\nUse PETSc solver");
	PetscSynchronizedPrintf(PETSC_COMM_WORLD, "Number of CPUs: %d, rank: %d\n", r_size, rank);
#endif

/*---------- LIS solver -----------------------------------------*/
#ifdef LIS
	//Initialization of the lis solver.
	lis_initialize(&argc, &argv);
#endif
/*========================================================================*/
/* Communication with the operating system */
	/* Start the timer for the total run time */
#ifdef TESTTIME
	TStartTimer(0);
#endif
	/* Print the intro */
#if defined(USE_MPI) //WW
	if(myrank == 0)
#endif
#ifdef USE_PETSC
        if(rank == 0 )
#endif

	DisplayStartMsg();
	/* Initialize memory management */
	if (!InitMemoryTest())
	{
		DisplayErrorMsg("Fehler: Speicherprotokoll kann nicht erstellt werden!");
		DisplayErrorMsg("        Programm vorzeitig beendet!");
		return 1; // LB changed from 0 to 1 because 0 indicates success
	}
	if( argc == 1 )               // interactive mode

		dateiname = ReadString();
	else                         // non-interactive mode
	{
		if ( argc == 2 )     // a model root was supplied
		{
			dateiname = (char*) Malloc((int)strlen(argv[1]) + 1);
			dateiname = strcpy(dateiname,argv[1]);
		}
		else                // several args supplied
		if( modelRoot != "")
		{
			dateiname = (char*) Malloc( (int) modelRoot.size() + 1 );
			dateiname = strcpy( dateiname, modelRoot.c_str() );
		}
		DisplayMsgLn(dateiname);
	}
	//WW  DisplayMsgLn("");
	//WW  DisplayMsgLn("");
	// ----------23.02.2009. WW-----------------

	// LB Check if file exists
	std::string tmpFilename = dateiname;
	tmpFilename.append(".pcs");
	if(!IsFileExisting(tmpFilename))
	{
		std::cout << " Error: Cannot find file " << dateiname << "\n";
		return 1;
	}

	// If no option is given, output files are placed in the same directory as the input files
	if (defaultOutputPath.empty()) defaultOutputPath = pathDirname(std::string(dateiname));

	FileName = dateiname;
	size_t indexChWin, indexChLinux;
	indexChWin = indexChLinux = 0;
	indexChWin = FileName.find_last_of('\\');
	indexChLinux = FileName.find_last_of('/');
	//
	if(indexChWin != std::string::npos)
		FilePath = FileName.substr(0,indexChWin) + "\\";
	else if(indexChLinux != std::string::npos)
		FilePath = FileName.substr(0,indexChLinux) + "/";
	// ---------------------------WW
	Problem* aproblem = new Problem(dateiname);
#ifdef USE_PETSC
	aproblem->setRankandSize(rank, r_size);
#endif
#if defined(USE_MPI) || defined(USE_MPI_PARPROC) || defined(USE_MPI_REGSOIL) || defined(USE_MPI_GEMS)  || defined(USE_MPI_KRC)
	aproblem->setRankandSize(myrank, mysize);

	if (myrank != MPI_UNDEFINED)
	{
#endif
	aproblem->Euler_TimeDiscretize();
	delete aproblem;
	aproblem = NULL;
#if defined(USE_MPI) || defined(USE_MPI_PARPROC) || defined(USE_MPI_REGSOIL) || defined(USE_MPI_GEMS)  || defined(USE_MPI_KRC)
	  }

	//sending killing signals to ranks of group_IPQC, only when the group exists
	if (splitcomm_flag == true){
		int signal = -1, rank_IPQC, mysize_IPQC = np - nb_ddc;
		for (int i=0; i< mysize_IPQC; i++){
			rank_IPQC = mysize + i;
			MPI_Send(&signal, 1, MPI_INT, rank_IPQC, 0, MPI_COMM_WORLD);
		}
 	  }

#endif


	if(ClockTimeVec.size()>0)
		ClockTimeVec[0]->PrintTimes();  //CB time
	DestroyClockTime();
#ifdef TESTTIME
#if defined(USE_MPI)
     if(myrank == 0)
#endif
#if defined(USE_PETSC)
     if(rank == 0)
#endif
	std::cout << "Simulation time: " << TGetTimer(0) << "s" << "\n";
#endif
	/* Print the closing message */
/*--------- MPI Finalize ------------------*/
#if defined(USE_MPI) || defined(USE_MPI_PARPROC) || defined(USE_MPI_REGSOIL) || defined(USE_MPI_KRC)
	elapsed_time_mpi += MPI_Wtime(); // 12.09.2007 WW
	std::cout << "\n *** Total CPU time of parallel modeling: " << elapsed_time_mpi <<
	"\n";                                                                          //WW
	// Count CPU time of post time loop WW
	MPI_Finalize();
#endif
/*--------- MPI Finalize ------------------*/
/*--------- LIS Finalize ------------------*/
#ifdef LIS
	lis_finalize();
#endif
/*--------- LIS Finalize ------------------*/

	free(dateiname);

#ifdef USE_PETSC
	//kg44 quick fix to compile PETSC with version PETSCV3.4
#ifdef USEPETSC34
       PetscTime(&v2);
#else
       PetscGetTime(&v2);
#endif


   PetscPrintf(PETSC_COMM_WORLD,"\t\n>>Total elapsed time by using PETSC:%f s\n",v2-v1);

   PetscFinalize();
#endif

	return 0;
}
Ejemplo n.º 27
0
Archivo: ex9.c Proyecto: Kun-Qu/petsc
int main(int argc,char **argv)
{
  PetscMPIInt    rank,size;
  PetscInt       nlocal = 6,nghost = 2,ifrom[2],i,rstart,rend;
  PetscErrorCode ierr;
  PetscBool      flg,flg2;
  PetscScalar    value,*array,*tarray=0;
  Vec            lx,gx,gxs;

  PetscInitialize(&argc,&argv,(char *)0,help);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  if (size != 2) SETERRQ(PETSC_COMM_SELF,1,"Must run example with two processors\n");

  /*
     Construct a two dimensional graph connecting nlocal degrees of 
     freedom per processor. From this we will generate the global
     indices of needed ghost values

     For simplicity we generate the entire graph on each processor:
     in a real application the graph would be stored in parallel, but this
     example is only to demonstrate the management of ghost padding
     with VecCreateGhost().

     In this example we consider the vector as representing 
     degrees of freedom in a one dimensional grid with periodic 
     boundary conditions.

        ----Processor  1---------  ----Processor 2 --------
         0    1   2   3   4    5    6    7   8   9   10   11
                               |----| 
         |-------------------------------------------------|

  */

  if (!rank) {
    ifrom[0] = 11; ifrom[1] = 6; 
  } else {
    ifrom[0] = 0;  ifrom[1] = 5; 
  }
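
  /* Reading of the diagram above (illustrative): rank 0 owns entries 0..5 and needs global
     entries 11 (the periodic left neighbor of 0) and 6 (the right neighbor of 5) as its ghosts;
     rank 1 owns 6..11 and needs 0 and 5. */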

  /*
     Create the vector with two slots for ghost points. Note that both 
     the local vector (lx) and the global vector (gx) share the same 
     array for storing vector values.
  */
  ierr = PetscOptionsHasName(PETSC_NULL,"-allocate",&flg);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(PETSC_NULL,"-vecmpisetghost",&flg2);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscMalloc((nlocal+nghost)*sizeof(PetscScalar),&tarray);CHKERRQ(ierr);
    ierr = VecCreateGhostWithArray(PETSC_COMM_WORLD,nlocal,PETSC_DECIDE,nghost,ifrom,tarray,&gxs);CHKERRQ(ierr);
  } else if (flg2) {
    ierr = VecCreate(PETSC_COMM_WORLD,&gxs);CHKERRQ(ierr);
    ierr = VecSetType(gxs,VECMPI);CHKERRQ(ierr);
    ierr = VecSetSizes(gxs,nlocal,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecMPISetGhost(gxs,nghost,ifrom);CHKERRQ(ierr);
  } else {
    ierr = VecCreateGhost(PETSC_COMM_WORLD,nlocal,PETSC_DECIDE,nghost,ifrom,&gxs);CHKERRQ(ierr);
  }

  /*
      Test VecDuplicate()
  */
  ierr = VecDuplicate(gxs,&gx);CHKERRQ(ierr);
  ierr = VecDestroy(&gxs);CHKERRQ(ierr);

  /*
     Access the local representation
  */
  ierr = VecGhostGetLocalForm(gx,&lx);CHKERRQ(ierr);

  /*
     Set the values from 0 to 12 into the "global" vector 
  */
  ierr = VecGetOwnershipRange(gx,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    value = (PetscScalar) i;
    ierr  = VecSetValues(gx,1,&i,&value,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = VecAssemblyBegin(gx);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(gx);CHKERRQ(ierr);

  ierr = VecGhostUpdateBegin(gx,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecGhostUpdateEnd(gx,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

  /*
     Print out each vector, including the ghost padding region. 
  */
  ierr = VecGetArray(lx,&array);CHKERRQ(ierr);
  for (i=0; i<nlocal+nghost; i++) {
    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"%D %G\n",i,PetscRealPart(array[i]));CHKERRQ(ierr);
  }
  ierr = VecRestoreArray(lx,&array);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD);CHKERRQ(ierr);

  ierr = VecGhostRestoreLocalForm(gx,&lx);CHKERRQ(ierr); 
  ierr = VecDestroy(&gx);CHKERRQ(ierr);
  if (flg) {ierr = PetscFree(tarray);CHKERRQ(ierr);}
  ierr = PetscFinalize();
  return 0;
}
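
A complementary sketch (not part of the example above): contributions added into the ghost slots of the local form can be accumulated back onto the owning process with a reverse update.

  ierr = VecGhostUpdateBegin(gx,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecGhostUpdateEnd(gx,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);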
Ejemplo n.º 28
0
int main(int argc,char **args)
{
  Mat            C;
  Vec            u,b;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank;
  PetscInt       i,m = 5,N,start,end,M,idx[4];
  PetscInt       j,nrsub,ncsub,*rsub,*csub,mystart,myend;
  PetscBool      flg;
  PetscScalar    one = 1.0,Ke[16],*vals;
  PetscReal      h,norm;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  ierr = PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);CHKERRQ(ierr);

  N    = (m+1)*(m+1); /* dimension of matrix */
  M    = m*m;      /* number of elements */
  h    = 1.0/m;    /* mesh width */
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);

  /* Create stiffness matrix */
  ierr = MatCreate(PETSC_COMM_WORLD,&C);CHKERRQ(ierr);
  ierr = MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,N,N);CHKERRQ(ierr);
  ierr = MatSetFromOptions(C);CHKERRQ(ierr);
  ierr = MatSetUp(C);CHKERRQ(ierr);

  start = rank*(M/size) + ((M%size) < rank ? (M%size) : rank);
  end   = start + M/size + ((M%size) > rank);
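
  /* Worked example (illustrative): with M = 7 elements on size = 3 ranks, M/size = 2 and
     M%size = 1, giving element ranges [0,3), [3,5) and [5,7) on ranks 0, 1 and 2; the single
     leftover element goes to the lowest rank. */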

  /* Form the element stiffness for the Laplacian */
  ierr = FormElementStiffness(h*h,Ke);CHKERRQ(ierr);
  for (i=start; i<end; i++) {
    /* location of lower left corner of element */
    /* node numbers for the four corners of element */
    idx[0] = (m+1)*(i/m) + (i % m);
    idx[1] = idx[0]+1; idx[2] = idx[1] + m + 1; idx[3] = idx[2] - 1;
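    /* Illustrative check: for m = 3 and element i = 4 (the middle element), idx = {5, 6, 10, 9},
       i.e. the lower-left, lower-right, upper-right and upper-left nodes of that element. */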
    ierr   = MatSetValues(C,4,idx,4,idx,Ke,ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* Assemble the matrix again */
  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  for (i=start; i<end; i++) {
    /* location of lower left corner of element */
    /* node numbers for the four corners of element */
    idx[0] = (m+1)*(i/m) + (i % m);
    idx[1] = idx[0]+1; idx[2] = idx[1] + m + 1; idx[3] = idx[2] - 1;
    ierr   = MatSetValues(C,4,idx,4,idx,Ke,ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* Create test vectors */
  ierr = VecCreate(PETSC_COMM_WORLD,&u);CHKERRQ(ierr);
  ierr = VecSetSizes(u,PETSC_DECIDE,N);CHKERRQ(ierr);
  ierr = VecSetFromOptions(u);CHKERRQ(ierr);
  ierr = VecDuplicate(u,&b);CHKERRQ(ierr);
  ierr = VecSet(u,one);CHKERRQ(ierr);

  /* Check error */
  ierr = MatMult(C,u,b);CHKERRQ(ierr);
  ierr = VecNorm(b,NORM_2,&norm);CHKERRQ(ierr);
  if (norm > PETSC_SQRT_MACHINE_EPSILON) {
    ierr = PetscPrintf(PETSC_COMM_WORLD,"Norm of error b %g should be near 0\n",(double)norm);CHKERRQ(ierr);
  }

  /* Now test MatGetValues() */
  ierr = PetscOptionsHasName(NULL,NULL,"-get_values",&flg);CHKERRQ(ierr);
  if (flg) {
    ierr  = MatGetOwnershipRange(C,&mystart,&myend);CHKERRQ(ierr);
    nrsub = myend - mystart; ncsub = 4;
    ierr  = PetscMalloc1(nrsub*ncsub,&vals);CHKERRQ(ierr);
    ierr  = PetscMalloc1(nrsub,&rsub);CHKERRQ(ierr);
    ierr  = PetscMalloc1(ncsub,&csub);CHKERRQ(ierr);
    for (i=myend-1; i>=mystart; i--) rsub[myend-i-1] = i;
    for (i=0; i<ncsub; i++) csub[i] = 2*(ncsub-i) + mystart;
    ierr = MatGetValues(C,nrsub,rsub,ncsub,csub,vals);CHKERRQ(ierr);
    ierr = MatView(C,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"processor number %d: start=%D, end=%D, mystart=%D, myend=%D\n",rank,start,end,mystart,myend);CHKERRQ(ierr);
    for (i=0; i<nrsub; i++) {
      for (j=0; j<ncsub; j++) {
        if (PetscImaginaryPart(vals[i*ncsub+j]) != 0.0) {
          ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"  C[%D, %D] = %g + %g i\n",rsub[i],csub[j],(double)PetscRealPart(vals[i*ncsub+j]),(double)PetscImaginaryPart(vals[i*ncsub+j]));CHKERRQ(ierr);
        } else {
          ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"  C[%D, %D] = %g\n",rsub[i],csub[j],(double)PetscRealPart(vals[i*ncsub+j]));CHKERRQ(ierr);
        }
      }
    }
    ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);CHKERRQ(ierr);
    ierr = PetscFree(rsub);CHKERRQ(ierr);
    ierr = PetscFree(csub);CHKERRQ(ierr);
    ierr = PetscFree(vals);CHKERRQ(ierr);
  }

  /* Free data structures */
  ierr = VecDestroy(&u);CHKERRQ(ierr);
  ierr = VecDestroy(&b);CHKERRQ(ierr);
  ierr = MatDestroy(&C);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
Ejemplo n.º 29
0
Archivo: ex75.c Proyecto: petsc/petsc
int main(int argc,char **args)
{
    Vec            x,y,u,s1,s2;
    Mat            A,sA,sB;
    PetscRandom    rctx;
    PetscReal      r1,r2,rnorm,tol = PETSC_SQRT_MACHINE_EPSILON;
    PetscScalar    one=1.0, neg_one=-1.0, value[3], four=4.0,alpha=0.1;
    PetscInt       n,col[3],n1,block,row,i,j,i2,j2,Ii,J,rstart,rend,bs=1,mbs=16,d_nz=3,o_nz=3,prob=2;
    PetscErrorCode ierr;
    PetscMPIInt    size,rank;
    PetscBool      flg;
    MatType        type;

    ierr = PetscInitialize(&argc,&args,(char*)0,help);
    if (ierr) return ierr;
    ierr = PetscOptionsGetInt(NULL,NULL,"-mbs",&mbs,NULL);
    CHKERRQ(ierr);
    ierr = PetscOptionsGetInt(NULL,NULL,"-bs",&bs,NULL);
    CHKERRQ(ierr);

    ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
    CHKERRQ(ierr);
    ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);
    CHKERRQ(ierr);

    n = mbs*bs;

    /* Assemble MPISBAIJ matrix sA */
    ierr = MatCreate(PETSC_COMM_WORLD,&sA);
    CHKERRQ(ierr);
    ierr = MatSetSizes(sA,PETSC_DECIDE,PETSC_DECIDE,n,n);
    CHKERRQ(ierr);
    ierr = MatSetType(sA,MATSBAIJ);
    CHKERRQ(ierr);
    ierr = MatSetFromOptions(sA);
    CHKERRQ(ierr);
    ierr = MatGetType(sA,&type);
    CHKERRQ(ierr);
    ierr = MatMPISBAIJSetPreallocation(sA,bs,d_nz,NULL,o_nz,NULL);
    CHKERRQ(ierr);
    ierr = MatSeqSBAIJSetPreallocation(sA,bs,d_nz,NULL);
    CHKERRQ(ierr);
    ierr = MatSetOption(sA,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);
    CHKERRQ(ierr);

    if (bs == 1) {
        if (prob == 1) { /* tridiagonal matrix */
            value[0] = -1.0;
            value[1] = 2.0;
            value[2] = -1.0;
            for (i=1; i<n-1; i++) {
                col[0] = i-1;
                col[1] = i;
                col[2] = i+1;
                ierr   = MatSetValues(sA,1,&i,3,col,value,INSERT_VALUES);
                CHKERRQ(ierr);
            }
            i       = n - 1;
            col[0]=0;
            col[1] = n - 2;
            col[2] = n - 1;
            value[0]= 0.1;
            value[1]=-1;
            value[2]=2;
            ierr    = MatSetValues(sA,1,&i,3,col,value,INSERT_VALUES);
            CHKERRQ(ierr);

            i        = 0;
            col[0] = 0;
            col[1] = 1;
            col[2]=n-1;
            value[0] = 2.0;
            value[1] = -1.0;
            value[2]=0.1;
            ierr     = MatSetValues(sA,1,&i,3,col,value,INSERT_VALUES);
            CHKERRQ(ierr);
        } else if (prob ==2) { /* matrix for the five point stencil */
            n1 =  (int) PetscSqrtReal((PetscReal)n);
            if (n1*n1 != n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"n must be a perfect square");

            for (i=0; i<n1; i++) {
                for (j=0; j<n1; j++) {
                    Ii = j + n1*i;
                    if (i>0)    {
                        J = Ii - n1;
                        ierr = MatSetValues(sA,1,&Ii,1,&J,&neg_one,INSERT_VALUES);
                        CHKERRQ(ierr);
                    }
                    if (i<n1-1) {
                        J = Ii + n1;
                        ierr = MatSetValues(sA,1,&Ii,1,&J,&neg_one,INSERT_VALUES);
                        CHKERRQ(ierr);
                    }
                    if (j>0)    {
                        J = Ii - 1;
                        ierr = MatSetValues(sA,1,&Ii,1,&J,&neg_one,INSERT_VALUES);
                        CHKERRQ(ierr);
                    }
                    if (j<n1-1) {
                        J = Ii + 1;
                        ierr = MatSetValues(sA,1,&Ii,1,&J,&neg_one,INSERT_VALUES);
                        CHKERRQ(ierr);
                    }
                    ierr = MatSetValues(sA,1,&Ii,1,&Ii,&four,INSERT_VALUES);
                    CHKERRQ(ierr);
                }
            }
        }
        /* end of if (bs == 1) */
    } else {  /* bs > 1 */
        for (block=0; block<n/bs; block++) {
            /* diagonal blocks */
            value[0] = -1.0;
            value[1] = 4.0;
            value[2] = -1.0;
            for (i=1+block*bs; i<bs-1+block*bs; i++) {
                col[0] = i-1;
                col[1] = i;
                col[2] = i+1;
                ierr   = MatSetValues(sA,1,&i,3,col,value,INSERT_VALUES);
                CHKERRQ(ierr);
            }
            i       = bs - 1+block*bs;
            col[0] = bs - 2+block*bs;
            col[1] = bs - 1+block*bs;
            value[0]=-1.0;
            value[1]=4.0;
            ierr    = MatSetValues(sA,1,&i,2,col,value,INSERT_VALUES);
            CHKERRQ(ierr);

            i       = 0+block*bs;
            col[0] = 0+block*bs;
            col[1] = 1+block*bs;
            value[0]=4.0;
            value[1] = -1.0;
            ierr    = MatSetValues(sA,1,&i,2,col,value,INSERT_VALUES);
            CHKERRQ(ierr);
        }
        /* off-diagonal blocks */
        value[0]=-1.0;
        for (i=0; i<(n/bs-1)*bs; i++) {
            col[0]=i+bs;
            ierr  = MatSetValues(sA,1,&i,1,col,value,INSERT_VALUES);
            CHKERRQ(ierr);
            col[0]=i;
            row=i+bs;
            ierr  = MatSetValues(sA,1,&row,1,col,value,INSERT_VALUES);
            CHKERRQ(ierr);
        }
    }
    ierr = MatAssemblyBegin(sA,MAT_FINAL_ASSEMBLY);
    CHKERRQ(ierr);
    ierr = MatAssemblyEnd(sA,MAT_FINAL_ASSEMBLY);
    CHKERRQ(ierr);

    /* Test MatView() */
    ierr = MatCreateBAIJ(PETSC_COMM_WORLD,bs,PETSC_DECIDE,PETSC_DECIDE,n,n,d_nz,NULL,o_nz,NULL,&A);
    CHKERRQ(ierr);
    ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);
    CHKERRQ(ierr);

    if (bs == 1) {
        if (prob == 1) { /* tridiagonal matrix */
            value[0] = -1.0;
            value[1] = 2.0;
            value[2] = -1.0;
            for (i=1; i<n-1; i++) {
                col[0] = i-1;
                col[1] = i;
                col[2] = i+1;
                ierr   = MatSetValues(A,1,&i,3,col,value,INSERT_VALUES);
                CHKERRQ(ierr);
            }
            i       = n - 1;
            col[0]=0;
            col[1] = n - 2;
            col[2] = n - 1;
            value[0]= 0.1;
            value[1]=-1;
            value[2]=2;
            ierr    = MatSetValues(A,1,&i,3,col,value,INSERT_VALUES);
            CHKERRQ(ierr);

            i        = 0;
            col[0] = 0;
            col[1] = 1;
            col[2]=n-1;
            value[0] = 2.0;
            value[1] = -1.0;
            value[2]=0.1;
            ierr     = MatSetValues(A,1,&i,3,col,value,INSERT_VALUES);
            CHKERRQ(ierr);
        } else if (prob ==2) { /* matrix for the five point stencil */
            n1 = (int) PetscSqrtReal((PetscReal)n);
            for (i=0; i<n1; i++) {
                for (j=0; j<n1; j++) {
                    Ii = j + n1*i;
                    if (i>0)    {
                        J = Ii - n1;
                        ierr = MatSetValues(A,1,&Ii,1,&J,&neg_one,INSERT_VALUES);
                        CHKERRQ(ierr);
                    }
                    if (i<n1-1) {
                        J = Ii + n1;
                        ierr = MatSetValues(A,1,&Ii,1,&J,&neg_one,INSERT_VALUES);
                        CHKERRQ(ierr);
                    }
                    if (j>0)    {
                        J = Ii - 1;
                        ierr = MatSetValues(A,1,&Ii,1,&J,&neg_one,INSERT_VALUES);
                        CHKERRQ(ierr);
                    }
                    if (j<n1-1) {
                        J = Ii + 1;
                        ierr = MatSetValues(A,1,&Ii,1,&J,&neg_one,INSERT_VALUES);
                        CHKERRQ(ierr);
                    }
                    ierr = MatSetValues(A,1,&Ii,1,&Ii,&four,INSERT_VALUES);
                    CHKERRQ(ierr);
                }
            }
        }
        /* end of if (bs == 1) */
    } else {  /* bs > 1 */
        for (block=0; block<n/bs; block++) {
            /* diagonal blocks */
            value[0] = -1.0;
            value[1] = 4.0;
            value[2] = -1.0;
            for (i=1+block*bs; i<bs-1+block*bs; i++) {
                col[0] = i-1;
                col[1] = i;
                col[2] = i+1;
                ierr   = MatSetValues(A,1,&i,3,col,value,INSERT_VALUES);
                CHKERRQ(ierr);
            }
            i       = bs - 1+block*bs;
            col[0] = bs - 2+block*bs;
            col[1] = bs - 1+block*bs;
            value[0]=-1.0;
            value[1]=4.0;
            ierr    = MatSetValues(A,1,&i,2,col,value,INSERT_VALUES);
            CHKERRQ(ierr);

            i       = 0+block*bs;
            col[0] = 0+block*bs;
            col[1] = 1+block*bs;
            value[0]=4.0;
            value[1] = -1.0;
            ierr    = MatSetValues(A,1,&i,2,col,value,INSERT_VALUES);
            CHKERRQ(ierr);
        }
        /* off-diagonal blocks */
        value[0]=-1.0;
        for (i=0; i<(n/bs-1)*bs; i++) {
            col[0]=i+bs;
            ierr  = MatSetValues(A,1,&i,1,col,value,INSERT_VALUES);
            CHKERRQ(ierr);
            col[0]=i;
            row=i+bs;
            ierr  = MatSetValues(A,1,&row,1,col,value,INSERT_VALUES);
            CHKERRQ(ierr);
        }
    }
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
    CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
    CHKERRQ(ierr);

    /* Test MatGetSize(), MatGetLocalSize() */
    ierr = MatGetSize(sA, &i,&j);
    CHKERRQ(ierr);
    ierr = MatGetSize(A, &i2,&j2);
    CHKERRQ(ierr);
    i   -= i2;
    j -= j2;
    if (i || j) {
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], Error: MatGetSize()\n",rank);
        CHKERRQ(ierr);
        ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
        CHKERRQ(ierr);
    }

    ierr = MatGetLocalSize(sA, &i,&j);
    CHKERRQ(ierr);
    ierr = MatGetLocalSize(A, &i2,&j2);
    CHKERRQ(ierr);
    i2  -= i;
    j2 -= j;
    if (i2 || j2) {
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], Error: MatGetLocalSize()\n",rank);
        CHKERRQ(ierr);
        ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
        CHKERRQ(ierr);
    }

    /* vectors */
    /*--------------------*/
    /* i is obtained from MatGetLocalSize() */
    ierr = VecCreate(PETSC_COMM_WORLD,&x);
    CHKERRQ(ierr);
    ierr = VecSetSizes(x,i,PETSC_DECIDE);
    CHKERRQ(ierr);
    ierr = VecSetFromOptions(x);
    CHKERRQ(ierr);
    ierr = VecDuplicate(x,&y);
    CHKERRQ(ierr);
    ierr = VecDuplicate(x,&u);
    CHKERRQ(ierr);
    ierr = VecDuplicate(x,&s1);
    CHKERRQ(ierr);
    ierr = VecDuplicate(x,&s2);
    CHKERRQ(ierr);

    ierr = PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
    CHKERRQ(ierr);
    ierr = PetscRandomSetFromOptions(rctx);
    CHKERRQ(ierr);
    ierr = VecSetRandom(x,rctx);
    CHKERRQ(ierr);
    ierr = VecSet(u,one);
    CHKERRQ(ierr);

    /* Test MatNorm() */
    ierr  = MatNorm(A,NORM_FROBENIUS,&r1);
    CHKERRQ(ierr);
    ierr  = MatNorm(sA,NORM_FROBENIUS,&r2);
    CHKERRQ(ierr);
    rnorm = PetscAbsReal(r1-r2)/r2;
    if (rnorm > tol && !rank) {
        ierr = PetscPrintf(PETSC_COMM_SELF,"Error: MatNorm_FROBENIUS(), Anorm=%16.14e, sAnorm=%16.14e bs=%D\n",r1,r2,bs);
        CHKERRQ(ierr);
    }
    ierr  = MatNorm(A,NORM_INFINITY,&r1);
    CHKERRQ(ierr);
    ierr  = MatNorm(sA,NORM_INFINITY,&r2);
    CHKERRQ(ierr);
    rnorm = PetscAbsReal(r1-r2)/r2;
    if (rnorm > tol && !rank) {
        ierr = PetscPrintf(PETSC_COMM_WORLD,"Error: MatNorm_INFINITY(), Anorm=%16.14e, sAnorm=%16.14e bs=%D\n",r1,r2,bs);
        CHKERRQ(ierr);
    }
    ierr  = MatNorm(A,NORM_1,&r1);
    CHKERRQ(ierr);
    ierr  = MatNorm(sA,NORM_1,&r2);
    CHKERRQ(ierr);
    rnorm = PetscAbsReal(r1-r2)/r2;
    if (rnorm > tol && !rank) {
        ierr = PetscPrintf(PETSC_COMM_WORLD,"Error: MatNorm_1(), Anorm=%16.14e, sAnorm=%16.14e bs=%D\n",r1,r2,bs);
        CHKERRQ(ierr);
    }

    /* Test MatGetOwnershipRange() */
    ierr = MatGetOwnershipRange(sA,&rstart,&rend);
    CHKERRQ(ierr);
    ierr = MatGetOwnershipRange(A,&i2,&j2);
    CHKERRQ(ierr);
    i2  -= rstart;
    j2 -= rend;
    if (i2 || j2) {
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], Error: MatGetOwnershipRange()\n",rank);
        CHKERRQ(ierr);
        ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
        CHKERRQ(ierr);
    }

    /* Test MatDiagonalScale() */
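    /* Scaling with the same vector on the left and the right, i.e. diag(x)*A*diag(x),
       preserves symmetry, so the operation is also well defined for the symmetric format of sA */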
    ierr = MatDiagonalScale(A,x,x);
    CHKERRQ(ierr);
    ierr = MatDiagonalScale(sA,x,x);
    CHKERRQ(ierr);
    ierr = MatMultEqual(A,sA,10,&flg);
    CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NOTSAMETYPE,"Error in MatDiagonalScale");

    /* Test MatGetDiagonal(), MatScale() */
    ierr = MatGetDiagonal(A,s1);
    CHKERRQ(ierr);
    ierr = MatGetDiagonal(sA,s2);
    CHKERRQ(ierr);
    ierr = VecNorm(s1,NORM_1,&r1);
    CHKERRQ(ierr);
    ierr = VecNorm(s2,NORM_1,&r2);
    CHKERRQ(ierr);
    r1  -= r2;
    if (r1<-tol || r1>tol) {
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], Error: MatDiagonalScale() or MatGetDiagonal(), r1=%g \n",rank,(double)r1);
        CHKERRQ(ierr);
        ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
        CHKERRQ(ierr);
    }

    ierr = MatScale(A,alpha);
    CHKERRQ(ierr);
    ierr = MatScale(sA,alpha);
    CHKERRQ(ierr);

    /* Test MatGetRowMaxAbs() */
    ierr = MatGetRowMaxAbs(A,s1,NULL);
    CHKERRQ(ierr);
    ierr = MatGetRowMaxAbs(sA,s2,NULL);
    CHKERRQ(ierr);

    ierr = VecNorm(s1,NORM_1,&r1);
    CHKERRQ(ierr);
    ierr = VecNorm(s2,NORM_1,&r2);
    CHKERRQ(ierr);
    r1  -= r2;
    if (r1<-tol || r1>tol) {
        ierr = PetscPrintf(PETSC_COMM_SELF,"Error: MatGetRowMaxAbs() \n");
        CHKERRQ(ierr);
    }

    /* Test MatMult(), MatMultAdd() */
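    /* MatMultEqual() applies both matrices to a set of random vectors (10 here) and
       reports whether the resulting products agree to within a small tolerance */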
    ierr = MatMultEqual(A,sA,10,&flg);
    CHKERRQ(ierr);
    if (!flg) {
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], Error: MatMult() or MatScale()\n",rank);
        CHKERRQ(ierr);
        ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
        CHKERRQ(ierr);
    }

    ierr = MatMultAddEqual(A,sA,10,&flg);
    CHKERRQ(ierr);
    if (!flg) {
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], Error: MatMultAdd()\n",rank);
        CHKERRQ(ierr);
        ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
        CHKERRQ(ierr);
    }

    /* Test MatMultTranspose(), MatMultTransposeAdd() */
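    /* The test matrix is symmetric, so the transpose products computed through the two
       storage formats should coincide with the ordinary products and with each other */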
    for (i=0; i<10; i++) {
        ierr = VecSetRandom(x,rctx);
        CHKERRQ(ierr);
        ierr = MatMultTranspose(A,x,s1);
        CHKERRQ(ierr);
        ierr = MatMultTranspose(sA,x,s2);
        CHKERRQ(ierr);
        ierr = VecNorm(s1,NORM_1,&r1);
        CHKERRQ(ierr);
        ierr = VecNorm(s2,NORM_1,&r2);
        CHKERRQ(ierr);
        r1  -= r2;
        if (r1<-tol || r1>tol) {
            ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], Error: MatMultTranspose() or MatScale(), err=%g\n",rank,(double)r1);
            CHKERRQ(ierr);
            ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
            CHKERRQ(ierr);
        }
    }
    for (i=0; i<10; i++) {
        ierr = VecSetRandom(x,rctx);
        CHKERRQ(ierr);
        ierr = VecSetRandom(y,rctx);
        CHKERRQ(ierr);
        ierr = MatMultTransposeAdd(A,x,y,s1);
        CHKERRQ(ierr);
        ierr = MatMultTransposeAdd(sA,x,y,s2);
        CHKERRQ(ierr);
        ierr = VecNorm(s1,NORM_1,&r1);
        CHKERRQ(ierr);
        ierr = VecNorm(s2,NORM_1,&r2);
        CHKERRQ(ierr);
        r1  -= r2;
        if (r1<-tol || r1>tol) {
            ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], Error: MatMultTransposeAdd(), err=%g \n",rank,(double)r1);
            CHKERRQ(ierr);
            ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
            CHKERRQ(ierr);
        }
    }

    /* Test MatDuplicate() */
    ierr = MatDuplicate(sA,MAT_COPY_VALUES,&sB);
    CHKERRQ(ierr);
    ierr = MatEqual(sA,sB,&flg);
    CHKERRQ(ierr);
    if (!flg) {
        ierr = PetscPrintf(PETSC_COMM_WORLD," Error in MatDuplicate(), sA != sB \n");
        CHKERRQ(ierr);
    }
    ierr = MatMultEqual(sA,sB,5,&flg);
    CHKERRQ(ierr);
    if (!flg) {
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], Error: MatDuplicate() or MatMult()\n",rank);
        CHKERRQ(ierr);
        ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
        CHKERRQ(ierr);
    }
    ierr = MatMultAddEqual(sA,sB,5,&flg);
    CHKERRQ(ierr);
    if (!flg) {
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], Error: MatDuplicate() or MatMultAdd()\n",rank);
        CHKERRQ(ierr);
        ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);
        CHKERRQ(ierr);
    }
    ierr = MatDestroy(&sB);
    CHKERRQ(ierr);
    ierr = VecDestroy(&u);
    CHKERRQ(ierr);
    ierr = VecDestroy(&x);
    CHKERRQ(ierr);
    ierr = VecDestroy(&y);
    CHKERRQ(ierr);
    ierr = VecDestroy(&s1);
    CHKERRQ(ierr);
    ierr = VecDestroy(&s2);
    CHKERRQ(ierr);
    ierr = MatDestroy(&sA);
    CHKERRQ(ierr);
    ierr = MatDestroy(&A);
    CHKERRQ(ierr);
    ierr = PetscRandomDestroy(&rctx);
    CHKERRQ(ierr);

    ierr = PetscFinalize();
    return ierr;
}
Example No. 30
int main(int argc,char **args)
{
  KSP            subksp;
  Mat            A,subA;
  Vec            x,b,u,subb,subx,subu;
  PetscViewer    fd;
  char           file[PETSC_MAX_PATH_LEN];
  PetscBool      flg;
  PetscErrorCode ierr;
  PetscInt       i,m,n,its;
  PetscReal      norm;
  PetscMPIInt    rank,size;
  MPI_Comm       comm,subcomm;
  PetscSubcomm   psubcomm;
  PetscInt       nsubcomm=1,id;
  PetscScalar    *barray,*xarray,*uarray,*array,one=1.0;
  PetscInt       type=1;

  PetscInitialize(&argc,&args,(char*)0,help);
  /* Load the matrix */
  ierr = PetscOptionsGetString(NULL,"-f",file,PETSC_MAX_PATH_LEN,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_WORLD,1,"Must indicate binary file with the -f option");
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,file,FILE_MODE_READ,&fd);CHKERRQ(ierr);

  /* Load the matrix; then destroy the viewer.*/
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatLoad(A,fd);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&fd);CHKERRQ(ierr);

  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* Create rhs vector b */
  ierr = MatGetLocalSize(A,&m,NULL);CHKERRQ(ierr);
  ierr = VecCreate(PETSC_COMM_WORLD,&b);CHKERRQ(ierr);
  ierr = VecSetSizes(b,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(b);CHKERRQ(ierr);
  ierr = VecSet(b,one);CHKERRQ(ierr);

  ierr = VecDuplicate(b,&x);CHKERRQ(ierr);
  ierr = VecDuplicate(b,&u);CHKERRQ(ierr);
  ierr = VecSet(x,0.0);CHKERRQ(ierr);

  /* Test MatGetMultiProcBlock() */
  ierr = PetscOptionsGetInt(NULL,"-nsubcomm",&nsubcomm,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,"-subcomm_type",&type,NULL);CHKERRQ(ierr);

  ierr = PetscSubcommCreate(comm,&psubcomm);CHKERRQ(ierr);
  ierr = PetscSubcommSetNumber(psubcomm,nsubcomm);CHKERRQ(ierr);
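  /* Split the parent communicator into nsubcomm sub-communicators; the subcomm type
     (general, contiguous, or interlaced) selected below controls how the ranks are grouped */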
  if (type == PETSC_SUBCOMM_GENERAL) { /* user provides color, subrank and duprank */
    PetscMPIInt color,subrank,duprank,subsize;
    duprank = size-1 - rank;
    subsize = size/nsubcomm;
    if (subsize*nsubcomm != size) SETERRQ2(comm,PETSC_ERR_SUP,"This example requires nsubcomm %D divides nproc %d",nsubcomm,size);
    color   = duprank/subsize;
    subrank = duprank - color*subsize;
    ierr    = PetscSubcommSetTypeGeneral(psubcomm,color,subrank,duprank);CHKERRQ(ierr);
  } else if (type == PETSC_SUBCOMM_CONTIGUOUS) {
    ierr = PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);CHKERRQ(ierr);
  } else if (type == PETSC_SUBCOMM_INTERLACED) {
    ierr = PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_INTERLACED);CHKERRQ(ierr);
  } else SETERRQ1(psubcomm->parent,PETSC_ERR_SUP,"PetscSubcommType %D is not supported yet",type);
  subcomm = psubcomm->comm;

  ierr = PetscOptionsHasName(NULL, "-subcomm_view", &flg);CHKERRQ(ierr);
  if (flg) {
    PetscMPIInt subsize,subrank,duprank;
    ierr = MPI_Comm_size((MPI_Comm)subcomm,&subsize);CHKERRQ(ierr);
    ierr = MPI_Comm_rank((MPI_Comm)subcomm,&subrank);CHKERRQ(ierr);
    ierr = MPI_Comm_rank((MPI_Comm)psubcomm->dupparent,&duprank);CHKERRQ(ierr);

    ierr = PetscSynchronizedPrintf(comm,"[%d], color %d, sub-size %d, sub-rank %d, duprank %d\n",rank,psubcomm->color,subsize,subrank,duprank);CHKERRQ(ierr);
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  }

  /* Create subA */
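  /* Each sub-communicator receives the block of A associated with the rows owned by its
     processes (the splitting used, e.g., for multi-process block Jacobi) */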
  ierr = MatGetMultiProcBlock(A,subcomm,MAT_INITIAL_MATRIX,&subA);CHKERRQ(ierr);

  /* Create sub vectors without arrays. Place b's and x's local arrays into subb and subx */
  ierr = MatGetLocalSize(subA,&m,&n);CHKERRQ(ierr);
  ierr = VecCreateMPIWithArray(subcomm,1,m,PETSC_DECIDE,NULL,&subb);CHKERRQ(ierr);
  ierr = VecCreateMPIWithArray(subcomm,1,n,PETSC_DECIDE,NULL,&subx);CHKERRQ(ierr);
  ierr = VecCreateMPIWithArray(subcomm,1,n,PETSC_DECIDE,NULL,&subu);CHKERRQ(ierr);
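  /* Passing NULL defers attaching storage; VecPlaceArray() below aliases each subvector
     onto the local array of the corresponding global vector, so no data is copied.
     VecResetArray() releases the arrays again before the global vectors are restored */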

  ierr = VecGetArray(b,&barray);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(u,&uarray);CHKERRQ(ierr);
  ierr = VecPlaceArray(subb,barray);CHKERRQ(ierr);
  ierr = VecPlaceArray(subx,xarray);CHKERRQ(ierr);
  ierr = VecPlaceArray(subu,uarray);CHKERRQ(ierr);

  /* Create linear solvers associated with subA */
  ierr = KSPCreate(subcomm,&subksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(subksp,subA,subA,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = KSPSetFromOptions(subksp);CHKERRQ(ierr);

  /* Solve sub systems */
  ierr = KSPSolve(subksp,subb,subx);CHKERRQ(ierr);
  ierr = KSPGetIterationNumber(subksp,&its);CHKERRQ(ierr);

  /* check residual */
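  /* subu <- subA*subx - subb; because subu's array is placed on u's local storage,
     the NORM_2 of u below combines the residuals from all sub-communicators */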
  ierr = MatMult(subA,subx,subu);CHKERRQ(ierr);
  ierr = VecAXPY(subu,-1.0,subb);CHKERRQ(ierr);
  ierr = VecNorm(u,NORM_2,&norm);CHKERRQ(ierr);
  if (norm > 1.e-4 && !rank) {
    ierr = PetscPrintf(PETSC_COMM_WORLD,"[%d]  Number of iterations = %3D\n",rank,its);CHKERRQ(ierr);
    printf("Error: Residual norm of each block |subb - subA*subx| = %G\n",norm);
  }
  ierr = VecResetArray(subb);CHKERRQ(ierr);
  ierr = VecResetArray(subx);CHKERRQ(ierr);
  ierr = VecResetArray(subu);CHKERRQ(ierr);

  ierr = PetscOptionsGetInt(NULL,"-subvec_view",&id,&flg);CHKERRQ(ierr);
  if (flg && rank == id) {
    ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] subb:\n", rank);CHKERRQ(ierr);
    ierr = VecGetArray(subb,&array);CHKERRQ(ierr);
    for (i=0; i<m; i++) printf("%G\n",PetscRealPart(array[i]));
    ierr = VecRestoreArray(subb,&array);CHKERRQ(ierr);
    ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] subx:\n", rank);CHKERRQ(ierr);
    ierr = VecGetArray(subx,&array);CHKERRQ(ierr);
    for (i=0; i<m; i++) printf("%G\n",PetscRealPart(array[i]));
    ierr = VecRestoreArray(subx,&array);CHKERRQ(ierr);
  }

  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(b,&barray);CHKERRQ(ierr);
  ierr = VecRestoreArray(u,&uarray);CHKERRQ(ierr);
  ierr = MatDestroy(&subA);CHKERRQ(ierr);
  ierr = VecDestroy(&subb);CHKERRQ(ierr);
  ierr = VecDestroy(&subx);CHKERRQ(ierr);
  ierr = VecDestroy(&subu);CHKERRQ(ierr);
  ierr = KSPDestroy(&subksp);CHKERRQ(ierr);
  ierr = PetscSubcommDestroy(&psubcomm);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr); ierr = VecDestroy(&b);CHKERRQ(ierr);
  ierr = VecDestroy(&u);CHKERRQ(ierr); ierr = VecDestroy(&x);CHKERRQ(ierr);

  ierr = PetscFinalize();
  return 0;
}