Example #1
/*
    Given a DA, generates a VecScatter context that will deliver a slice
  of the global vector to each processor. In this example, each processor
  receives the values i=*, j=*, k=rank, i.e. one z plane.

  Note: This code is written assuming only one degree of freedom per node.
  For multiple degrees of freedom per node use ISCreateBlock()
  instead of ISCreateGeneral().
*/
PetscErrorCode GenerateSliceScatter(DA da,VecScatter *scatter,Vec *vslice)
{
  AO             ao;
  PetscInt       M,N,P,nslice,*sliceindices,count,i,j;
  PetscMPIInt    rank;
  PetscErrorCode ierr;
  MPI_Comm       comm;
  Vec            vglobal;
  IS             isfrom,isto;

  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ierr = DAGetAO(da,&ao);CHKERRQ(ierr);
  ierr = DAGetInfo(da,0,&M,&N,&P,0,0,0,0,0,0,0);CHKERRQ(ierr);

  /*
     nslice is the number of degrees of freedom in this processor's slice;
     if there are more processors than z planes, the extra processors get 0
     elements in their slice.
  */
  if (rank < P) {nslice = M*N;} else nslice = 0;

  /*
     Generate the local vector to hold this processor's slice
  */
  ierr = VecCreateSeq(PETSC_COMM_SELF,nslice,vslice);CHKERRQ(ierr);
  ierr = DACreateGlobalVector(da,&vglobal);CHKERRQ(ierr);

  /*
       Generate the indices for the slice in the "natural" global ordering.
     Note: this is just an example; one could select any subset of nodes
     on each processor, just list them in the global natural ordering.
  */
  ierr = PetscMalloc((nslice+1)*sizeof(PetscInt),&sliceindices);CHKERRQ(ierr);
  count = 0;
  if (rank < P) {
    for (j=0; j<N; j++) {
      for (i=0; i<M; i++) {
         sliceindices[count++] = rank*M*N + j*M + i;
      }
    }
  }
  /*
      Convert the indices to the "PETSc" global ordering
  */
  ierr = AOApplicationToPetsc(ao,nslice,sliceindices);CHKERRQ(ierr); 
  
  /* Create the "from" and "to" index set */
  /* This is to scatter from the global vector */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,nslice,sliceindices,&isfrom);CHKERRQ(ierr);
  /* This is to gather into the local vector */
  ierr = ISCreateStride(PETSC_COMM_SELF,nslice,0,1,&isto);CHKERRQ(ierr);
  ierr = VecScatterCreate(vglobal,isfrom,*vslice,isto,scatter);CHKERRQ(ierr);
  ierr = ISDestroy(isfrom);CHKERRQ(ierr);
  ierr = ISDestroy(isto);CHKERRQ(ierr);
  ierr = VecDestroy(vglobal);CHKERRQ(ierr);
  ierr = PetscFree(sliceindices);CHKERRQ(ierr);
  return 0;
}
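A minimal usage sketch for the routine above, assuming the same PETSc 3.0/3.1-era DA API as the example itself; the 3-D grid sizes and the driver name SliceScatterDriver are illustrative, not from the original source. Since GenerateSliceScatter() builds the scatter from the DA's global layout, the resulting context can be applied to any global vector created from that DA:

/* Hypothetical driver (sketch): build the slice scatter for a small 3-D DA and
   pull this rank's z plane out of a global vector. */
PetscErrorCode SliceScatterDriver(void)
{
  DA             da;
  Vec            vglobal,vslice;
  VecScatter     scatter;
  PetscErrorCode ierr;

  /* 10x10x4 grid, one degree of freedom per node, as GenerateSliceScatter() assumes */
  ierr = DACreate3d(PETSC_COMM_WORLD,DA_NONPERIODIC,DA_STENCIL_STAR,10,10,4,
                    PETSC_DECIDE,PETSC_DECIDE,PETSC_DECIDE,1,1,0,0,0,&da);CHKERRQ(ierr);
  ierr = DACreateGlobalVector(da,&vglobal);CHKERRQ(ierr);

  ierr = GenerateSliceScatter(da,&scatter,&vslice);CHKERRQ(ierr);

  /* move the z plane owned by this rank from the global vector into vslice */
  ierr = VecScatterBegin(scatter,vglobal,vslice,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(scatter,vglobal,vslice,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

  ierr = VecScatterDestroy(scatter);CHKERRQ(ierr);
  ierr = VecDestroy(vslice);CHKERRQ(ierr);
  ierr = VecDestroy(vglobal);CHKERRQ(ierr);
  ierr = DADestroy(da);CHKERRQ(ierr);
  return 0;
}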
Example #2
File: gada.c  Project: adrielb/DCell
PetscErrorCode vizGA2DA()
{
  PetscErrorCode  ierr;
  int rank;
  PetscFunctionBegin;
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  int d1 = 40, d2 = 50;
  
  DA da;
  Vec vec;
  const PetscInt *lx, *ly, *lz;
  PetscInt m,n,p;
  DALocalInfo info;
  ierr = DACreate2d(PETSC_COMM_WORLD,DA_NONPERIODIC,DA_STENCIL_STAR,
            d1,d2,PETSC_DECIDE,PETSC_DECIDE,1,1,0,0, &da); CHKERRQ(ierr);
  ierr = DACreateGlobalVector(da, &vec); CHKERRQ(ierr);
  ierr = DAGetOwnershipRanges(da, &lx, &ly, &lz); CHKERRQ(ierr);
  ierr = DAGetLocalInfo(da,&info); CHKERRQ(ierr);
  ierr = DAGetInfo(da,0,0,0,0,&m,&n,&p,0,0,0,0); CHKERRQ(ierr);
  /**/
  ierr = DAView(da, PETSC_VIEWER_STDOUT_WORLD); CHKERRQ(ierr);
  for (int i = 0; i < m; ++i) {
    PetscPrintf(PETSC_COMM_WORLD,"%d\tlx: %d\n",i,lx[i]);
  }
  for (int i = 0; i < n; ++i) {
    PetscPrintf(PETSC_COMM_WORLD,"%d\tly: %d\n",i,ly[i]);
  }
  /**/
 
  
  int ga = GA_Create_handle();
  int ndim = 2;
  int dims[2] = {d2,d1};
  GA_Set_data(ga,2,dims,MT_DBL);
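  /* GA dimension 0 runs over the DA's y direction (d2 rows) and dimension 1 over its
     x direction (d1 columns); this matches the lo/hi vs. info.ys/info.xs checks below */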
  int *map;
  PetscMalloc( sizeof(int)*(m+n), &map);
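  /* map concatenates the starting index of each block along every GA dimension:
     map[0..n-1] are row (y) starts accumulated from ly[], map[n..n+m-1] are column (x)
     starts accumulated from lx[]; block[] further down gives the blocks per dimension */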
  map[0] = 0;
  for( int i = 1; i < n; i++ )
  {
    map[i] = ly[i-1] + map[i-1];
  }
  map[n] = 0;
  for( int i = n+1; i < m+n; i++ )
  {
    map[i] = lx[i-n-1] + map[i-1];
  }
  /* correct ordering, but node ids don't line up with the MPI rank for PETSc's DA
   * DA: +---+---+   GA: +---+---+   
   *     +-2-+-3-+       +-1-+-3-+
   *     +---+---+       +---+---+
   *     +-0-+-1-+       +-0-+-2-+
   *     +---+---+       +---+---+
  int *map;
  PetscMalloc( sizeof(int)*(m+n), &map);
  map[0] = 0;
  for( int i = 1; i < m; i++ )
  {
    map[i] = lx[i] + map[i-1];
  }
  map[m] = 0;
  for( int i = m+1; i < m+n; i++ )
  {
    map[i] = ly[i-m] + map[i-1];
  }
  */
  int block[2] = {n,m};  
  GA_Set_irreg_distr(ga,map,block);
  ierr = GA_Allocate( ga );
  if( !ierr ) GA_Error("\n\n\nga allocation failed\n\n",ierr);
  if( !ga ) GA_Error("\n\n\n ga null \n\n",ierr); 
  if( rank != GA_Nodeid() ) GA_Error("MPI rank does not match GA_Nodeid()",1);
  GA_Print_distribution(ga);  
  
  int lo[2], hi[2];
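  /* fetch this rank's GA patch as [lo,hi] per dimension and compare it with the
     DA's local corner (xs,ys) and extents (xm,ym) */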
  NGA_Distribution(ga,rank,lo,hi);
  if( lo[1] != info.xs || hi[1] != info.xs+info.xm-1 ||
      lo[0] != info.ys || hi[0] != info.ys+info.ym-1 )
  {
    PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] lo:(%2d,%2d)  hi:(%2d,%2d) \t DA: (%2d,%2d), (%2d, %2d)\n",
        rank, lo[1], lo[0], hi[1], hi[0], info.xs, info.ys, info.xs+info.xm-1, info.ys+info.ym-1);
  }
  PetscBarrier(0);
  PetscSynchronizedFlush(PETSC_COMM_WORLD);

  AO ao;
  DAGetAO(da,&ao);
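  /* the AO translates natural (i + d1*j) indices into PETSc's parallel ordering,
     so rank 0 can set every entry of the global vector below */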
  if( rank == 0 )
  {
    int *idx, len = d1*d2;
    PetscReal *val;
    PetscMalloc(sizeof(PetscReal)*len, &val);
    PetscMalloc(sizeof(int)*len, &idx);
    for (int j = 0; j < d2; ++j)
    {
      for (int i = 0; i < d1; ++i)
      {
        idx[i + d1*j] = i + d1*j;
        val[i + d1*j] = i + d1*j;
      }
    }
    AOApplicationToPetsc(ao,len,idx);
    VecSetValues(vec,len,idx,val,INSERT_VALUES);

    int a[2], b[2],ld[1]={0};
    double c = 0;
    for (int j = 0; j < d2; ++j)
    {
      for (int i = 0; i < d1; ++i)
      {
        a[0] = j;
        a[1] = i;
//        printf("%5.0f ",c);
        NGA_Put(ga,a,a,&c,ld);
        c++;
      }
    }
  }
//  GA_Print(ga);
  VecAssemblyBegin(vec);
  VecAssemblyEnd(vec);
  
  int ld;
  double *ptr;
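  /* get a direct pointer to the locally owned GA patch; ld is the stride between
     consecutive rows (j) of that patch, used in the index arithmetic below */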
  NGA_Access(ga,lo,hi,&ptr,&ld);
  PetscReal **d;
  int c=0;
  ierr = DAVecGetArray(da,vec,&d); CHKERRQ(ierr);
  for (int j = info.ys; j < info.ys+info.ym; ++j)
  {
    for (int i = info.xs; i < info.xs+info.xm; ++i)
    {
      if( d[j][i] != ptr[(i-info.xs)+ld*(j-info.ys)] )
        GA_Error("DA array is not equal to GA array",1);
//      printf("%d (%d,%d):\t%3.0f\t%3.0f\n", c, i, j, d[j][i], ptr[(i-info.xs)+ld*(j-info.ys)]);
      c++;
    }
  }
  ierr = DAVecRestoreArray(da,vec,&d); CHKERRQ(ierr);
  
  c=0;
  PetscReal *v;
  int start, end;
  VecGetOwnershipRange(vec, &start, &end);
  VecGetArray( vec, &v );
  for( int i = start; i < end; i++)
  {
//    printf("%d:\t%3.0f\t%3.0f\t%s\n", start, v[i-start], ptr[i-start], (v[i-start]-ptr[i-start]==0?"":"NO") );
  }
  VecRestoreArray( vec, &v );
  
  NGA_Release_update(ga,lo,hi);

  Vec gada;
  /* wrap the local GA patch in a PETSc Vec; with one dof per node the
     local size is info.xm*info.ym */
  VecCreateMPIWithArray(((PetscObject)da)->comm,info.xm*info.ym,PETSC_DETERMINE,ptr,&gada);
  VecView(gada,PETSC_VIEWER_STDOUT_SELF);
  ierr = VecDestroy(gada); CHKERRQ(ierr);

  GA_Destroy(ga);

  ierr = VecDestroy(vec); CHKERRQ(ierr);
  ierr = DADestroy(da); CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
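For completeness, a hypothetical driver for the check above might look like the following sketch; the headers, MA_init sizes, and error handling are assumptions rather than code from the adrielb/DCell project. The only requirement vizGA2DA() imposes is that both PETSc and Global Arrays are initialized (and MPI, which PetscInitialize() starts) before it runs:

/* Sketch of a main() that runs the DA/GA comparison above; the MA_init sizes
   and option handling are illustrative, not from the original project. */
#include "petscda.h"
#include "ga.h"
#include "macdecls.h"

extern PetscErrorCode vizGA2DA();

int main(int argc,char **argv)
{
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,(char*)0,(char*)0);CHKERRQ(ierr);
  GA_Initialize();                                   /* GA reuses the MPI started by PETSc */
  if (!MA_init(MT_DBL,1000000,1000000)) GA_Error("MA_init failed",1);

  ierr = vizGA2DA();CHKERRQ(ierr);

  GA_Terminate();
  ierr = PetscFinalize();
  return 0;
}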