Example #1
int main(int argc,char **args)
{
  Mat            A,B;
  PetscInt       i,rstart,rend;
  PetscMPIInt    rank,size;
  PetscErrorCode ierr;
  PetscScalar    v;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);

  /* Create an MPIBAIJ matrix */
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,32,32);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPIBAIJ);CHKERRQ(ierr);
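  /* Preallocation routines are no-ops unless the matrix type matches, so both the
     Seq and MPI variants can be called safely */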
  ierr = MatSeqBAIJSetPreallocation(A,2,2,NULL);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(A,2,2,NULL,2,NULL);CHKERRQ(ierr);

  v    = 1.0;
  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    ierr = MatSetValues(A,1,&i,1,&i,&v,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* Convert A to AIJ format */
  ierr = MatConvert(A,MATAIJ,MAT_INITIAL_MATRIX,&B);CHKERRQ(ierr);

  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = MatDestroy(&B);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
Example #2
PetscErrorCode Assemble(MPI_Comm comm,PetscInt bs,const MatType mtype)
{
  const PetscInt rc[] = {0,1,2,3};
  const PetscScalar vals[] = {1, 2, 3, 4, 5, 6, 7, 8,
                              9,10,11,12,13,14,15,16,
                              17,18,19,20,21,22,23,24,
                              25,26,27,28,29,30,31,32,
                              33,34,35,36,37,38,39,40,
                              41,42,43,44,45,46,47,48,
                              49,50,51,52,53,54,55,56,
                              57,58,59,60,61,62,63,64};
  Mat A;
  PetscViewer viewer;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,4*bs,4*bs);CHKERRQ(ierr);
  ierr = MatSetType(A,mtype);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(A,bs,2,NULL,2,NULL);CHKERRQ(ierr);
  ierr = MatMPISBAIJSetPreallocation(A,bs,2,NULL,2,NULL);CHKERRQ(ierr);
  ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
  /* All processes contribute a global matrix */
  ierr = MatSetValuesBlocked(A,4,rc,4,rc,vals,ADD_VALUES);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscPrintf(comm,"Matrix %s(%D)\n",mtype,bs);CHKERRQ(ierr);
  ierr = PetscViewerASCIIGetStdout(comm,&viewer);CHKERRQ(ierr);
  ierr = PetscViewerPushFormat(viewer,PETSC_VIEWER_ASCII_INFO_DETAIL);CHKERRQ(ierr);
  ierr = MatView(A,viewer);CHKERRQ(ierr);
  ierr = PetscViewerPopFormat(viewer);CHKERRQ(ierr);
  ierr = MatView(A,viewer);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #3
int main(int argc, char **args)
{
  Mat             A;
  MatPartitioning part;
  IS              is;
  PetscInt        i,m,N,rstart,rend,nemptyranks,*emptyranks,nbigranks,*bigranks;
  PetscMPIInt     rank,size;
  PetscErrorCode  ierr;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);

  nemptyranks = 10;
  nbigranks = 10;
  ierr = PetscMalloc2(nemptyranks,PetscInt,&emptyranks,nbigranks,PetscInt,&bigranks);CHKERRQ(ierr);

  ierr = PetscOptionsBegin(PETSC_COMM_WORLD,PETSC_NULL,"Partitioning example options",PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsIntArray("-emptyranks","Ranks to be skipped by partition","",emptyranks,&nemptyranks,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsIntArray("-bigranks","Ranks to be overloaded","",bigranks,&nbigranks,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  m = 1;
  for (i=0; i<nemptyranks; i++) if (rank == emptyranks[i]) m = 0;
  for (i=0; i<nbigranks; i++) if (rank == bigranks[i]) m = 5;

  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,m,m,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(A,3,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(A,3,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation(A,1,3,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(A,1,3,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatSeqSBAIJSetPreallocation(A,1,2,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPISBAIJSetPreallocation(A,1,2,PETSC_NULL,1,PETSC_NULL);CHKERRQ(ierr);

  ierr = MatGetSize(A,PETSC_NULL,&N);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
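  /* assemble a periodic tridiagonal (circulant) pattern: row i couples to columns i-1, i, i+1 modulo N */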
  for (i=rstart; i<rend; i++) {
    const PetscInt cols[] = {(i+N-1)%N,i,(i+1)%N};
    const PetscScalar vals[] = {1,1,1};
    ierr = MatSetValues(A,1,&i,3,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  ierr = MatPartitioningCreate(PETSC_COMM_WORLD,&part);CHKERRQ(ierr);
  ierr = MatPartitioningSetAdjacency(part,A);CHKERRQ(ierr);
  ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
  ierr = MatPartitioningApply(part,&is);CHKERRQ(ierr);
  ierr = ISView(is,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFree2(emptyranks,bigranks);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
Example #4
/*@
   MatXAIJSetPreallocation - set preallocation for serial and parallel AIJ, BAIJ, and SBAIJ matrices

   Collective on Mat

   Input Arguments:
+  A - matrix being preallocated
.  bs - block size
.  dnnz - number of nonzero blocks per block row of diagonal part of parallel matrix
.  onnz - number of nonzero blocks per block row of off-diagonal part of parallel matrix
.  dnnzu - number of nonzero blocks per block row of upper-triangular part of diagonal part of parallel matrix
-  onnzu - number of nonzero blocks per block row of upper-triangular part of off-diagonal part of parallel matrix

   Level: beginner

.seealso: MatSeqAIJSetPreallocation(), MatMPIAIJSetPreallocation(), MatSeqBAIJSetPreallocation(), MatMPIBAIJSetPreallocation(), MatSeqSBAIJSetPreallocation(), MatMPISBAIJSetPreallocation(),
          PetscSplitOwnership()
@*/
PetscErrorCode MatXAIJSetPreallocation(Mat A,PetscInt bs,const PetscInt dnnz[],const PetscInt onnz[],const PetscInt dnnzu[],const PetscInt onnzu[])
{
    PetscErrorCode ierr;
    void           (*aij)(void);

    PetscFunctionBegin;
    ierr = MatSetBlockSize(A,bs);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
    ierr = MatSeqBAIJSetPreallocation(A,bs,0,dnnz);CHKERRQ(ierr);
    ierr = MatMPIBAIJSetPreallocation(A,bs,0,dnnz,0,onnz);CHKERRQ(ierr);
    ierr = MatSeqSBAIJSetPreallocation(A,bs,0,dnnzu);CHKERRQ(ierr);
    ierr = MatMPISBAIJSetPreallocation(A,bs,0,dnnzu,0,onnzu);CHKERRQ(ierr);
    /*
      In general, we have to do extra work to preallocate for scalar (AIJ) matrices, so we check whether it will do any
      good before going on with it.
    */
    ierr = PetscObjectQueryFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",&aij);CHKERRQ(ierr);
    if (!aij) {
        ierr = PetscObjectQueryFunction((PetscObject)A,"MatSeqAIJSetPreallocation_C",&aij);CHKERRQ(ierr);
    }
    if (aij) {
        if (bs == 1) {
            ierr = MatSeqAIJSetPreallocation(A,0,dnnz);CHKERRQ(ierr);
            ierr = MatMPIAIJSetPreallocation(A,0,dnnz,0,onnz);CHKERRQ(ierr);
        } else {                    /* Convert block-row preallocation to scalar-row */
            PetscInt i,m,*sdnnz,*sonnz;
            ierr = MatGetLocalSize(A,&m,NULL);CHKERRQ(ierr);
            ierr = PetscMalloc2((!!dnnz)*m,PetscInt,&sdnnz,(!!onnz)*m,PetscInt,&sonnz);CHKERRQ(ierr);
            for (i=0; i<m; i++) {
                if (dnnz) sdnnz[i] = dnnz[i/bs] * bs;
                if (onnz) sonnz[i] = onnz[i/bs] * bs;
            }
            ierr = MatSeqAIJSetPreallocation(A,0,dnnz ? sdnnz : NULL);CHKERRQ(ierr);
            ierr = MatMPIAIJSetPreallocation(A,0,dnnz ? sdnnz : NULL,0,onnz ? sonnz : NULL);CHKERRQ(ierr);
            ierr = PetscFree2(sdnnz,sonnz);CHKERRQ(ierr);
        }
    }
    PetscFunctionReturn(0);
}
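To make the call pattern concrete, here is a minimal, self-contained usage sketch (the sizes and nonzero counts are invented for illustration; it assumes only the MatXAIJSetPreallocation routine shown above):

#include <petscmat.h>

int main(int argc,char **args)
{
  Mat            A;
  PetscInt       bs = 2,mlocal = 8,i,dnnz[4],onnz[4];
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&args,(char*)0,NULL);if (ierr) return ierr;
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,mlocal,mlocal,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr); /* type chosen at run time: -mat_type aij|baij|sbaij */
  for (i=0; i<mlocal/bs; i++) {dnnz[i] = 3; onnz[i] = 1;} /* counts per block row */
  /* one call covers all six underlying preallocation routines; reusing dnnz/onnz for the
     upper-triangular counts merely overestimates in the SBAIJ case */
  ierr = MatXAIJSetPreallocation(A,bs,dnnz,onnz,dnnz,onnz);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}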
Example #5
void linearSystemPETSc<scalar>::preAllocateEntries()
{
  if (_entriesPreAllocated) return;
  if (!_isAllocated) Msg::Fatal("system must be allocated first");
  int blockSize = _getBlockSizeFromParameters();
  std::vector<int> nByRowDiag (_localSize), nByRowOffDiag (_localSize);
  if (_sparsity.getNbRows() == 0) {
    PetscInt prealloc = 500;
    PetscBool set;
    PetscOptionsGetInt(PETSC_NULL, "-petsc_prealloc", &prealloc, &set);
    prealloc = std::min(prealloc, _localSize);
    nByRowDiag.resize(0);
    nByRowDiag.resize(_localSize, prealloc);
  } else {
    for (int i = 0; i < _localSize; i++) {
      int n;
      const int *r = _sparsity.getRow(i, n);
      for (int j = 0; j < n; j++) {
        if (r[j] >= _localRowStart && r[j] < _localRowEnd)
          nByRowDiag[i] ++;
        else
          nByRowOffDiag[i] ++;
      }
    }
    _sparsity.clear();
  }
  //MatXAIJSetPreallocation is not available in petsc < 3.3
  int commSize = 1;
  MPI_Comm_size(_comm, &commSize);
  if (commSize == 1){
    if (blockSize == 1)
      _try(MatSeqAIJSetPreallocation(_a, 0,  &nByRowDiag[0]));
    else
      _try(MatSeqBAIJSetPreallocation(_a, blockSize, 0, &nByRowDiag[0]));
  }
  else {
    if (blockSize == 1)
      _try(MatMPIAIJSetPreallocation(_a, 0, &nByRowDiag[0], 0, &nByRowOffDiag[0]));
    else
      _try(MatMPIBAIJSetPreallocation(_a, blockSize, 0, &nByRowDiag[0], 0, &nByRowOffDiag[0]));
  }
  if (blockSize > 1)
    _try(MatSetOption(_a, MAT_ROW_ORIENTED, PETSC_FALSE));
  _entriesPreAllocated = true;
}
Example #6
static PetscErrorCode DMCreateMatrix_Redundant(DM dm,Mat *J)
{
  DM_Redundant           *red = (DM_Redundant*)dm->data;
  PetscErrorCode         ierr;
  ISLocalToGlobalMapping ltog,ltogb;
  PetscInt               i,rstart,rend,*cols;
  PetscScalar            *vals;

  PetscFunctionBegin;
  ierr = MatCreate(PetscObjectComm((PetscObject)dm),J);CHKERRQ(ierr);
  ierr = MatSetSizes(*J,red->n,red->n,red->N,red->N);CHKERRQ(ierr);
  ierr = MatSetType(*J,dm->mattype);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(*J,red->n,NULL);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation(*J,1,red->n,NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(*J,red->n,NULL,red->N-red->n,NULL);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(*J,1,red->n,NULL,red->N-red->n,NULL);CHKERRQ(ierr);

  ierr = DMGetLocalToGlobalMapping(dm,&ltog);CHKERRQ(ierr);
  ierr = DMGetLocalToGlobalMappingBlock(dm,&ltogb);CHKERRQ(ierr);
  ierr = MatSetLocalToGlobalMapping(*J,ltog,ltog);CHKERRQ(ierr);
  ierr = MatSetLocalToGlobalMappingBlock(*J,ltogb,ltogb);CHKERRQ(ierr);

  ierr = PetscMalloc2(red->N,&cols,red->N,&vals);CHKERRQ(ierr);
  for (i=0; i<red->N; i++) {
    cols[i] = i;
    vals[i] = 0.0;
  }
  ierr = MatGetOwnershipRange(*J,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    ierr = MatSetValues(*J,1,&i,red->N,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = PetscFree2(cols,vals);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #7
PETSC_EXTERN PetscErrorCode MatISSetMPIXAIJPreallocation_Private(Mat A, Mat B, PetscBool maxreduce)
{
  Mat_IS          *matis = (Mat_IS*)(A->data);
  PetscInt        *my_dnz,*my_onz,*dnz,*onz,*mat_ranges,*row_ownership;
  const PetscInt  *global_indices_r,*global_indices_c;
  PetscInt        i,j,bs,rows,cols;
  PetscInt        lrows,lcols;
  PetscInt        local_rows,local_cols;
  PetscMPIInt     nsubdomains;
  PetscBool       isdense,issbaij;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&nsubdomains);CHKERRQ(ierr);
  ierr = MatGetSize(A,&rows,&cols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  ierr = MatGetSize(matis->A,&local_rows,&local_cols);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)matis->A,MATSEQDENSE,&isdense);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)matis->A,MATSEQSBAIJ,&issbaij);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetIndices(A->rmap->mapping,&global_indices_r);CHKERRQ(ierr);
  if (A->rmap->mapping != A->cmap->mapping) {
    ierr = ISLocalToGlobalMappingGetIndices(A->cmap->mapping,&global_indices_c);CHKERRQ(ierr);
  } else {
    global_indices_c = global_indices_r;
  }

  if (issbaij) {
    ierr = MatGetRowUpperTriangular(matis->A);CHKERRQ(ierr);
  }
  /*
     An SF reduce is needed to sum up properly on shared rows.
     Note that generally preallocation is not exact, since it overestimates nonzeros
  */
  if (!matis->sf) { /* setup SF if not yet created and allocate rootdata and leafdata */
    ierr = MatISComputeSF_Private(A);CHKERRQ(ierr);
  }
  ierr = MatGetLocalSize(A,&lrows,&lcols);CHKERRQ(ierr);
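  /* MatPreallocateInitialize is a macro: it allocates the dnz/onz arrays (zeroed) over the local rows */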
  ierr = MatPreallocateInitialize(PetscObjectComm((PetscObject)A),lrows,lcols,dnz,onz);CHKERRQ(ierr);
  /* All processes need to compute entire row ownership */
  ierr = PetscMalloc1(rows,&row_ownership);CHKERRQ(ierr);
  ierr = MatGetOwnershipRanges(A,(const PetscInt**)&mat_ranges);CHKERRQ(ierr);
  for (i=0;i<nsubdomains;i++) {
    for (j=mat_ranges[i];j<mat_ranges[i+1];j++) {
      row_ownership[j] = i;
    }
  }

  /*
     my_dnz and my_onz contains exact contribution to preallocation from each local mat
     then, they will be summed up properly. This way, preallocation is always sufficient
  */
  ierr = PetscCalloc2(local_rows,&my_dnz,local_rows,&my_onz);CHKERRQ(ierr);
  /* preallocation as a MATAIJ */
  if (isdense) { /* special case for dense local matrices */
    for (i=0;i<local_rows;i++) {
      PetscInt index_row = global_indices_r[i];
      for (j=i;j<local_rows;j++) {
        PetscInt owner = row_ownership[index_row];
        PetscInt index_col = global_indices_c[j];
        if (index_col > mat_ranges[owner]-1 && index_col < mat_ranges[owner+1] ) { /* diag block */
          my_dnz[i] += 1;
        } else { /* offdiag block */
          my_onz[i] += 1;
        }
        /* same as before, interchanging rows and cols */
        if (i != j) {
          owner = row_ownership[index_col];
          if (index_row > mat_ranges[owner]-1 && index_row < mat_ranges[owner+1] ) {
            my_dnz[j] += 1;
          } else {
            my_onz[j] += 1;
          }
        }
      }
    }
  } else { /* TODO: this could be optimized using MatGetRowIJ */
    for (i=0;i<local_rows;i++) {
      const PetscInt *cols;
      PetscInt       ncols,index_row = global_indices_r[i];
      ierr = MatGetRow(matis->A,i,&ncols,&cols,NULL);CHKERRQ(ierr);
      for (j=0;j<ncols;j++) {
        PetscInt owner = row_ownership[index_row];
        PetscInt index_col = global_indices_c[cols[j]];
        if (index_col > mat_ranges[owner]-1 && index_col < mat_ranges[owner+1] ) { /* diag block */
          my_dnz[i] += 1;
        } else { /* offdiag block */
          my_onz[i] += 1;
        }
        /* same as before, interchanging rows and cols */
        if (issbaij && index_col != index_row) {
          owner = row_ownership[index_col];
          if (index_row > mat_ranges[owner]-1 && index_row < mat_ranges[owner+1] ) {
            my_dnz[cols[j]] += 1;
          } else {
            my_onz[cols[j]] += 1;
          }
        }
      }
      ierr = MatRestoreRow(matis->A,i,&ncols,&cols,NULL);CHKERRQ(ierr);
    }
  }
  ierr = ISLocalToGlobalMappingRestoreIndices(A->rmap->mapping,&global_indices_r);CHKERRQ(ierr);
  if (global_indices_c != global_indices_r) {
    ierr = ISLocalToGlobalMappingRestoreIndices(A->cmap->mapping,&global_indices_c);CHKERRQ(ierr);
  }
  ierr = PetscFree(row_ownership);CHKERRQ(ierr);

  /* Reduce my_dnz and my_onz */
  if (maxreduce) {
    ierr = PetscSFReduceBegin(matis->sf,MPIU_INT,my_dnz,dnz,MPI_MAX);CHKERRQ(ierr);
    ierr = PetscSFReduceEnd(matis->sf,MPIU_INT,my_dnz,dnz,MPI_MAX);CHKERRQ(ierr);
    ierr = PetscSFReduceBegin(matis->sf,MPIU_INT,my_onz,onz,MPI_MAX);CHKERRQ(ierr);
    ierr = PetscSFReduceEnd(matis->sf,MPIU_INT,my_onz,onz,MPI_MAX);CHKERRQ(ierr);
  } else {
    ierr = PetscSFReduceBegin(matis->sf,MPIU_INT,my_dnz,dnz,MPI_SUM);CHKERRQ(ierr);
    ierr = PetscSFReduceEnd(matis->sf,MPIU_INT,my_dnz,dnz,MPI_SUM);CHKERRQ(ierr);
    ierr = PetscSFReduceBegin(matis->sf,MPIU_INT,my_onz,onz,MPI_SUM);CHKERRQ(ierr);
    ierr = PetscSFReduceEnd(matis->sf,MPIU_INT,my_onz,onz,MPI_SUM);CHKERRQ(ierr);
  }
  ierr = PetscFree2(my_dnz,my_onz);CHKERRQ(ierr);

  /* Resize preallocation if overestimated */
  for (i=0;i<lrows;i++) {
    dnz[i] = PetscMin(dnz[i],lcols);
    onz[i] = PetscMin(onz[i],cols-lcols);
  }
  /* set preallocation */
  ierr = MatMPIAIJSetPreallocation(B,0,dnz,0,onz);CHKERRQ(ierr);
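  /* compress the scalar-row counts in place into per-block-row counts for the (S)BAIJ preallocations below */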
  for (i=0;i<lrows/bs;i++) {
    dnz[i] = dnz[i*bs]/bs;
    onz[i] = onz[i*bs]/bs;
  }
  ierr = MatMPIBAIJSetPreallocation(B,bs,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatMPISBAIJSetPreallocation(B,bs,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  if (issbaij) {
    ierr = MatRestoreRowUpperTriangular(matis->A);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
Example #8
int main(int argc,char **args)
{
  Mat            A,Atrans,sA,*submatA,*submatsA;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank;
  PetscInt       bs=1,mbs=10,ov=1,i,j,k,*rows,*cols,nd=2,*idx,rstart,rend,sz,M,N,Mbs;
  PetscScalar    *vals,rval,one=1.0;
  IS             *is1,*is2;
  PetscRandom    rand;
  PetscBool      flg,TestOverlap,TestSubMat,TestAllcols,test_sorted=PETSC_FALSE;
  PetscInt       vid = -1;
#if defined(PETSC_USE_LOG)
  PetscLogStage  stages[2];
#endif

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  ierr = PetscOptionsGetInt(NULL,NULL,"-mat_block_size",&bs,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-mat_mbs",&mbs,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-ov",&ov,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-nd",&nd,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-view_id",&vid,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(NULL,NULL, "-test_overlap", &TestOverlap);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(NULL,NULL, "-test_submat", &TestSubMat);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(NULL,NULL, "-test_allcols", &TestAllcols);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,NULL,"-test_sorted",&test_sorted,NULL);CHKERRQ(ierr);

  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,mbs*bs,mbs*bs,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(A,MATBAIJ);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation(A,bs,PETSC_DEFAULT,NULL);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(A,bs,PETSC_DEFAULT,NULL,PETSC_DEFAULT,NULL);CHKERRQ(ierr);

  ierr = PetscRandomCreate(PETSC_COMM_WORLD,&rand);CHKERRQ(ierr);
  ierr = PetscRandomSetFromOptions(rand);CHKERRQ(ierr);

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  ierr = MatGetSize(A,&M,&N);CHKERRQ(ierr);
  Mbs  = M/bs;

  ierr = PetscMalloc1(bs,&rows);CHKERRQ(ierr);
  ierr = PetscMalloc1(bs,&cols);CHKERRQ(ierr);
  ierr = PetscMalloc1(bs*bs,&vals);CHKERRQ(ierr);
  ierr = PetscMalloc1(M,&idx);CHKERRQ(ierr);

  /* Now set blocks of values */
  for (j=0; j<bs*bs; j++) vals[j] = 0.0;
  for (i=0; i<Mbs; i++) {
    cols[0] = i*bs; rows[0] = i*bs;
    for (j=1; j<bs; j++) {
      rows[j] = rows[j-1]+1;
      cols[j] = cols[j-1]+1;
    }
    ierr = MatSetValues(A,bs,rows,bs,cols,vals,ADD_VALUES);CHKERRQ(ierr);
  }
  /* second, add random blocks */
  for (i=0; i<20*bs; i++) {
    ierr    = PetscRandomGetValue(rand,&rval);CHKERRQ(ierr);
    cols[0] = bs*(PetscInt)(PetscRealPart(rval)*Mbs);
    ierr    = PetscRandomGetValue(rand,&rval);CHKERRQ(ierr);
    rows[0] = rstart + bs*(PetscInt)(PetscRealPart(rval)*mbs);
    for (j=1; j<bs; j++) {
      rows[j] = rows[j-1]+1;
      cols[j] = cols[j-1]+1;
    }

    for (j=0; j<bs*bs; j++) {
      ierr    = PetscRandomGetValue(rand,&rval);CHKERRQ(ierr);
      vals[j] = rval;
    }
    ierr = MatSetValues(A,bs,rows,bs,cols,vals,ADD_VALUES);CHKERRQ(ierr);
  }

  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* make A a symmetric matrix: A <- A^T + A */
  ierr = MatTranspose(A,MAT_INITIAL_MATRIX, &Atrans);CHKERRQ(ierr);
  ierr = MatAXPY(A,one,Atrans,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = MatDestroy(&Atrans);CHKERRQ(ierr);
  ierr = MatTranspose(A,MAT_INITIAL_MATRIX, &Atrans);CHKERRQ(ierr);
  ierr = MatEqual(A, Atrans, &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr);
  } else SETERRQ(PETSC_COMM_SELF,1,"A+A^T is non-symmetric");
  ierr = MatDestroy(&Atrans);CHKERRQ(ierr);

  /* create a SeqSBAIJ matrix sA (= A) */
  ierr = MatConvert(A,MATSBAIJ,MAT_INITIAL_MATRIX,&sA);CHKERRQ(ierr);
  if (vid >= 0 && vid < size) {
    if (!rank) printf("A: \n");
    ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    if (!rank) printf("sA: \n");
    ierr = MatView(sA,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }

  /* Test sA==A through MatMult() */
  ierr = MatMultEqual(A,sA,10,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Error in MatConvert(): A != sA");

  /* Test MatIncreaseOverlap() */
  ierr = PetscMalloc1(nd,&is1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nd,&is2);CHKERRQ(ierr);

  for (i=0; i<nd; i++) {
    if (!TestAllcols) {
      ierr = PetscRandomGetValue(rand,&rval);CHKERRQ(ierr);
      sz   = (PetscInt)((0.5+0.2*PetscRealPart(rval))*mbs); /* 0.5*mbs < sz < 0.7*mbs */

      for (j=0; j<sz; j++) {
        ierr      = PetscRandomGetValue(rand,&rval);CHKERRQ(ierr);
        idx[j*bs] = bs*(PetscInt)(PetscRealPart(rval)*Mbs);
        for (k=1; k<bs; k++) idx[j*bs+k] = idx[j*bs]+k;
      }
      ierr = ISCreateGeneral(PETSC_COMM_SELF,sz*bs,idx,PETSC_COPY_VALUES,is1+i);CHKERRQ(ierr);
      ierr = ISCreateGeneral(PETSC_COMM_SELF,sz*bs,idx,PETSC_COPY_VALUES,is2+i);CHKERRQ(ierr);
      if (rank == vid) {
        ierr = PetscPrintf(PETSC_COMM_SELF," [%d] IS sz[%d]: %d\n",rank,i,sz);CHKERRQ(ierr);
        ierr = ISView(is2[i],PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
      }
    } else { /* Test all rows and columns */
      sz   = M;
      ierr = ISCreateStride(PETSC_COMM_SELF,sz,0,1,is1+i);CHKERRQ(ierr);
      ierr = ISCreateStride(PETSC_COMM_SELF,sz,0,1,is2+i);CHKERRQ(ierr);

      if (rank == vid) {
        PetscBool colflag;
        ierr = ISIdentity(is2[i],&colflag);CHKERRQ(ierr);
        printf("[%d] is2[%d], colflag %d\n",rank,(int)i,(int)colflag);
        ierr = ISView(is2[i],PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
      }
    }
  }

  ierr = PetscLogStageRegister("MatOv_SBAIJ",&stages[0]);CHKERRQ(ierr);
  ierr = PetscLogStageRegister("MatOv_BAIJ",&stages[1]);CHKERRQ(ierr);

  /* Test MatIncreaseOverlap */
  if (TestOverlap) {
    ierr = PetscLogStagePush(stages[0]);CHKERRQ(ierr);
    ierr = MatIncreaseOverlap(sA,nd,is2,ov);CHKERRQ(ierr);
    ierr = PetscLogStagePop();CHKERRQ(ierr);

    ierr = PetscLogStagePush(stages[1]);CHKERRQ(ierr);
    ierr = MatIncreaseOverlap(A,nd,is1,ov);CHKERRQ(ierr);
    ierr = PetscLogStagePop();CHKERRQ(ierr);

    if (rank == vid) {
      printf("\n[%d] IS from BAIJ:\n",rank);
      ierr = ISView(is1[0],PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
      printf("\n[%d] IS from SBAIJ:\n",rank);
      ierr = ISView(is2[0],PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
    }

    for (i=0; i<nd; ++i) {
      ierr = ISEqual(is1[i],is2[i],&flg);CHKERRQ(ierr);
      if (!flg) {
        if (!rank) {
          ierr = ISSort(is1[i]);CHKERRQ(ierr);
          /* ISView(is1[i],PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr); */
          ierr = ISSort(is2[i]);CHKERRQ(ierr);
          /* ISView(is2[i],PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr); */
        }
        SETERRQ1(PETSC_COMM_SELF,1,"i=%D, is1 != is2",i);
      }
    }
  }

  /* Test MatCreateSubmatrices */
  if (TestSubMat) {
    if (test_sorted) {
      for (i = 0; i < nd; ++i) {
        ierr = ISSort(is1[i]);CHKERRQ(ierr);
      }
    }
    ierr = MatCreateSubMatrices(A,nd,is1,is1,MAT_INITIAL_MATRIX,&submatA);CHKERRQ(ierr);
    ierr = MatCreateSubMatrices(sA,nd,is1,is1,MAT_INITIAL_MATRIX,&submatsA);CHKERRQ(ierr);

    ierr = MatMultEqual(A,sA,10,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A != sA");

    /* Now test MatCreateSubmatrices with MAT_REUSE_MATRIX option */
    ierr = MatCreateSubMatrices(A,nd,is1,is1,MAT_REUSE_MATRIX,&submatA);CHKERRQ(ierr);
    ierr = MatCreateSubMatrices(sA,nd,is1,is1,MAT_REUSE_MATRIX,&submatsA);CHKERRQ(ierr);
    ierr = MatMultEqual(A,sA,10,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatCreateSubmatrices(): A != sA");

    ierr = MatDestroySubMatrices(nd,&submatA);CHKERRQ(ierr);
    ierr = MatDestroySubMatrices(nd,&submatsA);CHKERRQ(ierr);
  }

  /* Free allocated memory */
  for (i=0; i<nd; ++i) {
    ierr = ISDestroy(&is1[i]);CHKERRQ(ierr);
    ierr = ISDestroy(&is2[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(is1);CHKERRQ(ierr);
  ierr = PetscFree(is2);CHKERRQ(ierr);
  ierr = PetscFree(idx);CHKERRQ(ierr);
  ierr = PetscFree(rows);CHKERRQ(ierr);
  ierr = PetscFree(cols);CHKERRQ(ierr);
  ierr = PetscFree(vals);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = MatDestroy(&sA);CHKERRQ(ierr);
  ierr = PetscRandomDestroy(&rand);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
Example #9
/*@C
    SlicedGetMatrix - Creates a matrix with the correct parallel layout required for
      computing the Jacobian on a function defined using the information in Sliced.

    Collective on Sliced

    Input Parameters:
+   slice - the slice object
-   mtype - Supported types are MATSEQAIJ, MATMPIAIJ, MATSEQBAIJ, MATMPIBAIJ, MATSEQSBAIJ, MATMPISBAIJ,
            or any type which inherits from one of these (such as MATAIJ, MATLUSOL, etc.).

    Output Parameter:
.   J  - matrix with the correct nonzero preallocation
        (obviously without the correct Jacobian values)

    Level: advanced

    Notes: This properly preallocates the number of nonzeros in the sparse matrix so you 
       do not need to do it yourself.

.seealso: ISColoringView(), ISColoringGetIS(), MatFDColoringCreate(), DASetBlockFills()

@*/
PetscErrorCode PETSCDM_DLLEXPORT SlicedGetMatrix(Sliced slice, const MatType mtype,Mat *J)
{
  PetscErrorCode         ierr;
  PetscInt               *globals,*sd_nnz,*so_nnz,rstart,bs,i;
  ISLocalToGlobalMapping lmap,blmap;
  void                   (*aij)(void) = PETSC_NULL;

  PetscFunctionBegin;
  bs = slice->bs;
  ierr = MatCreate(((PetscObject)slice)->comm,J);CHKERRQ(ierr);
  ierr = MatSetSizes(*J,slice->n*bs,slice->n*bs,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetType(*J,mtype);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation(*J,bs,slice->d_nz,slice->d_nnz);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(*J,bs,slice->d_nz,slice->d_nnz,slice->o_nz,slice->o_nnz);CHKERRQ(ierr);
  /* In general, we have to do extra work to preallocate for scalar (AIJ) matrices so we check whether it will do any
  * good before going on with it. */
  ierr = PetscObjectQueryFunction((PetscObject)*J,"MatMPIAIJSetPreallocation_C",&aij);CHKERRQ(ierr);
  if (!aij) {
    ierr = PetscObjectQueryFunction((PetscObject)*J,"MatSeqAIJSetPreallocation_C",&aij);CHKERRQ(ierr);
  }
  if (aij) {
    if (bs == 1) {
      ierr = MatSeqAIJSetPreallocation(*J,slice->d_nz,slice->d_nnz);CHKERRQ(ierr);
      ierr = MatMPIAIJSetPreallocation(*J,slice->d_nz,slice->d_nnz,slice->o_nz,slice->o_nnz);CHKERRQ(ierr);
    } else if (!slice->d_nnz) {
      ierr = MatSeqAIJSetPreallocation(*J,slice->d_nz*bs,PETSC_NULL);CHKERRQ(ierr);
      ierr = MatMPIAIJSetPreallocation(*J,slice->d_nz*bs,PETSC_NULL,slice->o_nz*bs,PETSC_NULL);CHKERRQ(ierr);
    } else {
      /* The user has provided preallocation per block-row, convert it to per scalar-row respecting SlicedSetBlockFills() if applicable */
      ierr = PetscMalloc2(slice->n*bs,PetscInt,&sd_nnz,(!!slice->o_nnz)*slice->n*bs,PetscInt,&so_nnz);CHKERRQ(ierr);
      for (i=0; i<slice->n*bs; i++) {
        sd_nnz[i] = (slice->d_nnz[i/bs]-1) * (slice->ofill ? slice->ofill->i[i%bs+1]-slice->ofill->i[i%bs] : bs)
                                           + (slice->dfill ? slice->dfill->i[i%bs+1]-slice->dfill->i[i%bs] : bs);
        if (so_nnz) {
          so_nnz[i] = slice->o_nnz[i/bs] * (slice->ofill ? slice->ofill->i[i%bs+1]-slice->ofill->i[i%bs] : bs);
        }
      }
      ierr = MatSeqAIJSetPreallocation(*J,slice->d_nz*bs,sd_nnz);CHKERRQ(ierr);
      ierr = MatMPIAIJSetPreallocation(*J,slice->d_nz*bs,sd_nnz,slice->o_nz*bs,so_nnz);CHKERRQ(ierr);
      ierr = PetscFree2(sd_nnz,so_nnz);CHKERRQ(ierr);
    }
  }

  ierr = MatSetBlockSize(*J,bs);CHKERRQ(ierr);

  /* Set up the local to global map.  For the scalar map, we have to translate to entry-wise indexing instead of block-wise. */
  ierr = PetscMalloc((slice->n+slice->Nghosts)*bs*sizeof(PetscInt),&globals);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(*J,&rstart,PETSC_NULL);CHKERRQ(ierr);
  for (i=0; i<slice->n*bs; i++) {
    globals[i] = rstart + i;
  }
  for (i=0; i<slice->Nghosts*bs; i++) {
    globals[slice->n*bs+i] = slice->ghosts[i/bs]*bs + i%bs;
  }
  ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_SELF,(slice->n+slice->Nghosts)*bs,globals,&lmap);CHKERRQ(ierr);
  ierr = PetscFree(globals);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingBlock(lmap,bs,&blmap);CHKERRQ(ierr);
  ierr = MatSetLocalToGlobalMapping(*J,lmap);CHKERRQ(ierr);
  ierr = MatSetLocalToGlobalMappingBlock(*J,blmap);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(lmap);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(blmap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #10
void PETSC_STDCALL matmpibaijsetpreallocation_(Mat *mat,PetscInt *bs,PetscInt *d_nz,PetscInt *d_nnz,PetscInt *o_nz,PetscInt *o_nnz,PetscErrorCode *ierr)
{
  CHKFORTRANNULLINTEGER(d_nnz);
  CHKFORTRANNULLINTEGER(o_nnz);
  *ierr = MatMPIBAIJSetPreallocation(*mat,*bs,*d_nz,d_nnz,*o_nz,o_nnz);
}
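The CHKFORTRANNULLINTEGER macros translate a Fortran PETSC_NULL_INTEGER argument into a C NULL pointer, so a Fortran call that passes null nnz arrays reaches the C routine just as the direct C call would (a sketch; the variable names are illustrative):

  /* C equivalent of the Fortran call with PETSC_NULL_INTEGER for both nnz arrays */
  ierr = MatMPIBAIJSetPreallocation(A,bs,d_nz,NULL,o_nz,NULL);CHKERRQ(ierr);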
Example #11
Solver::Petsc::Petsc (Grid& gr)
{    
    world = PETSC_COMM_WORLD;
    MPI_Comm_rank (world, &rank);
    MPI_Comm_size (world, &nProcs);
    n = gr.n_in_elm;
    bs = N_VAR;
    vecGlobalSize = n*bs;
    //DX = (double*) malloc (vecGlobalSize*sizeof(double));
    //PetscMalloc1 (xGlobalSize, &DX);
    DX = new double [vecGlobalSize];

    // set x
    VecCreate (world, &x);
    VecSetType (x, VECSTANDARD);
    VecSetBlockSize(x, bs);
    VecSetSizes (x, PETSC_DECIDE, vecGlobalSize);
    VecGetLocalSize (x, &vecLocalSize);
    VecGetOwnershipRange (x, &vecLocBeg, &vecLocEnd);

    // set b
    VecDuplicate (x, &b);
    
    // set A
    MatCreate (world, &A);    
    MatSetType (A, MATMPIBAIJ);
    //MatSetType (A, MATMPIAIJ);
    MatSetSizes (A, PETSC_DECIDE, PETSC_DECIDE, vecGlobalSize, vecGlobalSize);
    //MatSeqBAIJSetPreallocation (A, bs, 4, NULL);
    //MatSeqAIJSetPreallocation (A, 4*bs, NULL);    
    //MatMPIAIJSetPreallocation (A, 4*bs, NULL, 4*bs, NULL);
    MatMPIBAIJSetPreallocation (A, bs, 5, NULL, 5, NULL);
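    // 5 block-nonzeros per block row in both the diagonal and off-diagonal parts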
    //MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
    //MatCreateBAIJ (world, bs, PETSC_DECIDE, PETSC_DECIDE, n*bs, n*bs, 1, NULL, 3, NULL, &A);
    // specific to pentagonal mesh; change later
    MatGetOwnershipRange (A, &matLocBeg, &matLocEnd);
    MatGetLocalSize (A, &matLocalSize, NULL);

    KSPCreate (world, &ksp);
    KSPSetOperators (ksp, A, A);
    KSPGetPC (ksp, &pc);
    PCSetType (pc, PCSOR);
    KSPSetType (ksp, KSPGMRES);
    KSPSetFromOptions (ksp);
        
    localSizes = new int [nProcs];
    int recvcounts[nProcs];
    int displs[nProcs];
    displs[0] = 0;    
    for (int i=0; i<nProcs; ++i)
    {
        recvcounts[i] = 1;
    }
    for (int i=1; i<nProcs; ++i)
    {
        displs[i] = displs[i-1] + recvcounts[i-1];
    }
    
    MPI_Allgatherv (&vecLocalSize, 1, MPI_INT, localSizes, recvcounts, displs, MPI_INT, world);
}
Example #12
int main(int argc,char **args)
{
  Mat            A,B,*submatA,*submatB;
  PetscInt       bs=1,m=11,ov=1,i,j,k,*rows,*cols,nd=5,*idx,rstart,rend,sz,mm,nn,M,N,Mbs;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank;
  PetscScalar    *vals,rval;
  IS             *is1,*is2;
  PetscRandom    rdm;
  Vec            xx,s1,s2;
  PetscReal      s1norm,s2norm,rnorm,tol = 100*PETSC_SMALL;
  PetscBool      flg,test_nd0=PETSC_FALSE;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  ierr = PetscOptionsGetInt(NULL,NULL,"-mat_block_size",&bs,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-mat_size",&m,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-ov",&ov,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(NULL,NULL,"-nd",&nd,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,NULL,"-test_nd0",&test_nd0,NULL);CHKERRQ(ierr);

  /* Create an AIJ matrix A */
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,m*bs,m*bs,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(A,MATAIJ);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(A,PETSC_DEFAULT,NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,NULL,PETSC_DEFAULT,NULL);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);

  /* Create a BAIJ matrix B */
  ierr = MatCreate(PETSC_COMM_WORLD,&B);CHKERRQ(ierr);
  ierr = MatSetSizes(B,m*bs,m*bs,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(B,MATBAIJ);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation(B,bs,PETSC_DEFAULT,NULL);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(B,bs,PETSC_DEFAULT,NULL,PETSC_DEFAULT,NULL);CHKERRQ(ierr);
  ierr = MatSetFromOptions(B);CHKERRQ(ierr);
  ierr = MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);

  ierr = PetscRandomCreate(PETSC_COMM_WORLD,&rdm);CHKERRQ(ierr);
  ierr = PetscRandomSetFromOptions(rdm);CHKERRQ(ierr);

  ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
  ierr = MatGetSize(A,&M,&N);CHKERRQ(ierr);
  Mbs  = M/bs;

  ierr = PetscMalloc1(bs,&rows);CHKERRQ(ierr);
  ierr = PetscMalloc1(bs,&cols);CHKERRQ(ierr);
  ierr = PetscMalloc1(bs*bs,&vals);CHKERRQ(ierr);
  ierr = PetscMalloc1(M,&idx);CHKERRQ(ierr);

  /* Now set blocks of values */
  for (i=0; i<40*bs; i++) {
    ierr    = PetscRandomGetValue(rdm,&rval);CHKERRQ(ierr);
    cols[0] = bs*(int)(PetscRealPart(rval)*Mbs);
    ierr    = PetscRandomGetValue(rdm,&rval);CHKERRQ(ierr);
    rows[0] = rstart + bs*(int)(PetscRealPart(rval)*m);
    for (j=1; j<bs; j++) {
      rows[j] = rows[j-1]+1;
      cols[j] = cols[j-1]+1;
    }

    for (j=0; j<bs*bs; j++) {
      ierr    = PetscRandomGetValue(rdm,&rval);CHKERRQ(ierr);
      vals[j] = rval;
    }
    ierr = MatSetValues(A,bs,rows,bs,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    ierr = MatSetValues(B,bs,rows,bs,cols,vals,ADD_VALUES);CHKERRQ(ierr);
  }

  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* Test MatIncreaseOverlap() */
  ierr = PetscMalloc1(nd,&is1);CHKERRQ(ierr);
  ierr = PetscMalloc1(nd,&is2);CHKERRQ(ierr);

  if (!rank && test_nd0) nd = 0; /* test case */

  for (i=0; i<nd; i++) {
    ierr = PetscRandomGetValue(rdm,&rval);CHKERRQ(ierr);
    sz   = (int)(PetscRealPart(rval)*m);
    for (j=0; j<sz; j++) {
      ierr      = PetscRandomGetValue(rdm,&rval);CHKERRQ(ierr);
      idx[j*bs] = bs*(int)(PetscRealPart(rval)*Mbs);
      for (k=1; k<bs; k++) idx[j*bs+k] = idx[j*bs]+k;
    }
    ierr = ISCreateGeneral(PETSC_COMM_SELF,sz*bs,idx,PETSC_COPY_VALUES,is1+i);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,sz*bs,idx,PETSC_COPY_VALUES,is2+i);CHKERRQ(ierr);
  }
  ierr = MatIncreaseOverlap(A,nd,is1,ov);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap(B,nd,is2,ov);CHKERRQ(ierr);

  for (i=0; i<nd; ++i) {
    ierr = ISEqual(is1[i],is2[i],&flg);CHKERRQ(ierr);

    if (!flg) {
      ierr = PetscPrintf(PETSC_COMM_SELF,"i=%D, flg=%d :bs=%D m=%D ov=%D nd=%D np=%D\n",i,flg,bs,m,ov,nd,size);CHKERRQ(ierr);
    }
  }

  for (i=0; i<nd; ++i) {
    ierr = ISSort(is1[i]);CHKERRQ(ierr);
    ierr = ISSort(is2[i]);CHKERRQ(ierr);
  }

  ierr = MatCreateSubMatrices(B,nd,is2,is2,MAT_INITIAL_MATRIX,&submatB);CHKERRQ(ierr);
  ierr = MatCreateSubMatrices(A,nd,is1,is1,MAT_INITIAL_MATRIX,&submatA);CHKERRQ(ierr);

  /* Test MatMult() */
  for (i=0; i<nd; i++) {
    ierr = MatGetSize(submatA[i],&mm,&nn);CHKERRQ(ierr);
    ierr = VecCreateSeq(PETSC_COMM_SELF,mm,&xx);CHKERRQ(ierr);
    ierr = VecDuplicate(xx,&s1);CHKERRQ(ierr);
    ierr = VecDuplicate(xx,&s2);CHKERRQ(ierr);
    for (j=0; j<3; j++) {
      ierr  = VecSetRandom(xx,rdm);CHKERRQ(ierr);
      ierr  = MatMult(submatA[i],xx,s1);CHKERRQ(ierr);
      ierr  = MatMult(submatB[i],xx,s2);CHKERRQ(ierr);
      ierr  = VecNorm(s1,NORM_2,&s1norm);CHKERRQ(ierr);
      ierr  = VecNorm(s2,NORM_2,&s2norm);CHKERRQ(ierr);
      rnorm = s2norm-s1norm;
      if (rnorm<-tol || rnorm>tol) {
        ierr = PetscPrintf(PETSC_COMM_SELF,"[%d]Error:MatMult - Norm1=%16.14e Norm2=%16.14e\n",rank,s1norm,s2norm);CHKERRQ(ierr);
      }
    }
    ierr = VecDestroy(&xx);CHKERRQ(ierr);
    ierr = VecDestroy(&s1);CHKERRQ(ierr);
    ierr = VecDestroy(&s2);CHKERRQ(ierr);
  }

  /* Now test MatCreateSubmatrices with MAT_REUSE_MATRIX option */
  ierr = MatCreateSubMatrices(A,nd,is1,is1,MAT_REUSE_MATRIX,&submatA);CHKERRQ(ierr);
  ierr = MatCreateSubMatrices(B,nd,is2,is2,MAT_REUSE_MATRIX,&submatB);CHKERRQ(ierr);

  /* Test MatMult() */
  for (i=0; i<nd; i++) {
    ierr = MatGetSize(submatA[i],&mm,&nn);CHKERRQ(ierr);
    ierr = VecCreateSeq(PETSC_COMM_SELF,mm,&xx);CHKERRQ(ierr);
    ierr = VecDuplicate(xx,&s1);CHKERRQ(ierr);
    ierr = VecDuplicate(xx,&s2);CHKERRQ(ierr);
    for (j=0; j<3; j++) {
      ierr  = VecSetRandom(xx,rdm);CHKERRQ(ierr);
      ierr  = MatMult(submatA[i],xx,s1);CHKERRQ(ierr);
      ierr  = MatMult(submatB[i],xx,s2);CHKERRQ(ierr);
      ierr  = VecNorm(s1,NORM_2,&s1norm);CHKERRQ(ierr);
      ierr  = VecNorm(s2,NORM_2,&s2norm);CHKERRQ(ierr);
      rnorm = s2norm-s1norm;
      if (rnorm<-tol || rnorm>tol) {
        ierr = PetscPrintf(PETSC_COMM_SELF,"[%d]Error:MatMult - Norm1=%16.14e Norm2=%16.14e\n",rank,s1norm,s2norm);CHKERRQ(ierr);
      }
    }
    ierr = VecDestroy(&xx);CHKERRQ(ierr);
    ierr = VecDestroy(&s1);CHKERRQ(ierr);
    ierr = VecDestroy(&s2);CHKERRQ(ierr);
  }

  /* Free allocated memory */
  for (i=0; i<nd; ++i) {
    ierr = ISDestroy(&is1[i]);CHKERRQ(ierr);
    ierr = ISDestroy(&is2[i]);CHKERRQ(ierr);
  }
  ierr = MatDestroySubMatrices(nd,&submatA);CHKERRQ(ierr);
  ierr = MatDestroySubMatrices(nd,&submatB);CHKERRQ(ierr);

  ierr = PetscFree(is1);CHKERRQ(ierr);
  ierr = PetscFree(is2);CHKERRQ(ierr);
  ierr = PetscFree(idx);CHKERRQ(ierr);
  ierr = PetscFree(rows);CHKERRQ(ierr);
  ierr = PetscFree(cols);CHKERRQ(ierr);
  ierr = PetscFree(vals);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = MatDestroy(&B);CHKERRQ(ierr);
  ierr = PetscRandomDestroy(&rdm);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
Example #13
void PetscMatrix<T>::init ()
{
  libmesh_assert(this->_dof_map);

  // Clear initialized matrices
  if (this->initialized())
    this->clear();

  this->_is_initialized = true;


  const numeric_index_type my_m = this->_dof_map->n_dofs();
  const numeric_index_type my_n = my_m;
  const numeric_index_type n_l  = this->_dof_map->n_dofs_on_processor(this->processor_id());
  const numeric_index_type m_l  = n_l;


  const std::vector<numeric_index_type>& n_nz = this->_dof_map->get_n_nz();
  const std::vector<numeric_index_type>& n_oz = this->_dof_map->get_n_oz();

  // Make sure the sparsity pattern isn't empty unless the matrix is 0x0
  libmesh_assert_equal_to (n_nz.size(), m_l);
  libmesh_assert_equal_to (n_oz.size(), m_l);

  PetscErrorCode ierr = 0;
  PetscInt m_global   = static_cast<PetscInt>(my_m);
  PetscInt n_global   = static_cast<PetscInt>(my_n);
  PetscInt m_local    = static_cast<PetscInt>(m_l);
  PetscInt n_local    = static_cast<PetscInt>(n_l);

  ierr = MatCreate(this->comm().get(), &_mat);
  LIBMESH_CHKERRABORT(ierr);
  ierr = MatSetSizes(_mat, m_local, n_local, m_global, n_global);
  LIBMESH_CHKERRABORT(ierr);

#ifdef LIBMESH_ENABLE_BLOCKED_STORAGE
  PetscInt blocksize  = static_cast<PetscInt>(this->_dof_map->block_size());
  if (blocksize > 1)
    {
      // specified blocksize, bs>1.
      // double check sizes.
      libmesh_assert_equal_to (m_local  % blocksize, 0);
      libmesh_assert_equal_to (n_local  % blocksize, 0);
      libmesh_assert_equal_to (m_global % blocksize, 0);
      libmesh_assert_equal_to (n_global % blocksize, 0);

      ierr = MatSetType(_mat, MATBAIJ); // Automatically chooses seqbaij or mpibaij
      LIBMESH_CHKERRABORT(ierr);
      ierr = MatSetBlockSize(_mat, blocksize);
      LIBMESH_CHKERRABORT(ierr);

      // transform the per-entry n_nz and n_oz arrays into their block counterparts.
      std::vector<numeric_index_type> b_n_nz, b_n_oz;

      transform_preallocation_arrays (blocksize,
				      n_nz, n_oz,
				      b_n_nz, b_n_oz);

      ierr = MatSeqBAIJSetPreallocation(_mat, blocksize, 0, (PetscInt*)(b_n_nz.empty()?NULL:&b_n_nz[0]));
      LIBMESH_CHKERRABORT(ierr);

      ierr = MatMPIBAIJSetPreallocation(_mat, blocksize,
					0, (PetscInt*)(b_n_nz.empty()?NULL:&b_n_nz[0]),
					0, (PetscInt*)(b_n_oz.empty()?NULL:&b_n_oz[0]));
      LIBMESH_CHKERRABORT(ierr);
    }
  else
#endif
    {
      // no block storage case
      ierr = MatSetType(_mat, MATAIJ); // Automatically chooses seqaij or mpiaij
      LIBMESH_CHKERRABORT(ierr);

      ierr = MatSeqAIJSetPreallocation(_mat, 0, (PetscInt*)(n_nz.empty()?NULL:&n_nz[0]));
      LIBMESH_CHKERRABORT(ierr);
      ierr = MatMPIAIJSetPreallocation(_mat,
				       0, (PetscInt*)(n_nz.empty()?NULL:&n_nz[0]),
				       0, (PetscInt*)(n_oz.empty()?NULL:&n_oz[0]));
      LIBMESH_CHKERRABORT(ierr);
    }

  // Is prefix information available somewhere? Perhaps pass in the system name?
  ierr = MatSetOptionsPrefix(_mat, "");
  LIBMESH_CHKERRABORT(ierr);
  ierr = MatSetFromOptions(_mat);
  LIBMESH_CHKERRABORT(ierr);

  this->zero();
}
Example #14
void PetscMatrix<T>::init (const numeric_index_type m_in,
			   const numeric_index_type n_in,
			   const numeric_index_type m_l,
			   const numeric_index_type n_l,
			   const numeric_index_type nnz,
			   const numeric_index_type noz,
			   const numeric_index_type blocksize_in)
{
  // So compilers don't warn when !LIBMESH_ENABLE_BLOCKED_STORAGE
  libmesh_ignore(blocksize_in);

  // Clear initialized matrices
  if (this->initialized())
    this->clear();

  this->_is_initialized = true;


  PetscErrorCode ierr = 0;
  PetscInt m_global   = static_cast<PetscInt>(m_in);
  PetscInt n_global   = static_cast<PetscInt>(n_in);
  PetscInt m_local    = static_cast<PetscInt>(m_l);
  PetscInt n_local    = static_cast<PetscInt>(n_l);
  PetscInt n_nz       = static_cast<PetscInt>(nnz);
  PetscInt n_oz       = static_cast<PetscInt>(noz);

  ierr = MatCreate(this->comm().get(), &_mat);
  LIBMESH_CHKERRABORT(ierr);
  ierr = MatSetSizes(_mat, m_local, n_local, m_global, n_global);
  LIBMESH_CHKERRABORT(ierr);

#ifdef LIBMESH_ENABLE_BLOCKED_STORAGE
  PetscInt blocksize  = static_cast<PetscInt>(blocksize_in);
  if (blocksize > 1)
    {
      // specified blocksize, bs>1.
      // double check sizes.
      libmesh_assert_equal_to (m_local  % blocksize, 0);
      libmesh_assert_equal_to (n_local  % blocksize, 0);
      libmesh_assert_equal_to (m_global % blocksize, 0);
      libmesh_assert_equal_to (n_global % blocksize, 0);
      libmesh_assert_equal_to (n_nz     % blocksize, 0);
      libmesh_assert_equal_to (n_oz     % blocksize, 0);

      ierr = MatSetType(_mat, MATBAIJ); // Automatically chooses seqbaij or mpibaij
      LIBMESH_CHKERRABORT(ierr);
      ierr = MatSetBlockSize(_mat, blocksize);
      LIBMESH_CHKERRABORT(ierr);
      ierr = MatSeqBAIJSetPreallocation(_mat, blocksize, n_nz/blocksize, PETSC_NULL);
      LIBMESH_CHKERRABORT(ierr);
      ierr = MatMPIBAIJSetPreallocation(_mat, blocksize,
					n_nz/blocksize, PETSC_NULL,
					n_oz/blocksize, PETSC_NULL);
      LIBMESH_CHKERRABORT(ierr);
    }
  else
#endif
    {
      ierr = MatSetType(_mat, MATAIJ); // Automatically chooses seqaij or mpiaij
      LIBMESH_CHKERRABORT(ierr);
      ierr = MatSeqAIJSetPreallocation(_mat, n_nz, PETSC_NULL);
      LIBMESH_CHKERRABORT(ierr);
      ierr = MatMPIAIJSetPreallocation(_mat, n_nz, PETSC_NULL, n_oz, PETSC_NULL);
      LIBMESH_CHKERRABORT(ierr);
    }

  // Make it an error for PETSc to allocate new nonzero entries during assembly
#if PETSC_VERSION_LESS_THAN(3,0,0)
  ierr = MatSetOption(_mat, MAT_NEW_NONZERO_ALLOCATION_ERR);
#else
  ierr = MatSetOption(_mat, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE);
#endif
  LIBMESH_CHKERRABORT(ierr);

  // Is prefix information available somewhere? Perhaps pass in the system name?
  ierr = MatSetOptionsPrefix(_mat, "");
  LIBMESH_CHKERRABORT(ierr);
  ierr = MatSetFromOptions(_mat);
  LIBMESH_CHKERRABORT(ierr);

  this->zero ();
}
Example #15
PetscErrorCode  MatGetMultiProcBlock_MPIBAIJ(Mat mat, MPI_Comm subComm, MatReuse scall,Mat *subMat)
{
  PetscErrorCode ierr;
  Mat_MPIBAIJ    *aij  = (Mat_MPIBAIJ*)mat->data;
  Mat_SeqBAIJ    *aijB = (Mat_SeqBAIJ*)aij->B->data;
  PetscMPIInt    commRank,subCommSize,subCommRank;
  PetscMPIInt    *commRankMap,subRank,rank,commsize;
  PetscInt       *garrayCMap,col,i,j,*nnz,newRow,newCol,*newbRow,*newbCol,k,k1;
  PetscInt       bs=mat->rmap->bs;
  PetscScalar    *vals,*aijBvals;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)mat),&commsize);CHKERRQ(ierr);
  ierr = MPI_Comm_size(subComm,&subCommSize);CHKERRQ(ierr);

  /* create subMat object with the relevant layout */
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = MatCreate(subComm,subMat);CHKERRQ(ierr);
    ierr = MatSetType(*subMat,MATMPIBAIJ);CHKERRQ(ierr);
    ierr = MatSetSizes(*subMat,mat->rmap->n,mat->cmap->n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetBlockSizes(*subMat,mat->rmap->bs,mat->cmap->bs);CHKERRQ(ierr);

    /* need to setup rmap and cmap before Preallocation */
    ierr = PetscLayoutSetBlockSize((*subMat)->rmap,mat->rmap->bs);CHKERRQ(ierr);
    ierr = PetscLayoutSetBlockSize((*subMat)->cmap,mat->cmap->bs);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp((*subMat)->rmap);CHKERRQ(ierr);
    ierr = PetscLayoutSetUp((*subMat)->cmap);CHKERRQ(ierr);
  }

  /* create a map of comm_rank from subComm to comm - should commRankMap and garrayCMap be kept for reuse? */
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&commRank);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(subComm,&subCommRank);CHKERRQ(ierr);
  ierr = PetscMalloc(subCommSize*sizeof(PetscMPIInt),&commRankMap);CHKERRQ(ierr);
  ierr = MPI_Allgather(&commRank,1,MPI_INT,commRankMap,1,MPI_INT,subComm);CHKERRQ(ierr);
  
  /* Traverse garray and identify blocked column indices [of offdiag mat] that
   should be discarded. For the ones not discarded, store the newCol+1
   value in garrayCMap */
  ierr = PetscMalloc(aij->B->cmap->n/bs*sizeof(PetscInt),&garrayCMap);CHKERRQ(ierr);
  ierr = PetscMemzero(garrayCMap,aij->B->cmap->n/bs*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=0; i<aij->B->cmap->n/bs; i++) {
    col = aij->garray[i]; /* blocked column index */
    for (subRank=0; subRank<subCommSize; subRank++) {
      rank = commRankMap[subRank];
      if ((col >= mat->cmap->range[rank]/bs) && (col < mat->cmap->range[rank+1]/bs)) {
        garrayCMap[i] = (((*subMat)->cmap->range[subRank]- mat->cmap->range[rank])/bs + col + 1);
        break;
      }
    }
  }

  if (scall == MAT_INITIAL_MATRIX) {
    /* Now compute preallocation for the offdiag mat */
    ierr = PetscMalloc(aij->B->rmap->n/bs*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
    ierr = PetscMemzero(nnz,aij->B->rmap->n/bs*sizeof(PetscInt));CHKERRQ(ierr);
    for (i=0; i<aij->B->rmap->n/bs; i++) {
      for (j=aijB->i[i]; j<aijB->i[i+1]; j++) {
        if (garrayCMap[aijB->j[j]]) nnz[i]++;
      }
    }
    ierr = MatMPIBAIJSetPreallocation(*(subMat),bs,0,NULL,0,nnz);CHKERRQ(ierr);

    /* reuse diag block with the new submat */
    ierr = MatDestroy(&((Mat_MPIBAIJ*)((*subMat)->data))->A);CHKERRQ(ierr);

    ((Mat_MPIBAIJ*)((*subMat)->data))->A = aij->A;

    ierr = PetscObjectReference((PetscObject)aij->A);CHKERRQ(ierr);
  } else if (((Mat_MPIBAIJ*)(*subMat)->data)->A != aij->A) {
    PetscObject obj = (PetscObject)((Mat_MPIBAIJ*)((*subMat)->data))->A;

    ierr = PetscObjectReference((PetscObject)obj);CHKERRQ(ierr);

    ((Mat_MPIBAIJ*)((*subMat)->data))->A = aij->A;

    ierr = PetscObjectReference((PetscObject)aij->A);CHKERRQ(ierr);
  }

  /* Now traverse aij->B and insert values into subMat */
  ierr = PetscMalloc3(bs,PetscInt,&newbRow,bs,PetscInt,&newbCol,bs*bs,PetscScalar,&vals);CHKERRQ(ierr);
  for (i=0; i<aij->B->rmap->n/bs; i++) {
    newRow = (*subMat)->rmap->range[subCommRank] + i*bs;
    for (j=aijB->i[i]; j<aijB->i[i+1]; j++) {
      newCol = garrayCMap[aijB->j[j]];
      if (newCol) {
        newCol--; /* remove the increment */
        newCol *= bs;
        for (k=0; k<bs; k++) {
          newbRow[k] = newRow + k;
          newbCol[k] = newCol + k;
        }
        /* copy column-oriented aijB->a into row-oriented vals */
        aijBvals = aijB->a + j*bs*bs;
        for (k1=0; k1<bs; k1++) { 
          for (k=0; k<bs; k++) { 
            vals[k1+k*bs] = *aijBvals++; 
          }
        }
        ierr = MatSetValues(*subMat,bs,newbRow,bs,newbCol,vals,INSERT_VALUES);CHKERRQ(ierr); 
      }
    }
  }
  ierr = MatAssemblyBegin(*subMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*subMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* deallocate temporary data */
  ierr = PetscFree3(newbRow,newbCol,vals);CHKERRQ(ierr);
  ierr = PetscFree(commRankMap);CHKERRQ(ierr);
  ierr = PetscFree(garrayCMap);CHKERRQ(ierr);
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscFree(nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
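The routine above is the MPIBAIJ implementation behind MatGetMultiProcBlock(). Here is a hedged usage sketch (the subcommunicator construction via MPI_Comm_split is illustrative and not taken from the source; the ranks in each subcommunicator should be contiguous):

  Mat         subA;
  MPI_Comm    subComm;
  PetscMPIInt rank,size;

  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  /* split the world into two contiguous halves */
  ierr = MPI_Comm_split(PETSC_COMM_WORLD,rank < size/2 ? 0 : 1,rank,&subComm);CHKERRQ(ierr);
  ierr = MatGetMultiProcBlock(A,subComm,MAT_INITIAL_MATRIX,&subA);CHKERRQ(ierr);
  /* later calls can pass MAT_REUSE_MATRIX to refill subA */
  ierr = MatDestroy(&subA);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subComm);CHKERRQ(ierr);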
Example #16
int main(int argc,char **args)
{
  Mat            C,A;
  PetscInt       i, n = 10,midx[3],bs=1;
  PetscErrorCode ierr;
  PetscScalar    v[3];
  PetscBool      flg,isAIJ;
  MatType        type;
  PetscMPIInt    size;

  PetscInitialize(&argc,&args,(char *)0,help);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(PETSC_NULL,"-n",&n,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetInt(PETSC_NULL,"-mat_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);

  ierr = MatCreate(PETSC_COMM_WORLD,&C);CHKERRQ(ierr);
  ierr = MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,n,n);CHKERRQ(ierr);
  ierr = MatSetType(C,MATAIJ);CHKERRQ(ierr);
  ierr = MatSetFromOptions(C);CHKERRQ(ierr);

  ierr = MatGetType(C,&type);CHKERRQ(ierr);
  if (size == 1){
    ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJ,&isAIJ);CHKERRQ(ierr);
  } else {
    ierr = PetscObjectTypeCompare((PetscObject)C,MATMPIAIJ,&isAIJ);CHKERRQ(ierr);
  }
  ierr = MatSeqAIJSetPreallocation(C,3,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(C,3,PETSC_NULL,3,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatSeqBAIJSetPreallocation(C,bs,3,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(C,bs,3,PETSC_NULL,3,PETSC_NULL);CHKERRQ(ierr);

  v[0] = -1.; v[1] = 2.; v[2] = -1.;
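  /* interior rows get the 1-D Laplacian stencil (-1, 2, -1); the boundary rows are set separately below */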
  for (i=1; i<n-1; i++){
    midx[2] = i-1; midx[1] = i; midx[0] = i+1;
    ierr = MatSetValues(C,1,&i,3,midx,v,INSERT_VALUES);CHKERRQ(ierr);
  }
  i = 0; midx[0] = 0; midx[1] = 1;
  v[0] = 2.0; v[1] = -1.;
  ierr = MatSetValues(C,1,&i,2,midx,v,INSERT_VALUES);CHKERRQ(ierr);
  i = n-1; midx[0] = n-2; midx[1] = n-1;
  v[0] = -1.0; v[1] = 2.;
  ierr = MatSetValues(C,1,&i,2,midx,v,INSERT_VALUES);CHKERRQ(ierr);

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n,n);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatSetUp(A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* test matrices with different nonzero patterns - Note: A is created with a different nonzero pattern than C! */
  ierr = MatCopy(C,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = MatEqual(A,C,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,1,"MatCopy(C,A,DIFFERENT_NONZERO_PATTERN): Matrices are NOT equal");

  ierr = PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD,"A is obtained with MatCopy(,,DIFFERENT_NONZERO_PATTERN):\n");CHKERRQ(ierr);
  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);

  /* test matrices with same nonzero pattern */
  ierr = MatDuplicate(C,MAT_DO_NOT_COPY_VALUES,&A);CHKERRQ(ierr);
  ierr = MatCopy(C,A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = MatEqual(A,C,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,1,"MatCopy(C,A,SAME_NONZERO_PATTERN): Matrices are NOT equal");

  ierr = PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_INFO);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD,"\nA is obtained with MatCopy(,,SAME_NONZERO_PATTERN):\n");CHKERRQ(ierr);
  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  ierr = PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_ASCII_COMMON);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_WORLD,"A:\n");CHKERRQ(ierr);
  ierr = MatView(A,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  /* test MatStore/RetrieveValues() */
  if (isAIJ){
    ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);CHKERRQ(ierr);
    ierr = MatStoreValues(A);CHKERRQ(ierr);
    ierr = MatZeroEntries(A);CHKERRQ(ierr);
    ierr = MatRetrieveValues(A);CHKERRQ(ierr);
  }

  ierr = MatDestroy(&C);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}
Example #17
PetscErrorCode MatISGetMPIXAIJ_IS(Mat mat, MatReuse reuse, Mat *M)
{
  Mat_IS                 *matis = (Mat_IS*)(mat->data);
  /* info on mat */
  /* ISLocalToGlobalMapping rmapping,cmapping; */
  PetscInt               bs,rows,cols;
  PetscInt               lrows,lcols;
  PetscInt               local_rows,local_cols;
  PetscBool              isdense,issbaij,issbaij_red;
  /* values insertion */
  PetscScalar            *array;
  PetscInt               *local_indices,*global_indices;
  /* work */
  PetscInt               i,j,index_row;
  PetscErrorCode         ierr;

  PetscFunctionBegin;
  /* MISSING CHECKS
    - rectangular case not covered (it is not allowed by MATIS)
  */
  /* get info from mat */
  /* ierr = MatGetLocalToGlobalMapping(mat,&rmapping,&cmapping);CHKERRQ(ierr); */
  ierr = MatGetSize(mat,&rows,&cols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(mat,&bs);CHKERRQ(ierr);
  ierr = MatGetSize(matis->A,&local_rows,&local_cols);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)matis->A,MATSEQDENSE,&isdense);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)matis->A,MATSEQSBAIJ,&issbaij);CHKERRQ(ierr);

  /* work */
  ierr = PetscMalloc1(local_rows,&local_indices);CHKERRQ(ierr);
  for (i=0;i<local_rows;i++) local_indices[i]=i;
  /* map indices of local mat to global values */
  ierr = PetscMalloc(PetscMax(local_cols,local_rows)*sizeof(*global_indices),&global_indices);CHKERRQ(ierr);
  /* ierr = ISLocalToGlobalMappingApply(rmapping,local_rows,local_indices,global_indices);CHKERRQ(ierr); */
  ierr = ISLocalToGlobalMappingApply(matis->mapping,local_rows,local_indices,global_indices);CHKERRQ(ierr);

  if (issbaij) {
    ierr = MatGetRowUpperTriangular(matis->A);CHKERRQ(ierr);
  }

  if (reuse == MAT_INITIAL_MATRIX) {
    Mat         new_mat;
    MatType     new_mat_type;
    Vec         vec_dnz,vec_onz;
    PetscScalar *my_dnz,*my_onz;
    PetscInt    *dnz,*onz,*mat_ranges,*row_ownership;
    PetscInt    index_col,owner;
    PetscMPIInt nsubdomains;

    /* determining new matrix type */
    ierr = MPI_Allreduce(&issbaij,&issbaij_red,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    if (issbaij_red) {
      new_mat_type = MATSBAIJ;
    } else {
      if (bs>1) {
        new_mat_type = MATBAIJ;
      } else {
        new_mat_type = MATAIJ;
      }
    }
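    /* rationale of the reduction above: the assembled matrix can be SBAIJ
       only if every process holds a symmetric (SEQSBAIJ) local matrix;
       otherwise fall back to AIJ, or BAIJ when the block size is > 1 */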

    ierr = MPI_Comm_size(PetscObjectComm((PetscObject)mat),&nsubdomains);CHKERRQ(ierr);
    ierr = MatCreate(PetscObjectComm((PetscObject)mat),&new_mat);CHKERRQ(ierr);
    ierr = MatSetSizes(new_mat,PETSC_DECIDE,PETSC_DECIDE,rows,cols);CHKERRQ(ierr);
    ierr = MatSetBlockSize(new_mat,bs);CHKERRQ(ierr);
    ierr = MatSetType(new_mat,new_mat_type);CHKERRQ(ierr);
    ierr = MatSetUp(new_mat);CHKERRQ(ierr);
    ierr = MatGetLocalSize(new_mat,&lrows,&lcols);CHKERRQ(ierr);

    /*
      preallocation
    */

    ierr = MatPreallocateInitialize(PetscObjectComm((PetscObject)new_mat),lrows,lcols,dnz,onz);CHKERRQ(ierr);
    /*
       Some vectors are needed to sum up properly on shared interface dofs.
       Preallocation macros cannot do the job.
       Note that preallocation is not exact, since it overestimates nonzeros
    */
    ierr = MatCreateVecs(new_mat,NULL,&vec_dnz);CHKERRQ(ierr);
    /* ierr = VecSetLocalToGlobalMapping(vec_dnz,rmapping);CHKERRQ(ierr); */
    ierr = VecSetLocalToGlobalMapping(vec_dnz,matis->mapping);CHKERRQ(ierr);
    ierr = VecDuplicate(vec_dnz,&vec_onz);CHKERRQ(ierr);
    /* All processes need to compute entire row ownership */
    ierr = PetscMalloc1(rows,&row_ownership);CHKERRQ(ierr);
    ierr = MatGetOwnershipRanges(new_mat,(const PetscInt**)&mat_ranges);CHKERRQ(ierr);
    for (i=0;i<nsubdomains;i++) {
      for (j=mat_ranges[i];j<mat_ranges[i+1];j++) {
        row_ownership[j]=i;
      }
    }
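    /* at this point row_ownership[k] holds the rank owning global row k in
       the row layout of new_mat */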

    /*
       my_dnz and my_onz contain the exact contribution to the preallocation from each local mat;
       they are then summed up properly, so the resulting preallocation is always sufficient
    */
    ierr = PetscMalloc1(local_rows,&my_dnz);CHKERRQ(ierr);
    ierr = PetscMalloc1(local_rows,&my_onz);CHKERRQ(ierr);
    ierr = PetscMemzero(my_dnz,local_rows*sizeof(*my_dnz));CHKERRQ(ierr);
    ierr = PetscMemzero(my_onz,local_rows*sizeof(*my_onz));CHKERRQ(ierr);
    /* preallocation as a MATAIJ */
    if (isdense) { /* special case for dense local matrices */
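      /* only the upper triangle (j >= i) of the dense local matrix is
         traversed; each pair contributes to row i and, by symmetry, to row j */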
      for (i=0;i<local_rows;i++) {
        index_row = global_indices[i];
        for (j=i;j<local_rows;j++) {
          owner = row_ownership[index_row];
          index_col = global_indices[j];
          if (index_col > mat_ranges[owner]-1 && index_col < mat_ranges[owner+1] ) { /* diag block */
            my_dnz[i] += 1.0;
          } else { /* offdiag block */
            my_onz[i] += 1.0;
          }
          /* same as before, interchanging rows and cols */
          if (i != j) {
            owner = row_ownership[index_col];
            if (index_row > mat_ranges[owner]-1 && index_row < mat_ranges[owner+1] ) {
              my_dnz[j] += 1.0;
            } else {
              my_onz[j] += 1.0;
            }
          }
        }
      }
    } else {
      for (i=0;i<local_rows;i++) {
        PetscInt ncols;
        const PetscInt *cols;
        index_row = global_indices[i];
        ierr = MatGetRow(matis->A,i,&ncols,&cols,NULL);CHKERRQ(ierr);
        for (j=0;j<ncols;j++) {
          owner = row_ownership[index_row];
          index_col = global_indices[cols[j]];
          if (index_col > mat_ranges[owner]-1 && index_col < mat_ranges[owner+1] ) { /* diag block */
            my_dnz[i] += 1.0;
          } else { /* offdiag block */
            my_onz[i] += 1.0;
          }
          /* same as before, interchanging rows and cols: the symmetric
             contribution belongs to local row cols[j], not to the column
             counter j */
          if (issbaij) {
            owner = row_ownership[index_col];
            if (index_row > mat_ranges[owner]-1 && index_row < mat_ranges[owner+1] ) {
              my_dnz[cols[j]] += 1.0;
            } else {
              my_onz[cols[j]] += 1.0;
            }
          }
        }
        ierr = MatRestoreRow(matis->A,i,&ncols,&cols,NULL);CHKERRQ(ierr);
      }
    }
    ierr = VecSet(vec_dnz,0.0);CHKERRQ(ierr);
    ierr = VecSet(vec_onz,0.0);CHKERRQ(ierr);
    if (local_rows) { /* guard for processes owning an empty local matrix (possible in multilevel schemes) */
      ierr = VecSetValuesLocal(vec_dnz,local_rows,local_indices,my_dnz,ADD_VALUES);CHKERRQ(ierr);
      ierr = VecSetValuesLocal(vec_onz,local_rows,local_indices,my_onz,ADD_VALUES);CHKERRQ(ierr);
    }
    ierr = VecAssemblyBegin(vec_dnz);CHKERRQ(ierr);
    ierr = VecAssemblyBegin(vec_onz);CHKERRQ(ierr);
    ierr = VecAssemblyEnd(vec_dnz);CHKERRQ(ierr);
    ierr = VecAssemblyEnd(vec_onz);CHKERRQ(ierr);
    ierr = PetscFree(my_dnz);CHKERRQ(ierr);
    ierr = PetscFree(my_onz);CHKERRQ(ierr);
    ierr = PetscFree(row_ownership);CHKERRQ(ierr);

    /* set computed preallocation in dnz and onz */
    ierr = VecGetArray(vec_dnz,&array);CHKERRQ(ierr);
    for (i=0;i<lrows;i++) dnz[i] = (PetscInt)PetscRealPart(array[i]);
    ierr = VecRestoreArray(vec_dnz,&array);CHKERRQ(ierr);
    ierr = VecGetArray(vec_onz,&array);CHKERRQ(ierr);
    for (i=0;i<lrows;i++) onz[i] = (PetscInt)PetscRealPart(array[i]);
    ierr = VecRestoreArray(vec_onz,&array);CHKERRQ(ierr);
    ierr = VecDestroy(&vec_dnz);CHKERRQ(ierr);
    ierr = VecDestroy(&vec_onz);CHKERRQ(ierr);

    /* Resize preallocation if overestimated */
    for (i=0;i<lrows;i++) {
      dnz[i] = PetscMin(dnz[i],lcols);
      onz[i] = PetscMin(onz[i],cols-lcols);
    }
    /* set preallocation */
    ierr = MatMPIAIJSetPreallocation(new_mat,0,dnz,0,onz);CHKERRQ(ierr);
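    /* rescale the scalar (AIJ) counts to block counts for BAIJ, using the
       count of the first scalar row of each block as the estimate */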
    for (i=0;i<lrows/bs;i++) {
      dnz[i] = dnz[i*bs]/bs;
      onz[i] = onz[i*bs]/bs;
    }
    ierr = MatMPIBAIJSetPreallocation(new_mat,bs,0,dnz,0,onz);CHKERRQ(ierr);
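    /* adjust for SBAIJ, which stores only the upper triangular part:
       block-row i has at most i blocks strictly below the diagonal inside the
       diagonal block, hence the subtraction below */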
    for (i=0;i<lrows/bs;i++) {
      dnz[i] = dnz[i]-i;
    }
    ierr = MatMPISBAIJSetPreallocation(new_mat,bs,0,dnz,0,onz);CHKERRQ(ierr);
    ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
    *M = new_mat;
  } else {
    PetscInt mbs,mrows,mcols;
    /* some checks */
    ierr = MatGetBlockSize(*M,&mbs);CHKERRQ(ierr);
    ierr = MatGetSize(*M,&mrows,&mcols);CHKERRQ(ierr);
    if (mrows != rows) {
      SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Cannot reuse matrix. Wrong number of rows (%D != %D)",rows,mrows);
    }
    if (mcols != cols) {
      SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Cannot reuse matrix. Wrong number of cols (%D != %D)",cols,mcols);
    }
    if (mbs != bs) {
      SETERRQ2(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Cannot reuse matrix. Wrong block size (%D != %D)",bs,mbs);
    }
    ierr = MatZeroEntries(*M);CHKERRQ(ierr);
  }
  /* set local to global mappings */
  /* ierr = MatSetLocalToGlobalMapping(*M,rmapping,cmapping);CHKERRQ(ierr); */
  /* Set values */
  if (isdense) { /* special case for dense local matrices */
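    /* MatDenseGetArray() returns the values in column-major order, so
       insertion is switched to column-oriented */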
    ierr = MatSetOption(*M,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
    ierr = MatDenseGetArray(matis->A,&array);CHKERRQ(ierr);
    ierr = MatSetValues(*M,local_rows,global_indices,local_cols,global_indices,array,ADD_VALUES);CHKERRQ(ierr);
    ierr = MatDenseRestoreArray(matis->A,&array);CHKERRQ(ierr);
    ierr = PetscFree(local_indices);CHKERRQ(ierr);
    ierr = PetscFree(global_indices);CHKERRQ(ierr);
  } else { /* very basic values insertion for all other matrix types */
    ierr = PetscFree(local_indices);CHKERRQ(ierr);
    for (i=0;i<local_rows;i++) {
      ierr = MatGetRow(matis->A,i,&j,(const PetscInt**)&local_indices,(const PetscScalar**)&array);CHKERRQ(ierr);
      /* ierr = MatSetValuesLocal(*M,1,&i,j,local_indices,array,ADD_VALUES);CHKERRQ(ierr); */
      ierr = ISLocalToGlobalMappingApply(matis->mapping,j,local_indices,global_indices);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingApply(matis->mapping,1,&i,&index_row);CHKERRQ(ierr);
      ierr = MatSetValues(*M,1,&index_row,j,global_indices,array,ADD_VALUES);CHKERRQ(ierr);
      ierr = MatRestoreRow(matis->A,i,&j,(const PetscInt**)&local_indices,(const PetscScalar**)&array);CHKERRQ(ierr);
    }
    ierr = PetscFree(global_indices);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  if (isdense) {
    ierr = MatSetOption(*M,MAT_ROW_ORIENTED,PETSC_TRUE);CHKERRQ(ierr);
  }
  if (issbaij) {
    ierr = MatRestoreRowUpperTriangular(matis->A);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
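
A minimal usage sketch of how a conversion routine like the one above is normally driven; it assumes that MatISGetMPIXAIJ() is the public entry point dispatching to MatISGetMPIXAIJ_IS() for MATIS matrices, and the driver name AssembleGlobalFromIS is hypothetical.

PetscErrorCode AssembleGlobalFromIS(Mat A)
{
  Mat            B;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* first call: allocate and assemble a new parallel (X)AIJ matrix from the MATIS matrix A */
  ierr = MatISGetMPIXAIJ(A,MAT_INITIAL_MATRIX,&B);CHKERRQ(ierr);
  /* ... use B, e.g. to build a preconditioner ... */
  /* after the local matrices of A have changed, refill the same B in place:
     the reuse path checks sizes and block size, then zeroes and re-adds values */
  ierr = MatISGetMPIXAIJ(A,MAT_REUSE_MATRIX,&B);CHKERRQ(ierr);
  ierr = MatDestroy(&B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}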