Example #1
File: sbstrmfact.c Project: 00liujj/petsc
PetscErrorCode MatSolve_SeqSBSTRM_4_inplace(Mat A,Vec bb,Vec xx)
{
  Mat_SeqSBAIJ   *a   =(Mat_SeqSBAIJ*)A->data;
  IS             isrow=a->row;
  PetscInt       mbs  =a->mbs,*ai=a->i,*aj=a->j,bs=A->rmap->bs,bs2=a->bs2;
  PetscErrorCode ierr;
  const PetscInt *r;
  PetscInt       nz,*vj,k,idx;
  PetscScalar    *x,*b,x0,x1,x2,x3,*t,*tp;

  Mat_SeqSBSTRM   *sbstrm = (Mat_SeqSBSTRM*)A->spptr;
  MatScalar       *as     =sbstrm->as,*diag;
  PetscScalar     tp0, tp1, tp2, tp3;
  const MatScalar *v0, *v1, *v2, *v3;
  PetscInt        slen;

  PetscFunctionBegin;
  slen = 4*(ai[mbs]-ai[0]);
  v0   = as + 16*ai[0];
  v1   = v0 + slen;
  v2   = v1 + slen;
  v3   = v2 + slen;

  ierr = VecGetArray(bb,&b);CHKERRQ(ierr);
  ierr = VecGetArray(xx,&x);CHKERRQ(ierr);
  t    = a->solve_work;
  ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);

  /* solve U^T * D * y = b by forward substitution */
  tp = t;
  for (k=0; k<mbs; k++) { /* t <- perm(b) */
    idx   = 4*r[k];
    tp[0] = b[idx];
    tp[1] = b[idx+1];
    tp[2] = b[idx+2];
    tp[3] = b[idx+3];
    tp   += 4;
  }

  for (k=0; k<mbs; k++) {
    vj = aj + ai[k];
    tp = t + k*4;
    x0 = tp[0]; x1=tp[1]; x2=tp[2]; x3=tp[3];
    nz = ai[k+1] - ai[k];

    tp = t + (*vj)*4;
    while (nz--) {
      tp[0]   += v0[0]*x0 + v1[0]*x1 + v2[0]*x2 + v3[0]*x3;
      tp[1]   += v0[1]*x0 + v1[1]*x1 + v2[1]*x2 + v3[1]*x3;
      tp[2]   += v0[2]*x0 + v1[2]*x1 + v2[2]*x2 + v3[2]*x3;
      tp[3]   += v0[3]*x0 + v1[3]*x1 + v2[3]*x2 + v3[3]*x3;
      vj++; tp = t + (*vj)*4;

      v0 += 4; v1 += 4; v2 += 4; v3 += 4;
    }

    /* xk = inv(Dk)*(Dk*xk) */
    diag  = as+k*16;          /* ptr to inv(Dk) */
    tp    = t + k*4;
    tp[0] = diag[0]*x0 + diag[4]*x1 + diag[8]*x2 + diag[12]*x3;
    tp[1] = diag[1]*x0 + diag[5]*x1 + diag[9]*x2 + diag[13]*x3;
    tp[2] = diag[2]*x0 + diag[6]*x1 + diag[10]*x2+ diag[14]*x3;
    tp[3] = diag[3]*x0 + diag[7]*x1 + diag[11]*x2+ diag[15]*x3;
  }

  /* solve U*x = y by back substitution */
  for (k=mbs-1; k>=0; k--) {
    vj = aj + ai[k+1];
    tp = t + k*4;
    x0 = tp[0]; x1=tp[1]; x2=tp[2]; x3=tp[3]; /* xk */
    nz = ai[k+1] - ai[k];

    tp = t + (*vj)*4;
    while (nz--) {
      /* accumulate the U(k,j) block contributions into xk */
      v0 -= 4; v1 -= 4; v2 -= 4; v3 -= 4;

      vj--; tp = t + (*vj)*4;

      tp0 = tp[0]; tp1 = tp[1]; tp2 = tp[2]; tp3 = tp[3];
      x0 += v0[3]*tp3 + v0[2]*tp2 + v0[1]*tp1 + v0[0]*tp0;
      x1 += v1[3]*tp3 + v1[2]*tp2 + v1[1]*tp1 + v1[0]*tp0;
      x2 += v2[3]*tp3 + v2[2]*tp2 + v2[1]*tp1 + v2[0]*tp0;
      x3 += v3[3]*tp3 + v3[2]*tp2 + v3[1]*tp1 + v3[0]*tp0;
    }
    tp    = t + k*4;
    tp[0] = x0; tp[1]=x1; tp[2]=x2; tp[3]=x3;

    idx      = 4*r[k];
    x[idx]   = x0;
    x[idx+1] = x1;
    x[idx+2] = x2;
    x[idx+3] = x3;
  }

  ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
  ierr = VecRestoreArray(bb,&b);CHKERRQ(ierr);
  ierr = VecRestoreArray(xx,&x);CHKERRQ(ierr);
  ierr = PetscLogFlops(4.0*bs2*a->nz - (bs+2.0*bs2)*mbs);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
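The kernel above depends on the SBSTRM "stream" layout: instead of keeping each 4x4 factor block contiguous (16 scalars, as in SeqSBAIJ), the entries are split into four streams v0..v3 of length slen = 4*(ai[mbs]-ai[0]), and each stream advances by 4 scalars per block. The helper below is a minimal sketch of such a repacking, assuming a column-major 16-entry source block; RepackBlocksToStreams is hypothetical, and the orientation used by PETSc's own SeqSBSTRM conversion routine may differ.

/* Sketch: repack nblocks contiguous 4x4 blocks into four strided streams */
static void RepackBlocksToStreams(PetscInt nblocks,const MatScalar *blk,MatScalar *streams)
{
  PetscInt ib,r,c;
  PetscInt slen = 4*nblocks;                       /* length of one stream  */

  for (ib=0; ib<nblocks; ib++) {
    for (c=0; c<4; c++) {                          /* which stream (v0..v3) */
      for (r=0; r<4; r++) {                        /* position inside block */
        streams[c*slen + 4*ib + r] = blk[16*ib + 4*c + r];
      }
    }
  }
}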
Example #2
/*
This function is used before applying a
symmetric reordering to matrix A that is
in SBAIJ format.

The permutation is assumed to be symmetric, i.e.,
P = P^T (= inv(P)),
so the permuted matrix P*A*inv(P)=P*A*P^T is ensured to be symmetric.
 - a wrong assumption! This code needs rework!  -- Hong

The function is modified from sro.f of YSMP. The description from YSMP:
C    THE NONZERO ENTRIES OF THE MATRIX M ARE ASSUMED TO BE STORED
C    SYMMETRICALLY IN (IA,JA,A) FORMAT (I.E., NOT BOTH M(I,J) AND M(J,I)
C    ARE STORED IF I NE J).
C
C    SRO DOES NOT REARRANGE THE ORDER OF THE ROWS, BUT DOES MOVE
C    NONZEROES FROM ONE ROW TO ANOTHER TO ENSURE THAT IF M(I,J) WILL BE
C    IN THE UPPER TRIANGLE OF M WITH RESPECT TO THE NEW ORDERING, THEN
C    M(I,J) IS STORED IN ROW I (AND THUS M(J,I) IS NOT STORED);  WHEREAS
C    IF M(I,J) WILL BE IN THE STRICT LOWER TRIANGLE OF M, THEN M(J,I) IS
C    STORED IN ROW J (AND THUS M(I,J) IS NOT STORED).


  -- output: new index set (inew, jnew) for A and a map a2anew that maps
             values a to anew, such that all
             nonzero A_(perm(i),iperm(k)) will be stored in the upper triangle.
             Note: matrix A is not permuted by this function!
*/
PetscErrorCode  MatReorderingSeqSBAIJ(Mat A,IS perm)
{
  Mat_SeqSBAIJ   *a=(Mat_SeqSBAIJ*)A->data;
  const PetscInt mbs=a->mbs;

  PetscFunctionBegin;
  if (!mbs) PetscFunctionReturn(0);
  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix reordering is not supported for sbaij matrix. Use aij format");
#if 0
  PetscErrorCode ierr;
  const PetscInt *rip,*riip;
  PetscInt       *ai,*aj,*r;
  PetscInt       *nzr,nz,jmin,jmax,j,k,ajk,i;
  IS             iperm;  /* inverse of perm */
  ierr = ISGetIndices(perm,&rip);CHKERRQ(ierr);

  ierr = ISInvertPermutation(perm,PETSC_DECIDE,&iperm);CHKERRQ(ierr);
  ierr = ISGetIndices(iperm,&riip);CHKERRQ(ierr);

  for (i=0; i<mbs; i++) {
    if (rip[i] != riip[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Non-symmetric permutation, use symmetric permutation for symmetric matrices");
  }
  ierr = ISRestoreIndices(iperm,&riip);CHKERRQ(ierr);
  ierr = ISDestroy(&iperm);CHKERRQ(ierr);

  if (!a->inew) {
    ierr = PetscMalloc2(mbs+1,&ai, 2*a->i[mbs],&aj);CHKERRQ(ierr);
  } else {
    ai = a->inew; aj = a->jnew;
  }
  ierr = PetscMemcpy(ai,a->i,(mbs+1)*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemcpy(aj,a->j,(a->i[mbs])*sizeof(PetscInt));CHKERRQ(ierr);

  /*
     Phase 1: Find row index r in which to store each nonzero.
              Initialize count of nonzeros to be stored in each row (nzr).
              At the end of this phase, a nonzero a(*,*)=a(r(),aj())
              s.t. a(perm(r),perm(aj)) will fall into upper triangle part.
  */

  ierr = PetscMalloc1(mbs,&nzr);CHKERRQ(ierr);
  ierr = PetscMalloc1(ai[mbs],&r);CHKERRQ(ierr);
  for (i=0; i<mbs; i++) nzr[i] = 0;
  for (i=0; i<ai[mbs]; i++) r[i] = 0;

  /*  for each nonzero element */
  for (i=0; i<mbs; i++) {
    nz = ai[i+1] - ai[i];
    j  = ai[i];
    /* printf("nz = %d, j=%d\n",nz,j); */
    while (nz--) {
      /*  --- find row (=r[j]) and column (=aj[j]) in which to store a[j] ...*/
      k = aj[j];                          /* col. index */
      /* printf("nz = %d, k=%d\n", nz,k); */
      /* for entry that will be permuted into lower triangle, swap row and col. index */
      if (rip[k] < rip[i]) aj[j] = i;
      else k = i;

      r[j] = k; j++;
      nzr[k]++;  /* increment count of nonzeros in that row */
    }
  }

  /* Phase 2: Find new ai and permutation to apply to (aj,a).
              Determine pointers (r) to delimit rows in permuted (aj,a).
              Note: r is different from r used in phase 1.
              At the end of this phase, (aj[j],a[j]) will be stored in
              (aj[r(j)],a[r(j)]).
  */
  for (i=0; i<mbs; i++) {
    ai[i+1] = ai[i] + nzr[i];
    nzr[i]  = ai[i+1];
  }

  /* determine where each (aj[j], a[j]) is stored in new (aj,a)
     for each nonzero element (in reverse order) */
  jmin = ai[0]; jmax = ai[mbs];
  nz   = jmax - jmin;
  j    = jmax-1;
  while (nz--) {
    i = r[j];  /* row value */
    if (aj[j] == i) r[j] = ai[i]; /* put diagonal nonzero at beginning of row */
    else { /* put off-diagonal nonzero in last unused location in row */
      nzr[i]--; r[j] = nzr[i];
    }
    j--;
  }

  a->a2anew = aj + ai[mbs];
  ierr      = PetscMemcpy(a->a2anew,r,ai[mbs]*sizeof(PetscInt));CHKERRQ(ierr);

  /* Phase 3: permute (aj,a) to upper triangular form (wrt new ordering) */
  for (j=jmin; j<jmax; j++) {
    while (r[j] != j) {
      k   = r[j]; r[j] = r[k]; r[k] = k;
      ajk = aj[k]; aj[k] = aj[j]; aj[j] = ajk;
      /* ak = aa[k]; aa[k] = aa[j]; aa[j] = ak; */
    }
  }
  ierr= ISRestoreIndices(perm,&rip);CHKERRQ(ierr);

  a->inew = ai;
  a->jnew = aj;

  ierr    = ISDestroy(&a->row);CHKERRQ(ierr);
  ierr    = ISDestroy(&a->icol);CHKERRQ(ierr);
  ierr    = PetscObjectReference((PetscObject)perm);CHKERRQ(ierr);
  ierr    = ISDestroy(&a->row);CHKERRQ(ierr);
  a->row  = perm;
  ierr    = PetscObjectReference((PetscObject)perm);CHKERRQ(ierr);
  ierr    = ISDestroy(&a->icol);CHKERRQ(ierr);
  a->icol = perm;

  ierr = PetscFree(nzr);CHKERRQ(ierr);
  ierr = PetscFree(r);CHKERRQ(ierr);
  PetscFunctionReturn(0);
#endif
}
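To make the phase-1 re-filing described above concrete, the toy program below (scalar CSR instead of blocks, illustrative data only) applies the same rule: a stored upper-triangular entry (i,j) stays in row i if rip[j] >= rip[i], otherwise it is re-filed under row j, so that the permuted matrix is again stored entirely in its upper triangle.

#include <stdio.h>

int main(void)
{
  int ai[5]  = {0,3,5,6,7};        /* row pointers of an upper-triangular CSR  */
  int aj[7]  = {0,1,3, 1,2, 2, 3}; /* column indices                           */
  int rip[4] = {2,0,3,1};          /* rip[i] = new position of old index i     */
  int r[7];                        /* row in which each nonzero will be stored */
  int i,j,k;

  for (i=0; i<4; i++) {
    for (j=ai[i]; j<ai[i+1]; j++) {
      k = aj[j];
      if (rip[k] < rip[i]) { aj[j] = i; r[j] = k; } /* re-file under old row k */
      else                 {            r[j] = i; } /* keep in row i           */
    }
  }
  for (j=0; j<7; j++) printf("nz %d -> stored at (row %d, col %d)\n",j,r[j],aj[j]);
  return 0;
}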
Example #3
File: sbaijov.c Project: erdc-cm/petsc-dev
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
{
  Mat_MPISBAIJ  *c = (Mat_MPISBAIJ*)C->data;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len,*iwork;
  const PetscInt *idx_i;
  PetscInt       idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
                 Mbs,i,j,k,*odata1,*odata2,
                 proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
  PetscInt       proc_end=0,len_unused,nodata2;
  PetscInt       ois_max; /* max number of is[] over all processors */
  char           *t_p;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*s_waits2,r_req;
  MPI_Status     *s_status,r_status;
  PetscBT        *table;  /* mark indices of this processor's is[] */
  PetscBT        table_i;
  PetscBT        otable; /* mark indices of other processors' is[] */
  PetscInt       bs=C->rmap->bs,Bn = c->B->cmap->n,Bnbs = Bn/bs,*Bowners;
  IS             garray_local,garray_gl;

  PetscFunctionBegin;
  comm = ((PetscObject)C)->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  /* create tables used in
     step 1: table[i] - mark c->garray of proc [i]
     step 3: table[i] - mark indices of is[i] when whose=MINE
             table[0] - mark indices of is[] when whose=OTHER */
  len = PetscMax(is_max, size);
  ierr = PetscMalloc2(len,PetscBT,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,char,&t_p);CHKERRQ(ierr);
  for (i=0; i<len; i++) {
    table[i]  = t_p  + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
  }

  ierr = MPI_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);

  /* 1. Send this processor's is[] to other processors */
  /*---------------------------------------------------*/
  /* allocate spaces */
  ierr = PetscMalloc(is_max*sizeof(PetscInt),&n);CHKERRQ(ierr);
  len = 0;
  for (i=0; i<is_max; i++) {
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
    len += n[i];
  }
  if (!len) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }


  ierr = PetscMalloc((size*len+1)*sizeof(PetscInt),&data1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscInt*),&data1_start);CHKERRQ(ierr);
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  ierr = PetscMalloc4(size,PetscInt,&len_s,size,PetscInt,&btable,size,PetscMPIInt,&iwork,size+1,PetscInt,&Bowners);CHKERRQ(ierr);

  /* gather c->garray from all processors */
  ierr = ISCreateGeneral(comm,Bnbs,c->garray,PETSC_COPY_VALUES,&garray_local);CHKERRQ(ierr);
  ierr = ISAllGather(garray_local, &garray_gl);CHKERRQ(ierr);
  ierr = ISDestroy(&garray_local);CHKERRQ(ierr);
  ierr = MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
  Bowners[0] = 0;
  for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];

  if (is_max){
    /* hash table ctable that maps c->row to proc_id */
    ierr = PetscMalloc(Mbs*sizeof(PetscInt),&ctable);CHKERRQ(ierr);
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<C->rmap->range[proc_id+1]/bs; j++) {
        ctable[j] = proc_id;
      }
    }

    /* hash tables marking c->garray */
    ierr = ISGetIndices(garray_gl,&idx_i);CHKERRQ(ierr);
    for (i=0; i<size; i++){
      table_i = table[i];
      ierr    = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
      for (j = Bowners[i]; j<Bowners[i+1]; j++){ /* go through B cols of proc[i]*/
        ierr = PetscBTSet(table_i,idx_i[j]);CHKERRQ(ierr);
      }
    }
    ierr = ISRestoreIndices(garray_gl,&idx_i);CHKERRQ(ierr);
  }  /* if (is_max) */
  ierr = ISDestroy(&garray_gl);CHKERRQ(ierr);

  /* evaluate communication - mesg to who, length, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++){
    iwork[proc_id] = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank){
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISGetIndices(is[i],&idx_i);CHKERRQ(ierr);
    for (j=0; j<n[i]; j++){
      idx = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
      proc_end = ctable[idx];
      for (proc_id=0;  proc_id<=proc_end; proc_id++){ /* for others to process */
        if (proc_id == rank ) continue; /* done before this loop */
        if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx))
          continue;   /* no need for sending idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++){
      if (proc_id== rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id] = len_s[proc_id] ;
    }
    ierr = ISRestoreIndices(is[i],&idx_i);CHKERRQ(ierr);
  }

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++){
    data1_start[i] = data1 + i*len;
    if (len_s[i]){
      nrqs++;
      len_s[i] += 1 + is_max; /* add no. of header msg */
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(n);CHKERRQ(ierr);
  ierr = PetscFree(ctable);CHKERRQ(ierr);

  /* Determine the number of messages to expect, their lengths, and their from-ids */
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);CHKERRQ(ierr);

  /*  Now  post the sends */
  ierr = PetscMalloc2(size,MPI_Request,&s_waits1,size,MPI_Request,&s_waits2);CHKERRQ(ierr);
  k = 0;
  for (proc_id=0; proc_id<size; proc_id++){  /* send data1 to processor [proc_id] */
    if (len_s[proc_id]){
      ierr = MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);CHKERRQ(ierr);
      k++;
    }
  }

  /* 2. Receive other's is[] and process. Then send back */
  /*-----------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++){
    if (len_r1[i] > len) len = len_r1[i];
  }
  ierr = PetscFree(len_r1);CHKERRQ(ierr);
  ierr = PetscFree(id_r1);CHKERRQ(ierr);

  for (proc_id=0; proc_id<size; proc_id++)
    len_s[proc_id] = iwork[proc_id] = 0;

  ierr = PetscMalloc((len+1)*sizeof(PetscInt),&odata1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscInt**),&odata2_ptr);CHKERRQ(ierr);
  ierr = PetscBTCreate(Mbs,&otable);CHKERRQ(ierr);

  len_max = ois_max*(Mbs+1);  /* max space storing all is[] for each receive */
  len_est = 2*len_max; /* estimated space of storing is[] for all receiving messages */
  ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
  nodata2 = 0;       /* nodata2+1 = number of buffers allocated and stored in odata2_ptr[] */
  odata2_ptr[nodata2] = odata2;
  len_unused = len_est; /* unused space in the array odata2_ptr[nodata2]; must stay >= len_max */

  k = 0;
  while (k < nrqr){
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag){
      ierr = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);

      /*  Process messages */
      /*  make sure there is enough unused space in odata2 array */
      if (len_unused < len_max){ /* allocate more space for odata2 */
        ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
        odata2_ptr[++nodata2] = odata2;
        len_unused = len_est;
      }

      ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);CHKERRQ(ierr);
      len = 1 + odata2[0];
      for (i=0; i<odata2[0]; i++){
        len += odata2[1 + i];
      }

      /* Send messages back */
      ierr = MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);CHKERRQ(ierr);
      k++;
      odata2     += len;
      len_unused -= len;
      len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
    }
  }
  ierr = PetscFree(odata1);CHKERRQ(ierr);
  ierr = PetscBTDestroy(&otable);CHKERRQ(ierr);

  /* 3. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  /* make sure there is enough unused space in odata2(=data) array */
  len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
  if (len_unused < len_max){ /* allocate more space for odata2 */
    ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
    odata2_ptr[++nodata2] = odata2;
  }

  data = odata2;
  ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);CHKERRQ(ierr);
  ierr = PetscFree(data1_start);CHKERRQ(ierr);

  /* 4. Receive work done on other processors, then merge */
  /*------------------------------------------------------*/
  /* get max number of messages that this processor expects to recv */
  ierr = MPI_Allreduce(len_s,iwork,size,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
  ierr = PetscMalloc((iwork[rank]+1)*sizeof(PetscInt),&data2);CHKERRQ(ierr);
  ierr = PetscFree4(len_s,btable,iwork,Bowners);CHKERRQ(ierr);

  k = 0;
  while (k < nrqs){
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag){
      ierr = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      if (len > 1+is_max){ /* Add data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i=0; i<is_max; i++){
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++){
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }
  ierr = PetscFree(data2);CHKERRQ(ierr);
  ierr = PetscFree2(table,t_p);CHKERRQ(ierr);

  /* phase 1 sends are complete */
  ierr = PetscMalloc(size*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}
  ierr = PetscFree(data1);CHKERRQ(ierr);

  /* phase 2 sends are complete */
  if (nrqr){ierr = MPI_Waitall(nrqr,s_waits2,s_status);CHKERRQ(ierr);}
  ierr = PetscFree2(s_waits1,s_waits2);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);

  /* 5. Create new is[] */
  /*--------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    ierr = ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,PETSC_COPY_VALUES,is+i);CHKERRQ(ierr);
  }
  for (k=0; k<=nodata2; k++){
    ierr = PetscFree(odata2_ptr[k]);CHKERRQ(ierr);
  }
  ierr = PetscFree(odata2_ptr);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
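For reference, each data1 message assembled in step 1 (and each odata2 reply) uses the layout [number of sets, one count per set, then the concatenated indices]. PrintOverlapMessage below is a hypothetical debugging helper, not part of PETSc, that walks that layout.

static void PrintOverlapMessage(const PetscInt *msg)
{
  PetscInt       nsets = msg[0];
  const PetscInt *idx  = msg + 1 + nsets;   /* indices start after the header */
  PetscInt       i,j;

  for (i=0; i<nsets; i++) {
    PetscInt cnt = msg[1+i];                /* number of indices for set i */
    printf("set %d:",(int)i);
    for (j=0; j<cnt; j++) printf(" %d",(int)idx[j]);
    printf("\n");
    idx += cnt;
  }
}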
Example #4
File: sbaijov.c Project: erdc-cm/petsc-dev
PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
{
  PetscErrorCode ierr;
  PetscInt       i,N=C->cmap->N, bs=C->rmap->bs,M=C->rmap->N,Mbs=M/bs,*nidx,isz,iov;
  IS             *is_new,*is_row;
  Mat            *submats;
  Mat_MPISBAIJ   *c=(Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *asub_i;
  PetscBT        table;
  PetscInt       *ai,brow,nz,nis,l,nmax,nstages_local,nstages,max_no,pos;
  const PetscInt *idx;
  PetscBool      flg,*allcolumns,*allrows;

  PetscFunctionBegin;
  ierr = PetscMalloc(is_max*sizeof(IS),&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
  ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,is_max,is,is_new);CHKERRQ(ierr);
  if (ov < 0){ SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n");}

  /* ----- previous non-scalable implementation ----- */
  flg=PETSC_FALSE;
  ierr = PetscOptionsHasName(PETSC_NULL, "-IncreaseOverlap_old", &flg);CHKERRQ(ierr);
  if (flg){ /* previous non-scalable implementation */
    printf("use previous non-scalable implementation...\n");
    for (i=0; i<ov; ++i) {
      ierr = MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);
    }
  } else { /* implementation using modified BAIJ routines */

  ierr = PetscMalloc((Mbs+1)*sizeof(PetscInt),&nidx);CHKERRQ(ierr);
  ierr = PetscBTCreate(Mbs,&table);CHKERRQ(ierr); /* for column search */
  ierr = PetscMalloc2(is_max+1,PetscBool,&allcolumns,is_max+1,PetscBool,&allrows);CHKERRQ(ierr);

  /* Create is_row */
  ierr = PetscMalloc(is_max*sizeof(IS **),&is_row);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,Mbs,0,1,&is_row[0]);CHKERRQ(ierr);
  allrows[0] = PETSC_TRUE;
  for (i=1; i<is_max; i++) {
    is_row[i]  = is_row[0]; /* reuse is_row[0] */
    allrows[i] = PETSC_TRUE;
  }

  /* Allocate memory to hold all the submatrices - Modified from MatGetSubMatrices_MPIBAIJ() */
  ierr = PetscMalloc((is_max+1)*sizeof(Mat),&submats);CHKERRQ(ierr);

  /* Check for special case: each processor gets entire matrix columns */
  for (i=0; i<is_max; i++) {
    ierr = ISIdentity(is_new[i],&flg);CHKERRQ(ierr);
    ierr = ISGetLocalSize(is_new[i],&isz);CHKERRQ(ierr);
    if (flg && isz == Mbs){
      allcolumns[i] = PETSC_TRUE;
    } else {
      allcolumns[i] = PETSC_FALSE;
    }
  }

  /* Determine the number of stages through which submatrices are done */
  nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = is_max/nmax + ((is_max % nmax)?1:0);

  /* Make sure every processor loops through the nstages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);CHKERRQ(ierr);

  for (iov=0; iov<ov; ++iov) {
    /* 1) Get submats for column search */
    for (i=0,pos=0; i<nstages; i++) {
      if (pos+nmax <= is_max) max_no = nmax;
      else if (pos == is_max) max_no = 0;
      else                   max_no = is_max-pos;
      c->ijonly = PETSC_TRUE;
      ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,is_row+pos,is_new+pos,MAT_INITIAL_MATRIX,allrows+pos,allcolumns+pos,submats+pos);CHKERRQ(ierr);
      pos += max_no;
    }

    /* 2) Row search */
    ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);

    /* 3) Column search */
    for (i=0; i<is_max; i++){
      asub_i = (Mat_SeqSBAIJ*)submats[i]->data;
      ai = asub_i->i;

      /* put is_new obtained from MatIncreaseOverlap_MPIBAIJ() to table */
      ierr = PetscBTMemzero(Mbs,table);CHKERRQ(ierr);

      ierr = ISGetIndices(is_new[i],&idx);CHKERRQ(ierr);
      ierr = ISGetLocalSize(is_new[i],&nis);CHKERRQ(ierr);
      for (l=0; l<nis; l++) {
        ierr = PetscBTSet(table,idx[l]);CHKERRQ(ierr);
        nidx[l] = idx[l];
      }
      isz = nis;

      /* add column entries to table */
      for (brow=0; brow<Mbs; brow++){
        nz = ai[brow+1] - ai[brow];
        if (nz) {
          if (!PetscBTLookupSet(table,brow)) nidx[isz++] = brow;
        }
      }
      ierr = ISRestoreIndices(is_new[i],&idx);CHKERRQ(ierr);
      ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);

      /* create updated is_new */
      ierr = ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,PETSC_COPY_VALUES,is_new+i);CHKERRQ(ierr);
    }

    /* Free tmp spaces */
    for (i=0; i<is_max; i++){
      ierr = MatDestroy(&submats[i]);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree2(allcolumns,allrows);CHKERRQ(ierr);
  ierr = PetscBTDestroy(&table);CHKERRQ(ierr);
  ierr = PetscFree(submats);CHKERRQ(ierr);
  ierr = ISDestroy(&is_row[0]);CHKERRQ(ierr);
  ierr = PetscFree(is_row);CHKERRQ(ierr);
  ierr = PetscFree(nidx);CHKERRQ(ierr);

  }

  for (i=0; i<is_max; i++) {ierr = ISDestroy(&is[i]);CHKERRQ(ierr);}
  ierr = ISExpandIndicesGeneral(N,N,bs,is_max,is_new,is);CHKERRQ(ierr);

  for (i=0; i<is_max; i++) {ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);}
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
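This routine implements the MPISBAIJ case of the public MatIncreaseOverlap() interface (used, for example, when building overlapping subdomains for an additive Schwarz preconditioner). A minimal usage sketch, where GrowSubdomainOverlap is a hypothetical wrapper:

static PetscErrorCode GrowSubdomainOverlap(Mat C,IS *subdomain,PetscInt ov)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* subdomain[0] is replaced in place by its ov-level overlap */
  ierr = MatIncreaseOverlap(C,1,subdomain,ov);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}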
Example #5
PETSC_EXTERN PetscErrorCode AOCreate_Basic(AO ao)
{
  AO_Basic       *aobasic;
  PetscMPIInt    size,rank,count,*lens,*disp;
  PetscInt       napp,*allpetsc,*allapp,ip,ia,N,i,*petsc=NULL,start;
  PetscErrorCode ierr;
  IS             isapp=ao->isapp,ispetsc=ao->ispetsc;
  MPI_Comm       comm;
  const PetscInt *myapp,*mypetsc=NULL;

  PetscFunctionBegin;
  /* create special struct aobasic */
  ierr     = PetscNewLog(ao, AO_Basic, &aobasic);CHKERRQ(ierr);
  ao->data = (void*) aobasic;
  ierr     = PetscMemcpy(ao->ops,&AOOps_Basic,sizeof(struct _AOOps));CHKERRQ(ierr);
  ierr     = PetscObjectChangeTypeName((PetscObject)ao,AOBASIC);CHKERRQ(ierr);

  ierr = ISGetLocalSize(isapp,&napp);CHKERRQ(ierr);
  ierr = ISGetIndices(isapp,&myapp);CHKERRQ(ierr);

  ierr = PetscMPIIntCast(napp,&count);CHKERRQ(ierr);

  /* transmit all lengths to all processors */
  ierr = PetscObjectGetComm((PetscObject)isapp,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = PetscMalloc2(size,PetscMPIInt, &lens,size,PetscMPIInt,&disp);CHKERRQ(ierr);
  ierr = MPI_Allgather(&count, 1, MPI_INT, lens, 1, MPI_INT, comm);CHKERRQ(ierr);
  N    =  0;
  for (i = 0; i < size; i++) {
    ierr = PetscMPIIntCast(N,disp+i);CHKERRQ(ierr); /* = sum(lens[j]), j< i */
    N   += lens[i];
  }
  ao->N = N;
  ao->n = N;

  /* If mypetsc is 0 then use "natural" numbering */
  if (napp) {
    if (!ispetsc) {
      start = disp[rank];
      ierr  = PetscMalloc((napp+1) * sizeof(PetscInt), &petsc);CHKERRQ(ierr);
      for (i=0; i<napp; i++) petsc[i] = start + i;
    } else {
      ierr  = ISGetIndices(ispetsc,&mypetsc);CHKERRQ(ierr);
      petsc = (PetscInt*)mypetsc;
    }
  }

  /* get all indices on all processors */
  ierr = PetscMalloc2(N,PetscInt,&allpetsc,N,PetscInt,&allapp);CHKERRQ(ierr);
  ierr = MPI_Allgatherv(petsc, count, MPIU_INT, allpetsc, lens, disp, MPIU_INT, comm);CHKERRQ(ierr);
  ierr = MPI_Allgatherv((void*)myapp, count, MPIU_INT, allapp, lens, disp, MPIU_INT, comm);CHKERRQ(ierr);
  ierr = PetscFree2(lens,disp);CHKERRQ(ierr);

#if defined(PETSC_USE_DEBUG)
  {
    PetscInt *sorted;
    ierr = PetscMalloc(N*sizeof(PetscInt),&sorted);CHKERRQ(ierr);

    ierr = PetscMemcpy(sorted,allpetsc,N*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscSortInt(N,sorted);CHKERRQ(ierr);
    for (i=0; i<N; i++) {
      if (sorted[i] != i) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"PETSc ordering requires a permutation of numbers 0 to N-1\n it is missing %D has %D",i,sorted[i]);
    }

    ierr = PetscMemcpy(sorted,allapp,N*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscSortInt(N,sorted);CHKERRQ(ierr);
    for (i=0; i<N; i++) {
      if (sorted[i] != i) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Application ordering requires a permutation of numbers 0 to N-1\n it is missing %D has %D",i,sorted[i]);
    }

    ierr = PetscFree(sorted);CHKERRQ(ierr);
  }
#endif

  /* generate a list of application and PETSc node numbers */
  ierr = PetscMalloc2(N,PetscInt, &aobasic->app,N,PetscInt,&aobasic->petsc);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory(ao,2*N*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemzero(aobasic->app, N*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemzero(aobasic->petsc, N*sizeof(PetscInt));CHKERRQ(ierr);
  for (i = 0; i < N; i++) {
    ip = allpetsc[i];
    ia = allapp[i];
    /* check there are no duplicates */
    if (aobasic->app[ip]) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Duplicate in PETSc ordering at position %d. Already mapped to %d, not %d.", i, aobasic->app[ip]-1, ia);
    aobasic->app[ip] = ia + 1;
    if (aobasic->petsc[ia]) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Duplicate in Application ordering at position %d. Already mapped to %d, not %d.", i, aobasic->petsc[ia]-1, ip);
    aobasic->petsc[ia] = ip + 1;
  }
  if (napp && !mypetsc) {
    ierr = PetscFree(petsc);CHKERRQ(ierr);
  }
  ierr = PetscFree2(allpetsc,allapp);CHKERRQ(ierr);
  /* shift indices down by one */
  for (i = 0; i < N; i++) {
    aobasic->app[i]--;
    aobasic->petsc[i]--;
  }

  ierr = ISRestoreIndices(isapp,&myapp);CHKERRQ(ierr);
  if (napp) {
    if (ispetsc) {
      ierr = ISRestoreIndices(ispetsc,&mypetsc);CHKERRQ(ierr);
    } else {
      ierr = PetscFree(petsc);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
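Once the aobasic->app and aobasic->petsc arrays have been built, the mapping is normally used through the public AO interface rather than by touching the arrays directly. The fragment below is a minimal sketch; MapToPetscNumbering is a hypothetical helper name.

static PetscErrorCode MapToPetscNumbering(AO ao,PetscInt n,PetscInt idx[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* overwrite idx[] (application numbering) with the PETSc numbering */
  ierr = AOApplicationToPetsc(ao,n,idx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}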
Example #6
File: petscw.c Project: primme/primme
static PetscErrorCode permutematrix(Mat Ain, Mat Bin, Mat *Aout, Mat *Bout, int **permIndices)
{
   PetscErrorCode  ierr;
   MatPartitioning part;
   IS              isn, is, iscols;
   PetscInt        *nlocal,localCols,m,n;
   PetscMPIInt     size, rank;
   MPI_Comm        comm;
 
   PetscFunctionBegin;
 
   ierr = PetscObjectGetComm((PetscObject)Ain,&comm);CHKERRQ(ierr);
   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
   ierr = MatGetSize(Ain,&m,&n);CHKERRQ(ierr);
   ierr = MatPartitioningCreate(comm,&part);CHKERRQ(ierr);
   ierr = MatPartitioningSetAdjacency(part,Ain);CHKERRQ(ierr);
   ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
   /* get new processor owner number of each vertex */
   ierr = MatPartitioningApply(part,&is);CHKERRQ(ierr);
   /* get new global number of each old global number */
   ierr = ISPartitioningToNumbering(is,&isn);CHKERRQ(ierr);
   ierr = PetscMalloc(size*sizeof(PetscInt),&nlocal);CHKERRQ(ierr);
   /* get number of new vertices for each processor */
   ierr = ISPartitioningCount(is,size,nlocal);CHKERRQ(ierr);
   ierr = ISDestroy(&is);CHKERRQ(ierr);
 
   /* get old global number of each new global number */
   ierr = ISInvertPermutation(isn,nlocal[rank],&is);CHKERRQ(ierr);
   ierr = ISDestroy(&isn);CHKERRQ(ierr);
   ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
   ierr = ISSort(is);CHKERRQ(ierr);

   /* If matrix is square, the permutation is applied to rows and columns;
      otherwise it is only applied to rows. */
   if (m == n) {
      iscols = is;
      localCols = nlocal[rank];
   } else {
      PetscInt lowj, highj;
      ierr = MatGetOwnershipRangeColumn(Ain,&lowj,&highj);CHKERRQ(ierr);  
      localCols = highj-lowj;
      ierr = ISCreateStride(comm,localCols, lowj, 1, &iscols);CHKERRQ(ierr);
   }

   /* copy permutation */
   if (permIndices) {
      const PetscInt *indices;
      PetscInt i;
      *permIndices = malloc(sizeof(int)*(nlocal[rank]+localCols));
      ierr = ISGetIndices(is, &indices);CHKERRQ(ierr);
      for (i=0; i<nlocal[rank]; i++) (*permIndices)[i] = indices[i];
      ierr = ISRestoreIndices(is, &indices);CHKERRQ(ierr);
      ierr = ISGetIndices(iscols, &indices);CHKERRQ(ierr);
      for (i=0; i<localCols; i++) (*permIndices)[i+nlocal[rank]] = indices[i];
      ierr = ISRestoreIndices(iscols, &indices);CHKERRQ(ierr);
   }
 
   ierr = PetscFree(nlocal);CHKERRQ(ierr);

   ierr = MatGetSubMatrix(Ain,is,iscols,MAT_INITIAL_MATRIX,Aout);CHKERRQ(ierr);
   if (Bin && Bout) {
      ierr = MatGetSubMatrix(Bin,is,iscols,MAT_INITIAL_MATRIX,Bout);CHKERRQ(ierr);
   }
   ierr = ISDestroy(&is);CHKERRQ(ierr);
   if (m != n) {
      ierr = ISDestroy(&iscols);CHKERRQ(ierr);
   }
 
   PetscFunctionReturn(0);
}
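A minimal usage sketch for the routine above; RepartitionPair is a hypothetical caller. The returned permIndices array holds, on each rank, the old global row indices of the rows that rank received, followed by the old global column indices (see the copy loop above).

static PetscErrorCode RepartitionPair(Mat A,Mat B,Mat *Aperm,Mat *Bperm)
{
   PetscErrorCode ierr;
   int            *perm = NULL;

   PetscFunctionBegin;
   ierr = permutematrix(A,B,Aperm,Bperm,&perm);CHKERRQ(ierr);
   /* ... use *Aperm, *Bperm and perm here ... */
   free(perm);               /* permutematrix allocates perm with malloc() */
   PetscFunctionReturn(0);
}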