Example #1
static PetscErrorCode MatPartitioningApply_Party(MatPartitioning part, IS * partitioning)
{
    PetscErrorCode ierr;
    int  *locals, *parttab = NULL, rank, size;
    Mat mat = part->adj, matMPI, matSeq;
    int nb_locals;              
    Mat_MPIAdj *adj;            /* assigned after conversion to MPIAdj */
    MatPartitioning_Party *party = (MatPartitioning_Party *) part->data;
    PetscTruth flg;
#ifdef PETSC_HAVE_UNISTD_H
    int fd_stdout, fd_pipe[2], count,err;
#endif

    PetscFunctionBegin;
    /* check if the matrix is sequential, use MatGetSubMatrices if necessary */
    ierr = PetscTypeCompare((PetscObject) mat, MATMPIADJ, &flg);CHKERRQ(ierr);
    ierr = MPI_Comm_size(((PetscObject)mat)->comm, &size);CHKERRQ(ierr);
    ierr = MPI_Comm_rank(((PetscObject)part)->comm, &rank);CHKERRQ(ierr);
    if (size > 1) {
        int M, N;
        IS isrow, iscol;
        Mat *A;

        if (flg) SETERRQ(PETSC_ERR_SUP,"Distributed matrix format MPIAdj is not supported for sequential partitioners");
        ierr = PetscPrintf(((PetscObject)part)->comm,"Converting distributed matrix to sequential: this could be a performance loss\n");CHKERRQ(ierr);
        ierr = MatGetSize(mat, &M, &N);CHKERRQ(ierr);
        ierr = ISCreateStride(PETSC_COMM_SELF, M, 0, 1, &isrow);CHKERRQ(ierr);
        ierr = ISCreateStride(PETSC_COMM_SELF, N, 0, 1, &iscol);CHKERRQ(ierr);
        ierr = MatGetSubMatrices(mat, 1, &isrow, &iscol, MAT_INITIAL_MATRIX, &A);CHKERRQ(ierr);
        ierr = ISDestroy(isrow);CHKERRQ(ierr);
        ierr = ISDestroy(iscol);CHKERRQ(ierr);
        matSeq = *A;
        ierr   = PetscFree(A);CHKERRQ(ierr);
    } else {
        matSeq = mat;
    }
    /* convert the input to the MPIADJ format, the only format the Party
       interface works on, and store the result in matMPI */

    if (!flg) {
        ierr = MatConvert(matSeq, MATMPIADJ, MAT_INITIAL_MATRIX, &matMPI);CHKERRQ(ierr);
    } else {
        matMPI = matSeq;
    }

    adj = (Mat_MPIAdj *) matMPI->data;  /* finally adj contains the adjacency graph */
    {
        /* Party library arguments definition */
        int n = mat->rmap->N;         /* number of vertices in full graph */
        int *edge_p = adj->i;   /* start of edge list for each vertex */
        int *edge = adj->j;     /* edge list data */
        int *vertex_w = NULL;   /* weights for all vertices */
        int *edge_w = NULL;     /* weights for all edges */
        float *x = NULL, *y = NULL, *z = NULL;  /* coordinates for inertial method */
        int p = part->n;        /* number of parts to create */
        int *part_party;        /* set number of each vtx (length n) */
        int cutsize;            /* number of edges cut */
        char *global = party->global_method;    /* global partitioning algorithm */
        char *local = party->local_method;      /* local partitioning algorithm */
        int redl = party->nbvtxcoarsed; /* how many vertices to coarsen down to? */
        char *redm = party->redm;
        char *redo = party->redo;
        int rec = party->rec;
        int output = party->output;

        ierr = PetscMalloc((mat->rmap->N) * sizeof(int), &part_party);CHKERRQ(ierr);

        /* redirect output to buffer party->mesg_log */
#ifdef PETSC_HAVE_UNISTD_H
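        /* save the current stdout descriptor, then attach fd 1 to the write
           end of a pipe so that everything the Party library prints can be
           read back from fd_pipe[0] into party->mesg_log after the call */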
        fd_stdout = dup(1);
        if (pipe(fd_pipe)) SETERRQ(PETSC_ERR_SYS,"Could not open pipe");
        close(1);
        dup2(fd_pipe[1], 1);
        ierr = PetscMalloc(SIZE_LOG * sizeof(char), &(party->mesg_log));CHKERRQ(ierr);
#endif

        /* library call */
        party_lib_times_start();
        ierr = party_lib(n, vertex_w, x, y, z, edge_p, edge, edge_w,
            p, part_party, &cutsize, redl, redm, redo,
            global, local, rec, output);

        party_lib_times_output(output);
        part_info(n, vertex_w, edge_p, edge, edge_w, p, part_party, output);

#ifdef PETSC_HAVE_UNISTD_H
        err = fflush(stdout);
        if (err) SETERRQ(PETSC_ERR_SYS,"fflush() failed on stdout");    
        count = read(fd_pipe[0], party->mesg_log, (SIZE_LOG - 1) * sizeof(char));
        if (count < 0) count = 0;
        party->mesg_log[count] = 0;
        close(1);
        dup2(fd_stdout, 1);
        close(fd_stdout);
        close(fd_pipe[0]);
        close(fd_pipe[1]);
#endif
        /* report any error returned by the library call */
        if (ierr) SETERRQ(PETSC_ERR_LIB, party->mesg_log);
        parttab = part_party;
    }

    /* Creation of the index set */
    ierr = MPI_Comm_rank(((PetscObject)part)->comm, &rank);CHKERRQ(ierr);
    ierr = MPI_Comm_size(((PetscObject)part)->comm, &size);CHKERRQ(ierr);
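    /* Block distribution of parttab across processes: each rank owns
       N/size entries, and the first N%size ranks take one extra; locals
       points at the start of this rank's contiguous block. */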
    nb_locals = mat->rmap->N / size;
    locals = parttab + rank * nb_locals;
    if (rank < mat->rmap->N % size) {
        nb_locals++;
        locals += rank;
    } else {
        locals += mat->rmap->N % size;
    }
    ierr = ISCreateGeneral(((PetscObject)part)->comm, nb_locals, locals, partitioning);CHKERRQ(ierr);

    /* destroying old objects */
    ierr = PetscFree(parttab);CHKERRQ(ierr);
    if (matSeq != mat) {
        ierr = MatDestroy(matSeq);CHKERRQ(ierr);
    }
    if (matMPI != mat) {
        ierr = MatDestroy(matMPI);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
}
Example #2
static PetscErrorCode MatPartitioningApply_Party(MatPartitioning part,IS *partitioning)
{
  PetscErrorCode        ierr;
  PetscInt              i,*parttab,*locals,nb_locals,M,N;
  PetscMPIInt           size,rank;
  Mat                   mat = part->adj,matAdj,matSeq,*A;
  Mat_MPIAdj            *adj;
  MatPartitioning_Party *party = (MatPartitioning_Party*)part->data;
  PetscBool             flg;
  IS                    isrow, iscol;
  int                   n,*edge_p,*edge,*vertex_w,p,*part_party,cutsize,redl,rec;
  const char            *redm,*redo;
  char                  *mesg_log;
#if defined(PETSC_HAVE_UNISTD_H)
  int                   fd_stdout,fd_pipe[2],count,err;
#endif

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)mat,MATMPIADJ,&flg);CHKERRQ(ierr);
  if (size>1) {
    if (flg) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"Distributed matrix format MPIAdj is not supported for sequential partitioners");
    ierr   = PetscInfo(part,"Converting distributed matrix to sequential: this could be a performance loss\n");CHKERRQ(ierr);
    ierr   = MatGetSize(mat,&M,&N);CHKERRQ(ierr);
    ierr   = ISCreateStride(PETSC_COMM_SELF,M,0,1,&isrow);CHKERRQ(ierr);
    ierr   = ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);CHKERRQ(ierr);
    ierr   = MatGetSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&A);CHKERRQ(ierr);
    ierr   = ISDestroy(&isrow);CHKERRQ(ierr);
    ierr   = ISDestroy(&iscol);CHKERRQ(ierr);
    matSeq = *A;
    ierr   = PetscFree(A);CHKERRQ(ierr);
  } else {
    ierr   = PetscObjectReference((PetscObject)mat);CHKERRQ(ierr);
    matSeq = mat;
  }

  if (!flg) { /* convert regular matrix to MPIADJ */
    ierr = MatConvert(matSeq,MATMPIADJ,MAT_INITIAL_MATRIX,&matAdj);CHKERRQ(ierr);
  } else {
    ierr   = PetscObjectReference((PetscObject)matSeq);CHKERRQ(ierr);
    matAdj = matSeq;
  }

  adj = (Mat_MPIAdj*)matAdj->data;  /* finally adj contains the adjacency graph */

  /* arguments for Party library */
  n        = mat->rmap->N;             /* number of vertices in full graph */
  edge_p   = adj->i;                   /* start of edge list for each vertex */
  edge     = adj->j;                   /* edge list data */
  vertex_w = part->vertex_weights;     /* weights for all vertices */
  p        = part->n;                  /* number of parts to create */
  redl     = party->nbvtxcoarsed;      /* how many vertices to coarsen down to? */
  rec      = party->recursive ? 1 : 0; /* recursive bisection */
  redm     = party->redm ? "lam" : ""; /* matching method */
  redo     = party->redo ? "w3" : "";  /* matching optimization method */

  ierr = PetscMalloc1(mat->rmap->N,&part_party);CHKERRQ(ierr);

  /* redirect output to buffer */
#if defined(PETSC_HAVE_UNISTD_H)
  fd_stdout = dup(1);
  if (pipe(fd_pipe)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"Could not open pipe");
  close(1);
  dup2(fd_pipe[1],1);
  ierr = PetscMalloc1(SIZE_LOG,&mesg_log);CHKERRQ(ierr);
#endif

  /* library call */
  party_lib_times_start();
  ierr = party_lib(n,vertex_w,NULL,NULL,NULL,edge_p,edge,NULL,p,part_party,&cutsize,redl,(char*)redm,(char*)redo,party->global,party->local,rec,1);

  party_lib_times_output(1);
  part_info(n,vertex_w,edge_p,edge,NULL,p,part_party,1);

#if defined(PETSC_HAVE_UNISTD_H)
  err = fflush(stdout);
  if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fflush() failed on stdout");
  count = read(fd_pipe[0],mesg_log,(SIZE_LOG-1)*sizeof(char));
  if (count<0) count = 0;
  mesg_log[count] = 0;
  close(1);
  dup2(fd_stdout,1);
  close(fd_stdout);
  close(fd_pipe[0]);
  close(fd_pipe[1]);
  if (party->verbose) {
    ierr = PetscPrintf(PetscObjectComm((PetscObject)mat),mesg_log);CHKERRQ(ierr);
  }
  ierr = PetscFree(mesg_log);CHKERRQ(ierr);
#endif
  if (ierr) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Party failed");

  ierr = PetscMalloc1(mat->rmap->N,&parttab);CHKERRQ(ierr);
  for (i=0; i<mat->rmap->N; i++) parttab[i] = part_party[i];

  /* creation of the index set */
  nb_locals = mat->rmap->N / size;
  locals    = parttab + rank*nb_locals;
  if (rank < mat->rmap->N % size) {
    nb_locals++;
    locals += rank;
  } else locals += mat->rmap->N % size;

  ierr = ISCreateGeneral(PetscObjectComm((PetscObject)part),nb_locals,locals,PETSC_COPY_VALUES,partitioning);CHKERRQ(ierr);

  /* clean up */
  ierr = PetscFree(parttab);CHKERRQ(ierr);
  ierr = PetscFree(part_party);CHKERRQ(ierr);
  ierr = MatDestroy(&matSeq);CHKERRQ(ierr);
  ierr = MatDestroy(&matAdj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
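
Neither version is called directly, since the routine is static; it is reached through the public MatPartitioning interface once the PARTY type is selected. Below is a minimal driver sketch using the newer API of Example #2, under the assumptions that PETSc was configured with Party support and that a toy 4-vertex ring graph, run on a single process, stands in for a real adjacency matrix; the graph data and variable names are illustrative only.

#include <petscmat.h>

int main(int argc, char **argv)
{
  Mat             adj;
  MatPartitioning part;
  IS              is;
  PetscInt        *ia, *ja;
  PetscErrorCode  ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL);if (ierr) return ierr;

  /* adjacency of a 4-vertex ring, owned entirely by this process;
     MatCreateMPIAdj takes ownership of ia and ja, so they are not freed here */
  ierr = PetscMalloc1(5, &ia);CHKERRQ(ierr);
  ierr = PetscMalloc1(8, &ja);CHKERRQ(ierr);
  ia[0]=0; ia[1]=2; ia[2]=4; ia[3]=6; ia[4]=8;
  ja[0]=1; ja[1]=3; ja[2]=0; ja[3]=2; ja[4]=1; ja[5]=3; ja[6]=0; ja[7]=2;
  ierr = MatCreateMPIAdj(PETSC_COMM_WORLD, 4, 4, ia, ja, NULL, &adj);CHKERRQ(ierr);

  ierr = MatPartitioningCreate(PETSC_COMM_WORLD, &part);CHKERRQ(ierr);
  ierr = MatPartitioningSetAdjacency(part, adj);CHKERRQ(ierr);
  ierr = MatPartitioningSetType(part, MATPARTITIONINGPARTY);CHKERRQ(ierr);
  ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
  ierr = MatPartitioningApply(part, &is);CHKERRQ(ierr); /* dispatches to MatPartitioningApply_Party */
  ierr = ISView(is, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

  ierr = ISDestroy(&is);CHKERRQ(ierr);
  ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
  ierr = MatDestroy(&adj);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}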