Example #1
int Zoltan_PHG_Redistribute(
  ZZ *zz, 
  PHGPartParams *hgp,     /* Input: parameters; used only for user's
                             request of nProc_x and nProc_y */
  HGraph  *ohg,           /* Input: Local part of distributed hypergraph */
  int     lo, int hi,     /* Input: range of proc ranks (inclusive)
                             to be included in new communicator: ncomm */
  PHGComm *ncomm,         /* Output: Communicators of new distribution */
  HGraph  *nhg,           /* Output: Newly redistributed hypergraph */
  int     **vmap,         /* Output: allocated with size nhg->nVtx; maps each
                             nhg vertex to its local vertex number in ohg */
  int     **vdest         /* Output: allocated with size nhg->nVtx; stores each
                             vertex's destination proc in ocomm */
    )   
{
    char * yo = "Zoltan_PHG_Redistribute";
    PHGComm *ocomm = ohg->comm;
    int     *v2Col, *n2Row, ierr=ZOLTAN_OK, i, *ranks;
    int     reqx=hgp->nProc_x_req, reqy=hgp->nProc_y_req;
    float   frac;
    MPI_Group allgrp, newgrp;
    MPI_Comm  nmpicomm;

    if (ocomm->nProc==1){
        errexit("%s: ocomm->nProc==1", yo);
        return ZOLTAN_FATAL;
    }

    /* create a new communicator for procs[lo..hi] */
    MPI_Comm_group(ocomm->Communicator, &allgrp);
    ranks = (int *) ZOLTAN_MALLOC(ocomm->nProc * sizeof(int));
    for (i=lo; i<=hi; ++i)
        ranks[i-lo] = i;
    
    MPI_Group_incl(allgrp, hi-lo+1, ranks, &newgrp);
    MPI_Comm_create(ocomm->Communicator, newgrp, &nmpicomm);
    MPI_Group_free(&newgrp);
    MPI_Group_free(&allgrp);   
    ZOLTAN_FREE(&ranks);

    /* Keep the user's nProc_x/nProc_y request only when it pins the layout
       to a single row or column of procs; otherwise let
       Zoltan_PHG_Set_2D_Proc_Distrib choose both dimensions. */
    if (reqx != 1 && reqy != 1)
        reqx = reqy = -1;
    
    /* fill ncomm */
    ierr = Zoltan_PHG_Set_2D_Proc_Distrib(ocomm->zz, nmpicomm, 
                                          ocomm->myProc-lo, hi-lo+1, 
                                          reqx, reqy, ncomm);
    
    v2Col = (int *) ZOLTAN_MALLOC(ohg->nVtx * sizeof(int));    
    n2Row = (int *) ZOLTAN_MALLOC(ohg->nEdge * sizeof(int));

    /* UVC: TODO: very simple, straightforward partitioning for now;
       later we can implement a more load-balanced or smarter
       mechanism */
    /* KDDKDD 5/11/07:  Round-off error in the computation of v2Col
     * and n2Row can lead to different answers on different platforms.
     * Vertices or edges get sent to different processors during the
     * split, resulting in different matchings and, thus, different
     * answers.
     * The problem was observed on hg_cage10, zdrive.inp.phg.ipm.nproc_vertex1
     * and zdrive.inp.phg.ipm.nproc_edge1;
     * the Solaris machine seamus and the Linux machine patches give different
     * results due to differences in n2Row and v2Col, respectively.
     * Neither answer is wrong,
     * but the Linux results produce a FAILED test in test_zoltan.
     */
    frac = (float) ohg->nVtx / (float) ncomm->nProc_x;
    for (i=0; i<ohg->nVtx; ++i) 
        v2Col[i] = (int) ((float) i / frac);
    frac = (float) ohg->nEdge / (float) ncomm->nProc_y;
    for (i=0; i<ohg->nEdge; ++i) 
        n2Row[i] = (int) ((float) i / frac);

    ierr |= Zoltan_PHG_Redistribute_Hypergraph(zz, hgp, ohg, lo, 
                                               v2Col, n2Row, ncomm, 
                                               nhg, vmap, vdest);
    Zoltan_Multifree(__FILE__, __LINE__, 2,
                     &v2Col, &n2Row);
    
    return ierr;
}
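
The KDDKDD note above traces the platform-dependent results to float round-off in the v2Col/n2Row block mapping. As a minimal sketch (not part of Zoltan), the same contiguous block split can be computed with integer arithmetic only, which rounds identically on every platform; the helper block_owner and the standalone driver below are hypothetical names used purely for illustration.

#include <stdio.h>

/* Hypothetical helper: assign item i of nItems to one of nParts contiguous
 * blocks using integer arithmetic only, so the result is identical on
 * every platform (no floating point is involved). */
static int block_owner(long long i, long long nItems, long long nParts)
{
    return (int) ((i * nParts) / nItems);
}

int main(void)
{
    long long nVtx = 10, nProc_x = 3, i;

    /* Same style of split as the v2Col loop above, but deterministic:
     * vertices 0-3 -> column 0, 4-6 -> column 1, 7-9 -> column 2. */
    for (i = 0; i < nVtx; ++i)
        printf("vertex %lld -> column %d\n", i, block_owner(i, nVtx, nProc_x));
    return 0;
}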