int
Zoltan_Distribute_layout (ZZ *zz, const PHGComm * const inlayout,
			  int loRank, int hiRank,
			  int reqx, int reqy,
			  PHGComm *outlayout)
{
  MPI_Group allgrp, newgrp;
  int *ranks;
  MPI_Comm  nmpicomm;
  MPI_Comm  ompicomm;
  int myProc;
  int i;
  int nProc;

  ompicomm = (inlayout != NULL) ? inlayout->Communicator : zz->Communicator;
  myProc   = (inlayout != NULL) ? inlayout->myProc       : zz->Proc;
  nProc    = (inlayout != NULL) ? inlayout->nProc        : zz->Num_Proc;
  /* A 2D layout cannot use a prime processor count; drop one rank. */
  if ((reqx != 1) && (reqy != 1) && (nProc > 3) && Zoltan_PHG_isPrime(nProc))
    nProc--;

  Zoltan_PHGComm_Init(outlayout);

  /* create a new communicator for procs[lo..hi] */

  MPI_Comm_group(ompicomm, &allgrp);
  ranks = (int *) ZOLTAN_MALLOC(nProc * sizeof(int));
  if (!ranks) return ZOLTAN_MEMERR;

  /* Include only the first nProc ranks starting at loRank; if nProc was
     reduced above, the last rank in [loRank..hiRank] is left out.  (Bounding
     the loop by nProc also keeps the write within the ranks allocation.) */
  for (i = 0; i < nProc; ++i)
    ranks[i] = loRank + i;

  MPI_Group_incl(allgrp, nProc, ranks, &newgrp);
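  /* Ranks outside the new group get MPI_COMM_NULL from MPI_Comm_create. */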
  MPI_Comm_create(ompicomm, newgrp, &nmpicomm);
  MPI_Group_free(&newgrp);
  MPI_Group_free(&allgrp);
  ZOLTAN_FREE(&ranks);

  return (Zoltan_PHG_Set_2D_Proc_Distrib(zz, nmpicomm,
					myProc-loRank, nProc,
					reqx, reqy, outlayout));
}
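A hypothetical usage sketch (the variable names are illustrative, not from the Zoltan sources): carving the lower half of the ranks into their own layout, with reqx = reqy = -1 letting the routine pick both grid dimensions, as Example #2 below also does:

/* Illustrative only: split off ranks [0 .. half-1] into a new 2D layout. */
PHGComm lower;
int half = zz->Num_Proc / 2;
int ierr = Zoltan_Distribute_layout(zz, NULL, 0, half - 1,
                                    -1, -1, &lower);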
Example #2
int Zoltan_PHG_Redistribute(
  ZZ *zz, 
  PHGPartParams *hgp,     /* Input: parameters; used only for user's
                             request of nProc_x and nProc_y */
  HGraph  *ohg,           /* Input: Local part of distributed hypergraph */
  int     lo, int hi,     /* Input: range of proc ranks (inclusive)
                             to be included in new communicator: ncomm */
  PHGComm *ncomm,         /* Output: Communicators of new distribution */
  HGraph  *nhg,           /* Output: Newly redistributed hypergraph */
  int     **vmap,         /* Output: allocated with the size nhg->nVtx and
                             vertex map from nhg to ohg's local vertex number*/
  int     **vdest         /* Output: allocated with the size nhg->nVtx and
                             stores dest proc in ocomm */
    )   
{
    char * yo = "Zoltan_PHG_Redistribute";
    PHGComm *ocomm = ohg->comm;
    int     *v2Col, *n2Row, ierr=ZOLTAN_OK, i, *ranks;
    int     reqx=hgp->nProc_x_req, reqy=hgp->nProc_y_req;
    float   frac;
    MPI_Group allgrp, newgrp;
    MPI_Comm  nmpicomm;

    if (ocomm->nProc==1){
        errexit("%s: ocomm->nProc==1", yo);
        return ZOLTAN_FATAL;
    }

    /* create a new communicator for procs[lo..hi] */
    MPI_Comm_group(ocomm->Communicator, &allgrp);
    ranks = (int *) ZOLTAN_MALLOC(ocomm->nProc * sizeof(int));
    if (!ranks) return ZOLTAN_MEMERR;
    for (i=lo; i<=hi; ++i)
        ranks[i-lo] = i;
    
    MPI_Group_incl(allgrp, hi-lo+1, ranks, &newgrp);
    MPI_Comm_create(ocomm->Communicator, newgrp, &nmpicomm);
    MPI_Group_free(&newgrp);
    MPI_Group_free(&allgrp);   
    ZOLTAN_FREE(&ranks);

    /* Honor an explicit 1D request (nProc_x or nProc_y equal to 1);
       otherwise let Zoltan_PHG_Set_2D_Proc_Distrib pick both dimensions. */
    if (reqx != 1 && reqy != 1)
        reqx = reqy = -1;
    
    /* fill ncomm */
    ierr = Zoltan_PHG_Set_2D_Proc_Distrib(ocomm->zz, nmpicomm, 
                                          ocomm->myProc-lo, hi-lo+1, 
                                          reqx, reqy, ncomm);
    
    v2Col = (int *) ZOLTAN_MALLOC(ohg->nVtx * sizeof(int));    
    n2Row = (int *) ZOLTAN_MALLOC(ohg->nEdge * sizeof(int));

    /* UVC: TODO: very simple, straightforward partitioning for now;
       later we can implement a more load-balanced or smarter mechanism. */
    /* KDDKDD 5/11/07:  Round-off error in the computation of v2Col
     * and n2Row can lead to different answers on different platforms.
     * Vertices or edges get sent to different processors during the
     * split, resulting in different matchings and, thus, different
     * answers.
     * The problem was observed on hg_cage10 with
     * zdrive.inp.phg.ipm.nproc_vertex1 and zdrive.inp.phg.ipm.nproc_edge1;
     * the solaris machine seamus and the linux machine patches give
     * different results due to differences in n2Row and v2Col,
     * respectively.  Neither answer is wrong, but the linux results
     * cause a FAILED test in test_zoltan.
     */
    frac = (float) ohg->nVtx / (float) ncomm->nProc_x;
    for (i=0; i<ohg->nVtx; ++i) 
        v2Col[i] = (int) ((float) i / frac);
    frac = (float) ohg->nEdge / (float) ncomm->nProc_y;
    for (i=0; i<ohg->nEdge; ++i) 
        n2Row[i] = (int) ((float) i / frac);
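    /* A platform-independent alternative (a sketch, not the shipped code):
     * pure integer arithmetic sidesteps the float round-off described in
     * the KDDKDD note above, e.g.
     *
     *   v2Col[i] = (int)(((long long) i * ncomm->nProc_x) / ohg->nVtx);
     *   n2Row[i] = (int)(((long long) i * ncomm->nProc_y) / ohg->nEdge);
     *
     * which yields identical block boundaries on every platform. */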

    ierr |= Zoltan_PHG_Redistribute_Hypergraph(zz, hgp, ohg, lo, 
                                               v2Col, n2Row, ncomm, 
                                               nhg, vmap, vdest);
    Zoltan_Multifree(__FILE__, __LINE__, 2,
                     &v2Col, &n2Row);
    
    return ierr;
}
Example #3
int Zoltan_PHG_Initialize_Params(
  ZZ *zz,   /* the Zoltan data structure */
  float *part_sizes,
  PHGPartParams *hgp
)
{
  int err = ZOLTAN_OK;
  char *yo = "Zoltan_PHG_Initialize_Params";
  int nProc;
  int usePrimeComm;
  MPI_Comm communicator;
  char add_obj_weight[MAX_PARAM_STRING_LEN];
  char edge_weight_op[MAX_PARAM_STRING_LEN];
  char cut_objective[MAX_PARAM_STRING_LEN];
  char *package = hgp->hgraph_pkg; 
  char *method = hgp->hgraph_method;
  char buf[1024];

  memset(hgp, 0, sizeof(*hgp)); /* if we forget to initialize another param
                                   in the future, at least it will be 0 */
  
  Zoltan_Bind_Param(PHG_params, "HYPERGRAPH_PACKAGE", &hgp->hgraph_pkg);
  Zoltan_Bind_Param(PHG_params, "PHG_MULTILEVEL", &hgp->useMultilevel);
  Zoltan_Bind_Param(PHG_params, "PHG_FROM_GRAPH_METHOD", hgp->convert_str);  
  Zoltan_Bind_Param(PHG_params, "PHG_OUTPUT_LEVEL", &hgp->output_level);
  Zoltan_Bind_Param(PHG_params, "FINAL_OUTPUT", &hgp->final_output); 
  Zoltan_Bind_Param(PHG_params, "CHECK_GRAPH", &hgp->check_graph);   
  Zoltan_Bind_Param(PHG_params, "CHECK_HYPERGRAPH", &hgp->check_graph);   
  Zoltan_Bind_Param(PHG_params, "PHG_NPROC_VERTEX", &hgp->nProc_x_req);
  Zoltan_Bind_Param(PHG_params, "PHG_NPROC_EDGE", &hgp->nProc_y_req);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_LIMIT", &hgp->redl);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_NCANDIDATE", &hgp->nCand);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_METHOD", hgp->redm_str);
  Zoltan_Bind_Param(PHG_params, "PHG_COARSENING_METHOD_FAST", hgp->redm_fast);
  Zoltan_Bind_Param(PHG_params, "PHG_VERTEX_VISIT_ORDER", &hgp->visit_order);
  Zoltan_Bind_Param(PHG_params, "PHG_EDGE_SCALING", &hgp->edge_scaling);
  Zoltan_Bind_Param(PHG_params, "PHG_VERTEX_SCALING", &hgp->vtx_scaling);
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_METHOD", hgp->refinement_str);
  Zoltan_Bind_Param(PHG_params, "PHG_DIRECT_KWAY", &hgp->kway);
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_LOOP_LIMIT", 
                                &hgp->fm_loop_limit);
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_MAX_NEG_MOVE", 
                                &hgp->fm_max_neg_move);  
  Zoltan_Bind_Param(PHG_params, "PHG_REFINEMENT_QUALITY", 
                                &hgp->refinement_quality);  
  Zoltan_Bind_Param(PHG_params, "PHG_COARSEPARTITION_METHOD", 
                                 hgp->coarsepartition_str);
  Zoltan_Bind_Param(PHG_params, "PHG_USE_TIMERS",
                                 (void*) &hgp->use_timers);  
  Zoltan_Bind_Param(PHG_params, "USE_TIMERS",
                                 (void*) &hgp->use_timers);  
  Zoltan_Bind_Param(PHG_params, "PHG_EDGE_SIZE_THRESHOLD",
                                 (void*) &hgp->EdgeSizeThreshold);  
  Zoltan_Bind_Param(PHG_params, "PHG_MATCH_EDGE_SIZE_THRESHOLD",
                                 (void*) &hgp->MatchEdgeSizeThreshold);  
  Zoltan_Bind_Param(PHG_params, "PHG_BAL_TOL_ADJUSTMENT",
                                 (void*) &hgp->bal_tol_adjustment);  
  Zoltan_Bind_Param(PHG_params, "PARKWAY_SERPART",
                                 (void *) hgp->parkway_serpart);
  Zoltan_Bind_Param(PHG_params, "PHG_CUT_OBJECTIVE",
                                 (void *) &cut_objective);
  Zoltan_Bind_Param(PHG_params, "ADD_OBJ_WEIGHT",
                                 (void *) add_obj_weight);
  Zoltan_Bind_Param(PHG_params, "PHG_EDGE_WEIGHT_OPERATION",
                                 (void *) edge_weight_op);
  Zoltan_Bind_Param(PHG_params, "PHG_RANDOMIZE_INPUT",
                                 (void*) &hgp->RandomizeInitDist);  
  Zoltan_Bind_Param(PHG_params, "PHG_PROCESSOR_REDUCTION_LIMIT",
		                 (void*) &hgp->ProRedL);
  Zoltan_Bind_Param(PHG_params, "PHG_REPART_MULTIPLIER",
		                 (void*) &hgp->RepartMultiplier);
  Zoltan_Bind_Param(PHG_params, "PATOH_ALLOC_POOL0",
                                 (void*) &hgp->patoh_alloc_pool0);
  Zoltan_Bind_Param(PHG_params, "PATOH_ALLOC_POOL1",
                                 (void*) &hgp->patoh_alloc_pool1);
  
  
  /* Set default values */
  strncpy(hgp->hgraph_pkg,           "phg", MAX_PARAM_STRING_LEN);
  strncpy(hgp->convert_str,    "neighbors", MAX_PARAM_STRING_LEN);
  strncpy(hgp->redm_str,             "agg", MAX_PARAM_STRING_LEN);
  hgp->match_array_type = 0;
  strncpy(hgp->redm_fast,          "l-ipm", MAX_PARAM_STRING_LEN);
  strncpy(hgp->coarsepartition_str, "auto", MAX_PARAM_STRING_LEN);
  strncpy(hgp->refinement_str,       "fm2", MAX_PARAM_STRING_LEN);
  strncpy(hgp->parkway_serpart,    "patoh", MAX_PARAM_STRING_LEN);
  strncpy(cut_objective,    "connectivity", MAX_PARAM_STRING_LEN);
  strncpy(add_obj_weight,           "none", MAX_PARAM_STRING_LEN);
  strncpy(edge_weight_op,            "max", MAX_PARAM_STRING_LEN);
  /* LB.Approach is initialized to "REPARTITION", and set in Set_Key_Params  */
  strncpy(hgp->hgraph_method,  zz->LB.Approach, MAX_PARAM_STRING_LEN);
  if (!strcasecmp(zz->LB.Approach,"REFINE")) 
    hgp->useMultilevel = 0;
  else
    hgp->useMultilevel = 1;

  hgp->use_timers = 0;
  hgp->LocalCoarsePartition = 0;
  hgp->edge_scaling = 0;
  hgp->vtx_scaling = 0;
  hgp->vtx_scal_size = 0;
  hgp->vtx_scal = NULL;  /* Array for storing vertex degree scale vector. 
                            Should perhaps go in hg structure, not the
                            param struct? */
  hgp->connectivity_cut = 1; 
  hgp->visit_order = 0;  /* Random */
  hgp->check_graph = 0;
  hgp->bal_tol = zz->LB.Imbalance_Tol[0]; /* Make vector for multiconstraint */
  hgp->bal_tol_adjustment = 0.7;
  hgp->nCand = 100;
  hgp->redl = MAX(2*zz->LB.Num_Global_Parts, 100);
  hgp->output_level = PHG_DEBUG_NONE;
  hgp->final_output = 0;
  hgp->nProc_x_req = -1;
  hgp->nProc_y_req = -1;
  hgp->kway = 0;
  hgp->fm_loop_limit = 10;
  hgp->fm_max_neg_move = 250;  
  hgp->refinement_quality = 1;
  hgp->RandomizeInitDist = 0;
  hgp->EdgeSizeThreshold = 0.25;
  hgp->MatchEdgeSizeThreshold = 500;  
  hgp->hybrid_keep_factor = 0.;
  hgp->ProRedL = 0.0; /* UVCUVC: CHECK default set to 0 until we run more experiments */
  hgp->RepartMultiplier = 100.;
  hgp->patoh_alloc_pool0 = 0;
  hgp->patoh_alloc_pool1 = 0;
  hgp->UseFixedVtx = 0;
  hgp->UsePrefPart = 0;
  
  /* Get application values of parameters. */
  err = Zoltan_Assign_Param_Vals(zz->Params, PHG_params, zz->Debug_Level, 
          zz->Proc, zz->Debug_Proc);
  
  nProc = zz->Num_Proc;
  usePrimeComm = 0;

  /* Parse add_obj_weight parameter */

  if (!strcasecmp(add_obj_weight, "none")) {
    hgp->add_obj_weight = PHG_ADD_NO_WEIGHT;
    hgp->part_sizes = part_sizes;
  }
  else if (zz->Obj_Weight_Dim > 0) {
    /* Do not add an object weight until multiconstraint PHG is implemented */
    ZOLTAN_PRINT_WARN(zz->Proc, yo,
     "Both application supplied *and* ADD_OBJ_WEIGHT "
     "calculated vertex weights were provided.");
    ZOLTAN_PRINT_WARN(zz->Proc, yo,
      "Only the first application supplied weight per vertex will be used.");
    hgp->add_obj_weight = PHG_ADD_NO_WEIGHT;
    hgp->part_sizes = part_sizes;
  } 
  else {
    if (!strcasecmp(add_obj_weight, "vertices")){
      hgp->add_obj_weight = PHG_ADD_UNIT_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "unit")){
      hgp->add_obj_weight = PHG_ADD_UNIT_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "vertex degree")){
      hgp->add_obj_weight = PHG_ADD_PINS_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "nonzeros")){
      hgp->add_obj_weight = PHG_ADD_PINS_WEIGHT;
    } else if (!strcasecmp(add_obj_weight, "pins")){
      hgp->add_obj_weight = PHG_ADD_PINS_WEIGHT;
    } else{
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Invalid ADD_OBJ_WEIGHT parameter.\n");
      err = ZOLTAN_WARN;
    }
    /* Set hgp->part_sizes to new array of part_sizes with added obj weight. */
    if (part_sizes)
      err = Zoltan_LB_Add_Part_Sizes_Weight(zz, 
                          (zz->Obj_Weight_Dim ? zz->Obj_Weight_Dim : 1), 
                          zz->Obj_Weight_Dim+1, 
                          part_sizes, &(hgp->part_sizes));
  }

  if ((zz->Obj_Weight_Dim==0) &&      /* no application supplied weights */
      (hgp->add_obj_weight==PHG_ADD_NO_WEIGHT)){ /* no calculated weight */

    hgp->add_obj_weight = PHG_ADD_UNIT_WEIGHT; /* default object weight */
  }

  if (!strcasecmp(cut_objective, "default")
      || !strcasecmp(cut_objective, "connectivity"))
      hgp->connectivity_cut = 1;
  else if (!strcasecmp(cut_objective, "hyperedges"))
      hgp->connectivity_cut = 0;
  else {
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Invalid PHG_CUT_OBJECTIVE parameter.\n");
      goto End;
  }

  if (!strcasecmp(edge_weight_op, "max")){
    hgp->edge_weight_op = PHG_MAX_EDGE_WEIGHTS;
  } else if (!strcasecmp(edge_weight_op, "add")){
    hgp->edge_weight_op = PHG_ADD_EDGE_WEIGHTS;
  } else if (!strcasecmp(edge_weight_op, "error")){
    hgp->edge_weight_op = PHG_FLAG_ERROR_EDGE_WEIGHTS;
  } else{
    ZOLTAN_PRINT_ERROR(zz->Proc, yo,
      "Invalid PHG_EDGE_WEIGHT_OPERATION parameter.\n");
    err = ZOLTAN_WARN;
  }

  if ((strcasecmp(method, "PARTITION")) &&
      (strcasecmp(method, "REPARTITION")) &&
      (strcasecmp(method, "REFINE"))) {
    sprintf(buf,"%s is not a valid hypergraph method\n",method);
    ZOLTAN_PRINT_ERROR (zz->Proc, yo, buf);
    err = ZOLTAN_FATAL;
    goto End;
  }

  /* Adjust refinement parameters using hgp->refinement_quality */
  if (hgp->refinement_quality < 0.5/hgp->fm_loop_limit) 
    /* No refinement */
    strncpy(hgp->refinement_str,      "no",   MAX_PARAM_STRING_LEN);
  else {
    /* Scale FM parameters */
    hgp->fm_loop_limit   *= hgp->refinement_quality;
    hgp->fm_max_neg_move *= hgp->refinement_quality;
  }
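  /* For example: with the defaults fm_loop_limit=10 and fm_max_neg_move=250,
     a refinement_quality below 0.05 disables refinement entirely, while
     refinement_quality=2 scales them up to 20 passes and 500 moves. */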

  if (!strcasecmp(package, "PHG")){
    /* Test whether we should change the number of processors used for
       partitioning to allow a more efficient 2D decomposition. */

    if (hgp->nProc_x_req != 1 && hgp->nProc_y_req != 1)  /* Want 2D decomp */
      if (zz->Num_Proc > SMALL_PRIME && Zoltan_PHG_isPrime(zz->Num_Proc)) 
        /* 2D data decomposition is requested but we have a prime 
         * number of processors. */
        usePrimeComm = 1;

    if ((!strcasecmp(method, "REPARTITION"))){
        zz->LB.Remap_Flag = 0;
    }

    if ((!strcasecmp(method, "REPARTITION")) ||
        (!strcasecmp(method, "REFINE"))) {
        hgp->fm_loop_limit = 4; /* Experimental evaluation showed that for
                                   repartitioning/refinement a small number
                                   of passes is "good enough".  These are all
                                   heuristics, so pathological cases can be
                                   constructed, but in general this seems
                                   to be sufficient. */
    }
    
    if (!hgp->useMultilevel) {
        /* don't do coarsening */
        strncpy(hgp->redm_str, "no", MAX_PARAM_STRING_LEN);

        /* All coarse partitioners have been modified to handle a preferred
           part.  The user may still choose a partitioner explicitly;
           otherwise the default partitioner (greedy growing) works better
           than the previous default partitioning for phg_refine ("no"). */
        hgp->UsePrefPart = 1;

    }
    if (!strcasecmp(method, "REFINE") && hgp->useMultilevel){
        /* UVCUVC: as a heuristic we prefer local matching;
           in our experiments for IPDPS'07 and WileyChapter, multilevel_refine
           didn't prove itself useful; it is too costly even with local
           matching, hence it will not be released yet (i.e. not in v3). */
        strncpy(hgp->redm_str, "l-ipm", MAX_PARAM_STRING_LEN);                
        hgp->UsePrefPart = 1;
    }    
  }
  else if (!strcasecmp(package, "PARKWAY")){
    if (hgp->nProc_x_req>1) {
      err = ZOLTAN_FATAL;
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "ParKway requires nProc_x=1 or -1.");
      goto End;
    }
    hgp->nProc_x_req = 1;
  } 
  else if (!strcasecmp(package, "PATOH")){
    if (zz->Num_Proc>1) {
      err = ZOLTAN_FATAL;
      ZOLTAN_PRINT_ERROR(zz->Proc, yo, "PaToH only works with Num_Proc=1.");
      goto End;
    }
  }

  if (!usePrimeComm)
    MPI_Comm_dup(zz->Communicator, &communicator);
  else {
    MPI_Group newgrp, zzgrp;
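    /* Exclude the highest rank (zz->Num_Proc - 1) so the remaining
       processor count is no longer prime. */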
    nProc--;
    MPI_Comm_group(zz->Communicator, &zzgrp);
    MPI_Group_excl(zzgrp, 1, &nProc, &newgrp);
    MPI_Comm_create(zz->Communicator, newgrp, &communicator);
    MPI_Group_free(&newgrp);
    MPI_Group_free(&zzgrp);
  }

  err = Zoltan_PHG_Set_2D_Proc_Distrib(zz, communicator, zz->Proc, 
                                       nProc, hgp->nProc_x_req, 
                                       hgp->nProc_y_req, 
                                       &hgp->globalcomm);
  if (err != ZOLTAN_OK) 
    goto End;

  /* Convert strings to function pointers. */
  err = Zoltan_PHG_Set_Part_Options (zz, hgp);
  
End:
  return err;
}
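The prime-count workaround above exists because a 2D layout needs a composite processor count. A minimal sketch of a near-square factorization, assuming (an assumption for illustration, not the shipped routine) that Zoltan_PHG_Set_2D_Proc_Distrib picks factors close to sqrt(nProc), shows why a prime count degenerates to a 1 x nProc layout:

#include <math.h>

/* Sketch only: choose the largest divisor of nProc that is <= sqrt(nProc).
   For a prime nProc the loop runs down to x == 1, giving a degenerate
   1 x nProc grid -- which is why the code above drops one rank. */
static void near_square_layout(int nProc, int *nProc_x, int *nProc_y)
{
    int x = (int) sqrt((double) nProc);
    while (x > 1 && nProc % x != 0)
        --x;
    *nProc_x = x;
    *nProc_y = nProc / x;
}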
Example #4
int Zoltan_PHG_Redistribute(
  ZZ *zz, 
  PHGPartParams *hgp,     /* Input: parameters; used only for user's
                             request of nProc_x and nProc_y */
  HGraph  *ohg,           /* Input: Local part of distributed hypergraph */
  int     lo, int hi,     /* Input: range of proc ranks (inclusive)
                             to be included in new communicator: ncomm */
  PHGComm *ncomm,         /* Output: Communicators of new distribution */
  HGraph  *nhg,           /* Output: Newly redistributed hypergraph */
  int     **vmap,         /* Output: allocated with the size nhg->nVtx and
                             vertex map from nhg to ohg's local vertex number*/
  int     **vdest         /* Output: allocated with the size nhg->nVtx and
                             stores dest proc in ocomm */
    )   
{
    char * yo = "Zoltan_PHG_Redistribute";
    PHGComm *ocomm = ohg->comm;
    int     *v2Col, *n2Row, ierr=ZOLTAN_OK, i, *ranks;
    int     reqx=hgp->nProc_x_req, reqy=hgp->nProc_y_req;
    float   frac;
    MPI_Group allgrp, newgrp;
    MPI_Comm  nmpicomm;

    if (ocomm->nProc==1){
        errexit("%s: ocomm->nProc==1", yo);
        return ZOLTAN_FATAL;
    }

    /* create a new communicator for procs[lo..hi] */
    MPI_Comm_group(ocomm->Communicator, &allgrp);
    ranks = (int *) ZOLTAN_MALLOC(ocomm->nProc * sizeof(int));
    if (!ranks) return ZOLTAN_MEMERR;
    for (i=lo; i<=hi; ++i)
        ranks[i-lo] = i;
    
    MPI_Group_incl(allgrp, hi-lo+1, ranks, &newgrp);
    MPI_Comm_create(ocomm->Communicator, newgrp, &nmpicomm);
    MPI_Group_free(&newgrp);
    MPI_Group_free(&allgrp);   
    ZOLTAN_FREE(&ranks);

    /* Honor an explicit 1D request (nProc_x or nProc_y equal to 1);
       otherwise let Zoltan_PHG_Set_2D_Proc_Distrib pick both dimensions. */
    if (reqx != 1 && reqy != 1)
        reqx = reqy = -1;
    
    /* fill ncomm */
    ierr = Zoltan_PHG_Set_2D_Proc_Distrib(ocomm->zz, nmpicomm, 
                                          ocomm->myProc-lo, hi-lo+1, 
                                          reqx, reqy, ncomm);
    
    v2Col = (int *) ZOLTAN_MALLOC(ohg->nVtx * sizeof(int));    
    n2Row = (int *) ZOLTAN_MALLOC(ohg->nEdge * sizeof(int));

    /* UVC: TODO: very simple, straightforward partitioning for now;
       later we can implement a more load-balanced or smarter mechanism. */
    frac = (float) ohg->nVtx / (float) ncomm->nProc_x;
    for (i=0; i<ohg->nVtx; ++i) 
        v2Col[i] = (int) ((float) i / frac);
    frac = (float) ohg->nEdge / (float) ncomm->nProc_y;
    for (i=0; i<ohg->nEdge; ++i) 
        n2Row[i] = (int) ((float) i / frac);

    ierr |= Zoltan_PHG_Redistribute_Hypergraph(zz, ohg, lo, v2Col, n2Row,
                                               ncomm, nhg, vmap, vdest);
    Zoltan_Multifree(__FILE__, __LINE__, 2,
                     &v2Col, &n2Row);
    
    return ierr;
}