Example #1
static int dist_hyperedges(
  MPI_Comm comm,		/* MPI Communicator */
  PARIO_INFO_PTR pio_info,      /* Parallel IO info */
  int     host_proc,		/* processor where all the data is initially */
  int     base,                 /* indicates whether input is 0-based or
				   1-based (i.e., is lowest vertex number
				   0 or 1?). */
  int     gnvtxs,               /* global number of vertices */
  int     *gnhedges,		/* global number of hyperedges */
  int     *nhedges,		/* local number of hyperedges */
  int     **hgid,		/* global hyperedge numbers */
  int     **hindex,		/* Starting hvertex entry for hyperedges */
  int     **hvertex,		/* Array of vertices in hyperedges; returned
				   values are global IDs for vertices */
  int     **hvertex_proc,	/* Array of processor assignments for
				   vertices in hvertex.  */
  int     *hewgt_dim,           /* number of weights per hyperedge */
  float   **hewgts,		/* hyperedge weight list data */
  short   *assignments          /* assignments from Chaco file; may be NULL */
)
{
/*
 * Distribute hyperedges from one processor to all processors.
 * Vertex distribution is assumed to have been done already via chaco_dist_graph.
 * The memory for the hyperedges on the host node is freed
 * and fresh memory is allocated for the distributed hyperedges.
 */

const char *yo = "dist_hyperedges";
int nprocs, myproc, i, h, p = 0;
int *old_hindex = NULL, *old_hvertex = NULL, *old_hvertex_proc = NULL;
int *size = NULL, *num_send = NULL;
int *send = NULL;
int *send_hgid = NULL, *send_hindex = NULL;
int *send_hvertex = NULL, *send_hvertex_proc = NULL;
int max_size, max_num_send;
int hcnt[2];
int hecnt, hvcnt;
float *old_hewgts = NULL;
float *send_hewgts = NULL;
MPI_Status status;
int num_dist_procs;
int hedge_init_dist_type;

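  /* Presumably, INITIAL_FILE assignments apply only to vertices, so
     hyperedges fall back to a linear distribution in that case. */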
  hedge_init_dist_type = (pio_info->init_dist_type != INITIAL_FILE
			  ? pio_info->init_dist_type
			  : INITIAL_LINEAR);

  /* Determine number of processors and my rank. */
  MPI_Comm_size (comm, &nprocs );
  MPI_Comm_rank (comm, &myproc );

  DEBUG_TRACE_START(myproc, yo);

  if (pio_info->init_dist_procs > 0 && pio_info->init_dist_procs <= nprocs)
    num_dist_procs = pio_info->init_dist_procs;
  else
    /* Reset num_dist_procs if not set validly by input */
    num_dist_procs = nprocs;

  /* Broadcast to all procs */
  MPI_Bcast( hewgt_dim, 1, MPI_INT, host_proc, comm);
  MPI_Bcast( nhedges, 1, MPI_INT, host_proc, comm);
  *gnhedges = *nhedges;

  /* Initialize */
  if (*gnhedges == 0) {
    *hindex = (int *) malloc(sizeof(int));
    if (!(*hindex)) {
      Gen_Error(0, "fatal: insufficient memory");
      return 0;
    }
    (*hindex)[0] = 0;
    *hvertex = NULL;
    *hvertex_proc = NULL;
    return 1;
  }

  if (nprocs == 1) {
    *nhedges = *gnhedges;
    *hgid = (int *) malloc(*gnhedges * sizeof(int));
    *hvertex_proc = (int *) malloc((*hindex)[*gnhedges] * sizeof(int));
    if ((*gnhedges && !(*hgid)) || ((*hindex)[*gnhedges] && !(*hvertex_proc))) {
      Gen_Error(0, "fatal: insufficient memory");
      return 0;
    }
    for (h = 0; h < *gnhedges; h++)
      (*hgid)[h] = h + base;           /* Want the same numbering as for vertices */
    for (h = 0; h < (*hindex)[*gnhedges]; h++)
      (*hvertex_proc)[h] = 0;
    return 1;
  }
  if (myproc == host_proc) {
    /* Store pointers to original data */
    old_hindex  = *hindex;
    old_hvertex = *hvertex;
    old_hewgts = *hewgts;
    old_hvertex_proc = (int *) malloc(old_hindex[*gnhedges] * sizeof(int));

    /* Allocate space for size and send flags */
    size = (int *) calloc(2 * nprocs, sizeof(int));
    num_send = size + nprocs;
    send = (int *) malloc(*gnhedges * sizeof(int));
    if ((old_hindex[*gnhedges] && !old_hvertex_proc) || !size ||
        (*gnhedges && !send)) {
      Gen_Error(0, "fatal: memory error");
      return 0;
    }

    /* Determine to which processors hyperedges should be sent */
    for (h = 0; h < *gnhedges; h++) {
      if (hedge_init_dist_type == INITIAL_CYCLIC)
	p = h % num_dist_procs;
      else if (hedge_init_dist_type == INITIAL_LINEAR)
	p = (int) ((float) h * (float) num_dist_procs / (float)(*gnhedges));
      else if (hedge_init_dist_type == INITIAL_OWNER)
	p = ch_dist_proc(old_hvertex[old_hindex[h]], assignments, base);
      size[p] += (old_hindex[h+1] - old_hindex[h]);
      num_send[p]++;
      send[h] = p;
      for (i = old_hindex[h]; i < old_hindex[h+1]; i++)
	old_hvertex_proc[i] = ch_dist_proc(old_hvertex[i], assignments, base);
    }

    /* Determine size of send buffers and allocate them. */
    max_size = 0;
    max_num_send = 0;
    for (p = 0; p < nprocs; p++) {
      if (size[p] > max_size) max_size = size[p];
      if (num_send[p] > max_num_send) max_num_send = num_send[p];
    }

    send_hgid = (int *) malloc((max_num_send) * sizeof(int));
    send_hindex = (int *) malloc((max_num_send+1) * sizeof(int));
    send_hvertex = (int *) malloc(max_size * sizeof(int));
    send_hvertex_proc = (int *) malloc(max_size * sizeof(int));
    if (*hewgt_dim)
      send_hewgts = (float *) malloc(max_num_send*(*hewgt_dim)*sizeof(float));
    if ((max_num_send && !send_hgid) || !send_hindex ||
	(max_size && (!send_hvertex || !send_hvertex_proc)) ||
	(max_num_send && *hewgt_dim && !send_hewgts)) {
      Gen_Error(0, "fatal: memory error in dist_hyperedges");
      return 0;
    }

    /* Load and send data */
    for (p = 0; p < nprocs; p++) {

      if (p == myproc) continue;

      hecnt = 0;
      hvcnt = 0;
      send_hindex[0] = 0;
      for (h = 0; h < *gnhedges; h++) {
	if (send[h]==p) {
	  send_hgid[hecnt] = h + base; /* Want the same numbering as for vertices */
	  send_hindex[hecnt+1] = send_hindex[hecnt]
			       + (old_hindex[h+1] - old_hindex[h]);
	  for (i = 0; i < *hewgt_dim; i++)
	    send_hewgts[hecnt*(*hewgt_dim)+i] = old_hewgts[h*(*hewgt_dim)+i];
	  hecnt++;
	  for (i = old_hindex[h]; i < old_hindex[h+1]; i++) {
	    send_hvertex[hvcnt] = old_hvertex[i];
	    send_hvertex_proc[hvcnt] = old_hvertex_proc[i];
	    hvcnt++;
	  }
	}
      }

      hcnt[0] = hecnt;
      hcnt[1] = hvcnt;

      MPI_Send(hcnt, 2, MPI_INT, p, 1, comm);
      MPI_Send(send_hgid, hecnt, MPI_INT, p, 2, comm);
      MPI_Send(send_hindex, hecnt+1, MPI_INT, p, 3, comm);
      MPI_Send(send_hvertex, hvcnt, MPI_INT, p, 4, comm);
      MPI_Send(send_hvertex_proc, hvcnt, MPI_INT, p, 5, comm);
      if (*hewgt_dim)
	MPI_Send(send_hewgts, hecnt*(*hewgt_dim), MPI_FLOAT, p, 6, comm);
    }

    safe_free((void **)(void *) &send_hgid);
    safe_free((void **)(void *) &send_hindex);
    safe_free((void **)(void *) &send_hvertex);
    safe_free((void **)(void *) &send_hvertex_proc);
    safe_free((void **)(void *) &send_hewgts);

    /* Copy data owned by myproc into new local storage */
    *nhedges = num_send[myproc];
    *hgid = (int *) malloc(*nhedges * sizeof(int));
    *hindex = (int *) malloc((*nhedges+1) * sizeof(int));
    *hvertex = (int *) malloc(size[myproc] * sizeof(int));
    *hvertex_proc = (int *) malloc(size[myproc] * sizeof(int));
    if (*hewgt_dim)
      *hewgts = (float *) malloc(*nhedges * *hewgt_dim * sizeof(float));
    if ((*nhedges && !(*hgid)) || !(*hindex) ||
	(size[myproc] && (!(*hvertex) || !(*hvertex_proc))) ||
	(*nhedges && *hewgt_dim && !(*hewgts))) {
      Gen_Error(0, "fatal: memory error in dist_hyperedges");
      return 0;
    }

    hecnt = 0;
    hvcnt = 0;
    (*hindex)[0] = 0;

    for (h = 0; h < *gnhedges; h++) {
      if (send[h]==myproc) {
	(*hgid)[hecnt] = h + base;  /* Want the same numbering as for vertices */
	(*hindex)[hecnt+1] = (*hindex)[hecnt]
			   + (old_hindex[h+1] - old_hindex[h]);
	for (i = 0; i < *hewgt_dim; i++)
	  (*hewgts)[hecnt*(*hewgt_dim)+i] = old_hewgts[h*(*hewgt_dim)+i];
	hecnt++;
	for (i = old_hindex[h]; i < old_hindex[h+1]; i++) {
	  (*hvertex_proc)[hvcnt] = old_hvertex_proc[i];
	  (*hvertex)[hvcnt] = old_hvertex[i];  /* Global index */
	  hvcnt++;
	}
      }
    }

    safe_free((void **)(void *) &send);
    safe_free((void **)(void *) &size);
    safe_free((void **)(void *) &old_hindex);
    safe_free((void **)(void *) &old_hvertex);
    safe_free((void **)(void *) &old_hvertex_proc);
    safe_free((void **)(void *) &old_hewgts);
  }
  else {
    /* host_proc != myproc; receive hedge data from host_proc */
    MPI_Recv(hcnt, 2, MPI_INT, host_proc, 1, comm, &status);
    *nhedges = hcnt[0];
    *hgid = (int *) malloc(hcnt[0] * sizeof(int));
    *hindex = (int *) malloc((hcnt[0]+1) * sizeof(int));
    *hvertex = (int *) malloc(hcnt[1] * sizeof(int));
    *hvertex_proc = (int *) malloc(hcnt[1] * sizeof(int));
    if (*hewgt_dim)
      *hewgts = (float *) malloc(hcnt[0] * *hewgt_dim * sizeof(float));
    if ((hcnt[0] && !(*hgid)) || !(*hindex) ||
	(hcnt[1] && (!(*hvertex) || !(*hvertex_proc))) ||
	(hcnt[0] && *hewgt_dim && !(*hewgts))) {
      Gen_Error(0, "fatal: memory error in dist_hyperedges");
      return 0;
    }
    MPI_Recv(*hgid, hcnt[0], MPI_INT, host_proc, 2, comm, &status);
    MPI_Recv(*hindex, hcnt[0]+1, MPI_INT, host_proc, 3, comm, &status);
    MPI_Recv(*hvertex, hcnt[1], MPI_INT, host_proc, 4, comm, &status);
    MPI_Recv(*hvertex_proc, hcnt[1], MPI_INT, host_proc, 5, comm, &status);
    if (*hewgt_dim)
      MPI_Recv(*hewgts, hcnt[0]* *hewgt_dim, MPI_FLOAT, host_proc, 6, comm,
	       &status);
  }

  DEBUG_TRACE_END(myproc, yo);
  return 1;
}
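The owner of each hyperedge in the host loop above is a pure function of its index. Below is a minimal standalone sketch of the two closed-form maps; the INITIAL_* values are local stand-ins for the Zoltan constants, and the counts are invented for illustration.

#include <stdio.h>

/* Local stand-ins for the Zoltan distribution-type constants. */
#define INITIAL_CYCLIC 1
#define INITIAL_LINEAR 2

/* Owner of hyperedge h, mirroring the host loop in dist_hyperedges. */
static int hedge_owner(int h, int gnhedges, int num_dist_procs, int dist_type)
{
  if (dist_type == INITIAL_CYCLIC)
    return h % num_dist_procs;          /* round-robin over the processors */
  /* INITIAL_LINEAR: contiguous blocks of roughly equal size */
  return (int)((float)h * (float)num_dist_procs / (float)gnhedges);
}

int main(void)
{
  int gnhedges = 10, num_dist_procs = 4, h;
  for (h = 0; h < gnhedges; h++)
    printf("hedge %d: cyclic -> %d, linear -> %d\n", h,
           hedge_owner(h, gnhedges, num_dist_procs, INITIAL_CYCLIC),
           hedge_owner(h, gnhedges, num_dist_procs, INITIAL_LINEAR));
  return 0;
}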
Example #2
int chaco_fill_elements(
  int        Proc,
  int        Num_Proc,
  PROB_INFO_PTR prob,            /* problem description */
  MESH_INFO_PTR mesh,            /* mesh information for the problem */
  int        gnvtxs,             /* global number of vertices across all procs*/
  int        nvtxs,              /* number of vertices in local graph */
  int       *start,              /* start of edge list for each vertex */
  int       *adj,                /* edge list data */
  int        vwgt_dim,           /* # of weights per vertex */
  float     *vwgts,              /* vertex weight list data */
  int        ewgt_dim,           /* # of weights per edge */
  float     *ewgts,              /* edge weight list data */
  int        ndim,               /* dimension of the geometry */
  float     *x,                  /* x-coordinates of the vertices */
  float     *y,                  /* y-coordinates of the vertices */
  float     *z,                  /* z-coordinates of the vertices */
  short     *assignments,        /* assignments from Chaco file; may be NULL */
  int       base                 /* smallest vertex number to use; 
                                    base == 1 for Chaco; 
                                    may be 0 or 1 for HG files. */
)
{
  /* Local declarations. */
  int i, j, k, elem_id, *local_ids = NULL;
  int num_vtx, min_vtx, max_vtx; 
  int *vtx_list = NULL;
  const char *yo = "chaco_fill_elements";
/***************************** BEGIN EXECUTION ******************************/

  DEBUG_TRACE_START(Proc, yo);

  chaco_init_local_ids(&local_ids, &vtx_list, &min_vtx, &max_vtx, &num_vtx, 
                       gnvtxs, assignments, base);


  for (i = 0; i < num_vtx; i++) {
    mesh->elements[i].globalID = vtx_list[i]+base;  /* GlobalIDs are 1-based
                                                       in Chaco; may be 0-based
                                                       or 1-based in HG files */
    if (vwgts != NULL){
      for (j=0; j<vwgt_dim; j++) {
        mesh->elements[i].cpu_wgt[j] = vwgts[i*vwgt_dim+j];
      }
    }
    else
      mesh->elements[i].cpu_wgt[0] = 1.0;
    mesh->elements[i].elem_blk = 0; /* only one elem block for all vertices */

    if (assignments)
      mesh->elements[i].my_part = assignments[vtx_list[i]];
    else
      mesh->elements[i].my_part = Proc;  /* Init partition is starting proc.*/

    if (mesh->num_dims > 0) {
      /* One set of coords per element. */
      mesh->elements[i].connect = (int *) malloc(sizeof(int));
      mesh->elements[i].connect[0] = mesh->elements[i].globalID;
      mesh->elements[i].coord = (float **) malloc(sizeof(float *));
      mesh->elements[i].coord[0] = (float *) calloc(mesh->num_dims,
                                                    sizeof(float));  
      mesh->elements[i].coord[0][0] = x[i];
      mesh->elements[i].avg_coord[0] = x[i];
      if (mesh->num_dims > 1) {
        mesh->elements[i].coord[0][1] = y[i];
        mesh->elements[i].avg_coord[1] = y[i];
        if (mesh->num_dims > 2) {
          mesh->elements[i].coord[0][2] = z[i];
          mesh->elements[i].avg_coord[2] = z[i];
        }
      }
    }
  }

  for (i = 0; i < num_vtx; i++) {
    /* now start with the adjacencies */
    if (start != NULL)
      mesh->elements[i].nadj = start[i+1] - start[i];
    else
      mesh->elements[i].nadj = 0;
    if (mesh->elements[i].nadj > 0) {
      mesh->elements[i].adj_len = mesh->elements[i].nadj;
      mesh->elements[i].adj = (int *) malloc (mesh->elements[i].nadj * sizeof(int));
      mesh->elements[i].adj_proc = (int *) malloc (mesh->elements[i].nadj * sizeof(int));
      if (!(mesh->elements[i].adj) || !(mesh->elements[i].adj_proc)) {
        Gen_Error(0, "fatal: insufficient memory");
        return 0;
      }
      if (ewgts != NULL) {
        mesh->elements[i].edge_wgt = (float *) malloc (mesh->elements[i].nadj * sizeof(float));
        if (!(mesh->elements[i].edge_wgt)) {
          Gen_Error(0, "fatal: insufficient memory");
          return 0;
        }
      }
      else
        mesh->elements[i].edge_wgt = NULL;

      for (j = 0; j < mesh->elements[i].nadj; j++) {
        elem_id = adj[start[i] + j] - (1-base);  /* Chaco is 1-based;
                                                    HG may be 0 or 1 based. */

        /* determine which processor the adjacent vertex is on */
        k = ch_dist_proc(elem_id, assignments, base);

        /*
         * if the adjacent element is on this processor
         * then find the local id for that element
         */
        if (k == Proc) 
          mesh->elements[i].adj[j] = local_ids[elem_id-base-min_vtx];
        else /* use the global id */
          mesh->elements[i].adj[j] = elem_id;

        mesh->elements[i].adj_proc[j] = k;

        if (ewgts != NULL)
          mesh->elements[i].edge_wgt[j] = ewgts[start[i] + j];
      }
    } /* End: "if (mesh->elements[i].nadj > 0)" */
  } /* End: "for (i = 0; i < num_vtx; i++)" */

  safe_free((void **)(void *) &vtx_list);
  safe_free((void **)(void *) &local_ids);

  if (!build_elem_comm_maps(Proc, mesh)) {
    Gen_Error(0, "Fatal: error building initial elem comm maps");
    return 0;
  }

  if (Debug_Driver > 3)
    print_distributed_mesh(Proc, Num_Proc, mesh);

  DEBUG_TRACE_END(Proc, yo);
  return 1;
}
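The adjacency loop above consumes a Chaco-style CSR pair: vertex i's neighbors occupy adj[start[i]] .. adj[start[i+1]-1], and ewgts runs parallel to adj when edge weights are present. A self-contained sketch with invented data (each undirected edge appears once per endpoint, as in Chaco input):

#include <stdio.h>

int main(void)
{
  /* Hypothetical 3-vertex graph in Chaco CSR form (1-based vertex IDs). */
  int   start[] = {0, 2, 4, 6};          /* start[i+1]-start[i] = degree of i */
  int   adj[]   = {2, 3, 1, 3, 1, 2};    /* neighbor lists, concatenated */
  float ewgts[] = {1.0f, 2.0f, 1.0f, 3.0f, 2.0f, 3.0f};  /* parallel to adj */
  int   nvtxs = 3, i, j;

  for (i = 0; i < nvtxs; i++)
    for (j = start[i]; j < start[i+1]; j++)
      printf("vertex %d -> %d (wgt %.1f)\n", i + 1, adj[j], ewgts[j]);
  return 0;
}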
Example #3
/* Read from file and set up hypergraph. */
int read_hypergraph_file(
  int Proc,
  int Num_Proc,
  PROB_INFO_PTR prob,
  PARIO_INFO_PTR pio_info,
  MESH_INFO_PTR mesh
)
{
  /* Local declarations. */
  const char  *yo = "read_hypergraph_file";
  char   cmesg[256];

  int    i, gnvtxs, distributed_pins = 0, edge, vertex, nextEdge;
  int    nvtxs = 0, gnhedges = 0, nhedges = 0, npins = 0;
  int    vwgt_dim=0, hewgt_dim=0, vtx, edgeSize, global_npins;
  int   *hindex = NULL, *hvertex = NULL, *hvertex_proc = NULL;
  int   *hgid = NULL;
  float *hewgts = NULL, *vwgts = NULL;
  ZOLTAN_FILE* fp = NULL;
  int base = 0;   /* Smallest vertex number; usually zero or one. */
  char filename[256];

  /* Variables that allow graph-based functions to be reused. */
  /* If no chaco.graph or chaco.coords files exist, values are NULL or 0,
   * since graph is not being built. If chaco.graph and/or chaco.coords
   * exist, these arrays are filled and values stored in mesh.
   * Including these files allows for comparison of HG methods with other
   * methods, along with visualization of results and comparison of
   * LB_Eval results.
   */
  int    ch_nvtxs = 0;        /* Temporary values for chaco_read_graph.   */
#ifdef KDDKDD
  int    ch_vwgt_dim = 0;     /* Their values are ignored, as vertex      */
#endif
  float *ch_vwgts = NULL;     /* info is provided by hypergraph file.     */
  int   *ch_start = NULL, *ch_adj = NULL, ch_ewgt_dim = 0;
  short *ch_assignments = NULL;
  float *ch_ewgts = NULL;
  int    ch_ndim = 0;
  float *ch_x = NULL, *ch_y = NULL, *ch_z = NULL;
  int    ch_no_geom = TRUE;   /* Assume no geometry info is given; reset if
				 it is provided. */
  int    file_error = 0;

/***************************** BEGIN EXECUTION ******************************/

  DEBUG_TRACE_START(Proc, yo);

  if (Proc == 0) {

    /* Open and read the hypergraph file. */
    if (pio_info->file_type == HYPERGRAPH_FILE)
      sprintf(filename, "%s.hg", pio_info->pexo_fname);
    else if (pio_info->file_type == MATRIXMARKET_FILE)
      sprintf(filename, "%s.mtx", pio_info->pexo_fname);
    else {
	sprintf(cmesg, "fatal:  invalid file type %d", pio_info->file_type);
	Gen_Error(0, cmesg);
	return 0;
    }

    fp = ZOLTAN_FILE_open(filename, "r", pio_info->file_comp);
    file_error = (fp == NULL);
  }



  MPI_Bcast(&file_error, 1, MPI_INT, 0, MPI_COMM_WORLD);

  if (file_error) {
    sprintf(cmesg,
      "fatal:  Could not open hypergraph file %s",pio_info->pexo_fname);
    Gen_Error(0, cmesg);
    return 0;
  }

  if (pio_info->file_type == HYPERGRAPH_FILE) {
    /* read the array in on processor 0 */
    if (Proc == 0) {
      if (HG_readfile(Proc, fp, &nvtxs, &nhedges, &npins,
		    &hindex, &hvertex, &vwgt_dim, &vwgts,
		    &hewgt_dim, &hewgts, &base) != 0){
	Gen_Error(0, "fatal: Error returned from HG_readfile");
	return 0;
      }
    }
  }
  else if (pio_info->file_type == MATRIXMARKET_FILE) {
    /*
     * pio_info->chunk_reader == 0  (the usual case)
     *   process 0 will read entire file in MM_readfile,
     *   and will distribute vertices in chaco_dist_graph and pins in
     *   dist_hyperedges later.   (distributed_pins==0)
     *
     * pio_info->chunk_reader == 1  ("initial read = chunks" in zdrive.inp)
     *   process 0 will read the file in chunks, and will send vertices
     *   and pins to other processes before reading the next chunk, all
     *   in MM_readfile.  (distributed_pins==1)
     */

    if (MM_readfile(Proc, Num_Proc, fp, pio_info,
		    &nvtxs,     /* global number of vertices */
		    &nhedges,   /* global number of hyperedges */
		    &npins,     /* local number of pins */
		    &hindex, &hvertex, &vwgt_dim, &vwgts,
		    &hewgt_dim, &hewgts, &ch_start, &ch_adj,
		    &ch_ewgt_dim, &ch_ewgts, &base, &global_npins)) {
      Gen_Error(0, "fatal: Error returned from MM_readfile");
      return 0;
    }

    if (Proc == 0) ZOLTAN_FILE_close(fp);

    if ((Num_Proc > 1) && pio_info->chunk_reader && (global_npins > Num_Proc)){
      distributed_pins = 1;
    }
    else{
      distributed_pins = 0;
    }
  }


#ifdef KDDKDD
 {
   /* If CHACO graph file is available, read it. */

   sprintf(filename, "%s.graph", pio_info->pexo_fname);

   fp = ZOLTAN_FILE_open(filename, "r", pio_info->file_comp);
   file_error =
#ifndef ZOLTAN_COMPRESS
     (fp == NULL);
#else
   fp->error;
#endif


   if (!file_error) {
      /* CHACO graph file is available. */
      /* Assuming hypergraph vertices are the same as Chaco vertices. */
      /* Chaco vertices and their weights are ignored in the rest of this function. */
      if (chaco_input_graph(fp, filename, &ch_start, &ch_adj, &ch_nvtxs,
		      &ch_vwgt_dim, &ch_vwgts, &ch_ewgt_dim, &ch_ewgts) != 0) {
	Gen_Error(0, "fatal: Error returned from chaco_input_graph");
	return 0;
      }
    }
   else
     ch_nvtxs = nvtxs;


    /* If coordinate file is available, read it. */
   sprintf(filename, "%s.coords", pio_info->pexo_fname);

   fp = ZOLTAN_FILE_open(filename, "r", pio_info->file_comp);
   file_error =
#ifndef ZOLTAN_COMPRESS
     (fp == NULL);
#else
   fp->error;
#endif

    if (!file_error) {
      /* CHACO coordinates file is available. */
      ch_no_geom = FALSE;
      if (chaco_input_geom(fp, filename, ch_nvtxs, &ch_ndim,
			   &ch_x, &ch_y, &ch_z) != 0) {
	Gen_Error(0, "fatal: Error returned from chaco_input_geom");
	return 0;
      }
    }
 }
#else /* KDDKDD */
  ch_nvtxs = nvtxs;
#endif /* KDDKDD */


  /* Read Chaco assignment file, if requested */
  if (pio_info->init_dist_type == INITIAL_FILE) {
    sprintf(filename, "%s.assign", pio_info->pexo_fname);

    fp = ZOLTAN_FILE_open(filename, "r", pio_info->file_comp);

    if (fp == NULL) {
      sprintf(cmesg, "Error:  Could not open Chaco assignment file %s; "
              "initial distribution cannot be read", filename);
      Gen_Error(0, cmesg);
      return 0;
    }
    else {
      /* read the assignments in on processor 0 */
      ch_assignments = (short *) malloc(nvtxs * sizeof(short));
      if (nvtxs && !ch_assignments) {
        Gen_Error(0, "fatal: memory error in read_hypergraph_file");
        return 0;
      }
      /* chaco_input_assign closes fp when done */
      if (chaco_input_assign(fp, filename, ch_nvtxs, ch_assignments) != 0){
        Gen_Error(0, "fatal: Error returned from chaco_input_assign");
        return 0;
      }
    }
  }

  MPI_Bcast(&base, 1, MPI_INT, 0, MPI_COMM_WORLD);

  if (distributed_pins){
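    /* Pins were already scattered to their owners during the chunked
     * MM_readfile pass; locally we only need to drop empty hyperedges
     * (compacting hindex in place) and record each pin's owning processor. */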
    gnhedges = nhedges;
    nhedges = 0;
    hewgt_dim = 0;
    hewgts = NULL;
    for (edge=0; edge<gnhedges; edge++){
      edgeSize = hindex[edge+1] - hindex[edge];
      if (edgeSize > 0) nhedges++;
    }
    hgid = (int *)malloc(nhedges * sizeof(int));
    hvertex_proc = (int *)malloc(npins * sizeof(int));
    nextEdge=0;
    vtx=0;
    for (edge=0; edge<gnhedges; edge++){
      edgeSize = hindex[edge+1] - hindex[edge];
      if (edgeSize > 0){
	hgid[nextEdge] = edge+1;
	if (nextEdge < edge){
	  hindex[nextEdge+1] = hindex[nextEdge] + edgeSize;
	}
	for (vertex=0; vertex<edgeSize; vertex++,vtx++){
	  hvertex_proc[vtx] = ch_dist_proc(hvertex[vtx], NULL, 1);
	}
	nextEdge++;
      }
    }
    gnvtxs = nvtxs;
    nvtxs = ch_dist_num_vtx(Proc, NULL);
    if (ch_start){    /* need to include only vertices this process owns */
      for (i=0,vertex=0; i<gnvtxs; i++){
	if ((ch_start[i+1] > ch_start[vertex]) || /* vtx has adjacencies so it's mine */
	    (ch_dist_proc(i, NULL, 0) == Proc))   /* my vtx with no adjacencies */
	  {
	  if (i > vertex){
	    ch_start[vertex+1] = ch_start[i+1];
	  }
	  vertex++;
	}
      }
    }
#if 0
    debug_lists(Proc, Num_Proc, nhedges, hindex, hvertex, hvertex_proc, hgid);
#endif
  } else{

    /* Distribute hypergraph graph */
    /* Use hypergraph vertex information and chaco edge information. */

    if (!chaco_dist_graph(MPI_COMM_WORLD, pio_info, 0, &gnvtxs, &nvtxs,
	     &ch_start, &ch_adj, &vwgt_dim, &vwgts, &ch_ewgt_dim, &ch_ewgts,
	     &ch_ndim, &ch_x, &ch_y, &ch_z, &ch_assignments) != 0) {
      Gen_Error(0, "fatal: Error returned from chaco_dist_graph");
      return 0;
    }

    if (!dist_hyperedges(MPI_COMM_WORLD, pio_info, 0, base, gnvtxs, &gnhedges,
		       &nhedges, &hgid, &hindex, &hvertex, &hvertex_proc,
		       &hewgt_dim, &hewgts, ch_assignments)) {
      Gen_Error(0, "fatal: Error returned from dist_hyperedges");
      return 0;
    }
  }


  /* Initialize mesh structure for Hypergraph. */
  mesh->data_type = HYPERGRAPH;
  mesh->num_elems = nvtxs;
  mesh->vwgt_dim = vwgt_dim;
  mesh->ewgt_dim = ch_ewgt_dim;
  mesh->elem_array_len = mesh->num_elems + 5;
  mesh->num_dims = ch_ndim;
  mesh->num_el_blks = 1;

  mesh->gnhedges = gnhedges;
  mesh->nhedges = nhedges;
  mesh->hewgt_dim = hewgt_dim;

  mesh->hgid = hgid;
  mesh->hindex = hindex;
  mesh->hvertex = hvertex;
  mesh->hvertex_proc = hvertex_proc;
  mesh->heNumWgts = nhedges;
  mesh->heWgtId = NULL;
  mesh->hewgts = hewgts;


  mesh->eb_etypes = (int *) malloc (5 * mesh->num_el_blks * sizeof(int));
  if (!mesh->eb_etypes) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }
  mesh->eb_ids = mesh->eb_etypes + mesh->num_el_blks;
  mesh->eb_cnts = mesh->eb_ids + mesh->num_el_blks;
  mesh->eb_nnodes = mesh->eb_cnts + mesh->num_el_blks;
  mesh->eb_nattrs = mesh->eb_nnodes + mesh->num_el_blks;

  mesh->eb_names = (char **) malloc (mesh->num_el_blks * sizeof(char *));
  if (!mesh->eb_names) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }

  mesh->eb_etypes[0] = -1;
  mesh->eb_ids[0] = 1;
  mesh->eb_cnts[0] = nvtxs;
  mesh->eb_nattrs[0] = 0;
  /*
   * Each element has one set of coordinates (i.e., node) if a coords file
   * was provided; zero otherwise.
   */
  MPI_Bcast( &ch_no_geom, 1, MPI_INT, 0, MPI_COMM_WORLD);
  if (ch_no_geom)
    mesh->eb_nnodes[0] = 0;
  else
    mesh->eb_nnodes[0] = 1;

  /* allocate space for name */
  mesh->eb_names[0] = (char *) malloc((MAX_STR_LENGTH+1) * sizeof(char));
  if (!mesh->eb_names[0]) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }
  strcpy(mesh->eb_names[0], "hypergraph");

  /* allocate the element structure array */
  mesh->elements = (ELEM_INFO_PTR) malloc (mesh->elem_array_len
					 * sizeof(ELEM_INFO));
  if (!(mesh->elements)) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }

  /*
   * initialize all of the element structs as unused by
   * setting the globalID to -1
   */
  for (i = 0; i < mesh->elem_array_len; i++)
    initialize_element(&(mesh->elements[i]));

  /*
   * now fill the element structure array with the
   * information from the Chaco file
   * Use hypergraph vertex information and chaco edge information.
   */
  if (!chaco_fill_elements(Proc, Num_Proc, prob, mesh, gnvtxs, nvtxs,
		     ch_start, ch_adj, vwgt_dim, vwgts, ch_ewgt_dim, ch_ewgts,
		     ch_ndim, ch_x, ch_y, ch_z, ch_assignments, base)) {
    Gen_Error(0, "fatal: Error returned from chaco_fill_elements");
    return 0;
  }
#if 0
  debug_elements(Proc, Num_Proc, mesh->num_elems,mesh->elements);
#endif

  safe_free((void **)(void *) &vwgts);
  safe_free((void **)(void *) &ch_ewgts);
  safe_free((void **)(void *) &ch_vwgts);
  safe_free((void **)(void *) &ch_x);
  safe_free((void **)(void *) &ch_y);
  safe_free((void **)(void *) &ch_z);
  safe_free((void **)(void *) &ch_start);
  safe_free((void **)(void *) &ch_adj);
  safe_free((void **)(void *) &ch_assignments);

 if (Debug_Driver > 3)
   print_distributed_mesh(Proc, Num_Proc, mesh);

  DEBUG_TRACE_END(Proc, yo);
  return 1;
}
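All three examples exchange hyperedges in the same compressed pin-list layout: hindex[e] .. hindex[e+1]-1 indexes the slice of hvertex holding hyperedge e's pins, and hgid carries the global edge numbers. A tiny sketch of that layout with invented data:

#include <stdio.h>

int main(void)
{
  /* Hypothetical 3-hyperedge hypergraph in hgid/hindex/hvertex form. */
  int hgid[]    = {1, 2, 3};                 /* global hyperedge IDs (1-based) */
  int hindex[]  = {0, 3, 5, 8};              /* hindex[e+1]-hindex[e] = size of e */
  int hvertex[] = {1, 2, 4, 2, 3, 1, 3, 4};  /* pins, concatenated per edge */
  int nhedges = 3, e, i;

  for (e = 0; e < nhedges; e++) {
    printf("hyperedge %d:", hgid[e]);
    for (i = hindex[e]; i < hindex[e+1]; i++)
      printf(" %d", hvertex[i]);
    printf("\n");
  }
  return 0;
}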