Example #1
int ch_dist_max_num_vtx(short *assignments)
{
  /* Return the maximum number of vertices assigned to any processor. */
  int i;
  int tmp, max = 0;

  for (i = 0; i < Num_Proc; i++)
    if ((tmp = ch_dist_num_vtx(i,assignments)) > max) max = tmp;

  return max;
}
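
A hypothetical usage sketch (mirroring the host-side buffer sizing in Example #2): since no processor owns more vertices than this maximum, the host can allocate one set of send buffers up front and reuse it for every destination. Num_Proc is the driver's global processor count, and ch_dist_num_vtx and assignments come from the same distribution module; the buffer names below are illustrative, not from the original.

  /* Sketch: size reusable per-destination send buffers once, on the host. */
  int max_nvtxs = ch_dist_max_num_vtx(assignments);
  int *vtx_list = (int *) malloc(max_nvtxs * sizeof(int));      /* vertex list for one proc */
  float *send_x = (float *) malloc(max_nvtxs * sizeof(float));  /* coordinate send buffer */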
Example #2
int chaco_dist_graph(
  MPI_Comm comm,		/* MPI Communicator */
  PARIO_INFO_PTR pio_info,      /* Parallel IO info */
  int     host_proc,		/* processor where all the data is initially */
  int     *gnvtxs,		/* number of vertices in global graph */
  int     *nvtxs,		/* number of vertices in local graph */
  int     **xadj,		/* start of edge list for each vertex */
  int     **adjncy,		/* edge list data */
  int     *vwgt_dim,            /* number of weights per vertex */
  float   **vwgts,		/* vertex weight list data */
  int     *ewgt_dim,            /* number of weights per edge */
  float   **ewgts,		/* edge weight list data */
  int     *ndim,                /* dimension of the geometry */
  float   **x,                  /* x-coordinates of the vertices */
  float   **y,                  /* y-coordinates of the vertices */
  float   **z,                  /* z-coordinates of the vertices */
  short   **assignments         /* assignments from Chaco file; may be NULL */
)
{
  const char *yo = "chaco_dist_graph";
  int nprocs, myproc, i, j, k, n, p, nedges, nsend, max_nvtxs, v, adj_cnt;
  int offset, use_graph, nvtx_edges;
  int *old_xadj = NULL, *old_adjncy = NULL, *size = NULL;
  int *send_xadj = NULL, *send_adjncy = NULL;
  int *vtx_list = NULL;
  float *old_x = NULL, *old_y = NULL, *old_z = NULL;
  float *send_x = NULL, *send_y = NULL, *send_z = NULL;
  float *old_vwgts = NULL, *old_ewgts = NULL;
  float *send_vwgts = NULL, *send_ewgts = NULL;
  MPI_Status status;

  /* Determine number of processors and my rank. */
  MPI_Comm_size (comm, &nprocs );
  MPI_Comm_rank (comm, &myproc );

  DEBUG_TRACE_START(myproc, yo);

  /* Initialize */
  use_graph = (*xadj != NULL);
 
  /* Handle serial case and return. */
  if (nprocs == 1) {
    /* Set values expected to be returned by this function. */
    /* All array pointers are unchanged.                    */
    *gnvtxs = *nvtxs;
    /* Initialize distribution, so other routines using it work. */
    ch_dist_init(nprocs, *gnvtxs, pio_info, assignments, host_proc, comm);

    return 1;
  }

  /* Broadcast to all procs */
  MPI_Bcast( vwgt_dim, 1, MPI_INT, host_proc, comm);
  MPI_Bcast( ewgt_dim, 1, MPI_INT, host_proc, comm);
  MPI_Bcast( &use_graph, 1, MPI_INT, host_proc, comm);
  MPI_Bcast( ndim, 1, MPI_INT, host_proc, comm);
  MPI_Bcast( nvtxs, 1, MPI_INT, host_proc, comm);
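  /* The host still holds the entire graph, so its local vertex count is the
     global count; after the broadcast every rank can record it. */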
  *gnvtxs = *nvtxs;

  /* Initialize the chaco distribution on all processors */
  ch_dist_init(nprocs, *gnvtxs, pio_info, assignments, host_proc, comm);
  
  /* Store pointers to original data */
  if (myproc == host_proc) {
    old_xadj   = *xadj;
    *xadj      = NULL;
    old_adjncy = *adjncy;
    *adjncy    = NULL;
    old_x      = *x;
    old_y      = *y;
    old_z      = *z;
  }

  /* Allocate space for new distributed graph data */
  n = *nvtxs = ch_dist_num_vtx(myproc, *assignments);

  if (use_graph) {
    *xadj = (int *) malloc((n+1)*sizeof(int));
    if (*xadj == NULL) {
      Gen_Error(0, "fatal: insufficient memory");
      return 0;
    }
  }
  if (*vwgt_dim){
    old_vwgts = *vwgts;
    *vwgts = NULL;
    if (n > 0) {
      *vwgts = (float *) malloc(n*(*vwgt_dim)*sizeof(float));
      if (*vwgts == NULL) {
        Gen_Error(0, "fatal: insufficient memory");
        return 0;
      }
    }
  }
  if (*ndim > 0) {
    *x = (float *) malloc(n*sizeof(float));
    if (*ndim > 1) {
      *y = (float *) malloc(n*sizeof(float));
      if (*ndim > 2) {
        *z = (float *) malloc(n*sizeof(float));
      }
    }
    /* Check the coordinate allocations (each may legitimately be NULL when n == 0). */
    if (n > 0 && (*x == NULL || (*ndim > 1 && *y == NULL)
                             || (*ndim > 2 && *z == NULL))) {
      Gen_Error(0, "fatal: insufficient memory");
      return 0;
    }
  }


  /*
   * Distribute vertex data (xadj, coordinates, etc.) to all procs.
   */
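
  /*
   * Protocol: the host packs each destination's vertices into contiguous
   * send buffers and ships them with fixed message tags (1 = xadj,
   * 2 = vertex weights, 3/4/5 = x/y/z coordinates); every other rank posts
   * the matching receives in the else-branch below.  The host's own slice
   * is copied straight into the local arrays instead of being sent.
   */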

  if (myproc == host_proc){

    /* Allocate space for send buffers  (size = max num vtx per proc ) */
    max_nvtxs = ch_dist_max_num_vtx(*assignments);
    if (use_graph) {
      send_xadj = (int *) malloc((max_nvtxs+1)*sizeof(int));
      if (send_xadj == NULL) {
        Gen_Error(0, "fatal: insufficient memory");
        return 0;
      }
    }
    if (*vwgt_dim) {
      send_vwgts = (float *) malloc(max_nvtxs*(*vwgt_dim)*sizeof(float));
      if (send_vwgts == NULL) {
        Gen_Error(0, "fatal: insufficient memory");
        return 0;
      }
    }
    if (*ndim > 0) {
      send_x = (float *) malloc(max_nvtxs*sizeof(float));
      if (send_x == NULL) {
        Gen_Error(0, "fatal: insufficient memory");
        return 0;
      }
      if (*ndim > 1) {
        send_y = (float *) malloc(max_nvtxs*sizeof(float));
        if (send_y == NULL) {
          Gen_Error(0, "fatal: insufficient memory");
          return 0;
        }
        if (*ndim > 2) {
          send_z = (float *) malloc(max_nvtxs*sizeof(float));
          if (send_z == NULL) {
            Gen_Error(0, "fatal: insufficient memory");
            return 0;
          }
        }
      }
    }

    /* Allocate space for list of vertices on a given processor */
    vtx_list = (int *) malloc(max_nvtxs*sizeof(int));
    if (vtx_list == NULL) {
      Gen_Error(0, "fatal: insufficient memory");
      return 0;
    }

    /* Allocate array to accumulate number of edges to be sent to each proc. */
    if (use_graph) {
      size = (int *) malloc(nprocs*sizeof(int));
      if (size == NULL) {
        Gen_Error(0, "fatal: insufficient memory");
        return 0;
      }
    }

    /* For each processor, gather its vertex information and send it. */
    for (p = 0; p < nprocs; p++){
      if (use_graph) size[p] = 0;

      /* Get list of vertices to be assigned to processor p */
      ch_dist_vtx_list(vtx_list, &nsend, p, *assignments);

      if (p == myproc){

        /* Loop over vertices assigned to myproc; copy the vertex */
        /* data into local arrays.                                */
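        /* The local xadj is rebuilt in CSR form: local vertex i keeps its
           global degree old_xadj[v+1]-old_xadj[v].  E.g., two local vertices
           with global degrees 2 and 3 yield local xadj = {0, 2, 5}. */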

        if (use_graph) (*xadj)[0] = 0;
        for (i = 0; i < nsend; i++) {
          v = vtx_list[i];
          if (use_graph) {
            size[p] += old_xadj[v+1]-old_xadj[v];
            (*xadj)[i+1] = (*xadj)[i] + old_xadj[v+1] - old_xadj[v];
          }

          if (*vwgt_dim){
            for (j=0; j<*vwgt_dim; j++)
              (*vwgts)[i*(*vwgt_dim)+j] = old_vwgts[v*(*vwgt_dim)+j];
          }

          if (*ndim > 0) {
            (*x)[i] = old_x[v];
            if (*ndim > 1) {
              (*y)[i] = old_y[v];
              if (*ndim > 2) {
                (*z)[i] = old_z[v];
              }
            }
          }
        }
      }
      else {

        /* Loop over vertices assigned to proc p to gather */
        /* vertex data into send buffers                   */

        if (use_graph) send_xadj[0] = 0;
        for (i = 0; i < nsend; i++) {
          v = vtx_list[i];
          if (use_graph) {
            size[p] += old_xadj[v+1]-old_xadj[v];
            send_xadj[i+1] = send_xadj[i] + old_xadj[v+1] - old_xadj[v];
          }
          if (*vwgt_dim){
            for (j=0; j<*vwgt_dim; j++)
              send_vwgts[i*(*vwgt_dim)+j] = old_vwgts[v*(*vwgt_dim)+j];
          }
          if (*ndim > 0) {
            send_x[i] = old_x[v];
            if (*ndim > 1) {
              send_y[i] = old_y[v];
              if (*ndim > 2) {
                send_z[i] = old_z[v];
              }
            }
          }
        }

        /* Send vertex data to proc p. */
        if (use_graph)
          MPI_Send(send_xadj, nsend+1, MPI_INT, p, 1, comm);
        if (*vwgt_dim)
          MPI_Send(send_vwgts, nsend*(*vwgt_dim), MPI_FLOAT, p, 2, comm);
        if (*ndim > 0) {
          MPI_Send(send_x, nsend, MPI_FLOAT, p, 3, comm);
          if (*ndim > 1) {
            MPI_Send(send_y, nsend, MPI_FLOAT, p, 4, comm);
            if (*ndim > 2) {
              MPI_Send(send_z, nsend, MPI_FLOAT, p, 5, comm);
            }
          }
        }
      }
    }
    safe_free((void **)(void *) &send_xadj);
    safe_free((void **)(void *) &send_vwgts);
    safe_free((void **)(void *) &send_x);
    safe_free((void **)(void *) &send_y);
    safe_free((void **)(void *) &send_z);
  }
  else {
    /* host_proc != myproc; receive vertex data from host_proc */
    if (use_graph)
      MPI_Recv (*xadj, (*nvtxs)+1, MPI_INT, host_proc, 1, comm, &status);
    if (*vwgt_dim)
      MPI_Recv (*vwgts, (*nvtxs)*(*vwgt_dim), MPI_FLOAT, host_proc, 2, comm, &status);
    if (*ndim > 0) {
      MPI_Recv(*x, *nvtxs,  MPI_FLOAT, host_proc, 3, comm, &status);
      if (*ndim > 1) {
        MPI_Recv(*y, *nvtxs, MPI_FLOAT, host_proc, 4, comm, &status);
        if (*ndim > 2) {
          MPI_Recv(*z, *nvtxs, MPI_FLOAT, host_proc, 5, comm, &status);
        }
      }
    }
  }


  /*
   * Distribute edge data to all procs.
   */
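
  /*
   * size[p], accumulated during the vertex pass above, is the number of
   * adjacency entries destined for rank p; it sizes the messages here
   * (tags 6 = adjncy, 7 = edge weights) and lets empty sends be skipped.
   */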

  if (use_graph) {

    if (*ewgt_dim) {
      old_ewgts = *ewgts;
      *ewgts = NULL;
    }

    /* Allocate space for edge data */
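    /* In CSR storage the final xadj entry is the total number of local
       adjacency entries, so it doubles as the adjncy buffer length. */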
    nedges = (*xadj)[*nvtxs];
    if (nedges > 0) {
      *adjncy = (int *) malloc(nedges * sizeof (int));
      if (*adjncy == NULL) {
        Gen_Error(0, "fatal: insufficient memory");
        return 0;
      }
      if (*ewgt_dim){
        *ewgts = (float *) malloc(nedges*(*ewgt_dim) * sizeof (float));
        if (*ewgts == NULL) {
          Gen_Error(0, "fatal: insufficient memory");
          return 0;
        }
      }
    }

    /* Gather and send/receive edge data */

    if (myproc == host_proc){

      /* For each processor, gather its edge data and send it. */
      for (p = 0; p < nprocs; p++){
        if (size[p] == 0) continue;

        /* Get list of vertices to be assigned to processor p */
        ch_dist_vtx_list(vtx_list, &nsend, p, *assignments);

        adj_cnt = 0;
        if (p == myproc) {

          /* Loop over vertices assigned to myproc; copy the edge */
          /* data into local arrays.                              */

          for (i = 0; i < nsend; i++) {
            v = vtx_list[i];
            offset = old_xadj[v];
            nvtx_edges = old_xadj[v+1] - old_xadj[v];
            for (j = 0; j < nvtx_edges; j++) {
              (*adjncy)[adj_cnt] = old_adjncy[offset+j];
              if (*ewgt_dim){ 
                for (k=0; k<*ewgt_dim; k++)
                  (*ewgts)[adj_cnt*(*ewgt_dim)+k] = old_ewgts[(offset+j)*(*ewgt_dim)+k];
              }
              adj_cnt++;
            }
          }
        }
        else { /* p != myproc */

          /* allocate send buffers; size = num edges to send to proc p */
          nvtx_edges = 0;
          for (i = 0; i < nsend; i++) {
            v = vtx_list[i];
            nvtx_edges += old_xadj[v+1] - old_xadj[v];
          }
          send_adjncy = (int *) malloc(nvtx_edges * sizeof(int));
          if (send_adjncy == NULL) {
            Gen_Error(0, "fatal: insufficient memory");
            return 0;
          }
          if (*ewgt_dim) {
            send_ewgts = (float *) malloc(nvtx_edges*(*ewgt_dim) * sizeof(float));
            if (send_ewgts == NULL) {
              Gen_Error(0, "fatal: insufficient memory");
              return 0;
            }
          }

          /* Loop over vertices assigned to proc p to gather */
          /* edge data into send buffers                     */

          for (i = 0; i < nsend; i++) {
            v = vtx_list[i];
            offset = old_xadj[v];
            nvtx_edges = old_xadj[v+1] - old_xadj[v];
            for (j = 0; j < nvtx_edges; j++) {
              send_adjncy[adj_cnt] = old_adjncy[offset+j];
              if (*ewgt_dim){
                for (k=0; k<*ewgt_dim; k++)
                  send_ewgts[adj_cnt*(*ewgt_dim)+k] = old_ewgts[(offset+j)*(*ewgt_dim)+k];
              }
              adj_cnt++;
            }
          }
          /* Send edge data to proc p. */
          MPI_Send(send_adjncy, size[p], MPI_INT, p, 6, comm);
          if (*ewgt_dim)
            MPI_Send(send_ewgts, size[p]*(*ewgt_dim), MPI_FLOAT, p, 7, comm);
          safe_free((void **)(void *) &send_adjncy);
          safe_free((void **)(void *) &send_ewgts);
        }
      }
    }
    else {
      /* host_proc != myproc; receive edge data from host_proc */
      if (nedges > 0) {
        MPI_Recv (*adjncy, nedges, MPI_INT, host_proc, 6, comm, &status);
        if (*ewgt_dim)
          MPI_Recv (*ewgts, nedges*(*ewgt_dim), MPI_FLOAT, host_proc, 7, comm, &status);
      }
    }
  }

  /* Free space on host proc */
  if (myproc == host_proc){
    safe_free((void **)(void *) &old_xadj);
    safe_free((void **)(void *) &old_adjncy);
    safe_free((void **)(void *) &old_vwgts);
    safe_free((void **)(void *) &old_ewgts);

    safe_free((void **)(void *) &old_x);
    safe_free((void **)(void *) &old_y);
    safe_free((void **)(void *) &old_z);
    safe_free((void **)(void *) &vtx_list);
  }
  if (size != NULL) safe_free((void **)(void *) &size);
   
  DEBUG_TRACE_END(myproc, yo);
  return 1;
}
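
A hedged sketch of a typical call, modeled on the call site in Example #3 below. All array pointers start NULL and the counts 0 on non-host ranks; rank 0 (the host_proc argument) is assumed to have already filled nvtxs, xadj, adjncy, etc. from a Chaco file, and pio_info is assumed to be in scope. The function keys off *xadj != NULL to decide whether graph structure is present.

  int gnvtxs, nvtxs = 0, *xadj = NULL, *adjncy = NULL;
  int vwgt_dim = 0, ewgt_dim = 0, ndim = 0;
  float *vwgts = NULL, *ewgts = NULL, *x = NULL, *y = NULL, *z = NULL;
  short *assignments = NULL;
  /* ... on rank 0, read the graph into nvtxs/xadj/adjncy here ... */
  if (chaco_dist_graph(MPI_COMM_WORLD, pio_info, 0, &gnvtxs, &nvtxs,
                       &xadj, &adjncy, &vwgt_dim, &vwgts, &ewgt_dim, &ewgts,
                       &ndim, &x, &y, &z, &assignments) == 0)
    Gen_Error(0, "fatal: Error returned from chaco_dist_graph");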
Example #3
/* Read from file and set up hypergraph. */
int read_hypergraph_file(
  int Proc,
  int Num_Proc,
  PROB_INFO_PTR prob,
  PARIO_INFO_PTR pio_info,
  MESH_INFO_PTR mesh
)
{
  /* Local declarations. */
  const char  *yo = "read_hypergraph_file";
  char   cmesg[256];

  int    i, gnvtxs, distributed_pins = 0, edge, vertex, nextEdge;
  int    nvtxs = 0, gnhedges = 0, nhedges = 0, npins = 0;
  int    vwgt_dim=0, hewgt_dim=0, vtx, edgeSize, global_npins;
  int   *hindex = NULL, *hvertex = NULL, *hvertex_proc = NULL;
  int   *hgid = NULL;
  float *hewgts = NULL, *vwgts = NULL;
  ZOLTAN_FILE* fp = NULL;
  int base = 0;   /* Smallest vertex number; usually zero or one. */
  char filename[256];

  /* Variables that allow graph-based functions to be reused. */
  /* If no chaco.graph or chaco.coords files exist, values are NULL or 0,
   * since graph is not being built. If chaco.graph and/or chaco.coords
   * exist, these arrays are filled and values stored in mesh.
   * Including these files allows for comparison of HG methods with other
   * methods, along with visualization of results and comparison of
   * LB_Eval results.
   */
  int    ch_nvtxs = 0;        /* Temporary values for chaco_read_graph.   */
#ifdef KDDKDD
  int    ch_vwgt_dim = 0;     /* Their values are ignored, as vertex      */
#endif
  float *ch_vwgts = NULL;     /* info is provided by hypergraph file.     */
  int   *ch_start = NULL, *ch_adj = NULL, ch_ewgt_dim = 0;
  short *ch_assignments = NULL;
  float *ch_ewgts = NULL;
  int    ch_ndim = 0;
  float *ch_x = NULL, *ch_y = NULL, *ch_z = NULL;
  int    ch_no_geom = TRUE;   /* Assume no geometry info is given; reset if
				 it is provided. */
  int    file_error = 0;

/***************************** BEGIN EXECUTION ******************************/

  DEBUG_TRACE_START(Proc, yo);

  if (Proc == 0) {

    /* Open and read the hypergraph file. */
    if (pio_info->file_type == HYPERGRAPH_FILE)
      sprintf(filename, "%s.hg", pio_info->pexo_fname);
    else if (pio_info->file_type == MATRIXMARKET_FILE)
      sprintf(filename, "%s.mtx", pio_info->pexo_fname);
    else {
      sprintf(cmesg, "fatal:  invalid file type %d", pio_info->file_type);
      Gen_Error(0, cmesg);
      return 0;
    }

    fp = ZOLTAN_FILE_open(filename, "r", pio_info->file_comp);
    file_error = (fp == NULL);
  }

  MPI_Bcast(&file_error, 1, MPI_INT, 0, MPI_COMM_WORLD);

  if (file_error) {
    sprintf(cmesg,
      "fatal:  Could not open hypergraph file %s", pio_info->pexo_fname);
    Gen_Error(0, cmesg);
    return 0;
  }

  if (pio_info->file_type == HYPERGRAPH_FILE) {
    /* read the array in on processor 0 */
    if (Proc == 0) {
      if (HG_readfile(Proc, fp, &nvtxs, &nhedges, &npins,
                      &hindex, &hvertex, &vwgt_dim, &vwgts,
                      &hewgt_dim, &hewgts, &base) != 0){
        Gen_Error(0, "fatal: Error returned from HG_readfile");
        return 0;
      }
    }
  }
  else if (pio_info->file_type == MATRIXMARKET_FILE) {
    /*
     * pio_info->chunk_reader == 0  (the usual case)
     *   process 0 will read entire file in MM_readfile,
     *   and will distribute vertices in chaco_dist_graph and pins in
     *   dist_hyperedges later.   (distributed_pins==0)
     *
     * pio_info->chunk_reader == 1  ("initial read = chunks" in zdrive.inp)
     *   process 0 will read the file in chunks, and will send vertices
     *   and pins to other processes before reading the next chunk, all
     *   in MM_readfile.  (distributed_pins==1)
     */

    if (MM_readfile(Proc, Num_Proc, fp, pio_info,
		    &nvtxs,     /* global number of vertices */
		    &nhedges,   /* global number of hyperedges */
		    &npins,     /* local number of pins */
		    &hindex, &hvertex, &vwgt_dim, &vwgts,
		    &hewgt_dim, &hewgts, &ch_start, &ch_adj,
		    &ch_ewgt_dim, &ch_ewgts, &base, &global_npins)) {
      Gen_Error(0, "fatal: Error returned from MM_readfile");
      return 0;
    }

    if (Proc == 0) ZOLTAN_FILE_close(fp);

    if ((Num_Proc > 1) && pio_info->chunk_reader && (global_npins > Num_Proc)){
      distributed_pins = 1;
    }
    else{
      distributed_pins = 0;
    }
  }


#ifdef KDDKDD
 {
   /* If CHACO graph file is available, read it. */

   sprintf(filename, "%s.graph", pio_info->pexo_fname);

   fp = ZOLTAN_FILE_open(filename, "r", pio_info->file_comp);
#ifndef ZOLTAN_COMPRESS
   file_error = (fp == NULL);
#else
   file_error = fp->error;  /* fp is a pointer; the original "fp.error" would not compile */
#endif


   if (!file_error) {
      /* CHACO graph file is available. */
      /* Assuming hypergraph vertices are same as chaco vertices. */
      /* Chaco vertices and their weights are ignored in rest of function. */
      if (chaco_input_graph(fp, filename, &ch_start, &ch_adj, &ch_nvtxs,
		      &ch_vwgt_dim, &ch_vwgts, &ch_ewgt_dim, &ch_ewgts) != 0) {
	Gen_Error(0, "fatal: Error returned from chaco_input_graph");
	return 0;
      }
    }
   else
     ch_nvtxs = nvtxs;


    /* If coordinate file is available, read it. */
   sprintf(filename, "%s.coords", pio_info->pexo_fname);

   fp = ZOLTAN_FILE_open(filename, "r", pio_info->file_comp);
#ifndef ZOLTAN_COMPRESS
   file_error = (fp == NULL);
#else
   file_error = fp->error;
#endif

    if (!file_error) {
      /* CHACO coordinates file is available. */
      ch_no_geom = FALSE;
      if (chaco_input_geom(fp, filename, ch_nvtxs, &ch_ndim,
                           &ch_x, &ch_y, &ch_z) != 0) {
	Gen_Error(0, "fatal: Error returned from chaco_input_geom");
	return 0;
      }
    }
 }
#else /* KDDKDD */
  ch_nvtxs = nvtxs;
#endif /* KDDKDD */


  /* Read Chaco assignment file, if requested. */
  if (pio_info->init_dist_type == INITIAL_FILE) {
    sprintf(filename, "%s.assign", pio_info->pexo_fname);

    fp = ZOLTAN_FILE_open(filename, "r", pio_info->file_comp);
    if (fp == NULL) {
      sprintf(cmesg, "Error:  Could not open Chaco assignment file %s; "
              "initial distribution cannot be read", filename);
      Gen_Error(0, cmesg);
      return 0;
    }
    else {
      /* Read the assignment values. */
      ch_assignments = (short *) malloc(nvtxs * sizeof(short));
      if (nvtxs && !ch_assignments) {
        Gen_Error(0, "fatal: memory error in read_hypergraph_file");
        return 0;
      }
      /* chaco_input_assign closes fp when done. */
      if (chaco_input_assign(fp, filename, ch_nvtxs, ch_assignments) != 0){
        Gen_Error(0, "fatal: Error returned from chaco_input_assign");
        return 0;
      }
    }
  }

  MPI_Bcast(&base, 1, MPI_INT, 0, MPI_COMM_WORLD);

  if (distributed_pins){
    gnhedges = nhedges;
    nhedges = 0;
    hewgt_dim = 0;
    hewgts = NULL;
    for (edge=0; edge<gnhedges; edge++){
      edgeSize = hindex[edge+1] - hindex[edge];
      if (edgeSize > 0) nhedges++;
    }
    hgid = (int *)malloc(nhedges * sizeof(int));
    hvertex_proc = (int *)malloc(npins * sizeof(int));
    if ((nhedges && !hgid) || (npins && !hvertex_proc)) {
      Gen_Error(0, "fatal: insufficient memory");
      return 0;
    }
    nextEdge=0;
    vtx=0;
    for (edge=0; edge<gnhedges; edge++){
      edgeSize = hindex[edge+1] - hindex[edge];
      if (edgeSize > 0){
	hgid[nextEdge] = edge+1;
	if (nextEdge < edge){
	  hindex[nextEdge+1] = hindex[nextEdge] + edgeSize;
	}
	for (vertex=0; vertex<edgeSize; vertex++,vtx++){
	  hvertex_proc[vtx] = ch_dist_proc(hvertex[vtx], NULL, 1);
	}
	nextEdge++;
      }
    }
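    /* Worked toy example: gnhedges = 3 with pin counts {2, 0, 3} keeps
       edges 0 and 2, so nhedges = 2, hgid = {1, 3} (IDs are edge+1), and
       hindex is squeezed from {0, 2, 2, 5} to {0, 2, 5}. */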
    gnvtxs = nvtxs;
    nvtxs = ch_dist_num_vtx(Proc, NULL);
    if (ch_start){    /* need to include only vertices this process owns */
      for (i=0,vertex=0; i<gnvtxs; i++){
        if ((ch_start[i+1] > ch_start[vertex]) || /* vtx has adjacencies so it's mine */
            (ch_dist_proc(i, NULL, 0) == Proc)) { /* my vtx with no adjacencies */
          if (i > vertex){
            ch_start[vertex+1] = ch_start[i+1];
          }
          vertex++;
        }
      }
    }
#if 0
    debug_lists(Proc, Num_Proc, nhedges, hindex, hvertex, hvertex_proc, hgid);
#endif
  } else{

    /* Distribute hypergraph graph */
    /* Use hypergraph vertex information and chaco edge information. */

    if (chaco_dist_graph(MPI_COMM_WORLD, pio_info, 0, &gnvtxs, &nvtxs,
            &ch_start, &ch_adj, &vwgt_dim, &vwgts, &ch_ewgt_dim, &ch_ewgts,
            &ch_ndim, &ch_x, &ch_y, &ch_z, &ch_assignments) == 0) {
      Gen_Error(0, "fatal: Error returned from chaco_dist_graph");
      return 0;
    }

    if (!dist_hyperedges(MPI_COMM_WORLD, pio_info, 0, base, gnvtxs, &gnhedges,
		       &nhedges, &hgid, &hindex, &hvertex, &hvertex_proc,
		       &hewgt_dim, &hewgts, ch_assignments)) {
      Gen_Error(0, "fatal: Error returned from dist_hyperedges");
      return 0;
    }
  }


  /* Initialize mesh structure for Hypergraph. */
  mesh->data_type = HYPERGRAPH;
  mesh->num_elems = nvtxs;
  mesh->vwgt_dim = vwgt_dim;
  mesh->ewgt_dim = ch_ewgt_dim;
  mesh->elem_array_len = mesh->num_elems + 5;
  mesh->num_dims = ch_ndim;
  mesh->num_el_blks = 1;

  mesh->gnhedges = gnhedges;
  mesh->nhedges = nhedges;
  mesh->hewgt_dim = hewgt_dim;

  mesh->hgid = hgid;
  mesh->hindex = hindex;
  mesh->hvertex = hvertex;
  mesh->hvertex_proc = hvertex_proc;
  mesh->heNumWgts = nhedges;
  mesh->heWgtId = NULL;
  mesh->hewgts = hewgts;


  mesh->eb_etypes = (int *) malloc (5 * mesh->num_el_blks * sizeof(int));
  if (!mesh->eb_etypes) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }
  mesh->eb_ids = mesh->eb_etypes + mesh->num_el_blks;
  mesh->eb_cnts = mesh->eb_ids + mesh->num_el_blks;
  mesh->eb_nnodes = mesh->eb_cnts + mesh->num_el_blks;
  mesh->eb_nattrs = mesh->eb_nnodes + mesh->num_el_blks;

  mesh->eb_names = (char **) malloc (mesh->num_el_blks * sizeof(char *));
  if (!mesh->eb_names) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }

  mesh->eb_etypes[0] = -1;
  mesh->eb_ids[0] = 1;
  mesh->eb_cnts[0] = nvtxs;
  mesh->eb_nattrs[0] = 0;
  /*
   * Each element has one set of coordinates (i.e., node) if a coords file
   * was provided; zero otherwise.
   */
  MPI_Bcast( &ch_no_geom, 1, MPI_INT, 0, MPI_COMM_WORLD);
  if (ch_no_geom)
    mesh->eb_nnodes[0] = 0;
  else
    mesh->eb_nnodes[0] = 1;

  /* allocate space for name */
  mesh->eb_names[0] = (char *) malloc((MAX_STR_LENGTH+1) * sizeof(char));
  if (!mesh->eb_names[0]) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }
  strcpy(mesh->eb_names[0], "hypergraph");

  /* allocate the element structure array */
  mesh->elements = (ELEM_INFO_PTR) malloc (mesh->elem_array_len
					 * sizeof(ELEM_INFO));
  if (!(mesh->elements)) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }

  /*
   * initialize all of the element structs as unused by
   * setting the globalID to -1
   */
  for (i = 0; i < mesh->elem_array_len; i++)
    initialize_element(&(mesh->elements[i]));

  /*
   * now fill the element structure array with the
   * information from the Chaco file
   * Use hypergraph vertex information and chaco edge information.
   */
  if (!chaco_fill_elements(Proc, Num_Proc, prob, mesh, gnvtxs, nvtxs,
		     ch_start, ch_adj, vwgt_dim, vwgts, ch_ewgt_dim, ch_ewgts,
		     ch_ndim, ch_x, ch_y, ch_z, ch_assignments, base)) {
    Gen_Error(0, "fatal: Error returned from chaco_fill_elements");
    return 0;
  }
#if 0
  debug_elements(Proc, Num_Proc, mesh->num_elems,mesh->elements);
#endif

  safe_free((void **)(void *) &vwgts);
  safe_free((void **)(void *) &ch_ewgts);
  safe_free((void **)(void *) &ch_vwgts);
  safe_free((void **)(void *) &ch_x);
  safe_free((void **)(void *) &ch_y);
  safe_free((void **)(void *) &ch_z);
  safe_free((void **)(void *) &ch_start);
  safe_free((void **)(void *) &ch_adj);
  safe_free((void **)(void *) &ch_assignments);

  if (Debug_Driver > 3)
    print_distributed_mesh(Proc, Num_Proc, mesh);

  DEBUG_TRACE_END(Proc, yo);
  return 1;
}