Example #1
/*--------------------------------------------------------------------------*/
int output_results(const char *cmd_file,
                   const char *tag,
                   int Proc,
                   int Num_Proc,
                   PROB_INFO_PTR prob,
                   PARIO_INFO_PTR pio_info,
                   MESH_INFO_PTR mesh)
/*
 * For this first pass, don't try to create a new
 * exodus/nemesis file or anything.  Just get the global ids,
 * sort them, and print them to a new ASCII file.
 */
{
  /* Local declarations. */
  const char  *yo = "output_results";
  char   par_out_fname[FILENAME_MAX+1], ctemp[FILENAME_MAX+1];
  char cmsg[256];

  int   *global_ids = NULL;
  int   *parts = NULL;
  int   *perm = NULL;
  int   *invperm = NULL;
  int   *index = NULL;
  int    i, j;

  FILE  *fp;
/***************************** BEGIN EXECUTION ******************************/

  DEBUG_TRACE_START(Proc, yo);

  if (mesh->num_elems) {
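     /* One malloc below provides five equal-length int arrays; parts, perm,
      * invperm, and index alias successive slices of it, so the single
      * free(global_ids) later releases them all. */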
     global_ids = (int *) malloc(5 * mesh->num_elems * sizeof(int));
     if (!global_ids) {
       Gen_Error(0, "fatal: insufficient memory");
       return 0;
     }
     parts = global_ids + mesh->num_elems;
     perm = parts + mesh->num_elems;
     invperm = perm + mesh->num_elems;
     index = invperm + mesh->num_elems;
  }

  for (i = j = 0; i < mesh->elem_array_len; i++) {
    if (mesh->elements[i].globalID >= 0) {
      global_ids[j] = mesh->elements[i].globalID;
      parts[j] = mesh->elements[i].my_part;
      perm[j] = mesh->elements[i].perm_value;
      invperm[j] = mesh->elements[i].invperm_value;
      index[j] = j;
      j++;
    }
  }

  sort_index(mesh->num_elems, global_ids, index);

  /* generate the parallel filename for this processor */
  strcpy(ctemp, pio_info->pexo_fname);
  strcat(ctemp, ".");
  strcat(ctemp, tag);
  gen_par_filename(ctemp, par_out_fname, pio_info, Proc, Num_Proc);

  fp = fopen(par_out_fname, "w");

  if (fp == NULL){
    sprintf(cmsg, "Error in %s; %s cannot be opened for writing.", yo, par_out_fname);
    Gen_Error(0, cmsg);
    return 0;
  }

  if (Proc == 0) 
    echo_cmd_file(fp, cmd_file);

  fprintf(fp, "Global element ids assigned to processor %d\n", Proc);
  fprintf(fp, "GID\tPart\tPerm\tIPerm\n");
  for (i = 0; i < mesh->num_elems; i++) {
    j = index[i];
    fprintf(fp, "%d\t%d\t%d\t%d\n", global_ids[j], parts[j], perm[j], invperm[j]);
  }

  fclose(fp);
  free(global_ids);

  if (Output.Mesh_Info_File) {

    ELEM_INFO_PTR current_element;
    int total_nodes = 0;
    float *x, *y, *z;
    int k;
    int prev_id;

    for (i = 0; i < mesh->num_elems; i++) {
      total_nodes += mesh->eb_nnodes[mesh->elements[i].elem_blk];
    }
    global_ids = (int *) malloc(2 * total_nodes * sizeof(int));
    x = (float *) calloc(3 * total_nodes, sizeof(float));
    if (!global_ids || !x) {
      Gen_Error(0, "fatal: insufficient memory");
      return 0;
    }
    index = global_ids + total_nodes;
    y = x + total_nodes;
    z = y + total_nodes;

    for (k = 0, i = 0; i < mesh->num_elems; i++) {
      current_element = &(mesh->elements[i]);
      for (j = 0; j < mesh->eb_nnodes[current_element->elem_blk]; j++) {
        global_ids[k] = current_element->connect[j];
        x[k] = current_element->coord[j][0];
        if (mesh->num_dims > 1) 
          y[k] = current_element->coord[j][1];
        if (mesh->num_dims > 2)
          z[k] = current_element->coord[j][2];
        index[k] = k;
        k++;
      }
    }

    sort_index(total_nodes, global_ids, index);

    strcat(par_out_fname, ".mesh");
    fp = fopen(par_out_fname, "w");
    if (fp == NULL) {
      Gen_Error(0, "fatal: cannot open mesh info file for writing");
      return 0;
    }
    fprintf(fp, "Vertex IDs and coordinates\n");
    prev_id = -1;
    for (k = 0; k < total_nodes; k++) {
      j = index[k];
      if (global_ids[j] == prev_id)
        continue;
      prev_id = global_ids[j];
      fprintf(fp, "  %d  (%e, %e, %e)\n", global_ids[j], x[j], y[j], z[j]);
    }
    fprintf(fp, "\n");
    fprintf(fp, "Element connectivity:\n");
    for (i = 0; i < mesh->num_elems; i++) {
      current_element = &(mesh->elements[i]);
      fprintf(fp, "  %d  (", current_element->globalID);
      for (j = 0; j < mesh->eb_nnodes[current_element->elem_blk]; j++) {
        fprintf(fp, "%d  ", current_element->connect[j]);
      }
      fprintf(fp, ")\n");
    }
    
    fclose(fp);
    free(global_ids);
    free(x);
  }

  DEBUG_TRACE_END(Proc, yo);
  return 1;
}
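
A note on sort_index, used twice above: it is the driver's own helper. A
minimal portable sketch of its apparent contract -- reorder index[] so that
keys[index[i]] ascends while keys[] itself stays in place -- might look like
this (the file-scope key pointer works around qsort's context-free
comparator; this is an assumption about the contract, not the driver's
implementation):

#include <stdlib.h>

static const int *sort_keys;   /* keys consulted by the comparator */

static int cmp_by_key(const void *a, const void *b)
{
  int ia = *(const int *) a, ib = *(const int *) b;
  if (sort_keys[ia] < sort_keys[ib]) return -1;
  if (sort_keys[ia] > sort_keys[ib]) return  1;
  return 0;
}

/* Reorder index[0..n-1] so that keys[index[i]] is ascending. */
static void sort_index_sketch(int n, const int *keys, int *index)
{
  sort_keys = keys;
  qsort(index, (size_t) n, sizeof(int), cmp_by_key);
}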
Example #2
/*--------------------------------------------------------------------------*/
int output_gnu(const char *cmd_file,
               const char *tag,
               int Proc,
               int Num_Proc,
               PROB_INFO_PTR prob,
               PARIO_INFO_PTR pio_info,
               MESH_INFO_PTR mesh)
/*
 * For 2D problems, output files that can be read by gnuplot for looking at
 * results.
 * We'll do 3D problems later.
 *
 * One gnuplot file is written for each partition.  
 * When number of partitions == number of processors, there is one file per
 * processor.
 *
 * For Chaco input files, the file written contains coordinates of owned
 * nodes and all nodes in that partition connected to the owned nodes. When
 * drawn "with linespoints", the subdomains are drawn, but lines connecting the
 * subdomains are not drawn.
 *
 * For Nemesis input files, the file written contains the coordinates of
 * each node of owned elements.  When drawn "with lines", the element outlines
 * for each owned element are drawn.
 *
 * In addition, processor 0 writes a gnuplot command file telling gnuplot how
 * to process the individual coordinate files written.  This file can be used
 * with the gnuplot "load" command to simplify generation of the plots.
 */
{
  /* Local declarations. */
  const char  *yo = "output_gnu";
  char   par_out_fname[FILENAME_MAX+1], ctemp[FILENAME_MAX+1];
  ELEM_INFO *current_elem, *nbor_elem;
  int    nbor, num_nodes;
  const char  *datastyle = NULL;
  int    i, j, nelems;
  int    prev_part = -1;
  int    max_part = -1;
  float    locMaxX = -FLT_MAX;   /* FLT_MAX from <float.h> */
  float    locMinX =  FLT_MAX;
  float    locMaxY = -FLT_MAX;
  float    locMinY =  FLT_MAX;
  float    globMaxX = -FLT_MAX;
  float    globMinX =  FLT_MAX;
  float    globMaxY = -FLT_MAX;
  float    globMinY =  FLT_MAX;
  int    gmax_part = Num_Proc-1;
  int    gnum_part = Num_Proc;
  int   *parts = NULL;
  int   *index = NULL;
  int   *elem_index = NULL;
  FILE  *fp = NULL;
/***************************** BEGIN EXECUTION ******************************/

  if(Output.Gnuplot < 0)
  {
    Gen_Error(0,"warning: 'gnuplot output' parameter set to invalid negative value.");
    return 0;
  }

  DEBUG_TRACE_START(Proc, yo);

  if (mesh->num_dims > 2) {
    Gen_Error(0, "warning: cannot generate gnuplot data for 3D problems.");
    DEBUG_TRACE_END(Proc, yo);
    return 0;
  }

  if (mesh->eb_nnodes[0] == 0) {
    /* No coordinate information is available.  */
    Gen_Error(0, "warning: cannot generate gnuplot data when no coordinate"
                 " input is given.");
    DEBUG_TRACE_END(Proc, yo);
    return 0;
  }

  /* 
   * Build arrays of partition number to sort by.  Index and elem_index arrays 
   * will be used even when plotting by processor numbers (for generality), 
   * so build it regardless. 
   */
  nelems = mesh->num_elems - mesh->blank_count;

  if (nelems > 0) {
    parts = (int *) malloc(3 * nelems * sizeof(int));
    if (!parts) {
      Gen_Error(0, "fatal: insufficient memory");
      return 0;
    }
    index = parts + nelems;
    elem_index = index + nelems;
    for (j = 0, i = 0; i < mesh->elem_array_len; i++) {
      current_elem = &(mesh->elements[i]);
      if (current_elem->globalID >= 0) {

        if (mesh->blank_count && (mesh->blank[i] == 1)) continue;
        
        if (current_elem->my_part > max_part) max_part = current_elem->my_part;
        parts[j] = (Output.Plot_Partition ? current_elem->my_part : Proc);
        index[j] = j;
        elem_index[j] = i;
        j++;
      }
    }
  }
  if (Output.Plot_Partition) {
    /* Sort by partition numbers.  Assumes # parts >= # proc. */
    if (nelems > 0) 
      sort_index(nelems, parts, index);
    MPI_Allreduce(&max_part, &gmax_part, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    gnum_part = gmax_part + 1;
  }

  /* generate the parallel filename for this processor */
  strcpy(ctemp, pio_info->pexo_fname);
  strcat(ctemp, ".");
  strcat(ctemp, tag);
  strcat(ctemp, ".gnu");


  if (pio_info->file_type == CHACO_FILE ||
      pio_info->file_type == NO_FILE_POINTS ||
      pio_info->file_type == NO_FILE_TRIANGLES ||
      pio_info->file_type == HYPERGRAPH_FILE) {
    /* 
     * For each node of Chaco graph, print the coordinates of the node.
     * Then, for each neighboring node on the processor, print the neighbor's
     * coordinates.
     */
    datastyle = "linespoints";
    for (i = 0; i < nelems; i++) {
      current_elem = &(mesh->elements[elem_index[index[i]]]);
      if (parts[index[i]] != prev_part) {
        if (fp != NULL) fclose(fp);
        gen_par_filename(ctemp, par_out_fname, pio_info, 
                         parts[index[i]], Num_Proc);
        fp = fopen(par_out_fname, "w");
        if (fp == NULL) {
          Gen_Error(0, "fatal: cannot open gnuplot data file for writing");
          safe_free((void **)(void *) &parts);
          return 0;
        }
        prev_part = parts[index[i]];
      }
    
      /* Include the point itself, so that even if there are no edges,
       * the point will appear.  */
      fprintf(fp, "\n%e %e\n", 
              current_elem->coord[0][0], current_elem->coord[0][1]);

      /* save max and min x/y coords */
      if(current_elem->coord[0][0] < locMinX)
      {
        locMinX = current_elem->coord[0][0];
      }
      if(current_elem->coord[0][0] > locMaxX)
      {
        locMaxX = current_elem->coord[0][0];
      }
      if(current_elem->coord[0][1] < locMinY)
      {
        locMinY = current_elem->coord[0][1];
      }
      if(current_elem->coord[0][1] > locMaxY)
      {
        locMaxY = current_elem->coord[0][1];
      }

      if (Output.Gnuplot > 1) {
        for (j = 0; j < current_elem->nadj; j++) {
          if (current_elem->adj_proc[j] == Proc) {  /* Nbor is on same proc */
            if (mesh->blank_count && (mesh->blank[current_elem->adj[j]] == 1))
              continue;
            if (!Output.Plot_Partition || 
                mesh->elements[current_elem->adj[j]].my_part == 
                             current_elem->my_part) {  
              /* Not plotting partitions, or nbor is in same partition */
              /* Plot the edge.  Need to include current point and nbor point
               * for each edge. */
              fprintf(fp, "\n%e %e\n", 
                  current_elem->coord[0][0], current_elem->coord[0][1]);
              nbor = current_elem->adj[j];
              nbor_elem = &(mesh->elements[nbor]);
              fprintf(fp, "%e %e\n",
                      nbor_elem->coord[0][0], nbor_elem->coord[0][1]);
            }
          }
        }

      }


    }

    MPI_Reduce(&locMinX,&globMinX,1,MPI_FLOAT,MPI_MIN,0,MPI_COMM_WORLD);
    MPI_Reduce(&locMinY,&globMinY,1,MPI_FLOAT,MPI_MIN,0,MPI_COMM_WORLD);
    MPI_Reduce(&locMaxX,&globMaxX,1,MPI_FLOAT,MPI_MAX,0,MPI_COMM_WORLD);
    MPI_Reduce(&locMaxY,&globMaxY,1,MPI_FLOAT,MPI_MAX,0,MPI_COMM_WORLD);

  }
  else if (pio_info->file_type == NEMESIS_FILE) { /* Nemesis input file */
    /* 
     *  For each element of Nemesis input file, print the coordinates of its
     *  nodes.  No need to follow neighbors, as decomposition is by elements.
     */
    double sum[2];
    datastyle = "lines";
    for (i = 0; i < nelems; i++) {
      current_elem = &(mesh->elements[elem_index[index[i]]]);
      if (parts[index[i]] != prev_part) {
        if (fp != NULL) fclose(fp);
        gen_par_filename(ctemp, par_out_fname, pio_info, 
                         parts[index[i]], Num_Proc);
        fp = fopen(par_out_fname, "w");
        if (fp == NULL) {
          Gen_Error(0, "fatal: cannot open gnuplot data file for writing");
          safe_free((void **)(void *) &parts);
          return 0;
        }
        prev_part = parts[index[i]];
      }
      num_nodes = mesh->eb_nnodes[current_elem->elem_blk];
      sum[0] = sum[1] = 0.0;
      for (j = 0; j < num_nodes; j++) {
        fprintf(fp, "%e %e\n", 
                current_elem->coord[j][0], current_elem->coord[j][1]);
        sum[0] += current_elem->coord[j][0];
        sum[1] += current_elem->coord[j][1];
      }
      fprintf(fp, "%e %e\n", current_elem->coord[0][0], 
                             current_elem->coord[0][1]);
      fprintf(fp, "\n");
      /* Print small + in center of element */
      sum[0] /= num_nodes;
      sum[1] /= num_nodes;
      fprintf(fp, "%e %e\n",   sum[0] - 0.001, sum[1]);
      fprintf(fp, "%e %e\n\n", sum[0] + 0.001, sum[1]);
      fprintf(fp, "%e %e\n",   sum[0], sum[1] - 0.001);
      fprintf(fp, "%e %e\n\n", sum[0], sum[1] + 0.001);
    }
  }
  
  if (nelems == 0 && !Output.Plot_Partition) { 
    /* Open a file just so one exists; satisfies the gnuload file. */
    gen_par_filename(ctemp, par_out_fname, pio_info, Proc, Num_Proc);
    fp = fopen(par_out_fname, "w");
  }
    
  if (fp != NULL) fclose(fp);
  safe_free((void **)(void *) &parts);

  if (Proc == 0) {
    /* Write gnu master file with gnu commands for plotting */
    strcpy(ctemp, pio_info->pexo_fname);
    strcat(ctemp, ".");
    strcat(ctemp, tag);
    strcat(ctemp, ".gnuload");
    fp = fopen(ctemp, "w");
    if (fp == NULL) {
      Gen_Error(0, "fatal: cannot open gnuload file for writing");
      return 0;
    }
    fprintf(fp, "set nokey\n");
    fprintf(fp, "set nolabel\n");
    fprintf(fp, "set noxzeroaxis\n");
    fprintf(fp, "set noyzeroaxis\n");
    fprintf(fp, "set noxtics\n");
    fprintf(fp, "set noytics\n");
    fprintf(fp, "set data style %s\n", datastyle ? datastyle : "points");

    /* resize range so that there is a 5% border around data */
    fprintf(fp, "set xrange [%f:%f]\n", globMinX - (globMaxX-globMinX)/20,
                                        globMaxX + (globMaxX-globMinX)/20);
    fprintf(fp, "set yrange [%f:%f]\n", globMinY - (globMaxY-globMinY)/20,
                                        globMaxY + (globMaxY-globMinY)/20);


    fprintf(fp, "plot ");
    strcpy(ctemp, pio_info->pexo_fname);
    strcat(ctemp, ".");
    strcat(ctemp, tag);
    strcat(ctemp, ".gnu");
    for (i = 0; i < gnum_part; i++) {
      gen_par_filename(ctemp, par_out_fname, pio_info, i, Num_Proc);
      fprintf(fp, "\"%s\"", par_out_fname);
      if (i != gnum_part-1) {
        fprintf(fp, ",\\\n");
      }
    }
    fprintf(fp, "\n");
    fclose(fp);
  }

  DEBUG_TRACE_END(Proc, yo);
  return 1;
}
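
The bounding-box bookkeeping above (track local extremes, then MPI_Reduce to
rank 0 for the xrange/yrange commands) is self-contained enough to sketch on
its own. A hedged version, assuming a plain array of n (x,y) pairs rather
than the driver's element structures:

#include <float.h>
#include <stdio.h>
#include <mpi.h>

/* Reduce local 2D extremes to global extremes on rank 0 and print the
 * padded gnuplot range, mirroring the 5% border computed above. */
void global_bbox(const float (*coords)[2], int n, MPI_Comm comm)
{
  float locMin[2] = { FLT_MAX,  FLT_MAX};
  float locMax[2] = {-FLT_MAX, -FLT_MAX};
  float globMin[2], globMax[2];
  int i, rank;

  for (i = 0; i < n; i++) {
    if (coords[i][0] < locMin[0]) locMin[0] = coords[i][0];
    if (coords[i][0] > locMax[0]) locMax[0] = coords[i][0];
    if (coords[i][1] < locMin[1]) locMin[1] = coords[i][1];
    if (coords[i][1] > locMax[1]) locMax[1] = coords[i][1];
  }
  MPI_Reduce(locMin, globMin, 2, MPI_FLOAT, MPI_MIN, 0, comm);
  MPI_Reduce(locMax, globMax, 2, MPI_FLOAT, MPI_MAX, 0, comm);

  MPI_Comm_rank(comm, &rank);
  if (rank == 0)
    printf("set xrange [%f:%f]\n", globMin[0] - (globMax[0]-globMin[0])/20,
                                   globMax[0] + (globMax[0]-globMin[0])/20);
}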
Example #3
int write_elem_vars(
  int Proc,
  MESH_INFO_PTR mesh,
  PARIO_INFO_PTR pio_info, 
  int num_exp, 
  ZOLTAN_ID_PTR exp_gids,
  int *exp_procs,
  int *exp_to_part
)
{
/* Routine to write processor assignments per element to the nemesis files. */
int iblk;
int i, j;
int tmp;
float ver;
int pexoid, cpu_ws = 0, io_ws = 0;
int max_cnt = 0;
float *vars;
char par_nem_fname[FILENAME_MAX+1];
char tmp_nem_fname[FILENAME_MAX+1];
int Num_Proc;
char cmesg[256];
char *str = "Proc";

  /* generate the parallel filename for this processor */
  MPI_Comm_size(MPI_COMM_WORLD, &Num_Proc);
  gen_par_filename(pio_info->pexo_fname, tmp_nem_fname, pio_info, Proc,
                   Num_Proc);
  /*
   * Copy the parallel file to a new file (so we don't write results into
   * the CVS-controlled original).
   */
  sprintf(cmesg, "%s.blot", pio_info->pexo_fname);
  gen_par_filename(cmesg, par_nem_fname, pio_info, Proc,
                   Num_Proc);
  fcopy(tmp_nem_fname, par_nem_fname);

  if ((pexoid = ex_open(par_nem_fname, EX_WRITE, &cpu_ws, &io_ws,
                        &ver)) < 0) {
    sprintf(cmesg,"fatal: could not open parallel Exodus II file %s",
            par_nem_fname);
    Gen_Error(0, cmesg);
    return 0;
  }

  if (ex_put_var_names(pexoid, "e", 1, &str) < 0) {
    Gen_Error(0, "Error returned from ex_put_var_names.");
    return 0;
  }

  /* Get max number of elements in an element block; alloc vars array to size */
  for (iblk = 0; iblk < mesh->num_el_blks; iblk++)
    max_cnt = (mesh->eb_cnts[iblk] > max_cnt ? mesh->eb_cnts[iblk] : max_cnt);

  vars = (float *) malloc(max_cnt * sizeof(float));
  if (!vars) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }

  /* Must write data by element block; gather the data */
  for (iblk = 0; iblk < mesh->num_el_blks; iblk++) {
    for (j = 0, i = 0; i < mesh->num_elems; i++) {
      if (mesh->elements[i].elem_blk == iblk) {
        /* Element is in block; see whether it is to be exported. */
        if ((tmp = in_list(mesh->elements[i].globalID, num_exp,
                           (int *) exp_gids)) != -1)
          vars[j++] = (Output.Plot_Partition ? (float) exp_to_part[tmp]
                                             : (float) exp_procs[tmp]);
        else
          vars[j++] = (Output.Plot_Partition ? (float) mesh->elements[i].my_part
                                             : (float) Proc);
      }
    }
    if (ex_put_elem_var(pexoid, 1, 1, mesh->eb_ids[iblk], 
                        mesh->eb_cnts[iblk], vars) < 0) {
      Gen_Error(0, "fatal: Error returned from ex_put_elem_var");
      safe_free((void **)(void *) &vars);
      return 0;
    }
  }

  safe_free((void **)(void *) &vars);
  /* Close the parallel file */
  if(ex_close (pexoid) < 0) {
    Gen_Error(0, "fatal: Error returned from ex_close");
    return 0;
  }
  return 1;
}
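
The heart of write_elem_vars is the per-block gather: walk every element,
keep only those in the current block, and record either the export
destination or the local rank. A standalone sketch under stated assumptions
(find_in_list is a hypothetical linear search standing in for the driver's
in_list, and the element layout is flattened into plain arrays):

/* Hypothetical stand-in for in_list: position of value in list[0..count-1],
 * or -1 if absent. */
static int find_in_list(int value, int count, const int *list)
{
  int i;
  for (i = 0; i < count; i++)
    if (list[i] == value) return i;
  return -1;
}

/* Gather one float per element of block blk_id: exported elements get their
 * destination, all others get the local rank.  Returns the gathered count. */
static int gather_block_vals(int nelem, const int *elem_blk, const int *gids,
                             int blk_id, int num_exp, const int *exp_gids,
                             const int *exp_dest, int my_rank, float *vals)
{
  int i, pos, j = 0;
  for (i = 0; i < nelem; i++) {
    if (elem_blk[i] != blk_id) continue;
    pos = find_in_list(gids[i], num_exp, exp_gids);
    vals[j++] = (pos != -1) ? (float) exp_dest[pos] : (float) my_rank;
  }
  return j;
}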
Example #4
int read_exoII_file(int Proc,
                    int Num_Proc,
                    PROB_INFO_PTR prob,
                    PARIO_INFO_PTR pio_info,
                    MESH_INFO_PTR mesh)
{
#ifndef ZOLTAN_NEMESIS
  Gen_Error(0, "Fatal:  Nemesis requested but not linked with driver.");
  return 0;

#else /* ZOLTAN_NEMESIS */
  /* Local declarations. */
  char  *yo = "read_exoII_file";
  char   par_nem_fname[FILENAME_MAX+1], title[MAX_LINE_LENGTH+1];
  char   cmesg[256];

  float  ver;

  int    i, pexoid, cpu_ws = 0, io_ws = 0;
  int   *nnodes = NULL, *etypes = NULL;
#ifdef DEBUG_EXO
  int    j, k, elem;
#endif
  FILE  *fdtmp;

/***************************** BEGIN EXECUTION ******************************/

  DEBUG_TRACE_START(Proc, yo);

  /* since this is a test driver, set error reporting in exodus */
  ex_opts(EX_VERBOSE | EX_DEBUG);

  /* generate the parallel filename for this processor */
  gen_par_filename(pio_info->pexo_fname, par_nem_fname, pio_info, Proc,
                   Num_Proc);

  /*
   * Check whether the parallel file exists.  Do the check with fopen,
   * as ex_open core dumps on the Paragon when files do not exist.
   */

  if ((fdtmp = fopen(par_nem_fname, "r")) == NULL) {
    sprintf(cmesg,"fatal: parallel Exodus II file %s does not exist",
            par_nem_fname);
    Gen_Error(0, cmesg);
    return 0;
  }
  else
    fclose(fdtmp);

  /*
   * now open the existing parallel file using Exodus calls.
   */

  if ((pexoid = ex_open(par_nem_fname, EX_READ, &cpu_ws, &io_ws,
                        &ver)) < 0) {
    sprintf(cmesg,"fatal: could not open parallel Exodus II file %s",
            par_nem_fname);
    Gen_Error(0, cmesg);
    return 0;
  }

  /* and get initial information */
  if (ex_get_init(pexoid, title, &(mesh->num_dims),
                  &(mesh->num_nodes), &(mesh->num_elems),
                  &(mesh->num_el_blks), &(mesh->num_node_sets),
                  &(mesh->num_side_sets)) < 0) {
    Gen_Error(0, "fatal: Error returned from ex_get_init");
    return 0;
  }


  /* allocate some memory for the element blocks */
  mesh->data_type = MESH;
  mesh->vwgt_dim = 1;  /* One weight for now. */
  mesh->ewgt_dim = 1;  /* One weight for now. */
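  /* One malloc below provides five equal-length int arrays; eb_ids, eb_cnts,
   * eb_nnodes, and eb_nattrs alias successive slices of eb_etypes. */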
  mesh->eb_etypes = (int *) malloc (5 * mesh->num_el_blks * sizeof(int));
  if (!mesh->eb_etypes) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }
  mesh->eb_ids = mesh->eb_etypes + mesh->num_el_blks;
  mesh->eb_cnts = mesh->eb_ids + mesh->num_el_blks;
  mesh->eb_nnodes = mesh->eb_cnts + mesh->num_el_blks;
  mesh->eb_nattrs = mesh->eb_nnodes + mesh->num_el_blks;

  mesh->eb_names = (char **) malloc (mesh->num_el_blks * sizeof(char *));
  if (!mesh->eb_names) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }

  mesh->hindex = (int *) malloc(sizeof(int));
  if (!mesh->hindex) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }
  mesh->hindex[0] = 0;

  if (ex_get_elem_blk_ids(pexoid, mesh->eb_ids) < 0) {
    Gen_Error(0, "fatal: Error returned from ex_get_elem_blk_ids");
    return 0;
  }

  /* allocate temporary storage for items needing global reduction.   */
  /* nemesis does not store most element block info about blocks for  */
  /* which the processor owns no elements.                            */
  /* we, however, use this information in migration, so we need to    */
  /* accumulate it for all element blocks.    kdd 2/2001              */

  if (mesh->num_el_blks > 0) {
    nnodes = (int *) malloc(2 * mesh->num_el_blks * sizeof(int));
    if (!nnodes) {
      Gen_Error(0, "fatal: insufficient memory");
      return 0;
    }
    etypes = nnodes + mesh->num_el_blks;
  }

  /* get the element block information */
  for (i = 0; i < mesh->num_el_blks; i++) {

    /* allocate space for name */
    mesh->eb_names[i] = (char *) malloc((MAX_STR_LENGTH+1) * sizeof(char));
    if (!mesh->eb_names[i]) {
      Gen_Error(0, "fatal: insufficient memory");
      return 0;
    }

    if (ex_get_elem_block(pexoid, mesh->eb_ids[i], mesh->eb_names[i],
                          &(mesh->eb_cnts[i]), &(nnodes[i]),
                          &(mesh->eb_nattrs[i])) < 0) {
      Gen_Error(0, "fatal: Error returned from ex_get_elem_block");
      return 0;
    }

    if (mesh->eb_cnts[i] > 0) {
      if ((etypes[i] =  (int) get_elem_type(mesh->eb_names[i],
                                            nnodes[i],
                                            mesh->num_dims)) == E_TYPE_ERROR) {
        Gen_Error(0, "fatal: could not get element type");
        return 0;
      }
    }
    else etypes[i] = (int) NULL_EL;
  }

  /* Perform reduction on necessary fields of element blocks.  kdd 2/2001 */
  MPI_Allreduce(nnodes, mesh->eb_nnodes, mesh->num_el_blks, MPI_INT, MPI_MAX, 
                MPI_COMM_WORLD);
  MPI_Allreduce(etypes, mesh->eb_etypes, mesh->num_el_blks, MPI_INT, MPI_MIN, 
                MPI_COMM_WORLD);
  for (i = 0; i < mesh->num_el_blks; i++) {
    strcpy(mesh->eb_names[i], get_elem_name(mesh->eb_etypes[i]));
  }
  free(nnodes);

  /*
   * allocate memory for the elements;
   * allocate a little extra for element migration later
   */
  mesh->elem_array_len = mesh->num_elems + 5;
  mesh->elements = (ELEM_INFO_PTR) malloc (mesh->elem_array_len 
                                         * sizeof(ELEM_INFO));
  if (!(mesh->elements)) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }

  /*
   * initialize all of the element structs as unused by
   * setting the globalID to -1
   */
  for (i = 0; i < mesh->elem_array_len; i++) 
    initialize_element(&(mesh->elements[i]));

  /* read the information for the individual elements */
  if (!read_elem_info(pexoid, Proc, prob, mesh)) {
    Gen_Error(0, "fatal: Error returned from read_elem_info");
    return 0;
  }

  /* read the communication information */
  if (!read_comm_map_info(pexoid, Proc, prob, mesh)) {
    Gen_Error(0, "fatal: Error returned from read_comm_map_info");
    return 0;
  }

  /* Close the parallel file */
  if(ex_close (pexoid) < 0) {
    Gen_Error(0, "fatal: Error returned from ex_close");
    return 0;
  }

  /* print out the distributed mesh */
  if (Debug_Driver > 3)
    print_distributed_mesh(Proc, Num_Proc, mesh);

  DEBUG_TRACE_END(Proc, yo);
  return 1;

#endif /* ZOLTAN_NEMESIS */
}
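
The pair of MPI_Allreduce calls near the end is the subtle step: Nemesis
stores block metadata only where a processor owns elements, so each field is
globalized collectively -- MAX recovers the true node count over the zeros,
and MIN recovers the true element type provided the local sentinel compares
larger than every real type (an assumption this sketch makes explicit):

#include <mpi.h>

/* Globalize per-block metadata.  local_nnodes holds 0 and local_types a
 * large sentinel for blocks with no locally owned elements. */
int globalize_block_info(int num_blks,
                         const int *local_nnodes, const int *local_types,
                         int *glob_nnodes, int *glob_types)
{
  if (MPI_Allreduce((void *) local_nnodes, glob_nnodes, num_blks,
                    MPI_INT, MPI_MAX, MPI_COMM_WORLD) != MPI_SUCCESS)
    return 0;
  if (MPI_Allreduce((void *) local_types, glob_types, num_blks,
                    MPI_INT, MPI_MIN, MPI_COMM_WORLD) != MPI_SUCCESS)
    return 0;
  return 1;
}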
Example #5
void NemSpread<T,INT>::read_restart_data ()

/* Function which reads the restart variable data from the EXODUS II
 * database which contains the results information. Then distribute
 * it to the processors, and write it to the parallel exodus files.
 *
 *----------------------------------------------------------------------------
 *
 * Functions called:
 *
 * read_vars -- function which reads the variable values from the restart
 *              file, and then distributes them to the processors
 *
 * write_var_timestep -- function which writes out the variables for a
 *                       time step to a parallel ExodusII file.
 *
 *----------------------------------------------------------------------------
 */

{
    const char  *yo="read_restart_data";

    /* need to get the element block ids and counts */
    std::vector<INT> eb_ids_global(globals.Num_Elem_Blk);
    std::vector<INT> eb_cnts_global(globals.Num_Elem_Blk);
    std::vector<INT> ss_ids_global(globals.Num_Side_Set);
    std::vector<INT> ss_cnts_global(globals.Num_Side_Set);
    std::vector<INT> ns_ids_global(globals.Num_Node_Set);
    std::vector<INT> ns_cnts_global(globals.Num_Node_Set);

    INT ***eb_map_ptr = NULL, **eb_cnts_local = NULL;
    int    exoid=0, *par_exoid = NULL;

    float  vers;
    char   cTemp[512];

    /* computing precision should be the same as the database precision
     *
     * EXCEPTION: if the io_ws is smaller than the machine precision,
     * ie - database with io_ws == 4 on a Cray (sizeof(float) == 8),
     * then the cpu_ws must be the machine precision.
     */
    int cpu_ws;
    if (io_ws < (int)sizeof(float)) cpu_ws = sizeof(float);
    else                            cpu_ws = io_ws;

    /* Open the ExodusII file */
    {
        int mode = EX_READ | int64api;  /* cpu_ws keeps the precision computed above */
        if ((exoid=ex_open(Exo_Res_File, mode, &cpu_ws, &io_ws, &vers)) < 0) {
            fprintf(stderr, "%s: Could not open file %s for restart info\n",
                    yo, Exo_Res_File);
            exit(1);
        }
    }

    /* allocate space for the global variables */
    Restart_Info.Glob_Vals.resize(Restart_Info.NVar_Glob);

    if (Restart_Info.NVar_Elem > 0 ) {

        /* allocate storage space */
        Restart_Info.Elem_Vals.resize(Proc_Info[2]);

        /* now allocate storage for the values */
        for (int iproc = 0; iproc <Proc_Info[2]; iproc++) {
            size_t array_size = Restart_Info.NVar_Elem *
                                (globals.Num_Internal_Elems[iproc] + globals.Num_Border_Elems[iproc]);
            Restart_Info.Elem_Vals[iproc].resize(array_size);
        }

        /*
         * at this point, I need to broadcast the global element block ids
         * and counts to the processors. I know that this is redundant data
         * since they will all receive this information in read_mesh, but
         * the variables which contain that information are static in
         * el_exoII_io.c, and cannot be used here. So, take a second and
         * broadcast all of this out.
         *
         * I want to do this here so that it is done only once no matter
         * how many time steps are retrieved
         */

        /* Get the Element Block IDs from the input file */
        if (ex_get_ids (exoid, EX_ELEM_BLOCK, TOPTR(eb_ids_global)) < 0)
        {
            fprintf(stderr, "%s: unable to get element block IDs\n", yo);
            exit(1);
        }

        /* Get the count of elements in each element block */
        for (int cnt = 0; cnt < globals.Num_Elem_Blk; cnt++) {
            if (ex_get_block(exoid, EX_ELEM_BLOCK, eb_ids_global[cnt], cTemp,
                             &(eb_cnts_global[cnt]), NULL, NULL, NULL, NULL) < 0) {
                fprintf(stderr, "%s: unable to get element count for block id "ST_ZU"\n",
                        yo, (size_t)eb_ids_global[cnt]);
                exit(1);
            }
        }

        /*
         * in order to speed up finding matches in the global element
         * number map, set up an array of pointers to the start of
         * each element block's global element number map. That way
         * only entries for the current element block have to be searched
         */
        eb_map_ptr = (INT ***) array_alloc (__FILE__, __LINE__, 2,Proc_Info[2],
                                            globals.Num_Elem_Blk, sizeof(INT *));
        if (!eb_map_ptr) {
            fprintf(stderr, "[%s]: ERROR, insufficient memory!\n", yo);
            exit(1);
        }
        eb_cnts_local = (INT **) array_alloc (__FILE__, __LINE__, 2,Proc_Info[2],
                                              globals.Num_Elem_Blk, sizeof(INT));
        if (!eb_cnts_local) {
            fprintf(stderr, "[%s]: ERROR, insufficient memory!\n", yo);
            exit(1);
        }

        /*
         * for now, assume that element blocks have been
         * stored in the same order as the global blocks
         */
        for (int iproc = 0; iproc <Proc_Info[2]; iproc++) {
            int    ifound = 0;
            size_t offset = 0;
            int    ilocal;
            for (int cnt = 0; cnt < globals.Num_Elem_Blk; cnt++) {
                for (ilocal = ifound; ilocal < globals.Proc_Num_Elem_Blk[iproc]; ilocal++) {
                    if (globals.Proc_Elem_Blk_Ids[iproc][ilocal] == eb_ids_global[cnt])
                        break;
                }

                if (ilocal < globals.Proc_Num_Elem_Blk[iproc]) {
                    eb_map_ptr[iproc][cnt] = &globals.GElems[iproc][offset];
                    eb_cnts_local[iproc][cnt] = globals.Proc_Num_Elem_In_Blk[iproc][ilocal];
                    offset += globals.Proc_Num_Elem_In_Blk[iproc][ilocal];
                    ifound = ilocal; /* don't search the same part of the list over */
                }
                else {
                    eb_map_ptr[iproc][cnt] = NULL;
                    eb_cnts_local[iproc][cnt] = 0;
                }
            }
        }

    } /* End: "if (Restart_Info.NVar_Elem > 0 )" */

    if (Restart_Info.NVar_Node > 0 ) {
        /* allocate storage space */
        Restart_Info.Node_Vals.resize(Proc_Info[2]);

        /* now allocate storage for the values */
        for (int iproc = 0; iproc <Proc_Info[2]; iproc++) {
            size_t array_size = Restart_Info.NVar_Node * (globals.Num_Internal_Nodes[iproc] +
                                globals.Num_Border_Nodes[iproc] + globals.Num_External_Nodes[iproc]);
            Restart_Info.Node_Vals[iproc].resize(array_size);
        }
    }

    if (Restart_Info.NVar_Sset > 0 ) {

        /* allocate storage space */
        Restart_Info.Sset_Vals.resize(Proc_Info[2]);

        /* now allocate storage for the values */
        for (int iproc = 0; iproc <Proc_Info[2]; iproc++) {
            size_t array_size = Restart_Info.NVar_Sset * globals.Proc_SS_Elem_List_Length[iproc];

            Restart_Info.Sset_Vals[iproc].resize(array_size);
        }

        /*
         * at this point, I need to broadcast the ids and counts to the
         * processors. I know that this is redundant data since they will
         * all receive this information in read_mesh, but the variables
         * which contain that information are static in el_exoII_io.c, and
         * cannot be used here. So, take a second and broadcast all of
         * this out.
         *
         * I want to do this here so that it is done only once no matter
         * how many time steps are retrieved
         */

        /* Get the Sideset IDs from the input file */
        if (ex_get_ids (exoid, EX_SIDE_SET, TOPTR(ss_ids_global)) < 0) {
            fprintf(stderr, "%s: unable to get sideset IDs\n", yo);
            exit(1);
        }

        /* Get the count of elements in each sideset */
        for (int cnt = 0; cnt < globals.Num_Side_Set; cnt++) {
            if (ex_get_set_param(exoid, EX_SIDE_SET,
                                 ss_ids_global[cnt],
                                 &(ss_cnts_global[cnt]), NULL) < 0) {
                fprintf(stderr, "%s: unable to get element count for sideset id "ST_ZU"\n",
                        yo, (size_t)ss_ids_global[cnt]);
                exit(1);
            }
        }
    } /* End: "if (Restart_Info.NVar_Sset > 0 )" */


    if (Restart_Info.NVar_Nset > 0 ) {

        /* allocate storage space */
        Restart_Info.Nset_Vals.resize(Proc_Info[2]);

        /* now allocate storage for the values */
        for (int iproc = 0; iproc <Proc_Info[2]; iproc++) {
            size_t array_size = Restart_Info.NVar_Nset * globals.Proc_NS_List_Length[iproc];
            Restart_Info.Nset_Vals[iproc].resize(array_size);
        }

        /*
         * at this point, I need to broadcast the ids and counts to the
         * processors. I know that this is redundant data since they will
         * all receive this information in read_mesh, but the variables
         * which contain that information are static in el_exoII_io.c, and
         * cannot be used here. So, take a second and broadcast all of
         * this out.
         *
         * I want to do this here so that it is done only once no matter
         * how many time steps are retrieved
         */

        /* Get the Nodeset IDs from the input file */
        if (ex_get_ids (exoid, EX_NODE_SET, TOPTR(ns_ids_global)) < 0) {
            fprintf(stderr, "%s: unable to get nodeset IDs\n", yo);
            exit(1);
        }

        /* Get the count of elements in each nodeset */
        for (int cnt = 0; cnt < globals.Num_Node_Set; cnt++) {
            if (ex_get_set_param(exoid, EX_NODE_SET,
                                 ns_ids_global[cnt],
                                 &(ns_cnts_global[cnt]), NULL) < 0) {
                fprintf(stderr, "%s: unable to get element count for nodeset id "ST_ZU"\n",
                        yo, (size_t)ns_ids_global[cnt]);
                exit(1);
            }
        }
    } /* End: "if (Restart_Info.NVar_Nset > 0 )" */


    /*
     * NOTE: A possible place to speed this up would be to
     * get the global node and element lists here, and broadcast
     * them out only once.
     */

    par_exoid = (int*)malloc(Proc_Info[2] * sizeof(int));
    if(!par_exoid) {
        fprintf(stderr, "[%s]: ERROR, insufficient memory!\n",
                yo);
        exit(1);
    }

    /* See if there is any '/' in the name.  If present, isolate the basename of the file */
    if (strrchr(PIO_Info.Scalar_LB_File_Name, '/') != NULL) {
        /* There is a path separator.  Get the portion after the
         * separator
         */
        strcpy(cTemp, strrchr(PIO_Info.Scalar_LB_File_Name, '/')+1);
    } else {
        /* No separator; this is already just the basename... */
        strcpy(cTemp, PIO_Info.Scalar_LB_File_Name);
    }

    if (strlen(PIO_Info.Exo_Extension) == 0)
        add_fname_ext(cTemp, ".par");
    else
        add_fname_ext(cTemp, PIO_Info.Exo_Extension);

    int open_file_count = get_free_descriptor_count();
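    /* Strategy: with enough free descriptors, open every per-processor output
     * file once up front; otherwise open and close each file inside the
     * per-timestep loop below. */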
    if (open_file_count > Proc_Info[5]) {
        printf("All output files opened simultaneously.\n");
        for (int iproc = Proc_Info[4]; iproc < Proc_Info[4]+Proc_Info[5]; iproc++) {

            gen_par_filename(cTemp, Par_Nem_File_Name, Proc_Ids[iproc],
                             Proc_Info[0]);

            /* Open the parallel Exodus II file for writing */
            cpu_ws = io_ws;
            int mode = EX_WRITE | int64api | int64db;
            if ((par_exoid[iproc]=ex_open(Par_Nem_File_Name, mode, &cpu_ws,
                                          &io_ws, &vers)) < 0) {
                fprintf(stderr,"[%d] %s Could not open parallel Exodus II file: %s\n",
                        iproc, yo, Par_Nem_File_Name);
                exit(1);
            }
        }
    } else {
        printf("All output files opened one-at-a-time.\n");
    }

    /* Now loop over the number of time steps */
    for (int time_idx = 0; time_idx < Restart_Info.Num_Times; time_idx++) {

        double start_t = second ();

        /* read and distribute the variables for this time step */
        if (read_vars(exoid, Restart_Info.Time_Idx[time_idx],
                      TOPTR(eb_ids_global), TOPTR(eb_cnts_global), eb_map_ptr,
                      eb_cnts_local,
                      TOPTR(ss_ids_global), TOPTR(ss_cnts_global),
                      TOPTR(ns_ids_global), TOPTR(ns_cnts_global)) < 0) {
            fprintf(stderr, "%s: Error occurred while reading variables\n",
                    yo);
            exit(1);
        }
        double end_t   = second () - start_t;
        printf ("\tTime to read  vars for timestep %d: %f (sec.)\n", (time_idx+1), end_t);

        start_t = second ();
        for (int iproc = Proc_Info[4]; iproc < Proc_Info[4]+Proc_Info[5]; iproc++) {

            if (open_file_count < Proc_Info[5]) {
                gen_par_filename(cTemp, Par_Nem_File_Name, Proc_Ids[iproc],
                                 Proc_Info[0]);

                /* Open the parallel Exodus II file for writing */
                cpu_ws = io_ws;
                int mode = EX_WRITE | int64api | int64db;
                if ((par_exoid[iproc]=ex_open(Par_Nem_File_Name, mode, &cpu_ws,
                                              &io_ws, &vers)) < 0) {
                    fprintf(stderr,"[%d] %s Could not open parallel Exodus II file: %s\n",
                            iproc, yo, Par_Nem_File_Name);
                    exit(1);
                }
            }

            /*
             * Write out the variable data for the time steps in this
             * block to each parallel file.
             */
            write_var_timestep(par_exoid[iproc], iproc, (time_idx+1),
                               TOPTR(eb_ids_global), TOPTR(ss_ids_global), TOPTR(ns_ids_global));

            if (iproc%10 == 0 || iproc == Proc_Info[2]-1)
                printf("%d", iproc);
            else
                printf(".");

            if (open_file_count < Proc_Info[5]) {
                if (ex_close(par_exoid[iproc]) == -1) {
                    fprintf(stderr, "[%d] %s Could not close the parallel Exodus II file.\n",
                            iproc, yo);
                    exit(1);
                }
            }
        } /* End "for (iproc = Proc_Info[4]; iproc < Proc_Info[4]+Proc_Info[5]; iproc++)" */

        end_t   = second () - start_t;
        printf ("\n\tTime to write vars for timestep %d: %f (sec.)\n", (time_idx+1), end_t);

    }
    if (Restart_Info.NVar_Elem > 0 ) {
        safe_free((void **) &eb_map_ptr);
        safe_free((void **) &eb_cnts_local);
    }

    /* Close the restart exodus II file */
    if (ex_close(exoid) == -1) {
        fprintf(stderr, "%s: Could not close the restart Exodus II file\n",
                yo);
        exit(1);
    }

    if (open_file_count > Proc_Info[5]) {
        for (int iproc = Proc_Info[4]; iproc < Proc_Info[4]+Proc_Info[5]; iproc++) {
            /* Close the parallel exodus II file */
            if (ex_close(par_exoid[iproc]) == -1) {
                fprintf(stderr, "[%d] %s Could not close the parallel Exodus II file.\n",
                        iproc, yo);
                exit(1);
            }
        }
    }
    if (par_exoid != NULL) {
        free(par_exoid);
        par_exoid = NULL;
    }
}
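
The basename handling above (strrchr for the last '/', then an extension
append) is a small pattern worth isolating. A sketch, with snprintf standing
in for the driver's add_fname_ext helper (the ".par" default mirrors the
logic above; everything else is an assumption):

#include <stdio.h>
#include <string.h>

/* Copy the basename of path into out (cap bytes) and append ext,
 * defaulting to ".par" when ext is empty. */
void make_output_name(const char *path, const char *ext,
                      char *out, size_t cap)
{
  const char *base = strrchr(path, '/');
  base = (base != NULL) ? base + 1 : path;   /* skip past the separator */
  snprintf(out, cap, "%s%s", base, (ext[0] != '\0') ? ext : ".par");
}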
Example #6
/*--------------------------------------------------------------------------*/
int output_gnu(char *cmd_file,
               char *tag,
               int Proc,
               int Num_Proc,
               PROB_INFO_PTR prob,
               PARIO_INFO_PTR pio_info,
               MESH_INFO_PTR mesh)
/*
 * For 2D problems, output files that can be read by gnuplot for looking at
 * results.
 * We'll do 3D problems later.
 *
 * One gnuplot file is written for each processor.
 *
 * For Chaco input files, the file written contains coordinates of owned
 * nodes and all nodes on that processor connected to the owned nodes. When
 * drawn "with linespoints", the subdomains are drawn, but lines connecting the
 * subdomains are not drawn.
 *
 * For Nemesis input files, the file written contains the coordinates of
 * each node of owned elements.  When drawn "with lines", the element outlines
 * for each owned element are drawn.
 *
 * In addition, processor 0 writes a gnuplot command file telling gnuplot how
 * to process the individual coordinate files written.  This file can be used
 * with the gnuplot "load" command to simplify generation of the plots.
 */
{
  /* Local declarations. */
  char  *yo = "output_gnu";
  char   par_out_fname[FILENAME_MAX+1], ctemp[FILENAME_MAX+1];
  ELEM_INFO *current_elem, *nbor_elem;
  int    nbor, num_nodes;
  char  *datastyle;
  int    i, j;

  FILE  *fp;
/***************************** BEGIN EXECUTION ******************************/

  DEBUG_TRACE_START(Proc, yo);

  if (mesh->num_dims > 2) {
    Gen_Error(0, "warning: cannot generate gnuplot data for 3D problems.");
    DEBUG_TRACE_END(Proc, yo);
    return 0;
  }

  if (mesh->eb_nnodes[0] == 0) {
    /* No coordinate information is available.  */
    Gen_Error(0, "warning: cannot generate gnuplot data when no coordinate"
                 " input is given.");
    DEBUG_TRACE_END(Proc, yo);
    return 0;
  }

  /* generate the parallel filename for this processor */
  strcpy(ctemp, pio_info->pexo_fname);
  strcat(ctemp, ".");
  strcat(ctemp, tag);
  strcat(ctemp, ".gnu");
  gen_par_filename(ctemp, par_out_fname, pio_info, Proc, Num_Proc);

  fp = fopen(par_out_fname, "w");
  if (fp == NULL) {
    Gen_Error(0, "fatal: cannot open gnuplot data file for writing");
    DEBUG_TRACE_END(Proc, yo);
    return 0;
  }

  if (pio_info->file_type == CHACO_FILE) {
    /* 
     * For each node of Chaco graph, print the coordinates of the node.
     * Then, for each neighboring node on the processor, print the neighbor's
     * coordinates.
     */
    datastyle = "linespoints";
    for (i = 0; i < mesh->elem_array_len; i++) {
      current_elem = &(mesh->elements[i]);
      if (current_elem->globalID >= 0) {
        /* Include the point itself, so that even if there are no edges,
         * the point will appear.  */
        fprintf(fp, "\n%e %e\n", 
                current_elem->coord[0][0], current_elem->coord[0][1]);
        for (j = 0; j < current_elem->nadj; j++) {
          if (current_elem->adj_proc[j] == Proc) {
            /* Plot the edge.  Need to include current point and nbor point
             * for each edge. */
            fprintf(fp, "\n%e %e\n", 
                current_elem->coord[0][0], current_elem->coord[0][1]);
            nbor = current_elem->adj[j];
            nbor_elem = &(mesh->elements[nbor]);
            fprintf(fp, "%e %e\n",
                    nbor_elem->coord[0][0], nbor_elem->coord[0][1]);
          }
        }
      }
    }
  }
  else { /* Nemesis input file */
    /* 
     *  For each element of Nemesis input file, print the coordinates of its
     *  nodes.  No need to follow neighbors, as decomposition is by elements.
     */
    datastyle = "lines";
    for (i = 0; i < mesh->elem_array_len; i++) {
      current_elem = &(mesh->elements[i]);
      if (current_elem->globalID >= 0) {
        num_nodes = mesh->eb_nnodes[current_elem->elem_blk];
        for (j = 0; j < num_nodes; j++) {
          fprintf(fp, "%e %e\n", 
                  current_elem->coord[j][0], current_elem->coord[j][1]);
        }
        fprintf(fp, "\n");
      }
    }
  }
    
  fclose(fp);

  if (Proc == 0) {
    /* Write gnu master file with gnu commands for plotting */
    strcpy(ctemp, pio_info->pexo_fname);
    strcat(ctemp, ".");
    strcat(ctemp, tag);
    strcat(ctemp, ".gnuload");
    fp = fopen(ctemp, "w");
    if (fp == NULL) {
      Gen_Error(0, "fatal: cannot open gnuload file for writing");
      return 0;
    }
    fprintf(fp, "set nokey\n");
    fprintf(fp, "set nolabel\n");
    fprintf(fp, "set noxzeroaxis\n");
    fprintf(fp, "set noyzeroaxis\n");
    fprintf(fp, "set noxtics\n");
    fprintf(fp, "set noytics\n");
    fprintf(fp, "set data style %s\n", datastyle);

    fprintf(fp, "plot ");
    strcpy(ctemp, pio_info->pexo_fname);
    strcat(ctemp, ".");
    strcat(ctemp, tag);
    strcat(ctemp, ".gnu");
    for (i = 0; i < Num_Proc; i++) {
      gen_par_filename(ctemp, par_out_fname, pio_info, i, Num_Proc);
      fprintf(fp, "\"%s\"", par_out_fname);
      if (i != Num_Proc-1) {
        fprintf(fp, ",\\\n");
      }
    }
    fprintf(fp, "\n");
    fclose(fp);
  }

  DEBUG_TRACE_END(Proc, yo);
  return 1;
}
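
The master-file loop above reduces to one gnuplot "plot" line naming every
per-processor data file, with backslash continuations between entries. A
standalone sketch that uses a simple sprintf naming scheme in place of
gen_par_filename (the "base.gnu.<rank>" pattern is an assumption, not the
driver's actual naming):

#include <stdio.h>

/* Write a gnuload-style command file listing nfiles data files. */
int write_gnuload(const char *base, int nfiles)
{
  char fname[FILENAME_MAX + 1];
  FILE *fp;
  int i;

  snprintf(fname, sizeof(fname), "%s.gnuload", base);
  if ((fp = fopen(fname, "w")) == NULL) return 0;

  fprintf(fp, "set nokey\n");
  fprintf(fp, "plot ");
  for (i = 0; i < nfiles; i++) {
    fprintf(fp, "\"%s.gnu.%d\"", base, i);
    if (i != nfiles - 1) fprintf(fp, ",\\\n");  /* continuation */
  }
  fprintf(fp, "\n");
  fclose(fp);
  return 1;
}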