Example #1
static void sort_and_compare_maps(
  int proc,
  int nbor_proc,
  MESH_INFO_PTR mesh, 
  struct map_list_head *map, 
  int map_size, 
  int *sindex
)
{
/*
 *  Routine to sort a given communication map for a single neighbor processor
 *  and compare it to the actual communication map for that processor
 *  (generated by build_elem_comm_maps).
 *  If the DDirectory were used to build comm maps, this routine could be
 *  modified to assign values to mesh->ecmap_*, rather than compare against
 *  them.
 */
int i, j;
int cnt = 0;
int indx;

  /*
   *  Sort the given map according to element ids.
   *  Primary key is determined as in build_elem_comm_maps.
   */

  for (i = 0; i < map_size; i++)
    sindex[i] = i;

  if (proc < nbor_proc)
    quicksort_pointer_inc_id_id(sindex, map->glob_id, map->neigh_id,
                                0, map_size-1);
  else 
    quicksort_pointer_inc_id_id(sindex, map->neigh_id, map->glob_id,
                                0, map_size-1);
  
  /*
   * Compute offset into mesh communication maps for the given nbor proc.
   */
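  /* in_list2() is expected to return the index of nbor_proc within
   * mesh->ecmap_id[0..necmap-1], or -1 if nbor_proc has no map. */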
  if ((indx = in_list2(nbor_proc, mesh->necmap, mesh->ecmap_id)) == -1) {
    printf("%d DDirectory Test:  Comm map for nbor proc %d does not exist\n",
           proc, nbor_proc);
    return;
  }

  cnt = 0;
  for (i = 0; i < indx; i++)
    cnt += mesh->ecmap_cnt[i];
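  /* cnt is now the starting offset of nbor_proc's entries within the
   * concatenated mesh->ecmap_* arrays (one contiguous block per neighbor). */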

  /*
   * Compare given map to mesh communication map for this nbor proc.
   * If the DDirectory code were used to construct maps, assignments
   * would be done here (rather than comparisons).
   */

  if (map_size != mesh->ecmap_cnt[indx]) {
    printf("%d DDirectory Test:  Different map size for nbor_proc %d: "
           "%d != %d\n", proc, nbor_proc, map_size, mesh->ecmap_cnt[indx]);
    return;
  }

  for (i = 0; i < map_size; i++) {
    j = sindex[i];
    if (map->elem_id[j] != mesh->ecmap_elemids[i+cnt]) {
      printf("%d DDirectory Test: Different element IDs for nbor_proc %d: "
             "%d != %d\n", proc, nbor_proc, map->elem_id[j], 
             mesh->ecmap_elemids[i+cnt]);
    }
  }

  for (i = 0; i < map_size; i++) {
    j = sindex[i];
    if (map->side_id[j] != mesh->ecmap_sideids[i+cnt]) {
      printf("%d DDirectory Test: Different side IDs for nbor_proc %d: "
             "%d != %d\n", proc, nbor_proc, map->side_id[j], 
             mesh->ecmap_sideids[i+cnt]);
    }
  }

  for (i = 0; i < map_size; i++) {
    j = sindex[i];
    if (map->neigh_id[j] != mesh->ecmap_neighids[i+cnt]) {
      printf("%d DDirectory Test: Different neigh IDs for nbor_proc %d: "
             ZOLTAN_ID_SPEC " != " ZOLTAN_ID_SPEC "\n", proc, nbor_proc, map->neigh_id[j], 
             mesh->ecmap_neighids[i+cnt]);
    }
  }

  if (Debug_Driver > 3) {
    printf("%d  *************   DDirectory Map for %d    ***************\n",
            proc, nbor_proc);
    printf("Local ID\tSide ID\tGlobal ID\tNeigh ID\n");
    for (i = 0; i < map_size; i++) {
      j = sindex[i];
      printf("\t%d\t%d\t" ZOLTAN_ID_SPEC "\t" ZOLTAN_ID_SPEC "\n", 
             map->elem_id[j], map->side_id[j], 
             map->glob_id[j], map->neigh_id[j]);
    }
  }
}
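The comparisons above assume a per-neighbor scratch structure, struct
map_list_head, holding one entry per shared element face.  Its actual
definition lives elsewhere in the driver; the sketch below is inferred from
the field usage in this routine and in build_elem_comm_maps (Example #3), so
the exact declaration and field order are assumptions.

/* Sketch (assumed layout) of the per-neighbor map buffer used above. */
struct map_list_head {
  int map_alloc_size;           /* current allocation of the arrays below      */
  ZOLTAN_ID_TYPE *glob_id;      /* global IDs of local elements in the map     */
  ZOLTAN_ID_TYPE *neigh_id;     /* global IDs of the off-processor neighbors   */
  int *elem_id;                 /* local indices into mesh->elements           */
  int *side_id;                 /* element side facing the neighbor (1-based)  */
};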
Example #2
int write_elem_vars(
  int Proc,
  MESH_INFO_PTR mesh,
  PARIO_INFO_PTR pio_info, 
  int num_exp, 
  ZOLTAN_ID_PTR exp_gids,
  int *exp_procs,
  int *exp_to_part
)
{
/* Routine to write processor assignments per element to the Nemesis files. */
int iblk;
int i, j;
int tmp;
float ver;
int pexoid, cpu_ws = 0, io_ws = 0;
int max_cnt = 0;
float *vars;
char par_nem_fname[FILENAME_MAX+1];
char tmp_nem_fname[FILENAME_MAX+1];
int Num_Proc;
char cmesg[256];
char *str = "Proc";

  /* generate the parallel filename for this processor */
  MPI_Comm_size(MPI_COMM_WORLD, &Num_Proc);
  gen_par_filename(pio_info->pexo_fname, tmp_nem_fname, pio_info, Proc,
                   Num_Proc);
  /*
   * Copy the parallel file to a new file (so we don't write results into the
   * CVS-controlled copy).
   */
  sprintf(cmesg, "%s.blot", pio_info->pexo_fname);
  gen_par_filename(cmesg, par_nem_fname, pio_info, Proc,
                   Num_Proc);
  fcopy(tmp_nem_fname, par_nem_fname);

  if ((pexoid = ex_open(par_nem_fname, EX_WRITE, &cpu_ws, &io_ws,
                        &ver)) < 0) {
    sprintf(cmesg,"fatal: could not open parallel Exodus II file %s",
            par_nem_fname);
    Gen_Error(0, cmesg);
    return 0;
  }

  if (ex_put_var_names(pexoid, "e", 1, &str) < 0) {
    Gen_Error(0, "Error returned from ex_put_var_names.");
    return 0;
  }

  /* Get max number of elements in an element block; alloc vars array to size */
  for (iblk = 0; iblk < mesh->num_el_blks; iblk++)
    max_cnt = (mesh->eb_cnts[iblk] > max_cnt ? mesh->eb_cnts[iblk] : max_cnt);

  vars = (float *) malloc(max_cnt * sizeof(float));
  if (vars == NULL) {
    Gen_Error(0, "fatal: insufficient memory");
    return 0;
  }

  /* Must write data by element block; gather the data */
  for (iblk = 0; iblk < mesh->num_el_blks; iblk++) {
    for (j = 0, i = 0; i < mesh->num_elems; i++) {
      if (mesh->elements[i].elem_blk == iblk) {
        /* Element is in block; see whether it is to be exported. */
        if ((tmp = in_list2((int) mesh->elements[i].globalID, num_exp,
                            (int *) exp_gids)) != -1)
          vars[j++] = (Output.Plot_Partition ? (float) (exp_to_part[tmp])
                                             : (float) (exp_procs[tmp]));
        else
          vars[j++] = (Output.Plot_Partition
                          ? (float) (mesh->elements[i].my_part)
                          : (float) (Proc));
      }
    }
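    /* At this point j should equal mesh->eb_cnts[iblk]:
     * one value was gathered for each element in block iblk. */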
    if (ex_put_elem_var(pexoid, 1, 1, mesh->eb_ids[iblk], 
                        mesh->eb_cnts[iblk], vars) < 0) {
      Gen_Error(0, "fatal: Error returned from ex_put_elem_var");
      return 0;
    }
  }

  safe_free((void **)(void *) &vars);
  /* Close the parallel file */
  if(ex_close (pexoid) < 0) {
    Gen_Error(0, "fatal: Error returned from ex_close");
    return 0;
  }
  return 1;
}
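A minimal, hypothetical call site for write_elem_vars() is sketched below.  The
export arrays (num_export, export_gids, export_procs, export_to_part) are
assumed to be outputs of the driver's load-balancing/migration step; the names
are illustrative, not the driver's actual variables.

  /* Hypothetical usage: write per-element processor/partition assignments
   * after migration.  All export_* names below are placeholders. */
  if (!write_elem_vars(Proc, mesh, pio_info, num_export, export_gids,
                       export_procs, export_to_part)) {
    Gen_Error(0, "fatal: write_elem_vars failed");
    return 0;
  }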
Example #3
int build_elem_comm_maps(int proc, MESH_INFO_PTR mesh)
{
/*
 * Build element communication maps, given a distributed mesh.
 * This routine builds initial communication maps for Chaco input
 * (for Nemesis, initial communication maps are read from the Nemesis file)
 * and rebuilds communication maps after data migration.
 *
 * One communication map per neighboring processor is built.
 * The corresponding maps on neighboring processors
 * must be sorted in the same order, so that neighboring processors do not
 * have to use ghost elements.   For each communication map's pair of
 * processors, the lower-numbered processor determines the order of the
 * elements in the communication map.  The primary sort key is the elements'
 * global IDs on the lower-numbered processor; the secondary key is the
 * neighboring elements' global IDs.  The secondary key is used when a single
 * element must communicate with more than one neighbor.
 */

const char *yo = "build_elem_comm_maps";
int i, j;
ELEM_INFO *elem;
ZOLTAN_ID_TYPE iadj_elem;
int iadj_proc;
int indx;
int num_alloc_maps;
int max_adj = 0;
int max_adj_per_map;
int cnt, offset;
int *sindex = NULL;
int tmp;
struct map_list_head *tmp_maps = NULL, *map = NULL;

  DEBUG_TRACE_START(proc, yo);

  /*
   *  Free the old maps, if they exist.
   */

  if (mesh->ecmap_id != NULL) {
    safe_free((void **) &(mesh->ecmap_id));
    safe_free((void **) &(mesh->ecmap_cnt));
    safe_free((void **) &(mesh->ecmap_elemids));
    safe_free((void **) &(mesh->ecmap_sideids));
    safe_free((void **) &(mesh->ecmap_neighids));
    mesh->necmap = 0;
  }

  /*
   *  Look for off-processor adjacencies.
   *  Loop over all elements 
   */

  num_alloc_maps = MAP_ALLOC;
  mesh->ecmap_id = (int *) malloc(num_alloc_maps * sizeof(int));
  mesh->ecmap_cnt = (int *) malloc(num_alloc_maps * sizeof(int));
  tmp_maps = (struct map_list_head*) malloc(num_alloc_maps 
                                          * sizeof(struct map_list_head));

  if (mesh->ecmap_id == NULL || mesh->ecmap_cnt == NULL || tmp_maps == NULL) {
    Gen_Error(0, "Fatal:  insufficient memory");
    DEBUG_TRACE_END(proc, yo);
    return 0;
  }

  for (i = 0; i < mesh->num_elems; i++) {
    elem = &(mesh->elements[i]);
    for (j = 0; j < elem->adj_len; j++) {

      /* Skip NULL adjacencies (sides that are not adjacent to another elem). */
      if (elem->adj[j] == ZOLTAN_ID_INVALID) continue;

      iadj_elem = elem->adj[j];
      iadj_proc = elem->adj_proc[j];

      if (iadj_proc != proc) {
        /* 
         * Adjacent element is off-processor.
         * Add this element to the temporary data structure for 
         * the appropriate neighboring processor.
         */
        if ((indx = in_list2(iadj_proc, mesh->necmap, mesh->ecmap_id)) == -1) {
          /*
           * Start a new communication map.
           */

          if (mesh->necmap >= num_alloc_maps) {
            num_alloc_maps += MAP_ALLOC;
            mesh->ecmap_id = (int *) realloc(mesh->ecmap_id,
                                            num_alloc_maps * sizeof(int));
            mesh->ecmap_cnt = (int *) realloc(mesh->ecmap_cnt,
                                             num_alloc_maps * sizeof(int));
            tmp_maps = (struct map_list_head *) realloc(tmp_maps,
                               num_alloc_maps * sizeof(struct map_list_head));
            if (mesh->ecmap_id == NULL || mesh->ecmap_cnt == NULL || 
                tmp_maps == NULL) {
              Gen_Error(0, "Fatal:  insufficient memory");
              DEBUG_TRACE_END(proc, yo);
              return 0;
            }
          }
          mesh->ecmap_id[mesh->necmap] = iadj_proc;
          mesh->ecmap_cnt[mesh->necmap] = 0;
          map = &(tmp_maps[mesh->necmap]);
          map->glob_id  = (ZOLTAN_ID_TYPE *) malloc(MAP_ALLOC * sizeof(ZOLTAN_ID_TYPE));
          map->elem_id  = (int *) malloc(MAP_ALLOC * sizeof(int));
          map->side_id  = (int *) malloc(MAP_ALLOC * sizeof(int));
          map->neigh_id = (ZOLTAN_ID_TYPE *) malloc(MAP_ALLOC * sizeof(ZOLTAN_ID_TYPE));
          if (map->glob_id == NULL || map->elem_id == NULL || 
              map->side_id == NULL || map->neigh_id == NULL) {
            Gen_Error(0, "Fatal:  insufficient memory");
            DEBUG_TRACE_END(proc, yo);
            return 0;
          }
          map->map_alloc_size = MAP_ALLOC;
          indx = mesh->necmap;
          mesh->necmap++;
        }
        /* Add to map for indx. */
        map = &(tmp_maps[indx]);
        if (mesh->ecmap_cnt[indx] >= map->map_alloc_size) {
          map->map_alloc_size += MAP_ALLOC;
          map->glob_id  = (ZOLTAN_ID_TYPE *) realloc(map->glob_id, map->map_alloc_size * sizeof(ZOLTAN_ID_TYPE));
          map->elem_id  = (int *) realloc(map->elem_id, 
                                          map->map_alloc_size * sizeof(int));
          map->side_id  = (int *) realloc(map->side_id, 
                                          map->map_alloc_size * sizeof(int));
          map->neigh_id = (ZOLTAN_ID_TYPE *) realloc(map->neigh_id, map->map_alloc_size * sizeof(ZOLTAN_ID_TYPE));
          if (map->glob_id == NULL || map->elem_id == NULL || 
              map->side_id == NULL || map->neigh_id == NULL) {
            Gen_Error(0, "Fatal:  insufficient memory");
            DEBUG_TRACE_END(proc, yo);
            return 0;
          }
        }
        tmp = mesh->ecmap_cnt[indx];
        map->glob_id[tmp] = elem->globalID;
        map->elem_id[tmp] = i;
        map->side_id[tmp] = j+1;  /* side is determined by position in
                                          adj array (+1 since not 0-based). */
        map->neigh_id[tmp] = iadj_elem;
        mesh->ecmap_cnt[indx]++;
        max_adj++;
      }
    }
  }

  /* 
   * If no communication maps, don't need to do anything else. 
   */

  if (mesh->necmap > 0) {

    /*
     * Allocate data structure for element communication map arrays.
     */

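    /* max_adj, accumulated above, is the total number of off-processor
     * adjacencies, i.e., the combined length of all communication maps. */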
    mesh->ecmap_elemids  = (int *) malloc(max_adj * sizeof(int));
    mesh->ecmap_sideids  = (int *) malloc(max_adj * sizeof(int));
    mesh->ecmap_neighids = (ZOLTAN_ID_TYPE *) malloc(max_adj * sizeof(ZOLTAN_ID_TYPE));
    if (mesh->ecmap_elemids == NULL || mesh->ecmap_sideids == NULL ||
        mesh->ecmap_neighids == NULL) {
      Gen_Error(0, "Fatal:  insufficient memory");
      DEBUG_TRACE_END(proc, yo);
      return 0;
    }

    /*
     * Allocate temporary memory for sort index.
     */
    max_adj_per_map = 0;
    for (i = 0; i < mesh->necmap; i++)
      if (mesh->ecmap_cnt[i] > max_adj_per_map)
        max_adj_per_map = mesh->ecmap_cnt[i];
    sindex = (int *) malloc(max_adj_per_map * sizeof(int));
    if (sindex == NULL) {
      Gen_Error(0, "Fatal:  insufficient memory");
      DEBUG_TRACE_END(proc, yo);
      return 0;
    }

    cnt = 0;
    for (i = 0; i < mesh->necmap; i++) {

      map = &(tmp_maps[i]);
      for (j = 0; j < mesh->ecmap_cnt[i]; j++)
        sindex[j] = j;

      /*
       * Sort the map so that adjacent processors have the same ordering
       * for the communication.  
       * Assume the ordering of the lower-numbered processor in the pair
       * of communicating processors.
       */

      if (proc < mesh->ecmap_id[i]) 
        quicksort_pointer_inc_id_id(sindex, map->glob_id, map->neigh_id,
                                    0, mesh->ecmap_cnt[i]-1);
      else
        quicksort_pointer_inc_id_id(sindex, map->neigh_id, map->glob_id,
                                    0, mesh->ecmap_cnt[i]-1);

      /*
       * Copy sorted data into elem map arrays. 
       */

      offset = cnt;
      for (j = 0; j < mesh->ecmap_cnt[i]; j++) {
        mesh->ecmap_elemids[offset]  = map->elem_id[sindex[j]];
        mesh->ecmap_sideids[offset]  = map->side_id[sindex[j]];
        mesh->ecmap_neighids[offset] = map->neigh_id[sindex[j]];
        offset++;
      }

      cnt += mesh->ecmap_cnt[i];
    }
  }

  /* Free temporary data structure. */
  for (i = 0; i < mesh->necmap; i++) {
    safe_free((void **) &(tmp_maps[i].glob_id));
    safe_free((void **) &(tmp_maps[i].elem_id));
    safe_free((void **) &(tmp_maps[i].side_id));
    safe_free((void **) &(tmp_maps[i].neigh_id));
  }
  safe_free((void **) &tmp_maps);
  safe_free((void **) &sindex);

  if (Test.DDirectory) 
    compare_maps_with_ddirectory_results(proc, mesh);

  DEBUG_TRACE_END(proc, yo);
  return 1;
}
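Both build_elem_comm_maps and sort_and_compare_maps rely on
quicksort_pointer_inc_id_id() to permute the index array sindex so that entries
come out in increasing order of a primary ID array, with a secondary ID array
breaking ties.  The stand-in below is only a sketch of that assumed ordering
contract (an insertion sort over the index range, not the driver's actual
quicksort routine), written against the same ZOLTAN_ID_TYPE keys.

/* Sketch of the ordering contract assumed of quicksort_pointer_inc_id_id():
 * permute sindex[start..end] so that (key1[sindex[k]], key2[sindex[k]]) is
 * nondecreasing in k.  Illustrative stand-in only. */
static void sort_index_by_two_ids(int *sindex, ZOLTAN_ID_TYPE *key1,
                                  ZOLTAN_ID_TYPE *key2, int start, int end)
{
int i, j, s, tmp;

  for (i = start + 1; i <= end; i++) {
    tmp = sindex[i];
    for (j = i - 1; j >= start; j--) {
      s = sindex[j];
      /* Primary key is key1; key2 breaks ties. */
      if (key1[s] > key1[tmp] ||
          (key1[s] == key1[tmp] && key2[s] > key2[tmp]))
        sindex[j + 1] = s;        /* shift the larger entry right */
      else
        break;
    }
    sindex[j + 1] = tmp;          /* insert the saved index */
  }
}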