int main(void)
{
    int depth;
    unsigned i, n;
    unsigned long size;
    int levels;
    char string[128];
    int topodepth;
    void *m;
    hwloc_topology_t topology;
    hwloc_cpuset_t cpuset;
    hwloc_obj_t obj;

    /* Allocate and initialize topology object. */
    hwloc_topology_init(&topology);

    /* ... Optionally, put detection configuration here to ignore
       some object types, define a synthetic topology, etc....

       The default is to detect all the objects of the machine that
       the caller is allowed to access.  See Configure Topology
       Detection. */

    /* Perform the topology detection. */
    hwloc_topology_load(topology);

    /* Optionally, get some additional topology information
       in case we need the topology depth later. */
    topodepth = hwloc_topology_get_depth(topology);

    /*****************************************************************
     * First example:
     * Walk the topology with an array style, from level 0 (always
     * the system level) to the lowest level (always the proc level).
     *****************************************************************/
    for (depth = 0; depth < topodepth; depth++) {
        printf("*** Objects at level %d\n", depth);
        for (i = 0; i < hwloc_get_nbobjs_by_depth(topology, depth); i++) {
            hwloc_obj_type_snprintf(string, sizeof(string),
                                    hwloc_get_obj_by_depth(topology, depth, i), 0);
            printf("Index %u: %s\n", i, string);
        }
    }

    /*****************************************************************
     * Second example:
     * Walk the topology with a tree style.
     *****************************************************************/
    printf("*** Printing overall tree\n");
    print_children(topology, hwloc_get_root_obj(topology), 0);

    /*****************************************************************
     * Third example:
     * Print the number of packages.
     *****************************************************************/
    depth = hwloc_get_type_depth(topology, HWLOC_OBJ_PACKAGE);
    if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) {
        printf("*** The number of packages is unknown\n");
    } else {
        printf("*** %u package(s)\n",
               hwloc_get_nbobjs_by_depth(topology, depth));
    }

    /*****************************************************************
     * Fourth example:
     * Compute the amount of cache that the first logical processor
     * has above it.
     *****************************************************************/
    levels = 0;
    size = 0;
    for (obj = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0);
         obj;
         obj = obj->parent)
      if (obj->type == HWLOC_OBJ_CACHE) {
        levels++;
        size += obj->attr->cache.size;
      }
    printf("*** Logical processor 0 has %d caches totaling %luKB\n", 
           levels, size / 1024);

    /*****************************************************************
     * Fifth example:
     * Bind to only one thread of the last core of the machine.
     *
     * First find out where cores are, or else smaller sets of CPUs if
     * the OS doesn't have the notion of a "core".
     *****************************************************************/
    depth = hwloc_get_type_or_below_depth(topology, HWLOC_OBJ_CORE);

    /* Get last core. */
    obj = hwloc_get_obj_by_depth(topology, depth,
                   hwloc_get_nbobjs_by_depth(topology, depth) - 1);
    if (obj) {
        /* Get a copy of its cpuset that we may modify. */
        cpuset = hwloc_bitmap_dup(obj->cpuset);

        /* Get only one logical processor (in case the core is
           SMT/hyper-threaded). */
        hwloc_bitmap_singlify(cpuset);

        /* And try to bind ourself there. */
        if (hwloc_set_cpubind(topology, cpuset, 0)) {
            char *str;
            int error = errno;
            hwloc_bitmap_asprintf(&str, obj->cpuset);
            printf("Couldn't bind to cpuset %s: %s\n", str, strerror(error));
            free(str);
        }

        /* Free our cpuset copy */
        hwloc_bitmap_free(cpuset);
    }

    /*****************************************************************
     * Sixth example:
     * Allocate some memory on the last NUMA node, bind some existing
     * memory to the last NUMA node.
     *****************************************************************/
    /* Get last node. There's always at least one. */
    n = hwloc_get_nbobjs_by_type(topology, HWLOC_OBJ_NUMANODE);
    obj = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, n - 1);

    size = 1024*1024;
    m = hwloc_alloc_membind_nodeset(topology, size, obj->nodeset,
                                    HWLOC_MEMBIND_BIND, 0);
    hwloc_free(topology, m, size);

    m = malloc(size);
    hwloc_set_area_membind_nodeset(topology, m, size, obj->nodeset,
                                   HWLOC_MEMBIND_BIND, 0);
    free(m);

    /* Destroy topology object. */
    hwloc_topology_destroy(topology);

    return 0;
}
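The second example's tree-style walk relies on a print_children() helper that this listing does not show. Below is a minimal sketch of such a recursive helper, modeled on the one in the hwloc documentation; the buffer sizes and formatting are illustrative.

static void print_children(hwloc_topology_t topology, hwloc_obj_t obj, int depth)
{
    char type[32], attr[1024];
    unsigned i;

    /* Print this object's type, OS index and attributes, indented by its depth. */
    hwloc_obj_type_snprintf(type, sizeof(type), obj, 0);
    printf("%*s%s", 2 * depth, "", type);
    if (obj->os_index != (unsigned) -1)
        printf("#%u", obj->os_index);
    hwloc_obj_attr_snprintf(attr, sizeof(attr), obj, " ", 0);
    if (*attr)
        printf("(%s)", attr);
    printf("\n");

    /* Recurse into the children of this object. */
    for (i = 0; i < obj->arity; i++)
        print_children(topology, obj->children[i], depth + 1);
}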
Example #2
void output_console(hwloc_topology_t topology, const char *filename, int logical, int legend __hwloc_attribute_unused, int verbose_mode)
{
  unsigned topodepth;
  FILE *output;

  if (!filename || !strcmp(filename, "-"))
    output = stdout;
  else {
    output = open_file(filename, "w"); 
    if (!output) {
      fprintf(stderr, "Failed to open %s for writing (%s)\n", filename, strerror(errno));
      return;
    }
  }

  topodepth = hwloc_topology_get_depth(topology);

  /*
   * if verbose_mode == 0, only print the summary.
   * if verbose_mode == 1, only print the topology tree.
   * if verbose_mode > 1, print both.
   */

  if (lstopo_show_only != (hwloc_obj_type_t)-1) {
    if (verbose_mode > 1)
      fprintf(output, "Only showing %s objects\n", hwloc_obj_type_string(lstopo_show_only));
    output_only (topology, hwloc_get_root_obj(topology), output, logical, verbose_mode);
  } else if (verbose_mode >= 1) {
    output_topology (topology, hwloc_get_root_obj(topology), NULL, output, 0, logical, verbose_mode);
    fprintf(output, "\n");
  }

  if ((verbose_mode > 1 || !verbose_mode) && lstopo_show_only == (hwloc_obj_type_t)-1) {
    hwloc_lstopo_show_summary(output, topology);
  }

  if (verbose_mode > 1 && lstopo_show_only == (hwloc_obj_type_t)-1) {
    const struct hwloc_distances_s * distances;
    unsigned depth;

    for (depth = 0; depth < topodepth; depth++) {
      distances = hwloc_get_whole_distance_matrix_by_depth(topology, depth);
      if (!distances || !distances->latency)
        continue;
      printf("latency matrix between %ss (depth %u) by %s indexes:\n",
	     hwloc_obj_type_string(hwloc_get_depth_type(topology, depth)),
	     depth,
	     logical ? "logical" : "physical");
      hwloc_utils_print_distance_matrix(topology, hwloc_get_root_obj(topology), distances->nbobjs, depth, distances->latency, logical);
    }
  }

  if (verbose_mode > 1 && lstopo_show_only == (hwloc_obj_type_t)-1) {
    hwloc_const_bitmap_t complete = hwloc_topology_get_complete_cpuset(topology);
    hwloc_const_bitmap_t topo = hwloc_topology_get_topology_cpuset(topology);
    hwloc_const_bitmap_t online = hwloc_topology_get_online_cpuset(topology);
    hwloc_const_bitmap_t allowed = hwloc_topology_get_allowed_cpuset(topology);

    if (complete && !hwloc_bitmap_isequal(topo, complete)) {
      hwloc_bitmap_t unknown = hwloc_bitmap_alloc();
      char *unknownstr;
      hwloc_bitmap_copy(unknown, complete);
      hwloc_bitmap_andnot(unknown, unknown, topo);
      hwloc_bitmap_asprintf(&unknownstr, unknown);
      fprintf (output, "%d processors not represented in topology: %s\n", hwloc_bitmap_weight(unknown), unknownstr);
      free(unknownstr);
      hwloc_bitmap_free(unknown);
    }
    if (complete && !hwloc_bitmap_isequal(online, complete)) {
      hwloc_bitmap_t offline = hwloc_bitmap_alloc();
      char *offlinestr;
      hwloc_bitmap_copy(offline, complete);
      hwloc_bitmap_andnot(offline, offline, online);
      hwloc_bitmap_asprintf(&offlinestr, offline);
      fprintf (output, "%d processors offline: %s\n", hwloc_bitmap_weight(offline), offlinestr);
      free(offlinestr);
      hwloc_bitmap_free(offline);
    }
    if (complete && !hwloc_bitmap_isequal(allowed, online)) {
      if (!hwloc_bitmap_isincluded(online, allowed)) {
        hwloc_bitmap_t forbidden = hwloc_bitmap_alloc();
        char *forbiddenstr;
        hwloc_bitmap_copy(forbidden, online);
        hwloc_bitmap_andnot(forbidden, forbidden, allowed);
        hwloc_bitmap_asprintf(&forbiddenstr, forbidden);
        fprintf(output, "%d processors online but not allowed: %s\n", hwloc_bitmap_weight(forbidden), forbiddenstr);
        free(forbiddenstr);
        hwloc_bitmap_free(forbidden);
      }
      if (!hwloc_bitmap_isincluded(allowed, online)) {
        hwloc_bitmap_t potential = hwloc_bitmap_alloc();
        char *potentialstr;
        hwloc_bitmap_copy(potential, allowed);
        hwloc_bitmap_andnot(potential, potential, online);
        hwloc_bitmap_asprintf(&potentialstr, potential);
        fprintf(output, "%d processors allowed but not online: %s\n", hwloc_bitmap_weight(potential), potentialstr);
        free(potentialstr);
        hwloc_bitmap_free(potential);
      }
    }
    if (!hwloc_topology_is_thissystem(topology))
      fprintf (output, "Topology not from this system\n");
  }

  if (output != stdout)
    fclose(output);
}
Example #3
int main(void)
{
  hwloc_topology_t topology;
#ifdef HWLOC_HAVE_CPU_SET
  unsigned depth;
  hwloc_bitmap_t hwlocset;
  cpu_set_t schedset;
  hwloc_obj_t obj;
  int err;
#endif /* HWLOC_HAVE_CPU_SET */

  hwloc_topology_init(&topology);
  hwloc_topology_load(topology);

#ifdef HWLOC_HAVE_CPU_SET

  depth = hwloc_topology_get_depth(topology);

  hwlocset = hwloc_bitmap_dup(hwloc_topology_get_complete_cpuset(topology));
  hwloc_cpuset_to_glibc_sched_affinity(topology, hwlocset, &schedset, sizeof(schedset));
#ifdef HWLOC_HAVE_OLD_SCHED_SETAFFINITY
  err = sched_setaffinity(0, sizeof(schedset));
#else
  err = sched_setaffinity(0, sizeof(schedset), &schedset);
#endif
  assert(!err);
  hwloc_bitmap_free(hwlocset);

#ifdef HWLOC_HAVE_OLD_SCHED_SETAFFINITY
  err = sched_getaffinity(0, sizeof(schedset));
#else
  err = sched_getaffinity(0, sizeof(schedset), &schedset);
#endif
  assert(!err);
  hwlocset = hwloc_bitmap_alloc();
  hwloc_cpuset_from_glibc_sched_affinity(topology, hwlocset, &schedset, sizeof(schedset));
  assert(hwloc_bitmap_isincluded(hwlocset, hwloc_topology_get_complete_cpuset(topology)));
  hwloc_bitmap_andnot(hwlocset, hwlocset, hwloc_topology_get_online_cpuset(topology));
  hwloc_bitmap_andnot(hwlocset, hwlocset, hwloc_topology_get_allowed_cpuset(topology));
  assert(hwloc_bitmap_iszero(hwlocset));
  hwloc_bitmap_free(hwlocset);

  obj = hwloc_get_obj_by_depth(topology, depth-1, hwloc_get_nbobjs_by_depth(topology, depth-1) - 1);
  assert(obj);
  assert(obj->type == HWLOC_OBJ_PU);

  hwlocset = hwloc_bitmap_dup(obj->cpuset);
  hwloc_cpuset_to_glibc_sched_affinity(topology, hwlocset, &schedset, sizeof(schedset));
#ifdef HWLOC_HAVE_OLD_SCHED_SETAFFINITY
  err = sched_setaffinity(0, sizeof(schedset));
#else
  err = sched_setaffinity(0, sizeof(schedset), &schedset);
#endif
  assert(!err);
  hwloc_bitmap_free(hwlocset);

#ifdef HWLOC_HAVE_OLD_SCHED_SETAFFINITY
  err = sched_getaffinity(0, sizeof(schedset));
#else
  err = sched_getaffinity(0, sizeof(schedset), &schedset);
#endif
  assert(!err);
  hwlocset = hwloc_bitmap_alloc();
  hwloc_cpuset_from_glibc_sched_affinity(topology, hwlocset, &schedset, sizeof(schedset));
  assert(hwloc_bitmap_isequal(hwlocset, obj->cpuset));
  hwloc_bitmap_free(hwlocset);

#endif /* HWLOC_HAVE_CPU_SET */

  hwloc_topology_destroy(topology);
  return 0;
}
Example #4
/* This function produces topology aware trees for reduction and broadcasts, with different
 * K values. This is a heavy-weight function as it allocates shared memory, generates topology
 * information, builds a package-level tree (for package leaders), and a per-package tree.
 * These are combined in shared memory for other ranks to read out from.
 * */
int MPIDI_SHM_topology_tree_init(MPIR_Comm * comm_ptr, int root, int bcast_k,
                                 MPIR_Treealgo_tree_t * bcast_tree, int *bcast_topotree_fail,
                                 int reduce_k, MPIR_Treealgo_tree_t * reduce_tree,
                                 int *reduce_topotree_fail, MPIR_Errflag_t * errflag)
{
    int *shared_region;
    MPL_shm_hnd_t fd;
    int num_ranks, rank;
    int mpi_errno = MPI_SUCCESS, mpi_errno_ret = MPI_SUCCESS;
    size_t shm_size;
    int **bind_map = NULL;
    int *max_entries_per_level = NULL;
    int **ranks_per_package = NULL;
    int *package_ctr = NULL;
    size_t topo_depth = 0;
    int package_level = 0, i, max_ranks_per_package = 0;
    bool mapfail_flag = false;

    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_SHM_TOPOLOGY_TREE_INIT);
    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_SHM_TOPOLOGY_TREE_INIT);

    num_ranks = MPIR_Comm_size(comm_ptr);
    rank = MPIR_Comm_rank(comm_ptr);

    /* Calculate the size of shared memory that would be needed */
    shm_size = sizeof(int) * 5 * num_ranks + num_ranks * sizeof(cpu_set_t);

    /* STEP 1. Create shared memory region for exchanging topology information (root only) */
    mpi_errno = MPIDIU_allocate_shm_segment(comm_ptr, shm_size, &fd, (void **) &shared_region,
                                            &mapfail_flag);
    if (mpi_errno || mapfail_flag) {
        /* for communication errors, just record the error but continue */
        *errflag =
            MPIX_ERR_PROC_FAILED ==
            MPIR_ERR_GET_CLASS(mpi_errno) ? MPIR_ERR_PROC_FAILED : MPIR_ERR_OTHER;
        MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
        MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
    }
    /* STEP 2. Collect cpu_sets for each rank at the root */
    cpu_set_t my_cpu_set;
    CPU_ZERO(&my_cpu_set);
    sched_getaffinity(0, sizeof(my_cpu_set), &my_cpu_set);
    ((cpu_set_t *) (shared_region))[rank] = my_cpu_set;
    mpi_errno = MPIR_Barrier_impl(comm_ptr, errflag);
    if (mpi_errno) {
        /* for communication errors, just record the error but continue */
        *errflag =
            MPIX_ERR_PROC_FAILED ==
            MPIR_ERR_GET_CLASS(mpi_errno) ? MPIR_ERR_PROC_FAILED : MPIR_ERR_OTHER;
        MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
        MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
    }
    /* STEP 3. Root has all the cpu_set information, now build tree */
    if (rank == root) {
        topo_depth = hwloc_topology_get_depth(MPIR_Process.hwloc_topology);
        bind_map = (int **) MPL_malloc(num_ranks * sizeof(int *), MPL_MEM_OTHER);
        MPIR_ERR_CHKANDJUMP(!bind_map, mpi_errno, MPI_ERR_OTHER, "**nomem");
        for (i = 0; i < num_ranks; ++i) {
            bind_map[i] = (int *) MPL_calloc(topo_depth, sizeof(int), MPL_MEM_OTHER);
            MPIR_ERR_CHKANDJUMP(!bind_map[i], mpi_errno, MPI_ERR_OTHER, "**nomem");
        }
        MPIDI_SHM_hwloc_init_bindmap(num_ranks, topo_depth, shared_region, bind_map);
        /* Done building the topology information */

        /* STEP 3.1. Count the maximum entries at each level - used for breaking the tree into
         * intra/inter socket */
        max_entries_per_level = (int *) MPL_calloc(topo_depth, sizeof(size_t), MPL_MEM_OTHER);
        MPIR_ERR_CHKANDJUMP(!max_entries_per_level, mpi_errno, MPI_ERR_OTHER, "**nomem");
        package_level =
            MPIDI_SHM_topotree_get_package_level(topo_depth, max_entries_per_level, num_ranks,
                                                 bind_map);
        if (MPIDI_SHM_TOPOTREE_DEBUG)
            fprintf(stderr, "Breaking topology at :: %d (default= %d)\n", package_level,
                    MPIDI_SHM_TOPOTREE_CUTOFF);

        /* STEP 3.2. allocate space for the entries that go in each package based on hwloc info */
        ranks_per_package = (int **) MPL_malloc(max_entries_per_level[package_level] * sizeof(int *),
                                                MPL_MEM_OTHER);
        MPIR_ERR_CHKANDJUMP(!ranks_per_package, mpi_errno, MPI_ERR_OTHER, "**nomem");
        package_ctr =
            (int *) MPL_calloc(max_entries_per_level[package_level], sizeof(int), MPL_MEM_OTHER);
        MPIR_ERR_CHKANDJUMP(!package_ctr, mpi_errno, MPI_ERR_OTHER, "**nomem");
        for (i = 0; i < max_entries_per_level[package_level]; ++i) {
            package_ctr[i] = 0;
            ranks_per_package[i] = (int *) MPL_calloc(num_ranks, sizeof(int), MPL_MEM_OTHER);
            MPIR_ERR_CHKANDJUMP(!ranks_per_package[i], mpi_errno, MPI_ERR_OTHER, "**nomem");
        }
        /* sort the ranks into packages based on the binding information */
        for (i = 0; i < num_ranks; ++i) {
            int package = bind_map[i][package_level];
            ranks_per_package[package][package_ctr[package]++] = i;
        }
        max_ranks_per_package = 0;
        for (i = 0; i < max_entries_per_level[package_level]; ++i) {
            max_ranks_per_package = MPL_MAX(max_ranks_per_package, package_ctr[i]);
        }
        /* At this point we have done the common work in extracting topology information
         * and restructuring it to our needs. Now we generate the tree. */

        /* For Bcast, package leaders are added before the package local ranks, and the per_package
         * tree is left_skewed */
        mpi_errno = MPIDI_SHM_gen_tree(bcast_k, shared_region, max_entries_per_level,
                                       ranks_per_package, max_ranks_per_package, package_ctr,
                                       package_level, num_ranks, 1 /*package_leaders_first */ ,
                                       0 /*left_skewed */ , errflag);
        if (mpi_errno) {
            /* for communication errors, just record the error but continue */
            *errflag =
                MPIX_ERR_PROC_FAILED ==
                MPIR_ERR_GET_CLASS(mpi_errno) ? MPIR_ERR_PROC_FAILED : MPIR_ERR_OTHER;
            MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
            MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
        }
    }
    mpi_errno = MPIR_Barrier_impl(comm_ptr, errflag);
    if (mpi_errno) {
        /* for communication errors, just record the error but continue */
        *errflag =
            MPIX_ERR_PROC_FAILED ==
            MPIR_ERR_GET_CLASS(mpi_errno) ? MPIR_ERR_PROC_FAILED : MPIR_ERR_OTHER;
        MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
        MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
    }

    /* Every rank copies their tree out from shared memory */
    MPIDI_SHM_copy_tree(shared_region, num_ranks, rank, bcast_tree, bcast_topotree_fail);
    if (MPIDI_SHM_TOPOTREE_DEBUG)
        MPIDI_SHM_print_topotree_file("BCAST", comm_ptr->context_id, rank, bcast_tree);

    /* Wait until shared memory is available */
    mpi_errno = MPIR_Barrier_impl(comm_ptr, errflag);
    if (mpi_errno) {
        /* for communication errors, just record the error but continue */
        *errflag =
            MPIX_ERR_PROC_FAILED ==
            MPIR_ERR_GET_CLASS(mpi_errno) ? MPIR_ERR_PROC_FAILED : MPIR_ERR_OTHER;
        MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
        MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
    }
    /* Generate the reduce tree */
    /* For Reduce, package leaders are added after the package local ranks, and the per_package
     * tree is right_skewed (children are added in the reverse order). */
    if (rank == root) {
        memset(shared_region, 0, shm_size);
        mpi_errno = MPIDI_SHM_gen_tree(reduce_k, shared_region, max_entries_per_level,
                                       ranks_per_package, max_ranks_per_package, package_ctr,
                                       package_level, num_ranks, 0 /*package_leaders_last */ ,
                                       1 /*right_skewed */ , errflag);
        if (mpi_errno) {
            /* for communication errors, just record the error but continue */
            *errflag =
                MPIX_ERR_PROC_FAILED ==
                MPIR_ERR_GET_CLASS(mpi_errno) ? MPIR_ERR_PROC_FAILED : MPIR_ERR_OTHER;
            MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
            MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
        }
    }

    mpi_errno = MPIR_Barrier_impl(comm_ptr, errflag);
    if (mpi_errno) {
        /* for communication errors, just record the error but continue */
        *errflag =
            MPIX_ERR_PROC_FAILED ==
            MPIR_ERR_GET_CLASS(mpi_errno) ? MPIR_ERR_PROC_FAILED : MPIR_ERR_OTHER;
        MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
        MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
    }
    /* Each rank copies the reduce tree out from shared memory */
    MPIDI_SHM_copy_tree(shared_region, num_ranks, rank, reduce_tree, reduce_topotree_fail);

    if (MPIDI_SHM_TOPOTREE_DEBUG)
        MPIDI_SHM_print_topotree_file("REDUCE", comm_ptr->context_id, rank, reduce_tree);
    /* Wait for all ranks to copy out the tree */
    mpi_errno = MPIR_Barrier_impl(comm_ptr, errflag);
    if (mpi_errno) {
        /* for communication errors, just record the error but continue */
        *errflag =
            MPIX_ERR_PROC_FAILED ==
            MPIR_ERR_GET_CLASS(mpi_errno) ? MPIR_ERR_PROC_FAILED : MPIR_ERR_OTHER;
        MPIR_ERR_SET(mpi_errno, *errflag, "**fail");
        MPIR_ERR_ADD(mpi_errno_ret, mpi_errno);
    }
    /* Cleanup */
    if (rank == root) {
        for (i = 0; i < max_entries_per_level[package_level]; ++i) {
            MPL_free(ranks_per_package[i]);
        }
        MPL_free(ranks_per_package);
        MPL_free(package_ctr);
        if (MPIDI_SHM_TOPOTREE_DEBUG)
            for (i = 0; i < topo_depth; ++i) {
                fprintf(stderr, "Level :: %d, Max :: %d\n", i, max_entries_per_level[i]);
            }
        for (i = 0; i < num_ranks; ++i) {
            MPL_free(bind_map[i]);
        }
        MPL_free(max_entries_per_level);
        MPL_free(bind_map);
    }
    MPIDIU_destroy_shm_segment(shm_size, &fd, (void **) &shared_region);

  fn_exit:
    if (rank == root && MPIDI_SHM_TOPOTREE_DEBUG)
        fprintf(stderr, "Done creating tree for %d\n", num_ranks);
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_SHM_TOPOLOGY_TREE_INIT);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
Example #5
int opal_hwloc_compare(const hwloc_topology_t topo1,
                       const hwloc_topology_t topo2,
                       opal_data_type_t type)
{
    hwloc_topology_t t1, t2;
    unsigned d1, d2;
    struct hwloc_topology_support *s1, *s2;
    char *x1=NULL, *x2=NULL;
    int l1, l2;
    int s;

    /* stop stupid compiler warnings */
    t1 = (hwloc_topology_t)topo1;
    t2 = (hwloc_topology_t)topo2;

    /* do something quick first */
    d1 = hwloc_topology_get_depth(t1);
    d2 = hwloc_topology_get_depth(t2);
    if (d1 > d2) {
        return OPAL_VALUE1_GREATER;
    } else if (d2 > d1) {
        return OPAL_VALUE2_GREATER;
    }


    /* do the comparison the "cheat" way - get an xml representation
     * of each tree, and strcmp! This will work fine for inventory
     * comparisons, but might not meet the need for comparing topology
     * where we really need to do a tree-wise search so we only compare
     * the things we care about, and ignore stuff like MAC addresses
     */
    if (0 != hwloc_topology_export_xmlbuffer(t1, &x1, &l1)) {
        return OPAL_EQUAL;
    }
    if (0 != hwloc_topology_export_xmlbuffer(t2, &x2, &l2)) {
        free(x1);
        return OPAL_EQUAL;
    }

    s = strcmp(x1, x2);
    free(x1);
    free(x2);
    if (s > 0) {
        return OPAL_VALUE1_GREATER;
    } else if (s < 0) {
        return OPAL_VALUE2_GREATER;
    }

    /* compare the available support - hwloc unfortunately does
     * not include this info in its xml support!
     */
    if (NULL == (s1 = (struct hwloc_topology_support*)hwloc_topology_get_support(t1)) ||
        NULL == s1->cpubind || NULL == s1->membind) {
        return OPAL_EQUAL;
    }
    if (NULL == (s2 = (struct hwloc_topology_support*)hwloc_topology_get_support(t2)) ||
        NULL == s2->cpubind || NULL == s2->membind) {
        return OPAL_EQUAL;
    }
    /* compare the fields we care about */
    if (s1->cpubind->set_thisproc_cpubind != s2->cpubind->set_thisproc_cpubind ||
        s1->cpubind->set_thisthread_cpubind != s2->cpubind->set_thisthread_cpubind ||
        s1->membind->set_thisproc_membind != s2->membind->set_thisproc_membind ||
        s1->membind->set_thisthread_membind != s2->membind->set_thisthread_membind) {
        OPAL_OUTPUT_VERBOSE((5, opal_hwloc_base_framework.framework_output,
                             "hwloc:base:compare BINDING CAPABILITIES DIFFER"));
        return OPAL_VALUE1_GREATER;
    }

    return OPAL_EQUAL;
}
Example #6
tm_topology_t* get_local_topo_with_hwloc(void)
{
  hwloc_topology_t topology;
  tm_topology_t *res = NULL;
  hwloc_obj_t *objs = NULL;
  unsigned topodepth,depth;
  int nb_nodes,i;

  /* Build the topology */
  hwloc_topology_init(&topology);
#if HWLOC_API_VERSION >= 0x00020000
  hwloc_topology_set_all_types_filter(topology, HWLOC_TYPE_FILTER_KEEP_STRUCTURE);
#else  /* HWLOC_API_VERSION >= 0x00020000 */
  hwloc_topology_ignore_all_keep_structure(topology);
#endif  /* HWLOC_API_VERSION >= 0x00020000 */
  hwloc_topology_load(topology);

  /* Test if symmetric */
  if(!symetric(topology)){
    if(tm_get_verbose_level() >= CRITICAL)
      fprintf(stderr,"Local topology not symmetric!\n");
    exit(-1);
  }

  /* work on depth */
  topodepth = hwloc_topology_get_depth(topology);

  res                  = (tm_topology_t*)MALLOC(sizeof(tm_topology_t));
  res->nb_constraints  = 0;
  res->constraints     = NULL;
  res->nb_levels       = topodepth;
  res->node_id         = (int**)MALLOC(sizeof(int*)*res->nb_levels);
  res->node_rank       = (int**)MALLOC(sizeof(int*)*res->nb_levels);
  res->nb_nodes        = (size_t*)MALLOC(sizeof(size_t)*res->nb_levels);
  res->arity           = (int*)MALLOC(sizeof(int)*res->nb_levels);

  /* Build TreeMatch topology */
  for( depth = 0 ; depth < topodepth ; depth++ ){
    nb_nodes = hwloc_get_nbobjs_by_depth(topology, depth);
    res->nb_nodes[depth] = nb_nodes;
    res->node_id[depth] = (int*)MALLOC(sizeof(int)*nb_nodes);
    res->node_rank[depth] = (int*)MALLOC(sizeof(int)*nb_nodes);

    objs = (hwloc_obj_t*)MALLOC(sizeof(hwloc_obj_t)*nb_nodes);
    objs[0] = hwloc_get_next_obj_by_depth(topology,depth,NULL);
    hwloc_get_closest_objs(topology,objs[0],objs+1,nb_nodes-1);
    res->arity[depth] = objs[0]->arity;

    if (depth == topodepth -1){
      res->nb_constraints = nb_nodes;
      res->nb_proc_units = nb_nodes;
    }
    /* printf("%d:",res->arity[depth]); */

    /* Build process id tab */
    for (i = 0; i < nb_nodes; i++){
      res->node_id[depth][i] = objs[i]->os_index;
      res->node_rank[depth][objs[i]->os_index] = i;
      /* if(depth==topodepth-1) */
    }
    FREE(objs);
  }



  /* Destroy HWLOC topology object. */
  hwloc_topology_destroy(topology);

  /* printf("\n"); */
  return res;
}
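The symetric() check used at the top of get_local_topo_with_hwloc() is a TreeMatch helper that is not included in this listing. As a rough, hypothetical sketch (not the actual TreeMatch code), such a check can verify that every object at a given depth has the same arity, so that a single arity value per level describes the whole tree:

/* Hypothetical sketch, assuming "symmetric" means: at every depth,
 * all objects have the same number of children. */
static int topo_is_symmetric(hwloc_topology_t topology)
{
  unsigned depth, i, nb;
  unsigned topodepth = hwloc_topology_get_depth(topology);

  for (depth = 0; depth + 1 < topodepth; depth++) {
    unsigned arity = hwloc_get_obj_by_depth(topology, depth, 0)->arity;
    nb = hwloc_get_nbobjs_by_depth(topology, depth);
    for (i = 1; i < nb; i++)
      if (hwloc_get_obj_by_depth(topology, depth, i)->arity != arity)
        return 0;
  }
  return 1;
}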
Example #7
void chpl_topo_init(void) {
  //
  // We only load hwloc topology information in configurations where
  // the locale model is other than "flat" or the tasking is based on
  // Qthreads (which will use the topology we load).  We don't use
  // it otherwise (so far) because loading it is somewhat expensive.
  //
  if (strcmp(CHPL_LOCALE_MODEL, "flat") != 0
      || strcmp(CHPL_TASKS, "qthreads") == 0) {
    haveTopology = true;
  } else {
    haveTopology = false;
    return;
  }

  // Check hwloc API version.
  // Require at least hwloc version 1.11 (we need 1.11.5 to not crash
  // in some NUMA configurations).
  // Check both at build time and run time.
#define REQUIRE_HWLOC_VERSION 0x00010b00

#if HWLOC_API_VERSION < REQUIRE_HWLOC_VERSION
#error hwloc version 1.11.5 or newer is required
#endif

  CHK_ERR(hwloc_get_api_version() >= REQUIRE_HWLOC_VERSION);

  //
  // Allocate and initialize topology object.
  //
  CHK_ERR_ERRNO(hwloc_topology_init(&topology) == 0);

  //
  // Perform the topology detection.
  //
  CHK_ERR_ERRNO(hwloc_topology_load(topology) == 0);

  //
  // What is supported?
  //
  topoSupport = hwloc_topology_get_support(topology);

  //
  // TODO: update comment
  // For now, don't support setting memory locality when comm=ugni or
  // comm=gasnet, seg!=everything.  Those are the two configurations in
  // which we use hugepages and/or memory registered with the comm
  // interface, both of which may be a problem for the set-membind call.
  // We will have other ways to achieve locality for these configs in
  // the future.
  //
  do_set_area_membind = true;
  if ((strcmp(CHPL_COMM, "gasnet") == 0
       && strcmp(CHPL_GASNET_SEGMENT, "everything") != 0)) {
      do_set_area_membind = false;
  }

  //
  // We need depth information.
  //
  topoDepth = hwloc_topology_get_depth(topology);

  //
  // How many NUMA domains do we have?
  //
  {
    int level;

    //
    // Note: If there are multiple levels with NUMA nodes, this finds
    //       only the uppermost.
    //
    for (level = 0, numaLevel = -1;
         level < topoDepth && numaLevel == -1;
         level++) {
      if (hwloc_get_depth_type(topology, level) == HWLOC_OBJ_NUMANODE) {
        numaLevel = level;
      }
    }
  }

  //
  // Find the NUMA nodes, that is, the objects at numaLevel that also
  // have CPUs.  This is as opposed to things like Xeon Phi HBM, which
  // is memory-only, no CPUs.
  //
  {
    const hwloc_cpuset_t cpusetAll = hwloc_get_root_obj(topology)->cpuset;
    numNumaDomains =
      hwloc_get_nbobjs_inside_cpuset_by_depth(topology, cpusetAll, numaLevel);
  }
}
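The loop above scans every level to locate the (uppermost) depth holding NUMA nodes. As a hedged alternative sketch, not what the Chapel runtime actually does, hwloc can also be asked directly for that depth:

/* Hypothetical helper, illustration only: return the depth of NUMA nodes,
 * or -1 if hwloc reports none or reports them at multiple depths. */
static int find_numa_level(hwloc_topology_t topology)
{
  int level = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE);
  if (level == HWLOC_TYPE_DEPTH_UNKNOWN || level == HWLOC_TYPE_DEPTH_MULTIPLE)
    return -1;
  return level;
}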
Example #8
int main(int argc, char *argv[])
{
  hwloc_topology_t topology;
  unsigned depth;
  hwloc_bitmap_t cpubind_set, membind_set;
  int got_cpubind = 0, got_membind = 0;
  int working_on_cpubind = 1; /* membind if 0 */
  int get_binding = 0;
  int get_last_cpu_location = 0;
  unsigned long flags = HWLOC_TOPOLOGY_FLAG_WHOLE_IO|HWLOC_TOPOLOGY_FLAG_ICACHES;
  int force = 0;
  int single = 0;
  int verbose = 0;
  int logical = 1;
  int taskset = 0;
  int cpubind_flags = 0;
  hwloc_membind_policy_t membind_policy = HWLOC_MEMBIND_BIND;
  int membind_flags = 0;
  int opt;
  int ret;
  int pid_number = -1;
  hwloc_pid_t pid = 0; /* only valid when pid_number > 0, but gcc-4.8 still reports uninitialized warnings */
  char *callname;

  cpubind_set = hwloc_bitmap_alloc();
  membind_set = hwloc_bitmap_alloc();

  hwloc_topology_init(&topology);
  hwloc_topology_set_flags(topology, flags);
  hwloc_topology_load(topology);
  depth = hwloc_topology_get_depth(topology);

  callname = argv[0];
  /* skip argv[0], handle options */
  argv++;
  argc--;

  while (argc >= 1) {
    if (!strcmp(argv[0], "--")) {
      argc--;
      argv++;
      break;
    }

    opt = 0;

    if (*argv[0] == '-') {
      if (!strcmp(argv[0], "-v") || !strcmp(argv[0], "--verbose")) {
	verbose++;
	goto next;
      }
      else if (!strcmp(argv[0], "-q") || !strcmp(argv[0], "--quiet")) {
	verbose--;
	goto next;
      }
      else if (!strcmp(argv[0], "--help")) {
        usage("hwloc-bind", stdout);
	return EXIT_SUCCESS;
      }
      else if (!strcmp(argv[0], "--single")) {
	single = 1;
	goto next;
      }
      else if (!strcmp(argv[0], "-f") || !strcmp(argv[0], "--force")) {
	force = 1;
	goto next;
      }
      else if (!strcmp(argv[0], "--strict")) {
	cpubind_flags |= HWLOC_CPUBIND_STRICT;
	membind_flags |= HWLOC_MEMBIND_STRICT;
	goto next;
      }
      else if (!strcmp(argv[0], "--pid")) {
        if (argc < 2) {
          usage ("hwloc-bind", stderr);
          exit(EXIT_FAILURE);
        }
        pid_number = atoi(argv[1]);
        opt = 1;
        goto next;
      }
      else if (!strcmp (argv[0], "--version")) {
          printf("%s %s\n", callname, HWLOC_VERSION);
          exit(EXIT_SUCCESS);
      }
      if (!strcmp(argv[0], "-l") || !strcmp(argv[0], "--logical")) {
        logical = 1;
        goto next;
      }
      if (!strcmp(argv[0], "-p") || !strcmp(argv[0], "--physical")) {
        logical = 0;
        goto next;
      }
      if (!strcmp(argv[0], "--taskset")) {
        taskset = 1;
        goto next;
      }
      else if (!strcmp (argv[0], "-e") || !strncmp (argv[0], "--get-last-cpu-location", 10)) {
	get_last_cpu_location = 1;
	goto next;
      }
      else if (!strcmp (argv[0], "--get")) {
	get_binding = 1;
	goto next;
      }
      else if (!strcmp (argv[0], "--cpubind")) {
	  working_on_cpubind = 1;
	  goto next;
      }
      else if (!strcmp (argv[0], "--membind")) {
	  working_on_cpubind = 0;
	  goto next;
      }
      else if (!strcmp (argv[0], "--mempolicy")) {
	if (!strncmp(argv[1], "default", 2))
	  membind_policy = HWLOC_MEMBIND_DEFAULT;
	else if (!strncmp(argv[1], "firsttouch", 2))
	  membind_policy = HWLOC_MEMBIND_FIRSTTOUCH;
	else if (!strncmp(argv[1], "bind", 2))
	  membind_policy = HWLOC_MEMBIND_BIND;
	else if (!strncmp(argv[1], "interleave", 2))
	  membind_policy = HWLOC_MEMBIND_INTERLEAVE;
	else if (!strncmp(argv[1], "replicate", 2))
	  membind_policy = HWLOC_MEMBIND_REPLICATE;
	else if (!strncmp(argv[1], "nexttouch", 2))
	  membind_policy = HWLOC_MEMBIND_NEXTTOUCH;
	else {
	  fprintf(stderr, "Unrecognized memory binding policy %s\n", argv[1]);
          usage ("hwloc-bind", stderr);
          exit(EXIT_FAILURE);
	}
	opt = 1;
	goto next;
      }
      else if (!strcmp (argv[0], "--whole-system")) {
	flags |= HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM;
	hwloc_topology_destroy(topology);
	hwloc_topology_init(&topology);
	hwloc_topology_set_flags(topology, flags);
	hwloc_topology_load(topology);
	depth = hwloc_topology_get_depth(topology);
	goto next;
      }
      else if (!strcmp (argv[0], "--restrict")) {
	hwloc_bitmap_t restrictset;
	int err;
	if (argc < 2) {
	  usage (callname, stdout);
	  exit(EXIT_FAILURE);
	}
	restrictset = hwloc_bitmap_alloc();
	hwloc_bitmap_sscanf(restrictset, argv[1]);
	err = hwloc_topology_restrict (topology, restrictset, 0);
	if (err) {
	  perror("Restricting the topology");
	  /* fallthrough */
	}
	hwloc_bitmap_free(restrictset);
	argc--;
	argv++;
	goto next;
      }

      fprintf (stderr, "Unrecognized option: %s\n", argv[0]);
      usage("hwloc-bind", stderr);
      return EXIT_FAILURE;
    }

    ret = hwloc_calc_process_arg(topology, depth, argv[0], logical,
				 working_on_cpubind ? cpubind_set : membind_set,
				 verbose);
    if (ret < 0) {
      if (verbose > 0)
	fprintf(stderr, "assuming the command starts at %s\n", argv[0]);
      break;
    }
    if (working_on_cpubind)
      got_cpubind = 1;
    else
      got_membind = 1;

  next:
    argc -= opt+1;
    argv += opt+1;
  }

  if (pid_number > 0) {
    pid = hwloc_pid_from_number(pid_number, !(get_binding || get_last_cpu_location));
    /* no need to set_pid()
     * the doc just says we're operating on pid, not that we're retrieving the topo/cpuset as seen from inside pid
     */
  }

  if (get_last_cpu_location && !working_on_cpubind) {
    fprintf(stderr, "Options --membind and --get-last-cpu-location cannot be combined.\n");
    return EXIT_FAILURE;
  }
  if ((get_binding || get_last_cpu_location) && (got_cpubind || got_membind)) {
    /* doesn't work because get_binding/get_last_cpu_location overwrites cpubind_set */
    fprintf(stderr, "Cannot display and set binding at the same time.\n");
    return EXIT_FAILURE;
  }

  if (get_binding || get_last_cpu_location) {
    char *s;
    const char *policystr = NULL;
    int err;
    if (working_on_cpubind) {
      if (get_last_cpu_location) {
	if (pid_number > 0)
	  err = hwloc_get_proc_last_cpu_location(topology, pid, cpubind_set, 0);
	else
	  err = hwloc_get_last_cpu_location(topology, cpubind_set, 0);
      } else {
	if (pid_number > 0)
	  err = hwloc_get_proc_cpubind(topology, pid, cpubind_set, 0);
	else
	  err = hwloc_get_cpubind(topology, cpubind_set, 0);
      }
      if (err) {
	const char *errmsg = strerror(errno);
	if (pid_number > 0)
	  fprintf(stderr, "hwloc_get_proc_%s %d failed (errno %d %s)\n", get_last_cpu_location ? "last_cpu_location" : "cpubind", pid_number, errno, errmsg);
	else
	  fprintf(stderr, "hwloc_get_%s failed (errno %d %s)\n", get_last_cpu_location ? "last_cpu_location" : "cpubind", errno, errmsg);
	return EXIT_FAILURE;
      }
      if (taskset)
	hwloc_bitmap_taskset_asprintf(&s, cpubind_set);
      else
	hwloc_bitmap_asprintf(&s, cpubind_set);
    } else {
      hwloc_membind_policy_t policy;
      if (pid_number > 0)
	err = hwloc_get_proc_membind(topology, pid, membind_set, &policy, 0);
      else
	err = hwloc_get_membind(topology, membind_set, &policy, 0);
      if (err) {
	const char *errmsg = strerror(errno);
        if (pid_number > 0)
          fprintf(stderr, "hwloc_get_proc_membind %d failed (errno %d %s)\n", pid_number, errno, errmsg);
        else
	  fprintf(stderr, "hwloc_get_membind failed (errno %d %s)\n", errno, errmsg);
	return EXIT_FAILURE;
      }
      if (taskset)
	hwloc_bitmap_taskset_asprintf(&s, membind_set);
      else
	hwloc_bitmap_asprintf(&s, membind_set);
      switch (policy) {
      case HWLOC_MEMBIND_DEFAULT: policystr = "default"; break;
      case HWLOC_MEMBIND_FIRSTTOUCH: policystr = "firsttouch"; break;
      case HWLOC_MEMBIND_BIND: policystr = "bind"; break;
      case HWLOC_MEMBIND_INTERLEAVE: policystr = "interleave"; break;
      case HWLOC_MEMBIND_REPLICATE: policystr = "replicate"; break;
      case HWLOC_MEMBIND_NEXTTOUCH: policystr = "nexttouch"; break;
      default: fprintf(stderr, "unknown memory policy %d\n", policy); assert(0); break;
      }
    }
    if (policystr)
      printf("%s (%s)\n", s, policystr);
    else
      printf("%s\n", s);
    free(s);
  }

  if (got_membind) {
    if (hwloc_bitmap_iszero(membind_set)) {
      if (verbose >= 0)
	fprintf(stderr, "cannot membind to empty set\n");
      if (!force)
	goto failed_binding;
    }
    if (verbose > 0) {
      char *s;
      hwloc_bitmap_asprintf(&s, membind_set);
      fprintf(stderr, "binding on memory set %s\n", s);
      free(s);
    }
    if (single)
      hwloc_bitmap_singlify(membind_set);
    if (pid_number > 0)
      ret = hwloc_set_proc_membind(topology, pid, membind_set, membind_policy, membind_flags);
    else
      ret = hwloc_set_membind(topology, membind_set, membind_policy, membind_flags);
    if (ret && verbose >= 0) {
      int bind_errno = errno;
      const char *errmsg = strerror(bind_errno);
      char *s;
      hwloc_bitmap_asprintf(&s, membind_set);
      if (pid_number > 0)
        fprintf(stderr, "hwloc_set_proc_membind %s %d failed (errno %d %s)\n", s, pid_number, bind_errno, errmsg);
      else
        fprintf(stderr, "hwloc_set_membind %s failed (errno %d %s)\n", s, bind_errno, errmsg);
      free(s);
    }
    if (ret && !force)
      goto failed_binding;
  }

  if (got_cpubind) {
    if (hwloc_bitmap_iszero(cpubind_set)) {
      if (verbose >= 0)
	fprintf(stderr, "cannot cpubind to empty set\n");
      if (!force)
	goto failed_binding;
    }
    if (verbose > 0) {
      char *s;
      hwloc_bitmap_asprintf(&s, cpubind_set);
      fprintf(stderr, "binding on cpu set %s\n", s);
      free(s);
    }
    if (single)
      hwloc_bitmap_singlify(cpubind_set);
    if (pid_number > 0)
      ret = hwloc_set_proc_cpubind(topology, pid, cpubind_set, cpubind_flags);
    else
      ret = hwloc_set_cpubind(topology, cpubind_set, cpubind_flags);
    if (ret && verbose >= 0) {
      int bind_errno = errno;
      const char *errmsg = strerror(bind_errno);
      char *s;
      hwloc_bitmap_asprintf(&s, cpubind_set);
      if (pid_number > 0)
        fprintf(stderr, "hwloc_set_proc_cpubind %s %d failed (errno %d %s)\n", s, pid_number, bind_errno, errmsg);
      else
        fprintf(stderr, "hwloc_set_cpubind %s failed (errno %d %s)\n", s, bind_errno, errmsg);
      free(s);
    }
    if (ret && !force)
      goto failed_binding;
  }

  hwloc_bitmap_free(cpubind_set);
  hwloc_bitmap_free(membind_set);

  hwloc_topology_destroy(topology);

  if (pid_number > 0)
    return EXIT_SUCCESS;

  if (0 == argc) {
    if (get_binding || get_last_cpu_location)
      return EXIT_SUCCESS;
    fprintf(stderr, "%s: nothing to do!\n", callname);
    return EXIT_FAILURE;
  }

  ret = execvp(argv[0], argv);
  if (ret) {
      fprintf(stderr, "%s: Failed to launch executable \"%s\"\n",
              callname, argv[0]);
      perror("execvp");
  }
  return EXIT_FAILURE;


failed_binding:
  hwloc_bitmap_free(cpubind_set);
  hwloc_bitmap_free(membind_set);
  hwloc_topology_destroy(topology);
  return EXIT_FAILURE;
}
Example #9
int main(void)
{
  hwloc_topology_t topology;
  unsigned nbobjs;
  const struct hwloc_distances_s *distances;
  float d1, d2;
  unsigned depth, topodepth, i, j;
  int err;
  hwloc_obj_t obj1, obj2;

  hwloc_topology_init(&topology);
  hwloc_topology_set_synthetic(topology, "node:4 core:4 pu:1");
  putenv("HWLOC_NUMANode_DISTANCES=0,1,2,3:2*2");
  putenv("HWLOC_PU_DISTANCES=0-15:4*2*2");
  hwloc_topology_load(topology);

  topodepth = hwloc_topology_get_depth(topology);

  for(depth=0; depth<topodepth; depth++) {
    distances = hwloc_get_whole_distance_matrix_by_depth(topology, depth);
    if (!distances || !distances->latency) {
      printf("No distance at depth %u\n", depth);
      continue;
    }

    printf("distance matrix for depth %u:\n", depth);
    print_distances(distances);
    nbobjs = distances->nbobjs;

    obj1 = hwloc_get_obj_by_depth(topology, depth, 0);
    obj2 = hwloc_get_obj_by_depth(topology, depth, nbobjs-1);
    err = hwloc_get_latency(topology, obj1, obj2, &d1, &d2);
    assert(!err);
    assert(d1 == distances->latency[0*nbobjs+(nbobjs-1)]);
    assert(d2 == distances->latency[(nbobjs-1)*nbobjs+0]);
  }

  /* check that hwloc_get_latency works fine on numa distances */
  distances = hwloc_get_whole_distance_matrix_by_type(topology, HWLOC_OBJ_NUMANODE);
  if (!distances || !distances->latency) {
    fprintf(stderr, "No NUMA distance matrix!\n");
    return -1;
  }
  printf("distance matrix for NUMA nodes\n");
  print_distances(distances);
  nbobjs = distances->nbobjs;
  for(i=0; i<nbobjs; i++)
    for(j=0; j<nbobjs; j++) {
      obj1 = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, i);
      obj2 = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, j);
      err = hwloc_get_latency(topology, obj1, obj2, &d1, &d2);
      assert(!err);
      assert(d1 == distances->latency[i*nbobjs+j]);
      assert(d2 == distances->latency[j*nbobjs+i]);
    }
  /* check that some random values are ok */
  assert(distances->latency[0] == 1.0); /* diagonal */
  assert(distances->latency[4] == 4.0); /* same group */
  assert(distances->latency[6] == 8.0); /* different group */
  assert(distances->latency[9] == 8.0); /* different group */
  assert(distances->latency[10] == 1.0); /* diagonal */
  assert(distances->latency[14] == 4.0); /* same group */

  /* check that hwloc_get_latency works fine on PU distances */
  distances = hwloc_get_whole_distance_matrix_by_type(topology, HWLOC_OBJ_PU);
  if (!distances || !distances->latency) {
    fprintf(stderr, "No PU distance matrix!\n");
    return -1;
  }
  printf("distance matrix for PU nodes\n");
  print_distances(distances);
  nbobjs = distances->nbobjs;
  for(i=0; i<16; i++)
    for(j=0; j<16; j++) {
      obj1 = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, i);
      obj2 = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, j);
      err = hwloc_get_latency(topology, obj1, obj2, &d1, &d2);
      assert(!err);
      assert(d1 == distances->latency[i*nbobjs+j]);
      assert(d2 == distances->latency[j*nbobjs+i]);
    }
  /* check that some random values are ok */
  assert(distances->latency[0] == 1.0); /* diagonal */
  assert(distances->latency[1] == 2.0); /* same group */
  assert(distances->latency[3] == 4.0); /* same biggroup */
  assert(distances->latency[15] == 8.0); /* different biggroup */
  assert(distances->latency[250] == 8.0); /* different biggroup */
  assert(distances->latency[253] == 4.0); /* same group */
  assert(distances->latency[254] == 2.0); /* same biggroup */
  assert(distances->latency[255] == 1.0); /* diagonal */

  hwloc_topology_destroy(topology);

  return 0;
}
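This test calls a print_distances() helper that is not shown in the listing. A minimal sketch of what it might look like, assuming it simply dumps the nbobjs x nbobjs latency matrix row by row (the name and format string are illustrative):

static void print_distances(const struct hwloc_distances_s *distances)
{
  unsigned i, j;
  /* One matrix row per output line. */
  for (i = 0; i < distances->nbobjs; i++) {
    for (j = 0; j < distances->nbobjs; j++)
      printf(" %2.3f", distances->latency[i * distances->nbobjs + j]);
    printf("\n");
  }
}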
Example #10
void chpl_topo_init(void) {
  //
  // For now we don't load topology information for locModel=flat, since
  // we won't use it in that case and loading it is somewhat expensive.
  // Eventually we will probably load it even for locModel=flat and use
  // it as the information source for what's currently in chplsys, and
  // also pass it to Qthreads when we use that (so it doesn't load it
  // again), but that's work for the future.
  //
  haveTopology = (strcmp(CHPL_LOCALE_MODEL, "flat") != 0) ? true : false;
  if (!haveTopology) {
    return;
  }

  // Check hwloc API version.
  // Require at least hwloc version 1.11 (we need 1.11.5 to not crash
  // in some NUMA configurations).
  // Check both at build time and run time.
#define REQUIRE_HWLOC_VERSION 0x00010b00

#if HWLOC_API_VERSION < REQUIRE_HWLOC_VERSION
#error hwloc version 1.11.5 or newer is required
#else
  {
    unsigned version = hwloc_get_api_version();
    // check that the version is at least REQUIRE_HWLOC_VERSION
    if (version < REQUIRE_HWLOC_VERSION)
      chpl_internal_error("hwloc version 1.11.5 or newer is required");
  }
#endif

  //
  // Allocate and initialize topology object.
  //
  if (hwloc_topology_init(&topology)) {
    report_error("hwloc_topology_init()", errno);
  }

  //
  // Perform the topology detection.
  //
  if (hwloc_topology_load(topology)) {
    report_error("hwloc_topology_load()", errno);
  }

  //
  // What is supported?
  //
  topoSupport = hwloc_topology_get_support(topology);

  //
  // TODO: update comment
  // For now, don't support setting memory locality when comm=ugni or
  // comm=gasnet, seg!=everything.  Those are the two configurations in
  // which we use hugepages and/or memory registered with the comm
  // interface, both of which may be a problem for the set-membind call.
  // We will have other ways to achieve locality for these configs in
  // the future.
  //
  do_set_area_membind = true;
  if ((strcmp(CHPL_COMM, "gasnet") == 0
       && strcmp(CHPL_GASNET_SEGMENT, "everything") != 0)) {
      do_set_area_membind = false;
  }

  //
  // We need depth information.
  //
  topoDepth = hwloc_topology_get_depth(topology);

  //
  // How many NUMA domains do we have?
  //
  {
    int level;

    //
    // Note: If there are multiple levels with NUMA nodes, this finds
    //       only the uppermost.
    //
    for (level = 0, numaLevel = -1;
         level < topoDepth && numaLevel == -1;
         level++) {
      if (hwloc_get_depth_type(topology, level) == HWLOC_OBJ_NUMANODE) {
        numaLevel = level;
      }
    }
  }

  //
  // Find the NUMA nodes, that is, the objects at numaLevel that also
  // have CPUs.  This is as opposed to things like Xeon Phi HBM, which
  // is memory-only, no CPUs.
  //
  {
    const hwloc_cpuset_t cpusetAll = hwloc_get_root_obj(topology)->cpuset;
    numNumaDomains =
      hwloc_get_nbobjs_inside_cpuset_by_depth(topology, cpusetAll, numaLevel);
  }
}
Example #11
int main(void)
{
  hwloc_topology_t topology;
  unsigned depth;
  unsigned i,j, width;
  char buffer[1024];
  int err;

  /* check a synthetic topology */
  hwloc_topology_init(&topology);
  err = hwloc_topology_set_synthetic(topology, "2 3 4 5 6");
  assert(!err);
  hwloc_topology_load(topology);

  /* internal checks */

  hwloc_topology_check(topology);

  /* local checks */
  depth = hwloc_topology_get_depth(topology);
  assert(depth == 6);

  width = 1;
  for(i=0; i<6; i++) {
    /* check arities */
    assert(hwloc_get_nbobjs_by_depth(topology, i) == width);
    for(j=0; j<width; j++) {
      hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, i, j);
      assert(obj);
      assert(obj->arity == (i<5 ? i+2 : 0));
    }
    width *= i+2;
  }

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), 0);
  assert(err == 75);
  err = strcmp("Package:2 NUMANode:3(memory=1073741824) L2Cache:4(size=4194304) Core:5 PU:6", buffer);
  assert(!err);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_EXTENDED_TYPES|HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_ATTRS);
  assert(err == 42);
  err = strcmp("Package:2 NUMANode:3 L2Cache:4 Core:5 PU:6", buffer);
  assert(!err);

  hwloc_topology_destroy(topology);



  hwloc_topology_init(&topology);
  err = hwloc_topology_set_type_filter(topology, HWLOC_OBJ_L1ICACHE, HWLOC_TYPE_FILTER_KEEP_ALL);
  err = hwloc_topology_set_synthetic(topology, "pack:2(indexes=3,5) numa:2(memory=256GB indexes=pack) l3u:1(size=20mb) l2:2 l1i:1(size=16kB) l1dcache:2 core:1 pu:2(indexes=l2)");
  assert(!err);
  hwloc_topology_load(topology);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), 0);
  assert(err == 181);
  err = strcmp("Package:2 NUMANode:2(memory=274877906944 indexes=2*2:1*2) L3Cache:1(size=20971520) L2Cache:2(size=4194304) L1iCache:1(size=16384) L1dCache:2(size=32768) Core:1 PU:2(indexes=4*8:1*4)", buffer);
  assert(!err);

  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PACKAGE, 1)->os_index == 5);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, 1)->os_index == 2);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 12)->os_index == 3);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 13)->os_index == 11);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 14)->os_index == 19);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 15)->os_index == 27);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 16)->os_index == 4);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 17)->os_index == 12);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 18)->os_index == 20);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 19)->os_index == 28);

  hwloc_topology_destroy(topology);




  hwloc_topology_init(&topology);
  err = hwloc_topology_set_synthetic(topology, "pack:2 core:2 pu:2(indexes=0,4,2,6,1,5,3,7)");
  assert(!err);
  hwloc_topology_load(topology);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), 0);
  assert(err == 72);
  err = strcmp("NUMANode:1(memory=1073741824) Package:2 Core:2 PU:2(indexes=4*2:2*2:1*2)", buffer);
  assert(!err);

  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0)->os_index == 0);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 1)->os_index == 4);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 2)->os_index == 2);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 3)->os_index == 6);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 4)->os_index == 1);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 5)->os_index == 5);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 6)->os_index == 3);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 7)->os_index == 7);

  hwloc_topology_destroy(topology);




  hwloc_topology_init(&topology);
  err = hwloc_topology_set_synthetic(topology, "pack:2 numa:2 core:1 pu:2(indexes=0,4,2,6,1,3,5,7)");
  assert(!err);
  hwloc_topology_load(topology);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), 0);
  assert(err == 76);
  err = strcmp("Package:2 NUMANode:2(memory=1073741824) Core:1 PU:2(indexes=0,4,2,6,1,3,5,7)", buffer);
  assert(!err);

  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0)->os_index == 0);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 1)->os_index == 4);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 2)->os_index == 2);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 3)->os_index == 6);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 4)->os_index == 1);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 5)->os_index == 3);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 6)->os_index == 5);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 7)->os_index == 7);

  hwloc_topology_destroy(topology);

  return 0;
}
Example #12
int main(int argc, char *argv[])
{
  hwloc_topology_t topology;
  unsigned depth;
  hwloc_bitmap_t cpubind_set, membind_set;
  int cpubind = 1; /* membind if 0 */
  int get_binding = 0;
  int get_last_cpu_location = 0;
  int single = 0;
  int verbose = 0;
  int logical = 1;
  int taskset = 0;
  int cpubind_flags = 0;
  hwloc_membind_policy_t membind_policy = HWLOC_MEMBIND_BIND;
  int membind_flags = 0;
  int opt;
  int ret;
  hwloc_pid_t pid = 0;
  char **orig_argv = argv;

  cpubind_set = hwloc_bitmap_alloc();
  membind_set = hwloc_bitmap_alloc();

  hwloc_topology_init(&topology);
  hwloc_topology_set_flags(topology, HWLOC_TOPOLOGY_FLAG_WHOLE_IO);
  hwloc_topology_load(topology);
  depth = hwloc_topology_get_depth(topology);

  /* skip argv[0], handle options */
  argv++;
  argc--;

  while (argc >= 1) {
    if (!strcmp(argv[0], "--")) {
      argc--;
      argv++;
      break;
    }

    opt = 0;

    if (*argv[0] == '-') {
      if (!strcmp(argv[0], "-v")) {
	verbose = 1;
	goto next;
      }
      else if (!strcmp(argv[0], "--help")) {
	usage(stdout);
	return EXIT_SUCCESS;
      }
      else if (!strcmp(argv[0], "--single")) {
	single = 1;
	goto next;
      }
      else if (!strcmp(argv[0], "--strict")) {
	cpubind_flags |= HWLOC_CPUBIND_STRICT;
	membind_flags |= HWLOC_MEMBIND_STRICT;
	goto next;
      }
      else if (!strcmp(argv[0], "--pid")) {
        if (argc < 2) {
          usage (stderr);
          exit(EXIT_FAILURE);
        }
        pid = atoi(argv[1]);
        opt = 1;
        goto next;
      }
      else if (!strcmp (argv[0], "--version")) {
          printf("%s %s\n", orig_argv[0], VERSION);
          exit(EXIT_SUCCESS);
      }
      if (!strcmp(argv[0], "-l") || !strcmp(argv[0], "--logical")) {
        logical = 1;
        goto next;
      }
      if (!strcmp(argv[0], "-p") || !strcmp(argv[0], "--physical")) {
        logical = 0;
        goto next;
      }
      if (!strcmp(argv[0], "--taskset")) {
        taskset = 1;
        goto next;
      }
      else if (!strncmp (argv[0], "--get-last-cpu-location", 10)) {
	get_last_cpu_location = 1;
	goto next;
      }
      else if (!strcmp (argv[0], "--get")) {
	get_binding = 1;
	goto next;
      }
      else if (!strcmp (argv[0], "--cpubind")) {
	  cpubind = 1;
	  goto next;
      }
      else if (!strcmp (argv[0], "--membind")) {
	  cpubind = 0;
	  goto next;
      }
      else if (!strcmp (argv[0], "--mempolicy")) {
	if (!strncmp(argv[1], "default", 2))
	  membind_policy = HWLOC_MEMBIND_DEFAULT;
	else if (!strncmp(argv[1], "firsttouch", 2))
	  membind_policy = HWLOC_MEMBIND_FIRSTTOUCH;
	else if (!strncmp(argv[1], "bind", 2))
	  membind_policy = HWLOC_MEMBIND_BIND;
	else if (!strncmp(argv[1], "interleave", 2))
	  membind_policy = HWLOC_MEMBIND_INTERLEAVE;
	else if (!strncmp(argv[1], "replicate", 2))
	  membind_policy = HWLOC_MEMBIND_REPLICATE;
	else if (!strncmp(argv[1], "nexttouch", 2))
	  membind_policy = HWLOC_MEMBIND_NEXTTOUCH;
	else {
	  fprintf(stderr, "Unrecognized memory binding policy %s\n", argv[1]);
          usage (stderr);
          exit(EXIT_FAILURE);
	}
	opt = 1;
	goto next;
      }

      fprintf (stderr, "Unrecognized option: %s\n", argv[0]);
      usage(stderr);
      return EXIT_FAILURE;
    }

    ret = hwloc_calc_process_arg(topology, depth, argv[0], logical,
				 cpubind ? cpubind_set : membind_set,
				 verbose);
    if (ret < 0) {
      if (verbose)
	fprintf(stderr, "assuming the command starts at %s\n", argv[0]);
      break;
    }

  next:
    argc -= opt+1;
    argv += opt+1;
  }

  if (get_binding || get_last_cpu_location) {
    char *s;
    const char *policystr = NULL;
    int err;
    if (cpubind) {
      if (get_last_cpu_location) {
	if (pid)
	  err = hwloc_get_proc_last_cpu_location(topology, pid, cpubind_set, 0);
	else
	  err = hwloc_get_last_cpu_location(topology, cpubind_set, 0);
      } else {
	if (pid)
	  err = hwloc_get_proc_cpubind(topology, pid, cpubind_set, 0);
	else
	  err = hwloc_get_cpubind(topology, cpubind_set, 0);
      }
      if (err) {
	const char *errmsg = strerror(errno);
	if (pid)
	  fprintf(stderr, "hwloc_get_proc_%s %ld failed (errno %d %s)\n", get_last_cpu_location ? "last_cpu_location" : "cpubind", (long) pid, errno, errmsg);
	else
	  fprintf(stderr, "hwloc_get_%s failed (errno %d %s)\n", get_last_cpu_location ? "last_cpu_location" : "cpubind", errno, errmsg);
	return EXIT_FAILURE;
      }
      if (taskset)
	hwloc_bitmap_taskset_asprintf(&s, cpubind_set);
      else
	hwloc_bitmap_asprintf(&s, cpubind_set);
    } else {
      hwloc_membind_policy_t policy;
      if (pid)
	err = hwloc_get_proc_membind(topology, pid, membind_set, &policy, 0);
      else
	err = hwloc_get_membind(topology, membind_set, &policy, 0);
      if (err) {
	const char *errmsg = strerror(errno);
        if (pid)
          fprintf(stderr, "hwloc_get_proc_membind %ld failed (errno %d %s)\n", (long) pid, errno, errmsg);
        else
	  fprintf(stderr, "hwloc_get_membind failed (errno %d %s)\n", errno, errmsg);
	return EXIT_FAILURE;
      }
      if (taskset)
	hwloc_bitmap_taskset_asprintf(&s, membind_set);
      else
	hwloc_bitmap_asprintf(&s, membind_set);
      switch (policy) {
      case HWLOC_MEMBIND_DEFAULT: policystr = "default"; break;
      case HWLOC_MEMBIND_FIRSTTOUCH: policystr = "firsttouch"; break;
      case HWLOC_MEMBIND_BIND: policystr = "bind"; break;
      case HWLOC_MEMBIND_INTERLEAVE: policystr = "interleave"; break;
      case HWLOC_MEMBIND_REPLICATE: policystr = "replicate"; break;
      case HWLOC_MEMBIND_NEXTTOUCH: policystr = "nexttouch"; break;
      default: fprintf(stderr, "unknown memory policy %d\n", policy); assert(0); break;
      }
    }
    if (policystr)
      printf("%s (%s)\n", s, policystr);
    else
      printf("%s\n", s);
    free(s);
    return EXIT_SUCCESS;
  }

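  /* Apply the requested memory binding, if a memory set was given on the command line. */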
  if (!hwloc_bitmap_iszero(membind_set)) {
    if (verbose) {
      char *s;
      hwloc_bitmap_asprintf(&s, membind_set);
      fprintf(stderr, "binding on memory set %s\n", s);
      free(s);
    }
    if (single)
      hwloc_bitmap_singlify(membind_set);
    if (pid)
      ret = hwloc_set_proc_membind(topology, pid, membind_set, membind_policy, membind_flags);
    else
      ret = hwloc_set_membind(topology, membind_set, membind_policy, membind_flags);
    if (ret) {
      int bind_errno = errno;
      const char *errmsg = strerror(bind_errno);
      char *s;
      hwloc_bitmap_asprintf(&s, membind_set);
      if (pid)
        fprintf(stderr, "hwloc_set_proc_membind %s %ld failed (errno %d %s)\n", s, (long) pid, bind_errno, errmsg);
      else
        fprintf(stderr, "hwloc_set_membind %s failed (errno %d %s)\n", s, bind_errno, errmsg);
      free(s);
    }
  }

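  /* Apply the requested CPU binding, if a CPU set was given on the command line. */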
  if (!hwloc_bitmap_iszero(cpubind_set)) {
    if (verbose) {
      char *s;
      hwloc_bitmap_asprintf(&s, cpubind_set);
      fprintf(stderr, "binding on cpu set %s\n", s);
      free(s);
    }
    if (single)
      hwloc_bitmap_singlify(cpubind_set);
    if (pid)
      ret = hwloc_set_proc_cpubind(topology, pid, cpubind_set, cpubind_flags);
    else
      ret = hwloc_set_cpubind(topology, cpubind_set, cpubind_flags);
    if (ret) {
      int bind_errno = errno;
      const char *errmsg = strerror(bind_errno);
      char *s;
      hwloc_bitmap_asprintf(&s, cpubind_set);
      if (pid)
        fprintf(stderr, "hwloc_set_proc_cpubind %s %ld failed (errno %d %s)\n", s, (long) pid, bind_errno, errmsg);
      else
        fprintf(stderr, "hwloc_set_cpubind %s failed (errno %d %s)\n", s, bind_errno, errmsg);
      free(s);
    }
  }

  hwloc_bitmap_free(cpubind_set);
  hwloc_bitmap_free(membind_set);

  hwloc_topology_destroy(topology);

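  /* With --pid we only bind the target process; otherwise execute the
     remaining command line under the binding we just applied. */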
  if (pid)
    return EXIT_SUCCESS;

  if (0 == argc) {
    fprintf(stderr, "%s: nothing to do!\n", orig_argv[0]);
    return EXIT_FAILURE;
  }

  ret = execvp(argv[0], argv);
  if (ret) {
      fprintf(stderr, "%s: Failed to launch executable \"%s\"\n", 
              orig_argv[0], argv[0]);
      perror("execvp");
  }
  return EXIT_FAILURE;
}
Example #13
tm_topology_t* hwloc_to_tm(char *filename,double **pcost)
{
  hwloc_topology_t topology;
  tm_topology_t *res = NULL;
  hwloc_obj_t *objs = NULL;
  unsigned topodepth,depth;
  int nb_nodes,i;
  double *cost;
  int err;

  /* Build the topology */
  hwloc_topology_init(&topology);
  err = hwloc_topology_set_xml(topology,filename);
  if(err == -1){
    if(get_verbose_level() >= CRITICAL)
      fprintf(stderr,"Error: %s is a bad xml topology file!\n",filename);
    exit(-1);
  }

  hwloc_topology_ignore_all_keep_structure(topology);
  hwloc_topology_load(topology);


  /* Test if symetric */
  if(!symetric(topology)){
    if(get_verbose_level() >= CRITICAL)
      fprintf(stderr,"%s not symetric!\n",filename);
    exit(-1);
  }

  /* work on depth */
  topodepth = hwloc_topology_get_depth(topology);

  res = (tm_topology_t*)MALLOC(sizeof(tm_topology_t));
  res->nb_levels = topodepth;
  res->node_id = (int**)MALLOC(sizeof(int*)*res->nb_levels);
  res->nb_nodes = (int*)MALLOC(sizeof(int)*res->nb_levels);
  res->arity = (int*)MALLOC(sizeof(int)*res->nb_levels);

  if(get_verbose_level() >= INFO)
      printf("topodepth = %d\n",topodepth);

  /* Build TreeMatch topology */
  for( depth = 0 ; depth < topodepth ; depth++ ){
    nb_nodes = hwloc_get_nbobjs_by_depth(topology, depth);
    res->nb_nodes[depth] = nb_nodes;
    res->node_id[depth] = (int*)MALLOC(sizeof(int)*nb_nodes);

    objs = (hwloc_obj_t*)MALLOC(sizeof(hwloc_obj_t)*nb_nodes);
    objs[0] = hwloc_get_next_obj_by_depth(topology,depth,NULL);
    hwloc_get_closest_objs(topology,objs[0],objs+1,nb_nodes-1);
    res->arity[depth] = objs[0]->arity;

    if(get_verbose_level() >= INFO)
      printf("%d(%d):",res->arity[depth],nb_nodes);

    /* Build process id tab */
    for (i = 0; i < nb_nodes; i++){
      res->node_id[depth][i] = objs[i]->os_index;
      /* if(depth==topodepth-1) */
    }
    FREE(objs);
  }

  cost = (double*)CALLOC(res->nb_levels,sizeof(double));
  for(i=0; i<res->nb_levels; i++){
    cost[i] = speed(i);
  }

  *pcost = cost;


  /* Destroy topology object. */
  hwloc_topology_destroy(topology);
  if(get_verbose_level() >= INFO)
    printf("\n");
  return res;
}
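A minimal usage sketch for the hwloc_to_tm() example above; the "machine.xml" path, the sketch function name, and the printed fields are illustrative assumptions rather than part of the original code:

static void hwloc_to_tm_usage_sketch(void)
{
  double *cost = NULL; /* filled by hwloc_to_tm() through the pcost out-parameter */
  tm_topology_t *t = hwloc_to_tm("machine.xml", &cost);
  /* one cost entry per topology level, allocated with CALLOC() inside hwloc_to_tm() */
  printf("levels = %d, leaves = %d\n", t->nb_levels, t->nb_nodes[t->nb_levels - 1]);
  FREE(cost); /* the tm_topology_t itself is released through the TreeMatch API */
}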
Example #14
int bind_myself_to_core(hwloc_topology_t topology, int id){
  hwloc_cpuset_t cpuset;
  hwloc_obj_t obj;
  char *str;
  int binding_res;
  int depth = hwloc_topology_get_depth(topology);
  int nb_cores = hwloc_get_nbobjs_by_depth(topology, depth-1);
  int my_core;
  int nb_threads = get_nb_threads();
  /* printf("depth=%d\n",depth); */

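  /* Pick the target core for this thread according to the mapping policy:
     SCATTER spreads threads evenly across the cores, COMPACT packs them on
     consecutive cores (and is also the fallback for unknown policies). */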
  switch (mapping_policy){
  case SCATTER:
    my_core = id*(nb_cores/nb_threads);
    break;
  default:
    if(verbose_level>=WARNING){
      printf("Wrong scheduling policy. Using COMPACT\n");
    }
    /* fall through to COMPACT */
  case COMPACT:
    my_core = id%nb_cores;
  }

    if(verbose_level>=INFO){
      printf("Mapping thread %d on core %d\n",id,my_core);
    }

    /* Get my core. */
    obj = hwloc_get_obj_by_depth(topology, depth-1, my_core);
    if (obj) {
      /* Get a copy of its cpuset that we may modify. */
      cpuset = hwloc_bitmap_dup(obj->cpuset);

      /* Get only one logical processor (in case the core is
	 SMT/hyperthreaded). */
      hwloc_bitmap_singlify(cpuset);


      /*hwloc_bitmap_asprintf(&str, cpuset);
      printf("Binding thread %d to cpuset %s\n", my_core,str);
      FREE(str);
      */

      /* And try  to bind ourself there. */
      binding_res = hwloc_set_cpubind(topology, cpuset, HWLOC_CPUBIND_THREAD);
      if (binding_res == -1){
	int error = errno;
	hwloc_bitmap_asprintf(&str, obj->cpuset);
	if(verbose_level>=WARNING)
	  printf("Thread %d couldn't bind to cpuset %s: %s.\n This thread is not bound to any core...\n", my_core, str, strerror(error));
	free(str); /* str is allocated by hlwoc, free it normally*/
	return 0;
      }
      /* FREE our cpuset copy */
      hwloc_bitmap_free(cpuset);
      return 1;
    }else{
      if(verbose_level>=WARNING)
	printf("No valid object for core id %d!\n",my_core);
      return 0;
    }
}
Example #15
void hwloc_init_cacheTopology(void)
{
    int maxNumLevels=0;
    int id=0;
    CacheLevel* cachePool = NULL;
    hwloc_obj_t obj;
    int depth;
    int d;

    /* Sum up all depths with caches */
    depth = hwloc_topology_get_depth(hwloc_topology);
    for (d = 0; d < depth; d++)
    {
        if (hwloc_get_depth_type(hwloc_topology, d) == HWLOC_OBJ_CACHE)
            maxNumLevels++;
    }
    cachePool = (CacheLevel*) malloc(maxNumLevels * sizeof(CacheLevel));
    /* Start at the bottom of the tree to get all cache levels in order */
    depth = hwloc_topology_get_depth(hwloc_topology);
    id = 0;
    for(d=depth-1;d >= 0; d--)
    {
        /* We only need caches, so skip other levels */
        if (hwloc_get_depth_type(hwloc_topology, d) != HWLOC_OBJ_CACHE)
        {
            continue;
        }
        /* Get the cache object */
        obj = hwloc_get_obj_by_depth(hwloc_topology, d, 0);
        /* All caches have this attribute, so safe to access */
        switch (obj->attr->cache.type)
        {
            case HWLOC_OBJ_CACHE_DATA:
                cachePool[id].type = DATACACHE;
                break;
            case HWLOC_OBJ_CACHE_INSTRUCTION:
                cachePool[id].type = INSTRUCTIONCACHE;
                break;
            case HWLOC_OBJ_CACHE_UNIFIED:
                cachePool[id].type = UNIFIEDCACHE;
                break;
            default:
                cachePool[id].type = NOCACHE;
                break;
        }

        cachePool[id].associativity = obj->attr->cache.associativity;
        cachePool[id].level = obj->attr->cache.depth;
        cachePool[id].lineSize = obj->attr->cache.linesize;
        cachePool[id].size = obj->attr->cache.size;
        cachePool[id].sets = 0;
        if ((cachePool[id].associativity * cachePool[id].lineSize) != 0)
        {
            cachePool[id].sets = cachePool[id].size /
                (cachePool[id].associativity * cachePool[id].lineSize);
        }
        /* Count all HWThreads below the current cache */
        cachePool[id].threads = hwloc_record_objs_of_type_below_obj(
                        hwloc_topology, obj, HWLOC_OBJ_PU, NULL, NULL);
        /* We need to read the inclusiveness from CPUID, no possibility in hwloc */
        switch ( cpuid_info.family )
        {
            case MIC_FAMILY:
            case P6_FAMILY:
                cachePool[id].inclusive = readCacheInclusiveIntel(cachePool[id].level);
                break;
            case K16_FAMILY:
            case K15_FAMILY:
                cachePool[id].inclusive = readCacheInclusiveAMD(cachePool[id].level);
                break;
            /* For K8 and K10 it is known that they are inclusive */
            case K8_FAMILY:
            case K10_FAMILY:
                cachePool[id].inclusive = 1;
                break;
            default:
                ERROR_PLAIN_PRINT(Processor is not supported);
                break;
        }
        id++;
    }

    cpuid_topology.numCacheLevels = maxNumLevels;
    cpuid_topology.cacheLevels = cachePool;
    return;
}
Example #16
int main(void)
{
  hwloc_topology_t local, global;
  hwloc_obj_t sw1, sw2, sw11, sw12, sw21, sw22, root;
  int err;

  printf("Loading the local topology...\n");
  hwloc_topology_init(&local);
  hwloc_topology_set_synthetic(local, "n:2 s:2 ca:1 core:2 ca:2 pu:2");
  hwloc_topology_load(local);

  printf("Try to create an empty custom topology...\n");
  hwloc_topology_init(&global);
  hwloc_topology_set_custom(global);
  err = hwloc_topology_load(global);
  assert(err == -1);
  assert(errno == EINVAL);
  hwloc_topology_destroy(global);

  printf("Creating a custom topology...\n");
  hwloc_topology_init(&global);
  hwloc_topology_set_custom(global);

  printf("Inserting the local topology into the global one...\n");
  root = hwloc_get_root_obj(global);

  sw1 = hwloc_custom_insert_group_object_by_parent(global, root, 0);
  sw11 = hwloc_custom_insert_group_object_by_parent(global, sw1, 1);
  hwloc_custom_insert_topology(global, sw11, local, NULL);
  hwloc_custom_insert_topology(global, sw11, local, NULL);
  sw12 = hwloc_custom_insert_group_object_by_parent(global, sw1, 1);
  hwloc_custom_insert_topology(global, sw12, local, NULL);
  hwloc_custom_insert_topology(global, sw12, local, NULL);

  sw2 = hwloc_custom_insert_group_object_by_parent(global, root, 0);
  sw21 = hwloc_custom_insert_group_object_by_parent(global, sw2, 1);
  hwloc_custom_insert_topology(global, sw21, local, NULL);
  hwloc_custom_insert_topology(global, sw21, local, NULL);
  hwloc_custom_insert_topology(global, sw21, local, NULL);
  sw22 = hwloc_custom_insert_group_object_by_parent(global, sw2, 1);
  hwloc_custom_insert_topology(global, sw22, local, NULL); /* only one to check that it won't get merged */

  hwloc_topology_destroy(local);

  printf("Building the global topology...\n");
  hwloc_topology_load(global);
  hwloc_topology_check(global);

  assert(hwloc_topology_get_depth(global) == 10);
  assert(hwloc_get_depth_type(global, 0) == HWLOC_OBJ_SYSTEM);
  assert(hwloc_get_nbobjs_by_type(global, HWLOC_OBJ_SYSTEM) == 1);
  assert(hwloc_get_depth_type(global, 1) == HWLOC_OBJ_GROUP);
  assert(hwloc_get_nbobjs_by_depth(global, 1) == 2);
  assert(hwloc_get_depth_type(global, 2) == HWLOC_OBJ_GROUP);
  assert(hwloc_get_nbobjs_by_depth(global, 2) == 4); /* the last group of this level shouldn't be merged */
  assert(hwloc_get_depth_type(global, 3) == HWLOC_OBJ_MACHINE);
  assert(hwloc_get_nbobjs_by_type(global, HWLOC_OBJ_MACHINE) == 8);
  assert(hwloc_get_depth_type(global, 4) == HWLOC_OBJ_NODE);
  assert(hwloc_get_nbobjs_by_type(global, HWLOC_OBJ_NODE) == 16);
  assert(hwloc_get_depth_type(global, 5) == HWLOC_OBJ_SOCKET);
  assert(hwloc_get_nbobjs_by_type(global, HWLOC_OBJ_SOCKET) == 32);
  assert(hwloc_get_depth_type(global, 6) == HWLOC_OBJ_CACHE);
  assert(hwloc_get_nbobjs_by_depth(global, 6) == 32);
  assert(hwloc_get_depth_type(global, 7) == HWLOC_OBJ_CORE);
  assert(hwloc_get_nbobjs_by_type(global, HWLOC_OBJ_CORE) == 64);
  assert(hwloc_get_depth_type(global, 8) == HWLOC_OBJ_CACHE);
  assert(hwloc_get_nbobjs_by_depth(global, 8) == 128);
  assert(hwloc_get_depth_type(global, 9) == HWLOC_OBJ_PU);
  assert(hwloc_get_nbobjs_by_type(global, HWLOC_OBJ_PU) == 256);

  hwloc_topology_destroy(global);

  return 0;
}
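Example #17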
int main(void)
{
    hwloc_topology_t topology;
    unsigned depth;
    hwloc_obj_t objs[OBJ_MAX];
    hwloc_obj_t obj;
    hwloc_bitmap_t set;
    int ret;

    hwloc_topology_init(&topology);
    hwloc_topology_set_synthetic(topology, SYNTHETIC_TOPOLOGY_DESCRIPTION);
    hwloc_topology_load(topology);
    depth = hwloc_topology_get_depth(topology);

    /* just get the system object */
    obj = hwloc_get_root_obj(topology);
    ret = hwloc_get_largest_objs_inside_cpuset(topology, obj->cpuset, objs, 1);
    assert(ret == 1);
    assert(objs[0] == obj);
    objs[0] = hwloc_get_first_largest_obj_inside_cpuset(topology, obj->cpuset);
    assert(objs[0] == obj);

    /* just get the very last object */
    obj = hwloc_get_obj_by_depth(topology, depth-1, hwloc_get_nbobjs_by_depth(topology, depth-1)-1);
    ret = hwloc_get_largest_objs_inside_cpuset(topology, obj->cpuset, objs, 1);
    assert(ret == 1);
    assert(objs[0] == obj);

    /* try an empty one */
    set = hwloc_bitmap_alloc();
    ret = hwloc_get_largest_objs_inside_cpuset(topology, set, objs, 1);
    assert(ret == 0);
    objs[0] = hwloc_get_first_largest_obj_inside_cpuset(topology, set);
    assert(objs[0] == NULL);
    hwloc_bitmap_free(set);

    /* try an impossible one */
    set = hwloc_bitmap_alloc();
    hwloc_bitmap_sscanf(set, GIVEN_TOOLARGE_CPUSET_STRING);
    ret = hwloc_get_largest_objs_inside_cpuset(topology, set, objs, 1);
    assert(ret == -1);
    objs[0] = hwloc_get_first_largest_obj_inside_cpuset(topology, set);
    assert(objs[0] == NULL);
    hwloc_bitmap_free(set);

    /* try a harder one with 1 obj instead of 2 needed */
    set = hwloc_bitmap_alloc();
    hwloc_bitmap_sscanf(set, GIVEN_LARGESPLIT_CPUSET_STRING);
    ret = hwloc_get_largest_objs_inside_cpuset(topology, set, objs, 1);
    assert(ret == 1);
    assert(objs[0] == hwloc_get_obj_by_depth(topology, depth-1, 0));
    objs[0] = hwloc_get_first_largest_obj_inside_cpuset(topology, set);
    assert(objs[0] == hwloc_get_obj_by_depth(topology, depth-1, 0));
    /* try a harder one with lots of objs instead of 2 needed */
    ret = hwloc_get_largest_objs_inside_cpuset(topology, set, objs, 2);
    assert(ret == 2);
    assert(objs[0] == hwloc_get_obj_by_depth(topology, depth-1, 0));
    assert(objs[1] == hwloc_get_obj_by_depth(topology, depth-1, hwloc_get_nbobjs_by_depth(topology, depth-1)-1));
    objs[0] = hwloc_get_first_largest_obj_inside_cpuset(topology, set);
    hwloc_bitmap_andnot(set, set, objs[0]->cpuset);
    objs[1] = hwloc_get_first_largest_obj_inside_cpuset(topology, set);
    hwloc_bitmap_andnot(set, set, objs[1]->cpuset);
    objs[2] = hwloc_get_first_largest_obj_inside_cpuset(topology, set);
    assert(objs[0] == hwloc_get_obj_by_depth(topology, depth-1, 0));
    assert(objs[1] == hwloc_get_obj_by_depth(topology, depth-1, hwloc_get_nbobjs_by_depth(topology, depth-1)-1));
    assert(objs[2] == NULL);
    assert(hwloc_bitmap_iszero(set));
    hwloc_bitmap_free(set);

    /* try a very hard one */
    set = hwloc_bitmap_alloc();
    hwloc_bitmap_sscanf(set, GIVEN_HARD_CPUSET_STRING);
    ret = hwloc_get_largest_objs_inside_cpuset(topology, set, objs, OBJ_MAX);
    assert(objs[0] == hwloc_get_obj_by_depth(topology, 5, 29));
    assert(objs[1] == hwloc_get_obj_by_depth(topology, 3, 5));
    assert(objs[2] == hwloc_get_obj_by_depth(topology, 3, 6));
    assert(objs[3] == hwloc_get_obj_by_depth(topology, 3, 7));
    assert(objs[4] == hwloc_get_obj_by_depth(topology, 2, 2));
    assert(objs[5] == hwloc_get_obj_by_depth(topology, 4, 36));
    assert(objs[6] == hwloc_get_obj_by_depth(topology, 5, 74));
    hwloc_bitmap_free(set);

    hwloc_topology_destroy(topology);

    return EXIT_SUCCESS;
}
Example #18
int main(void)
{
  hwloc_topology_t topology;
  struct hwloc_distances_s *distances[2];
  hwloc_obj_t objs[16];
  uint64_t values[16*16];
  unsigned depth, topodepth;
  unsigned i, j, k, nr;
  int err;

  hwloc_topology_init(&topology);
  hwloc_topology_set_synthetic(topology, "node:4 core:4 pu:1");
  hwloc_topology_load(topology);

  nr = 0;
  err = hwloc_distances_get(topology, &nr, distances, 0, 0);
  assert(!err);
  assert(!nr);
  if (!nr)
    printf("No distance\n");

  printf("\nInserting NUMA distances\n");
  for(i=0; i<4; i++)
    objs[i] = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, i);
  /* 4x4 latency matrix for the 4 NUMA nodes: two groups of 2
     (1 on the diagonal, 4 within a group, 8 across groups) */
  for(i=0; i<16; i++)
    values[i] = 8;
  values[0+4*1] = 4;
  values[1+4*0] = 4;
  values[2+4*3] = 4;
  values[3+4*2] = 4;
  for(i=0; i<4; i++)
    values[i+4*i] = 1;
  err = hwloc_distances_add(topology, 4, objs, values,
			    HWLOC_DISTANCES_KIND_MEANS_LATENCY|HWLOC_DISTANCES_KIND_FROM_USER,
			    HWLOC_DISTANCES_FLAG_GROUP);
  assert(!err);

  topodepth = hwloc_topology_get_depth(topology);
  for(depth=0; depth<topodepth; depth++) {
    nr = 0;
    err = hwloc_distances_get_by_depth(topology, depth, &nr, distances, 0, 0);
    assert(!err);
    if (depth == 2)
      assert(nr == 1);
    else
      assert(!nr);
    if (!nr) {
      printf("No distance at depth %u\n", depth);
      continue;
    }
    nr = 1;
    err = hwloc_distances_get_by_depth(topology, depth, &nr, distances, 0, 0);
    assert(!err);
    printf("distance matrix for depth %u:\n", depth);
    print_distances(distances[0]);
    hwloc_distances_release(topology, distances[0]);
  }

  /* check numa distances */
  printf("Checking NUMA distances\n");
  nr = 1;
  err = hwloc_distances_get_by_type(topology, HWLOC_OBJ_NUMANODE, &nr, distances, 0, 0);
  assert(!err);
  assert(nr == 1);
  assert(distances[0]);
  assert(distances[0]->objs);
  assert(distances[0]->values);
  assert(distances[0]->kind == (HWLOC_DISTANCES_KIND_MEANS_LATENCY|HWLOC_DISTANCES_KIND_FROM_USER));
  /* check that some random values are ok */
  assert(distances[0]->values[0] == 1); /* diagonal */
  assert(distances[0]->values[4] == 4); /* same group */
  assert(distances[0]->values[6] == 8); /* different group */
  assert(distances[0]->values[9] == 8); /* different group */
  assert(distances[0]->values[10] == 1); /* diagonal */
  assert(distances[0]->values[14] == 4); /* same group */
  hwloc_distances_release(topology, distances[0]);

  printf("\nInserting PU distances\n");
  /* 16x16 latency matrix: 4 groups of 4 PUs, each group made of 2 pairs
     (1 on the diagonal, 2 within a pair, 4 within a group, 8 across groups) */
  for(i=0; i<16; i++)
    objs[i] = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, i);
  for(i=0; i<256; i++)
    values[i] = 8;
  for(i=0; i<4; i++) {
    for(j=0; j<4; j++)
      for(k=0; k<4; k++)
        values[i*64+i*4+16*j+k] = 4;
    values[i*64+i*4+1] = 2;
    values[i*64+i*4+16] = 2;
    values[i*64+i*4+2*16+3] = 2;
    values[i*64+i*4+3*16+2] = 2;
  }
  for(i=0; i<16; i++)
    values[i+16*i] = 1;
  err = hwloc_distances_add(topology, 16, objs, values,
			    HWLOC_DISTANCES_KIND_MEANS_LATENCY|HWLOC_DISTANCES_KIND_FROM_USER,
			    HWLOC_DISTANCES_FLAG_GROUP);
  assert(!err);

  topodepth = hwloc_topology_get_depth(topology);
  for(depth=0; depth<topodepth; depth++) {
    nr = 0;
    err = hwloc_distances_get_by_depth(topology, depth, &nr, distances, 0, 0);
    assert(!err);
    if (depth == 2 || depth == 5)
      assert(nr == 1);
    else
      assert(!nr);
    if (!nr) {
      printf("No distance at depth %u\n", depth);
      continue;
    }
    nr = 1;
    err = hwloc_distances_get_by_depth(topology, depth, &nr, distances, 0, 0);
    assert(!err);
    printf("distance matrix for depth %u:\n", depth);
    print_distances(distances[0]);
    hwloc_distances_release(topology, distances[0]);
  }

  /* check PU distances */
  printf("Checking PU distances\n");
  nr = 1;
  err = hwloc_distances_get_by_type(topology, HWLOC_OBJ_PU, &nr, distances, 0, 0);
  assert(!err);
  assert(nr == 1);
  assert(distances[0]);
  assert(distances[0]->values);
  assert(distances[0]->kind == (HWLOC_DISTANCES_KIND_MEANS_LATENCY|HWLOC_DISTANCES_KIND_FROM_USER));
  /* check that some random values are ok */
  assert(distances[0]->values[0] == 1); /* diagonal */
  assert(distances[0]->values[1] == 2); /* same group */
  assert(distances[0]->values[3] == 4); /* same biggroup */
  assert(distances[0]->values[15] == 8); /* different biggroup */
  assert(distances[0]->values[250] == 8); /* different biggroup */
  assert(distances[0]->values[253] == 4); /* same group */
  assert(distances[0]->values[254] == 2); /* same biggroup */
  assert(distances[0]->values[255] == 1); /* diagonal */
  hwloc_distances_release(topology, distances[0]);

  printf("\nInserting 2nd PU distances\n");
  /* 4x4 bandwidth matrix covering only the first 4 PUs (7 on the diagonal, 3 elsewhere) */
  for(i=0; i<4; i++)
    objs[i] = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, i);
  for(i=0; i<16; i++)
    values[i] = 3;
  for(i=0; i<4; i++)
    values[i+4*i] = 7;
  err = hwloc_distances_add(topology, 4, objs, values,
			    HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH|HWLOC_DISTANCES_KIND_FROM_USER,
			    HWLOC_DISTANCES_FLAG_GROUP);
  assert(!err);

  topodepth = hwloc_topology_get_depth(topology);
  for(depth=0; depth<topodepth; depth++) {
    nr = 0;
    err = hwloc_distances_get_by_depth(topology, depth, &nr, distances, 0, 0);
    assert(!err);
    if (depth == 2)
      assert(nr == 1);
    else if (depth == 5)
      assert(nr == 2);
    else
      assert(!nr);
    if (!nr) {
      printf("No distance at depth %u\n", depth);
      continue;
    }
    nr = 2;
    err = hwloc_distances_get_by_depth(topology, depth, &nr, distances, 0, 0);
    assert(!err);
    printf("distance matrix for depth %u:\n", depth);
    print_distances(distances[0]);
    hwloc_distances_release(topology, distances[0]);
    if (nr > 1) {
      print_distances(distances[1]);
      hwloc_distances_release(topology, distances[1]);
    }
  }

  /* check PU distances */
  printf("Checking 2nd PU distances\n");
  nr = 2;
  err = hwloc_distances_get_by_type(topology, HWLOC_OBJ_PU, &nr, distances, 0, 0);
  assert(!err);
  assert(nr == 2);
  assert(distances[1]);
  assert(distances[1]->values);
  assert(distances[1]->kind == (HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH|HWLOC_DISTANCES_KIND_FROM_USER));
  /* check that some random values are ok */
  assert(distances[1]->values[0] == 7); /* diagonal */
  assert(distances[1]->values[1] == 3); /* other */
  assert(distances[1]->values[3] == 3); /* other */
  assert(distances[1]->values[15] == 7); /* diagonal */
  hwloc_distances_release(topology, distances[0]);
  hwloc_distances_release(topology, distances[1]);

  /* check distances by kind */
  nr = 2;
  err = hwloc_distances_get(topology, &nr, distances, HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH, 0);
  assert(!err);
  assert(nr == 1);
  hwloc_distances_release(topology, distances[0]);
  nr = 2;
  err = hwloc_distances_get(topology, &nr, distances, HWLOC_DISTANCES_KIND_MEANS_LATENCY|HWLOC_DISTANCES_KIND_FROM_OS, 0);
  assert(!err);
  assert(nr == 0);
  nr = 2;
  err = hwloc_distances_get(topology, &nr, distances, HWLOC_DISTANCES_KIND_MEANS_LATENCY|HWLOC_DISTANCES_KIND_FROM_USER, 0);
  assert(!err);
  assert(nr == 2);
  hwloc_distances_release(topology, distances[0]);
  hwloc_distances_release(topology, distances[1]);

  /* remove distances */
  printf("Removing distances\n");
  /* remove both PU distances */
  err = hwloc_distances_remove_by_type(topology, HWLOC_OBJ_PU);
  assert(!err);
  nr = 0;
  err = hwloc_distances_get_by_type(topology, HWLOC_OBJ_PU, &nr, distances, 0, 0);
  assert(!err);
  assert(!nr);
  nr = 0;
  err = hwloc_distances_get_by_type(topology, HWLOC_OBJ_NUMANODE, &nr, distances, 0, 0);
  assert(!err);
  assert(nr == 1);
  /* remove all distances */
  err = hwloc_distances_remove(topology);
  assert(!err);
  nr = 0;
  err = hwloc_distances_get(topology, &nr, distances, 0, 0);
  assert(!err);
  assert(!nr);
  nr = 0;
  err = hwloc_distances_get_by_type(topology, HWLOC_OBJ_PU, &nr, distances, 0, 0);
  assert(!err);
  assert(!nr);
  nr = 0;
  err = hwloc_distances_get_by_type(topology, HWLOC_OBJ_NUMANODE, &nr, distances, 0, 0);
  assert(!err);
  assert(!nr);

  hwloc_topology_destroy(topology);

  return 0;
}
Example #19
int main(void)
{
  hwloc_topology_t topology;
  unsigned depth;
  char buffer[1024];
  int err;

  /* check a synthetic topology */
  hwloc_topology_init(&topology);
  err = hwloc_topology_set_synthetic(topology, "pack:2 numa:3 l2:4 core:5 pu:6");
  assert(!err);
  hwloc_topology_load(topology);

  assert(hwloc_get_memory_parents_depth(topology) == 2);

  /* internal checks */

  hwloc_topology_check(topology);

  /* local checks */
  depth = hwloc_topology_get_depth(topology);
  assert(depth == 6);

  check_level(topology, 0, 1, 2);
  check_level(topology, 1, 2, 3);
  check_level(topology, 2, 6, 4);
  check_level(topology, 3, 24, 5);
  check_level(topology, 4, 120, 6);
  check_level(topology, 5, 720, 0);
  check_level(topology, HWLOC_TYPE_DEPTH_NUMANODE, 6, 0);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), 0);
  assert(err == 83);
  err = strcmp("Package:2 Group:3 [NUMANode(memory=1073741824)] L2Cache:4(size=4194304) Core:5 PU:6", buffer);
  assert(!err);

  assert(hwloc_get_memory_parents_depth(topology) == 2);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_EXTENDED_TYPES|HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_ATTRS|HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1);
  assert(err == 47);
  err = strcmp("Socket:2 Group:3 NUMANode:1 Cache:4 Core:5 PU:6", buffer);
  assert(!err);

  hwloc_topology_destroy(topology);



  hwloc_topology_init(&topology);
  err = hwloc_topology_set_type_filter(topology, HWLOC_OBJ_L1ICACHE, HWLOC_TYPE_FILTER_KEEP_ALL);
  err = hwloc_topology_set_synthetic(topology, "pack:2(indexes=3,5) numa:2(memory=256GB indexes=pack) l3u:1(size=20mb) l2:2 l1i:1(size=16kB) l1dcache:2 core:1 pu:2(indexes=l2)");
  assert(!err);
  hwloc_topology_load(topology);

  assert(hwloc_get_memory_parents_depth(topology) == 2);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), 0);
  assert(err == 181);
  err = strcmp("Package:2 L3Cache:2(size=20971520) [NUMANode(memory=274877906944 indexes=2*2:1*2)] L2Cache:2(size=4194304) L1iCache:1(size=16384) L1dCache:2(size=32768) Core:1 PU:2(indexes=4*8:1*4)", buffer);
  assert(!err);

  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PACKAGE, 1)->os_index == 5);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, 1)->os_index == 2);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 12)->os_index == 3);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 13)->os_index == 11);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 14)->os_index == 19);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 15)->os_index == 27);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 16)->os_index == 4);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 17)->os_index == 12);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 18)->os_index == 20);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 19)->os_index == 28);

  hwloc_topology_destroy(topology);




  hwloc_topology_init(&topology);
  err = hwloc_topology_set_synthetic(topology, "pack:2 core:2 pu:2(indexes=0,4,2,6,1,5,3,7)");
  assert(!err);
  hwloc_topology_load(topology);

  assert(hwloc_get_memory_parents_depth(topology) == 0);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), 0);
  assert(err == 72);
  err = strcmp("[NUMANode(memory=1073741824)] Package:2 Core:2 PU:2(indexes=4*2:2*2:1*2)", buffer);
  assert(!err);

  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0)->os_index == 0);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 1)->os_index == 4);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 2)->os_index == 2);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 3)->os_index == 6);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 4)->os_index == 1);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 5)->os_index == 5);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 6)->os_index == 3);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 7)->os_index == 7);

  hwloc_topology_destroy(topology);




  hwloc_topology_init(&topology);
  err = hwloc_topology_set_synthetic(topology, "pack:2 numa:2 core:1 pu:2(indexes=0,4,2,6,1,3,5,7)");
  assert(!err);
  hwloc_topology_load(topology);

  assert(hwloc_get_memory_parents_depth(topology) == 2);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), 0);
  assert(err == 76);
  err = strcmp("Package:2 Core:2 [NUMANode(memory=1073741824)] PU:2(indexes=0,4,2,6,1,3,5,7)", buffer);
  assert(!err);

  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0)->os_index == 0);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 1)->os_index == 4);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 2)->os_index == 2);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 3)->os_index == 6);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 4)->os_index == 1);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 5)->os_index == 3);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 6)->os_index == 5);
  assert(hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 7)->os_index == 7);

  hwloc_topology_destroy(topology);




  hwloc_topology_init(&topology);
  err = hwloc_topology_set_synthetic(topology, "pack:2 [numa(memory=1GB)] [numa(memory=1MB)] core:2 [numa(indexes=8,7,5,6,4,3,1,2)] pu:4");
  assert(!err);
  hwloc_topology_load(topology);

  assert(hwloc_get_memory_parents_depth(topology) == HWLOC_TYPE_DEPTH_MULTIPLE);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), 0);
  assert(err == 114);
  err = strcmp("Package:2 [NUMANode(memory=1073741824)] [NUMANode(memory=1048576)] Core:2 [NUMANode(indexes=8,7,5,6,4,3,1,2)] PU:4", buffer);
  assert(!err);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1);
  assert(err == -1);
  assert(errno == EINVAL);

  err = hwloc_topology_export_synthetic(topology, buffer, sizeof(buffer), HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_IGNORE_MEMORY);
  assert(err == 21);
  err = strcmp("Package:2 Core:2 PU:4", buffer);
  assert(!err);

  hwloc_topology_destroy(topology);

  return 0;
}
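Example #20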
void output_console(struct lstopo_output *loutput, const char *filename)
{
  hwloc_topology_t topology = loutput->topology;
  unsigned topodepth;
  int verbose_mode = loutput->verbose_mode;
  int logical = loutput->logical;
  FILE *output;

  output = open_output(filename, loutput->overwrite);
  if (!output) {
    fprintf(stderr, "Failed to open %s for writing (%s)\n", filename, strerror(errno));
    return;
  }

  topodepth = hwloc_topology_get_depth(topology);

  /*
   * if verbose_mode == 0, only print the summary.
   * if verbose_mode == 1, only print the topology tree.
   * if verbose_mode > 1, print both.
   */

  if (lstopo_show_only != (hwloc_obj_type_t)-1) {
    if (verbose_mode > 1)
      fprintf(output, "Only showing %s objects\n", hwloc_obj_type_string(lstopo_show_only));
    output_only (topology, hwloc_get_root_obj(topology), output, logical, verbose_mode);
  } else if (verbose_mode >= 1) {
    output_topology (topology, hwloc_get_root_obj(topology), NULL, output, 0, logical, verbose_mode);
    fprintf(output, "\n");
  }

  if ((verbose_mode > 1 || !verbose_mode) && lstopo_show_only == (hwloc_obj_type_t)-1) {
    hwloc_lstopo_show_summary(output, topology);
  }

  if (verbose_mode > 1 && lstopo_show_only == (hwloc_obj_type_t)-1) {
    const struct hwloc_distances_s * distances;
    unsigned depth;

    for (depth = 0; depth < topodepth; depth++) {
      distances = hwloc_get_whole_distance_matrix_by_depth(topology, depth);
      if (!distances || !distances->latency)
        continue;
      fprintf(output, "relative latency matrix between %ss (depth %u) by %s indexes:\n",
	      hwloc_obj_type_string(hwloc_get_depth_type(topology, depth)),
	      depth,
	      logical ? "logical" : "physical");
      hwloc_utils_print_distance_matrix(output, topology, hwloc_get_root_obj(topology), distances->nbobjs, depth, distances->latency, logical);
    }
  }

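  /* Report processors that are physically present but not represented in this
     topology, and processors that are represented but not allowed. */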
  if (verbose_mode > 1 && lstopo_show_only == (hwloc_obj_type_t)-1) {
    hwloc_const_bitmap_t complete = hwloc_topology_get_complete_cpuset(topology);
    hwloc_const_bitmap_t topo = hwloc_topology_get_topology_cpuset(topology);
    hwloc_const_bitmap_t allowed = hwloc_topology_get_allowed_cpuset(topology);

    if (!hwloc_bitmap_isequal(topo, complete)) {
      hwloc_bitmap_t unknown = hwloc_bitmap_alloc();
      char *unknownstr;
      hwloc_bitmap_copy(unknown, complete);
      hwloc_bitmap_andnot(unknown, unknown, topo);
      hwloc_bitmap_asprintf(&unknownstr, unknown);
      fprintf (output, "%d processors not represented in topology: %s\n", hwloc_bitmap_weight(unknown), unknownstr);
      free(unknownstr);
      hwloc_bitmap_free(unknown);
    }
    if (!hwloc_bitmap_isequal(topo, allowed)) {
      hwloc_bitmap_t disallowed = hwloc_bitmap_alloc();
      char *disallowedstr;
      hwloc_bitmap_copy(disallowed, topo);
      hwloc_bitmap_andnot(disallowed, disallowed, allowed);
      hwloc_bitmap_asprintf(&disallowedstr, disallowed);
      fprintf(output, "%d processors represented but not allowed: %s\n", hwloc_bitmap_weight(disallowed), disallowedstr);
      free(disallowedstr);
      hwloc_bitmap_free(disallowed);
    }
    if (!hwloc_topology_is_thissystem(topology))
      fprintf (output, "Topology not from this system\n");
  }

  if (output != stdout)
    fclose(output);
}
Example #21
tm_topology_t* hwloc_to_tm(char *filename)
{
  hwloc_topology_t topology;
  tm_topology_t *res = NULL;
  hwloc_obj_t *objs = NULL;
  unsigned topodepth,depth;
  unsigned int nb_nodes;
  double *cost;
  int err, l;
  unsigned int i;
  int vl = tm_get_verbose_level();

  /* Build the topology */
  hwloc_topology_init(&topology);
  err = hwloc_topology_set_xml(topology,filename);
  if(err == -1){
    if(vl >= CRITICAL)
      fprintf(stderr,"Error: %s is a bad xml topology file!\n",filename);
    exit(-1);
  }

#if HWLOC_API_VERSION >= 0x00020000
  hwloc_topology_set_all_types_filter(topology, HWLOC_TYPE_FILTER_KEEP_STRUCTURE);
#else  /* HWLOC_API_VERSION >= 0x00020000 */
  hwloc_topology_ignore_all_keep_structure(topology);
#endif  /* HWLOC_API_VERSION >= 0x00020000 */
  hwloc_topology_load(topology);


  /* Test if symetric */
  if(!symetric(topology)){
    if(tm_get_verbose_level() >= CRITICAL)
      fprintf(stderr,"%s not symetric!\n",filename);
    exit(-1);
  }

  /* work on depth */
  topodepth = hwloc_topology_get_depth(topology);

  res                   = (tm_topology_t*)MALLOC(sizeof(tm_topology_t));
  res->oversub_fact      = 1;
  res->nb_constraints   = 0;
  res->constraints      = NULL;
  res->nb_levels        = topodepth;
  res->node_id          = (int**)MALLOC(sizeof(int*)*res->nb_levels);
  res->node_rank        = (int**)MALLOC(sizeof(int*)*res->nb_levels);
  res->nb_nodes         = (size_t*)MALLOC(sizeof(size_t)*res->nb_levels);
  res->arity            = (int*)MALLOC(sizeof(int)*res->nb_levels);

  if(vl >= INFO)
      printf("topodepth = %d\n",topodepth);

  /* Build TreeMatch topology */
  for( depth = 0 ; depth < topodepth ; depth++ ){
    nb_nodes = hwloc_get_nbobjs_by_depth(topology, depth);
    res->nb_nodes[depth] = nb_nodes;
    res->node_id[depth] = (int*)MALLOC(sizeof(int)*nb_nodes);
    res->node_rank[depth] = (int*)MALLOC(sizeof(int)*nb_nodes);

    objs = (hwloc_obj_t*)MALLOC(sizeof(hwloc_obj_t)*nb_nodes);
    objs[0] = hwloc_get_next_obj_by_depth(topology,depth,NULL);
    hwloc_get_closest_objs(topology,objs[0],objs+1,nb_nodes-1);
    res->arity[depth] = objs[0]->arity;

    if (depth == topodepth -1){
      res->nb_constraints = nb_nodes;
      res->nb_proc_units  = nb_nodes;
    }

    if(vl >= DEBUG)
      printf("\n--%d(%d) **%d**:--\n",res->arity[depth],nb_nodes,res->arity[0]);

    /* Build process id tab */
    for (i = 0; i < nb_nodes; i++){
      if(objs[i]->os_index >= nb_nodes){
	if(vl >= CRITICAL){
	  fprintf(stderr, "Index of object %u of level %u is %u, out of range for %u nodes\n",
		  i, depth, objs[i]->os_index, nb_nodes);
	}
	exit(-1);
      }

      res->node_id[depth][i] = objs[i]->os_index;
      res->node_rank[depth][objs[i]->os_index] = i;
      /* if(depth==topodepth-1) */
    }
    FREE(objs);


  }

  cost = (double*)CALLOC(res->nb_levels,sizeof(double));
  for(l=0; l<res->nb_levels; l++){
    cost[l] = link_cost(l);
  }
  res->cost = cost;


  /* Destroy topology object. */
  hwloc_topology_destroy(topology);
  if(tm_get_verbose_level() >= INFO)
    printf("\n");



  return res;
}