Example #1
/* The job has specialized cores; synchronize the user mask with the available cores */
static void _validate_mask(uint32_t task_id, hwloc_obj_t obj, cpu_set_t *ts)
{
	int i, j, overlaps = 0;
	bool superset = true;

	for (i = 0; i < CPU_SETSIZE; i++) {
		if (!CPU_ISSET(i, ts))
			continue;
		j = hwloc_bitmap_isset(obj->allowed_cpuset, i);
		if (j > 0) {
			overlaps++;
		} else if (j == 0) {
			CPU_CLR(i, ts);
			superset = false;
		}
	}

	if (overlaps == 0) {
		/* The task's cpu map is completely invalid.
		 * Give it all allowed CPUs */
		for (i = 0; i < CPU_SETSIZE; i++) {
			if (hwloc_bitmap_isset(obj->allowed_cpuset, i) > 0)
				CPU_SET(i, ts);
		}
	}

	if (!superset) {
		info("task/cgroup: Ignoring user CPU binding outside of job "
		     "step allocation for task[%u]", task_id);
		fprintf(stderr, "Requested cpu_bind option outside of job "
			"step allocation for task[%u]\n", task_id);
	}
}
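Here hwloc_bitmap_isset() bridges a Linux cpu_set_t and an hwloc allowed set. A minimal driver sketch for _validate_mask follows; the topology setup is an assumption (hwloc 1.x, where obj->allowed_cpuset is populated per object), and it is not part of the SLURM source:

#define _GNU_SOURCE
#include <sched.h>
#include <hwloc.h>

int main(void)
{
    hwloc_topology_t topology;
    cpu_set_t mask;

    hwloc_topology_init(&topology);
    hwloc_topology_load(topology);

    /* Build a user mask that may reach outside the allocation. */
    CPU_ZERO(&mask);
    CPU_SET(0, &mask);
    CPU_SET(CPU_SETSIZE - 1, &mask);   /* almost certainly not allowed */

    /* Validate the mask against the first core object (hwloc 1.x API). */
    hwloc_obj_t core = hwloc_get_obj_by_type(topology, HWLOC_OBJ_CORE, 0);
    if (core)
        _validate_mask(0, core, &mask);

    hwloc_topology_destroy(topology);
    return 0;
}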
Example #2
static void switch_set_index(hwloc_bitmap_t set, unsigned old_index, unsigned new_index)
{
  if (hwloc_bitmap_isset(set, old_index)) {
    hwloc_bitmap_clr(set, old_index);
    hwloc_bitmap_set(set, new_index);
  }
}
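Since switch_set_index() tests membership before moving the bit, callers can apply it unconditionally to every set an object carries. A minimal self-check, assuming the function above is in scope:

#include <assert.h>
#include <hwloc.h>

int main(void)
{
    hwloc_bitmap_t set = hwloc_bitmap_alloc();
    hwloc_bitmap_set(set, 3);

    switch_set_index(set, 3, 7);   /* bit 3 is set: moved to 7 */
    assert(!hwloc_bitmap_isset(set, 3) && hwloc_bitmap_isset(set, 7));

    switch_set_index(set, 4, 9);   /* bit 4 is clear: no-op */
    assert(!hwloc_bitmap_isset(set, 9));

    hwloc_bitmap_free(set);
    return 0;
}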
Example #3
HYD_status HYDT_topo_hwloc_bind(int idx)
{
    int id;
    HYD_status status = HYD_SUCCESS;

    HYDU_FUNC_ENTER();

    /* For processes where the user did not specify a binding unit, no binding is needed. */
    if (!HYDT_topo_hwloc_info.user_binding || (idx < HYDT_topo_hwloc_info.num_bitmaps)) {
        id = idx % HYDT_topo_hwloc_info.num_bitmaps;

        if (HYDT_topo_info.debug) {
            /* Print the binding bitmaps for debugging. */
            int i;
            char *binding;

            HYDU_MALLOC_OR_JUMP(binding, char *, HYDT_topo_hwloc_info.total_num_pus + 1, status);
            memset(binding, '\0', HYDT_topo_hwloc_info.total_num_pus + 1);

            for (i = 0; i < HYDT_topo_hwloc_info.total_num_pus; i++) {
                if (hwloc_bitmap_isset(HYDT_topo_hwloc_info.bitmap[id], i))
                    *(binding + i) = '1';
                else
                    *(binding + i) = '0';
            }

            HYDU_dump_noprefix(stdout, "process %d binding: %s\n", idx, binding);
            MPL_free(binding);
        }
        /* The original excerpt is truncated here; a hedged reconstruction
         * of the remainder (the topology handle and exact error handling
         * are assumptions) binds the process to the selected bitmap: */
        hwloc_set_cpubind(topology, HYDT_topo_hwloc_info.bitmap[id], 0);
    }

  fn_exit:
    HYDU_FUNC_EXIT();
    return status;

  fn_fail:
    goto fn_exit;
}
Example #4
static void
hwloc_netbsd_hwloc2bsd(hwloc_const_bitmap_t hwloc_cpuset, cpuset_t *cpuset)
{
  unsigned cpu, cpulimit;
  cpuset_zero(cpuset);
  cpulimit = cpuset_size(cpuset) * CHAR_BIT;
  for (cpu = 0; cpu < cpulimit; cpu++)
    if (hwloc_bitmap_isset(hwloc_cpuset, cpu))
      cpuset_set(cpu, cpuset);
}
Example #5
static int mca_sbgp_map_to_socket_core(int processor_id, int *socket, int *core)
{
    int ret = OPAL_ERR_NOT_FOUND;
    hwloc_obj_t obj;
    hwloc_topology_t *t;
    hwloc_bitmap_t good;

    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_INITIALIZED;
    }
    t = &opal_hwloc_topology;

    good = hwloc_bitmap_alloc();
    if (NULL == good) {
        return OPAL_ERR_OUT_OF_RESOURCE;
    }

    /* Iterate through every core and find one that contains the
       processor_id.  Then find the corresponding socket. */
    for (obj = hwloc_get_next_obj_by_type(*t, HWLOC_OBJ_CORE, NULL);
            NULL != obj;
            obj = hwloc_get_next_obj_by_type(*t, HWLOC_OBJ_CORE, obj)) {
        hwloc_bitmap_and(good, obj->online_cpuset,
                         obj->allowed_cpuset);

        /* Does this core contain the processor_id in question? */
        if (hwloc_bitmap_isset(good, processor_id)) {
            *core = obj->os_index;

            /* Go upward from the core object until we find its parent
               socket. */
            while (HWLOC_OBJ_SOCKET != obj->type) {
                if (NULL == obj->parent) {
                    /* If we get to the root without finding a socket,
                       er..  Hmm.  Error! */
                    ret = OPAL_ERR_NOT_FOUND;
                    goto out;
                }
                obj = obj->parent;
            }
            *socket = obj->os_index;
            ret = OPAL_SUCCESS;
            goto out;
        }
    }

    /* If we didn't even find the right core, we didn't find it.  Fall
       through. */
    ret = OPAL_ERR_NOT_FOUND;

out:
    hwloc_bitmap_free(good);
    return ret;
}
Example #6
/*******************  FUNCTION  *********************/
int TopoHwloc::getFirstBitInBitmap(hwloc_bitmap_t bitmap) const
{
	int last = hwloc_bitmap_last(bitmap);
	int current = hwloc_bitmap_first(bitmap);
	assert(current != -1);
	while (current != last)
	{
		if (hwloc_bitmap_isset(bitmap,current))
			break;
		current = hwloc_bitmap_next(bitmap,current);
	}
	return current;
}
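Note that hwloc_bitmap_first() and hwloc_bitmap_next() only ever return set indices, so the isset test above succeeds on the first iteration for any non-empty bitmap; the whole function reduces to the sketch below (an observation about the code above, not new behavior):

#include <assert.h>
#include <hwloc.h>

/* Behaviorally equivalent sketch of getFirstBitInBitmap(). */
static int first_bit(hwloc_bitmap_t bitmap)
{
    int first = hwloc_bitmap_first(bitmap);   /* -1 only when empty */
    assert(first != -1);                      /* mirrors the assert above */
    return first;
}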
Example #7
static void switch_numa_index(hwloc_obj_t obj, unsigned old_index, unsigned new_index)
{
  hwloc_obj_t child;

  if (obj->type == HWLOC_OBJ_NUMANODE) {
    assert(obj->os_index == old_index);
    obj->os_index = new_index;
  }

  switch_set_index(obj->nodeset, old_index, new_index);
  switch_set_index(obj->allowed_nodeset, old_index, new_index);
  switch_set_index(obj->complete_nodeset, old_index, new_index);

  for(child = obj->first_child; child; child = child->next_sibling)
    if (child->complete_nodeset && hwloc_bitmap_isset(child->complete_nodeset, old_index))
      switch_numa_index(child, old_index, new_index);
}
Example #8
static void switch_pu_index(hwloc_obj_t obj, unsigned old_index, unsigned new_index)
{
  hwloc_obj_t child;

  if (obj->type == HWLOC_OBJ_PU) {
    assert(obj->os_index == old_index);
    obj->os_index = new_index;
  }

  switch_set_index(obj->cpuset, old_index, new_index);
  switch_set_index(obj->allowed_cpuset, old_index, new_index);
#ifndef HWLOC2
  switch_set_index(obj->online_cpuset, old_index, new_index);
#endif
  switch_set_index(obj->complete_cpuset, old_index, new_index);

  for(child = obj->first_child; child; child = child->next_sibling)
    if (child->complete_cpuset && hwloc_bitmap_isset(child->complete_cpuset, old_index))
      switch_pu_index(child, old_index, new_index);
}
Example #9
static int
hwloc_solaris_set_sth_membind(hwloc_topology_t topology, idtype_t idtype, id_t id, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags)
{
  int depth;
  int n, i;

  switch (policy) {
    case HWLOC_MEMBIND_DEFAULT:
    case HWLOC_MEMBIND_BIND:
      break;
    default:
      errno = ENOSYS;
      return -1;
  }

  if (flags & HWLOC_MEMBIND_NOCPUBIND) {
    errno = ENOSYS;
    return -1;
  }

  depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE);
  if (depth < 0) {
    errno = EXDEV;
    return -1;
  }
  n = hwloc_get_nbobjs_by_depth(topology, depth);

  for (i = 0; i < n; i++) {
    hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, depth, i);
    if (hwloc_bitmap_isset(nodeset, obj->os_index)) {
      lgrp_affinity_set(idtype, id, obj->os_index, LGRP_AFF_STRONG);
    } else {
      if (flags & HWLOC_CPUBIND_STRICT)
	lgrp_affinity_set(idtype, id, obj->os_index, LGRP_AFF_NONE);
      else
	lgrp_affinity_set(idtype, id, obj->os_index, LGRP_AFF_WEAK);
    }
  }

  return 0;
}
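This is the Solaris backend behind hwloc's portable memory-binding entry points: each NUMA node present in the nodeset gets a strong lgroup affinity, and the rest get none or weak depending on strictness. A hedged sketch of the corresponding public call (hwloc 2.x signature; node index 0 is an arbitrary assumption):

hwloc_bitmap_t nodeset = hwloc_bitmap_alloc();
hwloc_bitmap_set(nodeset, 0);   /* bind to the NUMA node with os_index 0 */
hwloc_set_membind(topology, nodeset, HWLOC_MEMBIND_BIND,
                  HWLOC_MEMBIND_BYNODESET | HWLOC_MEMBIND_STRICT);
hwloc_bitmap_free(nodeset);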
Example #10
static void create_hwloc_cpusets(void) {
#ifdef USE_HWLOC
    int i;

    int err = hwloc_topology_init(&topology);
    assert(err == 0);

    err = hwloc_topology_load(topology);
    assert(err == 0);

    hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
    assert(cpuset);

    err = hwloc_get_cpubind(topology, cpuset, HWLOC_CPUBIND_PROCESS);
    assert(err == 0);
    const int available_pus = hwloc_bitmap_weight(cpuset);
    const int last_set_index = hwloc_bitmap_last(cpuset);
    const int num_workers = hc_context->nworkers;

    hclib_affinity_t selected_affinity = HCLIB_AFFINITY_STRIDED;
    const char *user_selected_affinity = getenv("HCLIB_AFFINITY");
    if (user_selected_affinity) {
        if (strcmp(user_selected_affinity, "strided") == 0) {
            selected_affinity = HCLIB_AFFINITY_STRIDED;
        } else if (strcmp(user_selected_affinity, "chunked") == 0) {
            selected_affinity = HCLIB_AFFINITY_CHUNKED;
        } else {
            fprintf(stderr, "Unsupported thread affinity \"%s\" specified with "
                    "HCLIB_AFFINITY.\n", user_selected_affinity);
            exit(1);
        }
    }

    thread_cpusets = (hwloc_bitmap_t *)malloc(hc_context->nworkers *
            sizeof(*thread_cpusets));
    assert(thread_cpusets);

    for (i = 0; i < hc_context->nworkers; i++) {
        thread_cpusets[i] = hwloc_bitmap_alloc();
        assert(thread_cpusets[i]);
    }

    switch (selected_affinity) {
        case (HCLIB_AFFINITY_STRIDED): {
            if (available_pus < num_workers) {
                fprintf(stderr, "ERROR Available PUs (%d) was less than number "
                        "of workers (%d), don't currently support "
                        "oversubscription with strided thread pinning\n",
                        available_pus, num_workers);
                exit(1);
            }

            int count = 0;
            int index = 0;
            while (index <= last_set_index) {
                if (hwloc_bitmap_isset(cpuset, index)) {
                    hwloc_bitmap_set(thread_cpusets[count % num_workers],
                            index);
                    count++;
                }
                index++;
            }
            break;
        }
        case (HCLIB_AFFINITY_CHUNKED): {
            const int chunk_size = (available_pus + num_workers - 1) /
                    num_workers;
            int count = 0;
            int index = 0;
            while (index <= last_set_index) {
                if (hwloc_bitmap_isset(cpuset, index)) {
                    hwloc_bitmap_set(thread_cpusets[count / chunk_size], index);
                    count++;
                }
                index++;
            }
            break;
        }
        default:
            assert(false);
    }

    hwloc_bitmap_t nodeset = hwloc_bitmap_alloc();
    hwloc_bitmap_t other_nodeset = hwloc_bitmap_alloc();
    assert(nodeset && other_nodeset);

    /*
     * Here, we look for contiguous ranges of worker threads that share any NUMA
     * nodes with us. In theory, this should be more hierarchical but isn't yet.
     * This is also super inefficient... O(T^2) where T is the number of
     * workers.
     */
    bool revert_to_naive_stealing = false;
    for (i = 0; i < hc_context->nworkers; i++) {
        // Get the NUMA nodes for this CPU set
        hwloc_cpuset_to_nodeset(topology, thread_cpusets[i], nodeset);

        int base = -1;
        int limit = -1;
        int j;
        for (j = 0; j < hc_context->nworkers; j++) {
            hwloc_cpuset_to_nodeset(topology, thread_cpusets[j], other_nodeset);
            // Take the intersection, see if there is any overlap
            hwloc_bitmap_and(other_nodeset, nodeset, other_nodeset);

            if (base < 0) {
                // Haven't found a contiguous chunk of workers yet.
                if (!hwloc_bitmap_iszero(other_nodeset)) {
                    base = j;
                }
            } else {
                /*
                 * Have a contiguous chunk of workers, either still inside it or
                 * after it.
                 */
                if (limit < 0) {
                    // Inside the contiguous chunk of workers
                    if (hwloc_bitmap_iszero(other_nodeset)) {
                        // Found the end
                        limit = j;
                    }
                } else {
                    // After the contiguous chunk of workers
                    if (!hwloc_bitmap_iszero(other_nodeset)) {
                        // No contiguous chunk to find, just do something naive.
                        revert_to_naive_stealing = true;
                        break;
                    }
                }
            }
        }

        if (revert_to_naive_stealing) {
            fprintf(stderr, "WARNING: Using naive work-stealing patterns.\n");
            base = 0;
            limit = hc_context->nworkers;
        } else {
            assert(base >= 0);
            if (limit < 0) {
                limit = hc_context->nworkers;
            }
        }

        hc_context->workers[i]->base_intra_socket_workers = base;
        hc_context->workers[i]->limit_intra_socket_workers = limit;

#ifdef VERBOSE
        char *nbuf;
        hwloc_bitmap_asprintf(&nbuf, nodeset);

        char *buffer;
        hwloc_bitmap_asprintf(&buffer, thread_cpusets[i]);
        fprintf(stderr, "Worker %d has access to %d PUs (%s), %d NUMA nodes "
                "(%s). Shared NUMA nodes with [%d, %d).\n", i,
                hwloc_bitmap_weight(thread_cpusets[i]), buffer,
                hwloc_bitmap_weight(nodeset), nbuf, base, limit);
        free(buffer);
        free(nbuf);
#endif
    }

#endif
}
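create_hwloc_cpusets() only computes per-worker cpusets; the actual pinning would happen when each worker starts. A minimal sketch of that step, reusing the file-scope topology and thread_cpusets above (the helper name is hypothetical, not hclib API):

#ifdef USE_HWLOC
/* Hypothetical pinning step: bind the calling worker thread to the
 * cpuset computed for it above. */
static void bind_current_worker(int worker_id) {
    int err = hwloc_set_cpubind(topology, thread_cpusets[worker_id],
                                HWLOC_CPUBIND_THREAD);
    assert(err == 0);
}
#endif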
Example #11
int main(int argc, char *argv[])
{
  hwloc_obj_type_t type;
  unsigned old_index, new_index;
  const char *callname = argv[0];
  hwloc_topology_t topology;
  int err;

  if (argc < 6) {
    usage(stderr, callname);
    exit(EXIT_FAILURE);
  }

#ifdef HWLOC2
  err = hwloc_type_sscanf(argv[3], &type, NULL, 0);
#else
  err = hwloc_obj_type_sscanf(argv[3], &type, NULL, NULL, 0);
#endif
  if (err < 0) {
    fprintf(stderr, "Failed to recognize type `%s'\n", argv[3]);
    usage(stderr, callname);
    exit(EXIT_FAILURE);
  }
  if (type != HWLOC_OBJ_PU && type != HWLOC_OBJ_NUMANODE) {
    fprintf(stderr, "Invalid type `%s', should be PU or NUMA node\n", argv[3]);
    usage(stderr, callname);
    exit(EXIT_FAILURE);
  }

  old_index = atoi(argv[4]);
  new_index = atoi(argv[5]);
  if (old_index == new_index) {
    fprintf(stderr, "Nothing to do\n");
    exit(EXIT_SUCCESS);
  }

  err = hwloc_topology_init(&topology);
  if (err < 0) {
    fprintf(stderr, "hwloc_topology_init() failed (%s)\n", strerror(errno));
    usage(stderr, callname);
    exit(EXIT_FAILURE);
  }

  err = hwloc_topology_set_xml(topology, argv[1]);
  if (err < 0) {
    fprintf(stderr, "hwloc_topology_set_xml() on file `%s' failed (%s)\n", argv[1], strerror(errno));
    usage(stderr, callname);
    exit(EXIT_FAILURE);
  }

#ifdef HWLOC2
  err = hwloc_topology_set_flags(topology,
				 HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM);
  err = hwloc_topology_set_all_types_filter(topology, HWLOC_TYPE_FILTER_KEEP_ALL);
#else
  err = hwloc_topology_set_flags(topology,
				 HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM
				 | HWLOC_TOPOLOGY_FLAG_WHOLE_IO
				 | HWLOC_TOPOLOGY_FLAG_ICACHES);
#endif

  err = hwloc_topology_load(topology);
  if (err < 0) {
    fprintf(stderr, "hwloc_topology_load() failed (%s)\n", strerror(errno));
    usage(stderr, callname);
    exit(EXIT_FAILURE);
  }

  if (HWLOC_OBJ_PU == type) {
    hwloc_const_bitmap_t cpset = hwloc_topology_get_complete_cpuset(topology);
    if (!hwloc_bitmap_isset(cpset, old_index)) {
      fprintf(stderr, "Old PU os_index %u doesn't exist\n", old_index);
      usage(stderr, callname);
      exit(EXIT_FAILURE);
    }
    if (hwloc_bitmap_isset(cpset, new_index)) {
      fprintf(stderr, "New PU os_index %u already exists\n", new_index);
      usage(stderr, callname);
      exit(EXIT_FAILURE);
    }

    switch_pu_index(hwloc_get_root_obj(topology), old_index, new_index);

  } else if (HWLOC_OBJ_NUMANODE == type) {
    hwloc_const_bitmap_t cnset = hwloc_topology_get_complete_nodeset(topology);
    if (!cnset || hwloc_bitmap_isfull(cnset)) {
      fprintf(stderr, "Topology doesn't have NUMA nodes\n");
      usage(stderr, callname);
      exit(EXIT_FAILURE);
    }
    if (!hwloc_bitmap_isset(cnset, old_index)) {
      fprintf(stderr, "Old NUMA node os_index %u doesn't exist\n", old_index);
      usage(stderr, callname);
      exit(EXIT_FAILURE);
    }
    if (hwloc_bitmap_isset(cnset, new_index)) {
      fprintf(stderr, "New NUMA node os_index %u already exists\n", new_index);
      usage(stderr, callname);
      exit(EXIT_FAILURE);
    }

    switch_numa_index(hwloc_get_root_obj(topology), old_index, new_index);
  }

  err = hwloc_topology_export_xml(topology, argv[2], 0);
  if (err < 0) {
    fprintf(stderr, "hwloc_topology_export_xml() on file `%s' failed (%s)\n", argv[2], strerror(errno));
    usage(stderr, callname);
    exit(EXIT_FAILURE);
  }

  hwloc_topology_destroy(topology);

  printf("Beware that hwloc may warn about out-of-order objects when reloading %s\n", argv[2]);
  return 0;
}
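For reference, the program rewrites the XML topology in argv[1] with one PU or NUMA node os_index renamed, writing the result to argv[2]; a typical invocation (the program name here is assumed) would be: hwloc-rename-index in.xml out.xml PU 4 12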