Example #1
/* If HAVE_NUMA, create a mask for the given ldom.
 * Otherwise, create a mask for the given socket.
 */
static int _bind_ldom(uint32_t ldom, cpu_set_t *mask)
{
#ifdef HAVE_NUMA
	int c, maxcpus, nnid = 0;
	int nmax = numa_max_node();
	if (nmax > 0)
		nnid = ldom % (nmax+1);
	debug3("task/affinity: binding to NUMA node %d", nnid);
	maxcpus = conf->sockets * conf->cores * conf->threads;
	for (c = 0; c < maxcpus; c++) {
		if (slurm_get_numa_node(c) == nnid)
			CPU_SET(c, mask);
	}
	return true;
#else
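	/* Without NUMA support, approximate the locality domain by
	 * socket; conf->block_map translates Slurm's abstract CPU
	 * index into the machine's physical CPU id. */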
	uint16_t s, sid  = ldom % conf->sockets;
	uint16_t i, cpus = conf->cores * conf->threads;
	if (!conf->block_map)
		return false;
	for (s = sid * cpus; s < (sid+1) * cpus; s++) {
		i = s % conf->block_map_size;
		CPU_SET(conf->block_map[i], mask);
	}
	return true;
#endif
}
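For readers without the surrounding Slurm plugin, here is a minimal, self-contained sketch of the HAVE_NUMA branch built directly on libnuma. It is an assumption-laden illustration, not Slurm's code: numa_node_of_cpu() and numa_num_configured_cpus() stand in for Slurm's slurm_get_numa_node() helper and the conf->sockets * cores * threads CPU count.

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <numa.h>	/* link with -lnuma */

/* Sketch: build a CPU mask covering one NUMA node, chosen by
 * ldom modulo the node count, mirroring _bind_ldom() above. */
static bool bind_ldom_sketch(unsigned int ldom, cpu_set_t *mask)
{
	int nmax = numa_max_node();
	int nnid = (nmax > 0) ? (int)(ldom % (unsigned int)(nmax + 1)) : 0;
	int maxcpus = numa_num_configured_cpus();
	int c;

	for (c = 0; c < maxcpus; c++) {
		/* numa_node_of_cpu() maps a logical CPU to its node */
		if (numa_node_of_cpu(c) == nnid)
			CPU_SET(c, mask);
	}
	return true;
}

int main(void)
{
	cpu_set_t mask;

	if (numa_available() < 0)
		return 1;
	CPU_ZERO(&mask);
	bind_ldom_sketch(0, &mask);
	/* the mask could now be applied with sched_setaffinity() */
	printf("CPUs in ldom 0: %d\n", CPU_COUNT(&mask));
	return 0;
}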
Example #2
/* _match_masks_to_ldom
 *
 * expand each mask to encompass the whole locality domain
 * within which it currently exists
 * NOTE: this assumes that the masks are already in logical
 * (and not abstract) CPU order.
 */
static void _match_masks_to_ldom(const uint32_t maxtasks, bitstr_t **masks)
{
	uint32_t i, b, size;

	if (!masks || !masks[0])
		return;
	size = bit_size(masks[0]);
	for (i = 0; i < maxtasks; i++) {
		for (b = 0; b < size; b++) {
			if (bit_test(masks[i], b)) {
				/* get the NUMA node for this CPU, and then
				 * set all CPUs in the mask that are in
				 * the same NUMA node */
				uint32_t c;
				uint16_t nnid = slurm_get_numa_node(b);
				for (c = 0; c < size; c++) {
					if (slurm_get_numa_node(c) == nnid)
						bit_set(masks[i], c);
				}
			}
		}
	}
}
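The expansion rule is simple: for every set bit, also set every bit whose CPU shares that bit's NUMA node. Below is a minimal sketch of the same step on a plain uint64_t mask, with a hypothetical cpu_to_node[] table standing in for slurm_get_numa_node(); the 8-CPU, 2-node topology is an assumption for illustration.

#include <stdint.h>
#include <stdio.h>

#define NCPUS 8

/* assumption: CPUs 0-3 sit on node 0, CPUs 4-7 on node 1 */
static const int cpu_to_node[NCPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* Expand a CPU mask so each set bit pulls in every CPU that
 * shares its NUMA node, mirroring _match_masks_to_ldom() above. */
static uint64_t expand_to_ldom(uint64_t mask)
{
	uint64_t out = mask;
	int b, c;

	for (b = 0; b < NCPUS; b++) {
		if (!(mask & (1ULL << b)))
			continue;
		/* set every CPU on the same node as bit b */
		for (c = 0; c < NCPUS; c++) {
			if (cpu_to_node[c] == cpu_to_node[b])
				out |= 1ULL << c;
		}
	}
	return out;
}

int main(void)
{
	uint64_t m = 1ULL << 5;	/* only CPU 5 set (node 1) */

	/* prints 0x20 -> 0xf0: the mask grows to cover CPUs 4-7 */
	printf("0x%llx -> 0x%llx\n",
	       (unsigned long long)m,
	       (unsigned long long)expand_to_ldom(m));
	return 0;
}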