/*
 * Resolve a textual hwloc object type name (e.g. "Core", "Socket", "L2Cache")
 * to its depth in the given topology tree.
 *
 * topology: an already-loaded hwloc topology.
 * obj_name: type name in any form accepted by hwloc_obj_type_sscanf().
 *
 * Returns the non-negative depth on success, or -1 on error after printing
 * a diagnostic to stderr.
 */
int hwloc_get_obj_depth_by_name(hwloc_topology_t topology, char *obj_name)
{
    /* find hwloc obj depth */
    hwloc_obj_type_t type;
    int depthattrp;
    hwloc_obj_cache_type_t cache_type;

    /* Parse the name; for caches this also fills in the level (depthattrp)
     * and cache type attributes used for disambiguation below. */
    if (hwloc_obj_type_sscanf(obj_name, &type, &depthattrp, &cache_type,
                              sizeof(cache_type)) == -1) {
        fprintf(stderr, "type \"%s\" was not recognized\n", obj_name);
        return -1;
    }

    int depth = hwloc_get_type_depth(topology, type);

    /* BUG FIX: HWLOC_TYPE_DEPTH_UNKNOWN (which happens to equal -1) was
     * previously returned silently, while every other failure in this
     * function prints a diagnostic.  Report it explicitly. */
    if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) {
        fprintf(stderr, "type \"%s\" does not exist in this topology\n", obj_name);
        return -1;
    }

    if (depth == HWLOC_TYPE_DEPTH_MULTIPLE) {
        if (type == HWLOC_OBJ_CACHE) {
            /* several cache levels match the generic type: pick the one
             * matching the parsed level/type attributes */
            depth = hwloc_get_cache_type_depth(topology, depthattrp, cache_type);
            if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) {
                fprintf(stderr, "type %s cannot be found, level=%d\n",
                        obj_name, depthattrp);
                return -1;
            }
            if (depth == HWLOC_TYPE_DEPTH_MULTIPLE) {
                fprintf(stderr, "type %s multiple caches match for\n", obj_name);
                return -1;
            }
        } else {
            fprintf(stderr, "type \"%s\" isn't handled...\n", obj_name);
            return -1;
        }
    }
    return depth;
}
/*
 * Compute the process-binding assignments for a job according to its mapping
 * and binding policies.
 *
 * jdata: the job whose map/binding policies are examined; binding results are
 *        recorded by the bind_* helpers called below.
 *
 * Returns ORTE_SUCCESS, ORTE_ERR_BAD_PARAM for unrecognized policies, or
 * ORTE_ERR_SILENT when a show_help message has already been emitted.
 *
 * BUG FIX: totalcpuset (allocated at the "execute" label) was leaked on the
 * bind_downwards/bind_upwards failure paths, on the two "no-objects" error
 * paths, and on the final success return; hwloc_bitmap_free() is now called
 * on every exit path after allocation.
 */
int orte_rmaps_base_compute_bindings(orte_job_t *jdata)
{
    hwloc_obj_type_t hwb, hwm;
    unsigned clvl = 0, clvm = 0;    /* cache levels for bind/map, if caches */
    opal_binding_policy_t bind;
    orte_mapping_policy_t map;
    orte_node_t *node;
    int i, rc;
    struct hwloc_topology_support *support;
    bool force_down = false;
    hwloc_cpuset_t totalcpuset;
    int bind_depth, map_depth;

    opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
                        "mca:rmaps: compute bindings for job %s with policy %s",
                        ORTE_JOBID_PRINT(jdata->jobid),
                        opal_hwloc_base_print_binding(jdata->map->binding));

    map = ORTE_GET_MAPPING_POLICY(jdata->map->mapping);
    bind = OPAL_GET_BINDING_POLICY(jdata->map->binding);

    if (ORTE_MAPPING_BYUSER == map) {
        /* user specified binding by rankfile - nothing for us to do */
        return ORTE_SUCCESS;
    }

    if (OPAL_BIND_TO_CPUSET == bind) {
        int rc;
        /* cpuset was given - setup the bindings */
        if (ORTE_SUCCESS != (rc = bind_to_cpuset(jdata))) {
            ORTE_ERROR_LOG(rc);
        }
        return rc;
    }

    if (OPAL_BIND_TO_NONE == bind) {
        /* no binding requested */
        return ORTE_SUCCESS;
    }

    if (OPAL_BIND_TO_BOARD == bind) {
        /* doesn't do anything at this time */
        return ORTE_SUCCESS;
    }

    /* binding requested - convert the binding level to the hwloc obj type */
    switch (bind) {
    case OPAL_BIND_TO_NUMA:
        hwb = HWLOC_OBJ_NODE;
        break;
    case OPAL_BIND_TO_SOCKET:
        hwb = HWLOC_OBJ_SOCKET;
        break;
    case OPAL_BIND_TO_L3CACHE:
        hwb = HWLOC_OBJ_CACHE;
        clvl = 3;
        break;
    case OPAL_BIND_TO_L2CACHE:
        hwb = HWLOC_OBJ_CACHE;
        clvl = 2;
        break;
    case OPAL_BIND_TO_L1CACHE:
        hwb = HWLOC_OBJ_CACHE;
        clvl = 1;
        break;
    case OPAL_BIND_TO_CORE:
        hwb = HWLOC_OBJ_CORE;
        break;
    case OPAL_BIND_TO_HWTHREAD:
        hwb = HWLOC_OBJ_PU;
        break;
    default:
        ORTE_ERROR_LOG(ORTE_ERR_BAD_PARAM);
        return ORTE_ERR_BAD_PARAM;
    }

    /* do the same for the mapping policy */
    switch (map) {
    case ORTE_MAPPING_BYNODE:
    case ORTE_MAPPING_BYSLOT:
    case ORTE_MAPPING_SEQ:
        hwm = HWLOC_OBJ_MACHINE;
        break;
    case ORTE_MAPPING_BYDIST:
    case ORTE_MAPPING_BYNUMA:
        hwm = HWLOC_OBJ_NODE;
        break;
    case ORTE_MAPPING_BYSOCKET:
        hwm = HWLOC_OBJ_SOCKET;
        break;
    case ORTE_MAPPING_BYL3CACHE:
        hwm = HWLOC_OBJ_CACHE;
        clvm = 3;
        break;
    case ORTE_MAPPING_BYL2CACHE:
        hwm = HWLOC_OBJ_CACHE;
        clvm = 2;
        break;
    case ORTE_MAPPING_BYL1CACHE:
        hwm = HWLOC_OBJ_CACHE;
        clvm = 1;
        break;
    case ORTE_MAPPING_BYCORE:
        hwm = HWLOC_OBJ_CORE;
        break;
    case ORTE_MAPPING_BYHWTHREAD:
        hwm = HWLOC_OBJ_PU;
        break;
    default:
        ORTE_ERROR_LOG(ORTE_ERR_BAD_PARAM);
        return ORTE_ERR_BAD_PARAM;
    }

    /* if the job was mapped by the corresponding target, then
     * we bind in place
     *
     * otherwise, we have to bind either up or down the hwloc
     * tree. If we are binding upwards (e.g., mapped to hwthread
     * but binding to core), then we just climb the tree to find
     * the first matching object.
     *
     * if we are binding downwards (e.g., mapped to node and bind
     * to core), then we have to do a round-robin assigment of
     * procs to the resources below.
     */
    if (ORTE_MAPPING_BYDIST == map) {
        int rc = ORTE_SUCCESS;
        if (OPAL_BIND_TO_NUMA == bind) {
            opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
                                "mca:rmaps: bindings for job %s - dist to numa",
                                ORTE_JOBID_PRINT(jdata->jobid));
            if (ORTE_SUCCESS != (rc = bind_in_place(jdata, HWLOC_OBJ_NODE, 0))) {
                ORTE_ERROR_LOG(rc);
            }
        } else if (OPAL_BIND_TO_NUMA < bind) {
            /* bind every proc downwards */
            force_down = true;
            goto execute;
        }
        /* if the binding policy is less than numa, then we are unbound - so
         * just ignore this and return (should have been caught in prior
         * tests anyway as only options meeting that criteria are "none"
         * and "board")
         */
        return rc;
    }

    /* now deal with the remaining binding policies based on hardware */
    if (bind == map) {
        opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
                            "mca:rmaps: bindings for job %s - bind in place",
                            ORTE_JOBID_PRINT(jdata->jobid));
        if (ORTE_SUCCESS != (rc = bind_in_place(jdata, hwb, clvl))) {
            ORTE_ERROR_LOG(rc);
        }
        return rc;
    }

    /* we need to handle the remaining binding options on a per-node
     * basis because different nodes could potentially have different
     * topologies, with different relative depths for the two levels
     */
 execute:
    /* initialize */
    totalcpuset = hwloc_bitmap_alloc();

    for (i = 0; i < jdata->map->nodes->size; i++) {
        if (NULL == (node = (orte_node_t*)opal_pointer_array_get_item(jdata->map->nodes, i))) {
            continue;
        }
        if (!orte_do_not_launch) {
            /* if we don't want to launch, then we are just testing the system,
             * so ignore questions about support capabilities
             */
            support = (struct hwloc_topology_support*)hwloc_topology_get_support(node->topology);
            /* check if topology supports cpubind - have to be careful here
             * as Linux doesn't currently support thread-level binding. This
             * may change in the future, though, and it isn't clear how hwloc
             * interprets the current behavior. So check both flags to be sure.
             */
            if (!support->cpubind->set_thisproc_cpubind &&
                !support->cpubind->set_thisthread_cpubind) {
                if (!OPAL_BINDING_REQUIRED(jdata->map->binding) ||
                    !OPAL_BINDING_POLICY_IS_SET(jdata->map->binding)) {
                    /* we are not required to bind, so ignore this */
                    continue;
                }
                orte_show_help("help-orte-rmaps-base.txt", "rmaps:cpubind-not-supported",
                               true, node->name);
                hwloc_bitmap_free(totalcpuset);
                return ORTE_ERR_SILENT;
            }
            /* check if topology supports membind - have to be careful here
             * as hwloc treats this differently than I (at least) would have
             * expected. Per hwloc, Linux memory binding is at the thread,
             * and not process, level. Thus, hwloc sets the "thisproc" flag
             * to "false" on all Linux systems, and uses the "thisthread" flag
             * to indicate binding capability - don't warn if the user didn't
             * specifically request binding
             */
            if (!support->membind->set_thisproc_membind &&
                !support->membind->set_thisthread_membind &&
                OPAL_BINDING_POLICY_IS_SET(jdata->map->binding)) {
                if (OPAL_HWLOC_BASE_MBFA_WARN == opal_hwloc_base_mbfa && !membind_warned) {
                    orte_show_help("help-orte-rmaps-base.txt", "rmaps:membind-not-supported",
                                   true, node->name);
                    membind_warned = true;
                } else if (OPAL_HWLOC_BASE_MBFA_ERROR == opal_hwloc_base_mbfa) {
                    orte_show_help("help-orte-rmaps-base.txt", "rmaps:membind-not-supported-fatal",
                                   true, node->name);
                    hwloc_bitmap_free(totalcpuset);
                    return ORTE_ERR_SILENT;
                }
            }
        }

        /* some systems do not report cores, and so we can get a situation where our
         * default binding policy will fail for no necessary reason. So if we are
         * computing a binding due to our default policy, and no cores are found
         * on this node, just silently skip it - we will not bind
         */
        if (!OPAL_BINDING_POLICY_IS_SET(jdata->map->binding) &&
            HWLOC_TYPE_DEPTH_UNKNOWN == hwloc_get_type_depth(node->topology, HWLOC_OBJ_CORE)) {
            opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
                                "Unable to bind-to core by default on node %s as no cores detected",
                                node->name);
            continue;
        }

        /* we share topologies in order
         * to save space, so we need to reset the usage info to reflect
         * our own current state
         */
        reset_usage(node, jdata->jobid);

        if (force_down) {
            if (ORTE_SUCCESS != (rc = bind_downwards(jdata, node, hwb, clvl))) {
                ORTE_ERROR_LOG(rc);
                hwloc_bitmap_free(totalcpuset);  /* FIX: was leaked here */
                return rc;
            }
        } else {
            /* determine the relative depth on this node */
            if (HWLOC_OBJ_CACHE == hwb) {
                /* must use a unique function because blasted hwloc
                 * just doesn't deal with caches very well...sigh
                 */
                bind_depth = hwloc_get_cache_type_depth(node->topology, clvl, -1);
            } else {
                bind_depth = hwloc_get_type_depth(node->topology, hwb);
            }
            if (0 > bind_depth) {
                /* didn't find such an object */
                orte_show_help("help-orte-rmaps-base.txt", "orte-rmaps-base:no-objects",
                               true, hwloc_obj_type_string(hwb), node->name);
                hwloc_bitmap_free(totalcpuset);  /* FIX: was leaked here */
                return ORTE_ERR_SILENT;
            }
            if (HWLOC_OBJ_CACHE == hwm) {
                /* must use a unique function because blasted hwloc
                 * just doesn't deal with caches very well...sigh
                 */
                map_depth = hwloc_get_cache_type_depth(node->topology, clvm, -1);
            } else {
                map_depth = hwloc_get_type_depth(node->topology, hwm);
            }
            if (0 > map_depth) {
                /* didn't find such an object */
                orte_show_help("help-orte-rmaps-base.txt", "orte-rmaps-base:no-objects",
                               true, hwloc_obj_type_string(hwm), node->name);
                hwloc_bitmap_free(totalcpuset);  /* FIX: was leaked here */
                return ORTE_ERR_SILENT;
            }
            opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
                                "%s bind_depth: %d map_depth %d",
                                ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                bind_depth, map_depth);
            if (bind_depth > map_depth) {
                if (ORTE_SUCCESS != (rc = bind_downwards(jdata, node, hwb, clvl))) {
                    ORTE_ERROR_LOG(rc);
                    hwloc_bitmap_free(totalcpuset);  /* FIX: was leaked here */
                    return rc;
                }
            } else {
                if (ORTE_SUCCESS != (rc = bind_upwards(jdata, node, hwb, clvl))) {
                    ORTE_ERROR_LOG(rc);
                    hwloc_bitmap_free(totalcpuset);  /* FIX: was leaked here */
                    return rc;
                }
            }
        }
    }

    hwloc_bitmap_free(totalcpuset);  /* FIX: was leaked on success */
    return ORTE_SUCCESS;
}
static HYD_status handle_bitmap_binding(const char *binding, const char *mapping) { int i, j, k, bind_count, map_count, cache_depth = 0, bind_depth = 0, map_depth = 0; int total_map_objs, total_bind_objs, num_pus_in_map_domain, num_pus_in_bind_domain, total_map_domains; hwloc_obj_t map_obj, bind_obj, *start_pu; hwloc_cpuset_t *map_domains; char *bind_str, *map_str; HYD_status status = HYD_SUCCESS; HYDU_FUNC_ENTER(); /* split out the count fields */ status = split_count_field(binding, &bind_str, &bind_count); HYDU_ERR_POP(status, "error splitting count field\n"); status = split_count_field(mapping, &map_str, &map_count); HYDU_ERR_POP(status, "error splitting count field\n"); /* get the binding object */ if (!strcmp(bind_str, "board")) bind_depth = hwloc_get_type_or_above_depth(topology, HWLOC_OBJ_MACHINE); else if (!strcmp(bind_str, "numa")) bind_depth = hwloc_get_type_or_above_depth(topology, HWLOC_OBJ_NODE); else if (!strcmp(bind_str, "socket")) bind_depth = hwloc_get_type_or_above_depth(topology, HWLOC_OBJ_SOCKET); else if (!strcmp(bind_str, "core")) bind_depth = hwloc_get_type_or_above_depth(topology, HWLOC_OBJ_CORE); else if (!strcmp(bind_str, "hwthread")) bind_depth = hwloc_get_type_or_above_depth(topology, HWLOC_OBJ_PU); else { /* check if it's in the l*cache format */ cache_depth = parse_cache_string(bind_str); if (!cache_depth) { HYDU_ERR_SETANDJUMP(status, HYD_INTERNAL_ERROR, "unrecognized binding string \"%s\"\n", binding); } bind_depth = hwloc_get_cache_type_depth(topology, cache_depth, -1); } /* get the mapping */ if (!strcmp(map_str, "board")) map_depth = hwloc_get_type_or_above_depth(topology, HWLOC_OBJ_MACHINE); else if (!strcmp(map_str, "numa")) map_depth = hwloc_get_type_or_above_depth(topology, HWLOC_OBJ_NODE); else if (!strcmp(map_str, "socket")) map_depth = hwloc_get_type_or_above_depth(topology, HWLOC_OBJ_SOCKET); else if (!strcmp(map_str, "core")) map_depth = hwloc_get_type_or_above_depth(topology, HWLOC_OBJ_CORE); else if (!strcmp(map_str, 
"hwthread")) map_depth = hwloc_get_type_or_above_depth(topology, HWLOC_OBJ_PU); else { cache_depth = parse_cache_string(map_str); if (!cache_depth) { HYDU_ERR_SETANDJUMP(status, HYD_INTERNAL_ERROR, "unrecognized mapping string \"%s\"\n", mapping); } map_depth = hwloc_get_cache_type_depth(topology, cache_depth, -1); } /* * Process Affinity Algorithm: * * The code below works in 3 stages. The end result is an array of all the possible * binding bitmaps for a system, based on the options specified. * * 1. Define all possible mapping "domains" in a system. A mapping domain is a group * of hardware elements found by traversing the topology. Each traversal skips the * number of elements the user specified in the mapping string. The traversal ends * when the next mapping domain == the first mapping domain. Note that if the * mapping string defines a domain that is larger than the system size, we exit * with an error. * * 2. Define the number of possible binding domains within a mapping domain. This * process is similar to step 1, in that we traverse the mapping domain finding * all possible bind combinations, stopping when a duplicate of the first binding * is reached. If a binding is larger (in # of PUs) than the mapping domain, * the number of possible bindings for that domain is 1. In this stage, we also * locate the first PU in each mapping domain for use later during binding. * * 3. Create the binding bitmaps. We allocate an array of bitmaps and fill them in * with all possible bindings. The starting PU in each mapping domain is advanced * if and when we wrap around to the beginning of the mapping domains. This ensures * that we do not repeat. 
* */ /* calculate the number of map domains */ total_map_objs = hwloc_get_nbobjs_by_depth(topology, map_depth); num_pus_in_map_domain = (HYDT_topo_hwloc_info.total_num_pus / total_map_objs) * map_count; HYDU_ERR_CHKANDJUMP(status, num_pus_in_map_domain > HYDT_topo_hwloc_info.total_num_pus, HYD_INTERNAL_ERROR, "mapping option \"%s\" larger than total system size\n", mapping); /* The number of total_map_domains should be large enough to * contain all contiguous map object collections of length * map_count. For example, if the map object is "socket" and the * map_count is 3, on a system with 4 sockets, the following map * domains should be included: (0,1,2), (3,0,1), (2,3,0), (1,2,3). * We do this by finding how many times we need to replicate the * list of the map objects so that an integral number of map * domains can map to them. In the above case, the list of map * objects is replicated 3 times. */ for (i = 1; (i * total_map_objs) % map_count; i++); total_map_domains = (i * total_map_objs) / map_count; /* initialize the map domains */ HYDU_MALLOC_OR_JUMP(map_domains, hwloc_bitmap_t *, total_map_domains * sizeof(hwloc_bitmap_t), status); HYDU_MALLOC_OR_JUMP(start_pu, hwloc_obj_t *, total_map_domains * sizeof(hwloc_obj_t), status); /* For each map domain, find the next map object (first map object * for the first map domain) and add the following "map_count" * number of contiguous map objects, wrapping to the first one if * needed, to the map domain. Store the first PU in the first map * object of the map domain as "start_pu". This is needed later * for the actual binding. */ map_obj = NULL; for (i = 0; i < total_map_domains; i++) { map_domains[i] = hwloc_bitmap_alloc(); hwloc_bitmap_zero(map_domains[i]); for (j = 0; j < map_count; j++) { map_obj = hwloc_get_next_obj_by_depth(topology, map_depth, map_obj); /* map_obj will be NULL if it reaches the end. 
call again to wrap around */ if (!map_obj) map_obj = hwloc_get_next_obj_by_depth(topology, map_depth, map_obj); if (j == 0) start_pu[i] = hwloc_get_obj_inside_cpuset_by_type(topology, map_obj->cpuset, HWLOC_OBJ_PU, 0); hwloc_bitmap_or(map_domains[i], map_domains[i], map_obj->cpuset); } } /* Find the possible binding domains is similar to that of map * domains. But if a binding domain is larger (in # of PUs) than * the mapping domain, the number of possible bindings for that * domain is 1. */ /* calculate the number of possible bindings and allocate bitmaps for them */ total_bind_objs = hwloc_get_nbobjs_by_depth(topology, bind_depth); num_pus_in_bind_domain = (HYDT_topo_hwloc_info.total_num_pus / total_bind_objs) * bind_count; if (num_pus_in_bind_domain < num_pus_in_map_domain) { for (i = 1; (i * num_pus_in_map_domain) % num_pus_in_bind_domain; i++); HYDT_topo_hwloc_info.num_bitmaps = (i * num_pus_in_map_domain * total_map_domains) / num_pus_in_bind_domain; } else { HYDT_topo_hwloc_info.num_bitmaps = total_map_domains; } /* initialize bitmaps */ HYDU_MALLOC_OR_JUMP(HYDT_topo_hwloc_info.bitmap, hwloc_bitmap_t *, HYDT_topo_hwloc_info.num_bitmaps * sizeof(hwloc_bitmap_t), status); for (i = 0; i < HYDT_topo_hwloc_info.num_bitmaps; i++) { HYDT_topo_hwloc_info.bitmap[i] = hwloc_bitmap_alloc(); hwloc_bitmap_zero(HYDT_topo_hwloc_info.bitmap[i]); } /* do bindings */ i = 0; while (i < HYDT_topo_hwloc_info.num_bitmaps) { for (j = 0; j < total_map_domains; j++) { bind_obj = hwloc_get_ancestor_obj_by_depth(topology, bind_depth, start_pu[j]); for (k = 0; k < bind_count; k++) { hwloc_bitmap_or(HYDT_topo_hwloc_info.bitmap[i], HYDT_topo_hwloc_info.bitmap[i], bind_obj->cpuset); /* if the binding is smaller than the mapping domain, wrap around inside that domain */ if (num_pus_in_bind_domain < num_pus_in_map_domain) { bind_obj = hwloc_get_next_obj_inside_cpuset_by_depth(topology, map_domains[j], bind_depth, bind_obj); if (!bind_obj) bind_obj = 
hwloc_get_next_obj_inside_cpuset_by_depth(topology, map_domains[j], bind_depth, bind_obj); } else { bind_obj = hwloc_get_next_obj_by_depth(topology, bind_depth, bind_obj); if (!bind_obj) bind_obj = hwloc_get_next_obj_by_depth(topology, bind_depth, bind_obj); } } i++; /* advance the starting position for this map domain, if needed */ if (num_pus_in_bind_domain < num_pus_in_map_domain) { for (k = 0; k < num_pus_in_bind_domain; k++) { start_pu[j] = hwloc_get_next_obj_inside_cpuset_by_type(topology, map_domains[j], HWLOC_OBJ_PU, start_pu[j]); if (!start_pu[j]) start_pu[j] = hwloc_get_next_obj_inside_cpuset_by_type(topology, map_domains[j], HWLOC_OBJ_PU, start_pu[j]); } } } } /* free temporary memory */ MPL_free(map_domains); MPL_free(start_pu); fn_exit: HYDU_FUNC_EXIT(); return status; fn_fail: goto fn_exit; }