/** * @brief parse long timers list and copy those that will expire in the current * short timer period */ static void _select_long_timers(void) { xtimer_t *select_list_start = long_list_head; xtimer_t *select_list_last = NULL; /* advance long_list head so it points to the first timer of the next (not * just started) "long timer period" */ while (long_list_head) { if ((long_list_head->long_target <= _long_cnt) && _this_high_period(long_list_head->target)) { select_list_last = long_list_head; long_list_head = long_list_head->next; } else { /* remaining long_list timers belong to later long periods */ break; } } /* cut the "selected long timer list" at the end */ if (select_list_last) { select_list_last->next = NULL; } /* merge "current timer list" and "selected long timer list" */ if (timer_list_head) { if (select_list_last) { /* both lists are non-empty. merge. */ timer_list_head = _merge_lists(timer_list_head, select_list_start); } else { /* "selected long timer list" is empty, nothing to do */ } } else { /* current timer list is empty */ if (select_list_last) { /* there's no current timer list, but a non-empty "selected long * timer list". So just use that list as the new current timer * list.*/ timer_list_head = select_list_start; } } }
/*
 * We could load gres state or validate it using various mechanisms here.
 * This only validates that the configuration was specified in gres.conf.
 * In the general case, no code would need to be changed.
 *
 * Builds the normalized gres.conf list for this node: expands GPU records
 * to one-device-per-record, pairs every GPU device file with an MPS record,
 * loads the common device state, and strips MPS records again when no MPS
 * devices end up configured.
 *
 * Returns SLURM_SUCCESS, or calls fatal() if the configuration fails to
 * load.
 */
extern int node_config_load(List gres_conf_list, node_config_load_t *config)
{
	int rc = SLURM_SUCCESS;
	log_level_t log_lvl;
	List gpu_conf_list, mps_conf_list;
	bool have_fake_gpus = _test_gpu_list_fake();

	/* Assume this state is caused by an scontrol reconfigure */
	debug_flags = slurm_get_debug_flags();
	if (gres_devices) {
		debug("Resetting gres_devices");
		FREE_NULL_LIST(gres_devices);
	}
	FREE_NULL_LIST(mps_info);

	if (debug_flags & DEBUG_FLAG_GRES)
		log_lvl = LOG_LEVEL_VERBOSE;
	else
		log_lvl = LOG_LEVEL_DEBUG;
	/* Fixed log-message typo: "Initalized" -> "Initial" */
	log_var(log_lvl, "%s: Initial gres.conf list:", plugin_name);
	print_gres_list(gres_conf_list, log_lvl);

	/*
	 * Ensure that every GPU device file is listed as a MPS file.
	 * Any MPS entry that we need to add will have a "Count" of zero.
	 * Every MPS "Type" will be made to match the GPU "Type". The order
	 * of MPS records (by "File") must match the order in which GPUs are
	 * defined for the GRES bitmaps in slurmctld to line up.
	 *
	 * First, convert all GPU records to a new entries in a list where
	 * each File is a unique device (i.e. convert a record with
	 * "File=nvidia[0-3]" into 4 separate records).
	 */
	gpu_conf_list = _build_gpu_list(gres_conf_list);

	/* Now move MPS records to new List, each with unique device file */
	mps_conf_list = _build_mps_list(gres_conf_list);

	/*
	 * Merge MPS records back to original list, updating and reordering
	 * as needed.
	 */
	_merge_lists(gres_conf_list, gpu_conf_list, mps_conf_list);
	FREE_NULL_LIST(gpu_conf_list);
	FREE_NULL_LIST(mps_conf_list);

	rc = common_node_config_load(gres_conf_list, gres_name, &gres_devices);
	if (rc != SLURM_SUCCESS)
		fatal("%s failed to load configuration", plugin_name);

	/* If no MPS devices were actually built, drop the MPS records again */
	if (_build_mps_dev_info(gres_conf_list) == 0)
		_remove_mps_recs(gres_conf_list);

	log_var(log_lvl, "%s: Final gres.conf list:", plugin_name);
	print_gres_list(gres_conf_list, log_lvl);

	/* Print in parsable format for tests if fake system is in use */
	if (have_fake_gpus) {
		info("Final normalized gres.conf list (parsable):");
		print_gres_list_parsable(gres_conf_list);
	}

	return rc;
}