char *
opt_find (Opt o, char *key)
{
    char *s;

    NP_ASSERT (o->magic == OPT_MAGIC);
    if (strchr (key, '='))
        s = list_find_first (o->list, (ListFindF)_match_keyval, key);
    else
        s = list_find_first (o->list, (ListFindF)_match_key, key);

    return s ? _optstr (s) : NULL;
}
int cerebro_event_unregister(cerebro_t handle, int fd)
{
    if (_cerebro_handle_check(handle) < 0)
        return -1;

    if (fd < 0) {
        handle->errnum = CEREBRO_ERR_PARAMETERS;
        return -1;
    }

    if (!list_find_first(handle->event_fds, _event_fd_find, &fd)) {
        handle->errnum = CEREBRO_ERR_PARAMETERS;
        return -1;
    }

    if (!list_delete_all(handle->event_fds, _event_fd_find, &fd)) {
        handle->errnum = CEREBRO_ERR_INTERNAL;
        return -1;
    }

    /* ignore potential error, just return result */
    close(fd);

    handle->errnum = CEREBRO_ERR_SUCCESS;
    return 0;
}
/*
 * hostsfile_clusterlist_is_node_in_cluster
 *
 * hostsfile clusterlist module is_node_in_cluster function
 */
static int
hostsfile_clusterlist_is_node_in_cluster(nodeupdown_t handle, const char *node)
{
    char nodebuf[NODEUPDOWN_MAXNODENAMELEN+1];
    char *nodePtr = NULL;
    void *ptr;

    /* Have to assume it is */
    if (!list_count(hosts))
        return 1;

    /* Shorten hostname if necessary */
    if (strchr(node, '.')) {
        char *p;

        memset(nodebuf, '\0', NODEUPDOWN_MAXNODENAMELEN+1);
        strncpy(nodebuf, node, NODEUPDOWN_MAXNODENAMELEN);
        p = strchr(nodebuf, '.');
        *p = '\0';
        nodePtr = nodebuf;
    }
    else
        nodePtr = (char *)node;

    ptr = list_find_first(hosts, _find_str, (void *)nodePtr);

    if (ptr)
        return 1;
    else
        return 0;
}
/*
 * get_part_list - find record for named partition(s)
 * IN name - partition name(s) in a comma separated list
 * RET List of pointers to the partitions or NULL if not found
 * NOTE: Caller must free the returned list
 */
extern List get_part_list(char *name)
{
    struct part_record *part_ptr;
    List job_part_list = NULL;
    char *token, *last = NULL, *tmp_name;

    if (name == NULL)
        return job_part_list;

    tmp_name = xstrdup(name);
    token = strtok_r(tmp_name, ",", &last);
    while (token) {
        part_ptr = list_find_first(part_list, &list_find_part, token);
        if (part_ptr) {
            if (job_part_list == NULL) {
                job_part_list = list_create(NULL);
            }
            list_append(job_part_list, part_ptr);
        } else {
            FREE_NULL_LIST(job_part_list);
            break;
        }
        token = strtok_r(NULL, ",", &last);
    }
    xfree(tmp_name);
    return job_part_list;
}
static void _remove_job_tres_time_from_cluster(List c_tres, List j_tres,
                                               int seconds)
{
    ListIterator c_itr;
    local_tres_usage_t *loc_c_tres, *loc_j_tres;
    uint64_t time;

    if ((seconds <= 0) || !c_tres || !j_tres ||
        !list_count(c_tres) || !list_count(j_tres))
        return;

    c_itr = list_iterator_create(c_tres);
    while ((loc_c_tres = list_next(c_itr))) {
        if (!(loc_j_tres = list_find_first(
                  j_tres, _find_loc_tres, &loc_c_tres->id)))
            continue;
        time = seconds * loc_j_tres->count;
        if (time >= loc_c_tres->total_time)
            loc_c_tres->total_time = 0;
        else
            loc_c_tres->total_time -= time;
    }
    list_iterator_destroy(c_itr);
}
/* Notify the gang scheduler that a job has been started */
extern int gs_job_start(struct job_record *job_ptr)
{
    struct gs_part *p_ptr;
    uint16_t job_state;

    if (gs_debug_flags & DEBUG_FLAG_GANG)
        info("gang: entering gs_job_start for job %u", job_ptr->job_id);

    /* add job to partition */
    pthread_mutex_lock(&data_mutex);
    p_ptr = list_find_first(gs_part_list, _find_gs_part, job_ptr->partition);
    if (p_ptr) {
        job_state = _add_job_to_part(p_ptr, job_ptr);
        /* if this job is running then check for preemption */
        if (job_state == GS_RESUME)
            _update_all_active_rows();
    }
    pthread_mutex_unlock(&data_mutex);

    if (!p_ptr) {
        /* No partition was found for this job, so let it run
         * uninterrupted (what else can we do?) */
        error("gang: could not find partition %s for job %u",
              job_ptr->partition, job_ptr->job_id);
    }

    _preempt_job_dequeue();  /* MUST BE OUTSIDE OF data_mutex lock */

    if (gs_debug_flags & DEBUG_FLAG_GANG)
        info("gang: leaving gs_job_start");

    return SLURM_SUCCESS;
}
/* Notify the gang scheduler that a job has completed */
extern int gs_job_fini(struct job_record *job_ptr)
{
    struct gs_part *p_ptr;
    char *part_name;

    if (slurmctld_conf.debug_flags & DEBUG_FLAG_GANG)
        info("gang: entering gs_job_fini for job %u", job_ptr->job_id);

    if (job_ptr->part_ptr && job_ptr->part_ptr->name)
        part_name = job_ptr->part_ptr->name;
    else
        part_name = job_ptr->partition;

    pthread_mutex_lock(&data_mutex);
    p_ptr = list_find_first(gs_part_list, _find_gs_part, part_name);
    if (!p_ptr) {
        pthread_mutex_unlock(&data_mutex);
        if (slurmctld_conf.debug_flags & DEBUG_FLAG_GANG)
            info("gang: leaving gs_job_fini");
        return SLURM_SUCCESS;
    }

    /* remove job from the partition */
    _remove_job_from_part(job_ptr->job_id, p_ptr, true);
    /* this job may have preempted other jobs, so
     * check by updating all active rows */
    _update_all_active_rows();
    pthread_mutex_unlock(&data_mutex);

    if (slurmctld_conf.debug_flags & DEBUG_FLAG_GANG)
        info("gang: leaving gs_job_fini");

    return SLURM_SUCCESS;
}
/*
 * license_job_get - Get the licenses required for a job
 * IN job_ptr - job identification
 * RET SLURM_SUCCESS or failure code
 */
extern int license_job_get(struct job_record *job_ptr)
{
    ListIterator iter;
    licenses_t *license_entry, *match;
    int rc = SLURM_SUCCESS;

    if (!job_ptr->license_list)  /* no licenses needed */
        return rc;

    slurm_mutex_lock(&license_mutex);
    iter = list_iterator_create(job_ptr->license_list);
    if (iter == NULL)
        fatal("malloc failure from list_iterator_create");
    while ((license_entry = (licenses_t *) list_next(iter))) {
        match = list_find_first(license_list, _license_find_rec,
                                license_entry->name);
        if (match) {
            match->used += license_entry->total;
            license_entry->used += license_entry->total;
        } else {
            error("could not find license %s for job %u",
                  license_entry->name, job_ptr->job_id);
            rc = SLURM_ERROR;
        }
    }
    list_iterator_destroy(iter);
    _licenses_print("acquire_license", license_list, job_ptr->job_id);
    slurm_mutex_unlock(&license_mutex);
    return rc;
}
/* Prepare cluster_list to be federation centric that will be passed to
 * verify_fed_clusters in federation_functions.c.
 */
static int _verify_fed_clusters(List cluster_list, const char *fed_name,
                                bool *existing_fed)
{
    int rc = SLURM_SUCCESS;
    char *tmp_name = NULL;
    List tmp_list = list_create(slurmdb_destroy_cluster_rec);
    ListIterator itr = list_iterator_create(cluster_list);

    while ((tmp_name = list_next(itr))) {
        slurmdb_cluster_rec_t *rec = xmalloc(sizeof(slurmdb_cluster_rec_t));
        slurmdb_init_cluster_rec(rec, 0);
        rec->name = xstrdup(tmp_name);
        list_append(tmp_list, rec);
    }

    if ((rc = verify_fed_clusters(tmp_list, fed_name, existing_fed)))
        goto end_it;

    /* have to reconcile lists now, clusters may have been removed from
     * tmp_list */
    list_iterator_reset(itr);
    while ((tmp_name = list_next(itr))) {
        if (!list_find_first(tmp_list, _find_cluster_rec_in_list, tmp_name))
            list_delete_item(itr);
    }

end_it:
    FREE_NULL_LIST(tmp_list);
    list_iterator_destroy(itr);

    return rc;
}
/*
 * license_job_return - Return the licenses allocated to a job
 * IN job_ptr - job identification
 * RET SLURM_SUCCESS or failure code
 */
extern int license_job_return(struct job_record *job_ptr)
{
    ListIterator iter;
    licenses_t *license_entry, *match;
    int rc = SLURM_SUCCESS;

    if (!job_ptr->license_list)  /* no licenses needed */
        return rc;

    slurm_mutex_lock(&license_mutex);
    iter = list_iterator_create(job_ptr->license_list);
    while ((license_entry = (licenses_t *) list_next(iter))) {
        match = list_find_first(license_list, _license_find_rec,
                                license_entry->name);
        if (match) {
            if (match->used >= license_entry->total)
                match->used -= license_entry->total;
            else {
                error("license use count underflow for %s",
                      match->name);
                match->used = 0;
                rc = SLURM_ERROR;
            }
            license_entry->used = 0;
        } else {
            /* This can happen after a reconfiguration */
            error("job returning unknown license %s",
                  license_entry->name);
        }
    }
    list_iterator_destroy(iter);
    _licenses_print("return_license", license_list, job_ptr->job_id);
    slurm_mutex_unlock(&license_mutex);
    return rc;
}
void cproxy_disconnect(svc_client_handle_t *h)
{
    struct _svc_cnx *cnx = (struct _svc_cnx *)
        list_find_first(&_services, _cmp_handle, h);

    if (cnx) {
        cfw_close_service(h, cnx);
    }
}
static void _build_tres_list(void)
{
    ListIterator iter;
    slurmdb_tres_rec_t *tres;
    char *tres_tmp = NULL, *tres_tmp2 = NULL, *save_ptr = NULL, *tok;

    if (!g_tres_list) {
        slurmdb_tres_cond_t cond = {0};
        g_tres_list = slurmdb_tres_get(db_conn, &cond);
        if (!g_tres_list) {
            fatal("Problem getting TRES data: %m");
            exit(1);
        }
    }
    FREE_NULL_LIST(tres_list);
    tres_list = list_create(slurmdb_destroy_tres_rec);

    if (!tres_str) {
        int tres_cpu_id = TRES_CPU;
        slurmdb_tres_rec_t *tres2;
        if (!(tres = list_find_first(g_tres_list,
                                     slurmdb_find_tres_in_list,
                                     &tres_cpu_id)))
            fatal("Failed to find CPU TRES!");
        tres2 = slurmdb_copy_tres_rec(tres);
        list_append(tres_list, tres2);
        return;
    }

    tres_usage_str = "TRES";

    iter = list_iterator_create(g_tres_list);
    while ((tres = list_next(iter))) {
        tres_tmp = xstrdup(tres_str);
        xstrfmtcat(tres_tmp2, "%s%s%s",
                   tres->type,
                   tres->name ? "/" : "",
                   tres->name ? tres->name : "");
        tok = strtok_r(tres_tmp, ",", &save_ptr);
        while (tok) {
            if (!xstrcasecmp(tres_tmp2, tok))
                break;
            tok = strtok_r(NULL, ",", &save_ptr);
        }
        if (tok) {
            slurmdb_tres_rec_t *tres2 = slurmdb_copy_tres_rec(tres);
            list_append(tres_list, tres2);
        }
        xfree(tres_tmp2);
        xfree(tres_tmp);
    }
    if (!list_count(tres_list))
        fatal("No valid TRES given");
    list_iterator_destroy(iter);
}
extern int fed_mgr_update_feds(slurmdb_update_object_t *update)
{
    List feds;
    slurmdb_federation_rec_t *fed = NULL;
    slurmdb_cluster_rec_t *cluster = NULL;
    slurmctld_lock_t fed_write_lock = {
        NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK, WRITE_LOCK };

    if (!update->objects)
        return SLURM_SUCCESS;

    slurm_mutex_lock(&init_mutex);
    if (!inited) {
        slurm_mutex_unlock(&init_mutex);
        return SLURM_SUCCESS; /* we haven't started the fed mgr and we
                               * can't start it from here, don't worry
                               * all will get set up later. */
    }
    slurm_mutex_unlock(&init_mutex);

    /* we only want one update happening at a time. */
    slurm_mutex_lock(&update_mutex);
    if (slurmctld_conf.debug_flags & DEBUG_FLAG_FEDR)
        info("Got a federation update");

    feds = update->objects;

    /* find the federation that this cluster is in.
     * if it's changed from last time then update stored information.
     * grab other clusters in federation
     * establish connections with each cluster in federation */

    /* what if a remote cluster is removed from federation.
     * have to detect that and close the connection to the remote */
    while ((fed = list_pop(feds))) {
        if (fed->cluster_list &&
            (cluster = list_find_first(fed->cluster_list,
                                       slurmdb_find_cluster_in_list,
                                       slurmctld_cluster_name))) {
            _join_federation(fed, cluster, true);
            break;
        }

        slurmdb_destroy_federation_rec(fed);
    }

    if (!fed) {
        if (slurmctld_conf.debug_flags & DEBUG_FLAG_FEDR)
            info("Not part of any federation");
        lock_slurmctld(fed_write_lock);
        _leave_federation();
        unlock_slurmctld(fed_write_lock);
    }
    slurm_mutex_unlock(&update_mutex);

    return SLURM_SUCCESS;
}
/* Given a license string, return a list of license_t records */
static List _build_license_list(char *licenses, bool *valid)
{
    int i;
    char *end_num, *tmp_str, *token, *last;
    licenses_t *license_entry;
    List lic_list;

    *valid = true;
    if ((licenses == NULL) || (licenses[0] == '\0'))
        return NULL;

    lic_list = list_create(license_free_rec);
    tmp_str = xstrdup(licenses);
    token = strtok_r(tmp_str, ",;", &last);
    while (token && *valid) {
        uint32_t num = 1;
        for (i = 0; token[i]; i++) {
            if (isspace(token[i])) {
                *valid = false;
                break;
            }
            /* ':' is used as a separator in version 2.5 or later
             * '*' is used as a separator in version 2.4 or earlier */
            if ((token[i] == ':') || (token[i] == '*')) {
                token[i++] = '\0';
                num = (uint32_t)strtol(&token[i], &end_num, 10);
            }
        }
        if (num <= 0) {
            *valid = false;
            break;
        }

        license_entry = list_find_first(lic_list, _license_find_rec, token);
        if (license_entry) {
            license_entry->total += num;
        } else {
            license_entry = xmalloc(sizeof(licenses_t));
            license_entry->name = xstrdup(token);
            license_entry->total = num;
            list_push(lic_list, license_entry);
        }
        token = strtok_r(NULL, ",;", &last);
    }
    xfree(tmp_str);

    if (*valid == false) {
        list_destroy(lic_list);
        lic_list = NULL;
    }
    return lic_list;
}
int optparse_parse_args (optparse_t p, int argc, char *argv[])
{
    int c;
    int li;
    char *saved_argv0;
    char *optstring = NULL;
    struct option *optz = option_table_create (p, &optstring);

    /* Always set optind = 0 here to force internal initialization of
     * GNU options parser. See getopt_long(3) NOTES section. */
    optind = 0;

    saved_argv0 = argv[0];
    argv[0] = p->program_name;

    while ((c = getopt_long (argc, argv, optstring, optz, &li))) {
        struct option_info *opt;
        struct optparse_option *o;
        if (c == -1)
            break;
        if (c == '?') {
            fprintf (stderr, "Unknown option. Try --help\n");
            optind = -1;
            break;
        }

        opt = list_find_first (p->option_list, (ListFindF) by_val, &c);
        if (opt == NULL) {
            fprintf (stderr,
                     "ugh, didn't find option associated with char %c\n", c);
            continue;
        }

        opt->found++;
        if (optarg) {
            if (opt->optarg)
                free (opt->optarg);
            opt->optarg = strdup (optarg);
        }

        o = opt->p_opt;
        if (o->cb && ((o->cb) (o, optarg, o->arg) < 0)) {
            fprintf (stderr, "Option \"%s\" failed\n", o->name);
            optind = -1;
            break;
        }
    }

    free (optz);
    free (optstring);

    argv[0] = saved_argv0;
    return (optind);
}
static void _add_job_alloc_time_to_cluster(List c_tres_list, List j_tres)
{
    ListIterator c_itr = list_iterator_create(c_tres_list);
    local_tres_usage_t *loc_c_tres, *loc_j_tres;

    while ((loc_c_tres = list_next(c_itr))) {
        if (!(loc_j_tres = list_find_first(
                  j_tres, _find_loc_tres, &loc_c_tres->id)))
            continue;

        loc_c_tres->time_alloc += loc_j_tres->time_alloc;
    }
    list_iterator_destroy(c_itr);
}
static struct conn_helper_data *get_conn_helper(
    _cfw_client_t *c, struct conn_helper_data *helper)
{
    struct conn_helper_data *ret = NULL;
    list_head_t *helper_list = &c->helper_list;
    int flags = irq_lock();

    ret = (struct conn_helper_data *)
        list_find_first(helper_list, conn_helper_data_check, helper);
    irq_unlock(flags);
    return ret;
}
/*
 * Must have FED unlocked prior to entering
 */
static void _fed_mgr_ptr_init(slurmdb_federation_rec_t *db_fed,
                              slurmdb_cluster_rec_t *cluster)
{
    ListIterator c_itr;
    slurmdb_cluster_rec_t *tmp_cluster, *db_cluster;
    slurmctld_lock_t fed_write_lock = {
        NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK, WRITE_LOCK };

    xassert(cluster);

    if (slurmctld_conf.debug_flags & DEBUG_FLAG_FEDR)
        info("Joining federation %s", db_fed->name);

    lock_slurmctld(fed_write_lock);
    if (fed_mgr_fed_rec) {
        /* we are already part of a federation, preserve existing
         * connections */
        c_itr = list_iterator_create(db_fed->cluster_list);
        while ((db_cluster = list_next(c_itr))) {
            if (!xstrcmp(db_cluster->name, slurmctld_cluster_name)) {
                fed_mgr_cluster_rec = db_cluster;
                continue;
            }
            if (!(tmp_cluster =
                      list_find_first(fed_mgr_fed_rec->cluster_list,
                                      slurmdb_find_cluster_in_list,
                                      db_cluster->name))) {
                /* don't worry about destroying the connection
                 * here. It will happen below when we free
                 * fed_mgr_fed_rec (automagically). */
                continue;
            }
            slurm_mutex_lock(&tmp_cluster->lock);
            /* transfer over the connections we already have */
            db_cluster->fed.send = tmp_cluster->fed.send;
            tmp_cluster->fed.send = NULL;
            db_cluster->fed.recv = tmp_cluster->fed.recv;
            tmp_cluster->fed.recv = NULL;
            slurm_mutex_unlock(&tmp_cluster->lock);
        }
        list_iterator_destroy(c_itr);
        slurmdb_destroy_federation_rec(fed_mgr_fed_rec);
    } else
        fed_mgr_cluster_rec = cluster;

    fed_mgr_fed_rec = db_fed;
    unlock_slurmctld(fed_write_lock);
}
char *
diod_get_exports (char *name, void *a)
{
    List exports = diod_conf_get_exports ();
    List seen = NULL;
    ListIterator itr = NULL;
    Export *x;
    int len = 0;
    char *s = NULL;
    char *ret = NULL;

    NP_ASSERT (exports != NULL);

    if (!(seen = list_create (NULL))) {
        np_uerror (ENOMEM);
        goto done;
    }
    if (!(itr = list_iterator_create (exports))) {
        np_uerror (ENOMEM);
        goto done;
    }
    while ((x = list_next (itr))) {
        if (list_find_first (seen, (ListFindF)_strmatch, x->path))
            continue;
        if (!list_append (seen, x->path)) {
            np_uerror (ENOMEM);
            goto done;
        }
        if (!(x->oflags & XFLAGS_SUPPRESS)) {
            if (aspf (&s, &len, "%s %s %s %s\n",
                      x->path,
                      x->opts ? x->opts : "-",
                      x->users ? x->users : "-",
                      x->hosts ? x->hosts : "-") < 0) {
                np_uerror (ENOMEM);
                goto done;
            }
        }
    }
    if (diod_conf_get_exportall ())
        if (!_get_mounts (&s, &len, seen))
            goto done;
    ret = s;
done:
    if (itr)
        list_iterator_destroy (itr);
    if (seen)
        list_destroy (seen);
    return ret;
}
static void _setup_cluster_tres(List tres_list, uint32_t id,
                                uint64_t count, int seconds)
{
    local_tres_usage_t *loc_tres =
        list_find_first(tres_list, _find_loc_tres, &id);

    if (!loc_tres) {
        loc_tres = xmalloc(sizeof(local_tres_usage_t));
        loc_tres->id = id;
        list_append(tres_list, loc_tres);
    }

    loc_tres->count = count;
    loc_tres->total_time += seconds * loc_tres->count;
}
/* Query the quota for uid and add it to qlist if successful. */
static void add_quota(confent_t *cp, List qlist, uid_t uid, char *name)
{
    quota_t q;

    if (list_find_first(qlist, (ListFindF)quota_match_uid, &uid))
        return;
    q = quota_create(cp->cf_label, cp->cf_rhost, cp->cf_rpath, cp->cf_thresh);
    if (quota_get(uid, q)) {
        quota_destroy(q);
        return;
    }
    if (name)
        quota_adduser (q, name);
    list_append(qlist, q);
}
extern char *fed_mgr_find_sibling_name_by_ip(char *ip)
{
    char *name = NULL;
    slurmdb_cluster_rec_t *sibling = NULL;
    slurmctld_lock_t fed_read_lock = {
        NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK, READ_LOCK };

    lock_slurmctld(fed_read_lock);
    if (fed_mgr_fed_rec && fed_mgr_fed_rec->cluster_list &&
        (sibling = list_find_first(fed_mgr_fed_rec->cluster_list,
                                   _find_sibling_by_ip, ip)))
        name = xstrdup(sibling->name);
    unlock_slurmctld(fed_read_lock);

    return name;
}
int usb_pm_unregister_callback(struct device *dev, void (*cb)(bool, void*))
{
    struct usb_pm_info *usb_dev = (struct usb_pm_info *)dev->priv;
    list_t *element =
        list_find_first(&usb_dev->cb_head, check_item_callback, (void *)cb);

    if (element == NULL) {
        // element not found
        return -1;
    }

    // Remove list element
    list_remove(&usb_dev->cb_head, element);
    bfree(element);
    return 0;
}
static void _handle_client_message(struct cfw_message *msg, void *data)
{
    (void)data;
    switch (CFW_MESSAGE_ID(msg)) {
    case MSG_ID_CFW_OPEN_SERVICE:
    {
        /* We have passed the allocated cnx as an opaque data */
        struct _svc_cnx *cnx = CFW_MESSAGE_PRIV(msg);
        /* Get the service parameters from the message and store them
         * locally */
        cfw_open_conn_rsp_msg_t *con_msg = (cfw_open_conn_rsp_msg_t *)msg;
        cnx->sh = (svc_client_handle_t *)(con_msg->client_handle);
        cnx->src_port = con_msg->port;
        cfw_msg_free(msg);
        list_add(&_services, (list_t *)cnx);
        break;
    }
    case MSG_ID_CFW_CLOSE_SERVICE:
    {
        struct _svc_cnx *cnx = CFW_MESSAGE_PRIV(msg);
        list_remove(&_services, (list_t *)cnx);
        bfree(cnx);
        cfw_msg_free(msg);
        break;
    }
    case MSG_ID_CFW_SVC_AVAIL_EVT:
    {
        struct _svc_cnx *cnx = CFW_MESSAGE_PRIV(msg);
        if (((cfw_svc_available_evt_msg_t *)msg)->service_id ==
            cnx->service_id) {
            cfw_open_service(_proxy_handle, cnx->service_id, cnx);
        }
        cfw_msg_free(msg);
        break;
    }
    default:
    {
        /* Find the service connection based on the message source port */
        struct _svc_cnx *cnx = (struct _svc_cnx *)
            list_find_first(&_services, _cmp_port, &(CFW_MESSAGE_SRC(msg)));
        if (cnx) {
            cnx->cb(msg, cnx->data);
        } else {
            cfw_msg_free(msg);
        }
        break;
    }
    }
}
static void _persist_callback_fini(void *arg)
{
    slurm_persist_conn_t *persist_conn = arg;
    slurmdb_cluster_rec_t *cluster;
    slurmctld_lock_t fed_write_lock = {
        NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK, WRITE_LOCK };

    /* If we are shutting down just return or you will get deadlock since
     * all these locks are already locked. */
    if (!persist_conn || *persist_conn->shutdown)
        return;

    lock_slurmctld(fed_write_lock);

    /* shutting down */
    if (!fed_mgr_fed_rec) {
        unlock_slurmctld(fed_write_lock);
        return;
    }

    if (!(cluster = list_find_first(fed_mgr_fed_rec->cluster_list,
                                    slurmdb_find_cluster_in_list,
                                    persist_conn->cluster_name))) {
        info("Couldn't find cluster %s?", persist_conn->cluster_name);
        unlock_slurmctld(fed_write_lock);
        return;
    }

    slurm_mutex_lock(&cluster->lock);
    /* This will get handled at the end of the thread, don't free it here */
    cluster->fed.recv = NULL;
//  persist_conn = cluster->fed.recv;
//  slurm_persist_conn_close(persist_conn);

    persist_conn = cluster->fed.send;
    if (persist_conn) {
        if (slurmctld_conf.debug_flags & DEBUG_FLAG_FEDR)
            info("Closing send to sibling cluster %s", cluster->name);
        slurm_persist_conn_close(persist_conn);
    }
    slurm_mutex_unlock(&cluster->lock);

    unlock_slurmctld(fed_write_lock);
}
/* distribute unused reservation usage to associations that
 * could have run jobs */
static int _process_resv_idle_time(List resv_usage_list, List assoc_usage_list)
{
    ListIterator r_itr;
    local_resv_usage_t *r_usage;
    local_id_usage_t *a_usage = NULL;
    int seconds;
    int last_id = -1;

    r_itr = list_iterator_create(resv_usage_list);
    while ((r_usage = list_next(r_itr))) {
        char *assoc = NULL;
        ListIterator tmp_itr = NULL;
        int64_t idle = r_usage->total_time - r_usage->a_cpu;

        if (idle <= 0)
            continue;

        /* now divide that time by the number of associations in the
         * reservation and add them to each association */
        seconds = idle / list_count(r_usage->local_assocs);
        /* info("resv %d got %d for seconds for %d assocs", */
        /*      r_usage->id, seconds, */
        /*      list_count(r_usage->local_assocs)); */
        tmp_itr = list_iterator_create(r_usage->local_assocs);
        while ((assoc = list_next(tmp_itr))) {
            int associd = atoi(assoc);
            if (last_id != associd) {
                a_usage = list_find_first(assoc_usage_list,
                                          _cmp_local_id, &associd);
            }
            if (!a_usage) {
                a_usage = xmalloc(sizeof(local_id_usage_t));
                a_usage->id = associd;
                list_append(assoc_usage_list, a_usage);
                last_id = associd;
            }

            a_usage->a_cpu += seconds;
        }
        list_iterator_destroy(tmp_itr);
    }
    list_iterator_destroy(r_itr);

    return SLURM_SUCCESS;
}
/*
 * license_job_test - Test if the licenses required for a job are available
 * IN job_ptr - job identification
 * IN when - time to check
 * RET: SLURM_SUCCESS, EAGAIN (not available now), SLURM_ERROR (never runnable)
 */
extern int license_job_test(struct job_record *job_ptr, time_t when)
{
    ListIterator iter;
    licenses_t *license_entry, *match;
    int rc = SLURM_SUCCESS, resv_licenses;

    if (!job_ptr->license_list)  /* no licenses needed */
        return rc;

    slurm_mutex_lock(&license_mutex);
    iter = list_iterator_create(job_ptr->license_list);
    if (iter == NULL)
        fatal("malloc failure from list_iterator_create");
    while ((license_entry = (licenses_t *) list_next(iter))) {
        match = list_find_first(license_list, _license_find_rec,
                                license_entry->name);
        if (!match) {
            error("could not find license %s for job %u",
                  license_entry->name, job_ptr->job_id);
            rc = SLURM_ERROR;
            break;
        } else if (license_entry->total > match->total) {
            info("job %u wants more %s licenses than configured",
                 job_ptr->job_id, match->name);
            rc = SLURM_ERROR;
            break;
        } else if ((license_entry->total + match->used) > match->total) {
            rc = EAGAIN;
            break;
        } else {
            resv_licenses = job_test_lic_resv(job_ptr,
                                              license_entry->name, when);
            if ((license_entry->total + match->used + resv_licenses) >
                match->total) {
                rc = EAGAIN;
                break;
            }
        }
    }
    list_iterator_destroy(iter);
    slurm_mutex_unlock(&license_mutex);

    return rc;
}
static local_tres_usage_t *_add_time_tres(List tres_list, int type,
                                          uint32_t id, uint64_t time,
                                          bool times_count)
{
    local_tres_usage_t *loc_tres;

    if (!time)
        return NULL;

    loc_tres = list_find_first(tres_list, _find_loc_tres, &id);

    if (!loc_tres) {
        if (times_count)
            return NULL;
        loc_tres = xmalloc(sizeof(local_tres_usage_t));
        loc_tres->id = id;
        list_append(tres_list, loc_tres);
    }

    if (times_count) {
        if (!loc_tres->count)
            return NULL;
        time *= loc_tres->count;
    }

    switch (type) {
    case TIME_ALLOC:
        loc_tres->time_alloc += time;
        break;
    case TIME_DOWN:
        loc_tres->time_down += time;
        break;
    case TIME_PDOWN:
        loc_tres->time_pd += time;
        break;
    case TIME_RESV:
        loc_tres->time_resv += time;
        break;
    default:
        error("_add_time_tres: unknown type %d given", type);
        xassert(0);
        break;
    }

    return loc_tres;
}
/* Update licenses on this system based upon slurm.conf.
 * Preserve all previously allocated licenses */
extern int license_update(char *licenses)
{
    ListIterator iter;
    licenses_t *license_entry, *match;
    List new_list;
    bool valid;

    new_list = _build_license_list(licenses, &valid);
    if (!valid)
        fatal("Invalid configured licenses: %s", licenses);

    slurm_mutex_lock(&license_mutex);
    if (!license_list) {  /* no licenses before now */
        license_list = new_list;
        slurm_mutex_unlock(&license_mutex);
        return SLURM_SUCCESS;
    }

    iter = list_iterator_create(license_list);
    if (iter == NULL)
        fatal("malloc failure from list_iterator_create");
    while ((license_entry = (licenses_t *) list_next(iter))) {
        match = list_find_first(new_list, _license_find_rec,
                                license_entry->name);
        if (!match) {
            info("license %s removed with %u in use",
                 license_entry->name, license_entry->used);
        } else {
            match->used = license_entry->used;
            if (match->used > match->total) {
                info("license %s count decreased", match->name);
            }
        }
    }
    list_iterator_destroy(iter);

    list_destroy(license_list);
    license_list = new_list;
    _licenses_print("update_license", license_list, 0);
    slurm_mutex_unlock(&license_mutex);
    return SLURM_SUCCESS;
}
/*
 * license_validate - Test if the required licenses are valid
 * IN licenses - required licenses
 * OUT valid - true if required licenses are valid and a sufficient number
 *             are configured (though not necessarily available now)
 * RET license_list, must be destroyed by caller
 */
extern List license_validate(char *licenses, bool *valid)
{
    ListIterator iter;
    licenses_t *license_entry, *match;
    List job_license_list;

    job_license_list = _build_license_list(licenses, valid);
    if (!job_license_list)
        return job_license_list;

    slurm_mutex_lock(&license_mutex);
    _licenses_print("request_license", job_license_list, 0);
    iter = list_iterator_create(job_license_list);
    if (iter == NULL)
        fatal("malloc failure from list_iterator_create");
    while ((license_entry = (licenses_t *) list_next(iter))) {
        if (license_list) {
            match = list_find_first(license_list, _license_find_rec,
                                    license_entry->name);
        } else
            match = NULL;
        if (!match) {
            debug("could not find license %s for job",
                  license_entry->name);
            *valid = false;
            break;
        } else if (license_entry->total > match->total) {
            debug("job wants more %s licenses than configured",
                  match->name);
            *valid = false;
            break;
        }
    }
    list_iterator_destroy(iter);
    slurm_mutex_unlock(&license_mutex);

    if (!(*valid)) {
        list_destroy(job_license_list);
        job_license_list = NULL;
    }
    return job_license_list;
}