static bool _run_in_daemon(void)
{
	static bool set = false;
	static bool run = false;

	if (!set) {
		set = true;
		run = run_in_daemon("slurmstepd");
	}

	return run;
}

static bool _is_thread_launcher(void)
{
	static bool set = false;
	static bool run = false;

	if (!set) {
		set = true;
		run = run_in_daemon("slurmd");
	}

	return run;
}
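/*
 * A minimal, self-contained sketch of the memoize-once pattern the two
 * helpers above use: an assumed-expensive check runs on the first call
 * and the cached result is returned afterwards.  The names
 * _expensive_check() and _check_once() are hypothetical stand-ins, not
 * part of the source.
 */
#include <stdbool.h>
#include <stdio.h>

static bool _expensive_check(void)
{
	return true;	/* stand-in for run_in_daemon("slurmstepd") */
}

static bool _check_once(void)
{
	static bool set = false;
	static bool result = false;

	if (!set) {
		set = true;
		result = _expensive_check();
	}
	return result;
}

int main(void)
{
	/* _expensive_check() runs only on the first call */
	printf("%d %d\n", _check_once(), _check_once());
	return 0;
}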
int main() { openlog("smadom_auto_bl", LOG_PID | LOG_CONS, LOG_USER); syslog(LOG_INFO, "smadom_auto_bl server starting"); printf("smadom_auto_bl starting....\n"); init_sensors(); printf("Initialed sensor's interfaces\n"); init_equipments() ; printf("Initialed equipmen's interfaces\n"); run_in_daemon(); TIMES_LIGHT = 15; DELAY_LIGHT = 20; TIMES_BED_LIGHT_PERSON = 15; DELAY_BED_LIGHT_PERSON = 20; KEEP_BED_LIGHT = 3; void *arg; thread_auto_bed_light(arg); }
/*
 * init() is called when the plugin is loaded, before any other functions
 * are called.  Put global initialization here.
 */
extern int init(void)
{
	/* We must call the API here since this code runs in places
	 * other than the slurmctld. */
	uint16_t select_type_param = slurm_get_select_type_param();

	if (select_type_param & CR_OTHER_CONS_RES)
		plugin_id = 108;
	debug_flags = slurm_get_debug_flags();

#ifdef HAVE_NATIVE_CRAY
	// Spawn the aeld thread, only in slurmctld.
	if (run_in_daemon("slurmctld"))
		_spawn_cleanup_thread(NULL, _aeld_event_loop);
#endif

	verbose("%s loaded", plugin_name);

	return SLURM_SUCCESS;
}
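/*
 * _spawn_cleanup_thread() itself is not shown in this section.  Below is
 * a hedged sketch of what such a helper commonly looks like: create a
 * detached pthread running the given event loop so init() never has to
 * join it.  The name _spawn_detached() is hypothetical.
 */
#include <pthread.h>

static int _spawn_detached(void *arg, void *(*start)(void *))
{
	pthread_attr_t attr;
	pthread_t tid;
	int rc;

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	rc = pthread_create(&tid, &attr, start, arg);
	pthread_attr_destroy(&attr);
	return rc;	/* 0 on success, errno-style code on failure */
}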
int main() { openlog("smadom_auto_tl", LOG_PID | LOG_CONS, LOG_USER); syslog(LOG_INFO, "smadom_auto_tl server starting"); printf("smadom_auto_tl starting....\n"); init_sensors(); printf("Initialed sensor's interfaces\n"); init_equipments() ; printf("Initialed equipmen's interfaces\n"); run_in_daemon(); TIMES_LIGHT = 15; DELAY_LIGHT = 20; TIMES_SEAT_PERSON = 15; DELAY_SEAT_PERSON = 20; KEEP_TABLE_LIGHT= 10; void *arg; thread_auto_table_light(arg); // auto_table_light(); // while (tmp_running) { // sleep (1); // printf("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"); // } }
extern int jobacct_gather_init(void)
{
	char *plugin_type = "jobacct_gather";
	char *type = NULL;
	int retval = SLURM_SUCCESS;

	if (slurmdbd_conf || (_init_run_test() && g_context))
		return retval;

	slurm_mutex_lock(&g_context_lock);
	if (g_context)
		goto done;

	type = slurm_get_jobacct_gather_type();

	g_context = plugin_context_create(
		plugin_type, type, (void **)&ops, syms, sizeof(syms));

	if (!g_context) {
		error("cannot create %s context for %s", plugin_type, type);
		retval = SLURM_ERROR;
		goto done;
	}

	if (!xstrcasecmp(type, "jobacct_gather/none")) {
		plugin_polling = false;
		goto done;
	}

	slurm_mutex_lock(&init_run_mutex);
	init_run = true;
	slurm_mutex_unlock(&init_run_mutex);

	/* only print the WARNING messages if in the slurmctld */
	if (!run_in_daemon("slurmctld"))
		goto done;

	plugin_type = type;
	type = slurm_get_proctrack_type();
	if (!xstrcasecmp(type, "proctrack/pgid")) {
		info("WARNING: We will use a much slower algorithm with "
		     "proctrack/pgid, use Proctracktype=proctrack/linuxproc "
		     "or some other proctrack when using %s", plugin_type);
		pgid_plugin = true;
	}
	xfree(type);
	xfree(plugin_type);

	type = slurm_get_accounting_storage_type();
	if (!xstrcasecmp(type, ACCOUNTING_STORAGE_TYPE_NONE)) {
		error("WARNING: Even though we are collecting accounting "
		      "information you have asked for it not to be stored "
		      "(%s) if this is not what you have in mind you will "
		      "need to change it.", ACCOUNTING_STORAGE_TYPE_NONE);
	}

done:
	slurm_mutex_unlock(&g_context_lock);
	xfree(type);
	return retval;
}
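/*
 * jobacct_gather_init() follows a check-then-lock-then-recheck shape: a
 * cheap unlocked test short-circuits the common case, and the test is
 * repeated under g_context_lock so only one thread builds the plugin
 * context.  A minimal sketch of the same shape with plain pthreads; all
 * names here are hypothetical.
 */
#include <pthread.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static void *ctx;	/* stands in for g_context */

static void *_build_context(void)
{
	static int dummy;
	return &dummy;	/* stand-in for plugin_context_create() */
}

static int guarded_init(void)
{
	if (ctx)		/* unlocked fast path, as in the original */
		return 0;

	pthread_mutex_lock(&ctx_lock);
	if (!ctx)		/* re-check under the lock */
		ctx = _build_context();
	pthread_mutex_unlock(&ctx_lock);
	return ctx ? 0 : -1;
}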
/*
 * _set_collectors calls the split_hostlist API on the hostlist of all
 * nodes to pick the node to be used as a collector for unsolicited node
 * aggregation.
 *
 * If this node is a forwarding node (the first node in any hostlist),
 * then its collector and backup are the ControlMachine and its backup.
 *
 * Otherwise, we find the hostlist containing this node.
 * The forwarding node in that hostlist becomes a collector, and the next
 * node which is not this node becomes the backup.
 * That list is split, and we iterate through it searching for a list in
 * which this node is a forwarding node.  If found, we set the collector
 * and backup; otherwise the process is repeated.
 */
static void _set_collectors(char *this_node_name)
{
	slurm_ctl_conf_t *conf;
	hostlist_t nodes;
	hostlist_t *hll = NULL;
	char *parent = NULL, *backup = NULL;
	char addrbuf[32];
	int i, j, f = -1;
	int hl_count = 0;
	uint16_t parent_port;
	uint16_t backup_port;
	bool found = false;
	bool ctldparent = true;

#ifdef HAVE_FRONT_END
	return; /* on a FrontEnd system this would never be useful. */
#endif

	if (!run_in_daemon("slurmd"))
		return; /* Only compute nodes have collectors */

	/* Set the initial iteration: collector is controller,
	 * full list is split */
	xassert(this_node_name);

	conf = slurm_conf_lock();
	nodes = _get_all_nodes();
	parent = strdup(conf->control_addr);
	if (conf->backup_addr)
		backup = strdup(conf->backup_addr);
	parent_port = conf->slurmctld_port;
	backup_port = parent_port;
	slurm_conf_unlock();

	while (!found) {
		if (route_g_split_hostlist(nodes, &hll, &hl_count)) {
			error("unable to split forward hostlist");
			goto clean; /* collector addrs remain NULL */
		}
		/* Find which hostlist contains this node */
		for (i = 0; i < hl_count; i++) {
			f = hostlist_find(hll[i], this_node_name);
			if (f != -1)
				break;
		}
		if (i == hl_count) {
			fatal("ROUTE -- %s not found in node_record_table",
			      this_node_name);
		}
		if (f == 0) {
			/* we are a forwarded-to node,
			 * so our parent is parent */
			if (hostlist_count(hll[i]) > 1)
				this_is_collector = true;
			xfree(msg_collect_node);
			msg_collect_node = xmalloc(sizeof(slurm_addr_t));
			if (ctldparent)
				slurm_set_addr(msg_collect_node, parent_port,
					       parent);
			else {
				slurm_conf_get_addr(parent, msg_collect_node);
				msg_collect_node->sin_port =
					htons(parent_port);
			}
			if (debug_flags & DEBUG_FLAG_ROUTE) {
				slurm_print_slurm_addr(msg_collect_node,
						       addrbuf, 32);
				info("ROUTE -- message collector address is %s",
				     addrbuf);
			}
			xfree(msg_collect_backup);
			if (backup) {
				msg_collect_backup =
					xmalloc(sizeof(slurm_addr_t));
				if (ctldparent) {
					slurm_set_addr(msg_collect_backup,
						       backup_port, backup);
				} else {
					slurm_conf_get_addr(
						backup, msg_collect_backup);
					msg_collect_backup->sin_port =
						htons(backup_port);
				}
				if (debug_flags & DEBUG_FLAG_ROUTE) {
					slurm_print_slurm_addr(
						msg_collect_backup,
						addrbuf, 32);
					info("ROUTE -- message collector backup"
					     " address is %s", addrbuf);
				}
			} else if (debug_flags & DEBUG_FLAG_ROUTE) {
				info("ROUTE -- no message collector backup");
			}
			found = true;
			goto clean;
		}

		/* We are not a forwarding node; the first node in this
		 * list will split the forward_list.
		 * We also know that the forwarding node is not a
		 * controller.
		 *
		 * Clean up the parent context. */
		ctldparent = false;
		hostlist_destroy(nodes);
		if (parent)
			free(parent);
		if (backup)
			free(backup);
		nodes = hostlist_copy(hll[i]);
		for (j = 0; j < hl_count; j++)
			hostlist_destroy(hll[j]);
		xfree(hll);

		/* set our parent and backup, and continue the search */
		parent = hostlist_shift(nodes);
		backup = hostlist_nth(nodes, 0);
		if (strcmp(backup, this_node_name) == 0) {
			free(backup);
			backup = NULL;
			if (hostlist_count(nodes) > 1)
				backup = hostlist_nth(nodes, 1);
		}
		parent_port = slurm_conf_get_port(parent);
		if (backup)
			backup_port = slurm_conf_get_port(backup);
		else
			backup_port = 0;
	}

clean:
	if (debug_flags & DEBUG_FLAG_ROUTE) {
		if (this_is_collector)
			info("ROUTE -- %s is a collector node",
			     this_node_name);
		else
			info("ROUTE -- %s is a leaf node", this_node_name);
	}
	hostlist_destroy(nodes);
	if (parent)
		free(parent);
	if (backup)
		free(backup);
	for (i = 0; i < hl_count; i++)
		hostlist_destroy(hll[i]);
	xfree(hll);
}
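/*
 * A toy, self-contained model of the descent in _set_collectors(): split
 * the current list into sublists, find ours; if we head it, the parent
 * chosen so far is our collector, otherwise its head becomes the new
 * parent and we descend into the rest of the sublist.  Everything here
 * (names, the fixed two-way split in place of route_g_split_hostlist())
 * is illustrative only.
 */
#include <stdio.h>
#include <string.h>

#define FANOUT 2

static int find_parent(const char **nodes, int n, const char *me,
		       const char **parent_out)
{
	const char *parent = "controller";	/* initial collector */

	while (n > 0) {
		int chunk = (n + FANOUT - 1) / FANOUT;	/* sublist size */
		int i, start = -1, len;

		for (i = 0; i < n; i++)		/* locate our sublist */
			if (strcmp(nodes[i], me) == 0)
				start = (i / chunk) * chunk;
		if (start < 0)
			return -1;		/* not in the list */
		if (strcmp(nodes[start], me) == 0)
			break;			/* we head this sublist */
		parent = nodes[start];		/* its head is our parent */
		len = chunk;
		if (start + len > n)
			len = n - start;
		nodes += start + 1;		/* descend past the head */
		n = len - 1;
	}
	*parent_out = parent;
	return 0;
}

int main(void)
{
	const char *nodes[] = { "n0", "n1", "n2", "n3", "n4", "n5" };
	const char *parent = NULL;

	if (find_parent(nodes, 6, "n5", &parent) == 0)
		printf("collector for n5 is %s\n", parent);	/* n3 */
	return 0;
}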
/*
 * Called to check whether the node supports setting the CPU frequency;
 * if so, initialize fields in the cpu_freq_data structure.
 */
extern void cpu_freq_init(slurmd_conf_t *conf)
{
	char path[PATH_MAX];
	struct stat statbuf;
	FILE *fp;
	char value[LINE_LEN];
	unsigned int i, j;

	debug_flags = slurm_get_debug_flags(); /* init for slurmd */

	xfree(slurmd_spooldir);
	slurmd_spooldir = xstrdup(conf->spooldir);

	if (run_in_daemon("slurmstepd"))
		return;

	/* check for cpufreq support */
	if (stat(PATH_TO_CPU "cpu0/cpufreq", &statbuf) != 0) {
		info("CPU frequency setting not configured for this node");
		return;
	}
	if (!S_ISDIR(statbuf.st_mode)) {
		error(PATH_TO_CPU "cpu0/cpufreq not a directory");
		return;
	}

	/* get the cpu frequency info into the cpu_freq_data structure */
	cpu_freq_count = conf->block_map_size;
	if (!cpufreq) {
		int cpuidx;
		cpufreq = (struct cpu_freq_data *)
			  xmalloc(cpu_freq_count *
				  sizeof(struct cpu_freq_data));
		for (cpuidx = 0; cpuidx < cpu_freq_count; cpuidx++)
			_cpu_freq_init_data(cpuidx);
	}

	debug2("Gathering cpu frequency information for %u cpus",
	       cpu_freq_count);
	for (i = 0; i < cpu_freq_count; i++) {
		snprintf(path, sizeof(path),
			 PATH_TO_CPU
			 "cpu%u/cpufreq/scaling_available_governors", i);
		if ((fp = fopen(path, "r")) == NULL)
			continue;
		if (fgets(value, LINE_LEN, fp) == NULL) {
			fclose(fp);
			continue;
		}
		if (strstr(value, "conservative")) {
			cpufreq[i].avail_governors |= GOV_CONSERVATIVE;
			if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ))
				info("cpu_freq: Conservative governor "
				     "defined on cpu 0");
		}
		if (strstr(value, "ondemand")) {
			cpufreq[i].avail_governors |= GOV_ONDEMAND;
			if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ))
				info("cpu_freq: OnDemand governor "
				     "defined on cpu 0");
		}
		if (strstr(value, "performance")) {
			cpufreq[i].avail_governors |= GOV_PERFORMANCE;
			if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ))
				info("cpu_freq: Performance governor "
				     "defined on cpu 0");
		}
		if (strstr(value, "powersave")) {
			cpufreq[i].avail_governors |= GOV_POWERSAVE;
			if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ))
				info("cpu_freq: PowerSave governor "
				     "defined on cpu 0");
		}
		if (strstr(value, "userspace")) {
			cpufreq[i].avail_governors |= GOV_USERSPACE;
			if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ))
				info("cpu_freq: UserSpace governor "
				     "defined on cpu 0");
		}
		fclose(fp);

		if (_cpu_freq_cpu_avail(i) == SLURM_FAILURE)
			continue;
		if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ)) {
			for (j = 0; j < cpufreq[i].nfreq; j++)
				info("cpu_freq: frequency %u defined on cpu 0",
				     cpufreq[i].avail_freq[j]);
		}
	}
	return;
}
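/*
 * A hedged, standalone sketch of the sysfs probe cpu_freq_init() performs
 * for each CPU: read the scaling_available_governors list and test for a
 * governor name with strstr().  The hard-coded path matches the
 * PATH_TO_CPU convention above and exists only on Linux nodes with
 * cpufreq support.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char value[1024];
	FILE *fp = fopen("/sys/devices/system/cpu/cpu0/cpufreq/"
			 "scaling_available_governors", "r");

	if (!fp) {
		puts("no cpufreq support on this node");
		return 0;
	}
	if (fgets(value, sizeof(value), fp) &&
	    strstr(value, "performance"))
		puts("performance governor available on cpu 0");
	fclose(fp);
	return 0;
}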