/*
 * Return the current cursor position (top-left is 0,0).
 * If the position cannot be obtained, returns s_cursor with
 * UNDEFINED coordinates.
 *
 * Sends the DSR query "\033[6n" to the terminal and parses the
 * "\033[row;colR" reply from stdin.  Any bytes that are not part of
 * the reply are pushed back with unget_stdin() so they are not lost.
 */
struct point_tag get_cursor_position(void)
{
	char ibuf[300];
	ssize_t len = 0;
	ssize_t read_len = 0;
	/* NOTE(review): one-before-the-array sentinel meaning "no ESC seen
	 * yet"; never dereferenced in that state, but forming ibuf - 1 is
	 * technically undefined behavior in ISO C — confirm intent. */
	char *escseq = ibuf - 1;
	int loop_count = 0;

	assert(!g_opt.no_report_cursor);

	/* Cached position still valid — return it without querying. */
	if (s_cursor.row != UNDEFINED) {
		return s_cursor;
	}

	/* DSR: ask the terminal to report the cursor position. */
	write(g_win_out, "\033[6n", strlen("\033[6n"));

	while (TRUE) {
		char *next_escseq;
retry:
		len += (read_len = read_stdin(ibuf + len, sizeof(ibuf) - len));
		if (read_len == 0) {
			debug(("loop = %d\n", loop_count + 1));
			/* Give up after 5 empty reads. */
			if (++loop_count == 5) {
				break;
			}
		}

		debug2(("get = \""));
		debug_write2(ibuf, len);
		debug2(("\"\n"));

		/* Re-examine the previous candidate ESC as well. */
		if (escseq != ibuf - 1) {
			escseq--;
		}

		/* The buffer may contain NUL bytes, so use memchr rather
		 * than strchr. */
		while ((next_escseq = memchr(escseq + 1, ESCAPE_CODE, len - (escseq - ibuf) - 1)) != NULL) {
			int i = 1;
			int row = UNDEFINED;
			int col = UNDEFINED;
			/* Bytes seen inside a candidate sequence that turn
			 * out not to belong to the reply. */
			char unget_buf[300];
			int unget_count = 0;
			int escseq_len;

			escseq = next_escseq;
			escseq_len = len - (escseq - ibuf);

			/* Not enough bytes yet for the shortest possible
			 * reply — read more. */
			if ((int)strlen("\033[0;0R") > escseq_len) {
				break; /* goto retry */
			}

			/* n = sscanf(escseq, "\033[%d;%d%c", &(s_cursor.row), &(s_cursor.col), &R); */
			if (escseq[i] != '[') {
				unget_buf[unget_count++] = escseq[i++];
				if (escseq[i] != '[') {
					continue;
				}
			}
			if (escseq[i + 1] == '[') {
				unget_buf[unget_count++] = escseq[i++];
			}

			/* Parse the row number; tolerate one interleaved
			 * non-digit byte per step (saved for unget). */
			while (TRUE) {
				i++;
				range_check(i);
				if (!isdigit((unsigned char)escseq[i])) {
					if (row != UNDEFINED && escseq[i] == ';') {
						break;
					}
					unget_buf[unget_count++] = escseq[i++];
					range_check(i);
				}
				if (isdigit((unsigned char)escseq[i])) {
					if (row == UNDEFINED) {
						row = 0;
					}
					row = row * 10 + escseq[i] - '0';
				} else {
					break;
				}
			}
			if (row == UNDEFINED) {
				continue;
			}
			if (escseq[i] != ';') {
				continue;
			}

			range_check(i + 1);
			if (escseq[i + 1] == ';') {
				unget_buf[unget_count++] = escseq[i++];
			}

			/* Parse the column number, terminated by 'R'. */
			while (TRUE) {
				i++;
				range_check(i);
				if (!isdigit((unsigned char)escseq[i])) {
					if (col != UNDEFINED && escseq[i] == 'R') {
						break;
					}
					unget_buf[unget_count++] = escseq[i++];
					range_check(i);
				}
				if (isdigit((unsigned char)escseq[i])) {
					if (col == UNDEFINED) {
						col = 0;
					}
					col = col * 10 + escseq[i] - '0';
				} else {
					break;
				}
			}
			if (col == UNDEFINED) {
				continue;
			}
			if (escseq[i] != 'R') {
				continue;
			}

			/* Bytes that preceded the escape sequence. */
			if (escseq > ibuf) {
				unget_stdin(ibuf, escseq - ibuf);
			}
			/* Bytes collected from inside the escape sequence. */
			if (unget_count > 0) {
				unget_stdin(unget_buf, unget_count);
			}
			/* Bytes that followed the escape sequence. */
			if (i < escseq_len - 1) {
				unget_stdin(&escseq[i + 1], escseq_len - 1 - i);
			}

			/* Terminal reports 1-based coordinates; convert to
			 * 0-based and apply the configured offset. */
			row--;
			col--;
			row -= s_cursor_diff.row;
			col -= s_cursor_diff.col;

			s_cursor.row = row;
			s_cursor.col = col;

			/* This can happen under GNU screen. */
			if (s_cursor.col > g_win->ws_col - 1) {
				put_crlf();
			}

			debug(("<get row = %d col = %d>", s_cursor.row, s_cursor.col));
			return s_cursor;
		}
	}

	/* Failure: push back whatever was read and return the (still
	 * UNDEFINED) cached position. */
	unget_stdin(ibuf, len);
	return s_cursor;
}
/*
 * Iterate over every line of the known_hosts style file at `path`,
 * invoking `callback(lineinfo, ctx)` for each.  `host`/`ip` (may be
 * NULL) are matched against the hosts field of each line; `options`
 * is a bitmask of HKF_WANT_MATCH / HKF_WANT_PARSE_KEY.
 *
 * Returns 0 on success, an SSH_ERR_* code on failure, or the first
 * non-zero value returned by the callback (which stops iteration).
 * lineinfo.key ownership stays with this function; it is freed here.
 */
int hostkeys_foreach(const char *path, hostkeys_foreach_fn *callback, void *ctx,
    const char *host, const char *ip, u_int options)
{
	FILE *f;
	char line[8192], oline[8192], ktype[128];
	u_long linenum = 0;
	char *cp, *cp2;
	u_int kbits;
	int hashed;
	int s, r = 0;
	struct hostkey_foreach_line lineinfo;
	size_t l;

	memset(&lineinfo, 0, sizeof(lineinfo));
	/* HKF_WANT_MATCH without a host to match is a caller error. */
	if (host == NULL && (options & HKF_WANT_MATCH) != 0)
		return SSH_ERR_INVALID_ARGUMENT;
	if ((f = fopen(path, "r")) == NULL)
		return SSH_ERR_SYSTEM_ERROR;

	debug3("%s: reading file \"%s\"", __func__, path);
	while (read_keyfile_line(f, path, line, sizeof(line), &linenum) == 0) {
		line[strcspn(line, "\n")] = '\0';
		/* Keep an unmodified copy: `line` is tokenized in place. */
		strlcpy(oline, line, sizeof(oline));

		/* Reset per-line state; free key from previous iteration. */
		sshkey_free(lineinfo.key);
		memset(&lineinfo, 0, sizeof(lineinfo));
		lineinfo.path = path;
		lineinfo.linenum = linenum;
		lineinfo.line = oline;
		lineinfo.marker = MRK_NONE;
		lineinfo.status = HKF_STATUS_OK;
		lineinfo.keytype = KEY_UNSPEC;

		/* Skip any leading whitespace, comments and empty lines. */
		for (cp = line; *cp == ' ' || *cp == '\t'; cp++)
			;
		if (!*cp || *cp == '#' || *cp == '\n') {
			if ((options & HKF_WANT_MATCH) == 0) {
				lineinfo.status = HKF_STATUS_COMMENT;
				if ((r = callback(&lineinfo, ctx)) != 0)
					break;
			}
			continue;
		}

		/* "@cert-authority" / "@revoked" style markers. */
		if ((lineinfo.marker = check_markers(&cp)) == MRK_ERROR) {
			verbose("%s: invalid marker at %s:%lu",
			    __func__, path, linenum);
			if ((options & HKF_WANT_MATCH) == 0)
				goto bad;
			continue;
		}

		/* Find the end of the host name portion. */
		for (cp2 = cp; *cp2 && *cp2 != ' ' && *cp2 != '\t'; cp2++)
			;
		lineinfo.hosts = cp;
		*cp2++ = '\0';

		/* Check if the host name matches. */
		if (host != NULL) {
			if ((s = match_maybe_hashed(host, lineinfo.hosts,
			    &hashed)) == -1) {
				debug2("%s: %s:%ld: bad host hash \"%.32s\"",
				    __func__, path, linenum, lineinfo.hosts);
				goto bad;
			}
			if (s == 1) {
				lineinfo.status = HKF_STATUS_MATCHED;
				lineinfo.match |= HKF_MATCH_HOST |
				    (hashed ? HKF_MATCH_HOST_HASHED : 0);
			}
			/* Try matching IP address if supplied */
			if (ip != NULL) {
				if ((s = match_maybe_hashed(ip,
				    lineinfo.hosts, &hashed)) == -1) {
					debug2("%s: %s:%ld: bad ip hash "
					    "\"%.32s\"", __func__, path,
					    linenum, lineinfo.hosts);
					goto bad;
				}
				if (s == 1) {
					lineinfo.status = HKF_STATUS_MATCHED;
					lineinfo.match |= HKF_MATCH_IP |
					    (hashed ? HKF_MATCH_IP_HASHED : 0);
				}
			}
			/*
			 * Skip this line if host matching requested and
			 * neither host nor address matched.
			 */
			if ((options & HKF_WANT_MATCH) != 0 &&
			    lineinfo.status != HKF_STATUS_MATCHED)
				continue;
		}

		/* Got a match.  Skip host name and any following whitespace */
		for (; *cp2 == ' ' || *cp2 == '\t'; cp2++)
			;
		if (*cp2 == '\0' || *cp2 == '#') {
			debug2("%s:%ld: truncated before key type",
			    path, linenum);
			goto bad;
		}
		lineinfo.rawkey = cp = cp2;
		if ((options & HKF_WANT_PARSE_KEY) != 0) {
			/*
			 * Extract the key from the line.  This will skip
			 * any leading whitespace.  Ignore badly formatted
			 * lines.
			 */
			if ((lineinfo.key = sshkey_new(KEY_UNSPEC)) == NULL) {
				error("%s: sshkey_new failed", __func__);
				r = SSH_ERR_ALLOC_FAIL;
				break;
			}
			if (!hostfile_read_key(&cp, &kbits, lineinfo.key)) {
#ifdef WITH_SSH1
				/* Retry parse as a legacy RSA1 key. */
				sshkey_free(lineinfo.key);
				lineinfo.key = sshkey_new(KEY_RSA1);
				if (lineinfo.key == NULL) {
					error("%s: sshkey_new fail", __func__);
					r = SSH_ERR_ALLOC_FAIL;
					break;
				}
				if (!hostfile_read_key(&cp, &kbits,
				    lineinfo.key))
					goto bad;
#else
				goto bad;
#endif
			}
			lineinfo.keytype = lineinfo.key->type;
			lineinfo.comment = cp;
		} else {
			/* Extract and parse key type */
			l = strcspn(lineinfo.rawkey, " \t");
			if (l <= 1 || l >= sizeof(ktype) ||
			    lineinfo.rawkey[l] == '\0')
				goto bad;
			memcpy(ktype, lineinfo.rawkey, l);
			ktype[l] = '\0';
			lineinfo.keytype = sshkey_type_from_name(ktype);

			/*
			 * Assume RSA1 if the first component is a short
			 * decimal number.
			 */
			if (lineinfo.keytype == KEY_UNSPEC && l < 8 &&
			    strspn(ktype, "0123456789") == l)
				lineinfo.keytype = KEY_RSA1;

			/*
			 * Check that something other than whitespace follows
			 * the key type.  This won't catch all corruption, but
			 * it does catch trivial truncation.
			 */
			cp2 += l; /* Skip past key type */
			for (; *cp2 == ' ' || *cp2 == '\t'; cp2++)
				;
			if (*cp2 == '\0' || *cp2 == '#') {
				debug2("%s:%ld: truncated after key type",
				    path, linenum);
				lineinfo.keytype = KEY_UNSPEC;
			}
			/* Shared invalid-line exit used by several branches
			 * above via goto. */
			if (lineinfo.keytype == KEY_UNSPEC) {
 bad:
				sshkey_free(lineinfo.key);
				lineinfo.key = NULL;
				lineinfo.status = HKF_STATUS_INVALID;
				if ((r = callback(&lineinfo, ctx)) != 0)
					break;
				continue;
			}
		}
		if ((r = callback(&lineinfo, ctx)) != 0)
			break;
	}
	sshkey_free(lineinfo.key);
	fclose(f);
	return r;
}
/* when cgroups are configured with cpuset, at least
 * cpuset.cpus and cpuset.mems must be set or the cgroup
 * will not be available at all.
 * we duplicate the ancestor configuration in the init step
 *
 * Returns XCGROUP_SUCCESS, or XCGROUP_ERROR if the ancestor cgroup
 * cannot be resolved/loaded or its cpuset parameters cannot be copied.
 */
static int _xcgroup_cpuset_init(xcgroup_t* cg)
{
	int fstatus, i;
	/* cpuset parameter files to inherit from the ancestor cgroup */
	static const char *cpuset_metafiles[] = { "cpus", "mems" };
	char *cpuset_conf = NULL;
	size_t csize = 0;
	xcgroup_t acg;
	char *acg_name = NULL;
	char *p;

	fstatus = XCGROUP_ERROR;

	/* Load ancestor cg: strip the last path component of cg->name.
	 * strrchr() replaces the legacy rindex(), which was removed from
	 * POSIX.1-2008; behavior is identical. */
	acg_name = (char*) xstrdup(cg->name);
	p = strrchr(acg_name, '/');
	if (p == NULL) {
		debug2("system cgroup: unable to get ancestor path for "
		       "cpuset cg '%s' : %m", cg->path);
		xfree(acg_name);
		return fstatus;
	} else
		*p = '\0';
	if (xcgroup_load(cg->ns, &acg, acg_name) != XCGROUP_SUCCESS) {
		debug2("system cgroup: unable to load ancestor for "
		       "cpuset cg '%s' : %m", cg->path);
		xfree(acg_name);
		return fstatus;
	}
	xfree(acg_name);

	/* inherits ancestor params */
	for (i = 0; i < 2; i++) {
	again:
		snprintf(cpuset_meta, sizeof(cpuset_meta), "%s%s",
			 cpuset_prefix, cpuset_metafiles[i]);
		if (xcgroup_get_param(&acg, cpuset_meta,
				      &cpuset_conf, &csize)
		    != XCGROUP_SUCCESS) {
			/* Some mounts expose the files as "cpuset.cpus"
			 * rather than "cpus"; retry once with the prefix. */
			if (!cpuset_prefix_set) {
				cpuset_prefix_set = 1;
				cpuset_prefix = "cpuset.";
				goto again;
			}
			debug("system cgroup: assuming no cpuset cg "
			      "support for '%s'", acg.path);
			xcgroup_destroy(&acg);
			return fstatus;
		}
		/* Strip the trailing newline before writing back. */
		if (csize > 0)
			cpuset_conf[csize-1] = '\0';
		if (xcgroup_set_param(cg, cpuset_meta, cpuset_conf)
		    != XCGROUP_SUCCESS) {
			debug("system cgroup: unable to write %s configuration "
			      "(%s) for cpuset cg '%s'", cpuset_meta,
			      cpuset_conf, cg->path);
			xcgroup_destroy(&acg);
			xfree(cpuset_conf);
			return fstatus;
		}
		xfree(cpuset_conf);
	}

	xcgroup_destroy(&acg);
	return XCGROUP_SUCCESS;
}
/*
 * Return 1 if the given hostkey is allowed for hostbased
 * authentication of user `cuser` from client host `chost`,
 * 0 otherwise.  May strip a trailing '.' from chost in place.
 */
int
hostbased_key_allowed(struct passwd *pw, const char *cuser, char *chost,
    struct sshkey *key)
{
	struct ssh *ssh = active_state; /* XXX */
	const char *resolvedname, *ipaddr, *lookup, *reason;
	HostStatus host_status;
	int len;
	char *fp;

	/* Revoked keys are rejected unconditionally. */
	if (auth_key_is_revoked(key))
		return 0;

	resolvedname = auth_get_canonical_hostname(ssh, options.use_dns);
	ipaddr = ssh_remote_ipaddr(ssh);

	debug2("%s: chost %s resolvedname %s ipaddr %s", __func__,
	    chost, resolvedname, ipaddr);

	/* Strip a trailing FQDN dot so name comparison is uniform. */
	if (((len = strlen(chost)) > 0) && chost[len - 1] == '.') {
		debug2("stripping trailing dot from chost %s", chost);
		chost[len - 1] = '\0';
	}

	if (options.hostbased_uses_name_from_packet_only) {
		/* Trust only the client-supplied hostname. */
		if (auth_rhosts2(pw, cuser, chost, chost) == 0) {
			debug2("%s: auth_rhosts2 refused "
			    "user \"%.100s\" host \"%.100s\" (from packet)",
			    __func__, cuser, chost);
			return 0;
		}
		lookup = chost;
	} else {
		/* Use the DNS-resolved name; warn on a mismatch with the
		 * name the client sent, but proceed with the resolved one. */
		if (strcasecmp(resolvedname, chost) != 0)
			logit("userauth_hostbased mismatch: "
			    "client sends %s, but we resolve %s to %s",
			    chost, ipaddr, resolvedname);
		if (auth_rhosts2(pw, cuser, resolvedname, ipaddr) == 0) {
			debug2("%s: auth_rhosts2 refused "
			    "user \"%.100s\" host \"%.100s\" addr \"%.100s\"",
			    __func__, cuser, resolvedname, ipaddr);
			return 0;
		}
		lookup = resolvedname;
	}
	debug2("%s: access allowed by auth_rhosts2", __func__);

	/* Certificates must additionally pass CA authority checks. */
	if (sshkey_is_cert(key) &&
	    sshkey_cert_check_authority(key, 1, 0, lookup, &reason)) {
		error("%s", reason);
		auth_debug_add("%s", reason);
		return 0;
	}

	host_status = check_key_in_hostfiles(pw, key, lookup,
	    _PATH_SSH_SYSTEM_HOSTFILE,
	    options.ignore_user_known_hosts ? NULL : _PATH_SSH_USER_HOSTFILE);

	/* backward compat if no key has been found. */
	if (host_status == HOST_NEW) {
		host_status = check_key_in_hostfiles(pw, key, lookup,
		    _PATH_SSH_SYSTEM_HOSTFILE2,
		    options.ignore_user_known_hosts ? NULL :
		    _PATH_SSH_USER_HOSTFILE2);
	}

	if (host_status == HOST_OK) {
		if (sshkey_is_cert(key)) {
			if ((fp = sshkey_fingerprint(key->cert->signature_key,
			    options.fingerprint_hash, SSH_FP_DEFAULT)) == NULL)
				fatal("%s: sshkey_fingerprint fail", __func__);
			verbose("Accepted certificate ID \"%s\" signed by "
			    "%s CA %s from %s@%s", key->cert->key_id,
			    sshkey_type(key->cert->signature_key), fp,
			    cuser, lookup);
		} else {
			if ((fp = sshkey_fingerprint(key,
			    options.fingerprint_hash, SSH_FP_DEFAULT)) == NULL)
				fatal("%s: sshkey_fingerprint fail", __func__);
			verbose("Accepted %s public key %s from %s@%s",
			    sshkey_type(key), fp, cuser, lookup);
		}
		free(fp);
	}

	return (host_status == HOST_OK);
}
/*
 * Merge one subscription <object> node from a Google Reader
 * subscription-list reply into the local feed list.  If a feed with
 * the same URL already exists under gsource->root, only its
 * subscription type is updated; otherwise a new feed node is created
 * and an initial update is scheduled.
 */
static void
google_source_merge_feed (xmlNodePtr match, gpointer user_data)
{
	GoogleSourcePtr	gsource = (GoogleSourcePtr)user_data;
	nodePtr		node;
	GSList		*iter;
	xmlNodePtr	xml;
	xmlChar		*title = NULL, *id = NULL;
	gchar		*url = NULL;

	xml = xpath_find (match, "./string[@name='title']");
	if (xml)
		title = xmlNodeListGetString (xml->doc, xml->xmlChildrenNode, 1);

	xml = xpath_find (match, "./string[@name='id']");
	if (xml) {
		id = xmlNodeListGetString (xml->doc, xml->xmlChildrenNode, 1);
		/* NOTE(review): assumes id always begins with "feed/" —
		 * url would point past unrelated bytes otherwise; confirm
		 * against the service's id format. */
		url = g_strdup(id + strlen ("feed/"));
	}

	/* Note: ids look like "feed/http://rss.slashdot.org" */
	if (id && title) {

		/* check if node to be merged already exists */
		iter = gsource->root->children;
		while (iter) {
			node = (nodePtr)iter->data;
			/* NOTE(review): assumes every child has a non-NULL
			 * subscription — verify folders can't appear here. */
			if (g_str_equal (node->subscription->source, url)) {
				node->subscription->type = &googleSourceFeedSubscriptionType;
				goto cleanup ;
			}
			iter = g_slist_next (iter);
		}

		debug2 (DEBUG_UPDATE, "adding %s (%s)", title, url);
		node = node_new (feed_get_node_type ());
		node_set_title (node, title);
		node_set_data (node, feed_new ());
		node_set_subscription (node, subscription_new (url, NULL, NULL));
		node->subscription->type = &googleSourceFeedSubscriptionType;
		node_set_parent (node, gsource->root, -1);
		feedlist_node_imported (node);

		/**
		 * @todo mark the ones as read immediately after this is done
		 * the feed as retrieved by this has the read and unread
		 * status inherently.
		 */
		subscription_update (node->subscription, FEED_REQ_RESET_TITLE | FEED_REQ_PRIORITY_HIGH);
		subscription_update_favicon (node->subscription);
	} else
		g_warning("Unable to parse subscription information from Google");

cleanup:
	if (id)
		xmlFree (id);
	if (title)
		xmlFree (title);
	g_free (url) ;
}
/*
 * Initialize cipher context `cc` for `cipher` with the given key and
 * IV.  `do_encrypt` selects encryption vs decryption.  Fatal()s on
 * any initialization failure; warns once per process about DES use.
 */
void
cipher_init(CipherContext *cc, Cipher *cipher,
    const u_char *key, u_int keylen, const u_char *iv, u_int ivlen,
    int do_encrypt)
{
	static int dowarn = 1;	/* one-shot DES deprecation warning */
#ifdef SSH_OLD_EVP
	EVP_CIPHER *type;
#else
	const EVP_CIPHER *type;
	int klen;
#endif
	u_char *junk, *discard;

	if (cipher->number == SSH_CIPHER_DES) {
		if (dowarn) {
			error("Warning: use of DES is strongly discouraged "
			    "due to cryptographic weaknesses");
			dowarn = 0;
		}
		/* DES only uses the first 8 key bytes. */
		if (keylen > 8)
			keylen = 8;
	}
	cc->plaintext = (cipher->number == SSH_CIPHER_NONE);

	/* Validate supplied key/IV sizes before touching OpenSSL. */
	if (keylen < cipher->key_len)
		fatal("cipher_init: key length %d is insufficient for %s.",
		    keylen, cipher->name);
	if (iv != NULL && ivlen < cipher->block_size)
		fatal("cipher_init: iv length %d is insufficient for %s.",
		    ivlen, cipher->name);
	cc->cipher = cipher;

	type = (*cipher->evptype)();

	EVP_CIPHER_CTX_init(&cc->evp);
#ifdef SSH_OLD_EVP
	/* Pre-0.9.7 OpenSSL: key length is set by mutating the cipher
	 * descriptor itself. */
	if (type->key_len > 0 && type->key_len != keylen) {
		debug("cipher_init: set keylen (%d -> %d)",
		    type->key_len, keylen);
		type->key_len = keylen;
	}
	EVP_CipherInit(&cc->evp, type, (u_char *)key, (u_char *)iv,
	    (do_encrypt == CIPHER_ENCRYPT));
#else
	/* Two-stage init: set cipher+IV first, adjust key length, then
	 * install the key (EVP requires this ordering for variable-length
	 * keys). */
	if (EVP_CipherInit(&cc->evp, type, NULL, (u_char *)iv,
	    (do_encrypt == CIPHER_ENCRYPT)) == 0)
		fatal("cipher_init: EVP_CipherInit failed for %s",
		    cipher->name);
	klen = EVP_CIPHER_CTX_key_length(&cc->evp);
	if (klen > 0 && keylen != (u_int)klen) {
		debug2("cipher_init: set keylen (%d -> %d)", klen, keylen);
		if (EVP_CIPHER_CTX_set_key_length(&cc->evp, keylen) == 0)
			fatal("cipher_init: set keylen failed (%d -> %d)",
			    klen, keylen);
	}
	if (EVP_CipherInit(&cc->evp, NULL, (u_char *)key, NULL, -1) == 0)
		fatal("cipher_init: EVP_CipherInit: set key failed for %s",
		    cipher->name);
#endif

	/* Discard the first discard_len bytes of keystream (used by
	 * ciphers with weak initial output, e.g. arcfour variants). */
	if (cipher->discard_len > 0) {
		junk = xmalloc(cipher->discard_len);
		discard = xmalloc(cipher->discard_len);
		if (EVP_Cipher(&cc->evp, discard, junk,
		    cipher->discard_len) == 0)
			fatal("evp_crypt: EVP_Cipher failed during discard");
		memset(discard, 0, cipher->discard_len);
		xfree(junk);
		xfree(discard);
	}
}
/* return 1 if user allows given key */
/*
 * Modified variant of OpenSSH's user_key_allowed2: instead of reading
 * authorized_keys line by line from `f`, it checks `key` against a
 * single cached public key obtained via get_pubkey() (globals
 * gs_auth_pubkey / gs_auth_fingerprint).
 *
 * NOTE(review): the while loop always exits after one pass (every
 * branch ends in break), and `linenum` is never advanced past 0, so
 * the "line %lu" log is always 0 — presumably intentional for the
 * single-key case; confirm.  `reason` is declared but unused here.
 */
static int
user_key_allowed2(struct passwd *pw, Key *key, char *file)
{
	char *line = NULL;
	const char *reason;
	int found_key = 0;
	FILE *f;
	u_long linenum = 0;
	Key *found;
	char *fp;

	/* Temporarily use the user's uid. */
	temporarily_use_uid(pw);

	debug("trying public key file %s", file);
	f = auth_openkeyfile(file, pw, options.strict_modes);
	if (!f) {
		restore_uid();
		return 0;
	}

	found_key = 0;
	/* Certificates are parsed as KEY_UNSPEC, plain keys as their type. */
	found = key_new(key_is_cert(key) ? KEY_UNSPEC : key->type);
	gs_auth_fingerprint = key_fingerprint(key, SSH_FP_MD5, SSH_FP_HEX);
	if (gs_auth_pubkey == NULL) {
		gs_auth_pubkey = get_pubkey();
	}
	line = gs_auth_pubkey;
	while (line) {
		char *cp, *key_options = NULL;

		auth_clear_options();

		/* Skip leading whitespace, empty and comment lines. */
		for (cp = line; *cp == ' ' || *cp == '\t'; cp++)
			;
		if (!*cp || *cp == '\n' || *cp == '#')
			break;

		if (key_read(found, &cp) != 1) {
			/* no key? check if there are options for this key */
			int quoted = 0;
			debug2("user_key_allowed: check options: '%s'", cp);
			key_options = cp;
			/* Scan past the (possibly quoted) options field. */
			for (; *cp && (quoted || (*cp != ' ' && *cp != '\t')); cp++) {
				if (*cp == '\\' && cp[1] == '"')
					cp++;	/* Skip both */
				else if (*cp == '"')
					quoted = !quoted;
			}
			/* Skip remaining whitespace. */
			for (; *cp == ' ' || *cp == '\t'; cp++)
				;
			if (key_read(found, &cp) != 1) {
				debug2("user_key_allowed: advance: '%s'", cp);
				/* still no key? advance to next line*/
				break;
			}
		}
		if (key_equal(found, key)) {
			/* Key matches; options must also permit login. */
			if (auth_parse_options(pw, key_options, file,
			    linenum) != 1)
				break;
			/* CA keys authorize certs, not direct logins. */
			if (key_is_cert_authority)
				break;
			found_key = 1;
			debug("matching key found: file %s, line %lu",
			    file, linenum);
			fp = key_fingerprint(found, SSH_FP_MD5, SSH_FP_HEX);
			verbose("Found matching %s key: %s",
			    key_type(found), fp);
			xfree(fp);
			break;
		}
		break;
	}
	restore_uid();
	fclose(f);
	key_free(found);
	if (!found_key)
		debug2("key not found");
	return found_key;
}
/*
 * Read and parse ext_sensors.conf into the global ext_sensors_cnf.
 * Returns SLURM_SUCCESS, or SLURM_ERROR if the global config struct
 * is not allocated.  fatal()s (does not return) when the file is
 * missing or unparsable, or when an RRA name required by the selected
 * data options is absent.
 */
extern int _ext_sensors_read_conf(void)
{
	s_p_options_t options[] = {
		{"JobData", S_P_STRING},
		{"NodeData", S_P_STRING},
		{"SwitchData", S_P_STRING},
		{"ColdDoorData", S_P_STRING},
		{"MinWatt", S_P_UINT32},
		{"MaxWatt", S_P_UINT32},
		{"MinTemp", S_P_UINT32},
		{"MaxTemp", S_P_UINT32},
		{"EnergyRRA", S_P_STRING},
		{"TempRRA", S_P_STRING},
		{"EnergyPathRRD", S_P_STRING},
		{"TempPathRRD", S_P_STRING},
		{NULL} };
	s_p_hashtbl_t *tbl = NULL;
	char *conf_path = NULL;
	struct stat buf;
	char *temp_str = NULL;

	/* Set initial values */
	if (ext_sensors_cnf == NULL) {
		return SLURM_ERROR;
	}
	_ext_sensors_clear_free_conf();
	/* Get the ext_sensors.conf path and validate the file */
	conf_path = get_extra_conf_path("ext_sensors.conf");
	if ((conf_path == NULL) || (stat(conf_path, &buf) == -1)) {
		fatal("ext_sensors: No ext_sensors file (%s)", conf_path);
	} else {
		debug2("ext_sensors: Reading ext_sensors file %s", conf_path);
		tbl = s_p_hashtbl_create(options);
		if (s_p_parse_file(tbl, NULL, conf_path, false)
		    == SLURM_ERROR) {
			fatal("ext_sensors: Could not open/read/parse "
			      "ext_sensors file %s", conf_path);
		}

		/* ext_sensors initialization parameters:
		 * each *Data option is a comma-style list scanned with
		 * strstr for the "energy"/"temp" keywords. */
		if (s_p_get_string(&temp_str, "JobData", tbl)) {
			if (strstr(temp_str, "energy"))
				ext_sensors_cnf->dataopts
					|= EXT_SENSORS_OPT_JOB_ENERGY;
		}
		xfree(temp_str);
		if (s_p_get_string(&temp_str, "NodeData", tbl)) {
			if (strstr(temp_str, "energy"))
				ext_sensors_cnf->dataopts
					|= EXT_SENSORS_OPT_NODE_ENERGY;
			if (strstr(temp_str, "temp"))
				ext_sensors_cnf->dataopts
					|= EXT_SENSORS_OPT_NODE_TEMP;
		}
		xfree(temp_str);
		if (s_p_get_string(&temp_str, "SwitchData", tbl)) {
			if (strstr(temp_str, "energy"))
				ext_sensors_cnf->dataopts
					|= EXT_SENSORS_OPT_SWITCH_ENERGY;
			if (strstr(temp_str, "temp"))
				ext_sensors_cnf->dataopts
					|= EXT_SENSORS_OPT_SWITCH_TEMP;
		}
		xfree(temp_str);
		if (s_p_get_string(&temp_str, "ColdDoorData", tbl)) {
			if (strstr(temp_str, "temp"))
				ext_sensors_cnf->dataopts
					|= EXT_SENSORS_OPT_COLDDOOR_TEMP;
		}
		xfree(temp_str);

		/* Numeric sanity bounds for sensor readings. */
		s_p_get_uint32(&ext_sensors_cnf->min_watt, "MinWatt", tbl);
		s_p_get_uint32(&ext_sensors_cnf->max_watt, "MaxWatt", tbl);
		s_p_get_uint32(&ext_sensors_cnf->min_temp, "MinTemp", tbl);
		s_p_get_uint32(&ext_sensors_cnf->max_temp, "MaxTemp", tbl);

		/* RRA names are mandatory when the corresponding data
		 * collection option was requested above. */
		if (!s_p_get_string(&ext_sensors_cnf->energy_rra_name,
				    "EnergyRRA", tbl)) {
			if (ext_sensors_cnf->dataopts
			    & EXT_SENSORS_OPT_JOB_ENERGY)
				fatal("ext_sensors/rrd: EnergyRRA "
				      "must be set to gather JobData=energy. "
				      "Please set this value in your "
				      "ext_sensors.conf file.");
		}

		if (!s_p_get_string(&ext_sensors_cnf->temp_rra_name,
				    "TempRRA", tbl)) {
			if (ext_sensors_cnf->dataopts
			    & EXT_SENSORS_OPT_NODE_TEMP)
				fatal("ext_sensors/rrd: TempRRA "
				      "must be set to gather NodeData=temp. "
				      "Please set this value in your "
				      "ext_sensors.conf file.");
		}

		s_p_get_string(&ext_sensors_cnf->energy_rrd_file,
			       "EnergyPathRRD", tbl);
		s_p_get_string(&ext_sensors_cnf->temp_rrd_file,
			       "TempPathRRD", tbl);

		s_p_hashtbl_destroy(tbl);
	}

	xfree(conf_path);

	return SLURM_SUCCESS;
}
/*
 * Add a directory entry `name` (length `namelen`) to Minix directory
 * inode `dir`.  On success returns 0 and sets *res_buf/*res_dir to
 * the buffer and entry slot used (caller releases the buffer).
 * Returns -ENOENT / -ENAMETOOLONG / -ENOSPC / -EEXIST on failure.
 * Extends dir->i_size when the scan walks past the current end.
 */
static int minix_add_entry(register struct inode *dir,
			   char *name, size_t namelen,
			   struct buffer_head **res_buf,
			   struct minix_dir_entry **res_dir)
{
	unsigned short block;
	loff_t offset;
	register struct buffer_head *bh;
	struct minix_dir_entry *de;
	struct minix_sb_info *info;

	*res_buf = NULL;
	*res_dir = NULL;
	if (!dir || !dir->i_sb)
		return -ENOENT;
	info = &dir->i_sb->u.minix_sb;
	if (namelen > info->s_namelen) {
#ifdef NO_TRUNCATE
		return -ENAMETOOLONG;
#else
		/* Silently truncate over-long names to the fs limit. */
		namelen = info->s_namelen;
#endif
	}
	if (!namelen)
		return -ENOENT;
	bh = NULL;
	block = 0;
	offset = 0L;
	/* Scan directory blocks for a duplicate name or a free slot. */
	while (1) {
		if (!bh) {
			/* minix_bread(..., 1): create the block if absent. */
			bh = minix_bread(dir, block, 1);
			if (!bh)
				return -ENOSPC;
			map_buffer(bh);
		}
		de = (struct minix_dir_entry *) (bh->b_data + offset);
		offset += info->s_dirsize;
		if (block * 1024L + offset > dir->i_size) {
			/* Walked past EOF: claim this slot as a fresh,
			 * empty entry and grow the directory. */
			de->inode = 0;
			dir->i_size = block * 1024L + offset;
			dir->i_dirt = 1;
		}
		if (de->inode) {
			if (namecompare(namelen, info->s_namelen, name, de->name)) {
				debug2("MINIXadd_entry: file %t==%s (already exists)\n",
				       name, de->name);
				unmap_brelse(bh);
				return -EEXIST;
			}
		} else {
			/* Free slot: fill in the name (copied from user
			 * space byte-by-byte, NUL padded) and finish. */
			size_t i;
			dir->i_mtime = dir->i_ctime = CURRENT_TIME;
			dir->i_dirt = 1;
			for (i = 0; i < info->s_namelen; i++)
				de->name[i] = (i < namelen)
					? (char) get_fs_byte(name + i) : '\0';
#ifdef BLOAT_FS
			dir->i_version = ++event;
#endif
			unmap_buffer(bh);
			mark_buffer_dirty(bh, 1);
			*res_dir = de;
			break;
		}
		if (offset < 1024)
			continue;
		/* End of this 1K block: release it and move to the next. */
		unmap_brelse(bh);
		bh = NULL;
		offset = 0;
		block++;
	}
	*res_buf = bh;
	return 0;
}
int do_signal(void) { register __ptask currentp = current; register struct sigaction *sa; unsigned signr; while (currentp->signal) { signr = find_first_non_zero_bit(¤tp->signal, NSIG); if (signr == NSIG) panic("No signal set!\n"); debug2("Process %d has signal %d.\n", currentp->pid, signr); sa = ¤tp->sig.action[signr]; signr++; if (sa->sa_handler == SIG_IGN) { debug("Ignore\n"); continue; } if (sa->sa_handler == SIG_DFL) { debug("Default\n"); if (currentp->pid == 1) continue; switch (signr) { case SIGCHLD: case SIGCONT: case SIGWINCH: continue; case SIGSTOP: case SIGTSTP: #ifndef SMALLSIG case SIGTTIN: case SIGTTOU: #endif currentp->state = TASK_STOPPED; /* Let the parent know */ currentp->p_parent->child_lastend = currentp->pid; currentp->p_parent->lastend_status = (int) signr; schedule(); continue; #if 0 case SIGABRT: #ifndef SMALLSIG case SIGFPE: case SIGILL: #endif case SIGQUIT: case SIGSEGV: #ifndef SMALLSIG case SIGTRAP: #endif #endif /* This is where we dump the core, which we must do */ default: do_exit((int) signr); } } debug1("Setting up return stack for sig handler %x.\n", sa->sa_handler); debug1("Stack at %x\n", current->t_regs.sp); arch_setup_sighandler_stack(current, sa->sa_handler, signr); debug1("Stack at %x\n", current->t_regs.sp); sa->sa_handler = SIG_DFL; return 1; } return 0; }
/*
 * srun_cr entry point: wraps srun under BLCR checkpoint/restart.
 * Initializes libcr, forwards all forwardable signals to the child,
 * fork/execs srun inside a critical section, then loops accepting
 * per-step connections from srun to learn launch state.
 * Never returns normally (infinite accept loop); fatal() on errors.
 */
int main(int argc, char **argv)
{
	int debug_level, sig, srun_fd;
	struct sigaction sa;
	log_options_t logopt = LOG_OPTS_STDERR_ONLY;
	struct sockaddr_un ca;
	/* NOTE(review): accept() expects socklen_t*; unsigned int matches
	 * it on common platforms — confirm for all supported targets. */
	unsigned int ca_len = sizeof(ca);

	atexit(remove_listen_socket);

	/* copied from srun */
	debug_level = _slurm_debug_env_val();
	logopt.stderr_level += debug_level;
	log_init(xbasename(argv[0]), logopt, 0, NULL);

	if (init_srun_argv(argc, argv)) {
		fatal("failed to initialize arguments for running srun");
	}

	if ((cr_id = cr_init()) < 0) {
		fatal("failed to initialize libcr: %s", cr_strerror(errno));
	}
	(void)cr_register_callback(cr_callback, NULL, CR_THREAD_CONTEXT);

	/* forward signals. copied from cr_restart */
	sa.sa_sigaction = signal_child;
	sa.sa_flags = SA_RESTART | SA_NODEFER | SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	for (sig = 0; sig < _NSIG; sig ++) {
		/* SIGSTOP/SIGKILL cannot be caught; SIGCHLD handled below. */
		if (sig == SIGSTOP || sig == SIGKILL || sig == SIGCHLD)
			continue;
		sigaction(sig, &sa, NULL);
	}
	sa.sa_sigaction = on_child_exit;
	sa.sa_flags = SA_RESTART | SA_SIGINFO | SA_NOCLDSTOP;
	sigaction(SIGCHLD, &sa, NULL);

	cr_enter_cs(cr_id);	/* BEGIN CS: avoid race condition of whether srun is forked */
	if ( fork_exec_srun() ) {
		fatal("failed fork/exec/wait srun");
	}
	cr_leave_cs(cr_id);	/* END CS */

	while (1) {
		pthread_mutex_lock(&step_launch_mutex);
		while (step_launched) {
			/* just avoid busy waiting */
			pthread_cond_wait(&step_launch_cond,
					  &step_launch_mutex);
		}
		pthread_mutex_unlock(&step_launch_mutex);

		if (_wait_for_srun_connect() < 0)
			continue;

		cr_enter_cs(cr_id);	/* BEGIN CS: checkpoint(callback) will be delayed */

		srun_fd = accept(listen_fd, (struct sockaddr*)&ca, &ca_len);
		if (srun_fd < 0) {
			/* restarted before enter CS. socket will not be restored */
			if (errno == EBADF) {
				cr_leave_cs(cr_id);
				continue;
			} else {
				fatal("failed to accept socket: %m");
			}
		}

		_read_info_from_srun(srun_fd);
		close(srun_fd);

		step_launched = 1;
		debug2("step launched");

		cr_leave_cs(cr_id);	/* END CS */
	}

	return 0;
}
/*
 * Opens a TCP/IP connection to the remote server on the given host.
 * The address of the remote host will be returned in hostaddr.
 * If port is 0, the default port will be used.  If needpriv is true,
 * a privileged port will be allocated to make the connection.
 * This requires super-user privileges if needpriv is true.
 * Connection_attempts specifies the maximum number of tries (one per
 * second).  If proxy_command is non-NULL, it specifies the command (with %h
 * and %p substituted for host and port, respectively) to use to contact
 * the daemon.
 *
 * Returns 0 on success, -1 when no address could be connected.
 */
int
ssh_connect(const char *host, struct sockaddr_storage * hostaddr,
    u_short port, int family, int connection_attempts, int *timeout_ms,
    int want_keepalive, int needpriv, const char *proxy_command)
{
	int gaierr;
	int on = 1;
	int sock = -1, attempt;
	char ntop[NI_MAXHOST], strport[NI_MAXSERV];
	struct addrinfo hints, *ai, *aitop;

	debug2("ssh_connect: needpriv %d", needpriv);

	/* If a proxy command is given, connect using it. */
	if (proxy_command != NULL)
		return ssh_proxy_connect(host, port, proxy_command);

	/* No proxy command. */

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = family;
	hints.ai_socktype = SOCK_STREAM;
	snprintf(strport, sizeof strport, "%u", port);
	if ((gaierr = getaddrinfo(host, strport, &hints, &aitop)) != 0)
		fatal("%s: Could not resolve hostname %.100s: %s", __progname,
		    host, ssh_gai_strerror(gaierr));

	for (attempt = 0; attempt < connection_attempts; attempt++) {
		if (attempt > 0) {
			/* Sleep a moment before retrying. */
			sleep(1);
			debug("Trying again...");
		}
		/*
		 * Loop through addresses for this host, and try each one in
		 * sequence until the connection succeeds.
		 */
		for (ai = aitop; ai; ai = ai->ai_next) {
			if (ai->ai_family != AF_INET &&
			    ai->ai_family != AF_INET6)
				continue;
			if (getnameinfo(ai->ai_addr, ai->ai_addrlen,
			    ntop, sizeof(ntop), strport, sizeof(strport),
			    NI_NUMERICHOST|NI_NUMERICSERV) != 0) {
				error("ssh_connect: getnameinfo failed");
				continue;
			}
			debug("Connecting to %.200s [%.100s] port %s.",
				host, ntop, strport);

			/* Create a socket for connecting. */
			sock = ssh_create_socket(needpriv, ai);
			if (sock < 0)
				/*
				 * Any serious error is already output,
				 * at least in the debug case.
				 */
				continue;

			if (timeout_connect(sock, ai->ai_addr, ai->ai_addrlen,
			    timeout_ms) >= 0) {
				/* Successful connection. */
				memcpy(hostaddr, ai->ai_addr, ai->ai_addrlen);
				break;
			} else {
				debug("connect to address %s port %s: %s",
				    ntop, strport, strerror(errno));
				close(sock);
				sock = -1;
			}
		}
		if (sock != -1)
			break;	/* Successful connection. */
	}

	freeaddrinfo(aitop);

	/* Return failure if we didn't get a successful connection. */
	if (sock == -1) {
		error("ssh: connect to host %s port %s: %s",
		    host, strport, strerror(errno));
		return (-1);
	}

	debug("Connection established.");

	/* Set SO_KEEPALIVE if requested. */
	if (want_keepalive &&
	    setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (void *)&on,
	    sizeof(on)) < 0)
		error("setsockopt SO_KEEPALIVE: %.100s", strerror(errno));

	/* Set the connection. */
	packet_set_connection(sock, sock);
	packet_set_timeout(options.server_alive_interval,
	    options.server_alive_count_max);

	return 0;
}
/*
 * Insert each slurmdb_qos_rec_t in qos_list into the QOS table,
 * recording a matching transaction row and queuing an update for the
 * cluster.  Requires SLURMDB_ADMIN_SUPER_USER privileges for `uid`.
 * Records successfully queued for update are removed from qos_list
 * (ownership transfers to the update list).
 * Returns SLURM_SUCCESS or an error code; resets the connection when
 * nothing was added.
 */
extern int as_mysql_add_qos(mysql_conn_t *mysql_conn, uint32_t uid,
			    List qos_list)
{
	ListIterator itr = NULL;
	int rc = SLURM_SUCCESS;
	slurmdb_qos_rec_t *object = NULL;
	char *cols = NULL, *extra = NULL, *vals = NULL, *query = NULL,
		*tmp_extra = NULL;
	time_t now = time(NULL);
	char *user_name = NULL;
	int affect_rows = 0;
	int added = 0;
	char *added_preempt = NULL;
	uint32_t qos_cnt;
	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
				   NO_LOCK, NO_LOCK, NO_LOCK };

	if (check_connection(mysql_conn) != SLURM_SUCCESS)
		return ESLURM_DB_CONNECTION;

	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_SUPER_USER))
		return ESLURM_ACCESS_DENIED;

	/* Snapshot the current QOS count under the assoc-mgr lock for
	 * sizing preempt bitmaps. */
	assoc_mgr_lock(&locks);
	qos_cnt = g_qos_count;
	assoc_mgr_unlock(&locks);

	user_name = uid_to_string((uid_t) uid);
	itr = list_iterator_create(qos_list);
	while ((object = list_next(itr))) {
		if (!object->name || !object->name[0]) {
			error("We need a qos name to add.");
			rc = SLURM_ERROR;
			continue;
		}
		/* Build column/value/update fragments for this record. */
		xstrcat(cols, "creation_time, mod_time, name");
		xstrfmtcat(vals, "%ld, %ld, '%s'",
			   now, now, object->name);
		xstrfmtcat(extra, ", mod_time=%ld", now);

		_setup_qos_limits(object, &cols, &vals,
				  &extra, &added_preempt, 1);
		if (added_preempt) {
			/* added_preempt+1 skips the leading separator. */
			object->preempt_bitstr = bit_alloc(qos_cnt);
			bit_unfmt(object->preempt_bitstr, added_preempt+1);
			xfree(added_preempt);
		}

		/* Upsert: resurrect a previously deleted QOS row, keeping
		 * its id stable via LAST_INSERT_ID(id). */
		xstrfmtcat(query,
			   "insert into %s (%s) values (%s) "
			   "on duplicate key update deleted=0, "
			   "id=LAST_INSERT_ID(id)%s;",
			   qos_table, cols, vals, extra);

		if (debug_flags & DEBUG_FLAG_DB_QOS)
			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
		object->id = (uint32_t)mysql_db_insert_ret_id(
			mysql_conn, query);
		xfree(query);
		if (!object->id) {
			error("Couldn't add qos %s", object->name);
			added=0;
			xfree(cols);
			xfree(extra);
			xfree(vals);
			break;
		}

		affect_rows = last_affected_rows(mysql_conn);

		if (!affect_rows) {
			debug2("nothing changed %d", affect_rows);
			xfree(cols);
			xfree(extra);
			xfree(vals);
			continue;
		}

		/* we always have a ', ' as the first 2 chars */
		tmp_extra = slurm_add_slash_to_quotes(extra+2);

		/* Record the change in the transaction table. */
		xstrfmtcat(query,
			   "insert into %s "
			   "(timestamp, action, name, actor, info) "
			   "values (%ld, %u, '%s', '%s', '%s');",
			   txn_table,
			   now, DBD_ADD_QOS, object->name, user_name,
			   tmp_extra);

		xfree(tmp_extra);
		xfree(cols);
		xfree(extra);
		xfree(vals);
		debug4("query\n%s",query);
		rc = mysql_db_query(mysql_conn, query);
		xfree(query);
		if (rc != SLURM_SUCCESS) {
			error("Couldn't add txn");
		} else {
			/* Hand the record over to the update list; remove
			 * it from qos_list so it isn't double-freed. */
			if (addto_update_list(mysql_conn->update_list,
					      SLURMDB_ADD_QOS,
					      object) == SLURM_SUCCESS)
				list_remove(itr);
			added++;
		}

	}
	list_iterator_destroy(itr);
	xfree(user_name);

	/* Nothing committed: roll back any partial state. */
	if (!added) {
		reset_mysql_conn(mysql_conn);
	}

	return rc;
}
/* * attrに対応するエスケープシーケンスを返す * 返り値は静的なバッファ */ static const char *attr2escseq(const struct attribute_tag *attr) { static char escseq[20]; char numstr[20]; int add_semicolon = FALSE; if (!attr->underline && !attr->standout && !attr->bold && !attr->blink && attr->foreground == FALSE && attr->background == FALSE) { return NULL; } strlcpy(escseq, "\033[", sizeof(escseq)); if (attr->underline && s_enter_underline_num != NULL) { add_semicolon = TRUE; strlcat(escseq, s_enter_underline_num, sizeof(escseq)); } if (attr->standout && s_enter_standout_num != NULL) { if (add_semicolon) { strlcat(escseq, ";", sizeof(escseq)); } else { add_semicolon = TRUE; } strlcat(escseq, s_enter_standout_num, sizeof(escseq)); } if (attr->bold && s_bold_num != NULL) { if (add_semicolon) { strlcat(escseq, ";", sizeof(escseq)); } else { add_semicolon = TRUE; } strlcat(escseq, s_bold_num, sizeof(escseq)); } if (attr->blink && s_blink_num != NULL) { if (add_semicolon) { strlcat(escseq, ";", sizeof(escseq)); } else { add_semicolon = TRUE; } strlcat(escseq, s_blink_num, sizeof(escseq)); } if (attr->foreground != FALSE) { if (add_semicolon) { strlcat(escseq, ";", sizeof(escseq)); } else { add_semicolon = TRUE; } snprintf(numstr, sizeof(numstr), "%d", attr->foreground); strlcat(escseq, numstr, sizeof(escseq)); } if (attr->background != FALSE) { if (add_semicolon) { strlcat(escseq, ";", sizeof(escseq)); } snprintf(numstr, sizeof(numstr), "%d", attr->background); strlcat(escseq, numstr, sizeof(escseq)); } strlcat(escseq, "m", sizeof(escseq)); debug2(("attr2escseq underline = %d standout = %d bold = %d blink = %d fore = %d back = %d\n", attr->underline, attr->standout, attr->bold, attr->blink, attr->foreground, attr->background)); debug2(("attr2escseq = %s\n", escseq)); return escseq; }
/*
 * Backfill scheduling pass: try to start lower-priority pending jobs in
 * resource/time windows that will not delay jobs already planned.
 *
 * Builds a priority-sorted job queue, then for each pending job:
 *  - enforces per-partition (max_backfill_job_per_part) and per-user
 *    (max_backfill_job_per_user) test limits
 *  - computes usable nodes from the partition/up/non-completing bitmaps,
 *    reservations (job_test_resv) and the node_space reservation table
 *  - calls _try_sched(); starts the job immediately when it can begin
 *    now, otherwise records a future reservation in node_space (unless
 *    the job's QOS has QOS_FLAG_NO_RESERVE)
 * Periodically yields the slurmctld locks (_yield_locks) when the RPC
 * backlog (defer_rpc_cnt) or elapsed time (sched_timeout) grows, and
 * aborts the cycle if the configuration or partitions changed while the
 * locks were released.
 *
 * Returns 0 normally; 1 when the cycle was aborted because system state
 * changed while locks were yielded (or a planned job start failed).
 */
static int _attempt_backfill(void) { DEF_TIMERS; bool filter_root = false; List job_queue; job_queue_rec_t *job_queue_rec; slurmdb_qos_rec_t *qos_ptr = NULL; int i, j, node_space_recs; struct job_record *job_ptr; struct part_record *part_ptr, **bf_part_ptr = NULL; uint32_t end_time, end_reserve; uint32_t time_limit, comp_time_limit, orig_time_limit, part_time_limit; uint32_t min_nodes, max_nodes, req_nodes; bitstr_t *avail_bitmap = NULL, *resv_bitmap = NULL; bitstr_t *exc_core_bitmap = NULL, *non_cg_bitmap = NULL; bitstr_t *previous_bitmap = NULL; time_t now, sched_start, later_start, start_res, resv_end; node_space_map_t *node_space; struct timeval bf_time1, bf_time2; int rc = 0; int job_test_count = 0; uint32_t *uid = NULL, nuser = 0, bf_parts = 0, *bf_part_jobs = NULL; uint16_t *njobs = NULL; bool already_counted; uint32_t reject_array_job_id = 0; struct part_record *reject_array_part = NULL; uint32_t job_start_cnt = 0; time_t config_update = slurmctld_conf.last_update; time_t part_update = last_part_update; struct timeval start_tv; bf_last_yields = 0;
#ifdef HAVE_ALPS_CRAY
/* * Run a Basil Inventory immediately before setting up the schedule * plan, to avoid race conditions caused by ALPS node state change. * Needs to be done with the node-state lock taken. */ START_TIMER; if (select_g_reconfigure()) { debug4("backfill: not scheduling due to ALPS"); return SLURM_SUCCESS; } END_TIMER; if (debug_flags & DEBUG_FLAG_BACKFILL) info("backfill: ALPS inventory completed, %s", TIME_STR); /* The Basil inventory can take a long time to complete. 
Process * pending RPCs before starting the backfill scheduling logic */ _yield_locks(1000000);
#endif
START_TIMER; if (debug_flags & DEBUG_FLAG_BACKFILL) info("backfill: beginning"); else debug("backfill: beginning"); sched_start = now = time(NULL); gettimeofday(&start_tv, NULL); if (slurm_get_root_filter()) filter_root = true; job_queue = build_job_queue(true, true); if (list_count(job_queue) == 0) { debug("backfill: no jobs to backfill"); list_destroy(job_queue); return 0; } gettimeofday(&bf_time1, NULL); non_cg_bitmap = bit_copy(cg_node_bitmap); bit_not(non_cg_bitmap); slurmctld_diag_stats.bf_queue_len = list_count(job_queue); slurmctld_diag_stats.bf_queue_len_sum += slurmctld_diag_stats. bf_queue_len; slurmctld_diag_stats.bf_last_depth = 0; slurmctld_diag_stats.bf_last_depth_try = 0; slurmctld_diag_stats.bf_when_last_cycle = now; slurmctld_diag_stats.bf_active = 1; node_space = xmalloc(sizeof(node_space_map_t) * (max_backfill_job_cnt * 2 + 1)); node_space[0].begin_time = sched_start; node_space[0].end_time = sched_start + backfill_window; node_space[0].avail_bitmap = bit_copy(avail_node_bitmap); node_space[0].next = 0; node_space_recs = 1; if (debug_flags & DEBUG_FLAG_BACKFILL) _dump_node_space_table(node_space); if (max_backfill_job_per_part) { ListIterator part_iterator; struct part_record *part_ptr; bf_parts = list_count(part_list); bf_part_ptr = xmalloc(sizeof(struct part_record *) * bf_parts); bf_part_jobs = xmalloc(sizeof(int) * bf_parts); part_iterator = list_iterator_create(part_list); i = 0; while ((part_ptr = (struct part_record *) list_next(part_iterator))) { bf_part_ptr[i++] = part_ptr; } list_iterator_destroy(part_iterator); } if (max_backfill_job_per_user) { uid = xmalloc(BF_MAX_USERS * sizeof(uint32_t)); njobs = xmalloc(BF_MAX_USERS * sizeof(uint16_t)); } sort_job_queue(job_queue); while ((job_queue_rec = (job_queue_rec_t *) list_pop(job_queue))) { if (slurmctld_config.shutdown_time) break; if (((defer_rpc_cnt > 0) && 
(slurmctld_config.server_thread_count >= defer_rpc_cnt)) || (_delta_tv(&start_tv) >= sched_timeout)) { if (debug_flags & DEBUG_FLAG_BACKFILL) { END_TIMER; info("backfill: completed yielding locks " "after testing %d jobs, %s", job_test_count, TIME_STR); } if ((_yield_locks(yield_sleep) && !backfill_continue) || (slurmctld_conf.last_update != config_update) || (last_part_update != part_update)) { if (debug_flags & DEBUG_FLAG_BACKFILL) { info("backfill: system state changed, " "breaking out after testing %d " "jobs", job_test_count); } rc = 1; xfree(job_queue_rec); break; } /* cg_node_bitmap may be changed */ bit_copybits(non_cg_bitmap, cg_node_bitmap); bit_not(non_cg_bitmap); /* Reset backfill scheduling timers, resume testing */ sched_start = time(NULL); gettimeofday(&start_tv, NULL); job_test_count = 0; START_TIMER; } job_ptr = job_queue_rec->job_ptr; /* With bf_continue configured, the original job could have * been cancelled and purged. Validate pointer here. */ if ((job_ptr->magic != JOB_MAGIC) || (job_ptr->job_id != job_queue_rec->job_id)) { xfree(job_queue_rec); continue; } orig_time_limit = job_ptr->time_limit; part_ptr = job_queue_rec->part_ptr; job_test_count++; xfree(job_queue_rec); if (!IS_JOB_PENDING(job_ptr)) continue; /* started in other partition */ if (!avail_front_end(job_ptr)) continue; /* No available frontend for this job */ if (job_ptr->array_task_id != NO_VAL) { if ((reject_array_job_id == job_ptr->array_job_id) && (reject_array_part == part_ptr)) continue; /* already rejected array element */ /* assume reject whole array for now, clear if OK */ reject_array_job_id = job_ptr->array_job_id; reject_array_part = part_ptr; } job_ptr->part_ptr = part_ptr; if (debug_flags & DEBUG_FLAG_BACKFILL) info("backfill test for job %u", job_ptr->job_id); slurmctld_diag_stats.bf_last_depth++; already_counted = false; if (max_backfill_job_per_part) { bool skip_job = false; for (j = 0; j < bf_parts; j++) { if (bf_part_ptr[j] != job_ptr->part_ptr) continue; if 
(bf_part_jobs[j]++ >= max_backfill_job_per_part) skip_job = true; break; } if (skip_job) { if (debug_flags & DEBUG_FLAG_BACKFILL) debug("backfill: have already " "checked %u jobs for " "partition %s; skipping " "job %u", max_backfill_job_per_part, job_ptr->part_ptr->name, job_ptr->job_id); continue; } } if (max_backfill_job_per_user) { for (j = 0; j < nuser; j++) { if (job_ptr->user_id == uid[j]) { njobs[j]++; if (debug_flags & DEBUG_FLAG_BACKFILL) debug("backfill: user %u: " "#jobs %u", uid[j], njobs[j]); break; } } if (j == nuser) { /* user not found */ static bool bf_max_user_msg = true; if (nuser < BF_MAX_USERS) { uid[j] = job_ptr->user_id; njobs[j] = 1; nuser++; } else if (bf_max_user_msg) { bf_max_user_msg = false; error("backfill: too many users in " "queue. Consider increasing " "BF_MAX_USERS"); } if (debug_flags & DEBUG_FLAG_BACKFILL) debug2("backfill: found new user %u. " "Total #users now %u", job_ptr->user_id, nuser); } else { if (njobs[j] >= max_backfill_job_per_user) { /* skip job */ if (debug_flags & DEBUG_FLAG_BACKFILL) debug("backfill: have already " "checked %u jobs for " "user %u; skipping " "job %u", max_backfill_job_per_user, job_ptr->user_id, job_ptr->job_id); continue; } } } if (((part_ptr->state_up & PARTITION_SCHED) == 0) || (part_ptr->node_bitmap == NULL)) continue; if ((part_ptr->flags & PART_FLAG_ROOT_ONLY) && filter_root) continue; if ((!job_independent(job_ptr, 0)) || (license_job_test(job_ptr, time(NULL)) != SLURM_SUCCESS)) continue; /* Determine minimum and maximum node counts */ min_nodes = MAX(job_ptr->details->min_nodes, part_ptr->min_nodes); if (job_ptr->details->max_nodes == 0) max_nodes = part_ptr->max_nodes; else max_nodes = MIN(job_ptr->details->max_nodes, part_ptr->max_nodes); max_nodes = MIN(max_nodes, 500000); /* prevent overflows */ if (job_ptr->details->max_nodes) req_nodes = max_nodes; else req_nodes = min_nodes; if (min_nodes > max_nodes) { /* job's min_nodes exceeds partition's max_nodes */ continue; } /* Determine 
job's expected completion time */ if (part_ptr->max_time == INFINITE) part_time_limit = 365 * 24 * 60; /* one year */ else part_time_limit = part_ptr->max_time; if (job_ptr->time_limit == NO_VAL) { time_limit = part_time_limit; } else { if (part_ptr->max_time == INFINITE) time_limit = job_ptr->time_limit; else time_limit = MIN(job_ptr->time_limit, part_time_limit); } comp_time_limit = time_limit; qos_ptr = job_ptr->qos_ptr; if (qos_ptr && (qos_ptr->flags & QOS_FLAG_NO_RESERVE) && slurm_get_preempt_mode()) time_limit = job_ptr->time_limit = 1; else if (job_ptr->time_min && (job_ptr->time_min < time_limit)) time_limit = job_ptr->time_limit = job_ptr->time_min; /* Determine impact of any resource reservations */ later_start = now; FREE_NULL_BITMAP(previous_bitmap); TRY_LATER: if (slurmctld_config.shutdown_time) break; if (((defer_rpc_cnt > 0) && (slurmctld_config.server_thread_count >= defer_rpc_cnt)) || (_delta_tv(&start_tv) >= sched_timeout)) { uint32_t save_job_id = job_ptr->job_id; uint32_t save_time_limit = job_ptr->time_limit; job_ptr->time_limit = orig_time_limit; if (debug_flags & DEBUG_FLAG_BACKFILL) { END_TIMER; info("backfill: completed yielding locks " "after testing %d jobs, %s", job_test_count, TIME_STR); } if ((_yield_locks(yield_sleep) && !backfill_continue) || (slurmctld_conf.last_update != config_update) || (last_part_update != part_update)) { if (debug_flags & DEBUG_FLAG_BACKFILL) { info("backfill: system state changed, " "breaking out after testing %d " "jobs", job_test_count); } rc = 1; break; } /* cg_node_bitmap may be changed */ bit_copybits(non_cg_bitmap, cg_node_bitmap); bit_not(non_cg_bitmap); /* With bf_continue configured, the original job could * have been scheduled or cancelled and purged. * Revalidate job the record here. 
*/ if ((job_ptr->magic != JOB_MAGIC) || (job_ptr->job_id != save_job_id)) continue; if (!IS_JOB_PENDING(job_ptr)) continue; if (!avail_front_end(job_ptr)) continue; /* No available frontend */ job_ptr->time_limit = save_time_limit; /* Reset backfill scheduling timers, resume testing */ sched_start = time(NULL); gettimeofday(&start_tv, NULL); job_test_count = 1; START_TIMER; } FREE_NULL_BITMAP(avail_bitmap); FREE_NULL_BITMAP(exc_core_bitmap); start_res = later_start; later_start = 0; j = job_test_resv(job_ptr, &start_res, true, &avail_bitmap, &exc_core_bitmap); if (j != SLURM_SUCCESS) { job_ptr->time_limit = orig_time_limit; continue; } if (start_res > now) end_time = (time_limit * 60) + start_res; else end_time = (time_limit * 60) + now; resv_end = find_resv_end(start_res); /* Identify usable nodes for this job */ bit_and(avail_bitmap, part_ptr->node_bitmap); bit_and(avail_bitmap, up_node_bitmap); bit_and(avail_bitmap, non_cg_bitmap); for (j=0; ; ) { if ((node_space[j].end_time > start_res) && node_space[j].next && (later_start == 0)) later_start = node_space[j].end_time; if (node_space[j].end_time <= start_res) ; else if (node_space[j].begin_time <= end_time) { bit_and(avail_bitmap, node_space[j].avail_bitmap); } else break; if ((j = node_space[j].next) == 0) break; } if ((resv_end++) && ((later_start == 0) || (resv_end < later_start))) { later_start = resv_end; } if (job_ptr->details->exc_node_bitmap) { bit_not(job_ptr->details->exc_node_bitmap); bit_and(avail_bitmap, job_ptr->details->exc_node_bitmap); bit_not(job_ptr->details->exc_node_bitmap); } /* Test if insufficient nodes remain OR * required nodes missing OR * nodes lack features OR * no change since previously tested nodes (only changes * in other partition nodes) */ if ((bit_set_count(avail_bitmap) < min_nodes) || ((job_ptr->details->req_node_bitmap) && (!bit_super_set(job_ptr->details->req_node_bitmap, avail_bitmap))) || (job_req_node_filter(job_ptr, avail_bitmap)) || (previous_bitmap && 
bit_equal(previous_bitmap, avail_bitmap))) { if (later_start) { job_ptr->start_time = 0; goto TRY_LATER; } /* Job can not start until too far in the future */ job_ptr->time_limit = orig_time_limit; job_ptr->start_time = sched_start + backfill_window; continue; } FREE_NULL_BITMAP(previous_bitmap); previous_bitmap = bit_copy(avail_bitmap); /* Identify nodes which are definitely off limits */ FREE_NULL_BITMAP(resv_bitmap); resv_bitmap = bit_copy(avail_bitmap); bit_not(resv_bitmap); /* this is the time consuming operation */ debug2("backfill: entering _try_sched for job %u.", job_ptr->job_id); if (!already_counted) { slurmctld_diag_stats.bf_last_depth_try++; already_counted = true; } if (debug_flags & DEBUG_FLAG_BACKFILL) _dump_job_test(job_ptr, avail_bitmap, start_res); j = _try_sched(job_ptr, &avail_bitmap, min_nodes, max_nodes, req_nodes, exc_core_bitmap); now = time(NULL); if (j != SLURM_SUCCESS) { job_ptr->time_limit = orig_time_limit; job_ptr->start_time = 0; continue; /* not runable */ } if (start_res > job_ptr->start_time) { job_ptr->start_time = start_res; last_job_update = now; } if (job_ptr->start_time <= now) { uint32_t save_time_limit = job_ptr->time_limit; int rc = _start_job(job_ptr, resv_bitmap); if (qos_ptr && (qos_ptr->flags & QOS_FLAG_NO_RESERVE)) { if (orig_time_limit == NO_VAL) { acct_policy_alter_job( job_ptr, comp_time_limit); job_ptr->time_limit = comp_time_limit; } else { acct_policy_alter_job( job_ptr, orig_time_limit); job_ptr->time_limit = orig_time_limit; } job_ptr->end_time = job_ptr->start_time + (job_ptr->time_limit * 60); } else if ((rc == SLURM_SUCCESS) && job_ptr->time_min) { /* Set time limit as high as possible */ acct_policy_alter_job(job_ptr, comp_time_limit); job_ptr->time_limit = comp_time_limit; job_ptr->end_time = job_ptr->start_time + (comp_time_limit * 60); _reset_job_time_limit(job_ptr, now, node_space); time_limit = job_ptr->time_limit; } else if (orig_time_limit == NO_VAL) { acct_policy_alter_job(job_ptr, 
comp_time_limit); job_ptr->time_limit = comp_time_limit; job_ptr->end_time = job_ptr->start_time + (job_ptr->time_limit * 60); } else { acct_policy_alter_job(job_ptr, orig_time_limit); job_ptr->time_limit = orig_time_limit; job_ptr->end_time = job_ptr->start_time + (job_ptr->time_limit * 60); } if (rc == ESLURM_ACCOUNTING_POLICY) { /* Unknown future start time, just skip job */ job_ptr->start_time = 0; continue; } else if (rc != SLURM_SUCCESS) { /* Planned to start job, but something bad * happened. */ job_ptr->start_time = 0; break; } else { /* Started this job, move to next one */ reject_array_job_id = 0; reject_array_part = NULL; /* Update the database if job time limit * changed and move to next job */ if (save_time_limit != job_ptr->time_limit) jobacct_storage_g_job_start(acct_db_conn, job_ptr); if (max_backfill_jobs_start && (++job_start_cnt >= max_backfill_jobs_start)) break; continue; } } else job_ptr->time_limit = orig_time_limit; if (later_start && (job_ptr->start_time > later_start)) { /* Try later when some nodes currently reserved for * pending jobs are free */ job_ptr->start_time = 0; goto TRY_LATER; } if (job_ptr->start_time > (sched_start + backfill_window)) { /* Starts too far in the future to worry about */ continue; } if (node_space_recs >= max_backfill_job_cnt) { /* Already have too many jobs to deal with */ break; } end_reserve = job_ptr->start_time + (time_limit * 60); if (_test_resv_overlap(node_space, avail_bitmap, job_ptr->start_time, end_reserve)) { /* This job overlaps with an existing reservation for * job to be backfill scheduled, which the sched * plugin does not know about. Try again later. 
*/ later_start = job_ptr->start_time; job_ptr->start_time = 0; goto TRY_LATER; } /* * Add reservation to scheduling table if appropriate */ if (qos_ptr && (qos_ptr->flags & QOS_FLAG_NO_RESERVE)) continue; reject_array_job_id = 0; reject_array_part = NULL; if (debug_flags & DEBUG_FLAG_BACKFILL) _dump_job_sched(job_ptr, end_reserve, avail_bitmap); xfree(job_ptr->sched_nodes); job_ptr->sched_nodes = bitmap2node_name(avail_bitmap); bit_not(avail_bitmap); _add_reservation(job_ptr->start_time, end_reserve, avail_bitmap, node_space, &node_space_recs); if (debug_flags & DEBUG_FLAG_BACKFILL) _dump_node_space_table(node_space); } xfree(bf_part_jobs); xfree(bf_part_ptr); xfree(uid); xfree(njobs); FREE_NULL_BITMAP(avail_bitmap); FREE_NULL_BITMAP(exc_core_bitmap); FREE_NULL_BITMAP(resv_bitmap); FREE_NULL_BITMAP(non_cg_bitmap); FREE_NULL_BITMAP(previous_bitmap); for (i=0; ; ) { FREE_NULL_BITMAP(node_space[i].avail_bitmap); if ((i = node_space[i].next) == 0) break; } xfree(node_space); list_destroy(job_queue); gettimeofday(&bf_time2, NULL); _do_diag_stats(&bf_time1, &bf_time2, yield_sleep); if (debug_flags & DEBUG_FLAG_BACKFILL) { END_TIMER; info("backfill: completed testing %d jobs, %s", job_test_count, TIME_STR); } return rc; }
/* * Set rlimit using value of env vars such as SLURM_RLIMIT_FSIZE if * the slurm config file has PropagateResourceLimits=YES or the user * requested it with srun --propagate. * * NOTE: THIS FUNCTION SHOULD ONLY BE CALLED RIGHT BEFORE THE EXEC OF * A SCRIPT AFTER THE FORK SO AS TO LIMIT THE ABOUT OF EFFECT THE * LIMITS HAVE WHEN COMBINED WITH THE SLURMSTEPD. RLIMIT_FSIZE IS THE * MAIN REASON SINCE IF THE USER SETS THIS TO BE LOWER THAN THE SIZE * OF THE CURRENT SLURMD.LOG THE STEPD WILL CORE THE NEXT TIME * ANYTHING IS WRITTEN TO IT. SO IF RUNNING +DEBUG2 AND THE USER IS * GETTING CORES WITH FILE SYSTEM LIMIT ERRORS THIS IS THE REASON. */ static int _set_limit(char **env, slurm_rlimits_info_t *rli) { unsigned long env_value; char max[24], cur[24], req[24]; struct rlimit r; bool u_req_propagate; /* e.g. true if 'srun --propagate' */ char env_name[25] = "SLURM_RLIMIT_"; char *rlimit_name = &env_name[6];
/* Build e.g. "SLURM_RLIMIT_CPU"; rlimit_name points at the "RLIMIT_..."
 * suffix used in log messages.
 * NOTE(review): env_name leaves only 12 bytes for rli->name after the
 * "SLURM_RLIMIT_" prefix -- confirm all rli->name values fit. */
strcpy( &env_name[sizeof("SLURM_RLIMIT_")-1], rli->name ); if (_get_env_val( env, env_name, &env_value, &u_req_propagate )){ debug( "Couldn't find %s in environment", env_name ); return SLURM_ERROR; } /* * Users shouldn't get the SLURM_RLIMIT_* env vars in their environ */ unsetenvp( env, env_name ); /* * We'll only attempt to set the propagated soft rlimit when indicated * by the slurm conf file settings, or the user requested it. */ if ( ! 
(rli->propagate_flag == PROPAGATE_RLIMITS || u_req_propagate)) return SLURM_SUCCESS; if (getrlimit( rli->resource, &r ) < 0) { error("getrlimit(%s): %m", rlimit_name); return SLURM_ERROR; } /* * Nothing to do if the rlimit won't change */ if (r.rlim_cur == (rlim_t) env_value) { debug2( "_set_limit: %s setrlimit %s no change in value: %lu", u_req_propagate?"user":"******", rlimit_name, (unsigned long) r.rlim_cur); return SLURM_SUCCESS; } debug2("_set_limit: %-14s: max:%s cur:%s req:%s", rlimit_name, rlim_to_string (r.rlim_max, max, sizeof (max)), rlim_to_string (r.rlim_cur, cur, sizeof (cur)), rlim_to_string (env_value, req, sizeof (req)) );
/* Request the propagated value as the new soft limit; raise the hard
 * limit when it is below the request (per POSIX, raising rlim_max may
 * fail for unprivileged processes -- handled by the error path below) */
r.rlim_cur = (rlim_t) env_value; if (r.rlim_max < r.rlim_cur) r.rlim_max = r.rlim_cur; if (setrlimit( rli->resource, &r ) < 0) { /* * Report an error only if the user requested propagate */ if (u_req_propagate) { error( "Can't propagate %s of %s from submit host: %m", rlimit_name, r.rlim_cur == RLIM_INFINITY ? "'unlimited'" : rlim_to_string( r.rlim_cur, cur, sizeof(cur))); } else { verbose("Can't propagate %s of %s from submit host: %m", rlimit_name, r.rlim_cur == RLIM_INFINITY ? "'unlimited'" : rlim_to_string( r.rlim_cur, cur, sizeof(cur))); } return SLURM_ERROR; } debug2( "_set_limit: %s setrlimit %s succeeded", u_req_propagate?"user":"******", rlimit_name ); return SLURM_SUCCESS; }
/* Search the DB for any entry related to the file being received.
 *
 * Scans the per-agent integrity database for f_name and compares the
 * stored checksum line against c_sum:
 *  - no entry          -> append a "+++<sum>" line; alert only when
 *                         configured to notify on new files (return 1),
 *                         otherwise return 0
 *  - checksum matches  -> nothing to do, return 0
 *  - checksum differs  -> comment out ('#') the old line, append an
 *                         updated one, build a human-readable summary of
 *                         the changed attributes in sdb.comment, fill lf
 *                         and return 1 so an alert is generated
 *
 * Fixes vs. the previous revision:
 *  - the group-ownership message was written into sdb.owner, clobbering
 *    the ownership message; it now goes into sdb.gowner
 *  - the inode message was written into sdb.mtime, clobbering the
 *    modification-time message; it now goes into sdb.inode
 *  - sdb.mtime / sdb.inode were built but never included in the alert
 *    text; they are now part of sdb.comment
 */
static int DB_Search(const char *f_name, char *c_sum, Eventinfo *lf)
{
    int p = 0;          /* how many times this file changed before */
    size_t sn_size;
    int agent_id;

    char *saved_sum;
    char *saved_name;
    FILE *fp;

    SyscheckSum oldsum;
    SyscheckSum newsum;

    /* Get db pointer */
    fp = DB_File(lf->location, &agent_id);
    if (!fp) {
        merror("%s: Error handling integrity database.", ARGV0);
        sdb.db_err++;
        lf->data = NULL;
        return (0);
    }

    /* Read the integrity file and search for a possible entry */
    if (fgetpos(fp, &sdb.init_pos) == -1) {
        merror("%s: Error handling integrity database (fgetpos).", ARGV0);
        return (0);
    }

    /* Loop over the file */
    while (fgets(sdb.buf, OS_MAXSTR, fp) != NULL) {
        /* Ignore blank lines and lines with a comment */
        if (sdb.buf[0] == '\n' || sdb.buf[0] == '#') {
            fgetpos(fp, &sdb.init_pos); /* Get next location */
            continue;
        }

        /* Get name */
        saved_name = strchr(sdb.buf, ' ');
        if (saved_name == NULL) {
            merror("%s: Invalid integrity message in the database.", ARGV0);
            fgetpos(fp, &sdb.init_pos); /* Get next location */
            continue;
        }
        *saved_name = '\0';
        saved_name++;

        /* New format - with a timestamp */
        if (*saved_name == '!') {
            saved_name = strchr(saved_name, ' ');
            if (saved_name == NULL) {
                merror("%s: Invalid integrity message in the database", ARGV0);
                fgetpos(fp, &sdb.init_pos); /* Get next location */
                continue;
            }
            saved_name++;
        }

        /* Remove newline from saved_name */
        sn_size = strlen(saved_name);
        sn_size -= 1;
        if (saved_name[sn_size] == '\n') {
            saved_name[sn_size] = '\0';
        }

        /* If name is different, go to next one */
        if (strcmp(f_name, saved_name) != 0) {
            /* Save current location */
            fgetpos(fp, &sdb.init_pos);
            continue;
        }

        saved_sum = sdb.buf;

        /* First three bytes are for frequency check */
        saved_sum += 3;

        /* Checksum match, we can just return and keep going */
        if (strcmp(saved_sum, c_sum) == 0) {
            lf->data = NULL;
            return (0);
        }

        debug2("Agent: %d, location: <%s>, file: <%s>, sum: <%s>, saved: <%s>",
               agent_id, lf->location, f_name, c_sum, saved_sum);

        /* If we reached here, the checksum of the file has changed.
         * The three leading marker bytes encode prior change count. */
        if (saved_sum[-3] == '!') {
            p++;
            if (saved_sum[-2] == '!') {
                p++;
                if (saved_sum[-1] == '!') {
                    p++;
                } else if (saved_sum[-1] == '?') {
                    p += 2;
                }
            }
        }

        /* Check the number of changes */
        if (!Config.syscheck_auto_ignore) {
            sdb.syscheck_dec->id = sdb.id1;
        } else {
            switch (p) {
                case 0:
                    sdb.syscheck_dec->id = sdb.id1;
                    break;
                case 1:
                    sdb.syscheck_dec->id = sdb.id2;
                    break;
                case 2:
                    sdb.syscheck_dec->id = sdb.id3;
                    break;
                default:
                    /* Changed too often: auto-ignore, no alert */
                    lf->data = NULL;
                    return (0);
                    break;
            }
        }

        /* Add new checksum to the database */
        /* Commenting the file entry and adding a new one later */
        if (fsetpos(fp, &sdb.init_pos)) {
            merror("%s: Error handling integrity database (fsetpos).", ARGV0);
            return (0);
        }
        fputc('#', fp);

        /* Add the new entry at the end of the file */
        fseek(fp, 0, SEEK_END);
        fprintf(fp, "%c%c%c%s !%ld %s\n",
                '!',
                p >= 1 ? '!' : '+',
                p == 2 ? '!' : (p > 2) ? '?' : '+',
                c_sum,
                (long int)lf->time,
                f_name);
        fflush(fp);

        switch (DecodeSum(&newsum, c_sum)) {
            case -1:
                merror("%s: ERROR: Couldn't decode syscheck sum from log.",
                       ARGV0);
                lf->data = NULL;
                return 0;

            case 0:
                switch (DecodeSum(&oldsum, saved_sum)) {
                    case -1:
                        merror("%s: ERROR: Couldn't decode syscheck sum from database.",
                               ARGV0);
                        lf->data = NULL;
                        return 0;

                    case 0:
                        FillEvent(lf, f_name, &newsum);

                        /* Generate size message */
                        if (!oldsum.size || !newsum.size
                                || strcmp(oldsum.size, newsum.size) == 0) {
                            sdb.size[0] = '\0';
                        } else {
                            snprintf(sdb.size, OS_FLSIZE,
                                     "Size changed from '%s' to '%s'\n",
                                     oldsum.size, newsum.size);
                            os_strdup(oldsum.size, lf->size_before);
                        }

                        /* Permission message */
                        if (oldsum.perm == newsum.perm) {
                            sdb.perm[0] = '\0';
                        } else if (oldsum.perm > 0 && newsum.perm > 0) {
                            snprintf(sdb.perm, OS_FLSIZE,
                                     "Permissions changed from "
                                     "'%c%c%c%c%c%c%c%c%c' "
                                     "to '%c%c%c%c%c%c%c%c%c'\n",
                                     (oldsum.perm & S_IRUSR) ? 'r' : '-',
                                     (oldsum.perm & S_IWUSR) ? 'w' : '-',
                                     (oldsum.perm & S_ISUID) ? 's' :
                                     (oldsum.perm & S_IXUSR) ? 'x' : '-',
                                     (oldsum.perm & S_IRGRP) ? 'r' : '-',
                                     (oldsum.perm & S_IWGRP) ? 'w' : '-',
                                     (oldsum.perm & S_ISGID) ? 's' :
                                     (oldsum.perm & S_IXGRP) ? 'x' : '-',
                                     (oldsum.perm & S_IROTH) ? 'r' : '-',
                                     (oldsum.perm & S_IWOTH) ? 'w' : '-',
                                     (oldsum.perm & S_ISVTX) ? 't' :
                                     (oldsum.perm & S_IXOTH) ? 'x' : '-',
                                     (newsum.perm & S_IRUSR) ? 'r' : '-',
                                     (newsum.perm & S_IWUSR) ? 'w' : '-',
                                     (newsum.perm & S_ISUID) ? 's' :
                                     (newsum.perm & S_IXUSR) ? 'x' : '-',
                                     (newsum.perm & S_IRGRP) ? 'r' : '-',
                                     (newsum.perm & S_IWGRP) ? 'w' : '-',
                                     (newsum.perm & S_ISGID) ? 's' :
                                     (newsum.perm & S_IXGRP) ? 'x' : '-',
                                     (newsum.perm & S_IROTH) ? 'r' : '-',
                                     (newsum.perm & S_IWOTH) ? 'w' : '-',
                                     (newsum.perm & S_ISVTX) ? 't' :
                                     (newsum.perm & S_IXOTH) ? 'x' : '-');
                            lf->perm_before = oldsum.perm;
                        }

                        /* Ownership message */
                        if (!newsum.uid || !oldsum.uid
                                || strcmp(newsum.uid, oldsum.uid) == 0) {
                            sdb.owner[0] = '\0';
                        } else {
                            if (oldsum.uname && newsum.uname) {
                                snprintf(sdb.owner, OS_FLSIZE,
                                         "Ownership was '%s (%s)', now it is '%s (%s)'\n",
                                         oldsum.uname, oldsum.uid,
                                         newsum.uname, newsum.uid);
                                os_strdup(oldsum.uname, lf->uname_before);
                            } else
                                snprintf(sdb.owner, OS_FLSIZE,
                                         "Ownership was '%s', "
                                         "now it is '%s'\n",
                                         oldsum.uid, newsum.uid);

                            os_strdup(oldsum.uid, lf->owner_before);
                        }

                        /* Group ownership message */
                        if (!newsum.gid || !oldsum.gid
                                || strcmp(newsum.gid, oldsum.gid) == 0) {
                            sdb.gowner[0] = '\0';
                        } else {
                            if (oldsum.gname && newsum.gname) {
                                /* Fix: was written into sdb.owner,
                                 * overwriting the ownership message */
                                snprintf(sdb.gowner, OS_FLSIZE,
                                         "Group ownership was '%s (%s)', now it is '%s (%s)'\n",
                                         oldsum.gname, oldsum.gid,
                                         newsum.gname, newsum.gid);
                                os_strdup(oldsum.gname, lf->gname_before);
                            } else
                                snprintf(sdb.gowner, OS_FLSIZE,
                                         "Group ownership was '%s', "
                                         "now it is '%s'\n",
                                         oldsum.gid, newsum.gid);

                            os_strdup(oldsum.gid, lf->gowner_before);
                        }

                        /* MD5 message */
                        if (!newsum.md5 || !oldsum.md5
                                || strcmp(newsum.md5, oldsum.md5) == 0) {
                            sdb.md5[0] = '\0';
                        } else {
                            snprintf(sdb.md5, OS_FLSIZE,
                                     "Old md5sum was: '%s'\n"
                                     "New md5sum is : '%s'\n",
                                     oldsum.md5, newsum.md5);
                            os_strdup(oldsum.md5, lf->md5_before);
                        }

                        /* SHA-1 message */
                        if (!newsum.sha1 || !oldsum.sha1
                                || strcmp(newsum.sha1, oldsum.sha1) == 0) {
                            sdb.sha1[0] = '\0';
                        } else {
                            snprintf(sdb.sha1, OS_FLSIZE,
                                     "Old sha1sum was: '%s'\n"
                                     "New sha1sum is : '%s'\n",
                                     oldsum.sha1, newsum.sha1);
                            os_strdup(oldsum.sha1, lf->sha1_before);
                        }

                        /* Modification time message */
                        if (oldsum.mtime && newsum.mtime
                                && oldsum.mtime != newsum.mtime) {
                            char *old_ctime = strdup(ctime(&oldsum.mtime));
                            char *new_ctime = strdup(ctime(&newsum.mtime));
                            old_ctime[strlen(old_ctime) - 1] = '\0';
                            new_ctime[strlen(new_ctime) - 1] = '\0';

                            snprintf(sdb.mtime, OS_FLSIZE,
                                     "Old modification time was: '%s', now it is '%s'\n",
                                     old_ctime, new_ctime);
                            lf->mtime_before = oldsum.mtime;
                            free(old_ctime);
                            free(new_ctime);
                        } else {
                            sdb.mtime[0] = '\0';
                        }

                        /* Inode message */
                        if (oldsum.inode && newsum.inode
                                && oldsum.inode != newsum.inode) {
                            /* Fix: was written into sdb.mtime,
                             * overwriting the modification time message */
                            snprintf(sdb.inode, OS_FLSIZE,
                                     "Old inode was: '%ld', now it is '%ld'\n",
                                     oldsum.inode, newsum.inode);
                            lf->inode_before = oldsum.inode;
                        } else {
                            sdb.inode[0] = '\0';
                        }

                        /* Provide information about the file
                         * (now including the mtime/inode messages, which
                         * were built above but previously never shown) */
                        snprintf(sdb.comment, OS_MAXSTR,
                                 "Integrity checksum changed for: "
                                 "'%.756s'\n"
                                 "%s"
                                 "%s"
                                 "%s"
                                 "%s"
                                 "%s"
                                 "%s"
                                 "%s"
                                 "%s"
                                 "%s%s",
                                 f_name,
                                 sdb.size,
                                 sdb.perm,
                                 sdb.owner,
                                 sdb.gowner,
                                 sdb.md5,
                                 sdb.sha1,
                                 sdb.mtime,
                                 sdb.inode,
                                 lf->data == NULL ? "" : "What changed:\n",
                                 lf->data == NULL ? "" : lf->data);

                        if (lf->data)
                            os_strdup(lf->data, lf->diff);

                        lf->event_type = FIM_MODIFIED;

                        if (wdb_insert_fim(lf->agent_id ? atoi(lf->agent_id) : 0,
                                           lf->location, f_name, "modified",
                                           &newsum, (long int)lf->time) < 0) {
                            merror("%s: ERROR: Couldn't insert FIM event into database.",
                                   ARGV0);
                            debug1("%s: DEBUG: Agent: '%s', file: '%s'",
                                   ARGV0, lf->agent_id ? lf->agent_id : "0",
                                   f_name);
                        }
                        break;

                    case 1:
                        /* If file was re-added, do not compare changes */
                        sdb.syscheck_dec->id = sdb.idn;
                        lf->event_type = FIM_READDED;
                        FillEvent(lf, f_name, &newsum);
                        snprintf(sdb.comment, OS_MAXSTR,
                                 "File '%.756s' was re-added.", f_name);

                        if (wdb_insert_fim(lf->agent_id ? atoi(lf->agent_id) : 0,
                                           lf->location, f_name, "readded",
                                           &newsum, (long int)lf->time) < 0) {
                            merror("%s: ERROR: Couldn't insert FIM event into database.",
                                   ARGV0);
                            debug1("%s: DEBUG: Agent: '%s', file: '%s'",
                                   ARGV0, lf->agent_id ? lf->agent_id : "0",
                                   f_name);
                        }
                        break;
                }
                break;

            case 1:
                /* File deleted */
                sdb.syscheck_dec->id = sdb.idd;
                os_strdup(f_name, lf->filename);
                lf->event_type = FIM_DELETED;
                snprintf(sdb.comment, OS_MAXSTR,
                         "File '%.756s' was deleted. Unable to retrieve "
                         "checksum.", f_name);

                if (wdb_insert_fim(lf->agent_id ? atoi(lf->agent_id) : 0,
                                   lf->location, f_name, "deleted",
                                   NULL, (long int)lf->time) < 0) {
                    merror("%s: ERROR: Couldn't insert FIM event into database.",
                           ARGV0);
                    debug1("%s: DEBUG: Agent: '%s', file: '%s'",
                           ARGV0, lf->agent_id ? lf->agent_id : "0", f_name);
                }
        }

        /* Create a new log message */
        free(lf->full_log);
        os_strdup(sdb.comment, lf->full_log);
        lf->log = lf->full_log;
        lf->data = NULL;

        /* Set decoder */
        lf->decoder_info = sdb.syscheck_dec;

        return (1);
    } /* Continue */

    /* If we reach here, this file is not present in our database */
    fseek(fp, 0, SEEK_END);
    fprintf(fp, "+++%s !%ld %s\n", c_sum, (long int)lf->time, f_name);
    fflush(fp);

    /* Insert row in SQLite DB */
    switch (DecodeSum(&newsum, c_sum)) {
        case -1:
            merror("%s: ERROR: Couldn't decode syscheck sum from log.", ARGV0);
            break;

        case 0:
            lf->event_type = FIM_ADDED;

            if (wdb_insert_fim(lf->agent_id ? atoi(lf->agent_id) : 0,
                               lf->location, f_name, "added",
                               &newsum, (long int)lf->time) < 0) {
                merror("%s: ERROR: Couldn't insert FIM event into database.",
                       ARGV0);
                debug1("%s: DEBUG: Agent: '%s', file: '%s'",
                       ARGV0, lf->agent_id ? lf->agent_id : "0", f_name);
            }

            /* Alert if configured to notify on new files */
            if ((Config.syscheck_alert_new == 1) && DB_IsCompleted(agent_id)) {
                sdb.syscheck_dec->id = sdb.idn;
                FillEvent(lf, f_name, &newsum);

                /* New file message */
                snprintf(sdb.comment, OS_MAXSTR,
                         "New file '%.756s' "
                         "added to the file system.", f_name);

                /* Create a new log message */
                free(lf->full_log);
                os_strdup(sdb.comment, lf->full_log);
                lf->log = lf->full_log;

                /* Set decoder */
                lf->decoder_info = sdb.syscheck_dec;
                lf->data = NULL;

                return (1);
            }
            break;

        case 1:
            merror("%s: WARN: Missing file entry.", ARGV0);
            break;
    }

    lf->data = NULL;
    return (0);
}
/*
 * Apply resource limits in the task process right before exec.
 * Propagates the SLURM_RLIMIT_* environment limits via _set_limit() for
 * every known rlimit, then caps RSS (where supported) and virtual
 * memory (RLIMIT_AS, falling back to RLIMIT_DATA; scaled by the
 * configured VSizeFactor) to the step's memory allocation.
 * Returns SLURM_SUCCESS; a setrlimit() failure on the memory caps is
 * fatal() since it indicates the limit is already exceeded.
 */
int set_user_limits(stepd_step_rec_t *job)
{
#ifdef RLIMIT_AS
#define SLURM_RLIMIT_VSIZE RLIMIT_AS
#define SLURM_RLIMIT_VNAME "RLIMIT_AS"
#elif defined(RLIMIT_DATA)
/* RLIMIT_DATA is useless on many systems which provide anonymous
 * mmap()'s in addition to brk(), use it here only as a fallback for
 * oddball systems lacking RLIMIT_AS. */
#define SLURM_RLIMIT_VSIZE RLIMIT_DATA
#define SLURM_RLIMIT_VNAME "RLIMIT_DATA"
#endif
	slurm_rlimits_info_t *rli;
	struct rlimit r;
	rlim_t task_mem_bytes;
#ifdef SLURM_RLIMIT_VSIZE
	uint16_t vsize_factor;
#endif

	if (getrlimit(RLIMIT_CPU, &r) == 0) {
		if (r.rlim_max != RLIM_INFINITY) {
			error("SLURM process CPU time limit is %d seconds",
			      (int) r.rlim_max);
		}
	}

	for (rli = get_slurm_rlimits_info(); rli->name; rli++)
		_set_limit( job->env, rli );

	/* Set soft and hard rss and vsize limit for this process,
	 * handle job limit (for all spawned processes) in slurmd */
	task_mem_bytes = job->step_mem;	/* MB */
	task_mem_bytes *= (1024 * 1024);

	/* Many systems, Linux included, ignore RSS limits, but set it
	 * here anyway for consistency and to provide a way for
	 * applications to interrogate what the RSS limit is (with the
	 * caveat that the real RSS limit is over all job tasks on the
	 * node and not per process, but hopefully this is better than
	 * nothing). */
#ifdef RLIMIT_RSS
	if ((task_mem_bytes) && (getrlimit(RLIMIT_RSS, &r) == 0) &&
	    (r.rlim_max > task_mem_bytes)) {
		r.rlim_max = r.rlim_cur = task_mem_bytes;
		if (setrlimit(RLIMIT_RSS, &r)) {
			/* Indicates that limit has already been exceeded */
			fatal("setrlimit(RLIMIT_RSS, %u MB): %m",
			      job->step_mem);
		} else
			debug2("Set task rss(%u MB)", job->step_mem);
#if 0
		getrlimit(RLIMIT_RSS, &r);
		info("task RSS limits: %u %u", r.rlim_cur, r.rlim_max);
#endif
	}
#endif

#ifdef SLURM_RLIMIT_VSIZE
	/* Only cap vsize when a non-zero VSizeFactor is configured;
	 * the cap is step memory scaled by that percentage. */
	if ((task_mem_bytes) &&
	    ((vsize_factor = slurm_get_vsize_factor()) != 0) &&
	    (getrlimit(SLURM_RLIMIT_VSIZE, &r) == 0) &&
	    (r.rlim_max > task_mem_bytes)) {
		r.rlim_max = task_mem_bytes * (vsize_factor / 100.0);
		r.rlim_cur = r.rlim_max;
		if (setrlimit(SLURM_RLIMIT_VSIZE, &r)) {
			/* Indicates that limit has already been exceeded */
			fatal("setrlimit(%s, %u MB): %m",
			      SLURM_RLIMIT_VNAME, job->step_mem);
		} else
			debug2("Set task vsize(%u MB)", job->step_mem);
#if 0
		getrlimit(SLURM_RLIMIT_VSIZE, &r);
		info("task VSIZE limits: %u %u", r.rlim_cur, r.rlim_max);
#endif
	}
#endif
	return SLURM_SUCCESS;
}
extern int acct_gather_conf_init(void) { s_p_hashtbl_t *tbl = NULL; char *conf_path = NULL; s_p_options_t *full_options = NULL; int full_options_cnt = 0, i; struct stat buf; if (inited) return SLURM_SUCCESS; inited = 1; /* get options from plugins using acct_gather.conf */ acct_gather_energy_g_conf_options(&full_options, &full_options_cnt); acct_gather_profile_g_conf_options(&full_options, &full_options_cnt); acct_gather_infiniband_g_conf_options(&full_options, &full_options_cnt); acct_gather_filesystem_g_conf_options(&full_options, &full_options_cnt); /* ADD MORE HERE */ /* for the NULL at the end */ xrealloc(full_options, ((full_options_cnt + 1) * sizeof(s_p_options_t))); /**************************************************/ /* Get the acct_gather.conf path and validate the file */ conf_path = get_extra_conf_path("acct_gather.conf"); if ((conf_path == NULL) || (stat(conf_path, &buf) == -1)) { debug2("No acct_gather.conf file (%s)", conf_path); } else { debug2("Reading acct_gather.conf file %s", conf_path); tbl = s_p_hashtbl_create(full_options); if (s_p_parse_file(tbl, NULL, conf_path, false) == SLURM_ERROR) { fatal("Could not open/read/parse acct_gather.conf file " "%s. Many times this is because you have " "defined options for plugins that are not " "loaded. Please check your slurm.conf file " "and make sure the plugins for the options " "listed are loaded.", conf_path); } } for (i=0; i<full_options_cnt; i++) xfree(full_options[i].key); xfree(full_options); xfree(conf_path); /* handle acct_gather.conf in each plugin */ acct_gather_energy_g_conf_set(tbl); acct_gather_profile_g_conf_set(tbl); acct_gather_infiniband_g_conf_set(tbl); acct_gather_filesystem_g_conf_set(tbl); /*********************************************************************/ /* ADD MORE HERE AND FREE MEMORY IN acct_gather_conf_destroy() BELOW */ /*********************************************************************/ s_p_hashtbl_destroy(tbl); return SLURM_SUCCESS; }
/*
 * GSettings change callback: recompute the effective proxy configuration
 * and push it to the network layer via network_set_proxy().
 *
 * PROXY_DETECT_MODE selects the source:
 *   0 (and default) - auto-detect: GNOME proxy settings first, then the
 *                     $http_proxy environment variable;
 *   1               - proxy disabled;
 *   2               - manual settings from our own preference keys.
 */
static void
conf_proxy_reset_settings_cb (GSettings *settings, guint cnxn_id, gchar *key, gpointer user_data)
{
	gchar		*proxyname, *proxyusername, *proxypassword, *tmp;
	gboolean	gnomeUseProxy;
	guint		proxyport;
	gint		proxydetectmode;
	gboolean	proxyuseauth;
	xmlURIPtr	uri;

	proxyname = NULL;
	proxyport = 0;
	proxyusername = NULL;
	proxypassword = NULL;
	conf_get_int_value (PROXY_DETECT_MODE, &proxydetectmode);
	switch (proxydetectmode) {
		default:
		case 0:
			debug0 (DEBUG_CONF, "proxy auto detect is configured");

			/* first check for a configured GNOME proxy, note: older
			   GNOME versions do use the boolean flag GNOME_USE_PROXY
			   while newer ones use the string key GNOME_PROXY_MODE */
			conf_get_str_value_from_schema (proxy_settings, GNOME_PROXY_MODE, &tmp);
			gnomeUseProxy = g_str_equal (tmp, "manual");
			g_free (tmp);

			/* first check for a configured GNOME proxy */
			if (gnomeUseProxy) {
				conf_get_str_value_from_schema (proxy_settings, GNOME_PROXY_HOST, &proxyname);
				conf_get_int_value_from_schema (proxy_settings, GNOME_PROXY_PORT, &proxyport);
				debug2 (DEBUG_CONF, "using GNOME configured proxy: \"%s\" port \"%d\"", proxyname, proxyport);
				conf_get_bool_value_from_schema (proxy_settings, GNOME_PROXY_USEAUTH, &proxyuseauth);
				if (proxyuseauth) {
					conf_get_str_value_from_schema (proxy_settings, GNOME_PROXY_USER, &proxyusername);
					conf_get_str_value_from_schema (proxy_settings, GNOME_PROXY_PASSWD, &proxypassword);
				}
			} else {
				/* otherwise there could be a proxy specified in the environment
				   the following code was derived from SnowNews' setup.c */
				if (g_getenv("http_proxy")) {
					/* The pointer returned by getenv must not be altered.
					   What about mentioning this in the manpage of getenv? */
					debug0 (DEBUG_CONF, "using proxy from environment");
					/* do/while(FALSE) gives a single-exit
					   "break on error" parsing sequence */
					do {
						uri = xmlParseURI (BAD_CAST g_getenv ("http_proxy"));
						if (uri == NULL) {
							debug0 (DEBUG_CONF, "parsing URI in $http_proxy failed!");
							break;
						}
						if (uri->server == NULL) {
							debug0 (DEBUG_CONF, "could not determine proxy name from $http_proxy!");
							xmlFreeURI (uri);
							break;
						}
						proxyname = g_strdup (uri->server);
						/* 3128 is used as fallback when
						   the URI carries no port */
						proxyport = (uri->port == 0) ? 3128 : uri->port;
						if (uri->user) {
							/* NOTE(review): strtok mutates
							   uri->user in place -- after the
							   first call uri->user is cut at
							   the first ':', so g_strdup
							   below copies the user part only. */
							tmp = strtok (uri->user, ":");
							tmp = strtok (NULL, ":");
							if (tmp) {
								proxyusername = g_strdup (uri->user);
								proxypassword = g_strdup (tmp);
							}
						}
						xmlFreeURI (uri);
					} while (FALSE);
				}
			}
			if (!proxyname)
				debug0 (DEBUG_CONF, "no proxy GNOME of $http_proxy configuration found...");
			break;
		case 1:
			debug0 (DEBUG_CONF, "proxy is disabled by user");
			/* nothing to do */
			break;
		case 2:
			debug0 (DEBUG_CONF, "manual proxy is configured");
			conf_get_str_value (PROXY_HOST, &proxyname);
			conf_get_int_value (PROXY_PORT, &proxyport);
			conf_get_bool_value (PROXY_USEAUTH, &proxyuseauth);
			if (proxyuseauth) {
				conf_get_str_value (PROXY_USER, &proxyusername);
				conf_get_str_value (PROXY_PASSWD, &proxypassword);
			}
			break;
	}
	/* NOTE(review): when credentials are set this logs them verbatim at
	   debug level ("******" is only the NULL placeholder) -- confirm
	   that is intended. */
	debug4 (DEBUG_CONF, "Proxy settings are now %s:%d %s:%s",
	        proxyname != NULL ? proxyname : "NULL", proxyport,
	        proxyusername != NULL ? proxyusername : "******",
	        proxypassword != NULL ? proxypassword : "******");

	network_set_proxy (proxyname, proxyport, proxyusername, proxypassword);
}
/*
 * SSH2 "publickey" userauth handler (legacy Buffer/Key API).
 *
 * Two phases, distinguished by the have_sig byte from the packet:
 *   - have_sig == 0: the client only asks whether the key would be
 *     acceptable; if so we reply with SSH2_MSG_USERAUTH_PK_OK and mark the
 *     request postponed.
 *   - have_sig != 0: the client sent a signature; rebuild the exact blob
 *     the client signed and verify it (key lookup and verification both go
 *     through the privsep monitor via PRIVSEP()).
 *
 * Returns 1 if authenticated, 0 otherwise.
 */
static int
userauth_pubkey(Authctxt *authctxt)
{
	Buffer b;
	Key *key = NULL;
	char *pkalg;
	u_char *pkblob, *sig;
	u_int alen, blen, slen;
	int have_sig, pktype;
	int authenticated = 0;

	if (!authctxt->valid) {
		debug2("userauth_pubkey: disabled because of invalid user");
		return 0;
	}
	have_sig = packet_get_char();
	if (datafellows & SSH_BUG_PKAUTH) {
		debug2("userauth_pubkey: SSH_BUG_PKAUTH");
		/* no explicit pkalg given */
		pkblob = packet_get_string(&blen);
		buffer_init(&b);
		buffer_append(&b, pkblob, blen);
		/* so we have to extract the pkalg from the pkblob */
		pkalg = buffer_get_string(&b, &alen);
		buffer_free(&b);
	} else {
		pkalg = packet_get_string(&alen);
		pkblob = packet_get_string(&blen);
	}
	pktype = key_type_from_name(pkalg);
	if (pktype == KEY_UNSPEC) {
		/* this is perfectly legal */
		logit("userauth_pubkey: unsupported public key algorithm: %s",
		    pkalg);
		goto done;
	}
	key = key_from_blob(pkblob, blen);
	if (key == NULL) {
		error("userauth_pubkey: cannot decode key: %s", pkalg);
		goto done;
	}
	/* the blob must decode to the same type the name claimed */
	if (key->type != pktype) {
		error("userauth_pubkey: type mismatch for decoded key "
		    "(received %d, expected %d)", key->type, pktype);
		goto done;
	}
	if (have_sig) {
		sig = packet_get_string(&slen);
		packet_check_eom();
		buffer_init(&b);
		/* older peers signed the raw session id instead of a
		 * length-prefixed string */
		if (datafellows & SSH_OLD_SESSIONID) {
			buffer_append(&b, session_id2, session_id2_len);
		} else {
			buffer_put_string(&b, session_id2, session_id2_len);
		}
		/* reconstruct packet */
		buffer_put_char(&b, SSH2_MSG_USERAUTH_REQUEST);
		buffer_put_cstring(&b, authctxt->user);
		buffer_put_cstring(&b,
		    datafellows & SSH_BUG_PKSERVICE ?
		    "ssh-userauth" :
		    authctxt->service);
		if (datafellows & SSH_BUG_PKAUTH) {
			buffer_put_char(&b, have_sig);
		} else {
			buffer_put_cstring(&b, "publickey");
			buffer_put_char(&b, have_sig);
			buffer_put_cstring(&b, pkalg);
		}
		buffer_put_string(&b, pkblob, blen);
#ifdef DEBUG_PK
		buffer_dump(&b);
#endif
		/* test for correct signature */
		authenticated = 0;
		if (PRIVSEP(user_key_allowed(authctxt->pw, key)) &&
		    PRIVSEP(key_verify(key, sig, slen, buffer_ptr(&b),
		    buffer_len(&b))) == 1)
			authenticated = 1;
		buffer_free(&b);
		xfree(sig);
	} else {
		debug("test whether pkalg/pkblob are acceptable");
		packet_check_eom();
		/* XXX fake reply and always send PK_OK ? */
		/*
		 * XXX this allows testing whether a user is allowed
		 * to login: if you happen to have a valid pubkey this
		 * message is sent. the message is NEVER sent at all
		 * if a user is not allowed to login. is this an
		 * issue? -markus
		 */
		if (PRIVSEP(user_key_allowed(authctxt->pw, key))) {
			packet_start(SSH2_MSG_USERAUTH_PK_OK);
			packet_put_string(pkalg, alen);
			packet_put_string(pkblob, blen);
			packet_send();
			packet_write_wait();
			authctxt->postponed = 1;
		}
	}
	/* drop any per-key authorized_keys options on failure */
	if (authenticated != 1)
		auth_clear_options();
done:
	debug2("userauth_pubkey: authenticated %d pkalg %s", authenticated,
	    pkalg);
	if (key != NULL)
		key_free(key);
	xfree(pkalg);
	xfree(pkblob);
	return authenticated;
}
/*
 * Roll up accounting usage for one cluster (hourly, daily, monthly).
 *
 * arg is a local_rollup_t* (ownership transfers here: freed before return).
 * Designed to run as a thread body: opens its own DB connection, determines
 * the rollup window (from sent_start/sent_end or from the last_ran table),
 * invokes the hourly/daily/monthly rollup helpers, records the new
 * watermarks, then signals the caller via rolledup_cond.
 *
 * Always returns NULL; failure is reported through *local_rollup->rc.
 */
static void *_cluster_rollup_usage(void *arg)
{
	local_rollup_t *local_rollup = (local_rollup_t *)arg;
	int rc = SLURM_SUCCESS;
	char timer_str[128];
	mysql_conn_t mysql_conn;
	MYSQL_RES *result = NULL;
	MYSQL_ROW row;
	char *query = NULL;
	struct tm start_tm;
	struct tm end_tm;
	time_t my_time = local_rollup->sent_end;
	time_t last_hour = local_rollup->sent_start;
	time_t last_day = local_rollup->sent_start;
	time_t last_month = local_rollup->sent_start;
	time_t hour_start;
	time_t hour_end;
	time_t day_start;
	time_t day_end;
	time_t month_start;
	time_t month_end;
	DEF_TIMERS;

	/* column names / indexes of the last_ran table, kept in sync via
	 * the enum below */
	char *update_req_inx[] = { "hourly_rollup", "daily_rollup",
				   "monthly_rollup" };
	enum { UPDATE_HOUR, UPDATE_DAY, UPDATE_MONTH, UPDATE_COUNT };

	memset(&mysql_conn, 0, sizeof(mysql_conn_t));
	mysql_conn.rollback = 1;
	mysql_conn.conn = local_rollup->mysql_conn->conn;
	slurm_mutex_init(&mysql_conn.lock);

	/* Each thread needs it's own connection we can't use the one
	 * sent from the parent thread. */
	rc = check_connection(&mysql_conn);
	if (rc != SLURM_SUCCESS)
		goto end_it;

	/* No explicit start given: read the last-rolled-up timestamps from
	 * the cluster's last_ran table. */
	if (!local_rollup->sent_start) {
		char *tmp = NULL;
		int i = 0;
		xstrfmtcat(tmp, "%s", update_req_inx[i]);
		for (i = 1; i < UPDATE_COUNT; i++) {
			xstrfmtcat(tmp, ", %s", update_req_inx[i]);
		}
		query = xstrdup_printf("select %s from \"%s_%s\"",
				       tmp, local_rollup->cluster_name,
				       last_ran_table);
		xfree(tmp);
		debug4("%d(%s:%d) query\n%s", mysql_conn.conn,
		       THIS_FILE, __LINE__, query);
		if (!(result = mysql_db_query_ret(&mysql_conn, query, 0))) {
			xfree(query);
			rc = SLURM_ERROR;
			goto end_it;
		}
		xfree(query);
		row = mysql_fetch_row(result);
		if (row) {
			last_hour = slurm_atoul(row[UPDATE_HOUR]);
			last_day = slurm_atoul(row[UPDATE_DAY]);
			last_month = slurm_atoul(row[UPDATE_MONTH]);
			mysql_free_result(result);
		} else {
			/* No last_ran row yet: bootstrap from the earliest
			 * event, falling back to "now". */
			time_t now = time(NULL);
			time_t lowest = now;

			mysql_free_result(result);

			query = xstrdup_printf(
				"select time_start from \"%s_%s\" "
				"where node_name='' order by "
				"time_start asc limit 1;",
				local_rollup->cluster_name, event_table);
			debug3("%d(%s:%d) query\n%s", mysql_conn.conn,
			       THIS_FILE, __LINE__, query);
			if (!(result = mysql_db_query_ret(
				      &mysql_conn, query, 0))) {
				xfree(query);
				rc = SLURM_ERROR;
				goto end_it;
			}
			xfree(query);
			if ((row = mysql_fetch_row(result))) {
				time_t check = slurm_atoul(row[0]);
				if (check < lowest)
					lowest = check;
			}
			mysql_free_result(result);
			/* If we don't have any events like adding a
			 * cluster this will not work correctly, so we
			 * will insert now as a starting point. */
			query = xstrdup_printf(
				"insert into \"%s_%s\" "
				"(hourly_rollup, daily_rollup, monthly_rollup) "
				"values (%ld, %ld, %ld);",
				local_rollup->cluster_name, last_ran_table,
				lowest, lowest, lowest);
			debug3("%d(%s:%d) query\n%s", mysql_conn.conn,
			       THIS_FILE, __LINE__, query);
			rc = mysql_db_query(&mysql_conn, query);
			xfree(query);
			if (rc != SLURM_SUCCESS) {
				rc = SLURM_ERROR;
				goto end_it;
			}

			if (lowest == now) {
				debug("Cluster %s not registered, "
				      "not doing rollup",
				      local_rollup->cluster_name);
				rc = SLURM_SUCCESS;
				goto end_it;
			}

			last_hour = last_day = last_month = lowest;
		}
	}

	if (!my_time)
		my_time = time(NULL);

	/* test month gap */
	/* last_hour = 1212299999; */
	/* last_day = 1212217200; */
	/* last_month = 1212217200; */
	/* my_time = 1212307200; */
	/* last_hour = 1211475599; */
	/* last_day = 1211475599; */
	/* last_month = 1211475599; */
	// last_hour = 1211403599;
	// last_hour = 1206946800;
	// last_day = 1207033199;
	// last_day = 1197033199;
	// last_month = 1204358399;

	if (!localtime_r(&last_hour, &start_tm)) {
		error("Couldn't get localtime from hour start %ld", last_hour);
		rc = SLURM_ERROR;
		goto end_it;
	}

	if (!localtime_r(&my_time, &end_tm)) {
		error("Couldn't get localtime from hour end %ld", my_time);
		rc = SLURM_ERROR;
		goto end_it;
	}

	/* Below and anywhere in a rollup plugin when dealing with
	 * epoch times we need to set the tm_isdst = -1 so we don't
	 * have to worry about the time changes.  Not setting it to -1
	 * will cause problems in the day and month with the date change. */

	/* hour window: truncate both ends to the hour */
	start_tm.tm_sec = 0;
	start_tm.tm_min = 0;
	start_tm.tm_isdst = -1;
	hour_start = mktime(&start_tm);

	end_tm.tm_sec = 0;
	end_tm.tm_min = 0;
	end_tm.tm_isdst = -1;
	hour_end = mktime(&end_tm);

	/* info("hour start %s", slurm_ctime(&hour_start)); */
	/* info("hour end %s", slurm_ctime(&hour_end)); */
	/* info("diff is %d", hour_end-hour_start); */

	slurm_mutex_lock(&rollup_lock);
	global_last_rollup = hour_end;
	slurm_mutex_unlock(&rollup_lock);

	/* set up the day period */
	if (!localtime_r(&last_day, &start_tm)) {
		error("Couldn't get localtime from day %ld", last_day);
		rc = SLURM_ERROR;
		goto end_it;
	}
	start_tm.tm_sec = 0;
	start_tm.tm_min = 0;
	start_tm.tm_hour = 0;
	start_tm.tm_isdst = -1;
	day_start = mktime(&start_tm);

	/* end_tm still holds the truncated my_time; drop it to midnight */
	end_tm.tm_hour = 0;
	end_tm.tm_isdst = -1;
	day_end = mktime(&end_tm);

	/* info("day start %s", slurm_ctime(&day_start)); */
	/* info("day end %s", slurm_ctime(&day_end)); */
	/* info("diff is %d", day_end-day_start); */

	/* set up the month period */
	if (!localtime_r(&last_month, &start_tm)) {
		error("Couldn't get localtime from month %ld", last_month);
		rc = SLURM_ERROR;
		goto end_it;
	}
	start_tm.tm_sec = 0;
	start_tm.tm_min = 0;
	start_tm.tm_hour = 0;
	start_tm.tm_mday = 1;
	start_tm.tm_isdst = -1;
	month_start = mktime(&start_tm);

	end_tm.tm_sec = 0;
	end_tm.tm_min = 0;
	end_tm.tm_hour = 0;
	end_tm.tm_mday = 1;
	end_tm.tm_isdst = -1;
	month_end = mktime(&end_tm);

	/* info("month start %s", slurm_ctime(&month_start)); */
	/* info("month end %s", slurm_ctime(&month_end)); */
	/* info("diff is %d", month_end-month_start); */

	/* run each rollup only when its window is non-empty */
	if ((hour_end - hour_start) > 0) {
		START_TIMER;
		rc = as_mysql_hourly_rollup(&mysql_conn,
					    local_rollup->cluster_name,
					    hour_start, hour_end,
					    local_rollup->archive_data);
		snprintf(timer_str, sizeof(timer_str),
			 "hourly_rollup for %s", local_rollup->cluster_name);
		END_TIMER3(timer_str, 5000000);
		if (rc != SLURM_SUCCESS)
			goto end_it;
	}

	if ((day_end - day_start) > 0) {
		START_TIMER;
		rc = as_mysql_daily_rollup(&mysql_conn,
					   local_rollup->cluster_name,
					   day_start, day_end,
					   local_rollup->archive_data);
		snprintf(timer_str, sizeof(timer_str),
			 "daily_rollup for %s", local_rollup->cluster_name);
		END_TIMER3(timer_str, 5000000);
		if (rc != SLURM_SUCCESS)
			goto end_it;
	}

	if ((month_end - month_start) > 0) {
		START_TIMER;
		rc = as_mysql_monthly_rollup(&mysql_conn,
					     local_rollup->cluster_name,
					     month_start, month_end,
					     local_rollup->archive_data);
		snprintf(timer_str, sizeof(timer_str),
			 "monthly_rollup for %s", local_rollup->cluster_name);
		END_TIMER3(timer_str, 5000000);
		if (rc != SLURM_SUCCESS)
			goto end_it;
	}

	/* Persist the new watermarks; the three branches below build one
	 * combined UPDATE statement. */
	if ((hour_end - hour_start) > 0) {
		/* If we have a sent_end do not update the last_run_table */
		if (!local_rollup->sent_end)
			query = xstrdup_printf(
				"update \"%s_%s\" set hourly_rollup=%ld",
				local_rollup->cluster_name,
				last_ran_table, hour_end);
	} else
		debug2("No need to roll cluster %s this hour %ld <= %ld",
		       local_rollup->cluster_name, hour_end, hour_start);

	if ((day_end - day_start) > 0) {
		if (query && !local_rollup->sent_end)
			xstrfmtcat(query, ", daily_rollup=%ld", day_end);
		else if (!local_rollup->sent_end)
			query = xstrdup_printf(
				"update \"%s_%s\" set daily_rollup=%ld",
				local_rollup->cluster_name,
				last_ran_table, day_end);
	} else
		debug2("No need to roll cluster %s this day %ld <= %ld",
		       local_rollup->cluster_name, day_end, day_start);

	if ((month_end - month_start) > 0) {
		if (query && !local_rollup->sent_end)
			xstrfmtcat(query, ", monthly_rollup=%ld", month_end);
		else if (!local_rollup->sent_end)
			query = xstrdup_printf(
				"update \"%s_%s\" set monthly_rollup=%ld",
				local_rollup->cluster_name,
				last_ran_table, month_end);
	} else
		debug2("No need to roll cluster %s this month %ld <= %ld",
		       local_rollup->cluster_name, month_end, month_start);

	if (query) {
		debug3("%d(%s:%d) query\n%s",
		       mysql_conn.conn, THIS_FILE, __LINE__, query);
		rc = mysql_db_query(&mysql_conn, query);
		xfree(query);
	}
end_it:
	/* commit on success, roll back on failure */
	if (rc == SLURM_SUCCESS) {
		if (mysql_db_commit(&mysql_conn)) {
			error("Couldn't commit rollup of cluster %s",
			      local_rollup->cluster_name);
			rc = SLURM_ERROR;
		}
	} else {
		error("Cluster %s rollup failed", local_rollup->cluster_name);
		if (mysql_db_rollback(&mysql_conn))
			error("rollback failed");
	}

	mysql_db_close_db_connection(&mysql_conn);
	slurm_mutex_destroy(&mysql_conn.lock);

	/* report completion (and first failure) back to the coordinator */
	slurm_mutex_lock(local_rollup->rolledup_lock);
	(*local_rollup->rolledup)++;
	if ((rc != SLURM_SUCCESS) && ((*local_rollup->rc) == SLURM_SUCCESS))
		(*local_rollup->rc) = rc;
	pthread_cond_signal(local_rollup->rolledup_cond);
	slurm_mutex_unlock(local_rollup->rolledup_lock);

	xfree(local_rollup);

	return NULL;
}
static int userauth_hostbased(struct ssh *ssh) { Authctxt *authctxt = ssh->authctxt; struct sshbuf *b; struct sshkey *key = NULL; char *pkalg, *cuser, *chost; u_char *pkblob, *sig; size_t alen, blen, slen; int r, pktype, authenticated = 0; if (!authctxt->valid) { debug2("%s: disabled because of invalid user", __func__); return 0; } /* XXX use sshkey_froms() */ if ((r = sshpkt_get_cstring(ssh, &pkalg, &alen)) != 0 || (r = sshpkt_get_string(ssh, &pkblob, &blen)) != 0 || (r = sshpkt_get_cstring(ssh, &chost, NULL)) != 0 || (r = sshpkt_get_cstring(ssh, &cuser, NULL)) != 0 || (r = sshpkt_get_string(ssh, &sig, &slen)) != 0) fatal("%s: packet parsing: %s", __func__, ssh_err(r)); debug("%s: cuser %s chost %s pkalg %s slen %zu", __func__, cuser, chost, pkalg, slen); #ifdef DEBUG_PK debug("signature:"); sshbuf_dump_data(sig, siglen, stderr); #endif pktype = sshkey_type_from_name(pkalg); if (pktype == KEY_UNSPEC) { /* this is perfectly legal */ logit("%s: unsupported public key algorithm: %s", __func__, pkalg); goto done; } if ((r = sshkey_from_blob(pkblob, blen, &key)) != 0) { error("%s: key_from_blob: %s", __func__, ssh_err(r)); goto done; } if (key == NULL) { error("%s: cannot decode key: %s", __func__, pkalg); goto done; } if (key->type != pktype) { error("%s: type mismatch for decoded key " "(received %d, expected %d)", __func__, key->type, pktype); goto done; } if (sshkey_type_plain(key->type) == KEY_RSA && (ssh->compat & SSH_BUG_RSASIGMD5) != 0) { error("Refusing RSA key because peer uses unsafe " "signature format"); goto done; } if (match_pattern_list(sshkey_ssh_name(key), options.hostbased_key_types, 0) != 1) { logit("%s: key type %s not in HostbasedAcceptedKeyTypes", __func__, sshkey_type(key)); goto done; } if ((b = sshbuf_new()) == NULL) fatal("%s: sshbuf_new failed", __func__); /* reconstruct packet */ if ((r = sshbuf_put_string(b, session_id2, session_id2_len)) != 0 || (r = sshbuf_put_u8(b, SSH2_MSG_USERAUTH_REQUEST)) != 0 || (r = sshbuf_put_cstring(b, 
authctxt->user)) != 0 || (r = sshbuf_put_cstring(b, authctxt->service)) != 0 || (r = sshbuf_put_cstring(b, "hostbased")) != 0 || (r = sshbuf_put_string(b, pkalg, alen)) != 0 || (r = sshbuf_put_string(b, pkblob, blen)) != 0 || (r = sshbuf_put_cstring(b, chost)) != 0 || (r = sshbuf_put_cstring(b, cuser)) != 0) fatal("%s: buffer error: %s", __func__, ssh_err(r)); #ifdef DEBUG_PK sshbuf_dump(b, stderr); #endif auth2_record_info(authctxt, "client user \"%.100s\", client host \"%.100s\"", cuser, chost); /* test for allowed key and correct signature */ authenticated = 0; if (PRIVSEP(hostbased_key_allowed(authctxt->pw, cuser, chost, key)) && PRIVSEP(sshkey_verify(key, sig, slen, sshbuf_ptr(b), sshbuf_len(b), pkalg, ssh->compat)) == 0) authenticated = 1; auth2_record_key(authctxt, authenticated, key); sshbuf_free(b); done: debug2("%s: authenticated %d", __func__, authenticated); sshkey_free(key); free(pkalg); free(pkblob); free(cuser); free(chost); free(sig); return authenticated; }
/*
 * Roll up usage for every cluster known to the accounting database.
 *
 * IN mysql_conn    - parent DB connection (used only for its conn id;
 *                    each cluster rollup opens its own connection)
 * IN sent_start    - explicit window start, or 0 to use the last_ran table
 * IN sent_end      - explicit window end, or 0 for "now"
 * IN archive_data  - forwarded to the per-cluster rollup
 * RET SLURM_SUCCESS, or the first per-cluster error code.
 *
 * Serialized globally by usage_rollup_lock.  _cluster_rollup_usage()
 * signals rolledup_cond when a cluster is done and frees its argument.
 */
extern int as_mysql_roll_usage(mysql_conn_t *mysql_conn,
			       time_t sent_start, time_t sent_end,
			       uint16_t archive_data)
{
	int rc = SLURM_SUCCESS;
	int rolledup = 0;
	char *cluster_name = NULL;
	ListIterator itr;
	pthread_mutex_t rolledup_lock = PTHREAD_MUTEX_INITIALIZER;
	pthread_cond_t rolledup_cond;
	//DEF_TIMERS;

	if (check_connection(mysql_conn) != SLURM_SUCCESS)
		return ESLURM_DB_CONNECTION;

	slurm_mutex_lock(&usage_rollup_lock);

	slurm_mutex_init(&rolledup_lock);
	pthread_cond_init(&rolledup_cond, NULL);

	//START_TIMER;
	slurm_mutex_lock(&as_mysql_cluster_list_lock);
	itr = list_iterator_create(as_mysql_cluster_list);
	while ((cluster_name = list_next(itr))) {
		/* pthread_t rollup_tid; */
		/* pthread_attr_t rollup_attr; */
		local_rollup_t *local_rollup = xmalloc(sizeof(local_rollup_t));

		local_rollup->archive_data = archive_data;
		local_rollup->cluster_name = cluster_name;
		local_rollup->mysql_conn = mysql_conn;
		local_rollup->rc = &rc;
		local_rollup->rolledup = &rolledup;
		local_rollup->rolledup_lock = &rolledup_lock;
		local_rollup->rolledup_cond = &rolledup_cond;
		local_rollup->sent_end = sent_end;
		local_rollup->sent_start = sent_start;

		/* _cluster_rollup_usage is responsible for freeing
		   this local_rollup */
		_cluster_rollup_usage(local_rollup);
		/* It turns out doing this with threads only buys a very
		   small victory, and can skew the timings.  So just doing
		   them one after the other isn't too bad.  If you really
		   want to do this in threads you can just uncomment this,
		   and comment the call above. */
		/* slurm_attr_init(&rollup_attr); */
		/* if (pthread_create(&rollup_tid, &rollup_attr, */
		/* 		      _cluster_rollup_usage, */
		/* 		      (void *)local_rollup)) */
		/* 	fatal("pthread_create: %m"); */
		/* slurm_attr_destroy(&rollup_attr); */
	}
	slurm_mutex_lock(&rolledup_lock);
	list_iterator_destroy(itr);
	slurm_mutex_unlock(&as_mysql_cluster_list_lock);
	/* Wait until every cluster has signalled completion.  With the
	 * serial call above this loop exits immediately; it matters only
	 * for the (commented-out) threaded variant. */
	while (rolledup < list_count(as_mysql_cluster_list)) {
		pthread_cond_wait(&rolledup_cond, &rolledup_lock);
		debug2("Got %d rolled up", rolledup);
	}
	slurm_mutex_unlock(&rolledup_lock);
	debug2("Everything rolled up");
	slurm_mutex_destroy(&rolledup_lock);
	pthread_cond_destroy(&rolledup_cond);
	/* END_TIMER; */
	/* info("total time was %s", TIME_STR); */

	slurm_mutex_unlock(&usage_rollup_lock);

	return rc;
}
/*
 * Dispatch handler for SSH2_MSG_USERAUTH_REQUEST.
 *
 * On the first attempt it sets up the auth context (password lookup via
 * the privsep monitor, falling back to a fake passwd entry for invalid
 * users so timing does not leak their existence).  Subsequent attempts
 * must keep the same user and service.  The requested method's handler is
 * then invoked and the result passed to userauth_finish().
 */
/*ARGSUSED*/
static void
input_userauth_request(int type, u_int32_t seq, void *ctxt)
{
	Authctxt *authctxt = ctxt;
	Authmethod *m = NULL;
	char *user, *service, *method, *style = NULL;
	int authenticated = 0;

	if (authctxt == NULL)
		fatal("input_userauth_request: no authctxt");

	user = packet_get_cstring(NULL);
	service = packet_get_cstring(NULL);
	method = packet_get_cstring(NULL);

	debug("userauth-request for user %s service %s method %s",
	    user, service, method);
	/* log the very first auth attempt of the connection once */
	if (!log_flag) {
		logit("SSH: Server;Ltype: Authname;Remote: %s-%d;Name: %s",
		    get_remote_ipaddr(), get_remote_port(), user);
		log_flag = 1;
	}
	debug("attempt %d failures %d", authctxt->attempt, authctxt->failures);

	/* "user:style" syntax: split off the optional auth style */
	if ((style = strchr(user, ':')) != NULL)
		*style++ = 0;

	if (authctxt->attempt++ == 0) {
		/* setup auth context */
		authctxt->pw = PRIVSEP(getpwnamallow(user));
		authctxt->user = xstrdup(user);
		if (authctxt->pw && strcmp(service, "ssh-connection")==0) {
			authctxt->valid = 1;
			debug2("input_userauth_request: setting up authctxt for %s", user);
		} else {
			logit("input_userauth_request: invalid user %s", user);
			authctxt->pw = fakepw();
		}
#ifdef USE_PAM
		if (options.use_pam)
			PRIVSEP(start_pam(authctxt));
#endif
		setproctitle("%s%s", authctxt->valid ? user : "******",
		    use_privsep ? " [net]" : "");
		authctxt->service = xstrdup(service);
		authctxt->style = style ? xstrdup(style) : NULL;
		if (use_privsep)
			mm_inform_authserv(service, style);
		userauth_banner();
		if (auth2_setup_methods_lists(authctxt) != 0)
			packet_disconnect("no authentication methods enabled");
	} else if (strcmp(user, authctxt->user) != 0 ||
	    strcmp(service, authctxt->service) != 0) {
		/* user/service may not change mid-authentication */
		packet_disconnect("Change of username or service not allowed: "
		    "(%s,%s) -> (%s,%s)",
		    authctxt->user, authctxt->service, user, service);
	}
	/* reset state */
	auth2_challenge_stop(authctxt);
#ifdef JPAKE
	auth2_jpake_stop(authctxt);
#endif
#ifdef GSSAPI
	/* XXX move to auth2_gssapi_stop() */
	dispatch_set(SSH2_MSG_USERAUTH_GSSAPI_TOKEN, NULL);
	dispatch_set(SSH2_MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, NULL);
#endif
	authctxt->postponed = 0;
	authctxt->server_caused_failure = 0;

	/* try to authenticate user */
	m = authmethod_lookup(authctxt, method);
	if (m != NULL && authctxt->failures < options.max_authtries) {
		debug2("input_userauth_request: try method %s", method);
		authenticated =	m->userauth(authctxt);
	}
	userauth_finish(authctxt, authenticated, method, NULL);

	free(service);
	free(user);
	free(method);
}
/*
 * Initialize the federation manager: load this cluster's federation either
 * from the saved state file (when the database is down) or from the
 * accounting storage, and join it if this cluster is a member.
 *
 * IN db_conn - accounting storage connection.
 * RET SLURM_SUCCESS or SLURM_ERROR.  Idempotent: guarded by init_mutex
 * and the "inited" flag, which is set even on failure.
 */
extern int fed_mgr_init(void *db_conn)
{
	int rc = SLURM_SUCCESS;
	slurmdb_federation_cond_t fed_cond;
	List fed_list;
	slurmdb_federation_rec_t *fed = NULL;

	slurm_mutex_lock(&init_mutex);

	if (inited) {
		slurm_mutex_unlock(&init_mutex);
		return SLURM_SUCCESS;
	}

	/* federations only make sense with association based accounting */
	if (!association_based_accounting)
		goto end_it;

	slurm_persist_conn_recv_server_init();

	if (running_cache) {
		debug("Database appears down, reading federations from state file.");
		fed = fed_mgr_state_load(
			slurmctld_conf.state_save_location);
		if (!fed) {
			debug2("No federation state");
			rc = SLURM_SUCCESS;
			goto end_it;
		}
	} else {
		/* query the accounting storage for federations containing
		 * this cluster */
		slurmdb_init_federation_cond(&fed_cond, 0);
		fed_cond.cluster_list = list_create(NULL);
		list_append(fed_cond.cluster_list, slurmctld_cluster_name);

		fed_list = acct_storage_g_get_federations(db_conn, getuid(),
							  &fed_cond);
		FREE_NULL_LIST(fed_cond.cluster_list);
		if (!fed_list) {
			error("failed to get a federation list");
			rc = SLURM_ERROR;
			goto end_it;
		}

		/* a cluster can belong to at most one federation */
		if (list_count(fed_list) == 1)
			fed = list_pop(fed_list);
		else if (list_count(fed_list) > 1) {
			error("got more federations than expected");
			rc = SLURM_ERROR;
		}
		FREE_NULL_LIST(fed_list);
	}

	if (fed) {
		slurmdb_cluster_rec_t *cluster = NULL;

		if ((cluster = list_find_first(fed->cluster_list,
					       slurmdb_find_cluster_in_list,
					       slurmctld_cluster_name))) {
			_join_federation(fed, cluster, false);
		} else {
			error("failed to get cluster from federation that we requested");
			rc = SLURM_ERROR;
		}
	}

end_it:
	inited = true;
	slurm_mutex_unlock(&init_mutex);

	return rc;
}
/*
 * Download @uri into @targetPath via libcurl, resuming from whatever is
 * already on disk (file opened in append mode).  @callback receives
 * progress events.  Returns an empty string on success, otherwise a
 * human-readable error message.
 *
 * Retries: CURLE_RECV_ERROR is retried up to acquire::retries times;
 * CURLE_RANGE_ERROR removes the partial file and restarts from scratch.
 */
string perform(const Config& config, const download::Uri& uri,
		const string& targetPath, const std::function< void (const vector< string >&) >& callback)
{
	try
	{
		CurlWrapper curl;
		// bad connections can return 'receive failure' transient error
		// occasionally, give them several tries to finish the download
		auto transientErrorsLeft = config.getInteger("acquire::retries");

		{ // setting options
			curl.setOption(CURLOPT_URL, string(uri), "uri");
			auto downloadLimit = getIntegerAcquireSuboptionForUri(config, uri, "dl-limit");
			if (downloadLimit)
			{
				curl.setLargeOption(CURLOPT_MAX_RECV_SPEED_LARGE, downloadLimit*1024, "upper speed limit");
			}
			auto proxy = getAcquireSuboptionForUri(config, uri, "proxy");
			if (proxy == "DIRECT")
			{
				// "DIRECT" means: explicitly bypass any proxy
				curl.setOption(CURLOPT_PROXY, "", "proxy");
			}
			else if (!proxy.empty())
			{
				curl.setOption(CURLOPT_PROXY, proxy, "proxy");
			}
			if (uri.getProtocol() == "http" && config.getBool("acquire::http::allowredirect"))
			{
				curl.setOption(CURLOPT_FOLLOWLOCATION, 1, "follow-location");
			}
			auto timeout = getIntegerAcquireSuboptionForUri(config, uri, "timeout");
			if (timeout)
			{
				curl.setOption(CURLOPT_CONNECTTIMEOUT, timeout, "connect timeout");
				curl.setOption(CURLOPT_LOW_SPEED_LIMIT, 1, "low speed limit");
				curl.setOption(CURLOPT_LOW_SPEED_TIME, timeout, "low speed timeout");
			}
			curl.setOption(CURLOPT_WRITEFUNCTION, (void*)&curlWriteFunction, "write function");
		}

		// append mode: existing bytes on disk are kept for resuming
		RequiredFile file(targetPath, "a");

	start:
		ssize_t totalBytes = file.tell();
		callback({ "downloading", to_string(totalBytes), to_string(0)});
		curl.setOption(CURLOPT_RESUME_FROM, totalBytes, "resume from");

		string fileWriteError;

		{
			// wire up the file-scope pointers used by the static
			// curl write callback
			fileWriteErrorPtr = &fileWriteError;
			filePtr = &file;
			curlPtr = &curl;
			callbackPtr = &callback;
			totalBytesPtr = &totalBytes;
		}

		auto performResult = curl.perform();

		if (!fileWriteError.empty())
		{
			return fileWriteError;
		}
		else if (performResult == CURLE_OK)
		{
			return string(); // all went ok
		}
		else if (performResult == CURLE_PARTIAL_FILE)
		{
			// partial data? no problem, we might request it
			return string();
		}
		else
		{
			// something went wrong :(

			// transient errors handling
			if (performResult == CURLE_RECV_ERROR && transientErrorsLeft)
			{
				if (config.getBool("debug::downloader"))
				{
					debug2("transient error while downloading '%s'", string(uri));
				}
				--transientErrorsLeft;
				goto start;
			}
			if (performResult == CURLE_RANGE_ERROR)
			{
				// server can't resume: drop the partial file
				// and start over from byte 0
				if (config.getBool("debug::downloader"))
				{
					debug2("range command failed, need to restart from beginning while downloading '%s'", string(uri));
				}
				if (unlink(targetPath.c_str()) == -1)
				{
					return format2e(__("unable to remove target file for re-downloading"));
				}
				goto start;
			}
			return curl.getError();
		}
	}
	catch (Exception& e)
	{
		return format2(__("download method error: %s"), e.what());
	}
}
/* Attempt to schedule a specific job on specific available nodes
 * IN job_ptr - job to schedule
 * IN/OUT avail_bitmap - nodes available/selected to use
 * IN exc_core_bitmap - cores which can not be used
 * RET SLURM_SUCCESS on success, otherwise an error code
 */
static int  _try_sched(struct job_record *job_ptr, bitstr_t **avail_bitmap,
		       uint32_t min_nodes, uint32_t max_nodes,
		       uint32_t req_nodes, bitstr_t *exc_core_bitmap)
{
	bitstr_t *tmp_bitmap;
	int rc = SLURM_SUCCESS;
	int feat_cnt = _num_feature_count(job_ptr);
	List preemptee_candidates = NULL;
	List preemptee_job_list = NULL;

	if (feat_cnt) {
		/* Ideally schedule the job feature by feature,
		 * but I don't want to add that complexity here
		 * right now, so clear the feature counts and try
		 * to schedule. This will work if there is only
		 * one feature count. It should work fairly well
		 * in cases where there are multiple feature
		 * counts. */
		struct job_details *detail_ptr = job_ptr->details;
		ListIterator feat_iter;
		struct feature_record *feat_ptr;
		int i = 0, list_size;
		uint16_t *feat_cnt_orig = NULL, high_cnt = 0;

		/* Clear the feature counts (saving originals so they can be
		 * restored below), and remember the highest count seen */
		list_size = list_count(detail_ptr->feature_list);
		feat_cnt_orig = xmalloc(sizeof(uint16_t) * list_size);
		feat_iter = list_iterator_create(detail_ptr->feature_list);
		while ((feat_ptr =
			(struct feature_record *) list_next(feat_iter))) {
			high_cnt = MAX(high_cnt, feat_ptr->count);
			feat_cnt_orig[i++] = feat_ptr->count;
			feat_ptr->count = 0;
		}
		list_iterator_destroy(feat_iter);

		if ((job_req_node_filter(job_ptr, *avail_bitmap) !=
		     SLURM_SUCCESS) ||
		    (bit_set_count(*avail_bitmap) < high_cnt)) {
			rc = ESLURM_NODES_BUSY;
		} else {
			preemptee_candidates =
				slurm_find_preemptable_jobs(job_ptr);
			rc = select_g_job_test(job_ptr, *avail_bitmap,
					       high_cnt, max_nodes, req_nodes,
					       SELECT_MODE_WILL_RUN,
					       preemptee_candidates,
					       &preemptee_job_list,
					       exc_core_bitmap);
			if (preemptee_job_list) {
				list_destroy(preemptee_job_list);
				preemptee_job_list = NULL;
			}
		}

		/* Restore the feature counts */
		i = 0;
		feat_iter = list_iterator_create(detail_ptr->feature_list);
		while ((feat_ptr =
			(struct feature_record *) list_next(feat_iter))) {
			feat_ptr->count = feat_cnt_orig[i++];
		}
		list_iterator_destroy(feat_iter);
		xfree(feat_cnt_orig);
	} else {
		/* Try to schedule the job. First on dedicated nodes
		 * then on shared nodes (if so configured). */
		uint16_t orig_shared;
		time_t now = time(NULL);
		char str[100];

		preemptee_candidates = slurm_find_preemptable_jobs(job_ptr);
		/* first pass: pretend the job requires dedicated nodes */
		orig_shared = job_ptr->details->share_res;
		job_ptr->details->share_res = 0;
		tmp_bitmap = bit_copy(*avail_bitmap);

		if (exc_core_bitmap) {
			bit_fmt(str, (sizeof(str) - 1), exc_core_bitmap);
			debug2(" _try_sched with exclude core bitmap: %s",str);
		}

		rc = select_g_job_test(job_ptr, *avail_bitmap, min_nodes,
				       max_nodes, req_nodes,
				       SELECT_MODE_WILL_RUN,
				       preemptee_candidates,
				       &preemptee_job_list,
				       exc_core_bitmap);
		if (preemptee_job_list) {
			list_destroy(preemptee_job_list);
			preemptee_job_list = NULL;
		}

		job_ptr->details->share_res = orig_shared;

		/* second pass with sharing restored, using the saved copy of
		 * the availability bitmap, if the dedicated-node attempt
		 * failed or cannot start now */
		if (((rc != SLURM_SUCCESS) || (job_ptr->start_time > now)) &&
		    (orig_shared != 0)) {
			FREE_NULL_BITMAP(*avail_bitmap);
			*avail_bitmap= tmp_bitmap;
			rc = select_g_job_test(job_ptr, *avail_bitmap,
					       min_nodes, max_nodes, req_nodes,
					       SELECT_MODE_WILL_RUN,
					       preemptee_candidates,
					       &preemptee_job_list,
					       exc_core_bitmap);
			if (preemptee_job_list) {
				list_destroy(preemptee_job_list);
				preemptee_job_list = NULL;
			}
		} else
			FREE_NULL_BITMAP(tmp_bitmap);
	}

	if (preemptee_candidates)
		list_destroy(preemptee_candidates);
	return rc;
}
/* Called once a second.
 *
 * Drives all periodic work: secondly timers, a 10-second console status
 * refresh, and (on minute boundaries) the minutely/5-minutely/hourly/daily
 * hooks, with catch-up logic when more than one minute has elapsed.
 *
 * Note: Try to not put any Context lines in here (guppy 21Mar2000).
 */
static void core_secondly()
{
	static int cnt = 0;
	int miltime;

	do_check_timers(&utimer);	/* Secondly timers */
	cnt++;
	if (cnt >= 10) {		/* Every 10 seconds */
		cnt = 0;
		check_expired_dcc();
		if (con_chan && !backgrd) {
			/* clear screen + home cursor, then redraw status */
			dprintf(DP_STDOUT, "\033[2J\033[1;1H");
			tell_verbose_status(DP_STDOUT);
			do_module_report(DP_STDOUT, 0, "server");
			do_module_report(DP_STDOUT, 0, "channels");
			tell_mem_status_dcc(DP_STDOUT);
		}
	}
	/* NOTE(review): localtime() can return NULL (e.g. bogus time);
	 * that would be dereferenced here -- confirm "now" is always valid. */
	egg_memcpy(&nowtm, localtime(&now), sizeof(struct tm));
	if (nowtm.tm_min != lastmin) {
		int i = 0;

		/* Once a minute */
		lastmin = (lastmin + 1) % 60;
		call_hook(HOOK_MINUTELY);
		check_expired_ignores();
		autolink_cycle(NULL);	/* Attempt autolinks */
		/* In case for some reason more than 1 min has passed: */
		while (nowtm.tm_min != lastmin) {
			/* Timer drift, dammit */
			debug2("timer: drift (lastmin=%d, now=%d)",
			       lastmin, nowtm.tm_min);
			i++;
			lastmin = (lastmin + 1) % 60;
			call_hook(HOOK_MINUTELY);
		}
		if (i > 1)
			putlog(LOG_MISC, "*", "(!) timer drift -- spun %d minutes", i);
		/* wall clock as HHMM integer, e.g. 1435 for 14:35 */
		miltime = (nowtm.tm_hour * 100) + (nowtm.tm_min);
		if (((int) (nowtm.tm_min / 5) * 5) == (nowtm.tm_min)) {	/* 5 min */
			call_hook(HOOK_5MINUTELY);
			check_botnet_pings();
			if (!quick_logs) {
				flushlogs();
				check_logsize();
			}
			if (!miltime) {	/* At midnight */
				char s[25];
				int j;

				strncpyz(s, ctime(&now), sizeof s);
				if (quiet_save < 3)
					putlog(LOG_ALL, "*", "--- %.11s%s", s, s + 20);
				call_hook(HOOK_BACKUP);
				/* close all open logfiles so they can rotate */
				for (j = 0; j < max_logs; j++) {
					if (logs[j].filename != NULL && logs[j].f != NULL) {
						fclose(logs[j].f);
						logs[j].f = NULL;
					}
				}
			}
		}
		if (nowtm.tm_min == notify_users_at)
			call_hook(HOOK_HOURLY);
		/* These no longer need checking since they are all check
		 * vs minutely settings and we only get this far on the
		 * minute. */
		if (miltime == switch_logfiles_at) {
			call_hook(HOOK_DAILY);
			if (!keep_all_logs) {
				if (quiet_save < 3)
					putlog(LOG_MISC, "*", MISC_LOGSWITCH);
				/* rotate each logfile to "<name>.yesterday" */
				for (i = 0; i < max_logs; i++)
					if (logs[i].filename) {
						char s[1024];

						if (logs[i].f) {
							fclose(logs[i].f);
							logs[i].f = NULL;
						}
						egg_snprintf(s, sizeof s, "%s.yesterday",
							     logs[i].filename);
						unlink(s);
						movefile(logs[i].filename, s);
					}
			}
		}
	}
}
/*
 * Tear down the cpuacct cgroup hierarchy created for this step.
 *
 * Moves slurmstepd back to the root cpuacct cgroup, then removes the
 * per-task, step, job and user cgroups starting from the leaves (the
 * reverse of creation order), under a flock on the cgroup root when one
 * can be taken.  Deletion failures are only logged (a task may still be
 * exiting); the function still returns SLURM_SUCCESS.
 */
extern int
jobacct_gather_cgroup_cpuacct_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t cpuacct_cg;
	bool lock_ok;
	int cc;

	/* Nothing was created for this step -- nothing to clean up. */
	if (user_cgroup_path[0] == '\0'
	    || job_cgroup_path[0] == '\0'
	    || jobstep_cgroup_path[0] == '\0'
	    || task_cgroup_path[0] == 0)
		return SLURM_SUCCESS;

	/*
	 * Move the slurmstepd back to the root cpuacct cg.
	 * The release_agent will asynchroneously be called for the step
	 * cgroup. It will do the necessary cleanup.
	 */
	if (xcgroup_create(&cpuacct_ns, &cpuacct_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		xcgroup_set_uint32_param(&cpuacct_cg, "tasks", getpid());
	}

	/* Lock the root of the cgroup and remove the subdirectories
	 * related to this job.
	 */
	lock_ok = true;
	if (xcgroup_lock(&cpuacct_cg) != XCGROUP_SUCCESS) {
		error("%s: failed to flock() %s %m", __func__, cpuacct_cg.path);
		lock_ok = false;
	}

	/* Clean up starting from the leaves way up, the
	 * reverse order in which the cgroups were created.
	 */
	for (cc = 0; cc <= max_task_id; cc++) {
		xcgroup_t cgroup;
		char buf[PATH_MAX];

		/* rmdir all tasks this running slurmstepd
		 * was responsible for.
		 *
		 * Fix: use a bounded snprintf() instead of sprintf() so an
		 * oversized mount point / step path cannot overflow buf.
		 */
		snprintf(buf, sizeof(buf), "%s%s/task_%d",
			 cpuacct_ns.mnt_point, jobstep_cgroup_path, cc);
		cgroup.path = buf;

		if (strstr(buf, "step_extern"))
			kill_extern_procs(cgroup.path);

		if (xcgroup_delete(&cgroup) != XCGROUP_SUCCESS) {
			debug2("%s: failed to delete %s %m", __func__, buf);
		}
	}

	if (xcgroup_delete(&step_cpuacct_cg) != XCGROUP_SUCCESS) {
		debug2("%s: failed to delete %s %m", __func__,
		       cpuacct_cg.path);
	}

	if (xcgroup_delete(&job_cpuacct_cg) != XCGROUP_SUCCESS) {
		debug2("%s: failed to delete %s %m", __func__,
		       job_cpuacct_cg.path);
	}

	if (xcgroup_delete(&user_cpuacct_cg) != XCGROUP_SUCCESS) {
		debug2("%s: failed to delete %s %m", __func__,
		       user_cpuacct_cg.path);
	}

	if (lock_ok == true)
		xcgroup_unlock(&cpuacct_cg);

	xcgroup_destroy(&task_cpuacct_cg);
	xcgroup_destroy(&user_cpuacct_cg);
	xcgroup_destroy(&job_cpuacct_cg);
	xcgroup_destroy(&step_cpuacct_cg);
	xcgroup_destroy(&cpuacct_cg);

	/* mark the hierarchy as gone so a second call is a no-op */
	user_cgroup_path[0]='\0';
	job_cgroup_path[0]='\0';
	jobstep_cgroup_path[0]='\0';
	task_cgroup_path[0] = 0;

	xcgroup_ns_destroy(&cpuacct_ns);

	return SLURM_SUCCESS;
}