/*
 * Send a msg to the next msg aggregation collector node. If the primary
 * collector is unavailable or returns an error, try the backup collector.
 * If the backup collector is also unavailable or returns an error, send
 * the msg directly to the controller.
 */
static int _send_to_next_collector(slurm_msg_t *msg)
{
	slurm_addr_t *next_dest = NULL;
	bool i_am_collector;
	int rc = SLURM_SUCCESS;

	if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE)
		info("msg aggr: send_to_next_collector: getting primary next "
		     "collector");
	if ((next_dest = route_g_next_collector(&i_am_collector))) {
		if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE) {
			char addrbuf[100];
			slurm_print_slurm_addr(next_dest, addrbuf,
					       sizeof(addrbuf));
			info("msg aggr: send_to_next_collector: *next_dest is "
			     "%s", addrbuf);
		}
		memcpy(&msg->address, next_dest, sizeof(slurm_addr_t));
		rc = slurm_send_only_node_msg(msg);
	}

	if (!next_dest || (rc != SLURM_SUCCESS))
		rc = _send_to_backup_collector(msg, rc);

	return rc;
}
/*
 * Send a msg to the backup msg aggregation collector node. If the backup
 * is unavailable or returns an error, send the msg directly to the
 * controller.
 */
static int _send_to_backup_collector(slurm_msg_t *msg, int rc)
{
	slurm_addr_t *next_dest = NULL;

	if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE) {
		info("_send_to_backup_collector: primary %s, getting backup",
		     rc ? "can't be reached" : "is null");
	}

	if ((next_dest = route_g_next_collector_backup())) {
		if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE) {
			char addrbuf[100];
			slurm_print_slurm_addr(next_dest, addrbuf,
					       sizeof(addrbuf));
			info("_send_to_backup_collector: *next_dest is %s",
			     addrbuf);
		}
		memcpy(&msg->address, next_dest, sizeof(slurm_addr_t));
		rc = slurm_send_only_node_msg(msg);
	}

	if (!next_dest || (rc != SLURM_SUCCESS)) {
		if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE)
			info("_send_to_backup_collector: backup %s, "
			     "sending msg to controller",
			     rc ? "can't be reached" : "is null");
		rc = slurm_send_only_controller_msg(msg, working_cluster_rec);
	}

	return rc;
}
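/*
 * The two functions above form a three-stage delivery escalation:
 * primary collector, then backup collector, then the controller.  A
 * minimal stand-alone sketch of that pattern follows; send_primary(),
 * send_backup(), send_controller() and send_with_fallback() are invented
 * stand-ins for illustration, not Slurm APIs.
 */
#if 0	/* example only -- not compiled into this file */
#include <stdio.h>

typedef int (*send_fn_t)(void);		/* 0 on success, -1 on failure */

static int send_primary(void)    { return -1; }	/* pretend it fails  */
static int send_backup(void)     { return -1; }	/* pretend it fails  */
static int send_controller(void) { return 0;  }	/* last resort works */

/* Try each destination in order; stop at the first successful send. */
static int send_with_fallback(send_fn_t chain[], int n)
{
	for (int i = 0; i < n; i++)
		if (chain[i]() == 0)
			return 0;
	return -1;
}

int main(void)
{
	send_fn_t chain[] = { send_primary, send_backup, send_controller };

	printf("delivered: %s\n",
	       send_with_fallback(chain, 3) == 0 ? "yes" : "no");
	return 0;
}
#endif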
int p_mpi_hook_slurmstepd_task(const mpi_plugin_task_info_t *job,
			       char ***env)
{
	char addrbuf[1024];
	char *p;
	char *addr = getenvp(*env, "SLURM_LAUNCH_NODE_IPADDR");

	debug("Using mpi/mpich-gm");
	slurm_print_slurm_addr(job->self, addrbuf, sizeof(addrbuf));

	/* Truncate at the first ':' to keep only the host part */
	if ((p = strchr(addrbuf, ':')) != NULL)
		*p = '\0';

	env_array_overwrite_fmt(env, "GMPI_MASTER", "%s", addr);
	env_array_overwrite_fmt(env, "GMPI_SLAVE", "%s", addrbuf);
	env_array_overwrite_fmt(env, "GMPI_ID", "%u", job->gtaskid);
	if (!getenv("GMPI_RECV")) {
		env_array_overwrite_fmt(env, "GMPI_RECV", "%s", "hybrid");
	}

	env_array_overwrite_fmt(env, "MXMPI_MASTER", "%s", addr);
	env_array_overwrite_fmt(env, "MXMPI_ID", "%u", job->gtaskid);
	env_array_overwrite_fmt(env, "MXMPI_SLAVE", "%s", addrbuf);
	if (!getenv("MXMPI_RECV")) {
		env_array_overwrite_fmt(env, "MXMPI_RECV", "%s", "hybrid");
	}

	debug2("init for mpi rank %u", job->gtaskid);
	return SLURM_SUCCESS;
}
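/*
 * The hook above relies on slurm_print_slurm_addr() emitting the address
 * as "host:port" text and then truncating at the first ':' to keep only
 * the host (an assumption that holds for IPv4 dotted-quad form).  A
 * minimal stand-alone sketch of that truncation, with an invented
 * example address:
 */
#if 0	/* example only -- not compiled into this file */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char addrbuf[64];
	char *p;

	snprintf(addrbuf, sizeof(addrbuf), "%s", "10.0.0.5:6818");
	if ((p = strchr(addrbuf, ':')) != NULL)
		*p = '\0';	/* drop the ":port" suffix */
	printf("GMPI_SLAVE would be set to \"%s\"\n", addrbuf);
	return 0;
}
#endif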
extern void msg_aggr_resp(slurm_msg_t *msg)
{
	slurm_msg_t *next_msg;
	composite_msg_t *comp_msg;
	msg_aggr_t *msg_aggr;
	ListIterator itr;

	comp_msg = (composite_msg_t *)msg->data;
	itr = list_iterator_create(comp_msg->msg_list);
	if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE)
		info("msg_aggr_resp: processing composite msg_list...");
	while ((next_msg = list_next(itr))) {
		switch (next_msg->msg_type) {
		case RESPONSE_SLURM_RC:
			/* Signal the sending thread that slurmctld received
			 * this epilog complete msg */
			if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE)
				info("msg_aggr_resp: rc message found for "
				     "index %u, signaling sending thread",
				     next_msg->msg_index);
			slurm_mutex_lock(&msg_collection.aggr_mutex);
			if (!(msg_aggr = _handle_msg_aggr_ret(
				      next_msg->msg_index, 1))) {
				debug2("msg_aggr_resp: error: unable to "
				       "locate aggr message struct for job %u",
				       next_msg->msg_index);
				slurm_mutex_unlock(
					&msg_collection.aggr_mutex);
				continue;
			}
			pthread_cond_signal(&msg_aggr->wait_cond);
			slurm_mutex_unlock(&msg_collection.aggr_mutex);
			break;
		case RESPONSE_MESSAGE_COMPOSITE:
			comp_msg = (composite_msg_t *)next_msg->data;
			/* Set up the address here for the next node */
			memcpy(&next_msg->address, &comp_msg->sender,
			       sizeof(slurm_addr_t));
			if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE) {
				char addrbuf[100];
				slurm_print_slurm_addr(&next_msg->address,
						       addrbuf,
						       sizeof(addrbuf));
				info("msg_aggr_resp: composite response msg "
				     "found for %s", addrbuf);
			}
			slurm_send_only_node_msg(next_msg);
			break;
		default:
			error("msg_aggr_resp: invalid msg type in "
			      "composite msg_list");
			break;
		}
	}
	list_iterator_destroy(itr);
	if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE)
		info("msg aggr: msg_aggr_resp: finished processing "
		     "composite msg_list...");
}
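/*
 * msg_aggr_resp() above walks a list of typed responses and dispatches
 * on msg_type: RC messages wake a waiting sender thread, composite
 * messages are re-addressed and forwarded.  A minimal stand-alone model
 * of that dispatch loop follows; the enum, struct and behavior strings
 * are invented for illustration.
 */
#if 0	/* example only -- not compiled into this file */
#include <stdio.h>

enum msg_type { MSG_RC, MSG_COMPOSITE };

struct msg {
	enum msg_type type;
	unsigned index;
};

/* Walk the list once, handling each entry according to its type. */
static void dispatch(const struct msg *list, int n)
{
	for (int i = 0; i < n; i++) {
		switch (list[i].type) {
		case MSG_RC:
			printf("signal waiter for index %u\n",
			       list[i].index);
			break;
		case MSG_COMPOSITE:
			printf("forward composite %u to its sender\n",
			       list[i].index);
			break;
		default:
			fprintf(stderr, "invalid msg type\n");
			break;
		}
	}
}

int main(void)
{
	struct msg list[] = { { MSG_RC, 7 }, { MSG_COMPOSITE, 8 } };

	dispatch(list, 2);
	return 0;
}
#endif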
/*
 * _set_collectors calls the split_hostlist API on the all-nodes hostlist
 * to set the node to be used as a collector for unsolicited node
 * aggregation.
 *
 * If this node is a forwarding node (the first node in any hostlist),
 * then its collector and backup are the ControlMachine and its backup.
 *
 * Otherwise, we find the hostlist containing this node.  The forwarding
 * node in that hostlist becomes the collector, and the next node in it
 * which is not this node becomes the backup.  That list is then split,
 * and we iterate through the pieces searching for a list in which this
 * node is a forwarding node.  If found, we set the collector and backup;
 * otherwise this process is repeated.
 */
static void _set_collectors(char *this_node_name)
{
	slurm_ctl_conf_t *conf;
	hostlist_t nodes;
	hostlist_t *hll = NULL;
	char *parent = NULL, *backup = NULL;
	char addrbuf[32];
	int i, j, f = -1;
	int hl_count = 0;
	uint16_t parent_port;
	uint16_t backup_port;
	bool found = false;
	bool ctldparent = true;

#ifdef HAVE_FRONT_END
	return; /* On a front-end system this would never be useful. */
#endif

	if (!run_in_daemon("slurmd"))
		return; /* Only compute nodes have collectors */

	/* Set the initial iteration: the collector is the controller,
	 * and the full list is split. */
	xassert(this_node_name);

	conf = slurm_conf_lock();
	nodes = _get_all_nodes();
	parent = strdup(conf->control_addr);
	if (conf->backup_addr) {
		backup = strdup(conf->backup_addr);
	}
	parent_port = conf->slurmctld_port;
	backup_port = parent_port;
	slurm_conf_unlock();

	while (!found) {
		if (route_g_split_hostlist(nodes, &hll, &hl_count)) {
			error("unable to split forward hostlist");
			goto clean; /* collector addrs remain null */
		}
		/* Find which hostlist contains this node */
		for (i = 0; i < hl_count; i++) {
			f = hostlist_find(hll[i], this_node_name);
			if (f != -1)
				break;
		}
		if (i == hl_count) {
			fatal("ROUTE -- %s not found in node_record_table",
			      this_node_name);
		}
		if (f == 0) {
			/* We are a forwarded-to node,
			 * so our parent is "parent" */
			if (hostlist_count(hll[i]) > 1)
				this_is_collector = true;
			xfree(msg_collect_node);
			msg_collect_node = xmalloc(sizeof(slurm_addr_t));
			if (ctldparent)
				slurm_set_addr(msg_collect_node, parent_port,
					       parent);
			else {
				slurm_conf_get_addr(parent, msg_collect_node);
				msg_collect_node->sin_port =
					htons(parent_port);
			}
			if (debug_flags & DEBUG_FLAG_ROUTE) {
				slurm_print_slurm_addr(msg_collect_node,
						       addrbuf, 32);
				info("ROUTE -- message collector address is "
				     "%s", addrbuf);
			}
			xfree(msg_collect_backup);
			if (backup) {
				msg_collect_backup =
					xmalloc(sizeof(slurm_addr_t));
				if (ctldparent) {
					slurm_set_addr(msg_collect_backup,
						       backup_port, backup);
				} else {
					slurm_conf_get_addr(
						backup, msg_collect_backup);
					msg_collect_backup->sin_port =
						htons(backup_port);
				}
				if (debug_flags & DEBUG_FLAG_ROUTE) {
					slurm_print_slurm_addr(
						msg_collect_backup,
						addrbuf, 32);
					info("ROUTE -- message collector "
					     "backup address is %s", addrbuf);
				}
			} else {
				if (debug_flags & DEBUG_FLAG_ROUTE) {
					info("ROUTE -- no message collector "
					     "backup");
				}
			}
			found = true;
			goto clean;
		}

		/* We are not a forwarding node; the first node in this list
		 * will split the forward_list.  We also know that the
		 * forwarding node is not a controller.
		 *
		 * Clean up the parent context. */
		ctldparent = false;
		hostlist_destroy(nodes);
		if (parent)
			free(parent);
		if (backup)
			free(backup);
		nodes = hostlist_copy(hll[i]);
		for (j = 0; j < hl_count; j++) {
			hostlist_destroy(hll[j]);
		}
		xfree(hll);

		/* Set our parent and backup, and continue the search. */
		parent = hostlist_shift(nodes);
		backup = hostlist_nth(nodes, 0);
		if (strcmp(backup, this_node_name) == 0) {
			free(backup);
			backup = NULL;
			if (hostlist_count(nodes) > 1)
				backup = hostlist_nth(nodes, 1);
		}
		parent_port = slurm_conf_get_port(parent);
		if (backup) {
			backup_port = slurm_conf_get_port(backup);
		} else
			backup_port = 0;
	}
clean:
	if (debug_flags & DEBUG_FLAG_ROUTE) {
		if (this_is_collector)
			info("ROUTE -- %s is a collector node",
			     this_node_name);
		else
			info("ROUTE -- %s is a leaf node", this_node_name);
	}
	hostlist_destroy(nodes);
	if (parent)
		free(parent);
	if (backup)
		free(backup);
	for (i = 0; i < hl_count; i++) {
		hostlist_destroy(hll[i]);
	}
	xfree(hll);
}
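/*
 * _set_collectors() above repeatedly splits the node list and descends
 * into the sublist containing this node until this node is the sublist
 * head (a forwarding node).  A minimal stand-alone sketch of that
 * descent over integer node ids follows; splitting into fixed-width
 * chunks is an invented stand-in for route_g_split_hostlist(), and the
 * real code also records a backup, which this toy omits.
 */
#if 0	/* example only -- not compiled into this file */
#include <stdio.h>

#define WIDTH 4		/* sublists per split (hypothetical fanout) */

int main(void)
{
	int self = 13, lo = 0, n = 64;	/* ids 0..63; this node is 13 */
	int parent = -1;		/* -1 stands for the controller */

	for (;;) {
		/* Split [lo, lo+n) into WIDTH chunks; find our chunk head. */
		int size = (n + WIDTH - 1) / WIDTH;
		int head = lo + ((self - lo) / size) * size;

		if (head == self)
			break;	/* we are a forwarding node; stop here */
		parent = head;	/* our chunk head forwards to us */
		lo = head + 1;	/* descend into the rest of our chunk */
		n = size - 1;
	}
	if (parent < 0)
		printf("node %d reports straight to the controller\n", self);
	else
		printf("node %d's collector is node %d\n", self, parent);
	return 0;
}
#endif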
int setup_env(env_t *env, bool preserve_env)
{
	int rc = SLURM_SUCCESS;
	char *dist = NULL, *lllp_dist = NULL;
	char addrbuf[INET_ADDRSTRLEN];
	uint32_t cluster_flags = slurmdb_setup_cluster_flags();

	if (env == NULL)
		return SLURM_ERROR;

	if (env->task_pid &&
	    setenvf(&env->env, "SLURM_TASK_PID", "%d", (int)env->task_pid)) {
		error("Unable to set SLURM_TASK_PID environment variable");
		rc = SLURM_FAILURE;
	}

	if (!preserve_env && env->ntasks) {
		if (setenvf(&env->env, "SLURM_NTASKS", "%d", env->ntasks)) {
			error("Unable to set SLURM_NTASKS "
			      "environment variable");
			rc = SLURM_FAILURE;
		}
		if (setenvf(&env->env, "SLURM_NPROCS", "%d", env->ntasks)) {
			error("Unable to set SLURM_NPROCS "
			      "environment variable");
			rc = SLURM_FAILURE;
		}
	}

	if (env->cpus_per_task &&
	    setenvf(&env->env, "SLURM_CPUS_PER_TASK", "%d",
		    env->cpus_per_task)) {
		error("Unable to set SLURM_CPUS_PER_TASK");
		rc = SLURM_FAILURE;
	}

	if (env->ntasks_per_node &&
	    setenvf(&env->env, "SLURM_NTASKS_PER_NODE", "%d",
		    env->ntasks_per_node)) {
		error("Unable to set SLURM_NTASKS_PER_NODE");
		rc = SLURM_FAILURE;
	}

	if (env->ntasks_per_socket &&
	    setenvf(&env->env, "SLURM_NTASKS_PER_SOCKET", "%d",
		    env->ntasks_per_socket)) {
		error("Unable to set SLURM_NTASKS_PER_SOCKET");
		rc = SLURM_FAILURE;
	}

	if (env->ntasks_per_core &&
	    setenvf(&env->env, "SLURM_NTASKS_PER_CORE", "%d",
		    env->ntasks_per_core)) {
		error("Unable to set SLURM_NTASKS_PER_CORE");
		rc = SLURM_FAILURE;
	}

	if (env->cpus_on_node &&
	    setenvf(&env->env, "SLURM_CPUS_ON_NODE", "%d",
		    env->cpus_on_node)) {
		error("Unable to set SLURM_CPUS_ON_NODE");
		rc = SLURM_FAILURE;
	}

	_set_distribution(env->distribution, &dist, &lllp_dist);
	if (dist) {
		if (setenvf(&env->env, "SLURM_DISTRIBUTION", "%s", dist)) {
			error("Can't set SLURM_DISTRIBUTION env variable");
			rc = SLURM_FAILURE;
		}
	}

	if (env->distribution == SLURM_DIST_PLANE) {
		if (setenvf(&env->env, "SLURM_DIST_PLANESIZE", "%u",
			    env->plane_size)) {
			error("Can't set SLURM_DIST_PLANESIZE "
			      "env variable");
			rc = SLURM_FAILURE;
		}
	}

	if (lllp_dist) {
		if (setenvf(&env->env, "SLURM_DIST_LLLP", "%s", lllp_dist)) {
			error("Can't set SLURM_DIST_LLLP env variable");
			rc = SLURM_FAILURE;
		}
	}

	if (env->cpu_bind_type) {
		char *str_verbose, *str_bind_type, *str_bind_list;
		char *str_bind;
		int len;

		if (env->batch_flag) {
			unsetenvp(env->env, "SBATCH_CPU_BIND_VERBOSE");
			unsetenvp(env->env, "SBATCH_CPU_BIND_TYPE");
			unsetenvp(env->env, "SBATCH_CPU_BIND_LIST");
			unsetenvp(env->env, "SBATCH_CPU_BIND");
		} else {
			unsetenvp(env->env, "SLURM_CPU_BIND_VERBOSE");
			unsetenvp(env->env, "SLURM_CPU_BIND_TYPE");
			unsetenvp(env->env, "SLURM_CPU_BIND_LIST");
			unsetenvp(env->env, "SLURM_CPU_BIND");
		}

		str_verbose = xstrdup("");
		if (env->cpu_bind_type & CPU_BIND_VERBOSE) {
			xstrcat(str_verbose, "verbose");
		} else {
			xstrcat(str_verbose, "quiet");
		}

		str_bind_type = xstrdup("");
		if (env->cpu_bind_type & CPU_BIND_TO_THREADS) {
			xstrcat(str_bind_type, "threads,");
		} else if (env->cpu_bind_type & CPU_BIND_TO_CORES) {
			xstrcat(str_bind_type, "cores,");
		} else if (env->cpu_bind_type & CPU_BIND_TO_SOCKETS) {
			xstrcat(str_bind_type, "sockets,");
		} else if (env->cpu_bind_type & CPU_BIND_TO_LDOMS) {
			xstrcat(str_bind_type, "ldoms,");
		}
		if (env->cpu_bind_type & CPU_BIND_NONE) {
			xstrcat(str_bind_type, "none");
		} else if (env->cpu_bind_type & CPU_BIND_RANK) {
			xstrcat(str_bind_type, "rank");
		} else if (env->cpu_bind_type & CPU_BIND_MAP) {
			xstrcat(str_bind_type, "map_cpu:");
		} else if (env->cpu_bind_type & CPU_BIND_MASK) {
			xstrcat(str_bind_type, "mask_cpu:");
		} else if (env->cpu_bind_type & CPU_BIND_LDRANK) {
			xstrcat(str_bind_type, "rank_ldom");
		} else if (env->cpu_bind_type & CPU_BIND_LDMAP) {
			xstrcat(str_bind_type, "map_ldom:");
		} else if (env->cpu_bind_type & CPU_BIND_LDMASK) {
			xstrcat(str_bind_type, "mask_ldom:");
		}
		len = strlen(str_bind_type);
		if (len) {	/* remove a possible trailing ',' */
			if (str_bind_type[len - 1] == ',')
				str_bind_type[len - 1] = '\0';
		}

		str_bind_list = xstrdup("");
		if (env->cpu_bind) {
			xstrcat(str_bind_list, env->cpu_bind);
		}

		str_bind = xstrdup("");
		xstrcat(str_bind, str_verbose);
		if (str_bind[0] && str_bind_type && str_bind_type[0])
			xstrcatchar(str_bind, ',');
		xstrcat(str_bind, str_bind_type);
		xstrcat(str_bind, str_bind_list);

		if (env->batch_flag) {
			if (setenvf(&env->env, "SBATCH_CPU_BIND_VERBOSE",
				    str_verbose)) {
				error("Unable to set SBATCH_CPU_BIND_VERBOSE");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SBATCH_CPU_BIND_TYPE",
				    str_bind_type)) {
				error("Unable to set SBATCH_CPU_BIND_TYPE");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SBATCH_CPU_BIND_LIST",
				    str_bind_list)) {
				error("Unable to set SBATCH_CPU_BIND_LIST");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SBATCH_CPU_BIND", str_bind)) {
				error("Unable to set SBATCH_CPU_BIND");
				rc = SLURM_FAILURE;
			}
		} else {
			if (setenvf(&env->env, "SLURM_CPU_BIND_VERBOSE",
				    str_verbose)) {
				error("Unable to set SLURM_CPU_BIND_VERBOSE");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SLURM_CPU_BIND_TYPE",
				    str_bind_type)) {
				error("Unable to set SLURM_CPU_BIND_TYPE");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SLURM_CPU_BIND_LIST",
				    str_bind_list)) {
				error("Unable to set SLURM_CPU_BIND_LIST");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SLURM_CPU_BIND", str_bind)) {
				error("Unable to set SLURM_CPU_BIND");
				rc = SLURM_FAILURE;
			}
		}
	}

	if (env->mem_bind_type) {
		char *str_verbose, *str_bind_type, *str_bind_list;
		char *str_bind;

		if (env->batch_flag) {
			unsetenvp(env->env, "SBATCH_MEM_BIND_VERBOSE");
			unsetenvp(env->env, "SBATCH_MEM_BIND_TYPE");
			unsetenvp(env->env, "SBATCH_MEM_BIND_LIST");
			unsetenvp(env->env, "SBATCH_MEM_BIND");
		} else {
			unsetenvp(env->env, "SLURM_MEM_BIND_VERBOSE");
			unsetenvp(env->env, "SLURM_MEM_BIND_TYPE");
			unsetenvp(env->env, "SLURM_MEM_BIND_LIST");
			unsetenvp(env->env, "SLURM_MEM_BIND");
		}

		str_verbose = xstrdup("");
		if (env->mem_bind_type & MEM_BIND_VERBOSE) {
			xstrcat(str_verbose, "verbose");
		} else {
			xstrcat(str_verbose, "quiet");
		}

		str_bind_type = xstrdup("");
		if (env->mem_bind_type & MEM_BIND_NONE) {
			xstrcat(str_bind_type, "none");
		} else if (env->mem_bind_type & MEM_BIND_RANK) {
			xstrcat(str_bind_type, "rank");
		} else if (env->mem_bind_type & MEM_BIND_MAP) {
			xstrcat(str_bind_type, "map_mem:");
		} else if (env->mem_bind_type & MEM_BIND_MASK) {
			xstrcat(str_bind_type, "mask_mem:");
		} else if (env->mem_bind_type & MEM_BIND_LOCAL) {
			xstrcat(str_bind_type, "local");
		}

		str_bind_list = xstrdup("");
		if (env->mem_bind) {
			xstrcat(str_bind_list, env->mem_bind);
		}

		str_bind = xstrdup("");
		xstrcat(str_bind, str_verbose);
		if (str_bind[0]) {	/* add ',' if str_verbose */
			xstrcatchar(str_bind, ',');
		}
		xstrcat(str_bind, str_bind_type);
		xstrcat(str_bind, str_bind_list);

		if (env->batch_flag) {
			if (setenvf(&env->env, "SBATCH_MEM_BIND_VERBOSE",
				    str_verbose)) {
				error("Unable to set SBATCH_MEM_BIND_VERBOSE");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SBATCH_MEM_BIND_TYPE",
				    str_bind_type)) {
				error("Unable to set SBATCH_MEM_BIND_TYPE");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SBATCH_MEM_BIND_LIST",
				    str_bind_list)) {
				error("Unable to set SBATCH_MEM_BIND_LIST");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SBATCH_MEM_BIND", str_bind)) {
				error("Unable to set SBATCH_MEM_BIND");
				rc = SLURM_FAILURE;
			}
		} else {
			if (setenvf(&env->env, "SLURM_MEM_BIND_VERBOSE",
				    str_verbose)) {
				error("Unable to set SLURM_MEM_BIND_VERBOSE");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SLURM_MEM_BIND_TYPE",
				    str_bind_type)) {
				error("Unable to set SLURM_MEM_BIND_TYPE");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SLURM_MEM_BIND_LIST",
				    str_bind_list)) {
				error("Unable to set SLURM_MEM_BIND_LIST");
				rc = SLURM_FAILURE;
			}
			if (setenvf(&env->env, "SLURM_MEM_BIND", str_bind)) {
				error("Unable to set SLURM_MEM_BIND");
				rc = SLURM_FAILURE;
			}
		}
	}

	if (env->overcommit &&
	    (setenvf(&env->env, "SLURM_OVERCOMMIT", "1"))) {
		error("Unable to set SLURM_OVERCOMMIT environment variable");
		rc = SLURM_FAILURE;
	}

	if (env->slurmd_debug &&
	    setenvf(&env->env, "SLURMD_DEBUG", "%d", env->slurmd_debug)) {
		error("Can't set SLURMD_DEBUG environment variable");
		rc = SLURM_FAILURE;
	}

	if (env->labelio &&
	    setenvf(&env->env, "SLURM_LABELIO", "1")) {
		error("Unable to set SLURM_LABELIO environment variable");
		rc = SLURM_FAILURE;
	}

	if (env->select_jobinfo) {
		_setup_particulars(cluster_flags, &env->env,
				   env->select_jobinfo);
	}

	if (env->jobid >= 0) {
		if (setenvf(&env->env, "SLURM_JOB_ID", "%d", env->jobid)) {
			error("Unable to set SLURM_JOB_ID environment");
			rc = SLURM_FAILURE;
		}
		/* and for backward compatibility... */
		if (setenvf(&env->env, "SLURM_JOBID", "%d", env->jobid)) {
			error("Unable to set SLURM_JOBID environment");
			rc = SLURM_FAILURE;
		}
	}

	if (env->nodeid >= 0 &&
	    setenvf(&env->env, "SLURM_NODEID", "%d", env->nodeid)) {
		error("Unable to set SLURM_NODEID environment");
		rc = SLURM_FAILURE;
	}

	if (env->procid >= 0 &&
	    setenvf(&env->env, "SLURM_PROCID", "%d", env->procid)) {
		error("Unable to set SLURM_PROCID environment");
		rc = SLURM_FAILURE;
	}

	if (env->localid >= 0 &&
	    setenvf(&env->env, "SLURM_LOCALID", "%d", env->localid)) {
		error("Unable to set SLURM_LOCALID environment");
		rc = SLURM_FAILURE;
	}

	if (env->stepid >= 0 &&
	    setenvf(&env->env, "SLURM_STEPID", "%d", env->stepid)) {
		error("Unable to set SLURM_STEPID environment");
		rc = SLURM_FAILURE;
	}

	if (!preserve_env && env->nhosts &&
	    setenvf(&env->env, "SLURM_NNODES", "%d", env->nhosts)) {
		error("Unable to set SLURM_NNODES environment var");
		rc = SLURM_FAILURE;
	}

	if (env->nodelist &&
	    setenvf(&env->env, "SLURM_NODELIST", "%s", env->nodelist)) {
		error("Unable to set SLURM_NODELIST environment var.");
		rc = SLURM_FAILURE;
	}

	if (!preserve_env && env->task_count &&
	    setenvf(&env->env, "SLURM_TASKS_PER_NODE", "%s",
		    env->task_count)) {
		error("Can't set SLURM_TASKS_PER_NODE env variable");
		rc = SLURM_FAILURE;
	}

	if (env->comm_port &&
	    setenvf(&env->env, "SLURM_SRUN_COMM_PORT", "%u",
		    env->comm_port)) {
		error("Can't set SLURM_SRUN_COMM_PORT env variable");
		rc = SLURM_FAILURE;
	}

	if (env->cli) {
		slurm_print_slurm_addr(env->cli, addrbuf, INET_ADDRSTRLEN);
		/*
		 * XXX: Eventually, need a function for slurm_addrs that
		 * returns just the IP address (not addr:port)
		 */
		if ((dist = strchr(addrbuf, ':')) != NULL)
			*dist = '\0';
		setenvf(&env->env, "SLURM_LAUNCH_NODE_IPADDR", "%s", addrbuf);
	}

	if (env->sgtids &&
	    setenvf(&env->env, "SLURM_GTIDS", "%s", env->sgtids)) {
		error("Unable to set SLURM_GTIDS environment variable");
		rc = SLURM_FAILURE;
	}

	if (cluster_flags & CLUSTER_FLAG_AIX) {
		char res_env[128];
		char *debug_env = (char *)getenv("SLURM_LL_API_DEBUG");
		int debug_num = 0;

		/* MP_POERESTART_ENV causes a warning message for "poe", but
		 * is needed for "poerestart". Presently we have no means to
		 * determine what command a user will execute. We could
		 * possibly add a "srestart" command which would set
		 * MP_POERESTART_ENV, but that presently seems unnecessary. */
		/* setenvf(&env->env, "MP_POERESTART_ENV", res_env); */
		if (debug_env)
			debug_num = atoi(debug_env);
		snprintf(res_env, sizeof(res_env), "SLURM_LL_API_DEBUG=%d",
			 debug_num);

		/* Required for AIX/POE systems indicating pre-allocation */
		setenvf(&env->env, "LOADLBATCH", "yes");
		setenvf(&env->env, "LOADL_ACTIVE", "3.2.0");
	}

	if (env->pty_port &&
	    setenvf(&env->env, "SLURM_PTY_PORT", "%hu", env->pty_port)) {
		error("Can't set SLURM_PTY_PORT env variable");
		rc = SLURM_FAILURE;
	}
	if (env->ws_col &&
	    setenvf(&env->env, "SLURM_PTY_WIN_COL", "%hu", env->ws_col)) {
		error("Can't set SLURM_PTY_WIN_COL env variable");
		rc = SLURM_FAILURE;
	}
	if (env->ws_row &&
	    setenvf(&env->env, "SLURM_PTY_WIN_ROW", "%hu", env->ws_row)) {
		error("Can't set SLURM_PTY_WIN_ROW env variable");
		rc = SLURM_FAILURE;
	}
	if (env->ckpt_dir &&
	    setenvf(&env->env, "SLURM_CHECKPOINT_IMAGE_DIR", "%s",
		    env->ckpt_dir)) {
		error("Can't set SLURM_CHECKPOINT_IMAGE_DIR env variable");
		rc = SLURM_FAILURE;
	}
	if (env->restart_cnt &&
	    setenvf(&env->env, "SLURM_RESTART_COUNT", "%u",
		    env->restart_cnt)) {
		error("Can't set SLURM_RESTART_COUNT env variable");
		rc = SLURM_FAILURE;
	}

	return rc;
}
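/*
 * Nearly every block in setup_env() above follows one pattern: skip
 * unset fields, format the value into the environment, log and record a
 * failure, but keep going so one bad variable does not abort the rest.
 * A minimal stand-alone sketch of that pattern follows; set_var() is a
 * hypothetical stand-in for setenvf(), and the field values are invented.
 */
#if 0	/* example only -- not compiled into this file */
#include <stdio.h>
#include <stdlib.h>

static int set_var(const char *name, const char *fmt, int val)
{
	char buf[64];

	snprintf(buf, sizeof(buf), fmt, val);
	return setenv(name, buf, 1);	/* 0 on success, like setenvf() */
}

int main(void)
{
	int rc = 0;
	int ntasks = 8, nodeid = 0;	/* stand-ins for env_t fields */

	if (ntasks && set_var("SLURM_NTASKS", "%d", ntasks)) {
		fprintf(stderr, "Unable to set SLURM_NTASKS\n");
		rc = 1;			/* record the failure ... */
	}				/* ... but keep setting   */
	if (nodeid >= 0 && set_var("SLURM_NODEID", "%d", nodeid)) {
		fprintf(stderr, "Unable to set SLURM_NODEID\n");
		rc = 1;
	}
	printf("SLURM_NTASKS=%s\n", getenv("SLURM_NTASKS"));
	return rc;
}
#endif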