Example #1
/* Close the persistent connection */
extern void slurm_persist_conn_members_destroy(
	slurm_persist_conn_t *persist_conn)
{
	if (!persist_conn)
		return;

	persist_conn->inited = false;
	slurm_persist_conn_close(persist_conn);

	if (persist_conn->auth_cred) {
		g_slurm_auth_destroy(persist_conn->auth_cred);
		persist_conn->auth_cred = NULL;
	}
	xfree(persist_conn->cluster_name);
	xfree(persist_conn->rem_host);
}
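
A minimal caller sketch for the function above. Note that members_destroy() releases the connection's members (credential, strings, socket) but not the container itself, so a full teardown pairs it with xfree(); the wrapper name below is hypothetical.

/* Hypothetical wrapper: destroy the members, then the struct itself. */
static void _persist_conn_free(slurm_persist_conn_t *conn)
{
	if (!conn)
		return;
	slurm_persist_conn_members_destroy(conn);	/* close + free members */
	xfree(conn);					/* free the container */
}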
Example #2
extern int slurm_persist_msg_unpack(slurm_persist_conn_t *persist_conn,
				    persist_msg_t *resp_msg, Buf buffer)
{
	int rc;

	xassert(persist_conn);
	xassert(resp_msg);

	if (persist_conn->flags & PERSIST_FLAG_DBD) {
		rc = unpack_slurmdbd_msg((slurmdbd_msg_t *)resp_msg,
					 persist_conn->version,
					 buffer);
	} else {
		slurm_msg_t msg;

		slurm_msg_t_init(&msg);

		msg.protocol_version = persist_conn->version;

		safe_unpack16(&msg.msg_type, buffer);

		rc = unpack_msg(&msg, buffer);

		resp_msg->msg_type = msg.msg_type;
		resp_msg->data = msg.data;
	}

	/* Transfer the auth_cred to the persist_conn in case we later
	 * need it to verify messages on this connection that do not
	 * carry a credential of their own.
	 */
	if (resp_msg->msg_type == REQUEST_PERSIST_INIT) {
		slurm_msg_t *msg = resp_msg->data;
		if (persist_conn->auth_cred)
			g_slurm_auth_destroy(persist_conn->auth_cred);

		persist_conn->auth_cred = msg->auth_cred;
		msg->auth_cred = NULL;
	}

	return rc;
unpack_error:
	return SLURM_ERROR;
}
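
A receive-side sketch for the unpack routine above, assuming slurm_persist_recv_msg() from the same persist-connection API returns one framed message as a Buf (or NULL on failure):

/* Hypothetical receive path: read one framed message, then unpack it. */
static int _persist_recv_and_unpack(slurm_persist_conn_t *conn,
				    persist_msg_t *msg)
{
	Buf buffer = slurm_persist_recv_msg(conn);	/* assumed helper */
	int rc;

	if (!buffer)
		return SLURM_ERROR;
	rc = slurm_persist_msg_unpack(conn, msg, buffer);
	free_buf(buffer);
	return rc;
}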
Example #3
extern void
spawn_req_pack(spawn_req_t *req, Buf buf)
{
	int i, j;
	spawn_subcmd_t *subcmd;
	void *auth_cred;

	auth_cred = g_slurm_auth_create(NULL, 2, slurm_get_auth_info());
	if (auth_cred == NULL) {
		error("authentication: %s",
		      g_slurm_auth_errstr(g_slurm_auth_errno(NULL)) );
		return;
	}
	(void) g_slurm_auth_pack(auth_cred, buf);
	(void) g_slurm_auth_destroy(auth_cred);

	pack32(req->seq, buf);
	packstr(req->from_node, buf);
	pack32(req->subcmd_cnt, buf);
	pack32(req->preput_cnt, buf);
	for (i = 0; i < req->preput_cnt; i ++) {
		packstr(req->pp_keys[i], buf);
		packstr(req->pp_vals[i], buf);
	}
	for (i = 0; i < req->subcmd_cnt; i ++) {
		subcmd = req->subcmds[i];

		packstr(subcmd->cmd, buf);
		pack32(subcmd->max_procs, buf);
		pack32(subcmd->argc, buf);
		for (j = 0; j < subcmd->argc; j ++) {
			packstr(subcmd->argv[j], buf);
		}
		pack32(subcmd->info_cnt, buf);
		for (j = 0; j < subcmd->info_cnt; j ++) {
			packstr(subcmd->info_keys[j], buf);
			packstr(subcmd->info_vals[j], buf);
		}
	}
}
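
A sender-side sketch. spawn_req_pack() returns void and bails out silently if the auth credential cannot be created, so a caller can only detect that failure indirectly; the size_buf() check below exploits the fact that nothing is packed on the early return, and is an assumption rather than part of the original API.

/* Hypothetical sender: serialize the request, auth credential first. */
static int _spawn_req_serialize(spawn_req_t *req, Buf *buf_ptr)
{
	Buf buf = init_buf(1024);

	spawn_req_pack(req, buf);
	if (size_buf(buf) == 0) {	/* auth create failed; nothing packed */
		free_buf(buf);
		return SLURM_ERROR;
	}
	*buf_ptr = buf;			/* caller sends and frees the Buf */
	return SLURM_SUCCESS;
}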
Example #4
/*
 * slurm_load_slurmd_status - issue RPC to get the status of the slurmd
 *	daemon on this machine
 * OUT slurmd_status_ptr - place to store slurmd status information
 * RET 0 or -1 on error
 * NOTE: free the response using slurm_free_slurmd_status()
 */
extern int
slurm_load_slurmd_status(slurmd_status_t **slurmd_status_ptr)
{
	int rc;
	slurm_msg_t req_msg;
	slurm_msg_t resp_msg;
	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
	char *this_addr;

	slurm_msg_t_init(&req_msg);
	slurm_msg_t_init(&resp_msg);

	if (cluster_flags & CLUSTER_FLAG_MULTSD) {
		if ((this_addr = getenv("SLURMD_NODENAME"))) {
			slurm_conf_get_addr(this_addr, &req_msg.address);
		} else {
			this_addr = "localhost";
			slurm_set_addr(&req_msg.address,
				       (uint16_t)slurm_get_slurmd_port(),
				       this_addr);
		}
	} else {
		char this_host[256];

		/*
		 *  Set request message address to slurmd on localhost
		 */
		gethostname_short(this_host, sizeof(this_host));
		this_addr = slurm_conf_get_nodeaddr(this_host);
		if (this_addr == NULL)
			this_addr = xstrdup("localhost");
		slurm_set_addr(&req_msg.address,
			       (uint16_t)slurm_get_slurmd_port(),
			       this_addr);
		xfree(this_addr);
	}
	req_msg.msg_type = REQUEST_DAEMON_STATUS;
	req_msg.data     = NULL;

	rc = slurm_send_recv_node_msg(&req_msg, &resp_msg, 0);

	if ((rc != 0) || !resp_msg.auth_cred) {
		error("slurm_slurmd_info: %m");
		if (resp_msg.auth_cred)
			g_slurm_auth_destroy(resp_msg.auth_cred);
		return SLURM_ERROR;
	}
	if (resp_msg.auth_cred)
		g_slurm_auth_destroy(resp_msg.auth_cred);

	switch (resp_msg.msg_type) {
	case RESPONSE_SLURMD_STATUS:
		*slurmd_status_ptr = (slurmd_status_t *) resp_msg.data;
		break;
	case RESPONSE_SLURM_RC:
		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
		slurm_free_return_code_msg(resp_msg.data);
		if (rc)
			slurm_seterrno_ret(rc);
		break;
	default:
		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
		break;
	}

	return SLURM_PROTOCOL_SUCCESS;
}
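
A minimal client sketch for the call above; the pid and version fields are assumed from the slurmd_status_t definition in slurm.h and are printed here only for illustration.

/* Hypothetical caller: query the local slurmd and free the response. */
static int _print_slurmd_status(void)
{
	slurmd_status_t *status = NULL;

	if (slurm_load_slurmd_status(&status) != SLURM_PROTOCOL_SUCCESS) {
		error("slurm_load_slurmd_status: %m");
		return SLURM_ERROR;
	}
	info("slurmd pid=%u version=%s", status->pid, status->version);
	slurm_free_slurmd_status(status);	/* per the NOTE above */
	return SLURM_SUCCESS;
}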
Example #5
File: req.c Project: VURM/slurm
static void *
_handle_accept(void *arg)
{
	struct request_params *param = (struct request_params *)arg;
	int fd = param->fd;
	slurmd_job_t *job = param->job;
	int req;
	int len;
	Buf buffer;
	void *auth_cred;
	int rc;
	uid_t uid;
	gid_t gid;

	debug3("Entering _handle_accept (new thread)");
	xfree(arg);

	safe_read(fd, &req, sizeof(int));
	if (req != REQUEST_CONNECT) {
		error("First message must be REQUEST_CONNECT");
		goto fail;
	}

	safe_read(fd, &len, sizeof(int));
	buffer = init_buf(len);
	safe_read(fd, get_buf_data(buffer), len);

	/* Unpack and verify the auth credential */
	auth_cred = g_slurm_auth_unpack(buffer);
	if (auth_cred == NULL) {
		error("Unpacking authentication credential: %s",
		      g_slurm_auth_errstr(g_slurm_auth_errno(NULL)));
		free_buf(buffer);
		goto fail;
	}
	rc = g_slurm_auth_verify(auth_cred, NULL, 2, NULL);
	if (rc != SLURM_SUCCESS) {
		error("Verifying authentication credential: %s",
		      g_slurm_auth_errstr(g_slurm_auth_errno(auth_cred)));
		(void) g_slurm_auth_destroy(auth_cred);
		free_buf(buffer);
		goto fail;
	}

	/* Get the uid & gid from the credential, then destroy it. */
	uid = g_slurm_auth_get_uid(auth_cred, NULL);
	gid = g_slurm_auth_get_gid(auth_cred, NULL);
	debug3("  Identity: uid=%d, gid=%d", uid, gid);
	g_slurm_auth_destroy(auth_cred);
	free_buf(buffer);

	rc = SLURM_SUCCESS;
	safe_write(fd, &rc, sizeof(int));

	while (1) {
		rc = _handle_request(fd, job, uid, gid);
		if (rc != SLURM_SUCCESS)
			break;
	}

	if (close(fd) == -1)
		error("Closing accepted fd: %m");

	pthread_mutex_lock(&message_lock);
	message_connections--;
	pthread_cond_signal(&message_cond);
	pthread_mutex_unlock(&message_lock);

	debug3("Leaving  _handle_accept");
	return NULL;

fail:
	rc = SLURM_FAILURE;
	safe_write(fd, &rc, sizeof(int));
rwfail:
	if (close(fd) == -1)
		error("Closing accepted fd after error: %m");
	debug("Leaving  _handle_accept on an error");
	return NULL;
}
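
A dispatch sketch for the handler above, assuming the listen/accept plumbing lives elsewhere: each accepted descriptor gets its own detached thread, and message_connections is incremented before the thread starts so the decrement at the end of _handle_accept() balances it. The struct request_params layout is inferred from the casts in the handler; the counter discipline is an assumption.

/* Hypothetical dispatcher: one detached thread per accepted fd. */
static void _spawn_handle_accept(int fd, slurmd_job_t *job)
{
	pthread_t tid;
	pthread_attr_t attr;
	struct request_params *param = xmalloc(sizeof(*param));

	param->fd = fd;
	param->job = job;

	pthread_mutex_lock(&message_lock);
	message_connections++;
	pthread_mutex_unlock(&message_lock);

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	if (pthread_create(&tid, &attr, _handle_accept, param)) {
		error("pthread_create: %m");
		pthread_mutex_lock(&message_lock);
		message_connections--;
		pthread_mutex_unlock(&message_lock);
		xfree(param);
		close(fd);
	}
	pthread_attr_destroy(&attr);
}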
Example #6
/*
 * slurm_get_node_energy - issue RPC to get the energy data of all
 * configured sensors on the target machine
 * IN  host  - name of node to query, NULL if localhost
 * IN  delta - use cached data if it is newer than this many seconds
 * OUT sensor_cnt - number of sensors
 * OUT energy - array of acct_gather_energy_t structures on success or
 *              NULL otherwise
 * RET 0 on success or a slurm error code
 * NOTE: free the response using xfree
 */
extern int slurm_get_node_energy(char *host, uint16_t delta,
				 uint16_t *sensor_cnt,
				 acct_gather_energy_t **energy)
{
	int rc;
	slurm_msg_t req_msg;
	slurm_msg_t resp_msg;
	acct_gather_energy_req_msg_t req;
	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
	char *this_addr;

	xassert(sensor_cnt);
	xassert(energy);

	*sensor_cnt = 0;
	*energy = NULL;

	slurm_msg_t_init(&req_msg);
	slurm_msg_t_init(&resp_msg);

	if (host)
		slurm_conf_get_addr(host, &req_msg.address);
	else if (cluster_flags & CLUSTER_FLAG_MULTSD) {
		if ((this_addr = getenv("SLURMD_NODENAME"))) {
			slurm_conf_get_addr(this_addr, &req_msg.address);
		} else {
			this_addr = "localhost";
			slurm_set_addr(&req_msg.address,
				       (uint16_t)slurm_get_slurmd_port(),
				       this_addr);
		}
	} else {
		char this_host[256];
		/*
		 *  Set request message address to slurmd on localhost
		 */
		gethostname_short(this_host, sizeof(this_host));
		this_addr = slurm_conf_get_nodeaddr(this_host);
		if (this_addr == NULL)
			this_addr = xstrdup("localhost");
		slurm_set_addr(&req_msg.address,
			       (uint16_t)slurm_get_slurmd_port(),
			       this_addr);
		xfree(this_addr);
	}

	req.delta        = delta;
	req_msg.msg_type = REQUEST_ACCT_GATHER_ENERGY;
	req_msg.data     = &req;

	rc = slurm_send_recv_node_msg(&req_msg, &resp_msg, 0);

	if (rc != 0 || !resp_msg.auth_cred) {
		error("slurm_get_node_energy: %m");
		if (resp_msg.auth_cred)
			g_slurm_auth_destroy(resp_msg.auth_cred);
		return SLURM_ERROR;
	}
	if (resp_msg.auth_cred)
		g_slurm_auth_destroy(resp_msg.auth_cred);
	switch (resp_msg.msg_type) {
	case RESPONSE_ACCT_GATHER_ENERGY:
		*sensor_cnt = ((acct_gather_node_resp_msg_t *)
			       resp_msg.data)->sensor_cnt;
		*energy = ((acct_gather_node_resp_msg_t *)
			   resp_msg.data)->energy;
		((acct_gather_node_resp_msg_t *) resp_msg.data)->energy = NULL;
		slurm_free_acct_gather_node_resp_msg(resp_msg.data);
		break;
	case RESPONSE_SLURM_RC:
	        rc = ((return_code_msg_t *) resp_msg.data)->return_code;
		slurm_free_return_code_msg(resp_msg.data);
		if (rc)
			slurm_seterrno_ret(rc);
		break;
	default:
		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
		break;
	}

	return SLURM_PROTOCOL_SUCCESS;
}
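
A caller sketch; the consumed_energy field is assumed from the acct_gather_energy_t definition, and the 10-second delta is arbitrary.

/* Hypothetical caller: dump per-sensor energy for one node. */
static void _dump_node_energy(char *host)
{
	acct_gather_energy_t *energy = NULL;
	uint16_t sensor_cnt = 0, i;

	if (slurm_get_node_energy(host, 10, &sensor_cnt, &energy)) {
		error("slurm_get_node_energy: %m");
		return;
	}
	for (i = 0; i < sensor_cnt; i++)
		info("sensor %u: %"PRIu64" joules consumed",
		     i, energy[i].consumed_energy);
	xfree(energy);	/* per the NOTE above */
}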
Example #7
/* Wait for barrier and get full PMI Keyval space data */
int  slurm_get_kvs_comm_set(struct kvs_comm_set **kvs_set_ptr,
		int pmi_rank, int pmi_size)
{
	int rc, srun_fd, retries = 0, timeout = 0;
	slurm_msg_t msg_send, msg_rcv;
	slurm_addr_t slurm_addr, srun_reply_addr;
	char hostname[64];
	uint16_t port;
	kvs_get_msg_t data;
	char *env_pmi_ifhn;

	if (kvs_set_ptr == NULL)
		return EINVAL;
	*kvs_set_ptr = NULL;	/* initialization */

	if ((rc = _get_addr()) != SLURM_SUCCESS) {
		error("_get_addr: %m");
		return rc;
	}

	_set_pmi_time();

	if (pmi_fd < 0) {
		if ((pmi_fd = slurm_init_msg_engine_port(0)) < 0) {
			error("slurm_init_msg_engine_port: %m");
			return SLURM_ERROR;
		}
		fd_set_blocking(pmi_fd);
	}
	if (slurm_get_stream_addr(pmi_fd, &slurm_addr) < 0) {
		error("slurm_get_stream_addr: %m");
		return SLURM_ERROR;
	}
	/* hostname is not set here, so slurm_get_addr fails
	slurm_get_addr(&slurm_addr, &port, hostname, sizeof(hostname)); */
	port = ntohs(slurm_addr.sin_port);
	if ((env_pmi_ifhn = getenv("SLURM_PMI_RESP_IFHN"))) {
		strncpy(hostname, env_pmi_ifhn, sizeof(hostname));
		hostname[sizeof(hostname)-1] = 0;
	} else
		gethostname_short(hostname, sizeof(hostname));

	data.task_id = pmi_rank;
	data.size = pmi_size;
	data.port = port;
	data.hostname = hostname;
	slurm_msg_t_init(&msg_send);
	slurm_msg_t_init(&msg_rcv);
	msg_send.address = srun_addr;
	msg_send.msg_type = PMI_KVS_GET_REQ;
	msg_send.data = &data;

	/* Send the RPC to the local srun communication manager.
	 * Since srun can receive thousands of messages at once and
	 * refuse some connections, retry as needed. Wait until all
	 * key-pairs have been sent by all tasks, then spread out the
	 * messages by task rank. Also increase the message timeout
	 * for large task counts, since the srun command can become
	 * very overloaded (the default timeout is 10 secs).
	 */
	_delay_rpc(pmi_rank, pmi_size);
	if      (pmi_size > 4000)	/* 240 secs */
		timeout = slurm_get_msg_timeout() * 24000;
	else if (pmi_size > 1000)	/* 120 secs */
		timeout = slurm_get_msg_timeout() * 12000;
	else if (pmi_size > 100)	/* 60 secs */
		timeout = slurm_get_msg_timeout() * 6000;
	else if (pmi_size > 10)		/* 20 secs */
		timeout = slurm_get_msg_timeout() * 2000;

	while (slurm_send_recv_rc_msg_only_one(&msg_send, &rc, timeout) < 0) {
		if (retries++ > MAX_RETRIES) {
			error("slurm_get_kvs_comm_set: %m");
			return SLURM_ERROR;
		} else
			debug("get kvs retry %d", retries);
		_delay_rpc(pmi_rank, pmi_size);
	}
	if (rc != SLURM_SUCCESS) {
		error("slurm_get_kvs_comm_set error_code=%d", rc);
		return rc;
	}

	/* get the message after all tasks reach the barrier */
	srun_fd = slurm_accept_msg_conn(pmi_fd, &srun_reply_addr);
	if (srun_fd < 0) {
		error("slurm_accept_msg_conn: %m");
		return errno;
	}

	while ((rc = slurm_receive_msg(srun_fd, &msg_rcv, timeout)) != 0) {
		if (errno == EINTR)
			continue;
		error("slurm_receive_msg: %m");
		slurm_close(srun_fd);
		return errno;
	}
	if (msg_rcv.auth_cred)
		(void)g_slurm_auth_destroy(msg_rcv.auth_cred);

	if (msg_rcv.msg_type != PMI_KVS_GET_RESP) {
		error("slurm_get_kvs_comm_set msg_type=%d", msg_rcv.msg_type);
		slurm_close(srun_fd);
		return SLURM_UNEXPECTED_MSG_ERROR;
	}
	if (slurm_send_rc_msg(&msg_rcv, SLURM_SUCCESS) < 0)
		error("slurm_send_rc_msg: %m");

	slurm_close(srun_fd);
	*kvs_set_ptr = msg_rcv.data;

	rc = _forward_comm_set(*kvs_set_ptr);
	return rc;
}
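
A PMI-side caller sketch; slurm_free_kvs_comm_set() is assumed to be the matching free helper for the returned set.

/* Hypothetical caller: block at the barrier, then walk the KVS. */
static int _fetch_kvs(int pmi_rank, int pmi_size)
{
	struct kvs_comm_set *kvs_set = NULL;
	int rc;

	rc = slurm_get_kvs_comm_set(&kvs_set, pmi_rank, pmi_size);
	if (rc != SLURM_SUCCESS)
		return rc;
	/* ... read key-value pairs out of kvs_set here ... */
	slurm_free_kvs_comm_set(kvs_set);	/* assumed free helper */
	return SLURM_SUCCESS;
}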
Example #8
extern int
spawn_req_unpack(spawn_req_t **req_ptr, Buf buf)
{
	spawn_req_t *req = NULL;
	spawn_subcmd_t *subcmd = NULL;
	uint32_t temp32;
	int i, j;
	void *auth_cred;
	uid_t auth_uid, my_uid;

	auth_cred = g_slurm_auth_unpack(buf);
	if (auth_cred == NULL) {
		error("authentication: %s",
		      g_slurm_auth_errstr(g_slurm_auth_errno(NULL)) );
		return SLURM_ERROR;
	}
	auth_uid = g_slurm_auth_get_uid(auth_cred, NULL);
	(void) g_slurm_auth_destroy(auth_cred);
	my_uid = getuid();
	if ((auth_uid != 0) && (auth_uid != my_uid)) {
		error("mpi/pmi2: spawn request apparently from uid %u",
		      (uint32_t) auth_uid);
		return SLURM_ERROR;
	}

	req = xmalloc(sizeof(spawn_req_t));

	safe_unpack32(&req->seq, buf);
	safe_unpackstr_xmalloc(&req->from_node, &temp32, buf);
	safe_unpack32(&req->subcmd_cnt, buf);
	/* subcmd_cnt must be greater than 0 */
	req->subcmds = xmalloc(req->subcmd_cnt * sizeof(spawn_subcmd_t *));
	safe_unpack32(&req->preput_cnt, buf);
	if (req->preput_cnt > 0) {
		req->pp_keys = xmalloc(req->preput_cnt * sizeof(char *));
		req->pp_vals = xmalloc(req->preput_cnt * sizeof(char *));
		for (i = 0; i < req->preput_cnt; i ++) {
			safe_unpackstr_xmalloc(&req->pp_keys[i], &temp32, buf);
			safe_unpackstr_xmalloc(&req->pp_vals[i], &temp32, buf);
		}
	}
	for (i = 0; i < req->subcmd_cnt; i ++) {
		req->subcmds[i] = spawn_subcmd_new();
		subcmd = req->subcmds[i];

		safe_unpackstr_xmalloc(&(subcmd->cmd), &temp32, buf);
		safe_unpack32(&(subcmd->max_procs), buf);
		safe_unpack32(&(subcmd->argc), buf);
		if (subcmd->argc > 0) {
			subcmd->argv = xmalloc(subcmd->argc * sizeof(char *));
			for (j = 0; j < subcmd->argc; j ++) {
				safe_unpackstr_xmalloc(&(subcmd->argv[j]),
						       &temp32, buf);
			}
		}
		safe_unpack32(&(subcmd->info_cnt), buf);
		if (subcmd->info_cnt > 0) {
			subcmd->info_keys = xmalloc(subcmd->info_cnt *
						    sizeof(char *));
			subcmd->info_vals = xmalloc(subcmd->info_cnt *
						    sizeof(char *));
			for (j = 0; j < subcmd->info_cnt; j ++) {
				safe_unpackstr_xmalloc(&(subcmd->info_keys[j]),
						       &temp32, buf);
				safe_unpackstr_xmalloc(&(subcmd->info_vals[j]),
						       &temp32, buf);
			}
		}
	}
	*req_ptr = req;
	return SLURM_SUCCESS;

unpack_error:
	spawn_req_free(req);
	return SLURM_ERROR;
}
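
The receive-side counterpart to Example #3, as a sketch: wrap an incoming byte buffer with create_buf(), which takes ownership of the data pointer, so a single free_buf() releases both the Buf and the data.

/* Hypothetical receiver: wrap raw bytes and unpack a spawn request. */
static int _spawn_req_recv(char *data, uint32_t len, spawn_req_t **req_ptr)
{
	Buf buf = create_buf(data, len);	/* takes ownership of data */
	int rc;

	if (!buf)
		return SLURM_ERROR;
	rc = spawn_req_unpack(req_ptr, buf);
	free_buf(buf);				/* frees data too */
	return rc;
}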
Example #9
/*
 * Connect to a slurmstepd process by way of its UNIX domain socket.
 *
 * Both "directory" and "nodename" may be null, in which case stepd_connect
 * will attempt to determine them on its own.  If you are using multiple
 * slurmd on one node (unusual outside of development environments), you
 * will get one of the local NodeNames more-or-less at random.
 *
 * Returns a socket descriptor for the opened socket on success,
 * and -1 on error.
 */
int
stepd_connect(const char *directory, const char *nodename,
	      uint32_t jobid, uint32_t stepid)
{
	int req = REQUEST_CONNECT;
	int fd = -1;
	int rc;
	void *auth_cred;
	Buf buffer;
	int len;

	if (nodename == NULL) {
		if (!(nodename = _guess_nodename()))
			return -1;
	}
	if (directory == NULL) {
		slurm_ctl_conf_t *cf;

		cf = slurm_conf_lock();
		directory = slurm_conf_expand_slurmd_path(
			cf->slurmd_spooldir, nodename);
		slurm_conf_unlock();
	}

	buffer = init_buf(0);
	/* Create an auth credential */
	auth_cred = g_slurm_auth_create(NULL, 2, NULL);
	if (auth_cred == NULL) {
		error("Creating authentication credential: %s",
		      g_slurm_auth_errstr(g_slurm_auth_errno(NULL)));
		slurm_seterrno(SLURM_PROTOCOL_AUTHENTICATION_ERROR);
		goto fail1;
	}

	/* Pack the auth credential; report any pack error while the
	 * credential (and its error state) is still valid, then
	 * destroy it. */
	rc = g_slurm_auth_pack(auth_cred, buffer);
	if (rc) {
		error("Packing authentication credential: %s",
		      g_slurm_auth_errstr(g_slurm_auth_errno(auth_cred)));
		(void) g_slurm_auth_destroy(auth_cred);
		slurm_seterrno(SLURM_PROTOCOL_AUTHENTICATION_ERROR);
		goto fail1;
	}
	(void) g_slurm_auth_destroy(auth_cred);

	/* Connect to the step */
	fd = _step_connect(directory, nodename, jobid, stepid);
	if (fd == -1)
		goto fail1;

	safe_write(fd, &req, sizeof(int));
	len = size_buf(buffer);
	safe_write(fd, &len, sizeof(int));
	safe_write(fd, get_buf_data(buffer), len);

	safe_read(fd, &rc, sizeof(int));
	if (rc < 0) {
		error("slurmstepd refused authentication: %m");
		slurm_seterrno(SLURM_PROTOCOL_AUTHENTICATION_ERROR);
		goto rwfail;
	}

	free_buf(buffer);
	return fd;

rwfail:
	close(fd);
fail1:
	free_buf(buffer);
	return -1;
}
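
A caller sketch against this (older) signature; stepd_signal_container() is assumed as a follow-up request from the same stepd API.

/* Hypothetical caller: connect to a step and send it a signal. */
static int _signal_step(uint32_t jobid, uint32_t stepid, int signo)
{
	int fd = stepd_connect(NULL, NULL, jobid, stepid);
	int rc;

	if (fd == -1)
		return SLURM_ERROR;
	rc = stepd_signal_container(fd, signo);	/* assumed stepd helper */
	close(fd);
	return rc;
}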
Example #10
/*
 * Connect to a slurmstepd process by way of its UNIX domain socket.
 *
 * Both "directory" and "nodename" may be null, in which case stepd_connect
 * will attempt to determine them on its own.  If you are using multiple
 * slurmd on one node (unusual outside of development environments), you
 * will get one of the local NodeNames more-or-less at random.
 *
 * Returns a socket descriptor for the opened socket on success,
 * and -1 on error.
 */
int
stepd_connect(const char *directory, const char *nodename,
	      uint32_t jobid, uint32_t stepid, uint16_t *protocol_version)
{
	int req = REQUEST_CONNECT;
	int fd = -1;
	int rc;
	void *auth_cred;
	Buf buffer;
	int len;

	*protocol_version = 0;

	if (nodename == NULL) {
		if (!(nodename = _guess_nodename()))
			return -1;
	}
	if (directory == NULL) {
		slurm_ctl_conf_t *cf;

		cf = slurm_conf_lock();
		directory = slurm_conf_expand_slurmd_path(
			cf->slurmd_spooldir, nodename);
		slurm_conf_unlock();
	}

	buffer = init_buf(0);
	/* Create an auth credential */
	auth_cred = g_slurm_auth_create(NULL, 2, NULL);
	if (auth_cred == NULL) {
		error("Creating authentication credential: %s",
		      g_slurm_auth_errstr(g_slurm_auth_errno(NULL)));
		slurm_seterrno(SLURM_PROTOCOL_AUTHENTICATION_ERROR);
		goto fail1;
	}

	/* Pack the auth credential; report any pack error while the
	 * credential (and its error state) is still valid, then
	 * destroy it. */
	rc = g_slurm_auth_pack(auth_cred, buffer);
	if (rc) {
		error("Packing authentication credential: %s",
		      g_slurm_auth_errstr(g_slurm_auth_errno(auth_cred)));
		(void) g_slurm_auth_destroy(auth_cred);
		slurm_seterrno(SLURM_PROTOCOL_AUTHENTICATION_ERROR);
		goto fail1;
	}
	(void) g_slurm_auth_destroy(auth_cred);

	/* Connect to the step */
	fd = _step_connect(directory, nodename, jobid, stepid);
	if (fd == -1)
		goto fail1;

	safe_write(fd, &req, sizeof(int));
	len = size_buf(buffer);
	safe_write(fd, &len, sizeof(int));
	safe_write(fd, get_buf_data(buffer), len);

	safe_read(fd, &rc, sizeof(int));
	if (rc < 0) {
		error("slurmstepd refused authentication: %m");
		slurm_seterrno(SLURM_PROTOCOL_AUTHENTICATION_ERROR);
		goto rwfail;
	} else if (rc)
		*protocol_version = rc;
	else {
		/* On versions of Slurm older than 14.11, SLURM_SUCCESS
		 * was returned here instead of the protocol version.
		 * This fallback can be removed once we are two versions
		 * past 14.11.
		 */
		slurmstepd_info_t *stepd_info = stepd_get_info(fd);
		*protocol_version = stepd_info->protocol_version;
		xfree(stepd_info);
	}

	free_buf(buffer);
	return fd;

rwfail:
	close(fd);
fail1:
	free_buf(buffer);
	return -1;
}
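
The same caller sketch against the newer signature: the negotiated protocol version comes back through the out-parameter and must be threaded through follow-up stepd_* calls (the stepd_signal_container() variant taking a protocol_version argument is assumed).

/* Hypothetical caller: connect, capture the protocol version, signal. */
static int _signal_step(uint32_t jobid, uint32_t stepid, int signo)
{
	uint16_t protocol_version = 0;
	int fd = stepd_connect(NULL, NULL, jobid, stepid, &protocol_version);
	int rc;

	if (fd == -1)
		return SLURM_ERROR;
	rc = stepd_signal_container(fd, protocol_version, signo); /* assumed */
	close(fd);
	return rc;
}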