Example #1
/*---------------------------------------------------------------------------*/
int xio_accept(struct xio_session *session,
	       const char **portals_array,
	       size_t portals_array_len,
	       void *user_context,
	       size_t user_context_len)
{
	int			retval = 0;
	struct xio_msg		*msg;
	struct xio_task		*task;

	msg = xio_session_write_accept_rsp(session,
					   XIO_ACTION_ACCEPT,
					   portals_array,
					   portals_array_len,
					   user_context,
					   user_context_len);
	if (!msg) {
		ERROR_LOG("setup request creation failed\n");
		return -1;
	}

	msg->request	= session->setup_req;
	msg->type	= (enum xio_msg_type)XIO_SESSION_SETUP_RSP;

	task = container_of(msg->request,
			    struct xio_task, imsg);

	if (portals_array_len != 0) {
		/* server side state is changed to ACCEPTED; it will be moved
		 * to the ONLINE state when the first "hello" message arrives
		 */
		session->state = XIO_SESSION_STATE_ACCEPTED;
		/* temporarily disable teardown */
		session->disable_teardown = 1;
		TRACE_LOG("session state is now ACCEPT. session:%p\n",
			  session);
	} else {
		/* initialize credits */
		task->connection->peer_credits_msgs =
					session->peer_rcv_queue_depth_msgs;
		task->connection->credits_msgs	= 0;
		task->connection->peer_credits_bytes =
					session->peer_rcv_queue_depth_bytes;
		task->connection->credits_bytes	= 0;

		/* server side state is changed to ONLINE, immediately  */
		session->state = XIO_SESSION_STATE_ONLINE;
		TRACE_LOG("session state changed to ONLINE. session:%p\n",
			  session);
	}
	retval = xio_connection_send(task->connection, msg);
	if (retval && retval != -EAGAIN) {
		ERROR_LOG("failed to send message. errno:%d\n", -retval);
		xio_set_error(-retval);
		return -1;
	}

	return 0;
}
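A minimal usage sketch (not part of the library source): xio_accept() is normally invoked from an on_new_session callback, mirroring the auto-accept path shown later in xio_on_setup_req_recv(). The callback name is hypothetical; the signature follows the ses_ops.on_new_session hook used throughout this section.

static int my_on_new_session(struct xio_session *session,
			     struct xio_new_session_req *req,
			     void *cb_user_context)
{
	/* accept on the listening portal itself: no portals array
	 * and no private user context
	 */
	if (xio_accept(session, NULL, 0, NULL, 0)) {
		ERROR_LOG("xio_accept failed. session:%p\n", session);
		return -1;
	}
	return 0;
}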
Example #2
/*---------------------------------------------------------------------------*/
int xio_redirect(struct xio_session *session,
		 const char **portals_array,
		 size_t portals_array_len)
{
	int			retval = 0;
	struct xio_msg		*msg;
	struct xio_task		*task;

	if (portals_array_len == 0 || !portals_array) {
		xio_set_error(EINVAL);
		ERROR_LOG("portals array for redirect is mandatory\n");
		return -1;
	}

	msg = xio_session_write_accept_rsp(session,
					   XIO_ACTION_REDIRECT,
					   portals_array,
					   portals_array_len,
					   NULL,
					   0);
	if (unlikely(!msg)) {
		ERROR_LOG("setup request creation failed\n");
		return -1;
	}
	if (portals_array_len != 0) {
		/* server side state is changed to REDIRECTED */
		session->state = XIO_SESSION_STATE_REDIRECTED;
		TRACE_LOG("session state is now REDIRECTED. session:%p\n",
			  session);
	}
	msg->request = session->setup_req;
	msg->type    = (enum xio_msg_type)XIO_SESSION_SETUP_RSP;

	task = container_of(msg->request,
			    struct xio_task, imsg);

	retval = xio_connection_send(task->connection, msg);
	if (retval && retval != -EAGAIN) {
		ERROR_LOG("failed to send message errno:%d\n", -retval);
		xio_set_error(-retval);
		return -1;
	}

	return 0;
}
Example #3
/*---------------------------------------------------------------------------*/
struct xio_mr *xio_reg_mr(void *addr, size_t length)
{
	if (addr == NULL) {
		xio_set_error(EINVAL);
		return NULL;
	}

	return &dummy_mr;
}
Example #4
/*---------------------------------------------------------------------------*/
int xio_get_opt(void *xio_obj, int level, int optname,
		void *optval, int *optlen)
{
	static struct xio_transport *rdma_transport;
	static struct xio_transport *tcp_transport;

	switch (level) {
	case XIO_OPTLEVEL_ACCELIO:
		return xio_general_get_opt(xio_obj, optname, optval, optlen);
	case XIO_OPTLEVEL_RDMA:
		if (!rdma_transport) {
			rdma_transport = xio_get_transport("rdma");
			if (!rdma_transport) {
				xio_set_error(EFAULT);
				return -1;
			}
		}
		if (!rdma_transport->get_opt)
			break;
		return rdma_transport->get_opt(xio_obj,
					       optname, optval, optlen);
	case XIO_OPTLEVEL_TCP:
		if (!tcp_transport) {
			tcp_transport = xio_get_transport("tcp");
			if (!tcp_transport) {
				xio_set_error(EFAULT);
				return -1;
			}
		}
		if (!tcp_transport->get_opt)
			break;
		return tcp_transport->get_opt(xio_obj,
					      optname, optval, optlen);
	default:
		break;
	}

	xio_set_error(XIO_E_NOT_SUPPORTED);
	return -1;
}
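A short usage sketch, assuming the XIO_OPTLEVEL_ACCELIO level handled by xio_general_get_opt() later in this section; for that level the xio_obj argument is unused, so NULL is passed. The wrapper name is hypothetical.

static void query_log_level(void)
{
	enum xio_log_level level;
	int optlen = sizeof(level);

	if (xio_get_opt(NULL, XIO_OPTLEVEL_ACCELIO, XIO_OPTNAME_LOG_LEVEL,
			&level, &optlen) == 0)
		TRACE_LOG("current log level:%d\n", level);
}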
Example #5
/*---------------------------------------------------------------------------*/
int xio_ctx_del_work(struct xio_context *ctx,
		     xio_ctx_work_t *work)
{
	int retval;

	retval = xio_workqueue_del_work(ctx->workqueue, work);
	if (retval) {
		xio_set_error(retval);
		ERROR_LOG("xio_workqueue_del_work failed. err=%d\n", retval);
	}

	return retval;
}
Example #6
/*---------------------------------------------------------------------------*/
int xio_query_context(struct xio_context *ctx,
		      struct xio_context_attr *attr,
		      int attr_mask)
{
	if (!ctx || !attr) {
		xio_set_error(EINVAL);
		ERROR_LOG("invalid parameters\n");
		return -1;
	}

	if (attr_mask & XIO_CONTEXT_ATTR_USER_CTX)
		attr->user_context = ctx->user_context;

	return 0;
}
Example #7
/*---------------------------------------------------------------------------*/
int xio_ctx_add_work(struct xio_context *ctx,
		     void *data,
		     void (*function)(void *data),
		     xio_ctx_work_t *work)
{
	int retval;

	retval = xio_workqueue_add_work(ctx->workqueue,
					data, function, work);
	if (retval) {
		xio_set_error(retval);
		ERROR_LOG("xio_workqueue_add_work failed. err=%d\n", retval);
	}

	return retval;
}
Example #8
/*---------------------------------------------------------------------------*/
int xio_ctx_add_delayed_work(struct xio_context *ctx,
			     int msec_duration, void *data,
			     void (*timer_fn)(void *data),
			     xio_ctx_delayed_work_t *work)
{
	int retval;

	retval = xio_workqueue_add_delayed_work(ctx->workqueue,
						msec_duration, data,
						timer_fn, work);
	if (retval) {
		xio_set_error(retval);
		ERROR_LOG("xio_workqueue_add_delayed_work failed. err=%d\n",
			   retval);
	}

	return retval;
}
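A hedged usage sketch: arming a one-shot delayed callback on a context. The helper and callback names are hypothetical; the signatures follow xio_ctx_add_delayed_work() above.

static void my_timer_cb(void *data)
{
	struct xio_session *session = (struct xio_session *)data;

	TRACE_LOG("timer fired. session:%p\n", session);
}

static int arm_timer(struct xio_context *ctx, struct xio_session *session,
		     xio_ctx_delayed_work_t *work)
{
	/* one-shot timer, 1000 msec from now; the work item must stay
	 * alive until the callback runs or the work is cancelled
	 */
	return xio_ctx_add_delayed_work(ctx, 1000, session,
					my_timer_cb, work);
}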
Example #9
/*---------------------------------------------------------------------------*/
int xio_ctx_del_work(struct xio_context *ctx,
		     xio_ctx_work_t *work)
{
	int retval;

	/* test if work is pending */
	if (!xio_is_work_pending(work))
		return 0;

	retval = xio_workqueue_del_work(ctx->workqueue, work);
	if (retval) {
		xio_set_error(retval);
		ERROR_LOG("xio_workqueue_del_work failed. err=%d\n", retval);
	}

	return retval;
}
Example #10
/*---------------------------------------------------------------------------*/
struct xio_buf *xio_alloc(size_t length)
{
	struct xio_buf		*buf;
	size_t			real_size;
	int			alloced = 0;

	buf = (struct xio_buf *)ucalloc(1, sizeof(*buf));
	if (!buf) {
		xio_set_error(errno);
		ERROR_LOG("calloc failed. (errno=%d %m)\n", errno);
		return NULL;
	}

	real_size = ALIGN(length, page_size);
	buf->addr = umemalign(page_size, real_size);
	if (!buf->addr) {
		ERROR_LOG("xio_memalign failed. sz:%zu\n", real_size);
		goto cleanup;
	}
	memset(buf->addr, 0, real_size);
	alloced = 1;

	buf->mr = xio_reg_mr(buf->addr, length);
	if (!buf->mr) {
		ERROR_LOG("xio_reg_mr failed. addr:%p, length:%d\n",
			  buf->addr, length, access);

		goto cleanup1;
	}
	buf->length = length;

	return buf;

cleanup1:
	if (alloced)
		ufree(buf->addr);

cleanup:
	ufree(buf);
	return NULL;
}
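A usage sketch for the allocator above (the wrapper name is hypothetical): the returned buffer is page aligned, zeroed, and pre-registered.

static void alloc_example(void)
{
	struct xio_buf *buf;

	buf = xio_alloc(4096);
	if (!buf) {
		ERROR_LOG("xio_alloc failed. err:%d\n", xio_errno());
		return;
	}
	TRACE_LOG("buffer addr:%p length:%zu mr:%p\n",
		  buf->addr, buf->length, buf->mr);

	/* released with the library's matching free routine,
	 * which is not shown in this section
	 */
}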
Example #11
/*---------------------------------------------------------------------------*/
int xio_ctx_set_work_destructor(
		     struct xio_context *ctx, void *data,
		     void (*destructor)(void *data),
		     xio_ctx_work_t *work)
{
	int retval;

	/* test if work is pending */
	if (xio_is_work_pending(work))
		return 0;

	retval = xio_workqueue_set_work_destructor(
					ctx->workqueue,
					data, destructor, work);
	if (retval) {
		xio_set_error(retval);
		ERROR_LOG("xio_workqueue_set_work_destructor failed. %m\n");
	}

	return retval;
}
Example #12
/*---------------------------------------------------------------------------*/
int xio_reject(struct xio_session *session,
	       enum xio_status reason,
	       void *user_context,
	       size_t user_context_len)
{
	int			retval = 0;
	struct xio_msg		*msg;
	struct xio_task		*task;

	msg = xio_session_write_reject_rsp(session,
		reason,
		user_context,
		user_context_len);
	if (msg == NULL) {
		ERROR_LOG("setup request creation failed\n");
		return -1;
	}
	/* server side state is changed to REJECTED */
	session->state = XIO_SESSION_STATE_REJECTED;
	TRACE_LOG("session state is now REJECT. session:%p\n",
		  session);

	msg->request = session->setup_req;
	msg->type    = XIO_SESSION_SETUP_RSP;

	task = container_of(msg->request,
			    struct xio_task, imsg);

	task->connection->close_reason = XIO_E_SESSION_REJECTED;

	retval = xio_connection_send(task->connection, msg);
	if (retval && retval != -EAGAIN) {
		ERROR_LOG("failed to send message. errno:%d\n", -retval);
		xio_set_error(-retval);
		return -1;
	}

	return 0;
}
Example #13
void xio_context_destroy(struct xio_context *ctx)
{
	int found;

	found = xio_idr_lookup_uobj(usr_idr, ctx);
	if (found) {
		xio_idr_remove_uobj(usr_idr, ctx);
	} else {
		ERROR_LOG("context not found:%p\n", ctx);
		xio_set_error(XIO_E_USER_OBJ_NOT_FOUND);
		return;
	}

	ctx->run_private = 0;
	xio_observable_notify_all_observers(&ctx->observable,
					    XIO_CONTEXT_EVENT_CLOSE, NULL);
	/* allow internally to run the loop for final cleanup */
	if (ctx->run_private)
		xio_context_run_loop(ctx);
	if (ctx->run_private == 0)
		xio_destroy_context_continue(&ctx->destroy_ctx_work.work);
}
Example #14
/*---------------------------------------------------------------------------*/
int xio_unbind(struct xio_server *server)
{
	int retval = 0;
	int found;

	if (!server)
		return -1;

	found = xio_idr_lookup_uobj(usr_idr, server);
	if (found) {
		xio_idr_remove_uobj(usr_idr, server);
	} else {
		ERROR_LOG("server not found:%p\n", server);
		xio_set_error(XIO_E_USER_OBJ_NOT_FOUND);
		return -1;
	}
	/* notify all observers that the server wishes to exit */
	xio_observable_notify_all_observers(&server->nexus_observable,
					    XIO_SERVER_EVENT_CLOSE, NULL);

	kref_put(&server->kref, xio_server_destroy);

	return retval;
}
Example #15
/*---------------------------------------------------------------------------*/
int xio_on_setup_req_recv(struct xio_connection *connection,
			  struct xio_task *task)
{
	struct xio_msg			*msg = &task->imsg;
	struct xio_new_session_req	req;
	uint8_t				*ptr;
	uint16_t			len;
	struct xio_session_hdr		hdr;
	struct xio_session		*session = connection->session;
	int				retval;
	struct xio_session_event_data  error_event = {};

	error_event.event = XIO_SESSION_ERROR_EVENT;

	/* read session header */
	xio_session_read_header(task, &hdr);
#ifdef XIO_SESSION_DEBUG
	connection->peer_connection = hdr.connection;
	connection->peer_session = hdr.session;
#endif
	task->imsg.sn = hdr.serial_num;
	task->connection = connection;
	task->session = session;
	connection->session->setup_req = msg;

	/* read the header */
	ptr = (uint8_t *)msg->in.header.iov_base;

	memset(&req, 0, sizeof(req));

	/* session id */
	len = xio_read_uint32(&session->peer_session_id, 0, ptr);
	ptr  = ptr + len;

	/* queue depth bytes */
	len = xio_read_uint64(&session->peer_snd_queue_depth_bytes, 0, ptr);
	ptr = ptr + len;

	len = xio_read_uint64(&session->peer_rcv_queue_depth_bytes, 0, ptr);
	ptr = ptr + len;

	/* queue depth msgs */
	len = xio_read_uint16((uint16_t *)&session->peer_snd_queue_depth_msgs,
			      0, ptr);
	ptr = ptr + len;

	len = xio_read_uint16((uint16_t *)&session->peer_rcv_queue_depth_msgs,
			      0, ptr);
	ptr = ptr + len;

	/* uri length */
	len = xio_read_uint16(&req.uri_len, 0, ptr);
	ptr = ptr + len;

	/* private length */
	len = xio_read_uint16(&req.private_data_len, 0, ptr);
	ptr = ptr + len;

	if (req.uri_len) {
		req.uri =
		  (char *)kcalloc(req.uri_len, sizeof(char), GFP_KERNEL);
		if (unlikely(!req.uri)) {
			xio_set_error(ENOMEM);
			ERROR_LOG("uri allocation failed. len:%d\n",
				  req.uri_len);
			goto cleanup1;
		}

		len = xio_read_array((uint8_t *)req.uri,
				     req.uri_len, 0, ptr);
		ptr = ptr + len;
	}
	if (req.private_data_len) {
		req.private_data = kcalloc(req.private_data_len,
					   sizeof(uint8_t), GFP_KERNEL);
		if (unlikely(!req.private_data)) {
			xio_set_error(ENOMEM);
			ERROR_LOG("private data allocation failed. len:%d\n",
				  req.private_data_len);
			goto cleanup2;
		}
		len = xio_read_array((uint8_t *)req.private_data,
				     req.private_data_len,
				     0, ptr);
		ptr = ptr + len;
	}

	req.proto = (enum xio_proto)xio_nexus_get_proto(connection->nexus);
	xio_nexus_get_peer_addr(connection->nexus,
				&req.src_addr, sizeof(req.src_addr));

	/* cache the task in io queue */
	xio_connection_queue_io_task(connection, task);

	/* notify the upper layer */
	if (connection->ses_ops.on_new_session) {
		retval = connection->ses_ops.on_new_session(
					session, &req,
					connection->cb_user_context);
		if (retval)
			goto cleanup2;
	} else {
		retval = xio_accept(session, NULL, 0, NULL, 0);
		if (retval) {
			ERROR_LOG("failed to auto accept session. session:%p\n",
				  session);
			goto cleanup2;
		}
	}

	/* Don't move session state to ONLINE. In case of multiple portals
	 * the accept moves the state to ACCEPTED until the first "HELLO"
	 * message arrives. Note that the "upper layer" may call redirect or
	 * reject.
	 */

	xio_session_notify_new_connection(session, connection);

	kfree(req.private_data);
	kfree(req.uri);

	return 0;

cleanup2:
	kfree(req.private_data);

cleanup1:
	kfree(req.uri);

	if (session->ses_ops.on_session_event) {
		error_event.reason = (enum xio_status)xio_errno();
		session->ses_ops.on_session_event(
				session, &error_event,
				session->cb_user_context);
	}
	return 0;
}
Example #16
/*---------------------------------------------------------------------------*/
void *xio_ev_loop_init(unsigned long flags, struct xio_context *ctx,
		       struct xio_loop_ops *loop_ops)
{
	struct xio_ev_loop *loop;
	char queue_name[64];

	loop = kzalloc(sizeof(struct xio_ev_loop), GFP_KERNEL);
	if (loop == NULL) {
		xio_set_error(ENOMEM);
		ERROR_LOG("kmalloc failed. %m\n");
		goto cleanup0;
	}

	set_bit(XIO_EV_LOOP_STOP, &loop->states);

	init_llist_head(&loop->ev_llist);

	/* use default implementation */
	loop->run  = priv_ev_loop_run;
	loop->stop = priv_ev_loop_stop;
	loop->loop_object = loop;

	switch (flags) {
	case XIO_LOOP_USER_LOOP:
		/* override with user provided routines and object */
		loop->run  = loop_ops->run;
		loop->stop = loop_ops->stop;
		loop->add_event = loop_ops->add_event;
		loop->loop_object = loop_ops->ev_loop;
		break;
	case XIO_LOOP_GIVEN_THREAD:
		loop->add_event = priv_ev_add_thread;
		init_waitqueue_head(&loop->wait);
		break;
	case XIO_LOOP_TASKLET:
		loop->add_event = priv_ev_add_tasklet;
		tasklet_init(&loop->tasklet, priv_ev_loop_run_tasklet,
			     (unsigned long)loop);
		break;
	case XIO_LOOP_WORKQUEUE:
		/* temp (also change to single thread) */
		sprintf(queue_name, "xio-%p", loop);
		/* check flags and bw comp */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
		loop->workqueue = create_workqueue(queue_name);
#else
		loop->workqueue = alloc_workqueue(queue_name,
						WQ_MEM_RECLAIM | WQ_HIGHPRI,
						0);
#endif
		if (!loop->workqueue) {
			ERROR_LOG("workqueue create failed.\n");
			goto cleanup1;
		}
		loop->add_event = priv_ev_add_workqueue;
		break;
	default:
		ERROR_LOG("wrong type. %lu\n", flags);
		goto cleanup1;
	}

	loop->flags = flags;
	loop->ctx = ctx;

	return loop;

cleanup1:
	clear_bit(XIO_EV_LOOP_STOP, &loop->states);
	kfree(loop);
cleanup0:
	ERROR_LOG("event loop creation failed.\n");
	return NULL;
}
Example #17
/*---------------------------------------------------------------------------*/
struct xio_msg *xio_session_write_reject_rsp(struct xio_session *session,
					     enum xio_status reason,
					     void *user_context,
					     uint16_t user_context_len)
{
	struct xio_msg	*msg;
	uint8_t			*buf;
	uint8_t			*ptr;
	uint16_t		len,  tot_len;
	uint16_t		action = XIO_ACTION_REJECT;

	/* calculate length */
	tot_len = 2*sizeof(uint16_t) + 2*sizeof(uint32_t);
	tot_len += user_context_len;

	if (tot_len > SETUP_BUFFER_LEN)  {
		ERROR_LOG("buffer is too small\n");
		xio_set_error(EMSGSIZE);
		return NULL;
	}

	/* allocate message */
	buf = (uint8_t *)kcalloc(SETUP_BUFFER_LEN + sizeof(struct xio_msg),
		      sizeof(uint8_t), GFP_KERNEL);
	if (!buf) {
		ERROR_LOG("message allocation failed\n");
		xio_set_error(ENOMEM);
		return NULL;
	}

	/* fill the message */
	msg = (struct xio_msg *)buf;
	msg->out.header.iov_base = buf + sizeof(struct xio_msg);
	msg->out.header.iov_len = 0;

	ptr = (uint8_t *)msg->out.header.iov_base;
	len = 0;

	/* serialize message into the buffer */

	/* session_id */
	len = xio_write_uint32(session->session_id, 0, ptr);
	ptr  = ptr + len;

	/* action */
	len = xio_write_uint16(action, 0, ptr);
	ptr  = ptr + len;

	/* reason */
	len = xio_write_uint32(reason, 0, ptr);
	ptr  = ptr + len;

	/* user_context_len */
	len = xio_write_uint16(user_context_len, 0, ptr);
	ptr  = ptr + len;

	if (user_context_len) {
		len = xio_write_array((const uint8_t *)user_context,
				      user_context_len,
				      0, ptr);
		ptr  = ptr + len;
	}

	msg->out.header.iov_len = ptr - (uint8_t *)msg->out.header.iov_base;

	if (msg->out.header.iov_len != tot_len) {
		ERROR_LOG("calculated length %d != actual length %zd\n",
			  tot_len, msg->out.header.iov_len);
	}

	return msg;
}
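For reference, the wire layout produced by the serializer above, reconstructed from its xio_write_uint*() calls; the fields are packed byte-by-byte and no such struct exists in the source:

/* reject response header, as serialized above (illustration only):
 *
 *	uint32_t session_id;
 *	uint16_t action;		always XIO_ACTION_REJECT
 *	uint32_t reason;		enum xio_status
 *	uint16_t user_context_len;
 *	uint8_t  user_context[user_context_len];
 */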
Example #18
/*---------------------------------------------------------------------------*/
struct xio_msg *xio_session_write_accept_rsp(struct xio_session *session,
					     uint16_t action,
					     const char **portals_array,
					     uint16_t portals_array_len,
					     void *user_context,
					     uint16_t user_context_len)
{
	struct xio_msg		*msg;
	uint8_t			*buf;
	uint8_t			*ptr;
	uint16_t		len, i, str_len, tot_len;

	/* calculate length */
	tot_len = 5*sizeof(uint16_t) + sizeof(uint32_t) + 2*sizeof(uint64_t);
	for (i = 0; i < portals_array_len; i++)
		tot_len += strlen(portals_array[i]) + sizeof(uint16_t);
	tot_len += user_context_len;

	if (tot_len > SETUP_BUFFER_LEN)  {
		ERROR_LOG("buffer is too small\n");
		xio_set_error(EMSGSIZE);
		return NULL;
	}

	/* allocate message */
	buf = (uint8_t *)kcalloc(SETUP_BUFFER_LEN + sizeof(struct xio_msg),
		      sizeof(uint8_t), GFP_KERNEL);
	if (unlikely(!buf)) {
		ERROR_LOG("message allocation failed\n");
		xio_set_error(ENOMEM);
		return NULL;
	}

	/* fill the message */
	msg = (struct xio_msg *)buf;
	msg->out.header.iov_base = buf + sizeof(struct xio_msg);
	msg->out.header.iov_len = 0;

	ptr = (uint8_t *)msg->out.header.iov_base;
	len = 0;

	/* serialize message into the buffer */

	/* session_id */
	len = xio_write_uint32(session->session_id, 0, ptr);
	ptr  = ptr + len;

	/* action */
	len = xio_write_uint16(action, 0, ptr);
	ptr  = ptr + len;

	if (action == XIO_ACTION_ACCEPT) {
		/* tx queue depth bytes */
		len = xio_write_uint64(session->snd_queue_depth_bytes, 0, ptr);
		ptr  = ptr + len;

		/* rx queue depth bytes */
		len = xio_write_uint64(session->rcv_queue_depth_bytes, 0, ptr);
		ptr  = ptr + len;

		/* tx queue depth msgs */
		len = xio_write_uint16(session->snd_queue_depth_msgs, 0, ptr);
		ptr  = ptr + len;

		/* rx queue depth msgs */
		len = xio_write_uint16(session->rcv_queue_depth_msgs, 0, ptr);
		ptr  = ptr + len;
	}

	/* portals_array_len */
	len = xio_write_uint16(portals_array_len, 0, ptr);
	ptr  = ptr + len;

	/* user_context_len */
	len = xio_write_uint16(user_context_len, 0, ptr);
	ptr  = ptr + len;

	for (i = 0; i < portals_array_len; i++) {
		str_len = strlen(portals_array[i]);

		len = xio_write_uint16(str_len, 0, ptr);
		ptr  = ptr + len;

		len = xio_write_array((uint8_t *)portals_array[i],
				      str_len, 0, ptr);
		ptr  = ptr + len;
	}

	if (user_context_len) {
		len = xio_write_array((const uint8_t *)user_context,
				      user_context_len,
				      0, ptr);
		ptr  = ptr + len;
	}

	msg->out.header.iov_len = ptr - (uint8_t *)msg->out.header.iov_base;

	if (msg->out.header.iov_len != tot_len) {
		ERROR_LOG("calculated length %d != actual length %zd\n",
			  tot_len, msg->out.header.iov_len);
	}

	return msg;
}
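Similarly, the layout written by xio_session_write_accept_rsp(), reconstructed from the calls above (illustration only; the queue-depth block is present only for accepts, not for redirects):

/* accept/redirect response header, as serialized above:
 *
 *	uint32_t session_id;
 *	uint16_t action;		XIO_ACTION_ACCEPT or XIO_ACTION_REDIRECT
 *	-- only when action == XIO_ACTION_ACCEPT --
 *	uint64_t snd_queue_depth_bytes;
 *	uint64_t rcv_queue_depth_bytes;
 *	uint16_t snd_queue_depth_msgs;
 *	uint16_t rcv_queue_depth_msgs;
 *	-- always --
 *	uint16_t portals_array_len;
 *	uint16_t user_context_len;
 *	{ uint16_t str_len; uint8_t portal[str_len]; }  x portals_array_len
 *	uint8_t  user_context[user_context_len];
 */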
Example #19
/*---------------------------------------------------------------------------*/
struct xio_context *xio_context_create(struct xio_context_params *ctx_params,
				       int polling_timeout,
				       int cpu_hint)
{
	struct xio_context		*ctx;
	struct xio_loop_ops		*loop_ops;
	struct task_struct		*worker;
	struct xio_transport		*transport;
	int				flags, cpu;

	if (!ctx_params) {
		xio_set_error(EINVAL);
		ERROR_LOG("ctx_params is NULL\n");
		goto cleanup0;

	}

	loop_ops = ctx_params->loop_ops;
	worker = ctx_params->worker;
	flags = ctx_params->flags;

	if (cpu_hint > 0 && cpu_hint >= num_online_cpus()) {
		xio_set_error(EINVAL);
		ERROR_LOG("cpu_hint(%d) >= num_online_cpus(%d)\n",
			  cpu_hint, num_online_cpus());
		goto cleanup0;
	}

	if ((flags == XIO_LOOP_USER_LOOP) &&
	    (!(loop_ops && loop_ops->add_event && loop_ops->ev_loop))) {
		xio_set_error(EINVAL);
		ERROR_LOG("loop_ops and ev_loop and ev_loop_add_event are " \
			  "mandatory with loop_ops\n");
		goto cleanup0;
	}

	xio_read_logging_level();

	/* no need to disable preemption */
	cpu = raw_smp_processor_id();

	if (cpu == -1)
		goto cleanup0;

	/* allocate new context */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		xio_set_error(ENOMEM);
		ERROR_LOG("kzalloc failed\n");
		goto cleanup0;
	}

	if (cpu_hint < 0)
		cpu_hint = cpu;

	ctx->run_private = 0;
	ctx->user_context = ctx_params->user_context;
	ctx->flags = flags;
	ctx->cpuid  = cpu_hint;
	ctx->nodeid = cpu_to_node(cpu_hint);
	ctx->polling_timeout = polling_timeout;
	ctx->prealloc_xio_inline_bufs =
		!!ctx_params->prealloc_xio_inline_bufs;
	ctx->rq_depth = ctx_params->rq_depth;

	if (!ctx_params->max_conns_per_ctx)
		ctx->max_conns_per_ctx = 100;
	else
		ctx->max_conns_per_ctx =
			max(ctx_params->max_conns_per_ctx, 2);

	ctx->workqueue = xio_workqueue_create(ctx);
	if (!ctx->workqueue) {
		xio_set_error(ENOMEM);
		ERROR_LOG("xio_workqueue_init failed.\n");
		goto cleanup1;
	}
	ctx->msg_pool = xio_objpool_create(sizeof(struct xio_msg),
					   MSGPOOL_INIT_NR, MSGPOOL_GROW_NR);
	if (!ctx->msg_pool) {
		xio_set_error(ENOMEM);
		ERROR_LOG("context's msg_pool create failed. %m\n");
		goto cleanup2;
	}

	XIO_OBSERVABLE_INIT(&ctx->observable, ctx);
	INIT_LIST_HEAD(&ctx->ctx_list);

	switch (flags) {
	case XIO_LOOP_USER_LOOP:
		break;
	case XIO_LOOP_GIVEN_THREAD:
		set_cpus_allowed_ptr(worker, cpumask_of(cpu_hint));
		ctx->worker = (uint64_t)worker;
		break;
	case XIO_LOOP_TASKLET:
		break;
	case XIO_LOOP_WORKQUEUE:
		break;
	default:
		ERROR_LOG("wrong type. %u\n", flags);
		goto cleanup3;
	}

	ctx->ev_loop = xio_ev_loop_init(flags, ctx, loop_ops);
	if (!ctx->ev_loop)
		goto cleanup3;

	ctx->stats.hertz = HZ;
	/* Initialize default counters' name */
	ctx->stats.name[XIO_STAT_TX_MSG]   = kstrdup("TX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_MSG]   = kstrdup("RX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_TX_BYTES] = kstrdup("TX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_BYTES] = kstrdup("RX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_DELAY]    = kstrdup("DELAY", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_APPDELAY] = kstrdup("APPDELAY", GFP_KERNEL);

	/* initialize rdma pools only */
	transport = xio_get_transport("rdma");
	if (transport && ctx->prealloc_xio_inline_bufs) {
		int retval = xio_ctx_pool_create(ctx, XIO_PROTO_RDMA,
					         XIO_CONTEXT_POOL_CLASS_INITIAL);
		if (retval) {
			ERROR_LOG("Failed to create initial pool. ctx:%p\n", ctx);
			goto cleanup3;
		}
		retval = xio_ctx_pool_create(ctx, XIO_PROTO_RDMA,
					     XIO_CONTEXT_POOL_CLASS_PRIMARY);
		if (retval) {
			ERROR_LOG("Failed to create primary pool. ctx:%p\n", ctx);
			goto cleanup3;
		}
	}
	spin_lock_init(&ctx->ctx_list_lock);

	xio_idr_add_uobj(usr_idr, ctx, "xio_context");
	return ctx;

cleanup3:
	xio_objpool_destroy(ctx->msg_pool);

cleanup2:
	xio_workqueue_destroy(ctx->workqueue);

cleanup1:
	kfree(ctx);

cleanup0:
	ERROR_LOG("xio_ctx_open failed\n");

	return NULL;
}
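A minimal creation sketch under stated assumptions: a zeroed xio_context_params, the internal workqueue loop, and default limits (max_conns_per_ctx falls back to 100 as above). The wrapper name is hypothetical; teardown is xio_context_destroy(), shown earlier.

static struct xio_context *create_ctx_example(void)
{
	struct xio_context_params ctx_params;
	struct xio_context *ctx;

	memset(&ctx_params, 0, sizeof(ctx_params));
	ctx_params.flags = XIO_LOOP_WORKQUEUE;	/* no worker/loop_ops needed */

	/* polling_timeout 0, no cpu affinity hint */
	ctx = xio_context_create(&ctx_params, 0, -1);
	if (!ctx)
		ERROR_LOG("xio_context_create failed. err:%d\n", xio_errno());

	return ctx;
}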
Example #20
/*---------------------------------------------------------------------------*/
int xio_ctx_pool_create(struct xio_context *ctx, enum xio_proto proto,
		        enum xio_context_pool_class pool_cls)
{
	struct xio_tasks_pool_ops	*pool_ops;
	struct xio_tasks_pool		**tasks_pool;
	struct xio_transport		*transport;
	struct xio_tasks_pool_params	params;
	char				pool_name[64];
	const char			*proto_str = xio_proto_str(proto);

	/* get the transport's proto */
	transport = xio_get_transport(proto_str);
	if (!transport) {
		ERROR_LOG("failed to load %s transport layer.\n", proto_str);
		ERROR_LOG("validate that your system support %s " \
			  "and the accelio's %s module is loaded\n",
			  proto_str, proto_str);
		xio_set_error(ENOPROTOOPT);
		return -1;
	}

	if (transport->get_pools_setup_ops) {
		if (!ctx->primary_pool_ops[proto] ||
		    !ctx->initial_pool_ops[proto])
			transport->get_pools_setup_ops(
					NULL,
					&ctx->initial_pool_ops[proto],
					&ctx->primary_pool_ops[proto]);
	} else {
		ERROR_LOG("transport does not implement " \
			  "\"get_pools_setup_ops\"\n");
		return -1;
	}

	switch (pool_cls) {
	case XIO_CONTEXT_POOL_CLASS_INITIAL:
		tasks_pool = &ctx->initial_tasks_pool[proto];
		pool_ops = ctx->initial_pool_ops[proto];
		sprintf(pool_name, "ctx:%p - initial_pool_%s", ctx, proto_str);
		break;
	case XIO_CONTEXT_POOL_CLASS_PRIMARY:
		tasks_pool = &ctx->primary_tasks_pool[proto];
		pool_ops = ctx->primary_pool_ops[proto];
		sprintf(pool_name, "ctx:%p - primary_pool_%s", ctx, proto_str);
		break;
	default:
		xio_set_error(EINVAL);
		ERROR_LOG("unknown pool class\n");
		return -1;
	}

	/* nothing to do if the pool already exists */
	if (*tasks_pool)
		return 0;

	if (!pool_ops)
		return -1;

	if (!pool_ops->pool_get_params ||
	    !pool_ops->slab_pre_create ||
	    !pool_ops->slab_init_task ||
	    !pool_ops->pool_post_create ||
	    !pool_ops->slab_destroy)
		return -1;

	/* get pool properties from the transport */
	memset(&params, 0, sizeof(params));

	pool_ops->pool_get_params(NULL,
				  (int *)&params.start_nr,
				  (int *)&params.max_nr,
				  (int *)&params.alloc_nr,
				  (int *)&params.pool_dd_data_sz,
				  (int *)&params.slab_dd_data_sz,
				  (int *)&params.task_dd_data_sz);
	if (ctx->prealloc_xio_inline_bufs) {
		params.start_nr = params.max_nr;
		params.alloc_nr = 0;
	}

	params.pool_hooks.slab_pre_create  =
		(int (*)(void *, int, void *, void *))
				pool_ops->slab_pre_create;
	params.pool_hooks.slab_post_create = (int (*)(void *, void *, void *))
				pool_ops->slab_post_create;
	params.pool_hooks.slab_destroy	   = (int (*)(void *, void *, void *))
				pool_ops->slab_destroy;
	params.pool_hooks.slab_init_task   =
		(int (*)(void *, void *, void *, int,  struct xio_task *))
				pool_ops->slab_init_task;
	params.pool_hooks.slab_uninit_task =
		(int (*)(void *, void *, void *, struct xio_task *))
				pool_ops->slab_uninit_task;
	params.pool_hooks.slab_remap_task =
		(int (*)(void *, void *, void *, void *, struct xio_task *))
				pool_ops->slab_remap_task;
	params.pool_hooks.pool_pre_create  = (int (*)(void *, void *, void *))
				pool_ops->pool_pre_create;
	params.pool_hooks.pool_post_create = (int (*)(void *, void *, void *))
				pool_ops->pool_post_create;
	params.pool_hooks.pool_destroy	   = (int (*)(void *, void *, void *))
				pool_ops->pool_destroy;
	params.pool_hooks.task_pre_put  = (int (*)(void *, struct xio_task *))
		pool_ops->task_pre_put;
	params.pool_hooks.task_post_get = (int (*)(void *, struct xio_task *))
		pool_ops->task_post_get;

	params.pool_name = kstrdup(pool_name, GFP_KERNEL);

	/* initialize the tasks pool */
	*tasks_pool = xio_tasks_pool_create(&params);
	if (!*tasks_pool) {
		ERROR_LOG("xio_tasks_pool_create failed\n");
		return -1;
	}

	return 0;
}
Example #21
/*---------------------------------------------------------------------------*/
static int xio_general_get_opt(void *xio_obj, int optname,
			       void *optval, int *optlen)
{
	switch (optname) {
	case XIO_OPTNAME_LOG_LEVEL:
		*((enum xio_log_level *)optval) = xio_get_log_level();
		*optlen = sizeof(enum xio_log_level);
		return 0;
	case XIO_OPTNAME_MAX_IN_IOVLEN:
		*optlen = sizeof(int);
		*((int *)optval) = g_options.max_in_iovsz;
		return 0;
	case XIO_OPTNAME_MAX_OUT_IOVLEN:
		*optlen = sizeof(int);
		*((int *)optval) = g_options.max_out_iovsz;
		return 0;
	case XIO_OPTNAME_ENABLE_RECONNECT:
		*optlen = sizeof(int);
		*((int *)optval) = g_options.reconnect;
		return 0;
	case XIO_OPTNAME_ENABLE_FLOW_CONTROL:
		*optlen = sizeof(int);
		*((int *)optval) = g_options.enable_flow_control;
		return 0;
	case XIO_OPTNAME_SND_QUEUE_DEPTH_MSGS:
		*optlen = sizeof(int);
		*((int *)optval) = g_options.snd_queue_depth_msgs;
		return 0;
	case XIO_OPTNAME_RCV_QUEUE_DEPTH_MSGS:
		*optlen = sizeof(int);
		*((int *)optval) = g_options.rcv_queue_depth_msgs;
		return 0;
	case XIO_OPTNAME_SND_QUEUE_DEPTH_BYTES:
		*optlen = sizeof(uint64_t);
		*((uint64_t *)optval) = g_options.snd_queue_depth_bytes;
		return 0;
	case XIO_OPTNAME_RCV_QUEUE_DEPTH_BYTES:
		*optlen = sizeof(uint64_t);
		*((uint64_t *)optval) = g_options.rcv_queue_depth_bytes;
		return 0;
	case XIO_OPTNAME_MAX_INLINE_XIO_HEADER:
		*optlen = sizeof(int);
		*((int *)optval) = g_options.max_inline_xio_hdr;
		return 0;
	case XIO_OPTNAME_MAX_INLINE_XIO_DATA:
		*optlen = sizeof(int);
		*((int *)optval) = g_options.max_inline_xio_data;
		return 0;
	case XIO_OPTNAME_INLINE_XIO_DATA_ALIGN:
		*optlen = sizeof(int);
		*((int *)optval) = g_options.inline_xio_data_align;
		return 0;
	case XIO_OPTNAME_XFER_BUF_ALIGN:
		*optlen = sizeof(int);
		*((int *)optval) = g_options.xfer_buf_align;
		return 0;
	case XIO_OPTNAME_ENABLE_KEEPALIVE:
		*optlen = sizeof(int);
		*((int *)optval) = g_options.enable_keepalive;
		return 0;
	case XIO_OPTNAME_CONFIG_KEEPALIVE:
		if (*optlen == sizeof(struct xio_options_keepalive)) {
			memcpy(optval, &g_options.ka, *optlen);
			return 0;
		} else {
			xio_set_error(EINVAL);
			return -1;
		}
	default:
		break;
	}
	xio_set_error(XIO_E_NOT_SUPPORTED);
	return -1;
}
Example #22
/*---------------------------------------------------------------------------*/
static int xio_general_set_opt(void *xio_obj, int optname,
			       const void *optval, int optlen)
{
	int tmp;

	switch (optname) {
	case XIO_OPTNAME_LOG_FN:
		if (optlen == 0 && !optval)
			return xio_set_log_fn(NULL);
		else if (optlen == sizeof(xio_log_fn))
			return xio_set_log_fn((xio_log_fn)optval);
		break;
	case XIO_OPTNAME_LOG_LEVEL:
		if (optlen != sizeof(enum xio_log_level))
			return -1;
		return xio_set_log_level(*((enum xio_log_level *)optval));
	case XIO_OPTNAME_DISABLE_HUGETBL:
		xio_disable_huge_pages(*((int *)optval));
		return 0;
	case XIO_OPTNAME_MEM_ALLOCATOR:
		if (optlen == sizeof(struct xio_mem_allocator))
			return xio_set_mem_allocator(
					(struct xio_mem_allocator *)optval);
		break;
	case XIO_OPTNAME_CONFIG_MEMPOOL:
		if (optlen == sizeof(struct xio_mempool_config)) {
			memcpy(&g_mempool_config,
			       (struct xio_mempool_config *)optval, optlen);
			return 0;
		}
		break;
	case XIO_OPTNAME_MAX_IN_IOVLEN:
		if (optlen == sizeof(int)) {
			struct xio_transport *rdma_transport =
						xio_get_transport("rdma");
			struct xio_transport *tcp_transport =
						xio_get_transport("tcp");
			int retval = 0;

			if (*((int *)optval) > XIO_IOVLEN &&
			    *((int *)optval) <= XIO_MAX_IOV) {
				g_options.max_in_iovsz = *((int *)optval);
				if (rdma_transport &&
				    rdma_transport->set_opt)
					retval |= rdma_transport->set_opt(
							xio_obj, optname,
							optval, optlen);
				if (tcp_transport &&
				    tcp_transport->set_opt)
					retval |= tcp_transport->set_opt(
							xio_obj, optname,
							optval, optlen);
			}
			return retval;
		}
		break;
	case XIO_OPTNAME_MAX_OUT_IOVLEN:
		if (optlen == sizeof(int)) {
			struct xio_transport *rdma_transport =
						xio_get_transport("rdma");
			struct xio_transport *tcp_transport =
						xio_get_transport("tcp");
			int retval = 0;

			if (*((int *)optval) > XIO_IOVLEN &&
			    *((int *)optval) <= XIO_MAX_IOV) {
				g_options.max_out_iovsz = *((int *)optval);
				if (rdma_transport &&
				    rdma_transport->set_opt)
					retval |= rdma_transport->set_opt(
							xio_obj, optname,
							optval, optlen);
				if (tcp_transport &&
				    tcp_transport->set_opt)
					retval |= tcp_transport->set_opt(
							xio_obj, optname,
							optval, optlen);
			}
			return retval;
		}
		break;
	case XIO_OPTNAME_ENABLE_DMA_LATENCY:
		if (optlen == sizeof(int)) {
			struct xio_transport *rdma_transport =
						xio_get_transport("rdma");
			struct xio_transport *tcp_transport =
						xio_get_transport("tcp");
			int retval = 0;

			if (rdma_transport &&
			    rdma_transport->set_opt)
				retval |= rdma_transport->set_opt(
						xio_obj, optname,
						optval, optlen);
			if (tcp_transport &&
			    tcp_transport->set_opt)
				retval |= tcp_transport->set_opt(
						xio_obj, optname,
						optval, optlen);

			return retval;
		}
		break;
	case XIO_OPTNAME_ENABLE_RECONNECT:
		g_options.reconnect = *((int *)optval);
		if (g_options.reconnect)
			g_options.enable_keepalive = 0;
		return 0;
	case XIO_OPTNAME_ENABLE_FLOW_CONTROL:
		g_options.enable_flow_control = *((int *)optval);
		return 0;
	case XIO_OPTNAME_SND_QUEUE_DEPTH_MSGS:
		if (*((int *)optval) < 1)
			break;
		g_options.snd_queue_depth_msgs = *((int *)optval);
		return 0;
	case XIO_OPTNAME_RCV_QUEUE_DEPTH_MSGS:
		if (*((int *)optval) < 1)
			break;
		g_options.rcv_queue_depth_msgs = *((int *)optval);
		return 0;
	case XIO_OPTNAME_SND_QUEUE_DEPTH_BYTES:
		if (*((uint64_t *)optval) < 1)
			break;
		g_options.snd_queue_depth_bytes = *((uint64_t *)optval);
		return 0;
	case XIO_OPTNAME_RCV_QUEUE_DEPTH_BYTES:
		if (*((uint64_t *)optval) < 1)
			break;
		g_options.rcv_queue_depth_bytes = *((uint64_t *)optval);
		return 0;
	case XIO_OPTNAME_MAX_INLINE_XIO_HEADER:
		if (optlen != sizeof(int))
			break;
		if (*((int *)optval) < 0)
			break;
		g_options.max_inline_xio_hdr = *((int *)optval);
		return 0;
	case XIO_OPTNAME_MAX_INLINE_XIO_DATA:
		if (optlen != sizeof(int))
			break;
		if (*((int *)optval) < 0)
			break;
		g_options.max_inline_xio_data = *((int *)optval);
		return 0;
	case XIO_OPTNAME_XFER_BUF_ALIGN:
		if (optlen != sizeof(int))
			break;
		tmp = *(int *)optval;
		if (!is_power_of_2(tmp) || !(tmp % sizeof(void *) == 0)) {
			xio_set_error(EINVAL);
			return -1;
		}
		g_options.xfer_buf_align = tmp;
		return 0;
	case XIO_OPTNAME_INLINE_XIO_DATA_ALIGN:
		if (optlen != sizeof(int))
			break;
		tmp = *(int *)optval;
		if (!tmp) {
			g_options.inline_xio_data_align = tmp;
			return 0;
		}
		if (!is_power_of_2(tmp) || !(tmp % sizeof(void *) == 0)) {
			xio_set_error(EINVAL);
			return -1;
		}
		g_options.inline_xio_data_align = tmp;
		return 0;
	case XIO_OPTNAME_ENABLE_KEEPALIVE:
		g_options.enable_keepalive = *((int *)optval);
		return 0;
	case XIO_OPTNAME_CONFIG_KEEPALIVE:
		if (optlen == sizeof(struct xio_options_keepalive)) {
			memcpy(&g_options.ka, optval, optlen);
			return 0;
		} else {
			xio_set_error(EINVAL);
			return -1;
		}
	default:
		break;
	}
	xio_set_error(XIO_E_NOT_SUPPORTED);
	return -1;
}
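A hedged sketch of driving this setter: it assumes xio_set_opt() is the public counterpart of xio_get_opt() from earlier in this section and routes XIO_OPTLEVEL_ACCELIO options here; the helper name is hypothetical.

static int raise_in_iovlen(void)
{
	/* accepted range per the case above: (XIO_IOVLEN, XIO_MAX_IOV] */
	int max_in = XIO_MAX_IOV;

	return xio_set_opt(NULL, XIO_OPTLEVEL_ACCELIO,
			   XIO_OPTNAME_MAX_IN_IOVLEN,
			   &max_in, sizeof(max_in));
}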
Example #23
/*---------------------------------------------------------------------------*/
int xio_on_setup_req_recv(struct xio_connection *connection,
			  struct xio_task *task)
{
	struct xio_msg			*msg = &task->imsg;
	struct xio_new_session_req	req;
	uint8_t				*ptr;
	uint16_t			len;
	struct xio_session_hdr		hdr;
	struct xio_session		*session = connection->session;
	int				retval;
	struct xio_session_event_data  error_event = {
		.conn = NULL,
		.conn_user_context = NULL,
		.event = XIO_SESSION_ERROR_EVENT,
		.reason = XIO_E_SUCCESS,
		.private_data = NULL,
		.private_data_len = 0,
	};

	/* read session header */
	xio_session_read_header(task, &hdr);
#ifdef XIO_SESSION_DEBUG
	connection->peer_connection = hdr.connection;
	connection->peer_session = hdr.session;
#endif
	task->imsg.sn = hdr.serial_num;
	task->connection = connection;
	task->session = session;
	connection->session->setup_req = msg;
	connection->session->connection_srv_first = connection;

	/* read the header */
	ptr = (uint8_t *)msg->in.header.iov_base;

	memset(&req, 0, sizeof(req));

	/* session id */
	len = xio_read_uint32(&session->peer_session_id, 0, ptr);
	ptr  = ptr + len;

	/* queue depth bytes */
	len = xio_read_uint64(&session->peer_snd_queue_depth_bytes, 0, ptr);
	ptr = ptr + len;

	len = xio_read_uint64(&session->peer_rcv_queue_depth_bytes, 0, ptr);
	ptr = ptr + len;

	/* queue depth msgs */
	len = xio_read_uint16((uint16_t *)&session->peer_snd_queue_depth_msgs,
			      0, ptr);
	ptr = ptr + len;

	len = xio_read_uint16((uint16_t *)&session->peer_rcv_queue_depth_msgs,
			      0, ptr);
	ptr = ptr + len;

	/* uri length */
	len = xio_read_uint16(&req.uri_len, 0, ptr);
	ptr = ptr + len;

	/* private length */
	len = xio_read_uint16(&req.private_data_len, 0, ptr);
	ptr = ptr + len;

	if (req.uri_len) {
		req.uri =
		  (char *)kcalloc(req.uri_len, sizeof(char), GFP_KERNEL);
		if (unlikely(!req.uri)) {
			xio_set_error(ENOMEM);
			ERROR_LOG("uri allocation failed. len:%d\n",
				  req.uri_len);
			goto cleanup1;
		}

		len = xio_read_array((uint8_t *)req.uri,
				     req.uri_len, 0, ptr);
		ptr = ptr + len;
	}
	if (req.private_data_len) {
		req.private_data = kcalloc(req.private_data_len,
					   sizeof(uint8_t), GFP_KERNEL);
		if (unlikely(!req.private_data)) {
			xio_set_error(ENOMEM);
			ERROR_LOG("private data allocation failed. len:%d\n",
				  req.private_data_len);
			goto cleanup2;
		}
		len = xio_read_array((uint8_t *)req.private_data,
				     req.private_data_len,
				     0, ptr);
		ptr = ptr + len;
	}

	req.proto = (enum xio_proto)xio_nexus_get_proto(connection->nexus);
	xio_nexus_get_peer_addr(connection->nexus,
				&req.src_addr, sizeof(req.src_addr));

	/* cache the task in io queue */
	xio_connection_queue_io_task(connection, task);

	/* notify the upper layer */
	if (connection->ses_ops.on_new_session) {
#ifdef XIO_THREAD_SAFE_DEBUG
		xio_ctx_debug_thread_unlock(connection->ctx);
#endif
		retval = connection->ses_ops.on_new_session(
					session, &req,
					connection->cb_user_context);
#ifdef XIO_THREAD_SAFE_DEBUG
		xio_ctx_debug_thread_lock(connection->ctx);
#endif
		if (retval)
			goto cleanup2;
	} else {
		retval = xio_accept(session, NULL, 0, NULL, 0);
		if (retval) {
			ERROR_LOG("failed to auto accept session. session:%p\n",
				  session);
			goto cleanup2;
		}
	}

	/* Don't move session state to ONLINE. In case of multiple portals
	 * the accept moves the state to ACCEPTED until the first "HELLO"
	 * message arrives. Note that the "upper layer" may call redirect or
	 * reject.
	 */

	xio_session_notify_new_connection(session, connection);

	kfree(req.private_data);
	kfree(req.uri);

	return 0;

cleanup2:
	kfree(req.private_data);

cleanup1:
	kfree(req.uri);

	if (session->ses_ops.on_session_event) {
#ifdef XIO_THREAD_SAFE_DEBUG
		xio_ctx_debug_thread_unlock(connection->ctx);
#endif
		error_event.reason = (enum xio_status)xio_errno();
		session->ses_ops.on_session_event(
				session, &error_event,
				session->cb_user_context);
#ifdef XIO_THREAD_SAFE_DEBUG
		xio_ctx_debug_thread_lock(connection->ctx);
#endif
	}
	return 0;
}

/*---------------------------------------------------------------------------*/
/* xio_session_write_accept_rsp						     */
/*---------------------------------------------------------------------------*/
struct xio_msg *xio_session_write_accept_rsp(struct xio_session *session,
					     uint16_t action,
					     const char **portals_array,
					     uint16_t portals_array_len,
					     void *user_context,
					     uint16_t user_context_len)
{
	struct xio_msg		*msg;
	uint8_t			*buf;
	uint8_t			*ptr;
	uint16_t		len, i, str_len, tot_len;

	/* calculate length */
	tot_len = 5*sizeof(uint16_t) + sizeof(uint32_t) + 2*sizeof(uint64_t);
	for (i = 0; i < portals_array_len; i++)
		tot_len += strlen(portals_array[i]) + sizeof(uint16_t);
	tot_len += user_context_len;

	if (tot_len > SETUP_BUFFER_LEN)  {
		ERROR_LOG("buffer is too small\n");
		xio_set_error(EMSGSIZE);
		return NULL;
	}

	/* allocate message */
	buf = (uint8_t *)kcalloc(SETUP_BUFFER_LEN + sizeof(struct xio_msg),
		      sizeof(uint8_t), GFP_KERNEL);
	if (unlikely(!buf)) {
		ERROR_LOG("message allocation failed\n");
		xio_set_error(ENOMEM);
		return NULL;
	}

	/* fill the message */
	msg = (struct xio_msg *)buf;
	msg->out.header.iov_base = buf + sizeof(struct xio_msg);
	msg->out.header.iov_len = 0;

	ptr = (uint8_t *)msg->out.header.iov_base;
	len = 0;

	/* serialize message into the buffer */

	/* session_id */
	len = xio_write_uint32(session->session_id, 0, ptr);
	ptr  = ptr + len;

	/* action */
	len = xio_write_uint16(action, 0, ptr);
	ptr  = ptr + len;

	if (action == XIO_ACTION_ACCEPT) {
		/* tx queue depth bytes */
		len = xio_write_uint64(session->snd_queue_depth_bytes, 0, ptr);
		ptr  = ptr + len;

		/* rx queue depth bytes */
		len = xio_write_uint64(session->rcv_queue_depth_bytes, 0, ptr);
		ptr  = ptr + len;

		/* tx queue depth msgs */
		len = xio_write_uint16(session->snd_queue_depth_msgs, 0, ptr);
		ptr  = ptr + len;

		/* rx queue depth msgs */
		len = xio_write_uint16(session->rcv_queue_depth_msgs, 0, ptr);
		ptr  = ptr + len;
	}

	/* portals_array_len */
	len = xio_write_uint16(portals_array_len, 0, ptr);
	ptr  = ptr + len;

	/* user_context_len */
	len = xio_write_uint16(user_context_len, 0, ptr);
	ptr  = ptr + len;

	for (i = 0; i < portals_array_len; i++) {
		str_len = strlen(portals_array[i]);

		len = xio_write_uint16(str_len, 0, ptr);
		ptr  = ptr + len;

		len = xio_write_array((uint8_t *)portals_array[i],
				      str_len, 0, ptr);
		ptr  = ptr + len;
	}

	if (user_context_len) {
		len = xio_write_array((const uint8_t *)user_context,
				      user_context_len,
				      0, ptr);
		ptr  = ptr + len;
	}

	msg->out.header.iov_len = ptr - (uint8_t *)msg->out.header.iov_base;

	if (msg->out.header.iov_len != tot_len) {
		ERROR_LOG("calculated length %d != actual length %zd\n",
			  tot_len, msg->out.header.iov_len);
	}

	return msg;
}

/*---------------------------------------------------------------------------*/
/* xio_session_write_reject_rsp						     */
/*---------------------------------------------------------------------------*/
struct xio_msg *xio_session_write_reject_rsp(struct xio_session *session,
					     enum xio_status reason,
					     void *user_context,
					     uint16_t user_context_len)
{
	struct xio_msg	*msg;
	uint8_t			*buf;
	uint8_t			*ptr;
	uint16_t		len,  tot_len;
	uint16_t		action = XIO_ACTION_REJECT;

	/* calculate length */
	tot_len = 2*sizeof(uint16_t) + 2*sizeof(uint32_t);
	tot_len += user_context_len;

	if (tot_len > SETUP_BUFFER_LEN)  {
		ERROR_LOG("buffer is too small\n");
		xio_set_error(EMSGSIZE);
		return NULL;
	}

	/* allocate message */
	buf = (uint8_t *)kcalloc(SETUP_BUFFER_LEN + sizeof(struct xio_msg),
		      sizeof(uint8_t), GFP_KERNEL);
	if (!buf) {
		ERROR_LOG("message allocation failed\n");
		xio_set_error(ENOMEM);
		return NULL;
	}

	/* fill the message */
	msg = (struct xio_msg *)buf;
	msg->out.header.iov_base = buf + sizeof(struct xio_msg);
	msg->out.header.iov_len = 0;

	ptr = (uint8_t *)msg->out.header.iov_base;
	len = 0;

	/* serialize message into the buffer */

	/* session_id */
	len = xio_write_uint32(session->session_id, 0, ptr);
	ptr  = ptr + len;

	/* action */
	len = xio_write_uint16(action, 0, ptr);
	ptr  = ptr + len;

	/* reason */
	len = xio_write_uint32(reason, 0, ptr);
	ptr  = ptr + len;

	/* user_context_len */
	len = xio_write_uint16(user_context_len, 0, ptr);
	ptr  = ptr + len;

	if (user_context_len) {
		len = xio_write_array((const uint8_t *)user_context,
				      user_context_len,
				      0, ptr);
		ptr  = ptr + len;
	}

	msg->out.header.iov_len = ptr - (uint8_t *)msg->out.header.iov_base;

	if (msg->out.header.iov_len != tot_len) {
		ERROR_LOG("calculated length %d != actual length %zd\n",
			  tot_len, msg->out.header.iov_len);
	}

	return msg;
}

/*---------------------------------------------------------------------------*/
/* xio_accept								     */
/*---------------------------------------------------------------------------*/
int xio_accept(struct xio_session *session,
	       const char **portals_array,
	       size_t portals_array_len,
	       void *user_context,
	       size_t user_context_len)
{
	int			retval = 0;
	struct xio_msg		*msg;
	struct xio_task		*task;

	msg = xio_session_write_accept_rsp(session,
					   XIO_ACTION_ACCEPT,
					   portals_array,
					   portals_array_len,
					   user_context,
					   user_context_len);
	if (!msg) {
		ERROR_LOG("setup request creation failed\n");
		return -1;
	}

	msg->request	= session->setup_req;
	msg->type	= (enum xio_msg_type)XIO_SESSION_SETUP_RSP;

	task = container_of(msg->request,
			    struct xio_task, imsg);

	if (portals_array_len != 0) {
		/* server side state is changed to ACCEPTED; it will be moved
		 * to the ONLINE state when the first "hello" message arrives
		 */
		session->state = XIO_SESSION_STATE_ACCEPTED;
		/* temporarily disable teardown */
		session->disable_teardown = 1;
		TRACE_LOG("session state is now ACCEPT. session:%p\n",
			  session);
	} else {
		/* initialize credits */
		task->connection->peer_credits_msgs =
					session->peer_rcv_queue_depth_msgs;
		task->connection->credits_msgs	= 0;
		task->connection->peer_credits_bytes =
					session->peer_rcv_queue_depth_bytes;
		task->connection->credits_bytes	= 0;

		/* server side state is changed to ONLINE, immediately  */
		session->state = XIO_SESSION_STATE_ONLINE;
		TRACE_LOG("session state changed to ONLINE. session:%p\n",
			  session);
	}
	retval = xio_connection_send(task->connection, msg);
	if (retval && retval != -EAGAIN) {
		ERROR_LOG("failed to send message. errno:%d\n", -retval);
		xio_set_error(-retval);
		return -1;
	}

	return 0;
}
Example #24
/*---------------------------------------------------------------------------*/
struct xio_context *xio_context_create(unsigned int flags,
				       struct xio_loop_ops *loop_ops,
				       struct task_struct *worker,
				       int polling_timeout,
				       int cpu_hint)
{
	struct xio_context *ctx;
	struct dentry *xio_root;
	char name[32];
	int cpu;

	if (cpu_hint > 0 && cpu_hint >= num_online_cpus()) {
		xio_set_error(EINVAL);
		ERROR_LOG("cpu_hint(%d) >= num_online_cpus(%d)\n",
			  cpu_hint, num_online_cpus());
		goto cleanup0;
	}

	if ((flags == XIO_LOOP_USER_LOOP) &&
	    (!(loop_ops && loop_ops->add_event && loop_ops->ev_loop))) {
		xio_set_error(EINVAL);
		ERROR_LOG("loop_ops and ev_loop and ev_loop_add_event are mandatory with loop_ops\n");
		goto cleanup0;
	}

	xio_read_logging_level();

	/* no need to disable preemption */
	cpu = raw_smp_processor_id();

	if (cpu == -1)
		goto cleanup0;

	/* allocate new context */
	ctx = kzalloc(sizeof(struct xio_context), GFP_KERNEL);
	if (ctx == NULL) {
		xio_set_error(ENOMEM);
		ERROR_LOG("kzalloc failed\n");
		goto cleanup0;
	}

	if (cpu_hint < 0)
		cpu_hint = cpu;

	ctx->flags = flags;
	ctx->cpuid  = cpu_hint;
	ctx->nodeid = cpu_to_node(cpu_hint);
	ctx->polling_timeout = polling_timeout;
	ctx->workqueue = xio_workqueue_create(ctx);
	if (!ctx->workqueue) {
		xio_set_error(ENOMEM);
		ERROR_LOG("xio_workqueue_init failed.\n");
		goto cleanup1;
	}

	XIO_OBSERVABLE_INIT(&ctx->observable, ctx);
	INIT_LIST_HEAD(&ctx->ctx_list);

	switch (flags) {
	case XIO_LOOP_USER_LOOP:
		break;
	case XIO_LOOP_GIVEN_THREAD:
		set_cpus_allowed_ptr(worker, cpumask_of(cpu_hint));
		ctx->worker = (uint64_t) worker;
		break;
	case XIO_LOOP_TASKLET:
		break;
	case XIO_LOOP_WORKQUEUE:
		break;
	default:
		ERROR_LOG("wrong type. %u\n", flags);
		goto cleanup2;
	}

	xio_root = xio_debugfs_root();
	if (xio_root) {
		/* More than one context can share the core */
		sprintf(name, "ctx-%d-%p", cpu_hint, worker);
		ctx->ctx_dentry = debugfs_create_dir(name, xio_root);
		if (!ctx->ctx_dentry) {
			ERROR_LOG("debugfs entry %s create failed\n", name);
			goto cleanup2;
		}
	}

	ctx->ev_loop = xio_ev_loop_init(flags, ctx, loop_ops);
	if (!ctx->ev_loop)
		goto cleanup3;

	ctx->stats.hertz = HZ;
	/* Initialize default counters' name */
	ctx->stats.name[XIO_STAT_TX_MSG]   = kstrdup("TX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_MSG]   = kstrdup("RX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_TX_BYTES] = kstrdup("TX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_BYTES] = kstrdup("RX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_DELAY]    = kstrdup("DELAY", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_APPDELAY] = kstrdup("APPDELAY", GFP_KERNEL);

	return ctx;

cleanup3:
	debugfs_remove_recursive(ctx->ctx_dentry);
	ctx->ctx_dentry = NULL;

cleanup2:
	xio_workqueue_destroy(ctx->workqueue);

cleanup1:
	kfree(ctx);

cleanup0:
	ERROR_LOG("xio_ctx_open failed\n");

	return NULL;
}
Example #25
/*---------------------------------------------------------------------------*/
struct xio_server *xio_bind(struct xio_context *ctx,
			    struct xio_session_ops *ops,
			    const char *uri,
			    uint16_t *src_port,
			    uint32_t session_flags,
			    void *cb_private_data)
{
	struct xio_server	*server;
	int			retval;
	int			backlog = 4;

	if (!ctx || !ops || !uri) {
		ERROR_LOG("invalid parameters ctx:%p, ops:%p, uri:%p\n",
			  ctx, ops, uri);
		xio_set_error(EINVAL);
		return NULL;
	}

	TRACE_LOG("bind to %s\n", uri);

	/* create the server */
	server = (struct xio_server *)
			kcalloc(1, sizeof(struct xio_server), GFP_KERNEL);
	if (!server) {
		xio_set_error(ENOMEM);
		return NULL;
	}
	kref_init(&server->kref);

	/* fill server data*/
	server->ctx = ctx;
	server->cb_private_data	= cb_private_data;
	server->uri = kstrdup(uri, GFP_KERNEL);

	server->session_flags = session_flags;
	memcpy(&server->ops, ops, sizeof(*ops));

	XIO_OBSERVER_INIT(&server->observer, server, xio_on_nexus_event);

	XIO_OBSERVABLE_INIT(&server->nexus_observable, server);

	server->listener = xio_nexus_open(ctx, uri, NULL, 0, 0, NULL);
	if (!server->listener) {
		ERROR_LOG("failed to create connection\n");
		goto cleanup;
	}
	retval = xio_nexus_listen(server->listener,
				  uri, src_port, backlog);
	if (retval != 0) {
		ERROR_LOG("connection listen failed\n");
		goto cleanup1;
	}
	xio_nexus_set_server(server->listener, server);
	xio_idr_add_uobj(usr_idr, server, "xio_server");

	return server;

cleanup1:
	xio_nexus_close(server->listener, NULL);
cleanup:
	kfree(server->uri);
	kfree(server);

	return NULL;
}
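Finally, a sketch tying the server-side pieces of this section together: a session ops table wired to the hypothetical my_on_new_session callback from the sketch after Example #1, a bind on an illustrative URI, and xio_unbind() (Example #14) as the teardown path.

static struct xio_session_ops server_ops = {
	.on_new_session = my_on_new_session,
};

static struct xio_server *start_server(struct xio_context *ctx)
{
	/* illustrative URI; NULL src_port, no session flags,
	 * no private data
	 */
	struct xio_server *server = xio_bind(ctx, &server_ops,
					     "rdma://0.0.0.0:1234",
					     NULL, 0, NULL);
	if (!server)
		ERROR_LOG("xio_bind failed. err:%d\n", xio_errno());

	return server;
}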