/* Example 1 */
/*---------------------------------------------------------------------------*/
/**
 * xio_get_opt - read an accelio option value at the requested level
 * @xio_obj:	object the option applies to (passed through to the handler)
 * @level:	XIO_OPTLEVEL_ACCELIO / XIO_OPTLEVEL_RDMA / XIO_OPTLEVEL_TCP
 * @optname:	option identifier within the level
 * @optval:	output buffer for the option value
 * @optlen:	in/out length of @optval
 *
 * Accelio-level options go to xio_general_get_opt(); transport-level
 * options are forwarded to the transport's get_opt callback.  Transport
 * lookups are cached in function-local statics so xio_get_transport()
 * runs at most once per transport for the process lifetime.
 *
 * Returns 0 on success, -1 on failure with the error set via
 * xio_set_error() (EFAULT if the transport cannot be loaded,
 * XIO_E_NOT_SUPPORTED for an unknown level or a transport without
 * a get_opt callback).
 */
int xio_get_opt(void *xio_obj, int level,  int optname,
		void *optval, int *optlen)
{
	/* cached transport handles - resolved lazily, kept forever */
	static struct xio_transport *rdma_transport;
	static struct xio_transport *tcp_transport;

	switch (level) {
	case XIO_OPTLEVEL_ACCELIO:
		return xio_general_get_opt(xio_obj, optname, optval, optlen);
	case XIO_OPTLEVEL_RDMA:
		if (!rdma_transport) {
			rdma_transport = xio_get_transport("rdma");
			if (!rdma_transport) {
				xio_set_error(EFAULT);
				return -1;
			}
		}
		if (!rdma_transport->get_opt)
			break;
		return rdma_transport->get_opt(xio_obj,
					       optname, optval, optlen);
	case XIO_OPTLEVEL_TCP:
		if (!tcp_transport) {
			tcp_transport = xio_get_transport("tcp");
			if (!tcp_transport) {
				xio_set_error(EFAULT);
				return -1;
			}
		}
		if (!tcp_transport->get_opt)
			break;
		return tcp_transport->get_opt(xio_obj,
					      optname, optval, optlen);
	default:
		break;
	}

	xio_set_error(XIO_E_NOT_SUPPORTED);
	return -1;
}
/* Example 2 */
/*---------------------------------------------------------------------------*/
/**
 * xio_context_create - allocate and initialize a new xio context
 * @ctx_params:		mandatory creation parameters (loop ops, worker,
 *			flags, pool sizing hints)
 * @polling_timeout:	polling timeout stored on the context
 * @cpu_hint:		CPU to bind to; < 0 means "current CPU", must be
 *			below num_online_cpus() otherwise
 *
 * Validates the parameters, allocates the context, creates its
 * workqueue, message object pool, event loop, statistics counters and
 * (when prealloc_xio_inline_bufs is set and the rdma transport is
 * loaded) the rdma task pools, then registers the context in the idr.
 *
 * Returns the new context, or NULL on failure with the error set via
 * xio_set_error().  On failure every resource acquired so far is
 * released through the cleanup labels below.
 */
struct xio_context *xio_context_create(struct xio_context_params *ctx_params,
				       int polling_timeout,
				       int cpu_hint)
{
	struct xio_context		*ctx;
	struct xio_loop_ops		*loop_ops;
	struct task_struct		*worker;
	struct xio_transport		*transport;
	int				flags, cpu;

	if (!ctx_params) {
		xio_set_error(EINVAL);
		ERROR_LOG("ctx_params is NULL\n");
		goto cleanup0;
	}

	loop_ops = ctx_params->loop_ops;
	worker = ctx_params->worker;
	flags = ctx_params->flags;

	if (cpu_hint > 0 && cpu_hint >= num_online_cpus()) {
		xio_set_error(EINVAL);
		ERROR_LOG("cpu_hint(%d) >= num_online_cpus(%d)\n",
			  cpu_hint, num_online_cpus());
		goto cleanup0;
	}

	/* a user-driven loop requires the caller-supplied loop hooks */
	if ((flags == XIO_LOOP_USER_LOOP) &&
	    (!(loop_ops && loop_ops->add_event && loop_ops->ev_loop))) {
		xio_set_error(EINVAL);
		ERROR_LOG("loop_ops and ev_loop and ev_loop_add_event are " \
			  "mandatory with loop_ops\n");
		goto cleanup0;
	}

	xio_read_logging_level();

	/* no need to disable preemption */
	cpu = raw_smp_processor_id();
	if (cpu == -1)
		goto cleanup0;

	/* allocate new context */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		xio_set_error(ENOMEM);
		ERROR_LOG("kzalloc failed\n");
		goto cleanup0;
	}

	if (cpu_hint < 0)
		cpu_hint = cpu;

	ctx->run_private = 0;
	ctx->user_context = ctx_params->user_context;
	ctx->flags = flags;
	ctx->cpuid  = cpu_hint;
	ctx->nodeid = cpu_to_node(cpu_hint);
	ctx->polling_timeout = polling_timeout;
	ctx->prealloc_xio_inline_bufs =
		!!ctx_params->prealloc_xio_inline_bufs;
	ctx->rq_depth = ctx_params->rq_depth;

	/* default 100 connections per context; an explicit value is
	 * clamped to at least 2
	 */
	if (!ctx_params->max_conns_per_ctx)
		ctx->max_conns_per_ctx = 100;
	else
		ctx->max_conns_per_ctx =
			max(ctx_params->max_conns_per_ctx, 2);

	ctx->workqueue = xio_workqueue_create(ctx);
	if (!ctx->workqueue) {
		xio_set_error(ENOMEM);
		ERROR_LOG("xio_workqueue_init failed.\n");
		goto cleanup1;
	}
	ctx->msg_pool = xio_objpool_create(sizeof(struct xio_msg),
					   MSGPOOL_INIT_NR, MSGPOOL_GROW_NR);
	if (!ctx->msg_pool) {
		xio_set_error(ENOMEM);
		ERROR_LOG("context's msg_pool create failed. %m\n");
		goto cleanup2;
	}

	XIO_OBSERVABLE_INIT(&ctx->observable, ctx);
	INIT_LIST_HEAD(&ctx->ctx_list);

	switch (flags) {
	case XIO_LOOP_USER_LOOP:
		break;
	case XIO_LOOP_GIVEN_THREAD:
		/* pin the caller-supplied worker thread to the hinted cpu */
		set_cpus_allowed_ptr(worker, cpumask_of(cpu_hint));
		ctx->worker = (uint64_t)worker;
		break;
	case XIO_LOOP_TASKLET:
		break;
	case XIO_LOOP_WORKQUEUE:
		break;
	default:
		ERROR_LOG("wrong type. %u\n", flags);
		goto cleanup3;
	}

	ctx->ev_loop = xio_ev_loop_init(flags, ctx, loop_ops);
	if (!ctx->ev_loop)
		goto cleanup3;

	ctx->stats.hertz = HZ;
	/* Initialize default counters' name */
	ctx->stats.name[XIO_STAT_TX_MSG]   = kstrdup("TX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_MSG]   = kstrdup("RX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_TX_BYTES] = kstrdup("TX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_BYTES] = kstrdup("RX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_DELAY]    = kstrdup("DELAY", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_APPDELAY] = kstrdup("APPDELAY", GFP_KERNEL);

	/* initialize rdma pools only */
	transport = xio_get_transport("rdma");
	if (transport && ctx->prealloc_xio_inline_bufs) {
		int retval = xio_ctx_pool_create(ctx, XIO_PROTO_RDMA,
					         XIO_CONTEXT_POOL_CLASS_INITIAL);
		if (retval) {
			ERROR_LOG("Failed to create initial pool. ctx:%p\n", ctx);
			/* was "goto cleanup2", which skipped
			 * xio_objpool_destroy() and leaked msg_pool
			 */
			goto cleanup3;
		}
		retval = xio_ctx_pool_create(ctx, XIO_PROTO_RDMA,
					     XIO_CONTEXT_POOL_CLASS_PRIMARY);
		if (retval) {
			ERROR_LOG("Failed to create primary pool. ctx:%p\n", ctx);
			/* same msg_pool leak as above */
			goto cleanup3;
		}
	}
	spin_lock_init(&ctx->ctx_list_lock);

	xio_idr_add_uobj(usr_idr, ctx, "xio_context");
	return ctx;

	/* NOTE(review): failures after xio_ev_loop_init() still leak the
	 * event loop and the kstrdup'd stats.name[] strings - no
	 * teardown helper for them is visible here; TODO confirm and
	 * add a cleanup step if one exists.
	 */
cleanup3:
	xio_objpool_destroy(ctx->msg_pool);

cleanup2:
	xio_workqueue_destroy(ctx->workqueue);

cleanup1:
	kfree(ctx);

cleanup0:
	ERROR_LOG("xio_ctx_open failed\n");

	return NULL;
}
/* Example 3 */
/*---------------------------------------------------------------------------*/
/**
 * xio_ctx_pool_create - create a per-context tasks pool for a transport
 * @ctx:	the owning context
 * @proto:	transport protocol (indexes the per-proto pool arrays)
 * @pool_cls:	XIO_CONTEXT_POOL_CLASS_INITIAL or _PRIMARY
 *
 * Resolves the transport for @proto, lazily fetches its pool-setup ops
 * into the context, fills a xio_tasks_pool_params from the transport's
 * pool_get_params callback (casting the transport hooks into the
 * generic hook signatures), and creates the tasks pool.  Creating a
 * pool that already exists is a successful no-op.
 *
 * Returns 0 on success, -1 on failure (error set via xio_set_error()
 * where a specific errno applies).
 */
int xio_ctx_pool_create(struct xio_context *ctx, enum xio_proto proto,
		        enum xio_context_pool_class pool_cls)
{
	struct xio_tasks_pool_ops	*pool_ops;
	struct xio_tasks_pool		**tasks_pool;
	struct xio_transport		*transport;
	struct xio_tasks_pool_params	params;
	char				pool_name[64];
	const char			*proto_str = xio_proto_str(proto);

	/* get the transport's proto */
	transport = xio_get_transport(proto_str);
	if (!transport) {
		ERROR_LOG("failed to load %s transport layer.\n", proto_str);
		ERROR_LOG("validate that your system support %s " \
			  "and the accelio's %s module is loaded\n",
			  proto_str, proto_str);
		xio_set_error(ENOPROTOOPT);
		return -1;
	}

	/* fetch the transport's pool ops once and cache them on the ctx */
	if (transport->get_pools_setup_ops) {
		if (!ctx->primary_pool_ops[proto] ||
		    !ctx->initial_pool_ops[proto])
			transport->get_pools_setup_ops(
					NULL,
					&ctx->initial_pool_ops[proto],
					&ctx->primary_pool_ops[proto]);
	} else {
		ERROR_LOG("transport does not implement " \
			  "\"get_pools_setup_ops\"\n");
		return -1;
	}

	switch (pool_cls) {
	case XIO_CONTEXT_POOL_CLASS_INITIAL:
		tasks_pool = &ctx->initial_tasks_pool[proto];
		pool_ops = ctx->initial_pool_ops[proto];
		/* snprintf: never overflow the 64-byte name buffer */
		snprintf(pool_name, sizeof(pool_name),
			 "ctx:%p - initial_pool_%s", ctx, proto_str);
		break;
	case XIO_CONTEXT_POOL_CLASS_PRIMARY:
		tasks_pool = &ctx->primary_tasks_pool[proto];
		pool_ops = ctx->primary_pool_ops[proto];
		snprintf(pool_name, sizeof(pool_name),
			 "ctx:%p - primary_pool_%s", ctx, proto_str);
		break;
	default:
		xio_set_error(EINVAL);
		ERROR_LOG("unknown pool class\n");
		return -1;
	}

	/* if already exist */
	if (*tasks_pool)
		return 0;

	if (!pool_ops)
		return -1;

	/* these hooks are mandatory for pool operation */
	if (!pool_ops->pool_get_params ||
	    !pool_ops->slab_pre_create ||
	    !pool_ops->slab_init_task ||
	    !pool_ops->pool_post_create ||
	    !pool_ops->slab_destroy)
		return -1;

	/* get pool properties from the transport */
	memset(&params, 0, sizeof(params));

	pool_ops->pool_get_params(NULL,
				  (int *)&params.start_nr,
				  (int *)&params.max_nr,
				  (int *)&params.alloc_nr,
				  (int *)&params.pool_dd_data_sz,
				  (int *)&params.slab_dd_data_sz,
				  (int *)&params.task_dd_data_sz);
	/* preallocation: create everything up front, never grow */
	if (ctx->prealloc_xio_inline_bufs) {
		params.start_nr = params.max_nr;
		params.alloc_nr = 0;
	}

	params.pool_hooks.slab_pre_create  =
		(int (*)(void *, int, void *, void *))
				pool_ops->slab_pre_create;
	params.pool_hooks.slab_post_create = (int (*)(void *, void *, void *))
				pool_ops->slab_post_create;
	params.pool_hooks.slab_destroy	   = (int (*)(void *, void *, void *))
				pool_ops->slab_destroy;
	params.pool_hooks.slab_init_task   =
		(int (*)(void *, void *, void *, int,  struct xio_task *))
				pool_ops->slab_init_task;
	params.pool_hooks.slab_uninit_task =
		(int (*)(void *, void *, void *, struct xio_task *))
				pool_ops->slab_uninit_task;
	params.pool_hooks.slab_remap_task =
		(int (*)(void *, void *, void *, void *, struct xio_task *))
				pool_ops->slab_remap_task;
	params.pool_hooks.pool_pre_create  = (int (*)(void *, void *, void *))
				pool_ops->pool_pre_create;
	params.pool_hooks.pool_post_create = (int (*)(void *, void *, void *))
				pool_ops->pool_post_create;
	params.pool_hooks.pool_destroy	   = (int (*)(void *, void *, void *))
				pool_ops->pool_destroy;
	params.pool_hooks.task_pre_put  = (int (*)(void *, struct xio_task *))
		pool_ops->task_pre_put;
	params.pool_hooks.task_post_get = (int (*)(void *, struct xio_task *))
		pool_ops->task_post_get;

	params.pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!params.pool_name) {
		/* previously unchecked: a NULL name would silently
		 * propagate into the pool
		 */
		xio_set_error(ENOMEM);
		return -1;
	}

	/* initialize the tasks pool */
	*tasks_pool = xio_tasks_pool_create(&params);
	if (!*tasks_pool) {
		/* NOTE(review): params.pool_name may leak here -
		 * ownership on create-failure is not visible from this
		 * file; TODO confirm whether xio_tasks_pool_create()
		 * frees it and kfree() here if not.
		 */
		ERROR_LOG("xio_tasks_pool_create failed\n");
		return -1;
	}

	return 0;
}
/* Example 4 */
/*---------------------------------------------------------------------------*/
/**
 * xio_general_set_opt - set an accelio-level (XIO_OPTLEVEL_ACCELIO) option
 * @xio_obj:	object the option applies to (forwarded to transports)
 * @optname:	option identifier
 * @optval:	option value buffer
 * @optlen:	size of @optval in bytes
 *
 * Updates global accelio configuration (g_options and friends).  The
 * iovec-length and DMA-latency options are additionally propagated to
 * the rdma and tcp transports' set_opt callbacks when those transports
 * are loaded.
 *
 * Returns 0 on success, -1 on failure with the error set via
 * xio_set_error() (XIO_E_NOT_SUPPORTED for unknown/invalid options).
 *
 * NOTE(review): several cases dereference optval without validating
 * optlen (e.g. ENABLE_RECONNECT, DISABLE_HUGETBL) - presumably callers
 * always pass a correctly-sized buffer; confirm before tightening.
 */
static int xio_general_set_opt(void *xio_obj, int optname,
			       const void *optval, int optlen)
{
	int tmp;

	switch (optname) {
	case XIO_OPTNAME_LOG_FN:
		/* NULL + zero length clears the log callback */
		if (optlen == 0 && !optval)
			return xio_set_log_fn(NULL);
		else if (optlen == sizeof(xio_log_fn))
			return xio_set_log_fn((xio_log_fn)optval);
		break;
	case XIO_OPTNAME_LOG_LEVEL:
		if (optlen != sizeof(enum xio_log_level))
			return -1;
		return xio_set_log_level(*((enum xio_log_level *)optval));
	case XIO_OPTNAME_DISABLE_HUGETBL:
		xio_disable_huge_pages(*((int *)optval));
		return 0;
	case XIO_OPTNAME_MEM_ALLOCATOR:
		if (optlen == sizeof(struct xio_mem_allocator))
			return xio_set_mem_allocator(
					(struct xio_mem_allocator *)optval);
		break;
	case XIO_OPTNAME_CONFIG_MEMPOOL:
		if (optlen == sizeof(struct xio_mempool_config)) {
			memcpy(&g_mempool_config,
			       (struct xio_mempool_config *)optval, optlen);
			return 0;
		}
		break;
	case XIO_OPTNAME_MAX_IN_IOVLEN:
		if (optlen == sizeof(int)) {
			struct xio_transport *rdma_transport =
						xio_get_transport("rdma");
			struct xio_transport *tcp_transport =
						xio_get_transport("tcp");
			int retval = 0;

			/* accept only (XIO_IOVLEN, XIO_MAX_IOV] and
			 * propagate to all loaded transports
			 */
			if (*((int *)optval) > XIO_IOVLEN &&
			    *((int *)optval) <= XIO_MAX_IOV) {
				g_options.max_in_iovsz = *((int *)optval);
				if (rdma_transport &&
				    rdma_transport->set_opt)
					retval |= rdma_transport->set_opt(
							xio_obj, optname,
							optval, optlen);
				if (tcp_transport &&
				    tcp_transport->set_opt)
					retval |= tcp_transport->set_opt(
							xio_obj, optname,
							optval, optlen);
			}
			return retval;
		}
		break;
	case XIO_OPTNAME_MAX_OUT_IOVLEN:
		if (optlen == sizeof(int)) {
			struct xio_transport *rdma_transport =
						xio_get_transport("rdma");
			struct xio_transport *tcp_transport =
						xio_get_transport("tcp");
			int retval = 0;

			if (*((int *)optval) > XIO_IOVLEN &&
			    *((int *)optval) <= XIO_MAX_IOV) {
				g_options.max_out_iovsz = *((int *)optval);
				if (rdma_transport &&
				    rdma_transport->set_opt)
					retval |= rdma_transport->set_opt(
							xio_obj, optname,
							optval, optlen);
				if (tcp_transport &&
				    tcp_transport->set_opt)
					retval |= tcp_transport->set_opt(
							xio_obj, optname,
							optval, optlen);
			}
			return retval;
		}
		break;
	case XIO_OPTNAME_ENABLE_DMA_LATENCY:
		if (optlen == sizeof(int)) {
			struct xio_transport *rdma_transport =
						xio_get_transport("rdma");
			struct xio_transport *tcp_transport =
						xio_get_transport("tcp");
			int retval = 0;

			if (rdma_transport &&
			    rdma_transport->set_opt)
				retval |= rdma_transport->set_opt(
						xio_obj, optname,
						optval, optlen);
			if (tcp_transport &&
			    tcp_transport->set_opt)
				retval |= tcp_transport->set_opt(
						xio_obj, optname,
						optval, optlen);

			return retval;
		}
		break;
	case XIO_OPTNAME_ENABLE_RECONNECT:
		g_options.reconnect = *((int *)optval);
		/* reconnect and keepalive are mutually exclusive */
		if (g_options.reconnect)
			g_options.enable_keepalive = 0;
		return 0;
	case XIO_OPTNAME_ENABLE_FLOW_CONTROL:
		g_options.enable_flow_control = *((int *)optval);
		return 0;
	case XIO_OPTNAME_SND_QUEUE_DEPTH_MSGS:
		if (*((int *)optval) < 1)
			break;
		/* BUGFIX: previously read *(uint64_t *)optval here -
		 * an 8-byte read from an option validated (and, per the
		 * symmetric RCV case below, supplied) as an int.
		 */
		g_options.snd_queue_depth_msgs = *((int *)optval);
		return 0;
	case XIO_OPTNAME_RCV_QUEUE_DEPTH_MSGS:
		if (*((int *)optval) < 1)
			break;
		g_options.rcv_queue_depth_msgs = *((int *)optval);
		return 0;
	case XIO_OPTNAME_SND_QUEUE_DEPTH_BYTES:
		if (*((uint64_t *)optval) < 1)
			break;
		g_options.snd_queue_depth_bytes = *((uint64_t *)optval);
		return 0;
	case XIO_OPTNAME_RCV_QUEUE_DEPTH_BYTES:
		if (*((uint64_t *)optval) < 1)
			break;
		g_options.rcv_queue_depth_bytes = *((uint64_t *)optval);
		return 0;
	case XIO_OPTNAME_MAX_INLINE_XIO_HEADER:
		if (optlen != sizeof(int))
			break;
		if (*((int *)optval) < 0)
			break;
		g_options.max_inline_xio_hdr = *((int *)optval);
		return 0;
	case XIO_OPTNAME_MAX_INLINE_XIO_DATA:
		if (optlen != sizeof(int))
			break;
		if (*((int *)optval) < 0)
			break;
		g_options.max_inline_xio_data = *((int *)optval);
		return 0;
	case XIO_OPTNAME_XFER_BUF_ALIGN:
		if (optlen != sizeof(int))
			break;
		tmp = *(int *)optval;
		/* alignment must be a power of two and pointer-sized */
		if (!is_power_of_2(tmp) || !(tmp % sizeof(void *) == 0)) {
			xio_set_error(EINVAL);
			return -1;
		}
		g_options.xfer_buf_align = tmp;
		return 0;
	case XIO_OPTNAME_INLINE_XIO_DATA_ALIGN:
		if (optlen != sizeof(int))
			break;
		tmp = *(int *)optval;
		/* zero disables inline-data alignment */
		if (!tmp) {
			g_options.inline_xio_data_align = tmp;
			return 0;
		}
		if (!is_power_of_2(tmp) || !(tmp % sizeof(void *) == 0)) {
			xio_set_error(EINVAL);
			return -1;
		}
		g_options.inline_xio_data_align = tmp;
		return 0;
	case XIO_OPTNAME_ENABLE_KEEPALIVE:
		g_options.enable_keepalive = *((int *)optval);
		return 0;
	case XIO_OPTNAME_CONFIG_KEEPALIVE:
		if (optlen == sizeof(struct xio_options_keepalive)) {
			memcpy(&g_options.ka, optval, optlen);
			return 0;
		} else {
			xio_set_error(EINVAL);
			return -1;
		}
		break;
	default:
		break;
	}
	xio_set_error(XIO_E_NOT_SUPPORTED);
	return -1;
}