Example 1
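A portal worker thread from an Accelio (libxio) multi-threaded "hello world" server: it pins itself to its assigned CPU, opens a private event loop and context, binds a listener on its own portal URL, pre-builds the response headers, and runs the loop until exit is signaled.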
/*---------------------------------------------------------------------------*/
static void *portal_server_cb(void *data)
{
	struct hw_thread_data	*tdata = data;
	cpu_set_t		cpuset;
	struct xio_context	*ctx;
	struct xio_server	*server;
	char			str[128];
	int			i;

	/* set affinity to thread */

	CPU_ZERO(&cpuset);
	CPU_SET(tdata->affinity, &cpuset);

	pthread_setaffinity_np(tdata->thread_id, sizeof(cpu_set_t), &cpuset);

	/* open default event loop */
	tdata->loop = xio_ev_loop_init();

	/* create thread context for the server */
	ctx = xio_ctx_open(NULL, tdata->loop, 0);

	/* bind a listener server to a portal/url */
	printf("thread [%d] - listen:%s\n", tdata->affinity, tdata->portal);
	server = xio_bind(ctx, &portal_server_ops, tdata->portal, NULL, 0, tdata);
	if (server == NULL)
		goto cleanup;

	snprintf(str, sizeof(str),
		 "hello world header response from thread %d",
		 tdata->affinity);
	/* create "hello world" message */
	for (i = 0; i < QUEUE_DEPTH; i++) {
		tdata->rsp[i].out.header.iov_base = strdup(str);
		tdata->rsp[i].out.header.iov_len =
			strlen(tdata->rsp[i].out.header.iov_base);
	}

	/* the default xio supplied main loop */
	xio_ev_loop_run(tdata->loop);

	/* normal exit phase */
	fprintf(stdout, "exit signaled\n");

	/* detach the server */
	xio_unbind(server);

	/* free the message */
	for (i = 0; i < QUEUE_DEPTH; i++)
		free(tdata->rsp[i].out.header.iov_base);

cleanup:
	/* free the context */
	xio_ctx_close(ctx);

	/* destroy the default loop */
	xio_ev_loop_destroy(tdata->loop);

	return NULL;
}
Example 2
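The matching main(): it binds the master listener on rdma://<host>:<port>, spawns MAX_THREADS portal threads on consecutive ports, runs the main event loop, and finally joins the threads and tears down the server, context, and loop.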
/*---------------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
	struct xio_server	*server;	/* server portal */
	struct hw_server_data	server_data;
	char			url[256];
	struct xio_context	*ctx;
	void			*loop;
	int			i;
	uint16_t		port;

	if (argc < 3) {
		fprintf(stderr, "usage: %s <host> <base port>\n", argv[0]);
		return -1;
	}
	port = atoi(argv[2]);

	memset(&server_data, 0, sizeof(server_data));

	/* open default event loop */
	loop	= xio_ev_loop_init();

	/* create thread context for the server */
	ctx	= xio_ctx_open(NULL, loop, 0);

	/* create the url to listen on */
	snprintf(url, sizeof(url), "rdma://%s:%d", argv[1], port);
	/* bind a listener server to a portal/url */
	server = xio_bind(ctx, &server_ops, url, NULL, 0, &server_data);
	if (server == NULL)
		goto cleanup;


	/* spawn portals */
	for (i = 0; i < MAX_THREADS; i++) {
		server_data.tdata[i].affinity = i + 1;
		port += 1;
		sprintf(server_data.tdata[i].portal, "rdma://%s:%d",
			argv[1], port);
		pthread_create(&server_data.tdata[i].thread_id, NULL,
			       portal_server_cb, &server_data.tdata[i]);
	}

	xio_ev_loop_run(loop);

	/* normal exit phase */
	fprintf(stdout, "exit signaled\n");

	/* join the threads */
	for (i = 0; i < MAX_THREADS; i++)
		pthread_join(server_data.tdata[i].thread_id, NULL);

	/* free the server */
	xio_unbind(server);
cleanup:
	/* free the context */
	xio_ctx_close(ctx);

	/* destroy the default loop */
	xio_ev_loop_destroy(&loop);

	return 0;
}
Example 3
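A kernel-space xio_context_create() that takes its settings through a ctx_params structure: it validates the arguments, allocates the context, creates the per-context workqueue and message pool, wires up the requested event-loop flavor, and optionally pre-allocates the RDMA inline-buffer pools.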
/*---------------------------------------------------------------------------*/
struct xio_context *xio_context_create(struct xio_context_params *ctx_params,
				       int polling_timeout,
				       int cpu_hint)
{
	struct xio_context		*ctx;
	struct xio_loop_ops		*loop_ops;
	struct task_struct		*worker;
	struct xio_transport		*transport;
	int				flags, cpu;

	if (!ctx_params) {
		xio_set_error(EINVAL);
		ERROR_LOG("ctx_params is NULL\n");
		goto cleanup0;
	}

	loop_ops = ctx_params->loop_ops;
	worker = ctx_params->worker;
	flags = ctx_params->flags;

	if (cpu_hint > 0 && cpu_hint >= num_online_cpus()) {
		xio_set_error(EINVAL);
		ERROR_LOG("cpu_hint(%d) >= num_online_cpus(%d)\n",
			  cpu_hint, num_online_cpus());
		goto cleanup0;
	}

	if ((flags == XIO_LOOP_USER_LOOP) &&
	    (!(loop_ops && loop_ops->add_event && loop_ops->ev_loop))) {
		xio_set_error(EINVAL);
		ERROR_LOG("loop_ops and ev_loop and ev_loop_add_event are " \
			  "mandatory with loop_ops\n");
		goto cleanup0;
	}

	xio_read_logging_level();

	/* no need to disable preemption */
	cpu = raw_smp_processor_id();

	if (cpu == -1)
		goto cleanup0;

	/* allocate new context */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		xio_set_error(ENOMEM);
		ERROR_LOG("kzalloc failed\n");
		goto cleanup0;
	}

	if (cpu_hint < 0)
		cpu_hint = cpu;

	ctx->run_private = 0;
	ctx->user_context = ctx_params->user_context;
	ctx->flags = flags;
	ctx->cpuid  = cpu_hint;
	ctx->nodeid = cpu_to_node(cpu_hint);
	ctx->polling_timeout = polling_timeout;
	ctx->prealloc_xio_inline_bufs =
		!!ctx_params->prealloc_xio_inline_bufs;
	ctx->rq_depth = ctx_params->rq_depth;

	if (!ctx_params->max_conns_per_ctx)
		ctx->max_conns_per_ctx = 100;
	else
		ctx->max_conns_per_ctx =
			max(ctx_params->max_conns_per_ctx, 2);

	ctx->workqueue = xio_workqueue_create(ctx);
	if (!ctx->workqueue) {
		xio_set_error(ENOMEM);
		ERROR_LOG("xio_workqueue_init failed.\n");
		goto cleanup1;
	}
	ctx->msg_pool = xio_objpool_create(sizeof(struct xio_msg),
					   MSGPOOL_INIT_NR, MSGPOOL_GROW_NR);
	if (!ctx->msg_pool) {
		xio_set_error(ENOMEM);
		ERROR_LOG("context's msg_pool create failed. %m\n");
		goto cleanup2;
	}

	XIO_OBSERVABLE_INIT(&ctx->observable, ctx);
	INIT_LIST_HEAD(&ctx->ctx_list);

	switch (flags) {
	case XIO_LOOP_USER_LOOP:
		break;
	case XIO_LOOP_GIVEN_THREAD:
		set_cpus_allowed_ptr(worker, cpumask_of(cpu_hint));
		ctx->worker = (uint64_t)worker;
		break;
	case XIO_LOOP_TASKLET:
		break;
	case XIO_LOOP_WORKQUEUE:
		break;
	default:
		ERROR_LOG("wrong type. %u\n", flags);
		goto cleanup3;
	}

	ctx->ev_loop = xio_ev_loop_init(flags, ctx, loop_ops);
	if (!ctx->ev_loop)
		goto cleanup3;

	ctx->stats.hertz = HZ;
	/* Initialize default counters' name */
	ctx->stats.name[XIO_STAT_TX_MSG]   = kstrdup("TX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_MSG]   = kstrdup("RX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_TX_BYTES] = kstrdup("TX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_BYTES] = kstrdup("RX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_DELAY]    = kstrdup("DELAY", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_APPDELAY] = kstrdup("APPDELAY", GFP_KERNEL);

	/* initialize rdma pools only */
	transport = xio_get_transport("rdma");
	if (transport && ctx->prealloc_xio_inline_bufs) {
		int retval = xio_ctx_pool_create(ctx, XIO_PROTO_RDMA,
					         XIO_CONTEXT_POOL_CLASS_INITIAL);
		if (retval) {
			ERROR_LOG("Failed to create initial pool. ctx:%p\n",
				  ctx);
			/* the msg_pool must be unwound on this path too */
			goto cleanup3;
		}
		retval = xio_ctx_pool_create(ctx, XIO_PROTO_RDMA,
					     XIO_CONTEXT_POOL_CLASS_PRIMARY);
		if (retval) {
			ERROR_LOG("Failed to create primary pool. ctx:%p\n",
				  ctx);
			goto cleanup3;
		}
	}
	spin_lock_init(&ctx->ctx_list_lock);

	xio_idr_add_uobj(usr_idr, ctx, "xio_context");
	return ctx;

cleanup3:
	xio_objpool_destroy(ctx->msg_pool);

cleanup2:
	xio_workqueue_destroy(ctx->workqueue);

cleanup1:
	kfree(ctx);

cleanup0:
	ERROR_LOG("xio_ctx_open failed\n");

	return NULL;
}
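For orientation, a minimal sketch of how a kernel thread might call this ctx_params variant; the parameter fields are taken from the function above, while open_ctx_on_current, the chosen flag value, and the header list are assumptions of this sketch.

/* assumes <linux/sched.h> (current), <linux/string.h> (memset) and the
 * libxio kernel headers for the types used above
 */
static struct xio_context *open_ctx_on_current(void)
{
	struct xio_context_params params;

	memset(&params, 0, sizeof(params));
	params.flags  = XIO_LOOP_GIVEN_THREAD;	/* loop driven by a thread */
	params.worker = current;		/* ...namely this one */
	/* loop_ops, rq_depth, max_conns_per_ctx left 0 -> defaults apply */

	/* polling_timeout = 0; cpu_hint = -1 selects the current CPU */
	return xio_context_create(&params, 0, -1);
}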
Example 4
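A variant of the same function that receives the loop parameters as separate arguments and additionally registers a per-context debugfs directory.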
/*---------------------------------------------------------------------------*/
struct xio_context *xio_context_create(unsigned int flags,
				       struct xio_loop_ops *loop_ops,
				       struct task_struct *worker,
				       int polling_timeout,
				       int cpu_hint)
{
	struct xio_context *ctx;
	struct dentry *xio_root;
	char name[32];
	int cpu;

	if (cpu_hint > 0 && cpu_hint >= num_online_cpus()) {
		xio_set_error(EINVAL);
		ERROR_LOG("cpu_hint(%d) >= num_online_cpus(%d)\n",
			  cpu_hint, num_online_cpus());
		goto cleanup0;
	}

	if ((flags == XIO_LOOP_USER_LOOP) &&
	    (!(loop_ops && loop_ops->add_event && loop_ops->ev_loop))) {
		xio_set_error(EINVAL);
		ERROR_LOG("loop_ops and ev_loop and ev_loop_add_event are mandatory with loop_ops\n");
		goto cleanup0;
	}

	xio_read_logging_level();

	/* no need to disable preemption */
	cpu = raw_smp_processor_id();

	if (cpu == -1)
		goto cleanup0;

	/* allocate new context */
	ctx = kzalloc(sizeof(struct xio_context), GFP_KERNEL);
	if (ctx == NULL) {
		xio_set_error(ENOMEM);
		ERROR_LOG("kzalloc failed\n");
		goto cleanup0;
	}

	if (cpu_hint < 0)
		cpu_hint = cpu;

	ctx->flags = flags;
	ctx->cpuid  = cpu_hint;
	ctx->nodeid = cpu_to_node(cpu_hint);
	ctx->polling_timeout = polling_timeout;
	ctx->workqueue = xio_workqueue_create(ctx);
	if (!ctx->workqueue) {
		xio_set_error(ENOMEM);
		ERROR_LOG("xio_workqueue_init failed.\n");
		goto cleanup1;
	}

	XIO_OBSERVABLE_INIT(&ctx->observable, ctx);
	INIT_LIST_HEAD(&ctx->ctx_list);

	switch (flags) {
	case XIO_LOOP_USER_LOOP:
		break;
	case XIO_LOOP_GIVEN_THREAD:
		set_cpus_allowed_ptr(worker, cpumask_of(cpu_hint));
		ctx->worker = (uint64_t) worker;
		break;
	case XIO_LOOP_TASKLET:
		break;
	case XIO_LOOP_WORKQUEUE:
		break;
	default:
		ERROR_LOG("wrong type. %u\n", flags);
		goto cleanup2;
	}

	xio_root = xio_debugfs_root();
	if (xio_root) {
		/* more than one context can share the core */
		snprintf(name, sizeof(name), "ctx-%d-%p", cpu_hint, worker);
		ctx->ctx_dentry = debugfs_create_dir(name, xio_root);
		if (!ctx->ctx_dentry) {
			ERROR_LOG("debugfs entry %s create failed\n", name);
			goto cleanup2;
		}
	}

	ctx->ev_loop = xio_ev_loop_init(flags, ctx, loop_ops);
	if (!ctx->ev_loop)
		goto cleanup3;

	ctx->stats.hertz = HZ;
	/* Initialize default counters' name */
	ctx->stats.name[XIO_STAT_TX_MSG]   = kstrdup("TX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_MSG]   = kstrdup("RX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_TX_BYTES] = kstrdup("TX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_BYTES] = kstrdup("RX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_DELAY]    = kstrdup("DELAY", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_APPDELAY] = kstrdup("APPDELAY", GFP_KERNEL);

	return ctx;

cleanup3:
	debugfs_remove_recursive(ctx->ctx_dentry);
	ctx->ctx_dentry = NULL;

cleanup2:
	xio_workqueue_destroy(ctx->workqueue);

cleanup1:
	kfree(ctx);

cleanup0:
	ERROR_LOG("xio_ctx_open failed\n");

	return NULL;
}
Example 5
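A client-side worker thread: it pins itself to a CPU, then for NUM_ITER iterations opens num_sessions sessions against tdata->url, connects them, and runs the event loop; it returns the average wall-clock time per iteration in a heap-allocated double.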
static void *worker_thread(void *data)
{
	struct thread_data	*tdata = data;
	cpu_set_t		cpuset;
	struct xio_session	**sessions = NULL;
	struct xio_context	*ctx;
	struct session_data	*session_data = NULL;
	int			j = 0, n = 0;
	struct timespec		start, end;
	void			*loop = NULL;
	double			*sec = NULL;
	/* set affinity to thread */

	CPU_ZERO(&cpuset);
	CPU_SET(tdata->affinity, &cpuset);

	pthread_setaffinity_np(tdata->thread_id, sizeof(cpu_set_t), &cpuset);

	/* open default event loop */
	loop = xio_ev_loop_init();
	if (loop == NULL) {
		fprintf(stderr, "Failed to allocate event loop\n");
		return (void *)-1;
	}

	/* create thread context for the client */
	ctx = xio_ctx_open(NULL, loop, 0);
	if (ctx == NULL) {
		fprintf(stderr, "Failed to allocate thread context\n");
		xio_ev_loop_destroy(&loop);
		return (void *)-1;
	}

	session_data = malloc(tdata->num_sessions * sizeof(struct session_data));
	sessions     = malloc(tdata->num_sessions * sizeof(struct xio_session *));

	if (session_data == NULL || sessions == NULL) {
		fprintf(stderr, "Allocation failed\n");
		xio_ctx_close(ctx);
		xio_ev_loop_destroy(&loop);
		free(session_data);	/* free(NULL) is a no-op */
		free(sessions);
		return (void *)-1;
	}

	tdata->loop = loop;
	for (n = 0; n < tdata->num_sessions; n++)
		session_data[n].tdata = tdata;

	/* client session attributes */
	struct xio_session_attr attr = {
		&ses_ops, /* callbacks structure */
		NULL,	  /* no need to pass the server private data */
		0
	};

	if (clock_gettime(CLOCK_MONOTONIC, &start)) {
		fprintf(stderr, "clock_gettime() failed, errno = %d\n", errno);
	}

	for (j = 0; j < NUM_ITER; j++) {
		for (n = 0; n < tdata->num_sessions; n++) {
			sessions[n] = xio_session_open(XIO_SESSION_REQ, &attr,
						       tdata->url, 0, 0,
						       &session_data[n]);

			/* connect the session */
			session_data[n].conn = xio_connect(sessions[n], ctx, 0,
							   &session_data[n]);
		}

		/* the default xio supplied main loop */
		xio_ev_loop_run(loop);
	}

	if (clock_gettime(CLOCK_MONOTONIC, &end)) {
		fprintf(stderr, "clock_gettime() failed, errno = %d\n", errno);
	}

	/* normal exit phase */
	fprintf(stdout, "exit signaled\n");

	/* free the context */
	xio_ctx_close(ctx);

	/* destroy the default loop */
	xio_ev_loop_destroy(&loop);

	sec = malloc(sizeof(double));
	if (sec != NULL) {
		/* average wall-clock seconds per iteration */
		*sec = (double)((end.tv_sec * 1000000000LL + end.tv_nsec) -
				(start.tv_sec * 1000000000LL + start.tv_nsec)) /
		       NUM_ITER / 1000000000.0;
		fprintf(stdout, "THREAD [ %lu ] It took %lf sec for %d sessions\n",
			tdata->thread_id, *sec, tdata->num_sessions);
	}

	free(session_data);
	free(sessions);
	return ((void *)sec);
}
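For illustration, a minimal, hypothetical launcher for this worker; run_worker is not part of the original example, and the thread_data fields it touches follow the usage above.

/* assumes <pthread.h>, <stdlib.h> and the thread_data type used above */
static double run_worker(struct thread_data *tdata)
{
	void	*ret = NULL;
	double	avg = -1.0;

	if (pthread_create(&tdata->thread_id, NULL, worker_thread, tdata))
		return avg;
	pthread_join(tdata->thread_id, &ret);

	/* worker_thread returns (void *)-1 on setup failure, otherwise
	 * a malloc'd double holding the per-iteration average
	 */
	if (ret != NULL && ret != (void *)-1) {
		avg = *(double *)ret;
		free(ret);
	}
	return avg;
}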