/*---------------------------------------------------------------------------*/
void xio_context_destroy(struct xio_context *ctx)
{
	int i;

	xio_observable_notify_all_observers(&ctx->observable,
					    XIO_CONTEXT_EVENT_CLOSE, NULL);
	xio_observable_unreg_all_observers(&ctx->observable);

	for (i = 0; i < XIO_STAT_LAST; i++)
		if (ctx->stats.name[i])
			kfree(ctx->stats.name[i]);

	xio_workqueue_destroy(ctx->workqueue);

	/* can free only xio created loop */
	if (ctx->flags != XIO_LOOP_USER_LOOP)
		xio_ev_loop_destroy(ctx->ev_loop);

	ctx->ev_loop = NULL;

	if (ctx->ctx_dentry) {
		debugfs_remove_recursive(ctx->ctx_dentry);
		ctx->ctx_dentry = NULL;
	}

	kfree(ctx);
}
/*---------------------------------------------------------------------------*/
void xio_destroy_context_continue(struct work_struct *work)
{
	xio_work_handle_t *xio_work;
	struct xio_context *ctx;
	int i;

	xio_work = container_of(work, xio_work_handle_t, work);
	ctx = container_of(xio_work, struct xio_context, destroy_ctx_work);

	if (ctx->run_private)
		ERROR_LOG("not all observers finished! run_private=%d\n",
			  ctx->run_private);

	xio_observable_notify_all_observers(&ctx->observable,
					    XIO_CONTEXT_EVENT_POST_CLOSE,
					    NULL);

	if (!xio_observable_is_empty(&ctx->observable))
		ERROR_LOG("context destroy: observers leak - %p\n", ctx);

	xio_observable_unreg_all_observers(&ctx->observable);

	for (i = 0; i < XIO_STAT_LAST; i++)
		kfree(ctx->stats.name[i]);

	xio_workqueue_destroy(ctx->workqueue);

	xio_objpool_destroy(ctx->msg_pool);

	/* can free only xio created loop */
	if (ctx->flags != XIO_LOOP_USER_LOOP)
		xio_ev_loop_destroy(ctx->ev_loop);

	ctx->ev_loop = NULL;

	XIO_OBSERVABLE_DESTROY(&ctx->observable);

	xio_ctx_task_pools_destroy(ctx);

	if (ctx->mempool) {
		xio_mempool_destroy(ctx->mempool);
		ctx->mempool = NULL;
	}

	kfree(ctx);
}
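/*---------------------------------------------------------------------------*/
/*
 * Illustrative sketch (not part of the original source): how the deferred
 * teardown above is typically driven.  xio_destroy_context_continue() is a
 * work handler, so the caller must bind it to the destroy_ctx_work item
 * embedded in the context and hand it to a workqueue.  The xio_work_handle_t
 * layout (a struct work_struct member named "work") is inferred from the
 * container_of() calls above; the use of INIT_WORK()/schedule_work() from
 * <linux/workqueue.h> and the helper name are assumptions, since the
 * scheduling side is not shown in this section.
 */
static inline void xio_context_schedule_destroy_sketch(struct xio_context *ctx)
{
	/* bind the handler to the work item embedded in the context */
	INIT_WORK(&ctx->destroy_ctx_work.work, xio_destroy_context_continue);
	/* run it later on the system workqueue; the handler frees ctx */
	schedule_work(&ctx->destroy_ctx_work.work);
}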
/*---------------------------------------------------------------------------*/
struct xio_context *xio_context_create(struct xio_context_params *ctx_params,
					int polling_timeout, int cpu_hint)
{
	struct xio_context *ctx;
	struct xio_loop_ops *loop_ops;
	struct task_struct *worker;
	struct xio_transport *transport;
	int flags, cpu;

	if (!ctx_params) {
		xio_set_error(EINVAL);
		ERROR_LOG("ctx_params is NULL\n");
		goto cleanup0;
	}

	loop_ops = ctx_params->loop_ops;
	worker = ctx_params->worker;
	flags = ctx_params->flags;

	if (cpu_hint > 0 && cpu_hint >= num_online_cpus()) {
		xio_set_error(EINVAL);
		ERROR_LOG("cpu_hint(%d) >= num_online_cpus(%d)\n",
			  cpu_hint, num_online_cpus());
		goto cleanup0;
	}

	if ((flags == XIO_LOOP_USER_LOOP) &&
	    (!(loop_ops && loop_ops->add_event && loop_ops->ev_loop))) {
		xio_set_error(EINVAL);
		ERROR_LOG("loop_ops and ev_loop and ev_loop_add_event are " \
			  "mandatory with loop_ops\n");
		goto cleanup0;
	}

	xio_read_logging_level();

	/* no need to disable preemption */
	cpu = raw_smp_processor_id();

	if (cpu == -1)
		goto cleanup0;

	/* allocate new context */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		xio_set_error(ENOMEM);
		ERROR_LOG("kzalloc failed\n");
		goto cleanup0;
	}

	if (cpu_hint < 0)
		cpu_hint = cpu;

	ctx->run_private = 0;
	ctx->user_context = ctx_params->user_context;
	ctx->flags = flags;
	ctx->cpuid = cpu_hint;
	ctx->nodeid = cpu_to_node(cpu_hint);
	ctx->polling_timeout = polling_timeout;
	ctx->prealloc_xio_inline_bufs =
		!!ctx_params->prealloc_xio_inline_bufs;
	ctx->rq_depth = ctx_params->rq_depth;

	if (!ctx_params->max_conns_per_ctx)
		ctx->max_conns_per_ctx = 100;
	else
		ctx->max_conns_per_ctx =
			max(ctx_params->max_conns_per_ctx, 2);

	ctx->workqueue = xio_workqueue_create(ctx);
	if (!ctx->workqueue) {
		xio_set_error(ENOMEM);
		ERROR_LOG("xio_workqueue_init failed.\n");
		goto cleanup1;
	}

	ctx->msg_pool = xio_objpool_create(sizeof(struct xio_msg),
					   MSGPOOL_INIT_NR, MSGPOOL_GROW_NR);
	if (!ctx->msg_pool) {
		xio_set_error(ENOMEM);
		ERROR_LOG("context's msg_pool create failed. %m\n");
		goto cleanup2;
	}

	XIO_OBSERVABLE_INIT(&ctx->observable, ctx);
	INIT_LIST_HEAD(&ctx->ctx_list);

	switch (flags) {
	case XIO_LOOP_USER_LOOP:
		break;
	case XIO_LOOP_GIVEN_THREAD:
		set_cpus_allowed_ptr(worker, cpumask_of(cpu_hint));
		ctx->worker = (uint64_t)worker;
		break;
	case XIO_LOOP_TASKLET:
		break;
	case XIO_LOOP_WORKQUEUE:
		break;
	default:
		ERROR_LOG("wrong type. %u\n", flags);
		goto cleanup3;
	}

	ctx->ev_loop = xio_ev_loop_init(flags, ctx, loop_ops);
	if (!ctx->ev_loop)
		goto cleanup3;

	ctx->stats.hertz = HZ;
	/* Initialize default counters' name */
	ctx->stats.name[XIO_STAT_TX_MSG] = kstrdup("TX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_MSG] = kstrdup("RX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_TX_BYTES] = kstrdup("TX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_BYTES] = kstrdup("RX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_DELAY] = kstrdup("DELAY", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_APPDELAY] = kstrdup("APPDELAY", GFP_KERNEL);

	/* initialize rdma pools only */
	transport = xio_get_transport("rdma");
	if (transport && ctx->prealloc_xio_inline_bufs) {
		int retval = xio_ctx_pool_create(
					ctx, XIO_PROTO_RDMA,
					XIO_CONTEXT_POOL_CLASS_INITIAL);
		if (retval) {
			ERROR_LOG("Failed to create initial pool. ctx:%p\n",
				  ctx);
			goto cleanup3;
		}
		retval = xio_ctx_pool_create(ctx, XIO_PROTO_RDMA,
					     XIO_CONTEXT_POOL_CLASS_PRIMARY);
		if (retval) {
			ERROR_LOG("Failed to create primary pool. ctx:%p\n",
				  ctx);
			goto cleanup3;
		}
	}

	spin_lock_init(&ctx->ctx_list_lock);

	xio_idr_add_uobj(usr_idr, ctx, "xio_context");
	return ctx;

cleanup3:
	xio_objpool_destroy(ctx->msg_pool);

cleanup2:
	xio_workqueue_destroy(ctx->workqueue);

cleanup1:
	kfree(ctx);

cleanup0:
	ERROR_LOG("xio_ctx_open failed\n");
	return NULL;
}
/*---------------------------------------------------------------------------*/
struct xio_context *xio_context_create(unsigned int flags,
				       struct xio_loop_ops *loop_ops,
				       struct task_struct *worker,
				       int polling_timeout, int cpu_hint)
{
	struct xio_context *ctx;
	struct dentry *xio_root;
	char name[32];
	int cpu;

	if (cpu_hint > 0 && cpu_hint >= num_online_cpus()) {
		xio_set_error(EINVAL);
		ERROR_LOG("cpu_hint(%d) >= num_online_cpus(%d)\n",
			  cpu_hint, num_online_cpus());
		goto cleanup0;
	}

	if ((flags == XIO_LOOP_USER_LOOP) &&
	    (!(loop_ops && loop_ops->add_event && loop_ops->ev_loop))) {
		xio_set_error(EINVAL);
		ERROR_LOG("loop_ops and ev_loop and ev_loop_add_event are "
			  "mandatory with loop_ops\n");
		goto cleanup0;
	}

	xio_read_logging_level();

	/* no need to disable preemption */
	cpu = raw_smp_processor_id();

	if (cpu == -1)
		goto cleanup0;

	/* allocate new context */
	ctx = kzalloc(sizeof(struct xio_context), GFP_KERNEL);
	if (ctx == NULL) {
		xio_set_error(ENOMEM);
		ERROR_LOG("kzalloc failed\n");
		goto cleanup0;
	}

	if (cpu_hint < 0)
		cpu_hint = cpu;

	ctx->flags = flags;
	ctx->cpuid = cpu_hint;
	ctx->nodeid = cpu_to_node(cpu_hint);
	ctx->polling_timeout = polling_timeout;

	ctx->workqueue = xio_workqueue_create(ctx);
	if (!ctx->workqueue) {
		xio_set_error(ENOMEM);
		ERROR_LOG("xio_workqueue_init failed.\n");
		goto cleanup1;
	}

	XIO_OBSERVABLE_INIT(&ctx->observable, ctx);
	INIT_LIST_HEAD(&ctx->ctx_list);

	switch (flags) {
	case XIO_LOOP_USER_LOOP:
		break;
	case XIO_LOOP_GIVEN_THREAD:
		set_cpus_allowed_ptr(worker, cpumask_of(cpu_hint));
		ctx->worker = (uint64_t)worker;
		break;
	case XIO_LOOP_TASKLET:
		break;
	case XIO_LOOP_WORKQUEUE:
		break;
	default:
		ERROR_LOG("wrong type. %u\n", flags);
		goto cleanup2;
	}

	xio_root = xio_debugfs_root();
	if (xio_root) {
		/* more than one context can share the same core */
		sprintf(name, "ctx-%d-%p", cpu_hint, worker);
		ctx->ctx_dentry = debugfs_create_dir(name, xio_root);
		if (!ctx->ctx_dentry) {
			ERROR_LOG("debugfs entry %s create failed\n", name);
			goto cleanup2;
		}
	}

	ctx->ev_loop = xio_ev_loop_init(flags, ctx, loop_ops);
	if (!ctx->ev_loop)
		goto cleanup3;

	ctx->stats.hertz = HZ;
	/* Initialize default counters' name */
	ctx->stats.name[XIO_STAT_TX_MSG] = kstrdup("TX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_MSG] = kstrdup("RX_MSG", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_TX_BYTES] = kstrdup("TX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_RX_BYTES] = kstrdup("RX_BYTES", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_DELAY] = kstrdup("DELAY", GFP_KERNEL);
	ctx->stats.name[XIO_STAT_APPDELAY] = kstrdup("APPDELAY", GFP_KERNEL);

	return ctx;

cleanup3:
	debugfs_remove_recursive(ctx->ctx_dentry);
	ctx->ctx_dentry = NULL;

cleanup2:
	xio_workqueue_destroy(ctx->workqueue);

cleanup1:
	kfree(ctx);

cleanup0:
	ERROR_LOG("xio_ctx_open failed\n");
	return NULL;
}