/*
 * Tear down a device context previously set up by mthca_alloc_context():
 * release the internal PD, unmap the UAR doorbell page, free the doorbell
 * record table, and finally free the context structure itself.
 */
static void mthca_free_context(struct ibv_context *ibctx)
{
	struct mthca_context *ctx = to_mctx(ibctx);

	mthca_free_pd(ctx->pd);
	munmap(ctx->uar, to_mdev(ibctx->device)->page_size);
	mthca_free_db_tab(ctx->db_tab);
	free(ctx);
}
/*
 * Allocate and initialize a per-process device context (legacy
 * ibv_context flavor).  Exchanges the GET_CONTEXT command with the
 * kernel driver, sizes the QP lookup table from the response, maps the
 * UAR doorbell page over cmd_fd, allocates an internal PD, and installs
 * the HCA-generation-specific (Arbel/mem-free vs. Tavor) verbs ops.
 * Returns the embedded ibv_context on success, NULL on any failure
 * (all partially acquired resources are released via the goto chain).
 */
static struct ibv_context *mthca_alloc_context(struct ibv_device *ibdev, int cmd_fd)
{
	struct mthca_context *context;
	struct ibv_get_context cmd;
	struct mthca_alloc_ucontext_resp resp;
	int i;

	context = calloc(1, sizeof *context);
	if (!context)
		return NULL;

	context->ibv_ctx.cmd_fd = cmd_fd;
	/* Ask the kernel for our ucontext; resp carries qp_tab_size and
	   uarc_size used below. */
	if (ibv_cmd_get_context(&context->ibv_ctx, &cmd, sizeof cmd,
				&resp.ibv_resp, sizeof resp))
		goto err_free;

	context->num_qps = resp.qp_tab_size;
	/* ffs(n) - 1 is log2(n) -- assumes qp_tab_size is a power of two
	   (TODO confirm against kernel driver); the QPN space is split into
	   MTHCA_QP_TABLE_SIZE top-level slots indexed by the high bits. */
	context->qp_table_shift = ffs(context->num_qps) - 1 - MTHCA_QP_TABLE_BITS;
	context->qp_table_mask = (1 << context->qp_table_shift) - 1;

	/*
	 * Need to set ibv_ctx.device because mthca_is_memfree() will
	 * look at it to figure out the HCA type.
	 */
	context->ibv_ctx.device = ibdev;

	if (mthca_is_memfree(&context->ibv_ctx)) {
		/* Mem-free (Arbel) HCAs keep doorbell records in host
		   memory, so a doorbell table is needed. */
		context->db_tab = mthca_alloc_db_tab(resp.uarc_size);
		if (!context->db_tab)
			goto err_free;
	} else
		context->db_tab = NULL;

	pthread_mutex_init(&context->qp_table_mutex, NULL);
	for (i = 0; i < MTHCA_QP_TABLE_SIZE; ++i)
		context->qp_table[i].refcnt = 0;

	/* Map the UAR page through the command fd; doorbells are rung by
	   writing into this mapping. */
	context->uar = mmap(NULL, to_mdev(ibdev)->page_size, PROT_WRITE,
			    MAP_SHARED, cmd_fd, 0);
	if (context->uar == MAP_FAILED)
		goto err_db_tab;

	pthread_spin_init(&context->uar_lock, PTHREAD_PROCESS_PRIVATE);

	context->pd = mthca_alloc_pd(&context->ibv_ctx);
	if (!context->pd)
		goto err_unmap;

	context->pd->context = &context->ibv_ctx;

	/* Start from the common ops table, then override the data-path
	   entry points with the HCA-generation-specific variants. */
	context->ibv_ctx.ops = mthca_ctx_ops;
	if (mthca_is_memfree(&context->ibv_ctx)) {
		context->ibv_ctx.ops.req_notify_cq = mthca_arbel_arm_cq;
		context->ibv_ctx.ops.cq_event = mthca_arbel_cq_event;
		context->ibv_ctx.ops.post_send = mthca_arbel_post_send;
		context->ibv_ctx.ops.post_recv = mthca_arbel_post_recv;
		context->ibv_ctx.ops.post_srq_recv = mthca_arbel_post_srq_recv;
	} else {
		context->ibv_ctx.ops.req_notify_cq = mthca_tavor_arm_cq;
		/* Tavor needs no CQ-event hook. */
		context->ibv_ctx.ops.cq_event = NULL;
		context->ibv_ctx.ops.post_send = mthca_tavor_post_send;
		context->ibv_ctx.ops.post_recv = mthca_tavor_post_recv;
		context->ibv_ctx.ops.post_srq_recv = mthca_tavor_post_srq_recv;
	}

	return &context->ibv_ctx;

	/* Unwind in reverse order of acquisition. */
err_unmap:
	munmap(context->uar, to_mdev(ibdev)->page_size);

err_db_tab:
	mthca_free_db_tab(context->db_tab);

err_free:
	free(context);
	return NULL;
}
/*
 * Allocate and initialize a per-process device context (modern
 * verbs_context flavor).  Uses verbs_init_and_alloc_context() for the
 * common setup, exchanges the GET_CONTEXT command with the kernel,
 * sizes the QP lookup table, maps the UAR doorbell page, allocates an
 * internal PD, and installs the common plus HCA-generation-specific
 * (Arbel/mem-free vs. Tavor) ops tables via verbs_set_ops().
 * Returns the embedded verbs_context on success, NULL on failure (the
 * goto chain releases everything acquired so far, including the
 * verbs-layer state via verbs_uninit_context()).
 */
static struct verbs_context *mthca_alloc_context(struct ibv_device *ibdev,
						 int cmd_fd,
						 void *private_data)
{
	struct mthca_context *context;
	struct ibv_get_context cmd;
	struct umthca_alloc_ucontext_resp resp;
	int i;

	context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
					       RDMA_DRIVER_MTHCA);
	if (!context)
		return NULL;

	/* Ask the kernel for our ucontext; resp carries qp_tab_size and
	   uarc_size used below. */
	if (ibv_cmd_get_context(&context->ibv_ctx, &cmd, sizeof cmd,
				&resp.ibv_resp, sizeof resp))
		goto err_free;

	context->num_qps = resp.qp_tab_size;
	/* ffs(n) - 1 is log2(n) -- assumes qp_tab_size is a power of two
	   (TODO confirm against kernel driver); the QPN space is split into
	   MTHCA_QP_TABLE_SIZE top-level slots indexed by the high bits. */
	context->qp_table_shift = ffs(context->num_qps) - 1 - MTHCA_QP_TABLE_BITS;
	context->qp_table_mask = (1 << context->qp_table_shift) - 1;

	if (mthca_is_memfree(&context->ibv_ctx.context)) {
		/* Mem-free (Arbel) HCAs keep doorbell records in host
		   memory, so a doorbell table is needed. */
		context->db_tab = mthca_alloc_db_tab(resp.uarc_size);
		if (!context->db_tab)
			goto err_free;
	} else
		context->db_tab = NULL;

	pthread_mutex_init(&context->qp_table_mutex, NULL);
	for (i = 0; i < MTHCA_QP_TABLE_SIZE; ++i)
		context->qp_table[i].refcnt = 0;

	/* Map the UAR page through the command fd; doorbells are rung by
	   writing into this mapping. */
	context->uar = mmap(NULL, to_mdev(ibdev)->page_size, PROT_WRITE,
			    MAP_SHARED, cmd_fd, 0);
	if (context->uar == MAP_FAILED)
		goto err_db_tab;

	pthread_spin_init(&context->uar_lock, PTHREAD_PROCESS_PRIVATE);

	context->pd = mthca_alloc_pd(&context->ibv_ctx.context);
	if (!context->pd)
		goto err_unmap;

	context->pd->context = &context->ibv_ctx.context;

	/* Install the shared ops first, then overlay the data-path ops
	   for the detected HCA generation. */
	verbs_set_ops(&context->ibv_ctx, &mthca_ctx_common_ops);
	if (mthca_is_memfree(&context->ibv_ctx.context))
		verbs_set_ops(&context->ibv_ctx, &mthca_ctx_arbel_ops);
	else
		verbs_set_ops(&context->ibv_ctx, &mthca_ctx_tavor_ops);

	return &context->ibv_ctx;

	/* Unwind in reverse order of acquisition. */
err_unmap:
	munmap(context->uar, to_mdev(ibdev)->page_size);

err_db_tab:
	mthca_free_db_tab(context->db_tab);

err_free:
	verbs_uninit_context(&context->ibv_ctx);
	free(context);
	return NULL;
}