/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 *
 * The caller's error handling must ensure the endpoint is not leaked
 * if this function fails.
 */
int
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC:       %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	if (ia->ri_id->qp) {
		rc = rpcrdma_ep_disconnect(ep, ia);
		if (rc)
			dprintk("RPC:       %s: rpcrdma_ep_disconnect"
				" returned %i\n", __func__, rc);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	/* padding - could be done in rpcrdma_buffer_destroy... */
	if (ep->rep_pad_mr) {
		rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad);
		ep->rep_pad_mr = NULL;
	}

	rpcrdma_clean_cq(ep->rep_cq);
	rc = ib_destroy_cq(ep->rep_cq);
	if (rc)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	return rc;
}
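
As the comment notes, a caller must release the endpoint even when this function reports an error. A minimal, hypothetical caller sketch follows (my_ep_teardown and the kfree() of the ep are illustrative assumptions, not code from xprtrdma itself):

static void my_ep_teardown(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rpcrdma_ep_destroy(ep, ia);
	if (rc)
		dprintk("RPC:       %s: rpcrdma_ep_destroy returned %i\n",
			__func__, rc);

	/* release the ep regardless of rc so it is not leaked */
	kfree(ep);
}
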
Example #2
/**
 * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
 * -1 on failure
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	/* qp is created only once both addr & route are resolved */
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL)
		rdma_destroy_qp(ib_conn->cma_id);

	/* if called from the cma handler context, the caller arranges for
	 * the cma to destroy the id */
	if (ib_conn->cma_id != NULL && can_destroy_id)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;
	kfree(ib_conn->page_vec);

	return 0;
}
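
A hedged sketch of the two call patterns implied by the can_destroy_id flag and the comment above (both helper names are illustrative, not functions from the iser driver): a normal teardown path may destroy the CMA ID here, while a path running in the CMA event handler leaves the ID alone and lets the CM destroy it (in the kernel RDMA CM, returning non-zero from the event handler requests exactly that).

/* normal teardown: safe to destroy the CMA ID here */
static void my_iser_release(struct iser_conn *ib_conn)
{
	iser_free_ib_conn_res(ib_conn, 1);
}

/* called from the CMA event handler: keep the ID, let the CM destroy it */
static int my_iser_cma_cleanup(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn = cma_id->context;

	iser_free_ib_conn_res(ib_conn, 0);
	return 1;	/* non-zero return asks the CM to destroy cma_id */
}
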
Example #3
/**
 * Cleanup a conn_t struct.
 *
 * @param[in] conn
 */
void conn_fini(void *arg)
{
    conn_t *conn = arg;

#if WITH_TRANSPORT_IB
    if (conn->transport.type == CONN_TYPE_RDMA) {
        if (conn->rdma.cm_id) {
            if (conn->rdma.cm_id->qp)
                rdma_destroy_qp(conn->rdma.cm_id);
            rdma_destroy_id(conn->rdma.cm_id);
            conn->rdma.cm_id = NULL;
        }
    }
#endif

#if WITH_TRANSPORT_UDP && WITH_RUDP
    if (conn->transport.type == CONN_TYPE_UDP) {
        atomic_set(&conn->udp.send_seq_num, 0);
        atomic_set(&conn->udp.recv_seq_num, 0);
    }
#endif

    pthread_mutex_destroy(&conn->mutex);
#if WITH_TRANSPORT_IB || WITH_TRANSPORT_UDP
    pthread_cond_destroy(&conn->move_wait);
#endif
}
Example #4
void IBConnection::on_rejected(struct rdma_cm_event* /* event */)
{
    L_(debug) << "[" << index_ << "] "
              << "connection rejected";

    rdma_destroy_qp(cm_id_);
}
Example #5
static void destroy_node(struct cmatest_node *node)
{
	if (!node->cma_id)
		return;

	if (node->cma_id->qp)
		rdma_destroy_qp(node->cma_id);

	if (node->cq[SEND_CQ_INDEX])
		ibv_destroy_cq(node->cq[SEND_CQ_INDEX]);

	if (node->cq[RECV_CQ_INDEX])
		ibv_destroy_cq(node->cq[RECV_CQ_INDEX]);

	if (node->mem) {
		ibv_dereg_mr(node->mr);
		free(node->mem);
	}

	if (node->pd)
		ibv_dealloc_pd(node->pd);

	/* Destroy the RDMA ID after all device resources */
	rdma_destroy_id(node->cma_id);
}
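
The comment above stresses that the CM ID goes last. For contrast, a hypothetical create_node() sketch (not part of the cmatest example; sizes and flags are placeholders and error handling is omitted) builds the same resources in the reverse order:

static int create_node(struct cmatest_node *node, struct rdma_cm_id *id,
		       size_t size)
{
	node->cma_id = id;
	node->pd = ibv_alloc_pd(id->verbs);
	node->mem = malloc(size);
	node->mr = ibv_reg_mr(node->pd, node->mem, size,
			      IBV_ACCESS_LOCAL_WRITE);
	node->cq[SEND_CQ_INDEX] = ibv_create_cq(id->verbs, 16, node, NULL, 0);
	node->cq[RECV_CQ_INDEX] = ibv_create_cq(id->verbs, 16, node, NULL, 0);

	/* the QP is created last, against the PD and CQs */
	struct ibv_qp_init_attr attr = {
		.send_cq = node->cq[SEND_CQ_INDEX],
		.recv_cq = node->cq[RECV_CQ_INDEX],
		.qp_type = IBV_QPT_RC,
		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
			 .max_send_sge = 1, .max_recv_sge = 1 },
	};
	return rdma_create_qp(id, node->pd, &attr);
}
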
Example #6
static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL)
		rdma_destroy_qp(ib_conn->cma_id);

	if (ib_conn->cma_id != NULL)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;
	kfree(ib_conn->page_vec);

	return 0;
}
Example #7
void on_disconnect(struct rdma_cm_id *id)
{
  struct timeval start, end, dt;
  gettimeofday(&start, NULL);
  struct connection *conn = (struct connection *)id->context;

  printf("disconnected.\n");

  rdma_destroy_qp(id);

  ibv_dereg_mr(conn->send_region_mr);
  ibv_dereg_mr(conn->recv_region_mr);

  ibv_dereg_mr(conn->send_msg_mr);
  ibv_dereg_mr(conn->recv_msg_mr);

  free(conn->send_region);
  free(conn->recv_region);

  free(conn->recv_msg);
  free(conn->send_msg);

  rdma_destroy_id(id);

  free(conn);
  gettimeofday(&end, NULL);
  timersub(&end, &start, &dt);
  long usec = dt.tv_usec + 1000000 * dt.tv_sec;
  printf("[Derigester] takes %ld micro_secs.\n", usec);


  return; /* exit event loop */
}
Example #8
/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC:       %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);

	if (ia->ri_dma_mr) {
		rc = ib_dereg_mr(ia->ri_dma_mr);
		dprintk("RPC:       %s: ib_dereg_mr returned %i\n",
			__func__, rc);
	}
}
Example #9
void destroy_connection(rdma_conn_t *conn)
{
    rdma_destroy_qp(conn->id);

    ibv_dereg_mr(conn->send_mr);
    ibv_dereg_mr(conn->recv_mr);
    ibv_dereg_mr(conn->data_mr);
    ibv_dereg_mr(conn->addr_mr);

    if (conn->send_msg) {
        free(conn->send_msg);
        conn->send_msg = NULL;
    }
    if (conn->recv_msg) {
        free(conn->recv_msg);
        conn->recv_msg = NULL;
    }

    rdma_destroy_id(conn->id);

    if (conn) {
        free(conn);
        conn = NULL;
    }
}
Example #10
void IBConnection::on_timewait_exit(struct rdma_cm_event* /* event */)
{
    L_(debug) << "[" << index_ << "] "
              << "connection reached timewait_exit";

    rdma_destroy_qp(cm_id_);
}
Example #11
static void process_connect_reject(struct rdma_cm_event *event, conn_t *conn)
{
    pthread_mutex_lock(&conn->mutex);

    if (event->status == 28) {
        /* 28 = Consumer Reject. The remote side called rdma_reject,
         * so there is a payload. */
        const struct cm_priv_reject *rej = event->param.conn.private_data;

        if (rej->reason == REJECT_REASON_CONNECTED ||
            rej->reason == REJECT_REASON_CONNECTING) {
            /* Both sides tried to connect at the same time. This is
             * good. */
            pthread_mutex_unlock(&conn->mutex);
            return;
        }
    }

    /* That's bad, and that should not happen. */
    conn->state = CONN_STATE_DISCONNECTED;
    pthread_cond_broadcast(&conn->move_wait);

    rdma_destroy_qp(conn->rdma.cm_id);

    pthread_mutex_unlock(&conn->mutex);

    conn_put(conn);
}
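
A hedged sketch of the responder side that produces the "Consumer Reject" payload handled above (reject_duplicate_request is an illustrative helper, not part of this code base): rdma_reject() carries the cm_priv_reject struct as private data, and the initiator sees it with event->status == 28.

static int reject_duplicate_request(struct rdma_cm_id *id)
{
    struct cm_priv_reject rej = {
        .reason = REJECT_REASON_CONNECTED,
    };

    /* the peer receives this struct as event->param.conn.private_data */
    return rdma_reject(id, &rej, sizeof(rej));
}
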
Example #12
static ssize_t
fi_ibv_rdm_process_addr_resolved(struct rdma_cm_id *id,
				 struct fi_ibv_rdm_ep *ep)
{
	ssize_t ret = FI_SUCCESS;
	struct ibv_qp_init_attr qp_attr;
	struct fi_ibv_rdm_tagged_conn *conn = id->context;

	VERBS_INFO(FI_LOG_AV, "ADDR_RESOLVED conn %p, addr %s:%u\n",
		   conn, inet_ntoa(conn->addr.sin_addr),
		   ntohs(conn->addr.sin_port));

	assert(id->verbs == ep->domain->verbs);

	do {
		fi_ibv_rdm_tagged_init_qp_attributes(&qp_attr, ep);
		if (rdma_create_qp(id, ep->domain->pd, &qp_attr)) {
			VERBS_INFO_ERRNO(FI_LOG_AV,
					 "rdma_create_qp failed\n", errno);
			return -errno;
		}

		if (conn->cm_role == FI_VERBS_CM_PASSIVE) {
			break;
		}

		conn->qp[0] = id->qp;
		assert(conn->id[0] == id);
		if (conn->cm_role == FI_VERBS_CM_SELF) {
			break;
		}

		ret = fi_ibv_rdm_prepare_conn_memory(ep, conn);
		if (ret != FI_SUCCESS) {
			goto err;
		}

		ret = fi_ibv_rdm_repost_receives(conn, ep, ep->rq_wr_depth);
		if (ret < 0) {
			VERBS_INFO(FI_LOG_AV, "repost receives failed\n");
			goto err;
		} else {
			ret = FI_SUCCESS;
		}
	} while (0);

	if (rdma_resolve_route(id, FI_IBV_RDM_CM_RESOLVEADDR_TIMEOUT)) {
		VERBS_INFO(FI_LOG_AV, "rdma_resolve_route failed\n");
		ret = -FI_EHOSTUNREACH;
		goto err;
	}

	return ret;
err:
	rdma_destroy_qp(id);
	return ret;
}
Example #13
void xfer_rdma_destroy_ctx(struct xfer_context *ctx) {
	rdma_destroy_qp(ctx->cm_id);
	ibv_destroy_cq(ctx->cq);
	ibv_destroy_comp_channel(ctx->ch);
	ibv_dereg_mr(ctx->send_mr);
	ibv_dereg_mr(ctx->recv_mr);
	ibv_dealloc_pd(ctx->pd);
	free(ctx);
}
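
Note that xfer_rdma_destroy_ctx() leaves ctx->cm_id alive. A hedged caller sketch (xfer_rdma_close is an illustrative name; the real code may own the CM ID elsewhere) destroys the ID last, after the device resources, as in Example #5:

static void xfer_rdma_close(struct xfer_context *ctx)
{
	struct rdma_cm_id *id = ctx->cm_id;

	xfer_rdma_destroy_ctx(ctx);	/* frees ctx, keeps the CM ID */
	rdma_destroy_id(id);		/* CM ID last */
}
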
Example #14
void network_release() {
    ibv_dereg_mr(mr_data);
    
    rdma_destroy_qp(cm_id);
    ibv_destroy_cq(cq);
    ibv_destroy_comp_channel(comp_chan);
    rdma_destroy_id(cm_id);
    rdma_destroy_event_channel(cm_channel);
}
Example #15
/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC:       %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);
		rpcrdma_flush_cqs(ep);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		ia->ri_ops->ro_reset(xprt);

		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_id->device != id->device) {
			printk("RPC:       %s: can't reconnect on "
				"different device!\n", __func__);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		write_lock(&ia->ri_qplock);
		old = ia->ri_id;
		ia->ri_id = id;
		write_unlock(&ia->ri_qplock);

		rdma_destroy_qp(old);
		rdma_destroy_id(old);
	} else {
Example #16
/**
 * @param[in] ni
 * @param[in] conn
 * @param[in] event
 *
 * @return status
 *
 * conn is locked
 */
static int accept_connection_request(ni_t *ni, conn_t *conn,
                                     struct rdma_cm_event *event)
{
    struct rdma_conn_param conn_param;
    struct ibv_qp_init_attr init_attr;
    struct cm_priv_accept priv;

    conn->state = CONN_STATE_CONNECTING;

    memset(&init_attr, 0, sizeof(init_attr));

    init_attr.qp_type = IBV_QPT_RC;
    init_attr.cap.max_send_wr = ni->iface->cap.max_send_wr;
    init_attr.send_cq = ni->rdma.cq;
    init_attr.recv_cq = ni->rdma.cq;
    init_attr.srq = ni->rdma.srq;
    init_attr.cap.max_send_sge = ni->iface->cap.max_send_sge;

    if (rdma_create_qp(event->id, ni->iface->pd, &init_attr)) {
        conn->state = CONN_STATE_DISCONNECTED;
        pthread_cond_broadcast(&conn->move_wait);

        return PTL_FAIL;
    }

    /* If we were already trying to connect ourselves, cancel it. */
    if (conn->rdma.cm_id != NULL) {
        assert(conn->rdma.cm_id->context == conn);
        conn->rdma.cm_id->context = NULL;
    }

    event->id->context = conn;
    conn->rdma.cm_id = event->id;

    memset(&conn_param, 0, sizeof conn_param);
    conn_param.responder_resources = 1;
    conn_param.initiator_depth = 1;
    conn_param.retry_count = 7;
    conn_param.rnr_retry_count = 7;

    if (ni->options & PTL_NI_LOGICAL) {
        conn_param.private_data = &priv;
        conn_param.private_data_len = sizeof(priv);
    }

    if (rdma_accept(event->id, &conn_param)) {
        rdma_destroy_qp(event->id);
        conn->rdma.cm_id = NULL;
        conn->state = CONN_STATE_DISCONNECTED;
        pthread_cond_broadcast(&conn->move_wait);

        return PTL_FAIL;
    }

    return PTL_OK;
}
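
For context, a hedged sketch of the active side this accept path pairs with (initiate_connection_sketch is illustrative and assumes the QP on conn->rdma.cm_id was already created with rdma_create_qp(), as above):

static int initiate_connection_sketch(conn_t *conn)
{
    struct rdma_conn_param conn_param;

    memset(&conn_param, 0, sizeof(conn_param));
    conn_param.responder_resources = 1;
    conn_param.initiator_depth = 1;
    conn_param.retry_count = 7;
    conn_param.rnr_retry_count = 7;

    if (rdma_connect(conn->rdma.cm_id, &conn_param)) {
        /* roll back the QP if the connect request cannot be issued */
        rdma_destroy_qp(conn->rdma.cm_id);
        conn->state = CONN_STATE_DISCONNECTED;
        pthread_cond_broadcast(&conn->move_wait);
        return PTL_FAIL;
    }

    conn->state = CONN_STATE_CONNECTING;
    return PTL_OK;
}
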
Example #17
void CompletionContext::removeConnection(struct rdma_cm_id* id) {
    if (id->qp == nullptr) {
        // Queue Pair already destroyed
        return;
    }

    LOG_TRACE("%1%: Destroying queue pair", formatRemoteAddress(id));
    mSocketMap.erase(id->qp->qp_num);
    rdma_destroy_qp(id);
}
Example #18
static ssize_t
fi_ibv_rdm_process_event_rejected(struct fi_ibv_rdm_ep *ep,
				  struct rdma_cm_event *event)
{
	struct fi_ibv_rdm_conn *conn = event->id->context;
	ssize_t ret = FI_SUCCESS;
	const int *pdata = event->param.conn.private_data;

	if ((pdata && *pdata == 0xdeadbeef) ||
	    /*
	     * TODO: this is a workaround for the case where the private_data
	     * from the rdma_reject call does not arrive on iWarp devices
	     */
	    (conn->cm_role == FI_VERBS_CM_PASSIVE &&
	     event->status == -ECONNREFUSED))
	{
		errno = 0;
		rdma_destroy_qp(event->id);
		if (errno) {
			VERBS_INFO_ERRNO(FI_LOG_AV, "rdma_destroy_qp failed\n",
					 errno);
			ret = -errno;
		}
		if (rdma_destroy_id(event->id)) {
			VERBS_INFO_ERRNO(FI_LOG_AV, "rdma_destroy_id failed\n",
					 errno);
			if (ret == FI_SUCCESS)
				ret = -errno;
		}
		VERBS_INFO(FI_LOG_AV,
			"Rejected from conn %p, addr %s:%u, cm_role %d, status %d\n",
			conn, inet_ntoa(conn->addr.sin_addr),
			ntohs(conn->addr.sin_port),
			conn->cm_role,
			event->status);
	} else {
		VERBS_INFO(FI_LOG_AV,
			"Unexpected REJECT from conn %p, addr %s:%u, cm_role %d, "
			"msg len %d, msg %x, status %d, err %d\n",
			conn, inet_ntoa(conn->addr.sin_addr),
			ntohs(conn->addr.sin_port),
			conn->cm_role,
			event->param.conn.private_data_len,
			event->param.conn.private_data ?
			*(int *)event->param.conn.private_data : 0,
			event->status, errno);
		conn->state = FI_VERBS_CONN_REJECTED;

	}
	return ret;
}
Example #19
/*
 * Clean up/close an IA.
 *   o if event handles and PD have been initialized, free them.
 *   o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC:       %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rpcrdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
}
Example #20
static int ibw_conn_priv_destruct(struct ibw_conn_priv *pconn)
{
	DEBUG(DEBUG_DEBUG, ("ibw_conn_priv_destruct(%p, cmid: %p)\n",
		pconn, pconn->cm_id));

	/* pconn->wr_index is freed by talloc */
	/* pconn->wr_index[i] are freed by talloc */

	/*
	 * tevent_fd must be removed before the fd is closed
	 */
	TALLOC_FREE(pconn->verbs_channel_event);

	/* destroy verbs */
	if (pconn->cm_id!=NULL && pconn->cm_id->qp!=NULL) {
		rdma_destroy_qp(pconn->cm_id);
		pconn->cm_id->qp = NULL;
	}

	if (pconn->cq!=NULL) {
		ibv_destroy_cq(pconn->cq);
		pconn->cq = NULL;
	}

	if (pconn->verbs_channel!=NULL) {
		ibv_destroy_comp_channel(pconn->verbs_channel);
		pconn->verbs_channel = NULL;
	}

	/* free memory regions */
	ibw_free_mr(&pconn->buf_send, &pconn->mr_send);
	ibw_free_mr(&pconn->buf_recv, &pconn->mr_recv);

	if (pconn->pd) {
		ibv_dealloc_pd(pconn->pd);
		pconn->pd = NULL;
		DEBUG(DEBUG_DEBUG, ("pconn=%p pd deallocated\n", pconn));
	}

	if (pconn->cm_id) {
		rdma_destroy_id(pconn->cm_id);
		pconn->cm_id = NULL;
		DEBUG(DEBUG_DEBUG, ("pconn=%p cm_id destroyed\n", pconn));
	}

	return 0;
}
Example #21
/**
 * Accept an RC connection request to self.
 *
 * called while holding connect->mutex
 * only used for physical NIs
 *
 * @param[in] ni
 * @param[in] conn
 * @param[in] event
 *
 * @return status
 */
static int accept_connection_self(ni_t *ni, conn_t *conn,
                                  struct rdma_cm_event *event)
{
    struct rdma_conn_param conn_param;
    struct ibv_qp_init_attr init_attr;

    conn->state = CONN_STATE_CONNECTING;

    memset(&init_attr, 0, sizeof(init_attr));
    init_attr.qp_type = IBV_QPT_RC;
    init_attr.send_cq = ni->rdma.cq;
    init_attr.recv_cq = ni->rdma.cq;
    init_attr.srq = ni->rdma.srq;
    init_attr.cap.max_send_wr = ni->iface->cap.max_send_wr;
    init_attr.cap.max_send_sge = ni->iface->cap.max_send_sge;

    if (rdma_create_qp(event->id, ni->iface->pd, &init_attr)) {
        conn->state = CONN_STATE_DISCONNECTED;
        pthread_cond_broadcast(&conn->move_wait);

        return PTL_FAIL;
    }

    ni->rdma.self_cm_id = event->id;

    /* The lower 2 bits (on 32-bit hosts), or 3 bits (on 64-bit
     * hosts), of a pointer are always 0. Use them to store the type of
     * context. 0=conn; 1=NI. */
    event->id->context = (void *)((uintptr_t) ni | 1);

    memset(&conn_param, 0, sizeof conn_param);
    conn_param.responder_resources = 1;
    conn_param.initiator_depth = 1;
    conn_param.rnr_retry_count = 7;

    if (rdma_accept(event->id, &conn_param)) {
        rdma_destroy_qp(event->id);
        conn->state = CONN_STATE_DISCONNECTED;
        pthread_cond_broadcast(&conn->move_wait);

        return PTL_FAIL;
    }

    return PTL_OK;
}
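
A hedged sketch of how a CM event handler might decode the tagged context pointer set above (decode_cm_context is an illustrative helper; it assumes the surrounding ni_t/conn_t types and <stdint.h>):

static void decode_cm_context(struct rdma_cm_id *id, ni_t **ni, conn_t **conn)
{
    uintptr_t v = (uintptr_t)id->context;

    if (v & 1) {
        /* tag bit set: the context is the NI */
        *ni = (ni_t *)(v & ~(uintptr_t)1);
        *conn = NULL;
    } else {
        /* tag bit clear: the context is a conn */
        *ni = NULL;
        *conn = (conn_t *)v;
    }
}
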
Example #22
void destroy_connection(void *context)
{
  struct connection *conn = (struct connection *)context;

  rdma_destroy_qp(conn->id);

  ibv_dereg_mr(conn->send_mr);
  ibv_dereg_mr(conn->recv_mr);
  ibv_dereg_mr(conn->rdma_msg_mr);

  free(conn->send_msg);
  free(conn->recv_msg);
  free(conn->rdma_msg_region);

  rdma_destroy_id(conn->id);

  free(conn);
}
Example #23
static ssize_t
fi_ibv_rdm_process_event_rejected(struct fi_ibv_rdm_ep *ep,
				  struct rdma_cm_event *event)
{
	struct fi_ibv_rdm_tagged_conn *conn = event->id->context;
	ssize_t ret = FI_SUCCESS;

	if (NULL != event->param.conn.private_data &&
	    *((int *)event->param.conn.private_data) == 0xdeadbeef ) {
		assert(conn->cm_role == FI_VERBS_CM_PASSIVE);
		errno = 0;
		rdma_destroy_qp(event->id);
		if (errno) {
			VERBS_INFO_ERRNO(FI_LOG_AV, "rdma_destroy_qp failed\n",
					 errno);
			ret = -errno;
		}
		if (rdma_destroy_id(event->id)) {
			VERBS_INFO_ERRNO(FI_LOG_AV, "rdma_destroy_id failed\n",
					 errno);
			if (ret == FI_SUCCESS)
				ret = -errno;
		}
		VERBS_INFO(FI_LOG_AV,
			"Rejected from conn %p, addr %s:%u, cm_role %d, status %d\n",
			conn, inet_ntoa(conn->addr.sin_addr),
			ntohs(conn->addr.sin_port),
			conn->cm_role,
			event->status);
	} else {
		VERBS_INFO(FI_LOG_AV,
			"Unexpected REJECT from conn %p, addr %s:%u, cm_role %d, msg len %d, msg %x, status %d\n",
			conn, inet_ntoa(conn->addr.sin_addr),
			ntohs(conn->addr.sin_port),
			conn->cm_role,
			event->param.conn.private_data_len,
			event->param.conn.private_data ?
			*(int *)event->param.conn.private_data : 0,
			event->status);
		conn->state = FI_VERBS_CONN_REJECTED;

	}
	return ret;
}
Example #24
void
destroy_connection(IbvConnection *conn)
{
    rdma_destroy_qp(conn->id);

    ibv_dereg_mr(conn->send_mr);
    ibv_dereg_mr(conn->recv_mr);
    ibv_dereg_mr(conn->rdma_local_mr);
    ibv_dereg_mr(conn->rdma_remote_mr);

    free(conn->send_msg);
    free(conn->recv_msg);
    free(conn->rdma_local_region);
    free(conn->rdma_remote_region);

    rdma_destroy_id(conn->id);

    free(conn);
}
Example #25
int on_disconnect(struct rdma_cm_id *id)
{
  struct connection *conn = (struct connection *)id->context;

  printf("disconnected.\n");

  rdma_destroy_qp(id);

  ibv_dereg_mr(conn->send_mr);
  ibv_dereg_mr(conn->recv_mr);

  free(conn->send_region);
  free(conn->recv_region);

  free(conn);

  rdma_destroy_id(id);

  return 1; /* exit event loop */
}
Example #26
/*
 * Clean up/close an IA.
 *   o if event handles and PD have been initialized, free them.
 *   o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
    int rc;

    dprintk("RPC:       %s: entering\n", __func__);
    if (ia->ri_bind_mem != NULL) {
        rc = ib_dereg_mr(ia->ri_bind_mem);
        dprintk("RPC:       %s: ib_dereg_mr returned %i\n",
            __func__, rc);
    }
    if (ia->ri_id != NULL && !IS_ERR(ia->ri_id) && ia->ri_id->qp)
        rdma_destroy_qp(ia->ri_id);
    if (ia->ri_pd != NULL && !IS_ERR(ia->ri_pd)) {
        rc = ib_dealloc_pd(ia->ri_pd);
        dprintk("RPC:       %s: ib_dealloc_pd returned %i\n",
            __func__, rc);
    }
    if (ia->ri_id != NULL && !IS_ERR(ia->ri_id))
        rdma_destroy_id(ia->ri_id);
}
Example #27
static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL)
		rdma_destroy_qp(ib_conn->cma_id);

	
	if (ib_conn->cma_id != NULL && can_destroy_id)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;
	kfree(ib_conn->page_vec);

	if (ib_conn->login_buf) {
		if (ib_conn->login_req_dma)
			ib_dma_unmap_single(ib_conn->device->ib_device,
				ib_conn->login_req_dma,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
		if (ib_conn->login_resp_dma)
			ib_dma_unmap_single(ib_conn->device->ib_device,
				ib_conn->login_resp_dma,
				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
		kfree(ib_conn->login_buf);
	}

	return 0;
}
Example #28
/**
 * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
 * -1 on failure
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	/* qp is created only once both addr & route are resolved */
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL)
		rdma_destroy_qp(ib_conn->cma_id);

	/* if called from the cma handler context, the caller arranges for
	 * the cma to destroy the id */
	if (ib_conn->cma_id != NULL && can_destroy_id)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;
	kfree(ib_conn->page_vec);

	if (ib_conn->login_req_dma)
		ib_dma_unmap_single(ib_conn->device->ib_device,
				    ib_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
	if (ib_conn->login_resp_dma)
		ib_dma_unmap_single(ib_conn->device->ib_device,
				    ib_conn->login_resp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->login_buf);
	ib_conn->login_buf = NULL;
	ib_conn->login_req_dma = ib_conn->login_resp_dma = 0;

	return 0;
}
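
A hedged sketch of the allocation and DMA mapping that the login-buffer teardown above undoes (my_alloc_login_buf is an illustrative name; the real driver's allocation path may differ, and mapping-error checks are omitted for brevity):

static int my_alloc_login_buf(struct iser_conn *ib_conn)
{
	struct ib_device *dev = ib_conn->device->ib_device;

	ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
				     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!ib_conn->login_buf)
		return -ENOMEM;

	/* the request part of the buffer is DMA'd to the device ... */
	ib_conn->login_req_dma = ib_dma_map_single(dev, ib_conn->login_buf,
					ISCSI_DEF_MAX_RECV_SEG_LEN,
					DMA_TO_DEVICE);
	/* ... and the response part is DMA'd from it */
	ib_conn->login_resp_dma = ib_dma_map_single(dev,
					ib_conn->login_buf +
						ISCSI_DEF_MAX_RECV_SEG_LEN,
					ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
	return 0;
}
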
Example #29
/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		rc = rpcrdma_ep_disconnect(ep, ia);
		if (rc && rc != -ENOTCONN)
			dprintk("RPC:       %s: rpcrdma_ep_disconnect"
				" status %i\n", __func__, rc);
		rpcrdma_clean_cq(ep->rep_cq);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = PTR_ERR(id);
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_id->device != id->device) {
			printk("RPC:       %s: can't reconnect on "
				"different device!\n", __func__);
			rdma_destroy_id(id);
			rc = -ENETDOWN;
			goto out;
		}
		/* END TEMP */
		rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
		ia->ri_id = id;
	}

	rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (rc) {
		dprintk("RPC:       %s: rdma_create_qp failed %i\n",
			__func__, rc);
		goto out;
	}

/* XXX Tavor device performs badly with 2K MTU! */
if (strnicmp(ia->ri_id->device->dma_device->bus->name, "pci", 3) == 0) {
	struct pci_dev *pcid = to_pci_dev(ia->ri_id->device->dma_device);
	if (pcid->device == PCI_DEVICE_ID_MELLANOX_TAVOR &&
	    (pcid->vendor == PCI_VENDOR_ID_MELLANOX ||
	     pcid->vendor == PCI_VENDOR_ID_TOPSPIN)) {
		struct ib_qp_attr attr = {
			.path_mtu = IB_MTU_1024
		};
		rc = ib_modify_qp(ia->ri_id->qp, &attr, IB_QP_PATH_MTU);
	}
}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
				__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition which has already
	 * undergone a best-effort.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC:       %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		dprintk("RPC:       %s: connected\n", __func__);
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}
Example #30
static void isert_conn_qp_destroy(struct isert_connection *isert_conn)
{
	rdma_destroy_qp(isert_conn->cm_id);
	isert_conn->qp = NULL;
}