Example #1
File: c2_qp.c Project: 274914765/C
void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
    struct c2_cq *send_cq;
    struct c2_cq *recv_cq;

    send_cq = to_c2cq(qp->ibqp.send_cq);
    recv_cq = to_c2cq(qp->ibqp.recv_cq);

    /*
     * Lock CQs here, so that CQ polling code can do QP lookup
     * without taking a lock.
     */
    c2_lock_cqs(send_cq, recv_cq);
    c2_free_qpn(c2dev, qp->qpn);
    c2_unlock_cqs(send_cq, recv_cq);

    /*
     * Destroy the qp in the rnic...
     */
    destroy_qp(c2dev, qp);

    /*
     * Mark any unreaped CQEs as null and void.
     */
    c2_cq_clean(c2dev, qp, send_cq->cqn);
    if (send_cq != recv_cq)
        c2_cq_clean(c2dev, qp, recv_cq->cqn);
    /*
     * Unmap the MQs and return the shared pointers
     * to the message pool.
     */
    iounmap(qp->sq_mq.peer);
    iounmap(qp->rq_mq.peer);
    c2_free_mqsp(qp->sq_mq.shared);
    c2_free_mqsp(qp->rq_mq.shared);

    atomic_dec(&qp->refcount);
    wait_event(qp->wait, !atomic_read(&qp->refcount));
}
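The c2_lock_cqs()/c2_unlock_cqs() helpers are not shown on this page. Below is a minimal sketch of how such a pair-locking helper is commonly written (an assumption, not code taken from this project), using the per-CQ spinlock initialized in Example #2: both locks are always taken in a fixed pointer order, so two threads locking the same pair of CQs cannot deadlock against each other.

static void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
{
    /* Sketch only: acquire both CQ locks in a stable (address) order. */
    if (send_cq == recv_cq)
        spin_lock_irq(&send_cq->lock);
    else if (send_cq < recv_cq) {
        spin_lock_irq(&send_cq->lock);
        spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
    } else {
        spin_lock_irq(&recv_cq->lock);
        spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
    }
}

static void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
{
    /* Release in the reverse of the acquisition order above. */
    if (send_cq == recv_cq)
        spin_unlock_irq(&send_cq->lock);
    else if (send_cq < recv_cq) {
        spin_unlock(&recv_cq->lock);
        spin_unlock_irq(&send_cq->lock);
    } else {
        spin_unlock(&send_cq->lock);
        spin_unlock_irq(&recv_cq->lock);
    }
}

The closing atomic_dec()/wait_event() pair drops the QP's initial reference (set by c2_alloc_qp() in Example #3) and then sleeps until any remaining holders, such as pollers that looked the QP up through a CQ, release theirs.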
Example #2
File: c2_cq.c Project: 274914765/C
int c2_init_cq(struct c2_dev *c2dev, int entries,
	       struct c2_ucontext *ctx, struct c2_cq *cq)
{
	struct c2wr_cq_create_req wr;
	struct c2wr_cq_create_rep *reply;
	unsigned long peer_pa;
	struct c2_vq_req *vq_req;
	int err;

	might_sleep();

	cq->ibcq.cqe = entries - 1;
	cq->is_kernel = !ctx;

	/* Allocate a shared pointer */
	cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
				      &cq->mq.shared_dma, GFP_KERNEL);
	if (!cq->mq.shared)
		return -ENOMEM;

	/* Allocate pages for the message pool */
	err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
	if (err)
		goto bail0;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.msg_size = cpu_to_be32(cq->mq.msg_size);
	wr.depth = cpu_to_be32(cq->mq.q_size);
	wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
	wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
	wr.user_context = (u64) (unsigned long) (cq);

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail2;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail2;

	reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail2;
	}

	if ((err = c2_errno(reply)) != 0)
		goto bail3;

	cq->adapter_handle = reply->cq_handle;
	cq->mq.index = be32_to_cpu(reply->mq_index);

	peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
	cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
	if (!cq->mq.peer) {
		err = -ENOMEM;
		goto bail3;
	}

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	/*
	 * Use the MQ index allocated by the adapter to
	 * store the CQ in the qptr_array
	 */
	cq->cqn = cq->mq.index;
	c2dev->qptr_array[cq->cqn] = cq;

	return 0;

bail3:
	vq_repbuf_free(c2dev, reply);
bail2:
	vq_req_free(c2dev, vq_req);
bail1:
	c2_free_cq_buf(c2dev, &cq->mq);
bail0:
	c2_free_mqsp(cq->mq.shared);

	return err;
}
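The tail of c2_init_cq() stores the CQ in c2dev->qptr_array under the MQ index chosen by the adapter. A hypothetical lookup helper (the name and placement are assumptions, not from this project) shows what that registration buys: the event path can translate an adapter-reported MQ index straight back into the CQ without holding any lock.

static struct c2_cq *c2_cq_lookup(struct c2_dev *c2dev, u32 mq_index)
{
    /* Assumed helper: the reverse of the qptr_array store above. */
    return (struct c2_cq *) c2dev->qptr_array[mq_index];
}

Note also the bail0..bail3 ladder: each failure jumps to the label that releases exactly the resources acquired so far, in reverse order of acquisition.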
Example #3
File: c2_qp.c Project: 274914765/C
int c2_alloc_qp(struct c2_dev *c2dev,
        struct c2_pd *pd,
        struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
{
    struct c2wr_qp_create_req wr;
    struct c2wr_qp_create_rep *reply;
    struct c2_vq_req *vq_req;
    struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
    struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
    unsigned long peer_pa;
    u32 q_size, msg_size, mmap_size;
    void __iomem *mmap;
    int err;

    err = c2_alloc_qpn(c2dev, qp);
    if (err)
        return err;
    qp->ibqp.qp_num = qp->qpn;
    qp->ibqp.qp_type = IB_QPT_RC;

    /* Allocate the SQ and RQ shared pointers */
    qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                     &qp->sq_mq.shared_dma, GFP_KERNEL);
    if (!qp->sq_mq.shared) {
        err = -ENOMEM;
        goto bail0;
    }

    qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                     &qp->rq_mq.shared_dma, GFP_KERNEL);
    if (!qp->rq_mq.shared) {
        err = -ENOMEM;
        goto bail1;
    }

    /* Allocate the verbs request */
    vq_req = vq_req_alloc(c2dev);
    if (vq_req == NULL) {
        err = -ENOMEM;
        goto bail2;
    }

    /* Initialize the work request */
    memset(&wr, 0, sizeof(wr));
    c2_wr_set_id(&wr, CCWR_QP_CREATE);
    wr.hdr.context = (unsigned long) vq_req;
    wr.rnic_handle = c2dev->adapter_handle;
    wr.sq_cq_handle = send_cq->adapter_handle;
    wr.rq_cq_handle = recv_cq->adapter_handle;
    wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
    wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
    wr.srq_handle = 0;
    wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
                   QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
    wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
    wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
    wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
    wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
    wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
    wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
    wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
    wr.pd_id = pd->pd_id;
    wr.user_context = (unsigned long) qp;

    vq_req_get(c2dev, vq_req);

    /* Send the WR to the adapter */
    err = vq_send_wr(c2dev, (union c2wr *) &wr);
    if (err) {
        vq_req_put(c2dev, vq_req);
        goto bail3;
    }

    /* Wait for the verb reply  */
    err = vq_wait_for_reply(c2dev, vq_req);
    if (err) {
        goto bail3;
    }

    /* Process the reply */
    reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
    if (!reply) {
        err = -ENOMEM;
        goto bail3;
    }

    if ((err = c2_wr_get_result(reply)) != 0) {
        goto bail4;
    }

    /* Fill in the kernel QP struct */
    atomic_set(&qp->refcount, 1);
    qp->adapter_handle = reply->qp_handle;
    qp->state = IB_QPS_RESET;
    qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
    qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
    qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
    init_waitqueue_head(&qp->wait);

    /* Initialize the SQ MQ */
    q_size = be32_to_cpu(reply->sq_depth);
    msg_size = be32_to_cpu(reply->sq_msg_size);
    peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
    mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
    mmap = ioremap_nocache(peer_pa, mmap_size);
    if (!mmap) {
        err = -ENOMEM;
        goto bail5;
    }

    c2_mq_req_init(&qp->sq_mq,
               be32_to_cpu(reply->sq_mq_index),
               q_size,
               msg_size,
               mmap + sizeof(struct c2_mq_shared),    /* pool start */
               mmap,                /* peer */
               C2_MQ_ADAPTER_TARGET);

    /* Initialize the RQ mq */
    q_size = be32_to_cpu(reply->rq_depth);
    msg_size = be32_to_cpu(reply->rq_msg_size);
    peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
    mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
    mmap = ioremap_nocache(peer_pa, mmap_size);
    if (!mmap) {
        err = -ENOMEM;
        goto bail6;
    }

    c2_mq_req_init(&qp->rq_mq,
               be32_to_cpu(reply->rq_mq_index),
               q_size,
               msg_size,
               mmap + sizeof(struct c2_mq_shared),    /* pool start */
               mmap,                /* peer */
               C2_MQ_ADAPTER_TARGET);

    vq_repbuf_free(c2dev, reply);
    vq_req_free(c2dev, vq_req);

    return 0;

bail6:
    iounmap(qp->sq_mq.peer);
bail5:
    destroy_qp(c2dev, qp);
bail4:
    vq_repbuf_free(c2dev, reply);
bail3:
    vq_req_free(c2dev, vq_req);
bail2:
    c2_free_mqsp(qp->rq_mq.shared);
bail1:
    c2_free_mqsp(qp->sq_mq.shared);
bail0:
    c2_free_qpn(c2dev, qp->qpn);
    return err;
}
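To tie the examples together, here is a hypothetical caller (not from this project; the capability values are illustrative only) that allocates an RC QP with c2_alloc_qp() and tears it down with the c2_free_qp() shown in Example #1.

static int example_create_rc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
                                struct ib_cq *send_cq, struct ib_cq *recv_cq,
                                struct c2_qp *qp)
{
    struct ib_qp_init_attr attrs = {
        .send_cq = send_cq,
        .recv_cq = recv_cq,
        .cap = {
            .max_send_wr  = 64,   /* illustrative depths only */
            .max_recv_wr  = 64,
            .max_send_sge = 4,
            .max_recv_sge = 4,
        },
    };
    int err;

    err = c2_alloc_qp(c2dev, pd, &attrs, qp);
    if (err)
        return err;

    /* ... transition the QP and post work requests ... */

    c2_free_qp(c2dev, qp);
    return 0;
}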