Example #1
int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct c2_mq_shared __iomem *shared;
	struct c2_cq *cq;
	unsigned long flags;
	int ret = 0;

	cq = to_c2cq(ibcq);
	shared = cq->mq.peer;

	if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
	else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
	else
		return -EINVAL;

	writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);

	/*
	 * Now read back shared->armed to make the PCI
	 * write synchronous.  This is necessary for
	 * correct cq notification semantics.
	 */
	readb(&shared->armed);

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		spin_lock_irqsave(&cq->lock, flags);
		ret = !c2_mq_empty(&cq->mq);
		spin_unlock_irqrestore(&cq->lock, flags);
	}

	return ret;
}
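For reference, c2_arm_cq is this driver's ib_req_notify_cq hook. Below is a minimal caller sketch of the re-arm idiom that the IB_CQ_REPORT_MISSED_EVENTS return value supports; ib_req_notify_cq and the flag bits are the standard verbs API, while the cq_rearm helper name is hypothetical:

static bool cq_rearm(struct ib_cq *ibcq)
{
	int ret;

	/* Arm for the next completion and ask whether we raced with one. */
	ret = ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP |
				     IB_CQ_REPORT_MISSED_EVENTS);

	/* ret > 0: CQEs arrived before the arm took effect, so the
	 * caller should poll again instead of waiting for an interrupt. */
	return ret > 0;
}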
Example #2
File: c2_qp.c Project: 274914765/C
void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
    struct c2_cq *send_cq;
    struct c2_cq *recv_cq;

    send_cq = to_c2cq(qp->ibqp.send_cq);
    recv_cq = to_c2cq(qp->ibqp.recv_cq);

    /*
     * Lock CQs here, so that CQ polling code can do QP lookup
     * without taking a lock.
     */
    c2_lock_cqs(send_cq, recv_cq);
    c2_free_qpn(c2dev, qp->qpn);
    c2_unlock_cqs(send_cq, recv_cq);

    /*
     * Destroy the QP in the RNIC...
     */
    destroy_qp(c2dev, qp);

    /*
     * Mark any unreaped CQEs as null and void.
     */
    c2_cq_clean(c2dev, qp, send_cq->cqn);
    if (send_cq != recv_cq)
        c2_cq_clean(c2dev, qp, recv_cq->cqn);
    /*
     * Unmap the MQs and return the shared pointers
     * to the message pool.
     */
    iounmap(qp->sq_mq.peer);
    iounmap(qp->rq_mq.peer);
    c2_free_mqsp(qp->sq_mq.shared);
    c2_free_mqsp(qp->rq_mq.shared);

    atomic_dec(&qp->refcount);
    wait_event(qp->wait, !atomic_read(&qp->refcount));
}
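c2_lock_cqs/c2_unlock_cqs are not shown in this excerpt. A plausible sketch, assuming the fixed lock-ordering idiom used by contemporary RDMA drivers such as mthca: when the two CQs differ, both locks are always taken in a stable (pointer) order, so two concurrent QP destroys that see the CQs in opposite roles cannot deadlock.

static void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
	} else if (send_cq < recv_cq) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}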
Example #3
int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct c2_dev *c2dev = to_c2dev(ibcq->device);
	struct c2_cq *cq = to_c2cq(ibcq);
	unsigned long flags;
	int npolled, err;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {

		err = c2_poll_one(c2dev, cq, entry + npolled);
		if (err)
			break;
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}
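c2_poll_cq backs the generic ib_poll_cq verb. A minimal consumer sketch using the standard polling loop follows; the batch size of 8 and the drain_cq name are arbitrary choices for illustration:

static void drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[8];
	int n, i;

	/* Poll in batches until the CQ is empty. */
	while ((n = ib_poll_cq(cq, 8, wc)) > 0)
		for (i = 0; i < n; i++)
			if (wc[i].status != IB_WC_SUCCESS)
				pr_err("WR %llu failed with status %d\n",
				       (unsigned long long)wc[i].wr_id,
				       wc[i].status);
}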
Example #4
File: c2_qp.c Project: 274914765/C
int c2_alloc_qp(struct c2_dev *c2dev,
        struct c2_pd *pd,
        struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
{
    struct c2wr_qp_create_req wr;
    struct c2wr_qp_create_rep *reply;
    struct c2_vq_req *vq_req;
    struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
    struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
    unsigned long peer_pa;
    u32 q_size, msg_size, mmap_size;
    void __iomem *mmap;
    int err;

    err = c2_alloc_qpn(c2dev, qp);
    if (err)
        return err;
    qp->ibqp.qp_num = qp->qpn;
    qp->ibqp.qp_type = IB_QPT_RC;

    /* Allocate the SQ and RQ shared pointers */
    qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                     &qp->sq_mq.shared_dma, GFP_KERNEL);
    if (!qp->sq_mq.shared) {
        err = -ENOMEM;
        goto bail0;
    }

    qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                     &qp->rq_mq.shared_dma, GFP_KERNEL);
    if (!qp->rq_mq.shared) {
        err = -ENOMEM;
        goto bail1;
    }

    /* Allocate the verbs request */
    vq_req = vq_req_alloc(c2dev);
    if (vq_req == NULL) {
        err = -ENOMEM;
        goto bail2;
    }

    /* Initialize the work request */
    memset(&wr, 0, sizeof(wr));
    c2_wr_set_id(&wr, CCWR_QP_CREATE);
    wr.hdr.context = (unsigned long) vq_req;
    wr.rnic_handle = c2dev->adapter_handle;
    wr.sq_cq_handle = send_cq->adapter_handle;
    wr.rq_cq_handle = recv_cq->adapter_handle;
    wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
    wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
    wr.srq_handle = 0;
    wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
                   QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
    wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
    wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
    wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
    wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
    wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
    wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
    wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
    wr.pd_id = pd->pd_id;
    wr.user_context = (unsigned long) qp;

    vq_req_get(c2dev, vq_req);

    /* Send the WR to the adapter */
    err = vq_send_wr(c2dev, (union c2wr *) &wr);
    if (err) {
        vq_req_put(c2dev, vq_req);
        goto bail3;
    }

    /* Wait for the verb reply  */
    err = vq_wait_for_reply(c2dev, vq_req);
    if (err)
        goto bail3;

    /* Process the reply */
    reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
    if (!reply) {
        err = -ENOMEM;
        goto bail3;
    }

    err = c2_wr_get_result(reply);
    if (err)
        goto bail4;

    /* Fill in the kernel QP struct */
    atomic_set(&qp->refcount, 1);
    qp->adapter_handle = reply->qp_handle;
    qp->state = IB_QPS_RESET;
    qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
    qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
    qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
    init_waitqueue_head(&qp->wait);

    /* Initialize the SQ MQ */
    q_size = be32_to_cpu(reply->sq_depth);
    msg_size = be32_to_cpu(reply->sq_msg_size);
    peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
    mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
    mmap = ioremap_nocache(peer_pa, mmap_size);
    if (!mmap) {
        err = -ENOMEM;
        goto bail5;
    }

    c2_mq_req_init(&qp->sq_mq,
               be32_to_cpu(reply->sq_mq_index),
               q_size,
               msg_size,
               mmap + sizeof(struct c2_mq_shared),    /* pool start */
               mmap,                /* peer */
               C2_MQ_ADAPTER_TARGET);

    /* Initialize the RQ mq */
    q_size = be32_to_cpu(reply->rq_depth);
    msg_size = be32_to_cpu(reply->rq_msg_size);
    peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
    mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
    mmap = ioremap_nocache(peer_pa, mmap_size);
    if (!mmap) {
        err = -ENOMEM;
        goto bail6;
    }

    c2_mq_req_init(&qp->rq_mq,
               be32_to_cpu(reply->rq_mq_index),
               q_size,
               msg_size,
               mmap + sizeof(struct c2_mq_shared),    /* pool start */
               mmap,                /* peer */
               C2_MQ_ADAPTER_TARGET);

    vq_repbuf_free(c2dev, reply);
    vq_req_free(c2dev, vq_req);

    return 0;

bail6:
    iounmap(qp->sq_mq.peer);
bail5:
    destroy_qp(c2dev, qp);
bail4:
    vq_repbuf_free(c2dev, reply);
bail3:
    vq_req_free(c2dev, vq_req);
bail2:
    c2_free_mqsp(qp->rq_mq.shared);
bail1:
    c2_free_mqsp(qp->sq_mq.shared);
bail0:
    c2_free_qpn(c2dev, qp->qpn);
    return err;
}
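Design note: the bail0..bail6 labels form the standard kernel goto-unwind ladder, with each label releasing exactly the resources acquired before the corresponding failure point, in reverse order of allocation. The +1 added to sq_depth and rq_depth is presumably one reserved ring slot so a full message queue can be distinguished from an empty one; that is an inference from the code, not something this excerpt states.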