Example #1
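c2_alloc_qp() from the amso1100 "c2" iWARP driver creates a queue pair on the adapter. It reserves a QP number, allocates shared message-queue pointers for the send and receive queues, builds a CCWR_QP_CREATE verbs request, sends it to the adapter and waits for the reply, then ioremaps the adapter-resident SQ and RQ message queues. On failure it unwinds through the bail0..bail6 labels, releasing exactly what was acquired before the failure point.
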
int c2_alloc_qp(struct c2_dev *c2dev,
        struct c2_pd *pd,
        struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
{
    struct c2wr_qp_create_req wr;
    struct c2wr_qp_create_rep *reply;
    struct c2_vq_req *vq_req;
    struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
    struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
    unsigned long peer_pa;
    u32 q_size, msg_size, mmap_size;
    void __iomem *mmap;
    int err;

    err = c2_alloc_qpn(c2dev, qp);
    if (err)
        return err;
    qp->ibqp.qp_num = qp->qpn;
    qp->ibqp.qp_type = IB_QPT_RC;

    /* Allocate the SQ and RQ shared pointers */
    qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                     &qp->sq_mq.shared_dma, GFP_KERNEL);
    if (!qp->sq_mq.shared) {
        err = -ENOMEM;
        goto bail0;
    }

    qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                     &qp->rq_mq.shared_dma, GFP_KERNEL);
    if (!qp->rq_mq.shared) {
        err = -ENOMEM;
        goto bail1;
    }

    /* Allocate the verbs request */
    vq_req = vq_req_alloc(c2dev);
    if (vq_req == NULL) {
        err = -ENOMEM;
        goto bail2;
    }

    /* Initialize the work request */
    memset(&wr, 0, sizeof(wr));
    c2_wr_set_id(&wr, CCWR_QP_CREATE);
    wr.hdr.context = (unsigned long) vq_req;
    wr.rnic_handle = c2dev->adapter_handle;
    wr.sq_cq_handle = send_cq->adapter_handle;
    wr.rq_cq_handle = recv_cq->adapter_handle;
    wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
    wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
    wr.srq_handle = 0;
    wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
                   QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
    wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
    wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
    wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
    wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
    wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
    wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
    wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
    wr.pd_id = pd->pd_id;
    wr.user_context = (unsigned long) qp;

    vq_req_get(c2dev, vq_req);

    /* Send the WR to the adapter */
    err = vq_send_wr(c2dev, (union c2wr *) &wr);
    if (err) {
        vq_req_put(c2dev, vq_req);
        goto bail3;
    }

    /* Wait for the verb reply  */
    err = vq_wait_for_reply(c2dev, vq_req);
    if (err)
        goto bail3;

    /* Process the reply */
    reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
    if (!reply) {
        err = -ENOMEM;
        goto bail3;
    }

    err = c2_wr_get_result(reply);
    if (err)
        goto bail4;

    /* Fill in the kernel QP struct */
    atomic_set(&qp->refcount, 1);
    qp->adapter_handle = reply->qp_handle;
    qp->state = IB_QPS_RESET;
    qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
    qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
    qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
    init_waitqueue_head(&qp->wait);

    /* Initialize the SQ MQ */
    q_size = be32_to_cpu(reply->sq_depth);
    msg_size = be32_to_cpu(reply->sq_msg_size);
    peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
    mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
    mmap = ioremap_nocache(peer_pa, mmap_size);
    if (!mmap) {
        err = -ENOMEM;
        goto bail5;
    }

    c2_mq_req_init(&qp->sq_mq,
               be32_to_cpu(reply->sq_mq_index),
               q_size,
               msg_size,
               mmap + sizeof(struct c2_mq_shared),    /* pool start */
               mmap,                /* peer */
               C2_MQ_ADAPTER_TARGET);

    /* Initialize the RQ mq */
    q_size = be32_to_cpu(reply->rq_depth);
    msg_size = be32_to_cpu(reply->rq_msg_size);
    peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
    mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
    mmap = ioremap_nocache(peer_pa, mmap_size);
    if (!mmap) {
        err = -ENOMEM;
        goto bail6;
    }

    c2_mq_req_init(&qp->rq_mq,
               be32_to_cpu(reply->rq_mq_index),
               q_size,
               msg_size,
               mmap + sizeof(struct c2_mq_shared),    /* pool start */
               mmap,                /* peer */
               C2_MQ_ADAPTER_TARGET);

    vq_repbuf_free(c2dev, reply);
    vq_req_free(c2dev, vq_req);

    return 0;

bail6:
    iounmap(qp->sq_mq.peer);
bail5:
    destroy_qp(c2dev, qp);
bail4:
    vq_repbuf_free(c2dev, reply);
bail3:
    vq_req_free(c2dev, vq_req);
bail2:
    c2_free_mqsp(qp->rq_mq.shared);
bail1:
    c2_free_mqsp(qp->sq_mq.shared);
bail0:
    c2_free_qpn(c2dev, qp->qpn);
    return err;
}
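
The bail0..bail6 ladder above is the classic kernel goto-unwind idiom: each acquisition gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal user-space sketch of the same pattern follows; the resource names are made up for illustration and are not driver APIs.

#include <stdlib.h>

/* Acquire three resources; on any failure, release only what
 * was already acquired, in reverse order, and return an error. */
static int setup(char **a, char **b, char **c)
{
	int err = -1;

	*a = malloc(16);
	if (!*a)
		goto bail0;

	*b = malloc(16);
	if (!*b)
		goto bail1;

	*c = malloc(16);
	if (!*c)
		goto bail2;

	return 0;		/* success: caller owns *a, *b and *c */

bail2:
	free(*b);
bail1:
	free(*a);
bail0:
	return err;
}

int main(void)
{
	char *a, *b, *c;

	if (setup(&a, &b, &c))
		return EXIT_FAILURE;
	free(c);
	free(b);
	free(a);
	return EXIT_SUCCESS;
}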
Example #2
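c2_poll_one() translates one completion from a CQ's adapter message queue into an ib_wc entry. Entries whose QP user context is NULL belong to an already-freed QP and are skipped. For a receive completion one RQ WQE is retired; for send-side completions the reported WQE count plus one is consumed from the SQ.
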
static inline int c2_poll_one(struct c2_dev *c2dev,
			      struct c2_cq *cq, struct ib_wc *entry)
{
	struct c2wr_ce *ce;
	struct c2_qp *qp;
	int is_recv = 0;

	ce = c2_mq_consume(&cq->mq);
	if (!ce)
		return -EAGAIN;

	/*
	 * If the QP returned is NULL then the QP has already been
	 * freed and we are unable to process the completion; try
	 * pulling the next message.
	 */
	while ((qp =
		(struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
		c2_mq_free(&cq->mq);
		ce = c2_mq_consume(&cq->mq);
		if (!ce)
			return -EAGAIN;
	}

	entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
	entry->wr_id = ce->hdr.context;
	entry->qp = &qp->ibqp;
	entry->wc_flags = 0;
	entry->slid = 0;
	entry->sl = 0;
	entry->src_qp = 0;
	entry->dlid_path_bits = 0;
	entry->pkey_index = 0;

	switch (c2_wr_get_id(ce)) {
	case C2_WR_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case C2_WR_TYPE_RDMA_WRITE:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case C2_WR_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case C2_WR_TYPE_RECV:
		entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
		entry->opcode = IB_WC_RECV;
		is_recv = 1;
		break;
	default:
		break;
	}

	/* consume the WQEs */
	if (is_recv)
		c2_mq_lconsume(&qp->rq_mq, 1);
	else
		c2_mq_lconsume(&qp->sq_mq,
			       be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);

	/* free the message */
	c2_mq_free(&cq->mq);

	return 0;
}
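
The skip-stale loop is the interesting part: a completion can outlive its QP, so the poller discards entries whose QP context has been cleared. Below is a toy user-space model of that loop; all structures and helpers are hypothetical stand-ins, not the driver's MQ implementation.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for the driver's CQE and MQ structures. */
struct toy_cqe {
	void *qp_ctx;		/* NULL: the owning QP was already freed */
	int wr_id;
};

struct toy_cq {
	struct toy_cqe *ring;
	size_t head, tail, size;
};

/* Peek at the oldest entry without retiring it. */
static struct toy_cqe *toy_consume(struct toy_cq *cq)
{
	return cq->head == cq->tail ? NULL : &cq->ring[cq->head % cq->size];
}

/* Retire the oldest entry. */
static void toy_free(struct toy_cq *cq)
{
	cq->head++;
}

/* Mirror of the skip-stale loop: return the first completion whose
 * QP is still alive, or -1 when the queue is exhausted. */
static int toy_poll_one(struct toy_cq *cq, int *wr_id)
{
	struct toy_cqe *ce = toy_consume(cq);

	if (!ce)
		return -1;

	while (ce->qp_ctx == NULL) {	/* stale: QP freed under us */
		toy_free(cq);
		ce = toy_consume(cq);
		if (!ce)
			return -1;
	}

	*wr_id = ce->wr_id;
	toy_free(cq);
	return 0;
}

int main(void)
{
	int live_qp;			/* dummy "QP" context */
	struct toy_cqe ring[4] = {
		{ NULL, 1 },		/* stale entry, skipped */
		{ &live_qp, 2 },	/* live entry, reported */
	};
	struct toy_cq cq = { ring, 0, 2, 4 };
	int wr_id;

	while (toy_poll_one(&cq, &wr_id) == 0)
		printf("completed wr_id=%d\n", wr_id);
	return 0;
}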
Example #3
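c2_ae_event() consumes one asynchronous-event message from the adapter and dispatches on the resource it refers to: a QP (connect results, terminate/overflow errors, close events), a passive endpoint (an incoming connection request), or a CQ (a CQ error). In each branch the consumer's registered handler is invoked only if present, and cm_id references are dropped under the QP lock when the connection goes away.
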
void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
{
	struct c2_mq *mq = c2dev->qptr_array[mq_index];
	union c2wr *wr;
	void *resource_user_context;
	struct iw_cm_event cm_event;
	struct ib_event ib_event;
	enum c2_resource_indicator resource_indicator;
	enum c2_event_id event_id;
	unsigned long flags;
	int status;

	/*
	 * retrieve the message
	 */
	wr = c2_mq_consume(mq);
	if (!wr)
		return;

	memset(&ib_event, 0, sizeof(ib_event));
	memset(&cm_event, 0, sizeof(cm_event));

	event_id = c2_wr_get_id(wr);
	resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
	resource_user_context =
	    (void *) (unsigned long) wr->ae.ae_generic.user_context;

	status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));

	pr_debug("event received c2_dev=%p, event_id=%d, "
		"resource_indicator=%d, user_context=%p, status = %d\n",
		c2dev, event_id, resource_indicator, resource_user_context,
		status);

	switch (resource_indicator) {
	case C2_RES_IND_QP:{

		struct c2_qp *qp = (struct c2_qp *)resource_user_context;
		struct iw_cm_id *cm_id = qp->cm_id;
		struct c2wr_ae_active_connect_results *res;

		if (!cm_id) {
			pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
				qp);
			goto ignore_it;
		}
		pr_debug("%s: event = %s, user_context=%llx, "
			"resource_type=%x, "
			"resource=%x, qp_state=%s\n",
			__func__,
			to_event_str(event_id),
			(unsigned long long) wr->ae.ae_generic.user_context,
			be32_to_cpu(wr->ae.ae_generic.resource_type),
			be32_to_cpu(wr->ae.ae_generic.resource),
			to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));

		c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));

		switch (event_id) {
		case CCAE_ACTIVE_CONNECT_RESULTS:
			res = &wr->ae.ae_active_connect_results;
			cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
			cm_event.local_addr.sin_addr.s_addr = res->laddr;
			cm_event.remote_addr.sin_addr.s_addr = res->raddr;
			cm_event.local_addr.sin_port = res->lport;
			cm_event.remote_addr.sin_port = res->rport;
			if (status == 0) {
				cm_event.private_data_len =
					be32_to_cpu(res->private_data_length);
				cm_event.private_data = res->private_data;
			} else {
				spin_lock_irqsave(&qp->lock, flags);
				if (qp->cm_id) {
					qp->cm_id->rem_ref(qp->cm_id);
					qp->cm_id = NULL;
				}
				spin_unlock_irqrestore(&qp->lock, flags);
				cm_event.private_data_len = 0;
				cm_event.private_data = NULL;
			}
			if (cm_id->event_handler)
				cm_id->event_handler(cm_id, &cm_event);
			break;
		case CCAE_TERMINATE_MESSAGE_RECEIVED:
		case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
			ib_event.device = &c2dev->ibdev;
			ib_event.element.qp = &qp->ibqp;
			ib_event.event = IB_EVENT_QP_REQ_ERR;

			if (qp->ibqp.event_handler)
				qp->ibqp.event_handler(&ib_event,
						       qp->ibqp.qp_context);
			break;
		case CCAE_BAD_CLOSE:
		case CCAE_LLP_CLOSE_COMPLETE:
		case CCAE_LLP_CONNECTION_RESET:
		case CCAE_LLP_CONNECTION_LOST:
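			/* 0x6b is the slab POISON_FREE byte: catch a freed cm_id */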
			BUG_ON(cm_id->event_handler == (void *) 0x6b6b6b6b);

			spin_lock_irqsave(&qp->lock, flags);
			if (qp->cm_id) {
				qp->cm_id->rem_ref(qp->cm_id);
				qp->cm_id = NULL;
			}
			spin_unlock_irqrestore(&qp->lock, flags);
			cm_event.event = IW_CM_EVENT_CLOSE;
			cm_event.status = 0;
			if (cm_id->event_handler)
				cm_id->event_handler(cm_id, &cm_event);
			break;
		default:
			pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
				"CM_ID=%p\n",
				__func__, __LINE__,
				event_id, qp, cm_id);
			BUG_ON(1);
			break;
		}
		break;
	}

	case C2_RES_IND_EP:{

		struct c2wr_ae_connection_request *req =
			&wr->ae.ae_connection_request;
		struct iw_cm_id *cm_id =
			(struct iw_cm_id *)resource_user_context;

		pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
		if (event_id != CCAE_CONNECTION_REQUEST) {
			pr_debug("%s: Invalid event_id: %d\n",
				__func__, event_id);
			break;
		}
		cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
		cm_event.provider_data = (void *) (unsigned long) req->cr_handle;
		cm_event.local_addr.sin_addr.s_addr = req->laddr;
		cm_event.remote_addr.sin_addr.s_addr = req->raddr;
		cm_event.local_addr.sin_port = req->lport;
		cm_event.remote_addr.sin_port = req->rport;
		cm_event.private_data_len =
			be32_to_cpu(req->private_data_length);
		cm_event.private_data = req->private_data;
		/*
		 * Until ird/ord negotiation via MPAv2 support is added, send
		 * max supported values
		 */
		cm_event.ird = cm_event.ord = 128;

		if (cm_id->event_handler)
			cm_id->event_handler(cm_id, &cm_event);
		break;
	}

	case C2_RES_IND_CQ:{
		struct c2_cq *cq =
		    (struct c2_cq *) resource_user_context;

		pr_debug("IB_EVENT_CQ_ERR\n");
		ib_event.device = &c2dev->ibdev;
		ib_event.element.cq = &cq->ibcq;
		ib_event.event = IB_EVENT_CQ_ERR;

		if (cq->ibcq.event_handler)
			cq->ibcq.event_handler(&ib_event,
					       cq->ibcq.cq_context);
		break;
	}

	default:
		printk("Bad resource indicator = %d\n",
		       resource_indicator);
		break;
	}

 ignore_it:
	c2_mq_free(mq);
}
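
The QP and EP branches both end with the same guarded dispatch: build an event record, then call the consumer's handler only if one was registered. A minimal user-space sketch of that shape follows; all names here are illustrative, not kernel APIs.

#include <stdio.h>

enum toy_event_type { TOY_CONNECT, TOY_CLOSE };

struct toy_event {
	enum toy_event_type type;
	int status;
};

struct toy_id {
	void (*event_handler)(struct toy_id *id, struct toy_event *ev);
};

static void on_event(struct toy_id *id, struct toy_event *ev)
{
	(void)id;
	printf("event type=%d status=%d\n", ev->type, ev->status);
}

static void deliver(struct toy_id *id, enum toy_event_type type, int status)
{
	struct toy_event ev = { type, status };

	/* mirror the driver's "if (cm_id->event_handler)" guard */
	if (id->event_handler)
		id->event_handler(id, &ev);
}

int main(void)
{
	struct toy_id id = { on_event };

	deliver(&id, TOY_CONNECT, 0);
	deliver(&id, TOY_CLOSE, 0);
	return 0;
}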