Example #1
int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
{
	struct c2_dev *c2dev;
	struct c2wr_ep_listen_create_req wr;
	struct c2wr_ep_listen_create_rep *reply;
	struct c2_vq_req *vq_req;
	int err;

	c2dev = to_c2dev(cm_id->device);
	if (c2dev == NULL)
		return -EINVAL;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
	wr.hdr.context = (u64) (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.local_addr = cm_id->local_addr.sin_addr.s_addr;
	wr.local_port = cm_id->local_addr.sin_port;
	wr.backlog = cpu_to_be32(backlog);
	wr.user_context = (u64) (unsigned long) cm_id;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	reply =
	    (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail1;
	}

	if ((err = c2_errno(reply)) != 0)
		goto bail1;

	cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	return 0;

 bail1:
	vq_repbuf_free(c2dev, reply);
 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
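Nearly every function in this collection performs the same verbs-queue round trip against the adapter. Below is a minimal sketch of that shared pattern, distilled from the examples; it is not itself part of the driver, and c2_errno is assumed to accept the reply pointer as void *.

static int c2_vq_round_trip_sketch(struct c2_dev *c2dev, union c2wr *wr)
{
	struct c2_vq_req *vq_req;
	void *reply;
	int err;

	vq_req = vq_req_alloc(c2dev);	/* object the caller sleeps on */
	if (!vq_req)
		return -ENOMEM;

	/* The reply handler matches the reply back to us via hdr.context. */
	wr->hdr.context = (unsigned long) vq_req;

	vq_req_get(c2dev, vq_req);	/* reference dropped by the handler */
	err = vq_send_wr(c2dev, wr);
	if (err) {
		vq_req_put(c2dev, vq_req);	/* send failed: drop it here */
		goto bail;
	}

	err = vq_wait_for_reply(c2dev, vq_req);	/* sleep until the reply irq */
	if (err)
		goto bail;

	reply = (void *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail;
	}

	err = c2_errno(reply);		/* adapter status -> -errno */
	vq_repbuf_free(c2dev, reply);
 bail:
	vq_req_free(c2dev, vq_req);
	return err;
}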
Example #2
/*
 * Delete an IP address from the RNIC interface
 */
int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
{
	struct c2_vq_req *vq_req;
	struct c2wr_rnic_setconfig_req *wr;
	struct c2wr_rnic_setconfig_rep *reply;
	struct c2_netaddr netaddr;
	int err, len;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	len = sizeof(struct c2_netaddr);
	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}

	c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
	wr->hdr.context = (unsigned long) vq_req;
	wr->rnic_handle = c2dev->adapter_handle;
	wr->option = cpu_to_be32(C2_CFG_DEL_ADDR);

	netaddr.ip_addr = inaddr;
	netaddr.netmask = inmask;
	netaddr.mtu = 0;

	memcpy(wr->data, &netaddr, len);

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	reply =
	    (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail1;
	}

	err = c2_errno(reply);
	vq_repbuf_free(c2dev, reply);

 bail1:
	kfree(wr);
 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
Example #3
int c2_llp_service_destroy(struct iw_cm_id *cm_id)
{

	struct c2_dev *c2dev;
	struct c2wr_ep_listen_destroy_req wr;
	struct c2wr_ep_listen_destroy_rep *reply;
	struct c2_vq_req *vq_req;
	int err;

	c2dev = to_c2dev(cm_id->device);
	if (c2dev == NULL)
		return -EINVAL;

	
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	
	c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;

	
	vq_req_get(c2dev, vq_req);

	
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	
	reply = (struct c2wr_ep_listen_destroy_rep *) (unsigned long)
	    vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}
	err = c2_errno(reply);

 bail1:
	vq_repbuf_free(c2dev, reply);
 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
Example #4
void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
{
	int err;
	struct c2_vq_req *vq_req;
	struct c2wr_cq_destroy_req wr;
	struct c2wr_cq_destroy_rep *reply;

	might_sleep();

	/* Clear CQ from the qptr array */
	spin_lock_irq(&c2dev->lock);
	c2dev->qptr_array[cq->mq.index] = NULL;
	atomic_dec(&cq->refcount);
	spin_unlock_irq(&c2dev->lock);

	wait_event(cq->wait, !atomic_read(&cq->refcount));

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		goto bail0;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.cq_handle = cq->adapter_handle;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
	if (reply)
		vq_repbuf_free(c2dev, reply);
bail1:
	vq_req_free(c2dev, vq_req);
bail0:
	if (cq->is_kernel) {
		c2_free_cq_buf(c2dev, &cq->mq);
	}

	return;
}
Example #5
int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	struct c2_dev *c2dev;
	struct c2wr_cr_reject_req wr;
	struct c2_vq_req *vq_req;
	struct c2wr_cr_reject_rep *reply;
	int err;

	c2dev = to_c2dev(cm_id->device);

	
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	
	c2_wr_set_id(&wr, CCWR_CR_REJECT);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;

	
	vq_req_get(c2dev, vq_req);

	
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	
	reply = (struct c2wr_cr_reject_rep *) (unsigned long)
		vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}
	err = c2_errno(reply);
	
	vq_repbuf_free(c2dev, reply);

 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
Example #6
/*
 * Send the adapter TERM message to the amso1100
 */
static void c2_adapter_term(struct c2_dev *c2dev)
{
	struct c2wr_init_req wr;

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_TERM);
	wr.hdr.context = 0;

	/* Post the TERM message */
	vq_send_wr(c2dev, (union c2wr *) &wr);
	c2dev->init = 0;

	return;
}
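In contrast to the round-trip functions, c2_adapter_term posts its WR with hdr.context set to 0 and does not wait: as the PBL code in Example #14 notes, a zero context tells the reply handler that no kernel verbs caller is blocked on the message. A hypothetical helper making that fire-and-forget convention explicit:

/* Hypothetical helper (not in the driver): post a WR nobody waits on. */
static void c2_post_nowait_sketch(struct c2_dev *c2dev, union c2wr *wr)
{
	wr->hdr.context = 0;	/* no vq_req, so no sleeper to wake */
	vq_send_wr(c2dev, wr);
}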
Example #7
/*
 * Open a single RNIC instance to use with all
 * low level openib calls
 */
static int c2_rnic_open(struct c2_dev *c2dev)
{
	struct c2_vq_req *vq_req;
	union c2wr wr;
	struct c2wr_rnic_open_rep *reply;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (vq_req == NULL) {
		return -ENOMEM;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_RNIC_OPEN);
	wr.rnic_open.req.hdr.context = (unsigned long) (vq_req);
	wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE);
	wr.rnic_open.req.port_num = cpu_to_be16(0);
	wr.rnic_open.req.user_context = (unsigned long) c2dev;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail0;
	}

	reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	if ((err = c2_errno(reply)) != 0) {
		goto bail1;
	}

	c2dev->adapter_handle = reply->rnic_handle;

 bail1:
	vq_repbuf_free(c2dev, reply);
 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
Example #8
/*
 * Close the RNIC instance
 */
static int c2_rnic_close(struct c2_dev *c2dev)
{
	struct c2_vq_req *vq_req;
	union c2wr wr;
	struct c2wr_rnic_close_rep *reply;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (vq_req == NULL) {
		return -ENOMEM;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
	wr.rnic_close.req.hdr.context = (unsigned long) vq_req;
	wr.rnic_close.req.rnic_handle = c2dev->adapter_handle;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail0;
	}

	reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	if ((err = c2_errno(reply)) != 0) {
		goto bail1;
	}

	c2dev->adapter_handle = 0;

 bail1:
	vq_repbuf_free(c2dev, reply);
 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
Example #9
int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
              int ord, int ird)
{
    struct c2wr_qp_modify_req wr;
    struct c2wr_qp_modify_rep *reply;
    struct c2_vq_req *vq_req;
    int err;

    vq_req = vq_req_alloc(c2dev);
    if (!vq_req)
        return -ENOMEM;

    c2_wr_set_id(&wr, CCWR_QP_MODIFY);
    wr.hdr.context = (unsigned long) vq_req;
    wr.rnic_handle = c2dev->adapter_handle;
    wr.qp_handle = qp->adapter_handle;
    wr.ord = cpu_to_be32(ord);
    wr.ird = cpu_to_be32(ird);
    wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
    wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
    wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);

    /* reference the request struct */
    vq_req_get(c2dev, vq_req);

    err = vq_send_wr(c2dev, (union c2wr *) &wr);
    if (err) {
        vq_req_put(c2dev, vq_req);
        goto bail0;
    }

    err = vq_wait_for_reply(c2dev, vq_req);
    if (err)
        goto bail0;

    reply = (struct c2wr_qp_modify_rep *) (unsigned long)
        vq_req->reply_msg;
    if (!reply) {
        err = -ENOMEM;
        goto bail0;
    }

    err = c2_errno(reply);
    vq_repbuf_free(c2dev, reply);
 bail0:
    vq_req_free(c2dev, vq_req);
    return err;
}
Example #10
/*
 * Send the adapter INIT message to the amso1100
 */
static int c2_adapter_init(struct c2_dev *c2dev)
{
	struct c2wr_init_req wr;
	int err;

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_INIT);
	wr.hdr.context = 0;
	wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
	wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
	wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
	wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
	wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);
	wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);

	/* Post the init message */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);

	return err;
}
Example #11
int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
         struct ib_qp_attr *attr, int attr_mask)
{
    struct c2wr_qp_modify_req wr;
    struct c2wr_qp_modify_rep *reply;
    struct c2_vq_req *vq_req;
    unsigned long flags;
    u8 next_state;
    int err;

    pr_debug("%s:%d qp=%p, %s --> %s\n",
        __func__, __LINE__,
        qp,
        to_ib_state_str(qp->state),
        to_ib_state_str(attr->qp_state));

    vq_req = vq_req_alloc(c2dev);
    if (!vq_req)
        return -ENOMEM;

    c2_wr_set_id(&wr, CCWR_QP_MODIFY);
    wr.hdr.context = (unsigned long) vq_req;
    wr.rnic_handle = c2dev->adapter_handle;
    wr.qp_handle = qp->adapter_handle;
    wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
    wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
    wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
    wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);

    if (attr_mask & IB_QP_STATE) {
        /* Ensure the state is valid */
        if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
            err = -EINVAL;
            goto bail0;
        }

        wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));

        if (attr->qp_state == IB_QPS_ERR) {
            spin_lock_irqsave(&qp->lock, flags);
            if (qp->cm_id && qp->state == IB_QPS_RTS) {
                pr_debug("Generating CLOSE event for QP-->ERR, "
                    "qp=%p, cm_id=%p\n",qp,qp->cm_id);
                /* Generate an CLOSE event */
                vq_req->cm_id = qp->cm_id;
                vq_req->event = IW_CM_EVENT_CLOSE;
            }
            spin_unlock_irqrestore(&qp->lock, flags);
        }
        next_state =  attr->qp_state;

    } else if (attr_mask & IB_QP_CUR_STATE) {

        if (attr->cur_qp_state != IB_QPS_RTR &&
            attr->cur_qp_state != IB_QPS_RTS &&
            attr->cur_qp_state != IB_QPS_SQD &&
            attr->cur_qp_state != IB_QPS_SQE) {
            err = -EINVAL;
            goto bail0;
        } else
            wr.next_qp_state =
                cpu_to_be32(to_c2_state(attr->cur_qp_state));

        next_state = attr->cur_qp_state;

    } else {
        err = 0;
        goto bail0;
    }

    /* reference the request struct */
    vq_req_get(c2dev, vq_req);

    err = vq_send_wr(c2dev, (union c2wr *) &wr);
    if (err) {
        vq_req_put(c2dev, vq_req);
        goto bail0;
    }

    err = vq_wait_for_reply(c2dev, vq_req);
    if (err)
        goto bail0;

    reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
    if (!reply) {
        err = -ENOMEM;
        goto bail0;
    }

    err = c2_errno(reply);
    if (!err)
        qp->state = next_state;
#ifdef DEBUG
    else
        pr_debug("%s: c2_errno=%d\n", __func__, err);
#endif
    /*
     * If we're erroring out and generating the event here, then
     * we need to drop the reference because the adapter will not
     * generate a close event of its own.
     */
    spin_lock_irqsave(&qp->lock, flags);
    if (vq_req->event == IW_CM_EVENT_CLOSE && qp->cm_id) {
        qp->cm_id->rem_ref(qp->cm_id);
        qp->cm_id = NULL;
    }
    spin_unlock_irqrestore(&qp->lock, flags);

    vq_repbuf_free(c2dev, reply);
 bail0:
    vq_req_free(c2dev, vq_req);

    pr_debug("%s:%d qp=%p, cur_state=%s\n",
        __func__, __LINE__,
        qp,
        to_ib_state_str(qp->state));
    return err;
}
Example #12
int c2_llp_service_destroy(struct iw_cm_id *cm_id)
{

	struct c2_dev *c2dev;
	struct c2wr_ep_listen_destroy_req wr;
	struct c2wr_ep_listen_destroy_rep *reply;
	struct c2_vq_req *vq_req;
	int err;

	c2dev = to_c2dev(cm_id->device);
	if (c2dev == NULL)
		return -EINVAL;

	/*
	 * Allocate verbs request.
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/*
	 * Build the WR
	 */
	c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;

	/*
	 * reference the request struct.  dereferenced in the int handler.
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * Send WR to adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	/*
	 * Wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	/*
	 * Process reply
	 */
	reply = (struct c2wr_ep_listen_destroy_rep *) (unsigned long)
	    vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	vq_repbuf_free(c2dev, reply);
 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
Example #13
int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
{
	struct c2_vq_req *vq_req;	/* verbs request object */
	struct c2wr_stag_dealloc_req wr;	/* work request */
	struct c2wr_stag_dealloc_rep *reply;	/* WR reply  */
	int err;

	/*
	 * allocate verbs request object
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		return -ENOMEM;
	}

	/*
	 * Build the WR
	 */
	c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
	wr.hdr.context = (u64) (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.stag_index = cpu_to_be32(stag_index);

	/*
	 * reference the request struct.  dereferenced in the int handler.
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * Send WR to adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	/*
	 * Wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail0;
	}

	/*
	 * Process reply
	 */
	reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);

	vq_repbuf_free(c2dev, reply);
 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
Example #14
/*
 * Send all the PBL messages to convey the remainder of the PBL.
 * Wait for the adapter's reply on the last one.
 * This is indicated by setting MEM_PBL_COMPLETE in the flags.
 *
 * NOTE:  vq_req is _not_ freed by this function.  The VQ Host
 *	  Reply buffer _is_ freed by this function.
 */
static int
send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
		  unsigned long va, u32 pbl_depth,
		  struct c2_vq_req *vq_req, int pbl_type)
{
	u32 pbe_count;		/* amt that fits in a PBL msg */
	u32 count;		/* amt in this PBL MSG. */
	struct c2wr_nsmr_pbl_req *wr;	/* PBL WR ptr */
	struct c2wr_nsmr_pbl_rep *reply;	/* reply ptr */
	int err, pbl_virt, pbl_index, i;

	switch (pbl_type) {
	case PBL_VIRT:
		pbl_virt = 1;
		break;
	case PBL_PHYS:
		pbl_virt = 0;
		break;
	default:
		return -EINVAL;
	}

	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		return -ENOMEM;
	}
	c2_wr_set_id(wr, CCWR_NSMR_PBL);

	/*
	 * Only the last PBL message will generate a reply from the verbs,
	 * so we set the context to 0 indicating there is no kernel verbs
	 * handler blocked awaiting this reply.
	 */
	wr->hdr.context = 0;
	wr->rnic_handle = c2dev->adapter_handle;
	wr->stag_index = stag_index;	/* already swapped */
	wr->flags = 0;
	pbl_index = 0;
	while (pbl_depth) {
		count = min(pbe_count, pbl_depth);
		wr->addrs_length = cpu_to_be32(count);

		/*
		 *  If this is the last message, then reference the
		 *  vq request struct because we're going to wait for
		 *  a reply.  Also mark this PBL msg as the last one.
		 */
		if (count == pbl_depth) {
			/*
			 * reference the request struct.  dereferenced in the
			 * int handler.
			 */
			vq_req_get(c2dev, vq_req);
			wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);

			/*
			 * This is the last PBL message.
			 * Set the context to our VQ Request Object so we can
			 * wait for the reply.
			 */
			wr->hdr.context = (unsigned long) vq_req;
		}

		/*
		 * If pbl_virt is set then va is a virtual address
		 * that describes a virtually contiguous memory
		 * allocation. The wr needs the start of each virtual page
		 * to be converted to the corresponding physical address
		 * of the page. If pbl_virt is not set then va is an array
		 * of physical addresses and there is no conversion to do.
		 * Just fill in the wr with what is in the array.
		 */
		for (i = 0; i < count; i++) {
			if (pbl_virt) {
				va += PAGE_SIZE;
			} else {
 				wr->paddrs[i] =
				    cpu_to_be64(((u64 *)va)[pbl_index + i]);
			}
		}

		/*
		 * Send WR to adapter
		 */
		err = vq_send_wr(c2dev, (union c2wr *) wr);
		if (err) {
			if (count <= pbe_count) {
				vq_req_put(c2dev, vq_req);
			}
			goto bail0;
		}
		pbl_depth -= count;
		pbl_index += count;
	}

	/*
	 *  Now wait for the reply...
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail0;
	}

	/*
	 * Process reply
	 */
	reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);

	vq_repbuf_free(c2dev, reply);
 bail0:
	kfree(wr);
	return err;
}
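The chunking arithmetic above decides how many CCWR_NSMR_PBL messages a registration needs: each message carries as many 64-bit page addresses as fit after the c2wr_nsmr_pbl_req header, and only the final one carries MEM_PBL_COMPLETE. A short sketch of that computation (the DIV_ROUND_UP form is an equivalent restatement, not driver code):

	/* PBEs per message, then messages needed for the whole PBL. */
	u32 pbe_count = (c2dev->req_vq.msg_size -
			 sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
	u32 nr_pbl_msgs = DIV_ROUND_UP(pbl_depth, pbe_count);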
Example #15
int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	struct c2_dev *c2dev;
	struct c2wr_cr_reject_req wr;
	struct c2_vq_req *vq_req;
	struct c2wr_cr_reject_rep *reply;
	int err;

	c2dev = to_c2dev(cm_id->device);

	/*
	 * Allocate verbs request.
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/*
	 * Build the WR
	 */
	c2_wr_set_id(&wr, CCWR_CR_REJECT);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;

	/*
	 * reference the request struct.  dereferenced in the int handler.
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * Send WR to adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	/*
	 * Wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	/*
	 * Process reply
	 */
	reply = (struct c2wr_cr_reject_rep *) (unsigned long)
		vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}
	err = c2_errno(reply);
	/*
	 * free vq stuff
	 */
	vq_repbuf_free(c2dev, reply);

 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
Example #16
int
c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
			   int page_size, int pbl_depth, u32 length,
			   u32 offset, u64 *va, enum c2_acf acf,
			   struct c2_mr *mr)
{
	struct c2_vq_req *vq_req;
	struct c2wr_nsmr_register_req *wr;
	struct c2wr_nsmr_register_rep *reply;
	u16 flags;
	int i, pbe_count, count;
	int err;

	if (!va || !length || !addr_list || !pbl_depth)
		return -EINTR;

	/*
	 * Verify PBL depth is within rnic max
	 */
	if (pbl_depth > C2_PBL_MAX_DEPTH) {
		return -EINTR;
	}

	/*
	 * allocate verbs request object
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}

	/*
	 * build the WR
	 */
	c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
	wr->hdr.context = (unsigned long) vq_req;
	wr->rnic_handle = c2dev->adapter_handle;

	flags = (acf | MEM_VA_BASED | MEM_REMOTE);

	/*
	 * compute how many pbes can fit in the message
	 */
	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);

	if (pbl_depth <= pbe_count) {
		flags |= MEM_PBL_COMPLETE;
	}
	wr->flags = cpu_to_be16(flags);
	wr->stag_key = 0;
	wr->va = cpu_to_be64(*va);
	wr->pd_id = mr->pd->pd_id;
	wr->pbe_size = cpu_to_be32(page_size);
	wr->length = cpu_to_be32(length);
	wr->pbl_depth = cpu_to_be32(pbl_depth);
	wr->fbo = cpu_to_be32(offset);
	count = min(pbl_depth, pbe_count);
	wr->addrs_length = cpu_to_be32(count);

	/*
	 * fill out the PBL for this message
	 */
	for (i = 0; i < count; i++) {
		wr->paddrs[i] = cpu_to_be64(addr_list[i]);
	}

	/*
	 * reference the request struct
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * send the WR to the adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	/*
	 * wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail1;
	}

	/*
	 * process reply
	 */
	reply =
	    (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail1;
	}
	if ((err = c2_errno(reply))) {
		goto bail2;
	}
	//*p_pb_entries = be32_to_cpu(reply->pbl_depth);
	mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
	vq_repbuf_free(c2dev, reply);

	/*
	 * if there are still more PBEs we need to send them to
	 * the adapter and wait for a reply on the final one.
	 * reuse vq_req for this purpose.
	 */
	pbl_depth -= count;
	if (pbl_depth) {

		vq_req->reply_msg = (unsigned long) NULL;
		atomic_set(&vq_req->reply_ready, 0);
		err = send_pbl_messages(c2dev,
					cpu_to_be32(mr->ibmr.lkey),
					(unsigned long) &addr_list[i],
					pbl_depth, vq_req, PBL_PHYS);
		if (err) {
			goto bail1;
		}
	}

	vq_req_free(c2dev, vq_req);
	kfree(wr);

	return err;

 bail2:
	vq_repbuf_free(c2dev, reply);
 bail1:
	kfree(wr);
 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
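A hypothetical call into c2_nsmr_register_phys_kern, registering a two-page physically addressed list. The device, the MR (with mr->pd set), the buffer, and its DMA addresses are assumed to be set up by the caller, and the access flags are assumed to come from enum c2_acf:

	u64 pages[2] = { page0_dma, page1_dma };  /* assumed page-aligned */
	u64 kva = (u64) (unsigned long) buf;	  /* VA the stag is based on */
	int err;

	err = c2_nsmr_register_phys_kern(c2dev, pages, PAGE_SIZE,
					 2,		/* pbl_depth */
					 2 * PAGE_SIZE,	/* length */
					 0,		/* offset (fbo) */
					 &kva,
					 C2_ACF_LOCAL_READ | C2_ACF_LOCAL_WRITE,
					 mr);
	if (!err)
		pr_debug("registered, lkey=0x%x\n", mr->ibmr.lkey);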
Example #17
int c2_init_cq(struct c2_dev *c2dev, int entries,
	       struct c2_ucontext *ctx, struct c2_cq *cq)
{
	struct c2wr_cq_create_req wr;
	struct c2wr_cq_create_rep *reply;
	unsigned long peer_pa;
	struct c2_vq_req *vq_req;
	int err;

	might_sleep();

	cq->ibcq.cqe = entries - 1;
	cq->is_kernel = !ctx;

	/* Allocate a shared pointer */
	cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
				      &cq->mq.shared_dma, GFP_KERNEL);
	if (!cq->mq.shared)
		return -ENOMEM;

	/* Allocate pages for the message pool */
	err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
	if (err)
		goto bail0;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.msg_size = cpu_to_be32(cq->mq.msg_size);
	wr.depth = cpu_to_be32(cq->mq.q_size);
	wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
	wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
	wr.user_context = (u64) (unsigned long) (cq);

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail2;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail2;

	reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail2;
	}

	if ((err = c2_errno(reply)) != 0)
		goto bail3;

	cq->adapter_handle = reply->cq_handle;
	cq->mq.index = be32_to_cpu(reply->mq_index);

	peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
	cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
	if (!cq->mq.peer) {
		err = -ENOMEM;
		goto bail3;
	}

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	/*
	 * Use the MQ index allocated by the adapter to
	 * store the CQ in the qptr_array
	 */
	cq->cqn = cq->mq.index;
	c2dev->qptr_array[cq->cqn] = cq;

	return 0;

bail3:
	vq_repbuf_free(c2dev, reply);
bail2:
	vq_req_free(c2dev, vq_req);
bail1:
	c2_free_cq_buf(c2dev, &cq->mq);
bail0:
	c2_free_mqsp(cq->mq.shared);

	return err;
}
Example #18
int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
         struct ib_send_wr **bad_wr)
{
    struct c2_dev *c2dev = to_c2dev(ibqp->device);
    struct c2_qp *qp = to_c2qp(ibqp);
    union c2wr wr;
    unsigned long lock_flags;
    int err = 0;

    u32 flags;
    u32 tot_len;
    u8 actual_sge_count;
    u32 msg_size;

    if (qp->state > IB_QPS_RTS)
        return -EINVAL;

    while (ib_wr) {

        flags = 0;
        wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
        if (ib_wr->send_flags & IB_SEND_SIGNALED) {
            flags |= SQ_SIGNALED;
        }

        switch (ib_wr->opcode) {
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_INV:
            if (ib_wr->opcode == IB_WR_SEND) {
                if (ib_wr->send_flags & IB_SEND_SOLICITED)
                    c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
                else
                    c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
                wr.sqwr.send.remote_stag = 0;
            } else {
                if (ib_wr->send_flags & IB_SEND_SOLICITED)
                    c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
                else
                    c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
                wr.sqwr.send.remote_stag =
                    cpu_to_be32(ib_wr->ex.invalidate_rkey);
            }

            msg_size = sizeof(struct c2wr_send_req) +
                sizeof(struct c2_data_addr) * ib_wr->num_sge;
            if (ib_wr->num_sge > qp->send_sgl_depth) {
                err = -EINVAL;
                break;
            }
            if (ib_wr->send_flags & IB_SEND_FENCE) {
                flags |= SQ_READ_FENCE;
            }
            err = move_sgl((struct c2_data_addr *) &(wr.sqwr.send.data),
                       ib_wr->sg_list,
                       ib_wr->num_sge,
                       &tot_len, &actual_sge_count);
            wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
            c2_wr_set_sge_count(&wr, actual_sge_count);
            break;
        case IB_WR_RDMA_WRITE:
            c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
            msg_size = sizeof(struct c2wr_rdma_write_req) +
                (sizeof(struct c2_data_addr) * ib_wr->num_sge);
            if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
                err = -EINVAL;
                break;
            }
            if (ib_wr->send_flags & IB_SEND_FENCE) {
                flags |= SQ_READ_FENCE;
            }
            wr.sqwr.rdma_write.remote_stag =
                cpu_to_be32(ib_wr->wr.rdma.rkey);
            wr.sqwr.rdma_write.remote_to =
                cpu_to_be64(ib_wr->wr.rdma.remote_addr);
            err = move_sgl((struct c2_data_addr *)
                       &(wr.sqwr.rdma_write.data),
                       ib_wr->sg_list,
                       ib_wr->num_sge,
                       &tot_len, &actual_sge_count);
            wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
            c2_wr_set_sge_count(&wr, actual_sge_count);
            break;
        case IB_WR_RDMA_READ:
            c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
            msg_size = sizeof(struct c2wr_rdma_read_req);

            /* iWARP only supports 1 SGE for RDMA reads */
            if (ib_wr->num_sge > 1) {
                err = -EINVAL;
                break;
            }

            /*
             * Move the local and remote stag/to/len into the WR.
             */
            wr.sqwr.rdma_read.local_stag =
                cpu_to_be32(ib_wr->sg_list->lkey);
            wr.sqwr.rdma_read.local_to =
                cpu_to_be64(ib_wr->sg_list->addr);
            wr.sqwr.rdma_read.remote_stag =
                cpu_to_be32(ib_wr->wr.rdma.rkey);
            wr.sqwr.rdma_read.remote_to =
                cpu_to_be64(ib_wr->wr.rdma.remote_addr);
            wr.sqwr.rdma_read.length =
                cpu_to_be32(ib_wr->sg_list->length);
            break;
        default:
            /* error */
            msg_size = 0;
            err = -EINVAL;
            break;
        }

        /*
         * If we had an error on the last wr build, then
         * break out.  Possible errors include bogus WR
         * type, and a bogus SGL length...
         */
        if (err) {
            break;
        }

        /*
         * Store flags
         */
        c2_wr_set_flags(&wr, flags);

        /*
         * Post the puppy!
         */
        spin_lock_irqsave(&qp->lock, lock_flags);
        err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
        if (err) {
            spin_unlock_irqrestore(&qp->lock, lock_flags);
            break;
        }

        /*
         * Enqueue mq index to activity FIFO.
         */
        c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
        spin_unlock_irqrestore(&qp->lock, lock_flags);

        ib_wr = ib_wr->next;
    }

    if (err)
        *bad_wr = ib_wr;
    return err;
}
Example #19
static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
    struct c2_vq_req *vq_req;
    struct c2wr_qp_destroy_req wr;
    struct c2wr_qp_destroy_rep *reply;
    unsigned long flags;
    int err;

    /*
     * Allocate a verb request message
     */
    vq_req = vq_req_alloc(c2dev);
    if (!vq_req) {
        return -ENOMEM;
    }

    /*
     * Initialize the WR
     */
    c2_wr_set_id(&wr, CCWR_QP_DESTROY);
    wr.hdr.context = (unsigned long) vq_req;
    wr.rnic_handle = c2dev->adapter_handle;
    wr.qp_handle = qp->adapter_handle;

    /*
     * reference the request struct.  dereferenced in the int handler.
     */
    vq_req_get(c2dev, vq_req);

    spin_lock_irqsave(&qp->lock, flags);
    if (qp->cm_id && qp->state == IB_QPS_RTS) {
        pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
            "qp=%p, cm_id=%p\n",qp,qp->cm_id);
        /* Generate an CLOSE event */
        vq_req->qp = qp;
        vq_req->cm_id = qp->cm_id;
        vq_req->event = IW_CM_EVENT_CLOSE;
    }
    spin_unlock_irqrestore(&qp->lock, flags);

    /*
     * Send WR to adapter
     */
    err = vq_send_wr(c2dev, (union c2wr *) &wr);
    if (err) {
        vq_req_put(c2dev, vq_req);
        goto bail0;
    }

    /*
     * Wait for reply from adapter
     */
    err = vq_wait_for_reply(c2dev, vq_req);
    if (err) {
        goto bail0;
    }

    /*
     * Process reply
     */
    reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
    if (!reply) {
        err = -ENOMEM;
        goto bail0;
    }

    spin_lock_irqsave(&qp->lock, flags);
    if (qp->cm_id) {
        qp->cm_id->rem_ref(qp->cm_id);
        qp->cm_id = NULL;
    }
    spin_unlock_irqrestore(&qp->lock, flags);

    vq_repbuf_free(c2dev, reply);
 bail0:
    vq_req_free(c2dev, vq_req);
    return err;
}
Example #20
/*
 * Query the adapter
 */
static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
{
	struct c2_vq_req *vq_req;
	struct c2wr_rnic_query_req wr;
	struct c2wr_rnic_query_rep *reply;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	reply =
	    (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply)
		err = -ENOMEM;
	else
		err = c2_errno(reply);
	if (err)
		goto bail2;

	props->fw_ver =
		((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
		((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
		(be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
	memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
	props->max_mr_size         = 0xFFFFFFFF;
	props->page_size_cap       = ~(C2_MIN_PAGESIZE-1);
	props->vendor_id           = be32_to_cpu(reply->vendor_id);
	props->vendor_part_id      = be32_to_cpu(reply->part_number);
	props->hw_ver              = be32_to_cpu(reply->hw_version);
	props->max_qp              = be32_to_cpu(reply->max_qps);
	props->max_qp_wr           = be32_to_cpu(reply->max_qp_depth);
	props->device_cap_flags    = c2dev->device_cap_flags;
	props->max_sge             = C2_MAX_SGES;
	props->max_sge_rd          = C2_MAX_SGE_RD;
	props->max_cq              = be32_to_cpu(reply->max_cqs);
	props->max_cqe             = be32_to_cpu(reply->max_cq_depth);
	props->max_mr              = be32_to_cpu(reply->max_mrs);
	props->max_pd              = be32_to_cpu(reply->max_pds);
	props->max_qp_rd_atom      = be32_to_cpu(reply->max_qp_ird);
	props->max_ee_rd_atom      = 0;
	props->max_res_rd_atom     = be32_to_cpu(reply->max_global_ird);
	props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord);
	props->max_ee_init_rd_atom = 0;
	props->atomic_cap          = IB_ATOMIC_NONE;
	props->max_ee              = 0;
	props->max_rdd             = 0;
	props->max_mw              = be32_to_cpu(reply->max_mws);
	props->max_raw_ipv6_qp     = 0;
	props->max_raw_ethy_qp     = 0;
	props->max_mcast_grp       = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_ah              = 0;
	props->max_fmr             = 0;
	props->max_map_per_fmr     = 0;
	props->max_srq             = 0;
	props->max_srq_wr          = 0;
	props->max_srq_sge         = 0;
	props->max_pkeys           = 0;
	props->local_ca_ack_delay  = 0;

 bail2:
	vq_repbuf_free(c2dev, reply);

 bail1:
	vq_req_free(c2dev, vq_req);
	return err;
}
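c2_rnic_query packs the firmware major, minor, and patch numbers into the single 64-bit props->fw_ver. A sketch mirroring the shifts above to unpack that word:

	/* Sketch: decode the fw_ver word assembled in c2_rnic_query. */
	u32 fw_major = (u32) (props->fw_ver >> 32);
	u32 fw_minor = (props->fw_ver >> 16) & 0xFFFF;
	u32 fw_patch = props->fw_ver & 0xFFFF;

	pr_debug("amso1100 firmware %u.%u.%u\n", fw_major, fw_minor, fw_patch);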
Example #21
int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct c2_dev *c2dev = to_c2dev(cm_id->device);
	struct c2_qp *qp;
	struct ib_qp *ibqp;
	struct c2wr_cr_accept_req *wr;	/* variable length WR */
	struct c2_vq_req *vq_req;
	struct c2wr_cr_accept_rep *reply;	/* VQ Reply msg ptr. */
	int err;

	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
	if (!ibqp)
		return -EINVAL;
	qp = to_c2qp(ibqp);

	/* Set the RDMA read limits */
	err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
	if (err)
		goto bail0;

	/* Allocate verbs request. */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail0;
	}
	vq_req->qp = qp;
	vq_req->cm_id = cm_id;
	vq_req->event = IW_CM_EVENT_ESTABLISHED;

	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail1;
	}

	/* Build the WR */
	c2_wr_set_id(wr, CCWR_CR_ACCEPT);
	wr->hdr.context = (unsigned long) vq_req;
	wr->rnic_handle = c2dev->adapter_handle;
	wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
	wr->qp_handle = qp->adapter_handle;

	/* Replace the cr_handle with the QP after accept */
	cm_id->provider_data = qp;
	cm_id->add_ref(cm_id);
	qp->cm_id = cm_id;

	/* Validate private_data length */
	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
		err = -EINVAL;
		goto bail1;
	}

	if (iw_param->private_data) {
		wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
		memcpy(&wr->private_data[0],
		       iw_param->private_data, iw_param->private_data_len);
	} else
		wr->private_data_length = 0;

	/* Reference the request struct.  Dereferenced in the int handler. */
	vq_req_get(c2dev, vq_req);

	/* Send WR to adapter */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	/* Wait for reply from adapter */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	/* Check that reply is present */
	reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail1;
	}

	err = c2_errno(reply);
	vq_repbuf_free(c2dev, reply);

	if (!err)
		c2_set_qp_state(qp, C2_QP_STATE_RTS);
 bail1:
	kfree(wr);
	vq_req_free(c2dev, vq_req);
 bail0:
	if (err) {
		/*
		 * If we fail, release reference on QP and
		 * disassociate QP from CM_ID
		 */
		cm_id->provider_data = NULL;
		qp->cm_id = NULL;
		cm_id->rem_ref(cm_id);
	}
	return err;
}
Example #22
int c2_alloc_qp(struct c2_dev *c2dev,
        struct c2_pd *pd,
        struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
{
    struct c2wr_qp_create_req wr;
    struct c2wr_qp_create_rep *reply;
    struct c2_vq_req *vq_req;
    struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
    struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
    unsigned long peer_pa;
    u32 q_size, msg_size, mmap_size;
    void __iomem *mmap;
    int err;

    err = c2_alloc_qpn(c2dev, qp);
    if (err)
        return err;
    qp->ibqp.qp_num = qp->qpn;
    qp->ibqp.qp_type = IB_QPT_RC;

    /* Allocate the SQ and RQ shared pointers */
    qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                     &qp->sq_mq.shared_dma, GFP_KERNEL);
    if (!qp->sq_mq.shared) {
        err = -ENOMEM;
        goto bail0;
    }

    qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                     &qp->rq_mq.shared_dma, GFP_KERNEL);
    if (!qp->rq_mq.shared) {
        err = -ENOMEM;
        goto bail1;
    }

    /* Allocate the verbs request */
    vq_req = vq_req_alloc(c2dev);
    if (vq_req == NULL) {
        err = -ENOMEM;
        goto bail2;
    }

    /* Initialize the work request */
    memset(&wr, 0, sizeof(wr));
    c2_wr_set_id(&wr, CCWR_QP_CREATE);
    wr.hdr.context = (unsigned long) vq_req;
    wr.rnic_handle = c2dev->adapter_handle;
    wr.sq_cq_handle = send_cq->adapter_handle;
    wr.rq_cq_handle = recv_cq->adapter_handle;
    wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
    wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
    wr.srq_handle = 0;
    wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
                   QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
    wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
    wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
    wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
    wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
    wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
    wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
    wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
    wr.pd_id = pd->pd_id;
    wr.user_context = (unsigned long) qp;

    vq_req_get(c2dev, vq_req);

    /* Send the WR to the adapter */
    err = vq_send_wr(c2dev, (union c2wr *) &wr);
    if (err) {
        vq_req_put(c2dev, vq_req);
        goto bail3;
    }

    /* Wait for the verb reply  */
    err = vq_wait_for_reply(c2dev, vq_req);
    if (err) {
        goto bail3;
    }

    /* Process the reply */
    reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
    if (!reply) {
        err = -ENOMEM;
        goto bail3;
    }

    if ((err = c2_wr_get_result(reply)) != 0) {
        goto bail4;
    }

    /* Fill in the kernel QP struct */
    atomic_set(&qp->refcount, 1);
    qp->adapter_handle = reply->qp_handle;
    qp->state = IB_QPS_RESET;
    qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
    qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
    qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
    init_waitqueue_head(&qp->wait);

    /* Initialize the SQ MQ */
    q_size = be32_to_cpu(reply->sq_depth);
    msg_size = be32_to_cpu(reply->sq_msg_size);
    peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
    mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
    mmap = ioremap_nocache(peer_pa, mmap_size);
    if (!mmap) {
        err = -ENOMEM;
        goto bail5;
    }

    c2_mq_req_init(&qp->sq_mq,
               be32_to_cpu(reply->sq_mq_index),
               q_size,
               msg_size,
               mmap + sizeof(struct c2_mq_shared),    /* pool start */
               mmap,                /* peer */
               C2_MQ_ADAPTER_TARGET);

    /* Initialize the RQ mq */
    q_size = be32_to_cpu(reply->rq_depth);
    msg_size = be32_to_cpu(reply->rq_msg_size);
    peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
    mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
    mmap = ioremap_nocache(peer_pa, mmap_size);
    if (!mmap) {
        err = -ENOMEM;
        goto bail6;
    }

    c2_mq_req_init(&qp->rq_mq,
               be32_to_cpu(reply->rq_mq_index),
               q_size,
               msg_size,
               mmap + sizeof(struct c2_mq_shared),    /* pool start */
               mmap,                /* peer */
               C2_MQ_ADAPTER_TARGET);

    vq_repbuf_free(c2dev, reply);
    vq_req_free(c2dev, vq_req);

    return 0;

 bail6:
    iounmap(qp->sq_mq.peer);
 bail5:
    destroy_qp(c2dev, qp);
 bail4:
    vq_repbuf_free(c2dev, reply);
 bail3:
    vq_req_free(c2dev, vq_req);
 bail2:
    c2_free_mqsp(qp->rq_mq.shared);
 bail1:
    c2_free_mqsp(qp->sq_mq.shared);
 bail0:
    c2_free_qpn(c2dev, qp->qpn);
    return err;
}
Example #23
int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct c2_dev *c2dev = to_c2dev(cm_id->device);
	struct c2_qp *qp;
	struct ib_qp *ibqp;
	struct c2wr_cr_accept_req *wr;	
	struct c2_vq_req *vq_req;
	struct c2wr_cr_accept_rep *reply;	
	int err;

	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
	if (!ibqp)
		return -EINVAL;
	qp = to_c2qp(ibqp);

	
	err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
	if (err)
		goto bail0;

	
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail0;
	}
	vq_req->qp = qp;
	vq_req->cm_id = cm_id;
	vq_req->event = IW_CM_EVENT_ESTABLISHED;

	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail1;
	}

	
	c2_wr_set_id(wr, CCWR_CR_ACCEPT);
	wr->hdr.context = (unsigned long) vq_req;
	wr->rnic_handle = c2dev->adapter_handle;
	wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
	wr->qp_handle = qp->adapter_handle;

	
	cm_id->provider_data = qp;
	cm_id->add_ref(cm_id);
	qp->cm_id = cm_id;

	
	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
		err = -EINVAL;
		goto bail1;
	}

	if (iw_param->private_data) {
		wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
		memcpy(&wr->private_data[0],
		       iw_param->private_data, iw_param->private_data_len);
	} else
		wr->private_data_length = 0;

	
	vq_req_get(c2dev, vq_req);

	
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	
	reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail1;
	}

	err = c2_errno(reply);
	vq_repbuf_free(c2dev, reply);

	if (!err)
		c2_set_qp_state(qp, C2_QP_STATE_RTS);
 bail1:
	kfree(wr);
	vq_req_free(c2dev, vq_req);
 bail0:
	if (err) {
		cm_id->provider_data = NULL;
		qp->cm_id = NULL;
		cm_id->rem_ref(cm_id);
	}
	return err;
}
Example #24
int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
            struct ib_recv_wr **bad_wr)
{
    struct c2_dev *c2dev = to_c2dev(ibqp->device);
    struct c2_qp *qp = to_c2qp(ibqp);
    union c2wr wr;
    unsigned long lock_flags;
    int err = 0;

    if (qp->state > IB_QPS_RTS)
        return -EINVAL;

    /*
     * Try and post each work request
     */
    while (ib_wr) {
        u32 tot_len;
        u8 actual_sge_count;

        if (ib_wr->num_sge > qp->recv_sgl_depth) {
            err = -EINVAL;
            break;
        }

        /*
         * Create local host-copy of the WR
         */
        wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
        c2_wr_set_id(&wr, CCWR_RECV);
        c2_wr_set_flags(&wr, 0);

        /* sge_count is limited to eight bits. */
        BUG_ON(ib_wr->num_sge >= 256);
        err = move_sgl((struct c2_data_addr *) &(wr.rqwr.data),
                   ib_wr->sg_list,
                   ib_wr->num_sge, &tot_len, &actual_sge_count);
        c2_wr_set_sge_count(&wr, actual_sge_count);

        /*
         * If we had an error on the last wr build, then
         * break out.  Possible errors include bogus WR
         * type, and a bogus SGL length...
         */
        if (err) {
            break;
        }

        spin_lock_irqsave(&qp->lock, lock_flags);
        err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
        if (err) {
            spin_unlock_irqrestore(&qp->lock, lock_flags);
            break;
        }

        /*
         * Enqueue mq index to activity FIFO
         */
        c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
        spin_unlock_irqrestore(&qp->lock, lock_flags);

        ib_wr = ib_wr->next;
    }

    if (err)
        *bad_wr = ib_wr;
    return err;
}
Example #25
int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct c2_dev *c2dev = to_c2dev(cm_id->device);
	struct ib_qp *ibqp;
	struct c2_qp *qp;
	struct c2wr_qp_connect_req *wr;	
	struct c2_vq_req *vq_req;
	int err;

	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
	if (!ibqp)
		return -EINVAL;
	qp = to_c2qp(ibqp);

	
	cm_id->provider_data = qp;
	cm_id->add_ref(cm_id);
	qp->cm_id = cm_id;

	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
		err = -EINVAL;
		goto bail0;
	}
	err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
	if (err)
		goto bail0;

	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	c2_wr_set_id(wr, CCWR_QP_CONNECT);
	wr->hdr.context = 0;
	wr->rnic_handle = c2dev->adapter_handle;
	wr->qp_handle = qp->adapter_handle;

	wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr;
	wr->remote_port = cm_id->remote_addr.sin_port;

	if (iw_param->private_data) {
		wr->private_data_length =
			cpu_to_be32(iw_param->private_data_len);
		memcpy(&wr->private_data[0], iw_param->private_data,
		       iw_param->private_data_len);
	} else
		wr->private_data_length = 0;

	err = vq_send_wr(c2dev, (union c2wr *) wr);
	vq_req_free(c2dev, vq_req);

 bail1:
	kfree(wr);
 bail0:
	if (err) {
		cm_id->provider_data = NULL;
		qp->cm_id = NULL;
		cm_id->rem_ref(cm_id);
	}
	return err;
}
Example #26
int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
{
	struct c2_dev *c2dev;
	struct c2wr_ep_listen_create_req wr;
	struct c2wr_ep_listen_create_rep *reply;
	struct c2_vq_req *vq_req;
	int err;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;

	if (cm_id->local_addr.ss_family != AF_INET)
		return -ENOSYS;

	c2dev = to_c2dev(cm_id->device);
	if (c2dev == NULL)
		return -EINVAL;

	/*
	 * Allocate verbs request.
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/*
	 * Build the WR
	 */
	c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
	wr.hdr.context = (u64) (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.local_addr = laddr->sin_addr.s_addr;
	wr.local_port = laddr->sin_port;
	wr.backlog = cpu_to_be32(backlog);
	wr.user_context = (u64) (unsigned long) cm_id;

	/*
	 * Reference the request struct.  Dereferenced in the int handler.
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * Send WR to adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	/*
	 * Wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	/*
	 * Process reply
	 */
	reply =
	    (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail1;
	}

	if ((err = c2_errno(reply)) != 0)
		goto bail1;

	/*
	 * Keep the adapter handle. Used in subsequent destroy
	 */
	cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;

	/*
	 * free vq stuff
	 */
	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	return 0;

 bail1:
	vq_repbuf_free(c2dev, reply);
 bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
Example #27
int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct c2_dev *c2dev = to_c2dev(cm_id->device);
	struct ib_qp *ibqp;
	struct c2_qp *qp;
	struct c2wr_qp_connect_req *wr;	/* variable size needs a malloc. */
	struct c2_vq_req *vq_req;
	int err;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;

	if (cm_id->remote_addr.ss_family != AF_INET)
		return -ENOSYS;

	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
	if (!ibqp)
		return -EINVAL;
	qp = to_c2qp(ibqp);

	/* Associate QP <--> CM_ID */
	cm_id->provider_data = qp;
	cm_id->add_ref(cm_id);
	qp->cm_id = cm_id;

	/*
	 * only support the max private_data length
	 */
	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
		err = -EINVAL;
		goto bail0;
	}
	/*
	 * Set the rdma read limits
	 */
	err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
	if (err)
		goto bail0;

	/*
	 * Create and send a WR_QP_CONNECT...
	 */
	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	c2_wr_set_id(wr, CCWR_QP_CONNECT);
	wr->hdr.context = 0;
	wr->rnic_handle = c2dev->adapter_handle;
	wr->qp_handle = qp->adapter_handle;

	wr->remote_addr = raddr->sin_addr.s_addr;
	wr->remote_port = raddr->sin_port;

	/*
	 * Move any private data from the caller's buf into
	 * the WR.
	 */
	if (iw_param->private_data) {
		wr->private_data_length =
			cpu_to_be32(iw_param->private_data_len);
		memcpy(&wr->private_data[0], iw_param->private_data,
		       iw_param->private_data_len);
	} else
		wr->private_data_length = 0;

	/*
	 * Send WR to adapter.  NOTE: There is no synch reply from
	 * the adapter.
	 */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	vq_req_free(c2dev, vq_req);

 bail1:
	kfree(wr);
 bail0:
	if (err) {
		/*
		 * If we fail, release reference on QP and
		 * disassociate QP from CM_ID
		 */
		cm_id->provider_data = NULL;
		qp->cm_id = NULL;
		cm_id->rem_ref(cm_id);
	}
	return err;
}