Example 1
int mlx4_destroy_xrc_srq(struct ibv_srq *srq)
{
	struct mlx4_context *mctx = to_mctx(srq->context);
	struct mlx4_srq *msrq = to_msrq(srq);
	struct mlx4_cq *mcq;
	int ret;

	mcq = to_mcq(msrq->verbs_srq.cq);
	mlx4_cq_clean(mcq, 0, msrq);
	pthread_spin_lock(&mcq->lock);
	mlx4_clear_xsrq(&mctx->xsrq_table, msrq->verbs_srq.srq_num);
	pthread_spin_unlock(&mcq->lock);

	ret = ibv_cmd_destroy_srq(srq);
	if (ret) {
		pthread_spin_lock(&mcq->lock);
		mlx4_store_xsrq(&mctx->xsrq_table, msrq->verbs_srq.srq_num, msrq);
		pthread_spin_unlock(&mcq->lock);
		return ret;
	}

	mlx4_free_db(mctx, MLX4_DB_TYPE_RQ, msrq->db);
	mlx4_free_buf(&msrq->buf);
	free(msrq->wrid);
	free(msrq);

	return 0;
}
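
Note the rollback on failure: the SRQ is removed from the XSRQ lookup table under the CQ lock before the destroy command is issued, and if ibv_cmd_destroy_srq() fails, mlx4_store_xsrq() re-inserts it so the SRQ stays usable.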
Example 2
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}
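
For context, a minimal userspace sketch (assuming the standard libibverbs API; the helper name and limit value are illustrative, not part of the snippet above) of arming the limit that mlx4_ib_modify_srq services:

#include <stdint.h>
#include <infiniband/verbs.h>

/* Hypothetical helper: arm the SRQ limit event. Once fewer than `limit`
 * receives remain posted, the device generates
 * IBV_EVENT_SRQ_LIMIT_REACHED on the async event channel. */
static int arm_srq_limit(struct ibv_srq *srq, uint32_t limit)
{
	struct ibv_srq_attr attr = {
		.srq_limit = limit,
	};

	/* Only IBV_SRQ_LIMIT is set; passing IBV_SRQ_MAX_WR (a resize)
	 * would be rejected by this provider, as the code above shows. */
	return ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
}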
Example 3
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			/* SRQ is full */
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}
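
As a usage sketch, this is roughly how a consumer replenishes an SRQ through the standard libibverbs entry point that dispatches to a provider's post_srq_recv such as the one above (the helper and its parameters are illustrative):

#include <stdint.h>
#include <infiniband/verbs.h>

/* Post one receive buffer back to the SRQ. `buf` must lie within a
 * registered memory region whose local key is `lkey`. */
static int srq_repost(struct ibv_srq *srq, void *buf, uint32_t len,
		      uint32_t lkey, uint64_t wr_id)
{
	struct ibv_sge sge = {
		.addr   = (uintptr_t) buf,
		.length = len,
		.lkey   = lkey,
	};
	struct ibv_recv_wr wr = {
		.wr_id   = wr_id,	/* handed back in ibv_wc.wr_id */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ibv_recv_wr *bad_wr;

	/* On failure the provider points bad_wr at the first WR it could
	 * not post, mirroring the *bad_wr assignments above. */
	return ibv_post_srq_recv(srq, &wr, &bad_wr);
}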
Example 4
int mlx4_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);
	struct mlx4_ib_cq *cq;

	mlx4_srq_invalidate(dev->dev, &msrq->msrq);

	if (srq->xrc_cq && !srq->uobject) {
		cq = to_mcq(srq->xrc_cq);
		spin_lock_irq(&cq->lock);
		__mlx4_ib_cq_clean(cq, -1, msrq);
		mlx4_srq_remove(dev->dev, &msrq->msrq);
		spin_unlock_irq(&cq->lock);
	} else
		mlx4_srq_remove(dev->dev, &msrq->msrq);

	mlx4_srq_free(dev->dev, &msrq->msrq);
	mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

	if (srq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		kfree(msrq->wrid);
		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}

	kfree(msrq);

	return 0;
}
Example 5
int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	int limit_watermark;

	ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
	if (ret)
		return ret;

	srq_attr->srq_limit = limit_watermark;
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

	return 0;
}
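
The reported max_wr is one less than the number of WQEs the driver allocated: one WQE stays in reserve so that head == tail can unambiguously mean "full" in the post path (compare the head/tail checks in Examples 3 and 13).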
Example 6
File: srq.c Project: Cai900205/test
int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);

	mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);

	if (srq->uobject) {
		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		destroy_srq_kernel(dev, msrq);
	}

	/* struct mlx5_ib_srq embeds ibsrq as its first member, so freeing
	 * srq releases the whole msrq object. */
	kfree(srq);
	return 0;
}
Example 7
int mlx4_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);

	mlx4_srq_free(dev->dev, &msrq->msrq);
	mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

	if (srq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		kfree(msrq->wrid);
		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}

	kfree(msrq);

	return 0;
}
Example 8
File: srq.c Project: Cai900205/test
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	struct mlx5_query_srq_mbox_out *out;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}
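
Unlike mlx4_srq_query() above, the mlx5 query is a firmware command whose output arrives in a mailbox structure, hence the heap-allocated out buffer and the goto-based cleanup; the limit watermark (lwm) comes back big-endian and is converted with be16_to_cpu().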
Example 9
File: cq.c Project: bond-os/linux
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	int is_send;
	int is_error;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq	  = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode    = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IB_WC_RDMA_READ;
			wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode    = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode    = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode    = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode    = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode    = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode    = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode	= IB_WC_RECV;
			wc->wc_flags	= IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid	   = be16_to_cpu(cqe->rlid);
		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->csum_ok	   = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
		if (rdma_port_get_link_layer(wc->qp->device,
				(*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
		else
			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
	}

	return 0;
}
Example 10
static int mlx4_poll_one(struct mlx4_cq *cq,
			 struct mlx4_qp **cur_qp,
			 struct ibv_wc *wc)
{
	struct mlx4_wq *wq;
	struct mlx4_cqe *cqe;
	struct mlx4_srq *srq = NULL;
	uint32_t qpn;
	uint32_t srqn;
	uint32_t g_mlpath_rqpn;
	uint16_t wqe_index;
	int is_error;
	int is_send;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return CQ_EMPTY;

	++cq->cons_index;

	VALGRIND_MAKE_MEM_DEFINED(cqe, sizeof *cqe);

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	qpn = ntohl(cqe->my_qpn);

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (qpn & MLX4_XRC_QPN_BIT && !is_send) {
		srqn = ntohl(cqe->g_mlpath_rqpn) & 0xffffff;
		/*
		 * We do not have to take the XRC SRQ table lock here,
		 * because CQs will be locked while XRC SRQs are removed
		 * from the table.
		 */
		srq = mlx4_find_xrc_srq(to_mctx(cq->ibv_cq.context), srqn);
		if (!srq)
			return CQ_POLL_ERR;
	} else if (!*cur_qp || (qpn & 0xffffff) != (*cur_qp)->ibv_qp.qp_num) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mlx4_find_qp(to_mctx(cq->ibv_cq.context),
				       qpn & 0xffffff);
		if (!*cur_qp)
			return CQ_POLL_ERR;
	}

	wc->qp_num = qpn & 0xffffff;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ntohs(cqe->wqe_index);
		wq->tail += (uint16_t) (wqe_index - (uint16_t) wq->tail);
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if (srq) {
		wqe_index = ntohs(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_index];
		mlx4_free_srq_wqe(srq, wqe_index);
	} else if ((*cur_qp)->ibv_qp.srq) {
		srq = to_msrq((*cur_qp)->ibv_qp.srq);
		wqe_index = ntohs(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_index];
		mlx4_free_srq_wqe(srq, wqe_index);
	} else {
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (is_error) {
		mlx4_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return CQ_OK;
	}

	wc->status = IBV_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IBV_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IBV_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IBV_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
			wc->opcode    = IBV_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IBV_WC_RDMA_READ;
			wc->byte_len  = ntohl(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IBV_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IBV_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode    = IBV_WC_BIND_MW;
			break;
		default:
			/* assume it's a send completion */
			wc->opcode    = IBV_WC_SEND;
			break;
		}
	} else {
		wc->byte_len = ntohl(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode   = IBV_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IBV_WC_WITH_IMM;
			wc->imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IBV_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode   = IBV_WC_RECV;
			wc->wc_flags = IBV_WC_WITH_IMM;
			wc->imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid	   = ntohs(cqe->rlid);
		wc->sl		   = cqe->sl >> 4;
		g_mlpath_rqpn	   = ntohl(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IBV_WC_GRH : 0;
		wc->pkey_index     = ntohl(cqe->immed_rss_invalid) & 0x7f;
	}

	return CQ_OK;
}
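
For completeness, a minimal sketch of the consumer-side loop that drives mlx4_poll_one through the standard ibv_poll_cq() call (helper name and batch size are illustrative):

#include <stdio.h>
#include <infiniband/verbs.h>

/* Drain the CQ in batches of 16 and report failed completions. For SRQ
 * completions, wc.wr_id is the value recovered from srq->wrid[] above. */
static int drain_cq(struct ibv_cq *cq)
{
	struct ibv_wc wc[16];
	int n, i;

	while ((n = ibv_poll_cq(cq, 16, wc)) > 0) {
		for (i = 0; i < n; ++i) {
			if (wc[i].status != IBV_WC_SUCCESS)
				fprintf(stderr, "wr_id %llu failed: %s\n",
					(unsigned long long) wc[i].wr_id,
					ibv_wc_status_str(wc[i].status));
			/* else dispatch on wc[i].opcode */
		}
	}
	return n;	/* 0 when empty, negative on poll error */
}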
Example 11
int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq,
			      struct ibv_recv_wr *wr,
			      struct ibv_recv_wr **bad_wr)
{
	struct mthca_srq *srq = to_msrq(ibsrq);
	uint32_t doorbell[2];
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	pthread_spin_lock(&srq->lock);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind	  = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (next_ind < 0) {
			err = -1;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (wr->num_sge > srq->max_gs) {
			err = -1;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				htonl(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				htonl(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				htonll(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = htonl(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			htonl(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		if (++nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB) {
			nreq = 0;

			doorbell[0] = htonl(first_ind << srq->wqe_shift);
			doorbell[1] = htonl(srq->srqn << 8);

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL);

			first_ind = srq->first_free;
		}
	}

	if (nreq) {
		doorbell[0] = htonl(first_ind << srq->wqe_shift);
		doorbell[1] = htonl((srq->srqn << 8) | nreq);

		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL);
	}

	pthread_spin_unlock(&srq->lock);
	return err;
}
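
Tavor has no doorbell record in memory, so the driver rings a real doorbell register; the number of new WQEs travels in the low byte of doorbell[1] ((srq->srqn << 8) | nreq), which is why posting flushes in batches of MTHCA_TAVOR_MAX_WQES_PER_RECV_DB. The Arbel variant in the next example instead just advances a counter in the doorbell record.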
Example 12
int mthca_arbel_post_srq_recv(struct ibv_srq *ibsrq,
			      struct ibv_recv_wr *wr,
			      struct ibv_recv_wr **bad_wr)
{
	struct mthca_srq *srq = to_msrq(ibsrq);
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	pthread_spin_lock(&srq->lock);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind	  = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (next_ind < 0) {
			err = -1;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (wr->num_sge > srq->max_gs) {
			err = -1;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				htonl(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				htonl(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				htonll(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = htonl(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (nreq) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = htonl(srq->counter);
	}

	pthread_spin_unlock(&srq->lock);
	return err;
}
Example 13
int mlx4_post_srq_recv(struct ibv_srq *ibsrq,
		       struct ibv_recv_wr *wr,
		       struct ibv_recv_wr **bad_wr)
{
	struct mlx4_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	int err = 0;
	int nreq;
	int i;

	pthread_spin_lock(&srq->lock);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (wr->num_sge > srq->max_gs) {
			err = -1;
			*bad_wr = wr;
			break;
		}

		if (srq->head == srq->tail) {
			/* SRQ is full */
			err = -1;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16toh(next->next_wqe_index);
		scat      = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = htobe32(wr->sg_list[i].length);
			scat[i].lkey       = htobe32(wr->sg_list[i].lkey);
			scat[i].addr       = htobe64(wr->sg_list[i].addr);
		}

		if (i < srq->max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = htobe32(MLX4_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (nreq) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		udma_to_device_barrier();

		*srq->db = htobe32(srq->counter);
	}

	pthread_spin_unlock(&srq->lock);

	return err;
}
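
Note that these userspace providers report failure with a bare -1 rather than a negative errno like their kernel counterparts (Examples 3 and 14), and that mlx4, like Arbel mthca, updates a doorbell record (*srq->db) instead of ringing a doorbell register.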
Example 14
File: srq.c Project: mhei/linux
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);

	spin_lock_irqsave(&srq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:

	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}
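
This mlx4 variant differs from the mlx5 post path in Example 3 mainly in its up-front check of the device's persistent state: if the device has hit an internal error, the post fails immediately with -EIO instead of touching the queue.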