Example #1
0
/*
 * mlx4_en_activate_cq() - bring a software CQ online against the HCA.
 *
 * Wires the CQ doorbell records into the queue's doorbell page, selects a
 * completion vector (RX CQs may be given a dedicated EQ when a completion
 * pool exists, otherwise a legacy vector is derived from ring/port), then
 * allocates the hardware CQ and installs the completion/event callbacks.
 * TX CQs are reaped from a timer; RX CQs are polled through NAPI.
 *
 * Returns 0 on success or the error from mlx4_cq_alloc().
 */
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    int err = 0;
    char name[25];

    cq->dev = mdev->pndev[priv->port];
    cq->mcq.set_ci_db  = cq->wqres.db.db;
    cq->mcq.arm_db     = cq->wqres.db.db + 1;
    *cq->mcq.set_ci_db = 0;
    *cq->mcq.arm_db    = 0;
    memset(cq->buf, 0, cq->buf_size);

    if (cq->is_tx == RX) {
        if (mdev->dev->caps.comp_pool) {
            if (!cq->vector) {
                /*
                 * Bug fix: "<ifname>-rx-<ring>" can need up to
                 * IFNAMSIZ-1 + 4 + 10 + 1 bytes, which overflows
                 * name[25] with sprintf(); bound the write instead.
                 */
                snprintf(name, sizeof(name), "%s-rx-%d",
                         priv->dev->name, cq->ring);
                if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
                    /* No dedicated EQ available: fall back to a legacy vector */
                    cq->vector = (cq->ring + 1 + priv->port) %
                                 mdev->dev->caps.num_comp_vectors;
                    mlx4_warn(mdev, "Failed Assigning an EQ to "
                              "%s_rx-%d ,Falling back to legacy EQ's\n",
                              priv->dev->name, cq->ring);
                }
            }
        } else {
            cq->vector = (cq->ring + 1 + priv->port) %
                         mdev->dev->caps.num_comp_vectors;
        }
    } else {
        if (!cq->vector || !mdev->dev->caps.comp_pool) {
            /*Fallback to legacy pool in case of error*/
            cq->vector   = 0;
        }
    }

    if (!cq->is_tx)
        cq->size = priv->rx_ring[cq->ring].actual_size;

    err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
                        cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
    if (err)
        return err;

    cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
    cq->mcq.event = mlx4_en_cq_event;

    if (cq->is_tx) {
        /* TX completions are reaped from a timer, not NAPI */
        init_timer(&cq->timer);
        cq->timer.function = mlx4_en_poll_tx_cq;
        cq->timer.data = (unsigned long) cq;
    } else {
        netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
        napi_enable(&cq->napi);
    }

    return 0;
}
Example #2
0
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	cq->dev = mdev->pndev[priv->port];
	cq->mcq.set_ci_db  = cq->wqres.db.db;
	cq->mcq.arm_db     = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring].actual_size;

	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
			    cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
	if (err)
		return err;

	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	if (cq->is_tx) {
		init_timer(&cq->timer);
		cq->timer.function = mlx4_en_poll_tx_cq;
		cq->timer.data = (unsigned long) cq;
	} else {
		char name[IFNAMSIZ];

		snprintf(name, IFNAMSIZ, "mlx4_en-%d-%d", priv->port, cq->ring);
		cq->poll_dev = alloc_netdev(0, name, ether_setup);
		if (!cq->poll_dev)
			return -ENOMEM;

		cq->poll_dev->priv = cq;
		cq->poll_dev->weight = 64;
		cq->poll_dev->poll = mlx4_en_poll_rx_cq;
		set_bit(__LINK_STATE_START, &cq->poll_dev->state);
	}

	return 0;
}
Example #3
0
/*
 * mlx4_ib_create_cq() - create an IB completion queue.
 *
 * For userspace consumers (context != NULL) the CQ buffer and doorbell
 * come from user memory described in udata; for kernel consumers they
 * are allocated here.  On success returns the embedded ib_cq; on failure
 * all partially acquired resources are unwound and an ERR_PTR returned.
 */
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	/* One extra CQE for producer/consumer disambiguation; HW wants a power of two */
	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			/*
			 * Bug fix: the hardware CQ already exists here;
			 * jumping straight to err_dbmap leaked it.
			 */
			goto err_cq_free;
		}

	return &cq->ibcq;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}
Example #4
0
/*
 * mlx4_en_activate_cq() - bring a software CQ online against the HCA.
 *
 * Wires the CQ doorbell records, assigns (or reuses) an EQ vector, then
 * allocates the hardware CQ and registers the NAPI handlers.  TX CQs
 * reuse the vector of the RX CQ with the same (wrapped) index.
 *
 * Returns 0 on success or a negative errno; on failure any EQ assigned
 * here is released and cq->vector is reset to an invalid value.
 */
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int timestamp_en = 0;
	bool assigned_eq = false;

	cq->dev = mdev->pndev[priv->port];
	cq->mcq.set_ci_db  = cq->wqres.db.db;
	cq->mcq.arm_db     = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (cq->is_tx == RX) {
		if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
					     cq->vector)) {
			cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);

			err = mlx4_assign_eq(mdev->dev, priv->port,
					     MLX4_EQ_ID_TO_UUID(MLX4_EQ_ID_EN,
								priv->port,
								cq_idx),
					     mlx4_en_cq_eq_cb,
					     &priv->rx_cq[cq_idx],
					     &cq->vector);
			if (err) {
				/*
				 * Bug fix: the old message printed 'name', a
				 * stack buffer never written on this path
				 * (uninitialized read); identify the ring
				 * directly instead.
				 */
				mlx4_err(mdev, "Failed assigning an EQ to %s-%d\n",
					 priv->dev->name, cq->ring);
				goto free_eq;
			}

			assigned_eq = true;
		}

		/* Set IRQ for specific name (per ring) */
		err = mlx4_rename_eq(mdev->dev, priv->port, cq->vector,
				     MLX4_EN_EQ_NAME_PRIORITY, "%s-%d",
				     priv->dev->name, cq->ring);

		if (err) {
			/* Renaming is cosmetic; keep going with the default name */
			mlx4_warn(mdev, "Failed to rename EQ, continuing with default name\n");
			err = 0;
		}

#if defined(HAVE_IRQ_DESC_GET_IRQ_DATA) && defined(HAVE_IRQ_TO_DESC_EXPORTED)
		cq->irq_desc =
			irq_to_desc(mlx4_eq_get_irq(mdev->dev,
						    cq->vector));
#endif
	} else {
		/* For TX we use the same irq per
		ring we assigned for the RX    */
		struct mlx4_en_cq *rx_cq;

		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring]->actual_size;

	/* Enable HW timestamping when requested for the CQ's direction */
	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
		timestamp_en = 1;

	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en, &cq->wqres.buf, false);
	if (err)
		goto free_eq;

	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	if (cq->is_tx) {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
			       NAPI_POLL_WEIGHT);
	} else {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
#ifdef HAVE_NAPI_HASH_ADD
		napi_hash_add(&cq->napi);
#endif
	}

	napi_enable(&cq->napi);

	return 0;

free_eq:
	if (assigned_eq)
		mlx4_release_eq(mdev->dev, MLX4_EQ_ID_TO_UUID(
					MLX4_EQ_ID_EN, priv->port, cq_idx),
				cq->vector);
	/* Mark the vector invalid so a later activation reassigns it */
	cq->vector = mdev->dev->caps.num_comp_vectors;
	return err;
}
Example #5
0
/*
 * mlx4_en_activate_cq() - bring a software CQ online against the HCA.
 *
 * Points the CQ doorbell records into the queue's doorbell page, chooses
 * an EQ vector (RX CQs may be assigned a dedicated EQ; TX CQs reuse the
 * vector of the RX CQ at the same wrapped index), allocates the hardware
 * CQ and registers the NAPI poll handler.
 *
 * Returns 0 on success or a negative errno; on failure any EQ assigned
 * here is released and cq->vector is reset to an invalid value.
 */
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int timestamp_en = 0;
	bool assigned_eq = false;

	cq->dev = mdev->pndev[priv->port];
	/* Doorbell record: consumer index first, arm doorbell right after it */
	cq->mcq.set_ci_db  = cq->wqres.db.db;
	cq->mcq.arm_db     = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (cq->is_tx == RX) {
		if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
					     cq->vector)) {
			/* Seed the requested vector from the ring's CPU affinity */
			cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);

			err = mlx4_assign_eq(mdev->dev, priv->port,
					     &cq->vector);
			if (err) {
				mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
					 cq->vector);
				goto free_eq;
			}

			/* Remember to release this EQ on any later failure */
			assigned_eq = true;
		}

		cq->irq_desc =
			irq_to_desc(mlx4_eq_get_irq(mdev->dev,
						    cq->vector));
	} else {
		/* For TX we use the same irq per
		ring we assigned for the RX    */
		struct mlx4_en_cq *rx_cq;

		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring]->actual_size;

	/* Enable HW timestamping when requested for the CQ's direction */
	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
		timestamp_en = 1;

	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en);
	if (err)
		goto free_eq;

	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	if (cq->is_tx)
		netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
				  NAPI_POLL_WEIGHT);
	else
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);

	napi_enable(&cq->napi);

	return 0;

free_eq:
	if (assigned_eq)
		mlx4_release_eq(mdev->dev, cq->vector);
	/* Mark the vector invalid so a later activation reassigns it */
	cq->vector = mdev->dev->caps.num_comp_vectors;
	return err;
}
Example #6
0
/*
 * mlx4_ib_create_cq() - create an IB completion queue.
 *
 * For userspace consumers (udata != NULL) the CQ buffer and doorbell come
 * from user memory described by the ucmd; for kernel consumers they are
 * allocated here.  On success returns the embedded ib_cq; on failure all
 * partially acquired resources are unwound via the goto chain and an
 * ERR_PTR is returned.
 */
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	void *buf_addr;
	int err;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	/* Reject creation flags this driver does not implement */
	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	/* One extra CQE for producer/consumer disambiguation; HW wants a power of two */
	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (udata) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		buf_addr = (void *)(unsigned long)ucmd.buf_addr;
		err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
		if (err)
			goto err_mtt;

		uar = &context->uar;
		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		/* Doorbell record: consumer index first, arm doorbell right after it */
		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		buf_addr = &cq->buf.buf;

		uar = &dev->priv_uar;
		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	}

	/* Map the requested completion vector through the EQ table if present */
	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
			    &cq->mcq, vector, 0,
			    !!(cq->create_flags &
			       IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
			    buf_addr, !!udata);
	if (err)
		goto err_dbmap;

	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	/* Hand the CQ number back to userspace */
	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return &cq->ibcq;

err_cq_free:
	/* HW CQ exists at this point and must be destroyed explicitly */
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (udata)
		mlx4_ib_db_unmap_user(context, &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (udata)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}
Example #7
0
/*
 * mlx4_en_activate_cq() - bring a software CQ online against the HCA.
 *
 * Wires the CQ doorbell records, selects a completion vector (RX CQs may
 * be given a dedicated EQ when a completion pool exists, with an RFS rmap
 * when CONFIG_RFS_ACCEL is set; TX CQs reuse the vector of the RX CQ at
 * the same wrapped index), allocates the hardware CQ and registers the
 * NAPI handlers.
 *
 * Returns 0 on success or the error from mlx4_cq_alloc().
 */
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	char name[25];
	int timestamp_en = 0;
	struct cpu_rmap *rmap =
#ifdef CONFIG_RFS_ACCEL
		priv->dev->rx_cpu_rmap;
#else
		NULL;
#endif

	cq->dev = mdev->pndev[priv->port];
	cq->mcq.set_ci_db  = cq->wqres.db.db;
	cq->mcq.arm_db     = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (cq->is_tx == RX) {
		if (mdev->dev->caps.comp_pool) {
			if (!cq->vector) {
				/*
				 * Bug fix: "<ifname>-<ring>" can need up to
				 * IFNAMSIZ-1 + 1 + 10 + 1 bytes, which
				 * overflows name[25] with sprintf(); bound
				 * the write instead.
				 */
				snprintf(name, sizeof(name), "%s-%d",
					 priv->dev->name, cq->ring);
				/* Set IRQ for specific name (per ring) */
				if (mlx4_assign_eq(mdev->dev, name, rmap,
						   &cq->vector)) {
					/* No dedicated EQ: fall back to a legacy vector */
					cq->vector = (cq->ring + 1 + priv->port)
					    % mdev->dev->caps.num_comp_vectors;
					mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
						  name);
				}

				cq->irq_desc =
					irq_to_desc(mlx4_eq_get_irq(mdev->dev,
								    cq->vector));
			}
		} else {
			cq->vector = (cq->ring + 1 + priv->port) %
				mdev->dev->caps.num_comp_vectors;
		}
	} else {
		/* For TX we use the same irq per
		ring we assigned for the RX    */
		struct mlx4_en_cq *rx_cq;

		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring]->actual_size;

	/* Enable HW timestamping when requested for the CQ's direction */
	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
		timestamp_en = 1;

	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en);
	if (err)
		return err;

	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	if (cq->is_tx) {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
			       NAPI_POLL_WEIGHT);
	} else {
		struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];

		/* Affinity is best-effort; a failure only costs locality */
		err = irq_set_affinity_hint(cq->mcq.irq,
					    ring->affinity_mask);
		if (err)
			mlx4_warn(mdev, "Failed setting affinity hint\n");

		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
		napi_hash_add(&cq->napi);
	}

	napi_enable(&cq->napi);

	return 0;
}
Example #8
0
/*
 * mlx4_en_activate_cq() - bring a software CQ online against the HCA.
 *
 * Wires the CQ doorbell records, selects a completion vector (RX CQs may
 * be given a dedicated EQ when a completion pool exists; TX CQs reuse the
 * vector of the RX CQ at the same wrapped index), then allocates the
 * hardware CQ and installs the completion/event callbacks.  TX CQs are
 * reaped from a timer.
 *
 * Returns 0 on success or the error from mlx4_cq_alloc().
 */
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	char name[25];
	int timestamp_en = 0;

	cq->dev = mdev->pndev[priv->port];
	cq->mcq.set_ci_db  = cq->wqres.db.db;
	cq->mcq.arm_db     = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (cq->is_tx == RX) {
		if (mdev->dev->caps.comp_pool) {
			if (!cq->vector) {
				/*
				 * Bug fix: "<ifname>-<ring>" can need up to
				 * IFNAMSIZ-1 + 1 + 10 + 1 bytes, which
				 * overflows name[25] with sprintf(); bound
				 * the write instead.
				 */
				snprintf(name, sizeof(name), "%s-%d",
					 if_name(priv->dev), cq->ring);
				/* Set IRQ for specific name (per ring) */
				if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
					/* No dedicated EQ: fall back to a legacy vector */
					cq->vector = (cq->ring + 1 + priv->port)
					    % mdev->dev->caps.num_comp_vectors;
					mlx4_warn(mdev, "Failed Assigning an EQ to "
						  "%s ,Falling back to legacy EQ's\n",
						  name);
				}
			}
		} else {
			cq->vector = (cq->ring + 1 + priv->port) %
				mdev->dev->caps.num_comp_vectors;
		}
	} else {
		struct mlx4_en_cq *rx_cq;
		/*
		 * For TX we use the same irq per
		 * ring we assigned for the RX
		 */
		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring]->actual_size;
	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en);
	if (err)
		return err;

	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	/* TX completions are reaped from a timer, not NAPI */
	if (cq->is_tx) {
		init_timer(&cq->timer);
		cq->timer.function = mlx4_en_poll_tx_cq;
		cq->timer.data = (unsigned long) cq;
	}

	return 0;
}
Example #9
0
/*
 * cq_test() - CQ allocation and modification test.
 *
 * Exercises the full CQ bring-up path: MTT init, UAR and doorbell
 * allocation, mlx4_cq_alloc(), field sanity checks, mlx4_cq_modify(),
 * then tears everything down.  The cleanup labels intentionally fall
 * through so the success path and every failure path release exactly
 * the resources acquired so far, in reverse order.
 *
 * Returns SUCCESS if every step passed its checks, FAIL otherwise;
 * diagnostic details go to 'log' via the VL_CHECK_* macros.
 */
int cq_test(struct mlx4_dev *dev, char* log) {

    struct mlx4_cq *cq;
    struct mlx4_mtt *mtt;
    struct mlx4_uar *uar;
    struct mlx4_db *db;

    int err;
    int expected_rc 		= 0;
    int collapsed 			= 0;
    int timestamp_en 		= 0;
    int npages 			= 1;
    int page_shift			= get_order(dev->caps.cqe_size) + PAGE_SHIFT;
    int ret_val 			= FAIL;
    int vector 			= 0;
    int nent 			= 2 * MLX4_NUM_TUNNEL_BUFS;

    u16 count 			= 88;
    u16 period 			= 0;
    u64 mtt_addr;

    /* Allocate host-side descriptors (M_WAITOK: these should not fail) */
    uar = malloc(sizeof *uar ,M_CQ_VAL, M_WAITOK );
    VL_CHECK_MALLOC(uar, goto without_free, log);

    mtt = malloc(sizeof *mtt ,M_CQ_VAL, M_WAITOK );
    VL_CHECK_MALLOC(mtt, goto free_uar, log);

    cq = malloc(sizeof *cq ,M_CQ_VAL, M_WAITOK );
    VL_CHECK_MALLOC(cq, goto free_mtt, log);

    db = malloc(sizeof *db ,M_CQ_VAL, M_WAITOK );
    VL_CHECK_MALLOC(db, goto free_cq, log);

    /* Phase 1: MTT init and sanity checks on the resulting translation table */
    err = mlx4_mtt_init(dev, npages, page_shift, mtt);
    VL_CHECK_RC(err, expected_rc, goto free_db , log, "failed to initialize MTT");
    uprintf("MTT was initialized successfuly\n");
    VL_CHECK_INT_VALUE(mtt->order, 0, goto cleanup_mtt, log, "mtt->order is wrong");
    VL_CHECK_INT_VALUE(mtt->page_shift, 12, goto cleanup_mtt, log, "mtt->page_shift is wrong");
    mtt_addr = mlx4_mtt_addr(dev, mtt);
    uprintf("MTT address is: %lu\n", mtt_addr);

    /* Phase 2: UAR and doorbell allocation */
    err = mlx4_uar_alloc(dev, uar);
    VL_CHECK_RC(err, expected_rc, goto cleanup_mtt , log, "failed to allocate UAR");
    uprintf("UAR was allocated successfuly\n");

    err = mlx4_db_alloc(dev, db, 1);
    VL_CHECK_RC(err, expected_rc, goto dealloc_uar , log, "failed to allocate DB");
    uprintf("DB was allocated successfuly\n");

    /* Phase 3: allocate the HW CQ and verify its initial state */
    err = mlx4_cq_alloc(dev, nent, mtt, uar, db->dma, cq, vector, collapsed, timestamp_en);
    VL_CHECK_RC(err, expected_rc, goto dealloc_db , log, "failed to allocate CQ");
    uprintf("CQ allocated successfuly\n");

    VL_CHECK_INT_VALUE(cq->cons_index, 0, goto dealloc_cq, log, "cq->cons_index is wrong");
    VL_CHECK_INT_VALUE(cq->arm_sn, 1, goto dealloc_cq, log, "cq->arm_sn is wrong");
    uprintf("cq->cqn = %d, cq->uar->pfn = %lu, cq->eqn = %d, cq->irq = %u\n", cq->cqn, cq->uar->pfn, cq->eqn, cq->irq );
    VL_CHECK_UNSIGNED_INT_VALUE(cq->cons_index, (unsigned int)0, goto dealloc_cq, log, "cq->cons_index != 0");
    VL_CHECK_INT_VALUE(cq->arm_sn, 1, goto dealloc_cq, log, "cq->arm_sn != 1");

    /* Phase 4: modify the CQ's moderation parameters */
    err = mlx4_cq_modify(dev, cq, count, period);
    VL_CHECK_RC(err, expected_rc, goto dealloc_cq , log, "failed to modify CQ");
    uprintf("CQ was modifyed successfuly\n");

    ret_val = SUCCESS;

    /* Cleanup: labels fall through, releasing in reverse acquisition order */
dealloc_cq:
    mlx4_cq_free(dev, cq);
    uprintf("CQ was freed successfuly\n");

dealloc_db:
    mlx4_db_free(dev, db);
    uprintf( "DB free was successful\n");

dealloc_uar:
    mlx4_uar_free(dev,uar);
    uprintf("UAR free was successful\n");

cleanup_mtt:
    mlx4_mtt_cleanup(dev, mtt);
    uprintf( "mtt clean-up was successful\n");

free_db:
    free(db, M_CQ_VAL);

free_cq:
    free(cq, M_CQ_VAL);

free_mtt:
    free(mtt, M_CQ_VAL);

free_uar:
    free(uar, M_CQ_VAL);

without_free:
    return ret_val;
}