/* Release the ICM backing for SRQ number @srqn: drop the cMPT and SRQ
 * context table references, then return the number to the allocator bitmap.
 * Reverse order of __mlx4_srq_alloc_icm.
 */
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	struct mlx4_srq_table *tbl = &mlx4_priv(dev)->srq_table;

	mlx4_table_put(dev, &tbl->cmpt_table, srqn);
	mlx4_table_put(dev, &tbl->table, srqn);
	mlx4_bitmap_free(&tbl->bitmap, srqn);
}
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cq_table *cq_table = &priv->cq_table; mlx4_table_put(dev, &cq_table->cmpt_table, cqn); mlx4_table_put(dev, &cq_table->table, cqn); mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR); }
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cq_table *cq_table = &priv->cq_table; int err; *cqn = mlx4_bitmap_alloc(&cq_table->bitmap); if (*cqn == -1) return -ENOMEM; err = mlx4_table_get(dev, &cq_table->table, *cqn); if (err) goto err_out; err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn); if (err) goto err_put; return 0; err_put: mlx4_table_put(dev, &cq_table->table, *cqn); err_out: mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR); return err; }
/* Reserve an SRQ number and pin its ICM resources (context table entry and
 * cMPT entry). On success *srqn holds the new number and 0 is returned;
 * on failure the partial acquisitions are unwound in reverse order and a
 * negative errno is returned.
 */
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
	struct mlx4_srq_table *tbl = &mlx4_priv(dev)->srq_table;
	int ret;

	*srqn = mlx4_bitmap_alloc(&tbl->bitmap);
	if (*srqn == -1)
		return -ENOMEM;

	ret = mlx4_table_get(dev, &tbl->table, *srqn);
	if (ret)
		goto err_out;

	ret = mlx4_table_get(dev, &tbl->cmpt_table, *srqn);
	if (ret)
		goto err_put;

	return 0;

err_put:
	mlx4_table_put(dev, &tbl->table, *srqn);
err_out:
	mlx4_bitmap_free(&tbl->bitmap, *srqn);
	return ret;
}
/* Tear down an SRQ: wait until all outstanding references are dropped,
 * then release its context table entry and number.
 * NOTE(review): this file contains a second, longer definition of
 * mlx4_srq_free further down (with HW2SW_SRQ + radix-tree removal) —
 * likely two kernel versions pasted together; confirm which one belongs.
 */
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *tbl = &mlx4_priv(dev)->srq_table;

	/* Drop our reference; the last holder signals srq->free. */
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	mlx4_table_put(dev, &tbl->table, srq->srqn);
	mlx4_bitmap_free(&tbl->bitmap, srq->srqn);
}
void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; u64 in_param; int err; if (mlx4_is_slave(dev)) { *((u32 *) &in_param) = srqn; *(((u32 *) &in_param) + 1) = 0; err = mlx4_cmd(dev, in_param, RES_SRQ, ICM_RESERVE_AND_ALLOC, MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A); if (err) mlx4_warn(dev, "Failed freeing cq:%d\n", srqn); } else { mlx4_table_put(dev, &srq_table->cmpt_table, srqn); mlx4_table_put(dev, &srq_table->table, srqn); mlx4_bitmap_free(&srq_table->bitmap, srqn); } }
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; int err; err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn); if (err) mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn); spin_lock_irq(&srq_table->lock); radix_tree_delete(&srq_table->tree, srq->srqn); spin_unlock_irq(&srq_table->lock); if (atomic_dec_and_test(&srq->refcount)) complete(&srq->free); wait_for_completion(&srq->free); mlx4_table_put(dev, &srq_table->table, srq->srqn); mlx4_bitmap_free(&srq_table->bitmap, srq->srqn); }
int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cq_table *cq_table = &priv->cq_table; u64 out_param; int err; if (mlx4_is_slave(dev)) { err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ, ICM_RESERVE_AND_ALLOC, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A); if (err) { *cqn = -1; return err; } else { *cqn = out_param; return 0; } } *cqn = mlx4_bitmap_alloc(&cq_table->bitmap); if (*cqn == -1) return -ENOMEM; err = mlx4_table_get(dev, &cq_table->table, *cqn); if (err) goto err_out; err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn); if (err) goto err_put; return 0; err_put: mlx4_table_put(dev, &cq_table->table, *cqn); err_out: mlx4_bitmap_free(&cq_table->bitmap, *cqn); return err; }
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cq_table *cq_table = &priv->cq_table; int err; err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn); if (err) mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); synchronize_irq(priv->eq_table.eq[MLX4_EQ_COMP].irq); spin_lock_irq(&cq_table->lock); radix_tree_delete(&cq_table->tree, cq->cqn); spin_unlock_irq(&cq_table->lock); if (atomic_dec_and_test(&cq->refcount)) complete(&cq->free); wait_for_completion(&cq->free); mlx4_table_put(dev, &cq_table->table, cq->cqn); mlx4_bitmap_free(&cq_table->bitmap, cq->cqn); }
/* Create a completion queue with @nent entries backed by @mtt.
 *
 * Steps: reserve a CQN and pin its ICM, publish the CQ in the radix
 * tree, build the CQ context in a command mailbox and hand it to
 * firmware via SW2HW_CQ, then initialize the software state
 * (consumer index, arm serial number, refcount). Each failure point
 * unwinds exactly what was acquired before it, in reverse order.
 */
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  int collapsed)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *tbl = &priv->cq_table;
	struct mlx4_cmd_mailbox *mbox;
	struct mlx4_cq_context *ctx;
	u64 mtt_addr;
	int ret;

	cq->cqn = mlx4_bitmap_alloc(&tbl->bitmap);
	if (cq->cqn == -1)
		return -ENOMEM;

	ret = mlx4_table_get(dev, &tbl->table, cq->cqn);
	if (ret)
		goto err_out;

	ret = mlx4_table_get(dev, &tbl->cmpt_table, cq->cqn);
	if (ret)
		goto err_put;

	/* Publish the CQ so the EQ handler can look it up by number. */
	spin_lock_irq(&tbl->lock);
	ret = radix_tree_insert(&tbl->tree, cq->cqn, cq);
	spin_unlock_irq(&tbl->lock);
	if (ret)
		goto err_cmpt_put;

	mbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mbox)) {
		ret = PTR_ERR(mbox);
		goto err_radix;
	}

	/* Fill in the CQ context handed to firmware. */
	ctx = mbox->buf;
	memset(ctx, 0, sizeof *ctx);
	ctx->flags = cpu_to_be32(!!collapsed << 18);
	ctx->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	ctx->comp_eqn = priv->eq_table.eq[MLX4_EQ_COMP].eqn;
	ctx->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	ctx->mtt_base_addr_h = mtt_addr >> 32;
	ctx->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	ctx->db_rec_addr = cpu_to_be64(db_rec);

	ret = mlx4_SW2HW_CQ(dev, mbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mbox);
	if (ret)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn = 1;
	cq->uar = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	return 0;

err_radix:
	spin_lock_irq(&tbl->lock);
	radix_tree_delete(&tbl->tree, cq->cqn);
	spin_unlock_irq(&tbl->lock);

err_cmpt_put:
	mlx4_table_put(dev, &tbl->cmpt_table, cq->cqn);

err_put:
	mlx4_table_put(dev, &tbl->table, cq->cqn);

err_out:
	mlx4_bitmap_free(&tbl->bitmap, cq->cqn);
	return ret;
}
/* Create a shared receive queue in protection domain @pdn backed by @mtt.
 *
 * Steps: reserve an SRQN and pin its ICM, publish the SRQ in the radix
 * tree, build the SRQ context in a command mailbox and hand it to
 * firmware via SW2HW_SRQ, then initialize the refcount/completion used
 * for teardown. Each failure point unwinds what was acquired before it.
 * Assumes srq->max and srq->wqe_shift were set by the caller — TODO
 * confirm against callers.
 */
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
		   u64 db_rec, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *tbl = &mlx4_priv(dev)->srq_table;
	struct mlx4_cmd_mailbox *mbox;
	struct mlx4_srq_context *ctx;
	u64 mtt_addr;
	int ret;

	srq->srqn = mlx4_bitmap_alloc(&tbl->bitmap);
	if (srq->srqn == -1)
		return -ENOMEM;

	ret = mlx4_table_get(dev, &tbl->table, srq->srqn);
	if (ret)
		goto err_out;

	ret = mlx4_table_get(dev, &tbl->cmpt_table, srq->srqn);
	if (ret)
		goto err_put;

	/* Publish the SRQ so event handlers can look it up by number. */
	spin_lock_irq(&tbl->lock);
	ret = radix_tree_insert(&tbl->tree, srq->srqn, srq);
	spin_unlock_irq(&tbl->lock);
	if (ret)
		goto err_cmpt_put;

	mbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mbox)) {
		ret = PTR_ERR(mbox);
		goto err_radix;
	}

	/* Fill in the SRQ context handed to firmware. */
	ctx = mbox->buf;
	memset(ctx, 0, sizeof *ctx);
	ctx->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
					      srq->srqn);
	ctx->logstride = srq->wqe_shift - 4;
	ctx->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	ctx->mtt_base_addr_h = mtt_addr >> 32;
	ctx->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	ctx->pd = cpu_to_be32(pdn);
	ctx->db_rec_addr = cpu_to_be64(db_rec);

	ret = mlx4_SW2HW_SRQ(dev, mbox, srq->srqn);
	mlx4_free_cmd_mailbox(dev, mbox);
	if (ret)
		goto err_radix;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return 0;

err_radix:
	spin_lock_irq(&tbl->lock);
	radix_tree_delete(&tbl->tree, srq->srqn);
	spin_unlock_irq(&tbl->lock);

err_cmpt_put:
	mlx4_table_put(dev, &tbl->cmpt_table, srq->srqn);

err_put:
	mlx4_table_put(dev, &tbl->table, srq->srqn);

err_out:
	mlx4_bitmap_free(&tbl->bitmap, srq->srqn);
	return ret;
}