int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size   = mtt->page_shift - 12;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	u64 mtt_addr;
	int err;

	err = mlx4_srq_alloc_icm(dev, &srq->srqn);
	if (err)
		return err;

	spin_lock_irq(&srq_table->lock);
	err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
	spin_unlock_irq(&srq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	srq_context = mailbox->buf;
	memset(srq_context, 0, sizeof *srq_context);

	srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
						      srq->srqn);
	srq_context->logstride		= srq->wqe_shift - 4;
	srq_context->xrcd		= cpu_to_be16(xrcd);
	srq_context->pg_offset_cqn	= cpu_to_be32(cqn & 0xffffff);
	srq_context->log_page_size	= mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	srq_context->mtt_base_addr_h	= mtt_addr >> 32;
	srq_context->mtt_base_addr_l	= cpu_to_be32(mtt_addr & 0xffffffff);
	srq_context->pd			= cpu_to_be32(pdn);
	srq_context->db_rec_addr	= cpu_to_be64(db_rec);

	err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return 0;

err_radix:
	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

err_icm:
	mlx4_srq_free_icm(dev, srq->srqn);
	return err;
}
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed, int timestamp_en)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
		return -EINVAL;

	cq->vector = vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
	if (err)
		return err;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags = cpu_to_be32(!!collapsed << 18);
	if (timestamp_en)
		cq_context->flags |= cpu_to_be32(1 << 19);

	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn	    = priv->eq_table.eq[vector].eqn;
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	cq->eqn        = priv->eq_table.eq[cq->vector].eqn;
	cq->irq        = priv->eq_table.eq[cq->vector].irq;

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  int collapsed)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (cq->cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
	if (err)
		goto err_put;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_cmpt_put;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags	    = cpu_to_be32(!!collapsed << 18);
	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn	    = priv->eq_table.eq[MLX4_EQ_COMP].eqn;
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_cmpt_put:
	mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);

err_put:
	mlx4_table_put(dev, &cq_table->table, cq->cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);

	return err;
}
/* CQ allocation and modification test */
int cq_test(struct mlx4_dev *dev, char *log)
{
	struct mlx4_cq *cq;
	struct mlx4_mtt *mtt;
	struct mlx4_uar *uar;
	struct mlx4_db *db;
	int err;
	int expected_rc = 0;
	int collapsed = 0;
	int timestamp_en = 0;
	int npages = 1;
	int page_shift = get_order(dev->caps.cqe_size) + PAGE_SHIFT;
	int ret_val = FAIL;
	int vector = 0;
	int nent = 2 * MLX4_NUM_TUNNEL_BUFS;
	u16 count = 88;
	u16 period = 0;
	u64 mtt_addr;

	uar = malloc(sizeof *uar, M_CQ_VAL, M_WAITOK);
	VL_CHECK_MALLOC(uar, goto without_free, log);

	mtt = malloc(sizeof *mtt, M_CQ_VAL, M_WAITOK);
	VL_CHECK_MALLOC(mtt, goto free_uar, log);

	cq = malloc(sizeof *cq, M_CQ_VAL, M_WAITOK);
	VL_CHECK_MALLOC(cq, goto free_mtt, log);

	db = malloc(sizeof *db, M_CQ_VAL, M_WAITOK);
	VL_CHECK_MALLOC(db, goto free_cq, log);

	err = mlx4_mtt_init(dev, npages, page_shift, mtt);
	VL_CHECK_RC(err, expected_rc, goto free_db, log,
		    "failed to initialize MTT");
	uprintf("MTT was initialized successfully\n");

	VL_CHECK_INT_VALUE(mtt->order, 0, goto cleanup_mtt, log,
			   "mtt->order is wrong");
	VL_CHECK_INT_VALUE(mtt->page_shift, 12, goto cleanup_mtt, log,
			   "mtt->page_shift is wrong");

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	uprintf("MTT address is: %lu\n", mtt_addr);

	err = mlx4_uar_alloc(dev, uar);
	VL_CHECK_RC(err, expected_rc, goto cleanup_mtt, log,
		    "failed to allocate UAR");
	uprintf("UAR was allocated successfully\n");

	err = mlx4_db_alloc(dev, db, 1);
	VL_CHECK_RC(err, expected_rc, goto dealloc_uar, log,
		    "failed to allocate DB");
	uprintf("DB was allocated successfully\n");

	err = mlx4_cq_alloc(dev, nent, mtt, uar, db->dma, cq, vector,
			    collapsed, timestamp_en);
	VL_CHECK_RC(err, expected_rc, goto dealloc_db, log,
		    "failed to allocate CQ");
	uprintf("CQ allocated successfully\n");

	VL_CHECK_INT_VALUE(cq->cons_index, 0, goto dealloc_cq, log,
			   "cq->cons_index is wrong");
	VL_CHECK_INT_VALUE(cq->arm_sn, 1, goto dealloc_cq, log,
			   "cq->arm_sn is wrong");

	uprintf("cq->cqn = %d, cq->uar->pfn = %lu, cq->eqn = %d, cq->irq = %u\n",
		cq->cqn, cq->uar->pfn, cq->eqn, cq->irq);

	VL_CHECK_UNSIGNED_INT_VALUE(cq->cons_index, (unsigned int)0,
				    goto dealloc_cq, log,
				    "cq->cons_index != 0");
	VL_CHECK_INT_VALUE(cq->arm_sn, 1, goto dealloc_cq, log,
			   "cq->arm_sn != 1");

	err = mlx4_cq_modify(dev, cq, count, period);
	VL_CHECK_RC(err, expected_rc, goto dealloc_cq, log,
		    "failed to modify CQ");
	uprintf("CQ was modified successfully\n");

	ret_val = SUCCESS;

dealloc_cq:
	mlx4_cq_free(dev, cq);
	uprintf("CQ was freed successfully\n");

dealloc_db:
	mlx4_db_free(dev, db);
	uprintf("DB free was successful\n");

dealloc_uar:
	mlx4_uar_free(dev, uar);
	uprintf("UAR free was successful\n");

cleanup_mtt:
	mlx4_mtt_cleanup(dev, mtt);
	uprintf("MTT clean-up was successful\n");

free_db:
	free(db, M_CQ_VAL);

free_cq:
	free(cq, M_CQ_VAL);

free_mtt:
	free(mtt, M_CQ_VAL);

free_uar:
	free(uar, M_CQ_VAL);

without_free:
	return ret_val;
}
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	cq->vector = (vector == MLX4_LEAST_ATTACHED_VECTOR) ?
		mlx4_find_least_loaded_vector(priv) : vector;

	if (cq->vector > dev->caps.num_comp_vectors + dev->caps.poolsz)
		return -EINVAL;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
	if (err)
		return err;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags	    = cpu_to_be32(!!collapsed << 18);
	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn	    = priv->eq_table.eq[cq->vector].eqn;
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	priv->eq_table.eq[cq->vector].load++;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
/*
static void mlx4_unmap_uar(struct mlx4_priv *priv)
{
	struct mlx4_priv *priv = mlx4_priv(&priv->dev);
	int i;

	for (i = 0; i < mlx4_num_eq_uar(&priv->dev); ++i)
		if (priv->eq_table.uar_map[i]) {
			iounmap(priv->eq_table.uar_map[i]);
			priv->eq_table.uar_map[i] = NULL;
		}
}
*/

static int mlx4_create_eq(struct mlx4_priv *priv, int nent, u8 intr,
			  struct mlx4_eq *eq)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	genpaddr_t t = 0;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->priv = priv;
	eq->nent = roundup_pow_of_two(max(nent, 2));

	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
	npages = PAGE_ALIGN(eq->nent *
			    (MLX4_EQ_ENTRY_SIZE << priv->dev.caps.eqe_factor)) /
		 BASE_PAGE_SIZE;

	eq->page_list = malloc(npages * sizeof *eq->page_list);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = malloc(npages * sizeof *dma_list);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox();
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc(BASE_PAGE_SIZE, &t);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, BASE_PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(priv, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(&priv->dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(&priv->dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	/*printf("mtt_addr: %lx\n", mlx4_mtt_addr(&priv->dev, &eq->mtt));
	printf("off: %d\n", eq->mtt.offset);
	printf("size: %d\n", priv->dev.caps.mtt_entry_sz);*/

	mtt_addr = mlx4_mtt_addr(&priv->dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(priv, mailbox, eq->eqn);
	if (err) {
		MLX4_DEBUG("SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	free(dma_list);
	mlx4_free_cmd_mailbox(mailbox);

	eq->cons_index = 0;

	return err;

	/*TODO*/
err_out_free_mtt:
	/*mlx4_mtt_cleanup(&priv->dev, &eq->mtt);*/

err_out_free_eq:
	/*mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);*/

err_out_free_pages:
	/*for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free(&priv->dev.pdev->dev, PAGE_SIZE,
				 eq->page_list[i].buf, eq->page_list[i].map);*/
	mlx4_free_cmd_mailbox(mailbox);

err_out_free:
	free(eq->page_list);
	free(dma_list);

err_out:
	return err;
}