/* Like bitmap_init, but doesn't require 'num' to be a power of 2 or
 * a non-trivial mask */
int mlx4_bitmap_init_no_mask(struct mlx4_bitmap *bitmap, u32 num,
			     u32 reserved_bot, u32 reserved_top)
{
	u32 num_rounded = roundup_pow_of_two(num);

	return mlx4_bitmap_init(bitmap, num_rounded, num_rounded - 1,
				reserved_bot, num_rounded - num + reserved_top);
}
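/*
 * Illustration (not part of the driver): how the no-mask wrapper keeps the
 * effective bitmap size equal to 'num'. For num = 1000, roundup_pow_of_two()
 * yields 1024, and the 24 surplus entries are hidden by growing reserved_top.
 * The demo_bitmap_sizes() helper below is hypothetical, shown only to make
 * the arithmetic concrete.
 */
static void demo_bitmap_sizes(void)
{
	u32 num = 1000;					/* not a power of two */
	u32 num_rounded = roundup_pow_of_two(num);	/* 1024 */
	u32 reserved_top = 0;

	/* The adjusted top reservation passed to mlx4_bitmap_init() */
	reserved_top = num_rounded - num + reserved_top;	/* 24 */

	/* With reserved_bot = 0, usable entries = 1024 - 24 = 1000 = num */
	(void)reserved_top;
}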
int mlx4_init_xrcd_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16),
				(1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0);
}
int mlx4_init_pd_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
				(1 << 24) - 1, dev->caps.reserved_pds, 0);
}
int mlx4_init_uar_table(struct mlx4_dev *dev)
{
	if (dev->caps.num_uars <= 128) {
		mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
			 dev->caps.num_uars);
		mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
		return -ENODEV;
	}

	return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
				dev->caps.num_uars, dev->caps.num_uars - 1,
				max(128, dev->caps.reserved_uars), 0);
}
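/*
 * Illustration (hypothetical helper, not in the driver): with num_uars = 1024
 * and caps.reserved_uars = 8, the call above reserves max(128, 8) = 128
 * entries at the bottom of the bitmap, so the first UAR index the allocator
 * can hand out is 128. Written without the kernel max() macro to avoid its
 * type-matching requirements.
 */
static u32 demo_first_free_uar(u32 reserved_uars)
{
	/* mirrors the reserved_bot argument passed above */
	return reserved_uars > 128 ? reserved_uars : 128;
}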
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}
int mlx4_init_srq_table(struct mlx4_dev *dev)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	spin_lock_init(&srq_table->lock);
	INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
			       dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
	if (err)
		return err;

	return 0;
}
struct mlx4_hugetlb_mem *mlx4_hugetlb_mem_alloc(size_t size)
{
	struct mlx4_hugetlb_mem *hmem;
	size_t shm_len;

	hmem = malloc(sizeof(*hmem));
	if (!hmem)
		return NULL;

	shm_len = (size > MLX4_SHM_LENGTH) ?
		  align(size, MLX4_SHM_LENGTH) : MLX4_SHM_LENGTH;
	hmem->shmid = shmget(IPC_PRIVATE, shm_len,
			     SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
	if (hmem->shmid < 0) {
		if (mlx4_trace)
			perror("shmget");
		free(hmem);
		return NULL;
	}

	hmem->shmaddr = shmat(hmem->shmid, MLX4_SHM_ADDR, MLX4_SHMAT_FLAGS);
	if (hmem->shmaddr == (char *)-1) {
		if (mlx4_trace)
			perror("Shared memory attach failure");
		shmctl(hmem->shmid, IPC_RMID, NULL);
		free(hmem);
		return NULL;
	}

	if (mlx4_bitmap_init(&hmem->bitmap, shm_len / MLX4_Q_CHUNK_SIZE,
			     shm_len / MLX4_Q_CHUNK_SIZE - 1)) {
		if (mlx4_trace)
			perror("mlx4_bitmap_init");
		mlx4_hugetlb_mem_free(hmem);
		return NULL;
	}

	/* Mark the segment for destruction once every process detaches */
	shmctl(hmem->shmid, IPC_RMID, NULL);

	return hmem;
}
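/*
 * Usage sketch (hedged): a minimal caller of the huge-page pool above. The
 * mlx4_hugetlb_mem_free() call is assumed from the failure path in
 * mlx4_hugetlb_mem_alloc(); per-chunk accounting via hmem->bitmap is left
 * out. demo_hugetlb_buf() itself is hypothetical.
 */
static void *demo_hugetlb_buf(size_t size)
{
	struct mlx4_hugetlb_mem *hmem;

	hmem = mlx4_hugetlb_mem_alloc(size);
	if (!hmem)
		return NULL;	/* caller might fall back to regular pages */

	/* The whole segment is mapped at hmem->shmaddr; real callers carve
	 * MLX4_Q_CHUNK_SIZE chunks out of it via hmem->bitmap. */
	return hmem->shmaddr;
}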
/*
void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
}
*/

int mlx4_init_xrcd_table(struct mlx4_priv *priv)
{
	return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16),
				(1 << 16) - 1,
				priv->dev.caps.reserved_xrcds + 1, 0);
}
/*
EXPORT_SYMBOL_GPL(mlx4_pd_alloc);

void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_pd_free);

int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	*xrcdn = mlx4_bitmap_alloc(&priv->xrcd_bitmap);
	if (*xrcdn == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param,
				   RES_XRCD, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*xrcdn = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_xrcd_alloc(dev, xrcdn);
}
EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);

void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR);
}

void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, xrcdn);
		err = mlx4_cmd(dev, in_param, RES_XRCD,
			       RES_OP_RESERVE, MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to release xrcdn %d\n", xrcdn);
	} else
		__mlx4_xrcd_free(dev, xrcdn);
}
EXPORT_SYMBOL_GPL(mlx4_xrcd_free);
*/

int mlx4_init_pd_table(struct mlx4_priv *priv)
{
	return mlx4_bitmap_init(&priv->pd_bitmap, priv->dev.caps.num_pds,
				(1 << NOT_MASKED_PD_BITS) - 1,
				priv->dev.caps.reserved_pds, 0);
}
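/*
 * Note on the pattern in the commented-out block above: mlx4_xrcd_alloc() and
 * mlx4_xrcd_free() check mlx4_is_mfunc() so that, on multi-function devices,
 * XRC domains are reserved and released through wrapped firmware commands
 * (MLX4_CMD_ALLOC_RES/MLX4_CMD_FREE_RES), while the native path manipulates
 * the bitmap directly via the __mlx4_xrcd_* helpers.
 */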
int mlx4_init_uar_table(struct mlx4_dev *dev)
{
	return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
				dev->caps.num_uars, dev->caps.num_uars - 1,
				max(128, dev->caps.reserved_uars), 0);
}
/*
void mlx4_free_eq_table(struct mlx4_priv *priv)
{
	kfree(mlx4_priv(&priv->dev)->eq_table.eq);
}
*/

int mlx4_init_eq_table(struct mlx4_priv *priv)
{
	int err;
	int i;

	priv->eq_table.uar_map = calloc(mlx4_num_eq_uar(&priv->dev),
					sizeof *priv->eq_table.uar_map);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, priv->dev.caps.num_eqs,
			       priv->dev.caps.num_eqs - 1,
			       priv->dev.caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(&priv->dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(&priv->dev)) {
		err = mlx4_map_clr_int(priv);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		malloc(MLX4_IRQNAME_SIZE * (priv->dev.caps.num_comp_vectors +
					    1 + priv->dev.caps.comp_pool));
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_clr_int;
	}

	for (i = 0; i < priv->dev.caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(priv, priv->dev.caps.num_cqs -
					   priv->dev.caps.reserved_cqs +
					   MLX4_NUM_SPARE_EQE,
				     (priv->dev.flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(priv, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (priv->dev.flags & MLX4_FLAG_MSI_X) ?
			     priv->dev.caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[priv->dev.caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/* If the pool of additional completion vectors is empty,
	 * this loop does not run. */
	for (i = priv->dev.caps.num_comp_vectors + 1;
	     i < priv->dev.caps.num_comp_vectors +
		 priv->dev.caps.comp_pool + 1; ++i) {
		err = mlx4_create_eq(priv, priv->dev.caps.num_cqs -
					   priv->dev.caps.reserved_cqs +
					   MLX4_NUM_SPARE_EQE,
				     (priv->dev.flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	if (priv->dev.flags & MLX4_FLAG_MSI_X) {
		assert(!"not implemented!");
		/*
		const char *eq_name;

		for (i = 0; i < priv->dev.caps.num_comp_vectors + 1; ++i) {
			if (i < priv->dev.caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(priv->dev.pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(priv->dev.pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
		*/
	} else {
		snprintf(priv->eq_table.irq_names, MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:"/*, pci_name(priv->dev.pdev)*/);
		/*
		err = request_irq(priv->dev.pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;
		*/
		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(priv, get_async_ev_mask(priv), 0,
			  priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn);
	if (err)
		MLX4_DEBUG("MAP_EQ for async EQ %d failed (%d)\n",
			   priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn,
			   err);

	for (i = 0; i < priv->dev.caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

/* TODO */
/*
err_out_async:
	mlx4_free_eq(&priv->dev,
		     &priv->eq_table.eq[priv->dev.caps.num_comp_vectors]);
*/
err_out_comp:
	i = priv->dev.caps.num_comp_vectors - 1;

err_out_unmap:
	/*
	while (i >= 0) {
		mlx4_free_eq(&priv->dev, &priv->eq_table.eq[i]);
		--i;
	}
	mlx4_free_irqs(&priv->dev);
	*/

err_out_clr_int:
	/*
	if (!mlx4_is_slave(&priv->dev))
		mlx4_unmap_clr_int(&priv->dev);
	*/

err_out_bitmap:
	/* mlx4_unmap_uar(&priv->dev); */
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	free(priv->eq_table.uar_map);
	return err;
}