static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, int srqn,
				 int cqn, enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context) {
		mlx4_err(mdev, "Failed to allocate qp context\n");
		return -ENOMEM;
	}

	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
	if (err) {
		mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof(*context));
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context);

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
out:
	kfree(context);
	return err;
}
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context) {
		en_err(priv, "Failed to allocate qp context\n");
		return -ENOMEM;
	}

	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof(*context));
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring,
			   u32 size, u16 stride, int node, int queue_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed allocating TX ring\n");
			return -ENOMEM;
		}
	}

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
#ifdef CONFIG_RATELIMIT
	ring->rl_data.rate_index = 0;
	/* user_valid should be false in a rate-limit ring until the
	 * creation process of the ring is done, after the activation.
	 */
	if (queue_idx < priv->native_tx_ring_num)
		ring->rl_data.user_valid = true;
	else
		ring->rl_data.user_valid = false;
#endif
	ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
	ring->inline_thold = min(inline_thold, MAX_INLINE);
	mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
	mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

	/* Allocate the buf ring */
#ifdef CONFIG_RATELIMIT
	if (queue_idx < priv->native_tx_ring_num)
		ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
					  M_WAITOK, &ring->tx_lock.m);
	else
		ring->br = buf_ring_alloc(size / 4, M_DEVBUF,
					  M_WAITOK, &ring->tx_lock.m);
#else
	ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
				  M_WAITOK, &ring->tx_lock.m);
#endif
	if (ring->br == NULL) {
		en_err(priv, "Failed allocating tx_info ring\n");
		return -ENOMEM;
	}

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc_node(tmp, node);
	if (!ring->tx_info) {
		ring->tx_info = vmalloc(tmp);
		if (!ring->tx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
	if (!ring->bounce_buf) {
		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
		if (!ring->bounce_buf) {
			err = -ENOMEM;
			goto err_info;
		}
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
				    MLX4_RESERVE_BF_QP);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;

	ring->queue_index = queue_idx;
	if (queue_idx < priv->num_tx_rings_p_up)
		CPU_SET(queue_idx, &ring->affinity_mask);

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
err_info:
	vfree(ring->tx_info);
err_ring:
	buf_ring_free(ring->br, M_DEVBUF);
	kfree(ring);
	return err;
}
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_en_rss_context *rss_context;
	void *ptr;
	u8 rss_mask = 0x3f;
	int i, qpn;
	int err = 0;
	int good_qps = 0;

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num, &rss_map->base_qpn);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0].cqn, &context);

	ptr = ((void *) &context) + 0x3c;
	rss_context = (struct mlx4_en_rss_context *) ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	rss_context->flags = rss_mask;

	if (priv->mdev->profile.udp_rss)
		rss_context->base_qpn_udp = rss_context->default_qpn;
	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
			   int qpn, u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	inline_thold = min(inline_thold, MAX_INLINE);

	spin_lock_init(&ring->comp_lock);

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc(tmp);
	if (!ring->tx_info) {
		en_err(priv, "Failed allocating tx_info ring\n");
		return -ENOMEM;
	}
	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		en_err(priv, "Failed allocating bounce buffer\n");
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	ring->qpn = qpn;
	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_map;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;

	return 0;

err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	vfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring, uint32_t size,
			   uint16_t stride, int node, int queue_index)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(*ring), KMALLOC_WAIT, node);
	if (!ring) {
		ring = kzmalloc(sizeof(*ring), KMALLOC_WAIT);
		if (!ring) {
			en_err(priv, "Failed allocating TX ring\n");
			return -ENOMEM;
		}
	}

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = kmalloc_node(tmp, KMALLOC_WAIT | __GFP_NOWARN, node);
	if (!ring->tx_info) {
		ring->tx_info = vmalloc(tmp);
		if (!ring->tx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, KMALLOC_WAIT, node);
	if (!ring->bounce_buf) {
		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, KMALLOC_WAIT);
		if (!ring->bounce_buf) {
			err = -ENOMEM;
			goto err_info;
		}
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
	       ring, ring->buf, ring->size, ring->buf_size,
	       (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
				    MLX4_RESERVE_ETH_BF_QP);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, KMALLOC_WAIT);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

#if 0 // AKAROS_PORT
	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
#else
	if (true) {
#endif
		en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
		ring->bf_alloced = false;
		priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	} else {
		ring->bf_alloced = true;
		ring->bf_enabled = !!(priv->pflags &
				      MLX4_EN_PRIV_FLAGS_BLUEFLAME);
	}

	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
	ring->queue_index = queue_index;

	if (queue_index < priv->num_tx_rings_p_up)
		cpumask_set_cpu(cpumask_local_spread(queue_index,
						     priv->mdev->dev->numa_node),
				&ring->affinity_mask);

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_info:
#if 0 // AKAROS_PORT
	kvfree(ring->tx_info);
#endif
	ring->tx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;
	return err;
}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring)
{
	panic("Disabled");
#if 0 // AKAROS_PORT
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring = *pring;

	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	if (ring->bf_alloced)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	kvfree(ring->tx_info);
	ring->tx_info = NULL;
	kfree(ring);
	*pring = NULL;
#endif
}
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring, u32 size,
			   u16 stride, int node, int queue_index)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		en_err(priv, "Failed allocating TX ring\n");
		return -ENOMEM;
	}

	ring->size = size;
	ring->size_mask = size - 1;
	ring->sp_stride = stride;
	ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
	if (!ring->tx_info) {
		err = -ENOMEM;
		goto err_ring;
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
	if (!ring->bounce_buf) {
		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
		if (!ring->bounce_buf) {
			err = -ENOMEM;
			goto err_info;
		}
	}
	ring->buf_size = ALIGN(size * ring->sp_stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	ring->buf = ring->sp_wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
	       ring, ring->buf, ring->size, ring->buf_size,
	       (unsigned long long) ring->sp_wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
				    MLX4_RESERVE_ETH_BF_QP,
				    MLX4_RES_USAGE_DRIVER);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_hwq_res;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->sp_qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
		ring->bf_alloced = false;
		priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	} else {
		ring->bf_alloced = true;
		ring->bf_enabled = !!(priv->pflags &
				      MLX4_EN_PRIV_FLAGS_BLUEFLAME);
	}

	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
	ring->queue_index = queue_index;

	if (queue_index < priv->num_tx_rings_p_up)
		cpumask_set_cpu(cpumask_local_spread(queue_index,
						     priv->mdev->dev->numa_node),
				&ring->sp_affinity_mask);

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_info:
	kvfree(ring->tx_info);
	ring->tx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;
	return err;
}
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_en_rss_context *rss_context;
	void *ptr;
	int rss_xor = mdev->profile.rss_xor;
	u8 rss_mask = mdev->profile.rss_mask;
	int i, srqn, qpn, cqn;
	int err = 0;
	int good_qps = 0;

	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
				    rss_map->size, &rss_map->base_qpn);
	if (err) {
		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
			 rss_map->size, priv->port);
		return err;
	}

	for (i = 0; i < rss_map->size; i++) {
		cqn = priv->rx_ring[rss_map->map[i]].cqn;
		srqn = priv->rx_ring[rss_map->map[i]].srq.srqn;
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
	if (err) {
		mlx4_err(mdev, "Failed to reserve range for RSS "
			       "indirection qp\n");
		goto rss_err;
	}
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
		goto reserve_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0].cqn, 0, &context);

	ptr = ((void *) &context) + 0x3c;
	rss_context = (struct mlx4_en_rss_context *) ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	rss_context->hash_fn = rss_xor & 0x3;
	rss_context->flags = rss_mask << 2;

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
reserve_err:
	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
	return err;
}
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring,
			   u32 size, u16 stride, int node, int queue_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	uint32_t x;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed allocating TX ring\n");
			return -ENOMEM;
		}
	}

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,					/* any alignment */
	    0,					/* no boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MLX4_EN_TX_MAX_PAYLOAD_SIZE,	/* maxsize */
	    MLX4_EN_TX_MAX_MBUF_FRAGS,		/* nsegments */
	    MLX4_EN_TX_MAX_MBUF_SIZE,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &ring->dma_tag)))
		goto done;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->inline_thold = MAX(MIN_PKT_LEN, MIN(inline_thold, MAX_INLINE));
	mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
	mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

	/* Allocate the buf ring */
	ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
	    M_WAITOK, &ring->tx_lock.m, queue_idx, priv->tx_ring_num);
	if (ring->br == NULL) {
		en_err(priv, "Failed allocating tx_info ring\n");
		err = -ENOMEM;
		goto err_free_dma_tag;
	}

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node);
	if (!ring->tx_info) {
		ring->tx_info = kzalloc(tmp, GFP_KERNEL);
		if (!ring->tx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	/* Create DMA descriptor MAPs */
	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->tx_info[x].dma_map);
		if (err != 0) {
			while (x--) {
				bus_dmamap_destroy(ring->dma_tag,
				    ring->tx_info[x].dma_map);
			}
			goto err_info;
		}
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_dma_map;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
				    MLX4_RESERVE_BF_QP);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;

	ring->queue_index = queue_idx;
	if (queue_idx < priv->num_tx_rings_p_up)
		CPU_SET(queue_idx, &ring->affinity_mask);

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
err_info:
	vfree(ring->tx_info);
err_ring:
	buf_ring_free(ring->br, M_DEVBUF);
err_free_dma_tag:
	bus_dma_tag_destroy(ring->dma_tag);
done:
	kfree(ring);
	return err;
}