/* Post one descriptor to each RX ring per pass until every ring is full. */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = &priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size)) {
				/* Fail only below the minimum usable fill level;
				 * otherwise run with a partially filled ring. */
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					mlx4_err(mdev, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					if (netif_msg_rx_err(priv))
						mlx4_warn(mdev, "Only %d buffers allocated\n",
							  ring->actual_size);
					goto out;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
out:
	return 0;
}
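
/*
 * Illustrative aside, not driver code: the loop nesting above posts one
 * buffer to every ring per outer pass, so when allocations start failing
 * the rings differ by at most one buffer instead of the first rings being
 * full and the last ones empty. A toy userspace sketch of that fill order,
 * with hypothetical names:
 */
#include <stdio.h>

int main(void)
{
	int rings = 3, ring_size = 4, buf, ring;

	/* Same nesting as the driver: one buffer per ring, then repeat. */
	for (buf = 0; buf < ring_size; buf++)
		for (ring = 0; ring < rings; ring++)
			printf("post buffer %d to ring %d\n", buf, ring);
	return 0;
}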

/* Top up a single RX ring; returns the number of descriptors posted. */
static int mlx4_en_fill_rx_buf(struct net_device *dev,
			       struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int num = 0;
	int err;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		err = mlx4_en_prepare_rx_desc(priv, ring,
					      ring->prod & ring->size_mask);
		if (err) {
			if (netif_msg_rx_err(priv))
				mlx4_warn(priv->mdev,
					  "Failed preparing rx descriptor\n");
			priv->port_stats.rx_alloc_failed++;
			break;
		}
		++num;
		++ring->prod;
	}
	if ((u32) (ring->prod - ring->cons) == ring->size)
		ring->full = 1;

	return num;
}
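
/*
 * Illustrative aside, not driver code: the (u32)(prod - cons) test above
 * relies on free-running unsigned counters, so the fill level stays correct
 * even after the counters wrap past 2^32. A minimal userspace sketch of the
 * same accounting; the struct and function names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint32_t prod;	/* total descriptors ever posted */
	uint32_t cons;	/* total descriptors ever completed */
};

/* Outstanding descriptors: unsigned subtraction wraps modulo 2^32. */
static uint32_t demo_ring_level(const struct demo_ring *r)
{
	return r->prod - r->cons;
}

int main(void)
{
	/* prod has wrapped past zero; the difference is still correct. */
	struct demo_ring r = { .prod = 5, .cons = 0xfffffffe };

	printf("level = %u\n", demo_ring_level(&r));	/* prints 7 */
	return 0;
}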

/* Fill all RX rings; if allocations run out, shrink every ring to the
 * largest power of two that could actually be populated. */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;
	int err;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			err = mlx4_en_prepare_rx_desc(priv, ring,
						      ring->actual_size);
			if (err) {
				if (ring->actual_size == 0) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	/* Trim every ring back to new_size, releasing the excess buffers. */
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_buf(ring, ring->mbuf + ring->actual_size);
		}
	}
	return 0;
}
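
/*
 * Illustrative aside, not driver code: rounddown_pow_of_two() is used above
 * because descriptors are indexed with "prod & size_mask", which is only a
 * valid modulo when the ring size is a power of two. A userspace sketch of
 * that rounding, with a hypothetical stand-in for the kernel helper:
 */
#include <stdint.h>
#include <stdio.h>

/* Largest power of two <= n, for n >= 1. */
static uint32_t demo_rounddown_pow_of_two(uint32_t n)
{
	uint32_t p = 1;

	while ((p << 1) != 0 && (p << 1) <= n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t actual = 300;	/* e.g. only 300 of 1024 buffers allocated */
	uint32_t size = demo_rounddown_pow_of_two(actual);

	/* With size a power of two, "prod & mask" replaces "prod % size". */
	printf("new size = %u, mask = 0x%x\n", size, size - 1);	/* 256, 0xff */
	return 0;
}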

/* Fill all RX rings, passing explicit GFP flags to the descriptor allocator;
 * on partial failure, shrink every ring to a power-of-two size. */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL | __GFP_COLD)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	/* Trim every ring back to new_size, freeing the excess descriptors. */
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}
	return 0;
}