/*
 * Taskqueue callback: flush any transmit packets that were queued on
 * this queue's buf_ring while the TX path was busy.  Runs with the
 * TX ring lock held across the drain.
 */
void
ixl_deferred_mq_start(void *arg, int pending)
{
	struct ixl_queue *que = arg;
	struct tx_ring *txr = &que->txr;
	struct ifnet *ifp = que->vsi->ifp;

	IXL_TX_LOCK(txr);
	if (!drbr_empty(ifp, txr->br))
		ixl_mq_start_locked(ifp, txr);
	IXL_TX_UNLOCK(txr);
}
static void mlx4_en_destroy_rl_res(struct mlx4_en_priv *priv, int ring_id) { struct mlx4_en_tx_ring *ring; struct mlx4_en_dev *mdev = priv->mdev; ring = priv->tx_ring[ring_id]; mutex_lock(&mdev->state_lock); /* Index was validated, thus ring is not NULL */ spin_lock(&ring->tx_lock); if (ring->rl_data.user_valid == false) { en_err(priv, "ring %d doesn't exist\n", ring_id); spin_unlock(&ring->tx_lock); return; } else { ring->rl_data.user_valid = false; } if (!drbr_empty(priv->dev, ring->br)) { struct mbuf *m; while ((m = buf_ring_dequeue_sc(ring->br)) != NULL) { m_freem(m); } } spin_unlock(&ring->tx_lock); atomic_subtract_int(&priv->rate_limits[ring->rl_data.rate_index].ref, 1); /* Deactivate resources */ if (priv->port_up) { mlx4_en_deactivate_tx_ring(priv, ring); mlx4_en_deactivate_cq(priv, priv->tx_cq[ring_id]); msleep(10); mlx4_en_free_tx_buf(priv->dev, ring); } mutex_unlock(&mdev->state_lock); /* clear statistics */ ring->bytes = 0; ring->packets = 0; sysctl_ctx_free(&ring->rl_data.rl_stats_ctx); /* Add index to re-use list */ priv->rate_limit_tx_ring_num--; mlx4_en_rl_reused_index_insert(priv, ring_id); }