Пример #1
0
/*
 * Drain every per-queue TX buf_ring on the VSI, freeing all buffered
 * mbufs, then flush the generic ifnet send queue as well.
 */
void
ixl_qflush(struct ifnet *ifp)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct mbuf	*m;
	int		qidx;

	for (qidx = 0; qidx < vsi->num_queues; qidx++) {
		struct tx_ring *txr = &vsi->queues[qidx].txr;

		/* Dequeue under the TX lock; single-consumer variant is
		 * safe because we are the only dequeuer while holding it. */
		IXL_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXL_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}
Пример #2
0
/*
 * Tear down a rate-limited TX ring: invalidate it, drain any buffered
 * mbufs, drop the rate-limit reference, deactivate the ring/CQ while the
 * port is up, release its sysctl stats context, and return the ring index
 * to the re-use list.  Caller must have validated ring_id (see comment
 * below: the ring pointer is assumed non-NULL).
 */
static void mlx4_en_destroy_rl_res(struct mlx4_en_priv *priv,
                                    int ring_id)
{
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_dev *mdev = priv->mdev;

	ring = priv->tx_ring[ring_id];

	/* Serialize against device state changes for the whole teardown. */
	mutex_lock(&mdev->state_lock);

	/* Index was validated, thus ring is not NULL */
	spin_lock(&ring->tx_lock);
	if (ring->rl_data.user_valid == false) {
		/* Ring was never created (or already destroyed) by the user.
		 * NOTE(review): this early return leaves mdev->state_lock
		 * held — confirm whether this is an intentional contract or
		 * a missing mutex_unlock. */
		en_err(priv, "ring %d doesn't exist\n", ring_id);
		spin_unlock(&ring->tx_lock);
		return;
	} else {
		/* Mark invalid first so no new mbufs are queued to it. */
		ring->rl_data.user_valid = false;
	}
	/* Free everything still buffered on the drbr under the TX lock. */
	if (!drbr_empty(priv->dev, ring->br)) {
		struct mbuf *m;
		while ((m = buf_ring_dequeue_sc(ring->br)) != NULL) {
			m_freem(m);
		}
	}
	spin_unlock(&ring->tx_lock);
	/* Release this ring's hold on its rate-limit entry. */
	atomic_subtract_int(&priv->rate_limits[ring->rl_data.rate_index].ref, 1);

	/* Deactivate resources */
	if (priv->port_up) {
		mlx4_en_deactivate_tx_ring(priv, ring);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[ring_id]);
		/* Brief delay before reclaiming in-flight TX buffers —
		 * presumably to let hardware quiesce; confirm against the
		 * deactivation requirements of the device. */
		msleep(10);
		mlx4_en_free_tx_buf(priv->dev, ring);
	}
	mutex_unlock(&mdev->state_lock);

	/* clear statistics */
	/* NOTE(review): these writes happen after state_lock is dropped —
	 * confirm no concurrent user can reach this ring once
	 * user_valid == false. */
	ring->bytes = 0;
	ring->packets = 0;

	sysctl_ctx_free(&ring->rl_data.rl_stats_ctx);

	/* Add index to re-use list */
	priv->rate_limit_tx_ring_num--;
	mlx4_en_rl_reused_index_insert(priv, ring_id);
}
Пример #3
0
/*
 * Free all mbufs buffered on every send-queue buf_ring, then flush the
 * generic ifnet send queue.
 */
static void
nicvf_if_qflush(struct ifnet *ifp)
{
	struct nicvf *nic = if_getsoftc(ifp);
	struct queue_set *qs = nic->qs;
	size_t i;

	for (i = 0; i < qs->sq_cnt; i++) {
		struct snd_queue *sq = &qs->sq[i];
		struct mbuf *m;

		/* Drain under the TX lock so no producer races us. */
		NICVF_TX_LOCK(sq);
		while ((m = buf_ring_dequeue_sc(sq->br)) != NULL)
			m_freem(m);
		NICVF_TX_UNLOCK(sq);
	}
	if_qflush(ifp);
}