Example #1
/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used.
	 */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	    (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
		freed++;
	}

	iwl_queue_progress(trans_pcie, txq);

	return freed;
}
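
Both examples lean on the driver's ring-index helpers to advance and rewind queue positions. A minimal sketch of what they could look like, assuming n_bd (the number of buffer descriptors) is a power of two; the masked form below is an assumption based on that constraint, not a quote of the driver:

/* Sketch of the ring-index helpers used by iwl_tx_queue_reclaim.
 * Assumes n_bd is a power of two, so wrapping reduces to masking
 * with n_bd - 1. */
static inline int iwl_queue_inc_wrap(int index, int n_bd)
{
	return ++index & (n_bd - 1);
}

static inline int iwl_queue_dec_wrap(int index, int n_bd)
{
	return --index & (n_bd - 1);
}

With n_bd a power of two, ++index & (n_bd - 1) folds index n_bd back to 0, and --index & (n_bd - 1) maps -1 back to n_bd - 1, which is exactly the "one before index" that last_to_free needs.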
Example #2
/**
 * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	/* Nothing to unmap if the queue was never allocated */
	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}
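
iwl_tx_queue_reclaim bails out when last_to_free does not refer to an in-flight entry. A plausible sketch of the iwl_queue_used test it calls, assuming an index counts as used when it lies between read_ptr and write_ptr, with the inverted case covering wraparound; this body is an assumption, not a quote of the driver:

/* Sketch of the occupancy test consulted by iwl_tx_queue_reclaim.
 * An index is "used" when it sits between read_ptr (oldest in-flight
 * entry) and write_ptr (next free slot), accounting for wraparound. */
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}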
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used.
	 */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	    (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
		freed++;
	}
	return freed;
}
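
For illustration, a hypothetical caller sketch; example_reclaim_done, hw, and ssn are assumptions, not driver code. It reclaims everything below the firmware-reported sequence number and hands the skbs back to mac80211 for status reporting:

/* Hypothetical caller sketch, not taken from the driver: reclaim all
 * frames below the firmware-reported ssn and report their TX status.
 * txq->lock is taken inline here to satisfy the lockdep_assert_held()
 * in iwl_tx_queue_reclaim; in the real driver the lock lives in
 * PCIe-private state. */
static void example_reclaim_done(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 int txq_id, int ssn,
				 struct ieee80211_hw *hw)
{
	struct sk_buff_head skbs;
	struct sk_buff *skb;

	__skb_queue_head_init(&skbs);

	spin_lock(&txq->lock);
	iwl_tx_queue_reclaim(trans, txq_id, ssn, &skbs);
	spin_unlock(&txq->lock);

	/* Hand each reclaimed frame back to mac80211 */
	while ((skb = __skb_dequeue(&skbs)))
		ieee80211_tx_status(hw, skb);
}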