static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(priv->bus->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(priv->bus->dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
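/*
 * Hedged sketch (not part of the driver above): the dma_unmap_addr()/
 * dma_unmap_len() accessors used in iwlagn_unmap_tfd() assume the meta
 * structure declares its unmap bookkeeping with the DEFINE_DMA_UNMAP_*
 * helpers from <linux/dma-mapping.h> and fills them in at map time.
 * The struct and helper names below are illustrative assumptions; only
 * the field names ("mapping", "len") mirror the usage above.
 */
struct example_cmd_meta {
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* compiles away without map-state tracking */
	DEFINE_DMA_UNMAP_LEN(len);
};

static int example_map_cmd(struct device *dev, struct example_cmd_meta *meta,
			   void *buf, size_t size)
{
	dma_addr_t phys;

	phys = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, phys))
		return -ENOMEM;

	/* Record what dma_unmap_single() will need later. */
	dma_unmap_addr_set(meta, mapping, phys);
	dma_unmap_len_set(meta, len, size);
	return 0;
}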
/**
 * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != priv->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, priv->cmd_queue, sequence,
		 priv->txq[priv->cmd_queue].q.read_ptr,
		 priv->txq[priv->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	txq->time_stamp = jiffies;

	pci_unmap_single(priv->pci_dev,
			 dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_legacy_get_cmd_string(cmd->hdr.cmd));
		wake_up(&priv->wait_command_queue);
	}

	/* Mark as unmapped */
	meta->flags = 0;

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
}
static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}
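/*
 * Hedged sketch (an assumption, not the ixgbe source): the cleanup above
 * pairs with an XDP transmit path that maps the xdp_frame payload with
 * dma_map_single() and records the handle/length through
 * dma_unmap_addr_set()/dma_unmap_len_set() on the same tx_bi fields
 * ("dma", "len") that ixgbe_clean_xdp_tx_buffer() later unmaps.
 * The function name below is hypothetical.
 */
static int example_xdp_xmit_map(struct ixgbe_ring *tx_ring,
				struct ixgbe_tx_buffer *tx_bi,
				struct xdp_frame *xdpf)
{
	dma_addr_t dma;

	dma = dma_map_single(tx_ring->dev, xdpf->data, xdpf->len,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		return -ENOMEM;

	/* Remember the frame and its mapping for the cleanup path. */
	tx_bi->xdpf = xdpf;
	dma_unmap_addr_set(tx_bi, dma, dma);
	dma_unmap_len_set(tx_bi, len, xdpf->len);
	return 0;
}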
/**
 * arc_emac_tx_clean - clears Tx BDs processed by the EMAC.
 * @ndev:	Pointer to the network device.
 */
static void arc_emac_tx_clean(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		unsigned int *txbd_dirty = &priv->txbd_dirty;
		struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
		struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
		struct sk_buff *skb = tx_buff->skb;
		unsigned int info = le32_to_cpu(txbd->info);

		if ((info & FOR_EMAC) || !txbd->data || !skb)
			break;

		if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
			stats->tx_errors++;
			stats->tx_dropped++;

			if (info & DEFR)
				stats->tx_carrier_errors++;

			if (info & LTCL)
				stats->collisions++;

			if (info & UFLO)
				stats->tx_fifo_errors++;
		} else if (likely(info & FIRST_OR_LAST_MASK)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
		}

		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);

		/* return the sk_buff to system */
		dev_consume_skb_irq(skb);

		txbd->data = 0;
		txbd->info = 0;
		tx_buff->skb = NULL;

		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
	}

	/* Ensure that txbd_dirty is visible to tx() before checking
	 * for queue stopped.
	 */
	smp_mb();

	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
		netif_wake_queue(ndev);
}
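/*
 * Hedged sketch (an assumption about the transmit side, not the driver's
 * verbatim code): the smp_mb() before the netif_queue_stopped() check in
 * arc_emac_tx_clean() pairs with a stop-side sequence in the xmit handler
 * that stops the queue, issues its own barrier, and then re-checks ring
 * space, so that a wakeup cannot be lost between the two paths. The
 * function name below is hypothetical.
 */
static void example_emac_maybe_stop_tx(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	if (!arc_emac_tx_avail(priv)) {
		netif_stop_queue(ndev);
		/* Make the stop visible before re-reading txbd_dirty. */
		smp_mb();
		if (arc_emac_tx_avail(priv))
			netif_start_queue(ndev);
	}
}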
void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
				      struct fm10k_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
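/*
 * Hedged sketch (illustrative only): the cleanup above distinguishes the
 * skb head, mapped with dma_map_single() and therefore unmapped with
 * dma_unmap_single(), from page fragments, mapped with skb_frag_dma_map()
 * and therefore unmapped with dma_unmap_page(). A typical map side using
 * the same "dma"/"len" bookkeeping fields might look like this; both
 * function names are hypothetical.
 */
static int fm10k_example_map_head(struct fm10k_ring *ring,
				  struct fm10k_tx_buffer *tx_buffer,
				  struct sk_buff *skb)
{
	dma_addr_t dma = dma_map_single(ring->dev, skb->data,
					skb_headlen(skb), DMA_TO_DEVICE);

	if (dma_mapping_error(ring->dev, dma))
		return -ENOMEM;

	tx_buffer->skb = skb;
	dma_unmap_addr_set(tx_buffer, dma, dma);
	dma_unmap_len_set(tx_buffer, len, skb_headlen(skb));
	return 0;
}

static int fm10k_example_map_frag(struct fm10k_ring *ring,
				  struct fm10k_tx_buffer *tx_buffer,
				  const skb_frag_t *frag)
{
	dma_addr_t dma = skb_frag_dma_map(ring->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);

	if (dma_mapping_error(ring->dev, dma))
		return -ENOMEM;

	/* No skb pointer here, so cleanup takes the dma_unmap_page() branch. */
	dma_unmap_addr_set(tx_buffer, dma, dma);
	dma_unmap_len_set(tx_buffer, len, skb_frag_size(frag));
	return 0;
}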
/**
 * il3945_hw_txq_free_tfd - Free the one TFD at idx [txq->q.read_ptr]
 *
 * Does NOT advance any idxes
 */
void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
	int idx = txq->q.read_ptr;
	struct il3945_tfd *tfd = &tfd_tmp[idx];
	struct pci_dev *dev = il->pci_dev;
	int i;
	int counter;

	/* sanity check */
	counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
	if (counter > NUM_TFD_CHUNKS) {
		IL_ERR("Too many chunks: %i\n", counter);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (counter)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_TODEVICE);

	/* unmap chunks if any */
	for (i = 1; i < counter; i++)
		pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
				 le32_to_cpu(tfd->tbs[i].len),
				 PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[txq->q.read_ptr] = NULL;
		}
	}
}
/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev:	Pointer to the network device.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns:	Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to the upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, driver starts from this + 1
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		/* Prepare the BD for next cycle. netif_receive_skb()
		 * only if new skb was allocated and mapped to avoid holes
		 * in the RX fifo.
		 */
		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
		if (unlikely(!skb)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot allocate skb\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		addr = dma_map_single(&ndev->dev, (void *)skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot map dma buffer\n");

			dev_kfree_skb(skb);
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		/* unmap previously mapped skb */
		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb_put(rx_buff->skb, pktlen);
		rx_buff->skb->dev = ndev;
		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);

		netif_receive_skb(rx_buff->skb);

		rx_buff->skb = skb;
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}