void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake; /* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1, err_out2;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error once.
			 */
			if (!err_out2)
				b43dbg(dev->wl,
				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
				       ring->index, firstused, slot);
			err_out2 = 1;
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a
				 * frame, so the skb pointer must not be NULL.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged.
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;

			frame_succeed = b43_fill_txstatus_report(dev, info,
								 txstat);
#ifdef CPTCFG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
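/*
 * For context: next_slot(), used by b43_dma_handle_txstatus() above,
 * advances a slot index around the descriptor ring. The sketch below is
 * illustrative only and assumes slots are numbered 0..nr_slots-1; the
 * real helper elsewhere in this file is authoritative. The firstused
 * computation above follows the same modular arithmetic: e.g. with
 * nr_slots = 256, current_slot = 3 and used_slots = 10, firstused is
 * 3 - 10 + 1 = -6, which wraps to 256 + (-6) = 250.
 */
static inline int next_slot_sketch(struct b43_dmaring *ring, int slot)
{
	/* Wrap from the last slot back to slot 0. */
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}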
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer,
					       skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}
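/*
 * For context: generate_cookie(), called by dma_tx_fragment() above,
 * produces the 16-bit value that parse_cookie() later maps back to a
 * ring and slot in the TX status path. Below is a minimal sketch of a
 * matching encoder, assuming the common b43 layout (ring index in the
 * upper 4 bits, slot number in the lower 12 bits); illustrative only,
 * the real generate_cookie() in this file is authoritative.
 */
static u16 generate_cookie_sketch(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Cookie 0 is reserved for the RX path and 0xFFFF is special
	 * for multicast frames, so the ring index is biased by one. */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(cookie & ~0xF000);
	cookie |= (u16)slot;

	return cookie;
}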
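/*
 * For context: each TX frame occupies TX_SLOTS_PER_FRAME consecutive
 * slots -- one for the device TX header and one for the payload --
 * which is why dma_tx_fragment() indexes txhdr_cache by
 * slot / TX_SLOTS_PER_FRAME. A minimal sketch of the free-slot
 * accounting this implies (illustrative only; the real free_slots()
 * helper in this file is authoritative):
 */
static inline int free_slots_sketch(struct b43_dmaring *ring)
{
	return ring->nr_slots - ring->used_slots;
}

static inline bool ring_can_accept_frame_sketch(struct b43_dmaring *ring)
{
	/* A frame needs both its header and payload slots to be free. */
	return free_slots_sketch(ring) >= TX_SLOTS_PER_FRAME;
}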