/* Inspect a cookie and find out to which controller/slot it belongs. */
static struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie,
					int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}
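/* For illustration only: a minimal sketch of the inverse operation, i.e. how
 * a cookie with the layout assumed by parse_cookie() (ring identifier in the
 * upper nibble, slot index in the lower 12 bits) could be built. The driver's
 * real cookie generator may differ in details; this is not verbatim code. */
static u16 example_generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* TX rings 0..4 map to the 0x1000..0x5000 identifiers. */
	cookie = (((u16)ring->index + 1) << 12);
	/* The slot must fit into the lower 12 bits. */
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}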
static inline void assert_mac_suspended(struct b43_wldev *dev)
{
	if (!B43_DEBUG)
		return;
	if ((b43_status(dev) >= B43_STAT_INITIALIZED) &&
	    (dev->mac_suspended <= 0)) {
		b43dbg(dev->wl, "PHY/RADIO register access with "
		       "enabled MAC.\n");
		dump_stack();
	}
}
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
int b43_pio_init(struct b43_wldev *dev)
{
	struct b43_pio *pio = &dev->pio;
	int err = -ENOMEM;

	b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
		    & ~B43_MACCTL_BE);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0);

	pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0);
	if (!pio->tx_queue_AC_BK)
		goto out;

	pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1);
	if (!pio->tx_queue_AC_BE)
		goto err_destroy_bk;

	pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2);
	if (!pio->tx_queue_AC_VI)
		goto err_destroy_be;

	pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3);
	if (!pio->tx_queue_AC_VO)
		goto err_destroy_vi;

	pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4);
	if (!pio->tx_queue_mcast)
		goto err_destroy_vo;

	pio->rx_queue = b43_setup_pioqueue_rx(dev, 0);
	if (!pio->rx_queue)
		goto err_destroy_mcast;

	b43dbg(dev->wl, "PIO initialized\n");
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_queue_tx(pio, tx_queue_mcast);
err_destroy_vo:
	destroy_queue_tx(pio, tx_queue_AC_VO);
err_destroy_vi:
	destroy_queue_tx(pio, tx_queue_AC_VI);
err_destroy_be:
	destroy_queue_tx(pio, tx_queue_AC_BE);
err_destroy_bk:
	destroy_queue_tx(pio, tx_queue_AC_BK);
	return err;
}
/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CPTCFG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff. */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}
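/* The statistics block above relies on divide() and modulo() helpers that are
 * not part of this excerpt. A minimal sketch of how such 64-bit helpers can be
 * built on the kernel's do_div() macro is shown below; this is an assumption
 * for illustration, not necessarily the driver's exact definitions, and in
 * real code they would of course be defined before their first use. */
static u64 divide(u64 dividend, u64 divisor)
{
	do_div(dividend, divisor);	/* do_div() divides in place... */
	return dividend;		/* ...leaving the quotient in dividend */
}

static u64 modulo(u64 dividend, u64 divisor)
{
	return do_div(dividend, divisor);	/* do_div() returns the remainder */
}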
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CPTCFG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CPTCFG_B43_DEBUG */
	return 0;
}
/* Write the LocalOscillator Control (adjust) value-pair. */
static void b43_lo_write(struct b43_wldev *dev, struct b43_loctl *control)
{
	struct b43_phy *phy = &dev->phy;
	u16 value;
	u16 reg;

	if (B43_DEBUG) {
		if (unlikely(abs(control->i) > 16 || abs(control->q) > 16)) {
			b43dbg(dev->wl, "Invalid LO control pair "
			       "(I: %d, Q: %d)\n", control->i, control->q);
			dump_stack();
			return;
		}
	}
	value = (u8) (control->q);
	value |= ((u8) (control->i)) << 8;

	reg = (phy->type == B43_PHYTYPE_B) ? 0x002F : B43_PHY_LO_CTL;
	b43_phy_write(dev, reg, value);
}
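/* Worked example of the packing above (illustrative numbers, not driver
 * code): with control->i = 5 and control->q = -3, the low byte becomes
 * (u8)(-3) = 0xFD and the high byte (u8)5 = 0x05, so the register is written
 * with value = 0x05FD, i.e. I in bits 15:8 and Q in bits 7:0. */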
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet. */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
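/* dma_rx() above checks for a "poisoned" buffer to detect descriptors the
 * device never wrote to. As an illustration of the idea only (not the
 * driver's exact helpers), poisoning stamps a known pattern into a freshly
 * recycled buffer and the check verifies the pattern is still intact after
 * the supposed RX: */
static void example_poison_rx_buffer(struct b43_dmaring *ring,
				     struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr = (struct b43_rxhdr_fw4 *)skb->data;

	rxhdr->frame_len = 0;				/* no frame received yet */
	memset(skb->data + ring->frameoffset, 0xFF, 8);	/* marker pattern */
}

static bool example_rx_buffer_is_poisoned(struct b43_dmaring *ring,
					  struct sk_buff *skb)
{
	static const u8 poison[8] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	};

	/* If the marker survived, the device never touched this buffer. */
	return memcmp(skb->data + ring->frameoffset, poison,
		      sizeof(poison)) == 0;
}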
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_pio_txqueue *q;
	struct ieee80211_hdr *hdr;
	unsigned int hdrlen, total_len;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast queue will be sent after the DTIM. */
		q = dev->pio.tx_queue_mcast;
		/* Set the frame More-Data bit. Ucode will clear it
		 * for us on the last frame. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
	}

	hdrlen = b43_txhdr_size(dev);
	total_len = roundup(skb->len + hdrlen, 4);

	if (unlikely(total_len > q->buffer_size)) {
		err = -ENOBUFS;
		b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
		goto out;
	}
	if (unlikely(q->free_packet_slots == 0)) {
		err = -ENOBUFS;
		b43warn(dev->wl, "PIO: TX packet overflow.\n");
		goto out;
	}
	B43_WARN_ON(q->buffer_used > q->buffer_size);

	if (total_len > (q->buffer_size - q->buffer_used)) {
		/* Not enough memory on the queue. */
		err = -EBUSY;
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = 1;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The mac80211-queue to b43-queue
	 * mapping is static, so we don't need to store it per frame. */
	q->queue_prio = skb_get_queue_mapping(skb);

	err = pio_tx_frame(q, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "PIO transmission failure\n");
		goto out;
	}
	q->nr_tx_packets++;

	B43_WARN_ON(q->buffer_used > q->buffer_size);
	if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
	    (q->free_packet_slots == 0)) {
		/* The queue is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = 1;
	}

out:
	return err;
}
int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);

		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:
	return err;
}
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake; /* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1, err_out2;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error once.
			 */
			if (!err_out2)
				b43dbg(dev->wl,
				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
				       ring->index, firstused, slot);
			err_out2 = 1;
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;

			frame_succeed = b43_fill_txstatus_report(dev, info,
								 txstat);
#ifdef CPTCFG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
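/* Worked example of the firstused wrap-around above (illustrative numbers,
 * not driver code): with nr_slots = 256, current_slot = 3 and used_slots = 10,
 * the raw value is 3 - 10 + 1 = -6, which wraps to 256 + (-6) = 250. That is,
 * the oldest in-flight slot sits near the end of the ring while the newest
 * entries have already wrapped around to the beginning. */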
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	u16 len;
	u32 macstat = 0;
	unsigned int i, padding;
	struct sk_buff *skb;
	const char *err_msg = NULL;
	struct b43_rxhdr_fw4 *rxhdr =
		(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
	size_t rxhdr_size = sizeof(*rxhdr);

	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
	switch (dev->fw.hdr_format) {
	case B43_FW_HDR_410:
	case B43_FW_HDR_351:
		rxhdr_size -= sizeof(rxhdr->format_598) -
			sizeof(rxhdr->format_351);
		break;
	case B43_FW_HDR_598:
		break;
	}
	memset(rxhdr, 0, rxhdr_size);

	if (q->rev >= 8) {
		u32 ctl;

		ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
		if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write32(q, B43_PIO8_RXCTL,
				  B43_PIO8_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
			if (ctl & B43_PIO8_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	} else {
		u16 ctl;

		ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
		if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write16(q, B43_PIO_RXCTL,
				  B43_PIO_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
			if (ctl & B43_PIO_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	}
	b43dbg(q->dev->wl, "PIO RX timed out\n");
	return 1;
data_ready:

	if (q->rev >= 8) {
		b43_block_read(dev, rxhdr, rxhdr_size,
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
	} else {
		b43_block_read(dev, rxhdr, rxhdr_size,
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
	}
	len = le16_to_cpu(rxhdr->frame_len);
	if (unlikely(len > 0x700)) {
		err_msg = "len > 0x700";
		goto rx_error;
	}
	if (unlikely(len == 0)) {
		err_msg = "len == 0";
		goto rx_error;
	}

	switch (dev->fw.hdr_format) {
	case B43_FW_HDR_598:
		macstat = le32_to_cpu(rxhdr->format_598.mac_status);
		break;
	case B43_FW_HDR_410:
	case B43_FW_HDR_351:
		macstat = le32_to_cpu(rxhdr->format_351.mac_status);
		break;
	}
	if (macstat & B43_RX_MAC_FCSERR) {
		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
			err_msg = "Frame FCS error";
			goto rx_error;
		}
	}

	padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
	skb = dev_alloc_skb(len + padding + 2);
	if (unlikely(!skb)) {
		err_msg = "Out of memory";
		goto rx_error;
	}
	skb_reserve(skb, 2);
	skb_put(skb, len + padding);
	if (q->rev >= 8) {
		b43_block_read(dev, skb->data + padding, (len & ~3),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
		if (len & 3) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

			b43_block_read(dev, tail, 4,
				       q->mmio_base + B43_PIO8_RXDATA,
				       sizeof(u32));
			switch (len & 3) {
			case 3:
				skb->data[len + padding - 3] = tail[0];
				skb->data[len + padding - 2] = tail[1];
				skb->data[len + padding - 1] = tail[2];
				break;
			case 2:
				skb->data[len + padding - 2] = tail[0];
				skb->data[len + padding - 1] = tail[1];
				break;
			case 1:
				skb->data[len + padding - 1] = tail[0];
				break;
			}
		}
	} else {
		b43_block_read(dev, skb->data + padding, (len & ~1),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
		if (len & 1) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

			b43_block_read(dev, tail, 2,
				       q->mmio_base + B43_PIO_RXDATA,
				       sizeof(u16));
			skb->data[len + padding - 1] = tail[0];
		}
	}

	b43_rx(q->dev, skb, rxhdr);

	return 1;

rx_error:
	if (err_msg)
		b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
	if (q->rev >= 8)
		b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
	else
		b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);

	return 1;
}
int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	switch (dev->dev->bus_type) {
#ifdef CPTCFG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CPTCFG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}
	dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);

	dma->parity = true;
#ifdef CPTCFG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}
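/* The destroy_ring() calls in the unwind path above take a struct member name
 * rather than a pointer. A plausible sketch (an assumption for illustration,
 * not verbatim from the driver) is a helper macro that passes the member's
 * name string to b43_destroy_dmaring() for the debug statistics printout and
 * clears the pointer afterwards; __stringify() comes from
 * <linux/stringify.h>. */
#define destroy_ring(dma, ring) do {					\
		b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
		(dma)->ring = NULL;					\
	} while (0)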
/* Returns whether we should fetch another frame. */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	u16 len;
	u32 macstat;
	unsigned int i, padding;
	struct sk_buff *skb;
	const char *err_msg = NULL;
	struct b43_rxhdr_fw4 *rxhdr =
		(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;

	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
	memset(rxhdr, 0, sizeof(*rxhdr));

	/* Check if we have data and wait for it to get ready. */
	if (q->rev >= 8) {
		u32 ctl;

		ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
		if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write32(q, B43_PIO8_RXCTL,
				  B43_PIO8_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
			if (ctl & B43_PIO8_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	} else {
		u16 ctl;

		ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
		if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write16(q, B43_PIO_RXCTL,
				  B43_PIO_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
			if (ctl & B43_PIO_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	}
	b43dbg(q->dev->wl, "PIO RX timed out\n");
	return 1;
data_ready:

	/* Get the preamble (RX header) */
	if (q->rev >= 8) {
		ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
	} else {
		ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
	}

	/* Sanity checks. */
	len = le16_to_cpu(rxhdr->frame_len);
	if (unlikely(len > 0x700)) {
		err_msg = "len > 0x700";
		goto rx_error;
	}
	if (unlikely(len == 0)) {
		err_msg = "len == 0";
		goto rx_error;
	}

	macstat = le32_to_cpu(rxhdr->mac_status);
	if (macstat & B43_RX_MAC_FCSERR) {
		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
			/* Drop frames with failed FCS. */
			err_msg = "Frame FCS error";
			goto rx_error;
		}
	}

	/* We always pad 2 bytes, as that's what upstream code expects
	 * due to the RX-header being 30 bytes. In case the frame is
	 * unaligned, we pad another 2 bytes. */
	padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
	skb = dev_alloc_skb(len + padding + 2);
	if (unlikely(!skb)) {
		err_msg = "Out of memory";
		goto rx_error;
	}
	skb_reserve(skb, 2);
	skb_put(skb, len + padding);
	if (q->rev >= 8) {
		ssb_block_read(dev->dev, skb->data + padding, (len & ~3),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
		if (len & 3) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

			/* Read the last few bytes. */
			ssb_block_read(dev->dev, tail, 4,
				       q->mmio_base + B43_PIO8_RXDATA,
				       sizeof(u32));
			switch (len & 3) {
			case 3:
				skb->data[len + padding - 3] = tail[0];
				skb->data[len + padding - 2] = tail[1];
				skb->data[len + padding - 1] = tail[2];
				break;
			case 2:
				skb->data[len + padding - 2] = tail[0];
				skb->data[len + padding - 1] = tail[1];
				break;
			case 1:
				skb->data[len + padding - 1] = tail[0];
				break;
			}
		}
	} else {
		ssb_block_read(dev->dev, skb->data + padding, (len & ~1),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
		if (len & 1) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

			/* Read the last byte. */
			ssb_block_read(dev->dev, tail, 2,
				       q->mmio_base + B43_PIO_RXDATA,
				       sizeof(u16));
			skb->data[len + padding - 1] = tail[0];
		}
	}

	b43_rx(q->dev, skb, rxhdr);

	return 1;

rx_error:
	if (err_msg)
		b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
	if (q->rev >= 8)
		b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
	else
		b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);

	return 1;
}
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame, so
				 * the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated. */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}