static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not
		 * overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet. */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
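/*
 * For context, a minimal sketch of how dma_rx() is typically driven from the
 * RX interrupt path: the caller walks the ring from the driver's cached slot
 * up to the hardware's current RX slot, handing each filled slot to dma_rx().
 * This is a sketch only, assuming the usual b43 ring accessors
 * (ops->get_current_rxslot(), update_max_used_slots()) in addition to
 * next_slot() used above; it is not a verbatim copy of the driver's
 * b43_dma_rx().
 */
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	/* Ask the hardware how far it has filled the ring. */
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	wmb();	/* Buffer updates must be visible before advancing the slot. */
	ring->current_slot = slot;
}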
/* Returns whether we should fetch another frame. */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	u16 len;
	u32 macstat;
	unsigned int i, padding;
	struct sk_buff *skb;
	const char *err_msg = NULL;
	struct b43_rxhdr_fw4 *rxhdr =
		(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;

	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
	memset(rxhdr, 0, sizeof(*rxhdr));

	/* Check if we have data and wait for it to get ready. */
	if (q->rev >= 8) {
		u32 ctl;

		ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
		if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write32(q, B43_PIO8_RXCTL,
				  B43_PIO8_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
			if (ctl & B43_PIO8_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	} else {
		u16 ctl;

		ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
		if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write16(q, B43_PIO_RXCTL,
				  B43_PIO_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
			if (ctl & B43_PIO_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	}
	b43dbg(q->dev->wl, "PIO RX timed out\n");
	return 1;
data_ready:

	/* Get the preamble (RX header) */
	if (q->rev >= 8) {
		ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
	} else {
		ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
	}
	/* Sanity checks. */
	len = le16_to_cpu(rxhdr->frame_len);
	if (unlikely(len > 0x700)) {
		err_msg = "len > 0x700";
		goto rx_error;
	}
	if (unlikely(len == 0)) {
		err_msg = "len == 0";
		goto rx_error;
	}

	macstat = le32_to_cpu(rxhdr->mac_status);
	if (macstat & B43_RX_MAC_FCSERR) {
		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
			/* Drop frames with failed FCS. */
			err_msg = "Frame FCS error";
			goto rx_error;
		}
	}

	/* We always pad 2 bytes, as that's what upstream code expects
	 * due to the RX-header being 30 bytes. In case the frame is
	 * unaligned, we pad another 2 bytes. */
	padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
	skb = dev_alloc_skb(len + padding + 2);
	if (unlikely(!skb)) {
		err_msg = "Out of memory";
		goto rx_error;
	}
	skb_reserve(skb, 2);
	skb_put(skb, len + padding);
	if (q->rev >= 8) {
		ssb_block_read(dev->dev, skb->data + padding, (len & ~3),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
		if (len & 3) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

			/* Read the last few bytes. */
			ssb_block_read(dev->dev, tail, 4,
				       q->mmio_base + B43_PIO8_RXDATA,
				       sizeof(u32));
			switch (len & 3) {
			case 3:
				skb->data[len + padding - 3] = tail[0];
				skb->data[len + padding - 2] = tail[1];
				skb->data[len + padding - 1] = tail[2];
				break;
			case 2:
				skb->data[len + padding - 2] = tail[0];
				skb->data[len + padding - 1] = tail[1];
				break;
			case 1:
				skb->data[len + padding - 1] = tail[0];
				break;
			}
		}
	} else {
		ssb_block_read(dev->dev, skb->data + padding, (len & ~1),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
		if (len & 1) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

			/* Read the last byte. */
			ssb_block_read(dev->dev, tail, 2,
				       q->mmio_base + B43_PIO_RXDATA,
				       sizeof(u16));
			skb->data[len + padding - 1] = tail[0];
		}
	}

	b43_rx(q->dev, skb, rxhdr);

	return 1;

rx_error:
	if (err_msg)
		b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
	if (q->rev >= 8)
		b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
	else
		b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
	return 1;
}
/* Variant of pio_rx_frame() that handles multiple firmware RX-header
 * formats and uses the bus-agnostic b43_block_read() helpers.
 * Returns whether we should fetch another frame. */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	u16 len;
	u32 macstat = 0;
	unsigned int i, padding;
	struct sk_buff *skb;
	const char *err_msg = NULL;
	struct b43_rxhdr_fw4 *rxhdr =
		(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
	size_t rxhdr_size = sizeof(*rxhdr);

	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
	/* The RX-header size depends on the firmware header format. */
	switch (dev->fw.hdr_format) {
	case B43_FW_HDR_410:
	case B43_FW_HDR_351:
		rxhdr_size -= sizeof(rxhdr->format_598) -
			sizeof(rxhdr->format_351);
		break;
	case B43_FW_HDR_598:
		break;
	}
	memset(rxhdr, 0, rxhdr_size);

	/* Check if we have data and wait for it to get ready. */
	if (q->rev >= 8) {
		u32 ctl;

		ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
		if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write32(q, B43_PIO8_RXCTL,
				  B43_PIO8_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
			if (ctl & B43_PIO8_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	} else {
		u16 ctl;

		ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
		if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write16(q, B43_PIO_RXCTL,
				  B43_PIO_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
			if (ctl & B43_PIO_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	}
	b43dbg(q->dev->wl, "PIO RX timed out\n");
	return 1;
data_ready:

	/* Get the preamble (RX header) */
	if (q->rev >= 8) {
		b43_block_read(dev, rxhdr, rxhdr_size,
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
	} else {
		b43_block_read(dev, rxhdr, rxhdr_size,
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
	}
	/* Sanity checks. */
	len = le16_to_cpu(rxhdr->frame_len);
	if (unlikely(len > 0x700)) {
		err_msg = "len > 0x700";
		goto rx_error;
	}
	if (unlikely(len == 0)) {
		err_msg = "len == 0";
		goto rx_error;
	}

	switch (dev->fw.hdr_format) {
	case B43_FW_HDR_598:
		macstat = le32_to_cpu(rxhdr->format_598.mac_status);
		break;
	case B43_FW_HDR_410:
	case B43_FW_HDR_351:
		macstat = le32_to_cpu(rxhdr->format_351.mac_status);
		break;
	}
	if (macstat & B43_RX_MAC_FCSERR) {
		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
			/* Drop frames with failed FCS. */
			err_msg = "Frame FCS error";
			goto rx_error;
		}
	}

	/* We always pad 2 bytes, as that's what upstream code expects
	 * due to the RX-header being 30 bytes. In case the frame is
	 * unaligned, we pad another 2 bytes. */
	padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
	skb = dev_alloc_skb(len + padding + 2);
	if (unlikely(!skb)) {
		err_msg = "Out of memory";
		goto rx_error;
	}
	skb_reserve(skb, 2);
	skb_put(skb, len + padding);
	if (q->rev >= 8) {
		b43_block_read(dev, skb->data + padding, (len & ~3),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
		if (len & 3) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

			/* Read the last few bytes. */
			b43_block_read(dev, tail, 4,
				       q->mmio_base + B43_PIO8_RXDATA,
				       sizeof(u32));
			switch (len & 3) {
			case 3:
				skb->data[len + padding - 3] = tail[0];
				skb->data[len + padding - 2] = tail[1];
				skb->data[len + padding - 1] = tail[2];
				break;
			case 2:
				skb->data[len + padding - 2] = tail[0];
				skb->data[len + padding - 1] = tail[1];
				break;
			case 1:
				skb->data[len + padding - 1] = tail[0];
				break;
			}
		}
	} else {
		b43_block_read(dev, skb->data + padding, (len & ~1),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
		if (len & 1) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

			/* Read the last byte. */
			b43_block_read(dev, tail, 2,
				       q->mmio_base + B43_PIO_RXDATA,
				       sizeof(u16));
			skb->data[len + padding - 1] = tail[0];
		}
	}

	b43_rx(q->dev, skb, rxhdr);

	return 1;

rx_error:
	if (err_msg)
		b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
	if (q->rev >= 8)
		b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
	else
		b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
	return 1;
}
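/*
 * A minimal sketch of the corresponding receive loop: pio_rx_frame()
 * returns true as long as another frame should be fetched, so the caller
 * simply loops until it returns false. The iteration bound is an
 * illustrative safety valve and not necessarily what the driver uses.
 */
void b43_pio_rx(struct b43_pio_rxqueue *q)
{
	unsigned int count = 0;

	while (pio_rx_frame(q)) {
		/* Guard against a stuck FRAMERDY bit looping forever. */
		if (WARN_ON_ONCE(++count > 10000))
			break;
	}
}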