Example #1
static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;
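	/* Descriptors in [i, idx) have been filled by the device since the
	 * last pass; process them in ring order. */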
	while (i != idx) {
		u16 len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		desc = &ring[i];
		len = le16_to_cpu(desc->len);
		skb = rx_buf[i];

		if (!skb) {
			i++;
			i %= ring_limit;
			continue;
		}

		if (unlikely(len > priv->common.rx_mtu)) {
			if (net_ratelimit())
				dev_err(&priv->pdev->dev, "rx'd frame size "
					"exceeds length threshold.\n");

			len = priv->common.rx_mtu;
		}
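		/* Sync the DMA buffer for CPU access before the stack reads
		 * the frame. */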
		dma_addr = le32_to_cpu(desc->host_addr);
		pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
			priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
		skb_put(skb, len);

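		/* A non-zero return from p54_rx() means the stack took
		 * ownership of the skb: unmap it and leave the slot empty for
		 * the refill below. Otherwise the skb is reset and the buffer
		 * handed back to the device. */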
		if (p54_rx(dev, skb)) {
			pci_unmap_single(priv->pdev, dma_addr,
				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
			rx_buf[i] = NULL;
			desc->host_addr = cpu_to_le32(0);
		} else {
			skb_trim(skb, 0);
			pci_dma_sync_single_for_device(priv->pdev, dma_addr,
				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
		}

		i++;
		i %= ring_limit;
	}

	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}
Example #2
static int p54spi_rx(struct p54s_priv *priv)
{
	struct sk_buff *skb;
	u16 len;
	u16 rx_head[2];
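	/* rx_head[0] carries the frame length; rx_head[1] is the first
	 * payload word, fetched in the same transfer (READAHEAD_SZ bytes). */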
#define READAHEAD_SZ (sizeof(rx_head)-sizeof(u16))

	if (p54spi_wakeup(priv) < 0)
		return -EBUSY;

	/* Read the data size and the first data word in one SPI transaction.
	 * This is a workaround for a firmware/DMA bug where the first data
	 * word gets lost under high load.
	 */
	p54spi_spi_read(priv, SPI_ADRS_DMA_DATA, rx_head, sizeof(rx_head));
	len = rx_head[0];

	if (len == 0) {
		p54spi_sleep(priv);
		dev_err(&priv->spi->dev, "rx request of zero bytes\n");
		return 0;
	}

	/* Firmware may insert up to 4 padding bytes after the lmac header,
	 * but it does not amend the size of the SPI data transfer.
	 * Such packets have the correct data size in the header, thus
	 * referencing past the end of the allocated skb. Reserve an extra
	 * 4 bytes for this case.
	 */
	skb = dev_alloc_skb(len + 4);
	if (!skb) {
		p54spi_sleep(priv);
		dev_err(&priv->spi->dev, "could not alloc skb\n");
		return -ENOMEM;
	}

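	/* The read-ahead word already holds the start of the payload; copy
	 * it from rx_head and fetch any remainder with a second read. */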
	if (len <= READAHEAD_SZ) {
		memcpy(skb_put(skb, len), rx_head + 1, len);
	} else {
		memcpy(skb_put(skb, READAHEAD_SZ), rx_head + 1, READAHEAD_SZ);
		p54spi_spi_read(priv, SPI_ADRS_DMA_DATA,
				skb_put(skb, len - READAHEAD_SZ),
				len - READAHEAD_SZ);
	}
	p54spi_sleep(priv);
	/* Append the extra bytes to compensate for the possible
	 * alignment-caused truncation.
	 */
	skb_put(skb, 4);

	if (p54_rx(priv->hw, skb) == 0)
		dev_kfree_skb(skb);

	return 0;
}
Example #3
static void p54u_rx_cb(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct p54u_rx_info *info = (struct p54u_rx_info *)skb->cb;
	struct ieee80211_hw *dev = info->dev;
	struct p54u_priv *priv = dev->priv;

	if (unlikely(urb->status)) {
		info->urb = NULL;
		usb_free_urb(urb);
		return;
	}

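	/* Detach the skb from the pending-rx queue before handing it up. */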
	skb_unlink(skb, &priv->rx_queue);
	skb_put(skb, urb->actual_length);
	if (!priv->hw_type)
		skb_pull(skb, sizeof(struct net2280_tx_hdr));

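	/* If the stack consumed the skb, attach a fresh one to the URB;
	 * otherwise rewind the old skb so its buffer can be reused. */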
	if (p54_rx(dev, skb)) {
		skb = dev_alloc_skb(MAX_RX_SIZE);
		if (unlikely(!skb)) {
			usb_free_urb(urb);
			/* TODO check rx queue length and refill *somewhere* */
			return;
		}

		info = (struct p54u_rx_info *) skb->cb;
		info->urb = urb;
		info->dev = dev;
		urb->transfer_buffer = skb_tail_pointer(skb);
		urb->context = skb;
		skb_queue_tail(&priv->rx_queue, skb);
	} else {
		if (!priv->hw_type)
			skb_push(skb, sizeof(struct net2280_tx_hdr));

		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);
		if (urb->transfer_buffer != skb_tail_pointer(skb)) {
			/* this should not happen */
			WARN_ON(1);
			urb->transfer_buffer = skb_tail_pointer(skb);
		}

		skb_queue_tail(&priv->rx_queue, skb);
	}

	usb_submit_urb(urb, GFP_ATOMIC);
}
Example #4
static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;
	while (i != idx) {
		u16 len;
		struct sk_buff *skb;
		desc = &ring[i];
		len = le16_to_cpu(desc->len);
		skb = rx_buf[i];

		if (!skb) {
			i++;
			i %= ring_limit;
			continue;
		}
		skb_put(skb, len);

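		/* Consumed skbs are unmapped and their ring slots cleared for
		 * the refill; rejected ones are trimmed and left mapped for
		 * reuse. */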
		if (p54_rx(dev, skb)) {
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 PCI_DMA_FROMDEVICE);
			rx_buf[i] = NULL;
			desc->host_addr = 0;
		} else {
			skb_trim(skb, 0);
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
		}

		i++;
		i %= ring_limit;
	}

	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
}
Example #5
static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
    struct ieee80211_hw *dev = dev_id;
    struct p54p_priv *priv = dev->priv;
    struct p54p_ring_control *ring_control = priv->ring_control;
    __le32 reg;

    spin_lock(&priv->lock);
    reg = P54P_READ(int_ident);
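    /* Reading all-ones indicates the device has dropped off the bus
     * (e.g. surprise removal); nothing to service. */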
    if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) {
        spin_unlock(&priv->lock);
        return IRQ_HANDLED;
    }

    P54P_WRITE(int_ack, reg);

    reg &= P54P_READ(int_enable);

    if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) {
        struct p54p_desc *desc;
        u32 idx, i;
        i = priv->tx_idx;
        i %= ARRAY_SIZE(ring_control->tx_data);
        priv->tx_idx = idx = le32_to_cpu(ring_control->device_idx[1]);
        idx %= ARRAY_SIZE(ring_control->tx_data);

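        /* Reap tx descriptors the device has completed: free the staging
         * copy and unmap the frame for each consumed entry. */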
        while (i != idx) {
            desc = &ring_control->tx_data[i];
            if (priv->tx_buf[i]) {
                kfree(priv->tx_buf[i]);
                priv->tx_buf[i] = NULL;
            }

            pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
                     le16_to_cpu(desc->len), PCI_DMA_TODEVICE);

            desc->host_addr = 0;
            desc->device_addr = 0;
            desc->len = 0;
            desc->flags = 0;

            i++;
            i %= ARRAY_SIZE(ring_control->tx_data);
        }

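        /* Drain the rx ring in the same way. */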
        i = priv->rx_idx;
        i %= ARRAY_SIZE(ring_control->rx_data);
        priv->rx_idx = idx = le32_to_cpu(ring_control->device_idx[0]);
        idx %= ARRAY_SIZE(ring_control->rx_data);
        while (i != idx) {
            u16 len;
            struct sk_buff *skb;
            desc = &ring_control->rx_data[i];
            len = le16_to_cpu(desc->len);
            skb = priv->rx_buf[i];

            skb_put(skb, len);

            if (p54_rx(dev, skb)) {
                pci_unmap_single(priv->pdev,
                         le32_to_cpu(desc->host_addr),
                         MAX_RX_SIZE, PCI_DMA_FROMDEVICE);

                priv->rx_buf[i] = NULL;
                desc->host_addr = 0;
            } else {
                skb_trim(skb, 0);
                desc->len = cpu_to_le16(MAX_RX_SIZE);
            }

            i++;
            i %= ARRAY_SIZE(ring_control->rx_data);
        }

        p54p_refill_rx_ring(dev);

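        /* Make sure all descriptor updates are visible to the device
         * before ringing the doorbell. */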
        wmb();
        P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
    } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
        complete(&priv->boot_comp);

    spin_unlock(&priv->lock);

    return reg ? IRQ_HANDLED : IRQ_NONE;
}