Example no. 1
0
/*
 * Set up the main USB RX queue: allocate the per-entry bookkeeping
 * array, size the data buffers according to scatter-gather support,
 * pre-allocate one buffer per entry, and hand all buffers to the
 * controller.
 *
 * Returns 0 on success or a negative errno on allocation failure.
 * NOTE(review): entries allocated before a mid-loop failure are not
 * freed here — presumably the caller's teardown path reclaims them;
 * confirm against the queue free routine.
 */
static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int nsgs, ret, i;

	spin_lock_init(&q->rx_page_lock);
	spin_lock_init(&q->lock);

	q->entry = devm_kcalloc(dev->dev, MT_NUM_RX_ENTRIES,
				sizeof(*q->entry), GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* SG-capable hosts take several smaller segments per URB;
	 * otherwise fall back to a single page-sized buffer. */
	if (mt76u_check_sg(dev)) {
		nsgs = MT_SG_MAX_SIZE;
		q->buf_size = MT_RX_BUF_SIZE;
	} else {
		nsgs = 1;
		q->buf_size = PAGE_SIZE;
	}

	for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
		ret = mt76u_buf_alloc(dev, &q->entry[i].ubuf, nsgs,
				      q->buf_size,
				      SKB_WITH_OVERHEAD(q->buf_size),
				      GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	q->ndesc = MT_NUM_RX_ENTRIES;

	return mt76u_submit_rx_buffers(dev);
}
Example no. 2
0
/*
 * RX bottom half: drain every completed URB from the main RX queue,
 * push each received frame up the stack, refill the buffer's
 * scatter-gather list and resubmit it to the controller.
 *
 * @data: struct mt76_dev pointer cast to the tasklet's unsigned long.
 */
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int len = q->buf_size;
	struct mt76u_buf *buf;
	int count, ret;

	rcu_read_lock();

	for (;;) {
		buf = mt76u_get_next_rx_entry(q);
		if (!buf)
			break;

		count = mt76u_process_rx_entry(dev, buf->urb);
		if (count > 0) {
			/* Frame consumed its pages; attach fresh ones
			 * before the URB goes back to the hardware. */
			ret = mt76u_fill_rx_sg(dev, buf, count, len,
					       SKB_WITH_OVERHEAD(len));
			if (ret < 0)
				break;
		}
		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				 buf, GFP_ATOMIC,
				 mt76u_complete_rx, dev);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}
Example no. 3
0
/*
 * Detach and return the host buffer bound to RX descriptor @idx.
 *
 * Optionally reports the received length (@len), the "more fragments
 * follow" flag (@more, only written when @len is non-NULL, matching
 * the callers' usage) and the descriptor info word (@info).  The
 * buffer's DMA mapping is released and ownership transfers to the
 * caller; the queue entry is cleared so the slot can be refilled.
 */
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	/* Descriptor words are little-endian on the bus: convert each
	 * one after its single racy read.  Previously buf0/ctrl were
	 * used raw while info was converted — broken on big-endian. */
	buf_addr = le32_to_cpu(ACCESS_ONCE(desc->buf0));
	if (len) {
		u32 ctl = le32_to_cpu(ACCESS_ONCE(desc->ctrl));

		*len = MT76_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}
Example no. 4
0
/*
 * Refill the RX ring @q with page-fragment buffers until it is full
 * (one slot is kept free to distinguish full from empty).
 *
 * @napi selects napi_alloc_frag() (softirq context) vs
 * netdev_alloc_frag().  Each fragment is DMA-mapped for the device
 * and linked into the ring at q->buf_offset; on mapping failure the
 * fragment is released and filling stops early.
 *
 * Returns the number of buffers queued; kicks the hardware if any
 * were added.
 *
 * Fix: the return value of mt76_dma_add_buf() was stored in a local
 * `idx` that was never read (dead store, -Wunused-but-set-variable);
 * the variable is removed.
 */
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;
	void *(*alloc)(unsigned int fragsz);

	if (napi)
		alloc = napi_alloc_frag;
	else
		alloc = netdev_alloc_frag;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = alloc(q->buf_size);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev->dev, addr)) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}