/*
 * Upload a firmware image region to the MCU over USB. The image is
 * split into max_payload sized chunks, keeping 8 bytes of headroom per
 * chunk for the command framing added by __mt76x02u_mcu_fw_send_data().
 */
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
			      int data_len, u32 max_payload, u32 offset)
{
	int err, len, pos = 0, max_len = max_payload - 8;
	struct mt76u_buf buf;

	err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
			      GFP_KERNEL);
	if (err < 0)
		return err;

	while (data_len > 0) {
		len = min_t(int, data_len, max_len);
		err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos,
						  len, offset + pos);
		if (err < 0)
			break;

		data_len -= len;
		pos += len;
		/* give the MCU time to consume the previous chunk */
		usleep_range(5000, 10000);
	}
	mt76u_buf_free(&buf);

	return err;
}
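/*
 * Illustrative only: a minimal sketch of a caller that pushes two
 * firmware regions (ILM, then DLM) through mt76x02u_mcu_fw_send_data().
 * The identifiers example_upload_fw, ilm, ilm_len, dlm, dlm_len,
 * MCU_FW_URB_MAX_PAYLOAD and MT_MCU_DLM_OFFSET are assumptions made for
 * this sketch and are not defined by the code above; the in-tree
 * loaders may differ in detail.
 */
static int example_upload_fw(struct mt76x02_dev *dev,
			     const void *ilm, u32 ilm_len,
			     const void *dlm, u32 dlm_len)
{
	int err;

	/* instruction (ILM) region is loaded at MCU offset 0 */
	err = mt76x02u_mcu_fw_send_data(dev, ilm, ilm_len,
					MCU_FW_URB_MAX_PAYLOAD, 0);
	if (err < 0)
		return err;

	/* data (DLM) region is loaded at its own MCU offset */
	return mt76x02u_mcu_fw_send_data(dev, dlm, dlm_len,
					 MCU_FW_URB_MAX_PAYLOAD,
					 MT_MCU_DLM_OFFSET);
}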
/*
 * Allocate the main USB RX queue. When the host controller supports
 * scatter-gather, each entry gets MT_SG_MAX_SIZE segments of
 * MT_RX_BUF_SIZE; otherwise fall back to a single PAGE_SIZE buffer per
 * entry.
 */
static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err, nsgs;

	spin_lock_init(&q->rx_page_lock);
	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	if (mt76u_check_sg(dev)) {
		q->buf_size = MT_RX_BUF_SIZE;
		nsgs = MT_SG_MAX_SIZE;
	} else {
		q->buf_size = PAGE_SIZE;
		nsgs = 1;
	}

	for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
		err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
				      nsgs, q->buf_size,
				      SKB_WITH_OVERHEAD(q->buf_size),
				      GFP_KERNEL);
		if (err < 0)
			return err;
	}
	q->ndesc = MT_NUM_RX_ENTRIES;

	/* hand all pre-allocated buffers to the USB RX endpoint */
	return mt76u_submit_rx_buffers(dev);
}
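/*
 * Illustrative only: a minimal sketch of the matching teardown,
 * assuming mt76u_buf_free() (used in the firmware upload path above) is
 * the right helper to release each entry's ubuf. The name
 * example_free_rx is hypothetical and the driver's actual RX queue
 * cleanup may differ.
 */
static void example_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	/*
	 * assumes the RX URBs submitted by mt76u_submit_rx_buffers()
	 * have already been stopped (e.g. via usb_kill_urb())
	 */
	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);

	q->ndesc = 0;
}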