void mt76x2u_mcu_deinit(struct mt76x2_dev *dev) { struct mt76_usb *usb = &dev->mt76.usb; usb_kill_urb(usb->mcu.res.urb); mt76u_buf_free(&usb->mcu.res); }
/**
 * mt76x02u_mcu_fw_send_data - upload a firmware image region over USB
 * @dev: device instance
 * @data: firmware payload to transfer
 * @data_len: number of bytes to transfer
 * @max_payload: USB transfer buffer size; each chunk carries the payload
 *               plus 8 bytes of header overhead (consumed by
 *               __mt76x02u_mcu_fw_send_data — presumably command/DMA
 *               headers; confirm against that helper)
 * @offset: destination offset of the first byte inside firmware memory
 *
 * Splits @data into chunks of at most (@max_payload - 8) bytes and sends
 * them sequentially, sleeping 5-10 ms between chunks to let the MCU
 * consume each one.
 *
 * Return: 0 on success, a negative errno on allocation or transfer failure.
 */
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
			      int data_len, u32 max_payload, u32 offset)
{
	int err, len, pos = 0, max_len = max_payload - 8;
	struct mt76u_buf buf;

	/* Guard against max_payload <= 8: max_len would be non-positive,
	 * min_t() below would produce a non-positive chunk length and the
	 * while loop would never make progress (endless loop).
	 */
	if (max_payload <= 8)
		return -EINVAL;

	err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
			      GFP_KERNEL);
	if (err < 0)
		return err;

	while (data_len > 0) {
		len = min_t(int, data_len, max_len);
		err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos,
						  len, offset + pos);
		if (err < 0)
			break;

		data_len -= len;
		pos += len;
		/* give the MCU time to drain the chunk before the next one */
		usleep_range(5000, 10000);
	}
	mt76u_buf_free(&buf);

	return err;
}
/* Release all resources held by the main rx queue: the per-descriptor
 * usb buffers and the page-fragment cache used to refill them.
 */
static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);

	/* drop the refill page-frag cache, if one was ever allocated */
	spin_lock_bh(&q->rx_page_lock);
	if (q->rx_page.va) {
		struct page *page = virt_to_page(q->rx_page.va);

		__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
		memset(&q->rx_page, 0, sizeof(q->rx_page));
	}
	spin_unlock_bh(&q->rx_page_lock);
}