/* System-PM resume handler: re-arm the MCU command-response URB and the
 * rx buffer URBs, re-enable the tasklets that suspend disabled, then
 * re-initialize the hardware.  Any failure tears the device back down
 * through mt76x2u_cleanup() and propagates the error to the PM core.
 */
static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
{
	struct mt76x02_dev *dev = usb_get_intfdata(intf);
	struct mt76_usb *usb = &dev->mt76.usb;
	int ret;

	/* re-arm the MCU response URB before any MCU traffic can occur */
	reinit_completion(&usb->mcu.cmpl);
	ret = mt76u_submit_buf(&dev->mt76, USB_DIR_IN, MT_EP_IN_CMD_RESP,
			       &usb->mcu.res, GFP_KERNEL,
			       mt76u_mcu_complete_urb, &usb->mcu.cmpl);
	if (ret < 0)
		goto out_cleanup;

	ret = mt76u_submit_rx_buffers(&dev->mt76);
	if (ret < 0)
		goto out_cleanup;

	/* tasklets were disabled on suspend; let them run again */
	tasklet_enable(&usb->rx_tasklet);
	tasklet_enable(&usb->tx_tasklet);

	ret = mt76x2u_init_hardware(dev);
	if (ret < 0)
		goto out_cleanup;

	return 0;

out_cleanup:
	mt76x2u_cleanup(dev);
	return ret;
}
static void mt76u_rx_tasklet(unsigned long data) { struct mt76_dev *dev = (struct mt76_dev *)data; struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; int err, nsgs, buf_len = q->buf_size; struct mt76u_buf *buf; rcu_read_lock(); while (true) { buf = mt76u_get_next_rx_entry(q); if (!buf) break; nsgs = mt76u_process_rx_entry(dev, buf->urb); if (nsgs > 0) { err = mt76u_fill_rx_sg(dev, buf, nsgs, buf_len, SKB_WITH_OVERHEAD(buf_len)); if (err < 0) break; } mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, buf, GFP_ATOMIC, mt76u_complete_rx, dev); } mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL); rcu_read_unlock(); }
/* Upload one firmware chunk to the device over the in-band command
 * endpoint.
 *
 * The chunk is framed with a 4-byte MCU message header (port, length,
 * CMD type), padded with 4 zero bytes, and the FCE DMA address/length
 * registers are programmed before the URB is submitted.  The call
 * blocks (up to 1s) on an on-stack completion until the out URB
 * finishes, then advances the FCE tx descriptor index.
 *
 * Returns 0 on success, -ETIMEDOUT on upload timeout, or the URB
 * status / submit error on failure.  Caller provides @buf with an
 * already-mapped sg list large enough for header + @len + padding.
 */
static int __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev,
				       struct mt76u_buf *buf,
				       const void *fw_data, int len,
				       u32 dst_addr)
{
	u8 *data = sg_virt(&buf->urb->sg[0]);
	DECLARE_COMPLETION_ONSTACK(cmpl);
	__le32 info;
	u32 val;
	int err;

	/* in-band MCU message header: CPU tx port, payload length, CMD */
	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
			   MT_MCU_MSG_TYPE_CMD);

	memcpy(data, &info, sizeof(info));
	memcpy(data + sizeof(info), fw_data, len);
	/* 4 bytes of zero padding after the payload */
	memset(data + sizeof(info) + len, 0, 4);

	/* program the FCE DMA target address and (dword-aligned) length
	 * before pushing the data
	 */
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_ADDR, dst_addr);
	len = roundup(len, 4);
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_LEN, len << 16);

	buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
	err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
			       MT_EP_OUT_INBAND_CMD,
			       buf, GFP_KERNEL,
			       mt76u_mcu_complete_urb, &cmpl);
	if (err < 0)
		return err;

	/* wait for the out URB to complete; reclaim it on timeout so the
	 * buffer is not left in flight
	 */
	if (!wait_for_completion_timeout(&cmpl,
					 msecs_to_jiffies(1000))) {
		dev_err(dev->mt76.dev, "firmware upload timed out\n");
		usb_kill_urb(buf->urb);
		return -ETIMEDOUT;
	}

	if (mt76u_urb_error(buf->urb)) {
		dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
			buf->urb->status);
		return buf->urb->status;
	}

	/* tell the FCE one more tx descriptor has been consumed */
	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}
/* Wait for the MCU event that acknowledges command @seq.
 *
 * Up to five 300ms waits on the MCU completion are attempted.  Each
 * received response buffer is parsed (optionally feeding pending
 * register reads), then the response URB is immediately re-armed.
 * A response whose FCE info matches @seq with EVT_CMD_DONE means
 * success; anything else is logged and the loop keeps waiting.
 *
 * Returns 0 on success, -EIO on URB error, a submit error from
 * re-arming the URB, or -ETIMEDOUT when all attempts expire.
 */
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76u_buf *buf = &usb->mcu.res;
	struct urb *urb = buf->urb;
	int retry, ret;
	u32 rxfce;
	u8 *data;

	for (retry = 0; retry < 5; retry++) {
		if (!wait_for_completion_timeout(&usb->mcu.cmpl,
						 msecs_to_jiffies(300)))
			continue;

		if (urb->status)
			return -EIO;

		data = sg_virt(&urb->sg[0]);
		/* a register-read request is pending: payload carries the
		 * read results after the 4-byte FCE info word
		 */
		if (usb->mcu.rp)
			mt76x02u_multiple_mcu_reads(dev, data + 4,
						    urb->actual_length - 8);

		rxfce = get_unaligned_le32(data);
		/* re-arm the response URB before inspecting the event */
		ret = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
				       buf, GFP_KERNEL,
				       mt76u_mcu_complete_urb,
				       &usb->mcu.cmpl);
		if (ret)
			return ret;

		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
		    FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
			return 0;

		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
	}

	dev_err(dev->dev, "error: %s timed out\n", __func__);
	return -ETIMEDOUT;
}
int mt76u_submit_rx_buffers(struct mt76_dev *dev) { struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; unsigned long flags; int i, err = 0; spin_lock_irqsave(&q->lock, flags); for (i = 0; i < q->ndesc; i++) { err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, &q->entry[i].ubuf, GFP_ATOMIC, mt76u_complete_rx, dev); if (err < 0) break; } q->head = q->tail = 0; q->queued = 0; spin_unlock_irqrestore(&q->lock, flags); return err; }