static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                        struct mt76_queue_entry *prev_e)
{
        struct mt76_queue_entry *e = &q->entry[idx];
        __le32 __ctrl = ACCESS_ONCE(q->desc[idx].ctrl);
        u32 ctrl = le32_to_cpu(__ctrl);

        if (!e->txwi || !e->skb) {
                __le32 addr = ACCESS_ONCE(q->desc[idx].buf0);
                u32 len = MT76_GET(MT_DMA_CTL_SD_LEN0, ctrl);

                dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
                                 DMA_TO_DEVICE);
        }

        if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
                __le32 addr = ACCESS_ONCE(q->desc[idx].buf1);
                u32 len = MT76_GET(MT_DMA_CTL_SD_LEN1, ctrl);

                dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
                                 DMA_TO_DEVICE);
        }

        if (e->txwi == DMA_DUMMY_TXWI)
                e->txwi = NULL;

        *prev_e = *e;
        memset(e, 0, sizeof(*e));
}
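/*
 * The MT76_GET()/MT76_SET() field accessors used throughout these functions
 * are not shown in this listing. Below is a minimal sketch of how they could
 * be built on the kernel's bitfield helpers; the driver may define them
 * differently, so treat this as an illustration rather than the actual
 * definition.
 */
#include <linux/bitfield.h>

/* Extract the contiguous bitfield described by the mask _field from _val. */
#define MT76_GET(_field, _val)  ((u32) FIELD_GET(_field, _val))
/* Shift _val into position for the contiguous bitfield described by _field. */
#define MT76_SET(_field, _val)  ((u32) FIELD_PREP(_field, _val))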
static int
mt76_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, enum mcu_cmd cmd)
{
        unsigned long expires = jiffies + HZ;
        u32 info;
        int ret;
        u8 seq;

        if (!skb)
                return -EINVAL;

        mutex_lock(&dev->mcu.mutex);

        /* never use sequence number 0 */
        seq = ++dev->mcu.msg_seq & 0xf;
        if (!seq)
                seq = ++dev->mcu.msg_seq & 0xf;

        info = MT_MCU_MSG_TYPE_CMD |
               MT76_SET(MT_MCU_MSG_CMD_TYPE, cmd) |
               MT76_SET(MT_MCU_MSG_CMD_SEQ, seq) |
               MT76_SET(MT_MCU_MSG_PORT, CPU_TX_PORT) |
               MT76_SET(MT_MCU_MSG_LEN, skb->len);

        ret = __mt76_tx_queue_skb(dev, MT_TXQ_MCU, skb, info);
        if (ret)
                goto out;

        while (1) {
                u32 *rxfce;
                bool check_seq = false;

                skb = mt76_mcu_get_response(dev, expires);
                if (!skb) {
                        dev_err(dev->dev,
                                "MCU message %d (seq %d) timed out\n",
                                cmd, seq);
                        ret = -ETIMEDOUT;
                        break;
                }

                rxfce = (u32 *) skb->cb;

                if (seq == MT76_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
                        check_seq = true;

                dev_kfree_skb(skb);
                if (check_seq)
                        break;
        }

out:
        mutex_unlock(&dev->mcu.mutex);

        return ret;
}
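/*
 * Example of how a caller might build and submit a command through
 * mt76_mcu_msg_send(). The helper mt76_mcu_msg_alloc() and the
 * CMD_FUN_SET_OP command ID are assumptions made for illustration;
 * substitute whatever skb-allocation helper and command IDs the driver
 * actually provides.
 */
static int mt76_mcu_function_select_example(struct mt76_dev *dev,
                                            u32 func, u32 val)
{
        struct {
                __le32 id;
                __le32 value;
        } __packed __aligned(4) msg = {
                .id = cpu_to_le32(func),
                .value = cpu_to_le32(val),
        };
        struct sk_buff *skb;

        /* assumed helper: allocate an skb and copy the payload into it */
        skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
        if (!skb)
                return -ENOMEM;

        return mt76_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
}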
static void
mt76_phy_update_channel_gain(struct mt76_dev *dev)
{
        u32 val = mt76_rr(dev, MT_BBP(AGC, 20));
        int rssi0 = (s8) MT76_GET(MT_BBP_AGC20_RSSI0, val);
        int rssi1 = (s8) MT76_GET(MT_BBP_AGC20_RSSI1, val);
        bool low_gain;
        u8 gain[2], gain_delta;

        /* Per-chain exponential moving average in 8-bit fixed point:
         * avg = 15/16 * avg + 1/16 * (rssi << 8). The sum of both chains is
         * divided by 2 * 256 to get back to plain dBm.
         */
        dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 +
                               (rssi0 << 8) / 16;
        dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 +
                               (rssi1 << 8) / 16;
        dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] +
                                 dev->cal.avg_rssi[1]) / 512;

        low_gain = dev->cal.avg_rssi_all > mt76_get_rssi_gain_thresh(dev);
        if (dev->cal.low_gain == low_gain)
                return;

        dev->cal.low_gain = low_gain;

        if (dev->chandef.width >= NL80211_CHAN_WIDTH_40)
                val = 0x1e42 << 16;
        else
                val = 0x1836 << 16;

        mt76_get_agc_gain(dev, gain);
        val |= 0xf8;

        if (dev->chandef.width == NL80211_CHAN_WIDTH_80)
                mt76_wr(dev, MT_BBP(RXO, 14), 0x00560411);
        else
                mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);

        if (low_gain) {
                mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
                mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
                if (mt76_has_ext_lna(dev))
                        gain_delta = 10;
                else
                        gain_delta = 14;
        } else {
                mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
                mt76_wr(dev, MT_BBP(AGC, 37), 0x1010161C);
                gain_delta = 0;
        }

        mt76_wr(dev, MT_BBP(AGC, 8),
                val | MT76_SET(MT_BBP_AGC_GAIN, gain[0] - gain_delta));
        mt76_wr(dev, MT_BBP(AGC, 9),
                val | MT76_SET(MT_BBP_AGC_GAIN, gain[1] - gain_delta));
}
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
                                   u32 seg_len, struct page *p)
{
        struct sk_buff *skb;
        struct mt7601u_rxwi *rxwi;
        u32 fce_info, truesize = seg_len;

        /* DMA_INFO field at the beginning of the segment contains only some
         * of the information, we need to read the FCE descriptor from the
         * end.
         */
        fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
        seg_len -= MT_FCE_INFO_LEN;

        data += MT_DMA_HDR_LEN;
        seg_len -= MT_DMA_HDR_LEN;

        rxwi = (struct mt7601u_rxwi *) data;
        data += sizeof(struct mt7601u_rxwi);
        seg_len -= sizeof(struct mt7601u_rxwi);

        if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
                dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
        if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
                dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

        trace_mt_rx(dev, rxwi, fce_info);

        skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
        if (!skb)
                return;

        ieee80211_rx_ni(dev->hw, skb);
}
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                 int *len, u32 *info, bool *more)
{
        struct mt76_queue_entry *e = &q->entry[idx];
        struct mt76_desc *desc = &q->desc[idx];
        dma_addr_t buf_addr;
        void *buf = e->buf;
        int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

        /* descriptor fields are little endian, convert before use */
        buf_addr = le32_to_cpu(ACCESS_ONCE(desc->buf0));
        if (len) {
                u32 ctl = le32_to_cpu(ACCESS_ONCE(desc->ctrl));

                *len = MT76_GET(MT_DMA_CTL_SD_LEN0, ctl);
                *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
        }

        if (info)
                *info = le32_to_cpu(desc->info);

        dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
        e->buf = NULL;

        return buf;
}
static void
mt76_adjust_agc_gain(struct mt76_dev *dev, int reg, s8 offset)
{
        s8 gain;

        gain = MT76_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
        gain += offset;
        mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
}
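/*
 * Hypothetical use of mt76_adjust_agc_gain(): nudge the gain of both receive
 * chains by the same amount. The register indices 8 and 9 match the
 * MT_BBP(AGC, 8)/MT_BBP(AGC, 9) writes in mt76_phy_update_channel_gain()
 * above; whether the driver has a wrapper like this is an assumption.
 */
static void mt76_adjust_vga_gain_example(struct mt76_dev *dev, s8 delta)
{
        /* chain 0 and chain 1 gain live in AGC registers 8 and 9 */
        mt76_adjust_agc_gain(dev, 8, delta);
        mt76_adjust_agc_gain(dev, 9, delta);
}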
static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
{
        struct urb *urb = dev->mcu.resp.urb;
        u32 rxfce;
        int urb_status, ret, i = 5;

        while (i--) {
                if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
                                                 msecs_to_jiffies(300))) {
                        dev_warn(dev->dev, "Warning: %s retrying\n", __func__);
                        continue;
                }

                /* Make copies of important data before reusing the urb */
                rxfce = get_unaligned_le32(dev->mcu.resp.buf);
                urb_status = urb->status * mt7601u_urb_has_error(urb);

                ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
                                             &dev->mcu.resp, GFP_KERNEL,
                                             mt7601u_complete_urb,
                                             &dev->mcu.resp_cmpl);
                if (ret)
                        return ret;

                if (urb_status)
                        dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
                                urb_status);

                if (MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
                    MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
                        return 0;

                dev_err(dev->dev, "Error: MCU resp evt:%hhx seq:%hhx-%hhx!\n",
                        MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
                        seq, MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
        }

        dev_err(dev->dev, "Error: %s timed out\n", __func__);
        return -ETIMEDOUT;
}
static void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
{
        u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);

        switch (MT76_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
        case BOARD_TYPE_5GHZ:
                dev->mt76.cap.has_5ghz = true;
                break;
        case BOARD_TYPE_2GHZ:
                dev->mt76.cap.has_2ghz = true;
                break;
        default:
                dev->mt76.cap.has_2ghz = true;
                dev->mt76.cap.has_5ghz = true;
                break;
        }
}
static void mt76_fixup_xtal(struct mt76_dev *dev)
{
        u16 eep_val;
        s8 offset = 0;

        eep_val = mt76_eeprom_get(dev, MT_EE_XTAL_TRIM_2);

        /* Low byte: 7-bit trim offset, bit 7 selects a negative trim,
         * 0xff means the field is not programmed.
         */
        offset = eep_val & 0x7f;
        if ((eep_val & 0xff) == 0xff)
                offset = 0;
        else if (eep_val & 0x80)
                offset = 0 - offset;

        /* High byte: trim value, fall back to XTAL_TRIM_1 and finally to a
         * default of 0x14 if unprogrammed.
         */
        eep_val >>= 8;
        if (eep_val == 0x00 || eep_val == 0xff) {
                eep_val = mt76_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
                eep_val &= 0xff;

                if (eep_val == 0x00 || eep_val == 0xff)
                        eep_val = 0x14;
        }

        eep_val &= 0x7f;
        mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
        mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);

        eep_val = mt76_eeprom_get(dev, MT_EE_NIC_CONF_2);
        switch (MT76_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
        case 0:
                mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
                break;
        case 1:
                mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
                break;
        default:
                break;
        }
}
static int mt76pci_load_firmware(struct mt76_dev *dev)
{
        const struct firmware *fw;
        const struct mt76_fw_header *hdr;
        int i, len, ret;
        __le32 *cur;
        u32 offset, val;

        ret = request_firmware(&fw, MT7662_FIRMWARE, dev->dev);
        if (ret)
                return ret;

        if (!fw || !fw->data || fw->size < sizeof(*hdr))
                goto error;

        hdr = (const struct mt76_fw_header *) fw->data;

        len = sizeof(*hdr);
        len += le32_to_cpu(hdr->ilm_len);
        len += le32_to_cpu(hdr->dlm_len);

        if (fw->size != len)
                goto error;

        val = le16_to_cpu(hdr->fw_ver);
        dev_info(dev->dev, "Firmware Version: %d.%d.%02d\n",
                 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);

        val = le16_to_cpu(hdr->build_ver);
        dev_info(dev->dev, "Build: %x\n", val);
        dev_info(dev->dev, "Build Time: %.16s\n", hdr->build_time);

        /* The image is the ILM followed by the DLM; upload each through its
         * own PCIe remap window.
         */
        cur = (__le32 *) (fw->data + sizeof(*hdr));
        len = le32_to_cpu(hdr->ilm_len);

        mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
        write_data(dev, MT_MCU_ILM_ADDR, cur, len);

        cur += len / sizeof(*cur);
        len = le32_to_cpu(hdr->dlm_len);

        if (mt76xx_rev(dev) >= MT76XX_REV_E3)
                offset = MT_MCU_DLM_ADDR_E3;
        else
                offset = MT_MCU_DLM_ADDR;

        mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
        write_data(dev, offset, cur, len);

        mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);

        val = mt76_eeprom_get(dev, MT_EE_NIC_CONF_2);
        if (MT76_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
                mt76_set(dev, MT_MCU_COM_REG0, BIT(30));

        /* trigger firmware */
        mt76_wr(dev, MT_MCU_INT_LEVEL, 2);

        /* wait up to 2 seconds for the MCU to report that it is running */
        for (i = 200; i > 0; i--) {
                val = mt76_rr(dev, MT_MCU_COM_REG0);
                if (val & 1)
                        break;
                msleep(10);
        }

        if (!i) {
                dev_err(dev->dev, "Firmware failed to start\n");
                release_firmware(fw);
                return -ETIMEDOUT;
        }

        dev_info(dev->dev, "Firmware running!\n");
        release_firmware(fw);

        return ret;

error:
        dev_err(dev->dev, "Invalid firmware\n");
        release_firmware(fw);

        return -ENOENT;
}
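/*
 * Not shown in this listing: drivers normally declare the firmware images
 * they may request so that userspace tooling (initramfs generators and the
 * like) can pick them up. For the image loaded above that would typically
 * be a single declaration like the one below, assuming MT7662_FIRMWARE
 * expands to the firmware file name.
 */
MODULE_FIRMWARE(MT7662_FIRMWARE);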