/* Restart the on-chip MCU on a PCI device: tear down the running MCU
 * state, hard-reset the MAC, reload the MCU firmware and finally reset
 * all WPDMA ring indices.
 *
 * Returns 0 on success or a negative errno from the firmware load.
 */
static int mt76pci_mcu_restart(struct mt76_dev *mdev)
{
	struct mt76x02_dev *dev;
	int ret;

	dev = container_of(mdev, struct mt76x02_dev, mt76);

	mt76x02_mcu_cleanup(dev);
	mt76x2_mac_reset(dev, true);

	ret = mt76pci_load_firmware(dev);
	if (ret)
		return ret;

	/* reset DMA indices of every WPDMA ring (all-ones mask) */
	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	return 0;
}
/* RF front-end power-on patch sequence.
 *
 * Vendor-prescribed sequence of raw register pokes with mandatory settle
 * delays between steps; the register offsets (0x10130, 0x1001c, ...) are
 * undocumented, so the order and delays must not be changed.
 */
static void mt76_power_on_rf_patch(struct mt76_dev *dev)
{
	mt76_set(dev, 0x10130, BIT(0) | BIT(16));
	udelay(1);

	mt76_clear(dev, 0x1001c, 0xff);
	mt76_set(dev, 0x1001c, 0x30);

	mt76_wr(dev, 0x10014, 0x484f);
	udelay(1);

	mt76_set(dev, 0x10130, BIT(17));
	udelay(125);

	mt76_clear(dev, 0x10130, BIT(16));
	udelay(50);

	mt76_set(dev, 0x1014c, BIT(19) | BIT(20));
}
/* Program one per-VIF shared (group) key slot in hardware.
 *
 * @dev:     device state
 * @vif_idx: virtual interface index selecting the MT_SKEY_MODE register
 * @key_idx: key slot (0-3) within that interface
 * @key:     mac80211 key, or NULL to clear the slot
 *
 * Returns 0 on success, -EOPNOTSUPP if the cipher is not supported by
 * the hardware key engine.
 */
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
				 u8 key_idx, struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 val;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	/* a non-NULL key mapping to MT_CIPHER_NONE means "unsupported";
	 * key == NULL (clear the slot) is always accepted
	 */
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	/* read-modify-write only this slot's cipher-mode field */
	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);

	/* always write the full 32-byte key area (zero padded by
	 * mt76x02_mac_get_key_info for shorter/absent keys)
	 */
	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
		     sizeof(key_data));

	return 0;
}
/* Write the per-rate TX power table into the MT_TX_PWR_CFG_* registers.
 *
 * Each register packs four per-rate power values via mt76_tx_power_mask().
 * NOTE(review): CFG_3/4 and CFG_7-9 reuse lower-index ht/ofdm/vht entries;
 * this looks like the vendor register layout (some rate groups share power
 * values) — verify the field mapping against the vendor power tables.
 */
static void mt76_apply_rate_power_table(struct mt76_dev *dev)
{
	struct mt76_rate_power t;

	mt76_get_rate_power(dev, &t);

	mt76_wr(dev, MT_TX_PWR_CFG_0,
		mt76_tx_power_mask(t.cck[0], t.cck[1], t.ofdm[0], t.ofdm[1]));
	mt76_wr(dev, MT_TX_PWR_CFG_1,
		mt76_tx_power_mask(t.ofdm[2], t.ofdm[3], t.ht[0], t.ht[1]));
	mt76_wr(dev, MT_TX_PWR_CFG_2,
		mt76_tx_power_mask(t.ht[2], t.ht[3], t.ht[4], t.ht[5]));
	mt76_wr(dev, MT_TX_PWR_CFG_3,
		mt76_tx_power_mask(t.ht[6], t.ht[7], t.ht[0], t.ht[1]));
	mt76_wr(dev, MT_TX_PWR_CFG_4,
		mt76_tx_power_mask(t.ht[2], t.ht[3], 0, 0));
	mt76_wr(dev, MT_TX_PWR_CFG_7,
		mt76_tx_power_mask(t.ofdm[2], t.vht[4], t.ht[3], t.vht[4]));
	mt76_wr(dev, MT_TX_PWR_CFG_8,
		mt76_tx_power_mask(t.ht[7], t.vht[4], t.vht[4], 0));
	mt76_wr(dev, MT_TX_PWR_CFG_9,
		mt76_tx_power_mask(t.ht[3], t.vht[4], t.vht[4], 0));
}
/* mac80211 configure_filter callback: translate FIF_* flags into the
 * hardware RX filter register (MT_WF_RFCR).
 *
 * The hardware bits are *drop* filters, so the logic is inverted: a DROP
 * bit is set when the corresponding FIF_ flag is NOT requested.
 */
static void mt7603_configure_filter(struct ieee80211_hw *hw,
				    unsigned int changed_flags,
				    unsigned int *total_flags, u64 multicast)
{
	struct mt7603_dev *dev = hw->priv;
	u32 flags = 0;

	/* record the accepted FIF_ flag, then set the hw drop bits only
	 * when that flag was not requested (note the `!(...)` inversion)
	 */
#define MT76_FILTER(_flag, _hw) do { \
		flags |= *total_flags & FIF_##_flag; \
		dev->rxfilter &= ~(_hw); \
		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
	} while (0)

	/* start from a known state: clear every drop bit we manage here */
	dev->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
			   MT_WF_RFCR_DROP_OTHER_BEACON |
			   MT_WF_RFCR_DROP_FRAME_REPORT |
			   MT_WF_RFCR_DROP_PROBEREQ |
			   MT_WF_RFCR_DROP_MCAST_FILTERED |
			   MT_WF_RFCR_DROP_MCAST |
			   MT_WF_RFCR_DROP_BCAST |
			   MT_WF_RFCR_DROP_DUPLICATE |
			   MT_WF_RFCR_DROP_A2_BSSID |
			   MT_WF_RFCR_DROP_UNWANTED_CTL |
			   MT_WF_RFCR_DROP_STBC_MULTI);

	MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_UC |
			       MT_WF_RFCR_DROP_OTHER_TIM |
			       MT_WF_RFCR_DROP_A3_MAC |
			       MT_WF_RFCR_DROP_A3_BSSID);

	MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);

	MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
			     MT_WF_RFCR_DROP_RTS |
			     MT_WF_RFCR_DROP_CTL_RSV |
			     MT_WF_RFCR_DROP_NDPA);

	/* report back to mac80211 which flags we actually honoured */
	*total_flags = flags;
	mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);
}
/* mac80211 config callback: apply monitor-mode, TX power and channel
 * changes under the device mutex.
 *
 * Returns 0 or the error from mt76x2_set_channel().
 */
static int mt76x2_config(struct ieee80211_hw *hw, u32 changed)
{
	struct mt76x2_dev *dev = hw->priv;
	int ret = 0;

	mutex_lock(&dev->mutex);

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		/* NOTE(review): intentionally inverted-looking — the
		 * PROMISC filter bit is set when monitor mode is OFF and
		 * cleared when it is ON; presumably the bit enables a
		 * "not-to-me" drop filter. Matches the existing behavior;
		 * confirm against the MT_RX_FILTR_CFG definition.
		 */
		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
			dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
		else
			dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;

		mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		/* hardware works in 0.5 dB units */
		dev->txpower_conf = hw->conf.power_level * 2;

		/* convert to per-chain power for 2x2 devices */
		dev->txpower_conf -= 6;

		if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
			mt76x2_phy_set_txpower(dev);
			mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf);
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		/* queues must be stopped across the channel switch */
		ieee80211_stop_queues(hw);
		ret = mt76x2_set_channel(dev, &hw->conf.chandef);
		ieee80211_wake_queues(hw);
	}

	mutex_unlock(&dev->mutex);

	return ret;
}
/* Reset the MAC block and reprogram its static configuration.
 *
 * @dev:  device state
 * @hard: when true, additionally wipe all per-station WCID entries,
 *        shared keys, BSSID/beacon slots and drain the TX status FIFO.
 *
 * Returns 0 on success, -ETIMEDOUT if the MAC never became ready.
 */
int mt76_mac_reset(struct mt76_dev *dev, bool hard)
{
	static const u8 null_addr[ETH_ALEN] = {};
	u32 val;
	int i, k;

	if (!mt76_wait_for_mac(dev))
		return -ETIMEDOUT;

	/* stop TX/RX DMA, clear busy flags and set burst size to 3 */
	val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
	val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN |
		 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
		 MT_WPDMA_GLO_CFG_RX_DMA_EN |
		 MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
		 MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
	val |= MT76_SET(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);

	mt76_mac_pbf_init(dev);
	mt76_write_mac_initvals(dev);
	mt76_fixup_xtal(dev);

	/* release CSR/BBP reset */
	mt76_clear(dev, MT_MAC_SYS_CTRL,
		   MT_MAC_SYS_CTRL_RESET_CSR |
		   MT_MAC_SYS_CTRL_RESET_BBP);

	if (is_mt7612(dev))
		mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);

	mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
	mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));

	/* RF bypass pulse with a settle delay in between */
	mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
	mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
	msleep(5);
	mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);

	mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);

	/* own MAC address, also used as primary BSSID */
	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
	mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4));

	mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(dev->macaddr));
	mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(dev->macaddr + 4) |
		MT76_SET(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);

	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
		       8 << 4);
	mt76_wr(dev, MT_INT_TIMER_EN, 0);

	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
	if (!hard)
		return 0;

	/* hard reset: wipe every WCID entry ... */
	for (i = 0; i < 256; i++)
		mt76_mac_wcid_setup(dev, i, 0, NULL);

	/* ... all 16x4 shared key slots ... */
	for (i = 0; i < 16; i++)
		for (k = 0; k < 4; k++)
			mt76_mac_shared_key_setup(dev, i, k, NULL);

	/* ... and all 8 BSSID/beacon slots */
	for (i = 0; i < 8; i++) {
		mt76_mac_set_bssid(dev, i, null_addr);
		mt76_mac_set_beacon(dev, i, NULL);
	}

	/* drain any stale entries from the TX status FIFO */
	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_STAT_FIFO);

	mt76_set(dev, MT_MAC_APC_BSSID_H(0), MT_MAC_APC_BSSID0_H_EN);
	mt76_init_beacon_offsets(dev);

	return 0;
}
/* Load and start the MCU firmware on a MT7610U USB device.
 *
 * Validates the firmware image (header, ILM size, total length), sets up
 * USB DMA and the FCE (frame conversion engine), then uploads and starts
 * the firmware. Skips the upload entirely if firmware is already running.
 *
 * Returns 0 on success, -ENOENT for an invalid image, or a negative
 * errno from request_firmware()/upload.
 */
static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
{
	const struct firmware *fw;
	const struct mt76x02_fw_header *hdr;
	int len, ret;
	u32 val;

	mt76_wr(dev, MT_USB_DMA_CFG,
		(MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN));

	if (mt76x0_firmware_running(dev))
		return 0;

	ret = request_firmware(&fw, MT7610U_FIRMWARE, dev->mt76.dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr))
		goto err_inv_fw;

	hdr = (const struct mt76x02_fw_header *)fw->data;

	/* ILM must be larger than the in-vector-block prefix */
	if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
		goto err_inv_fw;

	/* total size must be exactly header + ILM + DLM */
	len = sizeof(*hdr);
	len += le32_to_cpu(hdr->ilm_len);
	len += le32_to_cpu(hdr->dlm_len);

	if (fw->size != len)
		goto err_inv_fw;

	val = le16_to_cpu(hdr->fw_ver);
	dev_dbg(dev->mt76.dev,
		"Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
		(val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
		le16_to_cpu(hdr->build_ver), hdr->build_time);

	len = le32_to_cpu(hdr->ilm_len);

	mt76_wr(dev, 0x1004, 0x2c);

	mt76_set(dev, MT_USB_DMA_CFG,
		 (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) |
		 FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
	mt76x02u_mcu_fw_reset(dev);
	usleep_range(5000, 6000);
/*
	mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
				       MT_PBF_CFG_TX1Q_EN |
				       MT_PBF_CFG_TX2Q_EN |
				       MT_PBF_CFG_TX3Q_EN));
*/
	mt76_wr(dev, MT_FCE_PSE_CTRL, 1);

	/* FCE tx_fs_base_ptr */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
	/* FCE tx_fs_max_cnt */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
	/* FCE pdma enable */
	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
	/* FCE skip_fs_en */
	mt76_wr(dev, MT_FCE_SKIP_FS, 3);

	/* pulse the UDMA TX watermark-drop bit */
	val = mt76_rr(dev, MT_USB_DMA_CFG);
	val |= MT_USB_DMA_CFG_UDMA_TX_WL_DROP;
	mt76_wr(dev, MT_USB_DMA_CFG, val);
	val &= ~MT_USB_DMA_CFG_UDMA_TX_WL_DROP;
	mt76_wr(dev, MT_USB_DMA_CFG, val);

	ret = mt76x0u_upload_firmware(dev, hdr);
	release_firmware(fw);

	mt76_wr(dev, MT_FCE_PSE_CTRL, 1);

	return ret;

err_inv_fw:
	dev_err(dev->mt76.dev, "Invalid firmware image\n");
	release_firmware(fw);
	return -ENOENT;
}
/* Switch the PHY to a new channel definition.
 *
 * Computes the control-channel number and bandwidth index for 40/80 MHz
 * operation, reprograms TX power, RX gain, band/bandwidth and extension
 * CCA configuration, then asks the MCU to perform the actual switch and
 * runs the per-channel calibrations (skipped while scanning).
 *
 * Returns 0 on success or the error from the MCU channel-switch command.
 */
int mt76_phy_set_channel(struct mt76_dev *dev,
			 struct cfg80211_chan_def *chandef)
{
	struct ieee80211_channel *chan = chandef->chan;
	bool scan = test_bit(MT76_SCANNING, &dev->state);
	enum ieee80211_band band = chan->band;
	u8 channel;

	/* per-secondary-channel-position EXT_CCA routing tables; entry
	 * is selected by ch_group_index below
	 */
	u32 ext_cca_chan[4] = {
		[0] = MT76_SET(MT_EXT_CCA_CFG_CCA0, 0) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA1, 1) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA2, 2) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA3, 3) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
		[1] = MT76_SET(MT_EXT_CCA_CFG_CCA0, 1) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA1, 0) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA2, 2) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA3, 3) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
		[2] = MT76_SET(MT_EXT_CCA_CFG_CCA0, 2) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA1, 3) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA2, 1) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA3, 0) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
		[3] = MT76_SET(MT_EXT_CCA_CFG_CCA0, 3) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA1, 2) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA2, 1) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA3, 0) |
		      MT76_SET(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
	};
	int ch_group_index;
	u8 bw, bw_index;
	int freq, freq1;
	int ret;
	u8 sifs = 13;

	dev->chandef = *chandef;
	dev->cal.channel_cal_done = false;
	freq = chandef->chan->center_freq;
	freq1 = chandef->center_freq1;
	channel = chan->hw_value;

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_40:
		bw = 1;
		/* secondary channel above or below the primary */
		if (freq1 > freq) {
			bw_index = 1;
			ch_group_index = 0;
		} else {
			bw_index = 3;
			ch_group_index = 1;
		}
		channel += 2 - ch_group_index * 4;
		break;
	case NL80211_CHAN_WIDTH_80:
		/* position of the primary within the 80 MHz block (0-3) */
		ch_group_index = (freq - freq1 + 30) / 20;
		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
			ch_group_index = 0;
		bw = 2;
		bw_index = ch_group_index;
		channel += 6 - ch_group_index * 4;
		break;
	default:
		bw = 0;
		bw_index = 0;
		ch_group_index = 0;
		break;
	}

	mt76_read_rx_gain(dev);
	mt76_phy_set_txpower_regs(dev, band);
	mt76_configure_tx_delay(dev, band, bw);
	mt76_phy_set_txpower(dev);
	mt76_apply_rate_power_table(dev);
	mt76_set_rx_chains(dev);
	mt76_phy_set_band(dev, chan->band, ch_group_index & 1);
	mt76_phy_set_bw(dev, chandef->width, ch_group_index);
	mt76_set_tx_dac(dev);

	mt76_rmw(dev, MT_EXT_CCA_CFG,
		 (MT_EXT_CCA_CFG_CCA0 |
		  MT_EXT_CCA_CFG_CCA1 |
		  MT_EXT_CCA_CFG_CCA2 |
		  MT_EXT_CCA_CFG_CCA3 |
		  MT_EXT_CCA_CFG_CCA_MASK),
		 ext_cca_chan[ch_group_index]);

	/* SIFS is one unit longer on 40/80 MHz channels */
	if (chandef->width >= NL80211_CHAN_WIDTH_40)
		sifs++;
	mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS,
		       sifs);

	ret = mt76_mcu_set_channel(dev, channel, bw, bw_index, scan);
	if (ret)
		return ret;

	mt76_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);

	/* Enable LDPC Rx */
	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
		mt76_set(dev, MT_BBP(RXO, 13), BIT(10));

	/* one-time R calibration, only if the EEPROM has a valid result */
	if (!dev->cal.init_cal_done) {
		u8 val = mt76_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
		if (val != 0xff)
			mt76_mcu_calibrate(dev, MCU_CAL_R, 0);
	}

	mt76_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);

	/* Rx LPF calibration */
	if (!dev->cal.init_cal_done)
		mt76_mcu_calibrate(dev, MCU_CAL_RC, 0);
	dev->cal.init_cal_done = true;

	mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2);
	mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
	mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
	mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);

	/* skip full calibration and periodic work while scanning */
	if (scan)
		return 0;

	dev->cal.low_gain = -1;
	mt76_phy_channel_calibrate(dev, true);
	mt76_get_agc_gain(dev, dev->cal.agc_gain_init);

	ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);

	return 0;
}

/* Run TSSI (transmit signal strength indicator) power compensation.
 *
 * After the first full compensation (cal_mode BIT(1)) has been done, only
 * a cheap TSSI trigger (cal_mode BIT(0)) is issued on subsequent calls.
 * A one-time DPD calibration follows the first compensation when no
 * external PA is in use.
 */
static void mt76_phy_tssi_compensate(struct mt76_dev *dev)
{
	struct ieee80211_channel *chan = dev->chandef.chan;
	struct mt76_tx_power_info txp;
	struct mt76_tssi_comp t = {};

	if (!dev->cal.tssi_cal_done)
		return;

	if (dev->cal.tssi_comp_done) {
		/* TSSI trigger */
		t.cal_mode = BIT(0);
		mt76_mcu_tssi_comp(dev, &t);
	} else {
		/* wait until the baseband reports TSSI data ready */
		if (!(mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4)))
			return;

		mt76_get_power_info(dev, &txp);

		if (mt76_ext_pa_enabled(dev, chan->band))
			t.pa_mode = 1;

		t.cal_mode = BIT(1);
		t.slope0 = txp.chain[0].tssi_slope;
		t.offset0 = txp.chain[0].tssi_offset;
		t.slope1 = txp.chain[1].tssi_slope;
		t.offset1 = txp.chain[1].tssi_offset;
		dev->cal.tssi_comp_done = true;
		mt76_mcu_tssi_comp(dev, &t);

		if (t.pa_mode || dev->cal.dpd_cal_done)
			return;

		msleep(10);
		mt76_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
		dev->cal.dpd_cal_done = true;
	}
}

/* Apply temperature-based TX power compensation.
 *
 * Converts the raw sensor reading to degrees Celsius (the 1789/1000
 * factor is the sensor's scale), derives a dB offset from the EEPROM
 * slope/bound parameters and programs it into both ALC registers in
 * 0.5 dB units.
 */
static void mt76_phy_temp_compensate(struct mt76_dev *dev)
{
	struct mt76_temp_comp t;
	int temp, db_diff;

	if (mt76_get_temp_comp(dev, &t))
		return;

	temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL);
	temp -= t.temp_25_ref;
	temp = (temp * 1789) / 1000 + 25;
	dev->cal.temp = temp;

	if (temp > 25)
		db_diff = (temp - 25) / t.high_slope;
	else
		db_diff = (25 - temp) / t.low_slope;

	/* clamp to the EEPROM-provided bounds */
	db_diff = min(db_diff, t.upper_bound);
	db_diff = max(db_diff, t.lower_bound);

	mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
		       db_diff * 2);
	mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
		       db_diff * 2);
}

/* Periodic calibration worker; re-arms itself every
 * MT_CALIBRATE_INTERVAL.
 */
void mt76_phy_calibrate(struct work_struct *work)
{
	struct mt76_dev *dev;

	dev = container_of(work, struct mt76_dev, cal_work.work);
	mt76_phy_channel_calibrate(dev, false);
	mt76_phy_tssi_compensate(dev);
	mt76_phy_temp_compensate(dev);
	mt76_phy_update_channel_gain(dev);
	ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);
}

/* Turn the radio on and load the BBP control registers via the MCU.
 * Returns 0 on success or the radio-state command's error.
 */
int mt76_phy_start(struct mt76_dev *dev)
{
	int ret;

	ret = mt76_mcu_set_radio_state(dev, true);
	if (ret)
		return ret;

	mt76_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);

	return ret;
}
/* Program the band-specific PA mode and TX ALC registers.
 *
 * Selects PA-mode bitmaps and ALC/gain-correction values depending on
 * the band (2 GHz vs 5 GHz) and on whether an external PA is fitted.
 * The constants are vendor-provided register values.
 */
static void mt76_phy_set_txpower_regs(struct mt76_dev *dev,
				      enum ieee80211_band band)
{
	u32 pa_mode[2];
	u32 pa_mode_adj;
	u32 tx_cfg = 0;

	if (band == IEEE80211_BAND_2GHZ) {
		pa_mode[0] = 0x010055ff;
		pa_mode[1] = 0x00550055;

		mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
		mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);

		if (mt76_ext_pa_enabled(dev, band)) {
			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
		} else {
			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
		}
	} else {
		pa_mode[0] = 0x0000ffff;
		pa_mode[1] = 0x00ff00ff;

		mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
		mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
		mt76_wr(dev, MT_TX_ALC_CFG_4, 0);

		if (mt76_ext_pa_enabled(dev, band)) {
			tx_cfg = 0x00830083;
			pa_mode_adj = 0x04000000;
		} else {
			pa_mode_adj = 0;
		}

		mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
		mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
	}

	mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
	mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
	mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
	mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
	/* tx_cfg is only non-zero for 5 GHz + external PA */
	mt76_wr(dev, MT_PROT_AUTO_TX_CFG, tx_cfg);

	if (mt76_ext_pa_enabled(dev, band)) {
		u32 val = 0x3c3c023c;

		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
		mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
		mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
	} else {
		if (band == IEEE80211_BAND_2GHZ) {
			u32 val = 0x0f3c3c3c;

			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
			mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
		} else {
			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
			mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
		}
	}
}
/* Send a two-step channel-switch command to the MCU.
 *
 * The channel is first set without extension-channel information, then
 * (after a short settle delay) re-sent with the extension channel index
 * encoded as 0xe0 + bw_index.
 *
 * Returns the result of the second (final) command; the first send's
 * status is intentionally ignored.
 */
int mt76_mcu_set_channel(struct mt76_dev *dev, u8 channel, u8 bw,
			 u8 bw_index, bool scan)
{
	struct sk_buff *skb;
	struct {
		u8 idx;
		u8 scan;
		u8 bw;
		u8 _pad0;

		__le16 chainmask;

		u8 ext_chan;
		u8 _pad1;
	} __packed __aligned(4) msg = {
		.idx = channel,
		.scan = scan,
		.bw = bw,
		.chainmask = cpu_to_le16(dev->chainmask),
	};

	/* first set the channel without the extension channel info */
	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
	mt76_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);

	msleep(5);

	msg.ext_chan = 0xe0 + bw_index;
	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
	return mt76_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
}

/* Ask the MCU to turn the radio on or off. */
int mt76_mcu_set_radio_state(struct mt76_dev *dev, bool on)
{
	struct sk_buff *skb;
	struct {
		__le32 mode;
		__le32 level;
	} __packed __aligned(4) msg = {
		.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
		.level = cpu_to_le32(0),
	};

	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
	return mt76_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
}

/* Run one MCU calibration of the given type and wait for completion.
 *
 * Completion is signalled by the MCU setting BIT(31) of
 * MT_MCU_COM_REG0 (cleared here before the command is sent).
 *
 * Returns 0 on success, -ETIMEDOUT if the MCU does not signal
 * completion within 100 ms, or the send error.
 */
int mt76_mcu_calibrate(struct mt76_dev *dev, enum mcu_calibration type,
		       u32 param)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(type),
		.value = cpu_to_le32(param),
	};
	int ret;

	mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));

	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
	ret = mt76_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
	if (ret)
		return ret;

	if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
				    BIT(31), BIT(31), 100)))
		return -ETIMEDOUT;

	return 0;
}

/* Send TSSI compensation parameters to the MCU. */
int mt76_mcu_tssi_comp(struct mt76_dev *dev,
		       struct mt76_tssi_comp *tssi_data)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		struct mt76_tssi_comp data;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(MCU_CAL_TSSI_COMP),
		.data = *tssi_data,
	};

	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
	return mt76_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
}

/* Initialize the MCU AGC gain for a channel; BIT(31) of the channel
 * field forces the update.
 */
int mt76_mcu_init_gain(struct mt76_dev *dev, u8 channel, u32 gain,
		       bool force)
{
	struct sk_buff *skb;
	struct {
		__le32 channel;
		__le32 gain_val;
	} __packed __aligned(4) msg = {
		.channel = cpu_to_le32(channel),
		.gain_val = cpu_to_le32(gain),
	};

	if (force)
		msg.channel |= cpu_to_le32(BIT(31));

	skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
	return mt76_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
}

/* Bring up the MCU: load the ROM patch, then the firmware, then select
 * the default queue. Returns 0 or the first load error.
 */
int mt76_mcu_init(struct mt76_dev *dev)
{
	int ret;

	mutex_init(&dev->mcu.mutex);

	ret = mt76pci_load_rom_patch(dev);
	if (ret)
		return ret;

	ret = mt76pci_load_firmware(dev);
	if (ret)
		return ret;

	mt76_mcu_function_select(dev, Q_SELECT, 1);
	return 0;
}

/* Stop the MCU via interrupt level 1 and give it time to wind down. */
int mt76_mcu_cleanup(struct mt76_dev *dev)
{
	mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
	msleep(20);

	return 0;
}
/* Load the MCU ROM patch on a PCI device.
 *
 * On ROM-protected chips (everything except MT7612) a hardware semaphore
 * guards the patch download; it is acquired first and always released on
 * exit. If the patch-applied bit is already set, the download is skipped.
 *
 * Returns 0 on success (or when already applied), -ETIMEDOUT on
 * semaphore/patch-trigger timeout, -EIO for an invalid image, or the
 * request_firmware() error.
 */
static int mt76pci_load_rom_patch(struct mt76x02_dev *dev)
{
	const struct firmware *fw = NULL;
	struct mt76x02_patch_header *hdr;
	bool rom_protect = !is_mt7612(dev);
	int len, ret = 0;
	__le32 *cur;
	u32 patch_mask, patch_reg;

	if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
		dev_err(dev->mt76.dev,
			"Could not get hardware semaphore for ROM PATCH\n");
		return -ETIMEDOUT;
	}

	/* the "patch applied" flag lives in a different register on E3+ */
	if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
		patch_mask = BIT(0);
		patch_reg = MT_MCU_CLOCK_CTL;
	} else {
		patch_mask = BIT(1);
		patch_reg = MT_MCU_COM_REG0;
	}

	if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
		dev_info(dev->mt76.dev, "ROM patch already applied\n");
		goto out;
	}

	ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
	if (ret)
		goto out;

	if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
		ret = -EIO;
		dev_err(dev->mt76.dev, "Failed to load firmware\n");
		goto out;
	}

	hdr = (struct mt76x02_patch_header *)fw->data;
	dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);

	/* map the patch area, copy the payload, then unmap */
	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);

	cur = (__le32 *) (fw->data + sizeof(*hdr));
	len = fw->size - sizeof(*hdr);
	mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);

	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);

	/* Trigger ROM */
	mt76_wr(dev, MT_MCU_INT_LEVEL, 4);

	if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
		dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
		ret = -ETIMEDOUT;
	}

out:
	/* release semaphore */
	if (rom_protect)
		mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
	release_firmware(fw);
	return ret;
}
/* Program band-specific PA mode, ALC and gain-correction registers.
 *
 * Selects vendor-provided register values depending on the band
 * (2 GHz vs 5 GHz) and on whether an external PA is fitted; unlike the
 * generic variant, on 5 GHz the ALC_CFG_2/3 values also differ between
 * internal and external PA.
 */
void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
				 enum nl80211_band band)
{
	u32 pa_mode[2];
	u32 pa_mode_adj;

	if (band == NL80211_BAND_2GHZ) {
		pa_mode[0] = 0x010055ff;
		pa_mode[1] = 0x00550055;

		mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
		mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);

		if (mt76x02_ext_pa_enabled(dev, band)) {
			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
		} else {
			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
		}
	} else {
		pa_mode[0] = 0x0000ffff;
		pa_mode[1] = 0x00ff00ff;

		if (mt76x02_ext_pa_enabled(dev, band)) {
			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
		} else {
			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
		}

		if (mt76x02_ext_pa_enabled(dev, band))
			pa_mode_adj = 0x04000000;
		else
			pa_mode_adj = 0;

		mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
		mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
	}

	mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
	mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
	mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
	mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);

	if (mt76x02_ext_pa_enabled(dev, band)) {
		u32 val;

		if (band == NL80211_BAND_2GHZ)
			val = 0x3c3c023c;
		else
			val = 0x363c023c;

		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
		mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
		mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
	} else {
		if (band == NL80211_BAND_2GHZ) {
			u32 val = 0x0f3c3c3c;

			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
			mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
		} else {
			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
			mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
		}
	}
}
/* One-time hardware bring-up: power on, read EEPROM, reset the MAC,
 * set up DMA, start the MAC long enough to initialize the MCU, then
 * stop it again and snapshot the RX filter.
 *
 * Returns 0 on success or the first sub-step's error.
 */
int mt76_init_hardware(struct mt76_dev *dev)
{
	/* beacon storage offsets in shared memory, 1024 bytes each */
	static const u16 beacon_offsets[16] = {
		/* 1024 byte per beacon */
		0xc000,
		0xc400,
		0xc800,
		0xcc00,
		0xd000,
		0xd400,
		0xd800,
		0xdc00,

		/* BSS idx 8-15 not used for beacons */
		0xc000,
		0xc000,
		0xc000,
		0xc000,
		0xc000,
		0xc000,
		0xc000,
		0xc000,
	};
	u32 val;
	int ret;

	dev->beacon_offsets = beacon_offsets;
	tasklet_init(&dev->pre_tbtt_tasklet, mt76_pre_tbtt_tasklet,
		     (unsigned long) dev);

	/* 2x2: two TX and two RX chains */
	dev->chainmask = 0x202;

	/* preserve burst size / endianness / header segment length,
	 * enable TX writeback-done reporting
	 */
	val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);

	mt76_reset_wlan(dev, true);
	mt76_power_on(dev);

	ret = mt76_eeprom_init(dev);
	if (ret)
		return ret;

	ret = mt76_mac_reset(dev, true);
	if (ret)
		return ret;

	ret = mt76_dma_init(dev);
	if (ret)
		return ret;

	set_bit(MT76_STATE_INITIALIZED, &dev->state);

	/* MAC must be running while the MCU is initialized */
	ret = mt76_mac_start(dev);
	if (ret)
		return ret;

	ret = mt76_mcu_init(dev);
	if (ret)
		return ret;

	mt76_mac_stop(dev, false);
	dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);

	return 0;
}
/* Shut down the MCU: restart it, hand firmware ownership back to the
 * chip (FW_OWN) and drop any queued MCU responses.
 */
void mt7615_mcu_exit(struct mt7615_dev *dev)
{
	mt7615_mcu_restart(dev);
	mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_FW_OWN);
	skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
}
static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get) { struct { __le32 operation; } req = { .operation = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE), }; struct event { u8 status; u8 reserved[3]; } *resp; struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req)); struct sk_buff *skb_ret; int ret; ret = mt7615_mcu_msg_send(dev, skb, -MCU_CMD_PATCH_SEM_CONTROL, MCU_Q_NA, MCU_S2D_H2N, &skb_ret); if (ret) goto out; resp = (struct event *)(skb_ret->data); ret = resp->status; dev_kfree_skb(skb_ret); out: return ret; } static int mt7615_mcu_start_patch(struct mt7615_dev *dev) { struct { u8 check_crc; u8 reserved[3]; } req = { .check_crc = 0, }; struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req)); return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_PATCH_FINISH_REQ, MCU_Q_NA, MCU_S2D_H2N, NULL); } static int mt7615_driver_own(struct mt7615_dev *dev) { mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_DRV_OWN); if (!mt76_poll_msec(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_FW_OWN, 0, 500)) { dev_err(dev->mt76.dev, "Timeout for driver own\n"); return -EIO; } return 0; } static int mt7615_load_patch(struct mt7615_dev *dev) { const struct firmware *fw; const struct mt7615_patch_hdr *hdr; const char *firmware = MT7615_ROM_PATCH; int len, ret, sem; sem = mt7615_mcu_patch_sem_ctrl(dev, 1); switch (sem) { case PATCH_IS_DL: return 0; case PATCH_NOT_DL_SEM_SUCCESS: break; default: dev_err(dev->mt76.dev, "Failed to get patch semaphore\n"); return -EAGAIN; } ret = request_firmware(&fw, firmware, dev->mt76.dev); if (ret) return ret; if (!fw || !fw->data || fw->size < sizeof(*hdr)) { dev_err(dev->mt76.dev, "Invalid firmware\n"); ret = -EINVAL; goto out; } hdr = (const struct mt7615_patch_hdr *)(fw->data); dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n", be32_to_cpu(hdr->hw_sw_ver), hdr->build_date); len = fw->size - sizeof(*hdr); ret = mt7615_mcu_init_download(dev, MCU_PATCH_ADDRESS, len, DL_MODE_NEED_RSP); if (ret) { 
dev_err(dev->mt76.dev, "Download request failed\n"); goto out; } ret = mt7615_mcu_send_firmware(dev, fw->data + sizeof(*hdr), len); if (ret) { dev_err(dev->mt76.dev, "Failed to send firmware to device\n"); goto out; } ret = mt7615_mcu_start_patch(dev); if (ret) dev_err(dev->mt76.dev, "Failed to start patch\n"); out: release_firmware(fw); sem = mt7615_mcu_patch_sem_ctrl(dev, 0); switch (sem) { case PATCH_REL_SEM_SUCCESS: break; default: ret = -EAGAIN; dev_err(dev->mt76.dev, "Failed to release patch semaphore\n"); break; } return ret; } static u32 gen_dl_mode(u8 feature_set, bool is_cr4) { u32 ret = 0; ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ? (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0; ret |= FIELD_PREP(DL_MODE_KEY_IDX, FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set)); ret |= DL_MODE_NEED_RSP; ret |= is_cr4 ? DL_MODE_WORKING_PDA_CR4 : 0; return ret; }
/* Reset the MAC block and reprogram its static configuration
 * (mt76x02 variant).
 *
 * @dev:  device state
 * @hard: when true, additionally clear WCID drop masks, all WCID
 *        entries and TX rates, per-VIF WCIDs, shared keys, drain the
 *        TX status FIFO and re-arm the channel-time counters.
 *
 * Returns 0 on success, -ETIMEDOUT if the MAC never became ready.
 */
int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
{
	const u8 *macaddr = dev->mt76.macaddr;
	u32 val;
	int i, k;

	if (!mt76x02_wait_for_mac(&dev->mt76))
		return -ETIMEDOUT;

	/* stop TX/RX DMA, clear busy flags and set burst size to 3 */
	val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
	val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN |
		 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
		 MT_WPDMA_GLO_CFG_RX_DMA_EN |
		 MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
		 MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
	val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);

	mt76x2_mac_pbf_init(dev);
	mt76_write_mac_initvals(dev);
	mt76x2_fixup_xtal(dev);

	/* release CSR/BBP reset */
	mt76_clear(dev, MT_MAC_SYS_CTRL,
		   MT_MAC_SYS_CTRL_RESET_CSR |
		   MT_MAC_SYS_CTRL_RESET_BBP);

	if (is_mt7612(dev))
		mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);

	mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
	mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));

	/* RF bypass pulse with a settle delay in between */
	mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
	mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);

	mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);

	mt76x02_mac_setaddr(dev, macaddr);
	mt76x02_init_beacon_config(dev);
	if (!hard)
		return 0;

	/* hard reset: clear WCID drop bitmap ... */
	for (i = 0; i < 256 / 32; i++)
		mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);

	/* ... every WCID entry and its TX rate registers ... */
	for (i = 0; i < 256; i++) {
		mt76x02_mac_wcid_setup(dev, i, 0, NULL);
		mt76_wr(dev, MT_WCID_TX_RATE(i), 0);
		mt76_wr(dev, MT_WCID_TX_RATE(i) + 4, 0);
	}

	/* ... per-VIF WCIDs ... */
	for (i = 0; i < MT_MAX_VIFS; i++)
		mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);

	/* ... and all 16x4 shared key slots */
	for (i = 0; i < 16; i++)
		for (k = 0; k < 4; k++)
			mt76x02_mac_shared_key_setup(dev, i, k, NULL);

	/* drain any stale entries from the TX status FIFO */
	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_STAT_FIFO);

	mt76_wr(dev, MT_CH_TIME_CFG,
		MT_CH_TIME_CFG_TIMER_EN |
		MT_CH_TIME_CFG_TX_AS_BUSY |
		MT_CH_TIME_CFG_RX_AS_BUSY |
		MT_CH_TIME_CFG_NAV_AS_BUSY |
		MT_CH_TIME_CFG_EIFS_AS_BUSY |
		MT_CH_CCA_RC_EN |
		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));

	mt76x02_set_tx_ackto(dev);

	return 0;
}
/* Load and start the MCU firmware on a PCI device.
 *
 * Validates the image (header plus ILM and DLM section lengths), copies
 * ILM and DLM into MCU memory through the PCIe remap window, triggers
 * the firmware and waits for it to report running.
 *
 * Returns 0 on success, -ENOENT for an invalid image, -ETIMEDOUT if the
 * firmware does not start, or the request_firmware() error.
 */
static int mt76pci_load_firmware(struct mt76x02_dev *dev)
{
	const struct firmware *fw;
	const struct mt76x02_fw_header *hdr;
	int len, ret;
	__le32 *cur;
	u32 offset, val;

	ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr))
		goto error;

	hdr = (const struct mt76x02_fw_header *)fw->data;

	/* total size must be exactly header + ILM + DLM */
	len = sizeof(*hdr);
	len += le32_to_cpu(hdr->ilm_len);
	len += le32_to_cpu(hdr->dlm_len);

	if (fw->size != len)
		goto error;

	val = le16_to_cpu(hdr->fw_ver);
	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);

	val = le16_to_cpu(hdr->build_ver);
	dev_info(dev->mt76.dev, "Build: %x\n", val);
	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);

	cur = (__le32 *) (fw->data + sizeof(*hdr));
	len = le32_to_cpu(hdr->ilm_len);

	/* copy ILM through the remap window */
	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
	mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);

	cur += len / sizeof(*cur);
	len = le32_to_cpu(hdr->dlm_len);

	/* DLM lives at a different address on E3 and later revisions */
	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
		offset = MT_MCU_DLM_ADDR_E3;
	else
		offset = MT_MCU_DLM_ADDR;

	/* copy DLM, then close the remap window */
	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
	mt76_wr_copy(dev, offset, cur, len);

	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);

	val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
	if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
		mt76_set(dev, MT_MCU_COM_REG0, BIT(30));

	/* trigger firmware */
	mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
	if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) {
		dev_err(dev->mt76.dev, "Firmware failed to start\n");
		release_firmware(fw);
		return -ETIMEDOUT;
	}

	mt76x02_set_ethtool_fwver(dev, hdr);
	dev_info(dev->mt76.dev, "Firmware running!\n");

	release_firmware(fw);

	return ret;

error:
	dev_err(dev->mt76.dev, "Invalid firmware\n");
	release_firmware(fw);
	return -ENOENT;
}
/* Resume the MAC by re-enabling both TX and RX. */
void mt76_mac_resume(struct mt76_dev *dev)
{
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
}
/* Stop the MAC on a USB device, draining TX/RX as gracefully as the
 * hardware allows.
 *
 * RTS retries are temporarily disabled so in-flight frames terminate
 * quickly; several polling loops then wait for TX DMA, the TX/RX page
 * queues and the MAC itself to go idle. If the MAC refuses to idle, the
 * BBP is pulsed through a reset instead.
 *
 * Returns 0, or -EIO if the device has been removed.
 */
int mt76x2u_mac_stop(struct mt76x02_dev *dev)
{
	int i, count = 0, val;
	bool stopped = false;
	u32 rts_cfg;

	if (test_bit(MT76_REMOVED, &dev->mt76.state))
		return -EIO;

	/* disable RTS retries while stopping; restored at the end */
	rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);

	mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
	mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));

	/* wait tx dma to stop */
	for (i = 0; i < 2000; i++) {
		val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
		if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10)
			break;
		usleep_range(50, 100);
	}

	/* page count on TxQ */
	for (i = 0; i < 200; i++) {
		if (!(mt76_rr(dev, 0x0438) & 0xffffffff) &&
		    !(mt76_rr(dev, 0x0a30) & 0x000000ff) &&
		    !(mt76_rr(dev, 0x0a34) & 0xff00ff00))
			break;
		usleep_range(10, 20);
	}

	/* disable tx-rx */
	mt76_clear(dev, MT_MAC_SYS_CTRL,
		   MT_MAC_SYS_CTRL_ENABLE_RX |
		   MT_MAC_SYS_CTRL_ENABLE_TX);

	/* Wait for MAC to become idle */
	for (i = 0; i < 1000; i++) {
		if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) &&
		    !mt76_rr(dev, MT_BBP(IBI, 12))) {
			stopped = true;
			break;
		}
		usleep_range(10, 20);
	}

	/* MAC did not idle on its own: pulse the BBP through reset */
	if (!stopped) {
		mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
		mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));

		mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
		mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
	}

	/* page count on RxQ */
	for (i = 0; i < 200; i++) {
		if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) &&
		    !(mt76_rr(dev, 0x0a30) & 0xffffffff) &&
		    !(mt76_rr(dev, 0x0a34) & 0xffffffff) &&
		    ++count > 10)
			break;
		msleep(50);
	}

	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000))
		dev_warn(dev->mt76.dev, "MAC RX failed to stop\n");

	/* wait rx dma to stop */
	for (i = 0; i < 2000; i++) {
		val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
		if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10)
			break;
		usleep_range(50, 100);
	}

	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);

	return 0;
}
/* Reset and reinitialize the MAC.
 *
 * Always reprograms WPDMA, PBF, MAC init values, XTAL fixups, the MAC/BSSID
 * address registers and the beacon/pre-TBTT timers. When @hard is set it
 * additionally wipes all WCID entries, shared keys, per-VIF state, beacons
 * and the TX status FIFO, and re-arms channel-time accounting.
 *
 * Returns 0 on success, -ETIMEDOUT if the MAC never becomes ready.
 */
static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
{
	static const u8 null_addr[ETH_ALEN] = {};
	const u8 *macaddr = dev->mt76.macaddr;
	u32 val;
	int i, k;

	if (!mt76x2_wait_for_mac(dev))
		return -ETIMEDOUT;

	/* stop both DMA engines and set burst size to 3 */
	val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
	val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN |
		 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
		 MT_WPDMA_GLO_CFG_RX_DMA_EN |
		 MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
		 MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
	val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);

	mt76x2_mac_pbf_init(dev);
	mt76_write_mac_initvals(dev);
	mt76x2_fixup_xtal(dev);

	mt76_clear(dev, MT_MAC_SYS_CTRL,
		   MT_MAC_SYS_CTRL_RESET_CSR |
		   MT_MAC_SYS_CTRL_RESET_BBP);

	/* MT7612 has no BT coexistence hardware */
	if (is_mt7612(dev))
		mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);

	mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
	mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));

	/* RF bypass pulse around the RF settings write —
	 * NOTE(review): vendor-prescribed sequence, do not reorder
	 */
	mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
	mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);
	mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);

	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr));
	mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));

	mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr));
	mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) |
		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);

	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
		       8 << 4);
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
		       MT_DFS_GP_INTERVAL);
	mt76_wr(dev, MT_INT_TIMER_EN, 0);

	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
	if (!hard)
		return 0;

	/* hard reset: clear all WCID drop bits and station entries */
	for (i = 0; i < 256 / 32; i++)
		mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);

	for (i = 0; i < 256; i++)
		mt76x2_mac_wcid_setup(dev, i, 0, NULL);

	for (i = 0; i < MT_MAX_VIFS; i++)
		mt76x2_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);

	/* wipe all shared keys (16 VIFs x 4 key slots) */
	for (i = 0; i < 16; i++)
		for (k = 0; k < 4; k++)
			mt76x2_mac_shared_key_setup(dev, i, k, NULL);

	for (i = 0; i < 8; i++) {
		mt76x2_mac_set_bssid(dev, i, null_addr);
		mt76x2_mac_set_beacon(dev, i, NULL);
	}

	/* drain stale TX status entries */
	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_STAT_FIFO);

	mt76_wr(dev, MT_CH_TIME_CFG,
		MT_CH_TIME_CFG_TIMER_EN |
		MT_CH_TIME_CFG_TX_AS_BUSY |
		MT_CH_TIME_CFG_RX_AS_BUSY |
		MT_CH_TIME_CFG_NAV_AS_BUSY |
		MT_CH_TIME_CFG_EIFS_AS_BUSY |
		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));

	mt76x2_init_beacon_offsets(dev);

	mt76x2_set_tx_ackto(dev);

	return 0;
}
/* Periodically adapt the AGC gain to the observed RSSI.
 *
 * low_gain is 0..2: each RSSI threshold crossed adds one. If the coarse
 * gain band (bit 1 of low_gain) has not changed, only a fine VGA
 * adjustment is applied; otherwise the full set of band/width dependent
 * AGC registers is reprogrammed and the initial gains are re-derived.
 */
void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
{
	u8 *gain = dev->cal.agc_gain_init;
	u8 low_gain_delta, gain_delta;
	bool gain_change;
	int low_gain;
	u32 val;

	dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);

	low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
		   (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));

	/* reprogram only when the coarse band flips (or on first run,
	 * when low_gain is still negative)
	 */
	gain_change = dev->cal.low_gain < 0 ||
		      (dev->cal.low_gain & 2) ^ (low_gain & 2);
	dev->cal.low_gain = low_gain;

	if (!gain_change) {
		if (mt76x02_phy_adjust_vga_gain(dev))
			mt76x2_phy_set_gain_val(dev);
		return;
	}

	if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
		mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
		val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
		if (low_gain == 2)
			val |= 0x3;
		else
			val |= 0x5;
		mt76_wr(dev, MT_BBP(AGC, 26), val);
	} else {
		mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
	}

	if (mt76x2_has_ext_lna(dev))
		low_gain_delta = 10;
	else
		low_gain_delta = 14;

	if (low_gain == 2) {
		/* strong signal: back the gain off immediately */
		mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
		mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
		mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
		gain_delta = low_gain_delta;
		dev->cal.agc_gain_adjust = 0;
	} else {
		mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
		if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
			mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
		else
			mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
		mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
		gain_delta = 0;
		dev->cal.agc_gain_adjust = low_gain_delta;
	}

	dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
	dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
	mt76x2_phy_set_gain_val(dev);

	/* clear false CCA counters */
	mt76_rr(dev, MT_RX_STAT_1);
}
/* Load the MT7662U MCU firmware over USB.
 *
 * Validates the image layout, performs a vendor reset, configures USB DMA
 * and the FCE in-band command path, streams the ILM and DLM sections, then
 * sends the IVB trigger and polls for the firmware-ready bit.
 *
 * Returns 0 on success or a negative errno.
 */
static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
{
	u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
	const struct mt76x2_fw_header *hdr;
	int err, len, ilm_len, dlm_len;
	const struct firmware *fw;

	err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
	if (err < 0)
		return err;

	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
		err = -EINVAL;
		goto out;
	}

	hdr = (const struct mt76x2_fw_header *)fw->data;
	ilm_len = le32_to_cpu(hdr->ilm_len);
	dlm_len = le32_to_cpu(hdr->dlm_len);
	/* image must be exactly header + ILM + DLM */
	len = sizeof(*hdr) + ilm_len + dlm_len;
	if (fw->size != len) {
		err = -EINVAL;
		goto out;
	}

	val = le16_to_cpu(hdr->fw_ver);
	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);

	val = le16_to_cpu(hdr->build_ver);
	dev_info(dev->mt76.dev, "Build: %x\n", val);
	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);

	/* vendor reset */
	mt76u_mcu_fw_reset(&dev->mt76);
	usleep_range(5000, 10000);

	/* enable USB_DMA_CFG */
	val = MT_USB_DMA_CFG_RX_BULK_EN |
	      MT_USB_DMA_CFG_TX_BULK_EN |
	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
	/* enable FCE to send in-band cmd */
	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
	/* FCE tx_fs_base_ptr */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
	/* FCE tx_fs_max_cnt */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
	/* FCE pdma enable */
	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
	/* FCE skip_fs_en */
	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);

	/* load ILM */
	err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
				     ilm_len, MCU_FW_URB_MAX_PAYLOAD,
				     MT76U_MCU_ILM_OFFSET);
	if (err < 0) {
		err = -EIO;
		goto out;
	}

	/* load DLM — destination moved by 0x800 on E3 and later */
	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
		dlm_offset += 0x800;
	err = mt76u_mcu_fw_send_data(&dev->mt76,
				     fw->data + sizeof(*hdr) + ilm_len,
				     dlm_len, MCU_FW_URB_MAX_PAYLOAD,
				     dlm_offset);
	if (err < 0) {
		err = -EIO;
		goto out;
	}

	/* IVB vendor request starts the loaded firmware */
	mt76x2u_mcu_load_ivb(dev);
	if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
		dev_err(dev->mt76.dev,
			"firmware failed to start\n");
		err = -ETIMEDOUT;
		goto out;
	}
	mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
	/* enable FCE to send in-band cmd */
	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
	dev_dbg(dev->mt76.dev, "firmware running\n");

out:
	release_firmware(fw);
	return err;
}
/* Ask the MCU ROM code to prepare a firmware download region at @addr of
 * @len bytes. BIT(31) in mode selects the (undocumented here) download
 * mode — NOTE(review): exact semantics not visible in this file.
 */
static int
mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len)
{
	struct {
		__le32 addr;
		__le32 len;
		__le32 mode;
	} req = {
		.addr = cpu_to_le32(addr),
		.len = cpu_to_le32(len),
		.mode = cpu_to_le32(BIT(31)),
	};
	struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));

	return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
				   MCU_Q_NA);
}

/* Stream @len bytes of firmware to the MCU in scatter chunks sized to fit
 * one 4 KiB MCU TX descriptor buffer each.
 */
static int
mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len)
{
	struct sk_buff *skb;
	int ret = 0;

	while (len > 0) {
		int cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
				    len);

		skb = mt7603_mcu_msg_alloc(data, cur_len);
		if (!skb)
			return -ENOMEM;

		ret = __mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER,
					    MCU_Q_NA, NULL);
		if (ret)
			break;

		data += cur_len;
		len -= cur_len;
	}

	return ret;
}

/* Tell the ROM code to jump into the downloaded firmware; a nonzero @addr
 * overrides the default entry point.
 */
static int
mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr)
{
	struct {
		__le32 override;
		__le32 addr;
	} req = {
		.override = cpu_to_le32(addr ? 1 : 0),
		.addr = cpu_to_le32(addr),
	};
	struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));

	return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ,
				   MCU_Q_NA);
}

/* Restart the MCU download state machine (empty payload command). */
static int
mt7603_mcu_restart(struct mt7603_dev *dev)
{
	struct sk_buff *skb = mt7603_mcu_msg_alloc(NULL, 0);

	return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ,
				   MCU_Q_NA);
}

/* Select the per-chip/per-revision firmware image, download it through the
 * ROM code (bypassing the scheduler while doing so) and wait for it to
 * initialize. Skips the download if the firmware is already running.
 *
 * Returns 0 on success or a negative errno.
 */
static int mt7603_load_firmware(struct mt7603_dev *dev)
{
	const struct firmware *fw;
	const struct mt7603_fw_trailer *hdr;
	const char *firmware;
	int dl_len;
	u32 addr, val;
	int ret;

	if (is_mt7628(dev)) {
		if (mt76xx_rev(dev) == MT7628_REV_E1)
			firmware = MT7628_FIRMWARE_E1;
		else
			firmware = MT7628_FIRMWARE_E2;
	} else {
		if (mt76xx_rev(dev) < MT7603_REV_E2)
			firmware = MT7603_FIRMWARE_E1;
		else
			firmware = MT7603_FIRMWARE_E2;
	}

	ret = request_firmware(&fw, firmware, dev->mt76.dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
		dev_err(dev->mt76.dev, "Invalid firmware\n");
		ret = -EINVAL;
		goto out;
	}

	/* the descriptor trailer sits at the end of the image */
	hdr = (const struct mt7603_fw_trailer *)(fw->data + fw->size -
						 sizeof(*hdr));

	dev_info(dev->mt76.dev, "Firmware Version: %.10s\n", hdr->fw_ver);
	dev_info(dev->mt76.dev, "Build Time: %.15s\n", hdr->build_date);

	/* 0x50012498: remapped MCU reset register —
	 * NOTE(review): double write appears deliberate, confirm with
	 * vendor reference before touching
	 */
	addr = mt7603_reg_map(dev, 0x50012498);
	mt76_wr(dev, addr, 0x5);
	mt76_wr(dev, addr, 0x5);
	udelay(1);

	/* switch to bypass mode */
	mt76_rmw(dev, MT_SCH_4, MT_SCH_4_FORCE_QID,
		 MT_SCH_4_BYPASS | FIELD_PREP(MT_SCH_4_FORCE_QID, 5));

	val = mt76_rr(dev, MT_TOP_MISC2);
	if (val & BIT(1)) {
		dev_info(dev->mt76.dev, "Firmware already running...\n");
		goto running;
	}

	if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(0) | BIT(1), BIT(0), 500)) {
		dev_err(dev->mt76.dev, "Timeout waiting for ROM code to become ready\n");
		ret = -EIO;
		goto out;
	}

	/* +4: include the trailing CRC word in the download length */
	dl_len = le32_to_cpu(hdr->dl_len) + 4;
	ret = mt7603_mcu_init_download(dev, MCU_FIRMWARE_ADDRESS, dl_len);
	if (ret) {
		dev_err(dev->mt76.dev, "Download request failed\n");
		goto out;
	}

	ret = mt7603_mcu_send_firmware(dev, fw->data, dl_len);
	if (ret) {
		dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
		goto out;
	}

	ret = mt7603_mcu_start_firmware(dev, MCU_FIRMWARE_ADDRESS);
	if (ret) {
		dev_err(dev->mt76.dev, "Failed to start firmware\n");
		goto out;
	}

	if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(1), BIT(1), 500)) {
		dev_err(dev->mt76.dev, "Timeout waiting for firmware to initialize\n");
		ret = -EIO;
		goto out;
	}

running:
	/* leave bypass mode and pulse the scheduler reset bit */
	mt76_clear(dev, MT_SCH_4, MT_SCH_4_FORCE_QID | MT_SCH_4_BYPASS);

	mt76_set(dev, MT_SCH_4, BIT(8));
	mt76_clear(dev, MT_SCH_4, BIT(8));

	dev->mcu_running = true;
	dev_info(dev->mt76.dev, "firmware init done\n");

out:
	release_firmware(fw);

	return ret;
}

/* Set up the MCU mutex and load the firmware. */
int mt7603_mcu_init(struct mt7603_dev *dev)
{
	mutex_init(&dev->mt76.mmio.mcu.mutex);

	return mt7603_load_firmware(dev);
}
/* Switch the radio on or off via the MCU power-saving command. */
int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
{
	struct {
		__le32 mode;
		__le32 level;
	} __packed __aligned(4) msg = {
		.mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
		.level = cpu_to_le32(0),
	};
	struct sk_buff *skb;

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP,
				  false);
}

/* Ask the MCU to load a CR (configuration register) table of the given
 * @type for @channel at @temp_level. cfg carries EEPROM NIC_CONF bits with
 * BIT(31) set — NOTE(review): flag meaning not visible here.
 */
int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
			u8 channel)
{
	struct {
		u8 cr_mode;
		u8 temp;
		u8 ch;
		u8 _pad0;
		__le32 cfg;
	} __packed __aligned(4) msg = {
		.cr_mode = type,
		.temp = temp_level,
		.ch = channel,
	};
	struct sk_buff *skb;
	u32 val;

	val = BIT(31);
	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
	msg.cfg = cpu_to_le32(val);

	/* first set the channel without the extension channel info */
	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true);
}

/* Program the operating channel. The switch is done in two MCU calls:
 * first without the extension-channel byte, then again with it set to
 * 0xe0 + bw_index after a short settle delay.
 */
int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
			    u8 bw_index, bool scan)
{
	struct {
		u8 idx;
		u8 scan;
		u8 bw;
		u8 _pad0;

		__le16 chainmask;
		u8 ext_chan;
		u8 _pad1;
	} __packed __aligned(4) msg = {
		.idx = channel,
		.scan = scan,
		.bw = bw,
		.chainmask = cpu_to_le16(dev->chainmask),
	};
	struct sk_buff *skb;

	/* first set the channel without the extension channel info */
	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);

	usleep_range(5000, 10000);

	msg.ext_chan = 0xe0 + bw_index;
	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP,
				  true);
}

/* Run one MCU calibration step of the given @type with parameter @val. */
int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
			  u32 val)
{
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(type),
		.value = cpu_to_le32(val),
	};
	struct sk_buff *skb;

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
}

/* Initialize the AGC gain for @channel; @force (BIT(31) of the channel
 * word) makes the MCU apply it unconditionally.
 */
int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
			  bool force)
{
	struct {
		__le32 channel;
		__le32 gain_val;
	} __packed __aligned(4) msg = {
		.channel = cpu_to_le32(channel),
		.gain_val = cpu_to_le32(gain),
	};
	struct sk_buff *skb;

	if (force)
		msg.channel |= cpu_to_le32(BIT(31));

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true);
}

/* Feed RSSI / false-CCA statistics into the MCU dynamic VGA loop.
 * BIT(31) of the channel word flags AP mode, BIT(30) an extension channel.
 */
int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
				bool ext, int rssi, u32 false_cca)
{
	struct {
		__le32 channel;
		__le32 rssi_val;
		__le32 false_cca_val;
	} __packed __aligned(4) msg = {
		.rssi_val = cpu_to_le32(rssi),
		.false_cca_val = cpu_to_le32(false_cca),
	};
	struct sk_buff *skb;
	u32 val = channel;

	if (ap)
		val |= BIT(31);
	if (ext)
		val |= BIT(30);
	msg.channel = cpu_to_le32(val);

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true);
}

/* Send TSSI compensation data to the MCU calibration engine. */
int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
			  struct mt76x2_tssi_comp *tssi_data)
{
	struct {
		__le32 id;
		struct mt76x2_tssi_comp data;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(MCU_CAL_TSSI_COMP),
		.data = *tssi_data,
	};
	struct sk_buff *skb;

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
}

/* Vendor request that starts the freshly downloaded firmware (IVB). */
static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
{
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR,
			     0x12, 0, NULL, 0);
}

/* Send the magic byte sequence that activates a loaded ROM patch —
 * NOTE(review): payload bytes are vendor-defined, keep verbatim.
 */
static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
{
	struct mt76_usb *usb = &dev->mt76.usb;
	const u8 data[] = {
		0x6f, 0xfc, 0x08, 0x01,
		0x20, 0x04, 0x00, 0x00,
		0x00, 0x09, 0x00,
	};

	memcpy(usb->data, data, sizeof(data));
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_CLASS,
			     0x12, 0, usb->data, sizeof(data));
}

/* Send the WMT reset command after applying a ROM patch. */
static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
{
	struct mt76_usb *usb = &dev->mt76.usb;
	u8 data[] = {
		0x6f, 0xfc, 0x05, 0x01,
		0x07, 0x01, 0x00, 0x04
	};

	memcpy(usb->data, data, sizeof(data));
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_CLASS,
			     0x12, 0, usb->data, sizeof(data));
}

/* Download and activate the MT7662U ROM patch.
 *
 * On ROM-protected parts a hardware semaphore guards the procedure and is
 * always released on exit. Skips the download if the patch-applied bit is
 * already set. Returns 0 on success or a negative errno.
 */
static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
{
	bool rom_protect = !is_mt7612(dev);
	struct mt76x2_patch_header *hdr;
	u32 val, patch_mask, patch_reg;
	const struct firmware *fw;
	int err;

	if (rom_protect &&
	    !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
		dev_err(dev->mt76.dev,
			"could not get hardware semaphore for ROM PATCH\n");
		return -ETIMEDOUT;
	}

	/* the patch-applied status bit moved on E3 and later */
	if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
		patch_mask = BIT(0);
		patch_reg = MT_MCU_CLOCK_CTL;
	} else {
		patch_mask = BIT(1);
		patch_reg = MT_MCU_COM_REG0;
	}

	if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
		dev_info(dev->mt76.dev, "ROM patch already applied\n");
		return 0;
	}

	err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
	if (err < 0)
		return err;

	if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
		dev_err(dev->mt76.dev, "failed to load firmware\n");
		err = -EIO;
		goto out;
	}

	hdr = (struct mt76x2_patch_header *)fw->data;
	dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);

	/* enable USB_DMA_CFG */
	val = MT_USB_DMA_CFG_RX_BULK_EN |
	      MT_USB_DMA_CFG_TX_BULK_EN |
	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
	/* vendor reset */
	mt76u_mcu_fw_reset(&dev->mt76);
	usleep_range(5000, 10000);
	/* enable FCE to send in-band cmd */
	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
	/* FCE tx_fs_base_ptr */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
	/* FCE tx_fs_max_cnt */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
	/* FCE pdma enable */
	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
	/* FCE skip_fs_en */
	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);

	err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
				     fw->size - sizeof(*hdr),
				     MCU_ROM_PATCH_MAX_PAYLOAD,
				     MT76U_MCU_ROM_PATCH_OFFSET);
	if (err < 0) {
		err = -EIO;
		goto out;
	}

	mt76x2u_mcu_enable_patch(dev);
	mt76x2u_mcu_reset_wmt(dev);
	mdelay(20);

	if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
		dev_err(dev->mt76.dev, "failed to load ROM patch\n");
		err = -ETIMEDOUT;
	}

out:
	/* always release the hardware semaphore */
	if (rom_protect)
		mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
	release_firmware(fw);
	return err;
}
/* Program the PHY for a new channel definition.
 *
 * Derives the hardware channel index, bandwidth code and control-channel
 * group from @chandef, reprograms gain/txpower/CCA registers, asks the MCU
 * to switch channel and runs the per-channel calibrations. During a scan
 * the heavier calibration steps are skipped.
 *
 * Returns 0 on success or a negative errno from the MCU channel switch.
 */
int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
			   struct cfg80211_chan_def *chandef)
{
	struct ieee80211_channel *chan = chandef->chan;
	bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
	enum nl80211_band band = chan->band;
	u8 channel;
	/* per-control-channel CCA routing tables; index = ch_group_index */
	u32 ext_cca_chan[4] = {
		[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
		[1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
		[2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
		[3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
	};
	int ch_group_index;
	u8 bw, bw_index;
	int freq, freq1;
	int ret;

	dev->cal.channel_cal_done = false;
	freq = chandef->chan->center_freq;
	freq1 = chandef->center_freq1;
	channel = chan->hw_value;

	/* map chandef width to bw code and shift the hw channel to the
	 * center of the wide channel
	 */
	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_40:
		bw = 1;
		if (freq1 > freq) {
			bw_index = 1;
			ch_group_index = 0;
		} else {
			bw_index = 3;
			ch_group_index = 1;
		}
		channel += 2 - ch_group_index * 4;
		break;
	case NL80211_CHAN_WIDTH_80:
		ch_group_index = (freq - freq1 + 30) / 20;
		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
			ch_group_index = 0;
		bw = 2;
		bw_index = ch_group_index;
		channel += 6 - ch_group_index * 4;
		break;
	default:
		bw = 0;
		bw_index = 0;
		ch_group_index = 0;
		break;
	}

	mt76x2_read_rx_gain(dev);
	mt76x2_phy_set_txpower_regs(dev, band);
	mt76x2_configure_tx_delay(dev, band, bw);
	mt76x2_phy_set_txpower(dev);

	mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
	mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);

	mt76_rmw(dev, MT_EXT_CCA_CFG,
		 (MT_EXT_CCA_CFG_CCA0 |
		  MT_EXT_CCA_CFG_CCA1 |
		  MT_EXT_CCA_CFG_CCA2 |
		  MT_EXT_CCA_CFG_CCA3 |
		  MT_EXT_CCA_CFG_CCA_MASK),
		 ext_cca_chan[ch_group_index]);

	ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
	if (ret)
		return ret;

	mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);

	mt76x2_phy_set_antenna(dev);

	/* Enable LDPC Rx */
	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
		mt76_set(dev, MT_BBP(RXO, 13), BIT(10));

	if (!dev->cal.init_cal_done) {
		u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);

		/* 0xff means no stored R-cal result in EEPROM */
		if (val != 0xff)
			mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
	}

	mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);

	/* Rx LPF calibration */
	if (!dev->cal.init_cal_done)
		mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0);

	dev->cal.init_cal_done = true;

	mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2);
	mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
	mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
	mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);

	if (scan)
		return 0;

	mt76x2_phy_channel_calibrate(dev, true);
	mt76x02_init_agc_gain(dev);

	/* init default values for temp compensation */
	if (mt76x2_tssi_enabled(dev)) {
		mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
			       0x38);
		mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
			       0x38);
	}

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);

	return 0;
}

/* Apply temperature-dependent TX power compensation.
 *
 * Converts the raw temperature sensor reading to degrees Celsius using the
 * EEPROM reference and slope values, clamps the resulting dB delta and
 * writes it into both ALC compensation fields.
 */
static void
mt76x2_phy_temp_compensate(struct mt76x02_dev *dev)
{
	struct mt76x2_temp_comp t;
	int temp, db_diff;

	if (mt76x2_get_temp_comp(dev, &t))
		return;

	temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL);
	temp -= t.temp_25_ref;
	/* 1.789 degC per sensor LSB, referenced to 25 degC */
	temp = (temp * 1789) / 1000 + 25;
	dev->cal.temp = temp;

	if (temp > 25)
		db_diff = (temp - 25) / t.high_slope;
	else
		db_diff = (25 - temp) / t.low_slope;

	db_diff = min(db_diff, t.upper_bound);
	db_diff = max(db_diff, t.lower_bound);

	/* register field is in 0.5 dB units */
	mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
		       db_diff * 2);
	mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
		       db_diff * 2);
}

/* Periodic calibration worker: runs channel calibration, TSSI and
 * temperature compensation, updates the AGC gain, then re-arms itself.
 */
void mt76x2_phy_calibrate(struct work_struct *work)
{
	struct mt76x02_dev *dev;

	dev = container_of(work, struct mt76x02_dev, cal_work.work);
	mt76x2_phy_channel_calibrate(dev, false);
	mt76x2_phy_tssi_compensate(dev);
	mt76x2_phy_temp_compensate(dev);
	mt76x2_phy_update_channel_gain(dev);
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);
}

/* Power on the radio and load the baseband CR table.
 *
 * NOTE(review): the mt76x2_mcu_load_cr() return value is discarded, so a
 * CR-load failure is silently ignored — confirm whether that is intended.
 */
int mt76x2_phy_start(struct mt76x02_dev *dev)
{
	int ret;

	ret = mt76x02_mcu_set_radio_state(dev, true);
	if (ret)
		return ret;

	mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);

	return ret;
}
static int mt76pci_load_firmware(struct mt76_dev *dev) { const struct firmware *fw; const struct mt76_fw_header *hdr; int i, len, ret; __le32 *cur; u32 offset, val; ret = request_firmware(&fw, MT7662_FIRMWARE, dev->dev); if (ret) return ret; if (!fw || !fw->data || fw->size < sizeof(*hdr)) goto error; hdr = (const struct mt76_fw_header *) fw->data; len = sizeof(*hdr); len += le32_to_cpu(hdr->ilm_len); len += le32_to_cpu(hdr->dlm_len); if (fw->size != len) goto error; val = le16_to_cpu(hdr->fw_ver); printk("Firmware Version: %d.%d.%02d\n", (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf); val = le16_to_cpu(hdr->build_ver); printk("Build: %x\n", val); printk("Build Time: %.16s\n", hdr->build_time); cur = (__le32 *) (fw->data + sizeof(*hdr)); len = le32_to_cpu(hdr->ilm_len); mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET); write_data(dev, MT_MCU_ILM_ADDR, cur, len); cur += len / sizeof(*cur); len = le32_to_cpu(hdr->dlm_len); if (mt76xx_rev(dev) >= MT76XX_REV_E3) offset = MT_MCU_DLM_ADDR_E3; else offset = MT_MCU_DLM_ADDR; mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET); write_data(dev, offset, cur, len); mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0); val = mt76_eeprom_get(dev, MT_EE_NIC_CONF_2); if (MT76_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1) mt76_set(dev, MT_MCU_COM_REG0, BIT(30)); /* trigger firmware */ mt76_wr(dev, MT_MCU_INT_LEVEL, 2); for (i = 200; i > 0; i--) { val = mt76_rr(dev, MT_MCU_COM_REG0); if (val & 1) break; msleep(10); } if (!i) { printk("Firmware failed to start\n"); release_firmware(fw); return -ETIMEDOUT; } printk("Firmware running!\n"); release_firmware(fw); return ret; error: printk("Invalid firmware\n"); release_firmware(fw); return -ENOENT; }